xref: /llvm-project/mlir/lib/Dialect/SCF/Transforms/TileUsingInterface.cpp (revision 63086d6aa0af9bb7fc73c670d680191ae646f7d8)
1 //===- Tiling.cpp - Implementation of tiling using TilingInterface -------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements the tiling using TilingInterface.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "mlir/Dialect/SCF/Transforms/TileUsingInterface.h"
14 
15 #include "mlir/Dialect/Affine/IR/AffineOps.h"
16 #include "mlir/Dialect/Arith/IR/Arith.h"
17 #include "mlir/Dialect/Arith/Utils/Utils.h"
18 #include "mlir/Dialect/Func/IR/FuncOps.h"
19 #include "mlir/Dialect/SCF/Utils/Utils.h"
20 #include "mlir/Dialect/Tensor/IR/Tensor.h"
21 #include "mlir/Dialect/Utils/IndexingUtils.h"
22 #include "mlir/IR/Matchers.h"
23 #include "mlir/IR/PatternMatch.h"
24 #include "mlir/Interfaces/DestinationStyleOpInterface.h"
25 #include "mlir/Interfaces/TilingInterface.h"
26 #include "llvm/Support/Debug.h"
27 #include <optional>
28 
29 #define DEBUG_TYPE "tile-using-interface"
30 
31 using namespace mlir;
32 
33 scf::SCFTilingOptions &
34 scf::SCFTilingOptions::setTileSizes(ArrayRef<OpFoldResult> ts) {
35   assert(!tileSizeComputationFunction && "tile sizes already set");
36   auto tileSizes = llvm::to_vector(ts);
37   tileSizeComputationFunction = [tileSizes](OpBuilder &b, Operation *op) {
38     return tileSizes;
39   };
40   return *this;
41 }
42 
43 /// Helper method to adjust the interchange vector to match the iteration
44 /// domain.
45 static SmallVector<int64_t>
46 fillInterchangeVector(ArrayRef<int64_t> interchangeVector,
47                       size_t iterationDomainSize) {
48   SmallVector<int64_t> filledVector = llvm::to_vector(interchangeVector);
49   if (filledVector.size() < iterationDomainSize) {
50     auto range = llvm::seq<int64_t>(filledVector.size(), iterationDomainSize);
51     filledVector.append(range.begin(), range.end());
52   }
53   if (filledVector.size() > iterationDomainSize)
54     filledVector.resize(iterationDomainSize);
55   return filledVector;
56 }
57 
58 /// Convert a list of ops of type `SrcOpTy` to list of `Operation *`.
59 template <typename SrcOpTy>
60 static SmallVector<Operation *> getAsOperations(ArrayRef<SrcOpTy> ops) {
61   return llvm::to_vector(
62       llvm::map_range(ops, [](auto op) -> Operation * { return op; }));
63 }
64 template <typename SrcOpTy>
65 static SmallVector<Operation *>
66 getAsOperations(const SmallVector<SrcOpTy> &ops) {
67   return getAsOperations(ArrayRef<SrcOpTy>(ops));
68 }
69 
70 /// Convert a list of `Operation *` to a list of `DstOpTy.
71 template <typename DstOpTy>
72 static SmallVector<DstOpTy> castToTypedOperations(ArrayRef<Operation *> ops) {
73   return llvm::to_vector(
74       llvm::map_range(ops, [](Operation *op) { return cast<DstOpTy>(op); }));
75 }
76 template <typename DstOpTy>
77 static SmallVector<DstOpTy>
78 castToTypedOperations(const SmallVector<Operation *> &ops) {
79   return castToTypedOperations<DstOpTy>(ArrayRef<Operation *>(ops));
80 }
81 
82 //===----------------------------------------------------------------------===//
83 // tileUsingSCFForOp implementation.
84 //===----------------------------------------------------------------------===//
85 
86 // Check if `stride` evenly divides the trip count `size - offset`.
87 static bool tileDividesIterationDomain(Range loopRange) {
88   std::optional<int64_t> offsetAsInt = getConstantIntValue(loopRange.offset);
89   if (!offsetAsInt)
90     return false;
91   std::optional<int64_t> sizeAsInt = getConstantIntValue(loopRange.size);
92   if (!sizeAsInt)
93     return false;
94   std::optional<int64_t> strideAsInt = getConstantIntValue(loopRange.stride);
95   if (!strideAsInt)
96     return false;
97   return ((sizeAsInt.value() - offsetAsInt.value()) % strideAsInt.value() == 0);
98 }
99 
/// Returns the bounded tile size given the current `iv`, `loopRange` and
/// `tileSize`, i.e., `min(tileSize, range.end() - iv)`.
static OpFoldResult getBoundedTileSize(OpBuilder &b, Location loc,
                                       Range loopRange, Value iv,
                                       OpFoldResult tileSize) {
  // A tile size of 1 can never overrun the iteration domain.
  if (isConstantIntValue(tileSize, 1))
    return tileSize;

  // If the tile size statically divides the trip count, every iteration
  // processes a full tile and no clamping is needed.
  if (tileDividesIterationDomain(
          Range{loopRange.offset, loopRange.size, tileSize}))
    return tileSize;

  // The tile size to use (to avoid out of bounds access) is the minimum of
  // `tileSize` and `ub - iv`, where `iv` is the induction variable of the
  // tiled loop.
  // The map below is min(s0, s1 - d0) with operands {d0 = iv, s0 = tileSize,
  // s1 = loop upper bound} — note the operand list is ordered dims first,
  // then symbols.
  AffineExpr s0, s1, d0;
  bindDims(b.getContext(), d0);
  bindSymbols(b.getContext(), s0, s1);
  AffineMap minMap = AffineMap::get(1, 2, {s0, s1 - d0}, b.getContext());
  Value size = getValueOrCreateConstantIndexOp(b, loc, loopRange.size);
  return affine::makeComposedFoldedAffineMin(
      b, loc, minMap, SmallVector<OpFoldResult>{iv, tileSize, size});
}
123 
124 /// Generate an empty loop nest that represents the tiled loop nest shell.
125 /// - `loopRanges` specifies the lb, ub and step of the untiled iteration space.
126 /// - `tileSizes` is the tile sizes to use. Zero represent untiled loops.
127 /// - In `offsets` and `sizes` return the multi-dimensional offset and size of
128 /// the
129 ///   tile processed within the inner most loop.
130 static SmallVector<scf::ForOp> generateTileLoopNest(
131     OpBuilder &builder, Location loc, ArrayRef<Range> loopRanges,
132     ArrayRef<OpFoldResult> tileSizes, SmallVector<OpFoldResult> &offsets,
133     SmallVector<OpFoldResult> &sizes) {
134   assert(!loopRanges.empty() && "expected at least one loop range");
135   assert(loopRanges.size() == tileSizes.size() &&
136          "expected as many tile sizes as loop ranges");
137   OpBuilder::InsertionGuard guard(builder);
138   SmallVector<scf::ForOp> loops;
139   offsets.resize(loopRanges.size());
140   sizes.resize(loopRanges.size());
141 
142   for (auto loopRange : llvm::enumerate(loopRanges)) {
143     Value offset =
144         getValueOrCreateConstantIndexOp(builder, loc, loopRange.value().offset);
145     Value size =
146         getValueOrCreateConstantIndexOp(builder, loc, loopRange.value().size);
147     Value tileSize = getValueOrCreateConstantIndexOp(
148         builder, loc, tileSizes[loopRange.index()]);
149     // No loops if tile size is zero. Set offset and size to the loop
150     // offset and size.
151     if (matchPattern(tileSize, m_Zero())) {
152       offsets[loopRange.index()] = offset;
153       sizes[loopRange.index()] = size;
154       continue;
155     }
156 
157     auto loop = builder.create<scf::ForOp>(
158         loc, offset, size, tileSize, ValueRange{},
159         [&](OpBuilder &bodyBuilder, Location bodyLoc, Value iv,
160             ValueRange /*iterArgs*/) {
161           sizes[loopRange.index()] = getBoundedTileSize(
162               bodyBuilder, bodyLoc, loopRange.value(), iv, tileSize);
163           builder.create<scf::YieldOp>(loc);
164         });
165     offsets[loopRange.index()] = loop.getInductionVar();
166     loops.push_back(loop);
167     builder.setInsertionPoint(loop.getBody()->getTerminator());
168   }
169   return loops;
170 }
171 
/// For a value to be yielded (`yieldedValue`) from within a loop nest `loops`,
/// construct the destructive update pattern that inserts the yielded
/// value into a destination tensor provided by `initValue` at offset
/// `tileOffsets` and size `tileSizes`. For example,
///
/// ```mlir
/// scf.for %iv0 = ... {
///   %0 = tiled_op
/// }
/// ```
///
/// is transformed to
///
/// ```mlir
/// scf.for %iv0 = ... iter_args(%arg = %0) {
///   %1 = tensor.extract_slice %arg
///   %2 = tiled_op
///   %3 = tensor.insert_slice %2 into %arg
///   scf.yield %3
/// }
/// ```
/// Returns the new results of the outermost loop that replace the untiled
/// values. `loops` is updated in place to point at the recreated loops.
/// TODO: This API can be cleaned up by using `SubsetExtractOpInterface`.
static SmallVector<Value>
yieldTiledValues(RewriterBase &rewriter, ValueRange initValues,
                 ValueRange yieldedValues,
                 ArrayRef<SmallVector<OpFoldResult>> tileOffsetsList,
                 ArrayRef<SmallVector<OpFoldResult>> tileSizesList,
                 MutableArrayRef<scf::ForOp> loops) {
  // Callback used while recreating the loop nest with new iter_args: insert
  // each yielded value into its corresponding new block argument at the
  // tile's offset/size, with unit strides.
  NewYieldValuesFn yieldValueFn =
      [&](OpBuilder &b, Location loc,
          ArrayRef<BlockArgument> newBBArgs) -> SmallVector<Value> {
    SmallVector<Value> inserts;
    for (const auto &yieldedValue : llvm::enumerate(yieldedValues)) {
      ArrayRef<OpFoldResult> tileOffsets =
          tileOffsetsList[yieldedValue.index()];
      ArrayRef<OpFoldResult> tileSizes = tileSizesList[yieldedValue.index()];
      SmallVector<OpFoldResult> tileStrides(tileOffsets.size(),
                                            b.getIndexAttr(1));
      Value insert = b.create<tensor::InsertSliceOp>(
          loc, yieldedValue.value(), newBBArgs[yieldedValue.index()],
          tileOffsets, tileSizes, tileStrides);
      inserts.push_back(insert);
    }
    return inserts;
  };

  // Recreate the loop nest with the additional iter_args/yields. The original
  // loops are replaced, so refresh the caller's `loops` to the new ops.
  SmallVector<scf::ForOp> newLoops =
      replaceLoopNestWithNewYields(rewriter, loops, initValues, yieldValueFn,
                                   /*replaceIterOperandsUsesInLoop =*/false);
  for (const auto &loop : llvm::enumerate(loops)) {
    loops[loop.index()] = newLoops[loop.index()];
  }
  // The trailing results of the outermost loop are the newly yielded values.
  return llvm::to_vector(llvm::map_range(
      loops.front().getResults().take_back(yieldedValues.size()),
      [](OpResult r) -> Value { return r; }));
}
228 
229 /// If the tiled operation is destination passing style, update the
230 /// slice of the destination used (which refers to the untiled destination)
231 /// to use the corresponding region argument of the innermost loop.
232 ///
233 /// ```mlir
234 /// %0 =
235 /// scf.for %iv0 = ... iter_args(%arg = %0) {
236 ///   %1 = tensor.extract_slice %0
237 ///   %2 = tiled_op
238 ///   %3 = tensor.insert_slice %2 into %arg
239 ///   scf.yield %3
240 /// }
241 /// ```
242 ///
243 /// is transformed to
244 ///
245 /// ```mlir
246 /// scf.for %iv0 = ... iter_args(%arg = %0) {
247 ///   %1 = tensor.extract_slice %arg
248 ///   %2 = tiled_op
249 ///   %3 = tensor.insert_slice %2 into %arg
250 ///   scf.yield %3
251 /// }
252 /// ```
253 static void
254 updateDestinationOperandsForTiledOp(OpBuilder &builder,
255                                     ValueRange tiledOpDestinationValues,
256                                     ValueRange bbArgsList) {
257   for (const auto &destValue : llvm::enumerate(tiledOpDestinationValues)) {
258     auto sliceOp = destValue.value().getDefiningOp<tensor::ExtractSliceOp>();
259     if (!sliceOp)
260       continue;
261     sliceOp.setOperand(0, bbArgsList[destValue.index()]);
262   }
263 }
264 
265 /// Helper method to yield the values of the tiled op, as well as
266 /// update the destination operands of the tiled op, if it is
267 /// a destination passing style op.
268 static SmallVector<Value>
269 yieldTiledValues(RewriterBase &rewriter, ArrayRef<Value> initValues,
270                  TilingResult tilingResult,
271                  ArrayRef<SmallVector<OpFoldResult>> tileOffsetsList,
272                  ArrayRef<SmallVector<OpFoldResult>> tileSizesList,
273                  MutableArrayRef<scf::ForOp> loops) {
274   SmallVector<Value> replacements =
275       yieldTiledValues(rewriter, initValues, tilingResult.tiledValues,
276                        tileOffsetsList, tileSizesList, loops);
277   for (auto tiledOp : tilingResult.tiledOps) {
278     if (auto dstOp = dyn_cast<DestinationStyleOpInterface>(tiledOp)) {
279       auto innerMostLoop = loops.back();
280       SmallVector<Value> tiledOpDestinationTensors =
281           llvm::to_vector(dstOp.getDpsInits());
282       updateDestinationOperandsForTiledOp(rewriter, tiledOpDestinationTensors,
283                                           innerMostLoop.getRegionIterArgs());
284     }
285   }
286   return replacements;
287 }
288 
289 /// Implementation of tiling transformation of `op` that implements the
290 /// `TilingInterface` using `scf.for` to iterate over the tiles.
291 FailureOr<scf::SCFTilingResult>
292 mlir::scf::tileUsingSCFForOp(RewriterBase &rewriter, TilingInterface op,
293                              const scf::SCFTilingOptions &options) {
294   OpBuilder::InsertionGuard guard(rewriter);
295   rewriter.setInsertionPointAfter(op);
296 
297   if (!options.tileSizeComputationFunction) {
298     return rewriter.notifyMatchFailure(
299         op, "missing tile size computation function");
300   }
301 
302   // 1. Get the range of the loops that are represented by the operation.
303   SmallVector<Range> iterationDomain = op.getIterationDomain(rewriter);
304   size_t numLoops = iterationDomain.size();
305   if (numLoops == 0) {
306     return rewriter.notifyMatchFailure(
307         op, "unable to tile op with no iteration domain");
308   }
309 
310   // 2. Materialize the tile sizes. Enforce the convention that "tiling by zero"
311   // skips tiling a particular dimension. This convention is significantly
312   // simpler to handle instead of adjusting affine maps to account for missing
313   // dimensions.
314   SmallVector<OpFoldResult> tileSizeVector =
315       options.tileSizeComputationFunction(rewriter, op);
316   if (tileSizeVector.size() < iterationDomain.size()) {
317     auto zero = rewriter.getIndexAttr(0);
318     tileSizeVector.append(numLoops - tileSizeVector.size(), zero);
319   }
320 
321   SmallVector<OpFoldResult> offsets, sizes;
322   SmallVector<scf::ForOp> forLoops;
323   {
324     // If there is an interchange specified, permute the iteration domain and
325     // the tile sizes.
326     SmallVector<int64_t> interchangeVector;
327     if (!options.interchangeVector.empty()) {
328       interchangeVector = fillInterchangeVector(options.interchangeVector,
329                                                 iterationDomain.size());
330     }
331     if (!interchangeVector.empty()) {
332       if (!isPermutationVector(interchangeVector)) {
333         return rewriter.notifyMatchFailure(
334             op, "invalid intechange vector, not a permutation of the entire "
335                 "iteration space");
336       }
337 
338       applyPermutationToVector(iterationDomain, interchangeVector);
339       applyPermutationToVector(tileSizeVector, interchangeVector);
340     }
341 
342     // 3. Materialize an empty loop nest that iterates over the tiles. These
343     // loops for now do not return any values even if the original operation has
344     // results.
345     forLoops = generateTileLoopNest(rewriter, op.getLoc(), iterationDomain,
346                                     tileSizeVector, offsets, sizes);
347 
348     if (!interchangeVector.empty()) {
349       auto inversePermutation = invertPermutationVector(interchangeVector);
350       applyPermutationToVector(offsets, inversePermutation);
351       applyPermutationToVector(sizes, inversePermutation);
352     }
353   }
354 
355   LLVM_DEBUG({
356     if (!forLoops.empty()) {
357       llvm::dbgs() << "LoopNest shell :\n";
358       forLoops.front().dump();
359       llvm::dbgs() << "\n";
360     }
361   });
362 
363   // 4. Generate the tiled implementation within the inner most loop.
364   if (!forLoops.empty())
365     rewriter.setInsertionPoint(forLoops.back().getBody()->getTerminator());
366   FailureOr<TilingResult> tiledImplementation =
367       op.getTiledImplementation(rewriter, offsets, sizes);
368 
369   if (op->getNumResults() == 0) {
370     return scf::SCFTilingResult{
371         tiledImplementation->tiledOps, getAsOperations(forLoops), {}};
372   }
373 
374   // If loops are empty, the tiled op is used as the replacement for the untiled
375   // op.
376   if (forLoops.empty()) {
377     return scf::SCFTilingResult{tiledImplementation->tiledOps,
378                                 getAsOperations(forLoops),
379                                 tiledImplementation->tiledValues};
380   }
381 
382   // 5. Yield all the results of the tiled operation. The surrounding loop
383   //    nest is modified to insert a destructive update pattern to yield
384   //    from the loop nest values to replace the untiled op with.
385   int64_t numResults = op->getNumResults();
386   SmallVector<SmallVector<OpFoldResult>> resultOffsetsList(numResults),
387       resultSizesList(numResults);
388   for (const auto &result : llvm::enumerate(op->getResults())) {
389     if (failed(op.getResultTilePosition(rewriter, result.index(), offsets,
390                                         sizes,
391                                         resultOffsetsList[result.index()],
392                                         resultSizesList[result.index()]))) {
393       return rewriter.notifyMatchFailure(
394           op, "failed to get slice of result produced");
395     }
396   }
397 
398   SmallVector<Value> destinationTensors;
399   if (failed(tensor::getOrCreateDestinations(rewriter, op.getLoc(), op,
400                                              destinationTensors)))
401     return rewriter.notifyMatchFailure(op, "failed to get destinations");
402 
403   SmallVector<Value> replacements = yieldTiledValues(
404       rewriter, destinationTensors, tiledImplementation.value(),
405       resultOffsetsList, resultSizesList, forLoops);
406   LLVM_DEBUG({
407     if (!forLoops.empty()) {
408       llvm::dbgs() << "After tiled implementation :\n";
409       forLoops.front().dump();
410       llvm::dbgs() << "\n";
411     }
412   });
413   return scf::SCFTilingResult{tiledImplementation->tiledOps,
414                               getAsOperations(forLoops), replacements};
415 }
416 
417 FailureOr<scf::SCFReductionTilingResult>
418 mlir::scf::tileReductionUsingScf(RewriterBase &b,
419                                  PartialReductionOpInterface op,
420                                  ArrayRef<OpFoldResult> tileSizes) {
421   Location loc = op.getLoc();
422   // Ops implementing PartialReductionOpInterface are expected to implement
423   // TilingInterface.
424   auto tilingInterfaceOp = cast<TilingInterface>(op.getOperation());
425   SmallVector<Range> iterationDomain = tilingInterfaceOp.getIterationDomain(b);
426   auto tileSizesVector = llvm::to_vector(tileSizes);
427   if (tileSizesVector.size() < iterationDomain.size()) {
428     auto zero = b.getIndexAttr(0);
429     tileSizesVector.append(iterationDomain.size() - tileSizesVector.size(),
430                            zero);
431   }
432   if (op->getNumResults() != 1)
433     return b.notifyMatchFailure(
434         op, "don't support ops with multiple results for now");
435   SmallVector<utils::IteratorType> iterators =
436       tilingInterfaceOp.getLoopIteratorTypes();
437 
438   SmallVector<int> reductionDims;
439   for (auto [idx, iteratorType] :
440        llvm::enumerate(tilingInterfaceOp.getLoopIteratorTypes())) {
441     if (iteratorType == utils::IteratorType::reduction)
442       reductionDims.push_back(idx);
443   }
444 
445   // 1. create the inital tensor value.
446   FailureOr<Operation *> identityTensor =
447       op.generateInitialTensorForPartialReduction(b, loc, tileSizesVector,
448                                                   reductionDims);
449   if (failed(identityTensor))
450     return b.notifyMatchFailure(op,
451                                 "cannot create a tensor of identity value.");
452   // 2. Create the nested loops.
453   SmallVector<OpFoldResult> offsets, sizes;
454   SmallVector<scf::ForOp> loops = generateTileLoopNest(
455       b, loc, iterationDomain, tileSizesVector, offsets, sizes);
456 
457   // 3. Generate the tiled implementation within the inner most loop.
458   b.setInsertionPoint(loops.back().getBody()->getTerminator());
459   Operation *parallelOp = op.tileToPartialReduction(
460       b, loc, (*identityTensor)->getResults(), offsets, sizes, reductionDims);
461 
462   SmallVector<OpFoldResult> resultSizesList;
463   for (size_t i = 0; i < offsets.size(); i++)
464     resultSizesList.push_back(
465         tensor::getMixedSize(b, loc, parallelOp->getResult(0), i));
466   SmallVector<OpFoldResult> outOffsets(offsets.size(), b.getIndexAttr(0));
467   SmallVector<Value> replacements = yieldTiledValues(
468       b, (*identityTensor)->getResults(), parallelOp->getResults(), outOffsets,
469       resultSizesList, loops);
470 
471   auto dstOp = cast<DestinationStyleOpInterface>(parallelOp);
472   auto innerMostLoop = loops.back();
473   SmallVector<Value> destinationTensors = llvm::to_vector(dstOp.getDpsInits());
474   assert(destinationTensors.size() ==
475              innerMostLoop.getRegionIterArgs().size() &&
476          "unexpected number of outputs");
477   updateDestinationOperandsForTiledOp(b, destinationTensors,
478                                       innerMostLoop.getRegionIterArgs());
479 
480   // 4. Apply the merge reduction to combine all the partial values.
481   b.setInsertionPointAfter(*loops.begin());
482   Operation *mergeOp = op.mergeReductions(b, loc, replacements, reductionDims);
483   b.replaceOp(op, mergeOp->getResults());
484 
485   SCFReductionTilingResult results;
486   results.initialOp = *identityTensor;
487   results.loops = std::move(loops);
488   results.parallelTiledOp = parallelOp;
489   results.mergeOp = mergeOp;
490   return results;
491 }
492 
493 //===----------------------------------------------------------------------===//
494 // tileConsumerAndFuseProducerGreedilyUsingSCFForOp implementation.
495 //===----------------------------------------------------------------------===//
496 
497 /// Return the untiled producer whose slice is used in a tiled consumer. The
498 /// method traverses the tile loop nest (`loops`) if needed, and returns the
499 /// `iter_args` of the outer most that is encountered. Traversing the iter_args
500 /// indicates that this is a destination operand of the consumer. If there was
501 /// no loop traversal needed, the second value of the returned tuple is empty.
502 static std::tuple<OpResult, std::optional<OpOperand *>>
503 getUntiledProducerFromSliceSource(OpOperand *source,
504                                   ArrayRef<scf::ForOp> loops) {
505   std::optional<OpOperand *> destinationIterArg;
506   auto loopIt = loops.rbegin();
507   while (auto iterArg = dyn_cast<BlockArgument>(source->get())) {
508     scf::ForOp loop = *loopIt;
509     if (iterArg.getOwner()->getParentOp() != loop)
510       break;
511     source = &loop.getOpOperandForRegionIterArg(iterArg);
512     loopIt++;
513   }
514   if (loopIt == loops.rend())
515     destinationIterArg = source;
516   return {dyn_cast<OpResult>(source->get()), destinationIterArg};
517 }
518 
/// Implementation of fusing producer of a single slice by computing the
/// slice of the producer in-place.
std::optional<scf::SCFFuseProducerOfSliceResult>
mlir::scf::tileAndFuseProducerOfSlice(RewriterBase &rewriter,
                                      tensor::ExtractSliceOp candidateSliceOp,
                                      MutableArrayRef<scf::ForOp> loops) {
  // 1. Get the producer of the source (potentially walking through
  // `iter_args` of nested `scf.for`)
  auto [fusableProducer, destinationInitArg] =
      getUntiledProducerFromSliceSource(&candidateSliceOp.getSourceMutable()[0],
                                        loops);
  if (!fusableProducer)
    return std::nullopt;

  // 2. Generate the tiled implementation of the producer of the source
  OpBuilder::InsertionGuard g(rewriter);
  rewriter.setInsertionPoint(candidateSliceOp);
  FailureOr<TilingResult> tileAndFuseResult =
      tensor::replaceExtractSliceWithTiledProducer(rewriter, candidateSliceOp,
                                                   fusableProducer);
  if (failed(tileAndFuseResult))
    return std::nullopt;
  // All uses of the extracted slice now flow from the in-place tiled producer
  // value instead.
  rewriter.replaceAllUsesWith(candidateSliceOp,
                              tileAndFuseResult->tiledValues[0]);

  // 3. If the slice is for a destination operand, for example,
  //
  // ```mlir
  // %0 = linalg.init
  // %1 = linalg.fill .. outs(%0 : )
  // %2 = scf.for .. iter_args(%arg0 = %1) {
  //   %3 = scf.for .. iter_args(%arg1 = %arg0) {
  //     %4 = tensor.extract_slice %arg1 [..]
  //     .. = linalg.matmul .. outs(%4 : )
  //   }
  // }
  // ```
  //
  // the IR is currently
  //
  // ```
  // %0 = linalg.init
  // %1 = linalg.fill
  // %2 = scf.for .. iter_args(%arg0 = %1 /* incorrect value */ ) {
  //   %3 = scf.for .. iter_args(%arg1 = %arg0) {
  //     %4 = tensor.extract_slice %0 /*incorrect value */ [..]
  //     %5 = linalg.fill .. outs(%4 : )
  //     .. = linalg.matmul .. outs(%5 : )
  //   }
  // }
  // ```
  //
  // The untiled `linalg.fill` is still used as the `init_value` since it
  // was originally a destination operand of the untiled `linalg.matmul`.
  // When fusing an operand that is a destination operand.
  //   - Update the iter_arg of the outer most loop to use the destination
  //     of the untiled producer.
  //   - Update the destination of the slice of the tiled producer generated
  //     to use the same basic block argument as the slice that was used to
  //     generate inplace the tiled implementation of the producer.
  // With this the IR will be.
  //
  // ```
  // %0 = linalg.init
  // %1 = scf.for .. iter_args(%arg0 = %0 /* corrected value */ ) {
  //   %2 = scf.for .. iter_args(%arg1 = %arg0) {
  //     %3 = tensor.extract_slice %arg1 /* corrected value */ [..]
  //     %4 = linalg.fill .. outs(%3 : )
  //     .. = linalg.matmul .. outs(%4 : )
  //   }
  // }
  // ```
  // TODO: This can be modeled better using the `DestinationStyleOpInterface`.
  // Update to use that when it does become available.
  scf::ForOp outerMostLoop = loops.front();
  if (destinationInitArg &&
      (*destinationInitArg)->getOwner() == outerMostLoop) {
    // The result/iter_arg position in the loop nest that corresponds to the
    // fused destination operand.
    unsigned iterArgNumber =
        outerMostLoop.getResultForOpOperand(**destinationInitArg)
            .getResultNumber();
    int64_t resultNumber = fusableProducer.getResultNumber();
    if (auto dstOp =
            dyn_cast<DestinationStyleOpInterface>(fusableProducer.getOwner())) {
      // Re-seed the outermost iter_arg with the untiled producer's own
      // destination, so the untiled producer is no longer the init value.
      (*destinationInitArg)
          ->set(dstOp.getTiedOpOperand(fusableProducer)->get());
    }
    for (auto tileAndFusedOp : tileAndFuseResult->tiledOps) {
      auto dstOp = dyn_cast<DestinationStyleOpInterface>(tileAndFusedOp);
      if (!dstOp)
        continue;
      scf::ForOp innerMostLoop = loops.back();
      updateDestinationOperandsForTiledOp(
          rewriter, dstOp.getDpsInitOperand(resultNumber)->get(),
          innerMostLoop.getRegionIterArgs()[iterArgNumber]);
    }
  }
  return scf::SCFFuseProducerOfSliceResult{fusableProducer,
                                           tileAndFuseResult->tiledValues[0],
                                           tileAndFuseResult->tiledOps};
}
619 
/// Reconstruct the fused producer from within the tiled-and-fused code: add
/// the fused producer's tile as an extra yielded value of the loop nest so a
/// replacement for the original producer is available outside the loops.
void mlir::scf::yieldReplacementForFusedProducer(
    RewriterBase &rewriter, tensor::ExtractSliceOp sliceOp,
    scf::SCFFuseProducerOfSliceResult fusedProducerInfo,
    MutableArrayRef<scf::ForOp> loops) {
  auto [fusableProducer, fusedProducerValue, tileAndFusedOps] =
      fusedProducerInfo;
  // NOTE(review): `initValues` is never used below — candidate for removal.
  SmallVector<Value> initValues;
  // Create (or find) a destination tensor for the producer result; it becomes
  // the init value of the newly added iter_arg.
  FailureOr<Value> initValue = tensor::getOrCreateDestination(
      rewriter, fusableProducer.getOwner()->getLoc(), fusableProducer);
  if (succeeded(initValue)) {
    // The producer tile occupies the same region of the destination as the
    // slice that was fused away.
    SmallVector<OpFoldResult> resultOffsets = sliceOp.getMixedOffsets();
    SmallVector<OpFoldResult> resultSizes = sliceOp.getMixedSizes();
    SmallVector<Value> yieldedVals =
        yieldTiledValues(rewriter, initValue.value(), fusedProducerValue,
                         resultOffsets, resultSizes, loops);
  }
  for (auto tileAndFusedOp : tileAndFusedOps) {
    auto dstStyleProducer =
        dyn_cast<DestinationStyleOpInterface>(tileAndFusedOp);
    if (!dstStyleProducer)
      continue;
    // Redirect the tiled producer's destination to the region iter_arg that
    // was just appended (the last iter_arg of the innermost loop).
    Value dstValue =
        dstStyleProducer.getDpsInitOperand(fusableProducer.getResultNumber())
            ->get();
    updateDestinationOperandsForTiledOp(
        rewriter, dstValue, loops.back().getRegionIterArgs().back());
  }
}
649 
/// Implementation of tile consumer and fuse producer greedily.
FailureOr<scf::SCFTileAndFuseResult>
mlir::scf::tileConsumerAndFuseProducerGreedilyUsingSCFForOp(
    RewriterBase &rewriter, TilingInterface consumer,
    const scf::SCFTileAndFuseOptions &options) {
  // This transformation is only valid for ops that return values (i.e. not
  // valid to use with operations that have memref operands).
  if (!consumer->getNumResults()) {
    return rewriter.notifyMatchFailure(
        consumer, "invalid pattern for op with no results");
  }

  // 1. First tile the consumer.
  SmallVector<scf::ForOp> forLoops;
  SetVector<Operation *> fusedProducers, tiledAndFusedOps;
  DenseMap<Value, Value> replacements;
  llvm::SmallDenseMap<Value, int64_t> yieldedValueToResultNumber;
  {
    FailureOr<scf::SCFTilingResult> tilingResult =
        tileUsingSCFForOp(rewriter, consumer, options.tilingOptions);
    if (failed(tilingResult))
      return rewriter.notifyMatchFailure(consumer, "failed to tile consumer");
    for (auto *tiledOp : tilingResult->tiledOps)
      tiledAndFusedOps.insert(tiledOp);
    forLoops = castToTypedOperations<scf::ForOp>(tilingResult->loops);
    // Record the replacement for each original result, and remember which
    // result of the tiled op yields it.
    for (auto [index, origValue, replacement] :
         llvm::enumerate(consumer->getResults(), tilingResult->replacements)) {
      replacements[origValue] = replacement;
      yieldedValueToResultNumber[tilingResult->tiledOps.back()->getResult(
          index)] = index;
    }
  }

  // If there are no loops generated, fusion is immaterial.
  if (forLoops.empty()) {
    return scf::SCFTileAndFuseResult{fusedProducers, tiledAndFusedOps,
                                     getAsOperations(forLoops), replacements};
  }

  // 2. Typically, the operands of the tiled operation are slices of the
  //    operands of the untiled operation. These are expressed in IR using
  //    `tensor.extract_slice` operations with source being the operands of the
  //    untiled operation. Create a worklist of these `tensor.extract_slice`
  //    operations. If the producers of the source of the `tensor.extract_slice`
  //    can be tiled such that the tiled value is generated in-place, that
  //    effectively tiles + fuses the operations.
  auto addCandidateSlices = [](Operation *fusedOp,
                               std::deque<tensor::ExtractSliceOp> &candidates) {
    for (Value operand : fusedOp->getOperands())
      if (auto sliceOp = operand.getDefiningOp<tensor::ExtractSliceOp>())
        candidates.push_back(sliceOp);
  };

  std::deque<tensor::ExtractSliceOp> candidates;
  addCandidateSlices(tiledAndFusedOps.back(), candidates);
  OpBuilder::InsertionGuard g(rewriter);
  while (!candidates.empty()) {
    // Traverse the slices in BFS fashion.
    tensor::ExtractSliceOp candidateSliceOp = candidates.front();
    candidates.pop_front();

    // The operands of the fused producer might themselves be slices of
    // values produced by operations that implement the `TilingInterface`.
    // Add these operations to the worklist.
    std::optional<scf::SCFFuseProducerOfSliceResult> fusedResult =
        tileAndFuseProducerOfSlice(rewriter, candidateSliceOp, forLoops);
    if (!fusedResult)
      continue;

    if (Operation *tiledAndFusedOp =
            fusedResult->tiledAndFusedProducer.getDefiningOp()) {
      fusedProducers.insert(fusedResult->origProducer.getDefiningOp());
      tiledAndFusedOps.insert(tiledAndFusedOp);
      addCandidateSlices(tiledAndFusedOp, candidates);
    }
  }
  return scf::SCFTileAndFuseResult{fusedProducers, tiledAndFusedOps,
                                   getAsOperations(forLoops), replacements};
}
729 
730 //===----------------------------------------------------------------------===//
731 // lowerToLoopsUsingSCFForOp implementation.
732 //===----------------------------------------------------------------------===//
733 
734 FailureOr<SmallVector<scf::ForOp>>
735 mlir::scf::lowerToLoopsUsingSCFForOp(RewriterBase &rewriter,
736                                      TilingInterface op) {
737   // TODO: Handle cases where the op has results if needed.
738   if (op->getNumResults() > 0) {
739     return rewriter.notifyMatchFailure(
740         op, "unable to lower to loops operations with return values");
741   }
742 
743   SmallVector<Range> domain = op.getIterationDomain(rewriter);
744   SmallVector<Value> ivs;
745   SmallVector<scf::ForOp> loops;
746   Location loc = op.getLoc();
747   for (auto loopRange : domain) {
748     Value offsetVal =
749         getValueOrCreateConstantIndexOp(rewriter, loc, loopRange.offset);
750     Value sizeVal =
751         getValueOrCreateConstantIndexOp(rewriter, loc, loopRange.size);
752     Value strideVal =
753         getValueOrCreateConstantIndexOp(rewriter, loc, loopRange.stride);
754     auto loop = rewriter.create<scf::ForOp>(op.getLoc(), offsetVal, sizeVal,
755                                             strideVal, ValueRange{});
756     loops.push_back(loop);
757     ivs.push_back(loop.getInductionVar());
758     rewriter.setInsertionPoint(loop.getBody()->getTerminator());
759   }
760   if (failed(op.generateScalarImplementation(rewriter, op.getLoc(), ivs))) {
761     return failure();
762   }
763   return loops;
764 }
765