xref: /llvm-project/mlir/lib/Dialect/SCF/Transforms/TileUsingInterface.cpp (revision 4a020018ce7abdee21e976f7ed5746ef2eb2c0fd)
//===- TileUsingInterface.cpp - Tiling using TilingInterface --------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements the tiling using TilingInterface.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "mlir/Dialect/SCF/Transforms/TileUsingInterface.h"
14 
15 #include "mlir/Dialect/Affine/IR/AffineOps.h"
16 #include "mlir/Dialect/Arith/IR/Arith.h"
17 #include "mlir/Dialect/Arith/Utils/Utils.h"
18 #include "mlir/Dialect/Func/IR/FuncOps.h"
19 #include "mlir/Dialect/SCF/Utils/Utils.h"
20 #include "mlir/Dialect/Tensor/IR/Tensor.h"
21 #include "mlir/Dialect/Utils/IndexingUtils.h"
22 #include "mlir/IR/Matchers.h"
23 #include "mlir/IR/PatternMatch.h"
24 #include "mlir/Interfaces/DestinationStyleOpInterface.h"
25 #include "mlir/Interfaces/TilingInterface.h"
26 #include "llvm/Support/Debug.h"
27 #include <optional>
28 
29 #define DEBUG_TYPE "tile-using-interface"
30 
31 using namespace mlir;
32 
33 scf::SCFTilingOptions &
34 scf::SCFTilingOptions::setTileSizes(ArrayRef<OpFoldResult> ts) {
35   assert(!tileSizeComputationFunction && "tile sizes already set");
36   auto tileSizes = llvm::to_vector(ts);
37   tileSizeComputationFunction = [tileSizes](OpBuilder &b, Operation *op) {
38     return tileSizes;
39   };
40   return *this;
41 }
42 
43 /// Helper method to adjust the interchange vector to match the iteration
44 /// domain.
45 static SmallVector<int64_t>
46 fillInterchangeVector(ArrayRef<int64_t> interchangeVector,
47                       size_t iterationDomainSize) {
48   SmallVector<int64_t> filledVector = llvm::to_vector(interchangeVector);
49   if (filledVector.size() < iterationDomainSize) {
50     auto range = llvm::seq<int64_t>(filledVector.size(), iterationDomainSize);
51     filledVector.append(range.begin(), range.end());
52   }
53   if (filledVector.size() > iterationDomainSize)
54     filledVector.resize(iterationDomainSize);
55   return filledVector;
56 }
57 
58 /// Convert a list of ops of type `SrcOpTy` to list of `Operation *`.
59 template <typename SrcOpTy>
60 static SmallVector<Operation *> getAsOperations(ArrayRef<SrcOpTy> ops) {
61   return llvm::to_vector(
62       llvm::map_range(ops, [](auto op) -> Operation * { return op; }));
63 }
64 template <typename SrcOpTy>
65 static SmallVector<Operation *>
66 getAsOperations(const SmallVector<SrcOpTy> &ops) {
67   return getAsOperations(ArrayRef<SrcOpTy>(ops));
68 }
69 
70 /// Convert a list of `Operation *` to a list of `DstOpTy.
71 template <typename DstOpTy>
72 static SmallVector<DstOpTy> castToTypedOperations(ArrayRef<Operation *> ops) {
73   return llvm::to_vector(
74       llvm::map_range(ops, [](Operation *op) { return cast<DstOpTy>(op); }));
75 }
76 template <typename DstOpTy>
77 static SmallVector<DstOpTy>
78 castToTypedOperations(const SmallVector<Operation *> &ops) {
79   return castToTypedOperations<DstOpTy>(ArrayRef<Operation *>(ops));
80 }
81 
82 //===----------------------------------------------------------------------===//
83 // tileUsingSCFForOp implementation.
84 //===----------------------------------------------------------------------===//
85 
86 // Check if `stride` evenly divides the trip count `size - offset`.
87 static bool tileDividesIterationDomain(Range loopRange) {
88   std::optional<int64_t> offsetAsInt = getConstantIntValue(loopRange.offset);
89   if (!offsetAsInt)
90     return false;
91   std::optional<int64_t> sizeAsInt = getConstantIntValue(loopRange.size);
92   if (!sizeAsInt)
93     return false;
94   std::optional<int64_t> strideAsInt = getConstantIntValue(loopRange.stride);
95   if (!strideAsInt)
96     return false;
97   return ((sizeAsInt.value() - offsetAsInt.value()) % strideAsInt.value() == 0);
98 }
99 
100 /// Returns the bounded tile size given the current `iv`, `loopRange` and
101 /// `tileSize`, i.e., `min(tileSize, range.end() - iv)`.
102 static OpFoldResult getBoundedTileSize(OpBuilder &b, Location loc,
103                                        Range loopRange, Value iv,
104                                        OpFoldResult tileSize) {
105   std::optional<int64_t> ts = getConstantIntValue(tileSize);
106   if (ts && ts.value() == 1)
107     return tileSize;
108 
109   if (tileDividesIterationDomain(
110           Range{loopRange.offset, loopRange.size, tileSize}))
111     return tileSize;
112 
113   // The tile size to use (to avoid out of bounds access) is  minimum of
114   // `tileSize` and `ub - iv`, where `iv` is the induction variable of the tiled
115   // loop.
116   AffineExpr s0, s1, d0;
117   bindDims(b.getContext(), d0);
118   bindSymbols(b.getContext(), s0, s1);
119   AffineMap minMap = AffineMap::get(1, 2, {s0, s1 - d0}, b.getContext());
120   Value size = getValueOrCreateConstantIndexOp(b, loc, loopRange.size);
121   return affine::makeComposedFoldedAffineMin(
122       b, loc, minMap, SmallVector<OpFoldResult>{iv, tileSize, size});
123 }
124 
125 /// Clones the operation and updates the destination if the operation
126 /// implements the `DestinationStyleOpInterface`.
127 static Operation *cloneOpAndUpdateDestinationArgs(RewriterBase &rewriter,
128                                                   Operation *op,
129                                                   ValueRange newDestArgs) {
130   Operation *clonedOp = rewriter.clone(*op);
131   if (newDestArgs.empty())
132     return clonedOp;
133   if (auto destinationStyleOp = dyn_cast<DestinationStyleOpInterface>(clonedOp))
134     destinationStyleOp.getDpsInitsMutable().assign(newDestArgs);
135   return clonedOp;
136 }
137 
/// Generate an empty loop nest that represents the tiled loop nest shell.
/// - `loopRanges` specifies the lb, ub and step of the untiled iteration space.
/// - `tileSizes` is the tile sizes to use. Zero represent untiled loops.
/// - In `offsets` and `sizes` return the multi-dimensional offset and size of
///   the tile processed within the inner most loop.
/// - `destinationTensors` are threaded through the nest as the loop
///   `iter_args`.
/// Note that this methods adds `scf.yield` operation for all but the innermost
/// loop. These yield the value returned by the immediately inner loop. The
/// caller is expected to add the scf.yield operation for the innermost loop.
static SmallVector<scf::ForOp> generateTileLoopNest(
    OpBuilder &builder, Location loc, ArrayRef<Range> loopRanges,
    ArrayRef<OpFoldResult> tileSizes, SmallVector<OpFoldResult> &offsets,
    SmallVector<OpFoldResult> &sizes, ValueRange destinationTensors = {}) {
  if (loopRanges.empty())
    return {};
  assert(loopRanges.size() == tileSizes.size() &&
         "expected as many tile sizes as loop ranges");
  OpBuilder::InsertionGuard guard(builder);
  SmallVector<scf::ForOp> loops;
  // One offset/size entry per iteration-space dimension, filled below.
  offsets.resize(loopRanges.size());
  sizes.resize(loopRanges.size());

  for (auto loopRange : llvm::enumerate(loopRanges)) {
    Value offset =
        getValueOrCreateConstantIndexOp(builder, loc, loopRange.value().offset);
    Value size =
        getValueOrCreateConstantIndexOp(builder, loc, loopRange.value().size);
    Value tileSize = getValueOrCreateConstantIndexOp(
        builder, loc, tileSizes[loopRange.index()]);
    // No loops if tile size is zero. Set offset and size to the loop
    // offset and size.
    if (matchPattern(tileSize, m_Zero())) {
      offsets[loopRange.index()] = offset;
      sizes[loopRange.index()] = size;
      continue;
    }

    // For a tiled dimension, the offset is the loop induction variable and
    // the size is the (possibly clamped) tile size computed in the body.
    auto loop = builder.create<scf::ForOp>(
        loc, offset, size, tileSize, destinationTensors,
        [&](OpBuilder &bodyBuilder, Location bodyLoc, Value iv,
            ValueRange /*iterArgs*/) {
          sizes[loopRange.index()] =
              getBoundedTileSize(bodyBuilder, bodyLoc, loopRange.value(), iv,
                                 getAsOpFoldResult(tileSize));
        });
    offsets[loopRange.index()] = loop.getInductionVar();
    loops.push_back(loop);
    // Build the next (inner) loop inside this one; thread this loop's region
    // iter_args as the destination tensors of the inner loop.
    builder.setInsertionPointToEnd(loop.getBody());
    destinationTensors = loop.getRegionIterArgs();
  }

  // Add the scf.yield operations for all the outer loops.
  if (!loops.empty()) {
    for (auto [outerLoop, innerLoop] :
         llvm::zip_equal(MutableArrayRef(loops).drop_back(),
                         MutableArrayRef(loops).drop_front())) {
      builder.setInsertionPointToEnd(outerLoop.getBody());
      builder.create<scf::YieldOp>(outerLoop.getLoc(), innerLoop.getResults());
    }
  }
  return loops;
}
199 
200 /// Method to add new init values to a loop nest. Updates `loops` in-place with
201 /// new loops that use the `newInitValues`.
202 /// The outer-loops are updated to yield the new result values of the inner
203 /// loop. For the innermost loop, the call back `getNewYields` is invoked to get
204 /// the additional values to yield form the innermost loop.
205 static void addInitOperandsToLoopNest(
206     RewriterBase &rewriter, MutableArrayRef<scf::ForOp> loops,
207     ValueRange newInitValues,
208     llvm::function_ref<SmallVector<Value>(RewriterBase &rewriter, Value iv,
209                                           ValueRange newRegionIterArgs)>
210         getNewYieldValsFn) {
211   SmallVector<scf::ForOp> newLoops;
212   if (loops.empty())
213     return;
214   OpBuilder::InsertionGuard g(rewriter);
215   rewriter.setInsertionPoint(loops.front());
216   for (auto &loop : loops) {
217     rewriter.setInsertionPoint(loop);
218 
219     // Create a new loop with the new init values for this loop.
220     SmallVector<Value> newInits = llvm::to_vector(loop.getInitArgs());
221     newInits.append(newInitValues.begin(), newInitValues.end());
222     auto newLoop = rewriter.create<scf::ForOp>(
223         loop.getLoc(), loop.getLowerBound(), loop.getUpperBound(),
224         loop.getStep(), newInits,
225         [&](OpBuilder &b, Location loc, Value iv, ValueRange iterArgs) {});
226 
227     // Merge the body of the new loop with the body of the old loops.
228     SmallVector<Value> sourceBlockArgs;
229     sourceBlockArgs.push_back(newLoop.getInductionVar());
230     auto newRegionIterArgs = newLoop.getRegionIterArgs();
231     sourceBlockArgs.append(
232         newRegionIterArgs.begin(),
233         std::next(newRegionIterArgs.begin(), loop.getNumResults()));
234     rewriter.mergeBlocks(loop.getBody(), newLoop.getBody(), sourceBlockArgs);
235     rewriter.replaceOp(loop,
236                        newLoop.getResults().take_front(loop.getNumResults()));
237     loop = newLoop;
238     newInitValues = newLoop.getRegionIterArgs().take_back(newInitValues.size());
239   }
240 
241   // Update the loop body of the innermost loop to get new yield values.
242   scf::ForOp innerMostLoop = loops.back();
243   auto innerMostYieldOp =
244       cast<scf::YieldOp>(innerMostLoop.getBody()->getTerminator());
245   rewriter.setInsertionPoint(innerMostYieldOp);
246   SmallVector<Value> newYieldVals =
247       getNewYieldValsFn(rewriter, innerMostLoop.getInductionVar(),
248                         innerMostLoop.getRegionIterArgs());
249   SmallVector<Value> newYieldOperands =
250       llvm::to_vector(innerMostYieldOp->getOperands());
251   newYieldOperands.append(newYieldVals);
252   rewriter.replaceOpWithNewOp<scf::YieldOp>(innerMostYieldOp, newYieldOperands);
253 
254   // Make all other loops except the innermost loops yield the values returned
255   // by the inner loop.
256   for (auto [outerLoop, innerLoop] :
257        llvm::zip_equal(loops.drop_back(), loops.drop_front())) {
258     auto outerLoopYield =
259         cast<scf::YieldOp>(outerLoop.getBody()->getTerminator());
260     SmallVector<Value> newYields =
261         llvm::to_vector(outerLoopYield.getOperands());
262     ValueRange additionalYields =
263         innerLoop.getResults().take_back(newInitValues.size());
264     newYields.append(additionalYields.begin(), additionalYields.end());
265     rewriter.setInsertionPoint(outerLoopYield);
266     rewriter.replaceOpWithNewOp<scf::YieldOp>(outerLoopYield, newYields);
267   }
268 }
269 
/// Implementation of tiling transformation of `op` that implements the
/// `TilingInterface` using `scf.for` to iterate over the tiles.
FailureOr<scf::SCFTilingResult>
mlir::scf::tileUsingSCFForOp(RewriterBase &rewriter, TilingInterface op,
                             const scf::SCFTilingOptions &options) {
  OpBuilder::InsertionGuard guard(rewriter);
  rewriter.setInsertionPointAfter(op);

  if (!options.tileSizeComputationFunction) {
    return rewriter.notifyMatchFailure(
        op, "missing tile size computation function");
  }

  // 1. Get the range of the loops that are represented by the operation.
  SmallVector<Range> iterationDomain = op.getIterationDomain(rewriter);
  size_t numLoops = iterationDomain.size();
  if (numLoops == 0) {
    return rewriter.notifyMatchFailure(
        op, "unable to tile op with no iteration domain");
  }
  // 2. Materialize the tile sizes. Enforce the convention that "tiling by zero"
  // skips tiling a particular dimension. This convention is significantly
  // simpler to handle instead of adjusting affine maps to account for missing
  // dimensions. Missing trailing tile sizes default to zero (untiled).
  SmallVector<OpFoldResult> tileSizeVector =
      options.tileSizeComputationFunction(rewriter, op);
  if (tileSizeVector.size() < iterationDomain.size()) {
    auto zero = rewriter.getIndexAttr(0);
    tileSizeVector.append(numLoops - tileSizeVector.size(), zero);
  }

  // 3. Find the destination tensors to use for the operation.
  SmallVector<Value> destinationTensors;
  if (failed(tensor::getOrCreateDestinations(rewriter, op.getLoc(), op,
                                             destinationTensors))) {
    return rewriter.notifyMatchFailure(op,
                                       "unable to create destination tensors");
  }

  SmallVector<OpFoldResult> offsets, sizes;
  SmallVector<scf::ForOp> forLoops;
  {
    // If there is an interchange specified, permute the iteration domain and
    // the tile sizes.
    SmallVector<int64_t> interchangeVector;
    if (!options.interchangeVector.empty()) {
      interchangeVector = fillInterchangeVector(options.interchangeVector,
                                                iterationDomain.size());
    }
    if (!interchangeVector.empty()) {
      if (!isPermutationVector(interchangeVector)) {
        return rewriter.notifyMatchFailure(
            op, "invalid intechange vector, not a permutation of the entire "
                "iteration space");
      }

      applyPermutationToVector(iterationDomain, interchangeVector);
      applyPermutationToVector(tileSizeVector, interchangeVector);
    }

    // 4. Materialize an empty loop nest that iterates over the tiles. These
    // loops for now do not return any values even if the original operation has
    // results.
    forLoops = generateTileLoopNest(rewriter, op.getLoc(), iterationDomain,
                                    tileSizeVector, offsets, sizes,
                                    destinationTensors);

    // Undo the interchange on the computed offsets/sizes so they are indexed
    // by the original (un-permuted) iteration-space dimensions.
    if (!interchangeVector.empty()) {
      auto inversePermutation = invertPermutationVector(interchangeVector);
      applyPermutationToVector(offsets, inversePermutation);
      applyPermutationToVector(sizes, inversePermutation);
    }
  }

  LLVM_DEBUG({
    if (!forLoops.empty()) {
      llvm::dbgs() << "LoopNest shell :\n";
      forLoops.front().dump();
      llvm::dbgs() << "\n";
    }
  });

  // 5. Generate the tiled implementation within the inner most loop.
  // The tiled op writes into the innermost loop's region iter_args (or the
  // original destinations when no loops were generated).
  SmallVector<Value> clonedOpDestination = destinationTensors;
  if (!forLoops.empty()) {
    rewriter.setInsertionPointToEnd(forLoops.back().getBody());
    clonedOpDestination =
        llvm::map_to_vector(forLoops.back().getRegionIterArgs(),
                            [](BlockArgument b) -> Value { return b; });
  }

  // 5a. Clone the operation within the loop body.
  auto clonedOp = cast<TilingInterface>(
      cloneOpAndUpdateDestinationArgs(rewriter, op, clonedOpDestination));

  // 5b. Tile the cloned operation.
  FailureOr<TilingResult> tiledImplementation =
      clonedOp.getTiledImplementation(rewriter, offsets, sizes);
  if (failed(tiledImplementation)) {
    return rewriter.notifyMatchFailure(op, "failed to tile operation");
  }

  // 5c. Delete the cloned operation.
  rewriter.eraseOp(clonedOp);

  // If loops are empty, the tiled op is used as the replacement for the untiled
  // op.
  if (forLoops.empty()) {
    return scf::SCFTilingResult{tiledImplementation->tiledOps,
                                getAsOperations(forLoops),
                                tiledImplementation->tiledValues};
  }

  if (op->getNumResults() == 0) {
    // The innermost loop does not have a `scf.yield` yet. There is nothing to
    // return, so generate an empty `scf.yield` operation.
    rewriter.setInsertionPointToEnd(forLoops.back().getBody());
    rewriter.create<scf::YieldOp>(op->getLoc());
    return scf::SCFTilingResult{
        tiledImplementation->tiledOps, getAsOperations(forLoops), {}};
  }

  // 6. Yield all the results of the tiled operation. Each tiled value is
  // inserted into the corresponding slice of the destination iter_arg.
  int64_t numResults = op->getNumResults();
  SmallVector<SmallVector<OpFoldResult>> resultOffsetsList(numResults),
      resultSizesList(numResults);
  SmallVector<Value> yieldedValues;
  for (auto [index, tiledValue] :
       llvm::enumerate(tiledImplementation->tiledValues)) {
    SmallVector<OpFoldResult> resultOffsets, resultSizes;
    if (failed(op.getResultTilePosition(rewriter, index, offsets, sizes,
                                        resultOffsets, resultSizes))) {
      return rewriter.notifyMatchFailure(
          op, "failed to get slice of result produced");
    }
    SmallVector<OpFoldResult> resultStrides(resultOffsets.size(),
                                            rewriter.getIndexAttr(1));
    auto insertSlice = rewriter.create<tensor::InsertSliceOp>(
        op->getLoc(), tiledValue, clonedOpDestination[index], resultOffsets,
        resultSizes, resultStrides);
    yieldedValues.push_back(insertSlice);
  }
  rewriter.create<scf::YieldOp>(op->getLoc(), yieldedValues);

  // The results of the outermost loop replace the untiled op's results.
  SmallVector<Value> replacements = llvm::map_to_vector(
      forLoops.front().getResults(), [](OpResult r) -> Value { return r; });
  LLVM_DEBUG({
    if (!forLoops.empty()) {
      llvm::dbgs() << "After tiled implementation :\n";
      forLoops.front().dump();
      llvm::dbgs() << "\n";
    }
  });
  return scf::SCFTilingResult{tiledImplementation->tiledOps,
                              getAsOperations(forLoops), replacements};
}
426 
427 FailureOr<scf::SCFReductionTilingResult>
428 mlir::scf::tileReductionUsingScf(RewriterBase &b,
429                                  PartialReductionOpInterface op,
430                                  ArrayRef<OpFoldResult> tileSizes) {
431   Location loc = op.getLoc();
432   // Ops implementing PartialReductionOpInterface are expected to implement
433   // TilingInterface.
434   auto tilingInterfaceOp = cast<TilingInterface>(op.getOperation());
435   SmallVector<Range> iterationDomain = tilingInterfaceOp.getIterationDomain(b);
436   auto tileSizesVector = llvm::to_vector(tileSizes);
437   if (tileSizesVector.size() < iterationDomain.size()) {
438     auto zero = b.getIndexAttr(0);
439     tileSizesVector.append(iterationDomain.size() - tileSizesVector.size(),
440                            zero);
441   }
442   if (op->getNumResults() != 1)
443     return b.notifyMatchFailure(
444         op, "don't support ops with multiple results for now");
445   SmallVector<utils::IteratorType> iterators =
446       tilingInterfaceOp.getLoopIteratorTypes();
447 
448   SmallVector<int> reductionDims;
449   for (auto [idx, iteratorType] :
450        llvm::enumerate(tilingInterfaceOp.getLoopIteratorTypes())) {
451     if (iteratorType == utils::IteratorType::reduction)
452       reductionDims.push_back(idx);
453   }
454 
455   // 2. create the inital tensor value.
456   FailureOr<Operation *> identityTensor =
457       op.generateInitialTensorForPartialReduction(b, loc, tileSizesVector,
458                                                   reductionDims);
459   if (failed(identityTensor))
460     return b.notifyMatchFailure(op,
461                                 "cannot create a tensor of identity value.");
462   // 3. Create the nested loops.
463   SmallVector<OpFoldResult> offsets, sizes;
464   SmallVector<scf::ForOp> loops =
465       generateTileLoopNest(b, loc, iterationDomain, tileSizesVector, offsets,
466                            sizes, identityTensor.value()->getResults());
467 
468   // 4. Generate the tiled implementation within the inner most loop.
469   // 4a. Clone the operation within the loop body.
470   SmallVector<Value> clonedOpDestination =
471       llvm::map_to_vector(identityTensor.value()->getResults(),
472                           [](OpResult res) -> Value { return res; });
473   if (!loops.empty()) {
474     b.setInsertionPointToEnd(loops.back().getBody());
475     clonedOpDestination =
476         llvm::map_to_vector(loops.back().getRegionIterArgs(),
477                             [](BlockArgument b) -> Value { return b; });
478   }
479   auto clonedOp = cast<PartialReductionOpInterface>(
480       cloneOpAndUpdateDestinationArgs(b, op, clonedOpDestination));
481 
482   // 4b. Tile the cloned operation.
483   Operation *parallelOp = clonedOp.tileToPartialReduction(
484       b, loc, clonedOpDestination, offsets, sizes, reductionDims);
485   // 4c. Delete the cloned operation.
486   b.eraseOp(clonedOp);
487 
488   SmallVector<OpFoldResult> outSizes;
489   for (size_t i = 0; i < offsets.size(); i++) {
490     outSizes.push_back(
491         tensor::getMixedSize(b, loc, parallelOp->getResult(0), i));
492   }
493   SmallVector<OpFoldResult> outOffsets(offsets.size(), b.getIndexAttr(0));
494   SmallVector<OpFoldResult> outStrides(outOffsets.size(), b.getIndexAttr(1));
495   SmallVector<Value> yieldedVals;
496   auto bbArgs = loops.back().getRegionIterArgs();
497   for (auto [result, bbArg] : llvm::zip(parallelOp->getResults(), bbArgs)) {
498     Value insert = b.create<tensor::InsertSliceOp>(
499         loc, result, bbArg, outOffsets, outSizes, outStrides);
500     yieldedVals.push_back(insert);
501   }
502   b.create<scf::YieldOp>(loc, yieldedVals);
503 
504   SmallVector<Value> replacements = llvm::map_to_vector(
505       loops.front().getResults(), [](OpResult r) -> Value { return r; });
506 
507   // 5. Apply the merge reduction to combine all the partial values.
508   b.setInsertionPointAfter(*loops.begin());
509   Operation *mergeOp = op.mergeReductions(b, loc, replacements, reductionDims);
510   b.replaceOp(op, mergeOp->getResults());
511 
512   SCFReductionTilingResult results;
513   results.initialOp = *identityTensor;
514   results.loops = std::move(loops);
515   results.parallelTiledOp = parallelOp;
516   results.mergeOp = mergeOp;
517   return results;
518 }
519 
520 //===----------------------------------------------------------------------===//
521 // tileConsumerAndFuseProducerGreedilyUsingSCFForOp implementation.
522 //===----------------------------------------------------------------------===//
523 
524 /// Return the untiled producer whose slice is used in a tiled consumer. The
525 /// method traverses the tile loop nest (`loops`) if needed, and returns the
526 /// `iter_args` of the outer most that is encountered. Traversing the iter_args
527 /// indicates that this is a destination operand of the consumer. If there was
528 /// no loop traversal needed, the second value of the returned tuple is empty.
529 static std::tuple<OpResult, std::optional<OpOperand *>>
530 getUntiledProducerFromSliceSource(OpOperand *source,
531                                   ArrayRef<scf::ForOp> loops) {
532   std::optional<OpOperand *> destinationIterArg;
533   auto loopIt = loops.rbegin();
534   while (auto iterArg = dyn_cast<BlockArgument>(source->get())) {
535     scf::ForOp loop = *loopIt;
536     if (iterArg.getOwner()->getParentOp() != loop)
537       break;
538     source = loop.getTiedLoopInit(iterArg);
539     loopIt++;
540   }
541   if (loopIt == loops.rend())
542     destinationIterArg = source;
543   return {dyn_cast<OpResult>(source->get()), destinationIterArg};
544 }
545 
/// Implementation of fusing producer of a single slice by computing the
/// slice of the producer in-place.
std::optional<scf::SCFFuseProducerOfSliceResult>
mlir::scf::tileAndFuseProducerOfSlice(RewriterBase &rewriter,
                                      tensor::ExtractSliceOp candidateSliceOp,
                                      MutableArrayRef<scf::ForOp> loops) {
  // 1. Get the producer of the source (potentially walking through
  // `iter_args` of nested `scf.for`)
  auto [fusableProducer, destinationInitArg] =
      getUntiledProducerFromSliceSource(&candidateSliceOp.getSourceMutable(),
                                        loops);
  if (!fusableProducer)
    return std::nullopt;
  unsigned resultNumber = fusableProducer.getResultNumber();

  OpBuilder::InsertionGuard g(rewriter);
  rewriter.setInsertionPoint(candidateSliceOp);

  // 2. Clone the fused producer
  // 2a. Compute the destination operands to use for the cloned operation.
  SmallVector<Value> origDestinationTensors, clonedOpDestinationTensors;
  Operation *fusableProducerOp = fusableProducer.getOwner();
  if (isa<DestinationStyleOpInterface>(fusableProducerOp) &&
      failed(tensor::getOrCreateDestinations(
          rewriter, fusableProducerOp->getLoc(), fusableProducerOp,
          origDestinationTensors)))
    return std::nullopt;

  clonedOpDestinationTensors = origDestinationTensors;
  if (destinationInitArg &&
      isa<DestinationStyleOpInterface>(fusableProducerOp)) {
    // 2b. If the producer is also destination style, then to maintain the
    // destination passing style, update the destination of the producer to be
    // the source of the slice.
    clonedOpDestinationTensors[resultNumber] = candidateSliceOp.getSource();
  }
  // 2c. Clone the fused producer.
  Operation *clonedProducerOp = cloneOpAndUpdateDestinationArgs(
      rewriter, fusableProducerOp, clonedOpDestinationTensors);
  // 2d. Update the source of the candidateSlice to be the cloned producer.
  //     Easier to just clone the slice with different source since replacements
  //     and DCE of cloned ops becomes easier
  SmallVector<Value> candidateSliceOpOperands =
      llvm::to_vector(candidateSliceOp->getOperands());
  candidateSliceOpOperands[0] = clonedProducerOp->getResult(resultNumber);
  tensor::ExtractSliceOp clonedCandidateSliceOp =
      mlir::clone(rewriter, candidateSliceOp,
                  candidateSliceOp->getResultTypes(), candidateSliceOpOperands);

  // 3. Generate the tiled implementation of the producer of the source
  FailureOr<TilingResult> tileAndFuseResult =
      tensor::replaceExtractSliceWithTiledProducer(
          rewriter, clonedCandidateSliceOp,
          clonedProducerOp->getResult(resultNumber));
  if (failed(tileAndFuseResult))
    return std::nullopt;
  // Note: Do not delete the candidateSliceOp, since its passed in from the
  // caller.
  rewriter.replaceAllUsesWith(candidateSliceOp,
                              tileAndFuseResult->tiledValues[0]);
  // The cloned slice and producer served only as scaffolding for the tiling;
  // remove them now that the tiled value has replaced all uses.
  rewriter.eraseOp(clonedCandidateSliceOp);
  rewriter.eraseOp(clonedProducerOp);

  // 4. If the slice is for a destination operand, for example,
  //
  // ```mlir
  // %0 = linalg.init
  // %1 = linalg.fill .. outs(%0 : )
  // %2 = scf.for .. iter_args(%arg0 = %1) {
  //   %3 = scf.for .. iter_args(%arg1 = %arg0) {
  //     %4 = tensor.extract_slice %arg1 [..]
  //     .. = linalg.matmul .. outs(%4 : )
  //   }
  // }
  // ```
  //
  // the IR is currently
  //
  // ```
  // %0 = linalg.init
  // %1 = linalg.fill
  // %2 = scf.for .. iter_args(%arg0 = %1 /* incorrect value */ ) {
  //   %3 = scf.for .. iter_args(%arg1 = %arg0) {
  //     %4 = tensor.extract_slice %arg1[..]
  //     %5 = linalg.fill .. outs(%4 : )
  //     .. = linalg.matmul .. outs(%5 : )
  //   }
  // }
  // ```
  //
  // The untiled `linalg.fill` is still used as the `init_value` since it
  // was originally a destination operand of the untiled `linalg.matmul`.
  // When fusing an operand that is a destination operand, the iter_arg of
  // the outer most loop should be changed to use the destination of the
  // fused operation. With this the IR will be.
  //
  // ```
  // %0 = linalg.init
  // %1 = scf.for .. iter_args(%arg0 = %0 /* corrected value */ ) {
  //   %2 = scf.for .. iter_args(%arg1 = %arg0) {
  //     %3 = tensor.extract_slice %arg1[..]
  //     %4 = linalg.fill .. outs(%3 : )
  //     .. = linalg.matmul .. outs(%4 : )
  //   }
  // }
  // ```
  if (destinationInitArg &&
      isa<DestinationStyleOpInterface>(fusableProducerOp) && !loops.empty()) {
    loops.front()
        ->getOpOperands()[destinationInitArg.value()->getOperandNumber()]
        .set(origDestinationTensors[resultNumber]);
  }
  return scf::SCFFuseProducerOfSliceResult{fusableProducer,
                                           tileAndFuseResult->tiledValues[0],
                                           tileAndFuseResult->tiledOps};
}
662 
663 /// Reconstruct the fused producer from within the tiled-and-fused code.
664 void mlir::scf::yieldReplacementForFusedProducer(
665     RewriterBase &rewriter, tensor::ExtractSliceOp sliceOp,
666     scf::SCFFuseProducerOfSliceResult fusedProducerInfo,
667     MutableArrayRef<scf::ForOp> loops) {
668   if (loops.empty())
669     return;
670 
671   OpResult fusableProducer = fusedProducerInfo.origProducer;
672   Value tiledAndFusedProducer = fusedProducerInfo.tiledAndFusedProducer;
673   FailureOr<Value> initValue = tensor::getOrCreateDestination(
674       rewriter, fusableProducer.getOwner()->getLoc(), fusableProducer);
675   if (succeeded(initValue)) {
676 
677     auto newYieldValuesFn =
678         [&](RewriterBase &innerRewriter, Value iv,
679             ValueRange newRegionIterArgs) -> SmallVector<Value> {
680       OpBuilder::InsertionGuard g(innerRewriter);
681       if (auto tiledDestStyleOp =
682               tiledAndFusedProducer
683                   .getDefiningOp<DestinationStyleOpInterface>()) {
684         rewriter.setInsertionPoint(tiledDestStyleOp);
685         BlockArgument newRegionArg = loops.back().getRegionIterArgs().back();
686         auto destSlice = rewriter.create<tensor::ExtractSliceOp>(
687             sliceOp.getLoc(), newRegionArg, sliceOp.getMixedOffsets(),
688             sliceOp.getMixedSizes(), sliceOp.getMixedStrides());
689         unsigned resultNumber = fusableProducer.getResultNumber();
690         rewriter.updateRootInPlace(tiledDestStyleOp, [&]() {
691           tiledDestStyleOp.getDpsInitsMutable()[resultNumber].set(destSlice);
692         });
693 
694         Block *block = rewriter.getInsertionPoint()->getBlock();
695         rewriter.setInsertionPoint(block->getTerminator());
696         Value replacement = rewriter.create<tensor::InsertSliceOp>(
697             fusedProducerInfo.origProducer.getLoc(),
698             fusedProducerInfo.tiledAndFusedProducer,
699             loops.back().getRegionIterArgs().back(), sliceOp.getMixedOffsets(),
700             sliceOp.getMixedSizes(), sliceOp.getMixedStrides());
701         return {replacement};
702       }
703     };
704 
705     addInitOperandsToLoopNest(rewriter, loops,
706                               SmallVector<Value>{initValue.value()},
707                               newYieldValuesFn);
708   }
709 }
710 
/// Implementation of tile consumer and fuse producer greedily.
///
/// Tiles `consumer` using `options.tilingOptions`, then walks the
/// `tensor.extract_slice` operands of the tiled op(s) in BFS order, fusing
/// the producer of each slice into the loop nest when possible. Returns the
/// set of fused producers, the tiled-and-fused ops, the generated loops, and
/// replacement values for the consumer's results.
FailureOr<scf::SCFTileAndFuseResult>
mlir::scf::tileConsumerAndFuseProducerGreedilyUsingSCFForOp(
    RewriterBase &rewriter, TilingInterface consumer,
    const scf::SCFTileAndFuseOptions &options) {
  // This transformation is only valid for ops that return values (i.e. not
  // valid to use with operations that have memref operands).
  if (!consumer->getNumResults()) {
    return rewriter.notifyMatchFailure(
        consumer, "invalid pattern for op with no results");
  }

  // 1. First tile the consumer.
  SmallVector<scf::ForOp> forLoops;
  SetVector<Operation *> fusedProducers, tiledAndFusedOps;
  DenseMap<Value, Value> replacements;
  llvm::SmallDenseMap<Value, int64_t> yieldedValueToResultNumber;
  {
    FailureOr<scf::SCFTilingResult> tilingResult =
        tileUsingSCFForOp(rewriter, consumer, options.tilingOptions);
    if (failed(tilingResult))
      return rewriter.notifyMatchFailure(consumer, "failed to tile consumer");
    for (auto *tiledOp : tilingResult->tiledOps)
      tiledAndFusedOps.insert(tiledOp);
    forLoops = castToTypedOperations<scf::ForOp>(tilingResult->loops);
    // Record, for each result of the tiled op, which result number of the
    // untiled consumer it corresponds to, and the replacement value to use
    // for the untiled result.
    for (auto [index, origValue, replacement] :
         llvm::enumerate(consumer->getResults(), tilingResult->replacements)) {
      replacements[origValue] = replacement;
      yieldedValueToResultNumber[tilingResult->tiledOps.back()->getResult(
          index)] = index;
    }
  }

  // If there are no loops generated, fusion is immaterial.
  if (forLoops.empty()) {
    return scf::SCFTileAndFuseResult{fusedProducers, tiledAndFusedOps,
                                     getAsOperations(forLoops), replacements};
  }

  // 2. Typically, the operands of the tiled operation are slices of the
  //    operands of the untiled operation. These are expressed in IR using
  //    `tensor.extract_slice` operations with source being the operands of the
  //    untiled operation. Create a worklist of these `tensor.extract_slice`
  //    operations. If the producers of the source of the `tensor.extract_slice`
  //    can be tiled such that the tiled value is generated in-place, that
  //    effectively tiles + fuses the operations.
  auto addCandidateSlices = [](Operation *fusedOp,
                               std::deque<tensor::ExtractSliceOp> &candidates) {
    for (Value operand : fusedOp->getOperands())
      if (auto sliceOp = operand.getDefiningOp<tensor::ExtractSliceOp>())
        candidates.push_back(sliceOp);
  };

  std::deque<tensor::ExtractSliceOp> candidates;
  addCandidateSlices(tiledAndFusedOps.back(), candidates);
  OpBuilder::InsertionGuard g(rewriter);
  while (!candidates.empty()) {
    // Traverse the slices in BFS fashion.
    tensor::ExtractSliceOp candidateSliceOp = candidates.front();
    candidates.pop_front();

    // Try to tile the producer of this slice and fuse the tiled value
    // in place of the slice. Fusion failure for one slice is not an error;
    // just move on to the next candidate.
    std::optional<scf::SCFFuseProducerOfSliceResult> fusedResult =
        tileAndFuseProducerOfSlice(rewriter, candidateSliceOp, forLoops);
    if (!fusedResult)
      continue;

    // The operands of the fused producer might themselves be slices of
    // values produced by operations that implement the `TilingInterface`.
    // Add these operations to the worklist.
    if (Operation *tiledAndFusedOp =
            fusedResult->tiledAndFusedProducer.getDefiningOp()) {
      fusedProducers.insert(fusedResult->origProducer.getDefiningOp());
      tiledAndFusedOps.insert(tiledAndFusedOp);
      addCandidateSlices(tiledAndFusedOp, candidates);
    }
  }
  return scf::SCFTileAndFuseResult{fusedProducers, tiledAndFusedOps,
                                   getAsOperations(forLoops), replacements};
}
790 
791 //===----------------------------------------------------------------------===//
792 // tileUsingSCFForAllOp implementation.
793 //===----------------------------------------------------------------------===//
794 
795 FailureOr<scf::SCFTilingResult>
796 mlir::scf::tileUsingSCFForallOp(RewriterBase &rewriter, TilingInterface op,
797                                 const scf::SCFTilingOptions &options) {
798   Location loc = op->getLoc();
799   OpBuilder::InsertionGuard g(rewriter);
800 
801   // 1. Get the range of loops that are represented by the operation.
802   SmallVector<Range> loopRanges = op.getIterationDomain(rewriter);
803   if (loopRanges.empty())
804     return op->emitOpError("expected non-empty loop ranges");
805   auto hasStrideOne = [](Range r) { return !isConstantIntValue(r.stride, 1); };
806   if (llvm::any_of(loopRanges, hasStrideOne))
807     return op->emitOpError("only stride-1 supported atm");
808 
809   // 2. Get the tile sizes. If tile size is 0, it is not tiled and distributed.
810   // To make it easier, pad the tile sizes to loopRanges.size with value 0.
811   SmallVector<OpFoldResult> tileSizeVector =
812       options.tileSizeComputationFunction(rewriter, op);
813   tileSizeVector.resize(loopRanges.size(), rewriter.getIndexAttr(0));
814 
815   // 3. Build the offsets, sizes and steps for the tile and distributed loops.
816   SmallVector<OpFoldResult> lbs, ubs, steps;
817   for (auto [tileSize, loopRange] : llvm::zip(tileSizeVector, loopRanges)) {
818     if (isConstantIntValue(tileSize, 0))
819       continue;
820     lbs.push_back(loopRange.offset);
821     ubs.push_back(loopRange.size);
822     steps.push_back(tileSize);
823   }
824 
825   // 4. Gather destination tensors.
826   SmallVector<Value> dest;
827   if (failed(tensor::getOrCreateDestinations(rewriter, loc, op, dest)))
828     return op->emitOpError("failed to get destination tensors");
829 
830   // 5. Build the device mapping attribute.
831   std::optional<ArrayAttr> mappingAttr;
832   if (!options.mappingVector.empty()) {
833     mappingAttr = rewriter.getArrayAttr(ArrayRef(options.mappingVector));
834   }
835 
836   // 6. Create the ForallOp. We don't use the lambda body-builder
837   // version because we require the use of RewriterBase in the body, so we
838   // manually move the insertion point to the body below.
839   auto forallOp =
840       rewriter.create<scf::ForallOp>(loc, lbs, ubs, steps, dest, mappingAttr);
841 
842   // 7. Get the tile offset and sizes.
843   rewriter.setInsertionPoint(forallOp.getTerminator());
844   SmallVector<OpFoldResult> tiledOffsets, tiledSizes;
845   ValueRange ivs = forallOp.getInductionVars();
846   {
847     int materializedLoopNum = 0;
848     for (auto [tileSize, loopRange] : llvm::zip(tileSizeVector, loopRanges)) {
849       if (isConstantIntValue(tileSize, 0)) {
850         tiledOffsets.push_back(loopRange.offset);
851         tiledSizes.push_back(loopRange.size);
852         continue;
853       }
854       Value iv = ivs[materializedLoopNum++];
855       tiledOffsets.push_back(iv);
856       tiledSizes.push_back(
857           getBoundedTileSize(rewriter, loc, loopRange, iv, tileSize));
858     }
859   }
860 
861   // 8. Tile the operation. Clone the operation to allow fix up of destination
862   // operands.
863   ArrayRef<BlockArgument> destBbArgs = forallOp.getOutputBlockArguments();
864   Operation *clonedOp =
865       cloneOpAndUpdateDestinationArgs(rewriter, op, destBbArgs);
866   FailureOr<TilingResult> tilingResult =
867       cast<TilingInterface>(clonedOp).getTiledImplementation(
868           rewriter, tiledOffsets, tiledSizes);
869   if (failed(tilingResult))
870     return clonedOp->emitError("failed to tile op: ");
871   rewriter.eraseOp(clonedOp);
872 
873   // 9. Parallel insert back into the result tensor.
874   for (auto [index, tiledValue, destBBArg] :
875        llvm::enumerate(tilingResult->tiledValues, destBbArgs)) {
876     // 9.a. Partial subset information is inserted just before the terminator.
877     rewriter.setInsertionPoint(forallOp.getTerminator());
878 
879     SmallVector<OpFoldResult> resultOffsets, resultSizes;
880     if (failed(op.getResultTilePosition(rewriter, index, tiledOffsets,
881                                         tiledSizes, resultOffsets,
882                                         resultSizes))) {
883       return op->emitOpError("output offsets couldn't be calculated");
884     }
885 
886     SmallVector<OpFoldResult> strides(resultSizes.size(),
887                                       rewriter.getIndexAttr(1));
888     // 9.b. Parallel insertions are inserted at the end of the combining
889     // terminator.
890     rewriter.setInsertionPointToEnd(forallOp.getTerminator().getBody());
891     rewriter.create<tensor::ParallelInsertSliceOp>(
892         loc, tiledValue, destBBArg, resultOffsets, resultSizes, strides);
893   }
894 
895   // 10. Return the tiling result.
896   return scf::SCFTilingResult{
897       tilingResult->tiledOps,
898       {forallOp.getOperation()},
899       llvm::map_to_vector(forallOp.getResults(),
900                           [](auto val) -> Value { return val; })};
901 }
902 
903 //===----------------------------------------------------------------------===//
904 // lowerToLoopsUsingSCFForOp implementation.
905 //===----------------------------------------------------------------------===//
906 
907 FailureOr<SmallVector<scf::ForOp>>
908 mlir::scf::lowerToLoopsUsingSCFForOp(RewriterBase &rewriter,
909                                      TilingInterface op) {
910   // TODO: Handle cases where the op has results if needed.
911   if (op->getNumResults() > 0) {
912     return rewriter.notifyMatchFailure(
913         op, "unable to lower to loops operations with return values");
914   }
915 
916   SmallVector<Range> domain = op.getIterationDomain(rewriter);
917   SmallVector<Value> ivs;
918   SmallVector<scf::ForOp> loops;
919   Location loc = op.getLoc();
920   for (auto loopRange : domain) {
921     Value offsetVal =
922         getValueOrCreateConstantIndexOp(rewriter, loc, loopRange.offset);
923     Value sizeVal =
924         getValueOrCreateConstantIndexOp(rewriter, loc, loopRange.size);
925     Value strideVal =
926         getValueOrCreateConstantIndexOp(rewriter, loc, loopRange.stride);
927     auto loop = rewriter.create<scf::ForOp>(op.getLoc(), offsetVal, sizeVal,
928                                             strideVal, ValueRange{});
929     loops.push_back(loop);
930     ivs.push_back(loop.getInductionVar());
931     rewriter.setInsertionPoint(loop.getBody()->getTerminator());
932   }
933   if (failed(op.generateScalarImplementation(rewriter, op.getLoc(), ivs))) {
934     return failure();
935   }
936   return loops;
937 }
938