//===- Utils.cpp - Utilities to support the Linalg dialect ----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements utilities for the Linalg dialect.
//
//===----------------------------------------------------------------------===//

#include "mlir/Dialect/Linalg/Utils/Utils.h"

#include "mlir/Analysis/SliceAnalysis.h"
#include "mlir/Dialect/Affine/Analysis/AffineStructures.h"
#include "mlir/Dialect/Affine/IR/AffineOps.h"
#include "mlir/Dialect/Affine/IR/AffineValueMap.h"
#include "mlir/Dialect/Affine/LoopUtils.h"
#include "mlir/Dialect/Arith/IR/Arith.h"
#include "mlir/Dialect/Arith/Utils/Utils.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Dialect/Linalg/IR/Linalg.h"
#include "mlir/Dialect/MemRef/IR/MemRef.h"
#include "mlir/Dialect/SCF/IR/SCF.h"
#include "mlir/Dialect/Tensor/IR/Tensor.h"
#include "mlir/Dialect/Tensor/Utils/Utils.h"
#include "mlir/Dialect/Utils/IndexingUtils.h"
#include "mlir/Dialect/Utils/StaticValueUtils.h"
#include "mlir/IR/AffineExpr.h"
#include "mlir/IR/AffineExprVisitor.h"
#include "mlir/IR/AffineMap.h"
#include "mlir/IR/Matchers.h"
#include "mlir/IR/OpImplementation.h"
#include "mlir/Pass/Pass.h"
#include "llvm/ADT/TypeSwitch.h"
#include "llvm/Support/Debug.h"
#include <optional>

#define DEBUG_TYPE "linalg-utils"

using namespace mlir;
using namespace presburger;
using namespace mlir::affine;
using namespace mlir::linalg;
using namespace mlir::scf;

namespace {

// Helper visitor to determine whether an AffineExpr is tiled.
// This is achieved by traversing every AffineDimExpr with position `pos` and
// checking whether the corresponding `tileSizes[pos]` is non-zero.
// This also enforces that only positive coefficients occur in multiplications.
//
// Example:
//   `d0 + 2 * d1 + d3` is tiled by [0, 0, 0, 2] but not by [0, 0, 2, 0]
//
struct TileCheck : public AffineExprVisitor<TileCheck> {
  TileCheck(ArrayRef<OpFoldResult> tileSizes) : tileSizes(tileSizes) {}

  void visitDimExpr(AffineDimExpr expr) {
    isTiled |= !isZeroIndex(tileSizes[expr.getPosition()]);
  }
  void visitAffineBinaryOpExpr(AffineBinaryOpExpr expr) {
    visit(expr.getLHS());
    visit(expr.getRHS());
    if (expr.getKind() == mlir::AffineExprKind::Mul)
      assert(expr.getRHS().cast<AffineConstantExpr>().getValue() > 0 &&
             "nonpositive multiplying coefficient");
  }
  bool isTiled = false;
  ArrayRef<OpFoldResult> tileSizes;
};

} // namespace

static bool isTiled(AffineExpr expr, ArrayRef<OpFoldResult> tileSizes) {
  if (!expr)
    return false;
  TileCheck t(tileSizes);
  t.visit(expr);
  return t.isTiled;
}

// Checks whether the `map` varies with respect to a non-zero `tileSize`.
static bool isTiled(AffineMap map, ArrayRef<OpFoldResult> tileSizes) {
  if (!map)
    return false;
  for (unsigned r = 0; r < map.getNumResults(); ++r)
    if (isTiled(map.getResult(r), tileSizes))
      return true;
  return false;
}

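/// Matches a generic op whose single-block region implements a scalar binary
/// operation on the two block arguments, e.g.
///   ^bb0(%a: i32, %b: i32):
///     %0 = arith.addi %a, %b : i32
///     linalg.yield %0 : i32
/// Only integer addition (BinaryOpKind::IAdd) is currently recognized.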
std::optional<RegionMatcher::BinaryOpKind>
RegionMatcher::matchAsScalarBinaryOp(GenericOp op) {
  auto &region = op.getRegion();
  if (!llvm::hasSingleElement(region))
    return std::nullopt;

  Block &block = region.front();
  if (block.getNumArguments() != 2 ||
      !block.getArgument(0).getType().isSignlessIntOrFloat() ||
      !block.getArgument(1).getType().isSignlessIntOrFloat())
    return std::nullopt;

  auto &ops = block.getOperations();
  if (!llvm::hasSingleElement(block.without_terminator()))
    return std::nullopt;

  using mlir::matchers::m_Val;
  auto a = m_Val(block.getArgument(0));
  auto b = m_Val(block.getArgument(1));

  auto addPattern = m_Op<linalg::YieldOp>(m_Op<arith::AddIOp>(a, b));
  if (addPattern.match(&ops.back()))
    return BinaryOpKind::IAdd;

  return std::nullopt;
}

/// Explicit instantiation of loop nest generator for different loop types.
template struct mlir::linalg::GenerateLoopNest<scf::ForOp>;
template struct mlir::linalg::GenerateLoopNest<scf::ParallelOp>;
template struct mlir::linalg::GenerateLoopNest<AffineForOp>;

/// Given a list of subview ranges, extract the individual lower bound, upper
/// bound and step values and append them to the corresponding vectors.
static void unpackRanges(OpBuilder &builder, Location loc,
                         ArrayRef<Range> ranges, SmallVectorImpl<Value> &lbs,
                         SmallVectorImpl<Value> &ubs,
                         SmallVectorImpl<Value> &steps) {
  for (Range range : ranges) {
    lbs.emplace_back(
        getValueOrCreateConstantIndexOp(builder, loc, range.offset));
    ubs.emplace_back(getValueOrCreateConstantIndexOp(builder, loc, range.size));
    steps.emplace_back(
        getValueOrCreateConstantIndexOp(builder, loc, range.stride));
  }
}

//===----------------------------------------------------------------------===//
// General utilities
//===----------------------------------------------------------------------===//

namespace mlir {
namespace linalg {

bool allIndexingsAreProjectedPermutation(LinalgOp op) {
  return llvm::all_of(op.getIndexingMapsArray(), [](AffineMap m) {
    return m.isProjectedPermutation(/*allowZeroInResults=*/true);
  });
}

bool hasOnlyScalarElementwiseOp(Region &r) {
  if (!llvm::hasSingleElement(r))
    return false;
  for (Operation &op : r.front()) {
    if (!(isa<arith::ConstantOp, func::ConstantOp, tensor::ExtractOp,
              linalg::YieldOp, linalg::IndexOp, AffineApplyOp>(op) ||
          OpTrait::hasElementwiseMappableTraits(&op)) ||
        llvm::any_of(op.getResultTypes(),
                     [](Type type) { return !type.isIntOrIndexOrFloat(); }))
      return false;
  }
  return true;
}

bool isElementwise(LinalgOp op) {
  if (op.getNumLoops() != op.getNumParallelLoops())
    return false;

  if (!allIndexingsAreProjectedPermutation(op))
    return false;

  // TODO: relax the restrictions on indexing map.
  for (OpOperand *opOperand : op.getDpsInitOperands()) {
    if (!op.getMatchingIndexingMap(opOperand).isPermutation())
      return false;
  }
  return hasOnlyScalarElementwiseOp(op->getRegion(0));
}

bool isParallelIterator(utils::IteratorType iteratorType) {
  return iteratorType == utils::IteratorType::parallel;
}

bool isReductionIterator(utils::IteratorType iteratorType) {
  return iteratorType == utils::IteratorType::reduction;
}

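/// Try to reuse an existing padded value instead of creating a new pad: walk
/// from the `source` extract_slice through the chain of destination-style
/// LinalgOps back to a tensor::PadOp, and return the already padded value if
/// its type, low padding, slice sizes, and padding value all match; otherwise
/// fall back to creating a fresh high-padding op.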
Value makeComposedPadHighOp(OpBuilder &b, Location loc, RankedTensorType type,
                            Value source, Value pad, bool nofold) {
  // Exit if `source` is not defined by an ExtractSliceOp.
  auto sliceOp = source.getDefiningOp<tensor::ExtractSliceOp>();
  if (!sliceOp)
    return tensor::createPadHighOp(type, source, pad, nofold, loc, b);

  // Search the `source` use-def chain for padded LinalgOps.
  Value current = sliceOp.getSource();
  while (current) {
    auto linalgOp = current.getDefiningOp<LinalgOp>();
    if (!linalgOp)
      break;
    OpResult opResult = cast<OpResult>(current);
    current = linalgOp.getDpsInitOperand(opResult.getResultNumber())->get();
  }
  auto padOp = current ? current.getDefiningOp<tensor::PadOp>() : nullptr;

  // Exit if the search fails to match a tensor::PadOp at the end of the matched
  // LinalgOp sequence.
  if (!padOp)
    return tensor::createPadHighOp(type, source, pad, nofold, loc, b);

  // Exit if the padded result type does not match.
  if (sliceOp.getSource().getType() != type)
    return tensor::createPadHighOp(type, source, pad, nofold, loc, b);

  // Exit if the LinalgOps are not high padded.
  if (llvm::any_of(padOp.getMixedLowPad(), [](OpFoldResult ofr) {
        return getConstantIntValue(ofr) != static_cast<int64_t>(0);
      }))
    return tensor::createPadHighOp(type, source, pad, nofold, loc, b);

  // Exit if `padOpSliceOp`, which defines the slice used by
  // `padOp`, is rank-reducing.
  auto padOpSliceOp = padOp.getSource().getDefiningOp<tensor::ExtractSliceOp>();
  if (!padOpSliceOp ||
      sliceOp.getMixedSizes().size() != padOpSliceOp.getMixedSizes().size())
    return tensor::createPadHighOp(type, source, pad, nofold, loc, b);
  // Exit if the sizes of `sliceOp` do not match the sizes of the slice padded
  // by `padOp`.
  if (llvm::any_of(
          llvm::zip(sliceOp.getMixedSizes(), padOpSliceOp.getMixedSizes()),
          [](std::tuple<OpFoldResult, OpFoldResult> it) {
            return !isEqualConstantIntOrValue(std::get<0>(it), std::get<1>(it));
          }))
    return tensor::createPadHighOp(type, source, pad, nofold, loc, b);

  // Exit if the padding values do not match.
  Attribute padOpPadAttr, padAttr;
  Value padOpPad = padOp.getConstantPaddingValue();
  if (!padOpPad || !matchPattern(padOpPad, m_Constant(&padOpPadAttr)) ||
      !matchPattern(pad, m_Constant(&padAttr)) || padOpPadAttr != padAttr)
    return tensor::createPadHighOp(type, source, pad, nofold, loc, b);

  // Return the padded result if the padding values and sizes match.
  return sliceOp.getSource();
}

GenericOp makeTransposeOp(OpBuilder &b, Location loc, Value inputTensor,
                          Value outputTensor,
                          ArrayRef<int64_t> transposeVector) {
  auto resultTensorType = cast<RankedTensorType>(outputTensor.getType());
  Type elementType = resultTensorType.getElementType();

  assert(isPermutationVector(transposeVector) &&
         "expect transpose vector to be a permutation");
  assert(transposeVector.size() ==
             static_cast<size_t>(resultTensorType.getRank()) &&
         "expect transpose vector size to match result tensor rank");

  // Compute the transpose and the identity indexing maps.
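  // E.g., transposeVector = [1, 0] yields the indexing maps
  //   [(d0, d1) -> (d1, d0), (d0, d1) -> (d0, d1)],
  // i.e. iteration (d0, d1) reads input[d1, d0] and writes output[d0, d1].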
  SmallVector<AffineMap> indexingMaps = {
      inversePermutation(AffineMap::getPermutationMap(
          SmallVector<unsigned>(transposeVector.begin(), transposeVector.end()),
          b.getContext())),
      AffineMap::getMultiDimIdentityMap(transposeVector.size(),
                                        b.getContext())};
  SmallVector<utils::IteratorType> iteratorTypes(transposeVector.size(),
                                                 utils::IteratorType::parallel);

  // Create a GenericOp to transpose `inputTensor` into `outputTensor`.
  auto transposeOp =
      b.create<GenericOp>(loc, resultTensorType, inputTensor, outputTensor,
                          indexingMaps, iteratorTypes);
  Region &body = transposeOp.getRegion();
  body.push_back(new Block());
  body.front().addArguments({elementType, elementType}, {loc, loc});

  // Create the body of the transpose operation.
  OpBuilder::InsertionGuard g(b);
  b.setInsertionPointToEnd(&body.front());
  b.create<YieldOp>(loc, transposeOp.getRegion().front().getArgument(0));
  return transposeOp;
}

GenericOp makeMemRefCopyOp(OpBuilder &b, Location loc, Value from, Value to) {
  auto memrefTypeTo = cast<MemRefType>(to.getType());
#ifndef NDEBUG
  auto memrefTypeFrom = cast<MemRefType>(from.getType());
  assert(memrefTypeFrom.getRank() == memrefTypeTo.getRank() &&
         "`from` and `to` memref must have the same rank");
#endif // NDEBUG

  AffineMap id =
      AffineMap::getMultiDimIdentityMap(memrefTypeTo.getRank(), b.getContext());
  SmallVector<utils::IteratorType> iteratorTypes(memrefTypeTo.getRank(),
                                                 utils::IteratorType::parallel);
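  // Express the copy as a rank-preserving, all-parallel linalg.generic with
  // identity indexing maps whose body simply yields the source element.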
  return b.create<linalg::GenericOp>(
      loc,
      /*inputs=*/from,
      /*outputs=*/to,
      /*indexingMaps=*/llvm::ArrayRef({id, id}),
      /*iteratorTypes=*/iteratorTypes,
      [](OpBuilder &b, Location loc, ValueRange args) {
        b.create<linalg::YieldOp>(loc, args.front());
      });
}

/// Specialization to build an scf "for" nest.
template <>
void GenerateLoopNest<scf::ForOp>::doit(
    OpBuilder &b, Location loc, ArrayRef<Range> loopRanges, LinalgOp linalgOp,
    ArrayRef<utils::IteratorType> iteratorTypes,
    function_ref<scf::ValueVector(OpBuilder &, Location, ValueRange,
                                  ValueRange)>
        bodyBuilderFn,
    ArrayRef<linalg::ProcInfo> procInfo) {
  assert((procInfo.empty() || (procInfo.size() == loopRanges.size())) &&
         "expected as many entries for proc info as number of loops, even if "
         "they are null entries");
  SmallVector<Value> iterArgInitValues = linalgOp.hasBufferSemantics()
                                             ? SmallVector<Value>{}
                                             : linalgOp.getDpsInitOperands();

  SmallVector<Value, 4> lbs, ubs, steps;
  unpackRanges(b, loc, loopRanges, lbs, ubs, steps);
  LoopNest loopNest = mlir::scf::buildLoopNest(
      b, loc, lbs, ubs, steps, iterArgInitValues,
      [&](OpBuilder &b, Location loc, ValueRange ivs, ValueRange iterArgs) {
        assert(iterArgs.size() == iterArgInitValues.size() &&
               "expect the number of output tensors and iter args to match");
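        // With tensor semantics, the destination tensors are threaded through
        // the loop nest as iter_args; substitute them for the original init
        // operands when building the body.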
        SmallVector<Value> operandValuesToUse = linalgOp->getOperands();
        if (!iterArgs.empty()) {
          operandValuesToUse = linalgOp.getDpsInputOperands();
          operandValuesToUse.append(iterArgs.begin(), iterArgs.end());
        }
        return bodyBuilderFn(b, loc, ivs, operandValuesToUse);
      });

  if (loopNest.loops.empty() || procInfo.empty())
    return;

  // Filter out scf.for loops that were created out of parallel dimensions.
  for (const auto &loop : llvm::enumerate(loopNest.loops)) {
    if (procInfo[loop.index()].distributionMethod ==
        DistributionMethod::Cyclic) {
      mapLoopToProcessorIds(loop.value(), procInfo[loop.index()].procId,
                            procInfo[loop.index()].nprocs);
    }
  }
}

/// Specialization to build affine "for" nest.
template <>
void GenerateLoopNest<AffineForOp>::doit(
    OpBuilder &b, Location loc, ArrayRef<Range> loopRanges, LinalgOp linalgOp,
    ArrayRef<utils::IteratorType> iteratorTypes,
    function_ref<scf::ValueVector(OpBuilder &, Location, ValueRange,
                                  ValueRange)>
        bodyBuilderFn,
    ArrayRef<linalg::ProcInfo> /*procInfo*/) {
  SmallVector<Value> iterArgInitValues = linalgOp.hasBufferSemantics()
                                             ? SmallVector<Value>{}
                                             : linalgOp.getDpsInitOperands();
  assert(iterArgInitValues.empty() && "unexpected AffineForOp init values");
  SmallVector<Value, 4> lbs, ubs, steps;
  unpackRanges(b, loc, loopRanges, lbs, ubs, steps);

  // Affine loops require constant steps.
  SmallVector<int64_t, 4> constantSteps;
  constantSteps.reserve(steps.size());
  for (Value v : steps) {
    auto constVal = getConstantIntValue(v);
    assert(constVal.has_value() && "Affine loops require constant steps");
    constantSteps.push_back(constVal.value());
  }

  affine::buildAffineLoopNest(b, loc, lbs, ubs, constantSteps,
                              [&](OpBuilder &b, Location loc, ValueRange ivs) {
                                bodyBuilderFn(b, loc, ivs,
                                              linalgOp->getOperands());
                              });
}

/// Update the `lb`, `ub` and `step` to get per processor `lb`, `ub` and `step`.
void updateBoundsForCyclicDistribution(OpBuilder &b, Location loc, Value procId,
                                       Value nprocs, Value &lb, Value &ub,
                                       Value &step) {
  AffineExpr d0, d1;
  bindDims(b.getContext(), d0, d1);
  AffineExpr s0 = getAffineSymbolExpr(0, b.getContext());
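  // Cyclic distribution: processor `procId` starts at `lb + procId * step`
  // and advances by `nprocs * step`, so iterations are dealt out round-robin
  // across the processors.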
  lb =
      affine::makeComposedAffineApply(b, loc, d0 + d1 * s0, {lb, procId, step});
  step = affine::makeComposedAffineApply(b, loc, d0 * s0, {nprocs, step});
}

/// Generates a loop nest consisting of scf.parallel and scf.for, depending
/// on the `iteratorTypes`. Consecutive parallel loops create a single
/// scf.parallel operation; each sequential loop creates a new scf.for
/// operation. The body of the innermost loop is populated by
/// `bodyBuilderFn` that accepts a range of induction variables for all
/// loops. `ivStorage` is used to store the partial list of induction
/// variables.
// TODO: this function can be made iterative instead. However, it
// will have at most as many recursive calls as nested loops, which rarely
// exceeds 10.
static void generateParallelLoopNest(
    OpBuilder &b, Location loc, ValueRange lbs, ValueRange ubs,
    ValueRange steps, ArrayRef<utils::IteratorType> iteratorTypes,
    ArrayRef<linalg::ProcInfo> procInfo,
    function_ref<void(OpBuilder &, Location, ValueRange)> bodyBuilderFn,
    SmallVectorImpl<Value> &ivStorage) {
  assert(lbs.size() == ubs.size());
  assert(lbs.size() == steps.size());
  assert(lbs.size() == iteratorTypes.size());
  assert(procInfo.empty() || (lbs.size() == procInfo.size()));

  // If there are no (more) loops to be generated, generate the body and be
  // done with it.
  if (iteratorTypes.empty()) {
    bodyBuilderFn(b, loc, ivStorage);
    return;
  }

  // If there are no outer parallel loops, generate one sequential loop and
  // recurse.
  if (!isParallelIterator(iteratorTypes.front())) {
    LoopNest singleLoop = buildLoopNest(
        b, loc, lbs.take_front(), ubs.take_front(), steps.take_front(),
        [&](OpBuilder &b, Location loc, ValueRange ivs) {
          ivStorage.append(ivs.begin(), ivs.end());
          generateParallelLoopNest(
              b, loc, lbs.drop_front(), ubs.drop_front(), steps.drop_front(),
              iteratorTypes.drop_front(),
              procInfo.empty() ? procInfo : procInfo.drop_front(),
              bodyBuilderFn, ivStorage);
        });
    return;
  }

  unsigned nLoops = iteratorTypes.size();
  unsigned numProcessed = 0;
  DistributionMethod distributionMethod = DistributionMethod::None;
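  // Process the leading run of loops that share the same distribution method
  // (or, absent proc info, the leading run of parallel loops) as a single
  // scf.parallel op; the remaining loops are handled recursively.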
  if (procInfo.empty()) {
    numProcessed = nLoops - iteratorTypes.drop_while(isParallelIterator).size();
  } else {
    distributionMethod = procInfo.front().distributionMethod;
    numProcessed =
        nLoops - procInfo
                     .drop_while([&](linalg::ProcInfo p) {
                       return p.distributionMethod == distributionMethod;
                     })
                     .size();
  }

  auto remainderProcInfo =
      procInfo.empty() ? procInfo : procInfo.drop_front(numProcessed);
  switch (distributionMethod) {
  case DistributionMethod::None: {
    // Generate a single parallel loop-nest operation for all outermost
    // parallel loops and recurse.
    b.create<scf::ParallelOp>(
        loc, lbs.take_front(numProcessed), ubs.take_front(numProcessed),
        steps.take_front(numProcessed),
        [&](OpBuilder &nestedBuilder, Location nestedLoc, ValueRange localIvs) {
          ivStorage.append(localIvs.begin(), localIvs.end());
          generateParallelLoopNest(
              nestedBuilder, nestedLoc, lbs.drop_front(numProcessed),
              ubs.drop_front(numProcessed), steps.drop_front(numProcessed),
              iteratorTypes.drop_front(numProcessed), remainderProcInfo,
              bodyBuilderFn, ivStorage);
        });
    return;
  }
  case DistributionMethod::Cyclic: {
    // Generate a single parallel loop-nest operation for all outermost
    // parallel loops and recurse.
    b.create<scf::ParallelOp>(
        loc, lbs.take_front(numProcessed), ubs.take_front(numProcessed),
        steps.take_front(numProcessed),
        [&](OpBuilder &nestedBuilder, Location nestedLoc, ValueRange localIvs) {
          ivStorage.append(localIvs.begin(), localIvs.end());
          generateParallelLoopNest(
              nestedBuilder, nestedLoc, lbs.drop_front(numProcessed),
              ubs.drop_front(numProcessed), steps.drop_front(numProcessed),
              iteratorTypes.drop_front(numProcessed), remainderProcInfo,
              bodyBuilderFn, ivStorage);
        });
    return;
  }
  case DistributionMethod::CyclicNumProcsGeNumIters: {
    // Check (for the processed loops) that the iteration is in-bounds.
    ArithBuilder ab(b, loc);
    Value cond = ab.slt(lbs[0], ubs[0]);
    for (unsigned i = 1; i < numProcessed; ++i)
      cond = ab._and(cond, ab.slt(lbs[i], ubs[i]));
    ivStorage.append(lbs.begin(), std::next(lbs.begin(), numProcessed));
    b.create<scf::IfOp>(loc, cond, [&](OpBuilder &b, Location loc) {
      generateParallelLoopNest(b, loc, lbs.drop_front(numProcessed),
                               ubs.drop_front(numProcessed),
                               steps.drop_front(numProcessed),
                               iteratorTypes.drop_front(numProcessed),
                               remainderProcInfo, bodyBuilderFn, ivStorage);
      b.create<scf::YieldOp>(loc, ValueRange{});
    });
    return;
  }
  case DistributionMethod::CyclicNumProcsEqNumIters:
    // No check/loops needed here. Set the `%iv` to be the `%lb` and proceed
    // with inner loop generation.
    ivStorage.append(lbs.begin(), std::next(lbs.begin(), numProcessed));
    generateParallelLoopNest(
        b, loc, lbs.drop_front(numProcessed), ubs.drop_front(numProcessed),
        steps.drop_front(numProcessed), iteratorTypes.drop_front(numProcessed),
        remainderProcInfo, bodyBuilderFn, ivStorage);
    return;
  }
}

/// Specialization for generating a mix of parallel and sequential scf loops.
template <>
void GenerateLoopNest<scf::ParallelOp>::doit(
    OpBuilder &b, Location loc, ArrayRef<Range> loopRanges, LinalgOp linalgOp,
    ArrayRef<utils::IteratorType> iteratorTypes,
    function_ref<scf::ValueVector(OpBuilder &, Location, ValueRange,
                                  ValueRange)>
        bodyBuilderFn,
    ArrayRef<linalg::ProcInfo> procInfo) {
  SmallVector<Value> iterArgInitValues = linalgOp.hasBufferSemantics()
                                             ? SmallVector<Value>{}
                                             : linalgOp.getDpsInitOperands();
  assert(iterArgInitValues.empty() && "unexpected ParallelOp init values");
  // This function may be passed more iterator types than ranges.
  assert(iteratorTypes.size() >= loopRanges.size() &&
         "expected iterator type for all ranges");
  assert((procInfo.empty() || (procInfo.size() == loopRanges.size())) &&
         "expected proc information for all loops when present");
  iteratorTypes = iteratorTypes.take_front(loopRanges.size());
  SmallVector<Value, 8> lbsStorage, ubsStorage, stepsStorage, ivs;
  unsigned numLoops = iteratorTypes.size();
  ivs.reserve(numLoops);
  lbsStorage.reserve(numLoops);
  ubsStorage.reserve(numLoops);
  stepsStorage.reserve(numLoops);

  // Get the loop lb, ub, and step.
  unpackRanges(b, loc, loopRanges, lbsStorage, ubsStorage, stepsStorage);

  // Modify the lb, ub, and step based on the distribution options.
  for (const auto &it : llvm::enumerate(procInfo)) {
    if (it.value().distributionMethod != linalg::DistributionMethod::None) {
      updateBoundsForCyclicDistribution(
          b, loc, it.value().procId, it.value().nprocs, lbsStorage[it.index()],
          ubsStorage[it.index()], stepsStorage[it.index()]);
    }
  }
  ValueRange lbs(lbsStorage), ubs(ubsStorage), steps(stepsStorage);
  generateParallelLoopNest(
      b, loc, lbs, ubs, steps, iteratorTypes, procInfo,
      [&](OpBuilder &b, Location loc, ValueRange ivs) {
        bodyBuilderFn(b, loc, ivs, linalgOp->getOperands());
      },
      ivs);

  assert(ivs.size() == iteratorTypes.size() && "did not generate enough loops");
}

static Value materializeTiledShape(OpBuilder &builder, Location loc,
                                   Value valueToTile,
                                   const SliceParameters &sliceParams) {
  auto shapedType = dyn_cast<ShapedType>(valueToTile.getType());
  auto *sliceOp = TypeSwitch<ShapedType, Operation *>(shapedType)
                      .Case([&](MemRefType) {
                        return builder.create<memref::SubViewOp>(
                            loc, valueToTile, sliceParams.offsets,
                            sliceParams.sizes, sliceParams.strides);
                      })
                      .Case([&](RankedTensorType) {
                        return builder.create<tensor::ExtractSliceOp>(
                            loc, valueToTile, sliceParams.offsets,
                            sliceParams.sizes, sliceParams.strides);
                      })
                      .Default([](ShapedType) -> Operation * {
                        llvm_unreachable("Unexpected shaped type");
                      });
  return sliceOp->getResult(0);
}

Value makeTiledShape(OpBuilder &builder, Location loc, Value valueToTile,
                     ArrayRef<OpFoldResult> tileSizes, AffineMap map,
                     ArrayRef<OpFoldResult> lbs, ArrayRef<OpFoldResult> ubs,
                     ArrayRef<OpFoldResult> subShapeSizes,
                     bool omitPartialTileCheck) {
  SliceParameters sliceParams =
      computeSliceParameters(builder, loc, valueToTile, tileSizes, map, lbs,
                             ubs, subShapeSizes, omitPartialTileCheck);
  return materializeTiledShape(builder, loc, valueToTile, sliceParams);
}

SliceParameters
computeSliceParameters(OpBuilder &builder, Location loc, Value valueToTile,
                       ArrayRef<OpFoldResult> tileSizes, AffineMap map,
                       ArrayRef<OpFoldResult> lbs, ArrayRef<OpFoldResult> ubs,
                       ArrayRef<OpFoldResult> subShapeSizes,
                       bool omitPartialTileCheck) {
  auto shapedType = dyn_cast<ShapedType>(valueToTile.getType());
  assert(shapedType && "only shaped types can be tiled");
  ArrayRef<int64_t> shape = shapedType.getShape();
  int64_t rank = shapedType.getRank();

  // Compute offsets/sizes/strides for the tile.
  SliceParameters sliceParams;
  sliceParams.offsets.reserve(rank);
  sliceParams.sizes.reserve(rank);
  sliceParams.strides.reserve(rank);
  for (unsigned r = 0; r < rank; ++r) {
    LLVM_DEBUG(llvm::dbgs() << "computeSliceParameters: for dim#" << r);
    if (!isTiled(map.getSubMap({r}), tileSizes)) {
      sliceParams.offsets.push_back(builder.getIndexAttr(0));
      OpFoldResult dim = createFoldedDimOp(builder, loc, valueToTile, r);
      sliceParams.sizes.push_back(dim);
      sliceParams.strides.push_back(builder.getIndexAttr(1));
      LLVM_DEBUG(llvm::dbgs() << ": not tiled: use size: " << dim << "\n");
      continue;
    }
    LLVM_DEBUG(llvm::dbgs() << ": tiled: figure out subsize...\n");

    // Tiling creates a new slice at the proper index; the slice step is 1
    // (i.e. the op does not subsample; stepping occurs in the enclosing loop).
    auto m = map.getSubMap({r});
    LLVM_DEBUG(llvm::dbgs() << "computeSliceParameters: submap: " << m << "\n");
    IRRewriter rewriter(builder);
    OpFoldResult offset = makeComposedFoldedAffineApply(rewriter, loc, m, lbs);
    sliceParams.offsets.push_back(offset);
    OpFoldResult closedIntSize =
        makeComposedFoldedAffineApply(rewriter, loc, m, subShapeSizes);
    // Resulting size needs to be made half open interval again.
    AffineExpr s0 = getAffineSymbolExpr(0, builder.getContext());
    OpFoldResult size =
        makeComposedFoldedAffineApply(rewriter, loc, s0 + 1, closedIntSize);
    LLVM_DEBUG(llvm::dbgs()
               << "computeSliceParameters: raw size: " << size << "\n");
    LLVM_DEBUG(llvm::dbgs()
               << "computeSliceParameters: new offset: " << offset << "\n");
    sliceParams.strides.push_back(builder.getIndexAttr(1));

    if (omitPartialTileCheck) {
      // We statically know that the partial/boundary tile condition is
      // unnecessary.
      LLVM_DEBUG(llvm::dbgs() << "makeTiledShape: new size: " << size << "\n");
      sliceParams.sizes.push_back(size);
      continue;
    }

    // The size of the subview / extract_slice should be trimmed to avoid
    // out-of-bounds accesses, unless:
    // a. We statically know the subshape size divides the shape size evenly.
    // b. The subshape size is 1. According to the way the loops are set up,
    //    tensors with "0" dimensions would never be constructed.
    int64_t shapeSize = shape[r];
    std::optional<int64_t> sizeCst = getConstantIntValue(size);
    auto hasTileSizeOne = sizeCst && *sizeCst == 1;
    auto dividesEvenly = sizeCst && !ShapedType::isDynamic(shapeSize) &&
                         ((shapeSize % *sizeCst) == 0);
    if (!hasTileSizeOne && !dividesEvenly) {
      LLVM_DEBUG(llvm::dbgs() << "makeTiledShape: shapeSize=" << shapeSize
                              << ", size: " << size
                              << ": make sure in bound with affine.min\n");

      AffineExpr dim0, dim1, dim2;
      bindDims(builder.getContext(), dim0, dim1, dim2);

      // Get the dimension size for this dimension. We first compute the max
      // index and then add one. This is important because, for convolution
      // ops, the input window dimension's affine map has the form
      // `(d0 * s0 + d1)`, where `d0`/`d1` is an output/filter window
      // dimension and `s0` is the stride. Using the size of the output/filter
      // window dimensions directly would yield an incorrect result.
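      // E.g., for the map `(d0 * 2 + d1)` with `ubs = [4, 3]`, the max index
      // is 3 * 2 + 2 = 8 and the window extent is 9, whereas composing the
      // upper bounds directly would yield 4 * 2 + 3 = 11.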
      AffineMap minusOneMap =
          AffineMap::inferFromExprList({ArrayRef<AffineExpr>{dim0 - 1}})
              .front();
      AffineMap plusOneMap =
          AffineMap::inferFromExprList({ArrayRef<AffineExpr>{dim0 + 1}})
              .front();
      SmallVector<OpFoldResult> maxIndices =
          llvm::to_vector(llvm::map_range(ubs, [&](OpFoldResult ub) {
            return makeComposedFoldedAffineApply(rewriter, loc, minusOneMap,
                                                 {ub});
          }));
      OpFoldResult maxIndex =
          makeComposedFoldedAffineApply(rewriter, loc, m, maxIndices);
      OpFoldResult d =
          makeComposedFoldedAffineApply(rewriter, loc, plusOneMap, {maxIndex});

      // Compute min(dim - offset, size) to avoid out-of-bounds accesses.
      AffineMap minMap = AffineMap::inferFromExprList(
                             {ArrayRef<AffineExpr>{dim1 - dim2, dim0}})
                             .front();
      size =
          makeComposedFoldedAffineMin(rewriter, loc, minMap, {size, d, offset});
    }
    LLVM_DEBUG(llvm::dbgs() << "makeTiledShape: new size: " << size << "\n");
    sliceParams.sizes.push_back(size);
  }
  return sliceParams;
}

SmallVector<OpFoldResult> computeTileOffsets(OpBuilder &b, Location loc,
                                             ArrayRef<OpFoldResult> ivs,
                                             ArrayRef<OpFoldResult> tileSizes) {
  SmallVector<OpFoldResult> offsets;
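  // Non-tiled dimensions get a zero offset; tiled dimensions consume the
  // induction variables in order. E.g., tileSizes = [0, 8, 0, 16] with
  // ivs = [%i, %j] produces offsets [0, %i, 0, %j].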
  for (unsigned idx = 0, idxIvs = 0, e = tileSizes.size(); idx < e; ++idx) {
    LLVM_DEBUG(llvm::dbgs() << "makeTiledShapes: for loop#" << idx << "\n");
    bool isTiled = !isZeroIndex(tileSizes[idx]);
    offsets.push_back(isTiled ? ivs[idxIvs++] : b.getIndexAttr(0));
    LLVM_DEBUG(llvm::dbgs()
               << "computeTileOffsets: " << offsets.back() << "\n");
  }
  return offsets;
}

SmallVector<OpFoldResult> computeTileSizes(OpBuilder &b, Location loc,
                                           ArrayRef<OpFoldResult> tileSizes,
                                           ArrayRef<OpFoldResult> sizeBounds) {
  SmallVector<OpFoldResult> sizes;
  for (unsigned idx = 0, e = tileSizes.size(); idx < e; ++idx) {
    bool isTiled = !isZeroIndex(tileSizes[idx]);
    // Before composing, we need to make the range a closed interval.
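    // E.g., a tile size of 32 becomes 31 here; computeSliceParameters later
    // reopens the interval by applying `s0 + 1` to the composed result.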
    OpFoldResult size = isTiled ? tileSizes[idx] : sizeBounds[idx];
    AffineExpr d0 = getAffineDimExpr(0, b.getContext());
    IRRewriter rewriter(b);
    sizes.push_back(makeComposedFoldedAffineApply(rewriter, loc, d0 - 1, size));
    LLVM_DEBUG(llvm::dbgs() << "computeTileSizes: " << sizes.back() << "\n");
  }
  return sizes;
}

SmallVector<Type> getTensorOutputTypes(LinalgOp op, ValueRange operands) {
  if (op.hasBufferSemantics())
    return {};
  return llvm::to_vector(
      llvm::map_range(op.getDpsInitOperands(), [&](OpOperand *opOperand) {
        return operands[opOperand->getOperandNumber()].getType();
      }));
}

SmallVector<Value> insertSlicesBack(OpBuilder &builder, Location loc,
                                    LinalgOp op, ValueRange operands,
                                    ValueRange results) {
  if (op.hasBufferSemantics())
    return {};
  SmallVector<Value> tensorResults;
  tensorResults.reserve(results.size());
  // Insert an insert_slice for each output tensor.
  unsigned resultIdx = 0;
  for (OpOperand *opOperand : op.getDpsInitOperands()) {
    // TODO: use an interface/adaptor to avoid leaking position in
    // `tiledOperands`.
    Value outputTensor = operands[opOperand->getOperandNumber()];
    if (auto sliceOp = outputTensor.getDefiningOp<tensor::ExtractSliceOp>()) {
      Value inserted = builder.create<tensor::InsertSliceOp>(
          loc, sliceOp.getSource().getType(), results[resultIdx],
          sliceOp.getSource(), sliceOp.getOffsets(), sliceOp.getSizes(),
          sliceOp.getStrides(), sliceOp.getStaticOffsets(),
          sliceOp.getStaticSizes(), sliceOp.getStaticStrides());
      tensorResults.push_back(inserted);
    } else {
      tensorResults.push_back(results[resultIdx]);
    }
    ++resultIdx;
  }
  return tensorResults;
}

SmallVector<std::optional<SliceParameters>>
computeAllSliceParameters(OpBuilder &builder, Location loc, LinalgOp linalgOp,
                          ValueRange valuesToTile, ArrayRef<OpFoldResult> ivs,
                          ArrayRef<OpFoldResult> tileSizes,
                          ArrayRef<OpFoldResult> sizeBounds,
                          bool omitPartialTileCheck) {
  assert(ivs.size() == static_cast<size_t>(llvm::count_if(
                           llvm::make_range(tileSizes.begin(), tileSizes.end()),
                           [](OpFoldResult v) { return !isZeroIndex(v); })) &&
         "expected as many ivs as non-zero sizes");

  // Construct (potentially temporary) mins and maxes on which to apply maps
  // that define tile subshapes.
  SmallVector<OpFoldResult> lbs =
      computeTileOffsets(builder, loc, ivs, tileSizes);
  SmallVector<OpFoldResult> subShapeSizes =
      computeTileSizes(builder, loc, tileSizes, sizeBounds);

  assert(static_cast<int64_t>(valuesToTile.size()) <=
             linalgOp->getNumOperands() &&
         "more values to tile than operands.");
  SmallVector<std::optional<SliceParameters>> allSliceParams;
  allSliceParams.reserve(valuesToTile.size());
  for (auto [opOperand, val] :
       llvm::zip(linalgOp->getOpOperands(), valuesToTile)) {
    Value shapedOp = val;
    LLVM_DEBUG(llvm::dbgs() << "makeTiledShapes: for operand " << shapedOp);
    AffineMap map = linalgOp.getMatchingIndexingMap(&opOperand);
    // Use `opOperand` as is if it is not tiled and not an output tensor.
    // Having an extract/insert slice pair for all output tensors simplifies
    // follow-up transformations such as padding and bufferization since the
    // extract/insert slice pairs make the accessed iteration argument
    // subdomains explicit.

    Type operandType = opOperand.get().getType();
    if (!isTiled(map, tileSizes) && !(isa<RankedTensorType>(operandType) &&
                                      linalgOp.isDpsInit(&opOperand))) {
      allSliceParams.push_back(std::nullopt);
      LLVM_DEBUG(llvm::dbgs()
                 << ": not tiled: use shape: " << operandType << "\n");
      continue;
    }
    LLVM_DEBUG(llvm::dbgs() << ": tiled: figure out subshape...\n");

    allSliceParams.push_back(computeSliceParameters(
        builder, loc, shapedOp, tileSizes, map, lbs, sizeBounds, subShapeSizes,
        omitPartialTileCheck));
  }

  return allSliceParams;
}

SmallVector<Value> makeTiledShapes(OpBuilder &builder, Location loc,
                                   LinalgOp linalgOp, ValueRange valuesToTile,
                                   ArrayRef<OpFoldResult> ivs,
                                   ArrayRef<OpFoldResult> tileSizes,
                                   ArrayRef<OpFoldResult> sizeBounds,
                                   bool omitPartialTileCheck) {
  SmallVector<std::optional<SliceParameters>> allSliceParameter =
      computeAllSliceParameters(builder, loc, linalgOp, valuesToTile, ivs,
                                tileSizes, sizeBounds, omitPartialTileCheck);
  SmallVector<Value> tiledShapes;
  for (auto item : llvm::zip(valuesToTile, allSliceParameter)) {
    Value valueToTile = std::get<0>(item);
    std::optional<SliceParameters> sliceParams = std::get<1>(item);
    tiledShapes.push_back(
        sliceParams.has_value()
            ? materializeTiledShape(builder, loc, valueToTile, *sliceParams)
            : valueToTile);
  }
  return tiledShapes;
}

void offsetIndices(OpBuilder &b, LinalgOp linalgOp,
                   ArrayRef<OpFoldResult> offsets) {
  IRRewriter rewriter(b);
  offsetIndices(rewriter, linalgOp, offsets);
}

void offsetIndices(RewriterBase &b, LinalgOp linalgOp,
                   ArrayRef<OpFoldResult> offsets) {
  if (!linalgOp.hasIndexSemantics())
    return;

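  // Each linalg.index result is replaced by `index + offsets[dim]`, folding
  // the tile offset into the index computation of the tiled op.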
  for (IndexOp indexOp : linalgOp.getBlock()->getOps<IndexOp>()) {
    if (indexOp.getDim() >= offsets.size() || !offsets[indexOp.getDim()])
      continue;
    OpBuilder::InsertionGuard guard(b);
    b.setInsertionPointAfter(indexOp);
    AffineExpr index, offset;
    bindDims(b.getContext(), index, offset);
    OpFoldResult applied = makeComposedFoldedAffineApply(
        b, indexOp.getLoc(), index + offset,
        {getAsOpFoldResult(indexOp.getResult()), offsets[indexOp.getDim()]});
    Value materialized =
        getValueOrCreateConstantIndexOp(b, indexOp.getLoc(), applied);
    b.replaceOpWithIf(indexOp, materialized, [&](OpOperand &use) {
      return use.getOwner() != materialized.getDefiningOp();
    });
  }
}

/// Get the reassociation maps to fold the result of an extract_slice (or the
/// source of an insert_slice) operation with the given offsets and sizes to
/// its rank-reduced version. This is only done for the cases where the size
/// is 1 and the offset is 0. Strictly speaking, the offset 0 is not required
/// in general, but non-zero offsets are not handled by the SPIR-V backend at
/// this point (and potentially cannot be handled).
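///
/// Example: `mixedSizes = [1, 4, 1, 5]` yields the reassociation
/// `[[0, 1], [2, 3]]`: each leading unit dimension is grouped with the next
/// non-unit dimension (trailing unit dimensions fold into the last group).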
std::optional<SmallVector<ReassociationIndices>>
getReassociationMapForFoldingUnitDims(ArrayRef<OpFoldResult> mixedSizes) {
  SmallVector<ReassociationIndices> reassociation;
  ReassociationIndices curr;
  for (const auto &it : llvm::enumerate(mixedSizes)) {
    auto dim = it.index();
    auto size = it.value();
    curr.push_back(dim);
    auto attr = llvm::dyn_cast_if_present<Attribute>(size);
    if (attr && cast<IntegerAttr>(attr).getInt() == 1)
      continue;
    reassociation.emplace_back(ReassociationIndices{});
    std::swap(reassociation.back(), curr);
  }
  // When the reassociation indices gathered so far are not empty, fold the
  // remaining unit dimensions into the last group. Otherwise leave the
  // reassociation empty; this folds everything to a rank-0 tensor.
  if (!curr.empty() && !reassociation.empty())
    reassociation.back().append(curr.begin(), curr.end());
  return reassociation;
}

} // namespace linalg
} // namespace mlir