xref: /llvm-project/mlir/lib/Dialect/Affine/Transforms/LoopTiling.cpp (revision 754e09f9cef13fb07328169354e38dd86d94a699)
1 //===- LoopTiling.cpp --- Loop tiling pass ------------------------------*-===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements a pass to tile loop nests.
10 //
11 //===----------------------------------------------------------------------===//
12 
#include "PassDetail.h"
#include "mlir/Analysis/AffineAnalysis.h"
#include "mlir/Analysis/AffineStructures.h"
#include "mlir/Analysis/LoopAnalysis.h"
#include "mlir/Analysis/Utils.h"
#include "mlir/Dialect/Affine/IR/AffineOps.h"
#include "mlir/Dialect/Affine/IR/AffineValueMap.h"
#include "mlir/Dialect/Affine/Passes.h"
#include "mlir/IR/BlockAndValueMapping.h"
#include "mlir/IR/Builders.h"
#include "mlir/Transforms/LoopUtils.h"
#include "mlir/Transforms/Utils.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include <cmath>
27 using namespace mlir;
28 
29 #define DEBUG_TYPE "affine-loop-tile"
30 
31 namespace {
32 
/// A pass to perform loop tiling on all suitable loop nests of a Function.
struct LoopTiling : public AffineLoopTilingBase<LoopTiling> {
  LoopTiling() = default;
  /// Constructs the pass with an explicit cache-size budget in bytes; the
  /// inherited `cacheSizeInKiB` option is derived from it.
  /// Note: integer division truncates, so budgets under 1024 bytes become 0.
  explicit LoopTiling(uint64_t cacheSizeBytes, bool avoidMaxMinBounds = true)
      : avoidMaxMinBounds(avoidMaxMinBounds) {
    this->cacheSizeInKiB = cacheSizeBytes / 1024;
  }

  void runOnFunction() override;
  /// Computes one tile size per loop of `band` into `tileSizes`, using the
  /// command-line options when provided and a memory-footprint model
  /// otherwise (see the definition for details).
  void getTileSizes(ArrayRef<AffineForOp> band,
                    SmallVectorImpl<unsigned> *tileSizes);

  // Default tile size if nothing is provided.
  constexpr static unsigned kDefaultTileSize = 4;

  // If true, tile sizes are set to avoid max/min in bounds if possible.
  // (When enabled, computed tile sizes are rounded down to divisors of the
  // loop trip counts; see adjustToDivisorsOfTripCounts.)
  bool avoidMaxMinBounds = true;
};
51 
52 } // end anonymous namespace
53 
/// Creates a pass to perform loop tiling on all suitable loop nests of a
/// Function.
///
/// \p cacheSizeBytes is the cache-size budget used by the tile-size model
/// (stored internally in KiB).
std::unique_ptr<OperationPass<FuncOp>>
mlir::createLoopTilingPass(uint64_t cacheSizeBytes) {
  return std::make_unique<LoopTiling>(cacheSizeBytes);
}
/// Creates a loop tiling pass with default-constructed options (all pass
/// options keep their declared defaults).
std::unique_ptr<OperationPass<FuncOp>> mlir::createLoopTilingPass() {
  return std::make_unique<LoopTiling>();
}
63 
64 // Move the loop body of AffineForOp 'src' from 'src' into the specified
65 // location in destination's body, ignoring the terminator.
66 static inline void moveLoopBody(AffineForOp src, AffineForOp dest,
67                                 Block::iterator loc) {
68   auto &insts = src.getBody()->getOperations();
69   dest.getBody()->getOperations().splice(loc, insts, insts.begin(),
70                                          std::prev(insts.end()));
71 }
72 
73 // Move the loop body of AffineForOp 'src' from 'src' to the start of dest's
74 // body.
75 static inline void moveLoopBody(AffineForOp src, AffineForOp dest) {
76   moveLoopBody(src, dest, dest.getBody()->begin());
77 }
78 
/// Constructs and sets new loop bounds after tiling for the case of
/// hyper-rectangular index sets, where the bounds of one dimension do not
/// depend on other dimensions. Bounds of each dimension can thus be treated
/// independently, and deriving the new bounds is much simpler and faster
/// than for the case of tiling arbitrary polyhedral shapes.
///
/// `newLoops` holds 2 * width loops: the first `width` entries are the
/// tile-space loops and the last `width` entries are the intra-tile (point)
/// loops.
static void
constructTiledIndexSetHyperRect(MutableArrayRef<AffineForOp> origLoops,
                                MutableArrayRef<AffineForOp> newLoops,
                                ArrayRef<unsigned> tileSizes) {
  assert(!origLoops.empty());
  assert(origLoops.size() == tileSizes.size());

  OpBuilder b(origLoops[0].getOperation());
  unsigned width = origLoops.size();

  // Bounds for tile space loops.
  // Each tile-space loop scans the original iteration range of its loop, but
  // with a step equal to the tile size.
  for (unsigned i = 0; i < width; i++) {
    OperandRange newLbOperands = origLoops[i].getLowerBoundOperands();
    OperandRange newUbOperands = origLoops[i].getUpperBoundOperands();
    newLoops[i].setLowerBound(newLbOperands, origLoops[i].getLowerBoundMap());
    newLoops[i].setUpperBound(newUbOperands, origLoops[i].getUpperBoundMap());
    newLoops[i].setStep(tileSizes[i]);
  }
  // Bounds for intra-tile loops.
  for (unsigned i = 0; i < width; i++) {
    int64_t largestDiv = getLargestDivisorOfTripCount(origLoops[i]);
    auto mayBeConstantCount = getConstantTripCount(origLoops[i]);
    // The lower bound is just the tile-space loop.
    AffineMap lbMap = b.getDimIdentityMap();
    newLoops[width + i].setLowerBound(
        /*operands=*/newLoops[i].getInductionVar(), lbMap);

    // Set the upper bound. Three cases follow.
    if (mayBeConstantCount && mayBeConstantCount.getValue() < tileSizes[i]) {
      // Trip count is less than the tile size: upper bound is lower bound +
      // trip count.
      auto ubMap = b.getSingleDimShiftAffineMap(mayBeConstantCount.getValue());
      newLoops[width + i].setUpperBound(
          /*operands=*/newLoops[i].getInductionVar(), ubMap);
    } else if (largestDiv % tileSizes[i] != 0) {
      // The tile size is not known to divide the trip count:
      // Intra-tile loop ii goes from i to min(i + tileSize, ub_i).
      // Construct the upper bound map; the operands are the original operands
      // with 'i' (tile-space loop) appended to it. The new upper bound map is
      // the original one with an additional expression i + tileSize appended.

      // Add dim operands from original upper bound.
      SmallVector<Value, 4> ubOperands;
      auto ub = origLoops[i].getUpperBound();
      ubOperands.reserve(ub.getNumOperands() + 1);
      auto origUbMap = ub.getMap();
      for (unsigned j = 0, e = origUbMap.getNumDims(); j < e; ++j)
        ubOperands.push_back(ub.getOperand(j));

      // Add dim operand for new loop upper bound.
      ubOperands.push_back(newLoops[i].getInductionVar());

      // Add symbol operands from original upper bound.
      for (unsigned j = 0, e = origUbMap.getNumSymbols(); j < e; ++j)
        ubOperands.push_back(ub.getOperand(origUbMap.getNumDims() + j));

      SmallVector<AffineExpr, 4> boundExprs;
      boundExprs.reserve(1 + origUbMap.getNumResults());
      // The tile-space IV is the extra (last) dim of the new map.
      auto dim = b.getAffineDimExpr(origUbMap.getNumDims());
      // The new upper bound map is the original one with an additional
      // expression i + tileSize appended.
      boundExprs.push_back(dim + tileSizes[i]);
      boundExprs.append(origUbMap.getResults().begin(),
                        origUbMap.getResults().end());
      auto ubMap =
          AffineMap::get(origUbMap.getNumDims() + 1, origUbMap.getNumSymbols(),
                         boundExprs, b.getContext());
      newLoops[width + i].setUpperBound(/*operands=*/ubOperands, ubMap);
    } else {
      // No need of the min expression.
      auto dim = b.getAffineDimExpr(0);
      auto ubMap = AffineMap::get(1, 0, dim + tileSizes[i]);
      newLoops[width + i].setUpperBound(newLoops[i].getInductionVar(), ubMap);
    }
  }
}
159 
/// This function checks whether hyper-rectangular loop tiling of the nest
/// represented by `origLoops` is valid. The validity condition is from Irigoin
/// and Triolet, which states that two tiles cannot depend on each other. We
/// simplify such condition to just checking whether there is any negative
/// dependence direction, since we have the prior knowledge that the tiling
/// results will be hyper-rectangles, which are scheduled in the
/// lexicographically increasing order on the vector of loop indices. This
/// function will return failure when any dependence component is negative along
/// any of `origLoops`.
///
/// Note: `tileSizes` is currently unused by this check.
static LogicalResult
checkTilingLegality(MutableArrayRef<mlir::AffineForOp> origLoops,
                    ArrayRef<unsigned> tileSizes) {
  assert(!origLoops.empty() && "no original loops provided");

  // We first find out all dependences we intend to check.
  SmallVector<Operation *, 8> loadAndStoreOps;
  origLoops[0].getOperation()->walk([&](Operation *op) {
    if (isa<AffineReadOpInterface, AffineWriteOpInterface>(op))
      loadAndStoreOps.push_back(op);
  });

  unsigned numOps = loadAndStoreOps.size();
  unsigned numLoops = origLoops.size();
  FlatAffineConstraints dependenceConstraints;
  // Check every (src, dst) access pair at every depth of the band; depth
  // numLoops + 1 additionally covers dependences within a single iteration.
  for (unsigned d = 1; d <= numLoops + 1; ++d) {
    for (unsigned i = 0; i < numOps; ++i) {
      Operation *srcOp = loadAndStoreOps[i];
      MemRefAccess srcAccess(srcOp);
      for (unsigned j = 0; j < numOps; ++j) {
        Operation *dstOp = loadAndStoreOps[j];
        MemRefAccess dstAccess(dstOp);

        SmallVector<DependenceComponent, 2> depComps;
        dependenceConstraints.reset();
        DependenceResult result = checkMemrefAccessDependence(
            srcAccess, dstAccess, d, &dependenceConstraints, &depComps);

        // Skip if there is no dependence in this case.
        if (!hasDependence(result))
          continue;

        // Check whether there is any negative direction vector in the
        // dependence components found above, which means that dependence is
        // violated by the default hyper-rect tiling method.
        LLVM_DEBUG(llvm::dbgs() << "Checking whether tiling legality violated "
                                   "for dependence at depth: "
                                << Twine(d) << " between:\n";);
        LLVM_DEBUG(srcAccess.opInst->dump(););
        LLVM_DEBUG(dstAccess.opInst->dump(););
        for (unsigned k = 0, e = depComps.size(); k < e; k++) {
          DependenceComponent depComp = depComps[k];
          // NOTE(review): the strict `lb < ub` conjunct skips components
          // whose bounds coincide (e.g. a constant negative distance with
          // lb == ub == -1); confirm this is intentional and not a missed
          // illegal case.
          if (depComp.lb.hasValue() && depComp.ub.hasValue() &&
              depComp.lb.getValue() < depComp.ub.getValue() &&
              depComp.ub.getValue() < 0) {
            LLVM_DEBUG(llvm::dbgs()
                       << "Dependence component lb = "
                       << Twine(depComp.lb.getValue())
                       << " ub = " << Twine(depComp.ub.getValue())
                       << " is negative  at depth: " << Twine(d)
                       << " and thus violates the legality rule.\n");
            return failure();
          }
        }
      }
    }
  }

  return success();
}
/// Tiles the specified band of perfectly nested loops creating tile-space loops
/// and intra-tile loops. A band is a contiguous set of loops.
///
/// On success, `tiledNest` (if non-null) receives the 2 * input.size() new
/// loops: the tile-space loops first, then the intra-tile (point) loops.
/// Fails (with an emitted error) if the band's index set is not
/// hyper-rectangular.
//  TODO: handle non hyper-rectangular spaces.
LogicalResult
mlir::tilePerfectlyNested(MutableArrayRef<AffineForOp> input,
                          ArrayRef<unsigned> tileSizes,
                          SmallVectorImpl<AffineForOp> *tiledNest) {
  // Check if the supplied for op's are all successively nested.
  assert(!input.empty() && "no loops in input band");
  assert(input.size() == tileSizes.size() && "Too few/many tile sizes");

  assert(isPerfectlyNested(input) && "input loops not perfectly nested");

  auto origLoops = input;

  // Perform tiling legality test.
  // Note: a failed legality test only emits a remark; tiling still proceeds.
  if (failed(checkTilingLegality(origLoops, tileSizes)))
    origLoops[0].emitRemark("tiled code is illegal due to dependences");

  AffineForOp rootAffineForOp = origLoops[0];
  auto loc = rootAffineForOp.getLoc();
  // Note that width is at least one since band isn't empty.
  unsigned width = input.size();

  // Layout: [0, width) are the tile-space loops, [width, 2 * width) the
  // intra-tile (point) loops.
  SmallVector<AffineForOp, 6> tiledLoops(2 * width);

  // The outermost among the loops as we add more..
  auto *topLoop = rootAffineForOp.getOperation();
  AffineForOp innermostPointLoop;

  // Add intra-tile (or point) loops.
  // Each new loop is created just before the current top loop and then takes
  // it over as its body, i.e. the nest is built from the inside out.
  for (unsigned i = 0; i < width; i++) {
    OpBuilder b(topLoop);
    // Loop bounds will be set later.
    auto pointLoop = b.create<AffineForOp>(loc, 0, 0);
    pointLoop.getBody()->getOperations().splice(
        pointLoop.getBody()->begin(), topLoop->getBlock()->getOperations(),
        topLoop);
    tiledLoops[2 * width - 1 - i] = pointLoop;
    topLoop = pointLoop.getOperation();
    if (i == 0)
      innermostPointLoop = pointLoop;
  }

  // Add tile space loops;
  for (unsigned i = width; i < 2 * width; i++) {
    OpBuilder b(topLoop);
    // Loop bounds will be set later.
    auto tileSpaceLoop = b.create<AffineForOp>(loc, 0, 0);
    tileSpaceLoop.getBody()->getOperations().splice(
        tileSpaceLoop.getBody()->begin(), topLoop->getBlock()->getOperations(),
        topLoop);
    tiledLoops[2 * width - i - 1] = tileSpaceLoop;
    topLoop = tileSpaceLoop.getOperation();
  }

  // Move the loop body of the original nest to the new one.
  moveLoopBody(origLoops.back(), innermostPointLoop);

  SmallVector<Value, 8> origLoopIVs;
  extractForInductionVars(input, &origLoopIVs);

  // Bound construction below assumes a hyper-rectangular index set; bail out
  // otherwise.
  FlatAffineConstraints cst;
  getIndexSet(input, &cst);
  if (!cst.isHyperRectangular(0, width)) {
    rootAffineForOp.emitError("tiled code generation unimplemented for the "
                              "non-hyperrectangular case");
    return failure();
  }

  constructTiledIndexSetHyperRect(origLoops, tiledLoops, tileSizes);

  // Replace original IVs with intra-tile loop IVs.
  for (unsigned i = 0; i < width; i++)
    origLoopIVs[i].replaceAllUsesWith(tiledLoops[i + width].getInductionVar());

  // Erase the old loop nest.
  rootAffineForOp.erase();

  if (tiledNest)
    *tiledNest = std::move(tiledLoops);

  return success();
}
313 
314 // Identify valid and profitable bands of loops to tile. This is currently just
315 // a temporary placeholder to test the mechanics of tiled code generation.
316 // Returns all maximal outermost perfect loop nests to tile.
317 static void getTileableBands(FuncOp f,
318                              std::vector<SmallVector<AffineForOp, 6>> *bands) {
319   // Get maximal perfect nest of 'affine.for' insts starting from root
320   // (inclusive).
321   auto getMaximalPerfectLoopNest = [&](AffineForOp root) {
322     SmallVector<AffineForOp, 6> band;
323     getPerfectlyNestedLoops(band, root);
324     bands->push_back(band);
325   };
326 
327   for (auto &block : f)
328     for (auto &op : block)
329       if (auto forOp = dyn_cast<AffineForOp>(op))
330         getMaximalPerfectLoopNest(forOp);
331 }
332 
333 /// Reduces each tile size to the largest divisor of the corresponding trip
334 /// count (if the trip count is known).
335 static void adjustToDivisorsOfTripCounts(ArrayRef<AffineForOp> band,
336                                          SmallVectorImpl<unsigned> *tileSizes) {
337   assert(band.size() == tileSizes->size() && "invalid tile size count");
338   for (unsigned i = 0, e = band.size(); i < e; i++) {
339     unsigned &tSizeAdjusted = (*tileSizes)[i];
340     auto mayConst = getConstantTripCount(band[i]);
341     if (!mayConst)
342       continue;
343     // Adjust the tile size to largest factor of the trip count less than
344     // tSize.
345     uint64_t constTripCount = mayConst.getValue();
346     if (constTripCount > 1 && tSizeAdjusted > constTripCount / 2)
347       tSizeAdjusted = constTripCount / 2;
348     while (constTripCount % tSizeAdjusted != 0)
349       tSizeAdjusted--;
350   }
351 }
352 
// Returns tile sizes to use. Checks CL options; if none are specified, sets it
// based on a simple model that looks at the memory footprint and determines
// tile sizes assuming identity accesses / 1:1 tile size proportional footprint
// along each of the dimensions being tiled.
// Precedence: the single `tileSize` option (applied to every loop) >
// the `tileSizes` list option (padded with the default) > the footprint
// model below.
// TODO: evolve this model. Tile size determination is a large area
// to play with in general.
void LoopTiling::getTileSizes(ArrayRef<AffineForOp> band,
                              SmallVectorImpl<unsigned> *tileSizes) {
  if (band.empty())
    return;

  // Use command-line tileSize for all loops if specified.
  if (tileSize) {
    tileSizes->assign(band.size(), tileSize);
    return;
  }

  // Use tileSizes and fill them with default tile size if it's short.
  if (!this->tileSizes.empty()) {
    tileSizes->assign(this->tileSizes.begin(), this->tileSizes.end());
    tileSizes->resize(band.size(), kDefaultTileSize);
    return;
  }
  tileSizes->resize(band.size());

  // The first loop in the band.
  auto rootForOp = band[0];
  (void)rootForOp; // Suppresses unused warning; only used under LLVM_DEBUG.

  // Obtain memory footprint and set tile sizes so that a tile fits in
  // the cache size. This is an approximation with the assumption that the
  // footprint increases with the tile size linearly in that dimension (i.e.,
  // assumes one-to-one access function).
  auto fp = getMemoryFootprintBytes(band[0], 0);
  if (!fp) {
    // Fill with default tile sizes if footprint is unknown.
    std::fill(tileSizes->begin(), tileSizes->end(),
              LoopTiling::kDefaultTileSize);
    if (avoidMaxMinBounds)
      adjustToDivisorsOfTripCounts(band, tileSizes);
    LLVM_DEBUG(
        rootForOp.emitWarning("memory footprint unknown: using default tile "
                              "sizes adjusted to trip count divisors"));
    return;
  }

  // Check how many times larger the cache size is when compared to footprint.
  uint64_t cacheSizeBytes = cacheSizeInKiB * 1024;
  uint64_t excessFactor = llvm::divideCeil(fp.getValue(), cacheSizeBytes);
  if (excessFactor <= 1) {
    // No need of any tiling - set tile size to 1.
    std::fill(tileSizes->begin(), tileSizes->end(), 1);
    return;
  }

  // Divide all loops equally in an attempt to reduce footprint.
  // TODO: this is approximate. Ideally, obtain reuse factor /
  // profitability along each dimension and weight tile sizes based on that as
  // one possible approach. Or compute a polynomial in tile sizes and solve for
  // it.

  // For an n-d tileable band, compute the n^th root of the excess.
  unsigned tSize =
      static_cast<unsigned>(floorl(std::pow(excessFactor, 1.0 / band.size())));
  // We'll keep a running product to determine the last tile size better.
  unsigned cumulProductOfTileSizes = 1;
  for (unsigned i = 0, e = band.size(); i < e; i++) {
    if (i < e - 1)
      (*tileSizes)[i] = tSize;
    else
      // Set last tile size to cover the balance.
      (*tileSizes)[i] = std::max(
          1U, static_cast<unsigned>(excessFactor / cumulProductOfTileSizes));
    cumulProductOfTileSizes *= (*tileSizes)[i];
  }
  if (avoidMaxMinBounds)
    adjustToDivisorsOfTripCounts(band, tileSizes);
}
431 
432 void LoopTiling::runOnFunction() {
433   // Bands of loops to tile.
434   std::vector<SmallVector<AffineForOp, 6>> bands;
435   getTileableBands(getFunction(), &bands);
436 
437   // Tile each band.
438   for (auto &band : bands) {
439     // Set up tile sizes; fill missing tile sizes at the end with default tile
440     // size or tileSize if one was provided.
441     SmallVector<unsigned, 6> tileSizes;
442     getTileSizes(band, &tileSizes);
443     if (llvm::DebugFlag) {
444       auto diag = band[0].emitRemark("using tile sizes [");
445       for (auto tSize : tileSizes)
446         diag << tSize << ' ';
447       diag << "]\n";
448     }
449     SmallVector<AffineForOp, 6> tiledNest;
450     if (failed(tilePerfectlyNested(band, tileSizes, &tiledNest)))
451       return signalPassFailure();
452 
453     // Separate full and partial tiles.
454     if (separate) {
455       auto intraTileLoops =
456           MutableArrayRef<AffineForOp>(tiledNest).drop_front(band.size());
457       separateFullTiles(intraTileLoops);
458     }
459   }
460 }
461 
// Out-of-line definition of the static constexpr member for ODR-uses
// (required by pre-C++17 semantics).
constexpr unsigned LoopTiling::kDefaultTileSize;
463