//===- LoopTiling.cpp --- Loop tiling pass ------------------------------*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements a pass to tile loop nests.
//
//===----------------------------------------------------------------------===//

#include "PassDetail.h"
#include "mlir/Analysis/AffineAnalysis.h"
#include "mlir/Analysis/AffineStructures.h"
#include "mlir/Analysis/LoopAnalysis.h"
#include "mlir/Analysis/Utils.h"
#include "mlir/Dialect/Affine/IR/AffineOps.h"
#include "mlir/Dialect/Affine/IR/AffineValueMap.h"
#include "mlir/Dialect/Affine/Passes.h"
#include "mlir/IR/BlockAndValueMapping.h"
#include "mlir/IR/Builders.h"
#include "mlir/Transforms/LoopUtils.h"
#include "mlir/Transforms/Utils.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"

using namespace mlir;

#define DEBUG_TYPE "affine-loop-tile"

namespace {

/// A pass to perform loop tiling on all suitable loop nests of a Function.
struct LoopTiling : public AffineLoopTilingBase<LoopTiling> {
  LoopTiling() = default;
  explicit LoopTiling(uint64_t cacheSizeBytes, bool avoidMaxMinBounds = true)
      : avoidMaxMinBounds(avoidMaxMinBounds) {
    this->cacheSizeInKiB = cacheSizeBytes / 1024;
  }

  void runOnFunction() override;
  void getTileSizes(ArrayRef<AffineForOp> band,
                    SmallVectorImpl<unsigned> *tileSizes);

  // Default tile size if nothing is provided.
  constexpr static unsigned kDefaultTileSize = 4;

  // If true, tile sizes are set to avoid max/min in bounds if possible.
  bool avoidMaxMinBounds = true;
};

} // end anonymous namespace

/// Creates a pass to perform loop tiling on all suitable loop nests of a
/// Function.
std::unique_ptr<OperationPass<FuncOp>>
mlir::createLoopTilingPass(uint64_t cacheSizeBytes) {
  return std::make_unique<LoopTiling>(cacheSizeBytes);
}
std::unique_ptr<OperationPass<FuncOp>> mlir::createLoopTilingPass() {
  return std::make_unique<LoopTiling>();
}

// Move the loop body of AffineForOp 'src' from 'src' into the specified
// location in destination's body, ignoring the terminator.
static inline void moveLoopBody(AffineForOp src, AffineForOp dest,
                                Block::iterator loc) {
  auto &insts = src.getBody()->getOperations();
  dest.getBody()->getOperations().splice(loc, insts, insts.begin(),
                                         std::prev(insts.end()));
}

// Move the loop body of AffineForOp 'src' from 'src' to the start of dest's
// body.
static inline void moveLoopBody(AffineForOp src, AffineForOp dest) {
  moveLoopBody(src, dest, dest.getBody()->begin());
}

/// Constructs and sets new loop bounds after tiling for the case of
/// hyper-rectangular index sets, where the bounds of one dimension do not
/// depend on other dimensions. Bounds of each dimension can thus be treated
/// independently, and deriving the new bounds is much simpler and faster
/// than for the case of tiling arbitrary polyhedral shapes.
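///
/// For example (illustrative values only): if an original loop's upper bound
/// map is ()[s0] -> (s0) and the tile size is 32, the intra-tile loop's upper
/// bound map becomes (d0)[s0] -> (d0 + 32, s0), with the new dimension d0
/// bound to the tile-space loop's induction variable. Since an affine.for
/// upper bound with multiple results takes their minimum, this encodes
/// min(d0 + 32, s0).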
static void
constructTiledIndexSetHyperRect(MutableArrayRef<AffineForOp> origLoops,
                                MutableArrayRef<AffineForOp> newLoops,
                                ArrayRef<unsigned> tileSizes) {
  assert(!origLoops.empty());
  assert(origLoops.size() == tileSizes.size());

  OpBuilder b(origLoops[0].getOperation());
  unsigned width = origLoops.size();

  // Bounds for tile-space loops.
  for (unsigned i = 0; i < width; i++) {
    OperandRange newLbOperands = origLoops[i].getLowerBoundOperands();
    OperandRange newUbOperands = origLoops[i].getUpperBoundOperands();
    newLoops[i].setLowerBound(newLbOperands, origLoops[i].getLowerBoundMap());
    newLoops[i].setUpperBound(newUbOperands, origLoops[i].getUpperBoundMap());
    newLoops[i].setStep(tileSizes[i]);
  }
  // Bounds for intra-tile loops.
  for (unsigned i = 0; i < width; i++) {
    int64_t largestDiv = getLargestDivisorOfTripCount(origLoops[i]);
    auto mayBeConstantCount = getConstantTripCount(origLoops[i]);
    // The lower bound is just the tile-space loop's induction variable.
    AffineMap lbMap = b.getDimIdentityMap();
    newLoops[width + i].setLowerBound(
        /*operands=*/newLoops[i].getInductionVar(), lbMap);

    // Set the upper bound.
    if (mayBeConstantCount && mayBeConstantCount.getValue() < tileSizes[i]) {
      // Trip count is less than the tile size: upper bound is lower bound +
      // trip count.
      auto ubMap = b.getSingleDimShiftAffineMap(mayBeConstantCount.getValue());
      newLoops[width + i].setUpperBound(
          /*operands=*/newLoops[i].getInductionVar(), ubMap);
    } else if (largestDiv % tileSizes[i] != 0) {
      // The intra-tile loop goes from i to min(i + tileSize, ub_i), where i is
      // the tile-space loop's IV.
      // Construct the upper bound map; the operands are the original operands
      // with 'i' (tile-space loop) appended to them. The new upper bound map
      // is the original one with an additional expression i + tileSize
      // appended.

      // Add dim operands from the original upper bound.
      SmallVector<Value, 4> ubOperands;
      auto ub = origLoops[i].getUpperBound();
      ubOperands.reserve(ub.getNumOperands() + 1);
      auto origUbMap = ub.getMap();
      for (unsigned j = 0, e = origUbMap.getNumDims(); j < e; ++j)
        ubOperands.push_back(ub.getOperand(j));

      // Add a dim operand for the new loop upper bound.
      ubOperands.push_back(newLoops[i].getInductionVar());

      // Add symbol operands from the original upper bound.
      for (unsigned j = 0, e = origUbMap.getNumSymbols(); j < e; ++j)
        ubOperands.push_back(ub.getOperand(origUbMap.getNumDims() + j));

      SmallVector<AffineExpr, 4> boundExprs;
      boundExprs.reserve(1 + origUbMap.getNumResults());
      auto dim = b.getAffineDimExpr(origUbMap.getNumDims());
      // The new upper bound map is the original one with an additional
      // expression i + tileSize appended.
      boundExprs.push_back(dim + tileSizes[i]);
      boundExprs.append(origUbMap.getResults().begin(),
                        origUbMap.getResults().end());
      auto ubMap =
          AffineMap::get(origUbMap.getNumDims() + 1, origUbMap.getNumSymbols(),
                         boundExprs, b.getContext());
      newLoops[width + i].setUpperBound(/*operands=*/ubOperands, ubMap);
    } else {
      // No need for the min expression.
      auto dim = b.getAffineDimExpr(0);
      auto ubMap = AffineMap::get(1, 0, dim + tileSizes[i]);
      newLoops[width + i].setUpperBound(newLoops[i].getInductionVar(), ubMap);
    }
  }
}

/// Tiles the specified band of perfectly nested loops, creating tile-space
/// loops and intra-tile loops. A band is a contiguous set of loops.
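///
/// As an illustrative sketch (IV and map names are invented here; constant
/// bounds evenly divisible by the tile sizes are assumed, so no min bounds are
/// needed), tiling
///
///   affine.for %i = 0 to 256 {
///     affine.for %j = 0 to 512 {
///       ...
///     }
///   }
///
/// with tile sizes {32, 64} yields a nest of the form
///
///   affine.for %ii = 0 to 256 step 32 {
///     affine.for %jj = 0 to 512 step 64 {
///       affine.for %i = #id(%ii) to #ub32(%ii) {
///         affine.for %j = #id(%jj) to #ub64(%jj) {
///           ...
///         }
///       }
///     }
///   }
///
/// where #id = (d0) -> (d0), #ub32 = (d0) -> (d0 + 32), and
/// #ub64 = (d0) -> (d0 + 64).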
// TODO: handle non hyper-rectangular spaces.
LogicalResult
mlir::tilePerfectlyNested(MutableArrayRef<AffineForOp> input,
                          ArrayRef<unsigned> tileSizes,
                          SmallVectorImpl<AffineForOp> *tiledNest) {
  // Check if the supplied for ops are all successively nested.
  assert(!input.empty() && "no loops in input band");
  assert(input.size() == tileSizes.size() && "too few/many tile sizes");

  assert(isPerfectlyNested(input) && "input loops not perfectly nested");

  auto origLoops = input;

  AffineForOp rootAffineForOp = origLoops[0];
  auto loc = rootAffineForOp.getLoc();
  // Note that width is at least one since the band isn't empty.
  unsigned width = input.size();

  SmallVector<AffineForOp, 6> tiledLoops(2 * width);

  // The outermost loop as we add more loops around it.
  auto *topLoop = rootAffineForOp.getOperation();
  AffineForOp innermostPointLoop;

  // Add intra-tile (or point) loops.
  for (unsigned i = 0; i < width; i++) {
    OpBuilder b(topLoop);
    // Loop bounds will be set later.
    auto pointLoop = b.create<AffineForOp>(loc, 0, 0);
    pointLoop.getBody()->getOperations().splice(
        pointLoop.getBody()->begin(), topLoop->getBlock()->getOperations(),
        topLoop);
    tiledLoops[2 * width - 1 - i] = pointLoop;
    topLoop = pointLoop.getOperation();
    if (i == 0)
      innermostPointLoop = pointLoop;
  }

  // Add tile-space loops.
  for (unsigned i = width; i < 2 * width; i++) {
    OpBuilder b(topLoop);
    // Loop bounds will be set later.
    auto tileSpaceLoop = b.create<AffineForOp>(loc, 0, 0);
    tileSpaceLoop.getBody()->getOperations().splice(
        tileSpaceLoop.getBody()->begin(), topLoop->getBlock()->getOperations(),
        topLoop);
    tiledLoops[2 * width - i - 1] = tileSpaceLoop;
    topLoop = tileSpaceLoop.getOperation();
  }

  // Move the loop body of the original nest to the new one.
  moveLoopBody(origLoops.back(), innermostPointLoop);

  SmallVector<Value, 8> origLoopIVs;
  extractForInductionVars(input, &origLoopIVs);

  FlatAffineConstraints cst;
  getIndexSet(input, &cst);
  if (!cst.isHyperRectangular(0, width)) {
    rootAffineForOp.emitError("tiled code generation unimplemented for the "
                              "non-hyperrectangular case");
    return failure();
  }

  constructTiledIndexSetHyperRect(origLoops, tiledLoops, tileSizes);

  // Replace original IVs with intra-tile loop IVs.
  for (unsigned i = 0; i < width; i++)
    origLoopIVs[i].replaceAllUsesWith(tiledLoops[i + width].getInductionVar());

  // Erase the old loop nest.
  rootAffineForOp.erase();

  if (tiledNest)
    *tiledNest = std::move(tiledLoops);

  return success();
}

// Identify valid and profitable bands of loops to tile. This is currently just
// a temporary placeholder to test the mechanics of tiled code generation.
// Returns all maximal outermost perfect loop nests to tile.
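// For example (an illustrative sketch; op and value names are invented), in
//
//   affine.for %i ... {
//     affine.for %j ... {
//       "compute"() : () -> ()
//     }
//     "other.op"() : () -> ()
//   }
//
// the maximal perfect nest rooted at %i is just {%i}, since the body of %i
// contains more than a single affine.for; had "other.op" been absent, the
// band would be {%i, %j}.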
static void getTileableBands(FuncOp f,
                             std::vector<SmallVector<AffineForOp, 6>> *bands) {
  // Get the maximal perfect nest of 'affine.for' ops starting from root
  // (inclusive).
  auto getMaximalPerfectLoopNest = [&](AffineForOp root) {
    SmallVector<AffineForOp, 6> band;
    getPerfectlyNestedLoops(band, root);
    bands->push_back(band);
  };

  for (auto &block : f)
    for (auto &op : block)
      if (auto forOp = dyn_cast<AffineForOp>(op))
        getMaximalPerfectLoopNest(forOp);
}

/// Reduces each tile size to the largest divisor of the corresponding trip
/// count (if the trip count is known).
static void adjustToDivisorsOfTripCounts(ArrayRef<AffineForOp> band,
                                         SmallVectorImpl<unsigned> *tileSizes) {
  assert(band.size() == tileSizes->size() && "invalid tile size count");
  for (unsigned i = 0, e = band.size(); i < e; i++) {
    unsigned &tSizeAdjusted = (*tileSizes)[i];
    auto mayConst = getConstantTripCount(band[i]);
    if (!mayConst)
      continue;
    // Adjust the tile size to the largest factor of the trip count that is no
    // larger than tSize (and at most half the trip count).
    uint64_t constTripCount = mayConst.getValue();
    if (constTripCount > 1 && tSizeAdjusted > constTripCount / 2)
      tSizeAdjusted = constTripCount / 2;
    while (constTripCount % tSizeAdjusted != 0)
      tSizeAdjusted--;
  }
}

// Returns tile sizes to use. Checks CL options; if none are specified, sets
// them based on a simple model that looks at the memory footprint and
// determines tile sizes assuming identity accesses / 1:1 tile-size
// proportional footprint along each of the dimensions being tiled.
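//
// As a worked example of this model (numbers purely illustrative): for a
// footprint of 8 MiB and a cache size of 512 KiB, the excess factor is 16; for
// a 3-d band, the first two tile sizes become floor(16^(1/3)) = 2 and the last
// one covers the balance with 16 / (2 * 2) = 4, i.e., tile sizes {2, 2, 4}.
// When avoidMaxMinBounds is set, these are then adjusted down to divisors of
// any known trip counts.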
// TODO: evolve this model. Tile size determination is a large area to play
// with in general.
void LoopTiling::getTileSizes(ArrayRef<AffineForOp> band,
                              SmallVectorImpl<unsigned> *tileSizes) {
  if (band.empty())
    return;

  // Use the command-line tileSize for all loops if specified.
  if (tileSize) {
    tileSizes->assign(band.size(), tileSize);
    return;
  }

  // Use the tileSizes option if provided; pad it with the default tile size if
  // it is too short.
  if (!this->tileSizes.empty()) {
    tileSizes->assign(this->tileSizes.begin(), this->tileSizes.end());
    tileSizes->resize(band.size(), kDefaultTileSize);
    return;
  }
  tileSizes->resize(band.size());

  // The first loop in the band.
  auto rootForOp = band[0];
  (void)rootForOp;

  // Obtain the memory footprint and set tile sizes so that a tile fits in
  // the cache size. This is an approximation with the assumption that the
  // footprint increases with the tile size linearly in that dimension (i.e.,
  // it assumes a one-to-one access function).
  auto fp = getMemoryFootprintBytes(band[0], 0);
  if (!fp) {
    // Fill with default tile sizes if the footprint is unknown.
    std::fill(tileSizes->begin(), tileSizes->end(),
              LoopTiling::kDefaultTileSize);
    if (avoidMaxMinBounds)
      adjustToDivisorsOfTripCounts(band, tileSizes);
    LLVM_DEBUG(
        rootForOp.emitWarning("memory footprint unknown: using default tile "
                              "sizes adjusted to trip count divisors"));
    return;
  }

  // Check how many times larger the footprint is compared to the cache size.
  uint64_t cacheSizeBytes = cacheSizeInKiB * 1024;
  uint64_t excessFactor = llvm::divideCeil(fp.getValue(), cacheSizeBytes);
  if (excessFactor <= 1) {
    // No need for any tiling - set all tile sizes to 1.
    std::fill(tileSizes->begin(), tileSizes->end(), 1);
    return;
  }

  // Divide all loops equally in an attempt to reduce footprint.
  // TODO: this is approximate. Ideally, obtain reuse factor /
  // profitability along each dimension and weight tile sizes based on that as
  // one possible approach. Or compute a polynomial in tile sizes and solve for
  // it.

  // For an n-d tileable band, compute the n^th root of the excess.
  unsigned tSize =
      static_cast<unsigned>(floorl(std::pow(excessFactor, 1.0 / band.size())));
  // We'll keep a running product to determine the last tile size better.
  unsigned cumulProductOfTileSizes = 1;
  for (unsigned i = 0, e = band.size(); i < e; i++) {
    if (i < e - 1)
      (*tileSizes)[i] = tSize;
    else
      // Set the last tile size to cover the balance.
      (*tileSizes)[i] = std::max(
          1U, static_cast<unsigned>(excessFactor / cumulProductOfTileSizes));
    cumulProductOfTileSizes *= (*tileSizes)[i];
  }
  if (avoidMaxMinBounds)
    adjustToDivisorsOfTripCounts(band, tileSizes);
}

void LoopTiling::runOnFunction() {
  // Bands of loops to tile.
  std::vector<SmallVector<AffineForOp, 6>> bands;
  getTileableBands(getFunction(), &bands);

  // Tile each band.
  for (auto &band : bands) {
    // Set up tile sizes; fill missing tile sizes at the end with the default
    // tile size or tileSize if one was provided.
    SmallVector<unsigned, 6> tileSizes;
    getTileSizes(band, &tileSizes);
    if (llvm::DebugFlag) {
      auto diag = band[0].emitRemark("using tile sizes [");
      for (auto tSize : tileSizes)
        diag << tSize << ' ';
      diag << "]\n";
    }
    SmallVector<AffineForOp, 6> tiledNest;
    if (failed(tilePerfectlyNested(band, tileSizes, &tiledNest)))
      return signalPassFailure();

    // Separate full and partial tiles.
    if (separate) {
      auto intraTileLoops =
          MutableArrayRef<AffineForOp>(tiledNest).drop_front(band.size());
      separateFullTiles(intraTileLoops);
    }
  }
}

constexpr unsigned LoopTiling::kDefaultTileSize;