//===- Promotion.cpp - Implementation of linalg Promotion ----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the linalg dialect Promotion pass.
//
//===----------------------------------------------------------------------===//

#include "PassDetail.h"
#include "mlir/Dialect/Arithmetic/IR/Arithmetic.h"
#include "mlir/Dialect/Complex/IR/Complex.h"
#include "mlir/Dialect/Linalg/IR/Linalg.h"
#include "mlir/Dialect/Linalg/Passes.h"
#include "mlir/Dialect/Linalg/Transforms/Transforms.h"
#include "mlir/Dialect/Linalg/Utils/Utils.h"
#include "mlir/Dialect/SCF/IR/SCF.h"
#include "mlir/IR/AffineExpr.h"
#include "mlir/IR/AffineExprVisitor.h"
#include "mlir/IR/AffineMap.h"
#include "mlir/IR/ImplicitLocOpBuilder.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Transforms/FoldUtils.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/TypeSwitch.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"

using namespace mlir;
using namespace mlir::linalg;
using namespace mlir::scf;

using llvm::MapVector;

#define DEBUG_TYPE "linalg-promotion"

/// Allocate a new buffer of `size` * `width` i8, where `width` is given by the
/// data `layout` for `elementType`.
/// Use AllocOp or AllocaOp depending on `options`.
/// Take an optional alignment.
static Value allocBuffer(ImplicitLocOpBuilder &b,
                         const LinalgPromotionOptions &options,
                         Type elementType, Value allocSize, DataLayout &layout,
                         Optional<unsigned> alignment = None) {
  auto width = layout.getTypeSize(elementType);

  IntegerAttr alignmentAttr;
  if (alignment.has_value())
    alignmentAttr = b.getI64IntegerAttr(alignment.value());

  // Static buffer.
  if (auto cst = allocSize.getDefiningOp<arith::ConstantIndexOp>()) {
    auto staticBufferType =
        MemRefType::get(width * cst.value(), b.getIntegerType(8));
    if (options.useAlloca) {
      return b.createOrFold<memref::AllocaOp>(staticBufferType, ValueRange{},
                                              alignmentAttr);
    }
    return b.createOrFold<memref::AllocOp>(staticBufferType, ValueRange{},
                                           alignmentAttr);
  }

  // Fallback dynamic buffer.
  auto dynamicBufferType = MemRefType::get(-1, b.getIntegerType(8));
  Value mul = b.createOrFold<arith::MulIOp>(
      b.create<arith::ConstantIndexOp>(width), allocSize);
  if (options.useAlloca)
    return b.create<memref::AllocaOp>(dynamicBufferType, mul, alignmentAttr);
  return b.create<memref::AllocOp>(dynamicBufferType, mul, alignmentAttr);
}

/// Default allocation callback function. This allocates a promoted buffer when
/// no callback to do so is provided. The default is to allocate a
/// memref<..xi8> and return a view to get a memref type of shape
/// boundingSubViewSize.
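///
/// Illustrative sketch of the emitted IR (assumed f32 element type and SSA
/// names; index constants are elided): promoting a subview with bounding sizes
/// %s0 and %s1 produces roughly
///   %size  = arith.muli %s0, %s1 : index
///   %bytes = arith.muli %c4, %size : index    // 4 = byte width of f32
///   %buf   = memref.alloc(%bytes) : memref<?xi8>
///   %view  = memref.view %buf[%c0][%s0, %s1]
///            : memref<?xi8> to memref<?x?xf32>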
static Optional<Value>
defaultAllocBufferCallBack(const LinalgPromotionOptions &options,
                           OpBuilder &builder, memref::SubViewOp subView,
                           ArrayRef<Value> boundingSubViewSize,
                           Optional<unsigned> alignment, DataLayout &layout) {
  ShapedType viewType = subView.getType();
  ImplicitLocOpBuilder b(subView.getLoc(), builder);
  auto zero = b.createOrFold<arith::ConstantIndexOp>(0);
  auto one = b.createOrFold<arith::ConstantIndexOp>(1);

  Value allocSize = one;
  for (const auto &size : llvm::enumerate(boundingSubViewSize))
    allocSize = b.createOrFold<arith::MulIOp>(allocSize, size.value());
  Value buffer = allocBuffer(b, options, viewType.getElementType(), allocSize,
                             layout, alignment);
  SmallVector<int64_t, 4> dynSizes(boundingSubViewSize.size(),
                                   ShapedType::kDynamicSize);
  Value view = b.createOrFold<memref::ViewOp>(
      MemRefType::get(dynSizes, viewType.getElementType()), buffer, zero,
      boundingSubViewSize);
  return view;
}

/// Default implementation of deallocation of the buffer used for promotion.
/// It expects to get the same value that the default allocation method
/// returned, i.e. the result of a ViewOp.
static LogicalResult
defaultDeallocBufferCallBack(const LinalgPromotionOptions &options,
                             OpBuilder &b, Value fullLocalView) {
  if (!options.useAlloca) {
    auto viewOp = cast<memref::ViewOp>(fullLocalView.getDefiningOp());
    b.create<memref::DeallocOp>(viewOp.getSource().getLoc(),
                                viewOp.getSource());
  }
  return success();
}

namespace {

/// Helper struct that captures the information required to apply the
/// transformation on each op. This bridges the abstraction gap with the
/// user-facing API, which exposes positional arguments to control which
/// operands are promoted.
struct LinalgOpInstancePromotionOptions {
  LinalgOpInstancePromotionOptions(LinalgOp op,
                                   const LinalgPromotionOptions &options);
  /// SubViews to promote.
  MapVector<int64_t, Value> subViews;
  /// True if the full view should be used for the promoted buffer.
  DenseMap<Value, bool> useFullTileBuffers;

  /// Callback functions for allocation and deallocation of promoted buffers,
  /// as well as for copying data into and out of these buffers.
  AllocBufferCallbackFn allocationFn;
  DeallocBufferCallbackFn deallocationFn;
  CopyCallbackFn copyInFn;
  CopyCallbackFn copyOutFn;

  /// Alignment of promoted buffer.
  Optional<unsigned> alignment;
};
} // namespace

LinalgOpInstancePromotionOptions::LinalgOpInstancePromotionOptions(
    LinalgOp linalgOp, const LinalgPromotionOptions &options)
    : subViews(), alignment(options.alignment) {
  assert(linalgOp.hasBufferSemantics() && "revisit usage of shaped operand");
  auto vUseFullTileBuffers =
      options.useFullTileBuffers.value_or(llvm::SmallBitVector());
  vUseFullTileBuffers.resize(linalgOp.getNumInputsAndOutputs(),
                             options.useFullTileBuffersDefault);

  for (OpOperand *opOperand : linalgOp.getInputAndOutputOperands()) {
    int64_t operandNumber = opOperand->getOperandNumber();
    if (options.operandsToPromote &&
        !options.operandsToPromote->count(operandNumber))
      continue;
    Operation *op = opOperand->get().getDefiningOp();
    if (auto sv = dyn_cast_or_null<memref::SubViewOp>(op)) {
      subViews[operandNumber] = sv;
      useFullTileBuffers[sv] = vUseFullTileBuffers[operandNumber];
    }
  }

  if (options.allocationFn) {
    allocationFn = *options.allocationFn;
  } else {
    allocationFn = [&](OpBuilder &b, memref::SubViewOp subViewOp,
                       ArrayRef<Value> boundingSubViewSize,
                       DataLayout &layout) -> Optional<Value> {
      return defaultAllocBufferCallBack(options, b, subViewOp,
                                        boundingSubViewSize, alignment, layout);
    };
  }

  if (options.deallocationFn) {
    deallocationFn = *options.deallocationFn;
  } else {
    deallocationFn = [&](OpBuilder &b, Value buffer) {
      return defaultDeallocBufferCallBack(options, b, buffer);
    };
  }

  // Save the loc because `linalgOp` goes out of scope.
  Location loc = linalgOp.getLoc();
  auto defaultCopyCallBack = [loc](OpBuilder &b, Value src,
                                   Value dst) -> LogicalResult {
    b.create<memref::CopyOp>(loc, src, dst);
    return success();
  };
  copyInFn = (options.copyInFn ? *(options.copyInFn) : defaultCopyCallBack);
  copyOutFn = (options.copyOutFn ? *(options.copyOutFn) : defaultCopyCallBack);
}

// Performs promotion of a `subView` into a local buffer of the size of the
// *ranges* of the `subView`. This produces a buffer whose size may be bigger
// than the actual size of the `subView` at the boundaries.
// This is related to the full/partial tile problem.
// Returns a PromotionInfo containing a `buffer`, `fullLocalView` and
// `partialLocalView` such that:
//   * `buffer` is always the size of the full tile.
//   * `fullLocalView` is a dense contiguous view into that buffer.
//   * `partialLocalView` is a dense non-contiguous slice of `fullLocalView`
//     that corresponds to the size of `subView` and accounts for boundary
//     effects.
// The point of the full tile buffer is that constant static tile sizes are
// folded and result in a buffer type with statically known size and alignment
// properties.
// To account for general boundary effects, padding must be performed on the
// boundary tiles. For now this is done with an unconditional `fill` op followed
// by a partial `copy` op.
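//
// Illustrative sketch (assumed static 4x8 f32 tile and SSA names; the actual
// IR depends on the allocation callback): for a boundary tile whose real
// extents are %p0 x %p1, this produces roughly
//   %buf     = memref.alloc() : memref<128xi8>   // full tile: 4 * 8 * 4 bytes
//   %full    = memref.view %buf[%c0][%c4, %c8]
//              : memref<128xi8> to memref<?x?xf32>
//   %partial = memref.subview %full[0, 0] [%p0, %p1] [1, 1]
// where `fullLocalView` is %full and `partialLocalView` is %partial.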
FailureOr<PromotionInfo> mlir::linalg::promoteSubviewAsNewBuffer(
    OpBuilder &b, Location loc, memref::SubViewOp subView,
    const AllocBufferCallbackFn &allocationFn, DataLayout &layout) {
  auto viewType = subView.getType();
  auto rank = viewType.getRank();
  SmallVector<Value, 4> fullSizes;
  SmallVector<OpFoldResult> partialSizes;
  fullSizes.reserve(rank);
  partialSizes.reserve(rank);
  llvm::SmallBitVector droppedDims = subView.getDroppedDims();
  int64_t resultDimIdx = 0;
  for (const auto &en : llvm::enumerate(subView.getOrCreateRanges(b, loc))) {
    if (droppedDims[en.index()])
      continue;
    auto rangeValue = en.value();
    // Try to extract a tight constant. If the size is statically known, there
    // is no need to look for an upper bound.
    LLVM_DEBUG(llvm::dbgs() << "Extract tightest: " << rangeValue.size << "\n");
    Value size;
    if (auto attr = rangeValue.size.dyn_cast<Attribute>()) {
      size = materializeOpFoldResult(b, loc, rangeValue.size);
    } else {
      Value materializedSize = materializeOpFoldResult(b, loc, rangeValue.size);
      FailureOr<int64_t> upperBound =
          getConstantUpperBoundForIndex(materializedSize);
      size = failed(upperBound)
                 ? materializedSize
                 : b.create<arith::ConstantIndexOp>(loc, upperBound.value());
    }
    LLVM_DEBUG(llvm::dbgs() << "Extracted tightest: " << size << "\n");
    fullSizes.push_back(size);
    partialSizes.push_back(
        b.createOrFold<memref::DimOp>(loc, subView, resultDimIdx++));
  }
  SmallVector<int64_t, 4> dynSizes(fullSizes.size(), -1);
  // If a callback is not specified, then use the default implementation for
  // allocating the promoted buffer.
  Optional<Value> fullLocalView = allocationFn(b, subView, fullSizes, layout);
  if (!fullLocalView)
    return failure();
  SmallVector<OpFoldResult, 4> zeros(fullSizes.size(), b.getIndexAttr(0));
  SmallVector<OpFoldResult, 4> ones(fullSizes.size(), b.getIndexAttr(1));
  auto partialLocalView = b.createOrFold<memref::SubViewOp>(
      loc, *fullLocalView, zeros, partialSizes, ones);
  return PromotionInfo{*fullLocalView, partialLocalView};
}

static FailureOr<MapVector<int64_t, PromotionInfo>>
promoteSubViews(ImplicitLocOpBuilder &b,
                LinalgOpInstancePromotionOptions options, DataLayout &layout) {
  if (options.subViews.empty())
    return failure();

  MapVector<int64_t, PromotionInfo> promotionInfoMap;

  for (auto v : options.subViews) {
    memref::SubViewOp subView =
        cast<memref::SubViewOp>(v.second.getDefiningOp());
    auto promotionInfo = promoteSubviewAsNewBuffer(
        b, b.getLoc(), subView, options.allocationFn, layout);
    if (failed(promotionInfo))
      return failure();
    promotionInfoMap[v.first] = *promotionInfo;

    // Only fill the buffer if the full local view is used.
    if (!options.useFullTileBuffers[v.second])
      continue;
    Type subviewEltType = subView.getType().getElementType();
    Value fillVal =
        llvm::TypeSwitch<Type, Value>(subviewEltType)
            .Case([&](FloatType t) {
              return b.create<arith::ConstantOp>(FloatAttr::get(t, 0.0));
            })
            .Case([&](IntegerType t) {
              return b.create<arith::ConstantOp>(IntegerAttr::get(t, 0));
            })
            .Case([&](ComplexType t) {
              Value tmp;
              if (auto et = t.getElementType().dyn_cast<FloatType>())
                tmp = b.create<arith::ConstantOp>(FloatAttr::get(et, 0.0));
              else if (auto et = t.getElementType().dyn_cast<IntegerType>())
                tmp = b.create<arith::ConstantOp>(IntegerAttr::get(et, 0));
              return b.create<complex::CreateOp>(t, tmp, tmp);
            })
            .Default([](auto) { return Value(); });
    if (!fillVal)
      return failure();
    b.create<linalg::FillOp>(fillVal, promotionInfo->fullLocalView);
  }

  // Copy data into the promoted buffers. Use the callback if provided.
  for (auto v : options.subViews) {
    auto info = promotionInfoMap.find(v.first);
    if (info == promotionInfoMap.end())
      continue;
    if (failed(options.copyInFn(
            b, cast<memref::SubViewOp>(v.second.getDefiningOp()),
            info->second.partialLocalView)))
      return failure();
  }
  return promotionInfoMap;
}

static FailureOr<LinalgOp>
promoteSubViews(ImplicitLocOpBuilder &b, LinalgOp op,
                LinalgOpInstancePromotionOptions options, DataLayout &layout) {
  assert(op.hasBufferSemantics() && "expected linalg op with buffer semantics");

  // 1. Promote the specified views and use them in the new op.
  auto promotedBuffersAndViews = promoteSubViews(b, options, layout);
  if (failed(promotedBuffersAndViews) ||
      promotedBuffersAndViews->size() != options.subViews.size())
    return failure();

  // 2. Append all other operands as they appear; this enforces that such
  // operands are not views. This is to support cases such as FillOp taking
  // extra scalars etc. Keep a reference to output buffers.
  SmallVector<Value, 8> opViews;
  opViews.reserve(op.getNumInputsAndOutputs());
  SmallVector<std::pair<Value, Value>, 8> writebackViews;
  writebackViews.reserve(promotedBuffersAndViews->size());
  for (OpOperand *opOperand : op.getInputAndOutputOperands()) {
    int64_t operandNumber = opOperand->getOperandNumber();
    if (options.subViews.count(operandNumber) != 0) {
      if (options.useFullTileBuffers[opOperand->get()])
        opViews.push_back(
            (*promotedBuffersAndViews)[operandNumber].fullLocalView);
      else
        opViews.push_back(
            (*promotedBuffersAndViews)[operandNumber].partialLocalView);
      if (operandNumber >= op.getNumInputs())
        writebackViews.emplace_back(std::make_pair(
            opOperand->get(),
            (*promotedBuffersAndViews)[operandNumber].partialLocalView));
    } else {
      opViews.push_back(opOperand->get());
    }
  }
  op->setOperands(0, opViews.size(), opViews);

  OpBuilder::InsertionGuard guard(b);
  b.setInsertionPointAfter(op);
  // 3. Emit write-back for the promoted output views: copy the partial view.
  for (auto viewAndPartialLocalView : writebackViews) {
    if (failed(options.copyOutFn(b, viewAndPartialLocalView.second,
                                 viewAndPartialLocalView.first)))
      return failure();
  }

  // 4. Dealloc all local buffers.
  for (const auto &pi : *promotedBuffersAndViews)
    (void)options.deallocationFn(b, pi.second.fullLocalView);
  return op;
}

LogicalResult
mlir::linalg::promoteSubviewsPrecondition(Operation *op,
                                          LinalgPromotionOptions options) {
  LinalgOp linalgOp = dyn_cast<LinalgOp>(op);
  // Transformation applies to buffers only.
  if (!linalgOp || !linalgOp.hasBufferSemantics())
    return failure();
  // Check that at least one of the requested operands is indeed a subview.
  for (OpOperand *opOperand : linalgOp.getInputAndOutputOperands()) {
    auto sv =
        isa_and_nonnull<memref::SubViewOp>(opOperand->get().getDefiningOp());
    if (sv) {
      if (!options.operandsToPromote ||
          options.operandsToPromote->count(opOperand->getOperandNumber()))
        return success();
    }
  }
  // TODO: Check all subviews requested are bound by a static constant.
  // TODO: Check that the total footprint fits within a given size.
  return failure();
}

FailureOr<LinalgOp>
mlir::linalg::promoteSubViews(OpBuilder &builder, LinalgOp linalgOp,
                              const LinalgPromotionOptions &options) {
  LinalgOpInstancePromotionOptions linalgOptions(linalgOp, options);
  auto layout = DataLayout::closest(linalgOp);
  ImplicitLocOpBuilder b(linalgOp.getLoc(), builder);
  auto res = ::promoteSubViews(b, linalgOp, linalgOptions, layout);
  if (failed(res))
    return failure();
  return res;
}
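
// Illustrative usage sketch (hypothetical call site; option setters follow
// LinalgPromotionOptions in Transforms.h):
//
//   LinalgPromotionOptions options;
//   options.setOperandsToPromote({0, 1}).setUseFullTileBuffers({true, true});
//   if (succeeded(promoteSubviewsPrecondition(linalgOp, options)))
//     (void)promoteSubViews(builder, linalgOp, options);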