//===- Promotion.cpp - Implementation of linalg Promotion -----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the linalg dialect Promotion pass.
//
//===----------------------------------------------------------------------===//

#include "mlir/Dialect/Arith/IR/Arith.h"
#include "mlir/Dialect/Arith/Utils/Utils.h"
#include "mlir/Dialect/Complex/IR/Complex.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Dialect/GPU/IR/GPUDialect.h"
#include "mlir/Dialect/Linalg/IR/Linalg.h"
#include "mlir/Dialect/Linalg/Passes.h"
#include "mlir/Dialect/Linalg/Transforms/Transforms.h"
#include "mlir/Dialect/SCF/IR/SCF.h"
#include "mlir/IR/AffineExpr.h"
#include "mlir/IR/AffineExprVisitor.h"
#include "mlir/IR/AffineMap.h"
#include "mlir/IR/ImplicitLocOpBuilder.h"
#include "mlir/Interfaces/ValueBoundsOpInterface.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Transforms/FoldUtils.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/TypeSwitch.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"

using namespace mlir;
using namespace mlir::linalg;
using namespace mlir::scf;

using llvm::MapVector;

#define DEBUG_TYPE "linalg-promotion"

/// Allocate a new buffer of `size` * `width` i8, where `width` is given by the
/// data `layout` for `elementType`.
/// Use AllocOp or AllocaOp depending on `options`.
/// Take an optional alignment.
static Value allocBuffer(ImplicitLocOpBuilder &b,
                         const LinalgPromotionOptions &options,
                         Type elementType, Value allocSize, DataLayout &layout,
                         std::optional<unsigned> alignment = std::nullopt) {
  llvm::TypeSize width = layout.getTypeSize(elementType);
  assert(!width.isScalable() && "cannot allocate buffer for a scalable vector");

  IntegerAttr alignmentAttr;
  if (alignment.has_value())
    alignmentAttr = b.getI64IntegerAttr(alignment.value());

  Attribute memorySpaceAttr;
  if (options.memorySpace.has_value())
    memorySpaceAttr = *options.memorySpace;

  // Static buffer.
  if (std::optional<int64_t> cst = getConstantIntValue(allocSize)) {
    auto staticBufferType = MemRefType::get(width.getFixedValue() * cst.value(),
                                            b.getIntegerType(8));
    staticBufferType =
        MemRefType::Builder(staticBufferType).setMemorySpace(memorySpaceAttr);
    if (options.useAlloca) {
      return b.create<memref::AllocaOp>(staticBufferType, ValueRange{},
                                        alignmentAttr);
    }
    return b.create<memref::AllocOp>(staticBufferType, ValueRange{},
                                     alignmentAttr);
  }

  // Fallback dynamic buffer.
  auto dynamicBufferType =
      MemRefType::get(ShapedType::kDynamic, b.getIntegerType(8));
  dynamicBufferType =
      MemRefType::Builder(dynamicBufferType).setMemorySpace(memorySpaceAttr);
  Value mul = b.createOrFold<arith::MulIOp>(
      b.create<arith::ConstantIndexOp>(width), allocSize);
  if (options.useAlloca)
    return b.create<memref::AllocaOp>(dynamicBufferType, mul, alignmentAttr);
  return b.create<memref::AllocOp>(dynamicBufferType, mul, alignmentAttr);
}
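
// As a sketch (assuming an f32 element type, i.e. `width` == 4): when
// `allocSize` folds to the constant 128, the static path above produces
//   %buffer = memref.alloc() : memref<512xi8>
// whereas the dynamic fallback scales the size by the element width:
//   %c4 = arith.constant 4 : index
//   %bytes = arith.muli %c4, %allocSize : index
//   %buffer = memref.alloc(%bytes) : memref<?xi8>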

/// Default allocation callback function. This allocates a promoted buffer when
/// no callback to do so is provided. The default is to allocate a
/// memref<..xi8> and return a view to get a memref type of shape
/// `boundingSubViewSize`.
static std::optional<Value> defaultAllocBufferCallBack(
    const LinalgPromotionOptions &options, OpBuilder &builder,
    memref::SubViewOp subView, ArrayRef<Value> boundingSubViewSize,
    std::optional<unsigned> alignment, DataLayout &layout) {
  ShapedType viewType = subView.getType();
  ImplicitLocOpBuilder b(subView.getLoc(), builder);
  auto zero = b.create<arith::ConstantIndexOp>(0);
  auto one = b.create<arith::ConstantIndexOp>(1);

  Attribute memorySpaceAttr;
  if (options.memorySpace.has_value())
    memorySpaceAttr = *options.memorySpace;

  Value allocSize = one;
  for (const auto &size : llvm::enumerate(boundingSubViewSize))
    allocSize = b.createOrFold<arith::MulIOp>(allocSize, size.value());
  Value buffer = allocBuffer(b, options, viewType.getElementType(), allocSize,
                             layout, alignment);
  SmallVector<int64_t, 4> dynSizes(boundingSubViewSize.size(),
                                   ShapedType::kDynamic);

  auto viewMemRefType = MemRefType::get(dynSizes, viewType.getElementType());
  viewMemRefType =
      MemRefType::Builder(viewMemRefType).setMemorySpace(memorySpaceAttr);
  Value view = b.createOrFold<memref::ViewOp>(viewMemRefType, buffer, zero,
                                              boundingSubViewSize);
  return view;
}

/// Default implementation of the deallocation of the buffer used for
/// promotion. It expects to get the same value that the default allocation
/// method returned, i.e. the result of a ViewOp.
static LogicalResult
defaultDeallocBufferCallBack(const LinalgPromotionOptions &options,
                             OpBuilder &b, Value fullLocalView) {
  if (!options.useAlloca) {
    auto viewOp = cast<memref::ViewOp>(fullLocalView.getDefiningOp());
    b.create<memref::DeallocOp>(viewOp.getSource().getLoc(),
                                viewOp.getSource());
  }
  return success();
}
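
// For instance (a hypothetical 2-D f32 promotion), the default callbacks pair
// up roughly as:
//   %buffer = memref.alloc(%bytes) : memref<?xi8>
//   %view = memref.view %buffer[%c0][%s0, %s1]
//       : memref<?xi8> to memref<?x?xf32>
//   ...
//   memref.dealloc %buffer : memref<?xi8>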

namespace {

/// Helper struct that captures the information required to apply the
/// transformation on each op. This bridges the abstraction gap with the
/// user-facing API, which exposes positional arguments to control which
/// operands are promoted.
struct LinalgOpInstancePromotionOptions {
  LinalgOpInstancePromotionOptions(LinalgOp op,
                                   const LinalgPromotionOptions &options);
  /// SubViews to promote.
  MapVector<int64_t, Value> subViews;
  /// Operand numbers of the subviews to copy in using `copyInFn`.
  llvm::SmallSet<int64_t, 4> operandsNumbersToCopyIn;
  /// True if the full view should be used for the promoted buffer.
  DenseMap<Value, bool> useFullTileBuffers;

  /// Callback functions for allocation and deallocation of promoted buffers,
  /// as well as to copy the data into and out of these buffers.
  AllocBufferCallbackFn allocationFn;
  DeallocBufferCallbackFn deallocationFn;
  CopyCallbackFn copyInFn;
  CopyCallbackFn copyOutFn;

  /// Alignment of the promoted buffer.
  std::optional<unsigned> alignment;
};
} // namespace

LinalgOpInstancePromotionOptions::LinalgOpInstancePromotionOptions(
    LinalgOp linalgOp, const LinalgPromotionOptions &options)
    : subViews(), alignment(options.alignment) {
  assert(linalgOp.hasPureBufferSemantics() &&
         "revisit usage of shaped operand");
  auto vUseFullTileBuffers =
      options.useFullTileBuffers.value_or(llvm::SmallBitVector());
  vUseFullTileBuffers.resize(linalgOp->getNumOperands(),
                             options.useFullTileBuffersDefault);

  for (OpOperand &opOperand : linalgOp->getOpOperands()) {
    int64_t operandNumber = opOperand.getOperandNumber();
    if (options.operandsToPromote &&
        !options.operandsToPromote->count(operandNumber))
      continue;
    Operation *op = opOperand.get().getDefiningOp();
    if (auto sv = dyn_cast_or_null<memref::SubViewOp>(op)) {
      subViews[operandNumber] = sv;
      // In the case of linalg.generic, copy in only if the subview is used in
      // the linalg payload.
      if (!isa<linalg::GenericOp>(linalgOp) ||
          linalgOp.payloadUsesValueFromOperand(&opOperand))
        operandsNumbersToCopyIn.insert(operandNumber);
      useFullTileBuffers[sv] = vUseFullTileBuffers[operandNumber];
    }
  }

  if (options.allocationFn) {
    allocationFn = *options.allocationFn;
  } else {
    allocationFn = [&](OpBuilder &b, memref::SubViewOp subViewOp,
                       ArrayRef<Value> boundingSubViewSize,
                       DataLayout &layout) -> std::optional<Value> {
      return defaultAllocBufferCallBack(options, b, subViewOp,
                                        boundingSubViewSize, alignment, layout);
    };
  }

  if (options.deallocationFn) {
    deallocationFn = *options.deallocationFn;
  } else {
    deallocationFn = [&](OpBuilder &b, Value buffer) {
      return defaultDeallocBufferCallBack(options, b, buffer);
    };
  }

  // Save the loc because `linalgOp` goes out of scope.
  Location loc = linalgOp.getLoc();
  auto defaultCopyCallBack = [loc](OpBuilder &b, Value src,
                                   Value dst) -> LogicalResult {
    b.create<linalg::CopyOp>(loc, src, dst);
    return success();
  };
  copyInFn = (options.copyInFn ? *(options.copyInFn) : defaultCopyCallBack);
  copyOutFn = (options.copyOutFn ? *(options.copyOutFn) : defaultCopyCallBack);
}

// Performs promotion of a `subView` into a local buffer of the size of the
// *ranges* of the `subView`. This produces a buffer whose size may be bigger
// than the actual size of the `subView` at the boundaries.
// This is related to the full/partial tile problem.
// Returns a PromotionInfo containing a `buffer`, `fullLocalView` and
// `partialLocalView` such that:
//   * `buffer` is always the size of the full tile.
//   * `fullLocalView` is a dense contiguous view into that buffer.
//   * `partialLocalView` is a dense non-contiguous slice of `fullLocalView`
//     that corresponds to the size of `subView` and accounts for boundary
//     effects.
// The point of the full tile buffer is that constant static tile sizes are
// folded and result in a buffer type with statically known size and alignment
// properties.
// To account for general boundary effects, padding must be performed on the
// boundary tiles. For now this is done with an unconditional `fill` op followed
// by a partial `copy` op.
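// For example (hypothetical shapes), promoting a boundary subview of a 4x8
// f32 tile might yield, roughly:
//   %buffer = memref.alloc() : memref<128xi8>
//   %full = memref.view %buffer[%c0][] : memref<128xi8> to memref<4x8xf32>
//   %partial = memref.subview %full[0, 0] [%d0, %d1] [1, 1]
//       : memref<4x8xf32> to memref<?x?xf32, strided<[8, 1]>>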
FailureOr<PromotionInfo> mlir::linalg::promoteSubviewAsNewBuffer(
    OpBuilder &b, Location loc, memref::SubViewOp subView,
    const AllocBufferCallbackFn &allocationFn, DataLayout &layout) {
  auto viewType = subView.getType();
  auto rank = viewType.getRank();
  SmallVector<Value, 4> fullSizes;
  SmallVector<OpFoldResult> partialSizes;
  fullSizes.reserve(rank);
  partialSizes.reserve(rank);
  llvm::SmallBitVector droppedDims = subView.getDroppedDims();
  int64_t resultDimIdx = 0;
  for (const auto &en : llvm::enumerate(subView.getOrCreateRanges(b, loc))) {
    if (droppedDims[en.index()])
      continue;
    auto rangeValue = en.value();
    // Try to extract a tight constant. If the size is known statically, no
    // need to look for the bound.
    LLVM_DEBUG(llvm::dbgs() << "Extract tightest: " << rangeValue.size << "\n");
    Value size;
    if (auto attr = llvm::dyn_cast_if_present<Attribute>(rangeValue.size)) {
      size = getValueOrCreateConstantIndexOp(b, loc, rangeValue.size);
    } else {
      Value materializedSize =
          getValueOrCreateConstantIndexOp(b, loc, rangeValue.size);
      FailureOr<int64_t> upperBound =
          ValueBoundsConstraintSet::computeConstantBound(
              presburger::BoundType::UB, materializedSize, /*dim=*/std::nullopt,
              /*stopCondition=*/nullptr, /*closedUB=*/true);
      size = failed(upperBound)
                 ? materializedSize
                 : b.create<arith::ConstantIndexOp>(loc, *upperBound);
    }
    LLVM_DEBUG(llvm::dbgs() << "Extracted tightest: " << size << "\n");
    fullSizes.push_back(size);
    partialSizes.push_back(
        b.createOrFold<memref::DimOp>(loc, subView, resultDimIdx++));
  }
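  // As a concrete (hypothetical) example: tiling a 10x10 buffer by 4x4, the
  // bottom-right boundary tile gets `fullSizes` == [4, 4] (the constant upper
  // bound) while `partialSizes` == [2, 2] (the runtime memref.dim values of
  // the subview).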
  SmallVector<int64_t, 4> dynSizes(fullSizes.size(), ShapedType::kDynamic);
  // Allocate the promoted buffer. When the caller does not provide a callback,
  // the default implementation is installed by
  // LinalgOpInstancePromotionOptions.
  std::optional<Value> fullLocalView =
      allocationFn(b, subView, fullSizes, layout);
  if (!fullLocalView)
    return failure();
  SmallVector<OpFoldResult, 4> zeros(fullSizes.size(), b.getIndexAttr(0));
  SmallVector<OpFoldResult, 4> ones(fullSizes.size(), b.getIndexAttr(1));
  auto partialLocalView = b.createOrFold<memref::SubViewOp>(
      loc, *fullLocalView, zeros, partialSizes, ones);
  return PromotionInfo{*fullLocalView, partialLocalView};
}

static FailureOr<MapVector<int64_t, PromotionInfo>>
promoteSubViews(ImplicitLocOpBuilder &b,
                LinalgOpInstancePromotionOptions options, DataLayout &layout) {
  if (options.subViews.empty())
    return failure();

  MapVector<int64_t, PromotionInfo> promotionInfoMap;

  for (auto v : options.subViews) {
    memref::SubViewOp subView =
        cast<memref::SubViewOp>(v.second.getDefiningOp());
    auto promotionInfo = promoteSubviewAsNewBuffer(
        b, b.getLoc(), subView, options.allocationFn, layout);
    if (failed(promotionInfo))
      return failure();
    promotionInfoMap[v.first] = *promotionInfo;

    // Only fill the buffer if the full local view is used.
    if (!options.useFullTileBuffers[v.second])
      continue;
    Type subviewEltType = subView.getType().getElementType();
    Value fillVal =
        llvm::TypeSwitch<Type, Value>(subviewEltType)
            .Case([&](FloatType t) {
              return b.create<arith::ConstantOp>(FloatAttr::get(t, 0.0));
            })
            .Case([&](IntegerType t) {
              return b.create<arith::ConstantOp>(IntegerAttr::get(t, 0));
            })
            .Case([&](ComplexType t) {
              Value tmp;
              if (auto et = dyn_cast<FloatType>(t.getElementType()))
                tmp = b.create<arith::ConstantOp>(FloatAttr::get(et, 0.0));
              else if (auto et = dyn_cast<IntegerType>(t.getElementType()))
                tmp = b.create<arith::ConstantOp>(IntegerAttr::get(et, 0));
              return b.create<complex::CreateOp>(t, tmp, tmp);
            })
            .Default([](auto) { return Value(); });
    if (!fillVal)
      return failure();
    b.create<linalg::FillOp>(fillVal, promotionInfo->fullLocalView);
  }

  // Copy data into the promoted buffers. Use the callback if provided.
  for (auto v : options.subViews) {
    auto *info = promotionInfoMap.find(v.first);
    if (info == promotionInfoMap.end())
      continue;
    if (options.operandsNumbersToCopyIn.count(v.first) == 0)
      continue;
    if (failed(options.copyInFn(
            b, cast<memref::SubViewOp>(v.second.getDefiningOp()),
            info->second.partialLocalView)))
      return failure();
  }
  return promotionInfoMap;
}
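
// For a promoted f32 operand with `useFullTileBuffers` set, the sequence
// emitted above is roughly (a sketch, not literal output):
//   %cst = arith.constant 0.000000e+00 : f32
//   linalg.fill ins(%cst : f32) outs(%full : memref<4x8xf32>)
//   linalg.copy ins(%sv : memref<?x?xf32, strided<...>>)
//               outs(%partial : memref<?x?xf32, strided<...>>)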

static FailureOr<LinalgOp>
promoteSubViews(ImplicitLocOpBuilder &b, LinalgOp op,
                LinalgOpInstancePromotionOptions options, DataLayout &layout) {
  assert(op.hasPureBufferSemantics() &&
         "expected linalg op with buffer semantics");

  // 1. Promote the specified views and use them in the new op.
  auto promotedBuffersAndViews = promoteSubViews(b, options, layout);
  if (failed(promotedBuffersAndViews) ||
      promotedBuffersAndViews->size() != options.subViews.size())
    return failure();

  // 2. Append all other operands as they appear; this enforces that such
  // operands are not views. This is to support cases such as FillOp taking
  // extra scalars, etc. Keep a reference to the output buffers.
  SmallVector<Value, 8> opViews;
  opViews.reserve(op->getNumOperands());
  SmallVector<std::pair<Value, Value>, 8> writebackViews;
  writebackViews.reserve(promotedBuffersAndViews->size());
  for (OpOperand &opOperand : op->getOpOperands()) {
    int64_t operandNumber = opOperand.getOperandNumber();
    if (options.subViews.count(operandNumber) != 0) {
      if (options.useFullTileBuffers[opOperand.get()])
        opViews.push_back(
            (*promotedBuffersAndViews)[operandNumber].fullLocalView);
      else
        opViews.push_back(
            (*promotedBuffersAndViews)[operandNumber].partialLocalView);
      if (operandNumber >= op.getNumDpsInputs())
        writebackViews.emplace_back(
            opOperand.get(),
            (*promotedBuffersAndViews)[operandNumber].partialLocalView);
    } else {
      opViews.push_back(opOperand.get());
    }
  }
  op->setOperands(0, opViews.size(), opViews);

  OpBuilder::InsertionGuard guard(b);
  b.setInsertionPointAfter(op);
  // 3. Emit write-back for the promoted output views: copy the partial view.
  for (auto viewAndPartialLocalView : writebackViews) {
    if (failed(options.copyOutFn(b, viewAndPartialLocalView.second,
                                 viewAndPartialLocalView.first)))
      return failure();
  }

  // 4. Dealloc all local buffers.
  for (const auto &pi : *promotedBuffersAndViews)
    (void)options.deallocationFn(b, pi.second.fullLocalView);
  return op;
}

LogicalResult
mlir::linalg::promoteSubviewsPrecondition(Operation *op,
                                          LinalgPromotionOptions options) {
  LinalgOp linalgOp = dyn_cast<LinalgOp>(op);
  // Transformation applies to buffers only.
  if (!linalgOp || !linalgOp.hasPureBufferSemantics())
    return failure();
  // Check that at least one of the requested operands is indeed a subview.
  for (OpOperand &opOperand : linalgOp->getOpOperands()) {
    if (isa_and_nonnull<memref::SubViewOp>(opOperand.get().getDefiningOp())) {
      if (!options.operandsToPromote ||
          options.operandsToPromote->count(opOperand.getOperandNumber()))
        return success();
    }
  }
  // TODO: Check all subviews requested are bound by a static constant.
  // TODO: Check that the total footprint fits within a given size.
  return failure();
}

FailureOr<LinalgOp>
mlir::linalg::promoteSubViews(OpBuilder &builder, LinalgOp linalgOp,
                              const LinalgPromotionOptions &options) {
  LinalgOpInstancePromotionOptions linalgOptions(linalgOp, options);
  auto layout = DataLayout::closest(linalgOp);
  ImplicitLocOpBuilder b(linalgOp.getLoc(), builder);
  auto res = ::promoteSubViews(b, linalgOp, linalgOptions, layout);
  if (failed(res))
    return failure();
  return res;
}
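
// A minimal usage sketch (hypothetical driver code; the setters below come
// from LinalgPromotionOptions in Transforms.h): promote operands 0 and 1 into
// GPU workgroup memory using the callbacks defined below.
//   LinalgPromotionOptions options;
//   options.setOperandsToPromote({0, 1})
//       .setAllocationDeallocationFns(allocateWorkgroupMemory,
//                                     deallocateWorkgroupMemory)
//       .setCopyInOutFns(copyToWorkgroupMemory, copyToWorkgroupMemory);
//   if (succeeded(promoteSubviewsPrecondition(linalgOp, options)))
//     (void)promoteSubViews(builder, linalgOp, options);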

/// Allocate the given subview in a GPU memory address space by creating an
/// allocation operation and setting the memref type's address space to the
/// desired one.
static std::optional<Value> allocateSubviewGPUMemoryInAddressSpace(
    OpBuilder &builder, memref::SubViewOp subview, ArrayRef<Value> sizeBounds,
    gpu::AddressSpace addressSpace) {
  OpBuilder::InsertionGuard guard(builder);

  func::FuncOp funcOp = subview->getParentOfType<func::FuncOp>();
  if (!funcOp)
    return std::nullopt;

  // The subview size bounds are expected to be constant; they specify the
  // shape of the allocation.
  SmallVector<int64_t> shape;
  for (Value bound : sizeBounds) {
    APInt value;
    if (!matchPattern(bound, m_ConstantInt(&value)))
      return std::nullopt;
    shape.push_back(value.getSExtValue());
  }

  builder.setInsertionPoint(&funcOp.front(), funcOp.front().begin());
  auto type = MemRefType::get(
      shape, subview.getType().getElementType(), MemRefLayoutAttrInterface{},
      gpu::AddressSpaceAttr::get(builder.getContext(), addressSpace));
  Value buffer;
  if (addressSpace == gpu::GPUDialect::getWorkgroupAddressSpace()) {
    buffer = builder.create<memref::AllocOp>(funcOp.getLoc(), type);
  } else if (addressSpace == gpu::GPUDialect::getPrivateAddressSpace()) {
    buffer = builder.create<memref::AllocaOp>(funcOp.getLoc(), type);
  } else {
    return std::nullopt;
  }
  return buffer;
}

/// Allocate the subview in GPU workgroup memory.
std::optional<Value> mlir::linalg::allocateWorkgroupMemory(
    OpBuilder &builder, memref::SubViewOp subview, ArrayRef<Value> sizeBounds,
    DataLayout &) {
  return allocateSubviewGPUMemoryInAddressSpace(
      builder, subview, sizeBounds,
      gpu::GPUDialect::getWorkgroupAddressSpace());
}

/// In the case of GPU workgroup memory there is no need to deallocate.
LogicalResult mlir::linalg::deallocateWorkgroupMemory(OpBuilder &,
                                                      Value /*buffer*/) {
  return success();
}

/// Create a memref copy operation, guarded by gpu barriers before and after
/// the copy, to ensure data integrity.
LogicalResult mlir::linalg::copyToWorkgroupMemory(OpBuilder &b, Value src,
                                                  Value dst) {
  b.create<gpu::BarrierOp>(src.getLoc());
  Operation *copyOp = b.create<memref::CopyOp>(src.getLoc(), src, dst);
  b.create<gpu::BarrierOp>(copyOp->getLoc());
  return success();
}

/// Allocate the subview in GPU private memory.
std::optional<Value> mlir::linalg::allocateGPUPrivateMemory(
    OpBuilder &builder, memref::SubViewOp subview, ArrayRef<Value> sizeBounds,
    DataLayout &) {
  return allocateSubviewGPUMemoryInAddressSpace(
      builder, subview, sizeBounds, gpu::GPUDialect::getPrivateAddressSpace());
}

/// Plain copy from `src` to `dst`.
LogicalResult mlir::linalg::copyToGPUPrivateMemory(OpBuilder &b, Value src,
                                                   Value dst) {
  b.create<memref::CopyOp>(src.getLoc(), src, dst);
  return success();
}

/// In the case of GPU private memory there is no need to deallocate since the
/// memory is freed when leaving the scope.
LogicalResult mlir::linalg::deallocateGPUPrivateMemory(OpBuilder &,
                                                       Value /*buffer*/) {
  return success();
}