//===- Promotion.cpp - Implementation of linalg Promotion -----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the linalg dialect Promotion pass.
//
//===----------------------------------------------------------------------===//

#include "mlir/Dialect/Arith/IR/Arith.h"
#include "mlir/Dialect/Arith/Utils/Utils.h"
#include "mlir/Dialect/Complex/IR/Complex.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Dialect/GPU/IR/GPUDialect.h"
#include "mlir/Dialect/Linalg/IR/Linalg.h"
#include "mlir/Dialect/Linalg/Passes.h"
#include "mlir/Dialect/Linalg/Transforms/Transforms.h"
#include "mlir/Dialect/MemRef/IR/MemRef.h"
#include "mlir/Dialect/SCF/IR/SCF.h"
#include "mlir/IR/AffineExpr.h"
#include "mlir/IR/AffineExprVisitor.h"
#include "mlir/IR/AffineMap.h"
#include "mlir/IR/ImplicitLocOpBuilder.h"
#include "mlir/Interfaces/ValueBoundsOpInterface.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Transforms/FoldUtils.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/TypeSwitch.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"

using namespace mlir;
using namespace mlir::linalg;
using namespace mlir::scf;

using llvm::MapVector;

#define DEBUG_TYPE "linalg-promotion"

/// Allocate a new buffer of `size` * `width` i8, where `width` is given by the
/// data `layout` for `elementType`.
/// Use AllocOp or AllocaOp depending on `options`.
/// Take an optional alignment.
static Value allocBuffer(ImplicitLocOpBuilder &b,
                         const LinalgPromotionOptions &options,
                         Type elementType, Value allocSize, DataLayout &layout,
                         std::optional<unsigned> alignment = std::nullopt) {
  auto width = layout.getTypeSize(elementType);

  IntegerAttr alignmentAttr;
  if (alignment.has_value())
    alignmentAttr = b.getI64IntegerAttr(alignment.value());

  // Static buffer.
  if (auto cst = allocSize.getDefiningOp<arith::ConstantIndexOp>()) {
    auto staticBufferType =
        MemRefType::get(width * cst.value(), b.getIntegerType(8));
    if (options.useAlloca) {
      return b.create<memref::AllocaOp>(staticBufferType, ValueRange{},
                                        alignmentAttr);
    }
    return b.create<memref::AllocOp>(staticBufferType, ValueRange{},
                                     alignmentAttr);
  }

  // Fallback dynamic buffer.
  auto dynamicBufferType =
      MemRefType::get(ShapedType::kDynamic, b.getIntegerType(8));
  Value mul = b.createOrFold<arith::MulIOp>(
      b.create<arith::ConstantIndexOp>(width), allocSize);
  if (options.useAlloca)
    return b.create<memref::AllocaOp>(dynamicBufferType, mul, alignmentAttr);
  return b.create<memref::AllocOp>(dynamicBufferType, mul, alignmentAttr);
}
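
// For instance (a schematic sketch; SSA names are hypothetical), with an f32
// element type of width 4 a constant `allocSize` of 128 takes the static path
// and produces roughly:
//   %buf = memref.alloc() : memref<512xi8>
// whereas a dynamic `allocSize` falls through to the fallback:
//   %c4  = arith.constant 4 : index
//   %mul = arith.muli %c4, %allocSize : index
//   %buf = memref.alloc(%mul) : memref<?xi8>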

/// Default allocation callback function. This allocates a promoted buffer when
/// no callback to do so is provided. The default is to allocate a
/// memref<..xi8> and return a view of it with a memref type of shape
/// `boundingSubViewSize`.
static std::optional<Value> defaultAllocBufferCallBack(
    const LinalgPromotionOptions &options, OpBuilder &builder,
    memref::SubViewOp subView, ArrayRef<Value> boundingSubViewSize,
    std::optional<unsigned> alignment, DataLayout &layout) {
  ShapedType viewType = subView.getType();
  ImplicitLocOpBuilder b(subView.getLoc(), builder);
  auto zero = b.create<arith::ConstantIndexOp>(0);
  auto one = b.create<arith::ConstantIndexOp>(1);

  Value allocSize = one;
  for (const auto &size : llvm::enumerate(boundingSubViewSize))
    allocSize = b.createOrFold<arith::MulIOp>(allocSize, size.value());
  Value buffer = allocBuffer(b, options, viewType.getElementType(), allocSize,
                             layout, alignment);
  SmallVector<int64_t, 4> dynSizes(boundingSubViewSize.size(),
                                   ShapedType::kDynamic);
  Value view = b.createOrFold<memref::ViewOp>(
      MemRefType::get(dynSizes, viewType.getElementType()), buffer, zero,
      boundingSubViewSize);
  return view;
}
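
// Schematically (a sketch with hypothetical SSA names), for a 2-D f32 subview
// with bounding sizes %s0 and %s1 this emits:
//   %n    = arith.muli %s0, %s1 : index
//   %buf  = ... i8 buffer from allocBuffer for %n f32 elements ...
//   %view = memref.view %buf[%c0][%s0, %s1] : memref<?xi8> to memref<?x?xf32>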

/// Default implementation of the deallocation of the buffer used for
/// promotion. It expects to get the same value that the default allocation
/// method returned, i.e. the result of a ViewOp.
static LogicalResult
defaultDeallocBufferCallBack(const LinalgPromotionOptions &options,
                             OpBuilder &b, Value fullLocalView) {
  if (!options.useAlloca) {
    auto viewOp = cast<memref::ViewOp>(fullLocalView.getDefiningOp());
    b.create<memref::DeallocOp>(viewOp.getSource().getLoc(),
                                viewOp.getSource());
  }
  return success();
}
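
// In the default scheme this walks back from the memref.view to its source i8
// buffer and emits, roughly:
//   memref.dealloc %buf : memref<?xi8>
// Nothing is emitted when `useAlloca` is set, since alloca'ed memory is
// reclaimed automatically when the enclosing scope is exited.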

namespace {

/// Helper struct that captures the information required to apply the
/// transformation on each op. This bridges the abstraction gap with the
/// user-facing API which exposes positional arguments to control which operands
/// are promoted.
struct LinalgOpInstancePromotionOptions {
  LinalgOpInstancePromotionOptions(LinalgOp op,
                                   const LinalgPromotionOptions &options);
  /// SubViews to promote.
  MapVector<int64_t, Value> subViews;
  /// True if the full view should be used for the promoted buffer.
  DenseMap<Value, bool> useFullTileBuffers;

  /// Callback functions for allocation and deallocation of promoted buffers,
  /// as well as to copy the data into and out of these buffers.
  AllocBufferCallbackFn allocationFn;
  DeallocBufferCallbackFn deallocationFn;
  CopyCallbackFn copyInFn;
  CopyCallbackFn copyOutFn;

  /// Alignment of promoted buffer.
  std::optional<unsigned> alignment;
};
} // namespace
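
// A hedged sketch of how the user-facing LinalgPromotionOptions that feed
// this struct are typically configured (operand indices and values are
// hypothetical):
//   LinalgPromotionOptions options;
//   options.setOperandsToPromote({0, 1})
//       .setUseFullTileBuffersByDefault(true)
//       .setAlignment(16);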

LinalgOpInstancePromotionOptions::LinalgOpInstancePromotionOptions(
    LinalgOp linalgOp, const LinalgPromotionOptions &options)
    : subViews(), alignment(options.alignment) {
  assert(linalgOp.hasBufferSemantics() && "revisit usage of shaped operand");
  auto vUseFullTileBuffers =
      options.useFullTileBuffers.value_or(llvm::SmallBitVector());
  vUseFullTileBuffers.resize(linalgOp->getNumOperands(),
                             options.useFullTileBuffersDefault);

  for (OpOperand &opOperand : linalgOp->getOpOperands()) {
    int64_t operandNumber = opOperand.getOperandNumber();
    if (options.operandsToPromote &&
        !options.operandsToPromote->count(operandNumber))
      continue;
    Operation *op = opOperand.get().getDefiningOp();
    if (auto sv = dyn_cast_or_null<memref::SubViewOp>(op)) {
      subViews[operandNumber] = sv;
      useFullTileBuffers[sv] = vUseFullTileBuffers[operandNumber];
    }
  }

  if (options.allocationFn) {
    allocationFn = *options.allocationFn;
  } else {
    allocationFn = [&](OpBuilder &b, memref::SubViewOp subViewOp,
                       ArrayRef<Value> boundingSubViewSize,
                       DataLayout &layout) -> std::optional<Value> {
      return defaultAllocBufferCallBack(options, b, subViewOp,
                                        boundingSubViewSize, alignment, layout);
    };
  }

  if (options.deallocationFn) {
    deallocationFn = *options.deallocationFn;
  } else {
    deallocationFn = [&](OpBuilder &b, Value buffer) {
      return defaultDeallocBufferCallBack(options, b, buffer);
    };
  }

  // Save the loc because `linalgOp` goes out of scope.
  Location loc = linalgOp.getLoc();
  auto defaultCopyCallBack = [loc](OpBuilder &b, Value src,
                                   Value dst) -> LogicalResult {
    b.create<memref::CopyOp>(loc, src, dst);
    return success();
  };
  copyInFn = (options.copyInFn ? *(options.copyInFn) : defaultCopyCallBack);
  copyOutFn = (options.copyOutFn ? *(options.copyOutFn) : defaultCopyCallBack);
}

// Performs promotion of a `subView` into a local buffer of the size of the
// *ranges* of the `subView`. This produces a buffer whose size may be bigger
// than the actual size of the `subView` at the boundaries.
// This is related to the full/partial tile problem.
// Returns a PromotionInfo containing a `buffer`, `fullLocalView` and
// `partialLocalView` such that:
//   * `buffer` is always the size of the full tile.
//   * `fullLocalView` is a dense contiguous view into that buffer.
//   * `partialLocalView` is a dense non-contiguous slice of `fullLocalView`
//     that corresponds to the size of `subView`, accounting for boundary
//     effects.
// The point of the full tile buffer is that constant static tile sizes are
// folded and result in a buffer type with statically known size and alignment
// properties.
// To account for general boundary effects, padding must be performed on the
// boundary tiles. For now this is done with an unconditional `fill` op followed
// by a partial `copy` op.
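//
// As an illustrative sketch (with hypothetical SSA names), promoting a 1-D
// boundary tile whose dynamic size %sz has a constant upper bound of 4 yields
// roughly:
//   %full    = ... view of the full-tile buffer ... : memref<4xf32>
//   %partial = memref.subview %full[0] [%sz] [1]
//       : memref<4xf32> to memref<?xf32, strided<[1]>>
// so the buffer has a static shape while `partialLocalView` still matches the
// subview's actual size.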
FailureOr<PromotionInfo> mlir::linalg::promoteSubviewAsNewBuffer(
    OpBuilder &b, Location loc, memref::SubViewOp subView,
    const AllocBufferCallbackFn &allocationFn, DataLayout &layout) {
  auto viewType = subView.getType();
  auto rank = viewType.getRank();
  SmallVector<Value, 4> fullSizes;
  SmallVector<OpFoldResult> partialSizes;
  fullSizes.reserve(rank);
  partialSizes.reserve(rank);
  llvm::SmallBitVector droppedDims = subView.getDroppedDims();
  int64_t resultDimIdx = 0;
  for (const auto &en : llvm::enumerate(subView.getOrCreateRanges(b, loc))) {
    if (droppedDims[en.index()])
      continue;
    auto rangeValue = en.value();
    // Try to extract a tight constant. If the size is known statically, no need
    // to look for the bound.
    LLVM_DEBUG(llvm::dbgs() << "Extract tightest: " << rangeValue.size << "\n");
    Value size;
    if (auto attr = llvm::dyn_cast_if_present<Attribute>(rangeValue.size)) {
      size = getValueOrCreateConstantIndexOp(b, loc, rangeValue.size);
    } else {
      Value materializedSize =
          getValueOrCreateConstantIndexOp(b, loc, rangeValue.size);
      FailureOr<int64_t> upperBound =
          ValueBoundsConstraintSet::computeConstantBound(
              presburger::BoundType::UB, materializedSize, /*dim=*/std::nullopt,
              /*stopCondition=*/nullptr, /*closedUB=*/true);
      size = failed(upperBound)
                 ? materializedSize
                 : b.create<arith::ConstantIndexOp>(loc, *upperBound);
    }
    LLVM_DEBUG(llvm::dbgs() << "Extracted tightest: " << size << "\n");
    fullSizes.push_back(size);
    partialSizes.push_back(
        b.createOrFold<memref::DimOp>(loc, subView, resultDimIdx++));
  }
  // If a callback is not specified, then use the default implementation for
  // allocating the promoted buffer.
  std::optional<Value> fullLocalView =
      allocationFn(b, subView, fullSizes, layout);
  if (!fullLocalView)
    return failure();
  SmallVector<OpFoldResult, 4> zeros(fullSizes.size(), b.getIndexAttr(0));
  SmallVector<OpFoldResult, 4> ones(fullSizes.size(), b.getIndexAttr(1));
  auto partialLocalView = b.createOrFold<memref::SubViewOp>(
      loc, *fullLocalView, zeros, partialSizes, ones);
  return PromotionInfo{*fullLocalView, partialLocalView};
}

static FailureOr<MapVector<int64_t, PromotionInfo>>
promoteSubViews(ImplicitLocOpBuilder &b,
                LinalgOpInstancePromotionOptions options, DataLayout &layout) {
  if (options.subViews.empty())
    return failure();

  MapVector<int64_t, PromotionInfo> promotionInfoMap;

  for (auto v : options.subViews) {
    memref::SubViewOp subView =
        cast<memref::SubViewOp>(v.second.getDefiningOp());
    auto promotionInfo = promoteSubviewAsNewBuffer(
        b, b.getLoc(), subView, options.allocationFn, layout);
    if (failed(promotionInfo))
      return failure();
    promotionInfoMap[v.first] = *promotionInfo;

    // Only fill the buffer if the full local view is used.
    if (!options.useFullTileBuffers[v.second])
      continue;
    Type subviewEltType = subView.getType().getElementType();
    Value fillVal =
        llvm::TypeSwitch<Type, Value>(subviewEltType)
            .Case([&](FloatType t) {
              return b.create<arith::ConstantOp>(FloatAttr::get(t, 0.0));
            })
            .Case([&](IntegerType t) {
              return b.create<arith::ConstantOp>(IntegerAttr::get(t, 0));
            })
            .Case([&](ComplexType t) {
              // Build a zero of the complex element type; only float and
              // integer element types are supported.
              Value tmp;
              if (auto et = dyn_cast<FloatType>(t.getElementType()))
                tmp = b.create<arith::ConstantOp>(FloatAttr::get(et, 0.0));
              else
                tmp = b.create<arith::ConstantOp>(IntegerAttr::get(
                    cast<IntegerType>(t.getElementType()), 0));
              return b.create<complex::CreateOp>(t, tmp, tmp);
            })
            .Default([](auto) { return Value(); });
    if (!fillVal)
      return failure();
    b.create<linalg::FillOp>(fillVal, promotionInfo->fullLocalView);
  }

  // Copy data into the promoted buffers. Use callback if provided.
  for (auto v : options.subViews) {
    auto info = promotionInfoMap.find(v.first);
    if (info == promotionInfoMap.end())
      continue;
    if (failed(options.copyInFn(
            b, cast<memref::SubViewOp>(v.second.getDefiningOp()),
            info->second.partialLocalView)))
      return failure();
  }
  return promotionInfoMap;
}
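
// With the default callbacks and full-tile reuse enabled, the two loops above
// emit per promoted operand, schematically (names hypothetical):
//   linalg.fill ins(%zero : f32) outs(%fullLocalView : memref<...>)
//   memref.copy %subview, %partialLocalView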

static FailureOr<LinalgOp>
promoteSubViews(ImplicitLocOpBuilder &b, LinalgOp op,
                LinalgOpInstancePromotionOptions options, DataLayout &layout) {
  assert(op.hasBufferSemantics() && "expected linalg op with buffer semantics");

  // 1. Promote the specified views and use them in the new op.
  auto promotedBuffersAndViews = promoteSubViews(b, options, layout);
  if (failed(promotedBuffersAndViews) ||
      promotedBuffersAndViews->size() != options.subViews.size())
    return failure();

  // 2. Append all other operands as they appear; this enforces that such
  // operands are not views. This supports cases such as FillOp taking extra
  // scalars, etc. Keep a reference to the output buffers.
  SmallVector<Value, 8> opViews;
  opViews.reserve(op->getNumOperands());
  SmallVector<std::pair<Value, Value>, 8> writebackViews;
  writebackViews.reserve(promotedBuffersAndViews->size());
  for (OpOperand &opOperand : op->getOpOperands()) {
    int64_t operandNumber = opOperand.getOperandNumber();
    if (options.subViews.count(operandNumber) != 0) {
      if (options.useFullTileBuffers[opOperand.get()])
        opViews.push_back(
            (*promotedBuffersAndViews)[operandNumber].fullLocalView);
      else
        opViews.push_back(
            (*promotedBuffersAndViews)[operandNumber].partialLocalView);
      if (operandNumber >= op.getNumDpsInputs())
        writebackViews.emplace_back(std::make_pair(
            opOperand.get(),
            (*promotedBuffersAndViews)[operandNumber].partialLocalView));
    } else {
      opViews.push_back(opOperand.get());
    }
  }
  op->setOperands(0, opViews.size(), opViews);

  OpBuilder::InsertionGuard guard(b);
  b.setInsertionPointAfter(op);
  // 3. Emit write-back for the promoted output views: copy the partial view.
  for (auto viewAndPartialLocalView : writebackViews) {
    if (failed(options.copyOutFn(b, viewAndPartialLocalView.second,
                                 viewAndPartialLocalView.first)))
      return failure();
  }

  // 4. Dealloc all local buffers.
  for (const auto &pi : *promotedBuffersAndViews)
    (void)options.deallocationFn(b, pi.second.fullLocalView);
  return op;
}
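
// End-to-end, the rewrite therefore turns (a schematic sketch with
// hypothetical names):
//   %sv = memref.subview %A[...] : memref<...> to memref<...>
//   linalg.matmul ins(%sv, ...) outs(...)
// into:
//   %buf     = memref.alloc(...) : memref<?xi8>
//   %full    = memref.view %buf[...][...] : ...
//   %partial = memref.subview %full[...] : ...
//   memref.copy %sv, %partial
//   linalg.matmul ins(%partial, ...) outs(...)
//   ... write-back copies for promoted outputs ...
//   memref.dealloc %buf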

LogicalResult
mlir::linalg::promoteSubviewsPrecondition(Operation *op,
                                          LinalgPromotionOptions options) {
  LinalgOp linalgOp = dyn_cast<LinalgOp>(op);
  // Transformation applies to buffers only.
  if (!linalgOp || !linalgOp.hasBufferSemantics())
    return failure();
  // Check that at least one of the requested operands is indeed a subview.
  for (OpOperand &opOperand : linalgOp->getOpOperands()) {
    auto sv =
        isa_and_nonnull<memref::SubViewOp>(opOperand.get().getDefiningOp());
    if (sv) {
      if (!options.operandsToPromote ||
          options.operandsToPromote->count(opOperand.getOperandNumber()))
        return success();
    }
  }
  // TODO: Check all subviews requested are bound by a static constant.
  // TODO: Check that the total footprint fits within a given size.
  return failure();
}
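
// Callers typically gate the transform on this precondition, e.g. (a sketch):
//   if (failed(linalg::promoteSubviewsPrecondition(op, options)))
//     return failure();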

FailureOr<LinalgOp>
mlir::linalg::promoteSubViews(OpBuilder &builder, LinalgOp linalgOp,
                              const LinalgPromotionOptions &options) {
  LinalgOpInstancePromotionOptions linalgOptions(linalgOp, options);
  auto layout = DataLayout::closest(linalgOp);
  ImplicitLocOpBuilder b(linalgOp.getLoc(), builder);
  auto res = ::promoteSubViews(b, linalgOp, linalgOptions, layout);
  if (failed(res))
    return failure();
  return res;
}
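
// A hedged usage sketch from a rewrite driver (names hypothetical); the
// builder's insertion point determines where the promoted buffers are created:
//   OpBuilder::InsertionGuard g(builder);
//   builder.setInsertionPoint(linalgOp);
//   FailureOr<LinalgOp> promoted =
//       linalg::promoteSubViews(builder, linalgOp, options);
//   if (failed(promoted))
//     return failure();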

/// Allocate the given subview to a memory address space in GPU by creating an
/// allocation operation and setting the memref type's address space to the
/// desired one.
static std::optional<Value> allocateSubviewGPUMemoryInAddressSpace(
    OpBuilder &builder, memref::SubViewOp subview, ArrayRef<Value> sizeBounds,
    gpu::AddressSpace addressSpace) {
  OpBuilder::InsertionGuard guard(builder);

  func::FuncOp funcOp = subview->getParentOfType<func::FuncOp>();
  if (!funcOp)
    return std::nullopt;

  // The subview size bounds are expected to be constant; they specify the
  // shape of the allocation.
  SmallVector<int64_t> shape;
  for (Value bound : sizeBounds) {
    APInt value;
    if (!matchPattern(bound, m_ConstantInt(&value)))
      return std::nullopt;
    shape.push_back(value.getSExtValue());
  }

  builder.setInsertionPoint(&funcOp.front(), funcOp.front().begin());
  auto type = MemRefType::get(
      shape, subview.getType().getElementType(), MemRefLayoutAttrInterface{},
      gpu::AddressSpaceAttr::get(builder.getContext(), addressSpace));
  Value buffer;
  if (addressSpace == gpu::GPUDialect::getWorkgroupAddressSpace()) {
    buffer = builder.create<memref::AllocOp>(funcOp.getLoc(), type);
  } else if (addressSpace == gpu::GPUDialect::getPrivateAddressSpace()) {
    buffer = builder.create<memref::AllocaOp>(funcOp.getLoc(), type);
  } else {
    return std::nullopt;
  }
  return buffer;
}
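
// For a constant-shaped f32 subview promoted to workgroup memory this hoists,
// roughly, the following to the start of the enclosing func.func (shape
// hypothetical):
//   %buf = memref.alloc() : memref<16x16xf32, #gpu.address_space<workgroup>>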

/// Allocate the subview in the GPU workgroup memory.
std::optional<Value> mlir::linalg::allocateWorkgroupMemory(
    OpBuilder &builder, memref::SubViewOp subview, ArrayRef<Value> sizeBounds,
    DataLayout &) {
  return allocateSubviewGPUMemoryInAddressSpace(
      builder, subview, sizeBounds,
      gpu::GPUDialect::getWorkgroupAddressSpace());
}
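
// These GPU callbacks are meant to be plugged into LinalgPromotionOptions,
// e.g. (a hedged sketch):
//   options.setAllocationDeallocationFns(linalg::allocateWorkgroupMemory,
//                                        linalg::deallocateWorkgroupMemory)
//       .setCopyInOutFns(linalg::copyToWorkgroupMemory,
//                        linalg::copyToWorkgroupMemory);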

/// In the case of GPU workgroup memory there is no need to deallocate.
LogicalResult mlir::linalg::deallocateWorkgroupMemory(OpBuilder &,
                                                      Value /*buffer*/) {
  return success();
}

/// Create a memref copy operation and add gpu barrier guards before and after
/// the copy operation to ensure data integrity.
LogicalResult mlir::linalg::copyToWorkgroupMemory(OpBuilder &b, Value src,
                                                  Value dst) {
  b.create<gpu::BarrierOp>(src.getLoc());
  Operation *copyOp = b.create<memref::CopyOp>(src.getLoc(), src, dst);
  b.create<gpu::BarrierOp>(copyOp->getLoc());
  return success();
}
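
// The emitted sequence is, schematically:
//   gpu.barrier
//   memref.copy %src, %dst : memref<...> to memref<...>
//   gpu.barrier
// The barriers ensure every thread in the workgroup reaches the copy with
// consistent data and none races ahead before the tile is fully staged.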

/// Allocate the subview in the GPU private memory.
std::optional<Value> mlir::linalg::allocateGPUPrivateMemory(
    OpBuilder &builder, memref::SubViewOp subview, ArrayRef<Value> sizeBounds,
    DataLayout &) {
  return allocateSubviewGPUMemoryInAddressSpace(
      builder, subview, sizeBounds, gpu::GPUDialect::getPrivateAddressSpace());
}

/// Normal copy between src and dst.
LogicalResult mlir::linalg::copyToGPUPrivateMemory(OpBuilder &b, Value src,
                                                   Value dst) {
  b.create<memref::CopyOp>(src.getLoc(), src, dst);
  return success();
}

/// In the case of GPU private memory there is no need to deallocate since the
/// memory is freed automatically when it goes out of scope.
LogicalResult mlir::linalg::deallocateGPUPrivateMemory(OpBuilder &,
                                                       Value /*buffer*/) {
  return success();
}
489