//===- Promotion.cpp - Implementation of linalg Promotion -----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the linalg dialect Promotion pass.
//
//===----------------------------------------------------------------------===//

#include "mlir/Dialect/Arith/IR/Arith.h"
#include "mlir/Dialect/Arith/Utils/Utils.h"
#include "mlir/Dialect/Complex/IR/Complex.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Dialect/GPU/IR/GPUDialect.h"
#include "mlir/Dialect/Linalg/IR/Linalg.h"
#include "mlir/Dialect/Linalg/Passes.h"
#include "mlir/Dialect/Linalg/Transforms/Transforms.h"
#include "mlir/Dialect/SCF/IR/SCF.h"
#include "mlir/IR/AffineExpr.h"
#include "mlir/IR/AffineExprVisitor.h"
#include "mlir/IR/AffineMap.h"
#include "mlir/IR/ImplicitLocOpBuilder.h"
#include "mlir/Interfaces/ValueBoundsOpInterface.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Transforms/FoldUtils.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/TypeSwitch.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"

using namespace mlir;
using namespace mlir::linalg;
using namespace mlir::scf;

using llvm::MapVector;

#define DEBUG_TYPE "linalg-promotion"

/// Allocate a new buffer of `size` * `width` i8, where `width` is given by the
/// data `layout` for `elementType`.
/// Use AllocOp or AllocaOp depending on `options`.
/// Take an optional alignment.
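///
/// For instance, with a constant `allocSize` of 64 f32 elements (`width` = 4)
/// and an alignment of 16, this emits roughly the following IR (an
/// illustrative sketch; the exact op depends on `options`):
///
///   %buf = memref.alloc() {alignment = 16 : i64} : memref<256xi8>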
static Value allocBuffer(ImplicitLocOpBuilder &b,
                         const LinalgPromotionOptions &options,
                         Type elementType, Value allocSize, DataLayout &layout,
                         std::optional<unsigned> alignment = std::nullopt) {
  auto width = layout.getTypeSize(elementType);

  IntegerAttr alignmentAttr;
  if (alignment.has_value())
    alignmentAttr = b.getI64IntegerAttr(alignment.value());

  Attribute memorySpaceAttr;
  if (options.memorySpace.has_value())
    memorySpaceAttr = *options.memorySpace;

  // Static buffer.
  if (std::optional<int64_t> cst = getConstantIntValue(allocSize)) {
    auto staticBufferType =
        MemRefType::get(width * cst.value(), b.getIntegerType(8));
    staticBufferType =
        MemRefType::Builder(staticBufferType).setMemorySpace(memorySpaceAttr);
    if (options.useAlloca) {
      return b.create<memref::AllocaOp>(staticBufferType, ValueRange{},
                                        alignmentAttr);
    }
    return b.create<memref::AllocOp>(staticBufferType, ValueRange{},
                                     alignmentAttr);
  }

  // Fallback dynamic buffer.
  auto dynamicBufferType =
      MemRefType::get(ShapedType::kDynamic, b.getIntegerType(8));
  dynamicBufferType =
      MemRefType::Builder(dynamicBufferType).setMemorySpace(memorySpaceAttr);
  Value mul = b.createOrFold<arith::MulIOp>(
      b.create<arith::ConstantIndexOp>(width), allocSize);
  if (options.useAlloca)
    return b.create<memref::AllocaOp>(dynamicBufferType, mul, alignmentAttr);
  return b.create<memref::AllocOp>(dynamicBufferType, mul, alignmentAttr);
}

/// Default allocation callback function. This allocates a promoted buffer when
/// no callback to do so is provided. The default is to allocate a flat
/// memref<..xi8> and return a view of it shaped to `boundingSubViewSize`.
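///
/// For an f32 subview with bounding sizes [%s0, %s1], this produces roughly
/// the following IR (an illustrative sketch; %c4 is the f32 byte width):
///
///   %n     = arith.muli %s0, %s1 : index
///   %bytes = arith.muli %c4, %n : index
///   %buf   = memref.alloc(%bytes) : memref<?xi8>
///   %view  = memref.view %buf[%c0][%s0, %s1]
///              : memref<?xi8> to memref<?x?xf32>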
static std::optional<Value> defaultAllocBufferCallBack(
    const LinalgPromotionOptions &options, OpBuilder &builder,
    memref::SubViewOp subView, ArrayRef<Value> boundingSubViewSize,
    std::optional<unsigned> alignment, DataLayout &layout) {
  ShapedType viewType = subView.getType();
  ImplicitLocOpBuilder b(subView.getLoc(), builder);
  auto zero = b.create<arith::ConstantIndexOp>(0);
  auto one = b.create<arith::ConstantIndexOp>(1);

  Attribute memorySpaceAttr;
  if (options.memorySpace.has_value())
    memorySpaceAttr = *options.memorySpace;

  Value allocSize = one;
  for (const auto &size : llvm::enumerate(boundingSubViewSize))
    allocSize = b.createOrFold<arith::MulIOp>(allocSize, size.value());
  Value buffer = allocBuffer(b, options, viewType.getElementType(), allocSize,
                             layout, alignment);
  SmallVector<int64_t, 4> dynSizes(boundingSubViewSize.size(),
                                   ShapedType::kDynamic);

  auto viewMemRefType = MemRefType::get(dynSizes, viewType.getElementType());
  viewMemRefType =
      MemRefType::Builder(viewMemRefType).setMemorySpace(memorySpaceAttr);
  Value view = b.createOrFold<memref::ViewOp>(viewMemRefType, buffer, zero,
                                              boundingSubViewSize);
  return view;
}

/// Default implementation of the deallocation of a buffer used for promotion.
/// It expects to receive the same value that the default allocation method
/// returned, i.e. the result of a ViewOp.
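///
/// For a buffer obtained from the default allocation callback this emits
/// roughly (illustrative):
///
///   memref.dealloc %buf : memref<?xi8>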
static LogicalResult
defaultDeallocBufferCallBack(const LinalgPromotionOptions &options,
                             OpBuilder &b, Value fullLocalView) {
  if (!options.useAlloca) {
    auto viewOp = cast<memref::ViewOp>(fullLocalView.getDefiningOp());
    b.create<memref::DeallocOp>(viewOp.getSource().getLoc(),
                                viewOp.getSource());
  }
  return success();
}

namespace {

/// Helper struct that captures the information required to apply the
/// transformation on each op. This bridges the abstraction gap with the
/// user-facing API, which exposes positional arguments to control which
/// operands are promoted.
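///
/// A typical client-side configuration of the user-facing options looks like
/// the following sketch (see LinalgPromotionOptions in Transforms.h for the
/// full setter API):
///
///   LinalgPromotionOptions options;
///   options.setOperandsToPromote({0, 1})
///       .setUseFullTileBuffersByDefault(true)
///       .setAlignment(16);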
struct LinalgOpInstancePromotionOptions {
  LinalgOpInstancePromotionOptions(LinalgOp op,
                                   const LinalgPromotionOptions &options);
  /// SubViews to promote.
  MapVector<int64_t, Value> subViews;
  /// True if the full view should be used for the promoted buffer.
  DenseMap<Value, bool> useFullTileBuffers;

  /// Callback functions for allocation and deallocation of promoted buffers,
  /// as well as to copy the data into and out of these buffers.
  AllocBufferCallbackFn allocationFn;
  DeallocBufferCallbackFn deallocationFn;
  CopyCallbackFn copyInFn;
  CopyCallbackFn copyOutFn;

  /// Alignment of promoted buffer.
  std::optional<unsigned> alignment;
};
} // namespace

LinalgOpInstancePromotionOptions::LinalgOpInstancePromotionOptions(
    LinalgOp linalgOp, const LinalgPromotionOptions &options)
    : subViews(), alignment(options.alignment) {
  assert(linalgOp.hasBufferSemantics() && "revisit usage of shaped operand");
  auto vUseFullTileBuffers =
      options.useFullTileBuffers.value_or(llvm::SmallBitVector());
  vUseFullTileBuffers.resize(linalgOp->getNumOperands(),
                             options.useFullTileBuffersDefault);

  for (OpOperand &opOperand : linalgOp->getOpOperands()) {
    int64_t operandNumber = opOperand.getOperandNumber();
    if (options.operandsToPromote &&
        !options.operandsToPromote->count(operandNumber))
      continue;
    Operation *op = opOperand.get().getDefiningOp();
    if (auto sv = dyn_cast_or_null<memref::SubViewOp>(op)) {
      subViews[operandNumber] = sv;
      useFullTileBuffers[sv] = vUseFullTileBuffers[operandNumber];
    }
  }

  if (options.allocationFn) {
    allocationFn = *options.allocationFn;
  } else {
    allocationFn = [&](OpBuilder &b, memref::SubViewOp subViewOp,
                       ArrayRef<Value> boundingSubViewSize,
                       DataLayout &layout) -> std::optional<Value> {
      return defaultAllocBufferCallBack(options, b, subViewOp,
                                        boundingSubViewSize, alignment, layout);
    };
  }

  if (options.deallocationFn) {
    deallocationFn = *options.deallocationFn;
  } else {
    deallocationFn = [&](OpBuilder &b, Value buffer) {
      return defaultDeallocBufferCallBack(options, b, buffer);
    };
  }

  // Save the loc because `linalgOp` goes out of scope.
  Location loc = linalgOp.getLoc();
  auto defaultCopyCallBack = [loc](OpBuilder &b, Value src,
                                   Value dst) -> LogicalResult {
    b.create<memref::CopyOp>(loc, src, dst);
    return success();
  };
  copyInFn = (options.copyInFn ? *(options.copyInFn) : defaultCopyCallBack);
  copyOutFn = (options.copyOutFn ? *(options.copyOutFn) : defaultCopyCallBack);
}

// Performs promotion of a `subView` into a local buffer of the size of the
// *ranges* of the `subView`. This produces a buffer whose size may be bigger
// than the actual size of the `subView` at the boundaries.
// This is related to the full/partial tile problem.
// Returns a PromotionInfo containing a `buffer`, `fullLocalView` and
// `partialLocalView` such that:
//   * `buffer` is always the size of the full tile.
//   * `fullLocalView` is a dense contiguous view into that buffer.
//   * `partialLocalView` is a dense non-contiguous slice of `fullLocalView`
//     that matches the size of `subView`, accounting for boundary effects.
// The point of the full tile buffer is that constant static tile sizes are
// folded and result in a buffer type with statically known size and alignment
// properties.
// To account for general boundary effects, padding must be performed on the
// boundary tiles. For now this is done with an unconditional `fill` op followed
// by a partial `copy` op.
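//
// For example, for an f32 subview whose full tile sizes fold to 4x8 and whose
// partial (in-bounds) sizes are %p0 x %p1, the default callbacks produce
// roughly the following IR (an illustrative sketch; names are invented):
//
//   %buffer  = memref.alloc() : memref<128xi8>
//   %full    = memref.view %buffer[%c0][%c4, %c8]
//                : memref<128xi8> to memref<?x?xf32>
//   %partial = memref.subview %full[0, 0] [%p0, %p1] [1, 1]
//                : memref<?x?xf32> to memref<?x?xf32, strided<[?, 1]>>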
FailureOr<PromotionInfo> mlir::linalg::promoteSubviewAsNewBuffer(
    OpBuilder &b, Location loc, memref::SubViewOp subView,
    const AllocBufferCallbackFn &allocationFn, DataLayout &layout) {
  auto viewType = subView.getType();
  auto rank = viewType.getRank();
  SmallVector<Value, 4> fullSizes;
  SmallVector<OpFoldResult> partialSizes;
  fullSizes.reserve(rank);
  partialSizes.reserve(rank);
  llvm::SmallBitVector droppedDims = subView.getDroppedDims();
  int64_t resultDimIdx = 0;
  for (const auto &en : llvm::enumerate(subView.getOrCreateRanges(b, loc))) {
    if (droppedDims[en.index()])
      continue;
    auto rangeValue = en.value();
    // Try to extract a tight constant. If the size is known statically, there
    // is no need to look for the bound.
    LLVM_DEBUG(llvm::dbgs() << "Extract tightest: " << rangeValue.size << "\n");
    Value size;
    if (auto attr = llvm::dyn_cast_if_present<Attribute>(rangeValue.size)) {
      size = getValueOrCreateConstantIndexOp(b, loc, rangeValue.size);
    } else {
      Value materializedSize =
          getValueOrCreateConstantIndexOp(b, loc, rangeValue.size);
      FailureOr<int64_t> upperBound =
          ValueBoundsConstraintSet::computeConstantBound(
              presburger::BoundType::UB, materializedSize, /*dim=*/std::nullopt,
              /*stopCondition=*/nullptr, /*closedUB=*/true);
      size = failed(upperBound)
                 ? materializedSize
                 : b.create<arith::ConstantIndexOp>(loc, *upperBound);
    }
    LLVM_DEBUG(llvm::dbgs() << "Extracted tightest: " << size << "\n");
    fullSizes.push_back(size);
    partialSizes.push_back(
        b.createOrFold<memref::DimOp>(loc, subView, resultDimIdx++));
  }
  SmallVector<int64_t, 4> dynSizes(fullSizes.size(), ShapedType::kDynamic);
  // Allocate the promoted buffer through the allocation callback (when the
  // caller did not specify one, the default implementation is used).
  std::optional<Value> fullLocalView =
      allocationFn(b, subView, fullSizes, layout);
  if (!fullLocalView)
    return failure();
  SmallVector<OpFoldResult, 4> zeros(fullSizes.size(), b.getIndexAttr(0));
  SmallVector<OpFoldResult, 4> ones(fullSizes.size(), b.getIndexAttr(1));
  auto partialLocalView = b.createOrFold<memref::SubViewOp>(
      loc, *fullLocalView, zeros, partialSizes, ones);
  return PromotionInfo{*fullLocalView, partialLocalView};
}

static FailureOr<MapVector<int64_t, PromotionInfo>>
promoteSubViews(ImplicitLocOpBuilder &b,
                LinalgOpInstancePromotionOptions options, DataLayout &layout) {
  if (options.subViews.empty())
    return failure();

  MapVector<int64_t, PromotionInfo> promotionInfoMap;

  for (auto v : options.subViews) {
    memref::SubViewOp subView =
        cast<memref::SubViewOp>(v.second.getDefiningOp());
    auto promotionInfo = promoteSubviewAsNewBuffer(
        b, b.getLoc(), subView, options.allocationFn, layout);
    if (failed(promotionInfo))
      return failure();
    promotionInfoMap[v.first] = *promotionInfo;

    // Only fill the buffer if the full local view is used.
    if (!options.useFullTileBuffers[v.second])
      continue;
    Type subviewEltType = subView.getType().getElementType();
    Value fillVal =
        llvm::TypeSwitch<Type, Value>(subviewEltType)
            .Case([&](FloatType t) {
              return b.create<arith::ConstantOp>(FloatAttr::get(t, 0.0));
            })
            .Case([&](IntegerType t) {
              return b.create<arith::ConstantOp>(IntegerAttr::get(t, 0));
            })
            .Case([&](ComplexType t) {
              Value tmp;
              if (auto et = dyn_cast<FloatType>(t.getElementType()))
                tmp = b.create<arith::ConstantOp>(FloatAttr::get(et, 0.0));
              else if (auto et = dyn_cast<IntegerType>(t.getElementType()))
                tmp = b.create<arith::ConstantOp>(IntegerAttr::get(et, 0));
              return b.create<complex::CreateOp>(t, tmp, tmp);
            })
            .Default([](auto) { return Value(); });
    if (!fillVal)
      return failure();
    b.create<linalg::FillOp>(fillVal, promotionInfo->fullLocalView);
  }

  // Copy data into the promoted buffers. Use callback if provided.
  for (auto v : options.subViews) {
    auto info = promotionInfoMap.find(v.first);
    if (info == promotionInfoMap.end())
      continue;
    if (failed(options.copyInFn(
            b, cast<memref::SubViewOp>(v.second.getDefiningOp()),
            info->second.partialLocalView)))
      return failure();
  }
  return promotionInfoMap;
}

static FailureOr<LinalgOp>
promoteSubViews(ImplicitLocOpBuilder &b, LinalgOp op,
                LinalgOpInstancePromotionOptions options, DataLayout &layout) {
  assert(op.hasBufferSemantics() && "expected linalg op with buffer semantics");

  // 1. Promote the specified views and use them in the new op.
  auto promotedBuffersAndViews = promoteSubViews(b, options, layout);
  if (failed(promotedBuffersAndViews) ||
      promotedBuffersAndViews->size() != options.subViews.size())
    return failure();

  // 2. Append all other operands as they appear; this enforces that such
  // operands are not views. This supports cases such as FillOp taking extra
  // scalars, etc. Keep a reference to the output buffers.
  SmallVector<Value, 8> opViews;
  opViews.reserve(op->getNumOperands());
  SmallVector<std::pair<Value, Value>, 8> writebackViews;
  writebackViews.reserve(promotedBuffersAndViews->size());
  for (OpOperand &opOperand : op->getOpOperands()) {
    int64_t operandNumber = opOperand.getOperandNumber();
    if (options.subViews.count(operandNumber) != 0) {
      if (options.useFullTileBuffers[opOperand.get()])
        opViews.push_back(
            (*promotedBuffersAndViews)[operandNumber].fullLocalView);
      else
        opViews.push_back(
            (*promotedBuffersAndViews)[operandNumber].partialLocalView);
      if (operandNumber >= op.getNumDpsInputs())
        writebackViews.emplace_back(std::make_pair(
            opOperand.get(),
            (*promotedBuffersAndViews)[operandNumber].partialLocalView));
    } else {
      opViews.push_back(opOperand.get());
    }
  }
  op->setOperands(0, opViews.size(), opViews);

  OpBuilder::InsertionGuard guard(b);
  b.setInsertionPointAfter(op);
  // 3. Emit write-back for the promoted output views: copy the partial view.
  for (auto viewAndPartialLocalView : writebackViews) {
    if (failed(options.copyOutFn(b, viewAndPartialLocalView.second,
                                 viewAndPartialLocalView.first)))
      return failure();
  }

  // 4. Dealloc all local buffers.
  for (const auto &pi : *promotedBuffersAndViews)
    (void)options.deallocationFn(b, pi.second.fullLocalView);
  return op;
}

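/// Typical use as a guard before applying promotion (a sketch; error
/// propagation at the call site is elided):
///
///   if (failed(promoteSubviewsPrecondition(op, options)))
///     return failure();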
LogicalResult
mlir::linalg::promoteSubviewsPrecondition(Operation *op,
                                          LinalgPromotionOptions options) {
  LinalgOp linalgOp = dyn_cast<LinalgOp>(op);
  // Transformation applies to buffers only.
  if (!linalgOp || !linalgOp.hasBufferSemantics())
    return failure();
  // Check that at least one of the requested operands is indeed a subview.
  for (OpOperand &opOperand : linalgOp->getOpOperands()) {
    auto sv =
        isa_and_nonnull<memref::SubViewOp>(opOperand.get().getDefiningOp());
    if (sv) {
      if (!options.operandsToPromote ||
          options.operandsToPromote->count(opOperand.getOperandNumber()))
        return success();
    }
  }
  // TODO: Check all subviews requested are bound by a static constant.
  // TODO: Check that the total footprint fits within a given size.
  return failure();
}

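/// Typical use from a pattern or transform (a sketch):
///
///   FailureOr<LinalgOp> promoted =
///       linalg::promoteSubViews(rewriter, linalgOp, options);
///   if (failed(promoted))
///     return failure();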
FailureOr<LinalgOp>
mlir::linalg::promoteSubViews(OpBuilder &builder, LinalgOp linalgOp,
                              const LinalgPromotionOptions &options) {
  LinalgOpInstancePromotionOptions linalgOptions(linalgOp, options);
  auto layout = DataLayout::closest(linalgOp);
  ImplicitLocOpBuilder b(linalgOp.getLoc(), builder);
  auto res = ::promoteSubViews(b, linalgOp, linalgOptions, layout);
  if (failed(res))
    return failure();
  return res;
}

/// Allocate the given subview to a memory address space on the GPU by creating
/// an allocation operation and setting the memref type's address space to the
/// desired address space.
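///
/// For a 16x16 f32 subview promoted to workgroup memory, the allocation looks
/// roughly like (illustrative):
///
///   %buffer = memref.alloc()
///       : memref<16x16xf32, #gpu.address_space<workgroup>>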
static std::optional<Value> allocateSubviewGPUMemoryInAddressSpace(
    OpBuilder &builder, memref::SubViewOp subview, ArrayRef<Value> sizeBounds,
    gpu::AddressSpace addressSpace) {
  OpBuilder::InsertionGuard guard(builder);

  func::FuncOp funcOp = subview->getParentOfType<func::FuncOp>();
  if (!funcOp)
    return std::nullopt;

  // The subview size bounds are expected to be constant; they specify the
  // shape of the allocation.
  SmallVector<int64_t> shape;
  for (Value bound : sizeBounds) {
    APInt value;
    if (!matchPattern(bound, m_ConstantInt(&value)))
      return std::nullopt;
    shape.push_back(value.getSExtValue());
  }

  builder.setInsertionPoint(&funcOp.front(), funcOp.front().begin());
  auto type = MemRefType::get(
      shape, subview.getType().getElementType(), MemRefLayoutAttrInterface{},
      gpu::AddressSpaceAttr::get(builder.getContext(), addressSpace));
  Value buffer;
  if (addressSpace == gpu::GPUDialect::getWorkgroupAddressSpace()) {
    buffer = builder.create<memref::AllocOp>(funcOp.getLoc(), type);
  } else if (addressSpace == gpu::GPUDialect::getPrivateAddressSpace()) {
    buffer = builder.create<memref::AllocaOp>(funcOp.getLoc(), type);
  } else {
    return std::nullopt;
  }
  return buffer;
}

/// Allocate the subview in the GPU workgroup memory.
std::optional<Value> mlir::linalg::allocateWorkgroupMemory(
    OpBuilder &builder, memref::SubViewOp subview, ArrayRef<Value> sizeBounds,
    DataLayout &) {
  return allocateSubviewGPUMemoryInAddressSpace(
      builder, subview, sizeBounds,
      gpu::GPUDialect::getWorkgroupAddressSpace());
}

/// In the case of GPU workgroup memory there is no need to deallocate.
LogicalResult mlir::linalg::deallocateWorkgroupMemory(OpBuilder &,
                                                      Value /*buffer*/) {
  return success();
}

/// Create memref copy operations and add GPU barrier guards before and after
/// the copy operation to ensure data integrity.
LogicalResult mlir::linalg::copyToWorkgroupMemory(OpBuilder &b, Value src,
                                                  Value dst) {
  b.create<gpu::BarrierOp>(src.getLoc());
  Operation *copyOp = b.create<memref::CopyOp>(src.getLoc(), src, dst);
  b.create<gpu::BarrierOp>(copyOp->getLoc());
  return success();
}

/// Allocate the subview in the GPU private memory.
std::optional<Value> mlir::linalg::allocateGPUPrivateMemory(
    OpBuilder &builder, memref::SubViewOp subview, ArrayRef<Value> sizeBounds,
    DataLayout &) {
  return allocateSubviewGPUMemoryInAddressSpace(
      builder, subview, sizeBounds, gpu::GPUDialect::getPrivateAddressSpace());
}

/// Normal copy between src and dst.
LogicalResult mlir::linalg::copyToGPUPrivateMemory(OpBuilder &b, Value src,
                                                   Value dst) {
  b.create<memref::CopyOp>(src.getLoc(), src, dst);
  return success();
}

/// In the case of GPU private memory there is no need to deallocate, since the
/// memory is freed when it goes out of scope.
LogicalResult mlir::linalg::deallocateGPUPrivateMemory(OpBuilder &,
                                                       Value /*buffer*/) {
  return success();
}
504