//===- Promotion.cpp - Implementation of linalg Promotion -----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the linalg dialect Promotion pass.
//
//===----------------------------------------------------------------------===//

#include "mlir/Dialect/Arith/IR/Arith.h"
#include "mlir/Dialect/Arith/Utils/Utils.h"
#include "mlir/Dialect/Complex/IR/Complex.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Dialect/GPU/IR/GPUDialect.h"
#include "mlir/Dialect/Linalg/IR/Linalg.h"
#include "mlir/Dialect/Linalg/Passes.h"
#include "mlir/Dialect/Linalg/Transforms/Transforms.h"
#include "mlir/Dialect/SCF/IR/SCF.h"
#include "mlir/IR/AffineExpr.h"
#include "mlir/IR/AffineExprVisitor.h"
#include "mlir/IR/AffineMap.h"
#include "mlir/IR/ImplicitLocOpBuilder.h"
#include "mlir/Interfaces/ValueBoundsOpInterface.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Transforms/FoldUtils.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/TypeSwitch.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"

using namespace mlir;
using namespace mlir::linalg;
using namespace mlir::scf;

using llvm::MapVector;

#define DEBUG_TYPE "linalg-promotion"

/// Allocate a new buffer of `size` * `width` i8, where `width` is given by
/// the data `layout` for `elementType`.
/// Use AllocOp or AllocaOp depending on `options`.
/// Take an optional alignment.
static Value allocBuffer(ImplicitLocOpBuilder &b,
                         const LinalgPromotionOptions &options,
                         Type elementType, Value allocSize, DataLayout &layout,
                         std::optional<unsigned> alignment = std::nullopt) {
  llvm::TypeSize width = layout.getTypeSize(elementType);
  assert(!width.isScalable() && "cannot allocate buffer for a scalable vector");

  IntegerAttr alignmentAttr;
  if (alignment.has_value())
    alignmentAttr = b.getI64IntegerAttr(alignment.value());

  Attribute memorySpaceAttr;
  if (options.memorySpace.has_value())
    memorySpaceAttr = *options.memorySpace;

  // Static buffer.
  if (std::optional<int64_t> cst = getConstantIntValue(allocSize)) {
    auto staticBufferType = MemRefType::get(width.getFixedValue() * cst.value(),
                                            b.getIntegerType(8));
    staticBufferType =
        MemRefType::Builder(staticBufferType).setMemorySpace(memorySpaceAttr);
    if (options.useAlloca) {
      return b.create<memref::AllocaOp>(staticBufferType, ValueRange{},
                                        alignmentAttr);
    }
    return b.create<memref::AllocOp>(staticBufferType, ValueRange{},
                                     alignmentAttr);
  }

  // Fallback dynamic buffer.
  auto dynamicBufferType =
      MemRefType::get(ShapedType::kDynamic, b.getIntegerType(8));
  dynamicBufferType =
      MemRefType::Builder(dynamicBufferType).setMemorySpace(memorySpaceAttr);
  Value mul = b.createOrFold<arith::MulIOp>(
      b.create<arith::ConstantIndexOp>(width), allocSize);
  if (options.useAlloca)
    return b.create<memref::AllocaOp>(dynamicBufferType, mul, alignmentAttr);
  return b.create<memref::AllocOp>(dynamicBufferType, mul, alignmentAttr);
}
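
// Illustrative sketch only (SSA names are hypothetical): with a constant
// `allocSize` of 128 elements of a 4-byte type and a 16-byte alignment
// requested, the static path above folds the byte size and produces
//   %buf = memref.alloc() {alignment = 16 : i64} : memref<512xi8>
// whereas the dynamic path scales the element count to bytes at runtime:
//   %c4 = arith.constant 4 : index
//   %bytes = arith.muli %c4, %allocSize : index
//   %buf = memref.alloc(%bytes) {alignment = 16 : i64} : memref<?xi8>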

/// Default allocation callback function. This allocates a promoted buffer when
/// no callback to do so is provided. The default is to allocate a
/// memref<..xi8> and return a view of it with the shape
/// `boundingSubViewSize`.
static std::optional<Value> defaultAllocBufferCallBack(
    const LinalgPromotionOptions &options, OpBuilder &builder,
    memref::SubViewOp subView, ArrayRef<Value> boundingSubViewSize,
    std::optional<unsigned> alignment, DataLayout &layout) {
  ShapedType viewType = subView.getType();
  ImplicitLocOpBuilder b(subView.getLoc(), builder);
  auto zero = b.create<arith::ConstantIndexOp>(0);
  auto one = b.create<arith::ConstantIndexOp>(1);

  Attribute memorySpaceAttr;
  if (options.memorySpace.has_value())
    memorySpaceAttr = *options.memorySpace;

  Value allocSize = one;
  for (const auto &size : llvm::enumerate(boundingSubViewSize))
    allocSize = b.createOrFold<arith::MulIOp>(allocSize, size.value());
  Value buffer = allocBuffer(b, options, viewType.getElementType(), allocSize,
                             layout, alignment);
  SmallVector<int64_t, 4> dynSizes(boundingSubViewSize.size(),
                                   ShapedType::kDynamic);

  auto viewMemRefType = MemRefType::get(dynSizes, viewType.getElementType());
  viewMemRefType =
      MemRefType::Builder(viewMemRefType).setMemorySpace(memorySpaceAttr);
  Value view = b.createOrFold<memref::ViewOp>(viewMemRefType, buffer, zero,
                                              boundingSubViewSize);
  return view;
}
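
// As a sketch (names hypothetical), for a 2-d f32 subview with bounding sizes
// %s0 and %s1 this emits a flat i8 allocation followed by a typed view:
//   %buf  = memref.alloc(%bytes) : memref<?xi8>
//   %view = memref.view %buf[%c0][%s0, %s1] : memref<?xi8> to memref<?x?xf32>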

/// Default implementation of the deallocation of a buffer used for promotion.
/// It expects to get the same value that the default allocation method
/// returned, i.e. the result of a ViewOp.
static LogicalResult
defaultDeallocBufferCallBack(const LinalgPromotionOptions &options,
                             OpBuilder &b, Value fullLocalView) {
  if (!options.useAlloca) {
    auto viewOp = cast<memref::ViewOp>(fullLocalView.getDefiningOp());
    b.create<memref::DeallocOp>(viewOp.getSource().getLoc(),
                                viewOp.getSource());
  }
  return success();
}

namespace {

/// Helper struct that captures the information required to apply the
/// transformation on each op. This bridges the abstraction gap with the
/// user-facing API which exposes positional arguments to control which
/// operands are promoted.
struct LinalgOpInstancePromotionOptions {
  LinalgOpInstancePromotionOptions(LinalgOp op,
                                   const LinalgPromotionOptions &options);
  /// SubViews to promote.
  MapVector<int64_t, Value> subViews;
  /// Operand numbers of the subviews to copy in using `copyInFn`.
  llvm::SmallSet<int64_t, 4> operandsNumbersToCopyIn;
  /// True if the full view should be used for the promoted buffer.
  DenseMap<Value, bool> useFullTileBuffers;

  /// Callback functions for allocation and deallocation of promoted buffers,
  /// as well as to copy the data into and out of these buffers.
  AllocBufferCallbackFn allocationFn;
  DeallocBufferCallbackFn deallocationFn;
  CopyCallbackFn copyInFn;
  CopyCallbackFn copyOutFn;

  /// Alignment of promoted buffer.
  std::optional<unsigned> alignment;
};
} // namespace

LinalgOpInstancePromotionOptions::LinalgOpInstancePromotionOptions(
    LinalgOp linalgOp, const LinalgPromotionOptions &options)
    : subViews(), alignment(options.alignment) {
  assert(linalgOp.hasPureBufferSemantics() &&
         "revisit usage of shaped operand");
  auto vUseFullTileBuffers =
      options.useFullTileBuffers.value_or(llvm::SmallBitVector());
  vUseFullTileBuffers.resize(linalgOp->getNumOperands(),
                             options.useFullTileBuffersDefault);

  for (OpOperand &opOperand : linalgOp->getOpOperands()) {
    int64_t operandNumber = opOperand.getOperandNumber();
    if (options.operandsToPromote &&
        !options.operandsToPromote->count(operandNumber))
      continue;
    Operation *op = opOperand.get().getDefiningOp();
    if (auto sv = dyn_cast_or_null<memref::SubViewOp>(op)) {
      subViews[operandNumber] = sv;
      // In the case of linalg.generic, only copy in if the subview is used in
      // the linalg payload.
      if (!isa<linalg::GenericOp>(linalgOp) ||
          linalgOp.payloadUsesValueFromOperand(&opOperand))
        operandsNumbersToCopyIn.insert(operandNumber);
      useFullTileBuffers[sv] = vUseFullTileBuffers[operandNumber];
    }
  }

  if (options.allocationFn) {
    allocationFn = *options.allocationFn;
  } else {
    allocationFn = [&](OpBuilder &b, memref::SubViewOp subViewOp,
                       ArrayRef<Value> boundingSubViewSize,
                       DataLayout &layout) -> std::optional<Value> {
      return defaultAllocBufferCallBack(options, b, subViewOp,
                                        boundingSubViewSize, alignment, layout);
    };
  }

  if (options.deallocationFn) {
    deallocationFn = *options.deallocationFn;
  } else {
    deallocationFn = [&](OpBuilder &b, Value buffer) {
      return defaultDeallocBufferCallBack(options, b, buffer);
    };
  }

  // Save the loc because `linalgOp` goes out of scope.
  Location loc = linalgOp.getLoc();
  auto defaultCopyCallBack = [loc](OpBuilder &b, Value src,
                                   Value dst) -> LogicalResult {
    b.create<linalg::CopyOp>(loc, src, dst);
    return success();
  };
  copyInFn = (options.copyInFn ? *(options.copyInFn) : defaultCopyCallBack);
  copyOutFn = (options.copyOutFn ? *(options.copyOutFn) : defaultCopyCallBack);
}
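
// For reference, a client typically configures only the user-facing options
// and relies on the defaults wired up above. A minimal sketch (operand
// numbers and values chosen arbitrarily; the fluent setters live on
// LinalgPromotionOptions in Transforms.h):
//   LinalgPromotionOptions options;
//   options.setOperandsToPromote({0, 1}).setAlignment(16).setUseAlloca(false);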

// Performs promotion of a `subView` into a local buffer of the size of the
// *ranges* of the `subView`. This produces a buffer whose size may be bigger
// than the actual size of the `subView` at the boundaries.
// This is related to the full/partial tile problem.
// Returns a PromotionInfo containing a `buffer`, `fullLocalView` and
// `partialLocalView` such that:
//   * `buffer` is always the size of the full tile.
//   * `fullLocalView` is a dense contiguous view into that buffer.
//   * `partialLocalView` is a dense non-contiguous slice of `fullLocalView`
//     that corresponds to the size of `subView`, accounting for boundary
//     effects.
// The point of the full tile buffer is that constant static tile sizes are
// folded and result in a buffer type with statically known size and alignment
// properties.
// To account for general boundary effects, padding must be performed on the
// boundary tiles. For now this is done with an unconditional `fill` op followed
// by a partial `copy` op.
FailureOr<PromotionInfo> mlir::linalg::promoteSubviewAsNewBuffer(
    OpBuilder &b, Location loc, memref::SubViewOp subView,
    const AllocBufferCallbackFn &allocationFn, DataLayout &layout) {
  auto viewType = subView.getType();
  auto rank = viewType.getRank();
  SmallVector<Value, 4> fullSizes;
  SmallVector<OpFoldResult> partialSizes;
  fullSizes.reserve(rank);
  partialSizes.reserve(rank);
  llvm::SmallBitVector droppedDims = subView.getDroppedDims();
  int64_t resultDimIdx = 0;
  for (const auto &en : llvm::enumerate(subView.getOrCreateRanges(b, loc))) {
    if (droppedDims[en.index()])
      continue;
    auto rangeValue = en.value();
    // Try to extract a tight constant. If the size is known statically, no need
    // to look for the bound.
    LLVM_DEBUG(llvm::dbgs() << "Extract tightest: " << rangeValue.size << "\n");
    Value size;
    if (auto attr = llvm::dyn_cast_if_present<Attribute>(rangeValue.size)) {
      size = getValueOrCreateConstantIndexOp(b, loc, rangeValue.size);
    } else {
      FailureOr<int64_t> upperBound =
          ValueBoundsConstraintSet::computeConstantBound(
              presburger::BoundType::UB, rangeValue.size,
              /*stopCondition=*/nullptr, /*closedUB=*/true);
      size = failed(upperBound)
                 ? getValueOrCreateConstantIndexOp(b, loc, rangeValue.size)
                 : b.create<arith::ConstantIndexOp>(loc, *upperBound);
    }
    LLVM_DEBUG(llvm::dbgs() << "Extracted tightest: " << size << "\n");
    fullSizes.push_back(size);
    partialSizes.push_back(
        b.createOrFold<memref::DimOp>(loc, subView, resultDimIdx++));
  }
  SmallVector<int64_t, 4> dynSizes(fullSizes.size(), ShapedType::kDynamic);
  // If a callback is not specified, then use the default implementation for
  // allocating the promoted buffer.
  std::optional<Value> fullLocalView =
      allocationFn(b, subView, fullSizes, layout);
  if (!fullLocalView)
    return failure();
  SmallVector<OpFoldResult, 4> zeros(fullSizes.size(), b.getIndexAttr(0));
  SmallVector<OpFoldResult, 4> ones(fullSizes.size(), b.getIndexAttr(1));
  auto partialLocalView = b.createOrFold<memref::SubViewOp>(
      loc, *fullLocalView, zeros, partialSizes, ones);
  return PromotionInfo{*fullLocalView, partialLocalView};
}
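
// Shape of the result, sketched for a 2-d f32 subview whose range sizes are
// bounded by the constants 4 and 8 (all names hypothetical): the full local
// view is a memref<4x8xf32> into the flat buffer, and the partial view
// re-introduces the dynamic boundary sizes %d0, %d1:
//   %partial = memref.subview %full[0, 0] [%d0, %d1] [1, 1]
//       : memref<4x8xf32> to memref<?x?xf32, strided<[8, 1]>>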

static FailureOr<MapVector<int64_t, PromotionInfo>>
promoteSubViews(ImplicitLocOpBuilder &b,
                LinalgOpInstancePromotionOptions options, DataLayout &layout) {
  if (options.subViews.empty())
    return failure();

  MapVector<int64_t, PromotionInfo> promotionInfoMap;

  for (auto v : options.subViews) {
    memref::SubViewOp subView =
        cast<memref::SubViewOp>(v.second.getDefiningOp());
    auto promotionInfo = promoteSubviewAsNewBuffer(
        b, b.getLoc(), subView, options.allocationFn, layout);
    if (failed(promotionInfo))
      return failure();
    promotionInfoMap[v.first] = *promotionInfo;

    // Only fill the buffer if the full local view is used.
    if (!options.useFullTileBuffers[v.second])
      continue;
    Type subviewEltType = subView.getType().getElementType();
    Value fillVal =
        llvm::TypeSwitch<Type, Value>(subviewEltType)
            .Case([&](FloatType t) {
              return b.create<arith::ConstantOp>(FloatAttr::get(t, 0.0));
            })
            .Case([&](IntegerType t) {
              return b.create<arith::ConstantOp>(IntegerAttr::get(t, 0));
            })
            .Case([&](ComplexType t) {
              Value tmp;
              if (auto et = dyn_cast<FloatType>(t.getElementType()))
                tmp = b.create<arith::ConstantOp>(FloatAttr::get(et, 0.0));
              else if (auto et = dyn_cast<IntegerType>(t.getElementType()))
                tmp = b.create<arith::ConstantOp>(IntegerAttr::get(et, 0));
              return b.create<complex::CreateOp>(t, tmp, tmp);
            })
            .Default([](auto) { return Value(); });
    if (!fillVal)
      return failure();
    b.create<linalg::FillOp>(fillVal, promotionInfo->fullLocalView);
  }

  // Copy data into the promoted buffers. Use callback if provided.
  for (auto v : options.subViews) {
    auto *info = promotionInfoMap.find(v.first);
    if (info == promotionInfoMap.end())
      continue;
    if (options.operandsNumbersToCopyIn.count(v.first) == 0)
      continue;
    if (failed(options.copyInFn(
            b, cast<memref::SubViewOp>(v.second.getDefiningOp()),
            info->second.partialLocalView)))
      return failure();
  }
  return promotionInfoMap;
}
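
// The fill-then-copy-in sequence above materializes as, e.g. for f32 and the
// default callbacks (a sketch with hypothetical names and types):
//   %cst = arith.constant 0.000000e+00 : f32
//   linalg.fill ins(%cst : f32) outs(%full : memref<4x8xf32>)
//   linalg.copy ins(%subview : memref<?x?xf32, strided<...>>)
//       outs(%partial : memref<?x?xf32, strided<[8, 1]>>)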

static FailureOr<LinalgOp>
promoteSubViews(ImplicitLocOpBuilder &b, LinalgOp op,
                LinalgOpInstancePromotionOptions options, DataLayout &layout) {
  assert(op.hasPureBufferSemantics() &&
         "expected linalg op with buffer semantics");

  // 1. Promote the specified views and use them in the new op.
  auto promotedBuffersAndViews = promoteSubViews(b, options, layout);
  if (failed(promotedBuffersAndViews) ||
      promotedBuffersAndViews->size() != options.subViews.size())
    return failure();

  // 2. Append all other operands as they appear; this enforces that such
  // operands are not views. This supports cases such as FillOp taking extra
  // scalars, etc. Keep a reference to the output buffers.
  SmallVector<Value, 8> opViews;
  opViews.reserve(op->getNumOperands());
  SmallVector<std::pair<Value, Value>, 8> writebackViews;
  writebackViews.reserve(promotedBuffersAndViews->size());
  for (OpOperand &opOperand : op->getOpOperands()) {
    int64_t operandNumber = opOperand.getOperandNumber();
    if (options.subViews.count(operandNumber) != 0) {
      if (options.useFullTileBuffers[opOperand.get()])
        opViews.push_back(
            (*promotedBuffersAndViews)[operandNumber].fullLocalView);
      else
        opViews.push_back(
            (*promotedBuffersAndViews)[operandNumber].partialLocalView);
      if (operandNumber >= op.getNumDpsInputs())
        writebackViews.emplace_back(std::make_pair(
            opOperand.get(),
            (*promotedBuffersAndViews)[operandNumber].partialLocalView));
    } else {
      opViews.push_back(opOperand.get());
    }
  }
  op->setOperands(0, opViews.size(), opViews);

  OpBuilder::InsertionGuard guard(b);
  b.setInsertionPointAfter(op);
  // 3. Emit write-back for the promoted output views: copy the partial view.
  for (auto viewAndPartialLocalView : writebackViews) {
    if (failed(options.copyOutFn(b, viewAndPartialLocalView.second,
                                 viewAndPartialLocalView.first)))
      return failure();
  }

  // 4. Dealloc all local buffers.
  for (const auto &pi : *promotedBuffersAndViews)
    (void)options.deallocationFn(b, pi.second.fullLocalView);
  return op;
}
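
// End-to-end, promoting e.g. the output of a tiled matmul yields IR of this
// overall shape (a sketch with hypothetical names; types elided):
//   linalg.matmul ins(...) outs(%c_partial : memref<?x?xf32, strided<...>>)
//   linalg.copy ins(%c_partial : ...) outs(%c_subview : ...) // write-back
//   memref.dealloc %buf : memref<?xi8>                       // step 4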

LogicalResult
mlir::linalg::promoteSubviewsPrecondition(Operation *op,
                                          LinalgPromotionOptions options) {
  LinalgOp linalgOp = dyn_cast<LinalgOp>(op);
  // Transformation applies to buffers only.
  if (!linalgOp || !linalgOp.hasPureBufferSemantics())
    return failure();
  // Check that at least one of the requested operands is indeed a subview.
  for (OpOperand &opOperand : linalgOp->getOpOperands()) {
    auto sv =
        isa_and_nonnull<memref::SubViewOp>(opOperand.get().getDefiningOp());
    if (sv) {
      if (!options.operandsToPromote ||
          options.operandsToPromote->count(opOperand.getOperandNumber()))
        return success();
    }
  }
  // TODO: Check all subviews requested are bound by a static constant.
  // TODO: Check that the total footprint fits within a given size.
  return failure();
}

FailureOr<LinalgOp>
mlir::linalg::promoteSubViews(OpBuilder &builder, LinalgOp linalgOp,
                              const LinalgPromotionOptions &options) {
  LinalgOpInstancePromotionOptions linalgOptions(linalgOp, options);
  auto layout = DataLayout::closest(linalgOp);
  ImplicitLocOpBuilder b(linalgOp.getLoc(), builder);
  auto res = ::promoteSubViews(b, linalgOp, linalgOptions, layout);
  if (failed(res))
    return failure();
  return res;
}
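
// Typical driver-side usage, gating the rewrite on the precondition (a
// sketch; error handling elided):
//   if (succeeded(promoteSubviewsPrecondition(linalgOp, options))) {
//     OpBuilder builder(linalgOp);
//     FailureOr<LinalgOp> promoted =
//         linalg::promoteSubViews(builder, linalgOp, options);
//   }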

/// Allocate the given subview to a memory address space in GPU by creating an
/// allocation operation and setting the memref type address space to the
/// desired address space.
static std::optional<Value> allocateSubviewGPUMemoryInAddressSpace(
    OpBuilder &builder, memref::SubViewOp subview, ArrayRef<Value> sizeBounds,
    gpu::AddressSpace addressSpace) {
  OpBuilder::InsertionGuard guard(builder);

  func::FuncOp funcOp = subview->getParentOfType<func::FuncOp>();
  if (!funcOp)
    return std::nullopt;

  // The subview size bounds are expected to be constant; they specify the shape
  // of the allocation.
  SmallVector<int64_t> shape;
  for (Value bound : sizeBounds) {
    APInt value;
    if (!matchPattern(bound, m_ConstantInt(&value)))
      return std::nullopt;
    shape.push_back(value.getSExtValue());
  }

  builder.setInsertionPointToStart(&funcOp.front());
  auto type = MemRefType::get(
      shape, subview.getType().getElementType(), MemRefLayoutAttrInterface{},
      gpu::AddressSpaceAttr::get(builder.getContext(), addressSpace));
  Value buffer;
  if (addressSpace == gpu::GPUDialect::getWorkgroupAddressSpace()) {
    buffer = builder.create<memref::AllocOp>(funcOp.getLoc(), type);
  } else if (addressSpace == gpu::GPUDialect::getPrivateAddressSpace()) {
    buffer = builder.create<memref::AllocaOp>(funcOp.getLoc(), type);
  } else {
    return std::nullopt;
  }
  return buffer;
}
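
// The resulting allocation carries the GPU address space in its type, e.g.
// for a constant 4x8 f32 subview placed in workgroup memory (sketch):
//   %buf = memref.alloc() : memref<4x8xf32, #gpu.address_space<workgroup>>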

/// Allocate the subview in the GPU workgroup memory.
std::optional<Value> mlir::linalg::allocateWorkgroupMemory(
    OpBuilder &builder, memref::SubViewOp subview, ArrayRef<Value> sizeBounds,
    DataLayout &) {
  return allocateSubviewGPUMemoryInAddressSpace(
      builder, subview, sizeBounds,
      gpu::GPUDialect::getWorkgroupAddressSpace());
}

/// In case of GPU group memory, there is no need to deallocate.
LogicalResult mlir::linalg::deallocateWorkgroupMemory(OpBuilder &,
                                                      Value /*buffer*/) {
  return success();
}

/// Create a memref copy operation and add gpu barrier guards before and after
/// the copy to ensure data integrity.
LogicalResult mlir::linalg::copyToWorkgroupMemory(OpBuilder &b, Value src,
                                                  Value dst) {
  b.create<gpu::BarrierOp>(src.getLoc());
  Operation *copyOp = b.create<memref::CopyOp>(src.getLoc(), src, dst);
  b.create<gpu::BarrierOp>(copyOp->getLoc());
  return success();
}
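
// Emitted IR (sketch; types hypothetical): the copy is bracketed by barriers
// so every thread in the workgroup observes a consistent buffer.
//   gpu.barrier
//   memref.copy %src, %dst
//       : memref<4x8xf32> to memref<4x8xf32, #gpu.address_space<workgroup>>
//   gpu.barrier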

/// Allocate the subview in the GPU private memory.
std::optional<Value> mlir::linalg::allocateGPUPrivateMemory(
    OpBuilder &builder, memref::SubViewOp subview, ArrayRef<Value> sizeBounds,
    DataLayout &) {
  return allocateSubviewGPUMemoryInAddressSpace(
      builder, subview, sizeBounds, gpu::GPUDialect::getPrivateAddressSpace());
}

/// Normal copy between src and dst.
LogicalResult mlir::linalg::copyToGPUPrivateMemory(OpBuilder &b, Value src,
                                                   Value dst) {
  b.create<memref::CopyOp>(src.getLoc(), src, dst);
  return success();
}

/// In case of GPU private memory there is no need to deallocate since the
/// memory is freed when leaving the scope.
LogicalResult mlir::linalg::deallocateGPUPrivateMemory(OpBuilder &,
                                                       Value /*buffer*/) {
  return success();
}