//===- BufferizableOpInterfaceImpl.cpp - Impl. of BufferizableOpInterface -===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "mlir/Dialect/Linalg/Transforms/BufferizableOpInterfaceImpl.h"
#include "mlir/Dialect/Bufferization/IR/BufferizableOpInterface.h"
#include "mlir/Dialect/Bufferization/IR/Bufferization.h"
#include "mlir/Dialect/Bufferization/IR/DstBufferizableOpInterfaceImpl.h"
#include "mlir/Dialect/Linalg/IR/Linalg.h"
#include "mlir/Dialect/SparseTensor/IR/SparseTensor.h"
#include "mlir/Dialect/Tensor/IR/Tensor.h"
#include "mlir/IR/Dialect.h"
#include "mlir/IR/Operation.h"
#include "mlir/Interfaces/DestinationStyleOpInterface.h"

using namespace mlir;
using namespace linalg;
using namespace mlir::bufferization;

namespace {

/// Generic conversion for any DestinationStyleOpInterface on tensors.
static LogicalResult
bufferizeDestinationStyleOpInterface(RewriterBase &rewriter,
                                     DestinationStyleOpInterface op,
                                     const BufferizationOptions &options) {
  // Take a guard before anything else.
  OpBuilder::InsertionGuard g(rewriter);
  rewriter.setInsertionPoint(op);

  // Nothing to do. This op is already bufferized.
  if (op.hasPureBufferSemantics())
    return success();

  // Ensure op has only tensors. Allow mixed tensor-buffer mode on a per-need
  // basis.
  if (!op.hasPureTensorSemantics())
    return op->emitError() << "op does not have pure tensor semantics";

  // New input operands for the cloned op.
  SmallVector<Value> newInputBuffers;
  newInputBuffers.reserve(op.getNumDpsInputs());
  for (OpOperand *opOperand : op.getDpsInputOperands()) {
    if (op.isScalar(opOperand)) {
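      // Scalar operands are not tensors; they are not bufferized and can be
      // used as-is.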
      newInputBuffers.push_back(opOperand->get());
      continue;
    }
    FailureOr<Value> buffer = getBuffer(rewriter, opOperand->get(), options);
    if (failed(buffer))
      return failure();
    newInputBuffers.push_back(*buffer);
  }

  // New output operands for the cloned op.
  SmallVector<Value> newOutputBuffers;
  for (OpResult opResult : op->getOpResults()) {
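    // In destination-passing style, each op result is tied to the init operand
    // with the same index; that operand provides the output buffer.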
    OpOperand *opOperand = op.getDpsInitOperand(opResult.getResultNumber());
    FailureOr<Value> resultBuffer =
        getBuffer(rewriter, opOperand->get(), options);
    if (failed(resultBuffer))
      return failure();
    newOutputBuffers.push_back(*resultBuffer);
  }

  // Merge input/output operands.
  SmallVector<Value> newOperands = newInputBuffers;
  newOperands.append(newOutputBuffers.begin(), newOutputBuffers.end());

  // Set insertion point now that potential alloc/dealloc are introduced.
  rewriter.setInsertionPoint(op);
  // Clone the op, but use the new operands. Move the existing block into the
  // new op. Since the new op does not have any tensor results, it does not
  // return anything.
  assert(op->getNumRegions() == 1 && "expected that op has 1 region");
  OperationState state(op->getLoc(), op->getName(), newOperands, TypeRange{},
                       op->getAttrs());
  state.addRegion();
  Operation *newOp = Operation::create(state);
  newOp->getRegion(0).getBlocks().splice(newOp->getRegion(0).begin(),
                                         op->getRegion(0).getBlocks());

  // We don't want the rewriter to track an incomplete operation, so insert the
  // new operation only after it has been fully constructed.
  rewriter.insert(newOp);

  // Replace the results of the old op with the new output buffers.
  replaceOpWithBufferizedValues(rewriter, op, newOutputBuffers);

  return success();
}

/// Bufferization of linalg ops (e.g., linalg.generic). Replace with a new op
/// of the same kind that operates entirely on memrefs.
template <typename OpTy>
struct LinalgOpInterface
    : public DstBufferizableOpInterfaceExternalModel<LinalgOpInterface<OpTy>,
                                                     OpTy> {
  bool bufferizesToMemoryRead(Operation *op, OpOperand &opOperand,
                              const AnalysisState &state) const {
    // Operand is read if it is used in the computation.
    auto linalgOp = cast<linalg::LinalgOp>(op);
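    // I.e., the block argument that corresponds to the operand has uses in the
    // payload region.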
    return linalgOp.payloadUsesValueFromOperand(&opOperand);
  }

  bool bufferizesToMemoryWrite(Operation *op, OpOperand &opOperand,
                               const AnalysisState &state) const {
    // Operand is written to if it is a DPS init (output) operand; inputs are
    // only read.
    auto dpsOp = cast<DestinationStyleOpInterface>(op);
    return dpsOp.isDpsInit(&opOperand);
  }

  bool bufferizesToElementwiseAccess(Operation *op, const AnalysisState &state,
                                     ArrayRef<OpOperand *> opOperands) const {
    auto linalgOp = cast<linalg::LinalgOp>(op);

    // Accesses into sparse data structures are not necessarily elementwise.
    if (sparse_tensor::hasAnySparseOperand(linalgOp))
      return false;

    // All loops must be parallel.
    if (linalgOp.getNumLoops() != linalgOp.getNumParallelLoops())
      return false;

    // All indexing maps of tensors must be identity maps.
    SmallVector<AffineMap> indexingMaps = linalgOp.getIndexingMapsArray();
    assert(linalgOp->getNumOperands() == indexingMaps.size() &&
           "unexpected number of indexing maps");
    for (auto [operand, map] :
         llvm::zip(linalgOp->getOpOperands(), indexingMaps)) {
      // Non-tensors do not participate in bufferization, so they can be
      // ignored.
      if (!isa<RankedTensorType, MemRefType>(operand.get().getType()))
        continue;
      // Only consider operands in `opOperands`.
      if (!llvm::is_contained(opOperands, &operand))
        continue;
      // TODO: This could be generalized to other indexing maps. (All indexing
      // maps must be the same.)
      if (!map.isIdentity())
        return false;
    }

    return true;
  }

  LogicalResult bufferize(Operation *op, RewriterBase &rewriter,
                          const BufferizationOptions &options) const {
    return bufferizeDestinationStyleOpInterface(
        rewriter, cast<DestinationStyleOpInterface>(op), options);
  }
};

/// Helper structure that iterates over all LinalgOps in `Ops` and registers
/// the `BufferizableOpInterface` with each of them.
template <typename... Ops>
struct LinalgOpInterfaceHelper {
  static void registerOpInterface(MLIRContext *ctx) {
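    // C++17 fold expression: attach the external model to each op type in the
    // parameter pack.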
    (Ops::template attachInterface<LinalgOpInterface<Ops>>(*ctx), ...);
  }
};

struct SoftmaxOpInterface
    : public DstBufferizableOpInterfaceExternalModel<SoftmaxOpInterface,
                                                     linalg::SoftmaxOp> {
  bool bufferizesToMemoryRead(Operation *op, OpOperand &opOperand,
                              const AnalysisState &state) const {
    // Output operand is not read.
    auto softmaxOp = cast<linalg::SoftmaxOp>(op);
    return &opOperand == &softmaxOp.getInputMutable();
  }

  LogicalResult bufferize(Operation *op, RewriterBase &rewriter,
                          const BufferizationOptions &options) const {
    auto softmaxOp = cast<linalg::SoftmaxOp>(op);
    FailureOr<Value> inputBuffer =
        getBuffer(rewriter, softmaxOp.getInput(), options);
    if (failed(inputBuffer))
      return failure();
    FailureOr<Value> outputBuffer =
        getBuffer(rewriter, softmaxOp.getOutput(), options);
    if (failed(outputBuffer))
      return failure();
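    // Create a memref variant of the op that computes into the output buffer
    // in place; it has no results.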
    rewriter.create<linalg::SoftmaxOp>(softmaxOp.getLoc(),
                                       /*result=*/TypeRange(), *inputBuffer,
                                       *outputBuffer, softmaxOp.getDimension());
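    // The tensor result of the original op is replaced with the output buffer.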
    replaceOpWithBufferizedValues(rewriter, op, *outputBuffer);
    return success();
  }
};
} // namespace

void mlir::linalg::registerBufferizableOpInterfaceExternalModels(
    DialectRegistry &registry) {
  registry.addExtension(+[](MLIRContext *ctx, linalg::LinalgDialect *dialect) {
    // Register all Linalg structured ops. `LinalgOp` is an interface and it is
    // not possible to attach an external interface to an existing interface.
    // Therefore, attach the `BufferizableOpInterface` to all ops one-by-one.
    LinalgOpInterfaceHelper<
#define GET_OP_LIST
#include "mlir/Dialect/Linalg/IR/LinalgStructuredOps.cpp.inc"
        >::registerOpInterface(ctx);

    SoftmaxOp::attachInterface<SoftmaxOpInterface>(*ctx);
  });
}