//===- BufferizableOpInterfaceImpl.cpp - Impl. of BufferizableOpInterface -===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "mlir/Dialect/Linalg/Transforms/BufferizableOpInterfaceImpl.h"
#include "mlir/Dialect/Bufferization/IR/BufferizableOpInterface.h"
#include "mlir/Dialect/Bufferization/IR/Bufferization.h"
#include "mlir/Dialect/Bufferization/IR/DstBufferizableOpInterfaceImpl.h"
#include "mlir/Dialect/Linalg/IR/Linalg.h"
#include "mlir/Dialect/SparseTensor/IR/SparseTensor.h"
#include "mlir/Dialect/Tensor/IR/Tensor.h"
#include "mlir/IR/Dialect.h"
#include "mlir/IR/Operation.h"
#include "mlir/Interfaces/DestinationStyleOpInterface.h"

using namespace mlir;
using namespace linalg;
using namespace mlir::bufferization;

namespace {

/// Generic conversion for any DestinationStyleOpInterface on tensors.
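///
/// For illustration (a hypothetical IR sketch, not taken from a test), an op
/// such as
///   %r = linalg.generic ... ins(%in : tensor<4xf32>)
///                           outs(%init : tensor<4xf32>) -> tensor<4xf32>
/// is rewritten to a variant that operates on buffers and returns no results:
///   linalg.generic ... ins(%in_buf : memref<4xf32>)
///                      outs(%out_buf : memref<4xf32>)
/// and all uses of %r are replaced with the bufferized value of %out_buf.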
static LogicalResult
bufferizeDestinationStyleOpInterface(RewriterBase &rewriter,
                                     DestinationStyleOpInterface op,
                                     const BufferizationOptions &options) {
  // Take a guard before anything else.
  OpBuilder::InsertionGuard g(rewriter);
  rewriter.setInsertionPoint(op);

  // Nothing to do. This op is already bufferized.
  if (op.hasPureBufferSemantics())
    return success();

  // Ensure op has only tensors. Allow mixed tensor-buffer mode on a per-need
  // basis.
  if (!op.hasPureTensorSemantics())
    return op->emitError() << "op does not have pure tensor semantics";

  // New input operands for the cloned op.
  SmallVector<Value> newInputBuffers;
  newInputBuffers.reserve(op.getNumDpsInputs());
  for (OpOperand *opOperand : op.getDpsInputOperands()) {
    if (op.isScalar(opOperand)) {
      newInputBuffers.push_back(opOperand->get());
      continue;
    }
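    // Look up (or materialize) the buffer holding the contents of this tensor
    // operand.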
    FailureOr<Value> buffer = getBuffer(rewriter, opOperand->get(), options);
    if (failed(buffer))
      return failure();
    newInputBuffers.push_back(*buffer);
  }

  // New output operands for the cloned op.
  SmallVector<Value> newOutputBuffers;
  for (OpResult opResult : op->getOpResults()) {
    OpOperand *opOperand = op.getDpsInitOperand(opResult.getResultNumber());
    FailureOr<Value> resultBuffer =
        getBuffer(rewriter, opOperand->get(), options);
    if (failed(resultBuffer))
      return failure();
    newOutputBuffers.push_back(*resultBuffer);
  }

  // Merge input/output operands.
  SmallVector<Value> newOperands = newInputBuffers;
  newOperands.append(newOutputBuffers.begin(), newOutputBuffers.end());

  // Set insertion point now that potential alloc/dealloc are introduced.
  rewriter.setInsertionPoint(op);
  // Clone the op, but use the new operands. Move the existing block into the
  // new op. Since the new op does not have any tensor results, it does not
  // return anything.
  assert(op->getNumRegions() == 1 && "expected that op has 1 region");
  OperationState state(op->getLoc(), op->getName(), newOperands, TypeRange{},
                       op->getAttrs());
  state.addRegion();
  Operation *newOp = Operation::create(state);
  newOp->getRegion(0).getBlocks().splice(newOp->getRegion(0).begin(),
                                         op->getRegion(0).getBlocks());
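  // (splice moves the original payload block, including its block arguments,
  // into the new op's region without cloning it.)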

  // We don't want the rewriter to track an incomplete operation, so we insert
  // the new operation only after it has been fully constructed.
  rewriter.insert(newOp);

  // Replace the results of the old op with the new output buffers.
  replaceOpWithBufferizedValues(rewriter, op, newOutputBuffers);

  return success();
}

/// Bufferization of Linalg structured ops (e.g., linalg.generic): replace the
/// op with a new one of the same kind that operates entirely on memrefs.
template <typename OpTy>
struct LinalgOpInterface
    : public DstBufferizableOpInterfaceExternalModel<LinalgOpInterface<OpTy>,
                                                     OpTy> {
  bool bufferizesToMemoryRead(Operation *op, OpOperand &opOperand,
                              const AnalysisState &state) const {
    // Operand is read if it is used in the computation.
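    // (For illustration: the init of a matmul is read because the payload
    // accumulates into it, whereas the init of a linalg.fill is not read, as
    // the fill payload ignores its output block argument.)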
    auto linalgOp = cast<linalg::LinalgOp>(op);
    return linalgOp.payloadUsesValueFromOperand(&opOperand);
  }

  bool bufferizesToMemoryWrite(Operation *op, OpOperand &opOperand,
                               const AnalysisState &state) const {
    // Operand is written to if it is an init (output) operand; inputs are
    // never written.
    auto dpsOp = cast<DestinationStyleOpInterface>(op);
    return dpsOp.isDpsInit(&opOperand);
  }

  bool bufferizesToElementwiseAccess(Operation *op, const AnalysisState &state,
                                     ArrayRef<OpOperand *> opOperands) const {
    auto linalgOp = cast<linalg::LinalgOp>(op);

    // Accesses into sparse data structures are not necessarily elementwise.
    if (sparse_tensor::hasAnySparseOperand(linalgOp))
      return false;

    // All loops must be parallel.
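    // (E.g., a reduction iterator would access the same init element across
    // all iterations of the reduction loop, which is not elementwise.)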
    if (linalgOp.getNumLoops() != linalgOp.getNumParallelLoops())
      return false;

    // All index maps of tensors must be identity maps.
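    // (E.g., affine_map<(d0, d1) -> (d0, d1)> is an identity map, whereas a
    // transpose such as affine_map<(d0, d1) -> (d1, d0)> is not.)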
    SmallVector<AffineMap> indexingMaps = linalgOp.getIndexingMapsArray();
    assert(linalgOp->getNumOperands() == indexingMaps.size() &&
           "unexpected number of indexing maps");
    for (auto [operand, map] :
         llvm::zip(linalgOp->getOpOperands(), indexingMaps)) {
      // Non-tensors do not participate in bufferization, so they can be
      // ignored.
      if (!isa<RankedTensorType, MemRefType>(operand.get().getType()))
        continue;
      // Only consider operands in `opOperands`.
      if (!llvm::is_contained(opOperands, &operand))
        continue;
      // TODO: This could be generalized to other indexing maps. (All indexing
      // must be the same.)
      if (!map.isIdentity())
        return false;
    }

    return true;
  }

  LogicalResult bufferize(Operation *op, RewriterBase &rewriter,
                          const BufferizationOptions &options) const {
    return bufferizeDestinationStyleOpInterface(
        rewriter, cast<DestinationStyleOpInterface>(op), options);
  }
};

/// Helper structure that iterates over all LinalgOps in `Ops` and registers
/// the `BufferizableOpInterface` with each of them.
template <typename... Ops>
struct LinalgOpInterfaceHelper {
  static void registerOpInterface(MLIRContext *ctx) {
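    // C++17 unary fold over the comma operator: this expands to one
    // attachInterface call per op type in the `Ops` parameter pack.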
    (Ops::template attachInterface<LinalgOpInterface<Ops>>(*ctx), ...);
  }
};

struct SoftmaxOpInterface
    : public DstBufferizableOpInterfaceExternalModel<SoftmaxOpInterface,
                                                     linalg::SoftmaxOp> {
  bool bufferizesToMemoryRead(Operation *op, OpOperand &opOperand,
                              const AnalysisState &state) const {
    // Output operand is not read.
    auto softmaxOp = cast<linalg::SoftmaxOp>(op);
    return &opOperand == &softmaxOp.getInputMutable();
  }

  LogicalResult bufferize(Operation *op, RewriterBase &rewriter,
                          const BufferizationOptions &options) const {
    auto softmaxOp = cast<linalg::SoftmaxOp>(op);
    FailureOr<Value> inputBuffer =
        getBuffer(rewriter, softmaxOp.getInput(), options);
    if (failed(inputBuffer))
      return failure();
    FailureOr<Value> outputBuffer =
        getBuffer(rewriter, softmaxOp.getOutput(), options);
    if (failed(outputBuffer))
      return failure();
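    // Recreate the softmax op on buffers. The buffer variant returns no
    // results; the original result is replaced by the output buffer below.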
    rewriter.create<linalg::SoftmaxOp>(softmaxOp.getLoc(),
                                       /*result=*/TypeRange(), *inputBuffer,
                                       *outputBuffer, softmaxOp.getDimension());
    replaceOpWithBufferizedValues(rewriter, op, *outputBuffer);
    return success();
  }
};
} // namespace

void mlir::linalg::registerBufferizableOpInterfaceExternalModels(
    DialectRegistry &registry) {
  registry.addExtension(+[](MLIRContext *ctx, linalg::LinalgDialect *dialect) {
    // Register all Linalg structured ops. `LinalgOp` is an interface and it is
    // not possible to attach an external interface to an existing interface.
    // Therefore, attach the `BufferizableOpInterface` to all ops one-by-one.
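    // With GET_OP_LIST defined, the generated .cpp.inc expands to a
    // comma-separated list of all Linalg structured op classes, which here
    // becomes the template argument pack of the helper.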
    LinalgOpInterfaceHelper<
#define GET_OP_LIST
#include "mlir/Dialect/Linalg/IR/LinalgStructuredOps.cpp.inc"
        >::registerOpInterface(ctx);

    SoftmaxOp::attachInterface<SoftmaxOpInterface>(*ctx);
  });
}