xref: /llvm-project/mlir/include/mlir/Dialect/Bufferization/IR/BufferizableOpInterface.td (revision a6d81cdf896d901e0f5e672b9a3eccc4ae8759ce)
//===-- BufferizableOpInterface.td - Bufferizable Ops ------*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef BUFFERIZABLE_OP_INTERFACE
#define BUFFERIZABLE_OP_INTERFACE

include "mlir/IR/OpBase.td"

def BufferizableOpInterface : OpInterface<"BufferizableOpInterface"> {
  let description = [{
    An op interface for One-Shot Bufferize. Ops that implement this interface
    can be analyzed and bufferized using One-Shot Bufferize.

    Note: All "bufferizesTo*" and "getAliasing*" interface methods must be
    implemented conservatively. If it is not statically known whether an
    OpOperand/Value bufferizes in a certain way (e.g., to a memory write),
    the worst case must be assumed (e.g., that it does). Similarly,
    "getAliasing*" interface methods may always return additional OpOperands or
    Values, but must not miss an OpOperand or Value that could potentially
    alias at runtime.
  }];
  let cppNamespace = "::mlir::bufferization";
  let methods = [
      InterfaceMethod<
        /*desc=*/[{
          Return `true` if the given Value may bufferize to a new buffer
          allocation. If it is statically unknown that the given Value
          bufferizes to a buffer allocation, `true` should be returned.
        }],
        /*retType=*/"bool",
        /*methodName=*/"bufferizesToAllocation",
        /*args=*/(ins "::mlir::Value":$value),
        /*methodBody=*/"",
        /*defaultImplementation=*/"return false;"
      >,
      InterfaceMethod<
        /*desc=*/[{
          Return `true` if the given OpOperand bufferizes to a memory read. This
          method will never be called on OpOperands that do not have a tensor
          type.

          Note: It is always safe to consider an OpOperand as a memory read,
          even if it does actually not read; however, this can introduce
          unnecessary out-of-place bufferization decisions. One-Shot Analysis
          considers OpOperands of unknown ops (that do not implement this
          interface) as reading OpOperands.
        }],
        /*retType=*/"bool",
        /*methodName=*/"bufferizesToMemoryRead",
        /*args=*/(ins "::mlir::OpOperand &":$opOperand,
                      "const ::mlir::bufferization::AnalysisState &":$state),
        /*methodBody=*/"",
        /*defaultImplementation=*/[{
          // Does not have to be implemented for ops without tensor OpOperands.
          llvm_unreachable("bufferizesToMemoryRead not implemented");
        }]
      >,
      InterfaceMethod<
        /*desc=*/[{
          Return `true` if the given OpOperand bufferizes to a memory write.

          This method will never be called on OpOperands that do not have a
          tensor type.

          This method will never be called on OpOperands that do not have an
          aliasing Value. Intuitively, it does not make sense for an OpOperand
          to bufferize to a memory write without returning an aliasing tensor,
          because the write would have no visible effect outside of the op.

          Note: It is always safe to consider an OpOperand as a memory write,
          even if it does actually not write; however, this can introduce
          unnecessary out-of-place bufferization decisions. One-Shot Analysis
          considers OpOperands of unknown ops (that do not implement this
          interface) as writing OpOperands.
        }],
        /*retType=*/"bool",
        /*methodName=*/"bufferizesToMemoryWrite",
        /*args=*/(ins "::mlir::OpOperand &":$opOperand,
                      "const ::mlir::bufferization::AnalysisState &":$state),
        /*methodBody=*/"",
        /*defaultImplementation=*/[{
          // Does not have to be implemented for ops without tensor OpOperands.
          // Does not have to be implemented for OpOperands that do not have an
          // aliasing Value.
          llvm_unreachable("bufferizesToMemoryWrite not implemented");
        }]
      >,
      InterfaceMethod<
        /*desc=*/[{
          Return `true` if the operation bufferizes to IR that performs only
          element-wise accesses on the specified tensor operands. (The operands
          must have the same shape.) The `bufferize` method must be implemented
          in such a way that it is free of loop-carried dependences. I.e., all
          loads at a position appear before all stores at the same position.

          Example: Consider a hypothetical element-wise op, where the "ins"
          bufferize to a memory read and the "outs" bufferize to a memory write.
          ```
          test.element_wise ins(%0), outs(%1) : tensor<3xf32>
          ```

          The following is a valid access pattern:
          ```
          load(%0[1])
          store(%1[1])
          load(%0[2])
          store(%1[2])
          load(%0[0])
          store(%1[0])
          ```

          The following would be an invalid (not element-wise) access pattern:
          ```
          load(%0[1])
          store(%0[1])
          load(%0[1])
          ...
          ```

          Element-wise ops can sometimes bufferize more efficiently: a RaW
          conflict between two operands of the same op can be avoided if it is
          guaranteed that an original element value is no longer needed after
          writing a computed element value at the same location. E.g., such an
          optimization is possible in the above example if %0 and %1 are
          equivalent tensors. (It is not possible, if %0 and %1 are merely
          aliasing. It is not necessary if %0 and %1 are not aliasing at all,
          because there would be no conflict anyway.)

          Note: Tensor operands that are not included in `opOperands` can be
          ignored. A conservative implementation of this interface method may
          always return "false".
        }],
        /*retType=*/"bool",
        /*methodName=*/"bufferizesToElementwiseAccess",
        /*args=*/(ins "const ::mlir::bufferization::AnalysisState &":$state,
                      "ArrayRef<OpOperand *>":$opOperands),
        /*methodBody=*/"",
        /*defaultImplementation=*/[{
          // It is always safe to assume that the op is not element-wise.
          return false;
        }]
      >,
      InterfaceMethod<
        /*desc=*/[{
          Return `true` if the given OpResult bufferizes to a memory write.
          This is the same property as `bufferizesToMemoryWrite`, but from the
          perspective of OpResults.

          This method will never be called on OpResults that do not have a
          tensor type.

          This method has a default implementation. By default, it returns
          `true` if any of the following three cases applies.

          1. There is no corresponding aliasing OpOperand.

             Example: `tensor.generate ... : tensor<10xf32>`
             The op fills a newly allocated buffer and bufferizes to a memory
             write.

             Counter-example: bufferization.alloc_tensor
             The op just allocates and does not specify the data of the tensor,
             so resultBufferizesToMemoryWrite is overridden to return false.

          2. At least one aliasing OpOperand bufferizes to a memory write.

             Example: `tensor.insert %f into %t[...] : tensor<?xf32>`
             The destination OpOperand bufferizes to a memory write, so the
             result also bufferizes to a memory write.

          3. At least one aliasing OpOperand's value is defined inside the
             defining op of the given OpResult and it is a memory write.

             According to this rule, an aliasing OpOperand value that is defined
             inside this op and is bufferizing to a memory write makes the given
             OpResult bufferize to a memory write.

             Example:
             ```
             %r = scf.if ... -> tensor<?xf32> {
               %1 = tensor.insert %f into %t[...] : tensor<?xf32>
               scf.yield %1 : tensor<?xf32>
             } else { ... }
             ```
             The scf.if result bufferizes to a memory write because %1 (an
             OpResult defined inside the scf.if op) bufferizes to a memory
             write.
        }],
        /*retType=*/"bool",
        /*methodName=*/"resultBufferizesToMemoryWrite",
        /*args=*/(ins "::mlir::OpResult":$opResult,
                      "const ::mlir::bufferization::AnalysisState &":$state),
        /*methodBody=*/"",
        /*defaultImplementation=*/[{
          assert(opResult.getDefiningOp() == $_op.getOperation() &&
                 "invalid OpResult");
          return ::mlir::bufferization::detail::defaultResultBufferizesToMemoryWrite(
              opResult, state);
        }]
      >,
      InterfaceMethod<
        /*desc=*/[{
          Return `true` if the given OpOperand must bufferize in-place. Alias
          sets and inplace attributes will be set up accordingly before making
          any other bufferization decisions. This method will never be called on
          OpOperands that do not have a tensor type.

          Note: Unranked tensor OpOperands always bufferize in-place. This could
          be extended in the future. Unranked tensors are used with external
          functions only.
        }],
        /*retType=*/"bool",
        /*methodName=*/"mustBufferizeInPlace",
        /*args=*/(ins "::mlir::OpOperand &":$opOperand,
                      "const ::mlir::bufferization::AnalysisState &":$state),
        /*methodBody=*/"",
        /*defaultImplementation=*/[{
          return ::llvm::isa<::mlir::UnrankedTensorType>(opOperand.get().getType());
        }]
      >,
      InterfaceMethod<
        /*desc=*/[{
          Return the Values that may alias with a given OpOperand when
          bufferized in-place. This method will never be called on OpOperands
          that do not have a tensor type.

          This method can return multiple Values, indicating that a given
          OpOperand may at runtime alias with any (or multiple) of the returned
          Values.

          Each alias is specified with a degree of certainty:

          * MAYBE (`isDefinite = false`): At runtime, buffer(opOperand) may
            alias with the specified Value.
          * DEFINITE (`isDefinite = true`, default): At runtime,
            buffer(opOperand) is guaranteed to alias the buffer of the specified
            Value. This is a stronger property than MAYBE and allows for more
            precise analyses. DEFINITE properties should be used when possible.

          Furthermore, each alias is specified with a buffer relation:

          * `BufferRelation::Equivalent`: Both aliases are the exact same
            buffer. I.e., same size, no offset, same strides.
          * `BufferRelation::Unknown`: There is no further information apart
            from the fact that both buffers alias.

          False positives are allowed in the list of Values, but they can
          adversely affect the accuracy of the analysis. On the contrary,
          omitting potential aliases is incorrect.

          One possible (conservative) implementation of this interface method,
          that is always safe, is to return all tensor Values with
          BufferRelation::Unknown and MAYBE.

          Examples:

          ```
          // aliasingValues(%t) = DEFINITE {Equivalent %r}
          %r = tensor.insert_slice %f into %t : tensor<10xf32>

          // aliasingValues(%t) = DEFINITE {Unknown %r}
          // Note: "Buffer is subset of buffer" relationship are not yet
          // supported, so "Unknown" is the best we can do for now.
          %r = tensor.extract_slice %t[0][5][1]
              : tensor<10xf32> to tensor<5xf32>

          // aliasingValues(%t1) = MAYBE {Equivalent %r}
          // aliasingValues(%t2) = MAYBE {Equivalent %r}
          %r = arith.select %c, %t1, %t2 : tensor<10xf32>

          // A hypothetical op that bufferizes to rolling a dice and based on
          // the result to either return buffer(%t) or a newly allocated copy
          // thereof.
          // aliasingValues(%t) = MAYBE {Equivalent %r}
          %r = "dummy.alias_or_copy(%t) : (tensor<10xf32>) -> (tensor<10xf32>)"
          ```
        }],
        /*retType=*/"::mlir::bufferization::AliasingValueList",
        /*methodName=*/"getAliasingValues",
        /*args=*/(ins "::mlir::OpOperand &":$opOperand,
                      "const ::mlir::bufferization::AnalysisState &":$state),
        /*methodBody=*/"",
        /*defaultImplementation=*/[{
          // Does not have to be implemented for ops without tensor OpOperands.
          assert(::llvm::isa<::mlir::TensorType>(opOperand.get().getType()) &&
                 "expected OpOperand with tensor type");
          llvm_unreachable("getAliasingValues not implemented");
        }]
      >,
      InterfaceMethod<
        /*desc=*/[{
          Return the OpOperands that alias with a given Value when bufferized
          in-place. This method will never be called on Values that do not
          have a tensor type.

          By default, this method is the inverse of `getAliasingValues`. Ops
          with a region that yield values may want to override this method to
          return the OpOperands that are yielded by the terminator.

          This method can return multiple OpOperands, indicating that a given
          Value may at runtime alias with any (or multiple) of the returned
          OpOperands.

          This property is specified with a degree of certainty:

          * MAYBE (`isDefinite = false`): At runtime, buffer(value) may alias
            with the specified OpOperand.
          * DEFINITE (`isDefinite = true`, default): At runtime,
            buffer(value) is guaranteed to alias the buffer of the specified
            OpOperand. This is a stronger property than MAYBE and allows for
            more precise analyses. DEFINITE properties should be used when
            possible.

          For each alias, a BufferRelation can be specified:

          * `BufferRelation::Equivalent`: Both aliases are the exact same
            buffer. I.e., same size, no offset, same strides.
          * `BufferRelation::Unknown`: There is no further information apart
            from the fact that both buffers alias.

          False positives are allowed in the list of OpOperands, but they can
          adversely affect the accuracy of the analysis. On the contrary,
          omitting potential aliases is incorrect.

          One possible (conservative) implementation of this interface method,
          that is always safe, is to return all tensor OpOperands with
          BufferRelation::Unknown and MAYBE.

          Note: If the returned list of OpOperands is empty, this op definitely
          bufferizes to a new allocation. In that case `bufferizesToAllocation`
          must return `true`.

          Examples:

          ```
          // aliasingOpOperands(%r) = DEFINITE {Equivalent %t}
          %r = tensor.insert_slice %f into %t : tensor<10xf32>

          // aliasingOpOperands(%r) = DEFINITE {Unknown %t}
          %r = tensor.extract_slice %t[0][5][1]
              : tensor<10xf32> to tensor<5xf32>

          // aliasingOpOperands(%r) = DEFINITE {Equivalent %t1, Equivalent %t2}
          %r = arith.select %c, %t1, %t2 : tensor<10xf32>

          // aliasingOpOperands(%r) = MAYBE {}
          %r = tensor.empty() : tensor<10xf32>
          ```
        }],
        /*retType=*/"::mlir::bufferization::AliasingOpOperandList",
        /*methodName=*/"getAliasingOpOperands",
        /*args=*/(ins "::mlir::Value":$value,
                      "const ::mlir::bufferization::AnalysisState &":$state),
        /*methodBody=*/"",
        /*defaultImplementation=*/[{
          assert(isa<::mlir::TensorType>(value.getType()) &&
                 "expected tensor type");
          return ::mlir::bufferization::detail::defaultGetAliasingOpOperands(
              value, state);
        }]
      >,
      InterfaceMethod<
        /*desc=*/[{
          Resolve all inplacability conflicts by inserting explicit
          `bufferization.alloc_tensor` ops. Examples of inplacability conflicts
          are read-after-write conflicts or writes into non-writable buffers.

          This method should rewrite the IR in such a way that for each tensor
          OpOperand t, buffer(t) can be directly used during bufferization.
          The bufferization then no longer has to care about inplacability
          conflicts.

          This method can query analysis information from the given analysis
          state.
        }],
        /*retType=*/"::llvm::LogicalResult",
        /*methodName=*/"resolveConflicts",
        /*args=*/(ins "::mlir::RewriterBase &":$rewriter,
                      "const ::mlir::bufferization::AnalysisState &":$state),
        /*methodBody=*/"",
        /*defaultImplementation=*/[{
          auto bufferizableOp =
              ::llvm::cast<BufferizableOpInterface>($_op.getOperation());
          return bufferizableOp.resolveTensorOpOperandConflicts(
              rewriter, state);
        }]
      >,
      InterfaceMethod<
        /*desc=*/[{
          Bufferize this op, i.e., rewrite it into a memref-based equivalent.
          Buffers of tensor SSA values can be retrieved via `getBuffer`.
          Uses of tensor results of the existing tensor op can be replaced with
          `replaceOpWithBufferizedValues` or `replaceOpWithNewBufferizedOp`.
          These two functions automatically handle the tensor-to-memref type
          conversion.

          The implementation of this method must be consistent with the
          remaining methods, in particular `getAliasingOpOperands`. I.e., a
          tensor result `r` may only be replaced with:

          a) One of the buffers in getAliasingOpOperands(r).
          b) Or: A newly allocated buffer (only if `bufferizesToAllocation`).

          This method will never be called on ops that do not have at least one
          tensor operand/result.

          The return value of this method indicates whether there was an error
          while bufferizing this op (such as failing to create a new buffer
          allocation op). The bufferization driver immediately stops bufferizing
          the input IR and returns `failure` in that case. If this op is
          expected to survive bufferization, `success` should be returned
          (together with `allow-unknown-ops` enabled).

          Note: If this op supports unstructured control flow in its regions,
          then this function should also bufferize all block signatures that
          belong to this op. Branch ops (that branch to a block) are typically
          bufferized together with the block signature (this is just a
          suggestion to make sure IR is valid at every point in time and could
          be done differently).
        }],
        /*retType=*/"::llvm::LogicalResult",
        /*methodName=*/"bufferize",
        /*args=*/(ins "::mlir::RewriterBase &":$rewriter,
                      "const ::mlir::bufferization::BufferizationOptions &":$options),
        /*methodBody=*/"",
        /*defaultImplementation=*/[{
          llvm_unreachable("bufferize not implemented");
          return ::mlir::failure();
        }]
      >,
      InterfaceMethod<
        /*desc=*/[{
          Return `true` if the given Value can be written to in-place. Value is
          either an OpResult of this operation or a BlockArgument of a block of
          this operation.

          Most OpResult buffers can be written to, but some ops such as
          ConstantOp may bufferize to non-writable (read-only) memory locations.
          Therefore, by default, this method returns `true` for OpResults. This
          method will never be called on OpResults that do not have a tensor
          type.

          Whether a BlockArgument can be written to or not depends on the
          operation. This method conservatively returns `false`. This method
          will never be called on BlockArguments that do not have a tensor type.
        }],
        /*retType=*/"bool",
        /*methodName=*/"isWritable",
        /*args=*/(ins "::mlir::Value":$value,
                      "const ::mlir::bufferization::AnalysisState &":$state),
        /*methodBody=*/"",
        /*defaultImplementation=*/[{
          return ::llvm::isa<::mlir::OpResult>(value);
        }]
      >,
      InterfaceMethod<
        /*desc=*/[{
          Return `true` if the `uRead` and `uWrite` do not constitute a RaW
          conflict. If they are conflicting or if it is unknown whether they are
          conflicting, return `false`. This method will never be called with
          OpOperands that do not have a tensor type. At least one of the two
          given OpOperands belongs to this operation.

          This method can be implemented to specify custom RaW analysis rules.
          If this method returns `true` the given OpOperands are not considered
          to be conflicting and do not force out-of-place bufferization. (There
          may still be other conflicts that do.)
        }],
        /*retType=*/"bool",
        /*methodName=*/"isNotConflicting",
        /*args=*/(ins "::mlir::OpOperand *":$uRead,
                      "::mlir::OpOperand *":$uWrite,
                      "const ::mlir::bufferization::AnalysisState &":$state),
        /*methodBody=*/"",
        /*defaultImplementation=*/[{
          return false;
        }]
      >,
      InterfaceMethod<
        /*desc=*/[{
          Return `failure` if this op does not pass the analysis. This method
          is run during One-Shot Bufferize (after all post-analysis steps). If
          the op does not pass the analysis, bufferization is aborted.

          This method can be used to check expected invariants and limitations
          of the current bufferization implementation.
        }],
        /*retType=*/"::llvm::LogicalResult",
        /*methodName=*/"verifyAnalysis",
        /*args=*/(ins "const ::mlir::bufferization::AnalysisState &":$state),
        /*methodBody=*/"",
        /*defaultImplementation=*/[{
          return ::mlir::success();
        }]
      >,
      InterfaceMethod<
        /*desc=*/[{
          Return the bufferized type of the given tensor value (without
          bufferizing the IR). The value is either a BlockArgument of a block
          that belongs to this op or an OpResult of the given op.

          This method is useful when the bufferized type of value must be
          predicted before modifying any IR.

          Implementations may call `bufferization::getBufferType` to compute the
          bufferized type of another SSA value. The same (unmodified)
          `invocationStack` must be passed to that function. The stack contains
          all SSA values for which a buffer type computation is currently in
          progress. Implementations may inspect the stack to detect repetitive
          computations for the same SSA value. (E.g., when computing the
          bufferized types of a loop.)

          Note: This interface method should never be called directly from user
          code. Always use `bufferization::getBufferType`.
        }],
        /*retType=*/"::mlir::FailureOr<::mlir::BaseMemRefType>",
        /*methodName=*/"getBufferType",
        /*args=*/(ins "::mlir::Value":$value,
                      "const ::mlir::bufferization::BufferizationOptions &":$options,
                      "::llvm::SmallVector<::mlir::Value> &":$invocationStack),
        /*methodBody=*/"",
        /*defaultImplementation=*/[{
          assert(getOwnerOfValue(value) == $_op.getOperation() &&
                 "expected that value belongs to this op");
          assert(invocationStack.back() == value &&
                 "inconsistent invocation stack");
          return ::mlir::bufferization::detail::defaultGetBufferType(
              value, options, invocationStack);
        }]
      >,
      InterfaceMethod<
        /*desc=*/[{
          Return `true` if the given region of this op is repetitive. By default
          this information is queried from the `RegionBranchOpInterface`. Ops
          that do not implement this interface can override this method to
          declare regions as repetitive.

          The RaW conflict detection of One-Shot Analysis is more strict inside
          repetitive regions: Op dominance cannot always be used to rule out
          certain potential conflicts (e.g., a conflicting write happening after
          a read), because there may not be a meaningful ordering of certain ops
          that are executed multiple times. This is described in more detail in
          documentation of One-Shot Analysis.
        }],
        /*retType=*/"bool",
        /*methodName=*/"isRepetitiveRegion",
        /*args=*/(ins "unsigned":$index),
        /*methodBody=*/"",
        /*defaultImplementation=*/[{
          return ::mlir::bufferization::detail::defaultIsRepetitiveRegion(
              ::llvm::cast<BufferizableOpInterface>($_op.getOperation()), index);
        }]
      >,
      InterfaceMethod<
        /*desc=*/[{
          Return `true` if the given region of this op is parallel, i.e.,
          multiple instances of the region may be executing at the same time.
          If a region is parallel, it must also be marked as "repetitive".

          The RaW conflict detection of One-Shot Analysis is more strict inside
          parallel regions: Buffers may have to be privatized.

          By default, regions are assumed to be sequential.
        }],
        /*retType=*/"bool",
        /*methodName=*/"isParallelRegion",
        /*args=*/(ins "unsigned":$index),
        /*methodBody=*/"",
        /*defaultImplementation=*/[{
          return false;
        }]
      >,
      InterfaceMethod<
        /*desc=*/[{
          Return "true" if this op has tensor semantics and should be
          bufferized. By default, ops with tensor operands, tensor op results
          and/or tensor block arguments have tensor semantics.

          This interface method can be implemented by ops that should be
          bufferized but do not have tensor semantics according to the above
          definition. E.g., this function can return "true" for symbols.
        }],
        /*retType=*/"bool",
        /*methodName=*/"hasTensorSemantics",
        /*args=*/(ins),
        /*methodBody=*/"",
        /*defaultImplementation=*/[{
          return ::mlir::bufferization::detail
              ::defaultHasTensorSemantics($_op.getOperation());
        }]
      >,
      StaticInterfaceMethod<
        /*desc=*/[{
          Return `true` if the op and this interface implementation supports
          unstructured control flow. I.e., regions with multiple blocks. This is
          not supported in most ops, so the default answer is `false`.
        }],
        /*retType=*/"bool",
        /*methodName=*/"supportsUnstructuredControlFlow",
        /*args=*/(ins),
        /*methodBody=*/"",
        /*defaultImplementation=*/[{
          return false;
        }]
      >,
  ];

  let extraClassDeclaration = [{
    /// Resolve out-of-place tensor OpOperands with explicit allocations in the
    /// form of `bufferization.alloc_tensor` ops.
    ::llvm::LogicalResult resolveTensorOpOperandConflicts(
        ::mlir::RewriterBase &rewriter,
        const ::mlir::bufferization::AnalysisState &state);

    /// Return `true` if the given OpOperand creates an alias but does neither
    /// read nor write. This implies that `bufferizesToMemoryRead` and
    /// `bufferizesToMemoryWrite` must return `false`. This method will never
    /// be called on OpOperands that do not have a tensor type.
    ///
    /// Examples of such ops are `tensor.extract_slice` and `tensor.cast`.
    bool bufferizesToAliasOnly(
        ::mlir::OpOperand &opOperand,
        const ::mlir::bufferization::AnalysisState &state) {
      auto bufferizableOp =
          ::llvm::cast<::mlir::bufferization::BufferizableOpInterface>(getOperation());
      return !bufferizableOp.bufferizesToMemoryRead(opOperand, state)
          && !bufferizableOp.bufferizesToMemoryWrite(opOperand, state)
          && bufferizableOp.getAliasingValues(opOperand, state)
              .getNumAliases() != 0;
    }
  }];
}

#endif  // BUFFERIZABLE_OP_INTERFACE