//===- BufferizationOps.td - Bufferization op definitions --*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef BUFFERIZATION_OPS
#define BUFFERIZATION_OPS

include "mlir/Dialect/Bufferization/IR/AllocationOpInterface.td"
include "mlir/Dialect/Bufferization/IR/BufferViewFlowOpInterface.td"
include "mlir/Dialect/Bufferization/IR/BufferizableOpInterface.td"
include "mlir/Dialect/Bufferization/IR/BufferizationBase.td"
include "mlir/Interfaces/DestinationStyleOpInterface.td"
include "mlir/Interfaces/InferTypeOpInterface.td"
include "mlir/Interfaces/SideEffectInterfaces.td"
include "mlir/Interfaces/SubsetOpInterface.td"
include "mlir/Interfaces/CopyOpInterface.td"

class Bufferization_Op<string mnemonic, list<Trait> traits = []>
    : Op<Bufferization_Dialect, mnemonic, traits>;

//===----------------------------------------------------------------------===//
// AllocTensorOp
//===----------------------------------------------------------------------===//

def Bufferization_AllocTensorOp : Bufferization_Op<"alloc_tensor",
    [AttrSizedOperandSegments, BufferizableOpInterface,
     DeclareOpInterfaceMethods<ReifyRankedShapedTypeOpInterface>]> {
  let summary = "allocate buffer for a tensor";

  let description = [{
    `bufferization.alloc_tensor` materializes an uninitialized tensor with a
    given shape (dynamic or static). It always bufferizes to a new buffer
    allocation of the given shape. The optional `copy` operand specifies the
    contents of the tensor. If no `copy` operand is specified, reading from the
    result of an `alloc_tensor` op yields an undefined value.

    If `copy` is specified, no dynamic sizes should be passed, since they are
    the same as the dynamic sizes of the `copy` operand.
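
    For example (a minimal sketch; `%t` is an assumed existing tensor value
    whose contents and dynamic sizes are taken over by the allocation):

    ```mlir
    %0 = bufferization.alloc_tensor() copy(%t) : tensor<?xf32>
    ```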

    `alloc_tensor` is a helper op for bufferization. The operation is provided
    as an anchor that marks the beginning of a new tensor SSA use-def chain. It
    can be used to control in-place bufferization decisions during One-Shot
    Bufferize: The bufferized result of a `bufferization.alloc_tensor` does not
    alias with any other buffer, so it can be used to resolve read-after-write
    conflicts that would have been introduced by the in-place bufferization of
    another op.

    The optional `memory_space` attribute specifies the memory space when
    bufferizing this op. The memory space is inferred from `copy` if specified.
    If neither `copy` nor `memory_space` is specified, the default memory space
    is used during bufferization.
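
    For example (a sketch; the memory space value `1 : i64` is an arbitrary
    assumption):

    ```mlir
    %0 = bufferization.alloc_tensor() {memory_space = 1 : i64} : tensor<16xf32>
    ```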

    The optional `size_hint` operand specifies the number of non-zero elements
    for sparse tensors. The value of `size_hint` must be at least 1 and at most
    the linear size of the corresponding dense tensor type. If this requirement
    is not met, the behavior of the op is undefined.

    Both dense and sparse tensor types are supported. The result of a
    `bufferization.alloc_tensor` is a tensor value that can be used like any
    other tensor value. In practice, it is often used as the "out" operand of
    another op. Sparse tensor allocations should always be used in a local
    construction operation and never escape the function boundary directly.

    Example:

    ```mlir
    %c = bufferization.alloc_tensor(%d1, %d2) : tensor<?x?xf32, #SparseMatrix>
    %0 = linalg.matmul
      ins(%a, %b: tensor<?x?xf32, #SparseMatrix>, tensor<?x?xf32, #SparseMatrix>)
      outs(%c: tensor<?x?xf32, #SparseMatrix>) -> tensor<?x?xf32, #SparseMatrix>
    return %0 : tensor<?x?xf32, #SparseMatrix>
    ```

    ```mlir
    %c = bufferization.alloc_tensor(%d1, %d2) size_hint = %noe
      : tensor<?x?xf32, #SparseMatrix>
    ```

    Note: An `alloc_tensor` with a `copy` can also be expressed as an
    `alloc_tensor` without `copy`, followed by a `copy_tensor`.
  }];

  let arguments = (ins Variadic<Index>:$dynamic_sizes,
                       Optional<AnyTensor>:$copy,
                       Optional<Index>:$size_hint,
                       OptionalAttr<AnyAttr>:$memory_space);

  let results = (outs AnyTensor:$result);

  let extraClassDeclaration = [{
    LogicalResult bufferize(RewriterBase &rewriter,
                            const BufferizationOptions &options);

    bool resultBufferizesToMemoryWrite(OpResult opResult,
                                       const AnalysisState &state);

    bool bufferizesToAllocation(Value value) { return true; }

    bool bufferizesToMemoryRead(OpOperand &opOperand,
                                const AnalysisState &state);

    bool bufferizesToMemoryWrite(OpOperand &opOperand,
                                 const AnalysisState &state);

    AliasingValueList getAliasingValues(
        OpOperand &opOperand, const AnalysisState &state);

    FailureOr<BaseMemRefType> getBufferType(
        Value value, const BufferizationOptions &options,
        SmallVector<Value> &invocationStack);

    RankedTensorType getType() {
      return ::llvm::cast<RankedTensorType>(getResult().getType());
    }

    // Return true if the size of the tensor is dynamic at `idx`
    bool isDynamicDim(unsigned idx) {
      return getType().isDynamicDim(idx);
    }

    // Return the argument position that contains the dynamic size of
    // the tensor at dimension `idx`. Asserts that the shape is
    // dynamic at that `idx`.
    unsigned getIndexOfDynamicSize(unsigned idx) {
      assert(!getCopy() && "no dim sizes specified when copying a tensor");
      assert(isDynamicDim(idx) && "expected dynamic size");
      ArrayRef<int64_t> shape = getType().getShape();
      return std::count_if(
          shape.begin(), shape.begin() + idx,
          [&](int64_t size) { return ShapedType::isDynamic(size); });
    }

    // Return the Value of the dynamic size of the tensor at dimension
    // `idx`. Asserts that the shape is dynamic at that `idx`.
    Value getDynamicSize(OpBuilder &b, unsigned idx);

    // Assert that the size of the result tensor is static at `idx`
    // and return the shape.
    int64_t getStaticSize(unsigned idx) {
      assert(!isDynamicDim(idx) && "expected static size");
      return getType().getShape()[idx];
    }
  }];

  let builders = [
    // Build an op without `copy` or `memory_space` or `size_hint`.
    OpBuilder<(ins "RankedTensorType":$type, "ValueRange":$dynamicSizes)>,

    // Build an op without `memory_space` or `size_hint`.
    OpBuilder<(ins "RankedTensorType":$type, "ValueRange":$dynamicSizes,
                   "Value":$copy)>,

    // Build an op without `size_hint`.
    OpBuilder<(ins "TensorType":$type, "ValueRange":$dynamicSizes,
                   "Value":$copy, "IntegerAttr":$memory_space)>,
  ];

  let hasCanonicalizer = 1;
  let hasCustomAssemblyFormat = 1;
  let hasVerifier = 1;
}

//===----------------------------------------------------------------------===//
// CloneOp
//===----------------------------------------------------------------------===//

def Bufferization_CloneOp : Bufferization_Op<"clone", [
    CopyOpInterface,
    MemoryEffectsOpInterface,
    DeclareOpInterfaceMethods<AllocationOpInterface, ["buildDealloc", "buildClone"]>
  ]> {
  let builders = [
    OpBuilder<(ins "Value":$value), [{
      return build($_builder, $_state, value.getType(), value);
    }]>];

  let summary = "clone a memref";
  let description = [{
    Clones the data in the input view into an implicitly defined output view.

    Usage:

    ```mlir
    %arg1 = bufferization.clone %arg0 : memref<?xf32> to memref<?xf32>
    ```

    Valid implementations of this operation may alias the input and output
    views or create an actual copy. Mutating the source or result after the
    clone operation thus leads to undefined behavior.
  }];

  let arguments = (ins Arg<AnyRankedOrUnrankedMemRef, "",
                           [MemRead<DefaultResource>]>:$input);
  let results = (outs Res<AnyRankedOrUnrankedMemRef, "",
                          [MemWrite<DefaultResource>,
                           MemAlloc<DefaultResource>]>:$output);

  let extraClassDeclaration = [{
    Value getSource() { return getInput(); }
    Value getTarget() { return getOutput(); }
  }];

  let assemblyFormat = "$input attr-dict `:` type($input) `to` type($output)";

  let hasFolder = 1;
  let hasCanonicalizer = 1;
}

//===----------------------------------------------------------------------===//
// MaterializeInDestinationOp
//===----------------------------------------------------------------------===//

def Bufferization_MaterializeInDestinationOp
    : Bufferization_Op<"materialize_in_destination",
        [AllElementTypesMatch<["source", "dest"]>,
         BufferizableOpInterface, DestinationStyleOpInterface,
         DeclareOpInterfaceMethods<ReifyRankedShapedTypeOpInterface>,
         DeclareOpInterfaceMethods<SubsetOpInterface,
            ["operatesOnEquivalentSubset", "operatesOnDisjointSubset"]>,
         DeclareOpInterfaceMethods<SubsetInsertionOpInterface,
            ["getSourceOperand", "getValuesNeededToBuildSubsetExtraction",
             "buildSubsetExtraction", "isEquivalentSubset"]>,
         DeclareOpInterfaceMethods<MemoryEffectsOpInterface, ["getEffects"]>]> {
  let summary = "copy a tensor";

  let description = [{
    This op indicates that the data of the `source` tensor is guaranteed to
    materialize in `dest`, which can be a tensor or a memref. In case of a
    tensor, `source` materializes in the future buffer of `dest` and the
    updated destination tensor is returned. If this is not possible, e.g.,
    because the destination tensor is read-only or because its original
    contents are still read later, the input IR fails to bufferize. In case of a
    memref, `source` materializes in `dest`, which is already a buffer. The op
    has no results in that case.
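
    For example (a minimal sketch; `%src`, `%dest` and `%buf` are assumed
    values of the annotated types):

    ```mlir
    // Tensor destination: the updated destination tensor is returned.
    %r = bufferization.materialize_in_destination %src in %dest
        : (tensor<5xf32>, tensor<5xf32>) -> tensor<5xf32>

    // Memref destination: the buffer is written directly; no result.
    bufferization.materialize_in_destination %src in restrict writable %buf
        : (tensor<5xf32>, memref<5xf32>) -> ()
    ```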

    `source`, `dest` and `result` (if present) must have the same runtime shape
    and element type. If the op has a result, the types of `result` and `dest`
    must match exactly (e.g., including any tensor encodings).

    By default, this op bufferizes to a memcpy from the future buffer of the
    `source` tensor to the future buffer of the `dest` tensor or to the `dest`
    buffer. However, transformations such as "empty tensor elimination" may
    rewrite IR such that a computation is performed directly in `dest` and no
    memcpy is needed.

    If `dest` is a buffer, the `writable` attribute must be specified and the
    `restrict` keyword can be specified. These attributes have the same meaning
    as the respective attributes of `bufferization.to_tensor`.

    `writable` indicates that the `dest` buffer is considered writable. It does
    not make sense to materialize a computation in a read-only buffer, so
    `writable` is required.

    `restrict` indicates that there is no `bufferization.to_tensor` op and no
    other `bufferization.materialize_in_destination` op with `dest` (or an alias
    thereof) and "restrict". Only ops with this attribute are considered for
    "empty tensor elimination". As part of empty tensor elimination, a new
    `to_tensor` op with `dest` may be inserted and the `restrict` attribute is
    transferred from this op to the new `to_tensor` op. Having "restrict" on
    this op guarantees that performing empty tensor elimination would not create
    invalid IR (i.e., having multiple `to_tensor restrict` with aliasing
    buffers).

    Note: `writable` could be removed from this op because it must always be set
    for memref destinations. This op has that attribute to make clear the
    requirements on the `dest` operand in the op assembly format.

    Note: If `dest` is a tensor, `tensor.insert_slice` could be used for the
    same purpose, but since tensor dialect ops only indicate *what* should be
    computed but not *where*, it could fold away, causing the computation to
    materialize in a different buffer.
  }];

  let arguments = (ins AnyTensor:$source, AnyShaped:$dest,
                       UnitAttr:$restrict, UnitAttr:$writable);
  let results = (outs Optional<AnyTensor>:$result);

  let extraClassDeclaration = [{
    LogicalResult bufferize(RewriterBase &rewriter,
                            const BufferizationOptions &options);

    bool bufferizesToMemoryRead(OpOperand &opOperand,
                                const AnalysisState &state);

    bool bufferizesToMemoryWrite(OpOperand &opOperand,
                                 const AnalysisState &state);

    bool bufferizesToElementwiseAccess(const AnalysisState &state,
                                       ArrayRef<OpOperand *> opOperands);

    bool mustBufferizeInPlace(OpOperand &opOperand,
                              const AnalysisState &state);

    AliasingValueList getAliasingValues(
        OpOperand &opOperand, const AnalysisState &state);

    RankedTensorType getType() {
      return ::llvm::cast<RankedTensorType>(getResult().getType());
    }

    MutableOperandRange getDpsInitsMutable();

    bool isWritable(Value value, const AnalysisState &state);
  }];

  let builders = [
    // Builder that materializes a source tensor in a tensor destination.
    // Asserts that `dest` has tensor type. Infers the result type of this op
    // from the destination tensor.
    OpBuilder<(ins "Value":$source, "Value":$dest)>
  ];

  let assemblyFormat = [{
    $source `in` (`restrict` $restrict^)? (`writable` $writable^)? $dest
        attr-dict `:` functional-type(operands, results)
  }];
  let hasVerifier = 1;
}

//===----------------------------------------------------------------------===//
// DeallocTensorOp
//===----------------------------------------------------------------------===//

def Bufferization_DeallocTensorOp : Bufferization_Op<"dealloc_tensor",
    [BufferizableOpInterface]> {
  string summary = "release underlying storage format of given tensor";
  string description = [{
    `bufferization.dealloc_tensor` is a buffer deallocation in tensor land. This
    op can be used for manual buffer deallocation. Some bufferizations (such as
    One-Shot Bufferize) take care of buffer deallocation, in which case this op
    is usually not needed. Details can be found in the documentation of the
    respective bufferization passes.

    In case of a dense tensor, this op lowers to a `memref.dealloc` op during
    bufferization.

    In case of a sparse tensor, this op releases the underlying sparse storage
    format for a tensor that materialized earlier through a `new` operation, a
    `convert` operation with annotated destination tensor type (unless the
    convert is folded away), or a `bufferization.alloc_tensor` operation. The
    release operation should only be called once for any materialized tensor.
    After this operation, any subsequent `memref` querying operation on the
    tensor returns undefined results.

    Example:

    ```mlir
    bufferization.dealloc_tensor %tensor : tensor<1024x1024xf64, #CSR>
    ```
  }];

  let arguments = (ins AnyTensor:$tensor);
  let results = (outs);
  let assemblyFormat = "$tensor attr-dict `:` type($tensor)";

  let extraClassDeclaration = [{
    bool bufferizesToMemoryRead(OpOperand &opOperand,
                                const AnalysisState &state) const {
      return false;
    }

    bool bufferizesToMemoryWrite(OpOperand &opOperand,
                                 const AnalysisState &state) const {
      return false;
    }

    AliasingValueList getAliasingValues(
        OpOperand &opOperand, const AnalysisState &state) const {
      return {};
    }

    LogicalResult bufferize(RewriterBase &rewriter,
                            const BufferizationOptions &options);
  }];
}

//===----------------------------------------------------------------------===//
// ToTensorOp
//===----------------------------------------------------------------------===//

def Bufferization_ToTensorOp : Bufferization_Op<"to_tensor", [
    BufferizableOpInterface,
    SameOperandsAndResultShape,
    SameOperandsAndResultElementType,
    AllElementTypesMatch<["memref", "result"]>
  ]> {
  let summary = "create a tensor from a `memref`";
  let description = [{
    An operation that creates a tensor from a `memref`. The result value is a
    tensor whose shape and element type match the memref operand.

    The opposite of this op is `to_memref`. Together, these two ops are
    useful for source/target materializations when doing type conversions
    involving tensors and memrefs.

    Example:

    ```mlir
    // Produces a value of tensor<4x?xf32> type.
    %t = bufferization.to_tensor %m : memref<4x?xf32, #layout, 0> to tensor<4x?xf32>
    ```

    If the `writable` unit attribute is set, the produced tensor is considered
    "writable" during bufferization. Otherwise, every OpOperand that bufferizes
    to a write to the future buffer of the resulting tensor (or an alias
    thereof) will bufferize out-of-place to prevent emitting any writes to
    `memref` during bufferization.

    The `restrict` unit attribute (similar to the C `restrict` keyword)
    indicates that the produced tensor result is the only way for the tensor
    IR to gain access to the `memref` operand (or an alias thereof). E.g.,
    there must be no other `to_tensor` op with the same or with an aliasing
    `memref` operand.

    Note: Only `to_tensor` ops with the `restrict` unit attribute are supported
    by One-Shot Bufferize. Other IR is rejected. (To support `to_tensor`
    without `restrict`, One-Shot Bufferize would have to analyze memref IR.)
    Ops that have incorrect usage of `restrict` may bufferize incorrectly.

    Example:

    ```mlir
    %t = bufferization.to_tensor %m restrict writable : memref<4xf32> to tensor<4xf32>

    // %t is writable, so the tensor.insert may bufferize in-place in the
    // absence of other conflicts.
    %r = tensor.insert %f into %t[%idx] : tensor<4xf32>
    ```

    `to_tensor` ops are not bufferized. They are expected to fold away after
    bufferization. If there are non-bufferizable ops in the IR and
    `allowUnknownOps` is set, they may be part of the resulting IR and not fold
    away. However, such IR is no longer bufferizable with One-Shot Bufferize.
  }];

  let arguments = (ins Arg<AnyRankedOrUnrankedMemRef,
                           "the reference to load from",
                           [MemReadAt<0, FullEffect>]>:$memref,
                       UnitAttr:$restrict, UnitAttr:$writable);
  let results = (outs AnyTensor:$result);

  let extraClassDeclaration = [{
    /// The result of a to_tensor is always a tensor.
    TensorType getType() {
      Type resultType = getResult().getType();
      if (::llvm::isa<TensorType>(resultType))
        return ::llvm::cast<TensorType>(resultType);
      return {};
    }

    //===------------------------------------------------------------------===//
    // BufferizableOpInterface implementation
    //===------------------------------------------------------------------===//

    LogicalResult bufferize(RewriterBase &rewriter,
                            const BufferizationOptions &options) const {
      // to_tensor/to_memref pairs fold away after bufferization.
      return success();
    }

    bool isWritable(Value value, const AnalysisState &state);

    FailureOr<BaseMemRefType> getBufferType(
        Value value, const BufferizationOptions &options,
        SmallVector<Value> &invocationStack) {
      return ::llvm::cast<BaseMemRefType>(getMemref().getType());
    }
  }];

  let assemblyFormat = [{
    $memref (`restrict` $restrict^)? (`writable` $writable^)? attr-dict
      `:` type($memref) `to` type($result)
  }];

  let builders = [
    OpBuilder<(ins "Value":$memref, CArg<"bool", "false">:$restrict, CArg<"bool", "false">:$writeable), [{
      auto rtt = memref::getTensorTypeFromMemRefType(memref.getType());
      build($_builder, $_state, rtt, memref, restrict, writeable);
    }]>
  ];

  let hasCanonicalizer = 1;
  let hasFolder = 1;
}


//===----------------------------------------------------------------------===//
// ToMemrefOp
//===----------------------------------------------------------------------===//

def Bufferization_ToMemrefOp : Bufferization_Op<"to_memref", [
    BufferizableOpInterface,
    SameOperandsAndResultShape,
    SameOperandsAndResultElementType,
    Pure,
    AllShapesMatch<["memref", "tensor"]>,
    AllElementTypesMatch<["memref", "tensor"]>
  ]> {
  let summary = "cast a tensor to memref";
  let description = [{
    An operation that returns the future buffer of a `tensor`.

    ```mlir
    // Result type is memref<4x?xf32, #layout, 0>
    %m = bufferization.to_memref %t : tensor<4x?xf32> to memref<4x?xf32, #layout, 0>
    ```

    This operation is a specialized variant of the built-in
    `unrealized_conversion_cast` and is used to make sure that the IR stays
    valid at any point during the bufferization.

    The `read_only` attribute can optionally be set, indicating to the
    bufferization that the buffer returned by this op (or an alias created from
    the returned buffer) will not be written to.
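
    For example (a sketch; `%t` is an assumed tensor value):

    ```mlir
    %m = bufferization.to_memref %t read_only : tensor<4xf32> to memref<4xf32>
    ```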
  }];

  let arguments = (ins AnyTensor:$tensor, UnitAttr:$read_only);
  let results = (outs AnyRankedOrUnrankedMemRef:$memref);

  let extraClassDeclaration = [{
    //===------------------------------------------------------------------===//
    // BufferizableOpInterface implementation
    //===------------------------------------------------------------------===//

    // Note: ToMemrefOp / ToTensorOp are temporary ops that are inserted at the
    // bufferization boundary. When One-Shot bufferization is complete, there
    // should be no such ops left over. If `allowUnknownOps` is set (or after
    // running a partial bufferization pass), such ops may be part of the
    // resulting IR, but such IR may no longer be analyzable by One-Shot
    // analysis.

    bool bufferizesToMemoryRead(OpOperand &opOperand,
                                const AnalysisState &state) const {
      // It is unknown whether the resulting memref will be read or not.
      return true;
    }

    bool bufferizesToMemoryWrite(OpOperand &opOperand,
                                 const AnalysisState &state) {
      return !getReadOnly();
    }

    AliasingValueList getAliasingValues(
        OpOperand &opOperand, const AnalysisState &state) const {
      return {};
    }

    LogicalResult bufferize(RewriterBase &rewriter,
                            const BufferizationOptions &options);
  }];

  let assemblyFormat = [{
    $tensor (`read_only` $read_only^)? attr-dict `:` type($tensor) `to` type($memref)
  }];

  let hasFolder = 1;
  let hasCanonicalizer = 1;
}

//===----------------------------------------------------------------------===//
// DeallocOp
//===----------------------------------------------------------------------===//

def Bufferization_DeallocOp : Bufferization_Op<"dealloc", [
    AttrSizedOperandSegments, DeclareOpInterfaceMethods<InferTypeOpInterface>
  ]> {
  let summary = "deallocates the given memrefs if no alias is retained";
  let description = [{
    This operation deallocates each of the given memrefs if there is no alias
    to that memref in the list of retained memrefs and the corresponding
    condition value is set. This condition can be used to indicate and pass on
    ownership of memref values (or in other words, the responsibility of
    deallocating that memref). If two memrefs alias each other, only one will be
    deallocated to avoid double free situations.

    The number of variadic `memref` operands (the memrefs to be deallocated)
    must equal the number of variadic `conditions` operands and correspond to
    each other element-wise.

    The `memref` operands must be the originally allocated memrefs. The
    `retained` memref operands, however, may be arbitrary memrefs.

    This operation returns a variadic number of `updatedConditions` results,
    one updated condition per retained memref. An updated condition indicates
    the ownership of the respective retained memref. It is computed as the
    disjunction of all `conditions` operands whose corresponding `memrefs`
    operand aliases with the retained memref. If the retained memref has no
    aliases among `memrefs`, the resulting updated condition is 'false'. This
    is because all memrefs that need to be deallocated within one basic block
    should be added to the same `bufferization.dealloc` operation at the end of
    the block; if no aliasing memref is present, then it does not have to be
    deallocated and thus we don't need to claim ownership. If the memrefs to be
    deallocated are split over multiple dealloc operations (e.g., to avoid
    aliasing checks at runtime between the `memref` operands), then the results
    have to be manually combined using an `arith.ori` operation and all of them
    still require the same list of `retained` memref operands unless the
    (potentially empty) set of aliasing memrefs can be determined statically. In
    that case, the respective `updatedConditions` value can be replaced
    accordingly (e.g., by a canonicalizer).

    Example:
    ```mlir
    %0:3 = bufferization.dealloc (%a0, %a1 : memref<2xf32>, memref<4xi32>)
      if (%cond0, %cond1) retain (%r0, %r1, %r2 : memref<?xf32>, memref<f64>,
      memref<2xi32>)
    ```
    Deallocation will be called on `%a0` if `%cond0` is 'true' and none of
    `%r0`, `%r1`, or `%r2` is an alias of `%a0`. `%a1` will be deallocated when
    `%cond1` is set to 'true' and none of `%r0`, `%r1`, `%r2`, and `%a0` are
    aliases.
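
    If the memrefs to be deallocated are split over multiple dealloc operations
    as described above, the per-op conditions for the same retained memref can
    be combined with `arith.ori` (a sketch with hypothetical values):

    ```mlir
    %own0 = bufferization.dealloc (%a0 : memref<2xf32>) if (%cond0)
              retain (%r0 : memref<?xf32>)
    %own1 = bufferization.dealloc (%a1 : memref<4xi32>) if (%cond1)
              retain (%r0 : memref<?xf32>)
    %own = arith.ori %own0, %own1 : i1
    ```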

    Note that this can be an expensive operation if there are many operands that
    cannot be optimized away. The runtime cost of this operation (assuming that
    nothing is optimized away) is `O(|memrefs|^2+|memrefs|*|retained|)`. The
    cost in terms of memory space is `O(|memrefs|+|retained|)`. As a result, it
    is recommended to place it carefully in the IR such that most operands can
    be optimized away by running the `buffer-deallocation-simplification` pass.
  }];

  let arguments = (ins Variadic<AnyRankedOrUnrankedMemRef>:$memrefs,
                       Variadic<I1>:$conditions,
                       Variadic<AnyRankedOrUnrankedMemRef>:$retained);
  let results = (outs Variadic<I1>:$updatedConditions);

  let assemblyFormat = [{
    (` ``(` $memrefs^ `:` type($memrefs) `)` `if` ` ` `(` $conditions `)` )?
    (`retain` ` ` `(` $retained^ `:` type($retained) `)` )? attr-dict
  }];

  let hasVerifier = 1;
  let hasCanonicalizer = 1;
}

#endif // BUFFERIZATION_OPS