//===- MemRefOps.td - MemRef op definitions ----------------*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef MEMREF_OPS
#define MEMREF_OPS

include "mlir/Dialect/Arith/IR/ArithBase.td"
include "mlir/Dialect/MemRef/IR/MemRefBase.td"
include "mlir/Interfaces/CastInterfaces.td"
include "mlir/Interfaces/ControlFlowInterfaces.td"
include "mlir/Interfaces/CopyOpInterface.td"
include "mlir/Interfaces/InferTypeOpInterface.td"
include "mlir/Interfaces/MemorySlotInterfaces.td"
include "mlir/Interfaces/ShapedOpInterfaces.td"
include "mlir/Interfaces/SideEffectInterfaces.td"
include "mlir/Interfaces/ViewLikeInterface.td"
include "mlir/IR/OpAsmInterface.td"
include "mlir/IR/SymbolInterfaces.td"

/// A TypeAttr for memref types.
def MemRefTypeAttr
    : TypeAttrBase<"::mlir::MemRefType", "memref type attribute"> {
  let constBuilderCall = "::mlir::TypeAttr::get($0)";
}

class MemRef_Op<string mnemonic, list<Trait> traits = []>
    : Op<MemRef_Dialect, mnemonic, traits>;

// Base class for ops with static/dynamic offset, sizes and strides
// attributes/arguments.
class MemRef_OpWithOffsetSizesAndStrides<string mnemonic,
                                         list<Trait> traits = []>
    : MemRef_Op<mnemonic, traits> {
  code extraBaseClassDeclaration = [{
    /// Returns the dynamic sizes for this subview operation if specified.
    ::mlir::Operation::operand_range getDynamicSizes() { return getSizes(); }

    /// Return the list of Range (i.e. offset, size, stride). Each
    /// Range entry contains either the dynamic value or a ConstantIndexOp
    /// constructed with `b` at location `loc`.
    ::mlir::SmallVector<::mlir::Range, 8> getOrCreateRanges(
        ::mlir::OpBuilder &b, ::mlir::Location loc) {
      return ::mlir::getOrCreateRanges(*this, b, loc);
    }
  }];
}

//===----------------------------------------------------------------------===//
// AllocLikeOp
//===----------------------------------------------------------------------===//

// Base class for memref allocating ops: alloca and alloc.
//
//   %0 = alloclike(%m)[%s] : memref<8x?xf32, affine_map<(d0, d1)[s0] -> (d0 + s0, d1)>>
//
class AllocLikeOp<string mnemonic,
                  Resource resource,
                  list<Trait> traits = []> :
    MemRef_Op<mnemonic,
    !listconcat([
      AttrSizedOperandSegments
    ], traits)> {

  let arguments = (ins Variadic<Index>:$dynamicSizes,
                       // The symbolic operands (the ones in square brackets)
                       // bind to the symbols of the memref's layout map.
                       Variadic<Index>:$symbolOperands,
                       ConfinedAttr<OptionalAttr<I64Attr>,
                                [IntMinValue<0>]>:$alignment);
  let results = (outs Res<AnyMemRef, "",
                          [MemAlloc<resource, 0, FullEffect>]>:$memref);

  let builders = [
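    // Build from the result memref type alone; no dynamic sizes or symbol
    // operands are attached.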
    OpBuilder<(ins "MemRefType":$memrefType,
                  CArg<"IntegerAttr", "IntegerAttr()">:$alignment), [{
      return build($_builder, $_state, memrefType, {}, alignment);
    }]>,
    OpBuilder<(ins "MemRefType":$memrefType, "ValueRange":$dynamicSizes,
                  CArg<"IntegerAttr", "IntegerAttr()">:$alignment), [{
      return build($_builder, $_state, memrefType, dynamicSizes, {}, alignment);
    }]>,
    OpBuilder<(ins "MemRefType":$memrefType, "ValueRange":$dynamicSizes,
                  "ValueRange":$symbolOperands,
                  CArg<"IntegerAttr", "{}">:$alignment), [{
      $_state.types.push_back(memrefType);
      $_state.addOperands(dynamicSizes);
      $_state.addOperands(symbolOperands);
      $_state.addAttribute(getOperandSegmentSizeAttr(),
          $_builder.getDenseI32ArrayAttr({
              static_cast<int32_t>(dynamicSizes.size()),
              static_cast<int32_t>(symbolOperands.size())}));
      if (alignment)
        $_state.addAttribute(getAlignmentAttrStrName(), alignment);
    }]>,
    OpBuilder<(ins "ArrayRef<OpFoldResult>":$sizes, "Type":$elementType,
                   CArg<"Attribute", "{}">:$memorySpace), [{
      SmallVector<int64_t> staticShape;
      SmallVector<Value> dynamicSizes;
      dispatchIndexOpFoldResults(sizes, dynamicSizes, staticShape);
      MemRefLayoutAttrInterface layout;
      MemRefType memrefType = MemRefType::get(staticShape, elementType, layout,
                                              memorySpace);
      return build($_builder, $_state, memrefType, dynamicSizes);
    }]>
  ];

  let extraClassDeclaration = [{
    static StringRef getAlignmentAttrStrName() { return "alignment"; }

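    /// The result of an alloc-like op is always a memref.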
    MemRefType getType() { return ::llvm::cast<MemRefType>(getResult().getType()); }

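    /// Return one entry per result dimension: the static size as an index
    /// attribute, or the corresponding dynamic size value.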
    SmallVector<OpFoldResult> getMixedSizes() {
      SmallVector<OpFoldResult> result;
      unsigned ctr = 0;
      OpBuilder b(getContext());
      for (int64_t i = 0, e = getType().getRank(); i < e; ++i) {
        if (getType().isDynamicDim(i)) {
          result.push_back(getDynamicSizes()[ctr++]);
        } else {
          result.push_back(b.getIndexAttr(getType().getShape()[i]));
        }
      }
      return result;
    }
  }];

  let assemblyFormat = [{
    `(`$dynamicSizes`)` (`` `[` $symbolOperands^ `]`)? attr-dict `:` type($memref)
  }];

  let hasCanonicalizer = 1;
  let hasVerifier = 1;
}

//===----------------------------------------------------------------------===//
// AssumeAlignmentOp
//===----------------------------------------------------------------------===//

def AssumeAlignmentOp : MemRef_Op<"assume_alignment"> {
  let summary =
      "assertion that gives alignment information to the input memref";
  let description = [{
    The `assume_alignment` operation takes a memref and an integer alignment
    value. It internally annotates the buffer with the given alignment. If
    the buffer isn't aligned to the given alignment, the behavior is undefined.

    This operation doesn't affect the semantics of a correct program. It's for
    optimization only, and the optimization is best-effort.
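
    Example (illustrative; `%0` stands for any memref of the shown type):

    ```mlir
    memref.assume_alignment %0, 64 : memref<8x64xf32>
    ```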
  }];
  let arguments = (ins AnyMemRef:$memref,
                       ConfinedAttr<I32Attr, [IntPositive]>:$alignment);
  let results = (outs);

  let assemblyFormat = "$memref `,` $alignment attr-dict `:` type($memref)";
  let hasVerifier = 1;
}

//===----------------------------------------------------------------------===//
// AllocOp
//===----------------------------------------------------------------------===//

def MemRef_AllocOp : AllocLikeOp<"alloc", DefaultResource, [
    DeclareOpInterfaceMethods<OpAsmOpInterface, ["getAsmResultNames"]>]> {
  let summary = "memory allocation operation";
  let description = [{
    The `alloc` operation allocates a region of memory, as specified by its
    memref type.

    Example:

    ```mlir
    %0 = memref.alloc() : memref<8x64xf32, 1>
    ```

    The optional list of dimension operands is bound to the dynamic dimensions
    specified in its memref type. In the example below, the SSA value `%d` is
    bound to the second dimension of the memref (which is dynamic).

    ```mlir
    %0 = memref.alloc(%d) : memref<8x?xf32, 1>
    ```

    The optional list of symbol operands is bound to the symbols of the
    memref's affine map. In the example below, the SSA value `%s` is bound to
    the symbol `s0` in the affine map specified in the alloc's memref type.

    ```mlir
    %0 = memref.alloc()[%s] : memref<8x64xf32,
                              affine_map<(d0, d1)[s0] -> ((d0 + s0), d1)>, 1>
    ```

    This operation returns a single SSA value of memref type, which can be used
    by subsequent load and store operations.

    The optional `alignment` attribute may be specified to ensure that the
    region of memory that will be indexed is aligned at the specified byte
    boundary.

    ```mlir
    %0 = memref.alloc()[%s] {alignment = 8} :
      memref<8x64xf32, affine_map<(d0, d1)[s0] -> ((d0 + s0), d1)>, 1>
    ```
  }];
  let hasVerifier = 1;
}

//===----------------------------------------------------------------------===//
// ReallocOp
//===----------------------------------------------------------------------===//


def MemRef_ReallocOp : MemRef_Op<"realloc"> {
  let summary = "memory reallocation operation";
  let description = [{
    The `realloc` operation changes the size of a memory region. The memory
    region is specified by a 1D source memref and the size of the new memory
    region is specified by a 1D result memref type and an optional dynamic Value
    of `Index` type. The source and the result memref must be in the same memory
    space and have the same element type.

    The operation may move the memory region to a new location. In this case,
    the content of the memory block is preserved up to the lesser of the new
    and old sizes. If the new size is larger, the value of the extended memory
    is undefined. This is consistent with the ISO C realloc.

    The operation returns an SSA value for the memref.

    Example:

    ```mlir
    %0 = memref.realloc %src : memref<64xf32> to memref<124xf32>
    ```

    The source memref may have a dynamic shape, in which case, the compiler will
    generate code to extract its size from the runtime data structure for the
    memref.

    ```mlir
    %1 = memref.realloc %src : memref<?xf32> to memref<124xf32>
    ```

    If the result memref has a dynamic shape, a result dimension operand is
    needed to specify its dynamic dimension. In the example below, the SSA value
    `%d` specifies the unknown dimension of the result memref.

    ```mlir
    %2 = memref.realloc %src(%d) : memref<?xf32> to memref<?xf32>
    ```

    An optional `alignment` attribute may be specified to ensure that the
    region of memory that will be indexed is aligned at the specified byte
    boundary. This is consistent with the fact that `memref.alloc` supports such
    an optional alignment attribute. Note that the ISO C standard provides no
    alignment parameter for either allocation or reallocation; there is
    `aligned_alloc`, but no `aligned_realloc`.

    ```mlir
    %3 = memref.realloc %src {alignment = 8} : memref<64xf32> to memref<124xf32>
    ```

    Referencing the memref through the old SSA value after realloc is undefined
    behavior.

    ```mlir
    %new = memref.realloc %old : memref<64xf32> to memref<124xf32>
    %4 = memref.load %new[%index]   // ok
    %5 = memref.load %old[%index]   // undefined behavior
    ```
  }];

  // Note that we conceptually mark the operands as freeing the incoming
  // memref and allocating the resulting memref, even though this may not
  // physically happen on each execution.

  let arguments = (ins Arg<MemRefRankOf<[AnyType], [1]>, "",
                                        [MemFreeAt<0, FullEffect>]>:$source,
                   Optional<Index>:$dynamicResultSize,
                   ConfinedAttr<OptionalAttr<I64Attr>,
                                [IntMinValue<0>]>:$alignment);

  let results = (outs Res<MemRefRankOf<[AnyType], [1]>, "",
                                       [MemAlloc<DefaultResource, 1,
                                                 FullEffect>]>);

  let builders = [
    OpBuilder<(ins "MemRefType":$resultType,
                  "Value":$source,
                  CArg<"Value", "Value()">:$dynamicResultSize), [{
      return build($_builder, $_state, resultType, source, dynamicResultSize,
                   IntegerAttr());
    }]>];

  let extraClassDeclaration = [{
    /// The result of a realloc is always a memref.
    MemRefType getType() { return ::llvm::cast<MemRefType>(getResult().getType()); }
  }];

  let assemblyFormat = [{
    $source (`(` $dynamicResultSize^ `)`)? attr-dict
    `:` type($source) `to` type(results)
  }];

  let hasCanonicalizer = 1;
  let hasVerifier = 1;
}

//===----------------------------------------------------------------------===//
// AllocaOp
//===----------------------------------------------------------------------===//

def MemRef_AllocaOp : AllocLikeOp<"alloca", AutomaticAllocationScopeResource, [
    DeclareOpInterfaceMethods<OpAsmOpInterface, ["getAsmResultNames"]>,
    DeclareOpInterfaceMethods<PromotableAllocationOpInterface>,
    DeclareOpInterfaceMethods<DestructurableAllocationOpInterface>]> {
  let summary = "stack memory allocation operation";
  let description = [{
    The `alloca` operation allocates memory on the stack, to be automatically
    released when control transfers back from the region of its closest
    surrounding operation with an
    [`AutomaticAllocationScope`](../Traits.md/#automaticallocationscope) trait.
    The amount of memory allocated is specified by its memref and additional
    operands. For example:

    ```mlir
    %0 = memref.alloca() : memref<8x64xf32>
    ```

    The optional list of dimension operands is bound to the dynamic dimensions
    specified in its memref type. In the example below, the SSA value `%d` is
    bound to the second dimension of the memref (which is dynamic).

    ```mlir
    %0 = memref.alloca(%d) : memref<8x?xf32>
    ```

    The optional list of symbol operands is bound to the symbols of the
    memref's affine map. In the example below, the SSA value `%s` is bound to
    the symbol `s0` in the affine map specified in the alloca's memref type.

    ```mlir
    %0 = memref.alloca()[%s] : memref<8x64xf32,
                               affine_map<(d0, d1)[s0] -> ((d0 + s0), d1)>>
    ```

    This operation returns a single SSA value of memref type, which can be used
    by subsequent load and store operations. An optional alignment attribute, if
    specified, guarantees alignment at least to that boundary. If not specified,
    an alignment on any convenient boundary compatible with the type will be
    chosen.
  }];
  let hasVerifier = 1;
}

//===----------------------------------------------------------------------===//
// AllocaScopeOp
//===----------------------------------------------------------------------===//

def MemRef_AllocaScopeOp : MemRef_Op<"alloca_scope",
      [AutomaticAllocationScope,
       DeclareOpInterfaceMethods<RegionBranchOpInterface>,
       SingleBlockImplicitTerminator<"AllocaScopeReturnOp">,
       RecursiveMemoryEffects,
       NoRegionArguments]> {
  let summary = "explicitly delimited scope for stack allocation";
  let description = [{
    The `memref.alloca_scope` operation represents an explicitly-delimited
    scope for alloca allocations. Any `memref.alloca` operations used within
    this scope are automatically cleaned up once control flow exits the nested
    region. For example:

    ```mlir
    memref.alloca_scope {
      %myalloca = memref.alloca() : memref<4x3xf32>
      ...
    }
    ```

    Here, the `%myalloca` memref is valid within the explicitly delimited scope
    and is automatically deallocated at the end of the given region. Conceptually,
    `memref.alloca_scope` is a passthrough operation with
    `AutomaticAllocationScope` that spans the body of the region within the operation.

    `memref.alloca_scope` may also return results that are defined in the nested
    region. To return a value, one should use the `memref.alloca_scope.return`
    operation:

    ```mlir
    %result = memref.alloca_scope {
      ...
      memref.alloca_scope.return %value
    }
    ```

    If `memref.alloca_scope` returns no value, the `memref.alloca_scope.return`
    can be left out, and it will be inserted implicitly.
  }];

  let results = (outs Variadic<AnyType>:$results);
  let regions = (region SizedRegion<1>:$bodyRegion);
  let hasCustomAssemblyFormat = 1;
  let hasCanonicalizer = 1;
}

//===----------------------------------------------------------------------===//
// AllocaScopeReturnOp
//===----------------------------------------------------------------------===//

def MemRef_AllocaScopeReturnOp : MemRef_Op<"alloca_scope.return",
      [HasParent<"AllocaScopeOp">,
       Pure,
       ReturnLike,
       Terminator]> {
  let summary = "terminator for alloca_scope operation";
  let description = [{
    The `memref.alloca_scope.return` operation returns zero or more SSA values
    from the region within `memref.alloca_scope`. If no values are returned,
    the return operation may be omitted. Otherwise, it has to be present
    to indicate which values are going to be returned. For example:

    ```mlir
    memref.alloca_scope.return %value
    ```
  }];

  let arguments = (ins Variadic<AnyType>:$results);
  let builders = [OpBuilder<(ins), [{ /* nothing to do */ }]>];

  let assemblyFormat = "attr-dict ($results^ `:` type($results))?";
}

//===----------------------------------------------------------------------===//
// CastOp
//===----------------------------------------------------------------------===//

def MemRef_CastOp : MemRef_Op<"cast", [
      DeclareOpInterfaceMethods<CastOpInterface>,
      DeclareOpInterfaceMethods<OpAsmOpInterface, ["getAsmResultNames"]>,
      MemRefsNormalizable,
      Pure,
      SameOperandsAndResultShape,
      ViewLikeOpInterface
    ]> {
  let summary = "memref cast operation";
  let description = [{
    The `memref.cast` operation converts a memref from one type to an equivalent
    type with a compatible shape. The source and destination types are
    compatible if:

    a. Both are ranked memref types with the same element type, address space,
    and rank, and:
      1. Both have the same layout or both have compatible strided layouts.
      2. The individual sizes (resp. offset and strides in the case of strided
         memrefs) may convert constant dimensions to dynamic dimensions and
         vice-versa.

    If the cast converts any dimensions from an unknown to a known size, then it
    acts as an assertion that fails at runtime if the dynamic dimensions
    disagree with the resultant destination size.

    Example:

    ```mlir
    // Assert that the input dynamic shape matches the destination static shape.
    %2 = memref.cast %1 : memref<?x?xf32> to memref<4x4xf32>
    // Erase static shape information, replacing it with dynamic information.
    %3 = memref.cast %1 : memref<4xf32> to memref<?xf32>

    // The same holds true for offsets and strides.

    // Assert that the input dynamic shape matches the destination static stride.
    %4 = memref.cast %1 : memref<12x4xf32, strided<[?, ?], offset: ?>> to
                          memref<12x4xf32, strided<[4, 1], offset: 5>>
    // Erase static offset and stride information, replacing it with
    // dynamic information.
    %5 = memref.cast %1 : memref<12x4xf32, strided<[4, 1], offset: 5>> to
                          memref<12x4xf32, strided<[?, ?], offset: ?>>
    ```

    b. Either or both memref types are unranked with the same element type and
    address space.

    Example:

    ```mlir
    // Cast to concrete shape.
    %4 = memref.cast %1 : memref<*xf32> to memref<4x?xf32>

    // Erase rank information.
    %5 = memref.cast %1 : memref<4x?xf32> to memref<*xf32>
    ```
  }];

  let arguments = (ins AnyRankedOrUnrankedMemRef:$source);
  let results = (outs AnyRankedOrUnrankedMemRef:$dest);
  let assemblyFormat = "$source attr-dict `:` type($source) `to` type($dest)";

  let extraClassDeclaration = [{
    /// Returns true if the given CastOp can be folded into its consumer op.
    static bool canFoldIntoConsumerOp(CastOp castOp);

    Value getViewSource() { return getSource(); }
  }];

  let hasFolder = 1;
}

//===----------------------------------------------------------------------===//
// CopyOp
//===----------------------------------------------------------------------===//

def CopyOp : MemRef_Op<"copy", [CopyOpInterface, SameOperandsElementType,
    SameOperandsShape]> {

  let description = [{
    Copies the data from the source to the destination memref.

    Usage:

    ```mlir
    memref.copy %arg0, %arg1 : memref<?xf32> to memref<?xf32>
    ```

    Source and destination are expected to have the same element type and shape.
    Otherwise, the result is undefined. They may have different layouts.
  }];

  let arguments = (ins Arg<AnyRankedOrUnrankedMemRef, "the memref to copy from",
                           [MemReadAt<0, FullEffect>]>:$source,
                       Arg<AnyRankedOrUnrankedMemRef, "the memref to copy to",
                           [MemWriteAt<0, FullEffect>]>:$target);

  let assemblyFormat = [{
    $source `,` $target attr-dict `:` type($source) `to` type($target)
  }];

  let hasCanonicalizer = 1;
  let hasFolder = 1;
}

//===----------------------------------------------------------------------===//
// DeallocOp
//===----------------------------------------------------------------------===//

def MemRef_DeallocOp : MemRef_Op<"dealloc", [MemRefsNormalizable]> {
  let summary = "memory deallocation operation";
  let description = [{
    The `dealloc` operation frees the region of memory referenced by a memref
    which was originally created by the `alloc` operation.
    The `dealloc` operation should not be called on memrefs which alias an
    alloc'd memref (e.g. memrefs returned by `view` operations).

    Example:

    ```mlir
    %0 = memref.alloc() : memref<8x64xf32, affine_map<(d0, d1) -> (d0, d1)>, 1>
    memref.dealloc %0 : memref<8x64xf32, affine_map<(d0, d1) -> (d0, d1)>, 1>
    ```
  }];

  let arguments = (ins Arg<AnyRankedOrUnrankedMemRef, "",
                           [MemFreeAt<0, FullEffect>]>:$memref);

  let hasFolder = 1;
  let assemblyFormat = "$memref attr-dict `:` type($memref)";
}

//===----------------------------------------------------------------------===//
// DimOp
//===----------------------------------------------------------------------===//

def MemRef_DimOp : MemRef_Op<"dim", [
    DeclareOpInterfaceMethods<OpAsmOpInterface, ["getAsmResultNames"]>,
    MemRefsNormalizable,
    ConditionallySpeculatable, NoMemoryEffect,
    ShapedDimOpInterface]> {
  let summary = "dimension index operation";
  let description = [{
    The `dim` operation takes a memref and a dimension operand of type `index`.
    It returns the size of the requested dimension of the given memref.
    If the dimension index is out of bounds, the behavior is undefined.

    The specified memref type is that of the first operand.

    Example:

    ```mlir
    // Always returns 4, can be constant folded:
    %c0 = arith.constant 0 : index
    %x = memref.dim %A, %c0 : memref<4 x ? x f32>

    // Returns the dynamic dimension of %A.
    %c1 = arith.constant 1 : index
    %y = memref.dim %A, %c1 : memref<4 x ? x f32>

    // Equivalent generic form:
    %x = "memref.dim"(%A, %c0) : (memref<4 x ? x f32>, index) -> index
    %y = "memref.dim"(%A, %c1) : (memref<4 x ? x f32>, index) -> index
    ```
  }];

  let arguments = (ins AnyNon0RankedOrUnrankedMemRef:$source,
                       Index:$index);
  let results = (outs Index:$result);

  let assemblyFormat = [{
    attr-dict $source `,` $index `:` type($source)
  }];

  let builders = [
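    // Build a DimOp whose dimension index is the given constant.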
    OpBuilder<(ins "Value":$source, "int64_t":$index)>,
  ];

  let extraClassDeclaration = [{
    /// Helper function to get the index as a simple integer if it is constant.
    std::optional<int64_t> getConstantIndex();

    /// Interface method of ShapedDimOpInterface: Return the source memref.
    Value getShapedValue() { return getSource(); }

    /// Interface method of ShapedDimOpInterface: Return the dimension.
    OpFoldResult getDimension() { return getIndex(); }

    /// Interface method for ConditionallySpeculatable.
    Speculation::Speculatability getSpeculatability();
  }];

  let hasCanonicalizer = 1;
  let hasFolder = 1;
}

//===----------------------------------------------------------------------===//
// DmaStartOp
//===----------------------------------------------------------------------===//

def MemRef_DmaStartOp : MemRef_Op<"dma_start"> {
  let summary = "non-blocking DMA operation that starts a transfer";
  let description = [{
    Syntax:

    ```
    operation ::= `memref.dma_start` ssa-use`[`ssa-use-list`]` `,`
                   ssa-use`[`ssa-use-list`]` `,` ssa-use `,`
                   ssa-use`[`ssa-use-list`]` (`,` ssa-use `,` ssa-use)?
                  `:` memref-type `,` memref-type `,` memref-type
    ```

    DmaStartOp starts a non-blocking DMA operation that transfers data from a
    source memref to a destination memref. The source and destination memrefs
    need not be of the same dimensionality, but need to have the same elemental
    type. The operands include the source and destination memrefs, each followed
    by its indices, the size of the data transfer in terms of the number of
    elements (of the elemental type of the memref), a tag memref with its
    indices, and optionally, at the end, stride and
    number_of_elements_per_stride arguments. The tag location is used by a
    DmaWaitOp to check for completion. The indices of the source memref,
    destination memref, and the tag memref have the same restrictions as any
    load/store. The optional stride arguments should be of 'index' type, and
    specify a stride for the slower memory space (memory space with a lower
    memory space id), transferring chunks of number_of_elements_per_stride
    every stride until %num_elements are transferred. Either both or neither
    of the stride arguments should be specified. If the source and destination
    locations overlap, the behavior of this operation is not defined.

    For example, a DmaStartOp operation that transfers 256 elements of a memref
    '%src' in memory space 0 at indices [%i, %j] to memref '%dst' in memory
    space 1 at indices [%k, %l], would be specified as follows:

    ```mlir
    %num_elements = arith.constant 256 : index
    %idx = arith.constant 0 : index
    %tag = memref.alloc() : memref<1 x i32, affine_map<(d0) -> (d0)>, 2>
    dma_start %src[%i, %j], %dst[%k, %l], %num_elements, %tag[%idx] :
      memref<40 x 128 x f32, affine_map<(d0, d1) -> (d0, d1)>, 0>,
      memref<2 x 1024 x f32, affine_map<(d0, d1) -> (d0, d1)>, 1>,
      memref<1 x i32, affine_map<(d0) -> (d0)>, 2>
    ```

    If %stride and %num_elt_per_stride are specified, the DMA is expected to
    transfer %num_elt_per_stride elements every %stride elements apart, from
    memory space 0, until %num_elements are transferred.

    ```mlir
    dma_start %src[%i, %j], %dst[%k, %l], %num_elements, %tag[%idx], %stride,
              %num_elt_per_stride :
    ```

    * TODO: add additional operands to allow source and destination striding, and
    multiple stride levels.
    * TODO: Consider replacing src/dst memref indices with view memrefs.
  }];
  let arguments = (ins Variadic<AnyType>:$operands);

  let builders = [
    OpBuilder<(ins "Value":$srcMemRef, "ValueRange":$srcIndices,
                   "Value":$destMemRef, "ValueRange":$destIndices,
                   "Value":$numElements, "Value":$tagMemRef,
                   "ValueRange":$tagIndices, CArg<"Value", "{}">:$stride,
                   CArg<"Value", "{}">:$elementsPerStride)>
  ];

  let extraClassDeclaration = [{
    // Returns the source memref for this DMA operation.
    Value getSrcMemRef() { return getOperand(0); }
    OpOperand &getSrcMemRefMutable() { return getOperation()->getOpOperand(0); }
    // Returns the rank (number of indices) of the source MemRefType.
    unsigned getSrcMemRefRank() {
      return ::llvm::cast<MemRefType>(getSrcMemRef().getType()).getRank();
    }
    // Returns the source memref indices for this DMA operation.
    operand_range getSrcIndices() {
      return {(*this)->operand_begin() + 1,
              (*this)->operand_begin() + 1 + getSrcMemRefRank()};
    }

    // Returns the destination memref for this DMA operation.
    Value getDstMemRef() { return getOperand(1 + getSrcMemRefRank()); }
    OpOperand &getDstMemRefMutable() { return getOperation()->getOpOperand(1 + getSrcMemRefRank()); }
    // Returns the rank (number of indices) of the destination MemRefType.
    unsigned getDstMemRefRank() {
      return ::llvm::cast<MemRefType>(getDstMemRef().getType()).getRank();
    }
    unsigned getSrcMemorySpace() {
      return ::llvm::cast<MemRefType>(getSrcMemRef().getType()).getMemorySpaceAsInt();
    }
    unsigned getDstMemorySpace() {
      return ::llvm::cast<MemRefType>(getDstMemRef().getType()).getMemorySpaceAsInt();
    }

    // Returns the destination memref indices for this DMA operation.
    operand_range getDstIndices() {
      return {(*this)->operand_begin() + 1 + getSrcMemRefRank() + 1,
              (*this)->operand_begin() + 1 + getSrcMemRefRank() + 1 +
                  getDstMemRefRank()};
    }

    // Returns the number of elements being transferred by this DMA operation.
    Value getNumElements() {
      return getOperand(1 + getSrcMemRefRank() + 1 + getDstMemRefRank());
    }

    // Returns the tag memref for this DMA operation.
    Value getTagMemRef() {
      return getOperand(1 + getSrcMemRefRank() + 1 + getDstMemRefRank() + 1);
    }
    OpOperand &getTagMemRefMutable() {
      return getOperation()->getOpOperand(1 + getSrcMemRefRank() + 1 + getDstMemRefRank() + 1);
    }

    // Returns the rank (number of indices) of the tag MemRefType.
    unsigned getTagMemRefRank() {
      return ::llvm::cast<MemRefType>(getTagMemRef().getType()).getRank();
    }

    // Returns the tag memref indices for this DMA operation.
    operand_range getTagIndices() {
      unsigned tagIndexStartPos =
          1 + getSrcMemRefRank() + 1 + getDstMemRefRank() + 1 + 1;
      return {(*this)->operand_begin() + tagIndexStartPos,
              (*this)->operand_begin() + tagIndexStartPos + getTagMemRefRank()};
    }

    /// Returns true if this is a DMA from a slower memory space to a faster
    /// one, i.e., the destination memory space is the faster one.
    bool isDestMemorySpaceFaster() {
      return (getSrcMemorySpace() < getDstMemorySpace());
    }

    /// Returns true if this is a DMA from a faster memory space to a slower
    /// one, i.e., the source memory space is the faster one.
    bool isSrcMemorySpaceFaster() {
      // Assumes that a lower number is for a slower memory space.
      return (getDstMemorySpace() < getSrcMemorySpace());
    }

    /// Given a DMA start operation, returns the operand position of either the
    /// source or destination memref depending on the one that is at the higher
    /// level of the memory hierarchy. Asserts failure if neither is true.
    unsigned getFasterMemPos() {
      assert(isSrcMemorySpaceFaster() || isDestMemorySpaceFaster());
      return isSrcMemorySpaceFaster() ? 0 : getSrcMemRefRank() + 1;
    }

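    // Returns true if the optional stride and elements-per-stride operands
    // were provided, i.e. the operand count exceeds that of the unstrided form.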
    bool isStrided() {
      return getNumOperands() != 1 + getSrcMemRefRank() + 1 +
                                 getDstMemRefRank() + 1 + 1 +
                                 getTagMemRefRank();
    }

    Value getStride() {
      if (!isStrided())
        return nullptr;
      return getOperand(getNumOperands() - 1 - 1);
    }

    Value getNumElementsPerStride() {
      if (!isStrided())
        return nullptr;
      return getOperand(getNumOperands() - 1);
    }

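    // Reports a read effect on the source and tag memrefs and a write effect
    // on the destination memref.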
    void getEffects(
        SmallVectorImpl<SideEffects::EffectInstance<MemoryEffects::Effect>> &
        effects) {
      effects.emplace_back(MemoryEffects::Read::get(), &getSrcMemRefMutable(),
                           SideEffects::DefaultResource::get());
      effects.emplace_back(MemoryEffects::Write::get(), &getDstMemRefMutable(),
                           SideEffects::DefaultResource::get());
      effects.emplace_back(MemoryEffects::Read::get(), &getTagMemRefMutable(),
                           SideEffects::DefaultResource::get());
    }
  }];
  let hasCustomAssemblyFormat = 1;
  let hasFolder = 1;
  let hasVerifier = 1;
}

//===----------------------------------------------------------------------===//
// DmaWaitOp
//===----------------------------------------------------------------------===//

def MemRef_DmaWaitOp : MemRef_Op<"dma_wait"> {
  let summary = "blocking DMA operation that waits for transfer completion";
  let description = [{
   DmaWaitOp blocks until the completion of a DMA operation associated with the
   tag element '%tag[%index]'. %tag is a memref, and %index has to be an index
   with the same restrictions as any load/store index. %num_elements is the
   number of elements associated with the DMA operation.

   Example:

   ```mlir
    dma_start %src[%i], %dst[%k], %num_elements, %tag[%index] :
      memref<2048 x f32, affine_map<(d0) -> (d0)>, 0>,
      memref<256 x f32, affine_map<(d0) -> (d0)>, 1>,
      memref<1 x i32, affine_map<(d0) -> (d0)>, 2>
    ...
    ...
    dma_wait %tag[%index], %num_elements : memref<1 x i32, affine_map<(d0) -> (d0)>, 2>
    ```
  }];
  let arguments = (ins AnyMemRef:$tagMemRef,
                       Variadic<Index>:$tagIndices,
                       Index:$numElements);
  let assemblyFormat = [{
    $tagMemRef `[` $tagIndices `]` `,` $numElements attr-dict `:` type($tagMemRef)
  }];
  let extraClassDeclaration = [{
    /// Returns the rank (number of indices) of the tag memref.
    unsigned getTagMemRefRank() {
      return ::llvm::cast<MemRefType>(getTagMemRef().getType()).getRank();
    }
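    // Reports a read effect on the tag memref, which is polled for completion.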
    void getEffects(
        SmallVectorImpl<SideEffects::EffectInstance<MemoryEffects::Effect>> &
        effects) {
      effects.emplace_back(MemoryEffects::Read::get(), &getTagMemRefMutable(),
                           SideEffects::DefaultResource::get());
    }
  }];
  let hasFolder = 1;
  let hasVerifier = 1;
}

//===----------------------------------------------------------------------===//
// ExtractAlignedPointerAsIndexOp
//===----------------------------------------------------------------------===//

def MemRef_ExtractAlignedPointerAsIndexOp :
  MemRef_Op<"extract_aligned_pointer_as_index", [
    DeclareOpInterfaceMethods<OpAsmOpInterface, ["getAsmResultNames"]>,
    Pure,
    SameVariadicResultSize]> {
  let summary = "Extracts a memref's underlying aligned pointer as an index";
  let description = [{
    Extracts the underlying aligned pointer as an index.

    This operation is useful for lowering to lower-level dialects while still
    avoiding the need to define a pointer type in higher-level dialects such as
    the memref dialect.

    This operation is intended solely as a step during lowering; it has no side
    effects. A reverse operation that creates a memref from an index interpreted
    as a pointer is explicitly discouraged.

    Example:

    ```
      %0 = memref.extract_aligned_pointer_as_index %arg : memref<4x4xf32> -> index
      %1 = arith.index_cast %0 : index to i64
      %2 = llvm.inttoptr %1 : i64 to !llvm.ptr
      call @foo(%2) : (!llvm.ptr) -> ()
    ```
  }];

  let arguments = (ins
    AnyRankedOrUnrankedMemRef:$source
  );
  let results = (outs Index:$aligned_pointer);

  let assemblyFormat = [{
    $source `:` type($source) `->` type(results) attr-dict
  }];
}

//===----------------------------------------------------------------------===//
// ExtractStridedMetadataOp
//===----------------------------------------------------------------------===//

def MemRef_ExtractStridedMetadataOp : MemRef_Op<"extract_strided_metadata", [
    DeclareOpInterfaceMethods<OpAsmOpInterface, ["getAsmResultNames"]>,
    Pure,
    SameVariadicResultSize,
    ViewLikeOpInterface,
    InferTypeOpAdaptor]> {
  let summary = "Extracts a buffer base with offset and strides";
  let description = [{
    Extracts a base buffer, offset and strides. This op allows additional layers
    of transformations and foldings to be added as lowering progresses from
    higher-level dialect to lower-level dialects such as the LLVM dialect.

    The op requires a strided memref source operand. If the source operand is not
    a strided memref, then verification fails.

    This operation also complements the existing `memref.dim` op. Since
    strides, offsets and the base pointer are not otherwise independently
    accessible, exposing them here lets this op compose with its natural
    complement op: `memref.reinterpret_cast`.

    Intended Use Cases:

    The main use case is to expose the logic for manipulating memref metadata
    at a higher level than the LLVM dialect.
    This makes lowering more progressive and brings the following benefits:
      - not all users of MLIR want to lower to LLVM, and the information to
        e.g. lower to library calls---like libxsmm---or to SPIR-V was not
        otherwise available.
      - foldings and canonicalizations can happen at a higher level in MLIR:
        before this op existed, lowering to LLVM would create large amounts of
        LLVM IR. Even when LLVM does a good job at folding the low-level IR from
        a performance perspective, it is unnecessarily opaque and inefficient to
        send unkempt IR to LLVM.

    Example:

    ```mlir
      %base, %offset, %sizes:2, %strides:2 =
        memref.extract_strided_metadata %memref :
          memref<10x?xf32> -> memref<f32>, index, index, index, index, index

      // After folding, the type of %m2 can be memref<10x?xf32> and further
      // folded to %memref.
      %m2 = memref.reinterpret_cast %base to
          offset: [%offset],
          sizes: [%sizes#0, %sizes#1],
          strides: [%strides#0, %strides#1]
        : memref<f32> to memref<?x?xf32, strided<[?, ?], offset: ?>>
    ```
  }];

  let arguments = (ins
    AnyStridedMemRef:$source
  );
  let results = (outs
    AnyStridedMemRefOfRank<0>:$base_buffer,
    Index:$offset,
    Variadic<Index>:$sizes,
    Variadic<Index>:$strides
  );

  let assemblyFormat = [{
    $source `:` type($source) `->` type(results) attr-dict
  }];

  let extraClassDeclaration = [{
    /// Return a vector of all the static or dynamic sizes of the op, while
    /// statically inferring the sizes of the dynamic sizes, when possible.
    /// This is best effort.
    /// E.g., if `getSizes` returns `[%dyn_size0, %dyn_size1]`, but the
    /// source memref type is `memref<2x8xi16>`, this method will
    /// return `[2, 8]`.
    /// Similarly if the resulting memref type is `memref<2x?xi16>`, but
    /// `%dyn_size1` can statically be pinned to a constant value, this
    /// constant value is returned instead of `%dyn_size1`.
    SmallVector<OpFoldResult> getConstifiedMixedSizes();
    /// Similar to `getConstifiedMixedSizes` but for strides.
    SmallVector<OpFoldResult> getConstifiedMixedStrides();
    /// Similar to `getConstifiedMixedSizes` but for the offset.
    OpFoldResult getConstifiedMixedOffset();

    ::mlir::Value getViewSource() { return getSource(); }
  }];

  let hasFolder = 1;
}

//===----------------------------------------------------------------------===//
// GenericAtomicRMWOp
//===----------------------------------------------------------------------===//

def GenericAtomicRMWOp : MemRef_Op<"generic_atomic_rmw", [
      SingleBlockImplicitTerminator<"AtomicYieldOp">,
      TypesMatchWith<"result type matches element type of memref",
                     "memref", "result",
                     "::llvm::cast<MemRefType>($_self).getElementType()">
    ]> {
  let summary = "atomic read-modify-write operation with a region";
  let description = [{
    The `memref.generic_atomic_rmw` operation provides a way to perform a
    read-modify-write sequence that is free from data races. The memref operand
    represents the buffer that the read and write will be performed against, as
    accessed by the specified indices. The arity of the indices is the rank of
    the memref. The result represents the latest value that was stored. The
    region contains the code for the modification itself. The entry block has
    a single argument that represents the value stored in `memref[indices]`
    before the write is performed. No side-effecting ops are allowed in the
    body of `GenericAtomicRMWOp`.

    Example:

    ```mlir
    %x = memref.generic_atomic_rmw %I[%i] : memref<10xf32> {
      ^bb0(%current_value : f32):
        %c1 = arith.constant 1.0 : f32
        %inc = arith.addf %c1, %current_value : f32
        memref.atomic_yield %inc : f32
    }
    ```
  }];

  let arguments = (ins
      Arg<MemRefOf<[AnySignlessInteger, AnyFloat]>, "the reference to read from and write to", [MemRead, MemWrite]>:$memref,
      Variadic<Index>:$indices);

  let results = (outs
      AnyTypeOf<[AnySignlessInteger, AnyFloat]>:$result);

  let regions = (region AnyRegion:$atomic_body);

  let skipDefaultBuilders = 1;
  let builders = [OpBuilder<(ins "Value":$memref, "ValueRange":$ivs)>];

  let extraClassDeclaration = [{
    // TODO: remove post migrating callers.
    Region &body() { return getRegion(); }

    // The value stored in memref[ivs].
    Value getCurrentValue() {
      return getRegion().getArgument(0);
    }
    MemRefType getMemRefType() {
      return ::llvm::cast<MemRefType>(getMemref().getType());
    }
  }];
  let hasCustomAssemblyFormat = 1;
  let hasVerifier = 1;
}

def AtomicYieldOp : MemRef_Op<"atomic_yield", [
      HasParent<"GenericAtomicRMWOp">,
      Pure,
      Terminator
    ]> {
  let summary = "yield operation for GenericAtomicRMWOp";
  let description = [{
    `memref.atomic_yield` yields an SSA value from a
    `GenericAtomicRMWOp` region.
1071
1072  let arguments = (ins AnyType:$result);
1073  let assemblyFormat = "$result attr-dict `:` type($result)";
1074  let hasVerifier = 1;
1075}
1076
1077//===----------------------------------------------------------------------===//
1078// GetGlobalOp
1079//===----------------------------------------------------------------------===//
1080
1081def MemRef_GetGlobalOp : MemRef_Op<"get_global",
1082    [Pure, DeclareOpInterfaceMethods<SymbolUserOpInterface>]> {
1083  let summary = "get the memref pointing to a global variable";
1084  let description = [{
1085     The `memref.get_global` operation retrieves the memref pointing to a
1086     named global variable. If the global variable is marked constant, writing
1087     to the result memref (such as through a `memref.store` operation) is
1088     undefined.
1089
1090     Example:
1091
1092     ```mlir
1093     %x = memref.get_global @foo : memref<2xf32>
1094     ```
1095  }];
1096
1097  let arguments = (ins FlatSymbolRefAttr:$name);
1098  let results = (outs AnyStaticShapeMemRef:$result);
1099  let assemblyFormat = "$name `:` type($result) attr-dict";
1100}

//===----------------------------------------------------------------------===//
// GlobalOp
//===----------------------------------------------------------------------===//

def MemRef_GlobalOp : MemRef_Op<"global", [Symbol]> {
  let summary = "declare or define a global memref variable";
  let description = [{
    The `memref.global` operation declares or defines a named global memref
    variable. The backing memory for the variable is allocated statically and is
    described by the type of the variable (which should be a statically shaped
    memref type). The operation is a declaration if no `initial_value` is
    specified, else it is a definition. The `initial_value` can either be a unit
    attribute to represent a definition of an uninitialized global variable, or
    an elements attribute to represent the definition of a global variable with
    an initial value. The global variable can also be marked constant using the
    `constant` unit attribute. Writing to such constant global variables is
    undefined.

    The global variable can be accessed by using `memref.get_global` to
    retrieve the memref for the global variable. Note that the memref
    for such a global variable itself is immutable (i.e., memref.get_global for
    a given global variable will always return the same memref descriptor).

    Example:

    ```mlir
    // Private variable with an initial value.
    memref.global "private" @x : memref<2xf32> = dense<[0.0, 2.0]>

    // Private variable with an initial value and an alignment (power of 2).
    memref.global "private" @x : memref<2xf32> = dense<[0.0, 2.0]> {alignment = 64}

    // Declaration of an external variable.
    memref.global "private" @y : memref<4xi32>

    // Uninitialized externally visible variable.
    memref.global @z : memref<3xf16> = uninitialized

    // Externally visible constant variable.
    memref.global constant @c : memref<2xi32> = dense<[1, 4]>
    ```
  }];

  let arguments = (ins SymbolNameAttr:$sym_name,
                       OptionalAttr<StrAttr>:$sym_visibility,
                       MemRefTypeAttr:$type,
                       OptionalAttr<AnyAttr>:$initial_value,
                       UnitAttr:$constant,
                       OptionalAttr<I64Attr>:$alignment);

  let assemblyFormat = [{
       ($sym_visibility^)?
       (`constant` $constant^)?
       $sym_name `:`
       custom<GlobalMemrefOpTypeAndInitialValue>($type, $initial_value)
       attr-dict
  }];

  let extraClassDeclaration = [{
     bool isExternal() { return !getInitialValue(); }
     bool isUninitialized() {
       return !isExternal() && ::llvm::isa<UnitAttr>(*getInitialValue());
     }
     /// Returns the constant initial value if the memref.global is a constant,
     /// or null otherwise.
     ElementsAttr getConstantInitValue();
  }];
  let hasVerifier = 1;
}

//===----------------------------------------------------------------------===//
// LoadOp
//===----------------------------------------------------------------------===//

def LoadOp : MemRef_Op<"load",
     [TypesMatchWith<"result type matches element type of 'memref'",
                     "memref", "result",
                     "::llvm::cast<MemRefType>($_self).getElementType()">,
      MemRefsNormalizable,
      DeclareOpInterfaceMethods<PromotableMemOpInterface>,
      DeclareOpInterfaceMethods<DestructurableAccessorOpInterface>]> {
  let summary = "load operation";
  let description = [{
    The `load` op reads an element from a memref specified by an index list. The
    output of load is a new value with the same type as the elements of the
    memref. The arity of indices is the rank of the memref (i.e., if the memref
    loaded from is of rank 3, then 3 indices are required for the load following
    the memref identifier).

    In an `affine.if` or `affine.for` body, the indices of a load are restricted
    to SSA values bound to surrounding loop induction variables,
    [symbols](Affine.md/#dimensions-and-symbols), results of
    constant operations, or the result of an
    `affine.apply` operation that can in turn take as arguments all of the
    aforementioned SSA values or, recursively, the result of such an
    `affine.apply` operation.

    Example:

    ```mlir
    %1 = affine.apply affine_map<(d0, d1) -> (3*d0)> (%i, %j)
    %2 = affine.apply affine_map<(d0, d1) -> (d1+1)> (%i, %j)
    %12 = memref.load %A[%1, %2] : memref<8x?xi32, #layout, memspace0>

    // Example of an indirect load (treated as non-affine)
    %3 = affine.apply affine_map<(d0) -> (2*d0 + 1)>(%12)
    %13 = memref.load %A[%3, %2] : memref<4x?xi32, #layout, memspace0>
    ```

    **Context:** The `load` and `store` operations are specifically crafted to
    fully resolve a reference to an element of a memref, and (in
    `affine.if` and `affine.for` operations) the compiler can follow use-def
    chains (e.g. through [`affine.apply`](Affine.md/#affineapply-affineapplyop)
    operations) to precisely analyze references at compile-time using polyhedral
    techniques. This is possible because of the
    [restrictions on dimensions and symbols](Affine.md/#restrictions-on-dimensions-and-symbols)
    in these contexts.
  }];

  let arguments = (ins Arg<AnyMemRef, "the reference to load from",
                           [MemRead]>:$memref,
                       Variadic<Index>:$indices,
                       DefaultValuedOptionalAttr<BoolAttr, "false">:$nontemporal);
  let results = (outs AnyType:$result);

  let extraClassDeclaration = [{
    Value getMemRef() { return getOperand(0); }
    void setMemRef(Value value) { setOperand(0, value); }
    MemRefType getMemRefType() {
      return ::llvm::cast<MemRefType>(getMemRef().getType());
    }
  }];

  let hasFolder = 1;
  let hasVerifier = 1;

  let assemblyFormat = "$memref `[` $indices `]` attr-dict `:` type($memref)";
}

//===----------------------------------------------------------------------===//
// MemorySpaceCastOp
//===----------------------------------------------------------------------===//

def MemRef_MemorySpaceCastOp : MemRef_Op<"memory_space_cast", [
      DeclareOpInterfaceMethods<CastOpInterface>,
      DeclareOpInterfaceMethods<OpAsmOpInterface, ["getAsmResultNames"]>,
      MemRefsNormalizable,
      Pure,
      SameOperandsAndResultElementType,
      SameOperandsAndResultShape,
      ViewLikeOpInterface
    ]> {
  let summary = "memref memory space cast operation";
  let description = [{
    This operation casts memref values between memory spaces.
    The input and result will be memrefs of the same type and shape that alias
    the same underlying memory, though, for some casts on some targets,
    the underlying values of the pointer stored in the memref may be affected
    by the cast.

    The input and result must have the same shape, element type, rank, and layout.

    If the source and target address spaces are the same, this operation is a noop.

    Example:

    ```mlir
    // Cast a GPU private memory attribution into a generic pointer
    %2 = memref.memory_space_cast %1 : memref<?xf32, 5> to memref<?xf32>
    // Cast a generic pointer to workgroup-local memory
    %4 = memref.memory_space_cast %3 : memref<5x4xi32> to memref<5x4xi32, 3>
    // Cast between two non-default memory spaces
    %6 = memref.memory_space_cast %5
      : memref<*xmemref<?xf32>, 5> to memref<*xmemref<?xf32>, 3>
    ```
  }];

  let arguments = (ins AnyRankedOrUnrankedMemRef:$source);
  let results = (outs AnyRankedOrUnrankedMemRef:$dest);
  let assemblyFormat = "$source attr-dict `:` type($source) `to` type($dest)";

  let extraClassDeclaration = [{
    Value getViewSource() { return getSource(); }
  }];

  let hasFolder = 1;
}

//===----------------------------------------------------------------------===//
// PrefetchOp
//===----------------------------------------------------------------------===//

def MemRef_PrefetchOp : MemRef_Op<"prefetch"> {
  let summary = "prefetch operation";
  let description = [{
    The "prefetch" op prefetches data from a memref location described with
    subscript indices similar to memref.load, and with three attributes: a
    read/write specifier, a locality hint, and a cache type specifier as shown
    below:

    ```mlir
    memref.prefetch %0[%i, %j], read, locality<3>, data : memref<400x400xi32>
    ```

    The read/write specifier is either 'read' or 'write'; the locality hint
    ranges from locality<0> (no locality) to locality<3> (extremely local,
    keep in cache). The cache type specifier is either 'data' or 'instr',
    and specifies whether the prefetch is performed on the data cache or on
    the instruction cache.
  }];

  let arguments = (ins AnyMemRef:$memref, Variadic<Index>:$indices,
                       BoolAttr:$isWrite,
                       ConfinedAttr<I32Attr, [IntMinValue<0>,
                                          IntMaxValue<3>]>:$localityHint,
                       BoolAttr:$isDataCache);

  let extraClassDeclaration = [{
    MemRefType getMemRefType() {
      return ::llvm::cast<MemRefType>(getMemref().getType());
    }
    static StringRef getLocalityHintAttrStrName() { return "localityHint"; }
    static StringRef getIsWriteAttrStrName() { return "isWrite"; }
    static StringRef getIsDataCacheAttrStrName() { return "isDataCache"; }
  }];

  let hasCustomAssemblyFormat = 1;
  let hasFolder = 1;
  let hasVerifier = 1;
}

//===----------------------------------------------------------------------===//
// ReinterpretCastOp
//===----------------------------------------------------------------------===//

def MemRef_ReinterpretCastOp
  : MemRef_OpWithOffsetSizesAndStrides<"reinterpret_cast", [
      DeclareOpInterfaceMethods<OpAsmOpInterface, ["getAsmResultNames"]>,
      AttrSizedOperandSegments,
      MemRefsNormalizable,
      Pure,
      OffsetSizeAndStrideOpInterface,
      ViewLikeOpInterface
    ]> {
  let summary = "memref reinterpret cast operation";
  let description = [{
    Modifies the offset, sizes and strides of a ranked or unranked memref.

    Example:
1350    ```mlir
1351    memref.reinterpret_cast %ranked to
1352      offset: [0],
1353      sizes: [%size0, 10],
1354      strides: [1, %stride1]
1355    : memref<?x?xf32> to memref<?x10xf32, strided<[1, ?], offset: 0>>
1356
1357    memref.reinterpret_cast %unranked to
1358      offset: [%offset],
1359      sizes: [%size0, %size1],
1360      strides: [%stride0, %stride1]
1361    : memref<*xf32> to memref<?x?xf32, strided<[?, ?], offset: ?>>
1362    ```
1363
1364    This operation creates a new memref descriptor using the base of the
1365    source and applying the input arguments to the other metadata.
1366    In other words:
1367    ```mlir
1368    %dst = memref.reinterpret_cast %src to
1369      offset: [%offset],
1370      sizes: [%sizes],
1371      strides: [%strides]
1372    ```
1373    means that `%dst`'s descriptor will be:
1374    ```mlir
1375    %dst.base = %src.base
1376    %dst.aligned = %src.aligned
1377    %dst.offset = %offset
1378    %dst.sizes = %sizes
1379    %dst.strides = %strides
1380    ```
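
    As a concrete sketch (hypothetical values; it assumes the source is a
    plain contiguous buffer), a 1-D memref can be reinterpreted as a
    row-major 2-D memref without copying any data:

    ```mlir
    %flat = memref.alloc() : memref<6xf32>
    // Same base and aligned pointers; only the metadata changes.
    %mat = memref.reinterpret_cast %flat to
      offset: [0],
      sizes: [2, 3],
      strides: [3, 1]
    : memref<6xf32> to memref<2x3xf32>
    ```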
1381  }];
1382
1383  let arguments = (ins Arg<AnyRankedOrUnrankedMemRef, "", []>:$source,
1384                       Variadic<Index>:$offsets,
1385                       Variadic<Index>:$sizes,
1386                       Variadic<Index>:$strides,
1387                       DenseI64ArrayAttr:$static_offsets,
1388                       DenseI64ArrayAttr:$static_sizes,
1389                       DenseI64ArrayAttr:$static_strides);
1390  let results = (outs AnyMemRef:$result);
1391
1392  let assemblyFormat = [{
1393    $source `to` `offset` `` `:`
1394    custom<DynamicIndexList>($offsets, $static_offsets)
1395    `` `,` `sizes` `` `:`
1396    custom<DynamicIndexList>($sizes, $static_sizes)
1397    `` `,` `strides` `` `:`
1398    custom<DynamicIndexList>($strides, $static_strides)
1399    attr-dict `:` type($source) `to` type($result)
1400  }];
1401
1402  let hasVerifier = 1;
1403
1404  let builders = [
1405    // Build a ReinterpretCastOp with mixed static and dynamic entries.
1406    OpBuilder<(ins "MemRefType":$resultType, "Value":$source,
1407      "OpFoldResult":$offset, "ArrayRef<OpFoldResult>":$sizes,
1408      "ArrayRef<OpFoldResult>":$strides,
1409      CArg<"ArrayRef<NamedAttribute>", "{}">:$attrs)>,
1410    // Build a ReinterpretCastOp and infer the result type.
1411    OpBuilder<(ins "Value":$source, "OpFoldResult":$offset,
1412      "ArrayRef<OpFoldResult>":$sizes, "ArrayRef<OpFoldResult>":$strides,
1413      CArg<"ArrayRef<NamedAttribute>", "{}">:$attrs)>,
1414    // Build a ReinterpretCastOp with static entries.
1415    OpBuilder<(ins "MemRefType":$resultType, "Value":$source,
1416      "int64_t":$offset, "ArrayRef<int64_t>":$sizes,
1417      "ArrayRef<int64_t>":$strides,
1418      CArg<"ArrayRef<NamedAttribute>", "{}">:$attrs)>,
1419    // Build a ReinterpretCastOp with dynamic entries.
1420    OpBuilder<(ins "MemRefType":$resultType, "Value":$source,
1421      "Value":$offset, "ValueRange":$sizes,
1422      "ValueRange":$strides,
1423      CArg<"ArrayRef<NamedAttribute>", "{}">:$attrs)>
1424  ];
1425
1426  let extraClassDeclaration = extraBaseClassDeclaration # [{
1427    // The result of the op is always a ranked memref.
1428    MemRefType getType() { return getResult().getType(); }
1429    Value getViewSource() { return getSource(); }
1430
1431    /// Return the rank of the result type.
1432    unsigned getResultRank() { return getType().getRank(); }
1433
1434    /// Return the expected rank of each of the `static_offsets`, `static_sizes`
1435    /// and `static_strides` attributes.
1436    std::array<unsigned, 3> getArrayAttrMaxRanks() {
1437      unsigned resultRank = getType().getRank();
1438      return {1, resultRank, resultRank};
1439    }
1440
1441    /// Return the number of leading operands before the `offsets`, `sizes`,
1442    /// and `strides` operands.
1443    static unsigned getOffsetSizeAndStrideStartOperandIndex() { return 1; }
1444
1445    /// Return a vector of all the static or dynamic sizes of the op, while
1446    /// statically inferring the sizes of the dynamic sizes, when possible.
1447    /// This is best effort.
1448    /// E.g., if `getMixedSizes` returns `[2, %dyn_size]`, but the resulting
1449    /// memref type is `memref<2x8xi16>`, this method will return `[2, 8]`.
1450    /// Similarly if the resulting memref type is `memref<2x?xi16>`, but
1451    /// `%dyn_size` can statically be pinned to a constant value, this
1452    /// constant value is returned instead of `%dyn_size`.
1453    SmallVector<OpFoldResult> getConstifiedMixedSizes();
1454    /// Similar to `getConstifiedMixedSizes` but for strides.
1455    SmallVector<OpFoldResult> getConstifiedMixedStrides();
1456    /// Similar to `getConstifiedMixedSizes` but for the offset.
1457    OpFoldResult getConstifiedMixedOffset();
1458  }];
1459
1460  let hasFolder = 1;
1461  let hasCanonicalizer = 1;
1462}
1463
1464//===----------------------------------------------------------------------===//
1465// RankOp
1466//===----------------------------------------------------------------------===//
1467
1468def MemRef_RankOp : MemRef_Op<"rank", [Pure]> {
1469  let summary = "rank operation";
1470  let description = [{
1471    The `memref.rank` operation takes a memref operand and returns its rank.
1472
1473    Example:
1474
1475    ```mlir
1476    %0 = memref.rank %arg0 : memref<*xf32>
1477    %1 = memref.rank %arg1 : memref<?x?xf32>
1478    ```
1479  }];
1480
1481  let arguments = (ins AnyRankedOrUnrankedMemRef:$memref);
1482  let results = (outs Index);
1483
1484  let hasFolder = 1;
1485  let assemblyFormat = "$memref attr-dict `:` type($memref)";
1486}
1487
1488//===----------------------------------------------------------------------===//
1489// ReshapeOp
1490//===----------------------------------------------------------------------===//
1491
1492def MemRef_ReshapeOp: MemRef_Op<"reshape", [
1493    DeclareOpInterfaceMethods<OpAsmOpInterface, ["getAsmResultNames"]>,
1494    Pure,
1495    ViewLikeOpInterface]>  {
1496  let summary = "memref reshape operation";
1497  let description = [{
1498    The `reshape` operation converts a memref from one type to an
1499    equivalent type with a provided shape. The data is never copied or
1500    modified. The source and destination types are compatible if both have the
1501    same element type, the same number of elements, the same address space, and
1502    an identity layout map. The following combinations are possible:
1503
1504    a. Source type is ranked or unranked. Shape argument has static size.
1505    Result type is ranked.
1506
1507    ```mlir
1508    // Reshape statically-shaped memref.
1509    %dst = memref.reshape %src(%shape)
1510             : (memref<4x1xf32>, memref<1xi32>) to memref<4xf32>
1511    %dst0 = memref.reshape %src(%shape0)
1512             : (memref<4x1xf32>, memref<2xi32>) to memref<2x2xf32>
1513    // Flatten unranked memref.
1514    %dst = memref.reshape %src(%shape)
1515             : (memref<*xf32>, memref<1xi32>) to memref<?xf32>
1516    ```
1517
1518    b. Source type is ranked or unranked. Shape argument has dynamic size.
1519    Result type is unranked.
1520
1521    ```mlir
1522    // Reshape dynamically-shaped 1D memref.
1523    %dst = memref.reshape %src(%shape)
1524             : (memref<?xf32>, memref<?xi32>) to memref<*xf32>
1525    // Reshape unranked memref.
1526    %dst = memref.reshape %src(%shape)
1527             : (memref<*xf32>, memref<?xi32>) to memref<*xf32>
1528    ```
1529  }];
1530
1531  let arguments = (ins AnyRankedOrUnrankedMemRef:$source,
1532                       Arg<MemRefRankOf<[AnySignlessInteger, Index], [1]>,
1533                       "dynamically-sized shape", [MemRead]>:$shape);
1534  let results = (outs AnyRankedOrUnrankedMemRef:$result);
1535
1536  let builders = [OpBuilder<
1537     (ins "MemRefType":$resultType, "Value":$operand, "Value":$shape), [{
1538       $_state.addOperands(operand);
1539       $_state.addOperands(shape);
1540       $_state.addTypes(resultType);
1541     }]>];
1542
1543  let extraClassDeclaration = [{
1544    MemRefType getType() { return ::llvm::cast<MemRefType>(getResult().getType()); }
1545    Value getViewSource() { return getSource(); }
1546  }];
1547
1548  let assemblyFormat = [{
1549    $source `(` $shape `)` attr-dict `:` functional-type(operands, results)
1550  }];
1551  let hasVerifier = 1;
1552}
1553
1554//===----------------------------------------------------------------------===//
1555// ExpandShapeOp / CollapseShapeOp
1556//===----------------------------------------------------------------------===//
1557
1558class MemRef_ReassociativeReshapeOp<string mnemonic, list<Trait> traits = []> :
1559    MemRef_Op<mnemonic, !listconcat(traits,
1560      [Pure, ViewLikeOpInterface])>,
1561    Results<(outs AnyStridedMemRef:$result)>{
1562
1563  code commonExtraClassDeclaration = [{
1564    SmallVector<AffineMap, 4> getReassociationMaps();
1565
1566    SmallVector<ReassociationExprs, 4> getReassociationExprs();
1567
1568    SmallVector<ReassociationIndices, 4> getReassociationIndices() {
1569      SmallVector<ReassociationIndices, 4> reassociationIndices;
1570      for (auto attr : getReassociation())
1571        reassociationIndices.push_back(llvm::to_vector<2>(
1572            llvm::map_range(::llvm::cast<ArrayAttr>(attr), [&](Attribute indexAttr) {
1573              return ::llvm::cast<IntegerAttr>(indexAttr).getInt();
1574            })));
1575      return reassociationIndices;
1576    };
1577
1578    MemRefType getSrcType() { return ::llvm::cast<MemRefType>(getSrc().getType()); }
1579
1580    MemRefType getResultType() { return ::llvm::cast<MemRefType>(getResult().getType()); }
1581
1582    Value getViewSource() { return getSrc(); }
1583  }];
1584
1585  let hasFolder = 1;
1586  let hasCanonicalizer = 1;
1587  let hasVerifier = 1;
1588}
1589
1590def MemRef_ExpandShapeOp : MemRef_ReassociativeReshapeOp<"expand_shape", [
1591    DeclareOpInterfaceMethods<OpAsmOpInterface, ["getAsmResultNames"]>,
1592    DeclareOpInterfaceMethods<ReifyRankedShapedTypeOpInterface>]> {
1593  let summary = "operation to produce a memref with a higher rank.";
1594  let description = [{
1595    The `memref.expand_shape` op produces a new view with a higher rank whose
1596    sizes are a reassociation of the original `view`. The operation is limited
1597    to such reassociations, where a dimension is expanded into one or multiple
1598    contiguous dimensions. Such reassociations never require additional allocs
1599    or copies.
1600
1601    A reassociation is defined as a grouping of dimensions and is represented
1602    with an array of DenseI64ArrayAttr attributes.
1603
1604    Example:
1605
1606    ```mlir
1607    %r = memref.expand_shape %0 [[0, 1], [2]] output_shape [%sz0, %sz1, 32]
1608        : memref<?x32xf32> into memref<?x?x32xf32>
1609    ```
1610
1611    If an op can be statically proven to be invalid (e.g., an expansion from
1612    `memref<10xf32>` to `memref<2x6xf32>`), it is rejected by the verifier. If
1613    it cannot statically be proven invalid (e.g., the full example above; it is
1614    unclear whether the first source dimension factors into `%sz0 * %sz1`), the op is
1615    accepted by the verifier. However, if the op is in fact invalid at runtime,
1616    the behavior is undefined.
1617
1618    The source memref can be zero-ranked. In that case, the reassociation
1619    indices must be empty and the result shape may only consist of unit
1620    dimensions.
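
    For example, a zero-ranked source may be expanded into unit dimensions
    (a sketch of that case):

    ```mlir
    // Empty reassociation; the result consists only of unit dimensions.
    %r0 = memref.expand_shape %zero [] output_shape [1, 1]
        : memref<f32> into memref<1x1xf32>
    ```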
1621
1622    For simplicity, this op may not be used to cast dynamicity of dimension
1623    sizes and/or strides. I.e., a source dimension is dynamic if and only if
1624    there is a dynamic result dimension in the corresponding reassociation
1625    group. The same holds for strides.
1626
1627    The representation for the output shape supports a partially-static
1628    specification via attributes specified through the `static_output_shape`
1629    argument.  A special sentinel value `ShapedType::kDynamic` encodes that the
1630    corresponding entry has a dynamic value.  There must be exactly as many SSA
1631    inputs in `output_shape` as there are `ShapedType::kDynamic` entries in
1632    `static_output_shape`.
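
    For instance, with hypothetical operands, a mixed static/dynamic output
    shape supplies one SSA value per dynamic entry:

    ```mlir
    // %d binds to the single dynamic entry of the output shape.
    %e = memref.expand_shape %m [[0, 1], [2]] output_shape [%d, 4, 32]
        : memref<?x32xf32> into memref<?x4x32xf32>
    ```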
1633
1634    Note: This op currently assumes that the inner strides of the
1635    source/result layout map are the faster-varying ones.
1636  }];
1637
1638  let arguments = (ins AnyStridedMemRef:$src, IndexListArrayAttr:$reassociation,
1639                       Variadic<Index>:$output_shape,
1640                       DenseI64ArrayAttr:$static_output_shape);
1641
1642  let assemblyFormat = [{
1643    $src $reassociation `output_shape`
1644    custom<DynamicIndexList>($output_shape, $static_output_shape) attr-dict `:`
1645    type($src) `into` type($result)
1646  }];
1647
1648  let builders = [
1649    // Builders using ReassociationIndices.
1650    OpBuilder<(ins "Type":$resultType, "Value":$src,
1651      "ArrayRef<ReassociationIndices>":$reassociation,
1652      "ArrayRef<OpFoldResult>":$outputShape)>,
1653
1654    // Builder that infers the output shape using the inferOutputShape() method.
1655    OpBuilder<(ins "Type":$resultType, "Value":$src,
1656      "ArrayRef<ReassociationIndices>":$reassociation)>,
1657
1658    // Builder using ReassociationExprs.
1659    OpBuilder<(ins "Type":$resultType, "Value":$src,
1660      "ArrayRef<ReassociationExprs>":$reassociation),
1661    [{
1662      auto reassociationIndices =
1663          convertReassociationMapsToIndices(reassociation);
1664      build($_builder, $_state, resultType, src, reassociationIndices);
1665    }]>,
1666
1667    OpBuilder<(ins "Type":$resultType, "Value":$src,
1668      "ArrayRef<ReassociationExprs>":$reassociation,
1669      "ArrayRef<OpFoldResult>":$outputShape),
1670    [{
1671      auto reassociationMaps =
1672          convertReassociationMapsToIndices(reassociation);
1673      build($_builder, $_state, resultType, src, reassociationMaps,
1674            outputShape);
1675    }]>,
1676
1677    // Builder that infers the result layout map. The result shape must be
1678    // specified. Otherwise, the op may be ambiguous. The output shape for
1679    // the op will be inferred using the inferOutputShape() method.
1680    OpBuilder<(ins "ArrayRef<int64_t>":$resultShape, "Value":$src,
1681               "ArrayRef<ReassociationIndices>":$reassociation)>,
1682
1683    // Builder that infers the result layout map. The result shape must be
1684    // specified. Otherwise, the op may be ambiguous.
1685    OpBuilder<(ins "ArrayRef<int64_t>":$resultShape, "Value":$src,
1686               "ArrayRef<ReassociationIndices>":$reassociation,
1687               "ArrayRef<OpFoldResult>":$outputShape)>
1688  ];
1689
1690  let extraClassDeclaration = commonExtraClassDeclaration # [{
1691    static FailureOr<MemRefType> computeExpandedType(
1692        MemRefType srcType, ArrayRef<int64_t> resultShape,
1693        ArrayRef<ReassociationIndices> reassociation);
1694
1695    // Infer the output shape for a memref.expand_shape when it is possible
1696    // to do so.
1697    static FailureOr<SmallVector<OpFoldResult>> inferOutputShape(
1698        OpBuilder &b, Location loc, MemRefType expandedType,
1699        ArrayRef<ReassociationIndices> reassociation,
1700        ArrayRef<OpFoldResult> inputShape);
1701  }];
1702
1703  let hasVerifier = 1;
1704}
1705
1706def MemRef_CollapseShapeOp : MemRef_ReassociativeReshapeOp<"collapse_shape", [
1707    DeclareOpInterfaceMethods<OpAsmOpInterface, ["getAsmResultNames"]>]> {
1708  let summary = "operation to produce a memref with a smaller rank.";
1709  let description = [{
1710    The `memref.collapse_shape` op produces a new view with a smaller rank
1711    whose sizes are a reassociation of the original `view`. The operation is
1712    limited to such reassociations, where subsequent, contiguous dimensions are
1713    collapsed into a single dimension. Such reassociations never require
1714    additional allocs or copies.
1715
1716    Collapsing non-contiguous dimensions is undefined behavior. When a group of
1717    dimensions can be statically proven to be non-contiguous, collapses of such
1718    groups are rejected in the verifier on a best-effort basis. In the general
1719    case, collapses of dynamically-sized dims with dynamic strides cannot be
1720    proven to be contiguous or non-contiguous due to limitations in the memref
1721    type.
1722
1723    A reassociation is defined as a continuous grouping of dimensions and is
1724    represented with an array of DenseI64ArrayAttr attributes.
1725
1726    Note: Only the dimensions within a reassociation group must be contiguous.
1727    The remaining dimensions may be non-contiguous.
1728
1729    The result memref type can be zero-ranked if the source memref type is
1730    statically shaped with all dimensions being unit extent. In such a case, the
1731    reassociation indices must be empty.
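
    For example (a sketch of the zero-rank case):

    ```mlir
    // Empty reassociation; all unit dimensions are collapsed away.
    %0 = memref.collapse_shape %unit [] : memref<1x1xf32> into memref<f32>
    ```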
1732
1733    Examples:
1734
1735    ```mlir
1736    // Dimension collapse (i, j) -> i' and k -> k'
1737    %1 = memref.collapse_shape %0 [[0, 1], [2]] :
1738        memref<?x?x?xf32, stride_spec> into memref<?x?xf32, stride_spec_2>
1739    ```
1740
1741    For simplicity, this op may not be used to cast dynamicity of dimension
1742    sizes and/or strides. I.e., a result dimension must be dynamic if and only
1743    if at least one dimension in the corresponding reassociation group is
1744    dynamic. Similarly, the stride of a result dimension must be dynamic if and
1745    only if the corresponding start dimension in the source type is dynamic.
1746
1747    Note: This op currently assumes that the inner strides of the
1748    source/result layout map are the faster-varying ones.
1749  }];
1750
1751  let arguments = (ins AnyStridedMemRef:$src, IndexListArrayAttr:$reassociation);
1752
1753  let assemblyFormat = [{
1754    $src $reassociation attr-dict `:` type($src) `into` type($result)
1755  }];
1756
1757  let builders = [
1758    // Builders for a contracting reshape whose result type is computed from
1759    // `src` and `reassociation`.
1760    OpBuilder<(ins "Value":$src,
1761      "ArrayRef<ReassociationIndices>":$reassociation,
1762      CArg<"ArrayRef<NamedAttribute>", "{}">:$attrs)>,
1763    OpBuilder<(ins "Value":$src,
1764      "ArrayRef<ReassociationExprs>":$reassociation,
1765      CArg<"ArrayRef<NamedAttribute>", "{}">:$attrs),
1766    [{
1767      auto reassociationMaps =
1768          convertReassociationMapsToIndices(reassociation);
1769      build($_builder, $_state, src, reassociationMaps, attrs);
1770    }]>,
1771
1772    // Builders for a reshape whose result type is passed explicitly.
1773    OpBuilder<(ins "Type":$resultType, "Value":$src,
1774      "ArrayRef<ReassociationIndices>":$reassociation,
1775      CArg<"ArrayRef<NamedAttribute>", "{}">:$attrs),
1776    [{
1777      $_state.addAttribute("reassociation",
1778                          getReassociationIndicesAttribute($_builder, reassociation));
1779      build($_builder, $_state, resultType, src, attrs);
1780    }]>,
1781    OpBuilder<(ins "Type":$resultType, "Value":$src,
1782      "ArrayRef<ReassociationExprs>":$reassociation,
1783      CArg<"ArrayRef<NamedAttribute>", "{}">:$attrs),
1784    [{
1785      auto reassociationMaps =
1786          convertReassociationMapsToIndices(reassociation);
1787      build($_builder, $_state, resultType, src, reassociationMaps, attrs);
1788    }]>
1789  ];
1790
1791  let extraClassDeclaration = commonExtraClassDeclaration # [{
1792    /// Return `true` if this source MemRef type is guaranteed to be collapsible
1793    /// according to the given reassociation indices. In the presence of dynamic
1794    /// strides this is usually not the case.
1795    static bool isGuaranteedCollapsible(
1796        MemRefType srcType, ArrayRef<ReassociationIndices> reassociation);
1797
1798    static MemRefType computeCollapsedType(
1799        MemRefType srcType, ArrayRef<ReassociationIndices> reassociation);
1800  }];
1801
1802  let hasVerifier = 1;
1803}
1804
1805//===----------------------------------------------------------------------===//
1806// StoreOp
1807//===----------------------------------------------------------------------===//
1808
1809def MemRef_StoreOp : MemRef_Op<"store",
1810     [TypesMatchWith<"type of 'value' matches element type of 'memref'",
1811                     "memref", "value",
1812                     "::llvm::cast<MemRefType>($_self).getElementType()">,
1813      MemRefsNormalizable,
1814      DeclareOpInterfaceMethods<PromotableMemOpInterface>,
1815      DeclareOpInterfaceMethods<DestructurableAccessorOpInterface>]> {
1816  let summary = "store operation";
1817  let description = [{
1818    Store a value to a memref location given by indices. The value stored must
1819    have the same type as the element type of the memref. The number of
1820    indices provided within brackets must match the rank of the memref.
1821
1822    In an affine context, the indices of a store are restricted to SSA values
1823    bound to surrounding loop induction variables,
1824    [symbols](Affine.md/#restrictions-on-dimensions-and-symbols), results of a
1825    `constant` operation, or the result of an
1826    [`affine.apply`](Affine.md/#affineapply-affineapplyop) operation that can in
1827    turn take as arguments all of the aforementioned SSA values or,
1828    recursively, the result of such an `affine.apply` operation.
1829
1830    Example:
1831
1832    ```mlir
1833    memref.store %100, %A[%1, 1023] : memref<4x?xf32, #layout, memspace0>
1834    ```
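
    The op also accepts an optional boolean `nontemporal` attribute (see the
    argument list below). A sketch of its syntax, written via the attribute
    dictionary:

    ```mlir
    // Hypothetical store hinting that the value will not be reused soon.
    memref.store %v, %A[%i] {nontemporal = true} : memref<32xf32>
    ```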
1835
1836    **Context:** The `load` and `store` operations are specifically crafted to
1837    fully resolve a reference to an element of a memref, and (in polyhedral
1838    `affine.if` and `affine.for` operations) the compiler can follow use-def
1839    chains (e.g. through [`affine.apply`](Affine.md/#affineapply-affineapplyop)
1840    operations) to precisely analyze references at compile-time using polyhedral
1841    techniques. This is possible because of the
1842    [restrictions on dimensions and symbols](Affine.md/#restrictions-on-dimensions-and-symbols)
1843    in these contexts.
1844  }];
1845
1846  let arguments = (ins AnyType:$value,
1847                       Arg<AnyMemRef, "the reference to store to",
1848                           [MemWrite]>:$memref,
1849                       Variadic<Index>:$indices,
1850                       DefaultValuedOptionalAttr<BoolAttr, "false">:$nontemporal);
1851
1852  let builders = [
1853    OpBuilder<(ins "Value":$valueToStore, "Value":$memref), [{
1854      $_state.addOperands(valueToStore);
1855      $_state.addOperands(memref);
1856    }]>];
1857
1858  let extraClassDeclaration = [{
1859      Value getValueToStore() { return getOperand(0); }
1860
1861      Value getMemRef() { return getOperand(1); }
1862      void setMemRef(Value value) { setOperand(1, value); }
1863      MemRefType getMemRefType() {
1864        return ::llvm::cast<MemRefType>(getMemRef().getType());
1865      }
1866  }];
1867
1868  let hasFolder = 1;
1869  let hasVerifier = 1;
1870
1871  let assemblyFormat = [{
1872    $value `,` $memref `[` $indices `]` attr-dict `:` type($memref)
1873  }];
1874}
1875
1876//===----------------------------------------------------------------------===//
1877// SubViewOp
1878//===----------------------------------------------------------------------===//
1879
1880def SubViewOp : MemRef_OpWithOffsetSizesAndStrides<"subview", [
1881    DeclareOpInterfaceMethods<OpAsmOpInterface, ["getAsmResultNames"]>,
1882    DeclareOpInterfaceMethods<ViewLikeOpInterface>,
1883    AttrSizedOperandSegments,
1884    OffsetSizeAndStrideOpInterface,
1885    Pure
1886  ]> {
1887  let summary = "memref subview operation";
1888  let description = [{
1889    The "subview" operation converts a memref type to another memref type
1890    which represents a reduced-size view of the original memref as specified by
1891    the operation's offsets, sizes and strides arguments.
1892
1893    The SubView operation supports the following arguments:
1894
1895    * source: the "base" memref on which to create a "view" memref.
1896    * offsets: memref-rank number of offsets into the "base" memref at which to
1897               create the "view" memref.
1898    * sizes: memref-rank number of sizes which specify the sizes of the result
1899             "view" memref type.
1900    * strides: memref-rank number of strides that compose multiplicatively with
1901               the base memref strides in each dimension.
1902
1903    The representation based on offsets, sizes and strides supports a
1904    partially-static specification via attributes specified through the
1905    `static_offsets`, `static_sizes` and `static_strides` arguments. A special
1906    sentinel value ShapedType::kDynamic encodes that the corresponding entry has
1907    a dynamic value.
1908
1909    A subview operation may additionally reduce the rank of the resulting view
1910    by removing dimensions that are statically known to be of size 1.
1911
1912    Example 1:
1913
1914    ```mlir
1915    %0 = memref.alloc() : memref<64x4xf32, affine_map<(d0, d1) -> (d0 * 4 + d1)>>
1916
1917    // Create a sub-view of "base" memref '%0' with offset arguments '%c0',
1918    // dynamic sizes for each dimension, and stride arguments '%c1'.
1919    %1 = memref.subview %0[%c0, %c0][%size0, %size1][%c1, %c1]
1920      : memref<64x4xf32, affine_map<(d0, d1) -> (d0 * 4 + d1)>> to
1921        memref<?x?xf32, affine_map<(d0, d1)[s0, s1] -> (d0 * s1 + d1 + s0)>>
1922    ```
1923
1924    Example 2:
1925
1926    ```mlir
1927    %0 = memref.alloc() : memref<8x16x4xf32, affine_map<(d0, d1, d2) -> (d0 * 64 + d1 * 4 + d2)>>
1928
1929    // Create a sub-view of "base" memref '%0' with dynamic offsets, sizes,
1930    // and strides.
1931    // Note that dynamic offsets are represented by the linearized dynamic
1932    // offset symbol 's0' in the subview memref layout map, and that the
1933    // dynamic strides operands, after being applied to the base memref
1934    // strides in each dimension, are represented in the view memref layout
1935    // map as symbols 's1', 's2' and 's3'.
1936    %1 = memref.subview %0[%i, %j, %k][%size0, %size1, %size2][%x, %y, %z]
1937      : memref<8x16x4xf32, affine_map<(d0, d1, d2) -> (d0 * 64 + d1 * 4 + d2)>> to
1938        memref<?x?x?xf32,
1939          affine_map<(d0, d1, d2)[s0, s1, s2, s3] -> (d0 * s1 + d1 * s2 + d2 * s3 + s0)>>
1940    ```
1941
1942    Example 3:
1943
1944    ```mlir
1945    %0 = memref.alloc() : memref<8x16x4xf32, affine_map<(d0, d1, d2) -> (d0 * 64 + d1 * 4 + d2)>>
1946
1947    // Subview with constant offsets, sizes and strides.
1948    %1 = memref.subview %0[0, 2, 0][4, 4, 4][1, 1, 1]
1949      : memref<8x16x4xf32, affine_map<(d0, d1, d2) -> (d0 * 64 + d1 * 4 + d2)>> to
1950        memref<4x4x4xf32, affine_map<(d0, d1, d2) -> (d0 * 64 + d1 * 4 + d2 + 8)>>
1951    ```
1952
1953    Example 4:
1954
1955    ```mlir
1956    %0 = memref.alloc(%arg0, %arg1) : memref<?x?xf32>
1957
1958    // Subview with constant size, but dynamic offsets and
1959    // strides. The resulting memref has a static shape, but if the
1960    // base memref has an affine map to describe the layout, the result
1961    // memref also uses an affine map to describe the layout. The
1962    // strides of the result memref are computed as follows:
1963    //
1964    // Let #map1 represent the layout of the base memref, and #map2
1965    // represent the layout of the result memref. A #mapsubview can be
1966    // constructed to map an index from the result memref to the base
1967    // memref (note that the description below uses more convenient
1968    // naming for symbols, while in affine maps, symbols are
1969    // represented as unsigned numbers that identify that symbol in the
1970    // given affine map).
1971    //
1972    // #mapsubview = (d0, d1)[o0, o1, t0, t1] -> (d0 * t0 + o0, d1 * t1 + o1)
1973    //
1974    // where, o0, o1, ... are offsets, and t0, t1, ... are strides. Then,
1975    //
1976    // #map2 = #map1.compose(#mapsubview)
1977    //
1978    // If the layout map is represented as
1979    //
1980    // #map1 = (d0, d1)[s0, s1, s2] -> (d0 * s1 + d1 * s2 + s0)
1981    //
1982    // then,
1983    //
1984    // #map2 = (d0, d1)[s0, s1, s2, o0, o1, t0, t1] ->
1985    //              (d0 * s1 * t0 + d1 * s2 * t1 + o0 * s1 + o1 * s2 + s0)
1986    //
1987    // Representing this canonically
1988    //
1989    // #map2 = (d0, d1)[r0, r1, r2] -> (d0 * r1 + d1 * r2 + r0)
1990    //
1991    // where, r0 = o0 * s1 + o1 * s2 + s0, r1 = s1 * t0, r2 = s2 * t1.
1992    %1 = memref.subview %0[%i, %j][4, 4][%x, %y]
1993      : memref<?x?xf32, affine_map<(d0, d1)[s0, s1, s2] -> (d0 * s1 + d1 * s2 + s0)>> to
1994        memref<4x4xf32, affine_map<(d0, d1)[r0, r1, r2] -> (d0 * r1 + d1 * r2 + r0)>>
1995
1996    // Note that the subview op does not guarantee that the result
1997    // memref is "in-bounds" w.r.t. the base memref. It is up to the
1998    // client to ensure that the subview is accessed in a manner that
1999    // is in-bounds.
2000    ```
2001
2002    Example 5:
2003
2004    ```mlir
2005    // Rank-reducing subview.
2006    %1 = memref.subview %0[0, 0, 0][1, 16, 4][1, 1, 1] :
2007      memref<8x16x4xf32> to memref<16x4xf32>
2008
2009    // Original layout:
2010    // (d0, d1, d2) -> (64 * d0 + 4 * d1 + d2)
2011    // Subviewed layout:
2012    // (d0, d1, d2) -> (64 * (d0 + 3) + 4 * (d1 + 4) + d2 + 2) = (64 * d0 + 4 * d1 + d2 + 210)
2013    // After rank reducing:
2014    // (d0, d1) -> (4 * d0 + d1 + 210)
2015    %3 = memref.subview %2[3, 4, 2][1, 6, 3][1, 1, 1] :
2016      memref<8x16x4xf32> to memref<6x3xf32, strided<[4, 1], offset: 210>>
2017    ```
2018  }];
2019
2020  let arguments = (ins AnyMemRef:$source,
2021                       Variadic<Index>:$offsets,
2022                       Variadic<Index>:$sizes,
2023                       Variadic<Index>:$strides,
2024                       DenseI64ArrayAttr:$static_offsets,
2025                       DenseI64ArrayAttr:$static_sizes,
2026                       DenseI64ArrayAttr:$static_strides);
2027  let results = (outs AnyMemRef:$result);
2028
2029  let assemblyFormat = [{
2030    $source ``
2031    custom<DynamicIndexList>($offsets, $static_offsets)
2032    custom<DynamicIndexList>($sizes, $static_sizes)
2033    custom<DynamicIndexList>($strides, $static_strides)
2034    attr-dict `:` type($source) `to` type($result)
2035  }];
2036
2037  let builders = [
2038    // Build a SubViewOp with mixed static and dynamic entries and inferred
2039    // result type.
2040    OpBuilder<(ins "Value":$source, "ArrayRef<OpFoldResult>":$offsets,
2041      "ArrayRef<OpFoldResult>":$sizes, "ArrayRef<OpFoldResult>":$strides,
2042      CArg<"ArrayRef<NamedAttribute>", "{}">:$attrs)>,
2043    // Build a SubViewOp with mixed static and dynamic entries and custom
2044    // result type. If the type passed is nullptr, it is inferred.
2045    OpBuilder<(ins "MemRefType":$resultType, "Value":$source,
2046      "ArrayRef<OpFoldResult>":$offsets, "ArrayRef<OpFoldResult>":$sizes,
2047      "ArrayRef<OpFoldResult>":$strides,
2048      CArg<"ArrayRef<NamedAttribute>", "{}">:$attrs)>,
2049    // Build a SubViewOp with static entries and inferred result type (the
2050    // type is computed from the source type and the static entries).
2051    OpBuilder<(ins "Value":$source, "ArrayRef<int64_t>":$offsets,
2052      "ArrayRef<int64_t>":$sizes, "ArrayRef<int64_t>":$strides,
2053      CArg<"ArrayRef<NamedAttribute>", "{}">:$attrs)>,
2054    // Build a SubViewOp with static entries and custom result type (inferred if null).
2055    OpBuilder<(ins "MemRefType":$resultType, "Value":$source,
2056      "ArrayRef<int64_t>":$offsets, "ArrayRef<int64_t>":$sizes,
2057      "ArrayRef<int64_t>":$strides,
2058      CArg<"ArrayRef<NamedAttribute>", "{}">:$attrs)>,
2059    // Build a SubViewOp with dynamic entries and inferred result type (the
2060    // type is computed from the source type and the dynamic entries).
2061    OpBuilder<(ins "Value":$source, "ValueRange":$offsets,
2062      "ValueRange":$sizes, "ValueRange":$strides,
2063      CArg<"ArrayRef<NamedAttribute>", "{}">:$attrs)>,
2064    // Build a SubViewOp with dynamic entries and custom result type (inferred if null).
2065    OpBuilder<(ins "MemRefType":$resultType, "Value":$source,
2066      "ValueRange":$offsets, "ValueRange":$sizes, "ValueRange":$strides,
2067      CArg<"ArrayRef<NamedAttribute>", "{}">:$attrs)>
2068  ];
2069
2070  let extraClassDeclaration = extraBaseClassDeclaration # [{
2071    /// Returns the type of the base memref operand.
2072    MemRefType getSourceType() {
2073      return ::llvm::cast<MemRefType>(getSource().getType());
2074    }
2075
2076    /// The result of a subview is always a memref.
2077    MemRefType getType() { return ::llvm::cast<MemRefType>(getResult().getType()); }
2078
2079    /// A subview result type can be fully inferred from the source type and the
2080    /// static representation of offsets, sizes and strides. Special sentinels
2081    /// encode the dynamic case.
2082    static Type inferResultType(MemRefType sourceMemRefType,
2083                                ArrayRef<int64_t> staticOffsets,
2084                                ArrayRef<int64_t> staticSizes,
2085                                ArrayRef<int64_t> staticStrides);
2086    static Type inferResultType(MemRefType sourceMemRefType,
2087                                ArrayRef<OpFoldResult> staticOffsets,
2088                                ArrayRef<OpFoldResult> staticSizes,
2089                                ArrayRef<OpFoldResult> staticStrides);
2090
2091    /// A rank-reducing result type can be inferred from the desired result
2092    /// shape. Only the layout map is inferred.
2093    ///
2094    /// Note: The result shape cannot be inferred with just the result rank
2095    /// and the desired sizes. In case there are more "ones" among the sizes
2096    /// than the difference in source/result rank, it is not clear which dims of
2097    /// size one should be dropped.
2098    static Type inferRankReducedResultType(ArrayRef<int64_t> resultShape,
2099                                           MemRefType sourceMemRefType,
2100                                           ArrayRef<int64_t> staticOffsets,
2101                                           ArrayRef<int64_t> staticSizes,
2102                                           ArrayRef<int64_t> staticStrides);
2103    static Type inferRankReducedResultType(ArrayRef<int64_t> resultShape,
2104                                           MemRefType sourceMemRefType,
2105                                           ArrayRef<OpFoldResult> staticOffsets,
2106                                           ArrayRef<OpFoldResult> staticSizes,
2107                                           ArrayRef<OpFoldResult> staticStrides);
2108
2109    /// Return the expected rank of each of the `static_offsets`, `static_sizes`
2110    /// and `static_strides` attributes.
2111    std::array<unsigned, 3> getArrayAttrMaxRanks() {
2112      unsigned rank = getSourceType().getRank();
2113      return {rank, rank, rank};
2114    }
2115
2116    /// Return the number of leading operands before the `offsets`, `sizes`,
2117    /// and `strides` operands.
2118    static unsigned getOffsetSizeAndStrideStartOperandIndex() { return 1; }
2119
2120    /// Return the dimensions of the source type that are dropped when
2121    /// the result is rank-reduced.
2122    llvm::SmallBitVector getDroppedDims();
2123
2124    /// Given a `value`, asserted to be of MemRefType, build a SubViewOp that
2125    /// results in a rank reduction to the desired memref shape and return the
2126    /// new value created.
2127    /// If the shape of `value` is already the `desiredShape`, just return
2128    /// `value`.
2129    /// If the shape of `value` cannot be rank-reduced to `desiredShape`, fail.
2130    static FailureOr<Value> rankReduceIfNeeded(
2131      OpBuilder &b, Location loc, Value value, ArrayRef<int64_t> desiredShape);
2132  }];
2133
2134  let hasCanonicalizer = 1;
2135  let hasFolder = 1;
2136  let hasVerifier = 1;
2137}
2138
2139//===----------------------------------------------------------------------===//
2140// TransposeOp
2141//===----------------------------------------------------------------------===//
2142
2143def MemRef_TransposeOp : MemRef_Op<"transpose", [
2144    DeclareOpInterfaceMethods<OpAsmOpInterface, ["getAsmResultNames"]>,
2145    Pure]>,
2146    Arguments<(ins AnyStridedMemRef:$in, AffineMapAttr:$permutation)>,
2147    Results<(outs AnyStridedMemRef)> {
2148  let summary = "`transpose` produces a new strided memref (metadata-only)";
2149  let description = [{
2150    The `transpose` op produces a strided memref whose sizes and strides
2151    are a permutation of the original `in` memref. This is purely a metadata
2152    transformation.
2153
2154    Example:
2155
2156    ```mlir
2157    %1 = memref.transpose %0 (i, j) -> (j, i) : memref<?x?xf32> to memref<?x?xf32, affine_map<(d0, d1)[s0] -> (d1 * s0 + d0)>>
2158    ```
2159  }];
2160
2161  let builders = [
2162    OpBuilder<(ins "Value":$in, "AffineMapAttr":$permutation,
2163      CArg<"ArrayRef<NamedAttribute>", "{}">:$attrs)>];
2164
2165  let extraClassDeclaration = [{
2166    static StringRef getPermutationAttrStrName() { return "permutation"; }
2167  }];
2168
2169  let hasCustomAssemblyFormat = 1;
2170  let hasFolder = 1;
2171  let hasVerifier = 1;
2172}
2173
2174//===----------------------------------------------------------------------===//
2175// ViewOp
2176//===----------------------------------------------------------------------===//
2177
2178def MemRef_ViewOp : MemRef_Op<"view", [
2179    DeclareOpInterfaceMethods<OpAsmOpInterface, ["getAsmResultNames"]>,
2180    DeclareOpInterfaceMethods<ViewLikeOpInterface>,
2181    Pure]> {
2182  let summary = "memref view operation";
2183  let description = [{
2184    The "view" operation extracts an N-D contiguous memref with empty layout map
2185    and arbitrary element type from a 1-D contiguous memref with empty layout
2186    map and i8 element type. The ViewOp supports the following arguments:
2187
2188    * A single dynamic byte-shift operand must be specified, which represents
2189      a shift of the base 1-D memref pointer from which to create the resulting
2190      contiguous memref view with identity layout.
2191    * A dynamic size operand that must be specified for each dynamic dimension
2192      in the resulting view memref type.
2193
2194    The "view" operation gives a structured indexing form to a flat 1-D buffer.
2195    Unlike "subview" it can perform a type change. The type change behavior
2196    requires the op to have special semantics because, e.g., a byte shift of 3
2197    cannot be represented as an offset on f64.
2198    For now, a "view" op:
2199
2200    1. Only takes a contiguous source memref with 0 offset and empty layout.
2201    2. Must specify a byte_shift operand (in the future, a special integer
2202       attribute may be added to support the folded case).
2203    3. Returns a contiguous memref with 0 offset and empty layout.
2204
2205    Example:
2206
2207    ```mlir
2208    // Allocate a flat 1D/i8 memref.
2209    %0 = memref.alloc() : memref<2048xi8>
2210
2211    // ViewOp with dynamic offset and static sizes.
2212    %1 = memref.view %0[%offset_1024][] : memref<2048xi8> to memref<64x4xf32>
2213
2214    // ViewOp with dynamic offset and two dynamic sizes.
2215    %2 = memref.view %0[%offset_1024][%size0, %size1] :
2216      memref<2048xi8> to memref<?x4x?xf32>
2217    ```
2218  }];
2219
2220  let arguments = (ins MemRefRankOf<[I8], [1]>:$source,
2221                       Index:$byte_shift,
2222                       Variadic<Index>:$sizes);
2223  let results = (outs AnyMemRef);
2224
2225  let extraClassDeclaration = [{
2226    /// The result of a view is always a memref.
2227    MemRefType getType() { return ::llvm::cast<MemRefType>(getResult().getType()); }
2228
2229    /// Returns the dynamic sizes for this view operation. This is redundant
2230    /// with `sizes` but needed in template implementations. More specifically:
2231    /// ```
2232    /// template <typename AnyMemRefDefOp>
2233    /// bool isMemRefSizeValidSymbol(AnyMemRefDefOp memrefDefOp, unsigned index,
2234    ///                              Region *region)
2235    /// ```
2236    operand_range getDynamicSizes() {
2237      return {getSizes().begin(), getSizes().end()};
2238    }
2239  }];
2240
2241  let assemblyFormat = [{
2242    $source `[` $byte_shift `]` `` `[` $sizes `]` attr-dict
2243    `:` type($source) `to` type(results)
2244  }];
2245
2246  let hasCanonicalizer = 1;
2247  let hasVerifier = 1;
2248}
2249
2250//===----------------------------------------------------------------------===//
2251// AtomicRMWOp
2252//===----------------------------------------------------------------------===//
2253
2254def AtomicRMWOp : MemRef_Op<"atomic_rmw", [
2255      AllTypesMatch<["value", "result"]>,
2256      TypesMatchWith<"value type matches element type of memref",
2257                     "memref", "value",
2258                     "::llvm::cast<MemRefType>($_self).getElementType()">
2259    ]> {
2260  let summary = "atomic read-modify-write operation";
2261  let description = [{
2262    The `memref.atomic_rmw` operation provides a way to perform a read-modify-write
2263    sequence that is free from data races. The kind enumeration specifies the
2264    modification to perform. The value operand represents the new value to be
2265    applied during the modification. The memref operand represents the buffer
2266    that the read and write will be performed against, as accessed by the
2267    specified indices. The arity of the indices is the rank of the memref. The
2268    result represents the latest value that was stored.
2269
2270    Example:
2271
2272    ```mlir
2273    %x = memref.atomic_rmw "addf" %value, %I[%i] : (f32, memref<10xf32>) -> f32
2274    ```
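
    As another sketch (hypothetical operands; `addi` is one of the arith
    AtomicRMWKind values):

    ```mlir
    %c1 = arith.constant 1 : i32
    // Atomically add 1 to %J[%i]; the result is the value that was stored.
    %y = memref.atomic_rmw "addi" %c1, %J[%i] : (i32, memref<16xi32>) -> i32
    ```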
2275  }];
2276
2277  let arguments = (ins
2278      AtomicRMWKindAttr:$kind,
2279      AnyTypeOf<[AnySignlessInteger, AnyFloat]>:$value,
2280      Arg<MemRefOf<[AnySignlessInteger, AnyFloat]>, "the reference to read from and write to", [MemRead, MemWrite]>:$memref,
2281      Variadic<Index>:$indices);
2282  let results = (outs AnyTypeOf<[AnySignlessInteger, AnyFloat]>:$result);
2283
2284  let assemblyFormat = [{
2285    $kind $value `,` $memref `[` $indices `]` attr-dict `:` `(` type($value) `,`
2286    type($memref) `)` `->` type($result)
2287  }];
2288
2289  let extraClassDeclaration = [{
2290    MemRefType getMemRefType() {
2291      return ::llvm::cast<MemRefType>(getMemref().getType());
2292    }
2293  }];
2294  let hasFolder = 1;
2295  let hasVerifier = 1;
2296}
2297
2298#endif // MEMREF_OPS
2299