//===-- GPUBase.td - GPU dialect definitions ---------------*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Defines the GPU dialect
//
//===----------------------------------------------------------------------===//

#ifndef GPU_BASE
#define GPU_BASE

include "mlir/IR/AttrTypeBase.td"
include "mlir/IR/EnumAttr.td"
include "mlir/IR/OpBase.td"

//===----------------------------------------------------------------------===//
// GPU Dialect.
//===----------------------------------------------------------------------===//

def GPU_Dialect : Dialect {
  let name = "gpu";
  let cppNamespace = "::mlir::gpu";
  let hasOperationAttrVerify = 1;

  let extraClassDeclaration = [{
    /// Get the name of the attribute used to annotate the modules that contain
    /// kernel modules.
    static StringRef getContainerModuleAttrName() {
      return "gpu.container_module";
    }
    /// Get the name of the attribute used to annotate external kernel
    /// functions.
    static StringRef getKernelFuncAttrName() { return "gpu.kernel"; }

    /// Returns whether the given function is a kernel function, i.e., has the
    /// 'gpu.kernel' attribute.
    static bool isKernel(Operation *op);

    /// Returns the number of workgroup (thread, block) dimensions supported in
    /// the GPU dialect.
    // TODO: consider generalizing this.
    static unsigned getNumWorkgroupDimensions() { return 3; }

    /// Returns the address space enum value used to identify the workgroup
    /// memory address space.
    static AddressSpace getWorkgroupAddressSpace() { return AddressSpace::Workgroup; }

    /// Returns the address space enum value used to identify the private
    /// memory address space.
    static AddressSpace getPrivateAddressSpace() { return AddressSpace::Private; }

    /// Return true if the given MemRefType has an address space that matches
    /// the gpu::AddressSpaceAttr attribute with value 'workgroup'.
    static bool hasWorkgroupMemoryAddressSpace(MemRefType type);

    /// Return true if the given Attribute is a gpu::AddressSpaceAttr
    /// attribute with value 'workgroup'.
    static bool isWorkgroupMemoryAddressSpace(Attribute memorySpace);
  }];

  let discardableAttrs = (ins
    "::mlir::DenseI32ArrayAttr":$known_block_size,
    "::mlir::DenseI32ArrayAttr":$known_grid_size
  );

  let dependentDialects = ["arith::ArithDialect"];
  let useDefaultAttributePrinterParser = 1;
  let useDefaultTypePrinterParser = 1;
}
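
// As a rough sketch of how the attributes above appear in IR (the module and
// function names here are hypothetical): a host module that owns kernel
// modules is tagged with `gpu.container_module`, and kernel functions carry
// the `gpu.kernel` unit attribute, which `gpu.func` prints as the `kernel`
// keyword.
//
//   module attributes {gpu.container_module} {
//     gpu.module @kernels {
//       gpu.func @vec_add(%arg0: memref<?xf32>) kernel {
//         gpu.return
//       }
//     }
//   }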

//===----------------------------------------------------------------------===//
// GPU Enums.
//===----------------------------------------------------------------------===//

class GPU_I32Enum<string name, string description, list<I32EnumAttrCase> cases>
    : I32EnumAttr<name, description, cases> {
  let genSpecializedAttr = 0;
  let cppNamespace = "::mlir::gpu";
}
class GPU_I32EnumAttr<string mnemonic, GPU_I32Enum enumInfo> :
    EnumAttr<GPU_Dialect, enumInfo, mnemonic> {
  let assemblyFormat = "`<` $value `>`";
}

def GPU_AddressSpaceGlobal : I32EnumAttrCase<"Global", 1, "global">;
def GPU_AddressSpaceWorkgroup : I32EnumAttrCase<"Workgroup", 2, "workgroup">;
def GPU_AddressSpacePrivate : I32EnumAttrCase<"Private", 3, "private">;
def GPU_AddressSpaceEnum : GPU_I32Enum<
  "AddressSpace", "GPU address space", [
    GPU_AddressSpaceGlobal,
    GPU_AddressSpaceWorkgroup,
    GPU_AddressSpacePrivate
  ]>;

def GPU_AddressSpaceAttr :
  GPU_I32EnumAttr<"address_space", GPU_AddressSpaceEnum>;
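
// With the `<` $value `>` assembly format above, this attribute prints as
// `#gpu.address_space<workgroup>` (or `global`/`private`) and is typically
// used as a memref memory space, e.g. (shape chosen arbitrarily):
//
//   memref<32x32xf32, #gpu.address_space<workgroup>>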

//===----------------------------------------------------------------------===//
// GPU Types.
//===----------------------------------------------------------------------===//

def GPU_AsyncToken : DialectType<
  GPU_Dialect, CPred<"::llvm::isa<::mlir::gpu::AsyncTokenType>($_self)">, "async token type">,
             BuildableType<"mlir::gpu::AsyncTokenType::get($_builder.getContext())">;

// Predicate to check whether a type is gpu::MMAMatrixType.
def IsMMAMatrixTypePred : CPred<"::llvm::isa<::mlir::gpu::MMAMatrixType>($_self)">;

def GPU_MMAMatrix : DialectType<
  GPU_Dialect, IsMMAMatrixTypePred, "MMAMatrix type">;

// Memref type acceptable to gpu.subgroup_mma_{load|store}_matrix ops.
def GPU_MMAMemRef : MemRefOf<[I8, I32, F16, F32, VectorOfRankAndType<[1], [I8, I32, F16, F32]>]>;

class MMAMatrixOf<list<Type> allowedTypes> :
  ContainerType<AnyTypeOf<allowedTypes>, IsMMAMatrixTypePred,
  "::llvm::cast<::mlir::gpu::MMAMatrixType>($_self).getElementType()",
  "gpu.mma_matrix", "::mlir::gpu::MMAMatrixType">;

// Types for all sparse handles.
class GPU_SparseHandle<string typeStr, string description> :
    DialectType<GPU_Dialect,
    CPred<"llvm::isa<::mlir::gpu::"#typeStr#">($_self)">,
    description#" handle type">,
  BuildableType<"mlir::gpu::"#typeStr#"::get($_builder.getContext())">;

def GPU_SparseDnTensorHandle : GPU_SparseHandle<"SparseDnTensorHandleType", "dense tensor">;
def GPU_SparseSpGEMMOpHandle : GPU_SparseHandle<"SparseSpGEMMOpHandleType", "SpGEMM operation">;
def GPU_SparseSpMatHandle : GPU_SparseHandle<"SparseSpMatHandleType", "sparse matrix">;

//===----------------------------------------------------------------------===//
// GPU Interfaces.
//===----------------------------------------------------------------------===//

def GPU_AsyncOpInterface : OpInterface<"AsyncOpInterface"> {
  let description = [{
    Interface for GPU operations that execute asynchronously on the device.

    GPU operations implementing this interface take a list of dependencies
    as `gpu.async.token` arguments and optionally return a `gpu.async.token`.

    The op doesn't start executing until all dependent ops producing the async
    dependency tokens have finished executing.

    If the op returns a token, the op merely schedules the execution on the
    device and returns immediately, without waiting for the execution to
    complete. On the other hand, if the op does not return a token, the op
    waits for the execution to complete.
  }];
  let cppNamespace = "::mlir::gpu";

  let methods = [
    InterfaceMethod<[{
        Query the operands that represent async dependency tokens.
      }],
      "OperandRange", "getAsyncDependencies", (ins), [{}], [{
        ConcreteOp op = cast<ConcreteOp>(this->getOperation());
        return op.getAsyncDependencies();
      }]
    >,
    InterfaceMethod<[{
        Adds a new token to the list of async dependencies if it is not already
        there.
      }],
      "void", "addAsyncDependency", (ins "Value":$token),
      [{}], [{
        if (!::llvm::is_contained(this->getAsyncDependencies(), token))
          ::mlir::gpu::addAsyncDependency(this->getOperation(), token);
      }]
    >,
    InterfaceMethod<[{
        Query the result that represents the async token to depend on.
      }],
      "Value", "getAsyncToken"
    >
  ];
}
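
// A rough example of the behaviour described above (SSA value names are
// arbitrary): async ops take earlier `!gpu.async.token` values as
// dependencies and, in their `async` form, return a new token immediately
// instead of blocking.
//
//   %t0 = gpu.wait async
//   %t1 = gpu.memcpy async [%t0] %dst, %src : memref<16xf32>, memref<16xf32>
//   gpu.wait [%t1]  // Blocks until the copy has completed.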

//===----------------------------------------------------------------------===//
// GPU Attributes.
//===----------------------------------------------------------------------===//

class GPU_Attr<string attrName, string attrMnemonic, list<Trait> traits = []>
    : AttrDef<GPU_Dialect, attrName, traits> {
  let mnemonic = attrMnemonic;
}

#endif // GPU_BASE