// RUN: mlir-opt -test-convert-to-spirv -split-input-file %s | FileCheck %s
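// Tests lowering of GPU dialect ops to the SPIR-V dialect via the generic
// convert-to-SPIR-V framework.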

module attributes {
  gpu.container_module,
  spirv.target_env = #spirv.target_env<#spirv.vce<v1.3, [Kernel, Addresses, Groups, GroupNonUniformArithmetic, GroupUniformArithmeticKHR], []>, #spirv.resource_limits<>>
} {

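// Checks that gpu.all_reduce with an addition accumulator lowers to a
// workgroup-scoped spirv.GroupNonUniformFAdd reduction.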
gpu.module @kernels {
  // CHECK-LABEL: spirv.func @all_reduce
  // CHECK-SAME: (%[[ARG0:.*]]: f32)
  // CHECK: %{{.*}} = spirv.GroupNonUniformFAdd <Workgroup> <Reduce> %[[ARG0]] : f32 -> f32
  gpu.func @all_reduce(%arg0 : f32) kernel
    attributes {spirv.entry_point_abi = #spirv.entry_point_abi<workgroup_size = [16, 1, 1]>} {
    %reduced = gpu.all_reduce add %arg0 {} : (f32) -> (f32)
    gpu.return
  }
}

}

// -----

module attributes {
  gpu.container_module,
  spirv.target_env = #spirv.target_env<#spirv.vce<v1.3, [Kernel, Addresses, Groups, GroupNonUniformArithmetic, GroupUniformArithmeticKHR], []>, #spirv.resource_limits<>>
} {

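// Checks that gpu.subgroup_reduce lowers to a subgroup-scoped
// spirv.GroupNonUniformFAdd reduction.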
gpu.module @kernels {
  // CHECK-LABEL: spirv.func @subgroup_reduce
  // CHECK-SAME: (%[[ARG0:.*]]: f32)
  // CHECK: %{{.*}} = spirv.GroupNonUniformFAdd <Subgroup> <Reduce> %[[ARG0]] : f32 -> f32
  gpu.func @subgroup_reduce(%arg0 : f32) kernel
    attributes {spirv.entry_point_abi = #spirv.entry_point_abi<workgroup_size = [16, 1, 1]>} {
    %reduced = gpu.subgroup_reduce add %arg0 {} : (f32) -> (f32)
    gpu.return
  }
}

}

// -----

module attributes {
  gpu.container_module,
  spirv.target_env = #spirv.target_env<
    #spirv.vce<v1.0, [Shader], [SPV_KHR_storage_buffer_storage_class]>, #spirv.resource_limits<>>
} {

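  // Checks that memref.load/memref.store on StorageBuffer memrefs lower to
  // spirv.AccessChain plus spirv.Load/spirv.Store, and that the kernel's
  // memref arguments receive the interface variable ABI attributes below.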
  // CHECK-LABEL: spirv.module @{{.*}} Logical GLSL450
  // CHECK-LABEL: spirv.func @load_store
  // CHECK-SAME: %[[ARG0:.*]]: !spirv.ptr<!spirv.struct<(!spirv.array<48 x f32, stride=4> [0])>, StorageBuffer> {spirv.interface_var_abi = #spirv.interface_var_abi<(0, 0)>}
  // CHECK-SAME: %[[ARG1:.*]]: !spirv.ptr<!spirv.struct<(!spirv.array<48 x f32, stride=4> [0])>, StorageBuffer> {spirv.interface_var_abi = #spirv.interface_var_abi<(0, 1)>}
  // CHECK-SAME: %[[ARG2:.*]]: !spirv.ptr<!spirv.struct<(!spirv.array<48 x f32, stride=4> [0])>, StorageBuffer> {spirv.interface_var_abi = #spirv.interface_var_abi<(0, 2)>}
  gpu.module @kernels {
    gpu.func @load_store(%arg0: memref<12x4xf32, #spirv.storage_class<StorageBuffer>>, %arg1: memref<12x4xf32, #spirv.storage_class<StorageBuffer>>, %arg2: memref<12x4xf32, #spirv.storage_class<StorageBuffer>>, %arg3: index, %arg4: index, %arg5: index, %arg6: index) kernel
      attributes {spirv.entry_point_abi = #spirv.entry_point_abi<workgroup_size = [16, 1, 1]>} {
      // CHECK: %[[PTR1:.*]] = spirv.AccessChain %[[ARG0]]
      // CHECK-NEXT: spirv.Load "StorageBuffer" %[[PTR1]]
      // CHECK: %[[PTR2:.*]] = spirv.AccessChain %[[ARG1]]
      // CHECK-NEXT: spirv.Load "StorageBuffer" %[[PTR2]]
      // CHECK: spirv.FAdd
      // CHECK: %[[PTR3:.*]] = spirv.AccessChain %[[ARG2]]
      // CHECK-NEXT: spirv.Store "StorageBuffer" %[[PTR3]]
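      // The ID and dimension queries below convert to reads of the
      // corresponding SPIR-V built-in variables (e.g. WorkgroupId,
      // LocalInvocationId); this test only checks the load/store lowering.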
      %0 = gpu.block_id x
      %1 = gpu.block_id y
      %2 = gpu.block_id z
      %3 = gpu.thread_id x
      %4 = gpu.thread_id y
      %5 = gpu.thread_id z
      %6 = gpu.grid_dim x
      %7 = gpu.grid_dim y
      %8 = gpu.grid_dim z
      %9 = gpu.block_dim x
      %10 = gpu.block_dim y
      %11 = gpu.block_dim z
      %12 = arith.addi %arg3, %0 : index
      %13 = arith.addi %arg4, %3 : index
      %14 = memref.load %arg0[%12, %13] : memref<12x4xf32, #spirv.storage_class<StorageBuffer>>
      %15 = memref.load %arg1[%12, %13] : memref<12x4xf32, #spirv.storage_class<StorageBuffer>>
      %16 = arith.addf %14, %15 : f32
      memref.store %16, %arg2[%12, %13] : memref<12x4xf32, #spirv.storage_class<StorageBuffer>>
      gpu.return
    }
  }
}