xref: /llvm-project/mlir/test/Dialect/SparseTensor/GPU/gpu_matvec.mlir (revision 06a65ce500a632048db1058de9ca61072004a640)
119466ebcSAart Bik// RUN: mlir-opt %s --linalg-generalize-named-ops \
219466ebcSAart Bik// RUN:             --pre-sparsification-rewrite \
3*06a65ce5SPeiming Liu// RUN:             --sparse-reinterpret-map \
419466ebcSAart Bik// RUN:             --sparsification="parallelization-strategy=dense-outer-loop" \
519466ebcSAart Bik// RUN:             --sparse-gpu-codegen | FileCheck %s
619466ebcSAart Bik
7e2e429d9SYinying Li#CSR = #sparse_tensor.encoding<{ map = (d0, d1) -> (d0 : dense, d1 : compressed) }>
819466ebcSAart Bik
919466ebcSAart Bik//
1019466ebcSAart Bik// Compute matrix-vector product y += Ax (linalg.matvec accumulates onto its outs operand)
1119466ebcSAart Bik//
124889214aSAart Bik// CHECK-LABEL: gpu.module @sparse_kernels
134889214aSAart Bik// CHECK:       gpu.func @kernel0(
1419466ebcSAart Bik// CHECK-SAME:    %[[VAL_0:.*0]]: index,
1519466ebcSAart Bik// CHECK-SAME:    %[[VAL_1:.*1]]: memref<?xf64>,
1619466ebcSAart Bik// CHECK-SAME:    %[[VAL_2:.*2]]: memref<?xindex>,
1719466ebcSAart Bik// CHECK-SAME:    %[[VAL_3:.*3]]: memref<?xindex>,
1819466ebcSAart Bik// CHECK-SAME:    %[[VAL_4:.*4]]: memref<?xf64>,
1919466ebcSAart Bik// CHECK-SAME:    %[[VAL_5:.*5]]: memref<?xf64>) kernel {
2019466ebcSAart Bik// CHECK:         %[[VAL_6:.*]] = arith.constant 1 : index
2119466ebcSAart Bik// CHECK:         %[[VAL_7:.*]] = gpu.block_id  x
2219466ebcSAart Bik// CHECK:         %[[VAL_8:.*]] = gpu.block_dim  x
2319466ebcSAart Bik// CHECK:         %[[VAL_9:.*]] = gpu.thread_id  x
2419466ebcSAart Bik// CHECK:         %[[VAL_10:.*]] = gpu.grid_dim  x
2519466ebcSAart Bik// CHECK:         %[[VAL_11:.*]] = arith.muli %[[VAL_7]], %[[VAL_8]] : index
2619466ebcSAart Bik// CHECK:         %[[VAL_12:.*]] = arith.addi %[[VAL_11]], %[[VAL_9]] : index
2719466ebcSAart Bik// CHECK:         %[[VAL_13:.*]] = arith.muli %[[VAL_8]], %[[VAL_10]] : index
2819466ebcSAart Bik// CHECK:         scf.for %[[VAL_14:.*]] = %[[VAL_12]] to %[[VAL_0]] step %[[VAL_13]] {
2919466ebcSAart Bik// CHECK:           %[[VAL_15:.*]] = memref.load %[[VAL_1]]{{\[}}%[[VAL_14]]] : memref<?xf64>
3019466ebcSAart Bik// CHECK:           %[[VAL_16:.*]] = memref.load %[[VAL_2]]{{\[}}%[[VAL_14]]] : memref<?xindex>
3119466ebcSAart Bik// CHECK:           %[[VAL_17:.*]] = arith.addi %[[VAL_14]], %[[VAL_6]] : index
3219466ebcSAart Bik// CHECK:           %[[VAL_18:.*]] = memref.load %[[VAL_2]]{{\[}}%[[VAL_17]]] : memref<?xindex>
3319466ebcSAart Bik// CHECK:           %[[VAL_19:.*]] = scf.for %[[VAL_20:.*]] = %[[VAL_16]] to %[[VAL_18]] step %[[VAL_6]] iter_args(%[[VAL_21:.*]] = %[[VAL_15]]) -> (f64) {
3419466ebcSAart Bik// CHECK:             %[[VAL_22:.*]] = memref.load %[[VAL_3]]{{\[}}%[[VAL_20]]] : memref<?xindex>
3519466ebcSAart Bik// CHECK:             %[[VAL_23:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_20]]] : memref<?xf64>
3619466ebcSAart Bik// CHECK:             %[[VAL_24:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_22]]] : memref<?xf64>
3719466ebcSAart Bik// CHECK:             %[[VAL_25:.*]] = arith.mulf %[[VAL_23]], %[[VAL_24]] : f64
3819466ebcSAart Bik// CHECK:             %[[VAL_26:.*]] = arith.addf %[[VAL_21]], %[[VAL_25]] : f64
3919466ebcSAart Bik// CHECK:             scf.yield %[[VAL_26]] : f64
4019466ebcSAart Bik// CHECK:           } {"Emitted from" = "linalg.generic"}
4119466ebcSAart Bik// CHECK:           memref.store %[[VAL_27:.*]], %[[VAL_1]]{{\[}}%[[VAL_14]]] : memref<?xf64>
4219466ebcSAart Bik// CHECK:         }
4319466ebcSAart Bik// CHECK:         gpu.return
4419466ebcSAart Bik// CHECK:       }
4519466ebcSAart Bik//
4619466ebcSAart Bik// CHECK-LABEL: func.func @matvec
4786888e42SAart Bik// CHECK:       gpu.wait async
4886888e42SAart Bik// CHECK:       gpu.alloc async
4986888e42SAart Bik// CHECK:       %[[S0:.*]] = gpu.memcpy async
5086888e42SAart Bik// CHECK:       gpu.wait async
5186888e42SAart Bik// CHECK:       gpu.alloc async
5286888e42SAart Bik// CHECK:       %[[S1:.*]] = gpu.memcpy async
5386888e42SAart Bik// CHECK:       gpu.wait async
5486888e42SAart Bik// CHECK:       gpu.alloc async
5586888e42SAart Bik// CHECK:       %[[S2:.*]] = gpu.memcpy async
5686888e42SAart Bik// CHECK:       gpu.wait async
5786888e42SAart Bik// CHECK:       gpu.alloc async
5886888e42SAart Bik// CHECK:       %[[S3:.*]] = gpu.memcpy async
5986888e42SAart Bik// CHECK:       gpu.wait async
6086888e42SAart Bik// CHECK:       gpu.alloc async
6186888e42SAart Bik// CHECK:       %[[S4:.*]] = gpu.memcpy async
6286888e42SAart Bik// CHECK:       gpu.wait [%[[S0]], %[[S1]], %[[S2]], %[[S3]], %[[S4]]
6386888e42SAart Bik// CHECK:       %[[T0:.*]] = gpu.launch_func async @sparse_kernels::@kernel0 blocks
6486888e42SAart Bik// CHECK:       %[[M0:.*]] = gpu.memcpy async [%[[T0]]]
6586888e42SAart Bik// CHECK:       %[[M1:.*]] = gpu.dealloc async [%[[M0]]]
6686888e42SAart Bik// CHECK:       %[[M2:.*]] = gpu.wait async
6786888e42SAart Bik// CHECK:       %[[M3:.*]] = gpu.dealloc async [%[[M2]]]
6886888e42SAart Bik// CHECK:       %[[M4:.*]] = gpu.wait async
6986888e42SAart Bik// CHECK:       %[[M5:.*]] = gpu.dealloc async [%[[M4]]]
7086888e42SAart Bik// CHECK:       %[[M6:.*]] = gpu.wait async
7186888e42SAart Bik// CHECK:       %[[M7:.*]] = gpu.dealloc async [%[[M6]]]
7286888e42SAart Bik// CHECK:       %[[M8:.*]] = gpu.wait async
7386888e42SAart Bik// CHECK:       %[[M9:.*]] = gpu.dealloc async [%[[M8]]]
7486888e42SAart Bik// CHECK:       gpu.wait [%[[M1]], %[[M3]], %[[M5]], %[[M7]], %[[M9]]
7519466ebcSAart Bik//
7619466ebcSAart Bikfunc.func @matvec(%A: tensor<?x?xf64, #CSR>, %x: tensor<?xf64>, %y_in: tensor<?xf64>) -> tensor<?xf64> {
  // Sparse (CSR) matrix times dense vector. linalg.matvec reads the outs
  // operand, so this accumulates onto %y_in: the checked kernel loads the
  // incoming y element as the reduction init, folds in A[i,j]*x[j] with
  // arith.addf, and stores the result back.
  // The RUN pipeline (sparsification with dense-outer-loop parallelization,
  // then sparse-gpu-codegen) is expected to outline the row loop into the
  // gpu.launch_func / @kernel0 checked above.
7719466ebcSAart Bik  %y_out = linalg.matvec
7819466ebcSAart Bik      ins(%A, %x: tensor<?x?xf64, #CSR>, tensor<?xf64>)
7919466ebcSAart Bik      outs(%y_in: tensor<?xf64>) -> tensor<?xf64>
8019466ebcSAart Bik  return %y_out : tensor<?xf64>
8119466ebcSAart Bik}
82