// RUN: mlir-opt %s --linalg-generalize-named-ops \
// RUN:             --pre-sparsification-rewrite \
// RUN:             --sparse-reinterpret-map \
// RUN:             --sparsification="parallelization-strategy=dense-outer-loop" \
// RUN:             --sparse-gpu-codegen | FileCheck %s

#CSR = #sparse_tensor.encoding<{ map = (d0, d1) -> (d0 : dense, d1 : compressed) }>

//
// Compute the matrix-vector product y = Ax.
//
// CHECK-LABEL: gpu.module @sparse_kernels
// CHECK:       gpu.func @kernel0(
// CHECK-SAME:      %[[VAL_0:.*0]]: index,
// CHECK-SAME:      %[[VAL_1:.*1]]: memref<?xf64>,
// CHECK-SAME:      %[[VAL_2:.*2]]: memref<?xindex>,
// CHECK-SAME:      %[[VAL_3:.*3]]: memref<?xindex>,
// CHECK-SAME:      %[[VAL_4:.*4]]: memref<?xf64>,
// CHECK-SAME:      %[[VAL_5:.*5]]: memref<?xf64>) kernel {
// CHECK:         %[[VAL_6:.*]] = arith.constant 1 : index
// CHECK:         %[[VAL_7:.*]] = gpu.block_id x
// CHECK:         %[[VAL_8:.*]] = gpu.block_dim x
// CHECK:         %[[VAL_9:.*]] = gpu.thread_id x
// CHECK:         %[[VAL_10:.*]] = gpu.grid_dim x
// CHECK:         %[[VAL_11:.*]] = arith.muli %[[VAL_7]], %[[VAL_8]] : index
// CHECK:         %[[VAL_12:.*]] = arith.addi %[[VAL_11]], %[[VAL_9]] : index
// CHECK:         %[[VAL_13:.*]] = arith.muli %[[VAL_8]], %[[VAL_10]] : index
// CHECK:         scf.for %[[VAL_14:.*]] = %[[VAL_12]] to %[[VAL_0]] step %[[VAL_13]] {
// CHECK:           %[[VAL_15:.*]] = memref.load %[[VAL_1]]{{\[}}%[[VAL_14]]] : memref<?xf64>
// CHECK:           %[[VAL_16:.*]] = memref.load %[[VAL_2]]{{\[}}%[[VAL_14]]] : memref<?xindex>
// CHECK:           %[[VAL_17:.*]] = arith.addi %[[VAL_14]], %[[VAL_6]] : index
// CHECK:           %[[VAL_18:.*]] = memref.load %[[VAL_2]]{{\[}}%[[VAL_17]]] : memref<?xindex>
// CHECK:           %[[VAL_19:.*]] = scf.for %[[VAL_20:.*]] = %[[VAL_16]] to %[[VAL_18]] step %[[VAL_6]] iter_args(%[[VAL_21:.*]] = %[[VAL_15]]) -> (f64) {
// CHECK:             %[[VAL_22:.*]] = memref.load %[[VAL_3]]{{\[}}%[[VAL_20]]] : memref<?xindex>
// CHECK:             %[[VAL_23:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_20]]] : memref<?xf64>
// CHECK:             %[[VAL_24:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_22]]] : memref<?xf64>
// CHECK:             %[[VAL_25:.*]] = arith.mulf %[[VAL_23]], %[[VAL_24]] : f64
// CHECK:             %[[VAL_26:.*]] = arith.addf %[[VAL_21]], %[[VAL_25]] : f64
// CHECK:             scf.yield %[[VAL_26]] : f64
// CHECK:           } {"Emitted from" = "linalg.generic"}
// CHECK:           memref.store %[[VAL_27:.*]], %[[VAL_1]]{{\[}}%[[VAL_14]]] : memref<?xf64>
// CHECK:         }
// CHECK:         gpu.return
// CHECK:       }
//
// CHECK-LABEL: func.func @matvec
// CHECK:       gpu.wait async
// CHECK:       gpu.alloc async
// CHECK:       %[[S0:.*]] = gpu.memcpy async
// CHECK:       gpu.wait async
// CHECK:       gpu.alloc async
// CHECK:       %[[S1:.*]] = gpu.memcpy async
// CHECK:       gpu.wait async
// CHECK:       gpu.alloc async
// CHECK:       %[[S2:.*]] = gpu.memcpy async
// CHECK:       gpu.wait async
// CHECK:       gpu.alloc async
// CHECK:       %[[S3:.*]] = gpu.memcpy async
// CHECK:       gpu.wait async
// CHECK:       gpu.alloc async
// CHECK:       %[[S4:.*]] = gpu.memcpy async
// CHECK:       gpu.wait [%[[S0]], %[[S1]], %[[S2]], %[[S3]], %[[S4]]
// CHECK:       %[[T0:.*]] = gpu.launch_func async @sparse_kernels::@kernel0 blocks
// CHECK:       %[[M0:.*]] = gpu.memcpy async [%[[T0]]]
// CHECK:       %[[M1:.*]] = gpu.dealloc async [%[[M0]]]
// CHECK:       %[[M2:.*]] = gpu.wait async
// CHECK:       %[[M3:.*]] = gpu.dealloc async [%[[M2]]]
// CHECK:       %[[M4:.*]] = gpu.wait async
// CHECK:       %[[M5:.*]] = gpu.dealloc async [%[[M4]]]
// CHECK:       %[[M6:.*]] = gpu.wait async
// CHECK:       %[[M7:.*]] = gpu.dealloc async [%[[M6]]]
// CHECK:       %[[M8:.*]] = gpu.wait async
// CHECK:       %[[M9:.*]] = gpu.dealloc async [%[[M8]]]
// CHECK:       gpu.wait [%[[M1]], %[[M3]], %[[M5]], %[[M7]], %[[M9]]
//
func.func @matvec(%A: tensor<?x?xf64, #CSR>, %x: tensor<?xf64>, %y_in: tensor<?xf64>) -> tensor<?xf64> {
  %y_out = linalg.matvec
      ins(%A, %x: tensor<?x?xf64, #CSR>, tensor<?xf64>)
      outs(%y_in: tensor<?xf64>) -> tensor<?xf64>
  return %y_out : tensor<?xf64>
}
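
//
// For reference only (carries no CHECK lines): a C-style sketch of the
// grid-stride CSR SpMV loop that the kernel CHECKs above encode. The names
// nrows, y, pos, crd, val, and x are illustrative labels for the kernel
// arguments %[[VAL_0]]..%[[VAL_5]], inferred from the loads in @kernel0:
//
//   for (int i = blockIdx.x * blockDim.x + threadIdx.x;
//        i < nrows; i += blockDim.x * gridDim.x) {  // dense outer loop over rows
//     double sum = y[i];                            // running sum seeded from y
//     for (int j = pos[i]; j < pos[i + 1]; j++)     // compressed inner loop
//       sum += val[j] * x[crd[j]];                  // A(i, crd[j]) * x(crd[j])
//     y[i] = sum;
//   }
//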