//--------------------------------------------------------------------------------------------------
// WHEN CREATING A NEW TEST, PLEASE JUST COPY & PASTE WITHOUT EDITS.
//
// Set-up that's shared across all tests in this directory. In principle, this
// config could be moved to lit.local.cfg. However, there are downstream users
// that do not use these LIT config files. Hence, this is kept inline.
//
// DEFINE: %{sparsifier_opts} = enable-runtime-library=true
// DEFINE: %{sparsifier_opts_sve} = enable-arm-sve=true %{sparsifier_opts}
// DEFINE: %{compile} = mlir-opt %s --sparsifier="%{sparsifier_opts}"
// DEFINE: %{compile_sve} = mlir-opt %s --sparsifier="%{sparsifier_opts_sve}"
// DEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils
// DEFINE: %{run_libs_sve} = -shared-libs=%native_mlir_runner_utils,%native_mlir_c_runner_utils
// DEFINE: %{run_opts} = -e main -entry-point-result=void
// DEFINE: %{run} = mlir-runner %{run_opts} %{run_libs}
// DEFINE: %{run_sve} = %mcr_aarch64_cmd --march=aarch64 --mattr="+sve" %{run_opts} %{run_libs_sve}
//
// DEFINE: %{env} =
//--------------------------------------------------------------------------------------------------

// RUN: %{compile} | %{run} | FileCheck %s
//
// Do the same run, but now with direct IR generation.
// REDEFINE: %{sparsifier_opts} = enable-runtime-library=false enable-buffer-initialization=true
// RUN: %{compile} | %{run} | FileCheck %s
//
// Do the same run, but now with vectorization.
// REDEFINE: %{sparsifier_opts} = enable-runtime-library=false vl=4 enable-buffer-initialization=true
// RUN: %{compile} | %{run} | FileCheck %s
//
// Do the same run, but now with VLA vectorization.
// RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | %{run_sve} | FileCheck %s %}

#SparseVector = #sparse_tensor.encoding<{map = (d0) -> (d0 : compressed)}>
#CSR = #sparse_tensor.encoding<{map = (d0, d1) -> (d0 : dense, d1 : compressed)}>
#CSC = #sparse_tensor.encoding<{
  map = (d0, d1) -> (d1 : dense, d0 : compressed)
}>
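
// #SparseVector stores a 1-D tensor with one compressed level. #CSR keeps
// rows dense and compresses the coordinates within each row; #CSC is its
// column-major counterpart. Note that #CSC is defined here but not used by
// the kernels below.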

//
// Traits for tensor operations.
//
#trait_vec_select = {
  indexing_maps = [
    affine_map<(i) -> (i)>, // A (in)
    affine_map<(i) -> (i)>  // C (out)
  ],
  iterator_types = ["parallel"]
}

#trait_mat_select = {
  indexing_maps = [
    affine_map<(i,j) -> (i,j)>,  // A (in)
    affine_map<(i,j) -> (i,j)>   // X (out)
  ],
  iterator_types = ["parallel", "parallel"]
}
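
// Both traits use identity indexing maps, so each kernel is purely
// element-wise: the sparsifier only needs to visit the stored entries of the
// input, and sparse_tensor.select decides per stored entry whether it is
// kept in (predicate true) or omitted from (predicate false) the output.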

module {
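  // Keeps only the entries of a sparse vector whose value is >= 1.0; entries
  // that fail the predicate are simply not stored in the result. For the
  // input built in @main (values 1, 2, -4, 0, 5 at indices 1, 3, 5, 7, 9)
  // this leaves values 1, 2, 5 at indices 1, 3, 9.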
  func.func @vecSelect(%arga: tensor<?xf64, #SparseVector>) -> tensor<?xf64, #SparseVector> {
    %c0 = arith.constant 0 : index
    %cf1 = arith.constant 1.0 : f64
    %d0 = tensor.dim %arga, %c0 : tensor<?xf64, #SparseVector>
    %xv = tensor.empty(%d0): tensor<?xf64, #SparseVector>
    %0 = linalg.generic #trait_vec_select
      ins(%arga: tensor<?xf64, #SparseVector>)
      outs(%xv: tensor<?xf64, #SparseVector>) {
        ^bb(%a: f64, %b: f64):
          %1 = sparse_tensor.select %a : f64 {
              ^bb0(%x: f64):
                %keep = arith.cmpf "oge", %x, %cf1 : f64
                sparse_tensor.yield %keep : i1
            }
          linalg.yield %1 : f64
    } -> tensor<?xf64, #SparseVector>
    return %0 : tensor<?xf64, #SparseVector>
  }
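
  // Keeps only the strictly upper-triangular entries (column > row) of a CSR
  // matrix. For the matrix built in @main this selects the entries at (0,3),
  // (1,4), (2,3), and (3,4), i.e. values 1, 2, 4, 6.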
  func.func @matUpperTriangle(%arga: tensor<?x?xf64, #CSR>) -> tensor<?x?xf64, #CSR> {
    %c0 = arith.constant 0 : index
    %c1 = arith.constant 1 : index
    %d0 = tensor.dim %arga, %c0 : tensor<?x?xf64, #CSR>
    %d1 = tensor.dim %arga, %c1 : tensor<?x?xf64, #CSR>
    %xv = tensor.empty(%d0, %d1): tensor<?x?xf64, #CSR>
    %0 = linalg.generic #trait_mat_select
      ins(%arga: tensor<?x?xf64, #CSR>)
      outs(%xv: tensor<?x?xf64, #CSR>) {
        ^bb(%a: f64, %b: f64):
          %row = linalg.index 0 : index
          %col = linalg.index 1 : index
          %1 = sparse_tensor.select %a : f64 {
              ^bb0(%x: f64):
                %keep = arith.cmpi "ugt", %col, %row : index
                sparse_tensor.yield %keep : i1
            }
          linalg.yield %1 : f64
    } -> tensor<?x?xf64, #CSR>
    return %0 : tensor<?x?xf64, #CSR>
  }

  // Driver method to call and verify the vector and matrix kernels.
  func.func @main() {
    %c0 = arith.constant 0 : index

    // Set up the sparse vector and matrix inputs.
    %v1 = arith.constant sparse<
        [ [1], [3], [5], [7], [9] ],
        [ 1.0, 2.0, -4.0, 0.0, 5.0 ]
    > : tensor<10xf64>
    %m1 = arith.constant sparse<
        [ [0, 3], [1, 4], [2, 1], [2, 3], [3, 3], [3, 4], [4, 2] ],
        [ 1., 2., 3., 4., 5., 6., 7.]
    > : tensor<5x5xf64>
    %sv1 = sparse_tensor.convert %v1 : tensor<10xf64> to tensor<?xf64, #SparseVector>
    %sm1 = sparse_tensor.convert %m1 : tensor<5x5xf64> to tensor<?x?xf64, #CSR>

    // Call the sparse vector and matrix kernels.
    %1 = call @vecSelect(%sv1) : (tensor<?xf64, #SparseVector>) -> tensor<?xf64, #SparseVector>
    %2 = call @matUpperTriangle(%sm1) : (tensor<?x?xf64, #CSR>) -> tensor<?x?xf64, #CSR>

    //
    // Verify the results.
    //
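    // The four prints below emit, in order: the original sparse vector, the
    // original sparse matrix, the result of @vecSelect, and the result of
    // @matUpperTriangle. The CHECK blocks appear in the same order.
    //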
    // CHECK:      ---- Sparse Tensor ----
    // CHECK-NEXT: nse = 5
    // CHECK-NEXT: dim = ( 10 )
    // CHECK-NEXT: lvl = ( 10 )
    // CHECK-NEXT: pos[0] : ( 0, 5 )
    // CHECK-NEXT: crd[0] : ( 1, 3, 5, 7, 9 )
    // CHECK-NEXT: values : ( 1, 2, -4, 0, 5 )
    // CHECK-NEXT: ----
    // CHECK:      ---- Sparse Tensor ----
    // CHECK-NEXT: nse = 7
    // CHECK-NEXT: dim = ( 5, 5 )
    // CHECK-NEXT: lvl = ( 5, 5 )
    // CHECK-NEXT: pos[1] : ( 0, 1, 2, 4, 6, 7 )
    // CHECK-NEXT: crd[1] : ( 3, 4, 1, 3, 3, 4, 2 )
    // CHECK-NEXT: values : ( 1, 2, 3, 4, 5, 6, 7 )
    // CHECK-NEXT: ----
    // CHECK:      ---- Sparse Tensor ----
    // CHECK-NEXT: nse = 3
    // CHECK-NEXT: dim = ( 10 )
    // CHECK-NEXT: lvl = ( 10 )
    // CHECK-NEXT: pos[0] : ( 0, 3 )
    // CHECK-NEXT: crd[0] : ( 1, 3, 9 )
    // CHECK-NEXT: values : ( 1, 2, 5 )
    // CHECK-NEXT: ----
    // CHECK:      ---- Sparse Tensor ----
    // CHECK-NEXT: nse = 4
    // CHECK-NEXT: dim = ( 5, 5 )
    // CHECK-NEXT: lvl = ( 5, 5 )
    // CHECK-NEXT: pos[1] : ( 0, 1, 2, 3, 4, 4 )
    // CHECK-NEXT: crd[1] : ( 3, 4, 3, 4 )
    // CHECK-NEXT: values : ( 1, 2, 4, 6 )
    // CHECK-NEXT: ----
    //
    sparse_tensor.print %sv1 : tensor<?xf64, #SparseVector>
    sparse_tensor.print %sm1 : tensor<?x?xf64, #CSR>
    sparse_tensor.print %1 : tensor<?xf64, #SparseVector>
    sparse_tensor.print %2 : tensor<?x?xf64, #CSR>

    // Release the resources.
    bufferization.dealloc_tensor %sv1 : tensor<?xf64, #SparseVector>
    bufferization.dealloc_tensor %sm1 : tensor<?x?xf64, #CSR>
    bufferization.dealloc_tensor %1 : tensor<?xf64, #SparseVector>
    bufferization.dealloc_tensor %2 : tensor<?x?xf64, #CSR>
    return
  }
}