xref: /llvm-project/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_complex_ops.mlir (revision eb206e9ea84eff0a0596fed2de8316d924f946d1)
1//--------------------------------------------------------------------------------------------------
2// WHEN CREATING A NEW TEST, PLEASE JUST COPY & PASTE WITHOUT EDITS.
3//
4// Set-up that's shared across all tests in this directory. In principle, this
5// config could be moved to lit.local.cfg. However, there are downstream users that
6//  do not use these LIT config files. Hence why this is kept inline.
7//
8// DEFINE: %{sparsifier_opts} = enable-runtime-library=true
9// DEFINE: %{sparsifier_opts_sve} = enable-arm-sve=true %{sparsifier_opts}
10// DEFINE: %{compile} = mlir-opt %s --sparsifier="%{sparsifier_opts}"
11// DEFINE: %{compile_sve} = mlir-opt %s --sparsifier="%{sparsifier_opts_sve}"
12// DEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils
13// DEFINE: %{run_libs_sve} = -shared-libs=%native_mlir_runner_utils,%native_mlir_c_runner_utils
14// DEFINE: %{run_opts} = -e main -entry-point-result=void
15// DEFINE: %{run} = mlir-runner %{run_opts} %{run_libs}
16// DEFINE: %{run_sve} = %mcr_aarch64_cmd --march=aarch64 --mattr="+sve" %{run_opts} %{run_libs_sve}
17//
18// DEFINE: %{env} =
19//--------------------------------------------------------------------------------------------------
20
21// RUN: %{compile} | %{run} | FileCheck %s
22//
23// Do the same run, but now with direct IR generation.
24// REDEFINE: %{sparsifier_opts} = enable-runtime-library=false
25// RUN: %{compile} | %{run} | FileCheck %s
26//
27// Do the same run, but now with direct IR generation and vectorization.
28// REDEFINE: %{sparsifier_opts} = enable-runtime-library=false vl=2 reassociate-fp-reductions=true enable-index-optimizations=true
29// RUN: %{compile} | %{run} | FileCheck %s
30//
31// Do the same run, but now with direct IR generation and VLA vectorization.
32// RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | %{run_sve} | FileCheck %s %}
33
// A sparse vector: a rank-1 tensor whose single dimension is stored compressed.
#SparseVector = #sparse_tensor.encoding<{map = (d0) -> (d0 : compressed)}>

// Trait for elementwise unary kernels over a 1-d tensor.
#trait_op1 = {
  indexing_maps = [
    affine_map<(i) -> (i)>,  // a (in)
    affine_map<(i) -> (i)>   // x (out)
  ],
  iterator_types = ["parallel"],
  doc = "x(i) = OP a(i)"
}

// Trait for elementwise binary kernels over two 1-d tensors.
#trait_op2 = {
  indexing_maps = [
    affine_map<(i) -> (i)>,  // a (in)
    affine_map<(i) -> (i)>,  // b (in)
    affine_map<(i) -> (i)>   // x (out)
  ],
  iterator_types = ["parallel"],
  doc = "x(i) = a(i) OP b(i)"
}
54
55module {
  // Kernel: x(i) = a(i) - (-b(i)), i.e. an elementwise add expressed through
  // complex.neg and complex.sub, so the sparsifier must iterate the union of
  // the two operands' nonzero coordinates (nse = 4 in the CHECK output below).
  func.func @cops(%arga: tensor<?xcomplex<f64>, #SparseVector>,
                  %argb: tensor<?xcomplex<f64>, #SparseVector>)
                 -> tensor<?xcomplex<f64>, #SparseVector> {
    %c0 = arith.constant 0 : index
    // Size the dynamic output from the first input.
    %d = tensor.dim %arga, %c0 : tensor<?xcomplex<f64>, #SparseVector>
    %xv = tensor.empty(%d) : tensor<?xcomplex<f64>, #SparseVector>
    %0 = linalg.generic #trait_op2
       ins(%arga, %argb: tensor<?xcomplex<f64>, #SparseVector>,
                         tensor<?xcomplex<f64>, #SparseVector>)
        outs(%xv: tensor<?xcomplex<f64>, #SparseVector>) {
        ^bb(%a: complex<f64>, %b: complex<f64>, %x: complex<f64>):
          %1 = complex.neg %b : complex<f64>
          %2 = complex.sub %a, %1 : complex<f64>
          linalg.yield %2 : complex<f64>
    } -> tensor<?xcomplex<f64>, #SparseVector>
    return %0 : tensor<?xcomplex<f64>, #SparseVector>
  }
73
  // Kernel: x(i) = sin(a(i)), exercising complex.sin on sparse input.
  func.func @csin(%arga: tensor<?xcomplex<f64>, #SparseVector>)
                 -> tensor<?xcomplex<f64>, #SparseVector> {
    %c0 = arith.constant 0 : index
    %d = tensor.dim %arga, %c0 : tensor<?xcomplex<f64>, #SparseVector>
    %xv = tensor.empty(%d) : tensor<?xcomplex<f64>, #SparseVector>
    %0 = linalg.generic #trait_op1
       ins(%arga: tensor<?xcomplex<f64>, #SparseVector>)
        outs(%xv: tensor<?xcomplex<f64>, #SparseVector>) {
        ^bb(%a: complex<f64>, %x: complex<f64>):
          %1 = complex.sin %a : complex<f64>
          linalg.yield %1 : complex<f64>
    } -> tensor<?xcomplex<f64>, #SparseVector>
    return %0 : tensor<?xcomplex<f64>, #SparseVector>
  }
88
  // Kernel: x(i) = sqrt(a(i)), exercising complex.sqrt on sparse input.
  func.func @complex_sqrt(%arga: tensor<?xcomplex<f64>, #SparseVector>)
                 -> tensor<?xcomplex<f64>, #SparseVector> {
    %c0 = arith.constant 0 : index
    %d = tensor.dim %arga, %c0 : tensor<?xcomplex<f64>, #SparseVector>
    %xv = tensor.empty(%d) : tensor<?xcomplex<f64>, #SparseVector>
    %0 = linalg.generic #trait_op1
       ins(%arga: tensor<?xcomplex<f64>, #SparseVector>)
        outs(%xv: tensor<?xcomplex<f64>, #SparseVector>) {
        ^bb(%a: complex<f64>, %x: complex<f64>):
          %1 = complex.sqrt %a : complex<f64>
          linalg.yield %1 : complex<f64>
    } -> tensor<?xcomplex<f64>, #SparseVector>
    return %0 : tensor<?xcomplex<f64>, #SparseVector>
  }
103
104  func.func @complex_tanh(%arga: tensor<?xcomplex<f64>, #SparseVector>)
105                 -> tensor<?xcomplex<f64>, #SparseVector> {
106    %c0 = arith.constant 0 : index
107    %d = tensor.dim %arga, %c0 : tensor<?xcomplex<f64>, #SparseVector>
108    %xv = tensor.empty(%d) : tensor<?xcomplex<f64>, #SparseVector>
109    %0 = linalg.generic #trait_op1
110       ins(%arga: tensor<?xcomplex<f64>, #SparseVector>)
111        outs(%xv: tensor<?xcomplex<f64>, #SparseVector>) {
112       ^bb(%a: complex<f64>, %x: complex<f64>):
113          %1 = complex.tanh %a : complex<f64>
114          linalg.yield %1 : complex<f64>
115   } -> tensor<?xcomplex<f64>, #SparseVector>
116    return %0 : tensor<?xcomplex<f64>, #SparseVector>
117  }
118
  // Kernel: x(i) = expm1(log1p(a(i))). Mathematically an identity, so the
  // CHECK block below expects the original input values back; the point is to
  // exercise chained complex.log1p / complex.expm1 inside one sparse kernel.
  func.func @clog1p_expm1(%arga: tensor<?xcomplex<f64>, #SparseVector>)
                 -> tensor<?xcomplex<f64>, #SparseVector> {
    %c0 = arith.constant 0 : index
    %d = tensor.dim %arga, %c0 : tensor<?xcomplex<f64>, #SparseVector>
    %xv = tensor.empty(%d) : tensor<?xcomplex<f64>, #SparseVector>
    %0 = linalg.generic #trait_op1
       ins(%arga: tensor<?xcomplex<f64>, #SparseVector>)
        outs(%xv: tensor<?xcomplex<f64>, #SparseVector>) {
        ^bb(%a: complex<f64>, %x: complex<f64>):
          %1 = complex.log1p %a : complex<f64>
          %2 = complex.expm1 %1 : complex<f64>
          linalg.yield %2 : complex<f64>
    } -> tensor<?xcomplex<f64>, #SparseVector>
    return %0 : tensor<?xcomplex<f64>, #SparseVector>
  }
134
135  func.func @cdiv(%arga: tensor<?xcomplex<f64>, #SparseVector>)
136                 -> tensor<?xcomplex<f64>, #SparseVector> {
137    %c0 = arith.constant 0 : index
138    %d = tensor.dim %arga, %c0 : tensor<?xcomplex<f64>, #SparseVector>
139    %xv = tensor.empty(%d) : tensor<?xcomplex<f64>, #SparseVector>
140    %c = complex.constant [2.0 : f64, 0.0 : f64] : complex<f64>
141    %0 = linalg.generic #trait_op1
142       ins(%arga: tensor<?xcomplex<f64>, #SparseVector>)
143        outs(%xv: tensor<?xcomplex<f64>, #SparseVector>) {
144        ^bb(%a: complex<f64>, %x: complex<f64>):
145          %1 = complex.div %a, %c  : complex<f64>
146          linalg.yield %1 : complex<f64>
147    } -> tensor<?xcomplex<f64>, #SparseVector>
148    return %0 : tensor<?xcomplex<f64>, #SparseVector>
149  }
150
  // Kernel: x(i) = |a(i)|. Unlike the other kernels, the element type changes
  // from complex<f64> to f64, so the output is a sparse vector of reals.
  func.func @cabs(%arga: tensor<?xcomplex<f64>, #SparseVector>)
                 -> tensor<?xf64, #SparseVector> {
    %c0 = arith.constant 0 : index
    %d = tensor.dim %arga, %c0 : tensor<?xcomplex<f64>, #SparseVector>
    %xv = tensor.empty(%d) : tensor<?xf64, #SparseVector>
    %0 = linalg.generic #trait_op1
       ins(%arga: tensor<?xcomplex<f64>, #SparseVector>)
        outs(%xv: tensor<?xf64, #SparseVector>) {
        ^bb(%a: complex<f64>, %x: f64):
          %1 = complex.abs %a : complex<f64>
          linalg.yield %1 : f64
    } -> tensor<?xf64, #SparseVector>
    return %0 : tensor<?xf64, #SparseVector>
  }
165
166  // Driver method to call and verify complex kernels.
  func.func @main() {
    // Setup sparse vectors.
    // %v1 and %v2 are dense constants with 3 nonzeros each; they overlap at
    // indices 28 and 31 but differ at 0 vs 1 (so @cops produces 4 nonzeros).
    %v1 = arith.constant sparse<
       [ [0], [28], [31] ],
         [ (-5.13, 2.0), (3.0, 4.0), (5.0, 6.0) ] > : tensor<32xcomplex<f64>>
    %v2 = arith.constant sparse<
       [ [1], [28], [31] ],
         [ (1.0, 0.0), (-2.0, 0.0), (3.0, 0.0) ] > : tensor<32xcomplex<f64>>
    %sv1 = sparse_tensor.convert %v1 : tensor<32xcomplex<f64>> to tensor<?xcomplex<f64>, #SparseVector>
    %sv2 = sparse_tensor.convert %v2 : tensor<32xcomplex<f64>> to tensor<?xcomplex<f64>, #SparseVector>

    // Call sparse vector kernels.
    %0 = call @cops(%sv1, %sv2)
       : (tensor<?xcomplex<f64>, #SparseVector>,
          tensor<?xcomplex<f64>, #SparseVector>) -> tensor<?xcomplex<f64>, #SparseVector>
    %1 = call @csin(%sv1)
       : (tensor<?xcomplex<f64>, #SparseVector>) -> tensor<?xcomplex<f64>, #SparseVector>
    %2 = call @complex_sqrt(%sv1)
       : (tensor<?xcomplex<f64>, #SparseVector>) -> tensor<?xcomplex<f64>, #SparseVector>
    %3 = call @complex_tanh(%sv2)
       : (tensor<?xcomplex<f64>, #SparseVector>) -> tensor<?xcomplex<f64>, #SparseVector>
    %4 = call @clog1p_expm1(%sv1)
       : (tensor<?xcomplex<f64>, #SparseVector>) -> tensor<?xcomplex<f64>, #SparseVector>
    %5 = call @cdiv(%sv1)
       : (tensor<?xcomplex<f64>, #SparseVector>) -> tensor<?xcomplex<f64>, #SparseVector>
    %6 = call @cabs(%sv1)
       : (tensor<?xcomplex<f64>, #SparseVector>) -> tensor<?xf64, #SparseVector>

    //
    // Verify the results (one CHECK group per sparse_tensor.print below,
    // in the same order: %0, %1, %2, %3, %4, %5, %6).
    //
    // CHECK:      ---- Sparse Tensor ----
    // CHECK-NEXT: nse = 4
    // CHECK-NEXT: dim = ( 32 )
    // CHECK-NEXT: lvl = ( 32 )
    // CHECK-NEXT: pos[0] : ( 0, 4 )
    // CHECK-NEXT: crd[0] : ( 0, 1, 28, 31 )
    // CHECK-NEXT: values : ( ( -5.13, 2 ), ( 1, 0 ), ( 1, 4 ), ( 8, 6 ) )
    // CHECK-NEXT: ----
    //
    // CHECK-NEXT: ---- Sparse Tensor ----
    // CHECK-NEXT: nse = 3
    // CHECK-NEXT: dim = ( 32 )
    // CHECK-NEXT: lvl = ( 32 )
    // CHECK-NEXT: pos[0] : ( 0, 3 )
    // CHECK-NEXT: crd[0] : ( 0, 28, 31 )
    // CHECK-NEXT: values : ( ( 3.43887, 1.47097 ), ( 3.85374, -27.0168 ), ( -193.43, 57.2184 ) )
    // CHECK-NEXT: ----
    //
    // CHECK-NEXT: ---- Sparse Tensor ----
    // CHECK-NEXT: nse = 3
    // CHECK-NEXT: dim = ( 32 )
    // CHECK-NEXT: lvl = ( 32 )
    // CHECK-NEXT: pos[0] : ( 0, 3 )
    // CHECK-NEXT: crd[0] : ( 0, 28, 31 )
    // CHECK-NEXT: values : ( ( 0.433635, 2.30609 ), ( 2, 1 ), ( 2.53083, 1.18538 ) )
    // CHECK-NEXT: ----
    //
    // CHECK-NEXT: ---- Sparse Tensor ----
    // CHECK-NEXT: nse = 3
    // CHECK-NEXT: dim = ( 32 )
    // CHECK-NEXT: lvl = ( 32 )
    // CHECK-NEXT: pos[0] : ( 0, 3 )
    // CHECK-NEXT: crd[0] : ( 1, 28, 31 )
    // CHECK-NEXT: values : ( ( 0.761594, 0 ), ( -0.964028, 0 ), ( 0.995055, 0 ) )
    // CHECK-NEXT: ----
    //
    // CHECK-NEXT: ---- Sparse Tensor ----
    // CHECK-NEXT: nse = 3
    // CHECK-NEXT: dim = ( 32 )
    // CHECK-NEXT: lvl = ( 32 )
    // CHECK-NEXT: pos[0] : ( 0, 3 )
    // CHECK-NEXT: crd[0] : ( 0, 28, 31 )
    // CHECK-NEXT: values : ( ( -5.13, 2 ), ( 3, 4 ), ( 5, 6 ) )
    // CHECK-NEXT: ----
    //
    // CHECK-NEXT: ---- Sparse Tensor ----
    // CHECK-NEXT: nse = 3
    // CHECK-NEXT: dim = ( 32 )
    // CHECK-NEXT: lvl = ( 32 )
    // CHECK-NEXT: pos[0] : ( 0, 3 )
    // CHECK-NEXT: crd[0] : ( 0, 28, 31 )
    // CHECK-NEXT: values : ( ( -2.565, 1 ), ( 1.5, 2 ), ( 2.5, 3 ) )
    // CHECK-NEXT: ----
    //
    // CHECK-NEXT: ---- Sparse Tensor ----
    // CHECK-NEXT: nse = 3
    // CHECK-NEXT: dim = ( 32 )
    // CHECK-NEXT: lvl = ( 32 )
    // CHECK-NEXT: pos[0] : ( 0, 3 )
    // CHECK-NEXT: crd[0] : ( 0, 28, 31 )
    // CHECK-NEXT: values : ( 5.50608, 5, 7.81025 )
    // CHECK-NEXT: ----
    //
    sparse_tensor.print %0 : tensor<?xcomplex<f64>, #SparseVector>
    sparse_tensor.print %1 : tensor<?xcomplex<f64>, #SparseVector>
    sparse_tensor.print %2 : tensor<?xcomplex<f64>, #SparseVector>
    sparse_tensor.print %3 : tensor<?xcomplex<f64>, #SparseVector>
    sparse_tensor.print %4 : tensor<?xcomplex<f64>, #SparseVector>
    sparse_tensor.print %5 : tensor<?xcomplex<f64>, #SparseVector>
    sparse_tensor.print %6 : tensor<?xf64, #SparseVector>

    // Release the resources.
    bufferization.dealloc_tensor %sv1 : tensor<?xcomplex<f64>, #SparseVector>
    bufferization.dealloc_tensor %sv2 : tensor<?xcomplex<f64>, #SparseVector>
    bufferization.dealloc_tensor %0 : tensor<?xcomplex<f64>, #SparseVector>
    bufferization.dealloc_tensor %1 : tensor<?xcomplex<f64>, #SparseVector>
    bufferization.dealloc_tensor %2 : tensor<?xcomplex<f64>, #SparseVector>
    bufferization.dealloc_tensor %3 : tensor<?xcomplex<f64>, #SparseVector>
    bufferization.dealloc_tensor %4 : tensor<?xcomplex<f64>, #SparseVector>
    bufferization.dealloc_tensor %5 : tensor<?xcomplex<f64>, #SparseVector>
    bufferization.dealloc_tensor %6 : tensor<?xf64, #SparseVector>
    return
  }
281}
282