//--------------------------------------------------------------------------------------------------
// WHEN CREATING A NEW TEST, PLEASE JUST COPY & PASTE WITHOUT EDITS.
//
// Set-up that's shared across all tests in this directory. In principle, this
// config could be moved to lit.local.cfg. However, there are downstream users
// that do not use these LIT config files. Hence, this is kept inline.
//
// DEFINE: %{sparsifier_opts} = enable-runtime-library=true
// DEFINE: %{sparsifier_opts_sve} = enable-arm-sve=true %{sparsifier_opts}
// DEFINE: %{compile} = mlir-opt %s --sparsifier="%{sparsifier_opts}"
// DEFINE: %{compile_sve} = mlir-opt %s --sparsifier="%{sparsifier_opts_sve}"
// DEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils
// DEFINE: %{run_libs_sve} = -shared-libs=%native_mlir_runner_utils,%native_mlir_c_runner_utils
// DEFINE: %{run_opts} = -e main -entry-point-result=void
// DEFINE: %{run} = mlir-runner %{run_opts} %{run_libs}
// DEFINE: %{run_sve} = %mcr_aarch64_cmd --march=aarch64 --mattr="+sve" %{run_opts} %{run_libs_sve}
//
// DEFINE: %{env} =
//--------------------------------------------------------------------------------------------------

// REDEFINE: %{env} = TENSOR0="%mlir_src_dir/test/Integration/data/test.mtx"
// RUN: %{compile} | env %{env} %{run} | FileCheck %s
//
// Do the same run, but now with direct IR generation and vectorization.
// REDEFINE: %{sparsifier_opts} = enable-runtime-library=false vl=2 reassociate-fp-reductions=true enable-index-optimizations=true
// RUN: %{compile} | env %{env} %{run} | FileCheck %s
//
// Do the same run, but now with direct IR generation and, if available, VLA
// vectorization.
// RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | env %{env} %{run_sve} | FileCheck %s %}

!Filename = !llvm.ptr

#DCSR = #sparse_tensor.encoding<{
  map = (d0, d1) -> (d0 : compressed, d1 : compressed)
}>
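// Note: with both levels compressed, this is a doubly compressed sparse row
// (DCSR) format: each level keeps its own positions/coordinates pair, which is
// what the pos[0]/crd[0] and pos[1]/crd[1] lines printed at the end of this
// test correspond to.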

#eltwise_mult = {
  indexing_maps = [
    affine_map<(i,j) -> (i,j)>  // X (out)
  ],
  iterator_types = ["parallel", "parallel"],
  doc = "X(i,j) *= X(i,j)"
}
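// Note: the trait has a single indexing map because the kernel below reads and
// updates the same tensor X through its `outs` operand (there are no `ins`
// operands), matching the "X(i,j) *= X(i,j)" doc string.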

//
// Integration test that lowers a kernel annotated as sparse to
// actual sparse code, initializes a matching sparse storage scheme
// from file, and runs the resulting code with the JIT compiler.
//
module {
  //
  // A kernel that multiplies a sparse matrix A with itself
  // in an element-wise fashion. The operation has a sparse
  // tensor as output; although the values of that tensor
  // change, its nonzero structure remains the same.
  //
  func.func @kernel_eltwise_mult(%argx: tensor<?x?xf64, #DCSR>)
    -> tensor<?x?xf64, #DCSR> {
    %0 = linalg.generic #eltwise_mult
      outs(%argx: tensor<?x?xf64, #DCSR>) {
      ^bb(%x: f64):
        %0 = arith.mulf %x, %x : f64
        linalg.yield %0 : f64
    } -> tensor<?x?xf64, #DCSR>
    return %0 : tensor<?x?xf64, #DCSR>
  }

  func.func private @getTensorFilename(index) -> (!Filename)
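  // The declaration above is resolved by the runtime support library linked in
  // through %{run_libs}; at run time it returns the path stored in the
  // TENSOR<id> environment variable (here TENSOR0, set via %{env} in the RUN
  // lines at the top of this file).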

  //
  // Main driver that reads matrix from file and calls the sparse kernel.
  //
  func.func @main() {
    %d0 = arith.constant 0.0 : f64
    %c0 = arith.constant 0 : index

    // Read the sparse matrix from file, construct sparse storage.
    %fileName = call @getTensorFilename(%c0) : (index) -> (!Filename)
    %x = sparse_tensor.new %fileName : !Filename to tensor<?x?xf64, #DCSR>

    // Call kernel.
    %0 = call @kernel_eltwise_mult(%x) : (tensor<?x?xf64, #DCSR>) -> tensor<?x?xf64, #DCSR>

    // Print the result for verification.
    //
    // CHECK:      ---- Sparse Tensor ----
    // CHECK-NEXT: nse = 9
    // CHECK-NEXT: dim = ( 5, 5 )
    // CHECK-NEXT: lvl = ( 5, 5 )
    // CHECK-NEXT: pos[0] : ( 0, 5 )
    // CHECK-NEXT: crd[0] : ( 0, 1, 2, 3, 4 )
    // CHECK-NEXT: pos[1] : ( 0, 2, 4, 5, 7, 9 )
    // CHECK-NEXT: crd[1] : ( 0, 3, 1, 4, 2, 0, 3, 1, 4 )
    // CHECK-NEXT: values : ( 1, 1.96, 4, 6.25, 9, 16.81, 16, 27.04, 25 )
    // CHECK-NEXT: ----
    //
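    // The printed values are the element-wise squares of the entries read from
    // the input matrix (e.g. 1.4^2 = 1.96, 2.5^2 = 6.25, 5.2^2 = 27.04), while
    // nse, pos, and crd match the input exactly, confirming that the squaring
    // kernel leaves the nonzero structure untouched.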
    sparse_tensor.print %0 : tensor<?x?xf64, #DCSR>

    // Release the resources.
    bufferization.dealloc_tensor %x : tensor<?x?xf64, #DCSR>

    return
  }
}