//--------------------------------------------------------------------------------------------------
// WHEN CREATING A NEW TEST, PLEASE JUST COPY & PASTE WITHOUT EDITS.
//
// Set-up that's shared across all tests in this directory. In principle, this
// config could be moved to lit.local.cfg. However, there are downstream users that
// do not use these LIT config files. Hence this is kept inline.
//
// DEFINE: %{sparsifier_opts} = enable-runtime-library=true
// DEFINE: %{sparsifier_opts_sve} = enable-arm-sve=true %{sparsifier_opts}
// DEFINE: %{compile} = mlir-opt %s --sparsifier="%{sparsifier_opts}"
// DEFINE: %{compile_sve} = mlir-opt %s --sparsifier="%{sparsifier_opts_sve}"
// DEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils
// DEFINE: %{run_libs_sve} = -shared-libs=%native_mlir_runner_utils,%native_mlir_c_runner_utils
// DEFINE: %{run_opts} = -e main -entry-point-result=void
// DEFINE: %{run} = mlir-runner %{run_opts} %{run_libs}
// DEFINE: %{run_sve} = %mcr_aarch64_cmd --march=aarch64 --mattr="+sve" %{run_opts} %{run_libs_sve}
//
// DEFINE: %{env} =
//--------------------------------------------------------------------------------------------------

// REDEFINE: %{env} = TENSOR0="%mlir_src_dir/test/Integration/data/test.mtx"
// RUN: %{compile} | env %{env} %{run} | FileCheck %s
//
// Do the same run, but now with direct IR generation.
// REDEFINE: %{sparsifier_opts} = enable-runtime-library=false
// RUN: %{compile} | env %{env} %{run} | FileCheck %s
//
// Do the same run, but now with direct IR generation and vectorization.
// REDEFINE: %{sparsifier_opts} = enable-runtime-library=false vl=2 reassociate-fp-reductions=true enable-index-optimizations=true
// RUN: %{compile} | env %{env} %{run} | FileCheck %s
//
// Do the same run, but now with direct IR generation and VLA vectorization.
// RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | env %{env} %{run_sve} | FileCheck %s %}
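//
// For reference, after lit substitution the first REDEFINE/RUN pair above expands to
// roughly the pipeline below (a sketch; the remaining %mlir_* and %s substitutions are
// resolved by the local lit configuration):
//
//   mlir-opt %s --sparsifier="enable-runtime-library=true" \
//     | env TENSOR0="%mlir_src_dir/test/Integration/data/test.mtx" \
//       mlir-runner -e main -entry-point-result=void \
//                   -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils \
//     | FileCheck %s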

!Filename = !llvm.ptr

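// An all-dense annotated 2-D tensor: both levels are stored densely, so the
// elements end up in a single linearized values array.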
#DenseMatrix = #sparse_tensor.encoding<{
  map = (d0, d1) -> (d0 : dense, d1 : dense)
}>

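// A CSR-style matrix: dense rows with compressed (sparse) column storage.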
#SparseMatrix = #sparse_tensor.encoding<{
  map = (d0, d1) -> (d0 : dense, d1 : compressed)
}>

#trait_assign = {
  indexing_maps = [
    affine_map<(i,j) -> (i,j)>, // A
    affine_map<(i,j) -> (i,j)>  // X (out)
  ],
  iterator_types = ["parallel", "parallel"],
  doc = "X(i,j) = A(i,j) * 2"
}

//
// Integration test that demonstrates assigning a sparse tensor
// to an all-dense annotated "sparse" tensor, which effectively
// results in inserting the nonzero elements into a linearized array.
//
// Note that there is a subtle difference between a non-annotated
// tensor and an all-dense annotated tensor. Both tensors are assumed
// dense, but the former remains an n-dimensional memref whereas the
// latter is linearized into a one-dimensional memref that is further
// lowered into a storage scheme that is backed by the runtime support
// library.
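//
// As an illustrative sketch (not part of the test): for a 2x2 tensor, the
// #DenseMatrix encoding stores a single linearized values array
// [a00, a01, a10, a11], whereas an un-annotated tensor<2x2xf64> bufferizes
// to an ordinary two-dimensional memref<2x2xf64>.
//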
module {
  //
  // A kernel that assigns multiplied elements from A to X.
  //
  func.func @dense_output(%arga: tensor<?x?xf64, #SparseMatrix>) -> tensor<?x?xf64, #DenseMatrix> {
    %c0 = arith.constant 0 : index
    %c1 = arith.constant 1 : index
    %c2 = arith.constant 2.0 : f64
    %d0 = tensor.dim %arga, %c0 : tensor<?x?xf64, #SparseMatrix>
    %d1 = tensor.dim %arga, %c1 : tensor<?x?xf64, #SparseMatrix>
    %init = tensor.empty(%d0, %d1) : tensor<?x?xf64, #DenseMatrix>
    %0 = linalg.generic #trait_assign
       ins(%arga: tensor<?x?xf64, #SparseMatrix>)
      outs(%init: tensor<?x?xf64, #DenseMatrix>) {
      ^bb(%a: f64, %x: f64):
        %0 = arith.mulf %a, %c2 : f64
        linalg.yield %0 : f64
    } -> tensor<?x?xf64, #DenseMatrix>
    return %0 : tensor<?x?xf64, #DenseMatrix>
  }

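  // Provided by the runtime support library; it returns the path stored in the
  // TENSOR<id> environment variable (here TENSOR0, set via %{env} above).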
  func.func private @getTensorFilename(index) -> (!Filename)

  //
  // Main driver that reads matrix from file and calls the kernel.
  //
  func.func @main() {
    %d0 = arith.constant 0.0 : f64
    %c0 = arith.constant 0 : index
    %c1 = arith.constant 1 : index

    // Read the sparse matrix from file, construct sparse storage.
    %fileName = call @getTensorFilename(%c0) : (index) -> (!Filename)
    %a = sparse_tensor.new %fileName
      : !Filename to tensor<?x?xf64, #SparseMatrix>

    // Call the kernel.
    %0 = call @dense_output(%a)
      : (tensor<?x?xf64, #SparseMatrix>) -> tensor<?x?xf64, #DenseMatrix>

    //
    // Print the linearized 5x5 result for verification.
    //
    // CHECK:      ---- Sparse Tensor ----
    // CHECK-NEXT: nse = 25
    // CHECK-NEXT: dim = ( 5, 5 )
    // CHECK-NEXT: lvl = ( 5, 5 )
    // CHECK-NEXT: values : ( 2, 0, 0, 2.8, 0, 0, 4, 0, 0, 5, 0, 0, 6, 0, 0, 8.2, 0, 0, 8, 0, 0, 10.4, 0, 0, 10 )
    // CHECK-NEXT: ----
    //
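    // The values line holds the doubled input entries laid out row-major at
    // offset i*5+j; e.g., the 2.8 at offset 3 corresponds to the input
    // nonzero at position (0,3).
    //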
    sparse_tensor.print %0 : tensor<?x?xf64, #DenseMatrix>

    // Release the resources.
    bufferization.dealloc_tensor %a : tensor<?x?xf64, #SparseMatrix>
    bufferization.dealloc_tensor %0 : tensor<?x?xf64, #DenseMatrix>

    return
  }
}