//--------------------------------------------------------------------------------------------------
// WHEN CREATING A NEW TEST, PLEASE JUST COPY & PASTE WITHOUT EDITS.
//
// Set-up that's shared across all tests in this directory. In principle, this
// config could be moved to lit.local.cfg. However, there are downstream users that
// do not use these LIT config files. Hence why this is kept inline.
//
// DEFINE: %{sparsifier_opts} = enable-runtime-library=true
// DEFINE: %{sparsifier_opts_sve} = enable-arm-sve=true %{sparsifier_opts}
// DEFINE: %{compile} = mlir-opt %s --sparsifier="%{sparsifier_opts}"
// DEFINE: %{compile_sve} = mlir-opt %s --sparsifier="%{sparsifier_opts_sve}"
// DEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils
// DEFINE: %{run_libs_sve} = -shared-libs=%native_mlir_runner_utils,%native_mlir_c_runner_utils
// DEFINE: %{run_opts} = -e main -entry-point-result=void
// DEFINE: %{run} = mlir-runner %{run_opts} %{run_libs}
// DEFINE: %{run_sve} = %mcr_aarch64_cmd --march=aarch64 --mattr="+sve" %{run_opts} %{run_libs_sve}
//
// DEFINE: %{env} =
//--------------------------------------------------------------------------------------------------

// RUN: %{compile} | %{run} | FileCheck %s
//
// Do the same run, but now with direct IR generation.
// REDEFINE: %{sparsifier_opts} = enable-runtime-library=false enable-buffer-initialization=true
// RUN: %{compile} | %{run} | FileCheck %s
//
// Do the same run, but now with direct IR generation and vectorization.
// REDEFINE: %{sparsifier_opts} = enable-runtime-library=false enable-buffer-initialization=true vl=2 reassociate-fp-reductions=true enable-index-optimizations=true
// RUN: %{compile} | %{run} | FileCheck %s
//
// Do the same run, but now with direct IR generation and VLA vectorization.
// RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | %{run_sve} | FileCheck %s %}

// Test that test-bufferization-analysis-only works. This option is useful
// for understanding why buffer copies were inserted.
// RUN: mlir-opt %s --sparsifier="test-bufferization-analysis-only" -o /dev/null

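// A 3-D storage scheme that permutes the dimensions to level order (j, k, i):
// the two outer levels (j and k) are compressed, and the innermost level (i)
// is stored densely, i.e. a "DCSR-flavored" format along (j,k) with dense
// "fibers" of length 4 along the i-dim.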
#Sparse1 = #sparse_tensor.encoding<{
  map = (i, j, k) -> (
    j : compressed,
    k : compressed,
    i : dense
  )
}>

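// A 3-D block storage scheme: the three outer (floordiv) levels select which
// 2x2x2 block contains nonzeros (all three compressed), while the three inner
// (mod) levels store the 8 entries of each such block densely in the original
// (i, j, k) order.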
#Sparse2 = #sparse_tensor.encoding<{
  map = (i, j, k) -> (
    i floordiv 2 : compressed,
    j floordiv 2 : compressed,
    k floordiv 2 : compressed,
    i mod 2 : dense,
    j mod 2 : dense,
    k mod 2 : dense)
}>

module {

  //
  // Main driver that tests sparse tensor storage.
  //
  func.func @main() {
    %c0 = arith.constant 0 : index
    %i0 = arith.constant 0 : i32

    // Set up an input dense tensor and convert it to two sparse tensors.
    %d = arith.constant dense <[
       [ // i=0
         [ 1, 0, 0, 0 ],
         [ 0, 0, 0, 0 ],
         [ 0, 0, 0, 0 ],
         [ 0, 0, 5, 0 ] ],
       [ // i=1
         [ 2, 0, 0, 0 ],
         [ 0, 0, 0, 0 ],
         [ 0, 0, 0, 0 ],
         [ 0, 0, 6, 0 ] ],
       [ // i=2
         [ 3, 0, 0, 0 ],
         [ 0, 0, 0, 0 ],
         [ 0, 0, 0, 0 ],
         [ 0, 0, 7, 0 ] ],
       [ // i=3
         [ 4, 0, 0, 0 ],
         [ 0, 0, 0, 0 ],
         [ 0, 0, 0, 0 ],
         [ 0, 0, 8, 0 ] ]
    ]> : tensor<4x4x4xi32>

    %a = sparse_tensor.convert %d : tensor<4x4x4xi32> to tensor<4x4x4xi32, #Sparse1>
    %b = sparse_tensor.convert %d : tensor<4x4x4xi32> to tensor<4x4x4xi32, #Sparse2>

    //
    // If we store the two “fibers” [1,2,3,4] starting at index (0,0,0) and
    // ending at index (3,0,0), and [5,6,7,8] starting at index (0,3,2) and
    // ending at index (3,3,2), with a “DCSR-flavored” scheme along (j,k) and
    // dense “fibers” in the i-dim, we end up with 8 stored entries.
    //
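    // Reading the output below: the compressed levels enumerate the two
    // nonzero j-coordinates {0,3} and, under them, the single nonzero
    // k-coordinates (k=0 under j=0, k=2 under j=3); the dense i-level then
    // stores 4 values per (j,k) fiber, yielding the 8 values printed.
    //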
    // CHECK:      ---- Sparse Tensor ----
    // CHECK-NEXT: nse = 8
    // CHECK-NEXT: dim = ( 4, 4, 4 )
    // CHECK-NEXT: lvl = ( 4, 4, 4 )
    // CHECK-NEXT: pos[0] : ( 0, 2 )
    // CHECK-NEXT: crd[0] : ( 0, 3 )
    // CHECK-NEXT: pos[1] : ( 0, 1, 2 )
    // CHECK-NEXT: crd[1] : ( 0, 2 )
    // CHECK-NEXT: values : ( 1, 2, 3, 4, 5, 6, 7, 8 )
    // CHECK-NEXT: ----
    //
    sparse_tensor.print %a : tensor<4x4x4xi32, #Sparse1>

    //
    // If we store full 2x2x2 3-D blocks in the original index order
    // in a compressed fashion, we end up with 4 blocks to incorporate
    // all the nonzeros, and thus 32 stored entries.
    //
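    // Reading the output below: the three compressed levels enumerate the four
    // nonempty block coordinates (0,0,0), (0,1,1), (1,0,0), and (1,1,1); the
    // three dense levels then store 8 values per block (including zeros),
    // which accounts for the 32 values printed.
    //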
    // CHECK:      ---- Sparse Tensor ----
    // CHECK-NEXT: nse = 32
    // CHECK-NEXT: dim = ( 4, 4, 4 )
    // CHECK-NEXT: lvl = ( 2, 2, 2, 2, 2, 2 )
    // CHECK-NEXT: pos[0] : ( 0, 2 )
    // CHECK-NEXT: crd[0] : ( 0, 1 )
    // CHECK-NEXT: pos[1] : ( 0, 2, 4 )
    // CHECK-NEXT: crd[1] : ( 0, 1, 0, 1 )
    // CHECK-NEXT: pos[2] : ( 0, 1, 2, 3, 4 )
    // CHECK-NEXT: crd[2] : ( 0, 1, 0, 1 )
    // CHECK-NEXT: values : ( 1, 0, 0, 0, 2, 0, 0, 0, 0, 0, 5, 0, 0, 0, 6, 0, 3, 0, 0, 0, 4, 0, 0, 0, 0, 0, 7, 0, 0, 0, 8, 0 )
    // CHECK-NEXT: ----
    //
    sparse_tensor.print %b : tensor<4x4x4xi32, #Sparse2>

    // Release the resources.
    bufferization.dealloc_tensor %a : tensor<4x4x4xi32, #Sparse1>
    bufferization.dealloc_tensor %b : tensor<4x4x4xi32, #Sparse2>

    return
  }
}