// xref: /llvm-project/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reshape.mlir (revision eb206e9ea84eff0a0596fed2de8316d924f946d1)
//--------------------------------------------------------------------------------------------------
// WHEN CREATING A NEW TEST, PLEASE JUST COPY & PASTE WITHOUT EDITS.
//
// Set-up that's shared across all tests in this directory. In principle, this
// config could be moved to lit.local.cfg. However, there are downstream users
// that do not use these LIT config files. Hence why this is kept inline.
//
// DEFINE: %{sparsifier_opts} = enable-runtime-library=true
// DEFINE: %{sparsifier_opts_sve} = enable-arm-sve=true %{sparsifier_opts}
// DEFINE: %{compile} = mlir-opt %s --sparsifier="%{sparsifier_opts}"
// DEFINE: %{compile_sve} = mlir-opt %s --sparsifier="%{sparsifier_opts_sve}"
// DEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils
// DEFINE: %{run_libs_sve} = -shared-libs=%native_mlir_runner_utils,%native_mlir_c_runner_utils
// DEFINE: %{run_opts} = -e main -entry-point-result=void
// DEFINE: %{run} = mlir-runner %{run_opts} %{run_libs}
// DEFINE: %{run_sve} = %mcr_aarch64_cmd --march=aarch64 --mattr="+sve" %{run_opts} %{run_libs_sve}
//
// DEFINE: %{env} =
//--------------------------------------------------------------------------------------------------

// RUN: %{compile} | %{run} | FileCheck %s
//
// Do the same run, but now with direct IR generation.
// REDEFINE: %{sparsifier_opts} = enable-runtime-library=false
// RUN: %{compile} | %{run} | FileCheck %s
//
// Do the same run, but now with direct IR generation and vectorization.
// REDEFINE: %{sparsifier_opts} = enable-runtime-library=false vl=2 reassociate-fp-reductions=true enable-index-optimizations=true
// RUN: %{compile} | %{run} | FileCheck %s
//
// Do the same run, but now with direct IR generation and VLA vectorization.
// RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | %{run_sve} | FileCheck %s %}

// 1-D sparse vector: the single dimension is stored compressed.
#SparseVector = #sparse_tensor.encoding<{
  map = (d0) -> (d0 : compressed)
}>

// 2-D sparse matrix (CSR-of-compressed style): both dimensions compressed.
#SparseMatrix = #sparse_tensor.encoding<{
  map = (d0, d1) -> (d0 : compressed, d1 : compressed)
}>

// 3-D sparse tensor: all three dimensions stored compressed.
#Sparse3dTensor = #sparse_tensor.encoding<{
  map = (d0, d1, d2) -> (d0 : compressed, d1 : compressed, d2 : compressed)
}>

module {

  // Reshape a sparse 3x4 matrix into a sparse 2x6 matrix (same rank,
  // different dimension sizes).
  func.func @reshape0(%arg0: tensor<3x4xf64, #SparseMatrix>) -> tensor<2x6xf64, #SparseMatrix> {
    %shape = arith.constant dense <[ 2, 6 ]> : tensor<2xi32>
    %0 = tensor.reshape %arg0(%shape) : (tensor<3x4xf64, #SparseMatrix>, tensor<2xi32>) -> tensor<2x6xf64, #SparseMatrix>
    return %0 : tensor<2x6xf64, #SparseMatrix>
  }

  // Reshape a sparse 3x4 matrix into a sparse vector of 12 elements
  // (rank-reducing reshape: 2-D -> 1-D).
  func.func @reshape1(%arg0: tensor<3x4xf64, #SparseMatrix>) -> tensor<12xf64, #SparseVector> {
    %shape = arith.constant dense <[ 12 ]> : tensor<1xi32>
    %0 = tensor.reshape %arg0(%shape) : (tensor<3x4xf64, #SparseMatrix>, tensor<1xi32>) -> tensor<12xf64, #SparseVector>
    return %0 : tensor<12xf64, #SparseVector>
  }

  // Reshape a sparse 3x4 matrix into a sparse 2x3x2 tensor
  // (rank-expanding reshape: 2-D -> 3-D).
  func.func @reshape2(%arg0: tensor<3x4xf64, #SparseMatrix>) -> tensor<2x3x2xf64, #Sparse3dTensor> {
    %shape = arith.constant dense <[ 2, 3, 2 ]> : tensor<3xi32>
    %0 = tensor.reshape %arg0(%shape) : (tensor<3x4xf64, #SparseMatrix>, tensor<3xi32>) -> tensor<2x3x2xf64, #Sparse3dTensor>
    return %0 : tensor<2x3x2xf64, #Sparse3dTensor>
  }


  // Test driver: builds one sparse 3x4 input, runs all three reshapes, and
  // prints each result so FileCheck can verify positions/coordinates/values.
  func.func @main() {
    // Dense source data; zeros become implicit after conversion to sparse.
    %m = arith.constant dense <[ [ 1.1,  0.0,  1.3,  0.0 ],
                                 [ 2.1,  0.0,  2.3,  0.0 ],
                                 [ 3.1,  0.0,  3.3,  0.0 ]]> : tensor<3x4xf64>
    %sm = sparse_tensor.convert %m : tensor<3x4xf64> to tensor<3x4xf64, #SparseMatrix>

    %reshaped0 = call @reshape0(%sm) : (tensor<3x4xf64, #SparseMatrix>) -> tensor<2x6xf64, #SparseMatrix>
    %reshaped1 = call @reshape1(%sm) : (tensor<3x4xf64, #SparseMatrix>) -> tensor<12xf64, #SparseVector>
    %reshaped2 = call @reshape2(%sm) : (tensor<3x4xf64, #SparseMatrix>) -> tensor<2x3x2xf64, #Sparse3dTensor>

    %c0 = arith.constant 0 : index
    %df = arith.constant -1.0 : f64

    //
    // CHECK:      ---- Sparse Tensor ----
    // CHECK-NEXT: nse = 6
    // CHECK-NEXT: dim = ( 2, 6 )
    // CHECK-NEXT: lvl = ( 2, 6 )
    // CHECK-NEXT: pos[0] : ( 0, 2 )
    // CHECK-NEXT: crd[0] : ( 0, 1 )
    // CHECK-NEXT: pos[1] : ( 0, 3, 6 )
    // CHECK-NEXT: crd[1] : ( 0, 2, 4, 0, 2, 4 )
    // CHECK-NEXT: values : ( 1.1, 1.3, 2.1, 2.3, 3.1, 3.3 )
    // CHECK-NEXT: ----
    // CHECK:      ---- Sparse Tensor ----
    // CHECK-NEXT: nse = 6
    // CHECK-NEXT: dim = ( 12 )
    // CHECK-NEXT: lvl = ( 12 )
    // CHECK-NEXT: pos[0] : ( 0, 6 )
    // CHECK-NEXT: crd[0] : ( 0, 2, 4, 6, 8, 10 )
    // CHECK-NEXT: values : ( 1.1, 1.3, 2.1, 2.3, 3.1, 3.3 )
    // CHECK-NEXT: ----
    // CHECK:      ---- Sparse Tensor ----
    // CHECK-NEXT: nse = 6
    // CHECK-NEXT: dim = ( 2, 3, 2 )
    // CHECK-NEXT: lvl = ( 2, 3, 2 )
    // CHECK-NEXT: pos[0] : ( 0, 2 )
    // CHECK-NEXT: crd[0] : ( 0, 1 )
    // CHECK-NEXT: pos[1] : ( 0, 3, 6 )
    // CHECK-NEXT: crd[1] : ( 0, 1, 2, 0, 1, 2 )
    // CHECK-NEXT: pos[2] : ( 0, 1, 2, 3, 4, 5, 6 )
    // CHECK-NEXT: crd[2] : ( 0, 0, 0, 0, 0, 0 )
    // CHECK-NEXT: values : ( 1.1, 1.3, 2.1, 2.3, 3.1, 3.3 )
    // CHECK-NEXT: ----
    //
    sparse_tensor.print %reshaped0: tensor<2x6xf64, #SparseMatrix>
    sparse_tensor.print %reshaped1: tensor<12xf64, #SparseVector>
    sparse_tensor.print %reshaped2: tensor<2x3x2xf64, #Sparse3dTensor>

    // Release all sparse tensor storage to keep the test leak-free.
    bufferization.dealloc_tensor %sm : tensor<3x4xf64, #SparseMatrix>
    bufferization.dealloc_tensor %reshaped0 : tensor<2x6xf64, #SparseMatrix>
    bufferization.dealloc_tensor %reshaped1 : tensor<12xf64, #SparseVector>
    bufferization.dealloc_tensor %reshaped2 : tensor<2x3x2xf64, #Sparse3dTensor>

    return
  }

}
125