// RUN: mlir-opt %s --canonicalize --cse | FileCheck %s

#SparseVector = #sparse_tensor.encoding<{map = (d0) -> (d0 : compressed)}>

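// A dense-to-dense convert between identical tensor types is a no-op,
// so it folds away and the input is returned directly.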
// CHECK-LABEL: func @sparse_nop_dense2dense_convert(
//  CHECK-SAME: %[[A:.*]]: tensor<64xf32>)
//   CHECK-NOT: sparse_tensor.convert
//       CHECK: return %[[A]] : tensor<64xf32>
func.func @sparse_nop_dense2dense_convert(%arg0: tensor<64xf32>) -> tensor<64xf32> {
  %0 = sparse_tensor.convert %arg0 : tensor<64xf32> to tensor<64xf32>
  return %0 : tensor<64xf32>
}

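// A convert whose result has no uses is side-effect free and is removed
// as dead code.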
// CHECK-LABEL: func @sparse_dce_convert(
//  CHECK-SAME: %[[A:.*]]: tensor<64xf32>)
//   CHECK-NOT: sparse_tensor.convert
//       CHECK: return
func.func @sparse_dce_convert(%arg0: tensor<64xf32>) {
  %0 = sparse_tensor.convert %arg0 : tensor<64xf32> to tensor<64xf32, #SparseVector>
  return
}

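// The positions/coordinates/values getters have no side effects, so their
// unused results are eliminated as dead code.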
// CHECK-LABEL: func @sparse_dce_getters(
//  CHECK-SAME: %[[A:.*]]: tensor<64xf32, #sparse{{[0-9]*}}>)
//   CHECK-NOT: sparse_tensor.positions
//   CHECK-NOT: sparse_tensor.coordinates
//   CHECK-NOT: sparse_tensor.values
//       CHECK: return
func.func @sparse_dce_getters(%arg0: tensor<64xf32, #SparseVector>) {
  %0 = sparse_tensor.positions %arg0 { level = 0 : index } : tensor<64xf32, #SparseVector> to memref<?xindex>
  %1 = sparse_tensor.coordinates %arg0 { level = 0 : index } : tensor<64xf32, #SparseVector> to memref<?xindex>
  %2 = sparse_tensor.values %arg0 : tensor<64xf32, #SparseVector> to memref<?xf32>
  return
}

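// An unused concatenation result is likewise dead and removed entirely.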
// CHECK-LABEL: func @sparse_concat_dce(
//   CHECK-NOT: sparse_tensor.concatenate
//       CHECK: return
func.func @sparse_concat_dce(%arg0: tensor<2xf64, #SparseVector>,
                             %arg1: tensor<3xf64, #SparseVector>,
                             %arg2: tensor<4xf64, #SparseVector>) {
  %0 = sparse_tensor.concatenate %arg0, %arg1, %arg2 {dimension = 0 : index}
       : tensor<2xf64, #SparseVector>,
         tensor<3xf64, #SparseVector>,
         tensor<4xf64, #SparseVector> to tensor<9xf64, #SparseVector>
  return
}

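// A storage_specifier.get folds to the value installed by the matching set
// (here the lvl_sz written as %arg1, looking through the intervening
// pos_mem_sz set); the set chain then becomes dead and is removed.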
// CHECK-LABEL: func @sparse_get_specifier_dce_fold(
//  CHECK-SAME:  %[[A0:.*]]: !sparse_tensor.storage_specifier
//  CHECK-SAME:  %[[A1:.*]]: index,
//  CHECK-SAME:  %[[A2:.*]]: index)
//   CHECK-NOT:  sparse_tensor.storage_specifier.set
//   CHECK-NOT:  sparse_tensor.storage_specifier.get
//       CHECK:  return %[[A1]]
func.func @sparse_get_specifier_dce_fold(%arg0: !sparse_tensor.storage_specifier<#SparseVector>, %arg1: index, %arg2: index) -> index {
  %0 = sparse_tensor.storage_specifier.set %arg0 lvl_sz at 0 with %arg1
       : !sparse_tensor.storage_specifier<#SparseVector>
  %1 = sparse_tensor.storage_specifier.set %0 pos_mem_sz at 0 with %arg2
       : !sparse_tensor.storage_specifier<#SparseVector>
  %2 = sparse_tensor.storage_specifier.get %1 lvl_sz at 0
       : !sparse_tensor.storage_specifier<#SparseVector>
  return %2 : index
}

#COO = #sparse_tensor.encoding<{map = (d0, d1) -> (d0 : compressed(nonunique), d1 : singleton)}>

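// A reorder_coo between identical (already ordered) COO types is a no-op
// and folds to its input.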
// CHECK-LABEL: func @sparse_reorder_coo(
//  CHECK-SAME: %[[A:.*]]: tensor<?x?xf32, #sparse{{[0-9]*}}>
//   CHECK-NOT: %[[R:.*]] = sparse_tensor.reorder_coo
//       CHECK: return %[[A]]
func.func @sparse_reorder_coo(%arg0 : tensor<?x?xf32, #COO>) -> tensor<?x?xf32, #COO> {
  %ret = sparse_tensor.reorder_coo quick_sort %arg0 : tensor<?x?xf32, #COO> to tensor<?x?xf32, #COO>
  return %ret : tensor<?x?xf32, #COO>
}

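// Block-sparse row format with 2x3 blocks: the two outer levels select the
// block, the two inner dense levels address elements within a block.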
#BSR = #sparse_tensor.encoding<{
  map = ( i, j ) ->
  ( i floordiv 2 : dense,
    j floordiv 3 : compressed,
    i mod 2      : dense,
    j mod 3      : dense
  )
}>

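// A dim_to_lvl translation followed by the inverse lvl_to_dim translation
// under the same encoding cancels out.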
// CHECK-LABEL: func @sparse_crd_translate(
//   CHECK-NOT:   sparse_tensor.crd_translate
func.func @sparse_crd_translate(%arg0: index, %arg1: index) -> (index, index) {
  %l0, %l1, %l2, %l3 = sparse_tensor.crd_translate dim_to_lvl [%arg0, %arg1] as #BSR : index, index, index, index
  %d0, %d1 = sparse_tensor.crd_translate lvl_to_dim [%l0, %l1, %l2, %l3] as #BSR : index, index
  return %d0, %d1 : index, index
}

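// Level 0 of #BSR is i floordiv 2; with dim 0 statically 10, the level
// size folds to the constant 5.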
// CHECK-LABEL:   func.func @sparse_lvl_0(
// CHECK:           %[[C5:.*]] = arith.constant 5 : index
// CHECK:           return %[[C5]] : index
func.func @sparse_lvl_0(%t : tensor<10x?xi32, #BSR>) -> index {
  %lvl = arith.constant 0 : index
  %l0 = sparse_tensor.lvl %t, %lvl : tensor<10x?xi32, #BSR>
  return %l0 : index
}

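// Level 3 of #BSR is j mod 3, so its size folds to the constant 3 even
// though both dimensions are dynamic.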
// CHECK-LABEL:   func.func @sparse_lvl_3(
// CHECK:           %[[C3:.*]] = arith.constant 3 : index
// CHECK:           return %[[C3]] : index
func.func @sparse_lvl_3(%t : tensor<?x?xi32, #BSR>) -> index {
  %lvl = arith.constant 3 : index
  %l0 = sparse_tensor.lvl %t, %lvl : tensor<?x?xi32, #BSR>
  return %l0 : index
}

#DSDD = #sparse_tensor.encoding<{
  map = (i, j, k, l) -> (i: dense, j: compressed, k: dense, l: dense)
}>

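// A reinterpret_map followed by its inverse restores the original type,
// so the pair cancels out.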
// CHECK-LABEL:   func.func @sparse_reinterpret_map(
// CHECK-NOT: sparse_tensor.reinterpret_map
func.func @sparse_reinterpret_map(%t0 : tensor<6x12xi32, #BSR>) -> tensor<6x12xi32, #BSR> {
  %t1 = sparse_tensor.reinterpret_map %t0 : tensor<6x12xi32, #BSR>
                                         to tensor<3x4x2x3xi32, #DSDD>
  %t2 = sparse_tensor.reinterpret_map %t1 : tensor<3x4x2x3xi32, #DSDD>
                                         to tensor<6x12xi32, #BSR>
  return %t2 : tensor<6x12xi32, #BSR>
}