// RUN: mlir-opt %s --canonicalize --cse | FileCheck %s

#SparseVector = #sparse_tensor.encoding<{map = (d0) -> (d0 : compressed)}>

// CHECK-LABEL: func @sparse_nop_dense2dense_convert(
//  CHECK-SAME: %[[A:.*]]: tensor<64xf32>)
//   CHECK-NOT: sparse_tensor.convert
//       CHECK: return %[[A]] : tensor<64xf32>
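// The dense-to-dense convert between identical types is a no-op and folds to its operand.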
func.func @sparse_nop_dense2dense_convert(%arg0: tensor<64xf32>) -> tensor<64xf32> {
  %0 = sparse_tensor.convert %arg0 : tensor<64xf32> to tensor<64xf32>
  return %0 : tensor<64xf32>
}

// CHECK-LABEL: func @sparse_dce_convert(
//  CHECK-SAME: %[[A:.*]]: tensor<64xf32>)
//   CHECK-NOT: sparse_tensor.convert
//       CHECK: return
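// The unused convert has no side effects, so DCE removes it.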
func.func @sparse_dce_convert(%arg0: tensor<64xf32>) {
  %0 = sparse_tensor.convert %arg0 : tensor<64xf32> to tensor<64xf32, #SparseVector>
  return
}

// CHECK-LABEL: func @sparse_dce_getters(
//  CHECK-SAME: %[[A:.*]]: tensor<64xf32, #sparse{{[0-9]*}}>)
//   CHECK-NOT: sparse_tensor.positions
//   CHECK-NOT: sparse_tensor.coordinates
//   CHECK-NOT: sparse_tensor.values
//       CHECK: return
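// Unused positions/coordinates/values views are side-effect free and removed as dead code.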
func.func @sparse_dce_getters(%arg0: tensor<64xf32, #SparseVector>) {
  %0 = sparse_tensor.positions %arg0 { level = 0 : index } : tensor<64xf32, #SparseVector> to memref<?xindex>
  %1 = sparse_tensor.coordinates %arg0 { level = 0 : index } : tensor<64xf32, #SparseVector> to memref<?xindex>
  %2 = sparse_tensor.values %arg0 : tensor<64xf32, #SparseVector> to memref<?xf32>
  return
}

// CHECK-LABEL: func @sparse_concat_dce(
//   CHECK-NOT: sparse_tensor.concatenate
//       CHECK: return
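// An unused concatenate is likewise erased as dead code.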
func.func @sparse_concat_dce(%arg0: tensor<2xf64, #SparseVector>,
                             %arg1: tensor<3xf64, #SparseVector>,
                             %arg2: tensor<4xf64, #SparseVector>) {
  %0 = sparse_tensor.concatenate %arg0, %arg1, %arg2 {dimension = 0 : index}
       : tensor<2xf64, #SparseVector>,
         tensor<3xf64, #SparseVector>,
         tensor<4xf64, #SparseVector> to tensor<9xf64, #SparseVector>
  return
}

// CHECK-LABEL: func @sparse_get_specifier_dce_fold(
//  CHECK-SAME:  %[[A0:.*]]: !sparse_tensor.storage_specifier
//  CHECK-SAME:  %[[A1:.*]]: index,
//  CHECK-SAME:  %[[A2:.*]]: index)
//   CHECK-NOT:  sparse_tensor.storage_specifier.set
//   CHECK-NOT:  sparse_tensor.storage_specifier.get
//       CHECK:  return %[[A1]]
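// The get of lvl_sz at level 0 folds to the value written by the matching set (%arg1);
// the intervening pos_mem_sz update does not interfere, and both dead sets are erased.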
func.func @sparse_get_specifier_dce_fold(%arg0: !sparse_tensor.storage_specifier<#SparseVector>, %arg1: index, %arg2: index) -> index {
  %0 = sparse_tensor.storage_specifier.set %arg0 lvl_sz at 0 with %arg1
       : !sparse_tensor.storage_specifier<#SparseVector>
  %1 = sparse_tensor.storage_specifier.set %0 pos_mem_sz at 0 with %arg2
       : !sparse_tensor.storage_specifier<#SparseVector>
  %2 = sparse_tensor.storage_specifier.get %1 lvl_sz at 0
       : !sparse_tensor.storage_specifier<#SparseVector>
  return %2 : index
}

#COO = #sparse_tensor.encoding<{map = (d0, d1) -> (d0 : compressed(nonunique), d1 : singleton)}>

// CHECK-LABEL: func @sparse_reorder_coo(
//  CHECK-SAME: %[[A:.*]]: tensor<?x?xf32, #sparse{{[0-9]*}}>
//   CHECK-NOT: %[[R:.*]] = sparse_tensor.reorder_coo
//       CHECK: return %[[A]]
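// Reordering into the exact same (already ordered) #COO type is a no-op and folds to its operand.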
func.func @sparse_reorder_coo(%arg0 : tensor<?x?xf32, #COO>) -> tensor<?x?xf32, #COO> {
  %ret = sparse_tensor.reorder_coo quick_sort %arg0 : tensor<?x?xf32, #COO> to tensor<?x?xf32, #COO>
  return %ret : tensor<?x?xf32, #COO>
}

#BSR = #sparse_tensor.encoding<{
  map = ( i, j ) ->
  ( i floordiv 2 : dense,
    j floordiv 3 : compressed,
    i mod 2      : dense,
    j mod 3      : dense
  )
}>

// CHECK-LABEL: func @sparse_crd_translate(
//   CHECK-NOT:   sparse_tensor.crd_translate
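// A dim_to_lvl translation immediately undone by lvl_to_dim under the same encoding
// folds back to the original dimension coordinates.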
func.func @sparse_crd_translate(%arg0: index, %arg1: index) -> (index, index) {
  %l0, %l1, %l2, %l3 = sparse_tensor.crd_translate dim_to_lvl [%arg0, %arg1] as #BSR : index, index, index, index
  %d0, %d1 = sparse_tensor.crd_translate lvl_to_dim [%l0, %l1, %l2, %l3] as #BSR : index, index
  return %d0, %d1 : index, index
}

// CHECK-LABEL:   func.func @sparse_lvl_0(
// CHECK:           %[[C5:.*]] = arith.constant 5 : index
// CHECK:           return %[[C5]] : index
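// Dimension 0 is statically 10 and level 0 of #BSR is i floordiv 2, so the level size
// constant-folds to 5.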
func.func @sparse_lvl_0(%t : tensor<10x?xi32, #BSR>) -> index {
  %lvl = arith.constant 0 : index
  %l0 = sparse_tensor.lvl %t, %lvl : tensor<10x?xi32, #BSR>
  return %l0 : index
}

// CHECK-LABEL:   func.func @sparse_lvl_3(
// CHECK:           %[[C3:.*]] = arith.constant 3 : index
// CHECK:           return %[[C3]] : index
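// Level 3 of #BSR is j mod 3, so its size is the block size 3 even with dynamic dimensions.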
func.func @sparse_lvl_3(%t : tensor<?x?xi32, #BSR>) -> index {
  %lvl = arith.constant 3 : index
  %l0 = sparse_tensor.lvl %t, %lvl : tensor<?x?xi32, #BSR>
  return %l0 : index
}

#DSDD = #sparse_tensor.encoding<{
  map = (i, j, k, l) -> (i: dense, j: compressed, k: dense, l: dense)
}>

// CHECK-LABEL:   func.func @sparse_reinterpret_map(
// CHECK-NOT: sparse_tensor.reinterpret_map
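// Reinterpreting a #BSR tensor into its demapped 4-D #DSDD form and back cancels out,
// so both reinterpret_map ops fold away.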
func.func @sparse_reinterpret_map(%t0 : tensor<6x12xi32, #BSR>) -> tensor<6x12xi32, #BSR> {
  %t1 = sparse_tensor.reinterpret_map %t0 : tensor<6x12xi32, #BSR>
                                         to tensor<3x4x2x3xi32, #DSDD>
  %t2 = sparse_tensor.reinterpret_map %t1 : tensor<3x4x2x3xi32, #DSDD>
                                         to tensor<6x12xi32, #BSR>
  return %t2 : tensor<6x12xi32, #BSR>
}