// RUN: mlir-opt %s --sparse-tensor-codegen --canonicalize --cse | FileCheck %s

#SparseVector64 = #sparse_tensor.encoding<{
  map = (d0) -> (d0 : compressed),
  posWidth = 64,
  crdWidth = 64
}>

#SparseVector32 = #sparse_tensor.encoding<{
  map = (d0) -> (d0 : compressed),
  posWidth = 32,
  crdWidth = 32
}>
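
// The two encodings above differ only in the bit widths used for positions
// and coordinates (64-bit vs. 32-bit). Converting between them lowers to
// loops that truncate each position and coordinate from i64 to i32, while
// the value buffer is copied unchanged.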
// CHECK-LABEL:   func.func @sparse_convert(
// CHECK-SAME:      %[[VAL_0:.*0]]: memref<?xi64>,
// CHECK-SAME:      %[[VAL_1:.*1]]: memref<?xi64>,
// CHECK-SAME:      %[[VAL_2:.*2]]: memref<?xf32>,
// CHECK-SAME:      %[[VAL_3:.*3]]: !sparse_tensor.storage_specifier
// CHECK:           %[[VAL_4:.*]] = arith.constant 1 : index
// CHECK:           %[[VAL_5:.*]] = arith.constant 0 : index
// CHECK:           %[[VAL_6:.*]] = memref.dim %[[VAL_0]], %[[VAL_5]] : memref<?xi64>
// CHECK:           %[[VAL_7:.*]] = memref.alloc(%[[VAL_6]]) : memref<?xi32>
// CHECK:           scf.for %[[VAL_8:.*]] = %[[VAL_5]] to %[[VAL_6]] step %[[VAL_4]] {
// CHECK:             %[[VAL_9:.*]] = memref.load %[[VAL_0]]{{\[}}%[[VAL_8]]] : memref<?xi64>
// CHECK:             %[[VAL_10:.*]] = arith.trunci %[[VAL_9]] : i64 to i32
// CHECK:             memref.store %[[VAL_10]], %[[VAL_7]]{{\[}}%[[VAL_8]]] : memref<?xi32>
// CHECK:           }
// CHECK:           %[[VAL_11:.*]] = memref.dim %[[VAL_1]], %[[VAL_5]] : memref<?xi64>
// CHECK:           %[[VAL_12:.*]] = memref.alloc(%[[VAL_11]]) : memref<?xi32>
// CHECK:           scf.for %[[VAL_13:.*]] = %[[VAL_5]] to %[[VAL_11]] step %[[VAL_4]] {
// CHECK:             %[[VAL_14:.*]] = memref.load %[[VAL_1]]{{\[}}%[[VAL_13]]] : memref<?xi64>
// CHECK:             %[[VAL_15:.*]] = arith.trunci %[[VAL_14]] : i64 to i32
// CHECK:             memref.store %[[VAL_15]], %[[VAL_12]]{{\[}}%[[VAL_13]]] : memref<?xi32>
// CHECK:           }
// CHECK:           %[[VAL_16:.*]] = memref.dim %[[VAL_2]], %[[VAL_5]] : memref<?xf32>
// CHECK:           %[[VAL_17:.*]] = memref.alloc(%[[VAL_16]]) : memref<?xf32>
// CHECK:           memref.copy %[[VAL_2]], %[[VAL_17]] : memref<?xf32> to memref<?xf32>
// CHECK:           return %[[VAL_7]], %[[VAL_12]], %[[VAL_17]], %[[VAL_3]] : memref<?xi32>, memref<?xi32>, memref<?xf32>, !sparse_tensor.storage_specifier
// CHECK:         }
func.func @sparse_convert(%arg0: tensor<?xf32, #SparseVector64>) -> tensor<?xf32, #SparseVector32> {
  %0 = sparse_tensor.convert %arg0 : tensor<?xf32, #SparseVector64> to tensor<?xf32, #SparseVector32>
  return %0 : tensor<?xf32, #SparseVector32>
}
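
// Converting between tensors with the same encoding but a different element
// type keeps the position and coordinate buffers as plain copies and widens
// each stored value from f32 to f64 in a loop.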
// CHECK-LABEL:   func.func @sparse_convert_value(
// CHECK-SAME:      %[[VAL_0:.*0]]: memref<?xi32>,
// CHECK-SAME:      %[[VAL_1:.*1]]: memref<?xi32>,
// CHECK-SAME:      %[[VAL_2:.*2]]: memref<?xf32>,
// CHECK-SAME:      %[[VAL_3:.*]]: !sparse_tensor.storage_specifier
// CHECK-DAG:       %[[VAL_4:.*]] = arith.constant 1 : index
// CHECK-DAG:       %[[VAL_5:.*]] = arith.constant 0 : index
// CHECK:           %[[VAL_6:.*]] = memref.dim %[[VAL_0]], %[[VAL_5]] : memref<?xi32>
// CHECK:           %[[VAL_7:.*]] = memref.alloc(%[[VAL_6]]) : memref<?xi32>
// CHECK:           memref.copy %[[VAL_0]], %[[VAL_7]] : memref<?xi32> to memref<?xi32>
// CHECK:           %[[VAL_8:.*]] = memref.dim %[[VAL_1]], %[[VAL_5]] : memref<?xi32>
// CHECK:           %[[VAL_9:.*]] = memref.alloc(%[[VAL_8]]) : memref<?xi32>
// CHECK:           memref.copy %[[VAL_1]], %[[VAL_9]] : memref<?xi32> to memref<?xi32>
// CHECK:           %[[VAL_10:.*]] = memref.dim %[[VAL_2]], %[[VAL_5]] : memref<?xf32>
// CHECK:           %[[VAL_11:.*]] = memref.alloc(%[[VAL_10]]) : memref<?xf64>
// CHECK:           scf.for %[[VAL_12:.*]] = %[[VAL_5]] to %[[VAL_10]] step %[[VAL_4]] {
// CHECK:             %[[VAL_13:.*]] = memref.load %[[VAL_2]]{{\[}}%[[VAL_12]]] : memref<?xf32>
// CHECK:             %[[VAL_14:.*]] = arith.extf %[[VAL_13]] : f32 to f64
// CHECK:             memref.store %[[VAL_14]], %[[VAL_11]]{{\[}}%[[VAL_12]]] : memref<?xf64>
// CHECK:           }
// CHECK:           return %[[VAL_7]], %[[VAL_9]], %[[VAL_11]], %[[VAL_3]] : memref<?xi32>, memref<?xi32>, memref<?xf64>, !sparse_tensor.storage_specifier
// CHECK:         }
func.func @sparse_convert_value(%arg0: tensor<?xf32, #SparseVector32>) -> tensor<?xf64, #SparseVector32> {
  %0 = sparse_tensor.convert %arg0 : tensor<?xf32, #SparseVector32> to tensor<?xf64, #SparseVector32>
  return %0 : tensor<?xf64, #SparseVector32>
}