// RUN: mlir-opt %s --sparse-tensor-codegen --canonicalize --cse | FileCheck %s

#SparseVector64 = #sparse_tensor.encoding<{
  map = (d0) -> (d0 : compressed),
  posWidth = 64,
  crdWidth = 64
}>

#SparseVector32 = #sparse_tensor.encoding<{
  map = (d0) -> (d0 : compressed),
  posWidth = 32,
  crdWidth = 32
}>

// Tests a width-only conversion: positions and coordinates are narrowed from
// 64-bit to 32-bit storage by truncating each entry, while the f32 values
// buffer is copied verbatim.
//
// CHECK-LABEL: func.func @sparse_convert(
// CHECK-SAME:    %[[VAL_0:.*0]]: memref<?xi64>,
// CHECK-SAME:    %[[VAL_1:.*1]]: memref<?xi64>,
// CHECK-SAME:    %[[VAL_2:.*2]]: memref<?xf32>,
// CHECK-SAME:    %[[VAL_3:.*3]]: !sparse_tensor.storage_specifier
// CHECK:         %[[VAL_4:.*]] = arith.constant 1 : index
// CHECK:         %[[VAL_5:.*]] = arith.constant 0 : index
// CHECK:         %[[VAL_6:.*]] = memref.dim %[[VAL_0]], %[[VAL_5]] : memref<?xi64>
// CHECK:         %[[VAL_7:.*]] = memref.alloc(%[[VAL_6]]) : memref<?xi32>
// CHECK:         scf.for %[[VAL_8:.*]] = %[[VAL_5]] to %[[VAL_6]] step %[[VAL_4]] {
// CHECK:           %[[VAL_9:.*]] = memref.load %[[VAL_0]]{{\[}}%[[VAL_8]]] : memref<?xi64>
// CHECK:           %[[VAL_10:.*]] = arith.trunci %[[VAL_9]] : i64 to i32
// CHECK:           memref.store %[[VAL_10]], %[[VAL_7]]{{\[}}%[[VAL_8]]] : memref<?xi32>
// CHECK:         }
// CHECK:         %[[VAL_11:.*]] = memref.dim %[[VAL_1]], %[[VAL_5]] : memref<?xi64>
// CHECK:         %[[VAL_12:.*]] = memref.alloc(%[[VAL_11]]) : memref<?xi32>
// CHECK:         scf.for %[[VAL_13:.*]] = %[[VAL_5]] to %[[VAL_11]] step %[[VAL_4]] {
// CHECK:           %[[VAL_14:.*]] = memref.load %[[VAL_1]]{{\[}}%[[VAL_13]]] : memref<?xi64>
// CHECK:           %[[VAL_15:.*]] = arith.trunci %[[VAL_14]] : i64 to i32
// CHECK:           memref.store %[[VAL_15]], %[[VAL_12]]{{\[}}%[[VAL_13]]] : memref<?xi32>
// CHECK:         }
// CHECK:         %[[VAL_16:.*]] = memref.dim %[[VAL_2]], %[[VAL_5]] : memref<?xf32>
// CHECK:         %[[VAL_17:.*]] = memref.alloc(%[[VAL_16]]) : memref<?xf32>
// CHECK:         memref.copy %[[VAL_2]], %[[VAL_17]] : memref<?xf32> to memref<?xf32>
// CHECK:         return %[[VAL_7]], %[[VAL_12]], %[[VAL_17]], %[[VAL_3]] : memref<?xi32>, memref<?xi32>, memref<?xf32>, !sparse_tensor.storage_specifier
// CHECK:       }
func.func @sparse_convert(%arg0: tensor<?xf32, #SparseVector64>) -> tensor<?xf32, #SparseVector32> {
  %0 = sparse_tensor.convert %arg0 : tensor<?xf32, #SparseVector64> to tensor<?xf32, #SparseVector32>
  return %0 : tensor<?xf32, #SparseVector32>
}

// Tests a value-only conversion: positions and coordinates keep their 32-bit
// width and are copied verbatim, while the values are extended element-wise
// from f32 to f64.
//
// CHECK-LABEL: func.func @sparse_convert_value(
// CHECK-SAME:    %[[VAL_0:.*0]]: memref<?xi32>,
// CHECK-SAME:    %[[VAL_1:.*1]]: memref<?xi32>,
// CHECK-SAME:    %[[VAL_2:.*2]]: memref<?xf32>,
// CHECK-SAME:    %[[VAL_3:.*]]: !sparse_tensor.storage_specifier
// CHECK-DAG:     %[[VAL_4:.*]] = arith.constant 1 : index
// CHECK-DAG:     %[[VAL_5:.*]] = arith.constant 0 : index
// CHECK:         %[[VAL_6:.*]] = memref.dim %[[VAL_0]], %[[VAL_5]] : memref<?xi32>
// CHECK:         %[[VAL_7:.*]] = memref.alloc(%[[VAL_6]]) : memref<?xi32>
// CHECK:         memref.copy %[[VAL_0]], %[[VAL_7]] : memref<?xi32> to memref<?xi32>
// CHECK:         %[[VAL_8:.*]] = memref.dim %[[VAL_1]], %[[VAL_5]] : memref<?xi32>
// CHECK:         %[[VAL_9:.*]] = memref.alloc(%[[VAL_8]]) : memref<?xi32>
// CHECK:         memref.copy %[[VAL_1]], %[[VAL_9]] : memref<?xi32> to memref<?xi32>
// CHECK:         %[[VAL_10:.*]] = memref.dim %[[VAL_2]], %[[VAL_5]] : memref<?xf32>
// CHECK:         %[[VAL_11:.*]] = memref.alloc(%[[VAL_10]]) : memref<?xf64>
// CHECK:         scf.for %[[VAL_12:.*]] = %[[VAL_5]] to %[[VAL_10]] step %[[VAL_4]] {
// CHECK:           %[[VAL_13:.*]] = memref.load %[[VAL_2]]{{\[}}%[[VAL_12]]] : memref<?xf32>
// CHECK:           %[[VAL_14:.*]] = arith.extf %[[VAL_13]] : f32 to f64
// CHECK:           memref.store %[[VAL_14]], %[[VAL_11]]{{\[}}%[[VAL_12]]] : memref<?xf64>
// CHECK:         }
// CHECK:         return %[[VAL_7]], %[[VAL_9]], %[[VAL_11]], %[[VAL_3]] : memref<?xi32>, memref<?xi32>, memref<?xf64>, !sparse_tensor.storage_specifier
// CHECK:       }
func.func @sparse_convert_value(%arg0: tensor<?xf32, #SparseVector32>) -> tensor<?xf64, #SparseVector32> {
  %0 = sparse_tensor.convert %arg0 : tensor<?xf32, #SparseVector32> to tensor<?xf64, #SparseVector32>
  return %0 : tensor<?xf64, #SparseVector32>
}
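
// A hypothetical companion sketch (not part of the original test): converting
// in the widening direction, from 32-bit back to 64-bit position/coordinate
// storage. The codegen would presumably extend each position and coordinate
// entry element-wise instead of truncating it; CHECK lines are omitted because
// the exact lowered output is not verified here.
func.func @sparse_convert_widen(%arg0: tensor<?xf32, #SparseVector32>) -> tensor<?xf32, #SparseVector64> {
  %0 = sparse_tensor.convert %arg0 : tensor<?xf32, #SparseVector32> to tensor<?xf32, #SparseVector64>
  return %0 : tensor<?xf32, #SparseVector64>
}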