// RUN: mlir-opt %s --sparse-tensor-codegen=enable-buffer-initialization=true --canonicalize --cse | FileCheck %s

#SV = #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>

// Checks that codegen with buffer initialization enabled zero-fills the three
// underlying buffers (positions, coordinates, values) of a dynamically sized
// sparse vector and threads the sizes through the storage specifier.
// NOTE: the `!sparse_tensor.storage_specifier` occurrences below are prefix
// matches; FileCheck does not require the trailing encoding parameters.
// CHECK-LABEL: func.func @sparse_alloc_sparse_vector(
// CHECK-SAME: %[[VAL_0:.*]]: index) -> (memref<?xindex>, memref<?xindex>, memref<?xf64>, !sparse_tensor.storage_specifier
// CHECK: %[[VAL_1:.*]] = arith.constant 1 : index
// CHECK: %[[VAL_2:.*]] = arith.constant 0.000000e+00 : f64
// CHECK: %[[VAL_3:.*]] = arith.constant 0 : index
// CHECK: %[[VAL_4:.*]] = memref.alloc() : memref<16xindex>
// CHECK: %[[VAL_5:.*]] = memref.cast %[[VAL_4]] : memref<16xindex> to memref<?xindex>
// CHECK: linalg.fill ins(%[[VAL_3]] : index) outs(%[[VAL_4]] : memref<16xindex>)
// CHECK: %[[VAL_6:.*]] = memref.alloc() : memref<16xindex>
// CHECK: %[[VAL_7:.*]] = memref.cast %[[VAL_6]] : memref<16xindex> to memref<?xindex>
// CHECK: linalg.fill ins(%[[VAL_3]] : index) outs(%[[VAL_6]] : memref<16xindex>)
// CHECK: %[[VAL_8:.*]] = memref.alloc() : memref<16xf64>
// CHECK: %[[VAL_9:.*]] = memref.cast %[[VAL_8]] : memref<16xf64> to memref<?xf64>
// CHECK: linalg.fill ins(%[[VAL_2]] : f64) outs(%[[VAL_8]] : memref<16xf64>)
// CHECK: %[[VAL_10:.*]] = sparse_tensor.storage_specifier.init : !sparse_tensor.storage_specifier
// CHECK: %[[VAL_11:.*]] = arith.index_cast %[[VAL_0]] : index to i64
// CHECK: %[[VAL_12:.*]] = sparse_tensor.storage_specifier.set %[[VAL_10]] dim_sz at 0 with %[[VAL_11]] : i64, !sparse_tensor.storage_specifier
// CHECK: %[[VAL_13:.*]] = sparse_tensor.storage_specifier.get %[[VAL_12]] ptr_mem_sz at 0 : !sparse_tensor.storage_specifier
// CHECK: %[[VAL_14:.*]] = arith.index_cast %[[VAL_13]] : i64 to index
// CHECK: %[[VAL_15:.*]], %[[VAL_16:.*]] = sparse_tensor.push_back %[[VAL_14]], %[[VAL_5]], %[[VAL_3]] : index, memref<?xindex>, index
// CHECK: %[[VAL_17:.*]] = arith.index_cast %[[VAL_16]] : index to i64
// CHECK: %[[VAL_18:.*]] = sparse_tensor.storage_specifier.set %[[VAL_12]] ptr_mem_sz at 0 with %[[VAL_17]] : i64, !sparse_tensor.storage_specifier
// CHECK: %[[VAL_19:.*]], %[[VAL_20:.*]] = sparse_tensor.push_back %[[VAL_16]], %[[VAL_15]], %[[VAL_3]], %[[VAL_1]] : index, memref<?xindex>, index, index
// CHECK: %[[VAL_21:.*]] = arith.index_cast %[[VAL_20]] : index to i64
// CHECK: %[[VAL_22:.*]] = sparse_tensor.storage_specifier.set %[[VAL_18]] ptr_mem_sz at 0 with %[[VAL_21]] : i64, !sparse_tensor.storage_specifier
// CHECK: return %[[VAL_19]], %[[VAL_7]], %[[VAL_9]], %[[VAL_22]] : memref<?xindex>, memref<?xindex>, memref<?xf64>, !sparse_tensor.storage_specifier
func.func @sparse_alloc_sparse_vector(%arg0: index) -> tensor<?xf64, #SV> {
  %0 = bufferization.alloc_tensor(%arg0) : tensor<?xf64, #SV>
  %1 = sparse_tensor.load %0 : tensor<?xf64, #SV>
  return %1 : tensor<?xf64, #SV>
}