// RUN: mlir-opt %s --sparse-tensor-codegen=enable-buffer-initialization=true --canonicalize --cse | FileCheck %s
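// This test checks that, with enable-buffer-initialization=true, the sparse
// tensor codegen pass zero-initializes the buffers it allocates for the
// sparse vector storage (the linalg.fill ops with %[[C0]] below).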

#SV = #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>
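// #SV encodes a one-dimensional sparse vector with a single compressed level.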

// CHECK-LABEL: func @sparse_alloc_sparse_vector(
//  CHECK-SAME: %[[A:.*]]: index) ->
//  CHECK-SAME: memref<1xindex>, memref<3xindex>, memref<?xindex>, memref<?xindex>, memref<?xf64>
//   CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index
//   CHECK-DAG: %[[C1:.*]] = arith.constant 1 : index
//       CHECK: %[[T0:.*]] = memref.alloc() : memref<1xindex>
//       CHECK: %[[T1:.*]] = memref.alloc() : memref<3xindex>
//       CHECK: %[[T2:.*]] = memref.alloc() : memref<16xindex>
//       CHECK: %[[T3:.*]] = memref.cast %[[T2]] : memref<16xindex> to memref<?xindex>
//       CHECK: linalg.fill ins(%[[C0]] : index) outs(%[[T2]] : memref<16xindex>)
//       CHECK: %[[T4:.*]] = memref.alloc() : memref<16xindex>
//       CHECK: %[[T5:.*]] = memref.cast %[[T4]] : memref<16xindex> to memref<?xindex>
//       CHECK: linalg.fill ins(%[[C0]] : index) outs(%[[T4]] : memref<16xindex>)
//       CHECK: %[[T6:.*]] = memref.alloc() : memref<16xf64>
//       CHECK: %[[T7:.*]] = memref.cast %[[T6]] : memref<16xf64> to memref<?xf64>
//       CHECK: linalg.fill ins(%[[C0]] : index) outs(%[[T1]] : memref<3xindex>)
//       CHECK: memref.store %[[A]], %[[T0]][%[[C0]]] : memref<1xindex>
//       CHECK: %[[P0:.*]] = sparse_tensor.push_back %[[T1]], %[[T3]]
//       CHECK: %[[P1:.*]] = sparse_tensor.push_back %[[T1]], %[[P0]]
//       CHECK: return %[[T0]], %[[T1]], %[[P1]], %[[T5]], %[[T7]] :
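// The alloc_tensor op for the dynamically sized sparse vector is lowered to
// the explicit buffer allocations checked above; sparse_tensor.load then
// materializes the tensor that is returned.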
func.func @sparse_alloc_sparse_vector(%arg0: index) -> tensor<?xf64, #SV> {
  %0 = bufferization.alloc_tensor(%arg0) : tensor<?xf64, #SV>
  %1 = sparse_tensor.load %0 : tensor<?xf64, #SV>
  return %1 : tensor<?xf64, #SV>
}