// xref: /llvm-project/mlir/test/Dialect/SparseTensor/rewriting_for_codegen.mlir (revision c5a67e16b6117d0c37d004dd5467b56be006ad8f)
// RUN: mlir-opt %s --lower-sparse-ops-to-foreach="enable-runtime-library=false enable-convert=false" \
// RUN: --lower-sparse-foreach-to-scf | FileCheck %s

// Attribute alias: CSR layout — dense outer (row) level, compressed inner
// (column) level.
#CSR = #sparse_tensor.encoding<{
  map = (d0, d1) -> (d0 : dense, d1 : compressed)
}>

// Attribute alias: CSC layout — same levels as #CSR but with the dimensions
// permuted (d1 outer, d0 inner).
#CSC = #sparse_tensor.encoding<{
  map = (d0, d1) -> (d1 : dense, d0 : compressed)
}>

// Attribute alias: COO layout — a non-unique compressed level followed by a
// singleton level.
#COO = #sparse_tensor.encoding<{
  map = (d0, d1) -> (d0 : compressed(nonunique), d1 : singleton)
}>

// Reading a file into a CSR tensor: expect the rewrite to read into a
// temporary COO tensor, convert it to the target layout, and deallocate
// the temporary.
// CHECK-LABEL:   func.func @sparse_new(
// CHECK-SAME:    %[[A:.*]]: !llvm.ptr) -> tensor<?x?xf32, #sparse{{[0-9]*}}> {
// CHECK:         %[[COO:.*]] = sparse_tensor.new %[[A]] : !llvm.ptr to tensor<?x?xf32, #sparse{{[0-9]*}}>
// CHECK:         %[[R:.*]] = sparse_tensor.convert %[[COO]]
// CHECK:         bufferization.dealloc_tensor %[[COO]]
// CHECK:         return %[[R]]
func.func @sparse_new(%arg0: !llvm.ptr) -> tensor<?x?xf32, #CSR> {
  %0 = sparse_tensor.new %arg0 : !llvm.ptr to tensor<?x?xf32, #CSR>
  return %0 : tensor<?x?xf32, #CSR>
}

// Same pattern as @sparse_new but targeting the permuted CSC layout:
// read into a temporary COO tensor, convert, deallocate the temporary.
// CHECK-LABEL:   func.func @sparse_new_csc(
// CHECK-SAME:    %[[A:.*]]: !llvm.ptr) -> tensor<?x?xf32, #sparse{{[0-9]*}}> {
// CHECK:         %[[COO:.*]] = sparse_tensor.new %[[A]] : !llvm.ptr to tensor<?x?xf32, #sparse{{[0-9]*}}>
// CHECK:         %[[R:.*]] = sparse_tensor.convert %[[COO]]
// CHECK:         bufferization.dealloc_tensor %[[COO]]
// CHECK:         return %[[R]]
func.func @sparse_new_csc(%arg0: !llvm.ptr) -> tensor<?x?xf32, #CSC> {
  %0 = sparse_tensor.new %arg0 : !llvm.ptr to tensor<?x?xf32, #CSC>
  return %0 : tensor<?x?xf32, #CSC>
}

// When the destination is already COO, no convert/dealloc round-trip is
// expected: the result of sparse_tensor.new is returned directly.
// CHECK-LABEL:   func.func @sparse_new_coo(
// CHECK-SAME:    %[[A:.*]]: !llvm.ptr) -> tensor<?x?xf32, #sparse{{[0-9]*}}> {
// CHECK:         %[[COO:.*]] = sparse_tensor.new %[[A]] : !llvm.ptr to tensor<?x?xf32, #sparse{{[0-9]*}}>
// CHECK:         return %[[COO]]
func.func @sparse_new_coo(%arg0: !llvm.ptr) -> tensor<?x?xf32, #COO> {
  %0 = sparse_tensor.new %arg0 : !llvm.ptr to tensor<?x?xf32, #COO>
  return %0 : tensor<?x?xf32, #COO>
}

// Writing a tensor to a file: expect the rewrite to emit writer runtime
// calls — create the writer, store the dimension sizes into a stack buffer,
// write the metadata (rank, number of entries, dim sizes), loop over the
// entries writing each value, then delete the writer.
// CHECK-LABEL:   func.func @sparse_out(
// CHECK-SAME:    %[[A:.*]]: tensor<10x20xf32, #sparse{{[0-9]*}}>,
// CHECK-SAME:    %[[B:.*]]: !llvm.ptr) {
// CHECK-DAG:     %[[C0:.*]] = arith.constant 0 : index
// CHECK-DAG:     %[[C1:.*]] = arith.constant 1 : index
// CHECK-DAG:     %[[C2:.*]] = arith.constant 2 : index
// CHECK-DAG:     %[[C10:.*]] = arith.constant 10 : index
// CHECK-DAG:     %[[C20:.*]] = arith.constant 20 : index
// CHECK:         %[[NNZ:.*]] = sparse_tensor.number_of_entries %[[A]]
// CHECK:         %[[DS:.*]] = memref.alloca(%[[C2]]) : memref<?xindex>
// CHECK:         memref.store %[[C10]], %[[DS]]{{\[}}%[[C0]]] : memref<?xindex>
// CHECK:         memref.store %[[C20]], %[[DS]]{{\[}}%[[C1]]] : memref<?xindex>
// CHECK:         %[[W:.*]] = call @createSparseTensorWriter(%[[B]])
// CHECK:         call @outSparseTensorWriterMetaData(%[[W]], %[[C2]], %[[NNZ]], %[[DS]])
// CHECK:         %[[V:.*]] = memref.alloca() : memref<f32>
// CHECK:         scf.for  %{{.*}} = %[[C0]] to %[[C10]] step %[[C1]] {
// CHECK:           scf.for  {{.*}} {
// CHECK:             func.call @outSparseTensorWriterNextF32(%[[W]], %[[C2]], %[[DS]], %[[V]])
// CHECK:           }
// CHECK:         }
// CHECK:         call @delSparseTensorWriter(%[[W]])
// CHECK:         return
// CHECK:         }
func.func @sparse_out( %arg0: tensor<10x20xf32, #CSR>, %arg1: !llvm.ptr) -> () {
  sparse_tensor.out %arg0, %arg1 : tensor<10x20xf32, #CSR>, !llvm.ptr
  return
}
