// NOTE: Assertions have been autogenerated by utils/generate-test-checks.py
// RUN: mlir-opt %s --sparse-reinterpret-map -sparsification | FileCheck %s

// Test to demonstrate the difference between non-annotated dense tensors
// and all-dense-annotated "sparse" tensors. The former class remains as
// two-dimensional tensors that are bufferized by subsequent passes. The
// latter class is linearized into one-dimensional buffers that are backed
// by the runtime support library.
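//
// For example, with the all-dense encoding #DenseMatrix below, element
// A(i,j) of a 32x16 matrix is stored at linear offset i * 16 + j in the
// one-dimensional values buffer; this is exactly the index arithmetic
// (arith.muli / arith.addi) visible in the generated loops checked below.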

#DenseMatrix = #sparse_tensor.encoding<{ map = (d0, d1) -> (d0 : dense, d1 : dense) }>

#trait_2d = {
  indexing_maps = [
    affine_map<(i,j) -> (i,j)>,  // A
    affine_map<(i,j) -> (i,j)>   // X (out)
  ],
  iterator_types = ["parallel", "parallel"],
  doc = "X(i,j) = A(i,j) + 1"
}

#trait_3d = {
  indexing_maps = [
    affine_map<(i,j,k) -> (i,j,k)>,  // A
    affine_map<(i,j,k) -> (i,j)>     // X (out)
  ],
  iterator_types = ["parallel", "parallel", "reduction"],
  doc = "X(i,j) += A(i,j,k)"
}

//
// Test with an all-dense-annotated "sparse" matrix as input and
// a non-annotated dense matrix as output.
//
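// The annotated input is read through its linearized values buffer
// (memref.load at offset i * 16 + j), while the non-annotated output is
// stored through an ordinary two-dimensional memref.
//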
// CHECK-LABEL:   func @dense1(
// CHECK-SAME:                 %[[VAL_0:.*]]: tensor<32x16xf32, #sparse{{[0-9]*}}>,
// CHECK-SAME:                 %[[VAL_1:.*]]: tensor<32x16xf32>) -> tensor<32x16xf32> {
// CHECK-DAG:       %[[VAL_2:.*]] = arith.constant 1.000000e+00 : f32
// CHECK-DAG:       %[[VAL_3:.*]] = arith.constant 32 : index
// CHECK-DAG:       %[[VAL_4:.*]] = arith.constant 16 : index
// CHECK-DAG:       %[[VAL_5:.*]] = arith.constant 0 : index
// CHECK-DAG:       %[[VAL_6:.*]] = arith.constant 1 : index
// CHECK:           %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16xf32, #sparse{{[0-9]*}}> to memref<?xf32>
// CHECK:           %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<32x16xf32> to memref<32x16xf32>
// CHECK:           scf.for %[[VAL_9:.*]] = %[[VAL_5]] to %[[VAL_3]] step %[[VAL_6]] {
// CHECK:             %[[VAL_11:.*]] = arith.muli %[[VAL_9]], %[[VAL_4]] : index
// CHECK:             scf.for %[[VAL_10:.*]] = %[[VAL_5]] to %[[VAL_4]] step %[[VAL_6]] {
// CHECK:               %[[VAL_12:.*]] = arith.addi %[[VAL_10]], %[[VAL_11]] : index
// CHECK:               %[[VAL_13:.*]] = memref.load %[[VAL_7]]{{\[}}%[[VAL_12]]] : memref<?xf32>
// CHECK:               %[[VAL_14:.*]] = arith.addf %[[VAL_13]], %[[VAL_2]] : f32
// CHECK:               memref.store %[[VAL_14]], %[[VAL_8]]{{\[}}%[[VAL_9]], %[[VAL_10]]] : memref<32x16xf32>
// CHECK:             }
// CHECK:           }
// CHECK:           %[[VAL_15:.*]] = bufferization.to_tensor %[[VAL_8]] : memref<32x16xf32>
// CHECK:           return %[[VAL_15]] : tensor<32x16xf32>
// CHECK:         }
func.func @dense1(%arga: tensor<32x16xf32, #DenseMatrix>,
                  %argx: tensor<32x16xf32>)
    -> tensor<32x16xf32> {
  %c = arith.constant 1.0 : f32
  %0 = linalg.generic #trait_2d
     ins(%arga: tensor<32x16xf32, #DenseMatrix>)
    outs(%argx: tensor<32x16xf32>) {
      ^bb(%a: f32, %x: f32):
        %1 = arith.addf %a, %c : f32
        linalg.yield %1 : f32
  } -> tensor<32x16xf32>
  return %0 : tensor<32x16xf32>
}

//
// Test with a non-annotated dense matrix as input and
// an all-dense-annotated "sparse" matrix as output.
//
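// Here the roles are reversed: the result is computed directly into the
// linearized values buffer of the annotated output (memref.store at offset
// i * 16 + j), and sparse_tensor.load materializes the updated tensor.
//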
// CHECK-LABEL:   func @dense2(
// CHECK-SAME:      %[[VAL_0:.*]]: tensor<32x16xf32>,
// CHECK-SAME:      %[[VAL_1:.*]]: tensor<32x16xf32, #sparse{{[0-9]*}}>) -> tensor<32x16xf32, #sparse{{[0-9]*}}> {
// CHECK-DAG:       %[[VAL_2:.*]] = arith.constant 1.000000e+00 : f32
// CHECK-DAG:       %[[VAL_3:.*]] = arith.constant 32 : index
// CHECK-DAG:       %[[VAL_4:.*]] = arith.constant 16 : index
// CHECK-DAG:       %[[VAL_5:.*]] = arith.constant 0 : index
// CHECK-DAG:       %[[VAL_6:.*]] = arith.constant 1 : index
// CHECK:           %[[VAL_7:.*]] = bufferization.to_memref %[[VAL_0]] : tensor<32x16xf32> to memref<32x16xf32>
// CHECK:           %[[VAL_8:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<32x16xf32, #sparse{{[0-9]*}}> to memref<?xf32>
// CHECK:           scf.for %[[VAL_9:.*]] = %[[VAL_5]] to %[[VAL_3]] step %[[VAL_6]] {
// CHECK:             %[[VAL_11:.*]] = arith.muli %[[VAL_9]], %[[VAL_4]] : index
// CHECK:             scf.for %[[VAL_10:.*]] = %[[VAL_5]] to %[[VAL_4]] step %[[VAL_6]] {
// CHECK:               %[[VAL_12:.*]] = arith.addi %[[VAL_10]], %[[VAL_11]] : index
// CHECK:               %[[VAL_13:.*]] = memref.load %[[VAL_7]]{{\[}}%[[VAL_9]], %[[VAL_10]]] : memref<32x16xf32>
// CHECK:               %[[VAL_14:.*]] = arith.addf %[[VAL_13]], %[[VAL_2]] : f32
// CHECK:               memref.store %[[VAL_14]], %[[VAL_8]]{{\[}}%[[VAL_12]]] : memref<?xf32>
// CHECK:             }
// CHECK:           }
// CHECK:           %[[VAL_15:.*]] = sparse_tensor.load %[[VAL_1]] : tensor<32x16xf32, #sparse{{[0-9]*}}>
// CHECK:           return %[[VAL_15]] : tensor<32x16xf32, #sparse{{[0-9]*}}>
// CHECK:         }
func.func @dense2(%arga: tensor<32x16xf32>,
                  %argx: tensor<32x16xf32, #DenseMatrix>)
    -> tensor<32x16xf32, #DenseMatrix> {
  %c = arith.constant 1.0 : f32
  %0 = linalg.generic #trait_2d
     ins(%arga: tensor<32x16xf32>)
    outs(%argx: tensor<32x16xf32, #DenseMatrix>) {
      ^bb(%a: f32, %x: f32):
        %1 = arith.addf %a, %c : f32
        linalg.yield %1 : f32
  } -> tensor<32x16xf32, #DenseMatrix>
  return %0 : tensor<32x16xf32, #DenseMatrix>
}


//
// Test with a non-annotated dense matrix as input and
// an all-dense-annotated "sparse" matrix as output.
// The missing innermost "k" index (due to a reduction) is accounted
// for by scalarizing the reduction operation for the output tensor.
//
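// The scalarized pattern below loads X(i,j) from the linearized buffer
// once, accumulates over k through the iter_args of the innermost scf.for,
// and stores the final sum back at the same linear offset.
//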
// CHECK-LABEL:   func @dense3(
// CHECK-SAME:      %[[VAL_0:.*]]: tensor<32x16x8xf32>,
// CHECK-SAME:      %[[VAL_1:.*]]: tensor<32x16xf32, #sparse{{[0-9]*}}>) -> tensor<32x16xf32, #sparse{{[0-9]*}}> {
// CHECK-DAG:       %[[VAL_2:.*]] = arith.constant 8 : index
// CHECK-DAG:       %[[VAL_3:.*]] = arith.constant 32 : index
// CHECK-DAG:       %[[VAL_4:.*]] = arith.constant 16 : index
// CHECK-DAG:       %[[VAL_5:.*]] = arith.constant 0 : index
// CHECK-DAG:       %[[VAL_6:.*]] = arith.constant 1 : index
// CHECK:           %[[VAL_7:.*]] = bufferization.to_memref %[[VAL_0]] : tensor<32x16x8xf32> to memref<32x16x8xf32>
// CHECK:           %[[VAL_8:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<32x16xf32, #sparse{{[0-9]*}}> to memref<?xf32>
// CHECK:           scf.for %[[VAL_9:.*]] = %[[VAL_5]] to %[[VAL_3]] step %[[VAL_6]] {
// CHECK:             %[[VAL_11:.*]] = arith.muli %[[VAL_9]], %[[VAL_4]] : index
// CHECK:             scf.for %[[VAL_10:.*]] = %[[VAL_5]] to %[[VAL_4]] step %[[VAL_6]] {
// CHECK:               %[[VAL_12:.*]] = arith.addi %[[VAL_10]], %[[VAL_11]] : index
// CHECK:               %[[VAL_13:.*]] = memref.load %[[VAL_8]]{{\[}}%[[VAL_12]]] : memref<?xf32>
// CHECK:               %[[VAL_14:.*]] = scf.for %[[VAL_15:.*]] = %[[VAL_5]] to %[[VAL_2]] step %[[VAL_6]] iter_args(%[[VAL_16:.*]] = %[[VAL_13]]) -> (f32) {
// CHECK:                 %[[VAL_17:.*]] = memref.load %[[VAL_7]]{{\[}}%[[VAL_9]], %[[VAL_10]], %[[VAL_15]]] : memref<32x16x8xf32>
// CHECK:                 %[[VAL_18:.*]] = arith.addf %[[VAL_16]], %[[VAL_17]] : f32
// CHECK:                 scf.yield %[[VAL_18]] : f32
// CHECK:               }
// CHECK:               memref.store %[[VAL_19:.*]], %[[VAL_8]]{{\[}}%[[VAL_12]]] : memref<?xf32>
// CHECK:             }
// CHECK:           }
// CHECK:           %[[VAL_20:.*]] = sparse_tensor.load %[[VAL_1]] : tensor<32x16xf32, #sparse{{[0-9]*}}>
// CHECK:           return %[[VAL_20]] : tensor<32x16xf32, #sparse{{[0-9]*}}>
// CHECK:         }
func.func @dense3(%arga: tensor<32x16x8xf32>,
                  %argx: tensor<32x16xf32, #DenseMatrix>)
    -> tensor<32x16xf32, #DenseMatrix> {
  %0 = linalg.generic #trait_3d
     ins(%arga: tensor<32x16x8xf32>)
    outs(%argx: tensor<32x16xf32, #DenseMatrix>) {
      ^bb(%a: f32, %x: f32):
        %1 = arith.addf %x, %a : f32
        linalg.yield %1 : f32
  } -> tensor<32x16xf32, #DenseMatrix>
  return %0 : tensor<32x16xf32, #DenseMatrix>
}
155