// RUN: mlir-opt %s --sparse-reinterpret-map -sparsification | FileCheck %s

//
// A contrived example where the sparse tensor B is only
// used in the linalg op to determine the number of iterations
// for the k-loop. This is included to make sure the sparse
// compiler still generates the correct loop nest for this case.
//

#SM = #sparse_tensor.encoding<{ map = (d0, d1) -> (d0 : dense, d1 : compressed) }>

#trait = {
  indexing_maps = [
    affine_map<(i,j,k) -> (i,j)>,  // A
    affine_map<(i,j,k) -> (k,j)>,  // B
    affine_map<(i,j,k) -> (i,j)>   // C (output)
  ],
  iterator_types = ["parallel", "parallel", "reduction"],
  doc = "C(i,j) = SUM_k A(i,j)"
}

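//
// The CHECK lines below expect a plain dense i/k/j scf.for nest with trip
// counts 2, 8, and 4; the k-loop bound of 8 is B's only contribution, and
// B's storage (positions, coordinates, values) is never read.
//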
// CHECK-LABEL:   func.func @b_unused(
// CHECK-SAME:      %[[VAL_0:.*]]: tensor<2x4xf64>,
// CHECK-SAME:      %[[VAL_1:.*]]: tensor<8x4xf64, #sparse{{[0-9]*}}>,
// CHECK-SAME:      %[[VAL_2:.*]]: tensor<2x4xf64>) -> tensor<2x4xf64> {
// CHECK-DAG:       %[[VAL_3:.*]] = arith.constant 8 : index
// CHECK-DAG:       %[[VAL_4:.*]] = arith.constant 2 : index
// CHECK-DAG:       %[[VAL_5:.*]] = arith.constant 4 : index
// CHECK-DAG:       %[[VAL_6:.*]] = arith.constant 0 : index
// CHECK-DAG:       %[[VAL_7:.*]] = arith.constant 1 : index
// CHECK-DAG:       %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_0]] : tensor<2x4xf64>
// CHECK-DAG:       %[[VAL_9:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<2x4xf64>
// CHECK:           scf.for %[[VAL_10:.*]] = %[[VAL_6]] to %[[VAL_4]] step %[[VAL_7]] {
// CHECK:             scf.for %[[VAL_11:.*]] = %[[VAL_6]] to %[[VAL_3]] step %[[VAL_7]] {
// CHECK:               scf.for %[[VAL_12:.*]] = %[[VAL_6]] to %[[VAL_5]] step %[[VAL_7]] {
// CHECK:                 %[[VAL_13:.*]] = memref.load %[[VAL_9]]{{\[}}%[[VAL_10]], %[[VAL_12]]] : memref<2x4xf64>
// CHECK:                 %[[VAL_14:.*]] = memref.load %[[VAL_8]]{{\[}}%[[VAL_10]], %[[VAL_12]]] : memref<2x4xf64>
// CHECK:                 %[[VAL_15:.*]] = arith.addf %[[VAL_13]], %[[VAL_14]] : f64
// CHECK:                 memref.store %[[VAL_15]], %[[VAL_9]]{{\[}}%[[VAL_10]], %[[VAL_12]]] : memref<2x4xf64>
// CHECK:               }
// CHECK:             }
// CHECK:           }
// CHECK:           %[[VAL_16:.*]] = bufferization.to_tensor %[[VAL_9]] : memref<2x4xf64>
// CHECK:           return %[[VAL_16]] : tensor<2x4xf64>
// CHECK:         }
func.func @b_unused(%argA: tensor<2x4xf64>,
                    %argB: tensor<8x4xf64, #SM>,
                    %argC: tensor<2x4xf64>) -> tensor<2x4xf64> {
  %result = linalg.generic #trait
    ins(%argA, %argB: tensor<2x4xf64>, tensor<8x4xf64, #SM>)
    outs(%argC: tensor<2x4xf64>) {
      ^bb(%a: f64, %b: f64, %c: f64):
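         // Block argument %b (the element of B) is deliberately left unused.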
         %0 = arith.addf %c, %a : f64
         linalg.yield %0 : f64
  } -> tensor<2x4xf64>
  return %result : tensor<2x4xf64>
}
