//--------------------------------------------------------------------------------------------------
// WHEN CREATING A NEW TEST, PLEASE JUST COPY & PASTE WITHOUT EDITS.
//
// Set-up that's shared across all tests in this directory. In principle, this
// config could be moved to lit.local.cfg. However, there are downstream users that
// do not use these LIT config files. Hence this is kept inline.
//
// DEFINE: %{sparsifier_opts} = enable-runtime-library=true
// DEFINE: %{sparsifier_opts_sve} = enable-arm-sve=true %{sparsifier_opts}
// DEFINE: %{compile} = mlir-opt %s --sparsifier="%{sparsifier_opts}"
// DEFINE: %{compile_sve} = mlir-opt %s --sparsifier="%{sparsifier_opts_sve}"
// DEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils
// DEFINE: %{run_libs_sve} = -shared-libs=%native_mlir_runner_utils,%native_mlir_c_runner_utils
// DEFINE: %{run_opts} = -e main -entry-point-result=void
// DEFINE: %{run} = mlir-runner %{run_opts} %{run_libs}
// DEFINE: %{run_sve} = %mcr_aarch64_cmd --march=aarch64 --mattr="+sve" %{run_opts} %{run_libs_sve}
//
// DEFINE: %{env} =
//--------------------------------------------------------------------------------------------------

// RUN: %{compile} | %{run} | FileCheck %s
//
// Do the same run, but now with direct IR generation.
// REDEFINE: %{sparsifier_opts} = enable-runtime-library=false enable-buffer-initialization=true
// RUN: %{compile} | %{run} | FileCheck %s
//
// Do the same run, but now with direct IR generation and vectorization.
// REDEFINE: %{sparsifier_opts} = enable-runtime-library=false enable-buffer-initialization=true vl=2 reassociate-fp-reductions=true enable-index-optimizations=true
// RUN: %{compile} | %{run} | FileCheck %s
//
// Do the same run, but now with direct IR generation and VLA vectorization.
// RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | %{run_sve} | FileCheck %s %}

#SparseVector = #sparse_tensor.encoding<{map = (d0) -> (d0 : compressed)}>
#DCSR = #sparse_tensor.encoding<{map = (d0, d1) -> (d0 : compressed, d1 : compressed)}>

//
// Traits for tensor operations.
//
#trait_vec_scale = {
  indexing_maps = [
    affine_map<(i) -> (i)>,  // a (in)
    affine_map<(i) -> (i)>   // x (out)
  ],
  iterator_types = ["parallel"]
}
#trait_vec_op = {
  indexing_maps = [
    affine_map<(i) -> (i)>,  // a (in)
    affine_map<(i) -> (i)>,  // b (in)
    affine_map<(i) -> (i)>   // x (out)
  ],
  iterator_types = ["parallel"]
}
#trait_mat_op = {
  indexing_maps = [
    affine_map<(i,j) -> (i,j)>,  // A (in)
    affine_map<(i,j) -> (i,j)>,  // B (in)
    affine_map<(i,j) -> (i,j)>   // X (out)
  ],
  iterator_types = ["parallel", "parallel"],
  doc = "X(i,j) = A(i,j) OP B(i,j)"
}

//
// Contains test cases for the sparse_tensor.binary operator (covering the different
// cases where the left/right/overlap regions are empty, identity, or custom blocks).
//
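// The three regions of sparse_tensor.binary determine what happens at each position:
// `overlap` is applied where both operands have a stored entry, `left` where only the
// first operand does, and `right` where only the second does. The shorthand `identity`
// passes the operand value through unchanged, and an empty region `{}` produces no
// output entry for that case.
//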

module {
  // Creates a new sparse vector using the minimum values from two input sparse vectors.
  // When there is no overlap, the present value is included in the output.
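  // For example, with a = [1, _, 5] and b = [_, 2, 3], the result is [1, 2, 3]:
  // the overlap at index 2 takes min(5, 3) while the disjoint entries pass through.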
  func.func @vector_min(%arga: tensor<?xi32, #SparseVector>,
                        %argb: tensor<?xi32, #SparseVector>) -> tensor<?xi32, #SparseVector> {
    %c = arith.constant 0 : index
    %d = tensor.dim %arga, %c : tensor<?xi32, #SparseVector>
    %xv = tensor.empty(%d) : tensor<?xi32, #SparseVector>
    %0 = linalg.generic #trait_vec_op
       ins(%arga, %argb: tensor<?xi32, #SparseVector>, tensor<?xi32, #SparseVector>)
        outs(%xv: tensor<?xi32, #SparseVector>) {
        ^bb(%a: i32, %b: i32, %x: i32):
          %1 = sparse_tensor.binary %a, %b : i32, i32 to i32
            overlap={
              ^bb0(%a0: i32, %b0: i32):
                %2 = arith.minsi %a0, %b0: i32
                sparse_tensor.yield %2 : i32
            }
            left=identity
            right=identity
          linalg.yield %1 : i32
    } -> tensor<?xi32, #SparseVector>
    return %0 : tensor<?xi32, #SparseVector>
  }

  // Creates a new sparse vector by multiplying a sparse vector with a dense vector.
  // When there is no overlap, the result is left empty.
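  // For example, with sparse a = [2, _, 3] and dense b = [4, 5, 6], the result is the
  // sparse vector [8, _, 18]; positions where a has no entry stay empty.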
  func.func @vector_mul(%arga: tensor<?xf64, #SparseVector>,
                        %argb: tensor<?xf64>) -> tensor<?xf64, #SparseVector> {
    %c = arith.constant 0 : index
    %d = tensor.dim %arga, %c : tensor<?xf64, #SparseVector>
    %xv = tensor.empty(%d) : tensor<?xf64, #SparseVector>
    %0 = linalg.generic #trait_vec_op
       ins(%arga, %argb: tensor<?xf64, #SparseVector>, tensor<?xf64>)
        outs(%xv: tensor<?xf64, #SparseVector>) {
        ^bb(%a: f64, %b: f64, %x: f64):
          %1 = sparse_tensor.binary %a, %b : f64, f64 to f64
            overlap={
              ^bb0(%a0: f64, %b0: f64):
                %ret = arith.mulf %a0, %b0 : f64
                sparse_tensor.yield %ret : f64
            }
            left={}
            right={}
          linalg.yield %1 : f64
    } -> tensor<?xf64, #SparseVector>
    return %0 : tensor<?xf64, #SparseVector>
  }

  // Takes the set difference of two sparse vectors. The result includes only those
  // sparse elements present in the first vector but not in the second.
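  // For example, with a = [1, _, 3] and b = [_, 2, 3], the result keeps only a's
  // entry at index 0, i.e. [1, _, _].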
  func.func @vector_setdiff(%arga: tensor<?xf64, #SparseVector>,
                            %argb: tensor<?xf64, #SparseVector>) -> tensor<?xf64, #SparseVector> {
    %c = arith.constant 0 : index
    %d = tensor.dim %arga, %c : tensor<?xf64, #SparseVector>
    %xv = tensor.empty(%d) : tensor<?xf64, #SparseVector>
    %0 = linalg.generic #trait_vec_op
       ins(%arga, %argb: tensor<?xf64, #SparseVector>, tensor<?xf64, #SparseVector>)
        outs(%xv: tensor<?xf64, #SparseVector>) {
        ^bb(%a: f64, %b: f64, %x: f64):
          %1 = sparse_tensor.binary %a, %b : f64, f64 to f64
            overlap={}
            left=identity
            right={}
          linalg.yield %1 : f64
    } -> tensor<?xf64, #SparseVector>
    return %0 : tensor<?xf64, #SparseVector>
  }

  // Returns the index of each entry.
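  // For example, an input with entries at indices 0, 3, and 11 yields a sparse
  // vector with the values 0, 3, and 11 stored at those same positions.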
  func.func @vector_index(%arga: tensor<?xf64, #SparseVector>) -> tensor<?xi32, #SparseVector> {
    %c = arith.constant 0 : index
    %d = tensor.dim %arga, %c : tensor<?xf64, #SparseVector>
    %xv = tensor.empty(%d) : tensor<?xi32, #SparseVector>
    %0 = linalg.generic #trait_vec_scale
       ins(%arga: tensor<?xf64, #SparseVector>)
        outs(%xv: tensor<?xi32, #SparseVector>) {
        ^bb(%a: f64, %x: i32):
          %idx = linalg.index 0 : index
          %1 = sparse_tensor.binary %a, %idx : f64, index to i32
            overlap={
              ^bb0(%x0: f64, %i: index):
                %ret = arith.index_cast %i : index to i32
                sparse_tensor.yield %ret : i32
            }
            left={}
            right={}
          linalg.yield %1 : i32
    } -> tensor<?xi32, #SparseVector>
    return %0 : tensor<?xi32, #SparseVector>
  }

  // Adds two sparse matrices where they intersect. Where they do not intersect,
  // the 2nd argument's values are negated and 1st-argument-only values are ignored.
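  // For example, if A(0,0)=1 and B(0,0)=6 then X(0,0)=7, a B-only entry B(0,7)=5
  // becomes X(0,7)=-5, and an A-only entry such as A(0,1)=2 is dropped.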
  func.func @matrix_intersect(%arga: tensor<?x?xf64, #DCSR>,
                              %argb: tensor<?x?xf64, #DCSR>) -> tensor<?x?xf64, #DCSR> {
    %c0 = arith.constant 0 : index
    %c1 = arith.constant 1 : index
    %d0 = tensor.dim %arga, %c0 : tensor<?x?xf64, #DCSR>
    %d1 = tensor.dim %arga, %c1 : tensor<?x?xf64, #DCSR>
    %xv = tensor.empty(%d0, %d1) : tensor<?x?xf64, #DCSR>
    %0 = linalg.generic #trait_mat_op
       ins(%arga, %argb: tensor<?x?xf64, #DCSR>, tensor<?x?xf64, #DCSR>)
        outs(%xv: tensor<?x?xf64, #DCSR>) {
        ^bb(%a: f64, %b: f64, %x: f64):
          %1 = sparse_tensor.binary %a, %b: f64, f64 to f64
            overlap={
              ^bb0(%x0: f64, %y0: f64):
                %ret = arith.addf %x0, %y0 : f64
                sparse_tensor.yield %ret : f64
            }
            left={}
            right={
              ^bb0(%x1: f64):
                %lret = arith.negf %x1 : f64
                sparse_tensor.yield %lret : f64
            }
          linalg.yield %1 : f64
    } -> tensor<?x?xf64, #DCSR>
    return %0 : tensor<?x?xf64, #DCSR>
  }

  // Tensor addition (using the semi-ring binary operation).
  func.func @add_tensor_1(%A: tensor<4x4xf64, #DCSR>,
                          %B: tensor<4x4xf64, #DCSR>) -> tensor<4x4xf64, #DCSR> {
    %C = tensor.empty() : tensor<4x4xf64, #DCSR>
    %0 = linalg.generic #trait_mat_op
      ins(%A, %B: tensor<4x4xf64, #DCSR>,
                  tensor<4x4xf64, #DCSR>)
      outs(%C: tensor<4x4xf64, #DCSR>) {
        ^bb0(%a: f64, %b: f64, %c: f64) :
          %result = sparse_tensor.binary %a, %b : f64, f64 to f64
            overlap={
              ^bb0(%x: f64, %y: f64):
                %ret = arith.addf %x, %y : f64
                sparse_tensor.yield %ret : f64
            }
            left=identity
            right=identity
          linalg.yield %result : f64
      } -> tensor<4x4xf64, #DCSR>
    return %0 : tensor<4x4xf64, #DCSR>
  }

  // Same as @add_tensor_1, but uses sparse_tensor.yield instead of identity to yield the value.
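  // A left/right block that simply yields its argument behaves like `identity`,
  // so this kernel is expected to produce the same result as @add_tensor_1.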
  func.func @add_tensor_2(%A: tensor<4x4xf64, #DCSR>,
                          %B: tensor<4x4xf64, #DCSR>) -> tensor<4x4xf64, #DCSR> {
    %C = tensor.empty() : tensor<4x4xf64, #DCSR>
    %0 = linalg.generic #trait_mat_op
      ins(%A, %B: tensor<4x4xf64, #DCSR>,
                  tensor<4x4xf64, #DCSR>)
      outs(%C: tensor<4x4xf64, #DCSR>) {
        ^bb0(%a: f64, %b: f64, %c: f64) :
          %result = sparse_tensor.binary %a, %b : f64, f64 to f64
            overlap={
              ^bb0(%x: f64, %y: f64):
                %ret = arith.addf %x, %y : f64
                sparse_tensor.yield %ret : f64
            }
            left={
              ^bb0(%x: f64):
                sparse_tensor.yield %x : f64
            }
            right={
              ^bb0(%y: f64):
                sparse_tensor.yield %y : f64
            }
          linalg.yield %result : f64
      } -> tensor<4x4xf64, #DCSR>
    return %0 : tensor<4x4xf64, #DCSR>
  }

  // Performs a triangular add/sub operation (using the semi-ring binary op).
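  // On or above the diagonal (col >= row) the operands are added; below it they are
  // subtracted. A-only entries pass through unchanged and B-only entries are negated
  // below the diagonal.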
  func.func @triangular(%A: tensor<4x4xf64, #DCSR>,
                        %B: tensor<4x4xf64, #DCSR>) -> tensor<4x4xf64, #DCSR> {
    %C = tensor.empty() : tensor<4x4xf64, #DCSR>
    %0 = linalg.generic #trait_mat_op
      ins(%A, %B: tensor<4x4xf64, #DCSR>,
                  tensor<4x4xf64, #DCSR>)
      outs(%C: tensor<4x4xf64, #DCSR>) {
        ^bb0(%a: f64, %b: f64, %c: f64) :
          %row = linalg.index 0 : index
          %col = linalg.index 1 : index
          %result = sparse_tensor.binary %a, %b : f64, f64 to f64
            overlap={
              ^bb0(%x: f64, %y: f64):
                %cmp = arith.cmpi "uge", %col, %row : index
                %upperTriangleResult = arith.addf %x, %y : f64
                %lowerTriangleResult = arith.subf %x, %y : f64
                %ret = arith.select %cmp, %upperTriangleResult, %lowerTriangleResult : f64
                sparse_tensor.yield %ret : f64
            }
            left=identity
            right={
              ^bb0(%y: f64):
                %cmp = arith.cmpi "uge", %col, %row : index
                %lowerTriangleResult = arith.negf %y : f64
                %ret = arith.select %cmp, %y, %lowerTriangleResult : f64
                sparse_tensor.yield %ret : f64
            }
          linalg.yield %result : f64
      } -> tensor<4x4xf64, #DCSR>
    return %0 : tensor<4x4xf64, #DCSR>
  }

  // Performs a sub operation (using the semi-ring binary op) with constant thresholds.
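  // The difference A - B is clamped to [-1, 1] where both entries are present, and
  // to [-2, 2] for entries present in only one operand (B-only entries are negated first).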
  func.func @sub_with_thres(%A: tensor<4x4xf64, #DCSR>,
                            %B: tensor<4x4xf64, #DCSR>) -> tensor<4x4xf64, #DCSR> {
    %C = tensor.empty() : tensor<4x4xf64, #DCSR>
    // Defines out-block constant bounds.
    %thres_out_up = arith.constant 2.0 : f64
    %thres_out_lo = arith.constant -2.0 : f64

    %0 = linalg.generic #trait_mat_op
      ins(%A, %B: tensor<4x4xf64, #DCSR>,
                  tensor<4x4xf64, #DCSR>)
      outs(%C: tensor<4x4xf64, #DCSR>) {
        ^bb0(%a: f64, %b: f64, %c: f64) :
          %result = sparse_tensor.binary %a, %b : f64, f64 to f64
            overlap={
              ^bb0(%x: f64, %y: f64):
                // Defines in-block constant bounds.
                %thres_up = arith.constant 1.0 : f64
                %thres_lo = arith.constant -1.0 : f64
                %result = arith.subf %x, %y : f64
                %cmp = arith.cmpf "oge", %result, %thres_up : f64
                %tmp = arith.select %cmp, %thres_up, %result : f64
                %cmp1 = arith.cmpf "ole", %tmp, %thres_lo : f64
                %ret = arith.select %cmp1, %thres_lo, %tmp : f64
                sparse_tensor.yield %ret : f64
            }
            left={
              ^bb0(%x: f64):
                // Uses out-block constant bounds.
                %cmp = arith.cmpf "oge", %x, %thres_out_up : f64
                %tmp = arith.select %cmp, %thres_out_up, %x : f64
                %cmp1 = arith.cmpf "ole", %tmp, %thres_out_lo : f64
                %ret = arith.select %cmp1, %thres_out_lo, %tmp : f64
                sparse_tensor.yield %ret : f64
            }
            right={
              ^bb0(%y: f64):
                %ny = arith.negf %y : f64
                %cmp = arith.cmpf "oge", %ny, %thres_out_up : f64
                %tmp = arith.select %cmp, %thres_out_up, %ny : f64
                %cmp1 = arith.cmpf "ole", %tmp, %thres_out_lo : f64
                %ret = arith.select %cmp1, %thres_out_lo, %tmp : f64
                sparse_tensor.yield %ret : f64
            }
          linalg.yield %result : f64
      } -> tensor<4x4xf64, #DCSR>
    return %0 : tensor<4x4xf64, #DCSR>
  }

  // Performs an isEqual comparison only on intersecting elements.
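  // Produces 1 where intersecting entries compare equal and 0 where they differ;
  // non-intersecting entries yield no output.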
  func.func @intersect_equal(%A: tensor<4x4xf64, #DCSR>,
                             %B: tensor<4x4xf64, #DCSR>) -> tensor<4x4xi8, #DCSR> {
    %C = tensor.empty() : tensor<4x4xi8, #DCSR>
    %0 = linalg.generic #trait_mat_op
      ins(%A, %B: tensor<4x4xf64, #DCSR>,
                  tensor<4x4xf64, #DCSR>)
      outs(%C: tensor<4x4xi8, #DCSR>) {
        ^bb0(%a: f64, %b: f64, %c: i8) :
          %result = sparse_tensor.binary %a, %b : f64, f64 to i8
            overlap={
              ^bb0(%x: f64, %y: f64):
                %cmp = arith.cmpf "oeq", %x, %y : f64
                %ret = arith.extui %cmp : i1 to i8
                sparse_tensor.yield %ret : i8
            }
            left={}
            right={}
          linalg.yield %result : i8
      } -> tensor<4x4xi8, #DCSR>
    return %0 : tensor<4x4xi8, #DCSR>
  }

  // Keeps values present only on the left, negates values present only on the right,
  // and ignores values where the operands overlap.
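  // For example, A(2,3)=4 with no B entry stays 4, B(2,1)=5 with no A entry
  // becomes -5, and overlapping entries such as (0,0) produce nothing.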
  func.func @only_left_right(%A: tensor<4x4xf64, #DCSR>,
                             %B: tensor<4x4xf64, #DCSR>) -> tensor<4x4xf64, #DCSR> {
    %C = tensor.empty() : tensor<4x4xf64, #DCSR>
    %0 = linalg.generic #trait_mat_op
      ins(%A, %B: tensor<4x4xf64, #DCSR>,
                  tensor<4x4xf64, #DCSR>)
      outs(%C: tensor<4x4xf64, #DCSR>) {
        ^bb0(%a: f64, %b: f64, %c: f64) :
          %result = sparse_tensor.binary %a, %b : f64, f64 to f64
            overlap={}
            left=identity
            right={
              ^bb0(%y: f64):
                %ret = arith.negf %y : f64
                sparse_tensor.yield %ret : f64
            }
          linalg.yield %result : f64
      } -> tensor<4x4xf64, #DCSR>
    return %0 : tensor<4x4xf64, #DCSR>
  }

  // Driver method to call and verify kernels.
  func.func @main() {
    %c0 = arith.constant 0 : index

    // Setup sparse vectors.
    %v1 = arith.constant sparse<
       [ [0], [3], [11], [17], [20], [21], [28], [29], [31] ],
         [ 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0 ]
    > : tensor<32xf64>
    %v2 = arith.constant sparse<
       [ [1], [3], [4], [10], [16], [18], [21], [28], [29], [31] ],
         [11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0 ]
    > : tensor<32xf64>
    %v3 = arith.constant dense<
      [0., 1., 2., 3., 4., 5., 6., 7., 8., 9., 0., 1., 2., 3., 4., 5., 6., 7., 8., 9.,
       0., 1., 2., 3., 4., 5., 6., 7., 8., 9., 0., 1.]
    > : tensor<32xf64>
    %v1_si = arith.fptosi %v1 : tensor<32xf64> to tensor<32xi32>
    %v2_si = arith.fptosi %v2 : tensor<32xf64> to tensor<32xi32>

    %sv1 = sparse_tensor.convert %v1 : tensor<32xf64> to tensor<?xf64, #SparseVector>
    %sv2 = sparse_tensor.convert %v2 : tensor<32xf64> to tensor<?xf64, #SparseVector>
    %sv1_si = sparse_tensor.convert %v1_si : tensor<32xi32> to tensor<?xi32, #SparseVector>
    %sv2_si = sparse_tensor.convert %v2_si : tensor<32xi32> to tensor<?xi32, #SparseVector>
    %dv3 = tensor.cast %v3 : tensor<32xf64> to tensor<?xf64>

    // Setup sparse matrices.
    %m1 = arith.constant sparse<
       [ [0,0], [0,1], [1,7], [2,2], [2,4], [2,7], [3,0], [3,2], [3,3] ],
         [ 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0 ]
    > : tensor<4x8xf64>
    %m2 = arith.constant sparse<
       [ [0,0], [0,7], [1,0], [1,6], [2,1], [2,7] ],
         [6.0, 5.0, 4.0, 3.0, 2.0, 1.0 ]
    > : tensor<4x8xf64>
    %sm1 = sparse_tensor.convert %m1 : tensor<4x8xf64> to tensor<?x?xf64, #DCSR>
    %sm2 = sparse_tensor.convert %m2 : tensor<4x8xf64> to tensor<?x?xf64, #DCSR>

    %m3 = arith.constant dense<
      [ [ 1.0, 0.0, 3.0, 0.0],
        [ 0.0, 2.0, 0.0, 0.0],
        [ 0.0, 0.0, 0.0, 4.0],
        [ 3.0, 4.0, 0.0, 0.0] ]> : tensor<4x4xf64>
    %m4 = arith.constant dense<
      [ [ 1.0, 0.0, 1.0, 1.0],
        [ 0.0, 0.5, 0.0, 0.0],
        [ 1.0, 5.0, 2.0, 0.0],
        [ 2.0, 0.0, 0.0, 0.0] ]> : tensor<4x4xf64>

    %sm3 = sparse_tensor.convert %m3 : tensor<4x4xf64> to tensor<4x4xf64, #DCSR>
    %sm4 = sparse_tensor.convert %m4 : tensor<4x4xf64> to tensor<4x4xf64, #DCSR>

    // Call sparse vector kernels.
    %0 = call @vector_min(%sv1_si, %sv2_si)
       : (tensor<?xi32, #SparseVector>,
          tensor<?xi32, #SparseVector>) -> tensor<?xi32, #SparseVector>
    %1 = call @vector_mul(%sv1, %dv3)
      : (tensor<?xf64, #SparseVector>,
         tensor<?xf64>) -> tensor<?xf64, #SparseVector>
    %2 = call @vector_setdiff(%sv1, %sv2)
       : (tensor<?xf64, #SparseVector>,
          tensor<?xf64, #SparseVector>) -> tensor<?xf64, #SparseVector>
    %3 = call @vector_index(%sv1)
       : (tensor<?xf64, #SparseVector>) -> tensor<?xi32, #SparseVector>

    // Call sparse matrix kernels.
    %5 = call @matrix_intersect(%sm1, %sm2)
      : (tensor<?x?xf64, #DCSR>, tensor<?x?xf64, #DCSR>) -> tensor<?x?xf64, #DCSR>
    %6 = call @add_tensor_1(%sm3, %sm4)
      : (tensor<4x4xf64, #DCSR>, tensor<4x4xf64, #DCSR>) -> tensor<4x4xf64, #DCSR>
    %7 = call @add_tensor_2(%sm3, %sm4)
      : (tensor<4x4xf64, #DCSR>, tensor<4x4xf64, #DCSR>) -> tensor<4x4xf64, #DCSR>
    %8 = call @triangular(%sm3, %sm4)
      : (tensor<4x4xf64, #DCSR>, tensor<4x4xf64, #DCSR>) -> tensor<4x4xf64, #DCSR>
    %9 = call @sub_with_thres(%sm3, %sm4)
      : (tensor<4x4xf64, #DCSR>, tensor<4x4xf64, #DCSR>) -> tensor<4x4xf64, #DCSR>
    %10 = call @intersect_equal(%sm3, %sm4)
      : (tensor<4x4xf64, #DCSR>, tensor<4x4xf64, #DCSR>) -> tensor<4x4xi8, #DCSR>
    %11 = call @only_left_right(%sm3, %sm4)
      : (tensor<4x4xf64, #DCSR>, tensor<4x4xf64, #DCSR>) -> tensor<4x4xf64, #DCSR>

    //
    // Verify the results.
    //
    // CHECK:      ---- Sparse Tensor ----
    // CHECK-NEXT: nse = 9
    // CHECK-NEXT: dim = ( 32 )
    // CHECK-NEXT: lvl = ( 32 )
    // CHECK-NEXT: pos[0] : ( 0, 9 )
    // CHECK-NEXT: crd[0] : ( 0, 3, 11, 17, 20, 21, 28, 29, 31 )
    // CHECK-NEXT: values : ( 1, 2, 3, 4, 5, 6, 7, 8, 9 )
    // CHECK-NEXT: ----
    //
    // CHECK-NEXT: ---- Sparse Tensor ----
    // CHECK-NEXT: nse = 10
    // CHECK-NEXT: dim = ( 32 )
    // CHECK-NEXT: lvl = ( 32 )
    // CHECK-NEXT: pos[0] : ( 0, 10 )
    // CHECK-NEXT: crd[0] : ( 1, 3, 4, 10, 16, 18, 21, 28, 29, 31 )
    // CHECK-NEXT: values : ( 11, 12, 13, 14, 15, 16, 17, 18, 19, 20 )
    // CHECK-NEXT: ----
    //
    // CHECK-NEXT: ---- Sparse Tensor ----
    // CHECK-NEXT: nse = 14
    // CHECK-NEXT: dim = ( 32 )
    // CHECK-NEXT: lvl = ( 32 )
    // CHECK-NEXT: pos[0] : ( 0, 14 )
    // CHECK-NEXT: crd[0] : ( 0, 1, 3, 4, 10, 11, 16, 17, 18, 20, 21, 28, 29, 31 )
    // CHECK-NEXT: values : ( 1, 11, 2, 13, 14, 3, 15, 4, 16, 5, 6, 7, 8, 9 )
    // CHECK-NEXT: ----
    //
    // CHECK-NEXT: ---- Sparse Tensor ----
    // CHECK-NEXT: nse = 9
    // CHECK-NEXT: dim = ( 32 )
    // CHECK-NEXT: lvl = ( 32 )
    // CHECK-NEXT: pos[0] : ( 0, 9 )
    // CHECK-NEXT: crd[0] : ( 0, 3, 11, 17, 20, 21, 28, 29, 31 )
    // CHECK-NEXT: values : ( 0, 6, 3, 28, 0, 6, 56, 72, 9 )
    // CHECK-NEXT: ----
    //
    // CHECK-NEXT: ---- Sparse Tensor ----
    // CHECK-NEXT: nse = 4
    // CHECK-NEXT: dim = ( 32 )
    // CHECK-NEXT: lvl = ( 32 )
    // CHECK-NEXT: pos[0] : ( 0, 4 )
    // CHECK-NEXT: crd[0] : ( 0, 11, 17, 20 )
    // CHECK-NEXT: values : ( 1, 3, 4, 5 )
    // CHECK-NEXT: ----
    //
    // CHECK-NEXT: ---- Sparse Tensor ----
    // CHECK-NEXT: nse = 9
    // CHECK-NEXT: dim = ( 32 )
    // CHECK-NEXT: lvl = ( 32 )
    // CHECK-NEXT: pos[0] : ( 0, 9 )
    // CHECK-NEXT: crd[0] : ( 0, 3, 11, 17, 20, 21, 28, 29, 31 )
    // CHECK-NEXT: values : ( 0, 3, 11, 17, 20, 21, 28, 29, 31 )
    // CHECK-NEXT: ----
    //
    // CHECK-NEXT: ---- Sparse Tensor ----
    // CHECK-NEXT: nse = 6
    // CHECK-NEXT: dim = ( 4, 8 )
    // CHECK-NEXT: lvl = ( 4, 8 )
    // CHECK-NEXT: pos[0] : ( 0, 3 )
    // CHECK-NEXT: crd[0] : ( 0, 1, 2 )
    // CHECK-NEXT: pos[1] : ( 0, 2, 4, 6 )
    // CHECK-NEXT: crd[1] : ( 0, 7, 0, 6, 1, 7 )
    // CHECK-NEXT: values : ( 7, -5, -4, -3, -2, 7 )
    // CHECK-NEXT: ----
    //
    // CHECK-NEXT: ---- Sparse Tensor ----
    // CHECK-NEXT: nse = 10
    // CHECK-NEXT: dim = ( 4, 4 )
    // CHECK-NEXT: lvl = ( 4, 4 )
    // CHECK-NEXT: pos[0] : ( 0, 4 )
    // CHECK-NEXT: crd[0] : ( 0, 1, 2, 3 )
    // CHECK-NEXT: pos[1] : ( 0, 3, 4, 8, 10 )
    // CHECK-NEXT: crd[1] : ( 0, 2, 3, 1, 0, 1, 2, 3, 0, 1 )
    // CHECK-NEXT: values : ( 2, 4, 1, 2.5, 1, 5, 2, 4, 5, 4 )
    // CHECK-NEXT: ----
    //
    // CHECK-NEXT: ---- Sparse Tensor ----
    // CHECK-NEXT: nse = 10
    // CHECK-NEXT: dim = ( 4, 4 )
    // CHECK-NEXT: lvl = ( 4, 4 )
    // CHECK-NEXT: pos[0] : ( 0, 4 )
    // CHECK-NEXT: crd[0] : ( 0, 1, 2, 3 )
    // CHECK-NEXT: pos[1] : ( 0, 3, 4, 8, 10 )
    // CHECK-NEXT: crd[1] : ( 0, 2, 3, 1, 0, 1, 2, 3, 0, 1 )
    // CHECK-NEXT: values : ( 2, 4, 1, 2.5, 1, 5, 2, 4, 5, 4 )
    // CHECK-NEXT: ----
    //
    // CHECK-NEXT: ---- Sparse Tensor ----
    // CHECK-NEXT: nse = 10
    // CHECK-NEXT: dim = ( 4, 4 )
    // CHECK-NEXT: lvl = ( 4, 4 )
    // CHECK-NEXT: pos[0] : ( 0, 4 )
    // CHECK-NEXT: crd[0] : ( 0, 1, 2, 3 )
    // CHECK-NEXT: pos[1] : ( 0, 3, 4, 8, 10 )
    // CHECK-NEXT: crd[1] : ( 0, 2, 3, 1, 0, 1, 2, 3, 0, 1 )
    // CHECK-NEXT: values : ( 2, 4, 1, 2.5, -1, -5, 2, 4, 1, 4 )
    // CHECK-NEXT: ----
    //
    // CHECK-NEXT: ---- Sparse Tensor ----
    // CHECK-NEXT: nse = 10
    // CHECK-NEXT: dim = ( 4, 4 )
    // CHECK-NEXT: lvl = ( 4, 4 )
    // CHECK-NEXT: pos[0] : ( 0, 4 )
    // CHECK-NEXT: crd[0] : ( 0, 1, 2, 3 )
    // CHECK-NEXT: pos[1] : ( 0, 3, 4, 8, 10 )
    // CHECK-NEXT: crd[1] : ( 0, 2, 3, 1, 0, 1, 2, 3, 0, 1 )
    // CHECK-NEXT: values : ( 0, 1, -1, 1, -1, -2, -2, 2, 1, 2 )
    // CHECK-NEXT: ----
    //
    // CHECK-NEXT: ---- Sparse Tensor ----
    // CHECK-NEXT: nse = 4
    // CHECK-NEXT: dim = ( 4, 4 )
    // CHECK-NEXT: lvl = ( 4, 4 )
    // CHECK-NEXT: pos[0] : ( 0, 3 )
    // CHECK-NEXT: crd[0] : ( 0, 1, 3 )
    // CHECK-NEXT: pos[1] : ( 0, 2, 3, 4 )
    // CHECK-NEXT: crd[1] : ( 0, 2, 1, 0 )
    // CHECK-NEXT: values : ( 1, 0, 0, 0 )
    // CHECK-NEXT: ----
    //
    // CHECK-NEXT: ---- Sparse Tensor ----
    // CHECK-NEXT: nse = 6
    // CHECK-NEXT: dim = ( 4, 4 )
    // CHECK-NEXT: lvl = ( 4, 4 )
    // CHECK-NEXT: pos[0] : ( 0, 3 )
    // CHECK-NEXT: crd[0] : ( 0, 2, 3 )
    // CHECK-NEXT: pos[1] : ( 0, 1, 5, 6 )
    // CHECK-NEXT: crd[1] : ( 3, 0, 1, 2, 3, 1 )
    // CHECK-NEXT: values : ( -1, -1, -5, -2, 4, 4 )
    //
    sparse_tensor.print %sv1 : tensor<?xf64, #SparseVector>
    sparse_tensor.print %sv2 : tensor<?xf64, #SparseVector>
    sparse_tensor.print %0   : tensor<?xi32, #SparseVector>
    sparse_tensor.print %1   : tensor<?xf64, #SparseVector>
    sparse_tensor.print %2   : tensor<?xf64, #SparseVector>
    sparse_tensor.print %3   : tensor<?xi32, #SparseVector>
    sparse_tensor.print %5   : tensor<?x?xf64, #DCSR>
    sparse_tensor.print %6   : tensor<4x4xf64, #DCSR>
    sparse_tensor.print %7   : tensor<4x4xf64, #DCSR>
    sparse_tensor.print %8   : tensor<4x4xf64, #DCSR>
    sparse_tensor.print %9   : tensor<4x4xf64, #DCSR>
    sparse_tensor.print %10  : tensor<4x4xi8, #DCSR>
    sparse_tensor.print %11  : tensor<4x4xf64, #DCSR>

    // Release the resources.
    bufferization.dealloc_tensor %sv1 : tensor<?xf64, #SparseVector>
    bufferization.dealloc_tensor %sv2 : tensor<?xf64, #SparseVector>
    bufferization.dealloc_tensor %sv1_si : tensor<?xi32, #SparseVector>
    bufferization.dealloc_tensor %sv2_si : tensor<?xi32, #SparseVector>
    bufferization.dealloc_tensor %sm1 : tensor<?x?xf64, #DCSR>
    bufferization.dealloc_tensor %sm2 : tensor<?x?xf64, #DCSR>
    bufferization.dealloc_tensor %sm3 : tensor<4x4xf64, #DCSR>
    bufferization.dealloc_tensor %sm4 : tensor<4x4xf64, #DCSR>
    bufferization.dealloc_tensor %0 : tensor<?xi32, #SparseVector>
    bufferization.dealloc_tensor %1 : tensor<?xf64, #SparseVector>
    bufferization.dealloc_tensor %2 : tensor<?xf64, #SparseVector>
    bufferization.dealloc_tensor %3 : tensor<?xi32, #SparseVector>
    bufferization.dealloc_tensor %5 : tensor<?x?xf64, #DCSR>
    bufferization.dealloc_tensor %6 : tensor<4x4xf64, #DCSR>
    bufferization.dealloc_tensor %7 : tensor<4x4xf64, #DCSR>
    bufferization.dealloc_tensor %8 : tensor<4x4xf64, #DCSR>
    bufferization.dealloc_tensor %9 : tensor<4x4xf64, #DCSR>
    bufferization.dealloc_tensor %10 : tensor<4x4xi8, #DCSR>
    bufferization.dealloc_tensor %11 : tensor<4x4xf64, #DCSR>
    return
  }
}