//--------------------------------------------------------------------------------------------------
// WHEN CREATING A NEW TEST, PLEASE JUST COPY & PASTE WITHOUT EDITS.
//
// Set-up that's shared across all tests in this directory. In principle, this
// config could be moved to lit.local.cfg. However, there are downstream users
// that do not use these LIT config files. Hence this is kept inline.
//
// DEFINE: %{sparsifier_opts} = enable-runtime-library=true
// DEFINE: %{sparsifier_opts_sve} = enable-arm-sve=true %{sparsifier_opts}
// DEFINE: %{compile} = mlir-opt %s --sparsifier="%{sparsifier_opts}"
// DEFINE: %{compile_sve} = mlir-opt %s --sparsifier="%{sparsifier_opts_sve}"
// DEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils
// DEFINE: %{run_libs_sve} = -shared-libs=%native_mlir_runner_utils,%native_mlir_c_runner_utils
// DEFINE: %{run_opts} = -e main -entry-point-result=void
// DEFINE: %{run} = mlir-runner %{run_opts} %{run_libs}
// DEFINE: %{run_sve} = %mcr_aarch64_cmd --march=aarch64 --mattr="+sve" %{run_opts} %{run_libs_sve}
//
// DEFINE: %{env} =
//--------------------------------------------------------------------------------------------------

// RUN: %{compile} | %{run} | FileCheck %s
//
// Do the same run, but now with direct IR generation.
// REDEFINE: %{sparsifier_opts} = enable-runtime-library=false enable-buffer-initialization=true
// RUN: %{compile} | %{run} | FileCheck %s
//
// Do the same run, but now with vectorization.
// REDEFINE: %{sparsifier_opts} = enable-runtime-library=false enable-buffer-initialization=true vl=2 reassociate-fp-reductions=true enable-index-optimizations=true
// RUN: %{compile} | %{run} | FileCheck %s
//
// Do the same run, but now with VLA vectorization.
// RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | %{run_sve} | FileCheck %s %}

#SparseVector = #sparse_tensor.encoding<{map = (d0) -> (d0 : compressed)}>
#DCSR = #sparse_tensor.encoding<{map = (d0, d1) -> (d0 : compressed, d1 : compressed)}>
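
// A note on the encodings above (an illustrative sketch, not exercised by
// this test): a `compressed` level stores only the entries that are present,
// as a positions array (pos) and a coordinates array (crd) next to the
// values. A 1-D vector [0, 2.0, 0, 3.0] stored with #SparseVector would thus
// print as pos[0] = ( 0, 2 ), crd[0] = ( 1, 3 ), values = ( 2, 3 ). #DCSR
// compresses both dimensions, so rows without any stored entry take no
// space at all.
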
//
// Traits for tensor operations.
//
#trait_vec = {
  indexing_maps = [
    affine_map<(i) -> (i)>, // a (in)
    affine_map<(i) -> (i)>  // x (out)
  ],
  iterator_types = ["parallel"]
}
#trait_mat = {
  indexing_maps = [
    affine_map<(i,j) -> (i,j)>, // A (in)
    affine_map<(i,j) -> (i,j)>  // X (out)
  ],
  iterator_types = ["parallel", "parallel"]
}

module {
  // Invert the structure of a sparse vector. Present values become missing.
  // Missing values are filled with 1 (i32). Output is sparse.
  func.func @vector_complement_sparse(%arga: tensor<?xf64, #SparseVector>) -> tensor<?xi32, #SparseVector> {
    %c = arith.constant 0 : index
    %ci1 = arith.constant 1 : i32
    %d = tensor.dim %arga, %c : tensor<?xf64, #SparseVector>
    %xv = tensor.empty(%d) : tensor<?xi32, #SparseVector>
    %0 = linalg.generic #trait_vec
        ins(%arga: tensor<?xf64, #SparseVector>)
        outs(%xv: tensor<?xi32, #SparseVector>) {
      ^bb(%a: f64, %x: i32):
        %1 = sparse_tensor.unary %a : f64 to i32
          present={}
          absent={
            sparse_tensor.yield %ci1 : i32
          }
        linalg.yield %1 : i32
    } -> tensor<?xi32, #SparseVector>
    return %0 : tensor<?xi32, #SparseVector>
  }

  // Invert the structure of a sparse vector, where missing values are
  // filled with 1. For a dense output, the sparsifier initializes the
  // buffer to zero, so all other positions remain zero.
  func.func @vector_complement_dense(%arga: tensor<?xf64, #SparseVector>) -> tensor<?xi32> {
    %c = arith.constant 0 : index
    %d = tensor.dim %arga, %c : tensor<?xf64, #SparseVector>
    %xv = tensor.empty(%d) : tensor<?xi32>
    %0 = linalg.generic #trait_vec
        ins(%arga: tensor<?xf64, #SparseVector>)
        outs(%xv: tensor<?xi32>) {
      ^bb(%a: f64, %x: i32):
        %1 = sparse_tensor.unary %a : f64 to i32
          present={}
          absent={
            %ci1 = arith.constant 1 : i32
            sparse_tensor.yield %ci1 : i32
          }
        linalg.yield %1 : i32
    } -> tensor<?xi32>
    return %0 : tensor<?xi32>
  }

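  // Note on `sparse_tensor.unary`, used by all kernels in this file: the
  // `present` region is evaluated for every stored entry, and the `absent`
  // region for every implicit zero. An empty region means no value is
  // produced for that case, so the result stays un-stored (sparse output)
  // or zero (dense output). The two kernels above leave `present` empty,
  // which is exactly what inverts the sparsity structure.
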
  // Negate existing values. Fill missing ones with +1.
  func.func @vector_negation(%arga: tensor<?xf64, #SparseVector>) -> tensor<?xf64, #SparseVector> {
    %c = arith.constant 0 : index
    %cf1 = arith.constant 1.0 : f64
    %d = tensor.dim %arga, %c : tensor<?xf64, #SparseVector>
    %xv = tensor.empty(%d) : tensor<?xf64, #SparseVector>
    %0 = linalg.generic #trait_vec
        ins(%arga: tensor<?xf64, #SparseVector>)
        outs(%xv: tensor<?xf64, #SparseVector>) {
      ^bb(%a: f64, %x: f64):
        %1 = sparse_tensor.unary %a : f64 to f64
          present={
            ^bb0(%x0: f64):
              %ret = arith.negf %x0 : f64
              sparse_tensor.yield %ret : f64
          }
          absent={
            sparse_tensor.yield %cf1 : f64
          }
        linalg.yield %1 : f64
    } -> tensor<?xf64, #SparseVector>
    return %0 : tensor<?xf64, #SparseVector>
  }

  // Performs B[i] = i * A[i].
  func.func @vector_magnify(%arga: tensor<?xf64, #SparseVector>) -> tensor<?xf64, #SparseVector> {
    %c = arith.constant 0 : index
    %d = tensor.dim %arga, %c : tensor<?xf64, #SparseVector>
    %xv = tensor.empty(%d) : tensor<?xf64, #SparseVector>
    %0 = linalg.generic #trait_vec
        ins(%arga: tensor<?xf64, #SparseVector>)
        outs(%xv: tensor<?xf64, #SparseVector>) {
      ^bb(%a: f64, %x: f64):
        %idx = linalg.index 0 : index
        %1 = sparse_tensor.unary %a : f64 to f64
          present={
            ^bb0(%x0: f64):
              %tmp = arith.index_cast %idx : index to i64
              %idxf = arith.uitofp %tmp : i64 to f64
              %ret = arith.mulf %x0, %idxf : f64
              sparse_tensor.yield %ret : f64
          }
          absent={}
        linalg.yield %1 : f64
    } -> tensor<?xf64, #SparseVector>
    return %0 : tensor<?xf64, #SparseVector>
  }

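  // Worked example for @vector_magnify (cf. the CHECK lines below): the input
  // entry 4.0 stored at coordinate 17 becomes 17 * 4.0 = 68.0, and the entry
  // 1.0 at coordinate 0 becomes 0.0, which remains an explicitly stored
  // value since the output reuses the input's sparsity structure.
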
  // Clips values to the range [3, 7].
  func.func @matrix_clip(%argx: tensor<?x?xf64, #DCSR>) -> tensor<?x?xf64, #DCSR> {
    %c0 = arith.constant 0 : index
    %c1 = arith.constant 1 : index
    %cfmin = arith.constant 3.0 : f64
    %cfmax = arith.constant 7.0 : f64
    %d0 = tensor.dim %argx, %c0 : tensor<?x?xf64, #DCSR>
    %d1 = tensor.dim %argx, %c1 : tensor<?x?xf64, #DCSR>
    %xv = tensor.empty(%d0, %d1) : tensor<?x?xf64, #DCSR>
    %0 = linalg.generic #trait_mat
        ins(%argx: tensor<?x?xf64, #DCSR>)
        outs(%xv: tensor<?x?xf64, #DCSR>) {
      ^bb(%a: f64, %x: f64):
        %1 = sparse_tensor.unary %a: f64 to f64
          present={
            ^bb0(%x0: f64):
              %mincmp = arith.cmpf "ogt", %x0, %cfmin : f64
              %x1 = arith.select %mincmp, %x0, %cfmin : f64
              %maxcmp = arith.cmpf "olt", %x1, %cfmax : f64
              %x2 = arith.select %maxcmp, %x1, %cfmax : f64
              sparse_tensor.yield %x2 : f64
          }
          absent={}
        linalg.yield %1 : f64
    } -> tensor<?x?xf64, #DCSR>
    return %0 : tensor<?x?xf64, #DCSR>
  }

  // Slices the matrix, keeping only the values in the lower-right quadrant
  // of the original matrix (i.e., A[d0/2 ..][d1/2 ..]) and setting all other
  // values to 99.
  func.func @matrix_slice(%argx: tensor<?x?xf64, #DCSR>) -> tensor<?x?xf64, #DCSR> {
    %c0 = arith.constant 0 : index
    %c1 = arith.constant 1 : index
    %d0 = tensor.dim %argx, %c0 : tensor<?x?xf64, #DCSR>
    %d1 = tensor.dim %argx, %c1 : tensor<?x?xf64, #DCSR>
    %xv = tensor.empty(%d0, %d1) : tensor<?x?xf64, #DCSR>
    %0 = linalg.generic #trait_mat
        ins(%argx: tensor<?x?xf64, #DCSR>)
        outs(%xv: tensor<?x?xf64, #DCSR>) {
      ^bb(%a: f64, %x: f64):
        %row = linalg.index 0 : index
        %col = linalg.index 1 : index
        %1 = sparse_tensor.unary %a: f64 to f64
          present={
            ^bb0(%x0: f64):
              %v = arith.constant 99.0 : f64
              %two = arith.constant 2 : index
              %r = arith.muli %two, %row : index
              %c = arith.muli %two, %col : index
              %cmp1 = arith.cmpi "ult", %r, %d0 : index
              %tmp = arith.select %cmp1, %v, %x0 : f64
              %cmp2 = arith.cmpi "ult", %c, %d1 : index
              %result = arith.select %cmp2, %v, %tmp : f64
              sparse_tensor.yield %result : f64
          }
          absent={}
        linalg.yield %1 : f64
    } -> tensor<?x?xf64, #DCSR>
    return %0 : tensor<?x?xf64, #DCSR>
  }

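  // Worked examples for the matrix kernels (cf. the CHECK lines below):
  // @matrix_clip clamps each stored value to [3, 7], so 1.0 -> 3.0,
  // 5.0 -> 5.0, and 9.0 -> 7.0. @matrix_slice keeps A[2][4] = 5.0 and
  // A[2][7] = 6.0 unchanged, since both coordinates lie in the lower-right
  // quadrant of the 4x8 input, and overwrites every other stored entry
  // with 99.0.
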
  // Driver method to call and verify the vector and matrix kernels.
  func.func @main() {
    %cmu = arith.constant -99 : i32
    %c0 = arith.constant 0 : index

    // Setup sparse vectors.
    %v1 = arith.constant sparse<
       [ [0], [3], [11], [17], [20], [21], [28], [29], [31] ],
         [ 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0 ]
    > : tensor<32xf64>
    %sv1 = sparse_tensor.convert %v1 : tensor<32xf64> to tensor<?xf64, #SparseVector>

    // Setup sparse matrices.
    %m1 = arith.constant sparse<
       [ [0,0], [0,1], [1,7], [2,2], [2,4], [2,7], [3,0], [3,2], [3,3] ],
         [ 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0 ]
    > : tensor<4x8xf64>
    %sm1 = sparse_tensor.convert %m1 : tensor<4x8xf64> to tensor<?x?xf64, #DCSR>

    // Call sparse vector kernels.
    %0 = call @vector_complement_sparse(%sv1)
       : (tensor<?xf64, #SparseVector>) -> tensor<?xi32, #SparseVector>
    %1 = call @vector_negation(%sv1)
       : (tensor<?xf64, #SparseVector>) -> tensor<?xf64, #SparseVector>
    %2 = call @vector_magnify(%sv1)
       : (tensor<?xf64, #SparseVector>) -> tensor<?xf64, #SparseVector>

    // Call sparse matrix kernels.
    %3 = call @matrix_clip(%sm1)
       : (tensor<?x?xf64, #DCSR>) -> tensor<?x?xf64, #DCSR>
    %4 = call @matrix_slice(%sm1)
       : (tensor<?x?xf64, #DCSR>) -> tensor<?x?xf64, #DCSR>

    // Call kernel with dense output.
    %5 = call @vector_complement_dense(%sv1) : (tensor<?xf64, #SparseVector>) -> tensor<?xi32>

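    // Note on the expected output: `sparse_tensor.print` emits the number of
    // stored entries (nse), the dimension and level sizes, then one pos/crd
    // pair per compressed level, followed by the values. The CHECK lines
    // below match this format, one block per printed tensor.
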
    //
    // Verify the results.
    //
    // CHECK: ---- Sparse Tensor ----
    // CHECK-NEXT: nse = 9
    // CHECK-NEXT: dim = ( 32 )
    // CHECK-NEXT: lvl = ( 32 )
    // CHECK-NEXT: pos[0] : ( 0, 9 )
    // CHECK-NEXT: crd[0] : ( 0, 3, 11, 17, 20, 21, 28, 29, 31 )
    // CHECK-NEXT: values : ( 1, 2, 3, 4, 5, 6, 7, 8, 9 )
    // CHECK-NEXT: ----
    // CHECK: ---- Sparse Tensor ----
    // CHECK-NEXT: nse = 23
    // CHECK-NEXT: dim = ( 32 )
    // CHECK-NEXT: lvl = ( 32 )
    // CHECK-NEXT: pos[0] : ( 0, 23 )
    // CHECK-NEXT: crd[0] : ( 1, 2, 4, 5, 6, 7, 8, 9, 10, 12, 13, 14, 15, 16, 18, 19, 22, 23, 24, 25, 26, 27, 30 )
    // CHECK-NEXT: values : ( 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 )
    // CHECK-NEXT: ----
    // CHECK: ---- Sparse Tensor ----
    // CHECK-NEXT: nse = 32
    // CHECK-NEXT: dim = ( 32 )
    // CHECK-NEXT: lvl = ( 32 )
    // CHECK-NEXT: pos[0] : ( 0, 32 )
    // CHECK-NEXT: crd[0] : ( 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31 )
    // CHECK-NEXT: values : ( -1, 1, 1, -2, 1, 1, 1, 1, 1, 1, 1, -3, 1, 1, 1, 1, 1, -4, 1, 1, -5, -6, 1, 1, 1, 1, 1, 1, -7, -8, 1, -9 )
    // CHECK-NEXT: ----
    // CHECK: ---- Sparse Tensor ----
    // CHECK-NEXT: nse = 9
    // CHECK-NEXT: dim = ( 32 )
    // CHECK-NEXT: lvl = ( 32 )
    // CHECK-NEXT: pos[0] : ( 0, 9 )
    // CHECK-NEXT: crd[0] : ( 0, 3, 11, 17, 20, 21, 28, 29, 31 )
    // CHECK-NEXT: values : ( 0, 6, 33, 68, 100, 126, 196, 232, 279 )
    // CHECK-NEXT: ----
    // CHECK: ---- Sparse Tensor ----
    // CHECK-NEXT: nse = 9
    // CHECK-NEXT: dim = ( 4, 8 )
    // CHECK-NEXT: lvl = ( 4, 8 )
    // CHECK-NEXT: pos[0] : ( 0, 4 )
    // CHECK-NEXT: crd[0] : ( 0, 1, 2, 3 )
    // CHECK-NEXT: pos[1] : ( 0, 2, 3, 6, 9 )
    // CHECK-NEXT: crd[1] : ( 0, 1, 7, 2, 4, 7, 0, 2, 3 )
    // CHECK-NEXT: values : ( 3, 3, 3, 4, 5, 6, 7, 7, 7 )
    // CHECK-NEXT: ----
    // CHECK: ---- Sparse Tensor ----
    // CHECK-NEXT: nse = 9
    // CHECK-NEXT: dim = ( 4, 8 )
    // CHECK-NEXT: lvl = ( 4, 8 )
    // CHECK-NEXT: pos[0] : ( 0, 4 )
    // CHECK-NEXT: crd[0] : ( 0, 1, 2, 3 )
    // CHECK-NEXT: pos[1] : ( 0, 2, 3, 6, 9 )
    // CHECK-NEXT: crd[1] : ( 0, 1, 7, 2, 4, 7, 0, 2, 3 )
    // CHECK-NEXT: values : ( 99, 99, 99, 99, 5, 6, 99, 99, 99 )
    // CHECK-NEXT: ----
    // CHECK-NEXT: ( 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0 )
    //
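    // The sparse results are printed directly. The dense result %5 is first
    // read back into a vector; the padding value %cmu = -99 passed to
    // vector.transfer_read would only appear if the read ran past the end of
    // the buffer, which cannot happen here since the buffer holds exactly 32
    // elements.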
    sparse_tensor.print %sv1 : tensor<?xf64, #SparseVector>
    sparse_tensor.print %0 : tensor<?xi32, #SparseVector>
    sparse_tensor.print %1 : tensor<?xf64, #SparseVector>
    sparse_tensor.print %2 : tensor<?xf64, #SparseVector>
    sparse_tensor.print %3 : tensor<?x?xf64, #DCSR>
    sparse_tensor.print %4 : tensor<?x?xf64, #DCSR>
    %v = vector.transfer_read %5[%c0], %cmu: tensor<?xi32>, vector<32xi32>
    vector.print %v : vector<32xi32>

    // Release the resources.
    bufferization.dealloc_tensor %sv1 : tensor<?xf64, #SparseVector>
    bufferization.dealloc_tensor %sm1 : tensor<?x?xf64, #DCSR>
    bufferization.dealloc_tensor %0 : tensor<?xi32, #SparseVector>
    bufferization.dealloc_tensor %1 : tensor<?xf64, #SparseVector>
    bufferization.dealloc_tensor %2 : tensor<?xf64, #SparseVector>
    bufferization.dealloc_tensor %3 : tensor<?x?xf64, #DCSR>
    bufferization.dealloc_tensor %4 : tensor<?x?xf64, #DCSR>
    bufferization.dealloc_tensor %5 : tensor<?xi32>
    return
  }
}