// RUN: mlir-opt %s --stage-sparse-ops --lower-sparse-ops-to-foreach --canonicalize --cse | FileCheck %s

#SparseVector = #sparse_tensor.encoding<{
  map = (d0) -> (d0 : compressed)
}>

#CSR = #sparse_tensor.encoding<{
  map = (d0, d1) -> (d0 : dense, d1 : compressed)
}>

#CSC = #sparse_tensor.encoding<{
  map = (d0, d1) -> (d1 : dense, d0 : compressed)
}>

#SparseTensor = #sparse_tensor.encoding<{
  map = (d0, d1, d2) -> (d2 : dense, d0 : compressed, d1 : compressed)
}>

// Dense elements are filtered through scf.if and inserted directly; the level
// order matches the dimension order, so no reordering is needed.
// CHECK-LABEL: func.func @sparse_convert_1d
// CHECK: sparse_tensor.foreach
// CHECK: scf.if
// CHECK: tensor.insert
// CHECK-NOT: sparse_tensor.reorder_coo
// CHECK: sparse_tensor.load
func.func @sparse_convert_1d(%arg0: tensor<?xi32>) -> tensor<?xi32, #SparseVector> {
  %0 = sparse_tensor.convert %arg0 : tensor<?xi32> to tensor<?xi32, #SparseVector>
  return %0 : tensor<?xi32, #SparseVector>
}

// The same direct path applies to complex-valued elements.
// CHECK-LABEL: func.func @sparse_convert_complex
// CHECK: sparse_tensor.foreach
// CHECK: scf.if
// CHECK: tensor.insert
// CHECK-NOT: sparse_tensor.reorder_coo
// CHECK: sparse_tensor.load
func.func @sparse_convert_complex(%arg0: tensor<100xcomplex<f64>>) -> tensor<100xcomplex<f64>, #SparseVector> {
  %0 = sparse_tensor.convert %arg0 : tensor<100xcomplex<f64>> to tensor<100xcomplex<f64>, #SparseVector>
  return %0 : tensor<100xcomplex<f64>, #SparseVector>
}

// CSR keeps the dimension order of the dense input, so the conversion is
// again direct.
// CHECK-LABEL: func.func @sparse_convert_2d
// CHECK: sparse_tensor.foreach
// CHECK: scf.if
// CHECK: tensor.insert
// CHECK-NOT: sparse_tensor.reorder_coo
// CHECK: sparse_tensor.load
func.func @sparse_convert_2d(%arg0: tensor<2x4xf64>) -> tensor<2x4xf64, #CSR> {
  %0 = sparse_tensor.convert %arg0 : tensor<2x4xf64> to tensor<2x4xf64, #CSR>
  return %0 : tensor<2x4xf64, #CSR>
}

// A sparse constant enumerates only its stored entries, so no scf.if guard
// is generated.
// CHECK-LABEL: func.func @sparse_constant
// CHECK: sparse_tensor.foreach
// CHECK-NOT: scf.if
// CHECK: tensor.insert
// CHECK-NOT: sparse_tensor.reorder_coo
// CHECK: sparse_tensor.load
func.func @sparse_constant() -> tensor<8x7xf32, #CSR> {
  // Initialize a tensor.
  %0 = arith.constant sparse<[[0, 0], [1, 6]], [1.0, 5.0]> : tensor<8x7xf32>
  // Convert the tensor to a sparse tensor.
  %1 = sparse_tensor.convert %0 : tensor<8x7xf32> to tensor<8x7xf32, #CSR>
  return %1 : tensor<8x7xf32, #CSR>
}

// Same conversion from a sparse constant, but targeting CSC.
// CHECK-LABEL: func.func @sparse_constant_csc
// CHECK: sparse_tensor.foreach
// CHECK-NOT: scf.if
// CHECK: tensor.insert
// CHECK-NOT: sparse_tensor.reorder_coo
// CHECK: sparse_tensor.load
func.func @sparse_constant_csc() -> tensor<8x7xf32, #CSC> {
  // Initialize a tensor.
  %0 = arith.constant sparse<[[0, 0], [1, 6]], [1.0, 5.0]> : tensor<8x7xf32>
  // Convert the tensor to a sparse tensor.
  %1 = sparse_tensor.convert %0 : tensor<8x7xf32> to tensor<8x7xf32, #CSC>
  return %1 : tensor<8x7xf32, #CSC>
}

// The level order (d2, d0, d1) differs from the dimension order: the elements
// are first gathered into a temporary COO tensor, reordered, and re-inserted,
// after which the temporary is deallocated.
// CHECK-LABEL: func.func @sparse_convert_3d
// CHECK: sparse_tensor.foreach
// CHECK: scf.if
// CHECK: tensor.insert
// CHECK: sparse_tensor.load
// CHECK: %[[TMP:.*]] = sparse_tensor.reorder_coo
// CHECK: sparse_tensor.foreach
// CHECK: tensor.insert
// CHECK: sparse_tensor.load
// CHECK: bufferization.dealloc_tensor %[[TMP]]
func.func @sparse_convert_3d(%arg0: tensor<?x?x?xf64>) -> tensor<?x?x?xf64, #SparseTensor> {
  %0 = sparse_tensor.convert %arg0 : tensor<?x?x?xf64> to tensor<?x?x?xf64, #SparseTensor>
  return %0 : tensor<?x?x?xf64, #SparseTensor>
}