//--------------------------------------------------------------------------------------------------
// WHEN CREATING A NEW TEST, PLEASE JUST COPY & PASTE WITHOUT EDITS.
//
// Set-up that's shared across all tests in this directory. In principle, this
// config could be moved to lit.local.cfg. However, there are downstream users that
// do not use these LIT config files. Hence why this is kept inline.
//
// DEFINE: %{sparsifier_opts} = enable-runtime-library=true
// DEFINE: %{sparsifier_opts_sve} = enable-arm-sve=true %{sparsifier_opts}
// DEFINE: %{compile} = mlir-opt %s --sparsifier="%{sparsifier_opts}"
// DEFINE: %{compile_sve} = mlir-opt %s --sparsifier="%{sparsifier_opts_sve}"
// DEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils
// DEFINE: %{run_libs_sve} = -shared-libs=%native_mlir_runner_utils,%native_mlir_c_runner_utils
// DEFINE: %{run_opts} = -e main -entry-point-result=void
// DEFINE: %{run} = mlir-runner %{run_opts} %{run_libs}
// DEFINE: %{run_sve} = %mcr_aarch64_cmd --march=aarch64 --mattr="+sve" %{run_opts} %{run_libs_sve}
//
// DEFINE: %{env} =
//--------------------------------------------------------------------------------------------------

// REDEFINE: %{sparsifier_opts} = enable-runtime-library=true
// RUN: %{compile} | %{run} | FileCheck %s
//
// Do the same run, but now with direct IR generation.
// REDEFINE: %{sparsifier_opts} = enable-runtime-library=false
// RUN: %{compile} | %{run} | FileCheck %s
//
// Do the same run, but now with direct IR generation and vectorization.
// REDEFINE: %{sparsifier_opts} = enable-runtime-library=false vl=2 reassociate-fp-reductions=true enable-index-optimizations=true
// RUN: %{compile} | %{run} | FileCheck %s
//
// Do the same run, but now with direct IR generation and VLA vectorization.
// RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | %{run_sve} | FileCheck %s %}

#Tensor1 = #sparse_tensor.encoding<{
  map = (d0, d1, d2) -> (d0 : dense, d1 : dense, d2 : compressed)
}>

// NOTE: dense after compressed is not currently supported for the target
// of direct-sparse2sparse conversion. (It's fine for the source though.)
#Tensor2 = #sparse_tensor.encoding<{
  map = (d0, d1, d2) -> (d0 : dense, d1 : compressed, d2 : dense)
}>

#Tensor3 = #sparse_tensor.encoding<{
  map = (d0, d1, d2) -> (d0 : dense, d2 : dense, d1 : compressed)
}>

#SingletonTensor1 = #sparse_tensor.encoding<{
  map = (d0, d1, d2) -> (d0 : dense, d1 : compressed(nonunique), d2 : singleton)
}>

// This also checks the singleton->compressed conversion.
#SingletonTensor3 = #sparse_tensor.encoding<{
  map = (d0, d1, d2) -> (d0 : dense, d1 : dense, d2 : compressed)
}>

module {
  //
  // Utility for output.
  //
  func.func @dump(%arg0: tensor<2x3x4xf64>) {
    %c0 = arith.constant 0 : index
    %d0 = arith.constant -1.0 : f64
    %0 = vector.transfer_read %arg0[%c0, %c0, %c0], %d0: tensor<2x3x4xf64>, vector<2x3x4xf64>
    vector.print %0 : vector<2x3x4xf64>
    return
  }

  //
  // The first test suite (for non-singleton LevelTypes).
  //
  func.func @testNonSingleton() {
    //
    // Initialize a 3-dim dense tensor.
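    // With #Tensor1 ((d0, d1) dense, d2 compressed) every 4-element row is
    // retained, so the compressed level conceptually holds positions
    // [0, 4, 8, ..., 24], coordinates [0, 1, 2, 3] repeated six times, and the
    // values 1..24 in order (a rough sketch; the exact buffers depend on the
    // configured sparsifier options).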
    //
    %src = arith.constant dense<[
        [ [  1.0,  2.0,  3.0,  4.0 ],
          [  5.0,  6.0,  7.0,  8.0 ],
          [  9.0, 10.0, 11.0, 12.0 ] ],
        [ [ 13.0, 14.0, 15.0, 16.0 ],
          [ 17.0, 18.0, 19.0, 20.0 ],
          [ 21.0, 22.0, 23.0, 24.0 ] ]
    ]> : tensor<2x3x4xf64>

    //
    // Convert dense tensor directly to various sparse tensors.
    //
    %s1 = sparse_tensor.convert %src : tensor<2x3x4xf64> to tensor<2x3x4xf64, #Tensor1>
    %s3 = sparse_tensor.convert %src : tensor<2x3x4xf64> to tensor<2x3x4xf64, #Tensor3>

    //
    // Convert sparse tensor directly to another sparse format.
    //
    %t13 = sparse_tensor.convert %s1 : tensor<2x3x4xf64, #Tensor1> to tensor<2x3x4xf64, #Tensor3>
    %t31 = sparse_tensor.convert %s3 : tensor<2x3x4xf64, #Tensor3> to tensor<2x3x4xf64, #Tensor1>

    //
    // Convert sparse tensor back to dense.
    //
    %d13 = sparse_tensor.convert %t13 : tensor<2x3x4xf64, #Tensor3> to tensor<2x3x4xf64>
    %d31 = sparse_tensor.convert %t31 : tensor<2x3x4xf64, #Tensor1> to tensor<2x3x4xf64>

    //
    // Check round-trip equality.
    //
    // CHECK-COUNT-3: ( ( ( 1, 2, 3, 4 ), ( 5, 6, 7, 8 ), ( 9, 10, 11, 12 ) ), ( ( 13, 14, 15, 16 ), ( 17, 18, 19, 20 ), ( 21, 22, 23, 24 ) ) )
    call @dump(%src) : (tensor<2x3x4xf64>) -> ()
    call @dump(%d13) : (tensor<2x3x4xf64>) -> ()
    call @dump(%d31) : (tensor<2x3x4xf64>) -> ()

    //
    // Release the resources.
    //
    bufferization.dealloc_tensor %t13 : tensor<2x3x4xf64, #Tensor3>
    bufferization.dealloc_tensor %t31 : tensor<2x3x4xf64, #Tensor1>
    bufferization.dealloc_tensor %s1 : tensor<2x3x4xf64, #Tensor1>
    bufferization.dealloc_tensor %s3 : tensor<2x3x4xf64, #Tensor3>
    bufferization.dealloc_tensor %d13 : tensor<2x3x4xf64>
    bufferization.dealloc_tensor %d31 : tensor<2x3x4xf64>

    return
  }

  //
  // The second test suite (for singleton LevelTypes).
  //
  func.func @testSingleton() {
    //
    // Initialize a 3-dim dense tensor with exactly one nonzero per row,
    // so that the 3rd dim can be stored as a singleton level.
    //
    %src = arith.constant dense<[
        [ [  1.0,  0.0,  0.0,  0.0 ],
          [  0.0,  6.0,  0.0,  0.0 ],
          [  0.0,  0.0, 11.0,  0.0 ] ],
        [ [  0.0, 14.0,  0.0,  0.0 ],
          [  0.0,  0.0,  0.0, 20.0 ],
          [ 21.0,  0.0,  0.0,  0.0 ] ]
    ]> : tensor<2x3x4xf64>

    //
    // Convert dense tensor directly to various sparse tensors.
    //
    %s1 = sparse_tensor.convert %src : tensor<2x3x4xf64> to tensor<2x3x4xf64, #SingletonTensor1>
    %s3 = sparse_tensor.convert %src : tensor<2x3x4xf64> to tensor<2x3x4xf64, #SingletonTensor3>

    //
    // Convert sparse tensor directly to another sparse format.
    //
    %t13 = sparse_tensor.convert %s1 : tensor<2x3x4xf64, #SingletonTensor1> to tensor<2x3x4xf64, #SingletonTensor3>
    %t31 = sparse_tensor.convert %s3 : tensor<2x3x4xf64, #SingletonTensor3> to tensor<2x3x4xf64, #SingletonTensor1>

    //
    // Convert sparse tensor back to dense.
    //
    %d13 = sparse_tensor.convert %t13 : tensor<2x3x4xf64, #SingletonTensor3> to tensor<2x3x4xf64>
    %d31 = sparse_tensor.convert %t31 : tensor<2x3x4xf64, #SingletonTensor1> to tensor<2x3x4xf64>

    //
    // Check round-trip equality.
    //
    // CHECK-COUNT-3: ( ( ( 1, 0, 0, 0 ), ( 0, 6, 0, 0 ), ( 0, 0, 11, 0 ) ), ( ( 0, 14, 0, 0 ), ( 0, 0, 0, 20 ), ( 21, 0, 0, 0 ) ) )
    call @dump(%src) : (tensor<2x3x4xf64>) -> ()
    call @dump(%d13) : (tensor<2x3x4xf64>) -> ()
    call @dump(%d31) : (tensor<2x3x4xf64>) -> ()

    //
    // Release the resources.
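    // (As in the first test, every tensor produced by sparse_tensor.convert
    //  above, sparse and dense alike, is deallocated explicitly; only the
    //  constant %src needs no dealloc.)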
    //
    bufferization.dealloc_tensor %t13 : tensor<2x3x4xf64, #SingletonTensor3>
    bufferization.dealloc_tensor %t31 : tensor<2x3x4xf64, #SingletonTensor1>
    bufferization.dealloc_tensor %s1 : tensor<2x3x4xf64, #SingletonTensor1>
    bufferization.dealloc_tensor %s3 : tensor<2x3x4xf64, #SingletonTensor3>
    bufferization.dealloc_tensor %d13 : tensor<2x3x4xf64>
    bufferization.dealloc_tensor %d31 : tensor<2x3x4xf64>

    return
  }

  //
  // Main driver.
  //
  func.func @main() {
    call @testNonSingleton() : () -> ()
    call @testSingleton() : () -> ()
    return
  }
}