//--------------------------------------------------------------------------------------------------
// WHEN CREATING A NEW TEST, PLEASE JUST COPY & PASTE WITHOUT EDITS.
//
// Set-up that's shared across all tests in this directory. In principle, this
// config could be moved to lit.local.cfg. However, there are downstream users that
// do not use these LIT config files. Hence why this is kept inline.
//
// DEFINE: %{sparsifier_opts} = enable-runtime-library=true
// DEFINE: %{sparsifier_opts_sve} = enable-arm-sve=true %{sparsifier_opts}
// DEFINE: %{compile} = mlir-opt %s --sparsifier="%{sparsifier_opts}"
// DEFINE: %{compile_sve} = mlir-opt %s --sparsifier="%{sparsifier_opts_sve}"
// DEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils
// DEFINE: %{run_libs_sve} = -shared-libs=%native_mlir_runner_utils,%native_mlir_c_runner_utils
// DEFINE: %{run_opts} = -e main -entry-point-result=void
// DEFINE: %{run} = mlir-runner %{run_opts} %{run_libs}
// DEFINE: %{run_sve} = %mcr_aarch64_cmd --march=aarch64 --mattr="+sve" %{run_opts} %{run_libs_sve}
//
// DEFINE: %{env} =
//--------------------------------------------------------------------------------------------------

// RUN: %{compile} | %{run} | FileCheck %s
//
// Do the same run, but now with direct IR generation.
// REDEFINE: %{sparsifier_opts} = enable-runtime-library=false enable-buffer-initialization=true
// RUN: %{compile} | %{run} | FileCheck %s
//
// Do the same run, but now with direct IR generation and vectorization.
// REDEFINE: %{sparsifier_opts} = enable-runtime-library=false enable-buffer-initialization=true vl=2 reassociate-fp-reductions=true enable-index-optimizations=true
// RUN: %{compile} | %{run} | FileCheck %s
//
// Do the same run, but now with direct IR generation and VLA vectorization.
// RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | %{run_sve} | FileCheck %s %}

#Tensor1 = #sparse_tensor.encoding<{
  map = (d0, d1, d2) -> (d0 : compressed, d1 : compressed, d2 : compressed)
}>

#Tensor2 = #sparse_tensor.encoding<{
  map = (d0, d1, d2) -> (d1 : compressed, d2 : compressed, d0 : compressed)
}>

#Tensor3 = #sparse_tensor.encoding<{
  map = (d0, d1, d2) -> (d2 : compressed, d0 : compressed, d1 : compressed)
}>

//
// Integration test that tests conversions between sparse tensors.
//
module {
  //
  // Main driver.
  //
  func.func @main() {
    %c0 = arith.constant 0 : index
    %c1 = arith.constant 1 : index
    %c2 = arith.constant 2 : index

    //
    // Initialize a 3-dim dense tensor.
    //
    %t = arith.constant dense<[
       [  [  1.0,  2.0,  3.0,  4.0 ],
          [  5.0,  6.0,  7.0,  8.0 ],
          [  9.0, 10.0, 11.0, 12.0 ] ],
       [  [ 13.0, 14.0, 15.0, 16.0 ],
          [ 17.0, 18.0, 19.0, 20.0 ],
          [ 21.0, 22.0, 23.0, 24.0 ] ]
    ]> : tensor<2x3x4xf64>

    //
    // Convert dense tensor directly to various sparse tensors.
    //   tensor1: stored as 2x3x4
    //   tensor2: stored as 3x4x2
    //   tensor3: stored as 4x2x3
    //
    %1 = sparse_tensor.convert %t : tensor<2x3x4xf64> to tensor<2x3x4xf64, #Tensor1>
    %2 = sparse_tensor.convert %t : tensor<2x3x4xf64> to tensor<2x3x4xf64, #Tensor2>
    %3 = sparse_tensor.convert %t : tensor<2x3x4xf64> to tensor<2x3x4xf64, #Tensor3>

    //
    // Convert sparse tensor to various sparse tensors. Note that the result
    // should always correspond to the direct conversion, since the sparse
    // tensor formats have the ability to restore into the original ordering.
    //
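    // Converting a sparse tensor to its own format (%a, %e, and %i below)
    // may simply fold away to the original tensor, which is why those
    // results are not deallocated separately at the end of this test.
    //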
    %a = sparse_tensor.convert %1 : tensor<2x3x4xf64, #Tensor1> to tensor<2x3x4xf64, #Tensor1>
    %b = sparse_tensor.convert %2 : tensor<2x3x4xf64, #Tensor2> to tensor<2x3x4xf64, #Tensor1>
    %c = sparse_tensor.convert %3 : tensor<2x3x4xf64, #Tensor3> to tensor<2x3x4xf64, #Tensor1>
    %d = sparse_tensor.convert %1 : tensor<2x3x4xf64, #Tensor1> to tensor<2x3x4xf64, #Tensor2>
    %e = sparse_tensor.convert %2 : tensor<2x3x4xf64, #Tensor2> to tensor<2x3x4xf64, #Tensor2>
    %f = sparse_tensor.convert %3 : tensor<2x3x4xf64, #Tensor3> to tensor<2x3x4xf64, #Tensor2>
    %g = sparse_tensor.convert %1 : tensor<2x3x4xf64, #Tensor1> to tensor<2x3x4xf64, #Tensor3>
    %h = sparse_tensor.convert %2 : tensor<2x3x4xf64, #Tensor2> to tensor<2x3x4xf64, #Tensor3>
    %i = sparse_tensor.convert %3 : tensor<2x3x4xf64, #Tensor3> to tensor<2x3x4xf64, #Tensor3>

    //
    // Verify the outputs.
    //
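    // The twelve CHECK blocks below correspond, in order, to the twelve
    // sparse_tensor.print calls near the end of this function: %1, %2, %3,
    // followed by %a through %i.
    //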
    // CHECK:      ---- Sparse Tensor ----
    // CHECK-NEXT: nse = 24
    // CHECK-NEXT: dim = ( 2, 3, 4 )
    // CHECK-NEXT: lvl = ( 2, 3, 4 )
    // CHECK-NEXT: pos[0] : ( 0, 2 )
    // CHECK-NEXT: crd[0] : ( 0, 1 )
    // CHECK-NEXT: pos[1] : ( 0, 3, 6 )
    // CHECK-NEXT: crd[1] : ( 0, 1, 2, 0, 1, 2 )
    // CHECK-NEXT: pos[2] : ( 0, 4, 8, 12, 16, 20, 24 )
    // CHECK-NEXT: crd[2] : ( 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3 )
    // CHECK-NEXT: values : ( 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 )
    // CHECK-NEXT: ----
    //
    // CHECK:      ---- Sparse Tensor ----
    // CHECK-NEXT: nse = 24
    // CHECK-NEXT: dim = ( 2, 3, 4 )
    // CHECK-NEXT: lvl = ( 3, 4, 2 )
    // CHECK-NEXT: pos[0] : ( 0, 3 )
    // CHECK-NEXT: crd[0] : ( 0, 1, 2 )
    // CHECK-NEXT: pos[1] : ( 0, 4, 8, 12 )
    // CHECK-NEXT: crd[1] : ( 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3 )
    // CHECK-NEXT: pos[2] : ( 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24 )
    // CHECK-NEXT: crd[2] : ( 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1 )
    // CHECK-NEXT: values : ( 1, 13, 2, 14, 3, 15, 4, 16, 5, 17, 6, 18, 7, 19, 8, 20, 9, 21, 10, 22, 11, 23, 12, 24 )
    // CHECK-NEXT: ----
    //
    // CHECK:      ---- Sparse Tensor ----
    // CHECK-NEXT: nse = 24
    // CHECK-NEXT: dim = ( 2, 3, 4 )
    // CHECK-NEXT: lvl = ( 4, 2, 3 )
    // CHECK-NEXT: pos[0] : ( 0, 4 )
    // CHECK-NEXT: crd[0] : ( 0, 1, 2, 3 )
    // CHECK-NEXT: pos[1] : ( 0, 2, 4, 6, 8 )
    // CHECK-NEXT: crd[1] : ( 0, 1, 0, 1, 0, 1, 0, 1 )
    // CHECK-NEXT: pos[2] : ( 0, 3, 6, 9, 12, 15, 18, 21, 24 )
    // CHECK-NEXT: crd[2] : ( 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2 )
    // CHECK-NEXT: values : ( 1, 5, 9, 13, 17, 21, 2, 6, 10, 14, 18, 22, 3, 7, 11, 15, 19, 23, 4, 8, 12, 16, 20, 24 )
    // CHECK-NEXT: ----
    //
    // CHECK:      ---- Sparse Tensor ----
    // CHECK-NEXT: nse = 24
    // CHECK-NEXT: dim = ( 2, 3, 4 )
    // CHECK-NEXT: lvl = ( 2, 3, 4 )
    // CHECK-NEXT: pos[0] : ( 0, 2 )
    // CHECK-NEXT: crd[0] : ( 0, 1 )
    // CHECK-NEXT: pos[1] : ( 0, 3, 6 )
    // CHECK-NEXT: crd[1] : ( 0, 1, 2, 0, 1, 2 )
    // CHECK-NEXT: pos[2] : ( 0, 4, 8, 12, 16, 20, 24 )
    // CHECK-NEXT: crd[2] : ( 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3 )
    // CHECK-NEXT: values : ( 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 )
    // CHECK-NEXT: ----
    //
    // CHECK:      ---- Sparse Tensor ----
    // CHECK-NEXT: nse = 24
    // CHECK-NEXT: dim = ( 2, 3, 4 )
    // CHECK-NEXT: lvl = ( 2, 3, 4 )
    // CHECK-NEXT: pos[0] : ( 0, 2 )
    // CHECK-NEXT: crd[0] : ( 0, 1 )
    // CHECK-NEXT: pos[1] : ( 0, 3, 6 )
    // CHECK-NEXT: crd[1] : ( 0, 1, 2, 0, 1, 2 )
    // CHECK-NEXT: pos[2] : ( 0, 4, 8, 12, 16, 20, 24 )
    // CHECK-NEXT: crd[2] : ( 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3 )
    // CHECK-NEXT: values : ( 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 )
    // CHECK-NEXT: ----
    //
    // CHECK:      ---- Sparse Tensor ----
    // CHECK-NEXT: nse = 24
    // CHECK-NEXT: dim = ( 2, 3, 4 )
    // CHECK-NEXT: lvl = ( 2, 3, 4 )
    // CHECK-NEXT: pos[0] : ( 0, 2 )
    // CHECK-NEXT: crd[0] : ( 0, 1 )
    // CHECK-NEXT: pos[1] : ( 0, 3, 6 )
    // CHECK-NEXT: crd[1] : ( 0, 1, 2, 0, 1, 2 )
    // CHECK-NEXT: pos[2] : ( 0, 4, 8, 12, 16, 20, 24 )
    // CHECK-NEXT: crd[2] : ( 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3 )
    // CHECK-NEXT: values : ( 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 )
    // CHECK-NEXT: ----
    //
    // CHECK:      ---- Sparse Tensor ----
    // CHECK-NEXT: nse = 24
    // CHECK-NEXT: dim = ( 2, 3, 4 )
    // CHECK-NEXT: lvl = ( 3, 4, 2 )
    // CHECK-NEXT: pos[0] : ( 0, 3 )
    // CHECK-NEXT: crd[0] : ( 0, 1, 2 )
    // CHECK-NEXT: pos[1] : ( 0, 4, 8, 12 )
    // CHECK-NEXT: crd[1] : ( 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3 )
    // CHECK-NEXT: pos[2] : ( 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24 )
    // CHECK-NEXT: crd[2] : ( 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1 )
    // CHECK-NEXT: values : ( 1, 13, 2, 14, 3, 15, 4, 16, 5, 17, 6, 18, 7, 19, 8, 20, 9, 21, 10, 22, 11, 23, 12, 24 )
    // CHECK-NEXT: ----
    //
    // CHECK:      ---- Sparse Tensor ----
    // CHECK-NEXT: nse = 24
    // CHECK-NEXT: dim = ( 2, 3, 4 )
    // CHECK-NEXT: lvl = ( 3, 4, 2 )
    // CHECK-NEXT: pos[0] : ( 0, 3 )
    // CHECK-NEXT: crd[0] : ( 0, 1, 2 )
    // CHECK-NEXT: pos[1] : ( 0, 4, 8, 12 )
    // CHECK-NEXT: crd[1] : ( 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3 )
    // CHECK-NEXT: pos[2] : ( 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24 )
    // CHECK-NEXT: crd[2] : ( 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1 )
    // CHECK-NEXT: values : ( 1, 13, 2, 14, 3, 15, 4, 16, 5, 17, 6, 18, 7, 19, 8, 20, 9, 21, 10, 22, 11, 23, 12, 24 )
    // CHECK-NEXT: ----
    //
    // CHECK:      ---- Sparse Tensor ----
    // CHECK-NEXT: nse = 24
    // CHECK-NEXT: dim = ( 2, 3, 4 )
    // CHECK-NEXT: lvl = ( 3, 4, 2 )
    // CHECK-NEXT: pos[0] : ( 0, 3 )
    // CHECK-NEXT: crd[0] : ( 0, 1, 2 )
    // CHECK-NEXT: pos[1] : ( 0, 4, 8, 12 )
    // CHECK-NEXT: crd[1] : ( 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3 )
    // CHECK-NEXT: pos[2] : ( 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24 )
    // CHECK-NEXT: crd[2] : ( 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1 )
    // CHECK-NEXT: values : ( 1, 13, 2, 14, 3, 15, 4, 16, 5, 17, 6, 18, 7, 19, 8, 20, 9, 21, 10, 22, 11, 23, 12, 24 )
    // CHECK-NEXT: ----
    //
    // CHECK:      ---- Sparse Tensor ----
    // CHECK-NEXT: nse = 24
    // CHECK-NEXT: dim = ( 2, 3, 4 )
    // CHECK-NEXT: lvl = ( 4, 2, 3 )
    // CHECK-NEXT: pos[0] : ( 0, 4 )
    // CHECK-NEXT: crd[0] : ( 0, 1, 2, 3 )
    // CHECK-NEXT: pos[1] : ( 0, 2, 4, 6, 8 )
    // CHECK-NEXT: crd[1] : ( 0, 1, 0, 1, 0, 1, 0, 1 )
    // CHECK-NEXT: pos[2] : ( 0, 3, 6, 9, 12, 15, 18, 21, 24 )
    // CHECK-NEXT: crd[2] : ( 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2 )
    // CHECK-NEXT: values : ( 1, 5, 9, 13, 17, 21, 2, 6, 10, 14, 18, 22, 3, 7, 11, 15, 19, 23, 4, 8, 12, 16, 20, 24 )
    // CHECK-NEXT: ----
    //
    // CHECK:      ---- Sparse Tensor ----
    // CHECK-NEXT: nse = 24
    // CHECK-NEXT: dim = ( 2, 3, 4 )
    // CHECK-NEXT: lvl = ( 4, 2, 3 )
    // CHECK-NEXT: pos[0] : ( 0, 4 )
    // CHECK-NEXT: crd[0] : ( 0, 1, 2, 3 )
    // CHECK-NEXT: pos[1] : ( 0, 2, 4, 6, 8 )
    // CHECK-NEXT: crd[1] : ( 0, 1, 0, 1, 0, 1, 0, 1 )
    // CHECK-NEXT: pos[2] : ( 0, 3, 6, 9, 12, 15, 18, 21, 24 )
    // CHECK-NEXT: crd[2] : ( 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2 )
    // CHECK-NEXT: values : ( 1, 5, 9, 13, 17, 21, 2, 6, 10, 14, 18, 22, 3, 7, 11, 15, 19, 23, 4, 8, 12, 16, 20, 24 )
    // CHECK-NEXT: ----
    //
    // CHECK:      ---- Sparse Tensor ----
    // CHECK-NEXT: nse = 24
    // CHECK-NEXT: dim = ( 2, 3, 4 )
    // CHECK-NEXT: lvl = ( 4, 2, 3 )
    // CHECK-NEXT: pos[0] : ( 0, 4 )
    // CHECK-NEXT: crd[0] : ( 0, 1, 2, 3 )
    // CHECK-NEXT: pos[1] : ( 0, 2, 4, 6, 8 )
    // CHECK-NEXT: crd[1] : ( 0, 1, 0, 1, 0, 1, 0, 1 )
    // CHECK-NEXT: pos[2] : ( 0, 3, 6, 9, 12, 15, 18, 21, 24 )
    // CHECK-NEXT: crd[2] : ( 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2 )
    // CHECK-NEXT: values : ( 1, 5, 9, 13, 17, 21, 2, 6, 10, 14, 18, 22, 3, 7, 11, 15, 19, 23, 4, 8, 12, 16, 20, 24 )
    // CHECK-NEXT: ----
    //
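    // Each print reports the number of stored entries (nse), the dimension
    // sizes, the level sizes in storage order, and the per-level pos/crd
    // arrays followed by the values.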
    sparse_tensor.print %1 : tensor<2x3x4xf64, #Tensor1>
    sparse_tensor.print %2 : tensor<2x3x4xf64, #Tensor2>
    sparse_tensor.print %3 : tensor<2x3x4xf64, #Tensor3>
    sparse_tensor.print %a : tensor<2x3x4xf64, #Tensor1>
    sparse_tensor.print %b : tensor<2x3x4xf64, #Tensor1>
    sparse_tensor.print %c : tensor<2x3x4xf64, #Tensor1>
    sparse_tensor.print %d : tensor<2x3x4xf64, #Tensor2>
    sparse_tensor.print %e : tensor<2x3x4xf64, #Tensor2>
    sparse_tensor.print %f : tensor<2x3x4xf64, #Tensor2>
    sparse_tensor.print %g : tensor<2x3x4xf64, #Tensor3>
    sparse_tensor.print %h : tensor<2x3x4xf64, #Tensor3>
    sparse_tensor.print %i : tensor<2x3x4xf64, #Tensor3>

    // Release the resources.
    bufferization.dealloc_tensor %1 : tensor<2x3x4xf64, #Tensor1>
    bufferization.dealloc_tensor %2 : tensor<2x3x4xf64, #Tensor2>
    bufferization.dealloc_tensor %3 : tensor<2x3x4xf64, #Tensor3>
    bufferization.dealloc_tensor %b : tensor<2x3x4xf64, #Tensor1>
    bufferization.dealloc_tensor %c : tensor<2x3x4xf64, #Tensor1>
    bufferization.dealloc_tensor %d : tensor<2x3x4xf64, #Tensor2>
    bufferization.dealloc_tensor %f : tensor<2x3x4xf64, #Tensor2>
    bufferization.dealloc_tensor %g : tensor<2x3x4xf64, #Tensor3>
    bufferization.dealloc_tensor %h : tensor<2x3x4xf64, #Tensor3>

    return
  }
}