//--------------------------------------------------------------------------------------------------
// WHEN CREATING A NEW TEST, PLEASE JUST COPY & PASTE WITHOUT EDITS.
//
// Set-up that's shared across all tests in this directory. In principle, this
// config could be moved to lit.local.cfg. However, there are downstream users that
// do not use these LIT config files. Hence why this is kept inline.
//
// DEFINE: %{sparsifier_opts} = enable-runtime-library=true
// DEFINE: %{sparsifier_opts_sve} = enable-arm-sve=true %{sparsifier_opts}
// DEFINE: %{compile} = mlir-opt %s --sparsifier="%{sparsifier_opts}"
// DEFINE: %{compile_sve} = mlir-opt %s --sparsifier="%{sparsifier_opts_sve}"
// DEFINE: %{run_libs} = -shared-libs=%mlir_c_runner_utils,%mlir_runner_utils
// DEFINE: %{run_libs_sve} = -shared-libs=%native_mlir_runner_utils,%native_mlir_c_runner_utils
// DEFINE: %{run_opts} = -e main -entry-point-result=void
// DEFINE: %{run} = mlir-runner %{run_opts} %{run_libs}
// DEFINE: %{run_sve} = %mcr_aarch64_cmd --march=aarch64 --mattr="+sve" %{run_opts} %{run_libs_sve}
//
// DEFINE: %{env} =
//--------------------------------------------------------------------------------------------------

// RUN: %{compile} | %{run} | FileCheck %s

//
// Integration test that generates a tensor with a specified sparsity level.
//

!Generator = !llvm.ptr
!Array = !llvm.ptr

#SparseVector = #sparse_tensor.encoding<{
  map = (d0) -> (d0 : compressed)
}>

module {
  // External runtime helpers for random-number generation and index shuffling,
  // resolved from the shared libraries passed via %{run_libs}.
  func.func private @rtsrand(index) -> (!Generator)
  func.func private @rtrand(!Generator, index) -> (index)
  func.func private @rtdrand(!Generator) -> ()
  func.func private @shuffle(memref<?xi64>, !Generator) -> () attributes { llvm.emit_c_interface }

  //
  // Main driver.
  //
  func.func @main() {
    %c0 = arith.constant 0 : index
    %c1 = arith.constant 1 : index
    %f0 = arith.constant 0.0 : f64
    %c99 = arith.constant 99 : index
    %c100 = arith.constant 100 : index

    // Set up input size and sparsity level: 90% of the 50 entries are zero,
    // so %nse = 50 - 45 = 5 entries are stored.
    %size = arith.constant 50 : index
    %sparsity = arith.constant 90 : index
    %zeros = arith.muli %size, %sparsity : index
    %nz = arith.floordivsi %zeros, %c100 : index
    %nse = arith.subi %size, %nz : index

    // Set up an all-zero dense vector.
    %empty = tensor.empty(%size) : tensor<?xf64>
    %zero_vec = linalg.fill ins(%f0 : f64) outs(%empty : tensor<?xf64>) -> tensor<?xf64>

    // Generate shuffled indices in the range [0, %size).
    %array = memref.alloc(%size) : memref<?xi64>
    %g = func.call @rtsrand(%c0) : (index) -> (!Generator)
    func.call @shuffle(%array, %g) : (memref<?xi64>, !Generator) -> ()

    // Insert a value at each of the first %nse shuffled indices.
    %output = scf.for %iv = %c0 to %nse step %c1 iter_args(%iter = %zero_vec) -> tensor<?xf64> {
      // Fetch the insertion index from the shuffled index array.
      %val = memref.load %array[%iv] : memref<?xi64>
      %idx = arith.index_cast %val : i64 to index
      // Generate a random number from 1 to 100.
      %ri0 = func.call @rtrand(%g, %c99) : (!Generator, index) -> (index)
      %ri1 = arith.addi %ri0, %c1 : index
      %r0 = arith.index_cast %ri1 : index to i64
      %fr = arith.uitofp %r0 : i64 to f64
      // Insert the random number at the current index.
      %out = tensor.insert %fr into %iter[%idx] : tensor<?xf64>
      scf.yield %out : tensor<?xf64>
    }

    // Convert the dense vector to sparse storage and query its entry count.
    %sv = sparse_tensor.convert %output : tensor<?xf64> to tensor<?xf64, #SparseVector>
    %n0 = sparse_tensor.number_of_entries %sv : tensor<?xf64, #SparseVector>

    // Only the number of non-zeros is verified: the shuffled positions and
    // random values differ between runs, but the count of stored entries is
    // always %nse.
    //
    // CHECK: 5
    vector.print %n0 : index

    // Release the resources.
    bufferization.dealloc_tensor %sv : tensor<?xf64, #SparseVector>
    bufferization.dealloc_tensor %empty : tensor<?xf64>
    memref.dealloc %array : memref<?xi64>
    func.call @rtdrand(%g) : (!Generator) -> ()

    return
  }
}
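
// Note on the external helpers: @rtsrand, @rtrand, @rtdrand, and @shuffle have
// no bodies here and must be provided by the runtime libraries in %{run_libs}.
// The sketch below summarizes the behavior this test relies on; it is inferred
// from how the helpers are used above, not a definitive description of the
// runtime API:
//
//   @rtsrand(seed)       : create a pseudo-random generator seeded with `seed`
//   @rtrand(gen, max)    : draw a pseudo-random value, apparently in [0, max]
//                          (the "1 to 100" comment above implies an inclusive bound)
//   @shuffle(array, gen) : fill `array` with a shuffled permutation of [0, size)
//   @rtdrand(gen)        : release the generator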