// RUN: mlir-opt %s -test-lower-to-llvm | \
// RUN: mlir-runner -e entry -entry-point-result=void \
// RUN:   -shared-libs=%mlir_c_runner_utils | \
// RUN: FileCheck %s

// Illustrates an 8x8 sparse matrix x vector multiplication implemented with
// only operations of the vector dialect (and some memref/arith/scf).
// Essentially, this example performs the following multiplication:
//
//        0  1  2  3  4  5  6  7
//      +------------------------+
//    0 | 1  0  2  0  0  1  0  1 |   | 1 |   | 21 |
//    1 | 1  8  0  0  3  0  1  0 |   | 2 |   | 39 |
//    2 | 0  0  1  0  0  2  6  2 |   | 3 |   | 73 |
//    3 | 0  3  0  1  0  1  0  1 | x | 4 | = | 24 |
//    4 | 5  0  0  1  1  1  0  0 |   | 5 |   | 20 |
//    5 | 0  3  0  0  2  1  2  0 |   | 6 |   | 36 |
//    6 | 4  0  7  0  1  0  1  0 |   | 7 |   | 37 |
//    7 | 0  3  0  2  0  0  1  1 |   | 8 |   | 29 |
//      +------------------------+
//
// The sparse storage scheme used is an extended column scheme (also referred
// to as jagged diagonal), which is essentially a vector-friendly variant of
// the general sparse row-wise scheme (also called compressed row storage),
// using fixed-length vectors and no explicit pointer indexing into the
// value array to find the rows.
//
// The extended column storage for the matrix shown above is as follows.
//
//         VALUE             INDEX
//      +---------+       +---------+
//    0 | 1 2 1 1 |       | 0 2 5 7 |
//    1 | 1 8 3 1 |       | 0 1 4 6 |
//    2 | 1 2 6 2 |       | 2 5 6 7 |
//    3 | 3 1 1 1 |       | 1 3 5 7 |
//    4 | 5 1 1 1 |       | 0 3 4 5 |
//    5 | 3 2 1 2 |       | 1 4 5 6 |
//    6 | 4 7 1 1 |       | 0 2 4 6 |
//    7 | 3 2 1 1 |       | 1 3 6 7 |
//      +---------+       +---------+
//
// This example illustrates an effective SAXPY version that operates
// on the transposed jagged diagonal storage to obtain higher vector
// lengths. Another example in this directory illustrates a DOT
// version of the operation.
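//
// As a mental model (this sketch is illustrative only and not part of the
// test), the kernel below computes the following C-style scalar loops over
// the VALUE and INDEX tables shown above, with b initialized to zero:
//
//   for (int k = 0; k < 4; k++)            // k-th stored entry of every row
//     for (int i = 0; i < 8; i++)          // all 8 rows, one per vector lane
//       b[i] += VALUE[i][k] * x[INDEX[i][k]];
//
// The vector code keeps VALUE and INDEX transposed (AVAL[k] and AIDX[k] hold
// the k-th entry of every row), so each inner loop over i becomes a single
// vector.gather from x followed by one vector.fma in @spmv8x8 below.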

func.func @spmv8x8(%AVAL: memref<4xvector<8xf32>>,
                   %AIDX: memref<4xvector<8xi32>>,
                   %X: memref<?xf32>, %B: memref<1xvector<8xf32>>) {
  %c0 = arith.constant 0 : index
  %c1 = arith.constant 1 : index
  %cn = arith.constant 4 : index
  %f0 = arith.constant 0.0 : f32
  %mask = vector.constant_mask [8] : vector<8xi1>
  %pass = vector.broadcast %f0 : f32 to vector<8xf32>
  %b = memref.load %B[%c0] : memref<1xvector<8xf32>>
  // Accumulate one jagged diagonal per iteration: gather the x values
  // addressed by this diagonal's column indices, then fuse multiply-add
  // into the running vector of row sums.
  %b_out = scf.for %k = %c0 to %cn step %c1 iter_args(%b_iter = %b) -> (vector<8xf32>) {
    %aval = memref.load %AVAL[%k] : memref<4xvector<8xf32>>
    %aidx = memref.load %AIDX[%k] : memref<4xvector<8xi32>>
    %0 = vector.gather %X[%c0][%aidx], %mask, %pass
        : memref<?xf32>, vector<8xi32>, vector<8xi1>, vector<8xf32> into vector<8xf32>
    %b_new = vector.fma %aval, %0, %b_iter : vector<8xf32>
    scf.yield %b_new : vector<8xf32>
  }
  memref.store %b_out, %B[%c0] : memref<1xvector<8xf32>>
  return
}

func.func @entry() {
  %c0 = arith.constant 0 : index
  %c1 = arith.constant 1 : index
  %c2 = arith.constant 2 : index
  %c3 = arith.constant 3 : index
  %c4 = arith.constant 4 : index
  %c5 = arith.constant 5 : index
  %c6 = arith.constant 6 : index
  %c7 = arith.constant 7 : index
  %c8 = arith.constant 8 : index

  %f0 = arith.constant 0.0 : f32
  %f1 = arith.constant 1.0 : f32
  %f2 = arith.constant 2.0 : f32
  %f3 = arith.constant 3.0 : f32
  %f4 = arith.constant 4.0 : f32
  %f5 = arith.constant 5.0 : f32
  %f6 = arith.constant 6.0 : f32
  %f7 = arith.constant 7.0 : f32
  %f8 = arith.constant 8.0 : f32

  %i0 = arith.constant 0 : i32
  %i1 = arith.constant 1 : i32
  %i2 = arith.constant 2 : i32
  %i3 = arith.constant 3 : i32
  %i4 = arith.constant 4 : i32
  %i5 = arith.constant 5 : i32
  %i6 = arith.constant 6 : i32
  %i7 = arith.constant 7 : i32

  //
  // Allocate.
  //

  %AVAL = memref.alloc() {alignment = 64} : memref<4xvector<8xf32>>
  %AIDX = memref.alloc() {alignment = 64} : memref<4xvector<8xi32>>
  %X = memref.alloc(%c8) {alignment = 64} : memref<?xf32>
  %B = memref.alloc() {alignment = 64} : memref<1xvector<8xf32>>

  //
  // Initialize.
  //

  %vf1 = vector.broadcast %f1 : f32 to vector<8xf32>

  %0 = vector.insert %f3, %vf1[3] : f32 into vector<8xf32>
  %1 = vector.insert %f5, %0[4] : f32 into vector<8xf32>
  %2 = vector.insert %f3, %1[5] : f32 into vector<8xf32>
  %3 = vector.insert %f4, %2[6] : f32 into vector<8xf32>
  %4 = vector.insert %f3, %3[7] : f32 into vector<8xf32>
  memref.store %4, %AVAL[%c0] : memref<4xvector<8xf32>>

  %5 = vector.insert %f2, %vf1[0] : f32 into vector<8xf32>
  %6 = vector.insert %f8, %5[1] : f32 into vector<8xf32>
  %7 = vector.insert %f2, %6[2] : f32 into vector<8xf32>
  %8 = vector.insert %f2, %7[5] : f32 into vector<8xf32>
  %9 = vector.insert %f7, %8[6] : f32 into vector<8xf32>
  %10 = vector.insert %f2, %9[7] : f32 into vector<8xf32>
  memref.store %10, %AVAL[%c1] : memref<4xvector<8xf32>>

  %11 = vector.insert %f3, %vf1[1] : f32 into vector<8xf32>
  %12 = vector.insert %f6, %11[2] : f32 into vector<8xf32>
  memref.store %12, %AVAL[%c2] : memref<4xvector<8xf32>>

  %13 = vector.insert %f2, %vf1[2] : f32 into vector<8xf32>
  %14 = vector.insert %f2, %13[5] : f32 into vector<8xf32>
  memref.store %14, %AVAL[%c3] : memref<4xvector<8xf32>>

  %vi0 = vector.broadcast %i0 : i32 to vector<8xi32>

  %20 = vector.insert %i2, %vi0[2] : i32 into vector<8xi32>
  %21 = vector.insert %i1, %20[3] : i32 into vector<8xi32>
  %22 = vector.insert %i1, %21[5] : i32 into vector<8xi32>
  %23 = vector.insert %i1, %22[7] : i32 into vector<8xi32>
  memref.store %23, %AIDX[%c0] : memref<4xvector<8xi32>>

  %24 = vector.insert %i2, %vi0[0] : i32 into vector<8xi32>
  %25 = vector.insert %i1, %24[1] : i32 into vector<8xi32>
  %26 = vector.insert %i5, %25[2] : i32 into vector<8xi32>
  %27 = vector.insert %i3, %26[3] : i32 into vector<8xi32>
  %28 = vector.insert %i3, %27[4] : i32 into vector<8xi32>
  %29 = vector.insert %i4, %28[5] : i32 into vector<8xi32>
  %30 = vector.insert %i2, %29[6] : i32 into vector<8xi32>
  %31 = vector.insert %i3, %30[7] : i32 into vector<8xi32>
  memref.store %31, %AIDX[%c1] : memref<4xvector<8xi32>>

  %32 = vector.insert %i5, %vi0[0] : i32 into vector<8xi32>
  %33 = vector.insert %i4, %32[1] : i32 into vector<8xi32>
  %34 = vector.insert %i6, %33[2] : i32 into vector<8xi32>
  %35 = vector.insert %i5, %34[3] : i32 into vector<8xi32>
  %36 = vector.insert %i4, %35[4] : i32 into vector<8xi32>
  %37 = vector.insert %i5, %36[5] : i32 into vector<8xi32>
  %38 = vector.insert %i4, %37[6] : i32 into vector<8xi32>
  %39 = vector.insert %i6, %38[7] : i32 into vector<8xi32>
  memref.store %39, %AIDX[%c2] : memref<4xvector<8xi32>>

  %40 = vector.insert %i7, %vi0[0] : i32 into vector<8xi32>
  %41 = vector.insert %i6, %40[1] : i32 into vector<8xi32>
  %42 = vector.insert %i7, %41[2] : i32 into vector<8xi32>
  %43 = vector.insert %i7, %42[3] : i32 into vector<8xi32>
  %44 = vector.insert %i5, %43[4] : i32 into vector<8xi32>
  %45 = vector.insert %i6, %44[5] : i32 into vector<8xi32>
  %46 = vector.insert %i6, %45[6] : i32 into vector<8xi32>
  %47 = vector.insert %i7, %46[7] : i32 into vector<8xi32>
  memref.store %47, %AIDX[%c3] : memref<4xvector<8xi32>>

  %vf0 = vector.broadcast %f0 : f32 to vector<8xf32>
  memref.store %vf0, %B[%c0] : memref<1xvector<8xf32>>

  scf.for %i = %c0 to %c8 step %c1 {
    %ix = arith.addi %i, %c1 : index
    %kx = arith.index_cast %ix : index to i32
    %fx = arith.sitofp %kx : i32 to f32
    memref.store %fx, %X[%i] : memref<?xf32>
  }

  //
  // Multiply.
  //

  call @spmv8x8(%AVAL, %AIDX, %X, %B) : (memref<4xvector<8xf32>>,
                                         memref<4xvector<8xi32>>,
                                         memref<?xf32>,
                                         memref<1xvector<8xf32>>) -> ()

  //
  // Print and verify.
  //

  scf.for %i = %c0 to %c4 step %c1 {
    %aval = memref.load %AVAL[%i] : memref<4xvector<8xf32>>
    vector.print %aval : vector<8xf32>
  }

  scf.for %i = %c0 to %c4 step %c1 {
    %aidx = memref.load %AIDX[%i] : memref<4xvector<8xi32>>
    vector.print %aidx : vector<8xi32>
  }

  %ldb = memref.load %B[%c0] : memref<1xvector<8xf32>>
  vector.print %ldb : vector<8xf32>

  //
  // CHECK:      ( 1, 1, 1, 3, 5, 3, 4, 3 )
  // CHECK-NEXT: ( 2, 8, 2, 1, 1, 2, 7, 2 )
  // CHECK-NEXT: ( 1, 3, 6, 1, 1, 1, 1, 1 )
  // CHECK-NEXT: ( 1, 1, 2, 1, 1, 2, 1, 1 )
  //
  // CHECK-NEXT: ( 0, 0, 2, 1, 0, 1, 0, 1 )
  // CHECK-NEXT: ( 2, 1, 5, 3, 3, 4, 2, 3 )
  // CHECK-NEXT: ( 5, 4, 6, 5, 4, 5, 4, 6 )
  // CHECK-NEXT: ( 7, 6, 7, 7, 5, 6, 6, 7 )
  //
  // CHECK-NEXT: ( 21, 39, 73, 24, 20, 36, 37, 29 )
  //

  //
  // Free.
  //

  memref.dealloc %AVAL : memref<4xvector<8xf32>>
  memref.dealloc %AIDX : memref<4xvector<8xi32>>
  memref.dealloc %X : memref<?xf32>
  memref.dealloc %B : memref<1xvector<8xf32>>

  return
}