1// RUN: mlir-opt %s --sparsification-and-bufferization | FileCheck %s --check-prefix=CHECK-NOVEC 2// RUN: mlir-opt %s --sparsification-and-bufferization="vl=8" | FileCheck %s --check-prefix=CHECK-VEC 3 4// Test to ensure we can pass optimization flags into 5// the mini sparsification and bufferization pipeline. 6 7#SV = #sparse_tensor.encoding<{ map = (d0) -> (d0 : compressed) }> 8 9#trait_sum_reduction = { 10 indexing_maps = [ 11 affine_map<(i) -> (i)>, // a 12 affine_map<(i) -> ()> // x (scalar out) 13 ], 14 iterator_types = ["reduction"], 15 doc = "x += SUM_i a(i)" 16} 17 18// 19// CHECK-NOVEC-LABEL: func.func @sum_reduction 20// CHECK-NOVEC: scf.for 21// CHECK-NOVEC: arith.addf %{{.*}} %{{.*}} : f32 22// CHECK-NOVEC: } 23// 24// CHECK-VEC-LABEL: func.func @sum_reduction 25// CHECK-VEC: vector.insertelement 26// CHECK-VEC: scf.for 27// CHECK-VEC: vector.create_mask 28// CHECK-VEC: vector.maskedload 29// CHECK-VEC: arith.addf %{{.*}} %{{.*}} : vector<8xf32> 30// CHECK-VEC: } 31// CHECK-VEC: vector.reduction <add> 32// 33func.func @sum_reduction(%arga: tensor<?xf32, #SV>, 34 %argx: tensor<f32>) -> tensor<f32> { 35 %0 = linalg.generic #trait_sum_reduction 36 ins(%arga: tensor<?xf32, #SV>) 37 outs(%argx: tensor<f32>) { 38 ^bb(%a: f32, %x: f32): 39 %0 = arith.addf %x, %a : f32 40 linalg.yield %0 : f32 41 } -> tensor<f32> 42 return %0 : tensor<f32> 43} 44