// RUN: mlir-opt %s --sparsifier="enable-runtime-library=false vl=2 reassociate-fp-reductions=true enable-index-optimizations=true" | FileCheck %s

#MAT_D_C = #sparse_tensor.encoding<{
  map = (d0, d1) -> (d0 : dense, d1 : compressed)
}>

#MAT_C_C_P = #sparse_tensor.encoding<{
  map = (d0, d1) -> (d1 : compressed, d0 : compressed)
}>

#MAT_C_D_P = #sparse_tensor.encoding<{
  map = (d0, d1) -> (d1 : compressed, d0 : dense)
}>

//
// Ensures only the last loop is vectorized
// (vectorizing the others would crash).
//
// CHECK-LABEL: llvm.func @foo
// CHECK: llvm.intr.masked.load
// CHECK: llvm.intr.masked.scatter
//
func.func @foo(%arg0: tensor<2x4xf64, #MAT_C_C_P>,
               %arg1: tensor<3x4xf64, #MAT_C_D_P>,
               %arg2: tensor<4x4xf64, #MAT_D_C>) -> tensor<9x4xf64> {
  %0 = sparse_tensor.concatenate %arg0, %arg1, %arg2 {dimension = 0 : index}
       : tensor<2x4xf64, #MAT_C_C_P>, tensor<3x4xf64, #MAT_C_D_P>, tensor<4x4xf64, #MAT_D_C> to tensor<9x4xf64>
  return %0 : tensor<9x4xf64>
}