// RUN: mlir-opt %s --sparsifier="enable-runtime-library=false vl=2 reassociate-fp-reductions=true enable-index-optimizations=true"

#MAT_D_C = #sparse_tensor.encoding<{
  map = (d0, d1) -> (d0 : dense, d1 : compressed)
}>

#MAT_C_C_P = #sparse_tensor.encoding<{
  map = (d0, d1) -> (d1 : compressed, d0 : compressed)
}>

#MAT_C_D_P = #sparse_tensor.encoding<{
  map = (d0, d1) -> (d1 : compressed, d0 : dense)
}>

//
// Ensures only the last loop is vectorized
// (vectorizing the others would crash).
//
// CHECK-LABEL: llvm.func @foo
// CHECK: llvm.intr.masked.load
// CHECK: llvm.intr.masked.scatter
//
func.func @foo(%arg0: tensor<2x4xf64, #MAT_C_C_P>,
               %arg1: tensor<3x4xf64, #MAT_C_D_P>,
               %arg2: tensor<4x4xf64, #MAT_D_C>) -> tensor<9x4xf64> {
  %0 = sparse_tensor.concatenate %arg0, %arg1, %arg2 {dimension = 0 : index}
       : tensor<2x4xf64, #MAT_C_C_P>, tensor<3x4xf64, #MAT_C_D_P>, tensor<4x4xf64, #MAT_D_C> to tensor<9x4xf64>
  return %0 : tensor<9x4xf64>
}
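
//
// Informal notes on the setup above (a hedged reading of this test, not part
// of the original checks):
//  * #MAT_D_C keeps d0 dense and d1 compressed, i.e. a CSR-style layout.
//  * #MAT_C_C_P and #MAT_C_D_P map the levels as (d1, d0), i.e. column-major
//    storage; the former compresses both levels, the latter keeps the inner
//    level dense.
//  * The RUN line drives the sparsifier with direct codegen instead of the
//    runtime library (enable-runtime-library=false), a vector length of 2
//    (vl=2), reassociated floating-point reductions, and index optimizations
//    for the generated vector code. Under these options the masked
//    load/scatter in the CHECK lines is presumably the trace of the single
//    loop of the concatenation that is safe to vectorize.
//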