// RUN: mlir-opt %s --sparse-reinterpret-map --sparsification --canonicalize --cse | FileCheck %s

// Regression test: sparsification of a broadcast-like linalg.generic that
// copies a 2-D DCSR matrix into a 3-D all-compressed sparse tensor. The
// input indexing map (d0, d1, d2) -> (d0, d2) skips d1, so each input
// element is replicated across the d1 dimension (size 3) of the output.

#DCSR = #sparse_tensor.encoding<{ map = (d0, d1) -> (d0 : compressed, d1 : compressed) }>
#SparseTensor = #sparse_tensor.encoding<{ map = (d0, d1, d2) -> (d0 : compressed, d1 : compressed, d2 : compressed) }>

#trait = {
  indexing_maps = [
    affine_map<(d0, d1, d2) -> (d0, d2)>,
    affine_map<(d0, d1, d2) -> (d0, d1, d2)>
  ],
  iterator_types = ["parallel", "parallel", "parallel"]
}

// The generated code is expected to iterate the two compressed levels of the
// input (outer and inner sparse loops over positions/coordinates), with a
// dense loop of trip count 3 for the broadcast dimension in between, and to
// insert each value into the output tensor, finalized by sparse_tensor.load.
// CHECK-LABEL: @main(
// CHECK-SAME: %[[TMP_arg0:.*]]: tensor<4x5xi32,
// CHECK-DAG: %[[TMP_c3:.*]] = arith.constant 3 : index
// CHECK-DAG: %[[TMP_c0:.*]] = arith.constant 0 : index
// CHECK-DAG: %[[TMP_c1:.*]] = arith.constant 1 : index
// CHECK-DAG: %[[TMP_0:.*]] = tensor.empty()
// CHECK-DAG: %[[TMP_1:.*]] = sparse_tensor.positions %[[TMP_arg0]] {level = 0 : index}
// CHECK-DAG: %[[TMP_2:.*]] = sparse_tensor.coordinates %[[TMP_arg0]] {level = 0 : index}
// CHECK-DAG: %[[TMP_3:.*]] = sparse_tensor.positions %[[TMP_arg0]] {level = 1 : index}
// CHECK-DAG: %[[TMP_4:.*]] = sparse_tensor.coordinates %[[TMP_arg0]] {level = 1 : index}
// CHECK-DAG: %[[TMP_5:.*]] = sparse_tensor.values %[[TMP_arg0]]
// CHECK: %[[TMP_6:.*]] = memref.load %[[TMP_1]][%[[TMP_c0]]] : memref<?xindex>
// CHECK: %[[TMP_7:.*]] = memref.load %[[TMP_1]][%[[TMP_c1]]] : memref<?xindex>
// CHECK: %[[T:.*]] = scf.for %[[TMP_arg1:.*]] = %[[TMP_6]] to %[[TMP_7]] step %[[TMP_c1]] {{.*}} {
// CHECK: %[[TMP_9:.*]] = memref.load %[[TMP_2]][%[[TMP_arg1]]] : memref<?xindex>
// CHECK: %[[L1:.*]] = scf.for %[[TMP_arg2:.*]] = %[[TMP_c0]] to %[[TMP_c3]] step %[[TMP_c1]] {{.*}} {
// CHECK: %[[TMP_10:.*]] = memref.load %[[TMP_3]][%[[TMP_arg1]]] : memref<?xindex>
// CHECK: %[[TMP_11:.*]] = arith.addi %[[TMP_arg1]], %[[TMP_c1]] : index
// CHECK: %[[TMP_12:.*]] = memref.load %[[TMP_3]][%[[TMP_11]]] : memref<?xindex>
// CHECK: %[[L2:.*]] = scf.for %[[TMP_arg3:.*]] = %[[TMP_10]] to %[[TMP_12]] step %[[TMP_c1]] {{.*}} {
// CHECK: %[[TMP_13:.*]] = memref.load %[[TMP_4]][%[[TMP_arg3]]] : memref<?xindex>
// CHECK: %[[TMP_14:.*]] = memref.load %[[TMP_5]][%[[TMP_arg3]]] : memref<?xi32>
// CHECK: %[[Y:.*]] = tensor.insert %[[TMP_14]] into %{{.*}}[%[[TMP_9]], %[[TMP_arg2]], %[[TMP_13]]]
// CHECK: scf.yield %[[Y]]
// CHECK: }
// CHECK: scf.yield %[[L2]]
// CHECK: }
// CHECK: scf.yield %[[L1]]
// CHECK: }
// CHECK: %[[TMP_8:.*]] = sparse_tensor.load %[[T]] hasInserts
// CHECK: return %[[TMP_8]]
module @func_sparse {
  func.func public @main(%arg0: tensor<4x5xi32, #DCSR>) -> tensor<4x3x5xi32, #SparseTensor> {
    // Materialize an empty 3-D sparse destination for the insertions.
    %0 = tensor.empty() : tensor<4x3x5xi32, #SparseTensor>
    // Element-wise copy; the output map covers (d0, d1, d2) while the input
    // map only covers (d0, d2), yielding the broadcast along d1.
    %1 = linalg.generic #trait
    ins(%arg0 : tensor<4x5xi32, #DCSR>) outs(%0 : tensor<4x3x5xi32, #SparseTensor>) {
    ^bb0(%in: i32, %out: i32):
      linalg.yield %in : i32
    } -> tensor<4x3x5xi32, #SparseTensor>
    return %1 : tensor<4x3x5xi32, #SparseTensor>
  }
}