// RUN: mlir-opt %s --sparse-reinterpret-map --sparsification --canonicalize --cse | FileCheck %s

#DCSR = #sparse_tensor.encoding<{ map = (d0, d1) -> (d0 : compressed, d1 : compressed) }>
#SparseTensor = #sparse_tensor.encoding<{ map = (d0, d1, d2) -> (d0 : compressed, d1 : compressed, d2 : compressed) }>

#trait = {
  indexing_maps = [
    affine_map<(d0, d1, d2) -> (d0, d2)>,
    affine_map<(d0, d1, d2) -> (d0, d1, d2)>
  ],
  iterator_types = ["parallel", "parallel", "parallel"]
}

// CHECK-LABEL: @main(
// CHECK-SAME:  %[[TMP_arg0:.*]]: tensor<4x5xi32,
// CHECK-DAG:   %[[TMP_c3:.*]] = arith.constant 3 : index
// CHECK-DAG:   %[[TMP_c0:.*]] = arith.constant 0 : index
// CHECK-DAG:   %[[TMP_c1:.*]] = arith.constant 1 : index
// CHECK-DAG:   %[[TMP_0:.*]] = tensor.empty()
// CHECK-DAG:   %[[TMP_1:.*]] = sparse_tensor.positions %[[TMP_arg0]] {level = 0 : index}
// CHECK-DAG:   %[[TMP_2:.*]] = sparse_tensor.coordinates %[[TMP_arg0]] {level = 0 : index}
// CHECK-DAG:   %[[TMP_3:.*]] = sparse_tensor.positions %[[TMP_arg0]] {level = 1 : index}
// CHECK-DAG:   %[[TMP_4:.*]] = sparse_tensor.coordinates %[[TMP_arg0]] {level = 1 : index}
// CHECK-DAG:   %[[TMP_5:.*]] = sparse_tensor.values %[[TMP_arg0]]
// CHECK:       %[[TMP_6:.*]] = memref.load %[[TMP_1]][%[[TMP_c0]]] : memref<?xindex>
// CHECK:       %[[TMP_7:.*]] = memref.load %[[TMP_1]][%[[TMP_c1]]] : memref<?xindex>
// CHECK:       %[[T:.*]] = scf.for %[[TMP_arg1:.*]] = %[[TMP_6]] to %[[TMP_7]] step %[[TMP_c1]] {{.*}} {
// CHECK:         %[[TMP_9:.*]] = memref.load %[[TMP_2]][%[[TMP_arg1]]] : memref<?xindex>
// CHECK:         %[[L1:.*]] = scf.for %[[TMP_arg2:.*]] = %[[TMP_c0]] to %[[TMP_c3]] step %[[TMP_c1]] {{.*}} {
// CHECK:           %[[TMP_10:.*]] = memref.load %[[TMP_3]][%[[TMP_arg1]]] : memref<?xindex>
// CHECK:           %[[TMP_11:.*]] = arith.addi %[[TMP_arg1]], %[[TMP_c1]] : index
// CHECK:           %[[TMP_12:.*]] = memref.load %[[TMP_3]][%[[TMP_11]]] : memref<?xindex>
// CHECK:           %[[L2:.*]] = scf.for %[[TMP_arg3:.*]] = %[[TMP_10]] to %[[TMP_12]] step %[[TMP_c1]] {{.*}} {
// CHECK:             %[[TMP_13:.*]] = memref.load %[[TMP_4]][%[[TMP_arg3]]] : memref<?xindex>
// CHECK:             %[[TMP_14:.*]] = memref.load %[[TMP_5]][%[[TMP_arg3]]] : memref<?xi32>
// CHECK:             %[[Y:.*]] = tensor.insert %[[TMP_14]] into %{{.*}}[%[[TMP_9]], %[[TMP_arg2]], %[[TMP_13]]]
// CHECK:             scf.yield %[[Y]]
// CHECK:           }
// CHECK:           scf.yield %[[L2]]
// CHECK:         }
// CHECK:         scf.yield %[[L1]]
// CHECK:       }
// CHECK:       %[[TMP_8:.*]] = sparse_tensor.load %[[T]] hasInserts
// CHECK:       return %[[TMP_8]]
module @func_sparse {
  func.func public @main(%arg0: tensor<4x5xi32, #DCSR>) -> tensor<4x3x5xi32, #SparseTensor> {
    %0 = tensor.empty() : tensor<4x3x5xi32, #SparseTensor>
    %1 = linalg.generic #trait
    ins(%arg0 : tensor<4x5xi32, #DCSR>) outs(%0 : tensor<4x3x5xi32, #SparseTensor>) {
    ^bb0(%in: i32, %out: i32):
      linalg.yield %in : i32
    } -> tensor<4x3x5xi32, #SparseTensor>
    return %1 : tensor<4x3x5xi32, #SparseTensor>
  }
}