// RUN: mlir-opt %s --sparse-reinterpret-map -sparsification -canonicalize | FileCheck %s

#CSR = #sparse_tensor.encoding<{
  map = (d0, d1) -> (d0 : dense, d1 : compressed)
}>

#elemwise = {
  indexing_maps = [
    affine_map<(i,j) -> (i,j)>,  // A
    affine_map<(i,j) -> (i,j)>,  // B
    affine_map<(i,j) -> (i,j)>   // X (out)
  ],
  iterator_types = ["parallel", "parallel"],
  doc = "X(i,j) = A(i,j) OP B(i,j)"
}

//
// Element-wise multiplication of a 4x4 CSR matrix, padded to 8x8 with
// low[2, 2] / high[2, 2], against a dense 8x8 matrix. The padded border
// rows (i < 2 or i >= 6) are guarded by an scf.if that yields an empty
// position range, and the stored column coordinates are shifted by the
// low padding of 2.
//
// CHECK-LABEL:   func.func @padded_mul(
// CHECK-SAME:      %[[VAL_0:.*]]: tensor<4x4xf32, #sparse>,
// CHECK-SAME:      %[[VAL_1:.*]]: tensor<8x8xf32>) -> tensor<8x8xf32> {
// CHECK-DAG:       %[[VAL_2:.*]] = arith.constant -1 : index
// CHECK-DAG:       %[[VAL_3:.*]] = arith.constant 6 : index
// CHECK-DAG:       %[[VAL_4:.*]] = arith.constant 8 : index
// CHECK-DAG:       %[[VAL_5:.*]] = arith.constant 1 : index
// CHECK-DAG:       %[[VAL_6:.*]] = arith.constant 0 : index
// CHECK-DAG:       %[[VAL_7:.*]] = arith.constant 2 : index
// CHECK-DAG:       %[[VAL_8:.*]] = arith.constant 0.000000e+00 : f32
// CHECK-DAG:       %[[VAL_9:.*]] = tensor.empty() : tensor<8x8xf32>
// CHECK-DAG:       %[[VAL_10:.*]] = linalg.fill ins(%[[VAL_8]] : f32) outs(%[[VAL_9]] : tensor<8x8xf32>) -> tensor<8x8xf32>
// CHECK-DAG:       %[[VAL_11:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor<4x4xf32, #sparse> to memref<?xindex>
// CHECK-DAG:       %[[VAL_12:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<4x4xf32, #sparse> to memref<?xindex>
// CHECK-DAG:       %[[VAL_13:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<4x4xf32, #sparse> to memref<?xf32>
// CHECK-DAG:       %[[VAL_14:.*]] = bufferization.to_memref %[[VAL_10]] :
// CHECK-DAG:       linalg.fill ins(%[[VAL_8]] : f32) outs(%[[VAL_14]] : memref<8x8xf32>)
// CHECK:           scf.for %[[VAL_15:.*]] = %[[VAL_6]] to %[[VAL_4]] step %[[VAL_5]] {
// CHECK:             %[[VAL_16:.*]] = arith.subi %[[VAL_15]], %[[VAL_7]] : index
// CHECK:             %[[VAL_17:.*]] = arith.cmpi ult, %[[VAL_15]], %[[VAL_7]] : index
// CHECK:             %[[VAL_18:.*]] = arith.cmpi uge, %[[VAL_15]], %[[VAL_3]] : index
// CHECK:             %[[VAL_19:.*]] = arith.ori %[[VAL_17]], %[[VAL_18]] : i1
// CHECK:             %[[VAL_20:.*]]:2 = scf.if %[[VAL_19]] -> (index, index) {
// CHECK:               scf.yield %[[VAL_6]], %[[VAL_6]] : index, index
// CHECK:             } else {
// CHECK:               %[[VAL_21:.*]] = memref.load %[[VAL_11]]{{\[}}%[[VAL_16]]] : memref<?xindex>
// CHECK:               %[[VAL_22:.*]] = arith.addi %[[VAL_15]], %[[VAL_2]] : index
// CHECK:               %[[VAL_23:.*]] = memref.load %[[VAL_11]]{{\[}}%[[VAL_22]]] : memref<?xindex>
// CHECK:               scf.yield %[[VAL_21]], %[[VAL_23]] : index, index
// CHECK:             }
// CHECK:             scf.for %[[VAL_24:.*]] = %[[VAL_20]]#0 to %[[VAL_20]]#1 step %[[VAL_5]] {
// CHECK:               %[[VAL_26:.*]] = memref.load %[[VAL_12]]{{\[}}%[[VAL_24]]] : memref<?xindex>
// CHECK:               %[[VAL_27:.*]] = arith.addi %[[VAL_26]], %[[VAL_7]] : index
// CHECK:               %[[VAL_28:.*]] = memref.load %[[VAL_13]]{{\[}}%[[VAL_24]]] : memref<?xf32>
// CHECK:               %[[VAL_29:.*]] = tensor.extract %[[VAL_1]]{{\[}}%[[VAL_15]], %[[VAL_27]]] : tensor<8x8xf32>
// CHECK:               %[[VAL_30:.*]] = arith.mulf %[[VAL_28]], %[[VAL_29]] : f32
// CHECK:               memref.store %[[VAL_30]], %[[VAL_14]]{{\[}}%[[VAL_15]], %[[VAL_27]]] : memref<8x8xf32>
// CHECK:             } {"Emitted from" = "linalg.generic"}
// CHECK:           } {"Emitted from" = "linalg.generic"}
// CHECK:           %[[VAL_31:.*]] = bufferization.to_tensor %[[VAL_14]] :
// CHECK:           return %[[VAL_31]] : tensor<8x8xf32>
// CHECK:         }
func.func @padded_mul(%arg0: tensor<4x4xf32, #CSR>, %arg1: tensor<8x8xf32>) -> tensor<8x8xf32> {
  %cst_0 = arith.constant 0.00000e+00 : f32
  %buf = tensor.empty() : tensor<8x8xf32>
  %s = linalg.fill ins(%cst_0 : f32) outs(%buf : tensor<8x8xf32>) -> tensor<8x8xf32>

  %padded = tensor.pad %arg0 low[2, 2] high[2, 2] {
    ^bb0(%arg75: index, %arg76: index):
      tensor.yield %cst_0 : f32
  } : tensor<4x4xf32, #CSR> to tensor<8x8xf32, #CSR>

  %0 = linalg.generic #elemwise
    ins(%padded, %arg1: tensor<8x8xf32, #CSR>, tensor<8x8xf32>)
    outs(%s: tensor<8x8xf32>) {
      ^bb(%a: f32, %b: f32, %x: f32):
        %0 = arith.mulf %a, %b : f32
        linalg.yield %0 : f32
  } -> tensor<8x8xf32>

  return %0 : tensor<8x8xf32>
}