// RUN: mlir-opt %s --linalg-fuse-elementwise-ops \
// RUN:             --sparsification-and-bufferization | FileCheck %s

#Sparse = #sparse_tensor.encoding<{
  map = (d0, d1, d2) -> (d0 : dense, d1 : dense, d2 : compressed),
  explicitVal = 1.0 : f32
}>
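
// Note: `explicitVal = 1.0 : f32` declares that every explicitly stored
// value of this sparse tensor equals 1.0, so the stored values never need
// to be loaded. The two kernels below rely on this: once
// --linalg-fuse-elementwise-ops has fused the multiplication into the
// reduction, --sparsification-and-bufferization can fold the sparse
// operand away over the stored entries (A * A => 1 and A * B => B),
// which is what the CHECK lines verify.
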
#trait3p = {
  indexing_maps = [
    affine_map<(i,j,k) -> (i,j,k)>,  // A
    affine_map<(i,j,k) -> (i,j,k)>,  // B
    affine_map<(i,j,k) -> (i,j,k)>   // X (out)
  ],
  iterator_types = ["parallel", "parallel", "parallel"]
}

#trait3r = {
  indexing_maps = [
    affine_map<(i,j,k) -> (i,j,k)>,  // A
    affine_map<(i,j,k) -> ()>        // X (out)
  ],
  iterator_types = ["reduction", "reduction", "reduction"]
}

//
// Make sure X += A * A => X += 1 in a single loop.
//
// CHECK-LABEL: func.func @sum_squares(
// CHECK-SAME: %[[VAL_0:.*0]]: memref<?xindex>,
// CHECK-SAME: %[[VAL_1:.*1]]: memref<?xindex>,
// CHECK-SAME: %[[VAL_2:.*2]]: memref<?xf32>,
// CHECK-SAME: %[[VAL_3:.*]]: !sparse_tensor.storage_specifier<#{{.*}}>) -> memref<f32> {
// CHECK-DAG: %[[VAL_4:.*]] = arith.constant 1.000000e+00 : f32
// CHECK-DAG: %[[VAL_5:.*]] = arith.constant 1 : index
// CHECK-DAG: %[[VAL_6:.*]] = arith.constant 0 : index
// CHECK-DAG: %[[VAL_7:.*]] = arith.constant 3 : index
// CHECK-DAG: %[[VAL_8:.*]] = arith.constant 2 : index
// CHECK-DAG: %[[VAL_9:.*]] = arith.constant 0.000000e+00 : f32
// CHECK: %[[VAL_10:.*]] = memref.alloc() {alignment = 64 : i64} : memref<f32>
// CHECK: linalg.fill ins(%[[VAL_9]] : f32) outs(%[[VAL_10]] : memref<f32>)
// CHECK: %[[VAL_11:.*]] = sparse_tensor.storage_specifier.get %[[VAL_3]]
// CHECK: %[[VAL_12:.*]] = memref.subview %[[VAL_0]][0] {{\[}}%[[VAL_11]]] [1] : memref<?xindex> to memref<?xindex>
// CHECK: %[[VAL_13:.*]] = memref.load %[[VAL_10]][] : memref<f32>
// CHECK: %[[VAL_14:.*]] = scf.for %[[VAL_15:.*]] = %[[VAL_6]] to %[[VAL_8]] step %[[VAL_5]] iter_args(%[[VAL_16:.*]] = %[[VAL_13]]) -> (f32) {
// CHECK: %[[VAL_17:.*]] = arith.muli %[[VAL_15]], %[[VAL_7]] : index
// CHECK: %[[VAL_18:.*]] = scf.for %[[VAL_19:.*]] = %[[VAL_6]] to %[[VAL_7]] step %[[VAL_5]] iter_args(%[[VAL_20:.*]] = %[[VAL_16]]) -> (f32) {
// CHECK: %[[VAL_21:.*]] = arith.addi %[[VAL_19]], %[[VAL_17]] : index
// CHECK: %[[VAL_22:.*]] = memref.load %[[VAL_12]]{{\[}}%[[VAL_21]]] : memref<?xindex>
// CHECK: %[[VAL_23:.*]] = arith.addi %[[VAL_21]], %[[VAL_5]] : index
// CHECK: %[[VAL_24:.*]] = memref.load %[[VAL_12]]{{\[}}%[[VAL_23]]] : memref<?xindex>
// CHECK: %[[VAL_25:.*]] = scf.for %[[VAL_26:.*]] = %[[VAL_22]] to %[[VAL_24]] step %[[VAL_5]] iter_args(%[[VAL_27:.*]] = %[[VAL_20]]) -> (f32) {
// CHECK: %[[VAL_28:.*]] = arith.addf %[[VAL_27]], %[[VAL_4]] : f32
// CHECK: scf.yield %[[VAL_28]] : f32
// CHECK: } {"Emitted from" = "linalg.generic"}
// CHECK: scf.yield %[[VAL_25]] : f32
// CHECK: } {"Emitted from" = "linalg.generic"}
// CHECK: scf.yield %[[VAL_18]] : f32
// CHECK: } {"Emitted from" = "linalg.generic"}
// CHECK: memref.store %[[VAL_14]], %[[VAL_10]][] : memref<f32>
// CHECK: return %[[VAL_10]] : memref<f32>
// CHECK: }
//
func.func @sum_squares(%a: tensor<2x3x8xf32, #Sparse>) -> tensor<f32> {
  %cst = arith.constant 0.000000e+00 : f32
  %0 = tensor.empty() : tensor<2x3x8xf32>
  %1 = linalg.generic #trait3p
      ins(%a, %a : tensor<2x3x8xf32, #Sparse>, tensor<2x3x8xf32, #Sparse>)
      outs(%0 : tensor<2x3x8xf32>) {
    ^bb0(%in1: f32, %in2: f32, %out: f32):
      %mul = arith.mulf %in1, %in2 : f32
      linalg.yield %mul : f32
  } -> tensor<2x3x8xf32>
  %2 = tensor.empty() : tensor<f32>
  %3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<f32>) -> tensor<f32>
  %4 = linalg.generic #trait3r
      ins(%1 : tensor<2x3x8xf32>)
      outs(%3 : tensor<f32>) {
    ^bb0(%in: f32, %out: f32):
      %add = arith.addf %in, %out : f32
      linalg.yield %add : f32
  } -> tensor<f32>

  return %4 : tensor<f32>
}
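
// A note on the loops checked above: the two dense dimensions become plain
// scf.for loops over the static sizes 2 and 3, the compressed dimension
// iterates between consecutive entries of the positions array (VAL_22 to
// VAL_24), and, because every stored value is known to be 1.0, the
// innermost body simply adds the constant 1.0 (VAL_4) without loading or
// multiplying any values.
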
//
// Make sure X += A * B => X += B in a single loop.
//
// CHECK-LABEL: func.func @sum_products(
// CHECK-SAME: %[[VAL_0:.*0]]: memref<?xindex>,
// CHECK-SAME: %[[VAL_1:.*1]]: memref<?xindex>,
// CHECK-SAME: %[[VAL_2:.*2]]: memref<?xf32>,
// CHECK-SAME: %[[VAL_3:.*3]]: !sparse_tensor.storage_specifier<#{{.*}}>,
// CHECK-SAME: %[[VAL_4:.*4]]: memref<2x3x8xf32>) -> memref<f32> {
// CHECK-DAG: %[[VAL_5:.*]] = arith.constant 1 : index
// CHECK-DAG: %[[VAL_6:.*]] = arith.constant 0 : index
// CHECK-DAG: %[[VAL_7:.*]] = arith.constant 3 : index
// CHECK-DAG: %[[VAL_8:.*]] = arith.constant 2 : index
// CHECK-DAG: %[[VAL_9:.*]] = arith.constant 0.000000e+00 : f32
// CHECK: %[[VAL_10:.*]] = memref.alloc() {alignment = 64 : i64} : memref<f32>
// CHECK: linalg.fill ins(%[[VAL_9]] : f32) outs(%[[VAL_10]] : memref<f32>)
// CHECK: %[[VAL_11:.*]] = sparse_tensor.storage_specifier.get %[[VAL_3]]
// CHECK: %[[VAL_12:.*]] = memref.subview %[[VAL_0]][0] {{\[}}%[[VAL_11]]] [1] : memref<?xindex> to memref<?xindex>
// CHECK: %[[VAL_13:.*]] = sparse_tensor.storage_specifier.get %[[VAL_3]]
// CHECK: %[[VAL_14:.*]] = memref.subview %[[VAL_1]][0] {{\[}}%[[VAL_13]]] [1] : memref<?xindex> to memref<?xindex>
// CHECK: %[[VAL_15:.*]] = memref.load %[[VAL_10]][] : memref<f32>
// CHECK: %[[VAL_16:.*]] = scf.for %[[VAL_17:.*]] = %[[VAL_6]] to %[[VAL_8]] step %[[VAL_5]] iter_args(%[[VAL_18:.*]] = %[[VAL_15]]) -> (f32) {
// CHECK: %[[VAL_19:.*]] = arith.muli %[[VAL_17]], %[[VAL_7]] : index
// CHECK: %[[VAL_20:.*]] = scf.for %[[VAL_21:.*]] = %[[VAL_6]] to %[[VAL_7]] step %[[VAL_5]] iter_args(%[[VAL_22:.*]] = %[[VAL_18]]) -> (f32) {
// CHECK: %[[VAL_23:.*]] = arith.addi %[[VAL_21]], %[[VAL_19]] : index
// CHECK: %[[VAL_24:.*]] = memref.load %[[VAL_12]]{{\[}}%[[VAL_23]]] : memref<?xindex>
// CHECK: %[[VAL_25:.*]] = arith.addi %[[VAL_23]], %[[VAL_5]] : index
// CHECK: %[[VAL_26:.*]] = memref.load %[[VAL_12]]{{\[}}%[[VAL_25]]] : memref<?xindex>
// CHECK: %[[VAL_27:.*]] = scf.for %[[VAL_28:.*]] = %[[VAL_24]] to %[[VAL_26]] step %[[VAL_5]] iter_args(%[[VAL_29:.*]] = %[[VAL_22]]) -> (f32) {
// CHECK: %[[VAL_30:.*]] = memref.load %[[VAL_14]]{{\[}}%[[VAL_28]]] : memref<?xindex>
// CHECK: %[[VAL_31:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_17]], %[[VAL_21]], %[[VAL_30]]] : memref<2x3x8xf32>
// CHECK: %[[VAL_32:.*]] = arith.addf %[[VAL_31]], %[[VAL_29]] : f32
// CHECK: scf.yield %[[VAL_32]] : f32
// CHECK: } {"Emitted from" = "linalg.generic"}
// CHECK: scf.yield %[[VAL_27]] : f32
// CHECK: } {"Emitted from" = "linalg.generic"}
// CHECK: scf.yield %[[VAL_20]] : f32
// CHECK: } {"Emitted from" = "linalg.generic"}
// CHECK: memref.store %[[VAL_16]], %[[VAL_10]][] : memref<f32>
// CHECK: return %[[VAL_10]] : memref<f32>
// CHECK: }
//
func.func @sum_products(%a: tensor<2x3x8xf32, #Sparse>, %b: tensor<2x3x8xf32>) -> tensor<f32> {
  %cst = arith.constant 0.000000e+00 : f32
  %0 = tensor.empty() : tensor<2x3x8xf32>
  %1 = linalg.generic #trait3p
      ins(%a, %b : tensor<2x3x8xf32, #Sparse>, tensor<2x3x8xf32>)
      outs(%0 : tensor<2x3x8xf32>) {
    ^bb0(%in1: f32, %in2: f32, %out: f32):
      %mul = arith.mulf %in1, %in2 : f32
      linalg.yield %mul : f32
  } -> tensor<2x3x8xf32>
  %2 = tensor.empty() : tensor<f32>
  %3 = linalg.fill ins(%cst : f32) outs(%2 : tensor<f32>) -> tensor<f32>
  %4 = linalg.generic #trait3r
      ins(%1 : tensor<2x3x8xf32>)
      outs(%3 : tensor<f32>) {
    ^bb0(%in: f32, %out: f32):
      %add = arith.addf %in, %out : f32
      linalg.yield %add : f32
  } -> tensor<f32>

  return %4 : tensor<f32>
}
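
// A note on the loops checked above: the innermost loop loads the stored
// coordinate (VAL_30) from the coordinates array, loads the corresponding
// element of the dense operand B (VAL_31), and adds it to the running sum.
// The multiplication by A's stored value has been folded away completely,
// since that value is known to be 1.0.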