// RUN: mlir-opt %s --sparse-reinterpret-map -sparsification | FileCheck %s

#SpVec = #sparse_tensor.encoding<{ map = (d0) -> (d0 : compressed) }>
#CSR = #sparse_tensor.encoding<{ map = (d0, d1) -> (d0 : dense, d1 : compressed) }>
#Row = #sparse_tensor.encoding<{ map = (d0, d1) -> (d0 : compressed, d1 : dense) }>
#EncDenseVec = #sparse_tensor.encoding<{ map = (d0) -> (d0 : dense) }>

#trait1 = {
  indexing_maps = [
    affine_map<(i) -> (i)>, // a
    affine_map<(i) -> (3)>, // b
    affine_map<(i) -> (i)>  // x (out)
  ],
  iterator_types = ["parallel"],
  doc = "x(i) += a(i) * b(3)"
}

// CHECK-LABEL: func @mul_inv_dense1d(
// CHECK-SAME: %[[VAL_0:.*]]: tensor<32xf32, #sparse{{[0-9]*}}>,
// CHECK-SAME: %[[VAL_1:.*]]: tensor<4xf32>,
// CHECK-SAME: %[[VAL_2:.*]]: tensor<32xf32>) -> tensor<32xf32> {
// CHECK-DAG: %[[VAL_3:.*]] = arith.constant 0 : index
// CHECK-DAG: %[[VAL_4:.*]] = arith.constant 3 : index
// CHECK-DAG: %[[VAL_5:.*]] = arith.constant 1 : index
// CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32xf32, #sparse{{[0-9]*}}>
// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32xf32, #sparse{{[0-9]*}}>
// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xf32, #sparse{{[0-9]*}}>
// CHECK-DAG: %[[VAL_9:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<4xf32> to memref<4xf32>
// CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<32xf32> to memref<32xf32>
// CHECK: %[[VAL_12:.*]] = memref.load %[[VAL_9]]{{\[}}%[[VAL_4]]] : memref<4xf32>
// CHECK: %[[VAL_13:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_3]]] : memref<?xindex>
// CHECK: %[[VAL_14:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_5]]] : memref<?xindex>
// CHECK: scf.for %[[VAL_15:.*]] = %[[VAL_13]] to %[[VAL_14]] step %[[VAL_5]] {
// CHECK: %[[VAL_16:.*]] = memref.load %[[VAL_7]]{{\[}}%[[VAL_15]]] : memref<?xindex>
// CHECK: %[[VAL_17:.*]] = memref.load %[[VAL_11]]{{\[}}%[[VAL_16]]] : memref<32xf32>
// CHECK: %[[VAL_18:.*]] = memref.load %[[VAL_8]]{{\[}}%[[VAL_15]]] : memref<?xf32>
// CHECK: %[[VAL_19:.*]] = arith.mulf %[[VAL_18]], %[[VAL_12]] : f32
// CHECK: %[[VAL_20:.*]] = arith.addf %[[VAL_17]], %[[VAL_19]] : f32
// CHECK: memref.store %[[VAL_20]], %[[VAL_11]]{{\[}}%[[VAL_16]]] : memref<32xf32>
// CHECK: }
// CHECK: %[[VAL_21:.*]] = bufferization.to_tensor %[[VAL_11]] : memref<32xf32>
// CHECK: return %[[VAL_21]] : tensor<32xf32>
// CHECK: }
func.func @mul_inv_dense1d(%arga: tensor<32xf32, #SpVec>,
                           %argb: tensor<4xf32>,
                           %argx: tensor<32xf32>) -> tensor<32xf32> {
  %0 = linalg.generic #trait1
     ins(%arga, %argb: tensor<32xf32, #SpVec>, tensor<4xf32>)
    outs(%argx: tensor<32xf32>) {
      ^bb(%a: f32, %b: f32, %x: f32):
        %0 = arith.mulf %a, %b : f32
        %1 = arith.addf %x, %0 : f32
        linalg.yield %1 : f32
  } -> tensor<32xf32>
  return %0 : tensor<32xf32>
}

// CHECK-LABEL: func.func @mul_inv_enc_dense1d(
// CHECK-SAME: %[[VAL_0:.*]]: tensor<32xf32, #sparse{{[0-9]*}}>,
// CHECK-SAME: %[[VAL_1:.*]]: tensor<4xf32, #sparse{{[0-9]*}}>) -> tensor<32xf32, #sparse{{[0-9]*}}> {
// CHECK-DAG: %[[VAL_2:.*]] = arith.constant 32 : index
// CHECK-DAG: %[[VAL_3:.*]] = arith.constant 3 : index
// CHECK-DAG: %[[VAL_4:.*]] = arith.constant 0 : index
// CHECK-DAG: %[[VAL_5:.*]] = arith.constant 1 : index
// CHECK: %[[VAL_6:.*]] = tensor.empty() : tensor<32xf32, #sparse{{[0-9]*}}>
// CHECK: %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xf32, #sparse{{[0-9]*}}> to memref<?xf32>
// CHECK: %[[VAL_8:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<4xf32, #sparse{{[0-9]*}}> to memref<?xf32>
// CHECK: %[[VAL_9:.*]] = sparse_tensor.values %[[VAL_6]] : tensor<32xf32, #sparse{{[0-9]*}}> to memref<?xf32>
// CHECK: %[[VAL_10:.*]] = memref.load %[[VAL_8]]{{\[}}%[[VAL_3]]] : memref<?xf32>
// CHECK: scf.for %[[VAL_11:.*]] = %[[VAL_4]] to %[[VAL_2]] step %[[VAL_5]] {
// CHECK: %[[VAL_12:.*]] = memref.load %[[VAL_9]]{{\[}}%[[VAL_11]]] : memref<?xf32>
// CHECK: %[[VAL_13:.*]] = memref.load %[[VAL_7]]{{\[}}%[[VAL_11]]] : memref<?xf32>
// CHECK: %[[VAL_14:.*]] = arith.mulf %[[VAL_13]], %[[VAL_10]] : f32
// CHECK: %[[VAL_15:.*]] = arith.addf %[[VAL_12]], %[[VAL_14]] : f32
// CHECK: memref.store %[[VAL_15]], %[[VAL_9]]{{\[}}%[[VAL_11]]] : memref<?xf32>
// CHECK: }
// CHECK: %[[VAL_16:.*]] = sparse_tensor.load %[[VAL_6]] : tensor<32xf32, #sparse{{[0-9]*}}>
// CHECK: return %[[VAL_16]] : tensor<32xf32, #sparse{{[0-9]*}}>
// CHECK: }
func.func @mul_inv_enc_dense1d(%arga: tensor<32xf32, #EncDenseVec>,
                               %argb: tensor<4xf32, #EncDenseVec>) -> tensor<32xf32, #EncDenseVec> {
  %argx = tensor.empty() : tensor<32xf32, #EncDenseVec>
  %0 = linalg.generic #trait1
     ins(%arga, %argb: tensor<32xf32, #EncDenseVec>, tensor<4xf32, #EncDenseVec>)
    outs(%argx: tensor<32xf32, #EncDenseVec>) {
      ^bb(%a: f32, %b: f32, %x: f32):
        %0 = arith.mulf %a, %b : f32
        %1 = arith.addf %x, %0 : f32
        linalg.yield %1 : f32
  } -> tensor<32xf32, #EncDenseVec>
  return %0 : tensor<32xf32, #EncDenseVec>
}

#trait2 = {
  indexing_maps = [
    affine_map<(i) -> (i)>,   // a
    affine_map<(i) -> (i+2)>, // b
    affine_map<(i) -> (i)>    // x (out)
  ],
  iterator_types = ["parallel"],
  doc = "x(i) = a(i) & b(i+2)"
}

// CHECK-LABEL: func @and_affine_dense1d(
// CHECK-SAME: %[[VAL_0:.*]]: tensor<32xi32, #sparse{{[0-9]*}}>,
// CHECK-SAME: %[[VAL_1:.*]]: tensor<34xi32>,
// CHECK-SAME: %[[VAL_2:.*]]: tensor<32xi32>) -> tensor<32xi32> {
// CHECK-DAG: %[[ZERO:.*]] = arith.constant 0 : i32
// CHECK-DAG: %[[VAL_3:.*]] = arith.constant 0 : index
// CHECK-DAG: %[[VAL_4:.*]] = arith.constant 1 : index
// CHECK-DAG: %[[VAL_5:.*]] = arith.constant 2 : index
// CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32xi32, #sparse{{[0-9]*}}>
// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32xi32, #sparse{{[0-9]*}}>
// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xi32, #sparse{{[0-9]*}}>
// CHECK-DAG: %[[VAL_9:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<34xi32> to memref<34xi32>
// CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<32xi32> to memref<32xi32>
// CHECK-DAG: linalg.fill ins(%[[ZERO]] : i32) outs(%[[VAL_11]] : memref<32xi32>)
// CHECK: %[[VAL_12:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_3]]] : memref<?xindex>
// CHECK: %[[VAL_13:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_4]]] : memref<?xindex>
// CHECK: scf.for %[[VAL_14:.*]] = %[[VAL_12]] to %[[VAL_13]] step %[[VAL_4]] {
// CHECK: %[[VAL_15:.*]] = memref.load %[[VAL_7]]{{\[}}%[[VAL_14]]] : memref<?xindex>
// CHECK: %[[VAL_16:.*]] = memref.load %[[VAL_8]]{{\[}}%[[VAL_14]]] : memref<?xi32>
// CHECK: %[[VAL_17:.*]] = arith.addi %[[VAL_15]], %[[VAL_5]] : index
// CHECK: %[[VAL_18:.*]] = memref.load %[[VAL_9]]{{\[}}%[[VAL_17]]] : memref<34xi32>
// CHECK: %[[VAL_19:.*]] = arith.andi %[[VAL_16]], %[[VAL_18]] : i32
// CHECK: memref.store %[[VAL_19]], %[[VAL_11]]{{\[}}%[[VAL_15]]] : memref<32xi32>
// CHECK: }
// CHECK: %[[VAL_20:.*]] = bufferization.to_tensor %[[VAL_11]] : memref<32xi32>
// CHECK: return %[[VAL_20]] : tensor<32xi32>
// CHECK: }
func.func @and_affine_dense1d(%arga: tensor<32xi32, #SpVec>,
                              %argb: tensor<34xi32>,
                              %argx: tensor<32xi32>) -> tensor<32xi32> {
  %0 = linalg.generic #trait2
     ins(%arga, %argb: tensor<32xi32, #SpVec>, tensor<34xi32>)
    outs(%argx: tensor<32xi32>) {
      ^bb(%a: i32, %b: i32, %x: i32):
        %0 = arith.andi %a, %b : i32
        linalg.yield %0 : i32
  } -> tensor<32xi32>
  return %0 : tensor<32xi32>
}

#trait3 = {
  indexing_maps = [
    affine_map<(i,j) -> (i,j)>,     // a
    affine_map<(i,j) -> (i+2,j+3)>, // b
    affine_map<(i,j) -> (i,j)>      // x (out)
  ],
  iterator_types = ["parallel","parallel"],
  doc = "x(i,j) += a(i,j) * b(i+2,j+3)"
}

// CHECK-LABEL: func @mul_affine_dense2d(
// CHECK-SAME: %[[VAL_0:.*]]: tensor<32x16xf64, #sparse{{[0-9]*}}>,
// CHECK-SAME: %[[VAL_1:.*]]: tensor<34x19xf64>,
// CHECK-SAME: %[[VAL_2:.*]]: tensor<32x16xf64>) -> tensor<32x16xf64> {
// CHECK-DAG: %[[VAL_3:.*]] = arith.constant 1 : index
// CHECK-DAG: %[[VAL_4:.*]] = arith.constant 32 : index
// CHECK-DAG: %[[VAL_5:.*]] = arith.constant 0 : index
// CHECK-DAG: %[[VAL_6:.*]] = arith.constant 2 : index
// CHECK-DAG: %[[VAL_7:.*]] = arith.constant 3 : index
// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor<32x16xf64, #sparse{{[0-9]*}}>
// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<32x16xf64, #sparse{{[0-9]*}}>
// CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16xf64, #sparse{{[0-9]*}}>
// CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<34x19xf64> to memref<34x19xf64>
// CHECK-DAG: %[[VAL_13:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<32x16xf64> to memref<32x16xf64>
// CHECK: scf.for %[[VAL_14:.*]] = %[[VAL_5]] to %[[VAL_4]] step %[[VAL_3]] {
// CHECK: %[[VAL_15:.*]] = memref.load %[[VAL_8]]{{\[}}%[[VAL_14]]] : memref<?xindex>
// CHECK: %[[VAL_16:.*]] = arith.addi %[[VAL_14]], %[[VAL_3]] : index
// CHECK: %[[VAL_17:.*]] = memref.load %[[VAL_8]]{{\[}}%[[VAL_16]]] : memref<?xindex>
// CHECK: scf.for %[[VAL_18:.*]] = %[[VAL_15]] to %[[VAL_17]] step %[[VAL_3]] {
// CHECK: %[[VAL_19:.*]] = memref.load %[[VAL_9]]{{\[}}%[[VAL_18]]] : memref<?xindex>
// CHECK: %[[VAL_20:.*]] = memref.load %[[VAL_13]]{{\[}}%[[VAL_14]], %[[VAL_19]]] : memref<32x16xf64>
// CHECK: %[[VAL_21:.*]] = memref.load %[[VAL_10]]{{\[}}%[[VAL_18]]] : memref<?xf64>
// CHECK: %[[VAL_22:.*]] = arith.addi %[[VAL_14]], %[[VAL_6]] : index
// CHECK: %[[VAL_23:.*]] = arith.addi %[[VAL_19]], %[[VAL_7]] : index
// CHECK: %[[VAL_24:.*]] = memref.load %[[VAL_11]]{{\[}}%[[VAL_22]], %[[VAL_23]]] : memref<34x19xf64>
// CHECK: %[[VAL_25:.*]] = arith.mulf %[[VAL_21]], %[[VAL_24]] : f64
// CHECK: %[[VAL_26:.*]] = arith.addf %[[VAL_20]], %[[VAL_25]] : f64
// CHECK: memref.store %[[VAL_26]], %[[VAL_13]]{{\[}}%[[VAL_14]], %[[VAL_19]]] : memref<32x16xf64>
// CHECK: }
// CHECK: }
// CHECK: %[[VAL_27:.*]] = bufferization.to_tensor %[[VAL_13]] : memref<32x16xf64>
// CHECK: return %[[VAL_27]] : tensor<32x16xf64>
// CHECK: }
func.func @mul_affine_dense2d(%arga: tensor<32x16xf64, #CSR>,
                              %argb: tensor<34x19xf64>,
                              %argx: tensor<32x16xf64>) -> tensor<32x16xf64> {
  %0 = linalg.generic #trait3
     ins(%arga, %argb: tensor<32x16xf64, #CSR>, tensor<34x19xf64>)
    outs(%argx: tensor<32x16xf64>) {
      ^bb(%a: f64, %b: f64, %x: f64):
        %0 = arith.mulf %a, %b : f64
        %1 = arith.addf %x, %0 : f64
        linalg.yield %1 : f64
  } -> tensor<32x16xf64>
  return %0 : tensor<32x16xf64>
}

#trait4 = {
  indexing_maps = [
    affine_map<(i,j) -> (i+2,j)>, // a
    affine_map<(i,j) -> (i,j+3)>, // b
    affine_map<(i,j) -> (i,j)>    // x (out)
  ],
  iterator_types = ["parallel","parallel"],
  doc = "x(i,j) += a(i+2,j) * b(i,j+3)"
}

// CHECK-LABEL: func.func @mul_affine_dense_dim_2d(
// CHECK-SAME: %[[VAL_0:.*]]: tensor<34x16xf64, #sparse{{[0-9]*}}>
// CHECK-SAME: %[[VAL_1:.*]]: tensor<32x19xf64, #sparse{{[0-9]*}}>,
// CHECK-SAME: %[[VAL_2:.*]]: tensor<32x16xf64>) -> tensor<32x16xf64> {
// CHECK-DAG: %[[VAL_3:.*]] = arith.constant 19 : index
// CHECK-DAG: %[[VAL_4:.*]] = arith.constant 0 : index
// CHECK-DAG: %[[VAL_5:.*]] = arith.constant 1 : index
// CHECK-DAG: %[[VAL_6:.*]] = arith.constant 2 : index
// CHECK-DAG: %[[VAL_7:.*]] = arith.constant 3 : index
// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor<34x16xf64, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<34x16xf64, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<34x16xf64, #sparse{{[0-9]*}}> to memref<?xf64>
// CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor<32x19xf64, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-DAG: %[[VAL_12:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 0 : index} : tensor<32x19xf64, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-DAG: %[[VAL_13:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<32x19xf64, #sparse{{[0-9]*}}> to memref<?xf64>
// CHECK-DAG: %[[VAL_14:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<32x16xf64> to memref<32x16xf64>
// CHECK: %[[VAL_15:.*]] = memref.load %[[VAL_11]]{{\[}}%[[VAL_4]]] : memref<?xindex>
// CHECK: %[[VAL_16:.*]] = memref.load %[[VAL_11]]{{\[}}%[[VAL_5]]] : memref<?xindex>
// CHECK: scf.for %[[VAL_17:.*]] = %[[VAL_15]] to %[[VAL_16]] step %[[VAL_5]] {
// CHECK: %[[VAL_18:.*]] = memref.load %[[VAL_12]]{{\[}}%[[VAL_17]]] : memref<?xindex>
// CHECK: %[[VAL_19:.*]] = arith.addi %[[VAL_18]], %[[VAL_6]] : index
// CHECK: %[[VAL_20:.*]] = memref.load %[[VAL_8]]{{\[}}%[[VAL_19]]] : memref<?xindex>
// CHECK: %[[VAL_21:.*]] = arith.addi %[[VAL_19]], %[[VAL_5]] : index
// CHECK: %[[VAL_22:.*]] = memref.load %[[VAL_8]]{{\[}}%[[VAL_21]]] : memref<?xindex>
// CHECK: scf.for %[[VAL_23:.*]] = %[[VAL_20]] to %[[VAL_22]] step %[[VAL_5]] {
// CHECK: %[[VAL_24:.*]] = memref.load %[[VAL_9]]{{\[}}%[[VAL_23]]] : memref<?xindex>
// CHECK: %[[VAL_26:.*]] = arith.muli %[[VAL_17]], %[[VAL_3]] : index
// CHECK: %[[VAL_25:.*]] = arith.addi %[[VAL_24]], %[[VAL_7]] : index
// CHECK: %[[VAL_27:.*]] = arith.addi %[[VAL_25]], %[[VAL_26]] : index
// CHECK: %[[VAL_28:.*]] = memref.load %[[VAL_14]]{{\[}}%[[VAL_18]], %[[VAL_24]]] : memref<32x16xf64>
// CHECK: %[[VAL_29:.*]] = memref.load %[[VAL_10]]{{\[}}%[[VAL_23]]] : memref<?xf64>
// CHECK: %[[VAL_30:.*]] = memref.load %[[VAL_13]]{{\[}}%[[VAL_27]]] : memref<?xf64>
// CHECK: %[[VAL_31:.*]] = arith.mulf %[[VAL_29]], %[[VAL_30]] : f64
// CHECK: %[[VAL_32:.*]] = arith.addf %[[VAL_28]], %[[VAL_31]] : f64
// CHECK: memref.store %[[VAL_32]], %[[VAL_14]]{{\[}}%[[VAL_18]], %[[VAL_24]]] : memref<32x16xf64>
// CHECK: }
// CHECK: }
// CHECK: %[[VAL_33:.*]] = bufferization.to_tensor %[[VAL_14]] : memref<32x16xf64>
// CHECK: return %[[VAL_33]] : tensor<32x16xf64>
// CHECK: }
func.func @mul_affine_dense_dim_2d(%arga: tensor<34x16xf64, #CSR>,
                                   %argb: tensor<32x19xf64, #Row>,
                                   %argx: tensor<32x16xf64>) -> tensor<32x16xf64> {
  %0 = linalg.generic #trait4
     ins(%arga, %argb: tensor<34x16xf64, #CSR>, tensor<32x19xf64, #Row>)
    outs(%argx: tensor<32x16xf64>) {
      ^bb(%a: f64, %b: f64, %x: f64):
        %0 = arith.mulf %a, %b : f64
        %1 = arith.addf %x, %0 : f64
        linalg.yield %1 : f64
  } -> tensor<32x16xf64>
  return %0 : tensor<32x16xf64>
}

#trait5 = {
  indexing_maps = [
    affine_map<(i,j) -> (2,j)>, // a
    affine_map<(i,j) -> (i,3)>, // b
    affine_map<(i,j) -> (i,j)>  // x (out)
  ],
  iterator_types = ["parallel","parallel"],
  doc = "x(i,j) += a(2,j) * b(i,3)"
}

// CHECK-LABEL: func.func @mul_const_affine_dense_dim_2d(
// CHECK-SAME: %[[VAL_0:.*]]: tensor<34x16xf64,
// CHECK-SAME: %[[VAL_1:.*]]: tensor<32x19xf64, #sparse{{[0-9]*}}>,
// CHECK-SAME: %[[VAL_2:.*]]: tensor<32x16xf64>) -> tensor<32x16xf64> {
// CHECK-DAG: %[[VAL_3:.*]] = arith.constant 19 : index
// CHECK-DAG: %[[VAL_4:.*]] = arith.constant 2 : index
// CHECK-DAG: %[[VAL_5:.*]] = arith.constant 0 : index
// CHECK-DAG: %[[VAL_6:.*]] = arith.constant 1 : index
// CHECK-DAG: %[[VAL_7:.*]] = arith.constant 3 : index
// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor<34x16xf64, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<34x16xf64, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<34x16xf64, #sparse{{[0-9]*}}> to memref<?xf64>
// CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor<32x19xf64, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-DAG: %[[VAL_12:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 0 : index} : tensor<32x19xf64, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-DAG: %[[VAL_13:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<32x19xf64, #sparse{{[0-9]*}}> to memref<?xf64>
// CHECK-DAG: %[[VAL_14:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<32x16xf64> to memref<32x16xf64>
// CHECK: %[[VAL_15:.*]] = memref.load %[[VAL_11]]{{\[}}%[[VAL_5]]] : memref<?xindex>
// CHECK: %[[VAL_16:.*]] = memref.load %[[VAL_11]]{{\[}}%[[VAL_6]]] : memref<?xindex>
// CHECK: scf.for %[[VAL_17:.*]] = %[[VAL_15]] to %[[VAL_16]] step %[[VAL_6]] {
// CHECK: %[[VAL_18:.*]] = memref.load %[[VAL_12]]{{\[}}%[[VAL_17]]] : memref<?xindex>
// CHECK: %[[VAL_19:.*]] = arith.muli %[[VAL_17]], %[[VAL_3]] : index
// CHECK: %[[VAL_20:.*]] = arith.addi %[[VAL_19]], %[[VAL_7]] : index
// CHECK: %[[VAL_21:.*]] = memref.load %[[VAL_13]]{{\[}}%[[VAL_20]]] : memref<?xf64>
// CHECK: %[[VAL_22:.*]] = memref.load %[[VAL_8]]{{\[}}%[[VAL_4]]] : memref<?xindex>
// CHECK: %[[VAL_23:.*]] = memref.load %[[VAL_8]]{{\[}}%[[VAL_7]]] : memref<?xindex>
// CHECK: scf.for %[[VAL_24:.*]] = %[[VAL_22]] to %[[VAL_23]] step %[[VAL_6]] {
// CHECK: %[[VAL_25:.*]] = memref.load %[[VAL_9]]{{\[}}%[[VAL_24]]] : memref<?xindex>
// CHECK: %[[VAL_26:.*]] = memref.load %[[VAL_14]]{{\[}}%[[VAL_18]], %[[VAL_25]]] : memref<32x16xf64>
// CHECK: %[[VAL_27:.*]] = memref.load %[[VAL_10]]{{\[}}%[[VAL_24]]] : memref<?xf64>
// CHECK: %[[VAL_28:.*]] = arith.mulf %[[VAL_27]], %[[VAL_21]] : f64
// CHECK: %[[VAL_29:.*]] = arith.addf %[[VAL_26]], %[[VAL_28]] : f64
// CHECK: memref.store %[[VAL_29]], %[[VAL_14]]{{\[}}%[[VAL_18]], %[[VAL_25]]] : memref<32x16xf64>
// CHECK: }
// CHECK: }
// CHECK: %[[VAL_30:.*]] = bufferization.to_tensor %[[VAL_14]] : memref<32x16xf64>
// CHECK: return %[[VAL_30]] : tensor<32x16xf64>
// CHECK: }
func.func @mul_const_affine_dense_dim_2d(%arga: tensor<34x16xf64, #CSR>,
                                         %argb: tensor<32x19xf64, #Row>,
                                         %argx: tensor<32x16xf64>) -> tensor<32x16xf64> {
  %0 = linalg.generic #trait5
     ins(%arga, %argb: tensor<34x16xf64, #CSR>, tensor<32x19xf64, #Row>)
    outs(%argx: tensor<32x16xf64>) {
      ^bb(%a: f64, %b: f64, %x: f64):
        %0 = arith.mulf %a, %b : f64
        %1 = arith.addf %x, %0 : f64
        linalg.yield %1 : f64
  } -> tensor<32x16xf64>
  return %0 : tensor<32x16xf64>
}