// RUN: mlir-opt %s --sparse-reinterpret-map -sparsification | FileCheck %s

#SV = #sparse_tensor.encoding<{ map = (d0) -> (d0 : compressed) }>

#trait1 = {
  indexing_maps = [
    affine_map<(i) -> (i)>,  // a
    affine_map<(i) -> (i)>   // x (out)
  ],
  iterator_types = ["parallel"],
  doc = "x(i) = OP a(i)"
}

#trait2 = {
  indexing_maps = [
    affine_map<(i) -> (i)>,  // a
    affine_map<(i) -> (i)>,  // b
    affine_map<(i) -> (i)>   // x (out)
  ],
  iterator_types = ["parallel"],
  doc = "x(i) = a(i) OP b(i)"
}

#traitc = {
  indexing_maps = [
    affine_map<(i) -> (i)>,  // a
    affine_map<(i) -> (i)>   // x (out)
  ],
  iterator_types = ["parallel"],
  doc = "x(i) = a(i) OP c"
}

// CHECK-LABEL: func @abs(
// CHECK-SAME: %[[VAL_0:.*]]: tensor<32xf64, #sparse{{[0-9]*}}>,
// CHECK-SAME: %[[VAL_1:.*]]: tensor<32xf64>) -> tensor<32xf64> {
// CHECK-DAG: %[[VAL_2:.*]] = arith.constant 0 : index
// CHECK-DAG: %[[VAL_3:.*]] = arith.constant 1 : index
// CHECK-DAG: %[[VAL_4:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32xf64, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-DAG: %[[VAL_5:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32xf64, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xf64, #sparse{{[0-9]*}}> to memref<?xf64>
// CHECK-DAG: %[[VAL_7:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<32xf64> to memref<32xf64>
// CHECK: %[[VAL_8:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_2]]] : memref<?xindex>
// CHECK: %[[VAL_9:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_3]]] : memref<?xindex>
// CHECK: scf.for %[[VAL_10:.*]] = %[[VAL_8]] to %[[VAL_9]] step %[[VAL_3]] {
// CHECK: %[[VAL_11:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_10]]] : memref<?xindex>
// CHECK: %[[VAL_12:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_10]]] : memref<?xf64>
// CHECK: %[[VAL_13:.*]] = math.absf %[[VAL_12]] : f64
// CHECK: memref.store %[[VAL_13]], %[[VAL_7]]{{\[}}%[[VAL_11]]] : memref<32xf64>
// CHECK: }
// CHECK: %[[VAL_14:.*]] = bufferization.to_tensor %[[VAL_7]] : memref<32xf64>
// CHECK: return %[[VAL_14]] : tensor<32xf64>
// CHECK: }
func.func @abs(%arga: tensor<32xf64, #SV>,
               %argx: tensor<32xf64>) -> tensor<32xf64> {
  %0 = linalg.generic #trait1
     ins(%arga: tensor<32xf64, #SV>)
    outs(%argx: tensor<32xf64>) {
      ^bb(%a: f64, %x: f64):
        %0 = math.absf %a : f64
        linalg.yield %0 : f64
  } -> tensor<32xf64>
  return %0 : tensor<32xf64>
}

// CHECK-LABEL: func @ceil(
// CHECK-SAME: %[[VAL_0:.*]]: tensor<32xf64, #sparse{{[0-9]*}}>,
// CHECK-SAME: %[[VAL_1:.*]]: tensor<32xf64>) -> tensor<32xf64> {
// CHECK-DAG: %[[VAL_2:.*]] = arith.constant 0 : index
// CHECK-DAG: %[[VAL_3:.*]] = arith.constant 1 : index
// CHECK-DAG: %[[VAL_4:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32xf64, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-DAG: %[[VAL_5:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32xf64, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xf64, #sparse{{[0-9]*}}> to memref<?xf64>
// CHECK-DAG: %[[VAL_7:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<32xf64> to memref<32xf64>
// CHECK: %[[VAL_8:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_2]]] : memref<?xindex>
// CHECK: %[[VAL_9:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_3]]] : memref<?xindex>
// CHECK: scf.for %[[VAL_10:.*]] = %[[VAL_8]] to %[[VAL_9]] step %[[VAL_3]] {
// CHECK: %[[VAL_11:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_10]]] : memref<?xindex>
// CHECK: %[[VAL_12:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_10]]] : memref<?xf64>
// CHECK: %[[VAL_13:.*]] = math.ceil %[[VAL_12]] : f64
// CHECK: memref.store %[[VAL_13]], %[[VAL_7]]{{\[}}%[[VAL_11]]] : memref<32xf64>
// CHECK: }
// CHECK: %[[VAL_14:.*]] = bufferization.to_tensor %[[VAL_7]] : memref<32xf64>
// CHECK: return %[[VAL_14]] : tensor<32xf64>
// CHECK: }
func.func @ceil(%arga: tensor<32xf64, #SV>,
                %argx: tensor<32xf64>) -> tensor<32xf64> {
  %0 = linalg.generic #trait1
     ins(%arga: tensor<32xf64, #SV>)
    outs(%argx: tensor<32xf64>) {
      ^bb(%a: f64, %x: f64):
        %0 = math.ceil %a : f64
        linalg.yield %0 : f64
  } -> tensor<32xf64>
  return %0 : tensor<32xf64>
}

// CHECK-LABEL: func @floor(
// CHECK-SAME: %[[VAL_0:.*]]: tensor<32xf64, #sparse{{[0-9]*}}>,
// CHECK-SAME: %[[VAL_1:.*]]: tensor<32xf64>) -> tensor<32xf64> {
// CHECK-DAG: %[[VAL_2:.*]] = arith.constant 0 : index
// CHECK-DAG: %[[VAL_3:.*]] = arith.constant 1 : index
// CHECK-DAG: %[[VAL_4:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32xf64, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-DAG: %[[VAL_5:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32xf64, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xf64, #sparse{{[0-9]*}}> to memref<?xf64>
// CHECK-DAG: %[[VAL_7:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<32xf64> to memref<32xf64>
// CHECK: %[[VAL_8:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_2]]] : memref<?xindex>
// CHECK: %[[VAL_9:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_3]]] : memref<?xindex>
// CHECK: scf.for %[[VAL_10:.*]] = %[[VAL_8]] to %[[VAL_9]] step %[[VAL_3]] {
// CHECK: %[[VAL_11:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_10]]] : memref<?xindex>
// CHECK: %[[VAL_12:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_10]]] : memref<?xf64>
// CHECK: %[[VAL_13:.*]] = math.floor %[[VAL_12]] : f64
// CHECK: memref.store %[[VAL_13]], %[[VAL_7]]{{\[}}%[[VAL_11]]] : memref<32xf64>
// CHECK: }
// CHECK: %[[VAL_14:.*]] = bufferization.to_tensor %[[VAL_7]] : memref<32xf64>
// CHECK: return %[[VAL_14]] : tensor<32xf64>
// CHECK: }
func.func @floor(%arga: tensor<32xf64, #SV>,
                 %argx: tensor<32xf64>) -> tensor<32xf64> {
  %0 = linalg.generic #trait1
     ins(%arga: tensor<32xf64, #SV>)
    outs(%argx: tensor<32xf64>) {
      ^bb(%a: f64, %x: f64):
        %0 = math.floor %a : f64
        linalg.yield %0 : f64
  } -> tensor<32xf64>
  return %0 : tensor<32xf64>
}

// CHECK-LABEL: func @neg(
// CHECK-SAME: %[[VAL_0:.*]]: tensor<32xf64, #sparse{{[0-9]*}}>,
// CHECK-SAME: %[[VAL_1:.*]]: tensor<32xf64>) -> tensor<32xf64> {
// CHECK-DAG: %[[VAL_2:.*]] = arith.constant 0 : index
// CHECK-DAG: %[[VAL_3:.*]] = arith.constant 1 : index
// CHECK-DAG: %[[VAL_4:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32xf64, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-DAG: %[[VAL_5:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32xf64, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xf64, #sparse{{[0-9]*}}> to memref<?xf64>
// CHECK-DAG: %[[VAL_7:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<32xf64> to memref<32xf64>
// CHECK: %[[VAL_8:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_2]]] : memref<?xindex>
// CHECK: %[[VAL_9:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_3]]] : memref<?xindex>
// CHECK: scf.for %[[VAL_10:.*]] = %[[VAL_8]] to %[[VAL_9]] step %[[VAL_3]] {
// CHECK: %[[VAL_11:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_10]]] : memref<?xindex>
// CHECK: %[[VAL_12:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_10]]] : memref<?xf64>
// CHECK: %[[VAL_13:.*]] = arith.negf %[[VAL_12]] : f64
// CHECK: memref.store %[[VAL_13]], %[[VAL_7]]{{\[}}%[[VAL_11]]] : memref<32xf64>
// CHECK: }
// CHECK: %[[VAL_14:.*]] = bufferization.to_tensor %[[VAL_7]] : memref<32xf64>
// CHECK: return %[[VAL_14]] : tensor<32xf64>
// CHECK: }
func.func @neg(%arga: tensor<32xf64, #SV>,
               %argx: tensor<32xf64>) -> tensor<32xf64> {
  %0 = linalg.generic #trait1
     ins(%arga: tensor<32xf64, #SV>)
    outs(%argx: tensor<32xf64>) {
      ^bb(%a: f64, %x: f64):
        %0 = arith.negf %a : f64
        linalg.yield %0 : f64
  } -> tensor<32xf64>
  return %0 : tensor<32xf64>
}
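
// Addition with a dense operand is not zero-preserving: a while-loop
// co-iterates over all entries (adding where the sparse coordinate matches,
// copying the dense value otherwise), and a trailing for-loop copies the
// dense values that remain after the stored entries are exhausted.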
// CHECK-LABEL: func @add(
// CHECK-SAME: %[[VAL_0:.*]]: tensor<32xf64, #sparse{{[0-9]*}}>,
// CHECK-SAME: %[[VAL_1:.*]]: tensor<32xf64>,
// CHECK-SAME: %[[VAL_2:.*]]: tensor<32xf64>) -> tensor<32xf64> {
// CHECK-DAG: %[[VAL_3:.*]] = arith.constant 32 : index
// CHECK-DAG: %[[VAL_4:.*]] = arith.constant 0 : index
// CHECK-DAG: %[[VAL_5:.*]] = arith.constant true
// CHECK-DAG: %[[VAL_6:.*]] = arith.constant 1 : index
// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32xf64, #sparse{{[0-9]*}}>
// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32xf64, #sparse{{[0-9]*}}>
// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xf64, #sparse{{[0-9]*}}>
// CHECK-DAG: %[[VAL_10:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<32xf64> to memref<32xf64>
// CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<32xf64> to memref<32xf64>
// CHECK: %[[VAL_12:.*]] = memref.load %[[VAL_7]]{{\[}}%[[VAL_4]]] : memref<?xindex>
// CHECK: %[[VAL_13:.*]] = memref.load %[[VAL_7]]{{\[}}%[[VAL_6]]] : memref<?xindex>
// CHECK: %[[VAL_14:.*]]:2 = scf.while (%[[VAL_15:.*]] = %[[VAL_12]], %[[VAL_16:.*]] = %[[VAL_4]]) : (index, index) -> (index, index) {
// CHECK: %[[VAL_17:.*]] = arith.cmpi ult, %[[VAL_15]], %[[VAL_13]] : index
// CHECK: scf.condition(%[[VAL_17]]) %[[VAL_15]], %[[VAL_16]] : index, index
// CHECK: } do {
// CHECK: ^bb0(%[[VAL_18:.*]]: index, %[[VAL_19:.*]]: index):
// CHECK: %[[VAL_20:.*]] = memref.load %[[VAL_8]]{{\[}}%[[VAL_18]]] : memref<?xindex>
// CHECK: %[[VAL_21:.*]] = arith.cmpi eq, %[[VAL_20]], %[[VAL_19]] : index
// CHECK: scf.if %[[VAL_21]] {
// CHECK: %[[VAL_22:.*]] = memref.load %[[VAL_9]]{{\[}}%[[VAL_18]]] : memref<?xf64>
// CHECK: %[[VAL_23:.*]] = memref.load %[[VAL_10]]{{\[}}%[[VAL_19]]] : memref<32xf64>
// CHECK: %[[VAL_24:.*]] = arith.addf %[[VAL_22]], %[[VAL_23]] : f64
// CHECK: memref.store %[[VAL_24]], %[[VAL_11]]{{\[}}%[[VAL_19]]] : memref<32xf64>
// CHECK: } else {
// CHECK: scf.if %[[VAL_5]] {
// CHECK: %[[VAL_25:.*]] = memref.load %[[VAL_10]]{{\[}}%[[VAL_19]]] : memref<32xf64>
// CHECK: memref.store %[[VAL_25]], %[[VAL_11]]{{\[}}%[[VAL_19]]] : memref<32xf64>
// CHECK: } else {
// CHECK: }
// CHECK: }
// CHECK: %[[VAL_26:.*]] = arith.cmpi eq, %[[VAL_20]], %[[VAL_19]] : index
// CHECK: %[[VAL_27:.*]] = arith.addi %[[VAL_18]], %[[VAL_6]] : index
// CHECK: %[[VAL_28:.*]] = arith.select %[[VAL_26]], %[[VAL_27]], %[[VAL_18]] : index
// CHECK: %[[VAL_29:.*]] = arith.addi %[[VAL_19]], %[[VAL_6]] : index
// CHECK: scf.yield %[[VAL_28]], %[[VAL_29]] : index, index
// CHECK: }
// CHECK: scf.for %[[VAL_30:.*]] = %[[VAL_31:.*]]#1 to %[[VAL_3]] step %[[VAL_6]] {
// CHECK: %[[VAL_32:.*]] = memref.load %[[VAL_10]]{{\[}}%[[VAL_30]]] : memref<32xf64>
// CHECK: memref.store %[[VAL_32]], %[[VAL_11]]{{\[}}%[[VAL_30]]] : memref<32xf64>
// CHECK: }
// CHECK: %[[VAL_33:.*]] = bufferization.to_tensor %[[VAL_11]] : memref<32xf64>
// CHECK: return %[[VAL_33]] : tensor<32xf64>
// CHECK: }
func.func @add(%arga: tensor<32xf64, #SV>,
               %argb: tensor<32xf64>,
               %argx: tensor<32xf64>) -> tensor<32xf64> {
  %0 = linalg.generic #trait2
     ins(%arga, %argb: tensor<32xf64, #SV>, tensor<32xf64>)
    outs(%argx: tensor<32xf64>) {
      ^bb(%a: f64, %b: f64, %x: f64):
        %0 = arith.addf %a, %b : f64
        linalg.yield %0 : f64
  } -> tensor<32xf64>
  return %0 : tensor<32xf64>
}

// CHECK-LABEL: func @sub(
// CHECK-SAME: %[[VAL_0:.*]]: tensor<32xf64, #sparse{{[0-9]*}}>,
// CHECK-SAME: %[[VAL_1:.*]]: tensor<32xf64>,
// CHECK-SAME: %[[VAL_2:.*]]: tensor<32xf64>) -> tensor<32xf64> {
// CHECK-DAG: %[[VAL_3:.*]] = arith.constant 32 : index
// CHECK-DAG: %[[VAL_4:.*]] = arith.constant 0 : index
// CHECK-DAG: %[[VAL_5:.*]] = arith.constant true
// CHECK-DAG: %[[VAL_6:.*]] = arith.constant 1 : index
// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32xf64, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32xf64, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xf64, #sparse{{[0-9]*}}> to memref<?xf64>
// CHECK-DAG: %[[VAL_10:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<32xf64> to memref<32xf64>
// CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<32xf64> to memref<32xf64>
// CHECK: %[[VAL_12:.*]] = memref.load %[[VAL_7]]{{\[}}%[[VAL_4]]] : memref<?xindex>
// CHECK: %[[VAL_13:.*]] = memref.load %[[VAL_7]]{{\[}}%[[VAL_6]]] : memref<?xindex>
// CHECK: %[[VAL_14:.*]]:2 = scf.while (%[[VAL_15:.*]] = %[[VAL_12]], %[[VAL_16:.*]] = %[[VAL_4]]) : (index, index) -> (index, index) {
// CHECK: %[[VAL_17:.*]] = arith.cmpi ult, %[[VAL_15]], %[[VAL_13]] : index
// CHECK: scf.condition(%[[VAL_17]]) %[[VAL_15]], %[[VAL_16]] : index, index
// CHECK: } do {
// CHECK: ^bb0(%[[VAL_18:.*]]: index, %[[VAL_19:.*]]: index):
// CHECK: %[[VAL_20:.*]] = memref.load %[[VAL_8]]{{\[}}%[[VAL_18]]] : memref<?xindex>
// CHECK: %[[VAL_21:.*]] = arith.cmpi eq, %[[VAL_20]], %[[VAL_19]] : index
// CHECK: scf.if %[[VAL_21]] {
// CHECK: %[[VAL_22:.*]] = memref.load %[[VAL_9]]{{\[}}%[[VAL_18]]] : memref<?xf64>
// CHECK: %[[VAL_23:.*]] = memref.load %[[VAL_10]]{{\[}}%[[VAL_19]]] : memref<32xf64>
// CHECK: %[[VAL_24:.*]] = arith.subf %[[VAL_22]], %[[VAL_23]] : f64
// CHECK: memref.store %[[VAL_24]], %[[VAL_11]]{{\[}}%[[VAL_19]]] : memref<32xf64>
// CHECK: } else {
// CHECK: scf.if %[[VAL_5]] {
// CHECK: %[[VAL_25:.*]] = memref.load %[[VAL_10]]{{\[}}%[[VAL_19]]] : memref<32xf64>
// CHECK: %[[VAL_26:.*]] = arith.negf %[[VAL_25]] : f64
// CHECK: memref.store %[[VAL_26]], %[[VAL_11]]{{\[}}%[[VAL_19]]] : memref<32xf64>
// CHECK: } else {
// CHECK: }
// CHECK: }
// CHECK: %[[VAL_27:.*]] = arith.cmpi eq, %[[VAL_20]], %[[VAL_19]] : index
// CHECK: %[[VAL_28:.*]] = arith.addi %[[VAL_18]], %[[VAL_6]] : index
// CHECK: %[[VAL_29:.*]] = arith.select %[[VAL_27]], %[[VAL_28]], %[[VAL_18]] : index
// CHECK: %[[VAL_30:.*]] = arith.addi %[[VAL_19]], %[[VAL_6]] : index
// CHECK: scf.yield %[[VAL_29]], %[[VAL_30]] : index, index
// CHECK: }
// CHECK: scf.for %[[VAL_31:.*]] = %[[VAL_32:.*]]#1 to %[[VAL_3]] step %[[VAL_6]] {
// CHECK: %[[VAL_33:.*]] = memref.load %[[VAL_10]]{{\[}}%[[VAL_31]]] : memref<32xf64>
// CHECK: %[[VAL_34:.*]] = arith.negf %[[VAL_33]] : f64
// CHECK: memref.store %[[VAL_34]], %[[VAL_11]]{{\[}}%[[VAL_31]]] : memref<32xf64>
// CHECK: }
// CHECK: %[[VAL_35:.*]] = bufferization.to_tensor %[[VAL_11]] : memref<32xf64>
// CHECK: return %[[VAL_35]] : tensor<32xf64>
// CHECK: }
func.func @sub(%arga: tensor<32xf64, #SV>,
               %argb: tensor<32xf64>,
               %argx: tensor<32xf64>) -> tensor<32xf64> {
  %0 = linalg.generic #trait2
     ins(%arga, %argb: tensor<32xf64, #SV>, tensor<32xf64>)
    outs(%argx: tensor<32xf64>) {
      ^bb(%a: f64, %b: f64, %x: f64):
        %0 = arith.subf %a, %b : f64
        linalg.yield %0 : f64
  } -> tensor<32xf64>
  return %0 : tensor<32xf64>
}
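
// Multiplication by a dense vector is zero-preserving, so a single for-loop
// over the stored entries of the sparse operand suffices.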
// CHECK-LABEL: func @mul(
// CHECK-SAME: %[[VAL_0:.*]]: tensor<32xf64, #sparse{{[0-9]*}}>,
// CHECK-SAME: %[[VAL_1:.*]]: tensor<32xf64>,
// CHECK-SAME: %[[VAL_2:.*]]: tensor<32xf64>) -> tensor<32xf64> {
// CHECK-DAG: %[[VAL_3:.*]] = arith.constant 0 : index
// CHECK-DAG: %[[VAL_4:.*]] = arith.constant 1 : index
// CHECK-DAG: %[[VAL_5:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32xf64, #sparse{{[0-9]*}}>
// CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32xf64, #sparse{{[0-9]*}}>
// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xf64, #sparse{{[0-9]*}}>
// CHECK-DAG: %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<32xf64> to memref<32xf64>
// CHECK-DAG: %[[VAL_9:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<32xf64> to memref<32xf64>
// CHECK: %[[VAL_10:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_3]]] : memref<?xindex>
// CHECK: %[[VAL_11:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_4]]] : memref<?xindex>
// CHECK: scf.for %[[VAL_12:.*]] = %[[VAL_10]] to %[[VAL_11]] step %[[VAL_4]] {
// CHECK: %[[VAL_13:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_12]]] : memref<?xindex>
// CHECK: %[[VAL_14:.*]] = memref.load %[[VAL_7]]{{\[}}%[[VAL_12]]] : memref<?xf64>
// CHECK: %[[VAL_15:.*]] = memref.load %[[VAL_8]]{{\[}}%[[VAL_13]]] : memref<32xf64>
// CHECK: %[[VAL_16:.*]] = arith.mulf %[[VAL_14]], %[[VAL_15]] : f64
// CHECK: memref.store %[[VAL_16]], %[[VAL_9]]{{\[}}%[[VAL_13]]] : memref<32xf64>
// CHECK: }
// CHECK: %[[VAL_17:.*]] = bufferization.to_tensor %[[VAL_9]] : memref<32xf64>
// CHECK: return %[[VAL_17]] : tensor<32xf64>
// CHECK: }
func.func @mul(%arga: tensor<32xf64, #SV>,
               %argb: tensor<32xf64>,
               %argx: tensor<32xf64>) -> tensor<32xf64> {
  %0 = linalg.generic #trait2
     ins(%arga, %argb: tensor<32xf64, #SV>, tensor<32xf64>)
    outs(%argx: tensor<32xf64>) {
      ^bb(%a: f64, %b: f64, %x: f64):
        %0 = arith.mulf %a, %b : f64
        linalg.yield %0 : f64
  } -> tensor<32xf64>
  return %0 : tensor<32xf64>
}

// CHECK-LABEL: func @divbyc(
// CHECK-SAME: %[[VAL_0:.*]]: tensor<32xf64, #sparse{{[0-9]*}}>,
// CHECK-SAME: %[[VAL_1:.*]]: tensor<32xf64>) -> tensor<32xf64> {
// CHECK-DAG: %[[VAL_2:.*]] = arith.constant 2.000000e+00 : f64
// CHECK-DAG: %[[VAL_3:.*]] = arith.constant 0 : index
// CHECK-DAG: %[[VAL_4:.*]] = arith.constant 1 : index
// CHECK-DAG: %[[VAL_5:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32xf64, #sparse{{[0-9]*}}>
// CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32xf64, #sparse{{[0-9]*}}>
// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xf64, #sparse{{[0-9]*}}>
// CHECK-DAG: %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_1]] : tensor<32xf64> to memref<32xf64>
// CHECK: %[[VAL_9:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_3]]] : memref<?xindex>
// CHECK: %[[VAL_10:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_4]]] : memref<?xindex>
// CHECK: scf.for %[[VAL_11:.*]] = %[[VAL_9]] to %[[VAL_10]] step %[[VAL_4]] {
// CHECK: %[[VAL_12:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_11]]] : memref<?xindex>
// CHECK: %[[VAL_13:.*]] = memref.load %[[VAL_7]]{{\[}}%[[VAL_11]]] : memref<?xf64>
// CHECK: %[[VAL_14:.*]] = arith.divf %[[VAL_13]], %[[VAL_2]] : f64
// CHECK: memref.store %[[VAL_14]], %[[VAL_8]]{{\[}}%[[VAL_12]]] : memref<32xf64>
// CHECK: }
// CHECK: %[[VAL_15:.*]] = bufferization.to_tensor %[[VAL_8]] : memref<32xf64>
// CHECK: return %[[VAL_15]] : tensor<32xf64>
// CHECK: }
func.func @divbyc(%arga: tensor<32xf64, #SV>,
                  %argx: tensor<32xf64>) -> tensor<32xf64> {
  %c = arith.constant 2.0 : f64
  %0 = linalg.generic #traitc
     ins(%arga: tensor<32xf64, #SV>)
    outs(%argx: tensor<32xf64>) {
      ^bb(%a: f64, %x: f64):
        %0 = arith.divf %a, %c : f64
        linalg.yield %0 : f64
  } -> tensor<32xf64>
  return %0 : tensor<32xf64>
}
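
// A chain of zero-preserving unary ops with a sparse output: each result is
// inserted directly into the sparse result tensor, which is finalized by
// sparse_tensor.load with hasInserts.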
// CHECK-LABEL: func.func @zero_preserving_math(
// CHECK-SAME: %[[VAL_0:.*]]: tensor<32xf64, #sparse{{[0-9]*}}>) -> tensor<32xf64, #sparse{{[0-9]*}}> {
// CHECK-DAG: %[[VAL_1:.*]] = arith.constant 0 : index
// CHECK-DAG: %[[VAL_2:.*]] = arith.constant 1 : index
// CHECK-DAG: %[[VAL_3:.*]] = tensor.empty() : tensor<32xf64, #sparse{{[0-9]*}}>
// CHECK-DAG: %[[VAL_4:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32xf64, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-DAG: %[[VAL_5:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32xf64, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xf64, #sparse{{[0-9]*}}> to memref<?xf64>
// CHECK: %[[VAL_7:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_1]]] : memref<?xindex>
// CHECK: %[[VAL_8:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_2]]] : memref<?xindex>
// CHECK: %[[T:.*]] = scf.for %[[VAL_9:.*]] = %[[VAL_7]] to %[[VAL_8]] step %[[VAL_2]] {{.*}} {
// CHECK: %[[VAL_10:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_9]]] : memref<?xindex>
// CHECK: %[[VAL_11:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_9]]] : memref<?xf64>
// CHECK: %[[VAL_12:.*]] = math.absf %[[VAL_11]] : f64
// CHECK: %[[VAL_13:.*]] = math.ceil %[[VAL_12]] : f64
// CHECK: %[[VAL_14:.*]] = math.floor %[[VAL_13]] : f64
// CHECK: %[[VAL_15:.*]] = math.sqrt %[[VAL_14]] : f64
// CHECK: %[[VAL_16:.*]] = math.expm1 %[[VAL_15]] : f64
// CHECK: %[[VAL_17:.*]] = math.log1p %[[VAL_16]] : f64
// CHECK: %[[VAL_18:.*]] = math.sin %[[VAL_17]] : f64
// CHECK: %[[VAL_19:.*]] = math.tanh %[[VAL_18]] : f64
// CHECK: %[[Y:.*]] = tensor.insert %[[VAL_19]] into %{{.*}}[%[[VAL_10]]] : tensor<32xf64, #sparse{{[0-9]*}}>
// CHECK: scf.yield %[[Y]]
// CHECK: }
// CHECK: %[[VAL_20:.*]] = sparse_tensor.load %[[T]] hasInserts : tensor<32xf64, #sparse{{[0-9]*}}>
// CHECK: return %[[VAL_20]] : tensor<32xf64, #sparse{{[0-9]*}}>
// CHECK: }
func.func @zero_preserving_math(%arga: tensor<32xf64, #SV>) -> tensor<32xf64, #SV> {
  %c32 = arith.constant 32 : index
  %xinp = tensor.empty() : tensor<32xf64, #SV>
  %0 = linalg.generic #trait1
     ins(%arga: tensor<32xf64, #SV>)
    outs(%xinp: tensor<32xf64, #SV>) {
      ^bb(%a: f64, %x: f64):
        %0 = math.absf %a : f64
        %1 = math.ceil %0 : f64
        %2 = math.floor %1 : f64
        %3 = math.sqrt %2 : f64
        %4 = math.expm1 %3 : f64
        %5 = math.log1p %4 : f64
        %6 = math.sin %5 : f64
        %7 = math.tanh %6 : f64
        linalg.yield %7 : f64
  } -> tensor<32xf64, #SV>
  return %0 : tensor<32xf64, #SV>
}
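
// Same pattern over complex values: dividing by a nonzero complex constant is
// zero-preserving, and results are inserted into the sparse output.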
// CHECK-LABEL: func.func @complex_divbyc(
// CHECK-SAME: %[[VAL_0:.*]]: tensor<32xcomplex<f64>, #sparse{{.*}}> {
// CHECK-DAG: %[[VAL_1:.*]] = arith.constant 0 : index
// CHECK-DAG: %[[VAL_2:.*]] = arith.constant 1 : index
// CHECK-DAG: %[[VAL_3:.*]] = complex.constant [0.000000e+00, 1.000000e+00] : complex<f64>
// CHECK-DAG: %[[VAL_4:.*]] = tensor.empty() : tensor<32xcomplex<f64>, #sparse{{[0-9]*}}>
// CHECK-DAG: %[[VAL_5:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32xcomplex<f64>, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32xcomplex<f64>, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xcomplex<f64>, #sparse{{[0-9]*}}> to memref<?xcomplex<f64>>
// CHECK: %[[VAL_8:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_1]]] : memref<?xindex>
// CHECK: %[[VAL_9:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_2]]] : memref<?xindex>
// CHECK: %[[T:.*]] = scf.for %[[VAL_10:.*]] = %[[VAL_8]] to %[[VAL_9]] step %[[VAL_2]] {{.*}} {
// CHECK: %[[VAL_11:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_10]]] : memref<?xindex>
// CHECK: %[[VAL_12:.*]] = memref.load %[[VAL_7]]{{\[}}%[[VAL_10]]] : memref<?xcomplex<f64>>
// CHECK: %[[VAL_13:.*]] = complex.div %[[VAL_12]], %[[VAL_3]] : complex<f64>
// CHECK: %[[Y:.*]] = tensor.insert %[[VAL_13]] into %{{.*}}[%[[VAL_11]]] : tensor<32xcomplex<f64>, #sparse{{[0-9]*}}>
// CHECK: scf.yield %[[Y]]
// CHECK: }
// CHECK: %[[VAL_14:.*]] = sparse_tensor.load %[[T]] hasInserts : tensor<32xcomplex<f64>, #sparse{{[0-9]*}}>
// CHECK: return %[[VAL_14]] : tensor<32xcomplex<f64>, #sparse{{[0-9]*}}>
// CHECK: }
func.func @complex_divbyc(%arg0: tensor<32xcomplex<f64>, #SV>) -> tensor<32xcomplex<f64>, #SV> {
  %c = complex.constant [0.0, 1.0] : complex<f64>
  %init = tensor.empty() : tensor<32xcomplex<f64>, #SV>
  %0 = linalg.generic #traitc
     ins(%arg0: tensor<32xcomplex<f64>, #SV>)
    outs(%init: tensor<32xcomplex<f64>, #SV>) {
      ^bb(%a: complex<f64>, %x: complex<f64>):
        %0 = complex.div %a, %c : complex<f64>
        linalg.yield %0 : complex<f64>
  } -> tensor<32xcomplex<f64>, #SV>
  return %0 : tensor<32xcomplex<f64>, #SV>
}