// RUN: mlir-opt -transform-interpreter -split-input-file --cse %s | FileCheck %s

func.func @simple_matmul(%arg0 : tensor<?x?xf32>, %arg1 : tensor<?x?xf32>,
    %arg2 : tensor<?x?xf32>) -> tensor<?x?xf32> {
  %0 = linalg.matmul
      ins(%arg0, %arg1 : tensor<?x?xf32>, tensor<?x?xf32>)
      outs(%arg2 : tensor<?x?xf32>) -> tensor<?x?xf32>
  return %0 : tensor<?x?xf32>
}

module attributes {transform.with_named_sequence} {
  transform.named_sequence @__transform_main(%arg1 : !transform.any_op {transform.readonly}) {
    %matmul = transform.structured.match ops{["linalg.matmul"]} in %arg1
      : (!transform.any_op) -> !transform.any_op
    %a, %b = transform.test.tile_using_forall %matmul [10, 20] mapping = [#gpu.block<y>, #gpu.block<x>]
      : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
    transform.yield
  }
}
// CHECK-DAG: #[[MAP0:.+]] = affine_map<(d0)[s0] -> (-d0 + s0, 10)>
// CHECK-DAG: #[[MAP1:.+]] = affine_map<(d0)[s0] -> (-d0 + s0, 20)>
// CHECK: func.func @simple_matmul(
// CHECK-SAME: %[[ARG0:[a-zA-Z0-9]+]]: tensor<?x?xf32>
// CHECK-SAME: %[[ARG1:[a-zA-Z0-9]+]]: tensor<?x?xf32>
// CHECK-SAME: %[[ARG2:[a-zA-Z0-9]+]]: tensor<?x?xf32>
// CHECK-DAG: %[[C0:.+]] = arith.constant 0 : index
// CHECK-DAG: %[[C1:.+]] = arith.constant 1 : index
// CHECK-DAG: %[[M:.+]] = tensor.dim %[[ARG0]], %[[C0]]
// CHECK-DAG: %[[K:.+]] = tensor.dim %[[ARG0]], %[[C1]]
// CHECK-DAG: %[[N:.+]] = tensor.dim %[[ARG1]], %[[C1]]
// CHECK: %[[RESULT:.+]] = scf.forall (%[[IV0:[a-zA-Z0-9]+]], %[[IV1:[a-zA-Z0-9]+]]) =
// CHECK-SAME: (0, 0) to (%[[M]], %[[N]]) step (10, 20) shared_outs(%[[INIT:.+]] = %[[ARG2]])
// CHECK: %[[TS_Y:.+]] = affine.min #[[MAP0]](%[[IV0]])[%[[M]]]
// CHECK: %[[TS_X:.+]] = affine.min #[[MAP1]](%[[IV1]])[%[[N]]]
// CHECK: %[[LHS_TILE:.+]] = tensor.extract_slice %[[ARG0]]
// CHECK-SAME: [%[[IV0]], 0] [%[[TS_Y]], %[[K]]] [1, 1]
// CHECK: %[[RHS_TILE:.+]] = tensor.extract_slice %[[ARG1]]
// CHECK-SAME: [0, %[[IV1]]] [%[[K]], %[[TS_X]]] [1, 1]
// CHECK: %[[INIT_TILE:.+]] = tensor.extract_slice %[[INIT]]
// CHECK-SAME: [%[[IV0]], %[[IV1]]] [%[[TS_Y]], %[[TS_X]]] [1, 1]
// CHECK: %[[GEMM_TILE:.+]] = linalg.matmul
// CHECK-SAME: ins(%[[LHS_TILE]], %[[RHS_TILE]] :
// CHECK-SAME: outs(%[[INIT_TILE]] :
// CHECK: scf.forall.in_parallel {
// CHECK: tensor.parallel_insert_slice %[[GEMM_TILE]] into %[[INIT]]
// CHECK-SAME: [%[[IV0]], %[[IV1]]] [%[[TS_Y]], %[[TS_X]]] [1, 1]
// CHECK: mapping = [#gpu.block<y>, #gpu.block<x>]
// CHECK: return %[[RESULT]]

// -----

func.func @simple_matmul_memref(%arg0 : memref<?x?xf32>, %arg1 : memref<?x?xf32>,
    %arg2 : memref<?x?xf32>) {
  linalg.matmul ins(%arg0, %arg1 : memref<?x?xf32>, memref<?x?xf32>)
      outs(%arg2 : memref<?x?xf32>)
  return
}

module attributes {transform.with_named_sequence} {
  transform.named_sequence @__transform_main(%arg1 : !transform.any_op {transform.readonly}) {
    %matmul = transform.structured.match ops{["linalg.matmul"]} in %arg1
      : (!transform.any_op) -> !transform.any_op
    %a, %b = transform.test.tile_using_forall %matmul [10, 20]
      : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
    transform.yield
  }
}
// CHECK-DAG: #[[$MAP0:.+]] = affine_map<(d0)[s0] -> (-d0 + s0, 10)>
// CHECK-DAG: #[[$MAP1:.+]] = affine_map<(d0)[s0] -> (-d0 + s0, 20)>
// CHECK-LABEL: func.func @simple_matmul_memref(
// CHECK-SAME: %[[ARG0:[a-zA-Z0-9]+]]: memref<?x?xf32>
// CHECK-SAME: %[[ARG1:[a-zA-Z0-9]+]]: memref<?x?xf32>
// CHECK-SAME: %[[ARG2:[a-zA-Z0-9]+]]: memref<?x?xf32>
// CHECK-DAG: %[[C0:.+]] = arith.constant 0 : index
// CHECK-DAG: %[[C1:.+]] = arith.constant 1 : index
// CHECK-DAG: %[[M:.+]] = memref.dim %[[ARG0]], %[[C0]]
// CHECK-DAG: %[[K:.+]] = memref.dim %[[ARG0]], %[[C1]]
// CHECK-DAG: %[[N:.+]] = memref.dim %[[ARG1]], %[[C1]]
// CHECK: scf.forall (%[[IV0:[a-zA-Z0-9]+]], %[[IV1:[a-zA-Z0-9]+]]) = (0, 0) to (%[[M]], %[[N]]) step (10, 20) {
// CHECK-DAG: %[[TS_M:.+]] = affine.min #[[$MAP0]](%[[IV0]])[%[[M]]]
// CHECK-DAG: %[[TS_N:.+]] = affine.min #[[$MAP1]](%[[IV1]])[%[[N]]]
// CHECK-DAG: %[[LHS_TILE:.+]] = memref.subview %[[ARG0]]
// CHECK-SAME: [%[[IV0]], 0] [%[[TS_M]], %[[K]]] [1, 1]
// CHECK-DAG: %[[RHS_TILE:.+]] = memref.subview %[[ARG1]]
// CHECK-SAME: [0, %[[IV1]]] [%[[K]], %[[TS_N]]] [1, 1]
// CHECK-DAG: %[[OUT_TILE:.+]] = memref.subview %[[ARG2]]
// CHECK-SAME: [%[[IV0]], %[[IV1]]] [%[[TS_M]], %[[TS_N]]] [1, 1]
// CHECK: linalg.matmul
// CHECK-SAME: ins(%[[LHS_TILE]], %[[RHS_TILE]] :
// CHECK-SAME: outs(%[[OUT_TILE]] :

// -----
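
// Tiling a `linalg.generic` with two results whose indexing maps permute the
// iteration dimensions. Tile sizes [10, 0, 20] tile only d0 and d2; each
// result tile is written back by its own `tensor.parallel_insert_slice`, with
// offsets and sizes permuted according to the corresponding output map.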
#map0 = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
#map1 = affine_map<(d0, d1, d2) -> (d0, d2, d1)>
#map2 = affine_map<(d0, d1, d2) -> (d2, d0, d1)>
func.func @multi_result(%arg0 : tensor<128x200x300xf32>) -> (tensor<128x300x200xf32>, tensor<300x128x200xf32>) {
  %init0 = tensor.empty() : tensor<128x300x200xf32>
  %init1 = tensor.empty() : tensor<300x128x200xf32>
  %0:2 = linalg.generic {
      indexing_maps = [#map0, #map1, #map2],
      iterator_types = ["parallel", "parallel", "parallel"]}
      ins(%arg0 : tensor<128x200x300xf32>)
      outs(%init0, %init1 : tensor<128x300x200xf32>, tensor<300x128x200xf32>) {
    ^bb0(%b0 : f32, %b1 : f32, %b2 : f32):
      linalg.yield %b0, %b0 : f32, f32
  } -> (tensor<128x300x200xf32>, tensor<300x128x200xf32>)
  return %0#0, %0#1 : tensor<128x300x200xf32>, tensor<300x128x200xf32>
}

module attributes {transform.with_named_sequence} {
  transform.named_sequence @__transform_main(%arg1 : !transform.any_op {transform.readonly}) {
    %generic = transform.structured.match ops{["linalg.generic"]} in %arg1
      : (!transform.any_op) -> !transform.any_op
    %a, %b = transform.test.tile_using_forall %generic [10, 0, 20]
      : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
    transform.yield
  }
}
// CHECK-DAG: #[[$MAP0:.+]] = affine_map<(d0) -> (-d0 + 128, 10)>
// CHECK-LABEL: func.func @multi_result(
// CHECK-SAME: %[[ARG0:[a-zA-Z0-9]+]]: tensor<128x200x300xf32>)
// CHECK-DAG: %[[INIT0:.+]] = tensor.empty()
// CHECK-DAG: %[[INIT1:.+]] = tensor.empty()
// CHECK: %[[OUTER:[a-zA-Z0-9]+]]:2 = scf.forall (%[[IV0:[a-zA-Z0-9]+]], %[[IV1:[a-zA-Z0-9]+]]) = (0, 0) to (128, 300) step (10, 20)
// CHECK-SAME: shared_outs(%[[ARG1:[a-zA-Z0-9]+]] = %[[INIT0]], %[[ARG2:[a-zA-Z0-9]+]] = %[[INIT1]])
// CHECK: %[[TS_Y:.+]] = affine.min #[[$MAP0]](%[[IV0]])
// CHECK: %[[ARG_TILE:.+]] = tensor.extract_slice %[[ARG0]]
// CHECK-SAME: [%[[IV0]], 0, %[[IV1]]] [%[[TS_Y]], 200, 20] [1, 1, 1]
// CHECK-DAG: %[[INIT0_TILE:.+]] = tensor.extract_slice %[[ARG1]]
// CHECK-SAME: [%[[IV0]], %[[IV1]], 0] [%[[TS_Y]], 20, 200] [1, 1, 1]
// CHECK-DAG: %[[INIT1_TILE:.+]] = tensor.extract_slice %[[ARG2]]
// CHECK-SAME: [%[[IV1]], %[[IV0]], 0] [20, %[[TS_Y]], 200] [1, 1, 1]
// CHECK: %[[RESULT_TILE:.+]]:2 = linalg.generic
// CHECK-SAME: ins(%[[ARG_TILE]] :
// CHECK-SAME: outs(%[[INIT0_TILE]], %[[INIT1_TILE]] :
// CHECK: scf.forall.in_parallel {
// CHECK-DAG: tensor.parallel_insert_slice %[[RESULT_TILE]]#0 into %[[ARG1]][%[[IV0]], %[[IV1]], 0] [%[[TS_Y]], 20, 200] [1, 1, 1]
// CHECK-DAG: tensor.parallel_insert_slice %[[RESULT_TILE]]#1 into %[[ARG2]][%[[IV1]], %[[IV0]], 0] [20, %[[TS_Y]], 200] [1, 1, 1]
// CHECK: }
// CHECK: return %[[OUTER]]#0, %[[OUTER]]#1

// -----
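
// Tiling the reduction dimensions of a convolution: tile sizes
// [0, 0, 0, 0, 10, 20, 30] tile the filter height, filter width, and input
// channels. The input window accessed along H/W is
//   (output_size - 1) * stride + (filter_tile_size - 1) * dilation + 1,
// which for strides [2, 3] and unit dilations folds to the affine maps
// d0 + s0 * 2 - 2 and d0 + s0 * 3 - 3 checked below. (Note: the op spells
// the attribute `dilation` rather than the canonical `dilations`, so it is
// kept as a discardable attribute and tiling appears to use the default unit
// dilations, as those maps confirm.)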
func.func @conv2D(%arg0 : tensor<?x?x?x?xf32>, %arg1 : tensor<?x?x?x?xf32>,
    %arg2 : tensor<?x?x?x?xf32>) -> tensor<?x?x?x?xf32> {
  %0 = linalg.conv_2d_nhwc_hwcf {
      strides = dense<[2, 3]> : tensor<2xi64>,
      dilation = dense<[4, 5]> : tensor<2xi64>}
      ins(%arg0, %arg1 : tensor<?x?x?x?xf32>, tensor<?x?x?x?xf32>)
      outs(%arg2 : tensor<?x?x?x?xf32>) -> tensor<?x?x?x?xf32>
  return %0 : tensor<?x?x?x?xf32>
}

module attributes {transform.with_named_sequence} {
  transform.named_sequence @__transform_main(%arg1 : !transform.any_op {transform.readonly}) {
    %conv = transform.structured.match ops{["linalg.conv_2d_nhwc_hwcf"]} in %arg1
      : (!transform.any_op) -> !transform.any_op
    %a, %b = transform.test.tile_using_forall %conv [0, 0, 0, 0, 10, 20, 30]
      : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
    transform.yield
  }
}
// CHECK-DAG: #[[$MAP0:.+]] = affine_map<(d0)[s0] -> (-d0 + s0, 10)>
// CHECK-DAG: #[[$MAP1:.+]] = affine_map<(d0)[s0] -> (-d0 + s0, 20)>
// CHECK-DAG: #[[$MAP2:.+]] = affine_map<(d0)[s0] -> (-d0 + s0, 30)>
// CHECK-DAG: #[[$MAP3:.+]] = affine_map<(d0)[s0] -> (d0 + s0 * 2 - 2)>
// CHECK-DAG: #[[$MAP4:.+]] = affine_map<(d0)[s0] -> (d0 + s0 * 3 - 3)>
// CHECK-LABEL: func.func @conv2D(
// CHECK-SAME: %[[INPUT:[a-zA-Z0-9]+]]: tensor<?x?x?x?xf32>
// CHECK-SAME: %[[FILTER:[a-zA-Z0-9]+]]: tensor<?x?x?x?xf32>
// CHECK-SAME: %[[INIT:[a-zA-Z0-9]+]]: tensor<?x?x?x?xf32>
// CHECK-DAG: %[[C0:.+]] = arith.constant 0 : index
// CHECK-DAG: %[[C1:.+]] = arith.constant 1 : index
// CHECK-DAG: %[[C2:.+]] = arith.constant 2 : index
// CHECK-DAG: %[[C3:.+]] = arith.constant 3 : index
// CHECK-DAG: %[[N:.+]] = tensor.dim %[[INPUT]], %[[C0]]
// CHECK-DAG: %[[C:.+]] = tensor.dim %[[INPUT]], %[[C3]]
// CHECK-DAG: %[[P:.+]] = tensor.dim %[[FILTER]], %[[C0]]
// CHECK-DAG: %[[Q:.+]] = tensor.dim %[[FILTER]], %[[C1]]
// CHECK-DAG: %[[F:.+]] = tensor.dim %[[FILTER]], %[[C3]]
// CHECK-DAG: %[[R:.+]] = tensor.dim %[[INIT]], %[[C1]]
// CHECK-DAG: %[[S:.+]] = tensor.dim %[[INIT]], %[[C2]]
// CHECK: %[[RESULT:.+]] = scf.forall (%[[IV0:[a-zA-Z0-9]+]], %[[IV1:[a-zA-Z0-9]+]], %[[IV2:[a-zA-Z0-9]+]]) =
// CHECK-SAME: (0, 0, 0) to (%[[P]], %[[Q]], %[[C]]) step (10, 20, 30) shared_outs(%[[INIT0:.+]] = %[[INIT]])
// CHECK-DAG: %[[TS_P:.+]] = affine.min #[[$MAP0]](%[[IV0]])[%[[P]]]
// CHECK-DAG: %[[TS_Q:.+]] = affine.min #[[$MAP1]](%[[IV1]])[%[[Q]]]
// CHECK-DAG: %[[TS_C:.+]] = affine.min #[[$MAP2]](%[[IV2]])[%[[C]]]
// CHECK-DAG: %[[TS_H:.+]] = affine.apply #[[$MAP3]](%[[TS_P]])[%[[R]]]
// CHECK-DAG: %[[TS_W:.+]] = affine.apply #[[$MAP4]](%[[TS_Q]])[%[[S]]]
// CHECK-DAG: %[[INPUT_TILE:.+]] = tensor.extract_slice %[[INPUT]]
// CHECK-SAME: [0, %[[IV0]], %[[IV1]], %[[IV2]]] [%[[N]], %[[TS_H]], %[[TS_W]], %[[TS_C]]]
// CHECK-DAG: %[[FILTER_TILE:.+]] = tensor.extract_slice %[[FILTER]]
// CHECK-SAME: [%[[IV0]], %[[IV1]], %[[IV2]], 0] [%[[TS_P]], %[[TS_Q]], %[[TS_C]], %[[F]]]
// CHECK-DAG: %[[INIT_TILE:.+]] = tensor.extract_slice %[[INIT0]]
// CHECK-SAME: [0, 0, 0, 0] [%[[N]], %[[R]], %[[S]], %[[F]]]
// CHECK: %[[CONV_TILE:.+]] = linalg.conv_2d_nhwc_hwcf
// CHECK-SAME: dilation = dense<[4, 5]> : tensor<2xi64>, strides = dense<[2, 3]> : tensor<2xi64>
// CHECK-SAME: ins(%[[INPUT_TILE]], %[[FILTER_TILE]] :
// CHECK-SAME: outs(%[[INIT_TILE]] :
// CHECK: scf.forall.in_parallel
// CHECK: tensor.parallel_insert_slice %[[CONV_TILE]] into %[[INIT0]]
// CHECK-SAME: [0, 0, 0, 0] [%[[N]], %[[R]], %[[S]], %[[F]]] [1, 1, 1, 1]
// CHECK: return %[[RESULT]]

// -----
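
// When the tiled op has index semantics, every `linalg.index` inside the tile
// must be offset by the tile's position in the original iteration space:
// amended_index = iv + linalg.index, materialized by the `affine.apply` ops
// checked below.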
// CHECK: #[[$MAP_ADD:.+]] = affine_map<(d0)[s0] -> (d0 + s0)>

func.func @indexed_semantics(%arg0: tensor<?x?xf32>, %arg1: tensor<?x?xf32>) -> tensor<?x?xf32> {
  // Check that we correctly amend "linalg.index" results.
  %0 = linalg.generic {
      indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>,
                       affine_map<(d0, d1) -> (d0, d1)>],
      iterator_types = ["parallel", "parallel"]}
      ins(%arg0: tensor<?x?xf32>)
      outs(%arg1: tensor<?x?xf32>) {
    ^bb0(%arg2: f32, %arg3: f32):
      %1 = linalg.index 0 : index
      %2 = linalg.index 1 : index
      %3 = arith.addi %1, %2 : index
      %4 = arith.index_cast %3 : index to i64
      %5 = arith.uitofp %4 : i64 to f32
      %6 = arith.addf %5, %arg2 : f32
      linalg.yield %6 : f32
  } -> (tensor<?x?xf32>)
  return %0 : tensor<?x?xf32>
}

module attributes {transform.with_named_sequence} {
  transform.named_sequence @__transform_main(%arg1 : !transform.any_op {transform.readonly}) {
    %generic = transform.structured.match ops{["linalg.generic"]} in %arg1
      : (!transform.any_op) -> !transform.any_op
    %a, %b = transform.test.tile_using_forall %generic [10, 20]
      : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
    transform.yield
  }
}

// CHECK-LABEL: @indexed_semantics
// CHECK: scf.forall (%[[I0:.+]], %[[I1:.+]]) =
// CHECK: %[[INDEX0:.+]] = linalg.index 0
// CHECK: %[[INDEX0_AMENDED:.+]] = affine.apply #[[$MAP_ADD]](%[[I0]])[%[[INDEX0]]]
// CHECK: %[[INDEX1:.+]] = linalg.index 1
// CHECK: %[[INDEX1_AMENDED:.+]] = affine.apply #[[$MAP_ADD]](%[[I1]])[%[[INDEX1]]]
// CHECK: arith.addi %[[INDEX0_AMENDED]], %[[INDEX1_AMENDED]]

// -----
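
// `interchange = [1, 0]` swaps the loop order of the generated forall: it
// iterates (0, 0) to (%N, %M) step (20, 10), while the slice offsets still
// address the operands in their original (M, N) layout, i.e. via (IV1, IV0).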
func.func @interchange_matmul(%arg0 : tensor<?x?xf32>, %arg1 : tensor<?x?xf32>,
    %arg2 : tensor<?x?xf32>) -> tensor<?x?xf32> {
  %0 = linalg.matmul ins(%arg0, %arg1 : tensor<?x?xf32>, tensor<?x?xf32>)
      outs(%arg2 : tensor<?x?xf32>) -> tensor<?x?xf32>
  return %0 : tensor<?x?xf32>
}

module attributes {transform.with_named_sequence} {
  transform.named_sequence @__transform_main(%arg1 : !transform.any_op {transform.readonly}) {
    %matmul = transform.structured.match ops{["linalg.matmul"]} in %arg1
      : (!transform.any_op) -> !transform.any_op
    %a, %b = transform.test.tile_using_forall %matmul [10, 20] interchange = [1, 0] mapping = [#gpu.block<y>, #gpu.block<x>]
      : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
    transform.yield
  }
}
// CHECK-DAG: #[[$MAP0:.+]] = affine_map<(d0)[s0] -> (-d0 + s0, 20)>
// CHECK-DAG: #[[$MAP2:.+]] = affine_map<(d0)[s0] -> (-d0 + s0, 10)>
// CHECK-LABEL: func.func @interchange_matmul(
// CHECK-SAME: %[[ARG0:[a-zA-Z0-9]+]]: tensor<?x?xf32>
// CHECK-SAME: %[[ARG1:[a-zA-Z0-9]+]]: tensor<?x?xf32>
// CHECK-SAME: %[[ARG2:[a-zA-Z0-9]+]]: tensor<?x?xf32>
// CHECK-DAG: %[[C0:.+]] = arith.constant 0 : index
// CHECK-DAG: %[[C1:.+]] = arith.constant 1 : index
// CHECK-DAG: %[[M:.+]] = tensor.dim %[[ARG0]], %[[C0]]
// CHECK-DAG: %[[K:.+]] = tensor.dim %[[ARG0]], %[[C1]]
// CHECK-DAG: %[[N:.+]] = tensor.dim %[[ARG1]], %[[C1]]
// CHECK: %[[OUTER:[a-zA-Z0-9]+]] = scf.forall (%[[IV0:[a-zA-Z0-9]+]], %[[IV1:[a-zA-Z0-9]+]]
// CHECK-SAME: (0, 0) to (%[[N]], %[[M]]) step (20, 10)
// CHECK-SAME: shared_outs(%[[INIT0:.+]] = %[[ARG2]])
// CHECK-DAG: %[[TS_N:.+]] = affine.min #[[$MAP0]](%[[IV0]])[%[[N]]]
// CHECK-DAG: %[[TS_M:.+]] = affine.min #[[$MAP2]](%[[IV1]])[%[[M]]]
// CHECK-DAG: %[[LHS_TILE:.+]] = tensor.extract_slice %[[ARG0]]
// CHECK-SAME: [%[[IV1]], 0] [%[[TS_M]], %[[K]]] [1, 1]
// CHECK-DAG: %[[RHS_TILE:.+]] = tensor.extract_slice %[[ARG1]]
// CHECK-SAME: [0, %[[IV0]]] [%[[K]], %[[TS_N]]] [1, 1]
// CHECK-DAG: %[[INIT_TILE:.+]] = tensor.extract_slice %[[INIT0]]
// CHECK-SAME: [%[[IV1]], %[[IV0]]] [%[[TS_M]], %[[TS_N]]] [1, 1]
// CHECK: %[[GEMM_TILE:.+]] = linalg.matmul
// CHECK-SAME: ins(%[[LHS_TILE]], %[[RHS_TILE]] :
// CHECK-SAME: outs(%[[INIT_TILE]] :
// CHECK: scf.forall.in_parallel {
// CHECK: tensor.parallel_insert_slice %[[GEMM_TILE]] into %[[INIT0]]
// CHECK-SAME: [%[[IV1]], %[[IV0]]] [%[[TS_M]], %[[TS_N]]] [1, 1]
// CHECK: } {mapping = [#gpu.block<y>, #gpu.block<x>]}
// CHECK: return %[[OUTER]]

// -----

func.func @check_scalar_operation(%arg0 : tensor<f32>) -> tensor<f32> {
  %init = tensor.empty() : tensor<f32>
  %0 = linalg.generic {
      indexing_maps = [affine_map<() -> ()>, affine_map<() -> ()>],
      iterator_types = []}
      ins(%arg0 : tensor<f32>) outs(%init : tensor<f32>) {
    ^bb0(%b0 : f32, %b1 : f32):
      %1 = arith.mulf %b0, %b0 : f32
      linalg.yield %1 : f32
  } -> tensor<f32>
  return %0 : tensor<f32>
}

module attributes {transform.with_named_sequence} {
  transform.named_sequence @__transform_main(%arg1 : !transform.any_op {transform.readonly}) {
    %generic = transform.structured.match ops{["linalg.generic"]} in %arg1
      : (!transform.any_op) -> !transform.any_op
    %a = transform.test.tile_using_forall %generic []
      : (!transform.any_op) -> (!transform.any_op)
    transform.yield
  }
}
// CHECK-LABEL: func @check_scalar_operation
// CHECK-NOT: scf.for
// CHECK: linalg.generic

// -----

func.func @check_scalar_memref_operation(%arg0 : memref<f32>, %arg1 : memref<f32>) {
  linalg.generic {
      indexing_maps = [affine_map<() -> ()>, affine_map<() -> ()>],
      iterator_types = []}
      ins(%arg0 : memref<f32>) outs(%arg1 : memref<f32>) {
    ^bb0(%b0 : f32, %b1 : f32):
      %1 = arith.mulf %b0, %b0 : f32
      linalg.yield %1 : f32
  }
  return
}

module attributes {transform.with_named_sequence} {
  transform.named_sequence @__transform_main(%arg1 : !transform.any_op {transform.readonly}) {
    %generic = transform.structured.match ops{["linalg.generic"]} in %arg1
      : (!transform.any_op) -> !transform.any_op
    %a = transform.test.tile_using_forall %generic []
      : (!transform.any_op) -> (!transform.any_op)
    transform.yield
  }
}
// CHECK-LABEL: func @check_scalar_memref_operation
// CHECK-NOT: scf.for
// CHECK: linalg.generic