// RUN: mlir-opt --transform-interpreter %s | FileCheck %s

// Split-reduction by scaling: the 256-wide reduction dimension of the matmul
// is split by a factor of 4, partial sums are accumulated into an allocated
// tensor<?x32x64xf32>, and a second linalg.generic reduces them to the result.

// CHECK-LABEL: func.func @matmul_split
func.func @matmul_split(%A : tensor<?x256xf32>, %B: tensor<256x32xf32>, %C: tensor<?x32xf32>) -> tensor<?x32xf32> {

  // CHECK: bufferization.alloc_tensor({{.*}}) : tensor<?x32x64xf32>
  // CHECK: linalg.generic
  // CHECK-SAME: iterator_types = ["parallel", "parallel", "parallel", "reduction"]
  // CHECK-SAME: ins(%{{[a-zA-Z0-9]*}}, %{{[a-zA-Z0-9]*}}, %{{[a-zA-Z0-9]*}} : tensor<?x256xf32>, tensor<256x32xf32>, tensor<64x4xi1>)
  // CHECK-SAME: outs(%{{[a-zA-Z0-9]*}} : tensor<?x32x64xf32>) {

  // CHECK: linalg.generic
  // CHECK-SAME: iterator_types = ["parallel", "parallel", "reduction"]
  // CHECK-SAME: ins(%{{[a-zA-Z0-9]*}} : tensor<?x32x64xf32>)
  // CHECK-SAME: outs(%{{[a-zA-Z0-9]*}} : tensor<?x32xf32>) {
  %0 = linalg.matmul ins(%A, %B: tensor<?x256xf32>, tensor<256x32xf32>)
                    outs(%C: tensor<?x32xf32>) -> tensor<?x32xf32>
  return %0: tensor<?x32xf32>
}

module attributes {transform.with_named_sequence} {
  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
    // Match the matmul, then split its reduction with the scaling algorithm,
    // inserting the new parallel dimension at position 2 and materializing the
    // intermediate through bufferization.alloc_tensor (use_alloc).
    %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
    %1:4 = transform.structured.split_reduction %0
      { split_factor = 4, insert_split_dimension = 2, use_scaling_algorithm, use_alloc}
      : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op, !transform.any_op)
    transform.yield
  }
}