// RUN: mlir-opt -transform-interpreter %s | FileCheck %s

func.func @scalarize(%arg0: tensor<24x12xf32>,
                     %arg1: tensor<12x25xf32>,
                     %arg2: tensor<24x25xf32>) -> tensor<24x25xf32> {
  // The op is first tiled by 10 in the first dimension, which creates a
  // dynamic size, and then scalarized, which brings the dimension to static 1.
  // CHECK: %[[RES_LOOP_1:.*]] = scf.for {{.*}} -> (tensor<24x25xf32>)
  // CHECK: %[[RES_LOOP_2:.*]] = scf.for {{.*}} -> (tensor<?x25xf32>)
  // CHECK: %[[MM:.*]] = linalg.matmul ins(%{{.*}}, %{{.*}} : tensor<1x12
  // CHECK: %[[INS_2:.*]] = tensor.insert_slice %[[MM]] into %{{.*}} [1, 25] [1, 1] : tensor<1x25xf32> into tensor<?x25xf32>
  // CHECK: scf.yield %[[INS_2]] : tensor<?x25xf32>
  // CHECK: %[[INS_1:.*]] = tensor.insert_slice %[[RES_LOOP_2]] into %{{.*}}, 25] [1, 1] : tensor<?x25xf32> into tensor<24x25xf32>
  // CHECK: scf.yield %[[INS_1]] : tensor<24x25xf32>
  %0 = linalg.matmul ins(%arg0, %arg1 : tensor<24x12xf32>, tensor<12x25xf32>) outs(%arg2 : tensor<24x25xf32>) -> tensor<24x25xf32>

  // CHECK: return %[[RES_LOOP_1]] : tensor<24x25xf32>
  func.return %0 : tensor<24x25xf32>
}

module attributes {transform.with_named_sequence} {
  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
    %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
    %1, %loops = transform.structured.tile_using_for %0 tile_sizes [10, 0, 0] : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
    %2 = transform.structured.scalarize %1 : (!transform.any_op) -> !transform.any_op
    transform.yield
  }
}
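
// A rough, hedged sketch (not matched by FileCheck) of the loop nest this
// transform sequence is expected to produce: the outer loop steps over the
// 24 rows in chunks of 10 (24 is not a multiple of 10, so the row tile has
// a dynamic size), and scalarization then tiles that dynamic dimension by 1,
// leaving a static 1x12 * 12x25 matmul per inner iteration. Value names and
// elided operands below are illustrative only, not part of the checked output.
//
//   scf.for %i = %c0 to %c24 step %c10 iter_args(%acc0 = %arg2) -> (tensor<24x25xf32>) {
//     // dynamic tile height: min(10, 24 - %i)
//     %rows  = affine.min ...
//     %tile  = tensor.extract_slice %acc0 ...   // tensor<?x25xf32>
//     %inner = scf.for %j = %c0 to %rows step %c1 iter_args(%acc1 = %tile) -> (tensor<?x25xf32>) {
//       %lhs = tensor.extract_slice %arg0 ...   // tensor<1x12xf32> row slice
//       %out = tensor.extract_slice %acc1 ...   // tensor<1x25xf32> row slice
//       %mm  = linalg.matmul ins(%lhs, %arg1 : tensor<1x12xf32>, tensor<12x25xf32>)
//                            outs(%out : tensor<1x25xf32>) -> tensor<1x25xf32>
//       %ins = tensor.insert_slice %mm into %acc1 ...  // back into tensor<?x25xf32>
//       scf.yield %ins : tensor<?x25xf32>
//     }
//     %res = tensor.insert_slice %inner into %acc0 ... // back into tensor<24x25xf32>
//     scf.yield %res : tensor<24x25xf32>
//   }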