// xref: /llvm-project/mlir/test/Dialect/Linalg/transform-op-multitile-sizes.mlir (revision 2798b72ae7e5caad793169b77cbac47fe2362d0f)
// RUN: mlir-opt %s --transform-interpreter --split-input-file --verify-diagnostics | FileCheck %s

// CHECK-DAG: #[[$MAP13:.+]] = affine_map<() -> (13)>

5module attributes {transform.with_named_sequence} {
6  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
7      %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
8      transform.structured.multitile_sizes %0 { target_size = 3, dimension = 0 } : (!transform.any_op) -> !transform.any_op
9      transform.yield
10  }
11}
13// CHECK-LABEL: @multitile_sizes_static
14func.func @multitile_sizes_static(
15  %arg0: tensor<13x34xf32>, %arg1: tensor<34x42xf32>, %arg2: tensor<13x42xf32>)
16    -> tensor<13x42xf32> {
17  %0 = linalg.matmul  ins(%arg0, %arg1: tensor<13x34xf32>, tensor<34x42xf32>)
18                     outs(%arg2: tensor<13x42xf32>)
19    -> tensor<13x42xf32>
20  // The first application computes the total size.
21  // CHECK: %{{.*}} = affine.apply #[[$MAP13]]()
22  // CHECK: %[[SIZE:.+]] = affine.apply #[[$MAP13]]()
23  // CHECK: %[[COND:.+]] = arith.cmpi eq, %[[SIZE]], %{{.*}}
24  // CHECK: cf.assert %[[COND]], "could not compute dynamic multi-size tile shapes"
25
26  return %0 : tensor<13x42xf32>
27}

// -----

31module attributes {transform.with_named_sequence} {
32  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
33      %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
34      %low_tile, %high_tile, %split_point =
35        transform.structured.multitile_sizes %0 { target_size = 3, dimension = 0 }
36        : (!transform.any_op) -> !transform.param<i64>
37      // expected-remark @below {{2 : i64}}
38      transform.debug.emit_param_as_remark %low_tile : !transform.param<i64>
39      // expected-remark @below {{3 : i64}}
40      transform.debug.emit_param_as_remark %high_tile : !transform.param<i64>
41      // expected-remark @below {{4 : i64}}
42      transform.debug.emit_param_as_remark %split_point : !transform.param<i64>
43      transform.yield
44  }
45}
47// CHECK-LABEL: @multitile_sizes_static_gen
48func.func @multitile_sizes_static_gen(
49  %arg0: tensor<13x34xf32>, %arg1: tensor<34x42xf32>, %arg2: tensor<13x42xf32>)
50    -> tensor<13x42xf32> {
51  %0 = linalg.matmul  ins(%arg0, %arg1: tensor<13x34xf32>, tensor<34x42xf32>)
52                     outs(%arg2: tensor<13x42xf32>)
53    -> tensor<13x42xf32>
54
55  return %0 : tensor<13x42xf32>
56}

// -----

60module attributes {transform.with_named_sequence} {
61  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
62      %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
63      transform.structured.multitile_sizes %0 { target_size = 3, divisor = 2, dimension = 0 } : (!transform.any_op) -> !transform.any_op
64      transform.yield
65  }
66}
68// CHECK: #[[$MAP_A:.+]] = affine_map<()[s0] -> ([[A_IMPL:s0 floordiv 2]])>
69// CHECK: #[[$MAP_T:.+]] = affine_map<() -> (2)>
70// CHECK: #[[$MAP_D:.+]] = affine_map<()[s0] -> ([[D_IMPL:\(s0 floordiv 2 \+ 1\) floordiv 2]])>
71// CHECK: #[[$MAP_S:.+]] = affine_map<()[s0] -> ((([[A_IMPL]]) floordiv ([[D_IMPL]])) * 2)>
72// CHECK: #[[$MAP_V:.+]] = affine_map<()[s0] -> (([[A_IMPL]]) mod ([[D_IMPL]]))>
73// CHECK: #[[$MAP_U:.+]] = affine_map<()[s0] -> ([[D_IMPL]] - ([[A_IMPL]]) mod ([[D_IMPL]]))>
74
75// CHECK-LABEL: @multitile_sizes_dynamic
76// CHECK-SAME: (%[[ARG0:.+]]: tensor<?x?xf32>, %{{.*}}: tensor<?x?xf32>, %{{.*}}: tensor<?x?xf32>)
77func.func @multitile_sizes_dynamic(
78  // For matmul, the extent of the first iteration space dimension is equal to
79  // the size of the first dimension of the first tensor. The indexing map was
80  // folded so there is no map application happening.
81  //
82  // CHECK: %[[C0:.+]] = arith.constant 0
83  // CHECK: %[[DIM:.+]] = tensor.dim %[[ARG0]], %[[C0]]
84  //
85  // The following are the maps as emitted by computeMultiTileSizes.
86  // CHECK: affine.apply #[[$MAP_A]]()[%[[DIM]]]
87  // CHECK: affine.apply #[[$MAP_T]]()
88  // CHECK: affine.apply #[[$MAP_D]]()[%[[DIM]]]
89  // CHECK: affine.apply #[[$MAP_S]]()[%[[DIM]]]
90  // CHECK: affine.apply #[[$MAP_V]]()[%[[DIM]]]
91  // CHECK: affine.apply #[[$MAP_U]]()[%[[DIM]]]
92  %arg0: tensor<?x?xf32>, %arg1: tensor<?x?xf32>, %arg2: tensor<?x?xf32>)
93    -> tensor<?x?xf32> {
94  %0 = linalg.matmul  ins(%arg0, %arg1: tensor<?x?xf32>, tensor<?x?xf32>)
95                     outs(%arg2: tensor<?x?xf32>)
96    -> tensor<?x?xf32>
97
98  return %0 : tensor<?x?xf32>
99}

// -----

103module attributes {transform.with_named_sequence} {
104  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
105      %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
106      // expected-error @below {{cannot compute parametric tile sizes for dynamically shaped payload op}}
107      transform.structured.multitile_sizes %0 { target_size = 3, divisor = 2, dimension = 0 }
108        : (!transform.any_op) -> !transform.param<i64>
109        transform.yield
110  }
111}
113func.func @multitile_sizes_dynamic_gen(
114  %arg0: tensor<?x?xf32>, %arg1: tensor<?x?xf32>, %arg2: tensor<?x?xf32>)
115    -> tensor<?x?xf32> {
116  // expected-note @below {{payload op}}
117  %0 = linalg.matmul  ins(%arg0, %arg1: tensor<?x?xf32>, tensor<?x?xf32>)
118                     outs(%arg2: tensor<?x?xf32>)
119    -> tensor<?x?xf32>
120
121  return %0 : tensor<?x?xf32>
122}