// RUN: mlir-opt -transform-interpreter -cse -split-input-file %s | FileCheck %s

// 2D tiling of a dynamic 2D pad tensor op.
func.func @dynamic_2d_pad_tensor(%input_tensor: tensor<?x?xf32>,
                         %pad_value: f32) -> tensor<?x?xf32> {
  %0 = tensor.pad %input_tensor low[3, 4] high[5, 3] {
    ^bb0(%arg1: index, %arg2: index):
      tensor.yield %pad_value : f32
    } : tensor<?x?xf32> to tensor<?x?xf32>
  return %0 : tensor<?x?xf32>
}

module attributes {transform.with_named_sequence} {
  transform.named_sequence @__transform_main(%arg1 : !transform.any_op {transform.readonly}) {
    %pad = transform.structured.match ops{["tensor.pad"]} in %arg1
      : (!transform.any_op) -> !transform.any_op
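    // Tiling with sizes [2, 3] returns a handle to the tiled pad op followed
    // by one handle per generated loop.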
    %a, %b, %c = transform.structured.tile_using_for %pad tile_sizes [2, 3]
      : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op)
    transform.yield
  }
}

//  CHECK-DAG:  #[[MAP0:.+]] = affine_map<()[s0] -> (s0 + 8)>
//  CHECK-DAG:  #[[MAP1:.+]] = affine_map<()[s0] -> (s0 + 7)>
//       CHECK: func @dynamic_2d_pad_tensor(
//  CHECK-SAME:     %[[IN:[a-zA-Z0-9]+]]: tensor<?x?xf32>
//   CHECK-DAG:   %[[C0:.+]] = arith.constant 0 : index
//   CHECK-DAG:   %[[DIM_IN0:.+]] = tensor.dim %[[IN]], %[[C0]]
//   CHECK-DAG:   %[[DIM0:.+]] = affine.apply #[[MAP0]]()[%[[DIM_IN0]]]
//   CHECK-DAG:   %[[C1:.+]] = arith.constant 1 : index
//   CHECK-DAG:   %[[DIM_IN1:.+]] = tensor.dim %[[IN]], %[[C1]]
//   CHECK-DAG:   %[[DIM1:.+]] = affine.apply #[[MAP1]]()[%[[DIM_IN1]]]
//   CHECK-DAG:   %[[C2:.+]] = arith.constant 2 : index
//   CHECK-DAG:   %[[C3:.+]] = arith.constant 3 : index
//       CHECK:   %[[RESULT:[a-zA-Z0-9]+]] = scf.for %[[IV0:[a-zA-Z0-9]+]] = %[[C0]] to %[[DIM0]] step %[[C2]]
//       CHECK:     scf.for {{.*}} = %[[C0]] to %[[DIM1]] step %[[C3]] iter_args(%[[INNER_OUT:.*]] =
//       CHECK:       %[[SWAP_RESULT:.*]] = scf.if
//       CHECK:         tensor.generate
//       CHECK:       else
//       CHECK:         %[[SLICE:.*]] = tensor.extract_slice %[[IN]][{{.*}}, {{.*}}] [{{.*}}, {{.*}}] [1, 1]
//       CHECK:         %[[PAD:.*]] = tensor.pad %[[SLICE]]
//       CHECK:       tensor.insert_slice %[[SWAP_RESULT]] into %[[INNER_OUT]][{{.*}}, {{.*}}] [{{.*}}, {{.*}}] [1, 1]
//       CHECK:   return %[[RESULT]]

// -----

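// Tiling of a dynamic 2D pad tensor op along the inner dimension only;
// tile size 0 leaves the outer dimension untiled.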
func.func @dynamic_2d_pad_tensor_inner_tiling(%input_tensor: tensor<?x?xf32>,
                         %pad_value: f32) -> tensor<?x?xf32> {
  %0 = tensor.pad %input_tensor low[3, 4] high[5, 3] {
    ^bb0(%arg1: index, %arg2: index):
      tensor.yield %pad_value : f32
    } : tensor<?x?xf32> to tensor<?x?xf32>
  return %0 : tensor<?x?xf32>
}

module attributes {transform.with_named_sequence} {
  transform.named_sequence @__transform_main(%arg1 : !transform.any_op {transform.readonly}) {
    %pad = transform.structured.match ops{["tensor.pad"]} in %arg1
      : (!transform.any_op) -> !transform.any_op
    %a, %b = transform.structured.tile_using_for %pad tile_sizes [0, 3]
      : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
    transform.yield
  }
}
//   CHECK-DAG: #[[MAP0:.+]] = affine_map<()[s0] -> (s0 + 8)>
//   CHECK-DAG: #[[MAP1:.+]] = affine_map<()[s0] -> (s0 + 7)>
//       CHECK: func @dynamic_2d_pad_tensor_inner_tiling(
//  CHECK-SAME:     %[[IN:.*]]: tensor<?x?xf32>
//   CHECK-DAG:   %[[C0:.*]] = arith.constant 0 : index
//   CHECK-DAG:   %[[DIM_IN0:.*]] = tensor.dim %[[IN]], %[[C0]]
//   CHECK-DAG:   %[[DIM0:.*]] = affine.apply #[[MAP0]]()[%[[DIM_IN0]]]
//   CHECK-DAG:   %[[C1:.*]] = arith.constant 1 : index
//   CHECK-DAG:   %[[DIM_IN1:.*]] = tensor.dim %[[IN]], %[[C1]]
//   CHECK-DAG:   %[[DIM1:.*]] = affine.apply #[[MAP1]]()[%[[DIM_IN1]]]
//   CHECK-DAG:   %[[C3:.*]] = arith.constant 3 : index
//       CHECK:   %[[RESULT:.*]] = scf.for {{.*}} = %[[C0]] to %[[DIM1]] step %[[C3]] iter_args(%[[INNER_OUT:.*]] =
//       CHECK:     %[[SWAP_RESULT:.*]] = scf.if
//       CHECK:       tensor.generate
//       CHECK:     else
//       CHECK:       %[[SLICE:.*]] = tensor.extract_slice %[[IN]][{{.*}}, {{.*}}] [{{.*}}, {{.*}}] [1, 1]
//       CHECK:       %[[PAD:.*]] = tensor.pad %[[SLICE]] low[3, %{{.*}}] high[{{.*}}, {{.*}}]
//       CHECK:     tensor.insert_slice %[[SWAP_RESULT]] into %[[INNER_OUT]][0, {{.*}}] [%[[DIM0]], {{.*}}] [1, 1]
//       CHECK:   return %[[RESULT]]

// -----

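// 2D tiling of a static 2D pad tensor op.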
func.func @static_pad_tensor(%input_tensor: tensor<7x9xf32>,
                        %pad_value: f32) -> tensor<15x16xf32> {
  %0 = tensor.pad %input_tensor low[3, 4] high[5, 3] {
    ^bb0(%arg1: index, %arg2: index):
      tensor.yield %pad_value : f32
    }  : tensor<7x9xf32> to tensor<15x16xf32>
  return %0 : tensor<15x16xf32>
}

module attributes {transform.with_named_sequence} {
  transform.named_sequence @__transform_main(%arg1 : !transform.any_op {transform.readonly}) {
    %pad = transform.structured.match ops{["tensor.pad"]} in %arg1
      : (!transform.any_op) -> !transform.any_op
    %a, %b, %c = transform.structured.tile_using_for %pad tile_sizes [2, 3]
      : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op)
    transform.yield
  }
}
// CHECK-LABEL: func @static_pad_tensor(
//  CHECK-SAME:     %[[IN:.*]]: tensor<7x9xf32>
//   CHECK-DAG:   %[[C0:.*]] = arith.constant 0 : index
//   CHECK-DAG:   %[[C15:.*]] = arith.constant 15 : index
//   CHECK-DAG:   %[[C2:.*]] = arith.constant 2 : index
//   CHECK-DAG:   %[[C16:.*]] = arith.constant 16 : index
//   CHECK-DAG:   %[[C3:.*]] = arith.constant 3 : index
//       CHECK:   %[[RESULT:.*]] = scf.for {{.*}} = %[[C0]] to %[[C15]] step %[[C2]]
//       CHECK:     scf.for {{.*}} = %[[C0]] to %[[C16]] step %[[C3]] iter_args(%[[INNER_OUT:.*]] =
//       CHECK:       %[[SWAP_RESULT:.*]] = scf.if
//       CHECK:         tensor.generate
//       CHECK:       else
//       CHECK:         %[[SLICE:.*]] = tensor.extract_slice %[[IN]][{{.*}}, {{.*}}] [{{.*}}, {{.*}}] [1, 1]
//       CHECK:         %[[PAD:.*]] = tensor.pad %[[SLICE]]
//       CHECK:       tensor.insert_slice %[[SWAP_RESULT]] into %[[INNER_OUT]][{{.*}}, {{.*}}] [{{.*}}, {{.*}}] [1, 1]
//       CHECK:   return %[[RESULT]]

// -----

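// Tiling of a static 2D pad tensor op along the inner dimension only.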
func.func @static_pad_tensor_inner_tiling(%input_tensor: tensor<7x9xf32>,
                        %pad_value: f32) -> tensor<15x16xf32> {
  %0 = tensor.pad %input_tensor low[3, 4] high[5, 3] {
    ^bb0(%arg1: index, %arg2: index):
      tensor.yield %pad_value : f32
    }  : tensor<7x9xf32> to tensor<15x16xf32>
  return %0 : tensor<15x16xf32>
}

module attributes {transform.with_named_sequence} {
  transform.named_sequence @__transform_main(%arg1 : !transform.any_op {transform.readonly}) {
    %pad = transform.structured.match ops{["tensor.pad"]} in %arg1
      : (!transform.any_op) -> !transform.any_op
    %a, %b = transform.structured.tile_using_for %pad tile_sizes [0, 3]
      : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
    transform.yield
  }
}
// CHECK-LABEL: func @static_pad_tensor_inner_tiling(
//  CHECK-SAME:     %[[IN:.*]]: tensor<7x9xf32>
//   CHECK-DAG:   %[[C0:.*]] = arith.constant 0 : index
//   CHECK-DAG:   %[[C3:.*]] = arith.constant 3 : index
//   CHECK-DAG:   %[[C16:.*]] = arith.constant 16 : index
//       CHECK:   %[[RESULT:.*]] = scf.for {{.*}} = %[[C0]] to %[[C16]] step %[[C3]] iter_args(%[[INNER_OUT:.*]] =
//       CHECK:     %[[SWAP_RESULT:.*]] = scf.if
//       CHECK:       tensor.generate
//       CHECK:     else
//       CHECK:       %[[SLICE:.*]] = tensor.extract_slice %[[IN]][0, {{.*}}] [7, {{.*}}] [1, 1]
//       CHECK:       %[[PAD:.*]] = tensor.pad %[[SLICE]] low[3, %{{.*}}] high[5, {{.*}}]
//       CHECK:     tensor.insert_slice %[[SWAP_RESULT]] into %[[INNER_OUT]][0, {{.*}}] [15, {{.*}}] [1, 1]
//       CHECK:   return %[[RESULT]]

/// The remaining tests only check that the transformation does not fail.

// -----

func.func @dynamic_2d_pad_tensor_outer_tiling(%input_tensor: tensor<?x?xf32>,
                         %pad_value: f32) -> tensor<?x?xf32> {
  %0 = tensor.pad %input_tensor low[3, 4] high[5, 3] {
    ^bb0(%arg1: index, %arg2: index):
      tensor.yield %pad_value : f32
    } : tensor<?x?xf32> to tensor<?x?xf32>
  return %0 : tensor<?x?xf32>
}

module attributes {transform.with_named_sequence} {
  transform.named_sequence @__transform_main(%arg1 : !transform.any_op {transform.readonly}) {
    %pad = transform.structured.match ops{["tensor.pad"]} in %arg1
      : (!transform.any_op) -> !transform.any_op
    %a, %b, %c = transform.structured.tile_using_for %pad tile_sizes [2, 3]
      : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op)
    transform.yield
  }
}
// CHECK-LABEL: func @dynamic_2d_pad_tensor_outer_tiling

// -----

func.func @static_pad_tensor_outer_tiling(%input_tensor: tensor<7x9xf32>,
                        %pad_value: f32) -> tensor<15x16xf32> {
  %0 = tensor.pad %input_tensor low[3, 4] high[5, 3] {
    ^bb0(%arg1: index, %arg2: index):
      tensor.yield %pad_value : f32
    }  : tensor<7x9xf32> to tensor<15x16xf32>
  return %0 : tensor<15x16xf32>
}

module attributes {transform.with_named_sequence} {
  transform.named_sequence @__transform_main(%arg1 : !transform.any_op {transform.readonly}) {
    %pad = transform.structured.match ops{["tensor.pad"]} in %arg1
      : (!transform.any_op) -> !transform.any_op
    %a, %b = transform.structured.tile_using_for %pad tile_sizes [0, 3]
      : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
    transform.yield
  }
}
// CHECK-LABEL: func @static_pad_tensor_outer_tiling