// RUN: mlir-opt %s -test-linalg-greedy-fusion -split-input-file | FileCheck %s

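// Fusion test: a matmul producer feeds a hand-tiled consumer matmul. The
// greedy fusion pass is expected to pull a tile of the producer into the
// innermost loop instead of computing the full %t0 up front.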
func.func @matmul_tensors(%arg0: tensor<?x?xf32>, %arg1: tensor<?x?xf32>, %arg2: tensor<?x?xf32>) -> tensor<?x?xf32> {
  %t0 = linalg.matmul ins(%arg0, %arg1: tensor<?x?xf32>, tensor<?x?xf32>)
                     outs(%arg2: tensor<?x?xf32>)
    -> tensor<?x?xf32>

  %c4 = arith.constant 4 : index
  %c2 = arith.constant 2 : index
  %c0 = arith.constant 0 : index
  %c3 = arith.constant 3 : index
  %c1 = arith.constant 1 : index
  %0 = tensor.dim %t0, %c0 : tensor<?x?xf32>
  %1 = tensor.dim %t0, %c1 : tensor<?x?xf32>
  %2 = tensor.dim %arg1, %c1 : tensor<?x?xf32>
  %3 = scf.for %arg3 = %c0 to %0 step %c2 iter_args(%arg4 = %arg2) -> (tensor<?x?xf32>) {
    %4 = scf.for %arg5 = %c0 to %2 step %c3 iter_args(%arg6 = %arg4) -> (tensor<?x?xf32>) {
      %5 = scf.for %arg7 = %c0 to %1 step %c4 iter_args(%arg8 = %arg6) -> (tensor<?x?xf32>) {
        %6 = tensor.extract_slice %t0[%arg3, %arg7][%c2, 4][1, 1] : tensor<?x?xf32> to tensor<?x4xf32>
        %7 = tensor.extract_slice %arg1[%arg7, %arg5][4, %c3][1, 1] : tensor<?x?xf32> to tensor<4x?xf32>
        %8 = tensor.extract_slice %arg8[%arg3, %arg5][%c2, %c3][1, 1] : tensor<?x?xf32> to tensor<?x?xf32>
        %9 = linalg.matmul ins(%6, %7 : tensor<?x4xf32>, tensor<4x?xf32>) outs(%8 : tensor<?x?xf32>) -> tensor<?x?xf32>
        %10 = tensor.insert_slice %9 into %arg8[%arg3, %arg5] [%c2, %c3] [1, 1]  : tensor<?x?xf32> into tensor<?x?xf32>
        scf.yield %10 : tensor<?x?xf32>
      }
      scf.yield %5 : tensor<?x?xf32>
    }
    scf.yield %4 : tensor<?x?xf32>
  }
  return %3 : tensor<?x?xf32>
}

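// The checks below verify that, inside the i/j/k loop nest, the fused
// producer tile (%[[stD]]) is computed from slices of %[[A]], %[[B]] and
// %[[C]] and then consumed directly by the original tiled matmul.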
//       CHECK: func @matmul_tensors(
//  CHECK-SAME: %[[A:[0-9a-z]*]]: tensor<?x?xf32>
//  CHECK-SAME: %[[B:[0-9a-z]*]]: tensor<?x?xf32>
//  CHECK-SAME: %[[C:[0-9a-z]*]]: tensor<?x?xf32>

//   CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index
//   CHECK-DAG: %[[C1:.*]] = arith.constant 1 : index
//   CHECK-DAG: %[[dA1:.*]] = tensor.dim %[[A]], %[[C1]] : tensor<?x?xf32>
//   CHECK-DAG: %[[dB0:.*]] = tensor.dim %[[B]], %[[C0]] : tensor<?x?xf32>
//   CHECK-DAG: %[[dB1:.*]] = tensor.dim %[[B]], %[[C1]] : tensor<?x?xf32>
//       CHECK: scf.for %[[I:[0-9a-z]*]]
//       CHECK:   %[[stA:.*]] = tensor.extract_slice %[[A]][%[[I]], 0] [2, %[[dA1]]] [1, 1]  : tensor<?x?xf32> to tensor<2x?xf32>
//       CHECK:   scf.for %[[J:[0-9a-z]*]]
//  CHECK-NEXT:     scf.for %[[K:[0-9a-z]*]] {{.*}} iter_args(%[[RES:[0-9a-z]*]]
//   CHECK-DAG:       %[[stB1:.*]] = tensor.extract_slice %[[B]][%[[K]], %[[J]]] [4, 3] [1, 1]  : tensor<?x?xf32> to tensor<4x3xf32>
//   CHECK-DAG:       %[[stF:.*]] = tensor.extract_slice %[[RES]][%[[I]], %[[J]]] [2, 3] [1, 1]  : tensor<?x?xf32> to tensor<2x3xf32>
//
// Slices feeding the fused producer matmul.
//   CHECK-DAG:       %[[stB2:.*]] = tensor.extract_slice %[[B]][0, %[[K]]] [%[[dB0]], 4] [1, 1]  : tensor<?x?xf32> to tensor<?x4xf32>
//   CHECK-DAG:       %[[stC:.*]] = tensor.extract_slice %[[C]][%[[I]], %[[K]]] [2, 4] [1, 1]  : tensor<?x?xf32> to tensor<2x4xf32>
//       CHECK:       %[[stD:.*]] = linalg.matmul ins(%[[stA]], %[[stB2]] : tensor<2x?xf32>, tensor<?x4xf32>) outs(%[[stC]] : tensor<2x4xf32>)  -> tensor<2x4xf32>
//  CHECK-NEXT:       %[[stG:.*]] = linalg.matmul ins(%[[stD]], %[[stB1]] : tensor<2x4xf32>, tensor<4x3xf32>) outs(%[[stF]] : tensor<2x3xf32>)  -> tensor<2x3xf32>
//  CHECK-NEXT:       tensor.insert_slice %[[stG]] into %[[RES]][%[[I]], %[[J]]]

// -----

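// Fusion test with static shapes: a conv_2d_nhwc_hwcf producer feeds an
// elementwise add that is tiled into 1x8x16x4 output tiles. After fusion the
// convolution should be computed per tile, with the input slice offsets
// derived from the output offsets through the stride-2 map.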
func.func @conv_tensors_static(%input: tensor<1x225x225x3xf32>, %filter: tensor<3x3x3x32xf32>, %elementwise: tensor<1x112x112x32xf32>) -> tensor<1x112x112x32xf32> {
  %c112 = arith.constant 112 : index
  %c32 = arith.constant 32 : index
  %c16 = arith.constant 16 : index
  %c8 = arith.constant 8 : index
  %c4 = arith.constant 4 : index
  %c0 = arith.constant 0 : index
  %cst = arith.constant 0.0 : f32

  %init = tensor.empty() : tensor<1x112x112x32xf32>
  %fill = linalg.fill ins(%cst : f32) outs(%init : tensor<1x112x112x32xf32>) -> tensor<1x112x112x32xf32>

  %conv = linalg.conv_2d_nhwc_hwcf
    {dilations = dense<1> : tensor<2xi64>, strides = dense<2> : tensor<2xi64>}
    ins(%input, %filter : tensor<1x225x225x3xf32>, tensor<3x3x3x32xf32>)
    outs(%fill : tensor<1x112x112x32xf32>) -> tensor<1x112x112x32xf32>

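  // Hand-tiled consumer loop nest slicing the conv result; fusion should
  // rewrite the extract_slice of %conv into a tiled convolution here.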
  %for0 = scf.for %iv0 = %c0 to %c112 step %c8 iter_args(%arg0 = %fill) -> tensor<1x112x112x32xf32> {
    %for1 = scf.for %iv1 = %c0 to %c112 step %c16 iter_args(%arg1 = %arg0) -> tensor<1x112x112x32xf32> {
      %for2 = scf.for %iv2 = %c0 to %c32 step %c4 iter_args(%arg2 = %arg1) -> tensor<1x112x112x32xf32> {
        %0 = tensor.extract_slice %conv[0, %iv0, %iv1, %iv2][1, 8, 16, 4][1, 1, 1, 1] : tensor<1x112x112x32xf32> to tensor<1x8x16x4xf32>
        %1 = tensor.extract_slice %elementwise[0, %iv0, %iv1, %iv2][1, 8, 16, 4][1, 1, 1, 1] : tensor<1x112x112x32xf32> to tensor<1x8x16x4xf32>
        %2 = tensor.extract_slice %arg2[0, %iv0, %iv1, %iv2][1, 8, 16, 4][1, 1, 1, 1] : tensor<1x112x112x32xf32> to tensor<1x8x16x4xf32>
        %add = linalg.generic
          {
            indexing_maps = [
              affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>,
              affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>,
              affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>],
            iterator_types = ["parallel", "parallel", "parallel", "parallel"]
          }
          ins(%0, %1 : tensor<1x8x16x4xf32>, tensor<1x8x16x4xf32>) outs(%2 : tensor<1x8x16x4xf32>) {
        ^bb0(%arg3: f32, %arg4: f32, %arg5: f32):
          %result = arith.addf %arg3, %arg4 : f32
          linalg.yield %result : f32
        } -> tensor<1x8x16x4xf32>

        %insert = tensor.insert_slice %add into %arg2[0, %iv0, %iv1, %iv2] [1, 8, 16, 4] [1, 1, 1, 1]  : tensor<1x8x16x4xf32> into tensor<1x112x112x32xf32>
        scf.yield %insert : tensor<1x112x112x32xf32>
      }
      scf.yield %for2 : tensor<1x112x112x32xf32>
    }
    scf.yield %for1 : tensor<1x112x112x32xf32>
  }
  return %for0 : tensor<1x112x112x32xf32>
}

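// #[[MAP0]] maps an output spatial offset to the matching input offset for
// the stride-2 convolution; #[[MAP1]] is the identity indexing map used by
// the elementwise generic.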
//      CHECK: #[[MAP0:.+]] = affine_map<(d0) -> (d0 * 2)>
//      CHECK: #[[MAP1:.+]] = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>

//      CHECK: func @conv_tensors_static
// CHECK-SAME: (%[[INPUT:.+]]: tensor<1x225x225x3xf32>, %[[FILTER:.+]]: tensor<3x3x3x32xf32>, %[[ELEM:.+]]: tensor<1x112x112x32xf32>)

//      CHECK: %[[INIT:.+]] = tensor.empty() : tensor<1x112x112x32xf32>
// CHECK-NEXT: %[[FILL:.+]] = linalg.fill ins(%cst : f32) outs(%[[INIT]] : tensor<1x112x112x32xf32>) -> tensor<1x112x112x32xf32>

// CHECK-NEXT: scf.for %[[IV0:.+]] = %{{.+}} to %{{.+}} step %{{.+}} iter_args(%[[ARG0:.+]] = %[[FILL]])
// CHECK-NEXT:   %[[OFFSET_H:.+]] = affine.apply #[[MAP0]](%[[IV0]])
// CHECK-NEXT:   scf.for %[[IV1:.+]] = %{{.+}} to %{{.+}} step %{{.+}} iter_args(%[[ARG1:.+]] = %[[ARG0]])
// CHECK-NEXT:     %[[OFFSET_W:.+]] = affine.apply #[[MAP0]](%[[IV1]])
// CHECK-NEXT:     %[[ST_INPUT:.+]] = tensor.extract_slice %arg0[0, %[[OFFSET_H]], %[[OFFSET_W]], 0] [1, 17, 33, 3] [1, 1, 1, 1] : tensor<1x225x225x3xf32> to tensor<1x17x33x3xf32>
// CHECK-NEXT:     scf.for %[[IV2:.+]] = %{{.+}} to %{{.+}} step %{{.+}} iter_args(%[[ARG2:.+]] = %[[ARG1]])
// CHECK-NEXT:       %[[ST_ELEM:.+]] = tensor.extract_slice %[[ELEM]][0, %[[IV0]], %[[IV1]], %[[IV2]]] [1, 8, 16, 4] [1, 1, 1, 1] : tensor<1x112x112x32xf32> to tensor<1x8x16x4xf32>
// CHECK-NEXT:       %[[ST_ARG2:.+]] = tensor.extract_slice %[[ARG2]][0, %[[IV0]], %[[IV1]], %[[IV2]]] [1, 8, 16, 4] [1, 1, 1, 1] : tensor<1x112x112x32xf32> to tensor<1x8x16x4xf32>
// CHECK-NEXT:       %[[ST_FILTER:.+]] = tensor.extract_slice %[[FILTER]][0, 0, 0, %[[IV2]]] [3, 3, 3, 4] [1, 1, 1, 1] : tensor<3x3x3x32xf32> to tensor<3x3x3x4xf32>
// CHECK-NEXT:       %[[ST_FILL:.+]] = tensor.extract_slice %[[FILL]][0, %[[IV0]], %[[IV1]], %[[IV2]]] [1, 8, 16, 4] [1, 1, 1, 1] : tensor<1x112x112x32xf32> to tensor<1x8x16x4xf32>
// CHECK-NEXT:       %[[ST_CONV:.+]] = linalg.conv_2d_nhwc_hwcf
// CHECK-SAME:         ins(%[[ST_INPUT]], %[[ST_FILTER]] : tensor<1x17x33x3xf32>, tensor<3x3x3x4xf32>)
// CHECK-SAME:         outs(%[[ST_FILL]] : tensor<1x8x16x4xf32>)
// CHECK-NEXT:       %[[ADD:.+]] = linalg.generic
// CHECK-SAME:         ins(%[[ST_CONV]], %[[ST_ELEM]] : tensor<1x8x16x4xf32>, tensor<1x8x16x4xf32>)
// CHECK-SAME:         outs(%[[ST_ARG2]] : tensor<1x8x16x4xf32>)
//      CHECK:       tensor.insert_slice %[[ADD]] into %[[ARG2]][0, %[[IV0]], %[[IV1]], %[[IV2]]] [1, 8, 16, 4]

// -----

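// Same conv + elementwise fusion as above, but with fully dynamic shapes:
// tile sizes must be clamped with affine.min at the tensor boundaries and
// the fused convolution operates on slices of dynamic size.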
func.func @conv_tensors_dynamic(%input: tensor<?x?x?x?xf32>, %filter: tensor<?x?x?x?xf32>, %elementwise: tensor<?x?x?x?xf32>) -> tensor<?x?x?x?xf32> {
  %cst = arith.constant 0.0 : f32
  %c0 = arith.constant 0 : index
  %c1 = arith.constant 1 : index
  %c2 = arith.constant 2 : index
  %c3 = arith.constant 3 : index
  %c4 = arith.constant 4 : index
  %c8 = arith.constant 8 : index
  %c16 = arith.constant 16 : index

  %n = tensor.dim %elementwise, %c0 : tensor<?x?x?x?xf32>
  %oh = tensor.dim %elementwise, %c1 : tensor<?x?x?x?xf32>
  %ow = tensor.dim %elementwise, %c2 : tensor<?x?x?x?xf32>
  %oc = tensor.dim %elementwise, %c3 : tensor<?x?x?x?xf32>

  %init = tensor.empty(%n, %oh, %ow, %oc) : tensor<?x?x?x?xf32>
  %fill = linalg.fill ins(%cst : f32) outs(%init : tensor<?x?x?x?xf32>) -> tensor<?x?x?x?xf32>

  %conv = linalg.conv_2d_nhwc_hwcf
    {dilations = dense<1> : tensor<2xi64>, strides = dense<2> : tensor<2xi64>}
    ins(%input, %filter : tensor<?x?x?x?xf32>, tensor<?x?x?x?xf32>)
    outs(%fill : tensor<?x?x?x?xf32>) -> tensor<?x?x?x?xf32>

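  // Hand-tiled loop nest over (n, oh, ow, oc); each tile size is clamped
  // with affine.min so it does not run past the tensor bounds.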
  %for0 = scf.for %iv0 = %c0 to %n step %c8 iter_args(%arg0 = %fill) -> tensor<?x?x?x?xf32> {
    %for1 = scf.for %iv1 = %c0 to %oh step %c16 iter_args(%arg1 = %arg0) -> tensor<?x?x?x?xf32> {
      %for2 = scf.for %iv2 = %c0 to %ow step %c4 iter_args(%arg2 = %arg1) -> tensor<?x?x?x?xf32> {
        %for3 = scf.for %iv3 = %c0 to %oc step %c2 iter_args(%arg3 = %arg2) -> tensor<?x?x?x?xf32> {
          %n_size = affine.min affine_map<(d0)[s0] -> (8, -d0 + s0)>(%iv0)[%n]
          %oh_size = affine.min affine_map<(d0)[s0] -> (16, -d0 + s0)>(%iv1)[%oh]
          %ow_size = affine.min affine_map<(d0)[s0] -> (4, -d0 + s0)>(%iv2)[%ow]
          %oc_size = affine.min affine_map<(d0)[s0] -> (2, -d0 + s0)>(%iv2)[%oc]
          %0 = tensor.extract_slice %conv[%iv0, %iv1, %iv2, %iv3][%n_size, %oh_size, %ow_size, %oc_size][1, 1, 1, 1] : tensor<?x?x?x?xf32> to tensor<?x?x?x?xf32>
          %1 = tensor.extract_slice %elementwise[%iv0, %iv1, %iv2, %iv3][%n_size, %oh_size, %ow_size, %oc_size][1, 1, 1, 1] : tensor<?x?x?x?xf32> to tensor<?x?x?x?xf32>
          %2 = tensor.extract_slice %arg3[%iv0, %iv1, %iv2, %iv3][%n_size, %oh_size, %ow_size, %oc_size][1, 1, 1, 1] : tensor<?x?x?x?xf32> to tensor<?x?x?x?xf32>
          %add = linalg.generic
            {
              indexing_maps = [
                affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>,
                affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>,
                affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>],
              iterator_types = ["parallel", "parallel", "parallel", "parallel"]
            }
            ins(%0, %1 : tensor<?x?x?x?xf32>, tensor<?x?x?x?xf32>) outs(%2 : tensor<?x?x?x?xf32>) {
          ^bb0(%arg4: f32, %arg5: f32, %arg6: f32):
            %result = arith.addf %arg4, %arg5 : f32
            linalg.yield %result : f32
          } -> tensor<?x?x?x?xf32>

          %insert = tensor.insert_slice %add into %arg3[%iv0, %iv1, %iv2, %iv3] [%n_size, %oh_size, %ow_size, %oc_size] [1, 1, 1, 1]  : tensor<?x?x?x?xf32> into tensor<?x?x?x?xf32>
          scf.yield %insert : tensor<?x?x?x?xf32>
        }
        scf.yield %for3 : tensor<?x?x?x?xf32>
      }
      scf.yield %for2 : tensor<?x?x?x?xf32>
    }
    scf.yield %for1 : tensor<?x?x?x?xf32>
  }
  return %for0 : tensor<?x?x?x?xf32>
}

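// The affine maps below clamp tile sizes at the tensor boundaries
// (the BOUND*_MAP maps), translate output offsets to input offsets for the
// stride-2 convolution (X2_MAP), and compute the input tile extent needed
// for an output tile, i.e. 2 * (output tile size - 1) + filter size, bounded
// by what is left of the input (INPUT_BOUND).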
// CHECK: #[[BOUND8_MAP:.+]] = affine_map<(d0)[s0] -> (-d0 + s0, 8)>
// CHECK: #[[BOUND8_MAP_2:.+]] = affine_map<(d0)[s0, s1] -> (-d0 + s1, -d0 + s0, 8)>
// CHECK: #[[BOUND16_MAP:.+]] = affine_map<(d0)[s0] -> (-d0 + s0, 16)>
// CHECK: #[[X2_MAP:.+]] = affine_map<(d0) -> (d0 * 2)>
// CHECK: #[[INPUT_BOUND:.+]] = affine_map<(d0, d1)[s0, s1] -> (d0 * -2 + s0 * 2 + s1 - 2, d1 * 2 + s1 - 2)>
// CHECK: #[[BOUND4_MAP:.+]] = affine_map<(d0)[s0] -> (-d0 + s0, 4)>
// CHECK: #[[BOUND2_MAP:.+]] = affine_map<(d0)[s0] -> (-d0 + s0, 2)>
// CHECK: #[[BOUND2_MAP_2:.+]] = affine_map<(d0, d1)[s0, s1] -> (-d0 + s0, -d1 + s1, 2)>

//      CHECK: func @conv_tensors_dynamic
// CHECK-SAME: (%[[INPUT]]: tensor<?x?x?x?xf32>, %[[FILTER]]: tensor<?x?x?x?xf32>, %[[ELEM]]: tensor<?x?x?x?xf32>)

//  CHECK-DAG:   %[[C0:.+]] = arith.constant 0 : index
//  CHECK-DAG:   %[[C1:.+]] = arith.constant 1 : index
//  CHECK-DAG:   %[[C2:.+]] = arith.constant 2 : index
//  CHECK-DAG:   %[[C3:.+]] = arith.constant 3 : index

//  CHECK-DAG:   %[[ELEM_N:.+]] = tensor.dim %[[ELEM]], %[[C0]] : tensor<?x?x?x?xf32>
//  CHECK-DAG:   %[[ELEM_OH:.+]] = tensor.dim %[[ELEM]], %[[C1]] : tensor<?x?x?x?xf32>
//  CHECK-DAG:   %[[ELEM_OW:.+]] = tensor.dim %[[ELEM]], %[[C2]] : tensor<?x?x?x?xf32>
//  CHECK-DAG:   %[[ELEM_OC:.+]] = tensor.dim %[[ELEM]], %[[C3]] : tensor<?x?x?x?xf32>

//      CHECK:   %[[INIT:.+]] = tensor.empty(%[[ELEM_N]], %[[ELEM_OH]], %[[ELEM_OW]], %[[ELEM_OC]]) : tensor<?x?x?x?xf32>
//      CHECK:   %[[FILL:.+]] = linalg.fill ins(%cst : f32) outs(%[[INIT]] : tensor<?x?x?x?xf32>) -> tensor<?x?x?x?xf32>

//  CHECK-DAG:   %[[FILTER_H:.+]] = tensor.dim %[[FILTER]], %[[C0]] : tensor<?x?x?x?xf32>
//  CHECK-DAG:   %[[FILTER_W:.+]] = tensor.dim %[[FILTER]], %[[C1]] : tensor<?x?x?x?xf32>
//  CHECK-DAG:   %[[FILTER_IC:.+]] = tensor.dim %[[FILTER]], %[[C2]] : tensor<?x?x?x?xf32>
//  CHECK-DAG:   %[[FILTER_OC:.+]] = tensor.dim %[[FILTER]], %[[C3]] : tensor<?x?x?x?xf32>
//  CHECK-DAG:   %[[INPUT_N:.+]] = tensor.dim %[[INPUT]], %[[C0]] : tensor<?x?x?x?xf32>
//  CHECK-DAG:   %[[INPUT_C:.+]] = tensor.dim %[[INPUT]], %[[C3]] : tensor<?x?x?x?xf32>

//      CHECK:   scf.for %[[IV0:.+]] = %{{.+}} to %[[ELEM_N]] step %{{.+}} iter_args(%{{.+}} = %[[FILL]])
// CHECK-NEXT:     %[[SIZE_ELEM_N:.+]] = affine.min #[[BOUND8_MAP]](%[[IV0]])[%[[ELEM_N]]]
// CHECK-NEXT:     %[[SIZE_INPUT_N:.+]] = affine.min #[[BOUND8_MAP_2]](%[[IV0]])[%[[INPUT_N]], %[[ELEM_N]]]
// CHECK-NEXT:     scf.for %[[IV1:.+]] = %{{.+}} to %[[ELEM_OH]]
// CHECK-NEXT:       %[[SIZE_ELEM_OH:.+]] = affine.min #[[BOUND16_MAP]](%[[IV1]])[%[[ELEM_OH]]]
// CHECK-NEXT:       %[[OFFSET_OH:.+]] = affine.apply #[[X2_MAP]](%[[IV1]])
// CHECK-NEXT:       %[[SIZE_INPUT_H:.+]] = affine.min #[[INPUT_BOUND]](%[[IV1]], %[[SIZE_ELEM_OH]])[%[[ELEM_OH]], %[[FILTER_H]]]
// CHECK-NEXT:       scf.for %[[IV2:.+]] = %{{.+}} to %[[ELEM_OW]]
// CHECK-NEXT:         %[[SIZE_ELEM_OW:.+]] = affine.min #[[BOUND4_MAP]](%[[IV2]])[%[[ELEM_OW]]]
// CHECK-NEXT:         %[[SIZE_ELEM_OC:.+]] = affine.min #[[BOUND2_MAP]](%[[IV2]])[%[[ELEM_OC]]]
// CHECK-NEXT:         %[[OFFSET_OW:.+]] = affine.apply #[[X2_MAP]](%[[IV2]])
// CHECK-NEXT:         %[[SIZE_INPUT_W:.+]] = affine.min #[[INPUT_BOUND]](%[[IV2]], %[[SIZE_ELEM_OW]])[%[[ELEM_OW]], %[[FILTER_W]]]
// CHECK-NEXT:         %[[ST_INPUT:.+]] = tensor.extract_slice %[[INPUT]][%[[IV0]], %[[OFFSET_OH]], %[[OFFSET_OW]], 0]
// CHECK-SAME:               [%[[SIZE_INPUT_N]], %[[SIZE_INPUT_H]], %[[SIZE_INPUT_W]], %[[INPUT_C]]]
// CHECK-NEXT:         scf.for %[[IV3:.+]] = %{{.+}} to %[[ELEM_OC]] step %{{.+}} iter_args(%[[ARG:[a-z0-9]+]]
// CHECK-NEXT:           %[[ST_ELEM:.+]] = tensor.extract_slice %[[ELEM]][%[[IV0]], %[[IV1]], %[[IV2]], %[[IV3]]]
// CHECK-SAME:                 [%[[SIZE_ELEM_N]], %[[SIZE_ELEM_OH]], %[[SIZE_ELEM_OW]], %[[SIZE_ELEM_OC]]]
// CHECK-NEXT:           %[[ST_ARG:.+]] = tensor.extract_slice %[[ARG]][%[[IV0]], %[[IV1]], %[[IV2]], %[[IV3]]]
// CHECK-SAME:                 [%[[SIZE_ELEM_N]], %[[SIZE_ELEM_OH]], %[[SIZE_ELEM_OW]], %[[SIZE_ELEM_OC]]]
// CHECK-NEXT:           %[[SIZE_ELEM_OC_2:.+]] = affine.min #[[BOUND2_MAP_2]](%[[IV3]], %[[IV2]])[%[[FILTER_OC]], %[[ELEM_OC]]]
// CHECK-NEXT:           %[[ST_FILTER:.+]] = tensor.extract_slice %[[FILTER]][0, 0, 0, %[[IV3]]]
// CHECK-SAME:                 [%[[FILTER_H]], %[[FILTER_W]], %[[FILTER_IC]], %[[SIZE_ELEM_OC_2]]]
// CHECK-NEXT:           %[[ST_FILL:.+]] = tensor.extract_slice %[[FILL]][%[[IV0]], %[[IV1]], %[[IV2]], %[[IV3]]]
// CHECK-SAME:                 [%[[SIZE_INPUT_N]], %[[SIZE_ELEM_OH]], %[[SIZE_ELEM_OW]], %[[SIZE_ELEM_OC_2]]]
// CHECK-NEXT:           %[[ST_CONV:.+]] = linalg.conv_2d_nhwc_hwcf
// CHECK-SAME:                 ins(%[[ST_INPUT]], %[[ST_FILTER]] : tensor<?x?x?x?xf32>, tensor<?x?x?x?xf32>)
// CHECK-SAME:                 outs(%[[ST_FILL]] : tensor<?x?x?x?xf32>) -> tensor<?x?x?x?xf32>
// CHECK-NEXT:           %[[ST_ADD:.+]] = linalg.generic
// CHECK-SAME:                 ins(%[[ST_CONV]], %[[ST_ELEM]] : tensor<?x?x?x?xf32>, tensor<?x?x?x?xf32>)
// CHECK-SAME:                 outs(%[[ST_ARG]] : tensor<?x?x?x?xf32>)
//      CHECK:           tensor.insert_slice %[[ST_ADD]] into %[[ARG]][%[[IV0]], %[[IV1]], %[[IV2]], %[[IV3]]]
// CHECK-SAME:                 [%[[SIZE_ELEM_N]], %[[SIZE_ELEM_OH]], %[[SIZE_ELEM_OW]], %[[SIZE_ELEM_OC]]]

// -----

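// Fusion of a tensor.pad producer into a tiled elementwise consumer. Since a
// 16x32 tile of the padded tensor may or may not overlap the 58x1 source, the
// fused code is expected to guard with scf.if: generate the tile directly
// (tensor.generate) when the source slice would be empty, and otherwise
// extract a slice of the source and re-pad it.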
#map = affine_map<(d0, d1) -> (d0, d1)>
//     CHECK: func @pad_generic_static
// CHECK-DAG:   %[[C0:.*]] = arith.constant 0 : index
// CHECK-DAG:   %[[C16:.*]] = arith.constant 16 : index
// CHECK-DAG:   %[[C32:.*]] = arith.constant 32 : index
// CHECK-DAG:   %[[C64:.*]] = arith.constant 64 : index
// CHECK-DAG:   %[[C128:.*]] = arith.constant 128 : index
//     CHECK:   scf.for %{{.*}} = %[[C0]] to %[[C64]] step %[[C16]]
//     CHECK:     %[[CMPI1:.*]] = arith.cmpi eq
//     CHECK:     scf.for %{{.*}} = %[[C0]] to %[[C128]] step %[[C32]]
//     CHECK:       %[[CMPI2:.*]] = arith.cmpi eq
//     CHECK:       %[[HASZERO:.*]] = arith.ori %[[CMPI2]], %[[CMPI1]] : i1
//     CHECK:       scf.if %[[HASZERO]]
//     CHECK:         tensor.generate
//     CHECK:       else
//     CHECK:         tensor.extract_slice
//     CHECK:         tensor.pad
//     CHECK:       tensor.extract_slice
//     CHECK:       tensor.extract_slice
//     CHECK:       linalg.generic
//     CHECK:       tensor.insert_slice
func.func @pad_generic_static(%small_input: tensor<58x1xf32>, %large_input: tensor<64x128xf32>) -> tensor<64x128xf32> {
  %c0 = arith.constant 0 : index
  %c1 = arith.constant 1 : index
  %c16 = arith.constant 16 : index
  %c32 = arith.constant 32 : index
  %zero = arith.constant 0.0 : f32

  %d0 = tensor.dim %large_input, %c0 : tensor<64x128xf32>
  %d1 = tensor.dim %large_input, %c1 : tensor<64x128xf32>

  %pad = tensor.pad %small_input low[4, 60] high[2, 67] {
  ^bb0(%arg0: index, %arg1: index):
    tensor.yield %zero : f32
  } : tensor<58x1xf32> to tensor<64x128xf32>

  %fill = linalg.fill ins(%zero : f32) outs(%large_input : tensor<64x128xf32>) -> tensor<64x128xf32>

  %for0 = scf.for %iv0 = %c0 to %d0 step %c16 iter_args(%arg0 = %fill) -> tensor<64x128xf32> {
    %for1 = scf.for %iv1 = %c0 to %d1 step %c32 iter_args(%arg1 = %arg0) -> tensor<64x128xf32> {
      %0 = tensor.extract_slice %pad[%iv0, %iv1][16, 32][1, 1] : tensor<64x128xf32> to tensor<16x32xf32>
      %1 = tensor.extract_slice %large_input[%iv0, %iv1][16, 32][1, 1] : tensor<64x128xf32> to tensor<16x32xf32>
      %2 = tensor.extract_slice %arg1[%iv0, %iv1][16, 32][1, 1] : tensor<64x128xf32> to tensor<16x32xf32>

      %add = linalg.generic
        {indexing_maps = [#map, #map, #map], iterator_types = ["parallel", "parallel"]}
        ins(%0, %1 : tensor<16x32xf32>, tensor<16x32xf32>) outs(%2 : tensor<16x32xf32>) {
      ^bb0(%arg4: f32, %arg5: f32, %arg6: f32):
        %result = arith.addf %arg4, %arg5 : f32
        linalg.yield %result : f32
      } -> tensor<16x32xf32>

      %insert = tensor.insert_slice %add into %arg1[%iv0, %iv1] [16, 32] [1, 1]  : tensor<16x32xf32> into tensor<64x128xf32>
      scf.yield %insert : tensor<64x128xf32>
    }
    scf.yield %for1 : tensor<64x128xf32>
  }
  return %for0 : tensor<64x128xf32>
}