// RUN: mlir-opt --split-input-file %s | mlir-opt | FileCheck %s

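// This file round-trips tensor dialect ops through mlir-opt and checks the
// printed form with FileCheck. Under --split-input-file, each "-----"
// separator below starts an independent test case.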
// CHECK-LABEL: func @cast(
func.func @cast(%arg0: tensor<*xf32>, %arg1 : tensor<4x4xf32>, %arg2: tensor<?x?xf32>) {
  // CHECK: tensor.cast %{{.*}} : tensor<*xf32> to tensor<?x?xf32>
  %0 = tensor.cast %arg0 : tensor<*xf32> to tensor<?x?xf32>
  // CHECK: tensor.cast %{{.*}} : tensor<4x4xf32> to tensor<*xf32>
  %1 = tensor.cast %arg1 : tensor<4x4xf32> to tensor<*xf32>
  // CHECK: tensor.cast %{{.*}} : tensor<?x?xf32> to tensor<4x?xf32>
  %2 = tensor.cast %arg2 : tensor<?x?xf32> to tensor<4x?xf32>
  // CHECK: tensor.cast %{{.*}} : tensor<4x?xf32> to tensor<?x?xf32>
  %3 = tensor.cast %2 : tensor<4x?xf32> to tensor<?x?xf32>
  return
}

// -----

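// tensor.concat along leading, middle, and trailing dimensions, with operand
// and result shapes ranging from fully static to fully dynamic.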
// CHECK-LABEL: func @concat(
func.func @concat(%arg0: tensor<4x7x3xf32>, %arg1 : tensor<4x4x3xf32>, %arg2: tensor<?x?x?xf32>) {
  // CHECK: tensor.concat dim(0) %{{.*}} : (tensor<4x7x3xf32>) -> tensor<4x7x3xf32>
  %0 = tensor.concat dim(0) %arg0 : (tensor<4x7x3xf32>) -> tensor<4x7x3xf32>
  // CHECK: tensor.concat dim(1) %{{.*}} : (tensor<4x7x3xf32>, tensor<4x4x3xf32>) -> tensor<4x11x3xf32>
  %1 = tensor.concat dim(1) %arg0, %arg1 : (tensor<4x7x3xf32>, tensor<4x4x3xf32>) -> tensor<4x11x3xf32>
  // CHECK: tensor.concat dim(2) %{{.*}} : (tensor<4x7x3xf32>, tensor<?x?x?xf32>) -> tensor<?x?x?xf32>
  %2 = tensor.concat dim(2) %arg0, %arg2 : (tensor<4x7x3xf32>, tensor<?x?x?xf32>) -> tensor<?x?x?xf32>
  // CHECK: tensor.concat dim(1) %{{.*}} : (tensor<?x?x?xf32>, tensor<?x?x?xf32>) -> tensor<?x10x?xf32>
  %3 = tensor.concat dim(1) %arg2, %arg2 : (tensor<?x?x?xf32>, tensor<?x?x?xf32>) -> tensor<?x10x?xf32>
  // CHECK: tensor.concat dim(1) %{{.*}} : (tensor<?x?x?xf32>, tensor<4x4x3xf32>, tensor<4x7x3xf32>) -> tensor<4x?x3xf32>
  %4 = tensor.concat dim(1) %arg2, %arg1, %arg0 : (tensor<?x?x?xf32>, tensor<4x4x3xf32>, tensor<4x7x3xf32>) -> tensor<4x?x3xf32>
  return
}

// -----

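// tensor.empty with a dynamic dimension bound to an SSA operand; the variant
// below uses a tensor type that carries an encoding attribute.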
// CHECK-LABEL: func @empty(
//  CHECK-SAME:             %[[sz:.*]]: index
func.func @empty(%sz: index) -> tensor<5x?x6xf32> {
  // CHECK: tensor.empty(%[[sz]]) : tensor<5x?x6xf32>
  %0 = tensor.empty(%sz) : tensor<5x?x6xf32>
  return %0 : tensor<5x?x6xf32>
}

// -----

// CHECK-LABEL: func @empty_with_encoding(
//  CHECK-SAME:             %[[sz:.*]]: index
func.func @empty_with_encoding(%sz: index) -> tensor<5x?x6xf32, "foo"> {
  // CHECK: tensor.empty(%[[sz]]) : tensor<5x?x6xf32, "foo">
  %0 = tensor.empty(%sz) : tensor<5x?x6xf32, "foo">
  return %0 : tensor<5x?x6xf32, "foo">
}

// -----

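// Scalar element read from a dynamically shaped tensor.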
// CHECK-LABEL:   func @extract(
// CHECK-SAME:                  %[[TENSOR:.*]]: tensor<?x?x?xf32>,
// CHECK-SAME:                  %[[INDEX:.*]]: index) {
func.func @extract(%arg0: tensor<?x?x?xf32>, %arg1: index) {
  // CHECK: tensor.extract %[[TENSOR]][%[[INDEX]], %[[INDEX]], %[[INDEX]]] : tensor<?x?x?xf32>
  %0 = tensor.extract %arg0[%arg1, %arg1, %arg1] : tensor<?x?x?xf32>
  return
}

// -----

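// Scalar element insertion, producing a new tensor value.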
// CHECK-LABEL:   func @insert(
// CHECK-SAME:                  %[[SCALAR:.*]]: f32
// CHECK-SAME:                  %[[INDEX:.*]]: index
// CHECK-SAME:                  %[[DEST1:.*]]: tensor<?x?x?xf32>
func.func @insert(%arg0: f32, %arg1: index, %arg2: tensor<?x?x?xf32>) {
  // CHECK: tensor.insert %[[SCALAR]] into %[[DEST1]][%[[INDEX]], %[[INDEX]], %[[INDEX]]] : tensor<?x?x?xf32>
  %0 = tensor.insert %arg0 into %arg2[%arg1, %arg1, %arg1] : tensor<?x?x?xf32>
  return
}

// -----

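// tensor.from_elements with zero, one, or several scalar operands, including
// 0-D and multi-dimensional result types.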
// CHECK-LABEL: func @tensor.from_elements() {
func.func @tensor.from_elements() {
  %c0 = "arith.constant"() {value = 0: index} : () -> index
  // CHECK: tensor.from_elements %c0 : tensor<1xindex>
  %0 = tensor.from_elements %c0 : tensor<1xindex>

  %c1 = "arith.constant"() {value = 1: index} : () -> index
  // CHECK: tensor.from_elements %c0, %c1 : tensor<2xindex>
  %1 = tensor.from_elements %c0, %c1 : tensor<2xindex>

  %c0_f32 = "arith.constant"() {value = 0.0: f32} : () -> f32
  // CHECK: [[C0_F32:%.*]] = arith.constant
  // CHECK: tensor.from_elements [[C0_F32]] : tensor<1xf32>
  %2 = tensor.from_elements %c0_f32 : tensor<1xf32>

  // CHECK: tensor.from_elements : tensor<0xindex>
  %3 = tensor.from_elements : tensor<0xindex>

  // CHECK: tensor.from_elements %c0, %c1, %c0, %c1, %c0, %c1 : tensor<2x3xindex>
  %4 = tensor.from_elements %c0, %c1, %c0, %c1, %c0, %c1 : tensor<2x3xindex>

  // CHECK: tensor.from_elements %c0 : tensor<index>
  %5 = tensor.from_elements %c0 : tensor<index>
  return
}

// -----

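// tensor.generate with two dynamic dimensions and a body yielding a constant.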
// CHECK-LABEL: @tensor.generate
func.func @tensor.generate(%m : index, %n : index)
    -> tensor<?x3x?xf32> {
  %tnsr = tensor.generate %m, %n {
    ^bb0(%i : index, %j : index, %k : index):
      %elem = arith.constant 8.0 : f32
      tensor.yield %elem : f32
  } : tensor<?x3x?xf32>
  return %tnsr : tensor<?x3x?xf32>
}

// -----

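// tensor.reshape driven by shape operands: unranked to ranked and back.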
// CHECK-LABEL: func @tensor_reshape
func.func @tensor_reshape(%unranked: tensor<*xf32>, %shape1: tensor<1xi32>,
         %shape2: tensor<2xi32>, %shape3: tensor<?xi32>) -> tensor<*xf32> {
  %dyn_vec = tensor.reshape %unranked(%shape1)
               : (tensor<*xf32>, tensor<1xi32>) -> tensor<?xf32>
  %dyn_mat = tensor.reshape %dyn_vec(%shape2)
               : (tensor<?xf32>, tensor<2xi32>) -> tensor<?x?xf32>
  %new_unranked = tensor.reshape %dyn_mat(%shape3)
               : (tensor<?x?xf32>, tensor<?xi32>) -> tensor<*xf32>
  return %new_unranked : tensor<*xf32>
}

// -----

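// tensor.extract_slice with dynamic and static offsets/sizes/strides,
// including a rank-reducing slice.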
// CHECK-LABEL: func @slice({{.*}}) {
func.func @slice(%t: tensor<8x16x4xf32>, %idx : index) {
  %c0 = arith.constant 0 : index
  %c1 = arith.constant 1 : index

  // CHECK: tensor.extract_slice
  // CHECK-SAME: tensor<8x16x4xf32> to tensor<?x?x?xf32>
  %1 = tensor.extract_slice %t[%c0, %c0, %c0][%idx, %idx, %idx][%c1, %c1, %c1]
    : tensor<8x16x4xf32> to tensor<?x?x?xf32>

  // CHECK: tensor.extract_slice
  // CHECK-SAME: tensor<8x16x4xf32> to tensor<4x4x4xf32>
  %2 = tensor.extract_slice %t[0, 2, 0][4, 4, 4][1, 1, 1]
    : tensor<8x16x4xf32> to tensor<4x4x4xf32>

  // CHECK: tensor.extract_slice
  // CHECK-SAME: tensor<8x16x4xf32> to tensor<4x4xf32>
  %3 = tensor.extract_slice %t[0, 2, 0][4, 1, 4][1, 1, 1]
    : tensor<8x16x4xf32> to tensor<4x4xf32>

  return
}

// -----

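// tensor.insert_slice with mixed static/dynamic offsets, sizes, and strides,
// including inserting a lower-rank (2-D) source into a 3-D destination.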
// CHECK-LABEL: func @insert_slice({{.*}}) {
func.func @insert_slice(
    %t: tensor<8x16x4xf32>,
    %td: tensor<8x?x4xf32>,
    %t2: tensor<16x32x8xf32>,
    %t3: tensor<4x4xf32>,
    %idx : index,
    %sz : index) {
  %c0 = arith.constant 0 : index
  %c1 = arith.constant 1 : index

  // CHECK: tensor.insert_slice
  // CHECK-SAME: tensor<8x16x4xf32> into tensor<16x32x8xf32>
  %1 = tensor.insert_slice %t into %t2[%c0, %c0, %c0][8, 16, 4][%c1, %c1, %c1]
    : tensor<8x16x4xf32> into tensor<16x32x8xf32>

  // CHECK: tensor.insert_slice
  // CHECK-SAME: tensor<8x16x4xf32> into tensor<16x32x8xf32>
  %2 = tensor.insert_slice %t into %t2[%c0, %idx, %c0][8, 16, 4][%c1, 1, %c1]
    : tensor<8x16x4xf32> into tensor<16x32x8xf32>

  // CHECK: tensor.insert_slice
  // CHECK-SAME: tensor<4x4xf32> into tensor<8x16x4xf32>
  %3 = tensor.insert_slice %t3 into %t[0, 2, 0][4, 1, 4][1, 1, 1]
    : tensor<4x4xf32> into tensor<8x16x4xf32>

  // CHECK: tensor.insert_slice
  // CHECK-SAME: tensor<8x?x4xf32> into tensor<8x16x4xf32>
  %4 = tensor.insert_slice %td into %t[0, %idx, 0][8, %sz, 4][1, 1, 1]
    : tensor<8x?x4xf32> into tensor<8x16x4xf32>

  return
}

// -----

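// Collapsing to and expanding from a 0-D tensor with empty reassociation maps.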
func.func @tensor_reshape_zero_dim(%arg0 : tensor<1x1xf32>, %arg1 : tensor<f32>)
    -> (tensor<f32>, tensor<1x1xf32>) {
  %0 = tensor.collapse_shape %arg0 [] : tensor<1x1xf32> into tensor<f32>
  %1 = tensor.expand_shape %0 [] output_shape [1, 1] : tensor<f32> into tensor<1x1xf32>
  return %0, %1 : tensor<f32>, tensor<1x1xf32>
}
// CHECK-LABEL: func @tensor_reshape_zero_dim
//       CHECK:   tensor.collapse_shape %{{.*}} [] : tensor<1x1xf32> into tensor<f32>
//       CHECK:   tensor.expand_shape %{{.*}} [] output_shape [1, 1] : tensor<f32> into tensor<1x1xf32>

// -----

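// tensor.expand_shape with dynamic result dimensions supplied via output_shape.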
func.func @tensor_expand_shape_dynamic_dim(%arg0 : tensor<?x?xf32>, %sz0 : index, %sz1 : index, %sz2 : index)
    -> (tensor<5x?x?x?xf32>) {
  %1 = tensor.expand_shape %arg0 [[0, 1], [2, 3]] output_shape [5, %sz0, %sz1, %sz2] : tensor<?x?xf32> into tensor<5x?x?x?xf32>
  return %1 : tensor<5x?x?x?xf32>
}

// CHECK-LABEL:  func.func @tensor_expand_shape_dynamic_dim(%arg0: tensor<?x?xf32>, %arg1: index, %arg2: index, %arg3: index) -> tensor<5x?x?x?xf32> {
//       CHECK:    %expanded = tensor.expand_shape %arg0 {{\[\[}}0, 1], [2, 3{{\]\]}} output_shape [5, %arg1, %arg2, %arg3] : tensor<?x?xf32> into tensor<5x?x?x?xf32>
//       CHECK:    return %expanded : tensor<5x?x?x?xf32>
//       CHECK:  }

// -----

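// Collapsing a dynamically shaped group that contains a static dimension.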
func.func @legal_collapsing_reshape_dynamic_tensor
  (%arg0: tensor<?x?x?x4x?xf32>) -> tensor<?x?x?xf32>
{
  %0 = tensor.collapse_shape %arg0 [[0], [1], [2, 3, 4]] :
    tensor<?x?x?x4x?xf32> into tensor<?x?x?xf32>
  return %0 : tensor<?x?x?xf32>
}
//      CHECK: func @legal_collapsing_reshape_dynamic_tensor
//      CHECK:   tensor.collapse_shape
// CHECK-SAME:    [0], [1], [2, 3, 4]

// -----

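// tensor.rank in both generic and custom assembly form.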
func.func @rank(%t : tensor<4x4x?xf32>) {
  // CHECK: %{{.*}} = tensor.rank %{{.*}} : tensor<4x4x?xf32>
  %0 = "tensor.rank"(%t) : (tensor<4x4x?xf32>) -> index

  // CHECK: %{{.*}} = tensor.rank %{{.*}} : tensor<4x4x?xf32>
  %1 = tensor.rank %t : tensor<4x4x?xf32>
  return
}

// -----

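// The next four cases exercise tensor.pad: dynamic low/high padding, fully
// static padding, asymmetric high-only padding, and padding a dynamic input
// to a static result type.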
func.func @pad_dynamic(%arg0: tensor<1x2x2x?xf32>, %low: index, %high: index,
                  %pad_value: f32) -> tensor<6x?x?x?xf32> {
  %0 = tensor.pad %arg0 low[2, %low, 3, 3] high[3, 3, %high, 2] {
    ^bb0(%arg1: index, %arg2: index, %arg3: index, %arg4: index):
      tensor.yield %pad_value : f32
    } : tensor<1x2x2x?xf32> to tensor<6x?x?x?xf32>
  return %0 : tensor<6x?x?x?xf32>
}
// CHECK-LABEL: func @pad_dynamic
//  CHECK-SAME: %[[ARG0:[a-zA-Z0-9_]*]]
//  CHECK-SAME: %[[LOW:[a-zA-Z0-9_]*]]
//  CHECK-SAME: %[[HIGH:[a-zA-Z0-9_]*]]
//       CHECK:   tensor.pad %[[ARG0]]
//  CHECK-SAME:     low[2, %[[LOW]], 3, 3]
//  CHECK-SAME:     high[3, 3, %[[HIGH]], 2]
//       CHECK:    : tensor<1x2x2x?xf32> to tensor<6x?x?x?xf32>

// -----

func.func @pad_static(%arg0: tensor<3x4xf32>, %pad_value: f32) -> tensor<6x9xf32> {
  %0 = tensor.pad %arg0 low[1, 2] high[2, 3] {
    ^bb0(%arg1 : index, %arg2 : index):
      tensor.yield %pad_value : f32
    } : tensor<3x4xf32> to tensor<6x9xf32>
  return %0 : tensor<6x9xf32>
}
// CHECK-LABEL: func @pad_static
//  CHECK-SAME: %[[ARG0:[a-zA-Z0-9_]*]]
//       CHECK:   tensor.pad %[[ARG0]] low[1, 2] high[2, 3]
//       CHECK:    : tensor<3x4xf32> to tensor<6x9xf32>

// -----

func.func @pad_asymmetrical(%arg0: tensor<2x3xf32>, %ub0: index, %ub1: index,
                       %pad_value: f32) -> tensor<?x?xf32> {
  %0 = tensor.pad %arg0 low[0, 0] high[%ub0, %ub1] {
    ^bb0(%arg1: index, %arg2: index):
      tensor.yield %pad_value : f32
    } : tensor<2x3xf32> to tensor<?x?xf32>
  return %0 : tensor<?x?xf32>
}
// CHECK-LABEL: func @pad_asymmetrical
//  CHECK-SAME: %[[ARG0:[a-zA-Z0-9_]*]]
//  CHECK-SAME: %[[UB0:[a-zA-Z0-9_]*]]
//  CHECK-SAME: %[[UB1:[a-zA-Z0-9_]*]]
//       CHECK:   tensor.pad %[[ARG0]]
//  CHECK-SAME:     low[0, 0]
//  CHECK-SAME:     high[%[[UB0]], %[[UB1]]]
//       CHECK:    : tensor<2x3xf32> to tensor<?x?xf32>

// -----

func.func @pad_to_static_size(%arg0: tensor<?x?xf32>, %ub0: index, %ub1: index,
                         %pad_value: f32) -> tensor<2x3xf32> {
  %0 = tensor.pad %arg0 low[0, 0] high[%ub0, %ub1] {
    ^bb0(%arg1: index, %arg2: index):
      tensor.yield %pad_value : f32
    } : tensor<?x?xf32> to tensor<2x3xf32>
  return %0 : tensor<2x3xf32>
}
// CHECK-LABEL: func @pad_to_static_size
//  CHECK-SAME: %[[ARG0:[a-zA-Z0-9_]*]]
//  CHECK-SAME: %[[UB0:[a-zA-Z0-9_]*]]
//  CHECK-SAME: %[[UB1:[a-zA-Z0-9_]*]]
//       CHECK:   tensor.pad %[[ARG0]]
//  CHECK-SAME:     low[0, 0]
//  CHECK-SAME:     high[%[[UB0]], %[[UB1]]]
//       CHECK:    : tensor<?x?xf32> to tensor<2x3xf32>

// -----

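// tensor.splat of a scalar, in custom and generic form, plus a splat with
// dynamically sized result dimensions.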
// CHECK-LABEL: func @test_splat_op
// CHECK-SAME: [[S:%arg[0-9]+]]: f32
func.func @test_splat_op(%s : f32) {
  // CHECK: tensor.splat [[S]] : tensor<8xf32>
  %v = tensor.splat %s : tensor<8xf32>

  // CHECK: tensor.splat [[S]] : tensor<4xf32>
  %u = "tensor.splat"(%s) : (f32) -> tensor<4xf32>
  return
}

// -----

// CHECK-LABEL: func @test_splat_op_dynamic
// CHECK-SAME: [[S:arg[0-9]+]]: f32
// CHECK-SAME: [[M:arg[0-9]+]]: index
// CHECK-SAME: [[N:arg[0-9]+]]: index
func.func @test_splat_op_dynamic(%s: f32, %m: index, %n: index) {
  // CHECK: tensor.splat %[[S]][%[[M]], %[[N]]] : tensor<?x8x?xf32>
  %v = tensor.splat %s[%m, %n] : tensor<?x8x?xf32>
  return
}

// -----

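// tensor.gather/tensor.scatter with index and i32 indices, in both the
// default and the rank-reduced result form.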
// CHECK-LABEL: func.func @gather_scatter(
// CHECK-SAME:  %[[ARG0:.*]]: tensor<4x5x6xf32>,
// CHECK-SAME:  %[[ARG1:.*]]: tensor<1x3x2xindex>,
// CHECK-SAME:  %[[ARG2:.*]]: tensor<1x3x2xi32>) {
func.func @gather_scatter(
    %dest : tensor<4x5x6xf32>, %indices: tensor<1x3x2xindex>, %indices_i32: tensor<1x3x2xi32>) {
  // CHECK: %[[GATHER:.*]] = tensor.gather %[[ARG0]][%[[ARG2]]] gather_dims([1, 2]) unique : (tensor<4x5x6xf32>, tensor<1x3x2xi32>) -> tensor<1x3x4x1x1xf32>
  %gathered = tensor.gather %dest[%indices_i32] gather_dims([1, 2]) unique:
    (tensor<4x5x6xf32>, tensor<1x3x2xi32>) -> tensor<1x3x4x1x1xf32>
  // CHECK: %[[GATHER0:.*]] = tensor.gather %[[ARG0]][%[[ARG1]]] gather_dims([1, 2]) unique : (tensor<4x5x6xf32>, tensor<1x3x2xindex>) -> tensor<1x3x4xf32>
  %rank_reduced_gathered = tensor.gather %dest[%indices] gather_dims([1, 2]) unique:
    (tensor<4x5x6xf32>, tensor<1x3x2xindex>) -> tensor<1x3x4xf32>

  // CHECK: %{{.*}} = tensor.scatter %[[GATHER]] into %[[ARG0]][%[[ARG1]]] scatter_dims([1, 2]) unique : (tensor<1x3x4x1x1xf32>, tensor<4x5x6xf32>, tensor<1x3x2xindex>) -> tensor<4x5x6xf32>
  %scattered = tensor.scatter %gathered into %dest[%indices]
      scatter_dims([1, 2]) unique:
    (tensor<1x3x4x1x1xf32>, tensor<4x5x6xf32>, tensor<1x3x2xindex>) -> tensor<4x5x6xf32>
  // CHECK: %{{.*}} = tensor.scatter %[[GATHER0]] into %[[ARG0]][%[[ARG2]]] scatter_dims([1, 2]) unique : (tensor<1x3x4xf32>, tensor<4x5x6xf32>, tensor<1x3x2xi32>) -> tensor<4x5x6xf32>
  %rank_reduced_scattered = tensor.scatter %rank_reduced_gathered into %dest[%indices_i32]
      scatter_dims([1, 2]) unique:
    (tensor<1x3x4xf32>, tensor<4x5x6xf32>, tensor<1x3x2xi32>) -> tensor<4x5x6xf32>
  return
}

// -----

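// The remaining cases exercise tensor.pack and tensor.unpack: plain tiling,
// padding_value, outer_dims_perm, and fully or partially dynamic tile sizes.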
func.func @pack_nc_to_ncnc(%source: tensor<128x256xf32>, %dest: tensor<4x16x32x16xf32>) -> tensor<128x256xf32> {
  %0 = tensor.pack %source inner_dims_pos = [0, 1] inner_tiles = [32, 16] into %dest : tensor<128x256xf32> -> tensor<4x16x32x16xf32>
  %1 = tensor.empty() : tensor<128x256xf32>
  %2 = tensor.unpack %0 inner_dims_pos = [0, 1] inner_tiles = [32, 16] into %1 : tensor<4x16x32x16xf32> -> tensor<128x256xf32>
  return %2 : tensor<128x256xf32>
}

// CHECK-LABEL: func.func @pack_nc_to_ncnc(
// CHECK-SAME:  %[[SOURCE:.*]]: tensor<128x256xf32>,
// CHECK-SAME:  %[[DEST:.*]]: tensor<4x16x32x16xf32>)
// CHECK: %[[PACKED:.*]] = tensor.pack %[[SOURCE]] inner_dims_pos = [0, 1] inner_tiles = [32, 16] into %[[DEST]] : tensor<128x256xf32> -> tensor<4x16x32x16xf32>
// CHECK: %[[BUFF:.*]] = tensor.empty() : tensor<128x256xf32>
// CHECK: %{{.*}} = tensor.unpack %[[PACKED]] inner_dims_pos = [0, 1] inner_tiles = [32, 16] into %[[BUFF]] : tensor<4x16x32x16xf32> -> tensor<128x256xf32>

// -----

func.func @pack_nc_to_ncnc_with_padding(%source: tensor<13x15xf32>, %dest: tensor<2x8x8x2xf32>, %padding: f32) -> tensor<13x15xf32> {
  %0 = tensor.pack %source padding_value(%padding : f32) inner_dims_pos = [0, 1] inner_tiles = [8, 2] into %dest : tensor<13x15xf32> -> tensor<2x8x8x2xf32>
  %1 = tensor.empty() : tensor<13x15xf32>
  %2 = tensor.unpack %0 inner_dims_pos = [0, 1] inner_tiles = [8, 2] into %1 : tensor<2x8x8x2xf32> -> tensor<13x15xf32>
  return %2 : tensor<13x15xf32>
}

// CHECK-LABEL: func.func @pack_nc_to_ncnc_with_padding(
// CHECK-SAME:  %[[SOURCE:.*]]: tensor<13x15xf32>,
// CHECK-SAME:  %[[DEST:.*]]: tensor<2x8x8x2xf32>,
// CHECK-SAME:  %[[PADDING:.*]]: f32)
// CHECK: %[[PACKED:.*]] = tensor.pack %[[SOURCE]] padding_value(%[[PADDING]] : f32) inner_dims_pos = [0, 1] inner_tiles = [8, 2] into %[[DEST]] : tensor<13x15xf32> -> tensor<2x8x8x2xf32>
// CHECK: %[[BUFF:.*]] = tensor.empty() : tensor<13x15xf32>
// CHECK: %{{.*}} = tensor.unpack %[[PACKED]] inner_dims_pos = [0, 1] inner_tiles = [8, 2] into %[[BUFF]] : tensor<2x8x8x2xf32> -> tensor<13x15xf32>

// -----

func.func @pack_ck_to_kcck(%source: tensor<128x256xf32>, %dest: tensor<16x4x32x16xf32>) -> tensor<128x256xf32> {
  %0 = tensor.pack %source outer_dims_perm = [1, 0] inner_dims_pos = [0, 1] inner_tiles = [32, 16] into %dest : tensor<128x256xf32> -> tensor<16x4x32x16xf32>
  %1 = tensor.empty() : tensor<128x256xf32>
  %2 = tensor.unpack %0 outer_dims_perm = [1, 0] inner_dims_pos = [0, 1] inner_tiles = [32, 16] into %1 : tensor<16x4x32x16xf32> -> tensor<128x256xf32>
  return %2 : tensor<128x256xf32>
}

// CHECK-LABEL: func.func @pack_ck_to_kcck(
// CHECK-SAME:  %[[SOURCE:.*]]: tensor<128x256xf32>,
// CHECK-SAME:  %[[DEST:.*]]: tensor<16x4x32x16xf32>)
// CHECK: %[[PACKED:.*]] = tensor.pack %[[SOURCE]] outer_dims_perm = [1, 0] inner_dims_pos = [0, 1] inner_tiles = [32, 16] into %[[DEST]] : tensor<128x256xf32> -> tensor<16x4x32x16xf32>
// CHECK: %[[BUFF:.*]] = tensor.empty() : tensor<128x256xf32>
// CHECK: %{{.*}} = tensor.unpack %[[PACKED]] outer_dims_perm = [1, 0] inner_dims_pos = [0, 1] inner_tiles = [32, 16] into %[[BUFF]] : tensor<16x4x32x16xf32> -> tensor<128x256xf32>

// -----

func.func @pad_and_pack_fully_dynamic(%source: tensor<?x?xf32>, %dest: tensor<?x?x?x?xf32>, %pad: f32, %tile_n : index, %tile_m : index) -> tensor<?x?x?x?xf32> {
  %0 = tensor.pack %source padding_value(%pad : f32) inner_dims_pos = [0, 1] inner_tiles = [%tile_n, %tile_m] into %dest : tensor<?x?xf32> -> tensor<?x?x?x?xf32>
  return %0 : tensor<?x?x?x?xf32>
}

// CHECK-LABEL: func.func @pad_and_pack_fully_dynamic(
// CHECK-SAME:  %[[SOURCE:.*]]: tensor<?x?xf32>,
// CHECK-SAME:  %[[DEST:.*]]: tensor<?x?x?x?xf32>,
// CHECK-SAME:  %[[PAD:.*]]: f32,
// CHECK-SAME:  %[[TILE_N:.*]]: index,
// CHECK-SAME:  %[[TILE_M:.*]]: index)
// CHECK: %{{.*}} = tensor.pack %[[SOURCE]] padding_value(%[[PAD]] : f32) inner_dims_pos = [0, 1] inner_tiles = [%[[TILE_N]], %[[TILE_M]]] into %[[DEST]] : tensor<?x?xf32> -> tensor<?x?x?x?xf32>

// -----

func.func @pad_and_pack_partially_dynamic(%source: tensor<?x?xf32>, %dest: tensor<?x?x8x2xf32>, %pad: f32) -> tensor<?x?x8x2xf32> {
  %0 = tensor.pack %source padding_value(%pad : f32) inner_dims_pos = [0, 1] inner_tiles = [8, 2] into %dest : tensor<?x?xf32> -> tensor<?x?x8x2xf32>
  return %0 : tensor<?x?x8x2xf32>
}

// CHECK-LABEL: func.func @pad_and_pack_partially_dynamic(
// CHECK-SAME:  %[[SOURCE:.*]]: tensor<?x?xf32>,
// CHECK-SAME:  %[[DEST:.*]]: tensor<?x?x8x2xf32>,
// CHECK-SAME:  %[[PAD:.*]]: f32)
// CHECK: %{{.*}} = tensor.pack %[[SOURCE]] padding_value(%[[PAD]] : f32) inner_dims_pos = [0, 1] inner_tiles = [8, 2] into %[[DEST]] : tensor<?x?xf32> -> tensor<?x?x8x2xf32>

// -----

func.func @unpack_fully_dynamic(%source: tensor<?x?x?x?xf32>, %dest: tensor<?x?xf32>, %tile_n : index, %tile_m : index) -> tensor<?x?xf32> {
  %0 = tensor.unpack %source inner_dims_pos = [0, 1] inner_tiles = [%tile_n, %tile_m] into %dest : tensor<?x?x?x?xf32> -> tensor<?x?xf32>
  return %0 : tensor<?x?xf32>
}

// CHECK-LABEL: func.func @unpack_fully_dynamic(
// CHECK-SAME:  %[[SOURCE:.*]]: tensor<?x?x?x?xf32>,
// CHECK-SAME:  %[[DEST:.*]]: tensor<?x?xf32>,
// CHECK-SAME:  %[[TILE_N:.*]]: index,
// CHECK-SAME:  %[[TILE_M:.*]]: index)
// CHECK: %{{.*}} = tensor.unpack %[[SOURCE]] inner_dims_pos = [0, 1] inner_tiles = [%[[TILE_N]], %[[TILE_M]]] into %[[DEST]] : tensor<?x?x?x?xf32> -> tensor<?x?xf32>

// -----

func.func @unpack_partially_dynamic(%source: tensor<?x?x8x2xf32>, %dest: tensor<?x?xf32>) -> tensor<?x?xf32> {
  %0 = tensor.unpack %source inner_dims_pos = [0, 1] inner_tiles = [8, 2] into %dest : tensor<?x?x8x2xf32> -> tensor<?x?xf32>
  return %0 : tensor<?x?xf32>
}

// CHECK-LABEL: func.func @unpack_partially_dynamic(
// CHECK-SAME:  %[[SOURCE:.*]]: tensor<?x?x8x2xf32>,
// CHECK-SAME:  %[[DEST:.*]]: tensor<?x?xf32>)
// CHECK: %{{.*}} = tensor.unpack %[[SOURCE]] inner_dims_pos = [0, 1] inner_tiles = [8, 2] into %[[DEST]] : tensor<?x?x8x2xf32> -> tensor<?x?xf32>