// RUN: mlir-opt -split-input-file -convert-tensor-to-linalg -cse -verify-diagnostics %s | FileCheck %s

//===----------------------------------------------------------------------===//
// tensor.pad
//===----------------------------------------------------------------------===//
// CHECK-LABEL: func @generalize_pad_tensor_static_shape(
// CHECK-SAME: %[[IN:.*]]: tensor<1x28x28x1xf32>) -> tensor<1x32x32x1xf32> {
// CHECK: %[[C0:.*]] = arith.constant 0.000000e+00 : f32
// CHECK: %[[INIT:.*]] = tensor.empty() : tensor<1x32x32x1xf32>
// CHECK: %[[FILL:.*]] = linalg.fill ins(%[[C0]] : f32) outs(%[[INIT]] : tensor<1x32x32x1xf32>) -> tensor<1x32x32x1xf32>
// CHECK: %[[PADDED:.*]] = tensor.insert_slice %[[IN]] into %[[FILL]][0, 2, 2, 0] [1, 28, 28, 1] [1, 1, 1, 1] : tensor<1x28x28x1xf32> into tensor<1x32x32x1xf32>
// CHECK: return %[[PADDED]] : tensor<1x32x32x1xf32>
func.func @generalize_pad_tensor_static_shape(%arg0: tensor<1x28x28x1xf32>) -> tensor<1x32x32x1xf32> {
  %cst = arith.constant 0.000000e+00 : f32
  %0 = tensor.pad %arg0 low[0, 2, 2, 0] high[0, 2, 2, 0] {
  ^bb0(%arg1: index, %arg2: index, %arg3: index, %arg4: index):
    tensor.yield %cst : f32
  } : tensor<1x28x28x1xf32> to tensor<1x32x32x1xf32>
  return %0 : tensor<1x32x32x1xf32>
}

// -----

// CHECK-LABEL: func @generalize_pad_tensor_dynamic_shape(
// CHECK-SAME: %[[IN:.*]]: tensor<4x?x2x?xf32>,
// CHECK-SAME: %[[OFFSET:.*]]: index) -> tensor<4x?x?x?xf32> {
// CHECK-DAG: %[[CST:.*]] = arith.constant 0.000000e+00 : f32
// CHECK-DAG: %[[C1:.*]] = arith.constant 1 : index
// CHECK: %[[DIM1:.*]] = tensor.dim %[[IN]], %[[C1]] : tensor<4x?x2x?xf32>
// CHECK-DAG: %[[C2:.*]] = arith.constant 2 : index
// CHECK: %[[OUT_DIM2:.*]] = arith.addi %[[OFFSET]], %[[C2]] : index
// CHECK-DAG: %[[C3:.*]] = arith.constant 3 : index
// CHECK: %[[DIM3:.*]] = tensor.dim %[[IN]], %[[C3]] : tensor<4x?x2x?xf32>
// CHECK: %[[OUT_DIM3:.*]] = arith.addi %[[DIM3]], %[[OFFSET]] : index
// CHECK: %[[INIT:.*]] = tensor.empty(%[[DIM1]], %[[OUT_DIM2]], %[[OUT_DIM3]]) : tensor<4x?x?x?xf32>
// CHECK: %[[FILL:.*]] = linalg.fill ins(%[[CST]] : f32) outs(%[[INIT]] : tensor<4x?x?x?xf32>) -> tensor<4x?x?x?xf32>
// CHECK: %[[PADDED:.*]] = tensor.insert_slice %[[IN]] into %[[FILL]][0, 0, %[[OFFSET]], 0] [4, %[[DIM1]], 2, %[[DIM3]]] [1, 1, 1, 1] : tensor<4x?x2x?xf32> into tensor<4x?x?x?xf32>
// CHECK: return %[[PADDED]] : tensor<4x?x?x?xf32>
// CHECK: }
func.func @generalize_pad_tensor_dynamic_shape(%arg0: tensor<4x?x2x?xf32>, %arg1: index) -> tensor<4x?x?x?xf32> {
  %c0 = arith.constant 0 : index
  %cst = arith.constant 0.0 : f32
  %out = tensor.pad %arg0 low[%c0, %c0, %arg1, %c0] high[%c0, %c0, %c0, %arg1] {
  ^bb0(%gen_arg1: index, %gen_arg2: index, %gen_arg3: index, %gen_arg4: index):
    tensor.yield %cst : f32
  } : tensor<4x?x2x?xf32> to tensor<4x?x?x?xf32>
  return %out : tensor<4x?x?x?xf32>
}
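
// Both test cases above exercise the same generalization of tensor.pad: the
// pad is rewritten into a tensor.empty of the padded shape, a linalg.fill
// that fills it with the padding value, and a tensor.insert_slice that places
// the source tensor at the low-padding offsets.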