// RUN: mlir-opt -slice-analysis-test -split-input-file %s | FileCheck %s

// Backward slices are computed for each linalg.matmul below; each slice is
// expected to contain the memref.alloc ops that produce the matmul's operands
// and its output buffer.
func.func @slicing_linalg_op(%arg0 : index, %arg1 : index, %arg2 : index) {
  %a = memref.alloc(%arg0, %arg2) : memref<?x?xf32>
  %b = memref.alloc(%arg2, %arg1) : memref<?x?xf32>
  %c = memref.alloc(%arg0, %arg1) : memref<?x?xf32>
  %d = memref.alloc(%arg0, %arg1) : memref<?x?xf32>
  linalg.matmul ins(%a, %b : memref<?x?xf32>, memref<?x?xf32>)
    outs(%c : memref<?x?xf32>)
  linalg.matmul ins(%a, %b : memref<?x?xf32>, memref<?x?xf32>)
    outs(%d : memref<?x?xf32>)
  memref.dealloc %c : memref<?x?xf32>
  memref.dealloc %b : memref<?x?xf32>
  memref.dealloc %a : memref<?x?xf32>
  memref.dealloc %d : memref<?x?xf32>
  return
}

// CHECK-LABEL: func @slicing_linalg_op__backward_slice__0
// CHECK-SAME: %[[ARG0:[a-zA-Z0-9_]+]]: index
// CHECK-SAME: %[[ARG1:[a-zA-Z0-9_]+]]: index
// CHECK-SAME: %[[ARG2:[a-zA-Z0-9_]+]]: index
// CHECK-DAG: %[[A:.+]] = memref.alloc(%[[ARG0]], %[[ARG2]]) : memref<?x?xf32>
// CHECK-DAG: %[[B:.+]] = memref.alloc(%[[ARG2]], %[[ARG1]]) : memref<?x?xf32>
// CHECK-DAG: %[[C:.+]] = memref.alloc(%[[ARG0]], %[[ARG1]]) : memref<?x?xf32>
// CHECK: return

// CHECK-LABEL: func @slicing_linalg_op__backward_slice__1
// CHECK-SAME: %[[ARG0:[a-zA-Z0-9_]+]]: index
// CHECK-SAME: %[[ARG1:[a-zA-Z0-9_]+]]: index
// CHECK-SAME: %[[ARG2:[a-zA-Z0-9_]+]]: index
// CHECK-DAG: %[[A:.+]] = memref.alloc(%[[ARG0]], %[[ARG2]]) : memref<?x?xf32>
// CHECK-DAG: %[[B:.+]] = memref.alloc(%[[ARG2]], %[[ARG1]]) : memref<?x?xf32>
// CHECK-DAG: %[[C:.+]] = memref.alloc(%[[ARG0]], %[[ARG1]]) : memref<?x?xf32>
// CHECK: return

// -----

// The tensor.extract inside the second linalg.generic reads %collapsed from
// the enclosing region (a use from above), so the backward slice has to pull
// in the first generic and the tensor.collapse_shape that define it.
#map = affine_map<(d0, d1) -> (d0, d1)>
func.func @slice_use_from_above(%arg0: tensor<5x5xf32>, %arg1: tensor<5x5xf32>) {
  %0 = linalg.generic {indexing_maps = [#map, #map], iterator_types = ["parallel", "parallel"]} ins(%arg0 : tensor<5x5xf32>) outs(%arg1 : tensor<5x5xf32>) {
  ^bb0(%in: f32, %out: f32):
    %2 = arith.addf %in, %in : f32
    linalg.yield %2 : f32
  } -> tensor<5x5xf32>
  %collapsed = tensor.collapse_shape %0 [[0, 1]] : tensor<5x5xf32> into tensor<25xf32>
  %1 = linalg.generic {indexing_maps = [#map, #map], iterator_types = ["parallel", "parallel"]} ins(%0 : tensor<5x5xf32>) outs(%arg1 : tensor<5x5xf32>) {
  ^bb0(%in: f32, %out: f32):
    %c2 = arith.constant 2 : index
    %extracted = tensor.extract %collapsed[%c2] : tensor<25xf32>
    %2 = arith.addf %extracted, %extracted : f32
    linalg.yield %2 : f32
  } -> tensor<5x5xf32>
  return
}

// CHECK-LABEL: func @slice_use_from_above__backward_slice__0
// CHECK-SAME: %[[ARG0:[a-zA-Z0-9_]+]]: tensor
// CHECK: %[[A:.+]] = linalg.generic {{.*}} ins(%[[ARG0]]
// CHECK: %[[B:.+]] = tensor.collapse_shape %[[A]]
// CHECK: return