// RUN: mlir-opt %s -one-shot-bufferize="allow-unknown-ops" -canonicalize -split-input-file | FileCheck %s

// Run fuzzer with different seeds.
// RUN: mlir-opt %s -one-shot-bufferize="test-analysis-only analysis-heuristic=fuzzer analysis-fuzzer-seed=23" -split-input-file -o /dev/null
// RUN: mlir-opt %s -one-shot-bufferize="test-analysis-only analysis-heuristic=fuzzer analysis-fuzzer-seed=59" -split-input-file -o /dev/null
// RUN: mlir-opt %s -one-shot-bufferize="test-analysis-only analysis-heuristic=fuzzer analysis-fuzzer-seed=91" -split-input-file -o /dev/null

// CHECK-LABEL: func @buffer_not_deallocated(
//  CHECK-SAME:     %[[t:.*]]: tensor<?xf32>
func.func @buffer_not_deallocated(%t : tensor<?xf32>, %c : i1) -> tensor<?xf32> {
  // CHECK: %[[m:.*]] = bufferization.to_memref %[[t]]
  // CHECK: %[[r:.*]] = scf.if %{{.*}} {
  %r = scf.if %c -> tensor<?xf32> {
    // CHECK: %[[some_op:.*]] = "test.some_op"
    // CHECK: %[[alloc:.*]] = memref.alloc(%[[some_op]])
    // CHECK: %[[casted:.*]] = memref.cast %[[alloc]]
    // CHECK: scf.yield %[[casted]]
    %sz = "test.some_op"() : () -> (index)
    %0 = bufferization.alloc_tensor(%sz) : tensor<?xf32>
    scf.yield %0 : tensor<?xf32>
  } else {
  // CHECK: } else {
    // CHECK: scf.yield %[[m]]
    scf.yield %t : tensor<?xf32>
  }
  // CHECK: }
  // CHECK: %[[r_tensor:.*]] = bufferization.to_tensor %[[r]]
  // CHECK: return %[[r_tensor]]
  return %r : tensor<?xf32>
}

// -----

// CHECK-LABEL: func @write_to_alloc_tensor_or_readonly_tensor(
//  CHECK-SAME:     %[[arg0:.*]]: tensor<i32>
func.func @write_to_alloc_tensor_or_readonly_tensor(%arg0: tensor<i32>,
                                                    %cond: i1, %val: i32)
  -> tensor<i32>
{
  // CHECK: %[[arg0_m:.*]] = bufferization.to_memref %[[arg0]]
  // CHECK: %[[r:.*]] = scf.if {{.*}} {
  // CHECK:   scf.yield %[[arg0_m]]
  // CHECK: } else {
  // CHECK:   %[[alloc:.*]] = memref.alloc
  // CHECK:   memref.store %{{.*}}, %[[alloc]]
  // CHECK:   %[[casted:.*]] = memref.cast %[[alloc]]
  // CHECK:   scf.yield %[[casted]]
  // CHECK: }
  // CHECK: %[[r_t:.*]] = bufferization.to_tensor %[[r]]
  // CHECK: return %[[r_t]]
  %3 = scf.if %cond -> (tensor<i32>) {
    scf.yield %arg0 : tensor<i32>
  } else {
    %7 = bufferization.alloc_tensor() : tensor<i32>
    %8 = tensor.insert %val into %7[] : tensor<i32>
    scf.yield %8 : tensor<i32>
  }
  return %3 : tensor<i32>
}