1// RUN: mlir-opt %s -allow-unregistered-dialect -one-shot-bufferize="allow-unknown-ops" -split-input-file | FileCheck %s 2 3// Test bufferization using memref types that have no layout map. 4// RUN: mlir-opt %s -allow-unregistered-dialect -one-shot-bufferize="allow-unknown-ops unknown-type-conversion=identity-layout-map" -split-input-file | FileCheck %s --check-prefix=CHECK-NO-LAYOUT-MAP 5 6// Run fuzzer with different seeds. 7// RUN: mlir-opt %s -allow-unregistered-dialect -one-shot-bufferize="test-analysis-only analysis-heuristic=fuzzer analysis-fuzzer-seed=23" -split-input-file -o /dev/null 8// RUN: mlir-opt %s -allow-unregistered-dialect -one-shot-bufferize="test-analysis-only analysis-heuristic=fuzzer analysis-fuzzer-seed=59" -split-input-file -o /dev/null 9// RUN: mlir-opt %s -allow-unregistered-dialect -one-shot-bufferize="test-analysis-only analysis-heuristic=fuzzer analysis-fuzzer-seed=91" -split-input-file -o /dev/null 10 11// RUN: mlir-opt %s -allow-unregistered-dialect -one-shot-bufferize="dialect-filter=tensor,bufferization allow-unknown-ops" -canonicalize -split-input-file | FileCheck %s --check-prefix=CHECK-TENSOR 12// RUN: mlir-opt %s -allow-unregistered-dialect -one-shot-bufferize="dialect-filter=scf,bufferization allow-unknown-ops" -canonicalize -split-input-file | FileCheck %s --check-prefix=CHECK-SCF 13 14// CHECK-LABEL: func @use_of_unknown_op_1( 15// CHECK-SAME: %[[t1:.*]]: tensor<?xf32> 16// CHECK-NO-LAYOUT-MAP-LABEL: func @use_of_unknown_op_1( 17// CHECK-NO-LAYOUT-MAP-SAME: %[[t1:.*]]: tensor<?xf32> 18func.func @use_of_unknown_op_1(%t1: tensor<?xf32>) 19 -> vector<5xf32> { 20 // ToTensorOp is generated because the function is bufferized and has a 21 // memref block argument. 22 // CHECK: %[[dummy:.*]] = "test.dummy_op"(%[[t1]]) 23 // CHECK-NO-LAYOUT-MAP: %[[dummy:.*]] = "test.dummy_op"(%[[t1]]) 24 %0 = "test.dummy_op"(%t1) : (tensor<?xf32>) -> tensor<?xf32> 25 26 %idx = arith.constant 0 : index 27 %cst = arith.constant 0.0 : f32 28 // CHECK: %[[dummy_memref:.*]] = bufferization.to_memref %[[dummy]] : tensor<?xf32> to memref<?xf32, strided<[?], offset: ?>> 29 // CHECK: vector.transfer_read %[[dummy_memref]][%{{.*}}], %{{.*}} : memref<?xf32, strided<[?], offset: ?>> 30 // CHECK-NO-LAYOUT-MAP: %[[dummy_memref:.*]] = bufferization.to_memref %[[dummy]] : tensor<?xf32> to memref<?xf32> 31 // CHECK-NO-LAYOUT-MAP: vector.transfer_read %[[dummy_memref]][%{{.*}}], %{{.*}} : memref<?xf32> 32 %1 = vector.transfer_read %0[%idx], %cst : tensor<?xf32>, vector<5xf32> 33 return %1 : vector<5xf32> 34} 35 36// ----- 37 38// CHECK-LABEL: func @use_of_unknown_op_2( 39// CHECK-SAME: %[[t1:.*]]: tensor<?xf32> 40func.func @use_of_unknown_op_2(%t1: tensor<?xf32>) -> tensor<?xf32> { 41 // CHECK: %[[dummy1:.*]] = "test.dummy_op"(%[[t1]]) 42 %0 = "test.dummy_op"(%t1) : (tensor<?xf32>) -> tensor<?xf32> 43 // CHECK: %[[dummy2:.*]] = "test.another_dummy_op"(%[[dummy1]]) 44 %1 = "test.another_dummy_op"(%0) : (tensor<?xf32>) -> tensor<?xf32> 45 46 // CHECK: return %[[dummy2]] 47 return %1 : tensor<?xf32> 48} 49 50// ----- 51 52// CHECK-LABEL: func @use_of_unknown_op_3( 53// CHECK-SAME: %[[t1:.*]]: tensor<?xf32> 54func.func @use_of_unknown_op_3(%t1: tensor<?xf32>) 55 -> (vector<5xf32>, vector<5xf32>) { 56 %idx = arith.constant 0 : index 57 %cst = arith.constant 0.0 : f32 58 // CHECK: %[[m1:.*]] = bufferization.to_memref %[[t1]] 59 // CHECK: %[[v1:.*]] = vector.transfer_read %[[m1]] 60 %1 = vector.transfer_read %t1[%idx], %cst : tensor<?xf32>, vector<5xf32> 61 62 // CHECK: %[[dummy:.*]] = "test.dummy_op"(%[[t1]]) 63 %0 = "test.dummy_op"(%t1) : (tensor<?xf32>) -> tensor<?xf32> 64 // CHECK: %[[dummy_memref:.*]] = bufferization.to_memref %[[dummy]] : tensor<?xf32> to memref<?xf32, strided<[?], offset: ?>> 65 // CHECK: %[[v2:.*]] = vector.transfer_read %[[dummy_memref]] 66 %2 = vector.transfer_read %0[%idx], %cst : tensor<?xf32>, vector<5xf32> 67 68 // CHECK: return %[[v1]], %[[v2]] 69 return %1, %2 : vector<5xf32>, vector<5xf32> 70} 71 72// ----- 73 74// CHECK-LABEL: func @use_of_unknown_op_4( 75// CHECK-SAME: %[[t1:.*]]: tensor<?xf32> 76func.func @use_of_unknown_op_4(%t1: tensor<?xf32>) 77 -> (vector<5xf32>, tensor<?xf32>) { 78 %idx = arith.constant 0 : index 79 %cst = arith.constant 0.0 : f32 80 81 // CHECK: %[[dummy:.*]] = "test.dummy_op"(%[[t1]]) 82 %0 = "test.dummy_op"(%t1) : (tensor<?xf32>) -> tensor<?xf32> 83 84 // CHECK: %[[dummy_memref:.*]] = bufferization.to_memref %[[dummy]] 85 // CHECK: %[[v1:.*]] = vector.transfer_read %[[dummy_memref]] 86 %1 = vector.transfer_read %0[%idx], %cst : tensor<?xf32>, vector<5xf32> 87 88 // CHECK: %[[another_dummy:.*]] = "test.another_dummy_op"(%[[dummy]]) 89 %2 = "test.another_dummy_op"(%0) : (tensor<?xf32>) -> tensor<?xf32> 90 91 // CHECK: return %[[v1]], %[[another_dummy]] 92 return %1, %2 : vector<5xf32>, tensor<?xf32> 93} 94 95// ----- 96 97// CHECK-LABEL: func @use_of_bufferizable_op_in_unbufferizable_op 98// CHECK-SAME: %[[t1:.*]]: tensor<?xf32> 99func.func @use_of_bufferizable_op_in_unbufferizable_op( 100 %t1: tensor<?xf32>, %o: index, %s: index) -> (tensor<?xf32>, tensor<?xf32>) { 101 // CHECK: %[[m1:.*]] = bufferization.to_memref %[[t1]] 102 // CHECK: %[[subview:.*]] = memref.subview %[[m1]] 103 // The op must alloc because "test.dummy" may bufferize to a memory write. 104 // CHECK: %[[alloc:.*]] = memref.alloc 105 // CHECK: memref.copy %[[subview]], %[[alloc]] 106 %0 = tensor.extract_slice %t1[%o][%s][1] : tensor<?xf32> to tensor<?xf32> 107 // CHECK: %[[alloc_tensor:.*]] = bufferization.to_tensor %[[alloc]] 108 // CHECK: %[[dummy:.*]] = "test.dummy_op"(%[[alloc_tensor]]) 109 %1 = "test.dummy_op"(%0) : (tensor<?xf32>) -> tensor<?xf32> 110 // CHECK: return %[[alloc_tensor]], %[[dummy]] 111 return %0, %1 : tensor<?xf32>, tensor<?xf32> 112} 113 114// ----- 115 116// CHECK-LABEL: func @unused_unknown_op( 117// CHECK-SAME: %[[t1:.*]]: tensor<?xf32> 118func.func @unused_unknown_op(%t1 : tensor<?xf32>) -> vector<5xf32> { 119 %idx = arith.constant 0 : index 120 %cst = arith.constant 0.0 : f32 121 122 // CHECK: %[[m1:.*]] = bufferization.to_memref %[[t1]] 123 // CHECK: vector.transfer_read %[[m1]] 124 %1 = vector.transfer_read %t1[%idx], %cst : tensor<?xf32>, vector<5xf32> 125 126 // CHECK: "test.dummy_op"(%[[t1]]) 127 "test.dummy_op"(%t1) : (tensor<?xf32>) -> () 128 129 return %1 : vector<5xf32> 130} 131 132// ----- 133 134// CHECK-LABEL: func @unknown_op_may_read( 135func.func @unknown_op_may_read(%v: vector<5xf32>) 136 -> (tensor<10xf32>, tensor<10xf32>) { 137 %idx = arith.constant 0 : index 138 %cst = arith.constant 5.0 : f32 139 140 // One alloc for the alloc_tensor, another one because the transfer_write 141 // bufferizes out-of-place. 142 // CHECK: %[[m1:.*]] = memref.alloc() {{.*}} : memref<10xf32> 143 // CHECK: linalg.fill ins(%{{.*}}{{.*}}outs(%[[m1]] 144 // CHECK: %[[filled_tensor:.*]] = bufferization.to_tensor %[[m1]] 145 %t1 = bufferization.alloc_tensor() : tensor<10xf32> 146 %filled = linalg.fill ins(%cst : f32) outs(%t1 : tensor<10xf32>) -> tensor<10xf32> 147 148 // The transfer_write is out-of-place because "dummy_op" may read. 149 // CHECK: %[[alloc:.*]] = memref.alloc() {{.*}} : memref<10xf32> 150 // CHECK: memref.copy %[[m1]], %[[alloc]] 151 // CHECK: vector.transfer_write %{{.*}}, %[[alloc]] 152 // CHECK: %[[alloc_tensor:.*]] = bufferization.to_tensor %[[alloc]] 153 %1 = vector.transfer_write %v, %filled[%idx] : vector<5xf32>, tensor<10xf32> 154 155 // CHECK: %[[dummy:.*]] = "test.dummy_op"(%[[filled_tensor]]) 156 %2 = "test.dummy_op"(%filled) : (tensor<10xf32>) -> (tensor<10xf32>) 157 158 // CHECK: return %[[alloc_tensor]], %[[dummy]] 159 return %1, %2 : tensor<10xf32>, tensor<10xf32> 160} 161 162// ----- 163 164// CHECK-LABEL: func @unknown_op_not_writable 165// CHECK-SAME: %[[t1:.*]]: tensor<?xf32> 166func.func @unknown_op_not_writable( 167 %t1 : tensor<?xf32>, %v : vector<5xf32>, %idx : index) -> tensor<?xf32> { 168 // CHECK: %[[dummy:.*]] = "test.dummy_op"(%[[t1]]) 169 // CHECK: %[[dummy_memref:.*]] = bufferization.to_memref %[[dummy]] 170 %0 = "test.dummy_op"(%t1) : (tensor<?xf32>) -> (tensor<?xf32>) 171 172 // The result of an unknown op is not writable. Always generate a copy. 173 // CHECK: %[[dim:.*]] = memref.dim %[[dummy_memref]] 174 // CHECK: %[[alloc:.*]] = memref.alloc(%[[dim]]) 175 // CHECK: memref.copy %[[dummy_memref]], %[[alloc]] 176 // CHECK: vector.transfer_write %{{.*}}, %[[alloc]] 177 %1 = vector.transfer_write %v, %0[%idx] : vector<5xf32>, tensor<?xf32> 178 179 // CHECK: %[[alloc_tensor:.*]] = bufferization.to_tensor %[[alloc]] 180 // CHECK: return %[[alloc_tensor]] 181 return %1 : tensor<?xf32> 182} 183 184// ----- 185 186// CHECK-TENSOR-LABEL: func @simple_tensor_test( 187// CHECK-TENSOR-SAME: %[[t1:.*]]: tensor<?xf32> 188func.func @simple_tensor_test(%t1 : tensor<?xf32>, %f : f32) -> tensor<?xf32> { 189 // CHECK-TENSOR: %[[t1_memref:.*]] = bufferization.to_memref %[[t1]] 190 %c0 = arith.constant 0 : index 191 // CHECK-TENSOR: %[[alloc:.*]] = memref.alloc 192 // CHECK-TENSOR: memref.copy %[[t1_memref]], %[[alloc]] 193 // CHECK-TENSOR: memref.store %{{.*}}, %[[alloc]] 194 %0 = tensor.insert %f into %t1[%c0] : tensor<?xf32> 195 // CHECK-TENSOR: %[[casted_alloc:.*]] = bufferization.to_tensor %[[alloc]] 196 // CHECK-TENSOR: return %[[casted_alloc]] 197 return %0 : tensor<?xf32> 198} 199 200// ----- 201 202// CHECK-SCF-LABEL: func @simple_scf_if( 203// CHECK-SCF-SAME: %[[t1:.*]]: tensor<?xf32> {bufferization.writable = true}, %[[c:.*]]: i1, %[[pos:.*]]: index 204func.func @simple_scf_if(%t1: tensor<?xf32> {bufferization.writable = true}, %c: i1, %pos: index, %f: f32) 205 -> (tensor<?xf32>, index) { 206 // CHECK-SCF: %[[t1_memref:.*]] = bufferization.to_memref %[[t1]] 207 // CHECK-SCF: %[[r:.*]] = scf.if %[[c]] -> (memref<?xf32, strided{{.*}}>) { 208 %r1, %r2 = scf.if %c -> (tensor<?xf32>, index) { 209 // CHECK-SCF: scf.yield %[[t1_memref]] 210 scf.yield %t1, %pos : tensor<?xf32>, index 211 // CHECK-SCF: } else { 212 } else { 213 // CHECK-SCF: %[[insert:.*]] = tensor.insert %{{.*}} into %[[t1]][{{.*}}] 214 // CHECK-SCF: %[[insert_memref:.*]] = bufferization.to_memref %[[insert]] 215 %1 = tensor.insert %f into %t1[%pos] : tensor<?xf32> 216 // CHECK-SCF: scf.yield %[[insert_memref]] 217 scf.yield %1, %pos : tensor<?xf32>, index 218 } 219 220 // CHECK-SCF: %[[r_tensor:.*]] = bufferization.to_tensor %[[r]] 221 // CHECK-SCF: return %[[r_tensor]], %[[pos]] 222 return %r1, %r2 : tensor<?xf32>, index 223} 224