// RUN: mlir-opt -split-input-file -verify-diagnostics %s

func.func @test_conv_op_not_linalg_op(%arg0 : tensor<?xf32>, %arg1 : tensor<?xf32>,
    %arg2 : tensor<?xf32>) -> tensor<?xf32> {
  // expected-error @+1 {{expected a LinalgOp}}
  %0 = "test.conv_op_not_linalg_op"(%arg0, %arg1, %arg2)
      : (tensor<?xf32>, tensor<?xf32>, tensor<?xf32>) -> tensor<?xf32>
  return %0 : tensor<?xf32>
}

// -----

// Check for number of operands being >= 2.
#map = affine_map<(d0) -> (d0)>
func.func @test_conv_op_wrong_num_operands(%arg0 : tensor<?xf32>,
    %arg1 : tensor<?xf32>) -> tensor<?xf32> {
  // expected-error @+1 {{expected op with 2 inputs and 1 output}}
  %0 = test.linalg_conv_op {
      indexing_maps = [#map, #map],
      iterator_types = [#test.iterator_type<parallel>]}
      ins(%arg0 : tensor<?xf32>) outs(%arg1 : tensor<?xf32>) {
    ^bb0(%arg2 : f32, %arg3 : f32):
      linalg.yield %arg3 : f32
    } -> tensor<?xf32>
  return %0 : tensor<?xf32>
}

// -----

// Convolution input access must be either a plain loop dim or a
// convolved expression (dim + dim, optionally strided/dilated);
// a scaled single dim like d0 * 2 is rejected.
func.func @test_conv_op_wrong_input_indexing_map1(%arg0 : tensor<?xf32>,
    %arg1 : tensor<?xf32>, %arg2 : tensor<?xf32>) -> tensor<?xf32> {
  // expected-error @+1 {{unexpected input index map for convolution}}
  %0 = test.linalg_conv_op {
      indexing_maps = [affine_map<(d0, d1) -> (d0 * 2)>,
                       affine_map<(d0, d1) -> (d1)>,
                       affine_map<(d0, d1) -> (d0)>],
      iterator_types = [#test.iterator_type<parallel>,
                        #test.iterator_type<reduction>]}
      ins(%arg0, %arg1 : tensor<?xf32>, tensor<?xf32>)
      outs(%arg2 : tensor<?xf32>) {
    ^bb0(%arg3 : f32, %arg4 : f32, %arg5 : f32):
      linalg.yield %arg5 : f32
    } -> tensor<?xf32>
  return %0 : tensor<?xf32>
}

// -----

// A loop dim that appears in a convolved input expression (d0 + d1)
// may not also be used on its own in another input access.
func.func @test_conv_op_wrong_input_indexing_map2(%arg0 : tensor<?x?xf32>,
    %arg1 : tensor<?xf32>, %arg2 : tensor<?xf32>) -> tensor<?xf32> {
  // expected-error @+1 {{unexpected input index map for convolution}}
  %0 = test.linalg_conv_op {
      indexing_maps = [affine_map<(d0, d1) -> (d0 + d1, d0)>,
                       affine_map<(d0, d1) -> (d1)>,
                       affine_map<(d0, d1) -> (d0)>],
      iterator_types = [#test.iterator_type<parallel>,
                        #test.iterator_type<reduction>]}
      ins(%arg0, %arg1 : tensor<?x?xf32>, tensor<?xf32>)
      outs(%arg2 : tensor<?xf32>) {
    ^bb0(%arg3 : f32, %arg4 : f32, %arg5 : f32):
      linalg.yield %arg5 : f32
    } -> tensor<?xf32>
  return %0 : tensor<?xf32>
}

// -----

// The filter (second input) indexing map must be a projected
// permutation; a sum expression d1 + d0 is not.
func.func @test_conv_op_filter_index_map_not_projection(%arg0 : tensor<?xf32>,
    %arg1 : tensor<?xf32>, %arg2 : tensor<?xf32>) -> tensor<?xf32> {
  // expected-error @+1 {{expected output/filter indexing maps to be projected permutations}}
  %0 = test.linalg_conv_op {
      indexing_maps = [affine_map<(d0, d1) -> (d1)>,
                       affine_map<(d0, d1) -> (d1 + d0)>,
                       affine_map<(d0, d1) -> (d0)>],
      iterator_types = [#test.iterator_type<parallel>,
                        #test.iterator_type<reduction>]}
      ins(%arg0, %arg1 : tensor<?xf32>, tensor<?xf32>)
      outs(%arg2 : tensor<?xf32>) {
    ^bb0(%arg3 : f32, %arg4 : f32, %arg5 : f32):
      linalg.yield %arg5 : f32
    } -> tensor<?xf32>
  return %0 : tensor<?xf32>
}

// -----

// The output indexing map must likewise be a projected permutation.
func.func @test_conv_op_output_index_map_not_projection(%arg0 : tensor<?xf32>,
    %arg1 : tensor<?xf32>, %arg2 : tensor<?xf32>) -> tensor<?xf32> {
  // expected-error @+1 {{expected output/filter indexing maps to be projected permutations}}
  %0 = test.linalg_conv_op {
      indexing_maps = [affine_map<(d0, d1) -> (d0)>,
                       affine_map<(d0, d1) -> (d1)>,
                       affine_map<(d0, d1) -> (d0 + d1)>],
      iterator_types = [#test.iterator_type<parallel>,
                        #test.iterator_type<parallel>]}
      ins(%arg0, %arg1 : tensor<?xf32>, tensor<?xf32>)
      outs(%arg2 : tensor<?xf32>) {
    ^bb0(%arg3 : f32, %arg4 : f32, %arg5 : f32):
      linalg.yield %arg5 : f32
    } -> tensor<?xf32>
  return %0 : tensor<?xf32>
}

// -----

// Convolution op illegal if a loop dimension is used to access
// output, filter and is convolved.
func.func @test_conv_op_output_filter_convolved(%arg0 : tensor<?xf32>,
    %arg1 : tensor<?xf32>, %arg2 : tensor<?x?xf32>) -> tensor<?x?xf32> {
  // expected-error @+1 {{unexpected loop dimension for convolution op}}
  %0 = test.linalg_conv_op {
      indexing_maps = [affine_map<(d0, d1) -> (d0 + d1)>,
                       affine_map<(d0, d1) -> (d1)>,
                       affine_map<(d0, d1) -> (d0, d1)>],
      iterator_types = [#test.iterator_type<parallel>,
                        #test.iterator_type<parallel>]}
      ins(%arg0, %arg1 : tensor<?xf32>, tensor<?xf32>)
      outs(%arg2 : tensor<?x?xf32>) {
    ^bb0(%arg3 : f32, %arg4 : f32, %arg5 : f32):
      linalg.yield %arg5 : f32
    } -> tensor<?x?xf32>
  return %0 : tensor<?x?xf32>
}

// -----

// Convolution op illegal if a loop dimension is used only in the output.
func.func @test_conv_op_output_only_dim(%arg0 : tensor<?xf32>,
    %arg1 : tensor<?xf32>, %arg2 : tensor<?x?xf32>) -> tensor<?x?xf32> {
  // expected-error @+1 {{unexpected loop dimension for convolution op}}
  %0 = test.linalg_conv_op {
      indexing_maps = [affine_map<(d0, d1, d2) -> (d0 + d1)>,
                       affine_map<(d0, d1, d2) -> (d1)>,
                       affine_map<(d0, d1, d2) -> (d0, d2)>],
      iterator_types = [#test.iterator_type<parallel>,
                        #test.iterator_type<reduction>,
                        #test.iterator_type<parallel>]}
      ins(%arg0, %arg1 : tensor<?xf32>, tensor<?xf32>)
      outs(%arg2 : tensor<?x?xf32>) {
    ^bb0(%arg3 : f32, %arg4 : f32, %arg5 : f32):
      linalg.yield %arg5 : f32
    } -> tensor<?x?xf32>
  return %0 : tensor<?x?xf32>
}

// -----

// Convolution op illegal if a loop dimension is used only in the filter.
func.func @test_conv_op_filter_only_dim(%arg0 : tensor<?xf32>,
    %arg1 : tensor<?x?xf32>, %arg2 : tensor<?xf32>) -> tensor<?xf32> {
  // expected-error @+1 {{unexpected loop dimension for convolution op}}
  %0 = test.linalg_conv_op {
      indexing_maps = [affine_map<(d0, d1, d2) -> (d0 + d1)>,
                       affine_map<(d0, d1, d2) -> (d1, d2)>,
                       affine_map<(d0, d1, d2) -> (d0)>],
      iterator_types = [#test.iterator_type<parallel>,
                        #test.iterator_type<reduction>,
                        #test.iterator_type<reduction>]}
      ins(%arg0, %arg1 : tensor<?xf32>, tensor<?x?xf32>)
      outs(%arg2 : tensor<?xf32>) {
    ^bb0(%arg3 : f32, %arg4 : f32, %arg5 : f32):
      linalg.yield %arg5 : f32
    } -> tensor<?xf32>
  return %0 : tensor<?xf32>
}

// -----

// Convolution op illegal if a loop dimension is used only in the input.
func.func @test_conv_op_input_only_dim(%arg0 : tensor<?x?xf32>,
    %arg1 : tensor<?xf32>, %arg2 : tensor<?xf32>) -> tensor<?xf32> {
  // expected-error @+1 {{unexpected loop dimension for convolution op}}
  %0 = test.linalg_conv_op {
      indexing_maps = [affine_map<(d0, d1, d2) -> (d0 + d1, d2)>,
                       affine_map<(d0, d1, d2) -> (d1)>,
                       affine_map<(d0, d1, d2) -> (d0)>],
      iterator_types = [#test.iterator_type<parallel>,
                        #test.iterator_type<reduction>,
                        #test.iterator_type<reduction>]}
      ins(%arg0, %arg1 : tensor<?x?xf32>, tensor<?xf32>)
      outs(%arg2 : tensor<?xf32>) {
    ^bb0(%arg3 : f32, %arg4 : f32, %arg5 : f32):
      linalg.yield %arg5 : f32
    } -> tensor<?xf32>
  return %0 : tensor<?xf32>
}

// -----

// Convolution op illegal if a loop dimension accessing output is not parallel.
func.func @test_conv_op_non_output_access_loop_parallel(%arg0 : tensor<?xf32>,
    %arg1 : tensor<?xf32>, %arg2 : tensor<?xf32>) -> tensor<?xf32> {
  // expected-error @+1 {{expected all iterators not used to access outputs to be reduction}}
  %0 = test.linalg_conv_op {
      indexing_maps = [affine_map<(d0, d1) -> (d0 + d1)>,
                       affine_map<(d0, d1) -> (d1)>,
                       affine_map<(d0, d1) -> (d0)>],
      iterator_types = [#test.iterator_type<parallel>,
                        #test.iterator_type<parallel>]}
      ins(%arg0, %arg1 : tensor<?xf32>, tensor<?xf32>)
      outs(%arg2 : tensor<?xf32>) {
    ^bb0(%arg3 : f32, %arg4 : f32, %arg5 : f32):
      linalg.yield %arg5 : f32
    } -> tensor<?xf32>
  return %0 : tensor<?xf32>
}