// RUN: mlir-opt %s -transform-interpreter -split-input-file | FileCheck %s

func.func @masked_static_vectorize_nd_tensor_extract_with_affine_apply_contiguous(
    %src: tensor<80x16xf32>,
    %output : tensor<1x3xf32>,
    %idx: index) -> tensor<1x3xf32> {

  %c79 = arith.constant 79 : index
  %1 = linalg.generic {
    indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>],
    iterator_types = ["parallel", "parallel"]
  } outs(%output : tensor<1x3xf32>) {
  ^bb0(%out: f32):
    %2 = linalg.index 1 : index
    %3 = affine.apply affine_map<(d0, d1) -> (d0 + d1)>(%2, %idx)
    %extracted = tensor.extract %src[%c79, %3] : tensor<80x16xf32>
    linalg.yield %extracted : f32
  } -> tensor<1x3xf32>
  return %1 : tensor<1x3xf32>
}

// CHECK-LABEL: func.func @masked_static_vectorize_nd_tensor_extract_with_affine_apply_contiguous
// CHECK-SAME: %[[SRC:.*]]: tensor<80x16xf32>,
// CHECK-SAME: %[[OUTPUT:.*]]: tensor<1x3xf32>,
// CHECK-SAME: %[[IDX_IN:.*]]: index) -> tensor<1x3xf32> {

/// Create the mask
// CHECK-DAG: %[[DIM_0:.*]] = arith.constant 1 : index
// CHECK-DAG: %[[DIM_1:.*]] = arith.constant 3 : index
// CHECK-DAG: %[[C79:.*]] = arith.constant 79 : index
// CHECK: %[[MASK:.*]] = vector.create_mask %[[DIM_0]], %[[DIM_1]] : vector<1x4xi1>

/// TODO: This transfer_read is redundant - remove
// CHECK: vector.mask %[[MASK]] { vector.transfer_read {{.*}} {in_bounds = [true, true]} : tensor<1x3xf32>, vector<1x4xf32> } : vector<1x4xi1> -> vector<1x4xf32>

/// Calculate the index vector
// CHECK: %[[STEP:.*]] = vector.step : vector<4xindex>
// CHECK: %[[IDX_BC:.*]] = vector.broadcast %[[IDX_IN]] : index to vector<4xindex>
// CHECK: %[[IDX_VEC:.*]] = arith.addi %[[STEP]], %[[IDX_BC]] : vector<4xindex>
// CHECK: %[[SC:.*]] = vector.shape_cast %[[IDX_VEC]] : vector<4xindex> to vector<4xindex>

/// Extract the starting point from the index vector
// CHECK: %[[IDX_START:.*]] = vector.extract %[[SC]][0] : index from vector<4xindex>

/// Final read and write
// CHECK: %[[READ:.*]] = vector.mask %[[MASK]] { vector.transfer_read %[[SRC]]{{\[}}%[[C79]], %[[IDX_START]]], {{.*}} {in_bounds = [true, true]} : tensor<80x16xf32>, vector<1x4xf32> } : vector<1x4xi1> -> vector<1x4xf32>
// CHECK: %[[C0_1:.*]] = arith.constant 0 : index
// CHECK: vector.mask %[[MASK]] { vector.transfer_write %[[READ]], %[[OUTPUT]]{{\[}}%[[C0_1]], %[[C0_1]]] {in_bounds = [true, true]} : vector<1x4xf32>, tensor<1x3xf32> } : vector<1x4xi1> -> tensor<1x3xf32>

module attributes {transform.with_named_sequence} {
  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
    %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
    transform.structured.vectorize %0 vector_sizes [1, 4] {vectorize_nd_extract} : !transform.any_op
    transform.yield
  }
}

// -----

// Identical to the above, but with scalable vectors.
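// The vector and mask types become vector<1x[4]xf32> / vector<1x[4]xi1>,
// matching the scalable tile size in vector_sizes [1, [4]] below.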
func.func @masked_static_vectorize_nd_tensor_extract_with_affine_apply_contiguous_scalable(
    %src: tensor<80x16xf32>,
    %output : tensor<1x3xf32>,
    %idx: index) -> tensor<1x3xf32> {

  %c79 = arith.constant 79 : index
  %1 = linalg.generic {
    indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>],
    iterator_types = ["parallel", "parallel"]
  } outs(%output : tensor<1x3xf32>) {
  ^bb0(%out: f32):
    %2 = linalg.index 1 : index
    %3 = affine.apply affine_map<(d0, d1) -> (d0 + d1)>(%2, %idx)
    %extracted = tensor.extract %src[%c79, %3] : tensor<80x16xf32>
    linalg.yield %extracted : f32
  } -> tensor<1x3xf32>
  return %1 : tensor<1x3xf32>
}

// CHECK-LABEL: func.func @masked_static_vectorize_nd_tensor_extract_with_affine_apply_contiguous_scalable
// CHECK-SAME: %[[SRC:.*]]: tensor<80x16xf32>,
// CHECK-SAME: %[[OUTPUT:.*]]: tensor<1x3xf32>,
// CHECK-SAME: %[[IDX_IN:.*]]: index) -> tensor<1x3xf32> {

/// Create the mask
// CHECK-DAG: %[[DIM_0:.*]] = arith.constant 1 : index
// CHECK-DAG: %[[DIM_1:.*]] = arith.constant 3 : index
// CHECK-DAG: %[[C79:.*]] = arith.constant 79 : index
// CHECK: %[[MASK:.*]] = vector.create_mask %[[DIM_0]], %[[DIM_1]] : vector<1x[4]xi1>

/// TODO: This transfer_read is redundant - remove
// CHECK: vector.mask %[[MASK]] { vector.transfer_read {{.*}} {in_bounds = [true, true]} : tensor<1x3xf32>, vector<1x[4]xf32> } : vector<1x[4]xi1> -> vector<1x[4]xf32>

/// Calculate the index vector
// CHECK: %[[STEP:.*]] = vector.step : vector<[4]xindex>
// CHECK: %[[IDX_BC:.*]] = vector.broadcast %[[IDX_IN]] : index to vector<[4]xindex>
// CHECK: %[[IDX_VEC:.*]] = arith.addi %[[STEP]], %[[IDX_BC]] : vector<[4]xindex>
// CHECK: %[[SC:.*]] = vector.shape_cast %[[IDX_VEC]] : vector<[4]xindex> to vector<[4]xindex>

/// Extract the starting point from the index vector
// CHECK: %[[IDX_START:.*]] = vector.extract %[[SC]][0] : index from vector<[4]xindex>

/// Final read and write
// CHECK: %[[READ:.*]] = vector.mask %[[MASK]] { vector.transfer_read %[[SRC]]{{\[}}%[[C79]], %[[IDX_START]]], {{.*}} {in_bounds = [true, true]} : tensor<80x16xf32>, vector<1x[4]xf32> } : vector<1x[4]xi1> -> vector<1x[4]xf32>
// CHECK: %[[C0_1:.*]] = arith.constant 0 : index
// CHECK: vector.mask %[[MASK]] { vector.transfer_write %[[READ]], %[[OUTPUT]]{{\[}}%[[C0_1]], %[[C0_1]]] {in_bounds = [true, true]} : vector<1x[4]xf32>, tensor<1x3xf32> } : vector<1x[4]xi1> -> tensor<1x3xf32>

module attributes {transform.with_named_sequence} {
  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
    %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
    transform.structured.vectorize %0 vector_sizes [1, [4]] {vectorize_nd_extract} : !transform.any_op
    transform.yield
  }
}

// -----
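
// Same as the static contiguous case above, but with dynamically shaped
// tensors. The mask bounds are computed with tensor.dim on the output tensor
// instead of being folded into constants.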
func.func @masked_dynamic_vectorize_nd_tensor_extract_with_affine_apply_contiguous(
    %src: tensor<?x?xf32>,
    %output : tensor<?x?xf32>,
    %idx: index) -> tensor<?x?xf32> {

  %c79 = arith.constant 79 : index
  %1 = linalg.generic {
    indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>],
    iterator_types = ["parallel", "parallel"]
  } outs(%output : tensor<?x?xf32>) {
  ^bb0(%out: f32):
    %2 = linalg.index 1 : index
    %3 = affine.apply affine_map<(d0, d1) -> (d0 + d1)>(%2, %idx)
    %extracted = tensor.extract %src[%c79, %3] : tensor<?x?xf32>
    linalg.yield %extracted : f32
  } -> tensor<?x?xf32>
  return %1 : tensor<?x?xf32>
}

// CHECK-LABEL: func.func @masked_dynamic_vectorize_nd_tensor_extract_with_affine_apply_contiguous(
// CHECK-SAME: %[[SRC:[a-zA-Z0-9]*]]: tensor<?x?xf32>,
// CHECK-SAME: %[[OUTPUT:[a-zA-Z0-9]*]]: tensor<?x?xf32>,
// CHECK-SAME: %[[IDX:.*]]: index)

/// Create the mask
// CHECK: %[[C79:.*]] = arith.constant 79 : index
// CHECK: %[[DIM_0_IDX:.*]] = arith.constant 0 : index
// CHECK: %[[DIM_0:.*]] = tensor.dim %[[OUTPUT]], %[[DIM_0_IDX]] : tensor<?x?xf32>
// CHECK: %[[DIM_1_IDX:.*]] = arith.constant 1 : index
// CHECK: %[[DIM_1:.*]] = tensor.dim %[[OUTPUT]], %[[DIM_1_IDX]] : tensor<?x?xf32>
// CHECK: %[[MASK:.*]] = vector.create_mask %[[DIM_0]], %[[DIM_1]] : vector<1x4xi1>

/// TODO: This transfer_read is redundant - remove
// CHECK: vector.mask %[[MASK]] { vector.transfer_read %[[OUTPUT]]{{.*}} {in_bounds = [true, true]} : tensor<?x?xf32>, vector<1x4xf32> } : vector<1x4xi1> -> vector<1x4xf32>

/// Calculate the index vector
// CHECK: %[[STEP:.*]] = vector.step : vector<4xindex>
// CHECK: %[[IDX_BC:.*]] = vector.broadcast %[[IDX]] : index to vector<4xindex>
// CHECK: %[[IDX_VEC:.*]] = arith.addi %[[STEP]], %[[IDX_BC]] : vector<4xindex>
// CHECK: %[[SC:.*]] = vector.shape_cast %[[IDX_VEC]] : vector<4xindex> to vector<4xindex>

/// Extract the starting point from the index vector
// CHECK: %[[IDX_START:.*]] = vector.extract %[[SC]][0] : index from vector<4xindex>

/// Final read and write
// CHECK: %[[READ:.*]] = vector.mask %[[MASK]] { vector.transfer_read %[[SRC]]{{\[}}%[[C79]], %[[IDX_START]]], {{.*}} {in_bounds = [true, true]} : tensor<?x?xf32>, vector<1x4xf32> } : vector<1x4xi1> -> vector<1x4xf32>
// CHECK: %[[WRITE:.*]] = vector.mask %[[MASK]] { vector.transfer_write %[[READ]], %[[OUTPUT]]{{.*}} {in_bounds = [true, true]} : vector<1x4xf32>, tensor<?x?xf32> } : vector<1x4xi1> -> tensor<?x?xf32>

module attributes {transform.with_named_sequence} {
  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
    %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
    transform.structured.vectorize %0 vector_sizes [1, 4] {vectorize_nd_extract} : !transform.any_op
    transform.yield
  }
}

// -----
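
// Identical to the above, but with scalable vectors.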
func.func @masked_dynamic_vectorize_nd_tensor_extract_with_affine_apply_contiguous_scalable(
    %src: tensor<?x?xf32>,
    %output : tensor<?x?xf32>,
    %idx: index) -> tensor<?x?xf32> {

  %c79 = arith.constant 79 : index
  %1 = linalg.generic {
    indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>],
    iterator_types = ["parallel", "parallel"]
  } outs(%output : tensor<?x?xf32>) {
  ^bb0(%out: f32):
    %2 = linalg.index 1 : index
    %3 = affine.apply affine_map<(d0, d1) -> (d0 + d1)>(%2, %idx)
    %extracted = tensor.extract %src[%c79, %3] : tensor<?x?xf32>
    linalg.yield %extracted : f32
  } -> tensor<?x?xf32>
  return %1 : tensor<?x?xf32>
}

// CHECK-LABEL: func.func @masked_dynamic_vectorize_nd_tensor_extract_with_affine_apply_contiguous_scalable(
// CHECK-SAME: %[[SRC:[a-zA-Z0-9]*]]: tensor<?x?xf32>,
// CHECK-SAME: %[[OUTPUT:[a-zA-Z0-9]*]]: tensor<?x?xf32>,
// CHECK-SAME: %[[IDX:.*]]: index)

/// Create the mask
// CHECK: %[[C79:.*]] = arith.constant 79 : index
// CHECK: %[[DIM_0_IDX:.*]] = arith.constant 0 : index
// CHECK: %[[DIM_0:.*]] = tensor.dim %[[OUTPUT]], %[[DIM_0_IDX]] : tensor<?x?xf32>
// CHECK: %[[DIM_1_IDX:.*]] = arith.constant 1 : index
// CHECK: %[[DIM_1:.*]] = tensor.dim %[[OUTPUT]], %[[DIM_1_IDX]] : tensor<?x?xf32>
// CHECK: %[[MASK:.*]] = vector.create_mask %[[DIM_0]], %[[DIM_1]] : vector<1x[4]xi1>

/// TODO: This transfer_read is redundant - remove
// CHECK: vector.mask %[[MASK]] { vector.transfer_read %[[OUTPUT]]{{.*}} {in_bounds = [true, true]} : tensor<?x?xf32>, vector<1x[4]xf32> } : vector<1x[4]xi1> -> vector<1x[4]xf32>

/// Calculate the index vector
// CHECK: %[[STEP:.*]] = vector.step : vector<[4]xindex>
// CHECK: %[[IDX_BC:.*]] = vector.broadcast %[[IDX]] : index to vector<[4]xindex>
// CHECK: %[[IDX_VEC:.*]] = arith.addi %[[STEP]], %[[IDX_BC]] : vector<[4]xindex>
// CHECK: %[[SC:.*]] = vector.shape_cast %[[IDX_VEC]] : vector<[4]xindex> to vector<[4]xindex>

/// Extract the starting point from the index vector
// CHECK: %[[IDX_START:.*]] = vector.extract %[[SC]][0] : index from vector<[4]xindex>

/// Final read and write
// CHECK: %[[READ:.*]] = vector.mask %[[MASK]] { vector.transfer_read %[[SRC]]{{\[}}%[[C79]], %[[IDX_START]]], {{.*}} {in_bounds = [true, true]} : tensor<?x?xf32>, vector<1x[4]xf32> } : vector<1x[4]xi1> -> vector<1x[4]xf32>
// CHECK: %[[WRITE:.*]] = vector.mask %[[MASK]] { vector.transfer_write %[[READ]], %[[OUTPUT]]{{.*}} {in_bounds = [true, true]} : vector<1x[4]xf32>, tensor<?x?xf32> } : vector<1x[4]xi1> -> tensor<?x?xf32>

module attributes {transform.with_named_sequence} {
  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
    %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
    transform.structured.vectorize %0 vector_sizes [1, [4]] {vectorize_nd_extract} : !transform.any_op
    transform.yield
  }
}

// -----
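
// Here the affine.apply result indexes the leading (non-contiguous) dimension
// of the source tensor, so the extract is vectorized as a vector.gather
// rather than a contiguous transfer_read.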
func.func @masked_vectorize_nd_tensor_extract_with_affine_apply_gather(%6: tensor<80x16xf32>, %arg0: index, %extracted_slice : tensor<1x3xf32>) -> tensor<1x3xf32> {
  %c16 = arith.constant 16 : index
  %1 = linalg.generic {
    indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>],
    iterator_types = ["parallel", "parallel"]
  } outs(%extracted_slice : tensor<1x3xf32>) {
  ^bb0(%out: f32):
    %2 = linalg.index 1 : index
    %3 = affine.apply affine_map<(d0, d1) -> (d0 + d1)>(%2, %arg0)
    %extracted = tensor.extract %6[%3, %c16] : tensor<80x16xf32>
    linalg.yield %extracted : f32
  } -> tensor<1x3xf32>
  return %1 : tensor<1x3xf32>
}

// CHECK-LABEL: func.func @masked_vectorize_nd_tensor_extract_with_affine_apply_gather
// CHECK-DAG: %[[VAL_4:.*]] = arith.constant 1 : index
// CHECK-DAG: %[[VAL_5:.*]] = arith.constant 3 : index
// CHECK: %[[VAL_8:.*]] = vector.create_mask %[[VAL_4]], %[[VAL_5]] : vector<1x4xi1>
// CHECK: %[[VAL_9:.*]] = vector.mask %[[VAL_8]] { vector.transfer_read {{.*}} {in_bounds = [true, true]} : tensor<1x3xf32>, vector<1x4xf32> } : vector<1x4xi1> -> vector<1x4xf32>
// CHECK: %[[VAL_11:.*]] = vector.broadcast {{.*}} : index to vector<4xindex>
// CHECK: %[[VAL_12:.*]] = arith.addi {{.*}} : vector<4xindex>
// CHECK: %[[VAL_16:.*]] = vector.broadcast {{.*}} : vector<4xindex> to vector<1x4xindex>
// CHECK: %[[VAL_18:.*]] = tensor.dim {{.*}} : tensor<80x16xf32>
// CHECK: %[[VAL_19:.*]] = vector.broadcast {{.*}} : index to vector<1x4xindex>
// CHECK: %[[VAL_20:.*]] = arith.muli {{.*}} : vector<1x4xindex>
// CHECK: %[[VAL_22:.*]] = arith.addi {{.*}} : vector<1x4xindex>
// CHECK: %[[VAL_23:.*]] = vector.mask %[[VAL_8]] { vector.gather {{.*}} : tensor<80x16xf32>, vector<1x4xindex>, vector<1x4xi1>, vector<1x4xf32> into vector<1x4xf32> } : vector<1x4xi1> -> vector<1x4xf32>
// CHECK: %[[VAL_25:.*]] = vector.mask %[[VAL_8]] { vector.transfer_write {{.*}} {in_bounds = [true, true]} : vector<1x4xf32>, tensor<1x3xf32> } : vector<1x4xi1> -> tensor<1x3xf32>

module attributes {transform.with_named_sequence} {
  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
    %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
    transform.structured.vectorize %0 vector_sizes [1, 4] {vectorize_nd_extract} : !transform.any_op
    transform.yield
  }
}

// -----
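
// Dynamic-shape variant of the gather case above. Both the mask bounds and
// the row stride used to linearize the gather indices are read with
// tensor.dim.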
func.func @masked_dynamic_vectorize_nd_tensor_extract_with_affine_apply_gather(%6: tensor<?x?xf32>, %arg0: index, %extracted_slice : tensor<?x?xf32>) -> tensor<?x?xf32> {
  %c16 = arith.constant 16 : index
  %1 = linalg.generic {
    indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>],
    iterator_types = ["parallel", "parallel"]
  } outs(%extracted_slice : tensor<?x?xf32>) {
  ^bb0(%out: f32):
    %2 = linalg.index 1 : index
    %3 = affine.apply affine_map<(d0, d1) -> (d0 + d1)>(%2, %arg0)
    %extracted = tensor.extract %6[%3, %c16] : tensor<?x?xf32>
    linalg.yield %extracted : f32
  } -> tensor<?x?xf32>
  return %1 : tensor<?x?xf32>
}

// CHECK-LABEL: func.func @masked_dynamic_vectorize_nd_tensor_extract_with_affine_apply_gather(
// CHECK-SAME: %[[VAL_0:.*]]: tensor<?x?xf32>,
// CHECK-SAME: %[[VAL_1:.*]]: index,
// CHECK-SAME: %[[VAL_2:.*]]: tensor<?x?xf32>) -> tensor<?x?xf32> {
// CHECK: %[[VAL_3:.*]] = arith.constant 16 : index
// CHECK: %[[VAL_4:.*]] = arith.constant 0 : index
// CHECK: %[[VAL_5:.*]] = tensor.dim %[[VAL_2]], %[[VAL_4]] : tensor<?x?xf32>
// CHECK: %[[VAL_6:.*]] = arith.constant 1 : index
// CHECK: %[[VAL_7:.*]] = tensor.dim %[[VAL_2]], %[[VAL_6]] : tensor<?x?xf32>
// CHECK: %[[VAL_8:.*]] = arith.constant 0 : index
// CHECK: %[[VAL_9:.*]] = arith.constant 0.000000e+00 : f32
// CHECK: %[[VAL_10:.*]] = vector.create_mask %[[VAL_5]], %[[VAL_7]] : vector<1x4xi1>
// CHECK: %[[VAL_11:.*]] = vector.mask %[[VAL_10]] { vector.transfer_read %[[VAL_2]]{{\[}}%[[VAL_8]], %[[VAL_8]]], %[[VAL_9]] {in_bounds = [true, true]} : tensor<?x?xf32>, vector<1x4xf32> } : vector<1x4xi1> -> vector<1x4xf32>
// CHECK: %[[VAL_12:.*]] = vector.step : vector<4xindex>
// CHECK: %[[VAL_13:.*]] = vector.broadcast %[[VAL_1]] : index to vector<4xindex>
// CHECK: %[[VAL_14:.*]] = arith.addi %[[VAL_12]], %[[VAL_13]] : vector<4xindex>
// CHECK: %[[VAL_15:.*]] = arith.constant dense<true> : vector<1x4xi1>
// CHECK: %[[VAL_16:.*]] = arith.constant dense<0.000000e+00> : vector<1x4xf32>
// CHECK: %[[VAL_17:.*]] = arith.constant 0 : index
// CHECK: %[[VAL_18:.*]] = vector.broadcast %[[VAL_14]] : vector<4xindex> to vector<1x4xindex>
// CHECK: %[[VAL_19:.*]] = arith.constant 1 : index
// CHECK: %[[VAL_20:.*]] = tensor.dim %[[VAL_0]], %[[VAL_19]] : tensor<?x?xf32>
// CHECK: %[[VAL_21:.*]] = vector.broadcast %[[VAL_20]] : index to vector<1x4xindex>
// CHECK: %[[VAL_22:.*]] = arith.muli %[[VAL_18]], %[[VAL_21]] : vector<1x4xindex>
// CHECK: %[[VAL_23:.*]] = arith.constant dense<16> : vector<1x4xindex>
// CHECK: %[[VAL_24:.*]] = arith.addi %[[VAL_23]], %[[VAL_22]] : vector<1x4xindex>
// CHECK: %[[VAL_25:.*]] = vector.mask %[[VAL_10]] { vector.gather %[[VAL_0]]{{\[}}%[[VAL_17]], %[[VAL_17]]] {{\[}}%[[VAL_24]]], %[[VAL_15]], %[[VAL_16]] : tensor<?x?xf32>, vector<1x4xindex>, vector<1x4xi1>, vector<1x4xf32> into vector<1x4xf32> } : vector<1x4xi1> -> vector<1x4xf32>
// CHECK: %[[VAL_26:.*]] = arith.constant 0 : index
// CHECK: %[[VAL_27:.*]] = vector.mask %[[VAL_10]] { vector.transfer_write %[[VAL_25]], %[[VAL_2]]{{\[}}%[[VAL_26]], %[[VAL_26]]] {in_bounds = [true, true]} : vector<1x4xf32>, tensor<?x?xf32> } : vector<1x4xi1> -> tensor<?x?xf32>
// CHECK: return %[[VAL_27]] : tensor<?x?xf32>
// CHECK: }

module attributes {transform.with_named_sequence} {
  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
    %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
    transform.structured.vectorize %0 vector_sizes [1, 4] {vectorize_nd_extract} : !transform.any_op
    transform.yield
  }
}

// -----
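
// Both extract indices are loop-invariant constants, so the index vector fed
// to vector.gather is built from dense constant splats.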
#map1 = affine_map<(d0, d1) -> (d0, d1)>
func.func @extract_masked_vectorize(%arg0: tensor<?x?xf32>, %arg1: tensor<?x?xf32>) -> tensor<?x?xf32> {
  %c1 = arith.constant 1 : index
  %c2 = arith.constant 2 : index
  %2 = linalg.generic {
    indexing_maps = [#map1],
    iterator_types = ["parallel", "parallel"]
  } outs(%arg1 : tensor<?x?xf32>) {
  ^bb0(%arg3: f32):
    %7 = tensor.extract %arg0[%c1, %c2] : tensor<?x?xf32>
    linalg.yield %7 : f32
  } -> tensor<?x?xf32>
  return %2 : tensor<?x?xf32>
}

// CHECK-LABEL: func.func @extract_masked_vectorize(
// CHECK-SAME: %[[VAL_0:.*]]: tensor<?x?xf32>,
// CHECK-SAME: %[[VAL_1:.*]]: tensor<?x?xf32>) -> tensor<?x?xf32> {
// CHECK: %[[VAL_2:.*]] = arith.constant 1 : index
// CHECK: %[[VAL_3:.*]] = arith.constant 2 : index
// CHECK: %[[VAL_4:.*]] = arith.constant 0 : index
// CHECK: %[[VAL_5:.*]] = tensor.dim %[[VAL_1]], %[[VAL_4]] : tensor<?x?xf32>
// CHECK: %[[VAL_6:.*]] = arith.constant 1 : index
// CHECK: %[[VAL_7:.*]] = tensor.dim %[[VAL_1]], %[[VAL_6]] : tensor<?x?xf32>
// CHECK: %[[VAL_8:.*]] = arith.constant 0 : index
// CHECK: %[[VAL_9:.*]] = arith.constant 0.000000e+00 : f32
// CHECK: %[[VAL_10:.*]] = vector.create_mask %[[VAL_5]], %[[VAL_7]] : vector<3x3xi1>
// CHECK: %[[VAL_11:.*]] = vector.mask %[[VAL_10]] { vector.transfer_read %[[VAL_1]]{{\[}}%[[VAL_8]], %[[VAL_8]]], %[[VAL_9]] {in_bounds = [true, true]} : tensor<?x?xf32>, vector<3x3xf32> } : vector<3x3xi1> -> vector<3x3xf32>
// CHECK: %[[VAL_12:.*]] = arith.constant dense<true> : vector<3x3xi1>
// CHECK: %[[VAL_13:.*]] = arith.constant dense<0.000000e+00> : vector<3x3xf32>
// CHECK: %[[VAL_14:.*]] = arith.constant 0 : index
// CHECK: %[[VAL_15:.*]] = arith.constant dense<1> : vector<3x3xindex>
// CHECK: %[[VAL_16:.*]] = arith.constant 1 : index
// CHECK: %[[VAL_17:.*]] = tensor.dim %[[VAL_0]], %[[VAL_16]] : tensor<?x?xf32>
// CHECK: %[[VAL_18:.*]] = vector.broadcast %[[VAL_17]] : index to vector<3x3xindex>
// CHECK: %[[VAL_19:.*]] = arith.muli %[[VAL_15]], %[[VAL_18]] : vector<3x3xindex>
// CHECK: %[[VAL_20:.*]] = arith.constant dense<2> : vector<3x3xindex>
// CHECK: %[[VAL_21:.*]] = arith.addi %[[VAL_20]], %[[VAL_19]] : vector<3x3xindex>
// CHECK: %[[VAL_22:.*]] = vector.mask %[[VAL_10]] { vector.gather %[[VAL_0]]{{\[}}%[[VAL_14]], %[[VAL_14]]] {{\[}}%[[VAL_21]]], %[[VAL_12]], %[[VAL_13]] : tensor<?x?xf32>, vector<3x3xindex>, vector<3x3xi1>, vector<3x3xf32> into vector<3x3xf32> } : vector<3x3xi1> -> vector<3x3xf32>
// CHECK: %[[VAL_23:.*]] = arith.constant 0 : index
// CHECK: %[[VAL_24:.*]] = vector.mask %[[VAL_10]] { vector.transfer_write %[[VAL_22]], %[[VAL_1]]{{\[}}%[[VAL_23]], %[[VAL_23]]] {in_bounds = [true, true]} : vector<3x3xf32>, tensor<?x?xf32> } : vector<3x3xi1> -> tensor<?x?xf32>

module attributes {transform.with_named_sequence} {
  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
    %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
    transform.structured.vectorize %0 vector_sizes [3, 3] {vectorize_nd_extract} : !transform.any_op
    transform.yield
  }
}

// -----

#map1 = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
func.func @tensor_extract_dynamic_shape(%arg1: tensor<123x321xf32>, %arg2: tensor<1x?x8xf32>) -> tensor<1x?x8xf32> {
  %c1 = arith.constant 1 : index
  %c2 = arith.constant 2 : index
  %2 = linalg.generic {
    indexing_maps = [#map1],
    iterator_types = ["parallel", "parallel", "parallel"]
  } outs(%arg2 : tensor<1x?x8xf32>) {
  ^bb0(%arg3: f32):
    %idx_0 = linalg.index 0 : index
    %idx_1 = linalg.index 1 : index
    %idx = arith.addi %idx_0, %idx_1 : index
    %7 = tensor.extract %arg1[%c1, %idx] : tensor<123x321xf32>
    linalg.yield %7 : f32
  } -> tensor<1x?x8xf32>
  return %2 : tensor<1x?x8xf32>
}

// TODO: Make sure that this is vectorized as "scalar broadcast" when only
// vectorising the 2nd dimension.
// CHECK-LABEL: func.func @tensor_extract_dynamic_shape(
// CHECK-SAME: %[[ARG_1:.*]]: tensor<123x321xf32>,
// CHECK-SAME: %[[ARG_2:.*]]: tensor<1x?x8xf32>) -> tensor<1x?x8xf32> {
// CHECK: %[[C2:.*]] = arith.constant 2 : index
// CHECK: %[[C1_1:.*]] = arith.constant 1 : index
// CHECK: %[[C1_2:.*]] = arith.constant 1 : index
// CHECK: %[[DIM:.*]] = tensor.dim %[[ARG_2]], %[[C1_2]] : tensor<1x?x8xf32>
// CHECK: %[[C8:.*]] = arith.constant 8 : index
// CHECK: %[[MASK:.*]] = vector.create_mask %[[C1_1]], %[[DIM]], %[[C8]] : vector<1x3x8xi1>
// CHECK: %[[MASK_2:.*]] = arith.constant dense<true> : vector<1x3x8xi1>
// CHECK: %[[FALLTHROUGH:.*]] = arith.constant dense<0.000000e+00> : vector<1x3x8xf32>
// CHECK: %[[C0_1:.*]] = arith.constant 0 : index
// CHECK: vector.mask %[[MASK]] { vector.gather %[[ARG_1]][%[[C0_1]], %[[C0_1]]] [%{{.*}}], %[[MASK_2]], %[[FALLTHROUGH]] : tensor<123x321xf32>, vector<1x3x8xindex>, vector<1x3x8xi1>, vector<1x3x8xf32> into vector<1x3x8xf32> } : vector<1x3x8xi1> -> vector<1x3x8xf32>

module attributes {transform.with_named_sequence} {
  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
    %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
    transform.structured.vectorize %0 vector_sizes [1, 3, 8] {vectorize_nd_extract} : !transform.any_op
    transform.yield
  }
}

// -----
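
// The extract indices are identical for every output element, so the result
// is vectorized as a scalar read that is broadcast via a (0, 0, 0)
// permutation map rather than as a gather.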
#map = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
func.func @scalar_broadcast(%init : tensor<1x1x3xi32>, %src: tensor<1x3x2x4xi32>, %idx : index) -> tensor<1x1x3xi32> {

  %c0 = arith.constant 0 : index

  %res = linalg.generic {
    indexing_maps = [#map],
    iterator_types = ["parallel", "parallel", "parallel"]}
    outs(%init : tensor<1x1x3xi32>) {
  ^bb0(%out: i32):
    %val = tensor.extract %src[%idx, %idx, %idx, %idx] : tensor<1x3x2x4xi32>
    linalg.yield %val : i32
  } -> tensor<1x1x3xi32>

  return %res : tensor<1x1x3xi32>
}

// CHECK: #[[$MAP:.+]] = affine_map<(d0, d1, d2, d3) -> (0, 0, 0)>
// CHECK-LABEL: func.func @scalar_broadcast(
// CHECK-SAME: %[[INIT:.*]]: tensor<1x1x3xi32>,
// CHECK-SAME: %[[SRC:.*]]: tensor<1x3x2x4xi32>,
// CHECK-SAME: %[[IDX:.*]]: index) -> tensor<1x1x3xi32> {

/// Compute the mask for saving the final result
// CHECK: %[[C1:.*]] = arith.constant 1 : index
// CHECK: %[[C1_2:.*]] = arith.constant 1 : index
// CHECK: %[[C3:.*]] = arith.constant 3 : index
// CHECK: %[[MASK_RES:.*]] = vector.create_mask %[[C1]], %[[C1_2]], %[[C3]] : vector<1x1x4xi1>

/// Read and broadcast the scalar
// CHECK: %[[PAD:.*]] = arith.constant 0 : i32
// CHECK: %[[MASK_READ:.*]] = vector.constant_mask [1] : vector<1xi1>
// CHECK: %[[READ:.*]] = vector.mask %[[MASK_READ]] {
// CHECK-SAME: vector.transfer_read %[[SRC]]{{\[}}%[[IDX]], %[[IDX]], %[[IDX]], %[[IDX]]], %[[PAD]]
// CHECK-SAME: {in_bounds = [true, true, true], permutation_map = #[[$MAP]]} : tensor<1x3x2x4xi32>, vector<1x1x4xi32>
// CHECK-SAME: } : vector<1xi1> -> vector<1x1x4xi32>

/// Save the result in the output tensor
// CHECK: vector.mask %[[MASK_RES]] {
// CHECK-SAME: vector.transfer_write %[[READ]], %[[INIT]]{{.*}} {in_bounds = [true, true, true]} : vector<1x1x4xi32>, tensor<1x1x3xi32>
// CHECK-SAME: } : vector<1x1x4xi1> -> tensor<1x1x3xi32>

module attributes {transform.with_named_sequence} {
  transform.named_sequence @__transform_main(%module: !transform.any_op {transform.readonly}) {
    %0 = transform.structured.match ops{["linalg.generic"]} in %module : (!transform.any_op) -> !transform.any_op
    transform.structured.vectorize %0 vector_sizes [1, 1, 4] {vectorize_nd_extract} : !transform.any_op
    transform.yield
  }
}