// RUN: mlir-opt %s -pass-pipeline="builtin.module(func.func(convert-vector-to-scf))" -split-input-file | FileCheck %s

// Ensure that the permutation map is lowered (by inserting a transpose op)
// before lowering the vector.transfer_read.

// CHECK-LABEL: func @transfer_read_2d_mask_transposed(
// CHECK-DAG: %[[PADDING:.*]] = arith.constant dense<-4.200000e+01> : vector<9xf32>
// CHECK-DAG: %[[MASK:.*]] = arith.constant dense<{{.*}}> : vector<4x9xi1>
// CHECK: %[[MASK_MEM:.*]] = memref.alloca() : memref<vector<4x9xi1>>
// CHECK: memref.store %[[MASK]], %[[MASK_MEM]][] : memref<vector<4x9xi1>>
// CHECK: %[[MASK_CASTED:.*]] = vector.type_cast %[[MASK_MEM]] : memref<vector<4x9xi1>> to memref<4xvector<9xi1>>
// CHECK: scf.for {{.*}} {
// CHECK: scf.if {{.*}} {
// CHECK: %[[MASK_LOADED:.*]] = memref.load %[[MASK_CASTED]][%{{.*}}] : memref<4xvector<9xi1>>
// CHECK: %[[READ:.*]] = vector.transfer_read %{{.*}}, %{{.*}}, %[[MASK_LOADED]] : memref<?x?xf32>, vector<9xf32>
// CHECK: memref.store %[[READ]], %{{.*}} : memref<4xvector<9xf32>>
// CHECK: }
// CHECK: }
// CHECK: %[[RESULT:.*]] = memref.load %{{.*}} : memref<vector<4x9xf32>>
// CHECK: %[[RESULT_T:.*]] = vector.transpose %[[RESULT]], [1, 0] : vector<4x9xf32> to vector<9x4xf32>
// CHECK: return %[[RESULT_T]] : vector<9x4xf32>

// Vector load with mask + transpose.
// The transfer_read uses a non-identity permutation_map (d0,d1)->(d1,d0); the
// convert-vector-to-scf pass is expected to materialize it as a
// vector.transpose on the 4x9 result rather than in the per-row reads.
func.func @transfer_read_2d_mask_transposed(
    %A : memref<?x?xf32>, %base1: index, %base2: index) -> (vector<9x4xf32>) {
  %fm42 = arith.constant -42.0: f32
  %mask = arith.constant dense<[[1, 0, 1, 0, 1, 1, 1, 0, 1],
                                [0, 0, 1, 1, 1, 1, 1, 0, 1],
                                [1, 1, 1, 1, 1, 1, 1, 0, 1],
                                [0, 0, 1, 0, 1, 1, 1, 0, 1]]> : vector<4x9xi1>
  %f = vector.transfer_read %A[%base1, %base2], %fm42, %mask
      {permutation_map = affine_map<(d0, d1) -> (d1, d0)>} :
    memref<?x?xf32>, vector<9x4xf32>
  return %f : vector<9x4xf32>
}