// RUN: mlir-opt %s -pass-pipeline="builtin.module(func.func(convert-vector-to-scf,lower-affine,convert-scf-to-cf),convert-vector-to-llvm,finalize-memref-to-llvm,convert-func-to-llvm,convert-arith-to-llvm,convert-cf-to-llvm,reconcile-unrealized-casts)" | \
// RUN: mlir-runner -e entry -entry-point-result=void \
// RUN:   -shared-libs=%mlir_c_runner_utils | \
// RUN: FileCheck %s

// RUN: mlir-opt %s -pass-pipeline="builtin.module(func.func(convert-vector-to-scf{full-unroll=true},lower-affine,convert-scf-to-cf),convert-vector-to-llvm,finalize-memref-to-llvm,convert-func-to-llvm,convert-arith-to-llvm,convert-cf-to-llvm,reconcile-unrealized-casts)" | \
// RUN: mlir-runner -e entry -entry-point-result=void \
// RUN:   -shared-libs=%mlir_c_runner_utils | \
// RUN: FileCheck %s

func.func @transfer_read_1d(%A : memref<?xf32>, %base: index) {
  %fm42 = arith.constant -42.0: f32
  %f = vector.transfer_read %A[%base], %fm42
      {permutation_map = affine_map<(d0) -> (d0)>} :
    memref<?xf32>, vector<13xf32>
  vector.print %f: vector<13xf32>
  return
}

func.func @transfer_read_mask_1d(%A : memref<?xf32>, %base: index) {
  %fm42 = arith.constant -42.0: f32
  %m = arith.constant dense<[0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0]> : vector<13xi1>
  %f = vector.transfer_read %A[%base], %fm42, %m : memref<?xf32>, vector<13xf32>
  vector.print %f: vector<13xf32>
  return
}

func.func @transfer_read_inbounds_4(%A : memref<?xf32>, %base: index) {
  %fm42 = arith.constant -42.0: f32
  %f = vector.transfer_read %A[%base], %fm42
      {permutation_map = affine_map<(d0) -> (d0)>, in_bounds = [true]} :
    memref<?xf32>, vector<4xf32>
  vector.print %f: vector<4xf32>
  return
}

func.func @transfer_read_mask_inbounds_4(%A : memref<?xf32>, %base: index) {
  %fm42 = arith.constant -42.0: f32
  %m = arith.constant dense<[0, 1, 0, 1]> : vector<4xi1>
  %f = vector.transfer_read %A[%base], %fm42, %m {in_bounds = [true]}
      : memref<?xf32>, vector<4xf32>
  vector.print %f: vector<4xf32>
  return
}

func.func @transfer_write_1d(%A : memref<?xf32>, %base: index) {
  %f0 = arith.constant 0.0 : f32
  %vf0 = vector.splat %f0 : vector<4xf32>
  vector.transfer_write %vf0, %A[%base]
      {permutation_map = affine_map<(d0) -> (d0)>} :
    vector<4xf32>, memref<?xf32>
  return
}
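// Illustrative sketch (not part of the original test): a masked counterpart
// to @transfer_write_1d, analogous to the masked reads above. It is not
// called from @entry, so it does not affect the checked output. The function
// name and the mask constant are assumptions chosen for illustration.
func.func @transfer_write_mask_1d(%A : memref<?xf32>, %base: index) {
  %f0 = arith.constant 0.0 : f32
  %m = arith.constant dense<[1, 0, 1, 0]> : vector<4xi1>
  %vf0 = vector.splat %f0 : vector<4xf32>
  // Only lanes whose mask bit is set are stored; masked-off lanes leave
  // memory unchanged.
  vector.transfer_write %vf0, %A[%base], %m : vector<4xf32>, memref<?xf32>
  return
}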
func.func @entry() {
  %c0 = arith.constant 0: index
  %c1 = arith.constant 1: index
  %c2 = arith.constant 2: index
  %c3 = arith.constant 3: index
  %c4 = arith.constant 4: index
  %c5 = arith.constant 5: index
  %A = memref.alloc(%c5) : memref<?xf32>
  scf.for %i = %c0 to %c5 step %c1 {
    %i32 = arith.index_cast %i : index to i32
    %fi = arith.sitofp %i32 : i32 to f32
    memref.store %fi, %A[%i] : memref<?xf32>
  }
  // On input, memory contains [[ 0, 1, 2, 3, 4, xxx garbage xxx ]]
  // Read shifted by 2 and pad with -42:
  //   ( 2, 3, 4, -42, ..., -42)
  call @transfer_read_1d(%A, %c2) : (memref<?xf32>, index) -> ()
  // Read with mask and out-of-bounds access.
  call @transfer_read_mask_1d(%A, %c2) : (memref<?xf32>, index) -> ()
  // Write into memory shifted by 3. Memory now contains
  // [[ 0, 1, 2, 0, 0, xxx garbage xxx ]]
  call @transfer_write_1d(%A, %c3) : (memref<?xf32>, index) -> ()
  // Read shifted by 0 and pad with -42:
  //   ( 0, 1, 2, 0, 0, -42, ..., -42)
  call @transfer_read_1d(%A, %c0) : (memref<?xf32>, index) -> ()
  // Read 4 elements in bounds at offset 1, guaranteed not to overflow.
  // Exercises proper alignment.
  call @transfer_read_inbounds_4(%A, %c1) : (memref<?xf32>, index) -> ()
  // Read in bounds with a mask.
  call @transfer_read_mask_inbounds_4(%A, %c1) : (memref<?xf32>, index) -> ()

  memref.dealloc %A : memref<?xf32>

  return
}

// CHECK: ( 2, 3, 4, -42, -42, -42, -42, -42, -42, -42, -42, -42, -42 )
// CHECK: ( -42, -42, 4, -42, -42, -42, -42, -42, -42, -42, -42, -42, -42 )
// CHECK: ( 0, 1, 2, 0, 0, -42, -42, -42, -42, -42, -42, -42, -42 )
// CHECK: ( 1, 2, 0, 0 )
// CHECK: ( -42, 2, -42, 0 )
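// Worked example for the second CHECK line (explanatory note, not part of
// the original test): @transfer_read_mask_1d reads at %base = 2 from the
// 5-element buffer with mask [0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0].
// Lane i reads A[2 + i]; masked-off lanes and out-of-bounds lanes both
// yield the padding value -42. Lane 2 is the only lane that is both
// enabled and in bounds, reading A[4] = 4, hence ( -42, -42, 4, -42, ..., -42 ).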