// RUN: mlir-opt %s \
// RUN: | mlir-opt -gpu-lower-to-nvvm-pipeline="cubin-format=%gpu_compilation_format" \
// RUN: | mlir-runner \
// RUN:   --shared-libs=%mlir_cuda_runtime \
// RUN:   --shared-libs=%mlir_runner_utils \
// RUN:   --shared-libs=%mlir_c_runner_utils \
// RUN:   --entry-point-result=void \
// RUN: | FileCheck %s

// Launches a single block with one thread per element of %arg1 and stores
// %arg0 into every element.
func.func @other_func(%arg0 : f32, %arg1 : memref<?xf32>) {
  %cst = arith.constant 1 : index
  %c0 = arith.constant 0 : index
  %cst2 = memref.dim %arg1, %c0 : memref<?xf32>
  gpu.launch blocks(%bx, %by, %bz) in (%grid_x = %cst, %grid_y = %cst, %grid_z = %cst)
             threads(%tx, %ty, %tz) in (%block_x = %cst2, %block_y = %cst, %block_z = %cst) {
    memref.store %arg0, %arg1[%tx] : memref<?xf32>
    gpu.terminator
  }
  return
}

// CHECK: [1, 1, 1, 1, 1]
// CHECK: ( 1, 1 )
// Registers a host buffer with the GPU runtime, fills it on the device via
// @other_func, then prints the result as a memref and as a vector.
func.func @main() {
  %v0 = arith.constant 0.0 : f32
  %c0 = arith.constant 0 : index
  %arg0 = memref.alloc() : memref<5xf32>
  %21 = arith.constant 5 : i32
  %22 = memref.cast %arg0 : memref<5xf32> to memref<?xf32>
  %23 = memref.cast %22 : memref<?xf32> to memref<*xf32>
  gpu.host_register %23 : memref<*xf32>
  call @printMemrefF32(%23) : (memref<*xf32>) -> ()
  %24 = arith.constant 1.0 : f32
  call @other_func(%24, %22) : (f32, memref<?xf32>) -> ()
  call @printMemrefF32(%23) : (memref<*xf32>) -> ()
  %val1 = vector.transfer_read %arg0[%c0], %v0 : memref<5xf32>, vector<2xf32>
  vector.print %val1 : vector<2xf32>
  return
}

func.func private @printMemrefF32(%ptr : memref<*xf32>)