// RUN: mlir-opt %s \
// RUN: | mlir-opt -gpu-lower-to-nvvm-pipeline="cubin-format=%gpu_compilation_format" \
// RUN: | mlir-runner \
// RUN:   --shared-libs=%mlir_cuda_runtime \
// RUN:   --shared-libs=%mlir_runner_utils \
// RUN:   --entry-point-result=void \
// RUN: | FileCheck %s

// CHECK: [4, 5, 6, 7, 0, 1, 2, 3, 12, -1, -1, -1, 8]
func.func @main() {
  // Allocate a 13-element float buffer on the host and erase its static
  // shape so the element count is recovered dynamically via memref.dim.
  %buf = memref.alloc() : memref<13xf32>
  %data = memref.cast %buf : memref<13xf32> to memref<?xf32>
  %c1 = arith.constant 1 : index
  %c0 = arith.constant 0 : index
  %len = memref.dim %data, %c0 : memref<?xf32>
  %unranked = memref.cast %data : memref<?xf32> to memref<*xf32>
  // Register the host allocation so the device kernel can access it.
  gpu.host_register %unranked : memref<*xf32>
  // Launch a single block with one thread per buffer element.
  gpu.launch blocks(%bx, %by, %bz) in (%grid_x = %c1, %grid_y = %c1, %grid_z = %c1)
             threads(%tx, %ty, %tz) in (%block_x = %len, %block_y = %c1, %block_z = %c1) {
    %tid = arith.index_cast %tx : index to i32
    %own_val = arith.sitofp %tid : i32 to f32
    %width = arith.index_cast %block_x : index to i32
    %mask = arith.constant 4 : i32
    // Exchange values with the lane whose id is (tid xor 4); %ok is false
    // when that partner lane lies outside the shuffle width.
    %peer_val, %ok = gpu.shuffle xor %own_val, %mask, %width : f32
    cf.cond_br %ok, ^store(%peer_val : f32), ^invalid
  ^invalid:
    // No valid partner: record -1 as a sentinel instead.
    %neg_one = arith.constant -1.0 : f32
    cf.br ^store(%neg_one : f32)
  ^store(%result : f32):
    memref.store %result, %data[%tx] : memref<?xf32>
    gpu.terminator
  }
  call @printMemrefF32(%unranked) : (memref<*xf32>) -> ()
  return
}

func.func private @printMemrefF32(%ptr : memref<*xf32>)