// RUN: mlir-opt %s \
// RUN: | mlir-opt -gpu-kernel-outlining \
// RUN: | mlir-opt -pass-pipeline='builtin.module(gpu.module(strip-debuginfo,convert-gpu-to-nvvm),nvvm-attach-target)' \
// RUN: | mlir-opt -gpu-async-region -gpu-to-llvm -reconcile-unrealized-casts -gpu-module-to-binary="format=%gpu_compilation_format" \
// RUN: | mlir-opt -async-to-async-runtime -async-runtime-ref-counting \
// RUN: | mlir-opt -convert-async-to-llvm -convert-func-to-llvm -convert-arith-to-llvm -convert-cf-to-llvm -reconcile-unrealized-casts \
// RUN: | mlir-runner \
// RUN:   --shared-libs=%mlir_cuda_runtime \
// RUN:   --shared-libs=%mlir_async_runtime \
// RUN:   --shared-libs=%mlir_runner_utils \
// RUN:   --entry-point-result=void -O0 \
// RUN: | FileCheck %s
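// The pipeline above outlines the GPU kernel, lowers it to NVVM and a GPU
// binary, rewrites GPU ops into asynchronous form, lowers the async dialect
// to runtime calls, and JIT-runs the result against the CUDA, async, and
// runner-utils shared libraries.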

func.func @main() {
  %c0    = arith.constant 0 : index
  %c1    = arith.constant 1 : index
  %count = arith.constant 2 : index

  // initialize h0 on host
  %h0 = memref.alloc(%count) : memref<?xi32>
  %h0_unranked = memref.cast %h0 : memref<?xi32> to memref<*xi32>
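  // host_register maps the host allocation into the device address space, so
  // the kernel launched below can store its result directly into %h0.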
  gpu.host_register %h0_unranked : memref<*xi32>

  %v0 = arith.constant 42 : i32
  memref.store %v0, %h0[%c0] : memref<?xi32>
  memref.store %v0, %h0[%c1] : memref<?xi32>
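  // h0 now holds [42, 42]; the fork/join below should produce [84, 84].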

  // copy h0 to b0 on device.
  %t0, %f0 = async.execute () -> !async.value<memref<?xi32>> {
    %b0 = gpu.alloc(%count) : memref<?xi32>
    gpu.memcpy %b0, %h0 : memref<?xi32>, memref<?xi32>
    async.yield %b0 : memref<?xi32>
  }
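  // %t0 is the completion token of the region above; %f0 is an !async.value
  // that yields the device buffer %b0 once the copy has finished.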

  // copy h0 to b1 and b2 (fork)
  %t1, %f1 = async.execute [%t0] (
    %f0 as %b0 : !async.value<memref<?xi32>>
  ) -> !async.value<memref<?xi32>> {
    %b1 = gpu.alloc(%count) : memref<?xi32>
    gpu.memcpy %b1, %b0 : memref<?xi32>, memref<?xi32>
    async.yield %b1 : memref<?xi32>
  }
  %t2, %f2 = async.execute [%t0] (
    %f0 as %b0 : !async.value<memref<?xi32>>
  ) -> !async.value<memref<?xi32>> {
    %b2 = gpu.alloc(%count) : memref<?xi32>
    gpu.memcpy %b2, %b0 : memref<?xi32>, memref<?xi32>
    async.yield %b2 : memref<?xi32>
  }
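  // both copies depend only on %t0, not on each other, so the async runtime
  // may execute them concurrently.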

  // h0 = b1 + b2 (join).
  %t3 = async.execute [%t1, %t2] (
    %f1 as %b1 : !async.value<memref<?xi32>>,
    %f2 as %b2 : !async.value<memref<?xi32>>
  ) {
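    // launch a single block of %count threads; thread %tx adds b1[%tx] and
    // b2[%tx] and stores the sum into the registered host buffer h0[%tx].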
    gpu.launch blocks(%bx, %by, %bz) in (%grid_x = %c1, %grid_y = %c1, %grid_z = %c1)
               threads(%tx, %ty, %tz) in (%block_x = %count, %block_y = %c1, %block_z = %c1) {
      %v1 = memref.load %b1[%tx] : memref<?xi32>
      %v2 = memref.load %b2[%tx] : memref<?xi32>
      %sum = arith.addi %v1, %v2 : i32
      memref.store %sum, %h0[%tx] : memref<?xi32>
      gpu.terminator
    }
    async.yield
  }
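  // after -gpu-async-region and -gpu-to-llvm, the GPU ops in these regions run
  // asynchronously on streams; the token and value dependencies above enforce
  // the copy -> fork -> join ordering.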

  async.await %t3 : !async.token
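  // block the host until the join above (and everything it depends on) has completed.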
  // CHECK: [84, 84]
  call @printMemrefI32(%h0_unranked) : (memref<*xi32>) -> ()
  return
}

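// printMemrefI32 is provided by the runner-utils library loaded via --shared-libs=%mlir_runner_utils.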
func.func private @printMemrefI32(memref<*xi32>)