// RUN: mlir-opt %s \
// RUN: | mlir-opt -convert-scf-to-cf \
// RUN: | mlir-opt -gpu-kernel-outlining \
// RUN: | mlir-opt -pass-pipeline='builtin.module(gpu.module(strip-debuginfo,convert-gpu-to-rocdl{use-bare-ptr-memref-call-conv=true}),rocdl-attach-target{chip=%chip})' \
// RUN: | mlir-opt -gpu-to-llvm=use-bare-pointers-for-kernels=true -reconcile-unrealized-casts -gpu-module-to-binary \
// RUN: | mlir-runner \
// RUN:   --shared-libs=%mlir_rocm_runtime \
// RUN:   --shared-libs=%mlir_runner_utils \
// RUN:   --entry-point-result=void \
// RUN: | FileCheck %s

12func.func @vecadd(%arg0 : memref<5xf32>, %arg1 : memref<5xf32>, %arg2 : memref<5xf32>) {
13  %c0 = arith.constant 0 : index
14  %c1 = arith.constant 1 : index
15  %block_dim = arith.constant 5 : index
16  gpu.launch blocks(%bx, %by, %bz) in (%grid_x = %c1, %grid_y = %c1, %grid_z = %c1)
17             threads(%tx, %ty, %tz) in (%block_x = %block_dim, %block_y = %c1, %block_z = %c1) {
18    %a = memref.load %arg0[%tx] : memref<5xf32>
19    %b = memref.load %arg1[%tx] : memref<5xf32>
20    %c = arith.addf %a, %b : f32
21    memref.store %c, %arg2[%tx] : memref<5xf32>
22    gpu.terminator
23  }
24  return
25}
27// CHECK: [2.46, 2.46, 2.46, 2.46, 2.46]
28func.func @main() {
29  %c0 = arith.constant 0 : index
30  %c1 = arith.constant 1 : index
31  %c5 = arith.constant 5 : index
32  %cf1dot23 = arith.constant 1.23 : f32
33  %0 = memref.alloc() : memref<5xf32>
34  %1 = memref.alloc() : memref<5xf32>
35  %2 = memref.alloc() : memref<5xf32>
36  %3 = memref.cast %0 : memref<5xf32> to memref<?xf32>
37  %4 = memref.cast %1 : memref<5xf32> to memref<?xf32>
38  %5 = memref.cast %2 : memref<5xf32> to memref<?xf32>
39  scf.for %i = %c0 to %c5 step %c1 {
40    memref.store %cf1dot23, %3[%i] : memref<?xf32>
41    memref.store %cf1dot23, %4[%i] : memref<?xf32>
42  }
43  %6 = memref.cast %3 : memref<?xf32> to memref<*xf32>
44  %7 = memref.cast %4 : memref<?xf32> to memref<*xf32>
45  %8 = memref.cast %5 : memref<?xf32> to memref<*xf32>
46  gpu.host_register %6 : memref<*xf32>
47  gpu.host_register %7 : memref<*xf32>
48  gpu.host_register %8 : memref<*xf32>
49  %9 = call @mgpuMemGetDeviceMemRef1dFloat(%3) : (memref<?xf32>) -> (memref<?xf32>)
50  %10 = call @mgpuMemGetDeviceMemRef1dFloat(%4) : (memref<?xf32>) -> (memref<?xf32>)
51  %11 = call @mgpuMemGetDeviceMemRef1dFloat(%5) : (memref<?xf32>) -> (memref<?xf32>)
52  %12 = memref.cast %9 : memref<?xf32> to memref<5xf32>
53  %13 = memref.cast %10 : memref<?xf32> to memref<5xf32>
54  %14 = memref.cast %11 : memref<?xf32> to memref<5xf32>
55
56  call @vecadd(%12, %13, %14) : (memref<5xf32>, memref<5xf32>, memref<5xf32>) -> ()
57  call @printMemrefF32(%8) : (memref<*xf32>) -> ()
58  return
59}
61func.func private @mgpuMemGetDeviceMemRef1dFloat(%ptr : memref<?xf32>) -> (memref<?xf32>)
62func.func private @printMemrefF32(%ptr : memref<*xf32>)