// RUN: mlir-opt %s \
// RUN: | mlir-opt -convert-scf-to-cf \
// RUN: | mlir-opt -gpu-kernel-outlining \
// RUN: | mlir-opt -pass-pipeline='builtin.module(gpu.module(strip-debuginfo,convert-gpu-to-rocdl{chipset=%chip index-bitwidth=32}),rocdl-attach-target{chip=%chip})' \
// RUN: | mlir-opt -gpu-to-llvm -reconcile-unrealized-casts -gpu-module-to-binary \
// RUN: | mlir-runner \
// RUN:   --shared-libs=%mlir_rocm_runtime \
// RUN:   --shared-libs=%mlir_runner_utils \
// RUN:   --entry-point-result=void \
// RUN: | FileCheck %s

// TODO: swap for vector transfer reads if we ever create a --vector-to-amdgpu pass
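// vectransferx2: reads two f32 elements from %arg0 with bounds checking
// enabled, doubles them, and writes the result to %arg1 starting at element 1.
// A hypothetical vector.transfer_read equivalent of the load below (assuming
// such a lowering existed) would be:
//   %f = vector.transfer_read %arg0[%i0], %pad : memref<?xf32>, vector<2xf32>
// where %i0 is an index-typed offset and %pad an f32 padding value.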
func.func @vectransferx2(%arg0 : memref<?xf32>, %arg1 : memref<?xf32>) {
  %cst = arith.constant 1 : index
  gpu.launch blocks(%bx, %by, %bz) in (%grid_x = %cst, %grid_y = %cst, %grid_z = %cst)
             threads(%tx, %ty, %tz) in (%block_x = %cst, %block_y = %cst, %block_z = %cst) {
    %base = arith.constant 0 : i32
    %f = amdgpu.raw_buffer_load {boundsCheck = true} %arg0[%base]
      : memref<?xf32>, i32 -> vector<2xf32>

    %c = arith.addf %f, %f : vector<2xf32>

    %base1 = arith.constant 1 : i32
    amdgpu.raw_buffer_store {boundsCheck = false} %c -> %arg1[%base1]
      : vector<2xf32> -> memref<?xf32>, i32

    gpu.terminator
  }
  return
}

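// vectransferx4: reads four f32 elements from %arg0, doubles them, and writes
// the result back over all four elements of %arg1 (bounds checking disabled).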
func.func @vectransferx4(%arg0 : memref<?xf32>, %arg1 : memref<?xf32>) {
  %cst = arith.constant 1 : index
  gpu.launch blocks(%bx, %by, %bz) in (%grid_x = %cst, %grid_y = %cst, %grid_z = %cst)
             threads(%tx, %ty, %tz) in (%block_x = %cst, %block_y = %cst, %block_z = %cst) {
    %base = arith.constant 0 : i32
    %f = amdgpu.raw_buffer_load {boundsCheck = false} %arg0[%base]
      : memref<?xf32>, i32 -> vector<4xf32>

    %c = arith.addf %f, %f : vector<4xf32>

    amdgpu.raw_buffer_store {boundsCheck = false} %c -> %arg1[%base]
      : vector<4xf32> -> memref<?xf32>, i32

    gpu.terminator
  }
  return
}

func.func @main() {
  %c0 = arith.constant 0 : index
  %c1 = arith.constant 1 : index
  %c4 = arith.constant 4 : index
  %cf1dot23 = arith.constant 1.23 : f32

  %arg0 = memref.alloc() : memref<4xf32>
  %arg1 = memref.alloc() : memref<4xf32>

  %22 = memref.cast %arg0 : memref<4xf32> to memref<?xf32>
  %23 = memref.cast %arg1 : memref<4xf32> to memref<?xf32>

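  // Initialize both buffers to 1.23.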
  scf.for %i = %c0 to %c4 step %c1 {
    memref.store %cf1dot23, %22[%i] : memref<?xf32>
    memref.store %cf1dot23, %23[%i] : memref<?xf32>
  }

  %cast0 = memref.cast %22 : memref<?xf32> to memref<*xf32>
  %cast1 = memref.cast %23 : memref<?xf32> to memref<*xf32>

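  // Register the host allocations with the GPU runtime so the kernels can
  // access them.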
  gpu.host_register %cast0 : memref<*xf32>
  gpu.host_register %cast1 : memref<*xf32>

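  // Obtain device-accessible memrefs backed by the two registered buffers.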
  %24 = call @mgpuMemGetDeviceMemRef1dFloat(%22) : (memref<?xf32>) -> (memref<?xf32>)
  %26 = call @mgpuMemGetDeviceMemRef1dFloat(%23) : (memref<?xf32>) -> (memref<?xf32>)

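  // vectransferx2 doubles elements 1 and 2 of the output buffer; elements 0
  // and 3 keep their initial 1.23.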
  // CHECK: [1.23, 2.46, 2.46, 1.23]
  call @vectransferx2(%24, %26) : (memref<?xf32>, memref<?xf32>) -> ()
  call @printMemrefF32(%cast1) : (memref<*xf32>) -> ()

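  // vectransferx4 doubles all four elements of the output buffer.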
  // CHECK: [2.46, 2.46, 2.46, 2.46]
  call @vectransferx4(%24, %26) : (memref<?xf32>, memref<?xf32>) -> ()
  call @printMemrefF32(%cast1) : (memref<*xf32>) -> ()
  return
}

func.func private @mgpuMemGetDeviceMemRef1dFloat(%ptr : memref<?xf32>) -> (memref<?xf32>)
func.func private @printMemrefF32(%ptr : memref<*xf32>)