; RUN: llc -mtriple=amdgcn -amdgpu-atomic-optimizer-strategy=None -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,SICIVI,FUNC %s
; RUN: llc -mtriple=amdgcn -mcpu=tonga -mattr=-flat-for-global -amdgpu-atomic-optimizer-strategy=None -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,SICIVI,FUNC %s
; RUN: llc -mtriple=amdgcn -mcpu=gfx900 -amdgpu-atomic-optimizer-strategy=None -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,GFX9,FUNC %s
; RUN: llc -mtriple=r600 -mcpu=redwood -amdgpu-atomic-optimizer-strategy=None < %s | FileCheck -check-prefixes=R600,FUNC %s

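; Summary (derived from the checks below): 32-bit atomic add on local (LDS)
; memory selects ds_add_u32 for the non-returning forms and ds_add_rtn_u32 for
; the returning forms, with constant GEP offsets folded into the ds offset
; field. SI/CI/VI targets initialize m0 first, gfx900 does not reference m0,
; and R600 selects LDS_ADD / LDS_ADD_RET.
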
; FUNC-LABEL: {{^}}atomic_add_local:
; SICIVI: s_mov_b32 m0
; GFX9-NOT: m0
; R600: LDS_ADD *
; GCN: ds_add_u32
define amdgpu_kernel void @atomic_add_local(ptr addrspace(3) %local) {
  %unused = atomicrmw volatile add ptr addrspace(3) %local, i32 5 seq_cst
  ret void
}

; FUNC-LABEL: {{^}}atomic_add_local_const_offset:
; SICIVI: s_mov_b32 m0
; GFX9-NOT: m0

; R600: LDS_ADD *
; GCN: ds_add_u32 v{{[0-9]+}}, v{{[0-9]+}} offset:16
define amdgpu_kernel void @atomic_add_local_const_offset(ptr addrspace(3) %local) {
  %gep = getelementptr i32, ptr addrspace(3) %local, i32 4
  %val = atomicrmw volatile add ptr addrspace(3) %gep, i32 5 seq_cst
  ret void
}

; FUNC-LABEL: {{^}}atomic_add_ret_local:
; SICIVI: s_mov_b32 m0
; GFX9-NOT: m0

; R600: LDS_ADD_RET *
; GCN: ds_add_rtn_u32
define amdgpu_kernel void @atomic_add_ret_local(ptr addrspace(1) %out, ptr addrspace(3) %local) {
  %val = atomicrmw volatile add ptr addrspace(3) %local, i32 5 seq_cst
  store i32 %val, ptr addrspace(1) %out
  ret void
}

; FUNC-LABEL: {{^}}atomic_add_ret_local_const_offset:
; SICIVI: s_mov_b32 m0
; GFX9-NOT: m0

; R600: LDS_ADD_RET *
; GCN: ds_add_rtn_u32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} offset:20
define amdgpu_kernel void @atomic_add_ret_local_const_offset(ptr addrspace(1) %out, ptr addrspace(3) %local) {
  %gep = getelementptr i32, ptr addrspace(3) %local, i32 5
  %val = atomicrmw volatile add ptr addrspace(3) %gep, i32 5 seq_cst
  store i32 %val, ptr addrspace(1) %out
  ret void
}