; RUN: llc -mtriple=amdgcn -amdgpu-atomic-optimizer-strategy=None -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,SICIVI,FUNC %s
; RUN: llc -mtriple=amdgcn -mcpu=tonga -mattr=-flat-for-global -amdgpu-atomic-optimizer-strategy=None -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,SICIVI,FUNC %s
; RUN: llc -mtriple=amdgcn -mcpu=gfx900 -mattr=-flat-for-global -amdgpu-atomic-optimizer-strategy=None -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,GFX9,FUNC %s
; RUN: llc -mtriple=r600 -mcpu=redwood -amdgpu-atomic-optimizer-strategy=None < %s | FileCheck -enable-var-scope -check-prefixes=R600,FUNC %s

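; Checks instruction selection for 32-bit atomicrmw sub on LDS (local) memory:
; GCN targets should select ds_sub_u32 / ds_sub_rtn_u32 (SI/CI/VI initialize m0,
; GFX9 must not touch m0), and R600 should select LDS_SUB / LDS_SUB_RET.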
; FUNC-LABEL: {{^}}atomic_sub_local:
; SICIVI: s_mov_b32 m0
; GFX9-NOT: m0

; R600: LDS_SUB *
; GCN: ds_sub_u32
define amdgpu_kernel void @atomic_sub_local(ptr addrspace(3) %local) {
  %unused = atomicrmw volatile sub ptr addrspace(3) %local, i32 5 seq_cst
  ret void
}

; FUNC-LABEL: {{^}}atomic_sub_local_const_offset:
; SICIVI: s_mov_b32 m0
; GFX9-NOT: m0

; R600: LDS_SUB *
; GCN: ds_sub_u32 v{{[0-9]+}}, v{{[0-9]+}} offset:16
define amdgpu_kernel void @atomic_sub_local_const_offset(ptr addrspace(3) %local) {
  %gep = getelementptr i32, ptr addrspace(3) %local, i32 4
  %val = atomicrmw volatile sub ptr addrspace(3) %gep, i32 5 seq_cst
  ret void
}

; FUNC-LABEL: {{^}}atomic_sub_ret_local:
; SICIVI: s_mov_b32 m0
; GFX9-NOT: m0

; R600: LDS_SUB_RET *
; GCN: ds_sub_rtn_u32
define amdgpu_kernel void @atomic_sub_ret_local(ptr addrspace(1) %out, ptr addrspace(3) %local) {
  %val = atomicrmw volatile sub ptr addrspace(3) %local, i32 5 seq_cst
  store i32 %val, ptr addrspace(1) %out
  ret void
}

; FUNC-LABEL: {{^}}atomic_sub_ret_local_const_offset:
; SICIVI: s_mov_b32 m0
; GFX9-NOT: m0

; R600: LDS_SUB_RET *
; GCN: ds_sub_rtn_u32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} offset:20
define amdgpu_kernel void @atomic_sub_ret_local_const_offset(ptr addrspace(1) %out, ptr addrspace(3) %local) {
  %gep = getelementptr i32, ptr addrspace(3) %local, i32 5
  %val = atomicrmw volatile sub ptr addrspace(3) %gep, i32 5 seq_cst
  store i32 %val, ptr addrspace(1) %out
  ret void
}