; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=amdgcn -mcpu=gfx90a < %s | FileCheck %s

; Plain flat pointer argument with no addrspacecast anywhere: InferAddressSpaces
; has nothing to infer, so the atomicrmw stays a flat_atomic_add_f64.
define protected amdgpu_kernel void @InferNothing(i32 %a, ptr %b, double %c) {
; CHECK-LABEL: InferNothing:
; CHECK:       ; %bb.0: ; %entry
; CHECK-NEXT:    s_load_dword s6, s[4:5], 0x24
; CHECK-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x2c
; CHECK-NEXT:    s_waitcnt lgkmcnt(0)
; CHECK-NEXT:    s_ashr_i32 s7, s6, 31
; CHECK-NEXT:    v_mov_b32_e32 v0, s2
; CHECK-NEXT:    v_mov_b32_e32 v1, s3
; CHECK-NEXT:    s_lshl_b64 s[2:3], s[6:7], 3
; CHECK-NEXT:    s_add_u32 s0, s2, s0
; CHECK-NEXT:    s_addc_u32 s1, s3, s1
; CHECK-NEXT:    v_mov_b32_e32 v3, s1
; CHECK-NEXT:    v_add_co_u32_e64 v2, vcc, -8, s0
; CHECK-NEXT:    v_addc_co_u32_e32 v3, vcc, -1, v3, vcc
; CHECK-NEXT:    flat_atomic_add_f64 v[2:3], v[0:1]
; CHECK-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
; CHECK-NEXT:    buffer_wbinvl1_vol
; CHECK-NEXT:    s_endpgm
entry:
  %i = add nsw i32 %a, -1
  %i.2 = sext i32 %i to i64
  %i.3 = getelementptr inbounds double, ptr %b, i64 %i.2
  %i.4 = atomicrmw fadd ptr %i.3, double %c syncscope("agent") seq_cst, align 8, !noalias.addrspace !1, !amdgpu.no.fine.grained.memory !0
  ret void
}
3020cf170eSjeff
; The global pointer is addrspacecast to flat before the fadd; InferAddressSpaces
; rewrites the atomic onto the addrspace(1) pointer, so llc selects
; global_atomic_add_f64 (with the -8 GEP folded into the offset).
define protected amdgpu_kernel void @InferFadd(i32 %a, ptr addrspace(1) %b, double %c) {
; CHECK-LABEL: InferFadd:
; CHECK:       ; %bb.0: ; %entry
; CHECK-NEXT:    s_mov_b64 s[0:1], exec
; CHECK-NEXT:    v_mbcnt_lo_u32_b32 v0, s0, 0
; CHECK-NEXT:    v_mbcnt_hi_u32_b32 v0, s1, v0
; CHECK-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v0
; CHECK-NEXT:    s_and_saveexec_b64 s[2:3], vcc
; CHECK-NEXT:    s_cbranch_execz .LBB1_2
; CHECK-NEXT:  ; %bb.1:
; CHECK-NEXT:    s_load_dword s2, s[4:5], 0x24
; CHECK-NEXT:    s_load_dwordx4 s[8:11], s[4:5], 0x2c
; CHECK-NEXT:    v_mov_b32_e32 v2, 0
; CHECK-NEXT:    s_waitcnt lgkmcnt(0)
; CHECK-NEXT:    s_ashr_i32 s3, s2, 31
; CHECK-NEXT:    s_lshl_b64 s[2:3], s[2:3], 3
; CHECK-NEXT:    s_add_u32 s2, s8, s2
; CHECK-NEXT:    s_addc_u32 s3, s9, s3
; CHECK-NEXT:    s_bcnt1_i32_b64 s0, s[0:1]
; CHECK-NEXT:    v_cvt_f64_u32_e32 v[0:1], s0
; CHECK-NEXT:    v_mul_f64 v[0:1], s[10:11], v[0:1]
; CHECK-NEXT:    global_atomic_add_f64 v2, v[0:1], s[2:3] offset:-8
; CHECK-NEXT:    s_waitcnt vmcnt(0)
; CHECK-NEXT:    buffer_wbinvl1_vol
; CHECK-NEXT:  .LBB1_2:
; CHECK-NEXT:    s_endpgm
entry:
  %i = add nsw i32 %a, -1
  %i.2 = sext i32 %i to i64
  %i.3 = getelementptr inbounds double, ptr addrspace(1) %b, i64 %i.2
  %i.4 = addrspacecast ptr addrspace(1) %i.3 to ptr
  %0 = atomicrmw fadd ptr %i.4, double %c syncscope("agent") seq_cst, align 8, !noalias.addrspace !1, !amdgpu.no.fine.grained.memory !0
  ret void
}
6520cf170eSjeff
; Mixed case: the first atomic uses the flat pointer %d and stays
; flat_atomic_add_f64; the second uses a global pointer round-tripped through
; ptrtoint/add/inttoptr and is still inferred to global_atomic_add_f64
; (the +1 byte from %i.8 shows up as offset:-7 instead of -8).
define protected amdgpu_kernel void @InferMixed(i32 %a, ptr addrspace(1) %b, double %c, ptr %d) {
; CHECK-LABEL: InferMixed:
; CHECK:       ; %bb.0: ; %entry
; CHECK-NEXT:    s_load_dwordx2 s[8:9], s[4:5], 0x3c
; CHECK-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x2c
; CHECK-NEXT:    s_mov_b64 s[6:7], exec
; CHECK-NEXT:    s_waitcnt lgkmcnt(0)
; CHECK-NEXT:    v_pk_mov_b32 v[0:1], s[8:9], s[8:9] op_sel:[0,1]
; CHECK-NEXT:    v_pk_mov_b32 v[2:3], s[2:3], s[2:3] op_sel:[0,1]
; CHECK-NEXT:    flat_atomic_add_f64 v[0:1], v[2:3]
; CHECK-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
; CHECK-NEXT:    buffer_wbinvl1_vol
; CHECK-NEXT:    v_mbcnt_lo_u32_b32 v0, s6, 0
; CHECK-NEXT:    v_mbcnt_hi_u32_b32 v0, s7, v0
; CHECK-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v0
; CHECK-NEXT:    s_and_saveexec_b64 s[8:9], vcc
; CHECK-NEXT:    s_cbranch_execz .LBB2_2
; CHECK-NEXT:  ; %bb.1:
; CHECK-NEXT:    s_load_dword s4, s[4:5], 0x24
; CHECK-NEXT:    v_mov_b32_e32 v2, 0
; CHECK-NEXT:    s_waitcnt lgkmcnt(0)
; CHECK-NEXT:    s_ashr_i32 s5, s4, 31
; CHECK-NEXT:    s_lshl_b64 s[4:5], s[4:5], 3
; CHECK-NEXT:    s_add_u32 s0, s0, s4
; CHECK-NEXT:    s_addc_u32 s1, s1, s5
; CHECK-NEXT:    s_bcnt1_i32_b64 s4, s[6:7]
; CHECK-NEXT:    v_cvt_f64_u32_e32 v[0:1], s4
; CHECK-NEXT:    v_mul_f64 v[0:1], s[2:3], v[0:1]
; CHECK-NEXT:    global_atomic_add_f64 v2, v[0:1], s[0:1] offset:-7
; CHECK-NEXT:    s_waitcnt vmcnt(0)
; CHECK-NEXT:    buffer_wbinvl1_vol
; CHECK-NEXT:  .LBB2_2:
; CHECK-NEXT:    s_endpgm
entry:
  %i = add nsw i32 %a, -1
  %i.2 = sext i32 %i to i64
  %i.3 = getelementptr inbounds double, ptr addrspace(1) %b, i64 %i.2
  br label %bb1

bb1:                                              ; preds = %entry
  %i.7 = ptrtoint ptr addrspace(1) %i.3 to i64
  %i.8 = add nsw i64 %i.7, 1
  %i.9 = inttoptr i64 %i.8 to ptr addrspace(1)
  %0 = atomicrmw fadd ptr %d, double %c syncscope("agent") seq_cst, align 8, !noalias.addrspace !1, !amdgpu.no.fine.grained.memory !0
  %i.11 = addrspacecast ptr addrspace(1) %i.9 to ptr
  %1 = atomicrmw fadd ptr %i.11, double %c syncscope("agent") seq_cst, align 8, !noalias.addrspace !1, !amdgpu.no.fine.grained.memory !0
  ret void
}
11420cf170eSjeff
; The pointer flows through a phi and a ptrtoint/inttoptr round trip inside a
; loop before the addrspacecast; the atomic in %bb1 is still inferred to the
; global address space (global_atomic_add_f64 below).
define protected amdgpu_kernel void @InferPHI(i32 %a, ptr addrspace(1) %b, double %c) {
; CHECK-LABEL: InferPHI:
; CHECK:       ; %bb.0: ; %entry
; CHECK-NEXT:    s_load_dword s6, s[4:5], 0x24
; CHECK-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x2c
; CHECK-NEXT:    s_waitcnt lgkmcnt(0)
; CHECK-NEXT:    s_ashr_i32 s7, s6, 31
; CHECK-NEXT:    s_lshl_b64 s[4:5], s[6:7], 3
; CHECK-NEXT:    s_add_u32 s0, s0, s4
; CHECK-NEXT:    s_addc_u32 s1, s1, s5
; CHECK-NEXT:    s_add_u32 s4, s0, -8
; CHECK-NEXT:    s_addc_u32 s5, s1, -1
; CHECK-NEXT:    s_cmp_eq_u64 s[0:1], 9
; CHECK-NEXT:    s_cselect_b64 s[0:1], -1, 0
; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[0:1]
; CHECK-NEXT:    v_cmp_ne_u32_e64 s[0:1], 1, v0
; CHECK-NEXT:  .LBB3_1: ; %bb0
; CHECK-NEXT:    ; =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    s_and_b64 vcc, exec, s[0:1]
; CHECK-NEXT:    s_cbranch_vccnz .LBB3_1
; CHECK-NEXT:  ; %bb.2: ; %bb1
; CHECK-NEXT:    s_mov_b64 s[0:1], exec
; CHECK-NEXT:    v_mbcnt_lo_u32_b32 v0, s0, 0
; CHECK-NEXT:    v_mbcnt_hi_u32_b32 v0, s1, v0
; CHECK-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v0
; CHECK-NEXT:    s_and_saveexec_b64 s[6:7], vcc
; CHECK-NEXT:    s_cbranch_execz .LBB3_4
; CHECK-NEXT:  ; %bb.3:
; CHECK-NEXT:    s_bcnt1_i32_b64 s0, s[0:1]
; CHECK-NEXT:    v_cvt_f64_u32_e32 v[0:1], s0
; CHECK-NEXT:    v_mul_f64 v[0:1], s[2:3], v[0:1]
; CHECK-NEXT:    v_mov_b32_e32 v2, 0
; CHECK-NEXT:    global_atomic_add_f64 v2, v[0:1], s[4:5]
; CHECK-NEXT:    s_waitcnt vmcnt(0)
; CHECK-NEXT:    buffer_wbinvl1_vol
; CHECK-NEXT:  .LBB3_4:
; CHECK-NEXT:    s_endpgm
entry:
  %i = add nsw i32 %a, -1
  %i.2 = sext i32 %i to i64
  %i.3 = getelementptr inbounds double, ptr addrspace(1) %b, i64 %i.2
  %i.4 = ptrtoint ptr addrspace(1) %i.3 to i64
  br label %bb0

bb0:                                              ; preds = %bb0, %entry
  %phi = phi ptr addrspace(1) [ %i.3, %entry ], [ %i.9, %bb0 ]
  %i.7 = ptrtoint ptr addrspace(1) %phi to i64
  %i.8 = sub nsw i64 %i.7, 1
  %cmp2 = icmp eq i64 %i.8, 0
  %i.9 = inttoptr i64 %i.7 to ptr addrspace(1)
  br i1 %cmp2, label %bb1, label %bb0

bb1:                                              ; preds = %bb0
  %i.10 = addrspacecast ptr addrspace(1) %i.9 to ptr
  %0 = atomicrmw fadd ptr %i.10, double %c syncscope("agent") seq_cst, align 8, !amdgpu.no.fine.grained.memory !0
  ret void
}
17220cf170eSjeff
; Attribute groups (not attached to the kernels in this chunk) and the metadata
; nodes referenced by the atomicrmw instructions above.
attributes #0 = { nocallback nofree nounwind willreturn memory(argmem: readwrite) }
attributes #1 = { mustprogress nounwind willreturn memory(argmem: readwrite) "target-cpu"="gfx90a" }

; !0: empty node consumed by !amdgpu.no.fine.grained.memory.
; !1: !noalias.addrspace range [5, 6) — presumably excludes the AMDGPU private
; address space (5); confirm against the AMDGPU usage docs.
!0 = !{}
!1 = !{i32 5, i32 6}
178