; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
; RUN: llc -global-isel -mtriple=amdgcn -mcpu=gfx940 -O0 -stop-after=irtranslator -o - %s | FileCheck %s

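; Scalar fadd on an LDS (addrspace 3) pointer is translated directly to a
; single G_ATOMICRMW_FADD.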
define float @test_atomicrmw_fadd(ptr addrspace(3) %addr) {
  ; CHECK-LABEL: name: test_atomicrmw_fadd
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK-NEXT:   liveins: $vgpr0
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
  ; CHECK-NEXT:   [[C:%[0-9]+]]:_(s32) = G_FCONSTANT float 1.000000e+00
  ; CHECK-NEXT:   [[ATOMICRMW_FADD:%[0-9]+]]:_(s32) = G_ATOMICRMW_FADD [[COPY]](p3), [[C]] :: (load store seq_cst (s32) on %ir.addr, addrspace 3)
  ; CHECK-NEXT:   $vgpr0 = COPY [[ATOMICRMW_FADD]](s32)
  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0
  %oldval = atomicrmw fadd ptr addrspace(3) %addr, float 1.0 seq_cst
  ret float %oldval
}

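; Scalar fsub is not kept as a single atomic operation; it is expanded into a
; compare-exchange loop (G_ATOMIC_CMPXCHG_WITH_SUCCESS guarded by the
; llvm.amdgcn.if.break/loop/end.cf intrinsics).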
define float @test_atomicrmw_fsub(ptr addrspace(3) %addr) {
  ; CHECK-LABEL: name: test_atomicrmw_fsub
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK-NEXT:   successors: %bb.2(0x80000000)
  ; CHECK-NEXT:   liveins: $vgpr0
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
  ; CHECK-NEXT:   [[C:%[0-9]+]]:_(s32) = G_FCONSTANT float 1.000000e+00
  ; CHECK-NEXT:   [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
  ; CHECK-NEXT:   [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load (s32) from %ir.addr, addrspace 3)
  ; CHECK-NEXT:   G_BR %bb.2
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT: bb.2.atomicrmw.start:
  ; CHECK-NEXT:   successors: %bb.3(0x40000000), %bb.2(0x40000000)
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   [[PHI:%[0-9]+]]:_(s64) = G_PHI %16(s64), %bb.2, [[C1]](s64), %bb.1
  ; CHECK-NEXT:   [[PHI1:%[0-9]+]]:_(s32) = G_PHI [[LOAD]](s32), %bb.1, %14(s32), %bb.2
  ; CHECK-NEXT:   [[FSUB:%[0-9]+]]:_(s32) = G_FSUB [[PHI1]], [[C]]
  ; CHECK-NEXT:   [[ATOMIC_CMPXCHG_WITH_SUCCESS:%[0-9]+]]:_(s32), [[ATOMIC_CMPXCHG_WITH_SUCCESS1:%[0-9]+]]:_(s1) = G_ATOMIC_CMPXCHG_WITH_SUCCESS [[COPY]](p3), [[PHI1]], [[FSUB]] :: (load store seq_cst seq_cst (s32) on %ir.addr, addrspace 3)
  ; CHECK-NEXT:   [[INT:%[0-9]+]]:_(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.if.break), [[ATOMIC_CMPXCHG_WITH_SUCCESS1]](s1), [[PHI]](s64)
  ; CHECK-NEXT:   [[INT1:%[0-9]+]]:_(s1) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.loop), [[INT]](s64)
  ; CHECK-NEXT:   G_BRCOND [[INT1]](s1), %bb.3
  ; CHECK-NEXT:   G_BR %bb.2
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT: bb.3.atomicrmw.end:
  ; CHECK-NEXT:   [[PHI2:%[0-9]+]]:_(s32) = G_PHI [[ATOMIC_CMPXCHG_WITH_SUCCESS]](s32), %bb.2
  ; CHECK-NEXT:   [[PHI3:%[0-9]+]]:_(s64) = G_PHI [[INT]](s64), %bb.2
  ; CHECK-NEXT:   G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.end.cf), [[PHI3]](s64)
  ; CHECK-NEXT:   $vgpr0 = COPY [[PHI2]](s32)
  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0
  %oldval = atomicrmw fsub ptr addrspace(3) %addr, float 1.0 seq_cst
  ret float %oldval
}

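; Packed <2 x half> fadd also maps directly to G_ATOMICRMW_FADD, operating on a
; <2 x s16> value.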
define <2 x half> @test_atomicrmw_fadd_vector(ptr addrspace(3) %addr) {
  ; CHECK-LABEL: name: test_atomicrmw_fadd_vector
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK-NEXT:   liveins: $vgpr0
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
  ; CHECK-NEXT:   [[C:%[0-9]+]]:_(s16) = G_FCONSTANT half 0xH3C00
  ; CHECK-NEXT:   [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[C]](s16), [[C]](s16)
  ; CHECK-NEXT:   [[ATOMICRMW_FADD:%[0-9]+]]:_(<2 x s16>) = G_ATOMICRMW_FADD [[COPY]](p3), [[BUILD_VECTOR]] :: (load store seq_cst (<2 x s16>) on %ir.addr, addrspace 3)
  ; CHECK-NEXT:   $vgpr0 = COPY [[ATOMICRMW_FADD]](<2 x s16>)
  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0
  %oldval = atomicrmw fadd ptr addrspace(3) %addr, <2 x half> <half 1.0, half 1.0> seq_cst
  ret <2 x half> %oldval
}

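; Vector fsub is expanded into the same compare-exchange loop; the <2 x s16>
; values are bitcast to s32 around the G_ATOMIC_CMPXCHG_WITH_SUCCESS.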
define <2 x half> @test_atomicrmw_fsub_vector(ptr addrspace(3) %addr) {
  ; CHECK-LABEL: name: test_atomicrmw_fsub_vector
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK-NEXT:   successors: %bb.2(0x80000000)
  ; CHECK-NEXT:   liveins: $vgpr0
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
  ; CHECK-NEXT:   [[C:%[0-9]+]]:_(s16) = G_FCONSTANT half 0xH3C00
  ; CHECK-NEXT:   [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[C]](s16), [[C]](s16)
  ; CHECK-NEXT:   [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
  ; CHECK-NEXT:   [[LOAD:%[0-9]+]]:_(<2 x s16>) = G_LOAD [[COPY]](p3) :: (load (<2 x s16>) from %ir.addr, addrspace 3)
  ; CHECK-NEXT:   G_BR %bb.2
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT: bb.2.atomicrmw.start:
  ; CHECK-NEXT:   successors: %bb.3(0x40000000), %bb.2(0x40000000)
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   [[PHI:%[0-9]+]]:_(s64) = G_PHI %20(s64), %bb.2, [[C1]](s64), %bb.1
  ; CHECK-NEXT:   [[PHI1:%[0-9]+]]:_(<2 x s16>) = G_PHI [[LOAD]](<2 x s16>), %bb.1, %19(<2 x s16>), %bb.2
  ; CHECK-NEXT:   [[FSUB:%[0-9]+]]:_(<2 x s16>) = G_FSUB [[PHI1]], [[BUILD_VECTOR]]
  ; CHECK-NEXT:   [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[FSUB]](<2 x s16>)
  ; CHECK-NEXT:   [[BITCAST1:%[0-9]+]]:_(s32) = G_BITCAST [[PHI1]](<2 x s16>)
  ; CHECK-NEXT:   [[ATOMIC_CMPXCHG_WITH_SUCCESS:%[0-9]+]]:_(s32), [[ATOMIC_CMPXCHG_WITH_SUCCESS1:%[0-9]+]]:_(s1) = G_ATOMIC_CMPXCHG_WITH_SUCCESS [[COPY]](p3), [[BITCAST1]], [[BITCAST]] :: (load store seq_cst seq_cst (s32) on %ir.addr, addrspace 3)
  ; CHECK-NEXT:   [[BITCAST2:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[ATOMIC_CMPXCHG_WITH_SUCCESS]](s32)
  ; CHECK-NEXT:   [[INT:%[0-9]+]]:_(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.if.break), [[ATOMIC_CMPXCHG_WITH_SUCCESS1]](s1), [[PHI]](s64)
  ; CHECK-NEXT:   [[INT1:%[0-9]+]]:_(s1) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.loop), [[INT]](s64)
  ; CHECK-NEXT:   G_BRCOND [[INT1]](s1), %bb.3
  ; CHECK-NEXT:   G_BR %bb.2
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT: bb.3.atomicrmw.end:
  ; CHECK-NEXT:   [[PHI2:%[0-9]+]]:_(<2 x s16>) = G_PHI [[BITCAST2]](<2 x s16>), %bb.2
  ; CHECK-NEXT:   [[PHI3:%[0-9]+]]:_(s64) = G_PHI [[INT]](s64), %bb.2
  ; CHECK-NEXT:   G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.end.cf), [[PHI3]](s64)
  ; CHECK-NEXT:   $vgpr0 = COPY [[PHI2]](<2 x s16>)
  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0
  %oldval = atomicrmw fsub ptr addrspace(3) %addr, <2 x half> <half 1.0, half 1.0> seq_cst
  ret <2 x half> %oldval
}

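; Vector fmin is likewise expanded to a compare-exchange loop, computing the
; new value with G_FMINNUM.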
define <2 x half> @test_atomicrmw_fmin_vector(ptr addrspace(3) %addr) {
  ; CHECK-LABEL: name: test_atomicrmw_fmin_vector
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK-NEXT:   successors: %bb.2(0x80000000)
  ; CHECK-NEXT:   liveins: $vgpr0
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
  ; CHECK-NEXT:   [[C:%[0-9]+]]:_(s16) = G_FCONSTANT half 0xH3C00
  ; CHECK-NEXT:   [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[C]](s16), [[C]](s16)
  ; CHECK-NEXT:   [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
  ; CHECK-NEXT:   [[LOAD:%[0-9]+]]:_(<2 x s16>) = G_LOAD [[COPY]](p3) :: (load (<2 x s16>) from %ir.addr, addrspace 3)
  ; CHECK-NEXT:   G_BR %bb.2
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT: bb.2.atomicrmw.start:
  ; CHECK-NEXT:   successors: %bb.3(0x40000000), %bb.2(0x40000000)
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   [[PHI:%[0-9]+]]:_(s64) = G_PHI %20(s64), %bb.2, [[C1]](s64), %bb.1
  ; CHECK-NEXT:   [[PHI1:%[0-9]+]]:_(<2 x s16>) = G_PHI [[LOAD]](<2 x s16>), %bb.1, %19(<2 x s16>), %bb.2
  ; CHECK-NEXT:   [[FMINNUM:%[0-9]+]]:_(<2 x s16>) = G_FMINNUM [[PHI1]], [[BUILD_VECTOR]]
  ; CHECK-NEXT:   [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[FMINNUM]](<2 x s16>)
  ; CHECK-NEXT:   [[BITCAST1:%[0-9]+]]:_(s32) = G_BITCAST [[PHI1]](<2 x s16>)
  ; CHECK-NEXT:   [[ATOMIC_CMPXCHG_WITH_SUCCESS:%[0-9]+]]:_(s32), [[ATOMIC_CMPXCHG_WITH_SUCCESS1:%[0-9]+]]:_(s1) = G_ATOMIC_CMPXCHG_WITH_SUCCESS [[COPY]](p3), [[BITCAST1]], [[BITCAST]] :: (load store seq_cst seq_cst (s32) on %ir.addr, addrspace 3)
  ; CHECK-NEXT:   [[BITCAST2:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[ATOMIC_CMPXCHG_WITH_SUCCESS]](s32)
  ; CHECK-NEXT:   [[INT:%[0-9]+]]:_(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.if.break), [[ATOMIC_CMPXCHG_WITH_SUCCESS1]](s1), [[PHI]](s64)
  ; CHECK-NEXT:   [[INT1:%[0-9]+]]:_(s1) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.loop), [[INT]](s64)
  ; CHECK-NEXT:   G_BRCOND [[INT1]](s1), %bb.3
  ; CHECK-NEXT:   G_BR %bb.2
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT: bb.3.atomicrmw.end:
  ; CHECK-NEXT:   [[PHI2:%[0-9]+]]:_(<2 x s16>) = G_PHI [[BITCAST2]](<2 x s16>), %bb.2
  ; CHECK-NEXT:   [[PHI3:%[0-9]+]]:_(s64) = G_PHI [[INT]](s64), %bb.2
  ; CHECK-NEXT:   G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.end.cf), [[PHI3]](s64)
  ; CHECK-NEXT:   $vgpr0 = COPY [[PHI2]](<2 x s16>)
  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0
  %oldval = atomicrmw fmin ptr addrspace(3) %addr, <2 x half> <half 1.0, half 1.0> seq_cst
  ret <2 x half> %oldval
}

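; Vector fmax follows the same expansion pattern, using G_FMAXNUM for the new
; value.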
define <2 x half> @test_atomicrmw_fmax_vector(ptr addrspace(3) %addr) {
  ; CHECK-LABEL: name: test_atomicrmw_fmax_vector
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK-NEXT:   successors: %bb.2(0x80000000)
  ; CHECK-NEXT:   liveins: $vgpr0
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
  ; CHECK-NEXT:   [[C:%[0-9]+]]:_(s16) = G_FCONSTANT half 0xH3C00
  ; CHECK-NEXT:   [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[C]](s16), [[C]](s16)
  ; CHECK-NEXT:   [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
  ; CHECK-NEXT:   [[LOAD:%[0-9]+]]:_(<2 x s16>) = G_LOAD [[COPY]](p3) :: (load (<2 x s16>) from %ir.addr, addrspace 3)
  ; CHECK-NEXT:   G_BR %bb.2
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT: bb.2.atomicrmw.start:
  ; CHECK-NEXT:   successors: %bb.3(0x40000000), %bb.2(0x40000000)
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   [[PHI:%[0-9]+]]:_(s64) = G_PHI %20(s64), %bb.2, [[C1]](s64), %bb.1
  ; CHECK-NEXT:   [[PHI1:%[0-9]+]]:_(<2 x s16>) = G_PHI [[LOAD]](<2 x s16>), %bb.1, %19(<2 x s16>), %bb.2
  ; CHECK-NEXT:   [[FMAXNUM:%[0-9]+]]:_(<2 x s16>) = G_FMAXNUM [[PHI1]], [[BUILD_VECTOR]]
  ; CHECK-NEXT:   [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[FMAXNUM]](<2 x s16>)
  ; CHECK-NEXT:   [[BITCAST1:%[0-9]+]]:_(s32) = G_BITCAST [[PHI1]](<2 x s16>)
  ; CHECK-NEXT:   [[ATOMIC_CMPXCHG_WITH_SUCCESS:%[0-9]+]]:_(s32), [[ATOMIC_CMPXCHG_WITH_SUCCESS1:%[0-9]+]]:_(s1) = G_ATOMIC_CMPXCHG_WITH_SUCCESS [[COPY]](p3), [[BITCAST1]], [[BITCAST]] :: (load store seq_cst seq_cst (s32) on %ir.addr, addrspace 3)
  ; CHECK-NEXT:   [[BITCAST2:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[ATOMIC_CMPXCHG_WITH_SUCCESS]](s32)
  ; CHECK-NEXT:   [[INT:%[0-9]+]]:_(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.if.break), [[ATOMIC_CMPXCHG_WITH_SUCCESS1]](s1), [[PHI]](s64)
  ; CHECK-NEXT:   [[INT1:%[0-9]+]]:_(s1) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.loop), [[INT]](s64)
  ; CHECK-NEXT:   G_BRCOND [[INT1]](s1), %bb.3
  ; CHECK-NEXT:   G_BR %bb.2
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT: bb.3.atomicrmw.end:
  ; CHECK-NEXT:   [[PHI2:%[0-9]+]]:_(<2 x s16>) = G_PHI [[BITCAST2]](<2 x s16>), %bb.2
  ; CHECK-NEXT:   [[PHI3:%[0-9]+]]:_(s64) = G_PHI [[INT]](s64), %bb.2
  ; CHECK-NEXT:   G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.end.cf), [[PHI3]](s64)
  ; CHECK-NEXT:   $vgpr0 = COPY [[PHI2]](<2 x s16>)
  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0
  %oldval = atomicrmw fmax ptr addrspace(3) %addr, <2 x half> <half 1.0, half 1.0> seq_cst
  ret <2 x half> %oldval
}

!llvm.module.flags = !{!0}
!0 = !{i32 1, !"amdhsa_code_object_version", i32 500}