// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
// RUN: %clang_cc1 -fnative-half-arguments-and-returns -triple amdgcn-amd-amdhsa-gnu -target-cpu gfx900 -emit-llvm -o - %s | FileCheck -check-prefixes=CHECK,SAFE %s
// RUN: %clang_cc1 -fnative-half-arguments-and-returns -triple amdgcn-amd-amdhsa-gnu -target-cpu gfx900 -emit-llvm -munsafe-fp-atomics -o - %s | FileCheck -check-prefixes=CHECK,UNSAFE %s

// SAFE-LABEL: define dso_local float @test_float_post_inc(
// SAFE-SAME: ) #[[ATTR0:[0-9]+]] {
// SAFE-NEXT:  [[ENTRY:.*:]]
// SAFE-NEXT:    [[RETVAL:%.*]] = alloca float, align 4, addrspace(5)
// SAFE-NEXT:    [[RETVAL_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[RETVAL]] to ptr
// SAFE-NEXT:    [[TMP0:%.*]] = atomicrmw fadd ptr addrspacecast (ptr addrspace(1) @test_float_post_inc.n to ptr), float 1.000000e+00 seq_cst, align 4
// SAFE-NEXT:    ret float [[TMP0]]
//
// UNSAFE-LABEL: define dso_local float @test_float_post_inc(
// UNSAFE-SAME: ) #[[ATTR0:[0-9]+]] {
// UNSAFE-NEXT:  [[ENTRY:.*:]]
// UNSAFE-NEXT:    [[RETVAL:%.*]] = alloca float, align 4, addrspace(5)
// UNSAFE-NEXT:    [[RETVAL_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[RETVAL]] to ptr
// UNSAFE-NEXT:    [[TMP0:%.*]] = atomicrmw fadd ptr addrspacecast (ptr addrspace(1) @test_float_post_inc.n to ptr), float 1.000000e+00 seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META3:![0-9]+]], !amdgpu.ignore.denormal.mode [[META3]]
// UNSAFE-NEXT:    ret float [[TMP0]]
//
21e108853aSMatt Arsenault float test_float_post_inc()
22e108853aSMatt Arsenault {
23e108853aSMatt Arsenault     static _Atomic float n;
24e108853aSMatt Arsenault     return n++;
25e108853aSMatt Arsenault }

// SAFE-LABEL: define dso_local float @test_float_post_dc(
// SAFE-SAME: ) #[[ATTR0]] {
// SAFE-NEXT:  [[ENTRY:.*:]]
// SAFE-NEXT:    [[RETVAL:%.*]] = alloca float, align 4, addrspace(5)
// SAFE-NEXT:    [[RETVAL_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[RETVAL]] to ptr
// SAFE-NEXT:    [[TMP0:%.*]] = atomicrmw fsub ptr addrspacecast (ptr addrspace(1) @test_float_post_dc.n to ptr), float 1.000000e+00 seq_cst, align 4
// SAFE-NEXT:    ret float [[TMP0]]
//
// UNSAFE-LABEL: define dso_local float @test_float_post_dc(
// UNSAFE-SAME: ) #[[ATTR0]] {
// UNSAFE-NEXT:  [[ENTRY:.*:]]
// UNSAFE-NEXT:    [[RETVAL:%.*]] = alloca float, align 4, addrspace(5)
// UNSAFE-NEXT:    [[RETVAL_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[RETVAL]] to ptr
// UNSAFE-NEXT:    [[TMP0:%.*]] = atomicrmw fsub ptr addrspacecast (ptr addrspace(1) @test_float_post_dc.n to ptr), float 1.000000e+00 seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META3]]
// UNSAFE-NEXT:    ret float [[TMP0]]
//
43e108853aSMatt Arsenault float test_float_post_dc()
44e108853aSMatt Arsenault {
45e108853aSMatt Arsenault     static _Atomic float n;
46e108853aSMatt Arsenault     return n--;
47e108853aSMatt Arsenault }

// SAFE-LABEL: define dso_local float @test_float_pre_dc(
// SAFE-SAME: ) #[[ATTR0]] {
// SAFE-NEXT:  [[ENTRY:.*:]]
// SAFE-NEXT:    [[RETVAL:%.*]] = alloca float, align 4, addrspace(5)
// SAFE-NEXT:    [[RETVAL_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[RETVAL]] to ptr
// SAFE-NEXT:    [[TMP0:%.*]] = atomicrmw fsub ptr addrspacecast (ptr addrspace(1) @test_float_pre_dc.n to ptr), float 1.000000e+00 seq_cst, align 4
// SAFE-NEXT:    [[TMP1:%.*]] = fsub float [[TMP0]], 1.000000e+00
// SAFE-NEXT:    ret float [[TMP1]]
//
// UNSAFE-LABEL: define dso_local float @test_float_pre_dc(
// UNSAFE-SAME: ) #[[ATTR0]] {
// UNSAFE-NEXT:  [[ENTRY:.*:]]
// UNSAFE-NEXT:    [[RETVAL:%.*]] = alloca float, align 4, addrspace(5)
// UNSAFE-NEXT:    [[RETVAL_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[RETVAL]] to ptr
// UNSAFE-NEXT:    [[TMP0:%.*]] = atomicrmw fsub ptr addrspacecast (ptr addrspace(1) @test_float_pre_dc.n to ptr), float 1.000000e+00 seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META3]]
// UNSAFE-NEXT:    [[TMP1:%.*]] = fsub float [[TMP0]], 1.000000e+00
// UNSAFE-NEXT:    ret float [[TMP1]]
//
67e108853aSMatt Arsenault float test_float_pre_dc()
68e108853aSMatt Arsenault {
69e108853aSMatt Arsenault     static _Atomic float n;
70e108853aSMatt Arsenault     return --n;
71e108853aSMatt Arsenault }

// SAFE-LABEL: define dso_local float @test_float_pre_inc(
// SAFE-SAME: ) #[[ATTR0]] {
// SAFE-NEXT:  [[ENTRY:.*:]]
// SAFE-NEXT:    [[RETVAL:%.*]] = alloca float, align 4, addrspace(5)
// SAFE-NEXT:    [[RETVAL_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[RETVAL]] to ptr
// SAFE-NEXT:    [[TMP0:%.*]] = atomicrmw fadd ptr addrspacecast (ptr addrspace(1) @test_float_pre_inc.n to ptr), float 1.000000e+00 seq_cst, align 4
// SAFE-NEXT:    [[TMP1:%.*]] = fadd float [[TMP0]], 1.000000e+00
// SAFE-NEXT:    ret float [[TMP1]]
//
// UNSAFE-LABEL: define dso_local float @test_float_pre_inc(
// UNSAFE-SAME: ) #[[ATTR0]] {
// UNSAFE-NEXT:  [[ENTRY:.*:]]
// UNSAFE-NEXT:    [[RETVAL:%.*]] = alloca float, align 4, addrspace(5)
// UNSAFE-NEXT:    [[RETVAL_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[RETVAL]] to ptr
// UNSAFE-NEXT:    [[TMP0:%.*]] = atomicrmw fadd ptr addrspacecast (ptr addrspace(1) @test_float_pre_inc.n to ptr), float 1.000000e+00 seq_cst, align 4, !amdgpu.no.fine.grained.memory [[META3]], !amdgpu.ignore.denormal.mode [[META3]]
// UNSAFE-NEXT:    [[TMP1:%.*]] = fadd float [[TMP0]], 1.000000e+00
// UNSAFE-NEXT:    ret float [[TMP1]]
//
91e108853aSMatt Arsenault float test_float_pre_inc()
92e108853aSMatt Arsenault {
93e108853aSMatt Arsenault     static _Atomic float n;
94e108853aSMatt Arsenault     return ++n;
95e108853aSMatt Arsenault }

// SAFE-LABEL: define dso_local double @test_double_post_inc(
// SAFE-SAME: ) #[[ATTR0]] {
// SAFE-NEXT:  [[ENTRY:.*:]]
// SAFE-NEXT:    [[RETVAL:%.*]] = alloca double, align 8, addrspace(5)
// SAFE-NEXT:    [[RETVAL_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[RETVAL]] to ptr
// SAFE-NEXT:    [[TMP0:%.*]] = atomicrmw fadd ptr addrspacecast (ptr addrspace(1) @test_double_post_inc.n to ptr), double 1.000000e+00 seq_cst, align 8
// SAFE-NEXT:    ret double [[TMP0]]
//
// UNSAFE-LABEL: define dso_local double @test_double_post_inc(
// UNSAFE-SAME: ) #[[ATTR0]] {
// UNSAFE-NEXT:  [[ENTRY:.*:]]
// UNSAFE-NEXT:    [[RETVAL:%.*]] = alloca double, align 8, addrspace(5)
// UNSAFE-NEXT:    [[RETVAL_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[RETVAL]] to ptr
// UNSAFE-NEXT:    [[TMP0:%.*]] = atomicrmw fadd ptr addrspacecast (ptr addrspace(1) @test_double_post_inc.n to ptr), double 1.000000e+00 seq_cst, align 8, !amdgpu.no.fine.grained.memory [[META3]]
// UNSAFE-NEXT:    ret double [[TMP0]]
//
113e108853aSMatt Arsenault double test_double_post_inc()
114e108853aSMatt Arsenault {
115e108853aSMatt Arsenault     static _Atomic double n;
116e108853aSMatt Arsenault     return n++;
117e108853aSMatt Arsenault }

// SAFE-LABEL: define dso_local double @test_double_post_dc(
// SAFE-SAME: ) #[[ATTR0]] {
// SAFE-NEXT:  [[ENTRY:.*:]]
// SAFE-NEXT:    [[RETVAL:%.*]] = alloca double, align 8, addrspace(5)
// SAFE-NEXT:    [[RETVAL_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[RETVAL]] to ptr
// SAFE-NEXT:    [[TMP0:%.*]] = atomicrmw fsub ptr addrspacecast (ptr addrspace(1) @test_double_post_dc.n to ptr), double 1.000000e+00 seq_cst, align 8
// SAFE-NEXT:    ret double [[TMP0]]
//
// UNSAFE-LABEL: define dso_local double @test_double_post_dc(
// UNSAFE-SAME: ) #[[ATTR0]] {
// UNSAFE-NEXT:  [[ENTRY:.*:]]
// UNSAFE-NEXT:    [[RETVAL:%.*]] = alloca double, align 8, addrspace(5)
// UNSAFE-NEXT:    [[RETVAL_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[RETVAL]] to ptr
// UNSAFE-NEXT:    [[TMP0:%.*]] = atomicrmw fsub ptr addrspacecast (ptr addrspace(1) @test_double_post_dc.n to ptr), double 1.000000e+00 seq_cst, align 8, !amdgpu.no.fine.grained.memory [[META3]]
// UNSAFE-NEXT:    ret double [[TMP0]]
//
135e108853aSMatt Arsenault double test_double_post_dc()
136e108853aSMatt Arsenault {
137e108853aSMatt Arsenault     static _Atomic double n;
138e108853aSMatt Arsenault     return n--;
139e108853aSMatt Arsenault }

// SAFE-LABEL: define dso_local double @test_double_pre_dc(
// SAFE-SAME: ) #[[ATTR0]] {
// SAFE-NEXT:  [[ENTRY:.*:]]
// SAFE-NEXT:    [[RETVAL:%.*]] = alloca double, align 8, addrspace(5)
// SAFE-NEXT:    [[RETVAL_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[RETVAL]] to ptr
// SAFE-NEXT:    [[TMP0:%.*]] = atomicrmw fsub ptr addrspacecast (ptr addrspace(1) @test_double_pre_dc.n to ptr), double 1.000000e+00 seq_cst, align 8
// SAFE-NEXT:    [[TMP1:%.*]] = fsub double [[TMP0]], 1.000000e+00
// SAFE-NEXT:    ret double [[TMP1]]
//
// UNSAFE-LABEL: define dso_local double @test_double_pre_dc(
// UNSAFE-SAME: ) #[[ATTR0]] {
// UNSAFE-NEXT:  [[ENTRY:.*:]]
// UNSAFE-NEXT:    [[RETVAL:%.*]] = alloca double, align 8, addrspace(5)
// UNSAFE-NEXT:    [[RETVAL_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[RETVAL]] to ptr
// UNSAFE-NEXT:    [[TMP0:%.*]] = atomicrmw fsub ptr addrspacecast (ptr addrspace(1) @test_double_pre_dc.n to ptr), double 1.000000e+00 seq_cst, align 8, !amdgpu.no.fine.grained.memory [[META3]]
// UNSAFE-NEXT:    [[TMP1:%.*]] = fsub double [[TMP0]], 1.000000e+00
// UNSAFE-NEXT:    ret double [[TMP1]]
//
159e108853aSMatt Arsenault double test_double_pre_dc()
160e108853aSMatt Arsenault {
161e108853aSMatt Arsenault     static _Atomic double n;
162e108853aSMatt Arsenault     return --n;
163e108853aSMatt Arsenault }

// SAFE-LABEL: define dso_local double @test_double_pre_inc(
// SAFE-SAME: ) #[[ATTR0]] {
// SAFE-NEXT:  [[ENTRY:.*:]]
// SAFE-NEXT:    [[RETVAL:%.*]] = alloca double, align 8, addrspace(5)
// SAFE-NEXT:    [[RETVAL_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[RETVAL]] to ptr
// SAFE-NEXT:    [[TMP0:%.*]] = atomicrmw fadd ptr addrspacecast (ptr addrspace(1) @test_double_pre_inc.n to ptr), double 1.000000e+00 seq_cst, align 8
// SAFE-NEXT:    [[TMP1:%.*]] = fadd double [[TMP0]], 1.000000e+00
// SAFE-NEXT:    ret double [[TMP1]]
//
// UNSAFE-LABEL: define dso_local double @test_double_pre_inc(
// UNSAFE-SAME: ) #[[ATTR0]] {
// UNSAFE-NEXT:  [[ENTRY:.*:]]
// UNSAFE-NEXT:    [[RETVAL:%.*]] = alloca double, align 8, addrspace(5)
// UNSAFE-NEXT:    [[RETVAL_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[RETVAL]] to ptr
// UNSAFE-NEXT:    [[TMP0:%.*]] = atomicrmw fadd ptr addrspacecast (ptr addrspace(1) @test_double_pre_inc.n to ptr), double 1.000000e+00 seq_cst, align 8, !amdgpu.no.fine.grained.memory [[META3]]
// UNSAFE-NEXT:    [[TMP1:%.*]] = fadd double [[TMP0]], 1.000000e+00
// UNSAFE-NEXT:    ret double [[TMP1]]
//
183e108853aSMatt Arsenault double test_double_pre_inc()
184e108853aSMatt Arsenault {
185e108853aSMatt Arsenault     static _Atomic double n;
186e108853aSMatt Arsenault     return ++n;
187e108853aSMatt Arsenault }

// SAFE-LABEL: define dso_local half @test__Float16_post_inc(
// SAFE-SAME: ) #[[ATTR0]] {
// SAFE-NEXT:  [[ENTRY:.*:]]
// SAFE-NEXT:    [[RETVAL:%.*]] = alloca half, align 2, addrspace(5)
// SAFE-NEXT:    [[RETVAL_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[RETVAL]] to ptr
// SAFE-NEXT:    [[TMP0:%.*]] = atomicrmw fadd ptr addrspacecast (ptr addrspace(1) @test__Float16_post_inc.n to ptr), half 0xH3C00 seq_cst, align 2
// SAFE-NEXT:    ret half [[TMP0]]
//
// UNSAFE-LABEL: define dso_local half @test__Float16_post_inc(
// UNSAFE-SAME: ) #[[ATTR0]] {
// UNSAFE-NEXT:  [[ENTRY:.*:]]
// UNSAFE-NEXT:    [[RETVAL:%.*]] = alloca half, align 2, addrspace(5)
// UNSAFE-NEXT:    [[RETVAL_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[RETVAL]] to ptr
// UNSAFE-NEXT:    [[TMP0:%.*]] = atomicrmw fadd ptr addrspacecast (ptr addrspace(1) @test__Float16_post_inc.n to ptr), half 0xH3C00 seq_cst, align 2, !amdgpu.no.fine.grained.memory [[META3]]
// UNSAFE-NEXT:    ret half [[TMP0]]
//
205e108853aSMatt Arsenault _Float16 test__Float16_post_inc()
206e108853aSMatt Arsenault {
207e108853aSMatt Arsenault     static _Atomic _Float16 n;
208e108853aSMatt Arsenault     return n++;
209e108853aSMatt Arsenault }

// SAFE-LABEL: define dso_local half @test__Float16_post_dc(
// SAFE-SAME: ) #[[ATTR0]] {
// SAFE-NEXT:  [[ENTRY:.*:]]
// SAFE-NEXT:    [[RETVAL:%.*]] = alloca half, align 2, addrspace(5)
// SAFE-NEXT:    [[RETVAL_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[RETVAL]] to ptr
// SAFE-NEXT:    [[TMP0:%.*]] = atomicrmw fsub ptr addrspacecast (ptr addrspace(1) @test__Float16_post_dc.n to ptr), half 0xH3C00 seq_cst, align 2
// SAFE-NEXT:    ret half [[TMP0]]
//
// UNSAFE-LABEL: define dso_local half @test__Float16_post_dc(
// UNSAFE-SAME: ) #[[ATTR0]] {
// UNSAFE-NEXT:  [[ENTRY:.*:]]
// UNSAFE-NEXT:    [[RETVAL:%.*]] = alloca half, align 2, addrspace(5)
// UNSAFE-NEXT:    [[RETVAL_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[RETVAL]] to ptr
// UNSAFE-NEXT:    [[TMP0:%.*]] = atomicrmw fsub ptr addrspacecast (ptr addrspace(1) @test__Float16_post_dc.n to ptr), half 0xH3C00 seq_cst, align 2, !amdgpu.no.fine.grained.memory [[META3]]
// UNSAFE-NEXT:    ret half [[TMP0]]
//
227e108853aSMatt Arsenault _Float16 test__Float16_post_dc()
228e108853aSMatt Arsenault {
229e108853aSMatt Arsenault     static _Atomic _Float16 n;
230e108853aSMatt Arsenault     return n--;
231e108853aSMatt Arsenault }

// SAFE-LABEL: define dso_local half @test__Float16_pre_dc(
// SAFE-SAME: ) #[[ATTR0]] {
// SAFE-NEXT:  [[ENTRY:.*:]]
// SAFE-NEXT:    [[RETVAL:%.*]] = alloca half, align 2, addrspace(5)
// SAFE-NEXT:    [[RETVAL_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[RETVAL]] to ptr
// SAFE-NEXT:    [[TMP0:%.*]] = atomicrmw fsub ptr addrspacecast (ptr addrspace(1) @test__Float16_pre_dc.n to ptr), half 0xH3C00 seq_cst, align 2
// SAFE-NEXT:    [[TMP1:%.*]] = fsub half [[TMP0]], 0xH3C00
// SAFE-NEXT:    ret half [[TMP1]]
//
// UNSAFE-LABEL: define dso_local half @test__Float16_pre_dc(
// UNSAFE-SAME: ) #[[ATTR0]] {
// UNSAFE-NEXT:  [[ENTRY:.*:]]
// UNSAFE-NEXT:    [[RETVAL:%.*]] = alloca half, align 2, addrspace(5)
// UNSAFE-NEXT:    [[RETVAL_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[RETVAL]] to ptr
// UNSAFE-NEXT:    [[TMP0:%.*]] = atomicrmw fsub ptr addrspacecast (ptr addrspace(1) @test__Float16_pre_dc.n to ptr), half 0xH3C00 seq_cst, align 2, !amdgpu.no.fine.grained.memory [[META3]]
// UNSAFE-NEXT:    [[TMP1:%.*]] = fsub half [[TMP0]], 0xH3C00
// UNSAFE-NEXT:    ret half [[TMP1]]
//
251e108853aSMatt Arsenault _Float16 test__Float16_pre_dc()
252e108853aSMatt Arsenault {
253e108853aSMatt Arsenault     static _Atomic _Float16 n;
254e108853aSMatt Arsenault     return --n;
255e108853aSMatt Arsenault }

// SAFE-LABEL: define dso_local half @test__Float16_pre_inc(
// SAFE-SAME: ) #[[ATTR0]] {
// SAFE-NEXT:  [[ENTRY:.*:]]
// SAFE-NEXT:    [[RETVAL:%.*]] = alloca half, align 2, addrspace(5)
// SAFE-NEXT:    [[RETVAL_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[RETVAL]] to ptr
// SAFE-NEXT:    [[TMP0:%.*]] = atomicrmw fadd ptr addrspacecast (ptr addrspace(1) @test__Float16_pre_inc.n to ptr), half 0xH3C00 seq_cst, align 2
// SAFE-NEXT:    [[TMP1:%.*]] = fadd half [[TMP0]], 0xH3C00
// SAFE-NEXT:    ret half [[TMP1]]
//
// UNSAFE-LABEL: define dso_local half @test__Float16_pre_inc(
// UNSAFE-SAME: ) #[[ATTR0]] {
// UNSAFE-NEXT:  [[ENTRY:.*:]]
// UNSAFE-NEXT:    [[RETVAL:%.*]] = alloca half, align 2, addrspace(5)
// UNSAFE-NEXT:    [[RETVAL_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[RETVAL]] to ptr
// UNSAFE-NEXT:    [[TMP0:%.*]] = atomicrmw fadd ptr addrspacecast (ptr addrspace(1) @test__Float16_pre_inc.n to ptr), half 0xH3C00 seq_cst, align 2, !amdgpu.no.fine.grained.memory [[META3]]
// UNSAFE-NEXT:    [[TMP1:%.*]] = fadd half [[TMP0]], 0xH3C00
// UNSAFE-NEXT:    ret half [[TMP1]]
//
275e108853aSMatt Arsenault _Float16 test__Float16_pre_inc()
276e108853aSMatt Arsenault {
277e108853aSMatt Arsenault     static _Atomic _Float16 n;
278e108853aSMatt Arsenault     return ++n;
279e108853aSMatt Arsenault }
//.
// UNSAFE: [[META3]] = !{}
//.
//// NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
// CHECK: {{.*}}