// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
// RUN: %clang_cc1 -triple armv5e-none-linux-gnueabi -emit-llvm %s -o - | FileCheck %s

// Memory-order enumerators mirroring C11 <stdatomic.h> memory_order
// (relaxed == 0 ... seq_cst == 5), declared locally so the test does not
// depend on the standard header.
enum memory_order {
  memory_order_relaxed, memory_order_consume, memory_order_acquire,
  memory_order_release, memory_order_acq_rel, memory_order_seq_cst
};

// C11 builtin scales the operand by the pointee size: 3 * sizeof(int) == 12.
// CHECK-LABEL: define dso_local ptr @test_c11_atomic_fetch_add_int_ptr(
// CHECK-SAME: ptr noundef [[P:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[P_ADDR:%.*]] = alloca ptr, align 4
// CHECK-NEXT:    [[DOTATOMICTMP:%.*]] = alloca i32, align 4
// CHECK-NEXT:    [[ATOMIC_TEMP:%.*]] = alloca ptr, align 4
// CHECK-NEXT:    store ptr [[P]], ptr [[P_ADDR]], align 4
// CHECK-NEXT:    [[TMP0:%.*]] = load ptr, ptr [[P_ADDR]], align 4
// CHECK-NEXT:    store i32 12, ptr [[DOTATOMICTMP]], align 4
// CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr [[DOTATOMICTMP]], align 4
// CHECK-NEXT:    [[TMP2:%.*]] = atomicrmw add ptr [[TMP0]], i32 [[TMP1]] seq_cst, align 4
// CHECK-NEXT:    store i32 [[TMP2]], ptr [[ATOMIC_TEMP]], align 4
// CHECK-NEXT:    [[TMP3:%.*]] = load ptr, ptr [[ATOMIC_TEMP]], align 4
// CHECK-NEXT:    ret ptr [[TMP3]]
//
int *test_c11_atomic_fetch_add_int_ptr(_Atomic(int *) *p) {
  return __c11_atomic_fetch_add(p, 3, memory_order_seq_cst);
}

// C11 builtin scales the operand by the pointee size: 5 * sizeof(int) == 20.
// CHECK-LABEL: define dso_local ptr @test_c11_atomic_fetch_sub_int_ptr(
// CHECK-SAME: ptr noundef [[P:%.*]]) #[[ATTR0]] {
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[P_ADDR:%.*]] = alloca ptr, align 4
// CHECK-NEXT:    [[DOTATOMICTMP:%.*]] = alloca i32, align 4
// CHECK-NEXT:    [[ATOMIC_TEMP:%.*]] = alloca ptr, align 4
// CHECK-NEXT:    store ptr [[P]], ptr [[P_ADDR]], align 4
// CHECK-NEXT:    [[TMP0:%.*]] = load ptr, ptr [[P_ADDR]], align 4
// CHECK-NEXT:    store i32 20, ptr [[DOTATOMICTMP]], align 4
// CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr [[DOTATOMICTMP]], align 4
// CHECK-NEXT:    [[TMP2:%.*]] = atomicrmw sub ptr [[TMP0]], i32 [[TMP1]] seq_cst, align 4
// CHECK-NEXT:    store i32 [[TMP2]], ptr [[ATOMIC_TEMP]], align 4
// CHECK-NEXT:    [[TMP3:%.*]] = load ptr, ptr [[ATOMIC_TEMP]], align 4
// CHECK-NEXT:    ret ptr [[TMP3]]
//
int *test_c11_atomic_fetch_sub_int_ptr(_Atomic(int *) *p) {
  return __c11_atomic_fetch_sub(p, 5, memory_order_seq_cst);
}

// Plain int: operand 3 is not scaled; lowers to atomicrmw add seq_cst.
// CHECK-LABEL: define dso_local i32 @test_c11_atomic_fetch_add_int(
// CHECK-SAME: ptr noundef [[P:%.*]]) #[[ATTR0]] {
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[P_ADDR:%.*]] = alloca ptr, align 4
// CHECK-NEXT:    [[DOTATOMICTMP:%.*]] = alloca i32, align 4
// CHECK-NEXT:    [[ATOMIC_TEMP:%.*]] = alloca i32, align 4
// CHECK-NEXT:    store ptr [[P]], ptr [[P_ADDR]], align 4
// CHECK-NEXT:    [[TMP0:%.*]] = load ptr, ptr [[P_ADDR]], align 4
// CHECK-NEXT:    store i32 3, ptr [[DOTATOMICTMP]], align 4
// CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr [[DOTATOMICTMP]], align 4
// CHECK-NEXT:    [[TMP2:%.*]] = atomicrmw add ptr [[TMP0]], i32 [[TMP1]] seq_cst, align 4
// CHECK-NEXT:    store i32 [[TMP2]], ptr [[ATOMIC_TEMP]], align 4
// CHECK-NEXT:    [[TMP3:%.*]] = load i32, ptr [[ATOMIC_TEMP]], align 4
// CHECK-NEXT:    ret i32 [[TMP3]]
//
int test_c11_atomic_fetch_add_int(_Atomic(int) *p) {
  return __c11_atomic_fetch_add(p, 3, memory_order_seq_cst);
}

// Plain int: operand 5 is not scaled; lowers to atomicrmw sub seq_cst.
// CHECK-LABEL: define dso_local i32 @test_c11_atomic_fetch_sub_int(
// CHECK-SAME: ptr noundef [[P:%.*]]) #[[ATTR0]] {
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[P_ADDR:%.*]] = alloca ptr, align 4
// CHECK-NEXT:    [[DOTATOMICTMP:%.*]] = alloca i32, align 4
// CHECK-NEXT:    [[ATOMIC_TEMP:%.*]] = alloca i32, align 4
// CHECK-NEXT:    store ptr [[P]], ptr [[P_ADDR]], align 4
// CHECK-NEXT:    [[TMP0:%.*]] = load ptr, ptr [[P_ADDR]], align 4
// CHECK-NEXT:    store i32 5, ptr [[DOTATOMICTMP]], align 4
// CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr [[DOTATOMICTMP]], align 4
// CHECK-NEXT:    [[TMP2:%.*]] = atomicrmw sub ptr [[TMP0]], i32 [[TMP1]] seq_cst, align 4
// CHECK-NEXT:    store i32 [[TMP2]], ptr [[ATOMIC_TEMP]], align 4
// CHECK-NEXT:    [[TMP3:%.*]] = load i32, ptr [[ATOMIC_TEMP]], align 4
// CHECK-NEXT:    ret i32 [[TMP3]]
//
int test_c11_atomic_fetch_sub_int(_Atomic(int) *p) {
  return __c11_atomic_fetch_sub(p, 5, memory_order_seq_cst);
}

// GNU builtin on a pointer object: the 4 is raw bytes (no sizeof scaling),
// and memory_order_relaxed lowers to 'monotonic'.
// CHECK-LABEL: define dso_local ptr @fp2a(
// CHECK-SAME: ptr noundef [[P:%.*]]) #[[ATTR0]] {
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[P_ADDR:%.*]] = alloca ptr, align 4
// CHECK-NEXT:    [[DOTATOMICTMP:%.*]] = alloca i32, align 4
// CHECK-NEXT:    [[ATOMIC_TEMP:%.*]] = alloca ptr, align 4
// CHECK-NEXT:    store ptr [[P]], ptr [[P_ADDR]], align 4
// CHECK-NEXT:    [[TMP0:%.*]] = load ptr, ptr [[P_ADDR]], align 4
// CHECK-NEXT:    store i32 4, ptr [[DOTATOMICTMP]], align 4
// CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr [[DOTATOMICTMP]], align 4
// CHECK-NEXT:    [[TMP2:%.*]] = atomicrmw sub ptr [[TMP0]], i32 [[TMP1]] monotonic, align 4
// CHECK-NEXT:    store i32 [[TMP2]], ptr [[ATOMIC_TEMP]], align 4
// CHECK-NEXT:    [[TMP3:%.*]] = load ptr, ptr [[ATOMIC_TEMP]], align 4
// CHECK-NEXT:    ret ptr [[TMP3]]
//
int *fp2a(int **p) {
  // Note, the GNU builtins do not multiply by sizeof(T)!
  return __atomic_fetch_sub(p, 4, memory_order_relaxed);
}

// __atomic_fetch_add returns the previous value (atomicrmw add result).
// CHECK-LABEL: define dso_local i32 @test_atomic_fetch_add(
// CHECK-SAME: ptr noundef [[P:%.*]]) #[[ATTR0]] {
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[P_ADDR:%.*]] = alloca ptr, align 4
// CHECK-NEXT:    [[DOTATOMICTMP:%.*]] = alloca i32, align 4
// CHECK-NEXT:    [[ATOMIC_TEMP:%.*]] = alloca i32, align 4
// CHECK-NEXT:    store ptr [[P]], ptr [[P_ADDR]], align 4
// CHECK-NEXT:    [[TMP0:%.*]] = load ptr, ptr [[P_ADDR]], align 4
// CHECK-NEXT:    store i32 55, ptr [[DOTATOMICTMP]], align 4
// CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr [[DOTATOMICTMP]], align 4
// CHECK-NEXT:    [[TMP2:%.*]] = atomicrmw add ptr [[TMP0]], i32 [[TMP1]] seq_cst, align 4
// CHECK-NEXT:    store i32 [[TMP2]], ptr [[ATOMIC_TEMP]], align 4
// CHECK-NEXT:    [[TMP3:%.*]] = load i32, ptr [[ATOMIC_TEMP]], align 4
// CHECK-NEXT:    ret i32 [[TMP3]]
//
int test_atomic_fetch_add(int *p) {
  return __atomic_fetch_add(p, 55, memory_order_seq_cst);
}

// __atomic_fetch_sub returns the previous value (atomicrmw sub result).
// CHECK-LABEL: define dso_local i32 @test_atomic_fetch_sub(
// CHECK-SAME: ptr noundef [[P:%.*]]) #[[ATTR0]] {
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[P_ADDR:%.*]] = alloca ptr, align 4
// CHECK-NEXT:    [[DOTATOMICTMP:%.*]] = alloca i32, align 4
// CHECK-NEXT:    [[ATOMIC_TEMP:%.*]] = alloca i32, align 4
// CHECK-NEXT:    store ptr [[P]], ptr [[P_ADDR]], align 4
// CHECK-NEXT:    [[TMP0:%.*]] = load ptr, ptr [[P_ADDR]], align 4
// CHECK-NEXT:    store i32 55, ptr [[DOTATOMICTMP]], align 4
// CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr [[DOTATOMICTMP]], align 4
// CHECK-NEXT:    [[TMP2:%.*]] = atomicrmw sub ptr [[TMP0]], i32 [[TMP1]] seq_cst, align 4
// CHECK-NEXT:    store i32 [[TMP2]], ptr [[ATOMIC_TEMP]], align 4
// CHECK-NEXT:    [[TMP3:%.*]] = load i32, ptr [[ATOMIC_TEMP]], align 4
// CHECK-NEXT:    ret i32 [[TMP3]]
//
int test_atomic_fetch_sub(int *p) {
  return __atomic_fetch_sub(p, 55, memory_order_seq_cst);
}

// __atomic_fetch_and returns the previous value (atomicrmw and result).
// CHECK-LABEL: define dso_local i32 @test_atomic_fetch_and(
// CHECK-SAME: ptr noundef [[P:%.*]]) #[[ATTR0]] {
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[P_ADDR:%.*]] = alloca ptr, align 4
// CHECK-NEXT:    [[DOTATOMICTMP:%.*]] = alloca i32, align 4
// CHECK-NEXT:    [[ATOMIC_TEMP:%.*]] = alloca i32, align 4
// CHECK-NEXT:    store ptr [[P]], ptr [[P_ADDR]], align 4
// CHECK-NEXT:    [[TMP0:%.*]] = load ptr, ptr [[P_ADDR]], align 4
// CHECK-NEXT:    store i32 55, ptr [[DOTATOMICTMP]], align 4
// CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr [[DOTATOMICTMP]], align 4
// CHECK-NEXT:    [[TMP2:%.*]] = atomicrmw and ptr [[TMP0]], i32 [[TMP1]] seq_cst, align 4
// CHECK-NEXT:    store i32 [[TMP2]], ptr [[ATOMIC_TEMP]], align 4
// CHECK-NEXT:    [[TMP3:%.*]] = load i32, ptr [[ATOMIC_TEMP]], align 4
// CHECK-NEXT:    ret i32 [[TMP3]]
//
int test_atomic_fetch_and(int *p) {
  return __atomic_fetch_and(p, 55, memory_order_seq_cst);
}

// __atomic_fetch_or returns the previous value (atomicrmw or result).
// CHECK-LABEL: define dso_local i32 @test_atomic_fetch_or(
// CHECK-SAME: ptr noundef [[P:%.*]]) #[[ATTR0]] {
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[P_ADDR:%.*]] = alloca ptr, align 4
// CHECK-NEXT:    [[DOTATOMICTMP:%.*]] = alloca i32, align 4
// CHECK-NEXT:    [[ATOMIC_TEMP:%.*]] = alloca i32, align 4
// CHECK-NEXT:    store ptr [[P]], ptr [[P_ADDR]], align 4
// CHECK-NEXT:    [[TMP0:%.*]] = load ptr, ptr [[P_ADDR]], align 4
// CHECK-NEXT:    store i32 55, ptr [[DOTATOMICTMP]], align 4
// CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr [[DOTATOMICTMP]], align 4
// CHECK-NEXT:    [[TMP2:%.*]] = atomicrmw or ptr [[TMP0]], i32 [[TMP1]] seq_cst, align 4
// CHECK-NEXT:    store i32 [[TMP2]], ptr [[ATOMIC_TEMP]], align 4
// CHECK-NEXT:    [[TMP3:%.*]] = load i32, ptr [[ATOMIC_TEMP]], align 4
// CHECK-NEXT:    ret i32 [[TMP3]]
//
int test_atomic_fetch_or(int *p) {
  return __atomic_fetch_or(p, 55, memory_order_seq_cst);
}

// __atomic_fetch_xor returns the previous value (atomicrmw xor result).
// CHECK-LABEL: define dso_local i32 @test_atomic_fetch_xor(
// CHECK-SAME: ptr noundef [[P:%.*]]) #[[ATTR0]] {
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[P_ADDR:%.*]] = alloca ptr, align 4
// CHECK-NEXT:    [[DOTATOMICTMP:%.*]] = alloca i32, align 4
// CHECK-NEXT:    [[ATOMIC_TEMP:%.*]] = alloca i32, align 4
// CHECK-NEXT:    store ptr [[P]], ptr [[P_ADDR]], align 4
// CHECK-NEXT:    [[TMP0:%.*]] = load ptr, ptr [[P_ADDR]], align 4
// CHECK-NEXT:    store i32 55, ptr [[DOTATOMICTMP]], align 4
// CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr [[DOTATOMICTMP]], align 4
// CHECK-NEXT:    [[TMP2:%.*]] = atomicrmw xor ptr [[TMP0]], i32 [[TMP1]] seq_cst, align 4
// CHECK-NEXT:    store i32 [[TMP2]], ptr [[ATOMIC_TEMP]], align 4
// CHECK-NEXT:    [[TMP3:%.*]] = load i32, ptr [[ATOMIC_TEMP]], align 4
// CHECK-NEXT:    ret i32 [[TMP3]]
//
int test_atomic_fetch_xor(int *p) {
  return __atomic_fetch_xor(p, 55, memory_order_seq_cst);
}

// __atomic_fetch_nand returns the previous value (atomicrmw nand result).
// CHECK-LABEL: define dso_local i32 @test_atomic_fetch_nand(
// CHECK-SAME: ptr noundef [[P:%.*]]) #[[ATTR0]] {
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[P_ADDR:%.*]] = alloca ptr, align 4
// CHECK-NEXT:    [[DOTATOMICTMP:%.*]] = alloca i32, align 4
// CHECK-NEXT:    [[ATOMIC_TEMP:%.*]] = alloca i32, align 4
// CHECK-NEXT:    store ptr [[P]], ptr [[P_ADDR]], align 4
// CHECK-NEXT:    [[TMP0:%.*]] = load ptr, ptr [[P_ADDR]], align 4
// CHECK-NEXT:    store i32 55, ptr [[DOTATOMICTMP]], align 4
// CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr [[DOTATOMICTMP]], align 4
// CHECK-NEXT:    [[TMP2:%.*]] = atomicrmw nand ptr [[TMP0]], i32 [[TMP1]] seq_cst, align 4
// CHECK-NEXT:    store i32 [[TMP2]], ptr [[ATOMIC_TEMP]], align 4
// CHECK-NEXT:    [[TMP3:%.*]] = load i32, ptr [[ATOMIC_TEMP]], align 4
// CHECK-NEXT:    ret i32 [[TMP3]]
//
int test_atomic_fetch_nand(int *p) {
  return __atomic_fetch_nand(p, 55, memory_order_seq_cst);
}

// op_fetch form: the new value is recomputed after the RMW
// (add i32 [[TMP2]], [[TMP1]]) and returned.
// CHECK-LABEL: define dso_local i32 @test_atomic_add_fetch(
// CHECK-SAME: ptr noundef [[P:%.*]]) #[[ATTR0]] {
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[P_ADDR:%.*]] = alloca ptr, align 4
// CHECK-NEXT:    [[DOTATOMICTMP:%.*]] = alloca i32, align 4
// CHECK-NEXT:    [[ATOMIC_TEMP:%.*]] = alloca i32, align 4
// CHECK-NEXT:    store ptr [[P]], ptr [[P_ADDR]], align 4
// CHECK-NEXT:    [[TMP0:%.*]] = load ptr, ptr [[P_ADDR]], align 4
// CHECK-NEXT:    store i32 55, ptr [[DOTATOMICTMP]], align 4
// CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr [[DOTATOMICTMP]], align 4
// CHECK-NEXT:    [[TMP2:%.*]] = atomicrmw add ptr [[TMP0]], i32 [[TMP1]] seq_cst, align 4
// CHECK-NEXT:    [[TMP3:%.*]] = add i32 [[TMP2]], [[TMP1]]
// CHECK-NEXT:    store i32 [[TMP3]], ptr [[ATOMIC_TEMP]], align 4
// CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr [[ATOMIC_TEMP]], align 4
// CHECK-NEXT:    ret i32 [[TMP4]]
//
int test_atomic_add_fetch(int *p) {
  return __atomic_add_fetch(p, 55, memory_order_seq_cst);
}

// op_fetch form: the new value is recomputed after the RMW
// (sub i32 [[TMP2]], [[TMP1]]) and returned.
// CHECK-LABEL: define dso_local i32 @test_atomic_sub_fetch(
// CHECK-SAME: ptr noundef [[P:%.*]]) #[[ATTR0]] {
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[P_ADDR:%.*]] = alloca ptr, align 4
// CHECK-NEXT:    [[DOTATOMICTMP:%.*]] = alloca i32, align 4
// CHECK-NEXT:    [[ATOMIC_TEMP:%.*]] = alloca i32, align 4
// CHECK-NEXT:    store ptr [[P]], ptr [[P_ADDR]], align 4
// CHECK-NEXT:    [[TMP0:%.*]] = load ptr, ptr [[P_ADDR]], align 4
// CHECK-NEXT:    store i32 55, ptr [[DOTATOMICTMP]], align 4
// CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr [[DOTATOMICTMP]], align 4
// CHECK-NEXT:    [[TMP2:%.*]] = atomicrmw sub ptr [[TMP0]], i32 [[TMP1]] seq_cst, align 4
// CHECK-NEXT:    [[TMP3:%.*]] = sub i32 [[TMP2]], [[TMP1]]
// CHECK-NEXT:    store i32 [[TMP3]], ptr [[ATOMIC_TEMP]], align 4
// CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr [[ATOMIC_TEMP]], align 4
// CHECK-NEXT:    ret i32 [[TMP4]]
//
int test_atomic_sub_fetch(int *p) {
  return __atomic_sub_fetch(p, 55, memory_order_seq_cst);
}

// op_fetch form: the new value is recomputed after the RMW
// (and i32 [[TMP2]], [[TMP1]]) and returned.
// CHECK-LABEL: define dso_local i32 @test_atomic_and_fetch(
// CHECK-SAME: ptr noundef [[P:%.*]]) #[[ATTR0]] {
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[P_ADDR:%.*]] = alloca ptr, align 4
// CHECK-NEXT:    [[DOTATOMICTMP:%.*]] = alloca i32, align 4
// CHECK-NEXT:    [[ATOMIC_TEMP:%.*]] = alloca i32, align 4
// CHECK-NEXT:    store ptr [[P]], ptr [[P_ADDR]], align 4
// CHECK-NEXT:    [[TMP0:%.*]] = load ptr, ptr [[P_ADDR]], align 4
// CHECK-NEXT:    store i32 55, ptr [[DOTATOMICTMP]], align 4
// CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr [[DOTATOMICTMP]], align 4
// CHECK-NEXT:    [[TMP2:%.*]] = atomicrmw and ptr [[TMP0]], i32 [[TMP1]] seq_cst, align 4
// CHECK-NEXT:    [[TMP3:%.*]] = and i32 [[TMP2]], [[TMP1]]
// CHECK-NEXT:    store i32 [[TMP3]], ptr [[ATOMIC_TEMP]], align 4
// CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr [[ATOMIC_TEMP]], align 4
// CHECK-NEXT:    ret i32 [[TMP4]]
//
int test_atomic_and_fetch(int *p) {
  return __atomic_and_fetch(p, 55, memory_order_seq_cst);
}

// op_fetch form: the new value is recomputed after the RMW
// (or i32 [[TMP2]], [[TMP1]]) and returned.
// CHECK-LABEL: define dso_local i32 @test_atomic_or_fetch(
// CHECK-SAME: ptr noundef [[P:%.*]]) #[[ATTR0]] {
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[P_ADDR:%.*]] = alloca ptr, align 4
// CHECK-NEXT:    [[DOTATOMICTMP:%.*]] = alloca i32, align 4
// CHECK-NEXT:    [[ATOMIC_TEMP:%.*]] = alloca i32, align 4
// CHECK-NEXT:    store ptr [[P]], ptr [[P_ADDR]], align 4
// CHECK-NEXT:    [[TMP0:%.*]] = load ptr, ptr [[P_ADDR]], align 4
// CHECK-NEXT:    store i32 55, ptr [[DOTATOMICTMP]], align 4
// CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr [[DOTATOMICTMP]], align 4
// CHECK-NEXT:    [[TMP2:%.*]] = atomicrmw or ptr [[TMP0]], i32 [[TMP1]] seq_cst, align 4
// CHECK-NEXT:    [[TMP3:%.*]] = or i32 [[TMP2]], [[TMP1]]
// CHECK-NEXT:    store i32 [[TMP3]], ptr [[ATOMIC_TEMP]], align 4
// CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr [[ATOMIC_TEMP]], align 4
// CHECK-NEXT:    ret i32 [[TMP4]]
//
int test_atomic_or_fetch(int *p) {
  return __atomic_or_fetch(p, 55, memory_order_seq_cst);
}

// op_fetch form: the new value is recomputed after the RMW
// (xor i32 [[TMP2]], [[TMP1]]) and returned.
// CHECK-LABEL: define dso_local i32 @test_atomic_xor_fetch(
// CHECK-SAME: ptr noundef [[P:%.*]]) #[[ATTR0]] {
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[P_ADDR:%.*]] = alloca ptr, align 4
// CHECK-NEXT:    [[DOTATOMICTMP:%.*]] = alloca i32, align 4
// CHECK-NEXT:    [[ATOMIC_TEMP:%.*]] = alloca i32, align 4
// CHECK-NEXT:    store ptr [[P]], ptr [[P_ADDR]], align 4
// CHECK-NEXT:    [[TMP0:%.*]] = load ptr, ptr [[P_ADDR]], align 4
// CHECK-NEXT:    store i32 55, ptr [[DOTATOMICTMP]], align 4
// CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr [[DOTATOMICTMP]], align 4
// CHECK-NEXT:    [[TMP2:%.*]] = atomicrmw xor ptr [[TMP0]], i32 [[TMP1]] seq_cst, align 4
// CHECK-NEXT:    [[TMP3:%.*]] = xor i32 [[TMP2]], [[TMP1]]
// CHECK-NEXT:    store i32 [[TMP3]], ptr [[ATOMIC_TEMP]], align 4
// CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr [[ATOMIC_TEMP]], align 4
// CHECK-NEXT:    ret i32 [[TMP4]]
//
int test_atomic_xor_fetch(int *p) {
  return __atomic_xor_fetch(p, 55, memory_order_seq_cst);
}

// nand_fetch: new value ~(old & val) is rebuilt as 'and' followed by
// 'xor ..., -1' and returned.
// CHECK-LABEL: define dso_local i32 @test_atomic_nand_fetch(
// CHECK-SAME: ptr noundef [[P:%.*]]) #[[ATTR0]] {
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[P_ADDR:%.*]] = alloca ptr, align 4
// CHECK-NEXT:    [[DOTATOMICTMP:%.*]] = alloca i32, align 4
// CHECK-NEXT:    [[ATOMIC_TEMP:%.*]] = alloca i32, align 4
// CHECK-NEXT:    store ptr [[P]], ptr [[P_ADDR]], align 4
// CHECK-NEXT:    [[TMP0:%.*]] = load ptr, ptr [[P_ADDR]], align 4
// CHECK-NEXT:    store i32 55, ptr [[DOTATOMICTMP]], align 4
// CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr [[DOTATOMICTMP]], align 4
// CHECK-NEXT:    [[TMP2:%.*]] = atomicrmw nand ptr [[TMP0]], i32 [[TMP1]] seq_cst, align 4
// CHECK-NEXT:    [[TMP3:%.*]] = and i32 [[TMP2]], [[TMP1]]
// CHECK-NEXT:    [[TMP4:%.*]] = xor i32 [[TMP3]], -1
// CHECK-NEXT:    store i32 [[TMP4]], ptr [[ATOMIC_TEMP]], align 4
// CHECK-NEXT:    [[TMP5:%.*]] = load i32, ptr [[ATOMIC_TEMP]], align 4
// CHECK-NEXT:    ret i32 [[TMP5]]
//
int test_atomic_nand_fetch(int *p) {
  return __atomic_nand_fetch(p, 55, memory_order_seq_cst);
}