
// RUN: %clang_cc1 -verify -triple x86_64-apple-darwin10 -target-cpu core2 -fopenmp -fopenmp-version=50 -x c -emit-llvm %s -o - | FileCheck %s
// RUN: %clang_cc1 -fopenmp -fopenmp-version=50 -x c -triple x86_64-apple-darwin10 -target-cpu core2 -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp -fopenmp-version=50 -x c -triple x86_64-apple-darwin10 -target-cpu core2 -include-pch %t -verify %s -emit-llvm -o - | FileCheck --check-prefix=CHECK --check-prefix=CHECK-50 %s

// RUN: %clang_cc1 -verify -triple x86_64-apple-darwin10 -target-cpu core2 -fopenmp-simd -fopenmp-version=50 -x c -emit-llvm %s -o - | FileCheck --check-prefix SIMD-ONLY0 %s
// RUN: %clang_cc1 -fopenmp-simd -fopenmp-version=50 -x c -triple x86_64-apple-darwin10 -target-cpu core2 -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp-simd -fopenmp-version=50 -x c -triple x86_64-apple-darwin10 -target-cpu core2 -include-pch %t -verify %s -emit-llvm -o - | FileCheck --check-prefix SIMD-ONLY0 %s

// RUN: %clang_cc1 -verify -triple x86_64-apple-darwin10 -target-cpu core2 -fopenmp -x c -emit-llvm %s -o - | FileCheck %s
// RUN: %clang_cc1 -fopenmp -x c -triple x86_64-apple-darwin10 -target-cpu core2 -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp -x c -triple x86_64-apple-darwin10 -target-cpu core2 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s

// RUN: %clang_cc1 -verify -triple x86_64-apple-darwin10 -target-cpu core2 -fopenmp-simd -x c -emit-llvm %s -o - | FileCheck --check-prefix SIMD-ONLY0 %s
// RUN: %clang_cc1 -fopenmp-simd -x c -triple x86_64-apple-darwin10 -target-cpu core2 -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp-simd -x c -triple x86_64-apple-darwin10 -target-cpu core2 -include-pch %t -verify %s -emit-llvm -o - | FileCheck --check-prefix SIMD-ONLY0 %s
// SIMD-ONLY0-NOT: {{__kmpc|__tgt}}
// expected-no-diagnostics
#ifndef HEADER
#define HEADER

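// Checks the LLVM IR generated for '#pragma omp atomic capture' on scalar,
// _Complex, vector-element, bit-field, and global register l-values, with the
// various memory-order clauses (relaxed, acquire, release, acq_rel, seq_cst).
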
_Bool bv, bx;
char cv, cx;
unsigned char ucv, ucx;
short sv, sx;
unsigned short usv, usx;
int iv, ix;
unsigned int uiv, uix;
long lv, lx;
unsigned long ulv, ulx;
long long llv, llx;
unsigned long long ullv, ullx;
float fv, fx;
double dv, dx;
long double ldv, ldx;
_Complex int civ, cix;
_Complex float cfv, cfx;
_Complex double cdv, cdx;

typedef int int4 __attribute__((__vector_size__(16)));
int4 int4x;

struct BitFields {
  int : 32;
  int a : 31;
} bfx;

struct BitFields_packed {
  int : 32;
  int a : 31;
} __attribute__ ((__packed__)) bfx_packed;

struct BitFields2 {
  int : 31;
  int a : 1;
} bfx2;

struct BitFields2_packed {
  int : 31;
  int a : 1;
} __attribute__ ((__packed__)) bfx2_packed;

struct BitFields3 {
  int : 11;
  int a : 14;
} bfx3;

struct BitFields3_packed {
  int : 11;
  int a : 14;
} __attribute__ ((__packed__)) bfx3_packed;

struct BitFields4 {
  short : 16;
  int a: 1;
  long b : 7;
} bfx4;

struct BitFields4_packed {
  short : 16;
  int a: 1;
  long b : 7;
} __attribute__ ((__packed__)) bfx4_packed;

typedef float float2 __attribute__((ext_vector_type(2)));
float2 float2x;

// Register "0" is currently an invalid register for global register variables.
// Use "esp" instead of "0".
// register int rix __asm__("0");
register int rix __asm__("esp");

int main(void) {
// CHECK: [[PREV:%.+]] = atomicrmw add ptr @{{.+}}, i8 1 monotonic, align 1
// CHECK: store i8 [[PREV]], ptr @{{.+}},
#pragma omp atomic capture
  bv = bx++;
// CHECK: atomicrmw add ptr @{{.+}}, i8 1 monotonic, align 1
// CHECK: add nsw i32 %{{.+}}, 1
// CHECK: store i8 %{{.+}}, ptr @{{.+}},
#pragma omp atomic capture
  cv = ++cx;
// CHECK: [[PREV:%.+]] = atomicrmw sub ptr @{{.+}}, i8 1 monotonic, align 1
// CHECK: store i8 [[PREV]], ptr @{{.+}},
#pragma omp atomic capture
  ucv = ucx--;
// CHECK: atomicrmw sub ptr @{{.+}}, i16 1 monotonic, align 2
// CHECK: sub nsw i32 %{{.+}}, 1
// CHECK: store i16 %{{.+}}, ptr @{{.+}},
#pragma omp atomic capture
  sv = --sx;
// CHECK: [[USV:%.+]] = load i16, ptr @{{.+}},
// CHECK: [[EXPR:%.+]] = zext i16 [[USV]] to i32
// CHECK: [[X:%.+]] = load atomic i16, ptr [[X_ADDR:@.+]] monotonic, align 2
// CHECK: br label %[[CONT:.+]]
// CHECK: [[CONT]]
// CHECK: [[EXPECTED:%.+]] = phi i16 [ [[X]], %{{.+}} ], [ [[OLD_X:%.+]], %[[CONT]] ]
// CHECK: [[CONV:%.+]] = zext i16 [[EXPECTED]] to i32
// CHECK: [[ADD:%.+]] = add nsw i32 [[CONV]], [[EXPR]]
// CHECK: [[DESIRED_CALC:%.+]] = trunc i32 [[ADD]] to i16
// CHECK: store i16 [[DESIRED_CALC]], ptr [[TEMP:%.+]],
// CHECK: [[DESIRED:%.+]] = load i16, ptr [[TEMP]],
// CHECK: [[RES:%.+]] = cmpxchg ptr [[X_ADDR]], i16 [[EXPECTED]], i16 [[DESIRED]] monotonic monotonic, align 2
// CHECK: [[OLD_X]] = extractvalue { i16, i1 } [[RES]], 0
// CHECK: [[SUCCESS_FAIL:%.+]] = extractvalue { i16, i1 } [[RES]], 1
// CHECK: br i1 [[SUCCESS_FAIL]], label %[[EXIT:.+]], label %[[CONT]]
// CHECK: [[EXIT]]
// CHECK: store i16 [[DESIRED_CALC]], ptr @{{.+}},
#pragma omp atomic capture
  sv = usx += usv;
// CHECK: [[EXPR:%.+]] = load i32, ptr @{{.+}},
// CHECK: [[X:%.+]] = load atomic i32, ptr [[X_ADDR:@.+]] monotonic, align 4
// CHECK: br label %[[CONT:.+]]
// CHECK: [[CONT]]
// CHECK: [[EXPECTED:%.+]] = phi i32 [ [[X]], %{{.+}} ], [ [[OLD_X:%.+]], %[[CONT]] ]
// CHECK: [[DESIRED_CALC:%.+]] = mul nsw i32 [[EXPECTED]], [[EXPR]]
// CHECK: store i32 [[DESIRED_CALC]], ptr [[TEMP:%.+]],
// CHECK: [[DESIRED:%.+]] = load i32, ptr [[TEMP]],
// CHECK: [[RES:%.+]] = cmpxchg ptr [[X_ADDR]], i32 [[EXPECTED]], i32 [[DESIRED]] monotonic monotonic, align 4
// CHECK: [[OLD_X]] = extractvalue { i32, i1 } [[RES]], 0
// CHECK: [[SUCCESS_FAIL:%.+]] = extractvalue { i32, i1 } [[RES]], 1
// CHECK: br i1 [[SUCCESS_FAIL]], label %[[EXIT:.+]], label %[[CONT]]
// CHECK: [[EXIT]]
// CHECK: store i32 [[DESIRED_CALC]], ptr @{{.+}},
#pragma omp atomic capture
  uiv = ix *= iv;
// CHECK: [[EXPR:%.+]] = load i32, ptr @{{.+}},
// CHECK: [[PREV:%.+]] = atomicrmw sub ptr @{{.+}}, i32 [[EXPR]] monotonic, align 4
// CHECK: store i32 [[PREV]], ptr @{{.+}},
#pragma omp atomic capture
  {iv = uix; uix -= uiv;}
// CHECK: [[EXPR:%.+]] = load i32, ptr @{{.+}},
// CHECK: [[X:%.+]] = load atomic i32, ptr [[X_ADDR:@.+]] monotonic, align 4
// CHECK: br label %[[CONT:.+]]
// CHECK: [[CONT]]
// CHECK: [[EXPECTED:%.+]] = phi i32 [ [[X]], %{{.+}} ], [ [[OLD_X:%.+]], %[[CONT]] ]
// CHECK: [[DESIRED_CALC:%.+]] = shl i32 [[EXPECTED]], [[EXPR]]
// CHECK: store i32 [[DESIRED_CALC]], ptr [[TEMP:%.+]],
// CHECK: [[DESIRED:%.+]] = load i32, ptr [[TEMP]],
// CHECK: [[RES:%.+]] = cmpxchg ptr [[X_ADDR]], i32 [[EXPECTED]], i32 [[DESIRED]] monotonic monotonic, align 4
// CHECK: [[OLD_X]] = extractvalue { i32, i1 } [[RES]], 0
// CHECK: [[SUCCESS_FAIL:%.+]] = extractvalue { i32, i1 } [[RES]], 1
// CHECK: br i1 [[SUCCESS_FAIL]], label %[[EXIT:.+]], label %[[CONT]]
// CHECK: [[EXIT]]
// CHECK: store i32 [[DESIRED_CALC]], ptr @{{.+}},
#pragma omp atomic capture
  {ix <<= iv; uiv = ix;}
// CHECK: [[EXPR:%.+]] = load i32, ptr @{{.+}},
// CHECK: [[X:%.+]] = load atomic i32, ptr [[X_ADDR:@.+]] monotonic, align 4
// CHECK: br label %[[CONT:.+]]
// CHECK: [[CONT]]
// CHECK: [[EXPECTED:%.+]] = phi i32 [ [[X]], %{{.+}} ], [ [[OLD_X:%.+]], %[[CONT]] ]
// CHECK: [[DESIRED_CALC:%.+]] = lshr i32 [[EXPECTED]], [[EXPR]]
// CHECK: store i32 [[DESIRED_CALC]], ptr [[TEMP:%.+]],
// CHECK: [[DESIRED:%.+]] = load i32, ptr [[TEMP]],
// CHECK: [[RES:%.+]] = cmpxchg ptr [[X_ADDR]], i32 [[EXPECTED]], i32 [[DESIRED]] monotonic monotonic, align 4
// CHECK: [[OLD_X]] = extractvalue { i32, i1 } [[RES]], 0
// CHECK: [[SUCCESS_FAIL:%.+]] = extractvalue { i32, i1 } [[RES]], 1
// CHECK: br i1 [[SUCCESS_FAIL]], label %[[EXIT:.+]], label %[[CONT]]
// CHECK: [[EXIT]]
// CHECK: store i32 [[DESIRED_CALC]], ptr @{{.+}},
#pragma omp atomic capture
  iv = uix >>= uiv;
// CHECK: [[EXPR:%.+]] = load i64, ptr @{{.+}},
// CHECK: [[X:%.+]] = load atomic i64, ptr [[X_ADDR:@.+]] monotonic, align 8
// CHECK: br label %[[CONT:.+]]
// CHECK: [[CONT]]
// CHECK: [[EXPECTED:%.+]] = phi i64 [ [[X]], %{{.+}} ], [ [[OLD_X:%.+]], %[[CONT]] ]
// CHECK: [[DESIRED:%.+]] = sdiv i64 [[EXPECTED]], [[EXPR]]
// CHECK: store i64 [[DESIRED]], ptr [[TEMP:%.+]],
// CHECK: [[DESIRED:%.+]] = load i64, ptr [[TEMP]],
// CHECK: [[RES:%.+]] = cmpxchg ptr [[X_ADDR]], i64 [[EXPECTED]], i64 [[DESIRED]] monotonic monotonic, align 8
// CHECK: [[OLD_X]] = extractvalue { i64, i1 } [[RES]], 0
// CHECK: [[SUCCESS_FAIL:%.+]] = extractvalue { i64, i1 } [[RES]], 1
// CHECK: br i1 [[SUCCESS_FAIL]], label %[[EXIT:.+]], label %[[CONT]]
// CHECK: [[EXIT]]
// CHECK: store i64 [[EXPECTED]], ptr @{{.+}},
#pragma omp atomic capture
  {ulv = lx; lx /= lv;}
// CHECK: [[EXPR:%.+]] = load i64, ptr @{{.+}},
// CHECK: [[OLD:%.+]] = atomicrmw and ptr @{{.+}}, i64 [[EXPR]] monotonic, align 8
// CHECK: [[DESIRED:%.+]] = and i64 [[OLD]], [[EXPR]]
// CHECK:  store i64 [[DESIRED]], ptr @{{.+}},
#pragma omp atomic capture
  {ulx &= ulv; lv = ulx;}
// CHECK: [[EXPR:%.+]] = load i64, ptr @{{.+}},
// CHECK: [[OLD:%.+]] = atomicrmw xor ptr @{{.+}}, i64 [[EXPR]] monotonic, align 8
// CHECK: [[DESIRED:%.+]] = xor i64 [[OLD]], [[EXPR]]
// CHECK:  store i64 [[DESIRED]], ptr @{{.+}},
#pragma omp atomic capture
  ullv = llx ^= llv;
// CHECK: [[EXPR:%.+]] = load i64, ptr @{{.+}},
// CHECK: [[OLD:%.+]] = atomicrmw or ptr @{{.+}}, i64 [[EXPR]] monotonic, align 8
// CHECK: [[DESIRED:%.+]] = or i64 [[OLD]], [[EXPR]]
// CHECK:  store i64 [[DESIRED]], ptr @{{.+}},
#pragma omp atomic capture
  llv = ullx |= ullv;
// CHECK: [[EXPR:%.+]] = load float, ptr @{{.+}},
// CHECK: [[OLD:%.+]] = atomicrmw fadd ptr @{{.+}}, float [[EXPR]] monotonic, align 4
// CHECK: [[ADD:%.+]] = fadd float [[OLD]], [[EXPR]]
// CHECK: [[CAST:%.+]] = fpext float [[ADD]] to double
// CHECK: store double [[CAST]], ptr @{{.+}},
#pragma omp atomic capture
  dv = fx = fx + fv;
// CHECK: [[EXPR:%.+]] = load double, ptr @{{.+}},
// CHECK: [[X:%.+]] = load atomic i64, ptr [[X_ADDR:@.+]] monotonic, align 8
// CHECK: br label %[[CONT:.+]]
// CHECK: [[CONT]]
// CHECK: [[EXPECTED:%.+]] = phi i64 [ [[X]], %{{.+}} ], [ [[OLD_X:%.+]], %[[CONT]] ]
// CHECK: [[OLD:%.+]] = bitcast i64 [[EXPECTED]] to double
// CHECK: [[SUB:%.+]] = fsub double [[EXPR]], [[OLD]]
// CHECK: store double [[SUB]], ptr [[TEMP:%.+]],
// CHECK: [[DESIRED:%.+]] = load i64, ptr [[TEMP]],
// CHECK: [[RES:%.+]] = cmpxchg ptr [[X_ADDR]], i64 [[EXPECTED]], i64 [[DESIRED]] monotonic monotonic, align 8
// CHECK: [[OLD_X:%.+]] = extractvalue { i64, i1 } [[RES]], 0
// CHECK: [[SUCCESS_FAIL:%.+]] = extractvalue { i64, i1 } [[RES]], 1
// CHECK: br i1 [[SUCCESS_FAIL]], label %[[EXIT:.+]], label %[[CONT]]
// CHECK: [[EXIT]]
// CHECK: [[CAST:%.+]] = fptrunc double [[OLD]] to float
// CHECK: store float [[CAST]], ptr @{{.+}},
#pragma omp atomic capture
  {fv = dx; dx = dv - dx;}
// CHECK: [[EXPR:%.+]] = load x86_fp80, ptr @{{.+}},
// CHECK: [[X:%.+]] = load atomic i128, ptr [[X_ADDR:@.+]] monotonic, align 16
// CHECK: br label %[[CONT:.+]]
// CHECK: [[CONT]]
// CHECK: [[EXPECTED:%.+]] = phi i128 [ [[X]], %{{.+}} ], [ [[OLD_X:%.+]], %[[CONT]] ]
// CHECK: store i128 [[EXPECTED]], ptr [[TEMP:%.+]]
// CHECK: store i128 [[EXPECTED]], ptr [[TEMP1:%.+]]
// CHECK: [[OLD:%.+]] = load x86_fp80, ptr [[TEMP1]]
// CHECK: [[MUL:%.+]] = fmul x86_fp80 [[OLD]], [[EXPR]]
// CHECK: store x86_fp80 [[MUL]], ptr [[TEMP]]
// CHECK: [[DESIRED:%.+]] = load i128, ptr [[TEMP]]
// CHECK: [[RES:%.+]] = cmpxchg ptr [[X_ADDR]], i128 [[EXPECTED]], i128 [[DESIRED]] monotonic monotonic, align 16
// CHECK: [[OLD_X:%.+]] = extractvalue { i128, i1 } [[RES]], 0
// CHECK: [[SUCCESS_FAIL:%.+]] = extractvalue { i128, i1 } [[RES]], 1
// CHECK: br i1 [[SUCCESS_FAIL]], label %[[EXIT:.+]], label %[[CONT]]
// CHECK: [[EXIT]]
// CHECK: [[CAST:%.+]] = fptrunc x86_fp80 [[MUL]] to double
// CHECK: store double [[CAST]], ptr @{{.+}},
#pragma omp atomic capture
  {ldx = ldx * ldv; dv = ldx;}
// CHECK: [[EXPR_RE:%.+]] = load i32, ptr @{{.+}}
// CHECK: [[EXPR_IM:%.+]] = load i32, ptr getelementptr inbounds nuw ({ i32, i32 }, ptr @{{.+}}, i32 0, i32 1)
// CHECK: call void @__atomic_load(i64 noundef 8, ptr noundef [[X_ADDR:@.+]], ptr noundef [[EXPECTED_ADDR:%.+]], i32 noundef 0)
// CHECK: br label %[[CONT:.+]]
// CHECK: [[CONT]]
// CHECK: [[LD_RE_ADDR:%.+]] = getelementptr inbounds nuw { i32, i32 }, ptr [[EXPECTED_ADDR]], i32 0, i32 0
// CHECK: [[LD_RE:%.+]] = load i32, ptr [[LD_RE_ADDR]]
// CHECK: [[LD_IM_ADDR:%.+]] = getelementptr inbounds nuw { i32, i32 }, ptr [[EXPECTED_ADDR]], i32 0, i32 1
// CHECK: [[LD_IM:%.+]] = load i32, ptr [[LD_IM_ADDR]]
// <Skip checks for complex calculations>
// CHECK: [[X_RE_ADDR:%.+]] = getelementptr inbounds nuw { i32, i32 }, ptr [[DESIRED_ADDR:%.+]], i32 0, i32 0
// CHECK: [[X_IM_ADDR:%.+]] = getelementptr inbounds nuw { i32, i32 }, ptr [[DESIRED_ADDR]], i32 0, i32 1
// CHECK: store i32 [[NEW_RE:%.+]], ptr [[X_RE_ADDR]]
// CHECK: store i32 [[NEW_IM:%.+]], ptr [[X_IM_ADDR]]
// CHECK: [[SUCCESS_FAIL:%.+]] = call zeroext i1 @__atomic_compare_exchange(i64 noundef 8, ptr noundef [[X_ADDR]], ptr noundef [[EXPECTED_ADDR]], ptr noundef [[DESIRED_ADDR]], i32 noundef 0, i32 noundef 0)
// CHECK: br i1 [[SUCCESS_FAIL]], label %[[EXIT:.+]], label %[[CONT]]
// CHECK: [[EXIT]]
// CHECK: [[RE_CAST:%.+]] = sitofp i32 [[NEW_RE]] to float
// CHECK: [[IM_CAST:%.+]] = sitofp i32 [[NEW_IM]] to float
// CHECK: store float [[RE_CAST]], ptr @{{.+}},
// CHECK: store float [[IM_CAST]], ptr getelementptr inbounds nuw ({ float, float }, ptr @{{.+}}, i32 0, i32 1),
#pragma omp atomic capture
  cfv = cix = civ / cix;
// CHECK: [[EXPR_RE:%.+]] = load float, ptr @{{.+}}
// CHECK: [[EXPR_IM:%.+]] = load float, ptr getelementptr inbounds nuw ({ float, float }, ptr @{{.+}}, i32 0, i32 1)
// CHECK: call void @__atomic_load(i64 noundef 8, ptr noundef [[X_ADDR:@.+]], ptr noundef [[EXPECTED_ADDR:%.+]], i32 noundef 0)
// CHECK: br label %[[CONT:.+]]
// CHECK: [[CONT]]
// CHECK: [[X_RE_ADDR:%.+]] = getelementptr inbounds nuw { float, float }, ptr [[EXPECTED_ADDR]], i32 0, i32 0
// CHECK: [[X_RE_OLD:%.+]] = load float, ptr [[X_RE_ADDR]]
// CHECK: [[X_IM_ADDR:%.+]] = getelementptr inbounds nuw { float, float }, ptr [[EXPECTED_ADDR]], i32 0, i32 1
// CHECK: [[X_IM_OLD:%.+]] = load float, ptr [[X_IM_ADDR]]
// <Skip checks for complex calculations>
// CHECK: [[X_RE_ADDR:%.+]] = getelementptr inbounds nuw { float, float }, ptr [[DESIRED_ADDR:%.+]], i32 0, i32 0
// CHECK: [[X_IM_ADDR:%.+]] = getelementptr inbounds nuw { float, float }, ptr [[DESIRED_ADDR]], i32 0, i32 1
// CHECK: store float [[NEW_RE:%.+]], ptr [[X_RE_ADDR]]
// CHECK: store float [[NEW_IM:%.+]], ptr [[X_IM_ADDR]]
// CHECK: [[SUCCESS_FAIL:%.+]] = call zeroext i1 @__atomic_compare_exchange(i64 noundef 8, ptr noundef [[X_ADDR]], ptr noundef [[EXPECTED_ADDR]], ptr noundef [[DESIRED_ADDR]], i32 noundef 0, i32 noundef 0)
// CHECK: br i1 [[SUCCESS_FAIL]], label %[[EXIT:.+]], label %[[CONT]]
// CHECK: [[EXIT]]
// CHECK: [[RE_CAST:%.+]] = fptosi float [[X_RE_OLD]] to i32
// CHECK: [[IM_CAST:%.+]] = fptosi float [[X_IM_OLD]] to i32
// CHECK: store i32 [[RE_CAST]], ptr @{{.+}},
// CHECK: store i32 [[IM_CAST]], ptr getelementptr inbounds nuw ({ i32, i32 }, ptr @{{.+}}, i32 0, i32 1),
#pragma omp atomic capture
  {civ = cfx; cfx = cfv + cfx;}
// CHECK: [[EXPR_RE:%.+]] = load double, ptr @{{.+}}
// CHECK: [[EXPR_IM:%.+]] = load double, ptr getelementptr inbounds nuw ({ double, double }, ptr @{{.+}}, i32 0, i32 1)
// CHECK: call void @__atomic_load(i64 noundef 16, ptr noundef [[X_ADDR:@.+]], ptr noundef [[EXPECTED_ADDR:%.+]], i32 noundef 5)
// CHECK: br label %[[CONT:.+]]
// CHECK: [[CONT]]
// CHECK: [[X_RE_ADDR:%.+]] = getelementptr inbounds nuw { double, double }, ptr [[EXPECTED_ADDR]], i32 0, i32 0
// CHECK: [[X_RE:%.+]] = load double, ptr [[X_RE_ADDR]]
// CHECK: [[X_IM_ADDR:%.+]] = getelementptr inbounds nuw { double, double }, ptr [[EXPECTED_ADDR]], i32 0, i32 1
// CHECK: [[X_IM:%.+]] = load double, ptr [[X_IM_ADDR]]
// <Skip checks for complex calculations>
// CHECK: [[X_RE_ADDR:%.+]] = getelementptr inbounds nuw { double, double }, ptr [[DESIRED_ADDR:%.+]], i32 0, i32 0
// CHECK: [[X_IM_ADDR:%.+]] = getelementptr inbounds nuw { double, double }, ptr [[DESIRED_ADDR]], i32 0, i32 1
// CHECK: store double [[NEW_RE:%.+]], ptr [[X_RE_ADDR]]
// CHECK: store double [[NEW_IM:%.+]], ptr [[X_IM_ADDR]]
// CHECK: [[SUCCESS_FAIL:%.+]] = call zeroext i1 @__atomic_compare_exchange(i64 noundef 16, ptr noundef [[X_ADDR]], ptr noundef [[EXPECTED_ADDR]], ptr noundef [[DESIRED_ADDR]], i32 noundef 5, i32 noundef 5)
// CHECK: br i1 [[SUCCESS_FAIL]], label %[[EXIT:.+]], label %[[CONT]]
// CHECK: [[EXIT]]
// CHECK: [[RE_CAST:%.+]] = fptrunc double [[NEW_RE]] to float
// CHECK: [[IM_CAST:%.+]] = fptrunc double [[NEW_IM]] to float
// CHECK: store float [[RE_CAST]], ptr @{{.+}},
// CHECK: store float [[IM_CAST]], ptr getelementptr inbounds nuw ({ float, float }, ptr @{{.+}}, i32 0, i32 1),
// CHECK-50: call{{.*}} @__kmpc_flush(
#pragma omp atomic capture seq_cst
  {cdx = cdx - cdv; cfv = cdx;}
// CHECK: [[BV:%.+]] = load i8, ptr @{{.+}}
// CHECK: [[BOOL:%.+]] = trunc i8 [[BV]] to i1
// CHECK: [[EXPR:%.+]] = zext i1 [[BOOL]] to i64
// CHECK: [[OLD:%.+]] = atomicrmw and ptr @{{.+}}, i64 [[EXPR]] monotonic, align 8
// CHECK: [[DESIRED:%.+]] = and i64 [[OLD]], [[EXPR]]
// CHECK: store i64 [[DESIRED]], ptr @{{.+}},
#pragma omp atomic capture
  ulv = ulx = ulx & bv;
// CHECK: [[CV:%.+]]  = load i8, ptr @{{.+}}, align 1
// CHECK: [[EXPR:%.+]] = sext i8 [[CV]] to i32
// CHECK: [[X:%.+]] = load atomic i8, ptr [[BX_ADDR:@.+]] monotonic, align 1
// CHECK: br label %[[CONT:.+]]
// CHECK: [[CONT]]
// CHECK: [[EXPECTED:%.+]] = phi i8 [ [[X]], %{{.+}} ], [ [[OLD_X:%.+]], %[[CONT]] ]
// CHECK: [[OLD_BOOL:%.+]] = trunc i8 [[EXPECTED]] to i1
// CHECK: [[X_RVAL:%.+]] = zext i1 [[OLD_BOOL]] to i32
// CHECK: [[AND:%.+]] = and i32 [[EXPR]], [[X_RVAL]]
// CHECK: [[CAST:%.+]] = icmp ne i32 [[AND]], 0
// CHECK: [[NEW:%.+]] = zext i1 [[CAST]] to i8
// CHECK: store i8 [[NEW]], ptr [[TEMP:%.+]],
// CHECK: [[DESIRED:%.+]] = load i8, ptr [[TEMP]],
// CHECK: [[RES:%.+]] = cmpxchg ptr [[BX_ADDR]], i8 [[EXPECTED]], i8 [[DESIRED]] monotonic monotonic, align 1
// CHECK: [[OLD:%.+]] = extractvalue { i8, i1 } [[RES]], 0
// CHECK: [[SUCCESS_FAIL:%.+]] = extractvalue { i8, i1 } [[RES]], 1
// CHECK: br i1 [[SUCCESS_FAIL]], label %[[EXIT:.+]], label %[[CONT]]
// CHECK: [[EXIT]]
// CHECK: [[OLD_I8:%.+]] = zext i1 [[OLD_BOOL]] to i8
// CHECK: store i8 [[OLD_I8]], ptr @{{.+}},
#pragma omp atomic capture
  {bv = bx; bx = cv & bx;}
// CHECK: [[UCV:%.+]]  = load i8, ptr @{{.+}},
// CHECK: [[EXPR:%.+]] = zext i8 [[UCV]] to i32
// CHECK: [[X:%.+]] = load atomic i8, ptr [[CX_ADDR:@.+]] seq_cst, align 1
// CHECK: br label %[[CONT:.+]]
// CHECK: [[CONT]]
// CHECK: [[EXPECTED:%.+]] = phi i8 [ [[X]], %{{.+}} ], [ [[OLD_X:%.+]], %[[CONT]] ]
// CHECK: [[X_RVAL:%.+]] = sext i8 [[EXPECTED]] to i32
// CHECK: [[ASHR:%.+]] = ashr i32 [[X_RVAL]], [[EXPR]]
// CHECK: [[NEW:%.+]] = trunc i32 [[ASHR]] to i8
// CHECK: store i8 [[NEW]], ptr [[TEMP:%.+]],
// CHECK: [[DESIRED:%.+]] = load i8, ptr [[TEMP]],
// CHECK: [[RES:%.+]] = cmpxchg ptr [[CX_ADDR]], i8 [[EXPECTED]], i8 [[DESIRED]] seq_cst seq_cst, align 1
// CHECK: [[OLD_X:%.+]] = extractvalue { i8, i1 } [[RES]], 0
// CHECK: [[SUCCESS_FAIL:%.+]] = extractvalue { i8, i1 } [[RES]], 1
// CHECK: br i1 [[SUCCESS_FAIL]], label %[[EXIT:.+]], label %[[CONT]]
// CHECK: [[EXIT]]
// CHECK: store i8 [[NEW]], ptr @{{.+}},
// CHECK-50: call{{.*}} @__kmpc_flush(
#pragma omp atomic capture, seq_cst
  {cx = cx >> ucv; cv = cx;}
// CHECK: [[SV:%.+]]  = load i16, ptr @{{.+}},
// CHECK: [[EXPR:%.+]] = sext i16 [[SV]] to i32
// CHECK: [[X:%.+]] = load atomic i64, ptr [[ULX_ADDR:@.+]] monotonic, align 8
// CHECK: br label %[[CONT:.+]]
// CHECK: [[CONT]]
// CHECK: [[EXPECTED:%.+]] = phi i64 [ [[X]], %{{.+}} ], [ [[OLD_X:%.+]], %[[CONT]] ]
// CHECK: [[X_RVAL:%.+]] = trunc i64 [[EXPECTED]] to i32
// CHECK: [[SHL:%.+]] = shl i32 [[EXPR]], [[X_RVAL]]
// CHECK: [[NEW:%.+]] = sext i32 [[SHL]] to i64
// CHECK: store i64 [[NEW]], ptr [[TEMP:%.+]],
// CHECK: [[DESIRED:%.+]] = load i64, ptr [[TEMP]],
// CHECK: [[RES:%.+]] = cmpxchg ptr [[ULX_ADDR]], i64 [[EXPECTED]], i64 [[DESIRED]] monotonic monotonic, align 8
// CHECK: [[OLD_X:%.+]] = extractvalue { i64, i1 } [[RES]], 0
// CHECK: [[SUCCESS_FAIL:%.+]] = extractvalue { i64, i1 } [[RES]], 1
// CHECK: br i1 [[SUCCESS_FAIL]], label %[[EXIT:.+]], label %[[CONT]]
// CHECK: [[EXIT]]
// CHECK: store i64 [[NEW]], ptr @{{.+}},
#pragma omp atomic capture
  ulv = ulx = sv << ulx;
// CHECK: [[USV:%.+]]  = load i16, ptr @{{.+}},
// CHECK: [[EXPR:%.+]] = zext i16 [[USV]] to i64
// CHECK: [[X:%.+]] = load atomic i64, ptr [[LX_ADDR:@.+]] monotonic, align 8
// CHECK: br label %[[CONT:.+]]
// CHECK: [[CONT]]
// CHECK: [[EXPECTED:%.+]] = phi i64 [ [[X]], %{{.+}} ], [ [[OLD_X:%.+]], %[[CONT]] ]
// CHECK: [[DESIRED:%.+]] = srem i64 [[EXPECTED]], [[EXPR]]
// CHECK: store i64 [[DESIRED]], ptr [[TEMP:%.+]],
// CHECK: [[DESIRED:%.+]] = load i64, ptr [[TEMP]],
// CHECK: [[RES:%.+]] = cmpxchg ptr [[LX_ADDR]], i64 [[EXPECTED]], i64 [[DESIRED]] monotonic monotonic, align 8
// CHECK: [[OLD_X:%.+]] = extractvalue { i64, i1 } [[RES]], 0
// CHECK: [[SUCCESS_FAIL:%.+]] = extractvalue { i64, i1 } [[RES]], 1
// CHECK: br i1 [[SUCCESS_FAIL]], label %[[EXIT:.+]], label %[[CONT]]
// CHECK: [[EXIT]]
// CHECK: store i64 [[EXPECTED]], ptr @{{.+}},
#pragma omp atomic capture
  {lv = lx; lx = lx % usv;}
// CHECK: [[EXPR:%.+]] = load i32, ptr @{{.+}}
// CHECK: [[OLD:%.+]] = atomicrmw or ptr @{{.+}}, i32 [[EXPR]] seq_cst, align 4
// CHECK: [[DESIRED:%.+]] = or i32 [[EXPR]], [[OLD]]
// CHECK: store i32 [[DESIRED]], ptr @{{.+}},
// CHECK-50: call{{.*}} @__kmpc_flush(
#pragma omp atomic seq_cst, capture
  {uix = iv | uix; uiv = uix;}
// CHECK: [[EXPR:%.+]] = load i32, ptr @{{.+}}
// CHECK: [[OLD:%.+]] = atomicrmw and ptr @{{.+}}, i32 [[EXPR]] monotonic, align 4
// CHECK: [[DESIRED:%.+]] = and i32 [[OLD]], [[EXPR]]
// CHECK: store i32 [[DESIRED]], ptr @{{.+}},
#pragma omp atomic capture
  iv = ix = ix & uiv;
// CHECK: [[EXPR:%.+]] = load i64, ptr @{{.+}},
// CHECK: call void @__atomic_load(i64 noundef 8, ptr noundef [[X_ADDR:@.+]], ptr noundef [[EXPECTED_ADDR:%.+]], i32 noundef 0)
// CHECK: br label %[[CONT:.+]]
// CHECK: [[CONT]]
// CHECK: [[X_RE_ADDR:%.+]] = getelementptr inbounds nuw { i32, i32 }, ptr [[EXPECTED_ADDR]], i32 0, i32 0
// CHECK: [[OLD_RE:%.+]] = load i32, ptr [[X_RE_ADDR]]
// CHECK: [[X_IM_ADDR:%.+]] = getelementptr inbounds nuw { i32, i32 }, ptr [[EXPECTED_ADDR]], i32 0, i32 1
// CHECK: [[OLD_IM:%.+]] = load i32, ptr [[X_IM_ADDR]]
// <Skip checks for complex calculations>
// CHECK: [[X_RE_ADDR:%.+]] = getelementptr inbounds nuw { i32, i32 }, ptr [[DESIRED_ADDR:%.+]], i32 0, i32 0
// CHECK: [[X_IM_ADDR:%.+]] = getelementptr inbounds nuw { i32, i32 }, ptr [[DESIRED_ADDR]], i32 0, i32 1
// CHECK: store i32 %{{.+}}, ptr [[X_RE_ADDR]]
// CHECK: store i32 %{{.+}}, ptr [[X_IM_ADDR]]
// CHECK: [[SUCCESS_FAIL:%.+]] = call zeroext i1 @__atomic_compare_exchange(i64 noundef 8, ptr noundef [[X_ADDR]], ptr noundef [[EXPECTED_ADDR]], ptr noundef [[DESIRED_ADDR]], i32 noundef 0, i32 noundef 0)
// CHECK: br i1 [[SUCCESS_FAIL]], label %[[EXIT:.+]], label %[[CONT]]
// CHECK: [[EXIT]]
// CHECK: store i32 [[OLD_RE]], ptr @{{.+}},
// CHECK: store i32 [[OLD_IM]], ptr getelementptr inbounds nuw ({ i32, i32 }, ptr @{{.+}}, i32 0, i32 1),
#pragma omp atomic capture
  {civ = cix; cix = lv + cix;}
// CHECK: [[ULV:%.+]] = load i64, ptr @{{.+}},
// CHECK: [[EXPR:%.+]] = uitofp i64 [[ULV]] to float
// CHECK: [[X:%.+]] = load atomic i32, ptr [[X_ADDR:@.+]] monotonic, align 4
// CHECK: br label %[[CONT:.+]]
// CHECK: [[CONT]]
// CHECK: [[EXPECTED:%.+]] = phi i32 [ [[X]], %{{.+}} ], [ [[OLD_X:%.+]], %[[CONT]] ]
// CHECK: [[OLD:%.+]] = bitcast i32 [[EXPECTED]] to float
// CHECK: [[MUL:%.+]] = fmul float [[OLD]], [[EXPR]]
// CHECK: store float [[MUL]], ptr [[TEMP:%.+]],
// CHECK: [[DESIRED:%.+]] = load i32, ptr [[TEMP]],
// CHECK: [[RES:%.+]] = cmpxchg ptr [[X_ADDR]], i32 [[EXPECTED]], i32 [[DESIRED]] monotonic monotonic, align 4
// CHECK: [[OLD_X:%.+]] = extractvalue { i32, i1 } [[RES]], 0
// CHECK: [[SUCCESS_FAIL:%.+]] = extractvalue { i32, i1 } [[RES]], 1
// CHECK: br i1 [[SUCCESS_FAIL]], label %[[EXIT:.+]], label %[[CONT]]
// CHECK: [[EXIT]]
// CHECK: store float [[MUL]], ptr @{{.+}},
#pragma omp atomic capture
  {fx = fx * ulv; fv = fx;}
// CHECK: [[LLV:%.+]] = load i64, ptr @{{.+}},
// CHECK: [[EXPR:%.+]] = sitofp i64 [[LLV]] to double
// CHECK: [[X:%.+]] = load atomic i64, ptr [[X_ADDR:@.+]] monotonic, align 8
// CHECK: br label %[[CONT:.+]]
// CHECK: [[CONT]]
// CHECK: [[EXPECTED:%.+]] = phi i64 [ [[X]], %{{.+}} ], [ [[OLD_X:%.+]], %[[CONT]] ]
// CHECK: [[OLD:%.+]] = bitcast i64 [[EXPECTED]] to double
// CHECK: [[DIV:%.+]] = fdiv double [[OLD]], [[EXPR]]
// CHECK: store double [[DIV]], ptr [[TEMP:%.+]],
// CHECK: [[DESIRED:%.+]] = load i64, ptr [[TEMP]],
// CHECK: [[RES:%.+]] = cmpxchg ptr [[X_ADDR]], i64 [[EXPECTED]], i64 [[DESIRED]] monotonic monotonic, align 8
// CHECK: [[OLD_X:%.+]] = extractvalue { i64, i1 } [[RES]], 0
// CHECK: [[SUCCESS_FAIL:%.+]] = extractvalue { i64, i1 } [[RES]], 1
// CHECK: br i1 [[SUCCESS_FAIL]], label %[[EXIT:.+]], label %[[CONT]]
// CHECK: [[EXIT]]
// CHECK: store double [[DIV]], ptr @{{.+}},
#pragma omp atomic capture
  dv = dx /= llv;
// CHECK: [[ULLV:%.+]] = load i64, ptr @{{.+}},
// CHECK: [[EXPR:%.+]] = uitofp i64 [[ULLV]] to x86_fp80
// CHECK: [[X:%.+]] = load atomic i128, ptr [[X_ADDR:@.+]] monotonic, align 16
// CHECK: br label %[[CONT:.+]]
// CHECK: [[CONT]]
// CHECK: [[EXPECTED:%.+]] = phi i128 [ [[X]], %{{.+}} ], [ [[OLD_X:%.+]], %[[CONT]] ]
// CHECK: store i128 [[EXPECTED]], ptr [[TEMP1:%.+]],
// CHECK: store i128 [[EXPECTED]], ptr [[TEMP:%.+]],
// CHECK: [[OLD:%.+]] = load x86_fp80, ptr [[TEMP]],
// CHECK: [[SUB:%.+]] = fsub x86_fp80 [[OLD]], [[EXPR]]
// CHECK: store x86_fp80 [[SUB]], ptr [[TEMP1]]
// CHECK: [[DESIRED:%.+]] = load i128, ptr [[TEMP1]]
// CHECK: [[RES:%.+]] = cmpxchg ptr [[X_ADDR]], i128 [[EXPECTED]], i128 [[DESIRED]] monotonic monotonic, align 16
// CHECK: [[OLD_X:%.+]] = extractvalue { i128, i1 } [[RES]], 0
// CHECK: [[SUCCESS_FAIL:%.+]] = extractvalue { i128, i1 } [[RES]], 1
// CHECK: br i1 [[SUCCESS_FAIL]], label %[[EXIT:.+]], label %[[CONT]]
// CHECK: [[EXIT]]
// CHECK: store x86_fp80 [[OLD]], ptr @{{.+}},
#pragma omp atomic capture
  {ldv = ldx; ldx -= ullv;}
// CHECK: [[EXPR:%.+]] = load float, ptr @{{.+}},
// CHECK: call void @__atomic_load(i64 noundef 8, ptr noundef [[X_ADDR:@.+]], ptr noundef [[EXPECTED_ADDR:%.+]], i32 noundef 0)
// CHECK: br label %[[CONT:.+]]
// CHECK: [[CONT]]
// CHECK: [[X_RE_ADDR:%.+]] = getelementptr inbounds nuw { i32, i32 }, ptr [[EXPECTED_ADDR]], i32 0, i32 0
// CHECK: [[X_RE:%.+]] = load i32, ptr [[X_RE_ADDR]]
// CHECK: [[X_IM_ADDR:%.+]] = getelementptr inbounds nuw { i32, i32 }, ptr [[EXPECTED_ADDR]], i32 0, i32 1
// CHECK: [[X_IM:%.+]] = load i32, ptr [[X_IM_ADDR]]
// <Skip checks for complex calculations>
// CHECK: [[X_RE_ADDR:%.+]] = getelementptr inbounds nuw { i32, i32 }, ptr [[DESIRED_ADDR:%.+]], i32 0, i32 0
// CHECK: [[X_IM_ADDR:%.+]] = getelementptr inbounds nuw { i32, i32 }, ptr [[DESIRED_ADDR]], i32 0, i32 1
// CHECK: store i32 [[NEW_RE:%.+]], ptr [[X_RE_ADDR]]
// CHECK: store i32 [[NEW_IM:%.+]], ptr [[X_IM_ADDR]]
// CHECK: [[SUCCESS_FAIL:%.+]] = call zeroext i1 @__atomic_compare_exchange(i64 noundef 8, ptr noundef [[X_ADDR]], ptr noundef [[EXPECTED_ADDR]], ptr noundef [[DESIRED_ADDR]], i32 noundef 0, i32 noundef 0)
// CHECK: br i1 [[SUCCESS_FAIL]], label %[[EXIT:.+]], label %[[CONT]]
// CHECK: [[EXIT]]
// CHECK: store i32 [[NEW_RE]], ptr @{{.+}},
// CHECK: store i32 [[NEW_IM]], ptr getelementptr inbounds nuw ({ i32, i32 }, ptr @{{.+}}, i32 0, i32 1),
#pragma omp atomic capture
  {cix = fv / cix; civ = cix;}
// CHECK: [[EXPR:%.+]] = load double, ptr @{{.+}},
// CHECK: [[X:%.+]] = load atomic i16, ptr [[X_ADDR:@.+]] monotonic, align 2
// CHECK: br label %[[CONT:.+]]
// CHECK: [[CONT]]
// CHECK: [[EXPECTED:%.+]] = phi i16 [ [[X]], %{{.+}} ], [ [[OLD_X:%.+]], %[[CONT]] ]
// CHECK: [[CONV:%.+]] = sext i16 [[EXPECTED]] to i32
// CHECK: [[X_RVAL:%.+]] = sitofp i32 [[CONV]] to double
// CHECK: [[ADD:%.+]] = fadd double [[X_RVAL]], [[EXPR]]
// CHECK: [[NEW:%.+]] = fptosi double [[ADD]] to i16
// CHECK: store i16 [[NEW]], ptr [[TEMP:%.+]],
// CHECK: [[DESIRED:%.+]] = load i16, ptr [[TEMP]],
// CHECK: [[RES:%.+]] = cmpxchg ptr [[X_ADDR]], i16 [[EXPECTED]], i16 [[DESIRED]] monotonic monotonic, align 2
// CHECK: [[OLD_X]] = extractvalue { i16, i1 } [[RES]], 0
// CHECK: [[SUCCESS_FAIL:%.+]] = extractvalue { i16, i1 } [[RES]], 1
// CHECK: br i1 [[SUCCESS_FAIL]], label %[[EXIT:.+]], label %[[CONT]]
// CHECK: [[EXIT]]
// CHECK: store i16 [[NEW]], ptr @{{.+}},
#pragma omp atomic capture
  sv = sx = sx + dv;
// CHECK: [[EXPR:%.+]] = load x86_fp80, ptr @{{.+}},
// CHECK: [[XI8:%.+]] = load atomic i8, ptr [[X_ADDR:@.+]] monotonic, align 1
// CHECK: br label %[[CONT:.+]]
// CHECK: [[CONT]]
// CHECK: [[EXPECTED:%.+]] = phi i8 [ [[XI8]], %{{.+}} ], [ [[OLD_X:%.+]], %[[CONT]] ]
// CHECK: [[BOOL_EXPECTED:%.+]] = trunc i8 [[EXPECTED]] to i1
// CHECK: [[CONV:%.+]] = zext i1 [[BOOL_EXPECTED]] to i32
// CHECK: [[X_RVAL:%.+]] = sitofp i32 [[CONV]] to x86_fp80
// CHECK: [[MUL:%.+]] = fmul x86_fp80 [[EXPR]], [[X_RVAL]]
// CHECK: [[BOOL_DESIRED:%.+]] = fcmp une x86_fp80 [[MUL]], 0xK00000000000000000000
// CHECK: [[DESIRED:%.+]] = zext i1 [[BOOL_DESIRED]] to i8
// CHECK: store i8 [[DESIRED]], ptr [[TEMP:%.+]],
// CHECK: [[DESIRED:%.+]] = load i8, ptr [[TEMP]],
// CHECK: [[RES:%.+]] = cmpxchg ptr [[X_ADDR]], i8 [[EXPECTED]], i8 [[DESIRED]] monotonic monotonic, align 1
// CHECK: [[OLD_X:%.+]] = extractvalue { i8, i1 } [[RES]], 0
// CHECK: [[SUCCESS_FAIL:%.+]] = extractvalue { i8, i1 } [[RES]], 1
// CHECK: br i1 [[SUCCESS_FAIL]], label %[[EXIT:.+]], label %[[CONT]]
// CHECK: [[EXIT]]
// CHECK: [[EXPECTED_I8:%.+]] = zext i1 [[BOOL_EXPECTED]] to i8
// CHECK: store i8 [[EXPECTED_I8]], ptr @{{.+}},
#pragma omp atomic capture
  {bv = bx; bx = ldv * bx;}
// CHECK: [[EXPR_RE:%.+]] = load i32, ptr [[CIV_ADDR:@.+]],
// CHECK: [[EXPR_IM:%.+]] = load i32, ptr getelementptr inbounds nuw ({ i32, i32 }, ptr [[CIV_ADDR]], i32 0, i32 1),
// CHECK: [[XI8:%.+]] = load atomic i8, ptr [[X_ADDR:@.+]] monotonic, align 1
// CHECK: br label %[[CONT:.+]]
// CHECK: [[CONT]]
// CHECK: [[EXPECTED:%.+]] = phi i8 [ [[XI8]], %{{.+}} ], [ [[OLD_X:%.+]], %[[CONT]] ]
// CHECK: [[BOOL_EXPECTED:%.+]] = trunc i8 [[EXPECTED]] to i1
// CHECK: [[X_RVAL:%.+]] = zext i1 [[BOOL_EXPECTED]] to i32
// CHECK: [[SUB_RE:%.+]] = sub i32 [[EXPR_RE:%.+]], [[X_RVAL]]
// CHECK: [[SUB_IM:%.+]] = sub i32 [[EXPR_IM:%.+]], 0
// CHECK: icmp ne i32 [[SUB_RE]], 0
// CHECK: icmp ne i32 [[SUB_IM]], 0
// CHECK: [[BOOL_DESIRED:%.+]] = or i1
// CHECK: [[DESIRED:%.+]] = zext i1 [[BOOL_DESIRED]] to i8
// CHECK: store i8 [[DESIRED]], ptr [[TEMP:%.+]],
// CHECK: [[DESIRED:%.+]] = load i8, ptr [[TEMP]],
// CHECK: [[RES:%.+]] = cmpxchg ptr [[X_ADDR]], i8 [[EXPECTED]], i8 [[DESIRED]] monotonic monotonic, align 1
// CHECK: [[OLD_X:%.+]] = extractvalue { i8, i1 } [[RES]], 0
// CHECK: [[SUCCESS_FAIL:%.+]] = extractvalue { i8, i1 } [[RES]], 1
// CHECK: br i1 [[SUCCESS_FAIL]], label %[[EXIT:.+]], label %[[CONT]]
// CHECK: [[EXIT]]
// CHECK: [[DESIRED_I8:%.+]] = zext i1 [[BOOL_DESIRED]] to i8
// CHECK: store i8 [[DESIRED_I8]], ptr @{{.+}},
#pragma omp atomic capture
  {bx = civ - bx; bv = bx;}
// CHECK: [[IDX:%.+]] = load i16, ptr @{{.+}}
// CHECK: load i8, ptr
// CHECK: [[VEC_ITEM_VAL:%.+]] = zext i1 %{{.+}} to i32
// CHECK: [[I128VAL:%.+]] = load atomic i128, ptr [[DEST:@.+]] monotonic, align 16
// CHECK: br label %[[CONT:.+]]
// CHECK: [[CONT]]
// CHECK: [[OLD_I128:%.+]] = phi i128 [ [[I128VAL]], %{{.+}} ], [ [[FAILED_OLD_VAL:%.+]], %[[CONT]] ]
// CHECK: store i128 [[OLD_I128]], ptr [[TEMP:%.+]],
// CHECK: [[LD:%.+]] = bitcast i128 [[OLD_I128]] to <4 x i32>
// CHECK: store <4 x i32> [[LD]], ptr [[TEMP1:%.+]],
// CHECK: [[VEC_VAL:%.+]] = load <4 x i32>, ptr [[TEMP1]]
// CHECK: [[ITEM:%.+]] = extractelement <4 x i32> [[VEC_VAL]], i16 [[IDX]]
// CHECK: [[OR:%.+]] = or i32 [[ITEM]], [[VEC_ITEM_VAL]]
// CHECK: [[VEC_VAL:%.+]] = load <4 x i32>, ptr [[TEMP]]
// CHECK: [[NEW_VEC_VAL:%.+]] = insertelement <4 x i32> [[VEC_VAL]], i32 [[OR]], i16 [[IDX]]
// CHECK: store <4 x i32> [[NEW_VEC_VAL]], ptr [[TEMP]]
// CHECK: [[NEW_I128:%.+]] = load i128, ptr [[TEMP]],
// CHECK: [[RES:%.+]] = cmpxchg ptr [[DEST]], i128 [[OLD_I128]], i128 [[NEW_I128]] monotonic monotonic, align 16
// CHECK: [[FAILED_OLD_VAL:%.+]] = extractvalue { i128, i1 } [[RES]], 0
// CHECK: [[FAIL_SUCCESS:%.+]] = extractvalue { i128, i1 } [[RES]], 1
// CHECK: br i1 [[FAIL_SUCCESS]], label %[[EXIT:.+]], label %[[CONT]]
// CHECK: [[EXIT]]
// CHECK: store i32 [[OR]], ptr @{{.+}},
#pragma omp atomic capture
  {int4x[sv] |= bv; iv = int4x[sv];}
// CHECK: [[EXPR:%.+]] = load x86_fp80, ptr @{{.+}}
// CHECK: [[PREV_VALUE:%.+]] = load atomic i32, ptr getelementptr (i8, ptr @{{.+}}, i64 4) monotonic, align 4
// CHECK: br label %[[CONT:.+]]
// CHECK: [[CONT]]
// CHECK: [[OLD_BF_VALUE:%.+]] = phi i32 [ [[PREV_VALUE]], %[[EXIT]] ], [ [[FAILED_OLD_VAL:%.+]], %[[CONT]] ]
// CHECK: store i32 [[OLD_BF_VALUE]], ptr [[TEMP1:%.+]],
// CHECK: store i32 [[OLD_BF_VALUE]], ptr [[TEMP:%.+]],
// CHECK: [[A_LD:%.+]] = load i32, ptr [[TEMP]],
// CHECK: [[A_SHL:%.+]] = shl i32 [[A_LD]], 1
// CHECK: [[A_ASHR:%.+]] = ashr i32 [[A_SHL]], 1
// CHECK: [[X_RVAL:%.+]] = sitofp i32 [[A_ASHR]] to x86_fp80
// CHECK: [[SUB:%.+]] = fsub x86_fp80 [[X_RVAL]], [[EXPR]]
// CHECK: [[CONV:%.+]] = fptosi x86_fp80 [[SUB]] to i32
// CHECK: [[NEW_VAL:%.+]] = load i32, ptr [[TEMP1]],
// CHECK: [[BF_VALUE:%.+]] = and i32 [[CONV]], 2147483647
// CHECK: [[BF_CLEAR:%.+]] = and i32 [[NEW_VAL]], -2147483648
// CHECK: [[BF_SET:%.+]] = or i32 [[BF_CLEAR]], [[BF_VALUE]]
// CHECK: store i32 [[BF_SET]], ptr [[TEMP1]],
// CHECK: [[NEW_BF_VALUE:%.+]] = load i32, ptr [[TEMP1]],
// CHECK: [[RES:%.+]] = cmpxchg ptr getelementptr (i8, ptr @{{.+}}, i64 4), i32 [[OLD_BF_VALUE]], i32 [[NEW_BF_VALUE]] monotonic monotonic, align 4
// CHECK: [[FAILED_OLD_VAL]] = extractvalue { i32, i1 } [[RES]], 0
// CHECK: [[FAIL_SUCCESS:%.+]] = extractvalue { i32, i1 } [[RES]], 1
// CHECK: br i1 [[FAIL_SUCCESS]], label %[[EXIT:.+]], label %[[CONT]]
// CHECK: [[EXIT]]
// CHECK: store i32 [[CONV]], ptr @{{.+}},
#pragma omp atomic capture
  iv = bfx.a = bfx.a - ldv;
// CHECK: [[EXPR:%.+]] = load x86_fp80, ptr @{{.+}}
// CHECK: call void @__atomic_load(i64 noundef 4, ptr noundef getelementptr (i8, ptr @{{.+}}, i64 4), ptr noundef [[LDTEMP:%.+]], i32 noundef 0)
// CHECK: br label %[[CONT:.+]]
// CHECK: [[CONT]]
// CHECK: [[OLD:%.+]] = load i32, ptr [[LDTEMP]],
// CHECK: store i32 [[OLD]], ptr [[TEMP1:%.+]],
// CHECK: [[OLD:%.+]] = load i32, ptr [[LDTEMP]],
// CHECK: store i32 [[OLD]], ptr [[TEMP:%.+]],
// CHECK: [[A_LD:%.+]] = load i32, ptr [[TEMP]],
// CHECK: [[A_SHL:%.+]] = shl i32 [[A_LD]], 1
// CHECK: [[A_ASHR:%.+]] = ashr i32 [[A_SHL]], 1
// CHECK: [[X_RVAL:%.+]] = sitofp i32 [[A_ASHR]] to x86_fp80
// CHECK: [[MUL:%.+]] = fmul x86_fp80 [[X_RVAL]], [[EXPR]]
// CHECK: [[CONV:%.+]] = fptosi x86_fp80 [[MUL]] to i32
// CHECK: [[NEW_VAL:%.+]] = load i32, ptr [[TEMP1]],
// CHECK: [[BF_VALUE:%.+]] = and i32 [[CONV]], 2147483647
// CHECK: [[BF_CLEAR:%.+]] = and i32 [[NEW_VAL]], -2147483648
// CHECK: or i32 [[BF_CLEAR]], [[BF_VALUE]]
// CHECK: store i32 %{{.+}}, ptr [[TEMP1]]
// CHECK: [[FAIL_SUCCESS:%.+]] = call zeroext i1 @__atomic_compare_exchange(i64 noundef 4, ptr noundef getelementptr (i8, ptr @{{.+}}, i64 4), ptr noundef [[LDTEMP]], ptr noundef [[TEMP1]], i32 noundef 0, i32 noundef 0)
// CHECK: br i1 [[FAIL_SUCCESS]], label %[[EXIT:.+]], label %[[CONT]]
// CHECK: [[EXIT]]
// CHECK: store i32 [[A_ASHR]], ptr @{{.+}},
#pragma omp atomic capture
  {iv = bfx_packed.a; bfx_packed.a *= ldv;}
// CHECK: [[EXPR:%.+]] = load x86_fp80, ptr @{{.+}}
// CHECK: [[PREV_VALUE:%.+]] = load atomic i32, ptr @{{.+}} monotonic, align 4
// CHECK: br label %[[CONT:.+]]
// CHECK: [[CONT]]
// CHECK: [[OLD_BF_VALUE:%.+]] = phi i32 [ [[PREV_VALUE]], %[[EXIT]] ], [ [[FAILED_OLD_VAL:%.+]], %[[CONT]] ]
// CHECK: store i32 [[OLD_BF_VALUE]], ptr [[TEMP1:%.+]],
// CHECK: store i32 [[OLD_BF_VALUE]], ptr [[TEMP:%.+]],
// CHECK: [[A_LD:%.+]] = load i32, ptr [[TEMP]],
// CHECK: [[A_ASHR:%.+]] = ashr i32 [[A_LD]], 31
// CHECK: [[X_RVAL:%.+]] = sitofp i32 [[A_ASHR]] to x86_fp80
// CHECK: [[SUB:%.+]] = fsub x86_fp80 [[X_RVAL]], [[EXPR]]
// CHECK: [[CONV:%.+]] = fptosi x86_fp80 [[SUB]] to i32
// CHECK: [[NEW_VAL:%.+]] = load i32, ptr [[TEMP1]],
// CHECK: [[BF_AND:%.+]] = and i32 [[CONV]], 1
// CHECK: [[BF_VALUE:%.+]] = shl i32 [[BF_AND]], 31
// CHECK: [[BF_CLEAR:%.+]] = and i32 [[NEW_VAL]], 2147483647
// CHECK: or i32 [[BF_CLEAR]], [[BF_VALUE]]
// CHECK: store i32 %{{.+}}, ptr [[TEMP1]]
// CHECK: [[NEW_BF_VALUE:%.+]] = load i32, ptr [[TEMP1]]
// CHECK: [[RES:%.+]] = cmpxchg ptr @{{.+}}, i32 [[OLD_BF_VALUE]], i32 [[NEW_BF_VALUE]] monotonic monotonic, align 4
// CHECK: [[FAILED_OLD_VAL]] = extractvalue { i32, i1 } [[RES]], 0
// CHECK: [[FAIL_SUCCESS:%.+]] = extractvalue { i32, i1 } [[RES]], 1
// CHECK: br i1 [[FAIL_SUCCESS]], label %[[EXIT:.+]], label %[[CONT]]
// CHECK: [[EXIT]]
// CHECK: store i32 [[CONV]], ptr @{{.+}},
#pragma omp atomic capture
  {bfx2.a -= ldv; iv = bfx2.a;}
// CHECK: [[EXPR:%.+]] = load x86_fp80, ptr @{{.+}}
// CHECK: [[PREV_VALUE:%.+]] = load atomic i8, ptr getelementptr (i8, ptr @{{.+}}, i64 3) monotonic, align 1
// CHECK: br label %[[CONT:.+]]
// CHECK: [[CONT]]
// CHECK: [[OLD_BF_VALUE:%.+]] = phi i8 [ [[PREV_VALUE]], %[[EXIT]] ], [ [[FAILED_OLD_VAL:%.+]], %[[CONT]] ]
// CHECK: store i8 [[OLD_BF_VALUE]], ptr [[BITCAST_NEW:%.+]],
// CHECK: store i8 [[OLD_BF_VALUE]], ptr [[BITCAST:%.+]],
// CHECK: [[A_LD:%.+]] = load i8, ptr [[BITCAST]],
// CHECK: [[A_ASHR:%.+]] = ashr i8 [[A_LD]], 7
// CHECK: [[CAST:%.+]] = sext i8 [[A_ASHR]] to i32
// CHECK: [[X_RVAL:%.+]] = sitofp i32 [[CAST]] to x86_fp80
// CHECK: [[DIV:%.+]] = fdiv x86_fp80 [[EXPR]], [[X_RVAL]]
// CHECK: [[NEW_VAL:%.+]] = fptosi x86_fp80 [[DIV]] to i32
// CHECK: [[TRUNC:%.+]] = trunc i32 [[NEW_VAL]] to i8
// CHECK: [[BF_LD:%.+]] = load i8, ptr [[BITCAST_NEW]],
// CHECK: [[BF_AND:%.+]] = and i8 [[TRUNC]], 1
// CHECK: [[BF_VALUE:%.+]] = shl i8 [[BF_AND]], 7
// CHECK: [[BF_CLEAR:%.+]] = and i8 %{{.+}}, 127
// CHECK: or i8 [[BF_CLEAR]], [[BF_VALUE]]
// CHECK: store i8 %{{.+}}, ptr [[BITCAST_NEW]]
// CHECK: [[NEW_BF_VALUE:%.+]] = load i8, ptr [[BITCAST_NEW]]
// CHECK: [[RES:%.+]] = cmpxchg ptr getelementptr (i8, ptr @{{.+}}, i64 3), i8 [[OLD_BF_VALUE]], i8 [[NEW_BF_VALUE]] monotonic monotonic, align 1
// CHECK: [[FAILED_OLD_VAL]] = extractvalue { i8, i1 } [[RES]], 0
// CHECK: [[FAIL_SUCCESS:%.+]] = extractvalue { i8, i1 } [[RES]], 1
// CHECK: br i1 [[FAIL_SUCCESS]], label %[[EXIT:.+]], label %[[CONT]]
// CHECK: [[EXIT]]
// CHECK: store i32 [[NEW_VAL]], ptr @{{.+}},
#pragma omp atomic capture
  iv = bfx2_packed.a = ldv / bfx2_packed.a;
// CHECK: [[EXPR:%.+]] = load x86_fp80, ptr @{{.+}}
// CHECK: [[PREV_VALUE:%.+]] = load atomic i32, ptr @{{.+}} monotonic, align 4
// CHECK: br label %[[CONT:.+]]
// CHECK: [[CONT]]
// CHECK: [[OLD_BF_VALUE:%.+]] = phi i32 [ [[PREV_VALUE]], %[[EXIT]] ], [ [[FAILED_OLD_VAL:%.+]], %[[CONT]] ]
// CHECK: store i32 [[OLD_BF_VALUE]], ptr [[TEMP1:%.+]],
// CHECK: store i32 [[OLD_BF_VALUE]], ptr [[TEMP:%.+]],
// CHECK: [[A_LD:%.+]] = load i32, ptr [[TEMP]],
// CHECK: [[A_SHL:%.+]] = shl i32 [[A_LD]], 7
// CHECK: [[A_ASHR:%.+]] = ashr i32 [[A_SHL]], 18
// CHECK: [[X_RVAL:%.+]] = sitofp i32 [[A_ASHR]] to x86_fp80
// CHECK: [[DIV:%.+]] = fdiv x86_fp80 [[X_RVAL]], [[EXPR]]
// CHECK: [[NEW_VAL:%.+]] = fptosi x86_fp80 [[DIV]] to i32
// CHECK: [[BF_LD:%.+]] = load i32, ptr [[TEMP1]],
// CHECK: [[BF_AND:%.+]] = and i32 [[NEW_VAL]], 16383
// CHECK: [[BF_VALUE:%.+]] = shl i32 [[BF_AND]], 11
// CHECK: [[BF_CLEAR:%.+]] = and i32 %{{.+}}, -33552385
// CHECK: or i32 [[BF_CLEAR]], [[BF_VALUE]]
// CHECK: store i32 %{{.+}}, ptr [[TEMP1]]
// CHECK: [[NEW_BF_VALUE:%.+]] = load i32, ptr [[TEMP1]]
// CHECK: [[RES:%.+]] = cmpxchg ptr @{{.+}}, i32 [[OLD_BF_VALUE]], i32 [[NEW_BF_VALUE]] monotonic monotonic, align 4
// CHECK: [[FAILED_OLD_VAL]] = extractvalue { i32, i1 } [[RES]], 0
// CHECK: [[FAIL_SUCCESS:%.+]] = extractvalue { i32, i1 } [[RES]], 1
// CHECK: br i1 [[FAIL_SUCCESS]], label %[[EXIT:.+]], label %[[CONT]]
// CHECK: [[EXIT]]
// CHECK: store i32 [[A_ASHR]], ptr @{{.+}},
#pragma omp atomic capture
  {iv = bfx3.a; bfx3.a /= ldv;}
// CHECK: [[EXPR:%.+]] = load x86_fp80, ptr @{{.+}}
// CHECK: call void @__atomic_load(i64 noundef 3, ptr noundef getelementptr (i8, ptr @{{.+}}, i64 1), ptr noundef [[LDTEMP:%.+]], i32 noundef 0)
// CHECK: br label %[[CONT:.+]]
// CHECK: [[CONT]]
// CHECK: [[OLD:%.+]] = load i24, ptr [[LDTEMP]],
// CHECK: store i24 [[OLD]], ptr [[BITCAST2:%.+]],
// CHECK: [[OLD:%.+]] = load i24, ptr [[LDTEMP]],
// CHECK: store i24 [[OLD]], ptr [[BITCAST1:%.+]],
// CHECK: [[A_LD:%.+]] = load i24, ptr [[BITCAST1]],
// CHECK: [[A_SHL:%.+]] = shl i24 [[A_LD]], 7
// CHECK: [[A_ASHR:%.+]] = ashr i24 [[A_SHL]], 10
// CHECK: [[CAST:%.+]] = sext i24 [[A_ASHR]] to i32
// CHECK: [[X_RVAL:%.+]] = sitofp i32 [[CAST]] to x86_fp80
// CHECK: [[ADD:%.+]] = fadd x86_fp80 [[X_RVAL]], [[EXPR]]
// CHECK: [[NEW_VAL:%.+]] = fptosi x86_fp80 [[ADD]] to i32
// CHECK: [[TRUNC:%.+]] = trunc i32 [[NEW_VAL]] to i24
// CHECK: [[BF_LD:%.+]] = load i24, ptr [[BITCAST2]],
// CHECK: [[BF_AND:%.+]] = and i24 [[TRUNC]], 16383
// CHECK: [[BF_VALUE:%.+]] = shl i24 [[BF_AND]], 3
// CHECK: [[BF_CLEAR:%.+]] = and i24 [[BF_LD]], -131065
// CHECK: or i24 [[BF_CLEAR]], [[BF_VALUE]]
// CHECK: store i24 %{{.+}}, ptr [[BITCAST2]]
// CHECK: [[FAIL_SUCCESS:%.+]] = call zeroext i1 @__atomic_compare_exchange(i64 noundef 3, ptr noundef getelementptr (i8, ptr @{{.+}}, i64 1), ptr noundef [[LDTEMP]], ptr noundef [[BITCAST2]], i32 noundef 0, i32 noundef 0)
// CHECK: br i1 [[FAIL_SUCCESS]], label %[[EXIT:.+]], label %[[CONT]]
// CHECK: [[EXIT]]
// CHECK: store i32 [[NEW_VAL]], ptr @{{.+}},
#pragma omp atomic capture
  {bfx3_packed.a += ldv; iv = bfx3_packed.a;}
// CHECK: [[EXPR:%.+]] = load x86_fp80, ptr @{{.+}}
// CHECK: [[PREV_VALUE:%.+]] = load atomic i64, ptr @{{.+}} monotonic, align 8
// CHECK: br label %[[CONT:.+]]
// CHECK: [[CONT]]
// CHECK: [[OLD_BF_VALUE:%.+]] = phi i64 [ [[PREV_VALUE]], %[[EXIT]] ], [ [[FAILED_OLD_VAL:%.+]], %[[CONT]] ]
// CHECK: store i64 [[OLD_BF_VALUE]], ptr [[TEMP1:%.+]],
// CHECK: store i64 [[OLD_BF_VALUE]], ptr [[TEMP:%.+]],
// CHECK: [[A_LD:%.+]] = load i64, ptr [[TEMP]],
// CHECK: [[A_SHL:%.+]] = shl i64 [[A_LD]], 47
// CHECK: [[A_ASHR:%.+]] = ashr i64 [[A_SHL:%.+]], 63
// CHECK: [[A_CAST:%.+]] = trunc i64 [[A_ASHR:%.+]] to i32
// CHECK: [[X_RVAL:%.+]] = sitofp i32 [[CAST:%.+]] to x86_fp80
// CHECK: [[MUL:%.+]] = fmul x86_fp80 [[X_RVAL]], [[EXPR]]
// CHECK: [[NEW_VAL:%.+]] = fptosi x86_fp80 [[MUL]] to i32
// CHECK: [[ZEXT:%.+]] = zext i32 [[NEW_VAL]] to i64
// CHECK: [[BF_LD:%.+]] = load i64, ptr [[TEMP1]],
// CHECK: [[BF_AND:%.+]] = and i64 [[ZEXT]], 1
// CHECK: [[BF_VALUE:%.+]] = shl i64 [[BF_AND]], 16
// CHECK: [[BF_CLEAR:%.+]] = and i64 [[BF_LD]], -65537
// CHECK: or i64 [[BF_CLEAR]], [[BF_VALUE]]
// CHECK: store i64 %{{.+}}, ptr [[TEMP1]]
// CHECK: [[NEW_BF_VALUE:%.+]] = load i64, ptr [[TEMP1]]
// CHECK: [[RES:%.+]] = cmpxchg ptr @{{.+}}, i64 [[OLD_BF_VALUE]], i64 [[NEW_BF_VALUE]] monotonic monotonic, align 8
// CHECK: [[FAILED_OLD_VAL]] = extractvalue { i64, i1 } [[RES]], 0
// CHECK: [[FAIL_SUCCESS:%.+]] = extractvalue { i64, i1 } [[RES]], 1
// CHECK: br i1 [[FAIL_SUCCESS]], label %[[EXIT:.+]], label %[[CONT]]
// CHECK: [[EXIT]]
// CHECK: store i32 [[NEW_VAL]], ptr @{{.+}},
#pragma omp atomic relaxed capture
  iv = bfx4.a = bfx4.a * ldv;
// CHECK: [[EXPR:%.+]] = load x86_fp80, ptr @{{.+}}
// CHECK: [[PREV_VALUE:%.+]] = load atomic i8, ptr getelementptr inbounds nuw (%struct.BitFields4_packed, ptr @{{.+}}, i32 0, i32 1) monotonic, align 1
// CHECK: br label %[[CONT:.+]]
// CHECK: [[CONT]]
// CHECK: [[OLD_BF_VALUE:%.+]] = phi i8 [ [[PREV_VALUE]], %{{.+}} ], [ [[FAILED_OLD_VAL:%.+]], %[[CONT]] ]
// CHECK: store i8 [[OLD_BF_VALUE]], ptr [[BITCAST1:%.+]],
// CHECK: store i8 [[OLD_BF_VALUE]], ptr [[BITCAST:%.+]],
// CHECK: [[A_LD:%.+]] = load i8, ptr [[BITCAST]],
// CHECK: [[A_SHL:%.+]] = shl i8 [[A_LD]], 7
// CHECK: [[A_ASHR:%.+]] = ashr i8 [[A_SHL:%.+]], 7
// CHECK: [[CAST:%.+]] = sext i8 [[A_ASHR:%.+]] to i32
// CHECK: [[CONV:%.+]] = sitofp i32 [[CAST]] to x86_fp80
// CHECK: [[SUB: %.+]] = fsub x86_fp80 [[CONV]], [[EXPR]]
// CHECK: [[CONV:%.+]] = fptosi x86_fp80 [[SUB:%.+]] to i32
// CHECK: [[NEW_VAL:%.+]] = trunc i32 [[CONV]] to i8
// CHECK: [[BF_LD:%.+]] = load i8, ptr [[BITCAST1]],
// CHECK: [[BF_VALUE:%.+]] = and i8 [[NEW_VAL]], 1
// CHECK: [[BF_CLEAR:%.+]] = and i8 [[BF_LD]], -2
// CHECK: or i8 [[BF_CLEAR]], [[BF_VALUE]]
// CHECK: store i8 %{{.+}}, ptr [[BITCAST1]]
// CHECK: [[NEW_BF_VALUE:%.+]] = load i8, ptr [[BITCAST1]]
// CHECK: [[RES:%.+]] = cmpxchg ptr getelementptr inbounds nuw (%struct.BitFields4_packed, ptr @{{.+}}, i32 0, i32 1), i8 [[OLD_BF_VALUE]], i8 [[NEW_BF_VALUE]] monotonic monotonic, align 1
// CHECK: [[FAILED_OLD_VAL]] = extractvalue { i8, i1 } [[RES]], 0
// CHECK: [[FAIL_SUCCESS:%.+]] = extractvalue { i8, i1 } [[RES]], 1
// CHECK: br i1 [[FAIL_SUCCESS]], label %[[EXIT:.+]], label %[[CONT]]
// CHECK: [[EXIT]]
// CHECK: store i32 [[CAST]], ptr @{{.+}},
#pragma omp atomic capture relaxed
  {iv = bfx4_packed.a; bfx4_packed.a -= ldv;}
// CHECK: [[EXPR:%.+]] = load x86_fp80, ptr @{{.+}}
// CHECK: [[PREV_VALUE:%.+]] = load atomic i64, ptr @{{.+}} monotonic, align 8
// CHECK: br label %[[CONT:.+]]
// CHECK: [[CONT]]
// CHECK: [[OLD_BF_VALUE:%.+]] = phi i64 [ [[PREV_VALUE]], %[[EXIT]] ], [ [[FAILED_OLD_VAL:%.+]], %[[CONT]] ]
// CHECK: store i64 [[OLD_BF_VALUE]], ptr [[TEMP1:%.+]],
// CHECK: store i64 [[OLD_BF_VALUE]], ptr [[TEMP:%.+]],
// CHECK: [[A_LD:%.+]] = load i64, ptr [[TEMP]],
// CHECK: [[A_SHL:%.+]] = shl i64 [[A_LD]], 40
// CHECK: [[A_ASHR:%.+]] = ashr i64 [[A_SHL:%.+]], 57
// CHECK: [[CONV:%.+]] = sitofp i64 [[A_ASHR]] to x86_fp80
// CHECK: [[DIV:%.+]] = fdiv x86_fp80 [[CONV]], [[EXPR]]
// CHECK: [[CONV:%.+]] = fptosi x86_fp80 [[DIV]] to i64
// CHECK: [[BF_LD:%.+]] = load i64, ptr [[TEMP1]],
// CHECK: [[BF_AND:%.+]] = and i64 [[CONV]], 127
// CHECK: [[BF_VALUE:%.+]] = shl i64 [[BF_AND:%.+]], 17
// CHECK: [[BF_CLEAR:%.+]] = and i64 [[BF_LD]], -16646145
// CHECK: [[VAL:%.+]] = or i64 [[BF_CLEAR]], [[BF_VALUE]]
// CHECK: store i64 [[VAL]], ptr [[TEMP1]]
// CHECK: [[NEW_BF_VALUE:%.+]] = load i64, ptr [[TEMP1]]
// CHECK: [[RES:%.+]] = cmpxchg ptr @{{.+}}, i64 [[OLD_BF_VALUE]], i64 [[NEW_BF_VALUE]] release monotonic, align 8
// CHECK: [[FAILED_OLD_VAL]] = extractvalue { i64, i1 } [[RES]], 0
// CHECK: [[FAIL_SUCCESS:%.+]] = extractvalue { i64, i1 } [[RES]], 1
// CHECK: br i1 [[FAIL_SUCCESS]], label %[[EXIT:.+]], label %[[CONT]]
// CHECK: [[EXIT]]
// CHECK: [[NEW_VAL:%.+]] = trunc i64 [[CONV]] to i32
// CHECK: store i32 [[NEW_VAL]], ptr @{{.+}},
// CHECK-50: call{{.*}} @__kmpc_flush(
#pragma omp atomic capture release
  {bfx4.b /= ldv; iv = bfx4.b;}
// CHECK: [[EXPR:%.+]] = load x86_fp80, ptr @{{.+}}
// CHECK: [[PREV_VALUE:%.+]] = load atomic i8, ptr getelementptr inbounds nuw (%struct.BitFields4_packed, ptr @{{.+}}, i32 0, i32 1) acquire, align 1
// CHECK: br label %[[CONT:.+]]
// CHECK: [[CONT]]
// CHECK: [[OLD_BF_VALUE:%.+]] = phi i8 [ [[PREV_VALUE]], %[[EXIT]] ], [ [[FAILED_OLD_VAL:%.+]], %[[CONT]] ]
// CHECK: store i8 [[OLD_BF_VALUE]], ptr [[BITCAST1:%.+]],
// CHECK: store i8 [[OLD_BF_VALUE]], ptr [[BITCAST:%.+]],
// CHECK: [[A_LD:%.+]] = load i8, ptr [[BITCAST]],
// CHECK: [[A_ASHR:%.+]] = ashr i8 [[A_LD]], 1
// CHECK: [[CAST:%.+]] = sext i8 [[A_ASHR]] to i64
// CHECK: [[CONV:%.+]] = sitofp i64 [[CAST]] to x86_fp80
// CHECK: [[ADD:%.+]] = fadd x86_fp80 [[CONV]], [[EXPR]]
// CHECK: [[NEW_VAL:%.+]] = fptosi x86_fp80 [[ADD]] to i64
// CHECK: [[TRUNC:%.+]] = trunc i64 [[NEW_VAL]] to i8
// CHECK: [[BF_LD:%.+]] = load i8, ptr [[BITCAST1]],
// CHECK: [[BF_AND:%.+]] = and i8 [[TRUNC]], 127
// CHECK: [[BF_VALUE:%.+]] = shl i8 [[BF_AND]], 1
// CHECK: [[BF_CLEAR:%.+]] = and i8 [[BF_LD]], 1
// CHECK: or i8 [[BF_CLEAR]], [[BF_VALUE]]
// CHECK: store i8 %{{.+}}, ptr [[BITCAST1]]
// CHECK: [[NEW_BF_VALUE:%.+]] = load i8, ptr [[BITCAST1]]
// CHECK: [[RES:%.+]] = cmpxchg ptr getelementptr inbounds nuw (%struct.BitFields4_packed, ptr @{{.+}}, i32 0, i32 1), i8 [[OLD_BF_VALUE]], i8 [[NEW_BF_VALUE]] acquire acquire, align 1
// CHECK: [[FAILED_OLD_VAL]] = extractvalue { i8, i1 } [[RES]], 0
// CHECK: [[FAIL_SUCCESS:%.+]] = extractvalue { i8, i1 } [[RES]], 1
// CHECK: br i1 [[FAIL_SUCCESS]], label %[[EXIT:.+]], label %[[CONT]]
// CHECK: [[EXIT]]
// CHECK: [[NEW_VAL_I32:%.+]] = trunc i64 [[NEW_VAL]] to i32
// CHECK: store i32 [[NEW_VAL_I32]], ptr @{{.+}},
// CHECK-50: call{{.*}} @__kmpc_flush(
#pragma omp atomic capture acquire
  iv = bfx4_packed.b += ldv;
// CHECK: load i64, ptr
// CHECK: [[EXPR:%.+]] = uitofp i64 %{{.+}} to float
// CHECK: [[I64VAL:%.+]] = load atomic i64, ptr [[DEST:@.+]] acquire, align 8
// CHECK: br label %[[CONT:.+]]
// CHECK: [[CONT]]
// CHECK: [[OLD_I64:%.+]] = phi i64 [ [[I64VAL]], %{{.+}} ], [ [[FAILED_I64_OLD_VAL:%.+]], %[[CONT]] ]
// CHECK: store i64 [[OLD_I64]], ptr [[LDTEMP1:%.+]],
// CHECK: [[OLD_VEC_VAL:%.+]] = bitcast i64 [[OLD_I64]] to <2 x float>
// CHECK: store <2 x float> [[OLD_VEC_VAL]], ptr [[LDTEMP:%.+]],
// CHECK: [[VEC_VAL:%.+]] = load <2 x float>, ptr [[LDTEMP]]
// CHECK: [[X:%.+]] = extractelement <2 x float> [[VEC_VAL]], i64 0
// CHECK: [[VEC_ITEM_VAL:%.+]] = fsub float [[EXPR]], [[X]]
// CHECK: [[VEC_VAL:%.+]] = load <2 x float>, ptr [[LDTEMP1]],
// CHECK: [[NEW_VEC_VAL:%.+]] = insertelement <2 x float> [[VEC_VAL]], float [[VEC_ITEM_VAL]], i64 0
// CHECK: store <2 x float> [[NEW_VEC_VAL]], ptr [[LDTEMP1]]
// CHECK: [[NEW_I64:%.+]] = load i64, ptr [[LDTEMP1]]
// CHECK: [[RES:%.+]] = cmpxchg ptr [[DEST]], i64 [[OLD_I64]], i64 [[NEW_I64]] acq_rel acquire, align 8
// CHECK: [[FAILED_I64_OLD_VAL:%.+]] = extractvalue { i64, i1 } [[RES]], 0
// CHECK: [[FAIL_SUCCESS:%.+]] = extractvalue { i64, i1 } [[RES]], 1
// CHECK: br i1 [[FAIL_SUCCESS]], label %[[EXIT:.+]], label %[[CONT]]
// CHECK: [[EXIT]]
// CHECK: store float [[X]], ptr @{{.+}},
// CHECK-50: call{{.*}} @__kmpc_flush(
#pragma omp atomic capture acq_rel
  {fv = float2x.x; float2x.x = ulv - float2x.x;}
// CHECK: [[EXPR:%.+]] = load double, ptr @{{.+}},
// CHECK: [[OLD_VAL:%.+]] = call i32 @llvm.read_register.i32([[REG:metadata ![0-9]+]])
// CHECK: [[X_RVAL:%.+]] = sitofp i32 [[OLD_VAL]] to double
// CHECK: [[DIV:%.+]] = fdiv double [[EXPR]], [[X_RVAL]]
// CHECK: [[NEW_VAL:%.+]] = fptosi double [[DIV]] to i32
// CHECK: call void @llvm.write_register.i32([[REG]], i32 [[NEW_VAL]])
// CHECK: store i32 [[NEW_VAL]], ptr @{{.+}},
// CHECK-50: call{{.*}} @__kmpc_flush(
#pragma omp atomic capture seq_cst
  {rix = dv / rix; iv = rix;}
// CHECK: [[OLD_VAL:%.+]] = atomicrmw xchg ptr @{{.+}}, i32 5 monotonic, align 4
// CHECK: call void @llvm.write_register.i32([[REG]], i32 [[OLD_VAL]])
#pragma omp atomic capture
  {rix = ix; ix = 5;}
  return 0;
}
#endif
