// RUN: %clang_cc1 -verify -triple x86_64-apple-darwin10 -target-cpu core2 -fopenmp -x c -emit-llvm %s -o - | FileCheck %s
// RUN: %clang_cc1 -fopenmp -x c -triple x86_64-apple-darwin10 -target-cpu core2 -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp -x c -triple x86_64-apple-darwin10 -target-cpu core2 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s

// RUN: %clang_cc1 -verify -triple x86_64-apple-darwin10 -target-cpu core2 -fopenmp-simd -x c -emit-llvm %s -o - | FileCheck --check-prefix SIMD-ONLY0 %s
// RUN: %clang_cc1 -fopenmp-simd -x c -triple x86_64-apple-darwin10 -target-cpu core2 -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp-simd -x c -triple x86_64-apple-darwin10 -target-cpu core2 -include-pch %t -verify %s -emit-llvm -o - | FileCheck --check-prefix SIMD-ONLY0 %s
// SIMD-ONLY0-NOT: {{__kmpc|__tgt}}
// expected-no-diagnostics
// REQUIRES: x86-registered-target
//
// Codegen test for '#pragma omp atomic write'. Each statement below is
// paired with FileCheck patterns describing the IR clang must emit for it:
// plain atomic stores for lock-free scalar l-values, __atomic_store calls
// for complex types, and compare-exchange loops (cmpxchg or
// __atomic_compare_exchange) for vector-element and bit-field l-values.
// Also covers the memory-order clauses: the default is monotonic, with
// explicit release/relaxed variants, and seq_cst followed by a
// __kmpc_flush call.
#ifndef HEADER
#define HEADER

// Pairs of (source value, atomic destination) for every scalar, complex,
// and floating-point type exercised below.
_Bool bv, bx;
char cv, cx;
unsigned char ucv, ucx;
short sv, sx;
unsigned short usv, usx;
int iv, ix;
unsigned int uiv, uix;
long lv, lx;
unsigned long ulv, ulx;
long long llv, llx;
unsigned long long ullv, ullx;
float fv, fx;
double dv, dx;
long double ldv, ldx;
_Complex int civ, cix;
_Complex float cfv, cfx;
_Complex double cdv, cdx;

typedef int int4 __attribute__((__vector_size__(16)));
int4 int4x;

// Bit-field destinations. Per the CHECK lines below, an atomic write into a
// bit-field member is emitted as a read-modify-write loop over the enclosing
// addressable storage unit; the packed variants change that unit's size and
// alignment, selecting the library __atomic_* path where needed.
struct BitFields {
  int : 32;
  int a : 31;
} bfx;

struct BitFields_packed {
  int : 32;
  int a : 31;
} __attribute__ ((__packed__)) bfx_packed;

struct BitFields2 {
  int : 31;
  int a : 1;
} bfx2;

struct BitFields2_packed {
  int : 31;
  int a : 1;
} __attribute__ ((__packed__)) bfx2_packed;

struct BitFields3 {
  int : 11;
  int a : 14;
} bfx3;

struct BitFields3_packed {
  int : 11;
  int a : 14;
} __attribute__ ((__packed__)) bfx3_packed;

struct BitFields4 {
  short : 16;
  int a: 1;
  long b : 7;
} bfx4;

struct BitFields4_packed {
  short : 16;
  int a: 1;
  long b : 7;
} __attribute__ ((__packed__)) bfx4_packed;

typedef float float2 __attribute__((ext_vector_type(2)));
float2 float2x;

// Register "0" is currently an invalid register for global register variables.
// Use "esp" instead of "0".
// register int rix __asm__("0");
register int rix __asm__("esp");

int main(void) {
// CHECK: store atomic i32 1, ptr getelementptr inbounds nuw ({ i32, i32 }, ptr @civ, i32 0, i32 1) monotonic, align 4
#pragma omp atomic write
  __imag(civ) = 1;
// CHECK: load i8, ptr
// CHECK: store atomic i8 {{.*}} monotonic, align 1
#pragma omp atomic write
  bx = bv;
// CHECK: load i8, ptr
// CHECK: store atomic i8 {{.*}} release, align 1
#pragma omp atomic write release
  cx = cv;
// CHECK: load i8, ptr
// CHECK: store atomic i8 {{.*}} monotonic, align 1
#pragma omp atomic write
  ucx = ucv;
// CHECK: load i16, ptr
// CHECK: store atomic i16 {{.*}} monotonic, align 2
#pragma omp atomic write
  sx = sv;
// CHECK: load i16, ptr
// CHECK: store atomic i16 {{.*}} monotonic, align 2
#pragma omp atomic write
  usx = usv;
// CHECK: load i32, ptr
// CHECK: store atomic i32 {{.*}} monotonic, align 4
#pragma omp atomic write
  ix = iv;
// CHECK: load i32, ptr
// CHECK: store atomic i32 {{.*}} monotonic, align 4
#pragma omp atomic write
  uix = uiv;
// CHECK: load i64, ptr
// CHECK: store atomic i64 {{.*}} monotonic, align 8
#pragma omp atomic write
  lx = lv;
// CHECK: load i64, ptr
// CHECK: store atomic i64 {{.*}} monotonic, align 8
#pragma omp atomic write
  ulx = ulv;
// CHECK: load i64, ptr
// CHECK: store atomic i64 {{.*}} monotonic, align 8
#pragma omp atomic write
  llx = llv;
// CHECK: load i64, ptr
// CHECK: store atomic i64 {{.*}} monotonic, align 8
#pragma omp atomic write
  ullx = ullv;
// CHECK: load float, ptr
// CHECK: store atomic float {{.*}}, ptr {{.*}} monotonic, align 4
#pragma omp atomic write
  fx = fv;
// CHECK: load double, ptr
// CHECK: store atomic double {{.*}}, ptr {{.*}} monotonic, align 8
#pragma omp atomic write
  dx = dv;
// x86_fp80 is padded out to 16 bytes and stored through an i128 temporary.
// CHECK: [[LD:%.+]] = load x86_fp80, ptr
// CHECK: call void @llvm.memset.p0.i64(ptr align 16 [[LDTEMP:%.*]], i8 0, i64 16, i1 false)
// CHECK: store x86_fp80 [[LD]], ptr [[LDTEMP]]
// CHECK: [[LD:%.+]] = load i128, ptr [[LDTEMP:%.*]]
// CHECK: store atomic i128 [[LD]], ptr {{.*}} monotonic, align 16
#pragma omp atomic write
  ldx = ldv;
// Complex types go through the __atomic_store library call.
// CHECK: [[REAL_VAL:%.+]] = load i32, ptr @{{.*}}
// CHECK: [[IMG_VAL:%.+]] = load i32, ptr getelementptr inbounds nuw ({ i32, i32 }, ptr @{{.*}}, i32 0, i32 1)
// CHECK: [[TEMP_REAL_REF:%.+]] = getelementptr inbounds nuw { i32, i32 }, ptr [[TEMP:%.+]], i32 0, i32 0
// CHECK: [[TEMP_IMG_REF:%.+]] = getelementptr inbounds nuw { i32, i32 }, ptr [[TEMP]], i32 0, i32 1
// CHECK: store i32 [[REAL_VAL]], ptr [[TEMP_REAL_REF]]
// CHECK: store i32 [[IMG_VAL]], ptr [[TEMP_IMG_REF]]
// CHECK: call void @__atomic_store(i64 noundef 8, ptr noundef @{{.*}}, ptr noundef [[TEMP]], i32 noundef 0)
#pragma omp atomic write
  cix = civ;
// CHECK: [[REAL_VAL:%.+]] = load float, ptr @{{.*}}
// CHECK: [[IMG_VAL:%.+]] = load float, ptr getelementptr inbounds nuw ({ float, float }, ptr @{{.*}}, i32 0, i32 1)
// CHECK: [[TEMP_REAL_REF:%.+]] = getelementptr inbounds nuw { float, float }, ptr [[TEMP:%.+]], i32 0, i32 0
// CHECK: [[TEMP_IMG_REF:%.+]] = getelementptr inbounds nuw { float, float }, ptr [[TEMP]], i32 0, i32 1
// CHECK: store float [[REAL_VAL]], ptr [[TEMP_REAL_REF]]
// CHECK: store float [[IMG_VAL]], ptr [[TEMP_IMG_REF]]
// CHECK: call void @__atomic_store(i64 noundef 8, ptr noundef @{{.*}}, ptr noundef [[TEMP]], i32 noundef 0)
#pragma omp atomic write
  cfx = cfv;
// seq_cst: the __atomic_store gets ordering 5 and is followed by a flush.
// CHECK: [[REAL_VAL:%.+]] = load double, ptr @{{.*}}
// CHECK: [[IMG_VAL:%.+]] = load double, ptr getelementptr inbounds nuw ({ double, double }, ptr @{{.*}}, i32 0, i32 1)
// CHECK: [[TEMP_REAL_REF:%.+]] = getelementptr inbounds nuw { double, double }, ptr [[TEMP:%.+]], i32 0, i32 0
// CHECK: [[TEMP_IMG_REF:%.+]] = getelementptr inbounds nuw { double, double }, ptr [[TEMP]], i32 0, i32 1
// CHECK: store double [[REAL_VAL]], ptr [[TEMP_REAL_REF]]
// CHECK: store double [[IMG_VAL]], ptr [[TEMP_IMG_REF]]
// CHECK: call void @__atomic_store(i64 noundef 16, ptr noundef @{{.*}}, ptr noundef [[TEMP]], i32 noundef 5)
// CHECK: call{{.*}} @__kmpc_flush(
#pragma omp atomic seq_cst write
  cdx = cdv;
// Mixed-type writes: the RHS is converted before the atomic store.
// CHECK: load i8, ptr
// CHECK: store atomic i64 {{.*}} monotonic, align 8
#pragma omp atomic write
  ulx = bv;
// CHECK: load i8, ptr
// CHECK: store atomic i8 {{.*}} monotonic, align 1
#pragma omp atomic write
  bx = cv;
// CHECK: load i8, ptr
// CHECK: store atomic i8 {{.*}} seq_cst, align 1
// CHECK: call{{.*}} @__kmpc_flush(
#pragma omp atomic write, seq_cst
  cx = ucv;
// CHECK: load i16, ptr
// CHECK: store atomic i64 {{.*}} monotonic, align 8
#pragma omp atomic write
  ulx = sv;
// CHECK: load i16, ptr
// CHECK: store atomic i64 {{.*}} monotonic, align 8
#pragma omp atomic write
  lx = usv;
// CHECK: load i32, ptr
// CHECK: store atomic i32 {{.*}} seq_cst, align 4
// CHECK: call{{.*}} @__kmpc_flush(
#pragma omp atomic seq_cst, write
  uix = iv;
// CHECK: load i32, ptr
// CHECK: store atomic i32 {{.*}} monotonic, align 4
#pragma omp atomic write
  ix = uiv;
// CHECK: load i64, ptr
// CHECK: [[VAL:%.+]] = trunc i64 %{{.*}} to i32
// CHECK: [[TEMP_REAL_REF:%.+]] = getelementptr inbounds nuw { i32, i32 }, ptr [[TEMP:%.+]], i32 0, i32 0
// CHECK: [[TEMP_IMG_REF:%.+]] = getelementptr inbounds nuw { i32, i32 }, ptr [[TEMP]], i32 0, i32 1
// CHECK: store i32 [[VAL]], ptr [[TEMP_REAL_REF]]
// CHECK: store i32 0, ptr [[TEMP_IMG_REF]]
// CHECK: call void @__atomic_store(i64 noundef 8, ptr noundef @{{.+}}, ptr noundef [[TEMP]], i32 noundef 0)
#pragma omp atomic write
  cix = lv;
// CHECK: load i64, ptr
// CHECK: store atomic float %{{.+}}, ptr {{.*}} monotonic, align 4
#pragma omp atomic write
  fx = ulv;
// CHECK: load i64, ptr
// CHECK: store atomic double %{{.+}}, ptr {{.*}} monotonic, align 8
#pragma omp atomic write
  dx = llv;
// CHECK: load i64, ptr
// CHECK: [[VAL:%.+]] = uitofp i64 %{{.+}} to x86_fp80
// CHECK: call void @llvm.memset.p0.i64(ptr align 16 [[TEMP:%.+]], i8 0, i64 16, i1 false)
// CHECK: store x86_fp80 [[VAL]], ptr [[TEMP]]
// CHECK: [[VAL:%.+]] = load i128, ptr [[TEMP]]
// CHECK: store atomic i128 [[VAL]], ptr {{.*}} monotonic, align 16
#pragma omp atomic write
  ldx = ullv;
// CHECK: load float, ptr
// CHECK: [[VAL:%.+]] = fptosi float %{{.*}} to i32
// CHECK: [[TEMP_REAL_REF:%.+]] = getelementptr inbounds nuw { i32, i32 }, ptr [[TEMP:%.+]], i32 0, i32 0
// CHECK: [[TEMP_IMG_REF:%.+]] = getelementptr inbounds nuw { i32, i32 }, ptr [[TEMP]], i32 0, i32 1
// CHECK: store i32 [[VAL]], ptr [[TEMP_REAL_REF]]
// CHECK: store i32 0, ptr [[TEMP_IMG_REF]]
// CHECK: call void @__atomic_store(i64 noundef 8, ptr noundef @{{.+}}, ptr noundef [[TEMP]], i32 noundef 0)
#pragma omp atomic write
  cix = fv;
// CHECK: load double, ptr
// CHECK: store atomic i16 {{.*}} monotonic, align 2
#pragma omp atomic write
  sx = dv;
// CHECK: load x86_fp80, ptr
// CHECK: store atomic i8 {{.*}} monotonic, align 1
#pragma omp atomic write
  bx = ldv;
// Complex -> _Bool: both parts are compared to zero and or'ed.
// CHECK: load i32, ptr @{{.+}}
// CHECK: load i32, ptr getelementptr inbounds nuw ({ i32, i32 }, ptr @{{.+}}, i32 0, i32 1)
// CHECK: icmp ne i32 %{{.+}}, 0
// CHECK: icmp ne i32 %{{.+}}, 0
// CHECK: or i1
// CHECK: store atomic i8 {{.*}} monotonic, align 1
#pragma omp atomic write
  bx = civ;
// CHECK: load float, ptr @{{.*}}
// CHECK: store atomic i16 {{.*}} monotonic, align 2
#pragma omp atomic write
  usx = cfv;
// CHECK: load double, ptr @{{.+}}
// CHECK: store atomic i64 {{.*}} monotonic, align 8
#pragma omp atomic write
  llx = cdv;
// Writing one element of a 16-byte vector: i128 cmpxchg loop.
// CHECK-DAG: [[IDX:%.+]] = load i16, ptr @{{.+}}
// CHECK-DAG: load i8, ptr
// CHECK-DAG: [[VEC_ITEM_VAL:%.+]] = zext i1 %{{.+}} to i32
// CHECK: [[I128VAL:%.+]] = load atomic i128, ptr [[DEST:@.+]] monotonic, align 16
// CHECK: br label %[[CONT:.+]]
// CHECK: [[CONT]]
// CHECK: [[OLD_I128:%.+]] = phi i128 [ [[I128VAL]], %{{.+}} ], [ [[FAILED_I128_OLD_VAL:%.+]], %[[CONT]] ]
// CHECK: store i128 [[OLD_I128]], ptr [[LDTEMP:%.+]],
// CHECK: [[VEC_VAL:%.+]] = load <4 x i32>, ptr [[LDTEMP]]
// CHECK: [[NEW_VEC_VAL:%.+]] = insertelement <4 x i32> [[VEC_VAL]], i32 [[VEC_ITEM_VAL]], i16 [[IDX]]
// CHECK: store <4 x i32> [[NEW_VEC_VAL]], ptr [[LDTEMP]]
// CHECK: [[NEW_I128:%.+]] = load i128, ptr [[LDTEMP]]
// CHECK: [[RES:%.+]] = cmpxchg ptr [[DEST]], i128 [[OLD_I128]], i128 [[NEW_I128]] monotonic monotonic, align 16
// CHECK: [[FAILED_I128_OLD_VAL:%.+]] = extractvalue { i128, i1 } [[RES]], 0
// CHECK: [[FAIL_SUCCESS:%.+]] = extractvalue { i128, i1 } [[RES]], 1
// CHECK: br i1 [[FAIL_SUCCESS]], label %[[EXIT:.+]], label %[[CONT]]
// CHECK: [[EXIT]]
#pragma omp atomic write
  int4x[sv] = bv;
// Bit-field writes: mask/shift the new value into the storage unit inside a
// cmpxchg (or __atomic_compare_exchange, for under-aligned packed units) loop.
// CHECK: load x86_fp80, ptr @{{.+}}
// CHECK: [[NEW_VAL:%.+]] = fptosi x86_fp80 %{{.+}} to i32
// CHECK: [[PREV_VALUE:%.+]] = load atomic i32, ptr getelementptr (i8, ptr @{{.+}}, i64 4) monotonic, align 4
// CHECK: br label %[[CONT:.+]]
// CHECK: [[CONT]]
// CHECK: [[OLD_BF_VALUE:%.+]] = phi i32 [ [[PREV_VALUE]], %[[EXIT]] ], [ [[FAILED_OLD_VAL:%.+]], %[[CONT]] ]
// CHECK: [[BF_VALUE:%.+]] = and i32 [[NEW_VAL]], 2147483647
// CHECK: [[BF_CLEAR:%.+]] = and i32 %{{.+}}, -2147483648
// CHECK: or i32 [[BF_CLEAR]], [[BF_VALUE]]
// CHECK: store i32 %{{.+}}, ptr [[LDTEMP:%.+]]
// CHECK: [[NEW_BF_VALUE:%.+]] = load i32, ptr [[LDTEMP]]
// CHECK: [[RES:%.+]] = cmpxchg ptr getelementptr (i8, ptr @{{.+}}, i64 4), i32 [[OLD_BF_VALUE]], i32 [[NEW_BF_VALUE]] monotonic monotonic, align 4
// CHECK: [[FAILED_OLD_VAL]] = extractvalue { i32, i1 } [[RES]], 0
// CHECK: [[FAIL_SUCCESS:%.+]] = extractvalue { i32, i1 } [[RES]], 1
// CHECK: br i1 [[FAIL_SUCCESS]], label %[[EXIT:.+]], label %[[CONT]]
// CHECK: [[EXIT]]
#pragma omp atomic write
  bfx.a = ldv;
// CHECK: load x86_fp80, ptr @{{.+}}
// CHECK: [[NEW_VAL:%.+]] = fptosi x86_fp80 %{{.+}} to i32
// CHECK: call void @__atomic_load(i64 noundef 4, ptr noundef getelementptr (i8, ptr @{{.+}}, i64 4), ptr noundef [[LDTEMP:%.+]], i32 noundef 0)
// CHECK: br label %[[CONT:.+]]
// CHECK: [[CONT]]
// CHECK: [[OLD_BF_VALUE:%.+]] = load i32, ptr [[LDTEMP]],
// CHECK: store i32 [[OLD_BF_VALUE]], ptr [[LDTEMP1:%.+]],
// CHECK: [[OLD_BF_VALUE:%.+]] = load i32, ptr [[LDTEMP1]],
// CHECK: [[BF_VALUE:%.+]] = and i32 [[NEW_VAL]], 2147483647
// CHECK: [[BF_CLEAR:%.+]] = and i32 [[OLD_BF_VALUE]], -2147483648
// CHECK: or i32 [[BF_CLEAR]], [[BF_VALUE]]
// CHECK: store i32 %{{.+}}, ptr [[LDTEMP1]]
// CHECK: [[FAIL_SUCCESS:%.+]] = call zeroext i1 @__atomic_compare_exchange(i64 noundef 4, ptr noundef getelementptr (i8, ptr @{{.+}}, i64 4), ptr noundef [[LDTEMP]], ptr noundef [[LDTEMP1]], i32 noundef 0, i32 noundef 0)
// CHECK: br i1 [[FAIL_SUCCESS]], label %[[EXIT:.+]], label %[[CONT]]
// CHECK: [[EXIT]]
#pragma omp atomic write
  bfx_packed.a = ldv;
// CHECK: load x86_fp80, ptr @{{.+}}
// CHECK: [[NEW_VAL:%.+]] = fptosi x86_fp80 %{{.+}} to i32
// CHECK: [[PREV_VALUE:%.+]] = load atomic i32, ptr @{{.+}} monotonic, align 4
// CHECK: br label %[[CONT:.+]]
// CHECK: [[CONT]]
// CHECK: [[OLD_BF_VALUE:%.+]] = phi i32 [ [[PREV_VALUE]], %[[EXIT]] ], [ [[FAILED_OLD_VAL:%.+]], %[[CONT]] ]
// CHECK: [[BF_AND:%.+]] = and i32 [[NEW_VAL]], 1
// CHECK: [[BF_VALUE:%.+]] = shl i32 [[BF_AND]], 31
// CHECK: [[BF_CLEAR:%.+]] = and i32 %{{.+}}, 2147483647
// CHECK: or i32 [[BF_CLEAR]], [[BF_VALUE]]
// CHECK: store i32 %{{.+}}, ptr [[LDTEMP:%.+]]
// CHECK: [[NEW_BF_VALUE:%.+]] = load i32, ptr [[LDTEMP]]
// CHECK: [[RES:%.+]] = cmpxchg ptr @{{.+}}, i32 [[OLD_BF_VALUE]], i32 [[NEW_BF_VALUE]] monotonic monotonic, align 4
// CHECK: [[FAILED_OLD_VAL]] = extractvalue { i32, i1 } [[RES]], 0
// CHECK: [[FAIL_SUCCESS:%.+]] = extractvalue { i32, i1 } [[RES]], 1
// CHECK: br i1 [[FAIL_SUCCESS]], label %[[EXIT:.+]], label %[[CONT]]
// CHECK: [[EXIT]]
#pragma omp atomic write
  bfx2.a = ldv;
// CHECK: load x86_fp80, ptr @{{.+}}
// CHECK: [[NEW_VAL:%.+]] = fptosi x86_fp80 %{{.+}} to i32
// CHECK: [[PREV_VALUE:%.+]] = load atomic i8, ptr getelementptr (i8, ptr @{{.+}}, i64 3) monotonic, align 1
// CHECK: br label %[[CONT:.+]]
// CHECK: [[CONT]]
// CHECK: [[OLD_BF_VALUE:%.+]] = phi i8 [ [[PREV_VALUE]], %[[EXIT]] ], [ [[FAILED_OLD_VAL:%.+]], %[[CONT]] ]
// CHECK: [[TRUNC:%.+]] = trunc i32 [[NEW_VAL]] to i8
// CHECK: [[BF_AND:%.+]] = and i8 [[TRUNC]], 1
// CHECK: [[BF_VALUE:%.+]] = shl i8 [[BF_AND]], 7
// CHECK: [[BF_CLEAR:%.+]] = and i8 %{{.+}}, 127
// CHECK: or i8 [[BF_CLEAR]], [[BF_VALUE]]
// CHECK: store i8 %{{.+}}, ptr [[LDTEMP:%.+]]
// CHECK: [[NEW_BF_VALUE:%.+]] = load i8, ptr [[LDTEMP]]
// CHECK: [[RES:%.+]] = cmpxchg ptr getelementptr (i8, ptr @{{.+}}, i64 3), i8 [[OLD_BF_VALUE]], i8 [[NEW_BF_VALUE]] monotonic monotonic, align 1
// CHECK: [[FAILED_OLD_VAL]] = extractvalue { i8, i1 } [[RES]], 0
// CHECK: [[FAIL_SUCCESS:%.+]] = extractvalue { i8, i1 } [[RES]], 1
// CHECK: br i1 [[FAIL_SUCCESS]], label %[[EXIT:.+]], label %[[CONT]]
// CHECK: [[EXIT]]
#pragma omp atomic write
  bfx2_packed.a = ldv;
// CHECK: load x86_fp80, ptr @{{.+}}
// CHECK: [[NEW_VAL:%.+]] = fptosi x86_fp80 %{{.+}} to i32
// CHECK: [[PREV_VALUE:%.+]] = load atomic i32, ptr @{{.+}} monotonic, align 4
// CHECK: br label %[[CONT:.+]]
// CHECK: [[CONT]]
// CHECK: [[OLD_BF_VALUE:%.+]] = phi i32 [ [[PREV_VALUE]], %[[EXIT]] ], [ [[FAILED_OLD_VAL:%.+]], %[[CONT]] ]
// CHECK: [[BF_AND:%.+]] = and i32 [[NEW_VAL]], 16383
// CHECK: [[BF_VALUE:%.+]] = shl i32 [[BF_AND]], 11
// CHECK: [[BF_CLEAR:%.+]] = and i32 %{{.+}}, -33552385
// CHECK: or i32 [[BF_CLEAR]], [[BF_VALUE]]
// CHECK: store i32 %{{.+}}, ptr [[LDTEMP:%.+]]
// CHECK: [[NEW_BF_VALUE:%.+]] = load i32, ptr [[LDTEMP]]
// CHECK: [[RES:%.+]] = cmpxchg ptr @{{.+}}, i32 [[OLD_BF_VALUE]], i32 [[NEW_BF_VALUE]] monotonic monotonic, align 4
// CHECK: [[FAILED_OLD_VAL]] = extractvalue { i32, i1 } [[RES]], 0
// CHECK: [[FAIL_SUCCESS:%.+]] = extractvalue { i32, i1 } [[RES]], 1
// CHECK: br i1 [[FAIL_SUCCESS]], label %[[EXIT:.+]], label %[[CONT]]
// CHECK: [[EXIT]]
#pragma omp atomic write
  bfx3.a = ldv;
// Packed 3-byte storage unit: loop uses i24 and the __atomic_* library calls.
// CHECK: load x86_fp80, ptr @{{.+}}
// CHECK: [[NEW_VAL:%.+]] = fptosi x86_fp80 %{{.+}} to i32
// CHECK: call void @__atomic_load(i64 noundef 3, ptr noundef getelementptr (i8, ptr @{{.+}}, i64 1), ptr noundef [[BITCAST:%.+]], i32 noundef 0)
// CHECK: br label %[[CONT:.+]]
// CHECK: [[CONT]]
// CHECK: [[OLD_VAL:%.+]] = load i24, ptr %{{.+}},
// CHECK: store i24 [[OLD_VAL]], ptr [[TEMP:%.+]],
// CHECK: [[TRUNC:%.+]] = trunc i32 [[NEW_VAL]] to i24
// CHECK: [[BF_AND:%.+]] = and i24 [[TRUNC]], 16383
// CHECK: [[BF_VALUE:%.+]] = shl i24 [[BF_AND]], 3
// CHECK: [[BF_CLEAR:%.+]] = and i24 %{{.+}}, -131065
// CHECK: or i24 [[BF_CLEAR]], [[BF_VALUE]]
// CHECK: store i24 %{{.+}}, ptr [[TEMP]]
// CHECK: [[FAIL_SUCCESS:%.+]] = call zeroext i1 @__atomic_compare_exchange(i64 noundef 3, ptr noundef getelementptr (i8, ptr @{{.+}}, i64 1), ptr noundef [[LDTEMP:%.+]], ptr noundef [[TEMP]], i32 noundef 0, i32 noundef 0)
// CHECK: br i1 [[FAIL_SUCCESS]], label %[[EXIT:.+]], label %[[CONT]]
// CHECK: [[EXIT]]
#pragma omp atomic write
  bfx3_packed.a = ldv;
// CHECK: load x86_fp80, ptr @{{.+}}
// CHECK: [[NEW_VAL:%.+]] = fptosi x86_fp80 %{{.+}} to i32
// CHECK: [[PREV_VALUE:%.+]] = load atomic i64, ptr @{{.+}} monotonic, align 8
// CHECK: br label %[[CONT:.+]]
// CHECK: [[CONT]]
// CHECK: [[OLD_BF_VALUE:%.+]] = phi i64 [ [[PREV_VALUE]], %[[EXIT]] ], [ [[FAILED_OLD_VAL:%.+]], %[[CONT]] ]
// CHECK: [[ZEXT:%.+]] = zext i32 [[NEW_VAL]] to i64
// CHECK: [[BF_AND:%.+]] = and i64 [[ZEXT]], 1
// CHECK: [[BF_VALUE:%.+]] = shl i64 [[BF_AND]], 16
// CHECK: [[BF_CLEAR:%.+]] = and i64 %{{.+}}, -65537
// CHECK: or i64 [[BF_CLEAR]], [[BF_VALUE]]
// CHECK: store i64 %{{.+}}, ptr [[LDTEMP:%.+]]
// CHECK: [[NEW_BF_VALUE:%.+]] = load i64, ptr [[LDTEMP]]
// CHECK: [[RES:%.+]] = cmpxchg ptr @{{.+}}, i64 [[OLD_BF_VALUE]], i64 [[NEW_BF_VALUE]] monotonic monotonic, align 8
// CHECK: [[FAILED_OLD_VAL]] = extractvalue { i64, i1 } [[RES]], 0
// CHECK: [[FAIL_SUCCESS:%.+]] = extractvalue { i64, i1 } [[RES]], 1
// CHECK: br i1 [[FAIL_SUCCESS]], label %[[EXIT:.+]], label %[[CONT]]
// CHECK: [[EXIT]]
#pragma omp atomic write
  bfx4.a = ldv;
// CHECK: load x86_fp80, ptr @{{.+}}
// CHECK: [[NEW_VAL:%.+]] = fptosi x86_fp80 %{{.+}} to i32
// CHECK: [[PREV_VALUE:%.+]] = load atomic i8, ptr getelementptr inbounds nuw (%struct.BitFields4_packed, ptr @{{.+}}, i32 0, i32 1) monotonic, align 1
// CHECK: br label %[[CONT:.+]]
// CHECK: [[CONT]]
// CHECK: [[OLD_BF_VALUE:%.+]] = phi i8 [ [[PREV_VALUE]], %[[EXIT]] ], [ [[FAILED_OLD_VAL:%.+]], %[[CONT]] ]
// CHECK: [[TRUNC:%.+]] = trunc i32 [[NEW_VAL]] to i8
// CHECK: [[BF_VALUE:%.+]] = and i8 [[TRUNC]], 1
// CHECK: [[BF_CLEAR:%.+]] = and i8 %{{.+}}, -2
// CHECK: or i8 [[BF_CLEAR]], [[BF_VALUE]]
// CHECK: store i8 %{{.+}}, ptr [[LDTEMP:%.+]]
// CHECK: [[NEW_BF_VALUE:%.+]] = load i8, ptr [[LDTEMP]]
// CHECK: [[RES:%.+]] = cmpxchg ptr getelementptr inbounds nuw (%struct.BitFields4_packed, ptr @{{.+}}, i32 0, i32 1), i8 [[OLD_BF_VALUE]], i8 [[NEW_BF_VALUE]] monotonic monotonic, align 1
// CHECK: [[FAILED_OLD_VAL]] = extractvalue { i8, i1 } [[RES]], 0
// CHECK: [[FAIL_SUCCESS:%.+]] = extractvalue { i8, i1 } [[RES]], 1
// CHECK: br i1 [[FAIL_SUCCESS]], label %[[EXIT:.+]], label %[[CONT]]
// CHECK: [[EXIT]]
#pragma omp atomic write
  bfx4_packed.a = ldv;
// CHECK: load x86_fp80, ptr @{{.+}}
// CHECK: [[NEW_VAL:%.+]] = fptosi x86_fp80 %{{.+}} to i64
// CHECK: [[PREV_VALUE:%.+]] = load atomic i64, ptr @{{.+}} monotonic, align 8
// CHECK: br label %[[CONT:.+]]
// CHECK: [[CONT]]
// CHECK: [[OLD_BF_VALUE:%.+]] = phi i64 [ [[PREV_VALUE]], %[[EXIT]] ], [ [[FAILED_OLD_VAL:%.+]], %[[CONT]] ]
// CHECK: [[BF_AND:%.+]] = and i64 [[NEW_VAL]], 127
// CHECK: [[BF_VALUE:%.+]] = shl i64 [[BF_AND]], 17
// CHECK: [[BF_CLEAR:%.+]] = and i64 %{{.+}}, -16646145
// CHECK: or i64 [[BF_CLEAR]], [[BF_VALUE]]
// CHECK: store i64 %{{.+}}, ptr [[LDTEMP:%.+]]
// CHECK: [[NEW_BF_VALUE:%.+]] = load i64, ptr [[LDTEMP]]
// CHECK: [[RES:%.+]] = cmpxchg ptr @{{.+}}, i64 [[OLD_BF_VALUE]], i64 [[NEW_BF_VALUE]] monotonic monotonic, align 8
// CHECK: [[FAILED_OLD_VAL]] = extractvalue { i64, i1 } [[RES]], 0
// CHECK: [[FAIL_SUCCESS:%.+]] = extractvalue { i64, i1 } [[RES]], 1
// CHECK: br i1 [[FAIL_SUCCESS]], label %[[EXIT:.+]], label %[[CONT]]
// CHECK: [[EXIT]]
#pragma omp atomic write
  bfx4.b = ldv;
// CHECK: load x86_fp80, ptr @{{.+}}
// CHECK: [[NEW_VAL:%.+]] = fptosi x86_fp80 %{{.+}} to i64
// CHECK: [[PREV_VALUE:%.+]] = load atomic i8, ptr getelementptr inbounds nuw (%struct.BitFields4_packed, ptr @{{.+}}, i32 0, i32 1) monotonic, align 1
// CHECK: br label %[[CONT:.+]]
// CHECK: [[CONT]]
// CHECK: [[OLD_BF_VALUE:%.+]] = phi i8 [ [[PREV_VALUE]], %[[EXIT]] ], [ [[FAILED_OLD_VAL:%.+]], %[[CONT]] ]
// CHECK: [[TRUNC:%.+]] = trunc i64 [[NEW_VAL]] to i8
// CHECK: [[BF_AND:%.+]] = and i8 [[TRUNC]], 127
// CHECK: [[BF_VALUE:%.+]] = shl i8 [[BF_AND]], 1
// CHECK: [[BF_CLEAR:%.+]] = and i8 %{{.+}}, 1
// CHECK: or i8 [[BF_CLEAR]], [[BF_VALUE]]
// CHECK: store i8 %{{.+}}, ptr [[LDTEMP:%.+]]
// CHECK: [[NEW_BF_VALUE:%.+]] = load i8, ptr [[LDTEMP]]
// CHECK: [[RES:%.+]] = cmpxchg ptr getelementptr inbounds nuw (%struct.BitFields4_packed, ptr @{{.+}}, i32 0, i32 1), i8 [[OLD_BF_VALUE]], i8 [[NEW_BF_VALUE]] monotonic monotonic, align 1
// CHECK: [[FAILED_OLD_VAL]] = extractvalue { i8, i1 } [[RES]], 0
// CHECK: [[FAIL_SUCCESS:%.+]] = extractvalue { i8, i1 } [[RES]], 1
// CHECK: br i1 [[FAIL_SUCCESS]], label %[[EXIT:.+]], label %[[CONT]]
// CHECK: [[EXIT]]
#pragma omp atomic relaxed write
  bfx4_packed.b = ldv;
// ext_vector element write: i64 cmpxchg loop over the <2 x float> storage.
// CHECK: load i64, ptr
// CHECK: [[VEC_ITEM_VAL:%.+]] = uitofp i64 %{{.+}} to float
// CHECK: [[I64VAL:%.+]] = load atomic i64, ptr [[DEST:@.+]] monotonic, align 8
// CHECK: br label %[[CONT:.+]]
// CHECK: [[CONT]]
// CHECK: [[OLD_I64:%.+]] = phi i64 [ [[I64VAL]], %{{.+}} ], [ [[FAILED_I64_OLD_VAL:%.+]], %[[CONT]] ]
// CHECK: store i64 [[OLD_I64]], ptr [[LDTEMP:%.+]],
// CHECK: [[VEC_VAL:%.+]] = load <2 x float>, ptr [[LDTEMP]]
// CHECK: [[NEW_VEC_VAL:%.+]] = insertelement <2 x float> [[VEC_VAL]], float [[VEC_ITEM_VAL]], i64 0
// CHECK: store <2 x float> [[NEW_VEC_VAL]], ptr [[LDTEMP]]
// CHECK: [[NEW_I64:%.+]] = load i64, ptr [[LDTEMP]]
// CHECK: [[RES:%.+]] = cmpxchg ptr [[DEST]], i64 [[OLD_I64]], i64 [[NEW_I64]] monotonic monotonic, align 8
// CHECK: [[FAILED_I64_OLD_VAL:%.+]] = extractvalue { i64, i1 } [[RES]], 0
// CHECK: [[FAIL_SUCCESS:%.+]] = extractvalue { i64, i1 } [[RES]], 1
// CHECK: br i1 [[FAIL_SUCCESS]], label %[[EXIT:.+]], label %[[CONT]]
// CHECK: [[EXIT]]
#pragma omp atomic write relaxed
  float2x.x = ulv;
// Global register variable source: read via the read_register intrinsic.
// CHECK: call i32 @llvm.read_register.i32(
// CHECK: sitofp i32 %{{.+}} to double
// CHECK: store atomic double %{{.+}}, ptr @{{.+}} seq_cst, align 8
// CHECK: call{{.*}} @__kmpc_flush(
#pragma omp atomic write seq_cst
  dv = rix;
  return 0;
}

#endif