// RUN: %clang_cc1 %s -emit-llvm -o - -triple=i686-apple-darwin9 | FileCheck %s --check-prefixes=CHECK,X86
// RUN: %clang_cc1 %s -emit-llvm -o - -triple=s390x-linux-gnu | FileCheck %s --check-prefixes=CHECK,SYSTEMZ

// CHECK: @[[NONSTATIC_GLOB_POINTER_FROM_INT:.+]] = global ptr null
// CHECK: @[[GLOB_POINTER:.+]] = internal global ptr null
// CHECK: @[[GLOB_POINTER_FROM_INT:.+]] = internal global ptr null
// CHECK: @[[GLOB_INT:.+]] = internal global i32 0
// CHECK: @[[GLOB_FLT:.+]] = internal global float {{[0e\+-\.]+}}, align
// CHECK: @[[GLOB_DBL:.+]] = internal global double {{[0e\+-\.]+}}, align
// X86:   @[[GLOB_LONGDBL:.+]] = internal global x86_fp80 {{[0xK]+}}, align
// SYSTEMZ: @[[GLOB_LONGDBL:.+]] = internal global fp128 {{[0xL]+}}, align

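// Each __sync builtin below should lower to a native LLVM atomic operation
// (atomicrmw, cmpxchg, an atomic store, or a fence); the CHECK lines pin down
// the exact IR for both triples.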
int atomic(void) {
  // nonsensical test for sync functions
  int old;
  int val = 1;
  char valc = 1;
  _Bool valb = 0;
  unsigned int uval = 1;
  int cmp = 0;
  int* ptrval;

  old = __sync_fetch_and_add(&val, 1);
  // CHECK: atomicrmw add ptr %val, i32 1 seq_cst, align 4

  old = __sync_fetch_and_sub(&valc, 2);
  // CHECK: atomicrmw sub ptr %valc, i8 2 seq_cst, align 1

  old = __sync_fetch_and_min(&val, 3);
  // CHECK: atomicrmw min ptr %val, i32 3 seq_cst, align 4

  old = __sync_fetch_and_max(&val, 4);
  // CHECK: atomicrmw max ptr %val, i32 4 seq_cst, align 4

  old = __sync_fetch_and_umin(&uval, 5u);
  // CHECK: atomicrmw umin ptr %uval, i32 5 seq_cst, align 4

  old = __sync_fetch_and_umax(&uval, 6u);
  // CHECK: atomicrmw umax ptr %uval, i32 6 seq_cst, align 4

  old = __sync_lock_test_and_set(&val, 7);
  // CHECK: atomicrmw xchg ptr %val, i32 7 seq_cst, align 4
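  // Despite its name, __sync_lock_test_and_set is an atomic exchange, which
  // is why it lowers to atomicrmw xchg, just like __sync_swap below.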

  old = __sync_swap(&val, 8);
  // CHECK: atomicrmw xchg ptr %val, i32 8 seq_cst, align 4

  old = __sync_val_compare_and_swap(&val, 4, 1976);
  // CHECK: [[PAIR:%[a-z0-9_.]+]] = cmpxchg ptr %val, i32 4, i32 1976 seq_cst seq_cst, align 4
  // CHECK: extractvalue { i32, i1 } [[PAIR]], 0

  old = __sync_bool_compare_and_swap(&val, 4, 1976);
  // CHECK: [[PAIR:%[a-z0-9_.]+]] = cmpxchg ptr %val, i32 4, i32 1976 seq_cst seq_cst, align 4
  // CHECK: extractvalue { i32, i1 } [[PAIR]], 1
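  // cmpxchg yields a { value, success } pair: the val_ form extracts element 0
  // (the value previously in memory), the bool_ form element 1 (the i1
  // success flag).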

  old = __sync_fetch_and_and(&val, 0x9);
  // CHECK: atomicrmw and ptr %val, i32 9 seq_cst, align 4

  old = __sync_fetch_and_or(&val, 0xa);
  // CHECK: atomicrmw or ptr %val, i32 10 seq_cst, align 4

  old = __sync_fetch_and_xor(&val, 0xb);
  // CHECK: atomicrmw xor ptr %val, i32 11 seq_cst, align 4

  old = __sync_fetch_and_nand(&val, 0xc);
  // CHECK: atomicrmw nand ptr %val, i32 12 seq_cst, align 4

  old = __sync_add_and_fetch(&val, 1);
  // CHECK: atomicrmw add ptr %val, i32 1 seq_cst, align 4

  old = __sync_sub_and_fetch(&val, 2);
  // CHECK: atomicrmw sub ptr %val, i32 2 seq_cst, align 4

  old = __sync_and_and_fetch(&valc, 3);
  // CHECK: atomicrmw and ptr %valc, i8 3 seq_cst, align 1

  old = __sync_or_and_fetch(&valc, 4);
  // CHECK: atomicrmw or ptr %valc, i8 4 seq_cst, align 1

  old = __sync_xor_and_fetch(&valc, 5);
  // CHECK: atomicrmw xor ptr %valc, i8 5 seq_cst, align 1

  old = __sync_nand_and_fetch(&valc, 6);
  // CHECK: atomicrmw nand ptr %valc, i8 6 seq_cst, align 1

  __sync_val_compare_and_swap((void **)0, (void *)0, (void *)0);
  // X86:      [[PAIR:%[a-z0-9_.]+]] = cmpxchg ptr null, i32 0, i32 0 seq_cst seq_cst, align 4
  // X86-NEXT: extractvalue { i32, i1 } [[PAIR]], 0
  // SYSTEMZ:      [[PAIR:%[a-z0-9_.]+]] = cmpxchg ptr null, i64 0, i64 0 seq_cst seq_cst, align 8
  // SYSTEMZ-NEXT: extractvalue { i64, i1 } [[PAIR]], 0
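  // The width of a pointer CAS is target-dependent: pointers are 32-bit on
  // i686-apple-darwin9 and 64-bit on s390x, hence i32 vs. i64.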

  if (__sync_val_compare_and_swap(&valb, 0, 1)) {
    // CHECK: [[PAIR:%[a-z0-9_.]+]] = cmpxchg ptr %valb, i8 0, i8 1 seq_cst seq_cst, align 1
    // CHECK: [[VAL:%[a-z0-9_.]+]] = extractvalue { i8, i1 } [[PAIR]], 0
    // CHECK: trunc i8 [[VAL]] to i1
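    // _Bool is stored as i8, so the returned value is truncated to i1 to form
    // the branch condition.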
    old = 42;
  }

  __sync_bool_compare_and_swap((void **)0, (void *)0, (void *)0);
  // X86:     cmpxchg ptr null, i32 0, i32 0 seq_cst seq_cst, align 4
  // SYSTEMZ: cmpxchg ptr null, i64 0, i64 0 seq_cst seq_cst, align 8

  __sync_lock_release(&val);
  // CHECK: store atomic i32 0, {{.*}} release, align 4

  __sync_lock_release(&ptrval);
  // X86:     store atomic i32 0, {{.*}} release, align 4
  // SYSTEMZ: store atomic i64 0, {{.*}} release, align 8
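  // __sync_lock_release is not an RMW operation: it is a plain atomic store
  // of zero with release ordering, sized to the pointee type.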

  __sync_synchronize();
  // CHECK: fence seq_cst
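  // A standalone fence with no associated memory access.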

  return old;
}

// CHECK: @release_return
void release_return(int *lock) {
  // Ensure this is actually returning void all the way through.
  return __sync_lock_release(lock);
  // CHECK: store atomic {{.*}} release, align 4
}


// Atomics with address spaces.
// CHECK: @addrspace
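// The source-level address space should be preserved on the pointer operand
// of the emitted cmpxchg and atomicrmw instructions.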
void addrspace(int  __attribute__((address_space(256))) * P) {
  __sync_bool_compare_and_swap(P, 0, 1);
  // CHECK: cmpxchg ptr addrspace(256){{.*}}, i32 0, i32 1 seq_cst seq_cst, align 4

  __sync_val_compare_and_swap(P, 0, 1);
  // CHECK: cmpxchg ptr addrspace(256){{.*}}, i32 0, i32 1 seq_cst seq_cst, align 4

  __sync_xor_and_fetch(P, 123);
  // CHECK: atomicrmw xor ptr addrspace(256){{.*}}, i32 123 seq_cst, align 4
}

// Ensure that global initialization of atomics is correct.
static _Atomic(int *) glob_pointer = (void *)0;
static _Atomic(int *) glob_pointer_from_int = 0;
_Atomic(int *) nonstatic_glob_pointer_from_int = 0LL;
static _Atomic int glob_int = 0;
static _Atomic float glob_flt = 0.0f;
static _Atomic double glob_dbl = 0.0f;
static _Atomic long double glob_longdbl = 0.0f;

void force_global_uses(void) {
  // X86:   %atomic-temp = alloca x86_fp80, align 16
  (void)glob_pointer;
  // CHECK: load atomic ptr, ptr @[[GLOB_POINTER]] seq_cst
  (void)glob_pointer_from_int;
  // CHECK-NEXT: load atomic ptr, ptr @[[GLOB_POINTER_FROM_INT]] seq_cst
  (void)nonstatic_glob_pointer_from_int;
  // CHECK-NEXT: load atomic ptr, ptr @[[NONSTATIC_GLOB_POINTER_FROM_INT]] seq_cst
  (void)glob_int;
  // CHECK-NEXT: load atomic i32, ptr @[[GLOB_INT]] seq_cst
  (void)glob_flt;
  // CHECK-NEXT: load atomic float, ptr @[[GLOB_FLT]] seq_cst
  (void)glob_dbl;
  // CHECK-NEXT: load atomic double, ptr @[[GLOB_DBL]] seq_cst
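  // On i686, _Atomic long double occupies 16 bytes (padded x86_fp80), which is
  // wider than the target's largest inline atomic, so the load is emitted as a
  // call to the __atomic_load libcall; s390x can load fp128 atomically inline.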
  (void)glob_longdbl;
  // X86:      call void @__atomic_load(i32 noundef 16, ptr noundef @glob_longdbl, ptr noundef %atomic-temp
  // X86-NEXT: %0 = load x86_fp80, ptr %atomic-temp, align 16
  // SYSTEMZ: load atomic fp128, ptr @[[GLOB_LONGDBL]] seq_cst
}