// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// RUN: %clang_cc1 -triple s390x-linux-gnu -O1 -emit-llvm %s -o - \
// RUN: | FileCheck %s
//
// Test __sync_ builtins for __int128 aligned to 16 bytes.
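//
// The CHECK lines below expect each builtin to lower to a native i128
// atomicrmw / cmpxchg (or atomic store) with 16-byte alignment.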

#include <stdint.h>

__int128 Ptr __attribute__((aligned(16)));
__int128 Val __attribute__((aligned(16)));
__int128 OldVal __attribute__((aligned(16)));

// CHECK-LABEL: @f1(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load i128, ptr @Val, align 16, !tbaa [[TBAA2:![0-9]+]]
// CHECK-NEXT: [[TMP1:%.*]] = atomicrmw add ptr @Ptr, i128 [[TMP0]] seq_cst, align 16
// CHECK-NEXT: store i128 [[TMP1]], ptr [[AGG_RESULT:%.*]], align 8, !tbaa [[TBAA2]]
// CHECK-NEXT: ret void
//
__int128 f1() {
  return __sync_fetch_and_add(&Ptr, Val);
}

// CHECK-LABEL: @f2(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load i128, ptr @Val, align 16, !tbaa [[TBAA2]]
// CHECK-NEXT: [[TMP1:%.*]] = atomicrmw sub ptr @Ptr, i128 [[TMP0]] seq_cst, align 16
// CHECK-NEXT: store i128 [[TMP1]], ptr [[AGG_RESULT:%.*]], align 8, !tbaa [[TBAA2]]
// CHECK-NEXT: ret void
//
__int128 f2() {
  return __sync_fetch_and_sub(&Ptr, Val);
}

// CHECK-LABEL: @f3(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load i128, ptr @Val, align 16, !tbaa [[TBAA2]]
// CHECK-NEXT: [[TMP1:%.*]] = atomicrmw or ptr @Ptr, i128 [[TMP0]] seq_cst, align 16
// CHECK-NEXT: store i128 [[TMP1]], ptr [[AGG_RESULT:%.*]], align 8, !tbaa [[TBAA2]]
// CHECK-NEXT: ret void
//
__int128 f3() {
  return __sync_fetch_and_or(&Ptr, Val);
}

// CHECK-LABEL: @f4(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load i128, ptr @Val, align 16, !tbaa [[TBAA2]]
// CHECK-NEXT: [[TMP1:%.*]] = atomicrmw and ptr @Ptr, i128 [[TMP0]] seq_cst, align 16
// CHECK-NEXT: store i128 [[TMP1]], ptr [[AGG_RESULT:%.*]], align 8, !tbaa [[TBAA2]]
// CHECK-NEXT: ret void
//
__int128 f4() {
  return __sync_fetch_and_and(&Ptr, Val);
}

// CHECK-LABEL: @f5(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load i128, ptr @Val, align 16, !tbaa [[TBAA2]]
// CHECK-NEXT: [[TMP1:%.*]] = atomicrmw xor ptr @Ptr, i128 [[TMP0]] seq_cst, align 16
// CHECK-NEXT: store i128 [[TMP1]], ptr [[AGG_RESULT:%.*]], align 8, !tbaa [[TBAA2]]
// CHECK-NEXT: ret void
//
__int128 f5() {
  return __sync_fetch_and_xor(&Ptr, Val);
}

// CHECK-LABEL: @f6(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load i128, ptr @Val, align 16, !tbaa [[TBAA2]]
// CHECK-NEXT: [[TMP1:%.*]] = atomicrmw nand ptr @Ptr, i128 [[TMP0]] seq_cst, align 16
// CHECK-NEXT: store i128 [[TMP1]], ptr [[AGG_RESULT:%.*]], align 8, !tbaa [[TBAA2]]
// CHECK-NEXT: ret void
//
__int128 f6() {
  return __sync_fetch_and_nand(&Ptr, Val);
}

// CHECK-LABEL: @f7(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load i128, ptr @Val, align 16, !tbaa [[TBAA2]]
// CHECK-NEXT: [[TMP1:%.*]] = atomicrmw add ptr @Ptr, i128 [[TMP0]] seq_cst, align 16
// CHECK-NEXT: [[TMP2:%.*]] = add i128 [[TMP1]], [[TMP0]]
// CHECK-NEXT: store i128 [[TMP2]], ptr [[AGG_RESULT:%.*]], align 8, !tbaa [[TBAA2]]
// CHECK-NEXT: ret void
//
__int128 f7() {
  return __sync_add_and_fetch(&Ptr, Val);
}

// CHECK-LABEL: @f8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load i128, ptr @Val, align 16, !tbaa [[TBAA2]]
// CHECK-NEXT: [[TMP1:%.*]] = atomicrmw sub ptr @Ptr, i128 [[TMP0]] seq_cst, align 16
// CHECK-NEXT: [[TMP2:%.*]] = sub i128 [[TMP1]], [[TMP0]]
// CHECK-NEXT: store i128 [[TMP2]], ptr [[AGG_RESULT:%.*]], align 8, !tbaa [[TBAA2]]
// CHECK-NEXT: ret void
//
__int128 f8() {
  return __sync_sub_and_fetch(&Ptr, Val);
}

// CHECK-LABEL: @f9(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load i128, ptr @Val, align 16, !tbaa [[TBAA2]]
// CHECK-NEXT: [[TMP1:%.*]] = atomicrmw or ptr @Ptr, i128 [[TMP0]] seq_cst, align 16
// CHECK-NEXT: [[TMP2:%.*]] = or i128 [[TMP1]], [[TMP0]]
// CHECK-NEXT: store i128 [[TMP2]], ptr [[AGG_RESULT:%.*]], align 8, !tbaa [[TBAA2]]
// CHECK-NEXT: ret void
//
__int128 f9() {
  return __sync_or_and_fetch(&Ptr, Val);
}

// CHECK-LABEL: @f10(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load i128, ptr @Val, align 16, !tbaa [[TBAA2]]
// CHECK-NEXT: [[TMP1:%.*]] = atomicrmw and ptr @Ptr, i128 [[TMP0]] seq_cst, align 16
// CHECK-NEXT: [[TMP2:%.*]] = and i128 [[TMP1]], [[TMP0]]
// CHECK-NEXT: store i128 [[TMP2]], ptr [[AGG_RESULT:%.*]], align 8, !tbaa [[TBAA2]]
// CHECK-NEXT: ret void
//
__int128 f10() {
  return __sync_and_and_fetch(&Ptr, Val);
}

// CHECK-LABEL: @f11(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load i128, ptr @Val, align 16, !tbaa [[TBAA2]]
// CHECK-NEXT: [[TMP1:%.*]] = atomicrmw xor ptr @Ptr, i128 [[TMP0]] seq_cst, align 16
// CHECK-NEXT: [[TMP2:%.*]] = xor i128 [[TMP1]], [[TMP0]]
// CHECK-NEXT: store i128 [[TMP2]], ptr [[AGG_RESULT:%.*]], align 8, !tbaa [[TBAA2]]
// CHECK-NEXT: ret void
//
__int128 f11() {
  return __sync_xor_and_fetch(&Ptr, Val);
}

// CHECK-LABEL: @f12(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load i128, ptr @Val, align 16, !tbaa [[TBAA2]]
// CHECK-NEXT: [[TMP1:%.*]] = atomicrmw nand ptr @Ptr, i128 [[TMP0]] seq_cst, align 16
// CHECK-NEXT: [[TMP2:%.*]] = and i128 [[TMP1]], [[TMP0]]
// CHECK-NEXT: [[TMP3:%.*]] = xor i128 [[TMP2]], -1
// CHECK-NEXT: store i128 [[TMP3]], ptr [[AGG_RESULT:%.*]], align 8, !tbaa [[TBAA2]]
// CHECK-NEXT: ret void
//
__int128 f12() {
  return __sync_nand_and_fetch(&Ptr, Val);
}

// CHECK-LABEL: @f13(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load i128, ptr @OldVal, align 16, !tbaa [[TBAA2]]
// CHECK-NEXT: [[TMP1:%.*]] = load i128, ptr @Val, align 16, !tbaa [[TBAA2]]
// CHECK-NEXT: [[TMP2:%.*]] = cmpxchg ptr @Ptr, i128 [[TMP0]], i128 [[TMP1]] seq_cst seq_cst, align 16
// CHECK-NEXT: [[TMP3:%.*]] = extractvalue { i128, i1 } [[TMP2]], 1
// CHECK-NEXT: ret i1 [[TMP3]]
//
_Bool f13() {
  return __sync_bool_compare_and_swap(&Ptr, OldVal, Val);
}

// CHECK-LABEL: @f14(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load i128, ptr @OldVal, align 16, !tbaa [[TBAA2]]
// CHECK-NEXT: [[TMP1:%.*]] = load i128, ptr @Val, align 16, !tbaa [[TBAA2]]
// CHECK-NEXT: [[TMP2:%.*]] = cmpxchg ptr @Ptr, i128 [[TMP0]], i128 [[TMP1]] seq_cst seq_cst, align 16
// CHECK-NEXT: [[TMP3:%.*]] = extractvalue { i128, i1 } [[TMP2]], 0
// CHECK-NEXT: store i128 [[TMP3]], ptr [[AGG_RESULT:%.*]], align 8, !tbaa [[TBAA2]]
// CHECK-NEXT: ret void
//
__int128 f14() {
  return __sync_val_compare_and_swap(&Ptr, OldVal, Val);
}

// CHECK-LABEL: @f15(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load i128, ptr @Val, align 16, !tbaa [[TBAA2]]
// CHECK-NEXT: [[TMP1:%.*]] = atomicrmw xchg ptr @Ptr, i128 [[TMP0]] seq_cst, align 16
// CHECK-NEXT: store i128 [[TMP1]], ptr [[AGG_RESULT:%.*]], align 8, !tbaa [[TBAA2]]
// CHECK-NEXT: ret void
//
__int128 f15() {
  return __sync_lock_test_and_set(&Ptr, Val);
}

// CHECK-LABEL: @f16(
// CHECK-NEXT: entry:
// CHECK-NEXT: store atomic i128 0, ptr @Ptr release, align 16
// CHECK-NEXT: ret void
//
void f16() {
  return __sync_lock_release(&Ptr);
}

// CHECK-LABEL: @f17(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load i128, ptr @Val, align 16, !tbaa [[TBAA2]]
// CHECK-NEXT: [[TMP1:%.*]] = atomicrmw xchg ptr @Ptr, i128 [[TMP0]] seq_cst, align 16
// CHECK-NEXT: store i128 [[TMP1]], ptr [[AGG_RESULT:%.*]], align 8, !tbaa [[TBAA2]]
// CHECK-NEXT: ret void
//
__int128 f17() {
  return __sync_swap(&Ptr, Val);
}

// Test that a statement expression compiles.
// CHECK-LABEL: @f18(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[T_ADDR:%.*]] = alloca i128, align 8
// CHECK-NEXT: [[T:%.*]] = load i128, ptr [[TMP0:%.*]], align 8, !tbaa [[TBAA2]]
// CHECK-NEXT: store i128 [[T]], ptr [[T_ADDR]], align 8, !tbaa [[TBAA2]]
// CHECK-NEXT: [[TMP1:%.*]] = cmpxchg ptr [[T_ADDR]], i128 [[T]], i128 [[T]] seq_cst seq_cst, align 16
// CHECK-NEXT: ret void
//
void f18(__int128 t) {
  __sync_bool_compare_and_swap(({int x = 1; &t;}), t, t);
}