; RUN: opt < %s -passes=dfsan -S | FileCheck %s
; RUN: opt < %s -passes=dfsan -dfsan-track-origins=1 -S | FileCheck %s --check-prefixes=CHECK,CHECK_ORIGIN
; RUN: opt < %s -passes=dfsan -dfsan-track-origins=1 -dfsan-instrument-with-call-threshold=0 -S | FileCheck %s --check-prefixes=CHECK,CHECK_ORIGIN
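
; COMM: The test is exercised in three configurations: plain label propagation,
; COMM: label plus origin tracking, and origin tracking with
; COMM: -dfsan-instrument-with-call-threshold=0. Both origin-tracking runs are
; COMM: checked against the same CHECK_ORIGIN expectations, so lowering the
; COMM: call threshold is not expected to change how atomics are instrumented.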
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"

; CHECK: @__dfsan_arg_tls = external thread_local(initialexec) global [[TLS_ARR:\[100 x i64\]]]
; CHECK: @__dfsan_retval_tls = external thread_local(initialexec) global [[TLS_ARR]]
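
; COMM: In the checks below, a shadow address is derived from an application
; COMM: address by a ptrtoint / xor-with-mask / inttoptr sequence, and
; COMM: argument and return labels travel through the __dfsan_arg_tls and
; COMM: __dfsan_retval_tls thread-local slots (plus the *_origin_tls slots
; COMM: when -dfsan-track-origins is enabled).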
define i32 @AtomicRmwXchg(ptr %p, i32 %x) {
entry:
  ; COMM: atomicrmw xchg: store clean shadow/origin, return clean shadow/origin

  ; CHECK-LABEL:       @AtomicRmwXchg.dfsan
  ; CHECK-NOT:         @__dfsan_arg_origin_tls
  ; CHECK-NOT:         @__dfsan_arg_tls
  ; CHECK:             %[[#INTP:]] = ptrtoint ptr %p to i64
  ; CHECK-NEXT:        %[[#SHADOW_OFFSET:]] = xor i64 %[[#INTP]], [[#%.10d,MASK:]]
  ; CHECK-NEXT:        %[[#SHADOW_PTR:]] = inttoptr i64 %[[#SHADOW_OFFSET]] to ptr
  ; CHECK-NEXT:        store i[[#NUM_BITS:32]] 0, ptr %[[#SHADOW_PTR]], align 1
  ; CHECK-NEXT:        atomicrmw xchg ptr %p, i32 %x seq_cst
  ; CHECK-NEXT:        store i8 0, ptr @__dfsan_retval_tls, align 2
  ; CHECK_ORIGIN-NEXT: store i32 0, ptr @__dfsan_retval_origin_tls, align 4
  ; CHECK-NEXT:        ret i32

  %0 = atomicrmw xchg ptr %p, i32 %x seq_cst
  ret i32 %0
}
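
; COMM: For atomicrmw (and for cmpxchg below) the pass does not attempt to
; COMM: propagate labels through the read-modify-write: it writes a clean
; COMM: (zero) shadow for the memory at %p before the operation and marks the
; COMM: result itself as clean (zero label, and zero origin in origin mode).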

define i32 @AtomicRmwMax(ptr %p, i32 %x) {
  ; COMM: atomicrmw max: exactly the same as above

  ; CHECK-LABEL:       @AtomicRmwMax.dfsan
  ; CHECK-NOT:         @__dfsan_arg_origin_tls
  ; CHECK-NOT:         @__dfsan_arg_tls
  ; CHECK:             %[[#INTP:]] = ptrtoint ptr %p to i64
  ; CHECK-NEXT:        %[[#SHADOW_OFFSET:]] = xor i64 %[[#INTP]], [[#%.10d,MASK:]]
  ; CHECK-NEXT:        %[[#SHADOW_PTR:]] = inttoptr i64 %[[#SHADOW_OFFSET]] to ptr
  ; CHECK-NEXT:        store i[[#NUM_BITS]] 0, ptr %[[#SHADOW_PTR]], align 1
  ; CHECK-NEXT:        atomicrmw max ptr %p, i32 %x seq_cst
  ; CHECK-NEXT:        store i8 0, ptr @__dfsan_retval_tls, align 2
  ; CHECK_ORIGIN-NEXT: store i32 0, ptr @__dfsan_retval_origin_tls, align 4
  ; CHECK-NEXT:        ret i32

entry:
  %0 = atomicrmw max ptr %p, i32 %x seq_cst
  ret i32 %0
}


define i32 @Cmpxchg(ptr %p, i32 %a, i32 %b) {
  ; COMM: cmpxchg: store clean shadow/origin, return clean shadow/origin

  ; CHECK-LABEL:       @Cmpxchg.dfsan
  ; CHECK-NOT:         @__dfsan_arg_origin_tls
  ; CHECK-NOT:         @__dfsan_arg_tls
  ; CHECK:             %[[#INTP:]] = ptrtoint ptr %p to i64
  ; CHECK-NEXT:        %[[#SHADOW_OFFSET:]] = xor i64 %[[#INTP]], [[#%.10d,MASK:]]
  ; CHECK-NEXT:        %[[#SHADOW_PTR:]] = inttoptr i64 %[[#SHADOW_OFFSET]] to ptr
  ; CHECK-NEXT:        store i[[#NUM_BITS]] 0, ptr %[[#SHADOW_PTR]], align 1
  ; CHECK-NEXT:        %pair = cmpxchg ptr %p, i32 %a, i32 %b seq_cst seq_cst
  ; CHECK:             store i8 0, ptr @__dfsan_retval_tls, align 2
  ; CHECK_ORIGIN-NEXT: store i32 0, ptr @__dfsan_retval_origin_tls, align 4
  ; CHECK-NEXT:        ret i32

entry:
  %pair = cmpxchg ptr %p, i32 %a, i32 %b seq_cst seq_cst
  %0 = extractvalue { i32, i1 } %pair, 0
  ret i32 %0
}


define i32 @CmpxchgMonotonic(ptr %p, i32 %a, i32 %b) {
  ; COMM: relaxed cmpxchg: bump up to "release monotonic"

  ; CHECK-LABEL:       @CmpxchgMonotonic.dfsan
  ; CHECK-NOT:         @__dfsan_arg_origin_tls
  ; CHECK-NOT:         @__dfsan_arg_tls
  ; CHECK:             %[[#INTP:]] = ptrtoint ptr %p to i64
  ; CHECK-NEXT:        %[[#SHADOW_OFFSET:]] = xor i64 %[[#INTP]], [[#%.10d,MASK:]]
  ; CHECK-NEXT:        %[[#SHADOW_PTR:]] = inttoptr i64 %[[#SHADOW_OFFSET]] to ptr
  ; CHECK-NEXT:        store i[[#NUM_BITS]] 0, ptr %[[#SHADOW_PTR]], align 1
  ; CHECK-NEXT:        %pair = cmpxchg ptr %p, i32 %a, i32 %b release monotonic
  ; CHECK:             store i8 0, ptr @__dfsan_retval_tls, align 2
  ; CHECK_ORIGIN-NEXT: store i32 0, ptr @__dfsan_retval_origin_tls, align 4
  ; CHECK-NEXT:        ret i32

entry:
  %pair = cmpxchg ptr %p, i32 %a, i32 %b monotonic monotonic
  %0 = extractvalue { i32, i1 } %pair, 0
  ret i32 %0
}
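
; COMM: For a monotonic cmpxchg only the success ordering is strengthened, to
; COMM: release; the monotonic failure ordering is left alone, giving the
; COMM: "release monotonic" pair matched above.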



define i32 @AtomicLoad(ptr %p) {
  ; COMM: atomic load: load shadow value after app value

  ; CHECK-LABEL:  @AtomicLoad.dfsan
  ; CHECK_ORIGIN: %[[#PO:]] = load i32, ptr @__dfsan_arg_origin_tls, align 4
  ; CHECK:        %[[#PS:]] = load i8, ptr @__dfsan_arg_tls, align 2
  ; CHECK:        %a = load atomic i32, ptr %p seq_cst, align 16
  ; CHECK:        %[[#SHADOW_PTR:]] = inttoptr i64 {{.*}} to ptr
  ; CHECK_ORIGIN: %[[#ORIGIN_PTR:]] = inttoptr i64 {{.*}} to ptr
  ; CHECK_ORIGIN: %[[#AO:]] = load i32, ptr %[[#ORIGIN_PTR]], align 16
  ; CHECK:        load i[[#NUM_BITS]], ptr %[[#SHADOW_PTR]], align 1
  ; CHECK:        %[[#AP_S:]] = or i8 {{.*}}, %[[#PS]]
  ; CHECK_ORIGIN: %[[#PS_NZ:]] = icmp ne i8 %[[#PS]], 0
  ; CHECK_ORIGIN: %[[#AP_O:]] = select i1 %[[#PS_NZ]], i32 %[[#PO]], i32 %[[#AO]]
  ; CHECK:        store i8 %[[#AP_S]], ptr @__dfsan_retval_tls, align 2
  ; CHECK_ORIGIN: store i32 %[[#AP_O]], ptr @__dfsan_retval_origin_tls, align 4
  ; CHECK:        ret i32 %a

entry:
  %a = load atomic i32, ptr %p seq_cst, align 16
  ret i32 %a
}
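
; COMM: For atomic loads the shadow (and origin) of the loaded memory is read
; COMM: after the application load. The loaded shadow is or'ed with the
; COMM: pointer's label (the default -dfsan-combine-pointer-labels-on-load
; COMM: behavior), and in origin mode the pointer's origin is preferred
; COMM: whenever the pointer label is non-zero, hence the icmp/select pair.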


define i32 @AtomicLoadAcquire(ptr %p) {
  ; COMM: atomic load: load shadow value after app value

  ; CHECK-LABEL:  @AtomicLoadAcquire.dfsan
  ; CHECK_ORIGIN: %[[#PO:]] = load i32, ptr @__dfsan_arg_origin_tls, align 4
  ; CHECK:        %[[#PS:]] = load i8, ptr @__dfsan_arg_tls, align 2
  ; CHECK:        %a = load atomic i32, ptr %p acquire, align 16
  ; CHECK:        %[[#SHADOW_PTR:]] = inttoptr i64 {{.*}} to ptr
  ; CHECK_ORIGIN: %[[#ORIGIN_PTR:]] = inttoptr i64 {{.*}} to ptr
  ; CHECK_ORIGIN: %[[#AO:]] = load i32, ptr %[[#ORIGIN_PTR]], align 16
  ; CHECK:        load i[[#NUM_BITS]], ptr %[[#SHADOW_PTR]], align 1
  ; CHECK:        %[[#AP_S:]] = or i8 {{.*}}, %[[#PS]]
  ; CHECK_ORIGIN: %[[#PS_NZ:]] = icmp ne i8 %[[#PS]], 0
  ; CHECK_ORIGIN: %[[#AP_O:]] = select i1 %[[#PS_NZ]], i32 %[[#PO]], i32 %[[#AO]]
  ; CHECK:        store i8 %[[#AP_S]], ptr @__dfsan_retval_tls, align 2
  ; CHECK_ORIGIN: store i32 %[[#AP_O]], ptr @__dfsan_retval_origin_tls, align 4
  ; CHECK:        ret i32 %a

entry:
  %a = load atomic i32, ptr %p acquire, align 16
  ret i32 %a
}


define i32 @AtomicLoadMonotonic(ptr %p) {
  ; COMM: atomic load monotonic: bump up to load acquire

  ; CHECK-LABEL:  @AtomicLoadMonotonic.dfsan
  ; CHECK_ORIGIN: %[[#PO:]] = load i32, ptr @__dfsan_arg_origin_tls, align 4
  ; CHECK:        %[[#PS:]] = load i8, ptr @__dfsan_arg_tls, align 2
  ; CHECK:        %a = load atomic i32, ptr %p acquire, align 16
  ; CHECK:        %[[#SHADOW_PTR:]] = inttoptr i64 {{.*}} to ptr
  ; CHECK_ORIGIN: %[[#ORIGIN_PTR:]] = inttoptr i64 {{.*}} to ptr
  ; CHECK_ORIGIN: %[[#AO:]] = load i32, ptr %[[#ORIGIN_PTR]], align 16
  ; CHECK:        load i[[#NUM_BITS]], ptr %[[#SHADOW_PTR]], align 1
  ; CHECK:        %[[#AP_S:]] = or i8 {{.*}}, %[[#PS]]
  ; CHECK_ORIGIN: %[[#PS_NZ:]] = icmp ne i8 %[[#PS]], 0
  ; CHECK_ORIGIN: %[[#AP_O:]] = select i1 %[[#PS_NZ]], i32 %[[#PO]], i32 %[[#AO]]
  ; CHECK:        store i8 %[[#AP_S]], ptr @__dfsan_retval_tls, align 2
  ; CHECK_ORIGIN: store i32 %[[#AP_O]], ptr @__dfsan_retval_origin_tls, align 4
  ; CHECK:        ret i32 %a

entry:
  %a = load atomic i32, ptr %p monotonic, align 16
  ret i32 %a
}

define i32 @AtomicLoadUnordered(ptr %p) {
  ; COMM: atomic load unordered: bump up to load acquire

  ; CHECK-LABEL:  @AtomicLoadUnordered.dfsan
  ; CHECK_ORIGIN: %[[#PO:]] = load i32, ptr @__dfsan_arg_origin_tls, align 4
  ; CHECK:        %[[#PS:]] = load i8, ptr @__dfsan_arg_tls, align 2
  ; CHECK:        %a = load atomic i32, ptr %p acquire, align 16
  ; CHECK:        %[[#SHADOW_PTR:]] = inttoptr i64 {{.*}} to ptr
  ; CHECK_ORIGIN: %[[#ORIGIN_PTR:]] = inttoptr i64 {{.*}} to ptr
  ; CHECK_ORIGIN: %[[#AO:]] = load i32, ptr %[[#ORIGIN_PTR]], align 16
  ; CHECK:        load i[[#NUM_BITS]], ptr %[[#SHADOW_PTR]], align 1
  ; CHECK:        %[[#AP_S:]] = or i8 {{.*}}, %[[#PS]]
  ; CHECK_ORIGIN: %[[#PS_NZ:]] = icmp ne i8 %[[#PS]], 0
  ; CHECK_ORIGIN: %[[#AP_O:]] = select i1 %[[#PS_NZ]], i32 %[[#PO]], i32 %[[#AO]]
  ; CHECK:        store i8 %[[#AP_S]], ptr @__dfsan_retval_tls, align 2
  ; CHECK_ORIGIN: store i32 %[[#AP_O]], ptr @__dfsan_retval_origin_tls, align 4
  ; CHECK:        ret i32 %a

entry:
  %a = load atomic i32, ptr %p unordered, align 16
  ret i32 %a
}
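
; COMM: Monotonic and unordered atomic loads are strengthened to acquire;
; COMM: presumably this keeps the shadow load, which is emitted after the
; COMM: application load, from observing stale shadow for the value just read.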

define void @AtomicStore(ptr %p, i32 %x) {
  ; COMM: atomic store: store clean shadow value before app value

  ; CHECK-LABEL:       @AtomicStore.dfsan
  ; CHECK-NOT:         @__dfsan_arg_origin_tls
  ; CHECK-NOT:         @__dfsan_arg_tls
  ; CHECK_ORIGIN-NOT:  35184372088832
  ; CHECK:             %[[#INTP:]] = ptrtoint ptr %p to i64
  ; CHECK-NEXT:        %[[#SHADOW_OFFSET:]] = xor i64 %[[#INTP]], [[#%.10d,MASK:]]
  ; CHECK-NEXT:        %[[#SHADOW_PTR:]] = inttoptr i64 %[[#SHADOW_OFFSET]] to ptr
  ; CHECK-NEXT:        store i[[#NUM_BITS]] 0, ptr %[[#SHADOW_PTR]], align 1
  ; CHECK:             store atomic i32 %x, ptr %p seq_cst, align 16
  ; CHECK:             ret void

entry:
  store atomic i32 %x, ptr %p seq_cst, align 16
  ret void
}
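
; COMM: Atomic stores only clear the shadow of %p before the application
; COMM: store; the stored value's label is not propagated to shadow memory,
; COMM: and in origin mode no origin write is emitted (the excluded
; COMM: 35184372088832 constant is presumably part of the origin address
; COMM: computation).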

define void @AtomicStoreRelease(ptr %p, i32 %x) {
  ; COMM: atomic store: store clean shadow value before app value

  ; CHECK-LABEL:       @AtomicStoreRelease.dfsan
  ; CHECK-NOT:         @__dfsan_arg_origin_tls
  ; CHECK-NOT:         @__dfsan_arg_tls
  ; CHECK_ORIGIN-NOT:  35184372088832
  ; CHECK:             %[[#INTP:]] = ptrtoint ptr %p to i64
  ; CHECK-NEXT:        %[[#SHADOW_OFFSET:]] = xor i64 %[[#INTP]], [[#%.10d,MASK:]]
  ; CHECK-NEXT:        %[[#SHADOW_PTR:]] = inttoptr i64 %[[#SHADOW_OFFSET]] to ptr
  ; CHECK-NEXT:        store i[[#NUM_BITS]] 0, ptr %[[#SHADOW_PTR]], align 1
  ; CHECK:             store atomic i32 %x, ptr %p release, align 16
  ; CHECK:             ret void

entry:
  store atomic i32 %x, ptr %p release, align 16
  ret void
}

define void @AtomicStoreMonotonic(ptr %p, i32 %x) {
  ; COMM: atomic store monotonic: bumped up to store release

  ; CHECK-LABEL:       @AtomicStoreMonotonic.dfsan
  ; CHECK-NOT:         @__dfsan_arg_origin_tls
  ; CHECK-NOT:         @__dfsan_arg_tls
  ; CHECK_ORIGIN-NOT:  35184372088832
  ; CHECK:             %[[#INTP:]] = ptrtoint ptr %p to i64
  ; CHECK-NEXT:        %[[#SHADOW_OFFSET:]] = xor i64 %[[#INTP]], [[#%.10d,MASK:]]
  ; CHECK-NEXT:        %[[#SHADOW_PTR:]] = inttoptr i64 %[[#SHADOW_OFFSET]] to ptr
  ; CHECK-NEXT:        store i[[#NUM_BITS]] 0, ptr %[[#SHADOW_PTR]], align 1
  ; CHECK:             store atomic i32 %x, ptr %p release, align 16
  ; CHECK:             ret void

entry:
  store atomic i32 %x, ptr %p monotonic, align 16
  ret void
}

define void @AtomicStoreUnordered(ptr %p, i32 %x) {
  ; COMM: atomic store unordered: bumped up to store release

  ; CHECK-LABEL:       @AtomicStoreUnordered.dfsan
  ; CHECK-NOT:         @__dfsan_arg_origin_tls
  ; CHECK-NOT:         @__dfsan_arg_tls
  ; CHECK_ORIGIN-NOT:  35184372088832
  ; CHECK:             %[[#INTP:]] = ptrtoint ptr %p to i64
  ; CHECK-NEXT:        %[[#SHADOW_OFFSET:]] = xor i64 %[[#INTP]], [[#%.10d,MASK:]]
  ; CHECK-NEXT:        %[[#SHADOW_PTR:]] = inttoptr i64 %[[#SHADOW_OFFSET]] to ptr
  ; CHECK-NEXT:        store i[[#NUM_BITS]] 0, ptr %[[#SHADOW_PTR]], align 1
  ; CHECK:             store atomic i32 %x, ptr %p release, align 16
  ; CHECK:             ret void

entry:
  store atomic i32 %x, ptr %p unordered, align 16
  ret void
}
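
; COMM: As with the loads, monotonic and unordered atomic stores are bumped up
; COMM: to release, presumably so that the clean shadow written just before
; COMM: the application store is published no later than the store itself.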