; xref: /llvm-project/llvm/test/Instrumentation/DataFlowSanitizer/array.ll (revision ea981165a4ef2d6e8be0655f04cc4b61604db6d4)
; RUN: opt < %s -dfsan -S | FileCheck %s --check-prefix=LEGACY
; RUN: opt < %s -dfsan -dfsan-fast-16-labels=true -dfsan-event-callbacks=true -S | FileCheck %s --check-prefix=EVENT_CALLBACKS
; RUN: opt < %s -dfsan -dfsan-args-abi -S | FileCheck %s --check-prefix=ARGS_ABI
; RUN: opt < %s -dfsan -dfsan-fast-16-labels=true -S | FileCheck %s --check-prefix=FAST16
; RUN: opt < %s -dfsan -dfsan-fast-16-labels=true -dfsan-combine-pointer-labels-on-load=false -S | FileCheck %s --check-prefix=NO_COMBINE_LOAD_PTR
; RUN: opt < %s -dfsan -dfsan-fast-16-labels=true -dfsan-combine-pointer-labels-on-store=true -S | FileCheck %s --check-prefix=COMBINE_STORE_PTR
; RUN: opt < %s -dfsan -dfsan-fast-16-labels=true -dfsan-debug-nonzero-labels -S | FileCheck %s --check-prefix=DEBUG_NONZERO_LABELS
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"

; Array arguments and return values are shadowed through the TLS slots;
; with -dfsan-debug-nonzero-labels each element label is OR-ed together
; and checked against zero.
define [4 x i8] @pass_array([4 x i8] %a) {
  ; NO_COMBINE_LOAD_PTR: @"dfs$pass_array"
  ; NO_COMBINE_LOAD_PTR: %1 = load [4 x i16], [4 x i16]* bitcast ([100 x i64]* @__dfsan_arg_tls to [4 x i16]*), align [[ALIGN:2]]
  ; NO_COMBINE_LOAD_PTR: store [4 x i16] %1, [4 x i16]* bitcast ([100 x i64]* @__dfsan_retval_tls to [4 x i16]*), align [[ALIGN]]

  ; ARGS_ABI: @"dfs$pass_array"
  ; ARGS_ABI: ret { [4 x i8], i16 }

  ; DEBUG_NONZERO_LABELS: @"dfs$pass_array"
  ; DEBUG_NONZERO_LABELS: [[L:%.*]] = load [4 x i16], [4 x i16]* bitcast ([100 x i64]* @__dfsan_arg_tls to [4 x i16]*), align [[ALIGN:2]]
  ; DEBUG_NONZERO_LABELS: [[L0:%.*]] = extractvalue [4 x i16] [[L]], 0
  ; DEBUG_NONZERO_LABELS: [[L1:%.*]] = extractvalue [4 x i16] [[L]], 1
  ; DEBUG_NONZERO_LABELS: [[L01:%.*]] = or i16 [[L0]], [[L1]]
  ; DEBUG_NONZERO_LABELS: [[L2:%.*]] = extractvalue [4 x i16] [[L]], 2
  ; DEBUG_NONZERO_LABELS: [[L012:%.*]] = or i16 [[L01]], [[L2]]
  ; DEBUG_NONZERO_LABELS: [[L3:%.*]] = extractvalue [4 x i16] [[L]], 3
  ; DEBUG_NONZERO_LABELS: [[L0123:%.*]] = or i16 [[L012]], [[L3]]
  ; DEBUG_NONZERO_LABELS: {{.*}} = icmp ne i16 [[L0123]], 0
  ; DEBUG_NONZERO_LABELS: call void @__dfsan_nonzero_label()

  ret [4 x i8] %a
}

%ArrayOfStruct = type [4 x {i8*, i32}]

; Arrays of structs get per-field shadow in the TLS aggregate.
define %ArrayOfStruct @pass_array_of_struct(%ArrayOfStruct %as) {
  ; NO_COMBINE_LOAD_PTR: @"dfs$pass_array_of_struct"
  ; NO_COMBINE_LOAD_PTR: %1 = load [4 x { i16, i16 }], [4 x { i16, i16 }]* bitcast ([100 x i64]* @__dfsan_arg_tls to [4 x { i16, i16 }]*), align [[ALIGN:2]]
  ; NO_COMBINE_LOAD_PTR: store [4 x { i16, i16 }] %1, [4 x { i16, i16 }]* bitcast ([100 x i64]* @__dfsan_retval_tls to [4 x { i16, i16 }]*), align [[ALIGN]]

  ; ARGS_ABI: @"dfs$pass_array_of_struct"
  ; ARGS_ABI: ret { [4 x { i8*, i32 }], i16 }
  ret %ArrayOfStruct %as
}

; Returning a pointer to an alloca carries a zero label.
define [4 x i1]* @alloca_ret_array() {
  ; NO_COMBINE_LOAD_PTR: @"dfs$alloca_ret_array"
  ; NO_COMBINE_LOAD_PTR: store i16 0, i16* bitcast ([100 x i64]* @__dfsan_retval_tls to i16*), align 2
  %p = alloca [4 x i1]
  ret [4 x i1]* %p
}

; Loading an array from an alloca reuses one stack shadow slot for
; every element of the returned shadow aggregate.
define [4 x i1] @load_alloca_array() {
  ; NO_COMBINE_LOAD_PTR: @"dfs$load_alloca_array"
  ; NO_COMBINE_LOAD_PTR: [[A:%.*]] = alloca i16, align [[ALIGN:2]]
  ; NO_COMBINE_LOAD_PTR: [[M:%.*]] = load i16, i16* [[A]], align [[ALIGN]]
  ; NO_COMBINE_LOAD_PTR: [[S0:%.*]] = insertvalue [4 x i16] undef, i16 [[M]], 0
  ; NO_COMBINE_LOAD_PTR: [[S1:%.*]] = insertvalue [4 x i16] [[S0]], i16 [[M]], 1
  ; NO_COMBINE_LOAD_PTR: [[S2:%.*]] = insertvalue [4 x i16] [[S1]], i16 [[M]], 2
  ; NO_COMBINE_LOAD_PTR: [[S3:%.*]] = insertvalue [4 x i16] [[S2]], i16 [[M]], 3
  ; NO_COMBINE_LOAD_PTR: store [4 x i16] [[S3]], [4 x i16]* bitcast ([100 x i64]* @__dfsan_retval_tls to [4 x i16]*), align [[ALIGN]]
  %p = alloca [4 x i1]
  %a = load [4 x i1], [4 x i1]* %p
  ret [4 x i1] %a
}

; A zero-length array load produces a zeroinitializer shadow.
define [0 x i1] @load_array0([0 x i1]* %p) {
  ; NO_COMBINE_LOAD_PTR: @"dfs$load_array0"
  ; NO_COMBINE_LOAD_PTR: store [0 x i16] zeroinitializer, [0 x i16]* bitcast ([100 x i64]* @__dfsan_retval_tls to [0 x i16]*), align 2
  %a = load [0 x i1], [0 x i1]* %p
  ret [0 x i1] %a
}

; Single-element array load: one shadow load, optionally combined with
; the pointer label (or-combine in fast16, __dfsan_union in legacy).
define [1 x i1] @load_array1([1 x i1]* %p) {
  ; NO_COMBINE_LOAD_PTR: @"dfs$load_array1"
  ; NO_COMBINE_LOAD_PTR: [[L:%.*]] = load i16,
  ; NO_COMBINE_LOAD_PTR: [[S:%.*]] = insertvalue [1 x i16] undef, i16 [[L]], 0
  ; NO_COMBINE_LOAD_PTR: store [1 x i16] [[S]], [1 x i16]* bitcast ([100 x i64]* @__dfsan_retval_tls to [1 x i16]*), align 2

  ; EVENT_CALLBACKS: @"dfs$load_array1"
  ; EVENT_CALLBACKS: [[L:%.*]] = or i16
  ; EVENT_CALLBACKS: call void @__dfsan_load_callback(i16 [[L]], i8* {{.*}})

  ; FAST16: @"dfs$load_array1"
  ; FAST16: [[P:%.*]] = load i16, i16* bitcast ([100 x i64]* @__dfsan_arg_tls to i16*), align [[ALIGN:2]]
  ; FAST16: [[L:%.*]] = load i16, i16* {{.*}}, align [[ALIGN]]
  ; FAST16: [[U:%.*]] = or i16 [[L]], [[P]]
  ; FAST16: [[S1:%.*]] = insertvalue [1 x i16] undef, i16 [[U]], 0
  ; FAST16: store [1 x i16] [[S1]], [1 x i16]* bitcast ([100 x i64]* @__dfsan_retval_tls to [1 x i16]*), align [[ALIGN]]

  ; LEGACY: @"dfs$load_array1"
  ; LEGACY: [[P:%.*]] = load i16, i16* bitcast ([100 x i64]* @__dfsan_arg_tls to i16*), align [[ALIGN:2]]
  ; LEGACY: [[L:%.*]] = load i16, i16* {{.*}}, align [[ALIGN]]
  ; LEGACY: [[U:%.*]] = call zeroext i16 @__dfsan_union(i16 zeroext [[L]], i16 zeroext [[P]])
  ; LEGACY: [[PH:%.*]] = phi i16 [ [[U]], {{.*}} ], [ [[L]], {{.*}} ]
  ; LEGACY: store i16 [[PH]], i16* bitcast ([100 x i64]* @__dfsan_retval_tls to i16*), align [[ALIGN]]

  %a = load [1 x i1], [1 x i1]* %p
  ret [1 x i1] %a
}

; Two-element array load: the two element shadows are OR-ed and the
; combined label is replicated into each slot of the result shadow.
define [2 x i1] @load_array2([2 x i1]* %p) {
  ; NO_COMBINE_LOAD_PTR: @"dfs$load_array2"
  ; NO_COMBINE_LOAD_PTR: [[P1:%.*]] = getelementptr i16, i16* [[P0:%.*]], i64 1
  ; NO_COMBINE_LOAD_PTR-DAG: [[E1:%.*]] = load i16, i16* [[P1]], align [[ALIGN:2]]
  ; NO_COMBINE_LOAD_PTR-DAG: [[E0:%.*]] = load i16, i16* [[P0]], align [[ALIGN]]
  ; NO_COMBINE_LOAD_PTR: [[U:%.*]] = or i16 [[E0]], [[E1]]
  ; NO_COMBINE_LOAD_PTR: [[S1:%.*]] = insertvalue [2 x i16] undef, i16 [[U]], 0
  ; NO_COMBINE_LOAD_PTR: [[S2:%.*]] = insertvalue [2 x i16] [[S1]], i16 [[U]], 1
  ; NO_COMBINE_LOAD_PTR: store [2 x i16] [[S2]], [2 x i16]* bitcast ([100 x i64]* @__dfsan_retval_tls to [2 x i16]*), align [[ALIGN]]

  ; EVENT_CALLBACKS: @"dfs$load_array2"
  ; EVENT_CALLBACKS: [[O1:%.*]] = or i16
  ; EVENT_CALLBACKS: [[O2:%.*]] = or i16 [[O1]]
  ; EVENT_CALLBACKS: call void @__dfsan_load_callback(i16 [[O2]], i8* {{.*}})

  ; FAST16: @"dfs$load_array2"
  ; FAST16: [[P:%.*]] = load i16, i16* bitcast ([100 x i64]* @__dfsan_arg_tls to i16*), align [[ALIGN:2]]
  ; FAST16: [[O:%.*]] = or i16
  ; FAST16: [[U:%.*]] = or i16 [[O]], [[P]]
  ; FAST16: [[S:%.*]] = insertvalue [2 x i16] undef, i16 [[U]], 0
  ; FAST16: [[S1:%.*]] = insertvalue [2 x i16] [[S]], i16 [[U]], 1
  ; FAST16: store [2 x i16] [[S1]], [2 x i16]* bitcast ([100 x i64]* @__dfsan_retval_tls to [2 x i16]*), align [[ALIGN]]
  %a = load [2 x i1], [2 x i1]* %p
  ret [2 x i1] %a
}

; Four-element array load: shadows are loaded as a wide integer,
; folded, truncated to one label, then replicated into the result.
define [4 x i1] @load_array4([4 x i1]* %p) {
  ; NO_COMBINE_LOAD_PTR: @"dfs$load_array4"
  ; NO_COMBINE_LOAD_PTR: [[T:%.*]] = trunc i64 {{.*}} to i16
  ; NO_COMBINE_LOAD_PTR: [[S1:%.*]] = insertvalue [4 x i16] undef, i16 [[T]], 0
  ; NO_COMBINE_LOAD_PTR: [[S2:%.*]] = insertvalue [4 x i16] [[S1]], i16 [[T]], 1
  ; NO_COMBINE_LOAD_PTR: [[S3:%.*]] = insertvalue [4 x i16] [[S2]], i16 [[T]], 2
  ; NO_COMBINE_LOAD_PTR: [[S4:%.*]] = insertvalue [4 x i16] [[S3]], i16 [[T]], 3
  ; NO_COMBINE_LOAD_PTR: store [4 x i16] [[S4]], [4 x i16]* bitcast ([100 x i64]* @__dfsan_retval_tls to [4 x i16]*), align 2

  ; EVENT_CALLBACKS: @"dfs$load_array4"
  ; EVENT_CALLBACKS: [[O0:%.*]] = or i64
  ; EVENT_CALLBACKS: [[O1:%.*]] = or i64 [[O0]]
  ; EVENT_CALLBACKS: [[O2:%.*]] = trunc i64 [[O1]] to i16
  ; EVENT_CALLBACKS: [[O3:%.*]] = or i16 [[O2]]
  ; EVENT_CALLBACKS: call void @__dfsan_load_callback(i16 [[O3]], i8* {{.*}})

  ; FAST16: @"dfs$load_array4"
  ; FAST16: [[T:%.*]] = trunc i64 {{.*}} to i16
  ; FAST16: [[O:%.*]] = or i16 [[T]]
  ; FAST16: [[S1:%.*]] = insertvalue [4 x i16] undef, i16 [[O]], 0
  ; FAST16: [[S2:%.*]] = insertvalue [4 x i16] [[S1]], i16 [[O]], 1
  ; FAST16: [[S3:%.*]] = insertvalue [4 x i16] [[S2]], i16 [[O]], 2
  ; FAST16: [[S4:%.*]] = insertvalue [4 x i16] [[S3]], i16 [[O]], 3
  ; FAST16: store [4 x i16] [[S4]], [4 x i16]* bitcast ([100 x i64]* @__dfsan_retval_tls to [4 x i16]*), align 2

  ; LEGACY: @"dfs$load_array4"
  ; LEGACY: [[P:%.*]] = load i16, i16* bitcast ([100 x i64]* @__dfsan_arg_tls to i16*), align [[ALIGN:2]]
  ; LEGACY: [[PH1:%.*]] = phi i16
  ; LEGACY: [[U:%.*]] = call zeroext i16 @__dfsan_union(i16 zeroext [[PH1]], i16 zeroext [[P]])
  ; LEGACY: [[PH:%.*]] = phi i16 [ [[U]], {{.*}} ], [ [[PH1]], {{.*}} ]
  ; LEGACY: store i16 [[PH]], i16* bitcast ([100 x i64]* @__dfsan_retval_tls to i16*), align [[ALIGN]]

  %a = load [4 x i1], [4 x i1]* %p
  ret [4 x i1] %a
}

; extractvalue propagates the matching element's shadow.
define i1 @extract_array([4 x i1] %a) {
  ; NO_COMBINE_LOAD_PTR: @"dfs$extract_array"
  ; NO_COMBINE_LOAD_PTR: [[AM:%.*]] = load [4 x i16], [4 x i16]* bitcast ([100 x i64]* @__dfsan_arg_tls to [4 x i16]*), align [[ALIGN:2]]
  ; NO_COMBINE_LOAD_PTR: [[EM:%.*]] = extractvalue [4 x i16] [[AM]], 2
  ; NO_COMBINE_LOAD_PTR: store i16 [[EM]], i16* bitcast ([100 x i64]* @__dfsan_retval_tls to i16*), align 2
  %e2 = extractvalue [4 x i1] %a, 2
  ret i1 %e2
}

; insertvalue inserts the element's shadow at the same index.
define [4 x i1] @insert_array([4 x i1] %a, i1 %e2) {
  ; NO_COMBINE_LOAD_PTR: @"dfs$insert_array"
  ; NO_COMBINE_LOAD_PTR: [[EM:%.*]] = load i16, i16* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__dfsan_arg_tls to i64), i64 8) to i16*), align [[ALIGN:2]]
  ; NO_COMBINE_LOAD_PTR: [[AM:%.*]] = load [4 x i16], [4 x i16]* bitcast ([100 x i64]* @__dfsan_arg_tls to [4 x i16]*), align [[ALIGN]]
  ; NO_COMBINE_LOAD_PTR: [[AM1:%.*]] = insertvalue [4 x i16] [[AM]], i16 [[EM]], 0
  ; NO_COMBINE_LOAD_PTR: store [4 x i16] [[AM1]], [4 x i16]* bitcast ([100 x i64]* @__dfsan_retval_tls to [4 x i16]*), align [[ALIGN]]
  %a1 = insertvalue [4 x i1] %a, i1 %e2, 0
  ret [4 x i1] %a1
}

; Storing an array to an alloca folds all element shadows into one
; label stored in the single stack shadow slot.
define void @store_alloca_array([4 x i1] %a) {
  ; FAST16: @"dfs$store_alloca_array"
  ; FAST16: [[S:%.*]] = load [4 x i16], [4 x i16]* bitcast ([100 x i64]* @__dfsan_arg_tls to [4 x i16]*), align [[ALIGN:2]]
  ; FAST16: [[SP:%.*]] = alloca i16, align [[ALIGN]]
  ; FAST16: [[E0:%.*]] = extractvalue [4 x i16] [[S]], 0
  ; FAST16: [[E1:%.*]] = extractvalue [4 x i16] [[S]], 1
  ; FAST16: [[E01:%.*]] = or i16 [[E0]], [[E1]]
  ; FAST16: [[E2:%.*]] = extractvalue [4 x i16] [[S]], 2
  ; FAST16: [[E012:%.*]] = or i16 [[E01]], [[E2]]
  ; FAST16: [[E3:%.*]] = extractvalue [4 x i16] [[S]], 3
  ; FAST16: [[E0123:%.*]] = or i16 [[E012]], [[E3]]
  ; FAST16: store i16 [[E0123]], i16* [[SP]], align [[ALIGN]]
  %p = alloca [4 x i1]
  store [4 x i1] %a, [4 x i1]* %p
  ret void
}

; Storing zeroinitializer clears the shadow with one wide store.
define void @store_zero_array([4 x i1]* %p) {
  ; FAST16: @"dfs$store_zero_array"
  ; FAST16: store i64 0, i64* {{.*}}, align 2
  store [4 x i1] zeroinitializer, [4 x i1]* %p
  ret void
}

; Storing a two-element array writes the combined label to each
; element's shadow slot; COMBINE_STORE_PTR also ORs in the pointer label.
define void @store_array2([2 x i1] %a, [2 x i1]* %p) {
  ; LEGACY: @"dfs$store_array2"
  ; LEGACY: [[S:%.*]] = load i16, i16* bitcast ([100 x i64]* @__dfsan_arg_tls to i16*), align [[ALIGN:2]]
  ; LEGACY: [[SP0:%.*]] = getelementptr i16, i16* [[SP:%.*]], i32 0
  ; LEGACY: store i16 [[S]], i16* [[SP0]], align [[ALIGN]]
  ; LEGACY: [[SP1:%.*]] = getelementptr i16, i16* [[SP]], i32 1
  ; LEGACY: store i16 [[S]], i16* [[SP1]], align [[ALIGN]]

  ; EVENT_CALLBACKS: @"dfs$store_array2"
  ; EVENT_CALLBACKS: [[E12:%.*]] = or i16
  ; EVENT_CALLBACKS: [[P:%.*]] = bitcast [2 x i1]* %p to i8*
  ; EVENT_CALLBACKS: call void @__dfsan_store_callback(i16 [[E12]], i8* [[P]])

  ; FAST16: @"dfs$store_array2"
  ; FAST16: [[S:%.*]] = load [2 x i16], [2 x i16]* bitcast ([100 x i64]* @__dfsan_arg_tls to [2 x i16]*), align [[ALIGN:2]]
  ; FAST16: [[E1:%.*]] = extractvalue [2 x i16] [[S]], 0
  ; FAST16: [[E2:%.*]] = extractvalue [2 x i16] [[S]], 1
  ; FAST16: [[E12:%.*]] = or i16 [[E1]], [[E2]]
  ; FAST16: [[SP0:%.*]] = getelementptr i16, i16* [[SP:%.*]], i32 0
  ; FAST16: store i16 [[E12]], i16* [[SP0]], align [[ALIGN]]
  ; FAST16: [[SP1:%.*]] = getelementptr i16, i16* [[SP]], i32 1
  ; FAST16: store i16 [[E12]], i16* [[SP1]], align [[ALIGN]]

  ; COMBINE_STORE_PTR: @"dfs$store_array2"
  ; COMBINE_STORE_PTR: [[O:%.*]] = or i16
  ; COMBINE_STORE_PTR: [[U:%.*]] = or i16 [[O]]
  ; COMBINE_STORE_PTR: [[P1:%.*]] = getelementptr i16, i16* [[P:%.*]], i32 0
  ; COMBINE_STORE_PTR: store i16 [[U]], i16* [[P1]], align 2
  ; COMBINE_STORE_PTR: [[P2:%.*]] = getelementptr i16, i16* [[P]], i32 1
  ; COMBINE_STORE_PTR: store i16 [[U]], i16* [[P2]], align 2

  store [2 x i1] %a, [2 x i1]* %p
  ret void
}

; A 17-element store folds all labels, then writes the shadow with two
; <8 x i16> vector stores plus one scalar store for the tail element.
; NOTE: the OR chain now uses the FileCheck captures bound above
; ([[AL_0_4]]..[[AL_0_14]]) instead of hardcoded value numbers
; (%10, %12, ...), so the test no longer breaks if the instrumented
; function's SSA numbering shifts.
define void @store_array17([17 x i1] %a, [17 x i1]* %p) {
  ; FAST16: @"dfs$store_array17"
  ; FAST16: [[AL:%.*]] = load [17 x i16], [17 x i16]* bitcast ([100 x i64]* @__dfsan_arg_tls to [17 x i16]*), align 2
  ; FAST16: [[AL0:%.*]] = extractvalue [17 x i16] [[AL]], 0
  ; FAST16: [[AL1:%.*]] = extractvalue [17 x i16] [[AL]], 1
  ; FAST16: [[AL_0_1:%.*]] = or i16 [[AL0]], [[AL1]]
  ; FAST16: [[AL2:%.*]] = extractvalue [17 x i16] [[AL]], 2
  ; FAST16: [[AL_0_2:%.*]] = or i16 [[AL_0_1]], [[AL2]]
  ; FAST16: [[AL3:%.*]] = extractvalue [17 x i16] [[AL]], 3
  ; FAST16: [[AL_0_3:%.*]] = or i16 [[AL_0_2]], [[AL3]]
  ; FAST16: [[AL4:%.*]] = extractvalue [17 x i16] [[AL]], 4
  ; FAST16: [[AL_0_4:%.*]] = or i16 [[AL_0_3]], [[AL4]]
  ; FAST16: [[AL5:%.*]] = extractvalue [17 x i16] [[AL]], 5
  ; FAST16: [[AL_0_5:%.*]] = or i16 [[AL_0_4]], [[AL5]]
  ; FAST16: [[AL6:%.*]] = extractvalue [17 x i16] [[AL]], 6
  ; FAST16: [[AL_0_6:%.*]] = or i16 [[AL_0_5]], [[AL6]]
  ; FAST16: [[AL7:%.*]] = extractvalue [17 x i16] [[AL]], 7
  ; FAST16: [[AL_0_7:%.*]] = or i16 [[AL_0_6]], [[AL7]]
  ; FAST16: [[AL8:%.*]] = extractvalue [17 x i16] [[AL]], 8
  ; FAST16: [[AL_0_8:%.*]] = or i16 [[AL_0_7]], [[AL8]]
  ; FAST16: [[AL9:%.*]] = extractvalue [17 x i16] [[AL]], 9
  ; FAST16: [[AL_0_9:%.*]] = or i16 [[AL_0_8]], [[AL9]]
  ; FAST16: [[AL10:%.*]] = extractvalue [17 x i16] [[AL]], 10
  ; FAST16: [[AL_0_10:%.*]] = or i16 [[AL_0_9]], [[AL10]]
  ; FAST16: [[AL11:%.*]] = extractvalue [17 x i16] [[AL]], 11
  ; FAST16: [[AL_0_11:%.*]] = or i16 [[AL_0_10]], [[AL11]]
  ; FAST16: [[AL12:%.*]] = extractvalue [17 x i16] [[AL]], 12
  ; FAST16: [[AL_0_12:%.*]] = or i16 [[AL_0_11]], [[AL12]]
  ; FAST16: [[AL13:%.*]] = extractvalue [17 x i16] [[AL]], 13
  ; FAST16: [[AL_0_13:%.*]] = or i16 [[AL_0_12]], [[AL13]]
  ; FAST16: [[AL14:%.*]] = extractvalue [17 x i16] [[AL]], 14
  ; FAST16: [[AL_0_14:%.*]] = or i16 [[AL_0_13]], [[AL14]]
  ; FAST16: [[AL15:%.*]] = extractvalue [17 x i16] [[AL]], 15
  ; FAST16: [[AL_0_15:%.*]] = or i16 [[AL_0_14]], [[AL15]]
  ; FAST16: [[AL16:%.*]] = extractvalue [17 x i16] [[AL]], 16
  ; FAST16: [[AL_0_16:%.*]] = or i16 [[AL_0_15]], [[AL16]]
  ; FAST16: [[V1:%.*]] = insertelement <8 x i16> undef, i16 [[AL_0_16]], i32 0
  ; FAST16: [[V2:%.*]] = insertelement <8 x i16> [[V1]], i16 [[AL_0_16]], i32 1
  ; FAST16: [[V3:%.*]] = insertelement <8 x i16> [[V2]], i16 [[AL_0_16]], i32 2
  ; FAST16: [[V4:%.*]] = insertelement <8 x i16> [[V3]], i16 [[AL_0_16]], i32 3
  ; FAST16: [[V5:%.*]] = insertelement <8 x i16> [[V4]], i16 [[AL_0_16]], i32 4
  ; FAST16: [[V6:%.*]] = insertelement <8 x i16> [[V5]], i16 [[AL_0_16]], i32 5
  ; FAST16: [[V7:%.*]] = insertelement <8 x i16> [[V6]], i16 [[AL_0_16]], i32 6
  ; FAST16: [[V8:%.*]] = insertelement <8 x i16> [[V7]], i16 [[AL_0_16]], i32 7
  ; FAST16: [[VP:%.*]] = bitcast i16* [[P:%.*]] to <8 x i16>*
  ; FAST16: [[VP1:%.*]] = getelementptr <8 x i16>, <8 x i16>* [[VP]], i32 0
  ; FAST16: store <8 x i16> [[V8]], <8 x i16>* [[VP1]], align [[ALIGN:2]]
  ; FAST16: [[VP2:%.*]] = getelementptr <8 x i16>, <8 x i16>* [[VP]], i32 1
  ; FAST16: store <8 x i16> [[V8]], <8 x i16>* [[VP2]], align [[ALIGN]]
  ; FAST16: [[P3:%.*]] = getelementptr i16, i16* [[P]], i32 16
  ; FAST16: store i16 [[AL_0_16]], i16* [[P3]], align [[ALIGN]]
  store [17 x i1] %a, [17 x i1]* %p
  ret void
}

; A constant array return has an all-zero shadow.
define [2 x i32] @const_array() {
  ; FAST16: @"dfs$const_array"
  ; FAST16: store [2 x i16] zeroinitializer, [2 x i16]* bitcast ([100 x i64]* @__dfsan_retval_tls to [2 x i16]*), align 2
  ret [2 x i32] [ i32 42, i32 11 ]
}

; Calls pass array shadows through the arg TLS and read the callee's
; result shadow back from the retval TLS.
define [4 x i8] @call_array([4 x i8] %a) {
  ; FAST16: @"dfs$call_array"
  ; FAST16: [[A:%.*]] = load [4 x i16], [4 x i16]* bitcast ([100 x i64]* @__dfsan_arg_tls to [4 x i16]*), align [[ALIGN:2]]
  ; FAST16: store [4 x i16] [[A]], [4 x i16]* bitcast ([100 x i64]* @__dfsan_arg_tls to [4 x i16]*), align [[ALIGN]]
  ; FAST16: %_dfsret = load [4 x i16], [4 x i16]* bitcast ([100 x i64]* @__dfsan_retval_tls to [4 x i16]*), align [[ALIGN]]
  ; FAST16: store [4 x i16] %_dfsret, [4 x i16]* bitcast ([100 x i64]* @__dfsan_retval_tls to [4 x i16]*), align [[ALIGN]]

  %r = call [4 x i8] @pass_array([4 x i8] %a)
  ret [4 x i8] %r
}

%LargeArr = type [1000 x i8]

; Arguments too large for the TLS area are not shadowed element-wise;
; the return label is simply zero.
define i8 @fun_with_large_args(i1 %i, %LargeArr %a) {
  ; FAST16: @"dfs$fun_with_large_args"
  ; FAST16: store i16 0, i16* bitcast ([100 x i64]* @__dfsan_retval_tls to i16*), align 2
  %r = extractvalue %LargeArr %a, 0
  ret i8 %r
}

; A return value too large for the TLS area gets no shadow stores.
define %LargeArr @fun_with_large_ret() {
  ; FAST16: @"dfs$fun_with_large_ret"
  ; FAST16-NEXT: ret  [1000 x i8] zeroinitializer
  ret %LargeArr zeroinitializer
}

; Calling a function with an over-large return treats the result label
; as zero.
define i8 @call_fun_with_large_ret() {
  ; FAST16: @"dfs$call_fun_with_large_ret"
  ; FAST16: store i16 0, i16* bitcast ([100 x i64]* @__dfsan_retval_tls to i16*), align 2
  %r = call %LargeArr @fun_with_large_ret()
  %e = extractvalue %LargeArr %r, 0
  ret i8 %e
}

; Only the small argument's label goes through the arg TLS; the large
; array argument is passed to the instrumented callee unchanged.
define i8 @call_fun_with_large_args(i1 %i, %LargeArr %a) {
  ; FAST16: @"dfs$call_fun_with_large_args"
  ; FAST16: [[I:%.*]] = load i16, i16* bitcast ([100 x i64]* @__dfsan_arg_tls to i16*), align [[ALIGN:2]]
  ; FAST16: store i16 [[I]], i16* bitcast ([100 x i64]* @__dfsan_arg_tls to i16*), align [[ALIGN]]
  ; FAST16: %r = call i8 @"dfs$fun_with_large_args"(i1 %i, [1000 x i8] %a)

  %r = call i8 @fun_with_large_args(i1 %i, %LargeArr %a)
  ret i8 %r
}
