xref: /llvm-project/llvm/test/Instrumentation/MemorySanitizer/msan_basic.ll (revision 29441e4f5fa5f5c7709f7cf180815ba97f611297)
1; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
2; RUN: opt < %s -S -passes='module(msan)' -msan-check-access-address=0                                      | FileCheck %s --check-prefixes=CHECK
3; RUN: opt < %s -S -passes='module(msan)' -msan-check-access-address=0                -msan-track-origins=1 | FileCheck %s --check-prefixes=ORIGIN
4; RUN: opt < %s -S -passes='module(msan)' -msan-instrumentation-with-call-threshold=0 -msan-track-origins=1 | FileCheck %s --check-prefixes=CALLS
5
6target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
7target triple = "x86_64-unknown-linux-gnu"
8
9
10; Check the presence and the linkage type of __msan_track_origins and
11; other interface symbols.
12; ORIGINS: @__msan_track_origins = weak_odr constant i32 1
13
14
15; Check instrumentation of stores
16
17define void @Store(ptr nocapture %p, i32 %x) nounwind uwtable sanitize_memory {
18; CHECK-LABEL: define void @Store(
19; CHECK-SAME: ptr captures(none) [[P:%.*]], i32 [[X:%.*]]) #[[ATTR0:[0-9]+]] {
20; CHECK-NEXT:  [[ENTRY:.*:]]
21; CHECK-NEXT:    [[TMP0:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
22; CHECK-NEXT:    call void @llvm.donothing()
23; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[P]] to i64
24; CHECK-NEXT:    [[TMP2:%.*]] = xor i64 [[TMP1]], 87960930222080
25; CHECK-NEXT:    [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to ptr
26; CHECK-NEXT:    store i32 [[TMP0]], ptr [[TMP3]], align 4
27; CHECK-NEXT:    store i32 [[X]], ptr [[P]], align 4
28; CHECK-NEXT:    ret void
29;
30; ORIGIN-LABEL: define void @Store(
31; ORIGIN-SAME: ptr captures(none) [[P:%.*]], i32 [[X:%.*]]) #[[ATTR0:[0-9]+]] {
32; ORIGIN-NEXT:  [[ENTRY:.*:]]
33; ORIGIN-NEXT:    [[TMP0:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
34; ORIGIN-NEXT:    [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4
35; ORIGIN-NEXT:    call void @llvm.donothing()
36; ORIGIN-NEXT:    [[TMP2:%.*]] = ptrtoint ptr [[P]] to i64
37; ORIGIN-NEXT:    [[TMP3:%.*]] = xor i64 [[TMP2]], 87960930222080
38; ORIGIN-NEXT:    [[TMP4:%.*]] = inttoptr i64 [[TMP3]] to ptr
39; ORIGIN-NEXT:    [[TMP5:%.*]] = add i64 [[TMP3]], 17592186044416
40; ORIGIN-NEXT:    [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
41; ORIGIN-NEXT:    store i32 [[TMP0]], ptr [[TMP4]], align 4
42; ORIGIN-NEXT:    [[_MSCMP:%.*]] = icmp ne i32 [[TMP0]], 0
43; ORIGIN-NEXT:    br i1 [[_MSCMP]], label %[[BB7:.*]], label %[[BB8:.*]], !prof [[PROF1:![0-9]+]]
44; ORIGIN:       [[BB7]]:
45; ORIGIN-NEXT:    store i32 [[TMP1]], ptr [[TMP6]], align 4
46; ORIGIN-NEXT:    br label %[[BB8]]
47; ORIGIN:       [[BB8]]:
48; ORIGIN-NEXT:    store i32 [[X]], ptr [[P]], align 4
49; ORIGIN-NEXT:    ret void
50;
51; CALLS-LABEL: define void @Store(
52; CALLS-SAME: ptr captures(none) [[P:%.*]], i32 [[X:%.*]]) #[[ATTR0:[0-9]+]] {
53; CALLS-NEXT:  [[ENTRY:.*:]]
54; CALLS-NEXT:    [[TMP0:%.*]] = load i64, ptr @__msan_param_tls, align 8
55; CALLS-NEXT:    [[TMP1:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
56; CALLS-NEXT:    [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
57; CALLS-NEXT:    [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4
58; CALLS-NEXT:    call void @llvm.donothing()
59; CALLS-NEXT:    call void @__msan_maybe_warning_8(i64 zeroext [[TMP0]], i32 zeroext [[TMP1]])
60; CALLS-NEXT:    [[TMP4:%.*]] = ptrtoint ptr [[P]] to i64
61; CALLS-NEXT:    [[TMP5:%.*]] = xor i64 [[TMP4]], 87960930222080
62; CALLS-NEXT:    [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
63; CALLS-NEXT:    [[TMP7:%.*]] = add i64 [[TMP5]], 17592186044416
64; CALLS-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
65; CALLS-NEXT:    store i32 [[TMP2]], ptr [[TMP6]], align 4
66; CALLS-NEXT:    call void @__msan_maybe_store_origin_4(i32 zeroext [[TMP2]], ptr [[P]], i32 zeroext [[TMP3]])
67; CALLS-NEXT:    store i32 [[X]], ptr [[P]], align 4
68; CALLS-NEXT:    ret void
69;
70entry:
71  store i32 %x, ptr %p, align 4
72  ret void
73}
74
75; Check instrumentation of aligned stores
76; Shadow store has the same alignment as the original store; origin store
77; does not specify explicit alignment.
78
79define void @AlignedStore(ptr nocapture %p, i32 %x) nounwind uwtable sanitize_memory {
80; CHECK-LABEL: define void @AlignedStore(
81; CHECK-SAME: ptr captures(none) [[P:%.*]], i32 [[X:%.*]]) #[[ATTR0]] {
82; CHECK-NEXT:  [[ENTRY:.*:]]
83; CHECK-NEXT:    [[TMP0:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
84; CHECK-NEXT:    call void @llvm.donothing()
85; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[P]] to i64
86; CHECK-NEXT:    [[TMP2:%.*]] = xor i64 [[TMP1]], 87960930222080
87; CHECK-NEXT:    [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to ptr
88; CHECK-NEXT:    store i32 [[TMP0]], ptr [[TMP3]], align 32
89; CHECK-NEXT:    store i32 [[X]], ptr [[P]], align 32
90; CHECK-NEXT:    ret void
91;
92; ORIGIN-LABEL: define void @AlignedStore(
93; ORIGIN-SAME: ptr captures(none) [[P:%.*]], i32 [[X:%.*]]) #[[ATTR0]] {
94; ORIGIN-NEXT:  [[ENTRY:.*:]]
95; ORIGIN-NEXT:    [[TMP0:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
96; ORIGIN-NEXT:    [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4
97; ORIGIN-NEXT:    call void @llvm.donothing()
98; ORIGIN-NEXT:    [[TMP2:%.*]] = ptrtoint ptr [[P]] to i64
99; ORIGIN-NEXT:    [[TMP3:%.*]] = xor i64 [[TMP2]], 87960930222080
100; ORIGIN-NEXT:    [[TMP4:%.*]] = inttoptr i64 [[TMP3]] to ptr
101; ORIGIN-NEXT:    [[TMP5:%.*]] = add i64 [[TMP3]], 17592186044416
102; ORIGIN-NEXT:    [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
103; ORIGIN-NEXT:    store i32 [[TMP0]], ptr [[TMP4]], align 32
104; ORIGIN-NEXT:    [[_MSCMP:%.*]] = icmp ne i32 [[TMP0]], 0
105; ORIGIN-NEXT:    br i1 [[_MSCMP]], label %[[BB7:.*]], label %[[BB11:.*]], !prof [[PROF1]]
106; ORIGIN:       [[BB7]]:
107; ORIGIN-NEXT:    [[TMP8:%.*]] = zext i32 [[TMP1]] to i64
108; ORIGIN-NEXT:    [[TMP9:%.*]] = shl i64 [[TMP8]], 32
109; ORIGIN-NEXT:    [[TMP10:%.*]] = or i64 [[TMP8]], [[TMP9]]
110; ORIGIN-NEXT:    store i32 [[TMP1]], ptr [[TMP6]], align 32
111; ORIGIN-NEXT:    br label %[[BB11]]
112; ORIGIN:       [[BB11]]:
113; ORIGIN-NEXT:    store i32 [[X]], ptr [[P]], align 32
114; ORIGIN-NEXT:    ret void
115;
116; CALLS-LABEL: define void @AlignedStore(
117; CALLS-SAME: ptr captures(none) [[P:%.*]], i32 [[X:%.*]]) #[[ATTR0]] {
118; CALLS-NEXT:  [[ENTRY:.*:]]
119; CALLS-NEXT:    [[TMP0:%.*]] = load i64, ptr @__msan_param_tls, align 8
120; CALLS-NEXT:    [[TMP1:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
121; CALLS-NEXT:    [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
122; CALLS-NEXT:    [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4
123; CALLS-NEXT:    call void @llvm.donothing()
124; CALLS-NEXT:    call void @__msan_maybe_warning_8(i64 zeroext [[TMP0]], i32 zeroext [[TMP1]])
125; CALLS-NEXT:    [[TMP4:%.*]] = ptrtoint ptr [[P]] to i64
126; CALLS-NEXT:    [[TMP5:%.*]] = xor i64 [[TMP4]], 87960930222080
127; CALLS-NEXT:    [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
128; CALLS-NEXT:    [[TMP7:%.*]] = add i64 [[TMP5]], 17592186044416
129; CALLS-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
130; CALLS-NEXT:    store i32 [[TMP2]], ptr [[TMP6]], align 32
131; CALLS-NEXT:    call void @__msan_maybe_store_origin_4(i32 zeroext [[TMP2]], ptr [[P]], i32 zeroext [[TMP3]])
132; CALLS-NEXT:    store i32 [[X]], ptr [[P]], align 32
133; CALLS-NEXT:    ret void
134;
135entry:
136  store i32 %x, ptr %p, align 32
137  ret void
138}
139
140; load followed by cmp: check that we load the shadow and call __msan_warning_with_origin.
141define void @LoadAndCmp(ptr nocapture %a) nounwind uwtable sanitize_memory {
142; CHECK-LABEL: define void @LoadAndCmp(
143; CHECK-SAME: ptr captures(none) [[A:%.*]]) #[[ATTR0]] {
144; CHECK-NEXT:  [[ENTRY:.*:]]
145; CHECK-NEXT:    call void @llvm.donothing()
146; CHECK-NEXT:    [[TMP0:%.*]] = load i32, ptr [[A]], align 4
147; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[A]] to i64
148; CHECK-NEXT:    [[TMP2:%.*]] = xor i64 [[TMP1]], 87960930222080
149; CHECK-NEXT:    [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to ptr
150; CHECK-NEXT:    [[_MSLD:%.*]] = load i32, ptr [[TMP3]], align 4
151; CHECK-NEXT:    [[TMP4:%.*]] = xor i32 [[TMP0]], 0
152; CHECK-NEXT:    [[TMP5:%.*]] = or i32 [[_MSLD]], 0
153; CHECK-NEXT:    [[TMP6:%.*]] = icmp ne i32 [[TMP5]], 0
154; CHECK-NEXT:    [[TMP7:%.*]] = xor i32 [[TMP5]], -1
155; CHECK-NEXT:    [[TMP8:%.*]] = and i32 [[TMP7]], [[TMP4]]
156; CHECK-NEXT:    [[TMP9:%.*]] = icmp eq i32 [[TMP8]], 0
157; CHECK-NEXT:    [[_MSPROP_ICMP:%.*]] = and i1 [[TMP6]], [[TMP9]]
158; CHECK-NEXT:    [[TOBOOL:%.*]] = icmp eq i32 [[TMP0]], 0
159; CHECK-NEXT:    br i1 [[_MSPROP_ICMP]], label %[[BB10:.*]], label %[[BB11:.*]], !prof [[PROF1:![0-9]+]]
160; CHECK:       [[BB10]]:
161; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR12:[0-9]+]]
162; CHECK-NEXT:    unreachable
163; CHECK:       [[BB11]]:
164; CHECK-NEXT:    br i1 [[TOBOOL]], label %[[IF_END:.*]], label %[[IF_THEN:.*]]
165; CHECK:       [[IF_THEN]]:
166; CHECK-NEXT:    store i64 0, ptr @__msan_va_arg_overflow_size_tls, align 8
167; CHECK-NEXT:    tail call void (...) @foo() #[[ATTR5:[0-9]+]]
168; CHECK-NEXT:    br label %[[IF_END]]
169; CHECK:       [[IF_END]]:
170; CHECK-NEXT:    ret void
171;
172; ORIGIN-LABEL: define void @LoadAndCmp(
173; ORIGIN-SAME: ptr captures(none) [[A:%.*]]) #[[ATTR0]] {
174; ORIGIN-NEXT:  [[ENTRY:.*:]]
175; ORIGIN-NEXT:    call void @llvm.donothing()
176; ORIGIN-NEXT:    [[TMP0:%.*]] = load i32, ptr [[A]], align 4
177; ORIGIN-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[A]] to i64
178; ORIGIN-NEXT:    [[TMP2:%.*]] = xor i64 [[TMP1]], 87960930222080
179; ORIGIN-NEXT:    [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to ptr
180; ORIGIN-NEXT:    [[TMP4:%.*]] = add i64 [[TMP2]], 17592186044416
181; ORIGIN-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
182; ORIGIN-NEXT:    [[_MSLD:%.*]] = load i32, ptr [[TMP3]], align 4
183; ORIGIN-NEXT:    [[TMP6:%.*]] = load i32, ptr [[TMP5]], align 4
184; ORIGIN-NEXT:    [[TMP7:%.*]] = xor i32 [[TMP0]], 0
185; ORIGIN-NEXT:    [[TMP8:%.*]] = or i32 [[_MSLD]], 0
186; ORIGIN-NEXT:    [[TMP9:%.*]] = icmp ne i32 [[TMP8]], 0
187; ORIGIN-NEXT:    [[TMP10:%.*]] = xor i32 [[TMP8]], -1
188; ORIGIN-NEXT:    [[TMP11:%.*]] = and i32 [[TMP10]], [[TMP7]]
189; ORIGIN-NEXT:    [[TMP12:%.*]] = icmp eq i32 [[TMP11]], 0
190; ORIGIN-NEXT:    [[_MSPROP_ICMP:%.*]] = and i1 [[TMP9]], [[TMP12]]
191; ORIGIN-NEXT:    [[TOBOOL:%.*]] = icmp eq i32 [[TMP0]], 0
192; ORIGIN-NEXT:    br i1 [[_MSPROP_ICMP]], label %[[BB13:.*]], label %[[BB14:.*]], !prof [[PROF1]]
193; ORIGIN:       [[BB13]]:
194; ORIGIN-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP6]]) #[[ATTR12:[0-9]+]]
195; ORIGIN-NEXT:    unreachable
196; ORIGIN:       [[BB14]]:
197; ORIGIN-NEXT:    br i1 [[TOBOOL]], label %[[IF_END:.*]], label %[[IF_THEN:.*]]
198; ORIGIN:       [[IF_THEN]]:
199; ORIGIN-NEXT:    store i64 0, ptr @__msan_va_arg_overflow_size_tls, align 8
200; ORIGIN-NEXT:    tail call void (...) @foo() #[[ATTR5:[0-9]+]]
201; ORIGIN-NEXT:    br label %[[IF_END]]
202; ORIGIN:       [[IF_END]]:
203; ORIGIN-NEXT:    ret void
204;
205; CALLS-LABEL: define void @LoadAndCmp(
206; CALLS-SAME: ptr captures(none) [[A:%.*]]) #[[ATTR0]] {
207; CALLS-NEXT:  [[ENTRY:.*:]]
208; CALLS-NEXT:    [[TMP0:%.*]] = load i64, ptr @__msan_param_tls, align 8
209; CALLS-NEXT:    [[TMP1:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
210; CALLS-NEXT:    call void @llvm.donothing()
211; CALLS-NEXT:    call void @__msan_maybe_warning_8(i64 zeroext [[TMP0]], i32 zeroext [[TMP1]])
212; CALLS-NEXT:    [[TMP2:%.*]] = load i32, ptr [[A]], align 4
213; CALLS-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[A]] to i64
214; CALLS-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 87960930222080
215; CALLS-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
216; CALLS-NEXT:    [[TMP6:%.*]] = add i64 [[TMP4]], 17592186044416
217; CALLS-NEXT:    [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
218; CALLS-NEXT:    [[_MSLD:%.*]] = load i32, ptr [[TMP5]], align 4
219; CALLS-NEXT:    [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4
220; CALLS-NEXT:    [[TMP9:%.*]] = xor i32 [[TMP2]], 0
221; CALLS-NEXT:    [[TMP10:%.*]] = or i32 [[_MSLD]], 0
222; CALLS-NEXT:    [[TMP11:%.*]] = icmp ne i32 [[TMP10]], 0
223; CALLS-NEXT:    [[TMP12:%.*]] = xor i32 [[TMP10]], -1
224; CALLS-NEXT:    [[TMP13:%.*]] = and i32 [[TMP12]], [[TMP9]]
225; CALLS-NEXT:    [[TMP14:%.*]] = icmp eq i32 [[TMP13]], 0
226; CALLS-NEXT:    [[_MSPROP_ICMP:%.*]] = and i1 [[TMP11]], [[TMP14]]
227; CALLS-NEXT:    [[TOBOOL:%.*]] = icmp eq i32 [[TMP2]], 0
228; CALLS-NEXT:    [[TMP15:%.*]] = zext i1 [[_MSPROP_ICMP]] to i8
229; CALLS-NEXT:    call void @__msan_maybe_warning_1(i8 zeroext [[TMP15]], i32 zeroext [[TMP8]])
230; CALLS-NEXT:    br i1 [[TOBOOL]], label %[[IF_END:.*]], label %[[IF_THEN:.*]]
231; CALLS:       [[IF_THEN]]:
232; CALLS-NEXT:    store i64 0, ptr @__msan_va_arg_overflow_size_tls, align 8
233; CALLS-NEXT:    tail call void (...) @foo() #[[ATTR5:[0-9]+]]
234; CALLS-NEXT:    br label %[[IF_END]]
235; CALLS:       [[IF_END]]:
236; CALLS-NEXT:    ret void
237;
238entry:
239  %0 = load i32, ptr %a, align 4
240  %tobool = icmp eq i32 %0, 0
241  br i1 %tobool, label %if.end, label %if.then
242
243if.then:                                          ; preds = %entry
244  tail call void (...) @foo() nounwind
245  br label %if.end
246
247if.end:                                           ; preds = %entry, %if.then
248  ret void
249}
250
251declare void @foo(...)
252
253; Check that we store the shadow for the retval.
254define i32 @ReturnInt() nounwind uwtable readnone sanitize_memory {
255; CHECK-LABEL: define i32 @ReturnInt(
256; CHECK-SAME: ) #[[ATTR0]] {
257; CHECK-NEXT:  [[ENTRY:.*:]]
258; CHECK-NEXT:    call void @llvm.donothing()
259; CHECK-NEXT:    store i32 0, ptr @__msan_retval_tls, align 8
260; CHECK-NEXT:    ret i32 123
261;
262; ORIGIN-LABEL: define i32 @ReturnInt(
263; ORIGIN-SAME: ) #[[ATTR0]] {
264; ORIGIN-NEXT:  [[ENTRY:.*:]]
265; ORIGIN-NEXT:    call void @llvm.donothing()
266; ORIGIN-NEXT:    store i32 0, ptr @__msan_retval_tls, align 8
267; ORIGIN-NEXT:    store i32 0, ptr @__msan_retval_origin_tls, align 4
268; ORIGIN-NEXT:    ret i32 123
269;
270; CALLS-LABEL: define i32 @ReturnInt(
271; CALLS-SAME: ) #[[ATTR0]] {
272; CALLS-NEXT:  [[ENTRY:.*:]]
273; CALLS-NEXT:    call void @llvm.donothing()
274; CALLS-NEXT:    store i32 0, ptr @__msan_retval_tls, align 8
275; CALLS-NEXT:    store i32 0, ptr @__msan_retval_origin_tls, align 4
276; CALLS-NEXT:    ret i32 123
277;
278entry:
279  ret i32 123
280}
281
282
283; Check that we get the shadow for the retval.
284define void @CopyRetVal(ptr nocapture %a) nounwind uwtable sanitize_memory {
285; CHECK-LABEL: define void @CopyRetVal(
286; CHECK-SAME: ptr captures(none) [[A:%.*]]) #[[ATTR0]] {
287; CHECK-NEXT:  [[ENTRY:.*:]]
288; CHECK-NEXT:    call void @llvm.donothing()
289; CHECK-NEXT:    store i32 0, ptr @__msan_retval_tls, align 8
290; CHECK-NEXT:    [[CALL:%.*]] = tail call i32 @ReturnInt() #[[ATTR5]]
291; CHECK-NEXT:    [[_MSRET:%.*]] = load i32, ptr @__msan_retval_tls, align 8
292; CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[A]] to i64
293; CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 87960930222080
294; CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
295; CHECK-NEXT:    store i32 [[_MSRET]], ptr [[TMP2]], align 4
296; CHECK-NEXT:    store i32 [[CALL]], ptr [[A]], align 4
297; CHECK-NEXT:    ret void
298;
299; ORIGIN-LABEL: define void @CopyRetVal(
300; ORIGIN-SAME: ptr captures(none) [[A:%.*]]) #[[ATTR0]] {
301; ORIGIN-NEXT:  [[ENTRY:.*:]]
302; ORIGIN-NEXT:    call void @llvm.donothing()
303; ORIGIN-NEXT:    store i32 0, ptr @__msan_retval_tls, align 8
304; ORIGIN-NEXT:    [[CALL:%.*]] = tail call i32 @ReturnInt() #[[ATTR5]]
305; ORIGIN-NEXT:    [[_MSRET:%.*]] = load i32, ptr @__msan_retval_tls, align 8
306; ORIGIN-NEXT:    [[TMP0:%.*]] = load i32, ptr @__msan_retval_origin_tls, align 4
307; ORIGIN-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[A]] to i64
308; ORIGIN-NEXT:    [[TMP2:%.*]] = xor i64 [[TMP1]], 87960930222080
309; ORIGIN-NEXT:    [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to ptr
310; ORIGIN-NEXT:    [[TMP4:%.*]] = add i64 [[TMP2]], 17592186044416
311; ORIGIN-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
312; ORIGIN-NEXT:    store i32 [[_MSRET]], ptr [[TMP3]], align 4
313; ORIGIN-NEXT:    [[_MSCMP:%.*]] = icmp ne i32 [[_MSRET]], 0
314; ORIGIN-NEXT:    br i1 [[_MSCMP]], label %[[BB6:.*]], label %[[BB7:.*]], !prof [[PROF1]]
315; ORIGIN:       [[BB6]]:
316; ORIGIN-NEXT:    store i32 [[TMP0]], ptr [[TMP5]], align 4
317; ORIGIN-NEXT:    br label %[[BB7]]
318; ORIGIN:       [[BB7]]:
319; ORIGIN-NEXT:    store i32 [[CALL]], ptr [[A]], align 4
320; ORIGIN-NEXT:    ret void
321;
322; CALLS-LABEL: define void @CopyRetVal(
323; CALLS-SAME: ptr captures(none) [[A:%.*]]) #[[ATTR0]] {
324; CALLS-NEXT:  [[ENTRY:.*:]]
325; CALLS-NEXT:    [[TMP0:%.*]] = load i64, ptr @__msan_param_tls, align 8
326; CALLS-NEXT:    [[TMP1:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
327; CALLS-NEXT:    call void @llvm.donothing()
328; CALLS-NEXT:    store i32 0, ptr @__msan_retval_tls, align 8
329; CALLS-NEXT:    [[CALL:%.*]] = tail call i32 @ReturnInt() #[[ATTR5]]
330; CALLS-NEXT:    [[_MSRET:%.*]] = load i32, ptr @__msan_retval_tls, align 8
331; CALLS-NEXT:    [[TMP2:%.*]] = load i32, ptr @__msan_retval_origin_tls, align 4
332; CALLS-NEXT:    call void @__msan_maybe_warning_8(i64 zeroext [[TMP0]], i32 zeroext [[TMP1]])
333; CALLS-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[A]] to i64
334; CALLS-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 87960930222080
335; CALLS-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
336; CALLS-NEXT:    [[TMP6:%.*]] = add i64 [[TMP4]], 17592186044416
337; CALLS-NEXT:    [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
338; CALLS-NEXT:    store i32 [[_MSRET]], ptr [[TMP5]], align 4
339; CALLS-NEXT:    call void @__msan_maybe_store_origin_4(i32 zeroext [[_MSRET]], ptr [[A]], i32 zeroext [[TMP2]])
340; CALLS-NEXT:    store i32 [[CALL]], ptr [[A]], align 4
341; CALLS-NEXT:    ret void
342;
343entry:
344  %call = tail call i32 @ReturnInt() nounwind
345  store i32 %call, ptr %a, align 4
346  ret void
347}
348
349
350
351; Check that we generate PHIs for shadow.
352define void @FuncWithPhi(ptr nocapture %a, ptr %b, ptr nocapture %c) nounwind uwtable sanitize_memory {
353; CHECK-LABEL: define void @FuncWithPhi(
354; CHECK-SAME: ptr captures(none) [[A:%.*]], ptr [[B:%.*]], ptr captures(none) [[C:%.*]]) #[[ATTR0]] {
355; CHECK-NEXT:  [[ENTRY:.*:]]
356; CHECK-NEXT:    [[TMP0:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
357; CHECK-NEXT:    call void @llvm.donothing()
358; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[B]] to i64
359; CHECK-NEXT:    [[TMP2:%.*]] = xor i64 [[TMP1]], 0
360; CHECK-NEXT:    [[TMP3:%.*]] = or i64 [[TMP0]], 0
361; CHECK-NEXT:    [[TMP4:%.*]] = icmp ne i64 [[TMP3]], 0
362; CHECK-NEXT:    [[TMP5:%.*]] = xor i64 [[TMP3]], -1
363; CHECK-NEXT:    [[TMP6:%.*]] = and i64 [[TMP5]], [[TMP2]]
364; CHECK-NEXT:    [[TMP7:%.*]] = icmp eq i64 [[TMP6]], 0
365; CHECK-NEXT:    [[_MSPROP_ICMP:%.*]] = and i1 [[TMP4]], [[TMP7]]
366; CHECK-NEXT:    [[TOBOOL:%.*]] = icmp eq ptr [[B]], null
367; CHECK-NEXT:    br i1 [[_MSPROP_ICMP]], label %[[BB8:.*]], label %[[BB9:.*]], !prof [[PROF1]]
368; CHECK:       [[BB8]]:
369; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR12]]
370; CHECK-NEXT:    unreachable
371; CHECK:       [[BB9]]:
372; CHECK-NEXT:    br i1 [[TOBOOL]], label %[[IF_ELSE:.*]], label %[[IF_THEN:.*]]
373; CHECK:       [[IF_THEN]]:
374; CHECK-NEXT:    [[TMP10:%.*]] = load i32, ptr [[B]], align 4
375; CHECK-NEXT:    [[TMP11:%.*]] = ptrtoint ptr [[B]] to i64
376; CHECK-NEXT:    [[TMP12:%.*]] = xor i64 [[TMP11]], 87960930222080
377; CHECK-NEXT:    [[TMP13:%.*]] = inttoptr i64 [[TMP12]] to ptr
378; CHECK-NEXT:    [[_MSLD1:%.*]] = load i32, ptr [[TMP13]], align 4
379; CHECK-NEXT:    br label %[[IF_END:.*]]
380; CHECK:       [[IF_ELSE]]:
381; CHECK-NEXT:    [[TMP14:%.*]] = load i32, ptr [[C]], align 4
382; CHECK-NEXT:    [[TMP15:%.*]] = ptrtoint ptr [[C]] to i64
383; CHECK-NEXT:    [[TMP16:%.*]] = xor i64 [[TMP15]], 87960930222080
384; CHECK-NEXT:    [[TMP17:%.*]] = inttoptr i64 [[TMP16]] to ptr
385; CHECK-NEXT:    [[_MSLD:%.*]] = load i32, ptr [[TMP17]], align 4
386; CHECK-NEXT:    br label %[[IF_END]]
387; CHECK:       [[IF_END]]:
388; CHECK-NEXT:    [[_MSPHI_S:%.*]] = phi i32 [ [[_MSLD1]], %[[IF_THEN]] ], [ [[_MSLD]], %[[IF_ELSE]] ]
389; CHECK-NEXT:    [[T_0:%.*]] = phi i32 [ [[TMP10]], %[[IF_THEN]] ], [ [[TMP14]], %[[IF_ELSE]] ]
390; CHECK-NEXT:    [[TMP18:%.*]] = ptrtoint ptr [[A]] to i64
391; CHECK-NEXT:    [[TMP19:%.*]] = xor i64 [[TMP18]], 87960930222080
392; CHECK-NEXT:    [[TMP20:%.*]] = inttoptr i64 [[TMP19]] to ptr
393; CHECK-NEXT:    store i32 [[_MSPHI_S]], ptr [[TMP20]], align 4
394; CHECK-NEXT:    store i32 [[T_0]], ptr [[A]], align 4
395; CHECK-NEXT:    ret void
396;
397; ORIGIN-LABEL: define void @FuncWithPhi(
398; ORIGIN-SAME: ptr captures(none) [[A:%.*]], ptr [[B:%.*]], ptr captures(none) [[C:%.*]]) #[[ATTR0]] {
399; ORIGIN-NEXT:  [[ENTRY:.*:]]
400; ORIGIN-NEXT:    [[TMP0:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
401; ORIGIN-NEXT:    [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4
402; ORIGIN-NEXT:    call void @llvm.donothing()
403; ORIGIN-NEXT:    [[TMP2:%.*]] = ptrtoint ptr [[B]] to i64
404; ORIGIN-NEXT:    [[TMP3:%.*]] = xor i64 [[TMP2]], 0
405; ORIGIN-NEXT:    [[TMP4:%.*]] = or i64 [[TMP0]], 0
406; ORIGIN-NEXT:    [[TMP5:%.*]] = icmp ne i64 [[TMP4]], 0
407; ORIGIN-NEXT:    [[TMP6:%.*]] = xor i64 [[TMP4]], -1
408; ORIGIN-NEXT:    [[TMP7:%.*]] = and i64 [[TMP6]], [[TMP3]]
409; ORIGIN-NEXT:    [[TMP8:%.*]] = icmp eq i64 [[TMP7]], 0
410; ORIGIN-NEXT:    [[_MSPROP_ICMP:%.*]] = and i1 [[TMP5]], [[TMP8]]
411; ORIGIN-NEXT:    [[TOBOOL:%.*]] = icmp eq ptr [[B]], null
412; ORIGIN-NEXT:    br i1 [[_MSPROP_ICMP]], label %[[BB9:.*]], label %[[BB10:.*]], !prof [[PROF1]]
413; ORIGIN:       [[BB9]]:
414; ORIGIN-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP1]]) #[[ATTR12]]
415; ORIGIN-NEXT:    unreachable
416; ORIGIN:       [[BB10]]:
417; ORIGIN-NEXT:    br i1 [[TOBOOL]], label %[[IF_ELSE:.*]], label %[[IF_THEN:.*]]
418; ORIGIN:       [[IF_THEN]]:
419; ORIGIN-NEXT:    [[TMP11:%.*]] = load i32, ptr [[B]], align 4
420; ORIGIN-NEXT:    [[TMP12:%.*]] = ptrtoint ptr [[B]] to i64
421; ORIGIN-NEXT:    [[TMP13:%.*]] = xor i64 [[TMP12]], 87960930222080
422; ORIGIN-NEXT:    [[TMP14:%.*]] = inttoptr i64 [[TMP13]] to ptr
423; ORIGIN-NEXT:    [[TMP15:%.*]] = add i64 [[TMP13]], 17592186044416
424; ORIGIN-NEXT:    [[TMP16:%.*]] = inttoptr i64 [[TMP15]] to ptr
425; ORIGIN-NEXT:    [[_MSLD1:%.*]] = load i32, ptr [[TMP14]], align 4
426; ORIGIN-NEXT:    [[TMP17:%.*]] = load i32, ptr [[TMP16]], align 4
427; ORIGIN-NEXT:    br label %[[IF_END:.*]]
428; ORIGIN:       [[IF_ELSE]]:
429; ORIGIN-NEXT:    [[TMP18:%.*]] = load i32, ptr [[C]], align 4
430; ORIGIN-NEXT:    [[TMP19:%.*]] = ptrtoint ptr [[C]] to i64
431; ORIGIN-NEXT:    [[TMP20:%.*]] = xor i64 [[TMP19]], 87960930222080
432; ORIGIN-NEXT:    [[TMP21:%.*]] = inttoptr i64 [[TMP20]] to ptr
433; ORIGIN-NEXT:    [[TMP22:%.*]] = add i64 [[TMP20]], 17592186044416
434; ORIGIN-NEXT:    [[TMP23:%.*]] = inttoptr i64 [[TMP22]] to ptr
435; ORIGIN-NEXT:    [[_MSLD:%.*]] = load i32, ptr [[TMP21]], align 4
436; ORIGIN-NEXT:    [[TMP24:%.*]] = load i32, ptr [[TMP23]], align 4
437; ORIGIN-NEXT:    br label %[[IF_END]]
438; ORIGIN:       [[IF_END]]:
439; ORIGIN-NEXT:    [[_MSPHI_S:%.*]] = phi i32 [ [[_MSLD1]], %[[IF_THEN]] ], [ [[_MSLD]], %[[IF_ELSE]] ]
440; ORIGIN-NEXT:    [[_MSPHI_O:%.*]] = phi i32 [ [[TMP17]], %[[IF_THEN]] ], [ [[TMP24]], %[[IF_ELSE]] ]
441; ORIGIN-NEXT:    [[T_0:%.*]] = phi i32 [ [[TMP11]], %[[IF_THEN]] ], [ [[TMP18]], %[[IF_ELSE]] ]
442; ORIGIN-NEXT:    [[TMP25:%.*]] = ptrtoint ptr [[A]] to i64
443; ORIGIN-NEXT:    [[TMP26:%.*]] = xor i64 [[TMP25]], 87960930222080
444; ORIGIN-NEXT:    [[TMP27:%.*]] = inttoptr i64 [[TMP26]] to ptr
445; ORIGIN-NEXT:    [[TMP28:%.*]] = add i64 [[TMP26]], 17592186044416
446; ORIGIN-NEXT:    [[TMP29:%.*]] = inttoptr i64 [[TMP28]] to ptr
447; ORIGIN-NEXT:    store i32 [[_MSPHI_S]], ptr [[TMP27]], align 4
448; ORIGIN-NEXT:    [[_MSCMP:%.*]] = icmp ne i32 [[_MSPHI_S]], 0
449; ORIGIN-NEXT:    br i1 [[_MSCMP]], label %[[BB30:.*]], label %[[BB31:.*]], !prof [[PROF1]]
450; ORIGIN:       [[BB30]]:
451; ORIGIN-NEXT:    store i32 [[_MSPHI_O]], ptr [[TMP29]], align 4
452; ORIGIN-NEXT:    br label %[[BB31]]
453; ORIGIN:       [[BB31]]:
454; ORIGIN-NEXT:    store i32 [[T_0]], ptr [[A]], align 4
455; ORIGIN-NEXT:    ret void
456;
457; CALLS-LABEL: define void @FuncWithPhi(
458; CALLS-SAME: ptr captures(none) [[A:%.*]], ptr [[B:%.*]], ptr captures(none) [[C:%.*]]) #[[ATTR0]] {
459; CALLS-NEXT:  [[ENTRY:.*:]]
460; CALLS-NEXT:    [[TMP0:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
461; CALLS-NEXT:    [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4
462; CALLS-NEXT:    [[TMP2:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
463; CALLS-NEXT:    [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4
464; CALLS-NEXT:    [[TMP4:%.*]] = load i64, ptr @__msan_param_tls, align 8
465; CALLS-NEXT:    [[TMP5:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
466; CALLS-NEXT:    call void @llvm.donothing()
467; CALLS-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[B]] to i64
468; CALLS-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 0
469; CALLS-NEXT:    [[TMP8:%.*]] = or i64 [[TMP0]], 0
470; CALLS-NEXT:    [[TMP9:%.*]] = icmp ne i64 [[TMP8]], 0
471; CALLS-NEXT:    [[TMP10:%.*]] = xor i64 [[TMP8]], -1
472; CALLS-NEXT:    [[TMP11:%.*]] = and i64 [[TMP10]], [[TMP7]]
473; CALLS-NEXT:    [[TMP12:%.*]] = icmp eq i64 [[TMP11]], 0
474; CALLS-NEXT:    [[_MSPROP_ICMP:%.*]] = and i1 [[TMP9]], [[TMP12]]
475; CALLS-NEXT:    [[TOBOOL:%.*]] = icmp eq ptr [[B]], null
476; CALLS-NEXT:    [[TMP13:%.*]] = zext i1 [[_MSPROP_ICMP]] to i8
477; CALLS-NEXT:    call void @__msan_maybe_warning_1(i8 zeroext [[TMP13]], i32 zeroext [[TMP1]])
478; CALLS-NEXT:    br i1 [[TOBOOL]], label %[[IF_ELSE:.*]], label %[[IF_THEN:.*]]
479; CALLS:       [[IF_THEN]]:
480; CALLS-NEXT:    call void @__msan_maybe_warning_8(i64 zeroext [[TMP0]], i32 zeroext [[TMP1]])
481; CALLS-NEXT:    [[TMP14:%.*]] = load i32, ptr [[B]], align 4
482; CALLS-NEXT:    [[TMP15:%.*]] = ptrtoint ptr [[B]] to i64
483; CALLS-NEXT:    [[TMP16:%.*]] = xor i64 [[TMP15]], 87960930222080
484; CALLS-NEXT:    [[TMP17:%.*]] = inttoptr i64 [[TMP16]] to ptr
485; CALLS-NEXT:    [[TMP18:%.*]] = add i64 [[TMP16]], 17592186044416
486; CALLS-NEXT:    [[TMP19:%.*]] = inttoptr i64 [[TMP18]] to ptr
487; CALLS-NEXT:    [[_MSLD1:%.*]] = load i32, ptr [[TMP17]], align 4
488; CALLS-NEXT:    [[TMP20:%.*]] = load i32, ptr [[TMP19]], align 4
489; CALLS-NEXT:    br label %[[IF_END:.*]]
490; CALLS:       [[IF_ELSE]]:
491; CALLS-NEXT:    call void @__msan_maybe_warning_8(i64 zeroext [[TMP2]], i32 zeroext [[TMP3]])
492; CALLS-NEXT:    [[TMP21:%.*]] = load i32, ptr [[C]], align 4
493; CALLS-NEXT:    [[TMP22:%.*]] = ptrtoint ptr [[C]] to i64
494; CALLS-NEXT:    [[TMP23:%.*]] = xor i64 [[TMP22]], 87960930222080
495; CALLS-NEXT:    [[TMP24:%.*]] = inttoptr i64 [[TMP23]] to ptr
496; CALLS-NEXT:    [[TMP25:%.*]] = add i64 [[TMP23]], 17592186044416
497; CALLS-NEXT:    [[TMP26:%.*]] = inttoptr i64 [[TMP25]] to ptr
498; CALLS-NEXT:    [[_MSLD:%.*]] = load i32, ptr [[TMP24]], align 4
499; CALLS-NEXT:    [[TMP27:%.*]] = load i32, ptr [[TMP26]], align 4
500; CALLS-NEXT:    br label %[[IF_END]]
501; CALLS:       [[IF_END]]:
502; CALLS-NEXT:    [[_MSPHI_S:%.*]] = phi i32 [ [[_MSLD1]], %[[IF_THEN]] ], [ [[_MSLD]], %[[IF_ELSE]] ]
503; CALLS-NEXT:    [[_MSPHI_O:%.*]] = phi i32 [ [[TMP20]], %[[IF_THEN]] ], [ [[TMP27]], %[[IF_ELSE]] ]
504; CALLS-NEXT:    [[T_0:%.*]] = phi i32 [ [[TMP14]], %[[IF_THEN]] ], [ [[TMP21]], %[[IF_ELSE]] ]
505; CALLS-NEXT:    call void @__msan_maybe_warning_8(i64 zeroext [[TMP4]], i32 zeroext [[TMP5]])
506; CALLS-NEXT:    [[TMP28:%.*]] = ptrtoint ptr [[A]] to i64
507; CALLS-NEXT:    [[TMP29:%.*]] = xor i64 [[TMP28]], 87960930222080
508; CALLS-NEXT:    [[TMP30:%.*]] = inttoptr i64 [[TMP29]] to ptr
509; CALLS-NEXT:    [[TMP31:%.*]] = add i64 [[TMP29]], 17592186044416
510; CALLS-NEXT:    [[TMP32:%.*]] = inttoptr i64 [[TMP31]] to ptr
511; CALLS-NEXT:    store i32 [[_MSPHI_S]], ptr [[TMP30]], align 4
512; CALLS-NEXT:    call void @__msan_maybe_store_origin_4(i32 zeroext [[_MSPHI_S]], ptr [[A]], i32 zeroext [[_MSPHI_O]])
513; CALLS-NEXT:    store i32 [[T_0]], ptr [[A]], align 4
514; CALLS-NEXT:    ret void
515;
516entry:
517  %tobool = icmp eq ptr %b, null
518  br i1 %tobool, label %if.else, label %if.then
519
520  if.then:                                          ; preds = %entry
521  %0 = load i32, ptr %b, align 4
522  br label %if.end
523
524  if.else:                                          ; preds = %entry
525  %1 = load i32, ptr %c, align 4
526  br label %if.end
527
528  if.end:                                           ; preds = %if.else, %if.then
529  %t.0 = phi i32 [ %0, %if.then ], [ %1, %if.else ]
530  store i32 %t.0, ptr %a, align 4
531  ret void
532}
533
534; Compute shadow for "x << 10"
535define void @ShlConst(ptr nocapture %x) nounwind uwtable sanitize_memory {
536; CHECK-LABEL: define void @ShlConst(
537; CHECK-SAME: ptr captures(none) [[X:%.*]]) #[[ATTR0]] {
538; CHECK-NEXT:  [[ENTRY:.*:]]
539; CHECK-NEXT:    call void @llvm.donothing()
540; CHECK-NEXT:    [[TMP0:%.*]] = load i32, ptr [[X]], align 4
541; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[X]] to i64
542; CHECK-NEXT:    [[TMP2:%.*]] = xor i64 [[TMP1]], 87960930222080
543; CHECK-NEXT:    [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to ptr
544; CHECK-NEXT:    [[_MSLD:%.*]] = load i32, ptr [[TMP3]], align 4
545; CHECK-NEXT:    [[TMP4:%.*]] = shl i32 [[_MSLD]], 10
546; CHECK-NEXT:    [[TMP5:%.*]] = or i32 [[TMP4]], 0
547; CHECK-NEXT:    [[TMP6:%.*]] = shl i32 [[TMP0]], 10
548; CHECK-NEXT:    [[TMP7:%.*]] = ptrtoint ptr [[X]] to i64
549; CHECK-NEXT:    [[TMP8:%.*]] = xor i64 [[TMP7]], 87960930222080
550; CHECK-NEXT:    [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr
551; CHECK-NEXT:    store i32 [[TMP5]], ptr [[TMP9]], align 4
552; CHECK-NEXT:    store i32 [[TMP6]], ptr [[X]], align 4
553; CHECK-NEXT:    ret void
554;
555; ORIGIN-LABEL: define void @ShlConst(
556; ORIGIN-SAME: ptr captures(none) [[X:%.*]]) #[[ATTR0]] {
557; ORIGIN-NEXT:  [[ENTRY:.*:]]
558; ORIGIN-NEXT:    call void @llvm.donothing()
559; ORIGIN-NEXT:    [[TMP0:%.*]] = load i32, ptr [[X]], align 4
560; ORIGIN-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[X]] to i64
561; ORIGIN-NEXT:    [[TMP2:%.*]] = xor i64 [[TMP1]], 87960930222080
562; ORIGIN-NEXT:    [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to ptr
563; ORIGIN-NEXT:    [[TMP4:%.*]] = add i64 [[TMP2]], 17592186044416
564; ORIGIN-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
565; ORIGIN-NEXT:    [[_MSLD:%.*]] = load i32, ptr [[TMP3]], align 4
566; ORIGIN-NEXT:    [[TMP6:%.*]] = load i32, ptr [[TMP5]], align 4
567; ORIGIN-NEXT:    [[TMP7:%.*]] = shl i32 [[_MSLD]], 10
568; ORIGIN-NEXT:    [[TMP8:%.*]] = or i32 [[TMP7]], 0
569; ORIGIN-NEXT:    [[TMP9:%.*]] = shl i32 [[TMP0]], 10
570; ORIGIN-NEXT:    [[TMP10:%.*]] = ptrtoint ptr [[X]] to i64
571; ORIGIN-NEXT:    [[TMP11:%.*]] = xor i64 [[TMP10]], 87960930222080
572; ORIGIN-NEXT:    [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
573; ORIGIN-NEXT:    [[TMP13:%.*]] = add i64 [[TMP11]], 17592186044416
574; ORIGIN-NEXT:    [[TMP14:%.*]] = inttoptr i64 [[TMP13]] to ptr
575; ORIGIN-NEXT:    store i32 [[TMP8]], ptr [[TMP12]], align 4
576; ORIGIN-NEXT:    [[_MSCMP:%.*]] = icmp ne i32 [[TMP8]], 0
577; ORIGIN-NEXT:    br i1 [[_MSCMP]], label %[[BB15:.*]], label %[[BB16:.*]], !prof [[PROF1]]
578; ORIGIN:       [[BB15]]:
579; ORIGIN-NEXT:    store i32 [[TMP6]], ptr [[TMP14]], align 4
580; ORIGIN-NEXT:    br label %[[BB16]]
581; ORIGIN:       [[BB16]]:
582; ORIGIN-NEXT:    store i32 [[TMP9]], ptr [[X]], align 4
583; ORIGIN-NEXT:    ret void
584;
585; CALLS-LABEL: define void @ShlConst(
586; CALLS-SAME: ptr captures(none) [[X:%.*]]) #[[ATTR0]] {
587; CALLS-NEXT:  [[ENTRY:.*:]]
588; CALLS-NEXT:    [[TMP0:%.*]] = load i64, ptr @__msan_param_tls, align 8
589; CALLS-NEXT:    [[TMP1:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
590; CALLS-NEXT:    call void @llvm.donothing()
591; CALLS-NEXT:    call void @__msan_maybe_warning_8(i64 zeroext [[TMP0]], i32 zeroext [[TMP1]])
592; CALLS-NEXT:    [[TMP2:%.*]] = load i32, ptr [[X]], align 4
593; CALLS-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[X]] to i64
594; CALLS-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 87960930222080
595; CALLS-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
596; CALLS-NEXT:    [[TMP6:%.*]] = add i64 [[TMP4]], 17592186044416
597; CALLS-NEXT:    [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
598; CALLS-NEXT:    [[_MSLD:%.*]] = load i32, ptr [[TMP5]], align 4
599; CALLS-NEXT:    [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4
600; CALLS-NEXT:    [[TMP9:%.*]] = shl i32 [[_MSLD]], 10
601; CALLS-NEXT:    [[TMP10:%.*]] = or i32 [[TMP9]], 0
602; CALLS-NEXT:    [[TMP11:%.*]] = shl i32 [[TMP2]], 10
603; CALLS-NEXT:    call void @__msan_maybe_warning_8(i64 zeroext [[TMP0]], i32 zeroext [[TMP1]])
604; CALLS-NEXT:    [[TMP12:%.*]] = ptrtoint ptr [[X]] to i64
605; CALLS-NEXT:    [[TMP13:%.*]] = xor i64 [[TMP12]], 87960930222080
606; CALLS-NEXT:    [[TMP14:%.*]] = inttoptr i64 [[TMP13]] to ptr
607; CALLS-NEXT:    [[TMP15:%.*]] = add i64 [[TMP13]], 17592186044416
608; CALLS-NEXT:    [[TMP16:%.*]] = inttoptr i64 [[TMP15]] to ptr
609; CALLS-NEXT:    store i32 [[TMP10]], ptr [[TMP14]], align 4
610; CALLS-NEXT:    call void @__msan_maybe_store_origin_4(i32 zeroext [[TMP10]], ptr [[X]], i32 zeroext [[TMP8]])
611; CALLS-NEXT:    store i32 [[TMP11]], ptr [[X]], align 4
612; CALLS-NEXT:    ret void
613;
614entry:
615  %0 = load i32, ptr %x, align 4
616  %1 = shl i32 %0, 10
617  store i32 %1, ptr %x, align 4
618  ret void
619}
620
621
622; Compute shadow for "10 << x": it should have 'sext i1'.
623define void @ShlNonConst(ptr nocapture %x) nounwind uwtable sanitize_memory {
624; CHECK-LABEL: define void @ShlNonConst(
625; CHECK-SAME: ptr captures(none) [[X:%.*]]) #[[ATTR0]] {
626; CHECK-NEXT:  [[ENTRY:.*:]]
627; CHECK-NEXT:    call void @llvm.donothing()
628; CHECK-NEXT:    [[TMP0:%.*]] = load i32, ptr [[X]], align 4
629; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[X]] to i64
630; CHECK-NEXT:    [[TMP2:%.*]] = xor i64 [[TMP1]], 87960930222080
631; CHECK-NEXT:    [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to ptr
632; CHECK-NEXT:    [[_MSLD:%.*]] = load i32, ptr [[TMP3]], align 4
633; CHECK-NEXT:    [[TMP4:%.*]] = icmp ne i32 [[_MSLD]], 0
634; CHECK-NEXT:    [[TMP5:%.*]] = sext i1 [[TMP4]] to i32
635; CHECK-NEXT:    [[TMP6:%.*]] = shl i32 0, [[TMP0]]
636; CHECK-NEXT:    [[TMP7:%.*]] = or i32 [[TMP6]], [[TMP5]]
637; CHECK-NEXT:    [[TMP8:%.*]] = shl i32 10, [[TMP0]]
638; CHECK-NEXT:    [[TMP9:%.*]] = ptrtoint ptr [[X]] to i64
639; CHECK-NEXT:    [[TMP10:%.*]] = xor i64 [[TMP9]], 87960930222080
640; CHECK-NEXT:    [[TMP11:%.*]] = inttoptr i64 [[TMP10]] to ptr
641; CHECK-NEXT:    store i32 [[TMP7]], ptr [[TMP11]], align 4
642; CHECK-NEXT:    store i32 [[TMP8]], ptr [[X]], align 4
643; CHECK-NEXT:    ret void
644;
645; ORIGIN-LABEL: define void @ShlNonConst(
646; ORIGIN-SAME: ptr captures(none) [[X:%.*]]) #[[ATTR0]] {
647; ORIGIN-NEXT:  [[ENTRY:.*:]]
648; ORIGIN-NEXT:    call void @llvm.donothing()
649; ORIGIN-NEXT:    [[TMP0:%.*]] = load i32, ptr [[X]], align 4
650; ORIGIN-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[X]] to i64
651; ORIGIN-NEXT:    [[TMP2:%.*]] = xor i64 [[TMP1]], 87960930222080
652; ORIGIN-NEXT:    [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to ptr
653; ORIGIN-NEXT:    [[TMP4:%.*]] = add i64 [[TMP2]], 17592186044416
654; ORIGIN-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
655; ORIGIN-NEXT:    [[_MSLD:%.*]] = load i32, ptr [[TMP3]], align 4
656; ORIGIN-NEXT:    [[TMP6:%.*]] = load i32, ptr [[TMP5]], align 4
657; ORIGIN-NEXT:    [[TMP7:%.*]] = icmp ne i32 [[_MSLD]], 0
658; ORIGIN-NEXT:    [[TMP8:%.*]] = sext i1 [[TMP7]] to i32
659; ORIGIN-NEXT:    [[TMP9:%.*]] = shl i32 0, [[TMP0]]
660; ORIGIN-NEXT:    [[TMP10:%.*]] = or i32 [[TMP9]], [[TMP8]]
661; ORIGIN-NEXT:    [[TMP11:%.*]] = icmp ne i32 [[_MSLD]], 0
662; ORIGIN-NEXT:    [[TMP12:%.*]] = select i1 [[TMP11]], i32 [[TMP6]], i32 0
663; ORIGIN-NEXT:    [[TMP13:%.*]] = shl i32 10, [[TMP0]]
664; ORIGIN-NEXT:    [[TMP14:%.*]] = ptrtoint ptr [[X]] to i64
665; ORIGIN-NEXT:    [[TMP15:%.*]] = xor i64 [[TMP14]], 87960930222080
666; ORIGIN-NEXT:    [[TMP16:%.*]] = inttoptr i64 [[TMP15]] to ptr
667; ORIGIN-NEXT:    [[TMP17:%.*]] = add i64 [[TMP15]], 17592186044416
668; ORIGIN-NEXT:    [[TMP18:%.*]] = inttoptr i64 [[TMP17]] to ptr
669; ORIGIN-NEXT:    store i32 [[TMP10]], ptr [[TMP16]], align 4
670; ORIGIN-NEXT:    [[_MSCMP:%.*]] = icmp ne i32 [[TMP10]], 0
671; ORIGIN-NEXT:    br i1 [[_MSCMP]], label %[[BB19:.*]], label %[[BB20:.*]], !prof [[PROF1]]
672; ORIGIN:       [[BB19]]:
673; ORIGIN-NEXT:    store i32 [[TMP12]], ptr [[TMP18]], align 4
674; ORIGIN-NEXT:    br label %[[BB20]]
675; ORIGIN:       [[BB20]]:
676; ORIGIN-NEXT:    store i32 [[TMP13]], ptr [[X]], align 4
677; ORIGIN-NEXT:    ret void
678;
679; CALLS-LABEL: define void @ShlNonConst(
680; CALLS-SAME: ptr captures(none) [[X:%.*]]) #[[ATTR0]] {
681; CALLS-NEXT:  [[ENTRY:.*:]]
682; CALLS-NEXT:    [[TMP0:%.*]] = load i64, ptr @__msan_param_tls, align 8
683; CALLS-NEXT:    [[TMP1:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
684; CALLS-NEXT:    call void @llvm.donothing()
685; CALLS-NEXT:    call void @__msan_maybe_warning_8(i64 zeroext [[TMP0]], i32 zeroext [[TMP1]])
686; CALLS-NEXT:    [[TMP2:%.*]] = load i32, ptr [[X]], align 4
687; CALLS-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[X]] to i64
688; CALLS-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 87960930222080
689; CALLS-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
690; CALLS-NEXT:    [[TMP6:%.*]] = add i64 [[TMP4]], 17592186044416
691; CALLS-NEXT:    [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
692; CALLS-NEXT:    [[_MSLD:%.*]] = load i32, ptr [[TMP5]], align 4
693; CALLS-NEXT:    [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4
694; CALLS-NEXT:    [[TMP9:%.*]] = icmp ne i32 [[_MSLD]], 0
695; CALLS-NEXT:    [[TMP10:%.*]] = sext i1 [[TMP9]] to i32
696; CALLS-NEXT:    [[TMP11:%.*]] = shl i32 0, [[TMP2]]
697; CALLS-NEXT:    [[TMP12:%.*]] = or i32 [[TMP11]], [[TMP10]]
698; CALLS-NEXT:    [[TMP13:%.*]] = icmp ne i32 [[_MSLD]], 0
699; CALLS-NEXT:    [[TMP14:%.*]] = select i1 [[TMP13]], i32 [[TMP8]], i32 0
700; CALLS-NEXT:    [[TMP15:%.*]] = shl i32 10, [[TMP2]]
701; CALLS-NEXT:    call void @__msan_maybe_warning_8(i64 zeroext [[TMP0]], i32 zeroext [[TMP1]])
702; CALLS-NEXT:    [[TMP16:%.*]] = ptrtoint ptr [[X]] to i64
703; CALLS-NEXT:    [[TMP17:%.*]] = xor i64 [[TMP16]], 87960930222080
704; CALLS-NEXT:    [[TMP18:%.*]] = inttoptr i64 [[TMP17]] to ptr
705; CALLS-NEXT:    [[TMP19:%.*]] = add i64 [[TMP17]], 17592186044416
706; CALLS-NEXT:    [[TMP20:%.*]] = inttoptr i64 [[TMP19]] to ptr
707; CALLS-NEXT:    store i32 [[TMP12]], ptr [[TMP18]], align 4
708; CALLS-NEXT:    call void @__msan_maybe_store_origin_4(i32 zeroext [[TMP12]], ptr [[X]], i32 zeroext [[TMP14]])
709; CALLS-NEXT:    store i32 [[TMP15]], ptr [[X]], align 4
710; CALLS-NEXT:    ret void
711;
712entry:
713  %0 = load i32, ptr %x, align 4
714  %1 = shl i32 10, %0
715  store i32 %1, ptr %x, align 4
716  ret void
717}
718
719
720; SExt
721define void @SExt(ptr nocapture %a, ptr nocapture %b) nounwind uwtable sanitize_memory {
722; CHECK-LABEL: define void @SExt(
723; CHECK-SAME: ptr captures(none) [[A:%.*]], ptr captures(none) [[B:%.*]]) #[[ATTR0]] {
724; CHECK-NEXT:  [[ENTRY:.*:]]
725; CHECK-NEXT:    call void @llvm.donothing()
726; CHECK-NEXT:    [[TMP0:%.*]] = load i16, ptr [[B]], align 2
727; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[B]] to i64
728; CHECK-NEXT:    [[TMP2:%.*]] = xor i64 [[TMP1]], 87960930222080
729; CHECK-NEXT:    [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to ptr
730; CHECK-NEXT:    [[_MSLD:%.*]] = load i16, ptr [[TMP3]], align 2
731; CHECK-NEXT:    [[_MSPROP:%.*]] = sext i16 [[_MSLD]] to i32
732; CHECK-NEXT:    [[TMP4:%.*]] = sext i16 [[TMP0]] to i32
733; CHECK-NEXT:    [[TMP5:%.*]] = ptrtoint ptr [[A]] to i64
734; CHECK-NEXT:    [[TMP6:%.*]] = xor i64 [[TMP5]], 87960930222080
735; CHECK-NEXT:    [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
736; CHECK-NEXT:    store i32 [[_MSPROP]], ptr [[TMP7]], align 4
737; CHECK-NEXT:    store i32 [[TMP4]], ptr [[A]], align 4
738; CHECK-NEXT:    ret void
739;
740; ORIGIN-LABEL: define void @SExt(
741; ORIGIN-SAME: ptr captures(none) [[A:%.*]], ptr captures(none) [[B:%.*]]) #[[ATTR0]] {
742; ORIGIN-NEXT:  [[ENTRY:.*:]]
743; ORIGIN-NEXT:    call void @llvm.donothing()
744; ORIGIN-NEXT:    [[TMP0:%.*]] = load i16, ptr [[B]], align 2
745; ORIGIN-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[B]] to i64
746; ORIGIN-NEXT:    [[TMP2:%.*]] = xor i64 [[TMP1]], 87960930222080
747; ORIGIN-NEXT:    [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to ptr
748; ORIGIN-NEXT:    [[TMP4:%.*]] = add i64 [[TMP2]], 17592186044416
749; ORIGIN-NEXT:    [[TMP5:%.*]] = and i64 [[TMP4]], -4
750; ORIGIN-NEXT:    [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
751; ORIGIN-NEXT:    [[_MSLD:%.*]] = load i16, ptr [[TMP3]], align 2
752; ORIGIN-NEXT:    [[TMP7:%.*]] = load i32, ptr [[TMP6]], align 4
753; ORIGIN-NEXT:    [[_MSPROP:%.*]] = sext i16 [[_MSLD]] to i32
754; ORIGIN-NEXT:    [[TMP8:%.*]] = sext i16 [[TMP0]] to i32
755; ORIGIN-NEXT:    [[TMP9:%.*]] = ptrtoint ptr [[A]] to i64
756; ORIGIN-NEXT:    [[TMP10:%.*]] = xor i64 [[TMP9]], 87960930222080
757; ORIGIN-NEXT:    [[TMP11:%.*]] = inttoptr i64 [[TMP10]] to ptr
758; ORIGIN-NEXT:    [[TMP12:%.*]] = add i64 [[TMP10]], 17592186044416
759; ORIGIN-NEXT:    [[TMP13:%.*]] = inttoptr i64 [[TMP12]] to ptr
760; ORIGIN-NEXT:    store i32 [[_MSPROP]], ptr [[TMP11]], align 4
761; ORIGIN-NEXT:    [[_MSCMP:%.*]] = icmp ne i32 [[_MSPROP]], 0
762; ORIGIN-NEXT:    br i1 [[_MSCMP]], label %[[BB14:.*]], label %[[BB15:.*]], !prof [[PROF1]]
763; ORIGIN:       [[BB14]]:
764; ORIGIN-NEXT:    store i32 [[TMP7]], ptr [[TMP13]], align 4
765; ORIGIN-NEXT:    br label %[[BB15]]
766; ORIGIN:       [[BB15]]:
767; ORIGIN-NEXT:    store i32 [[TMP8]], ptr [[A]], align 4
768; ORIGIN-NEXT:    ret void
769;
770; CALLS-LABEL: define void @SExt(
771; CALLS-SAME: ptr captures(none) [[A:%.*]], ptr captures(none) [[B:%.*]]) #[[ATTR0]] {
772; CALLS-NEXT:  [[ENTRY:.*:]]
773; CALLS-NEXT:    [[TMP0:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
774; CALLS-NEXT:    [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4
775; CALLS-NEXT:    [[TMP2:%.*]] = load i64, ptr @__msan_param_tls, align 8
776; CALLS-NEXT:    [[TMP3:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
777; CALLS-NEXT:    call void @llvm.donothing()
778; CALLS-NEXT:    call void @__msan_maybe_warning_8(i64 zeroext [[TMP0]], i32 zeroext [[TMP1]])
779; CALLS-NEXT:    [[TMP4:%.*]] = load i16, ptr [[B]], align 2
780; CALLS-NEXT:    [[TMP5:%.*]] = ptrtoint ptr [[B]] to i64
781; CALLS-NEXT:    [[TMP6:%.*]] = xor i64 [[TMP5]], 87960930222080
782; CALLS-NEXT:    [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
783; CALLS-NEXT:    [[TMP8:%.*]] = add i64 [[TMP6]], 17592186044416
784; CALLS-NEXT:    [[TMP9:%.*]] = and i64 [[TMP8]], -4
785; CALLS-NEXT:    [[TMP10:%.*]] = inttoptr i64 [[TMP9]] to ptr
786; CALLS-NEXT:    [[_MSLD:%.*]] = load i16, ptr [[TMP7]], align 2
787; CALLS-NEXT:    [[TMP11:%.*]] = load i32, ptr [[TMP10]], align 4
788; CALLS-NEXT:    [[_MSPROP:%.*]] = sext i16 [[_MSLD]] to i32
789; CALLS-NEXT:    [[TMP12:%.*]] = sext i16 [[TMP4]] to i32
790; CALLS-NEXT:    call void @__msan_maybe_warning_8(i64 zeroext [[TMP2]], i32 zeroext [[TMP3]])
791; CALLS-NEXT:    [[TMP13:%.*]] = ptrtoint ptr [[A]] to i64
792; CALLS-NEXT:    [[TMP14:%.*]] = xor i64 [[TMP13]], 87960930222080
793; CALLS-NEXT:    [[TMP15:%.*]] = inttoptr i64 [[TMP14]] to ptr
794; CALLS-NEXT:    [[TMP16:%.*]] = add i64 [[TMP14]], 17592186044416
795; CALLS-NEXT:    [[TMP17:%.*]] = inttoptr i64 [[TMP16]] to ptr
796; CALLS-NEXT:    store i32 [[_MSPROP]], ptr [[TMP15]], align 4
797; CALLS-NEXT:    call void @__msan_maybe_store_origin_4(i32 zeroext [[_MSPROP]], ptr [[A]], i32 zeroext [[TMP11]])
798; CALLS-NEXT:    store i32 [[TMP12]], ptr [[A]], align 4
799; CALLS-NEXT:    ret void
800;
801entry:
802  %0 = load i16, ptr %b, align 2
803  %1 = sext i16 %0 to i32
804  store i32 %1, ptr %a, align 4
805  ret void
806}
807
808
809
810; memset
811define void @MemSet(ptr nocapture %x) nounwind uwtable sanitize_memory {
812; CHECK-LABEL: define void @MemSet(
813; CHECK-SAME: ptr captures(none) [[X:%.*]]) #[[ATTR0]] {
814; CHECK-NEXT:  [[ENTRY:.*:]]
815; CHECK-NEXT:    call void @llvm.donothing()
816; CHECK-NEXT:    [[TMP0:%.*]] = call ptr @__msan_memset(ptr [[X]], i32 42, i64 10)
817; CHECK-NEXT:    ret void
818;
819; ORIGIN-LABEL: define void @MemSet(
820; ORIGIN-SAME: ptr captures(none) [[X:%.*]]) #[[ATTR0]] {
821; ORIGIN-NEXT:  [[ENTRY:.*:]]
822; ORIGIN-NEXT:    call void @llvm.donothing()
823; ORIGIN-NEXT:    [[TMP0:%.*]] = call ptr @__msan_memset(ptr [[X]], i32 42, i64 10)
824; ORIGIN-NEXT:    ret void
825;
826; CALLS-LABEL: define void @MemSet(
827; CALLS-SAME: ptr captures(none) [[X:%.*]]) #[[ATTR0]] {
828; CALLS-NEXT:  [[ENTRY:.*:]]
829; CALLS-NEXT:    call void @llvm.donothing()
830; CALLS-NEXT:    [[TMP0:%.*]] = call ptr @__msan_memset(ptr [[X]], i32 42, i64 10)
831; CALLS-NEXT:    ret void
832;
833entry:
834  call void @llvm.memset.p0.i64(ptr %x, i8 42, i64 10, i1 false)
835  ret void
836}
837
838declare void @llvm.memset.p0.i64(ptr nocapture, i8, i64, i1) nounwind
839
840
841
842; memcpy
843define void @MemCpy(ptr nocapture %x, ptr nocapture %y) nounwind uwtable sanitize_memory {
844; CHECK-LABEL: define void @MemCpy(
845; CHECK-SAME: ptr captures(none) [[X:%.*]], ptr captures(none) [[Y:%.*]]) #[[ATTR0]] {
846; CHECK-NEXT:  [[ENTRY:.*:]]
847; CHECK-NEXT:    [[TMP0:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
848; CHECK-NEXT:    call void @llvm.donothing()
849; CHECK-NEXT:    [[TMP1:%.*]] = call ptr @__msan_memcpy(ptr [[X]], ptr [[Y]], i64 10)
850; CHECK-NEXT:    ret void
851;
852; ORIGIN-LABEL: define void @MemCpy(
853; ORIGIN-SAME: ptr captures(none) [[X:%.*]], ptr captures(none) [[Y:%.*]]) #[[ATTR0]] {
854; ORIGIN-NEXT:  [[ENTRY:.*:]]
855; ORIGIN-NEXT:    [[TMP0:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
856; ORIGIN-NEXT:    [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4
857; ORIGIN-NEXT:    call void @llvm.donothing()
858; ORIGIN-NEXT:    [[TMP2:%.*]] = call ptr @__msan_memcpy(ptr [[X]], ptr [[Y]], i64 10)
859; ORIGIN-NEXT:    ret void
860;
861; CALLS-LABEL: define void @MemCpy(
862; CALLS-SAME: ptr captures(none) [[X:%.*]], ptr captures(none) [[Y:%.*]]) #[[ATTR0]] {
863; CALLS-NEXT:  [[ENTRY:.*:]]
864; CALLS-NEXT:    [[TMP0:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
865; CALLS-NEXT:    [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4
866; CALLS-NEXT:    call void @llvm.donothing()
867; CALLS-NEXT:    [[TMP2:%.*]] = call ptr @__msan_memcpy(ptr [[X]], ptr [[Y]], i64 10)
868; CALLS-NEXT:    ret void
869;
870entry:
871  call void @llvm.memcpy.p0.p0.i64(ptr %x, ptr %y, i64 10, i1 false)
872  ret void
873}
874
875declare void @llvm.memcpy.p0.p0.i64(ptr nocapture, ptr nocapture, i64, i1) nounwind
876
877
878; memset.inline
879define void @MemSetInline(ptr nocapture %x) nounwind uwtable sanitize_memory {
880; CHECK-LABEL: define void @MemSetInline(
881; CHECK-SAME: ptr captures(none) [[X:%.*]]) #[[ATTR0]] {
882; CHECK-NEXT:  [[ENTRY:.*:]]
883; CHECK-NEXT:    call void @llvm.donothing()
884; CHECK-NEXT:    [[TMP0:%.*]] = call ptr @__msan_memset(ptr [[X]], i32 42, i64 10)
885; CHECK-NEXT:    ret void
886;
887; ORIGIN-LABEL: define void @MemSetInline(
888; ORIGIN-SAME: ptr captures(none) [[X:%.*]]) #[[ATTR0]] {
889; ORIGIN-NEXT:  [[ENTRY:.*:]]
890; ORIGIN-NEXT:    call void @llvm.donothing()
891; ORIGIN-NEXT:    [[TMP0:%.*]] = call ptr @__msan_memset(ptr [[X]], i32 42, i64 10)
892; ORIGIN-NEXT:    ret void
893;
894; CALLS-LABEL: define void @MemSetInline(
895; CALLS-SAME: ptr captures(none) [[X:%.*]]) #[[ATTR0]] {
896; CALLS-NEXT:  [[ENTRY:.*:]]
897; CALLS-NEXT:    call void @llvm.donothing()
898; CALLS-NEXT:    [[TMP0:%.*]] = call ptr @__msan_memset(ptr [[X]], i32 42, i64 10)
899; CALLS-NEXT:    ret void
900;
901entry:
902  call void @llvm.memset.inline.p0.i64(ptr %x, i8 42, i64 10, i1 false)
903  ret void
904}
905
906declare void @llvm.memset.inline.p0.i64(ptr nocapture, i8, i64, i1) nounwind
907
908
909; memcpy.inline
910define void @MemCpyInline(ptr nocapture %x, ptr nocapture %y) nounwind uwtable sanitize_memory {
911; CHECK-LABEL: define void @MemCpyInline(
912; CHECK-SAME: ptr captures(none) [[X:%.*]], ptr captures(none) [[Y:%.*]]) #[[ATTR0]] {
913; CHECK-NEXT:  [[ENTRY:.*:]]
914; CHECK-NEXT:    [[TMP0:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
915; CHECK-NEXT:    call void @llvm.donothing()
916; CHECK-NEXT:    [[TMP1:%.*]] = call ptr @__msan_memcpy(ptr [[X]], ptr [[Y]], i64 10)
917; CHECK-NEXT:    ret void
918;
919; ORIGIN-LABEL: define void @MemCpyInline(
920; ORIGIN-SAME: ptr captures(none) [[X:%.*]], ptr captures(none) [[Y:%.*]]) #[[ATTR0]] {
921; ORIGIN-NEXT:  [[ENTRY:.*:]]
922; ORIGIN-NEXT:    [[TMP0:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
923; ORIGIN-NEXT:    [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4
924; ORIGIN-NEXT:    call void @llvm.donothing()
925; ORIGIN-NEXT:    [[TMP2:%.*]] = call ptr @__msan_memcpy(ptr [[X]], ptr [[Y]], i64 10)
926; ORIGIN-NEXT:    ret void
927;
928; CALLS-LABEL: define void @MemCpyInline(
929; CALLS-SAME: ptr captures(none) [[X:%.*]], ptr captures(none) [[Y:%.*]]) #[[ATTR0]] {
930; CALLS-NEXT:  [[ENTRY:.*:]]
931; CALLS-NEXT:    [[TMP0:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
932; CALLS-NEXT:    [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4
933; CALLS-NEXT:    call void @llvm.donothing()
934; CALLS-NEXT:    [[TMP2:%.*]] = call ptr @__msan_memcpy(ptr [[X]], ptr [[Y]], i64 10)
935; CALLS-NEXT:    ret void
936;
937entry:
938  call void @llvm.memcpy.inline.p0.p0.i64(ptr %x, ptr %y, i64 10, i1 false)
939  ret void
940}
941
942declare void @llvm.memcpy.inline.p0.p0.i64(ptr nocapture, ptr nocapture, i64, i1) nounwind
943
944
945; memmove is lowered to a call
946define void @MemMove(ptr nocapture %x, ptr nocapture %y) nounwind uwtable sanitize_memory {
947; CHECK-LABEL: define void @MemMove(
948; CHECK-SAME: ptr captures(none) [[X:%.*]], ptr captures(none) [[Y:%.*]]) #[[ATTR0]] {
949; CHECK-NEXT:  [[ENTRY:.*:]]
950; CHECK-NEXT:    [[TMP0:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
951; CHECK-NEXT:    call void @llvm.donothing()
952; CHECK-NEXT:    [[TMP1:%.*]] = call ptr @__msan_memmove(ptr [[X]], ptr [[Y]], i64 10)
953; CHECK-NEXT:    ret void
954;
955; ORIGIN-LABEL: define void @MemMove(
956; ORIGIN-SAME: ptr captures(none) [[X:%.*]], ptr captures(none) [[Y:%.*]]) #[[ATTR0]] {
957; ORIGIN-NEXT:  [[ENTRY:.*:]]
958; ORIGIN-NEXT:    [[TMP0:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
959; ORIGIN-NEXT:    [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4
960; ORIGIN-NEXT:    call void @llvm.donothing()
961; ORIGIN-NEXT:    [[TMP2:%.*]] = call ptr @__msan_memmove(ptr [[X]], ptr [[Y]], i64 10)
962; ORIGIN-NEXT:    ret void
963;
964; CALLS-LABEL: define void @MemMove(
965; CALLS-SAME: ptr captures(none) [[X:%.*]], ptr captures(none) [[Y:%.*]]) #[[ATTR0]] {
966; CALLS-NEXT:  [[ENTRY:.*:]]
967; CALLS-NEXT:    [[TMP0:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
968; CALLS-NEXT:    [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4
969; CALLS-NEXT:    call void @llvm.donothing()
970; CALLS-NEXT:    [[TMP2:%.*]] = call ptr @__msan_memmove(ptr [[X]], ptr [[Y]], i64 10)
971; CALLS-NEXT:    ret void
972;
973entry:
974  call void @llvm.memmove.p0.p0.i64(ptr %x, ptr %y, i64 10, i1 false)
975  ret void
976}
977
978declare void @llvm.memmove.p0.p0.i64(ptr nocapture, ptr nocapture, i64, i1) nounwind
979
980
981;; ------------
982;; Placeholder tests that will fail once element atomic @llvm.mem[cpy|move|set] intrinsics have
983;; been added to the MemIntrinsic class hierarchy. These will act as a reminder to
984;; verify that MSAN handles these intrinsics properly once they have been
985;; added to that class hierarchy.
986declare void @llvm.memset.element.unordered.atomic.p0.i64(ptr nocapture writeonly, i8, i64, i32) nounwind
987declare void @llvm.memmove.element.unordered.atomic.p0.p0.i64(ptr nocapture writeonly, ptr nocapture readonly, i64, i32) nounwind
988declare void @llvm.memcpy.element.unordered.atomic.p0.p0.i64(ptr nocapture writeonly, ptr nocapture readonly, i64, i32) nounwind
989
990define void @atomic_memcpy(ptr nocapture %x, ptr nocapture %y) nounwind {
991; CHECK-LABEL: define void @atomic_memcpy(
992; CHECK-SAME: ptr captures(none) [[X:%.*]], ptr captures(none) [[Y:%.*]]) #[[ATTR5]] {
993; CHECK-NEXT:    call void @llvm.donothing()
994; CHECK-NEXT:    call void @llvm.memcpy.element.unordered.atomic.p0.p0.i64(ptr align 1 [[X]], ptr align 2 [[Y]], i64 16, i32 1)
995; CHECK-NEXT:    ret void
996;
997; ORIGIN-LABEL: define void @atomic_memcpy(
998; ORIGIN-SAME: ptr captures(none) [[X:%.*]], ptr captures(none) [[Y:%.*]]) #[[ATTR5]] {
999; ORIGIN-NEXT:    call void @llvm.donothing()
1000; ORIGIN-NEXT:    call void @llvm.memcpy.element.unordered.atomic.p0.p0.i64(ptr align 1 [[X]], ptr align 2 [[Y]], i64 16, i32 1)
1001; ORIGIN-NEXT:    ret void
1002;
1003; CALLS-LABEL: define void @atomic_memcpy(
1004; CALLS-SAME: ptr captures(none) [[X:%.*]], ptr captures(none) [[Y:%.*]]) #[[ATTR5]] {
1005; CALLS-NEXT:    call void @llvm.donothing()
1006; CALLS-NEXT:    call void @llvm.memcpy.element.unordered.atomic.p0.p0.i64(ptr align 1 [[X]], ptr align 2 [[Y]], i64 16, i32 1)
1007; CALLS-NEXT:    ret void
1008;
1009  call void @llvm.memcpy.element.unordered.atomic.p0.p0.i64(ptr align 1 %x, ptr align 2 %y, i64 16, i32 1)
1010  ret void
1011}
1012
1013define void @atomic_memmove(ptr nocapture %x, ptr nocapture %y) nounwind {
1014; CHECK-LABEL: define void @atomic_memmove(
1015; CHECK-SAME: ptr captures(none) [[X:%.*]], ptr captures(none) [[Y:%.*]]) #[[ATTR5]] {
1016; CHECK-NEXT:    call void @llvm.donothing()
1017; CHECK-NEXT:    call void @llvm.memmove.element.unordered.atomic.p0.p0.i64(ptr align 1 [[X]], ptr align 2 [[Y]], i64 16, i32 1)
1018; CHECK-NEXT:    ret void
1019;
1020; ORIGIN-LABEL: define void @atomic_memmove(
1021; ORIGIN-SAME: ptr captures(none) [[X:%.*]], ptr captures(none) [[Y:%.*]]) #[[ATTR5]] {
1022; ORIGIN-NEXT:    call void @llvm.donothing()
1023; ORIGIN-NEXT:    call void @llvm.memmove.element.unordered.atomic.p0.p0.i64(ptr align 1 [[X]], ptr align 2 [[Y]], i64 16, i32 1)
1024; ORIGIN-NEXT:    ret void
1025;
1026; CALLS-LABEL: define void @atomic_memmove(
1027; CALLS-SAME: ptr captures(none) [[X:%.*]], ptr captures(none) [[Y:%.*]]) #[[ATTR5]] {
1028; CALLS-NEXT:    call void @llvm.donothing()
1029; CALLS-NEXT:    call void @llvm.memmove.element.unordered.atomic.p0.p0.i64(ptr align 1 [[X]], ptr align 2 [[Y]], i64 16, i32 1)
1030; CALLS-NEXT:    ret void
1031;
1032  call void @llvm.memmove.element.unordered.atomic.p0.p0.i64(ptr align 1 %x, ptr align 2 %y, i64 16, i32 1)
1033  ret void
1034}
1035
1036define void @atomic_memset(ptr nocapture %x) nounwind {
1037; CHECK-LABEL: define void @atomic_memset(
1038; CHECK-SAME: ptr captures(none) [[X:%.*]]) #[[ATTR5]] {
1039; CHECK-NEXT:    call void @llvm.donothing()
1040; CHECK-NEXT:    call void @llvm.memset.element.unordered.atomic.p0.i64(ptr align 1 [[X]], i8 88, i64 16, i32 1)
1041; CHECK-NEXT:    ret void
1042;
1043; ORIGIN-LABEL: define void @atomic_memset(
1044; ORIGIN-SAME: ptr captures(none) [[X:%.*]]) #[[ATTR5]] {
1045; ORIGIN-NEXT:    call void @llvm.donothing()
1046; ORIGIN-NEXT:    call void @llvm.memset.element.unordered.atomic.p0.i64(ptr align 1 [[X]], i8 88, i64 16, i32 1)
1047; ORIGIN-NEXT:    ret void
1048;
1049; CALLS-LABEL: define void @atomic_memset(
1050; CALLS-SAME: ptr captures(none) [[X:%.*]]) #[[ATTR5]] {
1051; CALLS-NEXT:    call void @llvm.donothing()
1052; CALLS-NEXT:    call void @llvm.memset.element.unordered.atomic.p0.i64(ptr align 1 [[X]], i8 88, i64 16, i32 1)
1053; CALLS-NEXT:    ret void
1054;
1055  call void @llvm.memset.element.unordered.atomic.p0.i64(ptr align 1 %x, i8 88, i64 16, i32 1)
1056  ret void
1057}
1058
1059;; ------------
1060
1061
1062; Check that we propagate shadow for "select"
1063
1064define i32 @Select(i32 %a, i32 %b, i1 %c) nounwind uwtable readnone sanitize_memory {
1065; CHECK-LABEL: define i32 @Select(
1066; CHECK-SAME: i32 [[A:%.*]], i32 [[B:%.*]], i1 [[C:%.*]]) #[[ATTR0]] {
1067; CHECK-NEXT:  [[ENTRY:.*:]]
1068; CHECK-NEXT:    [[TMP0:%.*]] = load i1, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
1069; CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr @__msan_param_tls, align 8
1070; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
1071; CHECK-NEXT:    call void @llvm.donothing()
1072; CHECK-NEXT:    [[TMP3:%.*]] = select i1 [[C]], i32 [[TMP1]], i32 [[TMP2]]
1073; CHECK-NEXT:    [[TMP4:%.*]] = xor i32 [[A]], [[B]]
1074; CHECK-NEXT:    [[TMP5:%.*]] = or i32 [[TMP4]], [[TMP1]]
1075; CHECK-NEXT:    [[TMP6:%.*]] = or i32 [[TMP5]], [[TMP2]]
1076; CHECK-NEXT:    [[_MSPROP_SELECT:%.*]] = select i1 [[TMP0]], i32 [[TMP6]], i32 [[TMP3]]
1077; CHECK-NEXT:    [[COND:%.*]] = select i1 [[C]], i32 [[A]], i32 [[B]]
1078; CHECK-NEXT:    store i32 [[_MSPROP_SELECT]], ptr @__msan_retval_tls, align 8
1079; CHECK-NEXT:    ret i32 [[COND]]
1080;
1081; ORIGIN-LABEL: define i32 @Select(
1082; ORIGIN-SAME: i32 [[A:%.*]], i32 [[B:%.*]], i1 [[C:%.*]]) #[[ATTR0]] {
1083; ORIGIN-NEXT:  [[ENTRY:.*:]]
1084; ORIGIN-NEXT:    [[TMP0:%.*]] = load i1, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
1085; ORIGIN-NEXT:    [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4
1086; ORIGIN-NEXT:    [[TMP2:%.*]] = load i32, ptr @__msan_param_tls, align 8
1087; ORIGIN-NEXT:    [[TMP3:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
1088; ORIGIN-NEXT:    [[TMP4:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
1089; ORIGIN-NEXT:    [[TMP5:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4
1090; ORIGIN-NEXT:    call void @llvm.donothing()
1091; ORIGIN-NEXT:    [[TMP6:%.*]] = select i1 [[C]], i32 [[TMP2]], i32 [[TMP4]]
1092; ORIGIN-NEXT:    [[TMP7:%.*]] = xor i32 [[A]], [[B]]
1093; ORIGIN-NEXT:    [[TMP8:%.*]] = or i32 [[TMP7]], [[TMP2]]
1094; ORIGIN-NEXT:    [[TMP9:%.*]] = or i32 [[TMP8]], [[TMP4]]
1095; ORIGIN-NEXT:    [[_MSPROP_SELECT:%.*]] = select i1 [[TMP0]], i32 [[TMP9]], i32 [[TMP6]]
1096; ORIGIN-NEXT:    [[TMP10:%.*]] = select i1 [[C]], i32 [[TMP3]], i32 [[TMP5]]
1097; ORIGIN-NEXT:    [[TMP11:%.*]] = select i1 [[TMP0]], i32 [[TMP1]], i32 [[TMP10]]
1098; ORIGIN-NEXT:    [[COND:%.*]] = select i1 [[C]], i32 [[A]], i32 [[B]]
1099; ORIGIN-NEXT:    store i32 [[_MSPROP_SELECT]], ptr @__msan_retval_tls, align 8
1100; ORIGIN-NEXT:    store i32 [[TMP11]], ptr @__msan_retval_origin_tls, align 4
1101; ORIGIN-NEXT:    ret i32 [[COND]]
1102;
1103; CALLS-LABEL: define i32 @Select(
1104; CALLS-SAME: i32 [[A:%.*]], i32 [[B:%.*]], i1 [[C:%.*]]) #[[ATTR0]] {
1105; CALLS-NEXT:  [[ENTRY:.*:]]
1106; CALLS-NEXT:    [[TMP0:%.*]] = load i1, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
1107; CALLS-NEXT:    [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4
1108; CALLS-NEXT:    [[TMP2:%.*]] = load i32, ptr @__msan_param_tls, align 8
1109; CALLS-NEXT:    [[TMP3:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
1110; CALLS-NEXT:    [[TMP4:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
1111; CALLS-NEXT:    [[TMP5:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4
1112; CALLS-NEXT:    call void @llvm.donothing()
1113; CALLS-NEXT:    [[TMP6:%.*]] = select i1 [[C]], i32 [[TMP2]], i32 [[TMP4]]
1114; CALLS-NEXT:    [[TMP7:%.*]] = xor i32 [[A]], [[B]]
1115; CALLS-NEXT:    [[TMP8:%.*]] = or i32 [[TMP7]], [[TMP2]]
1116; CALLS-NEXT:    [[TMP9:%.*]] = or i32 [[TMP8]], [[TMP4]]
1117; CALLS-NEXT:    [[_MSPROP_SELECT:%.*]] = select i1 [[TMP0]], i32 [[TMP9]], i32 [[TMP6]]
1118; CALLS-NEXT:    [[TMP10:%.*]] = select i1 [[C]], i32 [[TMP3]], i32 [[TMP5]]
1119; CALLS-NEXT:    [[TMP11:%.*]] = select i1 [[TMP0]], i32 [[TMP1]], i32 [[TMP10]]
1120; CALLS-NEXT:    [[COND:%.*]] = select i1 [[C]], i32 [[A]], i32 [[B]]
1121; CALLS-NEXT:    store i32 [[_MSPROP_SELECT]], ptr @__msan_retval_tls, align 8
1122; CALLS-NEXT:    store i32 [[TMP11]], ptr @__msan_retval_origin_tls, align 4
1123; CALLS-NEXT:    ret i32 [[COND]]
1124;
1125entry:
1126  %cond = select i1 %c, i32 %a, i32 %b
1127  ret i32 %cond
1128}
1129
1130; Check that we propagate origin for "select" with vector condition.
1131; Select condition is flattened to i1, which is then used to select one of the
1132; argument origins.
1133
1134define <8 x i16> @SelectVector(<8 x i16> %a, <8 x i16> %b, <8 x i1> %c) nounwind uwtable readnone sanitize_memory {
1135; CHECK-LABEL: define <8 x i16> @SelectVector(
1136; CHECK-SAME: <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], <8 x i1> [[C:%.*]]) #[[ATTR0]] {
1137; CHECK-NEXT:  [[ENTRY:.*:]]
1138; CHECK-NEXT:    [[TMP0:%.*]] = load <8 x i1>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
1139; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
1140; CHECK-NEXT:    [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
1141; CHECK-NEXT:    call void @llvm.donothing()
1142; CHECK-NEXT:    [[TMP3:%.*]] = select <8 x i1> [[C]], <8 x i16> [[TMP1]], <8 x i16> [[TMP2]]
1143; CHECK-NEXT:    [[TMP4:%.*]] = xor <8 x i16> [[A]], [[B]]
1144; CHECK-NEXT:    [[TMP5:%.*]] = or <8 x i16> [[TMP4]], [[TMP1]]
1145; CHECK-NEXT:    [[TMP6:%.*]] = or <8 x i16> [[TMP5]], [[TMP2]]
1146; CHECK-NEXT:    [[_MSPROP_SELECT:%.*]] = select <8 x i1> [[TMP0]], <8 x i16> [[TMP6]], <8 x i16> [[TMP3]]
1147; CHECK-NEXT:    [[COND:%.*]] = select <8 x i1> [[C]], <8 x i16> [[A]], <8 x i16> [[B]]
1148; CHECK-NEXT:    store <8 x i16> [[_MSPROP_SELECT]], ptr @__msan_retval_tls, align 8
1149; CHECK-NEXT:    ret <8 x i16> [[COND]]
1150;
1151; ORIGIN-LABEL: define <8 x i16> @SelectVector(
1152; ORIGIN-SAME: <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], <8 x i1> [[C:%.*]]) #[[ATTR0]] {
1153; ORIGIN-NEXT:  [[ENTRY:.*:]]
1154; ORIGIN-NEXT:    [[TMP0:%.*]] = load <8 x i1>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
1155; ORIGIN-NEXT:    [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 32) to ptr), align 4
1156; ORIGIN-NEXT:    [[TMP2:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
1157; ORIGIN-NEXT:    [[TMP3:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
1158; ORIGIN-NEXT:    [[TMP4:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
1159; ORIGIN-NEXT:    [[TMP5:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4
1160; ORIGIN-NEXT:    call void @llvm.donothing()
1161; ORIGIN-NEXT:    [[TMP6:%.*]] = select <8 x i1> [[C]], <8 x i16> [[TMP2]], <8 x i16> [[TMP4]]
1162; ORIGIN-NEXT:    [[TMP7:%.*]] = xor <8 x i16> [[A]], [[B]]
1163; ORIGIN-NEXT:    [[TMP8:%.*]] = or <8 x i16> [[TMP7]], [[TMP2]]
1164; ORIGIN-NEXT:    [[TMP9:%.*]] = or <8 x i16> [[TMP8]], [[TMP4]]
1165; ORIGIN-NEXT:    [[_MSPROP_SELECT:%.*]] = select <8 x i1> [[TMP0]], <8 x i16> [[TMP9]], <8 x i16> [[TMP6]]
1166; ORIGIN-NEXT:    [[TMP10:%.*]] = bitcast <8 x i1> [[C]] to i8
1167; ORIGIN-NEXT:    [[TMP11:%.*]] = icmp ne i8 [[TMP10]], 0
1168; ORIGIN-NEXT:    [[TMP12:%.*]] = bitcast <8 x i1> [[TMP0]] to i8
1169; ORIGIN-NEXT:    [[TMP13:%.*]] = icmp ne i8 [[TMP12]], 0
1170; ORIGIN-NEXT:    [[TMP14:%.*]] = select i1 [[TMP11]], i32 [[TMP3]], i32 [[TMP5]]
1171; ORIGIN-NEXT:    [[TMP15:%.*]] = select i1 [[TMP13]], i32 [[TMP1]], i32 [[TMP14]]
1172; ORIGIN-NEXT:    [[COND:%.*]] = select <8 x i1> [[C]], <8 x i16> [[A]], <8 x i16> [[B]]
1173; ORIGIN-NEXT:    store <8 x i16> [[_MSPROP_SELECT]], ptr @__msan_retval_tls, align 8
1174; ORIGIN-NEXT:    store i32 [[TMP15]], ptr @__msan_retval_origin_tls, align 4
1175; ORIGIN-NEXT:    ret <8 x i16> [[COND]]
1176;
1177; CALLS-LABEL: define <8 x i16> @SelectVector(
1178; CALLS-SAME: <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], <8 x i1> [[C:%.*]]) #[[ATTR0]] {
1179; CALLS-NEXT:  [[ENTRY:.*:]]
1180; CALLS-NEXT:    [[TMP0:%.*]] = load <8 x i1>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
1181; CALLS-NEXT:    [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 32) to ptr), align 4
1182; CALLS-NEXT:    [[TMP2:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
1183; CALLS-NEXT:    [[TMP3:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
1184; CALLS-NEXT:    [[TMP4:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
1185; CALLS-NEXT:    [[TMP5:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4
1186; CALLS-NEXT:    call void @llvm.donothing()
1187; CALLS-NEXT:    [[TMP6:%.*]] = select <8 x i1> [[C]], <8 x i16> [[TMP2]], <8 x i16> [[TMP4]]
1188; CALLS-NEXT:    [[TMP7:%.*]] = xor <8 x i16> [[A]], [[B]]
1189; CALLS-NEXT:    [[TMP8:%.*]] = or <8 x i16> [[TMP7]], [[TMP2]]
1190; CALLS-NEXT:    [[TMP9:%.*]] = or <8 x i16> [[TMP8]], [[TMP4]]
1191; CALLS-NEXT:    [[_MSPROP_SELECT:%.*]] = select <8 x i1> [[TMP0]], <8 x i16> [[TMP9]], <8 x i16> [[TMP6]]
1192; CALLS-NEXT:    [[TMP10:%.*]] = bitcast <8 x i1> [[C]] to i8
1193; CALLS-NEXT:    [[TMP11:%.*]] = icmp ne i8 [[TMP10]], 0
1194; CALLS-NEXT:    [[TMP12:%.*]] = bitcast <8 x i1> [[TMP0]] to i8
1195; CALLS-NEXT:    [[TMP13:%.*]] = icmp ne i8 [[TMP12]], 0
1196; CALLS-NEXT:    [[TMP14:%.*]] = select i1 [[TMP11]], i32 [[TMP3]], i32 [[TMP5]]
1197; CALLS-NEXT:    [[TMP15:%.*]] = select i1 [[TMP13]], i32 [[TMP1]], i32 [[TMP14]]
1198; CALLS-NEXT:    [[COND:%.*]] = select <8 x i1> [[C]], <8 x i16> [[A]], <8 x i16> [[B]]
1199; CALLS-NEXT:    store <8 x i16> [[_MSPROP_SELECT]], ptr @__msan_retval_tls, align 8
1200; CALLS-NEXT:    store i32 [[TMP15]], ptr @__msan_retval_origin_tls, align 4
1201; CALLS-NEXT:    ret <8 x i16> [[COND]]
1202;
1203entry:
1204  %cond = select <8 x i1> %c, <8 x i16> %a, <8 x i16> %b
1205  ret <8 x i16> %cond
1206}
1207
1208; Check that we propagate origin for "select" with scalar condition and vector
1209; arguments. Select condition shadow is sign-extended to the vector type and
1210; mixed into the result shadow.
1211
1212define <8 x i16> @SelectVector2(<8 x i16> %a, <8 x i16> %b, i1 %c) nounwind uwtable readnone sanitize_memory {
1213; CHECK-LABEL: define <8 x i16> @SelectVector2(
1214; CHECK-SAME: <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], i1 [[C:%.*]]) #[[ATTR0]] {
1215; CHECK-NEXT:  [[ENTRY:.*:]]
1216; CHECK-NEXT:    [[TMP0:%.*]] = load i1, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
1217; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
1218; CHECK-NEXT:    [[TMP2:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
1219; CHECK-NEXT:    call void @llvm.donothing()
1220; CHECK-NEXT:    [[TMP3:%.*]] = select i1 [[C]], <8 x i16> [[TMP1]], <8 x i16> [[TMP2]]
1221; CHECK-NEXT:    [[TMP4:%.*]] = xor <8 x i16> [[A]], [[B]]
1222; CHECK-NEXT:    [[TMP5:%.*]] = or <8 x i16> [[TMP4]], [[TMP1]]
1223; CHECK-NEXT:    [[TMP6:%.*]] = or <8 x i16> [[TMP5]], [[TMP2]]
1224; CHECK-NEXT:    [[_MSPROP_SELECT:%.*]] = select i1 [[TMP0]], <8 x i16> [[TMP6]], <8 x i16> [[TMP3]]
1225; CHECK-NEXT:    [[COND:%.*]] = select i1 [[C]], <8 x i16> [[A]], <8 x i16> [[B]]
1226; CHECK-NEXT:    store <8 x i16> [[_MSPROP_SELECT]], ptr @__msan_retval_tls, align 8
1227; CHECK-NEXT:    ret <8 x i16> [[COND]]
1228;
1229; ORIGIN-LABEL: define <8 x i16> @SelectVector2(
1230; ORIGIN-SAME: <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], i1 [[C:%.*]]) #[[ATTR0]] {
1231; ORIGIN-NEXT:  [[ENTRY:.*:]]
1232; ORIGIN-NEXT:    [[TMP0:%.*]] = load i1, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
1233; ORIGIN-NEXT:    [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 32) to ptr), align 4
1234; ORIGIN-NEXT:    [[TMP2:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
1235; ORIGIN-NEXT:    [[TMP3:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
1236; ORIGIN-NEXT:    [[TMP4:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
1237; ORIGIN-NEXT:    [[TMP5:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4
1238; ORIGIN-NEXT:    call void @llvm.donothing()
1239; ORIGIN-NEXT:    [[TMP6:%.*]] = select i1 [[C]], <8 x i16> [[TMP2]], <8 x i16> [[TMP4]]
1240; ORIGIN-NEXT:    [[TMP7:%.*]] = xor <8 x i16> [[A]], [[B]]
1241; ORIGIN-NEXT:    [[TMP8:%.*]] = or <8 x i16> [[TMP7]], [[TMP2]]
1242; ORIGIN-NEXT:    [[TMP9:%.*]] = or <8 x i16> [[TMP8]], [[TMP4]]
1243; ORIGIN-NEXT:    [[_MSPROP_SELECT:%.*]] = select i1 [[TMP0]], <8 x i16> [[TMP9]], <8 x i16> [[TMP6]]
1244; ORIGIN-NEXT:    [[TMP10:%.*]] = select i1 [[C]], i32 [[TMP3]], i32 [[TMP5]]
1245; ORIGIN-NEXT:    [[TMP11:%.*]] = select i1 [[TMP0]], i32 [[TMP1]], i32 [[TMP10]]
1246; ORIGIN-NEXT:    [[COND:%.*]] = select i1 [[C]], <8 x i16> [[A]], <8 x i16> [[B]]
1247; ORIGIN-NEXT:    store <8 x i16> [[_MSPROP_SELECT]], ptr @__msan_retval_tls, align 8
1248; ORIGIN-NEXT:    store i32 [[TMP11]], ptr @__msan_retval_origin_tls, align 4
1249; ORIGIN-NEXT:    ret <8 x i16> [[COND]]
1250;
1251; CALLS-LABEL: define <8 x i16> @SelectVector2(
1252; CALLS-SAME: <8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], i1 [[C:%.*]]) #[[ATTR0]] {
1253; CALLS-NEXT:  [[ENTRY:.*:]]
1254; CALLS-NEXT:    [[TMP0:%.*]] = load i1, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
1255; CALLS-NEXT:    [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 32) to ptr), align 4
1256; CALLS-NEXT:    [[TMP2:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
1257; CALLS-NEXT:    [[TMP3:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
1258; CALLS-NEXT:    [[TMP4:%.*]] = load <8 x i16>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
1259; CALLS-NEXT:    [[TMP5:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4
1260; CALLS-NEXT:    call void @llvm.donothing()
1261; CALLS-NEXT:    [[TMP6:%.*]] = select i1 [[C]], <8 x i16> [[TMP2]], <8 x i16> [[TMP4]]
1262; CALLS-NEXT:    [[TMP7:%.*]] = xor <8 x i16> [[A]], [[B]]
1263; CALLS-NEXT:    [[TMP8:%.*]] = or <8 x i16> [[TMP7]], [[TMP2]]
1264; CALLS-NEXT:    [[TMP9:%.*]] = or <8 x i16> [[TMP8]], [[TMP4]]
1265; CALLS-NEXT:    [[_MSPROP_SELECT:%.*]] = select i1 [[TMP0]], <8 x i16> [[TMP9]], <8 x i16> [[TMP6]]
1266; CALLS-NEXT:    [[TMP10:%.*]] = select i1 [[C]], i32 [[TMP3]], i32 [[TMP5]]
1267; CALLS-NEXT:    [[TMP11:%.*]] = select i1 [[TMP0]], i32 [[TMP1]], i32 [[TMP10]]
1268; CALLS-NEXT:    [[COND:%.*]] = select i1 [[C]], <8 x i16> [[A]], <8 x i16> [[B]]
1269; CALLS-NEXT:    store <8 x i16> [[_MSPROP_SELECT]], ptr @__msan_retval_tls, align 8
1270; CALLS-NEXT:    store i32 [[TMP11]], ptr @__msan_retval_origin_tls, align 4
1271; CALLS-NEXT:    ret <8 x i16> [[COND]]
1272;
1273entry:
1274  %cond = select i1 %c, <8 x i16> %a, <8 x i16> %b
1275  ret <8 x i16> %cond
1276}
1277
1278define { i64, i64 } @SelectStruct(i1 zeroext %x, { i64, i64 } %a, { i64, i64 } %b) readnone sanitize_memory {
1279; CHECK-LABEL: define { i64, i64 } @SelectStruct(
1280; CHECK-SAME: i1 zeroext [[X:%.*]], { i64, i64 } [[A:%.*]], { i64, i64 } [[B:%.*]]) #[[ATTR6:[0-9]+]] {
1281; CHECK-NEXT:  [[ENTRY:.*:]]
1282; CHECK-NEXT:    [[TMP0:%.*]] = load i1, ptr @__msan_param_tls, align 8
1283; CHECK-NEXT:    [[TMP1:%.*]] = load { i64, i64 }, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
1284; CHECK-NEXT:    [[TMP2:%.*]] = load { i64, i64 }, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
1285; CHECK-NEXT:    call void @llvm.donothing()
1286; CHECK-NEXT:    [[TMP3:%.*]] = select i1 [[X]], { i64, i64 } [[TMP1]], { i64, i64 } [[TMP2]]
1287; CHECK-NEXT:    [[_MSPROP_SELECT:%.*]] = select i1 [[TMP0]], { i64, i64 } { i64 -1, i64 -1 }, { i64, i64 } [[TMP3]]
1288; CHECK-NEXT:    [[C:%.*]] = select i1 [[X]], { i64, i64 } [[A]], { i64, i64 } [[B]]
1289; CHECK-NEXT:    store { i64, i64 } [[_MSPROP_SELECT]], ptr @__msan_retval_tls, align 8
1290; CHECK-NEXT:    ret { i64, i64 } [[C]]
1291;
1292; ORIGIN-LABEL: define { i64, i64 } @SelectStruct(
1293; ORIGIN-SAME: i1 zeroext [[X:%.*]], { i64, i64 } [[A:%.*]], { i64, i64 } [[B:%.*]]) #[[ATTR6:[0-9]+]] {
1294; ORIGIN-NEXT:  [[ENTRY:.*:]]
1295; ORIGIN-NEXT:    [[TMP0:%.*]] = load i1, ptr @__msan_param_tls, align 8
1296; ORIGIN-NEXT:    [[TMP1:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
1297; ORIGIN-NEXT:    [[TMP2:%.*]] = load { i64, i64 }, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
1298; ORIGIN-NEXT:    [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4
1299; ORIGIN-NEXT:    [[TMP4:%.*]] = load { i64, i64 }, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
1300; ORIGIN-NEXT:    [[TMP5:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 24) to ptr), align 4
1301; ORIGIN-NEXT:    call void @llvm.donothing()
1302; ORIGIN-NEXT:    [[TMP6:%.*]] = select i1 [[X]], { i64, i64 } [[TMP2]], { i64, i64 } [[TMP4]]
1303; ORIGIN-NEXT:    [[_MSPROP_SELECT:%.*]] = select i1 [[TMP0]], { i64, i64 } { i64 -1, i64 -1 }, { i64, i64 } [[TMP6]]
1304; ORIGIN-NEXT:    [[TMP7:%.*]] = select i1 [[X]], i32 [[TMP3]], i32 [[TMP5]]
1305; ORIGIN-NEXT:    [[TMP8:%.*]] = select i1 [[TMP0]], i32 [[TMP1]], i32 [[TMP7]]
1306; ORIGIN-NEXT:    [[C:%.*]] = select i1 [[X]], { i64, i64 } [[A]], { i64, i64 } [[B]]
1307; ORIGIN-NEXT:    store { i64, i64 } [[_MSPROP_SELECT]], ptr @__msan_retval_tls, align 8
1308; ORIGIN-NEXT:    store i32 [[TMP8]], ptr @__msan_retval_origin_tls, align 4
1309; ORIGIN-NEXT:    ret { i64, i64 } [[C]]
1310;
1311; CALLS-LABEL: define { i64, i64 } @SelectStruct(
1312; CALLS-SAME: i1 zeroext [[X:%.*]], { i64, i64 } [[A:%.*]], { i64, i64 } [[B:%.*]]) #[[ATTR6:[0-9]+]] {
1313; CALLS-NEXT:  [[ENTRY:.*:]]
1314; CALLS-NEXT:    [[TMP0:%.*]] = load i1, ptr @__msan_param_tls, align 8
1315; CALLS-NEXT:    [[TMP1:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
1316; CALLS-NEXT:    [[TMP2:%.*]] = load { i64, i64 }, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
1317; CALLS-NEXT:    [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4
1318; CALLS-NEXT:    [[TMP4:%.*]] = load { i64, i64 }, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
1319; CALLS-NEXT:    [[TMP5:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 24) to ptr), align 4
1320; CALLS-NEXT:    call void @llvm.donothing()
1321; CALLS-NEXT:    [[TMP6:%.*]] = select i1 [[X]], { i64, i64 } [[TMP2]], { i64, i64 } [[TMP4]]
1322; CALLS-NEXT:    [[_MSPROP_SELECT:%.*]] = select i1 [[TMP0]], { i64, i64 } { i64 -1, i64 -1 }, { i64, i64 } [[TMP6]]
1323; CALLS-NEXT:    [[TMP7:%.*]] = select i1 [[X]], i32 [[TMP3]], i32 [[TMP5]]
1324; CALLS-NEXT:    [[TMP8:%.*]] = select i1 [[TMP0]], i32 [[TMP1]], i32 [[TMP7]]
1325; CALLS-NEXT:    [[C:%.*]] = select i1 [[X]], { i64, i64 } [[A]], { i64, i64 } [[B]]
1326; CALLS-NEXT:    store { i64, i64 } [[_MSPROP_SELECT]], ptr @__msan_retval_tls, align 8
1327; CALLS-NEXT:    store i32 [[TMP8]], ptr @__msan_retval_origin_tls, align 4
1328; CALLS-NEXT:    ret { i64, i64 } [[C]]
1329;
1330entry:
1331  %c = select i1 %x, { i64, i64 } %a, { i64, i64 } %b
1332  ret { i64, i64 } %c
1333}
1334
1335define { ptr, double } @SelectStruct2(i1 zeroext %x, { ptr, double } %a, { ptr, double } %b) readnone sanitize_memory {
1336; CHECK-LABEL: define { ptr, double } @SelectStruct2(
1337; CHECK-SAME: i1 zeroext [[X:%.*]], { ptr, double } [[A:%.*]], { ptr, double } [[B:%.*]]) #[[ATTR6]] {
1338; CHECK-NEXT:  [[ENTRY:.*:]]
1339; CHECK-NEXT:    [[TMP0:%.*]] = load i1, ptr @__msan_param_tls, align 8
1340; CHECK-NEXT:    [[TMP1:%.*]] = load { i64, i64 }, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
1341; CHECK-NEXT:    [[TMP2:%.*]] = load { i64, i64 }, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
1342; CHECK-NEXT:    call void @llvm.donothing()
1343; CHECK-NEXT:    [[TMP3:%.*]] = select i1 [[X]], { i64, i64 } [[TMP1]], { i64, i64 } [[TMP2]]
1344; CHECK-NEXT:    [[_MSPROP_SELECT:%.*]] = select i1 [[TMP0]], { i64, i64 } { i64 -1, i64 -1 }, { i64, i64 } [[TMP3]]
1345; CHECK-NEXT:    [[C:%.*]] = select i1 [[X]], { ptr, double } [[A]], { ptr, double } [[B]]
1346; CHECK-NEXT:    store { i64, i64 } [[_MSPROP_SELECT]], ptr @__msan_retval_tls, align 8
1347; CHECK-NEXT:    ret { ptr, double } [[C]]
1348;
1349; ORIGIN-LABEL: define { ptr, double } @SelectStruct2(
1350; ORIGIN-SAME: i1 zeroext [[X:%.*]], { ptr, double } [[A:%.*]], { ptr, double } [[B:%.*]]) #[[ATTR6]] {
1351; ORIGIN-NEXT:  [[ENTRY:.*:]]
1352; ORIGIN-NEXT:    [[TMP0:%.*]] = load i1, ptr @__msan_param_tls, align 8
1353; ORIGIN-NEXT:    [[TMP1:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
1354; ORIGIN-NEXT:    [[TMP2:%.*]] = load { i64, i64 }, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
1355; ORIGIN-NEXT:    [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4
1356; ORIGIN-NEXT:    [[TMP4:%.*]] = load { i64, i64 }, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
1357; ORIGIN-NEXT:    [[TMP5:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 24) to ptr), align 4
1358; ORIGIN-NEXT:    call void @llvm.donothing()
1359; ORIGIN-NEXT:    [[TMP6:%.*]] = select i1 [[X]], { i64, i64 } [[TMP2]], { i64, i64 } [[TMP4]]
1360; ORIGIN-NEXT:    [[_MSPROP_SELECT:%.*]] = select i1 [[TMP0]], { i64, i64 } { i64 -1, i64 -1 }, { i64, i64 } [[TMP6]]
1361; ORIGIN-NEXT:    [[TMP7:%.*]] = select i1 [[X]], i32 [[TMP3]], i32 [[TMP5]]
1362; ORIGIN-NEXT:    [[TMP8:%.*]] = select i1 [[TMP0]], i32 [[TMP1]], i32 [[TMP7]]
1363; ORIGIN-NEXT:    [[C:%.*]] = select i1 [[X]], { ptr, double } [[A]], { ptr, double } [[B]]
1364; ORIGIN-NEXT:    store { i64, i64 } [[_MSPROP_SELECT]], ptr @__msan_retval_tls, align 8
1365; ORIGIN-NEXT:    store i32 [[TMP8]], ptr @__msan_retval_origin_tls, align 4
1366; ORIGIN-NEXT:    ret { ptr, double } [[C]]
1367;
1368; CALLS-LABEL: define { ptr, double } @SelectStruct2(
1369; CALLS-SAME: i1 zeroext [[X:%.*]], { ptr, double } [[A:%.*]], { ptr, double } [[B:%.*]]) #[[ATTR6]] {
1370; CALLS-NEXT:  [[ENTRY:.*:]]
1371; CALLS-NEXT:    [[TMP0:%.*]] = load i1, ptr @__msan_param_tls, align 8
1372; CALLS-NEXT:    [[TMP1:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
1373; CALLS-NEXT:    [[TMP2:%.*]] = load { i64, i64 }, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
1374; CALLS-NEXT:    [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4
1375; CALLS-NEXT:    [[TMP4:%.*]] = load { i64, i64 }, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
1376; CALLS-NEXT:    [[TMP5:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 24) to ptr), align 4
1377; CALLS-NEXT:    call void @llvm.donothing()
1378; CALLS-NEXT:    [[TMP6:%.*]] = select i1 [[X]], { i64, i64 } [[TMP2]], { i64, i64 } [[TMP4]]
1379; CALLS-NEXT:    [[_MSPROP_SELECT:%.*]] = select i1 [[TMP0]], { i64, i64 } { i64 -1, i64 -1 }, { i64, i64 } [[TMP6]]
1380; CALLS-NEXT:    [[TMP7:%.*]] = select i1 [[X]], i32 [[TMP3]], i32 [[TMP5]]
1381; CALLS-NEXT:    [[TMP8:%.*]] = select i1 [[TMP0]], i32 [[TMP1]], i32 [[TMP7]]
1382; CALLS-NEXT:    [[C:%.*]] = select i1 [[X]], { ptr, double } [[A]], { ptr, double } [[B]]
1383; CALLS-NEXT:    store { i64, i64 } [[_MSPROP_SELECT]], ptr @__msan_retval_tls, align 8
1384; CALLS-NEXT:    store i32 [[TMP8]], ptr @__msan_retval_origin_tls, align 4
1385; CALLS-NEXT:    ret { ptr, double } [[C]]
1386;
1387entry:
1388  %c = select i1 %x, { ptr, double } %a, { ptr, double } %b
1389  ret { ptr, double } %c
1390}
1391
1392define ptr @IntToPtr(i64 %x) nounwind uwtable readnone sanitize_memory {
1393; CHECK-LABEL: define ptr @IntToPtr(
1394; CHECK-SAME: i64 [[X:%.*]]) #[[ATTR0]] {
1395; CHECK-NEXT:  [[ENTRY:.*:]]
1396; CHECK-NEXT:    [[TMP0:%.*]] = load i64, ptr @__msan_param_tls, align 8
1397; CHECK-NEXT:    call void @llvm.donothing()
1398; CHECK-NEXT:    [[TMP1:%.*]] = inttoptr i64 [[X]] to ptr
1399; CHECK-NEXT:    store i64 [[TMP0]], ptr @__msan_retval_tls, align 8
1400; CHECK-NEXT:    ret ptr [[TMP1]]
1401;
1402; ORIGIN-LABEL: define ptr @IntToPtr(
1403; ORIGIN-SAME: i64 [[X:%.*]]) #[[ATTR0]] {
1404; ORIGIN-NEXT:  [[ENTRY:.*:]]
1405; ORIGIN-NEXT:    [[TMP0:%.*]] = load i64, ptr @__msan_param_tls, align 8
1406; ORIGIN-NEXT:    [[TMP1:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
1407; ORIGIN-NEXT:    call void @llvm.donothing()
1408; ORIGIN-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[X]] to ptr
1409; ORIGIN-NEXT:    store i64 [[TMP0]], ptr @__msan_retval_tls, align 8
1410; ORIGIN-NEXT:    store i32 [[TMP1]], ptr @__msan_retval_origin_tls, align 4
1411; ORIGIN-NEXT:    ret ptr [[TMP2]]
1412;
1413; CALLS-LABEL: define ptr @IntToPtr(
1414; CALLS-SAME: i64 [[X:%.*]]) #[[ATTR0]] {
1415; CALLS-NEXT:  [[ENTRY:.*:]]
1416; CALLS-NEXT:    [[TMP0:%.*]] = load i64, ptr @__msan_param_tls, align 8
1417; CALLS-NEXT:    [[TMP1:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
1418; CALLS-NEXT:    call void @llvm.donothing()
1419; CALLS-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[X]] to ptr
1420; CALLS-NEXT:    store i64 [[TMP0]], ptr @__msan_retval_tls, align 8
1421; CALLS-NEXT:    store i32 [[TMP1]], ptr @__msan_retval_origin_tls, align 4
1422; CALLS-NEXT:    ret ptr [[TMP2]]
1423;
1424entry:
1425  %0 = inttoptr i64 %x to ptr
1426  ret ptr %0
1427}
1428
1429define ptr @IntToPtr_ZExt(i16 %x) nounwind uwtable readnone sanitize_memory {
1430; CHECK-LABEL: define ptr @IntToPtr_ZExt(
1431; CHECK-SAME: i16 [[X:%.*]]) #[[ATTR0]] {
1432; CHECK-NEXT:  [[ENTRY:.*:]]
1433; CHECK-NEXT:    [[TMP0:%.*]] = load i16, ptr @__msan_param_tls, align 8
1434; CHECK-NEXT:    call void @llvm.donothing()
1435; CHECK-NEXT:    [[_MSPROP_INTTOPTR:%.*]] = zext i16 [[TMP0]] to i64
1436; CHECK-NEXT:    [[TMP1:%.*]] = inttoptr i16 [[X]] to ptr
1437; CHECK-NEXT:    store i64 [[_MSPROP_INTTOPTR]], ptr @__msan_retval_tls, align 8
1438; CHECK-NEXT:    ret ptr [[TMP1]]
1439;
1440; ORIGIN-LABEL: define ptr @IntToPtr_ZExt(
1441; ORIGIN-SAME: i16 [[X:%.*]]) #[[ATTR0]] {
1442; ORIGIN-NEXT:  [[ENTRY:.*:]]
1443; ORIGIN-NEXT:    [[TMP0:%.*]] = load i16, ptr @__msan_param_tls, align 8
1444; ORIGIN-NEXT:    [[TMP1:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
1445; ORIGIN-NEXT:    call void @llvm.donothing()
1446; ORIGIN-NEXT:    [[_MSPROP_INTTOPTR:%.*]] = zext i16 [[TMP0]] to i64
1447; ORIGIN-NEXT:    [[TMP2:%.*]] = inttoptr i16 [[X]] to ptr
1448; ORIGIN-NEXT:    store i64 [[_MSPROP_INTTOPTR]], ptr @__msan_retval_tls, align 8
1449; ORIGIN-NEXT:    store i32 [[TMP1]], ptr @__msan_retval_origin_tls, align 4
1450; ORIGIN-NEXT:    ret ptr [[TMP2]]
1451;
1452; CALLS-LABEL: define ptr @IntToPtr_ZExt(
1453; CALLS-SAME: i16 [[X:%.*]]) #[[ATTR0]] {
1454; CALLS-NEXT:  [[ENTRY:.*:]]
1455; CALLS-NEXT:    [[TMP0:%.*]] = load i16, ptr @__msan_param_tls, align 8
1456; CALLS-NEXT:    [[TMP1:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
1457; CALLS-NEXT:    call void @llvm.donothing()
1458; CALLS-NEXT:    [[_MSPROP_INTTOPTR:%.*]] = zext i16 [[TMP0]] to i64
1459; CALLS-NEXT:    [[TMP2:%.*]] = inttoptr i16 [[X]] to ptr
1460; CALLS-NEXT:    store i64 [[_MSPROP_INTTOPTR]], ptr @__msan_retval_tls, align 8
1461; CALLS-NEXT:    store i32 [[TMP1]], ptr @__msan_retval_origin_tls, align 4
1462; CALLS-NEXT:    ret ptr [[TMP2]]
1463;
1464entry:
1465  %0 = inttoptr i16 %x to ptr
1466  ret ptr %0
1467}
1468
1469
1470
1471; Check that we insert exactly one check on udiv
1472; (2nd arg shadow is checked, 1st arg shadow is propagated)
1473
1474define i32 @Div(i32 %a, i32 %b) nounwind uwtable readnone sanitize_memory {
1475; CHECK-LABEL: define i32 @Div(
1476; CHECK-SAME: i32 [[A:%.*]], i32 [[B:%.*]]) #[[ATTR0]] {
1477; CHECK-NEXT:  [[ENTRY:.*:]]
1478; CHECK-NEXT:    [[TMP0:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
1479; CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr @__msan_param_tls, align 8
1480; CHECK-NEXT:    call void @llvm.donothing()
1481; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i32 [[TMP0]], 0
1482; CHECK-NEXT:    br i1 [[_MSCMP]], label %[[BB2:.*]], label %[[BB3:.*]], !prof [[PROF1]]
1483; CHECK:       [[BB2]]:
1484; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR12]]
1485; CHECK-NEXT:    unreachable
1486; CHECK:       [[BB3]]:
1487; CHECK-NEXT:    [[DIV:%.*]] = udiv i32 [[A]], [[B]]
1488; CHECK-NEXT:    store i32 [[TMP1]], ptr @__msan_retval_tls, align 8
1489; CHECK-NEXT:    ret i32 [[DIV]]
1490;
1491; ORIGIN-LABEL: define i32 @Div(
1492; ORIGIN-SAME: i32 [[A:%.*]], i32 [[B:%.*]]) #[[ATTR0]] {
1493; ORIGIN-NEXT:  [[ENTRY:.*:]]
1494; ORIGIN-NEXT:    [[TMP0:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
1495; ORIGIN-NEXT:    [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4
1496; ORIGIN-NEXT:    [[TMP2:%.*]] = load i32, ptr @__msan_param_tls, align 8
1497; ORIGIN-NEXT:    [[TMP3:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
1498; ORIGIN-NEXT:    call void @llvm.donothing()
1499; ORIGIN-NEXT:    [[_MSCMP:%.*]] = icmp ne i32 [[TMP0]], 0
1500; ORIGIN-NEXT:    br i1 [[_MSCMP]], label %[[BB4:.*]], label %[[BB5:.*]], !prof [[PROF1]]
1501; ORIGIN:       [[BB4]]:
1502; ORIGIN-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP1]]) #[[ATTR12]]
1503; ORIGIN-NEXT:    unreachable
1504; ORIGIN:       [[BB5]]:
1505; ORIGIN-NEXT:    [[DIV:%.*]] = udiv i32 [[A]], [[B]]
1506; ORIGIN-NEXT:    store i32 [[TMP2]], ptr @__msan_retval_tls, align 8
1507; ORIGIN-NEXT:    store i32 [[TMP3]], ptr @__msan_retval_origin_tls, align 4
1508; ORIGIN-NEXT:    ret i32 [[DIV]]
1509;
1510; CALLS-LABEL: define i32 @Div(
1511; CALLS-SAME: i32 [[A:%.*]], i32 [[B:%.*]]) #[[ATTR0]] {
1512; CALLS-NEXT:  [[ENTRY:.*:]]
1513; CALLS-NEXT:    [[TMP0:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
1514; CALLS-NEXT:    [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4
1515; CALLS-NEXT:    [[TMP2:%.*]] = load i32, ptr @__msan_param_tls, align 8
1516; CALLS-NEXT:    [[TMP3:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
1517; CALLS-NEXT:    call void @llvm.donothing()
1518; CALLS-NEXT:    call void @__msan_maybe_warning_4(i32 zeroext [[TMP0]], i32 zeroext [[TMP1]])
1519; CALLS-NEXT:    [[DIV:%.*]] = udiv i32 [[A]], [[B]]
1520; CALLS-NEXT:    store i32 [[TMP2]], ptr @__msan_retval_tls, align 8
1521; CALLS-NEXT:    store i32 [[TMP3]], ptr @__msan_retval_origin_tls, align 4
1522; CALLS-NEXT:    ret i32 [[DIV]]
1523;
1524entry:
1525  %div = udiv i32 %a, %b
1526  ret i32 %div
1527}
1528
1529; Check that fdiv, unlike udiv, simply propagates shadow.
1530
1531define float @FDiv(float %a, float %b) nounwind uwtable readnone sanitize_memory {
1532; CHECK-LABEL: define float @FDiv(
1533; CHECK-SAME: float [[A:%.*]], float [[B:%.*]]) #[[ATTR0]] {
1534; CHECK-NEXT:  [[ENTRY:.*:]]
1535; CHECK-NEXT:    [[TMP0:%.*]] = load i32, ptr @__msan_param_tls, align 8
1536; CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
1537; CHECK-NEXT:    call void @llvm.donothing()
1538; CHECK-NEXT:    [[_MSPROP:%.*]] = or i32 [[TMP0]], [[TMP1]]
1539; CHECK-NEXT:    [[C:%.*]] = fdiv float [[A]], [[B]]
1540; CHECK-NEXT:    store i32 [[_MSPROP]], ptr @__msan_retval_tls, align 8
1541; CHECK-NEXT:    ret float [[C]]
1542;
1543; ORIGIN-LABEL: define float @FDiv(
1544; ORIGIN-SAME: float [[A:%.*]], float [[B:%.*]]) #[[ATTR0]] {
1545; ORIGIN-NEXT:  [[ENTRY:.*:]]
1546; ORIGIN-NEXT:    [[TMP0:%.*]] = load i32, ptr @__msan_param_tls, align 8
1547; ORIGIN-NEXT:    [[TMP1:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
1548; ORIGIN-NEXT:    [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
1549; ORIGIN-NEXT:    [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4
1550; ORIGIN-NEXT:    call void @llvm.donothing()
1551; ORIGIN-NEXT:    [[_MSPROP:%.*]] = or i32 [[TMP0]], [[TMP2]]
1552; ORIGIN-NEXT:    [[TMP4:%.*]] = icmp ne i32 [[TMP2]], 0
1553; ORIGIN-NEXT:    [[TMP5:%.*]] = select i1 [[TMP4]], i32 [[TMP3]], i32 [[TMP1]]
1554; ORIGIN-NEXT:    [[C:%.*]] = fdiv float [[A]], [[B]]
1555; ORIGIN-NEXT:    store i32 [[_MSPROP]], ptr @__msan_retval_tls, align 8
1556; ORIGIN-NEXT:    store i32 [[TMP5]], ptr @__msan_retval_origin_tls, align 4
1557; ORIGIN-NEXT:    ret float [[C]]
1558;
1559; CALLS-LABEL: define float @FDiv(
1560; CALLS-SAME: float [[A:%.*]], float [[B:%.*]]) #[[ATTR0]] {
1561; CALLS-NEXT:  [[ENTRY:.*:]]
1562; CALLS-NEXT:    [[TMP0:%.*]] = load i32, ptr @__msan_param_tls, align 8
1563; CALLS-NEXT:    [[TMP1:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
1564; CALLS-NEXT:    [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
1565; CALLS-NEXT:    [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4
1566; CALLS-NEXT:    call void @llvm.donothing()
1567; CALLS-NEXT:    [[_MSPROP:%.*]] = or i32 [[TMP0]], [[TMP2]]
1568; CALLS-NEXT:    [[TMP4:%.*]] = icmp ne i32 [[TMP2]], 0
1569; CALLS-NEXT:    [[TMP5:%.*]] = select i1 [[TMP4]], i32 [[TMP3]], i32 [[TMP1]]
1570; CALLS-NEXT:    [[C:%.*]] = fdiv float [[A]], [[B]]
1571; CALLS-NEXT:    store i32 [[_MSPROP]], ptr @__msan_retval_tls, align 8
1572; CALLS-NEXT:    store i32 [[TMP5]], ptr @__msan_retval_origin_tls, align 4
1573; CALLS-NEXT:    ret float [[C]]
1574;
1575entry:
1576  %c = fdiv float %a, %b
1577  ret float %c
1578}
1579
1580
1581; Check that fneg simply propagates shadow.
1582
1583define float @FNeg(float %a) nounwind uwtable readnone sanitize_memory {
1584; CHECK-LABEL: define float @FNeg(
1585; CHECK-SAME: float [[A:%.*]]) #[[ATTR0]] {
1586; CHECK-NEXT:  [[ENTRY:.*:]]
1587; CHECK-NEXT:    [[TMP0:%.*]] = load i32, ptr @__msan_param_tls, align 8
1588; CHECK-NEXT:    call void @llvm.donothing()
1589; CHECK-NEXT:    [[C:%.*]] = fneg float [[A]]
1590; CHECK-NEXT:    store i32 [[TMP0]], ptr @__msan_retval_tls, align 8
1591; CHECK-NEXT:    ret float [[C]]
1592;
1593; ORIGIN-LABEL: define float @FNeg(
1594; ORIGIN-SAME: float [[A:%.*]]) #[[ATTR0]] {
1595; ORIGIN-NEXT:  [[ENTRY:.*:]]
1596; ORIGIN-NEXT:    [[TMP0:%.*]] = load i32, ptr @__msan_param_tls, align 8
1597; ORIGIN-NEXT:    [[TMP1:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
1598; ORIGIN-NEXT:    call void @llvm.donothing()
1599; ORIGIN-NEXT:    [[C:%.*]] = fneg float [[A]]
1600; ORIGIN-NEXT:    store i32 [[TMP0]], ptr @__msan_retval_tls, align 8
1601; ORIGIN-NEXT:    store i32 [[TMP1]], ptr @__msan_retval_origin_tls, align 4
1602; ORIGIN-NEXT:    ret float [[C]]
1603;
1604; CALLS-LABEL: define float @FNeg(
1605; CALLS-SAME: float [[A:%.*]]) #[[ATTR0]] {
1606; CALLS-NEXT:  [[ENTRY:.*:]]
1607; CALLS-NEXT:    [[TMP0:%.*]] = load i32, ptr @__msan_param_tls, align 8
1608; CALLS-NEXT:    [[TMP1:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
1609; CALLS-NEXT:    call void @llvm.donothing()
1610; CALLS-NEXT:    [[C:%.*]] = fneg float [[A]]
1611; CALLS-NEXT:    store i32 [[TMP0]], ptr @__msan_retval_tls, align 8
1612; CALLS-NEXT:    store i32 [[TMP1]], ptr @__msan_retval_origin_tls, align 4
1613; CALLS-NEXT:    ret float [[C]]
1614;
1615entry:
1616  %c = fneg float %a
1617  ret float %c
1618}
1619
1620define zeroext i1 @ICmpSLTZero(i32 %x) nounwind uwtable readnone sanitize_memory {
1621; CHECK-LABEL: define zeroext i1 @ICmpSLTZero(
1622; CHECK-SAME: i32 [[X:%.*]]) #[[ATTR0]] {
1623; CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr @__msan_param_tls, align 8
1624; CHECK-NEXT:    call void @llvm.donothing()
1625; CHECK-NEXT:    [[TMP2:%.*]] = xor i32 [[X]], -2147483648
1626; CHECK-NEXT:    [[TMP3:%.*]] = xor i32 [[TMP1]], -1
1627; CHECK-NEXT:    [[TMP4:%.*]] = and i32 [[TMP2]], [[TMP3]]
1628; CHECK-NEXT:    [[TMP5:%.*]] = or i32 [[TMP2]], [[TMP1]]
1629; CHECK-NEXT:    [[TMP8:%.*]] = icmp ult i32 [[TMP4]], -2147483648
1630; CHECK-NEXT:    [[TMP15:%.*]] = icmp ult i32 [[TMP5]], -2147483648
1631; CHECK-NEXT:    [[TMP16:%.*]] = xor i1 [[TMP8]], [[TMP15]]
1632; CHECK-NEXT:    [[TMP17:%.*]] = icmp slt i32 [[X]], 0
1633; CHECK-NEXT:    store i1 [[TMP16]], ptr @__msan_retval_tls, align 8
1634; CHECK-NEXT:    ret i1 [[TMP17]]
1635;
1636; ORIGIN-LABEL: define zeroext i1 @ICmpSLTZero(
1637; ORIGIN-SAME: i32 [[X:%.*]]) #[[ATTR0]] {
1638; ORIGIN-NEXT:    [[TMP1:%.*]] = load i32, ptr @__msan_param_tls, align 8
1639; ORIGIN-NEXT:    [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
1640; ORIGIN-NEXT:    call void @llvm.donothing()
1641; ORIGIN-NEXT:    [[TMP3:%.*]] = xor i32 [[X]], -2147483648
1642; ORIGIN-NEXT:    [[TMP4:%.*]] = xor i32 [[TMP1]], -1
1643; ORIGIN-NEXT:    [[TMP5:%.*]] = and i32 [[TMP3]], [[TMP4]]
1644; ORIGIN-NEXT:    [[TMP6:%.*]] = or i32 [[TMP3]], [[TMP1]]
1645; ORIGIN-NEXT:    [[TMP9:%.*]] = icmp ult i32 [[TMP5]], -2147483648
1646; ORIGIN-NEXT:    [[TMP16:%.*]] = icmp ult i32 [[TMP6]], -2147483648
1647; ORIGIN-NEXT:    [[TMP17:%.*]] = xor i1 [[TMP9]], [[TMP16]]
1648; ORIGIN-NEXT:    [[TMP18:%.*]] = icmp slt i32 [[X]], 0
1649; ORIGIN-NEXT:    store i1 [[TMP17]], ptr @__msan_retval_tls, align 8
1650; ORIGIN-NEXT:    store i32 [[TMP2]], ptr @__msan_retval_origin_tls, align 4
1651; ORIGIN-NEXT:    ret i1 [[TMP18]]
1652;
1653; CALLS-LABEL: define zeroext i1 @ICmpSLTZero(
1654; CALLS-SAME: i32 [[X:%.*]]) #[[ATTR0]] {
1655; CALLS-NEXT:    [[TMP1:%.*]] = load i32, ptr @__msan_param_tls, align 8
1656; CALLS-NEXT:    [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
1657; CALLS-NEXT:    call void @llvm.donothing()
1658; CALLS-NEXT:    [[TMP3:%.*]] = xor i32 [[X]], -2147483648
1659; CALLS-NEXT:    [[TMP4:%.*]] = xor i32 [[TMP1]], -1
1660; CALLS-NEXT:    [[TMP5:%.*]] = and i32 [[TMP3]], [[TMP4]]
1661; CALLS-NEXT:    [[TMP6:%.*]] = or i32 [[TMP3]], [[TMP1]]
1662; CALLS-NEXT:    [[TMP9:%.*]] = icmp ult i32 [[TMP5]], -2147483648
1663; CALLS-NEXT:    [[TMP16:%.*]] = icmp ult i32 [[TMP6]], -2147483648
1664; CALLS-NEXT:    [[TMP17:%.*]] = xor i1 [[TMP9]], [[TMP16]]
1665; CALLS-NEXT:    [[TMP18:%.*]] = icmp slt i32 [[X]], 0
1666; CALLS-NEXT:    store i1 [[TMP17]], ptr @__msan_retval_tls, align 8
1667; CALLS-NEXT:    store i32 [[TMP2]], ptr @__msan_retval_origin_tls, align 4
1668; CALLS-NEXT:    ret i1 [[TMP18]]
1669;
1670  %1 = icmp slt i32 %x, 0
1671  ret i1 %1
1672}
1673
1674
1675define zeroext i1 @ICmpSGEZero(i32 %x) nounwind uwtable readnone sanitize_memory {
1676; CHECK-LABEL: define zeroext i1 @ICmpSGEZero(
1677; CHECK-SAME: i32 [[X:%.*]]) #[[ATTR0]] {
1678; CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr @__msan_param_tls, align 8
1679; CHECK-NEXT:    call void @llvm.donothing()
1680; CHECK-NEXT:    [[TMP2:%.*]] = xor i32 [[X]], -2147483648
1681; CHECK-NEXT:    [[TMP3:%.*]] = xor i32 [[TMP1]], -1
1682; CHECK-NEXT:    [[TMP4:%.*]] = and i32 [[TMP2]], [[TMP3]]
1683; CHECK-NEXT:    [[TMP5:%.*]] = or i32 [[TMP2]], [[TMP1]]
1684; CHECK-NEXT:    [[TMP8:%.*]] = icmp uge i32 [[TMP4]], -2147483648
1685; CHECK-NEXT:    [[TMP15:%.*]] = icmp uge i32 [[TMP5]], -2147483648
1686; CHECK-NEXT:    [[TMP16:%.*]] = xor i1 [[TMP8]], [[TMP15]]
1687; CHECK-NEXT:    [[TMP17:%.*]] = icmp sge i32 [[X]], 0
1688; CHECK-NEXT:    store i1 [[TMP16]], ptr @__msan_retval_tls, align 8
1689; CHECK-NEXT:    ret i1 [[TMP17]]
1690;
1691; ORIGIN-LABEL: define zeroext i1 @ICmpSGEZero(
1692; ORIGIN-SAME: i32 [[X:%.*]]) #[[ATTR0]] {
1693; ORIGIN-NEXT:    [[TMP1:%.*]] = load i32, ptr @__msan_param_tls, align 8
1694; ORIGIN-NEXT:    [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
1695; ORIGIN-NEXT:    call void @llvm.donothing()
1696; ORIGIN-NEXT:    [[TMP3:%.*]] = xor i32 [[X]], -2147483648
1697; ORIGIN-NEXT:    [[TMP4:%.*]] = xor i32 [[TMP1]], -1
1698; ORIGIN-NEXT:    [[TMP5:%.*]] = and i32 [[TMP3]], [[TMP4]]
1699; ORIGIN-NEXT:    [[TMP6:%.*]] = or i32 [[TMP3]], [[TMP1]]
1700; ORIGIN-NEXT:    [[TMP9:%.*]] = icmp uge i32 [[TMP5]], -2147483648
1701; ORIGIN-NEXT:    [[TMP16:%.*]] = icmp uge i32 [[TMP6]], -2147483648
1702; ORIGIN-NEXT:    [[TMP17:%.*]] = xor i1 [[TMP9]], [[TMP16]]
1703; ORIGIN-NEXT:    [[TMP18:%.*]] = icmp sge i32 [[X]], 0
1704; ORIGIN-NEXT:    store i1 [[TMP17]], ptr @__msan_retval_tls, align 8
1705; ORIGIN-NEXT:    store i32 [[TMP2]], ptr @__msan_retval_origin_tls, align 4
1706; ORIGIN-NEXT:    ret i1 [[TMP18]]
1707;
1708; CALLS-LABEL: define zeroext i1 @ICmpSGEZero(
1709; CALLS-SAME: i32 [[X:%.*]]) #[[ATTR0]] {
1710; CALLS-NEXT:    [[TMP1:%.*]] = load i32, ptr @__msan_param_tls, align 8
1711; CALLS-NEXT:    [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
1712; CALLS-NEXT:    call void @llvm.donothing()
1713; CALLS-NEXT:    [[TMP3:%.*]] = xor i32 [[X]], -2147483648
1714; CALLS-NEXT:    [[TMP4:%.*]] = xor i32 [[TMP1]], -1
1715; CALLS-NEXT:    [[TMP5:%.*]] = and i32 [[TMP3]], [[TMP4]]
1716; CALLS-NEXT:    [[TMP6:%.*]] = or i32 [[TMP3]], [[TMP1]]
1717; CALLS-NEXT:    [[TMP9:%.*]] = icmp uge i32 [[TMP5]], -2147483648
1718; CALLS-NEXT:    [[TMP16:%.*]] = icmp uge i32 [[TMP6]], -2147483648
1719; CALLS-NEXT:    [[TMP17:%.*]] = xor i1 [[TMP9]], [[TMP16]]
1720; CALLS-NEXT:    [[TMP18:%.*]] = icmp sge i32 [[X]], 0
1721; CALLS-NEXT:    store i1 [[TMP17]], ptr @__msan_retval_tls, align 8
1722; CALLS-NEXT:    store i32 [[TMP2]], ptr @__msan_retval_origin_tls, align 4
1723; CALLS-NEXT:    ret i1 [[TMP18]]
1724;
1725  %1 = icmp sge i32 %x, 0
1726  ret i1 %1
1727}
1728
1729
1730define zeroext i1 @ICmpSGTZero(i32 %x) nounwind uwtable readnone sanitize_memory {
1731; CHECK-LABEL: define zeroext i1 @ICmpSGTZero(
1732; CHECK-SAME: i32 [[X:%.*]]) #[[ATTR0]] {
1733; CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr @__msan_param_tls, align 8
1734; CHECK-NEXT:    call void @llvm.donothing()
1735; CHECK-NEXT:    [[TMP2:%.*]] = xor i32 [[X]], -2147483648
1736; CHECK-NEXT:    [[TMP3:%.*]] = xor i32 [[TMP1]], -1
1737; CHECK-NEXT:    [[TMP4:%.*]] = and i32 [[TMP2]], [[TMP3]]
1738; CHECK-NEXT:    [[TMP5:%.*]] = or i32 [[TMP2]], [[TMP1]]
1739; CHECK-NEXT:    [[TMP8:%.*]] = icmp ugt i32 -2147483648, [[TMP5]]
1740; CHECK-NEXT:    [[TMP15:%.*]] = icmp ugt i32 -2147483648, [[TMP4]]
1741; CHECK-NEXT:    [[TMP16:%.*]] = xor i1 [[TMP8]], [[TMP15]]
1742; CHECK-NEXT:    [[TMP17:%.*]] = icmp sgt i32 0, [[X]]
1743; CHECK-NEXT:    store i1 [[TMP16]], ptr @__msan_retval_tls, align 8
1744; CHECK-NEXT:    ret i1 [[TMP17]]
1745;
1746; ORIGIN-LABEL: define zeroext i1 @ICmpSGTZero(
1747; ORIGIN-SAME: i32 [[X:%.*]]) #[[ATTR0]] {
1748; ORIGIN-NEXT:    [[TMP1:%.*]] = load i32, ptr @__msan_param_tls, align 8
1749; ORIGIN-NEXT:    [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
1750; ORIGIN-NEXT:    call void @llvm.donothing()
1751; ORIGIN-NEXT:    [[TMP3:%.*]] = xor i32 [[X]], -2147483648
1752; ORIGIN-NEXT:    [[TMP4:%.*]] = xor i32 [[TMP1]], -1
1753; ORIGIN-NEXT:    [[TMP5:%.*]] = and i32 [[TMP3]], [[TMP4]]
1754; ORIGIN-NEXT:    [[TMP6:%.*]] = or i32 [[TMP3]], [[TMP1]]
1755; ORIGIN-NEXT:    [[TMP9:%.*]] = icmp ugt i32 -2147483648, [[TMP6]]
1756; ORIGIN-NEXT:    [[TMP16:%.*]] = icmp ugt i32 -2147483648, [[TMP5]]
1757; ORIGIN-NEXT:    [[TMP17:%.*]] = xor i1 [[TMP9]], [[TMP16]]
1758; ORIGIN-NEXT:    [[TMP18:%.*]] = icmp ne i32 [[TMP1]], 0
1759; ORIGIN-NEXT:    [[TMP19:%.*]] = select i1 [[TMP18]], i32 [[TMP2]], i32 0
1760; ORIGIN-NEXT:    [[TMP20:%.*]] = icmp sgt i32 0, [[X]]
1761; ORIGIN-NEXT:    store i1 [[TMP17]], ptr @__msan_retval_tls, align 8
1762; ORIGIN-NEXT:    store i32 [[TMP19]], ptr @__msan_retval_origin_tls, align 4
1763; ORIGIN-NEXT:    ret i1 [[TMP20]]
1764;
1765; CALLS-LABEL: define zeroext i1 @ICmpSGTZero(
1766; CALLS-SAME: i32 [[X:%.*]]) #[[ATTR0]] {
1767; CALLS-NEXT:    [[TMP1:%.*]] = load i32, ptr @__msan_param_tls, align 8
1768; CALLS-NEXT:    [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
1769; CALLS-NEXT:    call void @llvm.donothing()
1770; CALLS-NEXT:    [[TMP3:%.*]] = xor i32 [[X]], -2147483648
1771; CALLS-NEXT:    [[TMP4:%.*]] = xor i32 [[TMP1]], -1
1772; CALLS-NEXT:    [[TMP5:%.*]] = and i32 [[TMP3]], [[TMP4]]
1773; CALLS-NEXT:    [[TMP6:%.*]] = or i32 [[TMP3]], [[TMP1]]
1774; CALLS-NEXT:    [[TMP9:%.*]] = icmp ugt i32 -2147483648, [[TMP6]]
1775; CALLS-NEXT:    [[TMP16:%.*]] = icmp ugt i32 -2147483648, [[TMP5]]
1776; CALLS-NEXT:    [[TMP17:%.*]] = xor i1 [[TMP9]], [[TMP16]]
1777; CALLS-NEXT:    [[TMP18:%.*]] = icmp ne i32 [[TMP1]], 0
1778; CALLS-NEXT:    [[TMP19:%.*]] = select i1 [[TMP18]], i32 [[TMP2]], i32 0
1779; CALLS-NEXT:    [[TMP20:%.*]] = icmp sgt i32 0, [[X]]
1780; CALLS-NEXT:    store i1 [[TMP17]], ptr @__msan_retval_tls, align 8
1781; CALLS-NEXT:    store i32 [[TMP19]], ptr @__msan_retval_origin_tls, align 4
1782; CALLS-NEXT:    ret i1 [[TMP20]]
1783;
1784  %1 = icmp sgt i32 0, %x
1785  ret i1 %1
1786}
1787
1788
1789define zeroext i1 @ICmpSLEZero(i32 %x) nounwind uwtable readnone sanitize_memory {
1790; CHECK-LABEL: define zeroext i1 @ICmpSLEZero(
1791; CHECK-SAME: i32 [[X:%.*]]) #[[ATTR0]] {
1792; CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr @__msan_param_tls, align 8
1793; CHECK-NEXT:    call void @llvm.donothing()
1794; CHECK-NEXT:    [[TMP2:%.*]] = xor i32 [[X]], -2147483648
1795; CHECK-NEXT:    [[TMP3:%.*]] = xor i32 [[TMP1]], -1
1796; CHECK-NEXT:    [[TMP4:%.*]] = and i32 [[TMP2]], [[TMP3]]
1797; CHECK-NEXT:    [[TMP5:%.*]] = or i32 [[TMP2]], [[TMP1]]
1798; CHECK-NEXT:    [[TMP8:%.*]] = icmp ule i32 -2147483648, [[TMP5]]
1799; CHECK-NEXT:    [[TMP15:%.*]] = icmp ule i32 -2147483648, [[TMP4]]
1800; CHECK-NEXT:    [[TMP16:%.*]] = xor i1 [[TMP8]], [[TMP15]]
1801; CHECK-NEXT:    [[TMP17:%.*]] = icmp sle i32 0, [[X]]
1802; CHECK-NEXT:    store i1 [[TMP16]], ptr @__msan_retval_tls, align 8
1803; CHECK-NEXT:    ret i1 [[TMP17]]
1804;
1805; ORIGIN-LABEL: define zeroext i1 @ICmpSLEZero(
1806; ORIGIN-SAME: i32 [[X:%.*]]) #[[ATTR0]] {
1807; ORIGIN-NEXT:    [[TMP1:%.*]] = load i32, ptr @__msan_param_tls, align 8
1808; ORIGIN-NEXT:    [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
1809; ORIGIN-NEXT:    call void @llvm.donothing()
1810; ORIGIN-NEXT:    [[TMP3:%.*]] = xor i32 [[X]], -2147483648
1811; ORIGIN-NEXT:    [[TMP4:%.*]] = xor i32 [[TMP1]], -1
1812; ORIGIN-NEXT:    [[TMP5:%.*]] = and i32 [[TMP3]], [[TMP4]]
1813; ORIGIN-NEXT:    [[TMP6:%.*]] = or i32 [[TMP3]], [[TMP1]]
1814; ORIGIN-NEXT:    [[TMP9:%.*]] = icmp ule i32 -2147483648, [[TMP6]]
1815; ORIGIN-NEXT:    [[TMP16:%.*]] = icmp ule i32 -2147483648, [[TMP5]]
1816; ORIGIN-NEXT:    [[TMP17:%.*]] = xor i1 [[TMP9]], [[TMP16]]
1817; ORIGIN-NEXT:    [[TMP18:%.*]] = icmp ne i32 [[TMP1]], 0
1818; ORIGIN-NEXT:    [[TMP19:%.*]] = select i1 [[TMP18]], i32 [[TMP2]], i32 0
1819; ORIGIN-NEXT:    [[TMP20:%.*]] = icmp sle i32 0, [[X]]
1820; ORIGIN-NEXT:    store i1 [[TMP17]], ptr @__msan_retval_tls, align 8
1821; ORIGIN-NEXT:    store i32 [[TMP19]], ptr @__msan_retval_origin_tls, align 4
1822; ORIGIN-NEXT:    ret i1 [[TMP20]]
1823;
1824; CALLS-LABEL: define zeroext i1 @ICmpSLEZero(
1825; CALLS-SAME: i32 [[X:%.*]]) #[[ATTR0]] {
1826; CALLS-NEXT:    [[TMP1:%.*]] = load i32, ptr @__msan_param_tls, align 8
1827; CALLS-NEXT:    [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
1828; CALLS-NEXT:    call void @llvm.donothing()
1829; CALLS-NEXT:    [[TMP3:%.*]] = xor i32 [[X]], -2147483648
1830; CALLS-NEXT:    [[TMP4:%.*]] = xor i32 [[TMP1]], -1
1831; CALLS-NEXT:    [[TMP5:%.*]] = and i32 [[TMP3]], [[TMP4]]
1832; CALLS-NEXT:    [[TMP6:%.*]] = or i32 [[TMP3]], [[TMP1]]
1833; CALLS-NEXT:    [[TMP9:%.*]] = icmp ule i32 -2147483648, [[TMP6]]
1834; CALLS-NEXT:    [[TMP16:%.*]] = icmp ule i32 -2147483648, [[TMP5]]
1835; CALLS-NEXT:    [[TMP17:%.*]] = xor i1 [[TMP9]], [[TMP16]]
1836; CALLS-NEXT:    [[TMP18:%.*]] = icmp ne i32 [[TMP1]], 0
1837; CALLS-NEXT:    [[TMP19:%.*]] = select i1 [[TMP18]], i32 [[TMP2]], i32 0
1838; CALLS-NEXT:    [[TMP20:%.*]] = icmp sle i32 0, [[X]]
1839; CALLS-NEXT:    store i1 [[TMP17]], ptr @__msan_retval_tls, align 8
1840; CALLS-NEXT:    store i32 [[TMP19]], ptr @__msan_retval_origin_tls, align 4
1841; CALLS-NEXT:    ret i1 [[TMP20]]
1842;
1843  %1 = icmp sle i32 0, %x
1844  ret i1 %1
1845}
1846
1847
1848
1849; Check that we propagate shadow for x<=-1, x>-1, etc (i.e. sign bit tests)
1850
1851define zeroext i1 @ICmpSLTAllOnes(i32 %x) nounwind uwtable readnone sanitize_memory {
1852; CHECK-LABEL: define zeroext i1 @ICmpSLTAllOnes(
1853; CHECK-SAME: i32 [[X:%.*]]) #[[ATTR0]] {
1854; CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr @__msan_param_tls, align 8
1855; CHECK-NEXT:    call void @llvm.donothing()
1856; CHECK-NEXT:    [[TMP2:%.*]] = xor i32 [[X]], -2147483648
1857; CHECK-NEXT:    [[TMP3:%.*]] = xor i32 [[TMP1]], -1
1858; CHECK-NEXT:    [[TMP4:%.*]] = and i32 [[TMP2]], [[TMP3]]
1859; CHECK-NEXT:    [[TMP5:%.*]] = or i32 [[TMP2]], [[TMP1]]
1860; CHECK-NEXT:    [[TMP8:%.*]] = icmp ult i32 2147483647, [[TMP5]]
1861; CHECK-NEXT:    [[TMP15:%.*]] = icmp ult i32 2147483647, [[TMP4]]
1862; CHECK-NEXT:    [[TMP16:%.*]] = xor i1 [[TMP8]], [[TMP15]]
1863; CHECK-NEXT:    [[TMP17:%.*]] = icmp slt i32 -1, [[X]]
1864; CHECK-NEXT:    store i1 [[TMP16]], ptr @__msan_retval_tls, align 8
1865; CHECK-NEXT:    ret i1 [[TMP17]]
1866;
1867; ORIGIN-LABEL: define zeroext i1 @ICmpSLTAllOnes(
1868; ORIGIN-SAME: i32 [[X:%.*]]) #[[ATTR0]] {
1869; ORIGIN-NEXT:    [[TMP1:%.*]] = load i32, ptr @__msan_param_tls, align 8
1870; ORIGIN-NEXT:    [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
1871; ORIGIN-NEXT:    call void @llvm.donothing()
1872; ORIGIN-NEXT:    [[TMP3:%.*]] = xor i32 [[X]], -2147483648
1873; ORIGIN-NEXT:    [[TMP4:%.*]] = xor i32 [[TMP1]], -1
1874; ORIGIN-NEXT:    [[TMP5:%.*]] = and i32 [[TMP3]], [[TMP4]]
1875; ORIGIN-NEXT:    [[TMP6:%.*]] = or i32 [[TMP3]], [[TMP1]]
1876; ORIGIN-NEXT:    [[TMP9:%.*]] = icmp ult i32 2147483647, [[TMP6]]
1877; ORIGIN-NEXT:    [[TMP16:%.*]] = icmp ult i32 2147483647, [[TMP5]]
1878; ORIGIN-NEXT:    [[TMP17:%.*]] = xor i1 [[TMP9]], [[TMP16]]
1879; ORIGIN-NEXT:    [[TMP18:%.*]] = icmp ne i32 [[TMP1]], 0
1880; ORIGIN-NEXT:    [[TMP19:%.*]] = select i1 [[TMP18]], i32 [[TMP2]], i32 0
1881; ORIGIN-NEXT:    [[TMP20:%.*]] = icmp slt i32 -1, [[X]]
1882; ORIGIN-NEXT:    store i1 [[TMP17]], ptr @__msan_retval_tls, align 8
1883; ORIGIN-NEXT:    store i32 [[TMP19]], ptr @__msan_retval_origin_tls, align 4
1884; ORIGIN-NEXT:    ret i1 [[TMP20]]
1885;
1886; CALLS-LABEL: define zeroext i1 @ICmpSLTAllOnes(
1887; CALLS-SAME: i32 [[X:%.*]]) #[[ATTR0]] {
1888; CALLS-NEXT:    [[TMP1:%.*]] = load i32, ptr @__msan_param_tls, align 8
1889; CALLS-NEXT:    [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
1890; CALLS-NEXT:    call void @llvm.donothing()
1891; CALLS-NEXT:    [[TMP3:%.*]] = xor i32 [[X]], -2147483648
1892; CALLS-NEXT:    [[TMP4:%.*]] = xor i32 [[TMP1]], -1
1893; CALLS-NEXT:    [[TMP5:%.*]] = and i32 [[TMP3]], [[TMP4]]
1894; CALLS-NEXT:    [[TMP6:%.*]] = or i32 [[TMP3]], [[TMP1]]
1895; CALLS-NEXT:    [[TMP9:%.*]] = icmp ult i32 2147483647, [[TMP6]]
1896; CALLS-NEXT:    [[TMP16:%.*]] = icmp ult i32 2147483647, [[TMP5]]
1897; CALLS-NEXT:    [[TMP17:%.*]] = xor i1 [[TMP9]], [[TMP16]]
1898; CALLS-NEXT:    [[TMP18:%.*]] = icmp ne i32 [[TMP1]], 0
1899; CALLS-NEXT:    [[TMP19:%.*]] = select i1 [[TMP18]], i32 [[TMP2]], i32 0
1900; CALLS-NEXT:    [[TMP20:%.*]] = icmp slt i32 -1, [[X]]
1901; CALLS-NEXT:    store i1 [[TMP17]], ptr @__msan_retval_tls, align 8
1902; CALLS-NEXT:    store i32 [[TMP19]], ptr @__msan_retval_origin_tls, align 4
1903; CALLS-NEXT:    ret i1 [[TMP20]]
1904;
1905  %1 = icmp slt i32 -1, %x
1906  ret i1 %1
1907}
1908
1909
1910define zeroext i1 @ICmpSGEAllOnes(i32 %x) nounwind uwtable readnone sanitize_memory {
1911; CHECK-LABEL: define zeroext i1 @ICmpSGEAllOnes(
1912; CHECK-SAME: i32 [[X:%.*]]) #[[ATTR0]] {
1913; CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr @__msan_param_tls, align 8
1914; CHECK-NEXT:    call void @llvm.donothing()
1915; CHECK-NEXT:    [[TMP2:%.*]] = xor i32 [[X]], -2147483648
1916; CHECK-NEXT:    [[TMP3:%.*]] = xor i32 [[TMP1]], -1
1917; CHECK-NEXT:    [[TMP4:%.*]] = and i32 [[TMP2]], [[TMP3]]
1918; CHECK-NEXT:    [[TMP5:%.*]] = or i32 [[TMP2]], [[TMP1]]
1919; CHECK-NEXT:    [[TMP8:%.*]] = icmp uge i32 2147483647, [[TMP5]]
1920; CHECK-NEXT:    [[TMP15:%.*]] = icmp uge i32 2147483647, [[TMP4]]
1921; CHECK-NEXT:    [[TMP16:%.*]] = xor i1 [[TMP8]], [[TMP15]]
1922; CHECK-NEXT:    [[TMP17:%.*]] = icmp sge i32 -1, [[X]]
1923; CHECK-NEXT:    store i1 [[TMP16]], ptr @__msan_retval_tls, align 8
1924; CHECK-NEXT:    ret i1 [[TMP17]]
1925;
1926; ORIGIN-LABEL: define zeroext i1 @ICmpSGEAllOnes(
1927; ORIGIN-SAME: i32 [[X:%.*]]) #[[ATTR0]] {
1928; ORIGIN-NEXT:    [[TMP1:%.*]] = load i32, ptr @__msan_param_tls, align 8
1929; ORIGIN-NEXT:    [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
1930; ORIGIN-NEXT:    call void @llvm.donothing()
1931; ORIGIN-NEXT:    [[TMP3:%.*]] = xor i32 [[X]], -2147483648
1932; ORIGIN-NEXT:    [[TMP4:%.*]] = xor i32 [[TMP1]], -1
1933; ORIGIN-NEXT:    [[TMP5:%.*]] = and i32 [[TMP3]], [[TMP4]]
1934; ORIGIN-NEXT:    [[TMP6:%.*]] = or i32 [[TMP3]], [[TMP1]]
1935; ORIGIN-NEXT:    [[TMP9:%.*]] = icmp uge i32 2147483647, [[TMP6]]
1936; ORIGIN-NEXT:    [[TMP16:%.*]] = icmp uge i32 2147483647, [[TMP5]]
1937; ORIGIN-NEXT:    [[TMP17:%.*]] = xor i1 [[TMP9]], [[TMP16]]
1938; ORIGIN-NEXT:    [[TMP18:%.*]] = icmp ne i32 [[TMP1]], 0
1939; ORIGIN-NEXT:    [[TMP19:%.*]] = select i1 [[TMP18]], i32 [[TMP2]], i32 0
1940; ORIGIN-NEXT:    [[TMP20:%.*]] = icmp sge i32 -1, [[X]]
1941; ORIGIN-NEXT:    store i1 [[TMP17]], ptr @__msan_retval_tls, align 8
1942; ORIGIN-NEXT:    store i32 [[TMP19]], ptr @__msan_retval_origin_tls, align 4
1943; ORIGIN-NEXT:    ret i1 [[TMP20]]
1944;
1945; CALLS-LABEL: define zeroext i1 @ICmpSGEAllOnes(
1946; CALLS-SAME: i32 [[X:%.*]]) #[[ATTR0]] {
1947; CALLS-NEXT:    [[TMP1:%.*]] = load i32, ptr @__msan_param_tls, align 8
1948; CALLS-NEXT:    [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
1949; CALLS-NEXT:    call void @llvm.donothing()
1950; CALLS-NEXT:    [[TMP3:%.*]] = xor i32 [[X]], -2147483648
1951; CALLS-NEXT:    [[TMP4:%.*]] = xor i32 [[TMP1]], -1
1952; CALLS-NEXT:    [[TMP5:%.*]] = and i32 [[TMP3]], [[TMP4]]
1953; CALLS-NEXT:    [[TMP6:%.*]] = or i32 [[TMP3]], [[TMP1]]
1954; CALLS-NEXT:    [[TMP9:%.*]] = icmp uge i32 2147483647, [[TMP6]]
1955; CALLS-NEXT:    [[TMP16:%.*]] = icmp uge i32 2147483647, [[TMP5]]
1956; CALLS-NEXT:    [[TMP17:%.*]] = xor i1 [[TMP9]], [[TMP16]]
1957; CALLS-NEXT:    [[TMP18:%.*]] = icmp ne i32 [[TMP1]], 0
1958; CALLS-NEXT:    [[TMP19:%.*]] = select i1 [[TMP18]], i32 [[TMP2]], i32 0
1959; CALLS-NEXT:    [[TMP20:%.*]] = icmp sge i32 -1, [[X]]
1960; CALLS-NEXT:    store i1 [[TMP17]], ptr @__msan_retval_tls, align 8
1961; CALLS-NEXT:    store i32 [[TMP19]], ptr @__msan_retval_origin_tls, align 4
1962; CALLS-NEXT:    ret i1 [[TMP20]]
1963;
1964  %1 = icmp sge i32 -1, %x
1965  ret i1 %1
1966}
1967
1968
1969define zeroext i1 @ICmpSGTAllOnes(i32 %x) nounwind uwtable readnone sanitize_memory {
1970; CHECK-LABEL: define zeroext i1 @ICmpSGTAllOnes(
1971; CHECK-SAME: i32 [[X:%.*]]) #[[ATTR0]] {
1972; CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr @__msan_param_tls, align 8
1973; CHECK-NEXT:    call void @llvm.donothing()
1974; CHECK-NEXT:    [[TMP2:%.*]] = xor i32 [[X]], -2147483648
1975; CHECK-NEXT:    [[TMP3:%.*]] = xor i32 [[TMP1]], -1
1976; CHECK-NEXT:    [[TMP4:%.*]] = and i32 [[TMP2]], [[TMP3]]
1977; CHECK-NEXT:    [[TMP5:%.*]] = or i32 [[TMP2]], [[TMP1]]
1978; CHECK-NEXT:    [[TMP8:%.*]] = icmp ugt i32 [[TMP4]], 2147483647
1979; CHECK-NEXT:    [[TMP15:%.*]] = icmp ugt i32 [[TMP5]], 2147483647
1980; CHECK-NEXT:    [[TMP16:%.*]] = xor i1 [[TMP8]], [[TMP15]]
1981; CHECK-NEXT:    [[TMP17:%.*]] = icmp sgt i32 [[X]], -1
1982; CHECK-NEXT:    store i1 [[TMP16]], ptr @__msan_retval_tls, align 8
1983; CHECK-NEXT:    ret i1 [[TMP17]]
1984;
1985; ORIGIN-LABEL: define zeroext i1 @ICmpSGTAllOnes(
1986; ORIGIN-SAME: i32 [[X:%.*]]) #[[ATTR0]] {
1987; ORIGIN-NEXT:    [[TMP1:%.*]] = load i32, ptr @__msan_param_tls, align 8
1988; ORIGIN-NEXT:    [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
1989; ORIGIN-NEXT:    call void @llvm.donothing()
1990; ORIGIN-NEXT:    [[TMP3:%.*]] = xor i32 [[X]], -2147483648
1991; ORIGIN-NEXT:    [[TMP4:%.*]] = xor i32 [[TMP1]], -1
1992; ORIGIN-NEXT:    [[TMP5:%.*]] = and i32 [[TMP3]], [[TMP4]]
1993; ORIGIN-NEXT:    [[TMP6:%.*]] = or i32 [[TMP3]], [[TMP1]]
1994; ORIGIN-NEXT:    [[TMP9:%.*]] = icmp ugt i32 [[TMP5]], 2147483647
1995; ORIGIN-NEXT:    [[TMP16:%.*]] = icmp ugt i32 [[TMP6]], 2147483647
1996; ORIGIN-NEXT:    [[TMP17:%.*]] = xor i1 [[TMP9]], [[TMP16]]
1997; ORIGIN-NEXT:    [[TMP18:%.*]] = icmp sgt i32 [[X]], -1
1998; ORIGIN-NEXT:    store i1 [[TMP17]], ptr @__msan_retval_tls, align 8
1999; ORIGIN-NEXT:    store i32 [[TMP2]], ptr @__msan_retval_origin_tls, align 4
2000; ORIGIN-NEXT:    ret i1 [[TMP18]]
2001;
2002; CALLS-LABEL: define zeroext i1 @ICmpSGTAllOnes(
2003; CALLS-SAME: i32 [[X:%.*]]) #[[ATTR0]] {
2004; CALLS-NEXT:    [[TMP1:%.*]] = load i32, ptr @__msan_param_tls, align 8
2005; CALLS-NEXT:    [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
2006; CALLS-NEXT:    call void @llvm.donothing()
2007; CALLS-NEXT:    [[TMP3:%.*]] = xor i32 [[X]], -2147483648
2008; CALLS-NEXT:    [[TMP4:%.*]] = xor i32 [[TMP1]], -1
2009; CALLS-NEXT:    [[TMP5:%.*]] = and i32 [[TMP3]], [[TMP4]]
2010; CALLS-NEXT:    [[TMP6:%.*]] = or i32 [[TMP3]], [[TMP1]]
2011; CALLS-NEXT:    [[TMP9:%.*]] = icmp ugt i32 [[TMP5]], 2147483647
2012; CALLS-NEXT:    [[TMP16:%.*]] = icmp ugt i32 [[TMP6]], 2147483647
2013; CALLS-NEXT:    [[TMP17:%.*]] = xor i1 [[TMP9]], [[TMP16]]
2014; CALLS-NEXT:    [[TMP18:%.*]] = icmp sgt i32 [[X]], -1
2015; CALLS-NEXT:    store i1 [[TMP17]], ptr @__msan_retval_tls, align 8
2016; CALLS-NEXT:    store i32 [[TMP2]], ptr @__msan_retval_origin_tls, align 4
2017; CALLS-NEXT:    ret i1 [[TMP18]]
2018;
2019  %1 = icmp sgt i32 %x, -1
2020  ret i1 %1
2021}
2022
2023
2024define zeroext i1 @ICmpSLEAllOnes(i32 %x) nounwind uwtable readnone sanitize_memory {
2025; CHECK-LABEL: define zeroext i1 @ICmpSLEAllOnes(
2026; CHECK-SAME: i32 [[X:%.*]]) #[[ATTR0]] {
2027; CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr @__msan_param_tls, align 8
2028; CHECK-NEXT:    call void @llvm.donothing()
2029; CHECK-NEXT:    [[TMP2:%.*]] = xor i32 [[X]], -2147483648
2030; CHECK-NEXT:    [[TMP3:%.*]] = xor i32 [[TMP1]], -1
2031; CHECK-NEXT:    [[TMP4:%.*]] = and i32 [[TMP2]], [[TMP3]]
2032; CHECK-NEXT:    [[TMP5:%.*]] = or i32 [[TMP2]], [[TMP1]]
2033; CHECK-NEXT:    [[TMP8:%.*]] = icmp ule i32 [[TMP4]], 2147483647
2034; CHECK-NEXT:    [[TMP15:%.*]] = icmp ule i32 [[TMP5]], 2147483647
2035; CHECK-NEXT:    [[TMP16:%.*]] = xor i1 [[TMP8]], [[TMP15]]
2036; CHECK-NEXT:    [[TMP17:%.*]] = icmp sle i32 [[X]], -1
2037; CHECK-NEXT:    store i1 [[TMP16]], ptr @__msan_retval_tls, align 8
2038; CHECK-NEXT:    ret i1 [[TMP17]]
2039;
2040; ORIGIN-LABEL: define zeroext i1 @ICmpSLEAllOnes(
2041; ORIGIN-SAME: i32 [[X:%.*]]) #[[ATTR0]] {
2042; ORIGIN-NEXT:    [[TMP1:%.*]] = load i32, ptr @__msan_param_tls, align 8
2043; ORIGIN-NEXT:    [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
2044; ORIGIN-NEXT:    call void @llvm.donothing()
2045; ORIGIN-NEXT:    [[TMP3:%.*]] = xor i32 [[X]], -2147483648
2046; ORIGIN-NEXT:    [[TMP4:%.*]] = xor i32 [[TMP1]], -1
2047; ORIGIN-NEXT:    [[TMP5:%.*]] = and i32 [[TMP3]], [[TMP4]]
2048; ORIGIN-NEXT:    [[TMP6:%.*]] = or i32 [[TMP3]], [[TMP1]]
2049; ORIGIN-NEXT:    [[TMP9:%.*]] = icmp ule i32 [[TMP5]], 2147483647
2050; ORIGIN-NEXT:    [[TMP16:%.*]] = icmp ule i32 [[TMP6]], 2147483647
2051; ORIGIN-NEXT:    [[TMP17:%.*]] = xor i1 [[TMP9]], [[TMP16]]
2052; ORIGIN-NEXT:    [[TMP18:%.*]] = icmp sle i32 [[X]], -1
2053; ORIGIN-NEXT:    store i1 [[TMP17]], ptr @__msan_retval_tls, align 8
2054; ORIGIN-NEXT:    store i32 [[TMP2]], ptr @__msan_retval_origin_tls, align 4
2055; ORIGIN-NEXT:    ret i1 [[TMP18]]
2056;
2057; CALLS-LABEL: define zeroext i1 @ICmpSLEAllOnes(
2058; CALLS-SAME: i32 [[X:%.*]]) #[[ATTR0]] {
2059; CALLS-NEXT:    [[TMP1:%.*]] = load i32, ptr @__msan_param_tls, align 8
2060; CALLS-NEXT:    [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
2061; CALLS-NEXT:    call void @llvm.donothing()
2062; CALLS-NEXT:    [[TMP3:%.*]] = xor i32 [[X]], -2147483648
2063; CALLS-NEXT:    [[TMP4:%.*]] = xor i32 [[TMP1]], -1
2064; CALLS-NEXT:    [[TMP5:%.*]] = and i32 [[TMP3]], [[TMP4]]
2065; CALLS-NEXT:    [[TMP6:%.*]] = or i32 [[TMP3]], [[TMP1]]
2066; CALLS-NEXT:    [[TMP9:%.*]] = icmp ule i32 [[TMP5]], 2147483647
2067; CALLS-NEXT:    [[TMP16:%.*]] = icmp ule i32 [[TMP6]], 2147483647
2068; CALLS-NEXT:    [[TMP17:%.*]] = xor i1 [[TMP9]], [[TMP16]]
2069; CALLS-NEXT:    [[TMP18:%.*]] = icmp sle i32 [[X]], -1
2070; CALLS-NEXT:    store i1 [[TMP17]], ptr @__msan_retval_tls, align 8
2071; CALLS-NEXT:    store i32 [[TMP2]], ptr @__msan_retval_origin_tls, align 4
2072; CALLS-NEXT:    ret i1 [[TMP18]]
2073;
2074  %1 = icmp sle i32 %x, -1
2075  ret i1 %1
2076}
2077
2078
2079
2080; Check that we propagate shadow for x<0, x>=0, etc (i.e. sign bit tests)
2081; of the vector arguments.
2082
2083define <2 x i1> @ICmpSLT_vector_Zero(<2 x ptr> %x) nounwind uwtable readnone sanitize_memory {
2084; CHECK-LABEL: define <2 x i1> @ICmpSLT_vector_Zero(
2085; CHECK-SAME: <2 x ptr> [[X:%.*]]) #[[ATTR0]] {
2086; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
2087; CHECK-NEXT:    call void @llvm.donothing()
2088; CHECK-NEXT:    [[TMP2:%.*]] = ptrtoint <2 x ptr> [[X]] to <2 x i64>
2089; CHECK-NEXT:    [[TMP3:%.*]] = xor <2 x i64> [[TMP2]], splat (i64 -9223372036854775808)
2090; CHECK-NEXT:    [[TMP4:%.*]] = xor <2 x i64> [[TMP1]], splat (i64 -1)
2091; CHECK-NEXT:    [[TMP5:%.*]] = and <2 x i64> [[TMP3]], [[TMP4]]
2092; CHECK-NEXT:    [[TMP6:%.*]] = or <2 x i64> [[TMP3]], [[TMP1]]
2093; CHECK-NEXT:    [[TMP9:%.*]] = icmp ult <2 x i64> [[TMP5]], splat (i64 -9223372036854775808)
2094; CHECK-NEXT:    [[TMP16:%.*]] = icmp ult <2 x i64> [[TMP6]], splat (i64 -9223372036854775808)
2095; CHECK-NEXT:    [[TMP17:%.*]] = xor <2 x i1> [[TMP9]], [[TMP16]]
2096; CHECK-NEXT:    [[TMP18:%.*]] = icmp slt <2 x ptr> [[X]], zeroinitializer
2097; CHECK-NEXT:    store <2 x i1> [[TMP17]], ptr @__msan_retval_tls, align 8
2098; CHECK-NEXT:    ret <2 x i1> [[TMP18]]
2099;
2100; ORIGIN-LABEL: define <2 x i1> @ICmpSLT_vector_Zero(
2101; ORIGIN-SAME: <2 x ptr> [[X:%.*]]) #[[ATTR0]] {
2102; ORIGIN-NEXT:    [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
2103; ORIGIN-NEXT:    [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
2104; ORIGIN-NEXT:    call void @llvm.donothing()
2105; ORIGIN-NEXT:    [[TMP3:%.*]] = ptrtoint <2 x ptr> [[X]] to <2 x i64>
2106; ORIGIN-NEXT:    [[TMP4:%.*]] = xor <2 x i64> [[TMP3]], splat (i64 -9223372036854775808)
2107; ORIGIN-NEXT:    [[TMP5:%.*]] = xor <2 x i64> [[TMP1]], splat (i64 -1)
2108; ORIGIN-NEXT:    [[TMP6:%.*]] = and <2 x i64> [[TMP4]], [[TMP5]]
2109; ORIGIN-NEXT:    [[TMP7:%.*]] = or <2 x i64> [[TMP4]], [[TMP1]]
2110; ORIGIN-NEXT:    [[TMP10:%.*]] = icmp ult <2 x i64> [[TMP6]], splat (i64 -9223372036854775808)
2111; ORIGIN-NEXT:    [[TMP17:%.*]] = icmp ult <2 x i64> [[TMP7]], splat (i64 -9223372036854775808)
2112; ORIGIN-NEXT:    [[TMP18:%.*]] = xor <2 x i1> [[TMP10]], [[TMP17]]
2113; ORIGIN-NEXT:    [[TMP19:%.*]] = icmp slt <2 x ptr> [[X]], zeroinitializer
2114; ORIGIN-NEXT:    store <2 x i1> [[TMP18]], ptr @__msan_retval_tls, align 8
2115; ORIGIN-NEXT:    store i32 [[TMP2]], ptr @__msan_retval_origin_tls, align 4
2116; ORIGIN-NEXT:    ret <2 x i1> [[TMP19]]
2117;
2118; CALLS-LABEL: define <2 x i1> @ICmpSLT_vector_Zero(
2119; CALLS-SAME: <2 x ptr> [[X:%.*]]) #[[ATTR0]] {
2120; CALLS-NEXT:    [[TMP1:%.*]] = load <2 x i64>, ptr @__msan_param_tls, align 8
2121; CALLS-NEXT:    [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
2122; CALLS-NEXT:    call void @llvm.donothing()
2123; CALLS-NEXT:    [[TMP3:%.*]] = ptrtoint <2 x ptr> [[X]] to <2 x i64>
2124; CALLS-NEXT:    [[TMP4:%.*]] = xor <2 x i64> [[TMP3]], splat (i64 -9223372036854775808)
2125; CALLS-NEXT:    [[TMP5:%.*]] = xor <2 x i64> [[TMP1]], splat (i64 -1)
2126; CALLS-NEXT:    [[TMP6:%.*]] = and <2 x i64> [[TMP4]], [[TMP5]]
2127; CALLS-NEXT:    [[TMP7:%.*]] = or <2 x i64> [[TMP4]], [[TMP1]]
2128; CALLS-NEXT:    [[TMP10:%.*]] = icmp ult <2 x i64> [[TMP6]], splat (i64 -9223372036854775808)
2129; CALLS-NEXT:    [[TMP17:%.*]] = icmp ult <2 x i64> [[TMP7]], splat (i64 -9223372036854775808)
2130; CALLS-NEXT:    [[TMP18:%.*]] = xor <2 x i1> [[TMP10]], [[TMP17]]
2131; CALLS-NEXT:    [[TMP19:%.*]] = icmp slt <2 x ptr> [[X]], zeroinitializer
2132; CALLS-NEXT:    store <2 x i1> [[TMP18]], ptr @__msan_retval_tls, align 8
2133; CALLS-NEXT:    store i32 [[TMP2]], ptr @__msan_retval_origin_tls, align 4
2134; CALLS-NEXT:    ret <2 x i1> [[TMP19]]
2135;
2136  %1 = icmp slt <2 x ptr> %x, zeroinitializer
2137  ret <2 x i1> %1
2138}
2139
2140
2141; Check that we propagate shadow for x<=-1, x>0, etc (i.e. sign bit tests)
2142; of the vector arguments.
2143
2144define <2 x i1> @ICmpSLT_vector_AllOnes(<2 x i32> %x) nounwind uwtable readnone sanitize_memory {
2145; CHECK-LABEL: define <2 x i1> @ICmpSLT_vector_AllOnes(
2146; CHECK-SAME: <2 x i32> [[X:%.*]]) #[[ATTR0]] {
2147; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i32>, ptr @__msan_param_tls, align 8
2148; CHECK-NEXT:    call void @llvm.donothing()
2149; CHECK-NEXT:    [[TMP2:%.*]] = xor <2 x i32> [[X]], splat (i32 -2147483648)
2150; CHECK-NEXT:    [[TMP3:%.*]] = xor <2 x i32> [[TMP1]], splat (i32 -1)
2151; CHECK-NEXT:    [[TMP4:%.*]] = and <2 x i32> [[TMP2]], [[TMP3]]
2152; CHECK-NEXT:    [[TMP5:%.*]] = or <2 x i32> [[TMP2]], [[TMP1]]
2153; CHECK-NEXT:    [[TMP8:%.*]] = icmp ult <2 x i32> splat (i32 2147483647), [[TMP5]]
2154; CHECK-NEXT:    [[TMP15:%.*]] = icmp ult <2 x i32> splat (i32 2147483647), [[TMP4]]
2155; CHECK-NEXT:    [[TMP16:%.*]] = xor <2 x i1> [[TMP8]], [[TMP15]]
2156; CHECK-NEXT:    [[TMP17:%.*]] = icmp slt <2 x i32> splat (i32 -1), [[X]]
2157; CHECK-NEXT:    store <2 x i1> [[TMP16]], ptr @__msan_retval_tls, align 8
2158; CHECK-NEXT:    ret <2 x i1> [[TMP17]]
2159;
2160; ORIGIN-LABEL: define <2 x i1> @ICmpSLT_vector_AllOnes(
2161; ORIGIN-SAME: <2 x i32> [[X:%.*]]) #[[ATTR0]] {
2162; ORIGIN-NEXT:    [[TMP1:%.*]] = load <2 x i32>, ptr @__msan_param_tls, align 8
2163; ORIGIN-NEXT:    [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
2164; ORIGIN-NEXT:    call void @llvm.donothing()
2165; ORIGIN-NEXT:    [[TMP3:%.*]] = xor <2 x i32> [[X]], splat (i32 -2147483648)
2166; ORIGIN-NEXT:    [[TMP4:%.*]] = xor <2 x i32> [[TMP1]], splat (i32 -1)
2167; ORIGIN-NEXT:    [[TMP5:%.*]] = and <2 x i32> [[TMP3]], [[TMP4]]
2168; ORIGIN-NEXT:    [[TMP6:%.*]] = or <2 x i32> [[TMP3]], [[TMP1]]
2169; ORIGIN-NEXT:    [[TMP9:%.*]] = icmp ult <2 x i32> splat (i32 2147483647), [[TMP6]]
2170; ORIGIN-NEXT:    [[TMP16:%.*]] = icmp ult <2 x i32> splat (i32 2147483647), [[TMP5]]
2171; ORIGIN-NEXT:    [[TMP17:%.*]] = xor <2 x i1> [[TMP9]], [[TMP16]]
2172; ORIGIN-NEXT:    [[TMP18:%.*]] = bitcast <2 x i32> [[TMP1]] to i64
2173; ORIGIN-NEXT:    [[TMP19:%.*]] = icmp ne i64 [[TMP18]], 0
2174; ORIGIN-NEXT:    [[TMP20:%.*]] = select i1 [[TMP19]], i32 [[TMP2]], i32 0
2175; ORIGIN-NEXT:    [[TMP21:%.*]] = icmp slt <2 x i32> splat (i32 -1), [[X]]
2176; ORIGIN-NEXT:    store <2 x i1> [[TMP17]], ptr @__msan_retval_tls, align 8
2177; ORIGIN-NEXT:    store i32 [[TMP20]], ptr @__msan_retval_origin_tls, align 4
2178; ORIGIN-NEXT:    ret <2 x i1> [[TMP21]]
2179;
2180; CALLS-LABEL: define <2 x i1> @ICmpSLT_vector_AllOnes(
2181; CALLS-SAME: <2 x i32> [[X:%.*]]) #[[ATTR0]] {
2182; CALLS-NEXT:    [[TMP1:%.*]] = load <2 x i32>, ptr @__msan_param_tls, align 8
2183; CALLS-NEXT:    [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
2184; CALLS-NEXT:    call void @llvm.donothing()
2185; CALLS-NEXT:    [[TMP3:%.*]] = xor <2 x i32> [[X]], splat (i32 -2147483648)
2186; CALLS-NEXT:    [[TMP4:%.*]] = xor <2 x i32> [[TMP1]], splat (i32 -1)
2187; CALLS-NEXT:    [[TMP5:%.*]] = and <2 x i32> [[TMP3]], [[TMP4]]
2188; CALLS-NEXT:    [[TMP6:%.*]] = or <2 x i32> [[TMP3]], [[TMP1]]
2189; CALLS-NEXT:    [[TMP9:%.*]] = icmp ult <2 x i32> splat (i32 2147483647), [[TMP6]]
2190; CALLS-NEXT:    [[TMP16:%.*]] = icmp ult <2 x i32> splat (i32 2147483647), [[TMP5]]
2191; CALLS-NEXT:    [[TMP17:%.*]] = xor <2 x i1> [[TMP9]], [[TMP16]]
2192; CALLS-NEXT:    [[TMP18:%.*]] = bitcast <2 x i32> [[TMP1]] to i64
2193; CALLS-NEXT:    [[TMP19:%.*]] = icmp ne i64 [[TMP18]], 0
2194; CALLS-NEXT:    [[TMP20:%.*]] = select i1 [[TMP19]], i32 [[TMP2]], i32 0
2195; CALLS-NEXT:    [[TMP21:%.*]] = icmp slt <2 x i32> splat (i32 -1), [[X]]
2196; CALLS-NEXT:    store <2 x i1> [[TMP17]], ptr @__msan_retval_tls, align 8
2197; CALLS-NEXT:    store i32 [[TMP20]], ptr @__msan_retval_origin_tls, align 4
2198; CALLS-NEXT:    ret <2 x i1> [[TMP21]]
2199;
2200  %1 = icmp slt <2 x i32> <i32 -1, i32 -1>, %x
2201  ret <2 x i1> %1
2202}
2203
2204
2205
2206; Check that we propagate shadow for unsigned relational comparisons with
2207; constants
2208
2209define zeroext i1 @ICmpUGTConst(i32 %x) nounwind uwtable readnone sanitize_memory {
2210; CHECK-LABEL: define zeroext i1 @ICmpUGTConst(
2211; CHECK-SAME: i32 [[X:%.*]]) #[[ATTR0]] {
2212; CHECK-NEXT:  [[ENTRY:.*:]]
2213; CHECK-NEXT:    [[TMP0:%.*]] = load i32, ptr @__msan_param_tls, align 8
2214; CHECK-NEXT:    call void @llvm.donothing()
2215; CHECK-NEXT:    [[TMP1:%.*]] = xor i32 [[TMP0]], -1
2216; CHECK-NEXT:    [[TMP2:%.*]] = and i32 [[X]], [[TMP1]]
2217; CHECK-NEXT:    [[TMP4:%.*]] = or i32 [[X]], [[TMP0]]
2218; CHECK-NEXT:    [[TMP3:%.*]] = icmp ugt i32 [[TMP2]], 7
2219; CHECK-NEXT:    [[TMP5:%.*]] = icmp ugt i32 [[TMP4]], 7
2220; CHECK-NEXT:    [[TMP6:%.*]] = xor i1 [[TMP3]], [[TMP5]]
2221; CHECK-NEXT:    [[CMP:%.*]] = icmp ugt i32 [[X]], 7
2222; CHECK-NEXT:    store i1 [[TMP6]], ptr @__msan_retval_tls, align 8
2223; CHECK-NEXT:    ret i1 [[CMP]]
2224;
2225; ORIGIN-LABEL: define zeroext i1 @ICmpUGTConst(
2226; ORIGIN-SAME: i32 [[X:%.*]]) #[[ATTR0]] {
2227; ORIGIN-NEXT:  [[ENTRY:.*:]]
2228; ORIGIN-NEXT:    [[TMP0:%.*]] = load i32, ptr @__msan_param_tls, align 8
2229; ORIGIN-NEXT:    [[TMP1:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
2230; ORIGIN-NEXT:    call void @llvm.donothing()
2231; ORIGIN-NEXT:    [[TMP2:%.*]] = xor i32 [[TMP0]], -1
2232; ORIGIN-NEXT:    [[TMP3:%.*]] = and i32 [[X]], [[TMP2]]
2233; ORIGIN-NEXT:    [[TMP5:%.*]] = or i32 [[X]], [[TMP0]]
2234; ORIGIN-NEXT:    [[TMP4:%.*]] = icmp ugt i32 [[TMP3]], 7
2235; ORIGIN-NEXT:    [[TMP6:%.*]] = icmp ugt i32 [[TMP5]], 7
2236; ORIGIN-NEXT:    [[TMP7:%.*]] = xor i1 [[TMP4]], [[TMP6]]
2237; ORIGIN-NEXT:    [[CMP:%.*]] = icmp ugt i32 [[X]], 7
2238; ORIGIN-NEXT:    store i1 [[TMP7]], ptr @__msan_retval_tls, align 8
2239; ORIGIN-NEXT:    store i32 [[TMP1]], ptr @__msan_retval_origin_tls, align 4
2240; ORIGIN-NEXT:    ret i1 [[CMP]]
2241;
2242; CALLS-LABEL: define zeroext i1 @ICmpUGTConst(
2243; CALLS-SAME: i32 [[X:%.*]]) #[[ATTR0]] {
2244; CALLS-NEXT:  [[ENTRY:.*:]]
2245; CALLS-NEXT:    [[TMP0:%.*]] = load i32, ptr @__msan_param_tls, align 8
2246; CALLS-NEXT:    [[TMP1:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
2247; CALLS-NEXT:    call void @llvm.donothing()
2248; CALLS-NEXT:    [[TMP2:%.*]] = xor i32 [[TMP0]], -1
2249; CALLS-NEXT:    [[TMP3:%.*]] = and i32 [[X]], [[TMP2]]
2250; CALLS-NEXT:    [[TMP5:%.*]] = or i32 [[X]], [[TMP0]]
2251; CALLS-NEXT:    [[TMP4:%.*]] = icmp ugt i32 [[TMP3]], 7
2252; CALLS-NEXT:    [[TMP6:%.*]] = icmp ugt i32 [[TMP5]], 7
2253; CALLS-NEXT:    [[TMP7:%.*]] = xor i1 [[TMP4]], [[TMP6]]
2254; CALLS-NEXT:    [[CMP:%.*]] = icmp ugt i32 [[X]], 7
2255; CALLS-NEXT:    store i1 [[TMP7]], ptr @__msan_retval_tls, align 8
2256; CALLS-NEXT:    store i32 [[TMP1]], ptr @__msan_retval_origin_tls, align 4
2257; CALLS-NEXT:    ret i1 [[CMP]]
2258;
2259entry:
2260  %cmp = icmp ugt i32 %x, 7
2261  ret i1 %cmp
2262}
2263
2264
2265
2266; Check that loads of shadow have the same alignment as the original loads.
2267; Check that loads of origin have the alignment of max(4, original alignment).
2268
2269define i32 @ShadowLoadAlignmentLarge() nounwind uwtable sanitize_memory {
2270; CHECK-LABEL: define i32 @ShadowLoadAlignmentLarge(
2271; CHECK-SAME: ) #[[ATTR0]] {
2272; CHECK-NEXT:    call void @llvm.donothing()
2273; CHECK-NEXT:    [[Y:%.*]] = alloca i32, align 64
2274; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[Y]] to i64
2275; CHECK-NEXT:    [[TMP2:%.*]] = xor i64 [[TMP1]], 87960930222080
2276; CHECK-NEXT:    [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to ptr
2277; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 64 [[TMP3]], i8 -1, i64 4, i1 false)
2278; CHECK-NEXT:    [[TMP4:%.*]] = load volatile i32, ptr [[Y]], align 64
2279; CHECK-NEXT:    [[TMP5:%.*]] = ptrtoint ptr [[Y]] to i64
2280; CHECK-NEXT:    [[TMP6:%.*]] = xor i64 [[TMP5]], 87960930222080
2281; CHECK-NEXT:    [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
2282; CHECK-NEXT:    [[_MSLD:%.*]] = load i32, ptr [[TMP7]], align 64
2283; CHECK-NEXT:    store i32 [[_MSLD]], ptr @__msan_retval_tls, align 8
2284; CHECK-NEXT:    ret i32 [[TMP4]]
2285;
2286; ORIGIN-LABEL: define i32 @ShadowLoadAlignmentLarge(
2287; ORIGIN-SAME: ) #[[ATTR0]] {
2288; ORIGIN-NEXT:    call void @llvm.donothing()
2289; ORIGIN-NEXT:    [[Y:%.*]] = alloca i32, align 64
2290; ORIGIN-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[Y]] to i64
2291; ORIGIN-NEXT:    [[TMP2:%.*]] = xor i64 [[TMP1]], 87960930222080
2292; ORIGIN-NEXT:    [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to ptr
2293; ORIGIN-NEXT:    [[TMP4:%.*]] = add i64 [[TMP2]], 17592186044416
2294; ORIGIN-NEXT:    [[TMP5:%.*]] = and i64 [[TMP4]], -4
2295; ORIGIN-NEXT:    [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
2296; ORIGIN-NEXT:    call void @llvm.memset.p0.i64(ptr align 64 [[TMP3]], i8 -1, i64 4, i1 false)
2297; ORIGIN-NEXT:    call void @__msan_set_alloca_origin_with_descr(ptr [[Y]], i64 4, ptr @[[GLOB0:[0-9]+]], ptr @[[GLOB1:[0-9]+]])
2298; ORIGIN-NEXT:    [[TMP7:%.*]] = load volatile i32, ptr [[Y]], align 64
2299; ORIGIN-NEXT:    [[TMP8:%.*]] = ptrtoint ptr [[Y]] to i64
2300; ORIGIN-NEXT:    [[TMP9:%.*]] = xor i64 [[TMP8]], 87960930222080
2301; ORIGIN-NEXT:    [[TMP10:%.*]] = inttoptr i64 [[TMP9]] to ptr
2302; ORIGIN-NEXT:    [[TMP11:%.*]] = add i64 [[TMP9]], 17592186044416
2303; ORIGIN-NEXT:    [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
2304; ORIGIN-NEXT:    [[_MSLD:%.*]] = load i32, ptr [[TMP10]], align 64
2305; ORIGIN-NEXT:    [[TMP13:%.*]] = load i32, ptr [[TMP12]], align 64
2306; ORIGIN-NEXT:    store i32 [[_MSLD]], ptr @__msan_retval_tls, align 8
2307; ORIGIN-NEXT:    store i32 [[TMP13]], ptr @__msan_retval_origin_tls, align 4
2308; ORIGIN-NEXT:    ret i32 [[TMP7]]
2309;
2310; CALLS-LABEL: define i32 @ShadowLoadAlignmentLarge(
2311; CALLS-SAME: ) #[[ATTR0]] {
2312; CALLS-NEXT:    call void @llvm.donothing()
2313; CALLS-NEXT:    [[Y:%.*]] = alloca i32, align 64
2314; CALLS-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[Y]] to i64
2315; CALLS-NEXT:    [[TMP2:%.*]] = xor i64 [[TMP1]], 87960930222080
2316; CALLS-NEXT:    [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to ptr
2317; CALLS-NEXT:    [[TMP4:%.*]] = add i64 [[TMP2]], 17592186044416
2318; CALLS-NEXT:    [[TMP5:%.*]] = and i64 [[TMP4]], -4
2319; CALLS-NEXT:    [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
2320; CALLS-NEXT:    call void @llvm.memset.p0.i64(ptr align 64 [[TMP3]], i8 -1, i64 4, i1 false)
2321; CALLS-NEXT:    call void @__msan_set_alloca_origin_with_descr(ptr [[Y]], i64 4, ptr @[[GLOB0:[0-9]+]], ptr @[[GLOB1:[0-9]+]])
2322; CALLS-NEXT:    [[TMP7:%.*]] = load volatile i32, ptr [[Y]], align 64
2323; CALLS-NEXT:    [[TMP8:%.*]] = ptrtoint ptr [[Y]] to i64
2324; CALLS-NEXT:    [[TMP9:%.*]] = xor i64 [[TMP8]], 87960930222080
2325; CALLS-NEXT:    [[TMP10:%.*]] = inttoptr i64 [[TMP9]] to ptr
2326; CALLS-NEXT:    [[TMP11:%.*]] = add i64 [[TMP9]], 17592186044416
2327; CALLS-NEXT:    [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
2328; CALLS-NEXT:    [[_MSLD:%.*]] = load i32, ptr [[TMP10]], align 64
2329; CALLS-NEXT:    [[TMP13:%.*]] = load i32, ptr [[TMP12]], align 64
2330; CALLS-NEXT:    store i32 [[_MSLD]], ptr @__msan_retval_tls, align 8
2331; CALLS-NEXT:    store i32 [[TMP13]], ptr @__msan_retval_origin_tls, align 4
2332; CALLS-NEXT:    ret i32 [[TMP7]]
2333;
2334  %y = alloca i32, align 64
2335  %1 = load volatile i32, ptr %y, align 64
2336  ret i32 %1
2337}
2338
2339
2340define i32 @ShadowLoadAlignmentSmall() nounwind uwtable sanitize_memory {
2341; CHECK-LABEL: define i32 @ShadowLoadAlignmentSmall(
2342; CHECK-SAME: ) #[[ATTR0]] {
2343; CHECK-NEXT:    call void @llvm.donothing()
2344; CHECK-NEXT:    [[Y:%.*]] = alloca i32, align 2
2345; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[Y]] to i64
2346; CHECK-NEXT:    [[TMP2:%.*]] = xor i64 [[TMP1]], 87960930222080
2347; CHECK-NEXT:    [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to ptr
2348; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 2 [[TMP3]], i8 -1, i64 4, i1 false)
2349; CHECK-NEXT:    [[TMP4:%.*]] = load volatile i32, ptr [[Y]], align 2
2350; CHECK-NEXT:    [[TMP5:%.*]] = ptrtoint ptr [[Y]] to i64
2351; CHECK-NEXT:    [[TMP6:%.*]] = xor i64 [[TMP5]], 87960930222080
2352; CHECK-NEXT:    [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
2353; CHECK-NEXT:    [[_MSLD:%.*]] = load i32, ptr [[TMP7]], align 2
2354; CHECK-NEXT:    store i32 [[_MSLD]], ptr @__msan_retval_tls, align 8
2355; CHECK-NEXT:    ret i32 [[TMP4]]
2356;
2357; ORIGIN-LABEL: define i32 @ShadowLoadAlignmentSmall(
2358; ORIGIN-SAME: ) #[[ATTR0]] {
2359; ORIGIN-NEXT:    call void @llvm.donothing()
2360; ORIGIN-NEXT:    [[Y:%.*]] = alloca i32, align 2
2361; ORIGIN-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[Y]] to i64
2362; ORIGIN-NEXT:    [[TMP2:%.*]] = xor i64 [[TMP1]], 87960930222080
2363; ORIGIN-NEXT:    [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to ptr
2364; ORIGIN-NEXT:    [[TMP4:%.*]] = add i64 [[TMP2]], 17592186044416
2365; ORIGIN-NEXT:    [[TMP5:%.*]] = and i64 [[TMP4]], -4
2366; ORIGIN-NEXT:    [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
2367; ORIGIN-NEXT:    call void @llvm.memset.p0.i64(ptr align 2 [[TMP3]], i8 -1, i64 4, i1 false)
2368; ORIGIN-NEXT:    call void @__msan_set_alloca_origin_with_descr(ptr [[Y]], i64 4, ptr @[[GLOB2:[0-9]+]], ptr @[[GLOB3:[0-9]+]])
2369; ORIGIN-NEXT:    [[TMP7:%.*]] = load volatile i32, ptr [[Y]], align 2
2370; ORIGIN-NEXT:    [[TMP8:%.*]] = ptrtoint ptr [[Y]] to i64
2371; ORIGIN-NEXT:    [[TMP9:%.*]] = xor i64 [[TMP8]], 87960930222080
2372; ORIGIN-NEXT:    [[TMP10:%.*]] = inttoptr i64 [[TMP9]] to ptr
2373; ORIGIN-NEXT:    [[TMP11:%.*]] = add i64 [[TMP9]], 17592186044416
2374; ORIGIN-NEXT:    [[TMP12:%.*]] = and i64 [[TMP11]], -4
2375; ORIGIN-NEXT:    [[TMP13:%.*]] = inttoptr i64 [[TMP12]] to ptr
2376; ORIGIN-NEXT:    [[_MSLD:%.*]] = load i32, ptr [[TMP10]], align 2
2377; ORIGIN-NEXT:    [[TMP14:%.*]] = load i32, ptr [[TMP13]], align 4
2378; ORIGIN-NEXT:    store i32 [[_MSLD]], ptr @__msan_retval_tls, align 8
2379; ORIGIN-NEXT:    store i32 [[TMP14]], ptr @__msan_retval_origin_tls, align 4
2380; ORIGIN-NEXT:    ret i32 [[TMP7]]
2381;
2382; CALLS-LABEL: define i32 @ShadowLoadAlignmentSmall(
2383; CALLS-SAME: ) #[[ATTR0]] {
2384; CALLS-NEXT:    call void @llvm.donothing()
2385; CALLS-NEXT:    [[Y:%.*]] = alloca i32, align 2
2386; CALLS-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[Y]] to i64
2387; CALLS-NEXT:    [[TMP2:%.*]] = xor i64 [[TMP1]], 87960930222080
2388; CALLS-NEXT:    [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to ptr
2389; CALLS-NEXT:    [[TMP4:%.*]] = add i64 [[TMP2]], 17592186044416
2390; CALLS-NEXT:    [[TMP5:%.*]] = and i64 [[TMP4]], -4
2391; CALLS-NEXT:    [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
2392; CALLS-NEXT:    call void @llvm.memset.p0.i64(ptr align 2 [[TMP3]], i8 -1, i64 4, i1 false)
2393; CALLS-NEXT:    call void @__msan_set_alloca_origin_with_descr(ptr [[Y]], i64 4, ptr @[[GLOB2:[0-9]+]], ptr @[[GLOB3:[0-9]+]])
2394; CALLS-NEXT:    [[TMP7:%.*]] = load volatile i32, ptr [[Y]], align 2
2395; CALLS-NEXT:    [[TMP8:%.*]] = ptrtoint ptr [[Y]] to i64
2396; CALLS-NEXT:    [[TMP9:%.*]] = xor i64 [[TMP8]], 87960930222080
2397; CALLS-NEXT:    [[TMP10:%.*]] = inttoptr i64 [[TMP9]] to ptr
2398; CALLS-NEXT:    [[TMP11:%.*]] = add i64 [[TMP9]], 17592186044416
2399; CALLS-NEXT:    [[TMP12:%.*]] = and i64 [[TMP11]], -4
2400; CALLS-NEXT:    [[TMP13:%.*]] = inttoptr i64 [[TMP12]] to ptr
2401; CALLS-NEXT:    [[_MSLD:%.*]] = load i32, ptr [[TMP10]], align 2
2402; CALLS-NEXT:    [[TMP14:%.*]] = load i32, ptr [[TMP13]], align 4
2403; CALLS-NEXT:    store i32 [[_MSLD]], ptr @__msan_retval_tls, align 8
2404; CALLS-NEXT:    store i32 [[TMP14]], ptr @__msan_retval_origin_tls, align 4
2405; CALLS-NEXT:    ret i32 [[TMP7]]
2406;
2407  %y = alloca i32, align 2
2408  %1 = load volatile i32, ptr %y, align 2
2409  ret i32 %1
2410}
2411
2412; Test vector manipulation instructions.
2413; Check that the same bit manipulation is applied to the shadow values.
2414; Check that there is a zero test of the shadow of %idx argument, where present.
2415
2416define i32 @ExtractElement(<4 x i32> %vec, i32 %idx) sanitize_memory {
2417; CHECK-LABEL: define i32 @ExtractElement(
2418; CHECK-SAME: <4 x i32> [[VEC:%.*]], i32 [[IDX:%.*]]) #[[ATTR6]] {
2419; CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
2420; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
2421; CHECK-NEXT:    call void @llvm.donothing()
2422; CHECK-NEXT:    [[_MSPROP:%.*]] = extractelement <4 x i32> [[TMP2]], i32 [[IDX]]
2423; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i32 [[TMP1]], 0
2424; CHECK-NEXT:    br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]]
2425; CHECK:       [[BB3]]:
2426; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR12]]
2427; CHECK-NEXT:    unreachable
2428; CHECK:       [[BB4]]:
2429; CHECK-NEXT:    [[X:%.*]] = extractelement <4 x i32> [[VEC]], i32 [[IDX]]
2430; CHECK-NEXT:    store i32 [[_MSPROP]], ptr @__msan_retval_tls, align 8
2431; CHECK-NEXT:    ret i32 [[X]]
2432;
2433; ORIGIN-LABEL: define i32 @ExtractElement(
2434; ORIGIN-SAME: <4 x i32> [[VEC:%.*]], i32 [[IDX:%.*]]) #[[ATTR6]] {
2435; ORIGIN-NEXT:    [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
2436; ORIGIN-NEXT:    [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4
2437; ORIGIN-NEXT:    [[TMP3:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
2438; ORIGIN-NEXT:    [[TMP4:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
2439; ORIGIN-NEXT:    call void @llvm.donothing()
2440; ORIGIN-NEXT:    [[_MSPROP:%.*]] = extractelement <4 x i32> [[TMP3]], i32 [[IDX]]
2441; ORIGIN-NEXT:    [[_MSCMP:%.*]] = icmp ne i32 [[TMP1]], 0
2442; ORIGIN-NEXT:    br i1 [[_MSCMP]], label %[[BB5:.*]], label %[[BB6:.*]], !prof [[PROF1]]
2443; ORIGIN:       [[BB5]]:
2444; ORIGIN-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP2]]) #[[ATTR12]]
2445; ORIGIN-NEXT:    unreachable
2446; ORIGIN:       [[BB6]]:
2447; ORIGIN-NEXT:    [[X:%.*]] = extractelement <4 x i32> [[VEC]], i32 [[IDX]]
2448; ORIGIN-NEXT:    store i32 [[_MSPROP]], ptr @__msan_retval_tls, align 8
2449; ORIGIN-NEXT:    store i32 [[TMP4]], ptr @__msan_retval_origin_tls, align 4
2450; ORIGIN-NEXT:    ret i32 [[X]]
2451;
2452; CALLS-LABEL: define i32 @ExtractElement(
2453; CALLS-SAME: <4 x i32> [[VEC:%.*]], i32 [[IDX:%.*]]) #[[ATTR6]] {
2454; CALLS-NEXT:    [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
2455; CALLS-NEXT:    [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4
2456; CALLS-NEXT:    [[TMP3:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
2457; CALLS-NEXT:    [[TMP4:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
2458; CALLS-NEXT:    call void @llvm.donothing()
2459; CALLS-NEXT:    [[_MSPROP:%.*]] = extractelement <4 x i32> [[TMP3]], i32 [[IDX]]
2460; CALLS-NEXT:    call void @__msan_maybe_warning_4(i32 zeroext [[TMP1]], i32 zeroext [[TMP2]])
2461; CALLS-NEXT:    [[X:%.*]] = extractelement <4 x i32> [[VEC]], i32 [[IDX]]
2462; CALLS-NEXT:    store i32 [[_MSPROP]], ptr @__msan_retval_tls, align 8
2463; CALLS-NEXT:    store i32 [[TMP4]], ptr @__msan_retval_origin_tls, align 4
2464; CALLS-NEXT:    ret i32 [[X]]
2465;
2466  %x = extractelement <4 x i32> %vec, i32 %idx
2467  ret i32 %x
2468}
2469
2470define <4 x i32> @InsertElement(<4 x i32> %vec, i32 %idx, i32 %x) sanitize_memory {
2471; CHECK-LABEL: define <4 x i32> @InsertElement(
2472; CHECK-SAME: <4 x i32> [[VEC:%.*]], i32 [[IDX:%.*]], i32 [[X:%.*]]) #[[ATTR6]] {
2473; CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
2474; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
2475; CHECK-NEXT:    [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
2476; CHECK-NEXT:    call void @llvm.donothing()
2477; CHECK-NEXT:    [[_MSPROP:%.*]] = insertelement <4 x i32> [[TMP2]], i32 [[TMP3]], i32 [[IDX]]
2478; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i32 [[TMP1]], 0
2479; CHECK-NEXT:    br i1 [[_MSCMP]], label %[[BB4:.*]], label %[[BB5:.*]], !prof [[PROF1]]
2480; CHECK:       [[BB4]]:
2481; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR12]]
2482; CHECK-NEXT:    unreachable
2483; CHECK:       [[BB5]]:
2484; CHECK-NEXT:    [[VEC1:%.*]] = insertelement <4 x i32> [[VEC]], i32 [[X]], i32 [[IDX]]
2485; CHECK-NEXT:    store <4 x i32> [[_MSPROP]], ptr @__msan_retval_tls, align 8
2486; CHECK-NEXT:    ret <4 x i32> [[VEC1]]
2487;
2488; ORIGIN-LABEL: define <4 x i32> @InsertElement(
2489; ORIGIN-SAME: <4 x i32> [[VEC:%.*]], i32 [[IDX:%.*]], i32 [[X:%.*]]) #[[ATTR6]] {
2490; ORIGIN-NEXT:    [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
2491; ORIGIN-NEXT:    [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4
2492; ORIGIN-NEXT:    [[TMP3:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
2493; ORIGIN-NEXT:    [[TMP4:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
2494; ORIGIN-NEXT:    [[TMP5:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
2495; ORIGIN-NEXT:    [[TMP6:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 24) to ptr), align 4
2496; ORIGIN-NEXT:    call void @llvm.donothing()
2497; ORIGIN-NEXT:    [[_MSPROP:%.*]] = insertelement <4 x i32> [[TMP3]], i32 [[TMP5]], i32 [[IDX]]
2498; ORIGIN-NEXT:    [[TMP7:%.*]] = icmp ne i32 [[TMP5]], 0
2499; ORIGIN-NEXT:    [[TMP8:%.*]] = select i1 [[TMP7]], i32 [[TMP6]], i32 [[TMP4]]
2500; ORIGIN-NEXT:    [[TMP9:%.*]] = icmp ne i32 [[TMP1]], 0
2501; ORIGIN-NEXT:    [[TMP10:%.*]] = select i1 [[TMP9]], i32 [[TMP2]], i32 [[TMP8]]
2502; ORIGIN-NEXT:    [[_MSCMP:%.*]] = icmp ne i32 [[TMP1]], 0
2503; ORIGIN-NEXT:    br i1 [[_MSCMP]], label %[[BB11:.*]], label %[[BB12:.*]], !prof [[PROF1]]
2504; ORIGIN:       [[BB11]]:
2505; ORIGIN-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP2]]) #[[ATTR12]]
2506; ORIGIN-NEXT:    unreachable
2507; ORIGIN:       [[BB12]]:
2508; ORIGIN-NEXT:    [[VEC1:%.*]] = insertelement <4 x i32> [[VEC]], i32 [[X]], i32 [[IDX]]
2509; ORIGIN-NEXT:    store <4 x i32> [[_MSPROP]], ptr @__msan_retval_tls, align 8
2510; ORIGIN-NEXT:    store i32 [[TMP10]], ptr @__msan_retval_origin_tls, align 4
2511; ORIGIN-NEXT:    ret <4 x i32> [[VEC1]]
2512;
2513; CALLS-LABEL: define <4 x i32> @InsertElement(
2514; CALLS-SAME: <4 x i32> [[VEC:%.*]], i32 [[IDX:%.*]], i32 [[X:%.*]]) #[[ATTR6]] {
2515; CALLS-NEXT:    [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
2516; CALLS-NEXT:    [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4
2517; CALLS-NEXT:    [[TMP3:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
2518; CALLS-NEXT:    [[TMP4:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
2519; CALLS-NEXT:    [[TMP5:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
2520; CALLS-NEXT:    [[TMP6:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 24) to ptr), align 4
2521; CALLS-NEXT:    call void @llvm.donothing()
2522; CALLS-NEXT:    [[_MSPROP:%.*]] = insertelement <4 x i32> [[TMP3]], i32 [[TMP5]], i32 [[IDX]]
2523; CALLS-NEXT:    [[TMP7:%.*]] = icmp ne i32 [[TMP5]], 0
2524; CALLS-NEXT:    [[TMP8:%.*]] = select i1 [[TMP7]], i32 [[TMP6]], i32 [[TMP4]]
2525; CALLS-NEXT:    [[TMP9:%.*]] = icmp ne i32 [[TMP1]], 0
2526; CALLS-NEXT:    [[TMP10:%.*]] = select i1 [[TMP9]], i32 [[TMP2]], i32 [[TMP8]]
2527; CALLS-NEXT:    call void @__msan_maybe_warning_4(i32 zeroext [[TMP1]], i32 zeroext [[TMP2]])
2528; CALLS-NEXT:    [[VEC1:%.*]] = insertelement <4 x i32> [[VEC]], i32 [[X]], i32 [[IDX]]
2529; CALLS-NEXT:    store <4 x i32> [[_MSPROP]], ptr @__msan_retval_tls, align 8
2530; CALLS-NEXT:    store i32 [[TMP10]], ptr @__msan_retval_origin_tls, align 4
2531; CALLS-NEXT:    ret <4 x i32> [[VEC1]]
2532;
2533  %vec1 = insertelement <4 x i32> %vec, i32 %x, i32 %idx
2534  ret <4 x i32> %vec1
2535}
2536
2537define <4 x i32> @ShuffleVector(<4 x i32> %vec, <4 x i32> %vec1) sanitize_memory {
2538; CHECK-LABEL: define <4 x i32> @ShuffleVector(
2539; CHECK-SAME: <4 x i32> [[VEC:%.*]], <4 x i32> [[VEC1:%.*]]) #[[ATTR6]] {
2540; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
2541; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
2542; CHECK-NEXT:    call void @llvm.donothing()
2543; CHECK-NEXT:    [[_MSPROP:%.*]] = shufflevector <4 x i32> [[TMP1]], <4 x i32> [[TMP2]], <4 x i32> <i32 0, i32 4, i32 1, i32 5>
2544; CHECK-NEXT:    [[VEC2:%.*]] = shufflevector <4 x i32> [[VEC]], <4 x i32> [[VEC1]], <4 x i32> <i32 0, i32 4, i32 1, i32 5>
2545; CHECK-NEXT:    store <4 x i32> [[_MSPROP]], ptr @__msan_retval_tls, align 8
2546; CHECK-NEXT:    ret <4 x i32> [[VEC2]]
2547;
2548; ORIGIN-LABEL: define <4 x i32> @ShuffleVector(
2549; ORIGIN-SAME: <4 x i32> [[VEC:%.*]], <4 x i32> [[VEC1:%.*]]) #[[ATTR6]] {
2550; ORIGIN-NEXT:    [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
2551; ORIGIN-NEXT:    [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
2552; ORIGIN-NEXT:    [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
2553; ORIGIN-NEXT:    [[TMP4:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4
2554; ORIGIN-NEXT:    call void @llvm.donothing()
2555; ORIGIN-NEXT:    [[_MSPROP:%.*]] = shufflevector <4 x i32> [[TMP1]], <4 x i32> [[TMP3]], <4 x i32> <i32 0, i32 4, i32 1, i32 5>
2556; ORIGIN-NEXT:    [[TMP5:%.*]] = bitcast <4 x i32> [[TMP3]] to i128
2557; ORIGIN-NEXT:    [[TMP6:%.*]] = icmp ne i128 [[TMP5]], 0
2558; ORIGIN-NEXT:    [[TMP7:%.*]] = select i1 [[TMP6]], i32 [[TMP4]], i32 [[TMP2]]
2559; ORIGIN-NEXT:    [[VEC2:%.*]] = shufflevector <4 x i32> [[VEC]], <4 x i32> [[VEC1]], <4 x i32> <i32 0, i32 4, i32 1, i32 5>
2560; ORIGIN-NEXT:    store <4 x i32> [[_MSPROP]], ptr @__msan_retval_tls, align 8
2561; ORIGIN-NEXT:    store i32 [[TMP7]], ptr @__msan_retval_origin_tls, align 4
2562; ORIGIN-NEXT:    ret <4 x i32> [[VEC2]]
2563;
2564; CALLS-LABEL: define <4 x i32> @ShuffleVector(
2565; CALLS-SAME: <4 x i32> [[VEC:%.*]], <4 x i32> [[VEC1:%.*]]) #[[ATTR6]] {
2566; CALLS-NEXT:    [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
2567; CALLS-NEXT:    [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
2568; CALLS-NEXT:    [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
2569; CALLS-NEXT:    [[TMP4:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4
2570; CALLS-NEXT:    call void @llvm.donothing()
2571; CALLS-NEXT:    [[_MSPROP:%.*]] = shufflevector <4 x i32> [[TMP1]], <4 x i32> [[TMP3]], <4 x i32> <i32 0, i32 4, i32 1, i32 5>
2572; CALLS-NEXT:    [[TMP5:%.*]] = bitcast <4 x i32> [[TMP3]] to i128
2573; CALLS-NEXT:    [[TMP6:%.*]] = icmp ne i128 [[TMP5]], 0
2574; CALLS-NEXT:    [[TMP7:%.*]] = select i1 [[TMP6]], i32 [[TMP4]], i32 [[TMP2]]
2575; CALLS-NEXT:    [[VEC2:%.*]] = shufflevector <4 x i32> [[VEC]], <4 x i32> [[VEC1]], <4 x i32> <i32 0, i32 4, i32 1, i32 5>
2576; CALLS-NEXT:    store <4 x i32> [[_MSPROP]], ptr @__msan_retval_tls, align 8
2577; CALLS-NEXT:    store i32 [[TMP7]], ptr @__msan_retval_origin_tls, align 4
2578; CALLS-NEXT:    ret <4 x i32> [[VEC2]]
2579;
2580  %vec2 = shufflevector <4 x i32> %vec, <4 x i32> %vec1,
2581  <4 x i32> <i32 0, i32 4, i32 1, i32 5>
2582  ret <4 x i32> %vec2
2583}
2584
2585
2586
2587; Test bswap intrinsic instrumentation
2588define i32 @BSwap(i32 %x) nounwind uwtable readnone sanitize_memory {
2589; CHECK-LABEL: define i32 @BSwap(
2590; CHECK-SAME: i32 [[X:%.*]]) #[[ATTR0]] {
2591; CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr @__msan_param_tls, align 8
2592; CHECK-NEXT:    call void @llvm.donothing()
2593; CHECK-NEXT:    [[TMP2:%.*]] = call i32 @llvm.bswap.i32(i32 [[TMP1]])
2594; CHECK-NEXT:    [[Y:%.*]] = tail call i32 @llvm.bswap.i32(i32 [[X]])
2595; CHECK-NEXT:    store i32 [[TMP2]], ptr @__msan_retval_tls, align 8
2596; CHECK-NEXT:    ret i32 [[Y]]
2597;
2598; ORIGIN-LABEL: define i32 @BSwap(
2599; ORIGIN-SAME: i32 [[X:%.*]]) #[[ATTR0]] {
2600; ORIGIN-NEXT:    [[TMP1:%.*]] = load i32, ptr @__msan_param_tls, align 8
2601; ORIGIN-NEXT:    [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
2602; ORIGIN-NEXT:    call void @llvm.donothing()
2603; ORIGIN-NEXT:    [[TMP3:%.*]] = call i32 @llvm.bswap.i32(i32 [[TMP1]])
2604; ORIGIN-NEXT:    [[Y:%.*]] = tail call i32 @llvm.bswap.i32(i32 [[X]])
2605; ORIGIN-NEXT:    store i32 [[TMP3]], ptr @__msan_retval_tls, align 8
2606; ORIGIN-NEXT:    store i32 [[TMP2]], ptr @__msan_retval_origin_tls, align 4
2607; ORIGIN-NEXT:    ret i32 [[Y]]
2608;
2609; CALLS-LABEL: define i32 @BSwap(
2610; CALLS-SAME: i32 [[X:%.*]]) #[[ATTR0]] {
2611; CALLS-NEXT:    [[TMP1:%.*]] = load i32, ptr @__msan_param_tls, align 8
2612; CALLS-NEXT:    [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
2613; CALLS-NEXT:    call void @llvm.donothing()
2614; CALLS-NEXT:    [[TMP3:%.*]] = call i32 @llvm.bswap.i32(i32 [[TMP1]])
2615; CALLS-NEXT:    [[Y:%.*]] = tail call i32 @llvm.bswap.i32(i32 [[X]])
2616; CALLS-NEXT:    store i32 [[TMP3]], ptr @__msan_retval_tls, align 8
2617; CALLS-NEXT:    store i32 [[TMP2]], ptr @__msan_retval_origin_tls, align 4
2618; CALLS-NEXT:    ret i32 [[Y]]
2619;
2620  %y = tail call i32 @llvm.bswap.i32(i32 %x)
2621  ret i32 %y
2622}
2623
2624declare i32 @llvm.bswap.i32(i32) nounwind readnone
2625
2626
2627; Test handling of vectors of pointers.
2628; Check that shadow of such vector is a vector of integers.
2629
2630define <8 x ptr> @VectorOfPointers(ptr %p) nounwind uwtable sanitize_memory {
2631; CHECK-LABEL: define <8 x ptr> @VectorOfPointers(
2632; CHECK-SAME: ptr [[P:%.*]]) #[[ATTR0]] {
2633; CHECK-NEXT:    call void @llvm.donothing()
2634; CHECK-NEXT:    [[X:%.*]] = load <8 x ptr>, ptr [[P]], align 64
2635; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[P]] to i64
2636; CHECK-NEXT:    [[TMP2:%.*]] = xor i64 [[TMP1]], 87960930222080
2637; CHECK-NEXT:    [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to ptr
2638; CHECK-NEXT:    [[_MSLD:%.*]] = load <8 x i64>, ptr [[TMP3]], align 64
2639; CHECK-NEXT:    store <8 x i64> [[_MSLD]], ptr @__msan_retval_tls, align 8
2640; CHECK-NEXT:    ret <8 x ptr> [[X]]
2641;
2642; ORIGIN-LABEL: define <8 x ptr> @VectorOfPointers(
2643; ORIGIN-SAME: ptr [[P:%.*]]) #[[ATTR0]] {
2644; ORIGIN-NEXT:    call void @llvm.donothing()
2645; ORIGIN-NEXT:    [[X:%.*]] = load <8 x ptr>, ptr [[P]], align 64
2646; ORIGIN-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[P]] to i64
2647; ORIGIN-NEXT:    [[TMP2:%.*]] = xor i64 [[TMP1]], 87960930222080
2648; ORIGIN-NEXT:    [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to ptr
2649; ORIGIN-NEXT:    [[TMP4:%.*]] = add i64 [[TMP2]], 17592186044416
2650; ORIGIN-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
2651; ORIGIN-NEXT:    [[_MSLD:%.*]] = load <8 x i64>, ptr [[TMP3]], align 64
2652; ORIGIN-NEXT:    [[TMP6:%.*]] = load i32, ptr [[TMP5]], align 64
2653; ORIGIN-NEXT:    store <8 x i64> [[_MSLD]], ptr @__msan_retval_tls, align 8
2654; ORIGIN-NEXT:    store i32 [[TMP6]], ptr @__msan_retval_origin_tls, align 4
2655; ORIGIN-NEXT:    ret <8 x ptr> [[X]]
2656;
2657; CALLS-LABEL: define <8 x ptr> @VectorOfPointers(
2658; CALLS-SAME: ptr [[P:%.*]]) #[[ATTR0]] {
2659; CALLS-NEXT:    [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
2660; CALLS-NEXT:    [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
2661; CALLS-NEXT:    call void @llvm.donothing()
2662; CALLS-NEXT:    call void @__msan_maybe_warning_8(i64 zeroext [[TMP1]], i32 zeroext [[TMP2]])
2663; CALLS-NEXT:    [[X:%.*]] = load <8 x ptr>, ptr [[P]], align 64
2664; CALLS-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[P]] to i64
2665; CALLS-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 87960930222080
2666; CALLS-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
2667; CALLS-NEXT:    [[TMP6:%.*]] = add i64 [[TMP4]], 17592186044416
2668; CALLS-NEXT:    [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
2669; CALLS-NEXT:    [[_MSLD:%.*]] = load <8 x i64>, ptr [[TMP5]], align 64
2670; CALLS-NEXT:    [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 64
2671; CALLS-NEXT:    store <8 x i64> [[_MSLD]], ptr @__msan_retval_tls, align 8
2672; CALLS-NEXT:    store i32 [[TMP8]], ptr @__msan_retval_origin_tls, align 4
2673; CALLS-NEXT:    ret <8 x ptr> [[X]]
2674;
2675  %x = load <8 x ptr>, ptr %p
2676  ret <8 x ptr> %x
2677}
2678
2679
2680; Test handling of va_copy.
2681
2682declare void @llvm.va_copy(ptr, ptr) nounwind
2683
2684define void @VACopy(ptr %p1, ptr %p2) nounwind uwtable sanitize_memory {
2685; CHECK-LABEL: define void @VACopy(
2686; CHECK-SAME: ptr [[P1:%.*]], ptr [[P2:%.*]]) #[[ATTR0]] {
2687; CHECK-NEXT:    call void @llvm.donothing()
2688; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[P1]] to i64
2689; CHECK-NEXT:    [[TMP2:%.*]] = xor i64 [[TMP1]], 87960930222080
2690; CHECK-NEXT:    [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to ptr
2691; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP3]], i8 0, i64 24, i1 false)
2692; CHECK-NEXT:    call void @llvm.va_copy.p0(ptr [[P1]], ptr [[P2]]) #[[ATTR5]]
2693; CHECK-NEXT:    ret void
2694;
2695; ORIGIN-LABEL: define void @VACopy(
2696; ORIGIN-SAME: ptr [[P1:%.*]], ptr [[P2:%.*]]) #[[ATTR0]] {
2697; ORIGIN-NEXT:    call void @llvm.donothing()
2698; ORIGIN-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[P1]] to i64
2699; ORIGIN-NEXT:    [[TMP2:%.*]] = xor i64 [[TMP1]], 87960930222080
2700; ORIGIN-NEXT:    [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to ptr
2701; ORIGIN-NEXT:    [[TMP4:%.*]] = add i64 [[TMP2]], 17592186044416
2702; ORIGIN-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
2703; ORIGIN-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP3]], i8 0, i64 24, i1 false)
2704; ORIGIN-NEXT:    call void @llvm.va_copy.p0(ptr [[P1]], ptr [[P2]]) #[[ATTR5]]
2705; ORIGIN-NEXT:    ret void
2706;
2707; CALLS-LABEL: define void @VACopy(
2708; CALLS-SAME: ptr [[P1:%.*]], ptr [[P2:%.*]]) #[[ATTR0]] {
2709; CALLS-NEXT:    call void @llvm.donothing()
2710; CALLS-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[P1]] to i64
2711; CALLS-NEXT:    [[TMP2:%.*]] = xor i64 [[TMP1]], 87960930222080
2712; CALLS-NEXT:    [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to ptr
2713; CALLS-NEXT:    [[TMP4:%.*]] = add i64 [[TMP2]], 17592186044416
2714; CALLS-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
2715; CALLS-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP3]], i8 0, i64 24, i1 false)
2716; CALLS-NEXT:    call void @llvm.va_copy.p0(ptr [[P1]], ptr [[P2]]) #[[ATTR5]]
2717; CALLS-NEXT:    ret void
2718;
2719  call void @llvm.va_copy(ptr %p1, ptr %p2) nounwind
2720  ret void
2721}
2722
2723
2724
2725; Test that va_start instrumentation does not use va_arg_tls*.
2726; It should work with a local stack copy instead.
2727
2728%struct.__va_list_tag = type { i32, i32, ptr, ptr }
2729declare void @llvm.va_start(ptr) nounwind
2730
2731; Function Attrs: nounwind uwtable
2732define void @VAStart(i32 %x, ...) sanitize_memory {
2733; CHECK-LABEL: define void @VAStart(
2734; CHECK-SAME: i32 [[X:%.*]], ...) #[[ATTR6]] {
2735; CHECK-NEXT:  [[ENTRY:.*:]]
2736; CHECK-NEXT:    [[TMP0:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
2737; CHECK-NEXT:    [[TMP1:%.*]] = add i64 176, [[TMP0]]
2738; CHECK-NEXT:    [[TMP2:%.*]] = alloca i8, i64 [[TMP1]], align 8
2739; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 [[TMP1]], i1 false)
2740; CHECK-NEXT:    [[TMP3:%.*]] = call i64 @llvm.umin.i64(i64 [[TMP1]], i64 800)
2741; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP2]], ptr align 8 @__msan_va_arg_tls, i64 [[TMP3]], i1 false)
2742; CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr @__msan_param_tls, align 8
2743; CHECK-NEXT:    call void @llvm.donothing()
2744; CHECK-NEXT:    [[X_ADDR:%.*]] = alloca i32, align 4
2745; CHECK-NEXT:    [[TMP5:%.*]] = ptrtoint ptr [[X_ADDR]] to i64
2746; CHECK-NEXT:    [[TMP6:%.*]] = xor i64 [[TMP5]], 87960930222080
2747; CHECK-NEXT:    [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
2748; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 4 [[TMP7]], i8 -1, i64 4, i1 false)
2749; CHECK-NEXT:    [[VA:%.*]] = alloca [1 x %struct.__va_list_tag], align 16
2750; CHECK-NEXT:    [[TMP8:%.*]] = ptrtoint ptr [[VA]] to i64
2751; CHECK-NEXT:    [[TMP9:%.*]] = xor i64 [[TMP8]], 87960930222080
2752; CHECK-NEXT:    [[TMP10:%.*]] = inttoptr i64 [[TMP9]] to ptr
2753; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP10]], i8 -1, i64 24, i1 false)
2754; CHECK-NEXT:    [[TMP11:%.*]] = ptrtoint ptr [[X_ADDR]] to i64
2755; CHECK-NEXT:    [[TMP12:%.*]] = xor i64 [[TMP11]], 87960930222080
2756; CHECK-NEXT:    [[TMP13:%.*]] = inttoptr i64 [[TMP12]] to ptr
2757; CHECK-NEXT:    store i32 [[TMP4]], ptr [[TMP13]], align 4
2758; CHECK-NEXT:    store i32 [[X]], ptr [[X_ADDR]], align 4
2759; CHECK-NEXT:    [[TMP14:%.*]] = ptrtoint ptr [[VA]] to i64
2760; CHECK-NEXT:    [[TMP15:%.*]] = xor i64 [[TMP14]], 87960930222080
2761; CHECK-NEXT:    [[TMP16:%.*]] = inttoptr i64 [[TMP15]] to ptr
2762; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP16]], i8 0, i64 24, i1 false)
2763; CHECK-NEXT:    call void @llvm.va_start.p0(ptr [[VA]])
2764; CHECK-NEXT:    [[TMP17:%.*]] = ptrtoint ptr [[VA]] to i64
2765; CHECK-NEXT:    [[TMP18:%.*]] = add i64 [[TMP17]], 16
2766; CHECK-NEXT:    [[TMP19:%.*]] = inttoptr i64 [[TMP18]] to ptr
2767; CHECK-NEXT:    [[TMP20:%.*]] = load ptr, ptr [[TMP19]], align 8
2768; CHECK-NEXT:    [[TMP21:%.*]] = ptrtoint ptr [[TMP20]] to i64
2769; CHECK-NEXT:    [[TMP22:%.*]] = xor i64 [[TMP21]], 87960930222080
2770; CHECK-NEXT:    [[TMP23:%.*]] = inttoptr i64 [[TMP22]] to ptr
2771; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP23]], ptr align 16 [[TMP2]], i64 176, i1 false)
2772; CHECK-NEXT:    [[TMP24:%.*]] = ptrtoint ptr [[VA]] to i64
2773; CHECK-NEXT:    [[TMP25:%.*]] = add i64 [[TMP24]], 8
2774; CHECK-NEXT:    [[TMP26:%.*]] = inttoptr i64 [[TMP25]] to ptr
2775; CHECK-NEXT:    [[TMP27:%.*]] = load ptr, ptr [[TMP26]], align 8
2776; CHECK-NEXT:    [[TMP28:%.*]] = ptrtoint ptr [[TMP27]] to i64
2777; CHECK-NEXT:    [[TMP29:%.*]] = xor i64 [[TMP28]], 87960930222080
2778; CHECK-NEXT:    [[TMP30:%.*]] = inttoptr i64 [[TMP29]] to ptr
2779; CHECK-NEXT:    [[TMP31:%.*]] = getelementptr i8, ptr [[TMP2]], i32 176
2780; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP30]], ptr align 16 [[TMP31]], i64 [[TMP0]], i1 false)
2781; CHECK-NEXT:    ret void
2782;
2783; ORIGIN-LABEL: define void @VAStart(
2784; ORIGIN-SAME: i32 [[X:%.*]], ...) #[[ATTR6]] {
2785; ORIGIN-NEXT:  [[ENTRY:.*:]]
2786; ORIGIN-NEXT:    [[TMP0:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
2787; ORIGIN-NEXT:    [[TMP1:%.*]] = add i64 176, [[TMP0]]
2788; ORIGIN-NEXT:    [[TMP2:%.*]] = alloca i8, i64 [[TMP1]], align 8
2789; ORIGIN-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 [[TMP1]], i1 false)
2790; ORIGIN-NEXT:    [[TMP3:%.*]] = call i64 @llvm.umin.i64(i64 [[TMP1]], i64 800)
2791; ORIGIN-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP2]], ptr align 8 @__msan_va_arg_tls, i64 [[TMP3]], i1 false)
2792; ORIGIN-NEXT:    [[TMP4:%.*]] = alloca i8, i64 [[TMP1]], align 8
2793; ORIGIN-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP4]], ptr align 8 @__msan_va_arg_origin_tls, i64 [[TMP3]], i1 false)
2794; ORIGIN-NEXT:    [[TMP5:%.*]] = load i32, ptr @__msan_param_tls, align 8
2795; ORIGIN-NEXT:    [[TMP6:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
2796; ORIGIN-NEXT:    call void @llvm.donothing()
2797; ORIGIN-NEXT:    [[X_ADDR:%.*]] = alloca i32, align 4
2798; ORIGIN-NEXT:    [[TMP7:%.*]] = ptrtoint ptr [[X_ADDR]] to i64
2799; ORIGIN-NEXT:    [[TMP8:%.*]] = xor i64 [[TMP7]], 87960930222080
2800; ORIGIN-NEXT:    [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr
2801; ORIGIN-NEXT:    [[TMP10:%.*]] = add i64 [[TMP8]], 17592186044416
2802; ORIGIN-NEXT:    [[TMP11:%.*]] = and i64 [[TMP10]], -4
2803; ORIGIN-NEXT:    [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
2804; ORIGIN-NEXT:    call void @llvm.memset.p0.i64(ptr align 4 [[TMP9]], i8 -1, i64 4, i1 false)
2805; ORIGIN-NEXT:    call void @__msan_set_alloca_origin_with_descr(ptr [[X_ADDR]], i64 4, ptr @[[GLOB4:[0-9]+]], ptr @[[GLOB5:[0-9]+]])
2806; ORIGIN-NEXT:    [[VA:%.*]] = alloca [1 x %struct.__va_list_tag], align 16
2807; ORIGIN-NEXT:    [[TMP13:%.*]] = ptrtoint ptr [[VA]] to i64
2808; ORIGIN-NEXT:    [[TMP14:%.*]] = xor i64 [[TMP13]], 87960930222080
2809; ORIGIN-NEXT:    [[TMP15:%.*]] = inttoptr i64 [[TMP14]] to ptr
2810; ORIGIN-NEXT:    [[TMP16:%.*]] = add i64 [[TMP14]], 17592186044416
2811; ORIGIN-NEXT:    [[TMP17:%.*]] = and i64 [[TMP16]], -4
2812; ORIGIN-NEXT:    [[TMP18:%.*]] = inttoptr i64 [[TMP17]] to ptr
2813; ORIGIN-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP15]], i8 -1, i64 24, i1 false)
2814; ORIGIN-NEXT:    call void @__msan_set_alloca_origin_with_descr(ptr [[VA]], i64 24, ptr @[[GLOB6:[0-9]+]], ptr @[[GLOB7:[0-9]+]])
2815; ORIGIN-NEXT:    [[TMP19:%.*]] = ptrtoint ptr [[X_ADDR]] to i64
2816; ORIGIN-NEXT:    [[TMP20:%.*]] = xor i64 [[TMP19]], 87960930222080
2817; ORIGIN-NEXT:    [[TMP21:%.*]] = inttoptr i64 [[TMP20]] to ptr
2818; ORIGIN-NEXT:    [[TMP22:%.*]] = add i64 [[TMP20]], 17592186044416
2819; ORIGIN-NEXT:    [[TMP23:%.*]] = inttoptr i64 [[TMP22]] to ptr
2820; ORIGIN-NEXT:    store i32 [[TMP5]], ptr [[TMP21]], align 4
2821; ORIGIN-NEXT:    [[_MSCMP:%.*]] = icmp ne i32 [[TMP5]], 0
2822; ORIGIN-NEXT:    br i1 [[_MSCMP]], label %[[BB24:.*]], label %[[BB25:.*]], !prof [[PROF1]]
2823; ORIGIN:       [[BB24]]:
2824; ORIGIN-NEXT:    store i32 [[TMP6]], ptr [[TMP23]], align 4
2825; ORIGIN-NEXT:    br label %[[BB25]]
2826; ORIGIN:       [[BB25]]:
2827; ORIGIN-NEXT:    store i32 [[X]], ptr [[X_ADDR]], align 4
2828; ORIGIN-NEXT:    [[TMP26:%.*]] = ptrtoint ptr [[VA]] to i64
2829; ORIGIN-NEXT:    [[TMP27:%.*]] = xor i64 [[TMP26]], 87960930222080
2830; ORIGIN-NEXT:    [[TMP28:%.*]] = inttoptr i64 [[TMP27]] to ptr
2831; ORIGIN-NEXT:    [[TMP29:%.*]] = add i64 [[TMP27]], 17592186044416
2832; ORIGIN-NEXT:    [[TMP30:%.*]] = inttoptr i64 [[TMP29]] to ptr
2833; ORIGIN-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP28]], i8 0, i64 24, i1 false)
2834; ORIGIN-NEXT:    call void @llvm.va_start.p0(ptr [[VA]])
2835; ORIGIN-NEXT:    [[TMP31:%.*]] = ptrtoint ptr [[VA]] to i64
2836; ORIGIN-NEXT:    [[TMP32:%.*]] = add i64 [[TMP31]], 16
2837; ORIGIN-NEXT:    [[TMP33:%.*]] = inttoptr i64 [[TMP32]] to ptr
2838; ORIGIN-NEXT:    [[TMP34:%.*]] = load ptr, ptr [[TMP33]], align 8
2839; ORIGIN-NEXT:    [[TMP35:%.*]] = ptrtoint ptr [[TMP34]] to i64
2840; ORIGIN-NEXT:    [[TMP36:%.*]] = xor i64 [[TMP35]], 87960930222080
2841; ORIGIN-NEXT:    [[TMP37:%.*]] = inttoptr i64 [[TMP36]] to ptr
2842; ORIGIN-NEXT:    [[TMP38:%.*]] = add i64 [[TMP36]], 17592186044416
2843; ORIGIN-NEXT:    [[TMP39:%.*]] = inttoptr i64 [[TMP38]] to ptr
2844; ORIGIN-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP37]], ptr align 16 [[TMP2]], i64 176, i1 false)
2845; ORIGIN-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP39]], ptr align 16 [[TMP4]], i64 176, i1 false)
2846; ORIGIN-NEXT:    [[TMP40:%.*]] = ptrtoint ptr [[VA]] to i64
2847; ORIGIN-NEXT:    [[TMP41:%.*]] = add i64 [[TMP40]], 8
2848; ORIGIN-NEXT:    [[TMP42:%.*]] = inttoptr i64 [[TMP41]] to ptr
2849; ORIGIN-NEXT:    [[TMP43:%.*]] = load ptr, ptr [[TMP42]], align 8
2850; ORIGIN-NEXT:    [[TMP44:%.*]] = ptrtoint ptr [[TMP43]] to i64
2851; ORIGIN-NEXT:    [[TMP45:%.*]] = xor i64 [[TMP44]], 87960930222080
2852; ORIGIN-NEXT:    [[TMP46:%.*]] = inttoptr i64 [[TMP45]] to ptr
2853; ORIGIN-NEXT:    [[TMP47:%.*]] = add i64 [[TMP45]], 17592186044416
2854; ORIGIN-NEXT:    [[TMP48:%.*]] = inttoptr i64 [[TMP47]] to ptr
2855; ORIGIN-NEXT:    [[TMP49:%.*]] = getelementptr i8, ptr [[TMP2]], i32 176
2856; ORIGIN-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP46]], ptr align 16 [[TMP49]], i64 [[TMP0]], i1 false)
2857; ORIGIN-NEXT:    [[TMP50:%.*]] = getelementptr i8, ptr [[TMP4]], i32 176
2858; ORIGIN-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP48]], ptr align 16 [[TMP50]], i64 [[TMP0]], i1 false)
2859; ORIGIN-NEXT:    ret void
2860;
2861; CALLS-LABEL: define void @VAStart(
2862; CALLS-SAME: i32 [[X:%.*]], ...) #[[ATTR6]] {
2863; CALLS-NEXT:  [[ENTRY:.*:]]
2864; CALLS-NEXT:    [[TMP0:%.*]] = load i64, ptr @__msan_va_arg_overflow_size_tls, align 8
2865; CALLS-NEXT:    [[TMP1:%.*]] = add i64 176, [[TMP0]]
2866; CALLS-NEXT:    [[TMP2:%.*]] = alloca i8, i64 [[TMP1]], align 8
2867; CALLS-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP2]], i8 0, i64 [[TMP1]], i1 false)
2868; CALLS-NEXT:    [[TMP3:%.*]] = call i64 @llvm.umin.i64(i64 [[TMP1]], i64 800)
2869; CALLS-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP2]], ptr align 8 @__msan_va_arg_tls, i64 [[TMP3]], i1 false)
2870; CALLS-NEXT:    [[TMP4:%.*]] = alloca i8, i64 [[TMP1]], align 8
2871; CALLS-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP4]], ptr align 8 @__msan_va_arg_origin_tls, i64 [[TMP3]], i1 false)
2872; CALLS-NEXT:    [[TMP5:%.*]] = load i32, ptr @__msan_param_tls, align 8
2873; CALLS-NEXT:    [[TMP6:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
2874; CALLS-NEXT:    call void @llvm.donothing()
2875; CALLS-NEXT:    [[X_ADDR:%.*]] = alloca i32, align 4
2876; CALLS-NEXT:    [[TMP7:%.*]] = ptrtoint ptr [[X_ADDR]] to i64
2877; CALLS-NEXT:    [[TMP8:%.*]] = xor i64 [[TMP7]], 87960930222080
2878; CALLS-NEXT:    [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr
2879; CALLS-NEXT:    [[TMP10:%.*]] = add i64 [[TMP8]], 17592186044416
2880; CALLS-NEXT:    [[TMP11:%.*]] = and i64 [[TMP10]], -4
2881; CALLS-NEXT:    [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
2882; CALLS-NEXT:    call void @llvm.memset.p0.i64(ptr align 4 [[TMP9]], i8 -1, i64 4, i1 false)
2883; CALLS-NEXT:    call void @__msan_set_alloca_origin_with_descr(ptr [[X_ADDR]], i64 4, ptr @[[GLOB4:[0-9]+]], ptr @[[GLOB5:[0-9]+]])
2884; CALLS-NEXT:    [[VA:%.*]] = alloca [1 x %struct.__va_list_tag], align 16
2885; CALLS-NEXT:    [[TMP13:%.*]] = ptrtoint ptr [[VA]] to i64
2886; CALLS-NEXT:    [[TMP14:%.*]] = xor i64 [[TMP13]], 87960930222080
2887; CALLS-NEXT:    [[TMP15:%.*]] = inttoptr i64 [[TMP14]] to ptr
2888; CALLS-NEXT:    [[TMP16:%.*]] = add i64 [[TMP14]], 17592186044416
2889; CALLS-NEXT:    [[TMP17:%.*]] = and i64 [[TMP16]], -4
2890; CALLS-NEXT:    [[TMP18:%.*]] = inttoptr i64 [[TMP17]] to ptr
2891; CALLS-NEXT:    call void @llvm.memset.p0.i64(ptr align 16 [[TMP15]], i8 -1, i64 24, i1 false)
2892; CALLS-NEXT:    call void @__msan_set_alloca_origin_with_descr(ptr [[VA]], i64 24, ptr @[[GLOB6:[0-9]+]], ptr @[[GLOB7:[0-9]+]])
2893; CALLS-NEXT:    [[TMP19:%.*]] = ptrtoint ptr [[X_ADDR]] to i64
2894; CALLS-NEXT:    [[TMP20:%.*]] = xor i64 [[TMP19]], 87960930222080
2895; CALLS-NEXT:    [[TMP21:%.*]] = inttoptr i64 [[TMP20]] to ptr
2896; CALLS-NEXT:    [[TMP22:%.*]] = add i64 [[TMP20]], 17592186044416
2897; CALLS-NEXT:    [[TMP23:%.*]] = inttoptr i64 [[TMP22]] to ptr
2898; CALLS-NEXT:    store i32 [[TMP5]], ptr [[TMP21]], align 4
2899; CALLS-NEXT:    call void @__msan_maybe_store_origin_4(i32 zeroext [[TMP5]], ptr [[X_ADDR]], i32 zeroext [[TMP6]])
2900; CALLS-NEXT:    store i32 [[X]], ptr [[X_ADDR]], align 4
2901; CALLS-NEXT:    [[TMP24:%.*]] = ptrtoint ptr [[VA]] to i64
2902; CALLS-NEXT:    [[TMP25:%.*]] = xor i64 [[TMP24]], 87960930222080
2903; CALLS-NEXT:    [[TMP26:%.*]] = inttoptr i64 [[TMP25]] to ptr
2904; CALLS-NEXT:    [[TMP27:%.*]] = add i64 [[TMP25]], 17592186044416
2905; CALLS-NEXT:    [[TMP28:%.*]] = inttoptr i64 [[TMP27]] to ptr
2906; CALLS-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP26]], i8 0, i64 24, i1 false)
2907; CALLS-NEXT:    call void @llvm.va_start.p0(ptr [[VA]])
2908; CALLS-NEXT:    [[TMP29:%.*]] = ptrtoint ptr [[VA]] to i64
2909; CALLS-NEXT:    [[TMP30:%.*]] = add i64 [[TMP29]], 16
2910; CALLS-NEXT:    [[TMP31:%.*]] = inttoptr i64 [[TMP30]] to ptr
2911; CALLS-NEXT:    [[TMP32:%.*]] = load ptr, ptr [[TMP31]], align 8
2912; CALLS-NEXT:    [[TMP33:%.*]] = ptrtoint ptr [[TMP32]] to i64
2913; CALLS-NEXT:    [[TMP34:%.*]] = xor i64 [[TMP33]], 87960930222080
2914; CALLS-NEXT:    [[TMP35:%.*]] = inttoptr i64 [[TMP34]] to ptr
2915; CALLS-NEXT:    [[TMP36:%.*]] = add i64 [[TMP34]], 17592186044416
2916; CALLS-NEXT:    [[TMP37:%.*]] = inttoptr i64 [[TMP36]] to ptr
2917; CALLS-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP35]], ptr align 16 [[TMP2]], i64 176, i1 false)
2918; CALLS-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP37]], ptr align 16 [[TMP4]], i64 176, i1 false)
2919; CALLS-NEXT:    [[TMP38:%.*]] = ptrtoint ptr [[VA]] to i64
2920; CALLS-NEXT:    [[TMP39:%.*]] = add i64 [[TMP38]], 8
2921; CALLS-NEXT:    [[TMP40:%.*]] = inttoptr i64 [[TMP39]] to ptr
2922; CALLS-NEXT:    [[TMP41:%.*]] = load ptr, ptr [[TMP40]], align 8
2923; CALLS-NEXT:    [[TMP42:%.*]] = ptrtoint ptr [[TMP41]] to i64
2924; CALLS-NEXT:    [[TMP43:%.*]] = xor i64 [[TMP42]], 87960930222080
2925; CALLS-NEXT:    [[TMP44:%.*]] = inttoptr i64 [[TMP43]] to ptr
2926; CALLS-NEXT:    [[TMP45:%.*]] = add i64 [[TMP43]], 17592186044416
2927; CALLS-NEXT:    [[TMP46:%.*]] = inttoptr i64 [[TMP45]] to ptr
2928; CALLS-NEXT:    [[TMP47:%.*]] = getelementptr i8, ptr [[TMP2]], i32 176
2929; CALLS-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP44]], ptr align 16 [[TMP47]], i64 [[TMP0]], i1 false)
2930; CALLS-NEXT:    [[TMP48:%.*]] = getelementptr i8, ptr [[TMP4]], i32 176
2931; CALLS-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP46]], ptr align 16 [[TMP48]], i64 [[TMP0]], i1 false)
2932; CALLS-NEXT:    ret void
2933;
2934entry:
2935  %x.addr = alloca i32, align 4
2936  %va = alloca [1 x %struct.__va_list_tag], align 16
2937  store i32 %x, ptr %x.addr, align 4
2938  call void @llvm.va_start(ptr %va)
2939  ret void
2940}
2941
2942
2943
2944; Test handling of volatile stores.
2945; Check that MemorySanitizer does not add a check of the value being stored.
2946
2947define void @VolatileStore(ptr nocapture %p, i32 %x) nounwind uwtable sanitize_memory {
2948; CHECK-LABEL: define void @VolatileStore(
2949; CHECK-SAME: ptr captures(none) [[P:%.*]], i32 [[X:%.*]]) #[[ATTR0]] {
2950; CHECK-NEXT:  [[ENTRY:.*:]]
2951; CHECK-NEXT:    [[TMP0:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
2952; CHECK-NEXT:    call void @llvm.donothing()
2953; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[P]] to i64
2954; CHECK-NEXT:    [[TMP2:%.*]] = xor i64 [[TMP1]], 87960930222080
2955; CHECK-NEXT:    [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to ptr
2956; CHECK-NEXT:    store i32 [[TMP0]], ptr [[TMP3]], align 4
2957; CHECK-NEXT:    store volatile i32 [[X]], ptr [[P]], align 4
2958; CHECK-NEXT:    ret void
2959;
2960; ORIGIN-LABEL: define void @VolatileStore(
2961; ORIGIN-SAME: ptr captures(none) [[P:%.*]], i32 [[X:%.*]]) #[[ATTR0]] {
2962; ORIGIN-NEXT:  [[ENTRY:.*:]]
2963; ORIGIN-NEXT:    [[TMP0:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
2964; ORIGIN-NEXT:    [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4
2965; ORIGIN-NEXT:    call void @llvm.donothing()
2966; ORIGIN-NEXT:    [[TMP2:%.*]] = ptrtoint ptr [[P]] to i64
2967; ORIGIN-NEXT:    [[TMP3:%.*]] = xor i64 [[TMP2]], 87960930222080
2968; ORIGIN-NEXT:    [[TMP4:%.*]] = inttoptr i64 [[TMP3]] to ptr
2969; ORIGIN-NEXT:    [[TMP5:%.*]] = add i64 [[TMP3]], 17592186044416
2970; ORIGIN-NEXT:    [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
2971; ORIGIN-NEXT:    store i32 [[TMP0]], ptr [[TMP4]], align 4
2972; ORIGIN-NEXT:    [[_MSCMP:%.*]] = icmp ne i32 [[TMP0]], 0
2973; ORIGIN-NEXT:    br i1 [[_MSCMP]], label %[[BB7:.*]], label %[[BB8:.*]], !prof [[PROF1]]
2974; ORIGIN:       [[BB7]]:
2975; ORIGIN-NEXT:    store i32 [[TMP1]], ptr [[TMP6]], align 4
2976; ORIGIN-NEXT:    br label %[[BB8]]
2977; ORIGIN:       [[BB8]]:
2978; ORIGIN-NEXT:    store volatile i32 [[X]], ptr [[P]], align 4
2979; ORIGIN-NEXT:    ret void
2980;
2981; CALLS-LABEL: define void @VolatileStore(
2982; CALLS-SAME: ptr captures(none) [[P:%.*]], i32 [[X:%.*]]) #[[ATTR0]] {
2983; CALLS-NEXT:  [[ENTRY:.*:]]
2984; CALLS-NEXT:    [[TMP0:%.*]] = load i64, ptr @__msan_param_tls, align 8
2985; CALLS-NEXT:    [[TMP1:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
2986; CALLS-NEXT:    [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
2987; CALLS-NEXT:    [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4
2988; CALLS-NEXT:    call void @llvm.donothing()
2989; CALLS-NEXT:    call void @__msan_maybe_warning_8(i64 zeroext [[TMP0]], i32 zeroext [[TMP1]])
2990; CALLS-NEXT:    [[TMP4:%.*]] = ptrtoint ptr [[P]] to i64
2991; CALLS-NEXT:    [[TMP5:%.*]] = xor i64 [[TMP4]], 87960930222080
2992; CALLS-NEXT:    [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
2993; CALLS-NEXT:    [[TMP7:%.*]] = add i64 [[TMP5]], 17592186044416
2994; CALLS-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
2995; CALLS-NEXT:    store i32 [[TMP2]], ptr [[TMP6]], align 4
2996; CALLS-NEXT:    call void @__msan_maybe_store_origin_4(i32 zeroext [[TMP2]], ptr [[P]], i32 zeroext [[TMP3]])
2997; CALLS-NEXT:    store volatile i32 [[X]], ptr [[P]], align 4
2998; CALLS-NEXT:    ret void
2999;
3000entry:
3001  store volatile i32 %x, ptr %p, align 4
3002  ret void
3003}
3004
3005
3006
3007; Test that checks are omitted and returned value is always initialized if
3008; sanitize_memory attribute is missing.
3009
3010define i32 @NoSanitizeMemory(i32 %x) uwtable {
3011; CHECK-LABEL: define i32 @NoSanitizeMemory(
3012; CHECK-SAME: i32 [[X:%.*]]) #[[ATTR8:[0-9]+]] {
3013; CHECK-NEXT:  [[ENTRY:.*:]]
3014; CHECK-NEXT:    call void @llvm.donothing()
3015; CHECK-NEXT:    [[TMP0:%.*]] = xor i32 [[X]], 0
3016; CHECK-NEXT:    [[TMP1:%.*]] = and i32 -1, [[TMP0]]
3017; CHECK-NEXT:    [[TMP2:%.*]] = icmp eq i32 [[TMP1]], 0
3018; CHECK-NEXT:    [[_MSPROP_ICMP:%.*]] = and i1 false, [[TMP2]]
3019; CHECK-NEXT:    [[TOBOOL:%.*]] = icmp eq i32 [[X]], 0
3020; CHECK-NEXT:    br i1 [[TOBOOL]], label %[[IF_END:.*]], label %[[IF_THEN:.*]]
3021; CHECK:       [[IF_THEN]]:
3022; CHECK-NEXT:    tail call void @bar()
3023; CHECK-NEXT:    br label %[[IF_END]]
3024; CHECK:       [[IF_END]]:
3025; CHECK-NEXT:    store i32 0, ptr @__msan_retval_tls, align 8
3026; CHECK-NEXT:    ret i32 [[X]]
3027;
3028; ORIGIN-LABEL: define i32 @NoSanitizeMemory(
3029; ORIGIN-SAME: i32 [[X:%.*]]) #[[ATTR8:[0-9]+]] {
3030; ORIGIN-NEXT:  [[ENTRY:.*:]]
3031; ORIGIN-NEXT:    call void @llvm.donothing()
3032; ORIGIN-NEXT:    [[TMP0:%.*]] = xor i32 [[X]], 0
3033; ORIGIN-NEXT:    [[TMP1:%.*]] = and i32 -1, [[TMP0]]
3034; ORIGIN-NEXT:    [[TMP2:%.*]] = icmp eq i32 [[TMP1]], 0
3035; ORIGIN-NEXT:    [[_MSPROP_ICMP:%.*]] = and i1 false, [[TMP2]]
3036; ORIGIN-NEXT:    [[TOBOOL:%.*]] = icmp eq i32 [[X]], 0
3037; ORIGIN-NEXT:    br i1 [[TOBOOL]], label %[[IF_END:.*]], label %[[IF_THEN:.*]]
3038; ORIGIN:       [[IF_THEN]]:
3039; ORIGIN-NEXT:    tail call void @bar()
3040; ORIGIN-NEXT:    br label %[[IF_END]]
3041; ORIGIN:       [[IF_END]]:
3042; ORIGIN-NEXT:    store i32 0, ptr @__msan_retval_tls, align 8
3043; ORIGIN-NEXT:    store i32 0, ptr @__msan_retval_origin_tls, align 4
3044; ORIGIN-NEXT:    ret i32 [[X]]
3045;
3046; CALLS-LABEL: define i32 @NoSanitizeMemory(
3047; CALLS-SAME: i32 [[X:%.*]]) #[[ATTR8:[0-9]+]] {
3048; CALLS-NEXT:  [[ENTRY:.*:]]
3049; CALLS-NEXT:    call void @llvm.donothing()
3050; CALLS-NEXT:    [[TMP0:%.*]] = xor i32 [[X]], 0
3051; CALLS-NEXT:    [[TMP1:%.*]] = and i32 -1, [[TMP0]]
3052; CALLS-NEXT:    [[TMP2:%.*]] = icmp eq i32 [[TMP1]], 0
3053; CALLS-NEXT:    [[_MSPROP_ICMP:%.*]] = and i1 false, [[TMP2]]
3054; CALLS-NEXT:    [[TOBOOL:%.*]] = icmp eq i32 [[X]], 0
3055; CALLS-NEXT:    br i1 [[TOBOOL]], label %[[IF_END:.*]], label %[[IF_THEN:.*]]
3056; CALLS:       [[IF_THEN]]:
3057; CALLS-NEXT:    tail call void @bar()
3058; CALLS-NEXT:    br label %[[IF_END]]
3059; CALLS:       [[IF_END]]:
3060; CALLS-NEXT:    store i32 0, ptr @__msan_retval_tls, align 8
3061; CALLS-NEXT:    store i32 0, ptr @__msan_retval_origin_tls, align 4
3062; CALLS-NEXT:    ret i32 [[X]]
3063;
3064entry:
3065  %tobool = icmp eq i32 %x, 0
3066  br i1 %tobool, label %if.end, label %if.then
3067
3068if.then:                                          ; preds = %entry
3069  tail call void @bar()
3070  br label %if.end
3071
3072if.end:                                           ; preds = %entry, %if.then
3073  ret i32 %x
3074}
3075
3076declare void @bar()
3077
3078
3079
3080; Test that stack allocations are unpoisoned in functions missing
3081; sanitize_memory attribute
3082
3083define i32 @NoSanitizeMemoryAlloca() {
3084; CHECK-LABEL: define i32 @NoSanitizeMemoryAlloca() {
3085; CHECK-NEXT:  [[ENTRY:.*:]]
3086; CHECK-NEXT:    call void @llvm.donothing()
3087; CHECK-NEXT:    [[P:%.*]] = alloca i32, align 4
3088; CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[P]] to i64
3089; CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 87960930222080
3090; CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
3091; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 4 [[TMP2]], i8 0, i64 4, i1 false)
3092; CHECK-NEXT:    store i64 0, ptr @__msan_param_tls, align 8
3093; CHECK-NEXT:    store i32 0, ptr @__msan_retval_tls, align 8
3094; CHECK-NEXT:    [[X:%.*]] = call i32 @NoSanitizeMemoryAllocaHelper(ptr [[P]])
3095; CHECK-NEXT:    [[_MSRET:%.*]] = load i32, ptr @__msan_retval_tls, align 8
3096; CHECK-NEXT:    store i32 0, ptr @__msan_retval_tls, align 8
3097; CHECK-NEXT:    ret i32 [[X]]
3098;
3099; ORIGIN-LABEL: define i32 @NoSanitizeMemoryAlloca() {
3100; ORIGIN-NEXT:  [[ENTRY:.*:]]
3101; ORIGIN-NEXT:    call void @llvm.donothing()
3102; ORIGIN-NEXT:    [[P:%.*]] = alloca i32, align 4
3103; ORIGIN-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[P]] to i64
3104; ORIGIN-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 87960930222080
3105; ORIGIN-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
3106; ORIGIN-NEXT:    [[TMP3:%.*]] = add i64 [[TMP1]], 17592186044416
3107; ORIGIN-NEXT:    [[TMP4:%.*]] = and i64 [[TMP3]], -4
3108; ORIGIN-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
3109; ORIGIN-NEXT:    call void @llvm.memset.p0.i64(ptr align 4 [[TMP2]], i8 0, i64 4, i1 false)
3110; ORIGIN-NEXT:    store i64 0, ptr @__msan_param_tls, align 8
3111; ORIGIN-NEXT:    store i32 0, ptr @__msan_retval_tls, align 8
3112; ORIGIN-NEXT:    [[X:%.*]] = call i32 @NoSanitizeMemoryAllocaHelper(ptr [[P]])
3113; ORIGIN-NEXT:    [[_MSRET:%.*]] = load i32, ptr @__msan_retval_tls, align 8
3114; ORIGIN-NEXT:    [[TMP6:%.*]] = load i32, ptr @__msan_retval_origin_tls, align 4
3115; ORIGIN-NEXT:    store i32 0, ptr @__msan_retval_tls, align 8
3116; ORIGIN-NEXT:    store i32 0, ptr @__msan_retval_origin_tls, align 4
3117; ORIGIN-NEXT:    ret i32 [[X]]
3118;
3119; CALLS-LABEL: define i32 @NoSanitizeMemoryAlloca() {
3120; CALLS-NEXT:  [[ENTRY:.*:]]
3121; CALLS-NEXT:    call void @llvm.donothing()
3122; CALLS-NEXT:    [[P:%.*]] = alloca i32, align 4
3123; CALLS-NEXT:    [[TMP0:%.*]] = ptrtoint ptr [[P]] to i64
3124; CALLS-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 87960930222080
3125; CALLS-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to ptr
3126; CALLS-NEXT:    [[TMP3:%.*]] = add i64 [[TMP1]], 17592186044416
3127; CALLS-NEXT:    [[TMP4:%.*]] = and i64 [[TMP3]], -4
3128; CALLS-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
3129; CALLS-NEXT:    call void @llvm.memset.p0.i64(ptr align 4 [[TMP2]], i8 0, i64 4, i1 false)
3130; CALLS-NEXT:    store i64 0, ptr @__msan_param_tls, align 8
3131; CALLS-NEXT:    store i32 0, ptr @__msan_retval_tls, align 8
3132; CALLS-NEXT:    [[X:%.*]] = call i32 @NoSanitizeMemoryAllocaHelper(ptr [[P]])
3133; CALLS-NEXT:    [[_MSRET:%.*]] = load i32, ptr @__msan_retval_tls, align 8
3134; CALLS-NEXT:    [[TMP6:%.*]] = load i32, ptr @__msan_retval_origin_tls, align 4
3135; CALLS-NEXT:    store i32 0, ptr @__msan_retval_tls, align 8
3136; CALLS-NEXT:    store i32 0, ptr @__msan_retval_origin_tls, align 4
3137; CALLS-NEXT:    ret i32 [[X]]
3138;
3139entry:
3140  %p = alloca i32, align 4
3141  %x = call i32 @NoSanitizeMemoryAllocaHelper(ptr %p)
3142  ret i32 %x
3143}
3144
3145declare i32 @NoSanitizeMemoryAllocaHelper(ptr %p)
3146
3147
3148
3149; Test that undef is unpoisoned in functions missing
3150; sanitize_memory attribute
3151
3152define i32 @NoSanitizeMemoryUndef() {
3153; CHECK-LABEL: define i32 @NoSanitizeMemoryUndef() {
3154; CHECK-NEXT:  [[ENTRY:.*:]]
3155; CHECK-NEXT:    call void @llvm.donothing()
3156; CHECK-NEXT:    store i32 0, ptr @__msan_param_tls, align 8
3157; CHECK-NEXT:    store i32 0, ptr @__msan_retval_tls, align 8
3158; CHECK-NEXT:    [[X:%.*]] = call i32 @NoSanitizeMemoryUndefHelper(i32 undef)
3159; CHECK-NEXT:    [[_MSRET:%.*]] = load i32, ptr @__msan_retval_tls, align 8
3160; CHECK-NEXT:    store i32 0, ptr @__msan_retval_tls, align 8
3161; CHECK-NEXT:    ret i32 [[X]]
3162;
3163; ORIGIN-LABEL: define i32 @NoSanitizeMemoryUndef() {
3164; ORIGIN-NEXT:  [[ENTRY:.*:]]
3165; ORIGIN-NEXT:    call void @llvm.donothing()
3166; ORIGIN-NEXT:    store i32 0, ptr @__msan_param_tls, align 8
3167; ORIGIN-NEXT:    store i32 0, ptr @__msan_retval_tls, align 8
3168; ORIGIN-NEXT:    [[X:%.*]] = call i32 @NoSanitizeMemoryUndefHelper(i32 undef)
3169; ORIGIN-NEXT:    [[_MSRET:%.*]] = load i32, ptr @__msan_retval_tls, align 8
3170; ORIGIN-NEXT:    [[TMP0:%.*]] = load i32, ptr @__msan_retval_origin_tls, align 4
3171; ORIGIN-NEXT:    store i32 0, ptr @__msan_retval_tls, align 8
3172; ORIGIN-NEXT:    store i32 0, ptr @__msan_retval_origin_tls, align 4
3173; ORIGIN-NEXT:    ret i32 [[X]]
3174;
3175; CALLS-LABEL: define i32 @NoSanitizeMemoryUndef() {
3176; CALLS-NEXT:  [[ENTRY:.*:]]
3177; CALLS-NEXT:    call void @llvm.donothing()
3178; CALLS-NEXT:    store i32 0, ptr @__msan_param_tls, align 8
3179; CALLS-NEXT:    store i32 0, ptr @__msan_retval_tls, align 8
3180; CALLS-NEXT:    [[X:%.*]] = call i32 @NoSanitizeMemoryUndefHelper(i32 undef)
3181; CALLS-NEXT:    [[_MSRET:%.*]] = load i32, ptr @__msan_retval_tls, align 8
3182; CALLS-NEXT:    [[TMP0:%.*]] = load i32, ptr @__msan_retval_origin_tls, align 4
3183; CALLS-NEXT:    store i32 0, ptr @__msan_retval_tls, align 8
3184; CALLS-NEXT:    store i32 0, ptr @__msan_retval_origin_tls, align 4
3185; CALLS-NEXT:    ret i32 [[X]]
3186;
3187entry:
3188  %x = call i32 @NoSanitizeMemoryUndefHelper(i32 undef)
3189  ret i32 %x
3190}
3191
3192declare i32 @NoSanitizeMemoryUndefHelper(i32 %x)
3193
3194
3195
3196; Test PHINode instrumentation in ignorelisted functions
3197
3198define i32 @NoSanitizeMemoryPHI(i32 %x) {
3199; CHECK-LABEL: define i32 @NoSanitizeMemoryPHI(
3200; CHECK-SAME: i32 [[X:%.*]]) {
3201; CHECK-NEXT:  [[ENTRY:.*:]]
3202; CHECK-NEXT:    call void @llvm.donothing()
3203; CHECK-NEXT:    [[TMP0:%.*]] = xor i32 [[X]], 0
3204; CHECK-NEXT:    [[TMP1:%.*]] = and i32 -1, [[TMP0]]
3205; CHECK-NEXT:    [[TMP2:%.*]] = icmp eq i32 [[TMP1]], 0
3206; CHECK-NEXT:    [[_MSPROP_ICMP:%.*]] = and i1 false, [[TMP2]]
3207; CHECK-NEXT:    [[TOBOOL:%.*]] = icmp ne i32 [[X]], 0
3208; CHECK-NEXT:    br i1 [[TOBOOL]], label %[[COND_TRUE:.*]], label %[[COND_FALSE:.*]]
3209; CHECK:       [[COND_TRUE]]:
3210; CHECK-NEXT:    br label %[[COND_END:.*]]
3211; CHECK:       [[COND_FALSE]]:
3212; CHECK-NEXT:    br label %[[COND_END]]
3213; CHECK:       [[COND_END]]:
3214; CHECK-NEXT:    [[COND:%.*]] = phi i32 [ undef, %[[COND_TRUE]] ], [ undef, %[[COND_FALSE]] ]
3215; CHECK-NEXT:    store i32 0, ptr @__msan_retval_tls, align 8
3216; CHECK-NEXT:    ret i32 [[COND]]
3217;
3218; ORIGIN-LABEL: define i32 @NoSanitizeMemoryPHI(
3219; ORIGIN-SAME: i32 [[X:%.*]]) {
3220; ORIGIN-NEXT:  [[ENTRY:.*:]]
3221; ORIGIN-NEXT:    call void @llvm.donothing()
3222; ORIGIN-NEXT:    [[TMP0:%.*]] = xor i32 [[X]], 0
3223; ORIGIN-NEXT:    [[TMP1:%.*]] = and i32 -1, [[TMP0]]
3224; ORIGIN-NEXT:    [[TMP2:%.*]] = icmp eq i32 [[TMP1]], 0
3225; ORIGIN-NEXT:    [[_MSPROP_ICMP:%.*]] = and i1 false, [[TMP2]]
3226; ORIGIN-NEXT:    [[TOBOOL:%.*]] = icmp ne i32 [[X]], 0
3227; ORIGIN-NEXT:    br i1 [[TOBOOL]], label %[[COND_TRUE:.*]], label %[[COND_FALSE:.*]]
3228; ORIGIN:       [[COND_TRUE]]:
3229; ORIGIN-NEXT:    br label %[[COND_END:.*]]
3230; ORIGIN:       [[COND_FALSE]]:
3231; ORIGIN-NEXT:    br label %[[COND_END]]
3232; ORIGIN:       [[COND_END]]:
3233; ORIGIN-NEXT:    [[COND:%.*]] = phi i32 [ undef, %[[COND_TRUE]] ], [ undef, %[[COND_FALSE]] ]
3234; ORIGIN-NEXT:    store i32 0, ptr @__msan_retval_tls, align 8
3235; ORIGIN-NEXT:    store i32 0, ptr @__msan_retval_origin_tls, align 4
3236; ORIGIN-NEXT:    ret i32 [[COND]]
3237;
3238; CALLS-LABEL: define i32 @NoSanitizeMemoryPHI(
3239; CALLS-SAME: i32 [[X:%.*]]) {
3240; CALLS-NEXT:  [[ENTRY:.*:]]
3241; CALLS-NEXT:    call void @llvm.donothing()
3242; CALLS-NEXT:    [[TMP0:%.*]] = xor i32 [[X]], 0
3243; CALLS-NEXT:    [[TMP1:%.*]] = and i32 -1, [[TMP0]]
3244; CALLS-NEXT:    [[TMP2:%.*]] = icmp eq i32 [[TMP1]], 0
3245; CALLS-NEXT:    [[_MSPROP_ICMP:%.*]] = and i1 false, [[TMP2]]
3246; CALLS-NEXT:    [[TOBOOL:%.*]] = icmp ne i32 [[X]], 0
3247; CALLS-NEXT:    br i1 [[TOBOOL]], label %[[COND_TRUE:.*]], label %[[COND_FALSE:.*]]
3248; CALLS:       [[COND_TRUE]]:
3249; CALLS-NEXT:    br label %[[COND_END:.*]]
3250; CALLS:       [[COND_FALSE]]:
3251; CALLS-NEXT:    br label %[[COND_END]]
3252; CALLS:       [[COND_END]]:
3253; CALLS-NEXT:    [[COND:%.*]] = phi i32 [ undef, %[[COND_TRUE]] ], [ undef, %[[COND_FALSE]] ]
3254; CALLS-NEXT:    store i32 0, ptr @__msan_retval_tls, align 8
3255; CALLS-NEXT:    store i32 0, ptr @__msan_retval_origin_tls, align 4
3256; CALLS-NEXT:    ret i32 [[COND]]
3257;
3258entry:
3259  %tobool = icmp ne i32 %x, 0
3260  br i1 %tobool, label %cond.true, label %cond.false
3261
3262cond.true:                                        ; preds = %entry
3263  br label %cond.end
3264
3265cond.false:                                       ; preds = %entry
3266  br label %cond.end
3267
3268cond.end:                                         ; preds = %cond.false, %cond.true
3269  %cond = phi i32 [ undef, %cond.true ], [ undef, %cond.false ]
3270  ret i32 %cond
3271}
3272
3273
3274
3275; Test that there are no __msan_param_origin_tls stores when
3276; argument shadow is a compile-time zero constant (which is always the case
3277; in functions missing sanitize_memory attribute).
3278
3279define i32 @NoSanitizeMemoryParamTLS(ptr nocapture readonly %x) {
3280; CHECK-LABEL: define i32 @NoSanitizeMemoryParamTLS(
3281; CHECK-SAME: ptr readonly captures(none) [[X:%.*]]) {
3282; CHECK-NEXT:  [[ENTRY:.*:]]
3283; CHECK-NEXT:    call void @llvm.donothing()
3284; CHECK-NEXT:    [[TMP0:%.*]] = load i32, ptr [[X]], align 4
3285; CHECK-NEXT:    store i32 0, ptr @__msan_param_tls, align 8
3286; CHECK-NEXT:    store i32 0, ptr @__msan_retval_tls, align 8
3287; CHECK-NEXT:    [[CALL:%.*]] = tail call i32 @NoSanitizeMemoryParamTLSHelper(i32 [[TMP0]])
3288; CHECK-NEXT:    [[_MSRET:%.*]] = load i32, ptr @__msan_retval_tls, align 8
3289; CHECK-NEXT:    store i32 0, ptr @__msan_retval_tls, align 8
3290; CHECK-NEXT:    ret i32 [[CALL]]
3291;
3292; ORIGIN-LABEL: define i32 @NoSanitizeMemoryParamTLS(
3293; ORIGIN-SAME: ptr readonly captures(none) [[X:%.*]]) {
3294; ORIGIN-NEXT:  [[ENTRY:.*:]]
3295; ORIGIN-NEXT:    call void @llvm.donothing()
3296; ORIGIN-NEXT:    [[TMP0:%.*]] = load i32, ptr [[X]], align 4
3297; ORIGIN-NEXT:    store i32 0, ptr @__msan_param_tls, align 8
3298; ORIGIN-NEXT:    store i32 0, ptr @__msan_retval_tls, align 8
3299; ORIGIN-NEXT:    [[CALL:%.*]] = tail call i32 @NoSanitizeMemoryParamTLSHelper(i32 [[TMP0]])
3300; ORIGIN-NEXT:    [[_MSRET:%.*]] = load i32, ptr @__msan_retval_tls, align 8
3301; ORIGIN-NEXT:    [[TMP1:%.*]] = load i32, ptr @__msan_retval_origin_tls, align 4
3302; ORIGIN-NEXT:    store i32 0, ptr @__msan_retval_tls, align 8
3303; ORIGIN-NEXT:    store i32 0, ptr @__msan_retval_origin_tls, align 4
3304; ORIGIN-NEXT:    ret i32 [[CALL]]
3305;
3306; CALLS-LABEL: define i32 @NoSanitizeMemoryParamTLS(
3307; CALLS-SAME: ptr readonly captures(none) [[X:%.*]]) {
3308; CALLS-NEXT:  [[ENTRY:.*:]]
3309; CALLS-NEXT:    call void @llvm.donothing()
3310; CALLS-NEXT:    [[TMP0:%.*]] = load i32, ptr [[X]], align 4
3311; CALLS-NEXT:    store i32 0, ptr @__msan_param_tls, align 8
3312; CALLS-NEXT:    store i32 0, ptr @__msan_retval_tls, align 8
3313; CALLS-NEXT:    [[CALL:%.*]] = tail call i32 @NoSanitizeMemoryParamTLSHelper(i32 [[TMP0]])
3314; CALLS-NEXT:    [[_MSRET:%.*]] = load i32, ptr @__msan_retval_tls, align 8
3315; CALLS-NEXT:    [[TMP1:%.*]] = load i32, ptr @__msan_retval_origin_tls, align 4
3316; CALLS-NEXT:    store i32 0, ptr @__msan_retval_tls, align 8
3317; CALLS-NEXT:    store i32 0, ptr @__msan_retval_origin_tls, align 4
3318; CALLS-NEXT:    ret i32 [[CALL]]
3319;
3320entry:
3321  %0 = load i32, ptr %x, align 4
3322  %call = tail call i32 @NoSanitizeMemoryParamTLSHelper(i32 %0)
3323  ret i32 %call
3324}
3325
3326declare i32 @NoSanitizeMemoryParamTLSHelper(i32 %x)
3327
3328
3329
3330; Test argument shadow alignment
3331
3332define <2 x i64> @ArgumentShadowAlignment(i64 %a, <2 x i64> %b) sanitize_memory {
3333; CHECK-LABEL: define <2 x i64> @ArgumentShadowAlignment(
3334; CHECK-SAME: i64 [[A:%.*]], <2 x i64> [[B:%.*]]) #[[ATTR6]] {
3335; CHECK-NEXT:  [[ENTRY:.*:]]
3336; CHECK-NEXT:    [[TMP0:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
3337; CHECK-NEXT:    call void @llvm.donothing()
3338; CHECK-NEXT:    store <2 x i64> [[TMP0]], ptr @__msan_retval_tls, align 8
3339; CHECK-NEXT:    ret <2 x i64> [[B]]
3340;
3341; ORIGIN-LABEL: define <2 x i64> @ArgumentShadowAlignment(
3342; ORIGIN-SAME: i64 [[A:%.*]], <2 x i64> [[B:%.*]]) #[[ATTR6]] {
3343; ORIGIN-NEXT:  [[ENTRY:.*:]]
3344; ORIGIN-NEXT:    [[TMP0:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
3345; ORIGIN-NEXT:    [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4
3346; ORIGIN-NEXT:    call void @llvm.donothing()
3347; ORIGIN-NEXT:    store <2 x i64> [[TMP0]], ptr @__msan_retval_tls, align 8
3348; ORIGIN-NEXT:    store i32 [[TMP1]], ptr @__msan_retval_origin_tls, align 4
3349; ORIGIN-NEXT:    ret <2 x i64> [[B]]
3350;
3351; CALLS-LABEL: define <2 x i64> @ArgumentShadowAlignment(
3352; CALLS-SAME: i64 [[A:%.*]], <2 x i64> [[B:%.*]]) #[[ATTR6]] {
3353; CALLS-NEXT:  [[ENTRY:.*:]]
3354; CALLS-NEXT:    [[TMP0:%.*]] = load <2 x i64>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
3355; CALLS-NEXT:    [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4
3356; CALLS-NEXT:    call void @llvm.donothing()
3357; CALLS-NEXT:    store <2 x i64> [[TMP0]], ptr @__msan_retval_tls, align 8
3358; CALLS-NEXT:    store i32 [[TMP1]], ptr @__msan_retval_origin_tls, align 4
3359; CALLS-NEXT:    ret <2 x i64> [[B]]
3360;
3361entry:
3362  ret <2 x i64> %b
3363}
3364
3365
3366
3367; Test origin propagation for insertvalue
3368
3369define { i64, i32 } @make_pair_64_32(i64 %x, i32 %y) sanitize_memory {
3370; CHECK-LABEL: define { i64, i32 } @make_pair_64_32(
3371; CHECK-SAME: i64 [[X:%.*]], i32 [[Y:%.*]]) #[[ATTR6]] {
3372; CHECK-NEXT:  [[ENTRY:.*:]]
3373; CHECK-NEXT:    [[TMP0:%.*]] = load i64, ptr @__msan_param_tls, align 8
3374; CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
3375; CHECK-NEXT:    call void @llvm.donothing()
3376; CHECK-NEXT:    [[TMP2:%.*]] = insertvalue { i64, i32 } { i64 -1, i32 -1 }, i64 [[TMP0]], 0
3377; CHECK-NEXT:    [[A:%.*]] = insertvalue { i64, i32 } undef, i64 [[X]], 0
3378; CHECK-NEXT:    [[TMP3:%.*]] = insertvalue { i64, i32 } [[TMP2]], i32 [[TMP1]], 1
3379; CHECK-NEXT:    [[B:%.*]] = insertvalue { i64, i32 } [[A]], i32 [[Y]], 1
3380; CHECK-NEXT:    store { i64, i32 } [[TMP3]], ptr @__msan_retval_tls, align 8
3381; CHECK-NEXT:    ret { i64, i32 } [[B]]
3382;
3383; ORIGIN-LABEL: define { i64, i32 } @make_pair_64_32(
3384; ORIGIN-SAME: i64 [[X:%.*]], i32 [[Y:%.*]]) #[[ATTR6]] {
3385; ORIGIN-NEXT:  [[ENTRY:.*:]]
3386; ORIGIN-NEXT:    [[TMP0:%.*]] = load i64, ptr @__msan_param_tls, align 8
3387; ORIGIN-NEXT:    [[TMP1:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
3388; ORIGIN-NEXT:    [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
3389; ORIGIN-NEXT:    [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4
3390; ORIGIN-NEXT:    call void @llvm.donothing()
3391; ORIGIN-NEXT:    [[TMP4:%.*]] = insertvalue { i64, i32 } { i64 -1, i32 -1 }, i64 [[TMP0]], 0
3392; ORIGIN-NEXT:    [[TMP5:%.*]] = icmp ne i64 [[TMP0]], 0
3393; ORIGIN-NEXT:    [[TMP6:%.*]] = select i1 [[TMP5]], i32 [[TMP1]], i32 0
3394; ORIGIN-NEXT:    [[A:%.*]] = insertvalue { i64, i32 } undef, i64 [[X]], 0
3395; ORIGIN-NEXT:    [[TMP7:%.*]] = insertvalue { i64, i32 } [[TMP4]], i32 [[TMP2]], 1
3396; ORIGIN-NEXT:    [[TMP8:%.*]] = icmp ne i32 [[TMP2]], 0
3397; ORIGIN-NEXT:    [[TMP9:%.*]] = select i1 [[TMP8]], i32 [[TMP3]], i32 [[TMP6]]
3398; ORIGIN-NEXT:    [[B:%.*]] = insertvalue { i64, i32 } [[A]], i32 [[Y]], 1
3399; ORIGIN-NEXT:    store { i64, i32 } [[TMP7]], ptr @__msan_retval_tls, align 8
3400; ORIGIN-NEXT:    store i32 [[TMP9]], ptr @__msan_retval_origin_tls, align 4
3401; ORIGIN-NEXT:    ret { i64, i32 } [[B]]
3402;
3403; CALLS-LABEL: define { i64, i32 } @make_pair_64_32(
3404; CALLS-SAME: i64 [[X:%.*]], i32 [[Y:%.*]]) #[[ATTR6]] {
3405; CALLS-NEXT:  [[ENTRY:.*:]]
3406; CALLS-NEXT:    [[TMP0:%.*]] = load i64, ptr @__msan_param_tls, align 8
3407; CALLS-NEXT:    [[TMP1:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
3408; CALLS-NEXT:    [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
3409; CALLS-NEXT:    [[TMP3:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4
3410; CALLS-NEXT:    call void @llvm.donothing()
3411; CALLS-NEXT:    [[TMP4:%.*]] = insertvalue { i64, i32 } { i64 -1, i32 -1 }, i64 [[TMP0]], 0
3412; CALLS-NEXT:    [[TMP5:%.*]] = icmp ne i64 [[TMP0]], 0
3413; CALLS-NEXT:    [[TMP6:%.*]] = select i1 [[TMP5]], i32 [[TMP1]], i32 0
3414; CALLS-NEXT:    [[A:%.*]] = insertvalue { i64, i32 } undef, i64 [[X]], 0
3415; CALLS-NEXT:    [[TMP7:%.*]] = insertvalue { i64, i32 } [[TMP4]], i32 [[TMP2]], 1
3416; CALLS-NEXT:    [[TMP8:%.*]] = icmp ne i32 [[TMP2]], 0
3417; CALLS-NEXT:    [[TMP9:%.*]] = select i1 [[TMP8]], i32 [[TMP3]], i32 [[TMP6]]
3418; CALLS-NEXT:    [[B:%.*]] = insertvalue { i64, i32 } [[A]], i32 [[Y]], 1
3419; CALLS-NEXT:    store { i64, i32 } [[TMP7]], ptr @__msan_retval_tls, align 8
3420; CALLS-NEXT:    store i32 [[TMP9]], ptr @__msan_retval_origin_tls, align 4
3421; CALLS-NEXT:    ret { i64, i32 } [[B]]
3422;
3423entry:
3424  %a = insertvalue { i64, i32 } undef, i64 %x, 0
3425  %b = insertvalue { i64, i32 } %a, i32 %y, 1
3426  ret { i64, i32 } %b
3427}
3428
3429; Test shadow propagation for aggregates passed through ellipsis.
3430
3431%struct.StructByVal = type { i32, i32, i32, i32 }
3432
3433declare void @VAArgStructFn(i32 %guard, ...)
3434
3435define void @VAArgStruct(ptr nocapture %s) sanitize_memory {
3436; CHECK-LABEL: define void @VAArgStruct(
3437; CHECK-SAME: ptr captures(none) [[S:%.*]]) #[[ATTR6]] {
3438; CHECK-NEXT:  [[ENTRY:.*:]]
3439; CHECK-NEXT:    [[TMP0:%.*]] = load i64, ptr @__msan_param_tls, align 8
3440; CHECK-NEXT:    call void @llvm.donothing()
3441; CHECK-NEXT:    [[AGG_TMP2:%.*]] = alloca [[STRUCT_STRUCTBYVAL:%.*]], align 8
3442; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[AGG_TMP2]] to i64
3443; CHECK-NEXT:    [[TMP2:%.*]] = xor i64 [[TMP1]], 87960930222080
3444; CHECK-NEXT:    [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to ptr
3445; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP3]], i8 -1, i64 16, i1 false)
3446; CHECK-NEXT:    [[AGG_TMP_SROA_0_0_COPYLOAD:%.*]] = load i64, ptr [[S]], align 4
3447; CHECK-NEXT:    [[TMP4:%.*]] = ptrtoint ptr [[S]] to i64
3448; CHECK-NEXT:    [[TMP5:%.*]] = xor i64 [[TMP4]], 87960930222080
3449; CHECK-NEXT:    [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
3450; CHECK-NEXT:    [[_MSLD:%.*]] = load i64, ptr [[TMP6]], align 4
3451; CHECK-NEXT:    [[_MSPROP:%.*]] = or i64 [[TMP0]], 0
3452; CHECK-NEXT:    [[_MSPROP1:%.*]] = or i64 [[_MSPROP]], 0
3453; CHECK-NEXT:    [[AGG_TMP_SROA_2_0__SROA_IDX:%.*]] = getelementptr inbounds [[STRUCT_STRUCTBYVAL]], ptr [[S]], i64 0, i32 2
3454; CHECK-NEXT:    [[AGG_TMP_SROA_2_0_COPYLOAD:%.*]] = load i64, ptr [[AGG_TMP_SROA_2_0__SROA_IDX]], align 4
3455; CHECK-NEXT:    [[TMP7:%.*]] = ptrtoint ptr [[AGG_TMP_SROA_2_0__SROA_IDX]] to i64
3456; CHECK-NEXT:    [[TMP8:%.*]] = xor i64 [[TMP7]], 87960930222080
3457; CHECK-NEXT:    [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr
3458; CHECK-NEXT:    [[_MSLD2:%.*]] = load i64, ptr [[TMP9]], align 4
3459; CHECK-NEXT:    [[TMP10:%.*]] = call ptr @__msan_memcpy(ptr [[AGG_TMP2]], ptr [[S]], i64 16)
3460; CHECK-NEXT:    store i32 -1, ptr @__msan_param_tls, align 8
3461; CHECK-NEXT:    store i64 [[_MSLD]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
3462; CHECK-NEXT:    store i64 [[_MSLD2]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
3463; CHECK-NEXT:    store i64 [[_MSLD]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
3464; CHECK-NEXT:    store i64 [[_MSLD2]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
3465; CHECK-NEXT:    [[TMP11:%.*]] = ptrtoint ptr [[AGG_TMP2]] to i64
3466; CHECK-NEXT:    [[TMP12:%.*]] = xor i64 [[TMP11]], 87960930222080
3467; CHECK-NEXT:    [[TMP13:%.*]] = inttoptr i64 [[TMP12]] to ptr
3468; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), ptr align 8 [[TMP13]], i64 16, i1 false)
3469; CHECK-NEXT:    store i64 [[_MSLD]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 8) to ptr), align 8
3470; CHECK-NEXT:    store i64 [[_MSLD2]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 16) to ptr), align 8
3471; CHECK-NEXT:    store i64 [[_MSLD]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 24) to ptr), align 8
3472; CHECK-NEXT:    store i64 [[_MSLD2]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 32) to ptr), align 8
3473; CHECK-NEXT:    [[TMP14:%.*]] = ptrtoint ptr [[AGG_TMP2]] to i64
3474; CHECK-NEXT:    [[TMP15:%.*]] = xor i64 [[TMP14]], 87960930222080
3475; CHECK-NEXT:    [[TMP16:%.*]] = inttoptr i64 [[TMP15]] to ptr
3476; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 176) to ptr), ptr align 8 [[TMP16]], i64 16, i1 false)
3477; CHECK-NEXT:    store i64 16, ptr @__msan_va_arg_overflow_size_tls, align 8
3478; CHECK-NEXT:    call void (i32, ...) @VAArgStructFn(i32 undef, i64 [[AGG_TMP_SROA_0_0_COPYLOAD]], i64 [[AGG_TMP_SROA_2_0_COPYLOAD]], i64 [[AGG_TMP_SROA_0_0_COPYLOAD]], i64 [[AGG_TMP_SROA_2_0_COPYLOAD]], ptr byval([[STRUCT_STRUCTBYVAL]]) align 8 [[AGG_TMP2]])
3479; CHECK-NEXT:    ret void
3480;
3481; ORIGIN-LABEL: define void @VAArgStruct(
3482; ORIGIN-SAME: ptr captures(none) [[S:%.*]]) #[[ATTR6]] {
3483; ORIGIN-NEXT:  [[ENTRY:.*:]]
3484; ORIGIN-NEXT:    [[TMP0:%.*]] = load i64, ptr @__msan_param_tls, align 8
3485; ORIGIN-NEXT:    [[TMP1:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
3486; ORIGIN-NEXT:    call void @llvm.donothing()
3487; ORIGIN-NEXT:    [[AGG_TMP2:%.*]] = alloca [[STRUCT_STRUCTBYVAL:%.*]], align 8
3488; ORIGIN-NEXT:    [[TMP2:%.*]] = ptrtoint ptr [[AGG_TMP2]] to i64
3489; ORIGIN-NEXT:    [[TMP3:%.*]] = xor i64 [[TMP2]], 87960930222080
3490; ORIGIN-NEXT:    [[TMP4:%.*]] = inttoptr i64 [[TMP3]] to ptr
3491; ORIGIN-NEXT:    [[TMP5:%.*]] = add i64 [[TMP3]], 17592186044416
3492; ORIGIN-NEXT:    [[TMP6:%.*]] = and i64 [[TMP5]], -4
3493; ORIGIN-NEXT:    [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
3494; ORIGIN-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP4]], i8 -1, i64 16, i1 false)
3495; ORIGIN-NEXT:    call void @__msan_set_alloca_origin_with_descr(ptr [[AGG_TMP2]], i64 16, ptr @[[GLOB8:[0-9]+]], ptr @[[GLOB9:[0-9]+]])
3496; ORIGIN-NEXT:    [[AGG_TMP_SROA_0_0_COPYLOAD:%.*]] = load i64, ptr [[S]], align 4
3497; ORIGIN-NEXT:    [[TMP8:%.*]] = ptrtoint ptr [[S]] to i64
3498; ORIGIN-NEXT:    [[TMP9:%.*]] = xor i64 [[TMP8]], 87960930222080
3499; ORIGIN-NEXT:    [[TMP10:%.*]] = inttoptr i64 [[TMP9]] to ptr
3500; ORIGIN-NEXT:    [[TMP11:%.*]] = add i64 [[TMP9]], 17592186044416
3501; ORIGIN-NEXT:    [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
3502; ORIGIN-NEXT:    [[_MSLD:%.*]] = load i64, ptr [[TMP10]], align 4
3503; ORIGIN-NEXT:    [[TMP13:%.*]] = load i32, ptr [[TMP12]], align 4
3504; ORIGIN-NEXT:    [[_MSPROP:%.*]] = or i64 [[TMP0]], 0
3505; ORIGIN-NEXT:    [[_MSPROP1:%.*]] = or i64 [[_MSPROP]], 0
3506; ORIGIN-NEXT:    [[AGG_TMP_SROA_2_0__SROA_IDX:%.*]] = getelementptr inbounds [[STRUCT_STRUCTBYVAL]], ptr [[S]], i64 0, i32 2
3507; ORIGIN-NEXT:    [[AGG_TMP_SROA_2_0_COPYLOAD:%.*]] = load i64, ptr [[AGG_TMP_SROA_2_0__SROA_IDX]], align 4
3508; ORIGIN-NEXT:    [[TMP14:%.*]] = ptrtoint ptr [[AGG_TMP_SROA_2_0__SROA_IDX]] to i64
3509; ORIGIN-NEXT:    [[TMP15:%.*]] = xor i64 [[TMP14]], 87960930222080
3510; ORIGIN-NEXT:    [[TMP16:%.*]] = inttoptr i64 [[TMP15]] to ptr
3511; ORIGIN-NEXT:    [[TMP17:%.*]] = add i64 [[TMP15]], 17592186044416
3512; ORIGIN-NEXT:    [[TMP18:%.*]] = inttoptr i64 [[TMP17]] to ptr
3513; ORIGIN-NEXT:    [[_MSLD2:%.*]] = load i64, ptr [[TMP16]], align 4
3514; ORIGIN-NEXT:    [[TMP19:%.*]] = load i32, ptr [[TMP18]], align 4
3515; ORIGIN-NEXT:    [[TMP20:%.*]] = call ptr @__msan_memcpy(ptr [[AGG_TMP2]], ptr [[S]], i64 16)
3516; ORIGIN-NEXT:    store i32 -1, ptr @__msan_param_tls, align 8
3517; ORIGIN-NEXT:    store i32 0, ptr @__msan_param_origin_tls, align 4
3518; ORIGIN-NEXT:    store i64 [[_MSLD]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
3519; ORIGIN-NEXT:    store i32 [[TMP13]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4
3520; ORIGIN-NEXT:    store i64 [[_MSLD2]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
3521; ORIGIN-NEXT:    store i32 [[TMP19]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4
3522; ORIGIN-NEXT:    store i64 [[_MSLD]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
3523; ORIGIN-NEXT:    store i32 [[TMP13]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 24) to ptr), align 4
3524; ORIGIN-NEXT:    store i64 [[_MSLD2]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
3525; ORIGIN-NEXT:    store i32 [[TMP19]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 32) to ptr), align 4
3526; ORIGIN-NEXT:    [[TMP21:%.*]] = ptrtoint ptr [[AGG_TMP2]] to i64
3527; ORIGIN-NEXT:    [[TMP22:%.*]] = xor i64 [[TMP21]], 87960930222080
3528; ORIGIN-NEXT:    [[TMP23:%.*]] = inttoptr i64 [[TMP22]] to ptr
3529; ORIGIN-NEXT:    [[TMP24:%.*]] = add i64 [[TMP22]], 17592186044416
3530; ORIGIN-NEXT:    [[TMP25:%.*]] = inttoptr i64 [[TMP24]] to ptr
3531; ORIGIN-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), ptr align 8 [[TMP23]], i64 16, i1 false)
3532; ORIGIN-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 4 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 40) to ptr), ptr align 4 [[TMP25]], i64 16, i1 false)
3533; ORIGIN-NEXT:    store i64 [[_MSLD]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 8) to ptr), align 8
3534; ORIGIN-NEXT:    [[TMP26:%.*]] = zext i32 [[TMP13]] to i64
3535; ORIGIN-NEXT:    [[TMP27:%.*]] = shl i64 [[TMP26]], 32
3536; ORIGIN-NEXT:    [[TMP28:%.*]] = or i64 [[TMP26]], [[TMP27]]
3537; ORIGIN-NEXT:    store i64 [[TMP28]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_origin_tls to i64), i64 8) to ptr), align 8
3538; ORIGIN-NEXT:    store i64 [[_MSLD2]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 16) to ptr), align 8
3539; ORIGIN-NEXT:    [[TMP29:%.*]] = zext i32 [[TMP19]] to i64
3540; ORIGIN-NEXT:    [[TMP30:%.*]] = shl i64 [[TMP29]], 32
3541; ORIGIN-NEXT:    [[TMP31:%.*]] = or i64 [[TMP29]], [[TMP30]]
3542; ORIGIN-NEXT:    store i64 [[TMP31]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_origin_tls to i64), i64 16) to ptr), align 8
3543; ORIGIN-NEXT:    store i64 [[_MSLD]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 24) to ptr), align 8
3544; ORIGIN-NEXT:    [[TMP32:%.*]] = zext i32 [[TMP13]] to i64
3545; ORIGIN-NEXT:    [[TMP33:%.*]] = shl i64 [[TMP32]], 32
3546; ORIGIN-NEXT:    [[TMP34:%.*]] = or i64 [[TMP32]], [[TMP33]]
3547; ORIGIN-NEXT:    store i64 [[TMP34]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_origin_tls to i64), i64 24) to ptr), align 8
3548; ORIGIN-NEXT:    store i64 [[_MSLD2]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 32) to ptr), align 8
3549; ORIGIN-NEXT:    [[TMP35:%.*]] = zext i32 [[TMP19]] to i64
3550; ORIGIN-NEXT:    [[TMP36:%.*]] = shl i64 [[TMP35]], 32
3551; ORIGIN-NEXT:    [[TMP37:%.*]] = or i64 [[TMP35]], [[TMP36]]
3552; ORIGIN-NEXT:    store i64 [[TMP37]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_origin_tls to i64), i64 32) to ptr), align 8
3553; ORIGIN-NEXT:    [[TMP38:%.*]] = ptrtoint ptr [[AGG_TMP2]] to i64
3554; ORIGIN-NEXT:    [[TMP39:%.*]] = xor i64 [[TMP38]], 87960930222080
3555; ORIGIN-NEXT:    [[TMP40:%.*]] = inttoptr i64 [[TMP39]] to ptr
3556; ORIGIN-NEXT:    [[TMP41:%.*]] = add i64 [[TMP39]], 17592186044416
3557; ORIGIN-NEXT:    [[TMP42:%.*]] = inttoptr i64 [[TMP41]] to ptr
3558; ORIGIN-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 176) to ptr), ptr align 8 [[TMP40]], i64 16, i1 false)
3559; ORIGIN-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_origin_tls to i64), i64 176) to ptr), ptr align 8 [[TMP42]], i64 16, i1 false)
3560; ORIGIN-NEXT:    store i64 16, ptr @__msan_va_arg_overflow_size_tls, align 8
3561; ORIGIN-NEXT:    call void (i32, ...) @VAArgStructFn(i32 undef, i64 [[AGG_TMP_SROA_0_0_COPYLOAD]], i64 [[AGG_TMP_SROA_2_0_COPYLOAD]], i64 [[AGG_TMP_SROA_0_0_COPYLOAD]], i64 [[AGG_TMP_SROA_2_0_COPYLOAD]], ptr byval([[STRUCT_STRUCTBYVAL]]) align 8 [[AGG_TMP2]])
3562; ORIGIN-NEXT:    ret void
3563;
3564; CALLS-LABEL: define void @VAArgStruct(
3565; CALLS-SAME: ptr captures(none) [[S:%.*]]) #[[ATTR6]] {
3566; CALLS-NEXT:  [[ENTRY:.*:]]
3567; CALLS-NEXT:    [[TMP0:%.*]] = load i64, ptr @__msan_param_tls, align 8
3568; CALLS-NEXT:    [[TMP1:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
3569; CALLS-NEXT:    call void @llvm.donothing()
3570; CALLS-NEXT:    [[AGG_TMP2:%.*]] = alloca [[STRUCT_STRUCTBYVAL:%.*]], align 8
3571; CALLS-NEXT:    [[TMP2:%.*]] = ptrtoint ptr [[AGG_TMP2]] to i64
3572; CALLS-NEXT:    [[TMP3:%.*]] = xor i64 [[TMP2]], 87960930222080
3573; CALLS-NEXT:    [[TMP4:%.*]] = inttoptr i64 [[TMP3]] to ptr
3574; CALLS-NEXT:    [[TMP5:%.*]] = add i64 [[TMP3]], 17592186044416
3575; CALLS-NEXT:    [[TMP6:%.*]] = and i64 [[TMP5]], -4
3576; CALLS-NEXT:    [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
3577; CALLS-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP4]], i8 -1, i64 16, i1 false)
3578; CALLS-NEXT:    call void @__msan_set_alloca_origin_with_descr(ptr [[AGG_TMP2]], i64 16, ptr @[[GLOB8:[0-9]+]], ptr @[[GLOB9:[0-9]+]])
3579; CALLS-NEXT:    call void @__msan_maybe_warning_8(i64 zeroext [[TMP0]], i32 zeroext [[TMP1]])
3580; CALLS-NEXT:    [[AGG_TMP_SROA_0_0_COPYLOAD:%.*]] = load i64, ptr [[S]], align 4
3581; CALLS-NEXT:    [[TMP8:%.*]] = ptrtoint ptr [[S]] to i64
3582; CALLS-NEXT:    [[TMP9:%.*]] = xor i64 [[TMP8]], 87960930222080
3583; CALLS-NEXT:    [[TMP10:%.*]] = inttoptr i64 [[TMP9]] to ptr
3584; CALLS-NEXT:    [[TMP11:%.*]] = add i64 [[TMP9]], 17592186044416
3585; CALLS-NEXT:    [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
3586; CALLS-NEXT:    [[_MSLD:%.*]] = load i64, ptr [[TMP10]], align 4
3587; CALLS-NEXT:    [[TMP13:%.*]] = load i32, ptr [[TMP12]], align 4
3588; CALLS-NEXT:    [[_MSPROP:%.*]] = or i64 [[TMP0]], 0
3589; CALLS-NEXT:    [[_MSPROP1:%.*]] = or i64 [[_MSPROP]], 0
3590; CALLS-NEXT:    [[AGG_TMP_SROA_2_0__SROA_IDX:%.*]] = getelementptr inbounds [[STRUCT_STRUCTBYVAL]], ptr [[S]], i64 0, i32 2
3591; CALLS-NEXT:    call void @__msan_maybe_warning_8(i64 zeroext [[_MSPROP1]], i32 zeroext [[TMP1]])
3592; CALLS-NEXT:    [[AGG_TMP_SROA_2_0_COPYLOAD:%.*]] = load i64, ptr [[AGG_TMP_SROA_2_0__SROA_IDX]], align 4
3593; CALLS-NEXT:    [[TMP14:%.*]] = ptrtoint ptr [[AGG_TMP_SROA_2_0__SROA_IDX]] to i64
3594; CALLS-NEXT:    [[TMP15:%.*]] = xor i64 [[TMP14]], 87960930222080
3595; CALLS-NEXT:    [[TMP16:%.*]] = inttoptr i64 [[TMP15]] to ptr
3596; CALLS-NEXT:    [[TMP17:%.*]] = add i64 [[TMP15]], 17592186044416
3597; CALLS-NEXT:    [[TMP18:%.*]] = inttoptr i64 [[TMP17]] to ptr
3598; CALLS-NEXT:    [[_MSLD2:%.*]] = load i64, ptr [[TMP16]], align 4
3599; CALLS-NEXT:    [[TMP19:%.*]] = load i32, ptr [[TMP18]], align 4
3600; CALLS-NEXT:    [[TMP20:%.*]] = call ptr @__msan_memcpy(ptr [[AGG_TMP2]], ptr [[S]], i64 16)
3601; CALLS-NEXT:    store i32 -1, ptr @__msan_param_tls, align 8
3602; CALLS-NEXT:    store i32 0, ptr @__msan_param_origin_tls, align 4
3603; CALLS-NEXT:    store i64 [[_MSLD]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
3604; CALLS-NEXT:    store i32 [[TMP13]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4
3605; CALLS-NEXT:    store i64 [[_MSLD2]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
3606; CALLS-NEXT:    store i32 [[TMP19]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4
3607; CALLS-NEXT:    store i64 [[_MSLD]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
3608; CALLS-NEXT:    store i32 [[TMP13]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 24) to ptr), align 4
3609; CALLS-NEXT:    store i64 [[_MSLD2]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
3610; CALLS-NEXT:    store i32 [[TMP19]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 32) to ptr), align 4
3611; CALLS-NEXT:    [[TMP21:%.*]] = ptrtoint ptr [[AGG_TMP2]] to i64
3612; CALLS-NEXT:    [[TMP22:%.*]] = xor i64 [[TMP21]], 87960930222080
3613; CALLS-NEXT:    [[TMP23:%.*]] = inttoptr i64 [[TMP22]] to ptr
3614; CALLS-NEXT:    [[TMP24:%.*]] = add i64 [[TMP22]], 17592186044416
3615; CALLS-NEXT:    [[TMP25:%.*]] = inttoptr i64 [[TMP24]] to ptr
3616; CALLS-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), ptr align 8 [[TMP23]], i64 16, i1 false)
3617; CALLS-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 4 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 40) to ptr), ptr align 4 [[TMP25]], i64 16, i1 false)
3618; CALLS-NEXT:    store i64 [[_MSLD]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 8) to ptr), align 8
3619; CALLS-NEXT:    [[TMP26:%.*]] = zext i32 [[TMP13]] to i64
3620; CALLS-NEXT:    [[TMP27:%.*]] = shl i64 [[TMP26]], 32
3621; CALLS-NEXT:    [[TMP28:%.*]] = or i64 [[TMP26]], [[TMP27]]
3622; CALLS-NEXT:    store i64 [[TMP28]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_origin_tls to i64), i64 8) to ptr), align 8
3623; CALLS-NEXT:    store i64 [[_MSLD2]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 16) to ptr), align 8
3624; CALLS-NEXT:    [[TMP29:%.*]] = zext i32 [[TMP19]] to i64
3625; CALLS-NEXT:    [[TMP30:%.*]] = shl i64 [[TMP29]], 32
3626; CALLS-NEXT:    [[TMP31:%.*]] = or i64 [[TMP29]], [[TMP30]]
3627; CALLS-NEXT:    store i64 [[TMP31]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_origin_tls to i64), i64 16) to ptr), align 8
3628; CALLS-NEXT:    store i64 [[_MSLD]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 24) to ptr), align 8
3629; CALLS-NEXT:    [[TMP32:%.*]] = zext i32 [[TMP13]] to i64
3630; CALLS-NEXT:    [[TMP33:%.*]] = shl i64 [[TMP32]], 32
3631; CALLS-NEXT:    [[TMP34:%.*]] = or i64 [[TMP32]], [[TMP33]]
3632; CALLS-NEXT:    store i64 [[TMP34]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_origin_tls to i64), i64 24) to ptr), align 8
3633; CALLS-NEXT:    store i64 [[_MSLD2]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 32) to ptr), align 8
3634; CALLS-NEXT:    [[TMP35:%.*]] = zext i32 [[TMP19]] to i64
3635; CALLS-NEXT:    [[TMP36:%.*]] = shl i64 [[TMP35]], 32
3636; CALLS-NEXT:    [[TMP37:%.*]] = or i64 [[TMP35]], [[TMP36]]
3637; CALLS-NEXT:    store i64 [[TMP37]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_origin_tls to i64), i64 32) to ptr), align 8
3638; CALLS-NEXT:    [[TMP38:%.*]] = ptrtoint ptr [[AGG_TMP2]] to i64
3639; CALLS-NEXT:    [[TMP39:%.*]] = xor i64 [[TMP38]], 87960930222080
3640; CALLS-NEXT:    [[TMP40:%.*]] = inttoptr i64 [[TMP39]] to ptr
3641; CALLS-NEXT:    [[TMP41:%.*]] = add i64 [[TMP39]], 17592186044416
3642; CALLS-NEXT:    [[TMP42:%.*]] = inttoptr i64 [[TMP41]] to ptr
3643; CALLS-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 176) to ptr), ptr align 8 [[TMP40]], i64 16, i1 false)
3644; CALLS-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_origin_tls to i64), i64 176) to ptr), ptr align 8 [[TMP42]], i64 16, i1 false)
3645; CALLS-NEXT:    store i64 16, ptr @__msan_va_arg_overflow_size_tls, align 8
3646; CALLS-NEXT:    call void (i32, ...) @VAArgStructFn(i32 undef, i64 [[AGG_TMP_SROA_0_0_COPYLOAD]], i64 [[AGG_TMP_SROA_2_0_COPYLOAD]], i64 [[AGG_TMP_SROA_0_0_COPYLOAD]], i64 [[AGG_TMP_SROA_2_0_COPYLOAD]], ptr byval([[STRUCT_STRUCTBYVAL]]) align 8 [[AGG_TMP2]])
3647; CALLS-NEXT:    ret void
3648;
3649entry:
3650  %agg.tmp2 = alloca %struct.StructByVal, align 8
3651  %agg.tmp.sroa.0.0.copyload = load i64, ptr %s, align 4
3652  %agg.tmp.sroa.2.0..sroa_idx = getelementptr inbounds %struct.StructByVal, ptr %s, i64 0, i32 2
3653  %agg.tmp.sroa.2.0.copyload = load i64, ptr %agg.tmp.sroa.2.0..sroa_idx, align 4
3654  call void @llvm.memcpy.p0.p0.i64(ptr align 4 %agg.tmp2, ptr align 4 %s, i64 16, i1 false)
3655  call void (i32, ...) @VAArgStructFn(i32 undef, i64 %agg.tmp.sroa.0.0.copyload, i64 %agg.tmp.sroa.2.0.copyload, i64 %agg.tmp.sroa.0.0.copyload, i64 %agg.tmp.sroa.2.0.copyload, ptr byval(%struct.StructByVal) align 8 %agg.tmp2)
3656  ret void
3657}
3658
3659
3660; Same code compiled without SSE (see attributes below).
3661; The register save area is only 48 bytes instead of 176.
3662define void @VAArgStructNoSSE(ptr nocapture %s) sanitize_memory #0 {
3663; CHECK-LABEL: define void @VAArgStructNoSSE(
3664; CHECK-SAME: ptr captures(none) [[S:%.*]]) #[[ATTR9:[0-9]+]] {
3665; CHECK-NEXT:  [[ENTRY:.*:]]
3666; CHECK-NEXT:    [[TMP0:%.*]] = load i64, ptr @__msan_param_tls, align 8
3667; CHECK-NEXT:    call void @llvm.donothing()
3668; CHECK-NEXT:    [[AGG_TMP2:%.*]] = alloca [[STRUCT_STRUCTBYVAL:%.*]], align 8
3669; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[AGG_TMP2]] to i64
3670; CHECK-NEXT:    [[TMP2:%.*]] = xor i64 [[TMP1]], 87960930222080
3671; CHECK-NEXT:    [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to ptr
3672; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP3]], i8 -1, i64 16, i1 false)
3673; CHECK-NEXT:    [[AGG_TMP_SROA_0_0_COPYLOAD:%.*]] = load i64, ptr [[S]], align 4
3674; CHECK-NEXT:    [[TMP4:%.*]] = ptrtoint ptr [[S]] to i64
3675; CHECK-NEXT:    [[TMP5:%.*]] = xor i64 [[TMP4]], 87960930222080
3676; CHECK-NEXT:    [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
3677; CHECK-NEXT:    [[_MSLD:%.*]] = load i64, ptr [[TMP6]], align 4
3678; CHECK-NEXT:    [[_MSPROP:%.*]] = or i64 [[TMP0]], 0
3679; CHECK-NEXT:    [[_MSPROP1:%.*]] = or i64 [[_MSPROP]], 0
3680; CHECK-NEXT:    [[AGG_TMP_SROA_2_0__SROA_IDX:%.*]] = getelementptr inbounds [[STRUCT_STRUCTBYVAL]], ptr [[S]], i64 0, i32 2
3681; CHECK-NEXT:    [[AGG_TMP_SROA_2_0_COPYLOAD:%.*]] = load i64, ptr [[AGG_TMP_SROA_2_0__SROA_IDX]], align 4
3682; CHECK-NEXT:    [[TMP7:%.*]] = ptrtoint ptr [[AGG_TMP_SROA_2_0__SROA_IDX]] to i64
3683; CHECK-NEXT:    [[TMP8:%.*]] = xor i64 [[TMP7]], 87960930222080
3684; CHECK-NEXT:    [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr
3685; CHECK-NEXT:    [[_MSLD2:%.*]] = load i64, ptr [[TMP9]], align 4
3686; CHECK-NEXT:    [[TMP10:%.*]] = call ptr @__msan_memcpy(ptr [[AGG_TMP2]], ptr [[S]], i64 16)
3687; CHECK-NEXT:    store i32 -1, ptr @__msan_param_tls, align 8
3688; CHECK-NEXT:    store i64 [[_MSLD]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
3689; CHECK-NEXT:    store i64 [[_MSLD2]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
3690; CHECK-NEXT:    store i64 [[_MSLD]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
3691; CHECK-NEXT:    store i64 [[_MSLD2]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
3692; CHECK-NEXT:    [[TMP11:%.*]] = ptrtoint ptr [[AGG_TMP2]] to i64
3693; CHECK-NEXT:    [[TMP12:%.*]] = xor i64 [[TMP11]], 87960930222080
3694; CHECK-NEXT:    [[TMP13:%.*]] = inttoptr i64 [[TMP12]] to ptr
3695; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), ptr align 8 [[TMP13]], i64 16, i1 false)
3696; CHECK-NEXT:    store i64 [[_MSLD]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 8) to ptr), align 8
3697; CHECK-NEXT:    store i64 [[_MSLD2]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 16) to ptr), align 8
3698; CHECK-NEXT:    store i64 [[_MSLD]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 24) to ptr), align 8
3699; CHECK-NEXT:    store i64 [[_MSLD2]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 32) to ptr), align 8
3700; CHECK-NEXT:    [[TMP14:%.*]] = ptrtoint ptr [[AGG_TMP2]] to i64
3701; CHECK-NEXT:    [[TMP15:%.*]] = xor i64 [[TMP14]], 87960930222080
3702; CHECK-NEXT:    [[TMP16:%.*]] = inttoptr i64 [[TMP15]] to ptr
3703; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 48) to ptr), ptr align 8 [[TMP16]], i64 16, i1 false)
3704; CHECK-NEXT:    store i64 16, ptr @__msan_va_arg_overflow_size_tls, align 8
3705; CHECK-NEXT:    call void (i32, ...) @VAArgStructFn(i32 undef, i64 [[AGG_TMP_SROA_0_0_COPYLOAD]], i64 [[AGG_TMP_SROA_2_0_COPYLOAD]], i64 [[AGG_TMP_SROA_0_0_COPYLOAD]], i64 [[AGG_TMP_SROA_2_0_COPYLOAD]], ptr byval([[STRUCT_STRUCTBYVAL]]) align 8 [[AGG_TMP2]])
3706; CHECK-NEXT:    ret void
3707;
3708; ORIGIN-LABEL: define void @VAArgStructNoSSE(
3709; ORIGIN-SAME: ptr captures(none) [[S:%.*]]) #[[ATTR9:[0-9]+]] {
3710; ORIGIN-NEXT:  [[ENTRY:.*:]]
3711; ORIGIN-NEXT:    [[TMP0:%.*]] = load i64, ptr @__msan_param_tls, align 8
3712; ORIGIN-NEXT:    [[TMP1:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
3713; ORIGIN-NEXT:    call void @llvm.donothing()
3714; ORIGIN-NEXT:    [[AGG_TMP2:%.*]] = alloca [[STRUCT_STRUCTBYVAL:%.*]], align 8
3715; ORIGIN-NEXT:    [[TMP2:%.*]] = ptrtoint ptr [[AGG_TMP2]] to i64
3716; ORIGIN-NEXT:    [[TMP3:%.*]] = xor i64 [[TMP2]], 87960930222080
3717; ORIGIN-NEXT:    [[TMP4:%.*]] = inttoptr i64 [[TMP3]] to ptr
3718; ORIGIN-NEXT:    [[TMP5:%.*]] = add i64 [[TMP3]], 17592186044416
3719; ORIGIN-NEXT:    [[TMP6:%.*]] = and i64 [[TMP5]], -4
3720; ORIGIN-NEXT:    [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
3721; ORIGIN-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP4]], i8 -1, i64 16, i1 false)
3722; ORIGIN-NEXT:    call void @__msan_set_alloca_origin_with_descr(ptr [[AGG_TMP2]], i64 16, ptr @[[GLOB10:[0-9]+]], ptr @[[GLOB11:[0-9]+]])
3723; ORIGIN-NEXT:    [[AGG_TMP_SROA_0_0_COPYLOAD:%.*]] = load i64, ptr [[S]], align 4
3724; ORIGIN-NEXT:    [[TMP8:%.*]] = ptrtoint ptr [[S]] to i64
3725; ORIGIN-NEXT:    [[TMP9:%.*]] = xor i64 [[TMP8]], 87960930222080
3726; ORIGIN-NEXT:    [[TMP10:%.*]] = inttoptr i64 [[TMP9]] to ptr
3727; ORIGIN-NEXT:    [[TMP11:%.*]] = add i64 [[TMP9]], 17592186044416
3728; ORIGIN-NEXT:    [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
3729; ORIGIN-NEXT:    [[_MSLD:%.*]] = load i64, ptr [[TMP10]], align 4
3730; ORIGIN-NEXT:    [[TMP13:%.*]] = load i32, ptr [[TMP12]], align 4
3731; ORIGIN-NEXT:    [[_MSPROP:%.*]] = or i64 [[TMP0]], 0
3732; ORIGIN-NEXT:    [[_MSPROP1:%.*]] = or i64 [[_MSPROP]], 0
3733; ORIGIN-NEXT:    [[AGG_TMP_SROA_2_0__SROA_IDX:%.*]] = getelementptr inbounds [[STRUCT_STRUCTBYVAL]], ptr [[S]], i64 0, i32 2
3734; ORIGIN-NEXT:    [[AGG_TMP_SROA_2_0_COPYLOAD:%.*]] = load i64, ptr [[AGG_TMP_SROA_2_0__SROA_IDX]], align 4
3735; ORIGIN-NEXT:    [[TMP14:%.*]] = ptrtoint ptr [[AGG_TMP_SROA_2_0__SROA_IDX]] to i64
3736; ORIGIN-NEXT:    [[TMP15:%.*]] = xor i64 [[TMP14]], 87960930222080
3737; ORIGIN-NEXT:    [[TMP16:%.*]] = inttoptr i64 [[TMP15]] to ptr
3738; ORIGIN-NEXT:    [[TMP17:%.*]] = add i64 [[TMP15]], 17592186044416
3739; ORIGIN-NEXT:    [[TMP18:%.*]] = inttoptr i64 [[TMP17]] to ptr
3740; ORIGIN-NEXT:    [[_MSLD2:%.*]] = load i64, ptr [[TMP16]], align 4
3741; ORIGIN-NEXT:    [[TMP19:%.*]] = load i32, ptr [[TMP18]], align 4
3742; ORIGIN-NEXT:    [[TMP20:%.*]] = call ptr @__msan_memcpy(ptr [[AGG_TMP2]], ptr [[S]], i64 16)
3743; ORIGIN-NEXT:    store i32 -1, ptr @__msan_param_tls, align 8
3744; ORIGIN-NEXT:    store i32 0, ptr @__msan_param_origin_tls, align 4
3745; ORIGIN-NEXT:    store i64 [[_MSLD]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
3746; ORIGIN-NEXT:    store i32 [[TMP13]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4
3747; ORIGIN-NEXT:    store i64 [[_MSLD2]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
3748; ORIGIN-NEXT:    store i32 [[TMP19]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4
3749; ORIGIN-NEXT:    store i64 [[_MSLD]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
3750; ORIGIN-NEXT:    store i32 [[TMP13]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 24) to ptr), align 4
3751; ORIGIN-NEXT:    store i64 [[_MSLD2]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
3752; ORIGIN-NEXT:    store i32 [[TMP19]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 32) to ptr), align 4
3753; ORIGIN-NEXT:    [[TMP21:%.*]] = ptrtoint ptr [[AGG_TMP2]] to i64
3754; ORIGIN-NEXT:    [[TMP22:%.*]] = xor i64 [[TMP21]], 87960930222080
3755; ORIGIN-NEXT:    [[TMP23:%.*]] = inttoptr i64 [[TMP22]] to ptr
3756; ORIGIN-NEXT:    [[TMP24:%.*]] = add i64 [[TMP22]], 17592186044416
3757; ORIGIN-NEXT:    [[TMP25:%.*]] = inttoptr i64 [[TMP24]] to ptr
3758; ORIGIN-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), ptr align 8 [[TMP23]], i64 16, i1 false)
3759; ORIGIN-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 4 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 40) to ptr), ptr align 4 [[TMP25]], i64 16, i1 false)
3760; ORIGIN-NEXT:    store i64 [[_MSLD]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 8) to ptr), align 8
3761; ORIGIN-NEXT:    [[TMP26:%.*]] = zext i32 [[TMP13]] to i64
3762; ORIGIN-NEXT:    [[TMP27:%.*]] = shl i64 [[TMP26]], 32
3763; ORIGIN-NEXT:    [[TMP28:%.*]] = or i64 [[TMP26]], [[TMP27]]
3764; ORIGIN-NEXT:    store i64 [[TMP28]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_origin_tls to i64), i64 8) to ptr), align 8
3765; ORIGIN-NEXT:    store i64 [[_MSLD2]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 16) to ptr), align 8
3766; ORIGIN-NEXT:    [[TMP29:%.*]] = zext i32 [[TMP19]] to i64
3767; ORIGIN-NEXT:    [[TMP30:%.*]] = shl i64 [[TMP29]], 32
3768; ORIGIN-NEXT:    [[TMP31:%.*]] = or i64 [[TMP29]], [[TMP30]]
3769; ORIGIN-NEXT:    store i64 [[TMP31]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_origin_tls to i64), i64 16) to ptr), align 8
3770; ORIGIN-NEXT:    store i64 [[_MSLD]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 24) to ptr), align 8
3771; ORIGIN-NEXT:    [[TMP32:%.*]] = zext i32 [[TMP13]] to i64
3772; ORIGIN-NEXT:    [[TMP33:%.*]] = shl i64 [[TMP32]], 32
3773; ORIGIN-NEXT:    [[TMP34:%.*]] = or i64 [[TMP32]], [[TMP33]]
3774; ORIGIN-NEXT:    store i64 [[TMP34]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_origin_tls to i64), i64 24) to ptr), align 8
3775; ORIGIN-NEXT:    store i64 [[_MSLD2]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 32) to ptr), align 8
3776; ORIGIN-NEXT:    [[TMP35:%.*]] = zext i32 [[TMP19]] to i64
3777; ORIGIN-NEXT:    [[TMP36:%.*]] = shl i64 [[TMP35]], 32
3778; ORIGIN-NEXT:    [[TMP37:%.*]] = or i64 [[TMP35]], [[TMP36]]
3779; ORIGIN-NEXT:    store i64 [[TMP37]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_origin_tls to i64), i64 32) to ptr), align 8
3780; ORIGIN-NEXT:    [[TMP38:%.*]] = ptrtoint ptr [[AGG_TMP2]] to i64
3781; ORIGIN-NEXT:    [[TMP39:%.*]] = xor i64 [[TMP38]], 87960930222080
3782; ORIGIN-NEXT:    [[TMP40:%.*]] = inttoptr i64 [[TMP39]] to ptr
3783; ORIGIN-NEXT:    [[TMP41:%.*]] = add i64 [[TMP39]], 17592186044416
3784; ORIGIN-NEXT:    [[TMP42:%.*]] = inttoptr i64 [[TMP41]] to ptr
3785; ORIGIN-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 48) to ptr), ptr align 8 [[TMP40]], i64 16, i1 false)
3786; ORIGIN-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_origin_tls to i64), i64 48) to ptr), ptr align 8 [[TMP42]], i64 16, i1 false)
3787; ORIGIN-NEXT:    store i64 16, ptr @__msan_va_arg_overflow_size_tls, align 8
3788; ORIGIN-NEXT:    call void (i32, ...) @VAArgStructFn(i32 undef, i64 [[AGG_TMP_SROA_0_0_COPYLOAD]], i64 [[AGG_TMP_SROA_2_0_COPYLOAD]], i64 [[AGG_TMP_SROA_0_0_COPYLOAD]], i64 [[AGG_TMP_SROA_2_0_COPYLOAD]], ptr byval([[STRUCT_STRUCTBYVAL]]) align 8 [[AGG_TMP2]])
3789; ORIGIN-NEXT:    ret void
3790;
3791; CALLS-LABEL: define void @VAArgStructNoSSE(
3792; CALLS-SAME: ptr captures(none) [[S:%.*]]) #[[ATTR9:[0-9]+]] {
3793; CALLS-NEXT:  [[ENTRY:.*:]]
3794; CALLS-NEXT:    [[TMP0:%.*]] = load i64, ptr @__msan_param_tls, align 8
3795; CALLS-NEXT:    [[TMP1:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
3796; CALLS-NEXT:    call void @llvm.donothing()
3797; CALLS-NEXT:    [[AGG_TMP2:%.*]] = alloca [[STRUCT_STRUCTBYVAL:%.*]], align 8
3798; CALLS-NEXT:    [[TMP2:%.*]] = ptrtoint ptr [[AGG_TMP2]] to i64
3799; CALLS-NEXT:    [[TMP3:%.*]] = xor i64 [[TMP2]], 87960930222080
3800; CALLS-NEXT:    [[TMP4:%.*]] = inttoptr i64 [[TMP3]] to ptr
3801; CALLS-NEXT:    [[TMP5:%.*]] = add i64 [[TMP3]], 17592186044416
3802; CALLS-NEXT:    [[TMP6:%.*]] = and i64 [[TMP5]], -4
3803; CALLS-NEXT:    [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
3804; CALLS-NEXT:    call void @llvm.memset.p0.i64(ptr align 8 [[TMP4]], i8 -1, i64 16, i1 false)
3805; CALLS-NEXT:    call void @__msan_set_alloca_origin_with_descr(ptr [[AGG_TMP2]], i64 16, ptr @[[GLOB10:[0-9]+]], ptr @[[GLOB11:[0-9]+]])
3806; CALLS-NEXT:    call void @__msan_maybe_warning_8(i64 zeroext [[TMP0]], i32 zeroext [[TMP1]])
3807; CALLS-NEXT:    [[AGG_TMP_SROA_0_0_COPYLOAD:%.*]] = load i64, ptr [[S]], align 4
3808; CALLS-NEXT:    [[TMP8:%.*]] = ptrtoint ptr [[S]] to i64
3809; CALLS-NEXT:    [[TMP9:%.*]] = xor i64 [[TMP8]], 87960930222080
3810; CALLS-NEXT:    [[TMP10:%.*]] = inttoptr i64 [[TMP9]] to ptr
3811; CALLS-NEXT:    [[TMP11:%.*]] = add i64 [[TMP9]], 17592186044416
3812; CALLS-NEXT:    [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
3813; CALLS-NEXT:    [[_MSLD:%.*]] = load i64, ptr [[TMP10]], align 4
3814; CALLS-NEXT:    [[TMP13:%.*]] = load i32, ptr [[TMP12]], align 4
3815; CALLS-NEXT:    [[_MSPROP:%.*]] = or i64 [[TMP0]], 0
3816; CALLS-NEXT:    [[_MSPROP1:%.*]] = or i64 [[_MSPROP]], 0
3817; CALLS-NEXT:    [[AGG_TMP_SROA_2_0__SROA_IDX:%.*]] = getelementptr inbounds [[STRUCT_STRUCTBYVAL]], ptr [[S]], i64 0, i32 2
3818; CALLS-NEXT:    call void @__msan_maybe_warning_8(i64 zeroext [[_MSPROP1]], i32 zeroext [[TMP1]])
3819; CALLS-NEXT:    [[AGG_TMP_SROA_2_0_COPYLOAD:%.*]] = load i64, ptr [[AGG_TMP_SROA_2_0__SROA_IDX]], align 4
3820; CALLS-NEXT:    [[TMP14:%.*]] = ptrtoint ptr [[AGG_TMP_SROA_2_0__SROA_IDX]] to i64
3821; CALLS-NEXT:    [[TMP15:%.*]] = xor i64 [[TMP14]], 87960930222080
3822; CALLS-NEXT:    [[TMP16:%.*]] = inttoptr i64 [[TMP15]] to ptr
3823; CALLS-NEXT:    [[TMP17:%.*]] = add i64 [[TMP15]], 17592186044416
3824; CALLS-NEXT:    [[TMP18:%.*]] = inttoptr i64 [[TMP17]] to ptr
3825; CALLS-NEXT:    [[_MSLD2:%.*]] = load i64, ptr [[TMP16]], align 4
3826; CALLS-NEXT:    [[TMP19:%.*]] = load i32, ptr [[TMP18]], align 4
3827; CALLS-NEXT:    [[TMP20:%.*]] = call ptr @__msan_memcpy(ptr [[AGG_TMP2]], ptr [[S]], i64 16)
3828; CALLS-NEXT:    store i32 -1, ptr @__msan_param_tls, align 8
3829; CALLS-NEXT:    store i32 0, ptr @__msan_param_origin_tls, align 4
3830; CALLS-NEXT:    store i64 [[_MSLD]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
3831; CALLS-NEXT:    store i32 [[TMP13]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4
3832; CALLS-NEXT:    store i64 [[_MSLD2]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
3833; CALLS-NEXT:    store i32 [[TMP19]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 16) to ptr), align 4
3834; CALLS-NEXT:    store i64 [[_MSLD]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
3835; CALLS-NEXT:    store i32 [[TMP13]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 24) to ptr), align 4
3836; CALLS-NEXT:    store i64 [[_MSLD2]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
3837; CALLS-NEXT:    store i32 [[TMP19]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 32) to ptr), align 4
3838; CALLS-NEXT:    [[TMP21:%.*]] = ptrtoint ptr [[AGG_TMP2]] to i64
3839; CALLS-NEXT:    [[TMP22:%.*]] = xor i64 [[TMP21]], 87960930222080
3840; CALLS-NEXT:    [[TMP23:%.*]] = inttoptr i64 [[TMP22]] to ptr
3841; CALLS-NEXT:    [[TMP24:%.*]] = add i64 [[TMP22]], 17592186044416
3842; CALLS-NEXT:    [[TMP25:%.*]] = inttoptr i64 [[TMP24]] to ptr
3843; CALLS-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), ptr align 8 [[TMP23]], i64 16, i1 false)
3844; CALLS-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 4 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 40) to ptr), ptr align 4 [[TMP25]], i64 16, i1 false)
3845; CALLS-NEXT:    store i64 [[_MSLD]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 8) to ptr), align 8
3846; CALLS-NEXT:    [[TMP26:%.*]] = zext i32 [[TMP13]] to i64
3847; CALLS-NEXT:    [[TMP27:%.*]] = shl i64 [[TMP26]], 32
3848; CALLS-NEXT:    [[TMP28:%.*]] = or i64 [[TMP26]], [[TMP27]]
3849; CALLS-NEXT:    store i64 [[TMP28]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_origin_tls to i64), i64 8) to ptr), align 8
3850; CALLS-NEXT:    store i64 [[_MSLD2]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 16) to ptr), align 8
3851; CALLS-NEXT:    [[TMP29:%.*]] = zext i32 [[TMP19]] to i64
3852; CALLS-NEXT:    [[TMP30:%.*]] = shl i64 [[TMP29]], 32
3853; CALLS-NEXT:    [[TMP31:%.*]] = or i64 [[TMP29]], [[TMP30]]
3854; CALLS-NEXT:    store i64 [[TMP31]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_origin_tls to i64), i64 16) to ptr), align 8
3855; CALLS-NEXT:    store i64 [[_MSLD]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 24) to ptr), align 8
3856; CALLS-NEXT:    [[TMP32:%.*]] = zext i32 [[TMP13]] to i64
3857; CALLS-NEXT:    [[TMP33:%.*]] = shl i64 [[TMP32]], 32
3858; CALLS-NEXT:    [[TMP34:%.*]] = or i64 [[TMP32]], [[TMP33]]
3859; CALLS-NEXT:    store i64 [[TMP34]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_origin_tls to i64), i64 24) to ptr), align 8
3860; CALLS-NEXT:    store i64 [[_MSLD2]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 32) to ptr), align 8
3861; CALLS-NEXT:    [[TMP35:%.*]] = zext i32 [[TMP19]] to i64
3862; CALLS-NEXT:    [[TMP36:%.*]] = shl i64 [[TMP35]], 32
3863; CALLS-NEXT:    [[TMP37:%.*]] = or i64 [[TMP35]], [[TMP36]]
3864; CALLS-NEXT:    store i64 [[TMP37]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_origin_tls to i64), i64 32) to ptr), align 8
3865; CALLS-NEXT:    [[TMP38:%.*]] = ptrtoint ptr [[AGG_TMP2]] to i64
3866; CALLS-NEXT:    [[TMP39:%.*]] = xor i64 [[TMP38]], 87960930222080
3867; CALLS-NEXT:    [[TMP40:%.*]] = inttoptr i64 [[TMP39]] to ptr
3868; CALLS-NEXT:    [[TMP41:%.*]] = add i64 [[TMP39]], 17592186044416
3869; CALLS-NEXT:    [[TMP42:%.*]] = inttoptr i64 [[TMP41]] to ptr
3870; CALLS-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 48) to ptr), ptr align 8 [[TMP40]], i64 16, i1 false)
3871; CALLS-NEXT:    call void @llvm.memcpy.p0.p0.i64(ptr align 8 inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_origin_tls to i64), i64 48) to ptr), ptr align 8 [[TMP42]], i64 16, i1 false)
3872; CALLS-NEXT:    store i64 16, ptr @__msan_va_arg_overflow_size_tls, align 8
3873; CALLS-NEXT:    call void (i32, ...) @VAArgStructFn(i32 undef, i64 [[AGG_TMP_SROA_0_0_COPYLOAD]], i64 [[AGG_TMP_SROA_2_0_COPYLOAD]], i64 [[AGG_TMP_SROA_0_0_COPYLOAD]], i64 [[AGG_TMP_SROA_2_0_COPYLOAD]], ptr byval([[STRUCT_STRUCTBYVAL]]) align 8 [[AGG_TMP2]])
3874; CALLS-NEXT:    ret void
3875;
3876entry:
3877  %agg.tmp2 = alloca %struct.StructByVal, align 8
3878  %agg.tmp.sroa.0.0.copyload = load i64, ptr %s, align 4
3879  %agg.tmp.sroa.2.0..sroa_idx = getelementptr inbounds %struct.StructByVal, ptr %s, i64 0, i32 2
3880  %agg.tmp.sroa.2.0.copyload = load i64, ptr %agg.tmp.sroa.2.0..sroa_idx, align 4
3881  call void @llvm.memcpy.p0.p0.i64(ptr align 4 %agg.tmp2, ptr align 4 %s, i64 16, i1 false)
3882  call void (i32, ...) @VAArgStructFn(i32 undef, i64 %agg.tmp.sroa.0.0.copyload, i64 %agg.tmp.sroa.2.0.copyload, i64 %agg.tmp.sroa.0.0.copyload, i64 %agg.tmp.sroa.2.0.copyload, ptr byval(%struct.StructByVal) align 8 %agg.tmp2)
3883  ret void
3884}
3885
3886attributes #0 = { "target-features"="+fxsr,+x87,-sse" }
3887
3888
3889declare i32 @InnerTailCall(i32 %a)
3890
3891define void @MismatchedReturnTypeTailCall(i32 %a) sanitize_memory {
3892; CHECK-LABEL: define void @MismatchedReturnTypeTailCall(
3893; CHECK-SAME: i32 [[A:%.*]]) #[[ATTR6]] {
3894; CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr @__msan_param_tls, align 8
3895; CHECK-NEXT:    call void @llvm.donothing()
3896; CHECK-NEXT:    store i32 [[TMP1]], ptr @__msan_param_tls, align 8
3897; CHECK-NEXT:    store i32 0, ptr @__msan_retval_tls, align 8
3898; CHECK-NEXT:    [[B:%.*]] = tail call i32 @InnerTailCall(i32 [[A]])
3899; CHECK-NEXT:    [[_MSRET:%.*]] = load i32, ptr @__msan_retval_tls, align 8
3900; CHECK-NEXT:    ret void
3901;
3902; ORIGIN-LABEL: define void @MismatchedReturnTypeTailCall(
3903; ORIGIN-SAME: i32 [[A:%.*]]) #[[ATTR6]] {
3904; ORIGIN-NEXT:    [[TMP1:%.*]] = load i32, ptr @__msan_param_tls, align 8
3905; ORIGIN-NEXT:    [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
3906; ORIGIN-NEXT:    call void @llvm.donothing()
3907; ORIGIN-NEXT:    store i32 [[TMP1]], ptr @__msan_param_tls, align 8
3908; ORIGIN-NEXT:    store i32 [[TMP2]], ptr @__msan_param_origin_tls, align 4
3909; ORIGIN-NEXT:    store i32 0, ptr @__msan_retval_tls, align 8
3910; ORIGIN-NEXT:    [[B:%.*]] = tail call i32 @InnerTailCall(i32 [[A]])
3911; ORIGIN-NEXT:    [[_MSRET:%.*]] = load i32, ptr @__msan_retval_tls, align 8
3912; ORIGIN-NEXT:    [[TMP3:%.*]] = load i32, ptr @__msan_retval_origin_tls, align 4
3913; ORIGIN-NEXT:    ret void
3914;
3915; CALLS-LABEL: define void @MismatchedReturnTypeTailCall(
3916; CALLS-SAME: i32 [[A:%.*]]) #[[ATTR6]] {
3917; CALLS-NEXT:    [[TMP1:%.*]] = load i32, ptr @__msan_param_tls, align 8
3918; CALLS-NEXT:    [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
3919; CALLS-NEXT:    call void @llvm.donothing()
3920; CALLS-NEXT:    store i32 [[TMP1]], ptr @__msan_param_tls, align 8
3921; CALLS-NEXT:    store i32 [[TMP2]], ptr @__msan_param_origin_tls, align 4
3922; CALLS-NEXT:    store i32 0, ptr @__msan_retval_tls, align 8
3923; CALLS-NEXT:    [[B:%.*]] = tail call i32 @InnerTailCall(i32 [[A]])
3924; CALLS-NEXT:    [[_MSRET:%.*]] = load i32, ptr @__msan_retval_tls, align 8
3925; CALLS-NEXT:    [[TMP3:%.*]] = load i32, ptr @__msan_retval_origin_tls, align 4
3926; CALLS-NEXT:    ret void
3927;
3928  %b = tail call i32 @InnerTailCall(i32 %a)
3929  ret void
3930}
3931
3932; We used to strip off the 'tail' modifier, but now that we unpoison return slot
3933; shadow before the call, we don't need to anymore.
3934
3935
3936
3937declare i32 @MustTailCall(i32 %a)
3938
3939define i32 @CallMustTailCall(i32 %a) sanitize_memory {
3940; CHECK-LABEL: define i32 @CallMustTailCall(
3941; CHECK-SAME: i32 [[A:%.*]]) #[[ATTR6]] {
3942; CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr @__msan_param_tls, align 8
3943; CHECK-NEXT:    call void @llvm.donothing()
3944; CHECK-NEXT:    store i32 [[TMP1]], ptr @__msan_param_tls, align 8
3945; CHECK-NEXT:    [[B:%.*]] = musttail call i32 @MustTailCall(i32 [[A]])
3946; CHECK-NEXT:    ret i32 [[B]]
3947;
3948; ORIGIN-LABEL: define i32 @CallMustTailCall(
3949; ORIGIN-SAME: i32 [[A:%.*]]) #[[ATTR6]] {
3950; ORIGIN-NEXT:    [[TMP1:%.*]] = load i32, ptr @__msan_param_tls, align 8
3951; ORIGIN-NEXT:    [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
3952; ORIGIN-NEXT:    call void @llvm.donothing()
3953; ORIGIN-NEXT:    store i32 [[TMP1]], ptr @__msan_param_tls, align 8
3954; ORIGIN-NEXT:    store i32 [[TMP2]], ptr @__msan_param_origin_tls, align 4
3955; ORIGIN-NEXT:    [[B:%.*]] = musttail call i32 @MustTailCall(i32 [[A]])
3956; ORIGIN-NEXT:    ret i32 [[B]]
3957;
3958; CALLS-LABEL: define i32 @CallMustTailCall(
3959; CALLS-SAME: i32 [[A:%.*]]) #[[ATTR6]] {
3960; CALLS-NEXT:    [[TMP1:%.*]] = load i32, ptr @__msan_param_tls, align 8
3961; CALLS-NEXT:    [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
3962; CALLS-NEXT:    call void @llvm.donothing()
3963; CALLS-NEXT:    store i32 [[TMP1]], ptr @__msan_param_tls, align 8
3964; CALLS-NEXT:    store i32 [[TMP2]], ptr @__msan_param_origin_tls, align 4
3965; CALLS-NEXT:    [[B:%.*]] = musttail call i32 @MustTailCall(i32 [[A]])
3966; CALLS-NEXT:    ret i32 [[B]]
3967;
3968  %b = musttail call i32 @MustTailCall(i32 %a)
3969  ret i32 %b
3970}
3971
3972; For "musttail" calls we can not insert any shadow manipulating code between
3973; call and the return instruction. And we don't need to, because everything is
3974; taken care of in the callee.
3975
3976declare ptr @MismatchingMustTailCall(i32 %a)
3977
3978define ptr @MismatchingCallMustTailCall(i32 %a) sanitize_memory {
3979; CHECK-LABEL: define ptr @MismatchingCallMustTailCall(
3980; CHECK-SAME: i32 [[A:%.*]]) #[[ATTR6]] {
3981; CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr @__msan_param_tls, align 8
3982; CHECK-NEXT:    call void @llvm.donothing()
3983; CHECK-NEXT:    store i32 [[TMP1]], ptr @__msan_param_tls, align 8
3984; CHECK-NEXT:    [[B:%.*]] = musttail call ptr @MismatchingMustTailCall(i32 [[A]])
3985; CHECK-NEXT:    ret ptr [[B]]
3986;
3987; ORIGIN-LABEL: define ptr @MismatchingCallMustTailCall(
3988; ORIGIN-SAME: i32 [[A:%.*]]) #[[ATTR6]] {
3989; ORIGIN-NEXT:    [[TMP1:%.*]] = load i32, ptr @__msan_param_tls, align 8
3990; ORIGIN-NEXT:    [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
3991; ORIGIN-NEXT:    call void @llvm.donothing()
3992; ORIGIN-NEXT:    store i32 [[TMP1]], ptr @__msan_param_tls, align 8
3993; ORIGIN-NEXT:    store i32 [[TMP2]], ptr @__msan_param_origin_tls, align 4
3994; ORIGIN-NEXT:    [[B:%.*]] = musttail call ptr @MismatchingMustTailCall(i32 [[A]])
3995; ORIGIN-NEXT:    ret ptr [[B]]
3996;
3997; CALLS-LABEL: define ptr @MismatchingCallMustTailCall(
3998; CALLS-SAME: i32 [[A:%.*]]) #[[ATTR6]] {
3999; CALLS-NEXT:    [[TMP1:%.*]] = load i32, ptr @__msan_param_tls, align 8
4000; CALLS-NEXT:    [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
4001; CALLS-NEXT:    call void @llvm.donothing()
4002; CALLS-NEXT:    store i32 [[TMP1]], ptr @__msan_param_tls, align 8
4003; CALLS-NEXT:    store i32 [[TMP2]], ptr @__msan_param_origin_tls, align 4
4004; CALLS-NEXT:    [[B:%.*]] = musttail call ptr @MismatchingMustTailCall(i32 [[A]])
4005; CALLS-NEXT:    ret ptr [[B]]
4006;
4007  %b = musttail call ptr @MismatchingMustTailCall(i32 %a)
4008  ret ptr %b
4009}
4010
4011; For "musttail" calls we can not insert any shadow manipulating code between
4012; call and the return instruction. And we don't need to, because everything is
4013; taken care of in the callee.
4014
4015
4016
4017
4018; CHECK-CALLS: declare void @__msan_maybe_warning_1(i8, i32)
4019; CHECK-CALLS: declare void @__msan_maybe_store_origin_1(i8, ptr, i32)
4020; CHECK-CALLS: declare void @__msan_maybe_warning_2(i16, i32)
4021; CHECK-CALLS: declare void @__msan_maybe_store_origin_2(i16, ptr, i32)
4022; CHECK-CALLS: declare void @__msan_maybe_warning_4(i32, i32)
4023; CHECK-CALLS: declare void @__msan_maybe_store_origin_4(i32, ptr, i32)
4024; CHECK-CALLS: declare void @__msan_maybe_warning_8(i64, i32)
4025; CHECK-CALLS: declare void @__msan_maybe_store_origin_8(i64, ptr, i32)
4026;.
4027; CHECK: [[PROF1]] = !{!"branch_weights", i32 1, i32 1048575}
4028;.
4029; ORIGIN: [[PROF1]] = !{!"branch_weights", i32 1, i32 1048575}
4030;.
4031