; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
; RUN: opt < %s -S -msan-check-access-address=0 -passes="msan" 2>&1 | FileCheck %s
; RUN: opt < %s -S -msan-check-access-address=0 -passes="msan" -msan-track-origins=2 2>&1 | FileCheck %s --check-prefixes=ORIGIN

target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"

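; Plain load + store of a scalable <vscale x 4 x i32> vector: the shadow is
; copied through the shadow mapping, and with -msan-track-origins=2 the origin
; is written back in a vscale-sized loop guarded by a check of the loaded shadow.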
define void @test_load_store_i32(ptr %a, ptr %b) sanitize_memory {
; CHECK-LABEL: define void @test_load_store_i32(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0:[0-9]+]] {
; CHECK-NEXT:    call void @llvm.donothing()
; CHECK-NEXT:    [[TMP1:%.*]] = load <vscale x 4 x i32>, ptr [[A]], align 16
; CHECK-NEXT:    [[TMP2:%.*]] = ptrtoint ptr [[A]] to i64
; CHECK-NEXT:    [[TMP3:%.*]] = xor i64 [[TMP2]], 87960930222080
; CHECK-NEXT:    [[TMP4:%.*]] = inttoptr i64 [[TMP3]] to ptr
; CHECK-NEXT:    [[_MSLD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP4]], align 16
; CHECK-NEXT:    [[TMP5:%.*]] = ptrtoint ptr [[B]] to i64
; CHECK-NEXT:    [[TMP6:%.*]] = xor i64 [[TMP5]], 87960930222080
; CHECK-NEXT:    [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
; CHECK-NEXT:    store <vscale x 4 x i32> [[_MSLD]], ptr [[TMP7]], align 16
; CHECK-NEXT:    store <vscale x 4 x i32> [[TMP1]], ptr [[B]], align 16
; CHECK-NEXT:    ret void
;
; ORIGIN-LABEL: define void @test_load_store_i32(
; ORIGIN-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0:[0-9]+]] {
; ORIGIN-NEXT:    call void @llvm.donothing()
; ORIGIN-NEXT:    [[TMP1:%.*]] = load <vscale x 4 x i32>, ptr [[A]], align 16
; ORIGIN-NEXT:    [[TMP2:%.*]] = ptrtoint ptr [[A]] to i64
; ORIGIN-NEXT:    [[TMP3:%.*]] = xor i64 [[TMP2]], 87960930222080
; ORIGIN-NEXT:    [[TMP4:%.*]] = inttoptr i64 [[TMP3]] to ptr
; ORIGIN-NEXT:    [[TMP5:%.*]] = add i64 [[TMP3]], 17592186044416
; ORIGIN-NEXT:    [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
; ORIGIN-NEXT:    [[_MSLD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP4]], align 16
; ORIGIN-NEXT:    [[TMP7:%.*]] = load i32, ptr [[TMP6]], align 16
; ORIGIN-NEXT:    [[TMP8:%.*]] = ptrtoint ptr [[B]] to i64
; ORIGIN-NEXT:    [[TMP9:%.*]] = xor i64 [[TMP8]], 87960930222080
; ORIGIN-NEXT:    [[TMP10:%.*]] = inttoptr i64 [[TMP9]] to ptr
; ORIGIN-NEXT:    [[TMP11:%.*]] = add i64 [[TMP9]], 17592186044416
; ORIGIN-NEXT:    [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
; ORIGIN-NEXT:    store <vscale x 4 x i32> [[_MSLD]], ptr [[TMP10]], align 16
; ORIGIN-NEXT:    [[TMP13:%.*]] = call i32 @llvm.vector.reduce.or.nxv4i32(<vscale x 4 x i32> [[_MSLD]])
; ORIGIN-NEXT:    [[_MSCMP:%.*]] = icmp ne i32 [[TMP13]], 0
; ORIGIN-NEXT:    br i1 [[_MSCMP]], label [[TMP14:%.*]], label [[TMP21:%.*]], !prof [[PROF0:![0-9]+]]
; ORIGIN:       14:
; ORIGIN-NEXT:    [[TMP15:%.*]] = call i32 @__msan_chain_origin(i32 [[TMP7]])
; ORIGIN-NEXT:    [[TMP16:%.*]] = call i64 @llvm.vscale.i64()
; ORIGIN-NEXT:    [[TMP17:%.*]] = mul i64 [[TMP16]], 16
; ORIGIN-NEXT:    [[TMP18:%.*]] = add i64 [[TMP17]], 3
; ORIGIN-NEXT:    [[TMP19:%.*]] = udiv i64 [[TMP18]], 4
; ORIGIN-NEXT:    br label [[DOTSPLIT:%.*]]
; ORIGIN:       .split:
; ORIGIN-NEXT:    [[IV:%.*]] = phi i64 [ 0, [[TMP14]] ], [ [[IV_NEXT:%.*]], [[DOTSPLIT]] ]
; ORIGIN-NEXT:    [[TMP20:%.*]] = getelementptr i32, ptr [[TMP12]], i64 [[IV]]
; ORIGIN-NEXT:    store i32 [[TMP15]], ptr [[TMP20]], align 4
; ORIGIN-NEXT:    [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; ORIGIN-NEXT:    [[IV_CHECK:%.*]] = icmp eq i64 [[IV_NEXT]], [[TMP19]]
; ORIGIN-NEXT:    br i1 [[IV_CHECK]], label [[DOTSPLIT_SPLIT:%.*]], label [[DOTSPLIT]]
; ORIGIN:       .split.split:
; ORIGIN-NEXT:    br label [[TMP21]]
; ORIGIN:       21:
; ORIGIN-NEXT:    store <vscale x 4 x i32> [[TMP1]], ptr [[B]], align 16
; ORIGIN-NEXT:    ret void
;
  %1 = load <vscale x 4 x i32>, ptr %a
  store <vscale x 4 x i32> %1, ptr %b
  ret void
}

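; Add of two scalable i64 vectors: the operand shadows are OR-ed to form the
; result shadow, and the result origin is picked with a select on the second
; operand's shadow. The value loaded from %b is what gets stored back, so only
; its shadow and origin reach the store.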
define void @test_load_store_add_int(ptr %a, ptr %b) sanitize_memory {
; CHECK-LABEL: define void @test_load_store_add_int(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT:    call void @llvm.donothing()
; CHECK-NEXT:    [[TMP1:%.*]] = load <vscale x 8 x i64>, ptr [[A]], align 64
; CHECK-NEXT:    [[TMP2:%.*]] = ptrtoint ptr [[A]] to i64
; CHECK-NEXT:    [[TMP3:%.*]] = xor i64 [[TMP2]], 87960930222080
; CHECK-NEXT:    [[TMP4:%.*]] = inttoptr i64 [[TMP3]] to ptr
; CHECK-NEXT:    [[_MSLD:%.*]] = load <vscale x 8 x i64>, ptr [[TMP4]], align 64
; CHECK-NEXT:    [[TMP5:%.*]] = load <vscale x 8 x i64>, ptr [[B]], align 64
; CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[B]] to i64
; CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 87960930222080
; CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
; CHECK-NEXT:    [[_MSLD1:%.*]] = load <vscale x 8 x i64>, ptr [[TMP8]], align 64
; CHECK-NEXT:    [[_MSPROP:%.*]] = or <vscale x 8 x i64> [[_MSLD]], [[_MSLD1]]
; CHECK-NEXT:    [[TMP9:%.*]] = add <vscale x 8 x i64> [[TMP1]], [[TMP5]]
; CHECK-NEXT:    [[TMP10:%.*]] = ptrtoint ptr [[B]] to i64
; CHECK-NEXT:    [[TMP11:%.*]] = xor i64 [[TMP10]], 87960930222080
; CHECK-NEXT:    [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
; CHECK-NEXT:    store <vscale x 8 x i64> [[_MSLD1]], ptr [[TMP12]], align 64
; CHECK-NEXT:    store <vscale x 8 x i64> [[TMP5]], ptr [[B]], align 64
; CHECK-NEXT:    ret void
;
; ORIGIN-LABEL: define void @test_load_store_add_int(
; ORIGIN-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; ORIGIN-NEXT:    call void @llvm.donothing()
; ORIGIN-NEXT:    [[TMP1:%.*]] = load <vscale x 8 x i64>, ptr [[A]], align 64
; ORIGIN-NEXT:    [[TMP2:%.*]] = ptrtoint ptr [[A]] to i64
; ORIGIN-NEXT:    [[TMP3:%.*]] = xor i64 [[TMP2]], 87960930222080
; ORIGIN-NEXT:    [[TMP4:%.*]] = inttoptr i64 [[TMP3]] to ptr
; ORIGIN-NEXT:    [[TMP5:%.*]] = add i64 [[TMP3]], 17592186044416
; ORIGIN-NEXT:    [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
; ORIGIN-NEXT:    [[_MSLD:%.*]] = load <vscale x 8 x i64>, ptr [[TMP4]], align 64
; ORIGIN-NEXT:    [[TMP7:%.*]] = load i32, ptr [[TMP6]], align 64
; ORIGIN-NEXT:    [[TMP8:%.*]] = load <vscale x 8 x i64>, ptr [[B]], align 64
; ORIGIN-NEXT:    [[TMP9:%.*]] = ptrtoint ptr [[B]] to i64
; ORIGIN-NEXT:    [[TMP10:%.*]] = xor i64 [[TMP9]], 87960930222080
; ORIGIN-NEXT:    [[TMP11:%.*]] = inttoptr i64 [[TMP10]] to ptr
; ORIGIN-NEXT:    [[TMP12:%.*]] = add i64 [[TMP10]], 17592186044416
; ORIGIN-NEXT:    [[TMP13:%.*]] = inttoptr i64 [[TMP12]] to ptr
; ORIGIN-NEXT:    [[_MSLD1:%.*]] = load <vscale x 8 x i64>, ptr [[TMP11]], align 64
; ORIGIN-NEXT:    [[TMP14:%.*]] = load i32, ptr [[TMP13]], align 64
; ORIGIN-NEXT:    [[_MSPROP:%.*]] = or <vscale x 8 x i64> [[_MSLD]], [[_MSLD1]]
; ORIGIN-NEXT:    [[TMP15:%.*]] = call i64 @llvm.vector.reduce.or.nxv8i64(<vscale x 8 x i64> [[_MSLD1]])
; ORIGIN-NEXT:    [[TMP16:%.*]] = icmp ne i64 [[TMP15]], 0
; ORIGIN-NEXT:    [[TMP17:%.*]] = select i1 [[TMP16]], i32 [[TMP14]], i32 [[TMP7]]
; ORIGIN-NEXT:    [[TMP18:%.*]] = add <vscale x 8 x i64> [[TMP1]], [[TMP8]]
; ORIGIN-NEXT:    [[TMP19:%.*]] = ptrtoint ptr [[B]] to i64
; ORIGIN-NEXT:    [[TMP20:%.*]] = xor i64 [[TMP19]], 87960930222080
; ORIGIN-NEXT:    [[TMP21:%.*]] = inttoptr i64 [[TMP20]] to ptr
; ORIGIN-NEXT:    [[TMP22:%.*]] = add i64 [[TMP20]], 17592186044416
; ORIGIN-NEXT:    [[TMP23:%.*]] = inttoptr i64 [[TMP22]] to ptr
; ORIGIN-NEXT:    store <vscale x 8 x i64> [[_MSLD1]], ptr [[TMP21]], align 64
; ORIGIN-NEXT:    [[TMP24:%.*]] = call i64 @llvm.vector.reduce.or.nxv8i64(<vscale x 8 x i64> [[_MSLD1]])
; ORIGIN-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP24]], 0
; ORIGIN-NEXT:    br i1 [[_MSCMP]], label [[TMP25:%.*]], label [[TMP32:%.*]], !prof [[PROF0]]
; ORIGIN:       25:
; ORIGIN-NEXT:    [[TMP26:%.*]] = call i32 @__msan_chain_origin(i32 [[TMP14]])
; ORIGIN-NEXT:    [[TMP27:%.*]] = call i64 @llvm.vscale.i64()
; ORIGIN-NEXT:    [[TMP28:%.*]] = mul i64 [[TMP27]], 64
; ORIGIN-NEXT:    [[TMP29:%.*]] = add i64 [[TMP28]], 3
; ORIGIN-NEXT:    [[TMP30:%.*]] = udiv i64 [[TMP29]], 4
; ORIGIN-NEXT:    br label [[DOTSPLIT:%.*]]
; ORIGIN:       .split:
; ORIGIN-NEXT:    [[IV:%.*]] = phi i64 [ 0, [[TMP25]] ], [ [[IV_NEXT:%.*]], [[DOTSPLIT]] ]
; ORIGIN-NEXT:    [[TMP31:%.*]] = getelementptr i32, ptr [[TMP23]], i64 [[IV]]
; ORIGIN-NEXT:    store i32 [[TMP26]], ptr [[TMP31]], align 4
; ORIGIN-NEXT:    [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; ORIGIN-NEXT:    [[IV_CHECK:%.*]] = icmp eq i64 [[IV_NEXT]], [[TMP30]]
; ORIGIN-NEXT:    br i1 [[IV_CHECK]], label [[DOTSPLIT_SPLIT:%.*]], label [[DOTSPLIT]]
; ORIGIN:       .split.split:
; ORIGIN-NEXT:    br label [[TMP32]]
; ORIGIN:       32:
; ORIGIN-NEXT:    store <vscale x 8 x i64> [[TMP8]], ptr [[B]], align 64
; ORIGIN-NEXT:    ret void
;
  %1 = load <vscale x 8 x i64>, ptr %a
  %2 = load <vscale x 8 x i64>, ptr %b
  %3 = add <vscale x 8 x i64> %1, %2
  store <vscale x 8 x i64> %2, ptr %b
  ret void
}

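; Same as test_load_store_i32, but with a floating-point element type; the
; shadow keeps the matching integer shape (<vscale x 4 x i32>).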
define void @test_load_store_float(ptr %a, ptr %b) sanitize_memory {
; CHECK-LABEL: define void @test_load_store_float(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT:    call void @llvm.donothing()
; CHECK-NEXT:    [[TMP1:%.*]] = load <vscale x 4 x float>, ptr [[A]], align 16
; CHECK-NEXT:    [[TMP2:%.*]] = ptrtoint ptr [[A]] to i64
; CHECK-NEXT:    [[TMP3:%.*]] = xor i64 [[TMP2]], 87960930222080
; CHECK-NEXT:    [[TMP4:%.*]] = inttoptr i64 [[TMP3]] to ptr
; CHECK-NEXT:    [[_MSLD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP4]], align 16
; CHECK-NEXT:    [[TMP5:%.*]] = ptrtoint ptr [[B]] to i64
; CHECK-NEXT:    [[TMP6:%.*]] = xor i64 [[TMP5]], 87960930222080
; CHECK-NEXT:    [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
; CHECK-NEXT:    store <vscale x 4 x i32> [[_MSLD]], ptr [[TMP7]], align 16
; CHECK-NEXT:    store <vscale x 4 x float> [[TMP1]], ptr [[B]], align 16
; CHECK-NEXT:    ret void
;
; ORIGIN-LABEL: define void @test_load_store_float(
; ORIGIN-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; ORIGIN-NEXT:    call void @llvm.donothing()
; ORIGIN-NEXT:    [[TMP1:%.*]] = load <vscale x 4 x float>, ptr [[A]], align 16
; ORIGIN-NEXT:    [[TMP2:%.*]] = ptrtoint ptr [[A]] to i64
; ORIGIN-NEXT:    [[TMP3:%.*]] = xor i64 [[TMP2]], 87960930222080
; ORIGIN-NEXT:    [[TMP4:%.*]] = inttoptr i64 [[TMP3]] to ptr
; ORIGIN-NEXT:    [[TMP5:%.*]] = add i64 [[TMP3]], 17592186044416
; ORIGIN-NEXT:    [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
; ORIGIN-NEXT:    [[_MSLD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP4]], align 16
; ORIGIN-NEXT:    [[TMP7:%.*]] = load i32, ptr [[TMP6]], align 16
; ORIGIN-NEXT:    [[TMP8:%.*]] = ptrtoint ptr [[B]] to i64
; ORIGIN-NEXT:    [[TMP9:%.*]] = xor i64 [[TMP8]], 87960930222080
; ORIGIN-NEXT:    [[TMP10:%.*]] = inttoptr i64 [[TMP9]] to ptr
; ORIGIN-NEXT:    [[TMP11:%.*]] = add i64 [[TMP9]], 17592186044416
; ORIGIN-NEXT:    [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
; ORIGIN-NEXT:    store <vscale x 4 x i32> [[_MSLD]], ptr [[TMP10]], align 16
; ORIGIN-NEXT:    [[TMP13:%.*]] = call i32 @llvm.vector.reduce.or.nxv4i32(<vscale x 4 x i32> [[_MSLD]])
; ORIGIN-NEXT:    [[_MSCMP:%.*]] = icmp ne i32 [[TMP13]], 0
; ORIGIN-NEXT:    br i1 [[_MSCMP]], label [[TMP14:%.*]], label [[TMP21:%.*]], !prof [[PROF0]]
; ORIGIN:       14:
; ORIGIN-NEXT:    [[TMP15:%.*]] = call i32 @__msan_chain_origin(i32 [[TMP7]])
; ORIGIN-NEXT:    [[TMP16:%.*]] = call i64 @llvm.vscale.i64()
; ORIGIN-NEXT:    [[TMP17:%.*]] = mul i64 [[TMP16]], 16
; ORIGIN-NEXT:    [[TMP18:%.*]] = add i64 [[TMP17]], 3
; ORIGIN-NEXT:    [[TMP19:%.*]] = udiv i64 [[TMP18]], 4
; ORIGIN-NEXT:    br label [[DOTSPLIT:%.*]]
; ORIGIN:       .split:
; ORIGIN-NEXT:    [[IV:%.*]] = phi i64 [ 0, [[TMP14]] ], [ [[IV_NEXT:%.*]], [[DOTSPLIT]] ]
; ORIGIN-NEXT:    [[TMP20:%.*]] = getelementptr i32, ptr [[TMP12]], i64 [[IV]]
; ORIGIN-NEXT:    store i32 [[TMP15]], ptr [[TMP20]], align 4
; ORIGIN-NEXT:    [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; ORIGIN-NEXT:    [[IV_CHECK:%.*]] = icmp eq i64 [[IV_NEXT]], [[TMP19]]
; ORIGIN-NEXT:    br i1 [[IV_CHECK]], label [[DOTSPLIT_SPLIT:%.*]], label [[DOTSPLIT]]
; ORIGIN:       .split.split:
; ORIGIN-NEXT:    br label [[TMP21]]
; ORIGIN:       21:
; ORIGIN-NEXT:    store <vscale x 4 x float> [[TMP1]], ptr [[B]], align 16
; ORIGIN-NEXT:    ret void
;
  %1 = load <vscale x 4 x float>, ptr %a
  store <vscale x 4 x float> %1, ptr %b
  ret void
}

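; fadd variant of test_load_store_add_int, using <vscale x 2 x float> values
; and <vscale x 2 x i32> shadows.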
define void @test_load_store_add_float(ptr %a, ptr %b) sanitize_memory {
; CHECK-LABEL: define void @test_load_store_add_float(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT:    call void @llvm.donothing()
; CHECK-NEXT:    [[TMP1:%.*]] = load <vscale x 2 x float>, ptr [[A]], align 8
; CHECK-NEXT:    [[TMP2:%.*]] = ptrtoint ptr [[A]] to i64
; CHECK-NEXT:    [[TMP3:%.*]] = xor i64 [[TMP2]], 87960930222080
; CHECK-NEXT:    [[TMP4:%.*]] = inttoptr i64 [[TMP3]] to ptr
; CHECK-NEXT:    [[_MSLD:%.*]] = load <vscale x 2 x i32>, ptr [[TMP4]], align 8
; CHECK-NEXT:    [[TMP5:%.*]] = load <vscale x 2 x float>, ptr [[B]], align 8
; CHECK-NEXT:    [[TMP6:%.*]] = ptrtoint ptr [[B]] to i64
; CHECK-NEXT:    [[TMP7:%.*]] = xor i64 [[TMP6]], 87960930222080
; CHECK-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
; CHECK-NEXT:    [[_MSLD1:%.*]] = load <vscale x 2 x i32>, ptr [[TMP8]], align 8
; CHECK-NEXT:    [[_MSPROP:%.*]] = or <vscale x 2 x i32> [[_MSLD]], [[_MSLD1]]
; CHECK-NEXT:    [[TMP9:%.*]] = fadd <vscale x 2 x float> [[TMP1]], [[TMP5]]
; CHECK-NEXT:    [[TMP10:%.*]] = ptrtoint ptr [[B]] to i64
; CHECK-NEXT:    [[TMP11:%.*]] = xor i64 [[TMP10]], 87960930222080
; CHECK-NEXT:    [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
; CHECK-NEXT:    store <vscale x 2 x i32> [[_MSLD1]], ptr [[TMP12]], align 8
; CHECK-NEXT:    store <vscale x 2 x float> [[TMP5]], ptr [[B]], align 8
; CHECK-NEXT:    ret void
;
; ORIGIN-LABEL: define void @test_load_store_add_float(
; ORIGIN-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; ORIGIN-NEXT:    call void @llvm.donothing()
; ORIGIN-NEXT:    [[TMP1:%.*]] = load <vscale x 2 x float>, ptr [[A]], align 8
; ORIGIN-NEXT:    [[TMP2:%.*]] = ptrtoint ptr [[A]] to i64
; ORIGIN-NEXT:    [[TMP3:%.*]] = xor i64 [[TMP2]], 87960930222080
; ORIGIN-NEXT:    [[TMP4:%.*]] = inttoptr i64 [[TMP3]] to ptr
; ORIGIN-NEXT:    [[TMP5:%.*]] = add i64 [[TMP3]], 17592186044416
; ORIGIN-NEXT:    [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
; ORIGIN-NEXT:    [[_MSLD:%.*]] = load <vscale x 2 x i32>, ptr [[TMP4]], align 8
; ORIGIN-NEXT:    [[TMP7:%.*]] = load i32, ptr [[TMP6]], align 8
; ORIGIN-NEXT:    [[TMP8:%.*]] = load <vscale x 2 x float>, ptr [[B]], align 8
; ORIGIN-NEXT:    [[TMP9:%.*]] = ptrtoint ptr [[B]] to i64
; ORIGIN-NEXT:    [[TMP10:%.*]] = xor i64 [[TMP9]], 87960930222080
; ORIGIN-NEXT:    [[TMP11:%.*]] = inttoptr i64 [[TMP10]] to ptr
; ORIGIN-NEXT:    [[TMP12:%.*]] = add i64 [[TMP10]], 17592186044416
; ORIGIN-NEXT:    [[TMP13:%.*]] = inttoptr i64 [[TMP12]] to ptr
; ORIGIN-NEXT:    [[_MSLD1:%.*]] = load <vscale x 2 x i32>, ptr [[TMP11]], align 8
; ORIGIN-NEXT:    [[TMP14:%.*]] = load i32, ptr [[TMP13]], align 8
; ORIGIN-NEXT:    [[_MSPROP:%.*]] = or <vscale x 2 x i32> [[_MSLD]], [[_MSLD1]]
; ORIGIN-NEXT:    [[TMP15:%.*]] = call i32 @llvm.vector.reduce.or.nxv2i32(<vscale x 2 x i32> [[_MSLD1]])
; ORIGIN-NEXT:    [[TMP16:%.*]] = icmp ne i32 [[TMP15]], 0
; ORIGIN-NEXT:    [[TMP17:%.*]] = select i1 [[TMP16]], i32 [[TMP14]], i32 [[TMP7]]
; ORIGIN-NEXT:    [[TMP18:%.*]] = fadd <vscale x 2 x float> [[TMP1]], [[TMP8]]
; ORIGIN-NEXT:    [[TMP19:%.*]] = ptrtoint ptr [[B]] to i64
; ORIGIN-NEXT:    [[TMP20:%.*]] = xor i64 [[TMP19]], 87960930222080
; ORIGIN-NEXT:    [[TMP21:%.*]] = inttoptr i64 [[TMP20]] to ptr
; ORIGIN-NEXT:    [[TMP22:%.*]] = add i64 [[TMP20]], 17592186044416
; ORIGIN-NEXT:    [[TMP23:%.*]] = inttoptr i64 [[TMP22]] to ptr
; ORIGIN-NEXT:    store <vscale x 2 x i32> [[_MSLD1]], ptr [[TMP21]], align 8
; ORIGIN-NEXT:    [[TMP24:%.*]] = call i32 @llvm.vector.reduce.or.nxv2i32(<vscale x 2 x i32> [[_MSLD1]])
; ORIGIN-NEXT:    [[_MSCMP:%.*]] = icmp ne i32 [[TMP24]], 0
; ORIGIN-NEXT:    br i1 [[_MSCMP]], label [[TMP25:%.*]], label [[TMP32:%.*]], !prof [[PROF0]]
; ORIGIN:       25:
; ORIGIN-NEXT:    [[TMP26:%.*]] = call i32 @__msan_chain_origin(i32 [[TMP14]])
; ORIGIN-NEXT:    [[TMP27:%.*]] = call i64 @llvm.vscale.i64()
; ORIGIN-NEXT:    [[TMP28:%.*]] = mul i64 [[TMP27]], 8
; ORIGIN-NEXT:    [[TMP29:%.*]] = add i64 [[TMP28]], 3
; ORIGIN-NEXT:    [[TMP30:%.*]] = udiv i64 [[TMP29]], 4
; ORIGIN-NEXT:    br label [[DOTSPLIT:%.*]]
; ORIGIN:       .split:
; ORIGIN-NEXT:    [[IV:%.*]] = phi i64 [ 0, [[TMP25]] ], [ [[IV_NEXT:%.*]], [[DOTSPLIT]] ]
; ORIGIN-NEXT:    [[TMP31:%.*]] = getelementptr i32, ptr [[TMP23]], i64 [[IV]]
; ORIGIN-NEXT:    store i32 [[TMP26]], ptr [[TMP31]], align 4
; ORIGIN-NEXT:    [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; ORIGIN-NEXT:    [[IV_CHECK:%.*]] = icmp eq i64 [[IV_NEXT]], [[TMP30]]
; ORIGIN-NEXT:    br i1 [[IV_CHECK]], label [[DOTSPLIT_SPLIT:%.*]], label [[DOTSPLIT]]
; ORIGIN:       .split.split:
; ORIGIN-NEXT:    br label [[TMP32]]
; ORIGIN:       32:
; ORIGIN-NEXT:    store <vscale x 2 x float> [[TMP8]], ptr [[B]], align 8
; ORIGIN-NEXT:    ret void
;
  %1 = load <vscale x 2 x float>, ptr %a
  %2 = load <vscale x 2 x float>, ptr %b
  %3 = fadd <vscale x 2 x float> %1, %2
  store <vscale x 2 x float> %2, ptr %b
  ret void
}

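; Returning a scalable vector: the returned value's shadow goes to
; __msan_retval_tls, and with origins its origin goes to __msan_retval_origin_tls.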
define <vscale x 2 x float> @fn_ret(ptr %a) sanitize_memory {
; CHECK-LABEL: define <vscale x 2 x float> @fn_ret(
; CHECK-SAME: ptr [[A:%.*]]) #[[ATTR0]] {
; CHECK-NEXT:    call void @llvm.donothing()
; CHECK-NEXT:    [[TMP1:%.*]] = load <vscale x 2 x float>, ptr [[A]], align 8
; CHECK-NEXT:    [[TMP2:%.*]] = ptrtoint ptr [[A]] to i64
; CHECK-NEXT:    [[TMP3:%.*]] = xor i64 [[TMP2]], 87960930222080
; CHECK-NEXT:    [[TMP4:%.*]] = inttoptr i64 [[TMP3]] to ptr
; CHECK-NEXT:    [[_MSLD:%.*]] = load <vscale x 2 x i32>, ptr [[TMP4]], align 8
; CHECK-NEXT:    store <vscale x 2 x i32> [[_MSLD]], ptr @__msan_retval_tls, align 8
; CHECK-NEXT:    ret <vscale x 2 x float> [[TMP1]]
;
; ORIGIN-LABEL: define <vscale x 2 x float> @fn_ret(
; ORIGIN-SAME: ptr [[A:%.*]]) #[[ATTR0]] {
; ORIGIN-NEXT:    call void @llvm.donothing()
; ORIGIN-NEXT:    [[TMP1:%.*]] = load <vscale x 2 x float>, ptr [[A]], align 8
; ORIGIN-NEXT:    [[TMP2:%.*]] = ptrtoint ptr [[A]] to i64
; ORIGIN-NEXT:    [[TMP3:%.*]] = xor i64 [[TMP2]], 87960930222080
; ORIGIN-NEXT:    [[TMP4:%.*]] = inttoptr i64 [[TMP3]] to ptr
; ORIGIN-NEXT:    [[TMP5:%.*]] = add i64 [[TMP3]], 17592186044416
; ORIGIN-NEXT:    [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
; ORIGIN-NEXT:    [[_MSLD:%.*]] = load <vscale x 2 x i32>, ptr [[TMP4]], align 8
; ORIGIN-NEXT:    [[TMP7:%.*]] = load i32, ptr [[TMP6]], align 8
; ORIGIN-NEXT:    store <vscale x 2 x i32> [[_MSLD]], ptr @__msan_retval_tls, align 8
; ORIGIN-NEXT:    store i32 [[TMP7]], ptr @__msan_retval_origin_tls, align 4
; ORIGIN-NEXT:    ret <vscale x 2 x float> [[TMP1]]
;
  %1 = load <vscale x 2 x float>, ptr %a
  ret <vscale x 2 x float> %1
}

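; Caller side of fn_ret: the return shadow is reloaded from __msan_retval_tls
; (and its origin from __msan_retval_origin_tls) and stored together with the value.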
define void @test_ret(ptr %a, ptr %b) sanitize_memory {
; CHECK-LABEL: define void @test_ret(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT:    [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
; CHECK-NEXT:    call void @llvm.donothing()
; CHECK-NEXT:    store i64 [[TMP1]], ptr @__msan_param_tls, align 8
; CHECK-NEXT:    store <vscale x 2 x i32> zeroinitializer, ptr @__msan_retval_tls, align 8
; CHECK-NEXT:    [[TMP5:%.*]] = call <vscale x 2 x float> @fn_ret(ptr [[A]])
; CHECK-NEXT:    [[_MSRET:%.*]] = load <vscale x 2 x i32>, ptr @__msan_retval_tls, align 8
; CHECK-NEXT:    [[TMP2:%.*]] = ptrtoint ptr [[B]] to i64
; CHECK-NEXT:    [[TMP3:%.*]] = xor i64 [[TMP2]], 87960930222080
; CHECK-NEXT:    [[TMP4:%.*]] = inttoptr i64 [[TMP3]] to ptr
; CHECK-NEXT:    store <vscale x 2 x i32> [[_MSRET]], ptr [[TMP4]], align 8
; CHECK-NEXT:    store <vscale x 2 x float> [[TMP5]], ptr [[B]], align 8
; CHECK-NEXT:    ret void
;
; ORIGIN-LABEL: define void @test_ret(
; ORIGIN-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; ORIGIN-NEXT:    [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
; ORIGIN-NEXT:    [[TMP2:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
; ORIGIN-NEXT:    call void @llvm.donothing()
; ORIGIN-NEXT:    store i64 [[TMP1]], ptr @__msan_param_tls, align 8
; ORIGIN-NEXT:    store i32 [[TMP2]], ptr @__msan_param_origin_tls, align 4
; ORIGIN-NEXT:    store <vscale x 2 x i32> zeroinitializer, ptr @__msan_retval_tls, align 8
; ORIGIN-NEXT:    [[TMP3:%.*]] = call <vscale x 2 x float> @fn_ret(ptr [[A]])
; ORIGIN-NEXT:    [[_MSRET:%.*]] = load <vscale x 2 x i32>, ptr @__msan_retval_tls, align 8
; ORIGIN-NEXT:    [[TMP4:%.*]] = load i32, ptr @__msan_retval_origin_tls, align 4
; ORIGIN-NEXT:    [[TMP5:%.*]] = ptrtoint ptr [[B]] to i64
; ORIGIN-NEXT:    [[TMP6:%.*]] = xor i64 [[TMP5]], 87960930222080
; ORIGIN-NEXT:    [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
; ORIGIN-NEXT:    [[TMP8:%.*]] = add i64 [[TMP6]], 17592186044416
; ORIGIN-NEXT:    [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr
; ORIGIN-NEXT:    store <vscale x 2 x i32> [[_MSRET]], ptr [[TMP7]], align 8
; ORIGIN-NEXT:    [[TMP10:%.*]] = call i32 @llvm.vector.reduce.or.nxv2i32(<vscale x 2 x i32> [[_MSRET]])
; ORIGIN-NEXT:    [[_MSCMP:%.*]] = icmp ne i32 [[TMP10]], 0
; ORIGIN-NEXT:    br i1 [[_MSCMP]], label [[TMP11:%.*]], label [[TMP18:%.*]], !prof [[PROF0]]
; ORIGIN:       11:
; ORIGIN-NEXT:    [[TMP12:%.*]] = call i32 @__msan_chain_origin(i32 [[TMP4]])
; ORIGIN-NEXT:    [[TMP13:%.*]] = call i64 @llvm.vscale.i64()
; ORIGIN-NEXT:    [[TMP14:%.*]] = mul i64 [[TMP13]], 8
; ORIGIN-NEXT:    [[TMP15:%.*]] = add i64 [[TMP14]], 3
; ORIGIN-NEXT:    [[TMP16:%.*]] = udiv i64 [[TMP15]], 4
; ORIGIN-NEXT:    br label [[DOTSPLIT:%.*]]
; ORIGIN:       .split:
; ORIGIN-NEXT:    [[IV:%.*]] = phi i64 [ 0, [[TMP11]] ], [ [[IV_NEXT:%.*]], [[DOTSPLIT]] ]
; ORIGIN-NEXT:    [[TMP17:%.*]] = getelementptr i32, ptr [[TMP9]], i64 [[IV]]
; ORIGIN-NEXT:    store i32 [[TMP12]], ptr [[TMP17]], align 4
; ORIGIN-NEXT:    [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; ORIGIN-NEXT:    [[IV_CHECK:%.*]] = icmp eq i64 [[IV_NEXT]], [[TMP16]]
; ORIGIN-NEXT:    br i1 [[IV_CHECK]], label [[DOTSPLIT_SPLIT:%.*]], label [[DOTSPLIT]]
; ORIGIN:       .split.split:
; ORIGIN-NEXT:    br label [[TMP18]]
; ORIGIN:       18:
; ORIGIN-NEXT:    store <vscale x 2 x float> [[TMP3]], ptr [[B]], align 8
; ORIGIN-NEXT:    ret void
;
  %1 = call <vscale x 2 x float> @fn_ret(ptr %a)
  store <vscale x 2 x float> %1, ptr %b
  ret void
}

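; A scalable vector parameter has no fixed-size TLS shadow slot: inside the
; callee its shadow is zeroinitializer (treated as clean); the caller checks the
; argument eagerly instead (see test_param below).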
define void @fn_param(<vscale x 2 x float> %a, ptr %b) sanitize_memory {
; CHECK-LABEL: define void @fn_param(
; CHECK-SAME: <vscale x 2 x float> [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT:    call void @llvm.donothing()
; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[B]] to i64
; CHECK-NEXT:    [[TMP2:%.*]] = xor i64 [[TMP1]], 87960930222080
; CHECK-NEXT:    [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to ptr
; CHECK-NEXT:    store <vscale x 2 x i32> zeroinitializer, ptr [[TMP3]], align 8
; CHECK-NEXT:    store <vscale x 2 x float> [[A]], ptr [[B]], align 8
; CHECK-NEXT:    ret void
;
; ORIGIN-LABEL: define void @fn_param(
; ORIGIN-SAME: <vscale x 2 x float> [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; ORIGIN-NEXT:    call void @llvm.donothing()
; ORIGIN-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[B]] to i64
; ORIGIN-NEXT:    [[TMP2:%.*]] = xor i64 [[TMP1]], 87960930222080
; ORIGIN-NEXT:    [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to ptr
; ORIGIN-NEXT:    [[TMP4:%.*]] = add i64 [[TMP2]], 17592186044416
; ORIGIN-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
; ORIGIN-NEXT:    store <vscale x 2 x i32> zeroinitializer, ptr [[TMP3]], align 8
; ORIGIN-NEXT:    [[TMP6:%.*]] = call i32 @llvm.vector.reduce.or.nxv2i32(<vscale x 2 x i32> zeroinitializer)
; ORIGIN-NEXT:    [[_MSCMP:%.*]] = icmp ne i32 [[TMP6]], 0
; ORIGIN-NEXT:    br i1 [[_MSCMP]], label [[TMP7:%.*]], label [[TMP14:%.*]], !prof [[PROF0]]
; ORIGIN:       7:
; ORIGIN-NEXT:    [[TMP8:%.*]] = call i32 @__msan_chain_origin(i32 0)
; ORIGIN-NEXT:    [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
; ORIGIN-NEXT:    [[TMP10:%.*]] = mul i64 [[TMP9]], 8
; ORIGIN-NEXT:    [[TMP11:%.*]] = add i64 [[TMP10]], 3
; ORIGIN-NEXT:    [[TMP12:%.*]] = udiv i64 [[TMP11]], 4
; ORIGIN-NEXT:    br label [[DOTSPLIT:%.*]]
; ORIGIN:       .split:
; ORIGIN-NEXT:    [[IV:%.*]] = phi i64 [ 0, [[TMP7]] ], [ [[IV_NEXT:%.*]], [[DOTSPLIT]] ]
; ORIGIN-NEXT:    [[TMP13:%.*]] = getelementptr i32, ptr [[TMP5]], i64 [[IV]]
; ORIGIN-NEXT:    store i32 [[TMP8]], ptr [[TMP13]], align 4
; ORIGIN-NEXT:    [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; ORIGIN-NEXT:    [[IV_CHECK:%.*]] = icmp eq i64 [[IV_NEXT]], [[TMP12]]
; ORIGIN-NEXT:    br i1 [[IV_CHECK]], label [[DOTSPLIT_SPLIT:%.*]], label [[DOTSPLIT]]
; ORIGIN:       .split.split:
; ORIGIN-NEXT:    br label [[TMP14]]
; ORIGIN:       14:
; ORIGIN-NEXT:    store <vscale x 2 x float> [[A]], ptr [[B]], align 8
; ORIGIN-NEXT:    ret void
;
  store <vscale x 2 x float> %a, ptr %b
  ret void
}

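; Caller side of fn_param: the scalable vector argument is checked eagerly
; before the call, and a poisoned argument triggers __msan_warning_noreturn
; (or the with_origin variant) instead of passing its shadow through TLS.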
define void @test_param(ptr %a, ptr %b) sanitize_memory {
; CHECK-LABEL: define void @test_param(
; CHECK-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-NEXT:    [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
; CHECK-NEXT:    call void @llvm.donothing()
; CHECK-NEXT:    [[TMP2:%.*]] = load <vscale x 2 x float>, ptr [[A]], align 8
; CHECK-NEXT:    [[TMP3:%.*]] = ptrtoint ptr [[A]] to i64
; CHECK-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 87960930222080
; CHECK-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
; CHECK-NEXT:    [[_MSLD:%.*]] = load <vscale x 2 x i32>, ptr [[TMP5]], align 8
; CHECK-NEXT:    store i64 [[TMP1]], ptr @__msan_param_tls, align 8
; CHECK-NEXT:    [[TMP6:%.*]] = call i32 @llvm.vector.reduce.or.nxv2i32(<vscale x 2 x i32> [[_MSLD]])
; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i32 [[TMP6]], 0
; CHECK-NEXT:    br i1 [[_MSCMP]], label [[TMP7:%.*]], label [[TMP8:%.*]], !prof [[PROF0:![0-9]+]]
; CHECK:       7:
; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR5:[0-9]+]]
; CHECK-NEXT:    unreachable
; CHECK:       8:
; CHECK-NEXT:    call void @fn_param(<vscale x 2 x float> [[TMP2]], ptr [[B]])
; CHECK-NEXT:    ret void
;
; ORIGIN-LABEL: define void @test_param(
; ORIGIN-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; ORIGIN-NEXT:    [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
; ORIGIN-NEXT:    [[TMP2:%.*]] = load i32, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_origin_tls to i64), i64 8) to ptr), align 4
; ORIGIN-NEXT:    call void @llvm.donothing()
; ORIGIN-NEXT:    [[TMP3:%.*]] = load <vscale x 2 x float>, ptr [[A]], align 8
; ORIGIN-NEXT:    [[TMP4:%.*]] = ptrtoint ptr [[A]] to i64
; ORIGIN-NEXT:    [[TMP5:%.*]] = xor i64 [[TMP4]], 87960930222080
; ORIGIN-NEXT:    [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
; ORIGIN-NEXT:    [[TMP7:%.*]] = add i64 [[TMP5]], 17592186044416
; ORIGIN-NEXT:    [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
; ORIGIN-NEXT:    [[_MSLD:%.*]] = load <vscale x 2 x i32>, ptr [[TMP6]], align 8
; ORIGIN-NEXT:    [[TMP9:%.*]] = load i32, ptr [[TMP8]], align 8
; ORIGIN-NEXT:    store i64 [[TMP1]], ptr @__msan_param_tls, align 8
; ORIGIN-NEXT:    store i32 [[TMP2]], ptr @__msan_param_origin_tls, align 4
; ORIGIN-NEXT:    [[TMP10:%.*]] = call i32 @llvm.vector.reduce.or.nxv2i32(<vscale x 2 x i32> [[_MSLD]])
; ORIGIN-NEXT:    [[_MSCMP:%.*]] = icmp ne i32 [[TMP10]], 0
; ORIGIN-NEXT:    br i1 [[_MSCMP]], label [[TMP11:%.*]], label [[TMP12:%.*]], !prof [[PROF0]]
; ORIGIN:       11:
; ORIGIN-NEXT:    call void @__msan_warning_with_origin_noreturn(i32 [[TMP9]]) #[[ATTR5:[0-9]+]]
; ORIGIN-NEXT:    unreachable
; ORIGIN:       12:
; ORIGIN-NEXT:    call void @fn_param(<vscale x 2 x float> [[TMP3]], ptr [[B]])
; ORIGIN-NEXT:    ret void
;
  %1 = load <vscale x 2 x float>, ptr %a
  call void @fn_param(<vscale x 2 x float> %1, ptr %b)
  ret void
}

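; Alloca of a scalable type: the shadow is poisoned with a memset whose size is
; computed from llvm.vscale.i64 (vscale x 8 bytes for <vscale x 64 x i1>); with
; origins enabled, __msan_set_alloca_origin_with_descr records the origin.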
define void @test_alloca1() sanitize_memory {
; CHECK-LABEL: define void @test_alloca1(
; CHECK-SAME: ) #[[ATTR0]] {
; CHECK-NEXT:  entry:
; CHECK-NEXT:    call void @llvm.donothing()
; CHECK-NEXT:    [[X:%.*]] = alloca <vscale x 64 x i1>, align 4
; CHECK-NEXT:    [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT:    [[TMP1:%.*]] = mul i64 [[TMP0]], 8
; CHECK-NEXT:    [[TMP2:%.*]] = ptrtoint ptr [[X]] to i64
; CHECK-NEXT:    [[TMP3:%.*]] = xor i64 [[TMP2]], 87960930222080
; CHECK-NEXT:    [[TMP4:%.*]] = inttoptr i64 [[TMP3]] to ptr
; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 4 [[TMP4]], i8 -1, i64 [[TMP1]], i1 false)
; CHECK-NEXT:    ret void
;
; ORIGIN-LABEL: define void @test_alloca1(
; ORIGIN-SAME: ) #[[ATTR0]] {
; ORIGIN-NEXT:  entry:
; ORIGIN-NEXT:    call void @llvm.donothing()
; ORIGIN-NEXT:    [[X:%.*]] = alloca <vscale x 64 x i1>, align 4
; ORIGIN-NEXT:    [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
; ORIGIN-NEXT:    [[TMP1:%.*]] = mul i64 [[TMP0]], 8
; ORIGIN-NEXT:    [[TMP2:%.*]] = ptrtoint ptr [[X]] to i64
; ORIGIN-NEXT:    [[TMP3:%.*]] = xor i64 [[TMP2]], 87960930222080
; ORIGIN-NEXT:    [[TMP4:%.*]] = inttoptr i64 [[TMP3]] to ptr
; ORIGIN-NEXT:    [[TMP5:%.*]] = add i64 [[TMP3]], 17592186044416
; ORIGIN-NEXT:    [[TMP6:%.*]] = and i64 [[TMP5]], -4
; ORIGIN-NEXT:    [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
; ORIGIN-NEXT:    call void @llvm.memset.p0.i64(ptr align 4 [[TMP4]], i8 -1, i64 [[TMP1]], i1 false)
; ORIGIN-NEXT:    call void @__msan_set_alloca_origin_with_descr(ptr [[X]], i64 [[TMP1]], ptr @[[GLOB0:[0-9]+]], ptr @[[GLOB1:[0-9]+]])
; ORIGIN-NEXT:    ret void
;
entry:
  %x = alloca <vscale x 64 x i1>, align 4
  ret void
}

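; Same as test_alloca1 for <vscale x 64 x double>: vscale x 512 bytes of shadow
; are poisoned.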
define void @test_alloca2() sanitize_memory {
; CHECK-LABEL: define void @test_alloca2(
; CHECK-SAME: ) #[[ATTR0]] {
; CHECK-NEXT:  entry:
; CHECK-NEXT:    call void @llvm.donothing()
; CHECK-NEXT:    [[X:%.*]] = alloca <vscale x 64 x double>, align 4
; CHECK-NEXT:    [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT:    [[TMP1:%.*]] = mul i64 [[TMP0]], 512
; CHECK-NEXT:    [[TMP2:%.*]] = ptrtoint ptr [[X]] to i64
; CHECK-NEXT:    [[TMP3:%.*]] = xor i64 [[TMP2]], 87960930222080
; CHECK-NEXT:    [[TMP4:%.*]] = inttoptr i64 [[TMP3]] to ptr
; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 4 [[TMP4]], i8 -1, i64 [[TMP1]], i1 false)
; CHECK-NEXT:    ret void
;
; ORIGIN-LABEL: define void @test_alloca2(
; ORIGIN-SAME: ) #[[ATTR0]] {
; ORIGIN-NEXT:  entry:
; ORIGIN-NEXT:    call void @llvm.donothing()
; ORIGIN-NEXT:    [[X:%.*]] = alloca <vscale x 64 x double>, align 4
; ORIGIN-NEXT:    [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
; ORIGIN-NEXT:    [[TMP1:%.*]] = mul i64 [[TMP0]], 512
; ORIGIN-NEXT:    [[TMP2:%.*]] = ptrtoint ptr [[X]] to i64
; ORIGIN-NEXT:    [[TMP3:%.*]] = xor i64 [[TMP2]], 87960930222080
; ORIGIN-NEXT:    [[TMP4:%.*]] = inttoptr i64 [[TMP3]] to ptr
; ORIGIN-NEXT:    [[TMP5:%.*]] = add i64 [[TMP3]], 17592186044416
; ORIGIN-NEXT:    [[TMP6:%.*]] = and i64 [[TMP5]], -4
; ORIGIN-NEXT:    [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
; ORIGIN-NEXT:    call void @llvm.memset.p0.i64(ptr align 4 [[TMP4]], i8 -1, i64 [[TMP1]], i1 false)
; ORIGIN-NEXT:    call void @__msan_set_alloca_origin_with_descr(ptr [[X]], i64 [[TMP1]], ptr @[[GLOB2:[0-9]+]], ptr @[[GLOB3:[0-9]+]])
; ORIGIN-NEXT:    ret void
;
entry:
  %x = alloca <vscale x 64 x double>, align 4
  ret void
}

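; <vscale x 1 x i1> occupies less than one byte per vscale unit, so the poisoned
; shadow size is just vscale bytes (no multiply).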
define void @test_alloca3() sanitize_memory {
; CHECK-LABEL: define void @test_alloca3(
; CHECK-SAME: ) #[[ATTR0]] {
; CHECK-NEXT:  entry:
; CHECK-NEXT:    call void @llvm.donothing()
; CHECK-NEXT:    [[X:%.*]] = alloca <vscale x 1 x i1>, align 4
; CHECK-NEXT:    [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[X]] to i64
; CHECK-NEXT:    [[TMP2:%.*]] = xor i64 [[TMP1]], 87960930222080
; CHECK-NEXT:    [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to ptr
; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 4 [[TMP3]], i8 -1, i64 [[TMP0]], i1 false)
; CHECK-NEXT:    ret void
;
; ORIGIN-LABEL: define void @test_alloca3(
; ORIGIN-SAME: ) #[[ATTR0]] {
; ORIGIN-NEXT:  entry:
; ORIGIN-NEXT:    call void @llvm.donothing()
; ORIGIN-NEXT:    [[X:%.*]] = alloca <vscale x 1 x i1>, align 4
; ORIGIN-NEXT:    [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
; ORIGIN-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[X]] to i64
; ORIGIN-NEXT:    [[TMP2:%.*]] = xor i64 [[TMP1]], 87960930222080
; ORIGIN-NEXT:    [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to ptr
; ORIGIN-NEXT:    [[TMP4:%.*]] = add i64 [[TMP2]], 17592186044416
; ORIGIN-NEXT:    [[TMP5:%.*]] = and i64 [[TMP4]], -4
; ORIGIN-NEXT:    [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
; ORIGIN-NEXT:    call void @llvm.memset.p0.i64(ptr align 4 [[TMP3]], i8 -1, i64 [[TMP0]], i1 false)
; ORIGIN-NEXT:    call void @__msan_set_alloca_origin_with_descr(ptr [[X]], i64 [[TMP0]], ptr @[[GLOB4:[0-9]+]], ptr @[[GLOB5:[0-9]+]])
; ORIGIN-NEXT:    ret void
;
entry:
  %x = alloca <vscale x 1 x i1>, align 4
  ret void
}

;.
; CHECK: [[PROF0]] = !{!"branch_weights", i32 1, i32 1048575}
;.
; ORIGIN: [[PROF0]] = !{!"branch_weights", i32 1, i32 1048575}
;.