; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -msan-check-access-address=0 -S -passes=msan 2>&1 | FileCheck %s --implicit-check-not="call void @__msan_warning"
; RUN: opt < %s -msan-check-access-address=1 -S -passes=msan 2>&1 | FileCheck %s --check-prefixes=ADDR --implicit-check-not="call void @__msan_warning"
; RUN: opt < %s -msan-check-access-address=0 -msan-track-origins=1 -S -passes=msan 2>&1 | FileCheck %s --check-prefixes=ORIGINS --implicit-check-not="call void @__msan_warning"

target triple = "x86_64-unknown-linux-gnu"
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"

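; These tests exercise MemorySanitizer instrumentation of plain vector loads
; and stores, for both fixed-width (<N x i32>) and scalable (<vscale x N x i32>)
; types. With the default x86_64 mapping checked below, the shadow address is
; the application address XOR'ed with 0x500000000000 (87960930222080), and the
; origin slot sits a further 0x100000000000 (17592186044416) bytes above the
; shadow. The ADDR run (-msan-check-access-address=1) additionally loads the
; pointer argument's shadow from __msan_param_tls and calls
; __msan_warning_noreturn if the address itself is uninitialized.
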
define void @load.v1i32(ptr %p) sanitize_memory {
; CHECK-LABEL: @load.v1i32(
; CHECK-NEXT:    call void @llvm.donothing()
; CHECK-NEXT:    [[TMP1:%.*]] = load <1 x i32>, ptr [[P:%.*]], align 4
; CHECK-NEXT:    [[TMP2:%.*]] = ptrtoint ptr [[P]] to i64
; CHECK-NEXT:    [[TMP3:%.*]] = xor i64 [[TMP2]], 87960930222080
; CHECK-NEXT:    [[TMP4:%.*]] = inttoptr i64 [[TMP3]] to ptr
; CHECK-NEXT:    [[_MSLD:%.*]] = load <1 x i32>, ptr [[TMP4]], align 4
; CHECK-NEXT:    ret void
;
; ADDR-LABEL: @load.v1i32(
; ADDR-NEXT:    [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
; ADDR-NEXT:    call void @llvm.donothing()
; ADDR-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; ADDR-NEXT:    br i1 [[_MSCMP]], label [[TMP2:%.*]], label [[TMP3:%.*]], !prof [[PROF0:![0-9]+]]
; ADDR:       2:
; ADDR-NEXT:    call void @__msan_warning_noreturn() #[[ATTR3:[0-9]+]]
; ADDR-NEXT:    unreachable
; ADDR:       3:
; ADDR-NEXT:    [[TMP4:%.*]] = load <1 x i32>, ptr [[P:%.*]], align 4
; ADDR-NEXT:    [[TMP5:%.*]] = ptrtoint ptr [[P]] to i64
; ADDR-NEXT:    [[TMP6:%.*]] = xor i64 [[TMP5]], 87960930222080
; ADDR-NEXT:    [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
; ADDR-NEXT:    [[_MSLD:%.*]] = load <1 x i32>, ptr [[TMP7]], align 4
; ADDR-NEXT:    ret void
;
; ORIGINS-LABEL: @load.v1i32(
; ORIGINS-NEXT:    call void @llvm.donothing()
; ORIGINS-NEXT:    [[TMP1:%.*]] = load <1 x i32>, ptr [[P:%.*]], align 4
; ORIGINS-NEXT:    [[TMP2:%.*]] = ptrtoint ptr [[P]] to i64
; ORIGINS-NEXT:    [[TMP3:%.*]] = xor i64 [[TMP2]], 87960930222080
; ORIGINS-NEXT:    [[TMP4:%.*]] = inttoptr i64 [[TMP3]] to ptr
; ORIGINS-NEXT:    [[TMP5:%.*]] = add i64 [[TMP3]], 17592186044416
; ORIGINS-NEXT:    [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
; ORIGINS-NEXT:    [[_MSLD:%.*]] = load <1 x i32>, ptr [[TMP4]], align 4
; ORIGINS-NEXT:    [[TMP7:%.*]] = load i32, ptr [[TMP6]], align 4
; ORIGINS-NEXT:    ret void
;
  load <1 x i32>, ptr %p
  ret void
}

define void @load.v2i32(ptr %p) sanitize_memory {
; CHECK-LABEL: @load.v2i32(
; CHECK-NEXT:    call void @llvm.donothing()
; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i32>, ptr [[P:%.*]], align 8
; CHECK-NEXT:    [[TMP2:%.*]] = ptrtoint ptr [[P]] to i64
; CHECK-NEXT:    [[TMP3:%.*]] = xor i64 [[TMP2]], 87960930222080
; CHECK-NEXT:    [[TMP4:%.*]] = inttoptr i64 [[TMP3]] to ptr
; CHECK-NEXT:    [[_MSLD:%.*]] = load <2 x i32>, ptr [[TMP4]], align 8
; CHECK-NEXT:    ret void
;
; ADDR-LABEL: @load.v2i32(
; ADDR-NEXT:    [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
; ADDR-NEXT:    call void @llvm.donothing()
; ADDR-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; ADDR-NEXT:    br i1 [[_MSCMP]], label [[TMP2:%.*]], label [[TMP3:%.*]], !prof [[PROF0]]
; ADDR:       2:
; ADDR-NEXT:    call void @__msan_warning_noreturn() #[[ATTR3]]
; ADDR-NEXT:    unreachable
; ADDR:       3:
; ADDR-NEXT:    [[TMP4:%.*]] = load <2 x i32>, ptr [[P:%.*]], align 8
; ADDR-NEXT:    [[TMP5:%.*]] = ptrtoint ptr [[P]] to i64
; ADDR-NEXT:    [[TMP6:%.*]] = xor i64 [[TMP5]], 87960930222080
; ADDR-NEXT:    [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
; ADDR-NEXT:    [[_MSLD:%.*]] = load <2 x i32>, ptr [[TMP7]], align 8
; ADDR-NEXT:    ret void
;
; ORIGINS-LABEL: @load.v2i32(
; ORIGINS-NEXT:    call void @llvm.donothing()
; ORIGINS-NEXT:    [[TMP1:%.*]] = load <2 x i32>, ptr [[P:%.*]], align 8
; ORIGINS-NEXT:    [[TMP2:%.*]] = ptrtoint ptr [[P]] to i64
; ORIGINS-NEXT:    [[TMP3:%.*]] = xor i64 [[TMP2]], 87960930222080
; ORIGINS-NEXT:    [[TMP4:%.*]] = inttoptr i64 [[TMP3]] to ptr
; ORIGINS-NEXT:    [[TMP5:%.*]] = add i64 [[TMP3]], 17592186044416
; ORIGINS-NEXT:    [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
; ORIGINS-NEXT:    [[_MSLD:%.*]] = load <2 x i32>, ptr [[TMP4]], align 8
; ORIGINS-NEXT:    [[TMP7:%.*]] = load i32, ptr [[TMP6]], align 8
; ORIGINS-NEXT:    ret void
;
  load <2 x i32>, ptr %p
  ret void
}

define void @load.v4i32(ptr %p) sanitize_memory {
; CHECK-LABEL: @load.v4i32(
; CHECK-NEXT:    call void @llvm.donothing()
; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, ptr [[P:%.*]], align 16
; CHECK-NEXT:    [[TMP2:%.*]] = ptrtoint ptr [[P]] to i64
; CHECK-NEXT:    [[TMP3:%.*]] = xor i64 [[TMP2]], 87960930222080
; CHECK-NEXT:    [[TMP4:%.*]] = inttoptr i64 [[TMP3]] to ptr
; CHECK-NEXT:    [[_MSLD:%.*]] = load <4 x i32>, ptr [[TMP4]], align 16
; CHECK-NEXT:    ret void
;
; ADDR-LABEL: @load.v4i32(
; ADDR-NEXT:    [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
; ADDR-NEXT:    call void @llvm.donothing()
; ADDR-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; ADDR-NEXT:    br i1 [[_MSCMP]], label [[TMP2:%.*]], label [[TMP3:%.*]], !prof [[PROF0]]
; ADDR:       2:
; ADDR-NEXT:    call void @__msan_warning_noreturn() #[[ATTR3]]
; ADDR-NEXT:    unreachable
; ADDR:       3:
; ADDR-NEXT:    [[TMP4:%.*]] = load <4 x i32>, ptr [[P:%.*]], align 16
; ADDR-NEXT:    [[TMP5:%.*]] = ptrtoint ptr [[P]] to i64
; ADDR-NEXT:    [[TMP6:%.*]] = xor i64 [[TMP5]], 87960930222080
; ADDR-NEXT:    [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
; ADDR-NEXT:    [[_MSLD:%.*]] = load <4 x i32>, ptr [[TMP7]], align 16
; ADDR-NEXT:    ret void
;
; ORIGINS-LABEL: @load.v4i32(
; ORIGINS-NEXT:    call void @llvm.donothing()
; ORIGINS-NEXT:    [[TMP1:%.*]] = load <4 x i32>, ptr [[P:%.*]], align 16
; ORIGINS-NEXT:    [[TMP2:%.*]] = ptrtoint ptr [[P]] to i64
; ORIGINS-NEXT:    [[TMP3:%.*]] = xor i64 [[TMP2]], 87960930222080
; ORIGINS-NEXT:    [[TMP4:%.*]] = inttoptr i64 [[TMP3]] to ptr
; ORIGINS-NEXT:    [[TMP5:%.*]] = add i64 [[TMP3]], 17592186044416
; ORIGINS-NEXT:    [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
; ORIGINS-NEXT:    [[_MSLD:%.*]] = load <4 x i32>, ptr [[TMP4]], align 16
; ORIGINS-NEXT:    [[TMP7:%.*]] = load i32, ptr [[TMP6]], align 16
; ORIGINS-NEXT:    ret void
;
  load <4 x i32>, ptr %p
  ret void
}

define void @load.v8i32(ptr %p) sanitize_memory {
; CHECK-LABEL: @load.v8i32(
; CHECK-NEXT:    call void @llvm.donothing()
; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i32>, ptr [[P:%.*]], align 32
; CHECK-NEXT:    [[TMP2:%.*]] = ptrtoint ptr [[P]] to i64
; CHECK-NEXT:    [[TMP3:%.*]] = xor i64 [[TMP2]], 87960930222080
; CHECK-NEXT:    [[TMP4:%.*]] = inttoptr i64 [[TMP3]] to ptr
; CHECK-NEXT:    [[_MSLD:%.*]] = load <8 x i32>, ptr [[TMP4]], align 32
; CHECK-NEXT:    ret void
;
; ADDR-LABEL: @load.v8i32(
; ADDR-NEXT:    [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
; ADDR-NEXT:    call void @llvm.donothing()
; ADDR-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; ADDR-NEXT:    br i1 [[_MSCMP]], label [[TMP2:%.*]], label [[TMP3:%.*]], !prof [[PROF0]]
; ADDR:       2:
; ADDR-NEXT:    call void @__msan_warning_noreturn() #[[ATTR3]]
; ADDR-NEXT:    unreachable
; ADDR:       3:
; ADDR-NEXT:    [[TMP4:%.*]] = load <8 x i32>, ptr [[P:%.*]], align 32
; ADDR-NEXT:    [[TMP5:%.*]] = ptrtoint ptr [[P]] to i64
; ADDR-NEXT:    [[TMP6:%.*]] = xor i64 [[TMP5]], 87960930222080
; ADDR-NEXT:    [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
; ADDR-NEXT:    [[_MSLD:%.*]] = load <8 x i32>, ptr [[TMP7]], align 32
; ADDR-NEXT:    ret void
;
; ORIGINS-LABEL: @load.v8i32(
; ORIGINS-NEXT:    call void @llvm.donothing()
; ORIGINS-NEXT:    [[TMP1:%.*]] = load <8 x i32>, ptr [[P:%.*]], align 32
; ORIGINS-NEXT:    [[TMP2:%.*]] = ptrtoint ptr [[P]] to i64
; ORIGINS-NEXT:    [[TMP3:%.*]] = xor i64 [[TMP2]], 87960930222080
; ORIGINS-NEXT:    [[TMP4:%.*]] = inttoptr i64 [[TMP3]] to ptr
; ORIGINS-NEXT:    [[TMP5:%.*]] = add i64 [[TMP3]], 17592186044416
; ORIGINS-NEXT:    [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
; ORIGINS-NEXT:    [[_MSLD:%.*]] = load <8 x i32>, ptr [[TMP4]], align 32
; ORIGINS-NEXT:    [[TMP7:%.*]] = load i32, ptr [[TMP6]], align 32
; ORIGINS-NEXT:    ret void
;
  load <8 x i32>, ptr %p
  ret void
}

define void @load.v16i32(ptr %p) sanitize_memory {
; CHECK-LABEL: @load.v16i32(
; CHECK-NEXT:    call void @llvm.donothing()
; CHECK-NEXT:    [[TMP1:%.*]] = load <16 x i32>, ptr [[P:%.*]], align 64
; CHECK-NEXT:    [[TMP2:%.*]] = ptrtoint ptr [[P]] to i64
; CHECK-NEXT:    [[TMP3:%.*]] = xor i64 [[TMP2]], 87960930222080
; CHECK-NEXT:    [[TMP4:%.*]] = inttoptr i64 [[TMP3]] to ptr
; CHECK-NEXT:    [[_MSLD:%.*]] = load <16 x i32>, ptr [[TMP4]], align 64
; CHECK-NEXT:    ret void
;
; ADDR-LABEL: @load.v16i32(
; ADDR-NEXT:    [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
; ADDR-NEXT:    call void @llvm.donothing()
; ADDR-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; ADDR-NEXT:    br i1 [[_MSCMP]], label [[TMP2:%.*]], label [[TMP3:%.*]], !prof [[PROF0]]
; ADDR:       2:
; ADDR-NEXT:    call void @__msan_warning_noreturn() #[[ATTR3]]
; ADDR-NEXT:    unreachable
; ADDR:       3:
; ADDR-NEXT:    [[TMP4:%.*]] = load <16 x i32>, ptr [[P:%.*]], align 64
; ADDR-NEXT:    [[TMP5:%.*]] = ptrtoint ptr [[P]] to i64
; ADDR-NEXT:    [[TMP6:%.*]] = xor i64 [[TMP5]], 87960930222080
; ADDR-NEXT:    [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
; ADDR-NEXT:    [[_MSLD:%.*]] = load <16 x i32>, ptr [[TMP7]], align 64
; ADDR-NEXT:    ret void
;
; ORIGINS-LABEL: @load.v16i32(
; ORIGINS-NEXT:    call void @llvm.donothing()
; ORIGINS-NEXT:    [[TMP1:%.*]] = load <16 x i32>, ptr [[P:%.*]], align 64
; ORIGINS-NEXT:    [[TMP2:%.*]] = ptrtoint ptr [[P]] to i64
; ORIGINS-NEXT:    [[TMP3:%.*]] = xor i64 [[TMP2]], 87960930222080
; ORIGINS-NEXT:    [[TMP4:%.*]] = inttoptr i64 [[TMP3]] to ptr
; ORIGINS-NEXT:    [[TMP5:%.*]] = add i64 [[TMP3]], 17592186044416
; ORIGINS-NEXT:    [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
; ORIGINS-NEXT:    [[_MSLD:%.*]] = load <16 x i32>, ptr [[TMP4]], align 64
; ORIGINS-NEXT:    [[TMP7:%.*]] = load i32, ptr [[TMP6]], align 64
; ORIGINS-NEXT:    ret void
;
  load <16 x i32>, ptr %p
  ret void
}


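; Stores of a fully initialized value: the instrumentation clears the shadow by
; storing a zero shadow vector to the XOR'ed address before the application
; store. In the ORIGINS run the origin address is computed but nothing is
; written to it, since the shadow being stored is the constant zero.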
define void @store.v1i32(ptr %p) sanitize_memory {
; CHECK-LABEL: @store.v1i32(
; CHECK-NEXT:    call void @llvm.donothing()
; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[P:%.*]] to i64
; CHECK-NEXT:    [[TMP2:%.*]] = xor i64 [[TMP1]], 87960930222080
; CHECK-NEXT:    [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to ptr
; CHECK-NEXT:    store <1 x i32> zeroinitializer, ptr [[TMP3]], align 4
; CHECK-NEXT:    store <1 x i32> zeroinitializer, ptr [[P]], align 4
; CHECK-NEXT:    ret void
;
; ADDR-LABEL: @store.v1i32(
; ADDR-NEXT:    [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
; ADDR-NEXT:    call void @llvm.donothing()
; ADDR-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; ADDR-NEXT:    br i1 [[_MSCMP]], label [[TMP2:%.*]], label [[TMP3:%.*]], !prof [[PROF0]]
; ADDR:       2:
; ADDR-NEXT:    call void @__msan_warning_noreturn() #[[ATTR3]]
; ADDR-NEXT:    unreachable
; ADDR:       3:
; ADDR-NEXT:    [[TMP4:%.*]] = ptrtoint ptr [[P:%.*]] to i64
; ADDR-NEXT:    [[TMP5:%.*]] = xor i64 [[TMP4]], 87960930222080
; ADDR-NEXT:    [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
; ADDR-NEXT:    store <1 x i32> zeroinitializer, ptr [[TMP6]], align 4
; ADDR-NEXT:    store <1 x i32> zeroinitializer, ptr [[P]], align 4
; ADDR-NEXT:    ret void
;
; ORIGINS-LABEL: @store.v1i32(
; ORIGINS-NEXT:    call void @llvm.donothing()
; ORIGINS-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[P:%.*]] to i64
; ORIGINS-NEXT:    [[TMP2:%.*]] = xor i64 [[TMP1]], 87960930222080
; ORIGINS-NEXT:    [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to ptr
; ORIGINS-NEXT:    [[TMP4:%.*]] = add i64 [[TMP2]], 17592186044416
; ORIGINS-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
; ORIGINS-NEXT:    store <1 x i32> zeroinitializer, ptr [[TMP3]], align 4
; ORIGINS-NEXT:    store <1 x i32> zeroinitializer, ptr [[P]], align 4
; ORIGINS-NEXT:    ret void
;
  store <1 x i32> zeroinitializer, ptr %p
  ret void
}

define void @store.v2i32(ptr %p) sanitize_memory {
; CHECK-LABEL: @store.v2i32(
; CHECK-NEXT:    call void @llvm.donothing()
; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[P:%.*]] to i64
; CHECK-NEXT:    [[TMP2:%.*]] = xor i64 [[TMP1]], 87960930222080
; CHECK-NEXT:    [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to ptr
; CHECK-NEXT:    store <2 x i32> zeroinitializer, ptr [[TMP3]], align 8
; CHECK-NEXT:    store <2 x i32> zeroinitializer, ptr [[P]], align 8
; CHECK-NEXT:    ret void
;
; ADDR-LABEL: @store.v2i32(
; ADDR-NEXT:    [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
; ADDR-NEXT:    call void @llvm.donothing()
; ADDR-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; ADDR-NEXT:    br i1 [[_MSCMP]], label [[TMP2:%.*]], label [[TMP3:%.*]], !prof [[PROF0]]
; ADDR:       2:
; ADDR-NEXT:    call void @__msan_warning_noreturn() #[[ATTR3]]
; ADDR-NEXT:    unreachable
; ADDR:       3:
; ADDR-NEXT:    [[TMP4:%.*]] = ptrtoint ptr [[P:%.*]] to i64
; ADDR-NEXT:    [[TMP5:%.*]] = xor i64 [[TMP4]], 87960930222080
; ADDR-NEXT:    [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
; ADDR-NEXT:    store <2 x i32> zeroinitializer, ptr [[TMP6]], align 8
; ADDR-NEXT:    store <2 x i32> zeroinitializer, ptr [[P]], align 8
; ADDR-NEXT:    ret void
;
; ORIGINS-LABEL: @store.v2i32(
; ORIGINS-NEXT:    call void @llvm.donothing()
; ORIGINS-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[P:%.*]] to i64
; ORIGINS-NEXT:    [[TMP2:%.*]] = xor i64 [[TMP1]], 87960930222080
; ORIGINS-NEXT:    [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to ptr
; ORIGINS-NEXT:    [[TMP4:%.*]] = add i64 [[TMP2]], 17592186044416
; ORIGINS-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
; ORIGINS-NEXT:    store <2 x i32> zeroinitializer, ptr [[TMP3]], align 8
; ORIGINS-NEXT:    store <2 x i32> zeroinitializer, ptr [[P]], align 8
; ORIGINS-NEXT:    ret void
;
  store <2 x i32> zeroinitializer, ptr %p
  ret void
}

define void @store.v4i32(ptr %p) sanitize_memory {
; CHECK-LABEL: @store.v4i32(
; CHECK-NEXT:    call void @llvm.donothing()
; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[P:%.*]] to i64
; CHECK-NEXT:    [[TMP2:%.*]] = xor i64 [[TMP1]], 87960930222080
; CHECK-NEXT:    [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to ptr
; CHECK-NEXT:    store <4 x i32> zeroinitializer, ptr [[TMP3]], align 16
; CHECK-NEXT:    store <4 x i32> zeroinitializer, ptr [[P]], align 16
; CHECK-NEXT:    ret void
;
; ADDR-LABEL: @store.v4i32(
; ADDR-NEXT:    [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
; ADDR-NEXT:    call void @llvm.donothing()
; ADDR-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; ADDR-NEXT:    br i1 [[_MSCMP]], label [[TMP2:%.*]], label [[TMP3:%.*]], !prof [[PROF0]]
; ADDR:       2:
; ADDR-NEXT:    call void @__msan_warning_noreturn() #[[ATTR3]]
; ADDR-NEXT:    unreachable
; ADDR:       3:
; ADDR-NEXT:    [[TMP4:%.*]] = ptrtoint ptr [[P:%.*]] to i64
; ADDR-NEXT:    [[TMP5:%.*]] = xor i64 [[TMP4]], 87960930222080
; ADDR-NEXT:    [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
; ADDR-NEXT:    store <4 x i32> zeroinitializer, ptr [[TMP6]], align 16
; ADDR-NEXT:    store <4 x i32> zeroinitializer, ptr [[P]], align 16
; ADDR-NEXT:    ret void
;
; ORIGINS-LABEL: @store.v4i32(
; ORIGINS-NEXT:    call void @llvm.donothing()
; ORIGINS-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[P:%.*]] to i64
; ORIGINS-NEXT:    [[TMP2:%.*]] = xor i64 [[TMP1]], 87960930222080
; ORIGINS-NEXT:    [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to ptr
; ORIGINS-NEXT:    [[TMP4:%.*]] = add i64 [[TMP2]], 17592186044416
; ORIGINS-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
; ORIGINS-NEXT:    store <4 x i32> zeroinitializer, ptr [[TMP3]], align 16
; ORIGINS-NEXT:    store <4 x i32> zeroinitializer, ptr [[P]], align 16
; ORIGINS-NEXT:    ret void
;
  store <4 x i32> zeroinitializer, ptr %p
  ret void
}

define void @store.v8i32(ptr %p) sanitize_memory {
; CHECK-LABEL: @store.v8i32(
; CHECK-NEXT:    call void @llvm.donothing()
; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[P:%.*]] to i64
; CHECK-NEXT:    [[TMP2:%.*]] = xor i64 [[TMP1]], 87960930222080
; CHECK-NEXT:    [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to ptr
; CHECK-NEXT:    store <8 x i32> zeroinitializer, ptr [[TMP3]], align 32
; CHECK-NEXT:    store <8 x i32> zeroinitializer, ptr [[P]], align 32
; CHECK-NEXT:    ret void
;
; ADDR-LABEL: @store.v8i32(
; ADDR-NEXT:    [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
; ADDR-NEXT:    call void @llvm.donothing()
; ADDR-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; ADDR-NEXT:    br i1 [[_MSCMP]], label [[TMP2:%.*]], label [[TMP3:%.*]], !prof [[PROF0]]
; ADDR:       2:
; ADDR-NEXT:    call void @__msan_warning_noreturn() #[[ATTR3]]
; ADDR-NEXT:    unreachable
; ADDR:       3:
; ADDR-NEXT:    [[TMP4:%.*]] = ptrtoint ptr [[P:%.*]] to i64
; ADDR-NEXT:    [[TMP5:%.*]] = xor i64 [[TMP4]], 87960930222080
; ADDR-NEXT:    [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
; ADDR-NEXT:    store <8 x i32> zeroinitializer, ptr [[TMP6]], align 32
; ADDR-NEXT:    store <8 x i32> zeroinitializer, ptr [[P]], align 32
; ADDR-NEXT:    ret void
;
; ORIGINS-LABEL: @store.v8i32(
; ORIGINS-NEXT:    call void @llvm.donothing()
; ORIGINS-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[P:%.*]] to i64
; ORIGINS-NEXT:    [[TMP2:%.*]] = xor i64 [[TMP1]], 87960930222080
; ORIGINS-NEXT:    [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to ptr
; ORIGINS-NEXT:    [[TMP4:%.*]] = add i64 [[TMP2]], 17592186044416
; ORIGINS-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
; ORIGINS-NEXT:    store <8 x i32> zeroinitializer, ptr [[TMP3]], align 32
; ORIGINS-NEXT:    store <8 x i32> zeroinitializer, ptr [[P]], align 32
; ORIGINS-NEXT:    ret void
;
  store <8 x i32> zeroinitializer, ptr %p
  ret void
}

define void @store.v16i32(ptr %p) sanitize_memory {
; CHECK-LABEL: @store.v16i32(
; CHECK-NEXT:    call void @llvm.donothing()
; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[P:%.*]] to i64
; CHECK-NEXT:    [[TMP2:%.*]] = xor i64 [[TMP1]], 87960930222080
; CHECK-NEXT:    [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to ptr
; CHECK-NEXT:    store <16 x i32> zeroinitializer, ptr [[TMP3]], align 64
; CHECK-NEXT:    store <16 x i32> zeroinitializer, ptr [[P]], align 64
; CHECK-NEXT:    ret void
;
; ADDR-LABEL: @store.v16i32(
; ADDR-NEXT:    [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
; ADDR-NEXT:    call void @llvm.donothing()
; ADDR-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; ADDR-NEXT:    br i1 [[_MSCMP]], label [[TMP2:%.*]], label [[TMP3:%.*]], !prof [[PROF0]]
; ADDR:       2:
; ADDR-NEXT:    call void @__msan_warning_noreturn() #[[ATTR3]]
; ADDR-NEXT:    unreachable
; ADDR:       3:
; ADDR-NEXT:    [[TMP4:%.*]] = ptrtoint ptr [[P:%.*]] to i64
; ADDR-NEXT:    [[TMP5:%.*]] = xor i64 [[TMP4]], 87960930222080
; ADDR-NEXT:    [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
; ADDR-NEXT:    store <16 x i32> zeroinitializer, ptr [[TMP6]], align 64
; ADDR-NEXT:    store <16 x i32> zeroinitializer, ptr [[P]], align 64
; ADDR-NEXT:    ret void
;
; ORIGINS-LABEL: @store.v16i32(
; ORIGINS-NEXT:    call void @llvm.donothing()
; ORIGINS-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[P:%.*]] to i64
; ORIGINS-NEXT:    [[TMP2:%.*]] = xor i64 [[TMP1]], 87960930222080
; ORIGINS-NEXT:    [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to ptr
; ORIGINS-NEXT:    [[TMP4:%.*]] = add i64 [[TMP2]], 17592186044416
; ORIGINS-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
; ORIGINS-NEXT:    store <16 x i32> zeroinitializer, ptr [[TMP3]], align 64
; ORIGINS-NEXT:    store <16 x i32> zeroinitializer, ptr [[P]], align 64
; ORIGINS-NEXT:    ret void
;
  store <16 x i32> zeroinitializer, ptr %p
  ret void
}

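; Scalable vector loads are instrumented the same way as the fixed-width loads
; above: the shadow is loaded through the XOR'ed address with a matching
; <vscale x N x i32> type, and the ORIGINS run loads a single i32 origin.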
define void @load.nxv1i32(ptr %p) sanitize_memory {
; CHECK-LABEL: @load.nxv1i32(
; CHECK-NEXT:    call void @llvm.donothing()
; CHECK-NEXT:    [[TMP1:%.*]] = load <vscale x 1 x i32>, ptr [[P:%.*]], align 4
; CHECK-NEXT:    [[TMP2:%.*]] = ptrtoint ptr [[P]] to i64
; CHECK-NEXT:    [[TMP3:%.*]] = xor i64 [[TMP2]], 87960930222080
; CHECK-NEXT:    [[TMP4:%.*]] = inttoptr i64 [[TMP3]] to ptr
; CHECK-NEXT:    [[_MSLD:%.*]] = load <vscale x 1 x i32>, ptr [[TMP4]], align 4
; CHECK-NEXT:    ret void
;
; ADDR-LABEL: @load.nxv1i32(
; ADDR-NEXT:    [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
; ADDR-NEXT:    call void @llvm.donothing()
; ADDR-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; ADDR-NEXT:    br i1 [[_MSCMP]], label [[TMP2:%.*]], label [[TMP3:%.*]], !prof [[PROF0]]
; ADDR:       2:
; ADDR-NEXT:    call void @__msan_warning_noreturn() #[[ATTR3]]
; ADDR-NEXT:    unreachable
; ADDR:       3:
; ADDR-NEXT:    [[TMP4:%.*]] = load <vscale x 1 x i32>, ptr [[P:%.*]], align 4
; ADDR-NEXT:    [[TMP5:%.*]] = ptrtoint ptr [[P]] to i64
; ADDR-NEXT:    [[TMP6:%.*]] = xor i64 [[TMP5]], 87960930222080
; ADDR-NEXT:    [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
; ADDR-NEXT:    [[_MSLD:%.*]] = load <vscale x 1 x i32>, ptr [[TMP7]], align 4
; ADDR-NEXT:    ret void
;
; ORIGINS-LABEL: @load.nxv1i32(
; ORIGINS-NEXT:    call void @llvm.donothing()
; ORIGINS-NEXT:    [[TMP1:%.*]] = load <vscale x 1 x i32>, ptr [[P:%.*]], align 4
; ORIGINS-NEXT:    [[TMP2:%.*]] = ptrtoint ptr [[P]] to i64
; ORIGINS-NEXT:    [[TMP3:%.*]] = xor i64 [[TMP2]], 87960930222080
; ORIGINS-NEXT:    [[TMP4:%.*]] = inttoptr i64 [[TMP3]] to ptr
; ORIGINS-NEXT:    [[TMP5:%.*]] = add i64 [[TMP3]], 17592186044416
; ORIGINS-NEXT:    [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
; ORIGINS-NEXT:    [[_MSLD:%.*]] = load <vscale x 1 x i32>, ptr [[TMP4]], align 4
; ORIGINS-NEXT:    [[TMP7:%.*]] = load i32, ptr [[TMP6]], align 4
; ORIGINS-NEXT:    ret void
;
  load <vscale x 1 x i32>, ptr %p
  ret void
}

define void @load.nxv2i32(ptr %p) sanitize_memory {
; CHECK-LABEL: @load.nxv2i32(
; CHECK-NEXT:    call void @llvm.donothing()
; CHECK-NEXT:    [[TMP1:%.*]] = load <vscale x 2 x i32>, ptr [[P:%.*]], align 8
; CHECK-NEXT:    [[TMP2:%.*]] = ptrtoint ptr [[P]] to i64
; CHECK-NEXT:    [[TMP3:%.*]] = xor i64 [[TMP2]], 87960930222080
; CHECK-NEXT:    [[TMP4:%.*]] = inttoptr i64 [[TMP3]] to ptr
; CHECK-NEXT:    [[_MSLD:%.*]] = load <vscale x 2 x i32>, ptr [[TMP4]], align 8
; CHECK-NEXT:    ret void
;
; ADDR-LABEL: @load.nxv2i32(
; ADDR-NEXT:    [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
; ADDR-NEXT:    call void @llvm.donothing()
; ADDR-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; ADDR-NEXT:    br i1 [[_MSCMP]], label [[TMP2:%.*]], label [[TMP3:%.*]], !prof [[PROF0]]
; ADDR:       2:
; ADDR-NEXT:    call void @__msan_warning_noreturn() #[[ATTR3]]
; ADDR-NEXT:    unreachable
; ADDR:       3:
; ADDR-NEXT:    [[TMP4:%.*]] = load <vscale x 2 x i32>, ptr [[P:%.*]], align 8
; ADDR-NEXT:    [[TMP5:%.*]] = ptrtoint ptr [[P]] to i64
; ADDR-NEXT:    [[TMP6:%.*]] = xor i64 [[TMP5]], 87960930222080
; ADDR-NEXT:    [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
; ADDR-NEXT:    [[_MSLD:%.*]] = load <vscale x 2 x i32>, ptr [[TMP7]], align 8
; ADDR-NEXT:    ret void
;
; ORIGINS-LABEL: @load.nxv2i32(
; ORIGINS-NEXT:    call void @llvm.donothing()
; ORIGINS-NEXT:    [[TMP1:%.*]] = load <vscale x 2 x i32>, ptr [[P:%.*]], align 8
; ORIGINS-NEXT:    [[TMP2:%.*]] = ptrtoint ptr [[P]] to i64
; ORIGINS-NEXT:    [[TMP3:%.*]] = xor i64 [[TMP2]], 87960930222080
; ORIGINS-NEXT:    [[TMP4:%.*]] = inttoptr i64 [[TMP3]] to ptr
; ORIGINS-NEXT:    [[TMP5:%.*]] = add i64 [[TMP3]], 17592186044416
; ORIGINS-NEXT:    [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
; ORIGINS-NEXT:    [[_MSLD:%.*]] = load <vscale x 2 x i32>, ptr [[TMP4]], align 8
; ORIGINS-NEXT:    [[TMP7:%.*]] = load i32, ptr [[TMP6]], align 8
; ORIGINS-NEXT:    ret void
;
  load <vscale x 2 x i32>, ptr %p
  ret void
}

define void @load.nxv4i32(ptr %p) sanitize_memory {
; CHECK-LABEL: @load.nxv4i32(
; CHECK-NEXT:    call void @llvm.donothing()
; CHECK-NEXT:    [[TMP1:%.*]] = load <vscale x 4 x i32>, ptr [[P:%.*]], align 16
; CHECK-NEXT:    [[TMP2:%.*]] = ptrtoint ptr [[P]] to i64
; CHECK-NEXT:    [[TMP3:%.*]] = xor i64 [[TMP2]], 87960930222080
; CHECK-NEXT:    [[TMP4:%.*]] = inttoptr i64 [[TMP3]] to ptr
; CHECK-NEXT:    [[_MSLD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP4]], align 16
; CHECK-NEXT:    ret void
;
; ADDR-LABEL: @load.nxv4i32(
; ADDR-NEXT:    [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
; ADDR-NEXT:    call void @llvm.donothing()
; ADDR-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; ADDR-NEXT:    br i1 [[_MSCMP]], label [[TMP2:%.*]], label [[TMP3:%.*]], !prof [[PROF0]]
; ADDR:       2:
; ADDR-NEXT:    call void @__msan_warning_noreturn() #[[ATTR3]]
; ADDR-NEXT:    unreachable
; ADDR:       3:
; ADDR-NEXT:    [[TMP4:%.*]] = load <vscale x 4 x i32>, ptr [[P:%.*]], align 16
; ADDR-NEXT:    [[TMP5:%.*]] = ptrtoint ptr [[P]] to i64
; ADDR-NEXT:    [[TMP6:%.*]] = xor i64 [[TMP5]], 87960930222080
; ADDR-NEXT:    [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
; ADDR-NEXT:    [[_MSLD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP7]], align 16
; ADDR-NEXT:    ret void
;
; ORIGINS-LABEL: @load.nxv4i32(
; ORIGINS-NEXT:    call void @llvm.donothing()
; ORIGINS-NEXT:    [[TMP1:%.*]] = load <vscale x 4 x i32>, ptr [[P:%.*]], align 16
; ORIGINS-NEXT:    [[TMP2:%.*]] = ptrtoint ptr [[P]] to i64
; ORIGINS-NEXT:    [[TMP3:%.*]] = xor i64 [[TMP2]], 87960930222080
; ORIGINS-NEXT:    [[TMP4:%.*]] = inttoptr i64 [[TMP3]] to ptr
; ORIGINS-NEXT:    [[TMP5:%.*]] = add i64 [[TMP3]], 17592186044416
; ORIGINS-NEXT:    [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
; ORIGINS-NEXT:    [[_MSLD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP4]], align 16
; ORIGINS-NEXT:    [[TMP7:%.*]] = load i32, ptr [[TMP6]], align 16
; ORIGINS-NEXT:    ret void
;
  load <vscale x 4 x i32>, ptr %p
  ret void
}

define void @load.nxv8i32(ptr %p) sanitize_memory {
; CHECK-LABEL: @load.nxv8i32(
; CHECK-NEXT:    call void @llvm.donothing()
; CHECK-NEXT:    [[TMP1:%.*]] = load <vscale x 8 x i32>, ptr [[P:%.*]], align 32
; CHECK-NEXT:    [[TMP2:%.*]] = ptrtoint ptr [[P]] to i64
; CHECK-NEXT:    [[TMP3:%.*]] = xor i64 [[TMP2]], 87960930222080
; CHECK-NEXT:    [[TMP4:%.*]] = inttoptr i64 [[TMP3]] to ptr
; CHECK-NEXT:    [[_MSLD:%.*]] = load <vscale x 8 x i32>, ptr [[TMP4]], align 32
; CHECK-NEXT:    ret void
;
; ADDR-LABEL: @load.nxv8i32(
; ADDR-NEXT:    [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
; ADDR-NEXT:    call void @llvm.donothing()
; ADDR-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; ADDR-NEXT:    br i1 [[_MSCMP]], label [[TMP2:%.*]], label [[TMP3:%.*]], !prof [[PROF0]]
; ADDR:       2:
; ADDR-NEXT:    call void @__msan_warning_noreturn() #[[ATTR3]]
; ADDR-NEXT:    unreachable
; ADDR:       3:
; ADDR-NEXT:    [[TMP4:%.*]] = load <vscale x 8 x i32>, ptr [[P:%.*]], align 32
; ADDR-NEXT:    [[TMP5:%.*]] = ptrtoint ptr [[P]] to i64
; ADDR-NEXT:    [[TMP6:%.*]] = xor i64 [[TMP5]], 87960930222080
; ADDR-NEXT:    [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
; ADDR-NEXT:    [[_MSLD:%.*]] = load <vscale x 8 x i32>, ptr [[TMP7]], align 32
; ADDR-NEXT:    ret void
;
; ORIGINS-LABEL: @load.nxv8i32(
; ORIGINS-NEXT:    call void @llvm.donothing()
; ORIGINS-NEXT:    [[TMP1:%.*]] = load <vscale x 8 x i32>, ptr [[P:%.*]], align 32
; ORIGINS-NEXT:    [[TMP2:%.*]] = ptrtoint ptr [[P]] to i64
; ORIGINS-NEXT:    [[TMP3:%.*]] = xor i64 [[TMP2]], 87960930222080
; ORIGINS-NEXT:    [[TMP4:%.*]] = inttoptr i64 [[TMP3]] to ptr
; ORIGINS-NEXT:    [[TMP5:%.*]] = add i64 [[TMP3]], 17592186044416
; ORIGINS-NEXT:    [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
; ORIGINS-NEXT:    [[_MSLD:%.*]] = load <vscale x 8 x i32>, ptr [[TMP4]], align 32
; ORIGINS-NEXT:    [[TMP7:%.*]] = load i32, ptr [[TMP6]], align 32
; ORIGINS-NEXT:    ret void
;
  load <vscale x 8 x i32>, ptr %p
  ret void
}

define void @load.nxv16i32(ptr %p) sanitize_memory {
; CHECK-LABEL: @load.nxv16i32(
; CHECK-NEXT:    call void @llvm.donothing()
; CHECK-NEXT:    [[TMP1:%.*]] = load <vscale x 16 x i32>, ptr [[P:%.*]], align 64
; CHECK-NEXT:    [[TMP2:%.*]] = ptrtoint ptr [[P]] to i64
; CHECK-NEXT:    [[TMP3:%.*]] = xor i64 [[TMP2]], 87960930222080
; CHECK-NEXT:    [[TMP4:%.*]] = inttoptr i64 [[TMP3]] to ptr
; CHECK-NEXT:    [[_MSLD:%.*]] = load <vscale x 16 x i32>, ptr [[TMP4]], align 64
; CHECK-NEXT:    ret void
;
; ADDR-LABEL: @load.nxv16i32(
; ADDR-NEXT:    [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
; ADDR-NEXT:    call void @llvm.donothing()
; ADDR-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; ADDR-NEXT:    br i1 [[_MSCMP]], label [[TMP2:%.*]], label [[TMP3:%.*]], !prof [[PROF0]]
; ADDR:       2:
; ADDR-NEXT:    call void @__msan_warning_noreturn() #[[ATTR3]]
; ADDR-NEXT:    unreachable
; ADDR:       3:
; ADDR-NEXT:    [[TMP4:%.*]] = load <vscale x 16 x i32>, ptr [[P:%.*]], align 64
; ADDR-NEXT:    [[TMP5:%.*]] = ptrtoint ptr [[P]] to i64
; ADDR-NEXT:    [[TMP6:%.*]] = xor i64 [[TMP5]], 87960930222080
; ADDR-NEXT:    [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
; ADDR-NEXT:    [[_MSLD:%.*]] = load <vscale x 16 x i32>, ptr [[TMP7]], align 64
; ADDR-NEXT:    ret void
;
; ORIGINS-LABEL: @load.nxv16i32(
; ORIGINS-NEXT:    call void @llvm.donothing()
; ORIGINS-NEXT:    [[TMP1:%.*]] = load <vscale x 16 x i32>, ptr [[P:%.*]], align 64
; ORIGINS-NEXT:    [[TMP2:%.*]] = ptrtoint ptr [[P]] to i64
; ORIGINS-NEXT:    [[TMP3:%.*]] = xor i64 [[TMP2]], 87960930222080
; ORIGINS-NEXT:    [[TMP4:%.*]] = inttoptr i64 [[TMP3]] to ptr
; ORIGINS-NEXT:    [[TMP5:%.*]] = add i64 [[TMP3]], 17592186044416
; ORIGINS-NEXT:    [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
; ORIGINS-NEXT:    [[_MSLD:%.*]] = load <vscale x 16 x i32>, ptr [[TMP4]], align 64
; ORIGINS-NEXT:    [[TMP7:%.*]] = load i32, ptr [[TMP6]], align 64
; ORIGINS-NEXT:    ret void
;
  load <vscale x 16 x i32>, ptr %p
  ret void
}


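; Scalable vector stores cannot clear the origin as one fixed-size chunk, so
; the ORIGINS run falls back to the generic path: the shadow is collapsed with
; llvm.vector.reduce.or and, if it is non-zero, a loop writes the i32 origin to
; each 4-byte granule, with the trip count derived from llvm.vscale.i64
; (vscale * store size in bytes, plus 3, divided by 4).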
define void @store.nxv1i32(ptr %p) sanitize_memory {
; CHECK-LABEL: @store.nxv1i32(
; CHECK-NEXT:    call void @llvm.donothing()
; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[P:%.*]] to i64
; CHECK-NEXT:    [[TMP2:%.*]] = xor i64 [[TMP1]], 87960930222080
; CHECK-NEXT:    [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to ptr
; CHECK-NEXT:    store <vscale x 1 x i32> zeroinitializer, ptr [[TMP3]], align 4
; CHECK-NEXT:    store <vscale x 1 x i32> zeroinitializer, ptr [[P]], align 4
; CHECK-NEXT:    ret void
;
; ADDR-LABEL: @store.nxv1i32(
; ADDR-NEXT:    [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
; ADDR-NEXT:    call void @llvm.donothing()
; ADDR-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; ADDR-NEXT:    br i1 [[_MSCMP]], label [[TMP2:%.*]], label [[TMP3:%.*]], !prof [[PROF0]]
; ADDR:       2:
; ADDR-NEXT:    call void @__msan_warning_noreturn() #[[ATTR3]]
; ADDR-NEXT:    unreachable
; ADDR:       3:
; ADDR-NEXT:    [[TMP4:%.*]] = ptrtoint ptr [[P:%.*]] to i64
; ADDR-NEXT:    [[TMP5:%.*]] = xor i64 [[TMP4]], 87960930222080
; ADDR-NEXT:    [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
; ADDR-NEXT:    store <vscale x 1 x i32> zeroinitializer, ptr [[TMP6]], align 4
; ADDR-NEXT:    store <vscale x 1 x i32> zeroinitializer, ptr [[P]], align 4
; ADDR-NEXT:    ret void
;
; ORIGINS-LABEL: @store.nxv1i32(
; ORIGINS-NEXT:    call void @llvm.donothing()
; ORIGINS-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[P:%.*]] to i64
; ORIGINS-NEXT:    [[TMP2:%.*]] = xor i64 [[TMP1]], 87960930222080
; ORIGINS-NEXT:    [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to ptr
; ORIGINS-NEXT:    [[TMP4:%.*]] = add i64 [[TMP2]], 17592186044416
; ORIGINS-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
; ORIGINS-NEXT:    store <vscale x 1 x i32> zeroinitializer, ptr [[TMP3]], align 4
; ORIGINS-NEXT:    [[TMP6:%.*]] = call i32 @llvm.vector.reduce.or.nxv1i32(<vscale x 1 x i32> zeroinitializer)
; ORIGINS-NEXT:    [[_MSCMP:%.*]] = icmp ne i32 [[TMP6]], 0
; ORIGINS-NEXT:    br i1 [[_MSCMP]], label [[TMP7:%.*]], label [[TMP13:%.*]], !prof [[PROF0:![0-9]+]]
; ORIGINS:       7:
; ORIGINS-NEXT:    [[TMP8:%.*]] = call i64 @llvm.vscale.i64()
; ORIGINS-NEXT:    [[TMP9:%.*]] = mul i64 [[TMP8]], 4
; ORIGINS-NEXT:    [[TMP10:%.*]] = add i64 [[TMP9]], 3
; ORIGINS-NEXT:    [[TMP11:%.*]] = udiv i64 [[TMP10]], 4
; ORIGINS-NEXT:    br label [[DOTSPLIT:%.*]]
; ORIGINS:       .split:
; ORIGINS-NEXT:    [[IV:%.*]] = phi i64 [ 0, [[TMP7]] ], [ [[IV_NEXT:%.*]], [[DOTSPLIT]] ]
; ORIGINS-NEXT:    [[TMP12:%.*]] = getelementptr i32, ptr [[TMP5]], i64 [[IV]]
; ORIGINS-NEXT:    store i32 0, ptr [[TMP12]], align 4
; ORIGINS-NEXT:    [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; ORIGINS-NEXT:    [[IV_CHECK:%.*]] = icmp eq i64 [[IV_NEXT]], [[TMP11]]
; ORIGINS-NEXT:    br i1 [[IV_CHECK]], label [[DOTSPLIT_SPLIT:%.*]], label [[DOTSPLIT]]
; ORIGINS:       .split.split:
; ORIGINS-NEXT:    br label [[TMP13]]
; ORIGINS:       13:
; ORIGINS-NEXT:    store <vscale x 1 x i32> zeroinitializer, ptr [[P]], align 4
; ORIGINS-NEXT:    ret void
;
  store <vscale x 1 x i32> zeroinitializer, ptr %p
  ret void
}

define void @store.nxv2i32(ptr %p) sanitize_memory {
; CHECK-LABEL: @store.nxv2i32(
; CHECK-NEXT:    call void @llvm.donothing()
; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[P:%.*]] to i64
; CHECK-NEXT:    [[TMP2:%.*]] = xor i64 [[TMP1]], 87960930222080
; CHECK-NEXT:    [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to ptr
; CHECK-NEXT:    store <vscale x 2 x i32> zeroinitializer, ptr [[TMP3]], align 8
; CHECK-NEXT:    store <vscale x 2 x i32> zeroinitializer, ptr [[P]], align 8
; CHECK-NEXT:    ret void
;
; ADDR-LABEL: @store.nxv2i32(
; ADDR-NEXT:    [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
; ADDR-NEXT:    call void @llvm.donothing()
; ADDR-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; ADDR-NEXT:    br i1 [[_MSCMP]], label [[TMP2:%.*]], label [[TMP3:%.*]], !prof [[PROF0]]
; ADDR:       2:
; ADDR-NEXT:    call void @__msan_warning_noreturn() #[[ATTR3]]
; ADDR-NEXT:    unreachable
; ADDR:       3:
; ADDR-NEXT:    [[TMP4:%.*]] = ptrtoint ptr [[P:%.*]] to i64
; ADDR-NEXT:    [[TMP5:%.*]] = xor i64 [[TMP4]], 87960930222080
; ADDR-NEXT:    [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
; ADDR-NEXT:    store <vscale x 2 x i32> zeroinitializer, ptr [[TMP6]], align 8
; ADDR-NEXT:    store <vscale x 2 x i32> zeroinitializer, ptr [[P]], align 8
; ADDR-NEXT:    ret void
;
; ORIGINS-LABEL: @store.nxv2i32(
; ORIGINS-NEXT:    call void @llvm.donothing()
; ORIGINS-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[P:%.*]] to i64
; ORIGINS-NEXT:    [[TMP2:%.*]] = xor i64 [[TMP1]], 87960930222080
; ORIGINS-NEXT:    [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to ptr
; ORIGINS-NEXT:    [[TMP4:%.*]] = add i64 [[TMP2]], 17592186044416
; ORIGINS-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
; ORIGINS-NEXT:    store <vscale x 2 x i32> zeroinitializer, ptr [[TMP3]], align 8
; ORIGINS-NEXT:    [[TMP6:%.*]] = call i32 @llvm.vector.reduce.or.nxv2i32(<vscale x 2 x i32> zeroinitializer)
; ORIGINS-NEXT:    [[_MSCMP:%.*]] = icmp ne i32 [[TMP6]], 0
; ORIGINS-NEXT:    br i1 [[_MSCMP]], label [[TMP7:%.*]], label [[TMP13:%.*]], !prof [[PROF0]]
; ORIGINS:       7:
; ORIGINS-NEXT:    [[TMP8:%.*]] = call i64 @llvm.vscale.i64()
; ORIGINS-NEXT:    [[TMP9:%.*]] = mul i64 [[TMP8]], 8
; ORIGINS-NEXT:    [[TMP10:%.*]] = add i64 [[TMP9]], 3
; ORIGINS-NEXT:    [[TMP11:%.*]] = udiv i64 [[TMP10]], 4
; ORIGINS-NEXT:    br label [[DOTSPLIT:%.*]]
; ORIGINS:       .split:
; ORIGINS-NEXT:    [[IV:%.*]] = phi i64 [ 0, [[TMP7]] ], [ [[IV_NEXT:%.*]], [[DOTSPLIT]] ]
; ORIGINS-NEXT:    [[TMP12:%.*]] = getelementptr i32, ptr [[TMP5]], i64 [[IV]]
; ORIGINS-NEXT:    store i32 0, ptr [[TMP12]], align 4
; ORIGINS-NEXT:    [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; ORIGINS-NEXT:    [[IV_CHECK:%.*]] = icmp eq i64 [[IV_NEXT]], [[TMP11]]
; ORIGINS-NEXT:    br i1 [[IV_CHECK]], label [[DOTSPLIT_SPLIT:%.*]], label [[DOTSPLIT]]
; ORIGINS:       .split.split:
; ORIGINS-NEXT:    br label [[TMP13]]
; ORIGINS:       13:
; ORIGINS-NEXT:    store <vscale x 2 x i32> zeroinitializer, ptr [[P]], align 8
; ORIGINS-NEXT:    ret void
;
  store <vscale x 2 x i32> zeroinitializer, ptr %p
  ret void
}

define void @store.nxv4i32(ptr %p) sanitize_memory {
; CHECK-LABEL: @store.nxv4i32(
; CHECK-NEXT:    call void @llvm.donothing()
; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[P:%.*]] to i64
; CHECK-NEXT:    [[TMP2:%.*]] = xor i64 [[TMP1]], 87960930222080
; CHECK-NEXT:    [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to ptr
; CHECK-NEXT:    store <vscale x 4 x i32> zeroinitializer, ptr [[TMP3]], align 16
; CHECK-NEXT:    store <vscale x 4 x i32> zeroinitializer, ptr [[P]], align 16
; CHECK-NEXT:    ret void
;
; ADDR-LABEL: @store.nxv4i32(
; ADDR-NEXT:    [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
; ADDR-NEXT:    call void @llvm.donothing()
; ADDR-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; ADDR-NEXT:    br i1 [[_MSCMP]], label [[TMP2:%.*]], label [[TMP3:%.*]], !prof [[PROF0]]
; ADDR:       2:
; ADDR-NEXT:    call void @__msan_warning_noreturn() #[[ATTR3]]
; ADDR-NEXT:    unreachable
; ADDR:       3:
; ADDR-NEXT:    [[TMP4:%.*]] = ptrtoint ptr [[P:%.*]] to i64
; ADDR-NEXT:    [[TMP5:%.*]] = xor i64 [[TMP4]], 87960930222080
; ADDR-NEXT:    [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
; ADDR-NEXT:    store <vscale x 4 x i32> zeroinitializer, ptr [[TMP6]], align 16
; ADDR-NEXT:    store <vscale x 4 x i32> zeroinitializer, ptr [[P]], align 16
; ADDR-NEXT:    ret void
;
; ORIGINS-LABEL: @store.nxv4i32(
; ORIGINS-NEXT:    call void @llvm.donothing()
; ORIGINS-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[P:%.*]] to i64
; ORIGINS-NEXT:    [[TMP2:%.*]] = xor i64 [[TMP1]], 87960930222080
; ORIGINS-NEXT:    [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to ptr
; ORIGINS-NEXT:    [[TMP4:%.*]] = add i64 [[TMP2]], 17592186044416
; ORIGINS-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
; ORIGINS-NEXT:    store <vscale x 4 x i32> zeroinitializer, ptr [[TMP3]], align 16
; ORIGINS-NEXT:    [[TMP6:%.*]] = call i32 @llvm.vector.reduce.or.nxv4i32(<vscale x 4 x i32> zeroinitializer)
; ORIGINS-NEXT:    [[_MSCMP:%.*]] = icmp ne i32 [[TMP6]], 0
; ORIGINS-NEXT:    br i1 [[_MSCMP]], label [[TMP7:%.*]], label [[TMP13:%.*]], !prof [[PROF0]]
; ORIGINS:       7:
; ORIGINS-NEXT:    [[TMP8:%.*]] = call i64 @llvm.vscale.i64()
; ORIGINS-NEXT:    [[TMP9:%.*]] = mul i64 [[TMP8]], 16
; ORIGINS-NEXT:    [[TMP10:%.*]] = add i64 [[TMP9]], 3
; ORIGINS-NEXT:    [[TMP11:%.*]] = udiv i64 [[TMP10]], 4
; ORIGINS-NEXT:    br label [[DOTSPLIT:%.*]]
; ORIGINS:       .split:
; ORIGINS-NEXT:    [[IV:%.*]] = phi i64 [ 0, [[TMP7]] ], [ [[IV_NEXT:%.*]], [[DOTSPLIT]] ]
; ORIGINS-NEXT:    [[TMP12:%.*]] = getelementptr i32, ptr [[TMP5]], i64 [[IV]]
; ORIGINS-NEXT:    store i32 0, ptr [[TMP12]], align 4
; ORIGINS-NEXT:    [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; ORIGINS-NEXT:    [[IV_CHECK:%.*]] = icmp eq i64 [[IV_NEXT]], [[TMP11]]
; ORIGINS-NEXT:    br i1 [[IV_CHECK]], label [[DOTSPLIT_SPLIT:%.*]], label [[DOTSPLIT]]
; ORIGINS:       .split.split:
; ORIGINS-NEXT:    br label [[TMP13]]
; ORIGINS:       13:
; ORIGINS-NEXT:    store <vscale x 4 x i32> zeroinitializer, ptr [[P]], align 16
; ORIGINS-NEXT:    ret void
;
  store <vscale x 4 x i32> zeroinitializer, ptr %p
  ret void
}

define void @store.nxv8i32(ptr %p) sanitize_memory {
; CHECK-LABEL: @store.nxv8i32(
; CHECK-NEXT:    call void @llvm.donothing()
; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[P:%.*]] to i64
; CHECK-NEXT:    [[TMP2:%.*]] = xor i64 [[TMP1]], 87960930222080
; CHECK-NEXT:    [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to ptr
; CHECK-NEXT:    store <vscale x 8 x i32> zeroinitializer, ptr [[TMP3]], align 32
; CHECK-NEXT:    store <vscale x 8 x i32> zeroinitializer, ptr [[P]], align 32
; CHECK-NEXT:    ret void
;
; ADDR-LABEL: @store.nxv8i32(
; ADDR-NEXT:    [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
; ADDR-NEXT:    call void @llvm.donothing()
; ADDR-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; ADDR-NEXT:    br i1 [[_MSCMP]], label [[TMP2:%.*]], label [[TMP3:%.*]], !prof [[PROF0]]
; ADDR:       2:
; ADDR-NEXT:    call void @__msan_warning_noreturn() #[[ATTR3]]
; ADDR-NEXT:    unreachable
; ADDR:       3:
; ADDR-NEXT:    [[TMP4:%.*]] = ptrtoint ptr [[P:%.*]] to i64
; ADDR-NEXT:    [[TMP5:%.*]] = xor i64 [[TMP4]], 87960930222080
; ADDR-NEXT:    [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
; ADDR-NEXT:    store <vscale x 8 x i32> zeroinitializer, ptr [[TMP6]], align 32
; ADDR-NEXT:    store <vscale x 8 x i32> zeroinitializer, ptr [[P]], align 32
; ADDR-NEXT:    ret void
;
; ORIGINS-LABEL: @store.nxv8i32(
; ORIGINS-NEXT:    call void @llvm.donothing()
; ORIGINS-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[P:%.*]] to i64
; ORIGINS-NEXT:    [[TMP2:%.*]] = xor i64 [[TMP1]], 87960930222080
; ORIGINS-NEXT:    [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to ptr
; ORIGINS-NEXT:    [[TMP4:%.*]] = add i64 [[TMP2]], 17592186044416
; ORIGINS-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
; ORIGINS-NEXT:    store <vscale x 8 x i32> zeroinitializer, ptr [[TMP3]], align 32
; ORIGINS-NEXT:    [[TMP6:%.*]] = call i32 @llvm.vector.reduce.or.nxv8i32(<vscale x 8 x i32> zeroinitializer)
; ORIGINS-NEXT:    [[_MSCMP:%.*]] = icmp ne i32 [[TMP6]], 0
; ORIGINS-NEXT:    br i1 [[_MSCMP]], label [[TMP7:%.*]], label [[TMP13:%.*]], !prof [[PROF0]]
; ORIGINS:       7:
; ORIGINS-NEXT:    [[TMP8:%.*]] = call i64 @llvm.vscale.i64()
; ORIGINS-NEXT:    [[TMP9:%.*]] = mul i64 [[TMP8]], 32
; ORIGINS-NEXT:    [[TMP10:%.*]] = add i64 [[TMP9]], 3
; ORIGINS-NEXT:    [[TMP11:%.*]] = udiv i64 [[TMP10]], 4
; ORIGINS-NEXT:    br label [[DOTSPLIT:%.*]]
; ORIGINS:       .split:
; ORIGINS-NEXT:    [[IV:%.*]] = phi i64 [ 0, [[TMP7]] ], [ [[IV_NEXT:%.*]], [[DOTSPLIT]] ]
; ORIGINS-NEXT:    [[TMP12:%.*]] = getelementptr i32, ptr [[TMP5]], i64 [[IV]]
; ORIGINS-NEXT:    store i32 0, ptr [[TMP12]], align 4
; ORIGINS-NEXT:    [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; ORIGINS-NEXT:    [[IV_CHECK:%.*]] = icmp eq i64 [[IV_NEXT]], [[TMP11]]
; ORIGINS-NEXT:    br i1 [[IV_CHECK]], label [[DOTSPLIT_SPLIT:%.*]], label [[DOTSPLIT]]
; ORIGINS:       .split.split:
; ORIGINS-NEXT:    br label [[TMP13]]
; ORIGINS:       13:
; ORIGINS-NEXT:    store <vscale x 8 x i32> zeroinitializer, ptr [[P]], align 32
; ORIGINS-NEXT:    ret void
;
  store <vscale x 8 x i32> zeroinitializer, ptr %p
  ret void
}

define void @store.nxv16i32(ptr %p) sanitize_memory {
; CHECK-LABEL: @store.nxv16i32(
; CHECK-NEXT:    call void @llvm.donothing()
; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[P:%.*]] to i64
; CHECK-NEXT:    [[TMP2:%.*]] = xor i64 [[TMP1]], 87960930222080
; CHECK-NEXT:    [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to ptr
; CHECK-NEXT:    store <vscale x 16 x i32> zeroinitializer, ptr [[TMP3]], align 64
; CHECK-NEXT:    store <vscale x 16 x i32> zeroinitializer, ptr [[P]], align 64
; CHECK-NEXT:    ret void
;
; ADDR-LABEL: @store.nxv16i32(
; ADDR-NEXT:    [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
; ADDR-NEXT:    call void @llvm.donothing()
; ADDR-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; ADDR-NEXT:    br i1 [[_MSCMP]], label [[TMP2:%.*]], label [[TMP3:%.*]], !prof [[PROF0]]
; ADDR:       2:
; ADDR-NEXT:    call void @__msan_warning_noreturn() #[[ATTR3]]
; ADDR-NEXT:    unreachable
; ADDR:       3:
; ADDR-NEXT:    [[TMP4:%.*]] = ptrtoint ptr [[P:%.*]] to i64
; ADDR-NEXT:    [[TMP5:%.*]] = xor i64 [[TMP4]], 87960930222080
; ADDR-NEXT:    [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
; ADDR-NEXT:    store <vscale x 16 x i32> zeroinitializer, ptr [[TMP6]], align 64
; ADDR-NEXT:    store <vscale x 16 x i32> zeroinitializer, ptr [[P]], align 64
; ADDR-NEXT:    ret void
;
; ORIGINS-LABEL: @store.nxv16i32(
; ORIGINS-NEXT:    call void @llvm.donothing()
; ORIGINS-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[P:%.*]] to i64
; ORIGINS-NEXT:    [[TMP2:%.*]] = xor i64 [[TMP1]], 87960930222080
; ORIGINS-NEXT:    [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to ptr
; ORIGINS-NEXT:    [[TMP4:%.*]] = add i64 [[TMP2]], 17592186044416
; ORIGINS-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
; ORIGINS-NEXT:    store <vscale x 16 x i32> zeroinitializer, ptr [[TMP3]], align 64
; ORIGINS-NEXT:    [[TMP6:%.*]] = call i32 @llvm.vector.reduce.or.nxv16i32(<vscale x 16 x i32> zeroinitializer)
; ORIGINS-NEXT:    [[_MSCMP:%.*]] = icmp ne i32 [[TMP6]], 0
; ORIGINS-NEXT:    br i1 [[_MSCMP]], label [[TMP7:%.*]], label [[TMP13:%.*]], !prof [[PROF0]]
; ORIGINS:       7:
; ORIGINS-NEXT:    [[TMP8:%.*]] = call i64 @llvm.vscale.i64()
; ORIGINS-NEXT:    [[TMP9:%.*]] = mul i64 [[TMP8]], 64
; ORIGINS-NEXT:    [[TMP10:%.*]] = add i64 [[TMP9]], 3
; ORIGINS-NEXT:    [[TMP11:%.*]] = udiv i64 [[TMP10]], 4
; ORIGINS-NEXT:    br label [[DOTSPLIT:%.*]]
; ORIGINS:       .split:
; ORIGINS-NEXT:    [[IV:%.*]] = phi i64 [ 0, [[TMP7]] ], [ [[IV_NEXT:%.*]], [[DOTSPLIT]] ]
; ORIGINS-NEXT:    [[TMP12:%.*]] = getelementptr i32, ptr [[TMP5]], i64 [[IV]]
; ORIGINS-NEXT:    store i32 0, ptr [[TMP12]], align 4
; ORIGINS-NEXT:    [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; ORIGINS-NEXT:    [[IV_CHECK:%.*]] = icmp eq i64 [[IV_NEXT]], [[TMP11]]
; ORIGINS-NEXT:    br i1 [[IV_CHECK]], label [[DOTSPLIT_SPLIT:%.*]], label [[DOTSPLIT]]
; ORIGINS:       .split.split:
; ORIGINS-NEXT:    br label [[TMP13]]
; ORIGINS:       13:
; ORIGINS-NEXT:    store <vscale x 16 x i32> zeroinitializer, ptr [[P]], align 64
; ORIGINS-NEXT:    ret void
;
  store <vscale x 16 x i32> zeroinitializer, ptr %p
  ret void
}