; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zvfhmin,+zvfbfmin \
; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfhmin,+zvfbfmin \
; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
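; Strided loads (vlse) read one element every `stride` bytes. Each pair of
; tests below covers the unmasked (@llvm.riscv.vlse) and masked
; (@llvm.riscv.vlse.mask) intrinsic for one element type and LMUL, across
; i8/i16/i32/i64, half, bfloat, float, and double. The trailing `iXLen 1` on
; the masked calls is the policy operand (tail agnostic, mask undisturbed),
; which is why the masked checks expect "ta, mu" in the vsetvli.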

declare <vscale x 1 x i64> @llvm.riscv.vlse.nxv1i64(
  <vscale x 1 x i64>,
  ptr,
  iXLen,
  iXLen);

define <vscale x 1 x i64> @intrinsic_vlse_v_nxv1i64_nxv1i64(ptr %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vlse_v_nxv1i64_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
; CHECK-NEXT:    vlse64.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vlse.nxv1i64(
    <vscale x 1 x i64> undef,
    ptr %0,
    iXLen %1,
    iXLen %2)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vlse.mask.nxv1i64(
  <vscale x 1 x i64>,
  ptr,
  iXLen,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x i64> @intrinsic_vlse_mask_v_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, ptr %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vlse_mask_v_nxv1i64_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, mu
; CHECK-NEXT:    vlse64.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vlse.mask.nxv1i64(
    <vscale x 1 x i64> %0,
    ptr %1,
    iXLen %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vlse.nxv2i64(
  <vscale x 2 x i64>,
  ptr,
  iXLen,
  iXLen);

define <vscale x 2 x i64> @intrinsic_vlse_v_nxv2i64_nxv2i64(ptr %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vlse_v_nxv2i64_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
; CHECK-NEXT:    vlse64.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vlse.nxv2i64(
    <vscale x 2 x i64> undef,
    ptr %0,
    iXLen %1,
    iXLen %2)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vlse.mask.nxv2i64(
  <vscale x 2 x i64>,
  ptr,
  iXLen,
  <vscale x 2 x i1>,
  iXLen,
  iXLen);

define <vscale x 2 x i64> @intrinsic_vlse_mask_v_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, ptr %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vlse_mask_v_nxv2i64_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e64, m2, ta, mu
; CHECK-NEXT:    vlse64.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vlse.mask.nxv2i64(
    <vscale x 2 x i64> %0,
    ptr %1,
    iXLen %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vlse.nxv4i64(
  <vscale x 4 x i64>,
  ptr,
  iXLen,
  iXLen);

define <vscale x 4 x i64> @intrinsic_vlse_v_nxv4i64_nxv4i64(ptr %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vlse_v_nxv4i64_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
; CHECK-NEXT:    vlse64.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vlse.nxv4i64(
    <vscale x 4 x i64> undef,
    ptr %0,
    iXLen %1,
    iXLen %2)

  ret <vscale x 4 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vlse.mask.nxv4i64(
  <vscale x 4 x i64>,
  ptr,
  iXLen,
  <vscale x 4 x i1>,
  iXLen,
  iXLen);

define <vscale x 4 x i64> @intrinsic_vlse_mask_v_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, ptr %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vlse_mask_v_nxv4i64_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e64, m4, ta, mu
; CHECK-NEXT:    vlse64.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vlse.mask.nxv4i64(
    <vscale x 4 x i64> %0,
    ptr %1,
    iXLen %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.vlse.nxv8i64(
  <vscale x 8 x i64>,
  ptr,
  iXLen,
  iXLen);

define <vscale x 8 x i64> @intrinsic_vlse_v_nxv8i64_nxv8i64(ptr %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vlse_v_nxv8i64_nxv8i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
; CHECK-NEXT:    vlse64.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vlse.nxv8i64(
    <vscale x 8 x i64> undef,
    ptr %0,
    iXLen %1,
    iXLen %2)

  ret <vscale x 8 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.vlse.mask.nxv8i64(
  <vscale x 8 x i64>,
  ptr,
  iXLen,
  <vscale x 8 x i1>,
  iXLen,
  iXLen);

define <vscale x 8 x i64> @intrinsic_vlse_mask_v_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, ptr %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vlse_mask_v_nxv8i64_nxv8i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e64, m8, ta, mu
; CHECK-NEXT:    vlse64.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vlse.mask.nxv8i64(
    <vscale x 8 x i64> %0,
    ptr %1,
    iXLen %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x i64> %a
}

declare <vscale x 1 x double> @llvm.riscv.vlse.nxv1f64(
  <vscale x 1 x double>,
  ptr,
  iXLen,
  iXLen);

define <vscale x 1 x double> @intrinsic_vlse_v_nxv1f64_nxv1f64(ptr %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vlse_v_nxv1f64_nxv1f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
; CHECK-NEXT:    vlse64.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vlse.nxv1f64(
    <vscale x 1 x double> undef,
    ptr %0,
    iXLen %1,
    iXLen %2)

  ret <vscale x 1 x double> %a
}

declare <vscale x 1 x double> @llvm.riscv.vlse.mask.nxv1f64(
  <vscale x 1 x double>,
  ptr,
  iXLen,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x double> @intrinsic_vlse_mask_v_nxv1f64_nxv1f64(<vscale x 1 x double> %0, ptr %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vlse_mask_v_nxv1f64_nxv1f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, mu
; CHECK-NEXT:    vlse64.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vlse.mask.nxv1f64(
    <vscale x 1 x double> %0,
    ptr %1,
    iXLen %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x double> %a
}

declare <vscale x 2 x double> @llvm.riscv.vlse.nxv2f64(
  <vscale x 2 x double>,
  ptr,
  iXLen,
  iXLen);

define <vscale x 2 x double> @intrinsic_vlse_v_nxv2f64_nxv2f64(ptr %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vlse_v_nxv2f64_nxv2f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
; CHECK-NEXT:    vlse64.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x double> @llvm.riscv.vlse.nxv2f64(
    <vscale x 2 x double> undef,
    ptr %0,
    iXLen %1,
    iXLen %2)

  ret <vscale x 2 x double> %a
}

declare <vscale x 2 x double> @llvm.riscv.vlse.mask.nxv2f64(
  <vscale x 2 x double>,
  ptr,
  iXLen,
  <vscale x 2 x i1>,
  iXLen,
  iXLen);

define <vscale x 2 x double> @intrinsic_vlse_mask_v_nxv2f64_nxv2f64(<vscale x 2 x double> %0, ptr %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vlse_mask_v_nxv2f64_nxv2f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e64, m2, ta, mu
; CHECK-NEXT:    vlse64.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x double> @llvm.riscv.vlse.mask.nxv2f64(
    <vscale x 2 x double> %0,
    ptr %1,
    iXLen %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x double> %a
}

declare <vscale x 4 x double> @llvm.riscv.vlse.nxv4f64(
  <vscale x 4 x double>,
  ptr,
  iXLen,
  iXLen);

define <vscale x 4 x double> @intrinsic_vlse_v_nxv4f64_nxv4f64(ptr %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vlse_v_nxv4f64_nxv4f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
; CHECK-NEXT:    vlse64.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x double> @llvm.riscv.vlse.nxv4f64(
    <vscale x 4 x double> undef,
    ptr %0,
    iXLen %1,
    iXLen %2)

  ret <vscale x 4 x double> %a
}

declare <vscale x 4 x double> @llvm.riscv.vlse.mask.nxv4f64(
  <vscale x 4 x double>,
  ptr,
  iXLen,
  <vscale x 4 x i1>,
  iXLen,
  iXLen);

define <vscale x 4 x double> @intrinsic_vlse_mask_v_nxv4f64_nxv4f64(<vscale x 4 x double> %0, ptr %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vlse_mask_v_nxv4f64_nxv4f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e64, m4, ta, mu
; CHECK-NEXT:    vlse64.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x double> @llvm.riscv.vlse.mask.nxv4f64(
    <vscale x 4 x double> %0,
    ptr %1,
    iXLen %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x double> %a
}

declare <vscale x 8 x double> @llvm.riscv.vlse.nxv8f64(
  <vscale x 8 x double>,
  ptr,
  iXLen,
  iXLen);

define <vscale x 8 x double> @intrinsic_vlse_v_nxv8f64_nxv8f64(ptr %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vlse_v_nxv8f64_nxv8f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
; CHECK-NEXT:    vlse64.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x double> @llvm.riscv.vlse.nxv8f64(
    <vscale x 8 x double> undef,
    ptr %0,
    iXLen %1,
    iXLen %2)

  ret <vscale x 8 x double> %a
}

declare <vscale x 8 x double> @llvm.riscv.vlse.mask.nxv8f64(
  <vscale x 8 x double>,
  ptr,
  iXLen,
  <vscale x 8 x i1>,
  iXLen,
  iXLen);

define <vscale x 8 x double> @intrinsic_vlse_mask_v_nxv8f64_nxv8f64(<vscale x 8 x double> %0, ptr %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vlse_mask_v_nxv8f64_nxv8f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e64, m8, ta, mu
; CHECK-NEXT:    vlse64.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x double> @llvm.riscv.vlse.mask.nxv8f64(
    <vscale x 8 x double> %0,
    ptr %1,
    iXLen %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x double> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vlse.nxv1i32(
  <vscale x 1 x i32>,
  ptr,
  iXLen,
  iXLen);

define <vscale x 1 x i32> @intrinsic_vlse_v_nxv1i32_nxv1i32(ptr %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vlse_v_nxv1i32_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e32, mf2, ta, ma
; CHECK-NEXT:    vlse32.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vlse.nxv1i32(
    <vscale x 1 x i32> undef,
    ptr %0,
    iXLen %1,
    iXLen %2)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vlse.mask.nxv1i32(
  <vscale x 1 x i32>,
  ptr,
  iXLen,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x i32> @intrinsic_vlse_mask_v_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, ptr %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vlse_mask_v_nxv1i32_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e32, mf2, ta, mu
; CHECK-NEXT:    vlse32.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vlse.mask.nxv1i32(
    <vscale x 1 x i32> %0,
    ptr %1,
    iXLen %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vlse.nxv2i32(
  <vscale x 2 x i32>,
  ptr,
  iXLen,
  iXLen);

define <vscale x 2 x i32> @intrinsic_vlse_v_nxv2i32_nxv2i32(ptr %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vlse_v_nxv2i32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e32, m1, ta, ma
; CHECK-NEXT:    vlse32.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vlse.nxv2i32(
    <vscale x 2 x i32> undef,
    ptr %0,
    iXLen %1,
    iXLen %2)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vlse.mask.nxv2i32(
  <vscale x 2 x i32>,
  ptr,
  iXLen,
  <vscale x 2 x i1>,
  iXLen,
  iXLen);

define <vscale x 2 x i32> @intrinsic_vlse_mask_v_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, ptr %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vlse_mask_v_nxv2i32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e32, m1, ta, mu
; CHECK-NEXT:    vlse32.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vlse.mask.nxv2i32(
    <vscale x 2 x i32> %0,
    ptr %1,
    iXLen %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vlse.nxv4i32(
  <vscale x 4 x i32>,
  ptr,
  iXLen,
  iXLen);

define <vscale x 4 x i32> @intrinsic_vlse_v_nxv4i32_nxv4i32(ptr %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vlse_v_nxv4i32_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e32, m2, ta, ma
; CHECK-NEXT:    vlse32.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vlse.nxv4i32(
    <vscale x 4 x i32> undef,
    ptr %0,
    iXLen %1,
    iXLen %2)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vlse.mask.nxv4i32(
  <vscale x 4 x i32>,
  ptr,
  iXLen,
  <vscale x 4 x i1>,
  iXLen,
  iXLen);

define <vscale x 4 x i32> @intrinsic_vlse_mask_v_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, ptr %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vlse_mask_v_nxv4i32_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e32, m2, ta, mu
; CHECK-NEXT:    vlse32.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vlse.mask.nxv4i32(
    <vscale x 4 x i32> %0,
    ptr %1,
    iXLen %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vlse.nxv8i32(
  <vscale x 8 x i32>,
  ptr,
  iXLen,
  iXLen);

define <vscale x 8 x i32> @intrinsic_vlse_v_nxv8i32_nxv8i32(ptr %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vlse_v_nxv8i32_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e32, m4, ta, ma
; CHECK-NEXT:    vlse32.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vlse.nxv8i32(
    <vscale x 8 x i32> undef,
    ptr %0,
    iXLen %1,
    iXLen %2)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vlse.mask.nxv8i32(
  <vscale x 8 x i32>,
  ptr,
  iXLen,
  <vscale x 8 x i1>,
  iXLen,
  iXLen);

define <vscale x 8 x i32> @intrinsic_vlse_mask_v_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, ptr %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vlse_mask_v_nxv8i32_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e32, m4, ta, mu
; CHECK-NEXT:    vlse32.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vlse.mask.nxv8i32(
    <vscale x 8 x i32> %0,
    ptr %1,
    iXLen %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 16 x i32> @llvm.riscv.vlse.nxv16i32(
  <vscale x 16 x i32>,
  ptr,
  iXLen,
  iXLen);

define <vscale x 16 x i32> @intrinsic_vlse_v_nxv16i32_nxv16i32(ptr %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vlse_v_nxv16i32_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e32, m8, ta, ma
; CHECK-NEXT:    vlse32.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vlse.nxv16i32(
    <vscale x 16 x i32> undef,
    ptr %0,
    iXLen %1,
    iXLen %2)

  ret <vscale x 16 x i32> %a
}

declare <vscale x 16 x i32> @llvm.riscv.vlse.mask.nxv16i32(
  <vscale x 16 x i32>,
  ptr,
  iXLen,
  <vscale x 16 x i1>,
  iXLen,
  iXLen);

define <vscale x 16 x i32> @intrinsic_vlse_mask_v_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, ptr %1, iXLen %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vlse_mask_v_nxv16i32_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e32, m8, ta, mu
; CHECK-NEXT:    vlse32.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vlse.mask.nxv16i32(
    <vscale x 16 x i32> %0,
    ptr %1,
    iXLen %2,
    <vscale x 16 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 16 x i32> %a
}

declare <vscale x 1 x float> @llvm.riscv.vlse.nxv1f32(
  <vscale x 1 x float>,
  ptr,
  iXLen,
  iXLen);

define <vscale x 1 x float> @intrinsic_vlse_v_nxv1f32_nxv1f32(ptr %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vlse_v_nxv1f32_nxv1f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e32, mf2, ta, ma
; CHECK-NEXT:    vlse32.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vlse.nxv1f32(
    <vscale x 1 x float> undef,
    ptr %0,
    iXLen %1,
    iXLen %2)

  ret <vscale x 1 x float> %a
}

declare <vscale x 1 x float> @llvm.riscv.vlse.mask.nxv1f32(
  <vscale x 1 x float>,
  ptr,
  iXLen,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x float> @intrinsic_vlse_mask_v_nxv1f32_nxv1f32(<vscale x 1 x float> %0, ptr %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vlse_mask_v_nxv1f32_nxv1f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e32, mf2, ta, mu
; CHECK-NEXT:    vlse32.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vlse.mask.nxv1f32(
    <vscale x 1 x float> %0,
    ptr %1,
    iXLen %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x float> %a
}

declare <vscale x 2 x float> @llvm.riscv.vlse.nxv2f32(
  <vscale x 2 x float>,
  ptr,
  iXLen,
  iXLen);

define <vscale x 2 x float> @intrinsic_vlse_v_nxv2f32_nxv2f32(ptr %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vlse_v_nxv2f32_nxv2f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e32, m1, ta, ma
; CHECK-NEXT:    vlse32.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vlse.nxv2f32(
    <vscale x 2 x float> undef,
    ptr %0,
    iXLen %1,
    iXLen %2)

  ret <vscale x 2 x float> %a
}

declare <vscale x 2 x float> @llvm.riscv.vlse.mask.nxv2f32(
  <vscale x 2 x float>,
  ptr,
  iXLen,
  <vscale x 2 x i1>,
  iXLen,
  iXLen);

define <vscale x 2 x float> @intrinsic_vlse_mask_v_nxv2f32_nxv2f32(<vscale x 2 x float> %0, ptr %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vlse_mask_v_nxv2f32_nxv2f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e32, m1, ta, mu
; CHECK-NEXT:    vlse32.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vlse.mask.nxv2f32(
    <vscale x 2 x float> %0,
    ptr %1,
    iXLen %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x float> %a
}

declare <vscale x 4 x float> @llvm.riscv.vlse.nxv4f32(
  <vscale x 4 x float>,
  ptr,
  iXLen,
  iXLen);

define <vscale x 4 x float> @intrinsic_vlse_v_nxv4f32_nxv4f32(ptr %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vlse_v_nxv4f32_nxv4f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e32, m2, ta, ma
; CHECK-NEXT:    vlse32.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x float> @llvm.riscv.vlse.nxv4f32(
    <vscale x 4 x float> undef,
    ptr %0,
    iXLen %1,
    iXLen %2)

  ret <vscale x 4 x float> %a
}

declare <vscale x 4 x float> @llvm.riscv.vlse.mask.nxv4f32(
  <vscale x 4 x float>,
  ptr,
  iXLen,
  <vscale x 4 x i1>,
  iXLen,
  iXLen);

define <vscale x 4 x float> @intrinsic_vlse_mask_v_nxv4f32_nxv4f32(<vscale x 4 x float> %0, ptr %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vlse_mask_v_nxv4f32_nxv4f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e32, m2, ta, mu
; CHECK-NEXT:    vlse32.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x float> @llvm.riscv.vlse.mask.nxv4f32(
    <vscale x 4 x float> %0,
    ptr %1,
    iXLen %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x float> %a
}

declare <vscale x 8 x float> @llvm.riscv.vlse.nxv8f32(
  <vscale x 8 x float>,
  ptr,
  iXLen,
  iXLen);

define <vscale x 8 x float> @intrinsic_vlse_v_nxv8f32_nxv8f32(ptr %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vlse_v_nxv8f32_nxv8f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e32, m4, ta, ma
; CHECK-NEXT:    vlse32.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vlse.nxv8f32(
    <vscale x 8 x float> undef,
    ptr %0,
    iXLen %1,
    iXLen %2)

  ret <vscale x 8 x float> %a
}

declare <vscale x 8 x float> @llvm.riscv.vlse.mask.nxv8f32(
  <vscale x 8 x float>,
  ptr,
  iXLen,
  <vscale x 8 x i1>,
  iXLen,
  iXLen);

define <vscale x 8 x float> @intrinsic_vlse_mask_v_nxv8f32_nxv8f32(<vscale x 8 x float> %0, ptr %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vlse_mask_v_nxv8f32_nxv8f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e32, m4, ta, mu
; CHECK-NEXT:    vlse32.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vlse.mask.nxv8f32(
    <vscale x 8 x float> %0,
    ptr %1,
    iXLen %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x float> %a
}

declare <vscale x 16 x float> @llvm.riscv.vlse.nxv16f32(
  <vscale x 16 x float>,
  ptr,
  iXLen,
  iXLen);

define <vscale x 16 x float> @intrinsic_vlse_v_nxv16f32_nxv16f32(ptr %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vlse_v_nxv16f32_nxv16f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e32, m8, ta, ma
; CHECK-NEXT:    vlse32.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x float> @llvm.riscv.vlse.nxv16f32(
    <vscale x 16 x float> undef,
    ptr %0,
    iXLen %1,
    iXLen %2)

  ret <vscale x 16 x float> %a
}

declare <vscale x 16 x float> @llvm.riscv.vlse.mask.nxv16f32(
  <vscale x 16 x float>,
  ptr,
  iXLen,
  <vscale x 16 x i1>,
  iXLen,
  iXLen);

define <vscale x 16 x float> @intrinsic_vlse_mask_v_nxv16f32_nxv16f32(<vscale x 16 x float> %0, ptr %1, iXLen %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vlse_mask_v_nxv16f32_nxv16f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e32, m8, ta, mu
; CHECK-NEXT:    vlse32.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x float> @llvm.riscv.vlse.mask.nxv16f32(
    <vscale x 16 x float> %0,
    ptr %1,
    iXLen %2,
    <vscale x 16 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 16 x float> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vlse.nxv1i16(
  <vscale x 1 x i16>,
  ptr,
  iXLen,
  iXLen);

define <vscale x 1 x i16> @intrinsic_vlse_v_nxv1i16_nxv1i16(ptr %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vlse_v_nxv1i16_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, mf4, ta, ma
; CHECK-NEXT:    vlse16.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vlse.nxv1i16(
    <vscale x 1 x i16> undef,
    ptr %0,
    iXLen %1,
    iXLen %2)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vlse.mask.nxv1i16(
  <vscale x 1 x i16>,
  ptr,
  iXLen,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x i16> @intrinsic_vlse_mask_v_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, ptr %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vlse_mask_v_nxv1i16_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, mf4, ta, mu
; CHECK-NEXT:    vlse16.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vlse.mask.nxv1i16(
    <vscale x 1 x i16> %0,
    ptr %1,
    iXLen %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vlse.nxv2i16(
  <vscale x 2 x i16>,
  ptr,
  iXLen,
  iXLen);

define <vscale x 2 x i16> @intrinsic_vlse_v_nxv2i16_nxv2i16(ptr %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vlse_v_nxv2i16_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, mf2, ta, ma
; CHECK-NEXT:    vlse16.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vlse.nxv2i16(
    <vscale x 2 x i16> undef,
    ptr %0,
    iXLen %1,
    iXLen %2)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vlse.mask.nxv2i16(
  <vscale x 2 x i16>,
  ptr,
  iXLen,
  <vscale x 2 x i1>,
  iXLen,
  iXLen);

define <vscale x 2 x i16> @intrinsic_vlse_mask_v_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, ptr %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vlse_mask_v_nxv2i16_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, mf2, ta, mu
; CHECK-NEXT:    vlse16.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vlse.mask.nxv2i16(
    <vscale x 2 x i16> %0,
    ptr %1,
    iXLen %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vlse.nxv4i16(
  <vscale x 4 x i16>,
  ptr,
  iXLen,
  iXLen);

define <vscale x 4 x i16> @intrinsic_vlse_v_nxv4i16_nxv4i16(ptr %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vlse_v_nxv4i16_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, m1, ta, ma
; CHECK-NEXT:    vlse16.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vlse.nxv4i16(
    <vscale x 4 x i16> undef,
    ptr %0,
    iXLen %1,
    iXLen %2)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vlse.mask.nxv4i16(
  <vscale x 4 x i16>,
  ptr,
  iXLen,
  <vscale x 4 x i1>,
  iXLen,
  iXLen);

define <vscale x 4 x i16> @intrinsic_vlse_mask_v_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, ptr %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vlse_mask_v_nxv4i16_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, m1, ta, mu
; CHECK-NEXT:    vlse16.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vlse.mask.nxv4i16(
    <vscale x 4 x i16> %0,
    ptr %1,
    iXLen %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vlse.nxv8i16(
  <vscale x 8 x i16>,
  ptr,
  iXLen,
  iXLen);

define <vscale x 8 x i16> @intrinsic_vlse_v_nxv8i16_nxv8i16(ptr %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vlse_v_nxv8i16_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, m2, ta, ma
; CHECK-NEXT:    vlse16.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vlse.nxv8i16(
    <vscale x 8 x i16> undef,
    ptr %0,
    iXLen %1,
    iXLen %2)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vlse.mask.nxv8i16(
  <vscale x 8 x i16>,
  ptr,
  iXLen,
  <vscale x 8 x i1>,
  iXLen,
  iXLen);

define <vscale x 8 x i16> @intrinsic_vlse_mask_v_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, ptr %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vlse_mask_v_nxv8i16_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, m2, ta, mu
; CHECK-NEXT:    vlse16.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vlse.mask.nxv8i16(
    <vscale x 8 x i16> %0,
    ptr %1,
    iXLen %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vlse.nxv16i16(
  <vscale x 16 x i16>,
  ptr,
  iXLen,
  iXLen);

define <vscale x 16 x i16> @intrinsic_vlse_v_nxv16i16_nxv16i16(ptr %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vlse_v_nxv16i16_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, m4, ta, ma
; CHECK-NEXT:    vlse16.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vlse.nxv16i16(
    <vscale x 16 x i16> undef,
    ptr %0,
    iXLen %1,
    iXLen %2)

  ret <vscale x 16 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vlse.mask.nxv16i16(
  <vscale x 16 x i16>,
  ptr,
  iXLen,
  <vscale x 16 x i1>,
  iXLen,
  iXLen);

define <vscale x 16 x i16> @intrinsic_vlse_mask_v_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, ptr %1, iXLen %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vlse_mask_v_nxv16i16_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, m4, ta, mu
; CHECK-NEXT:    vlse16.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vlse.mask.nxv16i16(
    <vscale x 16 x i16> %0,
    ptr %1,
    iXLen %2,
    <vscale x 16 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 16 x i16> %a
}

declare <vscale x 32 x i16> @llvm.riscv.vlse.nxv32i16(
  <vscale x 32 x i16>,
  ptr,
  iXLen,
  iXLen);

define <vscale x 32 x i16> @intrinsic_vlse_v_nxv32i16_nxv32i16(ptr %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vlse_v_nxv32i16_nxv32i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, m8, ta, ma
; CHECK-NEXT:    vlse16.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vlse.nxv32i16(
    <vscale x 32 x i16> undef,
    ptr %0,
    iXLen %1,
    iXLen %2)

  ret <vscale x 32 x i16> %a
}

declare <vscale x 32 x i16> @llvm.riscv.vlse.mask.nxv32i16(
  <vscale x 32 x i16>,
  ptr,
  iXLen,
  <vscale x 32 x i1>,
  iXLen,
  iXLen);

define <vscale x 32 x i16> @intrinsic_vlse_mask_v_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, ptr %1, iXLen %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vlse_mask_v_nxv32i16_nxv32i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, m8, ta, mu
; CHECK-NEXT:    vlse16.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vlse.mask.nxv32i16(
    <vscale x 32 x i16> %0,
    ptr %1,
    iXLen %2,
    <vscale x 32 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 32 x i16> %a
}

declare <vscale x 1 x half> @llvm.riscv.vlse.nxv1f16(
  <vscale x 1 x half>,
  ptr,
  iXLen,
  iXLen);

define <vscale x 1 x half> @intrinsic_vlse_v_nxv1f16_nxv1f16(ptr %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vlse_v_nxv1f16_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, mf4, ta, ma
; CHECK-NEXT:    vlse16.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vlse.nxv1f16(
    <vscale x 1 x half> undef,
    ptr %0,
    iXLen %1,
    iXLen %2)

  ret <vscale x 1 x half> %a
}

declare <vscale x 1 x half> @llvm.riscv.vlse.mask.nxv1f16(
  <vscale x 1 x half>,
  ptr,
  iXLen,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x half> @intrinsic_vlse_mask_v_nxv1f16_nxv1f16(<vscale x 1 x half> %0, ptr %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vlse_mask_v_nxv1f16_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, mf4, ta, mu
; CHECK-NEXT:    vlse16.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vlse.mask.nxv1f16(
    <vscale x 1 x half> %0,
    ptr %1,
    iXLen %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x half> %a
}

declare <vscale x 2 x half> @llvm.riscv.vlse.nxv2f16(
  <vscale x 2 x half>,
  ptr,
  iXLen,
  iXLen);

define <vscale x 2 x half> @intrinsic_vlse_v_nxv2f16_nxv2f16(ptr %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vlse_v_nxv2f16_nxv2f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, mf2, ta, ma
; CHECK-NEXT:    vlse16.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x half> @llvm.riscv.vlse.nxv2f16(
    <vscale x 2 x half> undef,
    ptr %0,
    iXLen %1,
    iXLen %2)

  ret <vscale x 2 x half> %a
}

declare <vscale x 2 x half> @llvm.riscv.vlse.mask.nxv2f16(
  <vscale x 2 x half>,
  ptr,
  iXLen,
  <vscale x 2 x i1>,
  iXLen,
  iXLen);

define <vscale x 2 x half> @intrinsic_vlse_mask_v_nxv2f16_nxv2f16(<vscale x 2 x half> %0, ptr %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vlse_mask_v_nxv2f16_nxv2f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, mf2, ta, mu
; CHECK-NEXT:    vlse16.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x half> @llvm.riscv.vlse.mask.nxv2f16(
    <vscale x 2 x half> %0,
    ptr %1,
    iXLen %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x half> %a
}

declare <vscale x 4 x half> @llvm.riscv.vlse.nxv4f16(
  <vscale x 4 x half>,
  ptr,
  iXLen,
  iXLen);

define <vscale x 4 x half> @intrinsic_vlse_v_nxv4f16_nxv4f16(ptr %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vlse_v_nxv4f16_nxv4f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, m1, ta, ma
; CHECK-NEXT:    vlse16.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vlse.nxv4f16(
    <vscale x 4 x half> undef,
    ptr %0,
    iXLen %1,
    iXLen %2)

  ret <vscale x 4 x half> %a
}

declare <vscale x 4 x half> @llvm.riscv.vlse.mask.nxv4f16(
  <vscale x 4 x half>,
  ptr,
  iXLen,
  <vscale x 4 x i1>,
  iXLen,
  iXLen);

define <vscale x 4 x half> @intrinsic_vlse_mask_v_nxv4f16_nxv4f16(<vscale x 4 x half> %0, ptr %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vlse_mask_v_nxv4f16_nxv4f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, m1, ta, mu
; CHECK-NEXT:    vlse16.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vlse.mask.nxv4f16(
    <vscale x 4 x half> %0,
    ptr %1,
    iXLen %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x half> %a
}

declare <vscale x 8 x half> @llvm.riscv.vlse.nxv8f16(
  <vscale x 8 x half>,
  ptr,
  iXLen,
  iXLen);

define <vscale x 8 x half> @intrinsic_vlse_v_nxv8f16_nxv8f16(ptr %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vlse_v_nxv8f16_nxv8f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, m2, ta, ma
; CHECK-NEXT:    vlse16.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x half> @llvm.riscv.vlse.nxv8f16(
    <vscale x 8 x half> undef,
    ptr %0,
    iXLen %1,
    iXLen %2)

  ret <vscale x 8 x half> %a
}

declare <vscale x 8 x half> @llvm.riscv.vlse.mask.nxv8f16(
  <vscale x 8 x half>,
  ptr,
  iXLen,
  <vscale x 8 x i1>,
  iXLen,
  iXLen);

define <vscale x 8 x half> @intrinsic_vlse_mask_v_nxv8f16_nxv8f16(<vscale x 8 x half> %0, ptr %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vlse_mask_v_nxv8f16_nxv8f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, m2, ta, mu
; CHECK-NEXT:    vlse16.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x half> @llvm.riscv.vlse.mask.nxv8f16(
    <vscale x 8 x half> %0,
    ptr %1,
    iXLen %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x half> %a
}

declare <vscale x 16 x half> @llvm.riscv.vlse.nxv16f16(
  <vscale x 16 x half>,
  ptr,
  iXLen,
  iXLen);

define <vscale x 16 x half> @intrinsic_vlse_v_nxv16f16_nxv16f16(ptr %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vlse_v_nxv16f16_nxv16f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, m4, ta, ma
; CHECK-NEXT:    vlse16.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x half> @llvm.riscv.vlse.nxv16f16(
    <vscale x 16 x half> undef,
    ptr %0,
    iXLen %1,
    iXLen %2)

  ret <vscale x 16 x half> %a
}

declare <vscale x 16 x half> @llvm.riscv.vlse.mask.nxv16f16(
  <vscale x 16 x half>,
  ptr,
  iXLen,
  <vscale x 16 x i1>,
  iXLen,
  iXLen);

define <vscale x 16 x half> @intrinsic_vlse_mask_v_nxv16f16_nxv16f16(<vscale x 16 x half> %0, ptr %1, iXLen %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vlse_mask_v_nxv16f16_nxv16f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, m4, ta, mu
; CHECK-NEXT:    vlse16.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x half> @llvm.riscv.vlse.mask.nxv16f16(
    <vscale x 16 x half> %0,
    ptr %1,
    iXLen %2,
    <vscale x 16 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 16 x half> %a
}

declare <vscale x 32 x half> @llvm.riscv.vlse.nxv32f16(
  <vscale x 32 x half>,
  ptr,
  iXLen,
  iXLen);

define <vscale x 32 x half> @intrinsic_vlse_v_nxv32f16_nxv32f16(ptr %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vlse_v_nxv32f16_nxv32f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, m8, ta, ma
; CHECK-NEXT:    vlse16.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x half> @llvm.riscv.vlse.nxv32f16(
    <vscale x 32 x half> undef,
    ptr %0,
    iXLen %1,
    iXLen %2)

  ret <vscale x 32 x half> %a
}

declare <vscale x 32 x half> @llvm.riscv.vlse.mask.nxv32f16(
  <vscale x 32 x half>,
  ptr,
  iXLen,
  <vscale x 32 x i1>,
  iXLen,
  iXLen);

define <vscale x 32 x half> @intrinsic_vlse_mask_v_nxv32f16_nxv32f16(<vscale x 32 x half> %0, ptr %1, iXLen %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vlse_mask_v_nxv32f16_nxv32f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, m8, ta, mu
; CHECK-NEXT:    vlse16.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x half> @llvm.riscv.vlse.mask.nxv32f16(
    <vscale x 32 x half> %0,
    ptr %1,
    iXLen %2,
    <vscale x 32 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 32 x half> %a
}

declare <vscale x 1 x bfloat> @llvm.riscv.vlse.nxv1bf16(
  <vscale x 1 x bfloat>,
  ptr,
  iXLen,
  iXLen);

define <vscale x 1 x bfloat> @intrinsic_vlse_v_nxv1bf16_nxv1bf16(ptr %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vlse_v_nxv1bf16_nxv1bf16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, mf4, ta, ma
; CHECK-NEXT:    vlse16.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x bfloat> @llvm.riscv.vlse.nxv1bf16(
    <vscale x 1 x bfloat> undef,
    ptr %0,
    iXLen %1,
    iXLen %2)

  ret <vscale x 1 x bfloat> %a
}

declare <vscale x 1 x bfloat> @llvm.riscv.vlse.mask.nxv1bf16(
  <vscale x 1 x bfloat>,
  ptr,
  iXLen,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x bfloat> @intrinsic_vlse_mask_v_nxv1bf16_nxv1bf16(<vscale x 1 x bfloat> %0, ptr %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vlse_mask_v_nxv1bf16_nxv1bf16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, mf4, ta, mu
; CHECK-NEXT:    vlse16.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x bfloat> @llvm.riscv.vlse.mask.nxv1bf16(
    <vscale x 1 x bfloat> %0,
    ptr %1,
    iXLen %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x bfloat> %a
}

declare <vscale x 2 x bfloat> @llvm.riscv.vlse.nxv2bf16(
  <vscale x 2 x bfloat>,
  ptr,
  iXLen,
  iXLen);

define <vscale x 2 x bfloat> @intrinsic_vlse_v_nxv2bf16_nxv2bf16(ptr %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vlse_v_nxv2bf16_nxv2bf16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, mf2, ta, ma
; CHECK-NEXT:    vlse16.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x bfloat> @llvm.riscv.vlse.nxv2bf16(
    <vscale x 2 x bfloat> undef,
    ptr %0,
    iXLen %1,
    iXLen %2)

  ret <vscale x 2 x bfloat> %a
}

declare <vscale x 2 x bfloat> @llvm.riscv.vlse.mask.nxv2bf16(
  <vscale x 2 x bfloat>,
  ptr,
  iXLen,
  <vscale x 2 x i1>,
  iXLen,
  iXLen);

define <vscale x 2 x bfloat> @intrinsic_vlse_mask_v_nxv2bf16_nxv2bf16(<vscale x 2 x bfloat> %0, ptr %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vlse_mask_v_nxv2bf16_nxv2bf16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, mf2, ta, mu
; CHECK-NEXT:    vlse16.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x bfloat> @llvm.riscv.vlse.mask.nxv2bf16(
    <vscale x 2 x bfloat> %0,
    ptr %1,
    iXLen %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x bfloat> %a
}

declare <vscale x 4 x bfloat> @llvm.riscv.vlse.nxv4bf16(
  <vscale x 4 x bfloat>,
  ptr,
  iXLen,
  iXLen);

define <vscale x 4 x bfloat> @intrinsic_vlse_v_nxv4bf16_nxv4bf16(ptr %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vlse_v_nxv4bf16_nxv4bf16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, m1, ta, ma
; CHECK-NEXT:    vlse16.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x bfloat> @llvm.riscv.vlse.nxv4bf16(
    <vscale x 4 x bfloat> undef,
    ptr %0,
    iXLen %1,
    iXLen %2)

  ret <vscale x 4 x bfloat> %a
}

declare <vscale x 4 x bfloat> @llvm.riscv.vlse.mask.nxv4bf16(
  <vscale x 4 x bfloat>,
  ptr,
  iXLen,
  <vscale x 4 x i1>,
  iXLen,
  iXLen);

define <vscale x 4 x bfloat> @intrinsic_vlse_mask_v_nxv4bf16_nxv4bf16(<vscale x 4 x bfloat> %0, ptr %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vlse_mask_v_nxv4bf16_nxv4bf16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, m1, ta, mu
; CHECK-NEXT:    vlse16.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x bfloat> @llvm.riscv.vlse.mask.nxv4bf16(
    <vscale x 4 x bfloat> %0,
    ptr %1,
    iXLen %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x bfloat> %a
}

declare <vscale x 8 x bfloat> @llvm.riscv.vlse.nxv8bf16(
  <vscale x 8 x bfloat>,
  ptr,
  iXLen,
  iXLen);

define <vscale x 8 x bfloat> @intrinsic_vlse_v_nxv8bf16_nxv8bf16(ptr %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vlse_v_nxv8bf16_nxv8bf16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, m2, ta, ma
; CHECK-NEXT:    vlse16.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x bfloat> @llvm.riscv.vlse.nxv8bf16(
    <vscale x 8 x bfloat> undef,
    ptr %0,
    iXLen %1,
    iXLen %2)

  ret <vscale x 8 x bfloat> %a
}

declare <vscale x 8 x bfloat> @llvm.riscv.vlse.mask.nxv8bf16(
  <vscale x 8 x bfloat>,
  ptr,
  iXLen,
  <vscale x 8 x i1>,
  iXLen,
  iXLen);

define <vscale x 8 x bfloat> @intrinsic_vlse_mask_v_nxv8bf16_nxv8bf16(<vscale x 8 x bfloat> %0, ptr %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vlse_mask_v_nxv8bf16_nxv8bf16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, m2, ta, mu
; CHECK-NEXT:    vlse16.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x bfloat> @llvm.riscv.vlse.mask.nxv8bf16(
    <vscale x 8 x bfloat> %0,
    ptr %1,
    iXLen %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x bfloat> %a
}

declare <vscale x 16 x bfloat> @llvm.riscv.vlse.nxv16bf16(
  <vscale x 16 x bfloat>,
  ptr,
  iXLen,
  iXLen);

define <vscale x 16 x bfloat> @intrinsic_vlse_v_nxv16bf16_nxv16bf16(ptr %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vlse_v_nxv16bf16_nxv16bf16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, m4, ta, ma
; CHECK-NEXT:    vlse16.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x bfloat> @llvm.riscv.vlse.nxv16bf16(
    <vscale x 16 x bfloat> undef,
    ptr %0,
    iXLen %1,
    iXLen %2)

  ret <vscale x 16 x bfloat> %a
}

declare <vscale x 16 x bfloat> @llvm.riscv.vlse.mask.nxv16bf16(
  <vscale x 16 x bfloat>,
  ptr,
  iXLen,
  <vscale x 16 x i1>,
  iXLen,
  iXLen);

define <vscale x 16 x bfloat> @intrinsic_vlse_mask_v_nxv16bf16_nxv16bf16(<vscale x 16 x bfloat> %0, ptr %1, iXLen %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vlse_mask_v_nxv16bf16_nxv16bf16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, m4, ta, mu
; CHECK-NEXT:    vlse16.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x bfloat> @llvm.riscv.vlse.mask.nxv16bf16(
    <vscale x 16 x bfloat> %0,
    ptr %1,
    iXLen %2,
    <vscale x 16 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 16 x bfloat> %a
}

declare <vscale x 32 x bfloat> @llvm.riscv.vlse.nxv32bf16(
  <vscale x 32 x bfloat>,
  ptr,
  iXLen,
  iXLen);

define <vscale x 32 x bfloat> @intrinsic_vlse_v_nxv32bf16_nxv32bf16(ptr %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vlse_v_nxv32bf16_nxv32bf16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, m8, ta, ma
; CHECK-NEXT:    vlse16.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x bfloat> @llvm.riscv.vlse.nxv32bf16(
    <vscale x 32 x bfloat> undef,
    ptr %0,
    iXLen %1,
    iXLen %2)

  ret <vscale x 32 x bfloat> %a
}

declare <vscale x 32 x bfloat> @llvm.riscv.vlse.mask.nxv32bf16(
  <vscale x 32 x bfloat>,
  ptr,
  iXLen,
  <vscale x 32 x i1>,
  iXLen,
  iXLen);

define <vscale x 32 x bfloat> @intrinsic_vlse_mask_v_nxv32bf16_nxv32bf16(<vscale x 32 x bfloat> %0, ptr %1, iXLen %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vlse_mask_v_nxv32bf16_nxv32bf16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, m8, ta, mu
; CHECK-NEXT:    vlse16.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x bfloat> @llvm.riscv.vlse.mask.nxv32bf16(
    <vscale x 32 x bfloat> %0,
    ptr %1,
    iXLen %2,
    <vscale x 32 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 32 x bfloat> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vlse.nxv1i8(
  <vscale x 1 x i8>,
  ptr,
  iXLen,
  iXLen);

define <vscale x 1 x i8> @intrinsic_vlse_v_nxv1i8_nxv1i8(ptr %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vlse_v_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e8, mf8, ta, ma
; CHECK-NEXT:    vlse8.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vlse.nxv1i8(
    <vscale x 1 x i8> undef,
    ptr %0,
    iXLen %1,
    iXLen %2)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vlse.mask.nxv1i8(
  <vscale x 1 x i8>,
  ptr,
  iXLen,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x i8> @intrinsic_vlse_mask_v_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, ptr %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vlse_mask_v_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e8, mf8, ta, mu
; CHECK-NEXT:    vlse8.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vlse.mask.nxv1i8(
    <vscale x 1 x i8> %0,
    ptr %1,
    iXLen %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 2 x i8> @llvm.riscv.vlse.nxv2i8(
  <vscale x 2 x i8>,
  ptr,
  iXLen,
  iXLen);

define <vscale x 2 x i8> @intrinsic_vlse_v_nxv2i8_nxv2i8(ptr %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vlse_v_nxv2i8_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e8, mf4, ta, ma
; CHECK-NEXT:    vlse8.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vlse.nxv2i8(
    <vscale x 2 x i8> undef,
    ptr %0,
    iXLen %1,
    iXLen %2)

  ret <vscale x 2 x i8> %a
}

declare <vscale x 2 x i8> @llvm.riscv.vlse.mask.nxv2i8(
  <vscale x 2 x i8>,
  ptr,
  iXLen,
  <vscale x 2 x i1>,
  iXLen,
  iXLen);

define <vscale x 2 x i8> @intrinsic_vlse_mask_v_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, ptr %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vlse_mask_v_nxv2i8_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e8, mf4, ta, mu
; CHECK-NEXT:    vlse8.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vlse.mask.nxv2i8(
    <vscale x 2 x i8> %0,
    ptr %1,
    iXLen %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x i8> %a
}

declare <vscale x 4 x i8> @llvm.riscv.vlse.nxv4i8(
  <vscale x 4 x i8>,
  ptr,
  iXLen,
  iXLen);

define <vscale x 4 x i8> @intrinsic_vlse_v_nxv4i8_nxv4i8(ptr %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vlse_v_nxv4i8_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e8, mf2, ta, ma
; CHECK-NEXT:    vlse8.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vlse.nxv4i8(
    <vscale x 4 x i8> undef,
    ptr %0,
    iXLen %1,
    iXLen %2)

  ret <vscale x 4 x i8> %a
}

declare <vscale x 4 x i8> @llvm.riscv.vlse.mask.nxv4i8(
  <vscale x 4 x i8>,
  ptr,
  iXLen,
  <vscale x 4 x i1>,
  iXLen,
  iXLen);

define <vscale x 4 x i8> @intrinsic_vlse_mask_v_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, ptr %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vlse_mask_v_nxv4i8_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e8, mf2, ta, mu
; CHECK-NEXT:    vlse8.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vlse.mask.nxv4i8(
    <vscale x 4 x i8> %0,
    ptr %1,
    iXLen %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vlse.nxv8i8(
  <vscale x 8 x i8>,
  ptr,
  iXLen,
  iXLen);

define <vscale x 8 x i8> @intrinsic_vlse_v_nxv8i8_nxv8i8(ptr %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vlse_v_nxv8i8_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e8, m1, ta, ma
; CHECK-NEXT:    vlse8.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vlse.nxv8i8(
    <vscale x 8 x i8> undef,
    ptr %0,
    iXLen %1,
    iXLen %2)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vlse.mask.nxv8i8(
  <vscale x 8 x i8>,
  ptr,
  iXLen,
  <vscale x 8 x i1>,
  iXLen,
  iXLen);

define <vscale x 8 x i8> @intrinsic_vlse_mask_v_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, ptr %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vlse_mask_v_nxv8i8_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e8, m1, ta, mu
; CHECK-NEXT:    vlse8.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vlse.mask.nxv8i8(
    <vscale x 8 x i8> %0,
    ptr %1,
    iXLen %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 16 x i8> @llvm.riscv.vlse.nxv16i8(
  <vscale x 16 x i8>,
  ptr,
  iXLen,
  iXLen);

define <vscale x 16 x i8> @intrinsic_vlse_v_nxv16i8_nxv16i8(ptr %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vlse_v_nxv16i8_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e8, m2, ta, ma
; CHECK-NEXT:    vlse8.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vlse.nxv16i8(
    <vscale x 16 x i8> undef,
    ptr %0,
    iXLen %1,
    iXLen %2)

  ret <vscale x 16 x i8> %a
}

declare <vscale x 16 x i8> @llvm.riscv.vlse.mask.nxv16i8(
  <vscale x 16 x i8>,
  ptr,
  iXLen,
  <vscale x 16 x i1>,
  iXLen,
  iXLen);

define <vscale x 16 x i8> @intrinsic_vlse_mask_v_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, ptr %1, iXLen %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vlse_mask_v_nxv16i8_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e8, m2, ta, mu
; CHECK-NEXT:    vlse8.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vlse.mask.nxv16i8(
    <vscale x 16 x i8> %0,
    ptr %1,
    iXLen %2,
    <vscale x 16 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 16 x i8> %a
}

declare <vscale x 32 x i8> @llvm.riscv.vlse.nxv32i8(
  <vscale x 32 x i8>,
  ptr,
  iXLen,
  iXLen);

define <vscale x 32 x i8> @intrinsic_vlse_v_nxv32i8_nxv32i8(ptr %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vlse_v_nxv32i8_nxv32i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e8, m4, ta, ma
; CHECK-NEXT:    vlse8.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vlse.nxv32i8(
    <vscale x 32 x i8> undef,
    ptr %0,
    iXLen %1,
    iXLen %2)

  ret <vscale x 32 x i8> %a
}

declare <vscale x 32 x i8> @llvm.riscv.vlse.mask.nxv32i8(
  <vscale x 32 x i8>,
  ptr,
  iXLen,
  <vscale x 32 x i1>,
  iXLen,
  iXLen);

define <vscale x 32 x i8> @intrinsic_vlse_mask_v_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, ptr %1, iXLen %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vlse_mask_v_nxv32i8_nxv32i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e8, m4, ta, mu
; CHECK-NEXT:    vlse8.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vlse.mask.nxv32i8(
    <vscale x 32 x i8> %0,
    ptr %1,
    iXLen %2,
    <vscale x 32 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 32 x i8> %a
}

declare <vscale x 64 x i8> @llvm.riscv.vlse.nxv64i8(
  <vscale x 64 x i8>,
  ptr,
  iXLen,
  iXLen);

define <vscale x 64 x i8> @intrinsic_vlse_v_nxv64i8_nxv64i8(ptr %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vlse_v_nxv64i8_nxv64i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e8, m8, ta, ma
; CHECK-NEXT:    vlse8.v v8, (a0), a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 64 x i8> @llvm.riscv.vlse.nxv64i8(
    <vscale x 64 x i8> undef,
    ptr %0,
    iXLen %1,
    iXLen %2)

  ret <vscale x 64 x i8> %a
}

declare <vscale x 64 x i8> @llvm.riscv.vlse.mask.nxv64i8(
  <vscale x 64 x i8>,
  ptr,
  iXLen,
  <vscale x 64 x i1>,
  iXLen,
  iXLen);

define <vscale x 64 x i8> @intrinsic_vlse_mask_v_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, ptr %1, iXLen %2, <vscale x 64 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vlse_mask_v_nxv64i8_nxv64i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e8, m8, ta, mu
; CHECK-NEXT:    vlse8.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 64 x i8> @llvm.riscv.vlse.mask.nxv64i8(
    <vscale x 64 x i8> %0,
    ptr %1,
    iXLen %2,
    <vscale x 64 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 64 x i8> %a
}