; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zvfhmin,+zvfbfmin \
; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfhmin,+zvfbfmin \
; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s

declare <vscale x 1 x i64> @llvm.riscv.vle.nxv1i64(
  <vscale x 1 x i64>,
  ptr,
  iXLen);

define <vscale x 1 x i64> @intrinsic_vle_v_nxv1i64_nxv1i64(ptr %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vle_v_nxv1i64_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT:    vle64.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vle.nxv1i64(
    <vscale x 1 x i64> undef,
    ptr %0,
    iXLen %1)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vle.mask.nxv1i64(
  <vscale x 1 x i64>,
  ptr,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x i64> @intrinsic_vle_mask_v_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, ptr %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vle_mask_v_nxv1i64_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT:    vle64.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vle.mask.nxv1i64(
    <vscale x 1 x i64> %0,
    ptr %1,
    <vscale x 1 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vle.nxv2i64(
  <vscale x 2 x i64>,
  ptr,
  iXLen);

define <vscale x 2 x i64> @intrinsic_vle_v_nxv2i64_nxv2i64(ptr %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vle_v_nxv2i64_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT:    vle64.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vle.nxv2i64(
    <vscale x 2 x i64> undef,
    ptr %0,
    iXLen %1)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vle.mask.nxv2i64(
  <vscale x 2 x i64>,
  ptr,
  <vscale x 2 x i1>,
  iXLen,
  iXLen);

define <vscale x 2 x i64> @intrinsic_vle_mask_v_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, ptr %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vle_mask_v_nxv2i64_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT:    vle64.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vle.mask.nxv2i64(
    <vscale x 2 x i64> %0,
    ptr %1,
    <vscale x 2 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vle.nxv4i64(
  <vscale x 4 x i64>,
  ptr,
  iXLen);

define <vscale x 4 x i64> @intrinsic_vle_v_nxv4i64_nxv4i64(ptr %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vle_v_nxv4i64_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
; CHECK-NEXT:    vle64.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vle.nxv4i64(
    <vscale x 4 x i64> undef,
    ptr %0,
    iXLen %1)

  ret <vscale x 4 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vle.mask.nxv4i64(
  <vscale x 4 x i64>,
  ptr,
  <vscale x 4 x i1>,
  iXLen,
  iXLen);

define <vscale x 4 x i64> @intrinsic_vle_mask_v_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, ptr %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vle_mask_v_nxv4i64_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
; CHECK-NEXT:    vle64.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vle.mask.nxv4i64(
    <vscale x 4 x i64> %0,
    ptr %1,
    <vscale x 4 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 4 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.vle.nxv8i64(
  <vscale x 8 x i64>,
  ptr,
  iXLen);

define <vscale x 8 x i64> @intrinsic_vle_v_nxv8i64_nxv8i64(ptr %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vle_v_nxv8i64_nxv8i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
; CHECK-NEXT:    vle64.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vle.nxv8i64(
    <vscale x 8 x i64> undef,
    ptr %0,
    iXLen %1)

  ret <vscale x 8 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.vle.mask.nxv8i64(
  <vscale x 8 x i64>,
  ptr,
  <vscale x 8 x i1>,
  iXLen,
  iXLen);

define <vscale x 8 x i64> @intrinsic_vle_mask_v_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, ptr %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vle_mask_v_nxv8i64_nxv8i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
; CHECK-NEXT:    vle64.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vle.mask.nxv8i64(
    <vscale x 8 x i64> %0,
    ptr %1,
    <vscale x 8 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 8 x i64> %a
}

declare <vscale x 1 x double> @llvm.riscv.vle.nxv1f64(
  <vscale x 1 x double>,
  ptr,
  iXLen);

define <vscale x 1 x double> @intrinsic_vle_v_nxv1f64_nxv1f64(ptr %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vle_v_nxv1f64_nxv1f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT:    vle64.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vle.nxv1f64(
    <vscale x 1 x double> undef,
    ptr %0,
    iXLen %1)

  ret <vscale x 1 x double> %a
}

declare <vscale x 1 x double> @llvm.riscv.vle.mask.nxv1f64(
  <vscale x 1 x double>,
  ptr,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x double> @intrinsic_vle_mask_v_nxv1f64_nxv1f64(<vscale x 1 x double> %0, ptr %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vle_mask_v_nxv1f64_nxv1f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT:    vle64.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vle.mask.nxv1f64(
    <vscale x 1 x double> %0,
    ptr %1,
    <vscale x 1 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 1 x double> %a
}

declare <vscale x 2 x double> @llvm.riscv.vle.nxv2f64(
  <vscale x 2 x double>,
  ptr,
  iXLen);

define <vscale x 2 x double> @intrinsic_vle_v_nxv2f64_nxv2f64(ptr %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vle_v_nxv2f64_nxv2f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT:    vle64.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x double> @llvm.riscv.vle.nxv2f64(
    <vscale x 2 x double> undef,
    ptr %0,
    iXLen %1)

  ret <vscale x 2 x double> %a
}

declare <vscale x 2 x double> @llvm.riscv.vle.mask.nxv2f64(
  <vscale x 2 x double>,
  ptr,
  <vscale x 2 x i1>,
  iXLen,
  iXLen);

define <vscale x 2 x double> @intrinsic_vle_mask_v_nxv2f64_nxv2f64(<vscale x 2 x double> %0, ptr %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vle_mask_v_nxv2f64_nxv2f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT:    vle64.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x double> @llvm.riscv.vle.mask.nxv2f64(
    <vscale x 2 x double> %0,
    ptr %1,
    <vscale x 2 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 2 x double> %a
}

declare <vscale x 4 x double> @llvm.riscv.vle.nxv4f64(
  <vscale x 4 x double>,
  ptr,
  iXLen);

define <vscale x 4 x double> @intrinsic_vle_v_nxv4f64_nxv4f64(ptr %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vle_v_nxv4f64_nxv4f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
; CHECK-NEXT:    vle64.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x double> @llvm.riscv.vle.nxv4f64(
    <vscale x 4 x double> undef,
    ptr %0,
    iXLen %1)

  ret <vscale x 4 x double> %a
}

declare <vscale x 4 x double> @llvm.riscv.vle.mask.nxv4f64(
  <vscale x 4 x double>,
  ptr,
  <vscale x 4 x i1>,
  iXLen,
  iXLen);

define <vscale x 4 x double> @intrinsic_vle_mask_v_nxv4f64_nxv4f64(<vscale x 4 x double> %0, ptr %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vle_mask_v_nxv4f64_nxv4f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
; CHECK-NEXT:    vle64.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x double> @llvm.riscv.vle.mask.nxv4f64(
    <vscale x 4 x double> %0,
    ptr %1,
    <vscale x 4 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 4 x double> %a
}

declare <vscale x 8 x double> @llvm.riscv.vle.nxv8f64(
  <vscale x 8 x double>,
  ptr,
  iXLen);

define <vscale x 8 x double> @intrinsic_vle_v_nxv8f64_nxv8f64(ptr %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vle_v_nxv8f64_nxv8f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
; CHECK-NEXT:    vle64.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x double> @llvm.riscv.vle.nxv8f64(
    <vscale x 8 x double> undef,
    ptr %0,
    iXLen %1)

  ret <vscale x 8 x double> %a
}

declare <vscale x 8 x double> @llvm.riscv.vle.mask.nxv8f64(
  <vscale x 8 x double>,
  ptr,
  <vscale x 8 x i1>,
  iXLen,
  iXLen);

define <vscale x 8 x double> @intrinsic_vle_mask_v_nxv8f64_nxv8f64(<vscale x 8 x double> %0, ptr %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vle_mask_v_nxv8f64_nxv8f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
; CHECK-NEXT:    vle64.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x double> @llvm.riscv.vle.mask.nxv8f64(
    <vscale x 8 x double> %0,
    ptr %1,
    <vscale x 8 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 8 x double> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vle.nxv1i32(
  <vscale x 1 x i32>,
  ptr,
  iXLen);

define <vscale x 1 x i32> @intrinsic_vle_v_nxv1i32_nxv1i32(ptr %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vle_v_nxv1i32_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vle.nxv1i32(
    <vscale x 1 x i32> undef,
    ptr %0,
    iXLen %1)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vle.mask.nxv1i32(
  <vscale x 1 x i32>,
  ptr,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x i32> @intrinsic_vle_mask_v_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, ptr %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vle_mask_v_nxv1i32_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT:    vle32.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vle.mask.nxv1i32(
    <vscale x 1 x i32> %0,
    ptr %1,
    <vscale x 1 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vle.nxv2i32(
  <vscale x 2 x i32>,
  ptr,
  iXLen);

define <vscale x 2 x i32> @intrinsic_vle_v_nxv2i32_nxv2i32(ptr %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vle_v_nxv2i32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vle.nxv2i32(
    <vscale x 2 x i32> undef,
    ptr %0,
    iXLen %1)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vle.mask.nxv2i32(
  <vscale x 2 x i32>,
  ptr,
  <vscale x 2 x i1>,
  iXLen,
  iXLen);

define <vscale x 2 x i32> @intrinsic_vle_mask_v_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, ptr %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vle_mask_v_nxv2i32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT:    vle32.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vle.mask.nxv2i32(
    <vscale x 2 x i32> %0,
    ptr %1,
    <vscale x 2 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vle.nxv4i32(
  <vscale x 4 x i32>,
  ptr,
  iXLen);

define <vscale x 4 x i32> @intrinsic_vle_v_nxv4i32_nxv4i32(ptr %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vle_v_nxv4i32_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vle.nxv4i32(
    <vscale x 4 x i32> undef,
    ptr %0,
    iXLen %1)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vle.mask.nxv4i32(
  <vscale x 4 x i32>,
  ptr,
  <vscale x 4 x i1>,
  iXLen,
  iXLen);

define <vscale x 4 x i32> @intrinsic_vle_mask_v_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, ptr %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vle_mask_v_nxv4i32_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT:    vle32.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vle.mask.nxv4i32(
    <vscale x 4 x i32> %0,
    ptr %1,
    <vscale x 4 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vle.nxv8i32(
  <vscale x 8 x i32>,
  ptr,
  iXLen);

define <vscale x 8 x i32> @intrinsic_vle_v_nxv8i32_nxv8i32(ptr %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vle_v_nxv8i32_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vle.nxv8i32(
    <vscale x 8 x i32> undef,
    ptr %0,
    iXLen %1)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vle.mask.nxv8i32(
  <vscale x 8 x i32>,
  ptr,
  <vscale x 8 x i1>,
  iXLen,
  iXLen);

define <vscale x 8 x i32> @intrinsic_vle_mask_v_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, ptr %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vle_mask_v_nxv8i32_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT:    vle32.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vle.mask.nxv8i32(
    <vscale x 8 x i32> %0,
    ptr %1,
    <vscale x 8 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 16 x i32> @llvm.riscv.vle.nxv16i32(
  <vscale x 16 x i32>,
  ptr,
  iXLen);

define <vscale x 16 x i32> @intrinsic_vle_v_nxv16i32_nxv16i32(ptr %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vle_v_nxv16i32_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vle.nxv16i32(
    <vscale x 16 x i32> undef,
    ptr %0,
    iXLen %1)

  ret <vscale x 16 x i32> %a
}

declare <vscale x 16 x i32> @llvm.riscv.vle.mask.nxv16i32(
  <vscale x 16 x i32>,
  ptr,
  <vscale x 16 x i1>,
  iXLen,
  iXLen);

define <vscale x 16 x i32> @intrinsic_vle_mask_v_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, ptr %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vle_mask_v_nxv16i32_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
; CHECK-NEXT:    vle32.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vle.mask.nxv16i32(
    <vscale x 16 x i32> %0,
    ptr %1,
    <vscale x 16 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 16 x i32> %a
}

declare <vscale x 1 x float> @llvm.riscv.vle.nxv1f32(
  <vscale x 1 x float>,
  ptr,
  iXLen);

define <vscale x 1 x float> @intrinsic_vle_v_nxv1f32_nxv1f32(ptr %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vle_v_nxv1f32_nxv1f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vle.nxv1f32(
    <vscale x 1 x float> undef,
    ptr %0,
    iXLen %1)

  ret <vscale x 1 x float> %a
}

declare <vscale x 1 x float> @llvm.riscv.vle.mask.nxv1f32(
  <vscale x 1 x float>,
  ptr,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x float> @intrinsic_vle_mask_v_nxv1f32_nxv1f32(<vscale x 1 x float> %0, ptr %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vle_mask_v_nxv1f32_nxv1f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT:    vle32.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vle.mask.nxv1f32(
    <vscale x 1 x float> %0,
    ptr %1,
    <vscale x 1 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 1 x float> %a
}

declare <vscale x 2 x float> @llvm.riscv.vle.nxv2f32(
  <vscale x 2 x float>,
  ptr,
  iXLen);

define <vscale x 2 x float> @intrinsic_vle_v_nxv2f32_nxv2f32(ptr %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vle_v_nxv2f32_nxv2f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vle.nxv2f32(
    <vscale x 2 x float> undef,
    ptr %0,
    iXLen %1)

  ret <vscale x 2 x float> %a
}

declare <vscale x 2 x float> @llvm.riscv.vle.mask.nxv2f32(
  <vscale x 2 x float>,
  ptr,
  <vscale x 2 x i1>,
  iXLen,
  iXLen);

define <vscale x 2 x float> @intrinsic_vle_mask_v_nxv2f32_nxv2f32(<vscale x 2 x float> %0, ptr %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vle_mask_v_nxv2f32_nxv2f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT:    vle32.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vle.mask.nxv2f32(
    <vscale x 2 x float> %0,
    ptr %1,
    <vscale x 2 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 2 x float> %a
}

declare <vscale x 4 x float> @llvm.riscv.vle.nxv4f32(
  <vscale x 4 x float>,
  ptr,
  iXLen);

define <vscale x 4 x float> @intrinsic_vle_v_nxv4f32_nxv4f32(ptr %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vle_v_nxv4f32_nxv4f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x float> @llvm.riscv.vle.nxv4f32(
    <vscale x 4 x float> undef,
    ptr %0,
    iXLen %1)

  ret <vscale x 4 x float> %a
}

declare <vscale x 4 x float> @llvm.riscv.vle.mask.nxv4f32(
  <vscale x 4 x float>,
  ptr,
  <vscale x 4 x i1>,
  iXLen,
  iXLen);

define <vscale x 4 x float> @intrinsic_vle_mask_v_nxv4f32_nxv4f32(<vscale x 4 x float> %0, ptr %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vle_mask_v_nxv4f32_nxv4f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT:    vle32.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x float> @llvm.riscv.vle.mask.nxv4f32(
    <vscale x 4 x float> %0,
    ptr %1,
    <vscale x 4 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 4 x float> %a
}

declare <vscale x 8 x float> @llvm.riscv.vle.nxv8f32(
  <vscale x 8 x float>,
  ptr,
  iXLen);

define <vscale x 8 x float> @intrinsic_vle_v_nxv8f32_nxv8f32(ptr %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vle_v_nxv8f32_nxv8f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vle.nxv8f32(
    <vscale x 8 x float> undef,
    ptr %0,
    iXLen %1)

  ret <vscale x 8 x float> %a
}

declare <vscale x 8 x float> @llvm.riscv.vle.mask.nxv8f32(
  <vscale x 8 x float>,
  ptr,
  <vscale x 8 x i1>,
  iXLen,
  iXLen);

define <vscale x 8 x float> @intrinsic_vle_mask_v_nxv8f32_nxv8f32(<vscale x 8 x float> %0, ptr %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vle_mask_v_nxv8f32_nxv8f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT:    vle32.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vle.mask.nxv8f32(
    <vscale x 8 x float> %0,
    ptr %1,
    <vscale x 8 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 8 x float> %a
}

declare <vscale x 16 x float> @llvm.riscv.vle.nxv16f32(
  <vscale x 16 x float>,
  ptr,
  iXLen);

define <vscale x 16 x float> @intrinsic_vle_v_nxv16f32_nxv16f32(ptr %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vle_v_nxv16f32_nxv16f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x float> @llvm.riscv.vle.nxv16f32(
    <vscale x 16 x float> undef,
    ptr %0,
    iXLen %1)

  ret <vscale x 16 x float> %a
}

declare <vscale x 16 x float> @llvm.riscv.vle.mask.nxv16f32(
  <vscale x 16 x float>,
  ptr,
  <vscale x 16 x i1>,
  iXLen,
  iXLen);

define <vscale x 16 x float> @intrinsic_vle_mask_v_nxv16f32_nxv16f32(<vscale x 16 x float> %0, ptr %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vle_mask_v_nxv16f32_nxv16f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
; CHECK-NEXT:    vle32.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x float> @llvm.riscv.vle.mask.nxv16f32(
    <vscale x 16 x float> %0,
    ptr %1,
    <vscale x 16 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 16 x float> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vle.nxv1i16(
  <vscale x 1 x i16>,
  ptr,
  iXLen);

define <vscale x 1 x i16> @intrinsic_vle_v_nxv1i16_nxv1i16(ptr %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vle_v_nxv1i16_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vle16.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vle.nxv1i16(
    <vscale x 1 x i16> undef,
    ptr %0,
    iXLen %1)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vle.mask.nxv1i16(
  <vscale x 1 x i16>,
  ptr,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x i16> @intrinsic_vle_mask_v_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, ptr %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vle_mask_v_nxv1i16_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT:    vle16.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vle.mask.nxv1i16(
    <vscale x 1 x i16> %0,
    ptr %1,
    <vscale x 1 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vle.nxv2i16(
  <vscale x 2 x i16>,
  ptr,
  iXLen);

define <vscale x 2 x i16> @intrinsic_vle_v_nxv2i16_nxv2i16(ptr %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vle_v_nxv2i16_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT:    vle16.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vle.nxv2i16(
    <vscale x 2 x i16> undef,
    ptr %0,
    iXLen %1)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vle.mask.nxv2i16(
  <vscale x 2 x i16>,
  ptr,
  <vscale x 2 x i1>,
  iXLen,
  iXLen);

define <vscale x 2 x i16> @intrinsic_vle_mask_v_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, ptr %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vle_mask_v_nxv2i16_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT:    vle16.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vle.mask.nxv2i16(
    <vscale x 2 x i16> %0,
    ptr %1,
    <vscale x 2 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vle.nxv4i16(
  <vscale x 4 x i16>,
  ptr,
  iXLen);

define <vscale x 4 x i16> @intrinsic_vle_v_nxv4i16_nxv4i16(ptr %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vle_v_nxv4i16_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vle16.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vle.nxv4i16(
    <vscale x 4 x i16> undef,
    ptr %0,
    iXLen %1)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vle.mask.nxv4i16(
  <vscale x 4 x i16>,
  ptr,
  <vscale x 4 x i1>,
  iXLen,
  iXLen);

define <vscale x 4 x i16> @intrinsic_vle_mask_v_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, ptr %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vle_mask_v_nxv4i16_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT:    vle16.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vle.mask.nxv4i16(
    <vscale x 4 x i16> %0,
    ptr %1,
    <vscale x 4 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vle.nxv8i16(
  <vscale x 8 x i16>,
  ptr,
  iXLen);

define <vscale x 8 x i16> @intrinsic_vle_v_nxv8i16_nxv8i16(ptr %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vle_v_nxv8i16_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT:    vle16.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vle.nxv8i16(
    <vscale x 8 x i16> undef,
    ptr %0,
    iXLen %1)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vle.mask.nxv8i16(
  <vscale x 8 x i16>,
  ptr,
  <vscale x 8 x i1>,
  iXLen,
  iXLen);

define <vscale x 8 x i16> @intrinsic_vle_mask_v_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, ptr %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vle_mask_v_nxv8i16_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT:    vle16.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vle.mask.nxv8i16(
    <vscale x 8 x i16> %0,
    ptr %1,
    <vscale x 8 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vle.nxv16i16(
  <vscale x 16 x i16>,
  ptr,
  iXLen);

define <vscale x 16 x i16> @intrinsic_vle_v_nxv16i16_nxv16i16(ptr %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vle_v_nxv16i16_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT:    vle16.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vle.nxv16i16(
    <vscale x 16 x i16> undef,
    ptr %0,
    iXLen %1)

  ret <vscale x 16 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vle.mask.nxv16i16(
  <vscale x 16 x i16>,
  ptr,
  <vscale x 16 x i1>,
  iXLen,
  iXLen);

define <vscale x 16 x i16> @intrinsic_vle_mask_v_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, ptr %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vle_mask_v_nxv16i16_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT:    vle16.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vle.mask.nxv16i16(
    <vscale x 16 x i16> %0,
    ptr %1,
    <vscale x 16 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 16 x i16> %a
}

declare <vscale x 32 x i16> @llvm.riscv.vle.nxv32i16(
  <vscale x 32 x i16>,
  ptr,
  iXLen);

define <vscale x 32 x i16> @intrinsic_vle_v_nxv32i16_nxv32i16(ptr %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vle_v_nxv32i16_nxv32i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
; CHECK-NEXT:    vle16.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vle.nxv32i16(
    <vscale x 32 x i16> undef,
    ptr %0,
    iXLen %1)

  ret <vscale x 32 x i16> %a
}

declare <vscale x 32 x i16> @llvm.riscv.vle.mask.nxv32i16(
  <vscale x 32 x i16>,
  ptr,
  <vscale x 32 x i1>,
  iXLen,
  iXLen);

define <vscale x 32 x i16> @intrinsic_vle_mask_v_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, ptr %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vle_mask_v_nxv32i16_nxv32i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
; CHECK-NEXT:    vle16.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vle.mask.nxv32i16(
    <vscale x 32 x i16> %0,
    ptr %1,
    <vscale x 32 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 32 x i16> %a
}

declare <vscale x 1 x half> @llvm.riscv.vle.nxv1f16(
  <vscale x 1 x half>,
  ptr,
  iXLen);

define <vscale x 1 x half> @intrinsic_vle_v_nxv1f16_nxv1f16(ptr %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vle_v_nxv1f16_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vle16.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vle.nxv1f16(
    <vscale x 1 x half> undef,
    ptr %0,
    iXLen %1)

  ret <vscale x 1 x half> %a
}

declare <vscale x 1 x half> @llvm.riscv.vle.mask.nxv1f16(
  <vscale x 1 x half>,
  ptr,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x half> @intrinsic_vle_mask_v_nxv1f16_nxv1f16(<vscale x 1 x half> %0, ptr %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vle_mask_v_nxv1f16_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT:    vle16.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vle.mask.nxv1f16(
    <vscale x 1 x half> %0,
    ptr %1,
    <vscale x 1 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 1 x half> %a
}

declare <vscale x 2 x half> @llvm.riscv.vle.nxv2f16(
  <vscale x 2 x half>,
  ptr,
  iXLen);

define <vscale x 2 x half> @intrinsic_vle_v_nxv2f16_nxv2f16(ptr %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vle_v_nxv2f16_nxv2f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT:    vle16.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x half> @llvm.riscv.vle.nxv2f16(
    <vscale x 2 x half> undef,
    ptr %0,
    iXLen %1)

  ret <vscale x 2 x half> %a
}

declare <vscale x 2 x half> @llvm.riscv.vle.mask.nxv2f16(
  <vscale x 2 x half>,
  ptr,
  <vscale x 2 x i1>,
  iXLen,
  iXLen);

define <vscale x 2 x half> @intrinsic_vle_mask_v_nxv2f16_nxv2f16(<vscale x 2 x half> %0, ptr %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vle_mask_v_nxv2f16_nxv2f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT:    vle16.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x half> @llvm.riscv.vle.mask.nxv2f16(
    <vscale x 2 x half> %0,
    ptr %1,
    <vscale x 2 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 2 x half> %a
}

declare <vscale x 4 x half> @llvm.riscv.vle.nxv4f16(
  <vscale x 4 x half>,
  ptr,
  iXLen);

define <vscale x 4 x half> @intrinsic_vle_v_nxv4f16_nxv4f16(ptr %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vle_v_nxv4f16_nxv4f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vle16.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vle.nxv4f16(
    <vscale x 4 x half> undef,
    ptr %0,
    iXLen %1)

  ret <vscale x 4 x half> %a
}

declare <vscale x 4 x half> @llvm.riscv.vle.mask.nxv4f16(
  <vscale x 4 x half>,
  ptr,
  <vscale x 4 x i1>,
  iXLen,
  iXLen);

define <vscale x 4 x half> @intrinsic_vle_mask_v_nxv4f16_nxv4f16(<vscale x 4 x half> %0, ptr %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vle_mask_v_nxv4f16_nxv4f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT:    vle16.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vle.mask.nxv4f16(
    <vscale x 4 x half> %0,
    ptr %1,
    <vscale x 4 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 4 x half> %a
}

declare <vscale x 8 x half> @llvm.riscv.vle.nxv8f16(
  <vscale x 8 x half>,
  ptr,
  iXLen);

define <vscale x 8 x half> @intrinsic_vle_v_nxv8f16_nxv8f16(ptr %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vle_v_nxv8f16_nxv8f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT:    vle16.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x half> @llvm.riscv.vle.nxv8f16(
    <vscale x 8 x half> undef,
    ptr %0,
    iXLen %1)

  ret <vscale x 8 x half> %a
}

declare <vscale x 8 x half> @llvm.riscv.vle.mask.nxv8f16(
  <vscale x 8 x half>,
  ptr,
  <vscale x 8 x i1>,
  iXLen,
  iXLen);

define <vscale x 8 x half> @intrinsic_vle_mask_v_nxv8f16_nxv8f16(<vscale x 8 x half> %0, ptr %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vle_mask_v_nxv8f16_nxv8f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT:    vle16.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x half> @llvm.riscv.vle.mask.nxv8f16(
    <vscale x 8 x half> %0,
    ptr %1,
    <vscale x 8 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 8 x half> %a
}

declare <vscale x 16 x half> @llvm.riscv.vle.nxv16f16(
  <vscale x 16 x half>,
  ptr,
  iXLen);

define <vscale x 16 x half> @intrinsic_vle_v_nxv16f16_nxv16f16(ptr %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vle_v_nxv16f16_nxv16f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT:    vle16.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x half> @llvm.riscv.vle.nxv16f16(
    <vscale x 16 x half> undef,
    ptr %0,
    iXLen %1)

  ret <vscale x 16 x half> %a
}

declare <vscale x 16 x half> @llvm.riscv.vle.mask.nxv16f16(
  <vscale x 16 x half>,
  ptr,
  <vscale x 16 x i1>,
  iXLen,
  iXLen);

define <vscale x 16 x half> @intrinsic_vle_mask_v_nxv16f16_nxv16f16(<vscale x 16 x half> %0, ptr %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vle_mask_v_nxv16f16_nxv16f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT:    vle16.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x half> @llvm.riscv.vle.mask.nxv16f16(
    <vscale x 16 x half> %0,
    ptr %1,
    <vscale x 16 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 16 x half> %a
}

declare <vscale x 32 x half> @llvm.riscv.vle.nxv32f16(
  <vscale x 32 x half>,
  ptr,
  iXLen);

define <vscale x 32 x half> @intrinsic_vle_v_nxv32f16_nxv32f16(ptr %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vle_v_nxv32f16_nxv32f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
; CHECK-NEXT:    vle16.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x half> @llvm.riscv.vle.nxv32f16(
    <vscale x 32 x half> undef,
    ptr %0,
    iXLen %1)

  ret <vscale x 32 x half> %a
}

declare <vscale x 32 x half> @llvm.riscv.vle.mask.nxv32f16(
  <vscale x 32 x half>,
  ptr,
  <vscale x 32 x i1>,
  iXLen,
  iXLen);

define <vscale x 32 x half> @intrinsic_vle_mask_v_nxv32f16_nxv32f16(<vscale x 32 x half> %0, ptr %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vle_mask_v_nxv32f16_nxv32f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
; CHECK-NEXT:    vle16.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x half> @llvm.riscv.vle.mask.nxv32f16(
    <vscale x 32 x half> %0,
    ptr %1,
    <vscale x 32 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 32 x half> %a
}

declare <vscale x 1 x bfloat> @llvm.riscv.vle.nxv1bf16(
  <vscale x 1 x bfloat>,
  ptr,
  iXLen);

define <vscale x 1 x bfloat> @intrinsic_vle_v_nxv1bf16_nxv1bf16(ptr %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vle_v_nxv1bf16_nxv1bf16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vle16.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x bfloat> @llvm.riscv.vle.nxv1bf16(
    <vscale x 1 x bfloat> undef,
    ptr %0,
    iXLen %1)

  ret <vscale x 1 x bfloat> %a
}

declare <vscale x 1 x bfloat> @llvm.riscv.vle.mask.nxv1bf16(
  <vscale x 1 x bfloat>,
  ptr,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x bfloat> @intrinsic_vle_mask_v_nxv1bf16_nxv1bf16(<vscale x 1 x bfloat> %0, ptr %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vle_mask_v_nxv1bf16_nxv1bf16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT:    vle16.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x bfloat> @llvm.riscv.vle.mask.nxv1bf16(
    <vscale x 1 x bfloat> %0,
    ptr %1,
    <vscale x 1 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 1 x bfloat> %a
}

declare <vscale x 2 x bfloat> @llvm.riscv.vle.nxv2bf16(
  <vscale x 2 x bfloat>,
  ptr,
  iXLen);

define <vscale x 2 x bfloat> @intrinsic_vle_v_nxv2bf16_nxv2bf16(ptr %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vle_v_nxv2bf16_nxv2bf16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT:    vle16.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x bfloat> @llvm.riscv.vle.nxv2bf16(
    <vscale x 2 x bfloat> undef,
    ptr %0,
    iXLen %1)

  ret <vscale x 2 x bfloat> %a
}

declare <vscale x 2 x bfloat> @llvm.riscv.vle.mask.nxv2bf16(
  <vscale x 2 x bfloat>,
  ptr,
  <vscale x 2 x i1>,
  iXLen,
  iXLen);

define <vscale x 2 x bfloat> @intrinsic_vle_mask_v_nxv2bf16_nxv2bf16(<vscale x 2 x bfloat> %0, ptr %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vle_mask_v_nxv2bf16_nxv2bf16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT:    vle16.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x bfloat> @llvm.riscv.vle.mask.nxv2bf16(
    <vscale x 2 x bfloat> %0,
    ptr %1,
    <vscale x 2 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 2 x bfloat> %a
}

declare <vscale x 4 x bfloat> @llvm.riscv.vle.nxv4bf16(
  <vscale x 4 x bfloat>,
  ptr,
  iXLen);

define <vscale x 4 x bfloat> @intrinsic_vle_v_nxv4bf16_nxv4bf16(ptr %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vle_v_nxv4bf16_nxv4bf16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vle16.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x bfloat> @llvm.riscv.vle.nxv4bf16(
    <vscale x 4 x bfloat> undef,
    ptr %0,
    iXLen %1)

  ret <vscale x 4 x bfloat> %a
}

declare <vscale x 4 x bfloat> @llvm.riscv.vle.mask.nxv4bf16(
  <vscale x 4 x bfloat>,
  ptr,
  <vscale x 4 x i1>,
  iXLen,
  iXLen);

define <vscale x 4 x bfloat> @intrinsic_vle_mask_v_nxv4bf16_nxv4bf16(<vscale x 4 x bfloat> %0, ptr %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vle_mask_v_nxv4bf16_nxv4bf16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT:    vle16.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x bfloat> @llvm.riscv.vle.mask.nxv4bf16(
    <vscale x 4 x bfloat> %0,
    ptr %1,
    <vscale x 4 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 4 x bfloat> %a
}

declare <vscale x 8 x bfloat> @llvm.riscv.vle.nxv8bf16(
  <vscale x 8 x bfloat>,
  ptr,
  iXLen);

define <vscale x 8 x bfloat> @intrinsic_vle_v_nxv8bf16_nxv8bf16(ptr %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vle_v_nxv8bf16_nxv8bf16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT:    vle16.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x bfloat> @llvm.riscv.vle.nxv8bf16(
    <vscale x 8 x bfloat> undef,
    ptr %0,
    iXLen %1)

  ret <vscale x 8 x bfloat> %a
}

declare <vscale x 8 x bfloat> @llvm.riscv.vle.mask.nxv8bf16(
  <vscale x 8 x bfloat>,
  ptr,
  <vscale x 8 x i1>,
  iXLen,
  iXLen);

define <vscale x 8 x bfloat> @intrinsic_vle_mask_v_nxv8bf16_nxv8bf16(<vscale x 8 x bfloat> %0, ptr %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vle_mask_v_nxv8bf16_nxv8bf16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT:    vle16.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x bfloat> @llvm.riscv.vle.mask.nxv8bf16(
    <vscale x 8 x bfloat> %0,
    ptr %1,
    <vscale x 8 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 8 x bfloat> %a
}

declare <vscale x 16 x bfloat> @llvm.riscv.vle.nxv16bf16(
  <vscale x 16 x bfloat>,
  ptr,
  iXLen);

define <vscale x 16 x bfloat> @intrinsic_vle_v_nxv16bf16_nxv16bf16(ptr %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vle_v_nxv16bf16_nxv16bf16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT:    vle16.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x bfloat> @llvm.riscv.vle.nxv16bf16(
    <vscale x 16 x bfloat> undef,
    ptr %0,
    iXLen %1)

  ret <vscale x 16 x bfloat> %a
}

declare <vscale x 16 x bfloat> @llvm.riscv.vle.mask.nxv16bf16(
  <vscale x 16 x bfloat>,
  ptr,
  <vscale x 16 x i1>,
  iXLen,
  iXLen);

define <vscale x 16 x bfloat> @intrinsic_vle_mask_v_nxv16bf16_nxv16bf16(<vscale x 16 x bfloat> %0, ptr %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vle_mask_v_nxv16bf16_nxv16bf16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT:    vle16.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x bfloat> @llvm.riscv.vle.mask.nxv16bf16(
    <vscale x 16 x bfloat> %0,
    ptr %1,
    <vscale x 16 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 16 x bfloat> %a
}

declare <vscale x 32 x bfloat> @llvm.riscv.vle.nxv32bf16(
  <vscale x 32 x bfloat>,
  ptr,
  iXLen);

define <vscale x 32 x bfloat> @intrinsic_vle_v_nxv32bf16_nxv32bf16(ptr %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vle_v_nxv32bf16_nxv32bf16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
; CHECK-NEXT:    vle16.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x bfloat> @llvm.riscv.vle.nxv32bf16(
    <vscale x 32 x bfloat> undef,
    ptr %0,
    iXLen %1)

  ret <vscale x 32 x bfloat> %a
}

declare <vscale x 32 x bfloat> @llvm.riscv.vle.mask.nxv32bf16(
  <vscale x 32 x bfloat>,
  ptr,
  <vscale x 32 x i1>,
  iXLen,
  iXLen);

define <vscale x 32 x bfloat> @intrinsic_vle_mask_v_nxv32bf16_nxv32bf16(<vscale x 32 x bfloat> %0, ptr %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vle_mask_v_nxv32bf16_nxv32bf16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
; CHECK-NEXT:    vle16.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x bfloat> @llvm.riscv.vle.mask.nxv32bf16(
    <vscale x 32 x bfloat> %0,
    ptr %1,
    <vscale x 32 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 32 x bfloat> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vle.nxv1i8(
  <vscale x 1 x i8>,
  ptr,
  iXLen);

define <vscale x 1 x i8> @intrinsic_vle_v_nxv1i8_nxv1i8(ptr %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vle_v_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vle.nxv1i8(
    <vscale x 1 x i8> undef,
    ptr %0,
    iXLen %1)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vle.mask.nxv1i8(
  <vscale x 1 x i8>,
  ptr,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x i8> @intrinsic_vle_mask_v_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, ptr %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vle_mask_v_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT:    vle8.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vle.mask.nxv1i8(
    <vscale x 1 x i8> %0,
    ptr %1,
    <vscale x 1 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 2 x i8> @llvm.riscv.vle.nxv2i8(
  <vscale x 2 x i8>,
  ptr,
  iXLen);

define <vscale x 2 x i8> @intrinsic_vle_v_nxv2i8_nxv2i8(ptr %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vle_v_nxv2i8_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vle.nxv2i8(
    <vscale x 2 x i8> undef,
    ptr %0,
    iXLen %1)

  ret <vscale x 2 x i8> %a
}

declare <vscale x 2 x i8> @llvm.riscv.vle.mask.nxv2i8(
  <vscale x 2 x i8>,
  ptr,
  <vscale x 2 x i1>,
  iXLen,
  iXLen);

define <vscale x 2 x i8> @intrinsic_vle_mask_v_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, ptr %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vle_mask_v_nxv2i8_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT:    vle8.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vle.mask.nxv2i8(
    <vscale x 2 x i8> %0,
    ptr %1,
    <vscale x 2 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 2 x i8> %a
}

declare <vscale x 4 x i8> @llvm.riscv.vle.nxv4i8(
  <vscale x 4 x i8>,
  ptr,
  iXLen);

define <vscale x 4 x i8> @intrinsic_vle_v_nxv4i8_nxv4i8(ptr %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vle_v_nxv4i8_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vle.nxv4i8(
    <vscale x 4 x i8> undef,
    ptr %0,
    iXLen %1)

  ret <vscale x 4 x i8> %a
}

declare <vscale x 4 x i8> @llvm.riscv.vle.mask.nxv4i8(
  <vscale x 4 x i8>,
  ptr,
  <vscale x 4 x i1>,
  iXLen,
  iXLen);

define <vscale x 4 x i8> @intrinsic_vle_mask_v_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, ptr %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vle_mask_v_nxv4i8_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT:    vle8.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vle.mask.nxv4i8(
    <vscale x 4 x i8> %0,
    ptr %1,
    <vscale x 4 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 4 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vle.nxv8i8(
  <vscale x 8 x i8>,
  ptr,
  iXLen);

define <vscale x 8 x i8> @intrinsic_vle_v_nxv8i8_nxv8i8(ptr %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vle_v_nxv8i8_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vle.nxv8i8(
    <vscale x 8 x i8> undef,
    ptr %0,
    iXLen %1)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vle.mask.nxv8i8(
  <vscale x 8 x i8>,
  ptr,
  <vscale x 8 x i1>,
  iXLen,
  iXLen);

define <vscale x 8 x i8> @intrinsic_vle_mask_v_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, ptr %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vle_mask_v_nxv8i8_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT:    vle8.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vle.mask.nxv8i8(
    <vscale x 8 x i8> %0,
    ptr %1,
    <vscale x 8 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 16 x i8> @llvm.riscv.vle.nxv16i8(
  <vscale x 16 x i8>,
  ptr,
  iXLen);

define <vscale x 16 x i8> @intrinsic_vle_v_nxv16i8_nxv16i8(ptr %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vle_v_nxv16i8_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vle.nxv16i8(
    <vscale x 16 x i8> undef,
    ptr %0,
    iXLen %1)

  ret <vscale x 16 x i8> %a
}

declare <vscale x 16 x i8> @llvm.riscv.vle.mask.nxv16i8(
  <vscale x 16 x i8>,
  ptr,
  <vscale x 16 x i1>,
  iXLen,
  iXLen);

define <vscale x 16 x i8> @intrinsic_vle_mask_v_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, ptr %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vle_mask_v_nxv16i8_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT:    vle8.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vle.mask.nxv16i8(
    <vscale x 16 x i8> %0,
    ptr %1,
    <vscale x 16 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 16 x i8> %a
}

declare <vscale x 32 x i8> @llvm.riscv.vle.nxv32i8(
  <vscale x 32 x i8>,
  ptr,
  iXLen);

define <vscale x 32 x i8> @intrinsic_vle_v_nxv32i8_nxv32i8(ptr %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vle_v_nxv32i8_nxv32i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vle.nxv32i8(
    <vscale x 32 x i8> undef,
    ptr %0,
    iXLen %1)

  ret <vscale x 32 x i8> %a
}

declare <vscale x 32 x i8> @llvm.riscv.vle.mask.nxv32i8(
  <vscale x 32 x i8>,
  ptr,
  <vscale x 32 x i1>,
  iXLen,
  iXLen);

define <vscale x 32 x i8> @intrinsic_vle_mask_v_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, ptr %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vle_mask_v_nxv32i8_nxv32i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT:    vle8.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vle.mask.nxv32i8(
    <vscale x 32 x i8> %0,
    ptr %1,
    <vscale x 32 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 32 x i8> %a
}

declare <vscale x 64 x i8> @llvm.riscv.vle.nxv64i8(
  <vscale x 64 x i8>,
  ptr,
  iXLen);

define <vscale x 64 x i8> @intrinsic_vle_v_nxv64i8_nxv64i8(ptr %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vle_v_nxv64i8_nxv64i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 64 x i8> @llvm.riscv.vle.nxv64i8(
    <vscale x 64 x i8> undef,
    ptr %0,
    iXLen %1)

  ret <vscale x 64 x i8> %a
}

declare <vscale x 64 x i8> @llvm.riscv.vle.mask.nxv64i8(
  <vscale x 64 x i8>,
  ptr,
  <vscale x 64 x i1>,
  iXLen,
  iXLen);

define <vscale x 64 x i8> @intrinsic_vle_mask_v_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, ptr %1, <vscale x 64 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vle_mask_v_nxv64i8_nxv64i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
; CHECK-NEXT:    vle8.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 64 x i8> @llvm.riscv.vle.mask.nxv64i8(
    <vscale x 64 x i8> %0,
    ptr %1,
    <vscale x 64 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 64 x i8> %a
}