; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zvfhmin,+zvfbfmin \
; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfhmin,+zvfbfmin \
; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s

; Indexed loads (vluxei32) with i32 index vectors into i8 element results.
declare <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i32(
  <vscale x 1 x i8>,
  ptr,
  <vscale x 1 x i32>,
  iXLen);

define <vscale x 1 x i8> @intrinsic_vluxei_v_nxv1i8_nxv1i8_nxv1i32(ptr %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv1i8_nxv1i8_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vluxei32.v v9, (a0), v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i32(
    <vscale x 1 x i8> undef,
    ptr %0,
    <vscale x 1 x i32> %1,
    iXLen %2)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1i32(
  <vscale x 1 x i8>,
  ptr,
  <vscale x 1 x i32>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x i8> @intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1i32(<vscale x 1 x i8> %0, ptr %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT:    vluxei32.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1i32(
    <vscale x 1 x i8> %0,
    ptr %1,
    <vscale x 1 x i32> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 2 x i8> @llvm.riscv.vluxei.nxv2i8.nxv2i32(
  <vscale x 2 x i8>,
  ptr,
  <vscale x 2 x i32>,
  iXLen);

define <vscale x 2 x i8> @intrinsic_vluxei_v_nxv2i8_nxv2i8_nxv2i32(ptr %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv2i8_nxv2i8_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT:    vluxei32.v v9, (a0), v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vluxei.nxv2i8.nxv2i32(
    <vscale x 2 x i8> undef,
    ptr %0,
    <vscale x 2 x i32> %1,
    iXLen %2)

  ret <vscale x 2 x i8> %a
}

declare <vscale x 2 x i8> @llvm.riscv.vluxei.mask.nxv2i8.nxv2i32(
  <vscale x 2 x i8>,
  ptr,
  <vscale x 2 x i32>,
  <vscale x 2 x i1>,
  iXLen,
  iXLen);

define <vscale x 2 x i8> @intrinsic_vluxei_mask_v_nxv2i8_nxv2i8_nxv2i32(<vscale x 2 x i8> %0, ptr %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i8_nxv2i8_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT:    vluxei32.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vluxei.mask.nxv2i8.nxv2i32(
    <vscale x 2 x i8> %0,
    ptr %1,
    <vscale x 2 x i32> %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x i8> %a
}

declare <vscale x 4 x i8> @llvm.riscv.vluxei.nxv4i8.nxv4i32(
  <vscale x 4 x i8>,
  ptr,
  <vscale x 4 x i32>,
  iXLen);

define <vscale x 4 x i8> @intrinsic_vluxei_v_nxv4i8_nxv4i8_nxv4i32(ptr %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv4i8_nxv4i8_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vluxei32.v v10, (a0), v8
; CHECK-NEXT:    vmv1r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vluxei.nxv4i8.nxv4i32(
    <vscale x 4 x i8> undef,
    ptr %0,
    <vscale x 4 x i32> %1,
    iXLen %2)

  ret <vscale x 4 x i8> %a
}

declare <vscale x 4 x i8> @llvm.riscv.vluxei.mask.nxv4i8.nxv4i32(
  <vscale x 4 x i8>,
  ptr,
  <vscale x 4 x i32>,
  <vscale x 4 x i1>,
  iXLen,
  iXLen);

define <vscale x 4 x i8> @intrinsic_vluxei_mask_v_nxv4i8_nxv4i8_nxv4i32(<vscale x 4 x i8> %0, ptr %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i8_nxv4i8_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT:    vluxei32.v v8, (a0), v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vluxei.mask.nxv4i8.nxv4i32(
    <vscale x 4 x i8> %0,
    ptr %1,
    <vscale x 4 x i32> %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vluxei.nxv8i8.nxv8i32(
  <vscale x 8 x i8>,
  ptr,
  <vscale x 8 x i32>,
  iXLen);

define <vscale x 8 x i8> @intrinsic_vluxei_v_nxv8i8_nxv8i8_nxv8i32(ptr %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv8i8_nxv8i8_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT:    vluxei32.v v12, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vluxei.nxv8i8.nxv8i32(
    <vscale x 8 x i8> undef,
    ptr %0,
    <vscale x 8 x i32> %1,
    iXLen %2)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vluxei.mask.nxv8i8.nxv8i32(
  <vscale x 8 x i8>,
  ptr,
  <vscale x 8 x i32>,
  <vscale x 8 x i1>,
  iXLen,
  iXLen);

define <vscale x 8 x i8> @intrinsic_vluxei_mask_v_nxv8i8_nxv8i8_nxv8i32(<vscale x 8 x i8> %0, ptr %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i8_nxv8i8_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT:    vluxei32.v v8, (a0), v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vluxei.mask.nxv8i8.nxv8i32(
    <vscale x 8 x i8> %0,
    ptr %1,
    <vscale x 8 x i32> %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 16 x i8> @llvm.riscv.vluxei.nxv16i8.nxv16i32(
  <vscale x 16 x i8>,
  ptr,
  <vscale x 16 x i32>,
  iXLen);

define <vscale x 16 x i8> @intrinsic_vluxei_v_nxv16i8_nxv16i8_nxv16i32(ptr %0, <vscale x 16 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv16i8_nxv16i8_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT:    vluxei32.v v16, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vluxei.nxv16i8.nxv16i32(
    <vscale x 16 x i8> undef,
    ptr %0,
    <vscale x 16 x i32> %1,
    iXLen %2)

  ret <vscale x 16 x i8> %a
}

declare <vscale x 16 x i8> @llvm.riscv.vluxei.mask.nxv16i8.nxv16i32(
  <vscale x 16 x i8>,
  ptr,
  <vscale x 16 x i32>,
  <vscale x 16 x i1>,
  iXLen,
  iXLen);

define <vscale x 16 x i8> @intrinsic_vluxei_mask_v_nxv16i8_nxv16i8_nxv16i32(<vscale x 16 x i8> %0, ptr %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16i8_nxv16i8_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT:    vluxei32.v v8, (a0), v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vluxei.mask.nxv16i8.nxv16i32(
    <vscale x 16 x i8> %0,
    ptr %1,
    <vscale x 16 x i32> %2,
    <vscale x 16 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 16 x i8> %a
}

; Indexed loads (vluxei32) with i32 index vectors into i16 element results.
declare <vscale x 1 x i16> @llvm.riscv.vluxei.nxv1i16.nxv1i32(
  <vscale x 1 x i16>,
  ptr,
  <vscale x 1 x i32>,
  iXLen);

define <vscale x 1 x i16> @intrinsic_vluxei_v_nxv1i16_nxv1i16_nxv1i32(ptr %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv1i16_nxv1i16_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vluxei32.v v9, (a0), v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vluxei.nxv1i16.nxv1i32(
    <vscale x 1 x i16> undef,
    ptr %0,
    <vscale x 1 x i32> %1,
    iXLen %2)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vluxei.mask.nxv1i16.nxv1i32(
  <vscale x 1 x i16>,
  ptr,
  <vscale x 1 x i32>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x i16> @intrinsic_vluxei_mask_v_nxv1i16_nxv1i16_nxv1i32(<vscale x 1 x i16> %0, ptr %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i16_nxv1i16_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT:    vluxei32.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vluxei.mask.nxv1i16.nxv1i32(
    <vscale x 1 x i16> %0,
    ptr %1,
    <vscale x 1 x i32> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vluxei.nxv2i16.nxv2i32(
  <vscale x 2 x i16>,
  ptr,
  <vscale x 2 x i32>,
  iXLen);

define <vscale x 2 x i16> @intrinsic_vluxei_v_nxv2i16_nxv2i16_nxv2i32(ptr %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv2i16_nxv2i16_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT:    vluxei32.v v9, (a0), v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vluxei.nxv2i16.nxv2i32(
    <vscale x 2 x i16> undef,
    ptr %0,
    <vscale x 2 x i32> %1,
    iXLen %2)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vluxei.mask.nxv2i16.nxv2i32(
  <vscale x 2 x i16>,
  ptr,
  <vscale x 2 x i32>,
  <vscale x 2 x i1>,
  iXLen,
  iXLen);

define <vscale x 2 x i16> @intrinsic_vluxei_mask_v_nxv2i16_nxv2i16_nxv2i32(<vscale x 2 x i16> %0, ptr %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i16_nxv2i16_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT:    vluxei32.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vluxei.mask.nxv2i16.nxv2i32(
    <vscale x 2 x i16> %0,
    ptr %1,
    <vscale x 2 x i32> %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vluxei.nxv4i16.nxv4i32(
  <vscale x 4 x i16>,
  ptr,
  <vscale x 4 x i32>,
  iXLen);

define <vscale x 4 x i16> @intrinsic_vluxei_v_nxv4i16_nxv4i16_nxv4i32(ptr %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv4i16_nxv4i16_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vluxei32.v v10, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vluxei.nxv4i16.nxv4i32(
    <vscale x 4 x i16> undef,
    ptr %0,
    <vscale x 4 x i32> %1,
    iXLen %2)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vluxei.mask.nxv4i16.nxv4i32(
  <vscale x 4 x i16>,
  ptr,
  <vscale x 4 x i32>,
  <vscale x 4 x i1>,
  iXLen,
  iXLen);

define <vscale x 4 x i16> @intrinsic_vluxei_mask_v_nxv4i16_nxv4i16_nxv4i32(<vscale x 4 x i16> %0, ptr %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i16_nxv4i16_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT:    vluxei32.v v8, (a0), v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vluxei.mask.nxv4i16.nxv4i32(
    <vscale x 4 x i16> %0,
    ptr %1,
    <vscale x 4 x i32> %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vluxei.nxv8i16.nxv8i32(
  <vscale x 8 x i16>,
  ptr,
  <vscale x 8 x i32>,
  iXLen);

define <vscale x 8 x i16> @intrinsic_vluxei_v_nxv8i16_nxv8i16_nxv8i32(ptr %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv8i16_nxv8i16_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT:    vluxei32.v v12, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vluxei.nxv8i16.nxv8i32(
    <vscale x 8 x i16> undef,
    ptr %0,
    <vscale x 8 x i32> %1,
    iXLen %2)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vluxei.mask.nxv8i16.nxv8i32(
  <vscale x 8 x i16>,
  ptr,
  <vscale x 8 x i32>,
  <vscale x 8 x i1>,
  iXLen,
  iXLen);

define <vscale x 8 x i16> @intrinsic_vluxei_mask_v_nxv8i16_nxv8i16_nxv8i32(<vscale x 8 x i16> %0, ptr %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i16_nxv8i16_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT:    vluxei32.v v8, (a0), v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vluxei.mask.nxv8i16.nxv8i32(
    <vscale x 8 x i16> %0,
    ptr %1,
    <vscale x 8 x i32> %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vluxei.nxv16i16.nxv16i32(
  <vscale x 16 x i16>,
  ptr,
  <vscale x 16 x i32>,
  iXLen);

define <vscale x 16 x i16> @intrinsic_vluxei_v_nxv16i16_nxv16i16_nxv16i32(ptr %0, <vscale x 16 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv16i16_nxv16i16_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT:    vluxei32.v v16, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vluxei.nxv16i16.nxv16i32(
    <vscale x 16 x i16> undef,
    ptr %0,
    <vscale x 16 x i32> %1,
    iXLen %2)

  ret <vscale x 16 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vluxei.mask.nxv16i16.nxv16i32(
  <vscale x 16 x i16>,
  ptr,
  <vscale x 16 x i32>,
  <vscale x 16 x i1>,
  iXLen,
  iXLen);

define <vscale x 16 x i16> @intrinsic_vluxei_mask_v_nxv16i16_nxv16i16_nxv16i32(<vscale x 16 x i16> %0, ptr %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16i16_nxv16i16_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT:    vluxei32.v v8, (a0), v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vluxei.mask.nxv16i16.nxv16i32(
    <vscale x 16 x i16> %0,
    ptr %1,
    <vscale x 16 x i32> %2,
    <vscale x 16 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 16 x i16> %a
}

; Indexed loads (vluxei32) with i32 index vectors into i32 element results
; (index and data EEW match, so the load can execute in place in v8).
declare <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i32(
  <vscale x 1 x i32>,
  ptr,
  <vscale x 1 x i32>,
  iXLen);

define <vscale x 1 x i32> @intrinsic_vluxei_v_nxv1i32_nxv1i32_nxv1i32(ptr %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv1i32_nxv1i32_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    vluxei32.v v8, (a0), v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i32(
    <vscale x 1 x i32> undef,
    ptr %0,
    <vscale x 1 x i32> %1,
    iXLen %2)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vluxei.mask.nxv1i32.nxv1i32(
  <vscale x 1 x i32>,
  ptr,
  <vscale x 1 x i32>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x i32> @intrinsic_vluxei_mask_v_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, ptr %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i32_nxv1i32_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT:    vluxei32.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vluxei.mask.nxv1i32.nxv1i32(
    <vscale x 1 x i32> %0,
    ptr %1,
    <vscale x 1 x i32> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i32(
  <vscale x 2 x i32>,
  ptr,
  <vscale x 2 x i32>,
  iXLen);

define <vscale x 2 x i32> @intrinsic_vluxei_v_nxv2i32_nxv2i32_nxv2i32(ptr %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv2i32_nxv2i32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vluxei32.v v8, (a0), v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i32(
    <vscale x 2 x i32> undef,
    ptr %0,
    <vscale x 2 x i32> %1,
    iXLen %2)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vluxei.mask.nxv2i32.nxv2i32(
  <vscale x 2 x i32>,
  ptr,
  <vscale x 2 x i32>,
  <vscale x 2 x i1>,
  iXLen,
  iXLen);

define <vscale x 2 x i32> @intrinsic_vluxei_mask_v_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, ptr %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i32_nxv2i32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT:    vluxei32.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vluxei.mask.nxv2i32.nxv2i32(
    <vscale x 2 x i32> %0,
    ptr %1,
    <vscale x 2 x i32> %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vluxei.nxv4i32.nxv4i32(
  <vscale x 4 x i32>,
  ptr,
  <vscale x 4 x i32>,
  iXLen);

define <vscale x 4 x i32> @intrinsic_vluxei_v_nxv4i32_nxv4i32_nxv4i32(ptr %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv4i32_nxv4i32_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT:    vluxei32.v v8, (a0), v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vluxei.nxv4i32.nxv4i32(
    <vscale x 4 x i32> undef,
    ptr %0,
    <vscale x 4 x i32> %1,
    iXLen %2)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vluxei.mask.nxv4i32.nxv4i32(
  <vscale x 4 x i32>,
  ptr,
  <vscale x 4 x i32>,
  <vscale x 4 x i1>,
  iXLen,
  iXLen);

define <vscale x 4 x i32> @intrinsic_vluxei_mask_v_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, ptr %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i32_nxv4i32_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT:    vluxei32.v v8, (a0), v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vluxei.mask.nxv4i32.nxv4i32(
    <vscale x 4 x i32> %0,
    ptr %1,
    <vscale x 4 x i32> %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vluxei.nxv8i32.nxv8i32(
  <vscale x 8 x i32>,
  ptr,
  <vscale x 8 x i32>,
  iXLen);

define <vscale x 8 x i32> @intrinsic_vluxei_v_nxv8i32_nxv8i32_nxv8i32(ptr %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv8i32_nxv8i32_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT:    vluxei32.v v8, (a0), v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vluxei.nxv8i32.nxv8i32(
    <vscale x 8 x i32> undef,
    ptr %0,
    <vscale x 8 x i32> %1,
    iXLen %2)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vluxei.mask.nxv8i32.nxv8i32(
  <vscale x 8 x i32>,
  ptr,
  <vscale x 8 x i32>,
  <vscale x 8 x i1>,
  iXLen,
  iXLen);

define <vscale x 8 x i32> @intrinsic_vluxei_mask_v_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, ptr %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i32_nxv8i32_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT:    vluxei32.v v8, (a0), v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vluxei.mask.nxv8i32.nxv8i32(
    <vscale x 8 x i32> %0,
    ptr %1,
    <vscale x 8 x i32> %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 16 x i32> @llvm.riscv.vluxei.nxv16i32.nxv16i32(
  <vscale x 16 x i32>,
  ptr,
  <vscale x 16 x i32>,
  iXLen);

define <vscale x 16 x i32> @intrinsic_vluxei_v_nxv16i32_nxv16i32_nxv16i32(ptr %0, <vscale x 16 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv16i32_nxv16i32_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT:    vluxei32.v v8, (a0), v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vluxei.nxv16i32.nxv16i32(
    <vscale x 16 x i32> undef,
    ptr %0,
    <vscale x 16 x i32> %1,
    iXLen %2)

  ret <vscale x 16 x i32> %a
}

declare <vscale x 16 x i32> @llvm.riscv.vluxei.mask.nxv16i32.nxv16i32(
  <vscale x 16 x i32>,
  ptr,
  <vscale x 16 x i32>,
  <vscale x 16 x i1>,
  iXLen,
  iXLen);

define <vscale x 16 x i32> @intrinsic_vluxei_mask_v_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, ptr %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16i32_nxv16i32_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
; CHECK-NEXT:    vluxei32.v v8, (a0), v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vluxei.mask.nxv16i32.nxv16i32(
    <vscale x 16 x i32> %0,
    ptr %1,
    <vscale x 16 x i32> %2,
    <vscale x 16 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 16 x i32> %a
}

; Indexed loads (vluxei32) with i32 index vectors into i64 element results.
declare <vscale x 1 x i64> @llvm.riscv.vluxei.nxv1i64.nxv1i32(
  <vscale x 1 x i64>,
  ptr,
  <vscale x 1 x i32>,
  iXLen);

define <vscale x 1 x i64> @intrinsic_vluxei_v_nxv1i64_nxv1i64_nxv1i32(ptr %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv1i64_nxv1i64_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT:    vluxei32.v v9, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vluxei.nxv1i64.nxv1i32(
    <vscale x 1 x i64> undef,
    ptr %0,
    <vscale x 1 x i32> %1,
    iXLen %2)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vluxei.mask.nxv1i64.nxv1i32(
  <vscale x 1 x i64>,
  ptr,
  <vscale x 1 x i32>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x i64> @intrinsic_vluxei_mask_v_nxv1i64_nxv1i64_nxv1i32(<vscale x 1 x i64> %0, ptr %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i64_nxv1i64_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT:    vluxei32.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vluxei.mask.nxv1i64.nxv1i32(
    <vscale x 1 x i64> %0,
    ptr %1,
    <vscale x 1 x i32> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vluxei.nxv2i64.nxv2i32(
  <vscale x 2 x i64>,
  ptr,
  <vscale x 2 x i32>,
  iXLen);

define <vscale x 2 x i64> @intrinsic_vluxei_v_nxv2i64_nxv2i64_nxv2i32(ptr %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv2i64_nxv2i64_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT:    vluxei32.v v10, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vluxei.nxv2i64.nxv2i32(
    <vscale x 2 x i64> undef,
    ptr %0,
    <vscale x 2 x i32> %1,
    iXLen %2)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vluxei.mask.nxv2i64.nxv2i32(
  <vscale x 2 x i64>,
  ptr,
  <vscale x 2 x i32>,
  <vscale x 2 x i1>,
  iXLen,
  iXLen);

define <vscale x 2 x i64> @intrinsic_vluxei_mask_v_nxv2i64_nxv2i64_nxv2i32(<vscale x 2 x i64> %0, ptr %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i64_nxv2i64_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT:    vluxei32.v v8, (a0), v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vluxei.mask.nxv2i64.nxv2i32(
    <vscale x 2 x i64> %0,
    ptr %1,
    <vscale x 2 x i32> %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vluxei.nxv4i64.nxv4i32(
  <vscale x 4 x i64>,
  ptr,
  <vscale x 4 x i32>,
  iXLen);

define <vscale x 4 x i64> @intrinsic_vluxei_v_nxv4i64_nxv4i64_nxv4i32(ptr %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv4i64_nxv4i64_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
; CHECK-NEXT:    vluxei32.v v12, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vluxei.nxv4i64.nxv4i32(
    <vscale x 4 x i64> undef,
    ptr %0,
    <vscale x 4 x i32> %1,
    iXLen %2)

  ret <vscale x 4 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vluxei.mask.nxv4i64.nxv4i32(
  <vscale x 4 x i64>,
  ptr,
  <vscale x 4 x i32>,
  <vscale x 4 x i1>,
  iXLen,
  iXLen);

define <vscale x 4 x i64> @intrinsic_vluxei_mask_v_nxv4i64_nxv4i64_nxv4i32(<vscale x 4 x i64> %0, ptr %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i64_nxv4i64_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
; CHECK-NEXT:    vluxei32.v v8, (a0), v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vluxei.mask.nxv4i64.nxv4i32(
    <vscale x 4 x i64> %0,
    ptr %1,
    <vscale x 4 x i32> %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.vluxei.nxv8i64.nxv8i32(
  <vscale x 8 x i64>,
  ptr,
  <vscale x 8 x i32>,
  iXLen);

define <vscale x 8 x i64> @intrinsic_vluxei_v_nxv8i64_nxv8i64_nxv8i32(ptr %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv8i64_nxv8i64_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
; CHECK-NEXT:    vluxei32.v v16, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vluxei.nxv8i64.nxv8i32(
    <vscale x 8 x i64> undef,
    ptr %0,
    <vscale x 8 x i32> %1,
    iXLen %2)

  ret <vscale x 8 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.vluxei.mask.nxv8i64.nxv8i32(
  <vscale x 8 x i64>,
  ptr,
  <vscale x 8 x i32>,
  <vscale x 8 x i1>,
  iXLen,
  iXLen);

define <vscale x 8 x i64> @intrinsic_vluxei_mask_v_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i64> %0, ptr %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i64_nxv8i64_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
; CHECK-NEXT:    vluxei32.v v8, (a0), v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vluxei.mask.nxv8i64.nxv8i32(
    <vscale x 8 x i64> %0,
    ptr %1,
    <vscale x 8 x i32> %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x i64> %a
}

; Indexed loads (vluxei32) with i32 index vectors into f16 element results.
declare <vscale x 1 x half> @llvm.riscv.vluxei.nxv1f16.nxv1i32(
  <vscale x 1 x half>,
  ptr,
  <vscale x 1 x i32>,
  iXLen);

define <vscale x 1 x half> @intrinsic_vluxei_v_nxv1f16_nxv1f16_nxv1i32(ptr %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv1f16_nxv1f16_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vluxei32.v v9, (a0), v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vluxei.nxv1f16.nxv1i32(
    <vscale x 1 x half> undef,
    ptr %0,
    <vscale x 1 x i32> %1,
    iXLen %2)

  ret <vscale x 1 x half> %a
}

declare <vscale x 1 x half> @llvm.riscv.vluxei.mask.nxv1f16.nxv1i32(
  <vscale x 1 x half>,
  ptr,
  <vscale x 1 x i32>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x half> @intrinsic_vluxei_mask_v_nxv1f16_nxv1f16_nxv1i32(<vscale x 1 x half> %0, ptr %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f16_nxv1f16_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT:    vluxei32.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vluxei.mask.nxv1f16.nxv1i32(
    <vscale x 1 x half> %0,
    ptr %1,
    <vscale x 1 x i32> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x half> %a
}

declare <vscale x 2 x half> @llvm.riscv.vluxei.nxv2f16.nxv2i32(
  <vscale x 2 x half>,
  ptr,
  <vscale x 2 x i32>,
  iXLen);

define <vscale x 2 x half> @intrinsic_vluxei_v_nxv2f16_nxv2f16_nxv2i32(ptr %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv2f16_nxv2f16_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT:    vluxei32.v v9, (a0), v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x half> @llvm.riscv.vluxei.nxv2f16.nxv2i32(
    <vscale x 2 x half> undef,
    ptr %0,
    <vscale x 2 x i32> %1,
    iXLen %2)

  ret <vscale x 2 x half> %a
}

declare <vscale x 2 x half> @llvm.riscv.vluxei.mask.nxv2f16.nxv2i32(
  <vscale x 2 x half>,
  ptr,
  <vscale x 2 x i32>,
  <vscale x 2 x i1>,
  iXLen,
  iXLen);

define <vscale x 2 x half> @intrinsic_vluxei_mask_v_nxv2f16_nxv2f16_nxv2i32(<vscale x 2 x half> %0, ptr %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f16_nxv2f16_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT:    vluxei32.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x half> @llvm.riscv.vluxei.mask.nxv2f16.nxv2i32(
    <vscale x 2 x half> %0,
    ptr %1,
    <vscale x 2 x i32> %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x half> %a
}

declare <vscale x 4 x half> @llvm.riscv.vluxei.nxv4f16.nxv4i32(
  <vscale x 4 x half>,
  ptr,
  <vscale x 4 x i32>,
  iXLen);

1016define <vscale x 4 x half> @intrinsic_vluxei_v_nxv4f16_nxv4f16_nxv4i32(ptr %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
1017; CHECK-LABEL: intrinsic_vluxei_v_nxv4f16_nxv4f16_nxv4i32:
1018; CHECK:       # %bb.0: # %entry
1019; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
1020; CHECK-NEXT:    vluxei32.v v10, (a0), v8
1021; CHECK-NEXT:    vmv.v.v v8, v10
1022; CHECK-NEXT:    ret
1023entry:
1024  %a = call <vscale x 4 x half> @llvm.riscv.vluxei.nxv4f16.nxv4i32(
1025    <vscale x 4 x half> undef,
1026    ptr %0,
1027    <vscale x 4 x i32> %1,
1028    iXLen %2)
1029
1030  ret <vscale x 4 x half> %a
1031}
1032
1033declare <vscale x 4 x half> @llvm.riscv.vluxei.mask.nxv4f16.nxv4i32(
1034  <vscale x 4 x half>,
1035  ptr,
1036  <vscale x 4 x i32>,
1037  <vscale x 4 x i1>,
1038  iXLen,
1039  iXLen);
1040
1041define <vscale x 4 x half> @intrinsic_vluxei_mask_v_nxv4f16_nxv4f16_nxv4i32(<vscale x 4 x half> %0, ptr %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
1042; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f16_nxv4f16_nxv4i32:
1043; CHECK:       # %bb.0: # %entry
1044; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
1045; CHECK-NEXT:    vluxei32.v v8, (a0), v10, v0.t
1046; CHECK-NEXT:    ret
1047entry:
1048  %a = call <vscale x 4 x half> @llvm.riscv.vluxei.mask.nxv4f16.nxv4i32(
1049    <vscale x 4 x half> %0,
1050    ptr %1,
1051    <vscale x 4 x i32> %2,
1052    <vscale x 4 x i1> %3,
1053    iXLen %4, iXLen 1)
1054
1055  ret <vscale x 4 x half> %a
1056}
1057
1058declare <vscale x 8 x half> @llvm.riscv.vluxei.nxv8f16.nxv8i32(
1059  <vscale x 8 x half>,
1060  ptr,
1061  <vscale x 8 x i32>,
1062  iXLen);
1063
1064define <vscale x 8 x half> @intrinsic_vluxei_v_nxv8f16_nxv8f16_nxv8i32(ptr %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
1065; CHECK-LABEL: intrinsic_vluxei_v_nxv8f16_nxv8f16_nxv8i32:
1066; CHECK:       # %bb.0: # %entry
1067; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
1068; CHECK-NEXT:    vluxei32.v v12, (a0), v8
1069; CHECK-NEXT:    vmv.v.v v8, v12
1070; CHECK-NEXT:    ret
1071entry:
1072  %a = call <vscale x 8 x half> @llvm.riscv.vluxei.nxv8f16.nxv8i32(
1073    <vscale x 8 x half> undef,
1074    ptr %0,
1075    <vscale x 8 x i32> %1,
1076    iXLen %2)
1077
1078  ret <vscale x 8 x half> %a
1079}
1080
1081declare <vscale x 8 x half> @llvm.riscv.vluxei.mask.nxv8f16.nxv8i32(
1082  <vscale x 8 x half>,
1083  ptr,
1084  <vscale x 8 x i32>,
1085  <vscale x 8 x i1>,
1086  iXLen,
1087  iXLen);
1088
1089define <vscale x 8 x half> @intrinsic_vluxei_mask_v_nxv8f16_nxv8f16_nxv8i32(<vscale x 8 x half> %0, ptr %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
1090; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f16_nxv8f16_nxv8i32:
1091; CHECK:       # %bb.0: # %entry
1092; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
1093; CHECK-NEXT:    vluxei32.v v8, (a0), v12, v0.t
1094; CHECK-NEXT:    ret
1095entry:
1096  %a = call <vscale x 8 x half> @llvm.riscv.vluxei.mask.nxv8f16.nxv8i32(
1097    <vscale x 8 x half> %0,
1098    ptr %1,
1099    <vscale x 8 x i32> %2,
1100    <vscale x 8 x i1> %3,
1101    iXLen %4, iXLen 1)
1102
1103  ret <vscale x 8 x half> %a
1104}
1105
1106declare <vscale x 16 x half> @llvm.riscv.vluxei.nxv16f16.nxv16i32(
1107  <vscale x 16 x half>,
1108  ptr,
1109  <vscale x 16 x i32>,
1110  iXLen);
1111
1112define <vscale x 16 x half> @intrinsic_vluxei_v_nxv16f16_nxv16f16_nxv16i32(ptr %0, <vscale x 16 x i32> %1, iXLen %2) nounwind {
1113; CHECK-LABEL: intrinsic_vluxei_v_nxv16f16_nxv16f16_nxv16i32:
1114; CHECK:       # %bb.0: # %entry
1115; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
1116; CHECK-NEXT:    vluxei32.v v16, (a0), v8
1117; CHECK-NEXT:    vmv.v.v v8, v16
1118; CHECK-NEXT:    ret
1119entry:
1120  %a = call <vscale x 16 x half> @llvm.riscv.vluxei.nxv16f16.nxv16i32(
1121    <vscale x 16 x half> undef,
1122    ptr %0,
1123    <vscale x 16 x i32> %1,
1124    iXLen %2)
1125
1126  ret <vscale x 16 x half> %a
1127}
1128
1129declare <vscale x 16 x half> @llvm.riscv.vluxei.mask.nxv16f16.nxv16i32(
1130  <vscale x 16 x half>,
1131  ptr,
1132  <vscale x 16 x i32>,
1133  <vscale x 16 x i1>,
1134  iXLen,
1135  iXLen);
1136
1137define <vscale x 16 x half> @intrinsic_vluxei_mask_v_nxv16f16_nxv16f16_nxv16i32(<vscale x 16 x half> %0, ptr %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
1138; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16f16_nxv16f16_nxv16i32:
1139; CHECK:       # %bb.0: # %entry
1140; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
1141; CHECK-NEXT:    vluxei32.v v8, (a0), v16, v0.t
1142; CHECK-NEXT:    ret
1143entry:
1144  %a = call <vscale x 16 x half> @llvm.riscv.vluxei.mask.nxv16f16.nxv16i32(
1145    <vscale x 16 x half> %0,
1146    ptr %1,
1147    <vscale x 16 x i32> %2,
1148    <vscale x 16 x i1> %3,
1149    iXLen %4, iXLen 1)
1150
1151  ret <vscale x 16 x half> %a
1152}
1153
; Indexed-unordered loads (vluxei32.v) producing bf16 element vectors from
; i32 indices (requires +zvfbfmin from the RUN lines), nxv1 through nxv16.
; Same structure as the f16 group: unmasked form uses an undef passthru and
; copies out of a non-overlapping temporary; masked form uses policy iXLen 1.
; NOTE(review): these comments are hand-added; regenerating the file with
; update_llc_test_checks.py will drop them.
declare <vscale x 1 x bfloat> @llvm.riscv.vluxei.nxv1bf16.nxv1i32(
  <vscale x 1 x bfloat>,
  ptr,
  <vscale x 1 x i32>,
  iXLen);

define <vscale x 1 x bfloat> @intrinsic_vluxei_v_nxv1bf16_nxv1bf16_nxv1i32(ptr %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv1bf16_nxv1bf16_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vluxei32.v v9, (a0), v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x bfloat> @llvm.riscv.vluxei.nxv1bf16.nxv1i32(
    <vscale x 1 x bfloat> undef,
    ptr %0,
    <vscale x 1 x i32> %1,
    iXLen %2)

  ret <vscale x 1 x bfloat> %a
}

declare <vscale x 1 x bfloat> @llvm.riscv.vluxei.mask.nxv1bf16.nxv1i32(
  <vscale x 1 x bfloat>,
  ptr,
  <vscale x 1 x i32>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x bfloat> @intrinsic_vluxei_mask_v_nxv1bf16_nxv1bf16_nxv1i32(<vscale x 1 x bfloat> %0, ptr %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1bf16_nxv1bf16_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT:    vluxei32.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x bfloat> @llvm.riscv.vluxei.mask.nxv1bf16.nxv1i32(
    <vscale x 1 x bfloat> %0,
    ptr %1,
    <vscale x 1 x i32> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x bfloat> %a
}

declare <vscale x 2 x bfloat> @llvm.riscv.vluxei.nxv2bf16.nxv2i32(
  <vscale x 2 x bfloat>,
  ptr,
  <vscale x 2 x i32>,
  iXLen);

define <vscale x 2 x bfloat> @intrinsic_vluxei_v_nxv2bf16_nxv2bf16_nxv2i32(ptr %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv2bf16_nxv2bf16_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT:    vluxei32.v v9, (a0), v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x bfloat> @llvm.riscv.vluxei.nxv2bf16.nxv2i32(
    <vscale x 2 x bfloat> undef,
    ptr %0,
    <vscale x 2 x i32> %1,
    iXLen %2)

  ret <vscale x 2 x bfloat> %a
}

declare <vscale x 2 x bfloat> @llvm.riscv.vluxei.mask.nxv2bf16.nxv2i32(
  <vscale x 2 x bfloat>,
  ptr,
  <vscale x 2 x i32>,
  <vscale x 2 x i1>,
  iXLen,
  iXLen);

define <vscale x 2 x bfloat> @intrinsic_vluxei_mask_v_nxv2bf16_nxv2bf16_nxv2i32(<vscale x 2 x bfloat> %0, ptr %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2bf16_nxv2bf16_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT:    vluxei32.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x bfloat> @llvm.riscv.vluxei.mask.nxv2bf16.nxv2i32(
    <vscale x 2 x bfloat> %0,
    ptr %1,
    <vscale x 2 x i32> %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x bfloat> %a
}

declare <vscale x 4 x bfloat> @llvm.riscv.vluxei.nxv4bf16.nxv4i32(
  <vscale x 4 x bfloat>,
  ptr,
  <vscale x 4 x i32>,
  iXLen);

define <vscale x 4 x bfloat> @intrinsic_vluxei_v_nxv4bf16_nxv4bf16_nxv4i32(ptr %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv4bf16_nxv4bf16_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vluxei32.v v10, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x bfloat> @llvm.riscv.vluxei.nxv4bf16.nxv4i32(
    <vscale x 4 x bfloat> undef,
    ptr %0,
    <vscale x 4 x i32> %1,
    iXLen %2)

  ret <vscale x 4 x bfloat> %a
}

declare <vscale x 4 x bfloat> @llvm.riscv.vluxei.mask.nxv4bf16.nxv4i32(
  <vscale x 4 x bfloat>,
  ptr,
  <vscale x 4 x i32>,
  <vscale x 4 x i1>,
  iXLen,
  iXLen);

define <vscale x 4 x bfloat> @intrinsic_vluxei_mask_v_nxv4bf16_nxv4bf16_nxv4i32(<vscale x 4 x bfloat> %0, ptr %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4bf16_nxv4bf16_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT:    vluxei32.v v8, (a0), v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x bfloat> @llvm.riscv.vluxei.mask.nxv4bf16.nxv4i32(
    <vscale x 4 x bfloat> %0,
    ptr %1,
    <vscale x 4 x i32> %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x bfloat> %a
}

declare <vscale x 8 x bfloat> @llvm.riscv.vluxei.nxv8bf16.nxv8i32(
  <vscale x 8 x bfloat>,
  ptr,
  <vscale x 8 x i32>,
  iXLen);

define <vscale x 8 x bfloat> @intrinsic_vluxei_v_nxv8bf16_nxv8bf16_nxv8i32(ptr %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv8bf16_nxv8bf16_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT:    vluxei32.v v12, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x bfloat> @llvm.riscv.vluxei.nxv8bf16.nxv8i32(
    <vscale x 8 x bfloat> undef,
    ptr %0,
    <vscale x 8 x i32> %1,
    iXLen %2)

  ret <vscale x 8 x bfloat> %a
}

declare <vscale x 8 x bfloat> @llvm.riscv.vluxei.mask.nxv8bf16.nxv8i32(
  <vscale x 8 x bfloat>,
  ptr,
  <vscale x 8 x i32>,
  <vscale x 8 x i1>,
  iXLen,
  iXLen);

define <vscale x 8 x bfloat> @intrinsic_vluxei_mask_v_nxv8bf16_nxv8bf16_nxv8i32(<vscale x 8 x bfloat> %0, ptr %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8bf16_nxv8bf16_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT:    vluxei32.v v8, (a0), v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x bfloat> @llvm.riscv.vluxei.mask.nxv8bf16.nxv8i32(
    <vscale x 8 x bfloat> %0,
    ptr %1,
    <vscale x 8 x i32> %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x bfloat> %a
}

declare <vscale x 16 x bfloat> @llvm.riscv.vluxei.nxv16bf16.nxv16i32(
  <vscale x 16 x bfloat>,
  ptr,
  <vscale x 16 x i32>,
  iXLen);

define <vscale x 16 x bfloat> @intrinsic_vluxei_v_nxv16bf16_nxv16bf16_nxv16i32(ptr %0, <vscale x 16 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv16bf16_nxv16bf16_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT:    vluxei32.v v16, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x bfloat> @llvm.riscv.vluxei.nxv16bf16.nxv16i32(
    <vscale x 16 x bfloat> undef,
    ptr %0,
    <vscale x 16 x i32> %1,
    iXLen %2)

  ret <vscale x 16 x bfloat> %a
}

declare <vscale x 16 x bfloat> @llvm.riscv.vluxei.mask.nxv16bf16.nxv16i32(
  <vscale x 16 x bfloat>,
  ptr,
  <vscale x 16 x i32>,
  <vscale x 16 x i1>,
  iXLen,
  iXLen);

define <vscale x 16 x bfloat> @intrinsic_vluxei_mask_v_nxv16bf16_nxv16bf16_nxv16i32(<vscale x 16 x bfloat> %0, ptr %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16bf16_nxv16bf16_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT:    vluxei32.v v8, (a0), v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x bfloat> @llvm.riscv.vluxei.mask.nxv16bf16.nxv16i32(
    <vscale x 16 x bfloat> %0,
    ptr %1,
    <vscale x 16 x i32> %2,
    <vscale x 16 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 16 x bfloat> %a
}
1393
; Indexed-unordered loads (vluxei32.v) producing f32 element vectors from
; i32 indices, nxv1 through nxv16.  Element width equals index width here,
; so in the unmasked cases the destination may alias the index register and
; the checks expect "vluxei32.v v8, (a0), v8" with no copy.
; NOTE(review): these comments are hand-added; regenerating the file with
; update_llc_test_checks.py will drop them.
declare <vscale x 1 x float> @llvm.riscv.vluxei.nxv1f32.nxv1i32(
  <vscale x 1 x float>,
  ptr,
  <vscale x 1 x i32>,
  iXLen);

define <vscale x 1 x float> @intrinsic_vluxei_v_nxv1f32_nxv1f32_nxv1i32(ptr %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv1f32_nxv1f32_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    vluxei32.v v8, (a0), v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vluxei.nxv1f32.nxv1i32(
    <vscale x 1 x float> undef,
    ptr %0,
    <vscale x 1 x i32> %1,
    iXLen %2)

  ret <vscale x 1 x float> %a
}

declare <vscale x 1 x float> @llvm.riscv.vluxei.mask.nxv1f32.nxv1i32(
  <vscale x 1 x float>,
  ptr,
  <vscale x 1 x i32>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x float> @intrinsic_vluxei_mask_v_nxv1f32_nxv1f32_nxv1i32(<vscale x 1 x float> %0, ptr %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f32_nxv1f32_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT:    vluxei32.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vluxei.mask.nxv1f32.nxv1i32(
    <vscale x 1 x float> %0,
    ptr %1,
    <vscale x 1 x i32> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x float> %a
}

declare <vscale x 2 x float> @llvm.riscv.vluxei.nxv2f32.nxv2i32(
  <vscale x 2 x float>,
  ptr,
  <vscale x 2 x i32>,
  iXLen);

define <vscale x 2 x float> @intrinsic_vluxei_v_nxv2f32_nxv2f32_nxv2i32(ptr %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv2f32_nxv2f32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vluxei32.v v8, (a0), v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vluxei.nxv2f32.nxv2i32(
    <vscale x 2 x float> undef,
    ptr %0,
    <vscale x 2 x i32> %1,
    iXLen %2)

  ret <vscale x 2 x float> %a
}

declare <vscale x 2 x float> @llvm.riscv.vluxei.mask.nxv2f32.nxv2i32(
  <vscale x 2 x float>,
  ptr,
  <vscale x 2 x i32>,
  <vscale x 2 x i1>,
  iXLen,
  iXLen);

define <vscale x 2 x float> @intrinsic_vluxei_mask_v_nxv2f32_nxv2f32_nxv2i32(<vscale x 2 x float> %0, ptr %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f32_nxv2f32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT:    vluxei32.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vluxei.mask.nxv2f32.nxv2i32(
    <vscale x 2 x float> %0,
    ptr %1,
    <vscale x 2 x i32> %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x float> %a
}

declare <vscale x 4 x float> @llvm.riscv.vluxei.nxv4f32.nxv4i32(
  <vscale x 4 x float>,
  ptr,
  <vscale x 4 x i32>,
  iXLen);

define <vscale x 4 x float> @intrinsic_vluxei_v_nxv4f32_nxv4f32_nxv4i32(ptr %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv4f32_nxv4f32_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT:    vluxei32.v v8, (a0), v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x float> @llvm.riscv.vluxei.nxv4f32.nxv4i32(
    <vscale x 4 x float> undef,
    ptr %0,
    <vscale x 4 x i32> %1,
    iXLen %2)

  ret <vscale x 4 x float> %a
}

declare <vscale x 4 x float> @llvm.riscv.vluxei.mask.nxv4f32.nxv4i32(
  <vscale x 4 x float>,
  ptr,
  <vscale x 4 x i32>,
  <vscale x 4 x i1>,
  iXLen,
  iXLen);

define <vscale x 4 x float> @intrinsic_vluxei_mask_v_nxv4f32_nxv4f32_nxv4i32(<vscale x 4 x float> %0, ptr %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f32_nxv4f32_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT:    vluxei32.v v8, (a0), v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x float> @llvm.riscv.vluxei.mask.nxv4f32.nxv4i32(
    <vscale x 4 x float> %0,
    ptr %1,
    <vscale x 4 x i32> %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x float> %a
}

declare <vscale x 8 x float> @llvm.riscv.vluxei.nxv8f32.nxv8i32(
  <vscale x 8 x float>,
  ptr,
  <vscale x 8 x i32>,
  iXLen);

define <vscale x 8 x float> @intrinsic_vluxei_v_nxv8f32_nxv8f32_nxv8i32(ptr %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv8f32_nxv8f32_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT:    vluxei32.v v8, (a0), v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vluxei.nxv8f32.nxv8i32(
    <vscale x 8 x float> undef,
    ptr %0,
    <vscale x 8 x i32> %1,
    iXLen %2)

  ret <vscale x 8 x float> %a
}

declare <vscale x 8 x float> @llvm.riscv.vluxei.mask.nxv8f32.nxv8i32(
  <vscale x 8 x float>,
  ptr,
  <vscale x 8 x i32>,
  <vscale x 8 x i1>,
  iXLen,
  iXLen);

define <vscale x 8 x float> @intrinsic_vluxei_mask_v_nxv8f32_nxv8f32_nxv8i32(<vscale x 8 x float> %0, ptr %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f32_nxv8f32_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT:    vluxei32.v v8, (a0), v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vluxei.mask.nxv8f32.nxv8i32(
    <vscale x 8 x float> %0,
    ptr %1,
    <vscale x 8 x i32> %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x float> %a
}

declare <vscale x 16 x float> @llvm.riscv.vluxei.nxv16f32.nxv16i32(
  <vscale x 16 x float>,
  ptr,
  <vscale x 16 x i32>,
  iXLen);

define <vscale x 16 x float> @intrinsic_vluxei_v_nxv16f32_nxv16f32_nxv16i32(ptr %0, <vscale x 16 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv16f32_nxv16f32_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT:    vluxei32.v v8, (a0), v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x float> @llvm.riscv.vluxei.nxv16f32.nxv16i32(
    <vscale x 16 x float> undef,
    ptr %0,
    <vscale x 16 x i32> %1,
    iXLen %2)

  ret <vscale x 16 x float> %a
}

declare <vscale x 16 x float> @llvm.riscv.vluxei.mask.nxv16f32.nxv16i32(
  <vscale x 16 x float>,
  ptr,
  <vscale x 16 x i32>,
  <vscale x 16 x i1>,
  iXLen,
  iXLen);

define <vscale x 16 x float> @intrinsic_vluxei_mask_v_nxv16f32_nxv16f32_nxv16i32(<vscale x 16 x float> %0, ptr %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16f32_nxv16f32_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
; CHECK-NEXT:    vluxei32.v v8, (a0), v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x float> @llvm.riscv.vluxei.mask.nxv16f32.nxv16i32(
    <vscale x 16 x float> %0,
    ptr %1,
    <vscale x 16 x i32> %2,
    <vscale x 16 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 16 x float> %a
}
1628
; Indexed-unordered loads (vluxei32.v) producing f64 element vectors from
; i32 indices, nxv1 through nxv8 (nxv8f64 fills a full m8 group, so no wider
; variant exists).  Unmasked forms load into a temporary and copy with
; vmv.v.v; masked forms use policy operand iXLen 1.
; NOTE(review): these comments are hand-added; regenerating the file with
; update_llc_test_checks.py will drop them.
declare <vscale x 1 x double> @llvm.riscv.vluxei.nxv1f64.nxv1i32(
  <vscale x 1 x double>,
  ptr,
  <vscale x 1 x i32>,
  iXLen);

define <vscale x 1 x double> @intrinsic_vluxei_v_nxv1f64_nxv1f64_nxv1i32(ptr %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv1f64_nxv1f64_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT:    vluxei32.v v9, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vluxei.nxv1f64.nxv1i32(
    <vscale x 1 x double> undef,
    ptr %0,
    <vscale x 1 x i32> %1,
    iXLen %2)

  ret <vscale x 1 x double> %a
}

declare <vscale x 1 x double> @llvm.riscv.vluxei.mask.nxv1f64.nxv1i32(
  <vscale x 1 x double>,
  ptr,
  <vscale x 1 x i32>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x double> @intrinsic_vluxei_mask_v_nxv1f64_nxv1f64_nxv1i32(<vscale x 1 x double> %0, ptr %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f64_nxv1f64_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT:    vluxei32.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vluxei.mask.nxv1f64.nxv1i32(
    <vscale x 1 x double> %0,
    ptr %1,
    <vscale x 1 x i32> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x double> %a
}

declare <vscale x 2 x double> @llvm.riscv.vluxei.nxv2f64.nxv2i32(
  <vscale x 2 x double>,
  ptr,
  <vscale x 2 x i32>,
  iXLen);

define <vscale x 2 x double> @intrinsic_vluxei_v_nxv2f64_nxv2f64_nxv2i32(ptr %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv2f64_nxv2f64_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT:    vluxei32.v v10, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x double> @llvm.riscv.vluxei.nxv2f64.nxv2i32(
    <vscale x 2 x double> undef,
    ptr %0,
    <vscale x 2 x i32> %1,
    iXLen %2)

  ret <vscale x 2 x double> %a
}

declare <vscale x 2 x double> @llvm.riscv.vluxei.mask.nxv2f64.nxv2i32(
  <vscale x 2 x double>,
  ptr,
  <vscale x 2 x i32>,
  <vscale x 2 x i1>,
  iXLen,
  iXLen);

define <vscale x 2 x double> @intrinsic_vluxei_mask_v_nxv2f64_nxv2f64_nxv2i32(<vscale x 2 x double> %0, ptr %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f64_nxv2f64_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT:    vluxei32.v v8, (a0), v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x double> @llvm.riscv.vluxei.mask.nxv2f64.nxv2i32(
    <vscale x 2 x double> %0,
    ptr %1,
    <vscale x 2 x i32> %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x double> %a
}

declare <vscale x 4 x double> @llvm.riscv.vluxei.nxv4f64.nxv4i32(
  <vscale x 4 x double>,
  ptr,
  <vscale x 4 x i32>,
  iXLen);

define <vscale x 4 x double> @intrinsic_vluxei_v_nxv4f64_nxv4f64_nxv4i32(ptr %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv4f64_nxv4f64_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
; CHECK-NEXT:    vluxei32.v v12, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x double> @llvm.riscv.vluxei.nxv4f64.nxv4i32(
    <vscale x 4 x double> undef,
    ptr %0,
    <vscale x 4 x i32> %1,
    iXLen %2)

  ret <vscale x 4 x double> %a
}

declare <vscale x 4 x double> @llvm.riscv.vluxei.mask.nxv4f64.nxv4i32(
  <vscale x 4 x double>,
  ptr,
  <vscale x 4 x i32>,
  <vscale x 4 x i1>,
  iXLen,
  iXLen);

define <vscale x 4 x double> @intrinsic_vluxei_mask_v_nxv4f64_nxv4f64_nxv4i32(<vscale x 4 x double> %0, ptr %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f64_nxv4f64_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
; CHECK-NEXT:    vluxei32.v v8, (a0), v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x double> @llvm.riscv.vluxei.mask.nxv4f64.nxv4i32(
    <vscale x 4 x double> %0,
    ptr %1,
    <vscale x 4 x i32> %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x double> %a
}

declare <vscale x 8 x double> @llvm.riscv.vluxei.nxv8f64.nxv8i32(
  <vscale x 8 x double>,
  ptr,
  <vscale x 8 x i32>,
  iXLen);

define <vscale x 8 x double> @intrinsic_vluxei_v_nxv8f64_nxv8f64_nxv8i32(ptr %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv8f64_nxv8f64_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
; CHECK-NEXT:    vluxei32.v v16, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x double> @llvm.riscv.vluxei.nxv8f64.nxv8i32(
    <vscale x 8 x double> undef,
    ptr %0,
    <vscale x 8 x i32> %1,
    iXLen %2)

  ret <vscale x 8 x double> %a
}

declare <vscale x 8 x double> @llvm.riscv.vluxei.mask.nxv8f64.nxv8i32(
  <vscale x 8 x double>,
  ptr,
  <vscale x 8 x i32>,
  <vscale x 8 x i1>,
  iXLen,
  iXLen);

define <vscale x 8 x double> @intrinsic_vluxei_mask_v_nxv8f64_nxv8f64_nxv8i32(<vscale x 8 x double> %0, ptr %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f64_nxv8f64_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
; CHECK-NEXT:    vluxei32.v v8, (a0), v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x double> @llvm.riscv.vluxei.mask.nxv8f64.nxv8i32(
    <vscale x 8 x double> %0,
    ptr %1,
    <vscale x 8 x i32> %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x double> %a
}
1820
; Indexed-unordered loads (vluxei16.v) producing i8 element vectors from
; i16 indices, nxv1 and nxv2.  As with the other widened-index cases, the
; unmasked form loads into a temporary and copies with vmv1r.v; the masked
; form writes the passthru register directly with policy operand iXLen 1.
; NOTE(review): these comments are hand-added; regenerating the file with
; update_llc_test_checks.py will drop them.
declare <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i16(
  <vscale x 1 x i8>,
  ptr,
  <vscale x 1 x i16>,
  iXLen);

define <vscale x 1 x i8> @intrinsic_vluxei_v_nxv1i8_nxv1i8_nxv1i16(ptr %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv1i8_nxv1i8_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vluxei16.v v9, (a0), v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i16(
    <vscale x 1 x i8> undef,
    ptr %0,
    <vscale x 1 x i16> %1,
    iXLen %2)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1i16(
  <vscale x 1 x i8>,
  ptr,
  <vscale x 1 x i16>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x i8> @intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1i16(<vscale x 1 x i8> %0, ptr %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT:    vluxei16.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1i16(
    <vscale x 1 x i8> %0,
    ptr %1,
    <vscale x 1 x i16> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 2 x i8> @llvm.riscv.vluxei.nxv2i8.nxv2i16(
  <vscale x 2 x i8>,
  ptr,
  <vscale x 2 x i16>,
  iXLen);

define <vscale x 2 x i8> @intrinsic_vluxei_v_nxv2i8_nxv2i8_nxv2i16(ptr %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv2i8_nxv2i8_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT:    vluxei16.v v9, (a0), v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vluxei.nxv2i8.nxv2i16(
    <vscale x 2 x i8> undef,
    ptr %0,
    <vscale x 2 x i16> %1,
    iXLen %2)

  ret <vscale x 2 x i8> %a
}

declare <vscale x 2 x i8> @llvm.riscv.vluxei.mask.nxv2i8.nxv2i16(
  <vscale x 2 x i8>,
  ptr,
  <vscale x 2 x i16>,
  <vscale x 2 x i1>,
  iXLen,
  iXLen);

define <vscale x 2 x i8> @intrinsic_vluxei_mask_v_nxv2i8_nxv2i8_nxv2i16(<vscale x 2 x i8> %0, ptr %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i8_nxv2i8_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT:    vluxei16.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vluxei.mask.nxv2i8.nxv2i16(
    <vscale x 2 x i8> %0,
    ptr %1,
    <vscale x 2 x i16> %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x i8> %a
}
1916
1917declare <vscale x 4 x i8> @llvm.riscv.vluxei.nxv4i8.nxv4i16(
1918  <vscale x 4 x i8>,
1919  ptr,
1920  <vscale x 4 x i16>,
1921  iXLen);
1922
1923define <vscale x 4 x i8> @intrinsic_vluxei_v_nxv4i8_nxv4i8_nxv4i16(ptr %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
1924; CHECK-LABEL: intrinsic_vluxei_v_nxv4i8_nxv4i8_nxv4i16:
1925; CHECK:       # %bb.0: # %entry
1926; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
1927; CHECK-NEXT:    vluxei16.v v9, (a0), v8
1928; CHECK-NEXT:    vmv1r.v v8, v9
1929; CHECK-NEXT:    ret
1930entry:
1931  %a = call <vscale x 4 x i8> @llvm.riscv.vluxei.nxv4i8.nxv4i16(
1932    <vscale x 4 x i8> undef,
1933    ptr %0,
1934    <vscale x 4 x i16> %1,
1935    iXLen %2)
1936
1937  ret <vscale x 4 x i8> %a
1938}
1939
1940declare <vscale x 4 x i8> @llvm.riscv.vluxei.mask.nxv4i8.nxv4i16(
1941  <vscale x 4 x i8>,
1942  ptr,
1943  <vscale x 4 x i16>,
1944  <vscale x 4 x i1>,
1945  iXLen,
1946  iXLen);
1947
1948define <vscale x 4 x i8> @intrinsic_vluxei_mask_v_nxv4i8_nxv4i8_nxv4i16(<vscale x 4 x i8> %0, ptr %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
1949; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i8_nxv4i8_nxv4i16:
1950; CHECK:       # %bb.0: # %entry
1951; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
1952; CHECK-NEXT:    vluxei16.v v8, (a0), v9, v0.t
1953; CHECK-NEXT:    ret
1954entry:
1955  %a = call <vscale x 4 x i8> @llvm.riscv.vluxei.mask.nxv4i8.nxv4i16(
1956    <vscale x 4 x i8> %0,
1957    ptr %1,
1958    <vscale x 4 x i16> %2,
1959    <vscale x 4 x i1> %3,
1960    iXLen %4, iXLen 1)
1961
1962  ret <vscale x 4 x i8> %a
1963}
1964
1965declare <vscale x 8 x i8> @llvm.riscv.vluxei.nxv8i8.nxv8i16(
1966  <vscale x 8 x i8>,
1967  ptr,
1968  <vscale x 8 x i16>,
1969  iXLen);
1970
1971define <vscale x 8 x i8> @intrinsic_vluxei_v_nxv8i8_nxv8i8_nxv8i16(ptr %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
1972; CHECK-LABEL: intrinsic_vluxei_v_nxv8i8_nxv8i8_nxv8i16:
1973; CHECK:       # %bb.0: # %entry
1974; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
1975; CHECK-NEXT:    vluxei16.v v10, (a0), v8
1976; CHECK-NEXT:    vmv.v.v v8, v10
1977; CHECK-NEXT:    ret
1978entry:
1979  %a = call <vscale x 8 x i8> @llvm.riscv.vluxei.nxv8i8.nxv8i16(
1980    <vscale x 8 x i8> undef,
1981    ptr %0,
1982    <vscale x 8 x i16> %1,
1983    iXLen %2)
1984
1985  ret <vscale x 8 x i8> %a
1986}
1987
1988declare <vscale x 8 x i8> @llvm.riscv.vluxei.mask.nxv8i8.nxv8i16(
1989  <vscale x 8 x i8>,
1990  ptr,
1991  <vscale x 8 x i16>,
1992  <vscale x 8 x i1>,
1993  iXLen,
1994  iXLen);
1995
1996define <vscale x 8 x i8> @intrinsic_vluxei_mask_v_nxv8i8_nxv8i8_nxv8i16(<vscale x 8 x i8> %0, ptr %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
1997; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i8_nxv8i8_nxv8i16:
1998; CHECK:       # %bb.0: # %entry
1999; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
2000; CHECK-NEXT:    vluxei16.v v8, (a0), v10, v0.t
2001; CHECK-NEXT:    ret
2002entry:
2003  %a = call <vscale x 8 x i8> @llvm.riscv.vluxei.mask.nxv8i8.nxv8i16(
2004    <vscale x 8 x i8> %0,
2005    ptr %1,
2006    <vscale x 8 x i16> %2,
2007    <vscale x 8 x i1> %3,
2008    iXLen %4, iXLen 1)
2009
2010  ret <vscale x 8 x i8> %a
2011}
2012
2013declare <vscale x 16 x i8> @llvm.riscv.vluxei.nxv16i8.nxv16i16(
2014  <vscale x 16 x i8>,
2015  ptr,
2016  <vscale x 16 x i16>,
2017  iXLen);
2018
2019define <vscale x 16 x i8> @intrinsic_vluxei_v_nxv16i8_nxv16i8_nxv16i16(ptr %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
2020; CHECK-LABEL: intrinsic_vluxei_v_nxv16i8_nxv16i8_nxv16i16:
2021; CHECK:       # %bb.0: # %entry
2022; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
2023; CHECK-NEXT:    vluxei16.v v12, (a0), v8
2024; CHECK-NEXT:    vmv.v.v v8, v12
2025; CHECK-NEXT:    ret
2026entry:
2027  %a = call <vscale x 16 x i8> @llvm.riscv.vluxei.nxv16i8.nxv16i16(
2028    <vscale x 16 x i8> undef,
2029    ptr %0,
2030    <vscale x 16 x i16> %1,
2031    iXLen %2)
2032
2033  ret <vscale x 16 x i8> %a
2034}
2035
2036declare <vscale x 16 x i8> @llvm.riscv.vluxei.mask.nxv16i8.nxv16i16(
2037  <vscale x 16 x i8>,
2038  ptr,
2039  <vscale x 16 x i16>,
2040  <vscale x 16 x i1>,
2041  iXLen,
2042  iXLen);
2043
2044define <vscale x 16 x i8> @intrinsic_vluxei_mask_v_nxv16i8_nxv16i8_nxv16i16(<vscale x 16 x i8> %0, ptr %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
2045; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16i8_nxv16i8_nxv16i16:
2046; CHECK:       # %bb.0: # %entry
2047; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
2048; CHECK-NEXT:    vluxei16.v v8, (a0), v12, v0.t
2049; CHECK-NEXT:    ret
2050entry:
2051  %a = call <vscale x 16 x i8> @llvm.riscv.vluxei.mask.nxv16i8.nxv16i16(
2052    <vscale x 16 x i8> %0,
2053    ptr %1,
2054    <vscale x 16 x i16> %2,
2055    <vscale x 16 x i1> %3,
2056    iXLen %4, iXLen 1)
2057
2058  ret <vscale x 16 x i8> %a
2059}
2060
2061declare <vscale x 32 x i8> @llvm.riscv.vluxei.nxv32i8.nxv32i16(
2062  <vscale x 32 x i8>,
2063  ptr,
2064  <vscale x 32 x i16>,
2065  iXLen);
2066
2067define <vscale x 32 x i8> @intrinsic_vluxei_v_nxv32i8_nxv32i8_nxv32i16(ptr %0, <vscale x 32 x i16> %1, iXLen %2) nounwind {
2068; CHECK-LABEL: intrinsic_vluxei_v_nxv32i8_nxv32i8_nxv32i16:
2069; CHECK:       # %bb.0: # %entry
2070; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
2071; CHECK-NEXT:    vluxei16.v v16, (a0), v8
2072; CHECK-NEXT:    vmv.v.v v8, v16
2073; CHECK-NEXT:    ret
2074entry:
2075  %a = call <vscale x 32 x i8> @llvm.riscv.vluxei.nxv32i8.nxv32i16(
2076    <vscale x 32 x i8> undef,
2077    ptr %0,
2078    <vscale x 32 x i16> %1,
2079    iXLen %2)
2080
2081  ret <vscale x 32 x i8> %a
2082}
2083
2084declare <vscale x 32 x i8> @llvm.riscv.vluxei.mask.nxv32i8.nxv32i16(
2085  <vscale x 32 x i8>,
2086  ptr,
2087  <vscale x 32 x i16>,
2088  <vscale x 32 x i1>,
2089  iXLen,
2090  iXLen);
2091
2092define <vscale x 32 x i8> @intrinsic_vluxei_mask_v_nxv32i8_nxv32i8_nxv32i16(<vscale x 32 x i8> %0, ptr %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
2093; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv32i8_nxv32i8_nxv32i16:
2094; CHECK:       # %bb.0: # %entry
2095; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
2096; CHECK-NEXT:    vluxei16.v v8, (a0), v16, v0.t
2097; CHECK-NEXT:    ret
2098entry:
2099  %a = call <vscale x 32 x i8> @llvm.riscv.vluxei.mask.nxv32i8.nxv32i16(
2100    <vscale x 32 x i8> %0,
2101    ptr %1,
2102    <vscale x 32 x i16> %2,
2103    <vscale x 32 x i1> %3,
2104    iXLen %4, iXLen 1)
2105
2106  ret <vscale x 32 x i8> %a
2107}
2108
; Indexed-unordered loads (vluxei16) of i16-element vectors using i16 index
; vectors, covering nxv1i16 (mf4) through nxv32i16 (m8). Here index EEW
; equals data EEW, so index and destination share the same LMUL; with an
; undef passthru the register allocator reuses v8 for both index and
; result ("vluxei16.v v8, (a0), v8"), and no copy is needed. Masked tests
; pass v8 as the passthru with policy operand 1 (tail agnostic, mask
; undisturbed -> "ta, mu" in vsetvli) and load v8 in place under v0.
declare <vscale x 1 x i16> @llvm.riscv.vluxei.nxv1i16.nxv1i16(
  <vscale x 1 x i16>,
  ptr,
  <vscale x 1 x i16>,
  iXLen);

define <vscale x 1 x i16> @intrinsic_vluxei_v_nxv1i16_nxv1i16_nxv1i16(ptr %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv1i16_nxv1i16_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vluxei16.v v8, (a0), v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vluxei.nxv1i16.nxv1i16(
    <vscale x 1 x i16> undef,
    ptr %0,
    <vscale x 1 x i16> %1,
    iXLen %2)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vluxei.mask.nxv1i16.nxv1i16(
  <vscale x 1 x i16>,
  ptr,
  <vscale x 1 x i16>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x i16> @intrinsic_vluxei_mask_v_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, ptr %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i16_nxv1i16_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT:    vluxei16.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vluxei.mask.nxv1i16.nxv1i16(
    <vscale x 1 x i16> %0,
    ptr %1,
    <vscale x 1 x i16> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vluxei.nxv2i16.nxv2i16(
  <vscale x 2 x i16>,
  ptr,
  <vscale x 2 x i16>,
  iXLen);

define <vscale x 2 x i16> @intrinsic_vluxei_v_nxv2i16_nxv2i16_nxv2i16(ptr %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv2i16_nxv2i16_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT:    vluxei16.v v8, (a0), v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vluxei.nxv2i16.nxv2i16(
    <vscale x 2 x i16> undef,
    ptr %0,
    <vscale x 2 x i16> %1,
    iXLen %2)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vluxei.mask.nxv2i16.nxv2i16(
  <vscale x 2 x i16>,
  ptr,
  <vscale x 2 x i16>,
  <vscale x 2 x i1>,
  iXLen,
  iXLen);

define <vscale x 2 x i16> @intrinsic_vluxei_mask_v_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, ptr %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i16_nxv2i16_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT:    vluxei16.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vluxei.mask.nxv2i16.nxv2i16(
    <vscale x 2 x i16> %0,
    ptr %1,
    <vscale x 2 x i16> %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vluxei.nxv4i16.nxv4i16(
  <vscale x 4 x i16>,
  ptr,
  <vscale x 4 x i16>,
  iXLen);

define <vscale x 4 x i16> @intrinsic_vluxei_v_nxv4i16_nxv4i16_nxv4i16(ptr %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv4i16_nxv4i16_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vluxei16.v v8, (a0), v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vluxei.nxv4i16.nxv4i16(
    <vscale x 4 x i16> undef,
    ptr %0,
    <vscale x 4 x i16> %1,
    iXLen %2)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vluxei.mask.nxv4i16.nxv4i16(
  <vscale x 4 x i16>,
  ptr,
  <vscale x 4 x i16>,
  <vscale x 4 x i1>,
  iXLen,
  iXLen);

define <vscale x 4 x i16> @intrinsic_vluxei_mask_v_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, ptr %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i16_nxv4i16_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT:    vluxei16.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vluxei.mask.nxv4i16.nxv4i16(
    <vscale x 4 x i16> %0,
    ptr %1,
    <vscale x 4 x i16> %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vluxei.nxv8i16.nxv8i16(
  <vscale x 8 x i16>,
  ptr,
  <vscale x 8 x i16>,
  iXLen);

define <vscale x 8 x i16> @intrinsic_vluxei_v_nxv8i16_nxv8i16_nxv8i16(ptr %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv8i16_nxv8i16_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT:    vluxei16.v v8, (a0), v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vluxei.nxv8i16.nxv8i16(
    <vscale x 8 x i16> undef,
    ptr %0,
    <vscale x 8 x i16> %1,
    iXLen %2)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vluxei.mask.nxv8i16.nxv8i16(
  <vscale x 8 x i16>,
  ptr,
  <vscale x 8 x i16>,
  <vscale x 8 x i1>,
  iXLen,
  iXLen);

define <vscale x 8 x i16> @intrinsic_vluxei_mask_v_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, ptr %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i16_nxv8i16_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT:    vluxei16.v v8, (a0), v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vluxei.mask.nxv8i16.nxv8i16(
    <vscale x 8 x i16> %0,
    ptr %1,
    <vscale x 8 x i16> %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vluxei.nxv16i16.nxv16i16(
  <vscale x 16 x i16>,
  ptr,
  <vscale x 16 x i16>,
  iXLen);

define <vscale x 16 x i16> @intrinsic_vluxei_v_nxv16i16_nxv16i16_nxv16i16(ptr %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv16i16_nxv16i16_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT:    vluxei16.v v8, (a0), v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vluxei.nxv16i16.nxv16i16(
    <vscale x 16 x i16> undef,
    ptr %0,
    <vscale x 16 x i16> %1,
    iXLen %2)

  ret <vscale x 16 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vluxei.mask.nxv16i16.nxv16i16(
  <vscale x 16 x i16>,
  ptr,
  <vscale x 16 x i16>,
  <vscale x 16 x i1>,
  iXLen,
  iXLen);

define <vscale x 16 x i16> @intrinsic_vluxei_mask_v_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, ptr %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16i16_nxv16i16_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT:    vluxei16.v v8, (a0), v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vluxei.mask.nxv16i16.nxv16i16(
    <vscale x 16 x i16> %0,
    ptr %1,
    <vscale x 16 x i16> %2,
    <vscale x 16 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 16 x i16> %a
}

declare <vscale x 32 x i16> @llvm.riscv.vluxei.nxv32i16.nxv32i16(
  <vscale x 32 x i16>,
  ptr,
  <vscale x 32 x i16>,
  iXLen);

define <vscale x 32 x i16> @intrinsic_vluxei_v_nxv32i16_nxv32i16_nxv32i16(ptr %0, <vscale x 32 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv32i16_nxv32i16_nxv32i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
; CHECK-NEXT:    vluxei16.v v8, (a0), v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vluxei.nxv32i16.nxv32i16(
    <vscale x 32 x i16> undef,
    ptr %0,
    <vscale x 32 x i16> %1,
    iXLen %2)

  ret <vscale x 32 x i16> %a
}

declare <vscale x 32 x i16> @llvm.riscv.vluxei.mask.nxv32i16.nxv32i16(
  <vscale x 32 x i16>,
  ptr,
  <vscale x 32 x i16>,
  <vscale x 32 x i1>,
  iXLen,
  iXLen);

define <vscale x 32 x i16> @intrinsic_vluxei_mask_v_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, ptr %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv32i16_nxv32i16_nxv32i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
; CHECK-NEXT:    vluxei16.v v8, (a0), v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vluxei.mask.nxv32i16.nxv32i16(
    <vscale x 32 x i16> %0,
    ptr %1,
    <vscale x 32 x i16> %2,
    <vscale x 32 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 32 x i16> %a
}
2390
; Indexed-unordered loads (vluxei16) of i32-element vectors using i16 index
; vectors, covering nxv1i32 (mf2) through nxv16i32 (m8). The data EEW (32)
; is twice the index EEW (16), so the data register group is one LMUL step
; larger than the index group. With an undef passthru the load lands in a
; scratch register group and is copied to v8 (vmv1r.v for a single
; register, vmv.v.v for whole groups); masked tests load v8 in place with
; policy operand 1 (tail agnostic, mask undisturbed -> "ta, mu").
declare <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i16(
  <vscale x 1 x i32>,
  ptr,
  <vscale x 1 x i16>,
  iXLen);

define <vscale x 1 x i32> @intrinsic_vluxei_v_nxv1i32_nxv1i32_nxv1i16(ptr %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv1i32_nxv1i32_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    vluxei16.v v9, (a0), v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i16(
    <vscale x 1 x i32> undef,
    ptr %0,
    <vscale x 1 x i16> %1,
    iXLen %2)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vluxei.mask.nxv1i32.nxv1i16(
  <vscale x 1 x i32>,
  ptr,
  <vscale x 1 x i16>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x i32> @intrinsic_vluxei_mask_v_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, ptr %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i32_nxv1i32_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT:    vluxei16.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vluxei.mask.nxv1i32.nxv1i16(
    <vscale x 1 x i32> %0,
    ptr %1,
    <vscale x 1 x i16> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i16(
  <vscale x 2 x i32>,
  ptr,
  <vscale x 2 x i16>,
  iXLen);

define <vscale x 2 x i32> @intrinsic_vluxei_v_nxv2i32_nxv2i32_nxv2i16(ptr %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv2i32_nxv2i32_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vluxei16.v v9, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i16(
    <vscale x 2 x i32> undef,
    ptr %0,
    <vscale x 2 x i16> %1,
    iXLen %2)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vluxei.mask.nxv2i32.nxv2i16(
  <vscale x 2 x i32>,
  ptr,
  <vscale x 2 x i16>,
  <vscale x 2 x i1>,
  iXLen,
  iXLen);

define <vscale x 2 x i32> @intrinsic_vluxei_mask_v_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, ptr %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i32_nxv2i32_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT:    vluxei16.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vluxei.mask.nxv2i32.nxv2i16(
    <vscale x 2 x i32> %0,
    ptr %1,
    <vscale x 2 x i16> %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vluxei.nxv4i32.nxv4i16(
  <vscale x 4 x i32>,
  ptr,
  <vscale x 4 x i16>,
  iXLen);

define <vscale x 4 x i32> @intrinsic_vluxei_v_nxv4i32_nxv4i32_nxv4i16(ptr %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv4i32_nxv4i32_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT:    vluxei16.v v10, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vluxei.nxv4i32.nxv4i16(
    <vscale x 4 x i32> undef,
    ptr %0,
    <vscale x 4 x i16> %1,
    iXLen %2)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vluxei.mask.nxv4i32.nxv4i16(
  <vscale x 4 x i32>,
  ptr,
  <vscale x 4 x i16>,
  <vscale x 4 x i1>,
  iXLen,
  iXLen);

define <vscale x 4 x i32> @intrinsic_vluxei_mask_v_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, ptr %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i32_nxv4i32_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT:    vluxei16.v v8, (a0), v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vluxei.mask.nxv4i32.nxv4i16(
    <vscale x 4 x i32> %0,
    ptr %1,
    <vscale x 4 x i16> %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vluxei.nxv8i32.nxv8i16(
  <vscale x 8 x i32>,
  ptr,
  <vscale x 8 x i16>,
  iXLen);

define <vscale x 8 x i32> @intrinsic_vluxei_v_nxv8i32_nxv8i32_nxv8i16(ptr %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv8i32_nxv8i32_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT:    vluxei16.v v12, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vluxei.nxv8i32.nxv8i16(
    <vscale x 8 x i32> undef,
    ptr %0,
    <vscale x 8 x i16> %1,
    iXLen %2)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vluxei.mask.nxv8i32.nxv8i16(
  <vscale x 8 x i32>,
  ptr,
  <vscale x 8 x i16>,
  <vscale x 8 x i1>,
  iXLen,
  iXLen);

define <vscale x 8 x i32> @intrinsic_vluxei_mask_v_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, ptr %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i32_nxv8i32_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT:    vluxei16.v v8, (a0), v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vluxei.mask.nxv8i32.nxv8i16(
    <vscale x 8 x i32> %0,
    ptr %1,
    <vscale x 8 x i16> %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 16 x i32> @llvm.riscv.vluxei.nxv16i32.nxv16i16(
  <vscale x 16 x i32>,
  ptr,
  <vscale x 16 x i16>,
  iXLen);

define <vscale x 16 x i32> @intrinsic_vluxei_v_nxv16i32_nxv16i32_nxv16i16(ptr %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv16i32_nxv16i32_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT:    vluxei16.v v16, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vluxei.nxv16i32.nxv16i16(
    <vscale x 16 x i32> undef,
    ptr %0,
    <vscale x 16 x i16> %1,
    iXLen %2)

  ret <vscale x 16 x i32> %a
}

declare <vscale x 16 x i32> @llvm.riscv.vluxei.mask.nxv16i32.nxv16i16(
  <vscale x 16 x i32>,
  ptr,
  <vscale x 16 x i16>,
  <vscale x 16 x i1>,
  iXLen,
  iXLen);

define <vscale x 16 x i32> @intrinsic_vluxei_mask_v_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, ptr %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16i32_nxv16i32_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
; CHECK-NEXT:    vluxei16.v v8, (a0), v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vluxei.mask.nxv16i32.nxv16i16(
    <vscale x 16 x i32> %0,
    ptr %1,
    <vscale x 16 x i16> %2,
    <vscale x 16 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 16 x i32> %a
}
2630
; Indexed-unordered loads (vluxei16) of i64-element vectors using i16 index
; vectors, covering nxv1i64 (m1) through nxv8i64 (m8). The data EEW (64)
; is four times the index EEW (16), so the data register group is two LMUL
; steps larger than the index group. Unmasked tests load into a scratch
; group and copy to v8 with vmv.v.v; masked tests load v8 in place under
; v0 with policy operand 1 (tail agnostic, mask undisturbed -> "ta, mu").
declare <vscale x 1 x i64> @llvm.riscv.vluxei.nxv1i64.nxv1i16(
  <vscale x 1 x i64>,
  ptr,
  <vscale x 1 x i16>,
  iXLen);

define <vscale x 1 x i64> @intrinsic_vluxei_v_nxv1i64_nxv1i64_nxv1i16(ptr %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv1i64_nxv1i64_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT:    vluxei16.v v9, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vluxei.nxv1i64.nxv1i16(
    <vscale x 1 x i64> undef,
    ptr %0,
    <vscale x 1 x i16> %1,
    iXLen %2)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vluxei.mask.nxv1i64.nxv1i16(
  <vscale x 1 x i64>,
  ptr,
  <vscale x 1 x i16>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x i64> @intrinsic_vluxei_mask_v_nxv1i64_nxv1i64_nxv1i16(<vscale x 1 x i64> %0, ptr %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i64_nxv1i64_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT:    vluxei16.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vluxei.mask.nxv1i64.nxv1i16(
    <vscale x 1 x i64> %0,
    ptr %1,
    <vscale x 1 x i16> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vluxei.nxv2i64.nxv2i16(
  <vscale x 2 x i64>,
  ptr,
  <vscale x 2 x i16>,
  iXLen);

define <vscale x 2 x i64> @intrinsic_vluxei_v_nxv2i64_nxv2i64_nxv2i16(ptr %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv2i64_nxv2i64_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT:    vluxei16.v v10, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vluxei.nxv2i64.nxv2i16(
    <vscale x 2 x i64> undef,
    ptr %0,
    <vscale x 2 x i16> %1,
    iXLen %2)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vluxei.mask.nxv2i64.nxv2i16(
  <vscale x 2 x i64>,
  ptr,
  <vscale x 2 x i16>,
  <vscale x 2 x i1>,
  iXLen,
  iXLen);

define <vscale x 2 x i64> @intrinsic_vluxei_mask_v_nxv2i64_nxv2i64_nxv2i16(<vscale x 2 x i64> %0, ptr %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i64_nxv2i64_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT:    vluxei16.v v8, (a0), v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vluxei.mask.nxv2i64.nxv2i16(
    <vscale x 2 x i64> %0,
    ptr %1,
    <vscale x 2 x i16> %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vluxei.nxv4i64.nxv4i16(
  <vscale x 4 x i64>,
  ptr,
  <vscale x 4 x i16>,
  iXLen);

define <vscale x 4 x i64> @intrinsic_vluxei_v_nxv4i64_nxv4i64_nxv4i16(ptr %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv4i64_nxv4i64_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
; CHECK-NEXT:    vluxei16.v v12, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vluxei.nxv4i64.nxv4i16(
    <vscale x 4 x i64> undef,
    ptr %0,
    <vscale x 4 x i16> %1,
    iXLen %2)

  ret <vscale x 4 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vluxei.mask.nxv4i64.nxv4i16(
  <vscale x 4 x i64>,
  ptr,
  <vscale x 4 x i16>,
  <vscale x 4 x i1>,
  iXLen,
  iXLen);

define <vscale x 4 x i64> @intrinsic_vluxei_mask_v_nxv4i64_nxv4i64_nxv4i16(<vscale x 4 x i64> %0, ptr %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i64_nxv4i64_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
; CHECK-NEXT:    vluxei16.v v8, (a0), v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vluxei.mask.nxv4i64.nxv4i16(
    <vscale x 4 x i64> %0,
    ptr %1,
    <vscale x 4 x i16> %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.vluxei.nxv8i64.nxv8i16(
  <vscale x 8 x i64>,
  ptr,
  <vscale x 8 x i16>,
  iXLen);

define <vscale x 8 x i64> @intrinsic_vluxei_v_nxv8i64_nxv8i64_nxv8i16(ptr %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv8i64_nxv8i64_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
; CHECK-NEXT:    vluxei16.v v16, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vluxei.nxv8i64.nxv8i16(
    <vscale x 8 x i64> undef,
    ptr %0,
    <vscale x 8 x i16> %1,
    iXLen %2)

  ret <vscale x 8 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.vluxei.mask.nxv8i64.nxv8i16(
  <vscale x 8 x i64>,
  ptr,
  <vscale x 8 x i16>,
  <vscale x 8 x i1>,
  iXLen,
  iXLen);

define <vscale x 8 x i64> @intrinsic_vluxei_mask_v_nxv8i64_nxv8i64_nxv8i16(<vscale x 8 x i64> %0, ptr %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i64_nxv8i64_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
; CHECK-NEXT:    vluxei16.v v8, (a0), v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vluxei.mask.nxv8i64.nxv8i16(
    <vscale x 8 x i64> %0,
    ptr %1,
    <vscale x 8 x i16> %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x i64> %a
}
2822
; ---------------------------------------------------------------------------
; f16 data indexed by i16 offsets (nxv1f16 .. nxv32f16). Data EEW == index
; EEW, so source and destination register groups may fully overlap: the
; unmasked forms load in place (vluxei16.v v8, (a0), v8) with no copy.
; Each type gets an unmasked (merge = undef, `ta, ma`) and a masked
; (`v0.t`, policy operand `iXLen 1` -> `ta, mu`) variant.
; ---------------------------------------------------------------------------
declare <vscale x 1 x half> @llvm.riscv.vluxei.nxv1f16.nxv1i16(
  <vscale x 1 x half>,
  ptr,
  <vscale x 1 x i16>,
  iXLen);

define <vscale x 1 x half> @intrinsic_vluxei_v_nxv1f16_nxv1f16_nxv1i16(ptr %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv1f16_nxv1f16_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vluxei16.v v8, (a0), v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vluxei.nxv1f16.nxv1i16(
    <vscale x 1 x half> undef,
    ptr %0,
    <vscale x 1 x i16> %1,
    iXLen %2)

  ret <vscale x 1 x half> %a
}

declare <vscale x 1 x half> @llvm.riscv.vluxei.mask.nxv1f16.nxv1i16(
  <vscale x 1 x half>,
  ptr,
  <vscale x 1 x i16>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x half> @intrinsic_vluxei_mask_v_nxv1f16_nxv1f16_nxv1i16(<vscale x 1 x half> %0, ptr %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f16_nxv1f16_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT:    vluxei16.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vluxei.mask.nxv1f16.nxv1i16(
    <vscale x 1 x half> %0,
    ptr %1,
    <vscale x 1 x i16> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x half> %a
}

declare <vscale x 2 x half> @llvm.riscv.vluxei.nxv2f16.nxv2i16(
  <vscale x 2 x half>,
  ptr,
  <vscale x 2 x i16>,
  iXLen);

define <vscale x 2 x half> @intrinsic_vluxei_v_nxv2f16_nxv2f16_nxv2i16(ptr %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv2f16_nxv2f16_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT:    vluxei16.v v8, (a0), v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x half> @llvm.riscv.vluxei.nxv2f16.nxv2i16(
    <vscale x 2 x half> undef,
    ptr %0,
    <vscale x 2 x i16> %1,
    iXLen %2)

  ret <vscale x 2 x half> %a
}

declare <vscale x 2 x half> @llvm.riscv.vluxei.mask.nxv2f16.nxv2i16(
  <vscale x 2 x half>,
  ptr,
  <vscale x 2 x i16>,
  <vscale x 2 x i1>,
  iXLen,
  iXLen);

define <vscale x 2 x half> @intrinsic_vluxei_mask_v_nxv2f16_nxv2f16_nxv2i16(<vscale x 2 x half> %0, ptr %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f16_nxv2f16_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT:    vluxei16.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x half> @llvm.riscv.vluxei.mask.nxv2f16.nxv2i16(
    <vscale x 2 x half> %0,
    ptr %1,
    <vscale x 2 x i16> %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x half> %a
}

declare <vscale x 4 x half> @llvm.riscv.vluxei.nxv4f16.nxv4i16(
  <vscale x 4 x half>,
  ptr,
  <vscale x 4 x i16>,
  iXLen);

define <vscale x 4 x half> @intrinsic_vluxei_v_nxv4f16_nxv4f16_nxv4i16(ptr %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv4f16_nxv4f16_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vluxei16.v v8, (a0), v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vluxei.nxv4f16.nxv4i16(
    <vscale x 4 x half> undef,
    ptr %0,
    <vscale x 4 x i16> %1,
    iXLen %2)

  ret <vscale x 4 x half> %a
}

declare <vscale x 4 x half> @llvm.riscv.vluxei.mask.nxv4f16.nxv4i16(
  <vscale x 4 x half>,
  ptr,
  <vscale x 4 x i16>,
  <vscale x 4 x i1>,
  iXLen,
  iXLen);

define <vscale x 4 x half> @intrinsic_vluxei_mask_v_nxv4f16_nxv4f16_nxv4i16(<vscale x 4 x half> %0, ptr %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f16_nxv4f16_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT:    vluxei16.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vluxei.mask.nxv4f16.nxv4i16(
    <vscale x 4 x half> %0,
    ptr %1,
    <vscale x 4 x i16> %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x half> %a
}

declare <vscale x 8 x half> @llvm.riscv.vluxei.nxv8f16.nxv8i16(
  <vscale x 8 x half>,
  ptr,
  <vscale x 8 x i16>,
  iXLen);

define <vscale x 8 x half> @intrinsic_vluxei_v_nxv8f16_nxv8f16_nxv8i16(ptr %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv8f16_nxv8f16_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT:    vluxei16.v v8, (a0), v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x half> @llvm.riscv.vluxei.nxv8f16.nxv8i16(
    <vscale x 8 x half> undef,
    ptr %0,
    <vscale x 8 x i16> %1,
    iXLen %2)

  ret <vscale x 8 x half> %a
}

declare <vscale x 8 x half> @llvm.riscv.vluxei.mask.nxv8f16.nxv8i16(
  <vscale x 8 x half>,
  ptr,
  <vscale x 8 x i16>,
  <vscale x 8 x i1>,
  iXLen,
  iXLen);

define <vscale x 8 x half> @intrinsic_vluxei_mask_v_nxv8f16_nxv8f16_nxv8i16(<vscale x 8 x half> %0, ptr %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f16_nxv8f16_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT:    vluxei16.v v8, (a0), v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x half> @llvm.riscv.vluxei.mask.nxv8f16.nxv8i16(
    <vscale x 8 x half> %0,
    ptr %1,
    <vscale x 8 x i16> %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x half> %a
}

declare <vscale x 16 x half> @llvm.riscv.vluxei.nxv16f16.nxv16i16(
  <vscale x 16 x half>,
  ptr,
  <vscale x 16 x i16>,
  iXLen);

define <vscale x 16 x half> @intrinsic_vluxei_v_nxv16f16_nxv16f16_nxv16i16(ptr %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv16f16_nxv16f16_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT:    vluxei16.v v8, (a0), v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x half> @llvm.riscv.vluxei.nxv16f16.nxv16i16(
    <vscale x 16 x half> undef,
    ptr %0,
    <vscale x 16 x i16> %1,
    iXLen %2)

  ret <vscale x 16 x half> %a
}

declare <vscale x 16 x half> @llvm.riscv.vluxei.mask.nxv16f16.nxv16i16(
  <vscale x 16 x half>,
  ptr,
  <vscale x 16 x i16>,
  <vscale x 16 x i1>,
  iXLen,
  iXLen);

define <vscale x 16 x half> @intrinsic_vluxei_mask_v_nxv16f16_nxv16f16_nxv16i16(<vscale x 16 x half> %0, ptr %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16f16_nxv16f16_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT:    vluxei16.v v8, (a0), v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x half> @llvm.riscv.vluxei.mask.nxv16f16.nxv16i16(
    <vscale x 16 x half> %0,
    ptr %1,
    <vscale x 16 x i16> %2,
    <vscale x 16 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 16 x half> %a
}

declare <vscale x 32 x half> @llvm.riscv.vluxei.nxv32f16.nxv32i16(
  <vscale x 32 x half>,
  ptr,
  <vscale x 32 x i16>,
  iXLen);

define <vscale x 32 x half> @intrinsic_vluxei_v_nxv32f16_nxv32f16_nxv32i16(ptr %0, <vscale x 32 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv32f16_nxv32f16_nxv32i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
; CHECK-NEXT:    vluxei16.v v8, (a0), v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x half> @llvm.riscv.vluxei.nxv32f16.nxv32i16(
    <vscale x 32 x half> undef,
    ptr %0,
    <vscale x 32 x i16> %1,
    iXLen %2)

  ret <vscale x 32 x half> %a
}

declare <vscale x 32 x half> @llvm.riscv.vluxei.mask.nxv32f16.nxv32i16(
  <vscale x 32 x half>,
  ptr,
  <vscale x 32 x i16>,
  <vscale x 32 x i1>,
  iXLen,
  iXLen);

define <vscale x 32 x half> @intrinsic_vluxei_mask_v_nxv32f16_nxv32f16_nxv32i16(<vscale x 32 x half> %0, ptr %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv32f16_nxv32f16_nxv32i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
; CHECK-NEXT:    vluxei16.v v8, (a0), v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x half> @llvm.riscv.vluxei.mask.nxv32f16.nxv32i16(
    <vscale x 32 x half> %0,
    ptr %1,
    <vscale x 32 x i16> %2,
    <vscale x 32 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 32 x half> %a
}
3104
; ---------------------------------------------------------------------------
; f32 data indexed by i16 offsets (nxv1f32 .. nxv16f32). The index EEW (16)
; is narrower than the data EEW (32), so the unmasked forms load into a
; scratch register group and copy to v8 (vmv1r.v / vmv.v.v), while the masked
; forms reuse the merge operand already in v8.
; ---------------------------------------------------------------------------
declare <vscale x 1 x float> @llvm.riscv.vluxei.nxv1f32.nxv1i16(
  <vscale x 1 x float>,
  ptr,
  <vscale x 1 x i16>,
  iXLen);

define <vscale x 1 x float> @intrinsic_vluxei_v_nxv1f32_nxv1f32_nxv1i16(ptr %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv1f32_nxv1f32_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    vluxei16.v v9, (a0), v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vluxei.nxv1f32.nxv1i16(
    <vscale x 1 x float> undef,
    ptr %0,
    <vscale x 1 x i16> %1,
    iXLen %2)

  ret <vscale x 1 x float> %a
}

declare <vscale x 1 x float> @llvm.riscv.vluxei.mask.nxv1f32.nxv1i16(
  <vscale x 1 x float>,
  ptr,
  <vscale x 1 x i16>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x float> @intrinsic_vluxei_mask_v_nxv1f32_nxv1f32_nxv1i16(<vscale x 1 x float> %0, ptr %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f32_nxv1f32_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT:    vluxei16.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vluxei.mask.nxv1f32.nxv1i16(
    <vscale x 1 x float> %0,
    ptr %1,
    <vscale x 1 x i16> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x float> %a
}

declare <vscale x 2 x float> @llvm.riscv.vluxei.nxv2f32.nxv2i16(
  <vscale x 2 x float>,
  ptr,
  <vscale x 2 x i16>,
  iXLen);

define <vscale x 2 x float> @intrinsic_vluxei_v_nxv2f32_nxv2f32_nxv2i16(ptr %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv2f32_nxv2f32_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vluxei16.v v9, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vluxei.nxv2f32.nxv2i16(
    <vscale x 2 x float> undef,
    ptr %0,
    <vscale x 2 x i16> %1,
    iXLen %2)

  ret <vscale x 2 x float> %a
}

declare <vscale x 2 x float> @llvm.riscv.vluxei.mask.nxv2f32.nxv2i16(
  <vscale x 2 x float>,
  ptr,
  <vscale x 2 x i16>,
  <vscale x 2 x i1>,
  iXLen,
  iXLen);

define <vscale x 2 x float> @intrinsic_vluxei_mask_v_nxv2f32_nxv2f32_nxv2i16(<vscale x 2 x float> %0, ptr %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f32_nxv2f32_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT:    vluxei16.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vluxei.mask.nxv2f32.nxv2i16(
    <vscale x 2 x float> %0,
    ptr %1,
    <vscale x 2 x i16> %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x float> %a
}

declare <vscale x 4 x float> @llvm.riscv.vluxei.nxv4f32.nxv4i16(
  <vscale x 4 x float>,
  ptr,
  <vscale x 4 x i16>,
  iXLen);

define <vscale x 4 x float> @intrinsic_vluxei_v_nxv4f32_nxv4f32_nxv4i16(ptr %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv4f32_nxv4f32_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT:    vluxei16.v v10, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x float> @llvm.riscv.vluxei.nxv4f32.nxv4i16(
    <vscale x 4 x float> undef,
    ptr %0,
    <vscale x 4 x i16> %1,
    iXLen %2)

  ret <vscale x 4 x float> %a
}

declare <vscale x 4 x float> @llvm.riscv.vluxei.mask.nxv4f32.nxv4i16(
  <vscale x 4 x float>,
  ptr,
  <vscale x 4 x i16>,
  <vscale x 4 x i1>,
  iXLen,
  iXLen);

define <vscale x 4 x float> @intrinsic_vluxei_mask_v_nxv4f32_nxv4f32_nxv4i16(<vscale x 4 x float> %0, ptr %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f32_nxv4f32_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT:    vluxei16.v v8, (a0), v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x float> @llvm.riscv.vluxei.mask.nxv4f32.nxv4i16(
    <vscale x 4 x float> %0,
    ptr %1,
    <vscale x 4 x i16> %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x float> %a
}

declare <vscale x 8 x float> @llvm.riscv.vluxei.nxv8f32.nxv8i16(
  <vscale x 8 x float>,
  ptr,
  <vscale x 8 x i16>,
  iXLen);

define <vscale x 8 x float> @intrinsic_vluxei_v_nxv8f32_nxv8f32_nxv8i16(ptr %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv8f32_nxv8f32_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT:    vluxei16.v v12, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vluxei.nxv8f32.nxv8i16(
    <vscale x 8 x float> undef,
    ptr %0,
    <vscale x 8 x i16> %1,
    iXLen %2)

  ret <vscale x 8 x float> %a
}

declare <vscale x 8 x float> @llvm.riscv.vluxei.mask.nxv8f32.nxv8i16(
  <vscale x 8 x float>,
  ptr,
  <vscale x 8 x i16>,
  <vscale x 8 x i1>,
  iXLen,
  iXLen);

define <vscale x 8 x float> @intrinsic_vluxei_mask_v_nxv8f32_nxv8f32_nxv8i16(<vscale x 8 x float> %0, ptr %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f32_nxv8f32_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT:    vluxei16.v v8, (a0), v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vluxei.mask.nxv8f32.nxv8i16(
    <vscale x 8 x float> %0,
    ptr %1,
    <vscale x 8 x i16> %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x float> %a
}

declare <vscale x 16 x float> @llvm.riscv.vluxei.nxv16f32.nxv16i16(
  <vscale x 16 x float>,
  ptr,
  <vscale x 16 x i16>,
  iXLen);

define <vscale x 16 x float> @intrinsic_vluxei_v_nxv16f32_nxv16f32_nxv16i16(ptr %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv16f32_nxv16f32_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT:    vluxei16.v v16, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x float> @llvm.riscv.vluxei.nxv16f32.nxv16i16(
    <vscale x 16 x float> undef,
    ptr %0,
    <vscale x 16 x i16> %1,
    iXLen %2)

  ret <vscale x 16 x float> %a
}

declare <vscale x 16 x float> @llvm.riscv.vluxei.mask.nxv16f32.nxv16i16(
  <vscale x 16 x float>,
  ptr,
  <vscale x 16 x i16>,
  <vscale x 16 x i1>,
  iXLen,
  iXLen);

define <vscale x 16 x float> @intrinsic_vluxei_mask_v_nxv16f32_nxv16f32_nxv16i16(<vscale x 16 x float> %0, ptr %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16f32_nxv16f32_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
; CHECK-NEXT:    vluxei16.v v8, (a0), v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x float> @llvm.riscv.vluxei.mask.nxv16f32.nxv16i16(
    <vscale x 16 x float> %0,
    ptr %1,
    <vscale x 16 x i16> %2,
    <vscale x 16 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 16 x float> %a
}
3344
; ---------------------------------------------------------------------------
; f64 data indexed by i16 offsets (nxv1f64 .. nxv8f64). Data EEW (64) is four
; times the index EEW (16); unmasked forms load into a scratch group and copy
; back with vmv.v.v, masked forms update the merge operand in v8 in place.
; ---------------------------------------------------------------------------
declare <vscale x 1 x double> @llvm.riscv.vluxei.nxv1f64.nxv1i16(
  <vscale x 1 x double>,
  ptr,
  <vscale x 1 x i16>,
  iXLen);

define <vscale x 1 x double> @intrinsic_vluxei_v_nxv1f64_nxv1f64_nxv1i16(ptr %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv1f64_nxv1f64_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT:    vluxei16.v v9, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vluxei.nxv1f64.nxv1i16(
    <vscale x 1 x double> undef,
    ptr %0,
    <vscale x 1 x i16> %1,
    iXLen %2)

  ret <vscale x 1 x double> %a
}

declare <vscale x 1 x double> @llvm.riscv.vluxei.mask.nxv1f64.nxv1i16(
  <vscale x 1 x double>,
  ptr,
  <vscale x 1 x i16>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x double> @intrinsic_vluxei_mask_v_nxv1f64_nxv1f64_nxv1i16(<vscale x 1 x double> %0, ptr %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f64_nxv1f64_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT:    vluxei16.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vluxei.mask.nxv1f64.nxv1i16(
    <vscale x 1 x double> %0,
    ptr %1,
    <vscale x 1 x i16> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x double> %a
}

declare <vscale x 2 x double> @llvm.riscv.vluxei.nxv2f64.nxv2i16(
  <vscale x 2 x double>,
  ptr,
  <vscale x 2 x i16>,
  iXLen);

define <vscale x 2 x double> @intrinsic_vluxei_v_nxv2f64_nxv2f64_nxv2i16(ptr %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv2f64_nxv2f64_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT:    vluxei16.v v10, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x double> @llvm.riscv.vluxei.nxv2f64.nxv2i16(
    <vscale x 2 x double> undef,
    ptr %0,
    <vscale x 2 x i16> %1,
    iXLen %2)

  ret <vscale x 2 x double> %a
}

declare <vscale x 2 x double> @llvm.riscv.vluxei.mask.nxv2f64.nxv2i16(
  <vscale x 2 x double>,
  ptr,
  <vscale x 2 x i16>,
  <vscale x 2 x i1>,
  iXLen,
  iXLen);

define <vscale x 2 x double> @intrinsic_vluxei_mask_v_nxv2f64_nxv2f64_nxv2i16(<vscale x 2 x double> %0, ptr %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f64_nxv2f64_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT:    vluxei16.v v8, (a0), v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x double> @llvm.riscv.vluxei.mask.nxv2f64.nxv2i16(
    <vscale x 2 x double> %0,
    ptr %1,
    <vscale x 2 x i16> %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x double> %a
}

declare <vscale x 4 x double> @llvm.riscv.vluxei.nxv4f64.nxv4i16(
  <vscale x 4 x double>,
  ptr,
  <vscale x 4 x i16>,
  iXLen);

define <vscale x 4 x double> @intrinsic_vluxei_v_nxv4f64_nxv4f64_nxv4i16(ptr %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv4f64_nxv4f64_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
; CHECK-NEXT:    vluxei16.v v12, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x double> @llvm.riscv.vluxei.nxv4f64.nxv4i16(
    <vscale x 4 x double> undef,
    ptr %0,
    <vscale x 4 x i16> %1,
    iXLen %2)

  ret <vscale x 4 x double> %a
}

declare <vscale x 4 x double> @llvm.riscv.vluxei.mask.nxv4f64.nxv4i16(
  <vscale x 4 x double>,
  ptr,
  <vscale x 4 x i16>,
  <vscale x 4 x i1>,
  iXLen,
  iXLen);

define <vscale x 4 x double> @intrinsic_vluxei_mask_v_nxv4f64_nxv4f64_nxv4i16(<vscale x 4 x double> %0, ptr %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f64_nxv4f64_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
; CHECK-NEXT:    vluxei16.v v8, (a0), v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x double> @llvm.riscv.vluxei.mask.nxv4f64.nxv4i16(
    <vscale x 4 x double> %0,
    ptr %1,
    <vscale x 4 x i16> %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x double> %a
}

declare <vscale x 8 x double> @llvm.riscv.vluxei.nxv8f64.nxv8i16(
  <vscale x 8 x double>,
  ptr,
  <vscale x 8 x i16>,
  iXLen);

define <vscale x 8 x double> @intrinsic_vluxei_v_nxv8f64_nxv8f64_nxv8i16(ptr %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv8f64_nxv8f64_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
; CHECK-NEXT:    vluxei16.v v16, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x double> @llvm.riscv.vluxei.nxv8f64.nxv8i16(
    <vscale x 8 x double> undef,
    ptr %0,
    <vscale x 8 x i16> %1,
    iXLen %2)

  ret <vscale x 8 x double> %a
}

declare <vscale x 8 x double> @llvm.riscv.vluxei.mask.nxv8f64.nxv8i16(
  <vscale x 8 x double>,
  ptr,
  <vscale x 8 x i16>,
  <vscale x 8 x i1>,
  iXLen,
  iXLen);

define <vscale x 8 x double> @intrinsic_vluxei_mask_v_nxv8f64_nxv8f64_nxv8i16(<vscale x 8 x double> %0, ptr %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f64_nxv8f64_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
; CHECK-NEXT:    vluxei16.v v8, (a0), v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x double> @llvm.riscv.vluxei.mask.nxv8f64.nxv8i16(
    <vscale x 8 x double> %0,
    ptr %1,
    <vscale x 8 x i16> %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x double> %a
}
3536
3537declare <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i8(
3538  <vscale x 1 x i8>,
3539  ptr,
3540  <vscale x 1 x i8>,
3541  iXLen);
3542
3543define <vscale x 1 x i8> @intrinsic_vluxei_v_nxv1i8_nxv1i8_nxv1i8(ptr %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
3544; CHECK-LABEL: intrinsic_vluxei_v_nxv1i8_nxv1i8_nxv1i8:
3545; CHECK:       # %bb.0: # %entry
3546; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
3547; CHECK-NEXT:    vluxei8.v v8, (a0), v8
3548; CHECK-NEXT:    ret
3549entry:
3550  %a = call <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i8(
3551    <vscale x 1 x i8> undef,
3552    ptr %0,
3553    <vscale x 1 x i8> %1,
3554    iXLen %2)
3555
3556  ret <vscale x 1 x i8> %a
3557}
3558
3559declare <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1i8(
3560  <vscale x 1 x i8>,
3561  ptr,
3562  <vscale x 1 x i8>,
3563  <vscale x 1 x i1>,
3564  iXLen,
3565  iXLen);
3566
3567define <vscale x 1 x i8> @intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, ptr %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
3568; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1i8:
3569; CHECK:       # %bb.0: # %entry
3570; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
3571; CHECK-NEXT:    vluxei8.v v8, (a0), v9, v0.t
3572; CHECK-NEXT:    ret
3573entry:
3574  %a = call <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1i8(
3575    <vscale x 1 x i8> %0,
3576    ptr %1,
3577    <vscale x 1 x i8> %2,
3578    <vscale x 1 x i1> %3,
3579    iXLen %4, iXLen 1)
3580
3581  ret <vscale x 1 x i8> %a
3582}
3583
3584declare <vscale x 2 x i8> @llvm.riscv.vluxei.nxv2i8.nxv2i8(
3585  <vscale x 2 x i8>,
3586  ptr,
3587  <vscale x 2 x i8>,
3588  iXLen);
3589
3590define <vscale x 2 x i8> @intrinsic_vluxei_v_nxv2i8_nxv2i8_nxv2i8(ptr %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
3591; CHECK-LABEL: intrinsic_vluxei_v_nxv2i8_nxv2i8_nxv2i8:
3592; CHECK:       # %bb.0: # %entry
3593; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
3594; CHECK-NEXT:    vluxei8.v v8, (a0), v8
3595; CHECK-NEXT:    ret
3596entry:
3597  %a = call <vscale x 2 x i8> @llvm.riscv.vluxei.nxv2i8.nxv2i8(
3598    <vscale x 2 x i8> undef,
3599    ptr %0,
3600    <vscale x 2 x i8> %1,
3601    iXLen %2)
3602
3603  ret <vscale x 2 x i8> %a
3604}
3605
3606declare <vscale x 2 x i8> @llvm.riscv.vluxei.mask.nxv2i8.nxv2i8(
3607  <vscale x 2 x i8>,
3608  ptr,
3609  <vscale x 2 x i8>,
3610  <vscale x 2 x i1>,
3611  iXLen,
3612  iXLen);
3613
3614define <vscale x 2 x i8> @intrinsic_vluxei_mask_v_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, ptr %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
3615; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i8_nxv2i8_nxv2i8:
3616; CHECK:       # %bb.0: # %entry
3617; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
3618; CHECK-NEXT:    vluxei8.v v8, (a0), v9, v0.t
3619; CHECK-NEXT:    ret
3620entry:
3621  %a = call <vscale x 2 x i8> @llvm.riscv.vluxei.mask.nxv2i8.nxv2i8(
3622    <vscale x 2 x i8> %0,
3623    ptr %1,
3624    <vscale x 2 x i8> %2,
3625    <vscale x 2 x i1> %3,
3626    iXLen %4, iXLen 1)
3627
3628  ret <vscale x 2 x i8> %a
3629}
3630
3631declare <vscale x 4 x i8> @llvm.riscv.vluxei.nxv4i8.nxv4i8(
3632  <vscale x 4 x i8>,
3633  ptr,
3634  <vscale x 4 x i8>,
3635  iXLen);
3636
3637define <vscale x 4 x i8> @intrinsic_vluxei_v_nxv4i8_nxv4i8_nxv4i8(ptr %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
3638; CHECK-LABEL: intrinsic_vluxei_v_nxv4i8_nxv4i8_nxv4i8:
3639; CHECK:       # %bb.0: # %entry
3640; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
3641; CHECK-NEXT:    vluxei8.v v8, (a0), v8
3642; CHECK-NEXT:    ret
3643entry:
3644  %a = call <vscale x 4 x i8> @llvm.riscv.vluxei.nxv4i8.nxv4i8(
3645    <vscale x 4 x i8> undef,
3646    ptr %0,
3647    <vscale x 4 x i8> %1,
3648    iXLen %2)
3649
3650  ret <vscale x 4 x i8> %a
3651}
3652
3653declare <vscale x 4 x i8> @llvm.riscv.vluxei.mask.nxv4i8.nxv4i8(
3654  <vscale x 4 x i8>,
3655  ptr,
3656  <vscale x 4 x i8>,
3657  <vscale x 4 x i1>,
3658  iXLen,
3659  iXLen);
3660
3661define <vscale x 4 x i8> @intrinsic_vluxei_mask_v_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, ptr %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
3662; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i8_nxv4i8_nxv4i8:
3663; CHECK:       # %bb.0: # %entry
3664; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
3665; CHECK-NEXT:    vluxei8.v v8, (a0), v9, v0.t
3666; CHECK-NEXT:    ret
3667entry:
3668  %a = call <vscale x 4 x i8> @llvm.riscv.vluxei.mask.nxv4i8.nxv4i8(
3669    <vscale x 4 x i8> %0,
3670    ptr %1,
3671    <vscale x 4 x i8> %2,
3672    <vscale x 4 x i1> %3,
3673    iXLen %4, iXLen 1)
3674
3675  ret <vscale x 4 x i8> %a
3676}
3677
3678declare <vscale x 8 x i8> @llvm.riscv.vluxei.nxv8i8.nxv8i8(
3679  <vscale x 8 x i8>,
3680  ptr,
3681  <vscale x 8 x i8>,
3682  iXLen);
3683
3684define <vscale x 8 x i8> @intrinsic_vluxei_v_nxv8i8_nxv8i8_nxv8i8(ptr %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
3685; CHECK-LABEL: intrinsic_vluxei_v_nxv8i8_nxv8i8_nxv8i8:
3686; CHECK:       # %bb.0: # %entry
3687; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
3688; CHECK-NEXT:    vluxei8.v v8, (a0), v8
3689; CHECK-NEXT:    ret
3690entry:
3691  %a = call <vscale x 8 x i8> @llvm.riscv.vluxei.nxv8i8.nxv8i8(
3692    <vscale x 8 x i8> undef,
3693    ptr %0,
3694    <vscale x 8 x i8> %1,
3695    iXLen %2)
3696
3697  ret <vscale x 8 x i8> %a
3698}
3699
3700declare <vscale x 8 x i8> @llvm.riscv.vluxei.mask.nxv8i8.nxv8i8(
3701  <vscale x 8 x i8>,
3702  ptr,
3703  <vscale x 8 x i8>,
3704  <vscale x 8 x i1>,
3705  iXLen,
3706  iXLen);
3707
3708define <vscale x 8 x i8> @intrinsic_vluxei_mask_v_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, ptr %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
3709; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i8_nxv8i8_nxv8i8:
3710; CHECK:       # %bb.0: # %entry
3711; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
3712; CHECK-NEXT:    vluxei8.v v8, (a0), v9, v0.t
3713; CHECK-NEXT:    ret
3714entry:
3715  %a = call <vscale x 8 x i8> @llvm.riscv.vluxei.mask.nxv8i8.nxv8i8(
3716    <vscale x 8 x i8> %0,
3717    ptr %1,
3718    <vscale x 8 x i8> %2,
3719    <vscale x 8 x i1> %3,
3720    iXLen %4, iXLen 1)
3721
3722  ret <vscale x 8 x i8> %a
3723}
3724
3725declare <vscale x 16 x i8> @llvm.riscv.vluxei.nxv16i8.nxv16i8(
3726  <vscale x 16 x i8>,
3727  ptr,
3728  <vscale x 16 x i8>,
3729  iXLen);
3730
; Unmasked vluxei: nxv16i8 result indexed by e8 (nxv16i8) offsets; passthru is undef.
define <vscale x 16 x i8> @intrinsic_vluxei_v_nxv16i8_nxv16i8_nxv16i8(ptr %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv16i8_nxv16i8_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT:    vluxei8.v v8, (a0), v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vluxei.nxv16i8.nxv16i8(
    <vscale x 16 x i8> undef,
    ptr %0,
    <vscale x 16 x i8> %1,
    iXLen %2)

  ret <vscale x 16 x i8> %a
}
3746
3747declare <vscale x 16 x i8> @llvm.riscv.vluxei.mask.nxv16i8.nxv16i8(
3748  <vscale x 16 x i8>,
3749  ptr,
3750  <vscale x 16 x i8>,
3751  <vscale x 16 x i1>,
3752  iXLen,
3753  iXLen);
3754
; Masked vluxei: nxv16i8 result, e8 indices; %0 is the passthru, trailing iXLen 1 is the policy operand.
define <vscale x 16 x i8> @intrinsic_vluxei_mask_v_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, ptr %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16i8_nxv16i8_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT:    vluxei8.v v8, (a0), v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vluxei.mask.nxv16i8.nxv16i8(
    <vscale x 16 x i8> %0,
    ptr %1,
    <vscale x 16 x i8> %2,
    <vscale x 16 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 16 x i8> %a
}
3771
3772declare <vscale x 32 x i8> @llvm.riscv.vluxei.nxv32i8.nxv32i8(
3773  <vscale x 32 x i8>,
3774  ptr,
3775  <vscale x 32 x i8>,
3776  iXLen);
3777
; Unmasked vluxei: nxv32i8 result indexed by e8 (nxv32i8) offsets; passthru is undef.
define <vscale x 32 x i8> @intrinsic_vluxei_v_nxv32i8_nxv32i8_nxv32i8(ptr %0, <vscale x 32 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv32i8_nxv32i8_nxv32i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT:    vluxei8.v v8, (a0), v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vluxei.nxv32i8.nxv32i8(
    <vscale x 32 x i8> undef,
    ptr %0,
    <vscale x 32 x i8> %1,
    iXLen %2)

  ret <vscale x 32 x i8> %a
}
3793
3794declare <vscale x 32 x i8> @llvm.riscv.vluxei.mask.nxv32i8.nxv32i8(
3795  <vscale x 32 x i8>,
3796  ptr,
3797  <vscale x 32 x i8>,
3798  <vscale x 32 x i1>,
3799  iXLen,
3800  iXLen);
3801
; Masked vluxei: nxv32i8 result, e8 indices; %0 is the passthru, trailing iXLen 1 is the policy operand.
define <vscale x 32 x i8> @intrinsic_vluxei_mask_v_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, ptr %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv32i8_nxv32i8_nxv32i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT:    vluxei8.v v8, (a0), v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vluxei.mask.nxv32i8.nxv32i8(
    <vscale x 32 x i8> %0,
    ptr %1,
    <vscale x 32 x i8> %2,
    <vscale x 32 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 32 x i8> %a
}
3818
3819declare <vscale x 64 x i8> @llvm.riscv.vluxei.nxv64i8.nxv64i8(
3820  <vscale x 64 x i8>,
3821  ptr,
3822  <vscale x 64 x i8>,
3823  iXLen);
3824
; Unmasked vluxei: nxv64i8 result indexed by e8 (nxv64i8) offsets; passthru is undef.
define <vscale x 64 x i8> @intrinsic_vluxei_v_nxv64i8_nxv64i8_nxv64i8(ptr %0, <vscale x 64 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv64i8_nxv64i8_nxv64i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT:    vluxei8.v v8, (a0), v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 64 x i8> @llvm.riscv.vluxei.nxv64i8.nxv64i8(
    <vscale x 64 x i8> undef,
    ptr %0,
    <vscale x 64 x i8> %1,
    iXLen %2)

  ret <vscale x 64 x i8> %a
}
3840
3841declare <vscale x 64 x i8> @llvm.riscv.vluxei.mask.nxv64i8.nxv64i8(
3842  <vscale x 64 x i8>,
3843  ptr,
3844  <vscale x 64 x i8>,
3845  <vscale x 64 x i1>,
3846  iXLen,
3847  iXLen);
3848
; Masked vluxei: nxv64i8 result, e8 indices; %0 is the passthru, trailing iXLen 1 is the policy operand.
define <vscale x 64 x i8> @intrinsic_vluxei_mask_v_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, ptr %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv64i8_nxv64i8_nxv64i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
; CHECK-NEXT:    vluxei8.v v8, (a0), v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 64 x i8> @llvm.riscv.vluxei.mask.nxv64i8.nxv64i8(
    <vscale x 64 x i8> %0,
    ptr %1,
    <vscale x 64 x i8> %2,
    <vscale x 64 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 64 x i8> %a
}
3865
3866declare <vscale x 1 x i16> @llvm.riscv.vluxei.nxv1i16.nxv1i8(
3867  <vscale x 1 x i16>,
3868  ptr,
3869  <vscale x 1 x i8>,
3870  iXLen);
3871
; Unmasked vluxei: nxv1i16 result indexed by e8 (nxv1i8) offsets; passthru is undef.
define <vscale x 1 x i16> @intrinsic_vluxei_v_nxv1i16_nxv1i16_nxv1i8(ptr %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv1i16_nxv1i16_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vluxei8.v v9, (a0), v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vluxei.nxv1i16.nxv1i8(
    <vscale x 1 x i16> undef,
    ptr %0,
    <vscale x 1 x i8> %1,
    iXLen %2)

  ret <vscale x 1 x i16> %a
}
3888
3889declare <vscale x 1 x i16> @llvm.riscv.vluxei.mask.nxv1i16.nxv1i8(
3890  <vscale x 1 x i16>,
3891  ptr,
3892  <vscale x 1 x i8>,
3893  <vscale x 1 x i1>,
3894  iXLen,
3895  iXLen);
3896
; Masked vluxei: nxv1i16 result, e8 indices; %0 is the passthru, trailing iXLen 1 is the policy operand.
define <vscale x 1 x i16> @intrinsic_vluxei_mask_v_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, ptr %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i16_nxv1i16_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT:    vluxei8.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vluxei.mask.nxv1i16.nxv1i8(
    <vscale x 1 x i16> %0,
    ptr %1,
    <vscale x 1 x i8> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x i16> %a
}
3913
3914declare <vscale x 2 x i16> @llvm.riscv.vluxei.nxv2i16.nxv2i8(
3915  <vscale x 2 x i16>,
3916  ptr,
3917  <vscale x 2 x i8>,
3918  iXLen);
3919
; Unmasked vluxei: nxv2i16 result indexed by e8 (nxv2i8) offsets; passthru is undef.
define <vscale x 2 x i16> @intrinsic_vluxei_v_nxv2i16_nxv2i16_nxv2i8(ptr %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv2i16_nxv2i16_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT:    vluxei8.v v9, (a0), v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vluxei.nxv2i16.nxv2i8(
    <vscale x 2 x i16> undef,
    ptr %0,
    <vscale x 2 x i8> %1,
    iXLen %2)

  ret <vscale x 2 x i16> %a
}
3936
3937declare <vscale x 2 x i16> @llvm.riscv.vluxei.mask.nxv2i16.nxv2i8(
3938  <vscale x 2 x i16>,
3939  ptr,
3940  <vscale x 2 x i8>,
3941  <vscale x 2 x i1>,
3942  iXLen,
3943  iXLen);
3944
; Masked vluxei: nxv2i16 result, e8 indices; %0 is the passthru, trailing iXLen 1 is the policy operand.
define <vscale x 2 x i16> @intrinsic_vluxei_mask_v_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, ptr %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i16_nxv2i16_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT:    vluxei8.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vluxei.mask.nxv2i16.nxv2i8(
    <vscale x 2 x i16> %0,
    ptr %1,
    <vscale x 2 x i8> %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x i16> %a
}
3961
3962declare <vscale x 4 x i16> @llvm.riscv.vluxei.nxv4i16.nxv4i8(
3963  <vscale x 4 x i16>,
3964  ptr,
3965  <vscale x 4 x i8>,
3966  iXLen);
3967
; Unmasked vluxei: nxv4i16 result indexed by e8 (nxv4i8) offsets; passthru is undef.
define <vscale x 4 x i16> @intrinsic_vluxei_v_nxv4i16_nxv4i16_nxv4i8(ptr %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv4i16_nxv4i16_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vluxei8.v v9, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vluxei.nxv4i16.nxv4i8(
    <vscale x 4 x i16> undef,
    ptr %0,
    <vscale x 4 x i8> %1,
    iXLen %2)

  ret <vscale x 4 x i16> %a
}
3984
3985declare <vscale x 4 x i16> @llvm.riscv.vluxei.mask.nxv4i16.nxv4i8(
3986  <vscale x 4 x i16>,
3987  ptr,
3988  <vscale x 4 x i8>,
3989  <vscale x 4 x i1>,
3990  iXLen,
3991  iXLen);
3992
; Masked vluxei: nxv4i16 result, e8 indices; %0 is the passthru, trailing iXLen 1 is the policy operand.
define <vscale x 4 x i16> @intrinsic_vluxei_mask_v_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, ptr %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i16_nxv4i16_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT:    vluxei8.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vluxei.mask.nxv4i16.nxv4i8(
    <vscale x 4 x i16> %0,
    ptr %1,
    <vscale x 4 x i8> %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x i16> %a
}
4009
4010declare <vscale x 8 x i16> @llvm.riscv.vluxei.nxv8i16.nxv8i8(
4011  <vscale x 8 x i16>,
4012  ptr,
4013  <vscale x 8 x i8>,
4014  iXLen);
4015
; Unmasked vluxei: nxv8i16 result indexed by e8 (nxv8i8) offsets; passthru is undef.
define <vscale x 8 x i16> @intrinsic_vluxei_v_nxv8i16_nxv8i16_nxv8i8(ptr %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv8i16_nxv8i16_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT:    vluxei8.v v10, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vluxei.nxv8i16.nxv8i8(
    <vscale x 8 x i16> undef,
    ptr %0,
    <vscale x 8 x i8> %1,
    iXLen %2)

  ret <vscale x 8 x i16> %a
}
4032
4033declare <vscale x 8 x i16> @llvm.riscv.vluxei.mask.nxv8i16.nxv8i8(
4034  <vscale x 8 x i16>,
4035  ptr,
4036  <vscale x 8 x i8>,
4037  <vscale x 8 x i1>,
4038  iXLen,
4039  iXLen);
4040
; Masked vluxei: nxv8i16 result, e8 indices; %0 is the passthru, trailing iXLen 1 is the policy operand.
define <vscale x 8 x i16> @intrinsic_vluxei_mask_v_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, ptr %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i16_nxv8i16_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT:    vluxei8.v v8, (a0), v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vluxei.mask.nxv8i16.nxv8i8(
    <vscale x 8 x i16> %0,
    ptr %1,
    <vscale x 8 x i8> %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x i16> %a
}
4057
4058declare <vscale x 16 x i16> @llvm.riscv.vluxei.nxv16i16.nxv16i8(
4059  <vscale x 16 x i16>,
4060  ptr,
4061  <vscale x 16 x i8>,
4062  iXLen);
4063
; Unmasked vluxei: nxv16i16 result indexed by e8 (nxv16i8) offsets; passthru is undef.
define <vscale x 16 x i16> @intrinsic_vluxei_v_nxv16i16_nxv16i16_nxv16i8(ptr %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv16i16_nxv16i16_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT:    vluxei8.v v12, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vluxei.nxv16i16.nxv16i8(
    <vscale x 16 x i16> undef,
    ptr %0,
    <vscale x 16 x i8> %1,
    iXLen %2)

  ret <vscale x 16 x i16> %a
}
4080
4081declare <vscale x 16 x i16> @llvm.riscv.vluxei.mask.nxv16i16.nxv16i8(
4082  <vscale x 16 x i16>,
4083  ptr,
4084  <vscale x 16 x i8>,
4085  <vscale x 16 x i1>,
4086  iXLen,
4087  iXLen);
4088
; Masked vluxei: nxv16i16 result, e8 indices; %0 is the passthru, trailing iXLen 1 is the policy operand.
define <vscale x 16 x i16> @intrinsic_vluxei_mask_v_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, ptr %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16i16_nxv16i16_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT:    vluxei8.v v8, (a0), v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vluxei.mask.nxv16i16.nxv16i8(
    <vscale x 16 x i16> %0,
    ptr %1,
    <vscale x 16 x i8> %2,
    <vscale x 16 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 16 x i16> %a
}
4105
4106declare <vscale x 32 x i16> @llvm.riscv.vluxei.nxv32i16.nxv32i8(
4107  <vscale x 32 x i16>,
4108  ptr,
4109  <vscale x 32 x i8>,
4110  iXLen);
4111
; Unmasked vluxei: nxv32i16 result indexed by e8 (nxv32i8) offsets; passthru is undef.
define <vscale x 32 x i16> @intrinsic_vluxei_v_nxv32i16_nxv32i16_nxv32i8(ptr %0, <vscale x 32 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv32i16_nxv32i16_nxv32i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
; CHECK-NEXT:    vluxei8.v v16, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vluxei.nxv32i16.nxv32i8(
    <vscale x 32 x i16> undef,
    ptr %0,
    <vscale x 32 x i8> %1,
    iXLen %2)

  ret <vscale x 32 x i16> %a
}
4128
4129declare <vscale x 32 x i16> @llvm.riscv.vluxei.mask.nxv32i16.nxv32i8(
4130  <vscale x 32 x i16>,
4131  ptr,
4132  <vscale x 32 x i8>,
4133  <vscale x 32 x i1>,
4134  iXLen,
4135  iXLen);
4136
; Masked vluxei: nxv32i16 result, e8 indices; %0 is the passthru, trailing iXLen 1 is the policy operand.
define <vscale x 32 x i16> @intrinsic_vluxei_mask_v_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, ptr %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv32i16_nxv32i16_nxv32i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
; CHECK-NEXT:    vluxei8.v v8, (a0), v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vluxei.mask.nxv32i16.nxv32i8(
    <vscale x 32 x i16> %0,
    ptr %1,
    <vscale x 32 x i8> %2,
    <vscale x 32 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 32 x i16> %a
}
4153
4154declare <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i8(
4155  <vscale x 1 x i32>,
4156  ptr,
4157  <vscale x 1 x i8>,
4158  iXLen);
4159
; Unmasked vluxei: nxv1i32 result indexed by e8 (nxv1i8) offsets; passthru is undef.
define <vscale x 1 x i32> @intrinsic_vluxei_v_nxv1i32_nxv1i32_nxv1i8(ptr %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv1i32_nxv1i32_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    vluxei8.v v9, (a0), v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i8(
    <vscale x 1 x i32> undef,
    ptr %0,
    <vscale x 1 x i8> %1,
    iXLen %2)

  ret <vscale x 1 x i32> %a
}
4176
4177declare <vscale x 1 x i32> @llvm.riscv.vluxei.mask.nxv1i32.nxv1i8(
4178  <vscale x 1 x i32>,
4179  ptr,
4180  <vscale x 1 x i8>,
4181  <vscale x 1 x i1>,
4182  iXLen,
4183  iXLen);
4184
; Masked vluxei: nxv1i32 result, e8 indices; %0 is the passthru, trailing iXLen 1 is the policy operand.
define <vscale x 1 x i32> @intrinsic_vluxei_mask_v_nxv1i32_nxv1i32_nxv1i8(<vscale x 1 x i32> %0, ptr %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i32_nxv1i32_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT:    vluxei8.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vluxei.mask.nxv1i32.nxv1i8(
    <vscale x 1 x i32> %0,
    ptr %1,
    <vscale x 1 x i8> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x i32> %a
}
4201
4202declare <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i8(
4203  <vscale x 2 x i32>,
4204  ptr,
4205  <vscale x 2 x i8>,
4206  iXLen);
4207
; Unmasked vluxei: nxv2i32 result indexed by e8 (nxv2i8) offsets; passthru is undef.
define <vscale x 2 x i32> @intrinsic_vluxei_v_nxv2i32_nxv2i32_nxv2i8(ptr %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv2i32_nxv2i32_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vluxei8.v v9, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i8(
    <vscale x 2 x i32> undef,
    ptr %0,
    <vscale x 2 x i8> %1,
    iXLen %2)

  ret <vscale x 2 x i32> %a
}
4224
4225declare <vscale x 2 x i32> @llvm.riscv.vluxei.mask.nxv2i32.nxv2i8(
4226  <vscale x 2 x i32>,
4227  ptr,
4228  <vscale x 2 x i8>,
4229  <vscale x 2 x i1>,
4230  iXLen,
4231  iXLen);
4232
; Masked vluxei: nxv2i32 result, e8 indices; %0 is the passthru, trailing iXLen 1 is the policy operand.
define <vscale x 2 x i32> @intrinsic_vluxei_mask_v_nxv2i32_nxv2i32_nxv2i8(<vscale x 2 x i32> %0, ptr %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i32_nxv2i32_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT:    vluxei8.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vluxei.mask.nxv2i32.nxv2i8(
    <vscale x 2 x i32> %0,
    ptr %1,
    <vscale x 2 x i8> %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x i32> %a
}
4249
4250declare <vscale x 4 x i32> @llvm.riscv.vluxei.nxv4i32.nxv4i8(
4251  <vscale x 4 x i32>,
4252  ptr,
4253  <vscale x 4 x i8>,
4254  iXLen);
4255
; Unmasked vluxei: nxv4i32 result indexed by e8 (nxv4i8) offsets; passthru is undef.
define <vscale x 4 x i32> @intrinsic_vluxei_v_nxv4i32_nxv4i32_nxv4i8(ptr %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv4i32_nxv4i32_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT:    vluxei8.v v10, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vluxei.nxv4i32.nxv4i8(
    <vscale x 4 x i32> undef,
    ptr %0,
    <vscale x 4 x i8> %1,
    iXLen %2)

  ret <vscale x 4 x i32> %a
}
4272
4273declare <vscale x 4 x i32> @llvm.riscv.vluxei.mask.nxv4i32.nxv4i8(
4274  <vscale x 4 x i32>,
4275  ptr,
4276  <vscale x 4 x i8>,
4277  <vscale x 4 x i1>,
4278  iXLen,
4279  iXLen);
4280
; Masked vluxei: nxv4i32 result, e8 indices; %0 is the passthru, trailing iXLen 1 is the policy operand.
define <vscale x 4 x i32> @intrinsic_vluxei_mask_v_nxv4i32_nxv4i32_nxv4i8(<vscale x 4 x i32> %0, ptr %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i32_nxv4i32_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT:    vluxei8.v v8, (a0), v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vluxei.mask.nxv4i32.nxv4i8(
    <vscale x 4 x i32> %0,
    ptr %1,
    <vscale x 4 x i8> %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x i32> %a
}
4297
4298declare <vscale x 8 x i32> @llvm.riscv.vluxei.nxv8i32.nxv8i8(
4299  <vscale x 8 x i32>,
4300  ptr,
4301  <vscale x 8 x i8>,
4302  iXLen);
4303
; Unmasked vluxei: nxv8i32 result indexed by e8 (nxv8i8) offsets; passthru is undef.
define <vscale x 8 x i32> @intrinsic_vluxei_v_nxv8i32_nxv8i32_nxv8i8(ptr %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv8i32_nxv8i32_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT:    vluxei8.v v12, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vluxei.nxv8i32.nxv8i8(
    <vscale x 8 x i32> undef,
    ptr %0,
    <vscale x 8 x i8> %1,
    iXLen %2)

  ret <vscale x 8 x i32> %a
}
4320
4321declare <vscale x 8 x i32> @llvm.riscv.vluxei.mask.nxv8i32.nxv8i8(
4322  <vscale x 8 x i32>,
4323  ptr,
4324  <vscale x 8 x i8>,
4325  <vscale x 8 x i1>,
4326  iXLen,
4327  iXLen);
4328
; Masked vluxei: nxv8i32 result, e8 indices; %0 is the passthru, trailing iXLen 1 is the policy operand.
define <vscale x 8 x i32> @intrinsic_vluxei_mask_v_nxv8i32_nxv8i32_nxv8i8(<vscale x 8 x i32> %0, ptr %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i32_nxv8i32_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT:    vluxei8.v v8, (a0), v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vluxei.mask.nxv8i32.nxv8i8(
    <vscale x 8 x i32> %0,
    ptr %1,
    <vscale x 8 x i8> %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x i32> %a
}
4345
4346declare <vscale x 16 x i32> @llvm.riscv.vluxei.nxv16i32.nxv16i8(
4347  <vscale x 16 x i32>,
4348  ptr,
4349  <vscale x 16 x i8>,
4350  iXLen);
4351
; Unmasked vluxei: nxv16i32 result indexed by e8 (nxv16i8) offsets; passthru is undef.
define <vscale x 16 x i32> @intrinsic_vluxei_v_nxv16i32_nxv16i32_nxv16i8(ptr %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv16i32_nxv16i32_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT:    vluxei8.v v16, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vluxei.nxv16i32.nxv16i8(
    <vscale x 16 x i32> undef,
    ptr %0,
    <vscale x 16 x i8> %1,
    iXLen %2)

  ret <vscale x 16 x i32> %a
}
4368
4369declare <vscale x 16 x i32> @llvm.riscv.vluxei.mask.nxv16i32.nxv16i8(
4370  <vscale x 16 x i32>,
4371  ptr,
4372  <vscale x 16 x i8>,
4373  <vscale x 16 x i1>,
4374  iXLen,
4375  iXLen);
4376
; Masked vluxei: nxv16i32 result, e8 indices; %0 is the passthru, trailing iXLen 1 is the policy operand.
define <vscale x 16 x i32> @intrinsic_vluxei_mask_v_nxv16i32_nxv16i32_nxv16i8(<vscale x 16 x i32> %0, ptr %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16i32_nxv16i32_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
; CHECK-NEXT:    vluxei8.v v8, (a0), v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vluxei.mask.nxv16i32.nxv16i8(
    <vscale x 16 x i32> %0,
    ptr %1,
    <vscale x 16 x i8> %2,
    <vscale x 16 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 16 x i32> %a
}
4393
4394declare <vscale x 1 x i64> @llvm.riscv.vluxei.nxv1i64.nxv1i8(
4395  <vscale x 1 x i64>,
4396  ptr,
4397  <vscale x 1 x i8>,
4398  iXLen);
4399
; Unmasked vluxei: nxv1i64 result indexed by e8 (nxv1i8) offsets; passthru is undef.
define <vscale x 1 x i64> @intrinsic_vluxei_v_nxv1i64_nxv1i64_nxv1i8(ptr %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv1i64_nxv1i64_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT:    vluxei8.v v9, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vluxei.nxv1i64.nxv1i8(
    <vscale x 1 x i64> undef,
    ptr %0,
    <vscale x 1 x i8> %1,
    iXLen %2)

  ret <vscale x 1 x i64> %a
}
4416
4417declare <vscale x 1 x i64> @llvm.riscv.vluxei.mask.nxv1i64.nxv1i8(
4418  <vscale x 1 x i64>,
4419  ptr,
4420  <vscale x 1 x i8>,
4421  <vscale x 1 x i1>,
4422  iXLen,
4423  iXLen);
4424
; Masked vluxei: nxv1i64 result, e8 indices; %0 is the passthru, trailing iXLen 1 is the policy operand.
define <vscale x 1 x i64> @intrinsic_vluxei_mask_v_nxv1i64_nxv1i64_nxv1i8(<vscale x 1 x i64> %0, ptr %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i64_nxv1i64_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT:    vluxei8.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vluxei.mask.nxv1i64.nxv1i8(
    <vscale x 1 x i64> %0,
    ptr %1,
    <vscale x 1 x i8> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x i64> %a
}
4441
4442declare <vscale x 2 x i64> @llvm.riscv.vluxei.nxv2i64.nxv2i8(
4443  <vscale x 2 x i64>,
4444  ptr,
4445  <vscale x 2 x i8>,
4446  iXLen);
4447
; Unmasked vluxei: nxv2i64 result indexed by e8 (nxv2i8) offsets; passthru is undef.
define <vscale x 2 x i64> @intrinsic_vluxei_v_nxv2i64_nxv2i64_nxv2i8(ptr %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv2i64_nxv2i64_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT:    vluxei8.v v10, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vluxei.nxv2i64.nxv2i8(
    <vscale x 2 x i64> undef,
    ptr %0,
    <vscale x 2 x i8> %1,
    iXLen %2)

  ret <vscale x 2 x i64> %a
}
4464
4465declare <vscale x 2 x i64> @llvm.riscv.vluxei.mask.nxv2i64.nxv2i8(
4466  <vscale x 2 x i64>,
4467  ptr,
4468  <vscale x 2 x i8>,
4469  <vscale x 2 x i1>,
4470  iXLen,
4471  iXLen);
4472
; Masked vluxei: nxv2i64 result, e8 indices; %0 is the passthru, trailing iXLen 1 is the policy operand.
define <vscale x 2 x i64> @intrinsic_vluxei_mask_v_nxv2i64_nxv2i64_nxv2i8(<vscale x 2 x i64> %0, ptr %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i64_nxv2i64_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT:    vluxei8.v v8, (a0), v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vluxei.mask.nxv2i64.nxv2i8(
    <vscale x 2 x i64> %0,
    ptr %1,
    <vscale x 2 x i8> %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x i64> %a
}
4489
4490declare <vscale x 4 x i64> @llvm.riscv.vluxei.nxv4i64.nxv4i8(
4491  <vscale x 4 x i64>,
4492  ptr,
4493  <vscale x 4 x i8>,
4494  iXLen);
4495
; Unmasked vluxei: nxv4i64 result indexed by e8 (nxv4i8) offsets; passthru is undef.
define <vscale x 4 x i64> @intrinsic_vluxei_v_nxv4i64_nxv4i64_nxv4i8(ptr %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv4i64_nxv4i64_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
; CHECK-NEXT:    vluxei8.v v12, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vluxei.nxv4i64.nxv4i8(
    <vscale x 4 x i64> undef,
    ptr %0,
    <vscale x 4 x i8> %1,
    iXLen %2)

  ret <vscale x 4 x i64> %a
}
4512
4513declare <vscale x 4 x i64> @llvm.riscv.vluxei.mask.nxv4i64.nxv4i8(
4514  <vscale x 4 x i64>,
4515  ptr,
4516  <vscale x 4 x i8>,
4517  <vscale x 4 x i1>,
4518  iXLen,
4519  iXLen);
4520
; Masked vluxei: nxv4i64 result, e8 indices; %0 is the passthru, trailing iXLen 1 is the policy operand.
define <vscale x 4 x i64> @intrinsic_vluxei_mask_v_nxv4i64_nxv4i64_nxv4i8(<vscale x 4 x i64> %0, ptr %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i64_nxv4i64_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
; CHECK-NEXT:    vluxei8.v v8, (a0), v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vluxei.mask.nxv4i64.nxv4i8(
    <vscale x 4 x i64> %0,
    ptr %1,
    <vscale x 4 x i8> %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x i64> %a
}
4537
4538declare <vscale x 8 x i64> @llvm.riscv.vluxei.nxv8i64.nxv8i8(
4539  <vscale x 8 x i64>,
4540  ptr,
4541  <vscale x 8 x i8>,
4542  iXLen);
4543
; Unmasked vluxei: nxv8i64 result indexed by e8 (nxv8i8) offsets; passthru is undef.
define <vscale x 8 x i64> @intrinsic_vluxei_v_nxv8i64_nxv8i64_nxv8i8(ptr %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv8i64_nxv8i64_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
; CHECK-NEXT:    vluxei8.v v16, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vluxei.nxv8i64.nxv8i8(
    <vscale x 8 x i64> undef,
    ptr %0,
    <vscale x 8 x i8> %1,
    iXLen %2)

  ret <vscale x 8 x i64> %a
}
4560
4561declare <vscale x 8 x i64> @llvm.riscv.vluxei.mask.nxv8i64.nxv8i8(
4562  <vscale x 8 x i64>,
4563  ptr,
4564  <vscale x 8 x i8>,
4565  <vscale x 8 x i1>,
4566  iXLen,
4567  iXLen);
4568
; Masked vluxei: nxv8i64 result, e8 indices; %0 is the passthru, trailing iXLen 1 is the policy operand.
define <vscale x 8 x i64> @intrinsic_vluxei_mask_v_nxv8i64_nxv8i64_nxv8i8(<vscale x 8 x i64> %0, ptr %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i64_nxv8i64_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
; CHECK-NEXT:    vluxei8.v v8, (a0), v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vluxei.mask.nxv8i64.nxv8i8(
    <vscale x 8 x i64> %0,
    ptr %1,
    <vscale x 8 x i8> %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x i64> %a
}
4585
4586declare <vscale x 1 x half> @llvm.riscv.vluxei.nxv1f16.nxv1i8(
4587  <vscale x 1 x half>,
4588  ptr,
4589  <vscale x 1 x i8>,
4590  iXLen);
4591
; Unmasked vluxei: nxv1f16 result indexed by e8 (nxv1i8) offsets; passthru is undef.
define <vscale x 1 x half> @intrinsic_vluxei_v_nxv1f16_nxv1f16_nxv1i8(ptr %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv1f16_nxv1f16_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vluxei8.v v9, (a0), v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vluxei.nxv1f16.nxv1i8(
    <vscale x 1 x half> undef,
    ptr %0,
    <vscale x 1 x i8> %1,
    iXLen %2)

  ret <vscale x 1 x half> %a
}
4608
4609declare <vscale x 1 x half> @llvm.riscv.vluxei.mask.nxv1f16.nxv1i8(
4610  <vscale x 1 x half>,
4611  ptr,
4612  <vscale x 1 x i8>,
4613  <vscale x 1 x i1>,
4614  iXLen,
4615  iXLen);
4616
; Masked vluxei: nxv1f16 result, e8 indices; %0 is the passthru, trailing iXLen 1 is the policy operand.
define <vscale x 1 x half> @intrinsic_vluxei_mask_v_nxv1f16_nxv1f16_nxv1i8(<vscale x 1 x half> %0, ptr %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f16_nxv1f16_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT:    vluxei8.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vluxei.mask.nxv1f16.nxv1i8(
    <vscale x 1 x half> %0,
    ptr %1,
    <vscale x 1 x i8> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x half> %a
}
4633
4634declare <vscale x 2 x half> @llvm.riscv.vluxei.nxv2f16.nxv2i8(
4635  <vscale x 2 x half>,
4636  ptr,
4637  <vscale x 2 x i8>,
4638  iXLen);
4639
; Unmasked vluxei: nxv2f16 result indexed by e8 (nxv2i8) offsets; passthru is undef.
define <vscale x 2 x half> @intrinsic_vluxei_v_nxv2f16_nxv2f16_nxv2i8(ptr %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv2f16_nxv2f16_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT:    vluxei8.v v9, (a0), v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x half> @llvm.riscv.vluxei.nxv2f16.nxv2i8(
    <vscale x 2 x half> undef,
    ptr %0,
    <vscale x 2 x i8> %1,
    iXLen %2)

  ret <vscale x 2 x half> %a
}
4656
4657declare <vscale x 2 x half> @llvm.riscv.vluxei.mask.nxv2f16.nxv2i8(
4658  <vscale x 2 x half>,
4659  ptr,
4660  <vscale x 2 x i8>,
4661  <vscale x 2 x i1>,
4662  iXLen,
4663  iXLen);
4664
; Masked vluxei: nxv2f16 result, e8 indices; %0 is the passthru, trailing iXLen 1 is the policy operand.
define <vscale x 2 x half> @intrinsic_vluxei_mask_v_nxv2f16_nxv2f16_nxv2i8(<vscale x 2 x half> %0, ptr %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f16_nxv2f16_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT:    vluxei8.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x half> @llvm.riscv.vluxei.mask.nxv2f16.nxv2i8(
    <vscale x 2 x half> %0,
    ptr %1,
    <vscale x 2 x i8> %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x half> %a
}
4681
4682declare <vscale x 4 x half> @llvm.riscv.vluxei.nxv4f16.nxv4i8(
4683  <vscale x 4 x half>,
4684  ptr,
4685  <vscale x 4 x i8>,
4686  iXLen);
4687
4688define <vscale x 4 x half> @intrinsic_vluxei_v_nxv4f16_nxv4f16_nxv4i8(ptr %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
4689; CHECK-LABEL: intrinsic_vluxei_v_nxv4f16_nxv4f16_nxv4i8:
4690; CHECK:       # %bb.0: # %entry
4691; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
4692; CHECK-NEXT:    vluxei8.v v9, (a0), v8
4693; CHECK-NEXT:    vmv.v.v v8, v9
4694; CHECK-NEXT:    ret
4695entry:
4696  %a = call <vscale x 4 x half> @llvm.riscv.vluxei.nxv4f16.nxv4i8(
4697    <vscale x 4 x half> undef,
4698    ptr %0,
4699    <vscale x 4 x i8> %1,
4700    iXLen %2)
4701
4702  ret <vscale x 4 x half> %a
4703}
4704
4705declare <vscale x 4 x half> @llvm.riscv.vluxei.mask.nxv4f16.nxv4i8(
4706  <vscale x 4 x half>,
4707  ptr,
4708  <vscale x 4 x i8>,
4709  <vscale x 4 x i1>,
4710  iXLen,
4711  iXLen);
4712
4713define <vscale x 4 x half> @intrinsic_vluxei_mask_v_nxv4f16_nxv4f16_nxv4i8(<vscale x 4 x half> %0, ptr %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
4714; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f16_nxv4f16_nxv4i8:
4715; CHECK:       # %bb.0: # %entry
4716; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
4717; CHECK-NEXT:    vluxei8.v v8, (a0), v9, v0.t
4718; CHECK-NEXT:    ret
4719entry:
4720  %a = call <vscale x 4 x half> @llvm.riscv.vluxei.mask.nxv4f16.nxv4i8(
4721    <vscale x 4 x half> %0,
4722    ptr %1,
4723    <vscale x 4 x i8> %2,
4724    <vscale x 4 x i1> %3,
4725    iXLen %4, iXLen 1)
4726
4727  ret <vscale x 4 x half> %a
4728}
4729
4730declare <vscale x 8 x half> @llvm.riscv.vluxei.nxv8f16.nxv8i8(
4731  <vscale x 8 x half>,
4732  ptr,
4733  <vscale x 8 x i8>,
4734  iXLen);
4735
4736define <vscale x 8 x half> @intrinsic_vluxei_v_nxv8f16_nxv8f16_nxv8i8(ptr %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
4737; CHECK-LABEL: intrinsic_vluxei_v_nxv8f16_nxv8f16_nxv8i8:
4738; CHECK:       # %bb.0: # %entry
4739; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
4740; CHECK-NEXT:    vluxei8.v v10, (a0), v8
4741; CHECK-NEXT:    vmv.v.v v8, v10
4742; CHECK-NEXT:    ret
4743entry:
4744  %a = call <vscale x 8 x half> @llvm.riscv.vluxei.nxv8f16.nxv8i8(
4745    <vscale x 8 x half> undef,
4746    ptr %0,
4747    <vscale x 8 x i8> %1,
4748    iXLen %2)
4749
4750  ret <vscale x 8 x half> %a
4751}
4752
4753declare <vscale x 8 x half> @llvm.riscv.vluxei.mask.nxv8f16.nxv8i8(
4754  <vscale x 8 x half>,
4755  ptr,
4756  <vscale x 8 x i8>,
4757  <vscale x 8 x i1>,
4758  iXLen,
4759  iXLen);
4760
4761define <vscale x 8 x half> @intrinsic_vluxei_mask_v_nxv8f16_nxv8f16_nxv8i8(<vscale x 8 x half> %0, ptr %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
4762; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f16_nxv8f16_nxv8i8:
4763; CHECK:       # %bb.0: # %entry
4764; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
4765; CHECK-NEXT:    vluxei8.v v8, (a0), v10, v0.t
4766; CHECK-NEXT:    ret
4767entry:
4768  %a = call <vscale x 8 x half> @llvm.riscv.vluxei.mask.nxv8f16.nxv8i8(
4769    <vscale x 8 x half> %0,
4770    ptr %1,
4771    <vscale x 8 x i8> %2,
4772    <vscale x 8 x i1> %3,
4773    iXLen %4, iXLen 1)
4774
4775  ret <vscale x 8 x half> %a
4776}
4777
4778declare <vscale x 16 x half> @llvm.riscv.vluxei.nxv16f16.nxv16i8(
4779  <vscale x 16 x half>,
4780  ptr,
4781  <vscale x 16 x i8>,
4782  iXLen);
4783
4784define <vscale x 16 x half> @intrinsic_vluxei_v_nxv16f16_nxv16f16_nxv16i8(ptr %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
4785; CHECK-LABEL: intrinsic_vluxei_v_nxv16f16_nxv16f16_nxv16i8:
4786; CHECK:       # %bb.0: # %entry
4787; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
4788; CHECK-NEXT:    vluxei8.v v12, (a0), v8
4789; CHECK-NEXT:    vmv.v.v v8, v12
4790; CHECK-NEXT:    ret
4791entry:
4792  %a = call <vscale x 16 x half> @llvm.riscv.vluxei.nxv16f16.nxv16i8(
4793    <vscale x 16 x half> undef,
4794    ptr %0,
4795    <vscale x 16 x i8> %1,
4796    iXLen %2)
4797
4798  ret <vscale x 16 x half> %a
4799}
4800
4801declare <vscale x 16 x half> @llvm.riscv.vluxei.mask.nxv16f16.nxv16i8(
4802  <vscale x 16 x half>,
4803  ptr,
4804  <vscale x 16 x i8>,
4805  <vscale x 16 x i1>,
4806  iXLen,
4807  iXLen);
4808
4809define <vscale x 16 x half> @intrinsic_vluxei_mask_v_nxv16f16_nxv16f16_nxv16i8(<vscale x 16 x half> %0, ptr %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
4810; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16f16_nxv16f16_nxv16i8:
4811; CHECK:       # %bb.0: # %entry
4812; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
4813; CHECK-NEXT:    vluxei8.v v8, (a0), v12, v0.t
4814; CHECK-NEXT:    ret
4815entry:
4816  %a = call <vscale x 16 x half> @llvm.riscv.vluxei.mask.nxv16f16.nxv16i8(
4817    <vscale x 16 x half> %0,
4818    ptr %1,
4819    <vscale x 16 x i8> %2,
4820    <vscale x 16 x i1> %3,
4821    iXLen %4, iXLen 1)
4822
4823  ret <vscale x 16 x half> %a
4824}
4825
4826declare <vscale x 32 x half> @llvm.riscv.vluxei.nxv32f16.nxv32i8(
4827  <vscale x 32 x half>,
4828  ptr,
4829  <vscale x 32 x i8>,
4830  iXLen);
4831
4832define <vscale x 32 x half> @intrinsic_vluxei_v_nxv32f16_nxv32f16_nxv32i8(ptr %0, <vscale x 32 x i8> %1, iXLen %2) nounwind {
4833; CHECK-LABEL: intrinsic_vluxei_v_nxv32f16_nxv32f16_nxv32i8:
4834; CHECK:       # %bb.0: # %entry
4835; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
4836; CHECK-NEXT:    vluxei8.v v16, (a0), v8
4837; CHECK-NEXT:    vmv.v.v v8, v16
4838; CHECK-NEXT:    ret
4839entry:
4840  %a = call <vscale x 32 x half> @llvm.riscv.vluxei.nxv32f16.nxv32i8(
4841    <vscale x 32 x half> undef,
4842    ptr %0,
4843    <vscale x 32 x i8> %1,
4844    iXLen %2)
4845
4846  ret <vscale x 32 x half> %a
4847}
4848
4849declare <vscale x 32 x half> @llvm.riscv.vluxei.mask.nxv32f16.nxv32i8(
4850  <vscale x 32 x half>,
4851  ptr,
4852  <vscale x 32 x i8>,
4853  <vscale x 32 x i1>,
4854  iXLen,
4855  iXLen);
4856
4857define <vscale x 32 x half> @intrinsic_vluxei_mask_v_nxv32f16_nxv32f16_nxv32i8(<vscale x 32 x half> %0, ptr %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
4858; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv32f16_nxv32f16_nxv32i8:
4859; CHECK:       # %bb.0: # %entry
4860; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
4861; CHECK-NEXT:    vluxei8.v v8, (a0), v16, v0.t
4862; CHECK-NEXT:    ret
4863entry:
4864  %a = call <vscale x 32 x half> @llvm.riscv.vluxei.mask.nxv32f16.nxv32i8(
4865    <vscale x 32 x half> %0,
4866    ptr %1,
4867    <vscale x 32 x i8> %2,
4868    <vscale x 32 x i1> %3,
4869    iXLen %4, iXLen 1)
4870
4871  ret <vscale x 32 x half> %a
4872}
4873
; vluxei8 (indexed-unordered load, 8-bit indices) into f32 element vectors,
; LMUL mf2 through m8. Each type has an unmasked form (undef passthru) and a
; masked form (mask in v0; trailing iXLen 1 is the policy operand — the
; autogenerated checks show 'ta, ma' unmasked and 'ta, mu' masked).

; Unmasked vluxei8 load of <vscale x 1 x float> (passthru = undef).
declare <vscale x 1 x float> @llvm.riscv.vluxei.nxv1f32.nxv1i8(
  <vscale x 1 x float>,
  ptr,
  <vscale x 1 x i8>,
  iXLen);

define <vscale x 1 x float> @intrinsic_vluxei_v_nxv1f32_nxv1f32_nxv1i8(ptr %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv1f32_nxv1f32_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    vluxei8.v v9, (a0), v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vluxei.nxv1f32.nxv1i8(
    <vscale x 1 x float> undef,
    ptr %0,
    <vscale x 1 x i8> %1,
    iXLen %2)

  ret <vscale x 1 x float> %a
}

; Masked vluxei8 load of <vscale x 1 x float> (mask in v0, policy operand 1).
declare <vscale x 1 x float> @llvm.riscv.vluxei.mask.nxv1f32.nxv1i8(
  <vscale x 1 x float>,
  ptr,
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x float> @intrinsic_vluxei_mask_v_nxv1f32_nxv1f32_nxv1i8(<vscale x 1 x float> %0, ptr %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f32_nxv1f32_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT:    vluxei8.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vluxei.mask.nxv1f32.nxv1i8(
    <vscale x 1 x float> %0,
    ptr %1,
    <vscale x 1 x i8> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x float> %a
}

; Unmasked vluxei8 load of <vscale x 2 x float> (passthru = undef).
declare <vscale x 2 x float> @llvm.riscv.vluxei.nxv2f32.nxv2i8(
  <vscale x 2 x float>,
  ptr,
  <vscale x 2 x i8>,
  iXLen);

define <vscale x 2 x float> @intrinsic_vluxei_v_nxv2f32_nxv2f32_nxv2i8(ptr %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv2f32_nxv2f32_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vluxei8.v v9, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vluxei.nxv2f32.nxv2i8(
    <vscale x 2 x float> undef,
    ptr %0,
    <vscale x 2 x i8> %1,
    iXLen %2)

  ret <vscale x 2 x float> %a
}

; Masked vluxei8 load of <vscale x 2 x float> (mask in v0, policy operand 1).
declare <vscale x 2 x float> @llvm.riscv.vluxei.mask.nxv2f32.nxv2i8(
  <vscale x 2 x float>,
  ptr,
  <vscale x 2 x i8>,
  <vscale x 2 x i1>,
  iXLen,
  iXLen);

define <vscale x 2 x float> @intrinsic_vluxei_mask_v_nxv2f32_nxv2f32_nxv2i8(<vscale x 2 x float> %0, ptr %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f32_nxv2f32_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT:    vluxei8.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vluxei.mask.nxv2f32.nxv2i8(
    <vscale x 2 x float> %0,
    ptr %1,
    <vscale x 2 x i8> %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x float> %a
}

; Unmasked vluxei8 load of <vscale x 4 x float> (passthru = undef).
declare <vscale x 4 x float> @llvm.riscv.vluxei.nxv4f32.nxv4i8(
  <vscale x 4 x float>,
  ptr,
  <vscale x 4 x i8>,
  iXLen);

define <vscale x 4 x float> @intrinsic_vluxei_v_nxv4f32_nxv4f32_nxv4i8(ptr %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv4f32_nxv4f32_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT:    vluxei8.v v10, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x float> @llvm.riscv.vluxei.nxv4f32.nxv4i8(
    <vscale x 4 x float> undef,
    ptr %0,
    <vscale x 4 x i8> %1,
    iXLen %2)

  ret <vscale x 4 x float> %a
}

; Masked vluxei8 load of <vscale x 4 x float> (mask in v0, policy operand 1).
declare <vscale x 4 x float> @llvm.riscv.vluxei.mask.nxv4f32.nxv4i8(
  <vscale x 4 x float>,
  ptr,
  <vscale x 4 x i8>,
  <vscale x 4 x i1>,
  iXLen,
  iXLen);

define <vscale x 4 x float> @intrinsic_vluxei_mask_v_nxv4f32_nxv4f32_nxv4i8(<vscale x 4 x float> %0, ptr %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f32_nxv4f32_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT:    vluxei8.v v8, (a0), v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x float> @llvm.riscv.vluxei.mask.nxv4f32.nxv4i8(
    <vscale x 4 x float> %0,
    ptr %1,
    <vscale x 4 x i8> %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x float> %a
}

; Unmasked vluxei8 load of <vscale x 8 x float> (passthru = undef).
declare <vscale x 8 x float> @llvm.riscv.vluxei.nxv8f32.nxv8i8(
  <vscale x 8 x float>,
  ptr,
  <vscale x 8 x i8>,
  iXLen);

define <vscale x 8 x float> @intrinsic_vluxei_v_nxv8f32_nxv8f32_nxv8i8(ptr %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv8f32_nxv8f32_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT:    vluxei8.v v12, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vluxei.nxv8f32.nxv8i8(
    <vscale x 8 x float> undef,
    ptr %0,
    <vscale x 8 x i8> %1,
    iXLen %2)

  ret <vscale x 8 x float> %a
}

; Masked vluxei8 load of <vscale x 8 x float> (mask in v0, policy operand 1).
declare <vscale x 8 x float> @llvm.riscv.vluxei.mask.nxv8f32.nxv8i8(
  <vscale x 8 x float>,
  ptr,
  <vscale x 8 x i8>,
  <vscale x 8 x i1>,
  iXLen,
  iXLen);

define <vscale x 8 x float> @intrinsic_vluxei_mask_v_nxv8f32_nxv8f32_nxv8i8(<vscale x 8 x float> %0, ptr %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f32_nxv8f32_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT:    vluxei8.v v8, (a0), v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vluxei.mask.nxv8f32.nxv8i8(
    <vscale x 8 x float> %0,
    ptr %1,
    <vscale x 8 x i8> %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x float> %a
}

; Unmasked vluxei8 load of <vscale x 16 x float> (passthru = undef).
declare <vscale x 16 x float> @llvm.riscv.vluxei.nxv16f32.nxv16i8(
  <vscale x 16 x float>,
  ptr,
  <vscale x 16 x i8>,
  iXLen);

define <vscale x 16 x float> @intrinsic_vluxei_v_nxv16f32_nxv16f32_nxv16i8(ptr %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv16f32_nxv16f32_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT:    vluxei8.v v16, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x float> @llvm.riscv.vluxei.nxv16f32.nxv16i8(
    <vscale x 16 x float> undef,
    ptr %0,
    <vscale x 16 x i8> %1,
    iXLen %2)

  ret <vscale x 16 x float> %a
}

; Masked vluxei8 load of <vscale x 16 x float> (mask in v0, policy operand 1).
declare <vscale x 16 x float> @llvm.riscv.vluxei.mask.nxv16f32.nxv16i8(
  <vscale x 16 x float>,
  ptr,
  <vscale x 16 x i8>,
  <vscale x 16 x i1>,
  iXLen,
  iXLen);

define <vscale x 16 x float> @intrinsic_vluxei_mask_v_nxv16f32_nxv16f32_nxv16i8(<vscale x 16 x float> %0, ptr %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16f32_nxv16f32_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
; CHECK-NEXT:    vluxei8.v v8, (a0), v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x float> @llvm.riscv.vluxei.mask.nxv16f32.nxv16i8(
    <vscale x 16 x float> %0,
    ptr %1,
    <vscale x 16 x i8> %2,
    <vscale x 16 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 16 x float> %a
}
5113
; vluxei8 (indexed-unordered load, 8-bit indices) into f64 element vectors,
; LMUL m1 through m8. Each type has an unmasked form (undef passthru) and a
; masked form (mask in v0; trailing iXLen 1 is the policy operand — the
; autogenerated checks show 'ta, ma' unmasked and 'ta, mu' masked).

; Unmasked vluxei8 load of <vscale x 1 x double> (passthru = undef).
declare <vscale x 1 x double> @llvm.riscv.vluxei.nxv1f64.nxv1i8(
  <vscale x 1 x double>,
  ptr,
  <vscale x 1 x i8>,
  iXLen);

define <vscale x 1 x double> @intrinsic_vluxei_v_nxv1f64_nxv1f64_nxv1i8(ptr %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv1f64_nxv1f64_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT:    vluxei8.v v9, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vluxei.nxv1f64.nxv1i8(
    <vscale x 1 x double> undef,
    ptr %0,
    <vscale x 1 x i8> %1,
    iXLen %2)

  ret <vscale x 1 x double> %a
}

; Masked vluxei8 load of <vscale x 1 x double> (mask in v0, policy operand 1).
declare <vscale x 1 x double> @llvm.riscv.vluxei.mask.nxv1f64.nxv1i8(
  <vscale x 1 x double>,
  ptr,
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x double> @intrinsic_vluxei_mask_v_nxv1f64_nxv1f64_nxv1i8(<vscale x 1 x double> %0, ptr %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f64_nxv1f64_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT:    vluxei8.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vluxei.mask.nxv1f64.nxv1i8(
    <vscale x 1 x double> %0,
    ptr %1,
    <vscale x 1 x i8> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x double> %a
}

; Unmasked vluxei8 load of <vscale x 2 x double> (passthru = undef).
declare <vscale x 2 x double> @llvm.riscv.vluxei.nxv2f64.nxv2i8(
  <vscale x 2 x double>,
  ptr,
  <vscale x 2 x i8>,
  iXLen);

define <vscale x 2 x double> @intrinsic_vluxei_v_nxv2f64_nxv2f64_nxv2i8(ptr %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv2f64_nxv2f64_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT:    vluxei8.v v10, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x double> @llvm.riscv.vluxei.nxv2f64.nxv2i8(
    <vscale x 2 x double> undef,
    ptr %0,
    <vscale x 2 x i8> %1,
    iXLen %2)

  ret <vscale x 2 x double> %a
}

; Masked vluxei8 load of <vscale x 2 x double> (mask in v0, policy operand 1).
declare <vscale x 2 x double> @llvm.riscv.vluxei.mask.nxv2f64.nxv2i8(
  <vscale x 2 x double>,
  ptr,
  <vscale x 2 x i8>,
  <vscale x 2 x i1>,
  iXLen,
  iXLen);

define <vscale x 2 x double> @intrinsic_vluxei_mask_v_nxv2f64_nxv2f64_nxv2i8(<vscale x 2 x double> %0, ptr %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f64_nxv2f64_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT:    vluxei8.v v8, (a0), v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x double> @llvm.riscv.vluxei.mask.nxv2f64.nxv2i8(
    <vscale x 2 x double> %0,
    ptr %1,
    <vscale x 2 x i8> %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x double> %a
}

; Unmasked vluxei8 load of <vscale x 4 x double> (passthru = undef).
declare <vscale x 4 x double> @llvm.riscv.vluxei.nxv4f64.nxv4i8(
  <vscale x 4 x double>,
  ptr,
  <vscale x 4 x i8>,
  iXLen);

define <vscale x 4 x double> @intrinsic_vluxei_v_nxv4f64_nxv4f64_nxv4i8(ptr %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv4f64_nxv4f64_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
; CHECK-NEXT:    vluxei8.v v12, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x double> @llvm.riscv.vluxei.nxv4f64.nxv4i8(
    <vscale x 4 x double> undef,
    ptr %0,
    <vscale x 4 x i8> %1,
    iXLen %2)

  ret <vscale x 4 x double> %a
}

; Masked vluxei8 load of <vscale x 4 x double> (mask in v0, policy operand 1).
declare <vscale x 4 x double> @llvm.riscv.vluxei.mask.nxv4f64.nxv4i8(
  <vscale x 4 x double>,
  ptr,
  <vscale x 4 x i8>,
  <vscale x 4 x i1>,
  iXLen,
  iXLen);

define <vscale x 4 x double> @intrinsic_vluxei_mask_v_nxv4f64_nxv4f64_nxv4i8(<vscale x 4 x double> %0, ptr %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f64_nxv4f64_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
; CHECK-NEXT:    vluxei8.v v8, (a0), v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x double> @llvm.riscv.vluxei.mask.nxv4f64.nxv4i8(
    <vscale x 4 x double> %0,
    ptr %1,
    <vscale x 4 x i8> %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x double> %a
}

; Unmasked vluxei8 load of <vscale x 8 x double> (passthru = undef).
declare <vscale x 8 x double> @llvm.riscv.vluxei.nxv8f64.nxv8i8(
  <vscale x 8 x double>,
  ptr,
  <vscale x 8 x i8>,
  iXLen);

define <vscale x 8 x double> @intrinsic_vluxei_v_nxv8f64_nxv8f64_nxv8i8(ptr %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vluxei_v_nxv8f64_nxv8f64_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
; CHECK-NEXT:    vluxei8.v v16, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x double> @llvm.riscv.vluxei.nxv8f64.nxv8i8(
    <vscale x 8 x double> undef,
    ptr %0,
    <vscale x 8 x i8> %1,
    iXLen %2)

  ret <vscale x 8 x double> %a
}

; Masked vluxei8 load of <vscale x 8 x double> (mask in v0, policy operand 1).
declare <vscale x 8 x double> @llvm.riscv.vluxei.mask.nxv8f64.nxv8i8(
  <vscale x 8 x double>,
  ptr,
  <vscale x 8 x i8>,
  <vscale x 8 x i1>,
  iXLen,
  iXLen);

define <vscale x 8 x double> @intrinsic_vluxei_mask_v_nxv8f64_nxv8f64_nxv8i8(<vscale x 8 x double> %0, ptr %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f64_nxv8f64_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
; CHECK-NEXT:    vluxei8.v v8, (a0), v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x double> @llvm.riscv.vluxei.mask.nxv8f64.nxv8i8(
    <vscale x 8 x double> %0,
    ptr %1,
    <vscale x 8 x i8> %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x double> %a
}
5305