xref: /llvm-project/llvm/test/CodeGen/RISCV/rvv/vloxei.ll (revision 84a3739ac072c95af9fa80e36d9e0f52d11e28eb)
1; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zvfhmin,+zvfbfmin \
3; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
4; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfhmin,+zvfbfmin \
5; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
6
; Indexed-ordered loads (vloxei32.v) producing i8 element vectors from i32
; index vectors (index EEW 32 > data EEW 8, so the index register group is
; wider than the data group and the result is loaded into a temporary and
; moved into v8).  Unmasked tests pass an undef passthru ("ta, ma" in the
; emitted vsetvli); masked tests pass an explicit passthru, a mask in v0,
; and policy iXLen 1 (tail agnostic, mask undisturbed -> "ta, mu").
declare <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8.nxv1i32(
  <vscale x 1 x i8>,
  ptr,
  <vscale x 1 x i32>,
  iXLen);

define <vscale x 1 x i8> @intrinsic_vloxei_v_nxv1i8_nxv1i8_nxv1i32(ptr %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vloxei_v_nxv1i8_nxv1i8_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vloxei32.v v9, (a0), v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8.nxv1i32(
    <vscale x 1 x i8> undef,
    ptr %0,
    <vscale x 1 x i32> %1,
    iXLen %2)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vloxei.mask.nxv1i8.nxv1i32(
  <vscale x 1 x i8>,
  ptr,
  <vscale x 1 x i32>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x i8> @intrinsic_vloxei_mask_v_nxv1i8_nxv1i8_nxv1i32(<vscale x 1 x i8> %0, ptr %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i8_nxv1i8_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT:    vloxei32.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vloxei.mask.nxv1i8.nxv1i32(
    <vscale x 1 x i8> %0,
    ptr %1,
    <vscale x 1 x i32> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 2 x i8> @llvm.riscv.vloxei.nxv2i8.nxv2i32(
  <vscale x 2 x i8>,
  ptr,
  <vscale x 2 x i32>,
  iXLen);

define <vscale x 2 x i8> @intrinsic_vloxei_v_nxv2i8_nxv2i8_nxv2i32(ptr %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vloxei_v_nxv2i8_nxv2i8_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT:    vloxei32.v v9, (a0), v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vloxei.nxv2i8.nxv2i32(
    <vscale x 2 x i8> undef,
    ptr %0,
    <vscale x 2 x i32> %1,
    iXLen %2)

  ret <vscale x 2 x i8> %a
}

declare <vscale x 2 x i8> @llvm.riscv.vloxei.mask.nxv2i8.nxv2i32(
  <vscale x 2 x i8>,
  ptr,
  <vscale x 2 x i32>,
  <vscale x 2 x i1>,
  iXLen,
  iXLen);

define <vscale x 2 x i8> @intrinsic_vloxei_mask_v_nxv2i8_nxv2i8_nxv2i32(<vscale x 2 x i8> %0, ptr %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i8_nxv2i8_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT:    vloxei32.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vloxei.mask.nxv2i8.nxv2i32(
    <vscale x 2 x i8> %0,
    ptr %1,
    <vscale x 2 x i32> %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x i8> %a
}

declare <vscale x 4 x i8> @llvm.riscv.vloxei.nxv4i8.nxv4i32(
  <vscale x 4 x i8>,
  ptr,
  <vscale x 4 x i32>,
  iXLen);

define <vscale x 4 x i8> @intrinsic_vloxei_v_nxv4i8_nxv4i8_nxv4i32(ptr %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vloxei_v_nxv4i8_nxv4i8_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vloxei32.v v10, (a0), v8
; CHECK-NEXT:    vmv1r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vloxei.nxv4i8.nxv4i32(
    <vscale x 4 x i8> undef,
    ptr %0,
    <vscale x 4 x i32> %1,
    iXLen %2)

  ret <vscale x 4 x i8> %a
}

declare <vscale x 4 x i8> @llvm.riscv.vloxei.mask.nxv4i8.nxv4i32(
  <vscale x 4 x i8>,
  ptr,
  <vscale x 4 x i32>,
  <vscale x 4 x i1>,
  iXLen,
  iXLen);

define <vscale x 4 x i8> @intrinsic_vloxei_mask_v_nxv4i8_nxv4i8_nxv4i32(<vscale x 4 x i8> %0, ptr %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i8_nxv4i8_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT:    vloxei32.v v8, (a0), v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vloxei.mask.nxv4i8.nxv4i32(
    <vscale x 4 x i8> %0,
    ptr %1,
    <vscale x 4 x i32> %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vloxei.nxv8i8.nxv8i32(
  <vscale x 8 x i8>,
  ptr,
  <vscale x 8 x i32>,
  iXLen);

define <vscale x 8 x i8> @intrinsic_vloxei_v_nxv8i8_nxv8i8_nxv8i32(ptr %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vloxei_v_nxv8i8_nxv8i8_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT:    vloxei32.v v12, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vloxei.nxv8i8.nxv8i32(
    <vscale x 8 x i8> undef,
    ptr %0,
    <vscale x 8 x i32> %1,
    iXLen %2)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vloxei.mask.nxv8i8.nxv8i32(
  <vscale x 8 x i8>,
  ptr,
  <vscale x 8 x i32>,
  <vscale x 8 x i1>,
  iXLen,
  iXLen);

define <vscale x 8 x i8> @intrinsic_vloxei_mask_v_nxv8i8_nxv8i8_nxv8i32(<vscale x 8 x i8> %0, ptr %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i8_nxv8i8_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT:    vloxei32.v v8, (a0), v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vloxei.mask.nxv8i8.nxv8i32(
    <vscale x 8 x i8> %0,
    ptr %1,
    <vscale x 8 x i32> %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 16 x i8> @llvm.riscv.vloxei.nxv16i8.nxv16i32(
  <vscale x 16 x i8>,
  ptr,
  <vscale x 16 x i32>,
  iXLen);

define <vscale x 16 x i8> @intrinsic_vloxei_v_nxv16i8_nxv16i8_nxv16i32(ptr %0, <vscale x 16 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vloxei_v_nxv16i8_nxv16i8_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT:    vloxei32.v v16, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vloxei.nxv16i8.nxv16i32(
    <vscale x 16 x i8> undef,
    ptr %0,
    <vscale x 16 x i32> %1,
    iXLen %2)

  ret <vscale x 16 x i8> %a
}

declare <vscale x 16 x i8> @llvm.riscv.vloxei.mask.nxv16i8.nxv16i32(
  <vscale x 16 x i8>,
  ptr,
  <vscale x 16 x i32>,
  <vscale x 16 x i1>,
  iXLen,
  iXLen);

define <vscale x 16 x i8> @intrinsic_vloxei_mask_v_nxv16i8_nxv16i8_nxv16i32(<vscale x 16 x i8> %0, ptr %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16i8_nxv16i8_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT:    vloxei32.v v8, (a0), v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vloxei.mask.nxv16i8.nxv16i32(
    <vscale x 16 x i8> %0,
    ptr %1,
    <vscale x 16 x i32> %2,
    <vscale x 16 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 16 x i8> %a
}
246
; Indexed-ordered loads (vloxei32.v) producing i16 element vectors from i32
; index vectors (index EEW 32 > data EEW 16).  Same unmasked/masked pattern
; as the i8 tests above: undef passthru -> "ta, ma"; explicit passthru with
; mask and policy iXLen 1 -> "ta, mu".
declare <vscale x 1 x i16> @llvm.riscv.vloxei.nxv1i16.nxv1i32(
  <vscale x 1 x i16>,
  ptr,
  <vscale x 1 x i32>,
  iXLen);

define <vscale x 1 x i16> @intrinsic_vloxei_v_nxv1i16_nxv1i16_nxv1i32(ptr %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vloxei_v_nxv1i16_nxv1i16_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vloxei32.v v9, (a0), v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vloxei.nxv1i16.nxv1i32(
    <vscale x 1 x i16> undef,
    ptr %0,
    <vscale x 1 x i32> %1,
    iXLen %2)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vloxei.mask.nxv1i16.nxv1i32(
  <vscale x 1 x i16>,
  ptr,
  <vscale x 1 x i32>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x i16> @intrinsic_vloxei_mask_v_nxv1i16_nxv1i16_nxv1i32(<vscale x 1 x i16> %0, ptr %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i16_nxv1i16_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT:    vloxei32.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vloxei.mask.nxv1i16.nxv1i32(
    <vscale x 1 x i16> %0,
    ptr %1,
    <vscale x 1 x i32> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vloxei.nxv2i16.nxv2i32(
  <vscale x 2 x i16>,
  ptr,
  <vscale x 2 x i32>,
  iXLen);

define <vscale x 2 x i16> @intrinsic_vloxei_v_nxv2i16_nxv2i16_nxv2i32(ptr %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vloxei_v_nxv2i16_nxv2i16_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT:    vloxei32.v v9, (a0), v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vloxei.nxv2i16.nxv2i32(
    <vscale x 2 x i16> undef,
    ptr %0,
    <vscale x 2 x i32> %1,
    iXLen %2)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vloxei.mask.nxv2i16.nxv2i32(
  <vscale x 2 x i16>,
  ptr,
  <vscale x 2 x i32>,
  <vscale x 2 x i1>,
  iXLen,
  iXLen);

define <vscale x 2 x i16> @intrinsic_vloxei_mask_v_nxv2i16_nxv2i16_nxv2i32(<vscale x 2 x i16> %0, ptr %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i16_nxv2i16_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT:    vloxei32.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vloxei.mask.nxv2i16.nxv2i32(
    <vscale x 2 x i16> %0,
    ptr %1,
    <vscale x 2 x i32> %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vloxei.nxv4i16.nxv4i32(
  <vscale x 4 x i16>,
  ptr,
  <vscale x 4 x i32>,
  iXLen);

define <vscale x 4 x i16> @intrinsic_vloxei_v_nxv4i16_nxv4i16_nxv4i32(ptr %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vloxei_v_nxv4i16_nxv4i16_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vloxei32.v v10, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vloxei.nxv4i16.nxv4i32(
    <vscale x 4 x i16> undef,
    ptr %0,
    <vscale x 4 x i32> %1,
    iXLen %2)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vloxei.mask.nxv4i16.nxv4i32(
  <vscale x 4 x i16>,
  ptr,
  <vscale x 4 x i32>,
  <vscale x 4 x i1>,
  iXLen,
  iXLen);

define <vscale x 4 x i16> @intrinsic_vloxei_mask_v_nxv4i16_nxv4i16_nxv4i32(<vscale x 4 x i16> %0, ptr %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i16_nxv4i16_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT:    vloxei32.v v8, (a0), v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vloxei.mask.nxv4i16.nxv4i32(
    <vscale x 4 x i16> %0,
    ptr %1,
    <vscale x 4 x i32> %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vloxei.nxv8i16.nxv8i32(
  <vscale x 8 x i16>,
  ptr,
  <vscale x 8 x i32>,
  iXLen);

define <vscale x 8 x i16> @intrinsic_vloxei_v_nxv8i16_nxv8i16_nxv8i32(ptr %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vloxei_v_nxv8i16_nxv8i16_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT:    vloxei32.v v12, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vloxei.nxv8i16.nxv8i32(
    <vscale x 8 x i16> undef,
    ptr %0,
    <vscale x 8 x i32> %1,
    iXLen %2)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vloxei.mask.nxv8i16.nxv8i32(
  <vscale x 8 x i16>,
  ptr,
  <vscale x 8 x i32>,
  <vscale x 8 x i1>,
  iXLen,
  iXLen);

define <vscale x 8 x i16> @intrinsic_vloxei_mask_v_nxv8i16_nxv8i16_nxv8i32(<vscale x 8 x i16> %0, ptr %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i16_nxv8i16_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT:    vloxei32.v v8, (a0), v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vloxei.mask.nxv8i16.nxv8i32(
    <vscale x 8 x i16> %0,
    ptr %1,
    <vscale x 8 x i32> %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vloxei.nxv16i16.nxv16i32(
  <vscale x 16 x i16>,
  ptr,
  <vscale x 16 x i32>,
  iXLen);

define <vscale x 16 x i16> @intrinsic_vloxei_v_nxv16i16_nxv16i16_nxv16i32(ptr %0, <vscale x 16 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vloxei_v_nxv16i16_nxv16i16_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT:    vloxei32.v v16, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vloxei.nxv16i16.nxv16i32(
    <vscale x 16 x i16> undef,
    ptr %0,
    <vscale x 16 x i32> %1,
    iXLen %2)

  ret <vscale x 16 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vloxei.mask.nxv16i16.nxv16i32(
  <vscale x 16 x i16>,
  ptr,
  <vscale x 16 x i32>,
  <vscale x 16 x i1>,
  iXLen,
  iXLen);

define <vscale x 16 x i16> @intrinsic_vloxei_mask_v_nxv16i16_nxv16i16_nxv16i32(<vscale x 16 x i16> %0, ptr %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16i16_nxv16i16_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT:    vloxei32.v v8, (a0), v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vloxei.mask.nxv16i16.nxv16i32(
    <vscale x 16 x i16> %0,
    ptr %1,
    <vscale x 16 x i32> %2,
    <vscale x 16 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 16 x i16> %a
}
486
; Indexed-ordered loads (vloxei32.v) producing i32 element vectors from i32
; index vectors.  Index EEW equals data EEW here, so in the unmasked tests
; the destination can reuse the index register group directly
; (vloxei32.v v8, (a0), v8 — no vmv needed).
declare <vscale x 1 x i32> @llvm.riscv.vloxei.nxv1i32.nxv1i32(
  <vscale x 1 x i32>,
  ptr,
  <vscale x 1 x i32>,
  iXLen);

define <vscale x 1 x i32> @intrinsic_vloxei_v_nxv1i32_nxv1i32_nxv1i32(ptr %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vloxei_v_nxv1i32_nxv1i32_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    vloxei32.v v8, (a0), v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vloxei.nxv1i32.nxv1i32(
    <vscale x 1 x i32> undef,
    ptr %0,
    <vscale x 1 x i32> %1,
    iXLen %2)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vloxei.mask.nxv1i32.nxv1i32(
  <vscale x 1 x i32>,
  ptr,
  <vscale x 1 x i32>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x i32> @intrinsic_vloxei_mask_v_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, ptr %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i32_nxv1i32_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT:    vloxei32.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vloxei.mask.nxv1i32.nxv1i32(
    <vscale x 1 x i32> %0,
    ptr %1,
    <vscale x 1 x i32> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vloxei.nxv2i32.nxv2i32(
  <vscale x 2 x i32>,
  ptr,
  <vscale x 2 x i32>,
  iXLen);

define <vscale x 2 x i32> @intrinsic_vloxei_v_nxv2i32_nxv2i32_nxv2i32(ptr %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vloxei_v_nxv2i32_nxv2i32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vloxei32.v v8, (a0), v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vloxei.nxv2i32.nxv2i32(
    <vscale x 2 x i32> undef,
    ptr %0,
    <vscale x 2 x i32> %1,
    iXLen %2)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vloxei.mask.nxv2i32.nxv2i32(
  <vscale x 2 x i32>,
  ptr,
  <vscale x 2 x i32>,
  <vscale x 2 x i1>,
  iXLen,
  iXLen);

define <vscale x 2 x i32> @intrinsic_vloxei_mask_v_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, ptr %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i32_nxv2i32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT:    vloxei32.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vloxei.mask.nxv2i32.nxv2i32(
    <vscale x 2 x i32> %0,
    ptr %1,
    <vscale x 2 x i32> %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vloxei.nxv4i32.nxv4i32(
  <vscale x 4 x i32>,
  ptr,
  <vscale x 4 x i32>,
  iXLen);

define <vscale x 4 x i32> @intrinsic_vloxei_v_nxv4i32_nxv4i32_nxv4i32(ptr %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vloxei_v_nxv4i32_nxv4i32_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT:    vloxei32.v v8, (a0), v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vloxei.nxv4i32.nxv4i32(
    <vscale x 4 x i32> undef,
    ptr %0,
    <vscale x 4 x i32> %1,
    iXLen %2)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vloxei.mask.nxv4i32.nxv4i32(
  <vscale x 4 x i32>,
  ptr,
  <vscale x 4 x i32>,
  <vscale x 4 x i1>,
  iXLen,
  iXLen);

define <vscale x 4 x i32> @intrinsic_vloxei_mask_v_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, ptr %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i32_nxv4i32_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT:    vloxei32.v v8, (a0), v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vloxei.mask.nxv4i32.nxv4i32(
    <vscale x 4 x i32> %0,
    ptr %1,
    <vscale x 4 x i32> %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vloxei.nxv8i32.nxv8i32(
  <vscale x 8 x i32>,
  ptr,
  <vscale x 8 x i32>,
  iXLen);

define <vscale x 8 x i32> @intrinsic_vloxei_v_nxv8i32_nxv8i32_nxv8i32(ptr %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vloxei_v_nxv8i32_nxv8i32_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT:    vloxei32.v v8, (a0), v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vloxei.nxv8i32.nxv8i32(
    <vscale x 8 x i32> undef,
    ptr %0,
    <vscale x 8 x i32> %1,
    iXLen %2)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vloxei.mask.nxv8i32.nxv8i32(
  <vscale x 8 x i32>,
  ptr,
  <vscale x 8 x i32>,
  <vscale x 8 x i1>,
  iXLen,
  iXLen);

define <vscale x 8 x i32> @intrinsic_vloxei_mask_v_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, ptr %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i32_nxv8i32_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT:    vloxei32.v v8, (a0), v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vloxei.mask.nxv8i32.nxv8i32(
    <vscale x 8 x i32> %0,
    ptr %1,
    <vscale x 8 x i32> %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 16 x i32> @llvm.riscv.vloxei.nxv16i32.nxv16i32(
  <vscale x 16 x i32>,
  ptr,
  <vscale x 16 x i32>,
  iXLen);

define <vscale x 16 x i32> @intrinsic_vloxei_v_nxv16i32_nxv16i32_nxv16i32(ptr %0, <vscale x 16 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vloxei_v_nxv16i32_nxv16i32_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT:    vloxei32.v v8, (a0), v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vloxei.nxv16i32.nxv16i32(
    <vscale x 16 x i32> undef,
    ptr %0,
    <vscale x 16 x i32> %1,
    iXLen %2)

  ret <vscale x 16 x i32> %a
}

declare <vscale x 16 x i32> @llvm.riscv.vloxei.mask.nxv16i32.nxv16i32(
  <vscale x 16 x i32>,
  ptr,
  <vscale x 16 x i32>,
  <vscale x 16 x i1>,
  iXLen,
  iXLen);

define <vscale x 16 x i32> @intrinsic_vloxei_mask_v_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, ptr %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16i32_nxv16i32_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
; CHECK-NEXT:    vloxei32.v v8, (a0), v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vloxei.mask.nxv16i32.nxv16i32(
    <vscale x 16 x i32> %0,
    ptr %1,
    <vscale x 16 x i32> %2,
    <vscale x 16 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 16 x i32> %a
}
721
; Indexed-ordered loads (vloxei32.v) producing i64 element vectors from i32
; index vectors (index EEW 32 < data EEW 64, so the data register group is
; wider than the index group).  Result is loaded into a separate group and
; moved to v8 with vmv.v.v in the unmasked cases.
declare <vscale x 1 x i64> @llvm.riscv.vloxei.nxv1i64.nxv1i32(
  <vscale x 1 x i64>,
  ptr,
  <vscale x 1 x i32>,
  iXLen);

define <vscale x 1 x i64> @intrinsic_vloxei_v_nxv1i64_nxv1i64_nxv1i32(ptr %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vloxei_v_nxv1i64_nxv1i64_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT:    vloxei32.v v9, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vloxei.nxv1i64.nxv1i32(
    <vscale x 1 x i64> undef,
    ptr %0,
    <vscale x 1 x i32> %1,
    iXLen %2)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vloxei.mask.nxv1i64.nxv1i32(
  <vscale x 1 x i64>,
  ptr,
  <vscale x 1 x i32>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x i64> @intrinsic_vloxei_mask_v_nxv1i64_nxv1i64_nxv1i32(<vscale x 1 x i64> %0, ptr %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i64_nxv1i64_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT:    vloxei32.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vloxei.mask.nxv1i64.nxv1i32(
    <vscale x 1 x i64> %0,
    ptr %1,
    <vscale x 1 x i32> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vloxei.nxv2i64.nxv2i32(
  <vscale x 2 x i64>,
  ptr,
  <vscale x 2 x i32>,
  iXLen);

define <vscale x 2 x i64> @intrinsic_vloxei_v_nxv2i64_nxv2i64_nxv2i32(ptr %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vloxei_v_nxv2i64_nxv2i64_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT:    vloxei32.v v10, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vloxei.nxv2i64.nxv2i32(
    <vscale x 2 x i64> undef,
    ptr %0,
    <vscale x 2 x i32> %1,
    iXLen %2)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vloxei.mask.nxv2i64.nxv2i32(
  <vscale x 2 x i64>,
  ptr,
  <vscale x 2 x i32>,
  <vscale x 2 x i1>,
  iXLen,
  iXLen);

define <vscale x 2 x i64> @intrinsic_vloxei_mask_v_nxv2i64_nxv2i64_nxv2i32(<vscale x 2 x i64> %0, ptr %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i64_nxv2i64_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT:    vloxei32.v v8, (a0), v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vloxei.mask.nxv2i64.nxv2i32(
    <vscale x 2 x i64> %0,
    ptr %1,
    <vscale x 2 x i32> %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vloxei.nxv4i64.nxv4i32(
  <vscale x 4 x i64>,
  ptr,
  <vscale x 4 x i32>,
  iXLen);

define <vscale x 4 x i64> @intrinsic_vloxei_v_nxv4i64_nxv4i64_nxv4i32(ptr %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vloxei_v_nxv4i64_nxv4i64_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
; CHECK-NEXT:    vloxei32.v v12, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vloxei.nxv4i64.nxv4i32(
    <vscale x 4 x i64> undef,
    ptr %0,
    <vscale x 4 x i32> %1,
    iXLen %2)

  ret <vscale x 4 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vloxei.mask.nxv4i64.nxv4i32(
  <vscale x 4 x i64>,
  ptr,
  <vscale x 4 x i32>,
  <vscale x 4 x i1>,
  iXLen,
  iXLen);

define <vscale x 4 x i64> @intrinsic_vloxei_mask_v_nxv4i64_nxv4i64_nxv4i32(<vscale x 4 x i64> %0, ptr %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i64_nxv4i64_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
; CHECK-NEXT:    vloxei32.v v8, (a0), v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vloxei.mask.nxv4i64.nxv4i32(
    <vscale x 4 x i64> %0,
    ptr %1,
    <vscale x 4 x i32> %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.vloxei.nxv8i64.nxv8i32(
  <vscale x 8 x i64>,
  ptr,
  <vscale x 8 x i32>,
  iXLen);

define <vscale x 8 x i64> @intrinsic_vloxei_v_nxv8i64_nxv8i64_nxv8i32(ptr %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vloxei_v_nxv8i64_nxv8i64_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
; CHECK-NEXT:    vloxei32.v v16, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vloxei.nxv8i64.nxv8i32(
    <vscale x 8 x i64> undef,
    ptr %0,
    <vscale x 8 x i32> %1,
    iXLen %2)

  ret <vscale x 8 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.vloxei.mask.nxv8i64.nxv8i32(
  <vscale x 8 x i64>,
  ptr,
  <vscale x 8 x i32>,
  <vscale x 8 x i1>,
  iXLen,
  iXLen);

define <vscale x 8 x i64> @intrinsic_vloxei_mask_v_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i64> %0, ptr %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i64_nxv8i64_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
; CHECK-NEXT:    vloxei32.v v8, (a0), v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vloxei.mask.nxv8i64.nxv8i32(
    <vscale x 8 x i64> %0,
    ptr %1,
    <vscale x 8 x i32> %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x i64> %a
}
913
; Indexed-ordered loads (vloxei32.v) producing f16 element vectors from i32
; index vectors.  Half-precision vector types are available via the
; +zvfhmin attribute on the RUN lines; the index/data EEW relationship and
; masked/unmasked structure mirror the i16 tests above.
declare <vscale x 1 x half> @llvm.riscv.vloxei.nxv1f16.nxv1i32(
  <vscale x 1 x half>,
  ptr,
  <vscale x 1 x i32>,
  iXLen);

define <vscale x 1 x half> @intrinsic_vloxei_v_nxv1f16_nxv1f16_nxv1i32(ptr %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vloxei_v_nxv1f16_nxv1f16_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vloxei32.v v9, (a0), v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vloxei.nxv1f16.nxv1i32(
    <vscale x 1 x half> undef,
    ptr %0,
    <vscale x 1 x i32> %1,
    iXLen %2)

  ret <vscale x 1 x half> %a
}

declare <vscale x 1 x half> @llvm.riscv.vloxei.mask.nxv1f16.nxv1i32(
  <vscale x 1 x half>,
  ptr,
  <vscale x 1 x i32>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x half> @intrinsic_vloxei_mask_v_nxv1f16_nxv1f16_nxv1i32(<vscale x 1 x half> %0, ptr %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f16_nxv1f16_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT:    vloxei32.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vloxei.mask.nxv1f16.nxv1i32(
    <vscale x 1 x half> %0,
    ptr %1,
    <vscale x 1 x i32> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x half> %a
}

declare <vscale x 2 x half> @llvm.riscv.vloxei.nxv2f16.nxv2i32(
  <vscale x 2 x half>,
  ptr,
  <vscale x 2 x i32>,
  iXLen);

define <vscale x 2 x half> @intrinsic_vloxei_v_nxv2f16_nxv2f16_nxv2i32(ptr %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vloxei_v_nxv2f16_nxv2f16_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT:    vloxei32.v v9, (a0), v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x half> @llvm.riscv.vloxei.nxv2f16.nxv2i32(
    <vscale x 2 x half> undef,
    ptr %0,
    <vscale x 2 x i32> %1,
    iXLen %2)

  ret <vscale x 2 x half> %a
}

declare <vscale x 2 x half> @llvm.riscv.vloxei.mask.nxv2f16.nxv2i32(
  <vscale x 2 x half>,
  ptr,
  <vscale x 2 x i32>,
  <vscale x 2 x i1>,
  iXLen,
  iXLen);

define <vscale x 2 x half> @intrinsic_vloxei_mask_v_nxv2f16_nxv2f16_nxv2i32(<vscale x 2 x half> %0, ptr %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f16_nxv2f16_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT:    vloxei32.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x half> @llvm.riscv.vloxei.mask.nxv2f16.nxv2i32(
    <vscale x 2 x half> %0,
    ptr %1,
    <vscale x 2 x i32> %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x half> %a
}
1009
1010declare <vscale x 4 x half> @llvm.riscv.vloxei.nxv4f16.nxv4i32(
1011  <vscale x 4 x half>,
1012  ptr,
1013  <vscale x 4 x i32>,
1014  iXLen);
1015
1016define <vscale x 4 x half> @intrinsic_vloxei_v_nxv4f16_nxv4f16_nxv4i32(ptr %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
1017; CHECK-LABEL: intrinsic_vloxei_v_nxv4f16_nxv4f16_nxv4i32:
1018; CHECK:       # %bb.0: # %entry
1019; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
1020; CHECK-NEXT:    vloxei32.v v10, (a0), v8
1021; CHECK-NEXT:    vmv.v.v v8, v10
1022; CHECK-NEXT:    ret
1023entry:
1024  %a = call <vscale x 4 x half> @llvm.riscv.vloxei.nxv4f16.nxv4i32(
1025    <vscale x 4 x half> undef,
1026    ptr %0,
1027    <vscale x 4 x i32> %1,
1028    iXLen %2)
1029
1030  ret <vscale x 4 x half> %a
1031}
1032
1033declare <vscale x 4 x half> @llvm.riscv.vloxei.mask.nxv4f16.nxv4i32(
1034  <vscale x 4 x half>,
1035  ptr,
1036  <vscale x 4 x i32>,
1037  <vscale x 4 x i1>,
1038  iXLen,
1039  iXLen);
1040
1041define <vscale x 4 x half> @intrinsic_vloxei_mask_v_nxv4f16_nxv4f16_nxv4i32(<vscale x 4 x half> %0, ptr %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
1042; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f16_nxv4f16_nxv4i32:
1043; CHECK:       # %bb.0: # %entry
1044; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
1045; CHECK-NEXT:    vloxei32.v v8, (a0), v10, v0.t
1046; CHECK-NEXT:    ret
1047entry:
1048  %a = call <vscale x 4 x half> @llvm.riscv.vloxei.mask.nxv4f16.nxv4i32(
1049    <vscale x 4 x half> %0,
1050    ptr %1,
1051    <vscale x 4 x i32> %2,
1052    <vscale x 4 x i1> %3,
1053    iXLen %4, iXLen 1)
1054
1055  ret <vscale x 4 x half> %a
1056}
1057
1058declare <vscale x 8 x half> @llvm.riscv.vloxei.nxv8f16.nxv8i32(
1059  <vscale x 8 x half>,
1060  ptr,
1061  <vscale x 8 x i32>,
1062  iXLen);
1063
1064define <vscale x 8 x half> @intrinsic_vloxei_v_nxv8f16_nxv8f16_nxv8i32(ptr %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
1065; CHECK-LABEL: intrinsic_vloxei_v_nxv8f16_nxv8f16_nxv8i32:
1066; CHECK:       # %bb.0: # %entry
1067; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
1068; CHECK-NEXT:    vloxei32.v v12, (a0), v8
1069; CHECK-NEXT:    vmv.v.v v8, v12
1070; CHECK-NEXT:    ret
1071entry:
1072  %a = call <vscale x 8 x half> @llvm.riscv.vloxei.nxv8f16.nxv8i32(
1073    <vscale x 8 x half> undef,
1074    ptr %0,
1075    <vscale x 8 x i32> %1,
1076    iXLen %2)
1077
1078  ret <vscale x 8 x half> %a
1079}
1080
1081declare <vscale x 8 x half> @llvm.riscv.vloxei.mask.nxv8f16.nxv8i32(
1082  <vscale x 8 x half>,
1083  ptr,
1084  <vscale x 8 x i32>,
1085  <vscale x 8 x i1>,
1086  iXLen,
1087  iXLen);
1088
1089define <vscale x 8 x half> @intrinsic_vloxei_mask_v_nxv8f16_nxv8f16_nxv8i32(<vscale x 8 x half> %0, ptr %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
1090; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f16_nxv8f16_nxv8i32:
1091; CHECK:       # %bb.0: # %entry
1092; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
1093; CHECK-NEXT:    vloxei32.v v8, (a0), v12, v0.t
1094; CHECK-NEXT:    ret
1095entry:
1096  %a = call <vscale x 8 x half> @llvm.riscv.vloxei.mask.nxv8f16.nxv8i32(
1097    <vscale x 8 x half> %0,
1098    ptr %1,
1099    <vscale x 8 x i32> %2,
1100    <vscale x 8 x i1> %3,
1101    iXLen %4, iXLen 1)
1102
1103  ret <vscale x 8 x half> %a
1104}
1105
1106declare <vscale x 16 x half> @llvm.riscv.vloxei.nxv16f16.nxv16i32(
1107  <vscale x 16 x half>,
1108  ptr,
1109  <vscale x 16 x i32>,
1110  iXLen);
1111
1112define <vscale x 16 x half> @intrinsic_vloxei_v_nxv16f16_nxv16f16_nxv16i32(ptr %0, <vscale x 16 x i32> %1, iXLen %2) nounwind {
1113; CHECK-LABEL: intrinsic_vloxei_v_nxv16f16_nxv16f16_nxv16i32:
1114; CHECK:       # %bb.0: # %entry
1115; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
1116; CHECK-NEXT:    vloxei32.v v16, (a0), v8
1117; CHECK-NEXT:    vmv.v.v v8, v16
1118; CHECK-NEXT:    ret
1119entry:
1120  %a = call <vscale x 16 x half> @llvm.riscv.vloxei.nxv16f16.nxv16i32(
1121    <vscale x 16 x half> undef,
1122    ptr %0,
1123    <vscale x 16 x i32> %1,
1124    iXLen %2)
1125
1126  ret <vscale x 16 x half> %a
1127}
1128
1129declare <vscale x 16 x half> @llvm.riscv.vloxei.mask.nxv16f16.nxv16i32(
1130  <vscale x 16 x half>,
1131  ptr,
1132  <vscale x 16 x i32>,
1133  <vscale x 16 x i1>,
1134  iXLen,
1135  iXLen);
1136
1137define <vscale x 16 x half> @intrinsic_vloxei_mask_v_nxv16f16_nxv16f16_nxv16i32(<vscale x 16 x half> %0, ptr %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
1138; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16f16_nxv16f16_nxv16i32:
1139; CHECK:       # %bb.0: # %entry
1140; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
1141; CHECK-NEXT:    vloxei32.v v8, (a0), v16, v0.t
1142; CHECK-NEXT:    ret
1143entry:
1144  %a = call <vscale x 16 x half> @llvm.riscv.vloxei.mask.nxv16f16.nxv16i32(
1145    <vscale x 16 x half> %0,
1146    ptr %1,
1147    <vscale x 16 x i32> %2,
1148    <vscale x 16 x i1> %3,
1149    iXLen %4, iXLen 1)
1150
1151  ret <vscale x 16 x half> %a
1152}
1153
; vloxei tests: f32 data with i32 indices. Data EEW equals index EEW, so the
; destination register group can reuse the index register group (vloxei32.v
; v8, (a0), v8) and no copy is needed in the unmasked forms.

declare <vscale x 1 x float> @llvm.riscv.vloxei.nxv1f32.nxv1i32(
  <vscale x 1 x float>,
  ptr,
  <vscale x 1 x i32>,
  iXLen);

define <vscale x 1 x float> @intrinsic_vloxei_v_nxv1f32_nxv1f32_nxv1i32(ptr %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vloxei_v_nxv1f32_nxv1f32_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    vloxei32.v v8, (a0), v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vloxei.nxv1f32.nxv1i32(
    <vscale x 1 x float> poison,
    ptr %0,
    <vscale x 1 x i32> %1,
    iXLen %2)

  ret <vscale x 1 x float> %a
}

declare <vscale x 1 x float> @llvm.riscv.vloxei.mask.nxv1f32.nxv1i32(
  <vscale x 1 x float>,
  ptr,
  <vscale x 1 x i32>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x float> @intrinsic_vloxei_mask_v_nxv1f32_nxv1f32_nxv1i32(<vscale x 1 x float> %0, ptr %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f32_nxv1f32_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT:    vloxei32.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vloxei.mask.nxv1f32.nxv1i32(
    <vscale x 1 x float> %0,
    ptr %1,
    <vscale x 1 x i32> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x float> %a
}

declare <vscale x 2 x float> @llvm.riscv.vloxei.nxv2f32.nxv2i32(
  <vscale x 2 x float>,
  ptr,
  <vscale x 2 x i32>,
  iXLen);

define <vscale x 2 x float> @intrinsic_vloxei_v_nxv2f32_nxv2f32_nxv2i32(ptr %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vloxei_v_nxv2f32_nxv2f32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vloxei32.v v8, (a0), v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vloxei.nxv2f32.nxv2i32(
    <vscale x 2 x float> poison,
    ptr %0,
    <vscale x 2 x i32> %1,
    iXLen %2)

  ret <vscale x 2 x float> %a
}

declare <vscale x 2 x float> @llvm.riscv.vloxei.mask.nxv2f32.nxv2i32(
  <vscale x 2 x float>,
  ptr,
  <vscale x 2 x i32>,
  <vscale x 2 x i1>,
  iXLen,
  iXLen);

define <vscale x 2 x float> @intrinsic_vloxei_mask_v_nxv2f32_nxv2f32_nxv2i32(<vscale x 2 x float> %0, ptr %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f32_nxv2f32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT:    vloxei32.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vloxei.mask.nxv2f32.nxv2i32(
    <vscale x 2 x float> %0,
    ptr %1,
    <vscale x 2 x i32> %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x float> %a
}

declare <vscale x 4 x float> @llvm.riscv.vloxei.nxv4f32.nxv4i32(
  <vscale x 4 x float>,
  ptr,
  <vscale x 4 x i32>,
  iXLen);

define <vscale x 4 x float> @intrinsic_vloxei_v_nxv4f32_nxv4f32_nxv4i32(ptr %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vloxei_v_nxv4f32_nxv4f32_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT:    vloxei32.v v8, (a0), v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x float> @llvm.riscv.vloxei.nxv4f32.nxv4i32(
    <vscale x 4 x float> poison,
    ptr %0,
    <vscale x 4 x i32> %1,
    iXLen %2)

  ret <vscale x 4 x float> %a
}

declare <vscale x 4 x float> @llvm.riscv.vloxei.mask.nxv4f32.nxv4i32(
  <vscale x 4 x float>,
  ptr,
  <vscale x 4 x i32>,
  <vscale x 4 x i1>,
  iXLen,
  iXLen);

define <vscale x 4 x float> @intrinsic_vloxei_mask_v_nxv4f32_nxv4f32_nxv4i32(<vscale x 4 x float> %0, ptr %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f32_nxv4f32_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT:    vloxei32.v v8, (a0), v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x float> @llvm.riscv.vloxei.mask.nxv4f32.nxv4i32(
    <vscale x 4 x float> %0,
    ptr %1,
    <vscale x 4 x i32> %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x float> %a
}

declare <vscale x 8 x float> @llvm.riscv.vloxei.nxv8f32.nxv8i32(
  <vscale x 8 x float>,
  ptr,
  <vscale x 8 x i32>,
  iXLen);

define <vscale x 8 x float> @intrinsic_vloxei_v_nxv8f32_nxv8f32_nxv8i32(ptr %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vloxei_v_nxv8f32_nxv8f32_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT:    vloxei32.v v8, (a0), v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vloxei.nxv8f32.nxv8i32(
    <vscale x 8 x float> poison,
    ptr %0,
    <vscale x 8 x i32> %1,
    iXLen %2)

  ret <vscale x 8 x float> %a
}

declare <vscale x 8 x float> @llvm.riscv.vloxei.mask.nxv8f32.nxv8i32(
  <vscale x 8 x float>,
  ptr,
  <vscale x 8 x i32>,
  <vscale x 8 x i1>,
  iXLen,
  iXLen);

define <vscale x 8 x float> @intrinsic_vloxei_mask_v_nxv8f32_nxv8f32_nxv8i32(<vscale x 8 x float> %0, ptr %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f32_nxv8f32_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT:    vloxei32.v v8, (a0), v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vloxei.mask.nxv8f32.nxv8i32(
    <vscale x 8 x float> %0,
    ptr %1,
    <vscale x 8 x i32> %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x float> %a
}

declare <vscale x 16 x float> @llvm.riscv.vloxei.nxv16f32.nxv16i32(
  <vscale x 16 x float>,
  ptr,
  <vscale x 16 x i32>,
  iXLen);

define <vscale x 16 x float> @intrinsic_vloxei_v_nxv16f32_nxv16f32_nxv16i32(ptr %0, <vscale x 16 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vloxei_v_nxv16f32_nxv16f32_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT:    vloxei32.v v8, (a0), v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x float> @llvm.riscv.vloxei.nxv16f32.nxv16i32(
    <vscale x 16 x float> poison,
    ptr %0,
    <vscale x 16 x i32> %1,
    iXLen %2)

  ret <vscale x 16 x float> %a
}

declare <vscale x 16 x float> @llvm.riscv.vloxei.mask.nxv16f32.nxv16i32(
  <vscale x 16 x float>,
  ptr,
  <vscale x 16 x i32>,
  <vscale x 16 x i1>,
  iXLen,
  iXLen);

define <vscale x 16 x float> @intrinsic_vloxei_mask_v_nxv16f32_nxv16f32_nxv16i32(<vscale x 16 x float> %0, ptr %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16f32_nxv16f32_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
; CHECK-NEXT:    vloxei32.v v8, (a0), v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x float> @llvm.riscv.vloxei.mask.nxv16f32.nxv16i32(
    <vscale x 16 x float> %0,
    ptr %1,
    <vscale x 16 x i32> %2,
    <vscale x 16 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 16 x float> %a
}
1388
; vloxei tests: f64 data with i32 indices. The index EEW (32) is narrower
; than the data EEW (64), so vloxei32.v is encoded while vsetvli selects the
; e64 data SEW; the result is produced in a separate register group and
; copied into v8.

declare <vscale x 1 x double> @llvm.riscv.vloxei.nxv1f64.nxv1i32(
  <vscale x 1 x double>,
  ptr,
  <vscale x 1 x i32>,
  iXLen);

define <vscale x 1 x double> @intrinsic_vloxei_v_nxv1f64_nxv1f64_nxv1i32(ptr %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vloxei_v_nxv1f64_nxv1f64_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT:    vloxei32.v v9, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vloxei.nxv1f64.nxv1i32(
    <vscale x 1 x double> poison,
    ptr %0,
    <vscale x 1 x i32> %1,
    iXLen %2)

  ret <vscale x 1 x double> %a
}

declare <vscale x 1 x double> @llvm.riscv.vloxei.mask.nxv1f64.nxv1i32(
  <vscale x 1 x double>,
  ptr,
  <vscale x 1 x i32>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x double> @intrinsic_vloxei_mask_v_nxv1f64_nxv1f64_nxv1i32(<vscale x 1 x double> %0, ptr %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f64_nxv1f64_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT:    vloxei32.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vloxei.mask.nxv1f64.nxv1i32(
    <vscale x 1 x double> %0,
    ptr %1,
    <vscale x 1 x i32> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x double> %a
}

declare <vscale x 2 x double> @llvm.riscv.vloxei.nxv2f64.nxv2i32(
  <vscale x 2 x double>,
  ptr,
  <vscale x 2 x i32>,
  iXLen);

define <vscale x 2 x double> @intrinsic_vloxei_v_nxv2f64_nxv2f64_nxv2i32(ptr %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vloxei_v_nxv2f64_nxv2f64_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT:    vloxei32.v v10, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x double> @llvm.riscv.vloxei.nxv2f64.nxv2i32(
    <vscale x 2 x double> poison,
    ptr %0,
    <vscale x 2 x i32> %1,
    iXLen %2)

  ret <vscale x 2 x double> %a
}

declare <vscale x 2 x double> @llvm.riscv.vloxei.mask.nxv2f64.nxv2i32(
  <vscale x 2 x double>,
  ptr,
  <vscale x 2 x i32>,
  <vscale x 2 x i1>,
  iXLen,
  iXLen);

define <vscale x 2 x double> @intrinsic_vloxei_mask_v_nxv2f64_nxv2f64_nxv2i32(<vscale x 2 x double> %0, ptr %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f64_nxv2f64_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT:    vloxei32.v v8, (a0), v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x double> @llvm.riscv.vloxei.mask.nxv2f64.nxv2i32(
    <vscale x 2 x double> %0,
    ptr %1,
    <vscale x 2 x i32> %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x double> %a
}

declare <vscale x 4 x double> @llvm.riscv.vloxei.nxv4f64.nxv4i32(
  <vscale x 4 x double>,
  ptr,
  <vscale x 4 x i32>,
  iXLen);

define <vscale x 4 x double> @intrinsic_vloxei_v_nxv4f64_nxv4f64_nxv4i32(ptr %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vloxei_v_nxv4f64_nxv4f64_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
; CHECK-NEXT:    vloxei32.v v12, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x double> @llvm.riscv.vloxei.nxv4f64.nxv4i32(
    <vscale x 4 x double> poison,
    ptr %0,
    <vscale x 4 x i32> %1,
    iXLen %2)

  ret <vscale x 4 x double> %a
}

declare <vscale x 4 x double> @llvm.riscv.vloxei.mask.nxv4f64.nxv4i32(
  <vscale x 4 x double>,
  ptr,
  <vscale x 4 x i32>,
  <vscale x 4 x i1>,
  iXLen,
  iXLen);

define <vscale x 4 x double> @intrinsic_vloxei_mask_v_nxv4f64_nxv4f64_nxv4i32(<vscale x 4 x double> %0, ptr %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f64_nxv4f64_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
; CHECK-NEXT:    vloxei32.v v8, (a0), v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x double> @llvm.riscv.vloxei.mask.nxv4f64.nxv4i32(
    <vscale x 4 x double> %0,
    ptr %1,
    <vscale x 4 x i32> %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x double> %a
}

declare <vscale x 8 x double> @llvm.riscv.vloxei.nxv8f64.nxv8i32(
  <vscale x 8 x double>,
  ptr,
  <vscale x 8 x i32>,
  iXLen);

define <vscale x 8 x double> @intrinsic_vloxei_v_nxv8f64_nxv8f64_nxv8i32(ptr %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vloxei_v_nxv8f64_nxv8f64_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
; CHECK-NEXT:    vloxei32.v v16, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x double> @llvm.riscv.vloxei.nxv8f64.nxv8i32(
    <vscale x 8 x double> poison,
    ptr %0,
    <vscale x 8 x i32> %1,
    iXLen %2)

  ret <vscale x 8 x double> %a
}

declare <vscale x 8 x double> @llvm.riscv.vloxei.mask.nxv8f64.nxv8i32(
  <vscale x 8 x double>,
  ptr,
  <vscale x 8 x i32>,
  <vscale x 8 x i1>,
  iXLen,
  iXLen);

define <vscale x 8 x double> @intrinsic_vloxei_mask_v_nxv8f64_nxv8f64_nxv8i32(<vscale x 8 x double> %0, ptr %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f64_nxv8f64_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
; CHECK-NEXT:    vloxei32.v v8, (a0), v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x double> @llvm.riscv.vloxei.mask.nxv8f64.nxv8i32(
    <vscale x 8 x double> %0,
    ptr %1,
    <vscale x 8 x i32> %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x double> %a
}
1580
; vloxei tests: i8 data with i16 indices. The index EEW (16) is wider than
; the data EEW (8), so vloxei16.v is encoded while vsetvli selects the e8
; data SEW.

declare <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8.nxv1i16(
  <vscale x 1 x i8>,
  ptr,
  <vscale x 1 x i16>,
  iXLen);

define <vscale x 1 x i8> @intrinsic_vloxei_v_nxv1i8_nxv1i8_nxv1i16(ptr %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vloxei_v_nxv1i8_nxv1i8_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vloxei16.v v9, (a0), v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8.nxv1i16(
    <vscale x 1 x i8> poison,
    ptr %0,
    <vscale x 1 x i16> %1,
    iXLen %2)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vloxei.mask.nxv1i8.nxv1i16(
  <vscale x 1 x i8>,
  ptr,
  <vscale x 1 x i16>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x i8> @intrinsic_vloxei_mask_v_nxv1i8_nxv1i8_nxv1i16(<vscale x 1 x i8> %0, ptr %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i8_nxv1i8_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT:    vloxei16.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vloxei.mask.nxv1i8.nxv1i16(
    <vscale x 1 x i8> %0,
    ptr %1,
    <vscale x 1 x i16> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 2 x i8> @llvm.riscv.vloxei.nxv2i8.nxv2i16(
  <vscale x 2 x i8>,
  ptr,
  <vscale x 2 x i16>,
  iXLen);

define <vscale x 2 x i8> @intrinsic_vloxei_v_nxv2i8_nxv2i8_nxv2i16(ptr %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vloxei_v_nxv2i8_nxv2i8_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT:    vloxei16.v v9, (a0), v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vloxei.nxv2i8.nxv2i16(
    <vscale x 2 x i8> poison,
    ptr %0,
    <vscale x 2 x i16> %1,
    iXLen %2)

  ret <vscale x 2 x i8> %a
}

declare <vscale x 2 x i8> @llvm.riscv.vloxei.mask.nxv2i8.nxv2i16(
  <vscale x 2 x i8>,
  ptr,
  <vscale x 2 x i16>,
  <vscale x 2 x i1>,
  iXLen,
  iXLen);

define <vscale x 2 x i8> @intrinsic_vloxei_mask_v_nxv2i8_nxv2i8_nxv2i16(<vscale x 2 x i8> %0, ptr %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i8_nxv2i8_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT:    vloxei16.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vloxei.mask.nxv2i8.nxv2i16(
    <vscale x 2 x i8> %0,
    ptr %1,
    <vscale x 2 x i16> %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x i8> %a
}

declare <vscale x 4 x i8> @llvm.riscv.vloxei.nxv4i8.nxv4i16(
  <vscale x 4 x i8>,
  ptr,
  <vscale x 4 x i16>,
  iXLen);

define <vscale x 4 x i8> @intrinsic_vloxei_v_nxv4i8_nxv4i8_nxv4i16(ptr %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vloxei_v_nxv4i8_nxv4i8_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vloxei16.v v9, (a0), v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vloxei.nxv4i8.nxv4i16(
    <vscale x 4 x i8> poison,
    ptr %0,
    <vscale x 4 x i16> %1,
    iXLen %2)

  ret <vscale x 4 x i8> %a
}

declare <vscale x 4 x i8> @llvm.riscv.vloxei.mask.nxv4i8.nxv4i16(
  <vscale x 4 x i8>,
  ptr,
  <vscale x 4 x i16>,
  <vscale x 4 x i1>,
  iXLen,
  iXLen);

define <vscale x 4 x i8> @intrinsic_vloxei_mask_v_nxv4i8_nxv4i8_nxv4i16(<vscale x 4 x i8> %0, ptr %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i8_nxv4i8_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT:    vloxei16.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vloxei.mask.nxv4i8.nxv4i16(
    <vscale x 4 x i8> %0,
    ptr %1,
    <vscale x 4 x i16> %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vloxei.nxv8i8.nxv8i16(
  <vscale x 8 x i8>,
  ptr,
  <vscale x 8 x i16>,
  iXLen);

define <vscale x 8 x i8> @intrinsic_vloxei_v_nxv8i8_nxv8i8_nxv8i16(ptr %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vloxei_v_nxv8i8_nxv8i8_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT:    vloxei16.v v10, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vloxei.nxv8i8.nxv8i16(
    <vscale x 8 x i8> poison,
    ptr %0,
    <vscale x 8 x i16> %1,
    iXLen %2)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vloxei.mask.nxv8i8.nxv8i16(
  <vscale x 8 x i8>,
  ptr,
  <vscale x 8 x i16>,
  <vscale x 8 x i1>,
  iXLen,
  iXLen);

define <vscale x 8 x i8> @intrinsic_vloxei_mask_v_nxv8i8_nxv8i8_nxv8i16(<vscale x 8 x i8> %0, ptr %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i8_nxv8i8_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT:    vloxei16.v v8, (a0), v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vloxei.mask.nxv8i8.nxv8i16(
    <vscale x 8 x i8> %0,
    ptr %1,
    <vscale x 8 x i16> %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 16 x i8> @llvm.riscv.vloxei.nxv16i8.nxv16i16(
  <vscale x 16 x i8>,
  ptr,
  <vscale x 16 x i16>,
  iXLen);

define <vscale x 16 x i8> @intrinsic_vloxei_v_nxv16i8_nxv16i8_nxv16i16(ptr %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vloxei_v_nxv16i8_nxv16i8_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT:    vloxei16.v v12, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vloxei.nxv16i8.nxv16i16(
    <vscale x 16 x i8> poison,
    ptr %0,
    <vscale x 16 x i16> %1,
    iXLen %2)

  ret <vscale x 16 x i8> %a
}

declare <vscale x 16 x i8> @llvm.riscv.vloxei.mask.nxv16i8.nxv16i16(
  <vscale x 16 x i8>,
  ptr,
  <vscale x 16 x i16>,
  <vscale x 16 x i1>,
  iXLen,
  iXLen);

define <vscale x 16 x i8> @intrinsic_vloxei_mask_v_nxv16i8_nxv16i8_nxv16i16(<vscale x 16 x i8> %0, ptr %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16i8_nxv16i8_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT:    vloxei16.v v8, (a0), v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vloxei.mask.nxv16i8.nxv16i16(
    <vscale x 16 x i8> %0,
    ptr %1,
    <vscale x 16 x i16> %2,
    <vscale x 16 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 16 x i8> %a
}

declare <vscale x 32 x i8> @llvm.riscv.vloxei.nxv32i8.nxv32i16(
  <vscale x 32 x i8>,
  ptr,
  <vscale x 32 x i16>,
  iXLen);

define <vscale x 32 x i8> @intrinsic_vloxei_v_nxv32i8_nxv32i8_nxv32i16(ptr %0, <vscale x 32 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vloxei_v_nxv32i8_nxv32i8_nxv32i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT:    vloxei16.v v16, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vloxei.nxv32i8.nxv32i16(
    <vscale x 32 x i8> poison,
    ptr %0,
    <vscale x 32 x i16> %1,
    iXLen %2)

  ret <vscale x 32 x i8> %a
}

declare <vscale x 32 x i8> @llvm.riscv.vloxei.mask.nxv32i8.nxv32i16(
  <vscale x 32 x i8>,
  ptr,
  <vscale x 32 x i16>,
  <vscale x 32 x i1>,
  iXLen,
  iXLen);

define <vscale x 32 x i8> @intrinsic_vloxei_mask_v_nxv32i8_nxv32i8_nxv32i16(<vscale x 32 x i8> %0, ptr %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv32i8_nxv32i8_nxv32i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT:    vloxei16.v v8, (a0), v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vloxei.mask.nxv32i8.nxv32i16(
    <vscale x 32 x i8> %0,
    ptr %1,
    <vscale x 32 x i16> %2,
    <vscale x 32 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 32 x i8> %a
}
1868
1869declare <vscale x 1 x i16> @llvm.riscv.vloxei.nxv1i16.nxv1i16(
1870  <vscale x 1 x i16>,
1871  ptr,
1872  <vscale x 1 x i16>,
1873  iXLen);
1874
; Unmasked ordered-indexed load of nxv1i16; undef passthru lets the destination share v8 with the index operand.
define <vscale x 1 x i16> @intrinsic_vloxei_v_nxv1i16_nxv1i16_nxv1i16(ptr %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vloxei_v_nxv1i16_nxv1i16_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vloxei16.v v8, (a0), v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vloxei.nxv1i16.nxv1i16(
    <vscale x 1 x i16> undef,
    ptr %0,
    <vscale x 1 x i16> %1,
    iXLen %2)

  ret <vscale x 1 x i16> %a
}
1890
1891declare <vscale x 1 x i16> @llvm.riscv.vloxei.mask.nxv1i16.nxv1i16(
1892  <vscale x 1 x i16>,
1893  ptr,
1894  <vscale x 1 x i16>,
1895  <vscale x 1 x i1>,
1896  iXLen,
1897  iXLen);
1898
; Masked ordered-indexed load of nxv1i16; policy operand 1 = tail-agnostic, mask-undisturbed ("ta, mu").
define <vscale x 1 x i16> @intrinsic_vloxei_mask_v_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, ptr %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i16_nxv1i16_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT:    vloxei16.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vloxei.mask.nxv1i16.nxv1i16(
    <vscale x 1 x i16> %0,
    ptr %1,
    <vscale x 1 x i16> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x i16> %a
}
1915
1916declare <vscale x 2 x i16> @llvm.riscv.vloxei.nxv2i16.nxv2i16(
1917  <vscale x 2 x i16>,
1918  ptr,
1919  <vscale x 2 x i16>,
1920  iXLen);
1921
; Unmasked ordered-indexed load of nxv2i16; undef passthru lets the destination share v8 with the index operand.
define <vscale x 2 x i16> @intrinsic_vloxei_v_nxv2i16_nxv2i16_nxv2i16(ptr %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vloxei_v_nxv2i16_nxv2i16_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT:    vloxei16.v v8, (a0), v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vloxei.nxv2i16.nxv2i16(
    <vscale x 2 x i16> undef,
    ptr %0,
    <vscale x 2 x i16> %1,
    iXLen %2)

  ret <vscale x 2 x i16> %a
}
1937
1938declare <vscale x 2 x i16> @llvm.riscv.vloxei.mask.nxv2i16.nxv2i16(
1939  <vscale x 2 x i16>,
1940  ptr,
1941  <vscale x 2 x i16>,
1942  <vscale x 2 x i1>,
1943  iXLen,
1944  iXLen);
1945
; Masked ordered-indexed load of nxv2i16; policy operand 1 = tail-agnostic, mask-undisturbed ("ta, mu").
define <vscale x 2 x i16> @intrinsic_vloxei_mask_v_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, ptr %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i16_nxv2i16_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT:    vloxei16.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vloxei.mask.nxv2i16.nxv2i16(
    <vscale x 2 x i16> %0,
    ptr %1,
    <vscale x 2 x i16> %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x i16> %a
}
1962
1963declare <vscale x 4 x i16> @llvm.riscv.vloxei.nxv4i16.nxv4i16(
1964  <vscale x 4 x i16>,
1965  ptr,
1966  <vscale x 4 x i16>,
1967  iXLen);
1968
; Unmasked ordered-indexed load of nxv4i16; undef passthru lets the destination share v8 with the index operand.
define <vscale x 4 x i16> @intrinsic_vloxei_v_nxv4i16_nxv4i16_nxv4i16(ptr %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vloxei_v_nxv4i16_nxv4i16_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vloxei16.v v8, (a0), v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vloxei.nxv4i16.nxv4i16(
    <vscale x 4 x i16> undef,
    ptr %0,
    <vscale x 4 x i16> %1,
    iXLen %2)

  ret <vscale x 4 x i16> %a
}
1984
1985declare <vscale x 4 x i16> @llvm.riscv.vloxei.mask.nxv4i16.nxv4i16(
1986  <vscale x 4 x i16>,
1987  ptr,
1988  <vscale x 4 x i16>,
1989  <vscale x 4 x i1>,
1990  iXLen,
1991  iXLen);
1992
; Masked ordered-indexed load of nxv4i16; policy operand 1 = tail-agnostic, mask-undisturbed ("ta, mu").
define <vscale x 4 x i16> @intrinsic_vloxei_mask_v_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, ptr %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i16_nxv4i16_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT:    vloxei16.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vloxei.mask.nxv4i16.nxv4i16(
    <vscale x 4 x i16> %0,
    ptr %1,
    <vscale x 4 x i16> %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x i16> %a
}
2009
2010declare <vscale x 8 x i16> @llvm.riscv.vloxei.nxv8i16.nxv8i16(
2011  <vscale x 8 x i16>,
2012  ptr,
2013  <vscale x 8 x i16>,
2014  iXLen);
2015
; Unmasked ordered-indexed load of nxv8i16; undef passthru lets the destination share v8 with the index operand.
define <vscale x 8 x i16> @intrinsic_vloxei_v_nxv8i16_nxv8i16_nxv8i16(ptr %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vloxei_v_nxv8i16_nxv8i16_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT:    vloxei16.v v8, (a0), v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vloxei.nxv8i16.nxv8i16(
    <vscale x 8 x i16> undef,
    ptr %0,
    <vscale x 8 x i16> %1,
    iXLen %2)

  ret <vscale x 8 x i16> %a
}
2031
2032declare <vscale x 8 x i16> @llvm.riscv.vloxei.mask.nxv8i16.nxv8i16(
2033  <vscale x 8 x i16>,
2034  ptr,
2035  <vscale x 8 x i16>,
2036  <vscale x 8 x i1>,
2037  iXLen,
2038  iXLen);
2039
; Masked ordered-indexed load of nxv8i16; policy operand 1 = tail-agnostic, mask-undisturbed ("ta, mu").
define <vscale x 8 x i16> @intrinsic_vloxei_mask_v_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, ptr %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i16_nxv8i16_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT:    vloxei16.v v8, (a0), v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vloxei.mask.nxv8i16.nxv8i16(
    <vscale x 8 x i16> %0,
    ptr %1,
    <vscale x 8 x i16> %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x i16> %a
}
2056
2057declare <vscale x 16 x i16> @llvm.riscv.vloxei.nxv16i16.nxv16i16(
2058  <vscale x 16 x i16>,
2059  ptr,
2060  <vscale x 16 x i16>,
2061  iXLen);
2062
; Unmasked ordered-indexed load of nxv16i16; undef passthru lets the destination share v8 with the index operand.
define <vscale x 16 x i16> @intrinsic_vloxei_v_nxv16i16_nxv16i16_nxv16i16(ptr %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vloxei_v_nxv16i16_nxv16i16_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT:    vloxei16.v v8, (a0), v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vloxei.nxv16i16.nxv16i16(
    <vscale x 16 x i16> undef,
    ptr %0,
    <vscale x 16 x i16> %1,
    iXLen %2)

  ret <vscale x 16 x i16> %a
}
2078
2079declare <vscale x 16 x i16> @llvm.riscv.vloxei.mask.nxv16i16.nxv16i16(
2080  <vscale x 16 x i16>,
2081  ptr,
2082  <vscale x 16 x i16>,
2083  <vscale x 16 x i1>,
2084  iXLen,
2085  iXLen);
2086
; Masked ordered-indexed load of nxv16i16; policy operand 1 = tail-agnostic, mask-undisturbed ("ta, mu").
define <vscale x 16 x i16> @intrinsic_vloxei_mask_v_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, ptr %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16i16_nxv16i16_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT:    vloxei16.v v8, (a0), v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vloxei.mask.nxv16i16.nxv16i16(
    <vscale x 16 x i16> %0,
    ptr %1,
    <vscale x 16 x i16> %2,
    <vscale x 16 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 16 x i16> %a
}
2103
2104declare <vscale x 32 x i16> @llvm.riscv.vloxei.nxv32i16.nxv32i16(
2105  <vscale x 32 x i16>,
2106  ptr,
2107  <vscale x 32 x i16>,
2108  iXLen);
2109
; Unmasked ordered-indexed load of nxv32i16; undef passthru lets the destination share v8 with the index operand.
define <vscale x 32 x i16> @intrinsic_vloxei_v_nxv32i16_nxv32i16_nxv32i16(ptr %0, <vscale x 32 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vloxei_v_nxv32i16_nxv32i16_nxv32i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
; CHECK-NEXT:    vloxei16.v v8, (a0), v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vloxei.nxv32i16.nxv32i16(
    <vscale x 32 x i16> undef,
    ptr %0,
    <vscale x 32 x i16> %1,
    iXLen %2)

  ret <vscale x 32 x i16> %a
}
2125
2126declare <vscale x 32 x i16> @llvm.riscv.vloxei.mask.nxv32i16.nxv32i16(
2127  <vscale x 32 x i16>,
2128  ptr,
2129  <vscale x 32 x i16>,
2130  <vscale x 32 x i1>,
2131  iXLen,
2132  iXLen);
2133
; Masked ordered-indexed load of nxv32i16; policy operand 1 = tail-agnostic, mask-undisturbed ("ta, mu").
define <vscale x 32 x i16> @intrinsic_vloxei_mask_v_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, ptr %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv32i16_nxv32i16_nxv32i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
; CHECK-NEXT:    vloxei16.v v8, (a0), v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vloxei.mask.nxv32i16.nxv32i16(
    <vscale x 32 x i16> %0,
    ptr %1,
    <vscale x 32 x i16> %2,
    <vscale x 32 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 32 x i16> %a
}
2150
2151declare <vscale x 1 x i32> @llvm.riscv.vloxei.nxv1i32.nxv1i16(
2152  <vscale x 1 x i32>,
2153  ptr,
2154  <vscale x 1 x i16>,
2155  iXLen);
2156
; Unmasked ordered-indexed load of nxv1i32 via 16-bit indices; the result is produced in v9 and copied to v8, avoiding overlap with the index register.
define <vscale x 1 x i32> @intrinsic_vloxei_v_nxv1i32_nxv1i32_nxv1i16(ptr %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vloxei_v_nxv1i32_nxv1i32_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    vloxei16.v v9, (a0), v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vloxei.nxv1i32.nxv1i16(
    <vscale x 1 x i32> undef,
    ptr %0,
    <vscale x 1 x i16> %1,
    iXLen %2)

  ret <vscale x 1 x i32> %a
}
2173
2174declare <vscale x 1 x i32> @llvm.riscv.vloxei.mask.nxv1i32.nxv1i16(
2175  <vscale x 1 x i32>,
2176  ptr,
2177  <vscale x 1 x i16>,
2178  <vscale x 1 x i1>,
2179  iXLen,
2180  iXLen);
2181
; Masked ordered-indexed load of nxv1i32 via 16-bit indices; policy operand 1 = tail-agnostic, mask-undisturbed ("ta, mu").
define <vscale x 1 x i32> @intrinsic_vloxei_mask_v_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, ptr %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i32_nxv1i32_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT:    vloxei16.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vloxei.mask.nxv1i32.nxv1i16(
    <vscale x 1 x i32> %0,
    ptr %1,
    <vscale x 1 x i16> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x i32> %a
}
2198
2199declare <vscale x 2 x i32> @llvm.riscv.vloxei.nxv2i32.nxv2i16(
2200  <vscale x 2 x i32>,
2201  ptr,
2202  <vscale x 2 x i16>,
2203  iXLen);
2204
; Unmasked ordered-indexed load of nxv2i32 via 16-bit indices; result materialized in v9 then copied to v8, avoiding overlap with the index register.
define <vscale x 2 x i32> @intrinsic_vloxei_v_nxv2i32_nxv2i32_nxv2i16(ptr %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vloxei_v_nxv2i32_nxv2i32_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vloxei16.v v9, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vloxei.nxv2i32.nxv2i16(
    <vscale x 2 x i32> undef,
    ptr %0,
    <vscale x 2 x i16> %1,
    iXLen %2)

  ret <vscale x 2 x i32> %a
}
2221
2222declare <vscale x 2 x i32> @llvm.riscv.vloxei.mask.nxv2i32.nxv2i16(
2223  <vscale x 2 x i32>,
2224  ptr,
2225  <vscale x 2 x i16>,
2226  <vscale x 2 x i1>,
2227  iXLen,
2228  iXLen);
2229
; Masked ordered-indexed load of nxv2i32 via 16-bit indices; policy operand 1 = tail-agnostic, mask-undisturbed ("ta, mu").
define <vscale x 2 x i32> @intrinsic_vloxei_mask_v_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, ptr %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i32_nxv2i32_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT:    vloxei16.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vloxei.mask.nxv2i32.nxv2i16(
    <vscale x 2 x i32> %0,
    ptr %1,
    <vscale x 2 x i16> %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x i32> %a
}
2246
2247declare <vscale x 4 x i32> @llvm.riscv.vloxei.nxv4i32.nxv4i16(
2248  <vscale x 4 x i32>,
2249  ptr,
2250  <vscale x 4 x i16>,
2251  iXLen);
2252
; Unmasked ordered-indexed load of nxv4i32 via 16-bit indices; result materialized in v10 then copied to v8, avoiding overlap with the index register.
define <vscale x 4 x i32> @intrinsic_vloxei_v_nxv4i32_nxv4i32_nxv4i16(ptr %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vloxei_v_nxv4i32_nxv4i32_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT:    vloxei16.v v10, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vloxei.nxv4i32.nxv4i16(
    <vscale x 4 x i32> undef,
    ptr %0,
    <vscale x 4 x i16> %1,
    iXLen %2)

  ret <vscale x 4 x i32> %a
}
2269
2270declare <vscale x 4 x i32> @llvm.riscv.vloxei.mask.nxv4i32.nxv4i16(
2271  <vscale x 4 x i32>,
2272  ptr,
2273  <vscale x 4 x i16>,
2274  <vscale x 4 x i1>,
2275  iXLen,
2276  iXLen);
2277
; Masked ordered-indexed load of nxv4i32 via 16-bit indices; policy operand 1 = tail-agnostic, mask-undisturbed ("ta, mu").
define <vscale x 4 x i32> @intrinsic_vloxei_mask_v_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, ptr %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i32_nxv4i32_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT:    vloxei16.v v8, (a0), v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vloxei.mask.nxv4i32.nxv4i16(
    <vscale x 4 x i32> %0,
    ptr %1,
    <vscale x 4 x i16> %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x i32> %a
}
2294
2295declare <vscale x 8 x i32> @llvm.riscv.vloxei.nxv8i32.nxv8i16(
2296  <vscale x 8 x i32>,
2297  ptr,
2298  <vscale x 8 x i16>,
2299  iXLen);
2300
; Unmasked ordered-indexed load of nxv8i32 via 16-bit indices; result materialized in v12 then copied to v8, avoiding overlap with the index register.
define <vscale x 8 x i32> @intrinsic_vloxei_v_nxv8i32_nxv8i32_nxv8i16(ptr %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vloxei_v_nxv8i32_nxv8i32_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT:    vloxei16.v v12, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vloxei.nxv8i32.nxv8i16(
    <vscale x 8 x i32> undef,
    ptr %0,
    <vscale x 8 x i16> %1,
    iXLen %2)

  ret <vscale x 8 x i32> %a
}
2317
2318declare <vscale x 8 x i32> @llvm.riscv.vloxei.mask.nxv8i32.nxv8i16(
2319  <vscale x 8 x i32>,
2320  ptr,
2321  <vscale x 8 x i16>,
2322  <vscale x 8 x i1>,
2323  iXLen,
2324  iXLen);
2325
; Masked ordered-indexed load of nxv8i32 via 16-bit indices; policy operand 1 = tail-agnostic, mask-undisturbed ("ta, mu").
define <vscale x 8 x i32> @intrinsic_vloxei_mask_v_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, ptr %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i32_nxv8i32_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT:    vloxei16.v v8, (a0), v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vloxei.mask.nxv8i32.nxv8i16(
    <vscale x 8 x i32> %0,
    ptr %1,
    <vscale x 8 x i16> %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x i32> %a
}
2342
2343declare <vscale x 16 x i32> @llvm.riscv.vloxei.nxv16i32.nxv16i16(
2344  <vscale x 16 x i32>,
2345  ptr,
2346  <vscale x 16 x i16>,
2347  iXLen);
2348
; Unmasked ordered-indexed load of nxv16i32 via 16-bit indices; result materialized in v16 then copied to v8, avoiding overlap with the index register.
define <vscale x 16 x i32> @intrinsic_vloxei_v_nxv16i32_nxv16i32_nxv16i16(ptr %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vloxei_v_nxv16i32_nxv16i32_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT:    vloxei16.v v16, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vloxei.nxv16i32.nxv16i16(
    <vscale x 16 x i32> undef,
    ptr %0,
    <vscale x 16 x i16> %1,
    iXLen %2)

  ret <vscale x 16 x i32> %a
}
2365
2366declare <vscale x 16 x i32> @llvm.riscv.vloxei.mask.nxv16i32.nxv16i16(
2367  <vscale x 16 x i32>,
2368  ptr,
2369  <vscale x 16 x i16>,
2370  <vscale x 16 x i1>,
2371  iXLen,
2372  iXLen);
2373
; Masked ordered-indexed load of nxv16i32 via 16-bit indices; policy operand 1 = tail-agnostic, mask-undisturbed ("ta, mu").
define <vscale x 16 x i32> @intrinsic_vloxei_mask_v_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, ptr %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16i32_nxv16i32_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
; CHECK-NEXT:    vloxei16.v v8, (a0), v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vloxei.mask.nxv16i32.nxv16i16(
    <vscale x 16 x i32> %0,
    ptr %1,
    <vscale x 16 x i16> %2,
    <vscale x 16 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 16 x i32> %a
}
2390
2391declare <vscale x 1 x i64> @llvm.riscv.vloxei.nxv1i64.nxv1i16(
2392  <vscale x 1 x i64>,
2393  ptr,
2394  <vscale x 1 x i16>,
2395  iXLen);
2396
; Unmasked ordered-indexed load of nxv1i64 via 16-bit indices; result materialized in v9 then copied to v8, avoiding overlap with the index register.
define <vscale x 1 x i64> @intrinsic_vloxei_v_nxv1i64_nxv1i64_nxv1i16(ptr %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vloxei_v_nxv1i64_nxv1i64_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT:    vloxei16.v v9, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vloxei.nxv1i64.nxv1i16(
    <vscale x 1 x i64> undef,
    ptr %0,
    <vscale x 1 x i16> %1,
    iXLen %2)

  ret <vscale x 1 x i64> %a
}
2413
2414declare <vscale x 1 x i64> @llvm.riscv.vloxei.mask.nxv1i64.nxv1i16(
2415  <vscale x 1 x i64>,
2416  ptr,
2417  <vscale x 1 x i16>,
2418  <vscale x 1 x i1>,
2419  iXLen,
2420  iXLen);
2421
; Masked ordered-indexed load of nxv1i64 via 16-bit indices; policy operand 1 = tail-agnostic, mask-undisturbed ("ta, mu").
define <vscale x 1 x i64> @intrinsic_vloxei_mask_v_nxv1i64_nxv1i64_nxv1i16(<vscale x 1 x i64> %0, ptr %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i64_nxv1i64_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT:    vloxei16.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vloxei.mask.nxv1i64.nxv1i16(
    <vscale x 1 x i64> %0,
    ptr %1,
    <vscale x 1 x i16> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x i64> %a
}
2438
2439declare <vscale x 2 x i64> @llvm.riscv.vloxei.nxv2i64.nxv2i16(
2440  <vscale x 2 x i64>,
2441  ptr,
2442  <vscale x 2 x i16>,
2443  iXLen);
2444
; Unmasked ordered-indexed load of nxv2i64 via 16-bit indices; result materialized in v10 then copied to v8, avoiding overlap with the index register.
define <vscale x 2 x i64> @intrinsic_vloxei_v_nxv2i64_nxv2i64_nxv2i16(ptr %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vloxei_v_nxv2i64_nxv2i64_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT:    vloxei16.v v10, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vloxei.nxv2i64.nxv2i16(
    <vscale x 2 x i64> undef,
    ptr %0,
    <vscale x 2 x i16> %1,
    iXLen %2)

  ret <vscale x 2 x i64> %a
}
2461
2462declare <vscale x 2 x i64> @llvm.riscv.vloxei.mask.nxv2i64.nxv2i16(
2463  <vscale x 2 x i64>,
2464  ptr,
2465  <vscale x 2 x i16>,
2466  <vscale x 2 x i1>,
2467  iXLen,
2468  iXLen);
2469
; Masked ordered-indexed load of nxv2i64 via 16-bit indices; policy operand 1 = tail-agnostic, mask-undisturbed ("ta, mu").
define <vscale x 2 x i64> @intrinsic_vloxei_mask_v_nxv2i64_nxv2i64_nxv2i16(<vscale x 2 x i64> %0, ptr %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i64_nxv2i64_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT:    vloxei16.v v8, (a0), v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vloxei.mask.nxv2i64.nxv2i16(
    <vscale x 2 x i64> %0,
    ptr %1,
    <vscale x 2 x i16> %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x i64> %a
}
2486
2487declare <vscale x 4 x i64> @llvm.riscv.vloxei.nxv4i64.nxv4i16(
2488  <vscale x 4 x i64>,
2489  ptr,
2490  <vscale x 4 x i16>,
2491  iXLen);
2492
; Unmasked ordered-indexed load of nxv4i64 via 16-bit indices; result materialized in v12 then copied to v8, avoiding overlap with the index register.
define <vscale x 4 x i64> @intrinsic_vloxei_v_nxv4i64_nxv4i64_nxv4i16(ptr %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vloxei_v_nxv4i64_nxv4i64_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
; CHECK-NEXT:    vloxei16.v v12, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vloxei.nxv4i64.nxv4i16(
    <vscale x 4 x i64> undef,
    ptr %0,
    <vscale x 4 x i16> %1,
    iXLen %2)

  ret <vscale x 4 x i64> %a
}
2509
2510declare <vscale x 4 x i64> @llvm.riscv.vloxei.mask.nxv4i64.nxv4i16(
2511  <vscale x 4 x i64>,
2512  ptr,
2513  <vscale x 4 x i16>,
2514  <vscale x 4 x i1>,
2515  iXLen,
2516  iXLen);
2517
; Masked ordered-indexed load of nxv4i64 via 16-bit indices; policy operand 1 = tail-agnostic, mask-undisturbed ("ta, mu").
define <vscale x 4 x i64> @intrinsic_vloxei_mask_v_nxv4i64_nxv4i64_nxv4i16(<vscale x 4 x i64> %0, ptr %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i64_nxv4i64_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
; CHECK-NEXT:    vloxei16.v v8, (a0), v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vloxei.mask.nxv4i64.nxv4i16(
    <vscale x 4 x i64> %0,
    ptr %1,
    <vscale x 4 x i16> %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x i64> %a
}
2534
2535declare <vscale x 8 x i64> @llvm.riscv.vloxei.nxv8i64.nxv8i16(
2536  <vscale x 8 x i64>,
2537  ptr,
2538  <vscale x 8 x i16>,
2539  iXLen);
2540
; Unmasked ordered-indexed load of nxv8i64 via 16-bit indices; result materialized in v16 then copied to v8, avoiding overlap with the index register.
define <vscale x 8 x i64> @intrinsic_vloxei_v_nxv8i64_nxv8i64_nxv8i16(ptr %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vloxei_v_nxv8i64_nxv8i64_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
; CHECK-NEXT:    vloxei16.v v16, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vloxei.nxv8i64.nxv8i16(
    <vscale x 8 x i64> undef,
    ptr %0,
    <vscale x 8 x i16> %1,
    iXLen %2)

  ret <vscale x 8 x i64> %a
}
2557
2558declare <vscale x 8 x i64> @llvm.riscv.vloxei.mask.nxv8i64.nxv8i16(
2559  <vscale x 8 x i64>,
2560  ptr,
2561  <vscale x 8 x i16>,
2562  <vscale x 8 x i1>,
2563  iXLen,
2564  iXLen);
2565
; Masked ordered-indexed load of nxv8i64 via 16-bit indices; policy operand 1 = tail-agnostic, mask-undisturbed ("ta, mu").
define <vscale x 8 x i64> @intrinsic_vloxei_mask_v_nxv8i64_nxv8i64_nxv8i16(<vscale x 8 x i64> %0, ptr %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i64_nxv8i64_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
; CHECK-NEXT:    vloxei16.v v8, (a0), v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vloxei.mask.nxv8i64.nxv8i16(
    <vscale x 8 x i64> %0,
    ptr %1,
    <vscale x 8 x i16> %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x i64> %a
}
2582
2583declare <vscale x 1 x half> @llvm.riscv.vloxei.nxv1f16.nxv1i16(
2584  <vscale x 1 x half>,
2585  ptr,
2586  <vscale x 1 x i16>,
2587  iXLen);
2588
; Unmasked ordered-indexed load of nxv1f16; undef passthru lets the destination share v8 with the index operand.
define <vscale x 1 x half> @intrinsic_vloxei_v_nxv1f16_nxv1f16_nxv1i16(ptr %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vloxei_v_nxv1f16_nxv1f16_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vloxei16.v v8, (a0), v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vloxei.nxv1f16.nxv1i16(
    <vscale x 1 x half> undef,
    ptr %0,
    <vscale x 1 x i16> %1,
    iXLen %2)

  ret <vscale x 1 x half> %a
}
2604
2605declare <vscale x 1 x half> @llvm.riscv.vloxei.mask.nxv1f16.nxv1i16(
2606  <vscale x 1 x half>,
2607  ptr,
2608  <vscale x 1 x i16>,
2609  <vscale x 1 x i1>,
2610  iXLen,
2611  iXLen);
2612
; Masked ordered-indexed load of nxv1f16; policy operand 1 = tail-agnostic, mask-undisturbed ("ta, mu").
define <vscale x 1 x half> @intrinsic_vloxei_mask_v_nxv1f16_nxv1f16_nxv1i16(<vscale x 1 x half> %0, ptr %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f16_nxv1f16_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT:    vloxei16.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vloxei.mask.nxv1f16.nxv1i16(
    <vscale x 1 x half> %0,
    ptr %1,
    <vscale x 1 x i16> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x half> %a
}
2629
2630declare <vscale x 2 x half> @llvm.riscv.vloxei.nxv2f16.nxv2i16(
2631  <vscale x 2 x half>,
2632  ptr,
2633  <vscale x 2 x i16>,
2634  iXLen);
2635
; Unmasked ordered-indexed load of nxv2f16; undef passthru lets the destination share v8 with the index operand.
define <vscale x 2 x half> @intrinsic_vloxei_v_nxv2f16_nxv2f16_nxv2i16(ptr %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vloxei_v_nxv2f16_nxv2f16_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT:    vloxei16.v v8, (a0), v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x half> @llvm.riscv.vloxei.nxv2f16.nxv2i16(
    <vscale x 2 x half> undef,
    ptr %0,
    <vscale x 2 x i16> %1,
    iXLen %2)

  ret <vscale x 2 x half> %a
}
2651
2652declare <vscale x 2 x half> @llvm.riscv.vloxei.mask.nxv2f16.nxv2i16(
2653  <vscale x 2 x half>,
2654  ptr,
2655  <vscale x 2 x i16>,
2656  <vscale x 2 x i1>,
2657  iXLen,
2658  iXLen);
2659
; Masked ordered-indexed load of nxv2f16; policy operand 1 = tail-agnostic, mask-undisturbed ("ta, mu").
define <vscale x 2 x half> @intrinsic_vloxei_mask_v_nxv2f16_nxv2f16_nxv2i16(<vscale x 2 x half> %0, ptr %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f16_nxv2f16_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT:    vloxei16.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x half> @llvm.riscv.vloxei.mask.nxv2f16.nxv2i16(
    <vscale x 2 x half> %0,
    ptr %1,
    <vscale x 2 x i16> %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x half> %a
}
2676
2677declare <vscale x 4 x half> @llvm.riscv.vloxei.nxv4f16.nxv4i16(
2678  <vscale x 4 x half>,
2679  ptr,
2680  <vscale x 4 x i16>,
2681  iXLen);
2682
; Unmasked ordered-indexed load of nxv4f16; undef passthru lets the destination share v8 with the index operand.
define <vscale x 4 x half> @intrinsic_vloxei_v_nxv4f16_nxv4f16_nxv4i16(ptr %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vloxei_v_nxv4f16_nxv4f16_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vloxei16.v v8, (a0), v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vloxei.nxv4f16.nxv4i16(
    <vscale x 4 x half> undef,
    ptr %0,
    <vscale x 4 x i16> %1,
    iXLen %2)

  ret <vscale x 4 x half> %a
}
2698
2699declare <vscale x 4 x half> @llvm.riscv.vloxei.mask.nxv4f16.nxv4i16(
2700  <vscale x 4 x half>,
2701  ptr,
2702  <vscale x 4 x i16>,
2703  <vscale x 4 x i1>,
2704  iXLen,
2705  iXLen);
2706
; Masked ordered-indexed load of nxv4f16; policy operand 1 = tail-agnostic, mask-undisturbed ("ta, mu").
define <vscale x 4 x half> @intrinsic_vloxei_mask_v_nxv4f16_nxv4f16_nxv4i16(<vscale x 4 x half> %0, ptr %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f16_nxv4f16_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT:    vloxei16.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vloxei.mask.nxv4f16.nxv4i16(
    <vscale x 4 x half> %0,
    ptr %1,
    <vscale x 4 x i16> %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x half> %a
}
2723
2724declare <vscale x 8 x half> @llvm.riscv.vloxei.nxv8f16.nxv8i16(
2725  <vscale x 8 x half>,
2726  ptr,
2727  <vscale x 8 x i16>,
2728  iXLen);
2729
; Unmasked ordered-indexed load of nxv8f16; undef passthru lets the destination share v8 with the index operand.
define <vscale x 8 x half> @intrinsic_vloxei_v_nxv8f16_nxv8f16_nxv8i16(ptr %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vloxei_v_nxv8f16_nxv8f16_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT:    vloxei16.v v8, (a0), v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x half> @llvm.riscv.vloxei.nxv8f16.nxv8i16(
    <vscale x 8 x half> undef,
    ptr %0,
    <vscale x 8 x i16> %1,
    iXLen %2)

  ret <vscale x 8 x half> %a
}
2745
2746declare <vscale x 8 x half> @llvm.riscv.vloxei.mask.nxv8f16.nxv8i16(
2747  <vscale x 8 x half>,
2748  ptr,
2749  <vscale x 8 x i16>,
2750  <vscale x 8 x i1>,
2751  iXLen,
2752  iXLen);
2753
; Masked ordered-indexed load of nxv8f16; policy operand 1 = tail-agnostic, mask-undisturbed ("ta, mu").
define <vscale x 8 x half> @intrinsic_vloxei_mask_v_nxv8f16_nxv8f16_nxv8i16(<vscale x 8 x half> %0, ptr %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f16_nxv8f16_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT:    vloxei16.v v8, (a0), v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x half> @llvm.riscv.vloxei.mask.nxv8f16.nxv8i16(
    <vscale x 8 x half> %0,
    ptr %1,
    <vscale x 8 x i16> %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x half> %a
}
2770
2771declare <vscale x 16 x half> @llvm.riscv.vloxei.nxv16f16.nxv16i16(
2772  <vscale x 16 x half>,
2773  ptr,
2774  <vscale x 16 x i16>,
2775  iXLen);
2776
; Unmasked ordered-indexed load of nxv16f16; undef passthru lets the destination share v8 with the index operand.
define <vscale x 16 x half> @intrinsic_vloxei_v_nxv16f16_nxv16f16_nxv16i16(ptr %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vloxei_v_nxv16f16_nxv16f16_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT:    vloxei16.v v8, (a0), v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x half> @llvm.riscv.vloxei.nxv16f16.nxv16i16(
    <vscale x 16 x half> undef,
    ptr %0,
    <vscale x 16 x i16> %1,
    iXLen %2)

  ret <vscale x 16 x half> %a
}
2792
2793declare <vscale x 16 x half> @llvm.riscv.vloxei.mask.nxv16f16.nxv16i16(
2794  <vscale x 16 x half>,
2795  ptr,
2796  <vscale x 16 x i16>,
2797  <vscale x 16 x i1>,
2798  iXLen,
2799  iXLen);
2800
; Masked ordered-indexed load of nxv16f16; policy operand 1 = tail-agnostic, mask-undisturbed ("ta, mu").
define <vscale x 16 x half> @intrinsic_vloxei_mask_v_nxv16f16_nxv16f16_nxv16i16(<vscale x 16 x half> %0, ptr %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16f16_nxv16f16_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT:    vloxei16.v v8, (a0), v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x half> @llvm.riscv.vloxei.mask.nxv16f16.nxv16i16(
    <vscale x 16 x half> %0,
    ptr %1,
    <vscale x 16 x i16> %2,
    <vscale x 16 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 16 x half> %a
}
2817
2818declare <vscale x 32 x half> @llvm.riscv.vloxei.nxv32f16.nxv32i16(
2819  <vscale x 32 x half>,
2820  ptr,
2821  <vscale x 32 x i16>,
2822  iXLen);
2823
2824define <vscale x 32 x half> @intrinsic_vloxei_v_nxv32f16_nxv32f16_nxv32i16(ptr %0, <vscale x 32 x i16> %1, iXLen %2) nounwind {
2825; CHECK-LABEL: intrinsic_vloxei_v_nxv32f16_nxv32f16_nxv32i16:
2826; CHECK:       # %bb.0: # %entry
2827; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
2828; CHECK-NEXT:    vloxei16.v v8, (a0), v8
2829; CHECK-NEXT:    ret
2830entry:
2831  %a = call <vscale x 32 x half> @llvm.riscv.vloxei.nxv32f16.nxv32i16(
2832    <vscale x 32 x half> undef,
2833    ptr %0,
2834    <vscale x 32 x i16> %1,
2835    iXLen %2)
2836
2837  ret <vscale x 32 x half> %a
2838}
2839
2840declare <vscale x 32 x half> @llvm.riscv.vloxei.mask.nxv32f16.nxv32i16(
2841  <vscale x 32 x half>,
2842  ptr,
2843  <vscale x 32 x i16>,
2844  <vscale x 32 x i1>,
2845  iXLen,
2846  iXLen);
2847
2848define <vscale x 32 x half> @intrinsic_vloxei_mask_v_nxv32f16_nxv32f16_nxv32i16(<vscale x 32 x half> %0, ptr %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
2849; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv32f16_nxv32f16_nxv32i16:
2850; CHECK:       # %bb.0: # %entry
2851; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
2852; CHECK-NEXT:    vloxei16.v v8, (a0), v16, v0.t
2853; CHECK-NEXT:    ret
2854entry:
2855  %a = call <vscale x 32 x half> @llvm.riscv.vloxei.mask.nxv32f16.nxv32i16(
2856    <vscale x 32 x half> %0,
2857    ptr %1,
2858    <vscale x 32 x i16> %2,
2859    <vscale x 32 x i1> %3,
2860    iXLen %4, iXLen 1)
2861
2862  ret <vscale x 32 x half> %a
2863}
2864
; Indexed loads (vloxei) of f32 data using i16 indices (index EEW narrower
; than data SEW), for nxv1 through nxv16.  Because data and index LMULs
; differ, the unmasked forms load into a scratch register group and copy to
; v8 (vmv1r.v / vmv.v.v); the masked forms load in place with v0.t.

declare <vscale x 1 x float> @llvm.riscv.vloxei.nxv1f32.nxv1i16(
  <vscale x 1 x float>,
  ptr,
  <vscale x 1 x i16>,
  iXLen);

define <vscale x 1 x float> @intrinsic_vloxei_v_nxv1f32_nxv1f32_nxv1i16(ptr %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vloxei_v_nxv1f32_nxv1f32_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    vloxei16.v v9, (a0), v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vloxei.nxv1f32.nxv1i16(
    <vscale x 1 x float> undef,
    ptr %0,
    <vscale x 1 x i16> %1,
    iXLen %2)

  ret <vscale x 1 x float> %a
}

declare <vscale x 1 x float> @llvm.riscv.vloxei.mask.nxv1f32.nxv1i16(
  <vscale x 1 x float>,
  ptr,
  <vscale x 1 x i16>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x float> @intrinsic_vloxei_mask_v_nxv1f32_nxv1f32_nxv1i16(<vscale x 1 x float> %0, ptr %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f32_nxv1f32_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT:    vloxei16.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vloxei.mask.nxv1f32.nxv1i16(
    <vscale x 1 x float> %0,
    ptr %1,
    <vscale x 1 x i16> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x float> %a
}

declare <vscale x 2 x float> @llvm.riscv.vloxei.nxv2f32.nxv2i16(
  <vscale x 2 x float>,
  ptr,
  <vscale x 2 x i16>,
  iXLen);

define <vscale x 2 x float> @intrinsic_vloxei_v_nxv2f32_nxv2f32_nxv2i16(ptr %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vloxei_v_nxv2f32_nxv2f32_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vloxei16.v v9, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vloxei.nxv2f32.nxv2i16(
    <vscale x 2 x float> undef,
    ptr %0,
    <vscale x 2 x i16> %1,
    iXLen %2)

  ret <vscale x 2 x float> %a
}

declare <vscale x 2 x float> @llvm.riscv.vloxei.mask.nxv2f32.nxv2i16(
  <vscale x 2 x float>,
  ptr,
  <vscale x 2 x i16>,
  <vscale x 2 x i1>,
  iXLen,
  iXLen);

define <vscale x 2 x float> @intrinsic_vloxei_mask_v_nxv2f32_nxv2f32_nxv2i16(<vscale x 2 x float> %0, ptr %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f32_nxv2f32_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT:    vloxei16.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vloxei.mask.nxv2f32.nxv2i16(
    <vscale x 2 x float> %0,
    ptr %1,
    <vscale x 2 x i16> %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x float> %a
}

declare <vscale x 4 x float> @llvm.riscv.vloxei.nxv4f32.nxv4i16(
  <vscale x 4 x float>,
  ptr,
  <vscale x 4 x i16>,
  iXLen);

define <vscale x 4 x float> @intrinsic_vloxei_v_nxv4f32_nxv4f32_nxv4i16(ptr %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vloxei_v_nxv4f32_nxv4f32_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT:    vloxei16.v v10, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x float> @llvm.riscv.vloxei.nxv4f32.nxv4i16(
    <vscale x 4 x float> undef,
    ptr %0,
    <vscale x 4 x i16> %1,
    iXLen %2)

  ret <vscale x 4 x float> %a
}

declare <vscale x 4 x float> @llvm.riscv.vloxei.mask.nxv4f32.nxv4i16(
  <vscale x 4 x float>,
  ptr,
  <vscale x 4 x i16>,
  <vscale x 4 x i1>,
  iXLen,
  iXLen);

define <vscale x 4 x float> @intrinsic_vloxei_mask_v_nxv4f32_nxv4f32_nxv4i16(<vscale x 4 x float> %0, ptr %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f32_nxv4f32_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT:    vloxei16.v v8, (a0), v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x float> @llvm.riscv.vloxei.mask.nxv4f32.nxv4i16(
    <vscale x 4 x float> %0,
    ptr %1,
    <vscale x 4 x i16> %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x float> %a
}

declare <vscale x 8 x float> @llvm.riscv.vloxei.nxv8f32.nxv8i16(
  <vscale x 8 x float>,
  ptr,
  <vscale x 8 x i16>,
  iXLen);

define <vscale x 8 x float> @intrinsic_vloxei_v_nxv8f32_nxv8f32_nxv8i16(ptr %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vloxei_v_nxv8f32_nxv8f32_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT:    vloxei16.v v12, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vloxei.nxv8f32.nxv8i16(
    <vscale x 8 x float> undef,
    ptr %0,
    <vscale x 8 x i16> %1,
    iXLen %2)

  ret <vscale x 8 x float> %a
}

declare <vscale x 8 x float> @llvm.riscv.vloxei.mask.nxv8f32.nxv8i16(
  <vscale x 8 x float>,
  ptr,
  <vscale x 8 x i16>,
  <vscale x 8 x i1>,
  iXLen,
  iXLen);

define <vscale x 8 x float> @intrinsic_vloxei_mask_v_nxv8f32_nxv8f32_nxv8i16(<vscale x 8 x float> %0, ptr %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f32_nxv8f32_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT:    vloxei16.v v8, (a0), v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vloxei.mask.nxv8f32.nxv8i16(
    <vscale x 8 x float> %0,
    ptr %1,
    <vscale x 8 x i16> %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x float> %a
}

declare <vscale x 16 x float> @llvm.riscv.vloxei.nxv16f32.nxv16i16(
  <vscale x 16 x float>,
  ptr,
  <vscale x 16 x i16>,
  iXLen);

define <vscale x 16 x float> @intrinsic_vloxei_v_nxv16f32_nxv16f32_nxv16i16(ptr %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vloxei_v_nxv16f32_nxv16f32_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT:    vloxei16.v v16, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x float> @llvm.riscv.vloxei.nxv16f32.nxv16i16(
    <vscale x 16 x float> undef,
    ptr %0,
    <vscale x 16 x i16> %1,
    iXLen %2)

  ret <vscale x 16 x float> %a
}

declare <vscale x 16 x float> @llvm.riscv.vloxei.mask.nxv16f32.nxv16i16(
  <vscale x 16 x float>,
  ptr,
  <vscale x 16 x i16>,
  <vscale x 16 x i1>,
  iXLen,
  iXLen);

define <vscale x 16 x float> @intrinsic_vloxei_mask_v_nxv16f32_nxv16f32_nxv16i16(<vscale x 16 x float> %0, ptr %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16f32_nxv16f32_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
; CHECK-NEXT:    vloxei16.v v8, (a0), v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x float> @llvm.riscv.vloxei.mask.nxv16f32.nxv16i16(
    <vscale x 16 x float> %0,
    ptr %1,
    <vscale x 16 x i16> %2,
    <vscale x 16 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 16 x float> %a
}
3104
; Indexed loads (vloxei) of f64 data using i16 indices, for nxv1 through
; nxv8.  As with the f32 cases, the narrower index LMUL forces the unmasked
; forms through a scratch destination plus a vmv.v.v copy into v8.

declare <vscale x 1 x double> @llvm.riscv.vloxei.nxv1f64.nxv1i16(
  <vscale x 1 x double>,
  ptr,
  <vscale x 1 x i16>,
  iXLen);

define <vscale x 1 x double> @intrinsic_vloxei_v_nxv1f64_nxv1f64_nxv1i16(ptr %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vloxei_v_nxv1f64_nxv1f64_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT:    vloxei16.v v9, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vloxei.nxv1f64.nxv1i16(
    <vscale x 1 x double> undef,
    ptr %0,
    <vscale x 1 x i16> %1,
    iXLen %2)

  ret <vscale x 1 x double> %a
}

declare <vscale x 1 x double> @llvm.riscv.vloxei.mask.nxv1f64.nxv1i16(
  <vscale x 1 x double>,
  ptr,
  <vscale x 1 x i16>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x double> @intrinsic_vloxei_mask_v_nxv1f64_nxv1f64_nxv1i16(<vscale x 1 x double> %0, ptr %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f64_nxv1f64_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT:    vloxei16.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vloxei.mask.nxv1f64.nxv1i16(
    <vscale x 1 x double> %0,
    ptr %1,
    <vscale x 1 x i16> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x double> %a
}

declare <vscale x 2 x double> @llvm.riscv.vloxei.nxv2f64.nxv2i16(
  <vscale x 2 x double>,
  ptr,
  <vscale x 2 x i16>,
  iXLen);

define <vscale x 2 x double> @intrinsic_vloxei_v_nxv2f64_nxv2f64_nxv2i16(ptr %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vloxei_v_nxv2f64_nxv2f64_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT:    vloxei16.v v10, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x double> @llvm.riscv.vloxei.nxv2f64.nxv2i16(
    <vscale x 2 x double> undef,
    ptr %0,
    <vscale x 2 x i16> %1,
    iXLen %2)

  ret <vscale x 2 x double> %a
}

declare <vscale x 2 x double> @llvm.riscv.vloxei.mask.nxv2f64.nxv2i16(
  <vscale x 2 x double>,
  ptr,
  <vscale x 2 x i16>,
  <vscale x 2 x i1>,
  iXLen,
  iXLen);

define <vscale x 2 x double> @intrinsic_vloxei_mask_v_nxv2f64_nxv2f64_nxv2i16(<vscale x 2 x double> %0, ptr %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f64_nxv2f64_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT:    vloxei16.v v8, (a0), v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x double> @llvm.riscv.vloxei.mask.nxv2f64.nxv2i16(
    <vscale x 2 x double> %0,
    ptr %1,
    <vscale x 2 x i16> %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x double> %a
}

declare <vscale x 4 x double> @llvm.riscv.vloxei.nxv4f64.nxv4i16(
  <vscale x 4 x double>,
  ptr,
  <vscale x 4 x i16>,
  iXLen);

define <vscale x 4 x double> @intrinsic_vloxei_v_nxv4f64_nxv4f64_nxv4i16(ptr %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vloxei_v_nxv4f64_nxv4f64_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
; CHECK-NEXT:    vloxei16.v v12, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x double> @llvm.riscv.vloxei.nxv4f64.nxv4i16(
    <vscale x 4 x double> undef,
    ptr %0,
    <vscale x 4 x i16> %1,
    iXLen %2)

  ret <vscale x 4 x double> %a
}

declare <vscale x 4 x double> @llvm.riscv.vloxei.mask.nxv4f64.nxv4i16(
  <vscale x 4 x double>,
  ptr,
  <vscale x 4 x i16>,
  <vscale x 4 x i1>,
  iXLen,
  iXLen);

define <vscale x 4 x double> @intrinsic_vloxei_mask_v_nxv4f64_nxv4f64_nxv4i16(<vscale x 4 x double> %0, ptr %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f64_nxv4f64_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
; CHECK-NEXT:    vloxei16.v v8, (a0), v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x double> @llvm.riscv.vloxei.mask.nxv4f64.nxv4i16(
    <vscale x 4 x double> %0,
    ptr %1,
    <vscale x 4 x i16> %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x double> %a
}

declare <vscale x 8 x double> @llvm.riscv.vloxei.nxv8f64.nxv8i16(
  <vscale x 8 x double>,
  ptr,
  <vscale x 8 x i16>,
  iXLen);

define <vscale x 8 x double> @intrinsic_vloxei_v_nxv8f64_nxv8f64_nxv8i16(ptr %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vloxei_v_nxv8f64_nxv8f64_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
; CHECK-NEXT:    vloxei16.v v16, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x double> @llvm.riscv.vloxei.nxv8f64.nxv8i16(
    <vscale x 8 x double> undef,
    ptr %0,
    <vscale x 8 x i16> %1,
    iXLen %2)

  ret <vscale x 8 x double> %a
}

declare <vscale x 8 x double> @llvm.riscv.vloxei.mask.nxv8f64.nxv8i16(
  <vscale x 8 x double>,
  ptr,
  <vscale x 8 x i16>,
  <vscale x 8 x i1>,
  iXLen,
  iXLen);

define <vscale x 8 x double> @intrinsic_vloxei_mask_v_nxv8f64_nxv8f64_nxv8i16(<vscale x 8 x double> %0, ptr %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f64_nxv8f64_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
; CHECK-NEXT:    vloxei16.v v8, (a0), v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x double> @llvm.riscv.vloxei.mask.nxv8f64.nxv8i16(
    <vscale x 8 x double> %0,
    ptr %1,
    <vscale x 8 x i16> %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x double> %a
}
3296
; Indexed loads (vloxei) of i8 data using i8 indices, for nxv1 through
; nxv64.  Data and index EEW/LMUL match, so the unmasked forms can reuse
; the index register group as the destination (vloxei8.v v8, (a0), v8)
; with no extra copy.

declare <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,
  ptr,
  <vscale x 1 x i8>,
  iXLen);

define <vscale x 1 x i8> @intrinsic_vloxei_v_nxv1i8_nxv1i8_nxv1i8(ptr %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vloxei_v_nxv1i8_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vloxei8.v v8, (a0), v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8.nxv1i8(
    <vscale x 1 x i8> undef,
    ptr %0,
    <vscale x 1 x i8> %1,
    iXLen %2)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vloxei.mask.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,
  ptr,
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x i8> @intrinsic_vloxei_mask_v_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, ptr %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i8_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT:    vloxei8.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vloxei.mask.nxv1i8.nxv1i8(
    <vscale x 1 x i8> %0,
    ptr %1,
    <vscale x 1 x i8> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 2 x i8> @llvm.riscv.vloxei.nxv2i8.nxv2i8(
  <vscale x 2 x i8>,
  ptr,
  <vscale x 2 x i8>,
  iXLen);

define <vscale x 2 x i8> @intrinsic_vloxei_v_nxv2i8_nxv2i8_nxv2i8(ptr %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vloxei_v_nxv2i8_nxv2i8_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT:    vloxei8.v v8, (a0), v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vloxei.nxv2i8.nxv2i8(
    <vscale x 2 x i8> undef,
    ptr %0,
    <vscale x 2 x i8> %1,
    iXLen %2)

  ret <vscale x 2 x i8> %a
}

declare <vscale x 2 x i8> @llvm.riscv.vloxei.mask.nxv2i8.nxv2i8(
  <vscale x 2 x i8>,
  ptr,
  <vscale x 2 x i8>,
  <vscale x 2 x i1>,
  iXLen,
  iXLen);

define <vscale x 2 x i8> @intrinsic_vloxei_mask_v_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, ptr %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i8_nxv2i8_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT:    vloxei8.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vloxei.mask.nxv2i8.nxv2i8(
    <vscale x 2 x i8> %0,
    ptr %1,
    <vscale x 2 x i8> %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x i8> %a
}

declare <vscale x 4 x i8> @llvm.riscv.vloxei.nxv4i8.nxv4i8(
  <vscale x 4 x i8>,
  ptr,
  <vscale x 4 x i8>,
  iXLen);

define <vscale x 4 x i8> @intrinsic_vloxei_v_nxv4i8_nxv4i8_nxv4i8(ptr %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vloxei_v_nxv4i8_nxv4i8_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vloxei8.v v8, (a0), v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vloxei.nxv4i8.nxv4i8(
    <vscale x 4 x i8> undef,
    ptr %0,
    <vscale x 4 x i8> %1,
    iXLen %2)

  ret <vscale x 4 x i8> %a
}

declare <vscale x 4 x i8> @llvm.riscv.vloxei.mask.nxv4i8.nxv4i8(
  <vscale x 4 x i8>,
  ptr,
  <vscale x 4 x i8>,
  <vscale x 4 x i1>,
  iXLen,
  iXLen);

define <vscale x 4 x i8> @intrinsic_vloxei_mask_v_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, ptr %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i8_nxv4i8_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT:    vloxei8.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vloxei.mask.nxv4i8.nxv4i8(
    <vscale x 4 x i8> %0,
    ptr %1,
    <vscale x 4 x i8> %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vloxei.nxv8i8.nxv8i8(
  <vscale x 8 x i8>,
  ptr,
  <vscale x 8 x i8>,
  iXLen);

define <vscale x 8 x i8> @intrinsic_vloxei_v_nxv8i8_nxv8i8_nxv8i8(ptr %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vloxei_v_nxv8i8_nxv8i8_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT:    vloxei8.v v8, (a0), v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vloxei.nxv8i8.nxv8i8(
    <vscale x 8 x i8> undef,
    ptr %0,
    <vscale x 8 x i8> %1,
    iXLen %2)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vloxei.mask.nxv8i8.nxv8i8(
  <vscale x 8 x i8>,
  ptr,
  <vscale x 8 x i8>,
  <vscale x 8 x i1>,
  iXLen,
  iXLen);

define <vscale x 8 x i8> @intrinsic_vloxei_mask_v_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, ptr %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i8_nxv8i8_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT:    vloxei8.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vloxei.mask.nxv8i8.nxv8i8(
    <vscale x 8 x i8> %0,
    ptr %1,
    <vscale x 8 x i8> %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 16 x i8> @llvm.riscv.vloxei.nxv16i8.nxv16i8(
  <vscale x 16 x i8>,
  ptr,
  <vscale x 16 x i8>,
  iXLen);

define <vscale x 16 x i8> @intrinsic_vloxei_v_nxv16i8_nxv16i8_nxv16i8(ptr %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vloxei_v_nxv16i8_nxv16i8_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT:    vloxei8.v v8, (a0), v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vloxei.nxv16i8.nxv16i8(
    <vscale x 16 x i8> undef,
    ptr %0,
    <vscale x 16 x i8> %1,
    iXLen %2)

  ret <vscale x 16 x i8> %a
}

declare <vscale x 16 x i8> @llvm.riscv.vloxei.mask.nxv16i8.nxv16i8(
  <vscale x 16 x i8>,
  ptr,
  <vscale x 16 x i8>,
  <vscale x 16 x i1>,
  iXLen,
  iXLen);

define <vscale x 16 x i8> @intrinsic_vloxei_mask_v_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, ptr %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16i8_nxv16i8_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT:    vloxei8.v v8, (a0), v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vloxei.mask.nxv16i8.nxv16i8(
    <vscale x 16 x i8> %0,
    ptr %1,
    <vscale x 16 x i8> %2,
    <vscale x 16 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 16 x i8> %a
}

declare <vscale x 32 x i8> @llvm.riscv.vloxei.nxv32i8.nxv32i8(
  <vscale x 32 x i8>,
  ptr,
  <vscale x 32 x i8>,
  iXLen);

define <vscale x 32 x i8> @intrinsic_vloxei_v_nxv32i8_nxv32i8_nxv32i8(ptr %0, <vscale x 32 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vloxei_v_nxv32i8_nxv32i8_nxv32i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT:    vloxei8.v v8, (a0), v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vloxei.nxv32i8.nxv32i8(
    <vscale x 32 x i8> undef,
    ptr %0,
    <vscale x 32 x i8> %1,
    iXLen %2)

  ret <vscale x 32 x i8> %a
}

declare <vscale x 32 x i8> @llvm.riscv.vloxei.mask.nxv32i8.nxv32i8(
  <vscale x 32 x i8>,
  ptr,
  <vscale x 32 x i8>,
  <vscale x 32 x i1>,
  iXLen,
  iXLen);

define <vscale x 32 x i8> @intrinsic_vloxei_mask_v_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, ptr %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv32i8_nxv32i8_nxv32i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT:    vloxei8.v v8, (a0), v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vloxei.mask.nxv32i8.nxv32i8(
    <vscale x 32 x i8> %0,
    ptr %1,
    <vscale x 32 x i8> %2,
    <vscale x 32 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 32 x i8> %a
}

declare <vscale x 64 x i8> @llvm.riscv.vloxei.nxv64i8.nxv64i8(
  <vscale x 64 x i8>,
  ptr,
  <vscale x 64 x i8>,
  iXLen);

define <vscale x 64 x i8> @intrinsic_vloxei_v_nxv64i8_nxv64i8_nxv64i8(ptr %0, <vscale x 64 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vloxei_v_nxv64i8_nxv64i8_nxv64i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT:    vloxei8.v v8, (a0), v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 64 x i8> @llvm.riscv.vloxei.nxv64i8.nxv64i8(
    <vscale x 64 x i8> undef,
    ptr %0,
    <vscale x 64 x i8> %1,
    iXLen %2)

  ret <vscale x 64 x i8> %a
}

declare <vscale x 64 x i8> @llvm.riscv.vloxei.mask.nxv64i8.nxv64i8(
  <vscale x 64 x i8>,
  ptr,
  <vscale x 64 x i8>,
  <vscale x 64 x i1>,
  iXLen,
  iXLen);

define <vscale x 64 x i8> @intrinsic_vloxei_mask_v_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, ptr %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv64i8_nxv64i8_nxv64i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
; CHECK-NEXT:    vloxei8.v v8, (a0), v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 64 x i8> @llvm.riscv.vloxei.mask.nxv64i8.nxv64i8(
    <vscale x 64 x i8> %0,
    ptr %1,
    <vscale x 64 x i8> %2,
    <vscale x 64 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 64 x i8> %a
}
3625
3626declare <vscale x 1 x i16> @llvm.riscv.vloxei.nxv1i16.nxv1i8(
3627  <vscale x 1 x i16>,
3628  ptr,
3629  <vscale x 1 x i8>,
3630  iXLen);
3631
3632define <vscale x 1 x i16> @intrinsic_vloxei_v_nxv1i16_nxv1i16_nxv1i8(ptr %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
3633; CHECK-LABEL: intrinsic_vloxei_v_nxv1i16_nxv1i16_nxv1i8:
3634; CHECK:       # %bb.0: # %entry
3635; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
3636; CHECK-NEXT:    vloxei8.v v9, (a0), v8
3637; CHECK-NEXT:    vmv1r.v v8, v9
3638; CHECK-NEXT:    ret
3639entry:
3640  %a = call <vscale x 1 x i16> @llvm.riscv.vloxei.nxv1i16.nxv1i8(
3641    <vscale x 1 x i16> undef,
3642    ptr %0,
3643    <vscale x 1 x i8> %1,
3644    iXLen %2)
3645
3646  ret <vscale x 1 x i16> %a
3647}
3648
3649declare <vscale x 1 x i16> @llvm.riscv.vloxei.mask.nxv1i16.nxv1i8(
3650  <vscale x 1 x i16>,
3651  ptr,
3652  <vscale x 1 x i8>,
3653  <vscale x 1 x i1>,
3654  iXLen,
3655  iXLen);
3656
3657define <vscale x 1 x i16> @intrinsic_vloxei_mask_v_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, ptr %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
3658; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i16_nxv1i16_nxv1i8:
3659; CHECK:       # %bb.0: # %entry
3660; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
3661; CHECK-NEXT:    vloxei8.v v8, (a0), v9, v0.t
3662; CHECK-NEXT:    ret
3663entry:
3664  %a = call <vscale x 1 x i16> @llvm.riscv.vloxei.mask.nxv1i16.nxv1i8(
3665    <vscale x 1 x i16> %0,
3666    ptr %1,
3667    <vscale x 1 x i8> %2,
3668    <vscale x 1 x i1> %3,
3669    iXLen %4, iXLen 1)
3670
3671  ret <vscale x 1 x i16> %a
3672}
3673
3674declare <vscale x 2 x i16> @llvm.riscv.vloxei.nxv2i16.nxv2i8(
3675  <vscale x 2 x i16>,
3676  ptr,
3677  <vscale x 2 x i8>,
3678  iXLen);
3679
3680define <vscale x 2 x i16> @intrinsic_vloxei_v_nxv2i16_nxv2i16_nxv2i8(ptr %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
3681; CHECK-LABEL: intrinsic_vloxei_v_nxv2i16_nxv2i16_nxv2i8:
3682; CHECK:       # %bb.0: # %entry
3683; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
3684; CHECK-NEXT:    vloxei8.v v9, (a0), v8
3685; CHECK-NEXT:    vmv1r.v v8, v9
3686; CHECK-NEXT:    ret
3687entry:
3688  %a = call <vscale x 2 x i16> @llvm.riscv.vloxei.nxv2i16.nxv2i8(
3689    <vscale x 2 x i16> undef,
3690    ptr %0,
3691    <vscale x 2 x i8> %1,
3692    iXLen %2)
3693
3694  ret <vscale x 2 x i16> %a
3695}
3696
3697declare <vscale x 2 x i16> @llvm.riscv.vloxei.mask.nxv2i16.nxv2i8(
3698  <vscale x 2 x i16>,
3699  ptr,
3700  <vscale x 2 x i8>,
3701  <vscale x 2 x i1>,
3702  iXLen,
3703  iXLen);
3704
3705define <vscale x 2 x i16> @intrinsic_vloxei_mask_v_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, ptr %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
3706; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i16_nxv2i16_nxv2i8:
3707; CHECK:       # %bb.0: # %entry
3708; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
3709; CHECK-NEXT:    vloxei8.v v8, (a0), v9, v0.t
3710; CHECK-NEXT:    ret
3711entry:
3712  %a = call <vscale x 2 x i16> @llvm.riscv.vloxei.mask.nxv2i16.nxv2i8(
3713    <vscale x 2 x i16> %0,
3714    ptr %1,
3715    <vscale x 2 x i8> %2,
3716    <vscale x 2 x i1> %3,
3717    iXLen %4, iXLen 1)
3718
3719  ret <vscale x 2 x i16> %a
3720}
3721
declare <vscale x 4 x i16> @llvm.riscv.vloxei.nxv4i16.nxv4i8(
  <vscale x 4 x i16>,
  ptr,
  <vscale x 4 x i8>,
  iXLen);

; Unmasked ordered indexed load; passthru is poison (undef is deprecated in LLVM IR).
define <vscale x 4 x i16> @intrinsic_vloxei_v_nxv4i16_nxv4i16_nxv4i8(ptr %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vloxei_v_nxv4i16_nxv4i16_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vloxei8.v v9, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vloxei.nxv4i16.nxv4i8(
    <vscale x 4 x i16> poison,
    ptr %0,
    <vscale x 4 x i8> %1,
    iXLen %2)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vloxei.mask.nxv4i16.nxv4i8(
  <vscale x 4 x i16>,
  ptr,
  <vscale x 4 x i8>,
  <vscale x 4 x i1>,
  iXLen,
  iXLen);

; Masked variant: loads into passthru %0 under mask %3 ("ta, mu"; trailing policy operand = 1).
define <vscale x 4 x i16> @intrinsic_vloxei_mask_v_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, ptr %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i16_nxv4i16_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT:    vloxei8.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vloxei.mask.nxv4i16.nxv4i8(
    <vscale x 4 x i16> %0,
    ptr %1,
    <vscale x 4 x i8> %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x i16> %a
}
3769
declare <vscale x 8 x i16> @llvm.riscv.vloxei.nxv8i16.nxv8i8(
  <vscale x 8 x i16>,
  ptr,
  <vscale x 8 x i8>,
  iXLen);

; Unmasked ordered indexed load; passthru is poison (undef is deprecated in LLVM IR).
define <vscale x 8 x i16> @intrinsic_vloxei_v_nxv8i16_nxv8i16_nxv8i8(ptr %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vloxei_v_nxv8i16_nxv8i16_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT:    vloxei8.v v10, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vloxei.nxv8i16.nxv8i8(
    <vscale x 8 x i16> poison,
    ptr %0,
    <vscale x 8 x i8> %1,
    iXLen %2)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vloxei.mask.nxv8i16.nxv8i8(
  <vscale x 8 x i16>,
  ptr,
  <vscale x 8 x i8>,
  <vscale x 8 x i1>,
  iXLen,
  iXLen);

; Masked variant: loads into passthru %0 under mask %3 ("ta, mu"; trailing policy operand = 1).
define <vscale x 8 x i16> @intrinsic_vloxei_mask_v_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, ptr %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i16_nxv8i16_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT:    vloxei8.v v8, (a0), v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vloxei.mask.nxv8i16.nxv8i8(
    <vscale x 8 x i16> %0,
    ptr %1,
    <vscale x 8 x i8> %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x i16> %a
}
3817
declare <vscale x 16 x i16> @llvm.riscv.vloxei.nxv16i16.nxv16i8(
  <vscale x 16 x i16>,
  ptr,
  <vscale x 16 x i8>,
  iXLen);

; Unmasked ordered indexed load; passthru is poison (undef is deprecated in LLVM IR).
define <vscale x 16 x i16> @intrinsic_vloxei_v_nxv16i16_nxv16i16_nxv16i8(ptr %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vloxei_v_nxv16i16_nxv16i16_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT:    vloxei8.v v12, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vloxei.nxv16i16.nxv16i8(
    <vscale x 16 x i16> poison,
    ptr %0,
    <vscale x 16 x i8> %1,
    iXLen %2)

  ret <vscale x 16 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vloxei.mask.nxv16i16.nxv16i8(
  <vscale x 16 x i16>,
  ptr,
  <vscale x 16 x i8>,
  <vscale x 16 x i1>,
  iXLen,
  iXLen);

; Masked variant: loads into passthru %0 under mask %3 ("ta, mu"; trailing policy operand = 1).
define <vscale x 16 x i16> @intrinsic_vloxei_mask_v_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, ptr %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16i16_nxv16i16_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT:    vloxei8.v v8, (a0), v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vloxei.mask.nxv16i16.nxv16i8(
    <vscale x 16 x i16> %0,
    ptr %1,
    <vscale x 16 x i8> %2,
    <vscale x 16 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 16 x i16> %a
}
3865
declare <vscale x 32 x i16> @llvm.riscv.vloxei.nxv32i16.nxv32i8(
  <vscale x 32 x i16>,
  ptr,
  <vscale x 32 x i8>,
  iXLen);

; Unmasked ordered indexed load; passthru is poison (undef is deprecated in LLVM IR).
define <vscale x 32 x i16> @intrinsic_vloxei_v_nxv32i16_nxv32i16_nxv32i8(ptr %0, <vscale x 32 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vloxei_v_nxv32i16_nxv32i16_nxv32i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
; CHECK-NEXT:    vloxei8.v v16, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vloxei.nxv32i16.nxv32i8(
    <vscale x 32 x i16> poison,
    ptr %0,
    <vscale x 32 x i8> %1,
    iXLen %2)

  ret <vscale x 32 x i16> %a
}

declare <vscale x 32 x i16> @llvm.riscv.vloxei.mask.nxv32i16.nxv32i8(
  <vscale x 32 x i16>,
  ptr,
  <vscale x 32 x i8>,
  <vscale x 32 x i1>,
  iXLen,
  iXLen);

; Masked variant: loads into passthru %0 under mask %3 ("ta, mu"; trailing policy operand = 1).
define <vscale x 32 x i16> @intrinsic_vloxei_mask_v_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, ptr %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv32i16_nxv32i16_nxv32i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
; CHECK-NEXT:    vloxei8.v v8, (a0), v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vloxei.mask.nxv32i16.nxv32i8(
    <vscale x 32 x i16> %0,
    ptr %1,
    <vscale x 32 x i8> %2,
    <vscale x 32 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 32 x i16> %a
}
3913
declare <vscale x 1 x i32> @llvm.riscv.vloxei.nxv1i32.nxv1i8(
  <vscale x 1 x i32>,
  ptr,
  <vscale x 1 x i8>,
  iXLen);

; Unmasked ordered indexed load; passthru is poison (undef is deprecated in LLVM IR).
define <vscale x 1 x i32> @intrinsic_vloxei_v_nxv1i32_nxv1i32_nxv1i8(ptr %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vloxei_v_nxv1i32_nxv1i32_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    vloxei8.v v9, (a0), v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vloxei.nxv1i32.nxv1i8(
    <vscale x 1 x i32> poison,
    ptr %0,
    <vscale x 1 x i8> %1,
    iXLen %2)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vloxei.mask.nxv1i32.nxv1i8(
  <vscale x 1 x i32>,
  ptr,
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

; Masked variant: loads into passthru %0 under mask %3 ("ta, mu"; trailing policy operand = 1).
define <vscale x 1 x i32> @intrinsic_vloxei_mask_v_nxv1i32_nxv1i32_nxv1i8(<vscale x 1 x i32> %0, ptr %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i32_nxv1i32_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT:    vloxei8.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vloxei.mask.nxv1i32.nxv1i8(
    <vscale x 1 x i32> %0,
    ptr %1,
    <vscale x 1 x i8> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x i32> %a
}
3961
declare <vscale x 2 x i32> @llvm.riscv.vloxei.nxv2i32.nxv2i8(
  <vscale x 2 x i32>,
  ptr,
  <vscale x 2 x i8>,
  iXLen);

; Unmasked ordered indexed load; passthru is poison (undef is deprecated in LLVM IR).
define <vscale x 2 x i32> @intrinsic_vloxei_v_nxv2i32_nxv2i32_nxv2i8(ptr %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vloxei_v_nxv2i32_nxv2i32_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vloxei8.v v9, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vloxei.nxv2i32.nxv2i8(
    <vscale x 2 x i32> poison,
    ptr %0,
    <vscale x 2 x i8> %1,
    iXLen %2)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vloxei.mask.nxv2i32.nxv2i8(
  <vscale x 2 x i32>,
  ptr,
  <vscale x 2 x i8>,
  <vscale x 2 x i1>,
  iXLen,
  iXLen);

; Masked variant: loads into passthru %0 under mask %3 ("ta, mu"; trailing policy operand = 1).
define <vscale x 2 x i32> @intrinsic_vloxei_mask_v_nxv2i32_nxv2i32_nxv2i8(<vscale x 2 x i32> %0, ptr %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i32_nxv2i32_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT:    vloxei8.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vloxei.mask.nxv2i32.nxv2i8(
    <vscale x 2 x i32> %0,
    ptr %1,
    <vscale x 2 x i8> %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x i32> %a
}
4009
declare <vscale x 4 x i32> @llvm.riscv.vloxei.nxv4i32.nxv4i8(
  <vscale x 4 x i32>,
  ptr,
  <vscale x 4 x i8>,
  iXLen);

; Unmasked ordered indexed load; passthru is poison (undef is deprecated in LLVM IR).
define <vscale x 4 x i32> @intrinsic_vloxei_v_nxv4i32_nxv4i32_nxv4i8(ptr %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vloxei_v_nxv4i32_nxv4i32_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT:    vloxei8.v v10, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vloxei.nxv4i32.nxv4i8(
    <vscale x 4 x i32> poison,
    ptr %0,
    <vscale x 4 x i8> %1,
    iXLen %2)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vloxei.mask.nxv4i32.nxv4i8(
  <vscale x 4 x i32>,
  ptr,
  <vscale x 4 x i8>,
  <vscale x 4 x i1>,
  iXLen,
  iXLen);

; Masked variant: loads into passthru %0 under mask %3 ("ta, mu"; trailing policy operand = 1).
define <vscale x 4 x i32> @intrinsic_vloxei_mask_v_nxv4i32_nxv4i32_nxv4i8(<vscale x 4 x i32> %0, ptr %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i32_nxv4i32_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT:    vloxei8.v v8, (a0), v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vloxei.mask.nxv4i32.nxv4i8(
    <vscale x 4 x i32> %0,
    ptr %1,
    <vscale x 4 x i8> %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x i32> %a
}
4057
declare <vscale x 8 x i32> @llvm.riscv.vloxei.nxv8i32.nxv8i8(
  <vscale x 8 x i32>,
  ptr,
  <vscale x 8 x i8>,
  iXLen);

; Unmasked ordered indexed load; passthru is poison (undef is deprecated in LLVM IR).
define <vscale x 8 x i32> @intrinsic_vloxei_v_nxv8i32_nxv8i32_nxv8i8(ptr %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vloxei_v_nxv8i32_nxv8i32_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT:    vloxei8.v v12, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vloxei.nxv8i32.nxv8i8(
    <vscale x 8 x i32> poison,
    ptr %0,
    <vscale x 8 x i8> %1,
    iXLen %2)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vloxei.mask.nxv8i32.nxv8i8(
  <vscale x 8 x i32>,
  ptr,
  <vscale x 8 x i8>,
  <vscale x 8 x i1>,
  iXLen,
  iXLen);

; Masked variant: loads into passthru %0 under mask %3 ("ta, mu"; trailing policy operand = 1).
define <vscale x 8 x i32> @intrinsic_vloxei_mask_v_nxv8i32_nxv8i32_nxv8i8(<vscale x 8 x i32> %0, ptr %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i32_nxv8i32_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT:    vloxei8.v v8, (a0), v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vloxei.mask.nxv8i32.nxv8i8(
    <vscale x 8 x i32> %0,
    ptr %1,
    <vscale x 8 x i8> %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x i32> %a
}
4105
declare <vscale x 16 x i32> @llvm.riscv.vloxei.nxv16i32.nxv16i8(
  <vscale x 16 x i32>,
  ptr,
  <vscale x 16 x i8>,
  iXLen);

; Unmasked ordered indexed load; passthru is poison (undef is deprecated in LLVM IR).
define <vscale x 16 x i32> @intrinsic_vloxei_v_nxv16i32_nxv16i32_nxv16i8(ptr %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vloxei_v_nxv16i32_nxv16i32_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT:    vloxei8.v v16, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vloxei.nxv16i32.nxv16i8(
    <vscale x 16 x i32> poison,
    ptr %0,
    <vscale x 16 x i8> %1,
    iXLen %2)

  ret <vscale x 16 x i32> %a
}

declare <vscale x 16 x i32> @llvm.riscv.vloxei.mask.nxv16i32.nxv16i8(
  <vscale x 16 x i32>,
  ptr,
  <vscale x 16 x i8>,
  <vscale x 16 x i1>,
  iXLen,
  iXLen);

; Masked variant: loads into passthru %0 under mask %3 ("ta, mu"; trailing policy operand = 1).
define <vscale x 16 x i32> @intrinsic_vloxei_mask_v_nxv16i32_nxv16i32_nxv16i8(<vscale x 16 x i32> %0, ptr %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16i32_nxv16i32_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
; CHECK-NEXT:    vloxei8.v v8, (a0), v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vloxei.mask.nxv16i32.nxv16i8(
    <vscale x 16 x i32> %0,
    ptr %1,
    <vscale x 16 x i8> %2,
    <vscale x 16 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 16 x i32> %a
}
4153
declare <vscale x 1 x i64> @llvm.riscv.vloxei.nxv1i64.nxv1i8(
  <vscale x 1 x i64>,
  ptr,
  <vscale x 1 x i8>,
  iXLen);

; Unmasked ordered indexed load; passthru is poison (undef is deprecated in LLVM IR).
define <vscale x 1 x i64> @intrinsic_vloxei_v_nxv1i64_nxv1i64_nxv1i8(ptr %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vloxei_v_nxv1i64_nxv1i64_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT:    vloxei8.v v9, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vloxei.nxv1i64.nxv1i8(
    <vscale x 1 x i64> poison,
    ptr %0,
    <vscale x 1 x i8> %1,
    iXLen %2)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vloxei.mask.nxv1i64.nxv1i8(
  <vscale x 1 x i64>,
  ptr,
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

; Masked variant: loads into passthru %0 under mask %3 ("ta, mu"; trailing policy operand = 1).
define <vscale x 1 x i64> @intrinsic_vloxei_mask_v_nxv1i64_nxv1i64_nxv1i8(<vscale x 1 x i64> %0, ptr %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i64_nxv1i64_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT:    vloxei8.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vloxei.mask.nxv1i64.nxv1i8(
    <vscale x 1 x i64> %0,
    ptr %1,
    <vscale x 1 x i8> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x i64> %a
}
4201
declare <vscale x 2 x i64> @llvm.riscv.vloxei.nxv2i64.nxv2i8(
  <vscale x 2 x i64>,
  ptr,
  <vscale x 2 x i8>,
  iXLen);

; Unmasked ordered indexed load; passthru is poison (undef is deprecated in LLVM IR).
define <vscale x 2 x i64> @intrinsic_vloxei_v_nxv2i64_nxv2i64_nxv2i8(ptr %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vloxei_v_nxv2i64_nxv2i64_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT:    vloxei8.v v10, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vloxei.nxv2i64.nxv2i8(
    <vscale x 2 x i64> poison,
    ptr %0,
    <vscale x 2 x i8> %1,
    iXLen %2)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vloxei.mask.nxv2i64.nxv2i8(
  <vscale x 2 x i64>,
  ptr,
  <vscale x 2 x i8>,
  <vscale x 2 x i1>,
  iXLen,
  iXLen);

; Masked variant: loads into passthru %0 under mask %3 ("ta, mu"; trailing policy operand = 1).
define <vscale x 2 x i64> @intrinsic_vloxei_mask_v_nxv2i64_nxv2i64_nxv2i8(<vscale x 2 x i64> %0, ptr %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i64_nxv2i64_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT:    vloxei8.v v8, (a0), v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vloxei.mask.nxv2i64.nxv2i8(
    <vscale x 2 x i64> %0,
    ptr %1,
    <vscale x 2 x i8> %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x i64> %a
}
4249
declare <vscale x 4 x i64> @llvm.riscv.vloxei.nxv4i64.nxv4i8(
  <vscale x 4 x i64>,
  ptr,
  <vscale x 4 x i8>,
  iXLen);

; Unmasked ordered indexed load; passthru is poison (undef is deprecated in LLVM IR).
define <vscale x 4 x i64> @intrinsic_vloxei_v_nxv4i64_nxv4i64_nxv4i8(ptr %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vloxei_v_nxv4i64_nxv4i64_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
; CHECK-NEXT:    vloxei8.v v12, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vloxei.nxv4i64.nxv4i8(
    <vscale x 4 x i64> poison,
    ptr %0,
    <vscale x 4 x i8> %1,
    iXLen %2)

  ret <vscale x 4 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vloxei.mask.nxv4i64.nxv4i8(
  <vscale x 4 x i64>,
  ptr,
  <vscale x 4 x i8>,
  <vscale x 4 x i1>,
  iXLen,
  iXLen);

; Masked variant: loads into passthru %0 under mask %3 ("ta, mu"; trailing policy operand = 1).
define <vscale x 4 x i64> @intrinsic_vloxei_mask_v_nxv4i64_nxv4i64_nxv4i8(<vscale x 4 x i64> %0, ptr %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i64_nxv4i64_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
; CHECK-NEXT:    vloxei8.v v8, (a0), v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vloxei.mask.nxv4i64.nxv4i8(
    <vscale x 4 x i64> %0,
    ptr %1,
    <vscale x 4 x i8> %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x i64> %a
}
4297
declare <vscale x 8 x i64> @llvm.riscv.vloxei.nxv8i64.nxv8i8(
  <vscale x 8 x i64>,
  ptr,
  <vscale x 8 x i8>,
  iXLen);

; Unmasked ordered indexed load; passthru is poison (undef is deprecated in LLVM IR).
define <vscale x 8 x i64> @intrinsic_vloxei_v_nxv8i64_nxv8i64_nxv8i8(ptr %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vloxei_v_nxv8i64_nxv8i64_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
; CHECK-NEXT:    vloxei8.v v16, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vloxei.nxv8i64.nxv8i8(
    <vscale x 8 x i64> poison,
    ptr %0,
    <vscale x 8 x i8> %1,
    iXLen %2)

  ret <vscale x 8 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.vloxei.mask.nxv8i64.nxv8i8(
  <vscale x 8 x i64>,
  ptr,
  <vscale x 8 x i8>,
  <vscale x 8 x i1>,
  iXLen,
  iXLen);

; Masked variant: loads into passthru %0 under mask %3 ("ta, mu"; trailing policy operand = 1).
define <vscale x 8 x i64> @intrinsic_vloxei_mask_v_nxv8i64_nxv8i64_nxv8i8(<vscale x 8 x i64> %0, ptr %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i64_nxv8i64_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
; CHECK-NEXT:    vloxei8.v v8, (a0), v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vloxei.mask.nxv8i64.nxv8i8(
    <vscale x 8 x i64> %0,
    ptr %1,
    <vscale x 8 x i8> %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x i64> %a
}
4345
declare <vscale x 1 x half> @llvm.riscv.vloxei.nxv1f16.nxv1i8(
  <vscale x 1 x half>,
  ptr,
  <vscale x 1 x i8>,
  iXLen);

; Unmasked ordered indexed load; passthru is poison (undef is deprecated in LLVM IR).
define <vscale x 1 x half> @intrinsic_vloxei_v_nxv1f16_nxv1f16_nxv1i8(ptr %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vloxei_v_nxv1f16_nxv1f16_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vloxei8.v v9, (a0), v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vloxei.nxv1f16.nxv1i8(
    <vscale x 1 x half> poison,
    ptr %0,
    <vscale x 1 x i8> %1,
    iXLen %2)

  ret <vscale x 1 x half> %a
}

declare <vscale x 1 x half> @llvm.riscv.vloxei.mask.nxv1f16.nxv1i8(
  <vscale x 1 x half>,
  ptr,
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

; Masked variant: loads into passthru %0 under mask %3 ("ta, mu"; trailing policy operand = 1).
define <vscale x 1 x half> @intrinsic_vloxei_mask_v_nxv1f16_nxv1f16_nxv1i8(<vscale x 1 x half> %0, ptr %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f16_nxv1f16_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT:    vloxei8.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vloxei.mask.nxv1f16.nxv1i8(
    <vscale x 1 x half> %0,
    ptr %1,
    <vscale x 1 x i8> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x half> %a
}
4393
declare <vscale x 2 x half> @llvm.riscv.vloxei.nxv2f16.nxv2i8(
  <vscale x 2 x half>,
  ptr,
  <vscale x 2 x i8>,
  iXLen);

; Unmasked ordered indexed load; passthru is poison (undef is deprecated in LLVM IR).
define <vscale x 2 x half> @intrinsic_vloxei_v_nxv2f16_nxv2f16_nxv2i8(ptr %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vloxei_v_nxv2f16_nxv2f16_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT:    vloxei8.v v9, (a0), v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x half> @llvm.riscv.vloxei.nxv2f16.nxv2i8(
    <vscale x 2 x half> poison,
    ptr %0,
    <vscale x 2 x i8> %1,
    iXLen %2)

  ret <vscale x 2 x half> %a
}

declare <vscale x 2 x half> @llvm.riscv.vloxei.mask.nxv2f16.nxv2i8(
  <vscale x 2 x half>,
  ptr,
  <vscale x 2 x i8>,
  <vscale x 2 x i1>,
  iXLen,
  iXLen);

; Masked variant: loads into passthru %0 under mask %3 ("ta, mu"; trailing policy operand = 1).
define <vscale x 2 x half> @intrinsic_vloxei_mask_v_nxv2f16_nxv2f16_nxv2i8(<vscale x 2 x half> %0, ptr %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f16_nxv2f16_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT:    vloxei8.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x half> @llvm.riscv.vloxei.mask.nxv2f16.nxv2i8(
    <vscale x 2 x half> %0,
    ptr %1,
    <vscale x 2 x i8> %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x half> %a
}
4441
declare <vscale x 4 x half> @llvm.riscv.vloxei.nxv4f16.nxv4i8(
  <vscale x 4 x half>,
  ptr,
  <vscale x 4 x i8>,
  iXLen);

; Unmasked ordered indexed load; passthru is poison (undef is deprecated in LLVM IR).
define <vscale x 4 x half> @intrinsic_vloxei_v_nxv4f16_nxv4f16_nxv4i8(ptr %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vloxei_v_nxv4f16_nxv4f16_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vloxei8.v v9, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vloxei.nxv4f16.nxv4i8(
    <vscale x 4 x half> poison,
    ptr %0,
    <vscale x 4 x i8> %1,
    iXLen %2)

  ret <vscale x 4 x half> %a
}

declare <vscale x 4 x half> @llvm.riscv.vloxei.mask.nxv4f16.nxv4i8(
  <vscale x 4 x half>,
  ptr,
  <vscale x 4 x i8>,
  <vscale x 4 x i1>,
  iXLen,
  iXLen);

; Masked variant: loads into passthru %0 under mask %3 ("ta, mu"; trailing policy operand = 1).
define <vscale x 4 x half> @intrinsic_vloxei_mask_v_nxv4f16_nxv4f16_nxv4i8(<vscale x 4 x half> %0, ptr %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f16_nxv4f16_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT:    vloxei8.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vloxei.mask.nxv4f16.nxv4i8(
    <vscale x 4 x half> %0,
    ptr %1,
    <vscale x 4 x i8> %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x half> %a
}
4489
declare <vscale x 8 x half> @llvm.riscv.vloxei.nxv8f16.nxv8i8(
  <vscale x 8 x half>,
  ptr,
  <vscale x 8 x i8>,
  iXLen);

; Unmasked ordered indexed load; passthru is poison (undef is deprecated in LLVM IR).
define <vscale x 8 x half> @intrinsic_vloxei_v_nxv8f16_nxv8f16_nxv8i8(ptr %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vloxei_v_nxv8f16_nxv8f16_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT:    vloxei8.v v10, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x half> @llvm.riscv.vloxei.nxv8f16.nxv8i8(
    <vscale x 8 x half> poison,
    ptr %0,
    <vscale x 8 x i8> %1,
    iXLen %2)

  ret <vscale x 8 x half> %a
}

declare <vscale x 8 x half> @llvm.riscv.vloxei.mask.nxv8f16.nxv8i8(
  <vscale x 8 x half>,
  ptr,
  <vscale x 8 x i8>,
  <vscale x 8 x i1>,
  iXLen,
  iXLen);

; Masked variant: loads into passthru %0 under mask %3 ("ta, mu"; trailing policy operand = 1).
define <vscale x 8 x half> @intrinsic_vloxei_mask_v_nxv8f16_nxv8f16_nxv8i8(<vscale x 8 x half> %0, ptr %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f16_nxv8f16_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT:    vloxei8.v v8, (a0), v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x half> @llvm.riscv.vloxei.mask.nxv8f16.nxv8i8(
    <vscale x 8 x half> %0,
    ptr %1,
    <vscale x 8 x i8> %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x half> %a
}
4537
declare <vscale x 16 x half> @llvm.riscv.vloxei.nxv16f16.nxv16i8(
  <vscale x 16 x half>,
  ptr,
  <vscale x 16 x i8>,
  iXLen);

; Unmasked ordered indexed load; passthru is poison (undef is deprecated in LLVM IR).
define <vscale x 16 x half> @intrinsic_vloxei_v_nxv16f16_nxv16f16_nxv16i8(ptr %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vloxei_v_nxv16f16_nxv16f16_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT:    vloxei8.v v12, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x half> @llvm.riscv.vloxei.nxv16f16.nxv16i8(
    <vscale x 16 x half> poison,
    ptr %0,
    <vscale x 16 x i8> %1,
    iXLen %2)

  ret <vscale x 16 x half> %a
}

declare <vscale x 16 x half> @llvm.riscv.vloxei.mask.nxv16f16.nxv16i8(
  <vscale x 16 x half>,
  ptr,
  <vscale x 16 x i8>,
  <vscale x 16 x i1>,
  iXLen,
  iXLen);

; Masked variant: loads into passthru %0 under mask %3 ("ta, mu"; trailing policy operand = 1).
define <vscale x 16 x half> @intrinsic_vloxei_mask_v_nxv16f16_nxv16f16_nxv16i8(<vscale x 16 x half> %0, ptr %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16f16_nxv16f16_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT:    vloxei8.v v8, (a0), v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x half> @llvm.riscv.vloxei.mask.nxv16f16.nxv16i8(
    <vscale x 16 x half> %0,
    ptr %1,
    <vscale x 16 x i8> %2,
    <vscale x 16 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 16 x half> %a
}
4585
declare <vscale x 32 x half> @llvm.riscv.vloxei.nxv32f16.nxv32i8(
  <vscale x 32 x half>,
  ptr,
  <vscale x 32 x i8>,
  iXLen);

; Unmasked ordered indexed load; passthru is poison (undef is deprecated in LLVM IR).
define <vscale x 32 x half> @intrinsic_vloxei_v_nxv32f16_nxv32f16_nxv32i8(ptr %0, <vscale x 32 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vloxei_v_nxv32f16_nxv32f16_nxv32i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
; CHECK-NEXT:    vloxei8.v v16, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x half> @llvm.riscv.vloxei.nxv32f16.nxv32i8(
    <vscale x 32 x half> poison,
    ptr %0,
    <vscale x 32 x i8> %1,
    iXLen %2)

  ret <vscale x 32 x half> %a
}

declare <vscale x 32 x half> @llvm.riscv.vloxei.mask.nxv32f16.nxv32i8(
  <vscale x 32 x half>,
  ptr,
  <vscale x 32 x i8>,
  <vscale x 32 x i1>,
  iXLen,
  iXLen);

; Masked variant: loads into passthru %0 under mask %3 ("ta, mu"; trailing policy operand = 1).
define <vscale x 32 x half> @intrinsic_vloxei_mask_v_nxv32f16_nxv32f16_nxv32i8(<vscale x 32 x half> %0, ptr %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv32f16_nxv32f16_nxv32i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
; CHECK-NEXT:    vloxei8.v v8, (a0), v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x half> @llvm.riscv.vloxei.mask.nxv32f16.nxv32i8(
    <vscale x 32 x half> %0,
    ptr %1,
    <vscale x 32 x i8> %2,
    <vscale x 32 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 32 x half> %a
}
4633
declare <vscale x 1 x bfloat> @llvm.riscv.vloxei.nxv1bf16.nxv1i32(
  <vscale x 1 x bfloat>,
  ptr,
  <vscale x 1 x i32>,
  iXLen);

; Unmasked ordered indexed load; passthru is poison (undef is deprecated in LLVM IR).
define <vscale x 1 x bfloat> @intrinsic_vloxei_v_nxv1bf16_nxv1bf16_nxv1i32(ptr %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vloxei_v_nxv1bf16_nxv1bf16_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vloxei32.v v9, (a0), v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x bfloat> @llvm.riscv.vloxei.nxv1bf16.nxv1i32(
    <vscale x 1 x bfloat> poison,
    ptr %0,
    <vscale x 1 x i32> %1,
    iXLen %2)

  ret <vscale x 1 x bfloat> %a
}

declare <vscale x 1 x bfloat> @llvm.riscv.vloxei.mask.nxv1bf16.nxv1i32(
  <vscale x 1 x bfloat>,
  ptr,
  <vscale x 1 x i32>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

; Masked variant: loads into passthru %0 under mask %3 ("ta, mu"; trailing policy operand = 1).
define <vscale x 1 x bfloat> @intrinsic_vloxei_mask_v_nxv1bf16_nxv1bf16_nxv1i32(<vscale x 1 x bfloat> %0, ptr %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1bf16_nxv1bf16_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT:    vloxei32.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x bfloat> @llvm.riscv.vloxei.mask.nxv1bf16.nxv1i32(
    <vscale x 1 x bfloat> %0,
    ptr %1,
    <vscale x 1 x i32> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x bfloat> %a
}
4681
declare <vscale x 2 x bfloat> @llvm.riscv.vloxei.nxv2bf16.nxv2i32(
  <vscale x 2 x bfloat>,
  ptr,
  <vscale x 2 x i32>,
  iXLen);

; Unmasked indexed-ordered load: nxv2bf16 data (e16, mf2) with nxv2i32 indices (vloxei32).
; Passthru is poison (preferred over deprecated undef), so the result register is freely chosen.
define <vscale x 2 x bfloat> @intrinsic_vloxei_v_nxv2bf16_nxv2bf16_nxv2i32(ptr %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vloxei_v_nxv2bf16_nxv2bf16_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT:    vloxei32.v v9, (a0), v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x bfloat> @llvm.riscv.vloxei.nxv2bf16.nxv2i32(
    <vscale x 2 x bfloat> poison,
    ptr %0,
    <vscale x 2 x i32> %1,
    iXLen %2)

  ret <vscale x 2 x bfloat> %a
}
4704
declare <vscale x 2 x bfloat> @llvm.riscv.vloxei.mask.nxv2bf16.nxv2i32(
  <vscale x 2 x bfloat>,
  ptr,
  <vscale x 2 x i32>,
  <vscale x 2 x i1>,
  iXLen,
  iXLen);

; Masked indexed-ordered load: nxv2bf16 data (e16, mf2) with nxv2i32 indices; the trailing
; iXLen 1 is the policy operand (assembly shows "ta, mu"), merging into the passthru %0.
define <vscale x 2 x bfloat> @intrinsic_vloxei_mask_v_nxv2bf16_nxv2bf16_nxv2i32(<vscale x 2 x bfloat> %0, ptr %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2bf16_nxv2bf16_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT:    vloxei32.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x bfloat> @llvm.riscv.vloxei.mask.nxv2bf16.nxv2i32(
    <vscale x 2 x bfloat> %0,
    ptr %1,
    <vscale x 2 x i32> %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x bfloat> %a
}
4729
declare <vscale x 4 x bfloat> @llvm.riscv.vloxei.nxv4bf16.nxv4i32(
  <vscale x 4 x bfloat>,
  ptr,
  <vscale x 4 x i32>,
  iXLen);

; Unmasked indexed-ordered load: nxv4bf16 data (e16, m1) with nxv4i32 indices (vloxei32).
; Passthru is poison (preferred over deprecated undef), so the result register is freely chosen.
define <vscale x 4 x bfloat> @intrinsic_vloxei_v_nxv4bf16_nxv4bf16_nxv4i32(ptr %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vloxei_v_nxv4bf16_nxv4bf16_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vloxei32.v v10, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x bfloat> @llvm.riscv.vloxei.nxv4bf16.nxv4i32(
    <vscale x 4 x bfloat> poison,
    ptr %0,
    <vscale x 4 x i32> %1,
    iXLen %2)

  ret <vscale x 4 x bfloat> %a
}
4752
declare <vscale x 4 x bfloat> @llvm.riscv.vloxei.mask.nxv4bf16.nxv4i32(
  <vscale x 4 x bfloat>,
  ptr,
  <vscale x 4 x i32>,
  <vscale x 4 x i1>,
  iXLen,
  iXLen);

; Masked indexed-ordered load: nxv4bf16 data (e16, m1) with nxv4i32 indices; the trailing
; iXLen 1 is the policy operand (assembly shows "ta, mu"), merging into the passthru %0.
define <vscale x 4 x bfloat> @intrinsic_vloxei_mask_v_nxv4bf16_nxv4bf16_nxv4i32(<vscale x 4 x bfloat> %0, ptr %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4bf16_nxv4bf16_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT:    vloxei32.v v8, (a0), v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x bfloat> @llvm.riscv.vloxei.mask.nxv4bf16.nxv4i32(
    <vscale x 4 x bfloat> %0,
    ptr %1,
    <vscale x 4 x i32> %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x bfloat> %a
}
4777
declare <vscale x 8 x bfloat> @llvm.riscv.vloxei.nxv8bf16.nxv8i32(
  <vscale x 8 x bfloat>,
  ptr,
  <vscale x 8 x i32>,
  iXLen);

; Unmasked indexed-ordered load: nxv8bf16 data (e16, m2) with nxv8i32 indices (vloxei32).
; Passthru is poison (preferred over deprecated undef), so the result register is freely chosen.
define <vscale x 8 x bfloat> @intrinsic_vloxei_v_nxv8bf16_nxv8bf16_nxv8i32(ptr %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vloxei_v_nxv8bf16_nxv8bf16_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT:    vloxei32.v v12, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x bfloat> @llvm.riscv.vloxei.nxv8bf16.nxv8i32(
    <vscale x 8 x bfloat> poison,
    ptr %0,
    <vscale x 8 x i32> %1,
    iXLen %2)

  ret <vscale x 8 x bfloat> %a
}
4800
declare <vscale x 8 x bfloat> @llvm.riscv.vloxei.mask.nxv8bf16.nxv8i32(
  <vscale x 8 x bfloat>,
  ptr,
  <vscale x 8 x i32>,
  <vscale x 8 x i1>,
  iXLen,
  iXLen);

; Masked indexed-ordered load: nxv8bf16 data (e16, m2) with nxv8i32 indices; the trailing
; iXLen 1 is the policy operand (assembly shows "ta, mu"), merging into the passthru %0.
define <vscale x 8 x bfloat> @intrinsic_vloxei_mask_v_nxv8bf16_nxv8bf16_nxv8i32(<vscale x 8 x bfloat> %0, ptr %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8bf16_nxv8bf16_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT:    vloxei32.v v8, (a0), v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x bfloat> @llvm.riscv.vloxei.mask.nxv8bf16.nxv8i32(
    <vscale x 8 x bfloat> %0,
    ptr %1,
    <vscale x 8 x i32> %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x bfloat> %a
}
4825
declare <vscale x 16 x bfloat> @llvm.riscv.vloxei.nxv16bf16.nxv16i32(
  <vscale x 16 x bfloat>,
  ptr,
  <vscale x 16 x i32>,
  iXLen);

; Unmasked indexed-ordered load: nxv16bf16 data (e16, m4) with nxv16i32 indices (vloxei32).
; Passthru is poison (preferred over deprecated undef), so the result register is freely chosen.
define <vscale x 16 x bfloat> @intrinsic_vloxei_v_nxv16bf16_nxv16bf16_nxv16i32(ptr %0, <vscale x 16 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vloxei_v_nxv16bf16_nxv16bf16_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT:    vloxei32.v v16, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x bfloat> @llvm.riscv.vloxei.nxv16bf16.nxv16i32(
    <vscale x 16 x bfloat> poison,
    ptr %0,
    <vscale x 16 x i32> %1,
    iXLen %2)

  ret <vscale x 16 x bfloat> %a
}
4848
declare <vscale x 16 x bfloat> @llvm.riscv.vloxei.mask.nxv16bf16.nxv16i32(
  <vscale x 16 x bfloat>,
  ptr,
  <vscale x 16 x i32>,
  <vscale x 16 x i1>,
  iXLen,
  iXLen);

; Masked indexed-ordered load: nxv16bf16 data (e16, m4) with nxv16i32 indices; the trailing
; iXLen 1 is the policy operand (assembly shows "ta, mu"), merging into the passthru %0.
define <vscale x 16 x bfloat> @intrinsic_vloxei_mask_v_nxv16bf16_nxv16bf16_nxv16i32(<vscale x 16 x bfloat> %0, ptr %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16bf16_nxv16bf16_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT:    vloxei32.v v8, (a0), v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x bfloat> @llvm.riscv.vloxei.mask.nxv16bf16.nxv16i32(
    <vscale x 16 x bfloat> %0,
    ptr %1,
    <vscale x 16 x i32> %2,
    <vscale x 16 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 16 x bfloat> %a
}
4873
declare <vscale x 1 x float> @llvm.riscv.vloxei.nxv1f32.nxv1i8(
  <vscale x 1 x float>,
  ptr,
  <vscale x 1 x i8>,
  iXLen);

; Unmasked indexed-ordered load: nxv1f32 data (e32, mf2) with nxv1i8 indices (vloxei8).
; Passthru is poison (preferred over deprecated undef), so the result register is freely chosen.
define <vscale x 1 x float> @intrinsic_vloxei_v_nxv1f32_nxv1f32_nxv1i8(ptr %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vloxei_v_nxv1f32_nxv1f32_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    vloxei8.v v9, (a0), v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vloxei.nxv1f32.nxv1i8(
    <vscale x 1 x float> poison,
    ptr %0,
    <vscale x 1 x i8> %1,
    iXLen %2)

  ret <vscale x 1 x float> %a
}
4896
declare <vscale x 1 x float> @llvm.riscv.vloxei.mask.nxv1f32.nxv1i8(
  <vscale x 1 x float>,
  ptr,
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

; Masked indexed-ordered load: nxv1f32 data (e32, mf2) with nxv1i8 indices; the trailing
; iXLen 1 is the policy operand (assembly shows "ta, mu"), merging into the passthru %0.
define <vscale x 1 x float> @intrinsic_vloxei_mask_v_nxv1f32_nxv1f32_nxv1i8(<vscale x 1 x float> %0, ptr %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f32_nxv1f32_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT:    vloxei8.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vloxei.mask.nxv1f32.nxv1i8(
    <vscale x 1 x float> %0,
    ptr %1,
    <vscale x 1 x i8> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x float> %a
}
4921
declare <vscale x 2 x float> @llvm.riscv.vloxei.nxv2f32.nxv2i8(
  <vscale x 2 x float>,
  ptr,
  <vscale x 2 x i8>,
  iXLen);

; Unmasked indexed-ordered load: nxv2f32 data (e32, m1) with nxv2i8 indices (vloxei8).
; Passthru is poison (preferred over deprecated undef), so the result register is freely chosen.
define <vscale x 2 x float> @intrinsic_vloxei_v_nxv2f32_nxv2f32_nxv2i8(ptr %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vloxei_v_nxv2f32_nxv2f32_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vloxei8.v v9, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vloxei.nxv2f32.nxv2i8(
    <vscale x 2 x float> poison,
    ptr %0,
    <vscale x 2 x i8> %1,
    iXLen %2)

  ret <vscale x 2 x float> %a
}
4944
declare <vscale x 2 x float> @llvm.riscv.vloxei.mask.nxv2f32.nxv2i8(
  <vscale x 2 x float>,
  ptr,
  <vscale x 2 x i8>,
  <vscale x 2 x i1>,
  iXLen,
  iXLen);

; Masked indexed-ordered load: nxv2f32 data (e32, m1) with nxv2i8 indices; the trailing
; iXLen 1 is the policy operand (assembly shows "ta, mu"), merging into the passthru %0.
define <vscale x 2 x float> @intrinsic_vloxei_mask_v_nxv2f32_nxv2f32_nxv2i8(<vscale x 2 x float> %0, ptr %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f32_nxv2f32_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT:    vloxei8.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vloxei.mask.nxv2f32.nxv2i8(
    <vscale x 2 x float> %0,
    ptr %1,
    <vscale x 2 x i8> %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x float> %a
}
4969
declare <vscale x 4 x float> @llvm.riscv.vloxei.nxv4f32.nxv4i8(
  <vscale x 4 x float>,
  ptr,
  <vscale x 4 x i8>,
  iXLen);

; Unmasked indexed-ordered load: nxv4f32 data (e32, m2) with nxv4i8 indices (vloxei8).
; Passthru is poison (preferred over deprecated undef), so the result register is freely chosen.
define <vscale x 4 x float> @intrinsic_vloxei_v_nxv4f32_nxv4f32_nxv4i8(ptr %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vloxei_v_nxv4f32_nxv4f32_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT:    vloxei8.v v10, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x float> @llvm.riscv.vloxei.nxv4f32.nxv4i8(
    <vscale x 4 x float> poison,
    ptr %0,
    <vscale x 4 x i8> %1,
    iXLen %2)

  ret <vscale x 4 x float> %a
}
4992
declare <vscale x 4 x float> @llvm.riscv.vloxei.mask.nxv4f32.nxv4i8(
  <vscale x 4 x float>,
  ptr,
  <vscale x 4 x i8>,
  <vscale x 4 x i1>,
  iXLen,
  iXLen);

; Masked indexed-ordered load: nxv4f32 data (e32, m2) with nxv4i8 indices; the trailing
; iXLen 1 is the policy operand (assembly shows "ta, mu"), merging into the passthru %0.
define <vscale x 4 x float> @intrinsic_vloxei_mask_v_nxv4f32_nxv4f32_nxv4i8(<vscale x 4 x float> %0, ptr %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f32_nxv4f32_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT:    vloxei8.v v8, (a0), v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x float> @llvm.riscv.vloxei.mask.nxv4f32.nxv4i8(
    <vscale x 4 x float> %0,
    ptr %1,
    <vscale x 4 x i8> %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x float> %a
}
5017
declare <vscale x 8 x float> @llvm.riscv.vloxei.nxv8f32.nxv8i8(
  <vscale x 8 x float>,
  ptr,
  <vscale x 8 x i8>,
  iXLen);

; Unmasked indexed-ordered load: nxv8f32 data (e32, m4) with nxv8i8 indices (vloxei8).
; Passthru is poison (preferred over deprecated undef), so the result register is freely chosen.
define <vscale x 8 x float> @intrinsic_vloxei_v_nxv8f32_nxv8f32_nxv8i8(ptr %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vloxei_v_nxv8f32_nxv8f32_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT:    vloxei8.v v12, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vloxei.nxv8f32.nxv8i8(
    <vscale x 8 x float> poison,
    ptr %0,
    <vscale x 8 x i8> %1,
    iXLen %2)

  ret <vscale x 8 x float> %a
}
5040
declare <vscale x 8 x float> @llvm.riscv.vloxei.mask.nxv8f32.nxv8i8(
  <vscale x 8 x float>,
  ptr,
  <vscale x 8 x i8>,
  <vscale x 8 x i1>,
  iXLen,
  iXLen);

; Masked indexed-ordered load: nxv8f32 data (e32, m4) with nxv8i8 indices; the trailing
; iXLen 1 is the policy operand (assembly shows "ta, mu"), merging into the passthru %0.
define <vscale x 8 x float> @intrinsic_vloxei_mask_v_nxv8f32_nxv8f32_nxv8i8(<vscale x 8 x float> %0, ptr %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f32_nxv8f32_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT:    vloxei8.v v8, (a0), v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vloxei.mask.nxv8f32.nxv8i8(
    <vscale x 8 x float> %0,
    ptr %1,
    <vscale x 8 x i8> %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x float> %a
}
5065
declare <vscale x 16 x float> @llvm.riscv.vloxei.nxv16f32.nxv16i8(
  <vscale x 16 x float>,
  ptr,
  <vscale x 16 x i8>,
  iXLen);

; Unmasked indexed-ordered load: nxv16f32 data (e32, m8) with nxv16i8 indices (vloxei8).
; Passthru is poison (preferred over deprecated undef), so the result register is freely chosen.
define <vscale x 16 x float> @intrinsic_vloxei_v_nxv16f32_nxv16f32_nxv16i8(ptr %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vloxei_v_nxv16f32_nxv16f32_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT:    vloxei8.v v16, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x float> @llvm.riscv.vloxei.nxv16f32.nxv16i8(
    <vscale x 16 x float> poison,
    ptr %0,
    <vscale x 16 x i8> %1,
    iXLen %2)

  ret <vscale x 16 x float> %a
}
5088
declare <vscale x 16 x float> @llvm.riscv.vloxei.mask.nxv16f32.nxv16i8(
  <vscale x 16 x float>,
  ptr,
  <vscale x 16 x i8>,
  <vscale x 16 x i1>,
  iXLen,
  iXLen);

; Masked indexed-ordered load: nxv16f32 data (e32, m8) with nxv16i8 indices; the trailing
; iXLen 1 is the policy operand (assembly shows "ta, mu"), merging into the passthru %0.
define <vscale x 16 x float> @intrinsic_vloxei_mask_v_nxv16f32_nxv16f32_nxv16i8(<vscale x 16 x float> %0, ptr %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16f32_nxv16f32_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
; CHECK-NEXT:    vloxei8.v v8, (a0), v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x float> @llvm.riscv.vloxei.mask.nxv16f32.nxv16i8(
    <vscale x 16 x float> %0,
    ptr %1,
    <vscale x 16 x i8> %2,
    <vscale x 16 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 16 x float> %a
}
5113
declare <vscale x 1 x double> @llvm.riscv.vloxei.nxv1f64.nxv1i8(
  <vscale x 1 x double>,
  ptr,
  <vscale x 1 x i8>,
  iXLen);

; Unmasked indexed-ordered load: nxv1f64 data (e64, m1) with nxv1i8 indices (vloxei8).
; Passthru is poison (preferred over deprecated undef), so the result register is freely chosen.
define <vscale x 1 x double> @intrinsic_vloxei_v_nxv1f64_nxv1f64_nxv1i8(ptr %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vloxei_v_nxv1f64_nxv1f64_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT:    vloxei8.v v9, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vloxei.nxv1f64.nxv1i8(
    <vscale x 1 x double> poison,
    ptr %0,
    <vscale x 1 x i8> %1,
    iXLen %2)

  ret <vscale x 1 x double> %a
}
5136
declare <vscale x 1 x double> @llvm.riscv.vloxei.mask.nxv1f64.nxv1i8(
  <vscale x 1 x double>,
  ptr,
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

; Masked indexed-ordered load: nxv1f64 data (e64, m1) with nxv1i8 indices; the trailing
; iXLen 1 is the policy operand (assembly shows "ta, mu"), merging into the passthru %0.
define <vscale x 1 x double> @intrinsic_vloxei_mask_v_nxv1f64_nxv1f64_nxv1i8(<vscale x 1 x double> %0, ptr %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f64_nxv1f64_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT:    vloxei8.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vloxei.mask.nxv1f64.nxv1i8(
    <vscale x 1 x double> %0,
    ptr %1,
    <vscale x 1 x i8> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x double> %a
}
5161
declare <vscale x 2 x double> @llvm.riscv.vloxei.nxv2f64.nxv2i8(
  <vscale x 2 x double>,
  ptr,
  <vscale x 2 x i8>,
  iXLen);

; Unmasked indexed-ordered load: nxv2f64 data (e64, m2) with nxv2i8 indices (vloxei8).
; Passthru is poison (preferred over deprecated undef), so the result register is freely chosen.
define <vscale x 2 x double> @intrinsic_vloxei_v_nxv2f64_nxv2f64_nxv2i8(ptr %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vloxei_v_nxv2f64_nxv2f64_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT:    vloxei8.v v10, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x double> @llvm.riscv.vloxei.nxv2f64.nxv2i8(
    <vscale x 2 x double> poison,
    ptr %0,
    <vscale x 2 x i8> %1,
    iXLen %2)

  ret <vscale x 2 x double> %a
}
5184
declare <vscale x 2 x double> @llvm.riscv.vloxei.mask.nxv2f64.nxv2i8(
  <vscale x 2 x double>,
  ptr,
  <vscale x 2 x i8>,
  <vscale x 2 x i1>,
  iXLen,
  iXLen);

; Masked indexed-ordered load: nxv2f64 data (e64, m2) with nxv2i8 indices; the trailing
; iXLen 1 is the policy operand (assembly shows "ta, mu"), merging into the passthru %0.
define <vscale x 2 x double> @intrinsic_vloxei_mask_v_nxv2f64_nxv2f64_nxv2i8(<vscale x 2 x double> %0, ptr %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f64_nxv2f64_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT:    vloxei8.v v8, (a0), v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x double> @llvm.riscv.vloxei.mask.nxv2f64.nxv2i8(
    <vscale x 2 x double> %0,
    ptr %1,
    <vscale x 2 x i8> %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x double> %a
}
5209
declare <vscale x 4 x double> @llvm.riscv.vloxei.nxv4f64.nxv4i8(
  <vscale x 4 x double>,
  ptr,
  <vscale x 4 x i8>,
  iXLen);

; Unmasked indexed-ordered load: nxv4f64 data (e64, m4) with nxv4i8 indices (vloxei8).
; Passthru is poison (preferred over deprecated undef), so the result register is freely chosen.
define <vscale x 4 x double> @intrinsic_vloxei_v_nxv4f64_nxv4f64_nxv4i8(ptr %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vloxei_v_nxv4f64_nxv4f64_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
; CHECK-NEXT:    vloxei8.v v12, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x double> @llvm.riscv.vloxei.nxv4f64.nxv4i8(
    <vscale x 4 x double> poison,
    ptr %0,
    <vscale x 4 x i8> %1,
    iXLen %2)

  ret <vscale x 4 x double> %a
}
5232
declare <vscale x 4 x double> @llvm.riscv.vloxei.mask.nxv4f64.nxv4i8(
  <vscale x 4 x double>,
  ptr,
  <vscale x 4 x i8>,
  <vscale x 4 x i1>,
  iXLen,
  iXLen);

; Masked indexed-ordered load: nxv4f64 data (e64, m4) with nxv4i8 indices; the trailing
; iXLen 1 is the policy operand (assembly shows "ta, mu"), merging into the passthru %0.
define <vscale x 4 x double> @intrinsic_vloxei_mask_v_nxv4f64_nxv4f64_nxv4i8(<vscale x 4 x double> %0, ptr %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f64_nxv4f64_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
; CHECK-NEXT:    vloxei8.v v8, (a0), v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x double> @llvm.riscv.vloxei.mask.nxv4f64.nxv4i8(
    <vscale x 4 x double> %0,
    ptr %1,
    <vscale x 4 x i8> %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x double> %a
}
5257
declare <vscale x 8 x double> @llvm.riscv.vloxei.nxv8f64.nxv8i8(
  <vscale x 8 x double>,
  ptr,
  <vscale x 8 x i8>,
  iXLen);

; Unmasked indexed-ordered load: nxv8f64 data (e64, m8) with nxv8i8 indices (vloxei8).
; Passthru is poison (preferred over deprecated undef), so the result register is freely chosen.
define <vscale x 8 x double> @intrinsic_vloxei_v_nxv8f64_nxv8f64_nxv8i8(ptr %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vloxei_v_nxv8f64_nxv8f64_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
; CHECK-NEXT:    vloxei8.v v16, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x double> @llvm.riscv.vloxei.nxv8f64.nxv8i8(
    <vscale x 8 x double> poison,
    ptr %0,
    <vscale x 8 x i8> %1,
    iXLen %2)

  ret <vscale x 8 x double> %a
}
5280
declare <vscale x 8 x double> @llvm.riscv.vloxei.mask.nxv8f64.nxv8i8(
  <vscale x 8 x double>,
  ptr,
  <vscale x 8 x i8>,
  <vscale x 8 x i1>,
  iXLen,
  iXLen);

; Masked indexed-ordered load: nxv8f64 data (e64, m8) with nxv8i8 indices; the trailing
; iXLen 1 is the policy operand (assembly shows "ta, mu"), merging into the passthru %0.
define <vscale x 8 x double> @intrinsic_vloxei_mask_v_nxv8f64_nxv8f64_nxv8i8(<vscale x 8 x double> %0, ptr %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f64_nxv8f64_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
; CHECK-NEXT:    vloxei8.v v8, (a0), v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x double> @llvm.riscv.vloxei.mask.nxv8f64.nxv8i8(
    <vscale x 8 x double> %0,
    ptr %1,
    <vscale x 8 x i8> %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x double> %a
}
5305