; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zvfhmin,+zvfbfmin \
; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfhmin,+zvfbfmin \
; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s

; vsoxei32: indexed-ordered store of nxv1i8 data with nxv1i32 offsets (SEW=8, LMUL=1/8).
declare void @llvm.riscv.vsoxei.nxv1i8.nxv1i32(
  <vscale x 1 x i8>,
  ptr,
  <vscale x 1 x i32>,
  iXLen);

define void @intrinsic_vsoxei_v_nxv1i8_nxv1i8_nxv1i32(<vscale x 1 x i8> %0, ptr %1, <vscale x 1 x i32> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i8_nxv1i8_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vsoxei32.v v8, (a0), v9
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.nxv1i8.nxv1i32(
    <vscale x 1 x i8> %0,
    ptr %1,
    <vscale x 1 x i32> %2,
    iXLen %3)

  ret void
}

declare void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i32(
  <vscale x 1 x i8>,
  ptr,
  <vscale x 1 x i32>,
  <vscale x 1 x i1>,
  iXLen);

define void @intrinsic_vsoxei_mask_v_nxv1i8_nxv1i8_nxv1i32(<vscale x 1 x i8> %0, ptr %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i8_nxv1i8_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vsoxei32.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i32(
    <vscale x 1 x i8> %0,
    ptr %1,
    <vscale x 1 x i32> %2,
    <vscale x 1 x i1> %3,
    iXLen %4)

  ret void
}
52
; vsoxei32: indexed-ordered store of nxv2i8 data with nxv2i32 offsets (SEW=8, LMUL=1/4).
declare void @llvm.riscv.vsoxei.nxv2i8.nxv2i32(
  <vscale x 2 x i8>,
  ptr,
  <vscale x 2 x i32>,
  iXLen);

define void @intrinsic_vsoxei_v_nxv2i8_nxv2i8_nxv2i32(<vscale x 2 x i8> %0, ptr %1, <vscale x 2 x i32> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i8_nxv2i8_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT:    vsoxei32.v v8, (a0), v9
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.nxv2i8.nxv2i32(
    <vscale x 2 x i8> %0,
    ptr %1,
    <vscale x 2 x i32> %2,
    iXLen %3)

  ret void
}

declare void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i32(
  <vscale x 2 x i8>,
  ptr,
  <vscale x 2 x i32>,
  <vscale x 2 x i1>,
  iXLen);

define void @intrinsic_vsoxei_mask_v_nxv2i8_nxv2i8_nxv2i32(<vscale x 2 x i8> %0, ptr %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i8_nxv2i8_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT:    vsoxei32.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i32(
    <vscale x 2 x i8> %0,
    ptr %1,
    <vscale x 2 x i32> %2,
    <vscale x 2 x i1> %3,
    iXLen %4)

  ret void
}
98
; vsoxei32: indexed-ordered store of nxv4i8 data with nxv4i32 offsets (SEW=8, LMUL=1/2).
declare void @llvm.riscv.vsoxei.nxv4i8.nxv4i32(
  <vscale x 4 x i8>,
  ptr,
  <vscale x 4 x i32>,
  iXLen);

define void @intrinsic_vsoxei_v_nxv4i8_nxv4i8_nxv4i32(<vscale x 4 x i8> %0, ptr %1, <vscale x 4 x i32> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i8_nxv4i8_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vsoxei32.v v8, (a0), v10
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.nxv4i8.nxv4i32(
    <vscale x 4 x i8> %0,
    ptr %1,
    <vscale x 4 x i32> %2,
    iXLen %3)

  ret void
}

declare void @llvm.riscv.vsoxei.mask.nxv4i8.nxv4i32(
  <vscale x 4 x i8>,
  ptr,
  <vscale x 4 x i32>,
  <vscale x 4 x i1>,
  iXLen);

define void @intrinsic_vsoxei_mask_v_nxv4i8_nxv4i8_nxv4i32(<vscale x 4 x i8> %0, ptr %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i8_nxv4i8_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vsoxei32.v v8, (a0), v10, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.mask.nxv4i8.nxv4i32(
    <vscale x 4 x i8> %0,
    ptr %1,
    <vscale x 4 x i32> %2,
    <vscale x 4 x i1> %3,
    iXLen %4)

  ret void
}
144
; vsoxei32: indexed-ordered store of nxv8i8 data with nxv8i32 offsets (SEW=8, LMUL=1).
declare void @llvm.riscv.vsoxei.nxv8i8.nxv8i32(
  <vscale x 8 x i8>,
  ptr,
  <vscale x 8 x i32>,
  iXLen);

define void @intrinsic_vsoxei_v_nxv8i8_nxv8i8_nxv8i32(<vscale x 8 x i8> %0, ptr %1, <vscale x 8 x i32> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i8_nxv8i8_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT:    vsoxei32.v v8, (a0), v12
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.nxv8i8.nxv8i32(
    <vscale x 8 x i8> %0,
    ptr %1,
    <vscale x 8 x i32> %2,
    iXLen %3)

  ret void
}

declare void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i32(
  <vscale x 8 x i8>,
  ptr,
  <vscale x 8 x i32>,
  <vscale x 8 x i1>,
  iXLen);

define void @intrinsic_vsoxei_mask_v_nxv8i8_nxv8i8_nxv8i32(<vscale x 8 x i8> %0, ptr %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i8_nxv8i8_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT:    vsoxei32.v v8, (a0), v12, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i32(
    <vscale x 8 x i8> %0,
    ptr %1,
    <vscale x 8 x i32> %2,
    <vscale x 8 x i1> %3,
    iXLen %4)

  ret void
}
190
; vsoxei32: indexed-ordered store of nxv16i8 data with nxv16i32 offsets (SEW=8, LMUL=2).
declare void @llvm.riscv.vsoxei.nxv16i8.nxv16i32(
  <vscale x 16 x i8>,
  ptr,
  <vscale x 16 x i32>,
  iXLen);

define void @intrinsic_vsoxei_v_nxv16i8_nxv16i8_nxv16i32(<vscale x 16 x i8> %0, ptr %1, <vscale x 16 x i32> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_v_nxv16i8_nxv16i8_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT:    vsoxei32.v v8, (a0), v16
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.nxv16i8.nxv16i32(
    <vscale x 16 x i8> %0,
    ptr %1,
    <vscale x 16 x i32> %2,
    iXLen %3)

  ret void
}

declare void @llvm.riscv.vsoxei.mask.nxv16i8.nxv16i32(
  <vscale x 16 x i8>,
  ptr,
  <vscale x 16 x i32>,
  <vscale x 16 x i1>,
  iXLen);

define void @intrinsic_vsoxei_mask_v_nxv16i8_nxv16i8_nxv16i32(<vscale x 16 x i8> %0, ptr %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16i8_nxv16i8_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT:    vsoxei32.v v8, (a0), v16, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.mask.nxv16i8.nxv16i32(
    <vscale x 16 x i8> %0,
    ptr %1,
    <vscale x 16 x i32> %2,
    <vscale x 16 x i1> %3,
    iXLen %4)

  ret void
}
236
; vsoxei32: indexed-ordered store of nxv1i16 data with nxv1i32 offsets (SEW=16, LMUL=1/4).
declare void @llvm.riscv.vsoxei.nxv1i16.nxv1i32(
  <vscale x 1 x i16>,
  ptr,
  <vscale x 1 x i32>,
  iXLen);

define void @intrinsic_vsoxei_v_nxv1i16_nxv1i16_nxv1i32(<vscale x 1 x i16> %0, ptr %1, <vscale x 1 x i32> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i16_nxv1i16_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vsoxei32.v v8, (a0), v9
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.nxv1i16.nxv1i32(
    <vscale x 1 x i16> %0,
    ptr %1,
    <vscale x 1 x i32> %2,
    iXLen %3)

  ret void
}

declare void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i32(
  <vscale x 1 x i16>,
  ptr,
  <vscale x 1 x i32>,
  <vscale x 1 x i1>,
  iXLen);

define void @intrinsic_vsoxei_mask_v_nxv1i16_nxv1i16_nxv1i32(<vscale x 1 x i16> %0, ptr %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i16_nxv1i16_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vsoxei32.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i32(
    <vscale x 1 x i16> %0,
    ptr %1,
    <vscale x 1 x i32> %2,
    <vscale x 1 x i1> %3,
    iXLen %4)

  ret void
}
282
; vsoxei32: indexed-ordered store of nxv2i16 data with nxv2i32 offsets (SEW=16, LMUL=1/2).
declare void @llvm.riscv.vsoxei.nxv2i16.nxv2i32(
  <vscale x 2 x i16>,
  ptr,
  <vscale x 2 x i32>,
  iXLen);

define void @intrinsic_vsoxei_v_nxv2i16_nxv2i16_nxv2i32(<vscale x 2 x i16> %0, ptr %1, <vscale x 2 x i32> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i16_nxv2i16_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT:    vsoxei32.v v8, (a0), v9
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.nxv2i16.nxv2i32(
    <vscale x 2 x i16> %0,
    ptr %1,
    <vscale x 2 x i32> %2,
    iXLen %3)

  ret void
}

declare void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i32(
  <vscale x 2 x i16>,
  ptr,
  <vscale x 2 x i32>,
  <vscale x 2 x i1>,
  iXLen);

define void @intrinsic_vsoxei_mask_v_nxv2i16_nxv2i16_nxv2i32(<vscale x 2 x i16> %0, ptr %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i16_nxv2i16_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT:    vsoxei32.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i32(
    <vscale x 2 x i16> %0,
    ptr %1,
    <vscale x 2 x i32> %2,
    <vscale x 2 x i1> %3,
    iXLen %4)

  ret void
}
328
; vsoxei32: indexed-ordered store of nxv4i16 data with nxv4i32 offsets (SEW=16, LMUL=1).
declare void @llvm.riscv.vsoxei.nxv4i16.nxv4i32(
  <vscale x 4 x i16>,
  ptr,
  <vscale x 4 x i32>,
  iXLen);

define void @intrinsic_vsoxei_v_nxv4i16_nxv4i16_nxv4i32(<vscale x 4 x i16> %0, ptr %1, <vscale x 4 x i32> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i16_nxv4i16_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vsoxei32.v v8, (a0), v10
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.nxv4i16.nxv4i32(
    <vscale x 4 x i16> %0,
    ptr %1,
    <vscale x 4 x i32> %2,
    iXLen %3)

  ret void
}

declare void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i32(
  <vscale x 4 x i16>,
  ptr,
  <vscale x 4 x i32>,
  <vscale x 4 x i1>,
  iXLen);

define void @intrinsic_vsoxei_mask_v_nxv4i16_nxv4i16_nxv4i32(<vscale x 4 x i16> %0, ptr %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i16_nxv4i16_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vsoxei32.v v8, (a0), v10, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i32(
    <vscale x 4 x i16> %0,
    ptr %1,
    <vscale x 4 x i32> %2,
    <vscale x 4 x i1> %3,
    iXLen %4)

  ret void
}
374
; vsoxei32: indexed-ordered store of nxv8i16 data with nxv8i32 offsets (SEW=16, LMUL=2).
declare void @llvm.riscv.vsoxei.nxv8i16.nxv8i32(
  <vscale x 8 x i16>,
  ptr,
  <vscale x 8 x i32>,
  iXLen);

define void @intrinsic_vsoxei_v_nxv8i16_nxv8i16_nxv8i32(<vscale x 8 x i16> %0, ptr %1, <vscale x 8 x i32> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i16_nxv8i16_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT:    vsoxei32.v v8, (a0), v12
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.nxv8i16.nxv8i32(
    <vscale x 8 x i16> %0,
    ptr %1,
    <vscale x 8 x i32> %2,
    iXLen %3)

  ret void
}

declare void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i32(
  <vscale x 8 x i16>,
  ptr,
  <vscale x 8 x i32>,
  <vscale x 8 x i1>,
  iXLen);

define void @intrinsic_vsoxei_mask_v_nxv8i16_nxv8i16_nxv8i32(<vscale x 8 x i16> %0, ptr %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i16_nxv8i16_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT:    vsoxei32.v v8, (a0), v12, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i32(
    <vscale x 8 x i16> %0,
    ptr %1,
    <vscale x 8 x i32> %2,
    <vscale x 8 x i1> %3,
    iXLen %4)

  ret void
}
420
; vsoxei32: indexed-ordered store of nxv16i16 data with nxv16i32 offsets (SEW=16, LMUL=4).
declare void @llvm.riscv.vsoxei.nxv16i16.nxv16i32(
  <vscale x 16 x i16>,
  ptr,
  <vscale x 16 x i32>,
  iXLen);

define void @intrinsic_vsoxei_v_nxv16i16_nxv16i16_nxv16i32(<vscale x 16 x i16> %0, ptr %1, <vscale x 16 x i32> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_v_nxv16i16_nxv16i16_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT:    vsoxei32.v v8, (a0), v16
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.nxv16i16.nxv16i32(
    <vscale x 16 x i16> %0,
    ptr %1,
    <vscale x 16 x i32> %2,
    iXLen %3)

  ret void
}

declare void @llvm.riscv.vsoxei.mask.nxv16i16.nxv16i32(
  <vscale x 16 x i16>,
  ptr,
  <vscale x 16 x i32>,
  <vscale x 16 x i1>,
  iXLen);

define void @intrinsic_vsoxei_mask_v_nxv16i16_nxv16i16_nxv16i32(<vscale x 16 x i16> %0, ptr %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16i16_nxv16i16_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT:    vsoxei32.v v8, (a0), v16, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.mask.nxv16i16.nxv16i32(
    <vscale x 16 x i16> %0,
    ptr %1,
    <vscale x 16 x i32> %2,
    <vscale x 16 x i1> %3,
    iXLen %4)

  ret void
}
466
; vsoxei32: indexed-ordered store of nxv1i32 data with nxv1i32 offsets (SEW=32, LMUL=1/2).
declare void @llvm.riscv.vsoxei.nxv1i32.nxv1i32(
  <vscale x 1 x i32>,
  ptr,
  <vscale x 1 x i32>,
  iXLen);

define void @intrinsic_vsoxei_v_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, ptr %1, <vscale x 1 x i32> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i32_nxv1i32_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    vsoxei32.v v8, (a0), v9
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.nxv1i32.nxv1i32(
    <vscale x 1 x i32> %0,
    ptr %1,
    <vscale x 1 x i32> %2,
    iXLen %3)

  ret void
}

declare void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i32(
  <vscale x 1 x i32>,
  ptr,
  <vscale x 1 x i32>,
  <vscale x 1 x i1>,
  iXLen);

define void @intrinsic_vsoxei_mask_v_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, ptr %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i32_nxv1i32_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    vsoxei32.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i32(
    <vscale x 1 x i32> %0,
    ptr %1,
    <vscale x 1 x i32> %2,
    <vscale x 1 x i1> %3,
    iXLen %4)

  ret void
}
512
; vsoxei32: indexed-ordered store of nxv2i32 data with nxv2i32 offsets (SEW=32, LMUL=1).
declare void @llvm.riscv.vsoxei.nxv2i32.nxv2i32(
  <vscale x 2 x i32>,
  ptr,
  <vscale x 2 x i32>,
  iXLen);

define void @intrinsic_vsoxei_v_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, ptr %1, <vscale x 2 x i32> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i32_nxv2i32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vsoxei32.v v8, (a0), v9
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.nxv2i32.nxv2i32(
    <vscale x 2 x i32> %0,
    ptr %1,
    <vscale x 2 x i32> %2,
    iXLen %3)

  ret void
}

declare void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i32(
  <vscale x 2 x i32>,
  ptr,
  <vscale x 2 x i32>,
  <vscale x 2 x i1>,
  iXLen);

define void @intrinsic_vsoxei_mask_v_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, ptr %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i32_nxv2i32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vsoxei32.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i32(
    <vscale x 2 x i32> %0,
    ptr %1,
    <vscale x 2 x i32> %2,
    <vscale x 2 x i1> %3,
    iXLen %4)

  ret void
}
558
; vsoxei32: indexed-ordered store of nxv4i32 data with nxv4i32 offsets (SEW=32, LMUL=2).
declare void @llvm.riscv.vsoxei.nxv4i32.nxv4i32(
  <vscale x 4 x i32>,
  ptr,
  <vscale x 4 x i32>,
  iXLen);

define void @intrinsic_vsoxei_v_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, ptr %1, <vscale x 4 x i32> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i32_nxv4i32_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT:    vsoxei32.v v8, (a0), v10
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.nxv4i32.nxv4i32(
    <vscale x 4 x i32> %0,
    ptr %1,
    <vscale x 4 x i32> %2,
    iXLen %3)

  ret void
}

declare void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i32(
  <vscale x 4 x i32>,
  ptr,
  <vscale x 4 x i32>,
  <vscale x 4 x i1>,
  iXLen);

define void @intrinsic_vsoxei_mask_v_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, ptr %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i32_nxv4i32_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT:    vsoxei32.v v8, (a0), v10, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i32(
    <vscale x 4 x i32> %0,
    ptr %1,
    <vscale x 4 x i32> %2,
    <vscale x 4 x i1> %3,
    iXLen %4)

  ret void
}
604
; vsoxei32: indexed-ordered store of nxv8i32 data with nxv8i32 offsets (SEW=32, LMUL=4).
declare void @llvm.riscv.vsoxei.nxv8i32.nxv8i32(
  <vscale x 8 x i32>,
  ptr,
  <vscale x 8 x i32>,
  iXLen);

define void @intrinsic_vsoxei_v_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, ptr %1, <vscale x 8 x i32> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i32_nxv8i32_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT:    vsoxei32.v v8, (a0), v12
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.nxv8i32.nxv8i32(
    <vscale x 8 x i32> %0,
    ptr %1,
    <vscale x 8 x i32> %2,
    iXLen %3)

  ret void
}

declare void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i32(
  <vscale x 8 x i32>,
  ptr,
  <vscale x 8 x i32>,
  <vscale x 8 x i1>,
  iXLen);

define void @intrinsic_vsoxei_mask_v_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, ptr %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i32_nxv8i32_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT:    vsoxei32.v v8, (a0), v12, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i32(
    <vscale x 8 x i32> %0,
    ptr %1,
    <vscale x 8 x i32> %2,
    <vscale x 8 x i1> %3,
    iXLen %4)

  ret void
}
650
; vsoxei32: indexed-ordered store of nxv16i32 data with nxv16i32 offsets (SEW=32, LMUL=8).
declare void @llvm.riscv.vsoxei.nxv16i32.nxv16i32(
  <vscale x 16 x i32>,
  ptr,
  <vscale x 16 x i32>,
  iXLen);

define void @intrinsic_vsoxei_v_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, ptr %1, <vscale x 16 x i32> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_v_nxv16i32_nxv16i32_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT:    vsoxei32.v v8, (a0), v16
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.nxv16i32.nxv16i32(
    <vscale x 16 x i32> %0,
    ptr %1,
    <vscale x 16 x i32> %2,
    iXLen %3)

  ret void
}

declare void @llvm.riscv.vsoxei.mask.nxv16i32.nxv16i32(
  <vscale x 16 x i32>,
  ptr,
  <vscale x 16 x i32>,
  <vscale x 16 x i1>,
  iXLen);

define void @intrinsic_vsoxei_mask_v_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, ptr %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16i32_nxv16i32_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT:    vsoxei32.v v8, (a0), v16, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.mask.nxv16i32.nxv16i32(
    <vscale x 16 x i32> %0,
    ptr %1,
    <vscale x 16 x i32> %2,
    <vscale x 16 x i1> %3,
    iXLen %4)

  ret void
}
696
; vsoxei32: indexed-ordered store of nxv1i64 data with nxv1i32 offsets (SEW=64, LMUL=1).
declare void @llvm.riscv.vsoxei.nxv1i64.nxv1i32(
  <vscale x 1 x i64>,
  ptr,
  <vscale x 1 x i32>,
  iXLen);

define void @intrinsic_vsoxei_v_nxv1i64_nxv1i64_nxv1i32(<vscale x 1 x i64> %0, ptr %1, <vscale x 1 x i32> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i64_nxv1i64_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT:    vsoxei32.v v8, (a0), v9
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.nxv1i64.nxv1i32(
    <vscale x 1 x i64> %0,
    ptr %1,
    <vscale x 1 x i32> %2,
    iXLen %3)

  ret void
}

declare void @llvm.riscv.vsoxei.mask.nxv1i64.nxv1i32(
  <vscale x 1 x i64>,
  ptr,
  <vscale x 1 x i32>,
  <vscale x 1 x i1>,
  iXLen);

define void @intrinsic_vsoxei_mask_v_nxv1i64_nxv1i64_nxv1i32(<vscale x 1 x i64> %0, ptr %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i64_nxv1i64_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT:    vsoxei32.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.mask.nxv1i64.nxv1i32(
    <vscale x 1 x i64> %0,
    ptr %1,
    <vscale x 1 x i32> %2,
    <vscale x 1 x i1> %3,
    iXLen %4)

  ret void
}
742
; vsoxei32: indexed-ordered store of nxv2i64 data with nxv2i32 offsets (SEW=64, LMUL=2).
declare void @llvm.riscv.vsoxei.nxv2i64.nxv2i32(
  <vscale x 2 x i64>,
  ptr,
  <vscale x 2 x i32>,
  iXLen);

define void @intrinsic_vsoxei_v_nxv2i64_nxv2i64_nxv2i32(<vscale x 2 x i64> %0, ptr %1, <vscale x 2 x i32> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i64_nxv2i64_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT:    vsoxei32.v v8, (a0), v10
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.nxv2i64.nxv2i32(
    <vscale x 2 x i64> %0,
    ptr %1,
    <vscale x 2 x i32> %2,
    iXLen %3)

  ret void
}

declare void @llvm.riscv.vsoxei.mask.nxv2i64.nxv2i32(
  <vscale x 2 x i64>,
  ptr,
  <vscale x 2 x i32>,
  <vscale x 2 x i1>,
  iXLen);

define void @intrinsic_vsoxei_mask_v_nxv2i64_nxv2i64_nxv2i32(<vscale x 2 x i64> %0, ptr %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i64_nxv2i64_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT:    vsoxei32.v v8, (a0), v10, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.mask.nxv2i64.nxv2i32(
    <vscale x 2 x i64> %0,
    ptr %1,
    <vscale x 2 x i32> %2,
    <vscale x 2 x i1> %3,
    iXLen %4)

  ret void
}
788
; vsoxei32: indexed-ordered store of nxv4i64 data with nxv4i32 offsets (SEW=64, LMUL=4).
declare void @llvm.riscv.vsoxei.nxv4i64.nxv4i32(
  <vscale x 4 x i64>,
  ptr,
  <vscale x 4 x i32>,
  iXLen);

define void @intrinsic_vsoxei_v_nxv4i64_nxv4i64_nxv4i32(<vscale x 4 x i64> %0, ptr %1, <vscale x 4 x i32> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i64_nxv4i64_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
; CHECK-NEXT:    vsoxei32.v v8, (a0), v12
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.nxv4i64.nxv4i32(
    <vscale x 4 x i64> %0,
    ptr %1,
    <vscale x 4 x i32> %2,
    iXLen %3)

  ret void
}

declare void @llvm.riscv.vsoxei.mask.nxv4i64.nxv4i32(
  <vscale x 4 x i64>,
  ptr,
  <vscale x 4 x i32>,
  <vscale x 4 x i1>,
  iXLen);

define void @intrinsic_vsoxei_mask_v_nxv4i64_nxv4i64_nxv4i32(<vscale x 4 x i64> %0, ptr %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i64_nxv4i64_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
; CHECK-NEXT:    vsoxei32.v v8, (a0), v12, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.mask.nxv4i64.nxv4i32(
    <vscale x 4 x i64> %0,
    ptr %1,
    <vscale x 4 x i32> %2,
    <vscale x 4 x i1> %3,
    iXLen %4)

  ret void
}
834
; vsoxei32: indexed-ordered store of nxv8i64 data with nxv8i32 offsets (SEW=64, LMUL=8).
declare void @llvm.riscv.vsoxei.nxv8i64.nxv8i32(
  <vscale x 8 x i64>,
  ptr,
  <vscale x 8 x i32>,
  iXLen);

define void @intrinsic_vsoxei_v_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i64> %0, ptr %1, <vscale x 8 x i32> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i64_nxv8i64_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
; CHECK-NEXT:    vsoxei32.v v8, (a0), v16
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.nxv8i64.nxv8i32(
    <vscale x 8 x i64> %0,
    ptr %1,
    <vscale x 8 x i32> %2,
    iXLen %3)

  ret void
}

declare void @llvm.riscv.vsoxei.mask.nxv8i64.nxv8i32(
  <vscale x 8 x i64>,
  ptr,
  <vscale x 8 x i32>,
  <vscale x 8 x i1>,
  iXLen);

define void @intrinsic_vsoxei_mask_v_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i64> %0, ptr %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i64_nxv8i64_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
; CHECK-NEXT:    vsoxei32.v v8, (a0), v16, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.mask.nxv8i64.nxv8i32(
    <vscale x 8 x i64> %0,
    ptr %1,
    <vscale x 8 x i32> %2,
    <vscale x 8 x i1> %3,
    iXLen %4)

  ret void
}
880
; vsoxei32: indexed-ordered store of nxv1f16 data with nxv1i32 offsets (SEW=16, LMUL=1/4).
declare void @llvm.riscv.vsoxei.nxv1f16.nxv1i32(
  <vscale x 1 x half>,
  ptr,
  <vscale x 1 x i32>,
  iXLen);

define void @intrinsic_vsoxei_v_nxv1f16_nxv1f16_nxv1i32(<vscale x 1 x half> %0, ptr %1, <vscale x 1 x i32> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f16_nxv1f16_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vsoxei32.v v8, (a0), v9
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.nxv1f16.nxv1i32(
    <vscale x 1 x half> %0,
    ptr %1,
    <vscale x 1 x i32> %2,
    iXLen %3)

  ret void
}

declare void @llvm.riscv.vsoxei.mask.nxv1f16.nxv1i32(
  <vscale x 1 x half>,
  ptr,
  <vscale x 1 x i32>,
  <vscale x 1 x i1>,
  iXLen);

define void @intrinsic_vsoxei_mask_v_nxv1f16_nxv1f16_nxv1i32(<vscale x 1 x half> %0, ptr %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f16_nxv1f16_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vsoxei32.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.mask.nxv1f16.nxv1i32(
    <vscale x 1 x half> %0,
    ptr %1,
    <vscale x 1 x i32> %2,
    <vscale x 1 x i1> %3,
    iXLen %4)

  ret void
}
926
; vsoxei32: indexed-ordered store of nxv2f16 data with nxv2i32 offsets (SEW=16, LMUL=1/2).
declare void @llvm.riscv.vsoxei.nxv2f16.nxv2i32(
  <vscale x 2 x half>,
  ptr,
  <vscale x 2 x i32>,
  iXLen);

define void @intrinsic_vsoxei_v_nxv2f16_nxv2f16_nxv2i32(<vscale x 2 x half> %0, ptr %1, <vscale x 2 x i32> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f16_nxv2f16_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT:    vsoxei32.v v8, (a0), v9
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.nxv2f16.nxv2i32(
    <vscale x 2 x half> %0,
    ptr %1,
    <vscale x 2 x i32> %2,
    iXLen %3)

  ret void
}

declare void @llvm.riscv.vsoxei.mask.nxv2f16.nxv2i32(
  <vscale x 2 x half>,
  ptr,
  <vscale x 2 x i32>,
  <vscale x 2 x i1>,
  iXLen);

define void @intrinsic_vsoxei_mask_v_nxv2f16_nxv2f16_nxv2i32(<vscale x 2 x half> %0, ptr %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f16_nxv2f16_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT:    vsoxei32.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.mask.nxv2f16.nxv2i32(
    <vscale x 2 x half> %0,
    ptr %1,
    <vscale x 2 x i32> %2,
    <vscale x 2 x i1> %3,
    iXLen %4)

  ret void
}
972
; vsoxei32: indexed-ordered store of nxv4f16 data with nxv4i32 offsets (SEW=16, LMUL=1).
declare void @llvm.riscv.vsoxei.nxv4f16.nxv4i32(
  <vscale x 4 x half>,
  ptr,
  <vscale x 4 x i32>,
  iXLen);

define void @intrinsic_vsoxei_v_nxv4f16_nxv4f16_nxv4i32(<vscale x 4 x half> %0, ptr %1, <vscale x 4 x i32> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f16_nxv4f16_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vsoxei32.v v8, (a0), v10
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.nxv4f16.nxv4i32(
    <vscale x 4 x half> %0,
    ptr %1,
    <vscale x 4 x i32> %2,
    iXLen %3)

  ret void
}

declare void @llvm.riscv.vsoxei.mask.nxv4f16.nxv4i32(
  <vscale x 4 x half>,
  ptr,
  <vscale x 4 x i32>,
  <vscale x 4 x i1>,
  iXLen);

define void @intrinsic_vsoxei_mask_v_nxv4f16_nxv4f16_nxv4i32(<vscale x 4 x half> %0, ptr %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f16_nxv4f16_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vsoxei32.v v8, (a0), v10, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.mask.nxv4f16.nxv4i32(
    <vscale x 4 x half> %0,
    ptr %1,
    <vscale x 4 x i32> %2,
    <vscale x 4 x i1> %3,
    iXLen %4)

  ret void
}
1018
; vsoxei32: indexed-ordered store of nxv8f16 data with nxv8i32 offsets (SEW=16, LMUL=2).
declare void @llvm.riscv.vsoxei.nxv8f16.nxv8i32(
  <vscale x 8 x half>,
  ptr,
  <vscale x 8 x i32>,
  iXLen);

define void @intrinsic_vsoxei_v_nxv8f16_nxv8f16_nxv8i32(<vscale x 8 x half> %0, ptr %1, <vscale x 8 x i32> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f16_nxv8f16_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT:    vsoxei32.v v8, (a0), v12
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.nxv8f16.nxv8i32(
    <vscale x 8 x half> %0,
    ptr %1,
    <vscale x 8 x i32> %2,
    iXLen %3)

  ret void
}

declare void @llvm.riscv.vsoxei.mask.nxv8f16.nxv8i32(
  <vscale x 8 x half>,
  ptr,
  <vscale x 8 x i32>,
  <vscale x 8 x i1>,
  iXLen);

define void @intrinsic_vsoxei_mask_v_nxv8f16_nxv8f16_nxv8i32(<vscale x 8 x half> %0, ptr %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f16_nxv8f16_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT:    vsoxei32.v v8, (a0), v12, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.mask.nxv8f16.nxv8i32(
    <vscale x 8 x half> %0,
    ptr %1,
    <vscale x 8 x i32> %2,
    <vscale x 8 x i1> %3,
    iXLen %4)

  ret void
}
1064
1065declare void @llvm.riscv.vsoxei.nxv16f16.nxv16i32(
1066  <vscale x 16 x half>,
1067  ptr,
1068  <vscale x 16 x i32>,
1069  iXLen);
1070
1071define void @intrinsic_vsoxei_v_nxv16f16_nxv16f16_nxv16i32(<vscale x 16 x half> %0, ptr %1, <vscale x 16 x i32> %2, iXLen %3) nounwind {
1072; CHECK-LABEL: intrinsic_vsoxei_v_nxv16f16_nxv16f16_nxv16i32:
1073; CHECK:       # %bb.0: # %entry
1074; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
1075; CHECK-NEXT:    vsoxei32.v v8, (a0), v16
1076; CHECK-NEXT:    ret
1077entry:
1078  call void @llvm.riscv.vsoxei.nxv16f16.nxv16i32(
1079    <vscale x 16 x half> %0,
1080    ptr %1,
1081    <vscale x 16 x i32> %2,
1082    iXLen %3)
1083
1084  ret void
1085}
1086
1087declare void @llvm.riscv.vsoxei.mask.nxv16f16.nxv16i32(
1088  <vscale x 16 x half>,
1089  ptr,
1090  <vscale x 16 x i32>,
1091  <vscale x 16 x i1>,
1092  iXLen);
1093
1094define void @intrinsic_vsoxei_mask_v_nxv16f16_nxv16f16_nxv16i32(<vscale x 16 x half> %0, ptr %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
1095; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16f16_nxv16f16_nxv16i32:
1096; CHECK:       # %bb.0: # %entry
1097; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
1098; CHECK-NEXT:    vsoxei32.v v8, (a0), v16, v0.t
1099; CHECK-NEXT:    ret
1100entry:
1101  call void @llvm.riscv.vsoxei.mask.nxv16f16.nxv16i32(
1102    <vscale x 16 x half> %0,
1103    ptr %1,
1104    <vscale x 16 x i32> %2,
1105    <vscale x 16 x i1> %3,
1106    iXLen %4)
1107
1108  ret void
1109}
1110
; vsoxei32.v with float (f32) element data: indexed-ordered stores of
; nxv1-nxv16 float vectors through i32 index vectors, unmasked and masked
; (v0.t) forms. Here data EEW equals index EEW (e32), so data and index use
; same-size register groups (mf2 through m8). CHECK lines are autogenerated --
; regenerate with update_llc_test_checks.py rather than editing by hand.
declare void @llvm.riscv.vsoxei.nxv1f32.nxv1i32(
  <vscale x 1 x float>,
  ptr,
  <vscale x 1 x i32>,
  iXLen);

define void @intrinsic_vsoxei_v_nxv1f32_nxv1f32_nxv1i32(<vscale x 1 x float> %0, ptr %1, <vscale x 1 x i32> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f32_nxv1f32_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    vsoxei32.v v8, (a0), v9
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.nxv1f32.nxv1i32(
    <vscale x 1 x float> %0,
    ptr %1,
    <vscale x 1 x i32> %2,
    iXLen %3)

  ret void
}

declare void @llvm.riscv.vsoxei.mask.nxv1f32.nxv1i32(
  <vscale x 1 x float>,
  ptr,
  <vscale x 1 x i32>,
  <vscale x 1 x i1>,
  iXLen);

define void @intrinsic_vsoxei_mask_v_nxv1f32_nxv1f32_nxv1i32(<vscale x 1 x float> %0, ptr %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f32_nxv1f32_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    vsoxei32.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.mask.nxv1f32.nxv1i32(
    <vscale x 1 x float> %0,
    ptr %1,
    <vscale x 1 x i32> %2,
    <vscale x 1 x i1> %3,
    iXLen %4)

  ret void
}

declare void @llvm.riscv.vsoxei.nxv2f32.nxv2i32(
  <vscale x 2 x float>,
  ptr,
  <vscale x 2 x i32>,
  iXLen);

define void @intrinsic_vsoxei_v_nxv2f32_nxv2f32_nxv2i32(<vscale x 2 x float> %0, ptr %1, <vscale x 2 x i32> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f32_nxv2f32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vsoxei32.v v8, (a0), v9
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.nxv2f32.nxv2i32(
    <vscale x 2 x float> %0,
    ptr %1,
    <vscale x 2 x i32> %2,
    iXLen %3)

  ret void
}

declare void @llvm.riscv.vsoxei.mask.nxv2f32.nxv2i32(
  <vscale x 2 x float>,
  ptr,
  <vscale x 2 x i32>,
  <vscale x 2 x i1>,
  iXLen);

define void @intrinsic_vsoxei_mask_v_nxv2f32_nxv2f32_nxv2i32(<vscale x 2 x float> %0, ptr %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f32_nxv2f32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vsoxei32.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.mask.nxv2f32.nxv2i32(
    <vscale x 2 x float> %0,
    ptr %1,
    <vscale x 2 x i32> %2,
    <vscale x 2 x i1> %3,
    iXLen %4)

  ret void
}

declare void @llvm.riscv.vsoxei.nxv4f32.nxv4i32(
  <vscale x 4 x float>,
  ptr,
  <vscale x 4 x i32>,
  iXLen);

define void @intrinsic_vsoxei_v_nxv4f32_nxv4f32_nxv4i32(<vscale x 4 x float> %0, ptr %1, <vscale x 4 x i32> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f32_nxv4f32_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT:    vsoxei32.v v8, (a0), v10
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.nxv4f32.nxv4i32(
    <vscale x 4 x float> %0,
    ptr %1,
    <vscale x 4 x i32> %2,
    iXLen %3)

  ret void
}

declare void @llvm.riscv.vsoxei.mask.nxv4f32.nxv4i32(
  <vscale x 4 x float>,
  ptr,
  <vscale x 4 x i32>,
  <vscale x 4 x i1>,
  iXLen);

define void @intrinsic_vsoxei_mask_v_nxv4f32_nxv4f32_nxv4i32(<vscale x 4 x float> %0, ptr %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f32_nxv4f32_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT:    vsoxei32.v v8, (a0), v10, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.mask.nxv4f32.nxv4i32(
    <vscale x 4 x float> %0,
    ptr %1,
    <vscale x 4 x i32> %2,
    <vscale x 4 x i1> %3,
    iXLen %4)

  ret void
}

declare void @llvm.riscv.vsoxei.nxv8f32.nxv8i32(
  <vscale x 8 x float>,
  ptr,
  <vscale x 8 x i32>,
  iXLen);

define void @intrinsic_vsoxei_v_nxv8f32_nxv8f32_nxv8i32(<vscale x 8 x float> %0, ptr %1, <vscale x 8 x i32> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f32_nxv8f32_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT:    vsoxei32.v v8, (a0), v12
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.nxv8f32.nxv8i32(
    <vscale x 8 x float> %0,
    ptr %1,
    <vscale x 8 x i32> %2,
    iXLen %3)

  ret void
}

declare void @llvm.riscv.vsoxei.mask.nxv8f32.nxv8i32(
  <vscale x 8 x float>,
  ptr,
  <vscale x 8 x i32>,
  <vscale x 8 x i1>,
  iXLen);

define void @intrinsic_vsoxei_mask_v_nxv8f32_nxv8f32_nxv8i32(<vscale x 8 x float> %0, ptr %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f32_nxv8f32_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT:    vsoxei32.v v8, (a0), v12, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.mask.nxv8f32.nxv8i32(
    <vscale x 8 x float> %0,
    ptr %1,
    <vscale x 8 x i32> %2,
    <vscale x 8 x i1> %3,
    iXLen %4)

  ret void
}

declare void @llvm.riscv.vsoxei.nxv16f32.nxv16i32(
  <vscale x 16 x float>,
  ptr,
  <vscale x 16 x i32>,
  iXLen);

define void @intrinsic_vsoxei_v_nxv16f32_nxv16f32_nxv16i32(<vscale x 16 x float> %0, ptr %1, <vscale x 16 x i32> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_v_nxv16f32_nxv16f32_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT:    vsoxei32.v v8, (a0), v16
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.nxv16f32.nxv16i32(
    <vscale x 16 x float> %0,
    ptr %1,
    <vscale x 16 x i32> %2,
    iXLen %3)

  ret void
}

declare void @llvm.riscv.vsoxei.mask.nxv16f32.nxv16i32(
  <vscale x 16 x float>,
  ptr,
  <vscale x 16 x i32>,
  <vscale x 16 x i1>,
  iXLen);

define void @intrinsic_vsoxei_mask_v_nxv16f32_nxv16f32_nxv16i32(<vscale x 16 x float> %0, ptr %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16f32_nxv16f32_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT:    vsoxei32.v v8, (a0), v16, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.mask.nxv16f32.nxv16i32(
    <vscale x 16 x float> %0,
    ptr %1,
    <vscale x 16 x i32> %2,
    <vscale x 16 x i1> %3,
    iXLen %4)

  ret void
}
1340
; vsoxei32.v with double (f64) element data: indexed-ordered stores of
; nxv1-nxv8 double vectors through i32 index vectors, unmasked and masked
; (v0.t) forms. Data EEW (e64) is twice the index EEW, so the index register
; group is half the size of the data group (v9/v10/v12/v16 vs m1/m2/m4/m8).
; CHECK lines are autogenerated by update_llc_test_checks.py.
declare void @llvm.riscv.vsoxei.nxv1f64.nxv1i32(
  <vscale x 1 x double>,
  ptr,
  <vscale x 1 x i32>,
  iXLen);

define void @intrinsic_vsoxei_v_nxv1f64_nxv1f64_nxv1i32(<vscale x 1 x double> %0, ptr %1, <vscale x 1 x i32> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f64_nxv1f64_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT:    vsoxei32.v v8, (a0), v9
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.nxv1f64.nxv1i32(
    <vscale x 1 x double> %0,
    ptr %1,
    <vscale x 1 x i32> %2,
    iXLen %3)

  ret void
}

declare void @llvm.riscv.vsoxei.mask.nxv1f64.nxv1i32(
  <vscale x 1 x double>,
  ptr,
  <vscale x 1 x i32>,
  <vscale x 1 x i1>,
  iXLen);

define void @intrinsic_vsoxei_mask_v_nxv1f64_nxv1f64_nxv1i32(<vscale x 1 x double> %0, ptr %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f64_nxv1f64_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT:    vsoxei32.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.mask.nxv1f64.nxv1i32(
    <vscale x 1 x double> %0,
    ptr %1,
    <vscale x 1 x i32> %2,
    <vscale x 1 x i1> %3,
    iXLen %4)

  ret void
}

declare void @llvm.riscv.vsoxei.nxv2f64.nxv2i32(
  <vscale x 2 x double>,
  ptr,
  <vscale x 2 x i32>,
  iXLen);

define void @intrinsic_vsoxei_v_nxv2f64_nxv2f64_nxv2i32(<vscale x 2 x double> %0, ptr %1, <vscale x 2 x i32> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f64_nxv2f64_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT:    vsoxei32.v v8, (a0), v10
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.nxv2f64.nxv2i32(
    <vscale x 2 x double> %0,
    ptr %1,
    <vscale x 2 x i32> %2,
    iXLen %3)

  ret void
}

declare void @llvm.riscv.vsoxei.mask.nxv2f64.nxv2i32(
  <vscale x 2 x double>,
  ptr,
  <vscale x 2 x i32>,
  <vscale x 2 x i1>,
  iXLen);

define void @intrinsic_vsoxei_mask_v_nxv2f64_nxv2f64_nxv2i32(<vscale x 2 x double> %0, ptr %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f64_nxv2f64_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT:    vsoxei32.v v8, (a0), v10, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.mask.nxv2f64.nxv2i32(
    <vscale x 2 x double> %0,
    ptr %1,
    <vscale x 2 x i32> %2,
    <vscale x 2 x i1> %3,
    iXLen %4)

  ret void
}

declare void @llvm.riscv.vsoxei.nxv4f64.nxv4i32(
  <vscale x 4 x double>,
  ptr,
  <vscale x 4 x i32>,
  iXLen);

define void @intrinsic_vsoxei_v_nxv4f64_nxv4f64_nxv4i32(<vscale x 4 x double> %0, ptr %1, <vscale x 4 x i32> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f64_nxv4f64_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
; CHECK-NEXT:    vsoxei32.v v8, (a0), v12
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.nxv4f64.nxv4i32(
    <vscale x 4 x double> %0,
    ptr %1,
    <vscale x 4 x i32> %2,
    iXLen %3)

  ret void
}

declare void @llvm.riscv.vsoxei.mask.nxv4f64.nxv4i32(
  <vscale x 4 x double>,
  ptr,
  <vscale x 4 x i32>,
  <vscale x 4 x i1>,
  iXLen);

define void @intrinsic_vsoxei_mask_v_nxv4f64_nxv4f64_nxv4i32(<vscale x 4 x double> %0, ptr %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f64_nxv4f64_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
; CHECK-NEXT:    vsoxei32.v v8, (a0), v12, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.mask.nxv4f64.nxv4i32(
    <vscale x 4 x double> %0,
    ptr %1,
    <vscale x 4 x i32> %2,
    <vscale x 4 x i1> %3,
    iXLen %4)

  ret void
}

declare void @llvm.riscv.vsoxei.nxv8f64.nxv8i32(
  <vscale x 8 x double>,
  ptr,
  <vscale x 8 x i32>,
  iXLen);

define void @intrinsic_vsoxei_v_nxv8f64_nxv8f64_nxv8i32(<vscale x 8 x double> %0, ptr %1, <vscale x 8 x i32> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f64_nxv8f64_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
; CHECK-NEXT:    vsoxei32.v v8, (a0), v16
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.nxv8f64.nxv8i32(
    <vscale x 8 x double> %0,
    ptr %1,
    <vscale x 8 x i32> %2,
    iXLen %3)

  ret void
}

declare void @llvm.riscv.vsoxei.mask.nxv8f64.nxv8i32(
  <vscale x 8 x double>,
  ptr,
  <vscale x 8 x i32>,
  <vscale x 8 x i1>,
  iXLen);

define void @intrinsic_vsoxei_mask_v_nxv8f64_nxv8f64_nxv8i32(<vscale x 8 x double> %0, ptr %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f64_nxv8f64_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
; CHECK-NEXT:    vsoxei32.v v8, (a0), v16, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.mask.nxv8f64.nxv8i32(
    <vscale x 8 x double> %0,
    ptr %1,
    <vscale x 8 x i32> %2,
    <vscale x 8 x i1> %3,
    iXLen %4)

  ret void
}
1524
; vsoxei16.v with i8 element data: indexed-ordered stores of nxv1-nxv32 i8
; vectors through i16 index vectors, unmasked and masked (v0.t) forms.
; SEW=e8 with LMUL mf8 through m4; the i16 index operand occupies a register
; group twice the data group's size (v9/v10/v12/v16). CHECK lines are
; autogenerated by update_llc_test_checks.py.
declare void @llvm.riscv.vsoxei.nxv1i8.nxv1i16(
  <vscale x 1 x i8>,
  ptr,
  <vscale x 1 x i16>,
  iXLen);

define void @intrinsic_vsoxei_v_nxv1i8_nxv1i8_nxv1i16(<vscale x 1 x i8> %0, ptr %1, <vscale x 1 x i16> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i8_nxv1i8_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vsoxei16.v v8, (a0), v9
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.nxv1i8.nxv1i16(
    <vscale x 1 x i8> %0,
    ptr %1,
    <vscale x 1 x i16> %2,
    iXLen %3)

  ret void
}

declare void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i16(
  <vscale x 1 x i8>,
  ptr,
  <vscale x 1 x i16>,
  <vscale x 1 x i1>,
  iXLen);

define void @intrinsic_vsoxei_mask_v_nxv1i8_nxv1i8_nxv1i16(<vscale x 1 x i8> %0, ptr %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i8_nxv1i8_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vsoxei16.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i16(
    <vscale x 1 x i8> %0,
    ptr %1,
    <vscale x 1 x i16> %2,
    <vscale x 1 x i1> %3,
    iXLen %4)

  ret void
}

declare void @llvm.riscv.vsoxei.nxv2i8.nxv2i16(
  <vscale x 2 x i8>,
  ptr,
  <vscale x 2 x i16>,
  iXLen);

define void @intrinsic_vsoxei_v_nxv2i8_nxv2i8_nxv2i16(<vscale x 2 x i8> %0, ptr %1, <vscale x 2 x i16> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i8_nxv2i8_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT:    vsoxei16.v v8, (a0), v9
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.nxv2i8.nxv2i16(
    <vscale x 2 x i8> %0,
    ptr %1,
    <vscale x 2 x i16> %2,
    iXLen %3)

  ret void
}

declare void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i16(
  <vscale x 2 x i8>,
  ptr,
  <vscale x 2 x i16>,
  <vscale x 2 x i1>,
  iXLen);

define void @intrinsic_vsoxei_mask_v_nxv2i8_nxv2i8_nxv2i16(<vscale x 2 x i8> %0, ptr %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i8_nxv2i8_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT:    vsoxei16.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i16(
    <vscale x 2 x i8> %0,
    ptr %1,
    <vscale x 2 x i16> %2,
    <vscale x 2 x i1> %3,
    iXLen %4)

  ret void
}

declare void @llvm.riscv.vsoxei.nxv4i8.nxv4i16(
  <vscale x 4 x i8>,
  ptr,
  <vscale x 4 x i16>,
  iXLen);

define void @intrinsic_vsoxei_v_nxv4i8_nxv4i8_nxv4i16(<vscale x 4 x i8> %0, ptr %1, <vscale x 4 x i16> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i8_nxv4i8_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vsoxei16.v v8, (a0), v9
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.nxv4i8.nxv4i16(
    <vscale x 4 x i8> %0,
    ptr %1,
    <vscale x 4 x i16> %2,
    iXLen %3)

  ret void
}

declare void @llvm.riscv.vsoxei.mask.nxv4i8.nxv4i16(
  <vscale x 4 x i8>,
  ptr,
  <vscale x 4 x i16>,
  <vscale x 4 x i1>,
  iXLen);

define void @intrinsic_vsoxei_mask_v_nxv4i8_nxv4i8_nxv4i16(<vscale x 4 x i8> %0, ptr %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i8_nxv4i8_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vsoxei16.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.mask.nxv4i8.nxv4i16(
    <vscale x 4 x i8> %0,
    ptr %1,
    <vscale x 4 x i16> %2,
    <vscale x 4 x i1> %3,
    iXLen %4)

  ret void
}

declare void @llvm.riscv.vsoxei.nxv8i8.nxv8i16(
  <vscale x 8 x i8>,
  ptr,
  <vscale x 8 x i16>,
  iXLen);

define void @intrinsic_vsoxei_v_nxv8i8_nxv8i8_nxv8i16(<vscale x 8 x i8> %0, ptr %1, <vscale x 8 x i16> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i8_nxv8i8_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT:    vsoxei16.v v8, (a0), v10
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.nxv8i8.nxv8i16(
    <vscale x 8 x i8> %0,
    ptr %1,
    <vscale x 8 x i16> %2,
    iXLen %3)

  ret void
}

declare void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i16(
  <vscale x 8 x i8>,
  ptr,
  <vscale x 8 x i16>,
  <vscale x 8 x i1>,
  iXLen);

define void @intrinsic_vsoxei_mask_v_nxv8i8_nxv8i8_nxv8i16(<vscale x 8 x i8> %0, ptr %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i8_nxv8i8_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT:    vsoxei16.v v8, (a0), v10, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i16(
    <vscale x 8 x i8> %0,
    ptr %1,
    <vscale x 8 x i16> %2,
    <vscale x 8 x i1> %3,
    iXLen %4)

  ret void
}

declare void @llvm.riscv.vsoxei.nxv16i8.nxv16i16(
  <vscale x 16 x i8>,
  ptr,
  <vscale x 16 x i16>,
  iXLen);

define void @intrinsic_vsoxei_v_nxv16i8_nxv16i8_nxv16i16(<vscale x 16 x i8> %0, ptr %1, <vscale x 16 x i16> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_v_nxv16i8_nxv16i8_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT:    vsoxei16.v v8, (a0), v12
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.nxv16i8.nxv16i16(
    <vscale x 16 x i8> %0,
    ptr %1,
    <vscale x 16 x i16> %2,
    iXLen %3)

  ret void
}

declare void @llvm.riscv.vsoxei.mask.nxv16i8.nxv16i16(
  <vscale x 16 x i8>,
  ptr,
  <vscale x 16 x i16>,
  <vscale x 16 x i1>,
  iXLen);

define void @intrinsic_vsoxei_mask_v_nxv16i8_nxv16i8_nxv16i16(<vscale x 16 x i8> %0, ptr %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16i8_nxv16i8_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT:    vsoxei16.v v8, (a0), v12, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.mask.nxv16i8.nxv16i16(
    <vscale x 16 x i8> %0,
    ptr %1,
    <vscale x 16 x i16> %2,
    <vscale x 16 x i1> %3,
    iXLen %4)

  ret void
}

declare void @llvm.riscv.vsoxei.nxv32i8.nxv32i16(
  <vscale x 32 x i8>,
  ptr,
  <vscale x 32 x i16>,
  iXLen);

define void @intrinsic_vsoxei_v_nxv32i8_nxv32i8_nxv32i16(<vscale x 32 x i8> %0, ptr %1, <vscale x 32 x i16> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_v_nxv32i8_nxv32i8_nxv32i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT:    vsoxei16.v v8, (a0), v16
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.nxv32i8.nxv32i16(
    <vscale x 32 x i8> %0,
    ptr %1,
    <vscale x 32 x i16> %2,
    iXLen %3)

  ret void
}

declare void @llvm.riscv.vsoxei.mask.nxv32i8.nxv32i16(
  <vscale x 32 x i8>,
  ptr,
  <vscale x 32 x i16>,
  <vscale x 32 x i1>,
  iXLen);

define void @intrinsic_vsoxei_mask_v_nxv32i8_nxv32i8_nxv32i16(<vscale x 32 x i8> %0, ptr %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv32i8_nxv32i8_nxv32i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT:    vsoxei16.v v8, (a0), v16, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.mask.nxv32i8.nxv32i16(
    <vscale x 32 x i8> %0,
    ptr %1,
    <vscale x 32 x i16> %2,
    <vscale x 32 x i1> %3,
    iXLen %4)

  ret void
}
1800
; vsoxei16.v with i16 element data: indexed-ordered stores of nxv1-nxv32 i16
; vectors through i16 index vectors, unmasked and masked (v0.t) forms. Data
; EEW equals index EEW (e16), so data and index register groups are the same
; size (mf4 through m8; index in v9/v10/v12/v16). CHECK lines are
; autogenerated by update_llc_test_checks.py.
declare void @llvm.riscv.vsoxei.nxv1i16.nxv1i16(
  <vscale x 1 x i16>,
  ptr,
  <vscale x 1 x i16>,
  iXLen);

define void @intrinsic_vsoxei_v_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, ptr %1, <vscale x 1 x i16> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i16_nxv1i16_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vsoxei16.v v8, (a0), v9
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.nxv1i16.nxv1i16(
    <vscale x 1 x i16> %0,
    ptr %1,
    <vscale x 1 x i16> %2,
    iXLen %3)

  ret void
}

declare void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i16(
  <vscale x 1 x i16>,
  ptr,
  <vscale x 1 x i16>,
  <vscale x 1 x i1>,
  iXLen);

define void @intrinsic_vsoxei_mask_v_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, ptr %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i16_nxv1i16_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vsoxei16.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i16(
    <vscale x 1 x i16> %0,
    ptr %1,
    <vscale x 1 x i16> %2,
    <vscale x 1 x i1> %3,
    iXLen %4)

  ret void
}

declare void @llvm.riscv.vsoxei.nxv2i16.nxv2i16(
  <vscale x 2 x i16>,
  ptr,
  <vscale x 2 x i16>,
  iXLen);

define void @intrinsic_vsoxei_v_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, ptr %1, <vscale x 2 x i16> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i16_nxv2i16_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT:    vsoxei16.v v8, (a0), v9
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.nxv2i16.nxv2i16(
    <vscale x 2 x i16> %0,
    ptr %1,
    <vscale x 2 x i16> %2,
    iXLen %3)

  ret void
}

declare void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i16(
  <vscale x 2 x i16>,
  ptr,
  <vscale x 2 x i16>,
  <vscale x 2 x i1>,
  iXLen);

define void @intrinsic_vsoxei_mask_v_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, ptr %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i16_nxv2i16_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT:    vsoxei16.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i16(
    <vscale x 2 x i16> %0,
    ptr %1,
    <vscale x 2 x i16> %2,
    <vscale x 2 x i1> %3,
    iXLen %4)

  ret void
}

declare void @llvm.riscv.vsoxei.nxv4i16.nxv4i16(
  <vscale x 4 x i16>,
  ptr,
  <vscale x 4 x i16>,
  iXLen);

define void @intrinsic_vsoxei_v_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, ptr %1, <vscale x 4 x i16> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i16_nxv4i16_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vsoxei16.v v8, (a0), v9
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.nxv4i16.nxv4i16(
    <vscale x 4 x i16> %0,
    ptr %1,
    <vscale x 4 x i16> %2,
    iXLen %3)

  ret void
}

declare void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i16(
  <vscale x 4 x i16>,
  ptr,
  <vscale x 4 x i16>,
  <vscale x 4 x i1>,
  iXLen);

define void @intrinsic_vsoxei_mask_v_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, ptr %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i16_nxv4i16_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vsoxei16.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i16(
    <vscale x 4 x i16> %0,
    ptr %1,
    <vscale x 4 x i16> %2,
    <vscale x 4 x i1> %3,
    iXLen %4)

  ret void
}

declare void @llvm.riscv.vsoxei.nxv8i16.nxv8i16(
  <vscale x 8 x i16>,
  ptr,
  <vscale x 8 x i16>,
  iXLen);

define void @intrinsic_vsoxei_v_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, ptr %1, <vscale x 8 x i16> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i16_nxv8i16_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT:    vsoxei16.v v8, (a0), v10
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.nxv8i16.nxv8i16(
    <vscale x 8 x i16> %0,
    ptr %1,
    <vscale x 8 x i16> %2,
    iXLen %3)

  ret void
}

declare void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i16(
  <vscale x 8 x i16>,
  ptr,
  <vscale x 8 x i16>,
  <vscale x 8 x i1>,
  iXLen);

define void @intrinsic_vsoxei_mask_v_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, ptr %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i16_nxv8i16_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT:    vsoxei16.v v8, (a0), v10, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i16(
    <vscale x 8 x i16> %0,
    ptr %1,
    <vscale x 8 x i16> %2,
    <vscale x 8 x i1> %3,
    iXLen %4)

  ret void
}

declare void @llvm.riscv.vsoxei.nxv16i16.nxv16i16(
  <vscale x 16 x i16>,
  ptr,
  <vscale x 16 x i16>,
  iXLen);

define void @intrinsic_vsoxei_v_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, ptr %1, <vscale x 16 x i16> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_v_nxv16i16_nxv16i16_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT:    vsoxei16.v v8, (a0), v12
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.nxv16i16.nxv16i16(
    <vscale x 16 x i16> %0,
    ptr %1,
    <vscale x 16 x i16> %2,
    iXLen %3)

  ret void
}

declare void @llvm.riscv.vsoxei.mask.nxv16i16.nxv16i16(
  <vscale x 16 x i16>,
  ptr,
  <vscale x 16 x i16>,
  <vscale x 16 x i1>,
  iXLen);

define void @intrinsic_vsoxei_mask_v_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, ptr %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16i16_nxv16i16_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT:    vsoxei16.v v8, (a0), v12, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.mask.nxv16i16.nxv16i16(
    <vscale x 16 x i16> %0,
    ptr %1,
    <vscale x 16 x i16> %2,
    <vscale x 16 x i1> %3,
    iXLen %4)

  ret void
}

declare void @llvm.riscv.vsoxei.nxv32i16.nxv32i16(
  <vscale x 32 x i16>,
  ptr,
  <vscale x 32 x i16>,
  iXLen);

define void @intrinsic_vsoxei_v_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, ptr %1, <vscale x 32 x i16> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_v_nxv32i16_nxv32i16_nxv32i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
; CHECK-NEXT:    vsoxei16.v v8, (a0), v16
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.nxv32i16.nxv32i16(
    <vscale x 32 x i16> %0,
    ptr %1,
    <vscale x 32 x i16> %2,
    iXLen %3)

  ret void
}

declare void @llvm.riscv.vsoxei.mask.nxv32i16.nxv32i16(
  <vscale x 32 x i16>,
  ptr,
  <vscale x 32 x i16>,
  <vscale x 32 x i1>,
  iXLen);

define void @intrinsic_vsoxei_mask_v_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, ptr %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv32i16_nxv32i16_nxv32i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
; CHECK-NEXT:    vsoxei16.v v8, (a0), v16, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.mask.nxv32i16.nxv32i16(
    <vscale x 32 x i16> %0,
    ptr %1,
    <vscale x 32 x i16> %2,
    <vscale x 32 x i1> %3,
    iXLen %4)

  ret void
}
2076
; --- i32-element data indexed by i16 offsets -------------------------------
; SEW comes from the data type (e32); the index EEW stays 16, so the
; instruction is always vsoxei16.v.  LMUL scales with the data vector:
; nxv1 -> mf2, nxv2 -> m1, nxv4 -> m2, nxv8 -> m4, nxv16 -> m8.

; nxv1i32 data, nxv1i16 index: e32/mf2.
declare void @llvm.riscv.vsoxei.nxv1i32.nxv1i16(
  <vscale x 1 x i32>,
  ptr,
  <vscale x 1 x i16>,
  iXLen);

define void @intrinsic_vsoxei_v_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, ptr %1, <vscale x 1 x i16> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i32_nxv1i32_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    vsoxei16.v v8, (a0), v9
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.nxv1i32.nxv1i16(
    <vscale x 1 x i32> %0,
    ptr %1,
    <vscale x 1 x i16> %2,
    iXLen %3)

  ret void
}

declare void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i16(
  <vscale x 1 x i32>,
  ptr,
  <vscale x 1 x i16>,
  <vscale x 1 x i1>,
  iXLen);

define void @intrinsic_vsoxei_mask_v_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, ptr %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i32_nxv1i32_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    vsoxei16.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i16(
    <vscale x 1 x i32> %0,
    ptr %1,
    <vscale x 1 x i16> %2,
    <vscale x 1 x i1> %3,
    iXLen %4)

  ret void
}

; nxv2i32 data, nxv2i16 index: e32/m1.
declare void @llvm.riscv.vsoxei.nxv2i32.nxv2i16(
  <vscale x 2 x i32>,
  ptr,
  <vscale x 2 x i16>,
  iXLen);

define void @intrinsic_vsoxei_v_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, ptr %1, <vscale x 2 x i16> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i32_nxv2i32_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vsoxei16.v v8, (a0), v9
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.nxv2i32.nxv2i16(
    <vscale x 2 x i32> %0,
    ptr %1,
    <vscale x 2 x i16> %2,
    iXLen %3)

  ret void
}

declare void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i16(
  <vscale x 2 x i32>,
  ptr,
  <vscale x 2 x i16>,
  <vscale x 2 x i1>,
  iXLen);

define void @intrinsic_vsoxei_mask_v_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, ptr %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i32_nxv2i32_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vsoxei16.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i16(
    <vscale x 2 x i32> %0,
    ptr %1,
    <vscale x 2 x i16> %2,
    <vscale x 2 x i1> %3,
    iXLen %4)

  ret void
}

; nxv4i32 data, nxv4i16 index: e32/m2, index at v10.
declare void @llvm.riscv.vsoxei.nxv4i32.nxv4i16(
  <vscale x 4 x i32>,
  ptr,
  <vscale x 4 x i16>,
  iXLen);

define void @intrinsic_vsoxei_v_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, ptr %1, <vscale x 4 x i16> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i32_nxv4i32_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT:    vsoxei16.v v8, (a0), v10
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.nxv4i32.nxv4i16(
    <vscale x 4 x i32> %0,
    ptr %1,
    <vscale x 4 x i16> %2,
    iXLen %3)

  ret void
}

declare void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i16(
  <vscale x 4 x i32>,
  ptr,
  <vscale x 4 x i16>,
  <vscale x 4 x i1>,
  iXLen);

define void @intrinsic_vsoxei_mask_v_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, ptr %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i32_nxv4i32_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT:    vsoxei16.v v8, (a0), v10, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i16(
    <vscale x 4 x i32> %0,
    ptr %1,
    <vscale x 4 x i16> %2,
    <vscale x 4 x i1> %3,
    iXLen %4)

  ret void
}

; nxv8i32 data, nxv8i16 index: e32/m4, index at v12.
declare void @llvm.riscv.vsoxei.nxv8i32.nxv8i16(
  <vscale x 8 x i32>,
  ptr,
  <vscale x 8 x i16>,
  iXLen);

define void @intrinsic_vsoxei_v_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, ptr %1, <vscale x 8 x i16> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i32_nxv8i32_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT:    vsoxei16.v v8, (a0), v12
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.nxv8i32.nxv8i16(
    <vscale x 8 x i32> %0,
    ptr %1,
    <vscale x 8 x i16> %2,
    iXLen %3)

  ret void
}

declare void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i16(
  <vscale x 8 x i32>,
  ptr,
  <vscale x 8 x i16>,
  <vscale x 8 x i1>,
  iXLen);

define void @intrinsic_vsoxei_mask_v_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, ptr %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i32_nxv8i32_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT:    vsoxei16.v v8, (a0), v12, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i16(
    <vscale x 8 x i32> %0,
    ptr %1,
    <vscale x 8 x i16> %2,
    <vscale x 8 x i1> %3,
    iXLen %4)

  ret void
}

; nxv16i32 data, nxv16i16 index: e32/m8, index at v16.
declare void @llvm.riscv.vsoxei.nxv16i32.nxv16i16(
  <vscale x 16 x i32>,
  ptr,
  <vscale x 16 x i16>,
  iXLen);

define void @intrinsic_vsoxei_v_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, ptr %1, <vscale x 16 x i16> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_v_nxv16i32_nxv16i32_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT:    vsoxei16.v v8, (a0), v16
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.nxv16i32.nxv16i16(
    <vscale x 16 x i32> %0,
    ptr %1,
    <vscale x 16 x i16> %2,
    iXLen %3)

  ret void
}

declare void @llvm.riscv.vsoxei.mask.nxv16i32.nxv16i16(
  <vscale x 16 x i32>,
  ptr,
  <vscale x 16 x i16>,
  <vscale x 16 x i1>,
  iXLen);

define void @intrinsic_vsoxei_mask_v_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, ptr %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16i32_nxv16i32_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT:    vsoxei16.v v8, (a0), v16, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.mask.nxv16i32.nxv16i16(
    <vscale x 16 x i32> %0,
    ptr %1,
    <vscale x 16 x i16> %2,
    <vscale x 16 x i1> %3,
    iXLen %4)

  ret void
}
2306
; --- i64-element data indexed by i16 offsets -------------------------------
; e64 SEW with a 16-bit index EEW; LMUL tracks the data vector
; (nxv1 -> m1, nxv2 -> m2, nxv4 -> m4, nxv8 -> m8).

; nxv1i64 data, nxv1i16 index: e64/m1.
declare void @llvm.riscv.vsoxei.nxv1i64.nxv1i16(
  <vscale x 1 x i64>,
  ptr,
  <vscale x 1 x i16>,
  iXLen);

define void @intrinsic_vsoxei_v_nxv1i64_nxv1i64_nxv1i16(<vscale x 1 x i64> %0, ptr %1, <vscale x 1 x i16> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i64_nxv1i64_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT:    vsoxei16.v v8, (a0), v9
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.nxv1i64.nxv1i16(
    <vscale x 1 x i64> %0,
    ptr %1,
    <vscale x 1 x i16> %2,
    iXLen %3)

  ret void
}

declare void @llvm.riscv.vsoxei.mask.nxv1i64.nxv1i16(
  <vscale x 1 x i64>,
  ptr,
  <vscale x 1 x i16>,
  <vscale x 1 x i1>,
  iXLen);

define void @intrinsic_vsoxei_mask_v_nxv1i64_nxv1i64_nxv1i16(<vscale x 1 x i64> %0, ptr %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i64_nxv1i64_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT:    vsoxei16.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.mask.nxv1i64.nxv1i16(
    <vscale x 1 x i64> %0,
    ptr %1,
    <vscale x 1 x i16> %2,
    <vscale x 1 x i1> %3,
    iXLen %4)

  ret void
}

; nxv2i64 data, nxv2i16 index: e64/m2, index at v10.
declare void @llvm.riscv.vsoxei.nxv2i64.nxv2i16(
  <vscale x 2 x i64>,
  ptr,
  <vscale x 2 x i16>,
  iXLen);

define void @intrinsic_vsoxei_v_nxv2i64_nxv2i64_nxv2i16(<vscale x 2 x i64> %0, ptr %1, <vscale x 2 x i16> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i64_nxv2i64_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT:    vsoxei16.v v8, (a0), v10
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.nxv2i64.nxv2i16(
    <vscale x 2 x i64> %0,
    ptr %1,
    <vscale x 2 x i16> %2,
    iXLen %3)

  ret void
}

declare void @llvm.riscv.vsoxei.mask.nxv2i64.nxv2i16(
  <vscale x 2 x i64>,
  ptr,
  <vscale x 2 x i16>,
  <vscale x 2 x i1>,
  iXLen);

define void @intrinsic_vsoxei_mask_v_nxv2i64_nxv2i64_nxv2i16(<vscale x 2 x i64> %0, ptr %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i64_nxv2i64_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT:    vsoxei16.v v8, (a0), v10, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.mask.nxv2i64.nxv2i16(
    <vscale x 2 x i64> %0,
    ptr %1,
    <vscale x 2 x i16> %2,
    <vscale x 2 x i1> %3,
    iXLen %4)

  ret void
}

; nxv4i64 data, nxv4i16 index: e64/m4, index at v12.
declare void @llvm.riscv.vsoxei.nxv4i64.nxv4i16(
  <vscale x 4 x i64>,
  ptr,
  <vscale x 4 x i16>,
  iXLen);

define void @intrinsic_vsoxei_v_nxv4i64_nxv4i64_nxv4i16(<vscale x 4 x i64> %0, ptr %1, <vscale x 4 x i16> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i64_nxv4i64_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
; CHECK-NEXT:    vsoxei16.v v8, (a0), v12
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.nxv4i64.nxv4i16(
    <vscale x 4 x i64> %0,
    ptr %1,
    <vscale x 4 x i16> %2,
    iXLen %3)

  ret void
}

declare void @llvm.riscv.vsoxei.mask.nxv4i64.nxv4i16(
  <vscale x 4 x i64>,
  ptr,
  <vscale x 4 x i16>,
  <vscale x 4 x i1>,
  iXLen);

define void @intrinsic_vsoxei_mask_v_nxv4i64_nxv4i64_nxv4i16(<vscale x 4 x i64> %0, ptr %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i64_nxv4i64_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
; CHECK-NEXT:    vsoxei16.v v8, (a0), v12, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.mask.nxv4i64.nxv4i16(
    <vscale x 4 x i64> %0,
    ptr %1,
    <vscale x 4 x i16> %2,
    <vscale x 4 x i1> %3,
    iXLen %4)

  ret void
}

; nxv8i64 data, nxv8i16 index: e64/m8, index at v16.
declare void @llvm.riscv.vsoxei.nxv8i64.nxv8i16(
  <vscale x 8 x i64>,
  ptr,
  <vscale x 8 x i16>,
  iXLen);

define void @intrinsic_vsoxei_v_nxv8i64_nxv8i64_nxv8i16(<vscale x 8 x i64> %0, ptr %1, <vscale x 8 x i16> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i64_nxv8i64_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
; CHECK-NEXT:    vsoxei16.v v8, (a0), v16
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.nxv8i64.nxv8i16(
    <vscale x 8 x i64> %0,
    ptr %1,
    <vscale x 8 x i16> %2,
    iXLen %3)

  ret void
}

declare void @llvm.riscv.vsoxei.mask.nxv8i64.nxv8i16(
  <vscale x 8 x i64>,
  ptr,
  <vscale x 8 x i16>,
  <vscale x 8 x i1>,
  iXLen);

define void @intrinsic_vsoxei_mask_v_nxv8i64_nxv8i64_nxv8i16(<vscale x 8 x i64> %0, ptr %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i64_nxv8i64_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
; CHECK-NEXT:    vsoxei16.v v8, (a0), v16, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.mask.nxv8i64.nxv8i16(
    <vscale x 8 x i64> %0,
    ptr %1,
    <vscale x 8 x i16> %2,
    <vscale x 8 x i1> %3,
    iXLen %4)

  ret void
}
2490
; --- half (f16)-element data indexed by i16 offsets ------------------------
; Only element width matters for store selection, so half behaves like i16:
; e16 SEW with LMUL nxv1 -> mf4 ... nxv32 -> m8.  +zvfhmin (see RUN lines)
; is sufficient since no f16 arithmetic is performed.

; nxv1f16 data, nxv1i16 index: e16/mf4.
declare void @llvm.riscv.vsoxei.nxv1f16.nxv1i16(
  <vscale x 1 x half>,
  ptr,
  <vscale x 1 x i16>,
  iXLen);

define void @intrinsic_vsoxei_v_nxv1f16_nxv1f16_nxv1i16(<vscale x 1 x half> %0, ptr %1, <vscale x 1 x i16> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f16_nxv1f16_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vsoxei16.v v8, (a0), v9
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.nxv1f16.nxv1i16(
    <vscale x 1 x half> %0,
    ptr %1,
    <vscale x 1 x i16> %2,
    iXLen %3)

  ret void
}

declare void @llvm.riscv.vsoxei.mask.nxv1f16.nxv1i16(
  <vscale x 1 x half>,
  ptr,
  <vscale x 1 x i16>,
  <vscale x 1 x i1>,
  iXLen);

define void @intrinsic_vsoxei_mask_v_nxv1f16_nxv1f16_nxv1i16(<vscale x 1 x half> %0, ptr %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f16_nxv1f16_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vsoxei16.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.mask.nxv1f16.nxv1i16(
    <vscale x 1 x half> %0,
    ptr %1,
    <vscale x 1 x i16> %2,
    <vscale x 1 x i1> %3,
    iXLen %4)

  ret void
}

; nxv2f16 data, nxv2i16 index: e16/mf2.
declare void @llvm.riscv.vsoxei.nxv2f16.nxv2i16(
  <vscale x 2 x half>,
  ptr,
  <vscale x 2 x i16>,
  iXLen);

define void @intrinsic_vsoxei_v_nxv2f16_nxv2f16_nxv2i16(<vscale x 2 x half> %0, ptr %1, <vscale x 2 x i16> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f16_nxv2f16_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT:    vsoxei16.v v8, (a0), v9
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.nxv2f16.nxv2i16(
    <vscale x 2 x half> %0,
    ptr %1,
    <vscale x 2 x i16> %2,
    iXLen %3)

  ret void
}

declare void @llvm.riscv.vsoxei.mask.nxv2f16.nxv2i16(
  <vscale x 2 x half>,
  ptr,
  <vscale x 2 x i16>,
  <vscale x 2 x i1>,
  iXLen);

define void @intrinsic_vsoxei_mask_v_nxv2f16_nxv2f16_nxv2i16(<vscale x 2 x half> %0, ptr %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f16_nxv2f16_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT:    vsoxei16.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.mask.nxv2f16.nxv2i16(
    <vscale x 2 x half> %0,
    ptr %1,
    <vscale x 2 x i16> %2,
    <vscale x 2 x i1> %3,
    iXLen %4)

  ret void
}

; nxv4f16 data, nxv4i16 index: e16/m1.
declare void @llvm.riscv.vsoxei.nxv4f16.nxv4i16(
  <vscale x 4 x half>,
  ptr,
  <vscale x 4 x i16>,
  iXLen);

define void @intrinsic_vsoxei_v_nxv4f16_nxv4f16_nxv4i16(<vscale x 4 x half> %0, ptr %1, <vscale x 4 x i16> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f16_nxv4f16_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vsoxei16.v v8, (a0), v9
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.nxv4f16.nxv4i16(
    <vscale x 4 x half> %0,
    ptr %1,
    <vscale x 4 x i16> %2,
    iXLen %3)

  ret void
}

declare void @llvm.riscv.vsoxei.mask.nxv4f16.nxv4i16(
  <vscale x 4 x half>,
  ptr,
  <vscale x 4 x i16>,
  <vscale x 4 x i1>,
  iXLen);

define void @intrinsic_vsoxei_mask_v_nxv4f16_nxv4f16_nxv4i16(<vscale x 4 x half> %0, ptr %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f16_nxv4f16_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vsoxei16.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.mask.nxv4f16.nxv4i16(
    <vscale x 4 x half> %0,
    ptr %1,
    <vscale x 4 x i16> %2,
    <vscale x 4 x i1> %3,
    iXLen %4)

  ret void
}

; nxv8f16 data, nxv8i16 index: e16/m2, index at v10.
declare void @llvm.riscv.vsoxei.nxv8f16.nxv8i16(
  <vscale x 8 x half>,
  ptr,
  <vscale x 8 x i16>,
  iXLen);

define void @intrinsic_vsoxei_v_nxv8f16_nxv8f16_nxv8i16(<vscale x 8 x half> %0, ptr %1, <vscale x 8 x i16> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f16_nxv8f16_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT:    vsoxei16.v v8, (a0), v10
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.nxv8f16.nxv8i16(
    <vscale x 8 x half> %0,
    ptr %1,
    <vscale x 8 x i16> %2,
    iXLen %3)

  ret void
}

declare void @llvm.riscv.vsoxei.mask.nxv8f16.nxv8i16(
  <vscale x 8 x half>,
  ptr,
  <vscale x 8 x i16>,
  <vscale x 8 x i1>,
  iXLen);

define void @intrinsic_vsoxei_mask_v_nxv8f16_nxv8f16_nxv8i16(<vscale x 8 x half> %0, ptr %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f16_nxv8f16_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT:    vsoxei16.v v8, (a0), v10, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.mask.nxv8f16.nxv8i16(
    <vscale x 8 x half> %0,
    ptr %1,
    <vscale x 8 x i16> %2,
    <vscale x 8 x i1> %3,
    iXLen %4)

  ret void
}

; nxv16f16 data, nxv16i16 index: e16/m4, index at v12.
declare void @llvm.riscv.vsoxei.nxv16f16.nxv16i16(
  <vscale x 16 x half>,
  ptr,
  <vscale x 16 x i16>,
  iXLen);

define void @intrinsic_vsoxei_v_nxv16f16_nxv16f16_nxv16i16(<vscale x 16 x half> %0, ptr %1, <vscale x 16 x i16> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_v_nxv16f16_nxv16f16_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT:    vsoxei16.v v8, (a0), v12
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.nxv16f16.nxv16i16(
    <vscale x 16 x half> %0,
    ptr %1,
    <vscale x 16 x i16> %2,
    iXLen %3)

  ret void
}

declare void @llvm.riscv.vsoxei.mask.nxv16f16.nxv16i16(
  <vscale x 16 x half>,
  ptr,
  <vscale x 16 x i16>,
  <vscale x 16 x i1>,
  iXLen);

define void @intrinsic_vsoxei_mask_v_nxv16f16_nxv16f16_nxv16i16(<vscale x 16 x half> %0, ptr %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16f16_nxv16f16_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT:    vsoxei16.v v8, (a0), v12, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.mask.nxv16f16.nxv16i16(
    <vscale x 16 x half> %0,
    ptr %1,
    <vscale x 16 x i16> %2,
    <vscale x 16 x i1> %3,
    iXLen %4)

  ret void
}

; nxv32f16 data, nxv32i16 index: e16/m8, index at v16.
declare void @llvm.riscv.vsoxei.nxv32f16.nxv32i16(
  <vscale x 32 x half>,
  ptr,
  <vscale x 32 x i16>,
  iXLen);

define void @intrinsic_vsoxei_v_nxv32f16_nxv32f16_nxv32i16(<vscale x 32 x half> %0, ptr %1, <vscale x 32 x i16> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_v_nxv32f16_nxv32f16_nxv32i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
; CHECK-NEXT:    vsoxei16.v v8, (a0), v16
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.nxv32f16.nxv32i16(
    <vscale x 32 x half> %0,
    ptr %1,
    <vscale x 32 x i16> %2,
    iXLen %3)

  ret void
}

declare void @llvm.riscv.vsoxei.mask.nxv32f16.nxv32i16(
  <vscale x 32 x half>,
  ptr,
  <vscale x 32 x i16>,
  <vscale x 32 x i1>,
  iXLen);

define void @intrinsic_vsoxei_mask_v_nxv32f16_nxv32f16_nxv32i16(<vscale x 32 x half> %0, ptr %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv32f16_nxv32f16_nxv32i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
; CHECK-NEXT:    vsoxei16.v v8, (a0), v16, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.mask.nxv32f16.nxv32i16(
    <vscale x 32 x half> %0,
    ptr %1,
    <vscale x 32 x i16> %2,
    <vscale x 32 x i1> %3,
    iXLen %4)

  ret void
}
2766
; --- float (f32)-element data indexed by i16 offsets -----------------------
; Same shape as the i32 tests: e32 SEW, LMUL nxv1 -> mf2 ... nxv16 -> m8,
; with a 16-bit index EEW (vsoxei16.v).

; nxv1f32 data, nxv1i16 index: e32/mf2.
declare void @llvm.riscv.vsoxei.nxv1f32.nxv1i16(
  <vscale x 1 x float>,
  ptr,
  <vscale x 1 x i16>,
  iXLen);

define void @intrinsic_vsoxei_v_nxv1f32_nxv1f32_nxv1i16(<vscale x 1 x float> %0, ptr %1, <vscale x 1 x i16> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f32_nxv1f32_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    vsoxei16.v v8, (a0), v9
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.nxv1f32.nxv1i16(
    <vscale x 1 x float> %0,
    ptr %1,
    <vscale x 1 x i16> %2,
    iXLen %3)

  ret void
}

declare void @llvm.riscv.vsoxei.mask.nxv1f32.nxv1i16(
  <vscale x 1 x float>,
  ptr,
  <vscale x 1 x i16>,
  <vscale x 1 x i1>,
  iXLen);

define void @intrinsic_vsoxei_mask_v_nxv1f32_nxv1f32_nxv1i16(<vscale x 1 x float> %0, ptr %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f32_nxv1f32_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    vsoxei16.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.mask.nxv1f32.nxv1i16(
    <vscale x 1 x float> %0,
    ptr %1,
    <vscale x 1 x i16> %2,
    <vscale x 1 x i1> %3,
    iXLen %4)

  ret void
}

; nxv2f32 data, nxv2i16 index: e32/m1.
declare void @llvm.riscv.vsoxei.nxv2f32.nxv2i16(
  <vscale x 2 x float>,
  ptr,
  <vscale x 2 x i16>,
  iXLen);

define void @intrinsic_vsoxei_v_nxv2f32_nxv2f32_nxv2i16(<vscale x 2 x float> %0, ptr %1, <vscale x 2 x i16> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f32_nxv2f32_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vsoxei16.v v8, (a0), v9
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.nxv2f32.nxv2i16(
    <vscale x 2 x float> %0,
    ptr %1,
    <vscale x 2 x i16> %2,
    iXLen %3)

  ret void
}

declare void @llvm.riscv.vsoxei.mask.nxv2f32.nxv2i16(
  <vscale x 2 x float>,
  ptr,
  <vscale x 2 x i16>,
  <vscale x 2 x i1>,
  iXLen);

define void @intrinsic_vsoxei_mask_v_nxv2f32_nxv2f32_nxv2i16(<vscale x 2 x float> %0, ptr %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f32_nxv2f32_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vsoxei16.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.mask.nxv2f32.nxv2i16(
    <vscale x 2 x float> %0,
    ptr %1,
    <vscale x 2 x i16> %2,
    <vscale x 2 x i1> %3,
    iXLen %4)

  ret void
}

; nxv4f32 data, nxv4i16 index: e32/m2, index at v10.
declare void @llvm.riscv.vsoxei.nxv4f32.nxv4i16(
  <vscale x 4 x float>,
  ptr,
  <vscale x 4 x i16>,
  iXLen);

define void @intrinsic_vsoxei_v_nxv4f32_nxv4f32_nxv4i16(<vscale x 4 x float> %0, ptr %1, <vscale x 4 x i16> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f32_nxv4f32_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT:    vsoxei16.v v8, (a0), v10
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.nxv4f32.nxv4i16(
    <vscale x 4 x float> %0,
    ptr %1,
    <vscale x 4 x i16> %2,
    iXLen %3)

  ret void
}

declare void @llvm.riscv.vsoxei.mask.nxv4f32.nxv4i16(
  <vscale x 4 x float>,
  ptr,
  <vscale x 4 x i16>,
  <vscale x 4 x i1>,
  iXLen);

define void @intrinsic_vsoxei_mask_v_nxv4f32_nxv4f32_nxv4i16(<vscale x 4 x float> %0, ptr %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f32_nxv4f32_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT:    vsoxei16.v v8, (a0), v10, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.mask.nxv4f32.nxv4i16(
    <vscale x 4 x float> %0,
    ptr %1,
    <vscale x 4 x i16> %2,
    <vscale x 4 x i1> %3,
    iXLen %4)

  ret void
}

; nxv8f32 data, nxv8i16 index: e32/m4, index at v12.
declare void @llvm.riscv.vsoxei.nxv8f32.nxv8i16(
  <vscale x 8 x float>,
  ptr,
  <vscale x 8 x i16>,
  iXLen);

define void @intrinsic_vsoxei_v_nxv8f32_nxv8f32_nxv8i16(<vscale x 8 x float> %0, ptr %1, <vscale x 8 x i16> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f32_nxv8f32_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT:    vsoxei16.v v8, (a0), v12
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.nxv8f32.nxv8i16(
    <vscale x 8 x float> %0,
    ptr %1,
    <vscale x 8 x i16> %2,
    iXLen %3)

  ret void
}

declare void @llvm.riscv.vsoxei.mask.nxv8f32.nxv8i16(
  <vscale x 8 x float>,
  ptr,
  <vscale x 8 x i16>,
  <vscale x 8 x i1>,
  iXLen);

define void @intrinsic_vsoxei_mask_v_nxv8f32_nxv8f32_nxv8i16(<vscale x 8 x float> %0, ptr %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f32_nxv8f32_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT:    vsoxei16.v v8, (a0), v12, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.mask.nxv8f32.nxv8i16(
    <vscale x 8 x float> %0,
    ptr %1,
    <vscale x 8 x i16> %2,
    <vscale x 8 x i1> %3,
    iXLen %4)

  ret void
}

; nxv16f32 data, nxv16i16 index: e32/m8, index at v16.
declare void @llvm.riscv.vsoxei.nxv16f32.nxv16i16(
  <vscale x 16 x float>,
  ptr,
  <vscale x 16 x i16>,
  iXLen);

define void @intrinsic_vsoxei_v_nxv16f32_nxv16f32_nxv16i16(<vscale x 16 x float> %0, ptr %1, <vscale x 16 x i16> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_v_nxv16f32_nxv16f32_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT:    vsoxei16.v v8, (a0), v16
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.nxv16f32.nxv16i16(
    <vscale x 16 x float> %0,
    ptr %1,
    <vscale x 16 x i16> %2,
    iXLen %3)

  ret void
}

declare void @llvm.riscv.vsoxei.mask.nxv16f32.nxv16i16(
  <vscale x 16 x float>,
  ptr,
  <vscale x 16 x i16>,
  <vscale x 16 x i1>,
  iXLen);

define void @intrinsic_vsoxei_mask_v_nxv16f32_nxv16f32_nxv16i16(<vscale x 16 x float> %0, ptr %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16f32_nxv16f32_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT:    vsoxei16.v v8, (a0), v16, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.mask.nxv16f32.nxv16i16(
    <vscale x 16 x float> %0,
    ptr %1,
    <vscale x 16 x i16> %2,
    <vscale x 16 x i1> %3,
    iXLen %4)

  ret void
}
2996
; vsoxei: f64 data addressed by 16-bit indices (vsoxei16.v), covering
; nxv1/nxv2/nxv4/nxv8 doubles (data EMUL m1/m2/m4/m8), unmasked and masked.
; CHECK lines are autogenerated by update_llc_test_checks.py.
declare void @llvm.riscv.vsoxei.nxv1f64.nxv1i16(
  <vscale x 1 x double>,
  ptr,
  <vscale x 1 x i16>,
  iXLen);

define void @intrinsic_vsoxei_v_nxv1f64_nxv1f64_nxv1i16(<vscale x 1 x double> %0, ptr %1, <vscale x 1 x i16> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f64_nxv1f64_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT:    vsoxei16.v v8, (a0), v9
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.nxv1f64.nxv1i16(
    <vscale x 1 x double> %0,
    ptr %1,
    <vscale x 1 x i16> %2,
    iXLen %3)

  ret void
}

declare void @llvm.riscv.vsoxei.mask.nxv1f64.nxv1i16(
  <vscale x 1 x double>,
  ptr,
  <vscale x 1 x i16>,
  <vscale x 1 x i1>,
  iXLen);

define void @intrinsic_vsoxei_mask_v_nxv1f64_nxv1f64_nxv1i16(<vscale x 1 x double> %0, ptr %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f64_nxv1f64_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT:    vsoxei16.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.mask.nxv1f64.nxv1i16(
    <vscale x 1 x double> %0,
    ptr %1,
    <vscale x 1 x i16> %2,
    <vscale x 1 x i1> %3,
    iXLen %4)

  ret void
}

declare void @llvm.riscv.vsoxei.nxv2f64.nxv2i16(
  <vscale x 2 x double>,
  ptr,
  <vscale x 2 x i16>,
  iXLen);

define void @intrinsic_vsoxei_v_nxv2f64_nxv2f64_nxv2i16(<vscale x 2 x double> %0, ptr %1, <vscale x 2 x i16> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f64_nxv2f64_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT:    vsoxei16.v v8, (a0), v10
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.nxv2f64.nxv2i16(
    <vscale x 2 x double> %0,
    ptr %1,
    <vscale x 2 x i16> %2,
    iXLen %3)

  ret void
}

declare void @llvm.riscv.vsoxei.mask.nxv2f64.nxv2i16(
  <vscale x 2 x double>,
  ptr,
  <vscale x 2 x i16>,
  <vscale x 2 x i1>,
  iXLen);

define void @intrinsic_vsoxei_mask_v_nxv2f64_nxv2f64_nxv2i16(<vscale x 2 x double> %0, ptr %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f64_nxv2f64_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT:    vsoxei16.v v8, (a0), v10, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.mask.nxv2f64.nxv2i16(
    <vscale x 2 x double> %0,
    ptr %1,
    <vscale x 2 x i16> %2,
    <vscale x 2 x i1> %3,
    iXLen %4)

  ret void
}

declare void @llvm.riscv.vsoxei.nxv4f64.nxv4i16(
  <vscale x 4 x double>,
  ptr,
  <vscale x 4 x i16>,
  iXLen);

define void @intrinsic_vsoxei_v_nxv4f64_nxv4f64_nxv4i16(<vscale x 4 x double> %0, ptr %1, <vscale x 4 x i16> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f64_nxv4f64_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
; CHECK-NEXT:    vsoxei16.v v8, (a0), v12
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.nxv4f64.nxv4i16(
    <vscale x 4 x double> %0,
    ptr %1,
    <vscale x 4 x i16> %2,
    iXLen %3)

  ret void
}

declare void @llvm.riscv.vsoxei.mask.nxv4f64.nxv4i16(
  <vscale x 4 x double>,
  ptr,
  <vscale x 4 x i16>,
  <vscale x 4 x i1>,
  iXLen);

define void @intrinsic_vsoxei_mask_v_nxv4f64_nxv4f64_nxv4i16(<vscale x 4 x double> %0, ptr %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f64_nxv4f64_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
; CHECK-NEXT:    vsoxei16.v v8, (a0), v12, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.mask.nxv4f64.nxv4i16(
    <vscale x 4 x double> %0,
    ptr %1,
    <vscale x 4 x i16> %2,
    <vscale x 4 x i1> %3,
    iXLen %4)

  ret void
}

declare void @llvm.riscv.vsoxei.nxv8f64.nxv8i16(
  <vscale x 8 x double>,
  ptr,
  <vscale x 8 x i16>,
  iXLen);

define void @intrinsic_vsoxei_v_nxv8f64_nxv8f64_nxv8i16(<vscale x 8 x double> %0, ptr %1, <vscale x 8 x i16> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f64_nxv8f64_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
; CHECK-NEXT:    vsoxei16.v v8, (a0), v16
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.nxv8f64.nxv8i16(
    <vscale x 8 x double> %0,
    ptr %1,
    <vscale x 8 x i16> %2,
    iXLen %3)

  ret void
}

declare void @llvm.riscv.vsoxei.mask.nxv8f64.nxv8i16(
  <vscale x 8 x double>,
  ptr,
  <vscale x 8 x i16>,
  <vscale x 8 x i1>,
  iXLen);

define void @intrinsic_vsoxei_mask_v_nxv8f64_nxv8f64_nxv8i16(<vscale x 8 x double> %0, ptr %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f64_nxv8f64_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
; CHECK-NEXT:    vsoxei16.v v8, (a0), v16, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.mask.nxv8f64.nxv8i16(
    <vscale x 8 x double> %0,
    ptr %1,
    <vscale x 8 x i16> %2,
    <vscale x 8 x i1> %3,
    iXLen %4)

  ret void
}
3180
; vsoxei: i8 data addressed by 8-bit indices (vsoxei8.v), covering
; nxv1..nxv64 i8 (data EMUL mf8..m8), unmasked and masked forms.
; CHECK lines are autogenerated by update_llc_test_checks.py.
declare void @llvm.riscv.vsoxei.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,
  ptr,
  <vscale x 1 x i8>,
  iXLen);

define void @intrinsic_vsoxei_v_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, ptr %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i8_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vsoxei8.v v8, (a0), v9
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.nxv1i8.nxv1i8(
    <vscale x 1 x i8> %0,
    ptr %1,
    <vscale x 1 x i8> %2,
    iXLen %3)

  ret void
}

declare void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,
  ptr,
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  iXLen);

define void @intrinsic_vsoxei_mask_v_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, ptr %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i8_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vsoxei8.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i8(
    <vscale x 1 x i8> %0,
    ptr %1,
    <vscale x 1 x i8> %2,
    <vscale x 1 x i1> %3,
    iXLen %4)

  ret void
}

declare void @llvm.riscv.vsoxei.nxv2i8.nxv2i8(
  <vscale x 2 x i8>,
  ptr,
  <vscale x 2 x i8>,
  iXLen);

define void @intrinsic_vsoxei_v_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, ptr %1, <vscale x 2 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i8_nxv2i8_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT:    vsoxei8.v v8, (a0), v9
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.nxv2i8.nxv2i8(
    <vscale x 2 x i8> %0,
    ptr %1,
    <vscale x 2 x i8> %2,
    iXLen %3)

  ret void
}

declare void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i8(
  <vscale x 2 x i8>,
  ptr,
  <vscale x 2 x i8>,
  <vscale x 2 x i1>,
  iXLen);

define void @intrinsic_vsoxei_mask_v_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, ptr %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i8_nxv2i8_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT:    vsoxei8.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i8(
    <vscale x 2 x i8> %0,
    ptr %1,
    <vscale x 2 x i8> %2,
    <vscale x 2 x i1> %3,
    iXLen %4)

  ret void
}

declare void @llvm.riscv.vsoxei.nxv4i8.nxv4i8(
  <vscale x 4 x i8>,
  ptr,
  <vscale x 4 x i8>,
  iXLen);

define void @intrinsic_vsoxei_v_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, ptr %1, <vscale x 4 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i8_nxv4i8_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vsoxei8.v v8, (a0), v9
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.nxv4i8.nxv4i8(
    <vscale x 4 x i8> %0,
    ptr %1,
    <vscale x 4 x i8> %2,
    iXLen %3)

  ret void
}

declare void @llvm.riscv.vsoxei.mask.nxv4i8.nxv4i8(
  <vscale x 4 x i8>,
  ptr,
  <vscale x 4 x i8>,
  <vscale x 4 x i1>,
  iXLen);

define void @intrinsic_vsoxei_mask_v_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, ptr %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i8_nxv4i8_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vsoxei8.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.mask.nxv4i8.nxv4i8(
    <vscale x 4 x i8> %0,
    ptr %1,
    <vscale x 4 x i8> %2,
    <vscale x 4 x i1> %3,
    iXLen %4)

  ret void
}

declare void @llvm.riscv.vsoxei.nxv8i8.nxv8i8(
  <vscale x 8 x i8>,
  ptr,
  <vscale x 8 x i8>,
  iXLen);

define void @intrinsic_vsoxei_v_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, ptr %1, <vscale x 8 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i8_nxv8i8_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT:    vsoxei8.v v8, (a0), v9
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.nxv8i8.nxv8i8(
    <vscale x 8 x i8> %0,
    ptr %1,
    <vscale x 8 x i8> %2,
    iXLen %3)

  ret void
}

declare void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i8(
  <vscale x 8 x i8>,
  ptr,
  <vscale x 8 x i8>,
  <vscale x 8 x i1>,
  iXLen);

define void @intrinsic_vsoxei_mask_v_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, ptr %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i8_nxv8i8_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT:    vsoxei8.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i8(
    <vscale x 8 x i8> %0,
    ptr %1,
    <vscale x 8 x i8> %2,
    <vscale x 8 x i1> %3,
    iXLen %4)

  ret void
}

declare void @llvm.riscv.vsoxei.nxv16i8.nxv16i8(
  <vscale x 16 x i8>,
  ptr,
  <vscale x 16 x i8>,
  iXLen);

define void @intrinsic_vsoxei_v_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, ptr %1, <vscale x 16 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_v_nxv16i8_nxv16i8_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT:    vsoxei8.v v8, (a0), v10
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.nxv16i8.nxv16i8(
    <vscale x 16 x i8> %0,
    ptr %1,
    <vscale x 16 x i8> %2,
    iXLen %3)

  ret void
}

declare void @llvm.riscv.vsoxei.mask.nxv16i8.nxv16i8(
  <vscale x 16 x i8>,
  ptr,
  <vscale x 16 x i8>,
  <vscale x 16 x i1>,
  iXLen);

define void @intrinsic_vsoxei_mask_v_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, ptr %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16i8_nxv16i8_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT:    vsoxei8.v v8, (a0), v10, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.mask.nxv16i8.nxv16i8(
    <vscale x 16 x i8> %0,
    ptr %1,
    <vscale x 16 x i8> %2,
    <vscale x 16 x i1> %3,
    iXLen %4)

  ret void
}

declare void @llvm.riscv.vsoxei.nxv32i8.nxv32i8(
  <vscale x 32 x i8>,
  ptr,
  <vscale x 32 x i8>,
  iXLen);

define void @intrinsic_vsoxei_v_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, ptr %1, <vscale x 32 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_v_nxv32i8_nxv32i8_nxv32i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT:    vsoxei8.v v8, (a0), v12
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.nxv32i8.nxv32i8(
    <vscale x 32 x i8> %0,
    ptr %1,
    <vscale x 32 x i8> %2,
    iXLen %3)

  ret void
}

declare void @llvm.riscv.vsoxei.mask.nxv32i8.nxv32i8(
  <vscale x 32 x i8>,
  ptr,
  <vscale x 32 x i8>,
  <vscale x 32 x i1>,
  iXLen);

define void @intrinsic_vsoxei_mask_v_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, ptr %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv32i8_nxv32i8_nxv32i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT:    vsoxei8.v v8, (a0), v12, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.mask.nxv32i8.nxv32i8(
    <vscale x 32 x i8> %0,
    ptr %1,
    <vscale x 32 x i8> %2,
    <vscale x 32 x i1> %3,
    iXLen %4)

  ret void
}

declare void @llvm.riscv.vsoxei.nxv64i8.nxv64i8(
  <vscale x 64 x i8>,
  ptr,
  <vscale x 64 x i8>,
  iXLen);

define void @intrinsic_vsoxei_v_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, ptr %1, <vscale x 64 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_v_nxv64i8_nxv64i8_nxv64i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT:    vsoxei8.v v8, (a0), v16
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.nxv64i8.nxv64i8(
    <vscale x 64 x i8> %0,
    ptr %1,
    <vscale x 64 x i8> %2,
    iXLen %3)

  ret void
}

declare void @llvm.riscv.vsoxei.mask.nxv64i8.nxv64i8(
  <vscale x 64 x i8>,
  ptr,
  <vscale x 64 x i8>,
  <vscale x 64 x i1>,
  iXLen);

define void @intrinsic_vsoxei_mask_v_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, ptr %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv64i8_nxv64i8_nxv64i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT:    vsoxei8.v v8, (a0), v16, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.mask.nxv64i8.nxv64i8(
    <vscale x 64 x i8> %0,
    ptr %1,
    <vscale x 64 x i8> %2,
    <vscale x 64 x i1> %3,
    iXLen %4)

  ret void
}
3502
; vsoxei: i16 data addressed by 8-bit indices (vsoxei8.v), covering
; nxv1..nxv32 i16 (data EMUL mf4..m8), unmasked and masked forms.
; CHECK lines are autogenerated by update_llc_test_checks.py.
declare void @llvm.riscv.vsoxei.nxv1i16.nxv1i8(
  <vscale x 1 x i16>,
  ptr,
  <vscale x 1 x i8>,
  iXLen);

define void @intrinsic_vsoxei_v_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, ptr %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i16_nxv1i16_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vsoxei8.v v8, (a0), v9
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.nxv1i16.nxv1i8(
    <vscale x 1 x i16> %0,
    ptr %1,
    <vscale x 1 x i8> %2,
    iXLen %3)

  ret void
}

declare void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i8(
  <vscale x 1 x i16>,
  ptr,
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  iXLen);

define void @intrinsic_vsoxei_mask_v_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, ptr %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i16_nxv1i16_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vsoxei8.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i8(
    <vscale x 1 x i16> %0,
    ptr %1,
    <vscale x 1 x i8> %2,
    <vscale x 1 x i1> %3,
    iXLen %4)

  ret void
}

declare void @llvm.riscv.vsoxei.nxv2i16.nxv2i8(
  <vscale x 2 x i16>,
  ptr,
  <vscale x 2 x i8>,
  iXLen);

define void @intrinsic_vsoxei_v_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, ptr %1, <vscale x 2 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i16_nxv2i16_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT:    vsoxei8.v v8, (a0), v9
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.nxv2i16.nxv2i8(
    <vscale x 2 x i16> %0,
    ptr %1,
    <vscale x 2 x i8> %2,
    iXLen %3)

  ret void
}

declare void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i8(
  <vscale x 2 x i16>,
  ptr,
  <vscale x 2 x i8>,
  <vscale x 2 x i1>,
  iXLen);

define void @intrinsic_vsoxei_mask_v_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, ptr %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i16_nxv2i16_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT:    vsoxei8.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i8(
    <vscale x 2 x i16> %0,
    ptr %1,
    <vscale x 2 x i8> %2,
    <vscale x 2 x i1> %3,
    iXLen %4)

  ret void
}

declare void @llvm.riscv.vsoxei.nxv4i16.nxv4i8(
  <vscale x 4 x i16>,
  ptr,
  <vscale x 4 x i8>,
  iXLen);

define void @intrinsic_vsoxei_v_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, ptr %1, <vscale x 4 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i16_nxv4i16_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vsoxei8.v v8, (a0), v9
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.nxv4i16.nxv4i8(
    <vscale x 4 x i16> %0,
    ptr %1,
    <vscale x 4 x i8> %2,
    iXLen %3)

  ret void
}

declare void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i8(
  <vscale x 4 x i16>,
  ptr,
  <vscale x 4 x i8>,
  <vscale x 4 x i1>,
  iXLen);

define void @intrinsic_vsoxei_mask_v_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, ptr %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i16_nxv4i16_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vsoxei8.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i8(
    <vscale x 4 x i16> %0,
    ptr %1,
    <vscale x 4 x i8> %2,
    <vscale x 4 x i1> %3,
    iXLen %4)

  ret void
}

declare void @llvm.riscv.vsoxei.nxv8i16.nxv8i8(
  <vscale x 8 x i16>,
  ptr,
  <vscale x 8 x i8>,
  iXLen);

define void @intrinsic_vsoxei_v_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, ptr %1, <vscale x 8 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i16_nxv8i16_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT:    vsoxei8.v v8, (a0), v10
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.nxv8i16.nxv8i8(
    <vscale x 8 x i16> %0,
    ptr %1,
    <vscale x 8 x i8> %2,
    iXLen %3)

  ret void
}

declare void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i8(
  <vscale x 8 x i16>,
  ptr,
  <vscale x 8 x i8>,
  <vscale x 8 x i1>,
  iXLen);

define void @intrinsic_vsoxei_mask_v_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, ptr %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i16_nxv8i16_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT:    vsoxei8.v v8, (a0), v10, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i8(
    <vscale x 8 x i16> %0,
    ptr %1,
    <vscale x 8 x i8> %2,
    <vscale x 8 x i1> %3,
    iXLen %4)

  ret void
}

declare void @llvm.riscv.vsoxei.nxv16i16.nxv16i8(
  <vscale x 16 x i16>,
  ptr,
  <vscale x 16 x i8>,
  iXLen);

define void @intrinsic_vsoxei_v_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, ptr %1, <vscale x 16 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_v_nxv16i16_nxv16i16_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT:    vsoxei8.v v8, (a0), v12
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.nxv16i16.nxv16i8(
    <vscale x 16 x i16> %0,
    ptr %1,
    <vscale x 16 x i8> %2,
    iXLen %3)

  ret void
}

declare void @llvm.riscv.vsoxei.mask.nxv16i16.nxv16i8(
  <vscale x 16 x i16>,
  ptr,
  <vscale x 16 x i8>,
  <vscale x 16 x i1>,
  iXLen);

define void @intrinsic_vsoxei_mask_v_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, ptr %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16i16_nxv16i16_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT:    vsoxei8.v v8, (a0), v12, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.mask.nxv16i16.nxv16i8(
    <vscale x 16 x i16> %0,
    ptr %1,
    <vscale x 16 x i8> %2,
    <vscale x 16 x i1> %3,
    iXLen %4)

  ret void
}

declare void @llvm.riscv.vsoxei.nxv32i16.nxv32i8(
  <vscale x 32 x i16>,
  ptr,
  <vscale x 32 x i8>,
  iXLen);

define void @intrinsic_vsoxei_v_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, ptr %1, <vscale x 32 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_v_nxv32i16_nxv32i16_nxv32i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
; CHECK-NEXT:    vsoxei8.v v8, (a0), v16
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.nxv32i16.nxv32i8(
    <vscale x 32 x i16> %0,
    ptr %1,
    <vscale x 32 x i8> %2,
    iXLen %3)

  ret void
}

declare void @llvm.riscv.vsoxei.mask.nxv32i16.nxv32i8(
  <vscale x 32 x i16>,
  ptr,
  <vscale x 32 x i8>,
  <vscale x 32 x i1>,
  iXLen);

define void @intrinsic_vsoxei_mask_v_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, ptr %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv32i16_nxv32i16_nxv32i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
; CHECK-NEXT:    vsoxei8.v v8, (a0), v16, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.mask.nxv32i16.nxv32i8(
    <vscale x 32 x i16> %0,
    ptr %1,
    <vscale x 32 x i8> %2,
    <vscale x 32 x i1> %3,
    iXLen %4)

  ret void
}
3778
; vsoxei: i32 data addressed by 8-bit indices (vsoxei8.v), covering
; nxv1..nxv16 i32 (data EMUL mf2..m8), unmasked and masked forms.
; CHECK lines are autogenerated by update_llc_test_checks.py.
declare void @llvm.riscv.vsoxei.nxv1i32.nxv1i8(
  <vscale x 1 x i32>,
  ptr,
  <vscale x 1 x i8>,
  iXLen);

define void @intrinsic_vsoxei_v_nxv1i32_nxv1i32_nxv1i8(<vscale x 1 x i32> %0, ptr %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i32_nxv1i32_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    vsoxei8.v v8, (a0), v9
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.nxv1i32.nxv1i8(
    <vscale x 1 x i32> %0,
    ptr %1,
    <vscale x 1 x i8> %2,
    iXLen %3)

  ret void
}

declare void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i8(
  <vscale x 1 x i32>,
  ptr,
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  iXLen);

define void @intrinsic_vsoxei_mask_v_nxv1i32_nxv1i32_nxv1i8(<vscale x 1 x i32> %0, ptr %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i32_nxv1i32_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    vsoxei8.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i8(
    <vscale x 1 x i32> %0,
    ptr %1,
    <vscale x 1 x i8> %2,
    <vscale x 1 x i1> %3,
    iXLen %4)

  ret void
}

declare void @llvm.riscv.vsoxei.nxv2i32.nxv2i8(
  <vscale x 2 x i32>,
  ptr,
  <vscale x 2 x i8>,
  iXLen);

define void @intrinsic_vsoxei_v_nxv2i32_nxv2i32_nxv2i8(<vscale x 2 x i32> %0, ptr %1, <vscale x 2 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i32_nxv2i32_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vsoxei8.v v8, (a0), v9
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.nxv2i32.nxv2i8(
    <vscale x 2 x i32> %0,
    ptr %1,
    <vscale x 2 x i8> %2,
    iXLen %3)

  ret void
}

declare void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i8(
  <vscale x 2 x i32>,
  ptr,
  <vscale x 2 x i8>,
  <vscale x 2 x i1>,
  iXLen);

define void @intrinsic_vsoxei_mask_v_nxv2i32_nxv2i32_nxv2i8(<vscale x 2 x i32> %0, ptr %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i32_nxv2i32_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vsoxei8.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i8(
    <vscale x 2 x i32> %0,
    ptr %1,
    <vscale x 2 x i8> %2,
    <vscale x 2 x i1> %3,
    iXLen %4)

  ret void
}

declare void @llvm.riscv.vsoxei.nxv4i32.nxv4i8(
  <vscale x 4 x i32>,
  ptr,
  <vscale x 4 x i8>,
  iXLen);

define void @intrinsic_vsoxei_v_nxv4i32_nxv4i32_nxv4i8(<vscale x 4 x i32> %0, ptr %1, <vscale x 4 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i32_nxv4i32_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT:    vsoxei8.v v8, (a0), v10
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.nxv4i32.nxv4i8(
    <vscale x 4 x i32> %0,
    ptr %1,
    <vscale x 4 x i8> %2,
    iXLen %3)

  ret void
}

declare void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i8(
  <vscale x 4 x i32>,
  ptr,
  <vscale x 4 x i8>,
  <vscale x 4 x i1>,
  iXLen);

define void @intrinsic_vsoxei_mask_v_nxv4i32_nxv4i32_nxv4i8(<vscale x 4 x i32> %0, ptr %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i32_nxv4i32_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT:    vsoxei8.v v8, (a0), v10, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i8(
    <vscale x 4 x i32> %0,
    ptr %1,
    <vscale x 4 x i8> %2,
    <vscale x 4 x i1> %3,
    iXLen %4)

  ret void
}

declare void @llvm.riscv.vsoxei.nxv8i32.nxv8i8(
  <vscale x 8 x i32>,
  ptr,
  <vscale x 8 x i8>,
  iXLen);

define void @intrinsic_vsoxei_v_nxv8i32_nxv8i32_nxv8i8(<vscale x 8 x i32> %0, ptr %1, <vscale x 8 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i32_nxv8i32_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT:    vsoxei8.v v8, (a0), v12
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.nxv8i32.nxv8i8(
    <vscale x 8 x i32> %0,
    ptr %1,
    <vscale x 8 x i8> %2,
    iXLen %3)

  ret void
}

declare void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i8(
  <vscale x 8 x i32>,
  ptr,
  <vscale x 8 x i8>,
  <vscale x 8 x i1>,
  iXLen);

define void @intrinsic_vsoxei_mask_v_nxv8i32_nxv8i32_nxv8i8(<vscale x 8 x i32> %0, ptr %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i32_nxv8i32_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT:    vsoxei8.v v8, (a0), v12, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i8(
    <vscale x 8 x i32> %0,
    ptr %1,
    <vscale x 8 x i8> %2,
    <vscale x 8 x i1> %3,
    iXLen %4)

  ret void
}

declare void @llvm.riscv.vsoxei.nxv16i32.nxv16i8(
  <vscale x 16 x i32>,
  ptr,
  <vscale x 16 x i8>,
  iXLen);

define void @intrinsic_vsoxei_v_nxv16i32_nxv16i32_nxv16i8(<vscale x 16 x i32> %0, ptr %1, <vscale x 16 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_v_nxv16i32_nxv16i32_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT:    vsoxei8.v v8, (a0), v16
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.nxv16i32.nxv16i8(
    <vscale x 16 x i32> %0,
    ptr %1,
    <vscale x 16 x i8> %2,
    iXLen %3)

  ret void
}

declare void @llvm.riscv.vsoxei.mask.nxv16i32.nxv16i8(
  <vscale x 16 x i32>,
  ptr,
  <vscale x 16 x i8>,
  <vscale x 16 x i1>,
  iXLen);

define void @intrinsic_vsoxei_mask_v_nxv16i32_nxv16i32_nxv16i8(<vscale x 16 x i32> %0, ptr %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16i32_nxv16i32_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT:    vsoxei8.v v8, (a0), v16, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.mask.nxv16i32.nxv16i8(
    <vscale x 16 x i32> %0,
    ptr %1,
    <vscale x 16 x i8> %2,
    <vscale x 16 x i1> %3,
    iXLen %4)

  ret void
}
4008
; vsoxei: <vscale x 1 x i64> data addressed by 8-bit indices (vsoxei8.v,
; data EMUL m1). Unmasked and masked forms.
; CHECK lines are autogenerated by update_llc_test_checks.py.
declare void @llvm.riscv.vsoxei.nxv1i64.nxv1i8(
  <vscale x 1 x i64>,
  ptr,
  <vscale x 1 x i8>,
  iXLen);

define void @intrinsic_vsoxei_v_nxv1i64_nxv1i64_nxv1i8(<vscale x 1 x i64> %0, ptr %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i64_nxv1i64_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT:    vsoxei8.v v8, (a0), v9
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.nxv1i64.nxv1i8(
    <vscale x 1 x i64> %0,
    ptr %1,
    <vscale x 1 x i8> %2,
    iXLen %3)

  ret void
}

declare void @llvm.riscv.vsoxei.mask.nxv1i64.nxv1i8(
  <vscale x 1 x i64>,
  ptr,
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  iXLen);

define void @intrinsic_vsoxei_mask_v_nxv1i64_nxv1i64_nxv1i8(<vscale x 1 x i64> %0, ptr %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i64_nxv1i64_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT:    vsoxei8.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.mask.nxv1i64.nxv1i8(
    <vscale x 1 x i64> %0,
    ptr %1,
    <vscale x 1 x i8> %2,
    <vscale x 1 x i1> %3,
    iXLen %4)

  ret void
}
4054
4055declare void @llvm.riscv.vsoxei.nxv2i64.nxv2i8(
4056  <vscale x 2 x i64>,
4057  ptr,
4058  <vscale x 2 x i8>,
4059  iXLen);
4060
4061define void @intrinsic_vsoxei_v_nxv2i64_nxv2i64_nxv2i8(<vscale x 2 x i64> %0, ptr %1, <vscale x 2 x i8> %2, iXLen %3) nounwind {
4062; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i64_nxv2i64_nxv2i8:
4063; CHECK:       # %bb.0: # %entry
4064; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
4065; CHECK-NEXT:    vsoxei8.v v8, (a0), v10
4066; CHECK-NEXT:    ret
4067entry:
4068  call void @llvm.riscv.vsoxei.nxv2i64.nxv2i8(
4069    <vscale x 2 x i64> %0,
4070    ptr %1,
4071    <vscale x 2 x i8> %2,
4072    iXLen %3)
4073
4074  ret void
4075}
4076
4077declare void @llvm.riscv.vsoxei.mask.nxv2i64.nxv2i8(
4078  <vscale x 2 x i64>,
4079  ptr,
4080  <vscale x 2 x i8>,
4081  <vscale x 2 x i1>,
4082  iXLen);
4083
4084define void @intrinsic_vsoxei_mask_v_nxv2i64_nxv2i64_nxv2i8(<vscale x 2 x i64> %0, ptr %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
4085; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i64_nxv2i64_nxv2i8:
4086; CHECK:       # %bb.0: # %entry
4087; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
4088; CHECK-NEXT:    vsoxei8.v v8, (a0), v10, v0.t
4089; CHECK-NEXT:    ret
4090entry:
4091  call void @llvm.riscv.vsoxei.mask.nxv2i64.nxv2i8(
4092    <vscale x 2 x i64> %0,
4093    ptr %1,
4094    <vscale x 2 x i8> %2,
4095    <vscale x 2 x i1> %3,
4096    iXLen %4)
4097
4098  ret void
4099}
4100
4101declare void @llvm.riscv.vsoxei.nxv4i64.nxv4i8(
4102  <vscale x 4 x i64>,
4103  ptr,
4104  <vscale x 4 x i8>,
4105  iXLen);
4106
4107define void @intrinsic_vsoxei_v_nxv4i64_nxv4i64_nxv4i8(<vscale x 4 x i64> %0, ptr %1, <vscale x 4 x i8> %2, iXLen %3) nounwind {
4108; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i64_nxv4i64_nxv4i8:
4109; CHECK:       # %bb.0: # %entry
4110; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
4111; CHECK-NEXT:    vsoxei8.v v8, (a0), v12
4112; CHECK-NEXT:    ret
4113entry:
4114  call void @llvm.riscv.vsoxei.nxv4i64.nxv4i8(
4115    <vscale x 4 x i64> %0,
4116    ptr %1,
4117    <vscale x 4 x i8> %2,
4118    iXLen %3)
4119
4120  ret void
4121}
4122
4123declare void @llvm.riscv.vsoxei.mask.nxv4i64.nxv4i8(
4124  <vscale x 4 x i64>,
4125  ptr,
4126  <vscale x 4 x i8>,
4127  <vscale x 4 x i1>,
4128  iXLen);
4129
4130define void @intrinsic_vsoxei_mask_v_nxv4i64_nxv4i64_nxv4i8(<vscale x 4 x i64> %0, ptr %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
4131; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i64_nxv4i64_nxv4i8:
4132; CHECK:       # %bb.0: # %entry
4133; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
4134; CHECK-NEXT:    vsoxei8.v v8, (a0), v12, v0.t
4135; CHECK-NEXT:    ret
4136entry:
4137  call void @llvm.riscv.vsoxei.mask.nxv4i64.nxv4i8(
4138    <vscale x 4 x i64> %0,
4139    ptr %1,
4140    <vscale x 4 x i8> %2,
4141    <vscale x 4 x i1> %3,
4142    iXLen %4)
4143
4144  ret void
4145}
4146
4147declare void @llvm.riscv.vsoxei.nxv8i64.nxv8i8(
4148  <vscale x 8 x i64>,
4149  ptr,
4150  <vscale x 8 x i8>,
4151  iXLen);
4152
4153define void @intrinsic_vsoxei_v_nxv8i64_nxv8i64_nxv8i8(<vscale x 8 x i64> %0, ptr %1, <vscale x 8 x i8> %2, iXLen %3) nounwind {
4154; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i64_nxv8i64_nxv8i8:
4155; CHECK:       # %bb.0: # %entry
4156; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
4157; CHECK-NEXT:    vsoxei8.v v8, (a0), v16
4158; CHECK-NEXT:    ret
4159entry:
4160  call void @llvm.riscv.vsoxei.nxv8i64.nxv8i8(
4161    <vscale x 8 x i64> %0,
4162    ptr %1,
4163    <vscale x 8 x i8> %2,
4164    iXLen %3)
4165
4166  ret void
4167}
4168
4169declare void @llvm.riscv.vsoxei.mask.nxv8i64.nxv8i8(
4170  <vscale x 8 x i64>,
4171  ptr,
4172  <vscale x 8 x i8>,
4173  <vscale x 8 x i1>,
4174  iXLen);
4175
4176define void @intrinsic_vsoxei_mask_v_nxv8i64_nxv8i64_nxv8i8(<vscale x 8 x i64> %0, ptr %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
4177; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i64_nxv8i64_nxv8i8:
4178; CHECK:       # %bb.0: # %entry
4179; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
4180; CHECK-NEXT:    vsoxei8.v v8, (a0), v16, v0.t
4181; CHECK-NEXT:    ret
4182entry:
4183  call void @llvm.riscv.vsoxei.mask.nxv8i64.nxv8i8(
4184    <vscale x 8 x i64> %0,
4185    ptr %1,
4186    <vscale x 8 x i8> %2,
4187    <vscale x 8 x i1> %3,
4188    iXLen %4)
4189
4190  ret void
4191}
4192
; ---------------------------------------------------------------------------
; vsoxei of f16 (half) elements addressed by i8 offsets, covering LMUL
; mf4..m8 (nxv1f16..nxv32f16). Each unmasked test is paired with a masked
; (.mask, v0.t) variant; CHECK lines expect vsetvli e16 at the matching LMUL
; followed by vsoxei8.v (autogenerated by utils/update_llc_test_checks.py).
; Only zvfhmin is required (see RUN lines): the stores move bits, no f16
; arithmetic is performed.
; ---------------------------------------------------------------------------
declare void @llvm.riscv.vsoxei.nxv1f16.nxv1i8(
  <vscale x 1 x half>,
  ptr,
  <vscale x 1 x i8>,
  iXLen);

define void @intrinsic_vsoxei_v_nxv1f16_nxv1f16_nxv1i8(<vscale x 1 x half> %0, ptr %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f16_nxv1f16_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vsoxei8.v v8, (a0), v9
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.nxv1f16.nxv1i8(
    <vscale x 1 x half> %0,
    ptr %1,
    <vscale x 1 x i8> %2,
    iXLen %3)

  ret void
}

declare void @llvm.riscv.vsoxei.mask.nxv1f16.nxv1i8(
  <vscale x 1 x half>,
  ptr,
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  iXLen);

define void @intrinsic_vsoxei_mask_v_nxv1f16_nxv1f16_nxv1i8(<vscale x 1 x half> %0, ptr %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f16_nxv1f16_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vsoxei8.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.mask.nxv1f16.nxv1i8(
    <vscale x 1 x half> %0,
    ptr %1,
    <vscale x 1 x i8> %2,
    <vscale x 1 x i1> %3,
    iXLen %4)

  ret void
}

declare void @llvm.riscv.vsoxei.nxv2f16.nxv2i8(
  <vscale x 2 x half>,
  ptr,
  <vscale x 2 x i8>,
  iXLen);

define void @intrinsic_vsoxei_v_nxv2f16_nxv2f16_nxv2i8(<vscale x 2 x half> %0, ptr %1, <vscale x 2 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f16_nxv2f16_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT:    vsoxei8.v v8, (a0), v9
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.nxv2f16.nxv2i8(
    <vscale x 2 x half> %0,
    ptr %1,
    <vscale x 2 x i8> %2,
    iXLen %3)

  ret void
}

declare void @llvm.riscv.vsoxei.mask.nxv2f16.nxv2i8(
  <vscale x 2 x half>,
  ptr,
  <vscale x 2 x i8>,
  <vscale x 2 x i1>,
  iXLen);

define void @intrinsic_vsoxei_mask_v_nxv2f16_nxv2f16_nxv2i8(<vscale x 2 x half> %0, ptr %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f16_nxv2f16_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT:    vsoxei8.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.mask.nxv2f16.nxv2i8(
    <vscale x 2 x half> %0,
    ptr %1,
    <vscale x 2 x i8> %2,
    <vscale x 2 x i1> %3,
    iXLen %4)

  ret void
}

declare void @llvm.riscv.vsoxei.nxv4f16.nxv4i8(
  <vscale x 4 x half>,
  ptr,
  <vscale x 4 x i8>,
  iXLen);

define void @intrinsic_vsoxei_v_nxv4f16_nxv4f16_nxv4i8(<vscale x 4 x half> %0, ptr %1, <vscale x 4 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f16_nxv4f16_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vsoxei8.v v8, (a0), v9
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.nxv4f16.nxv4i8(
    <vscale x 4 x half> %0,
    ptr %1,
    <vscale x 4 x i8> %2,
    iXLen %3)

  ret void
}

declare void @llvm.riscv.vsoxei.mask.nxv4f16.nxv4i8(
  <vscale x 4 x half>,
  ptr,
  <vscale x 4 x i8>,
  <vscale x 4 x i1>,
  iXLen);

define void @intrinsic_vsoxei_mask_v_nxv4f16_nxv4f16_nxv4i8(<vscale x 4 x half> %0, ptr %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f16_nxv4f16_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vsoxei8.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.mask.nxv4f16.nxv4i8(
    <vscale x 4 x half> %0,
    ptr %1,
    <vscale x 4 x i8> %2,
    <vscale x 4 x i1> %3,
    iXLen %4)

  ret void
}

declare void @llvm.riscv.vsoxei.nxv8f16.nxv8i8(
  <vscale x 8 x half>,
  ptr,
  <vscale x 8 x i8>,
  iXLen);

define void @intrinsic_vsoxei_v_nxv8f16_nxv8f16_nxv8i8(<vscale x 8 x half> %0, ptr %1, <vscale x 8 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f16_nxv8f16_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT:    vsoxei8.v v8, (a0), v10
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.nxv8f16.nxv8i8(
    <vscale x 8 x half> %0,
    ptr %1,
    <vscale x 8 x i8> %2,
    iXLen %3)

  ret void
}

declare void @llvm.riscv.vsoxei.mask.nxv8f16.nxv8i8(
  <vscale x 8 x half>,
  ptr,
  <vscale x 8 x i8>,
  <vscale x 8 x i1>,
  iXLen);

define void @intrinsic_vsoxei_mask_v_nxv8f16_nxv8f16_nxv8i8(<vscale x 8 x half> %0, ptr %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f16_nxv8f16_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT:    vsoxei8.v v8, (a0), v10, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.mask.nxv8f16.nxv8i8(
    <vscale x 8 x half> %0,
    ptr %1,
    <vscale x 8 x i8> %2,
    <vscale x 8 x i1> %3,
    iXLen %4)

  ret void
}

declare void @llvm.riscv.vsoxei.nxv16f16.nxv16i8(
  <vscale x 16 x half>,
  ptr,
  <vscale x 16 x i8>,
  iXLen);

define void @intrinsic_vsoxei_v_nxv16f16_nxv16f16_nxv16i8(<vscale x 16 x half> %0, ptr %1, <vscale x 16 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_v_nxv16f16_nxv16f16_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT:    vsoxei8.v v8, (a0), v12
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.nxv16f16.nxv16i8(
    <vscale x 16 x half> %0,
    ptr %1,
    <vscale x 16 x i8> %2,
    iXLen %3)

  ret void
}

declare void @llvm.riscv.vsoxei.mask.nxv16f16.nxv16i8(
  <vscale x 16 x half>,
  ptr,
  <vscale x 16 x i8>,
  <vscale x 16 x i1>,
  iXLen);

define void @intrinsic_vsoxei_mask_v_nxv16f16_nxv16f16_nxv16i8(<vscale x 16 x half> %0, ptr %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16f16_nxv16f16_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT:    vsoxei8.v v8, (a0), v12, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.mask.nxv16f16.nxv16i8(
    <vscale x 16 x half> %0,
    ptr %1,
    <vscale x 16 x i8> %2,
    <vscale x 16 x i1> %3,
    iXLen %4)

  ret void
}

declare void @llvm.riscv.vsoxei.nxv32f16.nxv32i8(
  <vscale x 32 x half>,
  ptr,
  <vscale x 32 x i8>,
  iXLen);

define void @intrinsic_vsoxei_v_nxv32f16_nxv32f16_nxv32i8(<vscale x 32 x half> %0, ptr %1, <vscale x 32 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_v_nxv32f16_nxv32f16_nxv32i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
; CHECK-NEXT:    vsoxei8.v v8, (a0), v16
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.nxv32f16.nxv32i8(
    <vscale x 32 x half> %0,
    ptr %1,
    <vscale x 32 x i8> %2,
    iXLen %3)

  ret void
}

declare void @llvm.riscv.vsoxei.mask.nxv32f16.nxv32i8(
  <vscale x 32 x half>,
  ptr,
  <vscale x 32 x i8>,
  <vscale x 32 x i1>,
  iXLen);

define void @intrinsic_vsoxei_mask_v_nxv32f16_nxv32f16_nxv32i8(<vscale x 32 x half> %0, ptr %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv32f16_nxv32f16_nxv32i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
; CHECK-NEXT:    vsoxei8.v v8, (a0), v16, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.mask.nxv32f16.nxv32i8(
    <vscale x 32 x half> %0,
    ptr %1,
    <vscale x 32 x i8> %2,
    <vscale x 32 x i1> %3,
    iXLen %4)

  ret void
}
4468
; ---------------------------------------------------------------------------
; vsoxei of bf16 (bfloat) elements addressed by i32 offsets, covering LMUL
; mf4..m4 (nxv1bf16..nxv16bf16). Each unmasked test is paired with a masked
; (.mask, v0.t) variant; CHECK lines expect vsetvli e16 at the matching LMUL
; followed by vsoxei32.v (the EEW=32 index operand occupies a wider register
; group than the e16 data). Autogenerated by utils/update_llc_test_checks.py;
; requires +zvfbfmin (see RUN lines).
; ---------------------------------------------------------------------------
declare void @llvm.riscv.vsoxei.nxv1bf16.nxv1i32(
  <vscale x 1 x bfloat>,
  ptr,
  <vscale x 1 x i32>,
  iXLen);

define void @intrinsic_vsoxei_v_nxv1bf16_nxv1bf16_nxv1i32(<vscale x 1 x bfloat> %0, ptr %1, <vscale x 1 x i32> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_v_nxv1bf16_nxv1bf16_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vsoxei32.v v8, (a0), v9
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.nxv1bf16.nxv1i32(
    <vscale x 1 x bfloat> %0,
    ptr %1,
    <vscale x 1 x i32> %2,
    iXLen %3)

  ret void
}

declare void @llvm.riscv.vsoxei.mask.nxv1bf16.nxv1i32(
  <vscale x 1 x bfloat>,
  ptr,
  <vscale x 1 x i32>,
  <vscale x 1 x i1>,
  iXLen);

define void @intrinsic_vsoxei_mask_v_nxv1bf16_nxv1bf16_nxv1i32(<vscale x 1 x bfloat> %0, ptr %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1bf16_nxv1bf16_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vsoxei32.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.mask.nxv1bf16.nxv1i32(
    <vscale x 1 x bfloat> %0,
    ptr %1,
    <vscale x 1 x i32> %2,
    <vscale x 1 x i1> %3,
    iXLen %4)

  ret void
}

declare void @llvm.riscv.vsoxei.nxv2bf16.nxv2i32(
  <vscale x 2 x bfloat>,
  ptr,
  <vscale x 2 x i32>,
  iXLen);

define void @intrinsic_vsoxei_v_nxv2bf16_nxv2bf16_nxv2i32(<vscale x 2 x bfloat> %0, ptr %1, <vscale x 2 x i32> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_v_nxv2bf16_nxv2bf16_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT:    vsoxei32.v v8, (a0), v9
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.nxv2bf16.nxv2i32(
    <vscale x 2 x bfloat> %0,
    ptr %1,
    <vscale x 2 x i32> %2,
    iXLen %3)

  ret void
}

declare void @llvm.riscv.vsoxei.mask.nxv2bf16.nxv2i32(
  <vscale x 2 x bfloat>,
  ptr,
  <vscale x 2 x i32>,
  <vscale x 2 x i1>,
  iXLen);

define void @intrinsic_vsoxei_mask_v_nxv2bf16_nxv2bf16_nxv2i32(<vscale x 2 x bfloat> %0, ptr %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2bf16_nxv2bf16_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT:    vsoxei32.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.mask.nxv2bf16.nxv2i32(
    <vscale x 2 x bfloat> %0,
    ptr %1,
    <vscale x 2 x i32> %2,
    <vscale x 2 x i1> %3,
    iXLen %4)

  ret void
}

declare void @llvm.riscv.vsoxei.nxv4bf16.nxv4i32(
  <vscale x 4 x bfloat>,
  ptr,
  <vscale x 4 x i32>,
  iXLen);

define void @intrinsic_vsoxei_v_nxv4bf16_nxv4bf16_nxv4i32(<vscale x 4 x bfloat> %0, ptr %1, <vscale x 4 x i32> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_v_nxv4bf16_nxv4bf16_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vsoxei32.v v8, (a0), v10
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.nxv4bf16.nxv4i32(
    <vscale x 4 x bfloat> %0,
    ptr %1,
    <vscale x 4 x i32> %2,
    iXLen %3)

  ret void
}

declare void @llvm.riscv.vsoxei.mask.nxv4bf16.nxv4i32(
  <vscale x 4 x bfloat>,
  ptr,
  <vscale x 4 x i32>,
  <vscale x 4 x i1>,
  iXLen);

define void @intrinsic_vsoxei_mask_v_nxv4bf16_nxv4bf16_nxv4i32(<vscale x 4 x bfloat> %0, ptr %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4bf16_nxv4bf16_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vsoxei32.v v8, (a0), v10, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.mask.nxv4bf16.nxv4i32(
    <vscale x 4 x bfloat> %0,
    ptr %1,
    <vscale x 4 x i32> %2,
    <vscale x 4 x i1> %3,
    iXLen %4)

  ret void
}

declare void @llvm.riscv.vsoxei.nxv8bf16.nxv8i32(
  <vscale x 8 x bfloat>,
  ptr,
  <vscale x 8 x i32>,
  iXLen);

define void @intrinsic_vsoxei_v_nxv8bf16_nxv8bf16_nxv8i32(<vscale x 8 x bfloat> %0, ptr %1, <vscale x 8 x i32> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_v_nxv8bf16_nxv8bf16_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT:    vsoxei32.v v8, (a0), v12
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.nxv8bf16.nxv8i32(
    <vscale x 8 x bfloat> %0,
    ptr %1,
    <vscale x 8 x i32> %2,
    iXLen %3)

  ret void
}

declare void @llvm.riscv.vsoxei.mask.nxv8bf16.nxv8i32(
  <vscale x 8 x bfloat>,
  ptr,
  <vscale x 8 x i32>,
  <vscale x 8 x i1>,
  iXLen);

define void @intrinsic_vsoxei_mask_v_nxv8bf16_nxv8bf16_nxv8i32(<vscale x 8 x bfloat> %0, ptr %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8bf16_nxv8bf16_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT:    vsoxei32.v v8, (a0), v12, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.mask.nxv8bf16.nxv8i32(
    <vscale x 8 x bfloat> %0,
    ptr %1,
    <vscale x 8 x i32> %2,
    <vscale x 8 x i1> %3,
    iXLen %4)

  ret void
}

declare void @llvm.riscv.vsoxei.nxv16bf16.nxv16i32(
  <vscale x 16 x bfloat>,
  ptr,
  <vscale x 16 x i32>,
  iXLen);

define void @intrinsic_vsoxei_v_nxv16bf16_nxv16bf16_nxv16i32(<vscale x 16 x bfloat> %0, ptr %1, <vscale x 16 x i32> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_v_nxv16bf16_nxv16bf16_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT:    vsoxei32.v v8, (a0), v16
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.nxv16bf16.nxv16i32(
    <vscale x 16 x bfloat> %0,
    ptr %1,
    <vscale x 16 x i32> %2,
    iXLen %3)

  ret void
}

declare void @llvm.riscv.vsoxei.mask.nxv16bf16.nxv16i32(
  <vscale x 16 x bfloat>,
  ptr,
  <vscale x 16 x i32>,
  <vscale x 16 x i1>,
  iXLen);

define void @intrinsic_vsoxei_mask_v_nxv16bf16_nxv16bf16_nxv16i32(<vscale x 16 x bfloat> %0, ptr %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16bf16_nxv16bf16_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT:    vsoxei32.v v8, (a0), v16, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.mask.nxv16bf16.nxv16i32(
    <vscale x 16 x bfloat> %0,
    ptr %1,
    <vscale x 16 x i32> %2,
    <vscale x 16 x i1> %3,
    iXLen %4)

  ret void
}
4698
; ---------------------------------------------------------------------------
; vsoxei of f32 (float) elements addressed by i8 offsets, covering LMUL
; mf2..m8 (nxv1f32..nxv16f32). Each unmasked test is paired with a masked
; (.mask, v0.t) variant; CHECK lines expect vsetvli e32 at the matching LMUL
; followed by vsoxei8.v (autogenerated by utils/update_llc_test_checks.py).
; ---------------------------------------------------------------------------
declare void @llvm.riscv.vsoxei.nxv1f32.nxv1i8(
  <vscale x 1 x float>,
  ptr,
  <vscale x 1 x i8>,
  iXLen);

define void @intrinsic_vsoxei_v_nxv1f32_nxv1f32_nxv1i8(<vscale x 1 x float> %0, ptr %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f32_nxv1f32_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    vsoxei8.v v8, (a0), v9
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.nxv1f32.nxv1i8(
    <vscale x 1 x float> %0,
    ptr %1,
    <vscale x 1 x i8> %2,
    iXLen %3)

  ret void
}

declare void @llvm.riscv.vsoxei.mask.nxv1f32.nxv1i8(
  <vscale x 1 x float>,
  ptr,
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  iXLen);

define void @intrinsic_vsoxei_mask_v_nxv1f32_nxv1f32_nxv1i8(<vscale x 1 x float> %0, ptr %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f32_nxv1f32_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    vsoxei8.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.mask.nxv1f32.nxv1i8(
    <vscale x 1 x float> %0,
    ptr %1,
    <vscale x 1 x i8> %2,
    <vscale x 1 x i1> %3,
    iXLen %4)

  ret void
}

declare void @llvm.riscv.vsoxei.nxv2f32.nxv2i8(
  <vscale x 2 x float>,
  ptr,
  <vscale x 2 x i8>,
  iXLen);

define void @intrinsic_vsoxei_v_nxv2f32_nxv2f32_nxv2i8(<vscale x 2 x float> %0, ptr %1, <vscale x 2 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f32_nxv2f32_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vsoxei8.v v8, (a0), v9
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.nxv2f32.nxv2i8(
    <vscale x 2 x float> %0,
    ptr %1,
    <vscale x 2 x i8> %2,
    iXLen %3)

  ret void
}

declare void @llvm.riscv.vsoxei.mask.nxv2f32.nxv2i8(
  <vscale x 2 x float>,
  ptr,
  <vscale x 2 x i8>,
  <vscale x 2 x i1>,
  iXLen);

define void @intrinsic_vsoxei_mask_v_nxv2f32_nxv2f32_nxv2i8(<vscale x 2 x float> %0, ptr %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f32_nxv2f32_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vsoxei8.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.mask.nxv2f32.nxv2i8(
    <vscale x 2 x float> %0,
    ptr %1,
    <vscale x 2 x i8> %2,
    <vscale x 2 x i1> %3,
    iXLen %4)

  ret void
}

declare void @llvm.riscv.vsoxei.nxv4f32.nxv4i8(
  <vscale x 4 x float>,
  ptr,
  <vscale x 4 x i8>,
  iXLen);

define void @intrinsic_vsoxei_v_nxv4f32_nxv4f32_nxv4i8(<vscale x 4 x float> %0, ptr %1, <vscale x 4 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f32_nxv4f32_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT:    vsoxei8.v v8, (a0), v10
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.nxv4f32.nxv4i8(
    <vscale x 4 x float> %0,
    ptr %1,
    <vscale x 4 x i8> %2,
    iXLen %3)

  ret void
}

declare void @llvm.riscv.vsoxei.mask.nxv4f32.nxv4i8(
  <vscale x 4 x float>,
  ptr,
  <vscale x 4 x i8>,
  <vscale x 4 x i1>,
  iXLen);

define void @intrinsic_vsoxei_mask_v_nxv4f32_nxv4f32_nxv4i8(<vscale x 4 x float> %0, ptr %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f32_nxv4f32_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT:    vsoxei8.v v8, (a0), v10, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.mask.nxv4f32.nxv4i8(
    <vscale x 4 x float> %0,
    ptr %1,
    <vscale x 4 x i8> %2,
    <vscale x 4 x i1> %3,
    iXLen %4)

  ret void
}

declare void @llvm.riscv.vsoxei.nxv8f32.nxv8i8(
  <vscale x 8 x float>,
  ptr,
  <vscale x 8 x i8>,
  iXLen);

define void @intrinsic_vsoxei_v_nxv8f32_nxv8f32_nxv8i8(<vscale x 8 x float> %0, ptr %1, <vscale x 8 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f32_nxv8f32_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT:    vsoxei8.v v8, (a0), v12
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.nxv8f32.nxv8i8(
    <vscale x 8 x float> %0,
    ptr %1,
    <vscale x 8 x i8> %2,
    iXLen %3)

  ret void
}

declare void @llvm.riscv.vsoxei.mask.nxv8f32.nxv8i8(
  <vscale x 8 x float>,
  ptr,
  <vscale x 8 x i8>,
  <vscale x 8 x i1>,
  iXLen);

define void @intrinsic_vsoxei_mask_v_nxv8f32_nxv8f32_nxv8i8(<vscale x 8 x float> %0, ptr %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f32_nxv8f32_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT:    vsoxei8.v v8, (a0), v12, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.mask.nxv8f32.nxv8i8(
    <vscale x 8 x float> %0,
    ptr %1,
    <vscale x 8 x i8> %2,
    <vscale x 8 x i1> %3,
    iXLen %4)

  ret void
}

declare void @llvm.riscv.vsoxei.nxv16f32.nxv16i8(
  <vscale x 16 x float>,
  ptr,
  <vscale x 16 x i8>,
  iXLen);

define void @intrinsic_vsoxei_v_nxv16f32_nxv16f32_nxv16i8(<vscale x 16 x float> %0, ptr %1, <vscale x 16 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_v_nxv16f32_nxv16f32_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT:    vsoxei8.v v8, (a0), v16
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.nxv16f32.nxv16i8(
    <vscale x 16 x float> %0,
    ptr %1,
    <vscale x 16 x i8> %2,
    iXLen %3)

  ret void
}

declare void @llvm.riscv.vsoxei.mask.nxv16f32.nxv16i8(
  <vscale x 16 x float>,
  ptr,
  <vscale x 16 x i8>,
  <vscale x 16 x i1>,
  iXLen);

define void @intrinsic_vsoxei_mask_v_nxv16f32_nxv16f32_nxv16i8(<vscale x 16 x float> %0, ptr %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16f32_nxv16f32_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT:    vsoxei8.v v8, (a0), v16, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.mask.nxv16f32.nxv16i8(
    <vscale x 16 x float> %0,
    ptr %1,
    <vscale x 16 x i8> %2,
    <vscale x 16 x i1> %3,
    iXLen %4)

  ret void
}
4928
4929declare void @llvm.riscv.vsoxei.nxv1f64.nxv1i8(
4930  <vscale x 1 x double>,
4931  ptr,
4932  <vscale x 1 x i8>,
4933  iXLen);
4934
4935define void @intrinsic_vsoxei_v_nxv1f64_nxv1f64_nxv1i8(<vscale x 1 x double> %0, ptr %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
4936; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f64_nxv1f64_nxv1i8:
4937; CHECK:       # %bb.0: # %entry
4938; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
4939; CHECK-NEXT:    vsoxei8.v v8, (a0), v9
4940; CHECK-NEXT:    ret
4941entry:
4942  call void @llvm.riscv.vsoxei.nxv1f64.nxv1i8(
4943    <vscale x 1 x double> %0,
4944    ptr %1,
4945    <vscale x 1 x i8> %2,
4946    iXLen %3)
4947
4948  ret void
4949}
4950
4951declare void @llvm.riscv.vsoxei.mask.nxv1f64.nxv1i8(
4952  <vscale x 1 x double>,
4953  ptr,
4954  <vscale x 1 x i8>,
4955  <vscale x 1 x i1>,
4956  iXLen);
4957
4958define void @intrinsic_vsoxei_mask_v_nxv1f64_nxv1f64_nxv1i8(<vscale x 1 x double> %0, ptr %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
4959; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f64_nxv1f64_nxv1i8:
4960; CHECK:       # %bb.0: # %entry
4961; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
4962; CHECK-NEXT:    vsoxei8.v v8, (a0), v9, v0.t
4963; CHECK-NEXT:    ret
4964entry:
4965  call void @llvm.riscv.vsoxei.mask.nxv1f64.nxv1i8(
4966    <vscale x 1 x double> %0,
4967    ptr %1,
4968    <vscale x 1 x i8> %2,
4969    <vscale x 1 x i1> %3,
4970    iXLen %4)
4971
4972  ret void
4973}
4974
4975declare void @llvm.riscv.vsoxei.nxv2f64.nxv2i8(
4976  <vscale x 2 x double>,
4977  ptr,
4978  <vscale x 2 x i8>,
4979  iXLen);
4980
4981define void @intrinsic_vsoxei_v_nxv2f64_nxv2f64_nxv2i8(<vscale x 2 x double> %0, ptr %1, <vscale x 2 x i8> %2, iXLen %3) nounwind {
4982; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f64_nxv2f64_nxv2i8:
4983; CHECK:       # %bb.0: # %entry
4984; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
4985; CHECK-NEXT:    vsoxei8.v v8, (a0), v10
4986; CHECK-NEXT:    ret
4987entry:
4988  call void @llvm.riscv.vsoxei.nxv2f64.nxv2i8(
4989    <vscale x 2 x double> %0,
4990    ptr %1,
4991    <vscale x 2 x i8> %2,
4992    iXLen %3)
4993
4994  ret void
4995}
4996
; Masked indexed-store intrinsic for <vscale x 2 x double> data with a
; <vscale x 2 x i8> index vector and a <vscale x 2 x i1> mask. iXLen is
; textually rewritten to i32/i64 by the RUN lines at the top of the file.
declare void @llvm.riscv.vsoxei.mask.nxv2f64.nxv2i8(
  <vscale x 2 x double>,
  ptr,
  <vscale x 2 x i8>,
  <vscale x 2 x i1>,
  iXLen);

; Checks that llc selects a vsetvli with SEW=e64/LMUL=m2 and a vsoxei8.v
; carrying the v0.t mask operand, with the index vector in v10.
define void @intrinsic_vsoxei_mask_v_nxv2f64_nxv2f64_nxv2i8(<vscale x 2 x double> %0, ptr %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f64_nxv2f64_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT:    vsoxei8.v v8, (a0), v10, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.mask.nxv2f64.nxv2i8(
    <vscale x 2 x double> %0,
    ptr %1,
    <vscale x 2 x i8> %2,
    <vscale x 2 x i1> %3,
    iXLen %4)

  ret void
}
5020
; Unmasked indexed-store intrinsic for <vscale x 4 x double> data with a
; <vscale x 4 x i8> index vector. iXLen is textually rewritten to i32/i64 by
; the RUN lines at the top of the file.
declare void @llvm.riscv.vsoxei.nxv4f64.nxv4i8(
  <vscale x 4 x double>,
  ptr,
  <vscale x 4 x i8>,
  iXLen);

; Checks that llc selects a vsetvli with SEW=e64/LMUL=m4 and an unmasked
; vsoxei8.v; the index vector lands in v12 because the m4 data group uses
; v8-v11.
define void @intrinsic_vsoxei_v_nxv4f64_nxv4f64_nxv4i8(<vscale x 4 x double> %0, ptr %1, <vscale x 4 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f64_nxv4f64_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
; CHECK-NEXT:    vsoxei8.v v8, (a0), v12
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.nxv4f64.nxv4i8(
    <vscale x 4 x double> %0,
    ptr %1,
    <vscale x 4 x i8> %2,
    iXLen %3)

  ret void
}
5042
; Masked indexed-store intrinsic for <vscale x 4 x double> data with a
; <vscale x 4 x i8> index vector and a <vscale x 4 x i1> mask. iXLen is
; textually rewritten to i32/i64 by the RUN lines at the top of the file.
declare void @llvm.riscv.vsoxei.mask.nxv4f64.nxv4i8(
  <vscale x 4 x double>,
  ptr,
  <vscale x 4 x i8>,
  <vscale x 4 x i1>,
  iXLen);

; Checks that llc selects a vsetvli with SEW=e64/LMUL=m4 and a vsoxei8.v
; carrying the v0.t mask operand, with the index vector in v12.
define void @intrinsic_vsoxei_mask_v_nxv4f64_nxv4f64_nxv4i8(<vscale x 4 x double> %0, ptr %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f64_nxv4f64_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
; CHECK-NEXT:    vsoxei8.v v8, (a0), v12, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.mask.nxv4f64.nxv4i8(
    <vscale x 4 x double> %0,
    ptr %1,
    <vscale x 4 x i8> %2,
    <vscale x 4 x i1> %3,
    iXLen %4)

  ret void
}
5066
; Unmasked indexed-store intrinsic for <vscale x 8 x double> data with a
; <vscale x 8 x i8> index vector. iXLen is textually rewritten to i32/i64 by
; the RUN lines at the top of the file.
declare void @llvm.riscv.vsoxei.nxv8f64.nxv8i8(
  <vscale x 8 x double>,
  ptr,
  <vscale x 8 x i8>,
  iXLen);

; Checks that llc selects a vsetvli with SEW=e64/LMUL=m8 and an unmasked
; vsoxei8.v; the index vector lands in v16 because the m8 data group uses
; v8-v15.
define void @intrinsic_vsoxei_v_nxv8f64_nxv8f64_nxv8i8(<vscale x 8 x double> %0, ptr %1, <vscale x 8 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f64_nxv8f64_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
; CHECK-NEXT:    vsoxei8.v v8, (a0), v16
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.nxv8f64.nxv8i8(
    <vscale x 8 x double> %0,
    ptr %1,
    <vscale x 8 x i8> %2,
    iXLen %3)

  ret void
}
5088
; Masked indexed-store intrinsic for <vscale x 8 x double> data with a
; <vscale x 8 x i8> index vector and a <vscale x 8 x i1> mask. iXLen is
; textually rewritten to i32/i64 by the RUN lines at the top of the file.
declare void @llvm.riscv.vsoxei.mask.nxv8f64.nxv8i8(
  <vscale x 8 x double>,
  ptr,
  <vscale x 8 x i8>,
  <vscale x 8 x i1>,
  iXLen);

; Checks that llc selects a vsetvli with SEW=e64/LMUL=m8 and a vsoxei8.v
; carrying the v0.t mask operand, with the index vector in v16.
define void @intrinsic_vsoxei_mask_v_nxv8f64_nxv8f64_nxv8i8(<vscale x 8 x double> %0, ptr %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f64_nxv8f64_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
; CHECK-NEXT:    vsoxei8.v v8, (a0), v16, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.mask.nxv8f64.nxv8i8(
    <vscale x 8 x double> %0,
    ptr %1,
    <vscale x 8 x i8> %2,
    <vscale x 8 x i1> %3,
    iXLen %4)

  ret void
}
5112