; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv64 -mattr=+v,+zvfhmin,+zvfbfmin -verify-machineinstrs \
; RUN:   < %s | FileCheck %s

; The intrinsics are not supported with RV32.
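; Only the EEW=64 index forms live in this file; presumably that is because a
; 64-bit byte-offset index is wider than RV32's XLEN, and the i8/i16/i32 index
; widths are assumed to be covered by the companion vsoxei-rv32.ll test.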

declare void @llvm.riscv.vsoxei.nxv1i8.nxv1i64(
  <vscale x 1 x i8>,
  ptr,
  <vscale x 1 x i64>,
  i64);

define void @intrinsic_vsoxei_v_nxv1i8_nxv1i8_nxv1i64(<vscale x 1 x i8> %0, ptr %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i8_nxv1i8_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vsoxei64.v v8, (a0), v9
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.nxv1i8.nxv1i64(
    <vscale x 1 x i8> %0,
    ptr %1,
    <vscale x 1 x i64> %2,
    i64 %3)

  ret void
}

declare void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i64(
  <vscale x 1 x i8>,
  ptr,
  <vscale x 1 x i64>,
  <vscale x 1 x i1>,
  i64);

define void @intrinsic_vsoxei_mask_v_nxv1i8_nxv1i8_nxv1i64(<vscale x 1 x i8> %0, ptr %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i8_nxv1i8_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vsoxei64.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i64(
    <vscale x 1 x i8> %0,
    ptr %1,
    <vscale x 1 x i64> %2,
    <vscale x 1 x i1> %3,
    i64 %4)

  ret void
}
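
; Every element type below repeats the pair above: the unmasked intrinsic
; lowers to a bare vsoxei64.v and the masked one appends v0.t, with the i1
; mask expected in v0. The vsetvli SEW/LMUL (e8, mf8 here) is derived from
; the data type, while the vsoxei64 suffix fixes the index EEW at 64, so the
; index register group (v9 here, growing to v10/v12/v16) is sized by the
; <vscale x N x i64> index type rather than by the data type.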

declare void @llvm.riscv.vsoxei.nxv2i8.nxv2i64(
  <vscale x 2 x i8>,
  ptr,
  <vscale x 2 x i64>,
  i64);

define void @intrinsic_vsoxei_v_nxv2i8_nxv2i8_nxv2i64(<vscale x 2 x i8> %0, ptr %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i8_nxv2i8_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT:    vsoxei64.v v8, (a0), v10
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.nxv2i8.nxv2i64(
    <vscale x 2 x i8> %0,
    ptr %1,
    <vscale x 2 x i64> %2,
    i64 %3)

  ret void
}

declare void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i64(
  <vscale x 2 x i8>,
  ptr,
  <vscale x 2 x i64>,
  <vscale x 2 x i1>,
  i64);

define void @intrinsic_vsoxei_mask_v_nxv2i8_nxv2i8_nxv2i64(<vscale x 2 x i8> %0, ptr %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i8_nxv2i8_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT:    vsoxei64.v v8, (a0), v10, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i64(
    <vscale x 2 x i8> %0,
    ptr %1,
    <vscale x 2 x i64> %2,
    <vscale x 2 x i1> %3,
    i64 %4)

  ret void
}

declare void @llvm.riscv.vsoxei.nxv4i8.nxv4i64(
  <vscale x 4 x i8>,
  ptr,
  <vscale x 4 x i64>,
  i64);

define void @intrinsic_vsoxei_v_nxv4i8_nxv4i8_nxv4i64(<vscale x 4 x i8> %0, ptr %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i8_nxv4i8_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vsoxei64.v v8, (a0), v12
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.nxv4i8.nxv4i64(
    <vscale x 4 x i8> %0,
    ptr %1,
    <vscale x 4 x i64> %2,
    i64 %3)

  ret void
}

declare void @llvm.riscv.vsoxei.mask.nxv4i8.nxv4i64(
  <vscale x 4 x i8>,
  ptr,
  <vscale x 4 x i64>,
  <vscale x 4 x i1>,
  i64);

define void @intrinsic_vsoxei_mask_v_nxv4i8_nxv4i8_nxv4i64(<vscale x 4 x i8> %0, ptr %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i8_nxv4i8_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vsoxei64.v v8, (a0), v12, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.mask.nxv4i8.nxv4i64(
    <vscale x 4 x i8> %0,
    ptr %1,
    <vscale x 4 x i64> %2,
    <vscale x 4 x i1> %3,
    i64 %4)

  ret void
}

declare void @llvm.riscv.vsoxei.nxv8i8.nxv8i64(
  <vscale x 8 x i8>,
  ptr,
  <vscale x 8 x i64>,
  i64);

define void @intrinsic_vsoxei_v_nxv8i8_nxv8i8_nxv8i64(<vscale x 8 x i8> %0, ptr %1, <vscale x 8 x i64> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i8_nxv8i8_nxv8i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT:    vsoxei64.v v8, (a0), v16
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.nxv8i8.nxv8i64(
    <vscale x 8 x i8> %0,
    ptr %1,
    <vscale x 8 x i64> %2,
    i64 %3)

  ret void
}

declare void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i64(
  <vscale x 8 x i8>,
  ptr,
  <vscale x 8 x i64>,
  <vscale x 8 x i1>,
  i64);

define void @intrinsic_vsoxei_mask_v_nxv8i8_nxv8i8_nxv8i64(<vscale x 8 x i8> %0, ptr %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i8_nxv8i8_nxv8i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT:    vsoxei64.v v8, (a0), v16, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i64(
    <vscale x 8 x i8> %0,
    ptr %1,
    <vscale x 8 x i64> %2,
    <vscale x 8 x i1> %3,
    i64 %4)

  ret void
}
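
; The i8 coverage stops at nxv8i8: its <vscale x 8 x i64> index operand
; already fills an LMUL=8 register group (v16-v23), so wider i8 vectors would
; need an index register group larger than the architecture can encode.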

declare void @llvm.riscv.vsoxei.nxv1i16.nxv1i64(
  <vscale x 1 x i16>,
  ptr,
  <vscale x 1 x i64>,
  i64);

define void @intrinsic_vsoxei_v_nxv1i16_nxv1i16_nxv1i64(<vscale x 1 x i16> %0, ptr %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i16_nxv1i16_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vsoxei64.v v8, (a0), v9
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.nxv1i16.nxv1i64(
    <vscale x 1 x i16> %0,
    ptr %1,
    <vscale x 1 x i64> %2,
    i64 %3)

  ret void
}

declare void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i64(
  <vscale x 1 x i16>,
  ptr,
  <vscale x 1 x i64>,
  <vscale x 1 x i1>,
  i64);

define void @intrinsic_vsoxei_mask_v_nxv1i16_nxv1i16_nxv1i64(<vscale x 1 x i16> %0, ptr %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i16_nxv1i16_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vsoxei64.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i64(
    <vscale x 1 x i16> %0,
    ptr %1,
    <vscale x 1 x i64> %2,
    <vscale x 1 x i1> %3,
    i64 %4)

  ret void
}

declare void @llvm.riscv.vsoxei.nxv2i16.nxv2i64(
  <vscale x 2 x i16>,
  ptr,
  <vscale x 2 x i64>,
  i64);

define void @intrinsic_vsoxei_v_nxv2i16_nxv2i16_nxv2i64(<vscale x 2 x i16> %0, ptr %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i16_nxv2i16_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT:    vsoxei64.v v8, (a0), v10
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.nxv2i16.nxv2i64(
    <vscale x 2 x i16> %0,
    ptr %1,
    <vscale x 2 x i64> %2,
    i64 %3)

  ret void
}

declare void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i64(
  <vscale x 2 x i16>,
  ptr,
  <vscale x 2 x i64>,
  <vscale x 2 x i1>,
  i64);

define void @intrinsic_vsoxei_mask_v_nxv2i16_nxv2i16_nxv2i64(<vscale x 2 x i16> %0, ptr %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i16_nxv2i16_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT:    vsoxei64.v v8, (a0), v10, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i64(
    <vscale x 2 x i16> %0,
    ptr %1,
    <vscale x 2 x i64> %2,
    <vscale x 2 x i1> %3,
    i64 %4)

  ret void
}

declare void @llvm.riscv.vsoxei.nxv4i16.nxv4i64(
  <vscale x 4 x i16>,
  ptr,
  <vscale x 4 x i64>,
  i64);

define void @intrinsic_vsoxei_v_nxv4i16_nxv4i16_nxv4i64(<vscale x 4 x i16> %0, ptr %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i16_nxv4i16_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vsoxei64.v v8, (a0), v12
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.nxv4i16.nxv4i64(
    <vscale x 4 x i16> %0,
    ptr %1,
    <vscale x 4 x i64> %2,
    i64 %3)

  ret void
}

declare void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i64(
  <vscale x 4 x i16>,
  ptr,
  <vscale x 4 x i64>,
  <vscale x 4 x i1>,
  i64);

define void @intrinsic_vsoxei_mask_v_nxv4i16_nxv4i16_nxv4i64(<vscale x 4 x i16> %0, ptr %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i16_nxv4i16_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vsoxei64.v v8, (a0), v12, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i64(
    <vscale x 4 x i16> %0,
    ptr %1,
    <vscale x 4 x i64> %2,
    <vscale x 4 x i1> %3,
    i64 %4)

  ret void
}

declare void @llvm.riscv.vsoxei.nxv8i16.nxv8i64(
  <vscale x 8 x i16>,
  ptr,
  <vscale x 8 x i64>,
  i64);

define void @intrinsic_vsoxei_v_nxv8i16_nxv8i16_nxv8i64(<vscale x 8 x i16> %0, ptr %1, <vscale x 8 x i64> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i16_nxv8i16_nxv8i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT:    vsoxei64.v v8, (a0), v16
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.nxv8i16.nxv8i64(
    <vscale x 8 x i16> %0,
    ptr %1,
    <vscale x 8 x i64> %2,
    i64 %3)

  ret void
}

declare void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i64(
  <vscale x 8 x i16>,
  ptr,
  <vscale x 8 x i64>,
  <vscale x 8 x i1>,
  i64);

define void @intrinsic_vsoxei_mask_v_nxv8i16_nxv8i16_nxv8i64(<vscale x 8 x i16> %0, ptr %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i16_nxv8i16_nxv8i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT:    vsoxei64.v v8, (a0), v16, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i64(
    <vscale x 8 x i16> %0,
    ptr %1,
    <vscale x 8 x i64> %2,
    <vscale x 8 x i1> %3,
    i64 %4)

  ret void
}

declare void @llvm.riscv.vsoxei.nxv1i32.nxv1i64(
  <vscale x 1 x i32>,
  ptr,
  <vscale x 1 x i64>,
  i64);

define void @intrinsic_vsoxei_v_nxv1i32_nxv1i32_nxv1i64(<vscale x 1 x i32> %0, ptr %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i32_nxv1i32_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    vsoxei64.v v8, (a0), v9
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.nxv1i32.nxv1i64(
    <vscale x 1 x i32> %0,
    ptr %1,
    <vscale x 1 x i64> %2,
    i64 %3)

  ret void
}

declare void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i64(
  <vscale x 1 x i32>,
  ptr,
  <vscale x 1 x i64>,
  <vscale x 1 x i1>,
  i64);

define void @intrinsic_vsoxei_mask_v_nxv1i32_nxv1i32_nxv1i64(<vscale x 1 x i32> %0, ptr %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i32_nxv1i32_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    vsoxei64.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i64(
    <vscale x 1 x i32> %0,
    ptr %1,
    <vscale x 1 x i64> %2,
    <vscale x 1 x i1> %3,
    i64 %4)

  ret void
}

declare void @llvm.riscv.vsoxei.nxv2i32.nxv2i64(
  <vscale x 2 x i32>,
  ptr,
  <vscale x 2 x i64>,
  i64);

define void @intrinsic_vsoxei_v_nxv2i32_nxv2i32_nxv2i64(<vscale x 2 x i32> %0, ptr %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i32_nxv2i32_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vsoxei64.v v8, (a0), v10
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.nxv2i32.nxv2i64(
    <vscale x 2 x i32> %0,
    ptr %1,
    <vscale x 2 x i64> %2,
    i64 %3)

  ret void
}

declare void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i64(
  <vscale x 2 x i32>,
  ptr,
  <vscale x 2 x i64>,
  <vscale x 2 x i1>,
  i64);

define void @intrinsic_vsoxei_mask_v_nxv2i32_nxv2i32_nxv2i64(<vscale x 2 x i32> %0, ptr %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i32_nxv2i32_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vsoxei64.v v8, (a0), v10, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i64(
    <vscale x 2 x i32> %0,
    ptr %1,
    <vscale x 2 x i64> %2,
    <vscale x 2 x i1> %3,
    i64 %4)

  ret void
}

declare void @llvm.riscv.vsoxei.nxv4i32.nxv4i64(
  <vscale x 4 x i32>,
  ptr,
  <vscale x 4 x i64>,
  i64);

define void @intrinsic_vsoxei_v_nxv4i32_nxv4i32_nxv4i64(<vscale x 4 x i32> %0, ptr %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i32_nxv4i32_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT:    vsoxei64.v v8, (a0), v12
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.nxv4i32.nxv4i64(
    <vscale x 4 x i32> %0,
    ptr %1,
    <vscale x 4 x i64> %2,
    i64 %3)

  ret void
}

declare void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i64(
  <vscale x 4 x i32>,
  ptr,
  <vscale x 4 x i64>,
  <vscale x 4 x i1>,
  i64);

define void @intrinsic_vsoxei_mask_v_nxv4i32_nxv4i32_nxv4i64(<vscale x 4 x i32> %0, ptr %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i32_nxv4i32_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT:    vsoxei64.v v8, (a0), v12, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i64(
    <vscale x 4 x i32> %0,
    ptr %1,
    <vscale x 4 x i64> %2,
    <vscale x 4 x i1> %3,
    i64 %4)

  ret void
}

declare void @llvm.riscv.vsoxei.nxv8i32.nxv8i64(
  <vscale x 8 x i32>,
  ptr,
  <vscale x 8 x i64>,
  i64);

define void @intrinsic_vsoxei_v_nxv8i32_nxv8i32_nxv8i64(<vscale x 8 x i32> %0, ptr %1, <vscale x 8 x i64> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i32_nxv8i32_nxv8i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT:    vsoxei64.v v8, (a0), v16
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.nxv8i32.nxv8i64(
    <vscale x 8 x i32> %0,
    ptr %1,
    <vscale x 8 x i64> %2,
    i64 %3)

  ret void
}

declare void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i64(
  <vscale x 8 x i32>,
  ptr,
  <vscale x 8 x i64>,
  <vscale x 8 x i1>,
  i64);

define void @intrinsic_vsoxei_mask_v_nxv8i32_nxv8i32_nxv8i64(<vscale x 8 x i32> %0, ptr %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i32_nxv8i32_nxv8i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT:    vsoxei64.v v8, (a0), v16, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i64(
    <vscale x 8 x i32> %0,
    ptr %1,
    <vscale x 8 x i64> %2,
    <vscale x 8 x i1> %3,
    i64 %4)

  ret void
}

declare void @llvm.riscv.vsoxei.nxv1i64.nxv1i64(
  <vscale x 1 x i64>,
  ptr,
  <vscale x 1 x i64>,
  i64);

define void @intrinsic_vsoxei_v_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, ptr %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i64_nxv1i64_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT:    vsoxei64.v v8, (a0), v9
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.nxv1i64.nxv1i64(
    <vscale x 1 x i64> %0,
    ptr %1,
    <vscale x 1 x i64> %2,
    i64 %3)

  ret void
}

declare void @llvm.riscv.vsoxei.mask.nxv1i64.nxv1i64(
  <vscale x 1 x i64>,
  ptr,
  <vscale x 1 x i64>,
  <vscale x 1 x i1>,
  i64);

define void @intrinsic_vsoxei_mask_v_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, ptr %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i64_nxv1i64_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT:    vsoxei64.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.mask.nxv1i64.nxv1i64(
    <vscale x 1 x i64> %0,
    ptr %1,
    <vscale x 1 x i64> %2,
    <vscale x 1 x i1> %3,
    i64 %4)

  ret void
}

declare void @llvm.riscv.vsoxei.nxv2i64.nxv2i64(
  <vscale x 2 x i64>,
  ptr,
  <vscale x 2 x i64>,
  i64);

define void @intrinsic_vsoxei_v_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, ptr %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i64_nxv2i64_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT:    vsoxei64.v v8, (a0), v10
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.nxv2i64.nxv2i64(
    <vscale x 2 x i64> %0,
    ptr %1,
    <vscale x 2 x i64> %2,
    i64 %3)

  ret void
}

declare void @llvm.riscv.vsoxei.mask.nxv2i64.nxv2i64(
  <vscale x 2 x i64>,
  ptr,
  <vscale x 2 x i64>,
  <vscale x 2 x i1>,
  i64);

define void @intrinsic_vsoxei_mask_v_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, ptr %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i64_nxv2i64_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT:    vsoxei64.v v8, (a0), v10, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.mask.nxv2i64.nxv2i64(
    <vscale x 2 x i64> %0,
    ptr %1,
    <vscale x 2 x i64> %2,
    <vscale x 2 x i1> %3,
    i64 %4)

  ret void
}

declare void @llvm.riscv.vsoxei.nxv4i64.nxv4i64(
  <vscale x 4 x i64>,
  ptr,
  <vscale x 4 x i64>,
  i64);

define void @intrinsic_vsoxei_v_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, ptr %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i64_nxv4i64_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
; CHECK-NEXT:    vsoxei64.v v8, (a0), v12
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.nxv4i64.nxv4i64(
    <vscale x 4 x i64> %0,
    ptr %1,
    <vscale x 4 x i64> %2,
    i64 %3)

  ret void
}

declare void @llvm.riscv.vsoxei.mask.nxv4i64.nxv4i64(
  <vscale x 4 x i64>,
  ptr,
  <vscale x 4 x i64>,
  <vscale x 4 x i1>,
  i64);

define void @intrinsic_vsoxei_mask_v_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, ptr %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i64_nxv4i64_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
; CHECK-NEXT:    vsoxei64.v v8, (a0), v12, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.mask.nxv4i64.nxv4i64(
    <vscale x 4 x i64> %0,
    ptr %1,
    <vscale x 4 x i64> %2,
    <vscale x 4 x i1> %3,
    i64 %4)

  ret void
}

declare void @llvm.riscv.vsoxei.nxv8i64.nxv8i64(
  <vscale x 8 x i64>,
  ptr,
  <vscale x 8 x i64>,
  i64);

define void @intrinsic_vsoxei_v_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, ptr %1, <vscale x 8 x i64> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i64_nxv8i64_nxv8i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
; CHECK-NEXT:    vsoxei64.v v8, (a0), v16
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.nxv8i64.nxv8i64(
    <vscale x 8 x i64> %0,
    ptr %1,
    <vscale x 8 x i64> %2,
    i64 %3)

  ret void
}

declare void @llvm.riscv.vsoxei.mask.nxv8i64.nxv8i64(
  <vscale x 8 x i64>,
  ptr,
  <vscale x 8 x i64>,
  <vscale x 8 x i1>,
  i64);

define void @intrinsic_vsoxei_mask_v_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, ptr %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i64_nxv8i64_nxv8i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
; CHECK-NEXT:    vsoxei64.v v8, (a0), v16, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.mask.nxv8i64.nxv8i64(
    <vscale x 8 x i64> %0,
    ptr %1,
    <vscale x 8 x i64> %2,
    <vscale x 8 x i1> %3,
    i64 %4)

  ret void
}
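
; Floating-point element types follow. The stores only move bit patterns, so
; the half tests below appear to rely on nothing beyond the +zvfhmin
; attribute in the RUN line, and their CHECK lines match the e16 integer
; cases above.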

declare void @llvm.riscv.vsoxei.nxv1f16.nxv1i64(
  <vscale x 1 x half>,
  ptr,
  <vscale x 1 x i64>,
  i64);

define void @intrinsic_vsoxei_v_nxv1f16_nxv1f16_nxv1i64(<vscale x 1 x half> %0, ptr %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f16_nxv1f16_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vsoxei64.v v8, (a0), v9
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.nxv1f16.nxv1i64(
    <vscale x 1 x half> %0,
    ptr %1,
    <vscale x 1 x i64> %2,
    i64 %3)

  ret void
}

declare void @llvm.riscv.vsoxei.mask.nxv1f16.nxv1i64(
  <vscale x 1 x half>,
  ptr,
  <vscale x 1 x i64>,
  <vscale x 1 x i1>,
  i64);

define void @intrinsic_vsoxei_mask_v_nxv1f16_nxv1f16_nxv1i64(<vscale x 1 x half> %0, ptr %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f16_nxv1f16_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vsoxei64.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.mask.nxv1f16.nxv1i64(
    <vscale x 1 x half> %0,
    ptr %1,
    <vscale x 1 x i64> %2,
    <vscale x 1 x i1> %3,
    i64 %4)

  ret void
}

declare void @llvm.riscv.vsoxei.nxv2f16.nxv2i64(
  <vscale x 2 x half>,
  ptr,
  <vscale x 2 x i64>,
  i64);

define void @intrinsic_vsoxei_v_nxv2f16_nxv2f16_nxv2i64(<vscale x 2 x half> %0, ptr %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f16_nxv2f16_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT:    vsoxei64.v v8, (a0), v10
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.nxv2f16.nxv2i64(
    <vscale x 2 x half> %0,
    ptr %1,
    <vscale x 2 x i64> %2,
    i64 %3)

  ret void
}

declare void @llvm.riscv.vsoxei.mask.nxv2f16.nxv2i64(
  <vscale x 2 x half>,
  ptr,
  <vscale x 2 x i64>,
  <vscale x 2 x i1>,
  i64);

define void @intrinsic_vsoxei_mask_v_nxv2f16_nxv2f16_nxv2i64(<vscale x 2 x half> %0, ptr %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f16_nxv2f16_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT:    vsoxei64.v v8, (a0), v10, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.mask.nxv2f16.nxv2i64(
    <vscale x 2 x half> %0,
    ptr %1,
    <vscale x 2 x i64> %2,
    <vscale x 2 x i1> %3,
    i64 %4)

  ret void
}

declare void @llvm.riscv.vsoxei.nxv4f16.nxv4i64(
  <vscale x 4 x half>,
  ptr,
  <vscale x 4 x i64>,
  i64);

define void @intrinsic_vsoxei_v_nxv4f16_nxv4f16_nxv4i64(<vscale x 4 x half> %0, ptr %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f16_nxv4f16_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vsoxei64.v v8, (a0), v12
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.nxv4f16.nxv4i64(
    <vscale x 4 x half> %0,
    ptr %1,
    <vscale x 4 x i64> %2,
    i64 %3)

  ret void
}

declare void @llvm.riscv.vsoxei.mask.nxv4f16.nxv4i64(
  <vscale x 4 x half>,
  ptr,
  <vscale x 4 x i64>,
  <vscale x 4 x i1>,
  i64);

define void @intrinsic_vsoxei_mask_v_nxv4f16_nxv4f16_nxv4i64(<vscale x 4 x half> %0, ptr %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f16_nxv4f16_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vsoxei64.v v8, (a0), v12, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.mask.nxv4f16.nxv4i64(
    <vscale x 4 x half> %0,
    ptr %1,
    <vscale x 4 x i64> %2,
    <vscale x 4 x i1> %3,
    i64 %4)

  ret void
}

declare void @llvm.riscv.vsoxei.nxv8f16.nxv8i64(
  <vscale x 8 x half>,
  ptr,
  <vscale x 8 x i64>,
  i64);

define void @intrinsic_vsoxei_v_nxv8f16_nxv8f16_nxv8i64(<vscale x 8 x half> %0, ptr %1, <vscale x 8 x i64> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f16_nxv8f16_nxv8i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT:    vsoxei64.v v8, (a0), v16
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.nxv8f16.nxv8i64(
    <vscale x 8 x half> %0,
    ptr %1,
    <vscale x 8 x i64> %2,
    i64 %3)

  ret void
}

declare void @llvm.riscv.vsoxei.mask.nxv8f16.nxv8i64(
  <vscale x 8 x half>,
  ptr,
  <vscale x 8 x i64>,
  <vscale x 8 x i1>,
  i64);

define void @intrinsic_vsoxei_mask_v_nxv8f16_nxv8f16_nxv8i64(<vscale x 8 x half> %0, ptr %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f16_nxv8f16_nxv8i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT:    vsoxei64.v v8, (a0), v16, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.mask.nxv8f16.nxv8i64(
    <vscale x 8 x half> %0,
    ptr %1,
    <vscale x 8 x i64> %2,
    <vscale x 8 x i1> %3,
    i64 %4)

  ret void
}
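
; The bfloat tests below exercise +zvfbfmin from the RUN line in the same
; way: bf16 is likewise a 16-bit payload, so the expected code is identical
; to the f16 (and i16) variants above.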

declare void @llvm.riscv.vsoxei.nxv1bf16.nxv1i64(
  <vscale x 1 x bfloat>,
  ptr,
  <vscale x 1 x i64>,
  i64);

define void @intrinsic_vsoxei_v_nxv1bf16_nxv1bf16_nxv1i64(<vscale x 1 x bfloat> %0, ptr %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_v_nxv1bf16_nxv1bf16_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vsoxei64.v v8, (a0), v9
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.nxv1bf16.nxv1i64(
    <vscale x 1 x bfloat> %0,
    ptr %1,
    <vscale x 1 x i64> %2,
    i64 %3)

  ret void
}

declare void @llvm.riscv.vsoxei.mask.nxv1bf16.nxv1i64(
  <vscale x 1 x bfloat>,
  ptr,
  <vscale x 1 x i64>,
  <vscale x 1 x i1>,
  i64);

define void @intrinsic_vsoxei_mask_v_nxv1bf16_nxv1bf16_nxv1i64(<vscale x 1 x bfloat> %0, ptr %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1bf16_nxv1bf16_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vsoxei64.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.mask.nxv1bf16.nxv1i64(
    <vscale x 1 x bfloat> %0,
    ptr %1,
    <vscale x 1 x i64> %2,
    <vscale x 1 x i1> %3,
    i64 %4)

  ret void
}

declare void @llvm.riscv.vsoxei.nxv2bf16.nxv2i64(
  <vscale x 2 x bfloat>,
  ptr,
  <vscale x 2 x i64>,
  i64);

define void @intrinsic_vsoxei_v_nxv2bf16_nxv2bf16_nxv2i64(<vscale x 2 x bfloat> %0, ptr %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_v_nxv2bf16_nxv2bf16_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT:    vsoxei64.v v8, (a0), v10
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.nxv2bf16.nxv2i64(
    <vscale x 2 x bfloat> %0,
    ptr %1,
    <vscale x 2 x i64> %2,
    i64 %3)

  ret void
}

declare void @llvm.riscv.vsoxei.mask.nxv2bf16.nxv2i64(
  <vscale x 2 x bfloat>,
  ptr,
  <vscale x 2 x i64>,
  <vscale x 2 x i1>,
  i64);

define void @intrinsic_vsoxei_mask_v_nxv2bf16_nxv2bf16_nxv2i64(<vscale x 2 x bfloat> %0, ptr %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2bf16_nxv2bf16_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT:    vsoxei64.v v8, (a0), v10, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.mask.nxv2bf16.nxv2i64(
    <vscale x 2 x bfloat> %0,
    ptr %1,
    <vscale x 2 x i64> %2,
    <vscale x 2 x i1> %3,
    i64 %4)

  ret void
}

declare void @llvm.riscv.vsoxei.nxv4bf16.nxv4i64(
  <vscale x 4 x bfloat>,
  ptr,
  <vscale x 4 x i64>,
  i64);

define void @intrinsic_vsoxei_v_nxv4bf16_nxv4bf16_nxv4i64(<vscale x 4 x bfloat> %0, ptr %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_v_nxv4bf16_nxv4bf16_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vsoxei64.v v8, (a0), v12
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.nxv4bf16.nxv4i64(
    <vscale x 4 x bfloat> %0,
    ptr %1,
    <vscale x 4 x i64> %2,
    i64 %3)

  ret void
}

declare void @llvm.riscv.vsoxei.mask.nxv4bf16.nxv4i64(
  <vscale x 4 x bfloat>,
  ptr,
  <vscale x 4 x i64>,
  <vscale x 4 x i1>,
  i64);

define void @intrinsic_vsoxei_mask_v_nxv4bf16_nxv4bf16_nxv4i64(<vscale x 4 x bfloat> %0, ptr %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4bf16_nxv4bf16_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vsoxei64.v v8, (a0), v12, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.mask.nxv4bf16.nxv4i64(
    <vscale x 4 x bfloat> %0,
    ptr %1,
    <vscale x 4 x i64> %2,
    <vscale x 4 x i1> %3,
    i64 %4)

  ret void
}

declare void @llvm.riscv.vsoxei.nxv8bf16.nxv8i64(
  <vscale x 8 x bfloat>,
  ptr,
  <vscale x 8 x i64>,
  i64);

define void @intrinsic_vsoxei_v_nxv8bf16_nxv8bf16_nxv8i64(<vscale x 8 x bfloat> %0, ptr %1, <vscale x 8 x i64> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_v_nxv8bf16_nxv8bf16_nxv8i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT:    vsoxei64.v v8, (a0), v16
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.nxv8bf16.nxv8i64(
    <vscale x 8 x bfloat> %0,
    ptr %1,
    <vscale x 8 x i64> %2,
    i64 %3)

  ret void
}

declare void @llvm.riscv.vsoxei.mask.nxv8bf16.nxv8i64(
  <vscale x 8 x bfloat>,
  ptr,
  <vscale x 8 x i64>,
  <vscale x 8 x i1>,
  i64);

define void @intrinsic_vsoxei_mask_v_nxv8bf16_nxv8bf16_nxv8i64(<vscale x 8 x bfloat> %0, ptr %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8bf16_nxv8bf16_nxv8i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT:    vsoxei64.v v8, (a0), v16, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.mask.nxv8bf16.nxv8i64(
    <vscale x 8 x bfloat> %0,
    ptr %1,
    <vscale x 8 x i64> %2,
    <vscale x 8 x i1> %3,
    i64 %4)

  ret void
}

declare void @llvm.riscv.vsoxei.nxv1f32.nxv1i64(
  <vscale x 1 x float>,
  ptr,
  <vscale x 1 x i64>,
  i64);

define void @intrinsic_vsoxei_v_nxv1f32_nxv1f32_nxv1i64(<vscale x 1 x float> %0, ptr %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f32_nxv1f32_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    vsoxei64.v v8, (a0), v9
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.nxv1f32.nxv1i64(
    <vscale x 1 x float> %0,
    ptr %1,
    <vscale x 1 x i64> %2,
    i64 %3)

  ret void
}

declare void @llvm.riscv.vsoxei.mask.nxv1f32.nxv1i64(
  <vscale x 1 x float>,
  ptr,
  <vscale x 1 x i64>,
  <vscale x 1 x i1>,
  i64);

define void @intrinsic_vsoxei_mask_v_nxv1f32_nxv1f32_nxv1i64(<vscale x 1 x float> %0, ptr %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f32_nxv1f32_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    vsoxei64.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.mask.nxv1f32.nxv1i64(
    <vscale x 1 x float> %0,
    ptr %1,
    <vscale x 1 x i64> %2,
    <vscale x 1 x i1> %3,
    i64 %4)

  ret void
}

declare void @llvm.riscv.vsoxei.nxv2f32.nxv2i64(
  <vscale x 2 x float>,
  ptr,
  <vscale x 2 x i64>,
  i64);

define void @intrinsic_vsoxei_v_nxv2f32_nxv2f32_nxv2i64(<vscale x 2 x float> %0, ptr %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f32_nxv2f32_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vsoxei64.v v8, (a0), v10
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.nxv2f32.nxv2i64(
    <vscale x 2 x float> %0,
    ptr %1,
    <vscale x 2 x i64> %2,
    i64 %3)

  ret void
}

declare void @llvm.riscv.vsoxei.mask.nxv2f32.nxv2i64(
  <vscale x 2 x float>,
  ptr,
  <vscale x 2 x i64>,
  <vscale x 2 x i1>,
  i64);

define void @intrinsic_vsoxei_mask_v_nxv2f32_nxv2f32_nxv2i64(<vscale x 2 x float> %0, ptr %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f32_nxv2f32_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vsoxei64.v v8, (a0), v10, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.mask.nxv2f32.nxv2i64(
    <vscale x 2 x float> %0,
    ptr %1,
    <vscale x 2 x i64> %2,
    <vscale x 2 x i1> %3,
    i64 %4)

  ret void
}

declare void @llvm.riscv.vsoxei.nxv4f32.nxv4i64(
  <vscale x 4 x float>,
  ptr,
  <vscale x 4 x i64>,
  i64);

define void @intrinsic_vsoxei_v_nxv4f32_nxv4f32_nxv4i64(<vscale x 4 x float> %0, ptr %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f32_nxv4f32_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT:    vsoxei64.v v8, (a0), v12
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.nxv4f32.nxv4i64(
    <vscale x 4 x float> %0,
    ptr %1,
    <vscale x 4 x i64> %2,
    i64 %3)

  ret void
}

declare void @llvm.riscv.vsoxei.mask.nxv4f32.nxv4i64(
  <vscale x 4 x float>,
  ptr,
  <vscale x 4 x i64>,
  <vscale x 4 x i1>,
  i64);

define void @intrinsic_vsoxei_mask_v_nxv4f32_nxv4f32_nxv4i64(<vscale x 4 x float> %0, ptr %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f32_nxv4f32_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT:    vsoxei64.v v8, (a0), v12, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.mask.nxv4f32.nxv4i64(
    <vscale x 4 x float> %0,
    ptr %1,
    <vscale x 4 x i64> %2,
    <vscale x 4 x i1> %3,
    i64 %4)

  ret void
}

declare void @llvm.riscv.vsoxei.nxv8f32.nxv8i64(
  <vscale x 8 x float>,
  ptr,
  <vscale x 8 x i64>,
  i64);

define void @intrinsic_vsoxei_v_nxv8f32_nxv8f32_nxv8i64(<vscale x 8 x float> %0, ptr %1, <vscale x 8 x i64> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f32_nxv8f32_nxv8i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT:    vsoxei64.v v8, (a0), v16
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.nxv8f32.nxv8i64(
    <vscale x 8 x float> %0,
    ptr %1,
    <vscale x 8 x i64> %2,
    i64 %3)

  ret void
}

declare void @llvm.riscv.vsoxei.mask.nxv8f32.nxv8i64(
  <vscale x 8 x float>,
  ptr,
  <vscale x 8 x i64>,
  <vscale x 8 x i1>,
  i64);

define void @intrinsic_vsoxei_mask_v_nxv8f32_nxv8f32_nxv8i64(<vscale x 8 x float> %0, ptr %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f32_nxv8f32_nxv8i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT:    vsoxei64.v v8, (a0), v16, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.mask.nxv8f32.nxv8i64(
    <vscale x 8 x float> %0,
    ptr %1,
    <vscale x 8 x i64> %2,
    <vscale x 8 x i1> %3,
    i64 %4)

  ret void
}

declare void @llvm.riscv.vsoxei.nxv1f64.nxv1i64(
  <vscale x 1 x double>,
  ptr,
  <vscale x 1 x i64>,
  i64);

define void @intrinsic_vsoxei_v_nxv1f64_nxv1f64_nxv1i64(<vscale x 1 x double> %0, ptr %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f64_nxv1f64_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT:    vsoxei64.v v8, (a0), v9
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.nxv1f64.nxv1i64(
    <vscale x 1 x double> %0,
    ptr %1,
    <vscale x 1 x i64> %2,
    i64 %3)

  ret void
}

declare void @llvm.riscv.vsoxei.mask.nxv1f64.nxv1i64(
  <vscale x 1 x double>,
  ptr,
  <vscale x 1 x i64>,
  <vscale x 1 x i1>,
  i64);

define void @intrinsic_vsoxei_mask_v_nxv1f64_nxv1f64_nxv1i64(<vscale x 1 x double> %0, ptr %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f64_nxv1f64_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT:    vsoxei64.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.mask.nxv1f64.nxv1i64(
    <vscale x 1 x double> %0,
    ptr %1,
    <vscale x 1 x i64> %2,
    <vscale x 1 x i1> %3,
    i64 %4)

  ret void
}

declare void @llvm.riscv.vsoxei.nxv2f64.nxv2i64(
  <vscale x 2 x double>,
  ptr,
  <vscale x 2 x i64>,
  i64);

define void @intrinsic_vsoxei_v_nxv2f64_nxv2f64_nxv2i64(<vscale x 2 x double> %0, ptr %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f64_nxv2f64_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT:    vsoxei64.v v8, (a0), v10
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.nxv2f64.nxv2i64(
    <vscale x 2 x double> %0,
    ptr %1,
    <vscale x 2 x i64> %2,
    i64 %3)

  ret void
}

declare void @llvm.riscv.vsoxei.mask.nxv2f64.nxv2i64(
  <vscale x 2 x double>,
  ptr,
  <vscale x 2 x i64>,
  <vscale x 2 x i1>,
  i64);

define void @intrinsic_vsoxei_mask_v_nxv2f64_nxv2f64_nxv2i64(<vscale x 2 x double> %0, ptr %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f64_nxv2f64_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT:    vsoxei64.v v8, (a0), v10, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.mask.nxv2f64.nxv2i64(
    <vscale x 2 x double> %0,
    ptr %1,
    <vscale x 2 x i64> %2,
    <vscale x 2 x i1> %3,
    i64 %4)

  ret void
}

declare void @llvm.riscv.vsoxei.nxv4f64.nxv4i64(
  <vscale x 4 x double>,
  ptr,
  <vscale x 4 x i64>,
  i64);

define void @intrinsic_vsoxei_v_nxv4f64_nxv4f64_nxv4i64(<vscale x 4 x double> %0, ptr %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f64_nxv4f64_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
; CHECK-NEXT:    vsoxei64.v v8, (a0), v12
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.nxv4f64.nxv4i64(
    <vscale x 4 x double> %0,
    ptr %1,
    <vscale x 4 x i64> %2,
    i64 %3)

  ret void
}

declare void @llvm.riscv.vsoxei.mask.nxv4f64.nxv4i64(
  <vscale x 4 x double>,
  ptr,
  <vscale x 4 x i64>,
  <vscale x 4 x i1>,
  i64);

define void @intrinsic_vsoxei_mask_v_nxv4f64_nxv4f64_nxv4i64(<vscale x 4 x double> %0, ptr %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f64_nxv4f64_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
; CHECK-NEXT:    vsoxei64.v v8, (a0), v12, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.mask.nxv4f64.nxv4i64(
    <vscale x 4 x double> %0,
    ptr %1,
    <vscale x 4 x i64> %2,
    <vscale x 4 x i1> %3,
    i64 %4)

  ret void
}

declare void @llvm.riscv.vsoxei.nxv8f64.nxv8i64(
  <vscale x 8 x double>,
  ptr,
  <vscale x 8 x i64>,
  i64);

define void @intrinsic_vsoxei_v_nxv8f64_nxv8f64_nxv8i64(<vscale x 8 x double> %0, ptr %1, <vscale x 8 x i64> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f64_nxv8f64_nxv8i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
; CHECK-NEXT:    vsoxei64.v v8, (a0), v16
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.nxv8f64.nxv8i64(
    <vscale x 8 x double> %0,
    ptr %1,
    <vscale x 8 x i64> %2,
    i64 %3)

  ret void
}

declare void @llvm.riscv.vsoxei.mask.nxv8f64.nxv8i64(
  <vscale x 8 x double>,
  ptr,
  <vscale x 8 x i64>,
  <vscale x 8 x i1>,
  i64);

define void @intrinsic_vsoxei_mask_v_nxv8f64_nxv8f64_nxv8i64(<vscale x 8 x double> %0, ptr %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f64_nxv8f64_nxv8i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
; CHECK-NEXT:    vsoxei64.v v8, (a0), v16, v0.t
; CHECK-NEXT:    ret
entry:
  call void @llvm.riscv.vsoxei.mask.nxv8f64.nxv8i64(
    <vscale x 8 x double> %0,
    ptr %1,
    <vscale x 8 x i64> %2,
    <vscale x 8 x i1> %3,
    i64 %4)

  ret void
}