; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv64 -mattr=+v,+zvfhmin,+zvfbfmin -verify-machineinstrs \
; RUN:   < %s | FileCheck %s

; These intrinsics are not supported on RV32.

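; vloxei is the indexed-unordered load: each active element loads from
; (base + index[i]), with byte offsets taken from the index vector operand.
; The plain intrinsic's operands are passthru, base pointer, index vector,
; and VL; the mnemonic suffix (ei64) fixes the index EEW, while the data
; EEW/EMUL come from the vsetvli. (Informal summary for readers of this
; test; the RVV spec and RISCVVIntrinsics.td are authoritative.)
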
declare <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8.nxv1i64(
  <vscale x 1 x i8>,
  ptr,
  <vscale x 1 x i64>,
  i64);

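; Note on the expected codegen below: when the data EEW is narrower than the
; 64-bit index EEW, the backend treats the destination as earlyclobber with
; respect to the index operand, so the load writes a scratch register group
; (v9 here) and a vmv copies the result back into v8 for the return value.
; (Informal note; the overlap constraints live in the RVV spec and the
; RISC-V backend's pseudo definitions.)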
define <vscale x 1 x i8> @intrinsic_vloxei_v_nxv1i8_nxv1i8_nxv1i64(ptr %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vloxei_v_nxv1i8_nxv1i8_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vloxei64.v v9, (a0), v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8.nxv1i64(
    <vscale x 1 x i8> undef,
    ptr %0,
    <vscale x 1 x i64> %1,
    i64 %2)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vloxei.mask.nxv1i8.nxv1i64(
  <vscale x 1 x i8>,
  ptr,
  <vscale x 1 x i64>,
  <vscale x 1 x i1>,
  i64,
  i64);

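; Masked variants carry two extra operands: the mask (materialized in v0)
; and a policy immediate. The trailing `i64 1` requests tail-agnostic,
; mask-undisturbed, which is why the checks expect `ta, mu` in the vsetvli
; and the passthru arrives in v8. (Assumed policy encoding, matching the
; backend's convention: bit 0 = tail agnostic, bit 1 = mask agnostic.)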
define <vscale x 1 x i8> @intrinsic_vloxei_mask_v_nxv1i8_nxv1i8_nxv1i64(<vscale x 1 x i8> %0, ptr %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i8_nxv1i8_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT:    vloxei64.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vloxei.mask.nxv1i8.nxv1i64(
    <vscale x 1 x i8> %0,
    ptr %1,
    <vscale x 1 x i64> %2,
    <vscale x 1 x i1> %3,
    i64 %4, i64 1)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 2 x i8> @llvm.riscv.vloxei.nxv2i8.nxv2i64(
  <vscale x 2 x i8>,
  ptr,
  <vscale x 2 x i64>,
  i64);

define <vscale x 2 x i8> @intrinsic_vloxei_v_nxv2i8_nxv2i8_nxv2i64(ptr %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vloxei_v_nxv2i8_nxv2i8_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT:    vloxei64.v v10, (a0), v8
; CHECK-NEXT:    vmv1r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vloxei.nxv2i8.nxv2i64(
    <vscale x 2 x i8> undef,
    ptr %0,
    <vscale x 2 x i64> %1,
    i64 %2)

  ret <vscale x 2 x i8> %a
}

declare <vscale x 2 x i8> @llvm.riscv.vloxei.mask.nxv2i8.nxv2i64(
  <vscale x 2 x i8>,
  ptr,
  <vscale x 2 x i64>,
  <vscale x 2 x i1>,
  i64,
  i64);

define <vscale x 2 x i8> @intrinsic_vloxei_mask_v_nxv2i8_nxv2i8_nxv2i64(<vscale x 2 x i8> %0, ptr %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i8_nxv2i8_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT:    vloxei64.v v8, (a0), v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vloxei.mask.nxv2i8.nxv2i64(
    <vscale x 2 x i8> %0,
    ptr %1,
    <vscale x 2 x i64> %2,
    <vscale x 2 x i1> %3,
    i64 %4, i64 1)

  ret <vscale x 2 x i8> %a
}

declare <vscale x 4 x i8> @llvm.riscv.vloxei.nxv4i8.nxv4i64(
  <vscale x 4 x i8>,
  ptr,
  <vscale x 4 x i64>,
  i64);

define <vscale x 4 x i8> @intrinsic_vloxei_v_nxv4i8_nxv4i8_nxv4i64(ptr %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vloxei_v_nxv4i8_nxv4i8_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vloxei64.v v12, (a0), v8
; CHECK-NEXT:    vmv1r.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vloxei.nxv4i8.nxv4i64(
    <vscale x 4 x i8> undef,
    ptr %0,
    <vscale x 4 x i64> %1,
    i64 %2)

  ret <vscale x 4 x i8> %a
}

declare <vscale x 4 x i8> @llvm.riscv.vloxei.mask.nxv4i8.nxv4i64(
  <vscale x 4 x i8>,
  ptr,
  <vscale x 4 x i64>,
  <vscale x 4 x i1>,
  i64,
  i64);

define <vscale x 4 x i8> @intrinsic_vloxei_mask_v_nxv4i8_nxv4i8_nxv4i64(<vscale x 4 x i8> %0, ptr %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i8_nxv4i8_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT:    vloxei64.v v8, (a0), v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vloxei.mask.nxv4i8.nxv4i64(
    <vscale x 4 x i8> %0,
    ptr %1,
    <vscale x 4 x i64> %2,
    <vscale x 4 x i1> %3,
    i64 %4, i64 1)

  ret <vscale x 4 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vloxei.nxv8i8.nxv8i64(
  <vscale x 8 x i8>,
  ptr,
  <vscale x 8 x i64>,
  i64);

define <vscale x 8 x i8> @intrinsic_vloxei_v_nxv8i8_nxv8i8_nxv8i64(ptr %0, <vscale x 8 x i64> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vloxei_v_nxv8i8_nxv8i8_nxv8i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT:    vloxei64.v v16, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vloxei.nxv8i8.nxv8i64(
    <vscale x 8 x i8> undef,
    ptr %0,
    <vscale x 8 x i64> %1,
    i64 %2)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vloxei.mask.nxv8i8.nxv8i64(
  <vscale x 8 x i8>,
  ptr,
  <vscale x 8 x i64>,
  <vscale x 8 x i1>,
  i64,
  i64);

define <vscale x 8 x i8> @intrinsic_vloxei_mask_v_nxv8i8_nxv8i8_nxv8i64(<vscale x 8 x i8> %0, ptr %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i8_nxv8i8_nxv8i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT:    vloxei64.v v8, (a0), v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vloxei.mask.nxv8i8.nxv8i64(
    <vscale x 8 x i8> %0,
    ptr %1,
    <vscale x 8 x i64> %2,
    <vscale x 8 x i1> %3,
    i64 %4, i64 1)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vloxei.nxv1i16.nxv1i64(
  <vscale x 1 x i16>,
  ptr,
  <vscale x 1 x i64>,
  i64);

define <vscale x 1 x i16> @intrinsic_vloxei_v_nxv1i16_nxv1i16_nxv1i64(ptr %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vloxei_v_nxv1i16_nxv1i16_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vloxei64.v v9, (a0), v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vloxei.nxv1i16.nxv1i64(
    <vscale x 1 x i16> undef,
    ptr %0,
    <vscale x 1 x i64> %1,
    i64 %2)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vloxei.mask.nxv1i16.nxv1i64(
  <vscale x 1 x i16>,
  ptr,
  <vscale x 1 x i64>,
  <vscale x 1 x i1>,
  i64,
  i64);

define <vscale x 1 x i16> @intrinsic_vloxei_mask_v_nxv1i16_nxv1i16_nxv1i64(<vscale x 1 x i16> %0, ptr %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i16_nxv1i16_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT:    vloxei64.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vloxei.mask.nxv1i16.nxv1i64(
    <vscale x 1 x i16> %0,
    ptr %1,
    <vscale x 1 x i64> %2,
    <vscale x 1 x i1> %3,
    i64 %4, i64 1)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vloxei.nxv2i16.nxv2i64(
  <vscale x 2 x i16>,
  ptr,
  <vscale x 2 x i64>,
  i64);

define <vscale x 2 x i16> @intrinsic_vloxei_v_nxv2i16_nxv2i16_nxv2i64(ptr %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vloxei_v_nxv2i16_nxv2i16_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT:    vloxei64.v v10, (a0), v8
; CHECK-NEXT:    vmv1r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vloxei.nxv2i16.nxv2i64(
    <vscale x 2 x i16> undef,
    ptr %0,
    <vscale x 2 x i64> %1,
    i64 %2)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vloxei.mask.nxv2i16.nxv2i64(
  <vscale x 2 x i16>,
  ptr,
  <vscale x 2 x i64>,
  <vscale x 2 x i1>,
  i64,
  i64);

define <vscale x 2 x i16> @intrinsic_vloxei_mask_v_nxv2i16_nxv2i16_nxv2i64(<vscale x 2 x i16> %0, ptr %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i16_nxv2i16_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT:    vloxei64.v v8, (a0), v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vloxei.mask.nxv2i16.nxv2i64(
    <vscale x 2 x i16> %0,
    ptr %1,
    <vscale x 2 x i64> %2,
    <vscale x 2 x i1> %3,
    i64 %4, i64 1)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vloxei.nxv4i16.nxv4i64(
  <vscale x 4 x i16>,
  ptr,
  <vscale x 4 x i64>,
  i64);

define <vscale x 4 x i16> @intrinsic_vloxei_v_nxv4i16_nxv4i16_nxv4i64(ptr %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vloxei_v_nxv4i16_nxv4i16_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vloxei64.v v12, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vloxei.nxv4i16.nxv4i64(
    <vscale x 4 x i16> undef,
    ptr %0,
    <vscale x 4 x i64> %1,
    i64 %2)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vloxei.mask.nxv4i16.nxv4i64(
  <vscale x 4 x i16>,
  ptr,
  <vscale x 4 x i64>,
  <vscale x 4 x i1>,
  i64,
  i64);

define <vscale x 4 x i16> @intrinsic_vloxei_mask_v_nxv4i16_nxv4i16_nxv4i64(<vscale x 4 x i16> %0, ptr %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i16_nxv4i16_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT:    vloxei64.v v8, (a0), v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vloxei.mask.nxv4i16.nxv4i64(
    <vscale x 4 x i16> %0,
    ptr %1,
    <vscale x 4 x i64> %2,
    <vscale x 4 x i1> %3,
    i64 %4, i64 1)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vloxei.nxv8i16.nxv8i64(
  <vscale x 8 x i16>,
  ptr,
  <vscale x 8 x i64>,
  i64);

define <vscale x 8 x i16> @intrinsic_vloxei_v_nxv8i16_nxv8i16_nxv8i64(ptr %0, <vscale x 8 x i64> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vloxei_v_nxv8i16_nxv8i16_nxv8i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT:    vloxei64.v v16, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vloxei.nxv8i16.nxv8i64(
    <vscale x 8 x i16> undef,
    ptr %0,
    <vscale x 8 x i64> %1,
    i64 %2)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vloxei.mask.nxv8i16.nxv8i64(
  <vscale x 8 x i16>,
  ptr,
  <vscale x 8 x i64>,
  <vscale x 8 x i1>,
  i64,
  i64);

define <vscale x 8 x i16> @intrinsic_vloxei_mask_v_nxv8i16_nxv8i16_nxv8i64(<vscale x 8 x i16> %0, ptr %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i16_nxv8i16_nxv8i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT:    vloxei64.v v8, (a0), v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vloxei.mask.nxv8i16.nxv8i64(
    <vscale x 8 x i16> %0,
    ptr %1,
    <vscale x 8 x i64> %2,
    <vscale x 8 x i1> %3,
    i64 %4, i64 1)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vloxei.nxv1i32.nxv1i64(
  <vscale x 1 x i32>,
  ptr,
  <vscale x 1 x i64>,
  i64);

define <vscale x 1 x i32> @intrinsic_vloxei_v_nxv1i32_nxv1i32_nxv1i64(ptr %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vloxei_v_nxv1i32_nxv1i32_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    vloxei64.v v9, (a0), v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vloxei.nxv1i32.nxv1i64(
    <vscale x 1 x i32> undef,
    ptr %0,
    <vscale x 1 x i64> %1,
    i64 %2)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vloxei.mask.nxv1i32.nxv1i64(
  <vscale x 1 x i32>,
  ptr,
  <vscale x 1 x i64>,
  <vscale x 1 x i1>,
  i64,
  i64);

define <vscale x 1 x i32> @intrinsic_vloxei_mask_v_nxv1i32_nxv1i32_nxv1i64(<vscale x 1 x i32> %0, ptr %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i32_nxv1i32_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT:    vloxei64.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vloxei.mask.nxv1i32.nxv1i64(
    <vscale x 1 x i32> %0,
    ptr %1,
    <vscale x 1 x i64> %2,
    <vscale x 1 x i1> %3,
    i64 %4, i64 1)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vloxei.nxv2i32.nxv2i64(
  <vscale x 2 x i32>,
  ptr,
  <vscale x 2 x i64>,
  i64);

define <vscale x 2 x i32> @intrinsic_vloxei_v_nxv2i32_nxv2i32_nxv2i64(ptr %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vloxei_v_nxv2i32_nxv2i32_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vloxei64.v v10, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vloxei.nxv2i32.nxv2i64(
    <vscale x 2 x i32> undef,
    ptr %0,
    <vscale x 2 x i64> %1,
    i64 %2)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vloxei.mask.nxv2i32.nxv2i64(
  <vscale x 2 x i32>,
  ptr,
  <vscale x 2 x i64>,
  <vscale x 2 x i1>,
  i64,
  i64);

define <vscale x 2 x i32> @intrinsic_vloxei_mask_v_nxv2i32_nxv2i32_nxv2i64(<vscale x 2 x i32> %0, ptr %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i32_nxv2i32_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT:    vloxei64.v v8, (a0), v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vloxei.mask.nxv2i32.nxv2i64(
    <vscale x 2 x i32> %0,
    ptr %1,
    <vscale x 2 x i64> %2,
    <vscale x 2 x i1> %3,
    i64 %4, i64 1)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vloxei.nxv4i32.nxv4i64(
  <vscale x 4 x i32>,
  ptr,
  <vscale x 4 x i64>,
  i64);

define <vscale x 4 x i32> @intrinsic_vloxei_v_nxv4i32_nxv4i32_nxv4i64(ptr %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vloxei_v_nxv4i32_nxv4i32_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT:    vloxei64.v v12, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vloxei.nxv4i32.nxv4i64(
    <vscale x 4 x i32> undef,
    ptr %0,
    <vscale x 4 x i64> %1,
    i64 %2)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vloxei.mask.nxv4i32.nxv4i64(
  <vscale x 4 x i32>,
  ptr,
  <vscale x 4 x i64>,
  <vscale x 4 x i1>,
  i64,
  i64);

define <vscale x 4 x i32> @intrinsic_vloxei_mask_v_nxv4i32_nxv4i32_nxv4i64(<vscale x 4 x i32> %0, ptr %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i32_nxv4i32_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT:    vloxei64.v v8, (a0), v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vloxei.mask.nxv4i32.nxv4i64(
    <vscale x 4 x i32> %0,
    ptr %1,
    <vscale x 4 x i64> %2,
    <vscale x 4 x i1> %3,
    i64 %4, i64 1)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vloxei.nxv8i32.nxv8i64(
  <vscale x 8 x i32>,
  ptr,
  <vscale x 8 x i64>,
  i64);

define <vscale x 8 x i32> @intrinsic_vloxei_v_nxv8i32_nxv8i32_nxv8i64(ptr %0, <vscale x 8 x i64> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vloxei_v_nxv8i32_nxv8i32_nxv8i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT:    vloxei64.v v16, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vloxei.nxv8i32.nxv8i64(
    <vscale x 8 x i32> undef,
    ptr %0,
    <vscale x 8 x i64> %1,
    i64 %2)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vloxei.mask.nxv8i32.nxv8i64(
  <vscale x 8 x i32>,
  ptr,
  <vscale x 8 x i64>,
  <vscale x 8 x i1>,
  i64,
  i64);

define <vscale x 8 x i32> @intrinsic_vloxei_mask_v_nxv8i32_nxv8i32_nxv8i64(<vscale x 8 x i32> %0, ptr %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i32_nxv8i32_nxv8i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT:    vloxei64.v v8, (a0), v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vloxei.mask.nxv8i32.nxv8i64(
    <vscale x 8 x i32> %0,
    ptr %1,
    <vscale x 8 x i64> %2,
    <vscale x 8 x i1> %3,
    i64 %4, i64 1)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vloxei.nxv1i64.nxv1i64(
  <vscale x 1 x i64>,
  ptr,
  <vscale x 1 x i64>,
  i64);

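; With 64-bit data the data EEW equals the index EEW, so the destination may
; overlap the index register group: the loads below read and write v8 in
; place, and no scratch register or vmv copy is needed.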
define <vscale x 1 x i64> @intrinsic_vloxei_v_nxv1i64_nxv1i64_nxv1i64(ptr %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vloxei_v_nxv1i64_nxv1i64_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT:    vloxei64.v v8, (a0), v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vloxei.nxv1i64.nxv1i64(
    <vscale x 1 x i64> undef,
    ptr %0,
    <vscale x 1 x i64> %1,
    i64 %2)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vloxei.mask.nxv1i64.nxv1i64(
  <vscale x 1 x i64>,
  ptr,
  <vscale x 1 x i64>,
  <vscale x 1 x i1>,
  i64,
  i64);

define <vscale x 1 x i64> @intrinsic_vloxei_mask_v_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, ptr %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i64_nxv1i64_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT:    vloxei64.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vloxei.mask.nxv1i64.nxv1i64(
    <vscale x 1 x i64> %0,
    ptr %1,
    <vscale x 1 x i64> %2,
    <vscale x 1 x i1> %3,
    i64 %4, i64 1)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vloxei.nxv2i64.nxv2i64(
  <vscale x 2 x i64>,
  ptr,
  <vscale x 2 x i64>,
  i64);

define <vscale x 2 x i64> @intrinsic_vloxei_v_nxv2i64_nxv2i64_nxv2i64(ptr %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vloxei_v_nxv2i64_nxv2i64_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT:    vloxei64.v v8, (a0), v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vloxei.nxv2i64.nxv2i64(
    <vscale x 2 x i64> undef,
    ptr %0,
    <vscale x 2 x i64> %1,
    i64 %2)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vloxei.mask.nxv2i64.nxv2i64(
  <vscale x 2 x i64>,
  ptr,
  <vscale x 2 x i64>,
  <vscale x 2 x i1>,
  i64,
  i64);

define <vscale x 2 x i64> @intrinsic_vloxei_mask_v_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, ptr %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i64_nxv2i64_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT:    vloxei64.v v8, (a0), v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vloxei.mask.nxv2i64.nxv2i64(
    <vscale x 2 x i64> %0,
    ptr %1,
    <vscale x 2 x i64> %2,
    <vscale x 2 x i1> %3,
    i64 %4, i64 1)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vloxei.nxv4i64.nxv4i64(
  <vscale x 4 x i64>,
  ptr,
  <vscale x 4 x i64>,
  i64);

define <vscale x 4 x i64> @intrinsic_vloxei_v_nxv4i64_nxv4i64_nxv4i64(ptr %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vloxei_v_nxv4i64_nxv4i64_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
; CHECK-NEXT:    vloxei64.v v8, (a0), v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vloxei.nxv4i64.nxv4i64(
    <vscale x 4 x i64> undef,
    ptr %0,
    <vscale x 4 x i64> %1,
    i64 %2)

  ret <vscale x 4 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vloxei.mask.nxv4i64.nxv4i64(
  <vscale x 4 x i64>,
  ptr,
  <vscale x 4 x i64>,
  <vscale x 4 x i1>,
  i64,
  i64);

define <vscale x 4 x i64> @intrinsic_vloxei_mask_v_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, ptr %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i64_nxv4i64_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
; CHECK-NEXT:    vloxei64.v v8, (a0), v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vloxei.mask.nxv4i64.nxv4i64(
    <vscale x 4 x i64> %0,
    ptr %1,
    <vscale x 4 x i64> %2,
    <vscale x 4 x i1> %3,
    i64 %4, i64 1)

  ret <vscale x 4 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.vloxei.nxv8i64.nxv8i64(
  <vscale x 8 x i64>,
  ptr,
  <vscale x 8 x i64>,
  i64);

define <vscale x 8 x i64> @intrinsic_vloxei_v_nxv8i64_nxv8i64_nxv8i64(ptr %0, <vscale x 8 x i64> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vloxei_v_nxv8i64_nxv8i64_nxv8i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
; CHECK-NEXT:    vloxei64.v v8, (a0), v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vloxei.nxv8i64.nxv8i64(
    <vscale x 8 x i64> undef,
    ptr %0,
    <vscale x 8 x i64> %1,
    i64 %2)

  ret <vscale x 8 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.vloxei.mask.nxv8i64.nxv8i64(
  <vscale x 8 x i64>,
  ptr,
  <vscale x 8 x i64>,
  <vscale x 8 x i1>,
  i64,
  i64);

define <vscale x 8 x i64> @intrinsic_vloxei_mask_v_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, ptr %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i64_nxv8i64_nxv8i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
; CHECK-NEXT:    vloxei64.v v8, (a0), v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vloxei.mask.nxv8i64.nxv8i64(
    <vscale x 8 x i64> %0,
    ptr %1,
    <vscale x 8 x i64> %2,
    <vscale x 8 x i1> %3,
    i64 %4, i64 1)

  ret <vscale x 8 x i64> %a
}

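; The f16/bf16 tests below only require the +zvfhmin/+zvfbfmin features from
; the RUN line: indexed loads move raw bits, so no half/bfloat arithmetic
; support is needed. (Informal note on the feature choice in the RUN line.)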
declare <vscale x 1 x half> @llvm.riscv.vloxei.nxv1f16.nxv1i64(
  <vscale x 1 x half>,
  ptr,
  <vscale x 1 x i64>,
  i64);

define <vscale x 1 x half> @intrinsic_vloxei_v_nxv1f16_nxv1f16_nxv1i64(ptr %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vloxei_v_nxv1f16_nxv1f16_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vloxei64.v v9, (a0), v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vloxei.nxv1f16.nxv1i64(
    <vscale x 1 x half> undef,
    ptr %0,
    <vscale x 1 x i64> %1,
    i64 %2)

  ret <vscale x 1 x half> %a
}

declare <vscale x 1 x half> @llvm.riscv.vloxei.mask.nxv1f16.nxv1i64(
  <vscale x 1 x half>,
  ptr,
  <vscale x 1 x i64>,
  <vscale x 1 x i1>,
  i64,
  i64);

define <vscale x 1 x half> @intrinsic_vloxei_mask_v_nxv1f16_nxv1f16_nxv1i64(<vscale x 1 x half> %0, ptr %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f16_nxv1f16_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT:    vloxei64.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vloxei.mask.nxv1f16.nxv1i64(
    <vscale x 1 x half> %0,
    ptr %1,
    <vscale x 1 x i64> %2,
    <vscale x 1 x i1> %3,
    i64 %4, i64 1)

  ret <vscale x 1 x half> %a
}

declare <vscale x 2 x half> @llvm.riscv.vloxei.nxv2f16.nxv2i64(
  <vscale x 2 x half>,
  ptr,
  <vscale x 2 x i64>,
  i64);

define <vscale x 2 x half> @intrinsic_vloxei_v_nxv2f16_nxv2f16_nxv2i64(ptr %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vloxei_v_nxv2f16_nxv2f16_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT:    vloxei64.v v10, (a0), v8
; CHECK-NEXT:    vmv1r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x half> @llvm.riscv.vloxei.nxv2f16.nxv2i64(
    <vscale x 2 x half> undef,
    ptr %0,
    <vscale x 2 x i64> %1,
    i64 %2)

  ret <vscale x 2 x half> %a
}

declare <vscale x 2 x half> @llvm.riscv.vloxei.mask.nxv2f16.nxv2i64(
  <vscale x 2 x half>,
  ptr,
  <vscale x 2 x i64>,
  <vscale x 2 x i1>,
  i64,
  i64);

define <vscale x 2 x half> @intrinsic_vloxei_mask_v_nxv2f16_nxv2f16_nxv2i64(<vscale x 2 x half> %0, ptr %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f16_nxv2f16_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT:    vloxei64.v v8, (a0), v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x half> @llvm.riscv.vloxei.mask.nxv2f16.nxv2i64(
    <vscale x 2 x half> %0,
    ptr %1,
    <vscale x 2 x i64> %2,
    <vscale x 2 x i1> %3,
    i64 %4, i64 1)

  ret <vscale x 2 x half> %a
}

declare <vscale x 4 x half> @llvm.riscv.vloxei.nxv4f16.nxv4i64(
  <vscale x 4 x half>,
  ptr,
  <vscale x 4 x i64>,
  i64);

define <vscale x 4 x half> @intrinsic_vloxei_v_nxv4f16_nxv4f16_nxv4i64(ptr %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vloxei_v_nxv4f16_nxv4f16_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vloxei64.v v12, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vloxei.nxv4f16.nxv4i64(
    <vscale x 4 x half> undef,
    ptr %0,
    <vscale x 4 x i64> %1,
    i64 %2)

  ret <vscale x 4 x half> %a
}

declare <vscale x 4 x half> @llvm.riscv.vloxei.mask.nxv4f16.nxv4i64(
  <vscale x 4 x half>,
  ptr,
  <vscale x 4 x i64>,
  <vscale x 4 x i1>,
  i64,
  i64);

define <vscale x 4 x half> @intrinsic_vloxei_mask_v_nxv4f16_nxv4f16_nxv4i64(<vscale x 4 x half> %0, ptr %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f16_nxv4f16_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT:    vloxei64.v v8, (a0), v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vloxei.mask.nxv4f16.nxv4i64(
    <vscale x 4 x half> %0,
    ptr %1,
    <vscale x 4 x i64> %2,
    <vscale x 4 x i1> %3,
    i64 %4, i64 1)

  ret <vscale x 4 x half> %a
}

declare <vscale x 8 x half> @llvm.riscv.vloxei.nxv8f16.nxv8i64(
  <vscale x 8 x half>,
  ptr,
  <vscale x 8 x i64>,
  i64);

define <vscale x 8 x half> @intrinsic_vloxei_v_nxv8f16_nxv8f16_nxv8i64(ptr %0, <vscale x 8 x i64> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vloxei_v_nxv8f16_nxv8f16_nxv8i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT:    vloxei64.v v16, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x half> @llvm.riscv.vloxei.nxv8f16.nxv8i64(
    <vscale x 8 x half> undef,
    ptr %0,
    <vscale x 8 x i64> %1,
    i64 %2)

  ret <vscale x 8 x half> %a
}

declare <vscale x 8 x half> @llvm.riscv.vloxei.mask.nxv8f16.nxv8i64(
  <vscale x 8 x half>,
  ptr,
  <vscale x 8 x i64>,
  <vscale x 8 x i1>,
  i64,
  i64);

define <vscale x 8 x half> @intrinsic_vloxei_mask_v_nxv8f16_nxv8f16_nxv8i64(<vscale x 8 x half> %0, ptr %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f16_nxv8f16_nxv8i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT:    vloxei64.v v8, (a0), v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x half> @llvm.riscv.vloxei.mask.nxv8f16.nxv8i64(
    <vscale x 8 x half> %0,
    ptr %1,
    <vscale x 8 x i64> %2,
    <vscale x 8 x i1> %3,
    i64 %4, i64 1)

  ret <vscale x 8 x half> %a
}

declare <vscale x 1 x bfloat> @llvm.riscv.vloxei.nxv1bf16.nxv1i64(
  <vscale x 1 x bfloat>,
  ptr,
  <vscale x 1 x i64>,
  i64);

define <vscale x 1 x bfloat> @intrinsic_vloxei_v_nxv1bf16_nxv1bf16_nxv1i64(ptr %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vloxei_v_nxv1bf16_nxv1bf16_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vloxei64.v v9, (a0), v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x bfloat> @llvm.riscv.vloxei.nxv1bf16.nxv1i64(
    <vscale x 1 x bfloat> undef,
    ptr %0,
    <vscale x 1 x i64> %1,
    i64 %2)

  ret <vscale x 1 x bfloat> %a
}

declare <vscale x 1 x bfloat> @llvm.riscv.vloxei.mask.nxv1bf16.nxv1i64(
  <vscale x 1 x bfloat>,
  ptr,
  <vscale x 1 x i64>,
  <vscale x 1 x i1>,
  i64,
  i64);

define <vscale x 1 x bfloat> @intrinsic_vloxei_mask_v_nxv1bf16_nxv1bf16_nxv1i64(<vscale x 1 x bfloat> %0, ptr %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1bf16_nxv1bf16_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT:    vloxei64.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x bfloat> @llvm.riscv.vloxei.mask.nxv1bf16.nxv1i64(
    <vscale x 1 x bfloat> %0,
    ptr %1,
    <vscale x 1 x i64> %2,
    <vscale x 1 x i1> %3,
    i64 %4, i64 1)

  ret <vscale x 1 x bfloat> %a
}

declare <vscale x 2 x bfloat> @llvm.riscv.vloxei.nxv2bf16.nxv2i64(
  <vscale x 2 x bfloat>,
  ptr,
  <vscale x 2 x i64>,
  i64);

define <vscale x 2 x bfloat> @intrinsic_vloxei_v_nxv2bf16_nxv2bf16_nxv2i64(ptr %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vloxei_v_nxv2bf16_nxv2bf16_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT:    vloxei64.v v10, (a0), v8
; CHECK-NEXT:    vmv1r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x bfloat> @llvm.riscv.vloxei.nxv2bf16.nxv2i64(
    <vscale x 2 x bfloat> undef,
    ptr %0,
    <vscale x 2 x i64> %1,
    i64 %2)

  ret <vscale x 2 x bfloat> %a
}

declare <vscale x 2 x bfloat> @llvm.riscv.vloxei.mask.nxv2bf16.nxv2i64(
  <vscale x 2 x bfloat>,
  ptr,
  <vscale x 2 x i64>,
  <vscale x 2 x i1>,
  i64,
  i64);

define <vscale x 2 x bfloat> @intrinsic_vloxei_mask_v_nxv2bf16_nxv2bf16_nxv2i64(<vscale x 2 x bfloat> %0, ptr %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2bf16_nxv2bf16_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT:    vloxei64.v v8, (a0), v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x bfloat> @llvm.riscv.vloxei.mask.nxv2bf16.nxv2i64(
    <vscale x 2 x bfloat> %0,
    ptr %1,
    <vscale x 2 x i64> %2,
    <vscale x 2 x i1> %3,
    i64 %4, i64 1)

  ret <vscale x 2 x bfloat> %a
}

declare <vscale x 4 x bfloat> @llvm.riscv.vloxei.nxv4bf16.nxv4i64(
  <vscale x 4 x bfloat>,
  ptr,
  <vscale x 4 x i64>,
  i64);

define <vscale x 4 x bfloat> @intrinsic_vloxei_v_nxv4bf16_nxv4bf16_nxv4i64(ptr %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vloxei_v_nxv4bf16_nxv4bf16_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vloxei64.v v12, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x bfloat> @llvm.riscv.vloxei.nxv4bf16.nxv4i64(
    <vscale x 4 x bfloat> undef,
    ptr %0,
    <vscale x 4 x i64> %1,
    i64 %2)

  ret <vscale x 4 x bfloat> %a
}

declare <vscale x 4 x bfloat> @llvm.riscv.vloxei.mask.nxv4bf16.nxv4i64(
  <vscale x 4 x bfloat>,
  ptr,
  <vscale x 4 x i64>,
  <vscale x 4 x i1>,
  i64,
  i64);

define <vscale x 4 x bfloat> @intrinsic_vloxei_mask_v_nxv4bf16_nxv4bf16_nxv4i64(<vscale x 4 x bfloat> %0, ptr %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4bf16_nxv4bf16_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT:    vloxei64.v v8, (a0), v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x bfloat> @llvm.riscv.vloxei.mask.nxv4bf16.nxv4i64(
    <vscale x 4 x bfloat> %0,
    ptr %1,
    <vscale x 4 x i64> %2,
    <vscale x 4 x i1> %3,
    i64 %4, i64 1)

  ret <vscale x 4 x bfloat> %a
}

declare <vscale x 8 x bfloat> @llvm.riscv.vloxei.nxv8bf16.nxv8i64(
  <vscale x 8 x bfloat>,
  ptr,
  <vscale x 8 x i64>,
  i64);

define <vscale x 8 x bfloat> @intrinsic_vloxei_v_nxv8bf16_nxv8bf16_nxv8i64(ptr %0, <vscale x 8 x i64> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vloxei_v_nxv8bf16_nxv8bf16_nxv8i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT:    vloxei64.v v16, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x bfloat> @llvm.riscv.vloxei.nxv8bf16.nxv8i64(
    <vscale x 8 x bfloat> undef,
    ptr %0,
    <vscale x 8 x i64> %1,
    i64 %2)

  ret <vscale x 8 x bfloat> %a
}

declare <vscale x 8 x bfloat> @llvm.riscv.vloxei.mask.nxv8bf16.nxv8i64(
  <vscale x 8 x bfloat>,
  ptr,
  <vscale x 8 x i64>,
  <vscale x 8 x i1>,
  i64,
  i64);

define <vscale x 8 x bfloat> @intrinsic_vloxei_mask_v_nxv8bf16_nxv8bf16_nxv8i64(<vscale x 8 x bfloat> %0, ptr %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8bf16_nxv8bf16_nxv8i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT:    vloxei64.v v8, (a0), v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x bfloat> @llvm.riscv.vloxei.mask.nxv8bf16.nxv8i64(
    <vscale x 8 x bfloat> %0,
    ptr %1,
    <vscale x 8 x i64> %2,
    <vscale x 8 x i1> %3,
    i64 %4, i64 1)

  ret <vscale x 8 x bfloat> %a
}

declare <vscale x 1 x float> @llvm.riscv.vloxei.nxv1f32.nxv1i64(
  <vscale x 1 x float>,
  ptr,
  <vscale x 1 x i64>,
  i64);

define <vscale x 1 x float> @intrinsic_vloxei_v_nxv1f32_nxv1f32_nxv1i64(ptr %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vloxei_v_nxv1f32_nxv1f32_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    vloxei64.v v9, (a0), v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vloxei.nxv1f32.nxv1i64(
    <vscale x 1 x float> undef,
    ptr %0,
    <vscale x 1 x i64> %1,
    i64 %2)

  ret <vscale x 1 x float> %a
}

declare <vscale x 1 x float> @llvm.riscv.vloxei.mask.nxv1f32.nxv1i64(
  <vscale x 1 x float>,
  ptr,
  <vscale x 1 x i64>,
  <vscale x 1 x i1>,
  i64,
  i64);

define <vscale x 1 x float> @intrinsic_vloxei_mask_v_nxv1f32_nxv1f32_nxv1i64(<vscale x 1 x float> %0, ptr %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f32_nxv1f32_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT:    vloxei64.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vloxei.mask.nxv1f32.nxv1i64(
    <vscale x 1 x float> %0,
    ptr %1,
    <vscale x 1 x i64> %2,
    <vscale x 1 x i1> %3,
    i64 %4, i64 1)

  ret <vscale x 1 x float> %a
}

declare <vscale x 2 x float> @llvm.riscv.vloxei.nxv2f32.nxv2i64(
  <vscale x 2 x float>,
  ptr,
  <vscale x 2 x i64>,
  i64);

define <vscale x 2 x float> @intrinsic_vloxei_v_nxv2f32_nxv2f32_nxv2i64(ptr %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vloxei_v_nxv2f32_nxv2f32_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vloxei64.v v10, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vloxei.nxv2f32.nxv2i64(
    <vscale x 2 x float> undef,
    ptr %0,
    <vscale x 2 x i64> %1,
    i64 %2)

  ret <vscale x 2 x float> %a
}

declare <vscale x 2 x float> @llvm.riscv.vloxei.mask.nxv2f32.nxv2i64(
  <vscale x 2 x float>,
  ptr,
  <vscale x 2 x i64>,
  <vscale x 2 x i1>,
  i64,
  i64);

define <vscale x 2 x float> @intrinsic_vloxei_mask_v_nxv2f32_nxv2f32_nxv2i64(<vscale x 2 x float> %0, ptr %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f32_nxv2f32_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT:    vloxei64.v v8, (a0), v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vloxei.mask.nxv2f32.nxv2i64(
    <vscale x 2 x float> %0,
    ptr %1,
    <vscale x 2 x i64> %2,
    <vscale x 2 x i1> %3,
    i64 %4, i64 1)

  ret <vscale x 2 x float> %a
}

declare <vscale x 4 x float> @llvm.riscv.vloxei.nxv4f32.nxv4i64(
  <vscale x 4 x float>,
  ptr,
  <vscale x 4 x i64>,
  i64);

define <vscale x 4 x float> @intrinsic_vloxei_v_nxv4f32_nxv4f32_nxv4i64(ptr %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vloxei_v_nxv4f32_nxv4f32_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT:    vloxei64.v v12, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x float> @llvm.riscv.vloxei.nxv4f32.nxv4i64(
    <vscale x 4 x float> undef,
    ptr %0,
    <vscale x 4 x i64> %1,
    i64 %2)

  ret <vscale x 4 x float> %a
}

declare <vscale x 4 x float> @llvm.riscv.vloxei.mask.nxv4f32.nxv4i64(
  <vscale x 4 x float>,
  ptr,
  <vscale x 4 x i64>,
  <vscale x 4 x i1>,
  i64,
  i64);

define <vscale x 4 x float> @intrinsic_vloxei_mask_v_nxv4f32_nxv4f32_nxv4i64(<vscale x 4 x float> %0, ptr %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f32_nxv4f32_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT:    vloxei64.v v8, (a0), v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x float> @llvm.riscv.vloxei.mask.nxv4f32.nxv4i64(
    <vscale x 4 x float> %0,
    ptr %1,
    <vscale x 4 x i64> %2,
    <vscale x 4 x i1> %3,
    i64 %4, i64 1)

  ret <vscale x 4 x float> %a
}

declare <vscale x 8 x float> @llvm.riscv.vloxei.nxv8f32.nxv8i64(
  <vscale x 8 x float>,
  ptr,
  <vscale x 8 x i64>,
  i64);

define <vscale x 8 x float> @intrinsic_vloxei_v_nxv8f32_nxv8f32_nxv8i64(ptr %0, <vscale x 8 x i64> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vloxei_v_nxv8f32_nxv8f32_nxv8i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT:    vloxei64.v v16, (a0), v8
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vloxei.nxv8f32.nxv8i64(
    <vscale x 8 x float> undef,
    ptr %0,
    <vscale x 8 x i64> %1,
    i64 %2)

  ret <vscale x 8 x float> %a
}

declare <vscale x 8 x float> @llvm.riscv.vloxei.mask.nxv8f32.nxv8i64(
  <vscale x 8 x float>,
  ptr,
  <vscale x 8 x i64>,
  <vscale x 8 x i1>,
  i64,
  i64);

define <vscale x 8 x float> @intrinsic_vloxei_mask_v_nxv8f32_nxv8f32_nxv8i64(<vscale x 8 x float> %0, ptr %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f32_nxv8f32_nxv8i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT:    vloxei64.v v8, (a0), v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vloxei.mask.nxv8f32.nxv8i64(
    <vscale x 8 x float> %0,
    ptr %1,
    <vscale x 8 x i64> %2,
    <vscale x 8 x i1> %3,
    i64 %4, i64 1)

  ret <vscale x 8 x float> %a
}

declare <vscale x 1 x double> @llvm.riscv.vloxei.nxv1f64.nxv1i64(
  <vscale x 1 x double>,
  ptr,
  <vscale x 1 x i64>,
  i64);

define <vscale x 1 x double> @intrinsic_vloxei_v_nxv1f64_nxv1f64_nxv1i64(ptr %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vloxei_v_nxv1f64_nxv1f64_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT:    vloxei64.v v8, (a0), v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vloxei.nxv1f64.nxv1i64(
    <vscale x 1 x double> undef,
    ptr %0,
    <vscale x 1 x i64> %1,
    i64 %2)

  ret <vscale x 1 x double> %a
}

declare <vscale x 1 x double> @llvm.riscv.vloxei.mask.nxv1f64.nxv1i64(
  <vscale x 1 x double>,
  ptr,
  <vscale x 1 x i64>,
  <vscale x 1 x i1>,
  i64,
  i64);

define <vscale x 1 x double> @intrinsic_vloxei_mask_v_nxv1f64_nxv1f64_nxv1i64(<vscale x 1 x double> %0, ptr %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f64_nxv1f64_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT:    vloxei64.v v8, (a0), v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vloxei.mask.nxv1f64.nxv1i64(
    <vscale x 1 x double> %0,
    ptr %1,
    <vscale x 1 x i64> %2,
    <vscale x 1 x i1> %3,
    i64 %4, i64 1)

  ret <vscale x 1 x double> %a
}

declare <vscale x 2 x double> @llvm.riscv.vloxei.nxv2f64.nxv2i64(
  <vscale x 2 x double>,
  ptr,
  <vscale x 2 x i64>,
  i64);

define <vscale x 2 x double> @intrinsic_vloxei_v_nxv2f64_nxv2f64_nxv2i64(ptr %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vloxei_v_nxv2f64_nxv2f64_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT:    vloxei64.v v8, (a0), v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x double> @llvm.riscv.vloxei.nxv2f64.nxv2i64(
    <vscale x 2 x double> undef,
    ptr %0,
    <vscale x 2 x i64> %1,
    i64 %2)

  ret <vscale x 2 x double> %a
}

declare <vscale x 2 x double> @llvm.riscv.vloxei.mask.nxv2f64.nxv2i64(
  <vscale x 2 x double>,
  ptr,
  <vscale x 2 x i64>,
  <vscale x 2 x i1>,
  i64,
  i64);

define <vscale x 2 x double> @intrinsic_vloxei_mask_v_nxv2f64_nxv2f64_nxv2i64(<vscale x 2 x double> %0, ptr %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f64_nxv2f64_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT:    vloxei64.v v8, (a0), v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x double> @llvm.riscv.vloxei.mask.nxv2f64.nxv2i64(
    <vscale x 2 x double> %0,
    ptr %1,
    <vscale x 2 x i64> %2,
    <vscale x 2 x i1> %3,
    i64 %4, i64 1)

  ret <vscale x 2 x double> %a
}

declare <vscale x 4 x double> @llvm.riscv.vloxei.nxv4f64.nxv4i64(
  <vscale x 4 x double>,
  ptr,
  <vscale x 4 x i64>,
  i64);

define <vscale x 4 x double> @intrinsic_vloxei_v_nxv4f64_nxv4f64_nxv4i64(ptr %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vloxei_v_nxv4f64_nxv4f64_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
; CHECK-NEXT:    vloxei64.v v8, (a0), v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x double> @llvm.riscv.vloxei.nxv4f64.nxv4i64(
    <vscale x 4 x double> undef,
    ptr %0,
    <vscale x 4 x i64> %1,
    i64 %2)

  ret <vscale x 4 x double> %a
}

declare <vscale x 4 x double> @llvm.riscv.vloxei.mask.nxv4f64.nxv4i64(
  <vscale x 4 x double>,
  ptr,
  <vscale x 4 x i64>,
  <vscale x 4 x i1>,
  i64,
  i64);

define <vscale x 4 x double> @intrinsic_vloxei_mask_v_nxv4f64_nxv4f64_nxv4i64(<vscale x 4 x double> %0, ptr %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f64_nxv4f64_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
; CHECK-NEXT:    vloxei64.v v8, (a0), v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x double> @llvm.riscv.vloxei.mask.nxv4f64.nxv4i64(
    <vscale x 4 x double> %0,
    ptr %1,
    <vscale x 4 x i64> %2,
    <vscale x 4 x i1> %3,
    i64 %4, i64 1)

  ret <vscale x 4 x double> %a
}

declare <vscale x 8 x double> @llvm.riscv.vloxei.nxv8f64.nxv8i64(
  <vscale x 8 x double>,
  ptr,
  <vscale x 8 x i64>,
  i64);

define <vscale x 8 x double> @intrinsic_vloxei_v_nxv8f64_nxv8f64_nxv8i64(ptr %0, <vscale x 8 x i64> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vloxei_v_nxv8f64_nxv8f64_nxv8i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
; CHECK-NEXT:    vloxei64.v v8, (a0), v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x double> @llvm.riscv.vloxei.nxv8f64.nxv8i64(
    <vscale x 8 x double> undef,
    ptr %0,
    <vscale x 8 x i64> %1,
    i64 %2)

  ret <vscale x 8 x double> %a
}

declare <vscale x 8 x double> @llvm.riscv.vloxei.mask.nxv8f64.nxv8i64(
  <vscale x 8 x double>,
  ptr,
  <vscale x 8 x i64>,
  <vscale x 8 x i1>,
  i64,
  i64);

define <vscale x 8 x double> @intrinsic_vloxei_mask_v_nxv8f64_nxv8f64_nxv8i64(<vscale x 8 x double> %0, ptr %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f64_nxv8f64_nxv8i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
; CHECK-NEXT:    vloxei64.v v8, (a0), v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x double> @llvm.riscv.vloxei.mask.nxv8f64.nxv8i64(
    <vscale x 8 x double> %0,
    ptr %1,
    <vscale x 8 x i64> %2,
    <vscale x 8 x i1> %3,
    i64 %4, i64 1)

  ret <vscale x 8 x double> %a
}