xref: /llvm-project/llvm/test/CodeGen/RISCV/rvv/vrgather.ll (revision 6a59deafde742e30daf3bf886f98afc37f00d75b)
1; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+d,+zfhmin,+zvfh,+zvfbfmin \
3; RUN:   -verify-machineinstrs | FileCheck %s
4; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+d,+zfhmin,+zvfh,+zvfbfmin \
5; RUN:   -verify-machineinstrs | FileCheck %s
6
7declare <vscale x 1 x i8> @llvm.riscv.vrgather.vv.nxv1i8.iXLen(
8  <vscale x 1 x i8>,
9  <vscale x 1 x i8>,
10  <vscale x 1 x i8>,
11  iXLen)
12
; Unmasked e8/mf8 gather, undef passthru: result is built in temp v10, then copied to v8 with vmv1r.
13define <vscale x 1 x i8> @intrinsic_vrgather_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
14; CHECK-LABEL: intrinsic_vrgather_vv_nxv1i8_nxv1i8_nxv1i8:
15; CHECK:       # %bb.0: # %entry
16; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
17; CHECK-NEXT:    vrgather.vv v10, v8, v9
18; CHECK-NEXT:    vmv1r.v v8, v10
19; CHECK-NEXT:    ret
20entry:
21  %a = call <vscale x 1 x i8> @llvm.riscv.vrgather.vv.nxv1i8.iXLen(
22    <vscale x 1 x i8> undef,
23    <vscale x 1 x i8> %0,
24    <vscale x 1 x i8> %1,
25    iXLen %2)
26
27  ret <vscale x 1 x i8> %a
28}
29
30declare <vscale x 1 x i8> @llvm.riscv.vrgather.vv.mask.nxv1i8.iXLen(
31  <vscale x 1 x i8>,
32  <vscale x 1 x i8>,
33  <vscale x 1 x i8>,
34  <vscale x 1 x i1>,
35  iXLen,
36  iXLen)
37
; Masked e8/mf8 gather, policy operand 1 (matches "ta, mu" in the vsetvli): merges into passthru in v8 under v0.t.
38define <vscale x 1 x i8> @intrinsic_vrgather_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
39; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv1i8_nxv1i8_nxv1i8:
40; CHECK:       # %bb.0: # %entry
41; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
42; CHECK-NEXT:    vrgather.vv v8, v9, v10, v0.t
43; CHECK-NEXT:    ret
44entry:
45  %a = call <vscale x 1 x i8> @llvm.riscv.vrgather.vv.mask.nxv1i8.iXLen(
46    <vscale x 1 x i8> %0,
47    <vscale x 1 x i8> %1,
48    <vscale x 1 x i8> %2,
49    <vscale x 1 x i1> %3,
50    iXLen %4, iXLen 1)
51
52  ret <vscale x 1 x i8> %a
53}
54
55declare <vscale x 2 x i8> @llvm.riscv.vrgather.vv.nxv2i8.iXLen(
56  <vscale x 2 x i8>,
57  <vscale x 2 x i8>,
58  <vscale x 2 x i8>,
59  iXLen)
60
; Unmasked e8/mf4 gather, undef passthru: result is built in temp v10, then copied to v8 with vmv1r.
61define <vscale x 2 x i8> @intrinsic_vrgather_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
62; CHECK-LABEL: intrinsic_vrgather_vv_nxv2i8_nxv2i8_nxv2i8:
63; CHECK:       # %bb.0: # %entry
64; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
65; CHECK-NEXT:    vrgather.vv v10, v8, v9
66; CHECK-NEXT:    vmv1r.v v8, v10
67; CHECK-NEXT:    ret
68entry:
69  %a = call <vscale x 2 x i8> @llvm.riscv.vrgather.vv.nxv2i8.iXLen(
70    <vscale x 2 x i8> undef,
71    <vscale x 2 x i8> %0,
72    <vscale x 2 x i8> %1,
73    iXLen %2)
74
75  ret <vscale x 2 x i8> %a
76}
77
78declare <vscale x 2 x i8> @llvm.riscv.vrgather.vv.mask.nxv2i8.iXLen(
79  <vscale x 2 x i8>,
80  <vscale x 2 x i8>,
81  <vscale x 2 x i8>,
82  <vscale x 2 x i1>,
83  iXLen,
84  iXLen)
85
; Masked e8/mf4 gather, policy operand 1 ("ta, mu"): merges into passthru in v8 under v0.t.
86define <vscale x 2 x i8> @intrinsic_vrgather_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
87; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv2i8_nxv2i8_nxv2i8:
88; CHECK:       # %bb.0: # %entry
89; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
90; CHECK-NEXT:    vrgather.vv v8, v9, v10, v0.t
91; CHECK-NEXT:    ret
92entry:
93  %a = call <vscale x 2 x i8> @llvm.riscv.vrgather.vv.mask.nxv2i8.iXLen(
94    <vscale x 2 x i8> %0,
95    <vscale x 2 x i8> %1,
96    <vscale x 2 x i8> %2,
97    <vscale x 2 x i1> %3,
98    iXLen %4, iXLen 1)
99
100  ret <vscale x 2 x i8> %a
101}
102
103declare <vscale x 4 x i8> @llvm.riscv.vrgather.vv.nxv4i8.iXLen(
104  <vscale x 4 x i8>,
105  <vscale x 4 x i8>,
106  <vscale x 4 x i8>,
107  iXLen)
108
; Unmasked e8/mf2 gather, undef passthru: result is built in temp v10, then copied to v8 with vmv1r.
109define <vscale x 4 x i8> @intrinsic_vrgather_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
110; CHECK-LABEL: intrinsic_vrgather_vv_nxv4i8_nxv4i8_nxv4i8:
111; CHECK:       # %bb.0: # %entry
112; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
113; CHECK-NEXT:    vrgather.vv v10, v8, v9
114; CHECK-NEXT:    vmv1r.v v8, v10
115; CHECK-NEXT:    ret
116entry:
117  %a = call <vscale x 4 x i8> @llvm.riscv.vrgather.vv.nxv4i8.iXLen(
118    <vscale x 4 x i8> undef,
119    <vscale x 4 x i8> %0,
120    <vscale x 4 x i8> %1,
121    iXLen %2)
122
123  ret <vscale x 4 x i8> %a
124}
125
126declare <vscale x 4 x i8> @llvm.riscv.vrgather.vv.mask.nxv4i8.iXLen(
127  <vscale x 4 x i8>,
128  <vscale x 4 x i8>,
129  <vscale x 4 x i8>,
130  <vscale x 4 x i1>,
131  iXLen,
132  iXLen)
133
; Masked e8/mf2 gather, policy operand 1 ("ta, mu"): merges into passthru in v8 under v0.t.
134define <vscale x 4 x i8> @intrinsic_vrgather_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
135; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv4i8_nxv4i8_nxv4i8:
136; CHECK:       # %bb.0: # %entry
137; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
138; CHECK-NEXT:    vrgather.vv v8, v9, v10, v0.t
139; CHECK-NEXT:    ret
140entry:
141  %a = call <vscale x 4 x i8> @llvm.riscv.vrgather.vv.mask.nxv4i8.iXLen(
142    <vscale x 4 x i8> %0,
143    <vscale x 4 x i8> %1,
144    <vscale x 4 x i8> %2,
145    <vscale x 4 x i1> %3,
146    iXLen %4, iXLen 1)
147
148  ret <vscale x 4 x i8> %a
149}
150
151declare <vscale x 8 x i8> @llvm.riscv.vrgather.vv.nxv8i8.iXLen(
152  <vscale x 8 x i8>,
153  <vscale x 8 x i8>,
154  <vscale x 8 x i8>,
155  iXLen)
156
; Unmasked e8/m1 gather, undef passthru: result is built in temp v10, then copied to v8 with vmv.v.v.
157define <vscale x 8 x i8> @intrinsic_vrgather_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
158; CHECK-LABEL: intrinsic_vrgather_vv_nxv8i8_nxv8i8_nxv8i8:
159; CHECK:       # %bb.0: # %entry
160; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
161; CHECK-NEXT:    vrgather.vv v10, v8, v9
162; CHECK-NEXT:    vmv.v.v v8, v10
163; CHECK-NEXT:    ret
164entry:
165  %a = call <vscale x 8 x i8> @llvm.riscv.vrgather.vv.nxv8i8.iXLen(
166    <vscale x 8 x i8> undef,
167    <vscale x 8 x i8> %0,
168    <vscale x 8 x i8> %1,
169    iXLen %2)
170
171  ret <vscale x 8 x i8> %a
172}
173
174declare <vscale x 8 x i8> @llvm.riscv.vrgather.vv.mask.nxv8i8.iXLen(
175  <vscale x 8 x i8>,
176  <vscale x 8 x i8>,
177  <vscale x 8 x i8>,
178  <vscale x 8 x i1>,
179  iXLen,
180  iXLen)
181
; Masked e8/m1 gather, policy operand 1 ("ta, mu"): merges into passthru in v8 under v0.t.
182define <vscale x 8 x i8> @intrinsic_vrgather_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
183; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv8i8_nxv8i8_nxv8i8:
184; CHECK:       # %bb.0: # %entry
185; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
186; CHECK-NEXT:    vrgather.vv v8, v9, v10, v0.t
187; CHECK-NEXT:    ret
188entry:
189  %a = call <vscale x 8 x i8> @llvm.riscv.vrgather.vv.mask.nxv8i8.iXLen(
190    <vscale x 8 x i8> %0,
191    <vscale x 8 x i8> %1,
192    <vscale x 8 x i8> %2,
193    <vscale x 8 x i1> %3,
194    iXLen %4, iXLen 1)
195
196  ret <vscale x 8 x i8> %a
197}
198
199declare <vscale x 16 x i8> @llvm.riscv.vrgather.vv.nxv16i8.iXLen(
200  <vscale x 16 x i8>,
201  <vscale x 16 x i8>,
202  <vscale x 16 x i8>,
203  iXLen)
204
; Unmasked e8/m2 gather, undef passthru: result is built in temp v12, then copied to v8 with vmv.v.v.
205define <vscale x 16 x i8> @intrinsic_vrgather_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
206; CHECK-LABEL: intrinsic_vrgather_vv_nxv16i8_nxv16i8_nxv16i8:
207; CHECK:       # %bb.0: # %entry
208; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
209; CHECK-NEXT:    vrgather.vv v12, v8, v10
210; CHECK-NEXT:    vmv.v.v v8, v12
211; CHECK-NEXT:    ret
212entry:
213  %a = call <vscale x 16 x i8> @llvm.riscv.vrgather.vv.nxv16i8.iXLen(
214    <vscale x 16 x i8> undef,
215    <vscale x 16 x i8> %0,
216    <vscale x 16 x i8> %1,
217    iXLen %2)
218
219  ret <vscale x 16 x i8> %a
220}
221
222declare <vscale x 16 x i8> @llvm.riscv.vrgather.vv.mask.nxv16i8.iXLen(
223  <vscale x 16 x i8>,
224  <vscale x 16 x i8>,
225  <vscale x 16 x i8>,
226  <vscale x 16 x i1>,
227  iXLen,
228  iXLen)
229
; Masked e8/m2 gather, policy operand 1 ("ta, mu"): merges into passthru in v8 under v0.t.
230define <vscale x 16 x i8> @intrinsic_vrgather_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
231; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv16i8_nxv16i8_nxv16i8:
232; CHECK:       # %bb.0: # %entry
233; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
234; CHECK-NEXT:    vrgather.vv v8, v10, v12, v0.t
235; CHECK-NEXT:    ret
236entry:
237  %a = call <vscale x 16 x i8> @llvm.riscv.vrgather.vv.mask.nxv16i8.iXLen(
238    <vscale x 16 x i8> %0,
239    <vscale x 16 x i8> %1,
240    <vscale x 16 x i8> %2,
241    <vscale x 16 x i1> %3,
242    iXLen %4, iXLen 1)
243
244  ret <vscale x 16 x i8> %a
245}
246
247declare <vscale x 32 x i8> @llvm.riscv.vrgather.vv.nxv32i8.iXLen(
248  <vscale x 32 x i8>,
249  <vscale x 32 x i8>,
250  <vscale x 32 x i8>,
251  iXLen)
252
; Unmasked e8/m4 gather, undef passthru: result is built in temp v16, then copied to v8 with vmv.v.v.
253define <vscale x 32 x i8> @intrinsic_vrgather_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, iXLen %2) nounwind {
254; CHECK-LABEL: intrinsic_vrgather_vv_nxv32i8_nxv32i8_nxv32i8:
255; CHECK:       # %bb.0: # %entry
256; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
257; CHECK-NEXT:    vrgather.vv v16, v8, v12
258; CHECK-NEXT:    vmv.v.v v8, v16
259; CHECK-NEXT:    ret
260entry:
261  %a = call <vscale x 32 x i8> @llvm.riscv.vrgather.vv.nxv32i8.iXLen(
262    <vscale x 32 x i8> undef,
263    <vscale x 32 x i8> %0,
264    <vscale x 32 x i8> %1,
265    iXLen %2)
266
267  ret <vscale x 32 x i8> %a
268}
269
270declare <vscale x 32 x i8> @llvm.riscv.vrgather.vv.mask.nxv32i8.iXLen(
271  <vscale x 32 x i8>,
272  <vscale x 32 x i8>,
273  <vscale x 32 x i8>,
274  <vscale x 32 x i1>,
275  iXLen,
276  iXLen)
277
; Masked e8/m4 gather, policy operand 1 ("ta, mu"): merges into passthru in v8 under v0.t.
278define <vscale x 32 x i8> @intrinsic_vrgather_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
279; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv32i8_nxv32i8_nxv32i8:
280; CHECK:       # %bb.0: # %entry
281; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
282; CHECK-NEXT:    vrgather.vv v8, v12, v16, v0.t
283; CHECK-NEXT:    ret
284entry:
285  %a = call <vscale x 32 x i8> @llvm.riscv.vrgather.vv.mask.nxv32i8.iXLen(
286    <vscale x 32 x i8> %0,
287    <vscale x 32 x i8> %1,
288    <vscale x 32 x i8> %2,
289    <vscale x 32 x i1> %3,
290    iXLen %4, iXLen 1)
291
292  ret <vscale x 32 x i8> %a
293}
294
295declare <vscale x 64 x i8> @llvm.riscv.vrgather.vv.nxv64i8.iXLen(
296  <vscale x 64 x i8>,
297  <vscale x 64 x i8>,
298  <vscale x 64 x i8>,
299  iXLen)
300
; Unmasked e8/m8 gather, undef passthru: result is built in temp v24, then copied to v8 with vmv.v.v.
301define <vscale x 64 x i8> @intrinsic_vrgather_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, iXLen %2) nounwind {
302; CHECK-LABEL: intrinsic_vrgather_vv_nxv64i8_nxv64i8_nxv64i8:
303; CHECK:       # %bb.0: # %entry
304; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
305; CHECK-NEXT:    vrgather.vv v24, v8, v16
306; CHECK-NEXT:    vmv.v.v v8, v24
307; CHECK-NEXT:    ret
308entry:
309  %a = call <vscale x 64 x i8> @llvm.riscv.vrgather.vv.nxv64i8.iXLen(
310    <vscale x 64 x i8> undef,
311    <vscale x 64 x i8> %0,
312    <vscale x 64 x i8> %1,
313    iXLen %2)
314
315  ret <vscale x 64 x i8> %a
316}
317
318declare <vscale x 64 x i8> @llvm.riscv.vrgather.vv.mask.nxv64i8.iXLen(
319  <vscale x 64 x i8>,
320  <vscale x 64 x i8>,
321  <vscale x 64 x i8>,
322  <vscale x 64 x i1>,
323  iXLen,
324  iXLen)
325
; Masked e8/m8 gather, policy operand 1 ("ta, mu"): the index operand is loaded from memory
; at (a0) with vl8r.v into v24 (NOTE(review): presumably because three m8 vector args plus the
; mask exceed the register-argument budget — confirm against the RISC-V V calling convention).
326define <vscale x 64 x i8> @intrinsic_vrgather_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, iXLen %4) nounwind {
327; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv64i8_nxv64i8_nxv64i8:
328; CHECK:       # %bb.0: # %entry
329; CHECK-NEXT:    vl8r.v v24, (a0)
330; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
331; CHECK-NEXT:    vrgather.vv v8, v16, v24, v0.t
332; CHECK-NEXT:    ret
333entry:
334  %a = call <vscale x 64 x i8> @llvm.riscv.vrgather.vv.mask.nxv64i8.iXLen(
335    <vscale x 64 x i8> %0,
336    <vscale x 64 x i8> %1,
337    <vscale x 64 x i8> %2,
338    <vscale x 64 x i1> %3,
339    iXLen %4, iXLen 1)
340
341  ret <vscale x 64 x i8> %a
342}
343
344declare <vscale x 1 x i16> @llvm.riscv.vrgather.vv.nxv1i16.iXLen(
345  <vscale x 1 x i16>,
346  <vscale x 1 x i16>,
347  <vscale x 1 x i16>,
348  iXLen)
349
; Unmasked e16/mf4 gather, undef passthru: result is built in temp v10, then copied to v8 with vmv1r.
350define <vscale x 1 x i16> @intrinsic_vrgather_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
351; CHECK-LABEL: intrinsic_vrgather_vv_nxv1i16_nxv1i16_nxv1i16:
352; CHECK:       # %bb.0: # %entry
353; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
354; CHECK-NEXT:    vrgather.vv v10, v8, v9
355; CHECK-NEXT:    vmv1r.v v8, v10
356; CHECK-NEXT:    ret
357entry:
358  %a = call <vscale x 1 x i16> @llvm.riscv.vrgather.vv.nxv1i16.iXLen(
359    <vscale x 1 x i16> undef,
360    <vscale x 1 x i16> %0,
361    <vscale x 1 x i16> %1,
362    iXLen %2)
363
364  ret <vscale x 1 x i16> %a
365}
366
367declare <vscale x 1 x i16> @llvm.riscv.vrgather.vv.mask.nxv1i16.iXLen(
368  <vscale x 1 x i16>,
369  <vscale x 1 x i16>,
370  <vscale x 1 x i16>,
371  <vscale x 1 x i1>,
372  iXLen,
373  iXLen)
374
; Masked e16/mf4 gather, policy operand 1 ("ta, mu"): merges into passthru in v8 under v0.t.
375define <vscale x 1 x i16> @intrinsic_vrgather_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
376; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv1i16_nxv1i16_nxv1i16:
377; CHECK:       # %bb.0: # %entry
378; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
379; CHECK-NEXT:    vrgather.vv v8, v9, v10, v0.t
380; CHECK-NEXT:    ret
381entry:
382  %a = call <vscale x 1 x i16> @llvm.riscv.vrgather.vv.mask.nxv1i16.iXLen(
383    <vscale x 1 x i16> %0,
384    <vscale x 1 x i16> %1,
385    <vscale x 1 x i16> %2,
386    <vscale x 1 x i1> %3,
387    iXLen %4, iXLen 1)
388
389  ret <vscale x 1 x i16> %a
390}
391
392declare <vscale x 2 x i16> @llvm.riscv.vrgather.vv.nxv2i16.iXLen(
393  <vscale x 2 x i16>,
394  <vscale x 2 x i16>,
395  <vscale x 2 x i16>,
396  iXLen)
397
; Unmasked e16/mf2 gather, undef passthru: result is built in temp v10, then copied to v8 with vmv1r.
398define <vscale x 2 x i16> @intrinsic_vrgather_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
399; CHECK-LABEL: intrinsic_vrgather_vv_nxv2i16_nxv2i16_nxv2i16:
400; CHECK:       # %bb.0: # %entry
401; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
402; CHECK-NEXT:    vrgather.vv v10, v8, v9
403; CHECK-NEXT:    vmv1r.v v8, v10
404; CHECK-NEXT:    ret
405entry:
406  %a = call <vscale x 2 x i16> @llvm.riscv.vrgather.vv.nxv2i16.iXLen(
407    <vscale x 2 x i16> undef,
408    <vscale x 2 x i16> %0,
409    <vscale x 2 x i16> %1,
410    iXLen %2)
411
412  ret <vscale x 2 x i16> %a
413}
414
415declare <vscale x 2 x i16> @llvm.riscv.vrgather.vv.mask.nxv2i16.iXLen(
416  <vscale x 2 x i16>,
417  <vscale x 2 x i16>,
418  <vscale x 2 x i16>,
419  <vscale x 2 x i1>,
420  iXLen,
421  iXLen)
422
; Masked e16/mf2 gather, policy operand 1 ("ta, mu"): merges into passthru in v8 under v0.t.
423define <vscale x 2 x i16> @intrinsic_vrgather_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
424; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv2i16_nxv2i16_nxv2i16:
425; CHECK:       # %bb.0: # %entry
426; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
427; CHECK-NEXT:    vrgather.vv v8, v9, v10, v0.t
428; CHECK-NEXT:    ret
429entry:
430  %a = call <vscale x 2 x i16> @llvm.riscv.vrgather.vv.mask.nxv2i16.iXLen(
431    <vscale x 2 x i16> %0,
432    <vscale x 2 x i16> %1,
433    <vscale x 2 x i16> %2,
434    <vscale x 2 x i1> %3,
435    iXLen %4, iXLen 1)
436
437  ret <vscale x 2 x i16> %a
438}
439
440declare <vscale x 4 x i16> @llvm.riscv.vrgather.vv.nxv4i16.iXLen(
441  <vscale x 4 x i16>,
442  <vscale x 4 x i16>,
443  <vscale x 4 x i16>,
444  iXLen)
445
; Unmasked e16/m1 gather, undef passthru: result is built in temp v10, then copied to v8 with vmv.v.v.
446define <vscale x 4 x i16> @intrinsic_vrgather_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
447; CHECK-LABEL: intrinsic_vrgather_vv_nxv4i16_nxv4i16_nxv4i16:
448; CHECK:       # %bb.0: # %entry
449; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
450; CHECK-NEXT:    vrgather.vv v10, v8, v9
451; CHECK-NEXT:    vmv.v.v v8, v10
452; CHECK-NEXT:    ret
453entry:
454  %a = call <vscale x 4 x i16> @llvm.riscv.vrgather.vv.nxv4i16.iXLen(
455    <vscale x 4 x i16> undef,
456    <vscale x 4 x i16> %0,
457    <vscale x 4 x i16> %1,
458    iXLen %2)
459
460  ret <vscale x 4 x i16> %a
461}
462
463declare <vscale x 4 x i16> @llvm.riscv.vrgather.vv.mask.nxv4i16.iXLen(
464  <vscale x 4 x i16>,
465  <vscale x 4 x i16>,
466  <vscale x 4 x i16>,
467  <vscale x 4 x i1>,
468  iXLen,
469  iXLen)
470
; Masked e16/m1 gather, policy operand 1 ("ta, mu"): merges into passthru in v8 under v0.t.
471define <vscale x 4 x i16> @intrinsic_vrgather_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
472; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv4i16_nxv4i16_nxv4i16:
473; CHECK:       # %bb.0: # %entry
474; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
475; CHECK-NEXT:    vrgather.vv v8, v9, v10, v0.t
476; CHECK-NEXT:    ret
477entry:
478  %a = call <vscale x 4 x i16> @llvm.riscv.vrgather.vv.mask.nxv4i16.iXLen(
479    <vscale x 4 x i16> %0,
480    <vscale x 4 x i16> %1,
481    <vscale x 4 x i16> %2,
482    <vscale x 4 x i1> %3,
483    iXLen %4, iXLen 1)
484
485  ret <vscale x 4 x i16> %a
486}
487
488declare <vscale x 8 x i16> @llvm.riscv.vrgather.vv.nxv8i16.iXLen(
489  <vscale x 8 x i16>,
490  <vscale x 8 x i16>,
491  <vscale x 8 x i16>,
492  iXLen)
493
; Unmasked e16/m2 gather, undef passthru: result is built in temp v12, then copied to v8 with vmv.v.v.
494define <vscale x 8 x i16> @intrinsic_vrgather_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
495; CHECK-LABEL: intrinsic_vrgather_vv_nxv8i16_nxv8i16_nxv8i16:
496; CHECK:       # %bb.0: # %entry
497; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
498; CHECK-NEXT:    vrgather.vv v12, v8, v10
499; CHECK-NEXT:    vmv.v.v v8, v12
500; CHECK-NEXT:    ret
501entry:
502  %a = call <vscale x 8 x i16> @llvm.riscv.vrgather.vv.nxv8i16.iXLen(
503    <vscale x 8 x i16> undef,
504    <vscale x 8 x i16> %0,
505    <vscale x 8 x i16> %1,
506    iXLen %2)
507
508  ret <vscale x 8 x i16> %a
509}
510
511declare <vscale x 8 x i16> @llvm.riscv.vrgather.vv.mask.nxv8i16.iXLen(
512  <vscale x 8 x i16>,
513  <vscale x 8 x i16>,
514  <vscale x 8 x i16>,
515  <vscale x 8 x i1>,
516  iXLen,
517  iXLen)
518
; Masked e16/m2 gather, policy operand 1 ("ta, mu"): merges into passthru in v8 under v0.t.
519define <vscale x 8 x i16> @intrinsic_vrgather_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
520; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv8i16_nxv8i16_nxv8i16:
521; CHECK:       # %bb.0: # %entry
522; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
523; CHECK-NEXT:    vrgather.vv v8, v10, v12, v0.t
524; CHECK-NEXT:    ret
525entry:
526  %a = call <vscale x 8 x i16> @llvm.riscv.vrgather.vv.mask.nxv8i16.iXLen(
527    <vscale x 8 x i16> %0,
528    <vscale x 8 x i16> %1,
529    <vscale x 8 x i16> %2,
530    <vscale x 8 x i1> %3,
531    iXLen %4, iXLen 1)
532
533  ret <vscale x 8 x i16> %a
534}
535
536declare <vscale x 16 x i16> @llvm.riscv.vrgather.vv.nxv16i16.iXLen(
537  <vscale x 16 x i16>,
538  <vscale x 16 x i16>,
539  <vscale x 16 x i16>,
540  iXLen)
541
; Unmasked e16/m4 gather, undef passthru: result is built in temp v16, then copied to v8 with vmv.v.v.
542define <vscale x 16 x i16> @intrinsic_vrgather_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
543; CHECK-LABEL: intrinsic_vrgather_vv_nxv16i16_nxv16i16_nxv16i16:
544; CHECK:       # %bb.0: # %entry
545; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
546; CHECK-NEXT:    vrgather.vv v16, v8, v12
547; CHECK-NEXT:    vmv.v.v v8, v16
548; CHECK-NEXT:    ret
549entry:
550  %a = call <vscale x 16 x i16> @llvm.riscv.vrgather.vv.nxv16i16.iXLen(
551    <vscale x 16 x i16> undef,
552    <vscale x 16 x i16> %0,
553    <vscale x 16 x i16> %1,
554    iXLen %2)
555
556  ret <vscale x 16 x i16> %a
557}
558
559declare <vscale x 16 x i16> @llvm.riscv.vrgather.vv.mask.nxv16i16.iXLen(
560  <vscale x 16 x i16>,
561  <vscale x 16 x i16>,
562  <vscale x 16 x i16>,
563  <vscale x 16 x i1>,
564  iXLen,
565  iXLen)
566
; Masked e16/m4 gather, policy operand 1 ("ta, mu"): merges into passthru in v8 under v0.t.
567define <vscale x 16 x i16> @intrinsic_vrgather_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
568; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv16i16_nxv16i16_nxv16i16:
569; CHECK:       # %bb.0: # %entry
570; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
571; CHECK-NEXT:    vrgather.vv v8, v12, v16, v0.t
572; CHECK-NEXT:    ret
573entry:
574  %a = call <vscale x 16 x i16> @llvm.riscv.vrgather.vv.mask.nxv16i16.iXLen(
575    <vscale x 16 x i16> %0,
576    <vscale x 16 x i16> %1,
577    <vscale x 16 x i16> %2,
578    <vscale x 16 x i1> %3,
579    iXLen %4, iXLen 1)
580
581  ret <vscale x 16 x i16> %a
582}
583
584declare <vscale x 32 x i16> @llvm.riscv.vrgather.vv.nxv32i16.iXLen(
585  <vscale x 32 x i16>,
586  <vscale x 32 x i16>,
587  <vscale x 32 x i16>,
588  iXLen)
589
; Unmasked e16/m8 gather, undef passthru: result is built in temp v24, then copied to v8 with vmv.v.v.
590define <vscale x 32 x i16> @intrinsic_vrgather_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, iXLen %2) nounwind {
591; CHECK-LABEL: intrinsic_vrgather_vv_nxv32i16_nxv32i16_nxv32i16:
592; CHECK:       # %bb.0: # %entry
593; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
594; CHECK-NEXT:    vrgather.vv v24, v8, v16
595; CHECK-NEXT:    vmv.v.v v8, v24
596; CHECK-NEXT:    ret
597entry:
598  %a = call <vscale x 32 x i16> @llvm.riscv.vrgather.vv.nxv32i16.iXLen(
599    <vscale x 32 x i16> undef,
600    <vscale x 32 x i16> %0,
601    <vscale x 32 x i16> %1,
602    iXLen %2)
603
604  ret <vscale x 32 x i16> %a
605}
606
607declare <vscale x 32 x i16> @llvm.riscv.vrgather.vv.mask.nxv32i16.iXLen(
608  <vscale x 32 x i16>,
609  <vscale x 32 x i16>,
610  <vscale x 32 x i16>,
611  <vscale x 32 x i1>,
612  iXLen,
613  iXLen)
614
; Masked e16/m8 gather, policy operand 1 ("ta, mu"): the index operand is loaded from memory
; at (a0) with vl8re16.v into v24 rather than arriving in a vector argument register.
615define <vscale x 32 x i16> @intrinsic_vrgather_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
616; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv32i16_nxv32i16_nxv32i16:
617; CHECK:       # %bb.0: # %entry
618; CHECK-NEXT:    vl8re16.v v24, (a0)
619; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
620; CHECK-NEXT:    vrgather.vv v8, v16, v24, v0.t
621; CHECK-NEXT:    ret
622entry:
623  %a = call <vscale x 32 x i16> @llvm.riscv.vrgather.vv.mask.nxv32i16.iXLen(
624    <vscale x 32 x i16> %0,
625    <vscale x 32 x i16> %1,
626    <vscale x 32 x i16> %2,
627    <vscale x 32 x i1> %3,
628    iXLen %4, iXLen 1)
629
630  ret <vscale x 32 x i16> %a
631}
632
633declare <vscale x 1 x i32> @llvm.riscv.vrgather.vv.nxv1i32.iXLen(
634  <vscale x 1 x i32>,
635  <vscale x 1 x i32>,
636  <vscale x 1 x i32>,
637  iXLen)
638
; Unmasked e32/mf2 gather, undef passthru: result is built in temp v10, then copied to v8 with vmv1r.
639define <vscale x 1 x i32> @intrinsic_vrgather_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
640; CHECK-LABEL: intrinsic_vrgather_vv_nxv1i32_nxv1i32_nxv1i32:
641; CHECK:       # %bb.0: # %entry
642; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
643; CHECK-NEXT:    vrgather.vv v10, v8, v9
644; CHECK-NEXT:    vmv1r.v v8, v10
645; CHECK-NEXT:    ret
646entry:
647  %a = call <vscale x 1 x i32> @llvm.riscv.vrgather.vv.nxv1i32.iXLen(
648    <vscale x 1 x i32> undef,
649    <vscale x 1 x i32> %0,
650    <vscale x 1 x i32> %1,
651    iXLen %2)
652
653  ret <vscale x 1 x i32> %a
654}
655
656declare <vscale x 1 x i32> @llvm.riscv.vrgather.vv.mask.nxv1i32.iXLen(
657  <vscale x 1 x i32>,
658  <vscale x 1 x i32>,
659  <vscale x 1 x i32>,
660  <vscale x 1 x i1>,
661  iXLen,
662  iXLen)
663
; Masked e32/mf2 gather, policy operand 1 ("ta, mu"): merges into passthru in v8 under v0.t.
664define <vscale x 1 x i32> @intrinsic_vrgather_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
665; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv1i32_nxv1i32_nxv1i32:
666; CHECK:       # %bb.0: # %entry
667; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
668; CHECK-NEXT:    vrgather.vv v8, v9, v10, v0.t
669; CHECK-NEXT:    ret
670entry:
671  %a = call <vscale x 1 x i32> @llvm.riscv.vrgather.vv.mask.nxv1i32.iXLen(
672    <vscale x 1 x i32> %0,
673    <vscale x 1 x i32> %1,
674    <vscale x 1 x i32> %2,
675    <vscale x 1 x i1> %3,
676    iXLen %4, iXLen 1)
677
678  ret <vscale x 1 x i32> %a
679}
680
681declare <vscale x 2 x i32> @llvm.riscv.vrgather.vv.nxv2i32.iXLen(
682  <vscale x 2 x i32>,
683  <vscale x 2 x i32>,
684  <vscale x 2 x i32>,
685  iXLen)
686
; Unmasked e32/m1 gather, undef passthru: result is built in temp v10, then copied to v8 with vmv.v.v.
687define <vscale x 2 x i32> @intrinsic_vrgather_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
688; CHECK-LABEL: intrinsic_vrgather_vv_nxv2i32_nxv2i32_nxv2i32:
689; CHECK:       # %bb.0: # %entry
690; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
691; CHECK-NEXT:    vrgather.vv v10, v8, v9
692; CHECK-NEXT:    vmv.v.v v8, v10
693; CHECK-NEXT:    ret
694entry:
695  %a = call <vscale x 2 x i32> @llvm.riscv.vrgather.vv.nxv2i32.iXLen(
696    <vscale x 2 x i32> undef,
697    <vscale x 2 x i32> %0,
698    <vscale x 2 x i32> %1,
699    iXLen %2)
700
701  ret <vscale x 2 x i32> %a
702}
703
704declare <vscale x 2 x i32> @llvm.riscv.vrgather.vv.mask.nxv2i32.iXLen(
705  <vscale x 2 x i32>,
706  <vscale x 2 x i32>,
707  <vscale x 2 x i32>,
708  <vscale x 2 x i1>,
709  iXLen,
710  iXLen)
711
; Masked e32/m1 gather, policy operand 1 ("ta, mu"): merges into passthru in v8 under v0.t.
712define <vscale x 2 x i32> @intrinsic_vrgather_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
713; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv2i32_nxv2i32_nxv2i32:
714; CHECK:       # %bb.0: # %entry
715; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
716; CHECK-NEXT:    vrgather.vv v8, v9, v10, v0.t
717; CHECK-NEXT:    ret
718entry:
719  %a = call <vscale x 2 x i32> @llvm.riscv.vrgather.vv.mask.nxv2i32.iXLen(
720    <vscale x 2 x i32> %0,
721    <vscale x 2 x i32> %1,
722    <vscale x 2 x i32> %2,
723    <vscale x 2 x i1> %3,
724    iXLen %4, iXLen 1)
725
726  ret <vscale x 2 x i32> %a
727}
728
729declare <vscale x 4 x i32> @llvm.riscv.vrgather.vv.nxv4i32.iXLen(
730  <vscale x 4 x i32>,
731  <vscale x 4 x i32>,
732  <vscale x 4 x i32>,
733  iXLen)
734
; Unmasked e32/m2 gather, undef passthru: result is built in temp v12, then copied to v8 with vmv.v.v.
735define <vscale x 4 x i32> @intrinsic_vrgather_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
736; CHECK-LABEL: intrinsic_vrgather_vv_nxv4i32_nxv4i32_nxv4i32:
737; CHECK:       # %bb.0: # %entry
738; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
739; CHECK-NEXT:    vrgather.vv v12, v8, v10
740; CHECK-NEXT:    vmv.v.v v8, v12
741; CHECK-NEXT:    ret
742entry:
743  %a = call <vscale x 4 x i32> @llvm.riscv.vrgather.vv.nxv4i32.iXLen(
744    <vscale x 4 x i32> undef,
745    <vscale x 4 x i32> %0,
746    <vscale x 4 x i32> %1,
747    iXLen %2)
748
749  ret <vscale x 4 x i32> %a
750}
751
752declare <vscale x 4 x i32> @llvm.riscv.vrgather.vv.mask.nxv4i32.iXLen(
753  <vscale x 4 x i32>,
754  <vscale x 4 x i32>,
755  <vscale x 4 x i32>,
756  <vscale x 4 x i1>,
757  iXLen,
758  iXLen)
759
; Masked e32/m2 gather, policy operand 1 ("ta, mu"): merges into passthru in v8 under v0.t.
760define <vscale x 4 x i32> @intrinsic_vrgather_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
761; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv4i32_nxv4i32_nxv4i32:
762; CHECK:       # %bb.0: # %entry
763; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
764; CHECK-NEXT:    vrgather.vv v8, v10, v12, v0.t
765; CHECK-NEXT:    ret
766entry:
767  %a = call <vscale x 4 x i32> @llvm.riscv.vrgather.vv.mask.nxv4i32.iXLen(
768    <vscale x 4 x i32> %0,
769    <vscale x 4 x i32> %1,
770    <vscale x 4 x i32> %2,
771    <vscale x 4 x i1> %3,
772    iXLen %4, iXLen 1)
773
774  ret <vscale x 4 x i32> %a
775}
776
777declare <vscale x 8 x i32> @llvm.riscv.vrgather.vv.nxv8i32.iXLen(
778  <vscale x 8 x i32>,
779  <vscale x 8 x i32>,
780  <vscale x 8 x i32>,
781  iXLen)
782
; Unmasked e32/m4 gather, undef passthru: result is built in temp v16, then copied to v8 with vmv.v.v.
783define <vscale x 8 x i32> @intrinsic_vrgather_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
784; CHECK-LABEL: intrinsic_vrgather_vv_nxv8i32_nxv8i32_nxv8i32:
785; CHECK:       # %bb.0: # %entry
786; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
787; CHECK-NEXT:    vrgather.vv v16, v8, v12
788; CHECK-NEXT:    vmv.v.v v8, v16
789; CHECK-NEXT:    ret
790entry:
791  %a = call <vscale x 8 x i32> @llvm.riscv.vrgather.vv.nxv8i32.iXLen(
792    <vscale x 8 x i32> undef,
793    <vscale x 8 x i32> %0,
794    <vscale x 8 x i32> %1,
795    iXLen %2)
796
797  ret <vscale x 8 x i32> %a
798}
799
800declare <vscale x 8 x i32> @llvm.riscv.vrgather.vv.mask.nxv8i32.iXLen(
801  <vscale x 8 x i32>,
802  <vscale x 8 x i32>,
803  <vscale x 8 x i32>,
804  <vscale x 8 x i1>,
805  iXLen,
806  iXLen)
807
; Masked e32/m4 gather, policy operand 1 ("ta, mu"): merges into passthru in v8 under v0.t.
808define <vscale x 8 x i32> @intrinsic_vrgather_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
809; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv8i32_nxv8i32_nxv8i32:
810; CHECK:       # %bb.0: # %entry
811; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
812; CHECK-NEXT:    vrgather.vv v8, v12, v16, v0.t
813; CHECK-NEXT:    ret
814entry:
815  %a = call <vscale x 8 x i32> @llvm.riscv.vrgather.vv.mask.nxv8i32.iXLen(
816    <vscale x 8 x i32> %0,
817    <vscale x 8 x i32> %1,
818    <vscale x 8 x i32> %2,
819    <vscale x 8 x i1> %3,
820    iXLen %4, iXLen 1)
821
822  ret <vscale x 8 x i32> %a
823}
824
825declare <vscale x 16 x i32> @llvm.riscv.vrgather.vv.nxv16i32.iXLen(
826  <vscale x 16 x i32>,
827  <vscale x 16 x i32>,
828  <vscale x 16 x i32>,
829  iXLen)
830
; Unmasked e32/m8 gather, undef passthru: result is built in temp v24, then copied to v8 with vmv.v.v.
831define <vscale x 16 x i32> @intrinsic_vrgather_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, iXLen %2) nounwind {
832; CHECK-LABEL: intrinsic_vrgather_vv_nxv16i32_nxv16i32_nxv16i32:
833; CHECK:       # %bb.0: # %entry
834; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
835; CHECK-NEXT:    vrgather.vv v24, v8, v16
836; CHECK-NEXT:    vmv.v.v v8, v24
837; CHECK-NEXT:    ret
838entry:
839  %a = call <vscale x 16 x i32> @llvm.riscv.vrgather.vv.nxv16i32.iXLen(
840    <vscale x 16 x i32> undef,
841    <vscale x 16 x i32> %0,
842    <vscale x 16 x i32> %1,
843    iXLen %2)
844
845  ret <vscale x 16 x i32> %a
846}
847
848declare <vscale x 16 x i32> @llvm.riscv.vrgather.vv.mask.nxv16i32.iXLen(
849  <vscale x 16 x i32>,
850  <vscale x 16 x i32>,
851  <vscale x 16 x i32>,
852  <vscale x 16 x i1>,
853  iXLen,
854  iXLen)
855
; Masked e32/m8 gather, policy operand 1 ("ta, mu"): the index operand is loaded from memory
; at (a0) with vl8re32.v into v24 rather than arriving in a vector argument register.
856define <vscale x 16 x i32> @intrinsic_vrgather_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
857; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv16i32_nxv16i32_nxv16i32:
858; CHECK:       # %bb.0: # %entry
859; CHECK-NEXT:    vl8re32.v v24, (a0)
860; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
861; CHECK-NEXT:    vrgather.vv v8, v16, v24, v0.t
862; CHECK-NEXT:    ret
863entry:
864  %a = call <vscale x 16 x i32> @llvm.riscv.vrgather.vv.mask.nxv16i32.iXLen(
865    <vscale x 16 x i32> %0,
866    <vscale x 16 x i32> %1,
867    <vscale x 16 x i32> %2,
868    <vscale x 16 x i1> %3,
869    iXLen %4, iXLen 1)
870
871  ret <vscale x 16 x i32> %a
872}
873
874declare <vscale x 1 x i64> @llvm.riscv.vrgather.vv.nxv1i64.iXLen(
875  <vscale x 1 x i64>,
876  <vscale x 1 x i64>,
877  <vscale x 1 x i64>,
878  iXLen)
879
; Unmasked e64/m1 gather, undef passthru: result is built in temp v10, then copied to v8 with vmv.v.v.
880define <vscale x 1 x i64> @intrinsic_vrgather_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
881; CHECK-LABEL: intrinsic_vrgather_vv_nxv1i64_nxv1i64_nxv1i64:
882; CHECK:       # %bb.0: # %entry
883; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
884; CHECK-NEXT:    vrgather.vv v10, v8, v9
885; CHECK-NEXT:    vmv.v.v v8, v10
886; CHECK-NEXT:    ret
887entry:
888  %a = call <vscale x 1 x i64> @llvm.riscv.vrgather.vv.nxv1i64.iXLen(
889    <vscale x 1 x i64> undef,
890    <vscale x 1 x i64> %0,
891    <vscale x 1 x i64> %1,
892    iXLen %2)
893
894  ret <vscale x 1 x i64> %a
895}
896
897declare <vscale x 1 x i64> @llvm.riscv.vrgather.vv.mask.nxv1i64.iXLen(
898  <vscale x 1 x i64>,
899  <vscale x 1 x i64>,
900  <vscale x 1 x i64>,
901  <vscale x 1 x i1>,
902  iXLen,
903  iXLen)
904
905define <vscale x 1 x i64> @intrinsic_vrgather_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
906; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv1i64_nxv1i64_nxv1i64:
907; CHECK:       # %bb.0: # %entry
908; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
909; CHECK-NEXT:    vrgather.vv v8, v9, v10, v0.t
910; CHECK-NEXT:    ret
911entry:
912  %a = call <vscale x 1 x i64> @llvm.riscv.vrgather.vv.mask.nxv1i64.iXLen(
913    <vscale x 1 x i64> %0,
914    <vscale x 1 x i64> %1,
915    <vscale x 1 x i64> %2,
916    <vscale x 1 x i1> %3,
917    iXLen %4, iXLen 1)
918
919  ret <vscale x 1 x i64> %a
920}
921
922declare <vscale x 2 x i64> @llvm.riscv.vrgather.vv.nxv2i64.iXLen(
923  <vscale x 2 x i64>,
924  <vscale x 2 x i64>,
925  <vscale x 2 x i64>,
926  iXLen)
927
928define <vscale x 2 x i64> @intrinsic_vrgather_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, iXLen %2) nounwind {
929; CHECK-LABEL: intrinsic_vrgather_vv_nxv2i64_nxv2i64_nxv2i64:
930; CHECK:       # %bb.0: # %entry
931; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
932; CHECK-NEXT:    vrgather.vv v12, v8, v10
933; CHECK-NEXT:    vmv.v.v v8, v12
934; CHECK-NEXT:    ret
935entry:
936  %a = call <vscale x 2 x i64> @llvm.riscv.vrgather.vv.nxv2i64.iXLen(
937    <vscale x 2 x i64> undef,
938    <vscale x 2 x i64> %0,
939    <vscale x 2 x i64> %1,
940    iXLen %2)
941
942  ret <vscale x 2 x i64> %a
943}
944
945declare <vscale x 2 x i64> @llvm.riscv.vrgather.vv.mask.nxv2i64.iXLen(
946  <vscale x 2 x i64>,
947  <vscale x 2 x i64>,
948  <vscale x 2 x i64>,
949  <vscale x 2 x i1>,
950  iXLen,
951  iXLen)
952
953define <vscale x 2 x i64> @intrinsic_vrgather_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
954; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv2i64_nxv2i64_nxv2i64:
955; CHECK:       # %bb.0: # %entry
956; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
957; CHECK-NEXT:    vrgather.vv v8, v10, v12, v0.t
958; CHECK-NEXT:    ret
959entry:
960  %a = call <vscale x 2 x i64> @llvm.riscv.vrgather.vv.mask.nxv2i64.iXLen(
961    <vscale x 2 x i64> %0,
962    <vscale x 2 x i64> %1,
963    <vscale x 2 x i64> %2,
964    <vscale x 2 x i1> %3,
965    iXLen %4, iXLen 1)
966
967  ret <vscale x 2 x i64> %a
968}
969
970declare <vscale x 4 x i64> @llvm.riscv.vrgather.vv.nxv4i64.iXLen(
971  <vscale x 4 x i64>,
972  <vscale x 4 x i64>,
973  <vscale x 4 x i64>,
974  iXLen)
975
976define <vscale x 4 x i64> @intrinsic_vrgather_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, iXLen %2) nounwind {
977; CHECK-LABEL: intrinsic_vrgather_vv_nxv4i64_nxv4i64_nxv4i64:
978; CHECK:       # %bb.0: # %entry
979; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
980; CHECK-NEXT:    vrgather.vv v16, v8, v12
981; CHECK-NEXT:    vmv.v.v v8, v16
982; CHECK-NEXT:    ret
983entry:
984  %a = call <vscale x 4 x i64> @llvm.riscv.vrgather.vv.nxv4i64.iXLen(
985    <vscale x 4 x i64> undef,
986    <vscale x 4 x i64> %0,
987    <vscale x 4 x i64> %1,
988    iXLen %2)
989
990  ret <vscale x 4 x i64> %a
991}
992
993declare <vscale x 4 x i64> @llvm.riscv.vrgather.vv.mask.nxv4i64.iXLen(
994  <vscale x 4 x i64>,
995  <vscale x 4 x i64>,
996  <vscale x 4 x i64>,
997  <vscale x 4 x i1>,
998  iXLen,
999  iXLen)
1000
1001define <vscale x 4 x i64> @intrinsic_vrgather_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
1002; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv4i64_nxv4i64_nxv4i64:
1003; CHECK:       # %bb.0: # %entry
1004; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
1005; CHECK-NEXT:    vrgather.vv v8, v12, v16, v0.t
1006; CHECK-NEXT:    ret
1007entry:
1008  %a = call <vscale x 4 x i64> @llvm.riscv.vrgather.vv.mask.nxv4i64.iXLen(
1009    <vscale x 4 x i64> %0,
1010    <vscale x 4 x i64> %1,
1011    <vscale x 4 x i64> %2,
1012    <vscale x 4 x i1> %3,
1013    iXLen %4, iXLen 1)
1014
1015  ret <vscale x 4 x i64> %a
1016}
1017
1018declare <vscale x 8 x i64> @llvm.riscv.vrgather.vv.nxv8i64.iXLen(
1019  <vscale x 8 x i64>,
1020  <vscale x 8 x i64>,
1021  <vscale x 8 x i64>,
1022  iXLen)
1023
1024define <vscale x 8 x i64> @intrinsic_vrgather_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, iXLen %2) nounwind {
1025; CHECK-LABEL: intrinsic_vrgather_vv_nxv8i64_nxv8i64_nxv8i64:
1026; CHECK:       # %bb.0: # %entry
1027; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
1028; CHECK-NEXT:    vrgather.vv v24, v8, v16
1029; CHECK-NEXT:    vmv.v.v v8, v24
1030; CHECK-NEXT:    ret
1031entry:
1032  %a = call <vscale x 8 x i64> @llvm.riscv.vrgather.vv.nxv8i64.iXLen(
1033    <vscale x 8 x i64> undef,
1034    <vscale x 8 x i64> %0,
1035    <vscale x 8 x i64> %1,
1036    iXLen %2)
1037
1038  ret <vscale x 8 x i64> %a
1039}
1040
1041declare <vscale x 8 x i64> @llvm.riscv.vrgather.vv.mask.nxv8i64.iXLen(
1042  <vscale x 8 x i64>,
1043  <vscale x 8 x i64>,
1044  <vscale x 8 x i64>,
1045  <vscale x 8 x i1>,
1046  iXLen,
1047  iXLen)
1048
1049define <vscale x 8 x i64> @intrinsic_vrgather_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
1050; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv8i64_nxv8i64_nxv8i64:
1051; CHECK:       # %bb.0: # %entry
1052; CHECK-NEXT:    vl8re64.v v24, (a0)
1053; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
1054; CHECK-NEXT:    vrgather.vv v8, v16, v24, v0.t
1055; CHECK-NEXT:    ret
1056entry:
1057  %a = call <vscale x 8 x i64> @llvm.riscv.vrgather.vv.mask.nxv8i64.iXLen(
1058    <vscale x 8 x i64> %0,
1059    <vscale x 8 x i64> %1,
1060    <vscale x 8 x i64> %2,
1061    <vscale x 8 x i1> %3,
1062    iXLen %4, iXLen 1)
1063
1064  ret <vscale x 8 x i64> %a
1065}
1066
; vrgather.vv tests for f16 element vectors with i16 index vectors, covering
; LMUL mf4 through m8. Same codegen pattern as the integer tests: undef
; passthru -> "ta, ma" plus a copy back to v8; masked + policy 1 -> "ta, mu"
; with the passthru tied in place.
declare <vscale x 1 x half> @llvm.riscv.vrgather.vv.nxv1f16.iXLen(
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  <vscale x 1 x i16>,
  iXLen)

define <vscale x 1 x half> @intrinsic_vrgather_vv_nxv1f16_nxv1f16_nxv1i16(<vscale x 1 x half> %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vv_nxv1f16_nxv1f16_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vrgather.vv v10, v8, v9
; CHECK-NEXT:    vmv1r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vrgather.vv.nxv1f16.iXLen(
    <vscale x 1 x half> undef,
    <vscale x 1 x half> %0,
    <vscale x 1 x i16> %1,
    iXLen %2)

  ret <vscale x 1 x half> %a
}

declare <vscale x 1 x half> @llvm.riscv.vrgather.vv.mask.nxv1f16.iXLen(
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  <vscale x 1 x i16>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen)

define <vscale x 1 x half> @intrinsic_vrgather_mask_vv_nxv1f16_nxv1f16_nxv1i16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv1f16_nxv1f16_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT:    vrgather.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vrgather.vv.mask.nxv1f16.iXLen(
    <vscale x 1 x half> %0,
    <vscale x 1 x half> %1,
    <vscale x 1 x i16> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x half> %a
}

declare <vscale x 2 x half> @llvm.riscv.vrgather.vv.nxv2f16.iXLen(
  <vscale x 2 x half>,
  <vscale x 2 x half>,
  <vscale x 2 x i16>,
  iXLen)

define <vscale x 2 x half> @intrinsic_vrgather_vv_nxv2f16_nxv2f16_nxv2i16(<vscale x 2 x half> %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vv_nxv2f16_nxv2f16_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vrgather.vv v10, v8, v9
; CHECK-NEXT:    vmv1r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x half> @llvm.riscv.vrgather.vv.nxv2f16.iXLen(
    <vscale x 2 x half> undef,
    <vscale x 2 x half> %0,
    <vscale x 2 x i16> %1,
    iXLen %2)

  ret <vscale x 2 x half> %a
}

declare <vscale x 2 x half> @llvm.riscv.vrgather.vv.mask.nxv2f16.iXLen(
  <vscale x 2 x half>,
  <vscale x 2 x half>,
  <vscale x 2 x i16>,
  <vscale x 2 x i1>,
  iXLen,
  iXLen)

define <vscale x 2 x half> @intrinsic_vrgather_mask_vv_nxv2f16_nxv2f16_nxv2i16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv2f16_nxv2f16_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT:    vrgather.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x half> @llvm.riscv.vrgather.vv.mask.nxv2f16.iXLen(
    <vscale x 2 x half> %0,
    <vscale x 2 x half> %1,
    <vscale x 2 x i16> %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x half> %a
}

declare <vscale x 4 x half> @llvm.riscv.vrgather.vv.nxv4f16.iXLen(
  <vscale x 4 x half>,
  <vscale x 4 x half>,
  <vscale x 4 x i16>,
  iXLen)

define <vscale x 4 x half> @intrinsic_vrgather_vv_nxv4f16_nxv4f16_nxv4i16(<vscale x 4 x half> %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vv_nxv4f16_nxv4f16_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vrgather.vv v10, v8, v9
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vrgather.vv.nxv4f16.iXLen(
    <vscale x 4 x half> undef,
    <vscale x 4 x half> %0,
    <vscale x 4 x i16> %1,
    iXLen %2)

  ret <vscale x 4 x half> %a
}

declare <vscale x 4 x half> @llvm.riscv.vrgather.vv.mask.nxv4f16.iXLen(
  <vscale x 4 x half>,
  <vscale x 4 x half>,
  <vscale x 4 x i16>,
  <vscale x 4 x i1>,
  iXLen,
  iXLen)

define <vscale x 4 x half> @intrinsic_vrgather_mask_vv_nxv4f16_nxv4f16_nxv4i16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv4f16_nxv4f16_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT:    vrgather.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vrgather.vv.mask.nxv4f16.iXLen(
    <vscale x 4 x half> %0,
    <vscale x 4 x half> %1,
    <vscale x 4 x i16> %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x half> %a
}

declare <vscale x 8 x half> @llvm.riscv.vrgather.vv.nxv8f16.iXLen(
  <vscale x 8 x half>,
  <vscale x 8 x half>,
  <vscale x 8 x i16>,
  iXLen)

define <vscale x 8 x half> @intrinsic_vrgather_vv_nxv8f16_nxv8f16_nxv8i16(<vscale x 8 x half> %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vv_nxv8f16_nxv8f16_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vrgather.vv v12, v8, v10
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x half> @llvm.riscv.vrgather.vv.nxv8f16.iXLen(
    <vscale x 8 x half> undef,
    <vscale x 8 x half> %0,
    <vscale x 8 x i16> %1,
    iXLen %2)

  ret <vscale x 8 x half> %a
}

declare <vscale x 8 x half> @llvm.riscv.vrgather.vv.mask.nxv8f16.iXLen(
  <vscale x 8 x half>,
  <vscale x 8 x half>,
  <vscale x 8 x i16>,
  <vscale x 8 x i1>,
  iXLen,
  iXLen)

define <vscale x 8 x half> @intrinsic_vrgather_mask_vv_nxv8f16_nxv8f16_nxv8i16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv8f16_nxv8f16_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT:    vrgather.vv v8, v10, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x half> @llvm.riscv.vrgather.vv.mask.nxv8f16.iXLen(
    <vscale x 8 x half> %0,
    <vscale x 8 x half> %1,
    <vscale x 8 x i16> %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x half> %a
}

declare <vscale x 16 x half> @llvm.riscv.vrgather.vv.nxv16f16.iXLen(
  <vscale x 16 x half>,
  <vscale x 16 x half>,
  <vscale x 16 x i16>,
  iXLen)

define <vscale x 16 x half> @intrinsic_vrgather_vv_nxv16f16_nxv16f16_nxv16i16(<vscale x 16 x half> %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vv_nxv16f16_nxv16f16_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    vrgather.vv v16, v8, v12
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x half> @llvm.riscv.vrgather.vv.nxv16f16.iXLen(
    <vscale x 16 x half> undef,
    <vscale x 16 x half> %0,
    <vscale x 16 x i16> %1,
    iXLen %2)

  ret <vscale x 16 x half> %a
}

declare <vscale x 16 x half> @llvm.riscv.vrgather.vv.mask.nxv16f16.iXLen(
  <vscale x 16 x half>,
  <vscale x 16 x half>,
  <vscale x 16 x i16>,
  <vscale x 16 x i1>,
  iXLen,
  iXLen)

define <vscale x 16 x half> @intrinsic_vrgather_mask_vv_nxv16f16_nxv16f16_nxv16i16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv16f16_nxv16f16_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT:    vrgather.vv v8, v12, v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x half> @llvm.riscv.vrgather.vv.mask.nxv16f16.iXLen(
    <vscale x 16 x half> %0,
    <vscale x 16 x half> %1,
    <vscale x 16 x i16> %2,
    <vscale x 16 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 16 x half> %a
}

declare <vscale x 32 x half> @llvm.riscv.vrgather.vv.nxv32f16.iXLen(
  <vscale x 32 x half>,
  <vscale x 32 x half>,
  <vscale x 32 x i16>,
  iXLen)

define <vscale x 32 x half> @intrinsic_vrgather_vv_nxv32f16_nxv32f16_nxv32i16(<vscale x 32 x half> %0, <vscale x 32 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vv_nxv32f16_nxv32f16_nxv32i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT:    vrgather.vv v24, v8, v16
; CHECK-NEXT:    vmv.v.v v8, v24
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x half> @llvm.riscv.vrgather.vv.nxv32f16.iXLen(
    <vscale x 32 x half> undef,
    <vscale x 32 x half> %0,
    <vscale x 32 x i16> %1,
    iXLen %2)

  ret <vscale x 32 x half> %a
}

declare <vscale x 32 x half> @llvm.riscv.vrgather.vv.mask.nxv32f16.iXLen(
  <vscale x 32 x half>,
  <vscale x 32 x half>,
  <vscale x 32 x i16>,
  <vscale x 32 x i1>,
  iXLen,
  iXLen)

; m8 masked case: index operand spills to an indirect argument, reloaded from
; (a0) with vl8re16.v.
define <vscale x 32 x half> @intrinsic_vrgather_mask_vv_nxv32f16_nxv32f16_nxv32i16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv32f16_nxv32f16_nxv32i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vl8re16.v v24, (a0)
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
; CHECK-NEXT:    vrgather.vv v8, v16, v24, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x half> @llvm.riscv.vrgather.vv.mask.nxv32f16.iXLen(
    <vscale x 32 x half> %0,
    <vscale x 32 x half> %1,
    <vscale x 32 x i16> %2,
    <vscale x 32 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 32 x half> %a
}
1355
; vrgather.vv tests for f32 element vectors with i32 index vectors, covering
; LMUL mf2 through m8. Same pattern as the preceding sections.
declare <vscale x 1 x float> @llvm.riscv.vrgather.vv.nxv1f32.iXLen(
  <vscale x 1 x float>,
  <vscale x 1 x float>,
  <vscale x 1 x i32>,
  iXLen)

define <vscale x 1 x float> @intrinsic_vrgather_vv_nxv1f32_nxv1f32_nxv1i32(<vscale x 1 x float> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vv_nxv1f32_nxv1f32_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vrgather.vv v10, v8, v9
; CHECK-NEXT:    vmv1r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vrgather.vv.nxv1f32.iXLen(
    <vscale x 1 x float> undef,
    <vscale x 1 x float> %0,
    <vscale x 1 x i32> %1,
    iXLen %2)

  ret <vscale x 1 x float> %a
}

declare <vscale x 1 x float> @llvm.riscv.vrgather.vv.mask.nxv1f32.iXLen(
  <vscale x 1 x float>,
  <vscale x 1 x float>,
  <vscale x 1 x i32>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen)

define <vscale x 1 x float> @intrinsic_vrgather_mask_vv_nxv1f32_nxv1f32_nxv1i32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv1f32_nxv1f32_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT:    vrgather.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vrgather.vv.mask.nxv1f32.iXLen(
    <vscale x 1 x float> %0,
    <vscale x 1 x float> %1,
    <vscale x 1 x i32> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x float> %a
}

declare <vscale x 2 x float> @llvm.riscv.vrgather.vv.nxv2f32.iXLen(
  <vscale x 2 x float>,
  <vscale x 2 x float>,
  <vscale x 2 x i32>,
  iXLen)

define <vscale x 2 x float> @intrinsic_vrgather_vv_nxv2f32_nxv2f32_nxv2i32(<vscale x 2 x float> %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vv_nxv2f32_nxv2f32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vrgather.vv v10, v8, v9
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vrgather.vv.nxv2f32.iXLen(
    <vscale x 2 x float> undef,
    <vscale x 2 x float> %0,
    <vscale x 2 x i32> %1,
    iXLen %2)

  ret <vscale x 2 x float> %a
}

declare <vscale x 2 x float> @llvm.riscv.vrgather.vv.mask.nxv2f32.iXLen(
  <vscale x 2 x float>,
  <vscale x 2 x float>,
  <vscale x 2 x i32>,
  <vscale x 2 x i1>,
  iXLen,
  iXLen)

define <vscale x 2 x float> @intrinsic_vrgather_mask_vv_nxv2f32_nxv2f32_nxv2i32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv2f32_nxv2f32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT:    vrgather.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vrgather.vv.mask.nxv2f32.iXLen(
    <vscale x 2 x float> %0,
    <vscale x 2 x float> %1,
    <vscale x 2 x i32> %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x float> %a
}

declare <vscale x 4 x float> @llvm.riscv.vrgather.vv.nxv4f32.iXLen(
  <vscale x 4 x float>,
  <vscale x 4 x float>,
  <vscale x 4 x i32>,
  iXLen)

define <vscale x 4 x float> @intrinsic_vrgather_vv_nxv4f32_nxv4f32_nxv4i32(<vscale x 4 x float> %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vv_nxv4f32_nxv4f32_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vrgather.vv v12, v8, v10
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x float> @llvm.riscv.vrgather.vv.nxv4f32.iXLen(
    <vscale x 4 x float> undef,
    <vscale x 4 x float> %0,
    <vscale x 4 x i32> %1,
    iXLen %2)

  ret <vscale x 4 x float> %a
}

declare <vscale x 4 x float> @llvm.riscv.vrgather.vv.mask.nxv4f32.iXLen(
  <vscale x 4 x float>,
  <vscale x 4 x float>,
  <vscale x 4 x i32>,
  <vscale x 4 x i1>,
  iXLen,
  iXLen)

define <vscale x 4 x float> @intrinsic_vrgather_mask_vv_nxv4f32_nxv4f32_nxv4i32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv4f32_nxv4f32_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT:    vrgather.vv v8, v10, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x float> @llvm.riscv.vrgather.vv.mask.nxv4f32.iXLen(
    <vscale x 4 x float> %0,
    <vscale x 4 x float> %1,
    <vscale x 4 x i32> %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x float> %a
}

declare <vscale x 8 x float> @llvm.riscv.vrgather.vv.nxv8f32.iXLen(
  <vscale x 8 x float>,
  <vscale x 8 x float>,
  <vscale x 8 x i32>,
  iXLen)

define <vscale x 8 x float> @intrinsic_vrgather_vv_nxv8f32_nxv8f32_nxv8i32(<vscale x 8 x float> %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vv_nxv8f32_nxv8f32_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vrgather.vv v16, v8, v12
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vrgather.vv.nxv8f32.iXLen(
    <vscale x 8 x float> undef,
    <vscale x 8 x float> %0,
    <vscale x 8 x i32> %1,
    iXLen %2)

  ret <vscale x 8 x float> %a
}

declare <vscale x 8 x float> @llvm.riscv.vrgather.vv.mask.nxv8f32.iXLen(
  <vscale x 8 x float>,
  <vscale x 8 x float>,
  <vscale x 8 x i32>,
  <vscale x 8 x i1>,
  iXLen,
  iXLen)

define <vscale x 8 x float> @intrinsic_vrgather_mask_vv_nxv8f32_nxv8f32_nxv8i32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv8f32_nxv8f32_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT:    vrgather.vv v8, v12, v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vrgather.vv.mask.nxv8f32.iXLen(
    <vscale x 8 x float> %0,
    <vscale x 8 x float> %1,
    <vscale x 8 x i32> %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x float> %a
}

declare <vscale x 16 x float> @llvm.riscv.vrgather.vv.nxv16f32.iXLen(
  <vscale x 16 x float>,
  <vscale x 16 x float>,
  <vscale x 16 x i32>,
  iXLen)

define <vscale x 16 x float> @intrinsic_vrgather_vv_nxv16f32_nxv16f32_nxv16i32(<vscale x 16 x float> %0, <vscale x 16 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vv_nxv16f32_nxv16f32_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT:    vrgather.vv v24, v8, v16
; CHECK-NEXT:    vmv.v.v v8, v24
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x float> @llvm.riscv.vrgather.vv.nxv16f32.iXLen(
    <vscale x 16 x float> undef,
    <vscale x 16 x float> %0,
    <vscale x 16 x i32> %1,
    iXLen %2)

  ret <vscale x 16 x float> %a
}

declare <vscale x 16 x float> @llvm.riscv.vrgather.vv.mask.nxv16f32.iXLen(
  <vscale x 16 x float>,
  <vscale x 16 x float>,
  <vscale x 16 x i32>,
  <vscale x 16 x i1>,
  iXLen,
  iXLen)

; m8 masked case: index operand spills to an indirect argument, reloaded from
; (a0) with vl8re32.v.
define <vscale x 16 x float> @intrinsic_vrgather_mask_vv_nxv16f32_nxv16f32_nxv16i32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv16f32_nxv16f32_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vl8re32.v v24, (a0)
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
; CHECK-NEXT:    vrgather.vv v8, v16, v24, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x float> @llvm.riscv.vrgather.vv.mask.nxv16f32.iXLen(
    <vscale x 16 x float> %0,
    <vscale x 16 x float> %1,
    <vscale x 16 x i32> %2,
    <vscale x 16 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 16 x float> %a
}
1596
; vrgather.vv tests for f64 element vectors with i64 index vectors, covering
; LMUL m1 through m8. Same pattern as the preceding sections.
declare <vscale x 1 x double> @llvm.riscv.vrgather.vv.nxv1f64.iXLen(
  <vscale x 1 x double>,
  <vscale x 1 x double>,
  <vscale x 1 x i64>,
  iXLen)

define <vscale x 1 x double> @intrinsic_vrgather_vv_nxv1f64_nxv1f64_nxv1i64(<vscale x 1 x double> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vv_nxv1f64_nxv1f64_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vrgather.vv v10, v8, v9
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vrgather.vv.nxv1f64.iXLen(
    <vscale x 1 x double> undef,
    <vscale x 1 x double> %0,
    <vscale x 1 x i64> %1,
    iXLen %2)

  ret <vscale x 1 x double> %a
}

declare <vscale x 1 x double> @llvm.riscv.vrgather.vv.mask.nxv1f64.iXLen(
  <vscale x 1 x double>,
  <vscale x 1 x double>,
  <vscale x 1 x i64>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen)

define <vscale x 1 x double> @intrinsic_vrgather_mask_vv_nxv1f64_nxv1f64_nxv1i64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv1f64_nxv1f64_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT:    vrgather.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vrgather.vv.mask.nxv1f64.iXLen(
    <vscale x 1 x double> %0,
    <vscale x 1 x double> %1,
    <vscale x 1 x i64> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x double> %a
}

declare <vscale x 2 x double> @llvm.riscv.vrgather.vv.nxv2f64.iXLen(
  <vscale x 2 x double>,
  <vscale x 2 x double>,
  <vscale x 2 x i64>,
  iXLen)

define <vscale x 2 x double> @intrinsic_vrgather_vv_nxv2f64_nxv2f64_nxv2i64(<vscale x 2 x double> %0, <vscale x 2 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vv_nxv2f64_nxv2f64_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vrgather.vv v12, v8, v10
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x double> @llvm.riscv.vrgather.vv.nxv2f64.iXLen(
    <vscale x 2 x double> undef,
    <vscale x 2 x double> %0,
    <vscale x 2 x i64> %1,
    iXLen %2)

  ret <vscale x 2 x double> %a
}

declare <vscale x 2 x double> @llvm.riscv.vrgather.vv.mask.nxv2f64.iXLen(
  <vscale x 2 x double>,
  <vscale x 2 x double>,
  <vscale x 2 x i64>,
  <vscale x 2 x i1>,
  iXLen,
  iXLen)

define <vscale x 2 x double> @intrinsic_vrgather_mask_vv_nxv2f64_nxv2f64_nxv2i64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv2f64_nxv2f64_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT:    vrgather.vv v8, v10, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x double> @llvm.riscv.vrgather.vv.mask.nxv2f64.iXLen(
    <vscale x 2 x double> %0,
    <vscale x 2 x double> %1,
    <vscale x 2 x i64> %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x double> %a
}

declare <vscale x 4 x double> @llvm.riscv.vrgather.vv.nxv4f64.iXLen(
  <vscale x 4 x double>,
  <vscale x 4 x double>,
  <vscale x 4 x i64>,
  iXLen)

define <vscale x 4 x double> @intrinsic_vrgather_vv_nxv4f64_nxv4f64_nxv4i64(<vscale x 4 x double> %0, <vscale x 4 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vv_nxv4f64_nxv4f64_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    vrgather.vv v16, v8, v12
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x double> @llvm.riscv.vrgather.vv.nxv4f64.iXLen(
    <vscale x 4 x double> undef,
    <vscale x 4 x double> %0,
    <vscale x 4 x i64> %1,
    iXLen %2)

  ret <vscale x 4 x double> %a
}

declare <vscale x 4 x double> @llvm.riscv.vrgather.vv.mask.nxv4f64.iXLen(
  <vscale x 4 x double>,
  <vscale x 4 x double>,
  <vscale x 4 x i64>,
  <vscale x 4 x i1>,
  iXLen,
  iXLen)

define <vscale x 4 x double> @intrinsic_vrgather_mask_vv_nxv4f64_nxv4f64_nxv4i64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv4f64_nxv4f64_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT:    vrgather.vv v8, v12, v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x double> @llvm.riscv.vrgather.vv.mask.nxv4f64.iXLen(
    <vscale x 4 x double> %0,
    <vscale x 4 x double> %1,
    <vscale x 4 x i64> %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x double> %a
}

declare <vscale x 8 x double> @llvm.riscv.vrgather.vv.nxv8f64.iXLen(
  <vscale x 8 x double>,
  <vscale x 8 x double>,
  <vscale x 8 x i64>,
  iXLen)

define <vscale x 8 x double> @intrinsic_vrgather_vv_nxv8f64_nxv8f64_nxv8i64(<vscale x 8 x double> %0, <vscale x 8 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vv_nxv8f64_nxv8f64_nxv8i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vrgather.vv v24, v8, v16
; CHECK-NEXT:    vmv.v.v v8, v24
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x double> @llvm.riscv.vrgather.vv.nxv8f64.iXLen(
    <vscale x 8 x double> undef,
    <vscale x 8 x double> %0,
    <vscale x 8 x i64> %1,
    iXLen %2)

  ret <vscale x 8 x double> %a
}

declare <vscale x 8 x double> @llvm.riscv.vrgather.vv.mask.nxv8f64.iXLen(
  <vscale x 8 x double>,
  <vscale x 8 x double>,
  <vscale x 8 x i64>,
  <vscale x 8 x i1>,
  iXLen,
  iXLen)

; m8 masked case: index operand spills to an indirect argument, reloaded from
; (a0) with vl8re64.v.
define <vscale x 8 x double> @intrinsic_vrgather_mask_vv_nxv8f64_nxv8f64_nxv8i64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv8f64_nxv8f64_nxv8i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vl8re64.v v24, (a0)
; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
; CHECK-NEXT:    vrgather.vv v8, v16, v24, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x double> @llvm.riscv.vrgather.vv.mask.nxv8f64.iXLen(
    <vscale x 8 x double> %0,
    <vscale x 8 x double> %1,
    <vscale x 8 x i64> %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x double> %a
}
1789
; vrgather.vx tests for i8 element types: one unmasked and one masked test per
; LMUL from mf8 through m8. Unmasked tests pass undef as the passthru operand
; and expect a tail-agnostic (ta, ma) vsetvli plus a gather into a temporary
; register (group) copied back to v8; masked tests pass policy immediate 1 and
; expect mask-undisturbed (ta, mu) with the gather writing v8 under v0.t.
; NOTE(review): CHECK lines are autogenerated by update_llc_test_checks.py —
; regenerate rather than hand-editing.
declare <vscale x 1 x i8> @llvm.riscv.vrgather.vx.nxv1i8.iXLen(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  iXLen,
  iXLen)

define <vscale x 1 x i8> @intrinsic_vrgather_vx_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vx_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vrgather.vx v9, v8, a0
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vrgather.vx.nxv1i8.iXLen(
    <vscale x 1 x i8> undef,
    <vscale x 1 x i8> %0,
    iXLen %1,
    iXLen %2)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vrgather.vx.mask.nxv1i8.iXLen(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  iXLen,
  <vscale x 1 x i1>,
  iXLen,
  iXLen)

define <vscale x 1 x i8> @intrinsic_vrgather_mask_vx_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT:    vrgather.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vrgather.vx.mask.nxv1i8.iXLen(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    iXLen %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 2 x i8> @llvm.riscv.vrgather.vx.nxv2i8.iXLen(
  <vscale x 2 x i8>,
  <vscale x 2 x i8>,
  iXLen,
  iXLen)

define <vscale x 2 x i8> @intrinsic_vrgather_vx_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vx_nxv2i8_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT:    vrgather.vx v9, v8, a0
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vrgather.vx.nxv2i8.iXLen(
    <vscale x 2 x i8> undef,
    <vscale x 2 x i8> %0,
    iXLen %1,
    iXLen %2)

  ret <vscale x 2 x i8> %a
}

declare <vscale x 2 x i8> @llvm.riscv.vrgather.vx.mask.nxv2i8.iXLen(
  <vscale x 2 x i8>,
  <vscale x 2 x i8>,
  iXLen,
  <vscale x 2 x i1>,
  iXLen,
  iXLen)

define <vscale x 2 x i8> @intrinsic_vrgather_mask_vx_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv2i8_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT:    vrgather.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vrgather.vx.mask.nxv2i8.iXLen(
    <vscale x 2 x i8> %0,
    <vscale x 2 x i8> %1,
    iXLen %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x i8> %a
}

declare <vscale x 4 x i8> @llvm.riscv.vrgather.vx.nxv4i8.iXLen(
  <vscale x 4 x i8>,
  <vscale x 4 x i8>,
  iXLen,
  iXLen)

define <vscale x 4 x i8> @intrinsic_vrgather_vx_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vx_nxv4i8_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vrgather.vx v9, v8, a0
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vrgather.vx.nxv4i8.iXLen(
    <vscale x 4 x i8> undef,
    <vscale x 4 x i8> %0,
    iXLen %1,
    iXLen %2)

  ret <vscale x 4 x i8> %a
}

declare <vscale x 4 x i8> @llvm.riscv.vrgather.vx.mask.nxv4i8.iXLen(
  <vscale x 4 x i8>,
  <vscale x 4 x i8>,
  iXLen,
  <vscale x 4 x i1>,
  iXLen,
  iXLen)

define <vscale x 4 x i8> @intrinsic_vrgather_mask_vx_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv4i8_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT:    vrgather.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vrgather.vx.mask.nxv4i8.iXLen(
    <vscale x 4 x i8> %0,
    <vscale x 4 x i8> %1,
    iXLen %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vrgather.vx.nxv8i8.iXLen(
  <vscale x 8 x i8>,
  <vscale x 8 x i8>,
  iXLen,
  iXLen)

define <vscale x 8 x i8> @intrinsic_vrgather_vx_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vx_nxv8i8_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT:    vrgather.vx v9, v8, a0
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vrgather.vx.nxv8i8.iXLen(
    <vscale x 8 x i8> undef,
    <vscale x 8 x i8> %0,
    iXLen %1,
    iXLen %2)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vrgather.vx.mask.nxv8i8.iXLen(
  <vscale x 8 x i8>,
  <vscale x 8 x i8>,
  iXLen,
  <vscale x 8 x i1>,
  iXLen,
  iXLen)

define <vscale x 8 x i8> @intrinsic_vrgather_mask_vx_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv8i8_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT:    vrgather.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vrgather.vx.mask.nxv8i8.iXLen(
    <vscale x 8 x i8> %0,
    <vscale x 8 x i8> %1,
    iXLen %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 16 x i8> @llvm.riscv.vrgather.vx.nxv16i8.iXLen(
  <vscale x 16 x i8>,
  <vscale x 16 x i8>,
  iXLen,
  iXLen)

define <vscale x 16 x i8> @intrinsic_vrgather_vx_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vx_nxv16i8_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT:    vrgather.vx v10, v8, a0
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vrgather.vx.nxv16i8.iXLen(
    <vscale x 16 x i8> undef,
    <vscale x 16 x i8> %0,
    iXLen %1,
    iXLen %2)

  ret <vscale x 16 x i8> %a
}

declare <vscale x 16 x i8> @llvm.riscv.vrgather.vx.mask.nxv16i8.iXLen(
  <vscale x 16 x i8>,
  <vscale x 16 x i8>,
  iXLen,
  <vscale x 16 x i1>,
  iXLen,
  iXLen)

define <vscale x 16 x i8> @intrinsic_vrgather_mask_vx_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, iXLen %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv16i8_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT:    vrgather.vx v8, v10, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vrgather.vx.mask.nxv16i8.iXLen(
    <vscale x 16 x i8> %0,
    <vscale x 16 x i8> %1,
    iXLen %2,
    <vscale x 16 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 16 x i8> %a
}

declare <vscale x 32 x i8> @llvm.riscv.vrgather.vx.nxv32i8.iXLen(
  <vscale x 32 x i8>,
  <vscale x 32 x i8>,
  iXLen,
  iXLen)

define <vscale x 32 x i8> @intrinsic_vrgather_vx_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vx_nxv32i8_nxv32i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT:    vrgather.vx v12, v8, a0
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vrgather.vx.nxv32i8.iXLen(
    <vscale x 32 x i8> undef,
    <vscale x 32 x i8> %0,
    iXLen %1,
    iXLen %2)

  ret <vscale x 32 x i8> %a
}

declare <vscale x 32 x i8> @llvm.riscv.vrgather.vx.mask.nxv32i8.iXLen(
  <vscale x 32 x i8>,
  <vscale x 32 x i8>,
  iXLen,
  <vscale x 32 x i1>,
  iXLen,
  iXLen)

define <vscale x 32 x i8> @intrinsic_vrgather_mask_vx_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, iXLen %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv32i8_nxv32i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT:    vrgather.vx v8, v12, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vrgather.vx.mask.nxv32i8.iXLen(
    <vscale x 32 x i8> %0,
    <vscale x 32 x i8> %1,
    iXLen %2,
    <vscale x 32 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 32 x i8> %a
}

declare <vscale x 64 x i8> @llvm.riscv.vrgather.vx.nxv64i8.iXLen(
  <vscale x 64 x i8>,
  <vscale x 64 x i8>,
  iXLen,
  iXLen)

define <vscale x 64 x i8> @intrinsic_vrgather_vx_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vx_nxv64i8_nxv64i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT:    vrgather.vx v16, v8, a0
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 64 x i8> @llvm.riscv.vrgather.vx.nxv64i8.iXLen(
    <vscale x 64 x i8> undef,
    <vscale x 64 x i8> %0,
    iXLen %1,
    iXLen %2)

  ret <vscale x 64 x i8> %a
}

declare <vscale x 64 x i8> @llvm.riscv.vrgather.vx.mask.nxv64i8.iXLen(
  <vscale x 64 x i8>,
  <vscale x 64 x i8>,
  iXLen,
  <vscale x 64 x i1>,
  iXLen,
  iXLen)

define <vscale x 64 x i8> @intrinsic_vrgather_mask_vx_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, iXLen %2, <vscale x 64 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv64i8_nxv64i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
; CHECK-NEXT:    vrgather.vx v8, v16, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 64 x i8> @llvm.riscv.vrgather.vx.mask.nxv64i8.iXLen(
    <vscale x 64 x i8> %0,
    <vscale x 64 x i8> %1,
    iXLen %2,
    <vscale x 64 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 64 x i8> %a
}
2125
; vrgather.vx tests for i16 element types, one unmasked + one masked test per
; LMUL from mf4 through m8. Same structure as the i8 group above: unmasked
; tests use an undef passthru (ta, ma; gather into a temporary, copy to v8),
; masked tests use policy immediate 1 (ta, mu; gather into v8 under v0.t).
; NOTE(review): CHECK lines are autogenerated by update_llc_test_checks.py.
declare <vscale x 1 x i16> @llvm.riscv.vrgather.vx.nxv1i16.iXLen(
  <vscale x 1 x i16>,
  <vscale x 1 x i16>,
  iXLen,
  iXLen)

define <vscale x 1 x i16> @intrinsic_vrgather_vx_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vx_nxv1i16_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vrgather.vx v9, v8, a0
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vrgather.vx.nxv1i16.iXLen(
    <vscale x 1 x i16> undef,
    <vscale x 1 x i16> %0,
    iXLen %1,
    iXLen %2)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vrgather.vx.mask.nxv1i16.iXLen(
  <vscale x 1 x i16>,
  <vscale x 1 x i16>,
  iXLen,
  <vscale x 1 x i1>,
  iXLen,
  iXLen)

define <vscale x 1 x i16> @intrinsic_vrgather_mask_vx_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv1i16_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT:    vrgather.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vrgather.vx.mask.nxv1i16.iXLen(
    <vscale x 1 x i16> %0,
    <vscale x 1 x i16> %1,
    iXLen %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vrgather.vx.nxv2i16.iXLen(
  <vscale x 2 x i16>,
  <vscale x 2 x i16>,
  iXLen,
  iXLen)

define <vscale x 2 x i16> @intrinsic_vrgather_vx_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vx_nxv2i16_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT:    vrgather.vx v9, v8, a0
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vrgather.vx.nxv2i16.iXLen(
    <vscale x 2 x i16> undef,
    <vscale x 2 x i16> %0,
    iXLen %1,
    iXLen %2)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vrgather.vx.mask.nxv2i16.iXLen(
  <vscale x 2 x i16>,
  <vscale x 2 x i16>,
  iXLen,
  <vscale x 2 x i1>,
  iXLen,
  iXLen)

define <vscale x 2 x i16> @intrinsic_vrgather_mask_vx_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv2i16_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT:    vrgather.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vrgather.vx.mask.nxv2i16.iXLen(
    <vscale x 2 x i16> %0,
    <vscale x 2 x i16> %1,
    iXLen %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vrgather.vx.nxv4i16.iXLen(
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  iXLen,
  iXLen)

define <vscale x 4 x i16> @intrinsic_vrgather_vx_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vx_nxv4i16_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vrgather.vx v9, v8, a0
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vrgather.vx.nxv4i16.iXLen(
    <vscale x 4 x i16> undef,
    <vscale x 4 x i16> %0,
    iXLen %1,
    iXLen %2)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vrgather.vx.mask.nxv4i16.iXLen(
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  iXLen,
  <vscale x 4 x i1>,
  iXLen,
  iXLen)

define <vscale x 4 x i16> @intrinsic_vrgather_mask_vx_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv4i16_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT:    vrgather.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vrgather.vx.mask.nxv4i16.iXLen(
    <vscale x 4 x i16> %0,
    <vscale x 4 x i16> %1,
    iXLen %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vrgather.vx.nxv8i16.iXLen(
  <vscale x 8 x i16>,
  <vscale x 8 x i16>,
  iXLen,
  iXLen)

define <vscale x 8 x i16> @intrinsic_vrgather_vx_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vx_nxv8i16_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT:    vrgather.vx v10, v8, a0
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vrgather.vx.nxv8i16.iXLen(
    <vscale x 8 x i16> undef,
    <vscale x 8 x i16> %0,
    iXLen %1,
    iXLen %2)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vrgather.vx.mask.nxv8i16.iXLen(
  <vscale x 8 x i16>,
  <vscale x 8 x i16>,
  iXLen,
  <vscale x 8 x i1>,
  iXLen,
  iXLen)

define <vscale x 8 x i16> @intrinsic_vrgather_mask_vx_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv8i16_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT:    vrgather.vx v8, v10, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vrgather.vx.mask.nxv8i16.iXLen(
    <vscale x 8 x i16> %0,
    <vscale x 8 x i16> %1,
    iXLen %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vrgather.vx.nxv16i16.iXLen(
  <vscale x 16 x i16>,
  <vscale x 16 x i16>,
  iXLen,
  iXLen)

define <vscale x 16 x i16> @intrinsic_vrgather_vx_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vx_nxv16i16_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT:    vrgather.vx v12, v8, a0
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vrgather.vx.nxv16i16.iXLen(
    <vscale x 16 x i16> undef,
    <vscale x 16 x i16> %0,
    iXLen %1,
    iXLen %2)

  ret <vscale x 16 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vrgather.vx.mask.nxv16i16.iXLen(
  <vscale x 16 x i16>,
  <vscale x 16 x i16>,
  iXLen,
  <vscale x 16 x i1>,
  iXLen,
  iXLen)

define <vscale x 16 x i16> @intrinsic_vrgather_mask_vx_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, iXLen %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv16i16_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT:    vrgather.vx v8, v12, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vrgather.vx.mask.nxv16i16.iXLen(
    <vscale x 16 x i16> %0,
    <vscale x 16 x i16> %1,
    iXLen %2,
    <vscale x 16 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 16 x i16> %a
}

declare <vscale x 32 x i16> @llvm.riscv.vrgather.vx.nxv32i16.iXLen(
  <vscale x 32 x i16>,
  <vscale x 32 x i16>,
  iXLen,
  iXLen)

define <vscale x 32 x i16> @intrinsic_vrgather_vx_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vx_nxv32i16_nxv32i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
; CHECK-NEXT:    vrgather.vx v16, v8, a0
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vrgather.vx.nxv32i16.iXLen(
    <vscale x 32 x i16> undef,
    <vscale x 32 x i16> %0,
    iXLen %1,
    iXLen %2)

  ret <vscale x 32 x i16> %a
}

declare <vscale x 32 x i16> @llvm.riscv.vrgather.vx.mask.nxv32i16.iXLen(
  <vscale x 32 x i16>,
  <vscale x 32 x i16>,
  iXLen,
  <vscale x 32 x i1>,
  iXLen,
  iXLen)

define <vscale x 32 x i16> @intrinsic_vrgather_mask_vx_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, iXLen %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv32i16_nxv32i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
; CHECK-NEXT:    vrgather.vx v8, v16, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vrgather.vx.mask.nxv32i16.iXLen(
    <vscale x 32 x i16> %0,
    <vscale x 32 x i16> %1,
    iXLen %2,
    <vscale x 32 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 32 x i16> %a
}
2413
; vrgather.vx tests for i32 element types, one unmasked + one masked test per
; LMUL from mf2 through m8. Same structure as the preceding integer groups.
; NOTE(review): CHECK lines are autogenerated by update_llc_test_checks.py.
declare <vscale x 1 x i32> @llvm.riscv.vrgather.vx.nxv1i32.iXLen(
  <vscale x 1 x i32>,
  <vscale x 1 x i32>,
  iXLen,
  iXLen)

define <vscale x 1 x i32> @intrinsic_vrgather_vx_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vx_nxv1i32_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    vrgather.vx v9, v8, a0
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vrgather.vx.nxv1i32.iXLen(
    <vscale x 1 x i32> undef,
    <vscale x 1 x i32> %0,
    iXLen %1,
    iXLen %2)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vrgather.vx.mask.nxv1i32.iXLen(
  <vscale x 1 x i32>,
  <vscale x 1 x i32>,
  iXLen,
  <vscale x 1 x i1>,
  iXLen,
  iXLen)

define <vscale x 1 x i32> @intrinsic_vrgather_mask_vx_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv1i32_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT:    vrgather.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vrgather.vx.mask.nxv1i32.iXLen(
    <vscale x 1 x i32> %0,
    <vscale x 1 x i32> %1,
    iXLen %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vrgather.vx.nxv2i32.iXLen(
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  iXLen,
  iXLen)

define <vscale x 2 x i32> @intrinsic_vrgather_vx_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vx_nxv2i32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vrgather.vx v9, v8, a0
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vrgather.vx.nxv2i32.iXLen(
    <vscale x 2 x i32> undef,
    <vscale x 2 x i32> %0,
    iXLen %1,
    iXLen %2)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vrgather.vx.mask.nxv2i32.iXLen(
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  iXLen,
  <vscale x 2 x i1>,
  iXLen,
  iXLen)

define <vscale x 2 x i32> @intrinsic_vrgather_mask_vx_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv2i32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT:    vrgather.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vrgather.vx.mask.nxv2i32.iXLen(
    <vscale x 2 x i32> %0,
    <vscale x 2 x i32> %1,
    iXLen %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vrgather.vx.nxv4i32.iXLen(
  <vscale x 4 x i32>,
  <vscale x 4 x i32>,
  iXLen,
  iXLen)

define <vscale x 4 x i32> @intrinsic_vrgather_vx_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vx_nxv4i32_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT:    vrgather.vx v10, v8, a0
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vrgather.vx.nxv4i32.iXLen(
    <vscale x 4 x i32> undef,
    <vscale x 4 x i32> %0,
    iXLen %1,
    iXLen %2)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vrgather.vx.mask.nxv4i32.iXLen(
  <vscale x 4 x i32>,
  <vscale x 4 x i32>,
  iXLen,
  <vscale x 4 x i1>,
  iXLen,
  iXLen)

define <vscale x 4 x i32> @intrinsic_vrgather_mask_vx_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv4i32_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT:    vrgather.vx v8, v10, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vrgather.vx.mask.nxv4i32.iXLen(
    <vscale x 4 x i32> %0,
    <vscale x 4 x i32> %1,
    iXLen %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vrgather.vx.nxv8i32.iXLen(
  <vscale x 8 x i32>,
  <vscale x 8 x i32>,
  iXLen,
  iXLen)

define <vscale x 8 x i32> @intrinsic_vrgather_vx_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vx_nxv8i32_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT:    vrgather.vx v12, v8, a0
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vrgather.vx.nxv8i32.iXLen(
    <vscale x 8 x i32> undef,
    <vscale x 8 x i32> %0,
    iXLen %1,
    iXLen %2)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vrgather.vx.mask.nxv8i32.iXLen(
  <vscale x 8 x i32>,
  <vscale x 8 x i32>,
  iXLen,
  <vscale x 8 x i1>,
  iXLen,
  iXLen)

define <vscale x 8 x i32> @intrinsic_vrgather_mask_vx_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv8i32_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT:    vrgather.vx v8, v12, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vrgather.vx.mask.nxv8i32.iXLen(
    <vscale x 8 x i32> %0,
    <vscale x 8 x i32> %1,
    iXLen %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 16 x i32> @llvm.riscv.vrgather.vx.nxv16i32.iXLen(
  <vscale x 16 x i32>,
  <vscale x 16 x i32>,
  iXLen,
  iXLen)

define <vscale x 16 x i32> @intrinsic_vrgather_vx_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vx_nxv16i32_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT:    vrgather.vx v16, v8, a0
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vrgather.vx.nxv16i32.iXLen(
    <vscale x 16 x i32> undef,
    <vscale x 16 x i32> %0,
    iXLen %1,
    iXLen %2)

  ret <vscale x 16 x i32> %a
}

declare <vscale x 16 x i32> @llvm.riscv.vrgather.vx.mask.nxv16i32.iXLen(
  <vscale x 16 x i32>,
  <vscale x 16 x i32>,
  iXLen,
  <vscale x 16 x i1>,
  iXLen,
  iXLen)

define <vscale x 16 x i32> @intrinsic_vrgather_mask_vx_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, iXLen %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv16i32_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
; CHECK-NEXT:    vrgather.vx v8, v16, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vrgather.vx.mask.nxv16i32.iXLen(
    <vscale x 16 x i32> %0,
    <vscale x 16 x i32> %1,
    iXLen %2,
    <vscale x 16 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 16 x i32> %a
}
2653
; vrgather.vx tests for i64 element types at LMUL=1 and LMUL=2 (the series
; continues past this point). Same structure as the preceding integer groups.
; NOTE(review): CHECK lines are autogenerated by update_llc_test_checks.py.
declare <vscale x 1 x i64> @llvm.riscv.vrgather.vx.nxv1i64.iXLen(
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  iXLen,
  iXLen)

define <vscale x 1 x i64> @intrinsic_vrgather_vx_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vx_nxv1i64_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT:    vrgather.vx v9, v8, a0
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vrgather.vx.nxv1i64.iXLen(
    <vscale x 1 x i64> undef,
    <vscale x 1 x i64> %0,
    iXLen %1,
    iXLen %2)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vrgather.vx.mask.nxv1i64.iXLen(
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  iXLen,
  <vscale x 1 x i1>,
  iXLen,
  iXLen)

define <vscale x 1 x i64> @intrinsic_vrgather_mask_vx_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv1i64_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT:    vrgather.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vrgather.vx.mask.nxv1i64.iXLen(
    <vscale x 1 x i64> %0,
    <vscale x 1 x i64> %1,
    iXLen %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vrgather.vx.nxv2i64.iXLen(
  <vscale x 2 x i64>,
  <vscale x 2 x i64>,
  iXLen,
  iXLen)

define <vscale x 2 x i64> @intrinsic_vrgather_vx_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vx_nxv2i64_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT:    vrgather.vx v10, v8, a0
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vrgather.vx.nxv2i64.iXLen(
    <vscale x 2 x i64> undef,
    <vscale x 2 x i64> %0,
    iXLen %1,
    iXLen %2)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vrgather.vx.mask.nxv2i64.iXLen(
  <vscale x 2 x i64>,
  <vscale x 2 x i64>,
  iXLen,
  <vscale x 2 x i1>,
  iXLen,
  iXLen)

define <vscale x 2 x i64> @intrinsic_vrgather_mask_vx_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv2i64_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT:    vrgather.vx v8, v10, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vrgather.vx.mask.nxv2i64.iXLen(
    <vscale x 2 x i64> %0,
    <vscale x 2 x i64> %1,
    iXLen %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x i64> %a
}
2749
2750declare <vscale x 4 x i64> @llvm.riscv.vrgather.vx.nxv4i64.iXLen(
2751  <vscale x 4 x i64>,
2752  <vscale x 4 x i64>,
2753  iXLen,
2754  iXLen)
2755
2756define <vscale x 4 x i64> @intrinsic_vrgather_vx_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, iXLen %1, iXLen %2) nounwind {
2757; CHECK-LABEL: intrinsic_vrgather_vx_nxv4i64_nxv4i64:
2758; CHECK:       # %bb.0: # %entry
2759; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
2760; CHECK-NEXT:    vrgather.vx v12, v8, a0
2761; CHECK-NEXT:    vmv.v.v v8, v12
2762; CHECK-NEXT:    ret
2763entry:
2764  %a = call <vscale x 4 x i64> @llvm.riscv.vrgather.vx.nxv4i64.iXLen(
2765    <vscale x 4 x i64> undef,
2766    <vscale x 4 x i64> %0,
2767    iXLen %1,
2768    iXLen %2)
2769
2770  ret <vscale x 4 x i64> %a
2771}
2772
2773declare <vscale x 4 x i64> @llvm.riscv.vrgather.vx.mask.nxv4i64.iXLen(
2774  <vscale x 4 x i64>,
2775  <vscale x 4 x i64>,
2776  iXLen,
2777  <vscale x 4 x i1>,
2778  iXLen,
2779  iXLen)
2780
2781define <vscale x 4 x i64> @intrinsic_vrgather_mask_vx_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
2782; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv4i64_nxv4i64:
2783; CHECK:       # %bb.0: # %entry
2784; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
2785; CHECK-NEXT:    vrgather.vx v8, v12, a0, v0.t
2786; CHECK-NEXT:    ret
2787entry:
2788  %a = call <vscale x 4 x i64> @llvm.riscv.vrgather.vx.mask.nxv4i64.iXLen(
2789    <vscale x 4 x i64> %0,
2790    <vscale x 4 x i64> %1,
2791    iXLen %2,
2792    <vscale x 4 x i1> %3,
2793    iXLen %4, iXLen 1)
2794
2795  ret <vscale x 4 x i64> %a
2796}
2797
2798declare <vscale x 8 x i64> @llvm.riscv.vrgather.vx.nxv8i64.iXLen(
2799  <vscale x 8 x i64>,
2800  <vscale x 8 x i64>,
2801  iXLen,
2802  iXLen)
2803
2804define <vscale x 8 x i64> @intrinsic_vrgather_vx_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, iXLen %1, iXLen %2) nounwind {
2805; CHECK-LABEL: intrinsic_vrgather_vx_nxv8i64_nxv8i64:
2806; CHECK:       # %bb.0: # %entry
2807; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
2808; CHECK-NEXT:    vrgather.vx v16, v8, a0
2809; CHECK-NEXT:    vmv.v.v v8, v16
2810; CHECK-NEXT:    ret
2811entry:
2812  %a = call <vscale x 8 x i64> @llvm.riscv.vrgather.vx.nxv8i64.iXLen(
2813    <vscale x 8 x i64> undef,
2814    <vscale x 8 x i64> %0,
2815    iXLen %1,
2816    iXLen %2)
2817
2818  ret <vscale x 8 x i64> %a
2819}
2820
2821declare <vscale x 8 x i64> @llvm.riscv.vrgather.vx.mask.nxv8i64.iXLen(
2822  <vscale x 8 x i64>,
2823  <vscale x 8 x i64>,
2824  iXLen,
2825  <vscale x 8 x i1>,
2826  iXLen,
2827  iXLen)
2828
2829define <vscale x 8 x i64> @intrinsic_vrgather_mask_vx_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
2830; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv8i64_nxv8i64:
2831; CHECK:       # %bb.0: # %entry
2832; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
2833; CHECK-NEXT:    vrgather.vx v8, v16, a0, v0.t
2834; CHECK-NEXT:    ret
2835entry:
2836  %a = call <vscale x 8 x i64> @llvm.riscv.vrgather.vx.mask.nxv8i64.iXLen(
2837    <vscale x 8 x i64> %0,
2838    <vscale x 8 x i64> %1,
2839    iXLen %2,
2840    <vscale x 8 x i1> %3,
2841    iXLen %4, iXLen 1)
2842
2843  ret <vscale x 8 x i64> %a
2844}
2845
; vrgather.vx tests for f16 elements (requires +zvfh from the RUN lines),
; covering LMUL mf4..m8. Same pattern as the integer tests: undef-passthru
; (unmasked, "ta, ma", result copied to v8 via vmv1r.v/vmv.v.v) vs. masked
; merge into v8 with policy iXLen 1 ("ta, mu").
declare <vscale x 1 x half> @llvm.riscv.vrgather.vx.nxv1f16.iXLen(
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  iXLen,
  iXLen)

define <vscale x 1 x half> @intrinsic_vrgather_vx_nxv1f16_nxv1f16(<vscale x 1 x half> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vx_nxv1f16_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vrgather.vx v9, v8, a0
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vrgather.vx.nxv1f16.iXLen(
    <vscale x 1 x half> undef,
    <vscale x 1 x half> %0,
    iXLen %1,
    iXLen %2)

  ret <vscale x 1 x half> %a
}

declare <vscale x 1 x half> @llvm.riscv.vrgather.vx.mask.nxv1f16.iXLen(
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  iXLen,
  <vscale x 1 x i1>,
  iXLen,
  iXLen)

define <vscale x 1 x half> @intrinsic_vrgather_mask_vx_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv1f16_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT:    vrgather.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vrgather.vx.mask.nxv1f16.iXLen(
    <vscale x 1 x half> %0,
    <vscale x 1 x half> %1,
    iXLen %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x half> %a
}

declare <vscale x 2 x half> @llvm.riscv.vrgather.vx.nxv2f16.iXLen(
  <vscale x 2 x half>,
  <vscale x 2 x half>,
  iXLen,
  iXLen)

define <vscale x 2 x half> @intrinsic_vrgather_vx_nxv2f16_nxv2f16(<vscale x 2 x half> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vx_nxv2f16_nxv2f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT:    vrgather.vx v9, v8, a0
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x half> @llvm.riscv.vrgather.vx.nxv2f16.iXLen(
    <vscale x 2 x half> undef,
    <vscale x 2 x half> %0,
    iXLen %1,
    iXLen %2)

  ret <vscale x 2 x half> %a
}

declare <vscale x 2 x half> @llvm.riscv.vrgather.vx.mask.nxv2f16.iXLen(
  <vscale x 2 x half>,
  <vscale x 2 x half>,
  iXLen,
  <vscale x 2 x i1>,
  iXLen,
  iXLen)

define <vscale x 2 x half> @intrinsic_vrgather_mask_vx_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv2f16_nxv2f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT:    vrgather.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x half> @llvm.riscv.vrgather.vx.mask.nxv2f16.iXLen(
    <vscale x 2 x half> %0,
    <vscale x 2 x half> %1,
    iXLen %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x half> %a
}

declare <vscale x 4 x half> @llvm.riscv.vrgather.vx.nxv4f16.iXLen(
  <vscale x 4 x half>,
  <vscale x 4 x half>,
  iXLen,
  iXLen)

define <vscale x 4 x half> @intrinsic_vrgather_vx_nxv4f16_nxv4f16(<vscale x 4 x half> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vx_nxv4f16_nxv4f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vrgather.vx v9, v8, a0
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vrgather.vx.nxv4f16.iXLen(
    <vscale x 4 x half> undef,
    <vscale x 4 x half> %0,
    iXLen %1,
    iXLen %2)

  ret <vscale x 4 x half> %a
}

declare <vscale x 4 x half> @llvm.riscv.vrgather.vx.mask.nxv4f16.iXLen(
  <vscale x 4 x half>,
  <vscale x 4 x half>,
  iXLen,
  <vscale x 4 x i1>,
  iXLen,
  iXLen)

define <vscale x 4 x half> @intrinsic_vrgather_mask_vx_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv4f16_nxv4f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT:    vrgather.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vrgather.vx.mask.nxv4f16.iXLen(
    <vscale x 4 x half> %0,
    <vscale x 4 x half> %1,
    iXLen %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x half> %a
}

declare <vscale x 8 x half> @llvm.riscv.vrgather.vx.nxv8f16.iXLen(
  <vscale x 8 x half>,
  <vscale x 8 x half>,
  iXLen,
  iXLen)

define <vscale x 8 x half> @intrinsic_vrgather_vx_nxv8f16_nxv8f16(<vscale x 8 x half> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vx_nxv8f16_nxv8f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT:    vrgather.vx v10, v8, a0
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x half> @llvm.riscv.vrgather.vx.nxv8f16.iXLen(
    <vscale x 8 x half> undef,
    <vscale x 8 x half> %0,
    iXLen %1,
    iXLen %2)

  ret <vscale x 8 x half> %a
}

declare <vscale x 8 x half> @llvm.riscv.vrgather.vx.mask.nxv8f16.iXLen(
  <vscale x 8 x half>,
  <vscale x 8 x half>,
  iXLen,
  <vscale x 8 x i1>,
  iXLen,
  iXLen)

define <vscale x 8 x half> @intrinsic_vrgather_mask_vx_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv8f16_nxv8f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT:    vrgather.vx v8, v10, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x half> @llvm.riscv.vrgather.vx.mask.nxv8f16.iXLen(
    <vscale x 8 x half> %0,
    <vscale x 8 x half> %1,
    iXLen %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x half> %a
}

declare <vscale x 16 x half> @llvm.riscv.vrgather.vx.nxv16f16.iXLen(
  <vscale x 16 x half>,
  <vscale x 16 x half>,
  iXLen,
  iXLen)

define <vscale x 16 x half> @intrinsic_vrgather_vx_nxv16f16_nxv16f16(<vscale x 16 x half> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vx_nxv16f16_nxv16f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT:    vrgather.vx v12, v8, a0
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x half> @llvm.riscv.vrgather.vx.nxv16f16.iXLen(
    <vscale x 16 x half> undef,
    <vscale x 16 x half> %0,
    iXLen %1,
    iXLen %2)

  ret <vscale x 16 x half> %a
}

declare <vscale x 16 x half> @llvm.riscv.vrgather.vx.mask.nxv16f16.iXLen(
  <vscale x 16 x half>,
  <vscale x 16 x half>,
  iXLen,
  <vscale x 16 x i1>,
  iXLen,
  iXLen)

define <vscale x 16 x half> @intrinsic_vrgather_mask_vx_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, iXLen %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv16f16_nxv16f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT:    vrgather.vx v8, v12, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x half> @llvm.riscv.vrgather.vx.mask.nxv16f16.iXLen(
    <vscale x 16 x half> %0,
    <vscale x 16 x half> %1,
    iXLen %2,
    <vscale x 16 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 16 x half> %a
}

declare <vscale x 32 x half> @llvm.riscv.vrgather.vx.nxv32f16.iXLen(
  <vscale x 32 x half>,
  <vscale x 32 x half>,
  iXLen,
  iXLen)

define <vscale x 32 x half> @intrinsic_vrgather_vx_nxv32f16_nxv32f16(<vscale x 32 x half> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vx_nxv32f16_nxv32f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
; CHECK-NEXT:    vrgather.vx v16, v8, a0
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x half> @llvm.riscv.vrgather.vx.nxv32f16.iXLen(
    <vscale x 32 x half> undef,
    <vscale x 32 x half> %0,
    iXLen %1,
    iXLen %2)

  ret <vscale x 32 x half> %a
}

declare <vscale x 32 x half> @llvm.riscv.vrgather.vx.mask.nxv32f16.iXLen(
  <vscale x 32 x half>,
  <vscale x 32 x half>,
  iXLen,
  <vscale x 32 x i1>,
  iXLen,
  iXLen)

define <vscale x 32 x half> @intrinsic_vrgather_mask_vx_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, iXLen %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv32f16_nxv32f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
; CHECK-NEXT:    vrgather.vx v8, v16, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x half> @llvm.riscv.vrgather.vx.mask.nxv32f16.iXLen(
    <vscale x 32 x half> %0,
    <vscale x 32 x half> %1,
    iXLen %2,
    <vscale x 32 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 32 x half> %a
}
3133
; vrgather.vx tests for f32 elements, LMUL mf2..m8. Same unmasked/masked
; pattern as above (undef passthru + copy back to v8 vs. masked merge with
; policy iXLen 1).
declare <vscale x 1 x float> @llvm.riscv.vrgather.vx.nxv1f32.iXLen(
  <vscale x 1 x float>,
  <vscale x 1 x float>,
  iXLen,
  iXLen)

define <vscale x 1 x float> @intrinsic_vrgather_vx_nxv1f32_nxv1f32(<vscale x 1 x float> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vx_nxv1f32_nxv1f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    vrgather.vx v9, v8, a0
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vrgather.vx.nxv1f32.iXLen(
    <vscale x 1 x float> undef,
    <vscale x 1 x float> %0,
    iXLen %1,
    iXLen %2)

  ret <vscale x 1 x float> %a
}

declare <vscale x 1 x float> @llvm.riscv.vrgather.vx.mask.nxv1f32.iXLen(
  <vscale x 1 x float>,
  <vscale x 1 x float>,
  iXLen,
  <vscale x 1 x i1>,
  iXLen,
  iXLen)

define <vscale x 1 x float> @intrinsic_vrgather_mask_vx_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv1f32_nxv1f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT:    vrgather.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vrgather.vx.mask.nxv1f32.iXLen(
    <vscale x 1 x float> %0,
    <vscale x 1 x float> %1,
    iXLen %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x float> %a
}

declare <vscale x 2 x float> @llvm.riscv.vrgather.vx.nxv2f32.iXLen(
  <vscale x 2 x float>,
  <vscale x 2 x float>,
  iXLen,
  iXLen)

define <vscale x 2 x float> @intrinsic_vrgather_vx_nxv2f32_nxv2f32(<vscale x 2 x float> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vx_nxv2f32_nxv2f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vrgather.vx v9, v8, a0
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vrgather.vx.nxv2f32.iXLen(
    <vscale x 2 x float> undef,
    <vscale x 2 x float> %0,
    iXLen %1,
    iXLen %2)

  ret <vscale x 2 x float> %a
}

declare <vscale x 2 x float> @llvm.riscv.vrgather.vx.mask.nxv2f32.iXLen(
  <vscale x 2 x float>,
  <vscale x 2 x float>,
  iXLen,
  <vscale x 2 x i1>,
  iXLen,
  iXLen)

define <vscale x 2 x float> @intrinsic_vrgather_mask_vx_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv2f32_nxv2f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT:    vrgather.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vrgather.vx.mask.nxv2f32.iXLen(
    <vscale x 2 x float> %0,
    <vscale x 2 x float> %1,
    iXLen %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x float> %a
}

declare <vscale x 4 x float> @llvm.riscv.vrgather.vx.nxv4f32.iXLen(
  <vscale x 4 x float>,
  <vscale x 4 x float>,
  iXLen,
  iXLen)

define <vscale x 4 x float> @intrinsic_vrgather_vx_nxv4f32_nxv4f32(<vscale x 4 x float> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vx_nxv4f32_nxv4f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT:    vrgather.vx v10, v8, a0
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x float> @llvm.riscv.vrgather.vx.nxv4f32.iXLen(
    <vscale x 4 x float> undef,
    <vscale x 4 x float> %0,
    iXLen %1,
    iXLen %2)

  ret <vscale x 4 x float> %a
}

declare <vscale x 4 x float> @llvm.riscv.vrgather.vx.mask.nxv4f32.iXLen(
  <vscale x 4 x float>,
  <vscale x 4 x float>,
  iXLen,
  <vscale x 4 x i1>,
  iXLen,
  iXLen)

define <vscale x 4 x float> @intrinsic_vrgather_mask_vx_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv4f32_nxv4f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT:    vrgather.vx v8, v10, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x float> @llvm.riscv.vrgather.vx.mask.nxv4f32.iXLen(
    <vscale x 4 x float> %0,
    <vscale x 4 x float> %1,
    iXLen %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x float> %a
}

declare <vscale x 8 x float> @llvm.riscv.vrgather.vx.nxv8f32.iXLen(
  <vscale x 8 x float>,
  <vscale x 8 x float>,
  iXLen,
  iXLen)

define <vscale x 8 x float> @intrinsic_vrgather_vx_nxv8f32_nxv8f32(<vscale x 8 x float> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vx_nxv8f32_nxv8f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT:    vrgather.vx v12, v8, a0
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vrgather.vx.nxv8f32.iXLen(
    <vscale x 8 x float> undef,
    <vscale x 8 x float> %0,
    iXLen %1,
    iXLen %2)

  ret <vscale x 8 x float> %a
}

declare <vscale x 8 x float> @llvm.riscv.vrgather.vx.mask.nxv8f32.iXLen(
  <vscale x 8 x float>,
  <vscale x 8 x float>,
  iXLen,
  <vscale x 8 x i1>,
  iXLen,
  iXLen)

define <vscale x 8 x float> @intrinsic_vrgather_mask_vx_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv8f32_nxv8f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT:    vrgather.vx v8, v12, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vrgather.vx.mask.nxv8f32.iXLen(
    <vscale x 8 x float> %0,
    <vscale x 8 x float> %1,
    iXLen %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x float> %a
}

declare <vscale x 16 x float> @llvm.riscv.vrgather.vx.nxv16f32.iXLen(
  <vscale x 16 x float>,
  <vscale x 16 x float>,
  iXLen,
  iXLen)

define <vscale x 16 x float> @intrinsic_vrgather_vx_nxv16f32_nxv16f32(<vscale x 16 x float> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vx_nxv16f32_nxv16f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT:    vrgather.vx v16, v8, a0
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x float> @llvm.riscv.vrgather.vx.nxv16f32.iXLen(
    <vscale x 16 x float> undef,
    <vscale x 16 x float> %0,
    iXLen %1,
    iXLen %2)

  ret <vscale x 16 x float> %a
}

declare <vscale x 16 x float> @llvm.riscv.vrgather.vx.mask.nxv16f32.iXLen(
  <vscale x 16 x float>,
  <vscale x 16 x float>,
  iXLen,
  <vscale x 16 x i1>,
  iXLen,
  iXLen)

define <vscale x 16 x float> @intrinsic_vrgather_mask_vx_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, iXLen %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv16f32_nxv16f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
; CHECK-NEXT:    vrgather.vx v8, v16, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x float> @llvm.riscv.vrgather.vx.mask.nxv16f32.iXLen(
    <vscale x 16 x float> %0,
    <vscale x 16 x float> %1,
    iXLen %2,
    <vscale x 16 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 16 x float> %a
}
3373
; vrgather.vx tests for f64 elements, LMUL m1..m8. Same unmasked/masked
; pattern as above.
declare <vscale x 1 x double> @llvm.riscv.vrgather.vx.nxv1f64.iXLen(
  <vscale x 1 x double>,
  <vscale x 1 x double>,
  iXLen,
  iXLen)

define <vscale x 1 x double> @intrinsic_vrgather_vx_nxv1f64_nxv1f64(<vscale x 1 x double> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vx_nxv1f64_nxv1f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT:    vrgather.vx v9, v8, a0
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vrgather.vx.nxv1f64.iXLen(
    <vscale x 1 x double> undef,
    <vscale x 1 x double> %0,
    iXLen %1,
    iXLen %2)

  ret <vscale x 1 x double> %a
}

declare <vscale x 1 x double> @llvm.riscv.vrgather.vx.mask.nxv1f64.iXLen(
  <vscale x 1 x double>,
  <vscale x 1 x double>,
  iXLen,
  <vscale x 1 x i1>,
  iXLen,
  iXLen)

define <vscale x 1 x double> @intrinsic_vrgather_mask_vx_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv1f64_nxv1f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT:    vrgather.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vrgather.vx.mask.nxv1f64.iXLen(
    <vscale x 1 x double> %0,
    <vscale x 1 x double> %1,
    iXLen %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x double> %a
}

declare <vscale x 2 x double> @llvm.riscv.vrgather.vx.nxv2f64.iXLen(
  <vscale x 2 x double>,
  <vscale x 2 x double>,
  iXLen,
  iXLen)

define <vscale x 2 x double> @intrinsic_vrgather_vx_nxv2f64_nxv2f64(<vscale x 2 x double> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vx_nxv2f64_nxv2f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT:    vrgather.vx v10, v8, a0
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x double> @llvm.riscv.vrgather.vx.nxv2f64.iXLen(
    <vscale x 2 x double> undef,
    <vscale x 2 x double> %0,
    iXLen %1,
    iXLen %2)

  ret <vscale x 2 x double> %a
}

declare <vscale x 2 x double> @llvm.riscv.vrgather.vx.mask.nxv2f64.iXLen(
  <vscale x 2 x double>,
  <vscale x 2 x double>,
  iXLen,
  <vscale x 2 x i1>,
  iXLen,
  iXLen)

define <vscale x 2 x double> @intrinsic_vrgather_mask_vx_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv2f64_nxv2f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT:    vrgather.vx v8, v10, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x double> @llvm.riscv.vrgather.vx.mask.nxv2f64.iXLen(
    <vscale x 2 x double> %0,
    <vscale x 2 x double> %1,
    iXLen %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x double> %a
}

declare <vscale x 4 x double> @llvm.riscv.vrgather.vx.nxv4f64.iXLen(
  <vscale x 4 x double>,
  <vscale x 4 x double>,
  iXLen,
  iXLen)

define <vscale x 4 x double> @intrinsic_vrgather_vx_nxv4f64_nxv4f64(<vscale x 4 x double> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vx_nxv4f64_nxv4f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
; CHECK-NEXT:    vrgather.vx v12, v8, a0
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x double> @llvm.riscv.vrgather.vx.nxv4f64.iXLen(
    <vscale x 4 x double> undef,
    <vscale x 4 x double> %0,
    iXLen %1,
    iXLen %2)

  ret <vscale x 4 x double> %a
}

declare <vscale x 4 x double> @llvm.riscv.vrgather.vx.mask.nxv4f64.iXLen(
  <vscale x 4 x double>,
  <vscale x 4 x double>,
  iXLen,
  <vscale x 4 x i1>,
  iXLen,
  iXLen)

define <vscale x 4 x double> @intrinsic_vrgather_mask_vx_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv4f64_nxv4f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
; CHECK-NEXT:    vrgather.vx v8, v12, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x double> @llvm.riscv.vrgather.vx.mask.nxv4f64.iXLen(
    <vscale x 4 x double> %0,
    <vscale x 4 x double> %1,
    iXLen %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x double> %a
}

declare <vscale x 8 x double> @llvm.riscv.vrgather.vx.nxv8f64.iXLen(
  <vscale x 8 x double>,
  <vscale x 8 x double>,
  iXLen,
  iXLen)

define <vscale x 8 x double> @intrinsic_vrgather_vx_nxv8f64_nxv8f64(<vscale x 8 x double> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vx_nxv8f64_nxv8f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
; CHECK-NEXT:    vrgather.vx v16, v8, a0
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x double> @llvm.riscv.vrgather.vx.nxv8f64.iXLen(
    <vscale x 8 x double> undef,
    <vscale x 8 x double> %0,
    iXLen %1,
    iXLen %2)

  ret <vscale x 8 x double> %a
}

declare <vscale x 8 x double> @llvm.riscv.vrgather.vx.mask.nxv8f64.iXLen(
  <vscale x 8 x double>,
  <vscale x 8 x double>,
  iXLen,
  <vscale x 8 x i1>,
  iXLen,
  iXLen)

define <vscale x 8 x double> @intrinsic_vrgather_mask_vx_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv8f64_nxv8f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
; CHECK-NEXT:    vrgather.vx v8, v16, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x double> @llvm.riscv.vrgather.vx.mask.nxv8f64.iXLen(
    <vscale x 8 x double> %0,
    <vscale x 8 x double> %1,
    iXLen %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x double> %a
}
3565
; vrgather.vi tests: the vrgather.vx intrinsics (declared earlier in this
; file) are called with the constant index 9, which isel folds into the
; immediate form vrgather.vi.
define <vscale x 1 x i8> @intrinsic_vrgather_vi_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vi_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vrgather.vi v9, v8, 9
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vrgather.vx.nxv1i8.iXLen(
    <vscale x 1 x i8> undef,
    <vscale x 1 x i8> %0,
    iXLen 9,
    iXLen %1)

  ret <vscale x 1 x i8> %a
}

define <vscale x 1 x i8> @intrinsic_vrgather_mask_vi_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT:    vrgather.vi v8, v9, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vrgather.vx.mask.nxv1i8.iXLen(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    iXLen 9,
    <vscale x 1 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 1 x i8> %a
}

define <vscale x 2 x i8> @intrinsic_vrgather_vi_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vi_nxv2i8_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT:    vrgather.vi v9, v8, 9
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vrgather.vx.nxv2i8.iXLen(
    <vscale x 2 x i8> undef,
    <vscale x 2 x i8> %0,
    iXLen 9,
    iXLen %1)

  ret <vscale x 2 x i8> %a
}

define <vscale x 2 x i8> @intrinsic_vrgather_mask_vi_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv2i8_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
; CHECK-NEXT:    vrgather.vi v8, v9, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vrgather.vx.mask.nxv2i8.iXLen(
    <vscale x 2 x i8> %0,
    <vscale x 2 x i8> %1,
    iXLen 9,
    <vscale x 2 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 2 x i8> %a
}

define <vscale x 4 x i8> @intrinsic_vrgather_vi_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vi_nxv4i8_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    vrgather.vi v9, v8, 9
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vrgather.vx.nxv4i8.iXLen(
    <vscale x 4 x i8> undef,
    <vscale x 4 x i8> %0,
    iXLen 9,
    iXLen %1)

  ret <vscale x 4 x i8> %a
}
3650
3651define <vscale x 4 x i8> @intrinsic_vrgather_mask_vi_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
3652; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv4i8_nxv4i8:
3653; CHECK:       # %bb.0: # %entry
3654; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
3655; CHECK-NEXT:    vrgather.vi v8, v9, 9, v0.t
3656; CHECK-NEXT:    ret
3657entry:
3658  %a = call <vscale x 4 x i8> @llvm.riscv.vrgather.vx.mask.nxv4i8.iXLen(
3659    <vscale x 4 x i8> %0,
3660    <vscale x 4 x i8> %1,
3661    iXLen 9,
3662    <vscale x 4 x i1> %2,
3663    iXLen %3, iXLen 1)
3664
3665  ret <vscale x 4 x i8> %a
3666}
3667
3668define <vscale x 8 x i8> @intrinsic_vrgather_vi_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, iXLen %1) nounwind {
3669; CHECK-LABEL: intrinsic_vrgather_vi_nxv8i8_nxv8i8:
3670; CHECK:       # %bb.0: # %entry
3671; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
3672; CHECK-NEXT:    vrgather.vi v9, v8, 9
3673; CHECK-NEXT:    vmv.v.v v8, v9
3674; CHECK-NEXT:    ret
3675entry:
3676  %a = call <vscale x 8 x i8> @llvm.riscv.vrgather.vx.nxv8i8.iXLen(
3677    <vscale x 8 x i8> undef,
3678    <vscale x 8 x i8> %0,
3679    iXLen 9,
3680    iXLen %1)
3681
3682  ret <vscale x 8 x i8> %a
3683}
3684
3685define <vscale x 8 x i8> @intrinsic_vrgather_mask_vi_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
3686; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv8i8_nxv8i8:
3687; CHECK:       # %bb.0: # %entry
3688; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
3689; CHECK-NEXT:    vrgather.vi v8, v9, 9, v0.t
3690; CHECK-NEXT:    ret
3691entry:
3692  %a = call <vscale x 8 x i8> @llvm.riscv.vrgather.vx.mask.nxv8i8.iXLen(
3693    <vscale x 8 x i8> %0,
3694    <vscale x 8 x i8> %1,
3695    iXLen 9,
3696    <vscale x 8 x i1> %2,
3697    iXLen %3, iXLen 1)
3698
3699  ret <vscale x 8 x i8> %a
3700}
3701
3702define <vscale x 16 x i8> @intrinsic_vrgather_vi_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, iXLen %1) nounwind {
3703; CHECK-LABEL: intrinsic_vrgather_vi_nxv16i8_nxv16i8:
3704; CHECK:       # %bb.0: # %entry
3705; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
3706; CHECK-NEXT:    vrgather.vi v10, v8, 9
3707; CHECK-NEXT:    vmv.v.v v8, v10
3708; CHECK-NEXT:    ret
3709entry:
3710  %a = call <vscale x 16 x i8> @llvm.riscv.vrgather.vx.nxv16i8.iXLen(
3711    <vscale x 16 x i8> undef,
3712    <vscale x 16 x i8> %0,
3713    iXLen 9,
3714    iXLen %1)
3715
3716  ret <vscale x 16 x i8> %a
3717}
3718
3719define <vscale x 16 x i8> @intrinsic_vrgather_mask_vi_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
3720; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv16i8_nxv16i8:
3721; CHECK:       # %bb.0: # %entry
3722; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
3723; CHECK-NEXT:    vrgather.vi v8, v10, 9, v0.t
3724; CHECK-NEXT:    ret
3725entry:
3726  %a = call <vscale x 16 x i8> @llvm.riscv.vrgather.vx.mask.nxv16i8.iXLen(
3727    <vscale x 16 x i8> %0,
3728    <vscale x 16 x i8> %1,
3729    iXLen 9,
3730    <vscale x 16 x i1> %2,
3731    iXLen %3, iXLen 1)
3732
3733  ret <vscale x 16 x i8> %a
3734}
3735
3736define <vscale x 32 x i8> @intrinsic_vrgather_vi_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, iXLen %1) nounwind {
3737; CHECK-LABEL: intrinsic_vrgather_vi_nxv32i8_nxv32i8:
3738; CHECK:       # %bb.0: # %entry
3739; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
3740; CHECK-NEXT:    vrgather.vi v12, v8, 9
3741; CHECK-NEXT:    vmv.v.v v8, v12
3742; CHECK-NEXT:    ret
3743entry:
3744  %a = call <vscale x 32 x i8> @llvm.riscv.vrgather.vx.nxv32i8.iXLen(
3745    <vscale x 32 x i8> undef,
3746    <vscale x 32 x i8> %0,
3747    iXLen 9,
3748    iXLen %1)
3749
3750  ret <vscale x 32 x i8> %a
3751}
3752
3753define <vscale x 32 x i8> @intrinsic_vrgather_mask_vi_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
3754; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv32i8_nxv32i8:
3755; CHECK:       # %bb.0: # %entry
3756; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
3757; CHECK-NEXT:    vrgather.vi v8, v12, 9, v0.t
3758; CHECK-NEXT:    ret
3759entry:
3760  %a = call <vscale x 32 x i8> @llvm.riscv.vrgather.vx.mask.nxv32i8.iXLen(
3761    <vscale x 32 x i8> %0,
3762    <vscale x 32 x i8> %1,
3763    iXLen 9,
3764    <vscale x 32 x i1> %2,
3765    iXLen %3, iXLen 1)
3766
3767  ret <vscale x 32 x i8> %a
3768}
3769
3770define <vscale x 64 x i8> @intrinsic_vrgather_vi_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, iXLen %1) nounwind {
3771; CHECK-LABEL: intrinsic_vrgather_vi_nxv64i8_nxv64i8:
3772; CHECK:       # %bb.0: # %entry
3773; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
3774; CHECK-NEXT:    vrgather.vi v16, v8, 9
3775; CHECK-NEXT:    vmv.v.v v8, v16
3776; CHECK-NEXT:    ret
3777entry:
3778  %a = call <vscale x 64 x i8> @llvm.riscv.vrgather.vx.nxv64i8.iXLen(
3779    <vscale x 64 x i8> undef,
3780    <vscale x 64 x i8> %0,
3781    iXLen 9,
3782    iXLen %1)
3783
3784  ret <vscale x 64 x i8> %a
3785}
3786
3787define <vscale x 64 x i8> @intrinsic_vrgather_mask_vi_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i1> %2, iXLen %3) nounwind {
3788; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv64i8_nxv64i8:
3789; CHECK:       # %bb.0: # %entry
3790; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, mu
3791; CHECK-NEXT:    vrgather.vi v8, v16, 9, v0.t
3792; CHECK-NEXT:    ret
3793entry:
3794  %a = call <vscale x 64 x i8> @llvm.riscv.vrgather.vx.mask.nxv64i8.iXLen(
3795    <vscale x 64 x i8> %0,
3796    <vscale x 64 x i8> %1,
3797    iXLen 9,
3798    <vscale x 64 x i1> %2,
3799    iXLen %3, iXLen 1)
3800
3801  ret <vscale x 64 x i8> %a
3802}
3803
; NOTE(review): CHECK lines below are autogenerated (update_llc_test_checks.py);
; do not hand-edit them — regenerate instead.
;
; vrgather.vi tests, SEW=16 (integer), LMULs mf4..m8, immediate index 9.
; Same pattern as the e8 tests: undef-passthru unmasked form gathers into a
; temporary and copies back to v8; masked form (policy iXLen 1 -> ta, mu)
; gathers in place under v0.t.
define <vscale x 1 x i16> @intrinsic_vrgather_vi_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vi_nxv1i16_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vrgather.vi v9, v8, 9
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vrgather.vx.nxv1i16.iXLen(
    <vscale x 1 x i16> undef,
    <vscale x 1 x i16> %0,
    iXLen 9,
    iXLen %1)

  ret <vscale x 1 x i16> %a
}

define <vscale x 1 x i16> @intrinsic_vrgather_mask_vi_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv1i16_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT:    vrgather.vi v8, v9, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vrgather.vx.mask.nxv1i16.iXLen(
    <vscale x 1 x i16> %0,
    <vscale x 1 x i16> %1,
    iXLen 9,
    <vscale x 1 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 1 x i16> %a
}

define <vscale x 2 x i16> @intrinsic_vrgather_vi_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vi_nxv2i16_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vrgather.vi v9, v8, 9
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vrgather.vx.nxv2i16.iXLen(
    <vscale x 2 x i16> undef,
    <vscale x 2 x i16> %0,
    iXLen 9,
    iXLen %1)

  ret <vscale x 2 x i16> %a
}

define <vscale x 2 x i16> @intrinsic_vrgather_mask_vi_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv2i16_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT:    vrgather.vi v8, v9, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vrgather.vx.mask.nxv2i16.iXLen(
    <vscale x 2 x i16> %0,
    <vscale x 2 x i16> %1,
    iXLen 9,
    <vscale x 2 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 2 x i16> %a
}

define <vscale x 4 x i16> @intrinsic_vrgather_vi_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vi_nxv4i16_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vrgather.vi v9, v8, 9
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vrgather.vx.nxv4i16.iXLen(
    <vscale x 4 x i16> undef,
    <vscale x 4 x i16> %0,
    iXLen 9,
    iXLen %1)

  ret <vscale x 4 x i16> %a
}

define <vscale x 4 x i16> @intrinsic_vrgather_mask_vi_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv4i16_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT:    vrgather.vi v8, v9, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vrgather.vx.mask.nxv4i16.iXLen(
    <vscale x 4 x i16> %0,
    <vscale x 4 x i16> %1,
    iXLen 9,
    <vscale x 4 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 4 x i16> %a
}

define <vscale x 8 x i16> @intrinsic_vrgather_vi_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vi_nxv8i16_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vrgather.vi v10, v8, 9
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vrgather.vx.nxv8i16.iXLen(
    <vscale x 8 x i16> undef,
    <vscale x 8 x i16> %0,
    iXLen 9,
    iXLen %1)

  ret <vscale x 8 x i16> %a
}

define <vscale x 8 x i16> @intrinsic_vrgather_mask_vi_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv8i16_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT:    vrgather.vi v8, v10, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vrgather.vx.mask.nxv8i16.iXLen(
    <vscale x 8 x i16> %0,
    <vscale x 8 x i16> %1,
    iXLen 9,
    <vscale x 8 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 8 x i16> %a
}

define <vscale x 16 x i16> @intrinsic_vrgather_vi_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vi_nxv16i16_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    vrgather.vi v12, v8, 9
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vrgather.vx.nxv16i16.iXLen(
    <vscale x 16 x i16> undef,
    <vscale x 16 x i16> %0,
    iXLen 9,
    iXLen %1)

  ret <vscale x 16 x i16> %a
}

define <vscale x 16 x i16> @intrinsic_vrgather_mask_vi_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv16i16_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT:    vrgather.vi v8, v12, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vrgather.vx.mask.nxv16i16.iXLen(
    <vscale x 16 x i16> %0,
    <vscale x 16 x i16> %1,
    iXLen 9,
    <vscale x 16 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 16 x i16> %a
}

define <vscale x 32 x i16> @intrinsic_vrgather_vi_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vi_nxv32i16_nxv32i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT:    vrgather.vi v16, v8, 9
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vrgather.vx.nxv32i16.iXLen(
    <vscale x 32 x i16> undef,
    <vscale x 32 x i16> %0,
    iXLen 9,
    iXLen %1)

  ret <vscale x 32 x i16> %a
}

define <vscale x 32 x i16> @intrinsic_vrgather_mask_vi_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv32i16_nxv32i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
; CHECK-NEXT:    vrgather.vi v8, v16, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vrgather.vx.mask.nxv32i16.iXLen(
    <vscale x 32 x i16> %0,
    <vscale x 32 x i16> %1,
    iXLen 9,
    <vscale x 32 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 32 x i16> %a
}
4007
; NOTE(review): CHECK lines below are autogenerated (update_llc_test_checks.py);
; do not hand-edit them — regenerate instead.
;
; vrgather.vi tests, SEW=32, LMULs mf2..m8, immediate index 9.
; Same unmasked (undef passthru, copy back to v8) / masked (policy iXLen 1,
; ta,mu, in-place with v0.t) pattern as the narrower-element tests.
define <vscale x 1 x i32> @intrinsic_vrgather_vi_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vi_nxv1i32_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vrgather.vi v9, v8, 9
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vrgather.vx.nxv1i32.iXLen(
    <vscale x 1 x i32> undef,
    <vscale x 1 x i32> %0,
    iXLen 9,
    iXLen %1)

  ret <vscale x 1 x i32> %a
}

define <vscale x 1 x i32> @intrinsic_vrgather_mask_vi_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv1i32_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT:    vrgather.vi v8, v9, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vrgather.vx.mask.nxv1i32.iXLen(
    <vscale x 1 x i32> %0,
    <vscale x 1 x i32> %1,
    iXLen 9,
    <vscale x 1 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 1 x i32> %a
}

define <vscale x 2 x i32> @intrinsic_vrgather_vi_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vi_nxv2i32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vrgather.vi v9, v8, 9
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vrgather.vx.nxv2i32.iXLen(
    <vscale x 2 x i32> undef,
    <vscale x 2 x i32> %0,
    iXLen 9,
    iXLen %1)

  ret <vscale x 2 x i32> %a
}

define <vscale x 2 x i32> @intrinsic_vrgather_mask_vi_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv2i32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT:    vrgather.vi v8, v9, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vrgather.vx.mask.nxv2i32.iXLen(
    <vscale x 2 x i32> %0,
    <vscale x 2 x i32> %1,
    iXLen 9,
    <vscale x 2 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 2 x i32> %a
}

define <vscale x 4 x i32> @intrinsic_vrgather_vi_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vi_nxv4i32_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vrgather.vi v10, v8, 9
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vrgather.vx.nxv4i32.iXLen(
    <vscale x 4 x i32> undef,
    <vscale x 4 x i32> %0,
    iXLen 9,
    iXLen %1)

  ret <vscale x 4 x i32> %a
}

define <vscale x 4 x i32> @intrinsic_vrgather_mask_vi_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv4i32_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT:    vrgather.vi v8, v10, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vrgather.vx.mask.nxv4i32.iXLen(
    <vscale x 4 x i32> %0,
    <vscale x 4 x i32> %1,
    iXLen 9,
    <vscale x 4 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 4 x i32> %a
}

define <vscale x 8 x i32> @intrinsic_vrgather_vi_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vi_nxv8i32_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vrgather.vi v12, v8, 9
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vrgather.vx.nxv8i32.iXLen(
    <vscale x 8 x i32> undef,
    <vscale x 8 x i32> %0,
    iXLen 9,
    iXLen %1)

  ret <vscale x 8 x i32> %a
}

define <vscale x 8 x i32> @intrinsic_vrgather_mask_vi_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv8i32_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT:    vrgather.vi v8, v12, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vrgather.vx.mask.nxv8i32.iXLen(
    <vscale x 8 x i32> %0,
    <vscale x 8 x i32> %1,
    iXLen 9,
    <vscale x 8 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 8 x i32> %a
}

define <vscale x 16 x i32> @intrinsic_vrgather_vi_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vi_nxv16i32_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT:    vrgather.vi v16, v8, 9
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vrgather.vx.nxv16i32.iXLen(
    <vscale x 16 x i32> undef,
    <vscale x 16 x i32> %0,
    iXLen 9,
    iXLen %1)

  ret <vscale x 16 x i32> %a
}

define <vscale x 16 x i32> @intrinsic_vrgather_mask_vi_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv16i32_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
; CHECK-NEXT:    vrgather.vi v8, v16, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vrgather.vx.mask.nxv16i32.iXLen(
    <vscale x 16 x i32> %0,
    <vscale x 16 x i32> %1,
    iXLen 9,
    <vscale x 16 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 16 x i32> %a
}
4177
; NOTE(review): CHECK lines below are autogenerated (update_llc_test_checks.py);
; do not hand-edit them — regenerate instead.
;
; vrgather.vi tests, SEW=64, LMULs m1..m8, immediate index 9.
; Same unmasked (undef passthru, gather into a temporary, vmv.v.v back to v8)
; / masked (policy iXLen 1 -> ta,mu, in-place with v0.t) pattern as above.
define <vscale x 1 x i64> @intrinsic_vrgather_vi_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vi_nxv1i64_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vrgather.vi v9, v8, 9
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vrgather.vx.nxv1i64.iXLen(
    <vscale x 1 x i64> undef,
    <vscale x 1 x i64> %0,
    iXLen 9,
    iXLen %1)

  ret <vscale x 1 x i64> %a
}

define <vscale x 1 x i64> @intrinsic_vrgather_mask_vi_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv1i64_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT:    vrgather.vi v8, v9, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vrgather.vx.mask.nxv1i64.iXLen(
    <vscale x 1 x i64> %0,
    <vscale x 1 x i64> %1,
    iXLen 9,
    <vscale x 1 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 1 x i64> %a
}

define <vscale x 2 x i64> @intrinsic_vrgather_vi_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vi_nxv2i64_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vrgather.vi v10, v8, 9
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vrgather.vx.nxv2i64.iXLen(
    <vscale x 2 x i64> undef,
    <vscale x 2 x i64> %0,
    iXLen 9,
    iXLen %1)

  ret <vscale x 2 x i64> %a
}

define <vscale x 2 x i64> @intrinsic_vrgather_mask_vi_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv2i64_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT:    vrgather.vi v8, v10, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vrgather.vx.mask.nxv2i64.iXLen(
    <vscale x 2 x i64> %0,
    <vscale x 2 x i64> %1,
    iXLen 9,
    <vscale x 2 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 2 x i64> %a
}

define <vscale x 4 x i64> @intrinsic_vrgather_vi_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vi_nxv4i64_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    vrgather.vi v12, v8, 9
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vrgather.vx.nxv4i64.iXLen(
    <vscale x 4 x i64> undef,
    <vscale x 4 x i64> %0,
    iXLen 9,
    iXLen %1)

  ret <vscale x 4 x i64> %a
}

define <vscale x 4 x i64> @intrinsic_vrgather_mask_vi_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv4i64_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT:    vrgather.vi v8, v12, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vrgather.vx.mask.nxv4i64.iXLen(
    <vscale x 4 x i64> %0,
    <vscale x 4 x i64> %1,
    iXLen 9,
    <vscale x 4 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 4 x i64> %a
}

define <vscale x 8 x i64> @intrinsic_vrgather_vi_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vi_nxv8i64_nxv8i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vrgather.vi v16, v8, 9
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vrgather.vx.nxv8i64.iXLen(
    <vscale x 8 x i64> undef,
    <vscale x 8 x i64> %0,
    iXLen 9,
    iXLen %1)

  ret <vscale x 8 x i64> %a
}

define <vscale x 8 x i64> @intrinsic_vrgather_mask_vi_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv8i64_nxv8i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
; CHECK-NEXT:    vrgather.vi v8, v16, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vrgather.vx.mask.nxv8i64.iXLen(
    <vscale x 8 x i64> %0,
    <vscale x 8 x i64> %1,
    iXLen 9,
    <vscale x 8 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 8 x i64> %a
}
4313
; NOTE(review): CHECK lines below are autogenerated (update_llc_test_checks.py);
; do not hand-edit them — regenerate instead.
;
; vrgather.vi tests, SEW=16 with half-precision FP element type (zvfh),
; LMULs mf4..m8, immediate index 9. vrgather is element-type agnostic, so the
; expected assembly matches the integer e16 tests: undef-passthru unmasked
; form gathers into a temporary and copies back to v8; masked form
; (policy iXLen 1 -> ta, mu) gathers in place under v0.t.
define <vscale x 1 x half> @intrinsic_vrgather_vi_nxv1f16_nxv1f16(<vscale x 1 x half> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vi_nxv1f16_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vrgather.vi v9, v8, 9
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vrgather.vx.nxv1f16.iXLen(
    <vscale x 1 x half> undef,
    <vscale x 1 x half> %0,
    iXLen 9,
    iXLen %1)

  ret <vscale x 1 x half> %a
}

define <vscale x 1 x half> @intrinsic_vrgather_mask_vi_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv1f16_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT:    vrgather.vi v8, v9, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vrgather.vx.mask.nxv1f16.iXLen(
    <vscale x 1 x half> %0,
    <vscale x 1 x half> %1,
    iXLen 9,
    <vscale x 1 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 1 x half> %a
}

define <vscale x 2 x half> @intrinsic_vrgather_vi_nxv2f16_nxv2f16(<vscale x 2 x half> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vi_nxv2f16_nxv2f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vrgather.vi v9, v8, 9
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x half> @llvm.riscv.vrgather.vx.nxv2f16.iXLen(
    <vscale x 2 x half> undef,
    <vscale x 2 x half> %0,
    iXLen 9,
    iXLen %1)

  ret <vscale x 2 x half> %a
}

define <vscale x 2 x half> @intrinsic_vrgather_mask_vi_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv2f16_nxv2f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT:    vrgather.vi v8, v9, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x half> @llvm.riscv.vrgather.vx.mask.nxv2f16.iXLen(
    <vscale x 2 x half> %0,
    <vscale x 2 x half> %1,
    iXLen 9,
    <vscale x 2 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 2 x half> %a
}

define <vscale x 4 x half> @intrinsic_vrgather_vi_nxv4f16_nxv4f16(<vscale x 4 x half> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vi_nxv4f16_nxv4f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vrgather.vi v9, v8, 9
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vrgather.vx.nxv4f16.iXLen(
    <vscale x 4 x half> undef,
    <vscale x 4 x half> %0,
    iXLen 9,
    iXLen %1)

  ret <vscale x 4 x half> %a
}

define <vscale x 4 x half> @intrinsic_vrgather_mask_vi_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv4f16_nxv4f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT:    vrgather.vi v8, v9, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vrgather.vx.mask.nxv4f16.iXLen(
    <vscale x 4 x half> %0,
    <vscale x 4 x half> %1,
    iXLen 9,
    <vscale x 4 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 4 x half> %a
}

define <vscale x 8 x half> @intrinsic_vrgather_vi_nxv8f16_nxv8f16(<vscale x 8 x half> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vi_nxv8f16_nxv8f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vrgather.vi v10, v8, 9
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x half> @llvm.riscv.vrgather.vx.nxv8f16.iXLen(
    <vscale x 8 x half> undef,
    <vscale x 8 x half> %0,
    iXLen 9,
    iXLen %1)

  ret <vscale x 8 x half> %a
}

define <vscale x 8 x half> @intrinsic_vrgather_mask_vi_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv8f16_nxv8f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT:    vrgather.vi v8, v10, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x half> @llvm.riscv.vrgather.vx.mask.nxv8f16.iXLen(
    <vscale x 8 x half> %0,
    <vscale x 8 x half> %1,
    iXLen 9,
    <vscale x 8 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 8 x half> %a
}

define <vscale x 16 x half> @intrinsic_vrgather_vi_nxv16f16_nxv16f16(<vscale x 16 x half> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vi_nxv16f16_nxv16f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    vrgather.vi v12, v8, 9
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x half> @llvm.riscv.vrgather.vx.nxv16f16.iXLen(
    <vscale x 16 x half> undef,
    <vscale x 16 x half> %0,
    iXLen 9,
    iXLen %1)

  ret <vscale x 16 x half> %a
}

define <vscale x 16 x half> @intrinsic_vrgather_mask_vi_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv16f16_nxv16f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT:    vrgather.vi v8, v12, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x half> @llvm.riscv.vrgather.vx.mask.nxv16f16.iXLen(
    <vscale x 16 x half> %0,
    <vscale x 16 x half> %1,
    iXLen 9,
    <vscale x 16 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 16 x half> %a
}

define <vscale x 32 x half> @intrinsic_vrgather_vi_nxv32f16_nxv32f16(<vscale x 32 x half> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vi_nxv32f16_nxv32f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT:    vrgather.vi v16, v8, 9
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x half> @llvm.riscv.vrgather.vx.nxv32f16.iXLen(
    <vscale x 32 x half> undef,
    <vscale x 32 x half> %0,
    iXLen 9,
    iXLen %1)

  ret <vscale x 32 x half> %a
}

define <vscale x 32 x half> @intrinsic_vrgather_mask_vi_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv32f16_nxv32f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
; CHECK-NEXT:    vrgather.vi v8, v16, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x half> @llvm.riscv.vrgather.vx.mask.nxv32f16.iXLen(
    <vscale x 32 x half> %0,
    <vscale x 32 x half> %1,
    iXLen 9,
    <vscale x 32 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 32 x half> %a
}
4517
4518define <vscale x 1 x float> @intrinsic_vrgather_vi_nxv1f32_nxv1f32(<vscale x 1 x float> %0, iXLen %1) nounwind {
4519; CHECK-LABEL: intrinsic_vrgather_vi_nxv1f32_nxv1f32:
4520; CHECK:       # %bb.0: # %entry
4521; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
4522; CHECK-NEXT:    vrgather.vi v9, v8, 9
4523; CHECK-NEXT:    vmv1r.v v8, v9
4524; CHECK-NEXT:    ret
4525entry:
4526  %a = call <vscale x 1 x float> @llvm.riscv.vrgather.vx.nxv1f32.iXLen(
4527    <vscale x 1 x float> undef,
4528    <vscale x 1 x float> %0,
4529    iXLen 9,
4530    iXLen %1)
4531
4532  ret <vscale x 1 x float> %a
4533}
4534
4535define <vscale x 1 x float> @intrinsic_vrgather_mask_vi_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
4536; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv1f32_nxv1f32:
4537; CHECK:       # %bb.0: # %entry
4538; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
4539; CHECK-NEXT:    vrgather.vi v8, v9, 9, v0.t
4540; CHECK-NEXT:    ret
4541entry:
4542  %a = call <vscale x 1 x float> @llvm.riscv.vrgather.vx.mask.nxv1f32.iXLen(
4543    <vscale x 1 x float> %0,
4544    <vscale x 1 x float> %1,
4545    iXLen 9,
4546    <vscale x 1 x i1> %2,
4547    iXLen %3, iXLen 1)
4548
4549  ret <vscale x 1 x float> %a
4550}
4551
4552define <vscale x 2 x float> @intrinsic_vrgather_vi_nxv2f32_nxv2f32(<vscale x 2 x float> %0, iXLen %1) nounwind {
4553; CHECK-LABEL: intrinsic_vrgather_vi_nxv2f32_nxv2f32:
4554; CHECK:       # %bb.0: # %entry
4555; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
4556; CHECK-NEXT:    vrgather.vi v9, v8, 9
4557; CHECK-NEXT:    vmv.v.v v8, v9
4558; CHECK-NEXT:    ret
4559entry:
4560  %a = call <vscale x 2 x float> @llvm.riscv.vrgather.vx.nxv2f32.iXLen(
4561    <vscale x 2 x float> undef,
4562    <vscale x 2 x float> %0,
4563    iXLen 9,
4564    iXLen %1)
4565
4566  ret <vscale x 2 x float> %a
4567}
4568
4569define <vscale x 2 x float> @intrinsic_vrgather_mask_vi_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
4570; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv2f32_nxv2f32:
4571; CHECK:       # %bb.0: # %entry
4572; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
4573; CHECK-NEXT:    vrgather.vi v8, v9, 9, v0.t
4574; CHECK-NEXT:    ret
4575entry:
4576  %a = call <vscale x 2 x float> @llvm.riscv.vrgather.vx.mask.nxv2f32.iXLen(
4577    <vscale x 2 x float> %0,
4578    <vscale x 2 x float> %1,
4579    iXLen 9,
4580    <vscale x 2 x i1> %2,
4581    iXLen %3, iXLen 1)
4582
4583  ret <vscale x 2 x float> %a
4584}
4585
4586define <vscale x 4 x float> @intrinsic_vrgather_vi_nxv4f32_nxv4f32(<vscale x 4 x float> %0, iXLen %1) nounwind {
4587; CHECK-LABEL: intrinsic_vrgather_vi_nxv4f32_nxv4f32:
4588; CHECK:       # %bb.0: # %entry
4589; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
4590; CHECK-NEXT:    vrgather.vi v10, v8, 9
4591; CHECK-NEXT:    vmv.v.v v8, v10
4592; CHECK-NEXT:    ret
4593entry:
4594  %a = call <vscale x 4 x float> @llvm.riscv.vrgather.vx.nxv4f32.iXLen(
4595    <vscale x 4 x float> undef,
4596    <vscale x 4 x float> %0,
4597    iXLen 9,
4598    iXLen %1)
4599
4600  ret <vscale x 4 x float> %a
4601}
4602
4603define <vscale x 4 x float> @intrinsic_vrgather_mask_vi_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
4604; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv4f32_nxv4f32:
4605; CHECK:       # %bb.0: # %entry
4606; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
4607; CHECK-NEXT:    vrgather.vi v8, v10, 9, v0.t
4608; CHECK-NEXT:    ret
4609entry:
4610  %a = call <vscale x 4 x float> @llvm.riscv.vrgather.vx.mask.nxv4f32.iXLen(
4611    <vscale x 4 x float> %0,
4612    <vscale x 4 x float> %1,
4613    iXLen 9,
4614    <vscale x 4 x i1> %2,
4615    iXLen %3, iXLen 1)
4616
4617  ret <vscale x 4 x float> %a
4618}
4619
4620define <vscale x 8 x float> @intrinsic_vrgather_vi_nxv8f32_nxv8f32(<vscale x 8 x float> %0, iXLen %1) nounwind {
4621; CHECK-LABEL: intrinsic_vrgather_vi_nxv8f32_nxv8f32:
4622; CHECK:       # %bb.0: # %entry
4623; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
4624; CHECK-NEXT:    vrgather.vi v12, v8, 9
4625; CHECK-NEXT:    vmv.v.v v8, v12
4626; CHECK-NEXT:    ret
4627entry:
4628  %a = call <vscale x 8 x float> @llvm.riscv.vrgather.vx.nxv8f32.iXLen(
4629    <vscale x 8 x float> undef,
4630    <vscale x 8 x float> %0,
4631    iXLen 9,
4632    iXLen %1)
4633
4634  ret <vscale x 8 x float> %a
4635}
4636
4637define <vscale x 8 x float> @intrinsic_vrgather_mask_vi_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
4638; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv8f32_nxv8f32:
4639; CHECK:       # %bb.0: # %entry
4640; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
4641; CHECK-NEXT:    vrgather.vi v8, v12, 9, v0.t
4642; CHECK-NEXT:    ret
4643entry:
4644  %a = call <vscale x 8 x float> @llvm.riscv.vrgather.vx.mask.nxv8f32.iXLen(
4645    <vscale x 8 x float> %0,
4646    <vscale x 8 x float> %1,
4647    iXLen 9,
4648    <vscale x 8 x i1> %2,
4649    iXLen %3, iXLen 1)
4650
4651  ret <vscale x 8 x float> %a
4652}
4653
4654define <vscale x 16 x float> @intrinsic_vrgather_vi_nxv16f32_nxv16f32(<vscale x 16 x float> %0, iXLen %1) nounwind {
4655; CHECK-LABEL: intrinsic_vrgather_vi_nxv16f32_nxv16f32:
4656; CHECK:       # %bb.0: # %entry
4657; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
4658; CHECK-NEXT:    vrgather.vi v16, v8, 9
4659; CHECK-NEXT:    vmv.v.v v8, v16
4660; CHECK-NEXT:    ret
4661entry:
4662  %a = call <vscale x 16 x float> @llvm.riscv.vrgather.vx.nxv16f32.iXLen(
4663    <vscale x 16 x float> undef,
4664    <vscale x 16 x float> %0,
4665    iXLen 9,
4666    iXLen %1)
4667
4668  ret <vscale x 16 x float> %a
4669}
4670
4671define <vscale x 16 x float> @intrinsic_vrgather_mask_vi_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
4672; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv16f32_nxv16f32:
4673; CHECK:       # %bb.0: # %entry
4674; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
4675; CHECK-NEXT:    vrgather.vi v8, v16, 9, v0.t
4676; CHECK-NEXT:    ret
4677entry:
4678  %a = call <vscale x 16 x float> @llvm.riscv.vrgather.vx.mask.nxv16f32.iXLen(
4679    <vscale x 16 x float> %0,
4680    <vscale x 16 x float> %1,
4681    iXLen 9,
4682    <vscale x 16 x i1> %2,
4683    iXLen %3, iXLen 1)
4684
4685  ret <vscale x 16 x float> %a
4686}
4687
4688define <vscale x 1 x double> @intrinsic_vrgather_vi_nxv1f64_nxv1f64(<vscale x 1 x double> %0, iXLen %1) nounwind {
4689; CHECK-LABEL: intrinsic_vrgather_vi_nxv1f64_nxv1f64:
4690; CHECK:       # %bb.0: # %entry
4691; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
4692; CHECK-NEXT:    vrgather.vi v9, v8, 9
4693; CHECK-NEXT:    vmv.v.v v8, v9
4694; CHECK-NEXT:    ret
4695entry:
4696  %a = call <vscale x 1 x double> @llvm.riscv.vrgather.vx.nxv1f64.iXLen(
4697    <vscale x 1 x double> undef,
4698    <vscale x 1 x double> %0,
4699    iXLen 9,
4700    iXLen %1)
4701
4702  ret <vscale x 1 x double> %a
4703}
4704
4705define <vscale x 1 x double> @intrinsic_vrgather_mask_vi_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
4706; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv1f64_nxv1f64:
4707; CHECK:       # %bb.0: # %entry
4708; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
4709; CHECK-NEXT:    vrgather.vi v8, v9, 9, v0.t
4710; CHECK-NEXT:    ret
4711entry:
4712  %a = call <vscale x 1 x double> @llvm.riscv.vrgather.vx.mask.nxv1f64.iXLen(
4713    <vscale x 1 x double> %0,
4714    <vscale x 1 x double> %1,
4715    iXLen 9,
4716    <vscale x 1 x i1> %2,
4717    iXLen %3, iXLen 1)
4718
4719  ret <vscale x 1 x double> %a
4720}
4721
4722define <vscale x 2 x double> @intrinsic_vrgather_vi_nxv2f64_nxv2f64(<vscale x 2 x double> %0, iXLen %1) nounwind {
4723; CHECK-LABEL: intrinsic_vrgather_vi_nxv2f64_nxv2f64:
4724; CHECK:       # %bb.0: # %entry
4725; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
4726; CHECK-NEXT:    vrgather.vi v10, v8, 9
4727; CHECK-NEXT:    vmv.v.v v8, v10
4728; CHECK-NEXT:    ret
4729entry:
4730  %a = call <vscale x 2 x double> @llvm.riscv.vrgather.vx.nxv2f64.iXLen(
4731    <vscale x 2 x double> undef,
4732    <vscale x 2 x double> %0,
4733    iXLen 9,
4734    iXLen %1)
4735
4736  ret <vscale x 2 x double> %a
4737}
4738
4739define <vscale x 2 x double> @intrinsic_vrgather_mask_vi_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
4740; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv2f64_nxv2f64:
4741; CHECK:       # %bb.0: # %entry
4742; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
4743; CHECK-NEXT:    vrgather.vi v8, v10, 9, v0.t
4744; CHECK-NEXT:    ret
4745entry:
4746  %a = call <vscale x 2 x double> @llvm.riscv.vrgather.vx.mask.nxv2f64.iXLen(
4747    <vscale x 2 x double> %0,
4748    <vscale x 2 x double> %1,
4749    iXLen 9,
4750    <vscale x 2 x i1> %2,
4751    iXLen %3, iXLen 1)
4752
4753  ret <vscale x 2 x double> %a
4754}
4755
4756define <vscale x 4 x double> @intrinsic_vrgather_vi_nxv4f64_nxv4f64(<vscale x 4 x double> %0, iXLen %1) nounwind {
4757; CHECK-LABEL: intrinsic_vrgather_vi_nxv4f64_nxv4f64:
4758; CHECK:       # %bb.0: # %entry
4759; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
4760; CHECK-NEXT:    vrgather.vi v12, v8, 9
4761; CHECK-NEXT:    vmv.v.v v8, v12
4762; CHECK-NEXT:    ret
4763entry:
4764  %a = call <vscale x 4 x double> @llvm.riscv.vrgather.vx.nxv4f64.iXLen(
4765    <vscale x 4 x double> undef,
4766    <vscale x 4 x double> %0,
4767    iXLen 9,
4768    iXLen %1)
4769
4770  ret <vscale x 4 x double> %a
4771}
4772
4773define <vscale x 4 x double> @intrinsic_vrgather_mask_vi_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
4774; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv4f64_nxv4f64:
4775; CHECK:       # %bb.0: # %entry
4776; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
4777; CHECK-NEXT:    vrgather.vi v8, v12, 9, v0.t
4778; CHECK-NEXT:    ret
4779entry:
4780  %a = call <vscale x 4 x double> @llvm.riscv.vrgather.vx.mask.nxv4f64.iXLen(
4781    <vscale x 4 x double> %0,
4782    <vscale x 4 x double> %1,
4783    iXLen 9,
4784    <vscale x 4 x i1> %2,
4785    iXLen %3, iXLen 1)
4786
4787  ret <vscale x 4 x double> %a
4788}
4789
4790define <vscale x 8 x double> @intrinsic_vrgather_vi_nxv8f64_nxv8f64(<vscale x 8 x double> %0, iXLen %1) nounwind {
4791; CHECK-LABEL: intrinsic_vrgather_vi_nxv8f64_nxv8f64:
4792; CHECK:       # %bb.0: # %entry
4793; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
4794; CHECK-NEXT:    vrgather.vi v16, v8, 9
4795; CHECK-NEXT:    vmv.v.v v8, v16
4796; CHECK-NEXT:    ret
4797entry:
4798  %a = call <vscale x 8 x double> @llvm.riscv.vrgather.vx.nxv8f64.iXLen(
4799    <vscale x 8 x double> undef,
4800    <vscale x 8 x double> %0,
4801    iXLen 9,
4802    iXLen %1)
4803
4804  ret <vscale x 8 x double> %a
4805}
4806
4807define <vscale x 8 x double> @intrinsic_vrgather_mask_vi_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
4808; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv8f64_nxv8f64:
4809; CHECK:       # %bb.0: # %entry
4810; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
4811; CHECK-NEXT:    vrgather.vi v8, v16, 9, v0.t
4812; CHECK-NEXT:    ret
4813entry:
4814  %a = call <vscale x 8 x double> @llvm.riscv.vrgather.vx.mask.nxv8f64.iXLen(
4815    <vscale x 8 x double> %0,
4816    <vscale x 8 x double> %1,
4817    iXLen 9,
4818    <vscale x 8 x i1> %2,
4819    iXLen %3, iXLen 1)
4820
4821  ret <vscale x 8 x double> %a
4822}
4823
4824declare <vscale x 1 x bfloat> @llvm.riscv.vrgather.vv.nxv1bf16.iXLen(
4825  <vscale x 1 x bfloat>,
4826  <vscale x 1 x bfloat>,
4827  <vscale x 1 x i16>,
4828  iXLen)
4829
4830define <vscale x 1 x bfloat> @intrinsic_vrgather_vv_nxv1bf16_nxv1bf16_nxv1i16(<vscale x 1 x bfloat> %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
4831; CHECK-LABEL: intrinsic_vrgather_vv_nxv1bf16_nxv1bf16_nxv1i16:
4832; CHECK:       # %bb.0: # %entry
4833; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
4834; CHECK-NEXT:    vrgather.vv v10, v8, v9
4835; CHECK-NEXT:    vmv1r.v v8, v10
4836; CHECK-NEXT:    ret
4837entry:
4838  %a = call <vscale x 1 x bfloat> @llvm.riscv.vrgather.vv.nxv1bf16.iXLen(
4839    <vscale x 1 x bfloat> undef,
4840    <vscale x 1 x bfloat> %0,
4841    <vscale x 1 x i16> %1,
4842    iXLen %2)
4843
4844  ret <vscale x 1 x bfloat> %a
4845}
4846
4847declare <vscale x 1 x bfloat> @llvm.riscv.vrgather.vv.mask.nxv1bf16.iXLen(
4848  <vscale x 1 x bfloat>,
4849  <vscale x 1 x bfloat>,
4850  <vscale x 1 x i16>,
4851  <vscale x 1 x i1>,
4852  iXLen,
4853  iXLen)
4854
4855define <vscale x 1 x bfloat> @intrinsic_vrgather_mask_vv_nxv1bf16_nxv1bf16_nxv1i16(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
4856; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv1bf16_nxv1bf16_nxv1i16:
4857; CHECK:       # %bb.0: # %entry
4858; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
4859; CHECK-NEXT:    vrgather.vv v8, v9, v10, v0.t
4860; CHECK-NEXT:    ret
4861entry:
4862  %a = call <vscale x 1 x bfloat> @llvm.riscv.vrgather.vv.mask.nxv1bf16.iXLen(
4863    <vscale x 1 x bfloat> %0,
4864    <vscale x 1 x bfloat> %1,
4865    <vscale x 1 x i16> %2,
4866    <vscale x 1 x i1> %3,
4867    iXLen %4, iXLen 1)
4868
4869  ret <vscale x 1 x bfloat> %a
4870}
4871
4872declare <vscale x 2 x bfloat> @llvm.riscv.vrgather.vv.nxv2bf16.iXLen(
4873  <vscale x 2 x bfloat>,
4874  <vscale x 2 x bfloat>,
4875  <vscale x 2 x i16>,
4876  iXLen)
4877
4878define <vscale x 2 x bfloat> @intrinsic_vrgather_vv_nxv2bf16_nxv2bf16_nxv2i16(<vscale x 2 x bfloat> %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
4879; CHECK-LABEL: intrinsic_vrgather_vv_nxv2bf16_nxv2bf16_nxv2i16:
4880; CHECK:       # %bb.0: # %entry
4881; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
4882; CHECK-NEXT:    vrgather.vv v10, v8, v9
4883; CHECK-NEXT:    vmv1r.v v8, v10
4884; CHECK-NEXT:    ret
4885entry:
4886  %a = call <vscale x 2 x bfloat> @llvm.riscv.vrgather.vv.nxv2bf16.iXLen(
4887    <vscale x 2 x bfloat> undef,
4888    <vscale x 2 x bfloat> %0,
4889    <vscale x 2 x i16> %1,
4890    iXLen %2)
4891
4892  ret <vscale x 2 x bfloat> %a
4893}
4894
4895declare <vscale x 2 x bfloat> @llvm.riscv.vrgather.vv.mask.nxv2bf16.iXLen(
4896  <vscale x 2 x bfloat>,
4897  <vscale x 2 x bfloat>,
4898  <vscale x 2 x i16>,
4899  <vscale x 2 x i1>,
4900  iXLen,
4901  iXLen)
4902
4903define <vscale x 2 x bfloat> @intrinsic_vrgather_mask_vv_nxv2bf16_nxv2bf16_nxv2i16(<vscale x 2 x bfloat> %0, <vscale x 2 x bfloat> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
4904; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv2bf16_nxv2bf16_nxv2i16:
4905; CHECK:       # %bb.0: # %entry
4906; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
4907; CHECK-NEXT:    vrgather.vv v8, v9, v10, v0.t
4908; CHECK-NEXT:    ret
4909entry:
4910  %a = call <vscale x 2 x bfloat> @llvm.riscv.vrgather.vv.mask.nxv2bf16.iXLen(
4911    <vscale x 2 x bfloat> %0,
4912    <vscale x 2 x bfloat> %1,
4913    <vscale x 2 x i16> %2,
4914    <vscale x 2 x i1> %3,
4915    iXLen %4, iXLen 1)
4916
4917  ret <vscale x 2 x bfloat> %a
4918}
4919
4920declare <vscale x 4 x bfloat> @llvm.riscv.vrgather.vv.nxv4bf16.iXLen(
4921  <vscale x 4 x bfloat>,
4922  <vscale x 4 x bfloat>,
4923  <vscale x 4 x i16>,
4924  iXLen)
4925
4926define <vscale x 4 x bfloat> @intrinsic_vrgather_vv_nxv4bf16_nxv4bf16_nxv4i16(<vscale x 4 x bfloat> %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
4927; CHECK-LABEL: intrinsic_vrgather_vv_nxv4bf16_nxv4bf16_nxv4i16:
4928; CHECK:       # %bb.0: # %entry
4929; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
4930; CHECK-NEXT:    vrgather.vv v10, v8, v9
4931; CHECK-NEXT:    vmv.v.v v8, v10
4932; CHECK-NEXT:    ret
4933entry:
4934  %a = call <vscale x 4 x bfloat> @llvm.riscv.vrgather.vv.nxv4bf16.iXLen(
4935    <vscale x 4 x bfloat> undef,
4936    <vscale x 4 x bfloat> %0,
4937    <vscale x 4 x i16> %1,
4938    iXLen %2)
4939
4940  ret <vscale x 4 x bfloat> %a
4941}
4942
4943declare <vscale x 4 x bfloat> @llvm.riscv.vrgather.vv.mask.nxv4bf16.iXLen(
4944  <vscale x 4 x bfloat>,
4945  <vscale x 4 x bfloat>,
4946  <vscale x 4 x i16>,
4947  <vscale x 4 x i1>,
4948  iXLen,
4949  iXLen)
4950
4951define <vscale x 4 x bfloat> @intrinsic_vrgather_mask_vv_nxv4bf16_nxv4bf16_nxv4i16(<vscale x 4 x bfloat> %0, <vscale x 4 x bfloat> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
4952; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv4bf16_nxv4bf16_nxv4i16:
4953; CHECK:       # %bb.0: # %entry
4954; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
4955; CHECK-NEXT:    vrgather.vv v8, v9, v10, v0.t
4956; CHECK-NEXT:    ret
4957entry:
4958  %a = call <vscale x 4 x bfloat> @llvm.riscv.vrgather.vv.mask.nxv4bf16.iXLen(
4959    <vscale x 4 x bfloat> %0,
4960    <vscale x 4 x bfloat> %1,
4961    <vscale x 4 x i16> %2,
4962    <vscale x 4 x i1> %3,
4963    iXLen %4, iXLen 1)
4964
4965  ret <vscale x 4 x bfloat> %a
4966}
4967
4968declare <vscale x 8 x bfloat> @llvm.riscv.vrgather.vv.nxv8bf16.iXLen(
4969  <vscale x 8 x bfloat>,
4970  <vscale x 8 x bfloat>,
4971  <vscale x 8 x i16>,
4972  iXLen)
4973
4974define <vscale x 8 x bfloat> @intrinsic_vrgather_vv_nxv8bf16_nxv8bf16_nxv8i16(<vscale x 8 x bfloat> %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
4975; CHECK-LABEL: intrinsic_vrgather_vv_nxv8bf16_nxv8bf16_nxv8i16:
4976; CHECK:       # %bb.0: # %entry
4977; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
4978; CHECK-NEXT:    vrgather.vv v12, v8, v10
4979; CHECK-NEXT:    vmv.v.v v8, v12
4980; CHECK-NEXT:    ret
4981entry:
4982  %a = call <vscale x 8 x bfloat> @llvm.riscv.vrgather.vv.nxv8bf16.iXLen(
4983    <vscale x 8 x bfloat> undef,
4984    <vscale x 8 x bfloat> %0,
4985    <vscale x 8 x i16> %1,
4986    iXLen %2)
4987
4988  ret <vscale x 8 x bfloat> %a
4989}
4990
4991declare <vscale x 8 x bfloat> @llvm.riscv.vrgather.vv.mask.nxv8bf16.iXLen(
4992  <vscale x 8 x bfloat>,
4993  <vscale x 8 x bfloat>,
4994  <vscale x 8 x i16>,
4995  <vscale x 8 x i1>,
4996  iXLen,
4997  iXLen)
4998
4999define <vscale x 8 x bfloat> @intrinsic_vrgather_mask_vv_nxv8bf16_nxv8bf16_nxv8i16(<vscale x 8 x bfloat> %0, <vscale x 8 x bfloat> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
5000; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv8bf16_nxv8bf16_nxv8i16:
5001; CHECK:       # %bb.0: # %entry
5002; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
5003; CHECK-NEXT:    vrgather.vv v8, v10, v12, v0.t
5004; CHECK-NEXT:    ret
5005entry:
5006  %a = call <vscale x 8 x bfloat> @llvm.riscv.vrgather.vv.mask.nxv8bf16.iXLen(
5007    <vscale x 8 x bfloat> %0,
5008    <vscale x 8 x bfloat> %1,
5009    <vscale x 8 x i16> %2,
5010    <vscale x 8 x i1> %3,
5011    iXLen %4, iXLen 1)
5012
5013  ret <vscale x 8 x bfloat> %a
5014}
5015
5016declare <vscale x 16 x bfloat> @llvm.riscv.vrgather.vv.nxv16bf16.iXLen(
5017  <vscale x 16 x bfloat>,
5018  <vscale x 16 x bfloat>,
5019  <vscale x 16 x i16>,
5020  iXLen)
5021
5022define <vscale x 16 x bfloat> @intrinsic_vrgather_vv_nxv16bf16_nxv16bf16_nxv16i16(<vscale x 16 x bfloat> %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
5023; CHECK-LABEL: intrinsic_vrgather_vv_nxv16bf16_nxv16bf16_nxv16i16:
5024; CHECK:       # %bb.0: # %entry
5025; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
5026; CHECK-NEXT:    vrgather.vv v16, v8, v12
5027; CHECK-NEXT:    vmv.v.v v8, v16
5028; CHECK-NEXT:    ret
5029entry:
5030  %a = call <vscale x 16 x bfloat> @llvm.riscv.vrgather.vv.nxv16bf16.iXLen(
5031    <vscale x 16 x bfloat> undef,
5032    <vscale x 16 x bfloat> %0,
5033    <vscale x 16 x i16> %1,
5034    iXLen %2)
5035
5036  ret <vscale x 16 x bfloat> %a
5037}
5038
5039declare <vscale x 16 x bfloat> @llvm.riscv.vrgather.vv.mask.nxv16bf16.iXLen(
5040  <vscale x 16 x bfloat>,
5041  <vscale x 16 x bfloat>,
5042  <vscale x 16 x i16>,
5043  <vscale x 16 x i1>,
5044  iXLen,
5045  iXLen)
5046
5047define <vscale x 16 x bfloat> @intrinsic_vrgather_mask_vv_nxv16bf16_nxv16bf16_nxv16i16(<vscale x 16 x bfloat> %0, <vscale x 16 x bfloat> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
5048; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv16bf16_nxv16bf16_nxv16i16:
5049; CHECK:       # %bb.0: # %entry
5050; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
5051; CHECK-NEXT:    vrgather.vv v8, v12, v16, v0.t
5052; CHECK-NEXT:    ret
5053entry:
5054  %a = call <vscale x 16 x bfloat> @llvm.riscv.vrgather.vv.mask.nxv16bf16.iXLen(
5055    <vscale x 16 x bfloat> %0,
5056    <vscale x 16 x bfloat> %1,
5057    <vscale x 16 x i16> %2,
5058    <vscale x 16 x i1> %3,
5059    iXLen %4, iXLen 1)
5060
5061  ret <vscale x 16 x bfloat> %a
5062}
5063
5064declare <vscale x 32 x bfloat> @llvm.riscv.vrgather.vv.nxv32bf16.iXLen(
5065  <vscale x 32 x bfloat>,
5066  <vscale x 32 x bfloat>,
5067  <vscale x 32 x i16>,
5068  iXLen)
5069
5070define <vscale x 32 x bfloat> @intrinsic_vrgather_vv_nxv32bf16_nxv32bf16_nxv32i16(<vscale x 32 x bfloat> %0, <vscale x 32 x i16> %1, iXLen %2) nounwind {
5071; CHECK-LABEL: intrinsic_vrgather_vv_nxv32bf16_nxv32bf16_nxv32i16:
5072; CHECK:       # %bb.0: # %entry
5073; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
5074; CHECK-NEXT:    vrgather.vv v24, v8, v16
5075; CHECK-NEXT:    vmv.v.v v8, v24
5076; CHECK-NEXT:    ret
5077entry:
5078  %a = call <vscale x 32 x bfloat> @llvm.riscv.vrgather.vv.nxv32bf16.iXLen(
5079    <vscale x 32 x bfloat> undef,
5080    <vscale x 32 x bfloat> %0,
5081    <vscale x 32 x i16> %1,
5082    iXLen %2)
5083
5084  ret <vscale x 32 x bfloat> %a
5085}
5086
5087declare <vscale x 32 x bfloat> @llvm.riscv.vrgather.vv.mask.nxv32bf16.iXLen(
5088  <vscale x 32 x bfloat>,
5089  <vscale x 32 x bfloat>,
5090  <vscale x 32 x i16>,
5091  <vscale x 32 x i1>,
5092  iXLen,
5093  iXLen)
5094
5095define <vscale x 32 x bfloat> @intrinsic_vrgather_mask_vv_nxv32bf16_nxv32bf16_nxv32i16(<vscale x 32 x bfloat> %0, <vscale x 32 x bfloat> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
5096; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv32bf16_nxv32bf16_nxv32i16:
5097; CHECK:       # %bb.0: # %entry
5098; CHECK-NEXT:    vl8re16.v v24, (a0)
5099; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
5100; CHECK-NEXT:    vrgather.vv v8, v16, v24, v0.t
5101; CHECK-NEXT:    ret
5102entry:
5103  %a = call <vscale x 32 x bfloat> @llvm.riscv.vrgather.vv.mask.nxv32bf16.iXLen(
5104    <vscale x 32 x bfloat> %0,
5105    <vscale x 32 x bfloat> %1,
5106    <vscale x 32 x i16> %2,
5107    <vscale x 32 x i1> %3,
5108    iXLen %4, iXLen 1)
5109
5110  ret <vscale x 32 x bfloat> %a
5111}
5112
5113declare <vscale x 1 x bfloat> @llvm.riscv.vrgather.vx.nxv1bf16.iXLen(
5114  <vscale x 1 x bfloat>,
5115  <vscale x 1 x bfloat>,
5116  iXLen,
5117  iXLen)
5118
5119define <vscale x 1 x bfloat> @intrinsic_vrgather_vx_nxv1bf16_nxv1bf16(<vscale x 1 x bfloat> %0, iXLen %1, iXLen %2) nounwind {
5120; CHECK-LABEL: intrinsic_vrgather_vx_nxv1bf16_nxv1bf16:
5121; CHECK:       # %bb.0: # %entry
5122; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
5123; CHECK-NEXT:    vrgather.vx v9, v8, a0
5124; CHECK-NEXT:    vmv1r.v v8, v9
5125; CHECK-NEXT:    ret
5126entry:
5127  %a = call <vscale x 1 x bfloat> @llvm.riscv.vrgather.vx.nxv1bf16.iXLen(
5128    <vscale x 1 x bfloat> undef,
5129    <vscale x 1 x bfloat> %0,
5130    iXLen %1,
5131    iXLen %2)
5132
5133  ret <vscale x 1 x bfloat> %a
5134}
5135
5136declare <vscale x 1 x bfloat> @llvm.riscv.vrgather.vx.mask.nxv1bf16.iXLen(
5137  <vscale x 1 x bfloat>,
5138  <vscale x 1 x bfloat>,
5139  iXLen,
5140  <vscale x 1 x i1>,
5141  iXLen,
5142  iXLen)
5143
5144define <vscale x 1 x bfloat> @intrinsic_vrgather_mask_vx_nxv1bf16_nxv1bf16(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
5145; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv1bf16_nxv1bf16:
5146; CHECK:       # %bb.0: # %entry
5147; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
5148; CHECK-NEXT:    vrgather.vx v8, v9, a0, v0.t
5149; CHECK-NEXT:    ret
5150entry:
5151  %a = call <vscale x 1 x bfloat> @llvm.riscv.vrgather.vx.mask.nxv1bf16.iXLen(
5152    <vscale x 1 x bfloat> %0,
5153    <vscale x 1 x bfloat> %1,
5154    iXLen %2,
5155    <vscale x 1 x i1> %3,
5156    iXLen %4, iXLen 1)
5157
5158  ret <vscale x 1 x bfloat> %a
5159}
5160
5161declare <vscale x 2 x bfloat> @llvm.riscv.vrgather.vx.nxv2bf16.iXLen(
5162  <vscale x 2 x bfloat>,
5163  <vscale x 2 x bfloat>,
5164  iXLen,
5165  iXLen)
5166
5167define <vscale x 2 x bfloat> @intrinsic_vrgather_vx_nxv2bf16_nxv2bf16(<vscale x 2 x bfloat> %0, iXLen %1, iXLen %2) nounwind {
5168; CHECK-LABEL: intrinsic_vrgather_vx_nxv2bf16_nxv2bf16:
5169; CHECK:       # %bb.0: # %entry
5170; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
5171; CHECK-NEXT:    vrgather.vx v9, v8, a0
5172; CHECK-NEXT:    vmv1r.v v8, v9
5173; CHECK-NEXT:    ret
5174entry:
5175  %a = call <vscale x 2 x bfloat> @llvm.riscv.vrgather.vx.nxv2bf16.iXLen(
5176    <vscale x 2 x bfloat> undef,
5177    <vscale x 2 x bfloat> %0,
5178    iXLen %1,
5179    iXLen %2)
5180
5181  ret <vscale x 2 x bfloat> %a
5182}
5183
5184declare <vscale x 2 x bfloat> @llvm.riscv.vrgather.vx.mask.nxv2bf16.iXLen(
5185  <vscale x 2 x bfloat>,
5186  <vscale x 2 x bfloat>,
5187  iXLen,
5188  <vscale x 2 x i1>,
5189  iXLen,
5190  iXLen)
5191
define <vscale x 2 x bfloat> @intrinsic_vrgather_mask_vx_nxv2bf16_nxv2bf16(<vscale x 2 x bfloat> %0, <vscale x 2 x bfloat> %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv2bf16_nxv2bf16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT:    vrgather.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x bfloat> @llvm.riscv.vrgather.vx.mask.nxv2bf16.iXLen(
    <vscale x 2 x bfloat> %0,
    <vscale x 2 x bfloat> %1,
    iXLen %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x bfloat> %a
}
5208
5209declare <vscale x 4 x bfloat> @llvm.riscv.vrgather.vx.nxv4bf16.iXLen(
5210  <vscale x 4 x bfloat>,
5211  <vscale x 4 x bfloat>,
5212  iXLen,
5213  iXLen)
5214
5215define <vscale x 4 x bfloat> @intrinsic_vrgather_vx_nxv4bf16_nxv4bf16(<vscale x 4 x bfloat> %0, iXLen %1, iXLen %2) nounwind {
5216; CHECK-LABEL: intrinsic_vrgather_vx_nxv4bf16_nxv4bf16:
5217; CHECK:       # %bb.0: # %entry
5218; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
5219; CHECK-NEXT:    vrgather.vx v9, v8, a0
5220; CHECK-NEXT:    vmv.v.v v8, v9
5221; CHECK-NEXT:    ret
5222entry:
5223  %a = call <vscale x 4 x bfloat> @llvm.riscv.vrgather.vx.nxv4bf16.iXLen(
5224    <vscale x 4 x bfloat> undef,
5225    <vscale x 4 x bfloat> %0,
5226    iXLen %1,
5227    iXLen %2)
5228
5229  ret <vscale x 4 x bfloat> %a
5230}
5231
5232declare <vscale x 4 x bfloat> @llvm.riscv.vrgather.vx.mask.nxv4bf16.iXLen(
5233  <vscale x 4 x bfloat>,
5234  <vscale x 4 x bfloat>,
5235  iXLen,
5236  <vscale x 4 x i1>,
5237  iXLen,
5238  iXLen)
5239
5240define <vscale x 4 x bfloat> @intrinsic_vrgather_mask_vx_nxv4bf16_nxv4bf16(<vscale x 4 x bfloat> %0, <vscale x 4 x bfloat> %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
5241; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv4bf16_nxv4bf16:
5242; CHECK:       # %bb.0: # %entry
5243; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
5244; CHECK-NEXT:    vrgather.vx v8, v9, a0, v0.t
5245; CHECK-NEXT:    ret
5246entry:
5247  %a = call <vscale x 4 x bfloat> @llvm.riscv.vrgather.vx.mask.nxv4bf16.iXLen(
5248    <vscale x 4 x bfloat> %0,
5249    <vscale x 4 x bfloat> %1,
5250    iXLen %2,
5251    <vscale x 4 x i1> %3,
5252    iXLen %4, iXLen 1)
5253
5254  ret <vscale x 4 x bfloat> %a
5255}
5256
5257declare <vscale x 8 x bfloat> @llvm.riscv.vrgather.vx.nxv8bf16.iXLen(
5258  <vscale x 8 x bfloat>,
5259  <vscale x 8 x bfloat>,
5260  iXLen,
5261  iXLen)
5262
5263define <vscale x 8 x bfloat> @intrinsic_vrgather_vx_nxv8bf16_nxv8bf16(<vscale x 8 x bfloat> %0, iXLen %1, iXLen %2) nounwind {
5264; CHECK-LABEL: intrinsic_vrgather_vx_nxv8bf16_nxv8bf16:
5265; CHECK:       # %bb.0: # %entry
5266; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
5267; CHECK-NEXT:    vrgather.vx v10, v8, a0
5268; CHECK-NEXT:    vmv.v.v v8, v10
5269; CHECK-NEXT:    ret
5270entry:
5271  %a = call <vscale x 8 x bfloat> @llvm.riscv.vrgather.vx.nxv8bf16.iXLen(
5272    <vscale x 8 x bfloat> undef,
5273    <vscale x 8 x bfloat> %0,
5274    iXLen %1,
5275    iXLen %2)
5276
5277  ret <vscale x 8 x bfloat> %a
5278}
5279
5280declare <vscale x 8 x bfloat> @llvm.riscv.vrgather.vx.mask.nxv8bf16.iXLen(
5281  <vscale x 8 x bfloat>,
5282  <vscale x 8 x bfloat>,
5283  iXLen,
5284  <vscale x 8 x i1>,
5285  iXLen,
5286  iXLen)
5287
5288define <vscale x 8 x bfloat> @intrinsic_vrgather_mask_vx_nxv8bf16_nxv8bf16(<vscale x 8 x bfloat> %0, <vscale x 8 x bfloat> %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
5289; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv8bf16_nxv8bf16:
5290; CHECK:       # %bb.0: # %entry
5291; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
5292; CHECK-NEXT:    vrgather.vx v8, v10, a0, v0.t
5293; CHECK-NEXT:    ret
5294entry:
5295  %a = call <vscale x 8 x bfloat> @llvm.riscv.vrgather.vx.mask.nxv8bf16.iXLen(
5296    <vscale x 8 x bfloat> %0,
5297    <vscale x 8 x bfloat> %1,
5298    iXLen %2,
5299    <vscale x 8 x i1> %3,
5300    iXLen %4, iXLen 1)
5301
5302  ret <vscale x 8 x bfloat> %a
5303}
5304
; Unmasked vrgather.vx intrinsic: (passthru, source, index, vl).
declare <vscale x 16 x bfloat> @llvm.riscv.vrgather.vx.nxv16bf16.iXLen(
  <vscale x 16 x bfloat>,
  <vscale x 16 x bfloat>,
  iXLen,
  iXLen)

; Unmasked vrgather.vx on nxv16bf16 (e16, m4): scratch destination v12 plus
; copy-back, avoiding source/destination register-group overlap.
define <vscale x 16 x bfloat> @intrinsic_vrgather_vx_nxv16bf16_nxv16bf16(<vscale x 16 x bfloat> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vx_nxv16bf16_nxv16bf16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT:    vrgather.vx v12, v8, a0
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x bfloat> @llvm.riscv.vrgather.vx.nxv16bf16.iXLen(
    <vscale x 16 x bfloat> undef,
    <vscale x 16 x bfloat> %0,
    iXLen %1,
    iXLen %2)

  ret <vscale x 16 x bfloat> %a
}
5327
; Masked vrgather.vx intrinsic: (passthru, source, index, mask, vl, policy).
declare <vscale x 16 x bfloat> @llvm.riscv.vrgather.vx.mask.nxv16bf16.iXLen(
  <vscale x 16 x bfloat>,
  <vscale x 16 x bfloat>,
  iXLen,
  <vscale x 16 x i1>,
  iXLen,
  iXLen)

; Masked vrgather.vx on nxv16bf16 (e16, m4): gathers into the passthru (v8)
; under v0.t with "ta, mu" (policy operand 1 — mask undisturbed, apparently).
define <vscale x 16 x bfloat> @intrinsic_vrgather_mask_vx_nxv16bf16_nxv16bf16(<vscale x 16 x bfloat> %0, <vscale x 16 x bfloat> %1, iXLen %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv16bf16_nxv16bf16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT:    vrgather.vx v8, v12, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x bfloat> @llvm.riscv.vrgather.vx.mask.nxv16bf16.iXLen(
    <vscale x 16 x bfloat> %0,
    <vscale x 16 x bfloat> %1,
    iXLen %2,
    <vscale x 16 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 16 x bfloat> %a
}
5352
; Unmasked vrgather.vx intrinsic: (passthru, source, index, vl).
declare <vscale x 32 x bfloat> @llvm.riscv.vrgather.vx.nxv32bf16.iXLen(
  <vscale x 32 x bfloat>,
  <vscale x 32 x bfloat>,
  iXLen,
  iXLen)

; Unmasked vrgather.vx on nxv32bf16 (e16, m8): scratch destination v16 plus
; copy-back, avoiding source/destination register-group overlap.
define <vscale x 32 x bfloat> @intrinsic_vrgather_vx_nxv32bf16_nxv32bf16(<vscale x 32 x bfloat> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vx_nxv32bf16_nxv32bf16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
; CHECK-NEXT:    vrgather.vx v16, v8, a0
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x bfloat> @llvm.riscv.vrgather.vx.nxv32bf16.iXLen(
    <vscale x 32 x bfloat> undef,
    <vscale x 32 x bfloat> %0,
    iXLen %1,
    iXLen %2)

  ret <vscale x 32 x bfloat> %a
}
5375
; Masked vrgather.vx intrinsic: (passthru, source, index, mask, vl, policy).
declare <vscale x 32 x bfloat> @llvm.riscv.vrgather.vx.mask.nxv32bf16.iXLen(
  <vscale x 32 x bfloat>,
  <vscale x 32 x bfloat>,
  iXLen,
  <vscale x 32 x i1>,
  iXLen,
  iXLen)

; Masked vrgather.vx on nxv32bf16 (e16, m8): gathers into the passthru (v8)
; under v0.t with "ta, mu" (policy operand 1 — mask undisturbed, apparently).
define <vscale x 32 x bfloat> @intrinsic_vrgather_mask_vx_nxv32bf16_nxv32bf16(<vscale x 32 x bfloat> %0, <vscale x 32 x bfloat> %1, iXLen %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv32bf16_nxv32bf16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
; CHECK-NEXT:    vrgather.vx v8, v16, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x bfloat> @llvm.riscv.vrgather.vx.mask.nxv32bf16.iXLen(
    <vscale x 32 x bfloat> %0,
    <vscale x 32 x bfloat> %1,
    iXLen %2,
    <vscale x 32 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 32 x bfloat> %a
}
5400
; vrgather.vx intrinsic with the constant index 9: should lower to the
; immediate form vrgather.vi. nxv1bf16 uses e16, mf4.
define <vscale x 1 x bfloat> @intrinsic_vrgather_vi_nxv1bf16_nxv1bf16(<vscale x 1 x bfloat> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vi_nxv1bf16_nxv1bf16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vrgather.vi v9, v8, 9
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x bfloat> @llvm.riscv.vrgather.vx.nxv1bf16.iXLen(
    <vscale x 1 x bfloat> undef,
    <vscale x 1 x bfloat> %0,
    iXLen 9,
    iXLen %1)

  ret <vscale x 1 x bfloat> %a
}
5417
; Masked vrgather with constant index 9 on nxv1bf16: lowers to vrgather.vi
; writing the passthru (v8) under v0.t.
define <vscale x 1 x bfloat> @intrinsic_vrgather_mask_vi_nxv1bf16_nxv1bf16(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv1bf16_nxv1bf16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT:    vrgather.vi v8, v9, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x bfloat> @llvm.riscv.vrgather.vx.mask.nxv1bf16.iXLen(
    <vscale x 1 x bfloat> %0,
    <vscale x 1 x bfloat> %1,
    iXLen 9,
    <vscale x 1 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 1 x bfloat> %a
}
5434
; Constant index 9 lowers to vrgather.vi. nxv2bf16 uses e16, mf2.
define <vscale x 2 x bfloat> @intrinsic_vrgather_vi_nxv2bf16_nxv2bf16(<vscale x 2 x bfloat> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vi_nxv2bf16_nxv2bf16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vrgather.vi v9, v8, 9
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x bfloat> @llvm.riscv.vrgather.vx.nxv2bf16.iXLen(
    <vscale x 2 x bfloat> undef,
    <vscale x 2 x bfloat> %0,
    iXLen 9,
    iXLen %1)

  ret <vscale x 2 x bfloat> %a
}
5451
; Masked vrgather.vi on nxv2bf16 (e16, mf2), writing the passthru under v0.t.
define <vscale x 2 x bfloat> @intrinsic_vrgather_mask_vi_nxv2bf16_nxv2bf16(<vscale x 2 x bfloat> %0, <vscale x 2 x bfloat> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv2bf16_nxv2bf16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT:    vrgather.vi v8, v9, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x bfloat> @llvm.riscv.vrgather.vx.mask.nxv2bf16.iXLen(
    <vscale x 2 x bfloat> %0,
    <vscale x 2 x bfloat> %1,
    iXLen 9,
    <vscale x 2 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 2 x bfloat> %a
}
5468
; Constant index 9 lowers to vrgather.vi. nxv4bf16 uses e16, m1.
define <vscale x 4 x bfloat> @intrinsic_vrgather_vi_nxv4bf16_nxv4bf16(<vscale x 4 x bfloat> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vi_nxv4bf16_nxv4bf16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vrgather.vi v9, v8, 9
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x bfloat> @llvm.riscv.vrgather.vx.nxv4bf16.iXLen(
    <vscale x 4 x bfloat> undef,
    <vscale x 4 x bfloat> %0,
    iXLen 9,
    iXLen %1)

  ret <vscale x 4 x bfloat> %a
}
5485
; Masked vrgather.vi on nxv4bf16 (e16, m1), writing the passthru under v0.t.
define <vscale x 4 x bfloat> @intrinsic_vrgather_mask_vi_nxv4bf16_nxv4bf16(<vscale x 4 x bfloat> %0, <vscale x 4 x bfloat> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv4bf16_nxv4bf16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT:    vrgather.vi v8, v9, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x bfloat> @llvm.riscv.vrgather.vx.mask.nxv4bf16.iXLen(
    <vscale x 4 x bfloat> %0,
    <vscale x 4 x bfloat> %1,
    iXLen 9,
    <vscale x 4 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 4 x bfloat> %a
}
5502
; Constant index 9 lowers to vrgather.vi. nxv8bf16 uses e16, m2.
define <vscale x 8 x bfloat> @intrinsic_vrgather_vi_nxv8bf16_nxv8bf16(<vscale x 8 x bfloat> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vi_nxv8bf16_nxv8bf16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vrgather.vi v10, v8, 9
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x bfloat> @llvm.riscv.vrgather.vx.nxv8bf16.iXLen(
    <vscale x 8 x bfloat> undef,
    <vscale x 8 x bfloat> %0,
    iXLen 9,
    iXLen %1)

  ret <vscale x 8 x bfloat> %a
}
5519
; Masked vrgather.vi on nxv8bf16 (e16, m2), writing the passthru under v0.t.
define <vscale x 8 x bfloat> @intrinsic_vrgather_mask_vi_nxv8bf16_nxv8bf16(<vscale x 8 x bfloat> %0, <vscale x 8 x bfloat> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv8bf16_nxv8bf16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT:    vrgather.vi v8, v10, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x bfloat> @llvm.riscv.vrgather.vx.mask.nxv8bf16.iXLen(
    <vscale x 8 x bfloat> %0,
    <vscale x 8 x bfloat> %1,
    iXLen 9,
    <vscale x 8 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 8 x bfloat> %a
}
5536
; Constant index 9 lowers to vrgather.vi. nxv16bf16 uses e16, m4.
define <vscale x 16 x bfloat> @intrinsic_vrgather_vi_nxv16bf16_nxv16bf16(<vscale x 16 x bfloat> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vi_nxv16bf16_nxv16bf16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    vrgather.vi v12, v8, 9
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x bfloat> @llvm.riscv.vrgather.vx.nxv16bf16.iXLen(
    <vscale x 16 x bfloat> undef,
    <vscale x 16 x bfloat> %0,
    iXLen 9,
    iXLen %1)

  ret <vscale x 16 x bfloat> %a
}
5553
; Masked vrgather.vi on nxv16bf16 (e16, m4), writing the passthru under v0.t.
define <vscale x 16 x bfloat> @intrinsic_vrgather_mask_vi_nxv16bf16_nxv16bf16(<vscale x 16 x bfloat> %0, <vscale x 16 x bfloat> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv16bf16_nxv16bf16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT:    vrgather.vi v8, v12, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x bfloat> @llvm.riscv.vrgather.vx.mask.nxv16bf16.iXLen(
    <vscale x 16 x bfloat> %0,
    <vscale x 16 x bfloat> %1,
    iXLen 9,
    <vscale x 16 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 16 x bfloat> %a
}
5570
; Constant index 9 lowers to vrgather.vi. nxv32bf16 uses e16, m8.
define <vscale x 32 x bfloat> @intrinsic_vrgather_vi_nxv32bf16_nxv32bf16(<vscale x 32 x bfloat> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vrgather_vi_nxv32bf16_nxv32bf16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT:    vrgather.vi v16, v8, 9
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x bfloat> @llvm.riscv.vrgather.vx.nxv32bf16.iXLen(
    <vscale x 32 x bfloat> undef,
    <vscale x 32 x bfloat> %0,
    iXLen 9,
    iXLen %1)

  ret <vscale x 32 x bfloat> %a
}
5587
; Masked vrgather.vi on nxv32bf16 (e16, m8), writing the passthru under v0.t.
define <vscale x 32 x bfloat> @intrinsic_vrgather_mask_vi_nxv32bf16_nxv32bf16(<vscale x 32 x bfloat> %0, <vscale x 32 x bfloat> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv32bf16_nxv32bf16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
; CHECK-NEXT:    vrgather.vi v8, v16, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x bfloat> @llvm.riscv.vrgather.vx.mask.nxv32bf16.iXLen(
    <vscale x 32 x bfloat> %0,
    <vscale x 32 x bfloat> %1,
    iXLen 9,
    <vscale x 32 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 32 x bfloat> %a
}
5604
5605