; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s

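; Orientation (not part of the autogenerated checks): these tests exercise the
; vnsra (narrowing shift right arithmetic) intrinsics in their .wv (vector
; shift), .wx (scalar shift), and .wi (immediate shift) forms. As the declares
; below show, the unmasked intrinsic takes (passthru, wide 2*SEW source,
; shift amount, vl) and the masked variant appends (mask, vl, policy); the
; trailing `iXLen 1` appears to be the tail-agnostic policy bit, which is why
; the masked checks select `ta, mu` while the undef-passthru unmasked checks
; select `ta, ma`.
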
declare <vscale x 1 x i8> @llvm.riscv.vnsra.nxv1i8.nxv1i16.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i16>,
  <vscale x 1 x i8>,
  iXLen);

define <vscale x 1 x i8> @intrinsic_vnsra_wv_nxv1i8_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnsra_wv_nxv1i8_nxv1i16_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vnsra.wv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vnsra.nxv1i8.nxv1i16.nxv1i8(
    <vscale x 1 x i8> undef,
    <vscale x 1 x i16> %0,
    <vscale x 1 x i8> %1,
    iXLen %2)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vnsra.mask.nxv1i8.nxv1i16.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i16>,
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x i8> @intrinsic_vnsra_mask_wv_nxv1i8_nxv1i16_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnsra_mask_wv_nxv1i8_nxv1i16_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT:    vnsra.wv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vnsra.mask.nxv1i8.nxv1i16.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i16> %1,
    <vscale x 1 x i8> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 2 x i8> @llvm.riscv.vnsra.nxv2i8.nxv2i16.nxv2i8(
  <vscale x 2 x i8>,
  <vscale x 2 x i16>,
  <vscale x 2 x i8>,
  iXLen);

define <vscale x 2 x i8> @intrinsic_vnsra_wv_nxv2i8_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnsra_wv_nxv2i8_nxv2i16_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT:    vnsra.wv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vnsra.nxv2i8.nxv2i16.nxv2i8(
    <vscale x 2 x i8> undef,
    <vscale x 2 x i16> %0,
    <vscale x 2 x i8> %1,
    iXLen %2)

  ret <vscale x 2 x i8> %a
}

declare <vscale x 2 x i8> @llvm.riscv.vnsra.mask.nxv2i8.nxv2i16.nxv2i8(
  <vscale x 2 x i8>,
  <vscale x 2 x i16>,
  <vscale x 2 x i8>,
  <vscale x 2 x i1>,
  iXLen,
  iXLen);

define <vscale x 2 x i8> @intrinsic_vnsra_mask_wv_nxv2i8_nxv2i16_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i16> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnsra_mask_wv_nxv2i8_nxv2i16_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
; CHECK-NEXT:    vnsra.wv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vnsra.mask.nxv2i8.nxv2i16.nxv2i8(
    <vscale x 2 x i8> %0,
    <vscale x 2 x i16> %1,
    <vscale x 2 x i8> %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x i8> %a
}

declare <vscale x 4 x i8> @llvm.riscv.vnsra.nxv4i8.nxv4i16.nxv4i8(
  <vscale x 4 x i8>,
  <vscale x 4 x i16>,
  <vscale x 4 x i8>,
  iXLen);

define <vscale x 4 x i8> @intrinsic_vnsra_wv_nxv4i8_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnsra_wv_nxv4i8_nxv4i16_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    vnsra.wv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vnsra.nxv4i8.nxv4i16.nxv4i8(
    <vscale x 4 x i8> undef,
    <vscale x 4 x i16> %0,
    <vscale x 4 x i8> %1,
    iXLen %2)

  ret <vscale x 4 x i8> %a
}

declare <vscale x 4 x i8> @llvm.riscv.vnsra.mask.nxv4i8.nxv4i16.nxv4i8(
  <vscale x 4 x i8>,
  <vscale x 4 x i16>,
  <vscale x 4 x i8>,
  <vscale x 4 x i1>,
  iXLen,
  iXLen);

define <vscale x 4 x i8> @intrinsic_vnsra_mask_wv_nxv4i8_nxv4i16_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i16> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnsra_mask_wv_nxv4i8_nxv4i16_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
; CHECK-NEXT:    vnsra.wv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vnsra.mask.nxv4i8.nxv4i16.nxv4i8(
    <vscale x 4 x i8> %0,
    <vscale x 4 x i16> %1,
    <vscale x 4 x i8> %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vnsra.nxv8i8.nxv8i16.nxv8i8(
  <vscale x 8 x i8>,
  <vscale x 8 x i16>,
  <vscale x 8 x i8>,
  iXLen);

define <vscale x 8 x i8> @intrinsic_vnsra_wv_nxv8i8_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnsra_wv_nxv8i8_nxv8i16_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT:    vnsra.wv v11, v8, v10
; CHECK-NEXT:    vmv.v.v v8, v11
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vnsra.nxv8i8.nxv8i16.nxv8i8(
    <vscale x 8 x i8> undef,
    <vscale x 8 x i16> %0,
    <vscale x 8 x i8> %1,
    iXLen %2)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vnsra.mask.nxv8i8.nxv8i16.nxv8i8(
  <vscale x 8 x i8>,
  <vscale x 8 x i16>,
  <vscale x 8 x i8>,
  <vscale x 8 x i1>,
  iXLen,
  iXLen);

define <vscale x 8 x i8> @intrinsic_vnsra_mask_wv_nxv8i8_nxv8i16_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i16> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnsra_mask_wv_nxv8i8_nxv8i16_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
; CHECK-NEXT:    vnsra.wv v8, v10, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vnsra.mask.nxv8i8.nxv8i16.nxv8i8(
    <vscale x 8 x i8> %0,
    <vscale x 8 x i16> %1,
    <vscale x 8 x i8> %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 16 x i8> @llvm.riscv.vnsra.nxv16i8.nxv16i16.nxv16i8(
  <vscale x 16 x i8>,
  <vscale x 16 x i16>,
  <vscale x 16 x i8>,
  iXLen);

define <vscale x 16 x i8> @intrinsic_vnsra_wv_nxv16i8_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnsra_wv_nxv16i8_nxv16i16_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT:    vnsra.wv v14, v8, v12
; CHECK-NEXT:    vmv.v.v v8, v14
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vnsra.nxv16i8.nxv16i16.nxv16i8(
    <vscale x 16 x i8> undef,
    <vscale x 16 x i16> %0,
    <vscale x 16 x i8> %1,
    iXLen %2)

  ret <vscale x 16 x i8> %a
}

declare <vscale x 16 x i8> @llvm.riscv.vnsra.mask.nxv16i8.nxv16i16.nxv16i8(
  <vscale x 16 x i8>,
  <vscale x 16 x i16>,
  <vscale x 16 x i8>,
  <vscale x 16 x i1>,
  iXLen,
  iXLen);

define <vscale x 16 x i8> @intrinsic_vnsra_mask_wv_nxv16i8_nxv16i16_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i16> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnsra_mask_wv_nxv16i8_nxv16i16_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
; CHECK-NEXT:    vnsra.wv v8, v12, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vnsra.mask.nxv16i8.nxv16i16.nxv16i8(
    <vscale x 16 x i8> %0,
    <vscale x 16 x i16> %1,
    <vscale x 16 x i8> %2,
    <vscale x 16 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 16 x i8> %a
}

declare <vscale x 32 x i8> @llvm.riscv.vnsra.nxv32i8.nxv32i16.nxv32i8(
  <vscale x 32 x i8>,
  <vscale x 32 x i16>,
  <vscale x 32 x i8>,
  iXLen);

define <vscale x 32 x i8> @intrinsic_vnsra_wv_nxv32i8_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnsra_wv_nxv32i8_nxv32i16_nxv32i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT:    vnsra.wv v20, v8, v16
; CHECK-NEXT:    vmv.v.v v8, v20
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vnsra.nxv32i8.nxv32i16.nxv32i8(
    <vscale x 32 x i8> undef,
    <vscale x 32 x i16> %0,
    <vscale x 32 x i8> %1,
    iXLen %2)

  ret <vscale x 32 x i8> %a
}

declare <vscale x 32 x i8> @llvm.riscv.vnsra.mask.nxv32i8.nxv32i16.nxv32i8(
  <vscale x 32 x i8>,
  <vscale x 32 x i16>,
  <vscale x 32 x i8>,
  <vscale x 32 x i1>,
  iXLen,
  iXLen);

define <vscale x 32 x i8> @intrinsic_vnsra_mask_wv_nxv32i8_nxv32i16_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i16> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnsra_mask_wv_nxv32i8_nxv32i16_nxv32i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
; CHECK-NEXT:    vnsra.wv v8, v16, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vnsra.mask.nxv32i8.nxv32i16.nxv32i8(
    <vscale x 32 x i8> %0,
    <vscale x 32 x i16> %1,
    <vscale x 32 x i8> %2,
    <vscale x 32 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 32 x i8> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vnsra.nxv1i16.nxv1i32.nxv1i16(
  <vscale x 1 x i16>,
  <vscale x 1 x i32>,
  <vscale x 1 x i16>,
  iXLen);

define <vscale x 1 x i16> @intrinsic_vnsra_wv_nxv1i16_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnsra_wv_nxv1i16_nxv1i32_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vnsra.wv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vnsra.nxv1i16.nxv1i32.nxv1i16(
    <vscale x 1 x i16> undef,
    <vscale x 1 x i32> %0,
    <vscale x 1 x i16> %1,
    iXLen %2)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vnsra.mask.nxv1i16.nxv1i32.nxv1i16(
  <vscale x 1 x i16>,
  <vscale x 1 x i32>,
  <vscale x 1 x i16>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x i16> @intrinsic_vnsra_mask_wv_nxv1i16_nxv1i32_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i32> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnsra_mask_wv_nxv1i16_nxv1i32_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT:    vnsra.wv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vnsra.mask.nxv1i16.nxv1i32.nxv1i16(
    <vscale x 1 x i16> %0,
    <vscale x 1 x i32> %1,
    <vscale x 1 x i16> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vnsra.nxv2i16.nxv2i32.nxv2i16(
  <vscale x 2 x i16>,
  <vscale x 2 x i32>,
  <vscale x 2 x i16>,
  iXLen);

define <vscale x 2 x i16> @intrinsic_vnsra_wv_nxv2i16_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnsra_wv_nxv2i16_nxv2i32_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vnsra.wv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vnsra.nxv2i16.nxv2i32.nxv2i16(
    <vscale x 2 x i16> undef,
    <vscale x 2 x i32> %0,
    <vscale x 2 x i16> %1,
    iXLen %2)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vnsra.mask.nxv2i16.nxv2i32.nxv2i16(
  <vscale x 2 x i16>,
  <vscale x 2 x i32>,
  <vscale x 2 x i16>,
  <vscale x 2 x i1>,
  iXLen,
  iXLen);

define <vscale x 2 x i16> @intrinsic_vnsra_mask_wv_nxv2i16_nxv2i32_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i32> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnsra_mask_wv_nxv2i16_nxv2i32_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT:    vnsra.wv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vnsra.mask.nxv2i16.nxv2i32.nxv2i16(
    <vscale x 2 x i16> %0,
    <vscale x 2 x i32> %1,
    <vscale x 2 x i16> %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vnsra.nxv4i16.nxv4i32.nxv4i16(
  <vscale x 4 x i16>,
  <vscale x 4 x i32>,
  <vscale x 4 x i16>,
  iXLen);

define <vscale x 4 x i16> @intrinsic_vnsra_wv_nxv4i16_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnsra_wv_nxv4i16_nxv4i32_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vnsra.wv v11, v8, v10
; CHECK-NEXT:    vmv.v.v v8, v11
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vnsra.nxv4i16.nxv4i32.nxv4i16(
    <vscale x 4 x i16> undef,
    <vscale x 4 x i32> %0,
    <vscale x 4 x i16> %1,
    iXLen %2)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vnsra.mask.nxv4i16.nxv4i32.nxv4i16(
  <vscale x 4 x i16>,
  <vscale x 4 x i32>,
  <vscale x 4 x i16>,
  <vscale x 4 x i1>,
  iXLen,
  iXLen);

define <vscale x 4 x i16> @intrinsic_vnsra_mask_wv_nxv4i16_nxv4i32_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i32> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnsra_mask_wv_nxv4i16_nxv4i32_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT:    vnsra.wv v8, v10, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vnsra.mask.nxv4i16.nxv4i32.nxv4i16(
    <vscale x 4 x i16> %0,
    <vscale x 4 x i32> %1,
    <vscale x 4 x i16> %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vnsra.nxv8i16.nxv8i32.nxv8i16(
  <vscale x 8 x i16>,
  <vscale x 8 x i32>,
  <vscale x 8 x i16>,
  iXLen);

define <vscale x 8 x i16> @intrinsic_vnsra_wv_nxv8i16_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnsra_wv_nxv8i16_nxv8i32_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vnsra.wv v14, v8, v12
; CHECK-NEXT:    vmv.v.v v8, v14
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vnsra.nxv8i16.nxv8i32.nxv8i16(
    <vscale x 8 x i16> undef,
    <vscale x 8 x i32> %0,
    <vscale x 8 x i16> %1,
    iXLen %2)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vnsra.mask.nxv8i16.nxv8i32.nxv8i16(
  <vscale x 8 x i16>,
  <vscale x 8 x i32>,
  <vscale x 8 x i16>,
  <vscale x 8 x i1>,
  iXLen,
  iXLen);

define <vscale x 8 x i16> @intrinsic_vnsra_mask_wv_nxv8i16_nxv8i32_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i32> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnsra_mask_wv_nxv8i16_nxv8i32_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT:    vnsra.wv v8, v12, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vnsra.mask.nxv8i16.nxv8i32.nxv8i16(
    <vscale x 8 x i16> %0,
    <vscale x 8 x i32> %1,
    <vscale x 8 x i16> %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vnsra.nxv16i16.nxv16i32.nxv16i16(
  <vscale x 16 x i16>,
  <vscale x 16 x i32>,
  <vscale x 16 x i16>,
  iXLen);

define <vscale x 16 x i16> @intrinsic_vnsra_wv_nxv16i16_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnsra_wv_nxv16i16_nxv16i32_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    vnsra.wv v20, v8, v16
; CHECK-NEXT:    vmv.v.v v8, v20
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vnsra.nxv16i16.nxv16i32.nxv16i16(
    <vscale x 16 x i16> undef,
    <vscale x 16 x i32> %0,
    <vscale x 16 x i16> %1,
    iXLen %2)

  ret <vscale x 16 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vnsra.mask.nxv16i16.nxv16i32.nxv16i16(
  <vscale x 16 x i16>,
  <vscale x 16 x i32>,
  <vscale x 16 x i16>,
  <vscale x 16 x i1>,
  iXLen,
  iXLen);

define <vscale x 16 x i16> @intrinsic_vnsra_mask_wv_nxv16i16_nxv16i32_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i32> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnsra_mask_wv_nxv16i16_nxv16i32_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT:    vnsra.wv v8, v16, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vnsra.mask.nxv16i16.nxv16i32.nxv16i16(
    <vscale x 16 x i16> %0,
    <vscale x 16 x i32> %1,
    <vscale x 16 x i16> %2,
    <vscale x 16 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 16 x i16> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vnsra.nxv1i32.nxv1i64.nxv1i32(
  <vscale x 1 x i32>,
  <vscale x 1 x i64>,
  <vscale x 1 x i32>,
  iXLen);

define <vscale x 1 x i32> @intrinsic_vnsra_wv_nxv1i32_nxv1i64_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnsra_wv_nxv1i32_nxv1i64_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vnsra.wv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vnsra.nxv1i32.nxv1i64.nxv1i32(
    <vscale x 1 x i32> undef,
    <vscale x 1 x i64> %0,
    <vscale x 1 x i32> %1,
    iXLen %2)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vnsra.mask.nxv1i32.nxv1i64.nxv1i32(
  <vscale x 1 x i32>,
  <vscale x 1 x i64>,
  <vscale x 1 x i32>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x i32> @intrinsic_vnsra_mask_wv_nxv1i32_nxv1i64_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i64> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnsra_mask_wv_nxv1i32_nxv1i64_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT:    vnsra.wv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vnsra.mask.nxv1i32.nxv1i64.nxv1i32(
    <vscale x 1 x i32> %0,
    <vscale x 1 x i64> %1,
    <vscale x 1 x i32> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vnsra.nxv2i32.nxv2i64.nxv2i32(
  <vscale x 2 x i32>,
  <vscale x 2 x i64>,
  <vscale x 2 x i32>,
  iXLen);

define <vscale x 2 x i32> @intrinsic_vnsra_wv_nxv2i32_nxv2i64_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnsra_wv_nxv2i32_nxv2i64_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vnsra.wv v11, v8, v10
; CHECK-NEXT:    vmv.v.v v8, v11
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vnsra.nxv2i32.nxv2i64.nxv2i32(
    <vscale x 2 x i32> undef,
    <vscale x 2 x i64> %0,
    <vscale x 2 x i32> %1,
    iXLen %2)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vnsra.mask.nxv2i32.nxv2i64.nxv2i32(
  <vscale x 2 x i32>,
  <vscale x 2 x i64>,
  <vscale x 2 x i32>,
  <vscale x 2 x i1>,
  iXLen,
  iXLen);

define <vscale x 2 x i32> @intrinsic_vnsra_mask_wv_nxv2i32_nxv2i64_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i64> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnsra_mask_wv_nxv2i32_nxv2i64_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT:    vnsra.wv v8, v10, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vnsra.mask.nxv2i32.nxv2i64.nxv2i32(
    <vscale x 2 x i32> %0,
    <vscale x 2 x i64> %1,
    <vscale x 2 x i32> %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vnsra.nxv4i32.nxv4i64.nxv4i32(
  <vscale x 4 x i32>,
  <vscale x 4 x i64>,
  <vscale x 4 x i32>,
  iXLen);

define <vscale x 4 x i32> @intrinsic_vnsra_wv_nxv4i32_nxv4i64_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnsra_wv_nxv4i32_nxv4i64_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vnsra.wv v14, v8, v12
; CHECK-NEXT:    vmv.v.v v8, v14
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vnsra.nxv4i32.nxv4i64.nxv4i32(
    <vscale x 4 x i32> undef,
    <vscale x 4 x i64> %0,
    <vscale x 4 x i32> %1,
    iXLen %2)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vnsra.mask.nxv4i32.nxv4i64.nxv4i32(
  <vscale x 4 x i32>,
  <vscale x 4 x i64>,
  <vscale x 4 x i32>,
  <vscale x 4 x i1>,
  iXLen,
  iXLen);

define <vscale x 4 x i32> @intrinsic_vnsra_mask_wv_nxv4i32_nxv4i64_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i64> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnsra_mask_wv_nxv4i32_nxv4i64_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT:    vnsra.wv v8, v12, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vnsra.mask.nxv4i32.nxv4i64.nxv4i32(
    <vscale x 4 x i32> %0,
    <vscale x 4 x i64> %1,
    <vscale x 4 x i32> %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vnsra.nxv8i32.nxv8i64.nxv8i32(
  <vscale x 8 x i32>,
  <vscale x 8 x i64>,
  <vscale x 8 x i32>,
  iXLen);

define <vscale x 8 x i32> @intrinsic_vnsra_wv_nxv8i32_nxv8i64_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnsra_wv_nxv8i32_nxv8i64_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vnsra.wv v20, v8, v16
; CHECK-NEXT:    vmv.v.v v8, v20
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vnsra.nxv8i32.nxv8i64.nxv8i32(
    <vscale x 8 x i32> undef,
    <vscale x 8 x i64> %0,
    <vscale x 8 x i32> %1,
    iXLen %2)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vnsra.mask.nxv8i32.nxv8i64.nxv8i32(
  <vscale x 8 x i32>,
  <vscale x 8 x i64>,
  <vscale x 8 x i32>,
  <vscale x 8 x i1>,
  iXLen,
  iXLen);

define <vscale x 8 x i32> @intrinsic_vnsra_mask_wv_nxv8i32_nxv8i64_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i64> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnsra_mask_wv_nxv8i32_nxv8i64_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT:    vnsra.wv v8, v16, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vnsra.mask.nxv8i32.nxv8i64.nxv8i32(
    <vscale x 8 x i32> %0,
    <vscale x 8 x i64> %1,
    <vscale x 8 x i32> %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vnsra.nxv1i8.nxv1i16(
  <vscale x 1 x i8>,
  <vscale x 1 x i16>,
  iXLen,
  iXLen);

define <vscale x 1 x i8> @intrinsic_vnsra_vx_nxv1i8_nxv1i16(<vscale x 1 x i16> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnsra_vx_nxv1i8_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vnsra.wx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vnsra.nxv1i8.nxv1i16(
    <vscale x 1 x i8> undef,
    <vscale x 1 x i16> %0,
    iXLen %1,
    iXLen %2)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vnsra.mask.nxv1i8.nxv1i16(
  <vscale x 1 x i8>,
  <vscale x 1 x i16>,
  iXLen,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x i8> @intrinsic_vnsra_mask_vx_nxv1i8_nxv1i16(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnsra_mask_vx_nxv1i8_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT:    vnsra.wx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vnsra.mask.nxv1i8.nxv1i16(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i16> %1,
    iXLen %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 2 x i8> @llvm.riscv.vnsra.nxv2i8.nxv2i16(
  <vscale x 2 x i8>,
  <vscale x 2 x i16>,
  iXLen,
  iXLen);

define <vscale x 2 x i8> @intrinsic_vnsra_vx_nxv2i8_nxv2i16(<vscale x 2 x i16> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnsra_vx_nxv2i8_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT:    vnsra.wx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vnsra.nxv2i8.nxv2i16(
    <vscale x 2 x i8> undef,
    <vscale x 2 x i16> %0,
    iXLen %1,
    iXLen %2)

  ret <vscale x 2 x i8> %a
}

declare <vscale x 2 x i8> @llvm.riscv.vnsra.mask.nxv2i8.nxv2i16(
  <vscale x 2 x i8>,
  <vscale x 2 x i16>,
  iXLen,
  <vscale x 2 x i1>,
  iXLen,
  iXLen);

define <vscale x 2 x i8> @intrinsic_vnsra_mask_vx_nxv2i8_nxv2i16(<vscale x 2 x i8> %0, <vscale x 2 x i16> %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnsra_mask_vx_nxv2i8_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT:    vnsra.wx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vnsra.mask.nxv2i8.nxv2i16(
    <vscale x 2 x i8> %0,
    <vscale x 2 x i16> %1,
    iXLen %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x i8> %a
}

declare <vscale x 4 x i8> @llvm.riscv.vnsra.nxv4i8.nxv4i16(
  <vscale x 4 x i8>,
  <vscale x 4 x i16>,
  iXLen,
  iXLen);

define <vscale x 4 x i8> @intrinsic_vnsra_vx_nxv4i8_nxv4i16(<vscale x 4 x i16> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnsra_vx_nxv4i8_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vnsra.wx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vnsra.nxv4i8.nxv4i16(
    <vscale x 4 x i8> undef,
    <vscale x 4 x i16> %0,
    iXLen %1,
    iXLen %2)

  ret <vscale x 4 x i8> %a
}

declare <vscale x 4 x i8> @llvm.riscv.vnsra.mask.nxv4i8.nxv4i16(
  <vscale x 4 x i8>,
  <vscale x 4 x i16>,
  iXLen,
  <vscale x 4 x i1>,
  iXLen,
  iXLen);

define <vscale x 4 x i8> @intrinsic_vnsra_mask_vx_nxv4i8_nxv4i16(<vscale x 4 x i8> %0, <vscale x 4 x i16> %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnsra_mask_vx_nxv4i8_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT:    vnsra.wx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vnsra.mask.nxv4i8.nxv4i16(
    <vscale x 4 x i8> %0,
    <vscale x 4 x i16> %1,
    iXLen %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vnsra.nxv8i8.nxv8i16(
  <vscale x 8 x i8>,
  <vscale x 8 x i16>,
  iXLen,
  iXLen);

define <vscale x 8 x i8> @intrinsic_vnsra_vx_nxv8i8_nxv8i16(<vscale x 8 x i16> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnsra_vx_nxv8i8_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT:    vnsra.wx v10, v8, a0
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vnsra.nxv8i8.nxv8i16(
    <vscale x 8 x i8> undef,
    <vscale x 8 x i16> %0,
    iXLen %1,
    iXLen %2)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vnsra.mask.nxv8i8.nxv8i16(
  <vscale x 8 x i8>,
  <vscale x 8 x i16>,
  iXLen,
  <vscale x 8 x i1>,
  iXLen,
  iXLen);

define <vscale x 8 x i8> @intrinsic_vnsra_mask_vx_nxv8i8_nxv8i16(<vscale x 8 x i8> %0, <vscale x 8 x i16> %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnsra_mask_vx_nxv8i8_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT:    vnsra.wx v8, v10, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vnsra.mask.nxv8i8.nxv8i16(
    <vscale x 8 x i8> %0,
    <vscale x 8 x i16> %1,
    iXLen %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 16 x i8> @llvm.riscv.vnsra.nxv16i8.nxv16i16(
  <vscale x 16 x i8>,
  <vscale x 16 x i16>,
  iXLen,
  iXLen);

define <vscale x 16 x i8> @intrinsic_vnsra_vx_nxv16i8_nxv16i16(<vscale x 16 x i16> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnsra_vx_nxv16i8_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT:    vnsra.wx v12, v8, a0
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vnsra.nxv16i8.nxv16i16(
    <vscale x 16 x i8> undef,
    <vscale x 16 x i16> %0,
    iXLen %1,
    iXLen %2)

  ret <vscale x 16 x i8> %a
}

declare <vscale x 16 x i8> @llvm.riscv.vnsra.mask.nxv16i8.nxv16i16(
  <vscale x 16 x i8>,
  <vscale x 16 x i16>,
  iXLen,
  <vscale x 16 x i1>,
  iXLen,
  iXLen);

define <vscale x 16 x i8> @intrinsic_vnsra_mask_vx_nxv16i8_nxv16i16(<vscale x 16 x i8> %0, <vscale x 16 x i16> %1, iXLen %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnsra_mask_vx_nxv16i8_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT:    vnsra.wx v8, v12, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vnsra.mask.nxv16i8.nxv16i16(
    <vscale x 16 x i8> %0,
    <vscale x 16 x i16> %1,
    iXLen %2,
    <vscale x 16 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 16 x i8> %a
}

declare <vscale x 32 x i8> @llvm.riscv.vnsra.nxv32i8.nxv32i16(
  <vscale x 32 x i8>,
  <vscale x 32 x i16>,
  iXLen,
  iXLen);

define <vscale x 32 x i8> @intrinsic_vnsra_vx_nxv32i8_nxv32i16(<vscale x 32 x i16> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnsra_vx_nxv32i8_nxv32i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT:    vnsra.wx v16, v8, a0
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vnsra.nxv32i8.nxv32i16(
    <vscale x 32 x i8> undef,
    <vscale x 32 x i16> %0,
    iXLen %1,
    iXLen %2)

  ret <vscale x 32 x i8> %a
}

declare <vscale x 32 x i8> @llvm.riscv.vnsra.mask.nxv32i8.nxv32i16(
  <vscale x 32 x i8>,
  <vscale x 32 x i16>,
  iXLen,
  <vscale x 32 x i1>,
  iXLen,
  iXLen);

define <vscale x 32 x i8> @intrinsic_vnsra_mask_vx_nxv32i8_nxv32i16(<vscale x 32 x i8> %0, <vscale x 32 x i16> %1, iXLen %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnsra_mask_vx_nxv32i8_nxv32i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT:    vnsra.wx v8, v16, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vnsra.mask.nxv32i8.nxv32i16(
    <vscale x 32 x i8> %0,
    <vscale x 32 x i16> %1,
    iXLen %2,
    <vscale x 32 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 32 x i8> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vnsra.nxv1i16.nxv1i32(
  <vscale x 1 x i16>,
  <vscale x 1 x i32>,
  iXLen,
  iXLen);

define <vscale x 1 x i16> @intrinsic_vnsra_vx_nxv1i16_nxv1i32(<vscale x 1 x i32> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnsra_vx_nxv1i16_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vnsra.wx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vnsra.nxv1i16.nxv1i32(
    <vscale x 1 x i16> undef,
    <vscale x 1 x i32> %0,
    iXLen %1,
    iXLen %2)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vnsra.mask.nxv1i16.nxv1i32(
  <vscale x 1 x i16>,
  <vscale x 1 x i32>,
  iXLen,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x i16> @intrinsic_vnsra_mask_vx_nxv1i16_nxv1i32(<vscale x 1 x i16> %0, <vscale x 1 x i32> %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnsra_mask_vx_nxv1i16_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT:    vnsra.wx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vnsra.mask.nxv1i16.nxv1i32(
    <vscale x 1 x i16> %0,
    <vscale x 1 x i32> %1,
    iXLen %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vnsra.nxv2i16.nxv2i32(
  <vscale x 2 x i16>,
  <vscale x 2 x i32>,
  iXLen,
  iXLen);

define <vscale x 2 x i16> @intrinsic_vnsra_vx_nxv2i16_nxv2i32(<vscale x 2 x i32> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnsra_vx_nxv2i16_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT:    vnsra.wx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vnsra.nxv2i16.nxv2i32(
    <vscale x 2 x i16> undef,
    <vscale x 2 x i32> %0,
    iXLen %1,
    iXLen %2)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vnsra.mask.nxv2i16.nxv2i32(
  <vscale x 2 x i16>,
  <vscale x 2 x i32>,
  iXLen,
  <vscale x 2 x i1>,
  iXLen,
  iXLen);

define <vscale x 2 x i16> @intrinsic_vnsra_mask_vx_nxv2i16_nxv2i32(<vscale x 2 x i16> %0, <vscale x 2 x i32> %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnsra_mask_vx_nxv2i16_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT:    vnsra.wx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vnsra.mask.nxv2i16.nxv2i32(
    <vscale x 2 x i16> %0,
    <vscale x 2 x i32> %1,
    iXLen %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vnsra.nxv4i16.nxv4i32(
  <vscale x 4 x i16>,
  <vscale x 4 x i32>,
  iXLen,
  iXLen);

define <vscale x 4 x i16> @intrinsic_vnsra_vx_nxv4i16_nxv4i32(<vscale x 4 x i32> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnsra_vx_nxv4i16_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vnsra.wx v10, v8, a0
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vnsra.nxv4i16.nxv4i32(
    <vscale x 4 x i16> undef,
    <vscale x 4 x i32> %0,
    iXLen %1,
    iXLen %2)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vnsra.mask.nxv4i16.nxv4i32(
  <vscale x 4 x i16>,
  <vscale x 4 x i32>,
  iXLen,
  <vscale x 4 x i1>,
  iXLen,
  iXLen);

define <vscale x 4 x i16> @intrinsic_vnsra_mask_vx_nxv4i16_nxv4i32(<vscale x 4 x i16> %0, <vscale x 4 x i32> %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnsra_mask_vx_nxv4i16_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT:    vnsra.wx v8, v10, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vnsra.mask.nxv4i16.nxv4i32(
    <vscale x 4 x i16> %0,
    <vscale x 4 x i32> %1,
    iXLen %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vnsra.nxv8i16.nxv8i32(
  <vscale x 8 x i16>,
  <vscale x 8 x i32>,
  iXLen,
  iXLen);

define <vscale x 8 x i16> @intrinsic_vnsra_vx_nxv8i16_nxv8i32(<vscale x 8 x i32> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnsra_vx_nxv8i16_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT:    vnsra.wx v12, v8, a0
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vnsra.nxv8i16.nxv8i32(
    <vscale x 8 x i16> undef,
    <vscale x 8 x i32> %0,
    iXLen %1,
    iXLen %2)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vnsra.mask.nxv8i16.nxv8i32(
  <vscale x 8 x i16>,
  <vscale x 8 x i32>,
  iXLen,
  <vscale x 8 x i1>,
  iXLen,
  iXLen);

define <vscale x 8 x i16> @intrinsic_vnsra_mask_vx_nxv8i16_nxv8i32(<vscale x 8 x i16> %0, <vscale x 8 x i32> %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnsra_mask_vx_nxv8i16_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT:    vnsra.wx v8, v12, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vnsra.mask.nxv8i16.nxv8i32(
    <vscale x 8 x i16> %0,
    <vscale x 8 x i32> %1,
    iXLen %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vnsra.nxv16i16.nxv16i32(
  <vscale x 16 x i16>,
  <vscale x 16 x i32>,
  iXLen,
  iXLen);

define <vscale x 16 x i16> @intrinsic_vnsra_vx_nxv16i16_nxv16i32(<vscale x 16 x i32> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnsra_vx_nxv16i16_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT:    vnsra.wx v16, v8, a0
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vnsra.nxv16i16.nxv16i32(
    <vscale x 16 x i16> undef,
    <vscale x 16 x i32> %0,
    iXLen %1,
    iXLen %2)

  ret <vscale x 16 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vnsra.mask.nxv16i16.nxv16i32(
  <vscale x 16 x i16>,
  <vscale x 16 x i32>,
  iXLen,
  <vscale x 16 x i1>,
  iXLen,
  iXLen);

define <vscale x 16 x i16> @intrinsic_vnsra_mask_vx_nxv16i16_nxv16i32(<vscale x 16 x i16> %0, <vscale x 16 x i32> %1, iXLen %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnsra_mask_vx_nxv16i16_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT:    vnsra.wx v8, v16, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vnsra.mask.nxv16i16.nxv16i32(
    <vscale x 16 x i16> %0,
    <vscale x 16 x i32> %1,
    iXLen %2,
    <vscale x 16 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 16 x i16> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vnsra.nxv1i32.nxv1i64(
  <vscale x 1 x i32>,
  <vscale x 1 x i64>,
  iXLen,
  iXLen);

define <vscale x 1 x i32> @intrinsic_vnsra_vx_nxv1i32_nxv1i64(<vscale x 1 x i64> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnsra_vx_nxv1i32_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    vnsra.wx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vnsra.nxv1i32.nxv1i64(
    <vscale x 1 x i32> undef,
    <vscale x 1 x i64> %0,
    iXLen %1,
    iXLen %2)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vnsra.mask.nxv1i32.nxv1i64(
  <vscale x 1 x i32>,
  <vscale x 1 x i64>,
  iXLen,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x i32> @intrinsic_vnsra_mask_vx_nxv1i32_nxv1i64(<vscale x 1 x i32> %0, <vscale x 1 x i64> %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnsra_mask_vx_nxv1i32_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT:    vnsra.wx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vnsra.mask.nxv1i32.nxv1i64(
    <vscale x 1 x i32> %0,
    <vscale x 1 x i64> %1,
    iXLen %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vnsra.nxv2i32.nxv2i64(
  <vscale x 2 x i32>,
  <vscale x 2 x i64>,
  iXLen,
  iXLen);

define <vscale x 2 x i32> @intrinsic_vnsra_vx_nxv2i32_nxv2i64(<vscale x 2 x i64> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnsra_vx_nxv2i32_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vnsra.wx v10, v8, a0
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vnsra.nxv2i32.nxv2i64(
    <vscale x 2 x i32> undef,
    <vscale x 2 x i64> %0,
    iXLen %1,
    iXLen %2)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vnsra.mask.nxv2i32.nxv2i64(
  <vscale x 2 x i32>,
  <vscale x 2 x i64>,
  iXLen,
  <vscale x 2 x i1>,
  iXLen,
  iXLen);

define <vscale x 2 x i32> @intrinsic_vnsra_mask_vx_nxv2i32_nxv2i64(<vscale x 2 x i32> %0, <vscale x 2 x i64> %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnsra_mask_vx_nxv2i32_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT:    vnsra.wx v8, v10, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vnsra.mask.nxv2i32.nxv2i64(
    <vscale x 2 x i32> %0,
    <vscale x 2 x i64> %1,
    iXLen %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vnsra.nxv4i32.nxv4i64(
  <vscale x 4 x i32>,
  <vscale x 4 x i64>,
  iXLen,
  iXLen);

define <vscale x 4 x i32> @intrinsic_vnsra_vx_nxv4i32_nxv4i64(<vscale x 4 x i64> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnsra_vx_nxv4i32_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT:    vnsra.wx v12, v8, a0
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vnsra.nxv4i32.nxv4i64(
    <vscale x 4 x i32> undef,
    <vscale x 4 x i64> %0,
    iXLen %1,
    iXLen %2)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vnsra.mask.nxv4i32.nxv4i64(
  <vscale x 4 x i32>,
  <vscale x 4 x i64>,
  iXLen,
  <vscale x 4 x i1>,
  iXLen,
  iXLen);

define <vscale x 4 x i32> @intrinsic_vnsra_mask_vx_nxv4i32_nxv4i64(<vscale x 4 x i32> %0, <vscale x 4 x i64> %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnsra_mask_vx_nxv4i32_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT:    vnsra.wx v8, v12, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vnsra.mask.nxv4i32.nxv4i64(
    <vscale x 4 x i32> %0,
    <vscale x 4 x i64> %1,
    iXLen %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vnsra.nxv8i32.nxv8i64(
  <vscale x 8 x i32>,
  <vscale x 8 x i64>,
  iXLen,
  iXLen);

define <vscale x 8 x i32> @intrinsic_vnsra_vx_nxv8i32_nxv8i64(<vscale x 8 x i64> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnsra_vx_nxv8i32_nxv8i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT:    vnsra.wx v16, v8, a0
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vnsra.nxv8i32.nxv8i64(
    <vscale x 8 x i32> undef,
    <vscale x 8 x i64> %0,
    iXLen %1,
    iXLen %2)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vnsra.mask.nxv8i32.nxv8i64(
  <vscale x 8 x i32>,
  <vscale x 8 x i64>,
  iXLen,
  <vscale x 8 x i1>,
  iXLen,
  iXLen);

define <vscale x 8 x i32> @intrinsic_vnsra_mask_vx_nxv8i32_nxv8i64(<vscale x 8 x i32> %0, <vscale x 8 x i64> %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnsra_mask_vx_nxv8i32_nxv8i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT:    vnsra.wx v8, v16, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vnsra.mask.nxv8i32.nxv8i64(
    <vscale x 8 x i32> %0,
    <vscale x 8 x i64> %1,
    iXLen %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x i32> %a
}

define <vscale x 1 x i8> @intrinsic_vnsra_vi_nxv1i8_nxv1i16_i8(<vscale x 1 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vnsra_vi_nxv1i8_nxv1i16_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vnsra.wi v8, v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vnsra.nxv1i8.nxv1i16(
    <vscale x 1 x i8> undef,
    <vscale x 1 x i16> %0,
    iXLen 9,
    iXLen %1)

  ret <vscale x 1 x i8> %a
}

define <vscale x 1 x i8> @intrinsic_vnsra_mask_vi_nxv1i8_nxv1i16_i8(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vnsra_mask_vi_nxv1i8_nxv1i16_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT:    vnsra.wi v8, v9, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vnsra.mask.nxv1i8.nxv1i16(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i16> %1,
    iXLen 9,
    <vscale x 1 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 1 x i8> %a
}

define <vscale x 2 x i8> @intrinsic_vnsra_vi_nxv2i8_nxv2i16_i8(<vscale x 2 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vnsra_vi_nxv2i8_nxv2i16_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT:    vnsra.wi v8, v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vnsra.nxv2i8.nxv2i16(
    <vscale x 2 x i8> undef,
    <vscale x 2 x i16> %0,
    iXLen 9,
    iXLen %1)

  ret <vscale x 2 x i8> %a
}

define <vscale x 2 x i8> @intrinsic_vnsra_mask_vi_nxv2i8_nxv2i16_i8(<vscale x 2 x i8> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vnsra_mask_vi_nxv2i8_nxv2i16_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
; CHECK-NEXT:    vnsra.wi v8, v9, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vnsra.mask.nxv2i8.nxv2i16(
    <vscale x 2 x i8> %0,
    <vscale x 2 x i16> %1,
    iXLen 9,
    <vscale x 2 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 2 x i8> %a
}

define <vscale x 4 x i8> @intrinsic_vnsra_vi_nxv4i8_nxv4i16_i8(<vscale x 4 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vnsra_vi_nxv4i8_nxv4i16_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    vnsra.wi v8, v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vnsra.nxv4i8.nxv4i16(
    <vscale x 4 x i8> undef,
    <vscale x 4 x i16> %0,
    iXLen 9,
    iXLen %1)

  ret <vscale x 4 x i8> %a
}

define <vscale x 4 x i8> @intrinsic_vnsra_mask_vi_nxv4i8_nxv4i16_i8(<vscale x 4 x i8> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vnsra_mask_vi_nxv4i8_nxv4i16_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
; CHECK-NEXT:    vnsra.wi v8, v9, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vnsra.mask.nxv4i8.nxv4i16(
    <vscale x 4 x i8> %0,
    <vscale x 4 x i16> %1,
    iXLen 9,
    <vscale x 4 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 4 x i8> %a
}

define <vscale x 8 x i8> @intrinsic_vnsra_vi_nxv8i8_nxv8i16_i8(<vscale x 8 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vnsra_vi_nxv8i8_nxv8i16_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT:    vnsra.wi v10, v8, 9
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vnsra.nxv8i8.nxv8i16(
    <vscale x 8 x i8> undef,
    <vscale x 8 x i16> %0,
    iXLen 9,
    iXLen %1)

  ret <vscale x 8 x i8> %a
}

define <vscale x 8 x i8> @intrinsic_vnsra_mask_vi_nxv8i8_nxv8i16_i8(<vscale x 8 x i8> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vnsra_mask_vi_nxv8i8_nxv8i16_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
; CHECK-NEXT:    vnsra.wi v8, v10, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vnsra.mask.nxv8i8.nxv8i16(
    <vscale x 8 x i8> %0,
    <vscale x 8 x i16> %1,
    iXLen 9,
    <vscale x 8 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 8 x i8> %a
}

define <vscale x 16 x i8> @intrinsic_vnsra_vi_nxv16i8_nxv16i16_i8(<vscale x 16 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vnsra_vi_nxv16i8_nxv16i16_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT:    vnsra.wi v12, v8, 9
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vnsra.nxv16i8.nxv16i16(
    <vscale x 16 x i8> undef,
    <vscale x 16 x i16> %0,
    iXLen 9,
    iXLen %1)

  ret <vscale x 16 x i8> %a
}

define <vscale x 16 x i8> @intrinsic_vnsra_mask_vi_nxv16i8_nxv16i16_i8(<vscale x 16 x i8> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vnsra_mask_vi_nxv16i8_nxv16i16_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
; CHECK-NEXT:    vnsra.wi v8, v12, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vnsra.mask.nxv16i8.nxv16i16(
    <vscale x 16 x i8> %0,
    <vscale x 16 x i16> %1,
    iXLen 9,
    <vscale x 16 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 16 x i8> %a
}

define <vscale x 32 x i8> @intrinsic_vnsra_vi_nxv32i8_nxv32i16_i8(<vscale x 32 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vnsra_vi_nxv32i8_nxv32i16_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT:    vnsra.wi v16, v8, 9
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vnsra.nxv32i8.nxv32i16(
    <vscale x 32 x i8> undef,
    <vscale x 32 x i16> %0,
    iXLen 9,
    iXLen %1)

  ret <vscale x 32 x i8> %a
}

define <vscale x 32 x i8> @intrinsic_vnsra_mask_vi_nxv32i8_nxv32i16_i8(<vscale x 32 x i8> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vnsra_mask_vi_nxv32i8_nxv32i16_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
; CHECK-NEXT:    vnsra.wi v8, v16, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vnsra.mask.nxv32i8.nxv32i16(
    <vscale x 32 x i8> %0,
    <vscale x 32 x i16> %1,
    iXLen 9,
    <vscale x 32 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 32 x i8> %a
}

define <vscale x 1 x i16> @intrinsic_vnsra_vi_nxv1i16_nxv1i32_i16(<vscale x 1 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vnsra_vi_nxv1i16_nxv1i32_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vnsra.wi v8, v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vnsra.nxv1i16.nxv1i32(
    <vscale x 1 x i16> undef,
    <vscale x 1 x i32> %0,
    iXLen 9,
    iXLen %1)

  ret <vscale x 1 x i16> %a
}

define <vscale x 1 x i16> @intrinsic_vnsra_mask_vi_nxv1i16_nxv1i32_i16(<vscale x 1 x i16> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vnsra_mask_vi_nxv1i16_nxv1i32_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT:    vnsra.wi v8, v9, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vnsra.mask.nxv1i16.nxv1i32(
    <vscale x 1 x i16> %0,
    <vscale x 1 x i32> %1,
    iXLen 9,
    <vscale x 1 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 1 x i16> %a
}

define <vscale x 2 x i16> @intrinsic_vnsra_vi_nxv2i16_nxv2i32_i16(<vscale x 2 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vnsra_vi_nxv2i16_nxv2i32_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vnsra.wi v8, v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vnsra.nxv2i16.nxv2i32(
    <vscale x 2 x i16> undef,
    <vscale x 2 x i32> %0,
    iXLen 9,
    iXLen %1)

  ret <vscale x 2 x i16> %a
}

define <vscale x 2 x i16> @intrinsic_vnsra_mask_vi_nxv2i16_nxv2i32_i16(<vscale x 2 x i16> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vnsra_mask_vi_nxv2i16_nxv2i32_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT:    vnsra.wi v8, v9, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vnsra.mask.nxv2i16.nxv2i32(
    <vscale x 2 x i16> %0,
    <vscale x 2 x i32> %1,
    iXLen 9,
    <vscale x 2 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 2 x i16> %a
}

define <vscale x 4 x i16> @intrinsic_vnsra_vi_nxv4i16_nxv4i32_i16(<vscale x 4 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vnsra_vi_nxv4i16_nxv4i32_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vnsra.wi v10, v8, 9
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vnsra.nxv4i16.nxv4i32(
    <vscale x 4 x i16> undef,
    <vscale x 4 x i32> %0,
    iXLen 9,
    iXLen %1)

  ret <vscale x 4 x i16> %a
}

define <vscale x 4 x i16> @intrinsic_vnsra_mask_vi_nxv4i16_nxv4i32_i16(<vscale x 4 x i16> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vnsra_mask_vi_nxv4i16_nxv4i32_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT:    vnsra.wi v8, v10, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vnsra.mask.nxv4i16.nxv4i32(
    <vscale x 4 x i16> %0,
    <vscale x 4 x i32> %1,
    iXLen 9,
    <vscale x 4 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 4 x i16> %a
}

define <vscale x 8 x i16> @intrinsic_vnsra_vi_nxv8i16_nxv8i32_i16(<vscale x 8 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vnsra_vi_nxv8i16_nxv8i32_i16:
1738; CHECK:       # %bb.0: # %entry
1739; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
1740; CHECK-NEXT:    vnsra.wi v12, v8, 9
1741; CHECK-NEXT:    vmv.v.v v8, v12
1742; CHECK-NEXT:    ret
1743entry:
1744  %a = call <vscale x 8 x i16> @llvm.riscv.vnsra.nxv8i16.nxv8i32(
1745    <vscale x 8 x i16> undef,
1746    <vscale x 8 x i32> %0,
1747    iXLen 9,
1748    iXLen %1)
1749
1750  ret <vscale x 8 x i16> %a
1751}
1752
1753define <vscale x 8 x i16> @intrinsic_vnsra_mask_vi_nxv8i16_nxv8i32_i16(<vscale x 8 x i16> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
1754; CHECK-LABEL: intrinsic_vnsra_mask_vi_nxv8i16_nxv8i32_i16:
1755; CHECK:       # %bb.0: # %entry
1756; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
1757; CHECK-NEXT:    vnsra.wi v8, v12, 9, v0.t
1758; CHECK-NEXT:    ret
1759entry:
1760  %a = call <vscale x 8 x i16> @llvm.riscv.vnsra.mask.nxv8i16.nxv8i32(
1761    <vscale x 8 x i16> %0,
1762    <vscale x 8 x i32> %1,
1763    iXLen 9,
1764    <vscale x 8 x i1> %2,
1765    iXLen %3, iXLen 1)
1766
1767  ret <vscale x 8 x i16> %a
1768}
1769
1770define <vscale x 16 x i16> @intrinsic_vnsra_vi_nxv16i16_nxv16i32_i16(<vscale x 16 x i32> %0, iXLen %1) nounwind {
1771; CHECK-LABEL: intrinsic_vnsra_vi_nxv16i16_nxv16i32_i16:
1772; CHECK:       # %bb.0: # %entry
1773; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
1774; CHECK-NEXT:    vnsra.wi v16, v8, 9
1775; CHECK-NEXT:    vmv.v.v v8, v16
1776; CHECK-NEXT:    ret
1777entry:
1778  %a = call <vscale x 16 x i16> @llvm.riscv.vnsra.nxv16i16.nxv16i32(
1779    <vscale x 16 x i16> undef,
1780    <vscale x 16 x i32> %0,
1781    iXLen 9,
1782    iXLen %1)
1783
1784  ret <vscale x 16 x i16> %a
1785}
1786
1787define <vscale x 16 x i16> @intrinsic_vnsra_mask_vi_nxv16i16_nxv16i32_i16(<vscale x 16 x i16> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
1788; CHECK-LABEL: intrinsic_vnsra_mask_vi_nxv16i16_nxv16i32_i16:
1789; CHECK:       # %bb.0: # %entry
1790; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
1791; CHECK-NEXT:    vnsra.wi v8, v16, 9, v0.t
1792; CHECK-NEXT:    ret
1793entry:
1794  %a = call <vscale x 16 x i16> @llvm.riscv.vnsra.mask.nxv16i16.nxv16i32(
1795    <vscale x 16 x i16> %0,
1796    <vscale x 16 x i32> %1,
1797    iXLen 9,
1798    <vscale x 16 x i1> %2,
1799    iXLen %3, iXLen 1)
1800
1801  ret <vscale x 16 x i16> %a
1802}
1803
1804define <vscale x 1 x i32> @intrinsic_vnsra_vi_nxv1i32_nxv1i64_i32(<vscale x 1 x i64> %0, iXLen %1) nounwind {
1805; CHECK-LABEL: intrinsic_vnsra_vi_nxv1i32_nxv1i64_i32:
1806; CHECK:       # %bb.0: # %entry
1807; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
1808; CHECK-NEXT:    vnsra.wi v8, v8, 9
1809; CHECK-NEXT:    ret
1810entry:
1811  %a = call <vscale x 1 x i32> @llvm.riscv.vnsra.nxv1i32.nxv1i64(
1812    <vscale x 1 x i32> undef,
1813    <vscale x 1 x i64> %0,
1814    iXLen 9,
1815    iXLen %1)
1816
1817  ret <vscale x 1 x i32> %a
1818}
1819
1820define <vscale x 1 x i32> @intrinsic_vnsra_mask_vi_nxv1i32_nxv1i64_i32(<vscale x 1 x i32> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
1821; CHECK-LABEL: intrinsic_vnsra_mask_vi_nxv1i32_nxv1i64_i32:
1822; CHECK:       # %bb.0: # %entry
1823; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
1824; CHECK-NEXT:    vnsra.wi v8, v9, 9, v0.t
1825; CHECK-NEXT:    ret
1826entry:
1827  %a = call <vscale x 1 x i32> @llvm.riscv.vnsra.mask.nxv1i32.nxv1i64(
1828    <vscale x 1 x i32> %0,
1829    <vscale x 1 x i64> %1,
1830    iXLen 9,
1831    <vscale x 1 x i1> %2,
1832    iXLen %3, iXLen 1)
1833
1834  ret <vscale x 1 x i32> %a
1835}
1836
1837define <vscale x 2 x i32> @intrinsic_vnsra_vi_nxv2i32_nxv2i64_i32(<vscale x 2 x i64> %0, iXLen %1) nounwind {
1838; CHECK-LABEL: intrinsic_vnsra_vi_nxv2i32_nxv2i64_i32:
1839; CHECK:       # %bb.0: # %entry
1840; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
1841; CHECK-NEXT:    vnsra.wi v10, v8, 9
1842; CHECK-NEXT:    vmv.v.v v8, v10
1843; CHECK-NEXT:    ret
1844entry:
1845  %a = call <vscale x 2 x i32> @llvm.riscv.vnsra.nxv2i32.nxv2i64(
1846    <vscale x 2 x i32> undef,
1847    <vscale x 2 x i64> %0,
1848    iXLen 9,
1849    iXLen %1)
1850
1851  ret <vscale x 2 x i32> %a
1852}
1853
1854define <vscale x 2 x i32> @intrinsic_vnsra_mask_vi_nxv2i32_nxv2i64_i32(<vscale x 2 x i32> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
1855; CHECK-LABEL: intrinsic_vnsra_mask_vi_nxv2i32_nxv2i64_i32:
1856; CHECK:       # %bb.0: # %entry
1857; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
1858; CHECK-NEXT:    vnsra.wi v8, v10, 9, v0.t
1859; CHECK-NEXT:    ret
1860entry:
1861  %a = call <vscale x 2 x i32> @llvm.riscv.vnsra.mask.nxv2i32.nxv2i64(
1862    <vscale x 2 x i32> %0,
1863    <vscale x 2 x i64> %1,
1864    iXLen 9,
1865    <vscale x 2 x i1> %2,
1866    iXLen %3, iXLen 1)
1867
1868  ret <vscale x 2 x i32> %a
1869}
1870
1871define <vscale x 4 x i32> @intrinsic_vnsra_vi_nxv4i32_nxv4i64_i32(<vscale x 4 x i64> %0, iXLen %1) nounwind {
1872; CHECK-LABEL: intrinsic_vnsra_vi_nxv4i32_nxv4i64_i32:
1873; CHECK:       # %bb.0: # %entry
1874; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
1875; CHECK-NEXT:    vnsra.wi v12, v8, 9
1876; CHECK-NEXT:    vmv.v.v v8, v12
1877; CHECK-NEXT:    ret
1878entry:
1879  %a = call <vscale x 4 x i32> @llvm.riscv.vnsra.nxv4i32.nxv4i64(
1880    <vscale x 4 x i32> undef,
1881    <vscale x 4 x i64> %0,
1882    iXLen 9,
1883    iXLen %1)
1884
1885  ret <vscale x 4 x i32> %a
1886}
1887
1888define <vscale x 4 x i32> @intrinsic_vnsra_mask_vi_nxv4i32_nxv4i64_i32(<vscale x 4 x i32> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
1889; CHECK-LABEL: intrinsic_vnsra_mask_vi_nxv4i32_nxv4i64_i32:
1890; CHECK:       # %bb.0: # %entry
1891; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
1892; CHECK-NEXT:    vnsra.wi v8, v12, 9, v0.t
1893; CHECK-NEXT:    ret
1894entry:
1895  %a = call <vscale x 4 x i32> @llvm.riscv.vnsra.mask.nxv4i32.nxv4i64(
1896    <vscale x 4 x i32> %0,
1897    <vscale x 4 x i64> %1,
1898    iXLen 9,
1899    <vscale x 4 x i1> %2,
1900    iXLen %3, iXLen 1)
1901
1902  ret <vscale x 4 x i32> %a
1903}
1904
1905define <vscale x 8 x i32> @intrinsic_vnsra_vi_nxv8i32_nxv8i64_i32(<vscale x 8 x i64> %0, iXLen %1) nounwind {
1906; CHECK-LABEL: intrinsic_vnsra_vi_nxv8i32_nxv8i64_i32:
1907; CHECK:       # %bb.0: # %entry
1908; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
1909; CHECK-NEXT:    vnsra.wi v16, v8, 9
1910; CHECK-NEXT:    vmv.v.v v8, v16
1911; CHECK-NEXT:    ret
1912entry:
1913  %a = call <vscale x 8 x i32> @llvm.riscv.vnsra.nxv8i32.nxv8i64(
1914    <vscale x 8 x i32> undef,
1915    <vscale x 8 x i64> %0,
1916    iXLen 9,
1917    iXLen %1)
1918
1919  ret <vscale x 8 x i32> %a
1920}
1921
1922define <vscale x 8 x i32> @intrinsic_vnsra_mask_vi_nxv8i32_nxv8i64_i32(<vscale x 8 x i32> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
1923; CHECK-LABEL: intrinsic_vnsra_mask_vi_nxv8i32_nxv8i64_i32:
1924; CHECK:       # %bb.0: # %entry
1925; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
1926; CHECK-NEXT:    vnsra.wi v8, v16, 9, v0.t
1927; CHECK-NEXT:    ret
1928entry:
1929  %a = call <vscale x 8 x i32> @llvm.riscv.vnsra.mask.nxv8i32.nxv8i64(
1930    <vscale x 8 x i32> %0,
1931    <vscale x 8 x i64> %1,
1932    iXLen 9,
1933    <vscale x 8 x i1> %2,
1934    iXLen %3, iXLen 1)
1935
1936  ret <vscale x 8 x i32> %a
1937}
1938