; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
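
; This file exercises the @llvm.riscv.vnclip intrinsics, which select to the
; RVV narrowing fixed-point clip instruction in its three forms: vnclip.wv
; (vector shift), vnclip.wx (scalar shift), and vnclip.wi (immediate shift).
; As used below, the operands are: a passthru (undef in the unmasked tests),
; the 2*SEW-wide source, the shift amount, the vxrm rounding mode (iXLen 0,
; i.e. rnu, hence the expected `csrwi vxrm, 0`), and the vector length.
; Masked variants additionally take a mask and a trailing policy operand
; (iXLen 1, i.e. tail agnostic, matching the `ta, mu` vsetvli in the
; expected output).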

declare <vscale x 1 x i8> @llvm.riscv.vnclip.nxv1i8.nxv1i16.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i16>,
  <vscale x 1 x i8>,
  iXLen, iXLen);

define <vscale x 1 x i8> @intrinsic_vnclip_wv_nxv1i8_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnclip_wv_nxv1i8_nxv1i16_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vnclip.wv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vnclip.nxv1i8.nxv1i16.nxv1i8(
    <vscale x 1 x i8> undef,
    <vscale x 1 x i16> %0,
    <vscale x 1 x i8> %1,
    iXLen 0, iXLen %2)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i16>,
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 1 x i8> @intrinsic_vnclip_mask_wv_nxv1i8_nxv1i16_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv1i8_nxv1i16_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT:    vnclip.wv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i16> %1,
    <vscale x 1 x i8> %2,
    <vscale x 1 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 2 x i8> @llvm.riscv.vnclip.nxv2i8.nxv2i16.nxv2i8(
  <vscale x 2 x i8>,
  <vscale x 2 x i16>,
  <vscale x 2 x i8>,
  iXLen, iXLen);

define <vscale x 2 x i8> @intrinsic_vnclip_wv_nxv2i8_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnclip_wv_nxv2i8_nxv2i16_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT:    vnclip.wv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vnclip.nxv2i8.nxv2i16.nxv2i8(
    <vscale x 2 x i8> undef,
    <vscale x 2 x i16> %0,
    <vscale x 2 x i8> %1,
    iXLen 0, iXLen %2)

  ret <vscale x 2 x i8> %a
}

declare <vscale x 2 x i8> @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16.nxv2i8(
  <vscale x 2 x i8>,
  <vscale x 2 x i16>,
  <vscale x 2 x i8>,
  <vscale x 2 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 2 x i8> @intrinsic_vnclip_mask_wv_nxv2i8_nxv2i16_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i16> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv2i8_nxv2i16_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
; CHECK-NEXT:    vnclip.wv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16.nxv2i8(
    <vscale x 2 x i8> %0,
    <vscale x 2 x i16> %1,
    <vscale x 2 x i8> %2,
    <vscale x 2 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 2 x i8> %a
}

declare <vscale x 4 x i8> @llvm.riscv.vnclip.nxv4i8.nxv4i16.nxv4i8(
  <vscale x 4 x i8>,
  <vscale x 4 x i16>,
  <vscale x 4 x i8>,
  iXLen, iXLen);

define <vscale x 4 x i8> @intrinsic_vnclip_wv_nxv4i8_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnclip_wv_nxv4i8_nxv4i16_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    vnclip.wv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vnclip.nxv4i8.nxv4i16.nxv4i8(
    <vscale x 4 x i8> undef,
    <vscale x 4 x i16> %0,
    <vscale x 4 x i8> %1,
    iXLen 0, iXLen %2)

  ret <vscale x 4 x i8> %a
}

declare <vscale x 4 x i8> @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16.nxv4i8(
  <vscale x 4 x i8>,
  <vscale x 4 x i16>,
  <vscale x 4 x i8>,
  <vscale x 4 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 4 x i8> @intrinsic_vnclip_mask_wv_nxv4i8_nxv4i16_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i16> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv4i8_nxv4i16_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
; CHECK-NEXT:    vnclip.wv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16.nxv4i8(
    <vscale x 4 x i8> %0,
    <vscale x 4 x i16> %1,
    <vscale x 4 x i8> %2,
    <vscale x 4 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 4 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vnclip.nxv8i8.nxv8i16.nxv8i8(
  <vscale x 8 x i8>,
  <vscale x 8 x i16>,
  <vscale x 8 x i8>,
  iXLen, iXLen);

define <vscale x 8 x i8> @intrinsic_vnclip_wv_nxv8i8_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnclip_wv_nxv8i8_nxv8i16_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT:    vnclip.wv v11, v8, v10
; CHECK-NEXT:    vmv.v.v v8, v11
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vnclip.nxv8i8.nxv8i16.nxv8i8(
    <vscale x 8 x i8> undef,
    <vscale x 8 x i16> %0,
    <vscale x 8 x i8> %1,
    iXLen 0, iXLen %2)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16.nxv8i8(
  <vscale x 8 x i8>,
  <vscale x 8 x i16>,
  <vscale x 8 x i8>,
  <vscale x 8 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 8 x i8> @intrinsic_vnclip_mask_wv_nxv8i8_nxv8i16_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i16> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv8i8_nxv8i16_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
; CHECK-NEXT:    vnclip.wv v8, v10, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16.nxv8i8(
    <vscale x 8 x i8> %0,
    <vscale x 8 x i16> %1,
    <vscale x 8 x i8> %2,
    <vscale x 8 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 16 x i8> @llvm.riscv.vnclip.nxv16i8.nxv16i16.nxv16i8(
  <vscale x 16 x i8>,
  <vscale x 16 x i16>,
  <vscale x 16 x i8>,
  iXLen, iXLen);

define <vscale x 16 x i8> @intrinsic_vnclip_wv_nxv16i8_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnclip_wv_nxv16i8_nxv16i16_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT:    vnclip.wv v14, v8, v12
; CHECK-NEXT:    vmv.v.v v8, v14
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vnclip.nxv16i8.nxv16i16.nxv16i8(
    <vscale x 16 x i8> undef,
    <vscale x 16 x i16> %0,
    <vscale x 16 x i8> %1,
    iXLen 0, iXLen %2)

  ret <vscale x 16 x i8> %a
}

declare <vscale x 16 x i8> @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16.nxv16i8(
  <vscale x 16 x i8>,
  <vscale x 16 x i16>,
  <vscale x 16 x i8>,
  <vscale x 16 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 16 x i8> @intrinsic_vnclip_mask_wv_nxv16i8_nxv16i16_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i16> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv16i8_nxv16i16_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
; CHECK-NEXT:    vnclip.wv v8, v12, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16.nxv16i8(
    <vscale x 16 x i8> %0,
    <vscale x 16 x i16> %1,
    <vscale x 16 x i8> %2,
    <vscale x 16 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 16 x i8> %a
}

declare <vscale x 32 x i8> @llvm.riscv.vnclip.nxv32i8.nxv32i16.nxv32i8(
  <vscale x 32 x i8>,
  <vscale x 32 x i16>,
  <vscale x 32 x i8>,
  iXLen, iXLen);

define <vscale x 32 x i8> @intrinsic_vnclip_wv_nxv32i8_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnclip_wv_nxv32i8_nxv32i16_nxv32i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT:    vnclip.wv v20, v8, v16
; CHECK-NEXT:    vmv.v.v v8, v20
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vnclip.nxv32i8.nxv32i16.nxv32i8(
    <vscale x 32 x i8> undef,
    <vscale x 32 x i16> %0,
    <vscale x 32 x i8> %1,
    iXLen 0, iXLen %2)

  ret <vscale x 32 x i8> %a
}

declare <vscale x 32 x i8> @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16.nxv32i8(
  <vscale x 32 x i8>,
  <vscale x 32 x i16>,
  <vscale x 32 x i8>,
  <vscale x 32 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 32 x i8> @intrinsic_vnclip_mask_wv_nxv32i8_nxv32i16_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i16> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv32i8_nxv32i16_nxv32i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
; CHECK-NEXT:    vnclip.wv v8, v16, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16.nxv32i8(
    <vscale x 32 x i8> %0,
    <vscale x 32 x i16> %1,
    <vscale x 32 x i8> %2,
    <vscale x 32 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 32 x i8> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vnclip.nxv1i16.nxv1i32.nxv1i16(
  <vscale x 1 x i16>,
  <vscale x 1 x i32>,
  <vscale x 1 x i16>,
  iXLen, iXLen);

define <vscale x 1 x i16> @intrinsic_vnclip_wv_nxv1i16_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnclip_wv_nxv1i16_nxv1i32_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vnclip.wv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vnclip.nxv1i16.nxv1i32.nxv1i16(
    <vscale x 1 x i16> undef,
    <vscale x 1 x i32> %0,
    <vscale x 1 x i16> %1,
    iXLen 0, iXLen %2)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32.nxv1i16(
  <vscale x 1 x i16>,
  <vscale x 1 x i32>,
  <vscale x 1 x i16>,
  <vscale x 1 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 1 x i16> @intrinsic_vnclip_mask_wv_nxv1i16_nxv1i32_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i32> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv1i16_nxv1i32_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT:    vnclip.wv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32.nxv1i16(
    <vscale x 1 x i16> %0,
    <vscale x 1 x i32> %1,
    <vscale x 1 x i16> %2,
    <vscale x 1 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vnclip.nxv2i16.nxv2i32.nxv2i16(
  <vscale x 2 x i16>,
  <vscale x 2 x i32>,
  <vscale x 2 x i16>,
  iXLen, iXLen);

define <vscale x 2 x i16> @intrinsic_vnclip_wv_nxv2i16_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnclip_wv_nxv2i16_nxv2i32_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vnclip.wv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vnclip.nxv2i16.nxv2i32.nxv2i16(
    <vscale x 2 x i16> undef,
    <vscale x 2 x i32> %0,
    <vscale x 2 x i16> %1,
    iXLen 0, iXLen %2)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32.nxv2i16(
  <vscale x 2 x i16>,
  <vscale x 2 x i32>,
  <vscale x 2 x i16>,
  <vscale x 2 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 2 x i16> @intrinsic_vnclip_mask_wv_nxv2i16_nxv2i32_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i32> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv2i16_nxv2i32_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT:    vnclip.wv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32.nxv2i16(
    <vscale x 2 x i16> %0,
    <vscale x 2 x i32> %1,
    <vscale x 2 x i16> %2,
    <vscale x 2 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vnclip.nxv4i16.nxv4i32.nxv4i16(
  <vscale x 4 x i16>,
  <vscale x 4 x i32>,
  <vscale x 4 x i16>,
  iXLen, iXLen);

define <vscale x 4 x i16> @intrinsic_vnclip_wv_nxv4i16_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnclip_wv_nxv4i16_nxv4i32_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vnclip.wv v11, v8, v10
; CHECK-NEXT:    vmv.v.v v8, v11
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vnclip.nxv4i16.nxv4i32.nxv4i16(
    <vscale x 4 x i16> undef,
    <vscale x 4 x i32> %0,
    <vscale x 4 x i16> %1,
    iXLen 0, iXLen %2)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32.nxv4i16(
  <vscale x 4 x i16>,
  <vscale x 4 x i32>,
  <vscale x 4 x i16>,
  <vscale x 4 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 4 x i16> @intrinsic_vnclip_mask_wv_nxv4i16_nxv4i32_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i32> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv4i16_nxv4i32_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT:    vnclip.wv v8, v10, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32.nxv4i16(
    <vscale x 4 x i16> %0,
    <vscale x 4 x i32> %1,
    <vscale x 4 x i16> %2,
    <vscale x 4 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vnclip.nxv8i16.nxv8i32.nxv8i16(
  <vscale x 8 x i16>,
  <vscale x 8 x i32>,
  <vscale x 8 x i16>,
  iXLen, iXLen);

define <vscale x 8 x i16> @intrinsic_vnclip_wv_nxv8i16_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnclip_wv_nxv8i16_nxv8i32_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vnclip.wv v14, v8, v12
; CHECK-NEXT:    vmv.v.v v8, v14
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vnclip.nxv8i16.nxv8i32.nxv8i16(
    <vscale x 8 x i16> undef,
    <vscale x 8 x i32> %0,
    <vscale x 8 x i16> %1,
    iXLen 0, iXLen %2)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32.nxv8i16(
  <vscale x 8 x i16>,
  <vscale x 8 x i32>,
  <vscale x 8 x i16>,
  <vscale x 8 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 8 x i16> @intrinsic_vnclip_mask_wv_nxv8i16_nxv8i32_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i32> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv8i16_nxv8i32_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT:    vnclip.wv v8, v12, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32.nxv8i16(
    <vscale x 8 x i16> %0,
    <vscale x 8 x i32> %1,
    <vscale x 8 x i16> %2,
    <vscale x 8 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vnclip.nxv16i16.nxv16i32.nxv16i16(
  <vscale x 16 x i16>,
  <vscale x 16 x i32>,
  <vscale x 16 x i16>,
  iXLen, iXLen);

define <vscale x 16 x i16> @intrinsic_vnclip_wv_nxv16i16_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnclip_wv_nxv16i16_nxv16i32_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    vnclip.wv v20, v8, v16
; CHECK-NEXT:    vmv.v.v v8, v20
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vnclip.nxv16i16.nxv16i32.nxv16i16(
    <vscale x 16 x i16> undef,
    <vscale x 16 x i32> %0,
    <vscale x 16 x i16> %1,
    iXLen 0, iXLen %2)

  ret <vscale x 16 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32.nxv16i16(
  <vscale x 16 x i16>,
  <vscale x 16 x i32>,
  <vscale x 16 x i16>,
  <vscale x 16 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 16 x i16> @intrinsic_vnclip_mask_wv_nxv16i16_nxv16i32_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i32> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv16i16_nxv16i32_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT:    vnclip.wv v8, v16, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32.nxv16i16(
    <vscale x 16 x i16> %0,
    <vscale x 16 x i32> %1,
    <vscale x 16 x i16> %2,
    <vscale x 16 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 16 x i16> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vnclip.nxv1i32.nxv1i64.nxv1i32(
  <vscale x 1 x i32>,
  <vscale x 1 x i64>,
  <vscale x 1 x i32>,
  iXLen, iXLen);

define <vscale x 1 x i32> @intrinsic_vnclip_wv_nxv1i32_nxv1i64_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnclip_wv_nxv1i32_nxv1i64_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vnclip.wv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vnclip.nxv1i32.nxv1i64.nxv1i32(
    <vscale x 1 x i32> undef,
    <vscale x 1 x i64> %0,
    <vscale x 1 x i32> %1,
    iXLen 0, iXLen %2)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64.nxv1i32(
  <vscale x 1 x i32>,
  <vscale x 1 x i64>,
  <vscale x 1 x i32>,
  <vscale x 1 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 1 x i32> @intrinsic_vnclip_mask_wv_nxv1i32_nxv1i64_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i64> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv1i32_nxv1i64_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT:    vnclip.wv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64.nxv1i32(
    <vscale x 1 x i32> %0,
    <vscale x 1 x i64> %1,
    <vscale x 1 x i32> %2,
    <vscale x 1 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vnclip.nxv2i32.nxv2i64.nxv2i32(
  <vscale x 2 x i32>,
  <vscale x 2 x i64>,
  <vscale x 2 x i32>,
  iXLen, iXLen);

define <vscale x 2 x i32> @intrinsic_vnclip_wv_nxv2i32_nxv2i64_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnclip_wv_nxv2i32_nxv2i64_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vnclip.wv v11, v8, v10
; CHECK-NEXT:    vmv.v.v v8, v11
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vnclip.nxv2i32.nxv2i64.nxv2i32(
    <vscale x 2 x i32> undef,
    <vscale x 2 x i64> %0,
    <vscale x 2 x i32> %1,
    iXLen 0, iXLen %2)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vnclip.mask.nxv2i32.nxv2i64.nxv2i32(
  <vscale x 2 x i32>,
  <vscale x 2 x i64>,
  <vscale x 2 x i32>,
  <vscale x 2 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 2 x i32> @intrinsic_vnclip_mask_wv_nxv2i32_nxv2i64_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i64> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv2i32_nxv2i64_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT:    vnclip.wv v8, v10, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vnclip.mask.nxv2i32.nxv2i64.nxv2i32(
    <vscale x 2 x i32> %0,
    <vscale x 2 x i64> %1,
    <vscale x 2 x i32> %2,
    <vscale x 2 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vnclip.nxv4i32.nxv4i64.nxv4i32(
  <vscale x 4 x i32>,
  <vscale x 4 x i64>,
  <vscale x 4 x i32>,
  iXLen, iXLen);

define <vscale x 4 x i32> @intrinsic_vnclip_wv_nxv4i32_nxv4i64_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnclip_wv_nxv4i32_nxv4i64_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vnclip.wv v14, v8, v12
; CHECK-NEXT:    vmv.v.v v8, v14
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vnclip.nxv4i32.nxv4i64.nxv4i32(
    <vscale x 4 x i32> undef,
    <vscale x 4 x i64> %0,
    <vscale x 4 x i32> %1,
    iXLen 0, iXLen %2)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64.nxv4i32(
  <vscale x 4 x i32>,
  <vscale x 4 x i64>,
  <vscale x 4 x i32>,
  <vscale x 4 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 4 x i32> @intrinsic_vnclip_mask_wv_nxv4i32_nxv4i64_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i64> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv4i32_nxv4i64_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT:    vnclip.wv v8, v12, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64.nxv4i32(
    <vscale x 4 x i32> %0,
    <vscale x 4 x i64> %1,
    <vscale x 4 x i32> %2,
    <vscale x 4 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vnclip.nxv8i32.nxv8i64.nxv8i32(
  <vscale x 8 x i32>,
  <vscale x 8 x i64>,
  <vscale x 8 x i32>,
  iXLen, iXLen);

define <vscale x 8 x i32> @intrinsic_vnclip_wv_nxv8i32_nxv8i64_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnclip_wv_nxv8i32_nxv8i64_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vnclip.wv v20, v8, v16
; CHECK-NEXT:    vmv.v.v v8, v20
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vnclip.nxv8i32.nxv8i64.nxv8i32(
    <vscale x 8 x i32> undef,
    <vscale x 8 x i64> %0,
    <vscale x 8 x i32> %1,
    iXLen 0, iXLen %2)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vnclip.mask.nxv8i32.nxv8i64.nxv8i32(
  <vscale x 8 x i32>,
  <vscale x 8 x i64>,
  <vscale x 8 x i32>,
  <vscale x 8 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 8 x i32> @intrinsic_vnclip_mask_wv_nxv8i32_nxv8i64_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i64> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv8i32_nxv8i64_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT:    vnclip.wv v8, v16, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vnclip.mask.nxv8i32.nxv8i64.nxv8i32(
    <vscale x 8 x i32> %0,
    <vscale x 8 x i64> %1,
    <vscale x 8 x i32> %2,
    <vscale x 8 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 8 x i32> %a
}

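; The tests below exercise the vector-scalar form, vnclip.wx: the shift
; amount is passed as an iXLen scalar (in a0 below, with the vector length
; in a1) instead of as a vector operand.
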
declare <vscale x 1 x i8> @llvm.riscv.vnclip.nxv1i8.nxv1i16(
  <vscale x 1 x i8>,
  <vscale x 1 x i16>,
  iXLen, iXLen, iXLen);

define <vscale x 1 x i8> @intrinsic_vnclip_vx_nxv1i8_nxv1i16(<vscale x 1 x i16> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnclip_vx_nxv1i8_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vnclip.wx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vnclip.nxv1i8.nxv1i16(
    <vscale x 1 x i8> undef,
    <vscale x 1 x i16> %0,
    iXLen %1,
    iXLen 0, iXLen %2)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16(
  <vscale x 1 x i8>,
  <vscale x 1 x i16>,
  iXLen,
  <vscale x 1 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 1 x i8> @intrinsic_vnclip_mask_vx_nxv1i8_nxv1i16(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv1i8_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT:    vnclip.wx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i16> %1,
    iXLen %2,
    <vscale x 1 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 2 x i8> @llvm.riscv.vnclip.nxv2i8.nxv2i16(
  <vscale x 2 x i8>,
  <vscale x 2 x i16>,
  iXLen, iXLen, iXLen);

define <vscale x 2 x i8> @intrinsic_vnclip_vx_nxv2i8_nxv2i16(<vscale x 2 x i16> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnclip_vx_nxv2i8_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT:    vnclip.wx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vnclip.nxv2i8.nxv2i16(
    <vscale x 2 x i8> undef,
    <vscale x 2 x i16> %0,
    iXLen %1,
    iXLen 0, iXLen %2)

  ret <vscale x 2 x i8> %a
}

declare <vscale x 2 x i8> @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16(
  <vscale x 2 x i8>,
  <vscale x 2 x i16>,
  iXLen,
  <vscale x 2 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 2 x i8> @intrinsic_vnclip_mask_vx_nxv2i8_nxv2i16(<vscale x 2 x i8> %0, <vscale x 2 x i16> %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv2i8_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT:    vnclip.wx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16(
    <vscale x 2 x i8> %0,
    <vscale x 2 x i16> %1,
    iXLen %2,
    <vscale x 2 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 2 x i8> %a
}

declare <vscale x 4 x i8> @llvm.riscv.vnclip.nxv4i8.nxv4i16(
  <vscale x 4 x i8>,
  <vscale x 4 x i16>,
  iXLen, iXLen, iXLen);

define <vscale x 4 x i8> @intrinsic_vnclip_vx_nxv4i8_nxv4i16(<vscale x 4 x i16> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnclip_vx_nxv4i8_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vnclip.wx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vnclip.nxv4i8.nxv4i16(
    <vscale x 4 x i8> undef,
    <vscale x 4 x i16> %0,
    iXLen %1,
    iXLen 0, iXLen %2)

  ret <vscale x 4 x i8> %a
}

declare <vscale x 4 x i8> @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16(
  <vscale x 4 x i8>,
  <vscale x 4 x i16>,
  iXLen,
  <vscale x 4 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 4 x i8> @intrinsic_vnclip_mask_vx_nxv4i8_nxv4i16(<vscale x 4 x i8> %0, <vscale x 4 x i16> %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv4i8_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT:    vnclip.wx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16(
    <vscale x 4 x i8> %0,
    <vscale x 4 x i16> %1,
    iXLen %2,
    <vscale x 4 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 4 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vnclip.nxv8i8.nxv8i16(
  <vscale x 8 x i8>,
  <vscale x 8 x i16>,
  iXLen, iXLen, iXLen);

define <vscale x 8 x i8> @intrinsic_vnclip_vx_nxv8i8_nxv8i16(<vscale x 8 x i16> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnclip_vx_nxv8i8_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT:    vnclip.wx v10, v8, a0
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vnclip.nxv8i8.nxv8i16(
    <vscale x 8 x i8> undef,
    <vscale x 8 x i16> %0,
    iXLen %1,
    iXLen 0, iXLen %2)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16(
  <vscale x 8 x i8>,
  <vscale x 8 x i16>,
  iXLen,
  <vscale x 8 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 8 x i8> @intrinsic_vnclip_mask_vx_nxv8i8_nxv8i16(<vscale x 8 x i8> %0, <vscale x 8 x i16> %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv8i8_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT:    vnclip.wx v8, v10, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16(
    <vscale x 8 x i8> %0,
    <vscale x 8 x i16> %1,
    iXLen %2,
    <vscale x 8 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 16 x i8> @llvm.riscv.vnclip.nxv16i8.nxv16i16(
  <vscale x 16 x i8>,
  <vscale x 16 x i16>,
  iXLen, iXLen, iXLen);

define <vscale x 16 x i8> @intrinsic_vnclip_vx_nxv16i8_nxv16i16(<vscale x 16 x i16> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnclip_vx_nxv16i8_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT:    vnclip.wx v12, v8, a0
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vnclip.nxv16i8.nxv16i16(
    <vscale x 16 x i8> undef,
    <vscale x 16 x i16> %0,
    iXLen %1,
    iXLen 0, iXLen %2)

  ret <vscale x 16 x i8> %a
}

declare <vscale x 16 x i8> @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16(
  <vscale x 16 x i8>,
  <vscale x 16 x i16>,
  iXLen,
  <vscale x 16 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 16 x i8> @intrinsic_vnclip_mask_vx_nxv16i8_nxv16i16(<vscale x 16 x i8> %0, <vscale x 16 x i16> %1, iXLen %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv16i8_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT:    vnclip.wx v8, v12, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16(
    <vscale x 16 x i8> %0,
    <vscale x 16 x i16> %1,
    iXLen %2,
    <vscale x 16 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 16 x i8> %a
}

declare <vscale x 32 x i8> @llvm.riscv.vnclip.nxv32i8.nxv32i16(
  <vscale x 32 x i8>,
  <vscale x 32 x i16>,
  iXLen, iXLen, iXLen);

define <vscale x 32 x i8> @intrinsic_vnclip_vx_nxv32i8_nxv32i16(<vscale x 32 x i16> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnclip_vx_nxv32i8_nxv32i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT:    vnclip.wx v16, v8, a0
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vnclip.nxv32i8.nxv32i16(
    <vscale x 32 x i8> undef,
    <vscale x 32 x i16> %0,
    iXLen %1,
    iXLen 0, iXLen %2)

  ret <vscale x 32 x i8> %a
}

declare <vscale x 32 x i8> @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16(
  <vscale x 32 x i8>,
  <vscale x 32 x i16>,
  iXLen,
  <vscale x 32 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 32 x i8> @intrinsic_vnclip_mask_vx_nxv32i8_nxv32i16(<vscale x 32 x i8> %0, <vscale x 32 x i16> %1, iXLen %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv32i8_nxv32i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT:    vnclip.wx v8, v16, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16(
    <vscale x 32 x i8> %0,
    <vscale x 32 x i16> %1,
    iXLen %2,
    <vscale x 32 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 32 x i8> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vnclip.nxv1i16.nxv1i32(
  <vscale x 1 x i16>,
  <vscale x 1 x i32>,
  iXLen, iXLen, iXLen);

define <vscale x 1 x i16> @intrinsic_vnclip_vx_nxv1i16_nxv1i32(<vscale x 1 x i32> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnclip_vx_nxv1i16_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vnclip.wx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vnclip.nxv1i16.nxv1i32(
    <vscale x 1 x i16> undef,
    <vscale x 1 x i32> %0,
    iXLen %1,
    iXLen 0, iXLen %2)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32(
  <vscale x 1 x i16>,
  <vscale x 1 x i32>,
  iXLen,
  <vscale x 1 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 1 x i16> @intrinsic_vnclip_mask_vx_nxv1i16_nxv1i32(<vscale x 1 x i16> %0, <vscale x 1 x i32> %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv1i16_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT:    vnclip.wx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32(
    <vscale x 1 x i16> %0,
    <vscale x 1 x i32> %1,
    iXLen %2,
    <vscale x 1 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vnclip.nxv2i16.nxv2i32(
  <vscale x 2 x i16>,
  <vscale x 2 x i32>,
  iXLen, iXLen, iXLen);

define <vscale x 2 x i16> @intrinsic_vnclip_vx_nxv2i16_nxv2i32(<vscale x 2 x i32> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnclip_vx_nxv2i16_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT:    vnclip.wx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vnclip.nxv2i16.nxv2i32(
    <vscale x 2 x i16> undef,
    <vscale x 2 x i32> %0,
    iXLen %1,
    iXLen 0, iXLen %2)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32(
  <vscale x 2 x i16>,
  <vscale x 2 x i32>,
  iXLen,
  <vscale x 2 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 2 x i16> @intrinsic_vnclip_mask_vx_nxv2i16_nxv2i32(<vscale x 2 x i16> %0, <vscale x 2 x i32> %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv2i16_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT:    vnclip.wx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32(
    <vscale x 2 x i16> %0,
    <vscale x 2 x i32> %1,
    iXLen %2,
    <vscale x 2 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vnclip.nxv4i16.nxv4i32(
  <vscale x 4 x i16>,
  <vscale x 4 x i32>,
  iXLen, iXLen, iXLen);

define <vscale x 4 x i16> @intrinsic_vnclip_vx_nxv4i16_nxv4i32(<vscale x 4 x i32> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnclip_vx_nxv4i16_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vnclip.wx v10, v8, a0
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vnclip.nxv4i16.nxv4i32(
    <vscale x 4 x i16> undef,
    <vscale x 4 x i32> %0,
    iXLen %1,
    iXLen 0, iXLen %2)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32(
  <vscale x 4 x i16>,
  <vscale x 4 x i32>,
  iXLen,
  <vscale x 4 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 4 x i16> @intrinsic_vnclip_mask_vx_nxv4i16_nxv4i32(<vscale x 4 x i16> %0, <vscale x 4 x i32> %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv4i16_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT:    vnclip.wx v8, v10, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32(
    <vscale x 4 x i16> %0,
    <vscale x 4 x i32> %1,
    iXLen %2,
    <vscale x 4 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vnclip.nxv8i16.nxv8i32(
  <vscale x 8 x i16>,
  <vscale x 8 x i32>,
  iXLen, iXLen, iXLen);

define <vscale x 8 x i16> @intrinsic_vnclip_vx_nxv8i16_nxv8i32(<vscale x 8 x i32> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnclip_vx_nxv8i16_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT:    vnclip.wx v12, v8, a0
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vnclip.nxv8i16.nxv8i32(
    <vscale x 8 x i16> undef,
    <vscale x 8 x i32> %0,
    iXLen %1,
    iXLen 0, iXLen %2)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32(
  <vscale x 8 x i16>,
  <vscale x 8 x i32>,
  iXLen,
  <vscale x 8 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 8 x i16> @intrinsic_vnclip_mask_vx_nxv8i16_nxv8i32(<vscale x 8 x i16> %0, <vscale x 8 x i32> %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv8i16_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT:    vnclip.wx v8, v12, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32(
    <vscale x 8 x i16> %0,
    <vscale x 8 x i32> %1,
    iXLen %2,
    <vscale x 8 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vnclip.nxv16i16.nxv16i32(
  <vscale x 16 x i16>,
  <vscale x 16 x i32>,
  iXLen, iXLen, iXLen);

define <vscale x 16 x i16> @intrinsic_vnclip_vx_nxv16i16_nxv16i32(<vscale x 16 x i32> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnclip_vx_nxv16i16_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT:    vnclip.wx v16, v8, a0
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vnclip.nxv16i16.nxv16i32(
    <vscale x 16 x i16> undef,
    <vscale x 16 x i32> %0,
    iXLen %1,
    iXLen 0, iXLen %2)

  ret <vscale x 16 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32(
  <vscale x 16 x i16>,
  <vscale x 16 x i32>,
  iXLen,
  <vscale x 16 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 16 x i16> @intrinsic_vnclip_mask_vx_nxv16i16_nxv16i32(<vscale x 16 x i16> %0, <vscale x 16 x i32> %1, iXLen %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv16i16_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT:    vnclip.wx v8, v16, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32(
    <vscale x 16 x i16> %0,
    <vscale x 16 x i32> %1,
    iXLen %2,
    <vscale x 16 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 16 x i16> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vnclip.nxv1i32.nxv1i64(
  <vscale x 1 x i32>,
  <vscale x 1 x i64>,
  iXLen, iXLen, iXLen);

define <vscale x 1 x i32> @intrinsic_vnclip_vx_nxv1i32_nxv1i64(<vscale x 1 x i64> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnclip_vx_nxv1i32_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    vnclip.wx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vnclip.nxv1i32.nxv1i64(
    <vscale x 1 x i32> undef,
    <vscale x 1 x i64> %0,
    iXLen %1,
    iXLen 0, iXLen %2)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64(
  <vscale x 1 x i32>,
  <vscale x 1 x i64>,
  iXLen,
  <vscale x 1 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 1 x i32> @intrinsic_vnclip_mask_vx_nxv1i32_nxv1i64(<vscale x 1 x i32> %0, <vscale x 1 x i64> %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv1i32_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT:    vnclip.wx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64(
    <vscale x 1 x i32> %0,
    <vscale x 1 x i64> %1,
    iXLen %2,
    <vscale x 1 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vnclip.nxv2i32.nxv2i64(
  <vscale x 2 x i32>,
  <vscale x 2 x i64>,
  iXLen, iXLen, iXLen);

define <vscale x 2 x i32> @intrinsic_vnclip_vx_nxv2i32_nxv2i64(<vscale x 2 x i64> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnclip_vx_nxv2i32_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vnclip.wx v10, v8, a0
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vnclip.nxv2i32.nxv2i64(
    <vscale x 2 x i32> undef,
    <vscale x 2 x i64> %0,
    iXLen %1,
    iXLen 0, iXLen %2)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vnclip.mask.nxv2i32.nxv2i64(
  <vscale x 2 x i32>,
  <vscale x 2 x i64>,
  iXLen,
  <vscale x 2 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 2 x i32> @intrinsic_vnclip_mask_vx_nxv2i32_nxv2i64(<vscale x 2 x i32> %0, <vscale x 2 x i64> %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv2i32_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT:    vnclip.wx v8, v10, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vnclip.mask.nxv2i32.nxv2i64(
    <vscale x 2 x i32> %0,
    <vscale x 2 x i64> %1,
    iXLen %2,
    <vscale x 2 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vnclip.nxv4i32.nxv4i64(
  <vscale x 4 x i32>,
  <vscale x 4 x i64>,
  iXLen, iXLen, iXLen);

define <vscale x 4 x i32> @intrinsic_vnclip_vx_nxv4i32_nxv4i64(<vscale x 4 x i64> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnclip_vx_nxv4i32_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT:    vnclip.wx v12, v8, a0
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vnclip.nxv4i32.nxv4i64(
    <vscale x 4 x i32> undef,
    <vscale x 4 x i64> %0,
    iXLen %1,
    iXLen 0, iXLen %2)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64(
  <vscale x 4 x i32>,
  <vscale x 4 x i64>,
  iXLen,
  <vscale x 4 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 4 x i32> @intrinsic_vnclip_mask_vx_nxv4i32_nxv4i64(<vscale x 4 x i32> %0, <vscale x 4 x i64> %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv4i32_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT:    vnclip.wx v8, v12, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64(
    <vscale x 4 x i32> %0,
    <vscale x 4 x i64> %1,
    iXLen %2,
    <vscale x 4 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vnclip.nxv8i32.nxv8i64(
  <vscale x 8 x i32>,
  <vscale x 8 x i64>,
  iXLen, iXLen, iXLen);

define <vscale x 8 x i32> @intrinsic_vnclip_vx_nxv8i32_nxv8i64(<vscale x 8 x i64> %0, iXLen %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vnclip_vx_nxv8i32_nxv8i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT:    vnclip.wx v16, v8, a0
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vnclip.nxv8i32.nxv8i64(
    <vscale x 8 x i32> undef,
    <vscale x 8 x i64> %0,
    iXLen %1,
    iXLen 0, iXLen %2)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vnclip.mask.nxv8i32.nxv8i64(
  <vscale x 8 x i32>,
  <vscale x 8 x i64>,
  iXLen,
  <vscale x 8 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 8 x i32> @intrinsic_vnclip_mask_vx_nxv8i32_nxv8i64(<vscale x 8 x i32> %0, <vscale x 8 x i64> %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv8i32_nxv8i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT:    vnclip.wx v8, v16, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vnclip.mask.nxv8i32.nxv8i64(
    <vscale x 8 x i32> %0,
    <vscale x 8 x i64> %1,
    iXLen %2,
    <vscale x 8 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 8 x i32> %a
}

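; The tests below exercise the immediate form, vnclip.wi: the vector-scalar
; intrinsic is called with a constant shift of 9, which should fold into
; the instruction's 5-bit unsigned immediate field.
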
1450define <vscale x 1 x i8> @intrinsic_vnclip_vi_nxv1i8_nxv1i16_i8(<vscale x 1 x i16> %0, iXLen %1) nounwind {
1451; CHECK-LABEL: intrinsic_vnclip_vi_nxv1i8_nxv1i16_i8:
1452; CHECK:       # %bb.0: # %entry
1453; CHECK-NEXT:    csrwi vxrm, 0
1454; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
1455; CHECK-NEXT:    vnclip.wi v8, v8, 9
1456; CHECK-NEXT:    ret
1457entry:
1458  %a = call <vscale x 1 x i8> @llvm.riscv.vnclip.nxv1i8.nxv1i16(
1459    <vscale x 1 x i8> undef,
1460    <vscale x 1 x i16> %0,
1461    iXLen 9,
1462    iXLen 0, iXLen %1)
1463
1464  ret <vscale x 1 x i8> %a
1465}
1466
1467define <vscale x 1 x i8> @intrinsic_vnclip_mask_vi_nxv1i8_nxv1i16_i8(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
1468; CHECK-LABEL: intrinsic_vnclip_mask_vi_nxv1i8_nxv1i16_i8:
1469; CHECK:       # %bb.0: # %entry
1470; CHECK-NEXT:    csrwi vxrm, 0
1471; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
1472; CHECK-NEXT:    vnclip.wi v8, v9, 9, v0.t
1473; CHECK-NEXT:    ret
1474entry:
1475  %a = call <vscale x 1 x i8> @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16(
1476    <vscale x 1 x i8> %0,
1477    <vscale x 1 x i16> %1,
1478    iXLen 9,
1479    <vscale x 1 x i1> %2,
1480    iXLen 0, iXLen %3, iXLen 1)
1481
1482  ret <vscale x 1 x i8> %a
1483}
1484
1485define <vscale x 2 x i8> @intrinsic_vnclip_vi_nxv2i8_nxv2i16_i8(<vscale x 2 x i16> %0, iXLen %1) nounwind {
1486; CHECK-LABEL: intrinsic_vnclip_vi_nxv2i8_nxv2i16_i8:
1487; CHECK:       # %bb.0: # %entry
1488; CHECK-NEXT:    csrwi vxrm, 0
1489; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
1490; CHECK-NEXT:    vnclip.wi v8, v8, 9
1491; CHECK-NEXT:    ret
1492entry:
1493  %a = call <vscale x 2 x i8> @llvm.riscv.vnclip.nxv2i8.nxv2i16(
1494    <vscale x 2 x i8> undef,
1495    <vscale x 2 x i16> %0,
1496    iXLen 9,
1497    iXLen 0, iXLen %1)
1498
1499  ret <vscale x 2 x i8> %a
1500}
1501
1502define <vscale x 2 x i8> @intrinsic_vnclip_mask_vi_nxv2i8_nxv2i16_i8(<vscale x 2 x i8> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
1503; CHECK-LABEL: intrinsic_vnclip_mask_vi_nxv2i8_nxv2i16_i8:
1504; CHECK:       # %bb.0: # %entry
1505; CHECK-NEXT:    csrwi vxrm, 0
1506; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
1507; CHECK-NEXT:    vnclip.wi v8, v9, 9, v0.t
1508; CHECK-NEXT:    ret
1509entry:
1510  %a = call <vscale x 2 x i8> @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16(
1511    <vscale x 2 x i8> %0,
1512    <vscale x 2 x i16> %1,
1513    iXLen 9,
1514    <vscale x 2 x i1> %2,
1515    iXLen 0, iXLen %3, iXLen 1)
1516
1517  ret <vscale x 2 x i8> %a
1518}
1519
1520define <vscale x 4 x i8> @intrinsic_vnclip_vi_nxv4i8_nxv4i16_i8(<vscale x 4 x i16> %0, iXLen %1) nounwind {
1521; CHECK-LABEL: intrinsic_vnclip_vi_nxv4i8_nxv4i16_i8:
1522; CHECK:       # %bb.0: # %entry
1523; CHECK-NEXT:    csrwi vxrm, 0
1524; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
1525; CHECK-NEXT:    vnclip.wi v8, v8, 9
1526; CHECK-NEXT:    ret
1527entry:
1528  %a = call <vscale x 4 x i8> @llvm.riscv.vnclip.nxv4i8.nxv4i16(
1529    <vscale x 4 x i8> undef,
1530    <vscale x 4 x i16> %0,
1531    iXLen 9,
1532    iXLen 0, iXLen %1)
1533
1534  ret <vscale x 4 x i8> %a
1535}
1536
1537define <vscale x 4 x i8> @intrinsic_vnclip_mask_vi_nxv4i8_nxv4i16_i8(<vscale x 4 x i8> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
1538; CHECK-LABEL: intrinsic_vnclip_mask_vi_nxv4i8_nxv4i16_i8:
1539; CHECK:       # %bb.0: # %entry
1540; CHECK-NEXT:    csrwi vxrm, 0
1541; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
1542; CHECK-NEXT:    vnclip.wi v8, v9, 9, v0.t
1543; CHECK-NEXT:    ret
1544entry:
1545  %a = call <vscale x 4 x i8> @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16(
1546    <vscale x 4 x i8> %0,
1547    <vscale x 4 x i16> %1,
1548    iXLen 9,
1549    <vscale x 4 x i1> %2,
1550    iXLen 0, iXLen %3, iXLen 1)
1551
1552  ret <vscale x 4 x i8> %a
1553}
1554
define <vscale x 8 x i8> @intrinsic_vnclip_vi_nxv8i8_nxv8i16_i8(<vscale x 8 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vnclip_vi_nxv8i8_nxv8i16_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT:    vnclip.wi v10, v8, 9
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vnclip.nxv8i8.nxv8i16(
    <vscale x 8 x i8> undef,
    <vscale x 8 x i16> %0,
    iXLen 9,
    iXLen 0, iXLen %1)

  ret <vscale x 8 x i8> %a
}

define <vscale x 8 x i8> @intrinsic_vnclip_mask_vi_nxv8i8_nxv8i16_i8(<vscale x 8 x i8> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vnclip_mask_vi_nxv8i8_nxv8i16_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
; CHECK-NEXT:    vnclip.wi v8, v10, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16(
    <vscale x 8 x i8> %0,
    <vscale x 8 x i16> %1,
    iXLen 9,
    <vscale x 8 x i1> %2,
    iXLen 0, iXLen %3, iXLen 1)

  ret <vscale x 8 x i8> %a
}

define <vscale x 16 x i8> @intrinsic_vnclip_vi_nxv16i8_nxv16i16_i8(<vscale x 16 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vnclip_vi_nxv16i8_nxv16i16_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT:    vnclip.wi v12, v8, 9
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vnclip.nxv16i8.nxv16i16(
    <vscale x 16 x i8> undef,
    <vscale x 16 x i16> %0,
    iXLen 9,
    iXLen 0, iXLen %1)

  ret <vscale x 16 x i8> %a
}

define <vscale x 16 x i8> @intrinsic_vnclip_mask_vi_nxv16i8_nxv16i16_i8(<vscale x 16 x i8> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vnclip_mask_vi_nxv16i8_nxv16i16_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
; CHECK-NEXT:    vnclip.wi v8, v12, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16(
    <vscale x 16 x i8> %0,
    <vscale x 16 x i16> %1,
    iXLen 9,
    <vscale x 16 x i1> %2,
    iXLen 0, iXLen %3, iXLen 1)

  ret <vscale x 16 x i8> %a
}

define <vscale x 32 x i8> @intrinsic_vnclip_vi_nxv32i8_nxv32i16_i8(<vscale x 32 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vnclip_vi_nxv32i8_nxv32i16_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT:    vnclip.wi v16, v8, 9
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vnclip.nxv32i8.nxv32i16(
    <vscale x 32 x i8> undef,
    <vscale x 32 x i16> %0,
    iXLen 9,
    iXLen 0, iXLen %1)

  ret <vscale x 32 x i8> %a
}

define <vscale x 32 x i8> @intrinsic_vnclip_mask_vi_nxv32i8_nxv32i16_i8(<vscale x 32 x i8> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vnclip_mask_vi_nxv32i8_nxv32i16_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
; CHECK-NEXT:    vnclip.wi v8, v16, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16(
    <vscale x 32 x i8> %0,
    <vscale x 32 x i16> %1,
    iXLen 9,
    <vscale x 32 x i1> %2,
    iXLen 0, iXLen %3, iXLen 1)

  ret <vscale x 32 x i8> %a
}

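; i32 -> i16 narrowing: same pattern as the i16 -> i8 tests above, with the
; element width in the expected vsetvli switching to e16.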
define <vscale x 1 x i16> @intrinsic_vnclip_vi_nxv1i16_nxv1i32_i16(<vscale x 1 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vnclip_vi_nxv1i16_nxv1i32_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vnclip.wi v8, v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vnclip.nxv1i16.nxv1i32(
    <vscale x 1 x i16> undef,
    <vscale x 1 x i32> %0,
    iXLen 9,
    iXLen 0, iXLen %1)

  ret <vscale x 1 x i16> %a
}

define <vscale x 1 x i16> @intrinsic_vnclip_mask_vi_nxv1i16_nxv1i32_i16(<vscale x 1 x i16> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vnclip_mask_vi_nxv1i16_nxv1i32_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT:    vnclip.wi v8, v9, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32(
    <vscale x 1 x i16> %0,
    <vscale x 1 x i32> %1,
    iXLen 9,
    <vscale x 1 x i1> %2,
    iXLen 0, iXLen %3, iXLen 1)

  ret <vscale x 1 x i16> %a
}

define <vscale x 2 x i16> @intrinsic_vnclip_vi_nxv2i16_nxv2i32_i16(<vscale x 2 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vnclip_vi_nxv2i16_nxv2i32_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vnclip.wi v8, v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vnclip.nxv2i16.nxv2i32(
    <vscale x 2 x i16> undef,
    <vscale x 2 x i32> %0,
    iXLen 9,
    iXLen 0, iXLen %1)

  ret <vscale x 2 x i16> %a
}

define <vscale x 2 x i16> @intrinsic_vnclip_mask_vi_nxv2i16_nxv2i32_i16(<vscale x 2 x i16> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vnclip_mask_vi_nxv2i16_nxv2i32_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT:    vnclip.wi v8, v9, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32(
    <vscale x 2 x i16> %0,
    <vscale x 2 x i32> %1,
    iXLen 9,
    <vscale x 2 x i1> %2,
    iXLen 0, iXLen %3, iXLen 1)

  ret <vscale x 2 x i16> %a
}

define <vscale x 4 x i16> @intrinsic_vnclip_vi_nxv4i16_nxv4i32_i16(<vscale x 4 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vnclip_vi_nxv4i16_nxv4i32_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vnclip.wi v10, v8, 9
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vnclip.nxv4i16.nxv4i32(
    <vscale x 4 x i16> undef,
    <vscale x 4 x i32> %0,
    iXLen 9,
    iXLen 0, iXLen %1)

  ret <vscale x 4 x i16> %a
}

define <vscale x 4 x i16> @intrinsic_vnclip_mask_vi_nxv4i16_nxv4i32_i16(<vscale x 4 x i16> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vnclip_mask_vi_nxv4i16_nxv4i32_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT:    vnclip.wi v8, v10, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32(
    <vscale x 4 x i16> %0,
    <vscale x 4 x i32> %1,
    iXLen 9,
    <vscale x 4 x i1> %2,
    iXLen 0, iXLen %3, iXLen 1)

  ret <vscale x 4 x i16> %a
}

define <vscale x 8 x i16> @intrinsic_vnclip_vi_nxv8i16_nxv8i32_i16(<vscale x 8 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vnclip_vi_nxv8i16_nxv8i32_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vnclip.wi v12, v8, 9
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vnclip.nxv8i16.nxv8i32(
    <vscale x 8 x i16> undef,
    <vscale x 8 x i32> %0,
    iXLen 9,
    iXLen 0, iXLen %1)

  ret <vscale x 8 x i16> %a
}

define <vscale x 8 x i16> @intrinsic_vnclip_mask_vi_nxv8i16_nxv8i32_i16(<vscale x 8 x i16> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vnclip_mask_vi_nxv8i16_nxv8i32_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT:    vnclip.wi v8, v12, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32(
    <vscale x 8 x i16> %0,
    <vscale x 8 x i32> %1,
    iXLen 9,
    <vscale x 8 x i1> %2,
    iXLen 0, iXLen %3, iXLen 1)

  ret <vscale x 8 x i16> %a
}

define <vscale x 16 x i16> @intrinsic_vnclip_vi_nxv16i16_nxv16i32_i16(<vscale x 16 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vnclip_vi_nxv16i16_nxv16i32_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    vnclip.wi v16, v8, 9
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vnclip.nxv16i16.nxv16i32(
    <vscale x 16 x i16> undef,
    <vscale x 16 x i32> %0,
    iXLen 9,
    iXLen 0, iXLen %1)

  ret <vscale x 16 x i16> %a
}

define <vscale x 16 x i16> @intrinsic_vnclip_mask_vi_nxv16i16_nxv16i32_i16(<vscale x 16 x i16> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vnclip_mask_vi_nxv16i16_nxv16i32_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT:    vnclip.wi v8, v16, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32(
    <vscale x 16 x i16> %0,
    <vscale x 16 x i32> %1,
    iXLen 9,
    <vscale x 16 x i1> %2,
    iXLen 0, iXLen %3, iXLen 1)

  ret <vscale x 16 x i16> %a
}

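; i64 -> i32 narrowing. The destination tops out at m4 (nxv8i32) because the
; wide source needs twice the destination LMUL, and m8 is the largest
; register group.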
define <vscale x 1 x i32> @intrinsic_vnclip_vi_nxv1i32_nxv1i64_i32(<vscale x 1 x i64> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vnclip_vi_nxv1i32_nxv1i64_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vnclip.wi v8, v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vnclip.nxv1i32.nxv1i64(
    <vscale x 1 x i32> undef,
    <vscale x 1 x i64> %0,
    iXLen 9,
    iXLen 0, iXLen %1)

  ret <vscale x 1 x i32> %a
}

define <vscale x 1 x i32> @intrinsic_vnclip_mask_vi_nxv1i32_nxv1i64_i32(<vscale x 1 x i32> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vnclip_mask_vi_nxv1i32_nxv1i64_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT:    vnclip.wi v8, v9, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64(
    <vscale x 1 x i32> %0,
    <vscale x 1 x i64> %1,
    iXLen 9,
    <vscale x 1 x i1> %2,
    iXLen 0, iXLen %3, iXLen 1)

  ret <vscale x 1 x i32> %a
}

define <vscale x 2 x i32> @intrinsic_vnclip_vi_nxv2i32_nxv2i64_i32(<vscale x 2 x i64> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vnclip_vi_nxv2i32_nxv2i64_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vnclip.wi v10, v8, 9
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vnclip.nxv2i32.nxv2i64(
    <vscale x 2 x i32> undef,
    <vscale x 2 x i64> %0,
    iXLen 9,
    iXLen 0, iXLen %1)

  ret <vscale x 2 x i32> %a
}

define <vscale x 2 x i32> @intrinsic_vnclip_mask_vi_nxv2i32_nxv2i64_i32(<vscale x 2 x i32> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vnclip_mask_vi_nxv2i32_nxv2i64_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT:    vnclip.wi v8, v10, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vnclip.mask.nxv2i32.nxv2i64(
    <vscale x 2 x i32> %0,
    <vscale x 2 x i64> %1,
    iXLen 9,
    <vscale x 2 x i1> %2,
    iXLen 0, iXLen %3, iXLen 1)

  ret <vscale x 2 x i32> %a
}

define <vscale x 4 x i32> @intrinsic_vnclip_vi_nxv4i32_nxv4i64_i32(<vscale x 4 x i64> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vnclip_vi_nxv4i32_nxv4i64_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vnclip.wi v12, v8, 9
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vnclip.nxv4i32.nxv4i64(
    <vscale x 4 x i32> undef,
    <vscale x 4 x i64> %0,
    iXLen 9,
    iXLen 0, iXLen %1)

  ret <vscale x 4 x i32> %a
}

define <vscale x 4 x i32> @intrinsic_vnclip_mask_vi_nxv4i32_nxv4i64_i32(<vscale x 4 x i32> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vnclip_mask_vi_nxv4i32_nxv4i64_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT:    vnclip.wi v8, v12, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64(
    <vscale x 4 x i32> %0,
    <vscale x 4 x i64> %1,
    iXLen 9,
    <vscale x 4 x i1> %2,
    iXLen 0, iXLen %3, iXLen 1)

  ret <vscale x 4 x i32> %a
}

define <vscale x 8 x i32> @intrinsic_vnclip_vi_nxv8i32_nxv8i64_i32(<vscale x 8 x i64> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vnclip_vi_nxv8i32_nxv8i64_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vnclip.wi v16, v8, 9
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vnclip.nxv8i32.nxv8i64(
    <vscale x 8 x i32> undef,
    <vscale x 8 x i64> %0,
    iXLen 9,
    iXLen 0, iXLen %1)

  ret <vscale x 8 x i32> %a
}

define <vscale x 8 x i32> @intrinsic_vnclip_mask_vi_nxv8i32_nxv8i64_i32(<vscale x 8 x i32> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vnclip_mask_vi_nxv8i32_nxv8i64_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT:    vnclip.wi v8, v16, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vnclip.mask.nxv8i32.nxv8i64(
    <vscale x 8 x i32> %0,
    <vscale x 8 x i64> %1,
    iXLen 9,
    <vscale x 8 x i1> %2,
    iXLen 0, iXLen %3, iXLen 1)

  ret <vscale x 8 x i32> %a
}
