; xref: /llvm-project/llvm/test/CodeGen/RISCV/rvv/vwsubu.w.ll (revision f2bdc29f3e5dd4d8d65081094f8afc789d58706a)
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s
6
; vwsubu.w.wv (i8 -> i16): vd = vs2(wide) - zext(vs1), unmasked and masked
; forms across LMUL mf8..m4. CHECK lines are autogenerated; do not hand-edit.
declare <vscale x 1 x i16> @llvm.riscv.vwsubu.w.nxv1i16.nxv1i8(
  <vscale x 1 x i16>,
  <vscale x 1 x i16>,
  <vscale x 1 x i8>,
  iXLen);

define <vscale x 1 x i16> @intrinsic_vwsubu.w_wv_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwsubu.w_wv_nxv1i16_nxv1i16_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vwsubu.wv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vwsubu.w.nxv1i16.nxv1i8(
    <vscale x 1 x i16> undef,
    <vscale x 1 x i16> %0,
    <vscale x 1 x i8> %1,
    iXLen %2)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vwsubu.w.mask.nxv1i16.nxv1i8(
  <vscale x 1 x i16>,
  <vscale x 1 x i16>,
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x i16> @intrinsic_vwsubu.w_mask_wv_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv1i16_nxv1i16_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT:    vwsubu.wv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vwsubu.w.mask.nxv1i16.nxv1i8(
    <vscale x 1 x i16> %0,
    <vscale x 1 x i16> %1,
    <vscale x 1 x i8> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vwsubu.w.nxv2i16.nxv2i8(
  <vscale x 2 x i16>,
  <vscale x 2 x i16>,
  <vscale x 2 x i8>,
  iXLen);

define <vscale x 2 x i16> @intrinsic_vwsubu.w_wv_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwsubu.w_wv_nxv2i16_nxv2i16_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT:    vwsubu.wv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vwsubu.w.nxv2i16.nxv2i8(
    <vscale x 2 x i16> undef,
    <vscale x 2 x i16> %0,
    <vscale x 2 x i8> %1,
    iXLen %2)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vwsubu.w.mask.nxv2i16.nxv2i8(
  <vscale x 2 x i16>,
  <vscale x 2 x i16>,
  <vscale x 2 x i8>,
  <vscale x 2 x i1>,
  iXLen,
  iXLen);

define <vscale x 2 x i16> @intrinsic_vwsubu.w_mask_wv_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv2i16_nxv2i16_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
; CHECK-NEXT:    vwsubu.wv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vwsubu.w.mask.nxv2i16.nxv2i8(
    <vscale x 2 x i16> %0,
    <vscale x 2 x i16> %1,
    <vscale x 2 x i8> %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vwsubu.w.nxv4i16.nxv4i8(
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  <vscale x 4 x i8>,
  iXLen);

define <vscale x 4 x i16> @intrinsic_vwsubu.w_wv_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwsubu.w_wv_nxv4i16_nxv4i16_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    vwsubu.wv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vwsubu.w.nxv4i16.nxv4i8(
    <vscale x 4 x i16> undef,
    <vscale x 4 x i16> %0,
    <vscale x 4 x i8> %1,
    iXLen %2)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vwsubu.w.mask.nxv4i16.nxv4i8(
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  <vscale x 4 x i8>,
  <vscale x 4 x i1>,
  iXLen,
  iXLen);

define <vscale x 4 x i16> @intrinsic_vwsubu.w_mask_wv_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv4i16_nxv4i16_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
; CHECK-NEXT:    vwsubu.wv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vwsubu.w.mask.nxv4i16.nxv4i8(
    <vscale x 4 x i16> %0,
    <vscale x 4 x i16> %1,
    <vscale x 4 x i8> %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vwsubu.w.nxv8i16.nxv8i8(
  <vscale x 8 x i16>,
  <vscale x 8 x i16>,
  <vscale x 8 x i8>,
  iXLen);

define <vscale x 8 x i16> @intrinsic_vwsubu.w_wv_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwsubu.w_wv_nxv8i16_nxv8i16_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT:    vwsubu.wv v8, v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vwsubu.w.nxv8i16.nxv8i8(
    <vscale x 8 x i16> undef,
    <vscale x 8 x i16> %0,
    <vscale x 8 x i8> %1,
    iXLen %2)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vwsubu.w.mask.nxv8i16.nxv8i8(
  <vscale x 8 x i16>,
  <vscale x 8 x i16>,
  <vscale x 8 x i8>,
  <vscale x 8 x i1>,
  iXLen,
  iXLen);

define <vscale x 8 x i16> @intrinsic_vwsubu.w_mask_wv_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv8i16_nxv8i16_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
; CHECK-NEXT:    vwsubu.wv v8, v10, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vwsubu.w.mask.nxv8i16.nxv8i8(
    <vscale x 8 x i16> %0,
    <vscale x 8 x i16> %1,
    <vscale x 8 x i8> %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vwsubu.w.nxv16i16.nxv16i8(
  <vscale x 16 x i16>,
  <vscale x 16 x i16>,
  <vscale x 16 x i8>,
  iXLen);

define <vscale x 16 x i16> @intrinsic_vwsubu.w_wv_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwsubu.w_wv_nxv16i16_nxv16i16_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT:    vwsubu.wv v8, v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vwsubu.w.nxv16i16.nxv16i8(
    <vscale x 16 x i16> undef,
    <vscale x 16 x i16> %0,
    <vscale x 16 x i8> %1,
    iXLen %2)

  ret <vscale x 16 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vwsubu.w.mask.nxv16i16.nxv16i8(
  <vscale x 16 x i16>,
  <vscale x 16 x i16>,
  <vscale x 16 x i8>,
  <vscale x 16 x i1>,
  iXLen,
  iXLen);

define <vscale x 16 x i16> @intrinsic_vwsubu.w_mask_wv_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv16i16_nxv16i16_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
; CHECK-NEXT:    vwsubu.wv v8, v12, v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vwsubu.w.mask.nxv16i16.nxv16i8(
    <vscale x 16 x i16> %0,
    <vscale x 16 x i16> %1,
    <vscale x 16 x i8> %2,
    <vscale x 16 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 16 x i16> %a
}

declare <vscale x 32 x i16> @llvm.riscv.vwsubu.w.nxv32i16.nxv32i8(
  <vscale x 32 x i16>,
  <vscale x 32 x i16>,
  <vscale x 32 x i8>,
  iXLen);

define <vscale x 32 x i16> @intrinsic_vwsubu.w_wv_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwsubu.w_wv_nxv32i16_nxv32i16_nxv32i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT:    vwsubu.wv v8, v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vwsubu.w.nxv32i16.nxv32i8(
    <vscale x 32 x i16> undef,
    <vscale x 32 x i16> %0,
    <vscale x 32 x i8> %1,
    iXLen %2)

  ret <vscale x 32 x i16> %a
}

declare <vscale x 32 x i16> @llvm.riscv.vwsubu.w.mask.nxv32i16.nxv32i8(
  <vscale x 32 x i16>,
  <vscale x 32 x i16>,
  <vscale x 32 x i8>,
  <vscale x 32 x i1>,
  iXLen,
  iXLen);

define <vscale x 32 x i16> @intrinsic_vwsubu.w_mask_wv_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv32i16_nxv32i16_nxv32i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vl4r.v v24, (a0)
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT:    vwsubu.wv v8, v16, v24, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vwsubu.w.mask.nxv32i16.nxv32i8(
    <vscale x 32 x i16> %0,
    <vscale x 32 x i16> %1,
    <vscale x 32 x i8> %2,
    <vscale x 32 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 32 x i16> %a
}
289
; vwsubu.w.wv (i16 -> i32): vd = vs2(wide) - zext(vs1), unmasked and masked
; forms across LMUL mf4..m4. CHECK lines are autogenerated; do not hand-edit.
declare <vscale x 1 x i32> @llvm.riscv.vwsubu.w.nxv1i32.nxv1i16(
  <vscale x 1 x i32>,
  <vscale x 1 x i32>,
  <vscale x 1 x i16>,
  iXLen);

define <vscale x 1 x i32> @intrinsic_vwsubu.w_wv_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwsubu.w_wv_nxv1i32_nxv1i32_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vwsubu.wv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vwsubu.w.nxv1i32.nxv1i16(
    <vscale x 1 x i32> undef,
    <vscale x 1 x i32> %0,
    <vscale x 1 x i16> %1,
    iXLen %2)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vwsubu.w.mask.nxv1i32.nxv1i16(
  <vscale x 1 x i32>,
  <vscale x 1 x i32>,
  <vscale x 1 x i16>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x i32> @intrinsic_vwsubu.w_mask_wv_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv1i32_nxv1i32_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT:    vwsubu.wv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vwsubu.w.mask.nxv1i32.nxv1i16(
    <vscale x 1 x i32> %0,
    <vscale x 1 x i32> %1,
    <vscale x 1 x i16> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vwsubu.w.nxv2i32.nxv2i16(
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  <vscale x 2 x i16>,
  iXLen);

define <vscale x 2 x i32> @intrinsic_vwsubu.w_wv_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwsubu.w_wv_nxv2i32_nxv2i32_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vwsubu.wv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vwsubu.w.nxv2i32.nxv2i16(
    <vscale x 2 x i32> undef,
    <vscale x 2 x i32> %0,
    <vscale x 2 x i16> %1,
    iXLen %2)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vwsubu.w.mask.nxv2i32.nxv2i16(
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  <vscale x 2 x i16>,
  <vscale x 2 x i1>,
  iXLen,
  iXLen);

define <vscale x 2 x i32> @intrinsic_vwsubu.w_mask_wv_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv2i32_nxv2i32_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT:    vwsubu.wv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vwsubu.w.mask.nxv2i32.nxv2i16(
    <vscale x 2 x i32> %0,
    <vscale x 2 x i32> %1,
    <vscale x 2 x i16> %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vwsubu.w.nxv4i32.nxv4i16(
  <vscale x 4 x i32>,
  <vscale x 4 x i32>,
  <vscale x 4 x i16>,
  iXLen);

define <vscale x 4 x i32> @intrinsic_vwsubu.w_wv_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwsubu.w_wv_nxv4i32_nxv4i32_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vwsubu.wv v8, v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vwsubu.w.nxv4i32.nxv4i16(
    <vscale x 4 x i32> undef,
    <vscale x 4 x i32> %0,
    <vscale x 4 x i16> %1,
    iXLen %2)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vwsubu.w.mask.nxv4i32.nxv4i16(
  <vscale x 4 x i32>,
  <vscale x 4 x i32>,
  <vscale x 4 x i16>,
  <vscale x 4 x i1>,
  iXLen,
  iXLen);

define <vscale x 4 x i32> @intrinsic_vwsubu.w_mask_wv_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv4i32_nxv4i32_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT:    vwsubu.wv v8, v10, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vwsubu.w.mask.nxv4i32.nxv4i16(
    <vscale x 4 x i32> %0,
    <vscale x 4 x i32> %1,
    <vscale x 4 x i16> %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vwsubu.w.nxv8i32.nxv8i16(
  <vscale x 8 x i32>,
  <vscale x 8 x i32>,
  <vscale x 8 x i16>,
  iXLen);

define <vscale x 8 x i32> @intrinsic_vwsubu.w_wv_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwsubu.w_wv_nxv8i32_nxv8i32_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vwsubu.wv v8, v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vwsubu.w.nxv8i32.nxv8i16(
    <vscale x 8 x i32> undef,
    <vscale x 8 x i32> %0,
    <vscale x 8 x i16> %1,
    iXLen %2)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vwsubu.w.mask.nxv8i32.nxv8i16(
  <vscale x 8 x i32>,
  <vscale x 8 x i32>,
  <vscale x 8 x i16>,
  <vscale x 8 x i1>,
  iXLen,
  iXLen);

define <vscale x 8 x i32> @intrinsic_vwsubu.w_mask_wv_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv8i32_nxv8i32_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT:    vwsubu.wv v8, v12, v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vwsubu.w.mask.nxv8i32.nxv8i16(
    <vscale x 8 x i32> %0,
    <vscale x 8 x i32> %1,
    <vscale x 8 x i16> %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 16 x i32> @llvm.riscv.vwsubu.w.nxv16i32.nxv16i16(
  <vscale x 16 x i32>,
  <vscale x 16 x i32>,
  <vscale x 16 x i16>,
  iXLen);

define <vscale x 16 x i32> @intrinsic_vwsubu.w_wv_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwsubu.w_wv_nxv16i32_nxv16i32_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    vwsubu.wv v8, v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vwsubu.w.nxv16i32.nxv16i16(
    <vscale x 16 x i32> undef,
    <vscale x 16 x i32> %0,
    <vscale x 16 x i16> %1,
    iXLen %2)

  ret <vscale x 16 x i32> %a
}

declare <vscale x 16 x i32> @llvm.riscv.vwsubu.w.mask.nxv16i32.nxv16i16(
  <vscale x 16 x i32>,
  <vscale x 16 x i32>,
  <vscale x 16 x i16>,
  <vscale x 16 x i1>,
  iXLen,
  iXLen);

define <vscale x 16 x i32> @intrinsic_vwsubu.w_mask_wv_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv16i32_nxv16i32_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vl4re16.v v24, (a0)
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT:    vwsubu.wv v8, v16, v24, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vwsubu.w.mask.nxv16i32.nxv16i16(
    <vscale x 16 x i32> %0,
    <vscale x 16 x i32> %1,
    <vscale x 16 x i16> %2,
    <vscale x 16 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 16 x i32> %a
}
525
; vwsubu.w.wv (i32 -> i64): vd = vs2(wide) - zext(vs1), unmasked and masked
; forms across LMUL mf2..m4. CHECK lines are autogenerated; do not hand-edit.
declare <vscale x 1 x i64> @llvm.riscv.vwsubu.w.nxv1i64.nxv1i32(
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  <vscale x 1 x i32>,
  iXLen);

define <vscale x 1 x i64> @intrinsic_vwsubu.w_wv_nxv1i64_nxv1i64_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwsubu.w_wv_nxv1i64_nxv1i64_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vwsubu.wv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vwsubu.w.nxv1i64.nxv1i32(
    <vscale x 1 x i64> undef,
    <vscale x 1 x i64> %0,
    <vscale x 1 x i32> %1,
    iXLen %2)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vwsubu.w.mask.nxv1i64.nxv1i32(
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  <vscale x 1 x i32>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen);

define <vscale x 1 x i64> @intrinsic_vwsubu.w_mask_wv_nxv1i64_nxv1i64_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv1i64_nxv1i64_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT:    vwsubu.wv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vwsubu.w.mask.nxv1i64.nxv1i32(
    <vscale x 1 x i64> %0,
    <vscale x 1 x i64> %1,
    <vscale x 1 x i32> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vwsubu.w.nxv2i64.nxv2i32(
  <vscale x 2 x i64>,
  <vscale x 2 x i64>,
  <vscale x 2 x i32>,
  iXLen);

define <vscale x 2 x i64> @intrinsic_vwsubu.w_wv_nxv2i64_nxv2i64_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwsubu.w_wv_nxv2i64_nxv2i64_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vwsubu.wv v8, v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vwsubu.w.nxv2i64.nxv2i32(
    <vscale x 2 x i64> undef,
    <vscale x 2 x i64> %0,
    <vscale x 2 x i32> %1,
    iXLen %2)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vwsubu.w.mask.nxv2i64.nxv2i32(
  <vscale x 2 x i64>,
  <vscale x 2 x i64>,
  <vscale x 2 x i32>,
  <vscale x 2 x i1>,
  iXLen,
  iXLen);

define <vscale x 2 x i64> @intrinsic_vwsubu.w_mask_wv_nxv2i64_nxv2i64_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv2i64_nxv2i64_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT:    vwsubu.wv v8, v10, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vwsubu.w.mask.nxv2i64.nxv2i32(
    <vscale x 2 x i64> %0,
    <vscale x 2 x i64> %1,
    <vscale x 2 x i32> %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vwsubu.w.nxv4i64.nxv4i32(
  <vscale x 4 x i64>,
  <vscale x 4 x i64>,
  <vscale x 4 x i32>,
  iXLen);

define <vscale x 4 x i64> @intrinsic_vwsubu.w_wv_nxv4i64_nxv4i64_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwsubu.w_wv_nxv4i64_nxv4i64_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vwsubu.wv v8, v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vwsubu.w.nxv4i64.nxv4i32(
    <vscale x 4 x i64> undef,
    <vscale x 4 x i64> %0,
    <vscale x 4 x i32> %1,
    iXLen %2)

  ret <vscale x 4 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vwsubu.w.mask.nxv4i64.nxv4i32(
  <vscale x 4 x i64>,
  <vscale x 4 x i64>,
  <vscale x 4 x i32>,
  <vscale x 4 x i1>,
  iXLen,
  iXLen);

define <vscale x 4 x i64> @intrinsic_vwsubu.w_mask_wv_nxv4i64_nxv4i64_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv4i64_nxv4i64_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT:    vwsubu.wv v8, v12, v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vwsubu.w.mask.nxv4i64.nxv4i32(
    <vscale x 4 x i64> %0,
    <vscale x 4 x i64> %1,
    <vscale x 4 x i32> %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.vwsubu.w.nxv8i64.nxv8i32(
  <vscale x 8 x i64>,
  <vscale x 8 x i64>,
  <vscale x 8 x i32>,
  iXLen);

define <vscale x 8 x i64> @intrinsic_vwsubu.w_wv_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwsubu.w_wv_nxv8i64_nxv8i64_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vwsubu.wv v8, v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vwsubu.w.nxv8i64.nxv8i32(
    <vscale x 8 x i64> undef,
    <vscale x 8 x i64> %0,
    <vscale x 8 x i32> %1,
    iXLen %2)

  ret <vscale x 8 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.vwsubu.w.mask.nxv8i64.nxv8i32(
  <vscale x 8 x i64>,
  <vscale x 8 x i64>,
  <vscale x 8 x i32>,
  <vscale x 8 x i1>,
  iXLen,
  iXLen);

define <vscale x 8 x i64> @intrinsic_vwsubu.w_mask_wv_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv8i64_nxv8i64_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vl4re32.v v24, (a0)
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT:    vwsubu.wv v8, v16, v24, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vwsubu.w.mask.nxv8i64.nxv8i32(
    <vscale x 8 x i64> %0,
    <vscale x 8 x i64> %1,
    <vscale x 8 x i32> %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x i64> %a
}
714
715declare <vscale x 1 x i16> @llvm.riscv.vwsubu.w.nxv1i16.i8(
716  <vscale x 1 x i16>,
717  <vscale x 1 x i16>,
718  i8,
719  iXLen);
720
721define <vscale x 1 x i16> @intrinsic_vwsubu.w_wx_nxv1i16_nxv1i16_i8(<vscale x 1 x i16> %0, i8 %1, iXLen %2) nounwind {
722; CHECK-LABEL: intrinsic_vwsubu.w_wx_nxv1i16_nxv1i16_i8:
723; CHECK:       # %bb.0: # %entry
724; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
725; CHECK-NEXT:    vwsubu.wx v8, v8, a0
726; CHECK-NEXT:    ret
727entry:
728  %a = call <vscale x 1 x i16> @llvm.riscv.vwsubu.w.nxv1i16.i8(
729    <vscale x 1 x i16> undef,
730    <vscale x 1 x i16> %0,
731    i8 %1,
732    iXLen %2)
733
734  ret <vscale x 1 x i16> %a
735}
736
737declare <vscale x 1 x i16> @llvm.riscv.vwsubu.w.mask.nxv1i16.i8(
738  <vscale x 1 x i16>,
739  <vscale x 1 x i16>,
740  i8,
741  <vscale x 1 x i1>,
742  iXLen,
743  iXLen);
744
745define <vscale x 1 x i16> @intrinsic_vwsubu.w_mask_wx_nxv1i16_nxv1i16_i8(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i8 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
746; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv1i16_nxv1i16_i8:
747; CHECK:       # %bb.0: # %entry
748; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
749; CHECK-NEXT:    vwsubu.wx v8, v9, a0, v0.t
750; CHECK-NEXT:    ret
751entry:
752  %a = call <vscale x 1 x i16> @llvm.riscv.vwsubu.w.mask.nxv1i16.i8(
753    <vscale x 1 x i16> %0,
754    <vscale x 1 x i16> %1,
755    i8 %2,
756    <vscale x 1 x i1> %3,
757    iXLen %4, iXLen 1)
758
759  ret <vscale x 1 x i16> %a
760}
761
762declare <vscale x 2 x i16> @llvm.riscv.vwsubu.w.nxv2i16.i8(
763  <vscale x 2 x i16>,
764  <vscale x 2 x i16>,
765  i8,
766  iXLen);
767
768define <vscale x 2 x i16> @intrinsic_vwsubu.w_wx_nxv2i16_nxv2i16_i8(<vscale x 2 x i16> %0, i8 %1, iXLen %2) nounwind {
769; CHECK-LABEL: intrinsic_vwsubu.w_wx_nxv2i16_nxv2i16_i8:
770; CHECK:       # %bb.0: # %entry
771; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
772; CHECK-NEXT:    vwsubu.wx v8, v8, a0
773; CHECK-NEXT:    ret
774entry:
775  %a = call <vscale x 2 x i16> @llvm.riscv.vwsubu.w.nxv2i16.i8(
776    <vscale x 2 x i16> undef,
777    <vscale x 2 x i16> %0,
778    i8 %1,
779    iXLen %2)
780
781  ret <vscale x 2 x i16> %a
782}
783
784declare <vscale x 2 x i16> @llvm.riscv.vwsubu.w.mask.nxv2i16.i8(
785  <vscale x 2 x i16>,
786  <vscale x 2 x i16>,
787  i8,
788  <vscale x 2 x i1>,
789  iXLen,
790  iXLen);
791
792define <vscale x 2 x i16> @intrinsic_vwsubu.w_mask_wx_nxv2i16_nxv2i16_i8(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i8 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
793; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv2i16_nxv2i16_i8:
794; CHECK:       # %bb.0: # %entry
795; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
796; CHECK-NEXT:    vwsubu.wx v8, v9, a0, v0.t
797; CHECK-NEXT:    ret
798entry:
799  %a = call <vscale x 2 x i16> @llvm.riscv.vwsubu.w.mask.nxv2i16.i8(
800    <vscale x 2 x i16> %0,
801    <vscale x 2 x i16> %1,
802    i8 %2,
803    <vscale x 2 x i1> %3,
804    iXLen %4, iXLen 1)
805
806  ret <vscale x 2 x i16> %a
807}
808
809declare <vscale x 4 x i16> @llvm.riscv.vwsubu.w.nxv4i16.i8(
810  <vscale x 4 x i16>,
811  <vscale x 4 x i16>,
812  i8,
813  iXLen);
814
815define <vscale x 4 x i16> @intrinsic_vwsubu.w_wx_nxv4i16_nxv4i16_i8(<vscale x 4 x i16> %0, i8 %1, iXLen %2) nounwind {
816; CHECK-LABEL: intrinsic_vwsubu.w_wx_nxv4i16_nxv4i16_i8:
817; CHECK:       # %bb.0: # %entry
818; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
819; CHECK-NEXT:    vwsubu.wx v8, v8, a0
820; CHECK-NEXT:    ret
821entry:
822  %a = call <vscale x 4 x i16> @llvm.riscv.vwsubu.w.nxv4i16.i8(
823    <vscale x 4 x i16> undef,
824    <vscale x 4 x i16> %0,
825    i8 %1,
826    iXLen %2)
827
828  ret <vscale x 4 x i16> %a
829}
830
831declare <vscale x 4 x i16> @llvm.riscv.vwsubu.w.mask.nxv4i16.i8(
832  <vscale x 4 x i16>,
833  <vscale x 4 x i16>,
834  i8,
835  <vscale x 4 x i1>,
836  iXLen,
837  iXLen);
838
839define <vscale x 4 x i16> @intrinsic_vwsubu.w_mask_wx_nxv4i16_nxv4i16_i8(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i8 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
840; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv4i16_nxv4i16_i8:
841; CHECK:       # %bb.0: # %entry
842; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
843; CHECK-NEXT:    vwsubu.wx v8, v9, a0, v0.t
844; CHECK-NEXT:    ret
845entry:
846  %a = call <vscale x 4 x i16> @llvm.riscv.vwsubu.w.mask.nxv4i16.i8(
847    <vscale x 4 x i16> %0,
848    <vscale x 4 x i16> %1,
849    i8 %2,
850    <vscale x 4 x i1> %3,
851    iXLen %4, iXLen 1)
852
853  ret <vscale x 4 x i16> %a
854}
855
856declare <vscale x 8 x i16> @llvm.riscv.vwsubu.w.nxv8i16.i8(
857  <vscale x 8 x i16>,
858  <vscale x 8 x i16>,
859  i8,
860  iXLen);
861
862define <vscale x 8 x i16> @intrinsic_vwsubu.w_wx_nxv8i16_nxv8i16_i8(<vscale x 8 x i16> %0, i8 %1, iXLen %2) nounwind {
863; CHECK-LABEL: intrinsic_vwsubu.w_wx_nxv8i16_nxv8i16_i8:
864; CHECK:       # %bb.0: # %entry
865; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
866; CHECK-NEXT:    vwsubu.wx v8, v8, a0
867; CHECK-NEXT:    ret
868entry:
869  %a = call <vscale x 8 x i16> @llvm.riscv.vwsubu.w.nxv8i16.i8(
870    <vscale x 8 x i16> undef,
871    <vscale x 8 x i16> %0,
872    i8 %1,
873    iXLen %2)
874
875  ret <vscale x 8 x i16> %a
876}
877
878declare <vscale x 8 x i16> @llvm.riscv.vwsubu.w.mask.nxv8i16.i8(
879  <vscale x 8 x i16>,
880  <vscale x 8 x i16>,
881  i8,
882  <vscale x 8 x i1>,
883  iXLen,
884  iXLen);
885
886define <vscale x 8 x i16> @intrinsic_vwsubu.w_mask_wx_nxv8i16_nxv8i16_i8(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i8 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
887; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv8i16_nxv8i16_i8:
888; CHECK:       # %bb.0: # %entry
889; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
890; CHECK-NEXT:    vwsubu.wx v8, v10, a0, v0.t
891; CHECK-NEXT:    ret
892entry:
893  %a = call <vscale x 8 x i16> @llvm.riscv.vwsubu.w.mask.nxv8i16.i8(
894    <vscale x 8 x i16> %0,
895    <vscale x 8 x i16> %1,
896    i8 %2,
897    <vscale x 8 x i1> %3,
898    iXLen %4, iXLen 1)
899
900  ret <vscale x 8 x i16> %a
901}
902
903declare <vscale x 16 x i16> @llvm.riscv.vwsubu.w.nxv16i16.i8(
904  <vscale x 16 x i16>,
905  <vscale x 16 x i16>,
906  i8,
907  iXLen);
908
; Unmasked wide-scalar subtract; passthru is unused under the "ta" policy in
; the CHECK lines, so use poison (preferred over undef).
define <vscale x 16 x i16> @intrinsic_vwsubu.w_wx_nxv16i16_nxv16i16_i8(<vscale x 16 x i16> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwsubu.w_wx_nxv16i16_nxv16i16_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT:    vwsubu.wx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vwsubu.w.nxv16i16.i8(
    <vscale x 16 x i16> poison,
    <vscale x 16 x i16> %0,
    i8 %1,
    iXLen %2)

  ret <vscale x 16 x i16> %a
}
924
925declare <vscale x 16 x i16> @llvm.riscv.vwsubu.w.mask.nxv16i16.i8(
926  <vscale x 16 x i16>,
927  <vscale x 16 x i16>,
928  i8,
929  <vscale x 16 x i1>,
930  iXLen,
931  iXLen);
932
; Masked form: %0 is the merge value, %3 the mask (v0); trailing iXLen 1
; selects the tail-agnostic, mask-undisturbed policy ("ta, mu" below).
define <vscale x 16 x i16> @intrinsic_vwsubu.w_mask_wx_nxv16i16_nxv16i16_i8(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i8 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv16i16_nxv16i16_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT:    vwsubu.wx v8, v12, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vwsubu.w.mask.nxv16i16.i8(
    <vscale x 16 x i16> %0,
    <vscale x 16 x i16> %1,
    i8 %2,
    <vscale x 16 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 16 x i16> %a
}
949
950declare <vscale x 32 x i16> @llvm.riscv.vwsubu.w.nxv32i16.i8(
951  <vscale x 32 x i16>,
952  <vscale x 32 x i16>,
953  i8,
954  iXLen);
955
; Unmasked wide-scalar subtract; passthru is unused under the "ta" policy in
; the CHECK lines, so use poison (preferred over undef).
define <vscale x 32 x i16> @intrinsic_vwsubu.w_wx_nxv32i16_nxv32i16_i8(<vscale x 32 x i16> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwsubu.w_wx_nxv32i16_nxv32i16_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT:    vwsubu.wx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vwsubu.w.nxv32i16.i8(
    <vscale x 32 x i16> poison,
    <vscale x 32 x i16> %0,
    i8 %1,
    iXLen %2)

  ret <vscale x 32 x i16> %a
}
971
972declare <vscale x 32 x i16> @llvm.riscv.vwsubu.w.mask.nxv32i16.i8(
973  <vscale x 32 x i16>,
974  <vscale x 32 x i16>,
975  i8,
976  <vscale x 32 x i1>,
977  iXLen,
978  iXLen);
979
; Masked form: %0 is the merge value, %3 the mask (v0); trailing iXLen 1
; selects the tail-agnostic, mask-undisturbed policy ("ta, mu" below).
define <vscale x 32 x i16> @intrinsic_vwsubu.w_mask_wx_nxv32i16_nxv32i16_i8(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i8 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv32i16_nxv32i16_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT:    vwsubu.wx v8, v16, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vwsubu.w.mask.nxv32i16.i8(
    <vscale x 32 x i16> %0,
    <vscale x 32 x i16> %1,
    i8 %2,
    <vscale x 32 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 32 x i16> %a
}
996
997declare <vscale x 1 x i32> @llvm.riscv.vwsubu.w.nxv1i32.i16(
998  <vscale x 1 x i32>,
999  <vscale x 1 x i32>,
1000  i16,
1001  iXLen);
1002
; Unmasked wide-scalar subtract; passthru is unused under the "ta" policy in
; the CHECK lines, so use poison (preferred over undef).
define <vscale x 1 x i32> @intrinsic_vwsubu.w_wx_nxv1i32_nxv1i32_i16(<vscale x 1 x i32> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwsubu.w_wx_nxv1i32_nxv1i32_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vwsubu.wx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vwsubu.w.nxv1i32.i16(
    <vscale x 1 x i32> poison,
    <vscale x 1 x i32> %0,
    i16 %1,
    iXLen %2)

  ret <vscale x 1 x i32> %a
}
1018
1019declare <vscale x 1 x i32> @llvm.riscv.vwsubu.w.mask.nxv1i32.i16(
1020  <vscale x 1 x i32>,
1021  <vscale x 1 x i32>,
1022  i16,
1023  <vscale x 1 x i1>,
1024  iXLen,
1025  iXLen);
1026
; Masked form: %0 is the merge value, %3 the mask (v0); trailing iXLen 1
; selects the tail-agnostic, mask-undisturbed policy ("ta, mu" below).
define <vscale x 1 x i32> @intrinsic_vwsubu.w_mask_wx_nxv1i32_nxv1i32_i16(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i16 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv1i32_nxv1i32_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT:    vwsubu.wx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vwsubu.w.mask.nxv1i32.i16(
    <vscale x 1 x i32> %0,
    <vscale x 1 x i32> %1,
    i16 %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x i32> %a
}
1043
1044declare <vscale x 2 x i32> @llvm.riscv.vwsubu.w.nxv2i32.i16(
1045  <vscale x 2 x i32>,
1046  <vscale x 2 x i32>,
1047  i16,
1048  iXLen);
1049
; Unmasked wide-scalar subtract; passthru is unused under the "ta" policy in
; the CHECK lines, so use poison (preferred over undef).
define <vscale x 2 x i32> @intrinsic_vwsubu.w_wx_nxv2i32_nxv2i32_i16(<vscale x 2 x i32> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwsubu.w_wx_nxv2i32_nxv2i32_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT:    vwsubu.wx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vwsubu.w.nxv2i32.i16(
    <vscale x 2 x i32> poison,
    <vscale x 2 x i32> %0,
    i16 %1,
    iXLen %2)

  ret <vscale x 2 x i32> %a
}
1065
1066declare <vscale x 2 x i32> @llvm.riscv.vwsubu.w.mask.nxv2i32.i16(
1067  <vscale x 2 x i32>,
1068  <vscale x 2 x i32>,
1069  i16,
1070  <vscale x 2 x i1>,
1071  iXLen,
1072  iXLen);
1073
; Masked form: %0 is the merge value, %3 the mask (v0); trailing iXLen 1
; selects the tail-agnostic, mask-undisturbed policy ("ta, mu" below).
define <vscale x 2 x i32> @intrinsic_vwsubu.w_mask_wx_nxv2i32_nxv2i32_i16(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i16 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv2i32_nxv2i32_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT:    vwsubu.wx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vwsubu.w.mask.nxv2i32.i16(
    <vscale x 2 x i32> %0,
    <vscale x 2 x i32> %1,
    i16 %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x i32> %a
}
1090
1091declare <vscale x 4 x i32> @llvm.riscv.vwsubu.w.nxv4i32.i16(
1092  <vscale x 4 x i32>,
1093  <vscale x 4 x i32>,
1094  i16,
1095  iXLen);
1096
; Unmasked wide-scalar subtract; passthru is unused under the "ta" policy in
; the CHECK lines, so use poison (preferred over undef).
define <vscale x 4 x i32> @intrinsic_vwsubu.w_wx_nxv4i32_nxv4i32_i16(<vscale x 4 x i32> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwsubu.w_wx_nxv4i32_nxv4i32_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vwsubu.wx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vwsubu.w.nxv4i32.i16(
    <vscale x 4 x i32> poison,
    <vscale x 4 x i32> %0,
    i16 %1,
    iXLen %2)

  ret <vscale x 4 x i32> %a
}
1112
1113declare <vscale x 4 x i32> @llvm.riscv.vwsubu.w.mask.nxv4i32.i16(
1114  <vscale x 4 x i32>,
1115  <vscale x 4 x i32>,
1116  i16,
1117  <vscale x 4 x i1>,
1118  iXLen,
1119  iXLen);
1120
; Masked form: %0 is the merge value, %3 the mask (v0); trailing iXLen 1
; selects the tail-agnostic, mask-undisturbed policy ("ta, mu" below).
define <vscale x 4 x i32> @intrinsic_vwsubu.w_mask_wx_nxv4i32_nxv4i32_i16(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i16 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv4i32_nxv4i32_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT:    vwsubu.wx v8, v10, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vwsubu.w.mask.nxv4i32.i16(
    <vscale x 4 x i32> %0,
    <vscale x 4 x i32> %1,
    i16 %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x i32> %a
}
1137
1138declare <vscale x 8 x i32> @llvm.riscv.vwsubu.w.nxv8i32.i16(
1139  <vscale x 8 x i32>,
1140  <vscale x 8 x i32>,
1141  i16,
1142  iXLen);
1143
; Unmasked wide-scalar subtract; passthru is unused under the "ta" policy in
; the CHECK lines, so use poison (preferred over undef).
define <vscale x 8 x i32> @intrinsic_vwsubu.w_wx_nxv8i32_nxv8i32_i16(<vscale x 8 x i32> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwsubu.w_wx_nxv8i32_nxv8i32_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT:    vwsubu.wx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vwsubu.w.nxv8i32.i16(
    <vscale x 8 x i32> poison,
    <vscale x 8 x i32> %0,
    i16 %1,
    iXLen %2)

  ret <vscale x 8 x i32> %a
}
1159
1160declare <vscale x 8 x i32> @llvm.riscv.vwsubu.w.mask.nxv8i32.i16(
1161  <vscale x 8 x i32>,
1162  <vscale x 8 x i32>,
1163  i16,
1164  <vscale x 8 x i1>,
1165  iXLen,
1166  iXLen);
1167
; Masked form: %0 is the merge value, %3 the mask (v0); trailing iXLen 1
; selects the tail-agnostic, mask-undisturbed policy ("ta, mu" below).
define <vscale x 8 x i32> @intrinsic_vwsubu.w_mask_wx_nxv8i32_nxv8i32_i16(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i16 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv8i32_nxv8i32_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT:    vwsubu.wx v8, v12, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vwsubu.w.mask.nxv8i32.i16(
    <vscale x 8 x i32> %0,
    <vscale x 8 x i32> %1,
    i16 %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x i32> %a
}
1184
1185declare <vscale x 16 x i32> @llvm.riscv.vwsubu.w.nxv16i32.i16(
1186  <vscale x 16 x i32>,
1187  <vscale x 16 x i32>,
1188  i16,
1189  iXLen);
1190
; Unmasked wide-scalar subtract; passthru is unused under the "ta" policy in
; the CHECK lines, so use poison (preferred over undef).
define <vscale x 16 x i32> @intrinsic_vwsubu.w_wx_nxv16i32_nxv16i32_i16(<vscale x 16 x i32> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwsubu.w_wx_nxv16i32_nxv16i32_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT:    vwsubu.wx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vwsubu.w.nxv16i32.i16(
    <vscale x 16 x i32> poison,
    <vscale x 16 x i32> %0,
    i16 %1,
    iXLen %2)

  ret <vscale x 16 x i32> %a
}
1206
1207declare <vscale x 16 x i32> @llvm.riscv.vwsubu.w.mask.nxv16i32.i16(
1208  <vscale x 16 x i32>,
1209  <vscale x 16 x i32>,
1210  i16,
1211  <vscale x 16 x i1>,
1212  iXLen,
1213  iXLen);
1214
; Masked form: %0 is the merge value, %3 the mask (v0); trailing iXLen 1
; selects the tail-agnostic, mask-undisturbed policy ("ta, mu" below).
define <vscale x 16 x i32> @intrinsic_vwsubu.w_mask_wx_nxv16i32_nxv16i32_i16(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i16 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv16i32_nxv16i32_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT:    vwsubu.wx v8, v16, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vwsubu.w.mask.nxv16i32.i16(
    <vscale x 16 x i32> %0,
    <vscale x 16 x i32> %1,
    i16 %2,
    <vscale x 16 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 16 x i32> %a
}
1231
1232declare <vscale x 1 x i64> @llvm.riscv.vwsubu.w.nxv1i64.i32(
1233  <vscale x 1 x i64>,
1234  <vscale x 1 x i64>,
1235  i32,
1236  iXLen);
1237
; Unmasked wide-scalar subtract; passthru is unused under the "ta" policy in
; the CHECK lines, so use poison (preferred over undef).
define <vscale x 1 x i64> @intrinsic_vwsubu.w_wx_nxv1i64_nxv1i64_i32(<vscale x 1 x i64> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwsubu.w_wx_nxv1i64_nxv1i64_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    vwsubu.wx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vwsubu.w.nxv1i64.i32(
    <vscale x 1 x i64> poison,
    <vscale x 1 x i64> %0,
    i32 %1,
    iXLen %2)

  ret <vscale x 1 x i64> %a
}
1253
1254declare <vscale x 1 x i64> @llvm.riscv.vwsubu.w.mask.nxv1i64.i32(
1255  <vscale x 1 x i64>,
1256  <vscale x 1 x i64>,
1257  i32,
1258  <vscale x 1 x i1>,
1259  iXLen,
1260  iXLen);
1261
; Masked form: %0 is the merge value, %3 the mask (v0); trailing iXLen 1
; selects the tail-agnostic, mask-undisturbed policy ("ta, mu" below).
define <vscale x 1 x i64> @intrinsic_vwsubu.w_mask_wx_nxv1i64_nxv1i64_i32(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i32 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv1i64_nxv1i64_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT:    vwsubu.wx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vwsubu.w.mask.nxv1i64.i32(
    <vscale x 1 x i64> %0,
    <vscale x 1 x i64> %1,
    i32 %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x i64> %a
}
1278
1279declare <vscale x 2 x i64> @llvm.riscv.vwsubu.w.nxv2i64.i32(
1280  <vscale x 2 x i64>,
1281  <vscale x 2 x i64>,
1282  i32,
1283  iXLen);
1284
; Unmasked wide-scalar subtract; passthru is unused under the "ta" policy in
; the CHECK lines, so use poison (preferred over undef).
define <vscale x 2 x i64> @intrinsic_vwsubu.w_wx_nxv2i64_nxv2i64_i32(<vscale x 2 x i64> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwsubu.w_wx_nxv2i64_nxv2i64_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vwsubu.wx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vwsubu.w.nxv2i64.i32(
    <vscale x 2 x i64> poison,
    <vscale x 2 x i64> %0,
    i32 %1,
    iXLen %2)

  ret <vscale x 2 x i64> %a
}
1300
1301declare <vscale x 2 x i64> @llvm.riscv.vwsubu.w.mask.nxv2i64.i32(
1302  <vscale x 2 x i64>,
1303  <vscale x 2 x i64>,
1304  i32,
1305  <vscale x 2 x i1>,
1306  iXLen,
1307  iXLen);
1308
; Masked form: %0 is the merge value, %3 the mask (v0); trailing iXLen 1
; selects the tail-agnostic, mask-undisturbed policy ("ta, mu" below).
define <vscale x 2 x i64> @intrinsic_vwsubu.w_mask_wx_nxv2i64_nxv2i64_i32(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i32 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv2i64_nxv2i64_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT:    vwsubu.wx v8, v10, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vwsubu.w.mask.nxv2i64.i32(
    <vscale x 2 x i64> %0,
    <vscale x 2 x i64> %1,
    i32 %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x i64> %a
}
1325
1326declare <vscale x 4 x i64> @llvm.riscv.vwsubu.w.nxv4i64.i32(
1327  <vscale x 4 x i64>,
1328  <vscale x 4 x i64>,
1329  i32,
1330  iXLen);
1331
; Unmasked wide-scalar subtract; passthru is unused under the "ta" policy in
; the CHECK lines, so use poison (preferred over undef).
define <vscale x 4 x i64> @intrinsic_vwsubu.w_wx_nxv4i64_nxv4i64_i32(<vscale x 4 x i64> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwsubu.w_wx_nxv4i64_nxv4i64_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT:    vwsubu.wx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vwsubu.w.nxv4i64.i32(
    <vscale x 4 x i64> poison,
    <vscale x 4 x i64> %0,
    i32 %1,
    iXLen %2)

  ret <vscale x 4 x i64> %a
}
1347
1348declare <vscale x 4 x i64> @llvm.riscv.vwsubu.w.mask.nxv4i64.i32(
1349  <vscale x 4 x i64>,
1350  <vscale x 4 x i64>,
1351  i32,
1352  <vscale x 4 x i1>,
1353  iXLen,
1354  iXLen);
1355
; Masked form: %0 is the merge value, %3 the mask (v0); trailing iXLen 1
; selects the tail-agnostic, mask-undisturbed policy ("ta, mu" below).
define <vscale x 4 x i64> @intrinsic_vwsubu.w_mask_wx_nxv4i64_nxv4i64_i32(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i32 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv4i64_nxv4i64_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT:    vwsubu.wx v8, v12, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vwsubu.w.mask.nxv4i64.i32(
    <vscale x 4 x i64> %0,
    <vscale x 4 x i64> %1,
    i32 %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x i64> %a
}
1372
1373declare <vscale x 8 x i64> @llvm.riscv.vwsubu.w.nxv8i64.i32(
1374  <vscale x 8 x i64>,
1375  <vscale x 8 x i64>,
1376  i32,
1377  iXLen);
1378
; Unmasked wide-scalar subtract; passthru is unused under the "ta" policy in
; the CHECK lines, so use poison (preferred over undef).
define <vscale x 8 x i64> @intrinsic_vwsubu.w_wx_nxv8i64_nxv8i64_i32(<vscale x 8 x i64> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwsubu.w_wx_nxv8i64_nxv8i64_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT:    vwsubu.wx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vwsubu.w.nxv8i64.i32(
    <vscale x 8 x i64> poison,
    <vscale x 8 x i64> %0,
    i32 %1,
    iXLen %2)

  ret <vscale x 8 x i64> %a
}
1394
1395declare <vscale x 8 x i64> @llvm.riscv.vwsubu.w.mask.nxv8i64.i32(
1396  <vscale x 8 x i64>,
1397  <vscale x 8 x i64>,
1398  i32,
1399  <vscale x 8 x i1>,
1400  iXLen,
1401  iXLen);
1402
; Masked form: %0 is the merge value, %3 the mask (v0); trailing iXLen 1
; selects the tail-agnostic, mask-undisturbed policy ("ta, mu" below).
define <vscale x 8 x i64> @intrinsic_vwsubu.w_mask_wx_nxv8i64_nxv8i64_i32(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i32 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv8i64_nxv8i64_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT:    vwsubu.wx v8, v16, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vwsubu.w.mask.nxv8i64.i32(
    <vscale x 8 x i64> %0,
    <vscale x 8 x i64> %1,
    i32 %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x i64> %a
}
1419
; "Tie" tests for the masked vector-vector form: the same value (%0) is passed
; as both the merge operand and the wide first source, so the destination
; register is tied to the wide source (vwsubu.wv v8, v8, ... in the CHECK lines).
define <vscale x 1 x i16> @intrinsic_vwsubu.w_mask_wv_tie_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_tie_nxv1i16_nxv1i16_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT:    vwsubu.wv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vwsubu.w.mask.nxv1i16.nxv1i8(
    <vscale x 1 x i16> %0,
    <vscale x 1 x i16> %0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 1 x i16> %a
}

define <vscale x 2 x i16> @intrinsic_vwsubu.w_mask_wv_tie_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_tie_nxv2i16_nxv2i16_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
; CHECK-NEXT:    vwsubu.wv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vwsubu.w.mask.nxv2i16.nxv2i8(
    <vscale x 2 x i16> %0,
    <vscale x 2 x i16> %0,
    <vscale x 2 x i8> %1,
    <vscale x 2 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 2 x i16> %a
}

define <vscale x 4 x i16> @intrinsic_vwsubu.w_mask_wv_tie_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_tie_nxv4i16_nxv4i16_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
; CHECK-NEXT:    vwsubu.wv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vwsubu.w.mask.nxv4i16.nxv4i8(
    <vscale x 4 x i16> %0,
    <vscale x 4 x i16> %0,
    <vscale x 4 x i8> %1,
    <vscale x 4 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 4 x i16> %a
}

define <vscale x 8 x i16> @intrinsic_vwsubu.w_mask_wv_tie_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_tie_nxv8i16_nxv8i16_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
; CHECK-NEXT:    vwsubu.wv v8, v8, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vwsubu.w.mask.nxv8i16.nxv8i8(
    <vscale x 8 x i16> %0,
    <vscale x 8 x i16> %0,
    <vscale x 8 x i8> %1,
    <vscale x 8 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 8 x i16> %a
}

define <vscale x 16 x i16> @intrinsic_vwsubu.w_mask_wv_tie_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_tie_nxv16i16_nxv16i16_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
; CHECK-NEXT:    vwsubu.wv v8, v8, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vwsubu.w.mask.nxv16i16.nxv16i8(
    <vscale x 16 x i16> %0,
    <vscale x 16 x i16> %0,
    <vscale x 16 x i8> %1,
    <vscale x 16 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 16 x i16> %a
}

define <vscale x 32 x i16> @intrinsic_vwsubu.w_mask_wv_tie_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_tie_nxv32i16_nxv32i16_nxv32i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
; CHECK-NEXT:    vwsubu.wv v8, v8, v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vwsubu.w.mask.nxv32i16.nxv32i8(
    <vscale x 32 x i16> %0,
    <vscale x 32 x i16> %0,
    <vscale x 32 x i8> %1,
    <vscale x 32 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 32 x i16> %a
}

define <vscale x 1 x i32> @intrinsic_vwsubu.w_mask_wv_tie_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_tie_nxv1i32_nxv1i32_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT:    vwsubu.wv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vwsubu.w.mask.nxv1i32.nxv1i16(
    <vscale x 1 x i32> %0,
    <vscale x 1 x i32> %0,
    <vscale x 1 x i16> %1,
    <vscale x 1 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 1 x i32> %a
}

define <vscale x 2 x i32> @intrinsic_vwsubu.w_mask_wv_tie_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_tie_nxv2i32_nxv2i32_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT:    vwsubu.wv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vwsubu.w.mask.nxv2i32.nxv2i16(
    <vscale x 2 x i32> %0,
    <vscale x 2 x i32> %0,
    <vscale x 2 x i16> %1,
    <vscale x 2 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 2 x i32> %a
}

define <vscale x 4 x i32> @intrinsic_vwsubu.w_mask_wv_tie_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_tie_nxv4i32_nxv4i32_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT:    vwsubu.wv v8, v8, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vwsubu.w.mask.nxv4i32.nxv4i16(
    <vscale x 4 x i32> %0,
    <vscale x 4 x i32> %0,
    <vscale x 4 x i16> %1,
    <vscale x 4 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 4 x i32> %a
}

define <vscale x 8 x i32> @intrinsic_vwsubu.w_mask_wv_tie_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_tie_nxv8i32_nxv8i32_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT:    vwsubu.wv v8, v8, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vwsubu.w.mask.nxv8i32.nxv8i16(
    <vscale x 8 x i32> %0,
    <vscale x 8 x i32> %0,
    <vscale x 8 x i16> %1,
    <vscale x 8 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 8 x i32> %a
}

define <vscale x 16 x i32> @intrinsic_vwsubu.w_mask_wv_tie_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_tie_nxv16i32_nxv16i32_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT:    vwsubu.wv v8, v8, v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vwsubu.w.mask.nxv16i32.nxv16i16(
    <vscale x 16 x i32> %0,
    <vscale x 16 x i32> %0,
    <vscale x 16 x i16> %1,
    <vscale x 16 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 16 x i32> %a
}

define <vscale x 1 x i64> @intrinsic_vwsubu.w_mask_wv_tie_nxv1i64_nxv1i64_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_tie_nxv1i64_nxv1i64_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT:    vwsubu.wv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vwsubu.w.mask.nxv1i64.nxv1i32(
    <vscale x 1 x i64> %0,
    <vscale x 1 x i64> %0,
    <vscale x 1 x i32> %1,
    <vscale x 1 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 1 x i64> %a
}

define <vscale x 2 x i64> @intrinsic_vwsubu.w_mask_wv_tie_nxv2i64_nxv2i64_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_tie_nxv2i64_nxv2i64_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT:    vwsubu.wv v8, v8, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vwsubu.w.mask.nxv2i64.nxv2i32(
    <vscale x 2 x i64> %0,
    <vscale x 2 x i64> %0,
    <vscale x 2 x i32> %1,
    <vscale x 2 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 2 x i64> %a
}

define <vscale x 4 x i64> @intrinsic_vwsubu.w_mask_wv_tie_nxv4i64_nxv4i64_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_tie_nxv4i64_nxv4i64_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT:    vwsubu.wv v8, v8, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vwsubu.w.mask.nxv4i64.nxv4i32(
    <vscale x 4 x i64> %0,
    <vscale x 4 x i64> %0,
    <vscale x 4 x i32> %1,
    <vscale x 4 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 4 x i64> %a
}

define <vscale x 8 x i64> @intrinsic_vwsubu.w_mask_wv_tie_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_tie_nxv8i64_nxv8i64_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT:    vwsubu.wv v8, v8, v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vwsubu.w.mask.nxv8i64.nxv8i32(
    <vscale x 8 x i64> %0,
    <vscale x 8 x i64> %0,
    <vscale x 8 x i32> %1,
    <vscale x 8 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 8 x i64> %a
}
1674
1675define <vscale x 1 x i16> @intrinsic_vwsubu.w_mask_wx_tie_nxv1i16_nxv1i16_i8(<vscale x 1 x i16> %0, i8 %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
1676; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_tie_nxv1i16_nxv1i16_i8:
1677; CHECK:       # %bb.0: # %entry
1678; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
1679; CHECK-NEXT:    vwsubu.wx v8, v8, a0, v0.t
1680; CHECK-NEXT:    ret
1681entry:
1682  %a = call <vscale x 1 x i16> @llvm.riscv.vwsubu.w.mask.nxv1i16.i8(
1683    <vscale x 1 x i16> %0,
1684    <vscale x 1 x i16> %0,
1685    i8 %1,
1686    <vscale x 1 x i1> %2,
1687    iXLen %3, iXLen 1)
1688
1689  ret <vscale x 1 x i16> %a
1690}
1691
1692define <vscale x 2 x i16> @intrinsic_vwsubu.w_mask_wx_tie_nxv2i16_nxv2i16_i8(<vscale x 2 x i16> %0, i8 %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
1693; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_tie_nxv2i16_nxv2i16_i8:
1694; CHECK:       # %bb.0: # %entry
1695; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
1696; CHECK-NEXT:    vwsubu.wx v8, v8, a0, v0.t
1697; CHECK-NEXT:    ret
1698entry:
1699  %a = call <vscale x 2 x i16> @llvm.riscv.vwsubu.w.mask.nxv2i16.i8(
1700    <vscale x 2 x i16> %0,
1701    <vscale x 2 x i16> %0,
1702    i8 %1,
1703    <vscale x 2 x i1> %2,
1704    iXLen %3, iXLen 1)
1705
1706  ret <vscale x 2 x i16> %a
1707}
1708
1709define <vscale x 4 x i16> @intrinsic_vwsubu.w_mask_wx_tie_nxv4i16_nxv4i16_i8(<vscale x 4 x i16> %0, i8 %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
1710; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_tie_nxv4i16_nxv4i16_i8:
1711; CHECK:       # %bb.0: # %entry
1712; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
1713; CHECK-NEXT:    vwsubu.wx v8, v8, a0, v0.t
1714; CHECK-NEXT:    ret
1715entry:
1716  %a = call <vscale x 4 x i16> @llvm.riscv.vwsubu.w.mask.nxv4i16.i8(
1717    <vscale x 4 x i16> %0,
1718    <vscale x 4 x i16> %0,
1719    i8 %1,
1720    <vscale x 4 x i1> %2,
1721    iXLen %3, iXLen 1)
1722
1723  ret <vscale x 4 x i16> %a
1724}
1725
1726define <vscale x 8 x i16> @intrinsic_vwsubu.w_mask_wx_tie_nxv8i16_nxv8i16_i8(<vscale x 8 x i16> %0, i8 %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
1727; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_tie_nxv8i16_nxv8i16_i8:
1728; CHECK:       # %bb.0: # %entry
1729; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
1730; CHECK-NEXT:    vwsubu.wx v8, v8, a0, v0.t
1731; CHECK-NEXT:    ret
1732entry:
1733  %a = call <vscale x 8 x i16> @llvm.riscv.vwsubu.w.mask.nxv8i16.i8(
1734    <vscale x 8 x i16> %0,
1735    <vscale x 8 x i16> %0,
1736    i8 %1,
1737    <vscale x 8 x i1> %2,
1738    iXLen %3, iXLen 1)
1739
1740  ret <vscale x 8 x i16> %a
1741}
1742
1743define <vscale x 16 x i16> @intrinsic_vwsubu.w_mask_wx_tie_nxv16i16_nxv16i16_i8(<vscale x 16 x i16> %0, i8 %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
1744; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_tie_nxv16i16_nxv16i16_i8:
1745; CHECK:       # %bb.0: # %entry
1746; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
1747; CHECK-NEXT:    vwsubu.wx v8, v8, a0, v0.t
1748; CHECK-NEXT:    ret
1749entry:
1750  %a = call <vscale x 16 x i16> @llvm.riscv.vwsubu.w.mask.nxv16i16.i8(
1751    <vscale x 16 x i16> %0,
1752    <vscale x 16 x i16> %0,
1753    i8 %1,
1754    <vscale x 16 x i1> %2,
1755    iXLen %3, iXLen 1)
1756
1757  ret <vscale x 16 x i16> %a
1758}
1759
1760define <vscale x 32 x i16> @intrinsic_vwsubu.w_mask_wx_tie_nxv32i16_nxv32i16_i8(<vscale x 32 x i16> %0, i8 %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
1761; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_tie_nxv32i16_nxv32i16_i8:
1762; CHECK:       # %bb.0: # %entry
1763; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
1764; CHECK-NEXT:    vwsubu.wx v8, v8, a0, v0.t
1765; CHECK-NEXT:    ret
1766entry:
1767  %a = call <vscale x 32 x i16> @llvm.riscv.vwsubu.w.mask.nxv32i16.i8(
1768    <vscale x 32 x i16> %0,
1769    <vscale x 32 x i16> %0,
1770    i8 %1,
1771    <vscale x 32 x i1> %2,
1772    iXLen %3, iXLen 1)
1773
1774  ret <vscale x 32 x i16> %a
1775}
1776
; Masked vwsubu.wx, tied passthru/wide source (%0 passed twice): expect
; vwsubu.wx v8, v8, a0 with no register move.
define <vscale x 1 x i32> @intrinsic_vwsubu.w_mask_wx_tie_nxv1i32_nxv1i32_i16(<vscale x 1 x i32> %0, i16 %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_tie_nxv1i32_nxv1i32_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT:    vwsubu.wx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vwsubu.w.mask.nxv1i32.i16(
    <vscale x 1 x i32> %0,
    <vscale x 1 x i32> %0,
    i16 %1,
    <vscale x 1 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 1 x i32> %a
}
1793
; Masked vwsubu.wx, tied passthru/wide source (%0 passed twice): expect
; vwsubu.wx v8, v8, a0 with no register move.
define <vscale x 2 x i32> @intrinsic_vwsubu.w_mask_wx_tie_nxv2i32_nxv2i32_i16(<vscale x 2 x i32> %0, i16 %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_tie_nxv2i32_nxv2i32_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT:    vwsubu.wx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vwsubu.w.mask.nxv2i32.i16(
    <vscale x 2 x i32> %0,
    <vscale x 2 x i32> %0,
    i16 %1,
    <vscale x 2 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 2 x i32> %a
}
1810
; Masked vwsubu.wx, tied passthru/wide source (%0 passed twice): expect
; vwsubu.wx v8, v8, a0 with no register move.
define <vscale x 4 x i32> @intrinsic_vwsubu.w_mask_wx_tie_nxv4i32_nxv4i32_i16(<vscale x 4 x i32> %0, i16 %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_tie_nxv4i32_nxv4i32_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT:    vwsubu.wx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vwsubu.w.mask.nxv4i32.i16(
    <vscale x 4 x i32> %0,
    <vscale x 4 x i32> %0,
    i16 %1,
    <vscale x 4 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 4 x i32> %a
}
1827
; Masked vwsubu.wx, tied passthru/wide source (%0 passed twice): expect
; vwsubu.wx v8, v8, a0 with no register move.
define <vscale x 8 x i32> @intrinsic_vwsubu.w_mask_wx_tie_nxv8i32_nxv8i32_i16(<vscale x 8 x i32> %0, i16 %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_tie_nxv8i32_nxv8i32_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT:    vwsubu.wx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vwsubu.w.mask.nxv8i32.i16(
    <vscale x 8 x i32> %0,
    <vscale x 8 x i32> %0,
    i16 %1,
    <vscale x 8 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 8 x i32> %a
}
1844
; Masked vwsubu.wx, tied passthru/wide source (%0 passed twice): expect
; vwsubu.wx v8, v8, a0 with no register move.
define <vscale x 16 x i32> @intrinsic_vwsubu.w_mask_wx_tie_nxv16i32_nxv16i32_i16(<vscale x 16 x i32> %0, i16 %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_tie_nxv16i32_nxv16i32_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT:    vwsubu.wx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vwsubu.w.mask.nxv16i32.i16(
    <vscale x 16 x i32> %0,
    <vscale x 16 x i32> %0,
    i16 %1,
    <vscale x 16 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 16 x i32> %a
}
1861
; Masked vwsubu.wx, tied passthru/wide source (%0 passed twice): expect
; vwsubu.wx v8, v8, a0 with no register move.
define <vscale x 1 x i64> @intrinsic_vwsubu.w_mask_wx_tie_nxv1i64_nxv1i64_i32(<vscale x 1 x i64> %0, i32 %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_tie_nxv1i64_nxv1i64_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT:    vwsubu.wx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vwsubu.w.mask.nxv1i64.i32(
    <vscale x 1 x i64> %0,
    <vscale x 1 x i64> %0,
    i32 %1,
    <vscale x 1 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 1 x i64> %a
}
1878
; Masked vwsubu.wx, tied passthru/wide source (%0 passed twice): expect
; vwsubu.wx v8, v8, a0 with no register move.
define <vscale x 2 x i64> @intrinsic_vwsubu.w_mask_wx_tie_nxv2i64_nxv2i64_i32(<vscale x 2 x i64> %0, i32 %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_tie_nxv2i64_nxv2i64_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT:    vwsubu.wx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vwsubu.w.mask.nxv2i64.i32(
    <vscale x 2 x i64> %0,
    <vscale x 2 x i64> %0,
    i32 %1,
    <vscale x 2 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 2 x i64> %a
}
1895
; Masked vwsubu.wx, tied passthru/wide source (%0 passed twice): expect
; vwsubu.wx v8, v8, a0 with no register move.
define <vscale x 4 x i64> @intrinsic_vwsubu.w_mask_wx_tie_nxv4i64_nxv4i64_i32(<vscale x 4 x i64> %0, i32 %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_tie_nxv4i64_nxv4i64_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT:    vwsubu.wx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vwsubu.w.mask.nxv4i64.i32(
    <vscale x 4 x i64> %0,
    <vscale x 4 x i64> %0,
    i32 %1,
    <vscale x 4 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 4 x i64> %a
}
1912
; Masked vwsubu.wx, tied passthru/wide source (%0 passed twice): expect
; vwsubu.wx v8, v8, a0 with no register move.
define <vscale x 8 x i64> @intrinsic_vwsubu.w_mask_wx_tie_nxv8i64_nxv8i64_i32(<vscale x 8 x i64> %0, i32 %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_tie_nxv8i64_nxv8i64_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT:    vwsubu.wx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vwsubu.w.mask.nxv8i64.i32(
    <vscale x 8 x i64> %0,
    <vscale x 8 x i64> %0,
    i32 %1,
    <vscale x 8 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 8 x i64> %a
}
1929
; Unmasked vwsubu.wv with the argument order reversed relative to the
; instruction's operand order: the narrow source (%0) arrives in v8 and the
; wide source (%1) after it, so the CHECK lines expect the result computed
; into a scratch register group and then copied back to v8.
define <vscale x 1 x i16> @intrinsic_vwsubu.w_wv_untie_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwsubu.w_wv_untie_nxv1i16_nxv1i16_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vwsubu.wv v10, v9, v8
; CHECK-NEXT:    vmv1r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vwsubu.w.nxv1i16.nxv1i8(
    <vscale x 1 x i16> undef,
    <vscale x 1 x i16> %1,
    <vscale x 1 x i8> %0,
    iXLen %2)

  ret <vscale x 1 x i16> %a
}
1946
; Unmasked vwsubu.wv, reversed argument order (narrow %0 in v8, wide %1
; after it): result goes to a scratch register group, then copied to v8.
define <vscale x 2 x i16> @intrinsic_vwsubu.w_wv_untie_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwsubu.w_wv_untie_nxv2i16_nxv2i16_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT:    vwsubu.wv v10, v9, v8
; CHECK-NEXT:    vmv1r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vwsubu.w.nxv2i16.nxv2i8(
    <vscale x 2 x i16> undef,
    <vscale x 2 x i16> %1,
    <vscale x 2 x i8> %0,
    iXLen %2)

  ret <vscale x 2 x i16> %a
}
1963
; Unmasked vwsubu.wv, reversed argument order (narrow %0 in v8, wide %1
; after it): result goes to a scratch register group, then copied to v8.
define <vscale x 4 x i16> @intrinsic_vwsubu.w_wv_untie_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwsubu.w_wv_untie_nxv4i16_nxv4i16_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    vwsubu.wv v10, v9, v8
; CHECK-NEXT:    vmv1r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vwsubu.w.nxv4i16.nxv4i8(
    <vscale x 4 x i16> undef,
    <vscale x 4 x i16> %1,
    <vscale x 4 x i8> %0,
    iXLen %2)

  ret <vscale x 4 x i16> %a
}
1980
; Unmasked vwsubu.wv, reversed argument order (narrow %0 in v8, wide %1
; after it): result goes to a scratch register group, then copied to v8.
define <vscale x 8 x i16> @intrinsic_vwsubu.w_wv_untie_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwsubu.w_wv_untie_nxv8i16_nxv8i16_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT:    vwsubu.wv v12, v10, v8
; CHECK-NEXT:    vmv2r.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vwsubu.w.nxv8i16.nxv8i8(
    <vscale x 8 x i16> undef,
    <vscale x 8 x i16> %1,
    <vscale x 8 x i8> %0,
    iXLen %2)

  ret <vscale x 8 x i16> %a
}
1997
; Unmasked vwsubu.wv, reversed argument order (narrow %0 in v8, wide %1
; after it): result goes to a scratch register group, then copied to v8.
define <vscale x 16 x i16> @intrinsic_vwsubu.w_wv_untie_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwsubu.w_wv_untie_nxv16i16_nxv16i16_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT:    vwsubu.wv v16, v12, v8
; CHECK-NEXT:    vmv4r.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vwsubu.w.nxv16i16.nxv16i8(
    <vscale x 16 x i16> undef,
    <vscale x 16 x i16> %1,
    <vscale x 16 x i8> %0,
    iXLen %2)

  ret <vscale x 16 x i16> %a
}
2014
; Unmasked vwsubu.wv, reversed argument order (narrow %0 in v8, wide %1
; after it): result goes to a scratch register group, then copied to v8.
define <vscale x 32 x i16> @intrinsic_vwsubu.w_wv_untie_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwsubu.w_wv_untie_nxv32i16_nxv32i16_nxv32i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT:    vwsubu.wv v24, v16, v8
; CHECK-NEXT:    vmv8r.v v8, v24
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vwsubu.w.nxv32i16.nxv32i8(
    <vscale x 32 x i16> undef,
    <vscale x 32 x i16> %1,
    <vscale x 32 x i8> %0,
    iXLen %2)

  ret <vscale x 32 x i16> %a
}
2031
; Unmasked vwsubu.wv, reversed argument order (narrow %0 in v8, wide %1
; after it): result goes to a scratch register group, then copied to v8.
define <vscale x 1 x i32> @intrinsic_vwsubu.w_wv_untie_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwsubu.w_wv_untie_nxv1i32_nxv1i32_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vwsubu.wv v10, v9, v8
; CHECK-NEXT:    vmv1r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vwsubu.w.nxv1i32.nxv1i16(
    <vscale x 1 x i32> undef,
    <vscale x 1 x i32> %1,
    <vscale x 1 x i16> %0,
    iXLen %2)

  ret <vscale x 1 x i32> %a
}
2048
; Unmasked vwsubu.wv, reversed argument order (narrow %0 in v8, wide %1
; after it): result goes to a scratch register group, then copied to v8.
define <vscale x 2 x i32> @intrinsic_vwsubu.w_wv_untie_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwsubu.w_wv_untie_nxv2i32_nxv2i32_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vwsubu.wv v10, v9, v8
; CHECK-NEXT:    vmv1r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vwsubu.w.nxv2i32.nxv2i16(
    <vscale x 2 x i32> undef,
    <vscale x 2 x i32> %1,
    <vscale x 2 x i16> %0,
    iXLen %2)

  ret <vscale x 2 x i32> %a
}
2065
; Unmasked vwsubu.wv, reversed argument order (narrow %0 in v8, wide %1
; after it): result goes to a scratch register group, then copied to v8.
define <vscale x 4 x i32> @intrinsic_vwsubu.w_wv_untie_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwsubu.w_wv_untie_nxv4i32_nxv4i32_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vwsubu.wv v12, v10, v8
; CHECK-NEXT:    vmv2r.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vwsubu.w.nxv4i32.nxv4i16(
    <vscale x 4 x i32> undef,
    <vscale x 4 x i32> %1,
    <vscale x 4 x i16> %0,
    iXLen %2)

  ret <vscale x 4 x i32> %a
}
2082
; Unmasked vwsubu.wv, reversed argument order (narrow %0 in v8, wide %1
; after it): result goes to a scratch register group, then copied to v8.
define <vscale x 8 x i32> @intrinsic_vwsubu.w_wv_untie_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwsubu.w_wv_untie_nxv8i32_nxv8i32_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vwsubu.wv v16, v12, v8
; CHECK-NEXT:    vmv4r.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vwsubu.w.nxv8i32.nxv8i16(
    <vscale x 8 x i32> undef,
    <vscale x 8 x i32> %1,
    <vscale x 8 x i16> %0,
    iXLen %2)

  ret <vscale x 8 x i32> %a
}
2099
; Unmasked vwsubu.wv, reversed argument order (narrow %0 in v8, wide %1
; after it): result goes to a scratch register group, then copied to v8.
define <vscale x 1 x i64> @intrinsic_vwsubu.w_wv_untie_nxv1i64_nxv1i64_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwsubu.w_wv_untie_nxv1i64_nxv1i64_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vwsubu.wv v10, v9, v8
; CHECK-NEXT:    vmv1r.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vwsubu.w.nxv1i64.nxv1i32(
    <vscale x 1 x i64> undef,
    <vscale x 1 x i64> %1,
    <vscale x 1 x i32> %0,
    iXLen %2)

  ret <vscale x 1 x i64> %a
}
2116
; Unmasked vwsubu.wv, reversed argument order (narrow %0 in v8, wide %1
; after it): result goes to a scratch register group, then copied to v8.
define <vscale x 2 x i64> @intrinsic_vwsubu.w_wv_untie_nxv2i64_nxv2i64_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwsubu.w_wv_untie_nxv2i64_nxv2i64_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vwsubu.wv v12, v10, v8
; CHECK-NEXT:    vmv2r.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vwsubu.w.nxv2i64.nxv2i32(
    <vscale x 2 x i64> undef,
    <vscale x 2 x i64> %1,
    <vscale x 2 x i32> %0,
    iXLen %2)

  ret <vscale x 2 x i64> %a
}
2133
; Unmasked vwsubu.wv, reversed argument order (narrow %0 in v8, wide %1
; after it): result goes to a scratch register group, then copied to v8.
define <vscale x 4 x i64> @intrinsic_vwsubu.w_wv_untie_nxv4i64_nxv4i64_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwsubu.w_wv_untie_nxv4i64_nxv4i64_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vwsubu.wv v16, v12, v8
; CHECK-NEXT:    vmv4r.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vwsubu.w.nxv4i64.nxv4i32(
    <vscale x 4 x i64> undef,
    <vscale x 4 x i64> %1,
    <vscale x 4 x i32> %0,
    iXLen %2)

  ret <vscale x 4 x i64> %a
}
2150
; Unmasked vwsubu.wv, reversed argument order (narrow %0 in v8, wide %1
; after it): result goes to a scratch register group, then copied to v8.
define <vscale x 8 x i64> @intrinsic_vwsubu.w_wv_untie_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vwsubu.w_wv_untie_nxv8i64_nxv8i64_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vwsubu.wv v24, v16, v8
; CHECK-NEXT:    vmv8r.v v8, v24
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vwsubu.w.nxv8i64.nxv8i32(
    <vscale x 8 x i64> undef,
    <vscale x 8 x i64> %1,
    <vscale x 8 x i32> %0,
    iXLen %2)

  ret <vscale x 8 x i64> %a
}
2167