; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64

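; This file exercises the llvm.riscv.vrsub intrinsics (reverse subtract,
; vd[i] = scalar - vs2[i]) in both the register (.vx) and immediate (.vi)
; forms, masked and unmasked, across all integer element widths and LMULs.
; The sed substitution in the RUN lines rewrites iXLen to the target's native
; XLEN type, so a single source covers both riscv32 and riscv64.
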
declare <vscale x 1 x i8> @llvm.riscv.vrsub.nxv1i8.i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  i8,
  iXLen);

define <vscale x 1 x i8> @intrinsic_vrsub_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrsub_vx_nxv1i8_nxv1i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vrsub.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vrsub.nxv1i8.i8(
    <vscale x 1 x i8> undef,
    <vscale x 1 x i8> %0,
    i8 %1,
    iXLen %2)

  ret <vscale x 1 x i8> %a
}
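
; The unmasked intrinsic's operands are a passthru (undef here), the vector
; source, the scalar minuend, and the vector length; an undef passthru lets
; the result be computed tail-agnostic, hence the "ta, ma" in the vsetvli.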

declare <vscale x 1 x i8> @llvm.riscv.vrsub.mask.nxv1i8.i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  i8,
  <vscale x 1 x i1>,
  iXLen, iXLen);

define <vscale x 1 x i8> @intrinsic_vrsub_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv1i8_nxv1i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT:    vrsub.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vrsub.mask.nxv1i8.i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    i8 %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x i8> %a
}
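
; The masked form takes a merge value (tied to the destination register), the
; source vector, the scalar, the mask in v0, the vector length, and a policy
; operand; the trailing iXLen 1 requests tail-agnostic, mask-undisturbed,
; which is why the expected vsetvli uses "ta, mu".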

declare <vscale x 2 x i8> @llvm.riscv.vrsub.nxv2i8.i8(
  <vscale x 2 x i8>,
  <vscale x 2 x i8>,
  i8,
  iXLen);

define <vscale x 2 x i8> @intrinsic_vrsub_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrsub_vx_nxv2i8_nxv2i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT:    vrsub.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vrsub.nxv2i8.i8(
    <vscale x 2 x i8> undef,
    <vscale x 2 x i8> %0,
    i8 %1,
    iXLen %2)

  ret <vscale x 2 x i8> %a
}

declare <vscale x 2 x i8> @llvm.riscv.vrsub.mask.nxv2i8.i8(
  <vscale x 2 x i8>,
  <vscale x 2 x i8>,
  i8,
  <vscale x 2 x i1>,
  iXLen, iXLen);

define <vscale x 2 x i8> @intrinsic_vrsub_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv2i8_nxv2i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT:    vrsub.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vrsub.mask.nxv2i8.i8(
    <vscale x 2 x i8> %0,
    <vscale x 2 x i8> %1,
    i8 %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x i8> %a
}

declare <vscale x 4 x i8> @llvm.riscv.vrsub.nxv4i8.i8(
  <vscale x 4 x i8>,
  <vscale x 4 x i8>,
  i8,
  iXLen);

define <vscale x 4 x i8> @intrinsic_vrsub_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrsub_vx_nxv4i8_nxv4i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vrsub.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vrsub.nxv4i8.i8(
    <vscale x 4 x i8> undef,
    <vscale x 4 x i8> %0,
    i8 %1,
    iXLen %2)

  ret <vscale x 4 x i8> %a
}

declare <vscale x 4 x i8> @llvm.riscv.vrsub.mask.nxv4i8.i8(
  <vscale x 4 x i8>,
  <vscale x 4 x i8>,
  i8,
  <vscale x 4 x i1>,
  iXLen, iXLen);

define <vscale x 4 x i8> @intrinsic_vrsub_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv4i8_nxv4i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT:    vrsub.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vrsub.mask.nxv4i8.i8(
    <vscale x 4 x i8> %0,
    <vscale x 4 x i8> %1,
    i8 %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vrsub.nxv8i8.i8(
  <vscale x 8 x i8>,
  <vscale x 8 x i8>,
  i8,
  iXLen);

define <vscale x 8 x i8> @intrinsic_vrsub_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrsub_vx_nxv8i8_nxv8i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT:    vrsub.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vrsub.nxv8i8.i8(
    <vscale x 8 x i8> undef,
    <vscale x 8 x i8> %0,
    i8 %1,
    iXLen %2)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vrsub.mask.nxv8i8.i8(
  <vscale x 8 x i8>,
  <vscale x 8 x i8>,
  i8,
  <vscale x 8 x i1>,
  iXLen, iXLen);

define <vscale x 8 x i8> @intrinsic_vrsub_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv8i8_nxv8i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT:    vrsub.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vrsub.mask.nxv8i8.i8(
    <vscale x 8 x i8> %0,
    <vscale x 8 x i8> %1,
    i8 %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 16 x i8> @llvm.riscv.vrsub.nxv16i8.i8(
  <vscale x 16 x i8>,
  <vscale x 16 x i8>,
  i8,
  iXLen);

define <vscale x 16 x i8> @intrinsic_vrsub_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrsub_vx_nxv16i8_nxv16i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT:    vrsub.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vrsub.nxv16i8.i8(
    <vscale x 16 x i8> undef,
    <vscale x 16 x i8> %0,
    i8 %1,
    iXLen %2)

  ret <vscale x 16 x i8> %a
}

declare <vscale x 16 x i8> @llvm.riscv.vrsub.mask.nxv16i8.i8(
  <vscale x 16 x i8>,
  <vscale x 16 x i8>,
  i8,
  <vscale x 16 x i1>,
  iXLen, iXLen);

define <vscale x 16 x i8> @intrinsic_vrsub_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv16i8_nxv16i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT:    vrsub.vx v8, v10, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vrsub.mask.nxv16i8.i8(
    <vscale x 16 x i8> %0,
    <vscale x 16 x i8> %1,
    i8 %2,
    <vscale x 16 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 16 x i8> %a
}

declare <vscale x 32 x i8> @llvm.riscv.vrsub.nxv32i8.i8(
  <vscale x 32 x i8>,
  <vscale x 32 x i8>,
  i8,
  iXLen);

define <vscale x 32 x i8> @intrinsic_vrsub_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrsub_vx_nxv32i8_nxv32i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT:    vrsub.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vrsub.nxv32i8.i8(
    <vscale x 32 x i8> undef,
    <vscale x 32 x i8> %0,
    i8 %1,
    iXLen %2)

  ret <vscale x 32 x i8> %a
}

declare <vscale x 32 x i8> @llvm.riscv.vrsub.mask.nxv32i8.i8(
  <vscale x 32 x i8>,
  <vscale x 32 x i8>,
  i8,
  <vscale x 32 x i1>,
  iXLen, iXLen);

define <vscale x 32 x i8> @intrinsic_vrsub_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv32i8_nxv32i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT:    vrsub.vx v8, v12, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vrsub.mask.nxv32i8.i8(
    <vscale x 32 x i8> %0,
    <vscale x 32 x i8> %1,
    i8 %2,
    <vscale x 32 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 32 x i8> %a
}

declare <vscale x 64 x i8> @llvm.riscv.vrsub.nxv64i8.i8(
  <vscale x 64 x i8>,
  <vscale x 64 x i8>,
  i8,
  iXLen);

define <vscale x 64 x i8> @intrinsic_vrsub_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrsub_vx_nxv64i8_nxv64i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT:    vrsub.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 64 x i8> @llvm.riscv.vrsub.nxv64i8.i8(
    <vscale x 64 x i8> undef,
    <vscale x 64 x i8> %0,
    i8 %1,
    iXLen %2)

  ret <vscale x 64 x i8> %a
}

declare <vscale x 64 x i8> @llvm.riscv.vrsub.mask.nxv64i8.i8(
  <vscale x 64 x i8>,
  <vscale x 64 x i8>,
  i8,
  <vscale x 64 x i1>,
  iXLen, iXLen);

define <vscale x 64 x i8> @intrinsic_vrsub_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv64i8_nxv64i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
; CHECK-NEXT:    vrsub.vx v8, v16, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 64 x i8> @llvm.riscv.vrsub.mask.nxv64i8.i8(
    <vscale x 64 x i8> %0,
    <vscale x 64 x i8> %1,
    i8 %2,
    <vscale x 64 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 64 x i8> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vrsub.nxv1i16.i16(
  <vscale x 1 x i16>,
  <vscale x 1 x i16>,
  i16,
  iXLen);

define <vscale x 1 x i16> @intrinsic_vrsub_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrsub_vx_nxv1i16_nxv1i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vrsub.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vrsub.nxv1i16.i16(
    <vscale x 1 x i16> undef,
    <vscale x 1 x i16> %0,
    i16 %1,
    iXLen %2)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vrsub.mask.nxv1i16.i16(
  <vscale x 1 x i16>,
  <vscale x 1 x i16>,
  i16,
  <vscale x 1 x i1>,
  iXLen, iXLen);

define <vscale x 1 x i16> @intrinsic_vrsub_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv1i16_nxv1i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT:    vrsub.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vrsub.mask.nxv1i16.i16(
    <vscale x 1 x i16> %0,
    <vscale x 1 x i16> %1,
    i16 %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vrsub.nxv2i16.i16(
  <vscale x 2 x i16>,
  <vscale x 2 x i16>,
  i16,
  iXLen);

define <vscale x 2 x i16> @intrinsic_vrsub_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrsub_vx_nxv2i16_nxv2i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT:    vrsub.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vrsub.nxv2i16.i16(
    <vscale x 2 x i16> undef,
    <vscale x 2 x i16> %0,
    i16 %1,
    iXLen %2)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vrsub.mask.nxv2i16.i16(
  <vscale x 2 x i16>,
  <vscale x 2 x i16>,
  i16,
  <vscale x 2 x i1>,
  iXLen, iXLen);

define <vscale x 2 x i16> @intrinsic_vrsub_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv2i16_nxv2i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT:    vrsub.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vrsub.mask.nxv2i16.i16(
    <vscale x 2 x i16> %0,
    <vscale x 2 x i16> %1,
    i16 %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vrsub.nxv4i16.i16(
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  i16,
  iXLen);

define <vscale x 4 x i16> @intrinsic_vrsub_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrsub_vx_nxv4i16_nxv4i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vrsub.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vrsub.nxv4i16.i16(
    <vscale x 4 x i16> undef,
    <vscale x 4 x i16> %0,
    i16 %1,
    iXLen %2)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vrsub.mask.nxv4i16.i16(
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  i16,
  <vscale x 4 x i1>,
  iXLen, iXLen);

define <vscale x 4 x i16> @intrinsic_vrsub_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv4i16_nxv4i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT:    vrsub.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vrsub.mask.nxv4i16.i16(
    <vscale x 4 x i16> %0,
    <vscale x 4 x i16> %1,
    i16 %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vrsub.nxv8i16.i16(
  <vscale x 8 x i16>,
  <vscale x 8 x i16>,
  i16,
  iXLen);

define <vscale x 8 x i16> @intrinsic_vrsub_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrsub_vx_nxv8i16_nxv8i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT:    vrsub.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vrsub.nxv8i16.i16(
    <vscale x 8 x i16> undef,
    <vscale x 8 x i16> %0,
    i16 %1,
    iXLen %2)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vrsub.mask.nxv8i16.i16(
  <vscale x 8 x i16>,
  <vscale x 8 x i16>,
  i16,
  <vscale x 8 x i1>,
  iXLen, iXLen);

define <vscale x 8 x i16> @intrinsic_vrsub_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv8i16_nxv8i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT:    vrsub.vx v8, v10, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vrsub.mask.nxv8i16.i16(
    <vscale x 8 x i16> %0,
    <vscale x 8 x i16> %1,
    i16 %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vrsub.nxv16i16.i16(
  <vscale x 16 x i16>,
  <vscale x 16 x i16>,
  i16,
  iXLen);

define <vscale x 16 x i16> @intrinsic_vrsub_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrsub_vx_nxv16i16_nxv16i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT:    vrsub.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vrsub.nxv16i16.i16(
    <vscale x 16 x i16> undef,
    <vscale x 16 x i16> %0,
    i16 %1,
    iXLen %2)

  ret <vscale x 16 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vrsub.mask.nxv16i16.i16(
  <vscale x 16 x i16>,
  <vscale x 16 x i16>,
  i16,
  <vscale x 16 x i1>,
  iXLen, iXLen);

define <vscale x 16 x i16> @intrinsic_vrsub_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv16i16_nxv16i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT:    vrsub.vx v8, v12, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vrsub.mask.nxv16i16.i16(
    <vscale x 16 x i16> %0,
    <vscale x 16 x i16> %1,
    i16 %2,
    <vscale x 16 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 16 x i16> %a
}

declare <vscale x 32 x i16> @llvm.riscv.vrsub.nxv32i16.i16(
  <vscale x 32 x i16>,
  <vscale x 32 x i16>,
  i16,
  iXLen);

define <vscale x 32 x i16> @intrinsic_vrsub_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrsub_vx_nxv32i16_nxv32i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
; CHECK-NEXT:    vrsub.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vrsub.nxv32i16.i16(
    <vscale x 32 x i16> undef,
    <vscale x 32 x i16> %0,
    i16 %1,
    iXLen %2)

  ret <vscale x 32 x i16> %a
}

declare <vscale x 32 x i16> @llvm.riscv.vrsub.mask.nxv32i16.i16(
  <vscale x 32 x i16>,
  <vscale x 32 x i16>,
  i16,
  <vscale x 32 x i1>,
  iXLen, iXLen);

define <vscale x 32 x i16> @intrinsic_vrsub_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv32i16_nxv32i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
; CHECK-NEXT:    vrsub.vx v8, v16, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vrsub.mask.nxv32i16.i16(
    <vscale x 32 x i16> %0,
    <vscale x 32 x i16> %1,
    i16 %2,
    <vscale x 32 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 32 x i16> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vrsub.nxv1i32.i32(
  <vscale x 1 x i32>,
  <vscale x 1 x i32>,
  i32,
  iXLen);

define <vscale x 1 x i32> @intrinsic_vrsub_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrsub_vx_nxv1i32_nxv1i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    vrsub.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vrsub.nxv1i32.i32(
    <vscale x 1 x i32> undef,
    <vscale x 1 x i32> %0,
    i32 %1,
    iXLen %2)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vrsub.mask.nxv1i32.i32(
  <vscale x 1 x i32>,
  <vscale x 1 x i32>,
  i32,
  <vscale x 1 x i1>,
  iXLen, iXLen);

define <vscale x 1 x i32> @intrinsic_vrsub_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv1i32_nxv1i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT:    vrsub.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vrsub.mask.nxv1i32.i32(
    <vscale x 1 x i32> %0,
    <vscale x 1 x i32> %1,
    i32 %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vrsub.nxv2i32.i32(
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  i32,
  iXLen);

define <vscale x 2 x i32> @intrinsic_vrsub_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrsub_vx_nxv2i32_nxv2i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vrsub.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vrsub.nxv2i32.i32(
    <vscale x 2 x i32> undef,
    <vscale x 2 x i32> %0,
    i32 %1,
    iXLen %2)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vrsub.mask.nxv2i32.i32(
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  i32,
  <vscale x 2 x i1>,
  iXLen, iXLen);

define <vscale x 2 x i32> @intrinsic_vrsub_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv2i32_nxv2i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT:    vrsub.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vrsub.mask.nxv2i32.i32(
    <vscale x 2 x i32> %0,
    <vscale x 2 x i32> %1,
    i32 %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vrsub.nxv4i32.i32(
  <vscale x 4 x i32>,
  <vscale x 4 x i32>,
  i32,
  iXLen);

define <vscale x 4 x i32> @intrinsic_vrsub_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrsub_vx_nxv4i32_nxv4i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT:    vrsub.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vrsub.nxv4i32.i32(
    <vscale x 4 x i32> undef,
    <vscale x 4 x i32> %0,
    i32 %1,
    iXLen %2)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vrsub.mask.nxv4i32.i32(
  <vscale x 4 x i32>,
  <vscale x 4 x i32>,
  i32,
  <vscale x 4 x i1>,
  iXLen, iXLen);

define <vscale x 4 x i32> @intrinsic_vrsub_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv4i32_nxv4i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT:    vrsub.vx v8, v10, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vrsub.mask.nxv4i32.i32(
    <vscale x 4 x i32> %0,
    <vscale x 4 x i32> %1,
    i32 %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vrsub.nxv8i32.i32(
  <vscale x 8 x i32>,
  <vscale x 8 x i32>,
  i32,
  iXLen);

define <vscale x 8 x i32> @intrinsic_vrsub_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrsub_vx_nxv8i32_nxv8i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT:    vrsub.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vrsub.nxv8i32.i32(
    <vscale x 8 x i32> undef,
    <vscale x 8 x i32> %0,
    i32 %1,
    iXLen %2)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vrsub.mask.nxv8i32.i32(
  <vscale x 8 x i32>,
  <vscale x 8 x i32>,
  i32,
  <vscale x 8 x i1>,
  iXLen, iXLen);

define <vscale x 8 x i32> @intrinsic_vrsub_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv8i32_nxv8i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT:    vrsub.vx v8, v12, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vrsub.mask.nxv8i32.i32(
    <vscale x 8 x i32> %0,
    <vscale x 8 x i32> %1,
    i32 %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 16 x i32> @llvm.riscv.vrsub.nxv16i32.i32(
  <vscale x 16 x i32>,
  <vscale x 16 x i32>,
  i32,
  iXLen);

define <vscale x 16 x i32> @intrinsic_vrsub_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vrsub_vx_nxv16i32_nxv16i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT:    vrsub.vx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vrsub.nxv16i32.i32(
    <vscale x 16 x i32> undef,
    <vscale x 16 x i32> %0,
    i32 %1,
    iXLen %2)

  ret <vscale x 16 x i32> %a
}

declare <vscale x 16 x i32> @llvm.riscv.vrsub.mask.nxv16i32.i32(
  <vscale x 16 x i32>,
  <vscale x 16 x i32>,
  i32,
  <vscale x 16 x i1>,
  iXLen, iXLen);

define <vscale x 16 x i32> @intrinsic_vrsub_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv16i32_nxv16i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
; CHECK-NEXT:    vrsub.vx v8, v16, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vrsub.mask.nxv16i32.i32(
    <vscale x 16 x i32> %0,
    <vscale x 16 x i32> %1,
    i32 %2,
    <vscale x 16 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 16 x i32> %a
}

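; For e64 the scalar operand does not fit in a single GPR on RV32: it arrives
; in a register pair, is spilled to the stack, and is splat into a vector with
; a zero-stride vlse64.v, after which the reverse subtract lowers to vsub.vv.
; RV64 keeps the single vrsub.vx instruction, so the two targets diverge and
; the checks below use separate RV32/RV64 prefixes.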
declare <vscale x 1 x i64> @llvm.riscv.vrsub.nxv1i64.i64(
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  i64,
  iXLen);

define <vscale x 1 x i64> @intrinsic_vrsub_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, iXLen %2) nounwind {
; RV32-LABEL: intrinsic_vrsub_vx_nxv1i64_nxv1i64_i64:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
; RV32-NEXT:    vlse64.v v9, (a0), zero
; RV32-NEXT:    vsub.vv v8, v9, v8
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vrsub_vx_nxv1i64_nxv1i64_i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; RV64-NEXT:    vrsub.vx v8, v8, a0
; RV64-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vrsub.nxv1i64.i64(
    <vscale x 1 x i64> undef,
    <vscale x 1 x i64> %0,
    i64 %1,
    iXLen %2)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vrsub.mask.nxv1i64.i64(
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  i64,
  <vscale x 1 x i1>,
  iXLen, iXLen);

define <vscale x 1 x i64> @intrinsic_vrsub_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; RV32-LABEL: intrinsic_vrsub_mask_vx_nxv1i64_nxv1i64_i64:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, mu
; RV32-NEXT:    vlse64.v v10, (a0), zero
; RV32-NEXT:    vsub.vv v8, v10, v9, v0.t
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vrsub_mask_vx_nxv1i64_nxv1i64_i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
; RV64-NEXT:    vrsub.vx v8, v9, a0, v0.t
; RV64-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vrsub.mask.nxv1i64.i64(
    <vscale x 1 x i64> %0,
    <vscale x 1 x i64> %1,
    i64 %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vrsub.nxv2i64.i64(
  <vscale x 2 x i64>,
  <vscale x 2 x i64>,
  i64,
  iXLen);

define <vscale x 2 x i64> @intrinsic_vrsub_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, iXLen %2) nounwind {
; RV32-LABEL: intrinsic_vrsub_vx_nxv2i64_nxv2i64_i64:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
; RV32-NEXT:    vlse64.v v10, (a0), zero
; RV32-NEXT:    vsub.vv v8, v10, v8
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vrsub_vx_nxv2i64_nxv2i64_i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
; RV64-NEXT:    vrsub.vx v8, v8, a0
; RV64-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vrsub.nxv2i64.i64(
    <vscale x 2 x i64> undef,
    <vscale x 2 x i64> %0,
    i64 %1,
    iXLen %2)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vrsub.mask.nxv2i64.i64(
  <vscale x 2 x i64>,
  <vscale x 2 x i64>,
  i64,
  <vscale x 2 x i1>,
  iXLen, iXLen);

define <vscale x 2 x i64> @intrinsic_vrsub_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; RV32-LABEL: intrinsic_vrsub_mask_vx_nxv2i64_nxv2i64_i64:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, mu
; RV32-NEXT:    vlse64.v v12, (a0), zero
; RV32-NEXT:    vsub.vv v8, v12, v10, v0.t
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vrsub_mask_vx_nxv2i64_nxv2i64_i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
; RV64-NEXT:    vrsub.vx v8, v10, a0, v0.t
; RV64-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vrsub.mask.nxv2i64.i64(
    <vscale x 2 x i64> %0,
    <vscale x 2 x i64> %1,
    i64 %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vrsub.nxv4i64.i64(
  <vscale x 4 x i64>,
  <vscale x 4 x i64>,
  i64,
  iXLen);

define <vscale x 4 x i64> @intrinsic_vrsub_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, iXLen %2) nounwind {
; RV32-LABEL: intrinsic_vrsub_vx_nxv4i64_nxv4i64_i64:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
; RV32-NEXT:    vlse64.v v12, (a0), zero
; RV32-NEXT:    vsub.vv v8, v12, v8
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vrsub_vx_nxv4i64_nxv4i64_i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
; RV64-NEXT:    vrsub.vx v8, v8, a0
; RV64-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vrsub.nxv4i64.i64(
    <vscale x 4 x i64> undef,
    <vscale x 4 x i64> %0,
    i64 %1,
    iXLen %2)

  ret <vscale x 4 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vrsub.mask.nxv4i64.i64(
  <vscale x 4 x i64>,
  <vscale x 4 x i64>,
  i64,
  <vscale x 4 x i1>,
  iXLen, iXLen);

define <vscale x 4 x i64> @intrinsic_vrsub_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; RV32-LABEL: intrinsic_vrsub_mask_vx_nxv4i64_nxv4i64_i64:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, mu
; RV32-NEXT:    vlse64.v v16, (a0), zero
; RV32-NEXT:    vsub.vv v8, v16, v12, v0.t
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vrsub_mask_vx_nxv4i64_nxv4i64_i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
; RV64-NEXT:    vrsub.vx v8, v12, a0, v0.t
; RV64-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vrsub.mask.nxv4i64.i64(
    <vscale x 4 x i64> %0,
    <vscale x 4 x i64> %1,
    i64 %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.vrsub.nxv8i64.i64(
  <vscale x 8 x i64>,
  <vscale x 8 x i64>,
  i64,
  iXLen);

define <vscale x 8 x i64> @intrinsic_vrsub_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, iXLen %2) nounwind {
; RV32-LABEL: intrinsic_vrsub_vx_nxv8i64_nxv8i64_i64:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
; RV32-NEXT:    vlse64.v v16, (a0), zero
; RV32-NEXT:    vsub.vv v8, v16, v8
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vrsub_vx_nxv8i64_nxv8i64_i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT:    vrsub.vx v8, v8, a0
; RV64-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vrsub.nxv8i64.i64(
    <vscale x 8 x i64> undef,
    <vscale x 8 x i64> %0,
    i64 %1,
    iXLen %2)

  ret <vscale x 8 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.vrsub.mask.nxv8i64.i64(
  <vscale x 8 x i64>,
  <vscale x 8 x i64>,
  i64,
  <vscale x 8 x i1>,
  iXLen, iXLen);

define <vscale x 8 x i64> @intrinsic_vrsub_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; RV32-LABEL: intrinsic_vrsub_mask_vx_nxv8i64_nxv8i64_i64:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, mu
; RV32-NEXT:    vlse64.v v24, (a0), zero
; RV32-NEXT:    vsub.vv v8, v24, v16, v0.t
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vrsub_mask_vx_nxv8i64_nxv8i64_i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
; RV64-NEXT:    vrsub.vx v8, v16, a0, v0.t
; RV64-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vrsub.mask.nxv8i64.i64(
    <vscale x 8 x i64> %0,
    <vscale x 8 x i64> %1,
    i64 %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x i64> %a
}

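; The remaining tests cover the immediate form. vrsub.vi encodes a 5-bit
; signed immediate (-16..15), so values such as 9 and -9 need no scalar
; register at all.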
define <vscale x 1 x i8> @intrinsic_vrsub_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vrsub_vi_nxv1i8_nxv1i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vrsub.vi v8, v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vrsub.nxv1i8.i8(
    <vscale x 1 x i8> undef,
    <vscale x 1 x i8> %0,
    i8 9,
    iXLen %1)

  ret <vscale x 1 x i8> %a
}

define <vscale x 1 x i8> @intrinsic_vrsub_mask_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv1i8_nxv1i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT:    vrsub.vi v8, v9, -9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vrsub.mask.nxv1i8.i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    i8 -9,
    <vscale x 1 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 1 x i8> %a
}

define <vscale x 2 x i8> @intrinsic_vrsub_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vrsub_vi_nxv2i8_nxv2i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT:    vrsub.vi v8, v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vrsub.nxv2i8.i8(
    <vscale x 2 x i8> undef,
    <vscale x 2 x i8> %0,
    i8 9,
    iXLen %1)

  ret <vscale x 2 x i8> %a
}

define <vscale x 2 x i8> @intrinsic_vrsub_mask_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv2i8_nxv2i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
; CHECK-NEXT:    vrsub.vi v8, v9, -9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vrsub.mask.nxv2i8.i8(
    <vscale x 2 x i8> %0,
    <vscale x 2 x i8> %1,
    i8 -9,
    <vscale x 2 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 2 x i8> %a
}

define <vscale x 4 x i8> @intrinsic_vrsub_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vrsub_vi_nxv4i8_nxv4i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    vrsub.vi v8, v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vrsub.nxv4i8.i8(
    <vscale x 4 x i8> undef,
    <vscale x 4 x i8> %0,
    i8 9,
    iXLen %1)

  ret <vscale x 4 x i8> %a
}

define <vscale x 4 x i8> @intrinsic_vrsub_mask_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv4i8_nxv4i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
; CHECK-NEXT:    vrsub.vi v8, v9, -9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vrsub.mask.nxv4i8.i8(
    <vscale x 4 x i8> %0,
    <vscale x 4 x i8> %1,
    i8 -9,
    <vscale x 4 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 4 x i8> %a
}

define <vscale x 8 x i8> @intrinsic_vrsub_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vrsub_vi_nxv8i8_nxv8i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT:    vrsub.vi v8, v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vrsub.nxv8i8.i8(
    <vscale x 8 x i8> undef,
    <vscale x 8 x i8> %0,
    i8 9,
    iXLen %1)

  ret <vscale x 8 x i8> %a
}

define <vscale x 8 x i8> @intrinsic_vrsub_mask_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv8i8_nxv8i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
; CHECK-NEXT:    vrsub.vi v8, v9, -9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vrsub.mask.nxv8i8.i8(
    <vscale x 8 x i8> %0,
    <vscale x 8 x i8> %1,
    i8 -9,
    <vscale x 8 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 8 x i8> %a
}

define <vscale x 16 x i8> @intrinsic_vrsub_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vrsub_vi_nxv16i8_nxv16i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT:    vrsub.vi v8, v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vrsub.nxv16i8.i8(
    <vscale x 16 x i8> undef,
    <vscale x 16 x i8> %0,
    i8 9,
    iXLen %1)

  ret <vscale x 16 x i8> %a
}

define <vscale x 16 x i8> @intrinsic_vrsub_mask_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv16i8_nxv16i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
; CHECK-NEXT:    vrsub.vi v8, v10, -9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vrsub.mask.nxv16i8.i8(
    <vscale x 16 x i8> %0,
    <vscale x 16 x i8> %1,
    i8 -9,
    <vscale x 16 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 16 x i8> %a
}

define <vscale x 32 x i8> @intrinsic_vrsub_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vrsub_vi_nxv32i8_nxv32i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT:    vrsub.vi v8, v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vrsub.nxv32i8.i8(
    <vscale x 32 x i8> undef,
    <vscale x 32 x i8> %0,
    i8 9,
    iXLen %1)

  ret <vscale x 32 x i8> %a
}

define <vscale x 32 x i8> @intrinsic_vrsub_mask_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv32i8_nxv32i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
; CHECK-NEXT:    vrsub.vi v8, v12, -9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vrsub.mask.nxv32i8.i8(
    <vscale x 32 x i8> %0,
    <vscale x 32 x i8> %1,
    i8 -9,
    <vscale x 32 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 32 x i8> %a
}

define <vscale x 64 x i8> @intrinsic_vrsub_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vrsub_vi_nxv64i8_nxv64i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT:    vrsub.vi v8, v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 64 x i8> @llvm.riscv.vrsub.nxv64i8.i8(
    <vscale x 64 x i8> undef,
    <vscale x 64 x i8> %0,
    i8 9,
    iXLen %1)

  ret <vscale x 64 x i8> %a
}

define <vscale x 64 x i8> @intrinsic_vrsub_mask_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv64i8_nxv64i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, mu
; CHECK-NEXT:    vrsub.vi v8, v16, -9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 64 x i8> @llvm.riscv.vrsub.mask.nxv64i8.i8(
    <vscale x 64 x i8> %0,
    <vscale x 64 x i8> %1,
    i8 -9,
    <vscale x 64 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 64 x i8> %a
}

define <vscale x 1 x i16> @intrinsic_vrsub_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vrsub_vi_nxv1i16_nxv1i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vrsub.vi v8, v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vrsub.nxv1i16.i16(
    <vscale x 1 x i16> undef,
    <vscale x 1 x i16> %0,
    i16 9,
    iXLen %1)

  ret <vscale x 1 x i16> %a
}

define <vscale x 1 x i16> @intrinsic_vrsub_mask_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv1i16_nxv1i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT:    vrsub.vi v8, v9, -9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vrsub.mask.nxv1i16.i16(
    <vscale x 1 x i16> %0,
    <vscale x 1 x i16> %1,
    i16 -9,
    <vscale x 1 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 1 x i16> %a
}

define <vscale x 2 x i16> @intrinsic_vrsub_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vrsub_vi_nxv2i16_nxv2i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vrsub.vi v8, v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vrsub.nxv2i16.i16(
    <vscale x 2 x i16> undef,
    <vscale x 2 x i16> %0,
    i16 9,
    iXLen %1)

  ret <vscale x 2 x i16> %a
}

define <vscale x 2 x i16> @intrinsic_vrsub_mask_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv2i16_nxv2i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT:    vrsub.vi v8, v9, -9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vrsub.mask.nxv2i16.i16(
    <vscale x 2 x i16> %0,
    <vscale x 2 x i16> %1,
    i16 -9,
    <vscale x 2 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 2 x i16> %a
}

define <vscale x 4 x i16> @intrinsic_vrsub_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vrsub_vi_nxv4i16_nxv4i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vrsub.vi v8, v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vrsub.nxv4i16.i16(
    <vscale x 4 x i16> undef,
    <vscale x 4 x i16> %0,
    i16 9,
    iXLen %1)

  ret <vscale x 4 x i16> %a
}

define <vscale x 4 x i16> @intrinsic_vrsub_mask_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv4i16_nxv4i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT:    vrsub.vi v8, v9, -9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vrsub.mask.nxv4i16.i16(
    <vscale x 4 x i16> %0,
    <vscale x 4 x i16> %1,
    i16 -9,
    <vscale x 4 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 4 x i16> %a
}

define <vscale x 8 x i16> @intrinsic_vrsub_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vrsub_vi_nxv8i16_nxv8i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vrsub.vi v8, v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vrsub.nxv8i16.i16(
    <vscale x 8 x i16> undef,
    <vscale x 8 x i16> %0,
    i16 9,
    iXLen %1)

  ret <vscale x 8 x i16> %a
}

define <vscale x 8 x i16> @intrinsic_vrsub_mask_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv8i16_nxv8i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT:    vrsub.vi v8, v10, -9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vrsub.mask.nxv8i16.i16(
    <vscale x 8 x i16> %0,
    <vscale x 8 x i16> %1,
    i16 -9,
    <vscale x 8 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 8 x i16> %a
}

define <vscale x 16 x i16> @intrinsic_vrsub_vi_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vrsub_vi_nxv16i16_nxv16i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    vrsub.vi v8, v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vrsub.nxv16i16.i16(
    <vscale x 16 x i16> undef,
    <vscale x 16 x i16> %0,
    i16 9,
    iXLen %1)

  ret <vscale x 16 x i16> %a
}

define <vscale x 16 x i16> @intrinsic_vrsub_mask_vi_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv16i16_nxv16i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT:    vrsub.vi v8, v12, -9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vrsub.mask.nxv16i16.i16(
    <vscale x 16 x i16> %0,
    <vscale x 16 x i16> %1,
    i16 -9,
    <vscale x 16 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 16 x i16> %a
}

define <vscale x 32 x i16> @intrinsic_vrsub_vi_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vrsub_vi_nxv32i16_nxv32i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT:    vrsub.vi v8, v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vrsub.nxv32i16.i16(
    <vscale x 32 x i16> undef,
    <vscale x 32 x i16> %0,
    i16 9,
    iXLen %1)

  ret <vscale x 32 x i16> %a
}

define <vscale x 32 x i16> @intrinsic_vrsub_mask_vi_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv32i16_nxv32i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
; CHECK-NEXT:    vrsub.vi v8, v16, -9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vrsub.mask.nxv32i16.i16(
    <vscale x 32 x i16> %0,
    <vscale x 32 x i16> %1,
    i16 -9,
    <vscale x 32 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 32 x i16> %a
}

define <vscale x 1 x i32> @intrinsic_vrsub_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vrsub_vi_nxv1i32_nxv1i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vrsub.vi v8, v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vrsub.nxv1i32.i32(
    <vscale x 1 x i32> undef,
    <vscale x 1 x i32> %0,
    i32 9,
    iXLen %1)

  ret <vscale x 1 x i32> %a
}

define <vscale x 1 x i32> @intrinsic_vrsub_mask_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv1i32_nxv1i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT:    vrsub.vi v8, v9, -9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vrsub.mask.nxv1i32.i32(
    <vscale x 1 x i32> %0,
    <vscale x 1 x i32> %1,
    i32 -9,
    <vscale x 1 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 1 x i32> %a
}

define <vscale x 2 x i32> @intrinsic_vrsub_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vrsub_vi_nxv2i32_nxv2i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vrsub.vi v8, v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vrsub.nxv2i32.i32(
    <vscale x 2 x i32> undef,
    <vscale x 2 x i32> %0,
    i32 9,
    iXLen %1)

  ret <vscale x 2 x i32> %a
}

define <vscale x 2 x i32> @intrinsic_vrsub_mask_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv2i32_nxv2i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT:    vrsub.vi v8, v9, -9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vrsub.mask.nxv2i32.i32(
    <vscale x 2 x i32> %0,
    <vscale x 2 x i32> %1,
    i32 -9,
    <vscale x 2 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 2 x i32> %a
}

define <vscale x 4 x i32> @intrinsic_vrsub_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vrsub_vi_nxv4i32_nxv4i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vrsub.vi v8, v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vrsub.nxv4i32.i32(
    <vscale x 4 x i32> undef,
    <vscale x 4 x i32> %0,
    i32 9,
    iXLen %1)

  ret <vscale x 4 x i32> %a
}

define <vscale x 4 x i32> @intrinsic_vrsub_mask_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv4i32_nxv4i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT:    vrsub.vi v8, v10, -9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vrsub.mask.nxv4i32.i32(
    <vscale x 4 x i32> %0,
    <vscale x 4 x i32> %1,
    i32 -9,
    <vscale x 4 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 4 x i32> %a
}

define <vscale x 8 x i32> @intrinsic_vrsub_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vrsub_vi_nxv8i32_nxv8i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vrsub.vi v8, v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vrsub.nxv8i32.i32(
    <vscale x 8 x i32> undef,
    <vscale x 8 x i32> %0,
    i32 9,
    iXLen %1)

  ret <vscale x 8 x i32> %a
}

define <vscale x 8 x i32> @intrinsic_vrsub_mask_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv8i32_nxv8i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT:    vrsub.vi v8, v12, -9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vrsub.mask.nxv8i32.i32(
    <vscale x 8 x i32> %0,
    <vscale x 8 x i32> %1,
    i32 -9,
    <vscale x 8 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 8 x i32> %a
}

define <vscale x 16 x i32> @intrinsic_vrsub_vi_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vrsub_vi_nxv16i32_nxv16i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT:    vrsub.vi v8, v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vrsub.nxv16i32.i32(
    <vscale x 16 x i32> undef,
    <vscale x 16 x i32> %0,
    i32 9,
    iXLen %1)

  ret <vscale x 16 x i32> %a
}

define <vscale x 16 x i32> @intrinsic_vrsub_mask_vi_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv16i32_nxv16i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
; CHECK-NEXT:    vrsub.vi v8, v16, -9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vrsub.mask.nxv16i32.i32(
    <vscale x 16 x i32> %0,
    <vscale x 16 x i32> %1,
    i32 -9,
    <vscale x 16 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 16 x i32> %a
}

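; Because the immediate is encoded directly in the instruction, the e64
; immediate tests need none of the RV32 stack-splat lowering used for .vx
; above, so RV32 and RV64 share the same CHECK lines.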
define <vscale x 1 x i64> @intrinsic_vrsub_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vrsub_vi_nxv1i64_nxv1i64_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vrsub.vi v8, v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vrsub.nxv1i64.i64(
    <vscale x 1 x i64> undef,
    <vscale x 1 x i64> %0,
    i64 9,
    iXLen %1)

  ret <vscale x 1 x i64> %a
}

define <vscale x 1 x i64> @intrinsic_vrsub_mask_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv1i64_nxv1i64_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT:    vrsub.vi v8, v9, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vrsub.mask.nxv1i64.i64(
    <vscale x 1 x i64> %0,
    <vscale x 1 x i64> %1,
    i64 9,
    <vscale x 1 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 1 x i64> %a
}

define <vscale x 2 x i64> @intrinsic_vrsub_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vrsub_vi_nxv2i64_nxv2i64_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vrsub.vi v8, v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vrsub.nxv2i64.i64(
    <vscale x 2 x i64> undef,
    <vscale x 2 x i64> %0,
    i64 9,
    iXLen %1)

  ret <vscale x 2 x i64> %a
}

define <vscale x 2 x i64> @intrinsic_vrsub_mask_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv2i64_nxv2i64_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT:    vrsub.vi v8, v10, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vrsub.mask.nxv2i64.i64(
    <vscale x 2 x i64> %0,
    <vscale x 2 x i64> %1,
    i64 9,
    <vscale x 2 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 2 x i64> %a
}

define <vscale x 4 x i64> @intrinsic_vrsub_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vrsub_vi_nxv4i64_nxv4i64_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    vrsub.vi v8, v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vrsub.nxv4i64.i64(
    <vscale x 4 x i64> undef,
    <vscale x 4 x i64> %0,
    i64 9,
    iXLen %1)

  ret <vscale x 4 x i64> %a
}

define <vscale x 4 x i64> @intrinsic_vrsub_mask_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv4i64_nxv4i64_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT:    vrsub.vi v8, v12, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vrsub.mask.nxv4i64.i64(
    <vscale x 4 x i64> %0,
    <vscale x 4 x i64> %1,
    i64 9,
    <vscale x 4 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 4 x i64> %a
}

define <vscale x 8 x i64> @intrinsic_vrsub_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vrsub_vi_nxv8i64_nxv8i64_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vrsub.vi v8, v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vrsub.nxv8i64.i64(
    <vscale x 8 x i64> undef,
    <vscale x 8 x i64> %0,
    i64 9,
    iXLen %1)

  ret <vscale x 8 x i64> %a
}

define <vscale x 8 x i64> @intrinsic_vrsub_mask_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv8i64_nxv8i64_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
; CHECK-NEXT:    vrsub.vi v8, v16, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vrsub.mask.nxv8i64.i64(
    <vscale x 8 x i64> %0,
    <vscale x 8 x i64> %1,
    i64 9,
    <vscale x 8 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 8 x i64> %a
}