xref: /llvm-project/llvm/test/CodeGen/RISCV/rvv/vrsub-vp.ll (revision 36e4176f1d83d04cdebb4e1870561099b2478d80)
1; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s \
3; RUN:   | FileCheck %s --check-prefixes=CHECK,RV32
4; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \
5; RUN:   | FileCheck %s --check-prefixes=CHECK,RV64
6
7declare <vscale x 1 x i8> @llvm.vp.sub.nxv1i8(<vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i1>, i32)
8
; Scalar subtrahend: vp.sub(splat %b, %va, %m, %evl) should select a masked vrsub.vx.
define <vscale x 1 x i8> @vrsub_vx_nxv1i8(<vscale x 1 x i8> %va, i8 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vx_nxv1i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vrsub.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 1 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 1 x i8> %elt.head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
  %v = call <vscale x 1 x i8> @llvm.vp.sub.nxv1i8(<vscale x 1 x i8> %vb, <vscale x 1 x i8> %va, <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x i8> %v
}
20
; All-ones mask: vp.sub(splat %b, %va) should select an unmasked vrsub.vx.
define <vscale x 1 x i8> @vrsub_vx_nxv1i8_unmasked(<vscale x 1 x i8> %va, i8 %b, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vx_nxv1i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vrsub.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 1 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 1 x i8> %elt.head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
  %v = call <vscale x 1 x i8> @llvm.vp.sub.nxv1i8(<vscale x 1 x i8> %vb, <vscale x 1 x i8> %va, <vscale x 1 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 1 x i8> %v
}
32
; Immediate subtrahend: vp.sub(splat 2, %va, %m, %evl) should select a masked vrsub.vi.
define <vscale x 1 x i8> @vrsub_vi_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vi_nxv1i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vrsub.vi v8, v8, 2, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 1 x i8> @llvm.vp.sub.nxv1i8(<vscale x 1 x i8> splat (i8 2), <vscale x 1 x i8> %va, <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x i8> %v
}
42
; All-ones mask: vp.sub(splat 2, %va) should select an unmasked vrsub.vi.
define <vscale x 1 x i8> @vrsub_vi_nxv1i8_unmasked(<vscale x 1 x i8> %va, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vi_nxv1i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vrsub.vi v8, v8, 2
; CHECK-NEXT:    ret
  %v = call <vscale x 1 x i8> @llvm.vp.sub.nxv1i8(<vscale x 1 x i8> splat (i8 2), <vscale x 1 x i8> %va, <vscale x 1 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 1 x i8> %v
}
52
53declare <vscale x 2 x i8> @llvm.vp.sub.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i1>, i32)
54
; Scalar subtrahend: vp.sub(splat %b, %va, %m, %evl) should select a masked vrsub.vx.
define <vscale x 2 x i8> @vrsub_vx_nxv2i8(<vscale x 2 x i8> %va, i8 %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vx_nxv2i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT:    vrsub.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 2 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 2 x i8> %elt.head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
  %v = call <vscale x 2 x i8> @llvm.vp.sub.nxv2i8(<vscale x 2 x i8> %vb, <vscale x 2 x i8> %va, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i8> %v
}
66
; All-ones mask: vp.sub(splat %b, %va) should select an unmasked vrsub.vx.
define <vscale x 2 x i8> @vrsub_vx_nxv2i8_unmasked(<vscale x 2 x i8> %va, i8 %b, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vx_nxv2i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT:    vrsub.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 2 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 2 x i8> %elt.head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
  %v = call <vscale x 2 x i8> @llvm.vp.sub.nxv2i8(<vscale x 2 x i8> %vb, <vscale x 2 x i8> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x i8> %v
}
78
; Immediate subtrahend: vp.sub(splat 2, %va, %m, %evl) should select a masked vrsub.vi.
define <vscale x 2 x i8> @vrsub_vi_nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vi_nxv2i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT:    vrsub.vi v8, v8, 2, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i8> @llvm.vp.sub.nxv2i8(<vscale x 2 x i8> splat (i8 2), <vscale x 2 x i8> %va, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i8> %v
}
88
; All-ones mask: vp.sub(splat 2, %va) should select an unmasked vrsub.vi.
define <vscale x 2 x i8> @vrsub_vi_nxv2i8_unmasked(<vscale x 2 x i8> %va, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vi_nxv2i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT:    vrsub.vi v8, v8, 2
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i8> @llvm.vp.sub.nxv2i8(<vscale x 2 x i8> splat (i8 2), <vscale x 2 x i8> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x i8> %v
}
98
99declare <vscale x 4 x i8> @llvm.vp.sub.nxv4i8(<vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i1>, i32)
100
; Scalar subtrahend: vp.sub(splat %b, %va, %m, %evl) should select a masked vrsub.vx.
define <vscale x 4 x i8> @vrsub_vx_nxv4i8(<vscale x 4 x i8> %va, i8 %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vx_nxv4i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vrsub.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 4 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 4 x i8> %elt.head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
  %v = call <vscale x 4 x i8> @llvm.vp.sub.nxv4i8(<vscale x 4 x i8> %vb, <vscale x 4 x i8> %va, <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x i8> %v
}
112
; All-ones mask: vp.sub(splat %b, %va) should select an unmasked vrsub.vx.
define <vscale x 4 x i8> @vrsub_vx_nxv4i8_unmasked(<vscale x 4 x i8> %va, i8 %b, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vx_nxv4i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vrsub.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 4 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 4 x i8> %elt.head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
  %v = call <vscale x 4 x i8> @llvm.vp.sub.nxv4i8(<vscale x 4 x i8> %vb, <vscale x 4 x i8> %va, <vscale x 4 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 4 x i8> %v
}
124
; Immediate subtrahend: vp.sub(splat 2, %va, %m, %evl) should select a masked vrsub.vi.
define <vscale x 4 x i8> @vrsub_vi_nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vi_nxv4i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    vrsub.vi v8, v8, 2, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 4 x i8> @llvm.vp.sub.nxv4i8(<vscale x 4 x i8> splat (i8 2), <vscale x 4 x i8> %va, <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x i8> %v
}
134
; All-ones mask: vp.sub(splat 2, %va) should select an unmasked vrsub.vi.
define <vscale x 4 x i8> @vrsub_vi_nxv4i8_unmasked(<vscale x 4 x i8> %va, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vi_nxv4i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    vrsub.vi v8, v8, 2
; CHECK-NEXT:    ret
  %v = call <vscale x 4 x i8> @llvm.vp.sub.nxv4i8(<vscale x 4 x i8> splat (i8 2), <vscale x 4 x i8> %va, <vscale x 4 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 4 x i8> %v
}
144
145declare <vscale x 8 x i8> @llvm.vp.sub.nxv8i8(<vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i1>, i32)
146
; Scalar subtrahend: vp.sub(splat %b, %va, %m, %evl) should select a masked vrsub.vx.
define <vscale x 8 x i8> @vrsub_vx_nxv8i8(<vscale x 8 x i8> %va, i8 %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vx_nxv8i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT:    vrsub.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 8 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 8 x i8> %elt.head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
  %v = call <vscale x 8 x i8> @llvm.vp.sub.nxv8i8(<vscale x 8 x i8> %vb, <vscale x 8 x i8> %va, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x i8> %v
}
158
; All-ones mask: vp.sub(splat %b, %va) should select an unmasked vrsub.vx.
define <vscale x 8 x i8> @vrsub_vx_nxv8i8_unmasked(<vscale x 8 x i8> %va, i8 %b, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vx_nxv8i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT:    vrsub.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 8 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 8 x i8> %elt.head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
  %v = call <vscale x 8 x i8> @llvm.vp.sub.nxv8i8(<vscale x 8 x i8> %vb, <vscale x 8 x i8> %va, <vscale x 8 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 8 x i8> %v
}
170
; Immediate subtrahend: vp.sub(splat 2, %va, %m, %evl) should select a masked vrsub.vi.
define <vscale x 8 x i8> @vrsub_vi_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vi_nxv8i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT:    vrsub.vi v8, v8, 2, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 8 x i8> @llvm.vp.sub.nxv8i8(<vscale x 8 x i8> splat (i8 2), <vscale x 8 x i8> %va, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x i8> %v
}
180
; All-ones mask: vp.sub(splat 2, %va) should select an unmasked vrsub.vi.
define <vscale x 8 x i8> @vrsub_vi_nxv8i8_unmasked(<vscale x 8 x i8> %va, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vi_nxv8i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT:    vrsub.vi v8, v8, 2
; CHECK-NEXT:    ret
  %v = call <vscale x 8 x i8> @llvm.vp.sub.nxv8i8(<vscale x 8 x i8> splat (i8 2), <vscale x 8 x i8> %va, <vscale x 8 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 8 x i8> %v
}
190
191declare <vscale x 16 x i8> @llvm.vp.sub.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i1>, i32)
192
; Scalar subtrahend: vp.sub(splat %b, %va, %m, %evl) should select a masked vrsub.vx.
define <vscale x 16 x i8> @vrsub_vx_nxv16i8(<vscale x 16 x i8> %va, i8 %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vx_nxv16i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT:    vrsub.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 16 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 16 x i8> %elt.head, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
  %v = call <vscale x 16 x i8> @llvm.vp.sub.nxv16i8(<vscale x 16 x i8> %vb, <vscale x 16 x i8> %va, <vscale x 16 x i1> %m, i32 %evl)
  ret <vscale x 16 x i8> %v
}
204
; All-ones mask: vp.sub(splat %b, %va) should select an unmasked vrsub.vx.
define <vscale x 16 x i8> @vrsub_vx_nxv16i8_unmasked(<vscale x 16 x i8> %va, i8 %b, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vx_nxv16i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT:    vrsub.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 16 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 16 x i8> %elt.head, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
  %v = call <vscale x 16 x i8> @llvm.vp.sub.nxv16i8(<vscale x 16 x i8> %vb, <vscale x 16 x i8> %va, <vscale x 16 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 16 x i8> %v
}
216
; Immediate subtrahend: vp.sub(splat 2, %va, %m, %evl) should select a masked vrsub.vi.
define <vscale x 16 x i8> @vrsub_vi_nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vi_nxv16i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT:    vrsub.vi v8, v8, 2, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 16 x i8> @llvm.vp.sub.nxv16i8(<vscale x 16 x i8> splat (i8 2), <vscale x 16 x i8> %va, <vscale x 16 x i1> %m, i32 %evl)
  ret <vscale x 16 x i8> %v
}
226
; All-ones mask: vp.sub(splat 2, %va) should select an unmasked vrsub.vi.
define <vscale x 16 x i8> @vrsub_vi_nxv16i8_unmasked(<vscale x 16 x i8> %va, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vi_nxv16i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT:    vrsub.vi v8, v8, 2
; CHECK-NEXT:    ret
  %v = call <vscale x 16 x i8> @llvm.vp.sub.nxv16i8(<vscale x 16 x i8> splat (i8 2), <vscale x 16 x i8> %va, <vscale x 16 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 16 x i8> %v
}
236
237declare <vscale x 32 x i8> @llvm.vp.sub.nxv32i8(<vscale x 32 x i8>, <vscale x 32 x i8>, <vscale x 32 x i1>, i32)
238
; Scalar subtrahend: vp.sub(splat %b, %va, %m, %evl) should select a masked vrsub.vx.
define <vscale x 32 x i8> @vrsub_vx_nxv32i8(<vscale x 32 x i8> %va, i8 %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vx_nxv32i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT:    vrsub.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 32 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 32 x i8> %elt.head, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
  %v = call <vscale x 32 x i8> @llvm.vp.sub.nxv32i8(<vscale x 32 x i8> %vb, <vscale x 32 x i8> %va, <vscale x 32 x i1> %m, i32 %evl)
  ret <vscale x 32 x i8> %v
}
250
; All-ones mask: vp.sub(splat %b, %va) should select an unmasked vrsub.vx.
define <vscale x 32 x i8> @vrsub_vx_nxv32i8_unmasked(<vscale x 32 x i8> %va, i8 %b, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vx_nxv32i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT:    vrsub.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 32 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 32 x i8> %elt.head, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
  %v = call <vscale x 32 x i8> @llvm.vp.sub.nxv32i8(<vscale x 32 x i8> %vb, <vscale x 32 x i8> %va, <vscale x 32 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 32 x i8> %v
}
262
; Immediate subtrahend: vp.sub(splat 2, %va, %m, %evl) should select a masked vrsub.vi.
define <vscale x 32 x i8> @vrsub_vi_nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vi_nxv32i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT:    vrsub.vi v8, v8, 2, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 32 x i8> @llvm.vp.sub.nxv32i8(<vscale x 32 x i8> splat (i8 2), <vscale x 32 x i8> %va, <vscale x 32 x i1> %m, i32 %evl)
  ret <vscale x 32 x i8> %v
}
272
; All-ones mask: vp.sub(splat 2, %va) should select an unmasked vrsub.vi.
define <vscale x 32 x i8> @vrsub_vi_nxv32i8_unmasked(<vscale x 32 x i8> %va, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vi_nxv32i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT:    vrsub.vi v8, v8, 2
; CHECK-NEXT:    ret
  %v = call <vscale x 32 x i8> @llvm.vp.sub.nxv32i8(<vscale x 32 x i8> splat (i8 2), <vscale x 32 x i8> %va, <vscale x 32 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 32 x i8> %v
}
282
283declare <vscale x 64 x i8> @llvm.vp.sub.nxv64i8(<vscale x 64 x i8>, <vscale x 64 x i8>, <vscale x 64 x i1>, i32)
284
; Scalar subtrahend: vp.sub(splat %b, %va, %m, %evl) should select a masked vrsub.vx.
define <vscale x 64 x i8> @vrsub_vx_nxv64i8(<vscale x 64 x i8> %va, i8 %b, <vscale x 64 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vx_nxv64i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT:    vrsub.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 64 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 64 x i8> %elt.head, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
  %v = call <vscale x 64 x i8> @llvm.vp.sub.nxv64i8(<vscale x 64 x i8> %vb, <vscale x 64 x i8> %va, <vscale x 64 x i1> %m, i32 %evl)
  ret <vscale x 64 x i8> %v
}
296
; All-ones mask: vp.sub(splat %b, %va) should select an unmasked vrsub.vx.
define <vscale x 64 x i8> @vrsub_vx_nxv64i8_unmasked(<vscale x 64 x i8> %va, i8 %b, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vx_nxv64i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT:    vrsub.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 64 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 64 x i8> %elt.head, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
  %v = call <vscale x 64 x i8> @llvm.vp.sub.nxv64i8(<vscale x 64 x i8> %vb, <vscale x 64 x i8> %va, <vscale x 64 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 64 x i8> %v
}
308
; Immediate subtrahend: vp.sub(splat 2, %va, %m, %evl) should select a masked vrsub.vi.
define <vscale x 64 x i8> @vrsub_vi_nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vi_nxv64i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT:    vrsub.vi v8, v8, 2, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 64 x i8> @llvm.vp.sub.nxv64i8(<vscale x 64 x i8> splat (i8 2), <vscale x 64 x i8> %va, <vscale x 64 x i1> %m, i32 %evl)
  ret <vscale x 64 x i8> %v
}
318
; All-ones mask: vp.sub(splat 2, %va) should select an unmasked vrsub.vi.
define <vscale x 64 x i8> @vrsub_vi_nxv64i8_unmasked(<vscale x 64 x i8> %va, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vi_nxv64i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT:    vrsub.vi v8, v8, 2
; CHECK-NEXT:    ret
  %v = call <vscale x 64 x i8> @llvm.vp.sub.nxv64i8(<vscale x 64 x i8> splat (i8 2), <vscale x 64 x i8> %va, <vscale x 64 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 64 x i8> %v
}
328
329declare <vscale x 1 x i16> @llvm.vp.sub.nxv1i16(<vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i1>, i32)
330
; Scalar subtrahend: vp.sub(splat %b, %va, %m, %evl) should select a masked vrsub.vx.
define <vscale x 1 x i16> @vrsub_vx_nxv1i16(<vscale x 1 x i16> %va, i16 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vx_nxv1i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vrsub.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 1 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <vscale x 1 x i16> %elt.head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
  %v = call <vscale x 1 x i16> @llvm.vp.sub.nxv1i16(<vscale x 1 x i16> %vb, <vscale x 1 x i16> %va, <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x i16> %v
}
342
; All-ones mask: vp.sub(splat %b, %va) should select an unmasked vrsub.vx.
define <vscale x 1 x i16> @vrsub_vx_nxv1i16_unmasked(<vscale x 1 x i16> %va, i16 %b, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vx_nxv1i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vrsub.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 1 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <vscale x 1 x i16> %elt.head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
  %v = call <vscale x 1 x i16> @llvm.vp.sub.nxv1i16(<vscale x 1 x i16> %vb, <vscale x 1 x i16> %va, <vscale x 1 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 1 x i16> %v
}
354
; Immediate subtrahend: vp.sub(splat 2, %va, %m, %evl) should select a masked vrsub.vi.
define <vscale x 1 x i16> @vrsub_vi_nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vi_nxv1i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vrsub.vi v8, v8, 2, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 1 x i16> @llvm.vp.sub.nxv1i16(<vscale x 1 x i16> splat (i16 2), <vscale x 1 x i16> %va, <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x i16> %v
}
364
; All-ones mask: vp.sub(splat 2, %va) should select an unmasked vrsub.vi.
define <vscale x 1 x i16> @vrsub_vi_nxv1i16_unmasked(<vscale x 1 x i16> %va, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vi_nxv1i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vrsub.vi v8, v8, 2
; CHECK-NEXT:    ret
  %v = call <vscale x 1 x i16> @llvm.vp.sub.nxv1i16(<vscale x 1 x i16> splat (i16 2), <vscale x 1 x i16> %va, <vscale x 1 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 1 x i16> %v
}
374
375declare <vscale x 2 x i16> @llvm.vp.sub.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i1>, i32)
376
; Scalar subtrahend: vp.sub(splat %b, %va, %m, %evl) should select a masked vrsub.vx.
define <vscale x 2 x i16> @vrsub_vx_nxv2i16(<vscale x 2 x i16> %va, i16 %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vx_nxv2i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT:    vrsub.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 2 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <vscale x 2 x i16> %elt.head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
  %v = call <vscale x 2 x i16> @llvm.vp.sub.nxv2i16(<vscale x 2 x i16> %vb, <vscale x 2 x i16> %va, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i16> %v
}
388
; All-ones mask: vp.sub(splat %b, %va) should select an unmasked vrsub.vx.
define <vscale x 2 x i16> @vrsub_vx_nxv2i16_unmasked(<vscale x 2 x i16> %va, i16 %b, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vx_nxv2i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT:    vrsub.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 2 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <vscale x 2 x i16> %elt.head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
  %v = call <vscale x 2 x i16> @llvm.vp.sub.nxv2i16(<vscale x 2 x i16> %vb, <vscale x 2 x i16> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x i16> %v
}
400
; Immediate subtrahend: vp.sub(splat 2, %va, %m, %evl) should select a masked vrsub.vi.
define <vscale x 2 x i16> @vrsub_vi_nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vi_nxv2i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vrsub.vi v8, v8, 2, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i16> @llvm.vp.sub.nxv2i16(<vscale x 2 x i16> splat (i16 2), <vscale x 2 x i16> %va, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i16> %v
}
410
; All-ones mask: vp.sub(splat 2, %va) should select an unmasked vrsub.vi.
define <vscale x 2 x i16> @vrsub_vi_nxv2i16_unmasked(<vscale x 2 x i16> %va, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vi_nxv2i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vrsub.vi v8, v8, 2
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i16> @llvm.vp.sub.nxv2i16(<vscale x 2 x i16> splat (i16 2), <vscale x 2 x i16> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x i16> %v
}
420
421declare <vscale x 4 x i16> @llvm.vp.sub.nxv4i16(<vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i1>, i32)
422
; Scalar subtrahend: vp.sub(splat %b, %va, %m, %evl) should select a masked vrsub.vx.
define <vscale x 4 x i16> @vrsub_vx_nxv4i16(<vscale x 4 x i16> %va, i16 %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vx_nxv4i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vrsub.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 4 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <vscale x 4 x i16> %elt.head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
  %v = call <vscale x 4 x i16> @llvm.vp.sub.nxv4i16(<vscale x 4 x i16> %vb, <vscale x 4 x i16> %va, <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x i16> %v
}
434
; All-ones mask: vp.sub(splat %b, %va) should select an unmasked vrsub.vx.
define <vscale x 4 x i16> @vrsub_vx_nxv4i16_unmasked(<vscale x 4 x i16> %va, i16 %b, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vx_nxv4i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vrsub.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 4 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <vscale x 4 x i16> %elt.head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
  %v = call <vscale x 4 x i16> @llvm.vp.sub.nxv4i16(<vscale x 4 x i16> %vb, <vscale x 4 x i16> %va, <vscale x 4 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 4 x i16> %v
}
446
; Immediate subtrahend: vp.sub(splat 2, %va, %m, %evl) should select a masked vrsub.vi.
define <vscale x 4 x i16> @vrsub_vi_nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vi_nxv4i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vrsub.vi v8, v8, 2, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 4 x i16> @llvm.vp.sub.nxv4i16(<vscale x 4 x i16> splat (i16 2), <vscale x 4 x i16> %va, <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x i16> %v
}
456
; All-ones mask: vp.sub(splat 2, %va) should select an unmasked vrsub.vi.
define <vscale x 4 x i16> @vrsub_vi_nxv4i16_unmasked(<vscale x 4 x i16> %va, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vi_nxv4i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vrsub.vi v8, v8, 2
; CHECK-NEXT:    ret
  %v = call <vscale x 4 x i16> @llvm.vp.sub.nxv4i16(<vscale x 4 x i16> splat (i16 2), <vscale x 4 x i16> %va, <vscale x 4 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 4 x i16> %v
}
466
467declare <vscale x 8 x i16> @llvm.vp.sub.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i1>, i32)
468
; Scalar subtrahend: vp.sub(splat %b, %va, %m, %evl) should select a masked vrsub.vx.
define <vscale x 8 x i16> @vrsub_vx_nxv8i16(<vscale x 8 x i16> %va, i16 %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vx_nxv8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT:    vrsub.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 8 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <vscale x 8 x i16> %elt.head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
  %v = call <vscale x 8 x i16> @llvm.vp.sub.nxv8i16(<vscale x 8 x i16> %vb, <vscale x 8 x i16> %va, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x i16> %v
}
480
; All-ones mask: vp.sub(splat %b, %va) should select an unmasked vrsub.vx.
define <vscale x 8 x i16> @vrsub_vx_nxv8i16_unmasked(<vscale x 8 x i16> %va, i16 %b, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vx_nxv8i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT:    vrsub.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 8 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <vscale x 8 x i16> %elt.head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
  %v = call <vscale x 8 x i16> @llvm.vp.sub.nxv8i16(<vscale x 8 x i16> %vb, <vscale x 8 x i16> %va, <vscale x 8 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 8 x i16> %v
}
492
; Immediate subtrahend: vp.sub(splat 2, %va, %m, %evl) should select a masked vrsub.vi.
define <vscale x 8 x i16> @vrsub_vi_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vi_nxv8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vrsub.vi v8, v8, 2, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 8 x i16> @llvm.vp.sub.nxv8i16(<vscale x 8 x i16> splat (i16 2), <vscale x 8 x i16> %va, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x i16> %v
}
502
; All-ones mask: vp.sub(splat 2, %va) should select an unmasked vrsub.vi.
define <vscale x 8 x i16> @vrsub_vi_nxv8i16_unmasked(<vscale x 8 x i16> %va, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vi_nxv8i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vrsub.vi v8, v8, 2
; CHECK-NEXT:    ret
  %v = call <vscale x 8 x i16> @llvm.vp.sub.nxv8i16(<vscale x 8 x i16> splat (i16 2), <vscale x 8 x i16> %va, <vscale x 8 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 8 x i16> %v
}
512
513declare <vscale x 16 x i16> @llvm.vp.sub.nxv16i16(<vscale x 16 x i16>, <vscale x 16 x i16>, <vscale x 16 x i1>, i32)
514
; Scalar subtrahend: vp.sub(splat %b, %va, %m, %evl) should select a masked vrsub.vx.
define <vscale x 16 x i16> @vrsub_vx_nxv16i16(<vscale x 16 x i16> %va, i16 %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vx_nxv16i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT:    vrsub.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 16 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <vscale x 16 x i16> %elt.head, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
  %v = call <vscale x 16 x i16> @llvm.vp.sub.nxv16i16(<vscale x 16 x i16> %vb, <vscale x 16 x i16> %va, <vscale x 16 x i1> %m, i32 %evl)
  ret <vscale x 16 x i16> %v
}
526
; All-ones mask: vp.sub(splat %b, %va) should select an unmasked vrsub.vx.
define <vscale x 16 x i16> @vrsub_vx_nxv16i16_unmasked(<vscale x 16 x i16> %va, i16 %b, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vx_nxv16i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT:    vrsub.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 16 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <vscale x 16 x i16> %elt.head, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
  %v = call <vscale x 16 x i16> @llvm.vp.sub.nxv16i16(<vscale x 16 x i16> %vb, <vscale x 16 x i16> %va, <vscale x 16 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 16 x i16> %v
}
538
; Immediate subtrahend: vp.sub(splat 2, %va, %m, %evl) should select a masked vrsub.vi.
define <vscale x 16 x i16> @vrsub_vi_nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vi_nxv16i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    vrsub.vi v8, v8, 2, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 16 x i16> @llvm.vp.sub.nxv16i16(<vscale x 16 x i16> splat (i16 2), <vscale x 16 x i16> %va, <vscale x 16 x i1> %m, i32 %evl)
  ret <vscale x 16 x i16> %v
}
548
; All-ones mask: vp.sub(splat 2, %va) should select an unmasked vrsub.vi.
define <vscale x 16 x i16> @vrsub_vi_nxv16i16_unmasked(<vscale x 16 x i16> %va, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vi_nxv16i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    vrsub.vi v8, v8, 2
; CHECK-NEXT:    ret
  %v = call <vscale x 16 x i16> @llvm.vp.sub.nxv16i16(<vscale x 16 x i16> splat (i16 2), <vscale x 16 x i16> %va, <vscale x 16 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 16 x i16> %v
}
558
559declare <vscale x 32 x i16> @llvm.vp.sub.nxv32i16(<vscale x 32 x i16>, <vscale x 32 x i16>, <vscale x 32 x i1>, i32)
560
; Scalar subtrahend: vp.sub(splat %b, %va, %m, %evl) should select a masked vrsub.vx.
define <vscale x 32 x i16> @vrsub_vx_nxv32i16(<vscale x 32 x i16> %va, i16 %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vx_nxv32i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
; CHECK-NEXT:    vrsub.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 32 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <vscale x 32 x i16> %elt.head, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
  %v = call <vscale x 32 x i16> @llvm.vp.sub.nxv32i16(<vscale x 32 x i16> %vb, <vscale x 32 x i16> %va, <vscale x 32 x i1> %m, i32 %evl)
  ret <vscale x 32 x i16> %v
}
572
; All-ones mask: vp.sub(splat %b, %va) should select an unmasked vrsub.vx.
define <vscale x 32 x i16> @vrsub_vx_nxv32i16_unmasked(<vscale x 32 x i16> %va, i16 %b, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vx_nxv32i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
; CHECK-NEXT:    vrsub.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 32 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <vscale x 32 x i16> %elt.head, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
  %v = call <vscale x 32 x i16> @llvm.vp.sub.nxv32i16(<vscale x 32 x i16> %vb, <vscale x 32 x i16> %va, <vscale x 32 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 32 x i16> %v
}
584
; Immediate subtrahend: vp.sub(splat 2, %va, %m, %evl) should select a masked vrsub.vi.
define <vscale x 32 x i16> @vrsub_vi_nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vi_nxv32i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT:    vrsub.vi v8, v8, 2, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 32 x i16> @llvm.vp.sub.nxv32i16(<vscale x 32 x i16> splat (i16 2), <vscale x 32 x i16> %va, <vscale x 32 x i1> %m, i32 %evl)
  ret <vscale x 32 x i16> %v
}
594
; All-ones mask: vp.sub(splat 2, %va) should select an unmasked vrsub.vi.
define <vscale x 32 x i16> @vrsub_vi_nxv32i16_unmasked(<vscale x 32 x i16> %va, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vi_nxv32i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT:    vrsub.vi v8, v8, 2
; CHECK-NEXT:    ret
  %v = call <vscale x 32 x i16> @llvm.vp.sub.nxv32i16(<vscale x 32 x i16> splat (i16 2), <vscale x 32 x i16> %va, <vscale x 32 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 32 x i16> %v
}
604
605declare <vscale x 1 x i32> @llvm.vp.sub.nxv1i32(<vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i1>, i32)
606
607define <vscale x 1 x i32> @vrsub_vx_nxv1i32(<vscale x 1 x i32> %va, i32 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
608; CHECK-LABEL: vrsub_vx_nxv1i32:
609; CHECK:       # %bb.0:
610; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
611; CHECK-NEXT:    vrsub.vx v8, v8, a0, v0.t
612; CHECK-NEXT:    ret
613  %elt.head = insertelement <vscale x 1 x i32> poison, i32 %b, i32 0
614  %vb = shufflevector <vscale x 1 x i32> %elt.head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
615  %v = call <vscale x 1 x i32> @llvm.vp.sub.nxv1i32(<vscale x 1 x i32> %vb, <vscale x 1 x i32> %va, <vscale x 1 x i1> %m, i32 %evl)
616  ret <vscale x 1 x i32> %v
617}
618
619define <vscale x 1 x i32> @vrsub_vx_nxv1i32_unmasked(<vscale x 1 x i32> %va, i32 %b, i32 zeroext %evl) {
620; CHECK-LABEL: vrsub_vx_nxv1i32_unmasked:
621; CHECK:       # %bb.0:
622; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
623; CHECK-NEXT:    vrsub.vx v8, v8, a0
624; CHECK-NEXT:    ret
625  %elt.head = insertelement <vscale x 1 x i32> poison, i32 %b, i32 0
626  %vb = shufflevector <vscale x 1 x i32> %elt.head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
627  %v = call <vscale x 1 x i32> @llvm.vp.sub.nxv1i32(<vscale x 1 x i32> %vb, <vscale x 1 x i32> %va, <vscale x 1 x i1> splat (i1 true), i32 %evl)
628  ret <vscale x 1 x i32> %v
629}
630
631define <vscale x 1 x i32> @vrsub_vi_nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
632; CHECK-LABEL: vrsub_vi_nxv1i32:
633; CHECK:       # %bb.0:
634; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
635; CHECK-NEXT:    vrsub.vi v8, v8, 2, v0.t
636; CHECK-NEXT:    ret
637  %v = call <vscale x 1 x i32> @llvm.vp.sub.nxv1i32(<vscale x 1 x i32> splat (i32 2), <vscale x 1 x i32> %va, <vscale x 1 x i1> %m, i32 %evl)
638  ret <vscale x 1 x i32> %v
639}
640
641define <vscale x 1 x i32> @vrsub_vi_nxv1i32_unmasked(<vscale x 1 x i32> %va, i32 zeroext %evl) {
642; CHECK-LABEL: vrsub_vi_nxv1i32_unmasked:
643; CHECK:       # %bb.0:
644; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
645; CHECK-NEXT:    vrsub.vi v8, v8, 2
646; CHECK-NEXT:    ret
647  %v = call <vscale x 1 x i32> @llvm.vp.sub.nxv1i32(<vscale x 1 x i32> splat (i32 2), <vscale x 1 x i32> %va, <vscale x 1 x i1> splat (i1 true), i32 %evl)
648  ret <vscale x 1 x i32> %v
649}
650
651declare <vscale x 2 x i32> @llvm.vp.sub.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i1>, i32)
652
653define <vscale x 2 x i32> @vrsub_vx_nxv2i32(<vscale x 2 x i32> %va, i32 %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
654; CHECK-LABEL: vrsub_vx_nxv2i32:
655; CHECK:       # %bb.0:
656; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
657; CHECK-NEXT:    vrsub.vx v8, v8, a0, v0.t
658; CHECK-NEXT:    ret
659  %elt.head = insertelement <vscale x 2 x i32> poison, i32 %b, i32 0
660  %vb = shufflevector <vscale x 2 x i32> %elt.head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
661  %v = call <vscale x 2 x i32> @llvm.vp.sub.nxv2i32(<vscale x 2 x i32> %vb, <vscale x 2 x i32> %va, <vscale x 2 x i1> %m, i32 %evl)
662  ret <vscale x 2 x i32> %v
663}
664
665define <vscale x 2 x i32> @vrsub_vx_nxv2i32_unmasked(<vscale x 2 x i32> %va, i32 %b, i32 zeroext %evl) {
666; CHECK-LABEL: vrsub_vx_nxv2i32_unmasked:
667; CHECK:       # %bb.0:
668; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
669; CHECK-NEXT:    vrsub.vx v8, v8, a0
670; CHECK-NEXT:    ret
671  %elt.head = insertelement <vscale x 2 x i32> poison, i32 %b, i32 0
672  %vb = shufflevector <vscale x 2 x i32> %elt.head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
673  %v = call <vscale x 2 x i32> @llvm.vp.sub.nxv2i32(<vscale x 2 x i32> %vb, <vscale x 2 x i32> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
674  ret <vscale x 2 x i32> %v
675}
676
677define <vscale x 2 x i32> @vrsub_vi_nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
678; CHECK-LABEL: vrsub_vi_nxv2i32:
679; CHECK:       # %bb.0:
680; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
681; CHECK-NEXT:    vrsub.vi v8, v8, 2, v0.t
682; CHECK-NEXT:    ret
683  %v = call <vscale x 2 x i32> @llvm.vp.sub.nxv2i32(<vscale x 2 x i32> splat (i32 2), <vscale x 2 x i32> %va, <vscale x 2 x i1> %m, i32 %evl)
684  ret <vscale x 2 x i32> %v
685}
686
687define <vscale x 2 x i32> @vrsub_vi_nxv2i32_unmasked(<vscale x 2 x i32> %va, i32 zeroext %evl) {
688; CHECK-LABEL: vrsub_vi_nxv2i32_unmasked:
689; CHECK:       # %bb.0:
690; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
691; CHECK-NEXT:    vrsub.vi v8, v8, 2
692; CHECK-NEXT:    ret
693  %v = call <vscale x 2 x i32> @llvm.vp.sub.nxv2i32(<vscale x 2 x i32> splat (i32 2), <vscale x 2 x i32> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
694  ret <vscale x 2 x i32> %v
695}
696
697declare <vscale x 4 x i32> @llvm.vp.sub.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i1>, i32)
698
699define <vscale x 4 x i32> @vrsub_vx_nxv4i32(<vscale x 4 x i32> %va, i32 %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
700; CHECK-LABEL: vrsub_vx_nxv4i32:
701; CHECK:       # %bb.0:
702; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
703; CHECK-NEXT:    vrsub.vx v8, v8, a0, v0.t
704; CHECK-NEXT:    ret
705  %elt.head = insertelement <vscale x 4 x i32> poison, i32 %b, i32 0
706  %vb = shufflevector <vscale x 4 x i32> %elt.head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
707  %v = call <vscale x 4 x i32> @llvm.vp.sub.nxv4i32(<vscale x 4 x i32> %vb, <vscale x 4 x i32> %va, <vscale x 4 x i1> %m, i32 %evl)
708  ret <vscale x 4 x i32> %v
709}
710
711define <vscale x 4 x i32> @vrsub_vx_nxv4i32_unmasked(<vscale x 4 x i32> %va, i32 %b, i32 zeroext %evl) {
712; CHECK-LABEL: vrsub_vx_nxv4i32_unmasked:
713; CHECK:       # %bb.0:
714; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
715; CHECK-NEXT:    vrsub.vx v8, v8, a0
716; CHECK-NEXT:    ret
717  %elt.head = insertelement <vscale x 4 x i32> poison, i32 %b, i32 0
718  %vb = shufflevector <vscale x 4 x i32> %elt.head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
719  %v = call <vscale x 4 x i32> @llvm.vp.sub.nxv4i32(<vscale x 4 x i32> %vb, <vscale x 4 x i32> %va, <vscale x 4 x i1> splat (i1 true), i32 %evl)
720  ret <vscale x 4 x i32> %v
721}
722
723define <vscale x 4 x i32> @vrsub_vi_nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
724; CHECK-LABEL: vrsub_vi_nxv4i32:
725; CHECK:       # %bb.0:
726; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
727; CHECK-NEXT:    vrsub.vi v8, v8, 2, v0.t
728; CHECK-NEXT:    ret
729  %v = call <vscale x 4 x i32> @llvm.vp.sub.nxv4i32(<vscale x 4 x i32> splat (i32 2), <vscale x 4 x i32> %va, <vscale x 4 x i1> %m, i32 %evl)
730  ret <vscale x 4 x i32> %v
731}
732
733define <vscale x 4 x i32> @vrsub_vi_nxv4i32_unmasked(<vscale x 4 x i32> %va, i32 zeroext %evl) {
734; CHECK-LABEL: vrsub_vi_nxv4i32_unmasked:
735; CHECK:       # %bb.0:
736; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
737; CHECK-NEXT:    vrsub.vi v8, v8, 2
738; CHECK-NEXT:    ret
739  %v = call <vscale x 4 x i32> @llvm.vp.sub.nxv4i32(<vscale x 4 x i32> splat (i32 2), <vscale x 4 x i32> %va, <vscale x 4 x i1> splat (i1 true), i32 %evl)
740  ret <vscale x 4 x i32> %v
741}
742
743declare <vscale x 8 x i32> @llvm.vp.sub.nxv8i32(<vscale x 8 x i32>, <vscale x 8 x i32>, <vscale x 8 x i1>, i32)
744
745define <vscale x 8 x i32> @vrsub_vx_nxv8i32(<vscale x 8 x i32> %va, i32 %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
746; CHECK-LABEL: vrsub_vx_nxv8i32:
747; CHECK:       # %bb.0:
748; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
749; CHECK-NEXT:    vrsub.vx v8, v8, a0, v0.t
750; CHECK-NEXT:    ret
751  %elt.head = insertelement <vscale x 8 x i32> poison, i32 %b, i32 0
752  %vb = shufflevector <vscale x 8 x i32> %elt.head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
753  %v = call <vscale x 8 x i32> @llvm.vp.sub.nxv8i32(<vscale x 8 x i32> %vb, <vscale x 8 x i32> %va, <vscale x 8 x i1> %m, i32 %evl)
754  ret <vscale x 8 x i32> %v
755}
756
757define <vscale x 8 x i32> @vrsub_vx_nxv8i32_unmasked(<vscale x 8 x i32> %va, i32 %b, i32 zeroext %evl) {
758; CHECK-LABEL: vrsub_vx_nxv8i32_unmasked:
759; CHECK:       # %bb.0:
760; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
761; CHECK-NEXT:    vrsub.vx v8, v8, a0
762; CHECK-NEXT:    ret
763  %elt.head = insertelement <vscale x 8 x i32> poison, i32 %b, i32 0
764  %vb = shufflevector <vscale x 8 x i32> %elt.head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
765  %v = call <vscale x 8 x i32> @llvm.vp.sub.nxv8i32(<vscale x 8 x i32> %vb, <vscale x 8 x i32> %va, <vscale x 8 x i1> splat (i1 true), i32 %evl)
766  ret <vscale x 8 x i32> %v
767}
768
769define <vscale x 8 x i32> @vrsub_vi_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
770; CHECK-LABEL: vrsub_vi_nxv8i32:
771; CHECK:       # %bb.0:
772; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
773; CHECK-NEXT:    vrsub.vi v8, v8, 2, v0.t
774; CHECK-NEXT:    ret
775  %v = call <vscale x 8 x i32> @llvm.vp.sub.nxv8i32(<vscale x 8 x i32> splat (i32 2), <vscale x 8 x i32> %va, <vscale x 8 x i1> %m, i32 %evl)
776  ret <vscale x 8 x i32> %v
777}
778
779define <vscale x 8 x i32> @vrsub_vi_nxv8i32_unmasked(<vscale x 8 x i32> %va, i32 zeroext %evl) {
780; CHECK-LABEL: vrsub_vi_nxv8i32_unmasked:
781; CHECK:       # %bb.0:
782; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
783; CHECK-NEXT:    vrsub.vi v8, v8, 2
784; CHECK-NEXT:    ret
785  %v = call <vscale x 8 x i32> @llvm.vp.sub.nxv8i32(<vscale x 8 x i32> splat (i32 2), <vscale x 8 x i32> %va, <vscale x 8 x i1> splat (i1 true), i32 %evl)
786  ret <vscale x 8 x i32> %v
787}
788
789declare <vscale x 16 x i32> @llvm.vp.sub.nxv16i32(<vscale x 16 x i32>, <vscale x 16 x i32>, <vscale x 16 x i1>, i32)
790
791define <vscale x 16 x i32> @vrsub_vx_nxv16i32(<vscale x 16 x i32> %va, i32 %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
792; CHECK-LABEL: vrsub_vx_nxv16i32:
793; CHECK:       # %bb.0:
794; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
795; CHECK-NEXT:    vrsub.vx v8, v8, a0, v0.t
796; CHECK-NEXT:    ret
797  %elt.head = insertelement <vscale x 16 x i32> poison, i32 %b, i32 0
798  %vb = shufflevector <vscale x 16 x i32> %elt.head, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
799  %v = call <vscale x 16 x i32> @llvm.vp.sub.nxv16i32(<vscale x 16 x i32> %vb, <vscale x 16 x i32> %va, <vscale x 16 x i1> %m, i32 %evl)
800  ret <vscale x 16 x i32> %v
801}
802
803define <vscale x 16 x i32> @vrsub_vx_nxv16i32_unmasked(<vscale x 16 x i32> %va, i32 %b, i32 zeroext %evl) {
804; CHECK-LABEL: vrsub_vx_nxv16i32_unmasked:
805; CHECK:       # %bb.0:
806; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
807; CHECK-NEXT:    vrsub.vx v8, v8, a0
808; CHECK-NEXT:    ret
809  %elt.head = insertelement <vscale x 16 x i32> poison, i32 %b, i32 0
810  %vb = shufflevector <vscale x 16 x i32> %elt.head, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
811  %v = call <vscale x 16 x i32> @llvm.vp.sub.nxv16i32(<vscale x 16 x i32> %vb, <vscale x 16 x i32> %va, <vscale x 16 x i1> splat (i1 true), i32 %evl)
812  ret <vscale x 16 x i32> %v
813}
814
815define <vscale x 16 x i32> @vrsub_vi_nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
816; CHECK-LABEL: vrsub_vi_nxv16i32:
817; CHECK:       # %bb.0:
818; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
819; CHECK-NEXT:    vrsub.vi v8, v8, 2, v0.t
820; CHECK-NEXT:    ret
821  %v = call <vscale x 16 x i32> @llvm.vp.sub.nxv16i32(<vscale x 16 x i32> splat (i32 2), <vscale x 16 x i32> %va, <vscale x 16 x i1> %m, i32 %evl)
822  ret <vscale x 16 x i32> %v
823}
824
825define <vscale x 16 x i32> @vrsub_vi_nxv16i32_unmasked(<vscale x 16 x i32> %va, i32 zeroext %evl) {
826; CHECK-LABEL: vrsub_vi_nxv16i32_unmasked:
827; CHECK:       # %bb.0:
828; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
829; CHECK-NEXT:    vrsub.vi v8, v8, 2
830; CHECK-NEXT:    ret
831  %v = call <vscale x 16 x i32> @llvm.vp.sub.nxv16i32(<vscale x 16 x i32> splat (i32 2), <vscale x 16 x i32> %va, <vscale x 16 x i1> splat (i1 true), i32 %evl)
832  ret <vscale x 16 x i32> %v
833}
834
835declare <vscale x 1 x i64> @llvm.vp.sub.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i1>, i32)
836
837define <vscale x 1 x i64> @vrsub_vx_nxv1i64(<vscale x 1 x i64> %va, i64 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
838; RV32-LABEL: vrsub_vx_nxv1i64:
839; RV32:       # %bb.0:
840; RV32-NEXT:    addi sp, sp, -16
841; RV32-NEXT:    .cfi_def_cfa_offset 16
842; RV32-NEXT:    sw a0, 8(sp)
843; RV32-NEXT:    sw a1, 12(sp)
844; RV32-NEXT:    addi a0, sp, 8
845; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
846; RV32-NEXT:    vlse64.v v9, (a0), zero
847; RV32-NEXT:    vsub.vv v8, v9, v8, v0.t
848; RV32-NEXT:    addi sp, sp, 16
849; RV32-NEXT:    .cfi_def_cfa_offset 0
850; RV32-NEXT:    ret
851;
852; RV64-LABEL: vrsub_vx_nxv1i64:
853; RV64:       # %bb.0:
854; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
855; RV64-NEXT:    vrsub.vx v8, v8, a0, v0.t
856; RV64-NEXT:    ret
857  %elt.head = insertelement <vscale x 1 x i64> poison, i64 %b, i32 0
858  %vb = shufflevector <vscale x 1 x i64> %elt.head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
859  %v = call <vscale x 1 x i64> @llvm.vp.sub.nxv1i64(<vscale x 1 x i64> %vb, <vscale x 1 x i64> %va, <vscale x 1 x i1> %m, i32 %evl)
860  ret <vscale x 1 x i64> %v
861}
862
863define <vscale x 1 x i64> @vrsub_vx_nxv1i64_unmasked(<vscale x 1 x i64> %va, i64 %b, i32 zeroext %evl) {
864; RV32-LABEL: vrsub_vx_nxv1i64_unmasked:
865; RV32:       # %bb.0:
866; RV32-NEXT:    addi sp, sp, -16
867; RV32-NEXT:    .cfi_def_cfa_offset 16
868; RV32-NEXT:    sw a0, 8(sp)
869; RV32-NEXT:    sw a1, 12(sp)
870; RV32-NEXT:    addi a0, sp, 8
871; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
872; RV32-NEXT:    vlse64.v v9, (a0), zero
873; RV32-NEXT:    vsub.vv v8, v9, v8
874; RV32-NEXT:    addi sp, sp, 16
875; RV32-NEXT:    .cfi_def_cfa_offset 0
876; RV32-NEXT:    ret
877;
878; RV64-LABEL: vrsub_vx_nxv1i64_unmasked:
879; RV64:       # %bb.0:
880; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
881; RV64-NEXT:    vrsub.vx v8, v8, a0
882; RV64-NEXT:    ret
883  %elt.head = insertelement <vscale x 1 x i64> poison, i64 %b, i32 0
884  %vb = shufflevector <vscale x 1 x i64> %elt.head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
885  %v = call <vscale x 1 x i64> @llvm.vp.sub.nxv1i64(<vscale x 1 x i64> %vb, <vscale x 1 x i64> %va, <vscale x 1 x i1> splat (i1 true), i32 %evl)
886  ret <vscale x 1 x i64> %v
887}
888
889define <vscale x 1 x i64> @vrsub_vi_nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
890; CHECK-LABEL: vrsub_vi_nxv1i64:
891; CHECK:       # %bb.0:
892; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
893; CHECK-NEXT:    vrsub.vi v8, v8, 2, v0.t
894; CHECK-NEXT:    ret
895  %v = call <vscale x 1 x i64> @llvm.vp.sub.nxv1i64(<vscale x 1 x i64> splat (i64 2), <vscale x 1 x i64> %va, <vscale x 1 x i1> %m, i32 %evl)
896  ret <vscale x 1 x i64> %v
897}
898
899define <vscale x 1 x i64> @vrsub_vi_nxv1i64_unmasked(<vscale x 1 x i64> %va, i32 zeroext %evl) {
900; CHECK-LABEL: vrsub_vi_nxv1i64_unmasked:
901; CHECK:       # %bb.0:
902; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
903; CHECK-NEXT:    vrsub.vi v8, v8, 2
904; CHECK-NEXT:    ret
905  %v = call <vscale x 1 x i64> @llvm.vp.sub.nxv1i64(<vscale x 1 x i64> splat (i64 2), <vscale x 1 x i64> %va, <vscale x 1 x i1> splat (i1 true), i32 %evl)
906  ret <vscale x 1 x i64> %v
907}
908
909declare <vscale x 2 x i64> @llvm.vp.sub.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i1>, i32)
910
911define <vscale x 2 x i64> @vrsub_vx_nxv2i64(<vscale x 2 x i64> %va, i64 %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
912; RV32-LABEL: vrsub_vx_nxv2i64:
913; RV32:       # %bb.0:
914; RV32-NEXT:    addi sp, sp, -16
915; RV32-NEXT:    .cfi_def_cfa_offset 16
916; RV32-NEXT:    sw a0, 8(sp)
917; RV32-NEXT:    sw a1, 12(sp)
918; RV32-NEXT:    addi a0, sp, 8
919; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
920; RV32-NEXT:    vlse64.v v10, (a0), zero
921; RV32-NEXT:    vsub.vv v8, v10, v8, v0.t
922; RV32-NEXT:    addi sp, sp, 16
923; RV32-NEXT:    .cfi_def_cfa_offset 0
924; RV32-NEXT:    ret
925;
926; RV64-LABEL: vrsub_vx_nxv2i64:
927; RV64:       # %bb.0:
928; RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
929; RV64-NEXT:    vrsub.vx v8, v8, a0, v0.t
930; RV64-NEXT:    ret
931  %elt.head = insertelement <vscale x 2 x i64> poison, i64 %b, i32 0
932  %vb = shufflevector <vscale x 2 x i64> %elt.head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
933  %v = call <vscale x 2 x i64> @llvm.vp.sub.nxv2i64(<vscale x 2 x i64> %vb, <vscale x 2 x i64> %va, <vscale x 2 x i1> %m, i32 %evl)
934  ret <vscale x 2 x i64> %v
935}
936
937define <vscale x 2 x i64> @vrsub_vx_nxv2i64_unmasked(<vscale x 2 x i64> %va, i64 %b, i32 zeroext %evl) {
938; RV32-LABEL: vrsub_vx_nxv2i64_unmasked:
939; RV32:       # %bb.0:
940; RV32-NEXT:    addi sp, sp, -16
941; RV32-NEXT:    .cfi_def_cfa_offset 16
942; RV32-NEXT:    sw a0, 8(sp)
943; RV32-NEXT:    sw a1, 12(sp)
944; RV32-NEXT:    addi a0, sp, 8
945; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
946; RV32-NEXT:    vlse64.v v10, (a0), zero
947; RV32-NEXT:    vsub.vv v8, v10, v8
948; RV32-NEXT:    addi sp, sp, 16
949; RV32-NEXT:    .cfi_def_cfa_offset 0
950; RV32-NEXT:    ret
951;
952; RV64-LABEL: vrsub_vx_nxv2i64_unmasked:
953; RV64:       # %bb.0:
954; RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
955; RV64-NEXT:    vrsub.vx v8, v8, a0
956; RV64-NEXT:    ret
957  %elt.head = insertelement <vscale x 2 x i64> poison, i64 %b, i32 0
958  %vb = shufflevector <vscale x 2 x i64> %elt.head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
959  %v = call <vscale x 2 x i64> @llvm.vp.sub.nxv2i64(<vscale x 2 x i64> %vb, <vscale x 2 x i64> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
960  ret <vscale x 2 x i64> %v
961}
962
963define <vscale x 2 x i64> @vrsub_vi_nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
964; CHECK-LABEL: vrsub_vi_nxv2i64:
965; CHECK:       # %bb.0:
966; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
967; CHECK-NEXT:    vrsub.vi v8, v8, 2, v0.t
968; CHECK-NEXT:    ret
969  %v = call <vscale x 2 x i64> @llvm.vp.sub.nxv2i64(<vscale x 2 x i64> splat (i64 2), <vscale x 2 x i64> %va, <vscale x 2 x i1> %m, i32 %evl)
970  ret <vscale x 2 x i64> %v
971}
972
973define <vscale x 2 x i64> @vrsub_vi_nxv2i64_unmasked(<vscale x 2 x i64> %va, i32 zeroext %evl) {
974; CHECK-LABEL: vrsub_vi_nxv2i64_unmasked:
975; CHECK:       # %bb.0:
976; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
977; CHECK-NEXT:    vrsub.vi v8, v8, 2
978; CHECK-NEXT:    ret
979  %v = call <vscale x 2 x i64> @llvm.vp.sub.nxv2i64(<vscale x 2 x i64> splat (i64 2), <vscale x 2 x i64> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
980  ret <vscale x 2 x i64> %v
981}
982
983declare <vscale x 4 x i64> @llvm.vp.sub.nxv4i64(<vscale x 4 x i64>, <vscale x 4 x i64>, <vscale x 4 x i1>, i32)
984
985define <vscale x 4 x i64> @vrsub_vx_nxv4i64(<vscale x 4 x i64> %va, i64 %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
986; RV32-LABEL: vrsub_vx_nxv4i64:
987; RV32:       # %bb.0:
988; RV32-NEXT:    addi sp, sp, -16
989; RV32-NEXT:    .cfi_def_cfa_offset 16
990; RV32-NEXT:    sw a0, 8(sp)
991; RV32-NEXT:    sw a1, 12(sp)
992; RV32-NEXT:    addi a0, sp, 8
993; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
994; RV32-NEXT:    vlse64.v v12, (a0), zero
995; RV32-NEXT:    vsub.vv v8, v12, v8, v0.t
996; RV32-NEXT:    addi sp, sp, 16
997; RV32-NEXT:    .cfi_def_cfa_offset 0
998; RV32-NEXT:    ret
999;
1000; RV64-LABEL: vrsub_vx_nxv4i64:
1001; RV64:       # %bb.0:
1002; RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
1003; RV64-NEXT:    vrsub.vx v8, v8, a0, v0.t
1004; RV64-NEXT:    ret
1005  %elt.head = insertelement <vscale x 4 x i64> poison, i64 %b, i32 0
1006  %vb = shufflevector <vscale x 4 x i64> %elt.head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
1007  %v = call <vscale x 4 x i64> @llvm.vp.sub.nxv4i64(<vscale x 4 x i64> %vb, <vscale x 4 x i64> %va, <vscale x 4 x i1> %m, i32 %evl)
1008  ret <vscale x 4 x i64> %v
1009}
1010
1011define <vscale x 4 x i64> @vrsub_vx_nxv4i64_unmasked(<vscale x 4 x i64> %va, i64 %b, i32 zeroext %evl) {
1012; RV32-LABEL: vrsub_vx_nxv4i64_unmasked:
1013; RV32:       # %bb.0:
1014; RV32-NEXT:    addi sp, sp, -16
1015; RV32-NEXT:    .cfi_def_cfa_offset 16
1016; RV32-NEXT:    sw a0, 8(sp)
1017; RV32-NEXT:    sw a1, 12(sp)
1018; RV32-NEXT:    addi a0, sp, 8
1019; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
1020; RV32-NEXT:    vlse64.v v12, (a0), zero
1021; RV32-NEXT:    vsub.vv v8, v12, v8
1022; RV32-NEXT:    addi sp, sp, 16
1023; RV32-NEXT:    .cfi_def_cfa_offset 0
1024; RV32-NEXT:    ret
1025;
1026; RV64-LABEL: vrsub_vx_nxv4i64_unmasked:
1027; RV64:       # %bb.0:
1028; RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
1029; RV64-NEXT:    vrsub.vx v8, v8, a0
1030; RV64-NEXT:    ret
1031  %elt.head = insertelement <vscale x 4 x i64> poison, i64 %b, i32 0
1032  %vb = shufflevector <vscale x 4 x i64> %elt.head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
1033  %v = call <vscale x 4 x i64> @llvm.vp.sub.nxv4i64(<vscale x 4 x i64> %vb, <vscale x 4 x i64> %va, <vscale x 4 x i1> splat (i1 true), i32 %evl)
1034  ret <vscale x 4 x i64> %v
1035}
1036
1037define <vscale x 4 x i64> @vrsub_vi_nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
1038; CHECK-LABEL: vrsub_vi_nxv4i64:
1039; CHECK:       # %bb.0:
1040; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
1041; CHECK-NEXT:    vrsub.vi v8, v8, 2, v0.t
1042; CHECK-NEXT:    ret
1043  %v = call <vscale x 4 x i64> @llvm.vp.sub.nxv4i64(<vscale x 4 x i64> splat (i64 2), <vscale x 4 x i64> %va, <vscale x 4 x i1> %m, i32 %evl)
1044  ret <vscale x 4 x i64> %v
1045}
1046
1047define <vscale x 4 x i64> @vrsub_vi_nxv4i64_unmasked(<vscale x 4 x i64> %va, i32 zeroext %evl) {
1048; CHECK-LABEL: vrsub_vi_nxv4i64_unmasked:
1049; CHECK:       # %bb.0:
1050; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
1051; CHECK-NEXT:    vrsub.vi v8, v8, 2
1052; CHECK-NEXT:    ret
1053  %v = call <vscale x 4 x i64> @llvm.vp.sub.nxv4i64(<vscale x 4 x i64> splat (i64 2), <vscale x 4 x i64> %va, <vscale x 4 x i1> splat (i1 true), i32 %evl)
1054  ret <vscale x 4 x i64> %v
1055}
1056
1057declare <vscale x 8 x i64> @llvm.vp.sub.nxv8i64(<vscale x 8 x i64>, <vscale x 8 x i64>, <vscale x 8 x i1>, i32)
1058
1059define <vscale x 8 x i64> @vrsub_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
1060; RV32-LABEL: vrsub_vx_nxv8i64:
1061; RV32:       # %bb.0:
1062; RV32-NEXT:    addi sp, sp, -16
1063; RV32-NEXT:    .cfi_def_cfa_offset 16
1064; RV32-NEXT:    sw a0, 8(sp)
1065; RV32-NEXT:    sw a1, 12(sp)
1066; RV32-NEXT:    addi a0, sp, 8
1067; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
1068; RV32-NEXT:    vlse64.v v16, (a0), zero
1069; RV32-NEXT:    vsub.vv v8, v16, v8, v0.t
1070; RV32-NEXT:    addi sp, sp, 16
1071; RV32-NEXT:    .cfi_def_cfa_offset 0
1072; RV32-NEXT:    ret
1073;
1074; RV64-LABEL: vrsub_vx_nxv8i64:
1075; RV64:       # %bb.0:
1076; RV64-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
1077; RV64-NEXT:    vrsub.vx v8, v8, a0, v0.t
1078; RV64-NEXT:    ret
1079  %elt.head = insertelement <vscale x 8 x i64> poison, i64 %b, i32 0
1080  %vb = shufflevector <vscale x 8 x i64> %elt.head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
1081  %v = call <vscale x 8 x i64> @llvm.vp.sub.nxv8i64(<vscale x 8 x i64> %vb, <vscale x 8 x i64> %va, <vscale x 8 x i1> %m, i32 %evl)
1082  ret <vscale x 8 x i64> %v
1083}
1084
1085define <vscale x 8 x i64> @vrsub_vx_nxv8i64_unmasked(<vscale x 8 x i64> %va, i64 %b, i32 zeroext %evl) {
1086; RV32-LABEL: vrsub_vx_nxv8i64_unmasked:
1087; RV32:       # %bb.0:
1088; RV32-NEXT:    addi sp, sp, -16
1089; RV32-NEXT:    .cfi_def_cfa_offset 16
1090; RV32-NEXT:    sw a0, 8(sp)
1091; RV32-NEXT:    sw a1, 12(sp)
1092; RV32-NEXT:    addi a0, sp, 8
1093; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
1094; RV32-NEXT:    vlse64.v v16, (a0), zero
1095; RV32-NEXT:    vsub.vv v8, v16, v8
1096; RV32-NEXT:    addi sp, sp, 16
1097; RV32-NEXT:    .cfi_def_cfa_offset 0
1098; RV32-NEXT:    ret
1099;
1100; RV64-LABEL: vrsub_vx_nxv8i64_unmasked:
1101; RV64:       # %bb.0:
1102; RV64-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
1103; RV64-NEXT:    vrsub.vx v8, v8, a0
1104; RV64-NEXT:    ret
1105  %elt.head = insertelement <vscale x 8 x i64> poison, i64 %b, i32 0
1106  %vb = shufflevector <vscale x 8 x i64> %elt.head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
1107  %v = call <vscale x 8 x i64> @llvm.vp.sub.nxv8i64(<vscale x 8 x i64> %vb, <vscale x 8 x i64> %va, <vscale x 8 x i1> splat (i1 true), i32 %evl)
1108  ret <vscale x 8 x i64> %v
1109}
1110
1111define <vscale x 8 x i64> @vrsub_vi_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
1112; CHECK-LABEL: vrsub_vi_nxv8i64:
1113; CHECK:       # %bb.0:
1114; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
1115; CHECK-NEXT:    vrsub.vi v8, v8, 2, v0.t
1116; CHECK-NEXT:    ret
1117  %v = call <vscale x 8 x i64> @llvm.vp.sub.nxv8i64(<vscale x 8 x i64> splat (i64 2), <vscale x 8 x i64> %va, <vscale x 8 x i1> %m, i32 %evl)
1118  ret <vscale x 8 x i64> %v
1119}
1120
1121define <vscale x 8 x i64> @vrsub_vi_nxv8i64_unmasked(<vscale x 8 x i64> %va, i32 zeroext %evl) {
1122; CHECK-LABEL: vrsub_vi_nxv8i64_unmasked:
1123; CHECK:       # %bb.0:
1124; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
1125; CHECK-NEXT:    vrsub.vi v8, v8, 2
1126; CHECK-NEXT:    ret
1127  %v = call <vscale x 8 x i64> @llvm.vp.sub.nxv8i64(<vscale x 8 x i64> splat (i64 2), <vscale x 8 x i64> %va, <vscale x 8 x i1> splat (i1 true), i32 %evl)
1128  ret <vscale x 8 x i64> %v
1129}
1130