; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s \
; RUN:   | FileCheck %s --check-prefixes=CHECK,RV32
; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \
; RUN:   | FileCheck %s --check-prefixes=CHECK,RV64

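; Summary of what the generated checks below show: @llvm.vp.sub.* lowers to
; vsub.vv / vsub.vx with the EVL operand passed through vsetvli, in masked and
; unmasked forms, across scalable integer types (including the non-byte-sized
; nxv8i7 and non-power-of-two nxv5i8 cases). On RV32, i64 scalar operands are
; splatted via a stack slot and vlse64.v instead of vsub.vx.
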
declare <vscale x 8 x i7> @llvm.vp.sub.nxv8i7(<vscale x 8 x i7>, <vscale x 8 x i7>, <vscale x 8 x i1>, i32)

define <vscale x 8 x i7> @vsub_vx_nxv8i7(<vscale x 8 x i7> %a, i7 signext %b, <vscale x 8 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vx_nxv8i7:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT:    vsub.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 8 x i7> poison, i7 %b, i32 0
  %vb = shufflevector <vscale x 8 x i7> %elt.head, <vscale x 8 x i7> poison, <vscale x 8 x i32> zeroinitializer
  %v = call <vscale x 8 x i7> @llvm.vp.sub.nxv8i7(<vscale x 8 x i7> %a, <vscale x 8 x i7> %vb, <vscale x 8 x i1> %mask, i32 %evl)
  ret <vscale x 8 x i7> %v
}

declare <vscale x 1 x i8> @llvm.vp.sub.nxv1i8(<vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i1>, i32)

define <vscale x 1 x i8> @vsub_vv_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vv_nxv1i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vsub.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 1 x i8> @llvm.vp.sub.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x i8> %v
}

define <vscale x 1 x i8> @vsub_vv_nxv1i8_unmasked(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vv_nxv1i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vsub.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <vscale x 1 x i8> @llvm.vp.sub.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 1 x i8> %v
}

define <vscale x 1 x i8> @vsub_vx_nxv1i8(<vscale x 1 x i8> %va, i8 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vx_nxv1i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vsub.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 1 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 1 x i8> %elt.head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
  %v = call <vscale x 1 x i8> @llvm.vp.sub.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb, <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x i8> %v
}

define <vscale x 1 x i8> @vsub_vx_nxv1i8_unmasked(<vscale x 1 x i8> %va, i8 %b, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vx_nxv1i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vsub.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 1 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 1 x i8> %elt.head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
  %v = call <vscale x 1 x i8> @llvm.vp.sub.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 1 x i8> %v
}

declare <vscale x 2 x i8> @llvm.vp.sub.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i1>, i32)

define <vscale x 2 x i8> @vsub_vv_nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vv_nxv2i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT:    vsub.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i8> @llvm.vp.sub.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %b, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i8> %v
}

define <vscale x 2 x i8> @vsub_vv_nxv2i8_unmasked(<vscale x 2 x i8> %va, <vscale x 2 x i8> %b, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vv_nxv2i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT:    vsub.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i8> @llvm.vp.sub.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x i8> %v
}

define <vscale x 2 x i8> @vsub_vx_nxv2i8(<vscale x 2 x i8> %va, i8 %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vx_nxv2i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT:    vsub.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 2 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 2 x i8> %elt.head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
  %v = call <vscale x 2 x i8> @llvm.vp.sub.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i8> %v
}

define <vscale x 2 x i8> @vsub_vx_nxv2i8_unmasked(<vscale x 2 x i8> %va, i8 %b, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vx_nxv2i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT:    vsub.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 2 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 2 x i8> %elt.head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
  %v = call <vscale x 2 x i8> @llvm.vp.sub.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x i8> %v
}

declare <vscale x 4 x i8> @llvm.vp.sub.nxv4i8(<vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i1>, i32)

define <vscale x 4 x i8> @vsub_vv_nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vv_nxv4i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    vsub.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 4 x i8> @llvm.vp.sub.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b, <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x i8> %v
}

define <vscale x 4 x i8> @vsub_vv_nxv4i8_unmasked(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vv_nxv4i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    vsub.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <vscale x 4 x i8> @llvm.vp.sub.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 4 x i8> %v
}

define <vscale x 4 x i8> @vsub_vx_nxv4i8(<vscale x 4 x i8> %va, i8 %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vx_nxv4i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vsub.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 4 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 4 x i8> %elt.head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
  %v = call <vscale x 4 x i8> @llvm.vp.sub.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb, <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x i8> %v
}

define <vscale x 4 x i8> @vsub_vx_nxv4i8_unmasked(<vscale x 4 x i8> %va, i8 %b, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vx_nxv4i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vsub.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 4 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 4 x i8> %elt.head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
  %v = call <vscale x 4 x i8> @llvm.vp.sub.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 4 x i8> %v
}

declare <vscale x 5 x i8> @llvm.vp.sub.nxv5i8(<vscale x 5 x i8>, <vscale x 5 x i8>, <vscale x 5 x i1>, i32)

define <vscale x 5 x i8> @vsub_vv_nxv5i8(<vscale x 5 x i8> %va, <vscale x 5 x i8> %b, <vscale x 5 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vv_nxv5i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT:    vsub.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 5 x i8> @llvm.vp.sub.nxv5i8(<vscale x 5 x i8> %va, <vscale x 5 x i8> %b, <vscale x 5 x i1> %m, i32 %evl)
  ret <vscale x 5 x i8> %v
}

define <vscale x 5 x i8> @vsub_vv_nxv5i8_unmasked(<vscale x 5 x i8> %va, <vscale x 5 x i8> %b, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vv_nxv5i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT:    vsub.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <vscale x 5 x i8> @llvm.vp.sub.nxv5i8(<vscale x 5 x i8> %va, <vscale x 5 x i8> %b, <vscale x 5 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 5 x i8> %v
}

define <vscale x 5 x i8> @vsub_vx_nxv5i8(<vscale x 5 x i8> %va, i8 %b, <vscale x 5 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vx_nxv5i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT:    vsub.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 5 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 5 x i8> %elt.head, <vscale x 5 x i8> poison, <vscale x 5 x i32> zeroinitializer
  %v = call <vscale x 5 x i8> @llvm.vp.sub.nxv5i8(<vscale x 5 x i8> %va, <vscale x 5 x i8> %vb, <vscale x 5 x i1> %m, i32 %evl)
  ret <vscale x 5 x i8> %v
}

define <vscale x 5 x i8> @vsub_vx_nxv5i8_unmasked(<vscale x 5 x i8> %va, i8 %b, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vx_nxv5i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT:    vsub.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 5 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 5 x i8> %elt.head, <vscale x 5 x i8> poison, <vscale x 5 x i32> zeroinitializer
  %v = call <vscale x 5 x i8> @llvm.vp.sub.nxv5i8(<vscale x 5 x i8> %va, <vscale x 5 x i8> %vb, <vscale x 5 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 5 x i8> %v
}

declare <vscale x 8 x i8> @llvm.vp.sub.nxv8i8(<vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i1>, i32)

define <vscale x 8 x i8> @vsub_vv_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vv_nxv8i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT:    vsub.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 8 x i8> @llvm.vp.sub.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %b, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x i8> %v
}

define <vscale x 8 x i8> @vsub_vv_nxv8i8_unmasked(<vscale x 8 x i8> %va, <vscale x 8 x i8> %b, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vv_nxv8i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT:    vsub.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <vscale x 8 x i8> @llvm.vp.sub.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 8 x i8> %v
}

define <vscale x 8 x i8> @vsub_vx_nxv8i8(<vscale x 8 x i8> %va, i8 %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vx_nxv8i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT:    vsub.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 8 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 8 x i8> %elt.head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
  %v = call <vscale x 8 x i8> @llvm.vp.sub.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x i8> %v
}

define <vscale x 8 x i8> @vsub_vx_nxv8i8_unmasked(<vscale x 8 x i8> %va, i8 %b, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vx_nxv8i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT:    vsub.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 8 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 8 x i8> %elt.head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
  %v = call <vscale x 8 x i8> @llvm.vp.sub.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 8 x i8> %v
}

declare <vscale x 16 x i8> @llvm.vp.sub.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i1>, i32)

define <vscale x 16 x i8> @vsub_vv_nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vv_nxv16i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT:    vsub.vv v8, v8, v10, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 16 x i8> @llvm.vp.sub.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %b, <vscale x 16 x i1> %m, i32 %evl)
  ret <vscale x 16 x i8> %v
}

define <vscale x 16 x i8> @vsub_vv_nxv16i8_unmasked(<vscale x 16 x i8> %va, <vscale x 16 x i8> %b, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vv_nxv16i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT:    vsub.vv v8, v8, v10
; CHECK-NEXT:    ret
  %v = call <vscale x 16 x i8> @llvm.vp.sub.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %b, <vscale x 16 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 16 x i8> %v
}

define <vscale x 16 x i8> @vsub_vx_nxv16i8(<vscale x 16 x i8> %va, i8 %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vx_nxv16i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT:    vsub.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 16 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 16 x i8> %elt.head, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
  %v = call <vscale x 16 x i8> @llvm.vp.sub.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %vb, <vscale x 16 x i1> %m, i32 %evl)
  ret <vscale x 16 x i8> %v
}

define <vscale x 16 x i8> @vsub_vx_nxv16i8_unmasked(<vscale x 16 x i8> %va, i8 %b, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vx_nxv16i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT:    vsub.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 16 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 16 x i8> %elt.head, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
  %v = call <vscale x 16 x i8> @llvm.vp.sub.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 16 x i8> %v
}

declare <vscale x 32 x i8> @llvm.vp.sub.nxv32i8(<vscale x 32 x i8>, <vscale x 32 x i8>, <vscale x 32 x i1>, i32)

define <vscale x 32 x i8> @vsub_vv_nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vv_nxv32i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT:    vsub.vv v8, v8, v12, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 32 x i8> @llvm.vp.sub.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %b, <vscale x 32 x i1> %m, i32 %evl)
  ret <vscale x 32 x i8> %v
}

define <vscale x 32 x i8> @vsub_vv_nxv32i8_unmasked(<vscale x 32 x i8> %va, <vscale x 32 x i8> %b, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vv_nxv32i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT:    vsub.vv v8, v8, v12
; CHECK-NEXT:    ret
  %v = call <vscale x 32 x i8> @llvm.vp.sub.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %b, <vscale x 32 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 32 x i8> %v
}

define <vscale x 32 x i8> @vsub_vx_nxv32i8(<vscale x 32 x i8> %va, i8 %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vx_nxv32i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT:    vsub.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 32 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 32 x i8> %elt.head, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
  %v = call <vscale x 32 x i8> @llvm.vp.sub.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %vb, <vscale x 32 x i1> %m, i32 %evl)
  ret <vscale x 32 x i8> %v
}

define <vscale x 32 x i8> @vsub_vx_nxv32i8_unmasked(<vscale x 32 x i8> %va, i8 %b, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vx_nxv32i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT:    vsub.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 32 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 32 x i8> %elt.head, <vscale x 32 x i8> poison, <vscale x 32 x i32> zeroinitializer
  %v = call <vscale x 32 x i8> @llvm.vp.sub.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %vb, <vscale x 32 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 32 x i8> %v
}

declare <vscale x 64 x i8> @llvm.vp.sub.nxv64i8(<vscale x 64 x i8>, <vscale x 64 x i8>, <vscale x 64 x i1>, i32)

define <vscale x 64 x i8> @vsub_vv_nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %b, <vscale x 64 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vv_nxv64i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT:    vsub.vv v8, v8, v16, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 64 x i8> @llvm.vp.sub.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %b, <vscale x 64 x i1> %m, i32 %evl)
  ret <vscale x 64 x i8> %v
}

define <vscale x 64 x i8> @vsub_vv_nxv64i8_unmasked(<vscale x 64 x i8> %va, <vscale x 64 x i8> %b, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vv_nxv64i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT:    vsub.vv v8, v8, v16
; CHECK-NEXT:    ret
  %v = call <vscale x 64 x i8> @llvm.vp.sub.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %b, <vscale x 64 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 64 x i8> %v
}

define <vscale x 64 x i8> @vsub_vx_nxv64i8(<vscale x 64 x i8> %va, i8 %b, <vscale x 64 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vx_nxv64i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT:    vsub.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 64 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 64 x i8> %elt.head, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
  %v = call <vscale x 64 x i8> @llvm.vp.sub.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %vb, <vscale x 64 x i1> %m, i32 %evl)
  ret <vscale x 64 x i8> %v
}

define <vscale x 64 x i8> @vsub_vx_nxv64i8_unmasked(<vscale x 64 x i8> %va, i8 %b, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vx_nxv64i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT:    vsub.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 64 x i8> poison, i8 %b, i32 0
  %vb = shufflevector <vscale x 64 x i8> %elt.head, <vscale x 64 x i8> poison, <vscale x 64 x i32> zeroinitializer
  %v = call <vscale x 64 x i8> @llvm.vp.sub.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %vb, <vscale x 64 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 64 x i8> %v
}

declare <vscale x 1 x i16> @llvm.vp.sub.nxv1i16(<vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i1>, i32)

define <vscale x 1 x i16> @vsub_vv_nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vv_nxv1i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vsub.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 1 x i16> @llvm.vp.sub.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %b, <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x i16> %v
}

define <vscale x 1 x i16> @vsub_vv_nxv1i16_unmasked(<vscale x 1 x i16> %va, <vscale x 1 x i16> %b, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vv_nxv1i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vsub.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <vscale x 1 x i16> @llvm.vp.sub.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 1 x i16> %v
}

define <vscale x 1 x i16> @vsub_vx_nxv1i16(<vscale x 1 x i16> %va, i16 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vx_nxv1i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vsub.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 1 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <vscale x 1 x i16> %elt.head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
  %v = call <vscale x 1 x i16> @llvm.vp.sub.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb, <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x i16> %v
}

define <vscale x 1 x i16> @vsub_vx_nxv1i16_unmasked(<vscale x 1 x i16> %va, i16 %b, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vx_nxv1i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vsub.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 1 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <vscale x 1 x i16> %elt.head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
  %v = call <vscale x 1 x i16> @llvm.vp.sub.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 1 x i16> %v
}

declare <vscale x 2 x i16> @llvm.vp.sub.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i1>, i32)

define <vscale x 2 x i16> @vsub_vv_nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vv_nxv2i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vsub.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i16> @llvm.vp.sub.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %b, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i16> %v
}

define <vscale x 2 x i16> @vsub_vv_nxv2i16_unmasked(<vscale x 2 x i16> %va, <vscale x 2 x i16> %b, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vv_nxv2i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vsub.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i16> @llvm.vp.sub.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x i16> %v
}

define <vscale x 2 x i16> @vsub_vx_nxv2i16(<vscale x 2 x i16> %va, i16 %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vx_nxv2i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT:    vsub.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 2 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <vscale x 2 x i16> %elt.head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
  %v = call <vscale x 2 x i16> @llvm.vp.sub.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i16> %v
}

define <vscale x 2 x i16> @vsub_vx_nxv2i16_unmasked(<vscale x 2 x i16> %va, i16 %b, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vx_nxv2i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT:    vsub.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 2 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <vscale x 2 x i16> %elt.head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
  %v = call <vscale x 2 x i16> @llvm.vp.sub.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x i16> %v
}

declare <vscale x 4 x i16> @llvm.vp.sub.nxv4i16(<vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i1>, i32)

define <vscale x 4 x i16> @vsub_vv_nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vv_nxv4i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vsub.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 4 x i16> @llvm.vp.sub.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %b, <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x i16> %v
}

define <vscale x 4 x i16> @vsub_vv_nxv4i16_unmasked(<vscale x 4 x i16> %va, <vscale x 4 x i16> %b, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vv_nxv4i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vsub.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <vscale x 4 x i16> @llvm.vp.sub.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 4 x i16> %v
}

define <vscale x 4 x i16> @vsub_vx_nxv4i16(<vscale x 4 x i16> %va, i16 %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vx_nxv4i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vsub.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 4 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <vscale x 4 x i16> %elt.head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
  %v = call <vscale x 4 x i16> @llvm.vp.sub.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb, <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x i16> %v
}

define <vscale x 4 x i16> @vsub_vx_nxv4i16_unmasked(<vscale x 4 x i16> %va, i16 %b, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vx_nxv4i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vsub.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 4 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <vscale x 4 x i16> %elt.head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
  %v = call <vscale x 4 x i16> @llvm.vp.sub.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 4 x i16> %v
}

declare <vscale x 8 x i16> @llvm.vp.sub.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i1>, i32)

define <vscale x 8 x i16> @vsub_vv_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vv_nxv8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vsub.vv v8, v8, v10, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 8 x i16> @llvm.vp.sub.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %b, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x i16> %v
}

define <vscale x 8 x i16> @vsub_vv_nxv8i16_unmasked(<vscale x 8 x i16> %va, <vscale x 8 x i16> %b, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vv_nxv8i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vsub.vv v8, v8, v10
; CHECK-NEXT:    ret
  %v = call <vscale x 8 x i16> @llvm.vp.sub.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 8 x i16> %v
}

define <vscale x 8 x i16> @vsub_vx_nxv8i16(<vscale x 8 x i16> %va, i16 %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vx_nxv8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT:    vsub.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 8 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <vscale x 8 x i16> %elt.head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
  %v = call <vscale x 8 x i16> @llvm.vp.sub.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x i16> %v
}

define <vscale x 8 x i16> @vsub_vx_nxv8i16_unmasked(<vscale x 8 x i16> %va, i16 %b, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vx_nxv8i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT:    vsub.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 8 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <vscale x 8 x i16> %elt.head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
  %v = call <vscale x 8 x i16> @llvm.vp.sub.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 8 x i16> %v
}

declare <vscale x 16 x i16> @llvm.vp.sub.nxv16i16(<vscale x 16 x i16>, <vscale x 16 x i16>, <vscale x 16 x i1>, i32)

define <vscale x 16 x i16> @vsub_vv_nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vv_nxv16i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    vsub.vv v8, v8, v12, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 16 x i16> @llvm.vp.sub.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %b, <vscale x 16 x i1> %m, i32 %evl)
  ret <vscale x 16 x i16> %v
}

define <vscale x 16 x i16> @vsub_vv_nxv16i16_unmasked(<vscale x 16 x i16> %va, <vscale x 16 x i16> %b, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vv_nxv16i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    vsub.vv v8, v8, v12
; CHECK-NEXT:    ret
  %v = call <vscale x 16 x i16> @llvm.vp.sub.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %b, <vscale x 16 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 16 x i16> %v
}

define <vscale x 16 x i16> @vsub_vx_nxv16i16(<vscale x 16 x i16> %va, i16 %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vx_nxv16i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT:    vsub.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 16 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <vscale x 16 x i16> %elt.head, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
  %v = call <vscale x 16 x i16> @llvm.vp.sub.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %vb, <vscale x 16 x i1> %m, i32 %evl)
  ret <vscale x 16 x i16> %v
}

define <vscale x 16 x i16> @vsub_vx_nxv16i16_unmasked(<vscale x 16 x i16> %va, i16 %b, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vx_nxv16i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT:    vsub.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 16 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <vscale x 16 x i16> %elt.head, <vscale x 16 x i16> poison, <vscale x 16 x i32> zeroinitializer
  %v = call <vscale x 16 x i16> @llvm.vp.sub.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 16 x i16> %v
}

declare <vscale x 32 x i16> @llvm.vp.sub.nxv32i16(<vscale x 32 x i16>, <vscale x 32 x i16>, <vscale x 32 x i1>, i32)

define <vscale x 32 x i16> @vsub_vv_nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vv_nxv32i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT:    vsub.vv v8, v8, v16, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 32 x i16> @llvm.vp.sub.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %b, <vscale x 32 x i1> %m, i32 %evl)
  ret <vscale x 32 x i16> %v
}

define <vscale x 32 x i16> @vsub_vv_nxv32i16_unmasked(<vscale x 32 x i16> %va, <vscale x 32 x i16> %b, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vv_nxv32i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT:    vsub.vv v8, v8, v16
; CHECK-NEXT:    ret
  %v = call <vscale x 32 x i16> @llvm.vp.sub.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %b, <vscale x 32 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 32 x i16> %v
}

define <vscale x 32 x i16> @vsub_vx_nxv32i16(<vscale x 32 x i16> %va, i16 %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vx_nxv32i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
; CHECK-NEXT:    vsub.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 32 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <vscale x 32 x i16> %elt.head, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
  %v = call <vscale x 32 x i16> @llvm.vp.sub.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %vb, <vscale x 32 x i1> %m, i32 %evl)
  ret <vscale x 32 x i16> %v
}

define <vscale x 32 x i16> @vsub_vx_nxv32i16_unmasked(<vscale x 32 x i16> %va, i16 %b, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vx_nxv32i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
; CHECK-NEXT:    vsub.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 32 x i16> poison, i16 %b, i32 0
  %vb = shufflevector <vscale x 32 x i16> %elt.head, <vscale x 32 x i16> poison, <vscale x 32 x i32> zeroinitializer
  %v = call <vscale x 32 x i16> @llvm.vp.sub.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %vb, <vscale x 32 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 32 x i16> %v
}

declare <vscale x 1 x i32> @llvm.vp.sub.nxv1i32(<vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i1>, i32)

define <vscale x 1 x i32> @vsub_vv_nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vv_nxv1i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vsub.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 1 x i32> @llvm.vp.sub.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %b, <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x i32> %v
}

define <vscale x 1 x i32> @vsub_vv_nxv1i32_unmasked(<vscale x 1 x i32> %va, <vscale x 1 x i32> %b, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vv_nxv1i32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vsub.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <vscale x 1 x i32> @llvm.vp.sub.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 1 x i32> %v
}

define <vscale x 1 x i32> @vsub_vx_nxv1i32(<vscale x 1 x i32> %va, i32 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vx_nxv1i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    vsub.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 1 x i32> poison, i32 %b, i32 0
  %vb = shufflevector <vscale x 1 x i32> %elt.head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
  %v = call <vscale x 1 x i32> @llvm.vp.sub.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb, <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x i32> %v
}

define <vscale x 1 x i32> @vsub_vx_nxv1i32_unmasked(<vscale x 1 x i32> %va, i32 %b, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vx_nxv1i32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    vsub.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 1 x i32> poison, i32 %b, i32 0
  %vb = shufflevector <vscale x 1 x i32> %elt.head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
  %v = call <vscale x 1 x i32> @llvm.vp.sub.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 1 x i32> %v
}

declare <vscale x 2 x i32> @llvm.vp.sub.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i1>, i32)

define <vscale x 2 x i32> @vsub_vv_nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vv_nxv2i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vsub.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i32> @llvm.vp.sub.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %b, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i32> %v
}

define <vscale x 2 x i32> @vsub_vv_nxv2i32_unmasked(<vscale x 2 x i32> %va, <vscale x 2 x i32> %b, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vv_nxv2i32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vsub.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i32> @llvm.vp.sub.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x i32> %v
}

define <vscale x 2 x i32> @vsub_vx_nxv2i32(<vscale x 2 x i32> %va, i32 %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vx_nxv2i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vsub.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 2 x i32> poison, i32 %b, i32 0
  %vb = shufflevector <vscale x 2 x i32> %elt.head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
  %v = call <vscale x 2 x i32> @llvm.vp.sub.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i32> %v
}

define <vscale x 2 x i32> @vsub_vx_nxv2i32_unmasked(<vscale x 2 x i32> %va, i32 %b, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vx_nxv2i32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vsub.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 2 x i32> poison, i32 %b, i32 0
  %vb = shufflevector <vscale x 2 x i32> %elt.head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
  %v = call <vscale x 2 x i32> @llvm.vp.sub.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x i32> %v
}

declare <vscale x 4 x i32> @llvm.vp.sub.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i1>, i32)

define <vscale x 4 x i32> @vsub_vv_nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vv_nxv4i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vsub.vv v8, v8, v10, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 4 x i32> @llvm.vp.sub.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %b, <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x i32> %v
}

define <vscale x 4 x i32> @vsub_vv_nxv4i32_unmasked(<vscale x 4 x i32> %va, <vscale x 4 x i32> %b, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vv_nxv4i32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vsub.vv v8, v8, v10
; CHECK-NEXT:    ret
  %v = call <vscale x 4 x i32> @llvm.vp.sub.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 4 x i32> %v
}

define <vscale x 4 x i32> @vsub_vx_nxv4i32(<vscale x 4 x i32> %va, i32 %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vx_nxv4i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT:    vsub.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 4 x i32> poison, i32 %b, i32 0
  %vb = shufflevector <vscale x 4 x i32> %elt.head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
  %v = call <vscale x 4 x i32> @llvm.vp.sub.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %vb, <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x i32> %v
}

define <vscale x 4 x i32> @vsub_vx_nxv4i32_unmasked(<vscale x 4 x i32> %va, i32 %b, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vx_nxv4i32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT:    vsub.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 4 x i32> poison, i32 %b, i32 0
  %vb = shufflevector <vscale x 4 x i32> %elt.head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
  %v = call <vscale x 4 x i32> @llvm.vp.sub.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 4 x i32> %v
}

declare <vscale x 8 x i32> @llvm.vp.sub.nxv8i32(<vscale x 8 x i32>, <vscale x 8 x i32>, <vscale x 8 x i1>, i32)

define <vscale x 8 x i32> @vsub_vv_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vv_nxv8i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vsub.vv v8, v8, v12, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 8 x i32> @llvm.vp.sub.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %b, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x i32> %v
}

define <vscale x 8 x i32> @vsub_vv_nxv8i32_unmasked(<vscale x 8 x i32> %va, <vscale x 8 x i32> %b, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vv_nxv8i32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vsub.vv v8, v8, v12
; CHECK-NEXT:    ret
  %v = call <vscale x 8 x i32> @llvm.vp.sub.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 8 x i32> %v
}

define <vscale x 8 x i32> @vsub_vx_nxv8i32(<vscale x 8 x i32> %va, i32 %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vx_nxv8i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT:    vsub.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 8 x i32> poison, i32 %b, i32 0
  %vb = shufflevector <vscale x 8 x i32> %elt.head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
  %v = call <vscale x 8 x i32> @llvm.vp.sub.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x i32> %v
}

define <vscale x 8 x i32> @vsub_vx_nxv8i32_unmasked(<vscale x 8 x i32> %va, i32 %b, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vx_nxv8i32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT:    vsub.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 8 x i32> poison, i32 %b, i32 0
  %vb = shufflevector <vscale x 8 x i32> %elt.head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
  %v = call <vscale x 8 x i32> @llvm.vp.sub.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 8 x i32> %v
}

declare <vscale x 16 x i32> @llvm.vp.sub.nxv16i32(<vscale x 16 x i32>, <vscale x 16 x i32>, <vscale x 16 x i1>, i32)

define <vscale x 16 x i32> @vsub_vv_nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vv_nxv16i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT:    vsub.vv v8, v8, v16, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 16 x i32> @llvm.vp.sub.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %b, <vscale x 16 x i1> %m, i32 %evl)
  ret <vscale x 16 x i32> %v
}

define <vscale x 16 x i32> @vsub_vv_nxv16i32_unmasked(<vscale x 16 x i32> %va, <vscale x 16 x i32> %b, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vv_nxv16i32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT:    vsub.vv v8, v8, v16
; CHECK-NEXT:    ret
  %v = call <vscale x 16 x i32> @llvm.vp.sub.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %b, <vscale x 16 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 16 x i32> %v
}

define <vscale x 16 x i32> @vsub_vx_nxv16i32(<vscale x 16 x i32> %va, i32 %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vx_nxv16i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT:    vsub.vx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 16 x i32> poison, i32 %b, i32 0
  %vb = shufflevector <vscale x 16 x i32> %elt.head, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
  %v = call <vscale x 16 x i32> @llvm.vp.sub.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %vb, <vscale x 16 x i1> %m, i32 %evl)
  ret <vscale x 16 x i32> %v
}

define <vscale x 16 x i32> @vsub_vx_nxv16i32_unmasked(<vscale x 16 x i32> %va, i32 %b, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vx_nxv16i32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT:    vsub.vx v8, v8, a0
; CHECK-NEXT:    ret
  %elt.head = insertelement <vscale x 16 x i32> poison, i32 %b, i32 0
  %vb = shufflevector <vscale x 16 x i32> %elt.head, <vscale x 16 x i32> poison, <vscale x 16 x i32> zeroinitializer
  %v = call <vscale x 16 x i32> @llvm.vp.sub.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 16 x i32> %v
}

declare <vscale x 1 x i64> @llvm.vp.sub.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i1>, i32)

define <vscale x 1 x i64> @vsub_vv_nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vv_nxv1i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vsub.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 1 x i64> @llvm.vp.sub.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %b, <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x i64> %v
}

define <vscale x 1 x i64> @vsub_vv_nxv1i64_unmasked(<vscale x 1 x i64> %va, <vscale x 1 x i64> %b, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vv_nxv1i64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vsub.vv v8, v8, v9
; CHECK-NEXT:    ret
  %v = call <vscale x 1 x i64> @llvm.vp.sub.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 1 x i64> %v
}

define <vscale x 1 x i64> @vsub_vx_nxv1i64(<vscale x 1 x i64> %va, i64 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vsub_vx_nxv1i64:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    .cfi_def_cfa_offset 16
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
; RV32-NEXT:    vlse64.v v9, (a0), zero
; RV32-NEXT:    vsub.vv v8, v8, v9, v0.t
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    .cfi_def_cfa_offset 0
; RV32-NEXT:    ret
;
; RV64-LABEL: vsub_vx_nxv1i64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; RV64-NEXT:    vsub.vx v8, v8, a0, v0.t
; RV64-NEXT:    ret
  %elt.head = insertelement <vscale x 1 x i64> poison, i64 %b, i32 0
  %vb = shufflevector <vscale x 1 x i64> %elt.head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
  %v = call <vscale x 1 x i64> @llvm.vp.sub.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb, <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x i64> %v
}

define <vscale x 1 x i64> @vsub_vx_nxv1i64_unmasked(<vscale x 1 x i64> %va, i64 %b, i32 zeroext %evl) {
; RV32-LABEL: vsub_vx_nxv1i64_unmasked:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    .cfi_def_cfa_offset 16
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
; RV32-NEXT:    vlse64.v v9, (a0), zero
; RV32-NEXT:    vsub.vv v8, v8, v9
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    .cfi_def_cfa_offset 0
; RV32-NEXT:    ret
;
; RV64-LABEL: vsub_vx_nxv1i64_unmasked:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; RV64-NEXT:    vsub.vx v8, v8, a0
; RV64-NEXT:    ret
  %elt.head = insertelement <vscale x 1 x i64> poison, i64 %b, i32 0
  %vb = shufflevector <vscale x 1 x i64> %elt.head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
  %v = call <vscale x 1 x i64> @llvm.vp.sub.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 1 x i64> %v
}

declare <vscale x 2 x i64> @llvm.vp.sub.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i1>, i32)

define <vscale x 2 x i64> @vsub_vv_nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vv_nxv2i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vsub.vv v8, v8, v10, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i64> @llvm.vp.sub.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %b, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i64> %v
}

define <vscale x 2 x i64> @vsub_vv_nxv2i64_unmasked(<vscale x 2 x i64> %va, <vscale x 2 x i64> %b, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vv_nxv2i64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vsub.vv v8, v8, v10
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i64> @llvm.vp.sub.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x i64> %v
}

define <vscale x 2 x i64> @vsub_vx_nxv2i64(<vscale x 2 x i64> %va, i64 %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vsub_vx_nxv2i64:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    .cfi_def_cfa_offset 16
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
; RV32-NEXT:    vlse64.v v10, (a0), zero
; RV32-NEXT:    vsub.vv v8, v8, v10, v0.t
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    .cfi_def_cfa_offset 0
; RV32-NEXT:    ret
;
; RV64-LABEL: vsub_vx_nxv2i64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
; RV64-NEXT:    vsub.vx v8, v8, a0, v0.t
; RV64-NEXT:    ret
  %elt.head = insertelement <vscale x 2 x i64> poison, i64 %b, i32 0
  %vb = shufflevector <vscale x 2 x i64> %elt.head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
  %v = call <vscale x 2 x i64> @llvm.vp.sub.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i64> %v
}

define <vscale x 2 x i64> @vsub_vx_nxv2i64_unmasked(<vscale x 2 x i64> %va, i64 %b, i32 zeroext %evl) {
; RV32-LABEL: vsub_vx_nxv2i64_unmasked:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    .cfi_def_cfa_offset 16
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
; RV32-NEXT:    vlse64.v v10, (a0), zero
; RV32-NEXT:    vsub.vv v8, v8, v10
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    .cfi_def_cfa_offset 0
; RV32-NEXT:    ret
;
; RV64-LABEL: vsub_vx_nxv2i64_unmasked:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
; RV64-NEXT:    vsub.vx v8, v8, a0
; RV64-NEXT:    ret
  %elt.head = insertelement <vscale x 2 x i64> poison, i64 %b, i32 0
  %vb = shufflevector <vscale x 2 x i64> %elt.head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
  %v = call <vscale x 2 x i64> @llvm.vp.sub.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x i64> %v
}

declare <vscale x 4 x i64> @llvm.vp.sub.nxv4i64(<vscale x 4 x i64>, <vscale x 4 x i64>, <vscale x 4 x i1>, i32)

define <vscale x 4 x i64> @vsub_vv_nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vv_nxv4i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    vsub.vv v8, v8, v12, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 4 x i64> @llvm.vp.sub.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %b, <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x i64> %v
}

define <vscale x 4 x i64> @vsub_vv_nxv4i64_unmasked(<vscale x 4 x i64> %va, <vscale x 4 x i64> %b, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vv_nxv4i64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    vsub.vv v8, v8, v12
; CHECK-NEXT:    ret
  %v = call <vscale x 4 x i64> @llvm.vp.sub.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 4 x i64> %v
}

define <vscale x 4 x i64> @vsub_vx_nxv4i64(<vscale x 4 x i64> %va, i64 %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vsub_vx_nxv4i64:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    .cfi_def_cfa_offset 16
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
; RV32-NEXT:    vlse64.v v12, (a0), zero
; RV32-NEXT:    vsub.vv v8, v8, v12, v0.t
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    .cfi_def_cfa_offset 0
; RV32-NEXT:    ret
;
; RV64-LABEL: vsub_vx_nxv4i64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
; RV64-NEXT:    vsub.vx v8, v8, a0, v0.t
; RV64-NEXT:    ret
  %elt.head = insertelement <vscale x 4 x i64> poison, i64 %b, i32 0
  %vb = shufflevector <vscale x 4 x i64> %elt.head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
  %v = call <vscale x 4 x i64> @llvm.vp.sub.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %vb, <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x i64> %v
}

define <vscale x 4 x i64> @vsub_vx_nxv4i64_unmasked(<vscale x 4 x i64> %va, i64 %b, i32 zeroext %evl) {
; RV32-LABEL: vsub_vx_nxv4i64_unmasked:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    .cfi_def_cfa_offset 16
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
; RV32-NEXT:    vlse64.v v12, (a0), zero
; RV32-NEXT:    vsub.vv v8, v8, v12
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    .cfi_def_cfa_offset 0
; RV32-NEXT:    ret
;
; RV64-LABEL: vsub_vx_nxv4i64_unmasked:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
; RV64-NEXT:    vsub.vx v8, v8, a0
; RV64-NEXT:    ret
  %elt.head = insertelement <vscale x 4 x i64> poison, i64 %b, i32 0
  %vb = shufflevector <vscale x 4 x i64> %elt.head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
  %v = call <vscale x 4 x i64> @llvm.vp.sub.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 4 x i64> %v
}

declare <vscale x 8 x i64> @llvm.vp.sub.nxv8i64(<vscale x 8 x i64>, <vscale x 8 x i64>, <vscale x 8 x i1>, i32)

define <vscale x 8 x i64> @vsub_vv_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vv_nxv8i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vsub.vv v8, v8, v16, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 8 x i64> @llvm.vp.sub.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x i64> %v
}

define <vscale x 8 x i64> @vsub_vv_nxv8i64_unmasked(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vv_nxv8i64_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vsub.vv v8, v8, v16
; CHECK-NEXT:    ret
  %v = call <vscale x 8 x i64> @llvm.vp.sub.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 8 x i64> %v
}

define <vscale x 8 x i64> @vsub_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vsub_vx_nxv8i64:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    .cfi_def_cfa_offset 16
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
; RV32-NEXT:    vlse64.v v16, (a0), zero
; RV32-NEXT:    vsub.vv v8, v8, v16, v0.t
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    .cfi_def_cfa_offset 0
; RV32-NEXT:    ret
;
; RV64-LABEL: vsub_vx_nxv8i64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT:    vsub.vx v8, v8, a0, v0.t
; RV64-NEXT:    ret
  %elt.head = insertelement <vscale x 8 x i64> poison, i64 %b, i32 0
  %vb = shufflevector <vscale x 8 x i64> %elt.head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
  %v = call <vscale x 8 x i64> @llvm.vp.sub.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x i64> %v
}

define <vscale x 8 x i64> @vsub_vx_nxv8i64_unmasked(<vscale x 8 x i64> %va, i64 %b, i32 zeroext %evl) {
; RV32-LABEL: vsub_vx_nxv8i64_unmasked:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    .cfi_def_cfa_offset 16
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
; RV32-NEXT:    vlse64.v v16, (a0), zero
; RV32-NEXT:    vsub.vv v8, v8, v16
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    .cfi_def_cfa_offset 0
; RV32-NEXT:    ret
;
; RV64-LABEL: vsub_vx_nxv8i64_unmasked:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT:    vsub.vx v8, v8, a0
; RV64-NEXT:    ret
  %elt.head = insertelement <vscale x 8 x i64> poison, i64 %b, i32 0
  %vb = shufflevector <vscale x 8 x i64> %elt.head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
  %v = call <vscale x 8 x i64> @llvm.vp.sub.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 8 x i64> %v
}