1; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+m,+v \
3; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32
4; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+m,+v \
5; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64
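; Integer setcc (icmp) lowering for scalable vector types. Each predicate is
; exercised with vector-vector, vector-scalar (splat), and vector-immediate
; operands to check the RVV mask-compare instruction selected on RV32 and RV64.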
6
7define <vscale x 3 x i1> @icmp_eq_vv_nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i8> %vb) {
8; CHECK-LABEL: icmp_eq_vv_nxv3i8:
9; CHECK:       # %bb.0:
10; CHECK-NEXT:    vsetvli a0, zero, e8, mf2, ta, ma
11; CHECK-NEXT:    vmseq.vv v0, v8, v9
12; CHECK-NEXT:    ret
13  %vc = icmp eq <vscale x 3 x i8> %va, %vb
14  ret <vscale x 3 x i1> %vc
15}
16
17define <vscale x 3 x i1> @icmp_eq_vx_nxv3i8(<vscale x 3 x i8> %va, i8 %b) {
18; CHECK-LABEL: icmp_eq_vx_nxv3i8:
19; CHECK:       # %bb.0:
20; CHECK-NEXT:    vsetvli a1, zero, e8, mf2, ta, ma
21; CHECK-NEXT:    vmseq.vx v0, v8, a0
22; CHECK-NEXT:    ret
23  %head = insertelement <vscale x 3 x i8> poison, i8 %b, i32 0
24  %splat = shufflevector <vscale x 3 x i8> %head, <vscale x 3 x i8> poison, <vscale x 3 x i32> zeroinitializer
25  %vc = icmp eq <vscale x 3 x i8> %va, %splat
26  ret <vscale x 3 x i1> %vc
27}
28
29define <vscale x 3 x i1> @icmp_eq_xv_nxv3i8(<vscale x 3 x i8> %va, i8 %b) {
30; CHECK-LABEL: icmp_eq_xv_nxv3i8:
31; CHECK:       # %bb.0:
32; CHECK-NEXT:    vsetvli a1, zero, e8, mf2, ta, ma
33; CHECK-NEXT:    vmseq.vx v0, v8, a0
34; CHECK-NEXT:    ret
35  %head = insertelement <vscale x 3 x i8> poison, i8 %b, i32 0
36  %splat = shufflevector <vscale x 3 x i8> %head, <vscale x 3 x i8> poison, <vscale x 3 x i32> zeroinitializer
37  %vc = icmp eq <vscale x 3 x i8> %splat, %va
38  ret <vscale x 3 x i1> %vc
39}
40
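; <vscale x 8 x i8> tests: SEW=8, LMUL=1 (m1).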
41define <vscale x 8 x i1> @icmp_eq_vv_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb) {
42; CHECK-LABEL: icmp_eq_vv_nxv8i8:
43; CHECK:       # %bb.0:
44; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
45; CHECK-NEXT:    vmseq.vv v0, v8, v9
46; CHECK-NEXT:    ret
47  %vc = icmp eq <vscale x 8 x i8> %va, %vb
48  ret <vscale x 8 x i1> %vc
49}
50
51define <vscale x 8 x i1> @icmp_eq_vx_nxv8i8(<vscale x 8 x i8> %va, i8 %b) {
52; CHECK-LABEL: icmp_eq_vx_nxv8i8:
53; CHECK:       # %bb.0:
54; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, ma
55; CHECK-NEXT:    vmseq.vx v0, v8, a0
56; CHECK-NEXT:    ret
57  %head = insertelement <vscale x 8 x i8> poison, i8 %b, i32 0
58  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
59  %vc = icmp eq <vscale x 8 x i8> %va, %splat
60  ret <vscale x 8 x i1> %vc
61}
62
63define <vscale x 8 x i1> @icmp_eq_xv_nxv8i8(<vscale x 8 x i8> %va, i8 %b) {
64; CHECK-LABEL: icmp_eq_xv_nxv8i8:
65; CHECK:       # %bb.0:
66; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, ma
67; CHECK-NEXT:    vmseq.vx v0, v8, a0
68; CHECK-NEXT:    ret
69  %head = insertelement <vscale x 8 x i8> poison, i8 %b, i32 0
70  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
71  %vc = icmp eq <vscale x 8 x i8> %splat, %va
72  ret <vscale x 8 x i1> %vc
73}
74
75define <vscale x 8 x i1> @icmp_eq_vi_nxv8i8_0(<vscale x 8 x i8> %va) {
76; CHECK-LABEL: icmp_eq_vi_nxv8i8_0:
77; CHECK:       # %bb.0:
78; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
79; CHECK-NEXT:    vmseq.vi v0, v8, 0
80; CHECK-NEXT:    ret
81  %vc = icmp eq <vscale x 8 x i8> %va, splat (i8 0)
82  ret <vscale x 8 x i1> %vc
83}
84
85define <vscale x 8 x i1> @icmp_eq_vi_nxv8i8_1(<vscale x 8 x i8> %va) {
86; CHECK-LABEL: icmp_eq_vi_nxv8i8_1:
87; CHECK:       # %bb.0:
88; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
89; CHECK-NEXT:    vmseq.vi v0, v8, 5
90; CHECK-NEXT:    ret
91  %vc = icmp eq <vscale x 8 x i8> %va, splat (i8 5)
92  ret <vscale x 8 x i1> %vc
93}
94
95define <vscale x 8 x i1> @icmp_eq_iv_nxv8i8_1(<vscale x 8 x i8> %va) {
96; CHECK-LABEL: icmp_eq_iv_nxv8i8_1:
97; CHECK:       # %bb.0:
98; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
99; CHECK-NEXT:    vmseq.vi v0, v8, 5
100; CHECK-NEXT:    ret
101  %vc = icmp eq <vscale x 8 x i8> splat (i8 5), %va
102  ret <vscale x 8 x i1> %vc
103}
104
105define <vscale x 8 x i1> @icmp_ne_vv_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb) {
106; CHECK-LABEL: icmp_ne_vv_nxv8i8:
107; CHECK:       # %bb.0:
108; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
109; CHECK-NEXT:    vmsne.vv v0, v8, v9
110; CHECK-NEXT:    ret
111  %vc = icmp ne <vscale x 8 x i8> %va, %vb
112  ret <vscale x 8 x i1> %vc
113}
114
115define <vscale x 8 x i1> @icmp_ne_vx_nxv8i8(<vscale x 8 x i8> %va, i8 %b) {
116; CHECK-LABEL: icmp_ne_vx_nxv8i8:
117; CHECK:       # %bb.0:
118; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, ma
119; CHECK-NEXT:    vmsne.vx v0, v8, a0
120; CHECK-NEXT:    ret
121  %head = insertelement <vscale x 8 x i8> poison, i8 %b, i32 0
122  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
123  %vc = icmp ne <vscale x 8 x i8> %va, %splat
124  ret <vscale x 8 x i1> %vc
125}
126
127define <vscale x 8 x i1> @icmp_ne_xv_nxv8i8(<vscale x 8 x i8> %va, i8 %b) {
128; CHECK-LABEL: icmp_ne_xv_nxv8i8:
129; CHECK:       # %bb.0:
130; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, ma
131; CHECK-NEXT:    vmsne.vx v0, v8, a0
132; CHECK-NEXT:    ret
133  %head = insertelement <vscale x 8 x i8> poison, i8 %b, i32 0
134  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
135  %vc = icmp ne <vscale x 8 x i8> %splat, %va
136  ret <vscale x 8 x i1> %vc
137}
138
139define <vscale x 8 x i1> @icmp_ne_vi_nxv8i8_0(<vscale x 8 x i8> %va) {
140; CHECK-LABEL: icmp_ne_vi_nxv8i8_0:
141; CHECK:       # %bb.0:
142; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
143; CHECK-NEXT:    vmsne.vi v0, v8, 5
144; CHECK-NEXT:    ret
145  %vc = icmp ne <vscale x 8 x i8> %va, splat (i8 5)
146  ret <vscale x 8 x i1> %vc
147}
148
149define <vscale x 8 x i1> @icmp_ugt_vv_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb) {
150; CHECK-LABEL: icmp_ugt_vv_nxv8i8:
151; CHECK:       # %bb.0:
152; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
153; CHECK-NEXT:    vmsltu.vv v0, v9, v8
154; CHECK-NEXT:    ret
155  %vc = icmp ugt <vscale x 8 x i8> %va, %vb
156  ret <vscale x 8 x i1> %vc
157}
158
159define <vscale x 8 x i1> @icmp_ugt_vx_nxv8i8(<vscale x 8 x i8> %va, i8 %b) {
160; CHECK-LABEL: icmp_ugt_vx_nxv8i8:
161; CHECK:       # %bb.0:
162; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, ma
163; CHECK-NEXT:    vmsgtu.vx v0, v8, a0
164; CHECK-NEXT:    ret
165  %head = insertelement <vscale x 8 x i8> poison, i8 %b, i32 0
166  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
167  %vc = icmp ugt <vscale x 8 x i8> %va, %splat
168  ret <vscale x 8 x i1> %vc
169}
170
171define <vscale x 8 x i1> @icmp_ugt_xv_nxv8i8(<vscale x 8 x i8> %va, i8 %b) {
172; CHECK-LABEL: icmp_ugt_xv_nxv8i8:
173; CHECK:       # %bb.0:
174; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, ma
175; CHECK-NEXT:    vmsltu.vx v0, v8, a0
176; CHECK-NEXT:    ret
177  %head = insertelement <vscale x 8 x i8> poison, i8 %b, i32 0
178  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
179  %vc = icmp ugt <vscale x 8 x i8> %splat, %va
180  ret <vscale x 8 x i1> %vc
181}
182
183define <vscale x 8 x i1> @icmp_ugt_vi_nxv8i8_0(<vscale x 8 x i8> %va) {
184; CHECK-LABEL: icmp_ugt_vi_nxv8i8_0:
185; CHECK:       # %bb.0:
186; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
187; CHECK-NEXT:    vmsgtu.vi v0, v8, 5
188; CHECK-NEXT:    ret
189  %vc = icmp ugt <vscale x 8 x i8> %va, splat (i8 5)
190  ret <vscale x 8 x i1> %vc
191}
192
193define <vscale x 8 x i1> @icmp_uge_vv_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb) {
194; CHECK-LABEL: icmp_uge_vv_nxv8i8:
195; CHECK:       # %bb.0:
196; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
197; CHECK-NEXT:    vmsleu.vv v0, v9, v8
198; CHECK-NEXT:    ret
199  %vc = icmp uge <vscale x 8 x i8> %va, %vb
200  ret <vscale x 8 x i1> %vc
201}
202
203define <vscale x 8 x i1> @icmp_uge_vx_nxv8i8(<vscale x 8 x i8> %va, i8 %b) {
204; CHECK-LABEL: icmp_uge_vx_nxv8i8:
205; CHECK:       # %bb.0:
206; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, ma
207; CHECK-NEXT:    vmv.v.x v9, a0
208; CHECK-NEXT:    vmsleu.vv v0, v9, v8
209; CHECK-NEXT:    ret
210  %head = insertelement <vscale x 8 x i8> poison, i8 %b, i32 0
211  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
212  %vc = icmp uge <vscale x 8 x i8> %va, %splat
213  ret <vscale x 8 x i1> %vc
214}
215
216define <vscale x 8 x i1> @icmp_uge_xv_nxv8i8(<vscale x 8 x i8> %va, i8 %b) {
217; CHECK-LABEL: icmp_uge_xv_nxv8i8:
218; CHECK:       # %bb.0:
219; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, ma
220; CHECK-NEXT:    vmsleu.vx v0, v8, a0
221; CHECK-NEXT:    ret
222  %head = insertelement <vscale x 8 x i8> poison, i8 %b, i32 0
223  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
224  %vc = icmp uge <vscale x 8 x i8> %splat, %va
225  ret <vscale x 8 x i1> %vc
226}
227
228define <vscale x 8 x i1> @icmp_uge_vi_nxv8i8_0(<vscale x 8 x i8> %va) {
229; CHECK-LABEL: icmp_uge_vi_nxv8i8_0:
230; CHECK:       # %bb.0:
231; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
232; CHECK-NEXT:    vmv.v.i v9, -16
233; CHECK-NEXT:    vmsleu.vv v0, v9, v8
234; CHECK-NEXT:    ret
235  %vc = icmp uge <vscale x 8 x i8> %va, splat (i8 -16)
236  ret <vscale x 8 x i1> %vc
237}
238
239define <vscale x 8 x i1> @icmp_uge_vi_nxv8i8_1(<vscale x 8 x i8> %va) {
240; CHECK-LABEL: icmp_uge_vi_nxv8i8_1:
241; CHECK:       # %bb.0:
242; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
243; CHECK-NEXT:    vmsgtu.vi v0, v8, 14
244; CHECK-NEXT:    ret
245  %vc = icmp uge <vscale x 8 x i8> %va, splat (i8 15)
246  ret <vscale x 8 x i1> %vc
247}
248
249define <vscale x 8 x i1> @icmp_uge_iv_nxv8i8_1(<vscale x 8 x i8> %va) {
250; CHECK-LABEL: icmp_uge_iv_nxv8i8_1:
251; CHECK:       # %bb.0:
252; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
253; CHECK-NEXT:    vmsleu.vi v0, v8, 15
254; CHECK-NEXT:    ret
255  %vc = icmp uge <vscale x 8 x i8> splat (i8 15), %va
256  ret <vscale x 8 x i1> %vc
257}
258
259define <vscale x 8 x i1> @icmp_uge_vi_nxv8i8_2(<vscale x 8 x i8> %va) {
260; CHECK-LABEL: icmp_uge_vi_nxv8i8_2:
261; CHECK:       # %bb.0:
262; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
263; CHECK-NEXT:    vmset.m v0
264; CHECK-NEXT:    ret
265  %vc = icmp uge <vscale x 8 x i8> %va, splat (i8 0)
266  ret <vscale x 8 x i1> %vc
267}
268
269define <vscale x 8 x i1> @icmp_uge_vi_nxv8i8_3(<vscale x 8 x i8> %va) {
270; CHECK-LABEL: icmp_uge_vi_nxv8i8_3:
271; CHECK:       # %bb.0:
272; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
273; CHECK-NEXT:    vmsgtu.vi v0, v8, 0
274; CHECK-NEXT:    ret
275  %vc = icmp uge <vscale x 8 x i8> %va, splat (i8 1)
276  ret <vscale x 8 x i1> %vc
277}
278
279define <vscale x 8 x i1> @icmp_uge_vi_nxv8i8_4(<vscale x 8 x i8> %va) {
280; CHECK-LABEL: icmp_uge_vi_nxv8i8_4:
281; CHECK:       # %bb.0:
282; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
283; CHECK-NEXT:    vmsgtu.vi v0, v8, -16
284; CHECK-NEXT:    ret
285  %vc = icmp uge <vscale x 8 x i8> %va, splat (i8 -15)
286  ret <vscale x 8 x i1> %vc
287}
288
289define <vscale x 8 x i1> @icmp_uge_vi_nxv8i8_5(<vscale x 8 x i8> %va) {
290; CHECK-LABEL: icmp_uge_vi_nxv8i8_5:
291; CHECK:       # %bb.0:
292; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
293; CHECK-NEXT:    vmsgtu.vi v0, v8, 15
294; CHECK-NEXT:    ret
295  %vc = icmp uge <vscale x 8 x i8> %va, splat (i8 16)
296  ret <vscale x 8 x i1> %vc
297}
298
299; Test that we don't optimize uge x, 0 -> ugt x, -1
300define <vscale x 8 x i1> @icmp_uge_vi_nxv8i8_6(<vscale x 8 x i8> %va, iXLen %vl) {
301; CHECK-LABEL: icmp_uge_vi_nxv8i8_6:
302; CHECK:       # %bb.0:
303; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
304; CHECK-NEXT:    vmv.v.i v9, 0
305; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
306; CHECK-NEXT:    vmsleu.vv v0, v9, v8
307; CHECK-NEXT:    ret
308  %splat = call <vscale x 8 x i8> @llvm.riscv.vmv.v.x.nxv8i8.iXLen(<vscale x 8 x i8> undef, i8 0, iXLen %vl)
309  %vc = icmp uge <vscale x 8 x i8> %va, %splat
310  ret <vscale x 8 x i1> %vc
311}
312
313define <vscale x 8 x i1> @icmp_ult_vv_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb) {
314; CHECK-LABEL: icmp_ult_vv_nxv8i8:
315; CHECK:       # %bb.0:
316; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
317; CHECK-NEXT:    vmsltu.vv v0, v8, v9
318; CHECK-NEXT:    ret
319  %vc = icmp ult <vscale x 8 x i8> %va, %vb
320  ret <vscale x 8 x i1> %vc
321}
322
323define <vscale x 8 x i1> @icmp_ult_vx_nxv8i8(<vscale x 8 x i8> %va, i8 %b) {
324; CHECK-LABEL: icmp_ult_vx_nxv8i8:
325; CHECK:       # %bb.0:
326; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, ma
327; CHECK-NEXT:    vmsltu.vx v0, v8, a0
328; CHECK-NEXT:    ret
329  %head = insertelement <vscale x 8 x i8> poison, i8 %b, i32 0
330  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
331  %vc = icmp ult <vscale x 8 x i8> %va, %splat
332  ret <vscale x 8 x i1> %vc
333}
334
335define <vscale x 8 x i1> @icmp_ult_xv_nxv8i8(<vscale x 8 x i8> %va, i8 %b) {
336; CHECK-LABEL: icmp_ult_xv_nxv8i8:
337; CHECK:       # %bb.0:
338; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, ma
339; CHECK-NEXT:    vmsgtu.vx v0, v8, a0
340; CHECK-NEXT:    ret
341  %head = insertelement <vscale x 8 x i8> poison, i8 %b, i32 0
342  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
343  %vc = icmp ult <vscale x 8 x i8> %splat, %va
344  ret <vscale x 8 x i1> %vc
345}
346
347define <vscale x 8 x i1> @icmp_ult_vi_nxv8i8_0(<vscale x 8 x i8> %va) {
348; CHECK-LABEL: icmp_ult_vi_nxv8i8_0:
349; CHECK:       # %bb.0:
350; CHECK-NEXT:    li a0, -16
351; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, ma
352; CHECK-NEXT:    vmsltu.vx v0, v8, a0
353; CHECK-NEXT:    ret
354  %vc = icmp ult <vscale x 8 x i8> %va, splat (i8 -16)
355  ret <vscale x 8 x i1> %vc
356}
357
358define <vscale x 8 x i1> @icmp_ult_vi_nxv8i8_1(<vscale x 8 x i8> %va) {
359; CHECK-LABEL: icmp_ult_vi_nxv8i8_1:
360; CHECK:       # %bb.0:
361; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
362; CHECK-NEXT:    vmsleu.vi v0, v8, -16
363; CHECK-NEXT:    ret
364  %vc = icmp ult <vscale x 8 x i8> %va, splat (i8 -15)
365  ret <vscale x 8 x i1> %vc
366}
367
368define <vscale x 8 x i1> @icmp_ult_iv_nxv8i8_1(<vscale x 8 x i8> %va) {
369; CHECK-LABEL: icmp_ult_iv_nxv8i8_1:
370; CHECK:       # %bb.0:
371; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
372; CHECK-NEXT:    vmsgtu.vi v0, v8, -15
373; CHECK-NEXT:    ret
374  %vc = icmp ult <vscale x 8 x i8> splat (i8 -15), %va
375  ret <vscale x 8 x i1> %vc
376}
377
378define <vscale x 8 x i1> @icmp_ult_vi_nxv8i8_2(<vscale x 8 x i8> %va) {
379; CHECK-LABEL: icmp_ult_vi_nxv8i8_2:
380; CHECK:       # %bb.0:
381; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
382; CHECK-NEXT:    vmclr.m v0
383; CHECK-NEXT:    ret
384  %vc = icmp ult <vscale x 8 x i8> %va, splat (i8 0)
385  ret <vscale x 8 x i1> %vc
386}
387
388define <vscale x 8 x i1> @icmp_ult_vi_nxv8i8_3(<vscale x 8 x i8> %va) {
389; CHECK-LABEL: icmp_ult_vi_nxv8i8_3:
390; CHECK:       # %bb.0:
391; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
392; CHECK-NEXT:    vmseq.vi v0, v8, 0
393; CHECK-NEXT:    ret
394  %vc = icmp ult <vscale x 8 x i8> %va, splat (i8 1)
395  ret <vscale x 8 x i1> %vc
396}
397
398define <vscale x 8 x i1> @icmp_ult_vi_nxv8i8_4(<vscale x 8 x i8> %va) {
399; CHECK-LABEL: icmp_ult_vi_nxv8i8_4:
400; CHECK:       # %bb.0:
401; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
402; CHECK-NEXT:    vmsleu.vi v0, v8, 15
403; CHECK-NEXT:    ret
404  %vc = icmp ult <vscale x 8 x i8> %va, splat (i8 16)
405  ret <vscale x 8 x i1> %vc
406}
407
408declare <vscale x 8 x i8> @llvm.riscv.vmv.v.x.nxv8i8.iXLen(<vscale x 8 x i8>, i8, iXLen);
409
410; Test that we don't optimize ult x, 0 -> ule x, -1
411define <vscale x 8 x i1> @icmp_ult_vi_nxv8i8_5(<vscale x 8 x i8> %va, iXLen %vl) {
412; CHECK-LABEL: icmp_ult_vi_nxv8i8_5:
413; CHECK:       # %bb.0:
414; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
415; CHECK-NEXT:    vmsltu.vx v0, v8, zero
416; CHECK-NEXT:    ret
417  %splat = call <vscale x 8 x i8> @llvm.riscv.vmv.v.x.nxv8i8.iXLen(<vscale x 8 x i8> undef, i8 0, iXLen %vl)
418  %vc = icmp ult <vscale x 8 x i8> %va, %splat
419  ret <vscale x 8 x i1> %vc
420}
421
422define <vscale x 8 x i1> @icmp_ule_vv_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb) {
423; CHECK-LABEL: icmp_ule_vv_nxv8i8:
424; CHECK:       # %bb.0:
425; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
426; CHECK-NEXT:    vmsleu.vv v0, v8, v9
427; CHECK-NEXT:    ret
428  %vc = icmp ule <vscale x 8 x i8> %va, %vb
429  ret <vscale x 8 x i1> %vc
430}
431
432define <vscale x 8 x i1> @icmp_ule_vx_nxv8i8(<vscale x 8 x i8> %va, i8 %b) {
433; CHECK-LABEL: icmp_ule_vx_nxv8i8:
434; CHECK:       # %bb.0:
435; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, ma
436; CHECK-NEXT:    vmsleu.vx v0, v8, a0
437; CHECK-NEXT:    ret
438  %head = insertelement <vscale x 8 x i8> poison, i8 %b, i32 0
439  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
440  %vc = icmp ule <vscale x 8 x i8> %va, %splat
441  ret <vscale x 8 x i1> %vc
442}
443
444define <vscale x 8 x i1> @icmp_ule_xv_nxv8i8(<vscale x 8 x i8> %va, i8 %b) {
445; CHECK-LABEL: icmp_ule_xv_nxv8i8:
446; CHECK:       # %bb.0:
447; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, ma
448; CHECK-NEXT:    vmv.v.x v9, a0
449; CHECK-NEXT:    vmsleu.vv v0, v9, v8
450; CHECK-NEXT:    ret
451  %head = insertelement <vscale x 8 x i8> poison, i8 %b, i32 0
452  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
453  %vc = icmp ule <vscale x 8 x i8> %splat, %va
454  ret <vscale x 8 x i1> %vc
455}
456
457define <vscale x 8 x i1> @icmp_ule_vi_nxv8i8_0(<vscale x 8 x i8> %va) {
458; CHECK-LABEL: icmp_ule_vi_nxv8i8_0:
459; CHECK:       # %bb.0:
460; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
461; CHECK-NEXT:    vmsleu.vi v0, v8, 5
462; CHECK-NEXT:    ret
463  %vc = icmp ule <vscale x 8 x i8> %va, splat (i8 5)
464  ret <vscale x 8 x i1> %vc
465}
466
467define <vscale x 8 x i1> @icmp_sgt_vv_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb) {
468; CHECK-LABEL: icmp_sgt_vv_nxv8i8:
469; CHECK:       # %bb.0:
470; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
471; CHECK-NEXT:    vmslt.vv v0, v9, v8
472; CHECK-NEXT:    ret
473  %vc = icmp sgt <vscale x 8 x i8> %va, %vb
474  ret <vscale x 8 x i1> %vc
475}
476
477define <vscale x 8 x i1> @icmp_sgt_vx_nxv8i8(<vscale x 8 x i8> %va, i8 %b) {
478; CHECK-LABEL: icmp_sgt_vx_nxv8i8:
479; CHECK:       # %bb.0:
480; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, ma
481; CHECK-NEXT:    vmsgt.vx v0, v8, a0
482; CHECK-NEXT:    ret
483  %head = insertelement <vscale x 8 x i8> poison, i8 %b, i32 0
484  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
485  %vc = icmp sgt <vscale x 8 x i8> %va, %splat
486  ret <vscale x 8 x i1> %vc
487}
488
489define <vscale x 8 x i1> @icmp_sgt_xv_nxv8i8(<vscale x 8 x i8> %va, i8 %b) {
490; CHECK-LABEL: icmp_sgt_xv_nxv8i8:
491; CHECK:       # %bb.0:
492; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, ma
493; CHECK-NEXT:    vmslt.vx v0, v8, a0
494; CHECK-NEXT:    ret
495  %head = insertelement <vscale x 8 x i8> poison, i8 %b, i32 0
496  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
497  %vc = icmp sgt <vscale x 8 x i8> %splat, %va
498  ret <vscale x 8 x i1> %vc
499}
500
501define <vscale x 8 x i1> @icmp_sgt_vi_nxv8i8_0(<vscale x 8 x i8> %va) {
502; CHECK-LABEL: icmp_sgt_vi_nxv8i8_0:
503; CHECK:       # %bb.0:
504; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
505; CHECK-NEXT:    vmsgt.vi v0, v8, 5
506; CHECK-NEXT:    ret
507  %vc = icmp sgt <vscale x 8 x i8> %va, splat (i8 5)
508  ret <vscale x 8 x i1> %vc
509}
510
511define <vscale x 8 x i1> @icmp_sge_vv_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb) {
512; CHECK-LABEL: icmp_sge_vv_nxv8i8:
513; CHECK:       # %bb.0:
514; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
515; CHECK-NEXT:    vmsle.vv v0, v9, v8
516; CHECK-NEXT:    ret
517  %vc = icmp sge <vscale x 8 x i8> %va, %vb
518  ret <vscale x 8 x i1> %vc
519}
520
521define <vscale x 8 x i1> @icmp_sge_vx_nxv8i8(<vscale x 8 x i8> %va, i8 %b) {
522; CHECK-LABEL: icmp_sge_vx_nxv8i8:
523; CHECK:       # %bb.0:
524; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, ma
525; CHECK-NEXT:    vmv.v.x v9, a0
526; CHECK-NEXT:    vmsle.vv v0, v9, v8
527; CHECK-NEXT:    ret
528  %head = insertelement <vscale x 8 x i8> poison, i8 %b, i32 0
529  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
530  %vc = icmp sge <vscale x 8 x i8> %va, %splat
531  ret <vscale x 8 x i1> %vc
532}
533
534define <vscale x 8 x i1> @icmp_sge_xv_nxv8i8(<vscale x 8 x i8> %va, i8 %b) {
535; CHECK-LABEL: icmp_sge_xv_nxv8i8:
536; CHECK:       # %bb.0:
537; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, ma
538; CHECK-NEXT:    vmsle.vx v0, v8, a0
539; CHECK-NEXT:    ret
540  %head = insertelement <vscale x 8 x i8> poison, i8 %b, i32 0
541  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
542  %vc = icmp sge <vscale x 8 x i8> %splat, %va
543  ret <vscale x 8 x i1> %vc
544}
545
546define <vscale x 8 x i1> @icmp_sge_vi_nxv8i8_0(<vscale x 8 x i8> %va) {
547; CHECK-LABEL: icmp_sge_vi_nxv8i8_0:
548; CHECK:       # %bb.0:
549; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
550; CHECK-NEXT:    vmv.v.i v9, -16
551; CHECK-NEXT:    vmsle.vv v0, v9, v8
552; CHECK-NEXT:    ret
553  %vc = icmp sge <vscale x 8 x i8> %va, splat (i8 -16)
554  ret <vscale x 8 x i1> %vc
555}
556
557define <vscale x 8 x i1> @icmp_sge_vi_nxv8i8_1(<vscale x 8 x i8> %va) {
558; CHECK-LABEL: icmp_sge_vi_nxv8i8_1:
559; CHECK:       # %bb.0:
560; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
561; CHECK-NEXT:    vmsgt.vi v0, v8, -16
562; CHECK-NEXT:    ret
563  %vc = icmp sge <vscale x 8 x i8> %va, splat (i8 -15)
564  ret <vscale x 8 x i1> %vc
565}
566
567define <vscale x 8 x i1> @icmp_sge_iv_nxv8i8_1(<vscale x 8 x i8> %va) {
568; CHECK-LABEL: icmp_sge_iv_nxv8i8_1:
569; CHECK:       # %bb.0:
570; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
571; CHECK-NEXT:    vmsle.vi v0, v8, -15
572; CHECK-NEXT:    ret
573  %vc = icmp sge <vscale x 8 x i8> splat (i8 -15), %va
574  ret <vscale x 8 x i1> %vc
575}
576
577define <vscale x 8 x i1> @icmp_sge_vi_nxv8i8_2(<vscale x 8 x i8> %va) {
578; CHECK-LABEL: icmp_sge_vi_nxv8i8_2:
579; CHECK:       # %bb.0:
580; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
581; CHECK-NEXT:    vmsgt.vi v0, v8, -1
582; CHECK-NEXT:    ret
583  %vc = icmp sge <vscale x 8 x i8> %va, splat (i8 0)
584  ret <vscale x 8 x i1> %vc
585}
586
587define <vscale x 8 x i1> @icmp_sge_vi_nxv8i8_3(<vscale x 8 x i8> %va) {
588; CHECK-LABEL: icmp_sge_vi_nxv8i8_3:
589; CHECK:       # %bb.0:
590; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
591; CHECK-NEXT:    vmsgt.vi v0, v8, 15
592; CHECK-NEXT:    ret
593  %vc = icmp sge <vscale x 8 x i8> %va, splat (i8 16)
594  ret <vscale x 8 x i1> %vc
595}
596
597define <vscale x 8 x i1> @icmp_slt_vv_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb) {
598; CHECK-LABEL: icmp_slt_vv_nxv8i8:
599; CHECK:       # %bb.0:
600; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
601; CHECK-NEXT:    vmslt.vv v0, v8, v9
602; CHECK-NEXT:    ret
603  %vc = icmp slt <vscale x 8 x i8> %va, %vb
604  ret <vscale x 8 x i1> %vc
605}
606
607define <vscale x 8 x i1> @icmp_slt_vx_nxv8i8(<vscale x 8 x i8> %va, i8 %b) {
608; CHECK-LABEL: icmp_slt_vx_nxv8i8:
609; CHECK:       # %bb.0:
610; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, ma
611; CHECK-NEXT:    vmslt.vx v0, v8, a0
612; CHECK-NEXT:    ret
613  %head = insertelement <vscale x 8 x i8> poison, i8 %b, i32 0
614  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
615  %vc = icmp slt <vscale x 8 x i8> %va, %splat
616  ret <vscale x 8 x i1> %vc
617}
618
619define <vscale x 8 x i1> @icmp_slt_xv_nxv8i8(<vscale x 8 x i8> %va, i8 %b) {
620; CHECK-LABEL: icmp_slt_xv_nxv8i8:
621; CHECK:       # %bb.0:
622; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, ma
623; CHECK-NEXT:    vmsgt.vx v0, v8, a0
624; CHECK-NEXT:    ret
625  %head = insertelement <vscale x 8 x i8> poison, i8 %b, i32 0
626  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
627  %vc = icmp slt <vscale x 8 x i8> %splat, %va
628  ret <vscale x 8 x i1> %vc
629}
630
631define <vscale x 8 x i1> @icmp_slt_vi_nxv8i8_0(<vscale x 8 x i8> %va) {
632; CHECK-LABEL: icmp_slt_vi_nxv8i8_0:
633; CHECK:       # %bb.0:
634; CHECK-NEXT:    li a0, -16
635; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, ma
636; CHECK-NEXT:    vmslt.vx v0, v8, a0
637; CHECK-NEXT:    ret
638  %vc = icmp slt <vscale x 8 x i8> %va, splat (i8 -16)
639  ret <vscale x 8 x i1> %vc
640}
641
642define <vscale x 8 x i1> @icmp_slt_vi_nxv8i8_1(<vscale x 8 x i8> %va) {
643; CHECK-LABEL: icmp_slt_vi_nxv8i8_1:
644; CHECK:       # %bb.0:
645; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
646; CHECK-NEXT:    vmsle.vi v0, v8, -16
647; CHECK-NEXT:    ret
648  %vc = icmp slt <vscale x 8 x i8> %va, splat (i8 -15)
649  ret <vscale x 8 x i1> %vc
650}
651
652define <vscale x 8 x i1> @icmp_slt_iv_nxv8i8_1(<vscale x 8 x i8> %va) {
653; CHECK-LABEL: icmp_slt_iv_nxv8i8_1:
654; CHECK:       # %bb.0:
655; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
656; CHECK-NEXT:    vmsgt.vi v0, v8, -15
657; CHECK-NEXT:    ret
658  %vc = icmp slt <vscale x 8 x i8> splat (i8 -15), %va
659  ret <vscale x 8 x i1> %vc
660}
661
662define <vscale x 8 x i1> @icmp_slt_vi_nxv8i8_2(<vscale x 8 x i8> %va) {
663; CHECK-LABEL: icmp_slt_vi_nxv8i8_2:
664; CHECK:       # %bb.0:
665; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
666; CHECK-NEXT:    vmsle.vi v0, v8, -1
667; CHECK-NEXT:    ret
668  %vc = icmp slt <vscale x 8 x i8> %va, splat (i8 0)
669  ret <vscale x 8 x i1> %vc
670}
671
672define <vscale x 8 x i1> @icmp_slt_vi_nxv8i8_3(<vscale x 8 x i8> %va) {
673; CHECK-LABEL: icmp_slt_vi_nxv8i8_3:
674; CHECK:       # %bb.0:
675; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
676; CHECK-NEXT:    vmsle.vi v0, v8, 15
677; CHECK-NEXT:    ret
678  %vc = icmp slt <vscale x 8 x i8> %va, splat (i8 16)
679  ret <vscale x 8 x i1> %vc
680}
681
682define <vscale x 8 x i1> @icmp_sle_vv_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb) {
683; CHECK-LABEL: icmp_sle_vv_nxv8i8:
684; CHECK:       # %bb.0:
685; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
686; CHECK-NEXT:    vmsle.vv v0, v8, v9
687; CHECK-NEXT:    ret
688  %vc = icmp sle <vscale x 8 x i8> %va, %vb
689  ret <vscale x 8 x i1> %vc
690}
691
692define <vscale x 8 x i1> @icmp_sle_vx_nxv8i8(<vscale x 8 x i8> %va, i8 %b) {
693; CHECK-LABEL: icmp_sle_vx_nxv8i8:
694; CHECK:       # %bb.0:
695; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, ma
696; CHECK-NEXT:    vmsle.vx v0, v8, a0
697; CHECK-NEXT:    ret
698  %head = insertelement <vscale x 8 x i8> poison, i8 %b, i32 0
699  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
700  %vc = icmp sle <vscale x 8 x i8> %va, %splat
701  ret <vscale x 8 x i1> %vc
702}
703
704define <vscale x 8 x i1> @icmp_sle_xv_nxv8i8(<vscale x 8 x i8> %va, i8 %b) {
705; CHECK-LABEL: icmp_sle_xv_nxv8i8:
706; CHECK:       # %bb.0:
707; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, ma
708; CHECK-NEXT:    vmv.v.x v9, a0
709; CHECK-NEXT:    vmsle.vv v0, v9, v8
710; CHECK-NEXT:    ret
711  %head = insertelement <vscale x 8 x i8> poison, i8 %b, i32 0
712  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
713  %vc = icmp sle <vscale x 8 x i8> %splat, %va
714  ret <vscale x 8 x i1> %vc
715}
716
717define <vscale x 8 x i1> @icmp_sle_vi_nxv8i8_0(<vscale x 8 x i8> %va) {
718; CHECK-LABEL: icmp_sle_vi_nxv8i8_0:
719; CHECK:       # %bb.0:
720; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
721; CHECK-NEXT:    vmsle.vi v0, v8, 5
722; CHECK-NEXT:    ret
723  %vc = icmp sle <vscale x 8 x i8> %va, splat (i8 5)
724  ret <vscale x 8 x i1> %vc
725}
726
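; <vscale x 8 x i16> tests: SEW=16, LMUL=2 (m2).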
727define <vscale x 8 x i1> @icmp_eq_vv_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb) {
728; CHECK-LABEL: icmp_eq_vv_nxv8i16:
729; CHECK:       # %bb.0:
730; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
731; CHECK-NEXT:    vmseq.vv v0, v8, v10
732; CHECK-NEXT:    ret
733  %vc = icmp eq <vscale x 8 x i16> %va, %vb
734  ret <vscale x 8 x i1> %vc
735}
736
737define <vscale x 8 x i1> @icmp_eq_vx_nxv8i16(<vscale x 8 x i16> %va, i16 %b) {
738; CHECK-LABEL: icmp_eq_vx_nxv8i16:
739; CHECK:       # %bb.0:
740; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
741; CHECK-NEXT:    vmseq.vx v0, v8, a0
742; CHECK-NEXT:    ret
743  %head = insertelement <vscale x 8 x i16> poison, i16 %b, i32 0
744  %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
745  %vc = icmp eq <vscale x 8 x i16> %va, %splat
746  ret <vscale x 8 x i1> %vc
747}
748
749define <vscale x 8 x i1> @icmp_eq_xv_nxv8i16(<vscale x 8 x i16> %va, i16 %b) {
750; CHECK-LABEL: icmp_eq_xv_nxv8i16:
751; CHECK:       # %bb.0:
752; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
753; CHECK-NEXT:    vmseq.vx v0, v8, a0
754; CHECK-NEXT:    ret
755  %head = insertelement <vscale x 8 x i16> poison, i16 %b, i32 0
756  %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
757  %vc = icmp eq <vscale x 8 x i16> %splat, %va
758  ret <vscale x 8 x i1> %vc
759}
760
761define <vscale x 8 x i1> @icmp_eq_vi_nxv8i16_0(<vscale x 8 x i16> %va) {
762; CHECK-LABEL: icmp_eq_vi_nxv8i16_0:
763; CHECK:       # %bb.0:
764; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
765; CHECK-NEXT:    vmseq.vi v0, v8, 0
766; CHECK-NEXT:    ret
767  %vc = icmp eq <vscale x 8 x i16> %va, splat (i16 0)
768  ret <vscale x 8 x i1> %vc
769}
770
771define <vscale x 8 x i1> @icmp_eq_vi_nxv8i16_1(<vscale x 8 x i16> %va) {
772; CHECK-LABEL: icmp_eq_vi_nxv8i16_1:
773; CHECK:       # %bb.0:
774; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
775; CHECK-NEXT:    vmseq.vi v0, v8, 5
776; CHECK-NEXT:    ret
777  %vc = icmp eq <vscale x 8 x i16> %va, splat (i16 5)
778  ret <vscale x 8 x i1> %vc
779}
780
781define <vscale x 8 x i1> @icmp_eq_iv_nxv8i16_1(<vscale x 8 x i16> %va) {
782; CHECK-LABEL: icmp_eq_iv_nxv8i16_1:
783; CHECK:       # %bb.0:
784; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
785; CHECK-NEXT:    vmseq.vi v0, v8, 5
786; CHECK-NEXT:    ret
787  %vc = icmp eq <vscale x 8 x i16> splat (i16 5), %va
788  ret <vscale x 8 x i1> %vc
789}
790
791define <vscale x 8 x i1> @icmp_ne_vv_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb) {
792; CHECK-LABEL: icmp_ne_vv_nxv8i16:
793; CHECK:       # %bb.0:
794; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
795; CHECK-NEXT:    vmsne.vv v0, v8, v10
796; CHECK-NEXT:    ret
797  %vc = icmp ne <vscale x 8 x i16> %va, %vb
798  ret <vscale x 8 x i1> %vc
799}
800
801define <vscale x 8 x i1> @icmp_ne_vx_nxv8i16(<vscale x 8 x i16> %va, i16 %b) {
802; CHECK-LABEL: icmp_ne_vx_nxv8i16:
803; CHECK:       # %bb.0:
804; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
805; CHECK-NEXT:    vmsne.vx v0, v8, a0
806; CHECK-NEXT:    ret
807  %head = insertelement <vscale x 8 x i16> poison, i16 %b, i32 0
808  %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
809  %vc = icmp ne <vscale x 8 x i16> %va, %splat
810  ret <vscale x 8 x i1> %vc
811}
812
813define <vscale x 8 x i1> @icmp_ne_xv_nxv8i16(<vscale x 8 x i16> %va, i16 %b) {
814; CHECK-LABEL: icmp_ne_xv_nxv8i16:
815; CHECK:       # %bb.0:
816; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
817; CHECK-NEXT:    vmsne.vx v0, v8, a0
818; CHECK-NEXT:    ret
819  %head = insertelement <vscale x 8 x i16> poison, i16 %b, i32 0
820  %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
821  %vc = icmp ne <vscale x 8 x i16> %splat, %va
822  ret <vscale x 8 x i1> %vc
823}
824
825define <vscale x 8 x i1> @icmp_ne_vi_nxv8i16_0(<vscale x 8 x i16> %va) {
826; CHECK-LABEL: icmp_ne_vi_nxv8i16_0:
827; CHECK:       # %bb.0:
828; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
829; CHECK-NEXT:    vmsne.vi v0, v8, 5
830; CHECK-NEXT:    ret
831  %vc = icmp ne <vscale x 8 x i16> %va, splat (i16 5)
832  ret <vscale x 8 x i1> %vc
833}
834
835define <vscale x 8 x i1> @icmp_ugt_vv_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb) {
836; CHECK-LABEL: icmp_ugt_vv_nxv8i16:
837; CHECK:       # %bb.0:
838; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
839; CHECK-NEXT:    vmsltu.vv v0, v10, v8
840; CHECK-NEXT:    ret
841  %vc = icmp ugt <vscale x 8 x i16> %va, %vb
842  ret <vscale x 8 x i1> %vc
843}
844
845define <vscale x 8 x i1> @icmp_ugt_vx_nxv8i16(<vscale x 8 x i16> %va, i16 %b) {
846; CHECK-LABEL: icmp_ugt_vx_nxv8i16:
847; CHECK:       # %bb.0:
848; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
849; CHECK-NEXT:    vmsgtu.vx v0, v8, a0
850; CHECK-NEXT:    ret
851  %head = insertelement <vscale x 8 x i16> poison, i16 %b, i32 0
852  %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
853  %vc = icmp ugt <vscale x 8 x i16> %va, %splat
854  ret <vscale x 8 x i1> %vc
855}
856
857define <vscale x 8 x i1> @icmp_ugt_xv_nxv8i16(<vscale x 8 x i16> %va, i16 %b) {
858; CHECK-LABEL: icmp_ugt_xv_nxv8i16:
859; CHECK:       # %bb.0:
860; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
861; CHECK-NEXT:    vmsltu.vx v0, v8, a0
862; CHECK-NEXT:    ret
863  %head = insertelement <vscale x 8 x i16> poison, i16 %b, i32 0
864  %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
865  %vc = icmp ugt <vscale x 8 x i16> %splat, %va
866  ret <vscale x 8 x i1> %vc
867}
868
869define <vscale x 8 x i1> @icmp_ugt_vi_nxv8i16_0(<vscale x 8 x i16> %va) {
870; CHECK-LABEL: icmp_ugt_vi_nxv8i16_0:
871; CHECK:       # %bb.0:
872; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
873; CHECK-NEXT:    vmsgtu.vi v0, v8, 5
874; CHECK-NEXT:    ret
875  %vc = icmp ugt <vscale x 8 x i16> %va, splat (i16 5)
876  ret <vscale x 8 x i1> %vc
877}
878
879define <vscale x 8 x i1> @icmp_uge_vv_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb) {
880; CHECK-LABEL: icmp_uge_vv_nxv8i16:
881; CHECK:       # %bb.0:
882; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
883; CHECK-NEXT:    vmsleu.vv v0, v10, v8
884; CHECK-NEXT:    ret
885  %vc = icmp uge <vscale x 8 x i16> %va, %vb
886  ret <vscale x 8 x i1> %vc
887}
888
889define <vscale x 8 x i1> @icmp_uge_vx_nxv8i16(<vscale x 8 x i16> %va, i16 %b) {
890; CHECK-LABEL: icmp_uge_vx_nxv8i16:
891; CHECK:       # %bb.0:
892; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
893; CHECK-NEXT:    vmv.v.x v10, a0
894; CHECK-NEXT:    vmsleu.vv v0, v10, v8
895; CHECK-NEXT:    ret
896  %head = insertelement <vscale x 8 x i16> poison, i16 %b, i32 0
897  %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
898  %vc = icmp uge <vscale x 8 x i16> %va, %splat
899  ret <vscale x 8 x i1> %vc
900}
901
902define <vscale x 8 x i1> @icmp_uge_xv_nxv8i16(<vscale x 8 x i16> %va, i16 %b) {
903; CHECK-LABEL: icmp_uge_xv_nxv8i16:
904; CHECK:       # %bb.0:
905; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
906; CHECK-NEXT:    vmsleu.vx v0, v8, a0
907; CHECK-NEXT:    ret
908  %head = insertelement <vscale x 8 x i16> poison, i16 %b, i32 0
909  %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
910  %vc = icmp uge <vscale x 8 x i16> %splat, %va
911  ret <vscale x 8 x i1> %vc
912}
913
914define <vscale x 8 x i1> @icmp_uge_vi_nxv8i16_0(<vscale x 8 x i16> %va) {
915; CHECK-LABEL: icmp_uge_vi_nxv8i16_0:
916; CHECK:       # %bb.0:
917; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
918; CHECK-NEXT:    vmv.v.i v10, -16
919; CHECK-NEXT:    vmsleu.vv v0, v10, v8
920; CHECK-NEXT:    ret
921  %vc = icmp uge <vscale x 8 x i16> %va, splat (i16 -16)
922  ret <vscale x 8 x i1> %vc
923}
924
925define <vscale x 8 x i1> @icmp_uge_vi_nxv8i16_1(<vscale x 8 x i16> %va) {
926; CHECK-LABEL: icmp_uge_vi_nxv8i16_1:
927; CHECK:       # %bb.0:
928; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
929; CHECK-NEXT:    vmsgtu.vi v0, v8, 14
930; CHECK-NEXT:    ret
931  %vc = icmp uge <vscale x 8 x i16> %va, splat (i16 15)
932  ret <vscale x 8 x i1> %vc
933}
934
935define <vscale x 8 x i1> @icmp_uge_iv_nxv8i16_1(<vscale x 8 x i16> %va) {
936; CHECK-LABEL: icmp_uge_iv_nxv8i16_1:
937; CHECK:       # %bb.0:
938; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
939; CHECK-NEXT:    vmsleu.vi v0, v8, 15
940; CHECK-NEXT:    ret
941  %vc = icmp uge <vscale x 8 x i16> splat (i16 15), %va
942  ret <vscale x 8 x i1> %vc
943}
944
945define <vscale x 8 x i1> @icmp_uge_vi_nxv8i16_2(<vscale x 8 x i16> %va) {
946; CHECK-LABEL: icmp_uge_vi_nxv8i16_2:
947; CHECK:       # %bb.0:
948; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
949; CHECK-NEXT:    vmset.m v0
950; CHECK-NEXT:    ret
951  %vc = icmp uge <vscale x 8 x i16> %va, splat (i16 0)
952  ret <vscale x 8 x i1> %vc
953}
954
955define <vscale x 8 x i1> @icmp_uge_vi_nxv8i16_3(<vscale x 8 x i16> %va) {
956; CHECK-LABEL: icmp_uge_vi_nxv8i16_3:
957; CHECK:       # %bb.0:
958; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
959; CHECK-NEXT:    vmsgtu.vi v0, v8, 0
960; CHECK-NEXT:    ret
961  %vc = icmp uge <vscale x 8 x i16> %va, splat (i16 1)
962  ret <vscale x 8 x i1> %vc
963}
964
965define <vscale x 8 x i1> @icmp_uge_vi_nxv8i16_4(<vscale x 8 x i16> %va) {
966; CHECK-LABEL: icmp_uge_vi_nxv8i16_4:
967; CHECK:       # %bb.0:
968; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
969; CHECK-NEXT:    vmsgtu.vi v0, v8, -16
970; CHECK-NEXT:    ret
971  %vc = icmp uge <vscale x 8 x i16> %va, splat (i16 -15)
972  ret <vscale x 8 x i1> %vc
973}
974
975define <vscale x 8 x i1> @icmp_uge_vi_nxv8i16_5(<vscale x 8 x i16> %va) {
976; CHECK-LABEL: icmp_uge_vi_nxv8i16_5:
977; CHECK:       # %bb.0:
978; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
979; CHECK-NEXT:    vmsgtu.vi v0, v8, 15
980; CHECK-NEXT:    ret
981  %vc = icmp uge <vscale x 8 x i16> %va, splat (i16 16)
982  ret <vscale x 8 x i1> %vc
983}
984
985define <vscale x 8 x i1> @icmp_ult_vv_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb) {
986; CHECK-LABEL: icmp_ult_vv_nxv8i16:
987; CHECK:       # %bb.0:
988; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
989; CHECK-NEXT:    vmsltu.vv v0, v8, v10
990; CHECK-NEXT:    ret
991  %vc = icmp ult <vscale x 8 x i16> %va, %vb
992  ret <vscale x 8 x i1> %vc
993}
994
995define <vscale x 8 x i1> @icmp_ult_vx_nxv8i16(<vscale x 8 x i16> %va, i16 %b) {
996; CHECK-LABEL: icmp_ult_vx_nxv8i16:
997; CHECK:       # %bb.0:
998; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
999; CHECK-NEXT:    vmsltu.vx v0, v8, a0
1000; CHECK-NEXT:    ret
1001  %head = insertelement <vscale x 8 x i16> poison, i16 %b, i32 0
1002  %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
1003  %vc = icmp ult <vscale x 8 x i16> %va, %splat
1004  ret <vscale x 8 x i1> %vc
1005}
1006
1007define <vscale x 8 x i1> @icmp_ult_xv_nxv8i16(<vscale x 8 x i16> %va, i16 %b) {
1008; CHECK-LABEL: icmp_ult_xv_nxv8i16:
1009; CHECK:       # %bb.0:
1010; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
1011; CHECK-NEXT:    vmsgtu.vx v0, v8, a0
1012; CHECK-NEXT:    ret
1013  %head = insertelement <vscale x 8 x i16> poison, i16 %b, i32 0
1014  %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
1015  %vc = icmp ult <vscale x 8 x i16> %splat, %va
1016  ret <vscale x 8 x i1> %vc
1017}
1018
1019define <vscale x 8 x i1> @icmp_ult_vi_nxv8i16_0(<vscale x 8 x i16> %va) {
1020; CHECK-LABEL: icmp_ult_vi_nxv8i16_0:
1021; CHECK:       # %bb.0:
1022; CHECK-NEXT:    li a0, -16
1023; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
1024; CHECK-NEXT:    vmsltu.vx v0, v8, a0
1025; CHECK-NEXT:    ret
1026  %vc = icmp ult <vscale x 8 x i16> %va, splat (i16 -16)
1027  ret <vscale x 8 x i1> %vc
1028}
1029
1030define <vscale x 8 x i1> @icmp_ult_vi_nxv8i16_1(<vscale x 8 x i16> %va) {
1031; CHECK-LABEL: icmp_ult_vi_nxv8i16_1:
1032; CHECK:       # %bb.0:
1033; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
1034; CHECK-NEXT:    vmsleu.vi v0, v8, -16
1035; CHECK-NEXT:    ret
1036  %vc = icmp ult <vscale x 8 x i16> %va, splat (i16 -15)
1037  ret <vscale x 8 x i1> %vc
1038}
1039
1040define <vscale x 8 x i1> @icmp_ult_iv_nxv8i16_1(<vscale x 8 x i16> %va) {
1041; CHECK-LABEL: icmp_ult_iv_nxv8i16_1:
1042; CHECK:       # %bb.0:
1043; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
1044; CHECK-NEXT:    vmsgtu.vi v0, v8, -15
1045; CHECK-NEXT:    ret
1046  %vc = icmp ult <vscale x 8 x i16> splat (i16 -15), %va
1047  ret <vscale x 8 x i1> %vc
1048}
1049
1050define <vscale x 8 x i1> @icmp_ult_vi_nxv8i16_2(<vscale x 8 x i16> %va) {
1051; CHECK-LABEL: icmp_ult_vi_nxv8i16_2:
1052; CHECK:       # %bb.0:
1053; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
1054; CHECK-NEXT:    vmclr.m v0
1055; CHECK-NEXT:    ret
1056  %vc = icmp ult <vscale x 8 x i16> %va, splat (i16 0)
1057  ret <vscale x 8 x i1> %vc
1058}
1059
1060define <vscale x 8 x i1> @icmp_ult_vi_nxv8i16_3(<vscale x 8 x i16> %va) {
1061; CHECK-LABEL: icmp_ult_vi_nxv8i16_3:
1062; CHECK:       # %bb.0:
1063; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
1064; CHECK-NEXT:    vmseq.vi v0, v8, 0
1065; CHECK-NEXT:    ret
1066  %vc = icmp ult <vscale x 8 x i16> %va, splat (i16 1)
1067  ret <vscale x 8 x i1> %vc
1068}
1069
1070define <vscale x 8 x i1> @icmp_ult_vi_nxv8i16_4(<vscale x 8 x i16> %va) {
1071; CHECK-LABEL: icmp_ult_vi_nxv8i16_4:
1072; CHECK:       # %bb.0:
1073; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
1074; CHECK-NEXT:    vmsleu.vi v0, v8, 15
1075; CHECK-NEXT:    ret
1076  %vc = icmp ult <vscale x 8 x i16> %va, splat (i16 16)
1077  ret <vscale x 8 x i1> %vc
1078}
1079
1080define <vscale x 8 x i1> @icmp_ule_vv_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb) {
1081; CHECK-LABEL: icmp_ule_vv_nxv8i16:
1082; CHECK:       # %bb.0:
1083; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
1084; CHECK-NEXT:    vmsleu.vv v0, v8, v10
1085; CHECK-NEXT:    ret
1086  %vc = icmp ule <vscale x 8 x i16> %va, %vb
1087  ret <vscale x 8 x i1> %vc
1088}
1089
1090define <vscale x 8 x i1> @icmp_ule_vx_nxv8i16(<vscale x 8 x i16> %va, i16 %b) {
1091; CHECK-LABEL: icmp_ule_vx_nxv8i16:
1092; CHECK:       # %bb.0:
1093; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
1094; CHECK-NEXT:    vmsleu.vx v0, v8, a0
1095; CHECK-NEXT:    ret
1096  %head = insertelement <vscale x 8 x i16> poison, i16 %b, i32 0
1097  %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
1098  %vc = icmp ule <vscale x 8 x i16> %va, %splat
1099  ret <vscale x 8 x i1> %vc
1100}
1101
1102define <vscale x 8 x i1> @icmp_ule_xv_nxv8i16(<vscale x 8 x i16> %va, i16 %b) {
1103; CHECK-LABEL: icmp_ule_xv_nxv8i16:
1104; CHECK:       # %bb.0:
1105; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
1106; CHECK-NEXT:    vmv.v.x v10, a0
1107; CHECK-NEXT:    vmsleu.vv v0, v10, v8
1108; CHECK-NEXT:    ret
1109  %head = insertelement <vscale x 8 x i16> poison, i16 %b, i32 0
1110  %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
1111  %vc = icmp ule <vscale x 8 x i16> %splat, %va
1112  ret <vscale x 8 x i1> %vc
1113}
1114
1115define <vscale x 8 x i1> @icmp_ule_vi_nxv8i16_0(<vscale x 8 x i16> %va) {
1116; CHECK-LABEL: icmp_ule_vi_nxv8i16_0:
1117; CHECK:       # %bb.0:
1118; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
1119; CHECK-NEXT:    vmsleu.vi v0, v8, 5
1120; CHECK-NEXT:    ret
1121  %vc = icmp ule <vscale x 8 x i16> %va, splat (i16 5)
1122  ret <vscale x 8 x i1> %vc
1123}
1124
1125define <vscale x 8 x i1> @icmp_sgt_vv_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb) {
1126; CHECK-LABEL: icmp_sgt_vv_nxv8i16:
1127; CHECK:       # %bb.0:
1128; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
1129; CHECK-NEXT:    vmslt.vv v0, v10, v8
1130; CHECK-NEXT:    ret
1131  %vc = icmp sgt <vscale x 8 x i16> %va, %vb
1132  ret <vscale x 8 x i1> %vc
1133}
1134
1135define <vscale x 8 x i1> @icmp_sgt_vx_nxv8i16(<vscale x 8 x i16> %va, i16 %b) {
1136; CHECK-LABEL: icmp_sgt_vx_nxv8i16:
1137; CHECK:       # %bb.0:
1138; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
1139; CHECK-NEXT:    vmsgt.vx v0, v8, a0
1140; CHECK-NEXT:    ret
1141  %head = insertelement <vscale x 8 x i16> poison, i16 %b, i32 0
1142  %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
1143  %vc = icmp sgt <vscale x 8 x i16> %va, %splat
1144  ret <vscale x 8 x i1> %vc
1145}
1146
1147define <vscale x 8 x i1> @icmp_sgt_xv_nxv8i16(<vscale x 8 x i16> %va, i16 %b) {
1148; CHECK-LABEL: icmp_sgt_xv_nxv8i16:
1149; CHECK:       # %bb.0:
1150; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
1151; CHECK-NEXT:    vmslt.vx v0, v8, a0
1152; CHECK-NEXT:    ret
1153  %head = insertelement <vscale x 8 x i16> poison, i16 %b, i32 0
1154  %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
1155  %vc = icmp sgt <vscale x 8 x i16> %splat, %va
1156  ret <vscale x 8 x i1> %vc
1157}
1158
1159define <vscale x 8 x i1> @icmp_sgt_vi_nxv8i16_0(<vscale x 8 x i16> %va) {
1160; CHECK-LABEL: icmp_sgt_vi_nxv8i16_0:
1161; CHECK:       # %bb.0:
1162; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
1163; CHECK-NEXT:    vmsgt.vi v0, v8, 5
1164; CHECK-NEXT:    ret
1165  %vc = icmp sgt <vscale x 8 x i16> %va, splat (i16 5)
1166  ret <vscale x 8 x i1> %vc
1167}
1168
1169define <vscale x 8 x i1> @icmp_sge_vv_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb) {
1170; CHECK-LABEL: icmp_sge_vv_nxv8i16:
1171; CHECK:       # %bb.0:
1172; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
1173; CHECK-NEXT:    vmsle.vv v0, v10, v8
1174; CHECK-NEXT:    ret
1175  %vc = icmp sge <vscale x 8 x i16> %va, %vb
1176  ret <vscale x 8 x i1> %vc
1177}
1178
1179define <vscale x 8 x i1> @icmp_sge_vx_nxv8i16(<vscale x 8 x i16> %va, i16 %b) {
1180; CHECK-LABEL: icmp_sge_vx_nxv8i16:
1181; CHECK:       # %bb.0:
1182; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
1183; CHECK-NEXT:    vmv.v.x v10, a0
1184; CHECK-NEXT:    vmsle.vv v0, v10, v8
1185; CHECK-NEXT:    ret
1186  %head = insertelement <vscale x 8 x i16> poison, i16 %b, i32 0
1187  %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
1188  %vc = icmp sge <vscale x 8 x i16> %va, %splat
1189  ret <vscale x 8 x i1> %vc
1190}
1191
1192define <vscale x 8 x i1> @icmp_sge_xv_nxv8i16(<vscale x 8 x i16> %va, i16 %b) {
1193; CHECK-LABEL: icmp_sge_xv_nxv8i16:
1194; CHECK:       # %bb.0:
1195; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
1196; CHECK-NEXT:    vmsle.vx v0, v8, a0
1197; CHECK-NEXT:    ret
1198  %head = insertelement <vscale x 8 x i16> poison, i16 %b, i32 0
1199  %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
1200  %vc = icmp sge <vscale x 8 x i16> %splat, %va
1201  ret <vscale x 8 x i1> %vc
1202}
1203
1204define <vscale x 8 x i1> @icmp_sge_vi_nxv8i16_0(<vscale x 8 x i16> %va) {
1205; CHECK-LABEL: icmp_sge_vi_nxv8i16_0:
1206; CHECK:       # %bb.0:
1207; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
1208; CHECK-NEXT:    vmv.v.i v10, -16
1209; CHECK-NEXT:    vmsle.vv v0, v10, v8
1210; CHECK-NEXT:    ret
1211  %vc = icmp sge <vscale x 8 x i16> %va, splat (i16 -16)
1212  ret <vscale x 8 x i1> %vc
1213}
1214
1215define <vscale x 8 x i1> @icmp_sge_vi_nxv8i16_1(<vscale x 8 x i16> %va) {
1216; CHECK-LABEL: icmp_sge_vi_nxv8i16_1:
1217; CHECK:       # %bb.0:
1218; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
1219; CHECK-NEXT:    vmsgt.vi v0, v8, -16
1220; CHECK-NEXT:    ret
1221  %vc = icmp sge <vscale x 8 x i16> %va, splat (i16 -15)
1222  ret <vscale x 8 x i1> %vc
1223}
1224
1225define <vscale x 8 x i1> @icmp_sge_iv_nxv8i16_1(<vscale x 8 x i16> %va) {
1226; CHECK-LABEL: icmp_sge_iv_nxv8i16_1:
1227; CHECK:       # %bb.0:
1228; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
1229; CHECK-NEXT:    vmsle.vi v0, v8, -15
1230; CHECK-NEXT:    ret
1231  %vc = icmp sge <vscale x 8 x i16> splat (i16 -15), %va
1232  ret <vscale x 8 x i1> %vc
1233}
1234
1235define <vscale x 8 x i1> @icmp_sge_vi_nxv8i16_2(<vscale x 8 x i16> %va) {
1236; CHECK-LABEL: icmp_sge_vi_nxv8i16_2:
1237; CHECK:       # %bb.0:
1238; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
1239; CHECK-NEXT:    vmsgt.vi v0, v8, -1
1240; CHECK-NEXT:    ret
1241  %vc = icmp sge <vscale x 8 x i16> %va, splat (i16 0)
1242  ret <vscale x 8 x i1> %vc
1243}
1244
1245define <vscale x 8 x i1> @icmp_sge_vi_nxv8i16_3(<vscale x 8 x i16> %va) {
1246; CHECK-LABEL: icmp_sge_vi_nxv8i16_3:
1247; CHECK:       # %bb.0:
1248; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
1249; CHECK-NEXT:    vmsgt.vi v0, v8, 15
1250; CHECK-NEXT:    ret
1251  %vc = icmp sge <vscale x 8 x i16> %va, splat (i16 16)
1252  ret <vscale x 8 x i1> %vc
1253}
1254
1255define <vscale x 8 x i1> @icmp_slt_vv_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb) {
1256; CHECK-LABEL: icmp_slt_vv_nxv8i16:
1257; CHECK:       # %bb.0:
1258; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
1259; CHECK-NEXT:    vmslt.vv v0, v8, v10
1260; CHECK-NEXT:    ret
1261  %vc = icmp slt <vscale x 8 x i16> %va, %vb
1262  ret <vscale x 8 x i1> %vc
1263}
1264
1265define <vscale x 8 x i1> @icmp_slt_vx_nxv8i16(<vscale x 8 x i16> %va, i16 %b) {
1266; CHECK-LABEL: icmp_slt_vx_nxv8i16:
1267; CHECK:       # %bb.0:
1268; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
1269; CHECK-NEXT:    vmslt.vx v0, v8, a0
1270; CHECK-NEXT:    ret
1271  %head = insertelement <vscale x 8 x i16> poison, i16 %b, i32 0
1272  %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
1273  %vc = icmp slt <vscale x 8 x i16> %va, %splat
1274  ret <vscale x 8 x i1> %vc
1275}
1276
1277define <vscale x 8 x i1> @icmp_slt_xv_nxv8i16(<vscale x 8 x i16> %va, i16 %b) {
1278; CHECK-LABEL: icmp_slt_xv_nxv8i16:
1279; CHECK:       # %bb.0:
1280; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
1281; CHECK-NEXT:    vmsgt.vx v0, v8, a0
1282; CHECK-NEXT:    ret
1283  %head = insertelement <vscale x 8 x i16> poison, i16 %b, i32 0
1284  %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
1285  %vc = icmp slt <vscale x 8 x i16> %splat, %va
1286  ret <vscale x 8 x i1> %vc
1287}
1288
1289define <vscale x 8 x i1> @icmp_slt_vi_nxv8i16_0(<vscale x 8 x i16> %va) {
1290; CHECK-LABEL: icmp_slt_vi_nxv8i16_0:
1291; CHECK:       # %bb.0:
1292; CHECK-NEXT:    li a0, -16
1293; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
1294; CHECK-NEXT:    vmslt.vx v0, v8, a0
1295; CHECK-NEXT:    ret
1296  %vc = icmp slt <vscale x 8 x i16> %va, splat (i16 -16)
1297  ret <vscale x 8 x i1> %vc
1298}
1299
1300define <vscale x 8 x i1> @icmp_slt_vi_nxv8i16_1(<vscale x 8 x i16> %va) {
1301; CHECK-LABEL: icmp_slt_vi_nxv8i16_1:
1302; CHECK:       # %bb.0:
1303; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
1304; CHECK-NEXT:    vmsle.vi v0, v8, -16
1305; CHECK-NEXT:    ret
1306  %vc = icmp slt <vscale x 8 x i16> %va, splat (i16 -15)
1307  ret <vscale x 8 x i1> %vc
1308}
1309
1310define <vscale x 8 x i1> @icmp_slt_iv_nxv8i16_1(<vscale x 8 x i16> %va) {
1311; CHECK-LABEL: icmp_slt_iv_nxv8i16_1:
1312; CHECK:       # %bb.0:
1313; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
1314; CHECK-NEXT:    vmsgt.vi v0, v8, -15
1315; CHECK-NEXT:    ret
1316  %vc = icmp slt <vscale x 8 x i16> splat (i16 -15), %va
1317  ret <vscale x 8 x i1> %vc
1318}
1319
1320define <vscale x 8 x i1> @icmp_slt_vi_nxv8i16_2(<vscale x 8 x i16> %va) {
1321; CHECK-LABEL: icmp_slt_vi_nxv8i16_2:
1322; CHECK:       # %bb.0:
1323; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
1324; CHECK-NEXT:    vmsle.vi v0, v8, -1
1325; CHECK-NEXT:    ret
1326  %vc = icmp slt <vscale x 8 x i16> %va, splat (i16 0)
1327  ret <vscale x 8 x i1> %vc
1328}
1329
1330define <vscale x 8 x i1> @icmp_slt_vi_nxv8i16_3(<vscale x 8 x i16> %va) {
1331; CHECK-LABEL: icmp_slt_vi_nxv8i16_3:
1332; CHECK:       # %bb.0:
1333; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
1334; CHECK-NEXT:    vmsle.vi v0, v8, 15
1335; CHECK-NEXT:    ret
1336  %vc = icmp slt <vscale x 8 x i16> %va, splat (i16 16)
1337  ret <vscale x 8 x i1> %vc
1338}
1339
1340define <vscale x 8 x i1> @icmp_sle_vv_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb) {
1341; CHECK-LABEL: icmp_sle_vv_nxv8i16:
1342; CHECK:       # %bb.0:
1343; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
1344; CHECK-NEXT:    vmsle.vv v0, v8, v10
1345; CHECK-NEXT:    ret
1346  %vc = icmp sle <vscale x 8 x i16> %va, %vb
1347  ret <vscale x 8 x i1> %vc
1348}
1349
1350define <vscale x 8 x i1> @icmp_sle_vx_nxv8i16(<vscale x 8 x i16> %va, i16 %b) {
1351; CHECK-LABEL: icmp_sle_vx_nxv8i16:
1352; CHECK:       # %bb.0:
1353; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
1354; CHECK-NEXT:    vmsle.vx v0, v8, a0
1355; CHECK-NEXT:    ret
1356  %head = insertelement <vscale x 8 x i16> poison, i16 %b, i32 0
1357  %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
1358  %vc = icmp sle <vscale x 8 x i16> %va, %splat
1359  ret <vscale x 8 x i1> %vc
1360}
1361
1362define <vscale x 8 x i1> @icmp_sle_xv_nxv8i16(<vscale x 8 x i16> %va, i16 %b) {
1363; CHECK-LABEL: icmp_sle_xv_nxv8i16:
1364; CHECK:       # %bb.0:
1365; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
1366; CHECK-NEXT:    vmv.v.x v10, a0
1367; CHECK-NEXT:    vmsle.vv v0, v10, v8
1368; CHECK-NEXT:    ret
1369  %head = insertelement <vscale x 8 x i16> poison, i16 %b, i32 0
1370  %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
1371  %vc = icmp sle <vscale x 8 x i16> %splat, %va
1372  ret <vscale x 8 x i1> %vc
1373}
1374
1375define <vscale x 8 x i1> @icmp_sle_vi_nxv8i16_0(<vscale x 8 x i16> %va) {
1376; CHECK-LABEL: icmp_sle_vi_nxv8i16_0:
1377; CHECK:       # %bb.0:
1378; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
1379; CHECK-NEXT:    vmsle.vi v0, v8, 5
1380; CHECK-NEXT:    ret
1381  %vc = icmp sle <vscale x 8 x i16> %va, splat (i16 5)
1382  ret <vscale x 8 x i1> %vc
1383}
1384
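; <vscale x 8 x i32> tests: SEW=32, LMUL=4 (m4).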
1385define <vscale x 8 x i1> @icmp_eq_vv_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb) {
1386; CHECK-LABEL: icmp_eq_vv_nxv8i32:
1387; CHECK:       # %bb.0:
1388; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
1389; CHECK-NEXT:    vmseq.vv v0, v8, v12
1390; CHECK-NEXT:    ret
1391  %vc = icmp eq <vscale x 8 x i32> %va, %vb
1392  ret <vscale x 8 x i1> %vc
1393}
1394
1395define <vscale x 8 x i1> @icmp_eq_vx_nxv8i32(<vscale x 8 x i32> %va, i32 %b) {
1396; CHECK-LABEL: icmp_eq_vx_nxv8i32:
1397; CHECK:       # %bb.0:
1398; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
1399; CHECK-NEXT:    vmseq.vx v0, v8, a0
1400; CHECK-NEXT:    ret
1401  %head = insertelement <vscale x 8 x i32> poison, i32 %b, i32 0
1402  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
1403  %vc = icmp eq <vscale x 8 x i32> %va, %splat
1404  ret <vscale x 8 x i1> %vc
1405}
1406
1407define <vscale x 8 x i1> @icmp_eq_xv_nxv8i32(<vscale x 8 x i32> %va, i32 %b) {
1408; CHECK-LABEL: icmp_eq_xv_nxv8i32:
1409; CHECK:       # %bb.0:
1410; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
1411; CHECK-NEXT:    vmseq.vx v0, v8, a0
1412; CHECK-NEXT:    ret
1413  %head = insertelement <vscale x 8 x i32> poison, i32 %b, i32 0
1414  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
1415  %vc = icmp eq <vscale x 8 x i32> %splat, %va
1416  ret <vscale x 8 x i1> %vc
1417}
1418
1419define <vscale x 8 x i1> @icmp_eq_vi_nxv8i32_0(<vscale x 8 x i32> %va) {
1420; CHECK-LABEL: icmp_eq_vi_nxv8i32_0:
1421; CHECK:       # %bb.0:
1422; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
1423; CHECK-NEXT:    vmseq.vi v0, v8, 0
1424; CHECK-NEXT:    ret
1425  %vc = icmp eq <vscale x 8 x i32> %va, splat (i32 0)
1426  ret <vscale x 8 x i1> %vc
1427}
1428
1429define <vscale x 8 x i1> @icmp_eq_vi_nxv8i32_1(<vscale x 8 x i32> %va) {
1430; CHECK-LABEL: icmp_eq_vi_nxv8i32_1:
1431; CHECK:       # %bb.0:
1432; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
1433; CHECK-NEXT:    vmseq.vi v0, v8, 5
1434; CHECK-NEXT:    ret
1435  %vc = icmp eq <vscale x 8 x i32> %va, splat (i32 5)
1436  ret <vscale x 8 x i1> %vc
1437}
1438
1439define <vscale x 8 x i1> @icmp_eq_iv_nxv8i32_1(<vscale x 8 x i32> %va) {
1440; CHECK-LABEL: icmp_eq_iv_nxv8i32_1:
1441; CHECK:       # %bb.0:
1442; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
1443; CHECK-NEXT:    vmseq.vi v0, v8, 5
1444; CHECK-NEXT:    ret
1445  %vc = icmp eq <vscale x 8 x i32> splat (i32 5), %va
1446  ret <vscale x 8 x i1> %vc
1447}
1448
1449define <vscale x 8 x i1> @icmp_ne_vv_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb) {
1450; CHECK-LABEL: icmp_ne_vv_nxv8i32:
1451; CHECK:       # %bb.0:
1452; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
1453; CHECK-NEXT:    vmsne.vv v0, v8, v12
1454; CHECK-NEXT:    ret
1455  %vc = icmp ne <vscale x 8 x i32> %va, %vb
1456  ret <vscale x 8 x i1> %vc
1457}
1458
1459define <vscale x 8 x i1> @icmp_ne_vx_nxv8i32(<vscale x 8 x i32> %va, i32 %b) {
1460; CHECK-LABEL: icmp_ne_vx_nxv8i32:
1461; CHECK:       # %bb.0:
1462; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
1463; CHECK-NEXT:    vmsne.vx v0, v8, a0
1464; CHECK-NEXT:    ret
1465  %head = insertelement <vscale x 8 x i32> poison, i32 %b, i32 0
1466  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
1467  %vc = icmp ne <vscale x 8 x i32> %va, %splat
1468  ret <vscale x 8 x i1> %vc
1469}
1470
1471define <vscale x 8 x i1> @icmp_ne_xv_nxv8i32(<vscale x 8 x i32> %va, i32 %b) {
1472; CHECK-LABEL: icmp_ne_xv_nxv8i32:
1473; CHECK:       # %bb.0:
1474; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
1475; CHECK-NEXT:    vmsne.vx v0, v8, a0
1476; CHECK-NEXT:    ret
1477  %head = insertelement <vscale x 8 x i32> poison, i32 %b, i32 0
1478  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
1479  %vc = icmp ne <vscale x 8 x i32> %splat, %va
1480  ret <vscale x 8 x i1> %vc
1481}
1482
1483define <vscale x 8 x i1> @icmp_ne_vi_nxv8i32_0(<vscale x 8 x i32> %va) {
1484; CHECK-LABEL: icmp_ne_vi_nxv8i32_0:
1485; CHECK:       # %bb.0:
1486; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
1487; CHECK-NEXT:    vmsne.vi v0, v8, 5
1488; CHECK-NEXT:    ret
1489  %vc = icmp ne <vscale x 8 x i32> %va, splat (i32 5)
1490  ret <vscale x 8 x i1> %vc
1491}
1492
1493define <vscale x 8 x i1> @icmp_ugt_vv_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb) {
1494; CHECK-LABEL: icmp_ugt_vv_nxv8i32:
1495; CHECK:       # %bb.0:
1496; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
1497; CHECK-NEXT:    vmsltu.vv v0, v12, v8
1498; CHECK-NEXT:    ret
1499  %vc = icmp ugt <vscale x 8 x i32> %va, %vb
1500  ret <vscale x 8 x i1> %vc
1501}
1502
1503define <vscale x 8 x i1> @icmp_ugt_vx_nxv8i32(<vscale x 8 x i32> %va, i32 %b) {
1504; CHECK-LABEL: icmp_ugt_vx_nxv8i32:
1505; CHECK:       # %bb.0:
1506; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
1507; CHECK-NEXT:    vmsgtu.vx v0, v8, a0
1508; CHECK-NEXT:    ret
1509  %head = insertelement <vscale x 8 x i32> poison, i32 %b, i32 0
1510  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
1511  %vc = icmp ugt <vscale x 8 x i32> %va, %splat
1512  ret <vscale x 8 x i1> %vc
1513}
1514
1515define <vscale x 8 x i1> @icmp_ugt_xv_nxv8i32(<vscale x 8 x i32> %va, i32 %b) {
1516; CHECK-LABEL: icmp_ugt_xv_nxv8i32:
1517; CHECK:       # %bb.0:
1518; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
1519; CHECK-NEXT:    vmsltu.vx v0, v8, a0
1520; CHECK-NEXT:    ret
1521  %head = insertelement <vscale x 8 x i32> poison, i32 %b, i32 0
1522  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
1523  %vc = icmp ugt <vscale x 8 x i32> %splat, %va
1524  ret <vscale x 8 x i1> %vc
1525}
1526
1527define <vscale x 8 x i1> @icmp_ugt_vi_nxv8i32_0(<vscale x 8 x i32> %va) {
1528; CHECK-LABEL: icmp_ugt_vi_nxv8i32_0:
1529; CHECK:       # %bb.0:
1530; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
1531; CHECK-NEXT:    vmsgtu.vi v0, v8, 5
1532; CHECK-NEXT:    ret
1533  %vc = icmp ugt <vscale x 8 x i32> %va, splat (i32 5)
1534  ret <vscale x 8 x i1> %vc
1535}
1536
1537define <vscale x 8 x i1> @icmp_uge_vv_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb) {
1538; CHECK-LABEL: icmp_uge_vv_nxv8i32:
1539; CHECK:       # %bb.0:
1540; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
1541; CHECK-NEXT:    vmsleu.vv v0, v12, v8
1542; CHECK-NEXT:    ret
1543  %vc = icmp uge <vscale x 8 x i32> %va, %vb
1544  ret <vscale x 8 x i1> %vc
1545}
1546
1547define <vscale x 8 x i1> @icmp_uge_vx_nxv8i32(<vscale x 8 x i32> %va, i32 %b) {
1548; CHECK-LABEL: icmp_uge_vx_nxv8i32:
1549; CHECK:       # %bb.0:
1550; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
1551; CHECK-NEXT:    vmv.v.x v12, a0
1552; CHECK-NEXT:    vmsleu.vv v0, v12, v8
1553; CHECK-NEXT:    ret
1554  %head = insertelement <vscale x 8 x i32> poison, i32 %b, i32 0
1555  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
1556  %vc = icmp uge <vscale x 8 x i32> %va, %splat
1557  ret <vscale x 8 x i1> %vc
1558}
1559
1560define <vscale x 8 x i1> @icmp_uge_xv_nxv8i32(<vscale x 8 x i32> %va, i32 %b) {
1561; CHECK-LABEL: icmp_uge_xv_nxv8i32:
1562; CHECK:       # %bb.0:
1563; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
1564; CHECK-NEXT:    vmsleu.vx v0, v8, a0
1565; CHECK-NEXT:    ret
1566  %head = insertelement <vscale x 8 x i32> poison, i32 %b, i32 0
1567  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
1568  %vc = icmp uge <vscale x 8 x i32> %splat, %va
1569  ret <vscale x 8 x i1> %vc
1570}
1571
1572define <vscale x 8 x i1> @icmp_uge_vi_nxv8i32_0(<vscale x 8 x i32> %va) {
1573; CHECK-LABEL: icmp_uge_vi_nxv8i32_0:
1574; CHECK:       # %bb.0:
1575; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
1576; CHECK-NEXT:    vmv.v.i v12, -16
1577; CHECK-NEXT:    vmsleu.vv v0, v12, v8
1578; CHECK-NEXT:    ret
1579  %vc = icmp uge <vscale x 8 x i32> %va, splat (i32 -16)
1580  ret <vscale x 8 x i1> %vc
1581}
1582
1583define <vscale x 8 x i1> @icmp_uge_vi_nxv8i32_1(<vscale x 8 x i32> %va) {
1584; CHECK-LABEL: icmp_uge_vi_nxv8i32_1:
1585; CHECK:       # %bb.0:
1586; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
1587; CHECK-NEXT:    vmsgtu.vi v0, v8, 14
1588; CHECK-NEXT:    ret
1589  %vc = icmp uge <vscale x 8 x i32> %va, splat (i32 15)
1590  ret <vscale x 8 x i1> %vc
1591}
1592
1593define <vscale x 8 x i1> @icmp_uge_iv_nxv8i32_1(<vscale x 8 x i32> %va) {
1594; CHECK-LABEL: icmp_uge_iv_nxv8i32_1:
1595; CHECK:       # %bb.0:
1596; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
1597; CHECK-NEXT:    vmsleu.vi v0, v8, 15
1598; CHECK-NEXT:    ret
1599  %vc = icmp uge <vscale x 8 x i32> splat (i32 15), %va
1600  ret <vscale x 8 x i1> %vc
1601}
1602
1603define <vscale x 8 x i1> @icmp_uge_vi_nxv8i32_2(<vscale x 8 x i32> %va) {
1604; CHECK-LABEL: icmp_uge_vi_nxv8i32_2:
1605; CHECK:       # %bb.0:
1606; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
1607; CHECK-NEXT:    vmset.m v0
1608; CHECK-NEXT:    ret
1609  %vc = icmp uge <vscale x 8 x i32> %va, splat (i32 0)
1610  ret <vscale x 8 x i1> %vc
1611}
1612
1613define <vscale x 8 x i1> @icmp_uge_vi_nxv8i32_3(<vscale x 8 x i32> %va) {
1614; CHECK-LABEL: icmp_uge_vi_nxv8i32_3:
1615; CHECK:       # %bb.0:
1616; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
1617; CHECK-NEXT:    vmsgtu.vi v0, v8, 0
1618; CHECK-NEXT:    ret
1619  %vc = icmp uge <vscale x 8 x i32> %va, splat (i32 1)
1620  ret <vscale x 8 x i1> %vc
1621}
1622
1623define <vscale x 8 x i1> @icmp_uge_vi_nxv8i32_4(<vscale x 8 x i32> %va) {
1624; CHECK-LABEL: icmp_uge_vi_nxv8i32_4:
1625; CHECK:       # %bb.0:
1626; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
1627; CHECK-NEXT:    vmsgtu.vi v0, v8, -16
1628; CHECK-NEXT:    ret
1629  %vc = icmp uge <vscale x 8 x i32> %va, splat (i32 -15)
1630  ret <vscale x 8 x i1> %vc
1631}
1632
1633define <vscale x 8 x i1> @icmp_uge_vi_nxv8i32_5(<vscale x 8 x i32> %va) {
1634; CHECK-LABEL: icmp_uge_vi_nxv8i32_5:
1635; CHECK:       # %bb.0:
1636; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
1637; CHECK-NEXT:    vmsgtu.vi v0, v8, 15
1638; CHECK-NEXT:    ret
1639  %vc = icmp uge <vscale x 8 x i32> %va, splat (i32 16)
1640  ret <vscale x 8 x i1> %vc
1641}
1642
1643define <vscale x 8 x i1> @icmp_ult_vv_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb) {
1644; CHECK-LABEL: icmp_ult_vv_nxv8i32:
1645; CHECK:       # %bb.0:
1646; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
1647; CHECK-NEXT:    vmsltu.vv v0, v8, v12
1648; CHECK-NEXT:    ret
1649  %vc = icmp ult <vscale x 8 x i32> %va, %vb
1650  ret <vscale x 8 x i1> %vc
1651}
1652
1653define <vscale x 8 x i1> @icmp_ult_vx_nxv8i32(<vscale x 8 x i32> %va, i32 %b) {
1654; CHECK-LABEL: icmp_ult_vx_nxv8i32:
1655; CHECK:       # %bb.0:
1656; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
1657; CHECK-NEXT:    vmsltu.vx v0, v8, a0
1658; CHECK-NEXT:    ret
1659  %head = insertelement <vscale x 8 x i32> poison, i32 %b, i32 0
1660  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
1661  %vc = icmp ult <vscale x 8 x i32> %va, %splat
1662  ret <vscale x 8 x i1> %vc
1663}
1664
1665define <vscale x 8 x i1> @icmp_ult_xv_nxv8i32(<vscale x 8 x i32> %va, i32 %b) {
1666; CHECK-LABEL: icmp_ult_xv_nxv8i32:
1667; CHECK:       # %bb.0:
1668; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
1669; CHECK-NEXT:    vmsgtu.vx v0, v8, a0
1670; CHECK-NEXT:    ret
1671  %head = insertelement <vscale x 8 x i32> poison, i32 %b, i32 0
1672  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
1673  %vc = icmp ult <vscale x 8 x i32> %splat, %va
1674  ret <vscale x 8 x i1> %vc
1675}
1676
1677define <vscale x 8 x i1> @icmp_ult_vi_nxv8i32_0(<vscale x 8 x i32> %va) {
1678; CHECK-LABEL: icmp_ult_vi_nxv8i32_0:
1679; CHECK:       # %bb.0:
1680; CHECK-NEXT:    li a0, -16
1681; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
1682; CHECK-NEXT:    vmsltu.vx v0, v8, a0
1683; CHECK-NEXT:    ret
1684  %vc = icmp ult <vscale x 8 x i32> %va, splat (i32 -16)
1685  ret <vscale x 8 x i1> %vc
1686}
1687
1688define <vscale x 8 x i1> @icmp_ult_vi_nxv8i32_1(<vscale x 8 x i32> %va) {
1689; CHECK-LABEL: icmp_ult_vi_nxv8i32_1:
1690; CHECK:       # %bb.0:
1691; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
1692; CHECK-NEXT:    vmsleu.vi v0, v8, -16
1693; CHECK-NEXT:    ret
1694  %vc = icmp ult <vscale x 8 x i32> %va, splat (i32 -15)
1695  ret <vscale x 8 x i1> %vc
1696}
1697
1698define <vscale x 8 x i1> @icmp_ult_iv_nxv8i32_1(<vscale x 8 x i32> %va) {
1699; CHECK-LABEL: icmp_ult_iv_nxv8i32_1:
1700; CHECK:       # %bb.0:
1701; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
1702; CHECK-NEXT:    vmsgtu.vi v0, v8, -15
1703; CHECK-NEXT:    ret
1704  %vc = icmp ult <vscale x 8 x i32> splat (i32 -15), %va
1705  ret <vscale x 8 x i1> %vc
1706}
1707
1708define <vscale x 8 x i1> @icmp_ult_vi_nxv8i32_2(<vscale x 8 x i32> %va) {
1709; CHECK-LABEL: icmp_ult_vi_nxv8i32_2:
1710; CHECK:       # %bb.0:
1711; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
1712; CHECK-NEXT:    vmclr.m v0
1713; CHECK-NEXT:    ret
1714  %vc = icmp ult <vscale x 8 x i32> %va, splat (i32 0)
1715  ret <vscale x 8 x i1> %vc
1716}
1717
1718define <vscale x 8 x i1> @icmp_ult_vi_nxv8i32_3(<vscale x 8 x i32> %va) {
1719; CHECK-LABEL: icmp_ult_vi_nxv8i32_3:
1720; CHECK:       # %bb.0:
1721; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
1722; CHECK-NEXT:    vmseq.vi v0, v8, 0
1723; CHECK-NEXT:    ret
1724  %vc = icmp ult <vscale x 8 x i32> %va, splat (i32 1)
1725  ret <vscale x 8 x i1> %vc
1726}
1727
1728define <vscale x 8 x i1> @icmp_ult_vi_nxv8i32_4(<vscale x 8 x i32> %va) {
1729; CHECK-LABEL: icmp_ult_vi_nxv8i32_4:
1730; CHECK:       # %bb.0:
1731; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
1732; CHECK-NEXT:    vmsleu.vi v0, v8, 15
1733; CHECK-NEXT:    ret
1734  %vc = icmp ult <vscale x 8 x i32> %va, splat (i32 16)
1735  ret <vscale x 8 x i1> %vc
1736}
1737
1738define <vscale x 8 x i1> @icmp_ule_vv_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb) {
1739; CHECK-LABEL: icmp_ule_vv_nxv8i32:
1740; CHECK:       # %bb.0:
1741; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
1742; CHECK-NEXT:    vmsleu.vv v0, v8, v12
1743; CHECK-NEXT:    ret
1744  %vc = icmp ule <vscale x 8 x i32> %va, %vb
1745  ret <vscale x 8 x i1> %vc
1746}
1747
1748define <vscale x 8 x i1> @icmp_ule_vx_nxv8i32(<vscale x 8 x i32> %va, i32 %b) {
1749; CHECK-LABEL: icmp_ule_vx_nxv8i32:
1750; CHECK:       # %bb.0:
1751; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
1752; CHECK-NEXT:    vmsleu.vx v0, v8, a0
1753; CHECK-NEXT:    ret
1754  %head = insertelement <vscale x 8 x i32> poison, i32 %b, i32 0
1755  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
1756  %vc = icmp ule <vscale x 8 x i32> %va, %splat
1757  ret <vscale x 8 x i1> %vc
1758}
1759
1760define <vscale x 8 x i1> @icmp_ule_xv_nxv8i32(<vscale x 8 x i32> %va, i32 %b) {
1761; CHECK-LABEL: icmp_ule_xv_nxv8i32:
1762; CHECK:       # %bb.0:
1763; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
1764; CHECK-NEXT:    vmv.v.x v12, a0
1765; CHECK-NEXT:    vmsleu.vv v0, v12, v8
1766; CHECK-NEXT:    ret
1767  %head = insertelement <vscale x 8 x i32> poison, i32 %b, i32 0
1768  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
1769  %vc = icmp ule <vscale x 8 x i32> %splat, %va
1770  ret <vscale x 8 x i1> %vc
1771}
1772
1773define <vscale x 8 x i1> @icmp_ule_vi_nxv8i32_0(<vscale x 8 x i32> %va) {
1774; CHECK-LABEL: icmp_ule_vi_nxv8i32_0:
1775; CHECK:       # %bb.0:
1776; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
1777; CHECK-NEXT:    vmsleu.vi v0, v8, 5
1778; CHECK-NEXT:    ret
1779  %vc = icmp ule <vscale x 8 x i32> %va, splat (i32 5)
1780  ret <vscale x 8 x i1> %vc
1781}
1782
1783define <vscale x 8 x i1> @icmp_sgt_vv_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb) {
1784; CHECK-LABEL: icmp_sgt_vv_nxv8i32:
1785; CHECK:       # %bb.0:
1786; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
1787; CHECK-NEXT:    vmslt.vv v0, v12, v8
1788; CHECK-NEXT:    ret
1789  %vc = icmp sgt <vscale x 8 x i32> %va, %vb
1790  ret <vscale x 8 x i1> %vc
1791}
1792
1793define <vscale x 8 x i1> @icmp_sgt_vx_nxv8i32(<vscale x 8 x i32> %va, i32 %b) {
1794; CHECK-LABEL: icmp_sgt_vx_nxv8i32:
1795; CHECK:       # %bb.0:
1796; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
1797; CHECK-NEXT:    vmsgt.vx v0, v8, a0
1798; CHECK-NEXT:    ret
1799  %head = insertelement <vscale x 8 x i32> poison, i32 %b, i32 0
1800  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
1801  %vc = icmp sgt <vscale x 8 x i32> %va, %splat
1802  ret <vscale x 8 x i1> %vc
1803}
1804
1805define <vscale x 8 x i1> @icmp_sgt_xv_nxv8i32(<vscale x 8 x i32> %va, i32 %b) {
1806; CHECK-LABEL: icmp_sgt_xv_nxv8i32:
1807; CHECK:       # %bb.0:
1808; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
1809; CHECK-NEXT:    vmslt.vx v0, v8, a0
1810; CHECK-NEXT:    ret
1811  %head = insertelement <vscale x 8 x i32> poison, i32 %b, i32 0
1812  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
1813  %vc = icmp sgt <vscale x 8 x i32> %splat, %va
1814  ret <vscale x 8 x i1> %vc
1815}
1816
1817define <vscale x 8 x i1> @icmp_sgt_vi_nxv8i32_0(<vscale x 8 x i32> %va) {
1818; CHECK-LABEL: icmp_sgt_vi_nxv8i32_0:
1819; CHECK:       # %bb.0:
1820; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
1821; CHECK-NEXT:    vmsgt.vi v0, v8, 5
1822; CHECK-NEXT:    ret
1823  %vc = icmp sgt <vscale x 8 x i32> %va, splat (i32 5)
1824  ret <vscale x 8 x i1> %vc
1825}
1826
1827define <vscale x 8 x i1> @icmp_sge_vv_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb) {
1828; CHECK-LABEL: icmp_sge_vv_nxv8i32:
1829; CHECK:       # %bb.0:
1830; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
1831; CHECK-NEXT:    vmsle.vv v0, v12, v8
1832; CHECK-NEXT:    ret
1833  %vc = icmp sge <vscale x 8 x i32> %va, %vb
1834  ret <vscale x 8 x i1> %vc
1835}
1836
1837define <vscale x 8 x i1> @icmp_sge_vx_nxv8i32(<vscale x 8 x i32> %va, i32 %b) {
1838; CHECK-LABEL: icmp_sge_vx_nxv8i32:
1839; CHECK:       # %bb.0:
1840; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
1841; CHECK-NEXT:    vmv.v.x v12, a0
1842; CHECK-NEXT:    vmsle.vv v0, v12, v8
1843; CHECK-NEXT:    ret
1844  %head = insertelement <vscale x 8 x i32> poison, i32 %b, i32 0
1845  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
1846  %vc = icmp sge <vscale x 8 x i32> %va, %splat
1847  ret <vscale x 8 x i1> %vc
1848}
1849
1850define <vscale x 8 x i1> @icmp_sge_xv_nxv8i32(<vscale x 8 x i32> %va, i32 %b) {
1851; CHECK-LABEL: icmp_sge_xv_nxv8i32:
1852; CHECK:       # %bb.0:
1853; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
1854; CHECK-NEXT:    vmsle.vx v0, v8, a0
1855; CHECK-NEXT:    ret
1856  %head = insertelement <vscale x 8 x i32> poison, i32 %b, i32 0
1857  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
1858  %vc = icmp sge <vscale x 8 x i32> %splat, %va
1859  ret <vscale x 8 x i1> %vc
1860}
1861
1862define <vscale x 8 x i1> @icmp_sge_vi_nxv8i32_0(<vscale x 8 x i32> %va) {
1863; CHECK-LABEL: icmp_sge_vi_nxv8i32_0:
1864; CHECK:       # %bb.0:
1865; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
1866; CHECK-NEXT:    vmv.v.i v12, -16
1867; CHECK-NEXT:    vmsle.vv v0, v12, v8
1868; CHECK-NEXT:    ret
1869  %vc = icmp sge <vscale x 8 x i32> %va, splat (i32 -16)
1870  ret <vscale x 8 x i1> %vc
1871}
1872
1873define <vscale x 8 x i1> @icmp_sge_vi_nxv8i32_1(<vscale x 8 x i32> %va) {
1874; CHECK-LABEL: icmp_sge_vi_nxv8i32_1:
1875; CHECK:       # %bb.0:
1876; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
1877; CHECK-NEXT:    vmsgt.vi v0, v8, -16
1878; CHECK-NEXT:    ret
1879  %vc = icmp sge <vscale x 8 x i32> %va, splat (i32 -15)
1880  ret <vscale x 8 x i1> %vc
1881}
1882
1883define <vscale x 8 x i1> @icmp_sge_iv_nxv8i32_1(<vscale x 8 x i32> %va) {
1884; CHECK-LABEL: icmp_sge_iv_nxv8i32_1:
1885; CHECK:       # %bb.0:
1886; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
1887; CHECK-NEXT:    vmsle.vi v0, v8, -15
1888; CHECK-NEXT:    ret
1889  %vc = icmp sge <vscale x 8 x i32> splat (i32 -15), %va
1890  ret <vscale x 8 x i1> %vc
1891}
1892
1893define <vscale x 8 x i1> @icmp_sge_vi_nxv8i32_2(<vscale x 8 x i32> %va) {
1894; CHECK-LABEL: icmp_sge_vi_nxv8i32_2:
1895; CHECK:       # %bb.0:
1896; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
1897; CHECK-NEXT:    vmsgt.vi v0, v8, -1
1898; CHECK-NEXT:    ret
1899  %vc = icmp sge <vscale x 8 x i32> %va, splat (i32 0)
1900  ret <vscale x 8 x i1> %vc
1901}
1902
1903define <vscale x 8 x i1> @icmp_sge_vi_nxv8i32_3(<vscale x 8 x i32> %va) {
1904; CHECK-LABEL: icmp_sge_vi_nxv8i32_3:
1905; CHECK:       # %bb.0:
1906; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
1907; CHECK-NEXT:    vmsgt.vi v0, v8, 15
1908; CHECK-NEXT:    ret
1909  %vc = icmp sge <vscale x 8 x i32> %va, splat (i32 16)
1910  ret <vscale x 8 x i1> %vc
1911}
1912
1913define <vscale x 8 x i1> @icmp_slt_vv_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb) {
1914; CHECK-LABEL: icmp_slt_vv_nxv8i32:
1915; CHECK:       # %bb.0:
1916; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
1917; CHECK-NEXT:    vmslt.vv v0, v8, v12
1918; CHECK-NEXT:    ret
1919  %vc = icmp slt <vscale x 8 x i32> %va, %vb
1920  ret <vscale x 8 x i1> %vc
1921}
1922
1923define <vscale x 8 x i1> @icmp_slt_vx_nxv8i32(<vscale x 8 x i32> %va, i32 %b) {
1924; CHECK-LABEL: icmp_slt_vx_nxv8i32:
1925; CHECK:       # %bb.0:
1926; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
1927; CHECK-NEXT:    vmslt.vx v0, v8, a0
1928; CHECK-NEXT:    ret
1929  %head = insertelement <vscale x 8 x i32> poison, i32 %b, i32 0
1930  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
1931  %vc = icmp slt <vscale x 8 x i32> %va, %splat
1932  ret <vscale x 8 x i1> %vc
1933}
1934
1935define <vscale x 8 x i1> @icmp_slt_xv_nxv8i32(<vscale x 8 x i32> %va, i32 %b) {
1936; CHECK-LABEL: icmp_slt_xv_nxv8i32:
1937; CHECK:       # %bb.0:
1938; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
1939; CHECK-NEXT:    vmsgt.vx v0, v8, a0
1940; CHECK-NEXT:    ret
1941  %head = insertelement <vscale x 8 x i32> poison, i32 %b, i32 0
1942  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
1943  %vc = icmp slt <vscale x 8 x i32> %splat, %va
1944  ret <vscale x 8 x i1> %vc
1945}
1946
1947define <vscale x 8 x i1> @icmp_slt_vi_nxv8i32_0(<vscale x 8 x i32> %va) {
1948; CHECK-LABEL: icmp_slt_vi_nxv8i32_0:
1949; CHECK:       # %bb.0:
1950; CHECK-NEXT:    li a0, -16
1951; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
1952; CHECK-NEXT:    vmslt.vx v0, v8, a0
1953; CHECK-NEXT:    ret
1954  %vc = icmp slt <vscale x 8 x i32> %va, splat (i32 -16)
1955  ret <vscale x 8 x i1> %vc
1956}
1957
1958define <vscale x 8 x i1> @icmp_slt_vi_nxv8i32_1(<vscale x 8 x i32> %va) {
1959; CHECK-LABEL: icmp_slt_vi_nxv8i32_1:
1960; CHECK:       # %bb.0:
1961; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
1962; CHECK-NEXT:    vmsle.vi v0, v8, -16
1963; CHECK-NEXT:    ret
1964  %vc = icmp slt <vscale x 8 x i32> %va, splat (i32 -15)
1965  ret <vscale x 8 x i1> %vc
1966}
1967
1968define <vscale x 8 x i1> @icmp_slt_iv_nxv8i32_1(<vscale x 8 x i32> %va) {
1969; CHECK-LABEL: icmp_slt_iv_nxv8i32_1:
1970; CHECK:       # %bb.0:
1971; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
1972; CHECK-NEXT:    vmsgt.vi v0, v8, -15
1973; CHECK-NEXT:    ret
1974  %vc = icmp slt <vscale x 8 x i32> splat (i32 -15), %va
1975  ret <vscale x 8 x i1> %vc
1976}
1977
1978define <vscale x 8 x i1> @icmp_slt_vi_nxv8i32_2(<vscale x 8 x i32> %va) {
1979; CHECK-LABEL: icmp_slt_vi_nxv8i32_2:
1980; CHECK:       # %bb.0:
1981; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
1982; CHECK-NEXT:    vmsle.vi v0, v8, -1
1983; CHECK-NEXT:    ret
1984  %vc = icmp slt <vscale x 8 x i32> %va, splat (i32 0)
1985  ret <vscale x 8 x i1> %vc
1986}
1987
1988define <vscale x 8 x i1> @icmp_slt_vi_nxv8i32_3(<vscale x 8 x i32> %va) {
1989; CHECK-LABEL: icmp_slt_vi_nxv8i32_3:
1990; CHECK:       # %bb.0:
1991; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
1992; CHECK-NEXT:    vmsle.vi v0, v8, 15
1993; CHECK-NEXT:    ret
1994  %vc = icmp slt <vscale x 8 x i32> %va, splat (i32 16)
1995  ret <vscale x 8 x i1> %vc
1996}
1997
1998define <vscale x 8 x i1> @icmp_sle_vv_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb) {
1999; CHECK-LABEL: icmp_sle_vv_nxv8i32:
2000; CHECK:       # %bb.0:
2001; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
2002; CHECK-NEXT:    vmsle.vv v0, v8, v12
2003; CHECK-NEXT:    ret
2004  %vc = icmp sle <vscale x 8 x i32> %va, %vb
2005  ret <vscale x 8 x i1> %vc
2006}
2007
2008define <vscale x 8 x i1> @icmp_sle_vx_nxv8i32(<vscale x 8 x i32> %va, i32 %b) {
2009; CHECK-LABEL: icmp_sle_vx_nxv8i32:
2010; CHECK:       # %bb.0:
2011; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
2012; CHECK-NEXT:    vmsle.vx v0, v8, a0
2013; CHECK-NEXT:    ret
2014  %head = insertelement <vscale x 8 x i32> poison, i32 %b, i32 0
2015  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
2016  %vc = icmp sle <vscale x 8 x i32> %va, %splat
2017  ret <vscale x 8 x i1> %vc
2018}
2019
2020define <vscale x 8 x i1> @icmp_sle_xv_nxv8i32(<vscale x 8 x i32> %va, i32 %b) {
2021; CHECK-LABEL: icmp_sle_xv_nxv8i32:
2022; CHECK:       # %bb.0:
2023; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
2024; CHECK-NEXT:    vmv.v.x v12, a0
2025; CHECK-NEXT:    vmsle.vv v0, v12, v8
2026; CHECK-NEXT:    ret
2027  %head = insertelement <vscale x 8 x i32> poison, i32 %b, i32 0
2028  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
2029  %vc = icmp sle <vscale x 8 x i32> %splat, %va
2030  ret <vscale x 8 x i1> %vc
2031}
2032
2033define <vscale x 8 x i1> @icmp_sle_vi_nxv8i32_0(<vscale x 8 x i32> %va) {
2034; CHECK-LABEL: icmp_sle_vi_nxv8i32_0:
2035; CHECK:       # %bb.0:
2036; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
2037; CHECK-NEXT:    vmsle.vi v0, v8, 5
2038; CHECK-NEXT:    ret
2039  %vc = icmp sle <vscale x 8 x i32> %va, splat (i32 5)
2040  ret <vscale x 8 x i1> %vc
2041}
2042
2043define <vscale x 8 x i1> @icmp_eq_vv_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb) {
2044; CHECK-LABEL: icmp_eq_vv_nxv8i64:
2045; CHECK:       # %bb.0:
2046; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
2047; CHECK-NEXT:    vmseq.vv v0, v8, v16
2048; CHECK-NEXT:    ret
2049  %vc = icmp eq <vscale x 8 x i64> %va, %vb
2050  ret <vscale x 8 x i1> %vc
2051}
2052
2053define <vscale x 8 x i1> @icmp_eq_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
2054; RV32-LABEL: icmp_eq_vx_nxv8i64:
2055; RV32:       # %bb.0:
2056; RV32-NEXT:    addi sp, sp, -16
2057; RV32-NEXT:    .cfi_def_cfa_offset 16
2058; RV32-NEXT:    sw a0, 8(sp)
2059; RV32-NEXT:    sw a1, 12(sp)
2060; RV32-NEXT:    addi a0, sp, 8
2061; RV32-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
2062; RV32-NEXT:    vlse64.v v16, (a0), zero
2063; RV32-NEXT:    vmseq.vv v0, v8, v16
2064; RV32-NEXT:    addi sp, sp, 16
2065; RV32-NEXT:    .cfi_def_cfa_offset 0
2066; RV32-NEXT:    ret
2067;
2068; RV64-LABEL: icmp_eq_vx_nxv8i64:
2069; RV64:       # %bb.0:
2070; RV64-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
2071; RV64-NEXT:    vmseq.vx v0, v8, a0
2072; RV64-NEXT:    ret
2073  %head = insertelement <vscale x 8 x i64> poison, i64 %b, i32 0
2074  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
2075  %vc = icmp eq <vscale x 8 x i64> %va, %splat
2076  ret <vscale x 8 x i1> %vc
2077}
2078
2079define <vscale x 8 x i1> @icmp_eq_xv_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
2080; RV32-LABEL: icmp_eq_xv_nxv8i64:
2081; RV32:       # %bb.0:
2082; RV32-NEXT:    addi sp, sp, -16
2083; RV32-NEXT:    .cfi_def_cfa_offset 16
2084; RV32-NEXT:    sw a0, 8(sp)
2085; RV32-NEXT:    sw a1, 12(sp)
2086; RV32-NEXT:    addi a0, sp, 8
2087; RV32-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
2088; RV32-NEXT:    vlse64.v v16, (a0), zero
2089; RV32-NEXT:    vmseq.vv v0, v16, v8
2090; RV32-NEXT:    addi sp, sp, 16
2091; RV32-NEXT:    .cfi_def_cfa_offset 0
2092; RV32-NEXT:    ret
2093;
2094; RV64-LABEL: icmp_eq_xv_nxv8i64:
2095; RV64:       # %bb.0:
2096; RV64-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
2097; RV64-NEXT:    vmseq.vx v0, v8, a0
2098; RV64-NEXT:    ret
2099  %head = insertelement <vscale x 8 x i64> poison, i64 %b, i32 0
2100  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
2101  %vc = icmp eq <vscale x 8 x i64> %splat, %va
2102  ret <vscale x 8 x i1> %vc
2103}
2104
2105define <vscale x 8 x i1> @icmp_eq_vi_nxv8i64_0(<vscale x 8 x i64> %va) {
2106; CHECK-LABEL: icmp_eq_vi_nxv8i64_0:
2107; CHECK:       # %bb.0:
2108; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
2109; CHECK-NEXT:    vmseq.vi v0, v8, 0
2110; CHECK-NEXT:    ret
2111  %vc = icmp eq <vscale x 8 x i64> %va, splat (i64 0)
2112  ret <vscale x 8 x i1> %vc
2113}
2114
2115define <vscale x 8 x i1> @icmp_eq_vi_nxv8i64_1(<vscale x 8 x i64> %va) {
2116; CHECK-LABEL: icmp_eq_vi_nxv8i64_1:
2117; CHECK:       # %bb.0:
2118; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
2119; CHECK-NEXT:    vmseq.vi v0, v8, 5
2120; CHECK-NEXT:    ret
2121  %vc = icmp eq <vscale x 8 x i64> %va, splat (i64 5)
2122  ret <vscale x 8 x i1> %vc
2123}
2124
2125define <vscale x 8 x i1> @icmp_eq_iv_nxv8i64_1(<vscale x 8 x i64> %va) {
2126; CHECK-LABEL: icmp_eq_iv_nxv8i64_1:
2127; CHECK:       # %bb.0:
2128; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
2129; CHECK-NEXT:    vmseq.vi v0, v8, 5
2130; CHECK-NEXT:    ret
2131  %vc = icmp eq <vscale x 8 x i64> splat (i64 5), %va
2132  ret <vscale x 8 x i1> %vc
2133}
2134
2135define <vscale x 8 x i1> @icmp_ne_vv_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb) {
2136; CHECK-LABEL: icmp_ne_vv_nxv8i64:
2137; CHECK:       # %bb.0:
2138; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
2139; CHECK-NEXT:    vmsne.vv v0, v8, v16
2140; CHECK-NEXT:    ret
2141  %vc = icmp ne <vscale x 8 x i64> %va, %vb
2142  ret <vscale x 8 x i1> %vc
2143}
2144
2145define <vscale x 8 x i1> @icmp_ne_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
2146; RV32-LABEL: icmp_ne_vx_nxv8i64:
2147; RV32:       # %bb.0:
2148; RV32-NEXT:    addi sp, sp, -16
2149; RV32-NEXT:    .cfi_def_cfa_offset 16
2150; RV32-NEXT:    sw a0, 8(sp)
2151; RV32-NEXT:    sw a1, 12(sp)
2152; RV32-NEXT:    addi a0, sp, 8
2153; RV32-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
2154; RV32-NEXT:    vlse64.v v16, (a0), zero
2155; RV32-NEXT:    vmsne.vv v0, v8, v16
2156; RV32-NEXT:    addi sp, sp, 16
2157; RV32-NEXT:    .cfi_def_cfa_offset 0
2158; RV32-NEXT:    ret
2159;
2160; RV64-LABEL: icmp_ne_vx_nxv8i64:
2161; RV64:       # %bb.0:
2162; RV64-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
2163; RV64-NEXT:    vmsne.vx v0, v8, a0
2164; RV64-NEXT:    ret
2165  %head = insertelement <vscale x 8 x i64> poison, i64 %b, i32 0
2166  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
2167  %vc = icmp ne <vscale x 8 x i64> %va, %splat
2168  ret <vscale x 8 x i1> %vc
2169}
2170
2171define <vscale x 8 x i1> @icmp_ne_xv_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
2172; RV32-LABEL: icmp_ne_xv_nxv8i64:
2173; RV32:       # %bb.0:
2174; RV32-NEXT:    addi sp, sp, -16
2175; RV32-NEXT:    .cfi_def_cfa_offset 16
2176; RV32-NEXT:    sw a0, 8(sp)
2177; RV32-NEXT:    sw a1, 12(sp)
2178; RV32-NEXT:    addi a0, sp, 8
2179; RV32-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
2180; RV32-NEXT:    vlse64.v v16, (a0), zero
2181; RV32-NEXT:    vmsne.vv v0, v16, v8
2182; RV32-NEXT:    addi sp, sp, 16
2183; RV32-NEXT:    .cfi_def_cfa_offset 0
2184; RV32-NEXT:    ret
2185;
2186; RV64-LABEL: icmp_ne_xv_nxv8i64:
2187; RV64:       # %bb.0:
2188; RV64-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
2189; RV64-NEXT:    vmsne.vx v0, v8, a0
2190; RV64-NEXT:    ret
2191  %head = insertelement <vscale x 8 x i64> poison, i64 %b, i32 0
2192  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
2193  %vc = icmp ne <vscale x 8 x i64> %splat, %va
2194  ret <vscale x 8 x i1> %vc
2195}
2196
2197define <vscale x 8 x i1> @icmp_ne_vi_nxv8i64_0(<vscale x 8 x i64> %va) {
2198; CHECK-LABEL: icmp_ne_vi_nxv8i64_0:
2199; CHECK:       # %bb.0:
2200; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
2201; CHECK-NEXT:    vmsne.vi v0, v8, 5
2202; CHECK-NEXT:    ret
2203  %vc = icmp ne <vscale x 8 x i64> %va, splat (i64 5)
2204  ret <vscale x 8 x i1> %vc
2205}
2206
2207define <vscale x 8 x i1> @icmp_ugt_vv_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb) {
2208; CHECK-LABEL: icmp_ugt_vv_nxv8i64:
2209; CHECK:       # %bb.0:
2210; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
2211; CHECK-NEXT:    vmsltu.vv v0, v16, v8
2212; CHECK-NEXT:    ret
2213  %vc = icmp ugt <vscale x 8 x i64> %va, %vb
2214  ret <vscale x 8 x i1> %vc
2215}
2216
2217define <vscale x 8 x i1> @icmp_ugt_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
2218; RV32-LABEL: icmp_ugt_vx_nxv8i64:
2219; RV32:       # %bb.0:
2220; RV32-NEXT:    addi sp, sp, -16
2221; RV32-NEXT:    .cfi_def_cfa_offset 16
2222; RV32-NEXT:    sw a0, 8(sp)
2223; RV32-NEXT:    sw a1, 12(sp)
2224; RV32-NEXT:    addi a0, sp, 8
2225; RV32-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
2226; RV32-NEXT:    vlse64.v v16, (a0), zero
2227; RV32-NEXT:    vmsltu.vv v0, v16, v8
2228; RV32-NEXT:    addi sp, sp, 16
2229; RV32-NEXT:    .cfi_def_cfa_offset 0
2230; RV32-NEXT:    ret
2231;
2232; RV64-LABEL: icmp_ugt_vx_nxv8i64:
2233; RV64:       # %bb.0:
2234; RV64-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
2235; RV64-NEXT:    vmsgtu.vx v0, v8, a0
2236; RV64-NEXT:    ret
2237  %head = insertelement <vscale x 8 x i64> poison, i64 %b, i32 0
2238  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
2239  %vc = icmp ugt <vscale x 8 x i64> %va, %splat
2240  ret <vscale x 8 x i1> %vc
2241}
2242
2243define <vscale x 8 x i1> @icmp_ugt_xv_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
2244; RV32-LABEL: icmp_ugt_xv_nxv8i64:
2245; RV32:       # %bb.0:
2246; RV32-NEXT:    addi sp, sp, -16
2247; RV32-NEXT:    .cfi_def_cfa_offset 16
2248; RV32-NEXT:    sw a0, 8(sp)
2249; RV32-NEXT:    sw a1, 12(sp)
2250; RV32-NEXT:    addi a0, sp, 8
2251; RV32-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
2252; RV32-NEXT:    vlse64.v v16, (a0), zero
2253; RV32-NEXT:    vmsltu.vv v0, v8, v16
2254; RV32-NEXT:    addi sp, sp, 16
2255; RV32-NEXT:    .cfi_def_cfa_offset 0
2256; RV32-NEXT:    ret
2257;
2258; RV64-LABEL: icmp_ugt_xv_nxv8i64:
2259; RV64:       # %bb.0:
2260; RV64-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
2261; RV64-NEXT:    vmsltu.vx v0, v8, a0
2262; RV64-NEXT:    ret
2263  %head = insertelement <vscale x 8 x i64> poison, i64 %b, i32 0
2264  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
2265  %vc = icmp ugt <vscale x 8 x i64> %splat, %va
2266  ret <vscale x 8 x i1> %vc
2267}
2268
2269define <vscale x 8 x i1> @icmp_ugt_vi_nxv8i64_0(<vscale x 8 x i64> %va) {
2270; CHECK-LABEL: icmp_ugt_vi_nxv8i64_0:
2271; CHECK:       # %bb.0:
2272; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
2273; CHECK-NEXT:    vmsgtu.vi v0, v8, 5
2274; CHECK-NEXT:    ret
2275  %vc = icmp ugt <vscale x 8 x i64> %va, splat (i64 5)
2276  ret <vscale x 8 x i1> %vc
2277}
2278
2279define <vscale x 8 x i1> @icmp_uge_vv_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb) {
2280; CHECK-LABEL: icmp_uge_vv_nxv8i64:
2281; CHECK:       # %bb.0:
2282; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
2283; CHECK-NEXT:    vmsleu.vv v0, v16, v8
2284; CHECK-NEXT:    ret
2285  %vc = icmp uge <vscale x 8 x i64> %va, %vb
2286  ret <vscale x 8 x i1> %vc
2287}
2288
2289define <vscale x 8 x i1> @icmp_uge_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
2290; RV32-LABEL: icmp_uge_vx_nxv8i64:
2291; RV32:       # %bb.0:
2292; RV32-NEXT:    addi sp, sp, -16
2293; RV32-NEXT:    .cfi_def_cfa_offset 16
2294; RV32-NEXT:    sw a0, 8(sp)
2295; RV32-NEXT:    sw a1, 12(sp)
2296; RV32-NEXT:    addi a0, sp, 8
2297; RV32-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
2298; RV32-NEXT:    vlse64.v v16, (a0), zero
2299; RV32-NEXT:    vmsleu.vv v0, v16, v8
2300; RV32-NEXT:    addi sp, sp, 16
2301; RV32-NEXT:    .cfi_def_cfa_offset 0
2302; RV32-NEXT:    ret
2303;
2304; RV64-LABEL: icmp_uge_vx_nxv8i64:
2305; RV64:       # %bb.0:
2306; RV64-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
2307; RV64-NEXT:    vmv.v.x v16, a0
2308; RV64-NEXT:    vmsleu.vv v0, v16, v8
2309; RV64-NEXT:    ret
2310  %head = insertelement <vscale x 8 x i64> poison, i64 %b, i32 0
2311  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
2312  %vc = icmp uge <vscale x 8 x i64> %va, %splat
2313  ret <vscale x 8 x i1> %vc
2314}
2315
2316define <vscale x 8 x i1> @icmp_uge_xv_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
2317; RV32-LABEL: icmp_uge_xv_nxv8i64:
2318; RV32:       # %bb.0:
2319; RV32-NEXT:    addi sp, sp, -16
2320; RV32-NEXT:    .cfi_def_cfa_offset 16
2321; RV32-NEXT:    sw a0, 8(sp)
2322; RV32-NEXT:    sw a1, 12(sp)
2323; RV32-NEXT:    addi a0, sp, 8
2324; RV32-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
2325; RV32-NEXT:    vlse64.v v16, (a0), zero
2326; RV32-NEXT:    vmsleu.vv v0, v8, v16
2327; RV32-NEXT:    addi sp, sp, 16
2328; RV32-NEXT:    .cfi_def_cfa_offset 0
2329; RV32-NEXT:    ret
2330;
2331; RV64-LABEL: icmp_uge_xv_nxv8i64:
2332; RV64:       # %bb.0:
2333; RV64-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
2334; RV64-NEXT:    vmsleu.vx v0, v8, a0
2335; RV64-NEXT:    ret
2336  %head = insertelement <vscale x 8 x i64> poison, i64 %b, i32 0
2337  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
2338  %vc = icmp uge <vscale x 8 x i64> %splat, %va
2339  ret <vscale x 8 x i1> %vc
2340}
2341
2342define <vscale x 8 x i1> @icmp_uge_vi_nxv8i64_0(<vscale x 8 x i64> %va) {
2343; CHECK-LABEL: icmp_uge_vi_nxv8i64_0:
2344; CHECK:       # %bb.0:
2345; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
2346; CHECK-NEXT:    vmv.v.i v16, -16
2347; CHECK-NEXT:    vmsleu.vv v0, v16, v8
2348; CHECK-NEXT:    ret
2349  %vc = icmp uge <vscale x 8 x i64> %va, splat (i64 -16)
2350  ret <vscale x 8 x i1> %vc
2351}
2352
2353define <vscale x 8 x i1> @icmp_uge_vi_nxv8i64_1(<vscale x 8 x i64> %va) {
2354; CHECK-LABEL: icmp_uge_vi_nxv8i64_1:
2355; CHECK:       # %bb.0:
2356; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
2357; CHECK-NEXT:    vmsgtu.vi v0, v8, 14
2358; CHECK-NEXT:    ret
2359  %vc = icmp uge <vscale x 8 x i64> %va, splat (i64 15)
2360  ret <vscale x 8 x i1> %vc
2361}
2362
2363define <vscale x 8 x i1> @icmp_uge_iv_nxv8i64_1(<vscale x 8 x i64> %va) {
2364; CHECK-LABEL: icmp_uge_iv_nxv8i64_1:
2365; CHECK:       # %bb.0:
2366; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
2367; CHECK-NEXT:    vmsleu.vi v0, v8, 15
2368; CHECK-NEXT:    ret
2369  %vc = icmp uge <vscale x 8 x i64> splat (i64 15), %va
2370  ret <vscale x 8 x i1> %vc
2371}
2372
2373define <vscale x 8 x i1> @icmp_uge_vi_nxv8i64_2(<vscale x 8 x i64> %va) {
2374; CHECK-LABEL: icmp_uge_vi_nxv8i64_2:
2375; CHECK:       # %bb.0:
2376; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
2377; CHECK-NEXT:    vmset.m v0
2378; CHECK-NEXT:    ret
2379  %vc = icmp uge <vscale x 8 x i64> %va, splat (i64 0)
2380  ret <vscale x 8 x i1> %vc
2381}
2382
2383define <vscale x 8 x i1> @icmp_uge_vi_nxv8i64_3(<vscale x 8 x i64> %va) {
2384; CHECK-LABEL: icmp_uge_vi_nxv8i64_3:
2385; CHECK:       # %bb.0:
2386; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
2387; CHECK-NEXT:    vmsgtu.vi v0, v8, 0
2388; CHECK-NEXT:    ret
2389  %vc = icmp uge <vscale x 8 x i64> %va, splat (i64 1)
2390  ret <vscale x 8 x i1> %vc
2391}
2392
2393define <vscale x 8 x i1> @icmp_uge_vi_nxv8i64_4(<vscale x 8 x i64> %va) {
2394; CHECK-LABEL: icmp_uge_vi_nxv8i64_4:
2395; CHECK:       # %bb.0:
2396; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
2397; CHECK-NEXT:    vmsgtu.vi v0, v8, -16
2398; CHECK-NEXT:    ret
2399  %vc = icmp uge <vscale x 8 x i64> %va, splat (i64 -15)
2400  ret <vscale x 8 x i1> %vc
2401}
2402
2403define <vscale x 8 x i1> @icmp_uge_vi_nxv8i64_5(<vscale x 8 x i64> %va) {
2404; CHECK-LABEL: icmp_uge_vi_nxv8i64_5:
2405; CHECK:       # %bb.0:
2406; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
2407; CHECK-NEXT:    vmsgtu.vi v0, v8, 15
2408; CHECK-NEXT:    ret
2409  %vc = icmp uge <vscale x 8 x i64> %va, splat (i64 16)
2410  ret <vscale x 8 x i1> %vc
2411}
2412
2413define <vscale x 8 x i1> @icmp_ult_vv_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb) {
2414; CHECK-LABEL: icmp_ult_vv_nxv8i64:
2415; CHECK:       # %bb.0:
2416; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
2417; CHECK-NEXT:    vmsltu.vv v0, v8, v16
2418; CHECK-NEXT:    ret
2419  %vc = icmp ult <vscale x 8 x i64> %va, %vb
2420  ret <vscale x 8 x i1> %vc
2421}
2422
2423define <vscale x 8 x i1> @icmp_ult_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
2424; RV32-LABEL: icmp_ult_vx_nxv8i64:
2425; RV32:       # %bb.0:
2426; RV32-NEXT:    addi sp, sp, -16
2427; RV32-NEXT:    .cfi_def_cfa_offset 16
2428; RV32-NEXT:    sw a0, 8(sp)
2429; RV32-NEXT:    sw a1, 12(sp)
2430; RV32-NEXT:    addi a0, sp, 8
2431; RV32-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
2432; RV32-NEXT:    vlse64.v v16, (a0), zero
2433; RV32-NEXT:    vmsltu.vv v0, v8, v16
2434; RV32-NEXT:    addi sp, sp, 16
2435; RV32-NEXT:    .cfi_def_cfa_offset 0
2436; RV32-NEXT:    ret
2437;
2438; RV64-LABEL: icmp_ult_vx_nxv8i64:
2439; RV64:       # %bb.0:
2440; RV64-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
2441; RV64-NEXT:    vmsltu.vx v0, v8, a0
2442; RV64-NEXT:    ret
2443  %head = insertelement <vscale x 8 x i64> poison, i64 %b, i32 0
2444  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
2445  %vc = icmp ult <vscale x 8 x i64> %va, %splat
2446  ret <vscale x 8 x i1> %vc
2447}
2448
2449define <vscale x 8 x i1> @icmp_ult_xv_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
2450; RV32-LABEL: icmp_ult_xv_nxv8i64:
2451; RV32:       # %bb.0:
2452; RV32-NEXT:    addi sp, sp, -16
2453; RV32-NEXT:    .cfi_def_cfa_offset 16
2454; RV32-NEXT:    sw a0, 8(sp)
2455; RV32-NEXT:    sw a1, 12(sp)
2456; RV32-NEXT:    addi a0, sp, 8
2457; RV32-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
2458; RV32-NEXT:    vlse64.v v16, (a0), zero
2459; RV32-NEXT:    vmsltu.vv v0, v16, v8
2460; RV32-NEXT:    addi sp, sp, 16
2461; RV32-NEXT:    .cfi_def_cfa_offset 0
2462; RV32-NEXT:    ret
2463;
2464; RV64-LABEL: icmp_ult_xv_nxv8i64:
2465; RV64:       # %bb.0:
2466; RV64-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
2467; RV64-NEXT:    vmsgtu.vx v0, v8, a0
2468; RV64-NEXT:    ret
2469  %head = insertelement <vscale x 8 x i64> poison, i64 %b, i32 0
2470  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
2471  %vc = icmp ult <vscale x 8 x i64> %splat, %va
2472  ret <vscale x 8 x i1> %vc
2473}
2474
2475define <vscale x 8 x i1> @icmp_ult_vi_nxv8i64_0(<vscale x 8 x i64> %va) {
2476; CHECK-LABEL: icmp_ult_vi_nxv8i64_0:
2477; CHECK:       # %bb.0:
2478; CHECK-NEXT:    li a0, -16
2479; CHECK-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
2480; CHECK-NEXT:    vmsltu.vx v0, v8, a0
2481; CHECK-NEXT:    ret
2482  %vc = icmp ult <vscale x 8 x i64> %va, splat (i64 -16)
2483  ret <vscale x 8 x i1> %vc
2484}
2485
2486define <vscale x 8 x i1> @icmp_ult_vi_nxv8i64_1(<vscale x 8 x i64> %va) {
2487; CHECK-LABEL: icmp_ult_vi_nxv8i64_1:
2488; CHECK:       # %bb.0:
2489; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
2490; CHECK-NEXT:    vmsleu.vi v0, v8, -16
2491; CHECK-NEXT:    ret
2492  %vc = icmp ult <vscale x 8 x i64> %va, splat (i64 -15)
2493  ret <vscale x 8 x i1> %vc
2494}
2495
2496define <vscale x 8 x i1> @icmp_ult_iv_nxv8i64_1(<vscale x 8 x i64> %va) {
2497; CHECK-LABEL: icmp_ult_iv_nxv8i64_1:
2498; CHECK:       # %bb.0:
2499; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
2500; CHECK-NEXT:    vmsgtu.vi v0, v8, -15
2501; CHECK-NEXT:    ret
2502  %vc = icmp ult <vscale x 8 x i64> splat (i64 -15), %va
2503  ret <vscale x 8 x i1> %vc
2504}
2505
2506define <vscale x 8 x i1> @icmp_ult_vi_nxv8i64_2(<vscale x 8 x i64> %va) {
2507; CHECK-LABEL: icmp_ult_vi_nxv8i64_2:
2508; CHECK:       # %bb.0:
2509; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
2510; CHECK-NEXT:    vmclr.m v0
2511; CHECK-NEXT:    ret
2512  %vc = icmp ult <vscale x 8 x i64> %va, splat (i64 0)
2513  ret <vscale x 8 x i1> %vc
2514}
2515
2516define <vscale x 8 x i1> @icmp_ult_vi_nxv8i64_3(<vscale x 8 x i64> %va) {
2517; CHECK-LABEL: icmp_ult_vi_nxv8i64_3:
2518; CHECK:       # %bb.0:
2519; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
2520; CHECK-NEXT:    vmseq.vi v0, v8, 0
2521; CHECK-NEXT:    ret
2522  %vc = icmp ult <vscale x 8 x i64> %va, splat (i64 1)
2523  ret <vscale x 8 x i1> %vc
2524}
2525
2526define <vscale x 8 x i1> @icmp_ult_vi_nxv8i64_4(<vscale x 8 x i64> %va) {
2527; CHECK-LABEL: icmp_ult_vi_nxv8i64_4:
2528; CHECK:       # %bb.0:
2529; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
2530; CHECK-NEXT:    vmsleu.vi v0, v8, 15
2531; CHECK-NEXT:    ret
2532  %vc = icmp ult <vscale x 8 x i64> %va, splat (i64 16)
2533  ret <vscale x 8 x i1> %vc
2534}
2535
2536define <vscale x 8 x i1> @icmp_ule_vv_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb) {
2537; CHECK-LABEL: icmp_ule_vv_nxv8i64:
2538; CHECK:       # %bb.0:
2539; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
2540; CHECK-NEXT:    vmsleu.vv v0, v8, v16
2541; CHECK-NEXT:    ret
2542  %vc = icmp ule <vscale x 8 x i64> %va, %vb
2543  ret <vscale x 8 x i1> %vc
2544}
2545
2546define <vscale x 8 x i1> @icmp_ule_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
2547; RV32-LABEL: icmp_ule_vx_nxv8i64:
2548; RV32:       # %bb.0:
2549; RV32-NEXT:    addi sp, sp, -16
2550; RV32-NEXT:    .cfi_def_cfa_offset 16
2551; RV32-NEXT:    sw a0, 8(sp)
2552; RV32-NEXT:    sw a1, 12(sp)
2553; RV32-NEXT:    addi a0, sp, 8
2554; RV32-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
2555; RV32-NEXT:    vlse64.v v16, (a0), zero
2556; RV32-NEXT:    vmsleu.vv v0, v8, v16
2557; RV32-NEXT:    addi sp, sp, 16
2558; RV32-NEXT:    .cfi_def_cfa_offset 0
2559; RV32-NEXT:    ret
2560;
2561; RV64-LABEL: icmp_ule_vx_nxv8i64:
2562; RV64:       # %bb.0:
2563; RV64-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
2564; RV64-NEXT:    vmsleu.vx v0, v8, a0
2565; RV64-NEXT:    ret
2566  %head = insertelement <vscale x 8 x i64> poison, i64 %b, i32 0
2567  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
2568  %vc = icmp ule <vscale x 8 x i64> %va, %splat
2569  ret <vscale x 8 x i1> %vc
2570}
2571
2572define <vscale x 8 x i1> @icmp_ule_xv_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
2573; RV32-LABEL: icmp_ule_xv_nxv8i64:
2574; RV32:       # %bb.0:
2575; RV32-NEXT:    addi sp, sp, -16
2576; RV32-NEXT:    .cfi_def_cfa_offset 16
2577; RV32-NEXT:    sw a0, 8(sp)
2578; RV32-NEXT:    sw a1, 12(sp)
2579; RV32-NEXT:    addi a0, sp, 8
2580; RV32-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
2581; RV32-NEXT:    vlse64.v v16, (a0), zero
2582; RV32-NEXT:    vmsleu.vv v0, v16, v8
2583; RV32-NEXT:    addi sp, sp, 16
2584; RV32-NEXT:    .cfi_def_cfa_offset 0
2585; RV32-NEXT:    ret
2586;
2587; RV64-LABEL: icmp_ule_xv_nxv8i64:
2588; RV64:       # %bb.0:
2589; RV64-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
2590; RV64-NEXT:    vmv.v.x v16, a0
2591; RV64-NEXT:    vmsleu.vv v0, v16, v8
2592; RV64-NEXT:    ret
2593  %head = insertelement <vscale x 8 x i64> poison, i64 %b, i32 0
2594  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
2595  %vc = icmp ule <vscale x 8 x i64> %splat, %va
2596  ret <vscale x 8 x i1> %vc
2597}
2598
2599define <vscale x 8 x i1> @icmp_ule_vi_nxv8i64_0(<vscale x 8 x i64> %va) {
2600; CHECK-LABEL: icmp_ule_vi_nxv8i64_0:
2601; CHECK:       # %bb.0:
2602; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
2603; CHECK-NEXT:    vmsleu.vi v0, v8, 5
2604; CHECK-NEXT:    ret
2605  %vc = icmp ule <vscale x 8 x i64> %va, splat (i64 5)
2606  ret <vscale x 8 x i1> %vc
2607}
2608
2609define <vscale x 8 x i1> @icmp_sgt_vv_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb) {
2610; CHECK-LABEL: icmp_sgt_vv_nxv8i64:
2611; CHECK:       # %bb.0:
2612; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
2613; CHECK-NEXT:    vmslt.vv v0, v16, v8
2614; CHECK-NEXT:    ret
2615  %vc = icmp sgt <vscale x 8 x i64> %va, %vb
2616  ret <vscale x 8 x i1> %vc
2617}
2618
2619define <vscale x 8 x i1> @icmp_sgt_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
2620; RV32-LABEL: icmp_sgt_vx_nxv8i64:
2621; RV32:       # %bb.0:
2622; RV32-NEXT:    addi sp, sp, -16
2623; RV32-NEXT:    .cfi_def_cfa_offset 16
2624; RV32-NEXT:    sw a0, 8(sp)
2625; RV32-NEXT:    sw a1, 12(sp)
2626; RV32-NEXT:    addi a0, sp, 8
2627; RV32-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
2628; RV32-NEXT:    vlse64.v v16, (a0), zero
2629; RV32-NEXT:    vmslt.vv v0, v16, v8
2630; RV32-NEXT:    addi sp, sp, 16
2631; RV32-NEXT:    .cfi_def_cfa_offset 0
2632; RV32-NEXT:    ret
2633;
2634; RV64-LABEL: icmp_sgt_vx_nxv8i64:
2635; RV64:       # %bb.0:
2636; RV64-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
2637; RV64-NEXT:    vmsgt.vx v0, v8, a0
2638; RV64-NEXT:    ret
2639  %head = insertelement <vscale x 8 x i64> poison, i64 %b, i32 0
2640  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
2641  %vc = icmp sgt <vscale x 8 x i64> %va, %splat
2642  ret <vscale x 8 x i1> %vc
2643}
2644
2645define <vscale x 8 x i1> @icmp_sgt_xv_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
2646; RV32-LABEL: icmp_sgt_xv_nxv8i64:
2647; RV32:       # %bb.0:
2648; RV32-NEXT:    addi sp, sp, -16
2649; RV32-NEXT:    .cfi_def_cfa_offset 16
2650; RV32-NEXT:    sw a0, 8(sp)
2651; RV32-NEXT:    sw a1, 12(sp)
2652; RV32-NEXT:    addi a0, sp, 8
2653; RV32-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
2654; RV32-NEXT:    vlse64.v v16, (a0), zero
2655; RV32-NEXT:    vmslt.vv v0, v8, v16
2656; RV32-NEXT:    addi sp, sp, 16
2657; RV32-NEXT:    .cfi_def_cfa_offset 0
2658; RV32-NEXT:    ret
2659;
2660; RV64-LABEL: icmp_sgt_xv_nxv8i64:
2661; RV64:       # %bb.0:
2662; RV64-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
2663; RV64-NEXT:    vmslt.vx v0, v8, a0
2664; RV64-NEXT:    ret
2665  %head = insertelement <vscale x 8 x i64> poison, i64 %b, i32 0
2666  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
2667  %vc = icmp sgt <vscale x 8 x i64> %splat, %va
2668  ret <vscale x 8 x i1> %vc
2669}
2670
2671define <vscale x 8 x i1> @icmp_sgt_vi_nxv8i64_0(<vscale x 8 x i64> %va) {
2672; CHECK-LABEL: icmp_sgt_vi_nxv8i64_0:
2673; CHECK:       # %bb.0:
2674; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
2675; CHECK-NEXT:    vmsgt.vi v0, v8, 5
2676; CHECK-NEXT:    ret
2677  %vc = icmp sgt <vscale x 8 x i64> %va, splat (i64 5)
2678  ret <vscale x 8 x i1> %vc
2679}
2680
2681define <vscale x 8 x i1> @icmp_sge_vv_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb) {
2682; CHECK-LABEL: icmp_sge_vv_nxv8i64:
2683; CHECK:       # %bb.0:
2684; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
2685; CHECK-NEXT:    vmsle.vv v0, v16, v8
2686; CHECK-NEXT:    ret
2687  %vc = icmp sge <vscale x 8 x i64> %va, %vb
2688  ret <vscale x 8 x i1> %vc
2689}
2690
2691define <vscale x 8 x i1> @icmp_sge_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
2692; RV32-LABEL: icmp_sge_vx_nxv8i64:
2693; RV32:       # %bb.0:
2694; RV32-NEXT:    addi sp, sp, -16
2695; RV32-NEXT:    .cfi_def_cfa_offset 16
2696; RV32-NEXT:    sw a0, 8(sp)
2697; RV32-NEXT:    sw a1, 12(sp)
2698; RV32-NEXT:    addi a0, sp, 8
2699; RV32-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
2700; RV32-NEXT:    vlse64.v v16, (a0), zero
2701; RV32-NEXT:    vmsle.vv v0, v16, v8
2702; RV32-NEXT:    addi sp, sp, 16
2703; RV32-NEXT:    .cfi_def_cfa_offset 0
2704; RV32-NEXT:    ret
2705;
2706; RV64-LABEL: icmp_sge_vx_nxv8i64:
2707; RV64:       # %bb.0:
2708; RV64-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
2709; RV64-NEXT:    vmv.v.x v16, a0
2710; RV64-NEXT:    vmsle.vv v0, v16, v8
2711; RV64-NEXT:    ret
2712  %head = insertelement <vscale x 8 x i64> poison, i64 %b, i32 0
2713  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
2714  %vc = icmp sge <vscale x 8 x i64> %va, %splat
2715  ret <vscale x 8 x i1> %vc
2716}
2717
2718define <vscale x 8 x i1> @icmp_sge_xv_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
2719; RV32-LABEL: icmp_sge_xv_nxv8i64:
2720; RV32:       # %bb.0:
2721; RV32-NEXT:    addi sp, sp, -16
2722; RV32-NEXT:    .cfi_def_cfa_offset 16
2723; RV32-NEXT:    sw a0, 8(sp)
2724; RV32-NEXT:    sw a1, 12(sp)
2725; RV32-NEXT:    addi a0, sp, 8
2726; RV32-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
2727; RV32-NEXT:    vlse64.v v16, (a0), zero
2728; RV32-NEXT:    vmsle.vv v0, v8, v16
2729; RV32-NEXT:    addi sp, sp, 16
2730; RV32-NEXT:    .cfi_def_cfa_offset 0
2731; RV32-NEXT:    ret
2732;
2733; RV64-LABEL: icmp_sge_xv_nxv8i64:
2734; RV64:       # %bb.0:
2735; RV64-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
2736; RV64-NEXT:    vmsle.vx v0, v8, a0
2737; RV64-NEXT:    ret
2738  %head = insertelement <vscale x 8 x i64> poison, i64 %b, i32 0
2739  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
2740  %vc = icmp sge <vscale x 8 x i64> %splat, %va
2741  ret <vscale x 8 x i1> %vc
2742}
2743
2744define <vscale x 8 x i1> @icmp_sge_vi_nxv8i64_0(<vscale x 8 x i64> %va) {
2745; CHECK-LABEL: icmp_sge_vi_nxv8i64_0:
2746; CHECK:       # %bb.0:
2747; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
2748; CHECK-NEXT:    vmv.v.i v16, -16
2749; CHECK-NEXT:    vmsle.vv v0, v16, v8
2750; CHECK-NEXT:    ret
2751  %vc = icmp sge <vscale x 8 x i64> %va, splat (i64 -16)
2752  ret <vscale x 8 x i1> %vc
2753}
2754
2755define <vscale x 8 x i1> @icmp_sge_vi_nxv8i64_1(<vscale x 8 x i64> %va) {
2756; CHECK-LABEL: icmp_sge_vi_nxv8i64_1:
2757; CHECK:       # %bb.0:
2758; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
2759; CHECK-NEXT:    vmsgt.vi v0, v8, -16
2760; CHECK-NEXT:    ret
2761  %vc = icmp sge <vscale x 8 x i64> %va, splat (i64 -15)
2762  ret <vscale x 8 x i1> %vc
2763}
2764
2765define <vscale x 8 x i1> @icmp_sge_iv_nxv8i64_1(<vscale x 8 x i64> %va) {
2766; CHECK-LABEL: icmp_sge_iv_nxv8i64_1:
2767; CHECK:       # %bb.0:
2768; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
2769; CHECK-NEXT:    vmsle.vi v0, v8, -15
2770; CHECK-NEXT:    ret
2771  %vc = icmp sge <vscale x 8 x i64> splat (i64 -15), %va
2772  ret <vscale x 8 x i1> %vc
2773}
2774
2775define <vscale x 8 x i1> @icmp_sge_vi_nxv8i64_2(<vscale x 8 x i64> %va) {
2776; CHECK-LABEL: icmp_sge_vi_nxv8i64_2:
2777; CHECK:       # %bb.0:
2778; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
2779; CHECK-NEXT:    vmsgt.vi v0, v8, -1
2780; CHECK-NEXT:    ret
2781  %vc = icmp sge <vscale x 8 x i64> %va, splat (i64 0)
2782  ret <vscale x 8 x i1> %vc
2783}
2784
2785define <vscale x 8 x i1> @icmp_sge_vi_nxv8i64_3(<vscale x 8 x i64> %va) {
2786; CHECK-LABEL: icmp_sge_vi_nxv8i64_3:
2787; CHECK:       # %bb.0:
2788; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
2789; CHECK-NEXT:    vmsgt.vi v0, v8, 15
2790; CHECK-NEXT:    ret
2791  %vc = icmp sge <vscale x 8 x i64> %va, splat (i64 16)
2792  ret <vscale x 8 x i1> %vc
2793}
2794
2795define <vscale x 8 x i1> @icmp_slt_vv_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb) {
2796; CHECK-LABEL: icmp_slt_vv_nxv8i64:
2797; CHECK:       # %bb.0:
2798; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
2799; CHECK-NEXT:    vmslt.vv v0, v8, v16
2800; CHECK-NEXT:    ret
2801  %vc = icmp slt <vscale x 8 x i64> %va, %vb
2802  ret <vscale x 8 x i1> %vc
2803}
2804
2805define <vscale x 8 x i1> @icmp_slt_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
2806; RV32-LABEL: icmp_slt_vx_nxv8i64:
2807; RV32:       # %bb.0:
2808; RV32-NEXT:    addi sp, sp, -16
2809; RV32-NEXT:    .cfi_def_cfa_offset 16
2810; RV32-NEXT:    sw a0, 8(sp)
2811; RV32-NEXT:    sw a1, 12(sp)
2812; RV32-NEXT:    addi a0, sp, 8
2813; RV32-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
2814; RV32-NEXT:    vlse64.v v16, (a0), zero
2815; RV32-NEXT:    vmslt.vv v0, v8, v16
2816; RV32-NEXT:    addi sp, sp, 16
2817; RV32-NEXT:    .cfi_def_cfa_offset 0
2818; RV32-NEXT:    ret
2819;
2820; RV64-LABEL: icmp_slt_vx_nxv8i64:
2821; RV64:       # %bb.0:
2822; RV64-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
2823; RV64-NEXT:    vmslt.vx v0, v8, a0
2824; RV64-NEXT:    ret
2825  %head = insertelement <vscale x 8 x i64> poison, i64 %b, i32 0
2826  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
2827  %vc = icmp slt <vscale x 8 x i64> %va, %splat
2828  ret <vscale x 8 x i1> %vc
2829}
2830
2831define <vscale x 8 x i1> @icmp_slt_xv_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
2832; RV32-LABEL: icmp_slt_xv_nxv8i64:
2833; RV32:       # %bb.0:
2834; RV32-NEXT:    addi sp, sp, -16
2835; RV32-NEXT:    .cfi_def_cfa_offset 16
2836; RV32-NEXT:    sw a0, 8(sp)
2837; RV32-NEXT:    sw a1, 12(sp)
2838; RV32-NEXT:    addi a0, sp, 8
2839; RV32-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
2840; RV32-NEXT:    vlse64.v v16, (a0), zero
2841; RV32-NEXT:    vmslt.vv v0, v16, v8
2842; RV32-NEXT:    addi sp, sp, 16
2843; RV32-NEXT:    .cfi_def_cfa_offset 0
2844; RV32-NEXT:    ret
2845;
2846; RV64-LABEL: icmp_slt_xv_nxv8i64:
2847; RV64:       # %bb.0:
2848; RV64-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
2849; RV64-NEXT:    vmsgt.vx v0, v8, a0
2850; RV64-NEXT:    ret
2851  %head = insertelement <vscale x 8 x i64> poison, i64 %b, i32 0
2852  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
2853  %vc = icmp slt <vscale x 8 x i64> %splat, %va
2854  ret <vscale x 8 x i1> %vc
2855}
2856
define <vscale x 8 x i1> @icmp_slt_vi_nxv8i64_0(<vscale x 8 x i64> %va) {
; CHECK-LABEL: icmp_slt_vi_nxv8i64_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a0, -16
; CHECK-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
; CHECK-NEXT:    vmslt.vx v0, v8, a0
; CHECK-NEXT:    ret
  %vc = icmp slt <vscale x 8 x i64> %va, splat (i64 -16)
  ret <vscale x 8 x i1> %vc
}

define <vscale x 8 x i1> @icmp_slt_vi_nxv8i64_1(<vscale x 8 x i64> %va) {
; CHECK-LABEL: icmp_slt_vi_nxv8i64_1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
; CHECK-NEXT:    vmsle.vi v0, v8, -16
; CHECK-NEXT:    ret
  %vc = icmp slt <vscale x 8 x i64> %va, splat (i64 -15)
  ret <vscale x 8 x i1> %vc
}

define <vscale x 8 x i1> @icmp_slt_iv_nxv8i64_1(<vscale x 8 x i64> %va) {
; CHECK-LABEL: icmp_slt_iv_nxv8i64_1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
; CHECK-NEXT:    vmsgt.vi v0, v8, -15
; CHECK-NEXT:    ret
  %vc = icmp slt <vscale x 8 x i64> splat (i64 -15), %va
  ret <vscale x 8 x i1> %vc
}

define <vscale x 8 x i1> @icmp_slt_vi_nxv8i64_2(<vscale x 8 x i64> %va) {
; CHECK-LABEL: icmp_slt_vi_nxv8i64_2:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
; CHECK-NEXT:    vmsle.vi v0, v8, -1
; CHECK-NEXT:    ret
  %vc = icmp slt <vscale x 8 x i64> %va, splat (i64 0)
  ret <vscale x 8 x i1> %vc
}

define <vscale x 8 x i1> @icmp_slt_vi_nxv8i64_3(<vscale x 8 x i64> %va) {
; CHECK-LABEL: icmp_slt_vi_nxv8i64_3:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
; CHECK-NEXT:    vmsle.vi v0, v8, 15
; CHECK-NEXT:    ret
  %vc = icmp slt <vscale x 8 x i64> %va, splat (i64 16)
  ret <vscale x 8 x i1> %vc
}

define <vscale x 8 x i1> @icmp_sle_vv_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb) {
; CHECK-LABEL: icmp_sle_vv_nxv8i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
; CHECK-NEXT:    vmsle.vv v0, v8, v16
; CHECK-NEXT:    ret
  %vc = icmp sle <vscale x 8 x i64> %va, %vb
  ret <vscale x 8 x i1> %vc
}

define <vscale x 8 x i1> @icmp_sle_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
; RV32-LABEL: icmp_sle_vx_nxv8i64:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    .cfi_def_cfa_offset 16
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
; RV32-NEXT:    vlse64.v v16, (a0), zero
; RV32-NEXT:    vmsle.vv v0, v8, v16
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    .cfi_def_cfa_offset 0
; RV32-NEXT:    ret
;
; RV64-LABEL: icmp_sle_vx_nxv8i64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
; RV64-NEXT:    vmsle.vx v0, v8, a0
; RV64-NEXT:    ret
  %head = insertelement <vscale x 8 x i64> poison, i64 %b, i32 0
  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
  %vc = icmp sle <vscale x 8 x i64> %va, %splat
  ret <vscale x 8 x i1> %vc
}

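; There is likewise no vmsge.vx encoding, so with the splat on the LHS the sle
; below is expected to splat the scalar with vmv.v.x on RV64 and use vmsle.vv
; rather than a single .vx compare.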
define <vscale x 8 x i1> @icmp_sle_xv_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
; RV32-LABEL: icmp_sle_xv_nxv8i64:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    .cfi_def_cfa_offset 16
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
; RV32-NEXT:    vlse64.v v16, (a0), zero
; RV32-NEXT:    vmsle.vv v0, v16, v8
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    .cfi_def_cfa_offset 0
; RV32-NEXT:    ret
;
; RV64-LABEL: icmp_sle_xv_nxv8i64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
; RV64-NEXT:    vmv.v.x v16, a0
; RV64-NEXT:    vmsle.vv v0, v16, v8
; RV64-NEXT:    ret
  %head = insertelement <vscale x 8 x i64> poison, i64 %b, i32 0
  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
  %vc = icmp sle <vscale x 8 x i64> %splat, %va
  ret <vscale x 8 x i1> %vc
}

define <vscale x 8 x i1> @icmp_sle_vi_nxv8i64_0(<vscale x 8 x i64> %va) {
; CHECK-LABEL: icmp_sle_vi_nxv8i64_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
; CHECK-NEXT:    vmsle.vi v0, v8, 5
; CHECK-NEXT:    ret
  %vc = icmp sle <vscale x 8 x i64> %va, splat (i64 5)
  ret <vscale x 8 x i1> %vc
}

; Check a setcc with two constant splats, which would previously get stuck in
; an infinite loop. DAGCombine isn't clever enough to constant-fold
; splat_vectors but could continuously swap the operands, trying to put the
; splat on the RHS.
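; Since 5 == 2 is always false, the result should be an all-false mask
; materialized with vmclr.m.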
define <vscale x 8 x i1> @icmp_eq_ii_nxv8i8() {
; CHECK-LABEL: icmp_eq_ii_nxv8i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
; CHECK-NEXT:    vmclr.m v0
; CHECK-NEXT:    ret
  %vc = icmp eq <vscale x 8 x i8> splat (i8 5), splat (i8 2)
  ret <vscale x 8 x i1> %vc
}

; This icmp/setcc is split and so we find a scalable-vector mask CONCAT_VECTORS
; node. Ensure we correctly (custom) lower this.
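; The nxv16i64 operand spans two m8 register groups, so each half is compared
; with vmseq.vi and the two nxv8i1 results are concatenated with vslideup.vx.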
define <vscale x 16 x i1> @icmp_eq_vi_nx16i64(<vscale x 16 x i64> %va) {
; CHECK-LABEL: icmp_eq_vi_nx16i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
; CHECK-NEXT:    vmseq.vi v24, v16, 0
; CHECK-NEXT:    srli a0, a0, 3
; CHECK-NEXT:    add a1, a0, a0
; CHECK-NEXT:    vmseq.vi v0, v8, 0
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT:    vslideup.vx v0, v24, a0
; CHECK-NEXT:    ret
  %vc = icmp eq <vscale x 16 x i64> %va, zeroinitializer
  ret <vscale x 16 x i1> %vc
}
