; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64

declare <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  iXLen);

; vmsge has no native encoding; a >= b is emitted as vmsle.vv with operands swapped.
define <vscale x 1 x i1> @intrinsic_vmsge_vv_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsge_vv_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vmsle.vv v0, v9, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    iXLen %2)

  ret <vscale x 1 x i1> %a
}

declare <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i8(
  <vscale x 1 x i1>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  iXLen);

; Masked variant: the mask operand is itself produced by a swapped vmsle.
define <vscale x 1 x i1> @intrinsic_vmsge_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT:    vmv1r.v v11, v0
; CHECK-NEXT:    vmsle.vv v0, v9, v8
; CHECK-NEXT:    vmsle.vv v11, v10, v9, v0.t
; CHECK-NEXT:    vmv1r.v v0, v11
; CHECK-NEXT:    ret
entry:
  %mask = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i8(
    <vscale x 1 x i8> %1,
    <vscale x 1 x i8> %2,
    iXLen %4)
  %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i8(
    <vscale x 1 x i1> %0,
    <vscale x 1 x i8> %2,
    <vscale x 1 x i8> %3,
    <vscale x 1 x i1> %mask,
    iXLen %4)

  ret <vscale x 1 x i1> %a
}

declare <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i8(
  <vscale x 2 x i8>,
  <vscale x 2 x i8>,
  iXLen);

; vmsge has no native encoding; a >= b is emitted as vmsle.vv with operands swapped.
define <vscale x 2 x i1> @intrinsic_vmsge_vv_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsge_vv_nxv2i8_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT:    vmsle.vv v0, v9, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i8(
    <vscale x 2 x i8> %0,
    <vscale x 2 x i8> %1,
    iXLen %2)

  ret <vscale x 2 x i1> %a
}

declare <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i8(
  <vscale x 2 x i1>,
  <vscale x 2 x i8>,
  <vscale x 2 x i8>,
  <vscale x 2 x i1>,
  iXLen);

; Masked variant: the mask operand is itself produced by a swapped vmsle.
define <vscale x 2 x i1> @intrinsic_vmsge_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv2i8_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
; CHECK-NEXT:    vmv1r.v v11, v0
; CHECK-NEXT:    vmsle.vv v0, v9, v8
; CHECK-NEXT:    vmsle.vv v11, v10, v9, v0.t
; CHECK-NEXT:    vmv1r.v v0, v11
; CHECK-NEXT:    ret
entry:
  %mask = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i8(
    <vscale x 2 x i8> %1,
    <vscale x 2 x i8> %2,
    iXLen %4)
  %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i8(
    <vscale x 2 x i1> %0,
    <vscale x 2 x i8> %2,
    <vscale x 2 x i8> %3,
    <vscale x 2 x i1> %mask,
    iXLen %4)

  ret <vscale x 2 x i1> %a
}

declare <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i8(
  <vscale x 4 x i8>,
  <vscale x 4 x i8>,
  iXLen);

; vmsge has no native encoding; a >= b is emitted as vmsle.vv with operands swapped.
define <vscale x 4 x i1> @intrinsic_vmsge_vv_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsge_vv_nxv4i8_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    vmsle.vv v0, v9, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i8(
    <vscale x 4 x i8> %0,
    <vscale x 4 x i8> %1,
    iXLen %2)

  ret <vscale x 4 x i1> %a
}

declare <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i8(
  <vscale x 4 x i1>,
  <vscale x 4 x i8>,
  <vscale x 4 x i8>,
  <vscale x 4 x i1>,
  iXLen);

; Masked variant: the mask operand is itself produced by a swapped vmsle.
define <vscale x 4 x i1> @intrinsic_vmsge_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv4i8_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
; CHECK-NEXT:    vmv1r.v v11, v0
; CHECK-NEXT:    vmsle.vv v0, v9, v8
; CHECK-NEXT:    vmsle.vv v11, v10, v9, v0.t
; CHECK-NEXT:    vmv1r.v v0, v11
; CHECK-NEXT:    ret
entry:
  %mask = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i8(
    <vscale x 4 x i8> %1,
    <vscale x 4 x i8> %2,
    iXLen %4)
  %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i8(
    <vscale x 4 x i1> %0,
    <vscale x 4 x i8> %2,
    <vscale x 4 x i8> %3,
    <vscale x 4 x i1> %mask,
    iXLen %4)

  ret <vscale x 4 x i1> %a
}

declare <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i8(
  <vscale x 8 x i8>,
  <vscale x 8 x i8>,
  iXLen);

; vmsge has no native encoding; a >= b is emitted as vmsle.vv with operands swapped.
define <vscale x 8 x i1> @intrinsic_vmsge_vv_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsge_vv_nxv8i8_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT:    vmsle.vv v0, v9, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i8(
    <vscale x 8 x i8> %0,
    <vscale x 8 x i8> %1,
    iXLen %2)

  ret <vscale x 8 x i1> %a
}

declare <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i8(
  <vscale x 8 x i1>,
  <vscale x 8 x i8>,
  <vscale x 8 x i8>,
  <vscale x 8 x i1>,
  iXLen);

; Masked variant: the mask operand is itself produced by a swapped vmsle.
define <vscale x 8 x i1> @intrinsic_vmsge_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv8i8_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
; CHECK-NEXT:    vmv1r.v v11, v0
; CHECK-NEXT:    vmsle.vv v0, v9, v8
; CHECK-NEXT:    vmsle.vv v11, v10, v9, v0.t
; CHECK-NEXT:    vmv.v.v v0, v11
; CHECK-NEXT:    ret
entry:
  %mask = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i8(
    <vscale x 8 x i8> %1,
    <vscale x 8 x i8> %2,
    iXLen %4)
  %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i8(
    <vscale x 8 x i1> %0,
    <vscale x 8 x i8> %2,
    <vscale x 8 x i8> %3,
    <vscale x 8 x i1> %mask,
    iXLen %4)

  ret <vscale x 8 x i1> %a
}

declare <vscale x 16 x i1> @llvm.riscv.vmsge.nxv16i8(
  <vscale x 16 x i8>,
  <vscale x 16 x i8>,
  iXLen);

; vmsge has no native encoding; a >= b is emitted as vmsle.vv with operands swapped.
define <vscale x 16 x i1> @intrinsic_vmsge_vv_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsge_vv_nxv16i8_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT:    vmsle.vv v0, v10, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i1> @llvm.riscv.vmsge.nxv16i8(
    <vscale x 16 x i8> %0,
    <vscale x 16 x i8> %1,
    iXLen %2)

  ret <vscale x 16 x i1> %a
}

declare <vscale x 16 x i1> @llvm.riscv.vmsge.mask.nxv16i8(
  <vscale x 16 x i1>,
  <vscale x 16 x i8>,
  <vscale x 16 x i8>,
  <vscale x 16 x i1>,
  iXLen);

; Masked variant: the mask operand is itself produced by a swapped vmsle.
define <vscale x 16 x i1> @intrinsic_vmsge_mask_vv_nxv16i8_nxv16i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv16i8_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
; CHECK-NEXT:    vmv1r.v v14, v0
; CHECK-NEXT:    vmsle.vv v0, v10, v8
; CHECK-NEXT:    vmsle.vv v14, v12, v10, v0.t
; CHECK-NEXT:    vmv1r.v v0, v14
; CHECK-NEXT:    ret
entry:
  %mask = call <vscale x 16 x i1> @llvm.riscv.vmsge.nxv16i8(
    <vscale x 16 x i8> %1,
    <vscale x 16 x i8> %2,
    iXLen %4)
  %a = call <vscale x 16 x i1> @llvm.riscv.vmsge.mask.nxv16i8(
    <vscale x 16 x i1> %0,
    <vscale x 16 x i8> %2,
    <vscale x 16 x i8> %3,
    <vscale x 16 x i1> %mask,
    iXLen %4)

  ret <vscale x 16 x i1> %a
}

declare <vscale x 32 x i1> @llvm.riscv.vmsge.nxv32i8(
  <vscale x 32 x i8>,
  <vscale x 32 x i8>,
  iXLen);

; vmsge has no native encoding; a >= b is emitted as vmsle.vv with operands swapped.
define <vscale x 32 x i1> @intrinsic_vmsge_vv_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsge_vv_nxv32i8_nxv32i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT:    vmsle.vv v0, v12, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i1> @llvm.riscv.vmsge.nxv32i8(
    <vscale x 32 x i8> %0,
    <vscale x 32 x i8> %1,
    iXLen %2)

  ret <vscale x 32 x i1> %a
}

declare <vscale x 32 x i1> @llvm.riscv.vmsge.mask.nxv32i8(
  <vscale x 32 x i1>,
  <vscale x 32 x i8>,
  <vscale x 32 x i8>,
  <vscale x 32 x i1>,
  iXLen);

; Masked variant: the mask operand is itself produced by a swapped vmsle.
define <vscale x 32 x i1> @intrinsic_vmsge_mask_vv_nxv32i8_nxv32i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv32i8_nxv32i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
; CHECK-NEXT:    vmv1r.v v20, v0
; CHECK-NEXT:    vmsle.vv v0, v12, v8
; CHECK-NEXT:    vmsle.vv v20, v16, v12, v0.t
; CHECK-NEXT:    vmv1r.v v0, v20
; CHECK-NEXT:    ret
entry:
  %mask = call <vscale x 32 x i1> @llvm.riscv.vmsge.nxv32i8(
    <vscale x 32 x i8> %1,
    <vscale x 32 x i8> %2,
    iXLen %4)
  %a = call <vscale x 32 x i1> @llvm.riscv.vmsge.mask.nxv32i8(
    <vscale x 32 x i1> %0,
    <vscale x 32 x i8> %2,
    <vscale x 32 x i8> %3,
    <vscale x 32 x i1> %mask,
    iXLen %4)

  ret <vscale x 32 x i1> %a
}

declare <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i16(
  <vscale x 1 x i16>,
  <vscale x 1 x i16>,
  iXLen);

; vmsge has no native encoding; a >= b is emitted as vmsle.vv with operands swapped.
define <vscale x 1 x i1> @intrinsic_vmsge_vv_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsge_vv_nxv1i16_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vmsle.vv v0, v9, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i16(
    <vscale x 1 x i16> %0,
    <vscale x 1 x i16> %1,
    iXLen %2)

  ret <vscale x 1 x i1> %a
}

declare <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i16(
  <vscale x 1 x i1>,
  <vscale x 1 x i16>,
  <vscale x 1 x i16>,
  <vscale x 1 x i1>,
  iXLen);

; Masked variant: the mask operand is itself produced by a swapped vmsle.
define <vscale x 1 x i1> @intrinsic_vmsge_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv1i16_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT:    vmv1r.v v11, v0
; CHECK-NEXT:    vmsle.vv v0, v9, v8
; CHECK-NEXT:    vmsle.vv v11, v10, v9, v0.t
; CHECK-NEXT:    vmv1r.v v0, v11
; CHECK-NEXT:    ret
entry:
  %mask = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i16(
    <vscale x 1 x i16> %1,
    <vscale x 1 x i16> %2,
    iXLen %4)
  %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i16(
    <vscale x 1 x i1> %0,
    <vscale x 1 x i16> %2,
    <vscale x 1 x i16> %3,
    <vscale x 1 x i1> %mask,
    iXLen %4)

  ret <vscale x 1 x i1> %a
}

declare <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i16(
  <vscale x 2 x i16>,
  <vscale x 2 x i16>,
  iXLen);

; vmsge has no native encoding; a >= b is emitted as vmsle.vv with operands swapped.
define <vscale x 2 x i1> @intrinsic_vmsge_vv_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsge_vv_nxv2i16_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vmsle.vv v0, v9, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i16(
    <vscale x 2 x i16> %0,
    <vscale x 2 x i16> %1,
    iXLen %2)

  ret <vscale x 2 x i1> %a
}

declare <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i16(
  <vscale x 2 x i1>,
  <vscale x 2 x i16>,
  <vscale x 2 x i16>,
  <vscale x 2 x i1>,
  iXLen);

; Masked variant: the mask operand is itself produced by a swapped vmsle.
define <vscale x 2 x i1> @intrinsic_vmsge_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv2i16_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT:    vmv1r.v v11, v0
; CHECK-NEXT:    vmsle.vv v0, v9, v8
; CHECK-NEXT:    vmsle.vv v11, v10, v9, v0.t
; CHECK-NEXT:    vmv1r.v v0, v11
; CHECK-NEXT:    ret
entry:
  %mask = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i16(
    <vscale x 2 x i16> %1,
    <vscale x 2 x i16> %2,
    iXLen %4)
  %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i16(
    <vscale x 2 x i1> %0,
    <vscale x 2 x i16> %2,
    <vscale x 2 x i16> %3,
    <vscale x 2 x i1> %mask,
    iXLen %4)

  ret <vscale x 2 x i1> %a
}

declare <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i16(
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  iXLen);

; vmsge has no native encoding; a >= b is emitted as vmsle.vv with operands swapped.
define <vscale x 4 x i1> @intrinsic_vmsge_vv_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsge_vv_nxv4i16_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vmsle.vv v0, v9, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i16(
    <vscale x 4 x i16> %0,
    <vscale x 4 x i16> %1,
    iXLen %2)

  ret <vscale x 4 x i1> %a
}

declare <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i16(
  <vscale x 4 x i1>,
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  <vscale x 4 x i1>,
  iXLen);

; Masked variant: the mask operand is itself produced by a swapped vmsle.
define <vscale x 4 x i1> @intrinsic_vmsge_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv4i16_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT:    vmv1r.v v11, v0
; CHECK-NEXT:    vmsle.vv v0, v9, v8
; CHECK-NEXT:    vmsle.vv v11, v10, v9, v0.t
; CHECK-NEXT:    vmv.v.v v0, v11
; CHECK-NEXT:    ret
entry:
  %mask = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i16(
    <vscale x 4 x i16> %1,
    <vscale x 4 x i16> %2,
    iXLen %4)
  %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i16(
    <vscale x 4 x i1> %0,
    <vscale x 4 x i16> %2,
    <vscale x 4 x i16> %3,
    <vscale x 4 x i1> %mask,
    iXLen %4)

  ret <vscale x 4 x i1> %a
}

declare <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i16(
  <vscale x 8 x i16>,
  <vscale x 8 x i16>,
  iXLen);

; vmsge has no native encoding; a >= b is emitted as vmsle.vv with operands swapped.
define <vscale x 8 x i1> @intrinsic_vmsge_vv_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsge_vv_nxv8i16_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vmsle.vv v0, v10, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i16(
    <vscale x 8 x i16> %0,
    <vscale x 8 x i16> %1,
    iXLen %2)

  ret <vscale x 8 x i1> %a
}

declare <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i16(
  <vscale x 8 x i1>,
  <vscale x 8 x i16>,
  <vscale x 8 x i16>,
  <vscale x 8 x i1>,
  iXLen);

; Masked variant: the mask operand is itself produced by a swapped vmsle.
define <vscale x 8 x i1> @intrinsic_vmsge_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv8i16_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT:    vmv1r.v v14, v0
; CHECK-NEXT:    vmsle.vv v0, v10, v8
; CHECK-NEXT:    vmsle.vv v14, v12, v10, v0.t
; CHECK-NEXT:    vmv1r.v v0, v14
; CHECK-NEXT:    ret
entry:
  %mask = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i16(
    <vscale x 8 x i16> %1,
    <vscale x 8 x i16> %2,
    iXLen %4)
  %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i16(
    <vscale x 8 x i1> %0,
    <vscale x 8 x i16> %2,
    <vscale x 8 x i16> %3,
    <vscale x 8 x i1> %mask,
    iXLen %4)

  ret <vscale x 8 x i1> %a
}

declare <vscale x 16 x i1> @llvm.riscv.vmsge.nxv16i16(
  <vscale x 16 x i16>,
  <vscale x 16 x i16>,
  iXLen);

; vmsge has no native encoding; a >= b is emitted as vmsle.vv with operands swapped.
define <vscale x 16 x i1> @intrinsic_vmsge_vv_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsge_vv_nxv16i16_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    vmsle.vv v0, v12, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i1> @llvm.riscv.vmsge.nxv16i16(
    <vscale x 16 x i16> %0,
    <vscale x 16 x i16> %1,
    iXLen %2)

  ret <vscale x 16 x i1> %a
}

declare <vscale x 16 x i1> @llvm.riscv.vmsge.mask.nxv16i16(
  <vscale x 16 x i1>,
  <vscale x 16 x i16>,
  <vscale x 16 x i16>,
  <vscale x 16 x i1>,
  iXLen);

; Masked variant: the mask operand is itself produced by a swapped vmsle.
define <vscale x 16 x i1> @intrinsic_vmsge_mask_vv_nxv16i16_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv16i16_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT:    vmv1r.v v20, v0
; CHECK-NEXT:    vmsle.vv v0, v12, v8
; CHECK-NEXT:    vmsle.vv v20, v16, v12, v0.t
; CHECK-NEXT:    vmv1r.v v0, v20
; CHECK-NEXT:    ret
entry:
  %mask = call <vscale x 16 x i1> @llvm.riscv.vmsge.nxv16i16(
    <vscale x 16 x i16> %1,
    <vscale x 16 x i16> %2,
    iXLen %4)
  %a = call <vscale x 16 x i1> @llvm.riscv.vmsge.mask.nxv16i16(
    <vscale x 16 x i1> %0,
    <vscale x 16 x i16> %2,
    <vscale x 16 x i16> %3,
    <vscale x 16 x i1> %mask,
    iXLen %4)

  ret <vscale x 16 x i1> %a
}

declare <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i32(
  <vscale x 1 x i32>,
  <vscale x 1 x i32>,
  iXLen);

; vmsge has no native encoding; a >= b is emitted as vmsle.vv with operands swapped.
define <vscale x 1 x i1> @intrinsic_vmsge_vv_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsge_vv_nxv1i32_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vmsle.vv v0, v9, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i32(
    <vscale x 1 x i32> %0,
    <vscale x 1 x i32> %1,
    iXLen %2)

  ret <vscale x 1 x i1> %a
}

declare <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i32(
  <vscale x 1 x i1>,
  <vscale x 1 x i32>,
  <vscale x 1 x i32>,
  <vscale x 1 x i1>,
  iXLen);

; Masked variant: the mask operand is itself produced by a swapped vmsle.
define <vscale x 1 x i1> @intrinsic_vmsge_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv1i32_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT:    vmv1r.v v11, v0
; CHECK-NEXT:    vmsle.vv v0, v9, v8
; CHECK-NEXT:    vmsle.vv v11, v10, v9, v0.t
; CHECK-NEXT:    vmv1r.v v0, v11
; CHECK-NEXT:    ret
entry:
  %mask = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i32(
    <vscale x 1 x i32> %1,
    <vscale x 1 x i32> %2,
    iXLen %4)
  %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i32(
    <vscale x 1 x i1> %0,
    <vscale x 1 x i32> %2,
    <vscale x 1 x i32> %3,
    <vscale x 1 x i1> %mask,
    iXLen %4)

  ret <vscale x 1 x i1> %a
}

declare <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i32(
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  iXLen);

; vmsge has no native encoding; a >= b is emitted as vmsle.vv with operands swapped.
define <vscale x 2 x i1> @intrinsic_vmsge_vv_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsge_vv_nxv2i32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vmsle.vv v0, v9, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i32(
    <vscale x 2 x i32> %0,
    <vscale x 2 x i32> %1,
    iXLen %2)

  ret <vscale x 2 x i1> %a
}

declare <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i32(
  <vscale x 2 x i1>,
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  <vscale x 2 x i1>,
  iXLen);

; Masked variant: the mask operand is itself produced by a swapped vmsle.
define <vscale x 2 x i1> @intrinsic_vmsge_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv2i32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT:    vmv1r.v v11, v0
; CHECK-NEXT:    vmsle.vv v0, v9, v8
; CHECK-NEXT:    vmsle.vv v11, v10, v9, v0.t
; CHECK-NEXT:    vmv.v.v v0, v11
; CHECK-NEXT:    ret
entry:
  %mask = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i32(
    <vscale x 2 x i32> %1,
    <vscale x 2 x i32> %2,
    iXLen %4)
  %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i32(
    <vscale x 2 x i1> %0,
    <vscale x 2 x i32> %2,
    <vscale x 2 x i32> %3,
    <vscale x 2 x i1> %mask,
    iXLen %4)

  ret <vscale x 2 x i1> %a
}

declare <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i32(
  <vscale x 4 x i32>,
  <vscale x 4 x i32>,
  iXLen);

; vmsge has no native encoding; a >= b is emitted as vmsle.vv with operands swapped.
define <vscale x 4 x i1> @intrinsic_vmsge_vv_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsge_vv_nxv4i32_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vmsle.vv v0, v10, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i32(
    <vscale x 4 x i32> %0,
    <vscale x 4 x i32> %1,
    iXLen %2)

  ret <vscale x 4 x i1> %a
}

declare <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i32(
  <vscale x 4 x i1>,
  <vscale x 4 x i32>,
  <vscale x 4 x i32>,
  <vscale x 4 x i1>,
  iXLen);

; Masked variant: the mask operand is itself produced by a swapped vmsle.
define <vscale x 4 x i1> @intrinsic_vmsge_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv4i32_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT:    vmv1r.v v14, v0
; CHECK-NEXT:    vmsle.vv v0, v10, v8
; CHECK-NEXT:    vmsle.vv v14, v12, v10, v0.t
; CHECK-NEXT:    vmv1r.v v0, v14
; CHECK-NEXT:    ret
entry:
  %mask = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i32(
    <vscale x 4 x i32> %1,
    <vscale x 4 x i32> %2,
    iXLen %4)
  %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i32(
    <vscale x 4 x i1> %0,
    <vscale x 4 x i32> %2,
    <vscale x 4 x i32> %3,
    <vscale x 4 x i1> %mask,
    iXLen %4)

  ret <vscale x 4 x i1> %a
}

declare <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i32(
  <vscale x 8 x i32>,
  <vscale x 8 x i32>,
  iXLen);

; vmsge has no native encoding; a >= b is emitted as vmsle.vv with operands swapped.
define <vscale x 8 x i1> @intrinsic_vmsge_vv_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsge_vv_nxv8i32_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vmsle.vv v0, v12, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i32(
    <vscale x 8 x i32> %0,
    <vscale x 8 x i32> %1,
    iXLen %2)

  ret <vscale x 8 x i1> %a
}

declare <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i32(
  <vscale x 8 x i1>,
  <vscale x 8 x i32>,
  <vscale x 8 x i32>,
  <vscale x 8 x i1>,
  iXLen);

; Masked variant: the mask operand is itself produced by a swapped vmsle.
define <vscale x 8 x i1> @intrinsic_vmsge_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv8i32_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT:    vmv1r.v v20, v0
; CHECK-NEXT:    vmsle.vv v0, v12, v8
; CHECK-NEXT:    vmsle.vv v20, v16, v12, v0.t
; CHECK-NEXT:    vmv1r.v v0, v20
; CHECK-NEXT:    ret
entry:
  %mask = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i32(
    <vscale x 8 x i32> %1,
    <vscale x 8 x i32> %2,
    iXLen %4)
  %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i32(
    <vscale x 8 x i1> %0,
    <vscale x 8 x i32> %2,
    <vscale x 8 x i32> %3,
    <vscale x 8 x i1> %mask,
    iXLen %4)

  ret <vscale x 8 x i1> %a
}

declare <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i64(
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  iXLen);

; vmsge has no native encoding; a >= b is emitted as vmsle.vv with operands swapped.
define <vscale x 1 x i1> @intrinsic_vmsge_vv_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsge_vv_nxv1i64_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vmsle.vv v0, v9, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i64(
    <vscale x 1 x i64> %0,
    <vscale x 1 x i64> %1,
    iXLen %2)

  ret <vscale x 1 x i1> %a
}

declare <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i64(
  <vscale x 1 x i1>,
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  <vscale x 1 x i1>,
  iXLen);

; Masked variant: the mask operand is itself produced by a swapped vmsle.
define <vscale x 1 x i1> @intrinsic_vmsge_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i64> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv1i64_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT:    vmv1r.v v11, v0
; CHECK-NEXT:    vmsle.vv v0, v9, v8
; CHECK-NEXT:    vmsle.vv v11, v10, v9, v0.t
; CHECK-NEXT:    vmv.v.v v0, v11
; CHECK-NEXT:    ret
entry:
  %mask = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i64(
    <vscale x 1 x i64> %1,
    <vscale x 1 x i64> %2,
    iXLen %4)
  %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i64(
    <vscale x 1 x i1> %0,
    <vscale x 1 x i64> %2,
    <vscale x 1 x i64> %3,
    <vscale x 1 x i1> %mask,
    iXLen %4)

  ret <vscale x 1 x i1> %a
}

declare <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i64(
  <vscale x 2 x i64>,
  <vscale x 2 x i64>,
  iXLen);

; vmsge has no native encoding; a >= b is emitted as vmsle.vv with operands swapped.
define <vscale x 2 x i1> @intrinsic_vmsge_vv_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsge_vv_nxv2i64_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vmsle.vv v0, v10, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i64(
    <vscale x 2 x i64> %0,
    <vscale x 2 x i64> %1,
    iXLen %2)

  ret <vscale x 2 x i1> %a
}

declare <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i64(
  <vscale x 2 x i1>,
  <vscale x 2 x i64>,
  <vscale x 2 x i64>,
  <vscale x 2 x i1>,
  iXLen);

; Masked variant: the mask operand is itself produced by a swapped vmsle.
define <vscale x 2 x i1> @intrinsic_vmsge_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i64> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv2i64_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT:    vmv1r.v v14, v0
; CHECK-NEXT:    vmsle.vv v0, v10, v8
; CHECK-NEXT:    vmsle.vv v14, v12, v10, v0.t
; CHECK-NEXT:    vmv1r.v v0, v14
; CHECK-NEXT:    ret
entry:
  %mask = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i64(
    <vscale x 2 x i64> %1,
    <vscale x 2 x i64> %2,
    iXLen %4)
  %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i64(
    <vscale x 2 x i1> %0,
    <vscale x 2 x i64> %2,
    <vscale x 2 x i64> %3,
    <vscale x 2 x i1> %mask,
    iXLen %4)

  ret <vscale x 2 x i1> %a
}

declare <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i64(
  <vscale x 4 x i64>,
  <vscale x 4 x i64>,
  iXLen);

; vmsge has no native encoding; a >= b is emitted as vmsle.vv with operands swapped.
define <vscale x 4 x i1> @intrinsic_vmsge_vv_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsge_vv_nxv4i64_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    vmsle.vv v0, v12, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i64(
    <vscale x 4 x i64> %0,
    <vscale x 4 x i64> %1,
    iXLen %2)

  ret <vscale x 4 x i1> %a
}

declare <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i64(
  <vscale x 4 x i1>,
  <vscale x 4 x i64>,
  <vscale x 4 x i64>,
  <vscale x 4 x i1>,
  iXLen);

; Masked variant: the mask operand is itself produced by a swapped vmsle.
define <vscale x 4 x i1> @intrinsic_vmsge_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i64> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv4i64_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT:    vmv1r.v v20, v0
; CHECK-NEXT:    vmsle.vv v0, v12, v8
; CHECK-NEXT:    vmsle.vv v20, v16, v12, v0.t
; CHECK-NEXT:    vmv1r.v v0, v20
; CHECK-NEXT:    ret
entry:
  %mask = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i64(
    <vscale x 4 x i64> %1,
    <vscale x 4 x i64> %2,
    iXLen %4)
  %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i64(
    <vscale x 4 x i1> %0,
    <vscale x 4 x i64> %2,
    <vscale x 4 x i64> %3,
    <vscale x 4 x i1> %mask,
    iXLen %4)

  ret <vscale x 4 x i1> %a
}

declare <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i8.i8(
  <vscale x 1 x i8>,
  i8,
  iXLen);

; There is no vmsge.vx; codegen emits vmslt.vx and negates the result with vmnot.m.
define <vscale x 1 x i1> @intrinsic_vmsge_vx_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsge_vx_nxv1i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vmslt.vx v8, v8, a0
; CHECK-NEXT:    vmnot.m v0, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i8.i8(
    <vscale x 1 x i8> %0,
    i8 %1,
    iXLen %2)

  ret <vscale x 1 x i1> %a
}

declare <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i8.i8(
  <vscale x 1 x i1>,
  <vscale x 1 x i8>,
  i8,
  <vscale x 1 x i1>,
  iXLen);

; Masked form: vmslt.vx under the mask, then vmxor.mm with the mask flips the active lanes.
define <vscale x 1 x i1> @intrinsic_vmsge_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv1i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT:    vmv1r.v v10, v0
; CHECK-NEXT:    vmv1r.v v0, v9
; CHECK-NEXT:    vmslt.vx v10, v8, a0, v0.t
; CHECK-NEXT:    vmxor.mm v0, v10, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i8.i8(
    <vscale x 1 x i1> %0,
    <vscale x 1 x i8> %1,
    i8 %2,
    <vscale x 1 x i1> %3,
    iXLen %4)

  ret <vscale x 1 x i1> %a
}

973declare <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i8.i8(
974  <vscale x 2 x i8>,
975  i8,
976  iXLen);
977
978define <vscale x 2 x i1> @intrinsic_vmsge_vx_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, iXLen %2) nounwind {
979; CHECK-LABEL: intrinsic_vmsge_vx_nxv2i8_i8:
980; CHECK:       # %bb.0: # %entry
981; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
982; CHECK-NEXT:    vmslt.vx v8, v8, a0
983; CHECK-NEXT:    vmnot.m v0, v8
984; CHECK-NEXT:    ret
985entry:
986  %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i8.i8(
987    <vscale x 2 x i8> %0,
988    i8 %1,
989    iXLen %2)
990
991  ret <vscale x 2 x i1> %a
992}
993
994declare <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i8.i8(
995  <vscale x 2 x i1>,
996  <vscale x 2 x i8>,
997  i8,
998  <vscale x 2 x i1>,
999  iXLen);
1000
1001define <vscale x 2 x i1> @intrinsic_vmsge_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
1002; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv2i8_i8:
1003; CHECK:       # %bb.0: # %entry
1004; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
1005; CHECK-NEXT:    vmv1r.v v10, v0
1006; CHECK-NEXT:    vmv1r.v v0, v9
1007; CHECK-NEXT:    vmslt.vx v10, v8, a0, v0.t
1008; CHECK-NEXT:    vmxor.mm v0, v10, v9
1009; CHECK-NEXT:    ret
1010entry:
1011  %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i8.i8(
1012    <vscale x 2 x i1> %0,
1013    <vscale x 2 x i8> %1,
1014    i8 %2,
1015    <vscale x 2 x i1> %3,
1016    iXLen %4)
1017
1018  ret <vscale x 2 x i1> %a
1019}
1020
1021declare <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i8.i8(
1022  <vscale x 4 x i8>,
1023  i8,
1024  iXLen);
1025
1026define <vscale x 4 x i1> @intrinsic_vmsge_vx_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, iXLen %2) nounwind {
1027; CHECK-LABEL: intrinsic_vmsge_vx_nxv4i8_i8:
1028; CHECK:       # %bb.0: # %entry
1029; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
1030; CHECK-NEXT:    vmslt.vx v8, v8, a0
1031; CHECK-NEXT:    vmnot.m v0, v8
1032; CHECK-NEXT:    ret
1033entry:
1034  %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i8.i8(
1035    <vscale x 4 x i8> %0,
1036    i8 %1,
1037    iXLen %2)
1038
1039  ret <vscale x 4 x i1> %a
1040}
1041
1042declare <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i8.i8(
1043  <vscale x 4 x i1>,
1044  <vscale x 4 x i8>,
1045  i8,
1046  <vscale x 4 x i1>,
1047  iXLen);
1048
1049define <vscale x 4 x i1> @intrinsic_vmsge_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
1050; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv4i8_i8:
1051; CHECK:       # %bb.0: # %entry
1052; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
1053; CHECK-NEXT:    vmv1r.v v10, v0
1054; CHECK-NEXT:    vmv1r.v v0, v9
1055; CHECK-NEXT:    vmslt.vx v10, v8, a0, v0.t
1056; CHECK-NEXT:    vmxor.mm v0, v10, v9
1057; CHECK-NEXT:    ret
1058entry:
1059  %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i8.i8(
1060    <vscale x 4 x i1> %0,
1061    <vscale x 4 x i8> %1,
1062    i8 %2,
1063    <vscale x 4 x i1> %3,
1064    iXLen %4)
1065
1066  ret <vscale x 4 x i1> %a
1067}
1068
1069define <vscale x 4 x i1> @intrinsic_vmsge_mask_vx_nxv4i8_i8_1(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
1070; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv4i8_i8_1:
1071; CHECK:       # %bb.0: # %entry
1072; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
1073; CHECK-NEXT:    vmv1r.v v10, v0
1074; CHECK-NEXT:    li a0, 99
1075; CHECK-NEXT:    vmv1r.v v0, v9
1076; CHECK-NEXT:    vmsgt.vx v10, v8, a0, v0.t
1077; CHECK-NEXT:    vmv1r.v v0, v10
1078; CHECK-NEXT:    ret
1079entry:
1080  %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i8.i8(
1081    <vscale x 4 x i1> %0,
1082    <vscale x 4 x i8> %1,
1083    i8 100,
1084    <vscale x 4 x i1> %2,
1085    iXLen %3)
1086
1087  ret <vscale x 4 x i1> %a
1088}
1089
1090define <vscale x 4 x i1> @intrinsic_vmsge_mask_vx_nxv4i8_i8_2(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
1091; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv4i8_i8_2:
1092; CHECK:       # %bb.0: # %entry
1093; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
1094; CHECK-NEXT:    vmor.mm v0, v9, v0
1095; CHECK-NEXT:    ret
1096entry:
1097  %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i8.i8(
1098    <vscale x 4 x i1> %0,
1099    <vscale x 4 x i8> %1,
1100    i8 -128,
1101    <vscale x 4 x i1> %2,
1102    iXLen %3)
1103
1104  ret <vscale x 4 x i1> %a
1105}
1106
1107
1108declare <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i8.i8(
1109  <vscale x 8 x i8>,
1110  i8,
1111  iXLen);
1112
1113define <vscale x 8 x i1> @intrinsic_vmsge_vx_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, iXLen %2) nounwind {
1114; CHECK-LABEL: intrinsic_vmsge_vx_nxv8i8_i8:
1115; CHECK:       # %bb.0: # %entry
1116; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
1117; CHECK-NEXT:    vmslt.vx v8, v8, a0
1118; CHECK-NEXT:    vmnot.m v0, v8
1119; CHECK-NEXT:    ret
1120entry:
1121  %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i8.i8(
1122    <vscale x 8 x i8> %0,
1123    i8 %1,
1124    iXLen %2)
1125
1126  ret <vscale x 8 x i1> %a
1127}
1128
1129declare <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i8.i8(
1130  <vscale x 8 x i1>,
1131  <vscale x 8 x i8>,
1132  i8,
1133  <vscale x 8 x i1>,
1134  iXLen);
1135
1136define <vscale x 8 x i1> @intrinsic_vmsge_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
1137; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv8i8_i8:
1138; CHECK:       # %bb.0: # %entry
1139; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
1140; CHECK-NEXT:    vmv1r.v v10, v0
1141; CHECK-NEXT:    vmv1r.v v0, v9
1142; CHECK-NEXT:    vmslt.vx v10, v8, a0, v0.t
1143; CHECK-NEXT:    vmxor.mm v0, v10, v9
1144; CHECK-NEXT:    ret
1145entry:
1146  %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i8.i8(
1147    <vscale x 8 x i1> %0,
1148    <vscale x 8 x i8> %1,
1149    i8 %2,
1150    <vscale x 8 x i1> %3,
1151    iXLen %4)
1152
1153  ret <vscale x 8 x i1> %a
1154}
1155
1156declare <vscale x 16 x i1> @llvm.riscv.vmsge.nxv16i8.i8(
1157  <vscale x 16 x i8>,
1158  i8,
1159  iXLen);
1160
1161define <vscale x 16 x i1> @intrinsic_vmsge_vx_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, iXLen %2) nounwind {
1162; CHECK-LABEL: intrinsic_vmsge_vx_nxv16i8_i8:
1163; CHECK:       # %bb.0: # %entry
1164; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
1165; CHECK-NEXT:    vmslt.vx v10, v8, a0
1166; CHECK-NEXT:    vmnot.m v0, v10
1167; CHECK-NEXT:    ret
1168entry:
1169  %a = call <vscale x 16 x i1> @llvm.riscv.vmsge.nxv16i8.i8(
1170    <vscale x 16 x i8> %0,
1171    i8 %1,
1172    iXLen %2)
1173
1174  ret <vscale x 16 x i1> %a
1175}
1176
1177declare <vscale x 16 x i1> @llvm.riscv.vmsge.mask.nxv16i8.i8(
1178  <vscale x 16 x i1>,
1179  <vscale x 16 x i8>,
1180  i8,
1181  <vscale x 16 x i1>,
1182  iXLen);
1183
1184define <vscale x 16 x i1> @intrinsic_vmsge_mask_vx_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
1185; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv16i8_i8:
1186; CHECK:       # %bb.0: # %entry
1187; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
1188; CHECK-NEXT:    vmv1r.v v11, v0
1189; CHECK-NEXT:    vmv1r.v v0, v10
1190; CHECK-NEXT:    vmslt.vx v11, v8, a0, v0.t
1191; CHECK-NEXT:    vmxor.mm v0, v11, v10
1192; CHECK-NEXT:    ret
1193entry:
1194  %a = call <vscale x 16 x i1> @llvm.riscv.vmsge.mask.nxv16i8.i8(
1195    <vscale x 16 x i1> %0,
1196    <vscale x 16 x i8> %1,
1197    i8 %2,
1198    <vscale x 16 x i1> %3,
1199    iXLen %4)
1200
1201  ret <vscale x 16 x i1> %a
1202}
1203
1204declare <vscale x 32 x i1> @llvm.riscv.vmsge.nxv32i8.i8(
1205  <vscale x 32 x i8>,
1206  i8,
1207  iXLen);
1208
1209define <vscale x 32 x i1> @intrinsic_vmsge_vx_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, iXLen %2) nounwind {
1210; CHECK-LABEL: intrinsic_vmsge_vx_nxv32i8_i8:
1211; CHECK:       # %bb.0: # %entry
1212; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
1213; CHECK-NEXT:    vmslt.vx v12, v8, a0
1214; CHECK-NEXT:    vmnot.m v0, v12
1215; CHECK-NEXT:    ret
1216entry:
1217  %a = call <vscale x 32 x i1> @llvm.riscv.vmsge.nxv32i8.i8(
1218    <vscale x 32 x i8> %0,
1219    i8 %1,
1220    iXLen %2)
1221
1222  ret <vscale x 32 x i1> %a
1223}
1224
1225declare <vscale x 32 x i1> @llvm.riscv.vmsge.mask.nxv32i8.i8(
1226  <vscale x 32 x i1>,
1227  <vscale x 32 x i8>,
1228  i8,
1229  <vscale x 32 x i1>,
1230  iXLen);
1231
1232define <vscale x 32 x i1> @intrinsic_vmsge_mask_vx_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
1233; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv32i8_i8:
1234; CHECK:       # %bb.0: # %entry
1235; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
1236; CHECK-NEXT:    vmv1r.v v13, v0
1237; CHECK-NEXT:    vmv1r.v v0, v12
1238; CHECK-NEXT:    vmslt.vx v13, v8, a0, v0.t
1239; CHECK-NEXT:    vmxor.mm v0, v13, v12
1240; CHECK-NEXT:    ret
1241entry:
1242  %a = call <vscale x 32 x i1> @llvm.riscv.vmsge.mask.nxv32i8.i8(
1243    <vscale x 32 x i1> %0,
1244    <vscale x 32 x i8> %1,
1245    i8 %2,
1246    <vscale x 32 x i1> %3,
1247    iXLen %4)
1248
1249  ret <vscale x 32 x i1> %a
1250}
1251
; i16-element variants of the vmsge.vx tests; same vmslt.vx + vmnot.m
; (unmasked) and masked vmslt.vx + vmxor.mm (masked) expansion as for i8.
1252declare <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i16.i16(
1253  <vscale x 1 x i16>,
1254  i16,
1255  iXLen);
1256
1257define <vscale x 1 x i1> @intrinsic_vmsge_vx_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, iXLen %2) nounwind {
1258; CHECK-LABEL: intrinsic_vmsge_vx_nxv1i16_i16:
1259; CHECK:       # %bb.0: # %entry
1260; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
1261; CHECK-NEXT:    vmslt.vx v8, v8, a0
1262; CHECK-NEXT:    vmnot.m v0, v8
1263; CHECK-NEXT:    ret
1264entry:
1265  %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i16.i16(
1266    <vscale x 1 x i16> %0,
1267    i16 %1,
1268    iXLen %2)
1269
1270  ret <vscale x 1 x i1> %a
1271}
1272
1273declare <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i16.i16(
1274  <vscale x 1 x i1>,
1275  <vscale x 1 x i16>,
1276  i16,
1277  <vscale x 1 x i1>,
1278  iXLen);
1279
1280define <vscale x 1 x i1> @intrinsic_vmsge_mask_vx_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
1281; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv1i16_i16:
1282; CHECK:       # %bb.0: # %entry
1283; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
1284; CHECK-NEXT:    vmv1r.v v10, v0
1285; CHECK-NEXT:    vmv1r.v v0, v9
1286; CHECK-NEXT:    vmslt.vx v10, v8, a0, v0.t
1287; CHECK-NEXT:    vmxor.mm v0, v10, v9
1288; CHECK-NEXT:    ret
1289entry:
1290  %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i16.i16(
1291    <vscale x 1 x i1> %0,
1292    <vscale x 1 x i16> %1,
1293    i16 %2,
1294    <vscale x 1 x i1> %3,
1295    iXLen %4)
1296
1297  ret <vscale x 1 x i1> %a
1298}
1299
1300declare <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i16.i16(
1301  <vscale x 2 x i16>,
1302  i16,
1303  iXLen);
1304
1305define <vscale x 2 x i1> @intrinsic_vmsge_vx_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, iXLen %2) nounwind {
1306; CHECK-LABEL: intrinsic_vmsge_vx_nxv2i16_i16:
1307; CHECK:       # %bb.0: # %entry
1308; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
1309; CHECK-NEXT:    vmslt.vx v8, v8, a0
1310; CHECK-NEXT:    vmnot.m v0, v8
1311; CHECK-NEXT:    ret
1312entry:
1313  %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i16.i16(
1314    <vscale x 2 x i16> %0,
1315    i16 %1,
1316    iXLen %2)
1317
1318  ret <vscale x 2 x i1> %a
1319}
1320
1321declare <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i16.i16(
1322  <vscale x 2 x i1>,
1323  <vscale x 2 x i16>,
1324  i16,
1325  <vscale x 2 x i1>,
1326  iXLen);
1327
1328define <vscale x 2 x i1> @intrinsic_vmsge_mask_vx_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
1329; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv2i16_i16:
1330; CHECK:       # %bb.0: # %entry
1331; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
1332; CHECK-NEXT:    vmv1r.v v10, v0
1333; CHECK-NEXT:    vmv1r.v v0, v9
1334; CHECK-NEXT:    vmslt.vx v10, v8, a0, v0.t
1335; CHECK-NEXT:    vmxor.mm v0, v10, v9
1336; CHECK-NEXT:    ret
1337entry:
1338  %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i16.i16(
1339    <vscale x 2 x i1> %0,
1340    <vscale x 2 x i16> %1,
1341    i16 %2,
1342    <vscale x 2 x i1> %3,
1343    iXLen %4)
1344
1345  ret <vscale x 2 x i1> %a
1346}
1347
1348declare <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i16.i16(
1349  <vscale x 4 x i16>,
1350  i16,
1351  iXLen);
1352
1353define <vscale x 4 x i1> @intrinsic_vmsge_vx_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, iXLen %2) nounwind {
1354; CHECK-LABEL: intrinsic_vmsge_vx_nxv4i16_i16:
1355; CHECK:       # %bb.0: # %entry
1356; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
1357; CHECK-NEXT:    vmslt.vx v8, v8, a0
1358; CHECK-NEXT:    vmnot.m v0, v8
1359; CHECK-NEXT:    ret
1360entry:
1361  %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i16.i16(
1362    <vscale x 4 x i16> %0,
1363    i16 %1,
1364    iXLen %2)
1365
1366  ret <vscale x 4 x i1> %a
1367}
1368
1369declare <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i16.i16(
1370  <vscale x 4 x i1>,
1371  <vscale x 4 x i16>,
1372  i16,
1373  <vscale x 4 x i1>,
1374  iXLen);
1375
1376define <vscale x 4 x i1> @intrinsic_vmsge_mask_vx_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
1377; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv4i16_i16:
1378; CHECK:       # %bb.0: # %entry
1379; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
1380; CHECK-NEXT:    vmv1r.v v10, v0
1381; CHECK-NEXT:    vmv1r.v v0, v9
1382; CHECK-NEXT:    vmslt.vx v10, v8, a0, v0.t
1383; CHECK-NEXT:    vmxor.mm v0, v10, v9
1384; CHECK-NEXT:    ret
1385entry:
1386  %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i16.i16(
1387    <vscale x 4 x i1> %0,
1388    <vscale x 4 x i16> %1,
1389    i16 %2,
1390    <vscale x 4 x i1> %3,
1391    iXLen %4)
1392
1393  ret <vscale x 4 x i1> %a
1394}
1395
1396declare <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i16.i16(
1397  <vscale x 8 x i16>,
1398  i16,
1399  iXLen);
1400
1401define <vscale x 8 x i1> @intrinsic_vmsge_vx_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, iXLen %2) nounwind {
1402; CHECK-LABEL: intrinsic_vmsge_vx_nxv8i16_i16:
1403; CHECK:       # %bb.0: # %entry
1404; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
1405; CHECK-NEXT:    vmslt.vx v10, v8, a0
1406; CHECK-NEXT:    vmnot.m v0, v10
1407; CHECK-NEXT:    ret
1408entry:
1409  %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i16.i16(
1410    <vscale x 8 x i16> %0,
1411    i16 %1,
1412    iXLen %2)
1413
1414  ret <vscale x 8 x i1> %a
1415}
1416
1417declare <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i16.i16(
1418  <vscale x 8 x i1>,
1419  <vscale x 8 x i16>,
1420  i16,
1421  <vscale x 8 x i1>,
1422  iXLen);
1423
1424define <vscale x 8 x i1> @intrinsic_vmsge_mask_vx_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
1425; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv8i16_i16:
1426; CHECK:       # %bb.0: # %entry
1427; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
1428; CHECK-NEXT:    vmv1r.v v11, v0
1429; CHECK-NEXT:    vmv1r.v v0, v10
1430; CHECK-NEXT:    vmslt.vx v11, v8, a0, v0.t
1431; CHECK-NEXT:    vmxor.mm v0, v11, v10
1432; CHECK-NEXT:    ret
1433entry:
1434  %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i16.i16(
1435    <vscale x 8 x i1> %0,
1436    <vscale x 8 x i16> %1,
1437    i16 %2,
1438    <vscale x 8 x i1> %3,
1439    iXLen %4)
1440
1441  ret <vscale x 8 x i1> %a
1442}
1443
1444declare <vscale x 16 x i1> @llvm.riscv.vmsge.nxv16i16.i16(
1445  <vscale x 16 x i16>,
1446  i16,
1447  iXLen);
1448
1449define <vscale x 16 x i1> @intrinsic_vmsge_vx_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, iXLen %2) nounwind {
1450; CHECK-LABEL: intrinsic_vmsge_vx_nxv16i16_i16:
1451; CHECK:       # %bb.0: # %entry
1452; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
1453; CHECK-NEXT:    vmslt.vx v12, v8, a0
1454; CHECK-NEXT:    vmnot.m v0, v12
1455; CHECK-NEXT:    ret
1456entry:
1457  %a = call <vscale x 16 x i1> @llvm.riscv.vmsge.nxv16i16.i16(
1458    <vscale x 16 x i16> %0,
1459    i16 %1,
1460    iXLen %2)
1461
1462  ret <vscale x 16 x i1> %a
1463}
1464
1465declare <vscale x 16 x i1> @llvm.riscv.vmsge.mask.nxv16i16.i16(
1466  <vscale x 16 x i1>,
1467  <vscale x 16 x i16>,
1468  i16,
1469  <vscale x 16 x i1>,
1470  iXLen);
1471
1472define <vscale x 16 x i1> @intrinsic_vmsge_mask_vx_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
1473; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv16i16_i16:
1474; CHECK:       # %bb.0: # %entry
1475; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
1476; CHECK-NEXT:    vmv1r.v v13, v0
1477; CHECK-NEXT:    vmv1r.v v0, v12
1478; CHECK-NEXT:    vmslt.vx v13, v8, a0, v0.t
1479; CHECK-NEXT:    vmxor.mm v0, v13, v12
1480; CHECK-NEXT:    ret
1481entry:
1482  %a = call <vscale x 16 x i1> @llvm.riscv.vmsge.mask.nxv16i16.i16(
1483    <vscale x 16 x i1> %0,
1484    <vscale x 16 x i16> %1,
1485    i16 %2,
1486    <vscale x 16 x i1> %3,
1487    iXLen %4)
1488
1489  ret <vscale x 16 x i1> %a
1490}
1491
; i32-element variants of the vmsge.vx tests; same vmslt.vx + vmnot.m
; (unmasked) and masked vmslt.vx + vmxor.mm (masked) expansion as above.
1492declare <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i32.i32(
1493  <vscale x 1 x i32>,
1494  i32,
1495  iXLen);
1496
1497define <vscale x 1 x i1> @intrinsic_vmsge_vx_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, iXLen %2) nounwind {
1498; CHECK-LABEL: intrinsic_vmsge_vx_nxv1i32_i32:
1499; CHECK:       # %bb.0: # %entry
1500; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
1501; CHECK-NEXT:    vmslt.vx v8, v8, a0
1502; CHECK-NEXT:    vmnot.m v0, v8
1503; CHECK-NEXT:    ret
1504entry:
1505  %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i32.i32(
1506    <vscale x 1 x i32> %0,
1507    i32 %1,
1508    iXLen %2)
1509
1510  ret <vscale x 1 x i1> %a
1511}
1512
1513declare <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i32.i32(
1514  <vscale x 1 x i1>,
1515  <vscale x 1 x i32>,
1516  i32,
1517  <vscale x 1 x i1>,
1518  iXLen);
1519
1520define <vscale x 1 x i1> @intrinsic_vmsge_mask_vx_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
1521; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv1i32_i32:
1522; CHECK:       # %bb.0: # %entry
1523; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
1524; CHECK-NEXT:    vmv1r.v v10, v0
1525; CHECK-NEXT:    vmv1r.v v0, v9
1526; CHECK-NEXT:    vmslt.vx v10, v8, a0, v0.t
1527; CHECK-NEXT:    vmxor.mm v0, v10, v9
1528; CHECK-NEXT:    ret
1529entry:
1530  %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i32.i32(
1531    <vscale x 1 x i1> %0,
1532    <vscale x 1 x i32> %1,
1533    i32 %2,
1534    <vscale x 1 x i1> %3,
1535    iXLen %4)
1536
1537  ret <vscale x 1 x i1> %a
1538}
1539
1540declare <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i32.i32(
1541  <vscale x 2 x i32>,
1542  i32,
1543  iXLen);
1544
1545define <vscale x 2 x i1> @intrinsic_vmsge_vx_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, iXLen %2) nounwind {
1546; CHECK-LABEL: intrinsic_vmsge_vx_nxv2i32_i32:
1547; CHECK:       # %bb.0: # %entry
1548; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
1549; CHECK-NEXT:    vmslt.vx v8, v8, a0
1550; CHECK-NEXT:    vmnot.m v0, v8
1551; CHECK-NEXT:    ret
1552entry:
1553  %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i32.i32(
1554    <vscale x 2 x i32> %0,
1555    i32 %1,
1556    iXLen %2)
1557
1558  ret <vscale x 2 x i1> %a
1559}
1560
1561declare <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i32.i32(
1562  <vscale x 2 x i1>,
1563  <vscale x 2 x i32>,
1564  i32,
1565  <vscale x 2 x i1>,
1566  iXLen);
1567
1568define <vscale x 2 x i1> @intrinsic_vmsge_mask_vx_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
1569; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv2i32_i32:
1570; CHECK:       # %bb.0: # %entry
1571; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
1572; CHECK-NEXT:    vmv1r.v v10, v0
1573; CHECK-NEXT:    vmv1r.v v0, v9
1574; CHECK-NEXT:    vmslt.vx v10, v8, a0, v0.t
1575; CHECK-NEXT:    vmxor.mm v0, v10, v9
1576; CHECK-NEXT:    ret
1577entry:
1578  %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i32.i32(
1579    <vscale x 2 x i1> %0,
1580    <vscale x 2 x i32> %1,
1581    i32 %2,
1582    <vscale x 2 x i1> %3,
1583    iXLen %4)
1584
1585  ret <vscale x 2 x i1> %a
1586}
1587
1588declare <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i32.i32(
1589  <vscale x 4 x i32>,
1590  i32,
1591  iXLen);
1592
1593define <vscale x 4 x i1> @intrinsic_vmsge_vx_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, iXLen %2) nounwind {
1594; CHECK-LABEL: intrinsic_vmsge_vx_nxv4i32_i32:
1595; CHECK:       # %bb.0: # %entry
1596; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
1597; CHECK-NEXT:    vmslt.vx v10, v8, a0
1598; CHECK-NEXT:    vmnot.m v0, v10
1599; CHECK-NEXT:    ret
1600entry:
1601  %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i32.i32(
1602    <vscale x 4 x i32> %0,
1603    i32 %1,
1604    iXLen %2)
1605
1606  ret <vscale x 4 x i1> %a
1607}
1608
1609declare <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i32.i32(
1610  <vscale x 4 x i1>,
1611  <vscale x 4 x i32>,
1612  i32,
1613  <vscale x 4 x i1>,
1614  iXLen);
1615
1616define <vscale x 4 x i1> @intrinsic_vmsge_mask_vx_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
1617; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv4i32_i32:
1618; CHECK:       # %bb.0: # %entry
1619; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
1620; CHECK-NEXT:    vmv1r.v v11, v0
1621; CHECK-NEXT:    vmv1r.v v0, v10
1622; CHECK-NEXT:    vmslt.vx v11, v8, a0, v0.t
1623; CHECK-NEXT:    vmxor.mm v0, v11, v10
1624; CHECK-NEXT:    ret
1625entry:
1626  %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i32.i32(
1627    <vscale x 4 x i1> %0,
1628    <vscale x 4 x i32> %1,
1629    i32 %2,
1630    <vscale x 4 x i1> %3,
1631    iXLen %4)
1632
1633  ret <vscale x 4 x i1> %a
1634}
1635
1636declare <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i32.i32(
1637  <vscale x 8 x i32>,
1638  i32,
1639  iXLen);
1640
1641define <vscale x 8 x i1> @intrinsic_vmsge_vx_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, iXLen %2) nounwind {
1642; CHECK-LABEL: intrinsic_vmsge_vx_nxv8i32_i32:
1643; CHECK:       # %bb.0: # %entry
1644; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
1645; CHECK-NEXT:    vmslt.vx v12, v8, a0
1646; CHECK-NEXT:    vmnot.m v0, v12
1647; CHECK-NEXT:    ret
1648entry:
1649  %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i32.i32(
1650    <vscale x 8 x i32> %0,
1651    i32 %1,
1652    iXLen %2)
1653
1654  ret <vscale x 8 x i1> %a
1655}
1656
1657declare <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i32.i32(
1658  <vscale x 8 x i1>,
1659  <vscale x 8 x i32>,
1660  i32,
1661  <vscale x 8 x i1>,
1662  iXLen);
1663
1664define <vscale x 8 x i1> @intrinsic_vmsge_mask_vx_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
1665; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv8i32_i32:
1666; CHECK:       # %bb.0: # %entry
1667; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
1668; CHECK-NEXT:    vmv1r.v v13, v0
1669; CHECK-NEXT:    vmv1r.v v0, v12
1670; CHECK-NEXT:    vmslt.vx v13, v8, a0, v0.t
1671; CHECK-NEXT:    vmxor.mm v0, v13, v12
1672; CHECK-NEXT:    ret
1673entry:
1674  %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i32.i32(
1675    <vscale x 8 x i1> %0,
1676    <vscale x 8 x i32> %1,
1677    i32 %2,
1678    <vscale x 8 x i1> %3,
1679    iXLen %4)
1680
1681  ret <vscale x 8 x i1> %a
1682}
1683
; i64-scalar variants need per-XLEN checks: on RV64 the scalar fits in a GPR
; and the usual vmslt.vx + vmnot.m expansion is expected; on RV32 the i64 is
; stored to the stack in two sw halves, splatted with a zero-stride vlse64.v,
; and compared with vmsle.vv (operands swapped).
1684declare <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i64.i64(
1685  <vscale x 1 x i64>,
1686  i64,
1687  iXLen);
1688
1689define <vscale x 1 x i1> @intrinsic_vmsge_vx_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, iXLen %2) nounwind {
1690; RV32-LABEL: intrinsic_vmsge_vx_nxv1i64_i64:
1691; RV32:       # %bb.0: # %entry
1692; RV32-NEXT:    addi sp, sp, -16
1693; RV32-NEXT:    sw a0, 8(sp)
1694; RV32-NEXT:    sw a1, 12(sp)
1695; RV32-NEXT:    addi a0, sp, 8
1696; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
1697; RV32-NEXT:    vlse64.v v9, (a0), zero
1698; RV32-NEXT:    vmsle.vv v0, v9, v8
1699; RV32-NEXT:    addi sp, sp, 16
1700; RV32-NEXT:    ret
1701;
1702; RV64-LABEL: intrinsic_vmsge_vx_nxv1i64_i64:
1703; RV64:       # %bb.0: # %entry
1704; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
1705; RV64-NEXT:    vmslt.vx v8, v8, a0
1706; RV64-NEXT:    vmnot.m v0, v8
1707; RV64-NEXT:    ret
1708entry:
1709  %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i64.i64(
1710    <vscale x 1 x i64> %0,
1711    i64 %1,
1712    iXLen %2)
1713
1714  ret <vscale x 1 x i1> %a
1715}
1716
1717declare <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i64.i64(
1718  <vscale x 1 x i1>,
1719  <vscale x 1 x i64>,
1720  i64,
1721  <vscale x 1 x i1>,
1722  iXLen);
1723
1724define <vscale x 1 x i1> @intrinsic_vmsge_mask_vx_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
1725; RV32-LABEL: intrinsic_vmsge_mask_vx_nxv1i64_i64:
1726; RV32:       # %bb.0: # %entry
1727; RV32-NEXT:    addi sp, sp, -16
1728; RV32-NEXT:    sw a0, 8(sp)
1729; RV32-NEXT:    sw a1, 12(sp)
1730; RV32-NEXT:    addi a0, sp, 8
1731; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, mu
1732; RV32-NEXT:    vlse64.v v11, (a0), zero
1733; RV32-NEXT:    vmv1r.v v10, v0
1734; RV32-NEXT:    vmv1r.v v0, v9
1735; RV32-NEXT:    vmsle.vv v10, v11, v8, v0.t
1736; RV32-NEXT:    vmv.v.v v0, v10
1737; RV32-NEXT:    addi sp, sp, 16
1738; RV32-NEXT:    ret
1739;
1740; RV64-LABEL: intrinsic_vmsge_mask_vx_nxv1i64_i64:
1741; RV64:       # %bb.0: # %entry
1742; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
1743; RV64-NEXT:    vmv1r.v v10, v0
1744; RV64-NEXT:    vmv1r.v v0, v9
1745; RV64-NEXT:    vmslt.vx v10, v8, a0, v0.t
1746; RV64-NEXT:    vmxor.mm v0, v10, v9
1747; RV64-NEXT:    ret
1748entry:
1749  %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i64.i64(
1750    <vscale x 1 x i1> %0,
1751    <vscale x 1 x i64> %1,
1752    i64 %2,
1753    <vscale x 1 x i1> %3,
1754    iXLen %4)
1755
1756  ret <vscale x 1 x i1> %a
1757}
1758
1759declare <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i64.i64(
1760  <vscale x 2 x i64>,
1761  i64,
1762  iXLen);
1763
1764define <vscale x 2 x i1> @intrinsic_vmsge_vx_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, iXLen %2) nounwind {
1765; RV32-LABEL: intrinsic_vmsge_vx_nxv2i64_i64:
1766; RV32:       # %bb.0: # %entry
1767; RV32-NEXT:    addi sp, sp, -16
1768; RV32-NEXT:    sw a0, 8(sp)
1769; RV32-NEXT:    sw a1, 12(sp)
1770; RV32-NEXT:    addi a0, sp, 8
1771; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
1772; RV32-NEXT:    vlse64.v v10, (a0), zero
1773; RV32-NEXT:    vmsle.vv v0, v10, v8
1774; RV32-NEXT:    addi sp, sp, 16
1775; RV32-NEXT:    ret
1776;
1777; RV64-LABEL: intrinsic_vmsge_vx_nxv2i64_i64:
1778; RV64:       # %bb.0: # %entry
1779; RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
1780; RV64-NEXT:    vmslt.vx v10, v8, a0
1781; RV64-NEXT:    vmnot.m v0, v10
1782; RV64-NEXT:    ret
1783entry:
1784  %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i64.i64(
1785    <vscale x 2 x i64> %0,
1786    i64 %1,
1787    iXLen %2)
1788
1789  ret <vscale x 2 x i1> %a
1790}
1791
1792declare <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i64.i64(
1793  <vscale x 2 x i1>,
1794  <vscale x 2 x i64>,
1795  i64,
1796  <vscale x 2 x i1>,
1797  iXLen);
1798
1799define <vscale x 2 x i1> @intrinsic_vmsge_mask_vx_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
1800; RV32-LABEL: intrinsic_vmsge_mask_vx_nxv2i64_i64:
1801; RV32:       # %bb.0: # %entry
1802; RV32-NEXT:    addi sp, sp, -16
1803; RV32-NEXT:    sw a0, 8(sp)
1804; RV32-NEXT:    sw a1, 12(sp)
1805; RV32-NEXT:    addi a0, sp, 8
1806; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, mu
1807; RV32-NEXT:    vlse64.v v12, (a0), zero
1808; RV32-NEXT:    vmv1r.v v11, v0
1809; RV32-NEXT:    vmv1r.v v0, v10
1810; RV32-NEXT:    vmsle.vv v11, v12, v8, v0.t
1811; RV32-NEXT:    vmv1r.v v0, v11
1812; RV32-NEXT:    addi sp, sp, 16
1813; RV32-NEXT:    ret
1814;
1815; RV64-LABEL: intrinsic_vmsge_mask_vx_nxv2i64_i64:
1816; RV64:       # %bb.0: # %entry
1817; RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
1818; RV64-NEXT:    vmv1r.v v11, v0
1819; RV64-NEXT:    vmv1r.v v0, v10
1820; RV64-NEXT:    vmslt.vx v11, v8, a0, v0.t
1821; RV64-NEXT:    vmxor.mm v0, v11, v10
1822; RV64-NEXT:    ret
1823entry:
1824  %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i64.i64(
1825    <vscale x 2 x i1> %0,
1826    <vscale x 2 x i64> %1,
1827    i64 %2,
1828    <vscale x 2 x i1> %3,
1829    iXLen %4)
1830
1831  ret <vscale x 2 x i1> %a
1832}
1833
1834declare <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i64.i64(
1835  <vscale x 4 x i64>,
1836  i64,
1837  iXLen);
1838
1839define <vscale x 4 x i1> @intrinsic_vmsge_vx_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, iXLen %2) nounwind {
1840; RV32-LABEL: intrinsic_vmsge_vx_nxv4i64_i64:
1841; RV32:       # %bb.0: # %entry
1842; RV32-NEXT:    addi sp, sp, -16
1843; RV32-NEXT:    sw a0, 8(sp)
1844; RV32-NEXT:    sw a1, 12(sp)
1845; RV32-NEXT:    addi a0, sp, 8
1846; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
1847; RV32-NEXT:    vlse64.v v12, (a0), zero
1848; RV32-NEXT:    vmsle.vv v0, v12, v8
1849; RV32-NEXT:    addi sp, sp, 16
1850; RV32-NEXT:    ret
1851;
1852; RV64-LABEL: intrinsic_vmsge_vx_nxv4i64_i64:
1853; RV64:       # %bb.0: # %entry
1854; RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
1855; RV64-NEXT:    vmslt.vx v12, v8, a0
1856; RV64-NEXT:    vmnot.m v0, v12
1857; RV64-NEXT:    ret
1858entry:
1859  %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i64.i64(
1860    <vscale x 4 x i64> %0,
1861    i64 %1,
1862    iXLen %2)
1863
1864  ret <vscale x 4 x i1> %a
1865}
1866
1867declare <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i64.i64(
1868  <vscale x 4 x i1>,
1869  <vscale x 4 x i64>,
1870  i64,
1871  <vscale x 4 x i1>,
1872  iXLen);
1873
1874define <vscale x 4 x i1> @intrinsic_vmsge_mask_vx_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
1875; RV32-LABEL: intrinsic_vmsge_mask_vx_nxv4i64_i64:
1876; RV32:       # %bb.0: # %entry
1877; RV32-NEXT:    addi sp, sp, -16
1878; RV32-NEXT:    sw a0, 8(sp)
1879; RV32-NEXT:    sw a1, 12(sp)
1880; RV32-NEXT:    addi a0, sp, 8
1881; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, mu
1882; RV32-NEXT:    vlse64.v v16, (a0), zero
1883; RV32-NEXT:    vmv1r.v v13, v0
1884; RV32-NEXT:    vmv1r.v v0, v12
1885; RV32-NEXT:    vmsle.vv v13, v16, v8, v0.t
1886; RV32-NEXT:    vmv1r.v v0, v13
1887; RV32-NEXT:    addi sp, sp, 16
1888; RV32-NEXT:    ret
1889;
1890; RV64-LABEL: intrinsic_vmsge_mask_vx_nxv4i64_i64:
1891; RV64:       # %bb.0: # %entry
1892; RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
1893; RV64-NEXT:    vmv1r.v v13, v0
1894; RV64-NEXT:    vmv1r.v v0, v12
1895; RV64-NEXT:    vmslt.vx v13, v8, a0, v0.t
1896; RV64-NEXT:    vmxor.mm v0, v13, v12
1897; RV64-NEXT:    ret
1898entry:
1899  %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i64.i64(
1900    <vscale x 4 x i1> %0,
1901    <vscale x 4 x i64> %1,
1902    i64 %2,
1903    <vscale x 4 x i1> %3,
1904    iXLen %4)
1905
1906  ret <vscale x 4 x i1> %a
1907}
1908
; vmsge.vi tests, i8 element types. There is no native vmsge.vi
; instruction, so a vmsge compare against immediate n is lowered to
; vmsgt.vi against n-1 (each IR immediate appears decremented by one in
; the expected assembly).
define <vscale x 1 x i1> @intrinsic_vmsge_vi_nxv1i8_i8(<vscale x 1 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsge_vi_nxv1i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vmsgt.vi v0, v8, -16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i8.i8(
    <vscale x 1 x i8> %0,
    i8 -15,
    iXLen %1)

  ret <vscale x 1 x i1> %a
}

define <vscale x 1 x i1> @intrinsic_vmsge_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv1i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT:    vmv1r.v v10, v0
; CHECK-NEXT:    vmv1r.v v0, v9
; CHECK-NEXT:    vmsgt.vi v10, v8, -15, v0.t
; CHECK-NEXT:    vmv1r.v v0, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i8.i8(
    <vscale x 1 x i1> %0,
    <vscale x 1 x i8> %1,
    i8 -14,
    <vscale x 1 x i1> %2,
    iXLen %3)

  ret <vscale x 1 x i1> %a
}

define <vscale x 2 x i1> @intrinsic_vmsge_vi_nxv2i8_i8(<vscale x 2 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsge_vi_nxv2i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT:    vmsgt.vi v0, v8, -14
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i8.i8(
    <vscale x 2 x i8> %0,
    i8 -13,
    iXLen %1)

  ret <vscale x 2 x i1> %a
}

define <vscale x 2 x i1> @intrinsic_vmsge_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv2i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
; CHECK-NEXT:    vmv1r.v v10, v0
; CHECK-NEXT:    vmv1r.v v0, v9
; CHECK-NEXT:    vmsgt.vi v10, v8, -13, v0.t
; CHECK-NEXT:    vmv1r.v v0, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i8.i8(
    <vscale x 2 x i1> %0,
    <vscale x 2 x i8> %1,
    i8 -12,
    <vscale x 2 x i1> %2,
    iXLen %3)

  ret <vscale x 2 x i1> %a
}

define <vscale x 4 x i1> @intrinsic_vmsge_vi_nxv4i8_i8(<vscale x 4 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsge_vi_nxv4i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    vmsgt.vi v0, v8, -12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i8.i8(
    <vscale x 4 x i8> %0,
    i8 -11,
    iXLen %1)

  ret <vscale x 4 x i1> %a
}

; Immediate 100 is outside the simm5 range even after decrementing, so
; it is materialized into a register (li a1, 99) and vmsgt.vx is used
; instead of vmsgt.vi.
define <vscale x 4 x i1> @intrinsic_vmsge_vi_nxv4i8_i8_1(<vscale x 4 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsge_vi_nxv4i8_i8_1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    li a1, 99
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    vmsgt.vx v0, v8, a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i8.i8(
    <vscale x 4 x i8> %0,
    i8 100,
    iXLen %1)

  ret <vscale x 4 x i1> %a
}

; Any signed i8 value is >= -128, so the compare folds to vmset.m
; (all-ones mask) and the decrement-by-one trick is not needed.
define <vscale x 4 x i1> @intrinsic_vmsge_vi_nxv4i8_i8_2(<vscale x 4 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsge_vi_nxv4i8_i8_2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    vmset.m v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i8.i8(
    <vscale x 4 x i8> %0,
    i8 -128,
    iXLen %1)

  ret <vscale x 4 x i1> %a
}

define <vscale x 4 x i1> @intrinsic_vmsge_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv4i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
; CHECK-NEXT:    vmv1r.v v10, v0
; CHECK-NEXT:    vmv1r.v v0, v9
; CHECK-NEXT:    vmsgt.vi v10, v8, -11, v0.t
; CHECK-NEXT:    vmv1r.v v0, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i8.i8(
    <vscale x 4 x i1> %0,
    <vscale x 4 x i8> %1,
    i8 -10,
    <vscale x 4 x i1> %2,
    iXLen %3)

  ret <vscale x 4 x i1> %a
}

define <vscale x 8 x i1> @intrinsic_vmsge_vi_nxv8i8_i8(<vscale x 8 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsge_vi_nxv8i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT:    vmsgt.vi v0, v8, -10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i8.i8(
    <vscale x 8 x i8> %0,
    i8 -9,
    iXLen %1)

  ret <vscale x 8 x i1> %a
}

define <vscale x 8 x i1> @intrinsic_vmsge_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv8i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
; CHECK-NEXT:    vmv1r.v v10, v0
; CHECK-NEXT:    vmv1r.v v0, v9
; CHECK-NEXT:    vmsgt.vi v10, v8, -9, v0.t
; CHECK-NEXT:    vmv.v.v v0, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i8.i8(
    <vscale x 8 x i1> %0,
    <vscale x 8 x i8> %1,
    i8 -8,
    <vscale x 8 x i1> %2,
    iXLen %3)

  ret <vscale x 8 x i1> %a
}

define <vscale x 16 x i1> @intrinsic_vmsge_vi_nxv16i8_i8(<vscale x 16 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsge_vi_nxv16i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT:    vmsgt.vi v0, v8, -8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i1> @llvm.riscv.vmsge.nxv16i8.i8(
    <vscale x 16 x i8> %0,
    i8 -7,
    iXLen %1)

  ret <vscale x 16 x i1> %a
}

define <vscale x 16 x i1> @intrinsic_vmsge_mask_vi_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv16i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
; CHECK-NEXT:    vmv1r.v v11, v0
; CHECK-NEXT:    vmv1r.v v0, v10
; CHECK-NEXT:    vmsgt.vi v11, v8, -7, v0.t
; CHECK-NEXT:    vmv1r.v v0, v11
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i1> @llvm.riscv.vmsge.mask.nxv16i8.i8(
    <vscale x 16 x i1> %0,
    <vscale x 16 x i8> %1,
    i8 -6,
    <vscale x 16 x i1> %2,
    iXLen %3)

  ret <vscale x 16 x i1> %a
}

define <vscale x 32 x i1> @intrinsic_vmsge_vi_nxv32i8_i8(<vscale x 32 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsge_vi_nxv32i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT:    vmsgt.vi v0, v8, -6
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i1> @llvm.riscv.vmsge.nxv32i8.i8(
    <vscale x 32 x i8> %0,
    i8 -5,
    iXLen %1)

  ret <vscale x 32 x i1> %a
}

define <vscale x 32 x i1> @intrinsic_vmsge_mask_vi_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv32i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
; CHECK-NEXT:    vmv1r.v v13, v0
; CHECK-NEXT:    vmv1r.v v0, v12
; CHECK-NEXT:    vmsgt.vi v13, v8, -5, v0.t
; CHECK-NEXT:    vmv1r.v v0, v13
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i1> @llvm.riscv.vmsge.mask.nxv32i8.i8(
    <vscale x 32 x i1> %0,
    <vscale x 32 x i8> %1,
    i8 -4,
    <vscale x 32 x i1> %2,
    iXLen %3)

  ret <vscale x 32 x i1> %a
}
2149
; vmsge.vi tests, i16 element types (same lowering as the i8 cases:
; vmsge against immediate n becomes vmsgt.vi against n-1).
define <vscale x 1 x i1> @intrinsic_vmsge_vi_nxv1i16_i16(<vscale x 1 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsge_vi_nxv1i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vmsgt.vi v0, v8, -4
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i16.i16(
    <vscale x 1 x i16> %0,
    i16 -3,
    iXLen %1)

  ret <vscale x 1 x i1> %a
}

define <vscale x 1 x i1> @intrinsic_vmsge_mask_vi_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv1i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT:    vmv1r.v v10, v0
; CHECK-NEXT:    vmv1r.v v0, v9
; CHECK-NEXT:    vmsgt.vi v10, v8, -3, v0.t
; CHECK-NEXT:    vmv1r.v v0, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i16.i16(
    <vscale x 1 x i1> %0,
    <vscale x 1 x i16> %1,
    i16 -2,
    <vscale x 1 x i1> %2,
    iXLen %3)

  ret <vscale x 1 x i1> %a
}

define <vscale x 2 x i1> @intrinsic_vmsge_vi_nxv2i16_i16(<vscale x 2 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsge_vi_nxv2i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vmsgt.vi v0, v8, -2
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i16.i16(
    <vscale x 2 x i16> %0,
    i16 -1,
    iXLen %1)

  ret <vscale x 2 x i1> %a
}

define <vscale x 2 x i1> @intrinsic_vmsge_mask_vi_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv2i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT:    vmv1r.v v10, v0
; CHECK-NEXT:    vmv1r.v v0, v9
; CHECK-NEXT:    vmsgt.vi v10, v8, -1, v0.t
; CHECK-NEXT:    vmv1r.v v0, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i16.i16(
    <vscale x 2 x i1> %0,
    <vscale x 2 x i16> %1,
    i16 0,
    <vscale x 2 x i1> %2,
    iXLen %3)

  ret <vscale x 2 x i1> %a
}

define <vscale x 4 x i1> @intrinsic_vmsge_vi_nxv4i16_i16(<vscale x 4 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsge_vi_nxv4i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vmsgt.vi v0, v8, -1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i16.i16(
    <vscale x 4 x i16> %0,
    i16 0,
    iXLen %1)

  ret <vscale x 4 x i1> %a
}

define <vscale x 4 x i1> @intrinsic_vmsge_mask_vi_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv4i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT:    vmv1r.v v10, v0
; CHECK-NEXT:    vmv1r.v v0, v9
; CHECK-NEXT:    vmsgt.vi v10, v8, 0, v0.t
; CHECK-NEXT:    vmv.v.v v0, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i16.i16(
    <vscale x 4 x i1> %0,
    <vscale x 4 x i16> %1,
    i16 1,
    <vscale x 4 x i1> %2,
    iXLen %3)

  ret <vscale x 4 x i1> %a
}

define <vscale x 8 x i1> @intrinsic_vmsge_vi_nxv8i16_i16(<vscale x 8 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsge_vi_nxv8i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vmsgt.vi v0, v8, 1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i16.i16(
    <vscale x 8 x i16> %0,
    i16 2,
    iXLen %1)

  ret <vscale x 8 x i1> %a
}

define <vscale x 8 x i1> @intrinsic_vmsge_mask_vi_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv8i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT:    vmv1r.v v11, v0
; CHECK-NEXT:    vmv1r.v v0, v10
; CHECK-NEXT:    vmsgt.vi v11, v8, 2, v0.t
; CHECK-NEXT:    vmv1r.v v0, v11
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i16.i16(
    <vscale x 8 x i1> %0,
    <vscale x 8 x i16> %1,
    i16 3,
    <vscale x 8 x i1> %2,
    iXLen %3)

  ret <vscale x 8 x i1> %a
}

define <vscale x 16 x i1> @intrinsic_vmsge_vi_nxv16i16_i16(<vscale x 16 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsge_vi_nxv16i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    vmsgt.vi v0, v8, 3
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i1> @llvm.riscv.vmsge.nxv16i16.i16(
    <vscale x 16 x i16> %0,
    i16 4,
    iXLen %1)

  ret <vscale x 16 x i1> %a
}

define <vscale x 16 x i1> @intrinsic_vmsge_mask_vi_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv16i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT:    vmv1r.v v13, v0
; CHECK-NEXT:    vmv1r.v v0, v12
; CHECK-NEXT:    vmsgt.vi v13, v8, 4, v0.t
; CHECK-NEXT:    vmv1r.v v0, v13
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i1> @llvm.riscv.vmsge.mask.nxv16i16.i16(
    <vscale x 16 x i1> %0,
    <vscale x 16 x i16> %1,
    i16 5,
    <vscale x 16 x i1> %2,
    iXLen %3)

  ret <vscale x 16 x i1> %a
}
2324
; vmsge.vi tests, i32 element types (vmsge immediate n lowered to
; vmsgt.vi n-1, as above).
define <vscale x 1 x i1> @intrinsic_vmsge_vi_nxv1i32_i32(<vscale x 1 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsge_vi_nxv1i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vmsgt.vi v0, v8, 5
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i32.i32(
    <vscale x 1 x i32> %0,
    i32 6,
    iXLen %1)

  ret <vscale x 1 x i1> %a
}

define <vscale x 1 x i1> @intrinsic_vmsge_mask_vi_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv1i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT:    vmv1r.v v10, v0
; CHECK-NEXT:    vmv1r.v v0, v9
; CHECK-NEXT:    vmsgt.vi v10, v8, 6, v0.t
; CHECK-NEXT:    vmv1r.v v0, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i32.i32(
    <vscale x 1 x i1> %0,
    <vscale x 1 x i32> %1,
    i32 7,
    <vscale x 1 x i1> %2,
    iXLen %3)

  ret <vscale x 1 x i1> %a
}

define <vscale x 2 x i1> @intrinsic_vmsge_vi_nxv2i32_i32(<vscale x 2 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsge_vi_nxv2i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vmsgt.vi v0, v8, 7
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i32.i32(
    <vscale x 2 x i32> %0,
    i32 8,
    iXLen %1)

  ret <vscale x 2 x i1> %a
}

define <vscale x 2 x i1> @intrinsic_vmsge_mask_vi_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv2i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT:    vmv1r.v v10, v0
; CHECK-NEXT:    vmv1r.v v0, v9
; CHECK-NEXT:    vmsgt.vi v10, v8, 8, v0.t
; CHECK-NEXT:    vmv.v.v v0, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i32.i32(
    <vscale x 2 x i1> %0,
    <vscale x 2 x i32> %1,
    i32 9,
    <vscale x 2 x i1> %2,
    iXLen %3)

  ret <vscale x 2 x i1> %a
}

define <vscale x 4 x i1> @intrinsic_vmsge_vi_nxv4i32_i32(<vscale x 4 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsge_vi_nxv4i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vmsgt.vi v0, v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i32.i32(
    <vscale x 4 x i32> %0,
    i32 10,
    iXLen %1)

  ret <vscale x 4 x i1> %a
}

define <vscale x 4 x i1> @intrinsic_vmsge_mask_vi_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv4i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT:    vmv1r.v v11, v0
; CHECK-NEXT:    vmv1r.v v0, v10
; CHECK-NEXT:    vmsgt.vi v11, v8, 10, v0.t
; CHECK-NEXT:    vmv1r.v v0, v11
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i32.i32(
    <vscale x 4 x i1> %0,
    <vscale x 4 x i32> %1,
    i32 11,
    <vscale x 4 x i1> %2,
    iXLen %3)

  ret <vscale x 4 x i1> %a
}

define <vscale x 8 x i1> @intrinsic_vmsge_vi_nxv8i32_i32(<vscale x 8 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsge_vi_nxv8i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vmsgt.vi v0, v8, 11
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i32.i32(
    <vscale x 8 x i32> %0,
    i32 12,
    iXLen %1)

  ret <vscale x 8 x i1> %a
}

define <vscale x 8 x i1> @intrinsic_vmsge_mask_vi_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv8i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT:    vmv1r.v v13, v0
; CHECK-NEXT:    vmv1r.v v0, v12
; CHECK-NEXT:    vmsgt.vi v13, v8, 12, v0.t
; CHECK-NEXT:    vmv1r.v v0, v13
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i32.i32(
    <vscale x 8 x i1> %0,
    <vscale x 8 x i32> %1,
    i32 13,
    <vscale x 8 x i1> %2,
    iXLen %3)

  ret <vscale x 8 x i1> %a
}
2464
; vmsge.vi tests, i64 element types. All cases use immediate 9, which
; fits simm5, so each lowers to vmsgt.vi with 8 regardless of XLEN (no
; stack splat is needed for an immediate, unlike the .vx i64 cases).
define <vscale x 1 x i1> @intrinsic_vmsge_vi_nxv1i64_i64(<vscale x 1 x i64> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsge_vi_nxv1i64_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vmsgt.vi v0, v8, 8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i64.i64(
    <vscale x 1 x i64> %0,
    i64 9,
    iXLen %1)

  ret <vscale x 1 x i1> %a
}

define <vscale x 1 x i1> @intrinsic_vmsge_mask_vi_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv1i64_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT:    vmv1r.v v10, v0
; CHECK-NEXT:    vmv1r.v v0, v9
; CHECK-NEXT:    vmsgt.vi v10, v8, 8, v0.t
; CHECK-NEXT:    vmv.v.v v0, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i64.i64(
    <vscale x 1 x i1> %0,
    <vscale x 1 x i64> %1,
    i64 9,
    <vscale x 1 x i1> %2,
    iXLen %3)

  ret <vscale x 1 x i1> %a
}

define <vscale x 2 x i1> @intrinsic_vmsge_vi_nxv2i64_i64(<vscale x 2 x i64> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsge_vi_nxv2i64_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vmsgt.vi v0, v8, 8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i64.i64(
    <vscale x 2 x i64> %0,
    i64 9,
    iXLen %1)

  ret <vscale x 2 x i1> %a
}

define <vscale x 2 x i1> @intrinsic_vmsge_mask_vi_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv2i64_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT:    vmv1r.v v11, v0
; CHECK-NEXT:    vmv1r.v v0, v10
; CHECK-NEXT:    vmsgt.vi v11, v8, 8, v0.t
; CHECK-NEXT:    vmv1r.v v0, v11
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i64.i64(
    <vscale x 2 x i1> %0,
    <vscale x 2 x i64> %1,
    i64 9,
    <vscale x 2 x i1> %2,
    iXLen %3)

  ret <vscale x 2 x i1> %a
}

define <vscale x 4 x i1> @intrinsic_vmsge_vi_nxv4i64_i64(<vscale x 4 x i64> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsge_vi_nxv4i64_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    vmsgt.vi v0, v8, 8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i64.i64(
    <vscale x 4 x i64> %0,
    i64 9,
    iXLen %1)

  ret <vscale x 4 x i1> %a
}

define <vscale x 4 x i1> @intrinsic_vmsge_mask_vi_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv4i64_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT:    vmv1r.v v13, v0
; CHECK-NEXT:    vmv1r.v v0, v12
; CHECK-NEXT:    vmsgt.vi v13, v8, 8, v0.t
; CHECK-NEXT:    vmv1r.v v0, v13
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i64.i64(
    <vscale x 4 x i1> %0,
    <vscale x 4 x i64> %1,
    i64 9,
    <vscale x 4 x i1> %2,
    iXLen %3)

  ret <vscale x 4 x i1> %a
}
2569
2570; Test cases where the mask and maskedoff are the same value.
2571define <vscale x 1 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, i8 %2, iXLen %3) nounwind {
2572; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv1i8_i8:
2573; CHECK:       # %bb.0: # %entry
2574; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
2575; CHECK-NEXT:    vmslt.vx v8, v8, a0
2576; CHECK-NEXT:    vmandn.mm v0, v0, v8
2577; CHECK-NEXT:    ret
2578entry:
2579  %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i8.i8(
2580    <vscale x 1 x i1> %0,
2581    <vscale x 1 x i8> %1,
2582    i8 %2,
2583    <vscale x 1 x i1> %0,
2584    iXLen %3)
2585
2586  ret <vscale x 1 x i1> %a
2587}
2588
2589define <vscale x 2 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, i8 %2, iXLen %3) nounwind {
2590; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv2i8_i8:
2591; CHECK:       # %bb.0: # %entry
2592; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
2593; CHECK-NEXT:    vmslt.vx v8, v8, a0
2594; CHECK-NEXT:    vmandn.mm v0, v0, v8
2595; CHECK-NEXT:    ret
2596entry:
2597  %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i8.i8(
2598    <vscale x 2 x i1> %0,
2599    <vscale x 2 x i8> %1,
2600    i8 %2,
2601    <vscale x 2 x i1> %0,
2602    iXLen %3)
2603
2604  ret <vscale x 2 x i1> %a
2605}
2606
2607define <vscale x 4 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, i8 %2, iXLen %3) nounwind {
2608; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv4i8_i8:
2609; CHECK:       # %bb.0: # %entry
2610; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
2611; CHECK-NEXT:    vmslt.vx v8, v8, a0
2612; CHECK-NEXT:    vmandn.mm v0, v0, v8
2613; CHECK-NEXT:    ret
2614entry:
2615  %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i8.i8(
2616    <vscale x 4 x i1> %0,
2617    <vscale x 4 x i8> %1,
2618    i8 %2,
2619    <vscale x 4 x i1> %0,
2620    iXLen %3)
2621
2622  ret <vscale x 4 x i1> %a
2623}
2624
2625define <vscale x 8 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, i8 %2, iXLen %3) nounwind {
2626; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv8i8_i8:
2627; CHECK:       # %bb.0: # %entry
2628; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
2629; CHECK-NEXT:    vmslt.vx v8, v8, a0
2630; CHECK-NEXT:    vmandn.mm v0, v0, v8
2631; CHECK-NEXT:    ret
2632entry:
2633  %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i8.i8(
2634    <vscale x 8 x i1> %0,
2635    <vscale x 8 x i8> %1,
2636    i8 %2,
2637    <vscale x 8 x i1> %0,
2638    iXLen %3)
2639
2640  ret <vscale x 8 x i1> %a
2641}
2642
2643define <vscale x 16 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, i8 %2, iXLen %3) nounwind {
2644; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv16i8_i8:
2645; CHECK:       # %bb.0: # %entry
2646; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
2647; CHECK-NEXT:    vmslt.vx v10, v8, a0
2648; CHECK-NEXT:    vmandn.mm v0, v0, v10
2649; CHECK-NEXT:    ret
2650entry:
2651  %a = call <vscale x 16 x i1> @llvm.riscv.vmsge.mask.nxv16i8.i8(
2652    <vscale x 16 x i1> %0,
2653    <vscale x 16 x i8> %1,
2654    i8 %2,
2655    <vscale x 16 x i1> %0,
2656    iXLen %3)
2657
2658  ret <vscale x 16 x i1> %a
2659}
2660
2661define <vscale x 32 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, i8 %2, iXLen %3) nounwind {
2662; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv32i8_i8:
2663; CHECK:       # %bb.0: # %entry
2664; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
2665; CHECK-NEXT:    vmslt.vx v12, v8, a0
2666; CHECK-NEXT:    vmandn.mm v0, v0, v12
2667; CHECK-NEXT:    ret
2668entry:
2669  %a = call <vscale x 32 x i1> @llvm.riscv.vmsge.mask.nxv32i8.i8(
2670    <vscale x 32 x i1> %0,
2671    <vscale x 32 x i8> %1,
2672    i8 %2,
2673    <vscale x 32 x i1> %0,
2674    iXLen %3)
2675
2676  ret <vscale x 32 x i1> %a
2677}
2678
2679define <vscale x 1 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, i16 %2, iXLen %3) nounwind {
2680; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv1i16_i16:
2681; CHECK:       # %bb.0: # %entry
2682; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
2683; CHECK-NEXT:    vmslt.vx v8, v8, a0
2684; CHECK-NEXT:    vmandn.mm v0, v0, v8
2685; CHECK-NEXT:    ret
2686entry:
2687  %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i16.i16(
2688    <vscale x 1 x i1> %0,
2689    <vscale x 1 x i16> %1,
2690    i16 %2,
2691    <vscale x 1 x i1> %0,
2692    iXLen %3)
2693
2694  ret <vscale x 1 x i1> %a
2695}
2696
2697define <vscale x 2 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, i16 %2, iXLen %3) nounwind {
2698; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv2i16_i16:
2699; CHECK:       # %bb.0: # %entry
2700; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
2701; CHECK-NEXT:    vmslt.vx v8, v8, a0
2702; CHECK-NEXT:    vmandn.mm v0, v0, v8
2703; CHECK-NEXT:    ret
2704entry:
2705  %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i16.i16(
2706    <vscale x 2 x i1> %0,
2707    <vscale x 2 x i16> %1,
2708    i16 %2,
2709    <vscale x 2 x i1> %0,
2710    iXLen %3)
2711
2712  ret <vscale x 2 x i1> %a
2713}
2714
2715define <vscale x 4 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, i16 %2, iXLen %3) nounwind {
2716; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv4i16_i16:
2717; CHECK:       # %bb.0: # %entry
2718; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
2719; CHECK-NEXT:    vmslt.vx v8, v8, a0
2720; CHECK-NEXT:    vmandn.mm v0, v0, v8
2721; CHECK-NEXT:    ret
2722entry:
2723  %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i16.i16(
2724    <vscale x 4 x i1> %0,
2725    <vscale x 4 x i16> %1,
2726    i16 %2,
2727    <vscale x 4 x i1> %0,
2728    iXLen %3)
2729
2730  ret <vscale x 4 x i1> %a
2731}
2732
2733define <vscale x 8 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, i16 %2, iXLen %3) nounwind {
2734; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv8i16_i16:
2735; CHECK:       # %bb.0: # %entry
2736; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
2737; CHECK-NEXT:    vmslt.vx v10, v8, a0
2738; CHECK-NEXT:    vmandn.mm v0, v0, v10
2739; CHECK-NEXT:    ret
2740entry:
2741  %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i16.i16(
2742    <vscale x 8 x i1> %0,
2743    <vscale x 8 x i16> %1,
2744    i16 %2,
2745    <vscale x 8 x i1> %0,
2746    iXLen %3)
2747
2748  ret <vscale x 8 x i1> %a
2749}
2750
2751define <vscale x 16 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, i16 %2, iXLen %3) nounwind {
2752; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv16i16_i16:
2753; CHECK:       # %bb.0: # %entry
2754; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
2755; CHECK-NEXT:    vmslt.vx v12, v8, a0
2756; CHECK-NEXT:    vmandn.mm v0, v0, v12
2757; CHECK-NEXT:    ret
2758entry:
2759  %a = call <vscale x 16 x i1> @llvm.riscv.vmsge.mask.nxv16i16.i16(
2760    <vscale x 16 x i1> %0,
2761    <vscale x 16 x i16> %1,
2762    i16 %2,
2763    <vscale x 16 x i1> %0,
2764    iXLen %3)
2765
2766  ret <vscale x 16 x i1> %a
2767}
2768
2769define <vscale x 1 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, i32 %2, iXLen %3) nounwind {
2770; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv1i32_i32:
2771; CHECK:       # %bb.0: # %entry
2772; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
2773; CHECK-NEXT:    vmslt.vx v8, v8, a0
2774; CHECK-NEXT:    vmandn.mm v0, v0, v8
2775; CHECK-NEXT:    ret
2776entry:
2777  %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i32.i32(
2778    <vscale x 1 x i1> %0,
2779    <vscale x 1 x i32> %1,
2780    i32 %2,
2781    <vscale x 1 x i1> %0,
2782    iXLen %3)
2783
2784  ret <vscale x 1 x i1> %a
2785}
2786
; Masked vmsge.vx with mask == maskedoff (e32, m1).  Expected lowering per
; the CHECK lines: vmslt.vx into a scratch register, then vmandn.mm folds
; NOT(slt) into the mask to produce the >= result.
define <vscale x 2 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, i32 %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv2i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vmslt.vx v8, v8, a0
; CHECK-NEXT:    vmandn.mm v0, v0, v8
; CHECK-NEXT:    ret
entry:
  ; %0 is deliberately both the maskedoff and the mask argument.
  %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i32.i32(
    <vscale x 2 x i1> %0,
    <vscale x 2 x i32> %1,
    i32 %2,
    <vscale x 2 x i1> %0,
    iXLen %3)

  ret <vscale x 2 x i1> %a
}
2804
; Masked vmsge.vx with mask == maskedoff (e32, m2).  Expected lowering per
; the CHECK lines: vmslt.vx into a scratch register, then vmandn.mm folds
; NOT(slt) into the mask to produce the >= result.
define <vscale x 4 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, i32 %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv4i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT:    vmslt.vx v10, v8, a0
; CHECK-NEXT:    vmandn.mm v0, v0, v10
; CHECK-NEXT:    ret
entry:
  ; %0 is deliberately both the maskedoff and the mask argument.
  %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i32.i32(
    <vscale x 4 x i1> %0,
    <vscale x 4 x i32> %1,
    i32 %2,
    <vscale x 4 x i1> %0,
    iXLen %3)

  ret <vscale x 4 x i1> %a
}
2822
; Masked vmsge.vx with mask == maskedoff (e32, m4).  Expected lowering per
; the CHECK lines: vmslt.vx into a scratch register, then vmandn.mm folds
; NOT(slt) into the mask to produce the >= result.
define <vscale x 8 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, i32 %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv8i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT:    vmslt.vx v12, v8, a0
; CHECK-NEXT:    vmandn.mm v0, v0, v12
; CHECK-NEXT:    ret
entry:
  ; %0 is deliberately both the maskedoff and the mask argument.
  %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i32.i32(
    <vscale x 8 x i1> %0,
    <vscale x 8 x i32> %1,
    i32 %2,
    <vscale x 8 x i1> %0,
    iXLen %3)

  ret <vscale x 8 x i1> %a
}
2840
; Masked vmsge.vx with mask == maskedoff, i64 scalar (e64, m1).  On RV32 the
; i64 scalar does not fit in one GPR, so per the RV32 CHECK lines it is
; stored to the stack (sw a0/a1), splatted with a strided load (vlse64.v),
; and compared with the masked vector form vmsle.vv.  On RV64 the scalar
; fits in a0, and the lowering is vmslt.vx followed by vmandn.mm
; (mask AND NOT(slt) == masked >=).
define <vscale x 1 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, i64 %2, iXLen %3) nounwind {
; RV32-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv1i64_i64:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, mu
; RV32-NEXT:    vlse64.v v9, (a0), zero
; RV32-NEXT:    vmsle.vv v0, v9, v8, v0.t
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv1i64_i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; RV64-NEXT:    vmslt.vx v8, v8, a0
; RV64-NEXT:    vmandn.mm v0, v0, v8
; RV64-NEXT:    ret
entry:
  ; %0 is deliberately both the maskedoff and the mask argument.
  %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i64.i64(
    <vscale x 1 x i1> %0,
    <vscale x 1 x i64> %1,
    i64 %2,
    <vscale x 1 x i1> %0,
    iXLen %3)

  ret <vscale x 1 x i1> %a
}
2870
; Masked vmsge.vx with mask == maskedoff, i64 scalar (e64, m2).  Per the RV32
; CHECK lines the scalar is splatted through the stack (sw + vlse64.v) and
; compared with masked vmsle.vv; a vmv1r.v copy of v0 is needed because at
; m2 the result register would otherwise clash with the mask.  On RV64 the
; lowering is vmslt.vx followed by vmandn.mm.
define <vscale x 2 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, i64 %2, iXLen %3) nounwind {
; RV32-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv2i64_i64:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, mu
; RV32-NEXT:    vlse64.v v12, (a0), zero
; RV32-NEXT:    vmv1r.v v10, v0
; RV32-NEXT:    vmsle.vv v10, v12, v8, v0.t
; RV32-NEXT:    vmv1r.v v0, v10
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv2i64_i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
; RV64-NEXT:    vmslt.vx v10, v8, a0
; RV64-NEXT:    vmandn.mm v0, v0, v10
; RV64-NEXT:    ret
entry:
  ; %0 is deliberately both the maskedoff and the mask argument.
  %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i64.i64(
    <vscale x 2 x i1> %0,
    <vscale x 2 x i64> %1,
    i64 %2,
    <vscale x 2 x i1> %0,
    iXLen %3)

  ret <vscale x 2 x i1> %a
}
2902
; Masked vmsge.vx with mask == maskedoff, i64 scalar (e64, m4).  Per the RV32
; CHECK lines the scalar is splatted through the stack (sw + vlse64.v) and
; compared with masked vmsle.vv, with a vmv1r.v copy of v0 around the compare.
; On RV64 the lowering is vmslt.vx followed by vmandn.mm
; (mask AND NOT(slt) == masked >=).
define <vscale x 4 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, i64 %2, iXLen %3) nounwind {
; RV32-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv4i64_i64:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, mu
; RV32-NEXT:    vlse64.v v16, (a0), zero
; RV32-NEXT:    vmv1r.v v12, v0
; RV32-NEXT:    vmsle.vv v12, v16, v8, v0.t
; RV32-NEXT:    vmv1r.v v0, v12
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv4i64_i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
; RV64-NEXT:    vmslt.vx v12, v8, a0
; RV64-NEXT:    vmandn.mm v0, v0, v12
; RV64-NEXT:    ret
entry:
  ; %0 is deliberately both the maskedoff and the mask argument.
  %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i64.i64(
    <vscale x 4 x i1> %0,
    <vscale x 4 x i64> %1,
    i64 %2,
    <vscale x 4 x i1> %0,
    iXLen %3)

  ret <vscale x 4 x i1> %a
}
2934