; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64

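; There is no vmsgeu instruction in the vector extension, so the intrinsic is
; expanded by the backend, as the checks below show: the vector-vector form is
; selected as vmsleu.vv with the operands swapped, and the vector-scalar form
; as vmsltu.vx followed by vmnot.m (or, when masked, a masked vmsltu.vx
; followed by vmxor.mm against the mask). On RV32, an i64 scalar operand is
; stored to the stack, broadcast with a zero-strided vlse64.v, and then
; compared with vmsleu.vv.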
declare <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  iXLen);

define <vscale x 1 x i1> @intrinsic_vmsgeu_vv_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vmsleu.vv v0, v9, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    iXLen %2)

  ret <vscale x 1 x i1> %a
}

declare <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i8(
  <vscale x 1 x i1>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  iXLen);

define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT:    vmv1r.v v11, v0
; CHECK-NEXT:    vmsleu.vv v0, v9, v8
; CHECK-NEXT:    vmsleu.vv v11, v10, v9, v0.t
; CHECK-NEXT:    vmv1r.v v0, v11
; CHECK-NEXT:    ret
entry:
  %mask = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i8(
    <vscale x 1 x i8> %1,
    <vscale x 1 x i8> %2,
    iXLen %4)
  %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i8(
    <vscale x 1 x i1> %0,
    <vscale x 1 x i8> %2,
    <vscale x 1 x i8> %3,
    <vscale x 1 x i1> %mask,
    iXLen %4)

  ret <vscale x 1 x i1> %a
}

declare <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i8(
  <vscale x 2 x i8>,
  <vscale x 2 x i8>,
  iXLen);

define <vscale x 2 x i1> @intrinsic_vmsgeu_vv_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv2i8_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT:    vmsleu.vv v0, v9, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i8(
    <vscale x 2 x i8> %0,
    <vscale x 2 x i8> %1,
    iXLen %2)

  ret <vscale x 2 x i1> %a
}

declare <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i8(
  <vscale x 2 x i1>,
  <vscale x 2 x i8>,
  <vscale x 2 x i8>,
  <vscale x 2 x i1>,
  iXLen);

define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv2i8_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
; CHECK-NEXT:    vmv1r.v v11, v0
; CHECK-NEXT:    vmsleu.vv v0, v9, v8
; CHECK-NEXT:    vmsleu.vv v11, v10, v9, v0.t
; CHECK-NEXT:    vmv1r.v v0, v11
; CHECK-NEXT:    ret
entry:
  %mask = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i8(
    <vscale x 2 x i8> %1,
    <vscale x 2 x i8> %2,
    iXLen %4)
  %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i8(
    <vscale x 2 x i1> %0,
    <vscale x 2 x i8> %2,
    <vscale x 2 x i8> %3,
    <vscale x 2 x i1> %mask,
    iXLen %4)

  ret <vscale x 2 x i1> %a
}

declare <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i8(
  <vscale x 4 x i8>,
  <vscale x 4 x i8>,
  iXLen);

define <vscale x 4 x i1> @intrinsic_vmsgeu_vv_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv4i8_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    vmsleu.vv v0, v9, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i8(
    <vscale x 4 x i8> %0,
    <vscale x 4 x i8> %1,
    iXLen %2)

  ret <vscale x 4 x i1> %a
}

declare <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i8(
  <vscale x 4 x i1>,
  <vscale x 4 x i8>,
  <vscale x 4 x i8>,
  <vscale x 4 x i1>,
  iXLen);

define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv4i8_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
; CHECK-NEXT:    vmv1r.v v11, v0
; CHECK-NEXT:    vmsleu.vv v0, v9, v8
; CHECK-NEXT:    vmsleu.vv v11, v10, v9, v0.t
; CHECK-NEXT:    vmv1r.v v0, v11
; CHECK-NEXT:    ret
entry:
  %mask = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i8(
    <vscale x 4 x i8> %1,
    <vscale x 4 x i8> %2,
    iXLen %4)
  %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i8(
    <vscale x 4 x i1> %0,
    <vscale x 4 x i8> %2,
    <vscale x 4 x i8> %3,
    <vscale x 4 x i1> %mask,
    iXLen %4)

  ret <vscale x 4 x i1> %a
}

declare <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i8(
  <vscale x 8 x i8>,
  <vscale x 8 x i8>,
  iXLen);

define <vscale x 8 x i1> @intrinsic_vmsgeu_vv_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv8i8_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT:    vmsleu.vv v0, v9, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i8(
    <vscale x 8 x i8> %0,
    <vscale x 8 x i8> %1,
    iXLen %2)

  ret <vscale x 8 x i1> %a
}

declare <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i8(
  <vscale x 8 x i1>,
  <vscale x 8 x i8>,
  <vscale x 8 x i8>,
  <vscale x 8 x i1>,
  iXLen);

define <vscale x 8 x i1> @intrinsic_vmsgeu_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv8i8_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
; CHECK-NEXT:    vmv1r.v v11, v0
; CHECK-NEXT:    vmsleu.vv v0, v9, v8
; CHECK-NEXT:    vmsleu.vv v11, v10, v9, v0.t
; CHECK-NEXT:    vmv.v.v v0, v11
; CHECK-NEXT:    ret
entry:
  %mask = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i8(
    <vscale x 8 x i8> %1,
    <vscale x 8 x i8> %2,
    iXLen %4)
  %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i8(
    <vscale x 8 x i1> %0,
    <vscale x 8 x i8> %2,
    <vscale x 8 x i8> %3,
    <vscale x 8 x i1> %mask,
    iXLen %4)

  ret <vscale x 8 x i1> %a
}

declare <vscale x 16 x i1> @llvm.riscv.vmsgeu.nxv16i8(
  <vscale x 16 x i8>,
  <vscale x 16 x i8>,
  iXLen);

define <vscale x 16 x i1> @intrinsic_vmsgeu_vv_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv16i8_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT:    vmsleu.vv v0, v10, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.nxv16i8(
    <vscale x 16 x i8> %0,
    <vscale x 16 x i8> %1,
    iXLen %2)

  ret <vscale x 16 x i1> %a
}

declare <vscale x 16 x i1> @llvm.riscv.vmsgeu.mask.nxv16i8(
  <vscale x 16 x i1>,
  <vscale x 16 x i8>,
  <vscale x 16 x i8>,
  <vscale x 16 x i1>,
  iXLen);

define <vscale x 16 x i1> @intrinsic_vmsgeu_mask_vv_nxv16i8_nxv16i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv16i8_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
; CHECK-NEXT:    vmv1r.v v14, v0
; CHECK-NEXT:    vmsleu.vv v0, v10, v8
; CHECK-NEXT:    vmsleu.vv v14, v12, v10, v0.t
; CHECK-NEXT:    vmv1r.v v0, v14
; CHECK-NEXT:    ret
entry:
  %mask = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.nxv16i8(
    <vscale x 16 x i8> %1,
    <vscale x 16 x i8> %2,
    iXLen %4)
  %a = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.mask.nxv16i8(
    <vscale x 16 x i1> %0,
    <vscale x 16 x i8> %2,
    <vscale x 16 x i8> %3,
    <vscale x 16 x i1> %mask,
    iXLen %4)

  ret <vscale x 16 x i1> %a
}

declare <vscale x 32 x i1> @llvm.riscv.vmsgeu.nxv32i8(
  <vscale x 32 x i8>,
  <vscale x 32 x i8>,
  iXLen);

define <vscale x 32 x i1> @intrinsic_vmsgeu_vv_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv32i8_nxv32i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT:    vmsleu.vv v0, v12, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i1> @llvm.riscv.vmsgeu.nxv32i8(
    <vscale x 32 x i8> %0,
    <vscale x 32 x i8> %1,
    iXLen %2)

  ret <vscale x 32 x i1> %a
}

declare <vscale x 32 x i1> @llvm.riscv.vmsgeu.mask.nxv32i8(
  <vscale x 32 x i1>,
  <vscale x 32 x i8>,
  <vscale x 32 x i8>,
  <vscale x 32 x i1>,
  iXLen);

define <vscale x 32 x i1> @intrinsic_vmsgeu_mask_vv_nxv32i8_nxv32i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv32i8_nxv32i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
; CHECK-NEXT:    vmv1r.v v20, v0
; CHECK-NEXT:    vmsleu.vv v0, v12, v8
; CHECK-NEXT:    vmsleu.vv v20, v16, v12, v0.t
; CHECK-NEXT:    vmv1r.v v0, v20
; CHECK-NEXT:    ret
entry:
  %mask = call <vscale x 32 x i1> @llvm.riscv.vmsgeu.nxv32i8(
    <vscale x 32 x i8> %1,
    <vscale x 32 x i8> %2,
    iXLen %4)
  %a = call <vscale x 32 x i1> @llvm.riscv.vmsgeu.mask.nxv32i8(
    <vscale x 32 x i1> %0,
    <vscale x 32 x i8> %2,
    <vscale x 32 x i8> %3,
    <vscale x 32 x i1> %mask,
    iXLen %4)

  ret <vscale x 32 x i1> %a
}

declare <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i16(
  <vscale x 1 x i16>,
  <vscale x 1 x i16>,
  iXLen);

define <vscale x 1 x i1> @intrinsic_vmsgeu_vv_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv1i16_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vmsleu.vv v0, v9, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i16(
    <vscale x 1 x i16> %0,
    <vscale x 1 x i16> %1,
    iXLen %2)

  ret <vscale x 1 x i1> %a
}

declare <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i16(
  <vscale x 1 x i1>,
  <vscale x 1 x i16>,
  <vscale x 1 x i16>,
  <vscale x 1 x i1>,
  iXLen);

define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv1i16_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT:    vmv1r.v v11, v0
; CHECK-NEXT:    vmsleu.vv v0, v9, v8
; CHECK-NEXT:    vmsleu.vv v11, v10, v9, v0.t
; CHECK-NEXT:    vmv1r.v v0, v11
; CHECK-NEXT:    ret
entry:
  %mask = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i16(
    <vscale x 1 x i16> %1,
    <vscale x 1 x i16> %2,
    iXLen %4)
  %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i16(
    <vscale x 1 x i1> %0,
    <vscale x 1 x i16> %2,
    <vscale x 1 x i16> %3,
    <vscale x 1 x i1> %mask,
    iXLen %4)

  ret <vscale x 1 x i1> %a
}

declare <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i16(
  <vscale x 2 x i16>,
  <vscale x 2 x i16>,
  iXLen);

define <vscale x 2 x i1> @intrinsic_vmsgeu_vv_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv2i16_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vmsleu.vv v0, v9, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i16(
    <vscale x 2 x i16> %0,
    <vscale x 2 x i16> %1,
    iXLen %2)

  ret <vscale x 2 x i1> %a
}

declare <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i16(
  <vscale x 2 x i1>,
  <vscale x 2 x i16>,
  <vscale x 2 x i16>,
  <vscale x 2 x i1>,
  iXLen);

define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv2i16_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT:    vmv1r.v v11, v0
; CHECK-NEXT:    vmsleu.vv v0, v9, v8
; CHECK-NEXT:    vmsleu.vv v11, v10, v9, v0.t
; CHECK-NEXT:    vmv1r.v v0, v11
; CHECK-NEXT:    ret
entry:
  %mask = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i16(
    <vscale x 2 x i16> %1,
    <vscale x 2 x i16> %2,
    iXLen %4)
  %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i16(
    <vscale x 2 x i1> %0,
    <vscale x 2 x i16> %2,
    <vscale x 2 x i16> %3,
    <vscale x 2 x i1> %mask,
    iXLen %4)

  ret <vscale x 2 x i1> %a
}

declare <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i16(
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  iXLen);

define <vscale x 4 x i1> @intrinsic_vmsgeu_vv_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv4i16_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vmsleu.vv v0, v9, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i16(
    <vscale x 4 x i16> %0,
    <vscale x 4 x i16> %1,
    iXLen %2)

  ret <vscale x 4 x i1> %a
}

declare <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i16(
  <vscale x 4 x i1>,
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  <vscale x 4 x i1>,
  iXLen);

define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv4i16_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT:    vmv1r.v v11, v0
; CHECK-NEXT:    vmsleu.vv v0, v9, v8
; CHECK-NEXT:    vmsleu.vv v11, v10, v9, v0.t
; CHECK-NEXT:    vmv.v.v v0, v11
; CHECK-NEXT:    ret
entry:
  %mask = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i16(
    <vscale x 4 x i16> %1,
    <vscale x 4 x i16> %2,
    iXLen %4)
  %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i16(
    <vscale x 4 x i1> %0,
    <vscale x 4 x i16> %2,
    <vscale x 4 x i16> %3,
    <vscale x 4 x i1> %mask,
    iXLen %4)

  ret <vscale x 4 x i1> %a
}

declare <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i16(
  <vscale x 8 x i16>,
  <vscale x 8 x i16>,
  iXLen);

define <vscale x 8 x i1> @intrinsic_vmsgeu_vv_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv8i16_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vmsleu.vv v0, v10, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i16(
    <vscale x 8 x i16> %0,
    <vscale x 8 x i16> %1,
    iXLen %2)

  ret <vscale x 8 x i1> %a
}

declare <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i16(
  <vscale x 8 x i1>,
  <vscale x 8 x i16>,
  <vscale x 8 x i16>,
  <vscale x 8 x i1>,
  iXLen);

define <vscale x 8 x i1> @intrinsic_vmsgeu_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv8i16_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT:    vmv1r.v v14, v0
; CHECK-NEXT:    vmsleu.vv v0, v10, v8
; CHECK-NEXT:    vmsleu.vv v14, v12, v10, v0.t
; CHECK-NEXT:    vmv1r.v v0, v14
; CHECK-NEXT:    ret
entry:
  %mask = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i16(
    <vscale x 8 x i16> %1,
    <vscale x 8 x i16> %2,
    iXLen %4)
  %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i16(
    <vscale x 8 x i1> %0,
    <vscale x 8 x i16> %2,
    <vscale x 8 x i16> %3,
    <vscale x 8 x i1> %mask,
    iXLen %4)

  ret <vscale x 8 x i1> %a
}

declare <vscale x 16 x i1> @llvm.riscv.vmsgeu.nxv16i16(
  <vscale x 16 x i16>,
  <vscale x 16 x i16>,
  iXLen);

define <vscale x 16 x i1> @intrinsic_vmsgeu_vv_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv16i16_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    vmsleu.vv v0, v12, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.nxv16i16(
    <vscale x 16 x i16> %0,
    <vscale x 16 x i16> %1,
    iXLen %2)

  ret <vscale x 16 x i1> %a
}

declare <vscale x 16 x i1> @llvm.riscv.vmsgeu.mask.nxv16i16(
  <vscale x 16 x i1>,
  <vscale x 16 x i16>,
  <vscale x 16 x i16>,
  <vscale x 16 x i1>,
  iXLen);

define <vscale x 16 x i1> @intrinsic_vmsgeu_mask_vv_nxv16i16_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv16i16_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT:    vmv1r.v v20, v0
; CHECK-NEXT:    vmsleu.vv v0, v12, v8
; CHECK-NEXT:    vmsleu.vv v20, v16, v12, v0.t
; CHECK-NEXT:    vmv1r.v v0, v20
; CHECK-NEXT:    ret
entry:
  %mask = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.nxv16i16(
    <vscale x 16 x i16> %1,
    <vscale x 16 x i16> %2,
    iXLen %4)
  %a = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.mask.nxv16i16(
    <vscale x 16 x i1> %0,
    <vscale x 16 x i16> %2,
    <vscale x 16 x i16> %3,
    <vscale x 16 x i1> %mask,
    iXLen %4)

  ret <vscale x 16 x i1> %a
}

declare <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i32(
  <vscale x 1 x i32>,
  <vscale x 1 x i32>,
  iXLen);

define <vscale x 1 x i1> @intrinsic_vmsgeu_vv_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv1i32_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vmsleu.vv v0, v9, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i32(
    <vscale x 1 x i32> %0,
    <vscale x 1 x i32> %1,
    iXLen %2)

  ret <vscale x 1 x i1> %a
}

declare <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i32(
  <vscale x 1 x i1>,
  <vscale x 1 x i32>,
  <vscale x 1 x i32>,
  <vscale x 1 x i1>,
  iXLen);

define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv1i32_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT:    vmv1r.v v11, v0
; CHECK-NEXT:    vmsleu.vv v0, v9, v8
; CHECK-NEXT:    vmsleu.vv v11, v10, v9, v0.t
; CHECK-NEXT:    vmv1r.v v0, v11
; CHECK-NEXT:    ret
entry:
  %mask = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i32(
    <vscale x 1 x i32> %1,
    <vscale x 1 x i32> %2,
    iXLen %4)
  %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i32(
    <vscale x 1 x i1> %0,
    <vscale x 1 x i32> %2,
    <vscale x 1 x i32> %3,
    <vscale x 1 x i1> %mask,
    iXLen %4)

  ret <vscale x 1 x i1> %a
}

declare <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i32(
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  iXLen);

define <vscale x 2 x i1> @intrinsic_vmsgeu_vv_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv2i32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vmsleu.vv v0, v9, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i32(
    <vscale x 2 x i32> %0,
    <vscale x 2 x i32> %1,
    iXLen %2)

  ret <vscale x 2 x i1> %a
}

declare <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i32(
  <vscale x 2 x i1>,
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  <vscale x 2 x i1>,
  iXLen);

define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv2i32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT:    vmv1r.v v11, v0
; CHECK-NEXT:    vmsleu.vv v0, v9, v8
; CHECK-NEXT:    vmsleu.vv v11, v10, v9, v0.t
; CHECK-NEXT:    vmv.v.v v0, v11
; CHECK-NEXT:    ret
entry:
  %mask = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i32(
    <vscale x 2 x i32> %1,
    <vscale x 2 x i32> %2,
    iXLen %4)
  %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i32(
    <vscale x 2 x i1> %0,
    <vscale x 2 x i32> %2,
    <vscale x 2 x i32> %3,
    <vscale x 2 x i1> %mask,
    iXLen %4)

  ret <vscale x 2 x i1> %a
}

declare <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i32(
  <vscale x 4 x i32>,
  <vscale x 4 x i32>,
  iXLen);

define <vscale x 4 x i1> @intrinsic_vmsgeu_vv_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv4i32_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vmsleu.vv v0, v10, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i32(
    <vscale x 4 x i32> %0,
    <vscale x 4 x i32> %1,
    iXLen %2)

  ret <vscale x 4 x i1> %a
}

declare <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i32(
  <vscale x 4 x i1>,
  <vscale x 4 x i32>,
  <vscale x 4 x i32>,
  <vscale x 4 x i1>,
  iXLen);

define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv4i32_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT:    vmv1r.v v14, v0
; CHECK-NEXT:    vmsleu.vv v0, v10, v8
; CHECK-NEXT:    vmsleu.vv v14, v12, v10, v0.t
; CHECK-NEXT:    vmv1r.v v0, v14
; CHECK-NEXT:    ret
entry:
  %mask = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i32(
    <vscale x 4 x i32> %1,
    <vscale x 4 x i32> %2,
    iXLen %4)
  %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i32(
    <vscale x 4 x i1> %0,
    <vscale x 4 x i32> %2,
    <vscale x 4 x i32> %3,
    <vscale x 4 x i1> %mask,
    iXLen %4)

  ret <vscale x 4 x i1> %a
}

declare <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i32(
  <vscale x 8 x i32>,
  <vscale x 8 x i32>,
  iXLen);

define <vscale x 8 x i1> @intrinsic_vmsgeu_vv_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv8i32_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vmsleu.vv v0, v12, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i32(
    <vscale x 8 x i32> %0,
    <vscale x 8 x i32> %1,
    iXLen %2)

  ret <vscale x 8 x i1> %a
}

declare <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i32(
  <vscale x 8 x i1>,
  <vscale x 8 x i32>,
  <vscale x 8 x i32>,
  <vscale x 8 x i1>,
  iXLen);

define <vscale x 8 x i1> @intrinsic_vmsgeu_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv8i32_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT:    vmv1r.v v20, v0
; CHECK-NEXT:    vmsleu.vv v0, v12, v8
; CHECK-NEXT:    vmsleu.vv v20, v16, v12, v0.t
; CHECK-NEXT:    vmv1r.v v0, v20
; CHECK-NEXT:    ret
entry:
  %mask = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i32(
    <vscale x 8 x i32> %1,
    <vscale x 8 x i32> %2,
    iXLen %4)
  %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i32(
    <vscale x 8 x i1> %0,
    <vscale x 8 x i32> %2,
    <vscale x 8 x i32> %3,
    <vscale x 8 x i1> %mask,
    iXLen %4)

  ret <vscale x 8 x i1> %a
}

declare <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i64(
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  iXLen);

define <vscale x 1 x i1> @intrinsic_vmsgeu_vv_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv1i64_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vmsleu.vv v0, v9, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i64(
    <vscale x 1 x i64> %0,
    <vscale x 1 x i64> %1,
    iXLen %2)

  ret <vscale x 1 x i1> %a
}

declare <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i64(
  <vscale x 1 x i1>,
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  <vscale x 1 x i1>,
  iXLen);

define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i64> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv1i64_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT:    vmv1r.v v11, v0
; CHECK-NEXT:    vmsleu.vv v0, v9, v8
; CHECK-NEXT:    vmsleu.vv v11, v10, v9, v0.t
; CHECK-NEXT:    vmv.v.v v0, v11
; CHECK-NEXT:    ret
entry:
  %mask = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i64(
    <vscale x 1 x i64> %1,
    <vscale x 1 x i64> %2,
    iXLen %4)
  %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i64(
    <vscale x 1 x i1> %0,
    <vscale x 1 x i64> %2,
    <vscale x 1 x i64> %3,
    <vscale x 1 x i1> %mask,
    iXLen %4)

  ret <vscale x 1 x i1> %a
}

declare <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i64(
  <vscale x 2 x i64>,
  <vscale x 2 x i64>,
  iXLen);

define <vscale x 2 x i1> @intrinsic_vmsgeu_vv_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv2i64_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vmsleu.vv v0, v10, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i64(
    <vscale x 2 x i64> %0,
    <vscale x 2 x i64> %1,
    iXLen %2)

  ret <vscale x 2 x i1> %a
}

declare <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i64(
  <vscale x 2 x i1>,
  <vscale x 2 x i64>,
  <vscale x 2 x i64>,
  <vscale x 2 x i1>,
  iXLen);

define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i64> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv2i64_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT:    vmv1r.v v14, v0
; CHECK-NEXT:    vmsleu.vv v0, v10, v8
; CHECK-NEXT:    vmsleu.vv v14, v12, v10, v0.t
; CHECK-NEXT:    vmv1r.v v0, v14
; CHECK-NEXT:    ret
entry:
  %mask = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i64(
    <vscale x 2 x i64> %1,
    <vscale x 2 x i64> %2,
    iXLen %4)
  %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i64(
    <vscale x 2 x i1> %0,
    <vscale x 2 x i64> %2,
    <vscale x 2 x i64> %3,
    <vscale x 2 x i1> %mask,
    iXLen %4)

  ret <vscale x 2 x i1> %a
}

declare <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i64(
  <vscale x 4 x i64>,
  <vscale x 4 x i64>,
  iXLen);

define <vscale x 4 x i1> @intrinsic_vmsgeu_vv_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv4i64_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    vmsleu.vv v0, v12, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i64(
    <vscale x 4 x i64> %0,
    <vscale x 4 x i64> %1,
    iXLen %2)

  ret <vscale x 4 x i1> %a
}

declare <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i64(
  <vscale x 4 x i1>,
  <vscale x 4 x i64>,
  <vscale x 4 x i64>,
  <vscale x 4 x i1>,
  iXLen);

define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i64> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv4i64_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT:    vmv1r.v v20, v0
; CHECK-NEXT:    vmsleu.vv v0, v12, v8
; CHECK-NEXT:    vmsleu.vv v20, v16, v12, v0.t
; CHECK-NEXT:    vmv1r.v v0, v20
; CHECK-NEXT:    ret
entry:
  %mask = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i64(
    <vscale x 4 x i64> %1,
    <vscale x 4 x i64> %2,
    iXLen %4)
  %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i64(
    <vscale x 4 x i1> %0,
    <vscale x 4 x i64> %2,
    <vscale x 4 x i64> %3,
    <vscale x 4 x i1> %mask,
    iXLen %4)

  ret <vscale x 4 x i1> %a
}

declare <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i8.i8(
  <vscale x 1 x i8>,
  i8,
  iXLen);

define <vscale x 1 x i1> @intrinsic_vmsgeu_vx_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv1i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vmsltu.vx v8, v8, a0
; CHECK-NEXT:    vmnot.m v0, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i8.i8(
    <vscale x 1 x i8> %0,
    i8 %1,
    iXLen %2)

  ret <vscale x 1 x i1> %a
}

declare <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i8.i8(
  <vscale x 1 x i1>,
  <vscale x 1 x i8>,
  i8,
  <vscale x 1 x i1>,
  iXLen);

define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv1i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT:    vmv1r.v v10, v0
; CHECK-NEXT:    vmv1r.v v0, v9
; CHECK-NEXT:    vmsltu.vx v10, v8, a0, v0.t
; CHECK-NEXT:    vmxor.mm v0, v10, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i8.i8(
    <vscale x 1 x i1> %0,
    <vscale x 1 x i8> %1,
    i8 %2,
    <vscale x 1 x i1> %3,
    iXLen %4)

  ret <vscale x 1 x i1> %a
}

declare <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i8.i8(
  <vscale x 2 x i8>,
  i8,
  iXLen);

define <vscale x 2 x i1> @intrinsic_vmsgeu_vx_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv2i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT:    vmsltu.vx v8, v8, a0
; CHECK-NEXT:    vmnot.m v0, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i8.i8(
    <vscale x 2 x i8> %0,
    i8 %1,
    iXLen %2)

  ret <vscale x 2 x i1> %a
}

declare <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i8.i8(
  <vscale x 2 x i1>,
  <vscale x 2 x i8>,
  i8,
  <vscale x 2 x i1>,
  iXLen);

define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv2i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT:    vmv1r.v v10, v0
; CHECK-NEXT:    vmv1r.v v0, v9
; CHECK-NEXT:    vmsltu.vx v10, v8, a0, v0.t
; CHECK-NEXT:    vmxor.mm v0, v10, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i8.i8(
    <vscale x 2 x i1> %0,
    <vscale x 2 x i8> %1,
    i8 %2,
    <vscale x 2 x i1> %3,
    iXLen %4)

  ret <vscale x 2 x i1> %a
}

declare <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i8.i8(
  <vscale x 4 x i8>,
  i8,
  iXLen);

define <vscale x 4 x i1> @intrinsic_vmsgeu_vx_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv4i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vmsltu.vx v8, v8, a0
; CHECK-NEXT:    vmnot.m v0, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i8.i8(
    <vscale x 4 x i8> %0,
    i8 %1,
    iXLen %2)

  ret <vscale x 4 x i1> %a
}

declare <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i8.i8(
  <vscale x 4 x i1>,
  <vscale x 4 x i8>,
  i8,
  <vscale x 4 x i1>,
  iXLen);

define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv4i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT:    vmv1r.v v10, v0
; CHECK-NEXT:    vmv1r.v v0, v9
; CHECK-NEXT:    vmsltu.vx v10, v8, a0, v0.t
; CHECK-NEXT:    vmxor.mm v0, v10, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i8.i8(
    <vscale x 4 x i1> %0,
    <vscale x 4 x i8> %1,
    i8 %2,
    <vscale x 4 x i1> %3,
    iXLen %4)

  ret <vscale x 4 x i1> %a
}

declare <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i8.i8(
  <vscale x 8 x i8>,
  i8,
  iXLen);

define <vscale x 8 x i1> @intrinsic_vmsgeu_vx_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv8i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT:    vmsltu.vx v8, v8, a0
; CHECK-NEXT:    vmnot.m v0, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i8.i8(
    <vscale x 8 x i8> %0,
    i8 %1,
    iXLen %2)

  ret <vscale x 8 x i1> %a
}

declare <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i8.i8(
  <vscale x 8 x i1>,
  <vscale x 8 x i8>,
  i8,
  <vscale x 8 x i1>,
  iXLen);

define <vscale x 8 x i1> @intrinsic_vmsgeu_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv8i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT:    vmv1r.v v10, v0
; CHECK-NEXT:    vmv1r.v v0, v9
; CHECK-NEXT:    vmsltu.vx v10, v8, a0, v0.t
; CHECK-NEXT:    vmxor.mm v0, v10, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i8.i8(
    <vscale x 8 x i1> %0,
    <vscale x 8 x i8> %1,
    i8 %2,
    <vscale x 8 x i1> %3,
    iXLen %4)

  ret <vscale x 8 x i1> %a
}

declare <vscale x 16 x i1> @llvm.riscv.vmsgeu.nxv16i8.i8(
  <vscale x 16 x i8>,
  i8,
  iXLen);

define <vscale x 16 x i1> @intrinsic_vmsgeu_vx_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv16i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT:    vmsltu.vx v10, v8, a0
; CHECK-NEXT:    vmnot.m v0, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.nxv16i8.i8(
    <vscale x 16 x i8> %0,
    i8 %1,
    iXLen %2)

  ret <vscale x 16 x i1> %a
}

declare <vscale x 16 x i1> @llvm.riscv.vmsgeu.mask.nxv16i8.i8(
  <vscale x 16 x i1>,
  <vscale x 16 x i8>,
  i8,
  <vscale x 16 x i1>,
  iXLen);

define <vscale x 16 x i1> @intrinsic_vmsgeu_mask_vx_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv16i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT:    vmv1r.v v11, v0
; CHECK-NEXT:    vmv1r.v v0, v10
; CHECK-NEXT:    vmsltu.vx v11, v8, a0, v0.t
; CHECK-NEXT:    vmxor.mm v0, v11, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.mask.nxv16i8.i8(
    <vscale x 16 x i1> %0,
    <vscale x 16 x i8> %1,
    i8 %2,
    <vscale x 16 x i1> %3,
    iXLen %4)

  ret <vscale x 16 x i1> %a
}

declare <vscale x 32 x i1> @llvm.riscv.vmsgeu.nxv32i8.i8(
  <vscale x 32 x i8>,
  i8,
  iXLen);

define <vscale x 32 x i1> @intrinsic_vmsgeu_vx_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv32i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT:    vmsltu.vx v12, v8, a0
; CHECK-NEXT:    vmnot.m v0, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i1> @llvm.riscv.vmsgeu.nxv32i8.i8(
    <vscale x 32 x i8> %0,
    i8 %1,
    iXLen %2)

  ret <vscale x 32 x i1> %a
}

declare <vscale x 32 x i1> @llvm.riscv.vmsgeu.mask.nxv32i8.i8(
  <vscale x 32 x i1>,
  <vscale x 32 x i8>,
  i8,
  <vscale x 32 x i1>,
  iXLen);

define <vscale x 32 x i1> @intrinsic_vmsgeu_mask_vx_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv32i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT:    vmv1r.v v13, v0
; CHECK-NEXT:    vmv1r.v v0, v12
; CHECK-NEXT:    vmsltu.vx v13, v8, a0, v0.t
; CHECK-NEXT:    vmxor.mm v0, v13, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i1> @llvm.riscv.vmsgeu.mask.nxv32i8.i8(
    <vscale x 32 x i1> %0,
    <vscale x 32 x i8> %1,
    i8 %2,
    <vscale x 32 x i1> %3,
    iXLen %4)

  ret <vscale x 32 x i1> %a
}

declare <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i16.i16(
  <vscale x 1 x i16>,
  i16,
  iXLen);

define <vscale x 1 x i1> @intrinsic_vmsgeu_vx_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv1i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    vmsltu.vx v8, v8, a0
; CHECK-NEXT:    vmnot.m v0, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i16.i16(
    <vscale x 1 x i16> %0,
    i16 %1,
    iXLen %2)

  ret <vscale x 1 x i1> %a
}

declare <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i16.i16(
  <vscale x 1 x i1>,
  <vscale x 1 x i16>,
  i16,
  <vscale x 1 x i1>,
  iXLen);

define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vx_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv1i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT:    vmv1r.v v10, v0
; CHECK-NEXT:    vmv1r.v v0, v9
; CHECK-NEXT:    vmsltu.vx v10, v8, a0, v0.t
; CHECK-NEXT:    vmxor.mm v0, v10, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i16.i16(
    <vscale x 1 x i1> %0,
    <vscale x 1 x i16> %1,
    i16 %2,
    <vscale x 1 x i1> %3,
    iXLen %4)

  ret <vscale x 1 x i1> %a
}

declare <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i16.i16(
  <vscale x 2 x i16>,
  i16,
  iXLen);

define <vscale x 2 x i1> @intrinsic_vmsgeu_vx_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv2i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT:    vmsltu.vx v8, v8, a0
; CHECK-NEXT:    vmnot.m v0, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i16.i16(
    <vscale x 2 x i16> %0,
    i16 %1,
    iXLen %2)

  ret <vscale x 2 x i1> %a
}

declare <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i16.i16(
  <vscale x 2 x i1>,
  <vscale x 2 x i16>,
  i16,
  <vscale x 2 x i1>,
  iXLen);

define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vx_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv2i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT:    vmv1r.v v10, v0
; CHECK-NEXT:    vmv1r.v v0, v9
; CHECK-NEXT:    vmsltu.vx v10, v8, a0, v0.t
; CHECK-NEXT:    vmxor.mm v0, v10, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i16.i16(
    <vscale x 2 x i1> %0,
    <vscale x 2 x i16> %1,
    i16 %2,
    <vscale x 2 x i1> %3,
    iXLen %4)

  ret <vscale x 2 x i1> %a
}

declare <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i16.i16(
  <vscale x 4 x i16>,
  i16,
  iXLen);

define <vscale x 4 x i1> @intrinsic_vmsgeu_vx_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv4i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vmsltu.vx v8, v8, a0
; CHECK-NEXT:    vmnot.m v0, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i16.i16(
    <vscale x 4 x i16> %0,
    i16 %1,
    iXLen %2)

  ret <vscale x 4 x i1> %a
}

declare <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i16.i16(
  <vscale x 4 x i1>,
  <vscale x 4 x i16>,
  i16,
  <vscale x 4 x i1>,
  iXLen);

define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vx_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv4i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT:    vmv1r.v v10, v0
; CHECK-NEXT:    vmv1r.v v0, v9
; CHECK-NEXT:    vmsltu.vx v10, v8, a0, v0.t
; CHECK-NEXT:    vmxor.mm v0, v10, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i16.i16(
    <vscale x 4 x i1> %0,
    <vscale x 4 x i16> %1,
    i16 %2,
    <vscale x 4 x i1> %3,
    iXLen %4)

  ret <vscale x 4 x i1> %a
}

declare <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i16.i16(
  <vscale x 8 x i16>,
  i16,
  iXLen);

define <vscale x 8 x i1> @intrinsic_vmsgeu_vx_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv8i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT:    vmsltu.vx v10, v8, a0
; CHECK-NEXT:    vmnot.m v0, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i16.i16(
    <vscale x 8 x i16> %0,
    i16 %1,
    iXLen %2)

  ret <vscale x 8 x i1> %a
}

declare <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i16.i16(
  <vscale x 8 x i1>,
  <vscale x 8 x i16>,
  i16,
  <vscale x 8 x i1>,
  iXLen);

define <vscale x 8 x i1> @intrinsic_vmsgeu_mask_vx_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv8i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT:    vmv1r.v v11, v0
; CHECK-NEXT:    vmv1r.v v0, v10
; CHECK-NEXT:    vmsltu.vx v11, v8, a0, v0.t
; CHECK-NEXT:    vmxor.mm v0, v11, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i16.i16(
    <vscale x 8 x i1> %0,
    <vscale x 8 x i16> %1,
    i16 %2,
    <vscale x 8 x i1> %3,
    iXLen %4)

  ret <vscale x 8 x i1> %a
}

declare <vscale x 16 x i1> @llvm.riscv.vmsgeu.nxv16i16.i16(
  <vscale x 16 x i16>,
  i16,
  iXLen);

define <vscale x 16 x i1> @intrinsic_vmsgeu_vx_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv16i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT:    vmsltu.vx v12, v8, a0
; CHECK-NEXT:    vmnot.m v0, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.nxv16i16.i16(
    <vscale x 16 x i16> %0,
    i16 %1,
    iXLen %2)

  ret <vscale x 16 x i1> %a
}

declare <vscale x 16 x i1> @llvm.riscv.vmsgeu.mask.nxv16i16.i16(
  <vscale x 16 x i1>,
  <vscale x 16 x i16>,
  i16,
  <vscale x 16 x i1>,
  iXLen);

define <vscale x 16 x i1> @intrinsic_vmsgeu_mask_vx_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv16i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT:    vmv1r.v v13, v0
; CHECK-NEXT:    vmv1r.v v0, v12
; CHECK-NEXT:    vmsltu.vx v13, v8, a0, v0.t
; CHECK-NEXT:    vmxor.mm v0, v13, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.mask.nxv16i16.i16(
    <vscale x 16 x i1> %0,
    <vscale x 16 x i16> %1,
    i16 %2,
    <vscale x 16 x i1> %3,
    iXLen %4)

  ret <vscale x 16 x i1> %a
}

declare <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i32.i32(
  <vscale x 1 x i32>,
  i32,
  iXLen);

define <vscale x 1 x i1> @intrinsic_vmsgeu_vx_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv1i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    vmsltu.vx v8, v8, a0
; CHECK-NEXT:    vmnot.m v0, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i32.i32(
    <vscale x 1 x i32> %0,
    i32 %1,
    iXLen %2)

  ret <vscale x 1 x i1> %a
}

declare <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i32.i32(
  <vscale x 1 x i1>,
  <vscale x 1 x i32>,
  i32,
  <vscale x 1 x i1>,
  iXLen);

define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vx_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv1i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT:    vmv1r.v v10, v0
; CHECK-NEXT:    vmv1r.v v0, v9
; CHECK-NEXT:    vmsltu.vx v10, v8, a0, v0.t
; CHECK-NEXT:    vmxor.mm v0, v10, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i32.i32(
    <vscale x 1 x i1> %0,
    <vscale x 1 x i32> %1,
    i32 %2,
    <vscale x 1 x i1> %3,
    iXLen %4)

  ret <vscale x 1 x i1> %a
}

declare <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i32.i32(
  <vscale x 2 x i32>,
  i32,
  iXLen);

define <vscale x 2 x i1> @intrinsic_vmsgeu_vx_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv2i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vmsltu.vx v8, v8, a0
; CHECK-NEXT:    vmnot.m v0, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i32.i32(
    <vscale x 2 x i32> %0,
    i32 %1,
    iXLen %2)

  ret <vscale x 2 x i1> %a
}

declare <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i32.i32(
  <vscale x 2 x i1>,
  <vscale x 2 x i32>,
  i32,
  <vscale x 2 x i1>,
  iXLen);

define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vx_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv2i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT:    vmv1r.v v10, v0
; CHECK-NEXT:    vmv1r.v v0, v9
; CHECK-NEXT:    vmsltu.vx v10, v8, a0, v0.t
; CHECK-NEXT:    vmxor.mm v0, v10, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i32.i32(
    <vscale x 2 x i1> %0,
    <vscale x 2 x i32> %1,
    i32 %2,
    <vscale x 2 x i1> %3,
    iXLen %4)

  ret <vscale x 2 x i1> %a
}

declare <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i32.i32(
  <vscale x 4 x i32>,
  i32,
  iXLen);

define <vscale x 4 x i1> @intrinsic_vmsgeu_vx_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv4i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT:    vmsltu.vx v10, v8, a0
; CHECK-NEXT:    vmnot.m v0, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i32.i32(
    <vscale x 4 x i32> %0,
    i32 %1,
    iXLen %2)

  ret <vscale x 4 x i1> %a
}

declare <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i32.i32(
  <vscale x 4 x i1>,
  <vscale x 4 x i32>,
  i32,
  <vscale x 4 x i1>,
  iXLen);

define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vx_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv4i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT:    vmv1r.v v11, v0
; CHECK-NEXT:    vmv1r.v v0, v10
; CHECK-NEXT:    vmsltu.vx v11, v8, a0, v0.t
; CHECK-NEXT:    vmxor.mm v0, v11, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i32.i32(
    <vscale x 4 x i1> %0,
    <vscale x 4 x i32> %1,
    i32 %2,
    <vscale x 4 x i1> %3,
    iXLen %4)

  ret <vscale x 4 x i1> %a
}

declare <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i32.i32(
  <vscale x 8 x i32>,
  i32,
  iXLen);

define <vscale x 8 x i1> @intrinsic_vmsgeu_vx_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv8i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT:    vmsltu.vx v12, v8, a0
; CHECK-NEXT:    vmnot.m v0, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i32.i32(
    <vscale x 8 x i32> %0,
    i32 %1,
    iXLen %2)

  ret <vscale x 8 x i1> %a
}

declare <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i32.i32(
  <vscale x 8 x i1>,
  <vscale x 8 x i32>,
  i32,
  <vscale x 8 x i1>,
  iXLen);

define <vscale x 8 x i1> @intrinsic_vmsgeu_mask_vx_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv8i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT:    vmv1r.v v13, v0
; CHECK-NEXT:    vmv1r.v v0, v12
; CHECK-NEXT:    vmsltu.vx v13, v8, a0, v0.t
; CHECK-NEXT:    vmxor.mm v0, v13, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i32.i32(
    <vscale x 8 x i1> %0,
    <vscale x 8 x i32> %1,
    i32 %2,
    <vscale x 8 x i1> %3,
    iXLen %4)

  ret <vscale x 8 x i1> %a
}

declare <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i64.i64(
  <vscale x 1 x i64>,
  i64,
  iXLen);

define <vscale x 1 x i1> @intrinsic_vmsgeu_vx_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, iXLen %2) nounwind {
; RV32-LABEL: intrinsic_vmsgeu_vx_nxv1i64_i64:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
; RV32-NEXT:    vlse64.v v9, (a0), zero
; RV32-NEXT:    vmsleu.vv v0, v9, v8
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vmsgeu_vx_nxv1i64_i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; RV64-NEXT:    vmsltu.vx v8, v8, a0
; RV64-NEXT:    vmnot.m v0, v8
; RV64-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i64.i64(
    <vscale x 1 x i64> %0,
    i64 %1,
    iXLen %2)

  ret <vscale x 1 x i1> %a
}

declare <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i64.i64(
  <vscale x 1 x i1>,
  <vscale x 1 x i64>,
  i64,
  <vscale x 1 x i1>,
  iXLen);

define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vx_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; RV32-LABEL: intrinsic_vmsgeu_mask_vx_nxv1i64_i64:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, mu
; RV32-NEXT:    vlse64.v v11, (a0), zero
; RV32-NEXT:    vmv1r.v v10, v0
; RV32-NEXT:    vmv1r.v v0, v9
; RV32-NEXT:    vmsleu.vv v10, v11, v8, v0.t
; RV32-NEXT:    vmv.v.v v0, v10
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vmsgeu_mask_vx_nxv1i64_i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
; RV64-NEXT:    vmv1r.v v10, v0
; RV64-NEXT:    vmv1r.v v0, v9
; RV64-NEXT:    vmsltu.vx v10, v8, a0, v0.t
; RV64-NEXT:    vmxor.mm v0, v10, v9
; RV64-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i64.i64(
    <vscale x 1 x i1> %0,
    <vscale x 1 x i64> %1,
    i64 %2,
    <vscale x 1 x i1> %3,
    iXLen %4)

  ret <vscale x 1 x i1> %a
}

declare <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i64.i64(
  <vscale x 2 x i64>,
  i64,
  iXLen);

define <vscale x 2 x i1> @intrinsic_vmsgeu_vx_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, iXLen %2) nounwind {
; RV32-LABEL: intrinsic_vmsgeu_vx_nxv2i64_i64:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
; RV32-NEXT:    vlse64.v v10, (a0), zero
; RV32-NEXT:    vmsleu.vv v0, v10, v8
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vmsgeu_vx_nxv2i64_i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
; RV64-NEXT:    vmsltu.vx v10, v8, a0
; RV64-NEXT:    vmnot.m v0, v10
; RV64-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i64.i64(
    <vscale x 2 x i64> %0,
    i64 %1,
    iXLen %2)

  ret <vscale x 2 x i1> %a
}

declare <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i64.i64(
  <vscale x 2 x i1>,
  <vscale x 2 x i64>,
  i64,
  <vscale x 2 x i1>,
  iXLen);

define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vx_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; RV32-LABEL: intrinsic_vmsgeu_mask_vx_nxv2i64_i64:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, mu
; RV32-NEXT:    vlse64.v v12, (a0), zero
; RV32-NEXT:    vmv1r.v v11, v0
; RV32-NEXT:    vmv1r.v v0, v10
; RV32-NEXT:    vmsleu.vv v11, v12, v8, v0.t
; RV32-NEXT:    vmv1r.v v0, v11
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vmsgeu_mask_vx_nxv2i64_i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
; RV64-NEXT:    vmv1r.v v11, v0
; RV64-NEXT:    vmv1r.v v0, v10
; RV64-NEXT:    vmsltu.vx v11, v8, a0, v0.t
1782; RV64-NEXT:    vmxor.mm v0, v11, v10
1783; RV64-NEXT:    ret
1784entry:
1785  %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i64.i64(
1786    <vscale x 2 x i1> %0,
1787    <vscale x 2 x i64> %1,
1788    i64 %2,
1789    <vscale x 2 x i1> %3,
1790    iXLen %4)
1791
1792  ret <vscale x 2 x i1> %a
1793}
1794
1795declare <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i64.i64(
1796  <vscale x 4 x i64>,
1797  i64,
1798  iXLen);
1799
1800define <vscale x 4 x i1> @intrinsic_vmsgeu_vx_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, iXLen %2) nounwind {
1801; RV32-LABEL: intrinsic_vmsgeu_vx_nxv4i64_i64:
1802; RV32:       # %bb.0: # %entry
1803; RV32-NEXT:    addi sp, sp, -16
1804; RV32-NEXT:    sw a0, 8(sp)
1805; RV32-NEXT:    sw a1, 12(sp)
1806; RV32-NEXT:    addi a0, sp, 8
1807; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
1808; RV32-NEXT:    vlse64.v v12, (a0), zero
1809; RV32-NEXT:    vmsleu.vv v0, v12, v8
1810; RV32-NEXT:    addi sp, sp, 16
1811; RV32-NEXT:    ret
1812;
1813; RV64-LABEL: intrinsic_vmsgeu_vx_nxv4i64_i64:
1814; RV64:       # %bb.0: # %entry
1815; RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
1816; RV64-NEXT:    vmsltu.vx v12, v8, a0
1817; RV64-NEXT:    vmnot.m v0, v12
1818; RV64-NEXT:    ret
1819entry:
1820  %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i64.i64(
1821    <vscale x 4 x i64> %0,
1822    i64 %1,
1823    iXLen %2)
1824
1825  ret <vscale x 4 x i1> %a
1826}
1827
1828declare <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i64.i64(
1829  <vscale x 4 x i1>,
1830  <vscale x 4 x i64>,
1831  i64,
1832  <vscale x 4 x i1>,
1833  iXLen);
1834
1835define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vx_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
1836; RV32-LABEL: intrinsic_vmsgeu_mask_vx_nxv4i64_i64:
1837; RV32:       # %bb.0: # %entry
1838; RV32-NEXT:    addi sp, sp, -16
1839; RV32-NEXT:    sw a0, 8(sp)
1840; RV32-NEXT:    sw a1, 12(sp)
1841; RV32-NEXT:    addi a0, sp, 8
1842; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, mu
1843; RV32-NEXT:    vlse64.v v16, (a0), zero
1844; RV32-NEXT:    vmv1r.v v13, v0
1845; RV32-NEXT:    vmv1r.v v0, v12
1846; RV32-NEXT:    vmsleu.vv v13, v16, v8, v0.t
1847; RV32-NEXT:    vmv1r.v v0, v13
1848; RV32-NEXT:    addi sp, sp, 16
1849; RV32-NEXT:    ret
1850;
1851; RV64-LABEL: intrinsic_vmsgeu_mask_vx_nxv4i64_i64:
1852; RV64:       # %bb.0: # %entry
1853; RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
1854; RV64-NEXT:    vmv1r.v v13, v0
1855; RV64-NEXT:    vmv1r.v v0, v12
1856; RV64-NEXT:    vmsltu.vx v13, v8, a0, v0.t
1857; RV64-NEXT:    vmxor.mm v0, v13, v12
1858; RV64-NEXT:    ret
1859entry:
1860  %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i64.i64(
1861    <vscale x 4 x i1> %0,
1862    <vscale x 4 x i64> %1,
1863    i64 %2,
1864    <vscale x 4 x i1> %3,
1865    iXLen %4)
1866
1867  ret <vscale x 4 x i1> %a
1868}
1869
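; Immediate (vi) cases: there is no vmsgeu.vi encoding, so the checks show the compare
; emitted as vmsgtu.vi with the immediate decremented by one. A zero immediate makes the
; unsigned compare trivially true and folds to vmset.m (or a plain mask merge in the
; masked form); immediates outside the 5-bit signed-immediate range (e.g. 100) are
; materialized, already decremented, into a scalar register for vmsgtu.vx.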
1870define <vscale x 1 x i1> @intrinsic_vmsgeu_vi_nxv1i8_i8(<vscale x 1 x i8> %0, iXLen %1) nounwind {
1871; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv1i8_i8:
1872; CHECK:       # %bb.0: # %entry
1873; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
1874; CHECK-NEXT:    vmsgtu.vi v0, v8, -16
1875; CHECK-NEXT:    ret
1876entry:
1877  %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i8.i8(
1878    <vscale x 1 x i8> %0,
1879    i8 -15,
1880    iXLen %1)
1881
1882  ret <vscale x 1 x i1> %a
1883}
1884
1885define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
1886; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv1i8_i8:
1887; CHECK:       # %bb.0: # %entry
1888; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
1889; CHECK-NEXT:    vmv1r.v v10, v0
1890; CHECK-NEXT:    vmv1r.v v0, v9
1891; CHECK-NEXT:    vmsgtu.vi v10, v8, -15, v0.t
1892; CHECK-NEXT:    vmv1r.v v0, v10
1893; CHECK-NEXT:    ret
1894entry:
1895  %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i8.i8(
1896    <vscale x 1 x i1> %0,
1897    <vscale x 1 x i8> %1,
1898    i8 -14,
1899    <vscale x 1 x i1> %2,
1900    iXLen %3)
1901
1902  ret <vscale x 1 x i1> %a
1903}
1904
1905define <vscale x 2 x i1> @intrinsic_vmsgeu_vi_nxv2i8_i8(<vscale x 2 x i8> %0, iXLen %1) nounwind {
1906; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv2i8_i8:
1907; CHECK:       # %bb.0: # %entry
1908; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
1909; CHECK-NEXT:    vmsgtu.vi v0, v8, -14
1910; CHECK-NEXT:    ret
1911entry:
1912  %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i8.i8(
1913    <vscale x 2 x i8> %0,
1914    i8 -13,
1915    iXLen %1)
1916
1917  ret <vscale x 2 x i1> %a
1918}
1919
1920define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
1921; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv2i8_i8:
1922; CHECK:       # %bb.0: # %entry
1923; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
1924; CHECK-NEXT:    vmv1r.v v10, v0
1925; CHECK-NEXT:    vmv1r.v v0, v9
1926; CHECK-NEXT:    vmsgtu.vi v10, v8, -13, v0.t
1927; CHECK-NEXT:    vmv1r.v v0, v10
1928; CHECK-NEXT:    ret
1929entry:
1930  %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i8.i8(
1931    <vscale x 2 x i1> %0,
1932    <vscale x 2 x i8> %1,
1933    i8 -12,
1934    <vscale x 2 x i1> %2,
1935    iXLen %3)
1936
1937  ret <vscale x 2 x i1> %a
1938}
1939
1940define <vscale x 4 x i1> @intrinsic_vmsgeu_vi_nxv4i8_i8(<vscale x 4 x i8> %0, iXLen %1) nounwind {
1941; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv4i8_i8:
1942; CHECK:       # %bb.0: # %entry
1943; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
1944; CHECK-NEXT:    vmsgtu.vi v0, v8, -12
1945; CHECK-NEXT:    ret
1946entry:
1947  %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i8.i8(
1948    <vscale x 4 x i8> %0,
1949    i8 -11,
1950    iXLen %1)
1951
1952  ret <vscale x 4 x i1> %a
1953}
1954
1955define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
1956; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv4i8_i8:
1957; CHECK:       # %bb.0: # %entry
1958; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
1959; CHECK-NEXT:    vmv1r.v v10, v0
1960; CHECK-NEXT:    vmv1r.v v0, v9
1961; CHECK-NEXT:    vmsgtu.vi v10, v8, -11, v0.t
1962; CHECK-NEXT:    vmv1r.v v0, v10
1963; CHECK-NEXT:    ret
1964entry:
1965  %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i8.i8(
1966    <vscale x 4 x i1> %0,
1967    <vscale x 4 x i8> %1,
1968    i8 -10,
1969    <vscale x 4 x i1> %2,
1970    iXLen %3)
1971
1972  ret <vscale x 4 x i1> %a
1973}
1974
1975define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vi_nxv4i8_i8_1(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
1976; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv4i8_i8_1:
1977; CHECK:       # %bb.0: # %entry
1978; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
1979; CHECK-NEXT:    vmv1r.v v10, v0
1980; CHECK-NEXT:    li a0, 99
1981; CHECK-NEXT:    vmv1r.v v0, v9
1982; CHECK-NEXT:    vmsgtu.vx v10, v8, a0, v0.t
1983; CHECK-NEXT:    vmv1r.v v0, v10
1984; CHECK-NEXT:    ret
1985entry:
1986  %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i8.i8(
1987    <vscale x 4 x i1> %0,
1988    <vscale x 4 x i8> %1,
1989    i8 100,
1990    <vscale x 4 x i1> %2,
1991    iXLen %3)
1992
1993  ret <vscale x 4 x i1> %a
1994}
1995
1996define <vscale x 8 x i1> @intrinsic_vmsgeu_vi_nxv8i8_i8(<vscale x 8 x i8> %0, iXLen %1) nounwind {
1997; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv8i8_i8:
1998; CHECK:       # %bb.0: # %entry
1999; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
2000; CHECK-NEXT:    vmsgtu.vi v0, v8, -10
2001; CHECK-NEXT:    ret
2002entry:
2003  %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i8.i8(
2004    <vscale x 8 x i8> %0,
2005    i8 -9,
2006    iXLen %1)
2007
2008  ret <vscale x 8 x i1> %a
2009}
2010
2011define <vscale x 8 x i1> @intrinsic_vmsgeu_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
2012; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv8i8_i8:
2013; CHECK:       # %bb.0: # %entry
2014; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
2015; CHECK-NEXT:    vmv1r.v v10, v0
2016; CHECK-NEXT:    vmv1r.v v0, v9
2017; CHECK-NEXT:    vmsgtu.vi v10, v8, -9, v0.t
2018; CHECK-NEXT:    vmv.v.v v0, v10
2019; CHECK-NEXT:    ret
2020entry:
2021  %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i8.i8(
2022    <vscale x 8 x i1> %0,
2023    <vscale x 8 x i8> %1,
2024    i8 -8,
2025    <vscale x 8 x i1> %2,
2026    iXLen %3)
2027
2028  ret <vscale x 8 x i1> %a
2029}
2030
2031define <vscale x 16 x i1> @intrinsic_vmsgeu_vi_nxv16i8_i8(<vscale x 16 x i8> %0, iXLen %1) nounwind {
2032; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv16i8_i8:
2033; CHECK:       # %bb.0: # %entry
2034; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
2035; CHECK-NEXT:    vmsgtu.vi v0, v8, -8
2036; CHECK-NEXT:    ret
2037entry:
2038  %a = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.nxv16i8.i8(
2039    <vscale x 16 x i8> %0,
2040    i8 -7,
2041    iXLen %1)
2042
2043  ret <vscale x 16 x i1> %a
2044}
2045
2046define <vscale x 16 x i1> @intrinsic_vmsgeu_mask_vi_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
2047; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv16i8_i8:
2048; CHECK:       # %bb.0: # %entry
2049; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
2050; CHECK-NEXT:    vmv1r.v v11, v0
2051; CHECK-NEXT:    vmv1r.v v0, v10
2052; CHECK-NEXT:    vmsgtu.vi v11, v8, -7, v0.t
2053; CHECK-NEXT:    vmv1r.v v0, v11
2054; CHECK-NEXT:    ret
2055entry:
2056  %a = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.mask.nxv16i8.i8(
2057    <vscale x 16 x i1> %0,
2058    <vscale x 16 x i8> %1,
2059    i8 -6,
2060    <vscale x 16 x i1> %2,
2061    iXLen %3)
2062
2063  ret <vscale x 16 x i1> %a
2064}
2065
2066define <vscale x 32 x i1> @intrinsic_vmsgeu_vi_nxv32i8_i8(<vscale x 32 x i8> %0, iXLen %1) nounwind {
2067; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv32i8_i8:
2068; CHECK:       # %bb.0: # %entry
2069; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
2070; CHECK-NEXT:    vmsgtu.vi v0, v8, -6
2071; CHECK-NEXT:    ret
2072entry:
2073  %a = call <vscale x 32 x i1> @llvm.riscv.vmsgeu.nxv32i8.i8(
2074    <vscale x 32 x i8> %0,
2075    i8 -5,
2076    iXLen %1)
2077
2078  ret <vscale x 32 x i1> %a
2079}
2080
2081define <vscale x 32 x i1> @intrinsic_vmsgeu_mask_vi_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
2082; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv32i8_i8:
2083; CHECK:       # %bb.0: # %entry
2084; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
2085; CHECK-NEXT:    vmv1r.v v13, v0
2086; CHECK-NEXT:    vmv1r.v v0, v12
2087; CHECK-NEXT:    vmsgtu.vi v13, v8, -5, v0.t
2088; CHECK-NEXT:    vmv1r.v v0, v13
2089; CHECK-NEXT:    ret
2090entry:
2091  %a = call <vscale x 32 x i1> @llvm.riscv.vmsgeu.mask.nxv32i8.i8(
2092    <vscale x 32 x i1> %0,
2093    <vscale x 32 x i8> %1,
2094    i8 -4,
2095    <vscale x 32 x i1> %2,
2096    iXLen %3)
2097
2098  ret <vscale x 32 x i1> %a
2099}
2100
2101define <vscale x 1 x i1> @intrinsic_vmsgeu_vi_nxv1i16_i16(<vscale x 1 x i16> %0, iXLen %1) nounwind {
2102; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv1i16_i16:
2103; CHECK:       # %bb.0: # %entry
2104; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
2105; CHECK-NEXT:    vmsgtu.vi v0, v8, -4
2106; CHECK-NEXT:    ret
2107entry:
2108  %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i16.i16(
2109    <vscale x 1 x i16> %0,
2110    i16 -3,
2111    iXLen %1)
2112
2113  ret <vscale x 1 x i1> %a
2114}
2115
2116define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vi_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
2117; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv1i16_i16:
2118; CHECK:       # %bb.0: # %entry
2119; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
2120; CHECK-NEXT:    vmv1r.v v10, v0
2121; CHECK-NEXT:    vmv1r.v v0, v9
2122; CHECK-NEXT:    vmsgtu.vi v10, v8, -3, v0.t
2123; CHECK-NEXT:    vmv1r.v v0, v10
2124; CHECK-NEXT:    ret
2125entry:
2126  %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i16.i16(
2127    <vscale x 1 x i1> %0,
2128    <vscale x 1 x i16> %1,
2129    i16 -2,
2130    <vscale x 1 x i1> %2,
2131    iXLen %3)
2132
2133  ret <vscale x 1 x i1> %a
2134}
2135
2136define <vscale x 2 x i1> @intrinsic_vmsgeu_vi_nxv2i16_i16(<vscale x 2 x i16> %0, iXLen %1) nounwind {
2137; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv2i16_i16:
2138; CHECK:       # %bb.0: # %entry
2139; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
2140; CHECK-NEXT:    vmsgtu.vi v0, v8, -2
2141; CHECK-NEXT:    ret
2142entry:
2143  %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i16.i16(
2144    <vscale x 2 x i16> %0,
2145    i16 -1,
2146    iXLen %1)
2147
2148  ret <vscale x 2 x i1> %a
2149}
2150
2151define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vi_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
2152; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv2i16_i16:
2153; CHECK:       # %bb.0: # %entry
2154; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
2155; CHECK-NEXT:    vmor.mm v0, v9, v0
2156; CHECK-NEXT:    ret
2157entry:
2158  %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i16.i16(
2159    <vscale x 2 x i1> %0,
2160    <vscale x 2 x i16> %1,
2161    i16 0,
2162    <vscale x 2 x i1> %2,
2163    iXLen %3)
2164
2165  ret <vscale x 2 x i1> %a
2166}
2167
2168define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vi_nxv2i16_i16_same_mask_maskedoff(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
2169; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv2i16_i16_same_mask_maskedoff:
2170; CHECK:       # %bb.0: # %entry
2171; CHECK-NEXT:    ret
2172entry:
2173  %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i16.i16(
2174    <vscale x 2 x i1> %0,
2175    <vscale x 2 x i16> %1,
2176    i16 0,
2177    <vscale x 2 x i1> %0,
2178    iXLen %2)
2179
2180  ret <vscale x 2 x i1> %a
2181}
2182
2183define <vscale x 4 x i1> @intrinsic_vmsgeu_vi_nxv4i16_i16(<vscale x 4 x i16> %0, iXLen %1) nounwind {
2184; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv4i16_i16:
2185; CHECK:       # %bb.0: # %entry
2186; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
2187; CHECK-NEXT:    vmset.m v0
2188; CHECK-NEXT:    ret
2189entry:
2190  %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i16.i16(
2191    <vscale x 4 x i16> %0,
2192    i16 0,
2193    iXLen %1)
2194
2195  ret <vscale x 4 x i1> %a
2196}
2197
2198define <vscale x 4 x i1> @intrinsic_vmsgeu_vi_nxv4i16_i16_1(<vscale x 4 x i16> %0, iXLen %1) nounwind {
2199; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv4i16_i16_1:
2200; CHECK:       # %bb.0: # %entry
2201; CHECK-NEXT:    li a1, 99
2202; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
2203; CHECK-NEXT:    vmsgtu.vx v0, v8, a1
2204; CHECK-NEXT:    ret
2205entry:
2206  %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i16.i16(
2207    <vscale x 4 x i16> %0,
2208    i16 100,
2209    iXLen %1)
2210
2211  ret <vscale x 4 x i1> %a
2212}
2213
2214define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vi_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
2215; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv4i16_i16:
2216; CHECK:       # %bb.0: # %entry
2217; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
2218; CHECK-NEXT:    vmv1r.v v10, v0
2219; CHECK-NEXT:    vmv1r.v v0, v9
2220; CHECK-NEXT:    vmsgtu.vi v10, v8, 0, v0.t
2221; CHECK-NEXT:    vmv.v.v v0, v10
2222; CHECK-NEXT:    ret
2223entry:
2224  %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i16.i16(
2225    <vscale x 4 x i1> %0,
2226    <vscale x 4 x i16> %1,
2227    i16 1,
2228    <vscale x 4 x i1> %2,
2229    iXLen %3)
2230
2231  ret <vscale x 4 x i1> %a
2232}
2233
2234define <vscale x 8 x i1> @intrinsic_vmsgeu_vi_nxv8i16_i16(<vscale x 8 x i16> %0, iXLen %1) nounwind {
2235; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv8i16_i16:
2236; CHECK:       # %bb.0: # %entry
2237; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
2238; CHECK-NEXT:    vmsgtu.vi v0, v8, 1
2239; CHECK-NEXT:    ret
2240entry:
2241  %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i16.i16(
2242    <vscale x 8 x i16> %0,
2243    i16 2,
2244    iXLen %1)
2245
2246  ret <vscale x 8 x i1> %a
2247}
2248
2249define <vscale x 8 x i1> @intrinsic_vmsgeu_mask_vi_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
2250; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv8i16_i16:
2251; CHECK:       # %bb.0: # %entry
2252; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
2253; CHECK-NEXT:    vmv1r.v v11, v0
2254; CHECK-NEXT:    vmv1r.v v0, v10
2255; CHECK-NEXT:    vmsgtu.vi v11, v8, 2, v0.t
2256; CHECK-NEXT:    vmv1r.v v0, v11
2257; CHECK-NEXT:    ret
2258entry:
2259  %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i16.i16(
2260    <vscale x 8 x i1> %0,
2261    <vscale x 8 x i16> %1,
2262    i16 3,
2263    <vscale x 8 x i1> %2,
2264    iXLen %3)
2265
2266  ret <vscale x 8 x i1> %a
2267}
2268
2269define <vscale x 16 x i1> @intrinsic_vmsgeu_vi_nxv16i16_i16(<vscale x 16 x i16> %0, iXLen %1) nounwind {
2270; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv16i16_i16:
2271; CHECK:       # %bb.0: # %entry
2272; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
2273; CHECK-NEXT:    vmsgtu.vi v0, v8, 3
2274; CHECK-NEXT:    ret
2275entry:
2276  %a = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.nxv16i16.i16(
2277    <vscale x 16 x i16> %0,
2278    i16 4,
2279    iXLen %1)
2280
2281  ret <vscale x 16 x i1> %a
2282}
2283
2284define <vscale x 16 x i1> @intrinsic_vmsgeu_mask_vi_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
2285; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv16i16_i16:
2286; CHECK:       # %bb.0: # %entry
2287; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
2288; CHECK-NEXT:    vmv1r.v v13, v0
2289; CHECK-NEXT:    vmv1r.v v0, v12
2290; CHECK-NEXT:    vmsgtu.vi v13, v8, 4, v0.t
2291; CHECK-NEXT:    vmv1r.v v0, v13
2292; CHECK-NEXT:    ret
2293entry:
2294  %a = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.mask.nxv16i16.i16(
2295    <vscale x 16 x i1> %0,
2296    <vscale x 16 x i16> %1,
2297    i16 5,
2298    <vscale x 16 x i1> %2,
2299    iXLen %3)
2300
2301  ret <vscale x 16 x i1> %a
2302}
2303
2304define <vscale x 1 x i1> @intrinsic_vmsgeu_vi_nxv1i32_i32(<vscale x 1 x i32> %0, iXLen %1) nounwind {
2305; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv1i32_i32:
2306; CHECK:       # %bb.0: # %entry
2307; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
2308; CHECK-NEXT:    vmsgtu.vi v0, v8, 5
2309; CHECK-NEXT:    ret
2310entry:
2311  %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i32.i32(
2312    <vscale x 1 x i32> %0,
2313    i32 6,
2314    iXLen %1)
2315
2316  ret <vscale x 1 x i1> %a
2317}
2318
2319define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vi_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
2320; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv1i32_i32:
2321; CHECK:       # %bb.0: # %entry
2322; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
2323; CHECK-NEXT:    vmv1r.v v10, v0
2324; CHECK-NEXT:    vmv1r.v v0, v9
2325; CHECK-NEXT:    vmsgtu.vi v10, v8, 6, v0.t
2326; CHECK-NEXT:    vmv1r.v v0, v10
2327; CHECK-NEXT:    ret
2328entry:
2329  %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i32.i32(
2330    <vscale x 1 x i1> %0,
2331    <vscale x 1 x i32> %1,
2332    i32 7,
2333    <vscale x 1 x i1> %2,
2334    iXLen %3)
2335
2336  ret <vscale x 1 x i1> %a
2337}
2338
2339define <vscale x 2 x i1> @intrinsic_vmsgeu_vi_nxv2i32_i32(<vscale x 2 x i32> %0, iXLen %1) nounwind {
2340; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv2i32_i32:
2341; CHECK:       # %bb.0: # %entry
2342; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
2343; CHECK-NEXT:    vmsgtu.vi v0, v8, 7
2344; CHECK-NEXT:    ret
2345entry:
2346  %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i32.i32(
2347    <vscale x 2 x i32> %0,
2348    i32 8,
2349    iXLen %1)
2350
2351  ret <vscale x 2 x i1> %a
2352}
2353
2354define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vi_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
2355; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv2i32_i32:
2356; CHECK:       # %bb.0: # %entry
2357; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
2358; CHECK-NEXT:    vmv1r.v v10, v0
2359; CHECK-NEXT:    vmv1r.v v0, v9
2360; CHECK-NEXT:    vmsgtu.vi v10, v8, 8, v0.t
2361; CHECK-NEXT:    vmv.v.v v0, v10
2362; CHECK-NEXT:    ret
2363entry:
2364  %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i32.i32(
2365    <vscale x 2 x i1> %0,
2366    <vscale x 2 x i32> %1,
2367    i32 9,
2368    <vscale x 2 x i1> %2,
2369    iXLen %3)
2370
2371  ret <vscale x 2 x i1> %a
2372}
2373
2374define <vscale x 4 x i1> @intrinsic_vmsgeu_vi_nxv4i32_i32(<vscale x 4 x i32> %0, iXLen %1) nounwind {
2375; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv4i32_i32:
2376; CHECK:       # %bb.0: # %entry
2377; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
2378; CHECK-NEXT:    vmsgtu.vi v0, v8, 9
2379; CHECK-NEXT:    ret
2380entry:
2381  %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i32.i32(
2382    <vscale x 4 x i32> %0,
2383    i32 10,
2384    iXLen %1)
2385
2386  ret <vscale x 4 x i1> %a
2387}
2388
2389define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vi_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
2390; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv4i32_i32:
2391; CHECK:       # %bb.0: # %entry
2392; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
2393; CHECK-NEXT:    vmv1r.v v11, v0
2394; CHECK-NEXT:    vmv1r.v v0, v10
2395; CHECK-NEXT:    vmsgtu.vi v11, v8, 10, v0.t
2396; CHECK-NEXT:    vmv1r.v v0, v11
2397; CHECK-NEXT:    ret
2398entry:
2399  %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i32.i32(
2400    <vscale x 4 x i1> %0,
2401    <vscale x 4 x i32> %1,
2402    i32 11,
2403    <vscale x 4 x i1> %2,
2404    iXLen %3)
2405
2406  ret <vscale x 4 x i1> %a
2407}
2408
2409define <vscale x 8 x i1> @intrinsic_vmsgeu_vi_nxv8i32_i32(<vscale x 8 x i32> %0, iXLen %1) nounwind {
2410; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv8i32_i32:
2411; CHECK:       # %bb.0: # %entry
2412; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
2413; CHECK-NEXT:    vmsgtu.vi v0, v8, 11
2414; CHECK-NEXT:    ret
2415entry:
2416  %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i32.i32(
2417    <vscale x 8 x i32> %0,
2418    i32 12,
2419    iXLen %1)
2420
2421  ret <vscale x 8 x i1> %a
2422}
2423
2424define <vscale x 8 x i1> @intrinsic_vmsgeu_mask_vi_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
2425; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv8i32_i32:
2426; CHECK:       # %bb.0: # %entry
2427; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
2428; CHECK-NEXT:    vmv1r.v v13, v0
2429; CHECK-NEXT:    vmv1r.v v0, v12
2430; CHECK-NEXT:    vmsgtu.vi v13, v8, 12, v0.t
2431; CHECK-NEXT:    vmv1r.v v0, v13
2432; CHECK-NEXT:    ret
2433entry:
2434  %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i32.i32(
2435    <vscale x 8 x i1> %0,
2436    <vscale x 8 x i32> %1,
2437    i32 13,
2438    <vscale x 8 x i1> %2,
2439    iXLen %3)
2440
2441  ret <vscale x 8 x i1> %a
2442}
2443
2444define <vscale x 1 x i1> @intrinsic_vmsgeu_vi_nxv1i64_i64(<vscale x 1 x i64> %0, iXLen %1) nounwind {
2445; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv1i64_i64:
2446; CHECK:       # %bb.0: # %entry
2447; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
2448; CHECK-NEXT:    vmsgtu.vi v0, v8, 13
2449; CHECK-NEXT:    ret
2450entry:
2451  %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i64.i64(
2452    <vscale x 1 x i64> %0,
2453    i64 14,
2454    iXLen %1)
2455
2456  ret <vscale x 1 x i1> %a
2457}
2458
2459define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vi_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
2460; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv1i64_i64:
2461; CHECK:       # %bb.0: # %entry
2462; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
2463; CHECK-NEXT:    vmv1r.v v10, v0
2464; CHECK-NEXT:    vmv1r.v v0, v9
2465; CHECK-NEXT:    vmsgtu.vi v10, v8, 14, v0.t
2466; CHECK-NEXT:    vmv.v.v v0, v10
2467; CHECK-NEXT:    ret
2468entry:
2469  %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i64.i64(
2470    <vscale x 1 x i1> %0,
2471    <vscale x 1 x i64> %1,
2472    i64 15,
2473    <vscale x 1 x i1> %2,
2474    iXLen %3)
2475
2476  ret <vscale x 1 x i1> %a
2477}
2478
2479define <vscale x 2 x i1> @intrinsic_vmsgeu_vi_nxv2i64_i64(<vscale x 2 x i64> %0, iXLen %1) nounwind {
2480; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv2i64_i64:
2481; CHECK:       # %bb.0: # %entry
2482; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
2483; CHECK-NEXT:    vmsgtu.vi v0, v8, 15
2484; CHECK-NEXT:    ret
2485entry:
2486  %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i64.i64(
2487    <vscale x 2 x i64> %0,
2488    i64 16,
2489    iXLen %1)
2490
2491  ret <vscale x 2 x i1> %a
2492}
2493
2494define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vi_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
2495; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv2i64_i64:
2496; CHECK:       # %bb.0: # %entry
2497; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
2498; CHECK-NEXT:    vmv1r.v v11, v0
2499; CHECK-NEXT:    vmv1r.v v0, v10
2500; CHECK-NEXT:    vmsgtu.vi v11, v8, -16, v0.t
2501; CHECK-NEXT:    vmv1r.v v0, v11
2502; CHECK-NEXT:    ret
2503entry:
2504  %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i64.i64(
2505    <vscale x 2 x i1> %0,
2506    <vscale x 2 x i64> %1,
2507    i64 -15,
2508    <vscale x 2 x i1> %2,
2509    iXLen %3)
2510
2511  ret <vscale x 2 x i1> %a
2512}
2513
2514define <vscale x 4 x i1> @intrinsic_vmsgeu_vi_nxv4i64_i64(<vscale x 4 x i64> %0, iXLen %1) nounwind {
2515; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv4i64_i64:
2516; CHECK:       # %bb.0: # %entry
2517; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
2518; CHECK-NEXT:    vmsgtu.vi v0, v8, -15
2519; CHECK-NEXT:    ret
2520entry:
2521  %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i64.i64(
2522    <vscale x 4 x i64> %0,
2523    i64 -14,
2524    iXLen %1)
2525
2526  ret <vscale x 4 x i1> %a
2527}
2528
2529define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vi_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
2530; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv4i64_i64:
2531; CHECK:       # %bb.0: # %entry
2532; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
2533; CHECK-NEXT:    vmv1r.v v13, v0
2534; CHECK-NEXT:    vmv1r.v v0, v12
2535; CHECK-NEXT:    vmsgtu.vi v13, v8, -14, v0.t
2536; CHECK-NEXT:    vmv1r.v v0, v13
2537; CHECK-NEXT:    ret
2538entry:
2539  %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i64.i64(
2540    <vscale x 4 x i1> %0,
2541    <vscale x 4 x i64> %1,
2542    i64 -13,
2543    <vscale x 4 x i1> %2,
2544    iXLen %3)
2545
2546  ret <vscale x 4 x i1> %a
2547}
2548
2549; Test cases where the mask and maskedoff are the same value.
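; When they are identical, the masked result can be formed directly as mask & ~(lhs < rhs),
; so the checks show an unmasked vmsltu.vx followed by vmandn.mm, avoiding the register
; copies and vmxor.mm of the general masked case.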
2550define <vscale x 1 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, i8 %2, iXLen %3) nounwind {
2551; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv1i8_i8:
2552; CHECK:       # %bb.0: # %entry
2553; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
2554; CHECK-NEXT:    vmsltu.vx v8, v8, a0
2555; CHECK-NEXT:    vmandn.mm v0, v0, v8
2556; CHECK-NEXT:    ret
2557entry:
2558  %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i8.i8(
2559    <vscale x 1 x i1> %0,
2560    <vscale x 1 x i8> %1,
2561    i8 %2,
2562    <vscale x 1 x i1> %0,
2563    iXLen %3)
2564
2565  ret <vscale x 1 x i1> %a
2566}
2567
2568define <vscale x 2 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, i8 %2, iXLen %3) nounwind {
2569; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv2i8_i8:
2570; CHECK:       # %bb.0: # %entry
2571; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
2572; CHECK-NEXT:    vmsltu.vx v8, v8, a0
2573; CHECK-NEXT:    vmandn.mm v0, v0, v8
2574; CHECK-NEXT:    ret
2575entry:
2576  %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i8.i8(
2577    <vscale x 2 x i1> %0,
2578    <vscale x 2 x i8> %1,
2579    i8 %2,
2580    <vscale x 2 x i1> %0,
2581    iXLen %3)
2582
2583  ret <vscale x 2 x i1> %a
2584}
2585
2586define <vscale x 4 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, i8 %2, iXLen %3) nounwind {
2587; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv4i8_i8:
2588; CHECK:       # %bb.0: # %entry
2589; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
2590; CHECK-NEXT:    vmsltu.vx v8, v8, a0
2591; CHECK-NEXT:    vmandn.mm v0, v0, v8
2592; CHECK-NEXT:    ret
2593entry:
2594  %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i8.i8(
2595    <vscale x 4 x i1> %0,
2596    <vscale x 4 x i8> %1,
2597    i8 %2,
2598    <vscale x 4 x i1> %0,
2599    iXLen %3)
2600
2601  ret <vscale x 4 x i1> %a
2602}
2603
2604define <vscale x 8 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, i8 %2, iXLen %3) nounwind {
2605; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv8i8_i8:
2606; CHECK:       # %bb.0: # %entry
2607; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
2608; CHECK-NEXT:    vmsltu.vx v8, v8, a0
2609; CHECK-NEXT:    vmandn.mm v0, v0, v8
2610; CHECK-NEXT:    ret
2611entry:
2612  %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i8.i8(
2613    <vscale x 8 x i1> %0,
2614    <vscale x 8 x i8> %1,
2615    i8 %2,
2616    <vscale x 8 x i1> %0,
2617    iXLen %3)
2618
2619  ret <vscale x 8 x i1> %a
2620}
2621
2622define <vscale x 16 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, i8 %2, iXLen %3) nounwind {
2623; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv16i8_i8:
2624; CHECK:       # %bb.0: # %entry
2625; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
2626; CHECK-NEXT:    vmsltu.vx v10, v8, a0
2627; CHECK-NEXT:    vmandn.mm v0, v0, v10
2628; CHECK-NEXT:    ret
2629entry:
2630  %a = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.mask.nxv16i8.i8(
2631    <vscale x 16 x i1> %0,
2632    <vscale x 16 x i8> %1,
2633    i8 %2,
2634    <vscale x 16 x i1> %0,
2635    iXLen %3)
2636
2637  ret <vscale x 16 x i1> %a
2638}
2639
2640define <vscale x 32 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, i8 %2, iXLen %3) nounwind {
2641; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv32i8_i8:
2642; CHECK:       # %bb.0: # %entry
2643; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
2644; CHECK-NEXT:    vmsltu.vx v12, v8, a0
2645; CHECK-NEXT:    vmandn.mm v0, v0, v12
2646; CHECK-NEXT:    ret
2647entry:
2648  %a = call <vscale x 32 x i1> @llvm.riscv.vmsgeu.mask.nxv32i8.i8(
2649    <vscale x 32 x i1> %0,
2650    <vscale x 32 x i8> %1,
2651    i8 %2,
2652    <vscale x 32 x i1> %0,
2653    iXLen %3)
2654
2655  ret <vscale x 32 x i1> %a
2656}
2657
2658define <vscale x 1 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, i16 %2, iXLen %3) nounwind {
2659; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv1i16_i16:
2660; CHECK:       # %bb.0: # %entry
2661; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
2662; CHECK-NEXT:    vmsltu.vx v8, v8, a0
2663; CHECK-NEXT:    vmandn.mm v0, v0, v8
2664; CHECK-NEXT:    ret
2665entry:
2666  %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i16.i16(
2667    <vscale x 1 x i1> %0,
2668    <vscale x 1 x i16> %1,
2669    i16 %2,
2670    <vscale x 1 x i1> %0,
2671    iXLen %3)
2672
2673  ret <vscale x 1 x i1> %a
2674}
2675
2676define <vscale x 2 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, i16 %2, iXLen %3) nounwind {
2677; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv2i16_i16:
2678; CHECK:       # %bb.0: # %entry
2679; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
2680; CHECK-NEXT:    vmsltu.vx v8, v8, a0
2681; CHECK-NEXT:    vmandn.mm v0, v0, v8
2682; CHECK-NEXT:    ret
2683entry:
2684  %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i16.i16(
2685    <vscale x 2 x i1> %0,
2686    <vscale x 2 x i16> %1,
2687    i16 %2,
2688    <vscale x 2 x i1> %0,
2689    iXLen %3)
2690
2691  ret <vscale x 2 x i1> %a
2692}
2693
2694define <vscale x 4 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, i16 %2, iXLen %3) nounwind {
2695; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv4i16_i16:
2696; CHECK:       # %bb.0: # %entry
2697; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
2698; CHECK-NEXT:    vmsltu.vx v8, v8, a0
2699; CHECK-NEXT:    vmandn.mm v0, v0, v8
2700; CHECK-NEXT:    ret
2701entry:
2702  %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i16.i16(
2703    <vscale x 4 x i1> %0,
2704    <vscale x 4 x i16> %1,
2705    i16 %2,
2706    <vscale x 4 x i1> %0,
2707    iXLen %3)
2708
2709  ret <vscale x 4 x i1> %a
2710}
2711
2712define <vscale x 8 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, i16 %2, iXLen %3) nounwind {
2713; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv8i16_i16:
2714; CHECK:       # %bb.0: # %entry
2715; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
2716; CHECK-NEXT:    vmsltu.vx v10, v8, a0
2717; CHECK-NEXT:    vmandn.mm v0, v0, v10
2718; CHECK-NEXT:    ret
2719entry:
2720  %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i16.i16(
2721    <vscale x 8 x i1> %0,
2722    <vscale x 8 x i16> %1,
2723    i16 %2,
2724    <vscale x 8 x i1> %0,
2725    iXLen %3)
2726
2727  ret <vscale x 8 x i1> %a
2728}
2729
2730define <vscale x 16 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, i16 %2, iXLen %3) nounwind {
2731; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv16i16_i16:
2732; CHECK:       # %bb.0: # %entry
2733; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
2734; CHECK-NEXT:    vmsltu.vx v12, v8, a0
2735; CHECK-NEXT:    vmandn.mm v0, v0, v12
2736; CHECK-NEXT:    ret
2737entry:
2738  %a = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.mask.nxv16i16.i16(
2739    <vscale x 16 x i1> %0,
2740    <vscale x 16 x i16> %1,
2741    i16 %2,
2742    <vscale x 16 x i1> %0,
2743    iXLen %3)
2744
2745  ret <vscale x 16 x i1> %a
2746}
2747
2748define <vscale x 1 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, i32 %2, iXLen %3) nounwind {
2749; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv1i32_i32:
2750; CHECK:       # %bb.0: # %entry
2751; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
2752; CHECK-NEXT:    vmsltu.vx v8, v8, a0
2753; CHECK-NEXT:    vmandn.mm v0, v0, v8
2754; CHECK-NEXT:    ret
2755entry:
2756  %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i32.i32(
2757    <vscale x 1 x i1> %0,
2758    <vscale x 1 x i32> %1,
2759    i32 %2,
2760    <vscale x 1 x i1> %0,
2761    iXLen %3)
2762
2763  ret <vscale x 1 x i1> %a
2764}
2765
2766define <vscale x 2 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, i32 %2, iXLen %3) nounwind {
2767; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv2i32_i32:
2768; CHECK:       # %bb.0: # %entry
2769; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
2770; CHECK-NEXT:    vmsltu.vx v8, v8, a0
2771; CHECK-NEXT:    vmandn.mm v0, v0, v8
2772; CHECK-NEXT:    ret
2773entry:
2774  %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i32.i32(
2775    <vscale x 2 x i1> %0,
2776    <vscale x 2 x i32> %1,
2777    i32 %2,
2778    <vscale x 2 x i1> %0,
2779    iXLen %3)
2780
2781  ret <vscale x 2 x i1> %a
2782}
2783
2784define <vscale x 4 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, i32 %2, iXLen %3) nounwind {
2785; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv4i32_i32:
2786; CHECK:       # %bb.0: # %entry
2787; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
2788; CHECK-NEXT:    vmsltu.vx v10, v8, a0
2789; CHECK-NEXT:    vmandn.mm v0, v0, v10
2790; CHECK-NEXT:    ret
2791entry:
2792  %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i32.i32(
2793    <vscale x 4 x i1> %0,
2794    <vscale x 4 x i32> %1,
2795    i32 %2,
2796    <vscale x 4 x i1> %0,
2797    iXLen %3)
2798
2799  ret <vscale x 4 x i1> %a
2800}
2801
2802define <vscale x 8 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, i32 %2, iXLen %3) nounwind {
2803; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv8i32_i32:
2804; CHECK:       # %bb.0: # %entry
2805; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
2806; CHECK-NEXT:    vmsltu.vx v12, v8, a0
2807; CHECK-NEXT:    vmandn.mm v0, v0, v12
2808; CHECK-NEXT:    ret
2809entry:
2810  %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i32.i32(
2811    <vscale x 8 x i1> %0,
2812    <vscale x 8 x i32> %1,
2813    i32 %2,
2814    <vscale x 8 x i1> %0,
2815    iXLen %3)
2816
2817  ret <vscale x 8 x i1> %a
2818}
2819
2820define <vscale x 1 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, i64 %2, iXLen %3) nounwind {
2821; RV32-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv1i64_i64:
2822; RV32:       # %bb.0: # %entry
2823; RV32-NEXT:    addi sp, sp, -16
2824; RV32-NEXT:    sw a0, 8(sp)
2825; RV32-NEXT:    sw a1, 12(sp)
2826; RV32-NEXT:    addi a0, sp, 8
2827; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, mu
2828; RV32-NEXT:    vlse64.v v9, (a0), zero
2829; RV32-NEXT:    vmsleu.vv v0, v9, v8, v0.t
2830; RV32-NEXT:    addi sp, sp, 16
2831; RV32-NEXT:    ret
2832;
2833; RV64-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv1i64_i64:
2834; RV64:       # %bb.0: # %entry
2835; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
2836; RV64-NEXT:    vmsltu.vx v8, v8, a0
2837; RV64-NEXT:    vmandn.mm v0, v0, v8
2838; RV64-NEXT:    ret
2839entry:
2840  %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i64.i64(
2841    <vscale x 1 x i1> %0,
2842    <vscale x 1 x i64> %1,
2843    i64 %2,
2844    <vscale x 1 x i1> %0,
2845    iXLen %3)
2846
2847  ret <vscale x 1 x i1> %a
2848}
2849
2850define <vscale x 2 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, i64 %2, iXLen %3) nounwind {
2851; RV32-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv2i64_i64:
2852; RV32:       # %bb.0: # %entry
2853; RV32-NEXT:    addi sp, sp, -16
2854; RV32-NEXT:    sw a0, 8(sp)
2855; RV32-NEXT:    sw a1, 12(sp)
2856; RV32-NEXT:    addi a0, sp, 8
2857; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, mu
2858; RV32-NEXT:    vlse64.v v12, (a0), zero
2859; RV32-NEXT:    vmv1r.v v10, v0
2860; RV32-NEXT:    vmsleu.vv v10, v12, v8, v0.t
2861; RV32-NEXT:    vmv1r.v v0, v10
2862; RV32-NEXT:    addi sp, sp, 16
2863; RV32-NEXT:    ret
2864;
2865; RV64-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv2i64_i64:
2866; RV64:       # %bb.0: # %entry
2867; RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
2868; RV64-NEXT:    vmsltu.vx v10, v8, a0
2869; RV64-NEXT:    vmandn.mm v0, v0, v10
2870; RV64-NEXT:    ret
2871entry:
2872  %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i64.i64(
2873    <vscale x 2 x i1> %0,
2874    <vscale x 2 x i64> %1,
2875    i64 %2,
2876    <vscale x 2 x i1> %0,
2877    iXLen %3)
2878
2879  ret <vscale x 2 x i1> %a
2880}
2881
2882define <vscale x 4 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, i64 %2, iXLen %3) nounwind {
2883; RV32-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv4i64_i64:
2884; RV32:       # %bb.0: # %entry
2885; RV32-NEXT:    addi sp, sp, -16
2886; RV32-NEXT:    sw a0, 8(sp)
2887; RV32-NEXT:    sw a1, 12(sp)
2888; RV32-NEXT:    addi a0, sp, 8
2889; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, mu
2890; RV32-NEXT:    vlse64.v v16, (a0), zero
2891; RV32-NEXT:    vmv1r.v v12, v0
2892; RV32-NEXT:    vmsleu.vv v12, v16, v8, v0.t
2893; RV32-NEXT:    vmv1r.v v0, v12
2894; RV32-NEXT:    addi sp, sp, 16
2895; RV32-NEXT:    ret
2896;
2897; RV64-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv4i64_i64:
2898; RV64:       # %bb.0: # %entry
2899; RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
2900; RV64-NEXT:    vmsltu.vx v12, v8, a0
2901; RV64-NEXT:    vmandn.mm v0, v0, v12
2902; RV64-NEXT:    ret
2903entry:
2904  %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i64.i64(
2905    <vscale x 4 x i1> %0,
2906    <vscale x 4 x i64> %1,
2907    i64 %2,
2908    <vscale x 4 x i1> %0,
2909    iXLen %3)
2910
2911  ret <vscale x 4 x i1> %a
2912}
2913