; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s
; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s

; FIXME: We use exclusively byte types here because the MVT we use for the
; stores is calculated assuming byte elements. We need to deal with mismatched
; subvector "casts" to make other elements work.

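; To regenerate the CHECK lines after a codegen change, rerun the update
; script against this file, e.g. (path to llc assumed; adjust for your build):
;   llvm/utils/update_llc_test_checks.py --llc-binary=build/bin/llc \
;     llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-setcc.ll

; Vector-vector (vv) comparisons. The first two tests sign-/zero-extend the
; i1 result and store it back through %x; the rest store the raw <N x i1>
; mask through %z.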
define void @seteq_vv_v16i8(ptr %x, ptr %y) {
; CHECK-LABEL: seteq_vv_v16i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    vle8.v v9, (a1)
; CHECK-NEXT:    vmseq.vv v0, v8, v9
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v8, v8, -1, v0
; CHECK-NEXT:    vse8.v v8, (a0)
; CHECK-NEXT:    ret
  %a = load <16 x i8>, ptr %x
  %b = load <16 x i8>, ptr %y
  %c = icmp eq <16 x i8> %a, %b
  %d = sext <16 x i1> %c to <16 x i8>
  store <16 x i8> %d, ptr %x
  ret void
}

define void @setne_vv_v32i8(ptr %x, ptr %y) {
; CHECK-LABEL: setne_vv_v32i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a2, 32
; CHECK-NEXT:    vsetvli zero, a2, e8, m2, ta, ma
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    vle8.v v10, (a1)
; CHECK-NEXT:    vmsne.vv v0, v8, v10
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
; CHECK-NEXT:    vse8.v v8, (a0)
; CHECK-NEXT:    ret
  %a = load <32 x i8>, ptr %x
  %b = load <32 x i8>, ptr %y
  %c = icmp ne <32 x i8> %a, %b
  %d = zext <32 x i1> %c to <32 x i8>
  store <32 x i8> %d, ptr %x
  ret void
}

define void @setgt_vv_v64i8(ptr %x, ptr %y, ptr %z) {
; CHECK-LABEL: setgt_vv_v64i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a3, 64
; CHECK-NEXT:    vsetvli zero, a3, e8, m4, ta, ma
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    vle8.v v12, (a1)
; CHECK-NEXT:    vmslt.vv v16, v12, v8
; CHECK-NEXT:    vsm.v v16, (a2)
; CHECK-NEXT:    ret
  %a = load <64 x i8>, ptr %x
  %b = load <64 x i8>, ptr %y
  %c = icmp sgt <64 x i8> %a, %b
  store <64 x i1> %c, ptr %z
  ret void
}

define void @setlt_vv_v128i8(ptr %x, ptr %y, ptr %z) {
; CHECK-LABEL: setlt_vv_v128i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a3, 128
; CHECK-NEXT:    vsetvli zero, a3, e8, m8, ta, ma
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    vle8.v v16, (a1)
; CHECK-NEXT:    vmslt.vv v24, v8, v16
; CHECK-NEXT:    vsm.v v24, (a2)
; CHECK-NEXT:    ret
  %a = load <128 x i8>, ptr %x
  %b = load <128 x i8>, ptr %y
  %c = icmp slt <128 x i8> %a, %b
  store <128 x i1> %c, ptr %z
  ret void
}

define void @setge_vv_v8i8(ptr %x, ptr %y, ptr %z) {
; CHECK-LABEL: setge_vv_v8i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    vle8.v v9, (a1)
; CHECK-NEXT:    vmsle.vv v8, v9, v8
; CHECK-NEXT:    vsm.v v8, (a2)
; CHECK-NEXT:    ret
  %a = load <8 x i8>, ptr %x
  %b = load <8 x i8>, ptr %y
  %c = icmp sge <8 x i8> %a, %b
  store <8 x i1> %c, ptr %z
  ret void
}

define void @setle_vv_v16i8(ptr %x, ptr %y, ptr %z) {
; CHECK-LABEL: setle_vv_v16i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    vle8.v v9, (a1)
; CHECK-NEXT:    vmsle.vv v8, v8, v9
; CHECK-NEXT:    vsm.v v8, (a2)
; CHECK-NEXT:    ret
  %a = load <16 x i8>, ptr %x
  %b = load <16 x i8>, ptr %y
  %c = icmp sle <16 x i8> %a, %b
  store <16 x i1> %c, ptr %z
  ret void
}

define void @setugt_vv_v32i8(ptr %x, ptr %y, ptr %z) {
; CHECK-LABEL: setugt_vv_v32i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a3, 32
; CHECK-NEXT:    vsetvli zero, a3, e8, m2, ta, ma
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    vle8.v v10, (a1)
; CHECK-NEXT:    vmsltu.vv v12, v10, v8
; CHECK-NEXT:    vsm.v v12, (a2)
; CHECK-NEXT:    ret
  %a = load <32 x i8>, ptr %x
  %b = load <32 x i8>, ptr %y
  %c = icmp ugt <32 x i8> %a, %b
  store <32 x i1> %c, ptr %z
  ret void
}

define void @setult_vv_v64i8(ptr %x, ptr %y, ptr %z) {
; CHECK-LABEL: setult_vv_v64i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a3, 64
; CHECK-NEXT:    vsetvli zero, a3, e8, m4, ta, ma
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    vle8.v v12, (a1)
; CHECK-NEXT:    vmsltu.vv v16, v8, v12
; CHECK-NEXT:    vsm.v v16, (a2)
; CHECK-NEXT:    ret
  %a = load <64 x i8>, ptr %x
  %b = load <64 x i8>, ptr %y
  %c = icmp ult <64 x i8> %a, %b
  store <64 x i1> %c, ptr %z
  ret void
}

define void @setuge_vv_v128i8(ptr %x, ptr %y, ptr %z) {
; CHECK-LABEL: setuge_vv_v128i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a3, 128
; CHECK-NEXT:    vsetvli zero, a3, e8, m8, ta, ma
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    vle8.v v16, (a1)
; CHECK-NEXT:    vmsleu.vv v24, v16, v8
; CHECK-NEXT:    vsm.v v24, (a2)
; CHECK-NEXT:    ret
  %a = load <128 x i8>, ptr %x
  %b = load <128 x i8>, ptr %y
  %c = icmp uge <128 x i8> %a, %b
  store <128 x i1> %c, ptr %z
  ret void
}

define void @setule_vv_v8i8(ptr %x, ptr %y, ptr %z) {
; CHECK-LABEL: setule_vv_v8i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    vle8.v v9, (a1)
; CHECK-NEXT:    vmsleu.vv v8, v8, v9
; CHECK-NEXT:    vsm.v v8, (a2)
; CHECK-NEXT:    ret
  %a = load <8 x i8>, ptr %x
  %b = load <8 x i8>, ptr %y
  %c = icmp ule <8 x i8> %a, %b
  store <8 x i1> %c, ptr %z
  ret void
}

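; Vector-scalar (vx) comparisons: %y is splatted and used as the right-hand
; operand. Most predicates map directly onto vms*.vx instructions; sge and
; uge are instead lowered as a vmv.v.x splat plus vmsle(u).vv.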
define void @seteq_vx_v16i8(ptr %x, i8 %y, ptr %z) {
; CHECK-LABEL: seteq_vx_v16i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    vmseq.vx v8, v8, a1
; CHECK-NEXT:    vsm.v v8, (a2)
; CHECK-NEXT:    ret
  %a = load <16 x i8>, ptr %x
  %b = insertelement <16 x i8> poison, i8 %y, i32 0
  %c = shufflevector <16 x i8> %b, <16 x i8> poison, <16 x i32> zeroinitializer
  %d = icmp eq <16 x i8> %a, %c
  store <16 x i1> %d, ptr %z
  ret void
}

define void @setne_vx_v32i8(ptr %x, i8 %y, ptr %z) {
; CHECK-LABEL: setne_vx_v32i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a3, 32
; CHECK-NEXT:    vsetvli zero, a3, e8, m2, ta, ma
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    vmsne.vx v10, v8, a1
; CHECK-NEXT:    vsm.v v10, (a2)
; CHECK-NEXT:    ret
  %a = load <32 x i8>, ptr %x
  %b = insertelement <32 x i8> poison, i8 %y, i32 0
  %c = shufflevector <32 x i8> %b, <32 x i8> poison, <32 x i32> zeroinitializer
  %d = icmp ne <32 x i8> %a, %c
  store <32 x i1> %d, ptr %z
  ret void
}

define void @setgt_vx_v64i8(ptr %x, i8 %y, ptr %z) {
; CHECK-LABEL: setgt_vx_v64i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a3, 64
; CHECK-NEXT:    vsetvli zero, a3, e8, m4, ta, ma
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    vmsgt.vx v12, v8, a1
; CHECK-NEXT:    vsm.v v12, (a2)
; CHECK-NEXT:    ret
  %a = load <64 x i8>, ptr %x
  %b = insertelement <64 x i8> poison, i8 %y, i32 0
  %c = shufflevector <64 x i8> %b, <64 x i8> poison, <64 x i32> zeroinitializer
  %d = icmp sgt <64 x i8> %a, %c
  store <64 x i1> %d, ptr %z
  ret void
}

define void @setlt_vx_v128i8(ptr %x, i8 %y, ptr %z) {
; CHECK-LABEL: setlt_vx_v128i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a3, 128
; CHECK-NEXT:    vsetvli zero, a3, e8, m8, ta, ma
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    vmslt.vx v16, v8, a1
; CHECK-NEXT:    vsm.v v16, (a2)
; CHECK-NEXT:    ret
  %a = load <128 x i8>, ptr %x
  %b = insertelement <128 x i8> poison, i8 %y, i32 0
  %c = shufflevector <128 x i8> %b, <128 x i8> poison, <128 x i32> zeroinitializer
  %d = icmp slt <128 x i8> %a, %c
  store <128 x i1> %d, ptr %z
  ret void
}

define void @setge_vx_v8i8(ptr %x, i8 %y, ptr %z) {
; CHECK-LABEL: setge_vx_v8i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    vmv.v.x v9, a1
; CHECK-NEXT:    vmsle.vv v8, v9, v8
; CHECK-NEXT:    vsm.v v8, (a2)
; CHECK-NEXT:    ret
  %a = load <8 x i8>, ptr %x
  %b = insertelement <8 x i8> poison, i8 %y, i32 0
  %c = shufflevector <8 x i8> %b, <8 x i8> poison, <8 x i32> zeroinitializer
  %d = icmp sge <8 x i8> %a, %c
  store <8 x i1> %d, ptr %z
  ret void
}

define void @setle_vx_v16i8(ptr %x, i8 %y, ptr %z) {
; CHECK-LABEL: setle_vx_v16i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    vmsle.vx v8, v8, a1
; CHECK-NEXT:    vsm.v v8, (a2)
; CHECK-NEXT:    ret
  %a = load <16 x i8>, ptr %x
  %b = insertelement <16 x i8> poison, i8 %y, i32 0
  %c = shufflevector <16 x i8> %b, <16 x i8> poison, <16 x i32> zeroinitializer
  %d = icmp sle <16 x i8> %a, %c
  store <16 x i1> %d, ptr %z
  ret void
}

define void @setugt_vx_v32i8(ptr %x, i8 %y, ptr %z) {
; CHECK-LABEL: setugt_vx_v32i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a3, 32
; CHECK-NEXT:    vsetvli zero, a3, e8, m2, ta, ma
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    vmsgtu.vx v10, v8, a1
; CHECK-NEXT:    vsm.v v10, (a2)
; CHECK-NEXT:    ret
  %a = load <32 x i8>, ptr %x
  %b = insertelement <32 x i8> poison, i8 %y, i32 0
  %c = shufflevector <32 x i8> %b, <32 x i8> poison, <32 x i32> zeroinitializer
  %d = icmp ugt <32 x i8> %a, %c
  store <32 x i1> %d, ptr %z
  ret void
}

define void @setult_vx_v64i8(ptr %x, i8 %y, ptr %z) {
; CHECK-LABEL: setult_vx_v64i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a3, 64
; CHECK-NEXT:    vsetvli zero, a3, e8, m4, ta, ma
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    vmsltu.vx v12, v8, a1
; CHECK-NEXT:    vsm.v v12, (a2)
; CHECK-NEXT:    ret
  %a = load <64 x i8>, ptr %x
  %b = insertelement <64 x i8> poison, i8 %y, i32 0
  %c = shufflevector <64 x i8> %b, <64 x i8> poison, <64 x i32> zeroinitializer
  %d = icmp ult <64 x i8> %a, %c
  store <64 x i1> %d, ptr %z
  ret void
}

define void @setuge_vx_v128i8(ptr %x, i8 %y, ptr %z) {
; CHECK-LABEL: setuge_vx_v128i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a3, 128
; CHECK-NEXT:    vsetvli zero, a3, e8, m8, ta, ma
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    vmv.v.x v16, a1
; CHECK-NEXT:    vmsleu.vv v24, v16, v8
; CHECK-NEXT:    vsm.v v24, (a2)
; CHECK-NEXT:    ret
  %a = load <128 x i8>, ptr %x
  %b = insertelement <128 x i8> poison, i8 %y, i32 0
  %c = shufflevector <128 x i8> %b, <128 x i8> poison, <128 x i32> zeroinitializer
  %d = icmp uge <128 x i8> %a, %c
  store <128 x i1> %d, ptr %z
  ret void
}

define void @setule_vx_v8i8(ptr %x, i8 %y, ptr %z) {
; CHECK-LABEL: setule_vx_v8i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    vmsleu.vx v8, v8, a1
; CHECK-NEXT:    vsm.v v8, (a2)
; CHECK-NEXT:    ret
  %a = load <8 x i8>, ptr %x
  %b = insertelement <8 x i8> poison, i8 %y, i32 0
  %c = shufflevector <8 x i8> %b, <8 x i8> poison, <8 x i32> zeroinitializer
  %d = icmp ule <8 x i8> %a, %c
  store <8 x i1> %d, ptr %z
  ret void
}

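; Scalar-vector (xv) comparisons: the splat is the left-hand operand, so
; selection commutes the predicate (e.g. sgt lowers to vmslt.vx and slt to
; vmsgt.vx); sle and ule are lowered as a vmv.v.x splat plus vmsle(u).vv.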
define void @seteq_xv_v16i8(ptr %x, i8 %y, ptr %z) {
; CHECK-LABEL: seteq_xv_v16i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    vmseq.vx v8, v8, a1
; CHECK-NEXT:    vsm.v v8, (a2)
; CHECK-NEXT:    ret
  %a = load <16 x i8>, ptr %x
  %b = insertelement <16 x i8> poison, i8 %y, i32 0
  %c = shufflevector <16 x i8> %b, <16 x i8> poison, <16 x i32> zeroinitializer
  %d = icmp eq <16 x i8> %c, %a
  store <16 x i1> %d, ptr %z
  ret void
}

define void @setne_xv_v32i8(ptr %x, i8 %y, ptr %z) {
; CHECK-LABEL: setne_xv_v32i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a3, 32
; CHECK-NEXT:    vsetvli zero, a3, e8, m2, ta, ma
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    vmsne.vx v10, v8, a1
; CHECK-NEXT:    vsm.v v10, (a2)
; CHECK-NEXT:    ret
  %a = load <32 x i8>, ptr %x
  %b = insertelement <32 x i8> poison, i8 %y, i32 0
  %c = shufflevector <32 x i8> %b, <32 x i8> poison, <32 x i32> zeroinitializer
  %d = icmp ne <32 x i8> %c, %a
  store <32 x i1> %d, ptr %z
  ret void
}

define void @setgt_xv_v64i8(ptr %x, i8 %y, ptr %z) {
; CHECK-LABEL: setgt_xv_v64i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a3, 64
; CHECK-NEXT:    vsetvli zero, a3, e8, m4, ta, ma
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    vmslt.vx v12, v8, a1
; CHECK-NEXT:    vsm.v v12, (a2)
; CHECK-NEXT:    ret
  %a = load <64 x i8>, ptr %x
  %b = insertelement <64 x i8> poison, i8 %y, i32 0
  %c = shufflevector <64 x i8> %b, <64 x i8> poison, <64 x i32> zeroinitializer
  %d = icmp sgt <64 x i8> %c, %a
  store <64 x i1> %d, ptr %z
  ret void
}

define void @setlt_xv_v128i8(ptr %x, i8 %y, ptr %z) {
; CHECK-LABEL: setlt_xv_v128i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a3, 128
; CHECK-NEXT:    vsetvli zero, a3, e8, m8, ta, ma
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    vmsgt.vx v16, v8, a1
; CHECK-NEXT:    vsm.v v16, (a2)
; CHECK-NEXT:    ret
  %a = load <128 x i8>, ptr %x
  %b = insertelement <128 x i8> poison, i8 %y, i32 0
  %c = shufflevector <128 x i8> %b, <128 x i8> poison, <128 x i32> zeroinitializer
  %d = icmp slt <128 x i8> %c, %a
  store <128 x i1> %d, ptr %z
  ret void
}

define void @setge_xv_v8i8(ptr %x, i8 %y, ptr %z) {
; CHECK-LABEL: setge_xv_v8i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    vmsle.vx v8, v8, a1
; CHECK-NEXT:    vsm.v v8, (a2)
; CHECK-NEXT:    ret
  %a = load <8 x i8>, ptr %x
  %b = insertelement <8 x i8> poison, i8 %y, i32 0
  %c = shufflevector <8 x i8> %b, <8 x i8> poison, <8 x i32> zeroinitializer
  %d = icmp sge <8 x i8> %c, %a
  store <8 x i1> %d, ptr %z
  ret void
}

define void @setle_xv_v16i8(ptr %x, i8 %y, ptr %z) {
; CHECK-LABEL: setle_xv_v16i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    vmv.v.x v9, a1
; CHECK-NEXT:    vmsle.vv v8, v9, v8
; CHECK-NEXT:    vsm.v v8, (a2)
; CHECK-NEXT:    ret
  %a = load <16 x i8>, ptr %x
  %b = insertelement <16 x i8> poison, i8 %y, i32 0
  %c = shufflevector <16 x i8> %b, <16 x i8> poison, <16 x i32> zeroinitializer
  %d = icmp sle <16 x i8> %c, %a
  store <16 x i1> %d, ptr %z
  ret void
}

define void @setugt_xv_v32i8(ptr %x, i8 %y, ptr %z) {
; CHECK-LABEL: setugt_xv_v32i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a3, 32
; CHECK-NEXT:    vsetvli zero, a3, e8, m2, ta, ma
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    vmsltu.vx v10, v8, a1
; CHECK-NEXT:    vsm.v v10, (a2)
; CHECK-NEXT:    ret
  %a = load <32 x i8>, ptr %x
  %b = insertelement <32 x i8> poison, i8 %y, i32 0
  %c = shufflevector <32 x i8> %b, <32 x i8> poison, <32 x i32> zeroinitializer
  %d = icmp ugt <32 x i8> %c, %a
  store <32 x i1> %d, ptr %z
  ret void
}

define void @setult_xv_v64i8(ptr %x, i8 %y, ptr %z) {
; CHECK-LABEL: setult_xv_v64i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a3, 64
; CHECK-NEXT:    vsetvli zero, a3, e8, m4, ta, ma
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    vmsgtu.vx v12, v8, a1
; CHECK-NEXT:    vsm.v v12, (a2)
; CHECK-NEXT:    ret
  %a = load <64 x i8>, ptr %x
  %b = insertelement <64 x i8> poison, i8 %y, i32 0
  %c = shufflevector <64 x i8> %b, <64 x i8> poison, <64 x i32> zeroinitializer
  %d = icmp ult <64 x i8> %c, %a
  store <64 x i1> %d, ptr %z
  ret void
}

define void @setuge_xv_v128i8(ptr %x, i8 %y, ptr %z) {
; CHECK-LABEL: setuge_xv_v128i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a3, 128
; CHECK-NEXT:    vsetvli zero, a3, e8, m8, ta, ma
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    vmsleu.vx v16, v8, a1
; CHECK-NEXT:    vsm.v v16, (a2)
; CHECK-NEXT:    ret
  %a = load <128 x i8>, ptr %x
  %b = insertelement <128 x i8> poison, i8 %y, i32 0
  %c = shufflevector <128 x i8> %b, <128 x i8> poison, <128 x i32> zeroinitializer
  %d = icmp uge <128 x i8> %c, %a
  store <128 x i1> %d, ptr %z
  ret void
}

define void @setule_xv_v8i8(ptr %x, i8 %y, ptr %z) {
; CHECK-LABEL: setule_xv_v8i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    vmv.v.x v9, a1
; CHECK-NEXT:    vmsleu.vv v8, v9, v8
; CHECK-NEXT:    vsm.v v8, (a2)
; CHECK-NEXT:    ret
  %a = load <8 x i8>, ptr %x
  %b = insertelement <8 x i8> poison, i8 %y, i32 0
  %c = shufflevector <8 x i8> %b, <8 x i8> poison, <8 x i32> zeroinitializer
  %d = icmp ule <8 x i8> %c, %a
  store <8 x i1> %d, ptr %z
  ret void
}

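; Vector-immediate (vi) comparisons against splatted constants, selected as
; vms*.vi. Predicates without a direct .vi form are rewritten with adjusted
; immediates: slt 0 becomes vmsle.vi ..., -1, sge 0 becomes vmsgt.vi ..., -1,
; ult 5 becomes vmsleu.vi ..., 4, and uge 5 becomes vmsgtu.vi ..., 4.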
define void @seteq_vi_v16i8(ptr %x, ptr %z) {
; CHECK-LABEL: seteq_vi_v16i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    vmseq.vi v8, v8, 0
; CHECK-NEXT:    vsm.v v8, (a1)
; CHECK-NEXT:    ret
  %a = load <16 x i8>, ptr %x
  %d = icmp eq <16 x i8> %a, splat (i8 0)
  store <16 x i1> %d, ptr %z
  ret void
}

define void @setne_vi_v32i8(ptr %x, ptr %z) {
; CHECK-LABEL: setne_vi_v32i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a2, 32
; CHECK-NEXT:    vsetvli zero, a2, e8, m2, ta, ma
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    vmsne.vi v10, v8, 0
; CHECK-NEXT:    vsm.v v10, (a1)
; CHECK-NEXT:    ret
  %a = load <32 x i8>, ptr %x
  %d = icmp ne <32 x i8> %a, splat (i8 0)
  store <32 x i1> %d, ptr %z
  ret void
}

define void @setgt_vi_v64i8(ptr %x, ptr %z) {
; CHECK-LABEL: setgt_vi_v64i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a2, 64
; CHECK-NEXT:    vsetvli zero, a2, e8, m4, ta, ma
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    vmsgt.vi v12, v8, 0
; CHECK-NEXT:    vsm.v v12, (a1)
; CHECK-NEXT:    ret
  %a = load <64 x i8>, ptr %x
  %d = icmp sgt <64 x i8> %a, splat (i8 0)
  store <64 x i1> %d, ptr %z
  ret void
}

define void @setgt_vi_v64i8_nonzero(ptr %x, ptr %z) {
; CHECK-LABEL: setgt_vi_v64i8_nonzero:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a2, 64
; CHECK-NEXT:    vsetvli zero, a2, e8, m4, ta, ma
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    vmsgt.vi v12, v8, 5
; CHECK-NEXT:    vsm.v v12, (a1)
; CHECK-NEXT:    ret
  %a = load <64 x i8>, ptr %x
  %d = icmp sgt <64 x i8> %a, splat (i8 5)
  store <64 x i1> %d, ptr %z
  ret void
}

define void @setlt_vi_v128i8(ptr %x, ptr %z) {
; CHECK-LABEL: setlt_vi_v128i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a2, 128
; CHECK-NEXT:    vsetvli zero, a2, e8, m8, ta, ma
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    vmsle.vi v16, v8, -1
; CHECK-NEXT:    vsm.v v16, (a1)
; CHECK-NEXT:    ret
  %a = load <128 x i8>, ptr %x
  %d = icmp slt <128 x i8> %a, splat (i8 0)
  store <128 x i1> %d, ptr %z
  ret void
}

define void @setge_vi_v8i8(ptr %x, ptr %z) {
; CHECK-LABEL: setge_vi_v8i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    vmsgt.vi v8, v8, -1
; CHECK-NEXT:    vsm.v v8, (a1)
; CHECK-NEXT:    ret
  %a = load <8 x i8>, ptr %x
  %d = icmp sge <8 x i8> %a, splat (i8 0)
  store <8 x i1> %d, ptr %z
  ret void
}

define void @setle_vi_v16i8(ptr %x, ptr %z) {
; CHECK-LABEL: setle_vi_v16i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    vmsle.vi v8, v8, 0
; CHECK-NEXT:    vsm.v v8, (a1)
; CHECK-NEXT:    ret
  %a = load <16 x i8>, ptr %x
  %d = icmp sle <16 x i8> %a, splat (i8 0)
  store <16 x i1> %d, ptr %z
  ret void
}

define void @setugt_vi_v32i8(ptr %x, ptr %z) {
; CHECK-LABEL: setugt_vi_v32i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a2, 32
; CHECK-NEXT:    vsetvli zero, a2, e8, m2, ta, ma
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    vmsgtu.vi v10, v8, 5
; CHECK-NEXT:    vsm.v v10, (a1)
; CHECK-NEXT:    ret
  %a = load <32 x i8>, ptr %x
  %d = icmp ugt <32 x i8> %a, splat (i8 5)
  store <32 x i1> %d, ptr %z
  ret void
}

define void @setult_vi_v64i8(ptr %x, ptr %z) {
; CHECK-LABEL: setult_vi_v64i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a2, 64
; CHECK-NEXT:    vsetvli zero, a2, e8, m4, ta, ma
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    vmsleu.vi v12, v8, 4
; CHECK-NEXT:    vsm.v v12, (a1)
; CHECK-NEXT:    ret
  %a = load <64 x i8>, ptr %x
  %d = icmp ult <64 x i8> %a, splat (i8 5)
  store <64 x i1> %d, ptr %z
  ret void
}

define void @setuge_vi_v128i8(ptr %x, ptr %z) {
; CHECK-LABEL: setuge_vi_v128i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a2, 128
; CHECK-NEXT:    vsetvli zero, a2, e8, m8, ta, ma
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    vmsgtu.vi v16, v8, 4
; CHECK-NEXT:    vsm.v v16, (a1)
; CHECK-NEXT:    ret
  %a = load <128 x i8>, ptr %x
  %d = icmp uge <128 x i8> %a, splat (i8 5)
  store <128 x i1> %d, ptr %z
  ret void
}

define void @setule_vi_v8i8(ptr %x, ptr %z) {
; CHECK-LABEL: setule_vi_v8i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT:    vle8.v v8, (a0)
; CHECK-NEXT:    vmsleu.vi v8, v8, 5
; CHECK-NEXT:    vsm.v v8, (a1)
; CHECK-NEXT:    ret
  %a = load <8 x i8>, ptr %x
  %d = icmp ule <8 x i8> %a, splat (i8 5)
  store <8 x i1> %d, ptr %z
  ret void
}

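; Wider element types (i16/i32/i64): these tests sign-/zero-extend the i1
; result to the compared element type and store the extended vector back,
; which sidesteps the i1 mask stores noted in the FIXME above.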
define void @seteq_vv_v8i16(ptr %x, ptr %y) {
; CHECK-LABEL: seteq_vv_v8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
; CHECK-NEXT:    vle16.v v8, (a0)
; CHECK-NEXT:    vle16.v v9, (a1)
; CHECK-NEXT:    vmseq.vv v0, v8, v9
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v8, v8, -1, v0
; CHECK-NEXT:    vse16.v v8, (a0)
; CHECK-NEXT:    ret
  %a = load <8 x i16>, ptr %x
  %b = load <8 x i16>, ptr %y
  %c = icmp eq <8 x i16> %a, %b
  %d = sext <8 x i1> %c to <8 x i16>
  store <8 x i16> %d, ptr %x
  ret void
}

define void @setne_vv_v4i32(ptr %x, ptr %y) {
; CHECK-LABEL: setne_vv_v4i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vle32.v v9, (a1)
; CHECK-NEXT:    vmsne.vv v0, v8, v9
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v8, v8, -1, v0
; CHECK-NEXT:    vse32.v v8, (a0)
; CHECK-NEXT:    ret
  %a = load <4 x i32>, ptr %x
  %b = load <4 x i32>, ptr %y
  %c = icmp ne <4 x i32> %a, %b
  %d = sext <4 x i1> %c to <4 x i32>
  store <4 x i32> %d, ptr %x
  ret void
}

define void @setgt_vv_v2i64(ptr %x, ptr %y) {
; CHECK-LABEL: setgt_vv_v2i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
; CHECK-NEXT:    vle64.v v8, (a0)
; CHECK-NEXT:    vle64.v v9, (a1)
; CHECK-NEXT:    vmslt.vv v0, v9, v8
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v8, v8, -1, v0
; CHECK-NEXT:    vse64.v v8, (a0)
; CHECK-NEXT:    ret
  %a = load <2 x i64>, ptr %x
  %b = load <2 x i64>, ptr %y
  %c = icmp sgt <2 x i64> %a, %b
  %d = sext <2 x i1> %c to <2 x i64>
  store <2 x i64> %d, ptr %x
  ret void
}

define void @setlt_vv_v16i16(ptr %x, ptr %y) {
; CHECK-LABEL: setlt_vv_v16i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
; CHECK-NEXT:    vle16.v v8, (a0)
; CHECK-NEXT:    vle16.v v10, (a1)
; CHECK-NEXT:    vmslt.vv v0, v8, v10
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
; CHECK-NEXT:    vse16.v v8, (a0)
; CHECK-NEXT:    ret
  %a = load <16 x i16>, ptr %x
  %b = load <16 x i16>, ptr %y
  %c = icmp slt <16 x i16> %a, %b
  %d = zext <16 x i1> %c to <16 x i16>
  store <16 x i16> %d, ptr %x
  ret void
}

define void @setugt_vv_v8i32(ptr %x, ptr %y) {
; CHECK-LABEL: setugt_vv_v8i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vle32.v v10, (a1)
; CHECK-NEXT:    vmsltu.vv v0, v10, v8
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
; CHECK-NEXT:    vse32.v v8, (a0)
; CHECK-NEXT:    ret
  %a = load <8 x i32>, ptr %x
  %b = load <8 x i32>, ptr %y
  %c = icmp ugt <8 x i32> %a, %b
  %d = zext <8 x i1> %c to <8 x i32>
  store <8 x i32> %d, ptr %x
  ret void
}

define void @setult_vv_v4i64(ptr %x, ptr %y) {
; CHECK-LABEL: setult_vv_v4i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
; CHECK-NEXT:    vle64.v v8, (a0)
; CHECK-NEXT:    vle64.v v10, (a1)
; CHECK-NEXT:    vmsltu.vv v0, v8, v10
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
; CHECK-NEXT:    vse64.v v8, (a0)
; CHECK-NEXT:    ret
  %a = load <4 x i64>, ptr %x
  %b = load <4 x i64>, ptr %y
  %c = icmp ult <4 x i64> %a, %b
  %d = zext <4 x i1> %c to <4 x i64>
  store <4 x i64> %d, ptr %x
  ret void
}