; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -target-abi=ilp32d -mattr=+v,+zfh,+zvfh,+f,+d -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
; RUN: llc -mtriple=riscv64 -target-abi=lp64d -mattr=+v,+zfh,+zvfh,+f,+d -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
; RUN: llc -mtriple=riscv32 -target-abi=ilp32d -mattr=+v,+zfh,+zvfhmin,+f,+d -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN
; RUN: llc -mtriple=riscv64 -target-abi=lp64d -mattr=+v,+zfh,+zvfhmin,+f,+d -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN

; fcmp oeq <8 x half>, vector-vector: ZVFH compares f16 natively (vmfeq.vv);
; ZVFHMIN has no f16 compare, so both operands are widened to f32 first.
define void @fcmp_oeq_vv_v8f16(ptr %x, ptr %y, ptr %z) {
; ZVFH-LABEL: fcmp_oeq_vv_v8f16:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
; ZVFH-NEXT:    vle16.v v8, (a0)
; ZVFH-NEXT:    vle16.v v9, (a1)
; ZVFH-NEXT:    vmfeq.vv v8, v8, v9
; ZVFH-NEXT:    vsm.v v8, (a2)
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: fcmp_oeq_vv_v8f16:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
; ZVFHMIN-NEXT:    vle16.v v8, (a1)
; ZVFHMIN-NEXT:    vle16.v v9, (a0)
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v8
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v12, v9
; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
; ZVFHMIN-NEXT:    vmfeq.vv v8, v12, v10
; ZVFHMIN-NEXT:    vsm.v v8, (a2)
; ZVFHMIN-NEXT:    ret
  %a = load <8 x half>, ptr %x
  %b = load <8 x half>, ptr %y
  %c = fcmp oeq <8 x half> %a, %b
  store <8 x i1> %c, ptr %z
  ret void
}

; fcmp nnan oeq <8 x half>: nnan does not change the lowering for oeq
; (vmfeq is already a quiet ordered compare).
define void @fcmp_oeq_vv_v8f16_nonans(ptr %x, ptr %y, ptr %z) {
; ZVFH-LABEL: fcmp_oeq_vv_v8f16_nonans:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
; ZVFH-NEXT:    vle16.v v8, (a0)
; ZVFH-NEXT:    vle16.v v9, (a1)
; ZVFH-NEXT:    vmfeq.vv v8, v8, v9
; ZVFH-NEXT:    vsm.v v8, (a2)
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: fcmp_oeq_vv_v8f16_nonans:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
; ZVFHMIN-NEXT:    vle16.v v8, (a1)
; ZVFHMIN-NEXT:    vle16.v v9, (a0)
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v8
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v12, v9
; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
; ZVFHMIN-NEXT:    vmfeq.vv v8, v12, v10
; ZVFHMIN-NEXT:    vsm.v v8, (a2)
; ZVFHMIN-NEXT:    ret
  %a = load <8 x half>, ptr %x
  %b = load <8 x half>, ptr %y
  %c = fcmp nnan oeq <8 x half> %a, %b
  store <8 x i1> %c, ptr %z
  ret void
}

; fcmp une <4 x float>: the <4 x i1> result is smaller than a byte, so it is
; expanded to i8 elements, merged into an 8-element mask, and re-narrowed
; with vmsne before the byte store.
define void @fcmp_une_vv_v4f32(ptr %x, ptr %y, ptr %z) {
; CHECK-LABEL: fcmp_une_vv_v4f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vle32.v v9, (a1)
; CHECK-NEXT:    vmfne.vv v0, v8, v9
; CHECK-NEXT:    vsetvli zero, zero, e8, mf4, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT:    vmv.v.i v9, 0
; CHECK-NEXT:    vsetivli zero, 4, e8, mf2, tu, ma
; CHECK-NEXT:    vmv.v.v v9, v8
; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT:    vmsne.vi v8, v9, 0
; CHECK-NEXT:    vsm.v v8, (a2)
; CHECK-NEXT:    ret
  %a = load <4 x float>, ptr %x
  %b = load <4 x float>, ptr %y
  %c = fcmp une <4 x float> %a, %b
  store <4 x i1> %c, ptr %z
  ret void
}

; fcmp nnan une <4 x float>: identical lowering to the plain une case.
define void @fcmp_une_vv_v4f32_nonans(ptr %x, ptr %y, ptr %z) {
; CHECK-LABEL: fcmp_une_vv_v4f32_nonans:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vle32.v v9, (a1)
; CHECK-NEXT:    vmfne.vv v0, v8, v9
; CHECK-NEXT:    vsetvli zero, zero, e8, mf4, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT:    vmv.v.i v9, 0
; CHECK-NEXT:    vsetivli zero, 4, e8, mf2, tu, ma
; CHECK-NEXT:    vmv.v.v v9, v8
; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT:    vmsne.vi v8, v9, 0
; CHECK-NEXT:    vsm.v v8, (a2)
; CHECK-NEXT:    ret
  %a = load <4 x float>, ptr %x
  %b = load <4 x float>, ptr %y
  %c = fcmp nnan une <4 x float> %a, %b
  store <4 x i1> %c, ptr %z
  ret void
}

; fcmp ogt <2 x double>: lowered as vmflt with the operands swapped
; (a > b  <=>  b < a).
define void @fcmp_ogt_vv_v2f64(ptr %x, ptr %y, ptr %z) {
; CHECK-LABEL: fcmp_ogt_vv_v2f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
; CHECK-NEXT:    vle64.v v8, (a0)
; CHECK-NEXT:    vle64.v v9, (a1)
; CHECK-NEXT:    vmflt.vv v0, v9, v8
; CHECK-NEXT:    vsetvli zero, zero, e8, mf8, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT:    vmv.v.i v9, 0
; CHECK-NEXT:    vsetivli zero, 2, e8, mf2, tu, ma
; CHECK-NEXT:    vmv.v.v v9, v8
; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT:    vmsne.vi v8, v9, 0
; CHECK-NEXT:    vsm.v v8, (a2)
; CHECK-NEXT:    ret
  %a = load <2 x double>, ptr %x
  %b = load <2 x double>, ptr %y
  %c = fcmp ogt <2 x double> %a, %b
  store <2 x i1> %c, ptr %z
  ret void
}

; fcmp nnan ogt <2 x double>: identical lowering to the plain ogt case.
define void @fcmp_ogt_vv_v2f64_nonans(ptr %x, ptr %y, ptr %z) {
; CHECK-LABEL: fcmp_ogt_vv_v2f64_nonans:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
; CHECK-NEXT:    vle64.v v8, (a0)
; CHECK-NEXT:    vle64.v v9, (a1)
; CHECK-NEXT:    vmflt.vv v0, v9, v8
; CHECK-NEXT:    vsetvli zero, zero, e8, mf8, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT:    vmv.v.i v9, 0
; CHECK-NEXT:    vsetivli zero, 2, e8, mf2, tu, ma
; CHECK-NEXT:    vmv.v.v v9, v8
; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT:    vmsne.vi v8, v9, 0
; CHECK-NEXT:    vsm.v v8, (a2)
; CHECK-NEXT:    ret
  %a = load <2 x double>, ptr %x
  %b = load <2 x double>, ptr %y
  %c = fcmp nnan ogt <2 x double> %a, %b
  store <2 x i1> %c, ptr %z
  ret void
}

; fcmp olt <16 x half>: ZVFH uses vmflt directly; ZVFHMIN widens to f32.
define void @fcmp_olt_vv_v16f16(ptr %x, ptr %y, ptr %z) {
; ZVFH-LABEL: fcmp_olt_vv_v16f16:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
; ZVFH-NEXT:    vle16.v v8, (a0)
; ZVFH-NEXT:    vle16.v v10, (a1)
; ZVFH-NEXT:    vmflt.vv v12, v8, v10
; ZVFH-NEXT:    vsm.v v12, (a2)
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: fcmp_olt_vv_v16f16:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
; ZVFHMIN-NEXT:    vle16.v v8, (a1)
; ZVFHMIN-NEXT:    vle16.v v10, (a0)
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v12, v8
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v16, v10
; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
; ZVFHMIN-NEXT:    vmflt.vv v8, v16, v12
; ZVFHMIN-NEXT:    vsm.v v8, (a2)
; ZVFHMIN-NEXT:    ret
  %a = load <16 x half>, ptr %x
  %b = load <16 x half>, ptr %y
  %c = fcmp olt <16 x half> %a, %b
  store <16 x i1> %c, ptr %z
  ret void
}

; fcmp nnan olt <16 x half>: identical lowering to the plain olt case.
define void @fcmp_olt_vv_v16f16_nonans(ptr %x, ptr %y, ptr %z) {
; ZVFH-LABEL: fcmp_olt_vv_v16f16_nonans:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
; ZVFH-NEXT:    vle16.v v8, (a0)
; ZVFH-NEXT:    vle16.v v10, (a1)
; ZVFH-NEXT:    vmflt.vv v12, v8, v10
; ZVFH-NEXT:    vsm.v v12, (a2)
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: fcmp_olt_vv_v16f16_nonans:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
; ZVFHMIN-NEXT:    vle16.v v8, (a1)
; ZVFHMIN-NEXT:    vle16.v v10, (a0)
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v12, v8
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v16, v10
; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
; ZVFHMIN-NEXT:    vmflt.vv v8, v16, v12
; ZVFHMIN-NEXT:    vsm.v v8, (a2)
; ZVFHMIN-NEXT:    ret
  %a = load <16 x half>, ptr %x
  %b = load <16 x half>, ptr %y
  %c = fcmp nnan olt <16 x half> %a, %b
  store <16 x i1> %c, ptr %z
  ret void
}

; fcmp oge <8 x float>: lowered as vmfle with swapped operands (a >= b <=> b <= a).
define void @fcmp_oge_vv_v8f32(ptr %x, ptr %y, ptr %z) {
; CHECK-LABEL: fcmp_oge_vv_v8f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vle32.v v10, (a1)
; CHECK-NEXT:    vmfle.vv v12, v10, v8
; CHECK-NEXT:    vsm.v v12, (a2)
; CHECK-NEXT:    ret
  %a = load <8 x float>, ptr %x
  %b = load <8 x float>, ptr %y
  %c = fcmp oge <8 x float> %a, %b
  store <8 x i1> %c, ptr %z
  ret void
}

; fcmp nnan oge <8 x float>: identical lowering to the plain oge case.
define void @fcmp_oge_vv_v8f32_nonans(ptr %x, ptr %y, ptr %z) {
; CHECK-LABEL: fcmp_oge_vv_v8f32_nonans:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vle32.v v10, (a1)
; CHECK-NEXT:    vmfle.vv v12, v10, v8
; CHECK-NEXT:    vsm.v v12, (a2)
; CHECK-NEXT:    ret
  %a = load <8 x float>, ptr %x
  %b = load <8 x float>, ptr %y
  %c = fcmp nnan oge <8 x float> %a, %b
  store <8 x i1> %c, ptr %z
  ret void
}

; fcmp ole <4 x double>: vmfle plus the sub-byte <4 x i1> mask-store expansion.
define void @fcmp_ole_vv_v4f64(ptr %x, ptr %y, ptr %z) {
; CHECK-LABEL: fcmp_ole_vv_v4f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
; CHECK-NEXT:    vle64.v v8, (a0)
; CHECK-NEXT:    vle64.v v10, (a1)
; CHECK-NEXT:    vmfle.vv v0, v8, v10
; CHECK-NEXT:    vsetvli zero, zero, e8, mf4, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT:    vmv.v.i v9, 0
; CHECK-NEXT:    vsetivli zero, 4, e8, mf2, tu, ma
; CHECK-NEXT:    vmv.v.v v9, v8
; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT:    vmsne.vi v8, v9, 0
; CHECK-NEXT:    vsm.v v8, (a2)
; CHECK-NEXT:    ret
  %a = load <4 x double>, ptr %x
  %b = load <4 x double>, ptr %y
  %c = fcmp ole <4 x double> %a, %b
  store <4 x i1> %c, ptr %z
  ret void
}

; fcmp nnan ole <4 x double>: identical lowering to the plain ole case.
define void @fcmp_ole_vv_v4f64_nonans(ptr %x, ptr %y, ptr %z) {
; CHECK-LABEL: fcmp_ole_vv_v4f64_nonans:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
; CHECK-NEXT:    vle64.v v8, (a0)
; CHECK-NEXT:    vle64.v v10, (a1)
; CHECK-NEXT:    vmfle.vv v0, v8, v10
; CHECK-NEXT:    vsetvli zero, zero, e8, mf4, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT:    vmv.v.i v9, 0
; CHECK-NEXT:    vsetivli zero, 4, e8, mf2, tu, ma
; CHECK-NEXT:    vmv.v.v v9, v8
; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT:    vmsne.vi v8, v9, 0
; CHECK-NEXT:    vsm.v v8, (a2)
; CHECK-NEXT:    ret
  %a = load <4 x double>, ptr %x
  %b = load <4 x double>, ptr %y
  %c = fcmp nnan ole <4 x double> %a, %b
  store <4 x i1> %c, ptr %z
  ret void
}

; fcmp ule <32 x half>: without nnan the unordered compare is lowered as
; NOT(ogt), i.e. vmflt(b, a) followed by vmnot.
define void @fcmp_ule_vv_v32f16(ptr %x, ptr %y, ptr %z) {
; ZVFH-LABEL: fcmp_ule_vv_v32f16:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    li a3, 32
; ZVFH-NEXT:    vsetvli zero, a3, e16, m4, ta, ma
; ZVFH-NEXT:    vle16.v v8, (a0)
; ZVFH-NEXT:    vle16.v v12, (a1)
; ZVFH-NEXT:    vmflt.vv v16, v12, v8
; ZVFH-NEXT:    vmnot.m v8, v16
; ZVFH-NEXT:    vsm.v v8, (a2)
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: fcmp_ule_vv_v32f16:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    li a3, 32
; ZVFHMIN-NEXT:    vsetvli zero, a3, e16, m4, ta, ma
; ZVFHMIN-NEXT:    vle16.v v8, (a0)
; ZVFHMIN-NEXT:    vle16.v v12, (a1)
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v16, v8
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v24, v12
; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT:    vmflt.vv v8, v24, v16
; ZVFHMIN-NEXT:    vmnot.m v8, v8
; ZVFHMIN-NEXT:    vsm.v v8, (a2)
; ZVFHMIN-NEXT:    ret
  %a = load <32 x half>, ptr %x
  %b = load <32 x half>, ptr %y
  %c = fcmp ule <32 x half> %a, %b
  store <32 x i1> %c, ptr %z
  ret void
}

; fcmp nnan ule <32 x half>: with nnan, ule relaxes to a direct vmfle.
define void @fcmp_ule_vv_v32f16_nonans(ptr %x, ptr %y, ptr %z) {
; ZVFH-LABEL: fcmp_ule_vv_v32f16_nonans:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    li a3, 32
; ZVFH-NEXT:    vsetvli zero, a3, e16, m4, ta, ma
; ZVFH-NEXT:    vle16.v v8, (a0)
; ZVFH-NEXT:    vle16.v v12, (a1)
; ZVFH-NEXT:    vmfle.vv v16, v8, v12
; ZVFH-NEXT:    vsm.v v16, (a2)
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: fcmp_ule_vv_v32f16_nonans:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    li a3, 32
; ZVFHMIN-NEXT:    vsetvli zero, a3, e16, m4, ta, ma
; ZVFHMIN-NEXT:    vle16.v v8, (a1)
; ZVFHMIN-NEXT:    vle16.v v12, (a0)
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v16, v8
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v24, v12
; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT:    vmfle.vv v8, v24, v16
; ZVFHMIN-NEXT:    vsm.v v8, (a2)
; ZVFHMIN-NEXT:    ret
  %a = load <32 x half>, ptr %x
  %b = load <32 x half>, ptr %y
  %c = fcmp nnan ule <32 x half> %a, %b
  store <32 x i1> %c, ptr %z
  ret void
}

; fcmp uge <16 x float>: lowered as NOT(olt) via vmflt + vmnot.
define void @fcmp_uge_vv_v16f32(ptr %x, ptr %y, ptr %z) {
; CHECK-LABEL: fcmp_uge_vv_v16f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vle32.v v12, (a1)
; CHECK-NEXT:    vmflt.vv v16, v8, v12
; CHECK-NEXT:    vmnot.m v8, v16
; CHECK-NEXT:    vsm.v v8, (a2)
; CHECK-NEXT:    ret
  %a = load <16 x float>, ptr %x
  %b = load <16 x float>, ptr %y
  %c = fcmp uge <16 x float> %a, %b
  store <16 x i1> %c, ptr %z
  ret void
}

; fcmp nnan uge <16 x float>: with nnan, relaxes to vmfle with swapped operands.
define void @fcmp_uge_vv_v16f32_nonans(ptr %x, ptr %y, ptr %z) {
; CHECK-LABEL: fcmp_uge_vv_v16f32_nonans:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vle32.v v12, (a1)
; CHECK-NEXT:    vmfle.vv v16, v12, v8
; CHECK-NEXT:    vsm.v v16, (a2)
; CHECK-NEXT:    ret
  %a = load <16 x float>, ptr %x
  %b = load <16 x float>, ptr %y
  %c = fcmp nnan uge <16 x float> %a, %b
  store <16 x i1> %c, ptr %z
  ret void
}

; fcmp ult <8 x double>: lowered as NOT(oge) via swapped vmfle + vmnot.
define void @fcmp_ult_vv_v8f64(ptr %x, ptr %y, ptr %z) {
; CHECK-LABEL: fcmp_ult_vv_v8f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
; CHECK-NEXT:    vle64.v v8, (a0)
; CHECK-NEXT:    vle64.v v12, (a1)
; CHECK-NEXT:    vmfle.vv v16, v12, v8
; CHECK-NEXT:    vmnot.m v8, v16
; CHECK-NEXT:    vsm.v v8, (a2)
; CHECK-NEXT:    ret
  %a = load <8 x double>, ptr %x
  %b = load <8 x double>, ptr %y
  %c = fcmp ult <8 x double> %a, %b
  store <8 x i1> %c, ptr %z
  ret void
}

; fcmp nnan ult <8 x double>: with nnan, relaxes to a direct vmflt.
define void @fcmp_ult_vv_v8f64_nonans(ptr %x, ptr %y, ptr %z) {
; CHECK-LABEL: fcmp_ult_vv_v8f64_nonans:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
; CHECK-NEXT:    vle64.v v8, (a0)
; CHECK-NEXT:    vle64.v v12, (a1)
; CHECK-NEXT:    vmflt.vv v16, v8, v12
; CHECK-NEXT:    vsm.v v16, (a2)
; CHECK-NEXT:    ret
  %a = load <8 x double>, ptr %x
  %b = load <8 x double>, ptr %y
  %c = fcmp nnan ult <8 x double> %a, %b
  store <8 x i1> %c, ptr %z
  ret void
}

; fcmp ugt <64 x half>: lowered as NOT(ole) via vmfle + vmnot.
; NOTE(review): only ZVFH assertions are present for this function in this
; chunk — confirm whether ZVFHMIN checks were elided upstream or lost.
define void @fcmp_ugt_vv_v64f16(ptr %x, ptr %y, ptr %z) {
; ZVFH-LABEL: fcmp_ugt_vv_v64f16:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    li a3, 64
; ZVFH-NEXT:    vsetvli zero, a3, e16, m8, ta, ma
; ZVFH-NEXT:    vle16.v v8, (a0)
; ZVFH-NEXT:    vle16.v v16, (a1)
; ZVFH-NEXT:    vmfle.vv v24, v8, v16
; ZVFH-NEXT:    vmnot.m v8, v24
; ZVFH-NEXT:    vsm.v v8, (a2)
; ZVFH-NEXT:    ret
  %a = load <64 x half>, ptr %x
  %b = load <64 x half>, ptr %y
  %c = fcmp ugt <64 x half> %a, %b
  store <64 x i1> %c, ptr %z
  ret void
}

; fcmp nnan ugt <64 x half>: with nnan, relaxes to vmflt with swapped operands.
define void @fcmp_ugt_vv_v64f16_nonans(ptr %x, ptr %y, ptr %z) {
; ZVFH-LABEL: fcmp_ugt_vv_v64f16_nonans:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    li a3, 64
; ZVFH-NEXT:    vsetvli zero, a3, e16, m8, ta, ma
; ZVFH-NEXT:    vle16.v v8, (a0)
; ZVFH-NEXT:    vle16.v v16, (a1)
; ZVFH-NEXT:    vmflt.vv v24, v16, v8
; ZVFH-NEXT:    vsm.v v24, (a2)
; ZVFH-NEXT:    ret
  %a = load <64 x half>, ptr %x
  %b = load <64 x half>, ptr %y
  %c = fcmp nnan ugt <64 x half> %a, %b
  store <64 x i1> %c, ptr %z
  ret void
}

; fcmp ueq <32 x float>: lowered as NOR(olt, ogt) — two vmflt results combined
; with vmnor.
define void @fcmp_ueq_vv_v32f32(ptr %x, ptr %y, ptr %z) {
; CHECK-LABEL: fcmp_ueq_vv_v32f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a3, 32
; CHECK-NEXT:    vsetvli zero, a3, e32, m8, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vle32.v v16, (a1)
; CHECK-NEXT:    vmflt.vv v24, v8, v16
; CHECK-NEXT:    vmflt.vv v25, v16, v8
; CHECK-NEXT:    vmnor.mm v8, v25, v24
; CHECK-NEXT:    vsm.v v8, (a2)
; CHECK-NEXT:    ret
  %a = load <32 x float>, ptr %x
  %b = load <32 x float>, ptr %y
  %c = fcmp ueq <32 x float> %a, %b
  store <32 x i1> %c, ptr %z
  ret void
}

; fcmp nnan ueq <32 x float>: with nnan, relaxes to a single vmfeq.
define void @fcmp_ueq_vv_v32f32_nonans(ptr %x, ptr %y, ptr %z) {
; CHECK-LABEL: fcmp_ueq_vv_v32f32_nonans:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a3, 32
; CHECK-NEXT:    vsetvli zero, a3, e32, m8, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vle32.v v16, (a1)
; CHECK-NEXT:    vmfeq.vv v24, v8, v16
; CHECK-NEXT:    vsm.v v24, (a2)
; CHECK-NEXT:    ret
  %a = load <32 x float>, ptr %x
  %b = load <32 x float>, ptr %y
  %c = fcmp nnan ueq <32 x float> %a, %b
  store <32 x i1> %c, ptr %z
  ret void
}

; fcmp one: lowered as OR(olt, ogt) — two vmflt results combined with vmor.
; NOTE(review): the name says v8f64 but the body operates on <16 x double> —
; looks like a stale test name; renaming would also require updating the
; CHECK-LABEL lines, so it is left as-is here.
define void @fcmp_one_vv_v8f64(ptr %x, ptr %y, ptr %z) {
; CHECK-LABEL: fcmp_one_vv_v8f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
; CHECK-NEXT:    vle64.v v8, (a0)
; CHECK-NEXT:    vle64.v v16, (a1)
; CHECK-NEXT:    vmflt.vv v24, v8, v16
; CHECK-NEXT:    vmflt.vv v25, v16, v8
; CHECK-NEXT:    vmor.mm v8, v25, v24
; CHECK-NEXT:    vsm.v v8, (a2)
; CHECK-NEXT:    ret
  %a = load <16 x double>, ptr %x
  %b = load <16 x double>, ptr %y
  %c = fcmp one <16 x double> %a, %b
  store <16 x i1> %c, ptr %z
  ret void
}

; fcmp nnan one: with nnan, relaxes to a single vmfne.
; NOTE(review): name says v8f64 but the body uses <16 x double> — confirm
; against upstream before renaming (CHECK-LABELs would need to match).
define void @fcmp_one_vv_v8f64_nonans(ptr %x, ptr %y, ptr %z) {
; CHECK-LABEL: fcmp_one_vv_v8f64_nonans:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
; CHECK-NEXT:    vle64.v v8, (a0)
; CHECK-NEXT:    vle64.v v16, (a1)
; CHECK-NEXT:    vmfne.vv v24, v8, v16
; CHECK-NEXT:    vsm.v v24, (a2)
; CHECK-NEXT:    ret
  %a = load <16 x double>, ptr %x
  %b = load <16 x double>, ptr %y
  %c = fcmp nnan one <16 x double> %a, %b
  store <16 x i1> %c, ptr %z
  ret void
}

; fcmp ord <4 x half>: each operand is self-compared with vmfeq (true iff not
; NaN) and the two masks are ANDed; the sub-byte <4 x i1> result then goes
; through the usual widen/merge/vmsne mask-store expansion.
define void @fcmp_ord_vv_v4f16(ptr %x, ptr %y, ptr %z) {
; ZVFH-LABEL: fcmp_ord_vv_v4f16:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
; ZVFH-NEXT:    vle16.v v8, (a1)
; ZVFH-NEXT:    vle16.v v9, (a0)
; ZVFH-NEXT:    vmfeq.vv v8, v8, v8
; ZVFH-NEXT:    vmfeq.vv v9, v9, v9
; ZVFH-NEXT:    vmand.mm v0, v9, v8
; ZVFH-NEXT:    vsetvli zero, zero, e8, mf4, ta, ma
; ZVFH-NEXT:    vmv.v.i v8, 0
; ZVFH-NEXT:    vmerge.vim v8, v8, 1, v0
; ZVFH-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; ZVFH-NEXT:    vmv.v.i v9, 0
; ZVFH-NEXT:    vsetivli zero, 4, e8, mf2, tu, ma
; ZVFH-NEXT:    vmv.v.v v9, v8
; ZVFH-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; ZVFH-NEXT:    vmsne.vi v8, v9, 0
; ZVFH-NEXT:    vsm.v v8, (a2)
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: fcmp_ord_vv_v4f16:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
; ZVFHMIN-NEXT:    vle16.v v8, (a1)
; ZVFHMIN-NEXT:    vle16.v v9, (a0)
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v8
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v8, v9
; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
; ZVFHMIN-NEXT:    vmfeq.vv v9, v10, v10
; ZVFHMIN-NEXT:    vmfeq.vv v8, v8, v8
; ZVFHMIN-NEXT:    vmand.mm v0, v8, v9
; ZVFHMIN-NEXT:    vsetvli zero, zero, e8, mf4, ta, ma
; ZVFHMIN-NEXT:    vmv.v.i v8, 0
; ZVFHMIN-NEXT:    vmerge.vim v8, v8, 1, v0
; ZVFHMIN-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; ZVFHMIN-NEXT:    vmv.v.i v9, 0
; ZVFHMIN-NEXT:    vsetivli zero, 4, e8, mf2, tu, ma
; ZVFHMIN-NEXT:    vmv.v.v v9, v8
; ZVFHMIN-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; ZVFHMIN-NEXT:    vmsne.vi v8, v9, 0
; ZVFHMIN-NEXT:    vsm.v v8, (a2)
; ZVFHMIN-NEXT:    ret
  %a = load <4 x half>, ptr %x
  %b = load <4 x half>, ptr %y
  %c = fcmp ord <4 x half> %a, %b
  store <4 x i1> %c, ptr %z
  ret void
}

; fcmp uno: each operand is self-compared with vmfne (true iff NaN) and the
; masks are ORed, followed by the sub-byte mask-store expansion.
; NOTE(review): the name says v4f16 but the body operates on <2 x half> —
; looks like a stale test name; renaming would also require updating the
; CHECK-LABEL lines, so it is left as-is here.
define void @fcmp_uno_vv_v4f16(ptr %x, ptr %y, ptr %z) {
; ZVFH-LABEL: fcmp_uno_vv_v4f16:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
; ZVFH-NEXT:    vle16.v v8, (a1)
; ZVFH-NEXT:    vle16.v v9, (a0)
; ZVFH-NEXT:    vmfne.vv v8, v8, v8
; ZVFH-NEXT:    vmfne.vv v9, v9, v9
; ZVFH-NEXT:    vmor.mm v0, v9, v8
; ZVFH-NEXT:    vsetvli zero, zero, e8, mf8, ta, ma
; ZVFH-NEXT:    vmv.v.i v8, 0
; ZVFH-NEXT:    vmerge.vim v8, v8, 1, v0
; ZVFH-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; ZVFH-NEXT:    vmv.v.i v9, 0
; ZVFH-NEXT:    vsetivli zero, 2, e8, mf2, tu, ma
; ZVFH-NEXT:    vmv.v.v v9, v8
; ZVFH-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; ZVFH-NEXT:    vmsne.vi v8, v9, 0
; ZVFH-NEXT:    vsm.v v8, (a2)
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: fcmp_uno_vv_v4f16:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
; ZVFHMIN-NEXT:    vle16.v v8, (a1)
; ZVFHMIN-NEXT:    vle16.v v9, (a0)
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v8
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v8, v9
; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
; ZVFHMIN-NEXT:    vmfne.vv v9, v10, v10
; ZVFHMIN-NEXT:    vmfne.vv v8, v8, v8
; ZVFHMIN-NEXT:    vmor.mm v0, v8, v9
; ZVFHMIN-NEXT:    vsetvli zero, zero, e8, mf8, ta, ma
; ZVFHMIN-NEXT:    vmv.v.i v8, 0
; ZVFHMIN-NEXT:    vmerge.vim v8, v8, 1, v0
; ZVFHMIN-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; ZVFHMIN-NEXT:    vmv.v.i v9, 0
; ZVFHMIN-NEXT:    vsetivli zero, 2, e8, mf2, tu, ma
; ZVFHMIN-NEXT:    vmv.v.v v9, v8
; ZVFHMIN-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; ZVFHMIN-NEXT:    vmsne.vi v8, v9, 0
; ZVFHMIN-NEXT:    vsm.v v8, (a2)
; ZVFHMIN-NEXT:    ret
  %a = load <2 x half>, ptr %x
  %b = load <2 x half>, ptr %y
  %c = fcmp uno <2 x half> %a, %b
  store <2 x i1> %c, ptr %z
  ret void
}

; fcmp oeq <8 x half> against a splatted scalar: ZVFH folds the splat into
; vmfeq.vf; ZVFHMIN materializes the splat via fmv.x.h + vmv.v.x, then widens.
define void @fcmp_oeq_vf_v8f16(ptr %x, half %y, ptr %z) {
; ZVFH-LABEL: fcmp_oeq_vf_v8f16:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
; ZVFH-NEXT:    vle16.v v8, (a0)
; ZVFH-NEXT:    vmfeq.vf v8, v8, fa0
; ZVFH-NEXT:    vsm.v v8, (a1)
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: fcmp_oeq_vf_v8f16:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
; ZVFHMIN-NEXT:    vle16.v v8, (a0)
; ZVFHMIN-NEXT:    fmv.x.h a0, fa0
; ZVFHMIN-NEXT:    vmv.v.x v9, a0
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v8
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v12, v9
; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
; ZVFHMIN-NEXT:    vmfeq.vv v8, v10, v12
; ZVFHMIN-NEXT:    vsm.v v8, (a1)
; ZVFHMIN-NEXT:    ret
  %a = load <8 x half>, ptr %x
  %b = insertelement <8 x half> poison, half %y, i32 0
  %c = shufflevector <8 x half> %b, <8 x half> poison, <8 x i32> zeroinitializer
  %d = fcmp oeq <8 x half> %a, %c
  store <8 x i1> %d, ptr %z
  ret void
}

; fcmp nnan oeq <8 x half> vs splat: identical lowering to the plain vf oeq case.
define void @fcmp_oeq_vf_v8f16_nonans(ptr %x, half %y, ptr %z) {
; ZVFH-LABEL: fcmp_oeq_vf_v8f16_nonans:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
; ZVFH-NEXT:    vle16.v v8, (a0)
; ZVFH-NEXT:    vmfeq.vf v8, v8, fa0
; ZVFH-NEXT:    vsm.v v8, (a1)
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: fcmp_oeq_vf_v8f16_nonans:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
; ZVFHMIN-NEXT:    vle16.v v8, (a0)
; ZVFHMIN-NEXT:    fmv.x.h a0, fa0
; ZVFHMIN-NEXT:    vmv.v.x v9, a0
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v8
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v12, v9
; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
; ZVFHMIN-NEXT:    vmfeq.vv v8, v10, v12
; ZVFHMIN-NEXT:    vsm.v v8, (a1)
; ZVFHMIN-NEXT:    ret
  %a = load <8 x half>, ptr %x
  %b = insertelement <8 x half> poison, half %y, i32 0
  %c = shufflevector <8 x half> %b, <8 x half> poison, <8 x i32> zeroinitializer
  %d = fcmp nnan oeq <8 x half> %a, %c
  store <8 x i1> %d, ptr %z
  ret void
}

; fcmp une <4 x float> vs splat: vmfne.vf plus the sub-byte mask-store expansion.
define void @fcmp_une_vf_v4f32(ptr %x, float %y, ptr %z) {
; CHECK-LABEL: fcmp_une_vf_v4f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vmfne.vf v0, v8, fa0
; CHECK-NEXT:    vsetvli zero, zero, e8, mf4, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT:    vmv.v.i v9, 0
; CHECK-NEXT:    vsetivli zero, 4, e8, mf2, tu, ma
; CHECK-NEXT:    vmv.v.v v9, v8
; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT:    vmsne.vi v8, v9, 0
; CHECK-NEXT:    vsm.v v8, (a1)
; CHECK-NEXT:    ret
  %a = load <4 x float>, ptr %x
  %b = insertelement <4 x float> poison, float %y, i32 0
  %c = shufflevector <4 x float> %b, <4 x float> poison, <4 x i32> zeroinitializer
  %d = fcmp une <4 x float> %a, %c
  store <4 x i1> %d, ptr %z
  ret void
}

; fcmp nnan une <4 x float> vs splat: identical lowering to the plain vf une case.
define void @fcmp_une_vf_v4f32_nonans(ptr %x, float %y, ptr %z) {
; CHECK-LABEL: fcmp_une_vf_v4f32_nonans:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vmfne.vf v0, v8, fa0
; CHECK-NEXT:    vsetvli zero, zero, e8, mf4, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT:    vmv.v.i v9, 0
; CHECK-NEXT:    vsetivli zero, 4, e8, mf2, tu, ma
; CHECK-NEXT:    vmv.v.v v9, v8
; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT:    vmsne.vi v8, v9, 0
; CHECK-NEXT:    vsm.v v8, (a1)
; CHECK-NEXT:    ret
  %a = load <4 x float>, ptr %x
  %b = insertelement <4 x float> poison, float %y, i32 0
  %c = shufflevector <4 x float> %b, <4 x float> poison, <4 x i32> zeroinitializer
  %d = fcmp nnan une <4 x float> %a, %c
  store <4 x i1> %d, ptr %z
  ret void
}

; fcmp ogt <2 x double> vs splat: direct vmfgt.vf plus the mask-store expansion.
define void @fcmp_ogt_vf_v2f64(ptr %x, double %y, ptr %z) {
; CHECK-LABEL: fcmp_ogt_vf_v2f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
; CHECK-NEXT:    vle64.v v8, (a0)
; CHECK-NEXT:    vmfgt.vf v0, v8, fa0
; CHECK-NEXT:    vsetvli zero, zero, e8, mf8, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT:    vmv.v.i v9, 0
; CHECK-NEXT:    vsetivli zero, 2, e8, mf2, tu, ma
; CHECK-NEXT:    vmv.v.v v9, v8
; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT:    vmsne.vi v8, v9, 0
; CHECK-NEXT:    vsm.v v8, (a1)
; CHECK-NEXT:    ret
  %a = load <2 x double>, ptr %x
  %b = insertelement <2 x double> poison, double %y, i32 0
  %c = shufflevector <2 x double> %b, <2 x double> poison, <2 x i32> zeroinitializer
  %d = fcmp ogt <2 x double> %a, %c
  store <2 x i1> %d, ptr %z
  ret void
}

; fcmp nnan ogt <2 x double> vs splat: identical lowering to the plain vf ogt case.
define void @fcmp_ogt_vf_v2f64_nonans(ptr %x, double %y, ptr %z) {
; CHECK-LABEL: fcmp_ogt_vf_v2f64_nonans:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
; CHECK-NEXT:    vle64.v v8, (a0)
; CHECK-NEXT:    vmfgt.vf v0, v8, fa0
; CHECK-NEXT:    vsetvli zero, zero, e8, mf8, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT:    vmv.v.i v9, 0
; CHECK-NEXT:    vsetivli zero, 2, e8, mf2, tu, ma
; CHECK-NEXT:    vmv.v.v v9, v8
; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT:    vmsne.vi v8, v9, 0
; CHECK-NEXT:    vsm.v v8, (a1)
; CHECK-NEXT:    ret
  %a = load <2 x double>, ptr %x
  %b = insertelement <2 x double> poison, double %y, i32 0
  %c = shufflevector <2 x double> %b, <2 x double> poison, <2 x i32> zeroinitializer
  %d = fcmp nnan ogt <2 x double> %a, %c
  store <2 x i1> %d, ptr %z
  ret void
}

; fcmp olt <16 x half> vs splat: ZVFH uses vmflt.vf; ZVFHMIN splats through a
; GPR (fmv.x.h + vmv.v.x) and widens both sides to f32.
define void @fcmp_olt_vf_v16f16(ptr %x, half %y, ptr %z) {
; ZVFH-LABEL: fcmp_olt_vf_v16f16:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
; ZVFH-NEXT:    vle16.v v8, (a0)
; ZVFH-NEXT:    vmflt.vf v10, v8, fa0
; ZVFH-NEXT:    vsm.v v10, (a1)
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: fcmp_olt_vf_v16f16:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
; ZVFHMIN-NEXT:    vle16.v v8, (a0)
; ZVFHMIN-NEXT:    fmv.x.h a0, fa0
; ZVFHMIN-NEXT:    vmv.v.x v10, a0
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v12, v8
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v16, v10
; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
; ZVFHMIN-NEXT:    vmflt.vv v8, v12, v16
; ZVFHMIN-NEXT:    vsm.v v8, (a1)
; ZVFHMIN-NEXT:    ret
  %a = load <16 x half>, ptr %x
  %b = insertelement <16 x half> poison, half %y, i32 0
  %c = shufflevector <16 x half> %b, <16 x half> poison, <16 x i32> zeroinitializer
  %d = fcmp olt <16 x half> %a, %c
  store <16 x i1> %d, ptr %z
  ret void
}

; fcmp nnan olt <16 x half> vs splat: identical lowering to the plain vf olt case.
define void @fcmp_olt_vf_v16f16_nonans(ptr %x, half %y, ptr %z) {
; ZVFH-LABEL: fcmp_olt_vf_v16f16_nonans:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
; ZVFH-NEXT:    vle16.v v8, (a0)
; ZVFH-NEXT:    vmflt.vf v10, v8, fa0
; ZVFH-NEXT:    vsm.v v10, (a1)
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: fcmp_olt_vf_v16f16_nonans:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
; ZVFHMIN-NEXT:    vle16.v v8, (a0)
; ZVFHMIN-NEXT:    fmv.x.h a0, fa0
; ZVFHMIN-NEXT:    vmv.v.x v10, a0
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v12, v8
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v16, v10
; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
; ZVFHMIN-NEXT:    vmflt.vv v8, v12, v16
; ZVFHMIN-NEXT:    vsm.v v8, (a1)
; ZVFHMIN-NEXT:    ret
  %a = load <16 x half>, ptr %x
  %b = insertelement <16 x half> poison, half %y, i32 0
  %c = shufflevector <16 x half> %b, <16 x half> poison, <16 x i32> zeroinitializer
  %d = fcmp nnan olt <16 x half> %a, %c
  store <16 x i1> %d, ptr %z
  ret void
}

; fcmp oge <8 x float> vs splat: direct vmfge.vf.
define void @fcmp_oge_vf_v8f32(ptr %x, float %y, ptr %z) {
; CHECK-LABEL: fcmp_oge_vf_v8f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vmfge.vf v10, v8, fa0
; CHECK-NEXT:    vsm.v v10, (a1)
; CHECK-NEXT:    ret
  %a = load <8 x float>, ptr %x
  %b = insertelement <8 x float> poison, float %y, i32 0
  %c = shufflevector <8 x float> %b, <8 x float> poison, <8 x i32> zeroinitializer
  %d = fcmp oge <8 x float> %a, %c
  store <8 x i1> %d, ptr %z
  ret void
}

; fcmp nnan oge <8 x float> vs splat: identical lowering to the plain vf oge case.
define void @fcmp_oge_vf_v8f32_nonans(ptr %x, float %y, ptr %z) {
; CHECK-LABEL: fcmp_oge_vf_v8f32_nonans:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vmfge.vf v10, v8, fa0
; CHECK-NEXT:    vsm.v v10, (a1)
; CHECK-NEXT:    ret
  %a = load <8 x float>, ptr %x
  %b = insertelement <8 x float> poison, float %y, i32 0
  %c = shufflevector <8 x float> %b, <8 x float> poison, <8 x i32> zeroinitializer
  %d = fcmp nnan oge <8 x float> %a, %c
  store <8 x i1> %d, ptr %z
  ret void
}

; ole, vector LHS vs splat scalar, <4 x double>: vmfle.vf, then the <4 x i1>
; result is widened to a byte-addressable <8 x i1> (vmerge/vmsne sequence)
; before the mask store, since a 4-bit mask cannot be stored directly.
882define void @fcmp_ole_vf_v4f64(ptr %x, double %y, ptr %z) {
883; CHECK-LABEL: fcmp_ole_vf_v4f64:
884; CHECK:       # %bb.0:
885; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
886; CHECK-NEXT:    vle64.v v8, (a0)
887; CHECK-NEXT:    vmfle.vf v0, v8, fa0
888; CHECK-NEXT:    vsetvli zero, zero, e8, mf4, ta, ma
889; CHECK-NEXT:    vmv.v.i v8, 0
890; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
891; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
892; CHECK-NEXT:    vmv.v.i v9, 0
893; CHECK-NEXT:    vsetivli zero, 4, e8, mf2, tu, ma
894; CHECK-NEXT:    vmv.v.v v9, v8
895; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
896; CHECK-NEXT:    vmsne.vi v8, v9, 0
897; CHECK-NEXT:    vsm.v v8, (a1)
898; CHECK-NEXT:    ret
899  %a = load <4 x double>, ptr %x
900  %b = insertelement <4 x double> poison, double %y, i32 0
901  %c = shufflevector <4 x double> %b, <4 x double> poison, <4 x i32> zeroinitializer
902  %d = fcmp ole <4 x double> %a, %c
903  store <4 x i1> %d, ptr %z
904  ret void
905}
906
; nnan ole, vector LHS vs splat scalar, <4 x double>: same codegen as the
; non-nnan case (vmfle.vf plus the <4 x i1> -> <8 x i1> store widening).
907define void @fcmp_ole_vf_v4f64_nonans(ptr %x, double %y, ptr %z) {
908; CHECK-LABEL: fcmp_ole_vf_v4f64_nonans:
909; CHECK:       # %bb.0:
910; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
911; CHECK-NEXT:    vle64.v v8, (a0)
912; CHECK-NEXT:    vmfle.vf v0, v8, fa0
913; CHECK-NEXT:    vsetvli zero, zero, e8, mf4, ta, ma
914; CHECK-NEXT:    vmv.v.i v8, 0
915; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
916; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
917; CHECK-NEXT:    vmv.v.i v9, 0
918; CHECK-NEXT:    vsetivli zero, 4, e8, mf2, tu, ma
919; CHECK-NEXT:    vmv.v.v v9, v8
920; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
921; CHECK-NEXT:    vmsne.vi v8, v9, 0
922; CHECK-NEXT:    vsm.v v8, (a1)
923; CHECK-NEXT:    ret
924  %a = load <4 x double>, ptr %x
925  %b = insertelement <4 x double> poison, double %y, i32 0
926  %c = shufflevector <4 x double> %b, <4 x double> poison, <4 x i32> zeroinitializer
927  %d = fcmp nnan ole <4 x double> %a, %c
928  store <4 x i1> %d, ptr %z
929  ret void
930}
931
; ule (unordered-or-le), vector LHS vs splat scalar, <32 x half>: lowered as
; the negation of ogt — vmfgt.vf/vmflt.vv followed by vmnot.m — so NaN lanes
; come out true. VL 32 does not fit vsetivli's immediate, hence li+vsetvli.
932define void @fcmp_ule_vf_v32f16(ptr %x, half %y, ptr %z) {
933; ZVFH-LABEL: fcmp_ule_vf_v32f16:
934; ZVFH:       # %bb.0:
935; ZVFH-NEXT:    li a2, 32
936; ZVFH-NEXT:    vsetvli zero, a2, e16, m4, ta, ma
937; ZVFH-NEXT:    vle16.v v8, (a0)
938; ZVFH-NEXT:    vmfgt.vf v12, v8, fa0
939; ZVFH-NEXT:    vmnot.m v8, v12
940; ZVFH-NEXT:    vsm.v v8, (a1)
941; ZVFH-NEXT:    ret
942;
943; ZVFHMIN-LABEL: fcmp_ule_vf_v32f16:
944; ZVFHMIN:       # %bb.0:
945; ZVFHMIN-NEXT:    li a2, 32
946; ZVFHMIN-NEXT:    vsetvli zero, a2, e16, m4, ta, ma
947; ZVFHMIN-NEXT:    vle16.v v8, (a0)
948; ZVFHMIN-NEXT:    fmv.x.h a0, fa0
949; ZVFHMIN-NEXT:    vmv.v.x v12, a0
950; ZVFHMIN-NEXT:    vfwcvt.f.f.v v16, v8
951; ZVFHMIN-NEXT:    vfwcvt.f.f.v v24, v12
952; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
953; ZVFHMIN-NEXT:    vmflt.vv v8, v24, v16
954; ZVFHMIN-NEXT:    vmnot.m v8, v8
955; ZVFHMIN-NEXT:    vsm.v v8, (a1)
956; ZVFHMIN-NEXT:    ret
957  %a = load <32 x half>, ptr %x
958  %b = insertelement <32 x half> poison, half %y, i32 0
959  %c = shufflevector <32 x half> %b, <32 x half> poison, <32 x i32> zeroinitializer
960  %d = fcmp ule <32 x half> %a, %c
961  store <32 x i1> %d, ptr %z
962  ret void
963}
964
; nnan ule, vector LHS vs splat scalar, <32 x half>: with nnan, ule == ole,
; so the vmnot.m disappears and a direct vmfle is emitted.
965define void @fcmp_ule_vf_v32f16_nonans(ptr %x, half %y, ptr %z) {
966; ZVFH-LABEL: fcmp_ule_vf_v32f16_nonans:
967; ZVFH:       # %bb.0:
968; ZVFH-NEXT:    li a2, 32
969; ZVFH-NEXT:    vsetvli zero, a2, e16, m4, ta, ma
970; ZVFH-NEXT:    vle16.v v8, (a0)
971; ZVFH-NEXT:    vmfle.vf v12, v8, fa0
972; ZVFH-NEXT:    vsm.v v12, (a1)
973; ZVFH-NEXT:    ret
974;
975; ZVFHMIN-LABEL: fcmp_ule_vf_v32f16_nonans:
976; ZVFHMIN:       # %bb.0:
977; ZVFHMIN-NEXT:    li a2, 32
978; ZVFHMIN-NEXT:    vsetvli zero, a2, e16, m4, ta, ma
979; ZVFHMIN-NEXT:    vle16.v v8, (a0)
980; ZVFHMIN-NEXT:    fmv.x.h a0, fa0
981; ZVFHMIN-NEXT:    vmv.v.x v12, a0
982; ZVFHMIN-NEXT:    vfwcvt.f.f.v v16, v8
983; ZVFHMIN-NEXT:    vfwcvt.f.f.v v24, v12
984; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
985; ZVFHMIN-NEXT:    vmfle.vv v8, v16, v24
986; ZVFHMIN-NEXT:    vsm.v v8, (a1)
987; ZVFHMIN-NEXT:    ret
988  %a = load <32 x half>, ptr %x
989  %b = insertelement <32 x half> poison, half %y, i32 0
990  %c = shufflevector <32 x half> %b, <32 x half> poison, <32 x i32> zeroinitializer
991  %d = fcmp nnan ule <32 x half> %a, %c
992  store <32 x i1> %d, ptr %z
993  ret void
994}
995
; uge, vector LHS vs splat scalar, <16 x float>: negation of olt, i.e.
; vmflt.vf + vmnot.m (NaN lanes become true).
996define void @fcmp_uge_vf_v16f32(ptr %x, float %y, ptr %z) {
997; CHECK-LABEL: fcmp_uge_vf_v16f32:
998; CHECK:       # %bb.0:
999; CHECK-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
1000; CHECK-NEXT:    vle32.v v8, (a0)
1001; CHECK-NEXT:    vmflt.vf v12, v8, fa0
1002; CHECK-NEXT:    vmnot.m v8, v12
1003; CHECK-NEXT:    vsm.v v8, (a1)
1004; CHECK-NEXT:    ret
1005  %a = load <16 x float>, ptr %x
1006  %b = insertelement <16 x float> poison, float %y, i32 0
1007  %c = shufflevector <16 x float> %b, <16 x float> poison, <16 x i32> zeroinitializer
1008  %d = fcmp uge <16 x float> %a, %c
1009  store <16 x i1> %d, ptr %z
1010  ret void
1011}
1012
; nnan uge, vector LHS vs splat scalar, <16 x float>: with nnan, uge == oge,
; so a direct vmfge.vf is emitted without the mask negation.
1013define void @fcmp_uge_vf_v16f32_nonans(ptr %x, float %y, ptr %z) {
1014; CHECK-LABEL: fcmp_uge_vf_v16f32_nonans:
1015; CHECK:       # %bb.0:
1016; CHECK-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
1017; CHECK-NEXT:    vle32.v v8, (a0)
1018; CHECK-NEXT:    vmfge.vf v12, v8, fa0
1019; CHECK-NEXT:    vsm.v v12, (a1)
1020; CHECK-NEXT:    ret
1021  %a = load <16 x float>, ptr %x
1022  %b = insertelement <16 x float> poison, float %y, i32 0
1023  %c = shufflevector <16 x float> %b, <16 x float> poison, <16 x i32> zeroinitializer
1024  %d = fcmp nnan uge <16 x float> %a, %c
1025  store <16 x i1> %d, ptr %z
1026  ret void
1027}
1028
; ult, vector LHS vs splat scalar, <8 x double>: negation of oge, i.e.
; vmfge.vf + vmnot.m (NaN lanes become true).
1029define void @fcmp_ult_vf_v8f64(ptr %x, double %y, ptr %z) {
1030; CHECK-LABEL: fcmp_ult_vf_v8f64:
1031; CHECK:       # %bb.0:
1032; CHECK-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
1033; CHECK-NEXT:    vle64.v v8, (a0)
1034; CHECK-NEXT:    vmfge.vf v12, v8, fa0
1035; CHECK-NEXT:    vmnot.m v8, v12
1036; CHECK-NEXT:    vsm.v v8, (a1)
1037; CHECK-NEXT:    ret
1038  %a = load <8 x double>, ptr %x
1039  %b = insertelement <8 x double> poison, double %y, i32 0
1040  %c = shufflevector <8 x double> %b, <8 x double> poison, <8 x i32> zeroinitializer
1041  %d = fcmp ult <8 x double> %a, %c
1042  store <8 x i1> %d, ptr %z
1043  ret void
1044}
1045
; nnan ult, vector LHS vs splat scalar, <8 x double>: with nnan, ult == olt,
; so a single vmflt.vf suffices.
1046define void @fcmp_ult_vf_v8f64_nonans(ptr %x, double %y, ptr %z) {
1047; CHECK-LABEL: fcmp_ult_vf_v8f64_nonans:
1048; CHECK:       # %bb.0:
1049; CHECK-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
1050; CHECK-NEXT:    vle64.v v8, (a0)
1051; CHECK-NEXT:    vmflt.vf v12, v8, fa0
1052; CHECK-NEXT:    vsm.v v12, (a1)
1053; CHECK-NEXT:    ret
1054  %a = load <8 x double>, ptr %x
1055  %b = insertelement <8 x double> poison, double %y, i32 0
1056  %c = shufflevector <8 x double> %b, <8 x double> poison, <8 x i32> zeroinitializer
1057  %d = fcmp nnan ult <8 x double> %a, %c
1058  store <8 x i1> %d, ptr %z
1059  ret void
1060}
1061
; ugt, vector LHS vs splat scalar, <64 x half>: negation of ole — vmfle.vf +
; vmnot.m. NOTE(review): only ZVFH checks are present here; presumably the
; v64f16 ZVFHMIN widening would exceed LMUL=8 — confirm against the
; autogenerated file.
1062define void @fcmp_ugt_vf_v64f16(ptr %x, half %y, ptr %z) {
1063; ZVFH-LABEL: fcmp_ugt_vf_v64f16:
1064; ZVFH:       # %bb.0:
1065; ZVFH-NEXT:    li a2, 64
1066; ZVFH-NEXT:    vsetvli zero, a2, e16, m8, ta, ma
1067; ZVFH-NEXT:    vle16.v v8, (a0)
1068; ZVFH-NEXT:    vmfle.vf v16, v8, fa0
1069; ZVFH-NEXT:    vmnot.m v8, v16
1070; ZVFH-NEXT:    vsm.v v8, (a1)
1071; ZVFH-NEXT:    ret
1072  %a = load <64 x half>, ptr %x
1073  %b = insertelement <64 x half> poison, half %y, i32 0
1074  %c = shufflevector <64 x half> %b, <64 x half> poison, <64 x i32> zeroinitializer
1075  %d = fcmp ugt <64 x half> %a, %c
1076  store <64 x i1> %d, ptr %z
1077  ret void
1078}
1079
; nnan ugt, vector LHS vs splat scalar, <64 x half>: with nnan, ugt == ogt,
; lowered to a single vmfgt.vf (ZVFH checks only, as in the test above).
1080define void @fcmp_ugt_vf_v64f16_nonans(ptr %x, half %y, ptr %z) {
1081; ZVFH-LABEL: fcmp_ugt_vf_v64f16_nonans:
1082; ZVFH:       # %bb.0:
1083; ZVFH-NEXT:    li a2, 64
1084; ZVFH-NEXT:    vsetvli zero, a2, e16, m8, ta, ma
1085; ZVFH-NEXT:    vle16.v v8, (a0)
1086; ZVFH-NEXT:    vmfgt.vf v16, v8, fa0
1087; ZVFH-NEXT:    vsm.v v16, (a1)
1088; ZVFH-NEXT:    ret
1089  %a = load <64 x half>, ptr %x
1090  %b = insertelement <64 x half> poison, half %y, i32 0
1091  %c = shufflevector <64 x half> %b, <64 x half> poison, <64 x i32> zeroinitializer
1092  %d = fcmp nnan ugt <64 x half> %a, %c
1093  store <64 x i1> %d, ptr %z
1094  ret void
1095}
1096
; ueq, vector LHS vs splat scalar, <32 x float>: ueq = !(olt | ogt), lowered
; as vmflt.vf + vmfgt.vf combined with vmnor.mm.
1097define void @fcmp_ueq_vf_v32f32(ptr %x, float %y, ptr %z) {
1098; CHECK-LABEL: fcmp_ueq_vf_v32f32:
1099; CHECK:       # %bb.0:
1100; CHECK-NEXT:    li a2, 32
1101; CHECK-NEXT:    vsetvli zero, a2, e32, m8, ta, ma
1102; CHECK-NEXT:    vle32.v v8, (a0)
1103; CHECK-NEXT:    vmflt.vf v16, v8, fa0
1104; CHECK-NEXT:    vmfgt.vf v17, v8, fa0
1105; CHECK-NEXT:    vmnor.mm v8, v17, v16
1106; CHECK-NEXT:    vsm.v v8, (a1)
1107; CHECK-NEXT:    ret
1108  %a = load <32 x float>, ptr %x
1109  %b = insertelement <32 x float> poison, float %y, i32 0
1110  %c = shufflevector <32 x float> %b, <32 x float> poison, <32 x i32> zeroinitializer
1111  %d = fcmp ueq <32 x float> %a, %c
1112  store <32 x i1> %d, ptr %z
1113  ret void
1114}
1115
; nnan ueq, vector LHS vs splat scalar, <32 x float>: with nnan, ueq == oeq,
; collapsing the two-compare sequence to a single vmfeq.vf.
1116define void @fcmp_ueq_vf_v32f32_nonans(ptr %x, float %y, ptr %z) {
1117; CHECK-LABEL: fcmp_ueq_vf_v32f32_nonans:
1118; CHECK:       # %bb.0:
1119; CHECK-NEXT:    li a2, 32
1120; CHECK-NEXT:    vsetvli zero, a2, e32, m8, ta, ma
1121; CHECK-NEXT:    vle32.v v8, (a0)
1122; CHECK-NEXT:    vmfeq.vf v16, v8, fa0
1123; CHECK-NEXT:    vsm.v v16, (a1)
1124; CHECK-NEXT:    ret
1125  %a = load <32 x float>, ptr %x
1126  %b = insertelement <32 x float> poison, float %y, i32 0
1127  %c = shufflevector <32 x float> %b, <32 x float> poison, <32 x i32> zeroinitializer
1128  %d = fcmp nnan ueq <32 x float> %a, %c
1129  store <32 x i1> %d, ptr %z
1130  ret void
1131}
1132
; one (ordered-not-equal), vector LHS vs splat scalar: one = olt | ogt,
; lowered as vmflt.vf + vmfgt.vf + vmor.mm.
; NOTE(review): the name says v8f64 but the body operates on <16 x double>
; (vsetivli 16, m8) — looks like a copy-paste misnomer; consider renaming to
; fcmp_one_vf_v16f64 on the next regeneration.
1133define void @fcmp_one_vf_v8f64(ptr %x, double %y, ptr %z) {
1134; CHECK-LABEL: fcmp_one_vf_v8f64:
1135; CHECK:       # %bb.0:
1136; CHECK-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
1137; CHECK-NEXT:    vle64.v v8, (a0)
1138; CHECK-NEXT:    vmflt.vf v16, v8, fa0
1139; CHECK-NEXT:    vmfgt.vf v17, v8, fa0
1140; CHECK-NEXT:    vmor.mm v8, v17, v16
1141; CHECK-NEXT:    vsm.v v8, (a1)
1142; CHECK-NEXT:    ret
1143  %a = load <16 x double>, ptr %x
1144  %b = insertelement <16 x double> poison, double %y, i32 0
1145  %c = shufflevector <16 x double> %b, <16 x double> poison, <16 x i32> zeroinitializer
1146  %d = fcmp one <16 x double> %a, %c
1147  store <16 x i1> %d, ptr %z
1148  ret void
1149}
1150
; nnan one, vector LHS vs splat scalar: with nnan, one == une, collapsing to
; a single vmfne.vf.
; NOTE(review): as above, name says v8f64 but the body uses <16 x double>.
1151define void @fcmp_one_vf_v8f64_nonans(ptr %x, double %y, ptr %z) {
1152; CHECK-LABEL: fcmp_one_vf_v8f64_nonans:
1153; CHECK:       # %bb.0:
1154; CHECK-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
1155; CHECK-NEXT:    vle64.v v8, (a0)
1156; CHECK-NEXT:    vmfne.vf v16, v8, fa0
1157; CHECK-NEXT:    vsm.v v16, (a1)
1158; CHECK-NEXT:    ret
1159  %a = load <16 x double>, ptr %x
1160  %b = insertelement <16 x double> poison, double %y, i32 0
1161  %c = shufflevector <16 x double> %b, <16 x double> poison, <16 x i32> zeroinitializer
1162  %d = fcmp nnan one <16 x double> %a, %c
1163  store <16 x i1> %d, ptr %z
1164  ret void
1165}
1166
; ord (both operands not NaN), vector vs splat scalar, <4 x half>: lowered as
; (a == a) & (splat == splat) via two vmfeq plus vmand.mm; the <4 x i1> result
; is then widened to <8 x i1> for the mask store. ZVFHMIN additionally widens
; each operand to f32 before its self-equality compare.
1167define void @fcmp_ord_vf_v4f16(ptr %x, half %y, ptr %z) {
1168; ZVFH-LABEL: fcmp_ord_vf_v4f16:
1169; ZVFH:       # %bb.0:
1170; ZVFH-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
1171; ZVFH-NEXT:    vle16.v v8, (a0)
1172; ZVFH-NEXT:    vfmv.v.f v9, fa0
1173; ZVFH-NEXT:    vmfeq.vf v9, v9, fa0
1174; ZVFH-NEXT:    vmfeq.vv v8, v8, v8
1175; ZVFH-NEXT:    vmand.mm v0, v8, v9
1176; ZVFH-NEXT:    vsetvli zero, zero, e8, mf4, ta, ma
1177; ZVFH-NEXT:    vmv.v.i v8, 0
1178; ZVFH-NEXT:    vmerge.vim v8, v8, 1, v0
1179; ZVFH-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
1180; ZVFH-NEXT:    vmv.v.i v9, 0
1181; ZVFH-NEXT:    vsetivli zero, 4, e8, mf2, tu, ma
1182; ZVFH-NEXT:    vmv.v.v v9, v8
1183; ZVFH-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
1184; ZVFH-NEXT:    vmsne.vi v8, v9, 0
1185; ZVFH-NEXT:    vsm.v v8, (a1)
1186; ZVFH-NEXT:    ret
1187;
1188; ZVFHMIN-LABEL: fcmp_ord_vf_v4f16:
1189; ZVFHMIN:       # %bb.0:
1190; ZVFHMIN-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
1191; ZVFHMIN-NEXT:    vle16.v v8, (a0)
1192; ZVFHMIN-NEXT:    fmv.x.h a0, fa0
1193; ZVFHMIN-NEXT:    vmv.v.x v9, a0
1194; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v9
1195; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
1196; ZVFHMIN-NEXT:    vmfeq.vv v9, v10, v10
1197; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
1198; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v8
1199; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
1200; ZVFHMIN-NEXT:    vmfeq.vv v8, v10, v10
1201; ZVFHMIN-NEXT:    vmand.mm v0, v8, v9
1202; ZVFHMIN-NEXT:    vsetvli zero, zero, e8, mf4, ta, ma
1203; ZVFHMIN-NEXT:    vmv.v.i v8, 0
1204; ZVFHMIN-NEXT:    vmerge.vim v8, v8, 1, v0
1205; ZVFHMIN-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
1206; ZVFHMIN-NEXT:    vmv.v.i v9, 0
1207; ZVFHMIN-NEXT:    vsetivli zero, 4, e8, mf2, tu, ma
1208; ZVFHMIN-NEXT:    vmv.v.v v9, v8
1209; ZVFHMIN-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
1210; ZVFHMIN-NEXT:    vmsne.vi v8, v9, 0
1211; ZVFHMIN-NEXT:    vsm.v v8, (a1)
1212; ZVFHMIN-NEXT:    ret
1213  %a = load <4 x half>, ptr %x
1214  %b = insertelement <4 x half> poison, half %y, i32 0
1215  %c = shufflevector <4 x half> %b, <4 x half> poison, <4 x i32> zeroinitializer
1216  %d = fcmp ord <4 x half> %a, %c
1217  store <4 x i1> %d, ptr %z
1218  ret void
1219}
1220
; uno (either operand NaN), vector vs splat scalar: (a != a) | (splat !=
; splat) via two vmfne plus vmor.mm, then <2 x i1> -> <8 x i1> store widening.
; NOTE(review): the name says v4f16 but the body operates on <2 x half>
; (vsetivli 2, mf4) — looks like a copy-paste misnomer; consider renaming to
; fcmp_uno_vf_v2f16 on the next regeneration.
1221define void @fcmp_uno_vf_v4f16(ptr %x, half %y, ptr %z) {
1222; ZVFH-LABEL: fcmp_uno_vf_v4f16:
1223; ZVFH:       # %bb.0:
1224; ZVFH-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
1225; ZVFH-NEXT:    vle16.v v8, (a0)
1226; ZVFH-NEXT:    vfmv.v.f v9, fa0
1227; ZVFH-NEXT:    vmfne.vf v9, v9, fa0
1228; ZVFH-NEXT:    vmfne.vv v8, v8, v8
1229; ZVFH-NEXT:    vmor.mm v0, v8, v9
1230; ZVFH-NEXT:    vsetvli zero, zero, e8, mf8, ta, ma
1231; ZVFH-NEXT:    vmv.v.i v8, 0
1232; ZVFH-NEXT:    vmerge.vim v8, v8, 1, v0
1233; ZVFH-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
1234; ZVFH-NEXT:    vmv.v.i v9, 0
1235; ZVFH-NEXT:    vsetivli zero, 2, e8, mf2, tu, ma
1236; ZVFH-NEXT:    vmv.v.v v9, v8
1237; ZVFH-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
1238; ZVFH-NEXT:    vmsne.vi v8, v9, 0
1239; ZVFH-NEXT:    vsm.v v8, (a1)
1240; ZVFH-NEXT:    ret
1241;
1242; ZVFHMIN-LABEL: fcmp_uno_vf_v4f16:
1243; ZVFHMIN:       # %bb.0:
1244; ZVFHMIN-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
1245; ZVFHMIN-NEXT:    vle16.v v8, (a0)
1246; ZVFHMIN-NEXT:    fmv.x.h a0, fa0
1247; ZVFHMIN-NEXT:    vmv.v.x v9, a0
1248; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v9
1249; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
1250; ZVFHMIN-NEXT:    vmfne.vv v9, v10, v10
1251; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
1252; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v8
1253; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
1254; ZVFHMIN-NEXT:    vmfne.vv v8, v10, v10
1255; ZVFHMIN-NEXT:    vmor.mm v0, v8, v9
1256; ZVFHMIN-NEXT:    vsetvli zero, zero, e8, mf8, ta, ma
1257; ZVFHMIN-NEXT:    vmv.v.i v8, 0
1258; ZVFHMIN-NEXT:    vmerge.vim v8, v8, 1, v0
1259; ZVFHMIN-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
1260; ZVFHMIN-NEXT:    vmv.v.i v9, 0
1261; ZVFHMIN-NEXT:    vsetivli zero, 2, e8, mf2, tu, ma
1262; ZVFHMIN-NEXT:    vmv.v.v v9, v8
1263; ZVFHMIN-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
1264; ZVFHMIN-NEXT:    vmsne.vi v8, v9, 0
1265; ZVFHMIN-NEXT:    vsm.v v8, (a1)
1266; ZVFHMIN-NEXT:    ret
1267  %a = load <2 x half>, ptr %x
1268  %b = insertelement <2 x half> poison, half %y, i32 0
1269  %c = shufflevector <2 x half> %b, <2 x half> poison, <2 x i32> zeroinitializer
1270  %d = fcmp uno <2 x half> %a, %c
1271  store <2 x i1> %d, ptr %z
1272  ret void
1273}
1274
; "fv" variants put the splat scalar on the LHS of the fcmp. oeq is
; symmetric, so ZVFH still emits vmfeq.vf with the vector as the first source.
1275define void @fcmp_oeq_fv_v8f16(ptr %x, half %y, ptr %z) {
1276; ZVFH-LABEL: fcmp_oeq_fv_v8f16:
1277; ZVFH:       # %bb.0:
1278; ZVFH-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
1279; ZVFH-NEXT:    vle16.v v8, (a0)
1280; ZVFH-NEXT:    vmfeq.vf v8, v8, fa0
1281; ZVFH-NEXT:    vsm.v v8, (a1)
1282; ZVFH-NEXT:    ret
1283;
1284; ZVFHMIN-LABEL: fcmp_oeq_fv_v8f16:
1285; ZVFHMIN:       # %bb.0:
1286; ZVFHMIN-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
1287; ZVFHMIN-NEXT:    vle16.v v8, (a0)
1288; ZVFHMIN-NEXT:    fmv.x.h a0, fa0
1289; ZVFHMIN-NEXT:    vmv.v.x v9, a0
1290; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v8
1291; ZVFHMIN-NEXT:    vfwcvt.f.f.v v12, v9
1292; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
1293; ZVFHMIN-NEXT:    vmfeq.vv v8, v12, v10
1294; ZVFHMIN-NEXT:    vsm.v v8, (a1)
1295; ZVFHMIN-NEXT:    ret
1296  %a = load <8 x half>, ptr %x
1297  %b = insertelement <8 x half> poison, half %y, i32 0
1298  %c = shufflevector <8 x half> %b, <8 x half> poison, <8 x i32> zeroinitializer
1299  %d = fcmp oeq <8 x half> %c, %a
1300  store <8 x i1> %d, ptr %z
1301  ret void
1302}
1303
; nnan oeq, splat scalar LHS, <8 x half>: identical codegen to the non-nnan
; fv case (oeq is symmetric and already a single compare).
1304define void @fcmp_oeq_fv_v8f16_nonans(ptr %x, half %y, ptr %z) {
1305; ZVFH-LABEL: fcmp_oeq_fv_v8f16_nonans:
1306; ZVFH:       # %bb.0:
1307; ZVFH-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
1308; ZVFH-NEXT:    vle16.v v8, (a0)
1309; ZVFH-NEXT:    vmfeq.vf v8, v8, fa0
1310; ZVFH-NEXT:    vsm.v v8, (a1)
1311; ZVFH-NEXT:    ret
1312;
1313; ZVFHMIN-LABEL: fcmp_oeq_fv_v8f16_nonans:
1314; ZVFHMIN:       # %bb.0:
1315; ZVFHMIN-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
1316; ZVFHMIN-NEXT:    vle16.v v8, (a0)
1317; ZVFHMIN-NEXT:    fmv.x.h a0, fa0
1318; ZVFHMIN-NEXT:    vmv.v.x v9, a0
1319; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v8
1320; ZVFHMIN-NEXT:    vfwcvt.f.f.v v12, v9
1321; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
1322; ZVFHMIN-NEXT:    vmfeq.vv v8, v12, v10
1323; ZVFHMIN-NEXT:    vsm.v v8, (a1)
1324; ZVFHMIN-NEXT:    ret
1325  %a = load <8 x half>, ptr %x
1326  %b = insertelement <8 x half> poison, half %y, i32 0
1327  %c = shufflevector <8 x half> %b, <8 x half> poison, <8 x i32> zeroinitializer
1328  %d = fcmp nnan oeq <8 x half> %c, %a
1329  store <8 x i1> %d, ptr %z
1330  ret void
1331}
1332
; une, splat scalar LHS, <4 x float>: symmetric predicate, so vmfne.vf; the
; <4 x i1> result is widened to <8 x i1> for the mask store.
1333define void @fcmp_une_fv_v4f32(ptr %x, float %y, ptr %z) {
1334; CHECK-LABEL: fcmp_une_fv_v4f32:
1335; CHECK:       # %bb.0:
1336; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
1337; CHECK-NEXT:    vle32.v v8, (a0)
1338; CHECK-NEXT:    vmfne.vf v0, v8, fa0
1339; CHECK-NEXT:    vsetvli zero, zero, e8, mf4, ta, ma
1340; CHECK-NEXT:    vmv.v.i v8, 0
1341; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
1342; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
1343; CHECK-NEXT:    vmv.v.i v9, 0
1344; CHECK-NEXT:    vsetivli zero, 4, e8, mf2, tu, ma
1345; CHECK-NEXT:    vmv.v.v v9, v8
1346; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
1347; CHECK-NEXT:    vmsne.vi v8, v9, 0
1348; CHECK-NEXT:    vsm.v v8, (a1)
1349; CHECK-NEXT:    ret
1350  %a = load <4 x float>, ptr %x
1351  %b = insertelement <4 x float> poison, float %y, i32 0
1352  %c = shufflevector <4 x float> %b, <4 x float> poison, <4 x i32> zeroinitializer
1353  %d = fcmp une <4 x float> %c, %a
1354  store <4 x i1> %d, ptr %z
1355  ret void
1356}
1357
; nnan une, splat scalar LHS, <4 x float>: same codegen as the non-nnan case
; (une is already a single vmfne.vf).
1358define void @fcmp_une_fv_v4f32_nonans(ptr %x, float %y, ptr %z) {
1359; CHECK-LABEL: fcmp_une_fv_v4f32_nonans:
1360; CHECK:       # %bb.0:
1361; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
1362; CHECK-NEXT:    vle32.v v8, (a0)
1363; CHECK-NEXT:    vmfne.vf v0, v8, fa0
1364; CHECK-NEXT:    vsetvli zero, zero, e8, mf4, ta, ma
1365; CHECK-NEXT:    vmv.v.i v8, 0
1366; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
1367; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
1368; CHECK-NEXT:    vmv.v.i v9, 0
1369; CHECK-NEXT:    vsetivli zero, 4, e8, mf2, tu, ma
1370; CHECK-NEXT:    vmv.v.v v9, v8
1371; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
1372; CHECK-NEXT:    vmsne.vi v8, v9, 0
1373; CHECK-NEXT:    vsm.v v8, (a1)
1374; CHECK-NEXT:    ret
1375  %a = load <4 x float>, ptr %x
1376  %b = insertelement <4 x float> poison, float %y, i32 0
1377  %c = shufflevector <4 x float> %b, <4 x float> poison, <4 x i32> zeroinitializer
1378  %d = fcmp nnan une <4 x float> %c, %a
1379  store <4 x i1> %d, ptr %z
1380  ret void
1381}
1382
; ogt with splat scalar LHS, <2 x double>: (splat > a) is commuted to
; (a < splat), i.e. vmflt.vf; then <2 x i1> -> <8 x i1> store widening.
1383define void @fcmp_ogt_fv_v2f64(ptr %x, double %y, ptr %z) {
1384; CHECK-LABEL: fcmp_ogt_fv_v2f64:
1385; CHECK:       # %bb.0:
1386; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
1387; CHECK-NEXT:    vle64.v v8, (a0)
1388; CHECK-NEXT:    vmflt.vf v0, v8, fa0
1389; CHECK-NEXT:    vsetvli zero, zero, e8, mf8, ta, ma
1390; CHECK-NEXT:    vmv.v.i v8, 0
1391; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
1392; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
1393; CHECK-NEXT:    vmv.v.i v9, 0
1394; CHECK-NEXT:    vsetivli zero, 2, e8, mf2, tu, ma
1395; CHECK-NEXT:    vmv.v.v v9, v8
1396; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
1397; CHECK-NEXT:    vmsne.vi v8, v9, 0
1398; CHECK-NEXT:    vsm.v v8, (a1)
1399; CHECK-NEXT:    ret
1400  %a = load <2 x double>, ptr %x
1401  %b = insertelement <2 x double> poison, double %y, i32 0
1402  %c = shufflevector <2 x double> %b, <2 x double> poison, <2 x i32> zeroinitializer
1403  %d = fcmp ogt <2 x double> %c, %a
1404  store <2 x i1> %d, ptr %z
1405  ret void
1406}
1407
; nnan ogt with splat scalar LHS, <2 x double>: same codegen as the non-nnan
; case (commuted to vmflt.vf plus the mask store widening).
1408define void @fcmp_ogt_fv_v2f64_nonans(ptr %x, double %y, ptr %z) {
1409; CHECK-LABEL: fcmp_ogt_fv_v2f64_nonans:
1410; CHECK:       # %bb.0:
1411; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
1412; CHECK-NEXT:    vle64.v v8, (a0)
1413; CHECK-NEXT:    vmflt.vf v0, v8, fa0
1414; CHECK-NEXT:    vsetvli zero, zero, e8, mf8, ta, ma
1415; CHECK-NEXT:    vmv.v.i v8, 0
1416; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
1417; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
1418; CHECK-NEXT:    vmv.v.i v9, 0
1419; CHECK-NEXT:    vsetivli zero, 2, e8, mf2, tu, ma
1420; CHECK-NEXT:    vmv.v.v v9, v8
1421; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
1422; CHECK-NEXT:    vmsne.vi v8, v9, 0
1423; CHECK-NEXT:    vsm.v v8, (a1)
1424; CHECK-NEXT:    ret
1425  %a = load <2 x double>, ptr %x
1426  %b = insertelement <2 x double> poison, double %y, i32 0
1427  %c = shufflevector <2 x double> %b, <2 x double> poison, <2 x i32> zeroinitializer
1428  %d = fcmp nnan ogt <2 x double> %c, %a
1429  store <2 x i1> %d, ptr %z
1430  ret void
1431}
1432
; olt with splat scalar LHS, <16 x half>: (splat < a) is commuted to
; (a > splat) — vmfgt.vf on ZVFH; ZVFHMIN widens and keeps operand order in
; vmflt.vv (splat operand first).
1433define void @fcmp_olt_fv_v16f16(ptr %x, half %y, ptr %z) {
1434; ZVFH-LABEL: fcmp_olt_fv_v16f16:
1435; ZVFH:       # %bb.0:
1436; ZVFH-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
1437; ZVFH-NEXT:    vle16.v v8, (a0)
1438; ZVFH-NEXT:    vmfgt.vf v10, v8, fa0
1439; ZVFH-NEXT:    vsm.v v10, (a1)
1440; ZVFH-NEXT:    ret
1441;
1442; ZVFHMIN-LABEL: fcmp_olt_fv_v16f16:
1443; ZVFHMIN:       # %bb.0:
1444; ZVFHMIN-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
1445; ZVFHMIN-NEXT:    vle16.v v8, (a0)
1446; ZVFHMIN-NEXT:    fmv.x.h a0, fa0
1447; ZVFHMIN-NEXT:    vmv.v.x v10, a0
1448; ZVFHMIN-NEXT:    vfwcvt.f.f.v v12, v8
1449; ZVFHMIN-NEXT:    vfwcvt.f.f.v v16, v10
1450; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
1451; ZVFHMIN-NEXT:    vmflt.vv v8, v16, v12
1452; ZVFHMIN-NEXT:    vsm.v v8, (a1)
1453; ZVFHMIN-NEXT:    ret
1454  %a = load <16 x half>, ptr %x
1455  %b = insertelement <16 x half> poison, half %y, i32 0
1456  %c = shufflevector <16 x half> %b, <16 x half> poison, <16 x i32> zeroinitializer
1457  %d = fcmp olt <16 x half> %c, %a
1458  store <16 x i1> %d, ptr %z
1459  ret void
1460}
1461
; nnan olt with splat scalar LHS, <16 x half>: identical codegen to the
; non-nnan fv case above.
1462define void @fcmp_olt_fv_v16f16_nonans(ptr %x, half %y, ptr %z) {
1463; ZVFH-LABEL: fcmp_olt_fv_v16f16_nonans:
1464; ZVFH:       # %bb.0:
1465; ZVFH-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
1466; ZVFH-NEXT:    vle16.v v8, (a0)
1467; ZVFH-NEXT:    vmfgt.vf v10, v8, fa0
1468; ZVFH-NEXT:    vsm.v v10, (a1)
1469; ZVFH-NEXT:    ret
1470;
1471; ZVFHMIN-LABEL: fcmp_olt_fv_v16f16_nonans:
1472; ZVFHMIN:       # %bb.0:
1473; ZVFHMIN-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
1474; ZVFHMIN-NEXT:    vle16.v v8, (a0)
1475; ZVFHMIN-NEXT:    fmv.x.h a0, fa0
1476; ZVFHMIN-NEXT:    vmv.v.x v10, a0
1477; ZVFHMIN-NEXT:    vfwcvt.f.f.v v12, v8
1478; ZVFHMIN-NEXT:    vfwcvt.f.f.v v16, v10
1479; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
1480; ZVFHMIN-NEXT:    vmflt.vv v8, v16, v12
1481; ZVFHMIN-NEXT:    vsm.v v8, (a1)
1482; ZVFHMIN-NEXT:    ret
1483  %a = load <16 x half>, ptr %x
1484  %b = insertelement <16 x half> poison, half %y, i32 0
1485  %c = shufflevector <16 x half> %b, <16 x half> poison, <16 x i32> zeroinitializer
1486  %d = fcmp nnan olt <16 x half> %c, %a
1487  store <16 x i1> %d, ptr %z
1488  ret void
1489}
1490
; oge with splat scalar LHS, <8 x float>: (splat >= a) commuted to
; (a <= splat), i.e. vmfle.vf.
1491define void @fcmp_oge_fv_v8f32(ptr %x, float %y, ptr %z) {
1492; CHECK-LABEL: fcmp_oge_fv_v8f32:
1493; CHECK:       # %bb.0:
1494; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
1495; CHECK-NEXT:    vle32.v v8, (a0)
1496; CHECK-NEXT:    vmfle.vf v10, v8, fa0
1497; CHECK-NEXT:    vsm.v v10, (a1)
1498; CHECK-NEXT:    ret
1499  %a = load <8 x float>, ptr %x
1500  %b = insertelement <8 x float> poison, float %y, i32 0
1501  %c = shufflevector <8 x float> %b, <8 x float> poison, <8 x i32> zeroinitializer
1502  %d = fcmp oge <8 x float> %c, %a
1503  store <8 x i1> %d, ptr %z
1504  ret void
1505}
1506
; nnan oge with splat scalar LHS, <8 x float>: same codegen as the non-nnan
; fv case (commuted vmfle.vf).
1507define void @fcmp_oge_fv_v8f32_nonans(ptr %x, float %y, ptr %z) {
1508; CHECK-LABEL: fcmp_oge_fv_v8f32_nonans:
1509; CHECK:       # %bb.0:
1510; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
1511; CHECK-NEXT:    vle32.v v8, (a0)
1512; CHECK-NEXT:    vmfle.vf v10, v8, fa0
1513; CHECK-NEXT:    vsm.v v10, (a1)
1514; CHECK-NEXT:    ret
1515  %a = load <8 x float>, ptr %x
1516  %b = insertelement <8 x float> poison, float %y, i32 0
1517  %c = shufflevector <8 x float> %b, <8 x float> poison, <8 x i32> zeroinitializer
1518  %d = fcmp nnan oge <8 x float> %c, %a
1519  store <8 x i1> %d, ptr %z
1520  ret void
1521}
1522
; ole with splat scalar LHS, <4 x double>: (splat <= a) commuted to
; (a >= splat) — vmfge.vf — then <4 x i1> -> <8 x i1> store widening.
1523define void @fcmp_ole_fv_v4f64(ptr %x, double %y, ptr %z) {
1524; CHECK-LABEL: fcmp_ole_fv_v4f64:
1525; CHECK:       # %bb.0:
1526; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
1527; CHECK-NEXT:    vle64.v v8, (a0)
1528; CHECK-NEXT:    vmfge.vf v0, v8, fa0
1529; CHECK-NEXT:    vsetvli zero, zero, e8, mf4, ta, ma
1530; CHECK-NEXT:    vmv.v.i v8, 0
1531; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
1532; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
1533; CHECK-NEXT:    vmv.v.i v9, 0
1534; CHECK-NEXT:    vsetivli zero, 4, e8, mf2, tu, ma
1535; CHECK-NEXT:    vmv.v.v v9, v8
1536; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
1537; CHECK-NEXT:    vmsne.vi v8, v9, 0
1538; CHECK-NEXT:    vsm.v v8, (a1)
1539; CHECK-NEXT:    ret
1540  %a = load <4 x double>, ptr %x
1541  %b = insertelement <4 x double> poison, double %y, i32 0
1542  %c = shufflevector <4 x double> %b, <4 x double> poison, <4 x i32> zeroinitializer
1543  %d = fcmp ole <4 x double> %c, %a
1544  store <4 x i1> %d, ptr %z
1545  ret void
1546}
1547
; nnan ole with splat scalar LHS, <4 x double>: same codegen as the non-nnan
; fv case (commuted vmfge.vf plus mask store widening).
1548define void @fcmp_ole_fv_v4f64_nonans(ptr %x, double %y, ptr %z) {
1549; CHECK-LABEL: fcmp_ole_fv_v4f64_nonans:
1550; CHECK:       # %bb.0:
1551; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
1552; CHECK-NEXT:    vle64.v v8, (a0)
1553; CHECK-NEXT:    vmfge.vf v0, v8, fa0
1554; CHECK-NEXT:    vsetvli zero, zero, e8, mf4, ta, ma
1555; CHECK-NEXT:    vmv.v.i v8, 0
1556; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
1557; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
1558; CHECK-NEXT:    vmv.v.i v9, 0
1559; CHECK-NEXT:    vsetivli zero, 4, e8, mf2, tu, ma
1560; CHECK-NEXT:    vmv.v.v v9, v8
1561; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
1562; CHECK-NEXT:    vmsne.vi v8, v9, 0
1563; CHECK-NEXT:    vsm.v v8, (a1)
1564; CHECK-NEXT:    ret
1565  %a = load <4 x double>, ptr %x
1566  %b = insertelement <4 x double> poison, double %y, i32 0
1567  %c = shufflevector <4 x double> %b, <4 x double> poison, <4 x i32> zeroinitializer
1568  %d = fcmp nnan ole <4 x double> %c, %a
1569  store <4 x i1> %d, ptr %z
1570  ret void
1571}
1572
; ule with splat scalar LHS, <32 x half>: !(splat > a) == !(a < splat), so
; vmflt.vf + vmnot.m on ZVFH; ZVFHMIN widens and negates vmflt.vv.
1573define void @fcmp_ule_fv_v32f16(ptr %x, half %y, ptr %z) {
1574; ZVFH-LABEL: fcmp_ule_fv_v32f16:
1575; ZVFH:       # %bb.0:
1576; ZVFH-NEXT:    li a2, 32
1577; ZVFH-NEXT:    vsetvli zero, a2, e16, m4, ta, ma
1578; ZVFH-NEXT:    vle16.v v8, (a0)
1579; ZVFH-NEXT:    vmflt.vf v12, v8, fa0
1580; ZVFH-NEXT:    vmnot.m v8, v12
1581; ZVFH-NEXT:    vsm.v v8, (a1)
1582; ZVFH-NEXT:    ret
1583;
1584; ZVFHMIN-LABEL: fcmp_ule_fv_v32f16:
1585; ZVFHMIN:       # %bb.0:
1586; ZVFHMIN-NEXT:    li a2, 32
1587; ZVFHMIN-NEXT:    vsetvli zero, a2, e16, m4, ta, ma
1588; ZVFHMIN-NEXT:    vle16.v v8, (a0)
1589; ZVFHMIN-NEXT:    fmv.x.h a0, fa0
1590; ZVFHMIN-NEXT:    vmv.v.x v12, a0
1591; ZVFHMIN-NEXT:    vfwcvt.f.f.v v16, v8
1592; ZVFHMIN-NEXT:    vfwcvt.f.f.v v24, v12
1593; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
1594; ZVFHMIN-NEXT:    vmflt.vv v8, v16, v24
1595; ZVFHMIN-NEXT:    vmnot.m v8, v8
1596; ZVFHMIN-NEXT:    vsm.v v8, (a1)
1597; ZVFHMIN-NEXT:    ret
1598  %a = load <32 x half>, ptr %x
1599  %b = insertelement <32 x half> poison, half %y, i32 0
1600  %c = shufflevector <32 x half> %b, <32 x half> poison, <32 x i32> zeroinitializer
1601  %d = fcmp ule <32 x half> %c, %a
1602  store <32 x i1> %d, ptr %z
1603  ret void
1604}
1605
; nnan ule with splat scalar LHS, <32 x half>: with nnan, ule == ole, which
; commutes to (a >= splat) — vmfge.vf on ZVFH, vmfle.vv on ZVFHMIN — dropping
; the vmnot.m.
1606define void @fcmp_ule_fv_v32f16_nonans(ptr %x, half %y, ptr %z) {
1607; ZVFH-LABEL: fcmp_ule_fv_v32f16_nonans:
1608; ZVFH:       # %bb.0:
1609; ZVFH-NEXT:    li a2, 32
1610; ZVFH-NEXT:    vsetvli zero, a2, e16, m4, ta, ma
1611; ZVFH-NEXT:    vle16.v v8, (a0)
1612; ZVFH-NEXT:    vmfge.vf v12, v8, fa0
1613; ZVFH-NEXT:    vsm.v v12, (a1)
1614; ZVFH-NEXT:    ret
1615;
1616; ZVFHMIN-LABEL: fcmp_ule_fv_v32f16_nonans:
1617; ZVFHMIN:       # %bb.0:
1618; ZVFHMIN-NEXT:    li a2, 32
1619; ZVFHMIN-NEXT:    vsetvli zero, a2, e16, m4, ta, ma
1620; ZVFHMIN-NEXT:    vle16.v v8, (a0)
1621; ZVFHMIN-NEXT:    fmv.x.h a0, fa0
1622; ZVFHMIN-NEXT:    vmv.v.x v12, a0
1623; ZVFHMIN-NEXT:    vfwcvt.f.f.v v16, v8
1624; ZVFHMIN-NEXT:    vfwcvt.f.f.v v24, v12
1625; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
1626; ZVFHMIN-NEXT:    vmfle.vv v8, v24, v16
1627; ZVFHMIN-NEXT:    vsm.v v8, (a1)
1628; ZVFHMIN-NEXT:    ret
1629  %a = load <32 x half>, ptr %x
1630  %b = insertelement <32 x half> poison, half %y, i32 0
1631  %c = shufflevector <32 x half> %b, <32 x half> poison, <32 x i32> zeroinitializer
1632  %d = fcmp nnan ule <32 x half> %c, %a
1633  store <32 x i1> %d, ptr %z
1634  ret void
1635}
1636
; uge with splat scalar LHS, <16 x float>: !(splat < a) == !(a > splat), so
; vmfgt.vf + vmnot.m.
1637define void @fcmp_uge_fv_v16f32(ptr %x, float %y, ptr %z) {
1638; CHECK-LABEL: fcmp_uge_fv_v16f32:
1639; CHECK:       # %bb.0:
1640; CHECK-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
1641; CHECK-NEXT:    vle32.v v8, (a0)
1642; CHECK-NEXT:    vmfgt.vf v12, v8, fa0
1643; CHECK-NEXT:    vmnot.m v8, v12
1644; CHECK-NEXT:    vsm.v v8, (a1)
1645; CHECK-NEXT:    ret
1646  %a = load <16 x float>, ptr %x
1647  %b = insertelement <16 x float> poison, float %y, i32 0
1648  %c = shufflevector <16 x float> %b, <16 x float> poison, <16 x i32> zeroinitializer
1649  %d = fcmp uge <16 x float> %c, %a
1650  store <16 x i1> %d, ptr %z
1651  ret void
1652}
1653
; nnan uge with splat scalar LHS, <16 x float>: with nnan, uge == oge, which
; commutes to (a <= splat) — a single vmfle.vf.
1654define void @fcmp_uge_fv_v16f32_nonans(ptr %x, float %y, ptr %z) {
1655; CHECK-LABEL: fcmp_uge_fv_v16f32_nonans:
1656; CHECK:       # %bb.0:
1657; CHECK-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
1658; CHECK-NEXT:    vle32.v v8, (a0)
1659; CHECK-NEXT:    vmfle.vf v12, v8, fa0
1660; CHECK-NEXT:    vsm.v v12, (a1)
1661; CHECK-NEXT:    ret
1662  %a = load <16 x float>, ptr %x
1663  %b = insertelement <16 x float> poison, float %y, i32 0
1664  %c = shufflevector <16 x float> %b, <16 x float> poison, <16 x i32> zeroinitializer
1665  %d = fcmp nnan uge <16 x float> %c, %a
1666  store <16 x i1> %d, ptr %z
1667  ret void
1668}
1669
; ult with splat scalar LHS, <8 x double>: !(splat >= a) == !(a <= splat), so
; vmfle.vf + vmnot.m.
1670define void @fcmp_ult_fv_v8f64(ptr %x, double %y, ptr %z) {
1671; CHECK-LABEL: fcmp_ult_fv_v8f64:
1672; CHECK:       # %bb.0:
1673; CHECK-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
1674; CHECK-NEXT:    vle64.v v8, (a0)
1675; CHECK-NEXT:    vmfle.vf v12, v8, fa0
1676; CHECK-NEXT:    vmnot.m v8, v12
1677; CHECK-NEXT:    vsm.v v8, (a1)
1678; CHECK-NEXT:    ret
1679  %a = load <8 x double>, ptr %x
1680  %b = insertelement <8 x double> poison, double %y, i32 0
1681  %c = shufflevector <8 x double> %b, <8 x double> poison, <8 x i32> zeroinitializer
1682  %d = fcmp ult <8 x double> %c, %a
1683  store <8 x i1> %d, ptr %z
1684  ret void
1685}
1686
define void @fcmp_ult_fv_v8f64_nonans(ptr %x, double %y, ptr %z) {
; CHECK-LABEL: fcmp_ult_fv_v8f64_nonans:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
; CHECK-NEXT:    vle64.v v8, (a0)
; CHECK-NEXT:    vmfgt.vf v12, v8, fa0
; CHECK-NEXT:    vsm.v v12, (a1)
; CHECK-NEXT:    ret
  ; With nnan, "ult" becomes the ordered "olt": a single vmfgt.vf
  ; (a > y, i.e. y < a) with no mask negation.
  %a = load <8 x double>, ptr %x
  %b = insertelement <8 x double> poison, double %y, i32 0
  %c = shufflevector <8 x double> %b, <8 x double> poison, <8 x i32> zeroinitializer
  %d = fcmp nnan ult <8 x double> %c, %a
  store <8 x i1> %d, ptr %z
  ret void
}
1702
define void @fcmp_ugt_fv_v64f16(ptr %x, half %y, ptr %z) {
; ZVFH-LABEL: fcmp_ugt_fv_v64f16:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    li a2, 64
; ZVFH-NEXT:    vsetvli zero, a2, e16, m8, ta, ma
; ZVFH-NEXT:    vle16.v v8, (a0)
; ZVFH-NEXT:    vmfge.vf v16, v8, fa0
; ZVFH-NEXT:    vmnot.m v8, v16
; ZVFH-NEXT:    vsm.v v8, (a1)
; ZVFH-NEXT:    ret
  ; 64 elements exceeds the vsetivli 5-bit immediate, hence the "li a2, 64"
  ; + vsetvli pair. "ugt" (scalar LHS) is not(vmfge a, y) == !(y <= a).
  %a = load <64 x half>, ptr %x
  %b = insertelement <64 x half> poison, half %y, i32 0
  %c = shufflevector <64 x half> %b, <64 x half> poison, <64 x i32> zeroinitializer
  %d = fcmp ugt <64 x half> %c, %a
  store <64 x i1> %d, ptr %z
  ret void
}
1720
define void @fcmp_ugt_fv_v64f16_nonans(ptr %x, half %y, ptr %z) {
; ZVFH-LABEL: fcmp_ugt_fv_v64f16_nonans:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    li a2, 64
; ZVFH-NEXT:    vsetvli zero, a2, e16, m8, ta, ma
; ZVFH-NEXT:    vle16.v v8, (a0)
; ZVFH-NEXT:    vmflt.vf v16, v8, fa0
; ZVFH-NEXT:    vsm.v v16, (a1)
; ZVFH-NEXT:    ret
  ; With nnan, "ugt" becomes the ordered "ogt": one vmflt.vf
  ; (a < y, i.e. y > a), no negation needed.
  %a = load <64 x half>, ptr %x
  %b = insertelement <64 x half> poison, half %y, i32 0
  %c = shufflevector <64 x half> %b, <64 x half> poison, <64 x i32> zeroinitializer
  %d = fcmp nnan ugt <64 x half> %c, %a
  store <64 x i1> %d, ptr %z
  ret void
}
1737
define void @fcmp_ueq_fv_v32f32(ptr %x, float %y, ptr %z) {
; CHECK-LABEL: fcmp_ueq_fv_v32f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a2, 32
; CHECK-NEXT:    vsetvli zero, a2, e32, m8, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vmfgt.vf v16, v8, fa0
; CHECK-NEXT:    vmflt.vf v17, v8, fa0
; CHECK-NEXT:    vmnor.mm v8, v17, v16
; CHECK-NEXT:    vsm.v v8, (a1)
; CHECK-NEXT:    ret
  ; "ueq" (== or unordered) is the complement of "one", so the expected
  ; lowering is nor(a < y, a > y): true iff neither strict order holds.
  %a = load <32 x float>, ptr %x
  %b = insertelement <32 x float> poison, float %y, i32 0
  %c = shufflevector <32 x float> %b, <32 x float> poison, <32 x i32> zeroinitializer
  %d = fcmp ueq <32 x float> %c, %a
  store <32 x i1> %d, ptr %z
  ret void
}
1756
define void @fcmp_ueq_fv_v32f32_nonans(ptr %x, float %y, ptr %z) {
; CHECK-LABEL: fcmp_ueq_fv_v32f32_nonans:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a2, 32
; CHECK-NEXT:    vsetvli zero, a2, e32, m8, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vmfeq.vf v16, v8, fa0
; CHECK-NEXT:    vsm.v v16, (a1)
; CHECK-NEXT:    ret
  ; With nnan, "ueq" collapses to "oeq": a single vmfeq.vf.
  %a = load <32 x float>, ptr %x
  %b = insertelement <32 x float> poison, float %y, i32 0
  %c = shufflevector <32 x float> %b, <32 x float> poison, <32 x i32> zeroinitializer
  %d = fcmp nnan ueq <32 x float> %c, %a
  store <32 x i1> %d, ptr %z
  ret void
}
1773
define void @fcmp_one_fv_v8f64(ptr %x, double %y, ptr %z) {
; CHECK-LABEL: fcmp_one_fv_v8f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
; CHECK-NEXT:    vle64.v v8, (a0)
; CHECK-NEXT:    vmfgt.vf v16, v8, fa0
; CHECK-NEXT:    vmflt.vf v17, v8, fa0
; CHECK-NEXT:    vmor.mm v8, v17, v16
; CHECK-NEXT:    vsm.v v8, (a1)
; CHECK-NEXT:    ret
  ; NOTE(review): the name says v8f64 but the body operates on <16 x double>
  ; (vsetivli 16, e64, m8) — likely a copy-paste leftover; the test is still
  ; internally consistent. "one" (ordered !=) lowers to or(a < y, a > y).
  %a = load <16 x double>, ptr %x
  %b = insertelement <16 x double> poison, double %y, i32 0
  %c = shufflevector <16 x double> %b, <16 x double> poison, <16 x i32> zeroinitializer
  %d = fcmp one <16 x double> %c, %a
  store <16 x i1> %d, ptr %z
  ret void
}
1791
define void @fcmp_one_fv_v8f64_nonans(ptr %x, double %y, ptr %z) {
; CHECK-LABEL: fcmp_one_fv_v8f64_nonans:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
; CHECK-NEXT:    vle64.v v8, (a0)
; CHECK-NEXT:    vmfne.vf v16, v8, fa0
; CHECK-NEXT:    vsm.v v16, (a1)
; CHECK-NEXT:    ret
  ; NOTE(review): name says v8f64 but the body uses <16 x double>, matching
  ; the non-nonans variant above. With nnan, "one" is equivalent to "une",
  ; so a single vmfne.vf is expected.
  %a = load <16 x double>, ptr %x
  %b = insertelement <16 x double> poison, double %y, i32 0
  %c = shufflevector <16 x double> %b, <16 x double> poison, <16 x i32> zeroinitializer
  %d = fcmp nnan one <16 x double> %c, %a
  store <16 x i1> %d, ptr %z
  ret void
}
1807
define void @fcmp_ord_fv_v4f16(ptr %x, half %y, ptr %z) {
; ZVFH-LABEL: fcmp_ord_fv_v4f16:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
; ZVFH-NEXT:    vle16.v v8, (a0)
; ZVFH-NEXT:    vfmv.v.f v9, fa0
; ZVFH-NEXT:    vmfeq.vf v9, v9, fa0
; ZVFH-NEXT:    vmfeq.vv v8, v8, v8
; ZVFH-NEXT:    vmand.mm v0, v9, v8
; ZVFH-NEXT:    vsetvli zero, zero, e8, mf4, ta, ma
; ZVFH-NEXT:    vmv.v.i v8, 0
; ZVFH-NEXT:    vmerge.vim v8, v8, 1, v0
; ZVFH-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; ZVFH-NEXT:    vmv.v.i v9, 0
; ZVFH-NEXT:    vsetivli zero, 4, e8, mf2, tu, ma
; ZVFH-NEXT:    vmv.v.v v9, v8
; ZVFH-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; ZVFH-NEXT:    vmsne.vi v8, v9, 0
; ZVFH-NEXT:    vsm.v v8, (a1)
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: fcmp_ord_fv_v4f16:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
; ZVFHMIN-NEXT:    vle16.v v8, (a0)
; ZVFHMIN-NEXT:    fmv.x.h a0, fa0
; ZVFHMIN-NEXT:    vmv.v.x v9, a0
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v9
; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
; ZVFHMIN-NEXT:    vmfeq.vv v9, v10, v10
; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v8
; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
; ZVFHMIN-NEXT:    vmfeq.vv v8, v10, v10
; ZVFHMIN-NEXT:    vmand.mm v0, v9, v8
; ZVFHMIN-NEXT:    vsetvli zero, zero, e8, mf4, ta, ma
; ZVFHMIN-NEXT:    vmv.v.i v8, 0
; ZVFHMIN-NEXT:    vmerge.vim v8, v8, 1, v0
; ZVFHMIN-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; ZVFHMIN-NEXT:    vmv.v.i v9, 0
; ZVFHMIN-NEXT:    vsetivli zero, 4, e8, mf2, tu, ma
; ZVFHMIN-NEXT:    vmv.v.v v9, v8
; ZVFHMIN-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; ZVFHMIN-NEXT:    vmsne.vi v8, v9, 0
; ZVFHMIN-NEXT:    vsm.v v8, (a1)
; ZVFHMIN-NEXT:    ret
  ; "ord" (neither operand NaN) lowers to and(y == y, a == a): self-equality
  ; is false exactly for NaN. ZVFHMIN has no native f16 compares, so both
  ; sides are first widened to f32 (vfwcvt.f.f.v). The trailing
  ; vmerge/vmv/vmsne sequence appears to pad the 4-bit mask out to a full
  ; byte before the vsm.v store — TODO(review): confirm intent.
  %a = load <4 x half>, ptr %x
  %b = insertelement <4 x half> poison, half %y, i32 0
  %c = shufflevector <4 x half> %b, <4 x half> poison, <4 x i32> zeroinitializer
  %d = fcmp ord <4 x half> %c, %a
  store <4 x i1> %d, ptr %z
  ret void
}
1861
define void @fcmp_uno_fv_v4f16(ptr %x, half %y, ptr %z) {
; ZVFH-LABEL: fcmp_uno_fv_v4f16:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
; ZVFH-NEXT:    vle16.v v8, (a0)
; ZVFH-NEXT:    vfmv.v.f v9, fa0
; ZVFH-NEXT:    vmfne.vf v9, v9, fa0
; ZVFH-NEXT:    vmfne.vv v8, v8, v8
; ZVFH-NEXT:    vmor.mm v0, v9, v8
; ZVFH-NEXT:    vsetvli zero, zero, e8, mf8, ta, ma
; ZVFH-NEXT:    vmv.v.i v8, 0
; ZVFH-NEXT:    vmerge.vim v8, v8, 1, v0
; ZVFH-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; ZVFH-NEXT:    vmv.v.i v9, 0
; ZVFH-NEXT:    vsetivli zero, 2, e8, mf2, tu, ma
; ZVFH-NEXT:    vmv.v.v v9, v8
; ZVFH-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; ZVFH-NEXT:    vmsne.vi v8, v9, 0
; ZVFH-NEXT:    vsm.v v8, (a1)
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: fcmp_uno_fv_v4f16:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
; ZVFHMIN-NEXT:    vle16.v v8, (a0)
; ZVFHMIN-NEXT:    fmv.x.h a0, fa0
; ZVFHMIN-NEXT:    vmv.v.x v9, a0
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v9
; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
; ZVFHMIN-NEXT:    vmfne.vv v9, v10, v10
; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v8
; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
; ZVFHMIN-NEXT:    vmfne.vv v8, v10, v10
; ZVFHMIN-NEXT:    vmor.mm v0, v9, v8
; ZVFHMIN-NEXT:    vsetvli zero, zero, e8, mf8, ta, ma
; ZVFHMIN-NEXT:    vmv.v.i v8, 0
; ZVFHMIN-NEXT:    vmerge.vim v8, v8, 1, v0
; ZVFHMIN-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; ZVFHMIN-NEXT:    vmv.v.i v9, 0
; ZVFHMIN-NEXT:    vsetivli zero, 2, e8, mf2, tu, ma
; ZVFHMIN-NEXT:    vmv.v.v v9, v8
; ZVFHMIN-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; ZVFHMIN-NEXT:    vmsne.vi v8, v9, 0
; ZVFHMIN-NEXT:    vsm.v v8, (a1)
; ZVFHMIN-NEXT:    ret
  ; NOTE(review): the name says v4f16 but the body operates on <2 x half>
  ; (vsetivli 2, mf4) — likely a copy-paste leftover; still self-consistent.
  ; "uno" (either operand NaN) lowers to or(y != y, a != a): self-inequality
  ; is true exactly for NaN. ZVFHMIN widens both sides to f32 first; the
  ; vmerge/vmv/vmsne tail appears to pad the 2-bit mask to a byte-sized
  ; store — TODO(review): confirm intent.
  %a = load <2 x half>, ptr %x
  %b = insertelement <2 x half> poison, half %y, i32 0
  %c = shufflevector <2 x half> %b, <2 x half> poison, <2 x i32> zeroinitializer
  %d = fcmp uno <2 x half> %c, %a
  store <2 x i1> %d, ptr %z
  ret void
}
1915