; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+f -verify-machineinstrs < %s \
; RUN:   -disable-strictnode-mutation -target-abi=ilp32f \
; RUN:   | FileCheck -check-prefix=CHECKIF %s
; RUN: llc -mtriple=riscv64 -mattr=+f -verify-machineinstrs < %s \
; RUN:   -disable-strictnode-mutation -target-abi=lp64f \
; RUN:   | FileCheck -check-prefix=CHECKIF %s
; RUN: llc -mtriple=riscv32 -mattr=+zfinx -verify-machineinstrs < %s \
; RUN:   -disable-strictnode-mutation -target-abi=ilp32 \
; RUN:   | FileCheck -check-prefix=CHECKIZFINX %s
; RUN: llc -mtriple=riscv64 -mattr=+zfinx -verify-machineinstrs < %s \
; RUN:   -disable-strictnode-mutation -target-abi=lp64 \
; RUN:   | FileCheck -check-prefix=CHECKIZFINX %s
; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
; RUN:   -disable-strictnode-mutation | FileCheck -check-prefix=RV32I %s
; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
; RUN:   -disable-strictnode-mutation | FileCheck -check-prefix=RV64I %s

define i32 @fcmp_oeq(float %a, float %b) nounwind strictfp {
; CHECKIF-LABEL: fcmp_oeq:
; CHECKIF:       # %bb.0:
; CHECKIF-NEXT:    feq.s a0, fa0, fa1
; CHECKIF-NEXT:    ret
;
; CHECKIZFINX-LABEL: fcmp_oeq:
; CHECKIZFINX:       # %bb.0:
; CHECKIZFINX-NEXT:    feq.s a0, a0, a1
; CHECKIZFINX-NEXT:    ret
;
; RV32I-LABEL: fcmp_oeq:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __eqsf2
; RV32I-NEXT:    seqz a0, a0
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcmp_oeq:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __eqsf2
; RV64I-NEXT:    seqz a0, a0
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %b, metadata !"oeq", metadata !"fpexcept.strict") strictfp
  %2 = zext i1 %1 to i32
  ret i32 %2
}
declare i1 @llvm.experimental.constrained.fcmp.f32(float, float, metadata, metadata)

define i32 @fcmp_ogt(float %a, float %b) nounwind strictfp {
; CHECKIF-LABEL: fcmp_ogt:
; CHECKIF:       # %bb.0:
; CHECKIF-NEXT:    frflags a1
; CHECKIF-NEXT:    flt.s a0, fa1, fa0
; CHECKIF-NEXT:    fsflags a1
; CHECKIF-NEXT:    feq.s zero, fa1, fa0
; CHECKIF-NEXT:    ret
;
; CHECKIZFINX-LABEL: fcmp_ogt:
; CHECKIZFINX:       # %bb.0:
; CHECKIZFINX-NEXT:    frflags a3
; CHECKIZFINX-NEXT:    flt.s a2, a1, a0
; CHECKIZFINX-NEXT:    fsflags a3
; CHECKIZFINX-NEXT:    feq.s zero, a1, a0
; CHECKIZFINX-NEXT:    mv a0, a2
; CHECKIZFINX-NEXT:    ret
;
; RV32I-LABEL: fcmp_ogt:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __gtsf2
; RV32I-NEXT:    sgtz a0, a0
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcmp_ogt:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __gtsf2
; RV64I-NEXT:    sgtz a0, a0
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %b, metadata !"ogt", metadata !"fpexcept.strict") strictfp
  %2 = zext i1 %1 to i32
  ret i32 %2
}

define i32 @fcmp_oge(float %a, float %b) nounwind strictfp {
; CHECKIF-LABEL: fcmp_oge:
; CHECKIF:       # %bb.0:
; CHECKIF-NEXT:    frflags a1
; CHECKIF-NEXT:    fle.s a0, fa1, fa0
; CHECKIF-NEXT:    fsflags a1
; CHECKIF-NEXT:    feq.s zero, fa1, fa0
; CHECKIF-NEXT:    ret
;
; CHECKIZFINX-LABEL: fcmp_oge:
; CHECKIZFINX:       # %bb.0:
; CHECKIZFINX-NEXT:    frflags a3
; CHECKIZFINX-NEXT:    fle.s a2, a1, a0
; CHECKIZFINX-NEXT:    fsflags a3
; CHECKIZFINX-NEXT:    feq.s zero, a1, a0
; CHECKIZFINX-NEXT:    mv a0, a2
; CHECKIZFINX-NEXT:    ret
;
; RV32I-LABEL: fcmp_oge:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __gesf2
; RV32I-NEXT:    slti a0, a0, 0
; RV32I-NEXT:    xori a0, a0, 1
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcmp_oge:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __gesf2
; RV64I-NEXT:    slti a0, a0, 0
; RV64I-NEXT:    xori a0, a0, 1
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %b, metadata !"oge", metadata !"fpexcept.strict") strictfp
  %2 = zext i1 %1 to i32
  ret i32 %2
}

define i32 @fcmp_olt(float %a, float %b) nounwind strictfp {
; CHECKIF-LABEL: fcmp_olt:
; CHECKIF:       # %bb.0:
; CHECKIF-NEXT:    frflags a1
; CHECKIF-NEXT:    flt.s a0, fa0, fa1
; CHECKIF-NEXT:    fsflags a1
; CHECKIF-NEXT:    feq.s zero, fa0, fa1
; CHECKIF-NEXT:    ret
;
; CHECKIZFINX-LABEL: fcmp_olt:
; CHECKIZFINX:       # %bb.0:
; CHECKIZFINX-NEXT:    frflags a3
; CHECKIZFINX-NEXT:    flt.s a2, a0, a1
; CHECKIZFINX-NEXT:    fsflags a3
; CHECKIZFINX-NEXT:    feq.s zero, a0, a1
; CHECKIZFINX-NEXT:    mv a0, a2
; CHECKIZFINX-NEXT:    ret
;
; RV32I-LABEL: fcmp_olt:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __ltsf2
; RV32I-NEXT:    slti a0, a0, 0
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcmp_olt:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __ltsf2
; RV64I-NEXT:    slti a0, a0, 0
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %b, metadata !"olt", metadata !"fpexcept.strict") strictfp
  %2 = zext i1 %1 to i32
  ret i32 %2
}

define i32 @fcmp_ole(float %a, float %b) nounwind strictfp {
; CHECKIF-LABEL: fcmp_ole:
; CHECKIF:       # %bb.0:
; CHECKIF-NEXT:    frflags a1
; CHECKIF-NEXT:    fle.s a0, fa0, fa1
; CHECKIF-NEXT:    fsflags a1
; CHECKIF-NEXT:    feq.s zero, fa0, fa1
; CHECKIF-NEXT:    ret
;
; CHECKIZFINX-LABEL: fcmp_ole:
; CHECKIZFINX:       # %bb.0:
; CHECKIZFINX-NEXT:    frflags a3
; CHECKIZFINX-NEXT:    fle.s a2, a0, a1
; CHECKIZFINX-NEXT:    fsflags a3
; CHECKIZFINX-NEXT:    feq.s zero, a0, a1
; CHECKIZFINX-NEXT:    mv a0, a2
; CHECKIZFINX-NEXT:    ret
;
; RV32I-LABEL: fcmp_ole:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __lesf2
; RV32I-NEXT:    slti a0, a0, 1
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcmp_ole:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __lesf2
; RV64I-NEXT:    slti a0, a0, 1
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %b, metadata !"ole", metadata !"fpexcept.strict") strictfp
  %2 = zext i1 %1 to i32
  ret i32 %2
}

; FIXME: We only need one frflags before the two flts and one fsflags after the
; two flts.
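; A sketch of the merged form this FIXME is asking for (illustrative only;
; the register choices are assumptions, and the feq.s NaN checks kept by the
; current lowering would still follow):
;   frflags a0
;   flt.s   a1, fa0, fa1
;   flt.s   a2, fa1, fa0
;   fsflags a0
;   or      a0, a2, a1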
define i32 @fcmp_one(float %a, float %b) nounwind strictfp {
; CHECKIF-LABEL: fcmp_one:
; CHECKIF:       # %bb.0:
; CHECKIF-NEXT:    frflags a0
; CHECKIF-NEXT:    flt.s a1, fa0, fa1
; CHECKIF-NEXT:    fsflags a0
; CHECKIF-NEXT:    feq.s zero, fa0, fa1
; CHECKIF-NEXT:    frflags a0
; CHECKIF-NEXT:    flt.s a2, fa1, fa0
; CHECKIF-NEXT:    fsflags a0
; CHECKIF-NEXT:    or a0, a2, a1
; CHECKIF-NEXT:    feq.s zero, fa1, fa0
; CHECKIF-NEXT:    ret
;
; CHECKIZFINX-LABEL: fcmp_one:
; CHECKIZFINX:       # %bb.0:
; CHECKIZFINX-NEXT:    frflags a2
; CHECKIZFINX-NEXT:    flt.s a3, a0, a1
; CHECKIZFINX-NEXT:    fsflags a2
; CHECKIZFINX-NEXT:    feq.s zero, a0, a1
; CHECKIZFINX-NEXT:    frflags a2
; CHECKIZFINX-NEXT:    flt.s a4, a1, a0
; CHECKIZFINX-NEXT:    fsflags a2
; CHECKIZFINX-NEXT:    or a2, a4, a3
; CHECKIZFINX-NEXT:    feq.s zero, a1, a0
; CHECKIZFINX-NEXT:    mv a0, a2
; CHECKIZFINX-NEXT:    ret
;
; RV32I-LABEL: fcmp_one:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s2, 0(sp) # 4-byte Folded Spill
; RV32I-NEXT:    mv s0, a1
; RV32I-NEXT:    mv s1, a0
; RV32I-NEXT:    call __eqsf2
; RV32I-NEXT:    snez s2, a0
; RV32I-NEXT:    mv a0, s1
; RV32I-NEXT:    mv a1, s0
; RV32I-NEXT:    call __unordsf2
; RV32I-NEXT:    seqz a0, a0
; RV32I-NEXT:    and a0, a0, s2
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s2, 0(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcmp_one:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -32
; RV64I-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sd s2, 0(sp) # 8-byte Folded Spill
; RV64I-NEXT:    mv s0, a1
; RV64I-NEXT:    mv s1, a0
; RV64I-NEXT:    call __eqsf2
; RV64I-NEXT:    snez s2, a0
; RV64I-NEXT:    mv a0, s1
; RV64I-NEXT:    mv a1, s0
; RV64I-NEXT:    call __unordsf2
; RV64I-NEXT:    seqz a0, a0
; RV64I-NEXT:    and a0, a0, s2
; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
; RV64I-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    ld s2, 0(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 32
; RV64I-NEXT:    ret
  %1 = call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %b, metadata !"one", metadata !"fpexcept.strict") strictfp
  %2 = zext i1 %1 to i32
  ret i32 %2
}

define i32 @fcmp_ord(float %a, float %b) nounwind strictfp {
; CHECKIF-LABEL: fcmp_ord:
; CHECKIF:       # %bb.0:
; CHECKIF-NEXT:    feq.s a0, fa1, fa1
; CHECKIF-NEXT:    feq.s a1, fa0, fa0
; CHECKIF-NEXT:    and a0, a1, a0
; CHECKIF-NEXT:    ret
;
; CHECKIZFINX-LABEL: fcmp_ord:
; CHECKIZFINX:       # %bb.0:
; CHECKIZFINX-NEXT:    feq.s a1, a1, a1
; CHECKIZFINX-NEXT:    feq.s a0, a0, a0
; CHECKIZFINX-NEXT:    and a0, a0, a1
; CHECKIZFINX-NEXT:    ret
;
; RV32I-LABEL: fcmp_ord:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __unordsf2
; RV32I-NEXT:    seqz a0, a0
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcmp_ord:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __unordsf2
; RV64I-NEXT:    seqz a0, a0
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %b, metadata !"ord", metadata !"fpexcept.strict") strictfp
  %2 = zext i1 %1 to i32
  ret i32 %2
}

; FIXME: We only need one frflags before the two flts and one fsflags after the
; two flts.
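; As above, a sketch of the merged form for the ueq lowering below
; (illustrative only; register choices are assumptions, and the or/xori plus
; the feq.s NaN checks from the current output would still follow):
;   frflags a0
;   flt.s   a1, fa0, fa1
;   flt.s   a2, fa1, fa0
;   fsflags a0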
define i32 @fcmp_ueq(float %a, float %b) nounwind strictfp {
; CHECKIF-LABEL: fcmp_ueq:
; CHECKIF:       # %bb.0:
; CHECKIF-NEXT:    frflags a0
; CHECKIF-NEXT:    flt.s a1, fa0, fa1
; CHECKIF-NEXT:    fsflags a0
; CHECKIF-NEXT:    feq.s zero, fa0, fa1
; CHECKIF-NEXT:    frflags a0
; CHECKIF-NEXT:    flt.s a2, fa1, fa0
; CHECKIF-NEXT:    fsflags a0
; CHECKIF-NEXT:    or a1, a2, a1
; CHECKIF-NEXT:    xori a0, a1, 1
; CHECKIF-NEXT:    feq.s zero, fa1, fa0
; CHECKIF-NEXT:    ret
;
; CHECKIZFINX-LABEL: fcmp_ueq:
; CHECKIZFINX:       # %bb.0:
; CHECKIZFINX-NEXT:    frflags a2
; CHECKIZFINX-NEXT:    flt.s a3, a0, a1
; CHECKIZFINX-NEXT:    fsflags a2
; CHECKIZFINX-NEXT:    feq.s zero, a0, a1
; CHECKIZFINX-NEXT:    frflags a2
; CHECKIZFINX-NEXT:    flt.s a4, a1, a0
; CHECKIZFINX-NEXT:    fsflags a2
; CHECKIZFINX-NEXT:    or a3, a4, a3
; CHECKIZFINX-NEXT:    xori a2, a3, 1
; CHECKIZFINX-NEXT:    feq.s zero, a1, a0
; CHECKIZFINX-NEXT:    mv a0, a2
; CHECKIZFINX-NEXT:    ret
;
; RV32I-LABEL: fcmp_ueq:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s2, 0(sp) # 4-byte Folded Spill
; RV32I-NEXT:    mv s0, a1
; RV32I-NEXT:    mv s1, a0
; RV32I-NEXT:    call __eqsf2
; RV32I-NEXT:    seqz s2, a0
; RV32I-NEXT:    mv a0, s1
; RV32I-NEXT:    mv a1, s0
; RV32I-NEXT:    call __unordsf2
; RV32I-NEXT:    snez a0, a0
; RV32I-NEXT:    or a0, a0, s2
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s2, 0(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcmp_ueq:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -32
; RV64I-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sd s2, 0(sp) # 8-byte Folded Spill
; RV64I-NEXT:    mv s0, a1
; RV64I-NEXT:    mv s1, a0
; RV64I-NEXT:    call __eqsf2
; RV64I-NEXT:    seqz s2, a0
; RV64I-NEXT:    mv a0, s1
; RV64I-NEXT:    mv a1, s0
; RV64I-NEXT:    call __unordsf2
; RV64I-NEXT:    snez a0, a0
; RV64I-NEXT:    or a0, a0, s2
; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
; RV64I-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    ld s2, 0(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 32
; RV64I-NEXT:    ret
  %1 = call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %b, metadata !"ueq", metadata !"fpexcept.strict") strictfp
  %2 = zext i1 %1 to i32
  ret i32 %2
}

define i32 @fcmp_ugt(float %a, float %b) nounwind strictfp {
; CHECKIF-LABEL: fcmp_ugt:
; CHECKIF:       # %bb.0:
; CHECKIF-NEXT:    frflags a0
; CHECKIF-NEXT:    fle.s a1, fa0, fa1
; CHECKIF-NEXT:    fsflags a0
; CHECKIF-NEXT:    xori a0, a1, 1
; CHECKIF-NEXT:    feq.s zero, fa0, fa1
; CHECKIF-NEXT:    ret
;
; CHECKIZFINX-LABEL: fcmp_ugt:
; CHECKIZFINX:       # %bb.0:
; CHECKIZFINX-NEXT:    frflags a2
; CHECKIZFINX-NEXT:    fle.s a3, a0, a1
; CHECKIZFINX-NEXT:    fsflags a2
; CHECKIZFINX-NEXT:    xori a2, a3, 1
; CHECKIZFINX-NEXT:    feq.s zero, a0, a1
; CHECKIZFINX-NEXT:    mv a0, a2
; CHECKIZFINX-NEXT:    ret
;
; RV32I-LABEL: fcmp_ugt:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __lesf2
; RV32I-NEXT:    sgtz a0, a0
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcmp_ugt:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __lesf2
; RV64I-NEXT:    sgtz a0, a0
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %b, metadata !"ugt", metadata !"fpexcept.strict") strictfp
  %2 = zext i1 %1 to i32
  ret i32 %2
}

define i32 @fcmp_uge(float %a, float %b) nounwind strictfp {
; CHECKIF-LABEL: fcmp_uge:
; CHECKIF:       # %bb.0:
; CHECKIF-NEXT:    frflags a0
; CHECKIF-NEXT:    flt.s a1, fa0, fa1
; CHECKIF-NEXT:    fsflags a0
; CHECKIF-NEXT:    xori a0, a1, 1
; CHECKIF-NEXT:    feq.s zero, fa0, fa1
; CHECKIF-NEXT:    ret
;
; CHECKIZFINX-LABEL: fcmp_uge:
; CHECKIZFINX:       # %bb.0:
; CHECKIZFINX-NEXT:    frflags a2
; CHECKIZFINX-NEXT:    flt.s a3, a0, a1
; CHECKIZFINX-NEXT:    fsflags a2
; CHECKIZFINX-NEXT:    xori a2, a3, 1
; CHECKIZFINX-NEXT:    feq.s zero, a0, a1
; CHECKIZFINX-NEXT:    mv a0, a2
; CHECKIZFINX-NEXT:    ret
;
; RV32I-LABEL: fcmp_uge:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __ltsf2
; RV32I-NEXT:    slti a0, a0, 0
; RV32I-NEXT:    xori a0, a0, 1
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcmp_uge:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __ltsf2
; RV64I-NEXT:    slti a0, a0, 0
; RV64I-NEXT:    xori a0, a0, 1
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %b, metadata !"uge", metadata !"fpexcept.strict") strictfp
  %2 = zext i1 %1 to i32
  ret i32 %2
}

define i32 @fcmp_ult(float %a, float %b) nounwind strictfp {
; CHECKIF-LABEL: fcmp_ult:
; CHECKIF:       # %bb.0:
; CHECKIF-NEXT:    frflags a0
; CHECKIF-NEXT:    fle.s a1, fa1, fa0
; CHECKIF-NEXT:    fsflags a0
; CHECKIF-NEXT:    xori a0, a1, 1
; CHECKIF-NEXT:    feq.s zero, fa1, fa0
; CHECKIF-NEXT:    ret
;
; CHECKIZFINX-LABEL: fcmp_ult:
; CHECKIZFINX:       # %bb.0:
; CHECKIZFINX-NEXT:    frflags a2
; CHECKIZFINX-NEXT:    fle.s a3, a1, a0
; CHECKIZFINX-NEXT:    fsflags a2
; CHECKIZFINX-NEXT:    xori a2, a3, 1
; CHECKIZFINX-NEXT:    feq.s zero, a1, a0
; CHECKIZFINX-NEXT:    mv a0, a2
; CHECKIZFINX-NEXT:    ret
;
; RV32I-LABEL: fcmp_ult:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __gesf2
; RV32I-NEXT:    slti a0, a0, 0
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcmp_ult:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __gesf2
; RV64I-NEXT:    slti a0, a0, 0
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %b, metadata !"ult", metadata !"fpexcept.strict") strictfp
  %2 = zext i1 %1 to i32
  ret i32 %2
}

define i32 @fcmp_ule(float %a, float %b) nounwind strictfp {
; CHECKIF-LABEL: fcmp_ule:
; CHECKIF:       # %bb.0:
; CHECKIF-NEXT:    frflags a0
; CHECKIF-NEXT:    flt.s a1, fa1, fa0
; CHECKIF-NEXT:    fsflags a0
; CHECKIF-NEXT:    xori a0, a1, 1
; CHECKIF-NEXT:    feq.s zero, fa1, fa0
; CHECKIF-NEXT:    ret
;
; CHECKIZFINX-LABEL: fcmp_ule:
; CHECKIZFINX:       # %bb.0:
; CHECKIZFINX-NEXT:    frflags a2
; CHECKIZFINX-NEXT:    flt.s a3, a1, a0
; CHECKIZFINX-NEXT:    fsflags a2
; CHECKIZFINX-NEXT:    xori a2, a3, 1
; CHECKIZFINX-NEXT:    feq.s zero, a1, a0
; CHECKIZFINX-NEXT:    mv a0, a2
; CHECKIZFINX-NEXT:    ret
;
; RV32I-LABEL: fcmp_ule:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __gtsf2
; RV32I-NEXT:    slti a0, a0, 1
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcmp_ule:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __gtsf2
; RV64I-NEXT:    slti a0, a0, 1
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %b, metadata !"ule", metadata !"fpexcept.strict") strictfp
  %2 = zext i1 %1 to i32
  ret i32 %2
}

define i32 @fcmp_une(float %a, float %b) nounwind strictfp {
; CHECKIF-LABEL: fcmp_une:
; CHECKIF:       # %bb.0:
; CHECKIF-NEXT:    feq.s a0, fa0, fa1
; CHECKIF-NEXT:    xori a0, a0, 1
; CHECKIF-NEXT:    ret
;
; CHECKIZFINX-LABEL: fcmp_une:
; CHECKIZFINX:       # %bb.0:
; CHECKIZFINX-NEXT:    feq.s a0, a0, a1
; CHECKIZFINX-NEXT:    xori a0, a0, 1
; CHECKIZFINX-NEXT:    ret
;
; RV32I-LABEL: fcmp_une:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __nesf2
; RV32I-NEXT:    snez a0, a0
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcmp_une:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __nesf2
; RV64I-NEXT:    snez a0, a0
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %b, metadata !"une", metadata !"fpexcept.strict") strictfp
  %2 = zext i1 %1 to i32
  ret i32 %2
}

define i32 @fcmp_uno(float %a, float %b) nounwind strictfp {
; CHECKIF-LABEL: fcmp_uno:
; CHECKIF:       # %bb.0:
; CHECKIF-NEXT:    feq.s a0, fa1, fa1
; CHECKIF-NEXT:    feq.s a1, fa0, fa0
; CHECKIF-NEXT:    and a0, a1, a0
; CHECKIF-NEXT:    xori a0, a0, 1
; CHECKIF-NEXT:    ret
;
; CHECKIZFINX-LABEL: fcmp_uno:
; CHECKIZFINX:       # %bb.0:
; CHECKIZFINX-NEXT:    feq.s a1, a1, a1
; CHECKIZFINX-NEXT:    feq.s a0, a0, a0
; CHECKIZFINX-NEXT:    and a0, a0, a1
; CHECKIZFINX-NEXT:    xori a0, a0, 1
; CHECKIZFINX-NEXT:    ret
;
; RV32I-LABEL: fcmp_uno:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __unordsf2
; RV32I-NEXT:    snez a0, a0
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcmp_uno:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __unordsf2
; RV64I-NEXT:    snez a0, a0
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %b, metadata !"uno", metadata !"fpexcept.strict") strictfp
  %2 = zext i1 %1 to i32
  ret i32 %2
}

define i32 @fcmps_oeq(float %a, float %b) nounwind strictfp {
; CHECKIF-LABEL: fcmps_oeq:
; CHECKIF:       # %bb.0:
; CHECKIF-NEXT:    fle.s a0, fa1, fa0
; CHECKIF-NEXT:    fle.s a1, fa0, fa1
; CHECKIF-NEXT:    and a0, a1, a0
; CHECKIF-NEXT:    ret
;
; CHECKIZFINX-LABEL: fcmps_oeq:
; CHECKIZFINX:       # %bb.0:
; CHECKIZFINX-NEXT:    fle.s a2, a1, a0
; CHECKIZFINX-NEXT:    fle.s a0, a0, a1
; CHECKIZFINX-NEXT:    and a0, a0, a2
; CHECKIZFINX-NEXT:    ret
;
; RV32I-LABEL: fcmps_oeq:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __eqsf2
; RV32I-NEXT:    seqz a0, a0
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcmps_oeq:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __eqsf2
; RV64I-NEXT:    seqz a0, a0
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call i1 @llvm.experimental.constrained.fcmps.f32(float %a, float %b, metadata !"oeq", metadata !"fpexcept.strict") strictfp
  %2 = zext i1 %1 to i32
  ret i32 %2
}
declare i1 @llvm.experimental.constrained.fcmps.f32(float, float, metadata, metadata)

define i32 @fcmps_ogt(float %a, float %b) nounwind strictfp {
; CHECKIF-LABEL: fcmps_ogt:
; CHECKIF:       # %bb.0:
; CHECKIF-NEXT:    flt.s a0, fa1, fa0
; CHECKIF-NEXT:    ret
;
; CHECKIZFINX-LABEL: fcmps_ogt:
; CHECKIZFINX:       # %bb.0:
; CHECKIZFINX-NEXT:    flt.s a0, a1, a0
; CHECKIZFINX-NEXT:    ret
;
; RV32I-LABEL: fcmps_ogt:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __gtsf2
; RV32I-NEXT:    sgtz a0, a0
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcmps_ogt:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __gtsf2
; RV64I-NEXT:    sgtz a0, a0
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call i1 @llvm.experimental.constrained.fcmps.f32(float %a, float %b, metadata !"ogt", metadata !"fpexcept.strict") strictfp
  %2 = zext i1 %1 to i32
  ret i32 %2
}

define i32 @fcmps_oge(float %a, float %b) nounwind strictfp {
; CHECKIF-LABEL: fcmps_oge:
; CHECKIF:       # %bb.0:
; CHECKIF-NEXT:    fle.s a0, fa1, fa0
; CHECKIF-NEXT:    ret
;
; CHECKIZFINX-LABEL: fcmps_oge:
; CHECKIZFINX:       # %bb.0:
; CHECKIZFINX-NEXT:    fle.s a0, a1, a0
; CHECKIZFINX-NEXT:    ret
;
; RV32I-LABEL: fcmps_oge:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __gesf2
; RV32I-NEXT:    slti a0, a0, 0
; RV32I-NEXT:    xori a0, a0, 1
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcmps_oge:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __gesf2
; RV64I-NEXT:    slti a0, a0, 0
; RV64I-NEXT:    xori a0, a0, 1
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call i1 @llvm.experimental.constrained.fcmps.f32(float %a, float %b, metadata !"oge", metadata !"fpexcept.strict") strictfp
  %2 = zext i1 %1 to i32
  ret i32 %2
}

define i32 @fcmps_olt(float %a, float %b) nounwind strictfp {
; CHECKIF-LABEL: fcmps_olt:
; CHECKIF:       # %bb.0:
; CHECKIF-NEXT:    flt.s a0, fa0, fa1
; CHECKIF-NEXT:    ret
;
; CHECKIZFINX-LABEL: fcmps_olt:
; CHECKIZFINX:       # %bb.0:
; CHECKIZFINX-NEXT:    flt.s a0, a0, a1
; CHECKIZFINX-NEXT:    ret
;
; RV32I-LABEL: fcmps_olt:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __ltsf2
; RV32I-NEXT:    slti a0, a0, 0
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcmps_olt:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __ltsf2
; RV64I-NEXT:    slti a0, a0, 0
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call i1 @llvm.experimental.constrained.fcmps.f32(float %a, float %b, metadata !"olt", metadata !"fpexcept.strict") strictfp
  %2 = zext i1 %1 to i32
  ret i32 %2
}

define i32 @fcmps_ole(float %a, float %b) nounwind strictfp {
; CHECKIF-LABEL: fcmps_ole:
; CHECKIF:       # %bb.0:
; CHECKIF-NEXT:    fle.s a0, fa0, fa1
; CHECKIF-NEXT:    ret
;
; CHECKIZFINX-LABEL: fcmps_ole:
; CHECKIZFINX:       # %bb.0:
; CHECKIZFINX-NEXT:    fle.s a0, a0, a1
; CHECKIZFINX-NEXT:    ret
;
; RV32I-LABEL: fcmps_ole:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __lesf2
; RV32I-NEXT:    slti a0, a0, 1
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcmps_ole:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __lesf2
; RV64I-NEXT:    slti a0, a0, 1
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call i1 @llvm.experimental.constrained.fcmps.f32(float %a, float %b, metadata !"ole", metadata !"fpexcept.strict") strictfp
  %2 = zext i1 %1 to i32
  ret i32 %2
}

define i32 @fcmps_one(float %a, float %b) nounwind strictfp {
; CHECKIF-LABEL: fcmps_one:
; CHECKIF:       # %bb.0:
; CHECKIF-NEXT:    flt.s a0, fa0, fa1
; CHECKIF-NEXT:    flt.s a1, fa1, fa0
; CHECKIF-NEXT:    or a0, a1, a0
; CHECKIF-NEXT:    ret
;
; CHECKIZFINX-LABEL: fcmps_one:
; CHECKIZFINX:       # %bb.0:
; CHECKIZFINX-NEXT:    flt.s a2, a0, a1
; CHECKIZFINX-NEXT:    flt.s a0, a1, a0
; CHECKIZFINX-NEXT:    or a0, a0, a2
; CHECKIZFINX-NEXT:    ret
;
; RV32I-LABEL: fcmps_one:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s2, 0(sp) # 4-byte Folded Spill
; RV32I-NEXT:    mv s0, a1
; RV32I-NEXT:    mv s1, a0
; RV32I-NEXT:    call __eqsf2
; RV32I-NEXT:    snez s2, a0
; RV32I-NEXT:    mv a0, s1
; RV32I-NEXT:    mv a1, s0
; RV32I-NEXT:    call __unordsf2
; RV32I-NEXT:    seqz a0, a0
; RV32I-NEXT:    and a0, a0, s2
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s2, 0(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcmps_one:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -32
; RV64I-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sd s2, 0(sp) # 8-byte Folded Spill
; RV64I-NEXT:    mv s0, a1
; RV64I-NEXT:    mv s1, a0
; RV64I-NEXT:    call __eqsf2
; RV64I-NEXT:    snez s2, a0
; RV64I-NEXT:    mv a0, s1
; RV64I-NEXT:    mv a1, s0
; RV64I-NEXT:    call __unordsf2
; RV64I-NEXT:    seqz a0, a0
; RV64I-NEXT:    and a0, a0, s2
; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
; RV64I-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    ld s2, 0(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 32
; RV64I-NEXT:    ret
  %1 = call i1 @llvm.experimental.constrained.fcmps.f32(float %a, float %b, metadata !"one", metadata !"fpexcept.strict") strictfp
  %2 = zext i1 %1 to i32
  ret i32 %2
}

define i32 @fcmps_ord(float %a, float %b) nounwind strictfp {
; CHECKIF-LABEL: fcmps_ord:
; CHECKIF:       # %bb.0:
; CHECKIF-NEXT:    fle.s a0, fa1, fa1
; CHECKIF-NEXT:    fle.s a1, fa0, fa0
; CHECKIF-NEXT:    and a0, a1, a0
; CHECKIF-NEXT:    ret
;
; CHECKIZFINX-LABEL: fcmps_ord:
; CHECKIZFINX:       # %bb.0:
; CHECKIZFINX-NEXT:    fle.s a1, a1, a1
; CHECKIZFINX-NEXT:    fle.s a0, a0, a0
; CHECKIZFINX-NEXT:    and a0, a0, a1
; CHECKIZFINX-NEXT:    ret
;
; RV32I-LABEL: fcmps_ord:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __unordsf2
; RV32I-NEXT:    seqz a0, a0
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcmps_ord:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __unordsf2
; RV64I-NEXT:    seqz a0, a0
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call i1 @llvm.experimental.constrained.fcmps.f32(float %a, float %b, metadata !"ord", metadata !"fpexcept.strict") strictfp
  %2 = zext i1 %1 to i32
  ret i32 %2
}

define i32 @fcmps_ueq(float %a, float %b) nounwind strictfp {
; CHECKIF-LABEL: fcmps_ueq:
; CHECKIF:       # %bb.0:
; CHECKIF-NEXT:    flt.s a0, fa0, fa1
; CHECKIF-NEXT:    flt.s a1, fa1, fa0
; CHECKIF-NEXT:    or a0, a1, a0
; CHECKIF-NEXT:    xori a0, a0, 1
; CHECKIF-NEXT:    ret
;
; CHECKIZFINX-LABEL: fcmps_ueq:
; CHECKIZFINX:       # %bb.0:
; CHECKIZFINX-NEXT:    flt.s a2, a0, a1
; CHECKIZFINX-NEXT:    flt.s a0, a1, a0
; CHECKIZFINX-NEXT:    or a0, a0, a2
; CHECKIZFINX-NEXT:    xori a0, a0, 1
; CHECKIZFINX-NEXT:    ret
;
; RV32I-LABEL: fcmps_ueq:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s2, 0(sp) # 4-byte Folded Spill
; RV32I-NEXT:    mv s0, a1
; RV32I-NEXT:    mv s1, a0
; RV32I-NEXT:    call __eqsf2
; RV32I-NEXT:    seqz s2, a0
; RV32I-NEXT:    mv a0, s1
; RV32I-NEXT:    mv a1, s0
; RV32I-NEXT:    call __unordsf2
; RV32I-NEXT:    snez a0, a0
; RV32I-NEXT:    or a0, a0, s2
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s2, 0(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcmps_ueq:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -32
; RV64I-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sd s2, 0(sp) # 8-byte Folded Spill
; RV64I-NEXT:    mv s0, a1
; RV64I-NEXT:    mv s1, a0
; RV64I-NEXT:    call __eqsf2
; RV64I-NEXT:    seqz s2, a0
; RV64I-NEXT:    mv a0, s1
; RV64I-NEXT:    mv a1, s0
; RV64I-NEXT:    call __unordsf2
; RV64I-NEXT:    snez a0, a0
; RV64I-NEXT:    or a0, a0, s2
; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
; RV64I-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    ld s2, 0(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 32
; RV64I-NEXT:    ret
  %1 = call i1 @llvm.experimental.constrained.fcmps.f32(float %a, float %b, metadata !"ueq", metadata !"fpexcept.strict") strictfp
  %2 = zext i1 %1 to i32
  ret i32 %2
}

define i32 @fcmps_ugt(float %a, float %b) nounwind strictfp {
; CHECKIF-LABEL: fcmps_ugt:
; CHECKIF:       # %bb.0:
; CHECKIF-NEXT:    fle.s a0, fa0, fa1
; CHECKIF-NEXT:    xori a0, a0, 1
; CHECKIF-NEXT:    ret
;
; CHECKIZFINX-LABEL: fcmps_ugt:
; CHECKIZFINX:       # %bb.0:
; CHECKIZFINX-NEXT:    fle.s a0, a0, a1
; CHECKIZFINX-NEXT:    xori a0, a0, 1
; CHECKIZFINX-NEXT:    ret
;
; RV32I-LABEL: fcmps_ugt:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __lesf2
; RV32I-NEXT:    sgtz a0, a0
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcmps_ugt:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __lesf2
; RV64I-NEXT:    sgtz a0, a0
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call i1 @llvm.experimental.constrained.fcmps.f32(float %a, float %b, metadata !"ugt", metadata !"fpexcept.strict") strictfp
  %2 = zext i1 %1 to i32
  ret i32 %2
}

define i32 @fcmps_uge(float %a, float %b) nounwind strictfp {
; CHECKIF-LABEL: fcmps_uge:
; CHECKIF:       # %bb.0:
; CHECKIF-NEXT:    flt.s a0, fa0, fa1
; CHECKIF-NEXT:    xori a0, a0, 1
; CHECKIF-NEXT:    ret
;
; CHECKIZFINX-LABEL: fcmps_uge:
; CHECKIZFINX:       # %bb.0:
; CHECKIZFINX-NEXT:    flt.s a0, a0, a1
; CHECKIZFINX-NEXT:    xori a0, a0, 1
; CHECKIZFINX-NEXT:    ret
;
; RV32I-LABEL: fcmps_uge:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __ltsf2
; RV32I-NEXT:    slti a0, a0, 0
; RV32I-NEXT:    xori a0, a0, 1
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcmps_uge:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __ltsf2
; RV64I-NEXT:    slti a0, a0, 0
; RV64I-NEXT:    xori a0, a0, 1
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call i1 @llvm.experimental.constrained.fcmps.f32(float %a, float %b, metadata !"uge", metadata !"fpexcept.strict") strictfp
  %2 = zext i1 %1 to i32
  ret i32 %2
}

define i32 @fcmps_ult(float %a, float %b) nounwind strictfp {
; CHECKIF-LABEL: fcmps_ult:
; CHECKIF:       # %bb.0:
; CHECKIF-NEXT:    fle.s a0, fa1, fa0
; CHECKIF-NEXT:    xori a0, a0, 1
; CHECKIF-NEXT:    ret
;
; CHECKIZFINX-LABEL: fcmps_ult:
; CHECKIZFINX:       # %bb.0:
; CHECKIZFINX-NEXT:    fle.s a0, a1, a0
; CHECKIZFINX-NEXT:    xori a0, a0, 1
; CHECKIZFINX-NEXT:    ret
;
; RV32I-LABEL: fcmps_ult:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __gesf2
; RV32I-NEXT:    slti a0, a0, 0
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcmps_ult:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __gesf2
; RV64I-NEXT:    slti a0, a0, 0
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call i1 @llvm.experimental.constrained.fcmps.f32(float %a, float %b, metadata !"ult", metadata !"fpexcept.strict") strictfp
  %2 = zext i1 %1 to i32
  ret i32 %2
}

define i32 @fcmps_ule(float %a, float %b) nounwind strictfp {
; CHECKIF-LABEL: fcmps_ule:
; CHECKIF:       # %bb.0:
; CHECKIF-NEXT:    flt.s a0, fa1, fa0
; CHECKIF-NEXT:    xori a0, a0, 1
; CHECKIF-NEXT:    ret
;
; CHECKIZFINX-LABEL: fcmps_ule:
; CHECKIZFINX:       # %bb.0:
; CHECKIZFINX-NEXT:    flt.s a0, a1, a0
; CHECKIZFINX-NEXT:    xori a0, a0, 1
; CHECKIZFINX-NEXT:    ret
;
; RV32I-LABEL: fcmps_ule:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __gtsf2
; RV32I-NEXT:    slti a0, a0, 1
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcmps_ule:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __gtsf2
; RV64I-NEXT:    slti a0, a0, 1
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call i1 @llvm.experimental.constrained.fcmps.f32(float %a, float %b, metadata !"ule", metadata !"fpexcept.strict") strictfp
  %2 = zext i1 %1 to i32
  ret i32 %2
}

define i32 @fcmps_une(float %a, float %b) nounwind strictfp {
; CHECKIF-LABEL: fcmps_une:
; CHECKIF:       # %bb.0:
; CHECKIF-NEXT:    fle.s a0, fa1, fa0
; CHECKIF-NEXT:    fle.s a1, fa0, fa1
; CHECKIF-NEXT:    and a0, a1, a0
; CHECKIF-NEXT:    xori a0, a0, 1
; CHECKIF-NEXT:    ret
;
; CHECKIZFINX-LABEL: fcmps_une:
; CHECKIZFINX:       # %bb.0:
; CHECKIZFINX-NEXT:    fle.s a2, a1, a0
; CHECKIZFINX-NEXT:    fle.s a0, a0, a1
; CHECKIZFINX-NEXT:    and a0, a0, a2
; CHECKIZFINX-NEXT:    xori a0, a0, 1
; CHECKIZFINX-NEXT:    ret
;
; RV32I-LABEL: fcmps_une:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __nesf2
; RV32I-NEXT:    snez a0, a0
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcmps_une:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __nesf2
; RV64I-NEXT:    snez a0, a0
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call i1 @llvm.experimental.constrained.fcmps.f32(float %a, float %b, metadata !"une", metadata !"fpexcept.strict") strictfp
  %2 = zext i1 %1 to i32
  ret i32 %2
}

define i32 @fcmps_uno(float %a, float %b) nounwind strictfp {
; CHECKIF-LABEL: fcmps_uno:
; CHECKIF:       # %bb.0:
; CHECKIF-NEXT:    fle.s a0, fa1, fa1
; CHECKIF-NEXT:    fle.s a1, fa0, fa0
; CHECKIF-NEXT:    and a0, a1, a0
; CHECKIF-NEXT:    xori a0, a0, 1
; CHECKIF-NEXT:    ret
;
; CHECKIZFINX-LABEL: fcmps_uno:
; CHECKIZFINX:       # %bb.0:
; CHECKIZFINX-NEXT:    fle.s a1, a1, a1
; CHECKIZFINX-NEXT:    fle.s a0, a0, a0
; CHECKIZFINX-NEXT:    and a0, a0, a1
; CHECKIZFINX-NEXT:    xori a0, a0, 1
; CHECKIZFINX-NEXT:    ret
;
; RV32I-LABEL: fcmps_uno:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __unordsf2
; RV32I-NEXT:    snez a0, a0
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcmps_uno:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __unordsf2
; RV64I-NEXT:    snez a0, a0
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call i1 @llvm.experimental.constrained.fcmps.f32(float %a, float %b, metadata !"uno", metadata !"fpexcept.strict") strictfp
  %2 = zext i1 %1 to i32
  ret i32 %2
}