; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+d -verify-machineinstrs < %s \
; RUN:   -disable-strictnode-mutation -target-abi=ilp32d \
; RUN:   | FileCheck -check-prefix=CHECKIFD %s
; RUN: llc -mtriple=riscv64 -mattr=+d -verify-machineinstrs < %s \
; RUN:   -disable-strictnode-mutation -target-abi=lp64d \
; RUN:   | FileCheck -check-prefix=CHECKIFD %s
; RUN: llc -mtriple=riscv32 -mattr=+zdinx -verify-machineinstrs < %s \
; RUN:   -disable-strictnode-mutation -target-abi=ilp32 \
; RUN:   | FileCheck -check-prefix=RV32IZFINXZDINX %s
; RUN: llc -mtriple=riscv64 -mattr=+zdinx -verify-machineinstrs < %s \
; RUN:   -disable-strictnode-mutation -target-abi=lp64 \
; RUN:   | FileCheck -check-prefix=RV64IZFINXZDINX %s
; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
; RUN:   -disable-strictnode-mutation | FileCheck -check-prefix=RV32I %s
; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
; RUN:   -disable-strictnode-mutation | FileCheck -check-prefix=RV64I %s
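;
; If codegen changes, the CHECK lines in this file are meant to be regenerated
; rather than edited by hand. A typical invocation looks like the following
; (the <build> path and use of --llc-binary are assumptions about the local
; build layout, not part of this test):
;   llvm/utils/update_llc_test_checks.py --llc-binary=<build>/bin/llc \
;     llvm/test/CodeGen/RISCV/double-fcmp-strict.ll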

define i32 @fcmp_oeq(double %a, double %b) nounwind strictfp {
; CHECKIFD-LABEL: fcmp_oeq:
; CHECKIFD:       # %bb.0:
; CHECKIFD-NEXT:    feq.d a0, fa0, fa1
; CHECKIFD-NEXT:    ret
;
; RV32IZFINXZDINX-LABEL: fcmp_oeq:
; RV32IZFINXZDINX:       # %bb.0:
; RV32IZFINXZDINX-NEXT:    feq.d a0, a0, a2
; RV32IZFINXZDINX-NEXT:    ret
;
; RV64IZFINXZDINX-LABEL: fcmp_oeq:
; RV64IZFINXZDINX:       # %bb.0:
; RV64IZFINXZDINX-NEXT:    feq.d a0, a0, a1
; RV64IZFINXZDINX-NEXT:    ret
;
; RV32I-LABEL: fcmp_oeq:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __eqdf2
; RV32I-NEXT:    seqz a0, a0
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcmp_oeq:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __eqdf2
; RV64I-NEXT:    seqz a0, a0
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"oeq", metadata !"fpexcept.strict") strictfp
  %2 = zext i1 %1 to i32
  ret i32 %2
}
declare i1 @llvm.experimental.constrained.fcmp.f64(double, double, metadata, metadata)

define i32 @fcmp_ogt(double %a, double %b) nounwind strictfp {
; CHECKIFD-LABEL: fcmp_ogt:
; CHECKIFD:       # %bb.0:
; CHECKIFD-NEXT:    frflags a1
; CHECKIFD-NEXT:    flt.d a0, fa1, fa0
; CHECKIFD-NEXT:    fsflags a1
; CHECKIFD-NEXT:    feq.d zero, fa1, fa0
; CHECKIFD-NEXT:    ret
;
; RV32IZFINXZDINX-LABEL: fcmp_ogt:
; RV32IZFINXZDINX:       # %bb.0:
; RV32IZFINXZDINX-NEXT:    frflags a5
; RV32IZFINXZDINX-NEXT:    flt.d a4, a2, a0
; RV32IZFINXZDINX-NEXT:    fsflags a5
; RV32IZFINXZDINX-NEXT:    feq.d zero, a2, a0
; RV32IZFINXZDINX-NEXT:    mv a0, a4
; RV32IZFINXZDINX-NEXT:    ret
;
; RV64IZFINXZDINX-LABEL: fcmp_ogt:
; RV64IZFINXZDINX:       # %bb.0:
; RV64IZFINXZDINX-NEXT:    frflags a3
; RV64IZFINXZDINX-NEXT:    flt.d a2, a1, a0
; RV64IZFINXZDINX-NEXT:    fsflags a3
; RV64IZFINXZDINX-NEXT:    feq.d zero, a1, a0
; RV64IZFINXZDINX-NEXT:    mv a0, a2
; RV64IZFINXZDINX-NEXT:    ret
;
; RV32I-LABEL: fcmp_ogt:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __gtdf2
; RV32I-NEXT:    sgtz a0, a0
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcmp_ogt:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __gtdf2
; RV64I-NEXT:    sgtz a0, a0
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"ogt", metadata !"fpexcept.strict") strictfp
  %2 = zext i1 %1 to i32
  ret i32 %2
}

define i32 @fcmp_oge(double %a, double %b) nounwind strictfp {
; CHECKIFD-LABEL: fcmp_oge:
; CHECKIFD:       # %bb.0:
; CHECKIFD-NEXT:    frflags a1
; CHECKIFD-NEXT:    fle.d a0, fa1, fa0
; CHECKIFD-NEXT:    fsflags a1
; CHECKIFD-NEXT:    feq.d zero, fa1, fa0
; CHECKIFD-NEXT:    ret
;
; RV32IZFINXZDINX-LABEL: fcmp_oge:
; RV32IZFINXZDINX:       # %bb.0:
; RV32IZFINXZDINX-NEXT:    frflags a5
; RV32IZFINXZDINX-NEXT:    fle.d a4, a2, a0
; RV32IZFINXZDINX-NEXT:    fsflags a5
; RV32IZFINXZDINX-NEXT:    feq.d zero, a2, a0
; RV32IZFINXZDINX-NEXT:    mv a0, a4
; RV32IZFINXZDINX-NEXT:    ret
;
; RV64IZFINXZDINX-LABEL: fcmp_oge:
; RV64IZFINXZDINX:       # %bb.0:
; RV64IZFINXZDINX-NEXT:    frflags a3
; RV64IZFINXZDINX-NEXT:    fle.d a2, a1, a0
; RV64IZFINXZDINX-NEXT:    fsflags a3
; RV64IZFINXZDINX-NEXT:    feq.d zero, a1, a0
; RV64IZFINXZDINX-NEXT:    mv a0, a2
; RV64IZFINXZDINX-NEXT:    ret
;
; RV32I-LABEL: fcmp_oge:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __gedf2
; RV32I-NEXT:    slti a0, a0, 0
; RV32I-NEXT:    xori a0, a0, 1
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcmp_oge:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __gedf2
; RV64I-NEXT:    slti a0, a0, 0
; RV64I-NEXT:    xori a0, a0, 1
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"oge", metadata !"fpexcept.strict") strictfp
  %2 = zext i1 %1 to i32
  ret i32 %2
}

define i32 @fcmp_olt(double %a, double %b) nounwind strictfp {
; CHECKIFD-LABEL: fcmp_olt:
; CHECKIFD:       # %bb.0:
; CHECKIFD-NEXT:    frflags a1
; CHECKIFD-NEXT:    flt.d a0, fa0, fa1
; CHECKIFD-NEXT:    fsflags a1
; CHECKIFD-NEXT:    feq.d zero, fa0, fa1
; CHECKIFD-NEXT:    ret
;
; RV32IZFINXZDINX-LABEL: fcmp_olt:
; RV32IZFINXZDINX:       # %bb.0:
; RV32IZFINXZDINX-NEXT:    frflags a5
; RV32IZFINXZDINX-NEXT:    flt.d a4, a0, a2
; RV32IZFINXZDINX-NEXT:    fsflags a5
; RV32IZFINXZDINX-NEXT:    feq.d zero, a0, a2
; RV32IZFINXZDINX-NEXT:    mv a0, a4
; RV32IZFINXZDINX-NEXT:    ret
;
; RV64IZFINXZDINX-LABEL: fcmp_olt:
; RV64IZFINXZDINX:       # %bb.0:
; RV64IZFINXZDINX-NEXT:    frflags a3
; RV64IZFINXZDINX-NEXT:    flt.d a2, a0, a1
; RV64IZFINXZDINX-NEXT:    fsflags a3
; RV64IZFINXZDINX-NEXT:    feq.d zero, a0, a1
; RV64IZFINXZDINX-NEXT:    mv a0, a2
; RV64IZFINXZDINX-NEXT:    ret
;
; RV32I-LABEL: fcmp_olt:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __ltdf2
; RV32I-NEXT:    slti a0, a0, 0
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcmp_olt:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __ltdf2
; RV64I-NEXT:    slti a0, a0, 0
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"olt", metadata !"fpexcept.strict") strictfp
  %2 = zext i1 %1 to i32
  ret i32 %2
}

define i32 @fcmp_ole(double %a, double %b) nounwind strictfp {
; CHECKIFD-LABEL: fcmp_ole:
; CHECKIFD:       # %bb.0:
; CHECKIFD-NEXT:    frflags a1
; CHECKIFD-NEXT:    fle.d a0, fa0, fa1
; CHECKIFD-NEXT:    fsflags a1
; CHECKIFD-NEXT:    feq.d zero, fa0, fa1
; CHECKIFD-NEXT:    ret
;
; RV32IZFINXZDINX-LABEL: fcmp_ole:
; RV32IZFINXZDINX:       # %bb.0:
; RV32IZFINXZDINX-NEXT:    frflags a5
; RV32IZFINXZDINX-NEXT:    fle.d a4, a0, a2
; RV32IZFINXZDINX-NEXT:    fsflags a5
; RV32IZFINXZDINX-NEXT:    feq.d zero, a0, a2
; RV32IZFINXZDINX-NEXT:    mv a0, a4
; RV32IZFINXZDINX-NEXT:    ret
;
; RV64IZFINXZDINX-LABEL: fcmp_ole:
; RV64IZFINXZDINX:       # %bb.0:
; RV64IZFINXZDINX-NEXT:    frflags a3
; RV64IZFINXZDINX-NEXT:    fle.d a2, a0, a1
; RV64IZFINXZDINX-NEXT:    fsflags a3
; RV64IZFINXZDINX-NEXT:    feq.d zero, a0, a1
; RV64IZFINXZDINX-NEXT:    mv a0, a2
; RV64IZFINXZDINX-NEXT:    ret
;
; RV32I-LABEL: fcmp_ole:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __ledf2
; RV32I-NEXT:    slti a0, a0, 1
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcmp_ole:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __ledf2
; RV64I-NEXT:    slti a0, a0, 1
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"ole", metadata !"fpexcept.strict") strictfp
  %2 = zext i1 %1 to i32
  ret i32 %2
}

; FIXME: We only need one frflags before the two flts and one fsflags after the
; two flts.
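; A rough sketch of the merged sequence the FIXME above is asking for, using
; the CHECKIFD register assignments purely for illustration (this is not
; generated output, and it keeps both trailing feq.d NaN checks as emitted
; today):
;   frflags a2
;   flt.d a0, fa0, fa1
;   flt.d a1, fa1, fa0
;   fsflags a2
;   or a0, a1, a0
;   feq.d zero, fa0, fa1
;   feq.d zero, fa1, fa0
;   ret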
define i32 @fcmp_one(double %a, double %b) nounwind strictfp {
; CHECKIFD-LABEL: fcmp_one:
; CHECKIFD:       # %bb.0:
; CHECKIFD-NEXT:    frflags a0
; CHECKIFD-NEXT:    flt.d a1, fa0, fa1
; CHECKIFD-NEXT:    fsflags a0
; CHECKIFD-NEXT:    feq.d zero, fa0, fa1
; CHECKIFD-NEXT:    frflags a0
; CHECKIFD-NEXT:    flt.d a2, fa1, fa0
; CHECKIFD-NEXT:    fsflags a0
; CHECKIFD-NEXT:    or a0, a2, a1
; CHECKIFD-NEXT:    feq.d zero, fa1, fa0
; CHECKIFD-NEXT:    ret
;
; RV32IZFINXZDINX-LABEL: fcmp_one:
; RV32IZFINXZDINX:       # %bb.0:
; RV32IZFINXZDINX-NEXT:    frflags a4
; RV32IZFINXZDINX-NEXT:    flt.d a5, a0, a2
; RV32IZFINXZDINX-NEXT:    fsflags a4
; RV32IZFINXZDINX-NEXT:    feq.d zero, a0, a2
; RV32IZFINXZDINX-NEXT:    frflags a4
; RV32IZFINXZDINX-NEXT:    flt.d a6, a2, a0
; RV32IZFINXZDINX-NEXT:    fsflags a4
; RV32IZFINXZDINX-NEXT:    or a4, a6, a5
; RV32IZFINXZDINX-NEXT:    feq.d zero, a2, a0
; RV32IZFINXZDINX-NEXT:    mv a0, a4
; RV32IZFINXZDINX-NEXT:    ret
;
; RV64IZFINXZDINX-LABEL: fcmp_one:
; RV64IZFINXZDINX:       # %bb.0:
; RV64IZFINXZDINX-NEXT:    frflags a2
; RV64IZFINXZDINX-NEXT:    flt.d a3, a0, a1
; RV64IZFINXZDINX-NEXT:    fsflags a2
; RV64IZFINXZDINX-NEXT:    feq.d zero, a0, a1
; RV64IZFINXZDINX-NEXT:    frflags a2
; RV64IZFINXZDINX-NEXT:    flt.d a4, a1, a0
; RV64IZFINXZDINX-NEXT:    fsflags a2
; RV64IZFINXZDINX-NEXT:    or a2, a4, a3
; RV64IZFINXZDINX-NEXT:    feq.d zero, a1, a0
; RV64IZFINXZDINX-NEXT:    mv a0, a2
; RV64IZFINXZDINX-NEXT:    ret
;
; RV32I-LABEL: fcmp_one:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -32
; RV32I-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s3, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s4, 8(sp) # 4-byte Folded Spill
; RV32I-NEXT:    mv s0, a3
; RV32I-NEXT:    mv s1, a2
; RV32I-NEXT:    mv s2, a1
; RV32I-NEXT:    mv s3, a0
; RV32I-NEXT:    call __eqdf2
; RV32I-NEXT:    snez s4, a0
; RV32I-NEXT:    mv a0, s3
; RV32I-NEXT:    mv a1, s2
; RV32I-NEXT:    mv a2, s1
; RV32I-NEXT:    mv a3, s0
; RV32I-NEXT:    call __unorddf2
; RV32I-NEXT:    seqz a0, a0
; RV32I-NEXT:    and a0, a0, s4
; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s3, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s4, 8(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 32
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcmp_one:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -32
; RV64I-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sd s2, 0(sp) # 8-byte Folded Spill
; RV64I-NEXT:    mv s0, a1
; RV64I-NEXT:    mv s1, a0
; RV64I-NEXT:    call __eqdf2
; RV64I-NEXT:    snez s2, a0
; RV64I-NEXT:    mv a0, s1
; RV64I-NEXT:    mv a1, s0
; RV64I-NEXT:    call __unorddf2
; RV64I-NEXT:    seqz a0, a0
; RV64I-NEXT:    and a0, a0, s2
; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
; RV64I-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    ld s2, 0(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 32
; RV64I-NEXT:    ret
  %1 = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"one", metadata !"fpexcept.strict") strictfp
  %2 = zext i1 %1 to i32
  ret i32 %2
}

define i32 @fcmp_ord(double %a, double %b) nounwind strictfp {
; CHECKIFD-LABEL: fcmp_ord:
; CHECKIFD:       # %bb.0:
; CHECKIFD-NEXT:    feq.d a0, fa1, fa1
; CHECKIFD-NEXT:    feq.d a1, fa0, fa0
; CHECKIFD-NEXT:    and a0, a1, a0
; CHECKIFD-NEXT:    ret
;
; RV32IZFINXZDINX-LABEL: fcmp_ord:
; RV32IZFINXZDINX:       # %bb.0:
; RV32IZFINXZDINX-NEXT:    feq.d a2, a2, a2
; RV32IZFINXZDINX-NEXT:    feq.d a0, a0, a0
; RV32IZFINXZDINX-NEXT:    and a0, a0, a2
; RV32IZFINXZDINX-NEXT:    ret
;
; RV64IZFINXZDINX-LABEL: fcmp_ord:
; RV64IZFINXZDINX:       # %bb.0:
; RV64IZFINXZDINX-NEXT:    feq.d a1, a1, a1
; RV64IZFINXZDINX-NEXT:    feq.d a0, a0, a0
; RV64IZFINXZDINX-NEXT:    and a0, a0, a1
; RV64IZFINXZDINX-NEXT:    ret
;
; RV32I-LABEL: fcmp_ord:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __unorddf2
; RV32I-NEXT:    seqz a0, a0
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcmp_ord:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __unorddf2
; RV64I-NEXT:    seqz a0, a0
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"ord", metadata !"fpexcept.strict") strictfp
  %2 = zext i1 %1 to i32
  ret i32 %2
}

; FIXME: We only need one frflags before the two flts and one fsflags after the
; two flts.
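; As above, a hand-written sketch (illustrative registers only, not generated
; output) of a single frflags/fsflags pair around both flt.d instructions for
; the ueq case; the xori inverts the ordered-not-equal result:
;   frflags a2
;   flt.d a0, fa0, fa1
;   flt.d a1, fa1, fa0
;   fsflags a2
;   or a0, a1, a0
;   xori a0, a0, 1
;   feq.d zero, fa0, fa1
;   feq.d zero, fa1, fa0
;   ret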
define i32 @fcmp_ueq(double %a, double %b) nounwind strictfp {
; CHECKIFD-LABEL: fcmp_ueq:
; CHECKIFD:       # %bb.0:
; CHECKIFD-NEXT:    frflags a0
; CHECKIFD-NEXT:    flt.d a1, fa0, fa1
; CHECKIFD-NEXT:    fsflags a0
; CHECKIFD-NEXT:    feq.d zero, fa0, fa1
; CHECKIFD-NEXT:    frflags a0
; CHECKIFD-NEXT:    flt.d a2, fa1, fa0
; CHECKIFD-NEXT:    fsflags a0
; CHECKIFD-NEXT:    or a1, a2, a1
; CHECKIFD-NEXT:    xori a0, a1, 1
; CHECKIFD-NEXT:    feq.d zero, fa1, fa0
; CHECKIFD-NEXT:    ret
;
; RV32IZFINXZDINX-LABEL: fcmp_ueq:
; RV32IZFINXZDINX:       # %bb.0:
; RV32IZFINXZDINX-NEXT:    frflags a4
; RV32IZFINXZDINX-NEXT:    flt.d a5, a0, a2
; RV32IZFINXZDINX-NEXT:    fsflags a4
; RV32IZFINXZDINX-NEXT:    feq.d zero, a0, a2
; RV32IZFINXZDINX-NEXT:    frflags a4
; RV32IZFINXZDINX-NEXT:    flt.d a6, a2, a0
; RV32IZFINXZDINX-NEXT:    fsflags a4
; RV32IZFINXZDINX-NEXT:    or a4, a6, a5
; RV32IZFINXZDINX-NEXT:    xori a4, a4, 1
; RV32IZFINXZDINX-NEXT:    feq.d zero, a2, a0
; RV32IZFINXZDINX-NEXT:    mv a0, a4
; RV32IZFINXZDINX-NEXT:    ret
;
; RV64IZFINXZDINX-LABEL: fcmp_ueq:
; RV64IZFINXZDINX:       # %bb.0:
; RV64IZFINXZDINX-NEXT:    frflags a2
; RV64IZFINXZDINX-NEXT:    flt.d a3, a0, a1
; RV64IZFINXZDINX-NEXT:    fsflags a2
; RV64IZFINXZDINX-NEXT:    feq.d zero, a0, a1
; RV64IZFINXZDINX-NEXT:    frflags a2
; RV64IZFINXZDINX-NEXT:    flt.d a4, a1, a0
; RV64IZFINXZDINX-NEXT:    fsflags a2
; RV64IZFINXZDINX-NEXT:    or a3, a4, a3
; RV64IZFINXZDINX-NEXT:    xori a2, a3, 1
; RV64IZFINXZDINX-NEXT:    feq.d zero, a1, a0
; RV64IZFINXZDINX-NEXT:    mv a0, a2
; RV64IZFINXZDINX-NEXT:    ret
;
; RV32I-LABEL: fcmp_ueq:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -32
; RV32I-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s3, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s4, 8(sp) # 4-byte Folded Spill
; RV32I-NEXT:    mv s0, a3
; RV32I-NEXT:    mv s1, a2
; RV32I-NEXT:    mv s2, a1
; RV32I-NEXT:    mv s3, a0
; RV32I-NEXT:    call __eqdf2
; RV32I-NEXT:    seqz s4, a0
; RV32I-NEXT:    mv a0, s3
; RV32I-NEXT:    mv a1, s2
; RV32I-NEXT:    mv a2, s1
; RV32I-NEXT:    mv a3, s0
; RV32I-NEXT:    call __unorddf2
; RV32I-NEXT:    snez a0, a0
; RV32I-NEXT:    or a0, a0, s4
; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s3, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s4, 8(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 32
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcmp_ueq:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -32
; RV64I-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sd s2, 0(sp) # 8-byte Folded Spill
; RV64I-NEXT:    mv s0, a1
; RV64I-NEXT:    mv s1, a0
; RV64I-NEXT:    call __eqdf2
; RV64I-NEXT:    seqz s2, a0
; RV64I-NEXT:    mv a0, s1
; RV64I-NEXT:    mv a1, s0
; RV64I-NEXT:    call __unorddf2
; RV64I-NEXT:    snez a0, a0
; RV64I-NEXT:    or a0, a0, s2
; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
; RV64I-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    ld s2, 0(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 32
; RV64I-NEXT:    ret
  %1 = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"ueq", metadata !"fpexcept.strict") strictfp
  %2 = zext i1 %1 to i32
  ret i32 %2
}

define i32 @fcmp_ugt(double %a, double %b) nounwind strictfp {
; CHECKIFD-LABEL: fcmp_ugt:
; CHECKIFD:       # %bb.0:
; CHECKIFD-NEXT:    frflags a0
; CHECKIFD-NEXT:    fle.d a1, fa0, fa1
; CHECKIFD-NEXT:    fsflags a0
; CHECKIFD-NEXT:    xori a0, a1, 1
; CHECKIFD-NEXT:    feq.d zero, fa0, fa1
; CHECKIFD-NEXT:    ret
;
; RV32IZFINXZDINX-LABEL: fcmp_ugt:
; RV32IZFINXZDINX:       # %bb.0:
; RV32IZFINXZDINX-NEXT:    frflags a4
; RV32IZFINXZDINX-NEXT:    fle.d a5, a0, a2
; RV32IZFINXZDINX-NEXT:    fsflags a4
; RV32IZFINXZDINX-NEXT:    xori a4, a5, 1
; RV32IZFINXZDINX-NEXT:    feq.d zero, a0, a2
; RV32IZFINXZDINX-NEXT:    mv a0, a4
; RV32IZFINXZDINX-NEXT:    ret
;
; RV64IZFINXZDINX-LABEL: fcmp_ugt:
; RV64IZFINXZDINX:       # %bb.0:
; RV64IZFINXZDINX-NEXT:    frflags a2
; RV64IZFINXZDINX-NEXT:    fle.d a3, a0, a1
; RV64IZFINXZDINX-NEXT:    fsflags a2
; RV64IZFINXZDINX-NEXT:    xori a2, a3, 1
; RV64IZFINXZDINX-NEXT:    feq.d zero, a0, a1
; RV64IZFINXZDINX-NEXT:    mv a0, a2
; RV64IZFINXZDINX-NEXT:    ret
;
; RV32I-LABEL: fcmp_ugt:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __ledf2
; RV32I-NEXT:    sgtz a0, a0
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcmp_ugt:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __ledf2
; RV64I-NEXT:    sgtz a0, a0
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"ugt", metadata !"fpexcept.strict") strictfp
  %2 = zext i1 %1 to i32
  ret i32 %2
}

define i32 @fcmp_uge(double %a, double %b) nounwind strictfp {
; CHECKIFD-LABEL: fcmp_uge:
; CHECKIFD:       # %bb.0:
; CHECKIFD-NEXT:    frflags a0
; CHECKIFD-NEXT:    flt.d a1, fa0, fa1
; CHECKIFD-NEXT:    fsflags a0
; CHECKIFD-NEXT:    xori a0, a1, 1
; CHECKIFD-NEXT:    feq.d zero, fa0, fa1
; CHECKIFD-NEXT:    ret
;
; RV32IZFINXZDINX-LABEL: fcmp_uge:
; RV32IZFINXZDINX:       # %bb.0:
; RV32IZFINXZDINX-NEXT:    frflags a4
; RV32IZFINXZDINX-NEXT:    flt.d a5, a0, a2
; RV32IZFINXZDINX-NEXT:    fsflags a4
; RV32IZFINXZDINX-NEXT:    xori a4, a5, 1
; RV32IZFINXZDINX-NEXT:    feq.d zero, a0, a2
; RV32IZFINXZDINX-NEXT:    mv a0, a4
; RV32IZFINXZDINX-NEXT:    ret
;
; RV64IZFINXZDINX-LABEL: fcmp_uge:
; RV64IZFINXZDINX:       # %bb.0:
; RV64IZFINXZDINX-NEXT:    frflags a2
; RV64IZFINXZDINX-NEXT:    flt.d a3, a0, a1
; RV64IZFINXZDINX-NEXT:    fsflags a2
; RV64IZFINXZDINX-NEXT:    xori a2, a3, 1
; RV64IZFINXZDINX-NEXT:    feq.d zero, a0, a1
; RV64IZFINXZDINX-NEXT:    mv a0, a2
; RV64IZFINXZDINX-NEXT:    ret
;
; RV32I-LABEL: fcmp_uge:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __ltdf2
; RV32I-NEXT:    slti a0, a0, 0
; RV32I-NEXT:    xori a0, a0, 1
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcmp_uge:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __ltdf2
; RV64I-NEXT:    slti a0, a0, 0
; RV64I-NEXT:    xori a0, a0, 1
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"uge", metadata !"fpexcept.strict") strictfp
  %2 = zext i1 %1 to i32
  ret i32 %2
}

define i32 @fcmp_ult(double %a, double %b) nounwind strictfp {
; CHECKIFD-LABEL: fcmp_ult:
; CHECKIFD:       # %bb.0:
; CHECKIFD-NEXT:    frflags a0
; CHECKIFD-NEXT:    fle.d a1, fa1, fa0
; CHECKIFD-NEXT:    fsflags a0
; CHECKIFD-NEXT:    xori a0, a1, 1
; CHECKIFD-NEXT:    feq.d zero, fa1, fa0
; CHECKIFD-NEXT:    ret
;
; RV32IZFINXZDINX-LABEL: fcmp_ult:
; RV32IZFINXZDINX:       # %bb.0:
; RV32IZFINXZDINX-NEXT:    frflags a4
; RV32IZFINXZDINX-NEXT:    fle.d a5, a2, a0
; RV32IZFINXZDINX-NEXT:    fsflags a4
; RV32IZFINXZDINX-NEXT:    xori a4, a5, 1
; RV32IZFINXZDINX-NEXT:    feq.d zero, a2, a0
; RV32IZFINXZDINX-NEXT:    mv a0, a4
; RV32IZFINXZDINX-NEXT:    ret
;
; RV64IZFINXZDINX-LABEL: fcmp_ult:
; RV64IZFINXZDINX:       # %bb.0:
; RV64IZFINXZDINX-NEXT:    frflags a2
; RV64IZFINXZDINX-NEXT:    fle.d a3, a1, a0
; RV64IZFINXZDINX-NEXT:    fsflags a2
; RV64IZFINXZDINX-NEXT:    xori a2, a3, 1
; RV64IZFINXZDINX-NEXT:    feq.d zero, a1, a0
; RV64IZFINXZDINX-NEXT:    mv a0, a2
; RV64IZFINXZDINX-NEXT:    ret
;
; RV32I-LABEL: fcmp_ult:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __gedf2
; RV32I-NEXT:    slti a0, a0, 0
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcmp_ult:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __gedf2
; RV64I-NEXT:    slti a0, a0, 0
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"ult", metadata !"fpexcept.strict") strictfp
  %2 = zext i1 %1 to i32
  ret i32 %2
}

define i32 @fcmp_ule(double %a, double %b) nounwind strictfp {
; CHECKIFD-LABEL: fcmp_ule:
; CHECKIFD:       # %bb.0:
; CHECKIFD-NEXT:    frflags a0
; CHECKIFD-NEXT:    flt.d a1, fa1, fa0
; CHECKIFD-NEXT:    fsflags a0
; CHECKIFD-NEXT:    xori a0, a1, 1
; CHECKIFD-NEXT:    feq.d zero, fa1, fa0
; CHECKIFD-NEXT:    ret
;
; RV32IZFINXZDINX-LABEL: fcmp_ule:
; RV32IZFINXZDINX:       # %bb.0:
; RV32IZFINXZDINX-NEXT:    frflags a4
; RV32IZFINXZDINX-NEXT:    flt.d a5, a2, a0
; RV32IZFINXZDINX-NEXT:    fsflags a4
; RV32IZFINXZDINX-NEXT:    xori a4, a5, 1
; RV32IZFINXZDINX-NEXT:    feq.d zero, a2, a0
; RV32IZFINXZDINX-NEXT:    mv a0, a4
; RV32IZFINXZDINX-NEXT:    ret
;
; RV64IZFINXZDINX-LABEL: fcmp_ule:
; RV64IZFINXZDINX:       # %bb.0:
; RV64IZFINXZDINX-NEXT:    frflags a2
; RV64IZFINXZDINX-NEXT:    flt.d a3, a1, a0
; RV64IZFINXZDINX-NEXT:    fsflags a2
; RV64IZFINXZDINX-NEXT:    xori a2, a3, 1
; RV64IZFINXZDINX-NEXT:    feq.d zero, a1, a0
; RV64IZFINXZDINX-NEXT:    mv a0, a2
; RV64IZFINXZDINX-NEXT:    ret
;
; RV32I-LABEL: fcmp_ule:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __gtdf2
; RV32I-NEXT:    slti a0, a0, 1
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcmp_ule:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __gtdf2
; RV64I-NEXT:    slti a0, a0, 1
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"ule", metadata !"fpexcept.strict") strictfp
  %2 = zext i1 %1 to i32
  ret i32 %2
}

define i32 @fcmp_une(double %a, double %b) nounwind strictfp {
; CHECKIFD-LABEL: fcmp_une:
; CHECKIFD:       # %bb.0:
; CHECKIFD-NEXT:    feq.d a0, fa0, fa1
; CHECKIFD-NEXT:    xori a0, a0, 1
; CHECKIFD-NEXT:    ret
;
; RV32IZFINXZDINX-LABEL: fcmp_une:
; RV32IZFINXZDINX:       # %bb.0:
; RV32IZFINXZDINX-NEXT:    feq.d a0, a0, a2
; RV32IZFINXZDINX-NEXT:    xori a0, a0, 1
; RV32IZFINXZDINX-NEXT:    ret
;
; RV64IZFINXZDINX-LABEL: fcmp_une:
; RV64IZFINXZDINX:       # %bb.0:
; RV64IZFINXZDINX-NEXT:    feq.d a0, a0, a1
; RV64IZFINXZDINX-NEXT:    xori a0, a0, 1
; RV64IZFINXZDINX-NEXT:    ret
;
; RV32I-LABEL: fcmp_une:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __nedf2
; RV32I-NEXT:    snez a0, a0
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcmp_une:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __nedf2
; RV64I-NEXT:    snez a0, a0
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"une", metadata !"fpexcept.strict") strictfp
  %2 = zext i1 %1 to i32
  ret i32 %2
}

define i32 @fcmp_uno(double %a, double %b) nounwind strictfp {
; CHECKIFD-LABEL: fcmp_uno:
; CHECKIFD:       # %bb.0:
; CHECKIFD-NEXT:    feq.d a0, fa1, fa1
; CHECKIFD-NEXT:    feq.d a1, fa0, fa0
; CHECKIFD-NEXT:    and a0, a1, a0
; CHECKIFD-NEXT:    xori a0, a0, 1
; CHECKIFD-NEXT:    ret
;
; RV32IZFINXZDINX-LABEL: fcmp_uno:
; RV32IZFINXZDINX:       # %bb.0:
; RV32IZFINXZDINX-NEXT:    feq.d a2, a2, a2
; RV32IZFINXZDINX-NEXT:    feq.d a0, a0, a0
; RV32IZFINXZDINX-NEXT:    and a0, a0, a2
; RV32IZFINXZDINX-NEXT:    xori a0, a0, 1
; RV32IZFINXZDINX-NEXT:    ret
;
; RV64IZFINXZDINX-LABEL: fcmp_uno:
; RV64IZFINXZDINX:       # %bb.0:
; RV64IZFINXZDINX-NEXT:    feq.d a1, a1, a1
; RV64IZFINXZDINX-NEXT:    feq.d a0, a0, a0
; RV64IZFINXZDINX-NEXT:    and a0, a0, a1
; RV64IZFINXZDINX-NEXT:    xori a0, a0, 1
; RV64IZFINXZDINX-NEXT:    ret
;
; RV32I-LABEL: fcmp_uno:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __unorddf2
; RV32I-NEXT:    snez a0, a0
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcmp_uno:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __unorddf2
; RV64I-NEXT:    snez a0, a0
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"uno", metadata !"fpexcept.strict") strictfp
  %2 = zext i1 %1 to i32
  ret i32 %2
}

define i32 @fcmps_oeq(double %a, double %b) nounwind strictfp {
; CHECKIFD-LABEL: fcmps_oeq:
; CHECKIFD:       # %bb.0:
; CHECKIFD-NEXT:    fle.d a0, fa1, fa0
; CHECKIFD-NEXT:    fle.d a1, fa0, fa1
; CHECKIFD-NEXT:    and a0, a1, a0
; CHECKIFD-NEXT:    ret
;
; RV32IZFINXZDINX-LABEL: fcmps_oeq:
; RV32IZFINXZDINX:       # %bb.0:
; RV32IZFINXZDINX-NEXT:    fle.d a4, a2, a0
; RV32IZFINXZDINX-NEXT:    fle.d a0, a0, a2
; RV32IZFINXZDINX-NEXT:    and a0, a0, a4
; RV32IZFINXZDINX-NEXT:    ret
;
; RV64IZFINXZDINX-LABEL: fcmps_oeq:
; RV64IZFINXZDINX:       # %bb.0:
; RV64IZFINXZDINX-NEXT:    fle.d a2, a1, a0
; RV64IZFINXZDINX-NEXT:    fle.d a0, a0, a1
; RV64IZFINXZDINX-NEXT:    and a0, a0, a2
; RV64IZFINXZDINX-NEXT:    ret
;
; RV32I-LABEL: fcmps_oeq:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __eqdf2
; RV32I-NEXT:    seqz a0, a0
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcmps_oeq:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __eqdf2
; RV64I-NEXT:    seqz a0, a0
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call i1 @llvm.experimental.constrained.fcmps.f64(double %a, double %b, metadata !"oeq", metadata !"fpexcept.strict") strictfp
  %2 = zext i1 %1 to i32
  ret i32 %2
}
declare i1 @llvm.experimental.constrained.fcmps.f64(double, double, metadata, metadata)

define i32 @fcmps_ogt(double %a, double %b) nounwind strictfp {
; CHECKIFD-LABEL: fcmps_ogt:
; CHECKIFD:       # %bb.0:
; CHECKIFD-NEXT:    flt.d a0, fa1, fa0
; CHECKIFD-NEXT:    ret
;
; RV32IZFINXZDINX-LABEL: fcmps_ogt:
; RV32IZFINXZDINX:       # %bb.0:
; RV32IZFINXZDINX-NEXT:    flt.d a0, a2, a0
; RV32IZFINXZDINX-NEXT:    ret
;
; RV64IZFINXZDINX-LABEL: fcmps_ogt:
; RV64IZFINXZDINX:       # %bb.0:
; RV64IZFINXZDINX-NEXT:    flt.d a0, a1, a0
; RV64IZFINXZDINX-NEXT:    ret
;
; RV32I-LABEL: fcmps_ogt:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __gtdf2
; RV32I-NEXT:    sgtz a0, a0
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcmps_ogt:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __gtdf2
; RV64I-NEXT:    sgtz a0, a0
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call i1 @llvm.experimental.constrained.fcmps.f64(double %a, double %b, metadata !"ogt", metadata !"fpexcept.strict") strictfp
  %2 = zext i1 %1 to i32
  ret i32 %2
}

define i32 @fcmps_oge(double %a, double %b) nounwind strictfp {
; CHECKIFD-LABEL: fcmps_oge:
; CHECKIFD:       # %bb.0:
; CHECKIFD-NEXT:    fle.d a0, fa1, fa0
; CHECKIFD-NEXT:    ret
;
; RV32IZFINXZDINX-LABEL: fcmps_oge:
; RV32IZFINXZDINX:       # %bb.0:
; RV32IZFINXZDINX-NEXT:    fle.d a0, a2, a0
; RV32IZFINXZDINX-NEXT:    ret
;
; RV64IZFINXZDINX-LABEL: fcmps_oge:
; RV64IZFINXZDINX:       # %bb.0:
; RV64IZFINXZDINX-NEXT:    fle.d a0, a1, a0
; RV64IZFINXZDINX-NEXT:    ret
;
; RV32I-LABEL: fcmps_oge:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __gedf2
; RV32I-NEXT:    slti a0, a0, 0
; RV32I-NEXT:    xori a0, a0, 1
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcmps_oge:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __gedf2
; RV64I-NEXT:    slti a0, a0, 0
; RV64I-NEXT:    xori a0, a0, 1
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call i1 @llvm.experimental.constrained.fcmps.f64(double %a, double %b, metadata !"oge", metadata !"fpexcept.strict") strictfp
  %2 = zext i1 %1 to i32
  ret i32 %2
}

define i32 @fcmps_olt(double %a, double %b) nounwind strictfp {
; CHECKIFD-LABEL: fcmps_olt:
; CHECKIFD:       # %bb.0:
; CHECKIFD-NEXT:    flt.d a0, fa0, fa1
; CHECKIFD-NEXT:    ret
;
; RV32IZFINXZDINX-LABEL: fcmps_olt:
; RV32IZFINXZDINX:       # %bb.0:
; RV32IZFINXZDINX-NEXT:    flt.d a0, a0, a2
; RV32IZFINXZDINX-NEXT:    ret
;
; RV64IZFINXZDINX-LABEL: fcmps_olt:
; RV64IZFINXZDINX:       # %bb.0:
; RV64IZFINXZDINX-NEXT:    flt.d a0, a0, a1
; RV64IZFINXZDINX-NEXT:    ret
;
; RV32I-LABEL: fcmps_olt:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __ltdf2
; RV32I-NEXT:    slti a0, a0, 0
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcmps_olt:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __ltdf2
; RV64I-NEXT:    slti a0, a0, 0
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call i1 @llvm.experimental.constrained.fcmps.f64(double %a, double %b, metadata !"olt", metadata !"fpexcept.strict") strictfp
  %2 = zext i1 %1 to i32
  ret i32 %2
}

define i32 @fcmps_ole(double %a, double %b) nounwind strictfp {
; CHECKIFD-LABEL: fcmps_ole:
; CHECKIFD:       # %bb.0:
; CHECKIFD-NEXT:    fle.d a0, fa0, fa1
; CHECKIFD-NEXT:    ret
;
; RV32IZFINXZDINX-LABEL: fcmps_ole:
; RV32IZFINXZDINX:       # %bb.0:
; RV32IZFINXZDINX-NEXT:    fle.d a0, a0, a2
; RV32IZFINXZDINX-NEXT:    ret
;
; RV64IZFINXZDINX-LABEL: fcmps_ole:
; RV64IZFINXZDINX:       # %bb.0:
; RV64IZFINXZDINX-NEXT:    fle.d a0, a0, a1
; RV64IZFINXZDINX-NEXT:    ret
;
; RV32I-LABEL: fcmps_ole:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __ledf2
; RV32I-NEXT:    slti a0, a0, 1
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcmps_ole:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __ledf2
; RV64I-NEXT:    slti a0, a0, 1
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call i1 @llvm.experimental.constrained.fcmps.f64(double %a, double %b, metadata !"ole", metadata !"fpexcept.strict") strictfp
  %2 = zext i1 %1 to i32
  ret i32 %2
}

define i32 @fcmps_one(double %a, double %b) nounwind strictfp {
; CHECKIFD-LABEL: fcmps_one:
; CHECKIFD:       # %bb.0:
; CHECKIFD-NEXT:    flt.d a0, fa0, fa1
; CHECKIFD-NEXT:    flt.d a1, fa1, fa0
; CHECKIFD-NEXT:    or a0, a1, a0
; CHECKIFD-NEXT:    ret
;
; RV32IZFINXZDINX-LABEL: fcmps_one:
; RV32IZFINXZDINX:       # %bb.0:
; RV32IZFINXZDINX-NEXT:    flt.d a4, a0, a2
; RV32IZFINXZDINX-NEXT:    flt.d a0, a2, a0
; RV32IZFINXZDINX-NEXT:    or a0, a0, a4
; RV32IZFINXZDINX-NEXT:    ret
;
; RV64IZFINXZDINX-LABEL: fcmps_one:
; RV64IZFINXZDINX:       # %bb.0:
; RV64IZFINXZDINX-NEXT:    flt.d a2, a0, a1
; RV64IZFINXZDINX-NEXT:    flt.d a0, a1, a0
; RV64IZFINXZDINX-NEXT:    or a0, a0, a2
; RV64IZFINXZDINX-NEXT:    ret
;
; RV32I-LABEL: fcmps_one:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -32
; RV32I-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s3, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s4, 8(sp) # 4-byte Folded Spill
; RV32I-NEXT:    mv s0, a3
; RV32I-NEXT:    mv s1, a2
; RV32I-NEXT:    mv s2, a1
; RV32I-NEXT:    mv s3, a0
; RV32I-NEXT:    call __eqdf2
; RV32I-NEXT:    snez s4, a0
; RV32I-NEXT:    mv a0, s3
; RV32I-NEXT:    mv a1, s2
; RV32I-NEXT:    mv a2, s1
; RV32I-NEXT:    mv a3, s0
; RV32I-NEXT:    call __unorddf2
; RV32I-NEXT:    seqz a0, a0
; RV32I-NEXT:    and a0, a0, s4
; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s3, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s4, 8(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 32
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcmps_one:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -32
; RV64I-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sd s2, 0(sp) # 8-byte Folded Spill
; RV64I-NEXT:    mv s0, a1
; RV64I-NEXT:    mv s1, a0
; RV64I-NEXT:    call __eqdf2
; RV64I-NEXT:    snez s2, a0
; RV64I-NEXT:    mv a0, s1
; RV64I-NEXT:    mv a1, s0
; RV64I-NEXT:    call __unorddf2
; RV64I-NEXT:    seqz a0, a0
; RV64I-NEXT:    and a0, a0, s2
; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
; RV64I-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    ld s2, 0(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 32
; RV64I-NEXT:    ret
  %1 = call i1 @llvm.experimental.constrained.fcmps.f64(double %a, double %b, metadata !"one", metadata !"fpexcept.strict") strictfp
  %2 = zext i1 %1 to i32
  ret i32 %2
}

define i32 @fcmps_ord(double %a, double %b) nounwind strictfp {
; CHECKIFD-LABEL: fcmps_ord:
; CHECKIFD:       # %bb.0:
; CHECKIFD-NEXT:    fle.d a0, fa1, fa1
; CHECKIFD-NEXT:    fle.d a1, fa0, fa0
; CHECKIFD-NEXT:    and a0, a1, a0
; CHECKIFD-NEXT:    ret
;
; RV32IZFINXZDINX-LABEL: fcmps_ord:
; RV32IZFINXZDINX:       # %bb.0:
; RV32IZFINXZDINX-NEXT:    fle.d a2, a2, a2
; RV32IZFINXZDINX-NEXT:    fle.d a0, a0, a0
; RV32IZFINXZDINX-NEXT:    and a0, a0, a2
; RV32IZFINXZDINX-NEXT:    ret
;
; RV64IZFINXZDINX-LABEL: fcmps_ord:
; RV64IZFINXZDINX:       # %bb.0:
; RV64IZFINXZDINX-NEXT:    fle.d a1, a1, a1
; RV64IZFINXZDINX-NEXT:    fle.d a0, a0, a0
; RV64IZFINXZDINX-NEXT:    and a0, a0, a1
; RV64IZFINXZDINX-NEXT:    ret
;
; RV32I-LABEL: fcmps_ord:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __unorddf2
; RV32I-NEXT:    seqz a0, a0
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcmps_ord:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __unorddf2
; RV64I-NEXT:    seqz a0, a0
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call i1 @llvm.experimental.constrained.fcmps.f64(double %a, double %b, metadata !"ord", metadata !"fpexcept.strict") strictfp
  %2 = zext i1 %1 to i32
  ret i32 %2
}

define i32 @fcmps_ueq(double %a, double %b) nounwind strictfp {
; CHECKIFD-LABEL: fcmps_ueq:
; CHECKIFD:       # %bb.0:
; CHECKIFD-NEXT:    flt.d a0, fa0, fa1
; CHECKIFD-NEXT:    flt.d a1, fa1, fa0
; CHECKIFD-NEXT:    or a0, a1, a0
; CHECKIFD-NEXT:    xori a0, a0, 1
; CHECKIFD-NEXT:    ret
;
; RV32IZFINXZDINX-LABEL: fcmps_ueq:
; RV32IZFINXZDINX:       # %bb.0:
; RV32IZFINXZDINX-NEXT:    flt.d a4, a0, a2
; RV32IZFINXZDINX-NEXT:    flt.d a0, a2, a0
; RV32IZFINXZDINX-NEXT:    or a0, a0, a4
; RV32IZFINXZDINX-NEXT:    xori a0, a0, 1
; RV32IZFINXZDINX-NEXT:    ret
;
; RV64IZFINXZDINX-LABEL: fcmps_ueq:
; RV64IZFINXZDINX:       # %bb.0:
; RV64IZFINXZDINX-NEXT:    flt.d a2, a0, a1
; RV64IZFINXZDINX-NEXT:    flt.d a0, a1, a0
; RV64IZFINXZDINX-NEXT:    or a0, a0, a2
; RV64IZFINXZDINX-NEXT:    xori a0, a0, 1
; RV64IZFINXZDINX-NEXT:    ret
;
; RV32I-LABEL: fcmps_ueq:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -32
; RV32I-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s3, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s4, 8(sp) # 4-byte Folded Spill
; RV32I-NEXT:    mv s0, a3
; RV32I-NEXT:    mv s1, a2
; RV32I-NEXT:    mv s2, a1
; RV32I-NEXT:    mv s3, a0
; RV32I-NEXT:    call __eqdf2
; RV32I-NEXT:    seqz s4, a0
; RV32I-NEXT:    mv a0, s3
; RV32I-NEXT:    mv a1, s2
; RV32I-NEXT:    mv a2, s1
; RV32I-NEXT:    mv a3, s0
; RV32I-NEXT:    call __unorddf2
; RV32I-NEXT:    snez a0, a0
; RV32I-NEXT:    or a0, a0, s4
; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s3, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s4, 8(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 32
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcmps_ueq:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -32
; RV64I-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sd s2, 0(sp) # 8-byte Folded Spill
; RV64I-NEXT:    mv s0, a1
; RV64I-NEXT:    mv s1, a0
; RV64I-NEXT:    call __eqdf2
; RV64I-NEXT:    seqz s2, a0
; RV64I-NEXT:    mv a0, s1
; RV64I-NEXT:    mv a1, s0
; RV64I-NEXT:    call __unorddf2
; RV64I-NEXT:    snez a0, a0
; RV64I-NEXT:    or a0, a0, s2
; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
; RV64I-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    ld s2, 0(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 32
; RV64I-NEXT:    ret
  %1 = call i1 @llvm.experimental.constrained.fcmps.f64(double %a, double %b, metadata !"ueq", metadata !"fpexcept.strict") strictfp
  %2 = zext i1 %1 to i32
  ret i32 %2
}

define i32 @fcmps_ugt(double %a, double %b) nounwind strictfp {
; CHECKIFD-LABEL: fcmps_ugt:
; CHECKIFD:       # %bb.0:
; CHECKIFD-NEXT:    fle.d a0, fa0, fa1
; CHECKIFD-NEXT:    xori a0, a0, 1
; CHECKIFD-NEXT:    ret
;
; RV32IZFINXZDINX-LABEL: fcmps_ugt:
; RV32IZFINXZDINX:       # %bb.0:
; RV32IZFINXZDINX-NEXT:    fle.d a0, a0, a2
; RV32IZFINXZDINX-NEXT:    xori a0, a0, 1
; RV32IZFINXZDINX-NEXT:    ret
;
; RV64IZFINXZDINX-LABEL: fcmps_ugt:
; RV64IZFINXZDINX:       # %bb.0:
; RV64IZFINXZDINX-NEXT:    fle.d a0, a0, a1
; RV64IZFINXZDINX-NEXT:    xori a0, a0, 1
; RV64IZFINXZDINX-NEXT:    ret
;
; RV32I-LABEL: fcmps_ugt:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __ledf2
; RV32I-NEXT:    sgtz a0, a0
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcmps_ugt:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __ledf2
; RV64I-NEXT:    sgtz a0, a0
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call i1 @llvm.experimental.constrained.fcmps.f64(double %a, double %b, metadata !"ugt", metadata !"fpexcept.strict") strictfp
  %2 = zext i1 %1 to i32
  ret i32 %2
}

define i32 @fcmps_uge(double %a, double %b) nounwind strictfp {
; CHECKIFD-LABEL: fcmps_uge:
; CHECKIFD:       # %bb.0:
; CHECKIFD-NEXT:    flt.d a0, fa0, fa1
; CHECKIFD-NEXT:    xori a0, a0, 1
; CHECKIFD-NEXT:    ret
;
; RV32IZFINXZDINX-LABEL: fcmps_uge:
; RV32IZFINXZDINX:       # %bb.0:
; RV32IZFINXZDINX-NEXT:    flt.d a0, a0, a2
; RV32IZFINXZDINX-NEXT:    xori a0, a0, 1
; RV32IZFINXZDINX-NEXT:    ret
;
; RV64IZFINXZDINX-LABEL: fcmps_uge:
; RV64IZFINXZDINX:       # %bb.0:
; RV64IZFINXZDINX-NEXT:    flt.d a0, a0, a1
; RV64IZFINXZDINX-NEXT:    xori a0, a0, 1
; RV64IZFINXZDINX-NEXT:    ret
;
; RV32I-LABEL: fcmps_uge:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __ltdf2
; RV32I-NEXT:    slti a0, a0, 0
; RV32I-NEXT:    xori a0, a0, 1
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcmps_uge:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __ltdf2
; RV64I-NEXT:    slti a0, a0, 0
; RV64I-NEXT:    xori a0, a0, 1
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call i1 @llvm.experimental.constrained.fcmps.f64(double %a, double %b, metadata !"uge", metadata !"fpexcept.strict") strictfp
  %2 = zext i1 %1 to i32
  ret i32 %2
}

define i32 @fcmps_ult(double %a, double %b) nounwind strictfp {
; CHECKIFD-LABEL: fcmps_ult:
; CHECKIFD:       # %bb.0:
; CHECKIFD-NEXT:    fle.d a0, fa1, fa0
; CHECKIFD-NEXT:    xori a0, a0, 1
; CHECKIFD-NEXT:    ret
;
; RV32IZFINXZDINX-LABEL: fcmps_ult:
; RV32IZFINXZDINX:       # %bb.0:
; RV32IZFINXZDINX-NEXT:    fle.d a0, a2, a0
; RV32IZFINXZDINX-NEXT:    xori a0, a0, 1
; RV32IZFINXZDINX-NEXT:    ret
;
; RV64IZFINXZDINX-LABEL: fcmps_ult:
; RV64IZFINXZDINX:       # %bb.0:
; RV64IZFINXZDINX-NEXT:    fle.d a0, a1, a0
; RV64IZFINXZDINX-NEXT:    xori a0, a0, 1
; RV64IZFINXZDINX-NEXT:    ret
;
; RV32I-LABEL: fcmps_ult:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __gedf2
; RV32I-NEXT:    slti a0, a0, 0
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcmps_ult:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __gedf2
; RV64I-NEXT:    slti a0, a0, 0
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call i1 @llvm.experimental.constrained.fcmps.f64(double %a, double %b, metadata !"ult", metadata !"fpexcept.strict") strictfp
  %2 = zext i1 %1 to i32
  ret i32 %2
}

define i32 @fcmps_ule(double %a, double %b) nounwind strictfp {
; CHECKIFD-LABEL: fcmps_ule:
; CHECKIFD:       # %bb.0:
; CHECKIFD-NEXT:    flt.d a0, fa1, fa0
; CHECKIFD-NEXT:    xori a0, a0, 1
; CHECKIFD-NEXT:    ret
;
; RV32IZFINXZDINX-LABEL: fcmps_ule:
; RV32IZFINXZDINX:       # %bb.0:
; RV32IZFINXZDINX-NEXT:    flt.d a0, a2, a0
; RV32IZFINXZDINX-NEXT:    xori a0, a0, 1
; RV32IZFINXZDINX-NEXT:    ret
;
; RV64IZFINXZDINX-LABEL: fcmps_ule:
; RV64IZFINXZDINX:       # %bb.0:
; RV64IZFINXZDINX-NEXT:    flt.d a0, a1, a0
; RV64IZFINXZDINX-NEXT:    xori a0, a0, 1
; RV64IZFINXZDINX-NEXT:    ret
;
; RV32I-LABEL: fcmps_ule:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __gtdf2
; RV32I-NEXT:    slti a0, a0, 1
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcmps_ule:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __gtdf2
; RV64I-NEXT:    slti a0, a0, 1
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call i1 @llvm.experimental.constrained.fcmps.f64(double %a, double %b, metadata !"ule", metadata !"fpexcept.strict") strictfp
  %2 = zext i1 %1 to i32
  ret i32 %2
}

define i32 @fcmps_une(double %a, double %b) nounwind strictfp {
; CHECKIFD-LABEL: fcmps_une:
; CHECKIFD:       # %bb.0:
; CHECKIFD-NEXT:    fle.d a0, fa1, fa0
; CHECKIFD-NEXT:    fle.d a1, fa0, fa1
; CHECKIFD-NEXT:    and a0, a1, a0
; CHECKIFD-NEXT:    xori a0, a0, 1
; CHECKIFD-NEXT:    ret
;
; RV32IZFINXZDINX-LABEL: fcmps_une:
; RV32IZFINXZDINX:       # %bb.0:
; RV32IZFINXZDINX-NEXT:    fle.d a4, a2, a0
; RV32IZFINXZDINX-NEXT:    fle.d a0, a0, a2
; RV32IZFINXZDINX-NEXT:    and a0, a0, a4
; RV32IZFINXZDINX-NEXT:    xori a0, a0, 1
; RV32IZFINXZDINX-NEXT:    ret
;
; RV64IZFINXZDINX-LABEL: fcmps_une:
; RV64IZFINXZDINX:       # %bb.0:
; RV64IZFINXZDINX-NEXT:    fle.d a2, a1, a0
; RV64IZFINXZDINX-NEXT:    fle.d a0, a0, a1
; RV64IZFINXZDINX-NEXT:    and a0, a0, a2
; RV64IZFINXZDINX-NEXT:    xori a0, a0, 1
; RV64IZFINXZDINX-NEXT:    ret
;
; RV32I-LABEL: fcmps_une:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __nedf2
; RV32I-NEXT:    snez a0, a0
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcmps_une:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __nedf2
; RV64I-NEXT:    snez a0, a0
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call i1 @llvm.experimental.constrained.fcmps.f64(double %a, double %b, metadata !"une", metadata !"fpexcept.strict") strictfp
  %2 = zext i1 %1 to i32
  ret i32 %2
}

define i32 @fcmps_uno(double %a, double %b) nounwind strictfp {
; CHECKIFD-LABEL: fcmps_uno:
; CHECKIFD:       # %bb.0:
; CHECKIFD-NEXT:    fle.d a0, fa1, fa1
; CHECKIFD-NEXT:    fle.d a1, fa0, fa0
; CHECKIFD-NEXT:    and a0, a1, a0
; CHECKIFD-NEXT:    xori a0, a0, 1
; CHECKIFD-NEXT:    ret
;
; RV32IZFINXZDINX-LABEL: fcmps_uno:
; RV32IZFINXZDINX:       # %bb.0:
; RV32IZFINXZDINX-NEXT:    fle.d a2, a2, a2
; RV32IZFINXZDINX-NEXT:    fle.d a0, a0, a0
; RV32IZFINXZDINX-NEXT:    and a0, a0, a2
; RV32IZFINXZDINX-NEXT:    xori a0, a0, 1
; RV32IZFINXZDINX-NEXT:    ret
;
; RV64IZFINXZDINX-LABEL: fcmps_uno:
; RV64IZFINXZDINX:       # %bb.0:
; RV64IZFINXZDINX-NEXT:    fle.d a1, a1, a1
; RV64IZFINXZDINX-NEXT:    fle.d a0, a0, a0
; RV64IZFINXZDINX-NEXT:    and a0, a0, a1
; RV64IZFINXZDINX-NEXT:    xori a0, a0, 1
; RV64IZFINXZDINX-NEXT:    ret
;
; RV32I-LABEL: fcmps_uno:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __unorddf2
; RV32I-NEXT:    snez a0, a0
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcmps_uno:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __unorddf2
; RV64I-NEXT:    snez a0, a0
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call i1 @llvm.experimental.constrained.fcmps.f64(double %a, double %b, metadata !"uno", metadata !"fpexcept.strict") strictfp
  %2 = zext i1 %1 to i32
  ret i32 %2
}