; Source: llvm/test/CodeGen/RISCV/rv64i-double-softfloat.ll
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
; RUN:   | FileCheck -check-prefix=RV64I %s
; RUN: llc -mtriple=riscv64 -mattr=+f -target-abi lp64f -verify-machineinstrs < %s \
; RUN:   | FileCheck -check-prefix=RV64IF %s

; The test cases check that we use the si versions of the conversions from
; double.

; Constrained (strictfp) double -> i32 conversion intrinsics under test.
declare i32 @llvm.experimental.constrained.fptosi.i32.f64(double, metadata)
declare i32 @llvm.experimental.constrained.fptoui.i32.f64(double, metadata)

; Unsigned double->i32: both RV64I (soft-float) and RV64IF (+f only, so double
; ops are still libcalls) must call the 32-bit __fixunsdfsi, not a "di" variant.
define i32 @strict_fp64_to_ui32(double %a) nounwind strictfp {
; RV64I-LABEL: strict_fp64_to_ui32:
; RV64I:       # %bb.0: # %entry
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __fixunsdfsi
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV64IF-LABEL: strict_fp64_to_ui32:
; RV64IF:       # %bb.0: # %entry
; RV64IF-NEXT:    addi sp, sp, -16
; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IF-NEXT:    call __fixunsdfsi
; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IF-NEXT:    addi sp, sp, 16
; RV64IF-NEXT:    ret
entry:
  %conv = tail call i32 @llvm.experimental.constrained.fptoui.i32.f64(double %a, metadata !"fpexcept.strict")
  ret i32 %conv
}

; Signed double->i32: both configurations must call the 32-bit __fixdfsi
; libcall (the "si" version), matching the file-level comment above.
define i32 @strict_fp64_to_si32(double %a) nounwind strictfp {
; RV64I-LABEL: strict_fp64_to_si32:
; RV64I:       # %bb.0: # %entry
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __fixdfsi
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV64IF-LABEL: strict_fp64_to_si32:
; RV64IF:       # %bb.0: # %entry
; RV64IF-NEXT:    addi sp, sp, -16
; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IF-NEXT:    call __fixdfsi
; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IF-NEXT:    addi sp, sp, 16
; RV64IF-NEXT:    ret
entry:
  %conv = tail call i32 @llvm.experimental.constrained.fptosi.i32.f64(double %a, metadata !"fpexcept.strict")
  ret i32 %conv
}