; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv64 -verify-machineinstrs \
; RUN:   -target-abi lp64 -disable-strictnode-mutation < %s | \
; RUN:   FileCheck %s -check-prefixes=CHECK,RV64I
; RUN: llc -mtriple=riscv64 -mattr=+zfh -verify-machineinstrs \
; RUN:   -target-abi lp64f -disable-strictnode-mutation < %s | \
; RUN:   FileCheck %s -check-prefixes=CHECK,RV64IZFH
; RUN: llc -mtriple=riscv64 -mattr=+zhinx -verify-machineinstrs \
; RUN:   -target-abi lp64 -disable-strictnode-mutation < %s | \
; RUN:   FileCheck %s -check-prefixes=CHECK,RV64IZHINX

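; These tests cover the strictfp (constrained) conversions between half and
; i128, which always lower to runtime libcalls. Three configurations are
; checked: RV64I (no half support; the value is first widened to float with
; __extendhfsf2), RV64IZFH (Zfh with the lp64f ABI, half passed in an FPR),
; and RV64IZHINX (Zhinx, half kept in GPRs). With Zfh or Zhinx a single half
; libcall replaces the extend-to-float sequence.

; Signed f16 -> i128: __extendhfsf2 + __fixsfti without Zfh, __fixhfti with it.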
define i128 @fptosi_f16_to_i128(half %a) nounwind strictfp {
; RV64I-LABEL: fptosi_f16_to_i128:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __extendhfsf2
; RV64I-NEXT:    call __fixsfti
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV64IZFH-LABEL: fptosi_f16_to_i128:
; RV64IZFH:       # %bb.0:
; RV64IZFH-NEXT:    addi sp, sp, -16
; RV64IZFH-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZFH-NEXT:    call __fixhfti
; RV64IZFH-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFH-NEXT:    addi sp, sp, 16
; RV64IZFH-NEXT:    ret
;
; RV64IZHINX-LABEL: fptosi_f16_to_i128:
; RV64IZHINX:       # %bb.0:
; RV64IZHINX-NEXT:    addi sp, sp, -16
; RV64IZHINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZHINX-NEXT:    call __fixhfti
; RV64IZHINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZHINX-NEXT:    addi sp, sp, 16
; RV64IZHINX-NEXT:    ret
  %1 = call i128 @llvm.experimental.constrained.fptosi.i128.f16(half %a, metadata !"fpexcept.strict")
  ret i128 %1
}

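; Unsigned f16 -> i128: same shape as the signed case, but through the
; unsigned libcalls __fixunssfti / __fixunshfti.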
define i128 @fptoui_f16_to_i128(half %a) nounwind strictfp {
; RV64I-LABEL: fptoui_f16_to_i128:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __extendhfsf2
; RV64I-NEXT:    call __fixunssfti
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV64IZFH-LABEL: fptoui_f16_to_i128:
; RV64IZFH:       # %bb.0:
; RV64IZFH-NEXT:    addi sp, sp, -16
; RV64IZFH-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZFH-NEXT:    call __fixunshfti
; RV64IZFH-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFH-NEXT:    addi sp, sp, 16
; RV64IZFH-NEXT:    ret
;
; RV64IZHINX-LABEL: fptoui_f16_to_i128:
; RV64IZHINX:       # %bb.0:
; RV64IZHINX-NEXT:    addi sp, sp, -16
; RV64IZHINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZHINX-NEXT:    call __fixunshfti
; RV64IZHINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZHINX-NEXT:    addi sp, sp, 16
; RV64IZHINX-NEXT:    ret
  %1 = call i128 @llvm.experimental.constrained.fptoui.i128.f16(half %a, metadata !"fpexcept.strict")
  ret i128 %1
}

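; Signed i128 -> f16: __floattisf + __truncsfhf2 without Zfh, a single
; __floattihf with Zfh or Zhinx. Unlike the constrained fptosi/fptoui above,
; the int-to-fp intrinsics also carry round.dynamic metadata, since an i128
; generally cannot be represented exactly in half and must be rounded.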
define half @sitofp_i128_to_f16(i128 %a) nounwind strictfp {
; RV64I-LABEL: sitofp_i128_to_f16:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __floattisf
; RV64I-NEXT:    call __truncsfhf2
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV64IZFH-LABEL: sitofp_i128_to_f16:
; RV64IZFH:       # %bb.0:
; RV64IZFH-NEXT:    addi sp, sp, -16
; RV64IZFH-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZFH-NEXT:    call __floattihf
; RV64IZFH-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFH-NEXT:    addi sp, sp, 16
; RV64IZFH-NEXT:    ret
;
; RV64IZHINX-LABEL: sitofp_i128_to_f16:
; RV64IZHINX:       # %bb.0:
; RV64IZHINX-NEXT:    addi sp, sp, -16
; RV64IZHINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZHINX-NEXT:    call __floattihf
; RV64IZHINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZHINX-NEXT:    addi sp, sp, 16
; RV64IZHINX-NEXT:    ret
  %1 = call half @llvm.experimental.constrained.sitofp.f16.i128(i128 %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret half %1
}

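; Unsigned i128 -> f16: __floatuntisf + __truncsfhf2, or __floatuntihf with
; Zfh or Zhinx.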
define half @uitofp_i128_to_f16(i128 %a) nounwind strictfp {
; RV64I-LABEL: uitofp_i128_to_f16:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __floatuntisf
; RV64I-NEXT:    call __truncsfhf2
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV64IZFH-LABEL: uitofp_i128_to_f16:
; RV64IZFH:       # %bb.0:
; RV64IZFH-NEXT:    addi sp, sp, -16
; RV64IZFH-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZFH-NEXT:    call __floatuntihf
; RV64IZFH-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFH-NEXT:    addi sp, sp, 16
; RV64IZFH-NEXT:    ret
;
; RV64IZHINX-LABEL: uitofp_i128_to_f16:
; RV64IZHINX:       # %bb.0:
; RV64IZHINX-NEXT:    addi sp, sp, -16
; RV64IZHINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZHINX-NEXT:    call __floatuntihf
; RV64IZHINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZHINX-NEXT:    addi sp, sp, 16
; RV64IZHINX-NEXT:    ret
  %1 = call half @llvm.experimental.constrained.uitofp.f16.i128(i128 %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret half %1
}
;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
; CHECK: {{.*}}