; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv64 -verify-machineinstrs \
; RUN:   -target-abi lp64 -disable-strictnode-mutation < %s | \
; RUN:   FileCheck %s -check-prefixes=CHECK,RV64I
; RUN: llc -mtriple=riscv64 -mattr=+zfh -verify-machineinstrs \
; RUN:   -target-abi lp64f -disable-strictnode-mutation < %s | \
; RUN:   FileCheck %s -check-prefixes=CHECK,RV64IZFH
; RUN: llc -mtriple=riscv64 -mattr=+zhinx -verify-machineinstrs \
; RUN:   -target-abi lp64 -disable-strictnode-mutation < %s | \
; RUN:   FileCheck %s -check-prefixes=CHECK,RV64IZHINX

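; Strict FP conversions between half and i128 have no native RV64 lowering
; and are expected to expand to libcalls in each configuration checked below.
; fptosi: __extendhfsf2 + __fixsfti without Zfh; __fixhfti with Zfh or Zhinx.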
define i128 @fptosi_f16_to_i128(half %a) nounwind strictfp {
; RV64I-LABEL: fptosi_f16_to_i128:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __extendhfsf2
; RV64I-NEXT:    call __fixsfti
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV64IZFH-LABEL: fptosi_f16_to_i128:
; RV64IZFH:       # %bb.0:
; RV64IZFH-NEXT:    addi sp, sp, -16
; RV64IZFH-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZFH-NEXT:    call __fixhfti
; RV64IZFH-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFH-NEXT:    addi sp, sp, 16
; RV64IZFH-NEXT:    ret
;
; RV64IZHINX-LABEL: fptosi_f16_to_i128:
; RV64IZHINX:       # %bb.0:
; RV64IZHINX-NEXT:    addi sp, sp, -16
; RV64IZHINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZHINX-NEXT:    call __fixhfti
; RV64IZHINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZHINX-NEXT:    addi sp, sp, 16
; RV64IZHINX-NEXT:    ret
  %1 = call i128 @llvm.experimental.constrained.fptosi.i128.f16(half %a, metadata !"fpexcept.strict")
  ret i128 %1
}

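; fptoui: __extendhfsf2 + __fixunssfti without Zfh; __fixunshfti with Zfh or Zhinx.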
define i128 @fptoui_f16_to_i128(half %a) nounwind strictfp {
; RV64I-LABEL: fptoui_f16_to_i128:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __extendhfsf2
; RV64I-NEXT:    call __fixunssfti
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV64IZFH-LABEL: fptoui_f16_to_i128:
; RV64IZFH:       # %bb.0:
; RV64IZFH-NEXT:    addi sp, sp, -16
; RV64IZFH-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZFH-NEXT:    call __fixunshfti
; RV64IZFH-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFH-NEXT:    addi sp, sp, 16
; RV64IZFH-NEXT:    ret
;
; RV64IZHINX-LABEL: fptoui_f16_to_i128:
; RV64IZHINX:       # %bb.0:
; RV64IZHINX-NEXT:    addi sp, sp, -16
; RV64IZHINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZHINX-NEXT:    call __fixunshfti
; RV64IZHINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZHINX-NEXT:    addi sp, sp, 16
; RV64IZHINX-NEXT:    ret
  %1 = call i128 @llvm.experimental.constrained.fptoui.i128.f16(half %a, metadata !"fpexcept.strict")
  ret i128 %1
}

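; sitofp: __floattisf + __truncsfhf2 without Zfh; __floattihf with Zfh or Zhinx.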
define half @sitofp_i128_to_f16(i128 %a) nounwind strictfp {
; RV64I-LABEL: sitofp_i128_to_f16:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __floattisf
; RV64I-NEXT:    call __truncsfhf2
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV64IZFH-LABEL: sitofp_i128_to_f16:
; RV64IZFH:       # %bb.0:
; RV64IZFH-NEXT:    addi sp, sp, -16
; RV64IZFH-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZFH-NEXT:    call __floattihf
; RV64IZFH-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFH-NEXT:    addi sp, sp, 16
; RV64IZFH-NEXT:    ret
;
; RV64IZHINX-LABEL: sitofp_i128_to_f16:
; RV64IZHINX:       # %bb.0:
; RV64IZHINX-NEXT:    addi sp, sp, -16
; RV64IZHINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZHINX-NEXT:    call __floattihf
; RV64IZHINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZHINX-NEXT:    addi sp, sp, 16
; RV64IZHINX-NEXT:    ret
  %1 = call half @llvm.experimental.constrained.sitofp.f16.i128(i128 %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret half %1
}

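; uitofp: __floatuntisf + __truncsfhf2 without Zfh; __floatuntihf with Zfh or Zhinx.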
define half @uitofp_i128_to_f16(i128 %a) nounwind strictfp {
; RV64I-LABEL: uitofp_i128_to_f16:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __floatuntisf
; RV64I-NEXT:    call __truncsfhf2
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV64IZFH-LABEL: uitofp_i128_to_f16:
; RV64IZFH:       # %bb.0:
; RV64IZFH-NEXT:    addi sp, sp, -16
; RV64IZFH-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZFH-NEXT:    call __floatuntihf
; RV64IZFH-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFH-NEXT:    addi sp, sp, 16
; RV64IZFH-NEXT:    ret
;
; RV64IZHINX-LABEL: uitofp_i128_to_f16:
; RV64IZHINX:       # %bb.0:
; RV64IZHINX-NEXT:    addi sp, sp, -16
; RV64IZHINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZHINX-NEXT:    call __floatuntihf
; RV64IZHINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZHINX-NEXT:    addi sp, sp, 16
; RV64IZHINX-NEXT:    ret
  %1 = call half @llvm.experimental.constrained.uitofp.f16.i128(i128 %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret half %1
}
;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
; CHECK: {{.*}}