; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc --mtriple=loongarch32 < %s | FileCheck %s --check-prefix=LA32
; RUN: llc --mtriple=loongarch64 --mattr=-f < %s | FileCheck %s --check-prefix=LA64

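; These tests check that fptosi of fp128/double/float to i32/i64 is lowered to
; the corresponding conversion libcalls, since neither RUN line enables
; hardware floating-point support.

; fp128 -> i32 should go through __fixtfsi. On LA32 the fp128 operand is
; copied to a stack slot and its address is passed to the libcall in $a0.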
define i32 @fptosi_i32_fp128(fp128 %X) nounwind {
; LA32-LABEL: fptosi_i32_fp128:
; LA32:       # %bb.0:
; LA32-NEXT:    addi.w $sp, $sp, -32
; LA32-NEXT:    st.w $ra, $sp, 28 # 4-byte Folded Spill
; LA32-NEXT:    ld.w $a1, $a0, 0
; LA32-NEXT:    ld.w $a2, $a0, 4
; LA32-NEXT:    ld.w $a3, $a0, 8
; LA32-NEXT:    ld.w $a0, $a0, 12
; LA32-NEXT:    st.w $a0, $sp, 20
; LA32-NEXT:    st.w $a3, $sp, 16
; LA32-NEXT:    st.w $a2, $sp, 12
; LA32-NEXT:    addi.w $a0, $sp, 8
; LA32-NEXT:    st.w $a1, $sp, 8
; LA32-NEXT:    bl %plt(__fixtfsi)
; LA32-NEXT:    ld.w $ra, $sp, 28 # 4-byte Folded Reload
; LA32-NEXT:    addi.w $sp, $sp, 32
; LA32-NEXT:    ret
;
; LA64-LABEL: fptosi_i32_fp128:
; LA64:       # %bb.0:
; LA64-NEXT:    addi.d $sp, $sp, -16
; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
; LA64-NEXT:    bl %plt(__fixtfsi)
; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
; LA64-NEXT:    addi.d $sp, $sp, 16
; LA64-NEXT:    ret
  %tmp = fptosi fp128 %X to i32
  ret i32 %tmp
}

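; double -> i32 should go through __fixdfsi.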
define i32 @fptosi_i32_double(double %X) nounwind {
; LA32-LABEL: fptosi_i32_double:
; LA32:       # %bb.0:
; LA32-NEXT:    addi.w $sp, $sp, -16
; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
; LA32-NEXT:    bl %plt(__fixdfsi)
; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
; LA32-NEXT:    addi.w $sp, $sp, 16
; LA32-NEXT:    ret
;
; LA64-LABEL: fptosi_i32_double:
; LA64:       # %bb.0:
; LA64-NEXT:    addi.d $sp, $sp, -16
; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
; LA64-NEXT:    bl %plt(__fixdfsi)
; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
; LA64-NEXT:    addi.d $sp, $sp, 16
; LA64-NEXT:    ret
  %tmp = fptosi double %X to i32
  ret i32 %tmp
}

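; float -> i32 should go through __fixsfsi.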
define i32 @fptosi_i32_float(float %X) nounwind {
; LA32-LABEL: fptosi_i32_float:
; LA32:       # %bb.0:
; LA32-NEXT:    addi.w $sp, $sp, -16
; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
; LA32-NEXT:    bl %plt(__fixsfsi)
; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
; LA32-NEXT:    addi.w $sp, $sp, 16
; LA32-NEXT:    ret
;
; LA64-LABEL: fptosi_i32_float:
; LA64:       # %bb.0:
; LA64-NEXT:    addi.d $sp, $sp, -16
; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
; LA64-NEXT:    bl %plt(__fixsfsi)
; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
; LA64-NEXT:    addi.d $sp, $sp, 16
; LA64-NEXT:    ret
  %tmp = fptosi float %X to i32
  ret i32 %tmp
}

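; fp128 -> i64 should go through __fixtfdi, with the same indirect argument
; handling on LA32.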
define i64 @fptosi_i64_fp128(fp128 %X) nounwind {
; LA32-LABEL: fptosi_i64_fp128:
; LA32:       # %bb.0:
; LA32-NEXT:    addi.w $sp, $sp, -32
; LA32-NEXT:    st.w $ra, $sp, 28 # 4-byte Folded Spill
; LA32-NEXT:    ld.w $a1, $a0, 0
; LA32-NEXT:    ld.w $a2, $a0, 4
; LA32-NEXT:    ld.w $a3, $a0, 8
; LA32-NEXT:    ld.w $a0, $a0, 12
; LA32-NEXT:    st.w $a0, $sp, 12
; LA32-NEXT:    st.w $a3, $sp, 8
; LA32-NEXT:    st.w $a2, $sp, 4
; LA32-NEXT:    addi.w $a0, $sp, 0
; LA32-NEXT:    st.w $a1, $sp, 0
; LA32-NEXT:    bl %plt(__fixtfdi)
; LA32-NEXT:    ld.w $ra, $sp, 28 # 4-byte Folded Reload
; LA32-NEXT:    addi.w $sp, $sp, 32
; LA32-NEXT:    ret
;
; LA64-LABEL: fptosi_i64_fp128:
; LA64:       # %bb.0:
; LA64-NEXT:    addi.d $sp, $sp, -16
; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
; LA64-NEXT:    bl %plt(__fixtfdi)
; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
; LA64-NEXT:    addi.d $sp, $sp, 16
; LA64-NEXT:    ret
  %tmp = fptosi fp128 %X to i64
  ret i64 %tmp
}

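; double -> i64 should go through __fixdfdi.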
define i64 @fptosi_i64_double(double %X) nounwind {
; LA32-LABEL: fptosi_i64_double:
; LA32:       # %bb.0:
; LA32-NEXT:    addi.w $sp, $sp, -16
; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
; LA32-NEXT:    bl %plt(__fixdfdi)
; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
; LA32-NEXT:    addi.w $sp, $sp, 16
; LA32-NEXT:    ret
;
; LA64-LABEL: fptosi_i64_double:
; LA64:       # %bb.0:
; LA64-NEXT:    addi.d $sp, $sp, -16
; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
; LA64-NEXT:    bl %plt(__fixdfdi)
; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
; LA64-NEXT:    addi.d $sp, $sp, 16
; LA64-NEXT:    ret
  %tmp = fptosi double %X to i64
  ret i64 %tmp
}

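; float -> i64 should go through __fixsfdi.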
define i64 @fptosi_i64_float(float %X) nounwind {
; LA32-LABEL: fptosi_i64_float:
; LA32:       # %bb.0:
; LA32-NEXT:    addi.w $sp, $sp, -16
; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
; LA32-NEXT:    bl %plt(__fixsfdi)
; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
; LA32-NEXT:    addi.w $sp, $sp, 16
; LA32-NEXT:    ret
;
; LA64-LABEL: fptosi_i64_float:
; LA64:       # %bb.0:
; LA64-NEXT:    addi.d $sp, $sp, -16
; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
; LA64-NEXT:    bl %plt(__fixsfdi)
; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
; LA64-NEXT:    addi.d $sp, $sp, 16
; LA64-NEXT:    ret
  %tmp = fptosi float %X to i64
  ret i64 %tmp
}