xref: /llvm-project/llvm/test/CodeGen/X86/i128-fpconv-win64-strict.ll (revision eeedb1e962977caeb699ef9aa714c8878c4d62d2)
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-win32 | FileCheck %s -check-prefix=WIN64
; RUN: llc < %s -mtriple=x86_64-mingw32 | FileCheck %s -check-prefix=WIN64

define i64 @double_to_i128(double %d) nounwind strictfp {
; WIN64-LABEL: double_to_i128:
; WIN64:       # %bb.0:
; WIN64-NEXT:    subq $40, %rsp
; WIN64-NEXT:    callq __fixdfti
; WIN64-NEXT:    movq %xmm0, %rax
; WIN64-NEXT:    addq $40, %rsp
; WIN64-NEXT:    retq
  ; Strict fptosi of f64 to i128 lowers to the __fixdfti libcall; only the
  ; low 64 bits of the i128 result are kept.
  %1 = tail call i128 @llvm.experimental.constrained.fptosi.i128.f64(double %d, metadata !"fpexcept.strict")
  %2 = trunc i128 %1 to i64
  ret i64 %2
}

define i64 @double_to_ui128(double %d) nounwind strictfp {
; WIN64-LABEL: double_to_ui128:
; WIN64:       # %bb.0:
; WIN64-NEXT:    subq $40, %rsp
; WIN64-NEXT:    callq __fixunsdfti
; WIN64-NEXT:    movq %xmm0, %rax
; WIN64-NEXT:    addq $40, %rsp
; WIN64-NEXT:    retq
  ; Strict fptoui of f64 to i128 lowers to the __fixunsdfti libcall; only the
  ; low 64 bits of the i128 result are kept.
  %1 = tail call i128 @llvm.experimental.constrained.fptoui.i128.f64(double %d, metadata !"fpexcept.strict")
  %2 = trunc i128 %1 to i64
  ret i64 %2
}

define i64 @float_to_i128(float %d) nounwind strictfp {
; WIN64-LABEL: float_to_i128:
; WIN64:       # %bb.0:
; WIN64-NEXT:    subq $40, %rsp
; WIN64-NEXT:    callq __fixsfti
; WIN64-NEXT:    movq %xmm0, %rax
; WIN64-NEXT:    addq $40, %rsp
; WIN64-NEXT:    retq
  ; Strict fptosi of f32 to i128 lowers to the __fixsfti libcall; only the
  ; low 64 bits of the i128 result are kept.
  %1 = tail call i128 @llvm.experimental.constrained.fptosi.i128.f32(float %d, metadata !"fpexcept.strict")
  %2 = trunc i128 %1 to i64
  ret i64 %2
}

define i64 @float_to_ui128(float %d) nounwind strictfp {
; WIN64-LABEL: float_to_ui128:
; WIN64:       # %bb.0:
; WIN64-NEXT:    subq $40, %rsp
; WIN64-NEXT:    callq __fixunssfti
; WIN64-NEXT:    movq %xmm0, %rax
; WIN64-NEXT:    addq $40, %rsp
; WIN64-NEXT:    retq
  ; Strict fptoui of f32 to i128 lowers to the __fixunssfti libcall; only the
  ; low 64 bits of the i128 result are kept.
  %1 = tail call i128 @llvm.experimental.constrained.fptoui.i128.f32(float %d, metadata !"fpexcept.strict")
  %2 = trunc i128 %1 to i64
  ret i64 %2
}

define i64 @longdouble_to_i128(ptr nocapture readonly %0) nounwind strictfp {
; WIN64-LABEL: longdouble_to_i128:
; WIN64:       # %bb.0:
; WIN64-NEXT:    subq $56, %rsp
; WIN64-NEXT:    fldt (%rcx)
; WIN64-NEXT:    fstpt {{[0-9]+}}(%rsp)
; WIN64-NEXT:    wait
; WIN64-NEXT:    leaq {{[0-9]+}}(%rsp), %rcx
; WIN64-NEXT:    callq __fixxfti
; WIN64-NEXT:    movq %xmm0, %rax
; WIN64-NEXT:    addq $56, %rsp
; WIN64-NEXT:    retq
  ; Strict fptosi of x86_fp80 to i128 lowers to __fixxfti; per the checks,
  ; the fp80 value is spilled and passed to the libcall by pointer (%rcx).
  %2 = load x86_fp80, ptr %0, align 16
  %3 = tail call i128 @llvm.experimental.constrained.fptosi.i128.f80(x86_fp80 %2, metadata !"fpexcept.strict")
  %4 = trunc i128 %3 to i64
  ret i64 %4
}

define i64 @longdouble_to_ui128(ptr nocapture readonly %0) nounwind strictfp {
; WIN64-LABEL: longdouble_to_ui128:
; WIN64:       # %bb.0:
; WIN64-NEXT:    subq $56, %rsp
; WIN64-NEXT:    fldt (%rcx)
; WIN64-NEXT:    fstpt {{[0-9]+}}(%rsp)
; WIN64-NEXT:    wait
; WIN64-NEXT:    leaq {{[0-9]+}}(%rsp), %rcx
; WIN64-NEXT:    callq __fixunsxfti
; WIN64-NEXT:    movq %xmm0, %rax
; WIN64-NEXT:    addq $56, %rsp
; WIN64-NEXT:    retq
  ; Strict fptoui of x86_fp80 to i128 lowers to __fixunsxfti; per the checks,
  ; the fp80 value is spilled and passed to the libcall by pointer (%rcx).
  %2 = load x86_fp80, ptr %0, align 16
  %3 = tail call i128 @llvm.experimental.constrained.fptoui.i128.f80(x86_fp80 %2, metadata !"fpexcept.strict")
  %4 = trunc i128 %3 to i64
  ret i64 %4
}

define double @i128_to_double(ptr nocapture readonly %0) nounwind strictfp {
; WIN64-LABEL: i128_to_double:
; WIN64:       # %bb.0:
; WIN64-NEXT:    subq $56, %rsp
; WIN64-NEXT:    movaps (%rcx), %xmm0
; WIN64-NEXT:    movaps %xmm0, {{[0-9]+}}(%rsp)
; WIN64-NEXT:    leaq {{[0-9]+}}(%rsp), %rcx
; WIN64-NEXT:    callq __floattidf
; WIN64-NEXT:    addq $56, %rsp
; WIN64-NEXT:    retq
  ; Strict signed i128 -> f64 lowers to __floattidf; per the checks, the i128
  ; argument is spilled to the stack and passed by pointer (%rcx).
  %2 = load i128, ptr %0, align 16
  %3 = tail call double @llvm.experimental.constrained.sitofp.f64.i128(i128 %2, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret double %3
}

define double @ui128_to_double(ptr nocapture readonly %0) nounwind strictfp {
; WIN64-LABEL: ui128_to_double:
; WIN64:       # %bb.0:
; WIN64-NEXT:    subq $56, %rsp
; WIN64-NEXT:    movaps (%rcx), %xmm0
; WIN64-NEXT:    movaps %xmm0, {{[0-9]+}}(%rsp)
; WIN64-NEXT:    leaq {{[0-9]+}}(%rsp), %rcx
; WIN64-NEXT:    callq __floatuntidf
; WIN64-NEXT:    addq $56, %rsp
; WIN64-NEXT:    retq
  ; Strict unsigned i128 -> f64 lowers to __floatuntidf; per the checks, the
  ; i128 argument is spilled to the stack and passed by pointer (%rcx).
  %2 = load i128, ptr %0, align 16
  %3 = tail call double @llvm.experimental.constrained.uitofp.f64.i128(i128 %2, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret double %3
}

define float @i128_to_float(ptr nocapture readonly %0) nounwind strictfp {
; WIN64-LABEL: i128_to_float:
; WIN64:       # %bb.0:
; WIN64-NEXT:    subq $56, %rsp
; WIN64-NEXT:    movaps (%rcx), %xmm0
; WIN64-NEXT:    movaps %xmm0, {{[0-9]+}}(%rsp)
; WIN64-NEXT:    leaq {{[0-9]+}}(%rsp), %rcx
; WIN64-NEXT:    callq __floattisf
; WIN64-NEXT:    addq $56, %rsp
; WIN64-NEXT:    retq
  ; Strict signed i128 -> f32 lowers to __floattisf; per the checks, the i128
  ; argument is spilled to the stack and passed by pointer (%rcx).
  %2 = load i128, ptr %0, align 16
  %3 = tail call float @llvm.experimental.constrained.sitofp.f32.i128(i128 %2, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret float %3
}

define float @ui128_to_float(ptr nocapture readonly %0) nounwind strictfp {
; WIN64-LABEL: ui128_to_float:
; WIN64:       # %bb.0:
; WIN64-NEXT:    subq $56, %rsp
; WIN64-NEXT:    movaps (%rcx), %xmm0
; WIN64-NEXT:    movaps %xmm0, {{[0-9]+}}(%rsp)
; WIN64-NEXT:    leaq {{[0-9]+}}(%rsp), %rcx
; WIN64-NEXT:    callq __floatuntisf
; WIN64-NEXT:    addq $56, %rsp
; WIN64-NEXT:    retq
  ; Strict unsigned i128 -> f32 lowers to __floatuntisf; per the checks, the
  ; i128 argument is spilled to the stack and passed by pointer (%rcx).
  %2 = load i128, ptr %0, align 16
  %3 = tail call float @llvm.experimental.constrained.uitofp.f32.i128(i128 %2, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret float %3
}

define void @i128_to_longdouble(ptr noalias nocapture sret(x86_fp80) align 16 %agg.result, ptr nocapture readonly %0) nounwind strictfp {
; WIN64-LABEL: i128_to_longdouble:
; WIN64:       # %bb.0:
; WIN64-NEXT:    pushq %rsi
; WIN64-NEXT:    subq $64, %rsp
; WIN64-NEXT:    movq %rcx, %rsi
; WIN64-NEXT:    movaps (%rdx), %xmm0
; WIN64-NEXT:    movaps %xmm0, {{[0-9]+}}(%rsp)
; WIN64-NEXT:    leaq {{[0-9]+}}(%rsp), %rcx
; WIN64-NEXT:    leaq {{[0-9]+}}(%rsp), %rdx
; WIN64-NEXT:    callq __floattixf
; WIN64-NEXT:    fldt {{[0-9]+}}(%rsp)
; WIN64-NEXT:    fstpt (%rsi)
; WIN64-NEXT:    wait
; WIN64-NEXT:    movq %rsi, %rax
; WIN64-NEXT:    addq $64, %rsp
; WIN64-NEXT:    popq %rsi
; WIN64-NEXT:    retq
  ; Strict signed i128 -> x86_fp80 lowers to __floattixf; per the checks, both
  ; the fp80 result slot and the i128 argument are passed by pointer, and the
  ; result is stored through the sret pointer.
  %2 = load i128, ptr %0, align 16
  %3 = tail call x86_fp80 @llvm.experimental.constrained.sitofp.f80.i128(i128 %2, metadata !"round.dynamic", metadata !"fpexcept.strict")
  store x86_fp80 %3, ptr %agg.result, align 16
  ret void
}

define void @ui128_to_longdouble(ptr noalias nocapture sret(x86_fp80) align 16 %agg.result, ptr nocapture readonly %0) nounwind strictfp {
; WIN64-LABEL: ui128_to_longdouble:
; WIN64:       # %bb.0:
; WIN64-NEXT:    pushq %rsi
; WIN64-NEXT:    subq $64, %rsp
; WIN64-NEXT:    movq %rcx, %rsi
; WIN64-NEXT:    movaps (%rdx), %xmm0
; WIN64-NEXT:    movaps %xmm0, {{[0-9]+}}(%rsp)
; WIN64-NEXT:    leaq {{[0-9]+}}(%rsp), %rcx
; WIN64-NEXT:    leaq {{[0-9]+}}(%rsp), %rdx
; WIN64-NEXT:    callq __floatuntixf
; WIN64-NEXT:    fldt {{[0-9]+}}(%rsp)
; WIN64-NEXT:    fstpt (%rsi)
; WIN64-NEXT:    wait
; WIN64-NEXT:    movq %rsi, %rax
; WIN64-NEXT:    addq $64, %rsp
; WIN64-NEXT:    popq %rsi
; WIN64-NEXT:    retq
  ; Strict unsigned i128 -> x86_fp80 lowers to __floatuntixf; per the checks,
  ; both the fp80 result slot and the i128 argument are passed by pointer, and
  ; the result is stored through the sret pointer.
  %2 = load i128, ptr %0, align 16
  %3 = tail call x86_fp80 @llvm.experimental.constrained.uitofp.f80.i128(i128 %2, metadata !"round.dynamic", metadata !"fpexcept.strict")
  store x86_fp80 %3, ptr %agg.result, align 16
  ret void
}

; Constrained FP conversion intrinsics exercised by the tests above.
declare i128 @llvm.experimental.constrained.fptosi.i128.f64(double, metadata)
declare i128 @llvm.experimental.constrained.fptoui.i128.f64(double, metadata)
declare i128 @llvm.experimental.constrained.fptosi.i128.f32(float, metadata)
declare i128 @llvm.experimental.constrained.fptoui.i128.f32(float, metadata)
declare i128 @llvm.experimental.constrained.fptosi.i128.f80(x86_fp80, metadata)
declare i128 @llvm.experimental.constrained.fptoui.i128.f80(x86_fp80, metadata)
declare double @llvm.experimental.constrained.sitofp.f64.i128(i128, metadata, metadata)
declare double @llvm.experimental.constrained.uitofp.f64.i128(i128, metadata, metadata)
declare float @llvm.experimental.constrained.sitofp.f32.i128(i128, metadata, metadata)
declare float @llvm.experimental.constrained.uitofp.f32.i128(i128, metadata, metadata)
declare x86_fp80 @llvm.experimental.constrained.sitofp.f80.i128(i128, metadata, metadata)
declare x86_fp80 @llvm.experimental.constrained.uitofp.f80.i128(i128, metadata, metadata)
