; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-linux-android -mattr=+sse | FileCheck %s --check-prefixes=X64,X64-SSE
; RUN: llc < %s -mtriple=x86_64-linux-gnu -mattr=+sse | FileCheck %s --check-prefixes=X64,X64-SSE
; RUN: llc < %s -mtriple=x86_64-linux-android -mattr=+avx | FileCheck %s --check-prefixes=X64,X64-AVX
; RUN: llc < %s -mtriple=x86_64-linux-gnu -mattr=+avx | FileCheck %s --check-prefixes=X64,X64-AVX
; RUN: llc < %s -mtriple=x86_64-linux-android -mattr=+avx512fp16 | FileCheck %s --check-prefixes=X64,X64-AVX,X64-AVX512
; RUN: llc < %s -mtriple=x86_64-linux-gnu -mattr=+avx512fp16 | FileCheck %s --check-prefixes=X64,X64-AVX,X64-AVX512
; RUN: llc < %s -mtriple=i686-linux-gnu -mattr=-sse | FileCheck %s --check-prefixes=X86

; Check soft floating-point conversion function calls.

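; Every conversion below is written with the constrained
; llvm.experimental.constrained.* intrinsics in strictfp functions, so the
; backend must preserve FP exception semantics and lower each fp128
; operation to a soft-float library call (x86 has no native fp128
; arithmetic).
;
; A minimal standalone sketch of the pattern under test (the @sketch
; function is illustrative, not part of this test); fed to llc with one of
; the RUN lines above, the constrained fpext must lower to a call to
; __extendsftf2:
;
;   define fp128 @sketch(float %x) nounwind strictfp {
;     %r = call fp128 @llvm.experimental.constrained.fpext.f128.f32(float %x, metadata !"fpexcept.strict") #0
;     ret fp128 %r
;   }
;   declare fp128 @llvm.experimental.constrained.fpext.f128.f32(float, metadata)
;   attributes #0 = { strictfp }
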
@vf16 = common dso_local global half 0.000000e+00, align 2
@vf32 = common dso_local global float 0.000000e+00, align 4
@vf64 = common dso_local global double 0.000000e+00, align 8
@vf80 = common dso_local global x86_fp80 0xK00000000000000000000, align 8
@vf128 = common dso_local global fp128 0xL00000000000000000000000000000000, align 16

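; Extensions to fp128 lower to __extendhftf2/__extendsftf2/__extenddftf2/
; __extendxftf2. With SSE disabled, i686 has no direct half path, so it
; first widens half to float via __gnu_h2f_ieee and then calls
; __extendsftf2.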
define dso_local void @TestFPExtF16_F128() nounwind strictfp {
; X64-SSE-LABEL: TestFPExtF16_F128:
; X64-SSE:       # %bb.0: # %entry
; X64-SSE-NEXT:    pushq %rax
; X64-SSE-NEXT:    pinsrw $0, vf16(%rip), %xmm0
; X64-SSE-NEXT:    callq __extendhftf2@PLT
; X64-SSE-NEXT:    movdqa %xmm0, vf128(%rip)
; X64-SSE-NEXT:    popq %rax
; X64-SSE-NEXT:    retq
;
; X64-AVX512-LABEL: TestFPExtF16_F128:
; X64-AVX512:       # %bb.0: # %entry
; X64-AVX512-NEXT:    pushq %rax
; X64-AVX512-NEXT:    vmovsh vf16(%rip), %xmm0
; X64-AVX512-NEXT:    callq __extendhftf2@PLT
; X64-AVX512-NEXT:    vmovaps %xmm0, vf128(%rip)
; X64-AVX512-NEXT:    popq %rax
; X64-AVX512-NEXT:    retq
;
; X86-LABEL: TestFPExtF16_F128:
; X86:       # %bb.0: # %entry
; X86-NEXT:    pushl %esi
; X86-NEXT:    subl $40, %esp
; X86-NEXT:    movzwl vf16, %eax
; X86-NEXT:    movl %eax, (%esp)
; X86-NEXT:    calll __gnu_h2f_ieee
; X86-NEXT:    fstps {{[0-9]+}}(%esp)
; X86-NEXT:    wait
; X86-NEXT:    leal {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movl %eax, (%esp)
; X86-NEXT:    calll __extendsftf2
; X86-NEXT:    subl $4, %esp
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
; X86-NEXT:    movl %esi, vf128+12
; X86-NEXT:    movl %edx, vf128+8
; X86-NEXT:    movl %ecx, vf128+4
; X86-NEXT:    movl %eax, vf128
; X86-NEXT:    addl $40, %esp
; X86-NEXT:    popl %esi
; X86-NEXT:    retl
entry:
  %0 = load half, ptr @vf16, align 2
  %conv = call fp128 @llvm.experimental.constrained.fpext.f128.f16(half %0, metadata !"fpexcept.strict") #0
  store fp128 %conv, ptr @vf128, align 16
  ret void
}

define dso_local void @TestFPExtF32_F128() nounwind strictfp {
; X64-SSE-LABEL: TestFPExtF32_F128:
; X64-SSE:       # %bb.0: # %entry
; X64-SSE-NEXT:    pushq %rax
; X64-SSE-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X64-SSE-NEXT:    callq __extendsftf2@PLT
; X64-SSE-NEXT:    movaps %xmm0, vf128(%rip)
; X64-SSE-NEXT:    popq %rax
; X64-SSE-NEXT:    retq
;
; X64-AVX-LABEL: TestFPExtF32_F128:
; X64-AVX:       # %bb.0: # %entry
; X64-AVX-NEXT:    pushq %rax
; X64-AVX-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X64-AVX-NEXT:    callq __extendsftf2@PLT
; X64-AVX-NEXT:    vmovaps %xmm0, vf128(%rip)
; X64-AVX-NEXT:    popq %rax
; X64-AVX-NEXT:    retq
;
; X86-LABEL: TestFPExtF32_F128:
; X86:       # %bb.0: # %entry
; X86-NEXT:    pushl %esi
; X86-NEXT:    subl $40, %esp
; X86-NEXT:    flds vf32
; X86-NEXT:    fstps {{[0-9]+}}(%esp)
; X86-NEXT:    wait
; X86-NEXT:    leal {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movl %eax, (%esp)
; X86-NEXT:    calll __extendsftf2
; X86-NEXT:    subl $4, %esp
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
; X86-NEXT:    movl %esi, vf128+12
; X86-NEXT:    movl %edx, vf128+8
; X86-NEXT:    movl %ecx, vf128+4
; X86-NEXT:    movl %eax, vf128
; X86-NEXT:    addl $40, %esp
; X86-NEXT:    popl %esi
; X86-NEXT:    retl
entry:
  %0 = load float, ptr @vf32, align 4
  %conv = call fp128 @llvm.experimental.constrained.fpext.f128.f32(float %0, metadata !"fpexcept.strict") #0
  store fp128 %conv, ptr @vf128, align 16
  ret void
}

define dso_local void @TestFPExtF64_F128() nounwind strictfp {
; X64-SSE-LABEL: TestFPExtF64_F128:
; X64-SSE:       # %bb.0: # %entry
; X64-SSE-NEXT:    pushq %rax
; X64-SSE-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; X64-SSE-NEXT:    callq __extenddftf2@PLT
; X64-SSE-NEXT:    movaps %xmm0, vf128(%rip)
; X64-SSE-NEXT:    popq %rax
; X64-SSE-NEXT:    retq
;
; X64-AVX-LABEL: TestFPExtF64_F128:
; X64-AVX:       # %bb.0: # %entry
; X64-AVX-NEXT:    pushq %rax
; X64-AVX-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
; X64-AVX-NEXT:    callq __extenddftf2@PLT
; X64-AVX-NEXT:    vmovaps %xmm0, vf128(%rip)
; X64-AVX-NEXT:    popq %rax
; X64-AVX-NEXT:    retq
;
; X86-LABEL: TestFPExtF64_F128:
; X86:       # %bb.0: # %entry
; X86-NEXT:    pushl %esi
; X86-NEXT:    subl $40, %esp
; X86-NEXT:    fldl vf64
; X86-NEXT:    fstpl {{[0-9]+}}(%esp)
; X86-NEXT:    wait
; X86-NEXT:    leal {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movl %eax, (%esp)
; X86-NEXT:    calll __extenddftf2
; X86-NEXT:    subl $4, %esp
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
; X86-NEXT:    movl %esi, vf128+12
; X86-NEXT:    movl %edx, vf128+8
; X86-NEXT:    movl %ecx, vf128+4
; X86-NEXT:    movl %eax, vf128
; X86-NEXT:    addl $40, %esp
; X86-NEXT:    popl %esi
; X86-NEXT:    retl
entry:
  %0 = load double, ptr @vf64, align 8
  %conv = call fp128 @llvm.experimental.constrained.fpext.f128.f64(double %0, metadata !"fpexcept.strict") #0
  store fp128 %conv, ptr @vf128, align 16
  ret void
}

define dso_local void @TestFPExtF80_F128() nounwind strictfp {
; X64-SSE-LABEL: TestFPExtF80_F128:
; X64-SSE:       # %bb.0: # %entry
; X64-SSE-NEXT:    subq $24, %rsp
; X64-SSE-NEXT:    fldt vf80(%rip)
; X64-SSE-NEXT:    fstpt (%rsp)
; X64-SSE-NEXT:    wait
; X64-SSE-NEXT:    callq __extendxftf2@PLT
; X64-SSE-NEXT:    movaps %xmm0, vf128(%rip)
; X64-SSE-NEXT:    addq $24, %rsp
; X64-SSE-NEXT:    retq
;
; X64-AVX-LABEL: TestFPExtF80_F128:
; X64-AVX:       # %bb.0: # %entry
; X64-AVX-NEXT:    subq $24, %rsp
; X64-AVX-NEXT:    fldt vf80(%rip)
; X64-AVX-NEXT:    fstpt (%rsp)
; X64-AVX-NEXT:    wait
; X64-AVX-NEXT:    callq __extendxftf2@PLT
; X64-AVX-NEXT:    vmovaps %xmm0, vf128(%rip)
; X64-AVX-NEXT:    addq $24, %rsp
; X64-AVX-NEXT:    retq
;
; X86-LABEL: TestFPExtF80_F128:
; X86:       # %bb.0: # %entry
; X86-NEXT:    pushl %esi
; X86-NEXT:    subl $40, %esp
; X86-NEXT:    fldt vf80
; X86-NEXT:    fstpt {{[0-9]+}}(%esp)
; X86-NEXT:    wait
; X86-NEXT:    leal {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movl %eax, (%esp)
; X86-NEXT:    calll __extendxftf2
; X86-NEXT:    subl $4, %esp
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
; X86-NEXT:    movl %esi, vf128+12
; X86-NEXT:    movl %edx, vf128+8
; X86-NEXT:    movl %ecx, vf128+4
; X86-NEXT:    movl %eax, vf128
; X86-NEXT:    addl $40, %esp
; X86-NEXT:    popl %esi
; X86-NEXT:    retl
entry:
  %0 = load x86_fp80, ptr @vf80, align 8
  %conv = call fp128 @llvm.experimental.constrained.fpext.f128.f80(x86_fp80 %0, metadata !"fpexcept.strict") #0
  store fp128 %conv, ptr @vf128, align 16
  ret void
}

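; Truncations from fp128 lower to __trunctfhf2/__trunctfsf2/__trunctfdf2/
; __trunctfxf2. On i686 the f16 result comes back in %ax and the
; f32/f64/f80 results on the x87 stack; the wait after each x87 store keeps
; pending FP exceptions observable, as strictfp requires.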
define dso_local void @TestFPTruncF128_F16() nounwind strictfp {
; X64-SSE-LABEL: TestFPTruncF128_F16:
; X64-SSE:       # %bb.0: # %entry
; X64-SSE-NEXT:    pushq %rax
; X64-SSE-NEXT:    movdqa vf128(%rip), %xmm0
; X64-SSE-NEXT:    callq __trunctfhf2@PLT
; X64-SSE-NEXT:    pextrw $0, %xmm0, %eax
; X64-SSE-NEXT:    movw %ax, vf16(%rip)
; X64-SSE-NEXT:    popq %rax
; X64-SSE-NEXT:    retq
;
; X64-AVX512-LABEL: TestFPTruncF128_F16:
; X64-AVX512:       # %bb.0: # %entry
; X64-AVX512-NEXT:    pushq %rax
; X64-AVX512-NEXT:    vmovaps vf128(%rip), %xmm0
; X64-AVX512-NEXT:    callq __trunctfhf2@PLT
; X64-AVX512-NEXT:    vmovsh %xmm0, vf16(%rip)
; X64-AVX512-NEXT:    popq %rax
; X64-AVX512-NEXT:    retq
;
; X86-LABEL: TestFPTruncF128_F16:
; X86:       # %bb.0: # %entry
; X86-NEXT:    subl $12, %esp
; X86-NEXT:    pushl vf128+12
; X86-NEXT:    pushl vf128+8
; X86-NEXT:    pushl vf128+4
; X86-NEXT:    pushl vf128
; X86-NEXT:    calll __trunctfhf2
; X86-NEXT:    addl $16, %esp
; X86-NEXT:    movw %ax, vf16
; X86-NEXT:    addl $12, %esp
; X86-NEXT:    retl
entry:
  %0 = load fp128, ptr @vf128, align 16
  %conv = call half @llvm.experimental.constrained.fptrunc.f16.f128(fp128 %0, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
  store half %conv, ptr @vf16, align 2
  ret void
}

define dso_local void @TestFPTruncF128_F32() nounwind strictfp {
; X64-SSE-LABEL: TestFPTruncF128_F32:
; X64-SSE:       # %bb.0: # %entry
; X64-SSE-NEXT:    pushq %rax
; X64-SSE-NEXT:    movaps vf128(%rip), %xmm0
; X64-SSE-NEXT:    callq __trunctfsf2@PLT
; X64-SSE-NEXT:    movss %xmm0, vf32(%rip)
; X64-SSE-NEXT:    popq %rax
; X64-SSE-NEXT:    retq
;
; X64-AVX-LABEL: TestFPTruncF128_F32:
; X64-AVX:       # %bb.0: # %entry
; X64-AVX-NEXT:    pushq %rax
; X64-AVX-NEXT:    vmovaps vf128(%rip), %xmm0
; X64-AVX-NEXT:    callq __trunctfsf2@PLT
; X64-AVX-NEXT:    vmovss %xmm0, vf32(%rip)
; X64-AVX-NEXT:    popq %rax
; X64-AVX-NEXT:    retq
;
; X86-LABEL: TestFPTruncF128_F32:
; X86:       # %bb.0: # %entry
; X86-NEXT:    subl $12, %esp
; X86-NEXT:    pushl vf128+12
; X86-NEXT:    pushl vf128+8
; X86-NEXT:    pushl vf128+4
; X86-NEXT:    pushl vf128
; X86-NEXT:    calll __trunctfsf2
; X86-NEXT:    addl $16, %esp
; X86-NEXT:    fstps vf32
; X86-NEXT:    wait
; X86-NEXT:    addl $12, %esp
; X86-NEXT:    retl
entry:
  %0 = load fp128, ptr @vf128, align 16
  %conv = call float @llvm.experimental.constrained.fptrunc.f32.f128(fp128 %0, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
  store float %conv, ptr @vf32, align 4
  ret void
}

define dso_local void @TestFPTruncF128_F64() nounwind strictfp {
; X64-SSE-LABEL: TestFPTruncF128_F64:
; X64-SSE:       # %bb.0: # %entry
; X64-SSE-NEXT:    pushq %rax
; X64-SSE-NEXT:    movaps vf128(%rip), %xmm0
; X64-SSE-NEXT:    callq __trunctfdf2@PLT
; X64-SSE-NEXT:    movsd %xmm0, vf64(%rip)
; X64-SSE-NEXT:    popq %rax
; X64-SSE-NEXT:    retq
;
; X64-AVX-LABEL: TestFPTruncF128_F64:
; X64-AVX:       # %bb.0: # %entry
; X64-AVX-NEXT:    pushq %rax
; X64-AVX-NEXT:    vmovaps vf128(%rip), %xmm0
; X64-AVX-NEXT:    callq __trunctfdf2@PLT
; X64-AVX-NEXT:    vmovsd %xmm0, vf64(%rip)
; X64-AVX-NEXT:    popq %rax
; X64-AVX-NEXT:    retq
;
; X86-LABEL: TestFPTruncF128_F64:
; X86:       # %bb.0: # %entry
; X86-NEXT:    subl $12, %esp
; X86-NEXT:    pushl vf128+12
; X86-NEXT:    pushl vf128+8
; X86-NEXT:    pushl vf128+4
; X86-NEXT:    pushl vf128
; X86-NEXT:    calll __trunctfdf2
; X86-NEXT:    addl $16, %esp
; X86-NEXT:    fstpl vf64
; X86-NEXT:    wait
; X86-NEXT:    addl $12, %esp
; X86-NEXT:    retl
entry:
  %0 = load fp128, ptr @vf128, align 16
  %conv = call double @llvm.experimental.constrained.fptrunc.f64.f128(fp128 %0, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
  store double %conv, ptr @vf64, align 8
  ret void
}

define dso_local void @TestFPTruncF128_F80() nounwind strictfp {
; X64-SSE-LABEL: TestFPTruncF128_F80:
; X64-SSE:       # %bb.0: # %entry
; X64-SSE-NEXT:    pushq %rax
; X64-SSE-NEXT:    movaps vf128(%rip), %xmm0
; X64-SSE-NEXT:    callq __trunctfxf2@PLT
; X64-SSE-NEXT:    fstpt vf80(%rip)
; X64-SSE-NEXT:    wait
; X64-SSE-NEXT:    popq %rax
; X64-SSE-NEXT:    retq
;
; X64-AVX-LABEL: TestFPTruncF128_F80:
; X64-AVX:       # %bb.0: # %entry
; X64-AVX-NEXT:    pushq %rax
; X64-AVX-NEXT:    vmovaps vf128(%rip), %xmm0
; X64-AVX-NEXT:    callq __trunctfxf2@PLT
; X64-AVX-NEXT:    fstpt vf80(%rip)
; X64-AVX-NEXT:    wait
; X64-AVX-NEXT:    popq %rax
; X64-AVX-NEXT:    retq
;
; X86-LABEL: TestFPTruncF128_F80:
; X86:       # %bb.0: # %entry
; X86-NEXT:    subl $12, %esp
; X86-NEXT:    pushl vf128+12
; X86-NEXT:    pushl vf128+8
; X86-NEXT:    pushl vf128+4
; X86-NEXT:    pushl vf128
; X86-NEXT:    calll __trunctfxf2
; X86-NEXT:    addl $16, %esp
; X86-NEXT:    fstpt vf80
; X86-NEXT:    wait
; X86-NEXT:    addl $12, %esp
; X86-NEXT:    retl
entry:
  %0 = load fp128, ptr @vf128, align 16
  %conv = call x86_fp80 @llvm.experimental.constrained.fptrunc.f80.f128(fp128 %0, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
  store x86_fp80 %conv, ptr @vf80, align 8
  ret void
}

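; fp128 -> signed integer conversions use __fixtfsi/__fixtfdi/__fixtfti;
; i8 and i16 results reuse the i32 libcall and are truncated afterwards
; (the "kill" annotations). On i686 the i128 result is returned indirectly
; through a hidden pointer argument.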
define dso_local i8 @fptosi_i8(fp128 %x) nounwind strictfp {
; X64-LABEL: fptosi_i8:
; X64:       # %bb.0: # %entry
; X64-NEXT:    pushq %rax
; X64-NEXT:    callq __fixtfsi@PLT
; X64-NEXT:    # kill: def $al killed $al killed $eax
; X64-NEXT:    popq %rcx
; X64-NEXT:    retq
;
; X86-LABEL: fptosi_i8:
; X86:       # %bb.0: # %entry
; X86-NEXT:    subl $12, %esp
; X86-NEXT:    pushl {{[0-9]+}}(%esp)
; X86-NEXT:    pushl {{[0-9]+}}(%esp)
; X86-NEXT:    pushl {{[0-9]+}}(%esp)
; X86-NEXT:    pushl {{[0-9]+}}(%esp)
; X86-NEXT:    calll __fixtfsi
; X86-NEXT:    addl $16, %esp
; X86-NEXT:    # kill: def $al killed $al killed $eax
; X86-NEXT:    addl $12, %esp
; X86-NEXT:    retl
entry:
  %conv = call i8 @llvm.experimental.constrained.fptosi.i8.f128(fp128 %x, metadata !"fpexcept.strict") #0
  ret i8 %conv
}

define i16 @fptosi_i16(fp128 %x) nounwind strictfp {
; X64-LABEL: fptosi_i16:
; X64:       # %bb.0: # %entry
; X64-NEXT:    pushq %rax
; X64-NEXT:    callq __fixtfsi@PLT
; X64-NEXT:    # kill: def $ax killed $ax killed $eax
; X64-NEXT:    popq %rcx
; X64-NEXT:    retq
;
; X86-LABEL: fptosi_i16:
; X86:       # %bb.0: # %entry
; X86-NEXT:    subl $12, %esp
; X86-NEXT:    pushl {{[0-9]+}}(%esp)
; X86-NEXT:    pushl {{[0-9]+}}(%esp)
; X86-NEXT:    pushl {{[0-9]+}}(%esp)
; X86-NEXT:    pushl {{[0-9]+}}(%esp)
; X86-NEXT:    calll __fixtfsi
; X86-NEXT:    addl $16, %esp
; X86-NEXT:    # kill: def $ax killed $ax killed $eax
; X86-NEXT:    addl $12, %esp
; X86-NEXT:    retl
entry:
  %conv = call i16 @llvm.experimental.constrained.fptosi.i16.f128(fp128 %x, metadata !"fpexcept.strict") #0
  ret i16 %conv
}

define dso_local i32 @fptosi_i32(fp128 %x) nounwind strictfp {
; X64-LABEL: fptosi_i32:
; X64:       # %bb.0: # %entry
; X64-NEXT:    pushq %rax
; X64-NEXT:    callq __fixtfsi@PLT
; X64-NEXT:    popq %rcx
; X64-NEXT:    retq
;
; X86-LABEL: fptosi_i32:
; X86:       # %bb.0: # %entry
; X86-NEXT:    subl $12, %esp
; X86-NEXT:    pushl {{[0-9]+}}(%esp)
; X86-NEXT:    pushl {{[0-9]+}}(%esp)
; X86-NEXT:    pushl {{[0-9]+}}(%esp)
; X86-NEXT:    pushl {{[0-9]+}}(%esp)
; X86-NEXT:    calll __fixtfsi
; X86-NEXT:    addl $28, %esp
; X86-NEXT:    retl
entry:
  %conv = call i32 @llvm.experimental.constrained.fptosi.i32.f128(fp128 %x, metadata !"fpexcept.strict") #0
  ret i32 %conv
}

define i64 @fptosi_i64(fp128 %x) nounwind strictfp {
; X64-LABEL: fptosi_i64:
; X64:       # %bb.0: # %entry
; X64-NEXT:    pushq %rax
; X64-NEXT:    callq __fixtfdi@PLT
; X64-NEXT:    popq %rcx
; X64-NEXT:    retq
;
; X86-LABEL: fptosi_i64:
; X86:       # %bb.0: # %entry
; X86-NEXT:    subl $12, %esp
; X86-NEXT:    pushl {{[0-9]+}}(%esp)
; X86-NEXT:    pushl {{[0-9]+}}(%esp)
; X86-NEXT:    pushl {{[0-9]+}}(%esp)
; X86-NEXT:    pushl {{[0-9]+}}(%esp)
; X86-NEXT:    calll __fixtfdi
; X86-NEXT:    addl $28, %esp
; X86-NEXT:    retl
entry:
  %conv = call i64 @llvm.experimental.constrained.fptosi.i64.f128(fp128 %x, metadata !"fpexcept.strict") #0
  ret i64 %conv
}

define i128 @fptosi_i128(fp128 %x) nounwind strictfp {
; X64-LABEL: fptosi_i128:
; X64:       # %bb.0: # %entry
; X64-NEXT:    pushq %rax
; X64-NEXT:    callq __fixtfti@PLT
; X64-NEXT:    popq %rcx
; X64-NEXT:    retq
;
; X86-LABEL: fptosi_i128:
; X86:       # %bb.0: # %entry
; X86-NEXT:    pushl %edi
; X86-NEXT:    pushl %esi
; X86-NEXT:    subl $20, %esp
; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
; X86-NEXT:    subl $12, %esp
; X86-NEXT:    leal {{[0-9]+}}(%esp), %eax
; X86-NEXT:    pushl {{[0-9]+}}(%esp)
; X86-NEXT:    pushl {{[0-9]+}}(%esp)
; X86-NEXT:    pushl {{[0-9]+}}(%esp)
; X86-NEXT:    pushl {{[0-9]+}}(%esp)
; X86-NEXT:    pushl %eax
; X86-NEXT:    calll __fixtfti
; X86-NEXT:    addl $28, %esp
; X86-NEXT:    movl (%esp), %eax
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
; X86-NEXT:    movl {{[0-9]+}}(%esp), %edi
; X86-NEXT:    movl %edi, 8(%esi)
; X86-NEXT:    movl %edx, 12(%esi)
; X86-NEXT:    movl %eax, (%esi)
; X86-NEXT:    movl %ecx, 4(%esi)
; X86-NEXT:    movl %esi, %eax
; X86-NEXT:    addl $20, %esp
; X86-NEXT:    popl %esi
; X86-NEXT:    popl %edi
; X86-NEXT:    retl $4
entry:
  %conv = call i128 @llvm.experimental.constrained.fptosi.i128.f128(fp128 %x, metadata !"fpexcept.strict") #0
  ret i128 %conv
}

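; fp128 -> unsigned integer conversions use __fixunstfsi/__fixunstfdi/
; __fixunstfti for i32/i64/i128. Any valid i8/i16 result fits in a signed
; i32, so x86-64 promotes these to a signed conversion via __fixtfsi, while
; i686 calls __fixunstfsi.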
define dso_local i8 @fptoui_i8(fp128 %x) nounwind strictfp {
; X64-LABEL: fptoui_i8:
; X64:       # %bb.0: # %entry
; X64-NEXT:    pushq %rax
; X64-NEXT:    callq __fixtfsi@PLT
; X64-NEXT:    # kill: def $al killed $al killed $eax
; X64-NEXT:    popq %rcx
; X64-NEXT:    retq
;
; X86-LABEL: fptoui_i8:
; X86:       # %bb.0: # %entry
; X86-NEXT:    subl $12, %esp
; X86-NEXT:    pushl {{[0-9]+}}(%esp)
; X86-NEXT:    pushl {{[0-9]+}}(%esp)
; X86-NEXT:    pushl {{[0-9]+}}(%esp)
; X86-NEXT:    pushl {{[0-9]+}}(%esp)
; X86-NEXT:    calll __fixunstfsi
; X86-NEXT:    addl $16, %esp
; X86-NEXT:    # kill: def $al killed $al killed $eax
; X86-NEXT:    addl $12, %esp
; X86-NEXT:    retl
entry:
  %conv = call i8 @llvm.experimental.constrained.fptoui.i8.f128(fp128 %x, metadata !"fpexcept.strict") #0
  ret i8 %conv
}

define i16 @fptoui_i16(fp128 %x) nounwind strictfp {
; X64-LABEL: fptoui_i16:
; X64:       # %bb.0: # %entry
; X64-NEXT:    pushq %rax
; X64-NEXT:    callq __fixtfsi@PLT
; X64-NEXT:    # kill: def $ax killed $ax killed $eax
; X64-NEXT:    popq %rcx
; X64-NEXT:    retq
;
; X86-LABEL: fptoui_i16:
; X86:       # %bb.0: # %entry
; X86-NEXT:    subl $12, %esp
; X86-NEXT:    pushl {{[0-9]+}}(%esp)
; X86-NEXT:    pushl {{[0-9]+}}(%esp)
; X86-NEXT:    pushl {{[0-9]+}}(%esp)
; X86-NEXT:    pushl {{[0-9]+}}(%esp)
; X86-NEXT:    calll __fixunstfsi
; X86-NEXT:    addl $16, %esp
; X86-NEXT:    # kill: def $ax killed $ax killed $eax
; X86-NEXT:    addl $12, %esp
; X86-NEXT:    retl
entry:
  %conv = call i16 @llvm.experimental.constrained.fptoui.i16.f128(fp128 %x, metadata !"fpexcept.strict") #0
  ret i16 %conv
}

define dso_local i32 @fptoui_i32(fp128 %x) nounwind strictfp {
; X64-LABEL: fptoui_i32:
; X64:       # %bb.0: # %entry
; X64-NEXT:    pushq %rax
; X64-NEXT:    callq __fixunstfsi@PLT
; X64-NEXT:    popq %rcx
; X64-NEXT:    retq
;
; X86-LABEL: fptoui_i32:
; X86:       # %bb.0: # %entry
; X86-NEXT:    subl $12, %esp
; X86-NEXT:    pushl {{[0-9]+}}(%esp)
; X86-NEXT:    pushl {{[0-9]+}}(%esp)
; X86-NEXT:    pushl {{[0-9]+}}(%esp)
; X86-NEXT:    pushl {{[0-9]+}}(%esp)
; X86-NEXT:    calll __fixunstfsi
; X86-NEXT:    addl $28, %esp
; X86-NEXT:    retl
entry:
  %conv = call i32 @llvm.experimental.constrained.fptoui.i32.f128(fp128 %x, metadata !"fpexcept.strict") #0
  ret i32 %conv
}

define i64 @fptoui_i64(fp128 %x) nounwind strictfp {
; X64-LABEL: fptoui_i64:
; X64:       # %bb.0: # %entry
; X64-NEXT:    pushq %rax
; X64-NEXT:    callq __fixunstfdi@PLT
; X64-NEXT:    popq %rcx
; X64-NEXT:    retq
;
; X86-LABEL: fptoui_i64:
; X86:       # %bb.0: # %entry
; X86-NEXT:    subl $12, %esp
; X86-NEXT:    pushl {{[0-9]+}}(%esp)
; X86-NEXT:    pushl {{[0-9]+}}(%esp)
; X86-NEXT:    pushl {{[0-9]+}}(%esp)
; X86-NEXT:    pushl {{[0-9]+}}(%esp)
; X86-NEXT:    calll __fixunstfdi
; X86-NEXT:    addl $28, %esp
; X86-NEXT:    retl
entry:
  %conv = call i64 @llvm.experimental.constrained.fptoui.i64.f128(fp128 %x, metadata !"fpexcept.strict") #0
  ret i64 %conv
}

define i128 @fptoui_i128(fp128 %x) nounwind strictfp {
; X64-LABEL: fptoui_i128:
; X64:       # %bb.0: # %entry
; X64-NEXT:    pushq %rax
; X64-NEXT:    callq __fixunstfti@PLT
; X64-NEXT:    popq %rcx
; X64-NEXT:    retq
;
; X86-LABEL: fptoui_i128:
; X86:       # %bb.0: # %entry
; X86-NEXT:    pushl %edi
; X86-NEXT:    pushl %esi
; X86-NEXT:    subl $20, %esp
; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
; X86-NEXT:    subl $12, %esp
; X86-NEXT:    leal {{[0-9]+}}(%esp), %eax
; X86-NEXT:    pushl {{[0-9]+}}(%esp)
; X86-NEXT:    pushl {{[0-9]+}}(%esp)
; X86-NEXT:    pushl {{[0-9]+}}(%esp)
; X86-NEXT:    pushl {{[0-9]+}}(%esp)
; X86-NEXT:    pushl %eax
; X86-NEXT:    calll __fixunstfti
; X86-NEXT:    addl $28, %esp
; X86-NEXT:    movl (%esp), %eax
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
; X86-NEXT:    movl {{[0-9]+}}(%esp), %edi
; X86-NEXT:    movl %edi, 8(%esi)
; X86-NEXT:    movl %edx, 12(%esi)
; X86-NEXT:    movl %eax, (%esi)
; X86-NEXT:    movl %ecx, 4(%esi)
; X86-NEXT:    movl %esi, %eax
; X86-NEXT:    addl $20, %esp
; X86-NEXT:    popl %esi
; X86-NEXT:    popl %edi
; X86-NEXT:    retl $4
entry:
  %conv = call i128 @llvm.experimental.constrained.fptoui.i128.f128(fp128 %x, metadata !"fpexcept.strict") #0
  ret i128 %conv
}

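; Signed integer -> fp128 conversions use __floatsitf/__floatditf/
; __floattitf; i8 and i16 operands are first sign-extended to i32. On i686
; the fp128 result is returned indirectly through a hidden pointer argument.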
define fp128 @sitofp_i8(i8 %x) nounwind strictfp {
; X64-LABEL: sitofp_i8:
; X64:       # %bb.0: # %entry
; X64-NEXT:    pushq %rax
; X64-NEXT:    movsbl %dil, %edi
; X64-NEXT:    callq __floatsitf@PLT
; X64-NEXT:    popq %rax
; X64-NEXT:    retq
;
; X86-LABEL: sitofp_i8:
; X86:       # %bb.0: # %entry
; X86-NEXT:    pushl %edi
; X86-NEXT:    pushl %esi
; X86-NEXT:    subl $20, %esp
; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
; X86-NEXT:    movsbl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    subl $8, %esp
; X86-NEXT:    leal {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    pushl %eax
; X86-NEXT:    pushl %ecx
; X86-NEXT:    calll __floatsitf
; X86-NEXT:    addl $12, %esp
; X86-NEXT:    movl (%esp), %eax
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
; X86-NEXT:    movl {{[0-9]+}}(%esp), %edi
; X86-NEXT:    movl %edi, 8(%esi)
; X86-NEXT:    movl %edx, 12(%esi)
; X86-NEXT:    movl %eax, (%esi)
; X86-NEXT:    movl %ecx, 4(%esi)
; X86-NEXT:    movl %esi, %eax
; X86-NEXT:    addl $20, %esp
; X86-NEXT:    popl %esi
; X86-NEXT:    popl %edi
; X86-NEXT:    retl $4
entry:
  %conv = call fp128 @llvm.experimental.constrained.sitofp.f128.i8(i8 %x, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
  ret fp128 %conv
}

define fp128 @sitofp_i16(i16 %x) nounwind strictfp {
; X64-LABEL: sitofp_i16:
; X64:       # %bb.0: # %entry
; X64-NEXT:    pushq %rax
; X64-NEXT:    movswl %di, %edi
; X64-NEXT:    callq __floatsitf@PLT
; X64-NEXT:    popq %rax
; X64-NEXT:    retq
;
; X86-LABEL: sitofp_i16:
; X86:       # %bb.0: # %entry
; X86-NEXT:    pushl %edi
; X86-NEXT:    pushl %esi
; X86-NEXT:    subl $20, %esp
; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
; X86-NEXT:    movswl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    subl $8, %esp
; X86-NEXT:    leal {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    pushl %eax
; X86-NEXT:    pushl %ecx
; X86-NEXT:    calll __floatsitf
; X86-NEXT:    addl $12, %esp
; X86-NEXT:    movl (%esp), %eax
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
; X86-NEXT:    movl {{[0-9]+}}(%esp), %edi
; X86-NEXT:    movl %edi, 8(%esi)
; X86-NEXT:    movl %edx, 12(%esi)
; X86-NEXT:    movl %eax, (%esi)
; X86-NEXT:    movl %ecx, 4(%esi)
; X86-NEXT:    movl %esi, %eax
; X86-NEXT:    addl $20, %esp
; X86-NEXT:    popl %esi
; X86-NEXT:    popl %edi
; X86-NEXT:    retl $4
entry:
  %conv = call fp128 @llvm.experimental.constrained.sitofp.f128.i16(i16 %x, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
  ret fp128 %conv
}

define fp128 @sitofp_i32(i32 %x) nounwind strictfp {
; X64-LABEL: sitofp_i32:
; X64:       # %bb.0: # %entry
; X64-NEXT:    pushq %rax
; X64-NEXT:    callq __floatsitf@PLT
; X64-NEXT:    popq %rax
; X64-NEXT:    retq
;
; X86-LABEL: sitofp_i32:
; X86:       # %bb.0: # %entry
; X86-NEXT:    pushl %edi
; X86-NEXT:    pushl %esi
; X86-NEXT:    subl $20, %esp
; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
; X86-NEXT:    subl $8, %esp
; X86-NEXT:    leal {{[0-9]+}}(%esp), %eax
; X86-NEXT:    pushl {{[0-9]+}}(%esp)
; X86-NEXT:    pushl %eax
; X86-NEXT:    calll __floatsitf
; X86-NEXT:    addl $12, %esp
; X86-NEXT:    movl (%esp), %eax
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
; X86-NEXT:    movl {{[0-9]+}}(%esp), %edi
; X86-NEXT:    movl %edi, 8(%esi)
; X86-NEXT:    movl %edx, 12(%esi)
; X86-NEXT:    movl %eax, (%esi)
; X86-NEXT:    movl %ecx, 4(%esi)
; X86-NEXT:    movl %esi, %eax
; X86-NEXT:    addl $20, %esp
; X86-NEXT:    popl %esi
; X86-NEXT:    popl %edi
; X86-NEXT:    retl $4
entry:
  %conv = call fp128 @llvm.experimental.constrained.sitofp.f128.i32(i32 %x, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
  ret fp128 %conv
}

define fp128 @sitofp_i64(i64 %x) nounwind strictfp {
; X64-LABEL: sitofp_i64:
; X64:       # %bb.0: # %entry
; X64-NEXT:    pushq %rax
; X64-NEXT:    callq __floatditf@PLT
; X64-NEXT:    popq %rax
; X64-NEXT:    retq
;
; X86-LABEL: sitofp_i64:
; X86:       # %bb.0: # %entry
; X86-NEXT:    pushl %edi
; X86-NEXT:    pushl %esi
; X86-NEXT:    subl $20, %esp
; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
; X86-NEXT:    subl $4, %esp
; X86-NEXT:    leal {{[0-9]+}}(%esp), %eax
; X86-NEXT:    pushl {{[0-9]+}}(%esp)
; X86-NEXT:    pushl {{[0-9]+}}(%esp)
; X86-NEXT:    pushl %eax
; X86-NEXT:    calll __floatditf
; X86-NEXT:    addl $12, %esp
; X86-NEXT:    movl (%esp), %eax
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
; X86-NEXT:    movl {{[0-9]+}}(%esp), %edi
; X86-NEXT:    movl %edi, 8(%esi)
; X86-NEXT:    movl %edx, 12(%esi)
; X86-NEXT:    movl %eax, (%esi)
; X86-NEXT:    movl %ecx, 4(%esi)
; X86-NEXT:    movl %esi, %eax
; X86-NEXT:    addl $20, %esp
; X86-NEXT:    popl %esi
; X86-NEXT:    popl %edi
; X86-NEXT:    retl $4
entry:
  %conv = call fp128 @llvm.experimental.constrained.sitofp.f128.i64(i64 %x, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
  ret fp128 %conv
}

define fp128 @sitofp_i128(i128 %x) nounwind strictfp {
; X64-LABEL: sitofp_i128:
; X64:       # %bb.0: # %entry
; X64-NEXT:    pushq %rax
; X64-NEXT:    callq __floattitf@PLT
; X64-NEXT:    popq %rax
; X64-NEXT:    retq
;
; X86-LABEL: sitofp_i128:
; X86:       # %bb.0: # %entry
; X86-NEXT:    pushl %edi
; X86-NEXT:    pushl %esi
; X86-NEXT:    subl $20, %esp
; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
; X86-NEXT:    subl $12, %esp
; X86-NEXT:    leal {{[0-9]+}}(%esp), %eax
; X86-NEXT:    pushl {{[0-9]+}}(%esp)
; X86-NEXT:    pushl {{[0-9]+}}(%esp)
; X86-NEXT:    pushl {{[0-9]+}}(%esp)
; X86-NEXT:    pushl {{[0-9]+}}(%esp)
; X86-NEXT:    pushl %eax
; X86-NEXT:    calll __floattitf
; X86-NEXT:    addl $28, %esp
; X86-NEXT:    movl (%esp), %eax
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
; X86-NEXT:    movl {{[0-9]+}}(%esp), %edi
; X86-NEXT:    movl %edi, 8(%esi)
; X86-NEXT:    movl %edx, 12(%esi)
; X86-NEXT:    movl %eax, (%esi)
; X86-NEXT:    movl %ecx, 4(%esi)
; X86-NEXT:    movl %esi, %eax
; X86-NEXT:    addl $20, %esp
; X86-NEXT:    popl %esi
; X86-NEXT:    popl %edi
; X86-NEXT:    retl $4
entry:
  %conv = call fp128 @llvm.experimental.constrained.sitofp.f128.i128(i128 %x, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
  ret fp128 %conv
}

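; Unsigned integer -> fp128 conversions use __floatunsitf/__floatunditf/
; __floatuntitf for i32/i64/i128. Zero-extended i8/i16 operands are always
; non-negative, so x86-64 can use the signed __floatsitf, while i686 calls
; __floatunsitf.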
define fp128 @uitofp_i8(i8 %x) nounwind strictfp {
; X64-LABEL: uitofp_i8:
; X64:       # %bb.0: # %entry
; X64-NEXT:    pushq %rax
; X64-NEXT:    movzbl %dil, %edi
; X64-NEXT:    callq __floatsitf@PLT
; X64-NEXT:    popq %rax
; X64-NEXT:    retq
;
; X86-LABEL: uitofp_i8:
; X86:       # %bb.0: # %entry
; X86-NEXT:    pushl %edi
; X86-NEXT:    pushl %esi
; X86-NEXT:    subl $20, %esp
; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    subl $8, %esp
; X86-NEXT:    leal {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    pushl %eax
; X86-NEXT:    pushl %ecx
; X86-NEXT:    calll __floatunsitf
; X86-NEXT:    addl $12, %esp
; X86-NEXT:    movl (%esp), %eax
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
; X86-NEXT:    movl {{[0-9]+}}(%esp), %edi
; X86-NEXT:    movl %edi, 8(%esi)
; X86-NEXT:    movl %edx, 12(%esi)
; X86-NEXT:    movl %eax, (%esi)
; X86-NEXT:    movl %ecx, 4(%esi)
; X86-NEXT:    movl %esi, %eax
; X86-NEXT:    addl $20, %esp
; X86-NEXT:    popl %esi
; X86-NEXT:    popl %edi
; X86-NEXT:    retl $4
entry:
  %conv = call fp128 @llvm.experimental.constrained.uitofp.f128.i8(i8 %x, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
  ret fp128 %conv
}

define fp128 @uitofp_i16(i16 %x) nounwind strictfp {
; X64-LABEL: uitofp_i16:
; X64:       # %bb.0: # %entry
; X64-NEXT:    pushq %rax
; X64-NEXT:    movzwl %di, %edi
; X64-NEXT:    callq __floatsitf@PLT
; X64-NEXT:    popq %rax
; X64-NEXT:    retq
;
; X86-LABEL: uitofp_i16:
; X86:       # %bb.0: # %entry
; X86-NEXT:    pushl %edi
; X86-NEXT:    pushl %esi
; X86-NEXT:    subl $20, %esp
; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    subl $8, %esp
; X86-NEXT:    leal {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    pushl %eax
; X86-NEXT:    pushl %ecx
; X86-NEXT:    calll __floatunsitf
; X86-NEXT:    addl $12, %esp
; X86-NEXT:    movl (%esp), %eax
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
; X86-NEXT:    movl {{[0-9]+}}(%esp), %edi
; X86-NEXT:    movl %edi, 8(%esi)
; X86-NEXT:    movl %edx, 12(%esi)
; X86-NEXT:    movl %eax, (%esi)
; X86-NEXT:    movl %ecx, 4(%esi)
; X86-NEXT:    movl %esi, %eax
; X86-NEXT:    addl $20, %esp
; X86-NEXT:    popl %esi
; X86-NEXT:    popl %edi
; X86-NEXT:    retl $4
entry:
  %conv = call fp128 @llvm.experimental.constrained.uitofp.f128.i16(i16 %x, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
  ret fp128 %conv
}

define fp128 @uitofp_i32(i32 %x) nounwind strictfp {
; X64-LABEL: uitofp_i32:
; X64:       # %bb.0: # %entry
; X64-NEXT:    pushq %rax
; X64-NEXT:    callq __floatunsitf@PLT
; X64-NEXT:    popq %rax
; X64-NEXT:    retq
;
; X86-LABEL: uitofp_i32:
; X86:       # %bb.0: # %entry
; X86-NEXT:    pushl %edi
; X86-NEXT:    pushl %esi
; X86-NEXT:    subl $20, %esp
; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
; X86-NEXT:    subl $8, %esp
; X86-NEXT:    leal {{[0-9]+}}(%esp), %eax
; X86-NEXT:    pushl {{[0-9]+}}(%esp)
; X86-NEXT:    pushl %eax
; X86-NEXT:    calll __floatunsitf
; X86-NEXT:    addl $12, %esp
; X86-NEXT:    movl (%esp), %eax
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
; X86-NEXT:    movl {{[0-9]+}}(%esp), %edi
; X86-NEXT:    movl %edi, 8(%esi)
; X86-NEXT:    movl %edx, 12(%esi)
; X86-NEXT:    movl %eax, (%esi)
; X86-NEXT:    movl %ecx, 4(%esi)
; X86-NEXT:    movl %esi, %eax
; X86-NEXT:    addl $20, %esp
; X86-NEXT:    popl %esi
; X86-NEXT:    popl %edi
; X86-NEXT:    retl $4
entry:
  %conv = call fp128 @llvm.experimental.constrained.uitofp.f128.i32(i32 %x, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
  ret fp128 %conv
}

define fp128 @uitofp_i64(i64 %x) nounwind strictfp {
; X64-LABEL: uitofp_i64:
; X64:       # %bb.0: # %entry
; X64-NEXT:    pushq %rax
; X64-NEXT:    callq __floatunditf@PLT
; X64-NEXT:    popq %rax
; X64-NEXT:    retq
;
; X86-LABEL: uitofp_i64:
; X86:       # %bb.0: # %entry
; X86-NEXT:    pushl %edi
; X86-NEXT:    pushl %esi
; X86-NEXT:    subl $20, %esp
; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
; X86-NEXT:    subl $4, %esp
; X86-NEXT:    leal {{[0-9]+}}(%esp), %eax
; X86-NEXT:    pushl {{[0-9]+}}(%esp)
; X86-NEXT:    pushl {{[0-9]+}}(%esp)
; X86-NEXT:    pushl %eax
; X86-NEXT:    calll __floatunditf
; X86-NEXT:    addl $12, %esp
; X86-NEXT:    movl (%esp), %eax
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
; X86-NEXT:    movl {{[0-9]+}}(%esp), %edi
; X86-NEXT:    movl %edi, 8(%esi)
; X86-NEXT:    movl %edx, 12(%esi)
; X86-NEXT:    movl %eax, (%esi)
; X86-NEXT:    movl %ecx, 4(%esi)
; X86-NEXT:    movl %esi, %eax
; X86-NEXT:    addl $20, %esp
; X86-NEXT:    popl %esi
; X86-NEXT:    popl %edi
; X86-NEXT:    retl $4
entry:
  %conv = call fp128 @llvm.experimental.constrained.uitofp.f128.i64(i64 %x, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
  ret fp128 %conv
}

define fp128 @uitofp_i128(i128 %x) nounwind strictfp {
; X64-LABEL: uitofp_i128:
; X64:       # %bb.0: # %entry
; X64-NEXT:    pushq %rax
; X64-NEXT:    callq __floatuntitf@PLT
; X64-NEXT:    popq %rax
; X64-NEXT:    retq
;
; X86-LABEL: uitofp_i128:
; X86:       # %bb.0: # %entry
; X86-NEXT:    pushl %edi
; X86-NEXT:    pushl %esi
; X86-NEXT:    subl $20, %esp
; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
; X86-NEXT:    subl $12, %esp
; X86-NEXT:    leal {{[0-9]+}}(%esp), %eax
; X86-NEXT:    pushl {{[0-9]+}}(%esp)
; X86-NEXT:    pushl {{[0-9]+}}(%esp)
; X86-NEXT:    pushl {{[0-9]+}}(%esp)
; X86-NEXT:    pushl {{[0-9]+}}(%esp)
; X86-NEXT:    pushl %eax
; X86-NEXT:    calll __floatuntitf
; X86-NEXT:    addl $28, %esp
; X86-NEXT:    movl (%esp), %eax
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
; X86-NEXT:    movl {{[0-9]+}}(%esp), %edi
; X86-NEXT:    movl %edi, 8(%esi)
; X86-NEXT:    movl %edx, 12(%esi)
; X86-NEXT:    movl %eax, (%esi)
; X86-NEXT:    movl %ecx, 4(%esi)
; X86-NEXT:    movl %esi, %eax
; X86-NEXT:    addl $20, %esp
; X86-NEXT:    popl %esi
; X86-NEXT:    popl %edi
; X86-NEXT:    retl $4
entry:
  %conv = call fp128 @llvm.experimental.constrained.uitofp.f128.i128(i128 %x, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
  ret fp128 %conv
}

attributes #0 = { strictfp }

declare half @llvm.experimental.constrained.fptrunc.f16.f128(fp128, metadata, metadata)
declare float @llvm.experimental.constrained.fptrunc.f32.f128(fp128, metadata, metadata)
declare double @llvm.experimental.constrained.fptrunc.f64.f128(fp128, metadata, metadata)
declare x86_fp80 @llvm.experimental.constrained.fptrunc.f80.f128(fp128, metadata, metadata)
declare fp128 @llvm.experimental.constrained.fpext.f128.f16(half, metadata)
declare fp128 @llvm.experimental.constrained.fpext.f128.f32(float, metadata)
declare fp128 @llvm.experimental.constrained.fpext.f128.f64(double, metadata)
declare fp128 @llvm.experimental.constrained.fpext.f128.f80(x86_fp80, metadata)
declare i8 @llvm.experimental.constrained.fptosi.i8.f128(fp128, metadata)
declare i16 @llvm.experimental.constrained.fptosi.i16.f128(fp128, metadata)
declare i32 @llvm.experimental.constrained.fptosi.i32.f128(fp128, metadata)
declare i64 @llvm.experimental.constrained.fptosi.i64.f128(fp128, metadata)
declare i128 @llvm.experimental.constrained.fptosi.i128.f128(fp128, metadata)
declare i8 @llvm.experimental.constrained.fptoui.i8.f128(fp128, metadata)
declare i16 @llvm.experimental.constrained.fptoui.i16.f128(fp128, metadata)
declare i32 @llvm.experimental.constrained.fptoui.i32.f128(fp128, metadata)
declare i64 @llvm.experimental.constrained.fptoui.i64.f128(fp128, metadata)
declare i128 @llvm.experimental.constrained.fptoui.i128.f128(fp128, metadata)
declare fp128 @llvm.experimental.constrained.sitofp.f128.i8(i8, metadata, metadata)
declare fp128 @llvm.experimental.constrained.sitofp.f128.i16(i16, metadata, metadata)
declare fp128 @llvm.experimental.constrained.sitofp.f128.i32(i32, metadata, metadata)
declare fp128 @llvm.experimental.constrained.sitofp.f128.i64(i64, metadata, metadata)
declare fp128 @llvm.experimental.constrained.sitofp.f128.i128(i128, metadata, metadata)
declare fp128 @llvm.experimental.constrained.uitofp.f128.i8(i8, metadata, metadata)
declare fp128 @llvm.experimental.constrained.uitofp.f128.i16(i16, metadata, metadata)
declare fp128 @llvm.experimental.constrained.uitofp.f128.i32(i32, metadata, metadata)
declare fp128 @llvm.experimental.constrained.uitofp.f128.i64(i64, metadata, metadata)
declare fp128 @llvm.experimental.constrained.uitofp.f128.i128(i128, metadata, metadata)