xref: /llvm-project/llvm/test/CodeGen/X86/fp-strict-scalar-inttofp.ll (revision f0dd12ec5c0169ba5b4363b62d59511181cf954a)
1; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+sse2 -O3 | FileCheck %s --check-prefixes=SSE-X86
3; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 -O3 | FileCheck %s --check-prefixes=SSE-X64
4; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx -O3 | FileCheck %s --check-prefixes=AVX-X86,AVX1-X86
5; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx -O3 | FileCheck %s --check-prefixes=AVX-X64,AVX1-X64
6; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx512f -mattr=+avx512vl -O3 | FileCheck %s --check-prefixes=AVX-X86,AVX512-X86
7; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f -mattr=+avx512vl -O3 | FileCheck %s --check-prefixes=AVX-X64,AVX512-X64
8; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=-sse -O3 | FileCheck %s --check-prefixes=X87
9
10declare float @llvm.experimental.constrained.sitofp.f32.i1(i1, metadata, metadata)
11declare float @llvm.experimental.constrained.sitofp.f32.i8(i8, metadata, metadata)
12declare float @llvm.experimental.constrained.sitofp.f32.i16(i16, metadata, metadata)
13declare float @llvm.experimental.constrained.sitofp.f32.i32(i32, metadata, metadata)
14declare float @llvm.experimental.constrained.sitofp.f32.i64(i64, metadata, metadata)
15declare float @llvm.experimental.constrained.uitofp.f32.i1(i1, metadata, metadata)
16declare float @llvm.experimental.constrained.uitofp.f32.i8(i8, metadata, metadata)
17declare float @llvm.experimental.constrained.uitofp.f32.i16(i16, metadata, metadata)
18declare float @llvm.experimental.constrained.uitofp.f32.i32(i32, metadata, metadata)
19declare float @llvm.experimental.constrained.uitofp.f32.i64(i64, metadata, metadata)
20
21declare double @llvm.experimental.constrained.sitofp.f64.i1(i1, metadata, metadata)
22declare double @llvm.experimental.constrained.sitofp.f64.i8(i8, metadata, metadata)
23declare double @llvm.experimental.constrained.sitofp.f64.i16(i16, metadata, metadata)
24declare double @llvm.experimental.constrained.sitofp.f64.i32(i32, metadata, metadata)
25declare double @llvm.experimental.constrained.sitofp.f64.i64(i64, metadata, metadata)
26declare double @llvm.experimental.constrained.uitofp.f64.i1(i1, metadata, metadata)
27declare double @llvm.experimental.constrained.uitofp.f64.i8(i8, metadata, metadata)
28declare double @llvm.experimental.constrained.uitofp.f64.i16(i16, metadata, metadata)
29declare double @llvm.experimental.constrained.uitofp.f64.i32(i32, metadata, metadata)
30declare double @llvm.experimental.constrained.uitofp.f64.i64(i64, metadata, metadata)
31
; Strict (constrained) sitofp of an i1 to float. In the expected lowering the
; bit is masked with andb $1, materialized as 0/-1 with negb, sign-extended,
; then converted; the 32-bit targets round the result through memory so it can
; be returned on the x87 stack, and x87-only lowering uses filds.
32define float @sitofp_i1tof32(i1 %x) #0 {
33; SSE-X86-LABEL: sitofp_i1tof32:
34; SSE-X86:       # %bb.0:
35; SSE-X86-NEXT:    pushl %eax
36; SSE-X86-NEXT:    .cfi_def_cfa_offset 8
37; SSE-X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
38; SSE-X86-NEXT:    andb $1, %al
39; SSE-X86-NEXT:    negb %al
40; SSE-X86-NEXT:    movsbl %al, %eax
41; SSE-X86-NEXT:    cvtsi2ss %eax, %xmm0
42; SSE-X86-NEXT:    movss %xmm0, (%esp)
43; SSE-X86-NEXT:    flds (%esp)
44; SSE-X86-NEXT:    wait
45; SSE-X86-NEXT:    popl %eax
46; SSE-X86-NEXT:    .cfi_def_cfa_offset 4
47; SSE-X86-NEXT:    retl
48;
49; SSE-X64-LABEL: sitofp_i1tof32:
50; SSE-X64:       # %bb.0:
51; SSE-X64-NEXT:    andb $1, %dil
52; SSE-X64-NEXT:    negb %dil
53; SSE-X64-NEXT:    movsbl %dil, %eax
54; SSE-X64-NEXT:    cvtsi2ss %eax, %xmm0
55; SSE-X64-NEXT:    retq
56;
57; AVX-X86-LABEL: sitofp_i1tof32:
58; AVX-X86:       # %bb.0:
59; AVX-X86-NEXT:    pushl %eax
60; AVX-X86-NEXT:    .cfi_def_cfa_offset 8
61; AVX-X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
62; AVX-X86-NEXT:    andb $1, %al
63; AVX-X86-NEXT:    negb %al
64; AVX-X86-NEXT:    movsbl %al, %eax
65; AVX-X86-NEXT:    vcvtsi2ss %eax, %xmm0, %xmm0
66; AVX-X86-NEXT:    vmovss %xmm0, (%esp)
67; AVX-X86-NEXT:    flds (%esp)
68; AVX-X86-NEXT:    wait
69; AVX-X86-NEXT:    popl %eax
70; AVX-X86-NEXT:    .cfi_def_cfa_offset 4
71; AVX-X86-NEXT:    retl
72;
73; AVX-X64-LABEL: sitofp_i1tof32:
74; AVX-X64:       # %bb.0:
75; AVX-X64-NEXT:    andb $1, %dil
76; AVX-X64-NEXT:    negb %dil
77; AVX-X64-NEXT:    movsbl %dil, %eax
78; AVX-X64-NEXT:    vcvtsi2ss %eax, %xmm0, %xmm0
79; AVX-X64-NEXT:    retq
80;
81; X87-LABEL: sitofp_i1tof32:
82; X87:       # %bb.0:
83; X87-NEXT:    pushl %eax
84; X87-NEXT:    .cfi_def_cfa_offset 8
85; X87-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
86; X87-NEXT:    andb $1, %al
87; X87-NEXT:    negb %al
88; X87-NEXT:    movsbl %al, %eax
89; X87-NEXT:    movw %ax, {{[0-9]+}}(%esp)
90; X87-NEXT:    filds {{[0-9]+}}(%esp)
91; X87-NEXT:    wait
92; X87-NEXT:    popl %eax
93; X87-NEXT:    .cfi_def_cfa_offset 4
94; X87-NEXT:    retl
95  %result = call float @llvm.experimental.constrained.sitofp.f32.i1(i1 %x,
96                                               metadata !"round.dynamic",
97                                               metadata !"fpexcept.strict") #0
98  ret float %result
99}
100
; Strict sitofp i8 -> float. The byte argument is sign-extended with movsbl
; before the scalar integer-to-float convert; x87-only lowering stores a
; 16-bit value and loads it with filds.
101define float @sitofp_i8tof32(i8 %x) #0 {
102; SSE-X86-LABEL: sitofp_i8tof32:
103; SSE-X86:       # %bb.0:
104; SSE-X86-NEXT:    pushl %eax
105; SSE-X86-NEXT:    .cfi_def_cfa_offset 8
106; SSE-X86-NEXT:    movsbl {{[0-9]+}}(%esp), %eax
107; SSE-X86-NEXT:    cvtsi2ss %eax, %xmm0
108; SSE-X86-NEXT:    movss %xmm0, (%esp)
109; SSE-X86-NEXT:    flds (%esp)
110; SSE-X86-NEXT:    wait
111; SSE-X86-NEXT:    popl %eax
112; SSE-X86-NEXT:    .cfi_def_cfa_offset 4
113; SSE-X86-NEXT:    retl
114;
115; SSE-X64-LABEL: sitofp_i8tof32:
116; SSE-X64:       # %bb.0:
117; SSE-X64-NEXT:    movsbl %dil, %eax
118; SSE-X64-NEXT:    cvtsi2ss %eax, %xmm0
119; SSE-X64-NEXT:    retq
120;
121; AVX-X86-LABEL: sitofp_i8tof32:
122; AVX-X86:       # %bb.0:
123; AVX-X86-NEXT:    pushl %eax
124; AVX-X86-NEXT:    .cfi_def_cfa_offset 8
125; AVX-X86-NEXT:    movsbl {{[0-9]+}}(%esp), %eax
126; AVX-X86-NEXT:    vcvtsi2ss %eax, %xmm0, %xmm0
127; AVX-X86-NEXT:    vmovss %xmm0, (%esp)
128; AVX-X86-NEXT:    flds (%esp)
129; AVX-X86-NEXT:    wait
130; AVX-X86-NEXT:    popl %eax
131; AVX-X86-NEXT:    .cfi_def_cfa_offset 4
132; AVX-X86-NEXT:    retl
133;
134; AVX-X64-LABEL: sitofp_i8tof32:
135; AVX-X64:       # %bb.0:
136; AVX-X64-NEXT:    movsbl %dil, %eax
137; AVX-X64-NEXT:    vcvtsi2ss %eax, %xmm0, %xmm0
138; AVX-X64-NEXT:    retq
139;
140; X87-LABEL: sitofp_i8tof32:
141; X87:       # %bb.0:
142; X87-NEXT:    pushl %eax
143; X87-NEXT:    .cfi_def_cfa_offset 8
144; X87-NEXT:    movsbl {{[0-9]+}}(%esp), %eax
145; X87-NEXT:    movw %ax, {{[0-9]+}}(%esp)
146; X87-NEXT:    filds {{[0-9]+}}(%esp)
147; X87-NEXT:    wait
148; X87-NEXT:    popl %eax
149; X87-NEXT:    .cfi_def_cfa_offset 4
150; X87-NEXT:    retl
151  %result = call float @llvm.experimental.constrained.sitofp.f32.i8(i8 %x,
152                                               metadata !"round.dynamic",
153                                               metadata !"fpexcept.strict") #0
154  ret float %result
155}
156
; Strict sitofp i16 -> float. The halfword is sign-extended with movswl before
; the scalar convert on SSE/AVX; the x87-only path can load the 16-bit value
; directly with filds, which is already a signed load.
157define float @sitofp_i16tof32(i16 %x) #0 {
158; SSE-X86-LABEL: sitofp_i16tof32:
159; SSE-X86:       # %bb.0:
160; SSE-X86-NEXT:    pushl %eax
161; SSE-X86-NEXT:    .cfi_def_cfa_offset 8
162; SSE-X86-NEXT:    movswl {{[0-9]+}}(%esp), %eax
163; SSE-X86-NEXT:    cvtsi2ss %eax, %xmm0
164; SSE-X86-NEXT:    movss %xmm0, (%esp)
165; SSE-X86-NEXT:    flds (%esp)
166; SSE-X86-NEXT:    wait
167; SSE-X86-NEXT:    popl %eax
168; SSE-X86-NEXT:    .cfi_def_cfa_offset 4
169; SSE-X86-NEXT:    retl
170;
171; SSE-X64-LABEL: sitofp_i16tof32:
172; SSE-X64:       # %bb.0:
173; SSE-X64-NEXT:    movswl %di, %eax
174; SSE-X64-NEXT:    cvtsi2ss %eax, %xmm0
175; SSE-X64-NEXT:    retq
176;
177; AVX-X86-LABEL: sitofp_i16tof32:
178; AVX-X86:       # %bb.0:
179; AVX-X86-NEXT:    pushl %eax
180; AVX-X86-NEXT:    .cfi_def_cfa_offset 8
181; AVX-X86-NEXT:    movswl {{[0-9]+}}(%esp), %eax
182; AVX-X86-NEXT:    vcvtsi2ss %eax, %xmm0, %xmm0
183; AVX-X86-NEXT:    vmovss %xmm0, (%esp)
184; AVX-X86-NEXT:    flds (%esp)
185; AVX-X86-NEXT:    wait
186; AVX-X86-NEXT:    popl %eax
187; AVX-X86-NEXT:    .cfi_def_cfa_offset 4
188; AVX-X86-NEXT:    retl
189;
190; AVX-X64-LABEL: sitofp_i16tof32:
191; AVX-X64:       # %bb.0:
192; AVX-X64-NEXT:    movswl %di, %eax
193; AVX-X64-NEXT:    vcvtsi2ss %eax, %xmm0, %xmm0
194; AVX-X64-NEXT:    retq
195;
196; X87-LABEL: sitofp_i16tof32:
197; X87:       # %bb.0:
198; X87-NEXT:    pushl %eax
199; X87-NEXT:    .cfi_def_cfa_offset 8
200; X87-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
201; X87-NEXT:    movw %ax, {{[0-9]+}}(%esp)
202; X87-NEXT:    filds {{[0-9]+}}(%esp)
203; X87-NEXT:    wait
204; X87-NEXT:    popl %eax
205; X87-NEXT:    .cfi_def_cfa_offset 4
206; X87-NEXT:    retl
207  %result = call float @llvm.experimental.constrained.sitofp.f32.i16(i16 %x,
208                                               metadata !"round.dynamic",
209                                               metadata !"fpexcept.strict") #0
210  ret float %result
211}
212
; Strict sitofp i32 -> float. No extension is needed; SSE/AVX convert directly
; (with a memory operand on 32-bit targets), and the x87-only path spills the
; dword and loads it with fildl.
213define float @sitofp_i32tof32(i32 %x) #0 {
214; SSE-X86-LABEL: sitofp_i32tof32:
215; SSE-X86:       # %bb.0:
216; SSE-X86-NEXT:    pushl %eax
217; SSE-X86-NEXT:    .cfi_def_cfa_offset 8
218; SSE-X86-NEXT:    cvtsi2ssl {{[0-9]+}}(%esp), %xmm0
219; SSE-X86-NEXT:    movss %xmm0, (%esp)
220; SSE-X86-NEXT:    flds (%esp)
221; SSE-X86-NEXT:    wait
222; SSE-X86-NEXT:    popl %eax
223; SSE-X86-NEXT:    .cfi_def_cfa_offset 4
224; SSE-X86-NEXT:    retl
225;
226; SSE-X64-LABEL: sitofp_i32tof32:
227; SSE-X64:       # %bb.0:
228; SSE-X64-NEXT:    cvtsi2ss %edi, %xmm0
229; SSE-X64-NEXT:    retq
230;
231; AVX-X86-LABEL: sitofp_i32tof32:
232; AVX-X86:       # %bb.0:
233; AVX-X86-NEXT:    pushl %eax
234; AVX-X86-NEXT:    .cfi_def_cfa_offset 8
235; AVX-X86-NEXT:    vcvtsi2ssl {{[0-9]+}}(%esp), %xmm0, %xmm0
236; AVX-X86-NEXT:    vmovss %xmm0, (%esp)
237; AVX-X86-NEXT:    flds (%esp)
238; AVX-X86-NEXT:    wait
239; AVX-X86-NEXT:    popl %eax
240; AVX-X86-NEXT:    .cfi_def_cfa_offset 4
241; AVX-X86-NEXT:    retl
242;
243; AVX-X64-LABEL: sitofp_i32tof32:
244; AVX-X64:       # %bb.0:
245; AVX-X64-NEXT:    vcvtsi2ss %edi, %xmm0, %xmm0
246; AVX-X64-NEXT:    retq
247;
248; X87-LABEL: sitofp_i32tof32:
249; X87:       # %bb.0:
250; X87-NEXT:    pushl %eax
251; X87-NEXT:    .cfi_def_cfa_offset 8
252; X87-NEXT:    movl {{[0-9]+}}(%esp), %eax
253; X87-NEXT:    movl %eax, (%esp)
254; X87-NEXT:    fildl (%esp)
255; X87-NEXT:    wait
256; X87-NEXT:    popl %eax
257; X87-NEXT:    .cfi_def_cfa_offset 4
258; X87-NEXT:    retl
259  %result = call float @llvm.experimental.constrained.sitofp.f32.i32(i32 %x,
260                                               metadata !"round.dynamic",
261                                               metadata !"fpexcept.strict") #0
262  ret float %result
263}
264
; Strict sitofp i64 -> float. 32-bit targets have no 64-bit GPR convert, so
; even with SSE/AVX the lowering uses the x87 fildll (which handles a full
; signed 64-bit load); 64-bit targets convert %rdi directly.
265define float @sitofp_i64tof32(i64 %x) #0 {
266; SSE-X86-LABEL: sitofp_i64tof32:
267; SSE-X86:       # %bb.0:
268; SSE-X86-NEXT:    pushl %eax
269; SSE-X86-NEXT:    .cfi_def_cfa_offset 8
270; SSE-X86-NEXT:    fildll {{[0-9]+}}(%esp)
271; SSE-X86-NEXT:    fstps (%esp)
272; SSE-X86-NEXT:    flds (%esp)
273; SSE-X86-NEXT:    wait
274; SSE-X86-NEXT:    popl %eax
275; SSE-X86-NEXT:    .cfi_def_cfa_offset 4
276; SSE-X86-NEXT:    retl
277;
278; SSE-X64-LABEL: sitofp_i64tof32:
279; SSE-X64:       # %bb.0:
280; SSE-X64-NEXT:    cvtsi2ss %rdi, %xmm0
281; SSE-X64-NEXT:    retq
282;
283; AVX-X86-LABEL: sitofp_i64tof32:
284; AVX-X86:       # %bb.0:
285; AVX-X86-NEXT:    pushl %eax
286; AVX-X86-NEXT:    .cfi_def_cfa_offset 8
287; AVX-X86-NEXT:    fildll {{[0-9]+}}(%esp)
288; AVX-X86-NEXT:    fstps (%esp)
289; AVX-X86-NEXT:    flds (%esp)
290; AVX-X86-NEXT:    wait
291; AVX-X86-NEXT:    popl %eax
292; AVX-X86-NEXT:    .cfi_def_cfa_offset 4
293; AVX-X86-NEXT:    retl
294;
295; AVX-X64-LABEL: sitofp_i64tof32:
296; AVX-X64:       # %bb.0:
297; AVX-X64-NEXT:    vcvtsi2ss %rdi, %xmm0, %xmm0
298; AVX-X64-NEXT:    retq
299;
300; X87-LABEL: sitofp_i64tof32:
301; X87:       # %bb.0:
302; X87-NEXT:    fildll {{[0-9]+}}(%esp)
303; X87-NEXT:    wait
304; X87-NEXT:    retl
305  %result = call float @llvm.experimental.constrained.sitofp.f32.i64(i64 %x,
306                                               metadata !"round.dynamic",
307                                               metadata !"fpexcept.strict") #0
308  ret float %result
309}
310
; Strict (constrained) uitofp i1 -> float. Unlike the signed variant, the bit
; is only masked (andb $1) and zero-extended before the signed convert, since
; 0/1 is always in the signed range.
311define float @uitofp_i1tof32(i1 %x) #0 {
312; SSE-X86-LABEL: uitofp_i1tof32:
313; SSE-X86:       # %bb.0:
314; SSE-X86-NEXT:    pushl %eax
315; SSE-X86-NEXT:    .cfi_def_cfa_offset 8
316; SSE-X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
317; SSE-X86-NEXT:    andb $1, %al
318; SSE-X86-NEXT:    movzbl %al, %eax
319; SSE-X86-NEXT:    cvtsi2ss %eax, %xmm0
320; SSE-X86-NEXT:    movss %xmm0, (%esp)
321; SSE-X86-NEXT:    flds (%esp)
322; SSE-X86-NEXT:    wait
323; SSE-X86-NEXT:    popl %eax
324; SSE-X86-NEXT:    .cfi_def_cfa_offset 4
325; SSE-X86-NEXT:    retl
326;
327; SSE-X64-LABEL: uitofp_i1tof32:
328; SSE-X64:       # %bb.0:
329; SSE-X64-NEXT:    andl $1, %edi
330; SSE-X64-NEXT:    cvtsi2ss %edi, %xmm0
331; SSE-X64-NEXT:    retq
332;
333; AVX-X86-LABEL: uitofp_i1tof32:
334; AVX-X86:       # %bb.0:
335; AVX-X86-NEXT:    pushl %eax
336; AVX-X86-NEXT:    .cfi_def_cfa_offset 8
337; AVX-X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
338; AVX-X86-NEXT:    andb $1, %al
339; AVX-X86-NEXT:    movzbl %al, %eax
340; AVX-X86-NEXT:    vcvtsi2ss %eax, %xmm0, %xmm0
341; AVX-X86-NEXT:    vmovss %xmm0, (%esp)
342; AVX-X86-NEXT:    flds (%esp)
343; AVX-X86-NEXT:    wait
344; AVX-X86-NEXT:    popl %eax
345; AVX-X86-NEXT:    .cfi_def_cfa_offset 4
346; AVX-X86-NEXT:    retl
347;
348; AVX-X64-LABEL: uitofp_i1tof32:
349; AVX-X64:       # %bb.0:
350; AVX-X64-NEXT:    andl $1, %edi
351; AVX-X64-NEXT:    vcvtsi2ss %edi, %xmm0, %xmm0
352; AVX-X64-NEXT:    retq
353;
354; X87-LABEL: uitofp_i1tof32:
355; X87:       # %bb.0:
356; X87-NEXT:    pushl %eax
357; X87-NEXT:    .cfi_def_cfa_offset 8
358; X87-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
359; X87-NEXT:    andb $1, %al
360; X87-NEXT:    movzbl %al, %eax
361; X87-NEXT:    movw %ax, {{[0-9]+}}(%esp)
362; X87-NEXT:    filds {{[0-9]+}}(%esp)
363; X87-NEXT:    wait
364; X87-NEXT:    popl %eax
365; X87-NEXT:    .cfi_def_cfa_offset 4
366; X87-NEXT:    retl
367  %result = call float @llvm.experimental.constrained.uitofp.f32.i1(i1 %x,
368                                               metadata !"round.dynamic",
369                                               metadata !"fpexcept.strict") #0
370  ret float %result
371}
372
; Strict uitofp i8 -> float. The byte is zero-extended with movzbl; the value
; then fits in the signed i32 range, so the plain signed convert is correct.
373define float @uitofp_i8tof32(i8 %x) #0 {
374; SSE-X86-LABEL: uitofp_i8tof32:
375; SSE-X86:       # %bb.0:
376; SSE-X86-NEXT:    pushl %eax
377; SSE-X86-NEXT:    .cfi_def_cfa_offset 8
378; SSE-X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
379; SSE-X86-NEXT:    cvtsi2ss %eax, %xmm0
380; SSE-X86-NEXT:    movss %xmm0, (%esp)
381; SSE-X86-NEXT:    flds (%esp)
382; SSE-X86-NEXT:    wait
383; SSE-X86-NEXT:    popl %eax
384; SSE-X86-NEXT:    .cfi_def_cfa_offset 4
385; SSE-X86-NEXT:    retl
386;
387; SSE-X64-LABEL: uitofp_i8tof32:
388; SSE-X64:       # %bb.0:
389; SSE-X64-NEXT:    movzbl %dil, %eax
390; SSE-X64-NEXT:    cvtsi2ss %eax, %xmm0
391; SSE-X64-NEXT:    retq
392;
393; AVX-X86-LABEL: uitofp_i8tof32:
394; AVX-X86:       # %bb.0:
395; AVX-X86-NEXT:    pushl %eax
396; AVX-X86-NEXT:    .cfi_def_cfa_offset 8
397; AVX-X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
398; AVX-X86-NEXT:    vcvtsi2ss %eax, %xmm0, %xmm0
399; AVX-X86-NEXT:    vmovss %xmm0, (%esp)
400; AVX-X86-NEXT:    flds (%esp)
401; AVX-X86-NEXT:    wait
402; AVX-X86-NEXT:    popl %eax
403; AVX-X86-NEXT:    .cfi_def_cfa_offset 4
404; AVX-X86-NEXT:    retl
405;
406; AVX-X64-LABEL: uitofp_i8tof32:
407; AVX-X64:       # %bb.0:
408; AVX-X64-NEXT:    movzbl %dil, %eax
409; AVX-X64-NEXT:    vcvtsi2ss %eax, %xmm0, %xmm0
410; AVX-X64-NEXT:    retq
411;
412; X87-LABEL: uitofp_i8tof32:
413; X87:       # %bb.0:
414; X87-NEXT:    pushl %eax
415; X87-NEXT:    .cfi_def_cfa_offset 8
416; X87-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
417; X87-NEXT:    movw %ax, {{[0-9]+}}(%esp)
418; X87-NEXT:    filds {{[0-9]+}}(%esp)
419; X87-NEXT:    wait
420; X87-NEXT:    popl %eax
421; X87-NEXT:    .cfi_def_cfa_offset 4
422; X87-NEXT:    retl
423  %result = call float @llvm.experimental.constrained.uitofp.f32.i8(i8 %x,
424                                               metadata !"round.dynamic",
425                                               metadata !"fpexcept.strict") #0
426  ret float %result
427}
428
; Strict uitofp i16 -> float. Zero-extended with movzwl before the signed
; convert; the x87-only path must widen to a 32-bit store + fildl because a
; 16-bit filds would treat the value as signed.
429define float @uitofp_i16tof32(i16 %x) #0 {
430; SSE-X86-LABEL: uitofp_i16tof32:
431; SSE-X86:       # %bb.0:
432; SSE-X86-NEXT:    pushl %eax
433; SSE-X86-NEXT:    .cfi_def_cfa_offset 8
434; SSE-X86-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
435; SSE-X86-NEXT:    cvtsi2ss %eax, %xmm0
436; SSE-X86-NEXT:    movss %xmm0, (%esp)
437; SSE-X86-NEXT:    flds (%esp)
438; SSE-X86-NEXT:    wait
439; SSE-X86-NEXT:    popl %eax
440; SSE-X86-NEXT:    .cfi_def_cfa_offset 4
441; SSE-X86-NEXT:    retl
442;
443; SSE-X64-LABEL: uitofp_i16tof32:
444; SSE-X64:       # %bb.0:
445; SSE-X64-NEXT:    movzwl %di, %eax
446; SSE-X64-NEXT:    cvtsi2ss %eax, %xmm0
447; SSE-X64-NEXT:    retq
448;
449; AVX-X86-LABEL: uitofp_i16tof32:
450; AVX-X86:       # %bb.0:
451; AVX-X86-NEXT:    pushl %eax
452; AVX-X86-NEXT:    .cfi_def_cfa_offset 8
453; AVX-X86-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
454; AVX-X86-NEXT:    vcvtsi2ss %eax, %xmm0, %xmm0
455; AVX-X86-NEXT:    vmovss %xmm0, (%esp)
456; AVX-X86-NEXT:    flds (%esp)
457; AVX-X86-NEXT:    wait
458; AVX-X86-NEXT:    popl %eax
459; AVX-X86-NEXT:    .cfi_def_cfa_offset 4
460; AVX-X86-NEXT:    retl
461;
462; AVX-X64-LABEL: uitofp_i16tof32:
463; AVX-X64:       # %bb.0:
464; AVX-X64-NEXT:    movzwl %di, %eax
465; AVX-X64-NEXT:    vcvtsi2ss %eax, %xmm0, %xmm0
466; AVX-X64-NEXT:    retq
467;
468; X87-LABEL: uitofp_i16tof32:
469; X87:       # %bb.0:
470; X87-NEXT:    pushl %eax
471; X87-NEXT:    .cfi_def_cfa_offset 8
472; X87-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
473; X87-NEXT:    movl %eax, (%esp)
474; X87-NEXT:    fildl (%esp)
475; X87-NEXT:    wait
476; X87-NEXT:    popl %eax
477; X87-NEXT:    .cfi_def_cfa_offset 4
478; X87-NEXT:    retl
479  %result = call float @llvm.experimental.constrained.uitofp.f32.i16(i16 %x,
480                                               metadata !"round.dynamic",
481                                               metadata !"fpexcept.strict") #0
482  ret float %result
483}
484
; Strict uitofp i32 -> float. Without a native unsigned convert the value is
; widened to 64 bits: on x86-64 via a zero-extending movl + 64-bit signed
; convert, on 32-bit targets via an 8-byte slot (low dword = value, high
; dword = 0) read back with fildll. AVX-512 targets use vcvtusi2ss directly.
485define float @uitofp_i32tof32(i32 %x) #0 {
486; SSE-X86-LABEL: uitofp_i32tof32:
487; SSE-X86:       # %bb.0:
488; SSE-X86-NEXT:    pushl %ebp
489; SSE-X86-NEXT:    .cfi_def_cfa_offset 8
490; SSE-X86-NEXT:    .cfi_offset %ebp, -8
491; SSE-X86-NEXT:    movl %esp, %ebp
492; SSE-X86-NEXT:    .cfi_def_cfa_register %ebp
493; SSE-X86-NEXT:    andl $-8, %esp
494; SSE-X86-NEXT:    subl $16, %esp
495; SSE-X86-NEXT:    movl 8(%ebp), %eax
496; SSE-X86-NEXT:    movl %eax, {{[0-9]+}}(%esp)
497; SSE-X86-NEXT:    movl $0, {{[0-9]+}}(%esp)
498; SSE-X86-NEXT:    fildll {{[0-9]+}}(%esp)
499; SSE-X86-NEXT:    fstps {{[0-9]+}}(%esp)
500; SSE-X86-NEXT:    flds {{[0-9]+}}(%esp)
501; SSE-X86-NEXT:    wait
502; SSE-X86-NEXT:    movl %ebp, %esp
503; SSE-X86-NEXT:    popl %ebp
504; SSE-X86-NEXT:    .cfi_def_cfa %esp, 4
505; SSE-X86-NEXT:    retl
506;
507; SSE-X64-LABEL: uitofp_i32tof32:
508; SSE-X64:       # %bb.0:
509; SSE-X64-NEXT:    movl %edi, %eax
510; SSE-X64-NEXT:    cvtsi2ss %rax, %xmm0
511; SSE-X64-NEXT:    retq
512;
513; AVX1-X86-LABEL: uitofp_i32tof32:
514; AVX1-X86:       # %bb.0:
515; AVX1-X86-NEXT:    pushl %ebp
516; AVX1-X86-NEXT:    .cfi_def_cfa_offset 8
517; AVX1-X86-NEXT:    .cfi_offset %ebp, -8
518; AVX1-X86-NEXT:    movl %esp, %ebp
519; AVX1-X86-NEXT:    .cfi_def_cfa_register %ebp
520; AVX1-X86-NEXT:    andl $-8, %esp
521; AVX1-X86-NEXT:    subl $16, %esp
522; AVX1-X86-NEXT:    movl 8(%ebp), %eax
523; AVX1-X86-NEXT:    movl %eax, {{[0-9]+}}(%esp)
524; AVX1-X86-NEXT:    movl $0, {{[0-9]+}}(%esp)
525; AVX1-X86-NEXT:    fildll {{[0-9]+}}(%esp)
526; AVX1-X86-NEXT:    fstps {{[0-9]+}}(%esp)
527; AVX1-X86-NEXT:    flds {{[0-9]+}}(%esp)
528; AVX1-X86-NEXT:    wait
529; AVX1-X86-NEXT:    movl %ebp, %esp
530; AVX1-X86-NEXT:    popl %ebp
531; AVX1-X86-NEXT:    .cfi_def_cfa %esp, 4
532; AVX1-X86-NEXT:    retl
533;
534; AVX1-X64-LABEL: uitofp_i32tof32:
535; AVX1-X64:       # %bb.0:
536; AVX1-X64-NEXT:    movl %edi, %eax
537; AVX1-X64-NEXT:    vcvtsi2ss %rax, %xmm0, %xmm0
538; AVX1-X64-NEXT:    retq
539;
540; AVX512-X86-LABEL: uitofp_i32tof32:
541; AVX512-X86:       # %bb.0:
542; AVX512-X86-NEXT:    pushl %eax
543; AVX512-X86-NEXT:    .cfi_def_cfa_offset 8
544; AVX512-X86-NEXT:    vcvtusi2ssl {{[0-9]+}}(%esp), %xmm0, %xmm0
545; AVX512-X86-NEXT:    vmovss %xmm0, (%esp)
546; AVX512-X86-NEXT:    flds (%esp)
547; AVX512-X86-NEXT:    wait
548; AVX512-X86-NEXT:    popl %eax
549; AVX512-X86-NEXT:    .cfi_def_cfa_offset 4
550; AVX512-X86-NEXT:    retl
551;
552; AVX512-X64-LABEL: uitofp_i32tof32:
553; AVX512-X64:       # %bb.0:
554; AVX512-X64-NEXT:    vcvtusi2ss %edi, %xmm0, %xmm0
555; AVX512-X64-NEXT:    retq
556;
557; X87-LABEL: uitofp_i32tof32:
558; X87:       # %bb.0:
559; X87-NEXT:    pushl %ebp
560; X87-NEXT:    .cfi_def_cfa_offset 8
561; X87-NEXT:    .cfi_offset %ebp, -8
562; X87-NEXT:    movl %esp, %ebp
563; X87-NEXT:    .cfi_def_cfa_register %ebp
564; X87-NEXT:    andl $-8, %esp
565; X87-NEXT:    subl $8, %esp
566; X87-NEXT:    movl 8(%ebp), %eax
567; X87-NEXT:    movl %eax, (%esp)
568; X87-NEXT:    movl $0, {{[0-9]+}}(%esp)
569; X87-NEXT:    fildll (%esp)
570; X87-NEXT:    wait
571; X87-NEXT:    movl %ebp, %esp
572; X87-NEXT:    popl %ebp
573; X87-NEXT:    .cfi_def_cfa %esp, 4
574; X87-NEXT:    retl
575  %result = call float @llvm.experimental.constrained.uitofp.f32.i32(i32 %x,
576                                               metadata !"round.dynamic",
577                                               metadata !"fpexcept.strict") #0
578  ret float %result
579}
580
; Strict uitofp i64 -> float, the hardest case. 64-bit targets without AVX-512
; use the halve/round-to-odd trick (shr + or of the low bit, cmovns to keep
; the original when non-negative, then a conditional addss to double the
; result). 32-bit targets do a signed fildll then fadds a bias constant
; selected by the sign bit (shrl $31 indexing a 2-entry table). AVX512-X64
; converts directly with vcvtusi2ss.
581define float @uitofp_i64tof32(i64 %x) #0 {
582; SSE-X86-LABEL: uitofp_i64tof32:
583; SSE-X86:       # %bb.0:
584; SSE-X86-NEXT:    pushl %ebp
585; SSE-X86-NEXT:    .cfi_def_cfa_offset 8
586; SSE-X86-NEXT:    .cfi_offset %ebp, -8
587; SSE-X86-NEXT:    movl %esp, %ebp
588; SSE-X86-NEXT:    .cfi_def_cfa_register %ebp
589; SSE-X86-NEXT:    andl $-8, %esp
590; SSE-X86-NEXT:    subl $16, %esp
591; SSE-X86-NEXT:    movl 12(%ebp), %eax
592; SSE-X86-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
593; SSE-X86-NEXT:    movlps %xmm0, {{[0-9]+}}(%esp)
594; SSE-X86-NEXT:    shrl $31, %eax
595; SSE-X86-NEXT:    fildll {{[0-9]+}}(%esp)
596; SSE-X86-NEXT:    fadds {{\.?LCPI[0-9]+_[0-9]+}}(,%eax,4)
597; SSE-X86-NEXT:    fstps {{[0-9]+}}(%esp)
598; SSE-X86-NEXT:    wait
599; SSE-X86-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
600; SSE-X86-NEXT:    movss %xmm0, (%esp)
601; SSE-X86-NEXT:    flds (%esp)
602; SSE-X86-NEXT:    wait
603; SSE-X86-NEXT:    movl %ebp, %esp
604; SSE-X86-NEXT:    popl %ebp
605; SSE-X86-NEXT:    .cfi_def_cfa %esp, 4
606; SSE-X86-NEXT:    retl
607;
608; SSE-X64-LABEL: uitofp_i64tof32:
609; SSE-X64:       # %bb.0:
610; SSE-X64-NEXT:    movq %rdi, %rax
611; SSE-X64-NEXT:    shrq %rax
612; SSE-X64-NEXT:    movl %edi, %ecx
613; SSE-X64-NEXT:    andl $1, %ecx
614; SSE-X64-NEXT:    orq %rax, %rcx
615; SSE-X64-NEXT:    testq %rdi, %rdi
616; SSE-X64-NEXT:    cmovnsq %rdi, %rcx
617; SSE-X64-NEXT:    cvtsi2ss %rcx, %xmm0
618; SSE-X64-NEXT:    jns .LBB9_2
619; SSE-X64-NEXT:  # %bb.1:
620; SSE-X64-NEXT:    addss %xmm0, %xmm0
621; SSE-X64-NEXT:  .LBB9_2:
622; SSE-X64-NEXT:    retq
623;
624; AVX-X86-LABEL: uitofp_i64tof32:
625; AVX-X86:       # %bb.0:
626; AVX-X86-NEXT:    pushl %ebp
627; AVX-X86-NEXT:    .cfi_def_cfa_offset 8
628; AVX-X86-NEXT:    .cfi_offset %ebp, -8
629; AVX-X86-NEXT:    movl %esp, %ebp
630; AVX-X86-NEXT:    .cfi_def_cfa_register %ebp
631; AVX-X86-NEXT:    andl $-8, %esp
632; AVX-X86-NEXT:    subl $16, %esp
633; AVX-X86-NEXT:    movl 12(%ebp), %eax
634; AVX-X86-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
635; AVX-X86-NEXT:    vmovlps %xmm0, {{[0-9]+}}(%esp)
636; AVX-X86-NEXT:    shrl $31, %eax
637; AVX-X86-NEXT:    fildll {{[0-9]+}}(%esp)
638; AVX-X86-NEXT:    fadds {{\.?LCPI[0-9]+_[0-9]+}}(,%eax,4)
639; AVX-X86-NEXT:    fstps {{[0-9]+}}(%esp)
640; AVX-X86-NEXT:    wait
641; AVX-X86-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
642; AVX-X86-NEXT:    vmovss %xmm0, (%esp)
643; AVX-X86-NEXT:    flds (%esp)
644; AVX-X86-NEXT:    wait
645; AVX-X86-NEXT:    movl %ebp, %esp
646; AVX-X86-NEXT:    popl %ebp
647; AVX-X86-NEXT:    .cfi_def_cfa %esp, 4
648; AVX-X86-NEXT:    retl
649;
650; AVX1-X64-LABEL: uitofp_i64tof32:
651; AVX1-X64:       # %bb.0:
652; AVX1-X64-NEXT:    movq %rdi, %rax
653; AVX1-X64-NEXT:    shrq %rax
654; AVX1-X64-NEXT:    movl %edi, %ecx
655; AVX1-X64-NEXT:    andl $1, %ecx
656; AVX1-X64-NEXT:    orq %rax, %rcx
657; AVX1-X64-NEXT:    testq %rdi, %rdi
658; AVX1-X64-NEXT:    cmovnsq %rdi, %rcx
659; AVX1-X64-NEXT:    vcvtsi2ss %rcx, %xmm0, %xmm0
660; AVX1-X64-NEXT:    jns .LBB9_2
661; AVX1-X64-NEXT:  # %bb.1:
662; AVX1-X64-NEXT:    vaddss %xmm0, %xmm0, %xmm0
663; AVX1-X64-NEXT:  .LBB9_2:
664; AVX1-X64-NEXT:    retq
665;
666; AVX512-X64-LABEL: uitofp_i64tof32:
667; AVX512-X64:       # %bb.0:
668; AVX512-X64-NEXT:    vcvtusi2ss %rdi, %xmm0, %xmm0
669; AVX512-X64-NEXT:    retq
670;
671; X87-LABEL: uitofp_i64tof32:
672; X87:       # %bb.0:
673; X87-NEXT:    pushl %ebp
674; X87-NEXT:    .cfi_def_cfa_offset 8
675; X87-NEXT:    .cfi_offset %ebp, -8
676; X87-NEXT:    movl %esp, %ebp
677; X87-NEXT:    .cfi_def_cfa_register %ebp
678; X87-NEXT:    andl $-8, %esp
679; X87-NEXT:    subl $16, %esp
680; X87-NEXT:    movl 8(%ebp), %eax
681; X87-NEXT:    movl 12(%ebp), %ecx
682; X87-NEXT:    movl %ecx, {{[0-9]+}}(%esp)
683; X87-NEXT:    movl %eax, {{[0-9]+}}(%esp)
684; X87-NEXT:    shrl $31, %ecx
685; X87-NEXT:    fildll {{[0-9]+}}(%esp)
686; X87-NEXT:    fadds {{\.?LCPI[0-9]+_[0-9]+}}(,%ecx,4)
687; X87-NEXT:    fstps {{[0-9]+}}(%esp)
688; X87-NEXT:    flds {{[0-9]+}}(%esp)
689; X87-NEXT:    wait
690; X87-NEXT:    movl %ebp, %esp
691; X87-NEXT:    popl %ebp
692; X87-NEXT:    .cfi_def_cfa %esp, 4
693; X87-NEXT:    retl
694  %result = call float @llvm.experimental.constrained.uitofp.f32.i64(i64 %x,
695                                               metadata !"round.dynamic",
696                                               metadata !"fpexcept.strict") #0
697  ret float %result
698}
699
; Strict sitofp i8 -> double. Same movsbl sign-extension as the f32 variant;
; the 32-bit SSE/AVX paths set up a frame and 8-byte-align the stack so the
; double can be spilled (movsd/vmovsd) and reloaded with fldl for return.
700define double @sitofp_i8tof64(i8 %x) #0 {
701; SSE-X86-LABEL: sitofp_i8tof64:
702; SSE-X86:       # %bb.0:
703; SSE-X86-NEXT:    pushl %ebp
704; SSE-X86-NEXT:    .cfi_def_cfa_offset 8
705; SSE-X86-NEXT:    .cfi_offset %ebp, -8
706; SSE-X86-NEXT:    movl %esp, %ebp
707; SSE-X86-NEXT:    .cfi_def_cfa_register %ebp
708; SSE-X86-NEXT:    andl $-8, %esp
709; SSE-X86-NEXT:    subl $8, %esp
710; SSE-X86-NEXT:    movsbl 8(%ebp), %eax
711; SSE-X86-NEXT:    cvtsi2sd %eax, %xmm0
712; SSE-X86-NEXT:    movsd %xmm0, (%esp)
713; SSE-X86-NEXT:    fldl (%esp)
714; SSE-X86-NEXT:    wait
715; SSE-X86-NEXT:    movl %ebp, %esp
716; SSE-X86-NEXT:    popl %ebp
717; SSE-X86-NEXT:    .cfi_def_cfa %esp, 4
718; SSE-X86-NEXT:    retl
719;
720; SSE-X64-LABEL: sitofp_i8tof64:
721; SSE-X64:       # %bb.0:
722; SSE-X64-NEXT:    movsbl %dil, %eax
723; SSE-X64-NEXT:    cvtsi2sd %eax, %xmm0
724; SSE-X64-NEXT:    retq
725;
726; AVX-X86-LABEL: sitofp_i8tof64:
727; AVX-X86:       # %bb.0:
728; AVX-X86-NEXT:    pushl %ebp
729; AVX-X86-NEXT:    .cfi_def_cfa_offset 8
730; AVX-X86-NEXT:    .cfi_offset %ebp, -8
731; AVX-X86-NEXT:    movl %esp, %ebp
732; AVX-X86-NEXT:    .cfi_def_cfa_register %ebp
733; AVX-X86-NEXT:    andl $-8, %esp
734; AVX-X86-NEXT:    subl $8, %esp
735; AVX-X86-NEXT:    movsbl 8(%ebp), %eax
736; AVX-X86-NEXT:    vcvtsi2sd %eax, %xmm0, %xmm0
737; AVX-X86-NEXT:    vmovsd %xmm0, (%esp)
738; AVX-X86-NEXT:    fldl (%esp)
739; AVX-X86-NEXT:    wait
740; AVX-X86-NEXT:    movl %ebp, %esp
741; AVX-X86-NEXT:    popl %ebp
742; AVX-X86-NEXT:    .cfi_def_cfa %esp, 4
743; AVX-X86-NEXT:    retl
744;
745; AVX-X64-LABEL: sitofp_i8tof64:
746; AVX-X64:       # %bb.0:
747; AVX-X64-NEXT:    movsbl %dil, %eax
748; AVX-X64-NEXT:    vcvtsi2sd %eax, %xmm0, %xmm0
749; AVX-X64-NEXT:    retq
750;
751; X87-LABEL: sitofp_i8tof64:
752; X87:       # %bb.0:
753; X87-NEXT:    pushl %eax
754; X87-NEXT:    .cfi_def_cfa_offset 8
755; X87-NEXT:    movsbl {{[0-9]+}}(%esp), %eax
756; X87-NEXT:    movw %ax, {{[0-9]+}}(%esp)
757; X87-NEXT:    filds {{[0-9]+}}(%esp)
758; X87-NEXT:    wait
759; X87-NEXT:    popl %eax
760; X87-NEXT:    .cfi_def_cfa_offset 4
761; X87-NEXT:    retl
762  %result = call double @llvm.experimental.constrained.sitofp.f64.i8(i8 %x,
763                                               metadata !"round.dynamic",
764                                               metadata !"fpexcept.strict") #0
765  ret double %result
766}
767
; Strict sitofp i16 -> double. movswl sign-extends the halfword before the
; scalar convert; 32-bit SSE/AVX lowering spills the double and returns it
; via fldl, while x87-only lowering loads the 16-bit value with filds.
768define double @sitofp_i16tof64(i16 %x) #0 {
769; SSE-X86-LABEL: sitofp_i16tof64:
770; SSE-X86:       # %bb.0:
771; SSE-X86-NEXT:    pushl %ebp
772; SSE-X86-NEXT:    .cfi_def_cfa_offset 8
773; SSE-X86-NEXT:    .cfi_offset %ebp, -8
774; SSE-X86-NEXT:    movl %esp, %ebp
775; SSE-X86-NEXT:    .cfi_def_cfa_register %ebp
776; SSE-X86-NEXT:    andl $-8, %esp
777; SSE-X86-NEXT:    subl $8, %esp
778; SSE-X86-NEXT:    movswl 8(%ebp), %eax
779; SSE-X86-NEXT:    cvtsi2sd %eax, %xmm0
780; SSE-X86-NEXT:    movsd %xmm0, (%esp)
781; SSE-X86-NEXT:    fldl (%esp)
782; SSE-X86-NEXT:    wait
783; SSE-X86-NEXT:    movl %ebp, %esp
784; SSE-X86-NEXT:    popl %ebp
785; SSE-X86-NEXT:    .cfi_def_cfa %esp, 4
786; SSE-X86-NEXT:    retl
787;
788; SSE-X64-LABEL: sitofp_i16tof64:
789; SSE-X64:       # %bb.0:
790; SSE-X64-NEXT:    movswl %di, %eax
791; SSE-X64-NEXT:    cvtsi2sd %eax, %xmm0
792; SSE-X64-NEXT:    retq
793;
794; AVX-X86-LABEL: sitofp_i16tof64:
795; AVX-X86:       # %bb.0:
796; AVX-X86-NEXT:    pushl %ebp
797; AVX-X86-NEXT:    .cfi_def_cfa_offset 8
798; AVX-X86-NEXT:    .cfi_offset %ebp, -8
799; AVX-X86-NEXT:    movl %esp, %ebp
800; AVX-X86-NEXT:    .cfi_def_cfa_register %ebp
801; AVX-X86-NEXT:    andl $-8, %esp
802; AVX-X86-NEXT:    subl $8, %esp
803; AVX-X86-NEXT:    movswl 8(%ebp), %eax
804; AVX-X86-NEXT:    vcvtsi2sd %eax, %xmm0, %xmm0
805; AVX-X86-NEXT:    vmovsd %xmm0, (%esp)
806; AVX-X86-NEXT:    fldl (%esp)
807; AVX-X86-NEXT:    wait
808; AVX-X86-NEXT:    movl %ebp, %esp
809; AVX-X86-NEXT:    popl %ebp
810; AVX-X86-NEXT:    .cfi_def_cfa %esp, 4
811; AVX-X86-NEXT:    retl
812;
813; AVX-X64-LABEL: sitofp_i16tof64:
814; AVX-X64:       # %bb.0:
815; AVX-X64-NEXT:    movswl %di, %eax
816; AVX-X64-NEXT:    vcvtsi2sd %eax, %xmm0, %xmm0
817; AVX-X64-NEXT:    retq
818;
819; X87-LABEL: sitofp_i16tof64:
820; X87:       # %bb.0:
821; X87-NEXT:    pushl %eax
822; X87-NEXT:    .cfi_def_cfa_offset 8
823; X87-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
824; X87-NEXT:    movw %ax, {{[0-9]+}}(%esp)
825; X87-NEXT:    filds {{[0-9]+}}(%esp)
826; X87-NEXT:    wait
827; X87-NEXT:    popl %eax
828; X87-NEXT:    .cfi_def_cfa_offset 4
829; X87-NEXT:    retl
830  %result = call double @llvm.experimental.constrained.sitofp.f64.i16(i16 %x,
831                                               metadata !"round.dynamic",
832                                               metadata !"fpexcept.strict") #0
833  ret double %result
834}
835
; Strict sitofp i32 -> double. SSE/AVX convert directly (memory-operand form
; on 32-bit targets), then spill/fldl to return on the x87 stack; the
; x87-only path copies the dword and loads it with fildl.
836define double @sitofp_i32tof64(i32 %x) #0 {
837; SSE-X86-LABEL: sitofp_i32tof64:
838; SSE-X86:       # %bb.0:
839; SSE-X86-NEXT:    pushl %ebp
840; SSE-X86-NEXT:    .cfi_def_cfa_offset 8
841; SSE-X86-NEXT:    .cfi_offset %ebp, -8
842; SSE-X86-NEXT:    movl %esp, %ebp
843; SSE-X86-NEXT:    .cfi_def_cfa_register %ebp
844; SSE-X86-NEXT:    andl $-8, %esp
845; SSE-X86-NEXT:    subl $8, %esp
846; SSE-X86-NEXT:    cvtsi2sdl 8(%ebp), %xmm0
847; SSE-X86-NEXT:    movsd %xmm0, (%esp)
848; SSE-X86-NEXT:    fldl (%esp)
849; SSE-X86-NEXT:    wait
850; SSE-X86-NEXT:    movl %ebp, %esp
851; SSE-X86-NEXT:    popl %ebp
852; SSE-X86-NEXT:    .cfi_def_cfa %esp, 4
853; SSE-X86-NEXT:    retl
854;
855; SSE-X64-LABEL: sitofp_i32tof64:
856; SSE-X64:       # %bb.0:
857; SSE-X64-NEXT:    cvtsi2sd %edi, %xmm0
858; SSE-X64-NEXT:    retq
859;
860; AVX-X86-LABEL: sitofp_i32tof64:
861; AVX-X86:       # %bb.0:
862; AVX-X86-NEXT:    pushl %ebp
863; AVX-X86-NEXT:    .cfi_def_cfa_offset 8
864; AVX-X86-NEXT:    .cfi_offset %ebp, -8
865; AVX-X86-NEXT:    movl %esp, %ebp
866; AVX-X86-NEXT:    .cfi_def_cfa_register %ebp
867; AVX-X86-NEXT:    andl $-8, %esp
868; AVX-X86-NEXT:    subl $8, %esp
869; AVX-X86-NEXT:    vcvtsi2sdl 8(%ebp), %xmm0, %xmm0
870; AVX-X86-NEXT:    vmovsd %xmm0, (%esp)
871; AVX-X86-NEXT:    fldl (%esp)
872; AVX-X86-NEXT:    wait
873; AVX-X86-NEXT:    movl %ebp, %esp
874; AVX-X86-NEXT:    popl %ebp
875; AVX-X86-NEXT:    .cfi_def_cfa %esp, 4
876; AVX-X86-NEXT:    retl
877;
878; AVX-X64-LABEL: sitofp_i32tof64:
879; AVX-X64:       # %bb.0:
880; AVX-X64-NEXT:    vcvtsi2sd %edi, %xmm0, %xmm0
881; AVX-X64-NEXT:    retq
882;
883; X87-LABEL: sitofp_i32tof64:
884; X87:       # %bb.0:
885; X87-NEXT:    pushl %eax
886; X87-NEXT:    .cfi_def_cfa_offset 8
887; X87-NEXT:    movl {{[0-9]+}}(%esp), %eax
888; X87-NEXT:    movl %eax, (%esp)
889; X87-NEXT:    fildl (%esp)
890; X87-NEXT:    wait
891; X87-NEXT:    popl %eax
892; X87-NEXT:    .cfi_def_cfa_offset 4
893; X87-NEXT:    retl
894  %result = call double @llvm.experimental.constrained.sitofp.f64.i32(i32 %x,
895                                               metadata !"round.dynamic",
896                                               metadata !"fpexcept.strict") #0
897  ret double %result
898}
899
; Strict (constrained) signed i64 -> f64. 32-bit targets have no 64-bit
; cvtsi2sd, so they go through x87 fildll; 64-bit targets convert directly
; from %rdi.
define double @sitofp_i64tof64(i64 %x) #0 {
; SSE-X86-LABEL: sitofp_i64tof64:
; SSE-X86:       # %bb.0:
; SSE-X86-NEXT:    pushl %ebp
; SSE-X86-NEXT:    .cfi_def_cfa_offset 8
; SSE-X86-NEXT:    .cfi_offset %ebp, -8
; SSE-X86-NEXT:    movl %esp, %ebp
; SSE-X86-NEXT:    .cfi_def_cfa_register %ebp
; SSE-X86-NEXT:    andl $-8, %esp
; SSE-X86-NEXT:    subl $8, %esp
; SSE-X86-NEXT:    fildll 8(%ebp)
; SSE-X86-NEXT:    fstpl (%esp)
; SSE-X86-NEXT:    fldl (%esp)
; SSE-X86-NEXT:    wait
; SSE-X86-NEXT:    movl %ebp, %esp
; SSE-X86-NEXT:    popl %ebp
; SSE-X86-NEXT:    .cfi_def_cfa %esp, 4
; SSE-X86-NEXT:    retl
;
; SSE-X64-LABEL: sitofp_i64tof64:
; SSE-X64:       # %bb.0:
; SSE-X64-NEXT:    cvtsi2sd %rdi, %xmm0
; SSE-X64-NEXT:    retq
;
; AVX-X86-LABEL: sitofp_i64tof64:
; AVX-X86:       # %bb.0:
; AVX-X86-NEXT:    pushl %ebp
; AVX-X86-NEXT:    .cfi_def_cfa_offset 8
; AVX-X86-NEXT:    .cfi_offset %ebp, -8
; AVX-X86-NEXT:    movl %esp, %ebp
; AVX-X86-NEXT:    .cfi_def_cfa_register %ebp
; AVX-X86-NEXT:    andl $-8, %esp
; AVX-X86-NEXT:    subl $8, %esp
; AVX-X86-NEXT:    fildll 8(%ebp)
; AVX-X86-NEXT:    fstpl (%esp)
; AVX-X86-NEXT:    fldl (%esp)
; AVX-X86-NEXT:    wait
; AVX-X86-NEXT:    movl %ebp, %esp
; AVX-X86-NEXT:    popl %ebp
; AVX-X86-NEXT:    .cfi_def_cfa %esp, 4
; AVX-X86-NEXT:    retl
;
; AVX-X64-LABEL: sitofp_i64tof64:
; AVX-X64:       # %bb.0:
; AVX-X64-NEXT:    vcvtsi2sd %rdi, %xmm0, %xmm0
; AVX-X64-NEXT:    retq
;
; X87-LABEL: sitofp_i64tof64:
; X87:       # %bb.0:
; X87-NEXT:    fildll {{[0-9]+}}(%esp)
; X87-NEXT:    wait
; X87-NEXT:    retl
  ; Strict semantics: dynamic rounding mode, exceptions not suppressed.
  %result = call double @llvm.experimental.constrained.sitofp.f64.i64(i64 %x,
                                               metadata !"round.dynamic",
                                               metadata !"fpexcept.strict") #0
  ret double %result
}
957
; Strict (constrained) unsigned i1 -> f64. The input is masked to its low bit
; (andb/andl $1) and then converted as a small non-negative integer, so a
; plain signed convert (cvtsi2sd / filds) is exact.
define double @uitofp_i1tof64(i1 %x) #0 {
; SSE-X86-LABEL: uitofp_i1tof64:
; SSE-X86:       # %bb.0:
; SSE-X86-NEXT:    pushl %ebp
; SSE-X86-NEXT:    .cfi_def_cfa_offset 8
; SSE-X86-NEXT:    .cfi_offset %ebp, -8
; SSE-X86-NEXT:    movl %esp, %ebp
; SSE-X86-NEXT:    .cfi_def_cfa_register %ebp
; SSE-X86-NEXT:    andl $-8, %esp
; SSE-X86-NEXT:    subl $8, %esp
; SSE-X86-NEXT:    movzbl 8(%ebp), %eax
; SSE-X86-NEXT:    andb $1, %al
; SSE-X86-NEXT:    movzbl %al, %eax
; SSE-X86-NEXT:    cvtsi2sd %eax, %xmm0
; SSE-X86-NEXT:    movsd %xmm0, (%esp)
; SSE-X86-NEXT:    fldl (%esp)
; SSE-X86-NEXT:    wait
; SSE-X86-NEXT:    movl %ebp, %esp
; SSE-X86-NEXT:    popl %ebp
; SSE-X86-NEXT:    .cfi_def_cfa %esp, 4
; SSE-X86-NEXT:    retl
;
; SSE-X64-LABEL: uitofp_i1tof64:
; SSE-X64:       # %bb.0:
; SSE-X64-NEXT:    andl $1, %edi
; SSE-X64-NEXT:    cvtsi2sd %edi, %xmm0
; SSE-X64-NEXT:    retq
;
; AVX-X86-LABEL: uitofp_i1tof64:
; AVX-X86:       # %bb.0:
; AVX-X86-NEXT:    pushl %ebp
; AVX-X86-NEXT:    .cfi_def_cfa_offset 8
; AVX-X86-NEXT:    .cfi_offset %ebp, -8
; AVX-X86-NEXT:    movl %esp, %ebp
; AVX-X86-NEXT:    .cfi_def_cfa_register %ebp
; AVX-X86-NEXT:    andl $-8, %esp
; AVX-X86-NEXT:    subl $8, %esp
; AVX-X86-NEXT:    movzbl 8(%ebp), %eax
; AVX-X86-NEXT:    andb $1, %al
; AVX-X86-NEXT:    movzbl %al, %eax
; AVX-X86-NEXT:    vcvtsi2sd %eax, %xmm0, %xmm0
; AVX-X86-NEXT:    vmovsd %xmm0, (%esp)
; AVX-X86-NEXT:    fldl (%esp)
; AVX-X86-NEXT:    wait
; AVX-X86-NEXT:    movl %ebp, %esp
; AVX-X86-NEXT:    popl %ebp
; AVX-X86-NEXT:    .cfi_def_cfa %esp, 4
; AVX-X86-NEXT:    retl
;
; AVX-X64-LABEL: uitofp_i1tof64:
; AVX-X64:       # %bb.0:
; AVX-X64-NEXT:    andl $1, %edi
; AVX-X64-NEXT:    vcvtsi2sd %edi, %xmm0, %xmm0
; AVX-X64-NEXT:    retq
;
; X87-LABEL: uitofp_i1tof64:
; X87:       # %bb.0:
; X87-NEXT:    pushl %eax
; X87-NEXT:    .cfi_def_cfa_offset 8
; X87-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
; X87-NEXT:    andb $1, %al
; X87-NEXT:    movzbl %al, %eax
; X87-NEXT:    movw %ax, {{[0-9]+}}(%esp)
; X87-NEXT:    filds {{[0-9]+}}(%esp)
; X87-NEXT:    wait
; X87-NEXT:    popl %eax
; X87-NEXT:    .cfi_def_cfa_offset 4
; X87-NEXT:    retl
  ; Strict semantics: dynamic rounding mode, exceptions not suppressed.
  %result = call double @llvm.experimental.constrained.uitofp.f64.i1(i1 %x,
                                               metadata !"round.dynamic",
                                               metadata !"fpexcept.strict") #0
  ret double %result
}
1031
; Strict (constrained) unsigned i8 -> f64. The byte is zero-extended (movzbl)
; into a 32-bit register first, so the signed convert is exact for all u8
; values.
define double @uitofp_i8tof64(i8 %x) #0 {
; SSE-X86-LABEL: uitofp_i8tof64:
; SSE-X86:       # %bb.0:
; SSE-X86-NEXT:    pushl %ebp
; SSE-X86-NEXT:    .cfi_def_cfa_offset 8
; SSE-X86-NEXT:    .cfi_offset %ebp, -8
; SSE-X86-NEXT:    movl %esp, %ebp
; SSE-X86-NEXT:    .cfi_def_cfa_register %ebp
; SSE-X86-NEXT:    andl $-8, %esp
; SSE-X86-NEXT:    subl $8, %esp
; SSE-X86-NEXT:    movzbl 8(%ebp), %eax
; SSE-X86-NEXT:    cvtsi2sd %eax, %xmm0
; SSE-X86-NEXT:    movsd %xmm0, (%esp)
; SSE-X86-NEXT:    fldl (%esp)
; SSE-X86-NEXT:    wait
; SSE-X86-NEXT:    movl %ebp, %esp
; SSE-X86-NEXT:    popl %ebp
; SSE-X86-NEXT:    .cfi_def_cfa %esp, 4
; SSE-X86-NEXT:    retl
;
; SSE-X64-LABEL: uitofp_i8tof64:
; SSE-X64:       # %bb.0:
; SSE-X64-NEXT:    movzbl %dil, %eax
; SSE-X64-NEXT:    cvtsi2sd %eax, %xmm0
; SSE-X64-NEXT:    retq
;
; AVX-X86-LABEL: uitofp_i8tof64:
; AVX-X86:       # %bb.0:
; AVX-X86-NEXT:    pushl %ebp
; AVX-X86-NEXT:    .cfi_def_cfa_offset 8
; AVX-X86-NEXT:    .cfi_offset %ebp, -8
; AVX-X86-NEXT:    movl %esp, %ebp
; AVX-X86-NEXT:    .cfi_def_cfa_register %ebp
; AVX-X86-NEXT:    andl $-8, %esp
; AVX-X86-NEXT:    subl $8, %esp
; AVX-X86-NEXT:    movzbl 8(%ebp), %eax
; AVX-X86-NEXT:    vcvtsi2sd %eax, %xmm0, %xmm0
; AVX-X86-NEXT:    vmovsd %xmm0, (%esp)
; AVX-X86-NEXT:    fldl (%esp)
; AVX-X86-NEXT:    wait
; AVX-X86-NEXT:    movl %ebp, %esp
; AVX-X86-NEXT:    popl %ebp
; AVX-X86-NEXT:    .cfi_def_cfa %esp, 4
; AVX-X86-NEXT:    retl
;
; AVX-X64-LABEL: uitofp_i8tof64:
; AVX-X64:       # %bb.0:
; AVX-X64-NEXT:    movzbl %dil, %eax
; AVX-X64-NEXT:    vcvtsi2sd %eax, %xmm0, %xmm0
; AVX-X64-NEXT:    retq
;
; X87-LABEL: uitofp_i8tof64:
; X87:       # %bb.0:
; X87-NEXT:    pushl %eax
; X87-NEXT:    .cfi_def_cfa_offset 8
; X87-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
; X87-NEXT:    movw %ax, {{[0-9]+}}(%esp)
; X87-NEXT:    filds {{[0-9]+}}(%esp)
; X87-NEXT:    wait
; X87-NEXT:    popl %eax
; X87-NEXT:    .cfi_def_cfa_offset 4
; X87-NEXT:    retl
  ; Strict semantics: dynamic rounding mode, exceptions not suppressed.
  %result = call double @llvm.experimental.constrained.uitofp.f64.i8(i8 %x,
                                               metadata !"round.dynamic",
                                               metadata !"fpexcept.strict") #0
  ret double %result
}
1099
; Strict (constrained) unsigned i16 -> f64. The half-word is zero-extended
; (movzwl) before converting; the x87-only path stores the widened value and
; loads it with fildl.
define double @uitofp_i16tof64(i16 %x) #0 {
; SSE-X86-LABEL: uitofp_i16tof64:
; SSE-X86:       # %bb.0:
; SSE-X86-NEXT:    pushl %ebp
; SSE-X86-NEXT:    .cfi_def_cfa_offset 8
; SSE-X86-NEXT:    .cfi_offset %ebp, -8
; SSE-X86-NEXT:    movl %esp, %ebp
; SSE-X86-NEXT:    .cfi_def_cfa_register %ebp
; SSE-X86-NEXT:    andl $-8, %esp
; SSE-X86-NEXT:    subl $8, %esp
; SSE-X86-NEXT:    movzwl 8(%ebp), %eax
; SSE-X86-NEXT:    cvtsi2sd %eax, %xmm0
; SSE-X86-NEXT:    movsd %xmm0, (%esp)
; SSE-X86-NEXT:    fldl (%esp)
; SSE-X86-NEXT:    wait
; SSE-X86-NEXT:    movl %ebp, %esp
; SSE-X86-NEXT:    popl %ebp
; SSE-X86-NEXT:    .cfi_def_cfa %esp, 4
; SSE-X86-NEXT:    retl
;
; SSE-X64-LABEL: uitofp_i16tof64:
; SSE-X64:       # %bb.0:
; SSE-X64-NEXT:    movzwl %di, %eax
; SSE-X64-NEXT:    cvtsi2sd %eax, %xmm0
; SSE-X64-NEXT:    retq
;
; AVX-X86-LABEL: uitofp_i16tof64:
; AVX-X86:       # %bb.0:
; AVX-X86-NEXT:    pushl %ebp
; AVX-X86-NEXT:    .cfi_def_cfa_offset 8
; AVX-X86-NEXT:    .cfi_offset %ebp, -8
; AVX-X86-NEXT:    movl %esp, %ebp
; AVX-X86-NEXT:    .cfi_def_cfa_register %ebp
; AVX-X86-NEXT:    andl $-8, %esp
; AVX-X86-NEXT:    subl $8, %esp
; AVX-X86-NEXT:    movzwl 8(%ebp), %eax
; AVX-X86-NEXT:    vcvtsi2sd %eax, %xmm0, %xmm0
; AVX-X86-NEXT:    vmovsd %xmm0, (%esp)
; AVX-X86-NEXT:    fldl (%esp)
; AVX-X86-NEXT:    wait
; AVX-X86-NEXT:    movl %ebp, %esp
; AVX-X86-NEXT:    popl %ebp
; AVX-X86-NEXT:    .cfi_def_cfa %esp, 4
; AVX-X86-NEXT:    retl
;
; AVX-X64-LABEL: uitofp_i16tof64:
; AVX-X64:       # %bb.0:
; AVX-X64-NEXT:    movzwl %di, %eax
; AVX-X64-NEXT:    vcvtsi2sd %eax, %xmm0, %xmm0
; AVX-X64-NEXT:    retq
;
; X87-LABEL: uitofp_i16tof64:
; X87:       # %bb.0:
; X87-NEXT:    pushl %eax
; X87-NEXT:    .cfi_def_cfa_offset 8
; X87-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
; X87-NEXT:    movl %eax, (%esp)
; X87-NEXT:    fildl (%esp)
; X87-NEXT:    wait
; X87-NEXT:    popl %eax
; X87-NEXT:    .cfi_def_cfa_offset 4
; X87-NEXT:    retl
  ; Strict semantics: dynamic rounding mode, exceptions not suppressed.
  %result = call double @llvm.experimental.constrained.uitofp.f64.i16(i16 %x,
                                               metadata !"round.dynamic",
                                               metadata !"fpexcept.strict") #0
  ret double %result
}
1167
; Strict (constrained) unsigned i32 -> f64. Without a native unsigned convert,
; 32-bit SSE/AVX1 and X87 widen to a zero-padded 64-bit slot and use fildll;
; 64-bit SSE/AVX1 zero-extend into a 64-bit register and use the signed
; convert; AVX512 has vcvtusi2sd and converts directly.
define double @uitofp_i32tof64(i32 %x) #0 {
; SSE-X86-LABEL: uitofp_i32tof64:
; SSE-X86:       # %bb.0:
; SSE-X86-NEXT:    pushl %ebp
; SSE-X86-NEXT:    .cfi_def_cfa_offset 8
; SSE-X86-NEXT:    .cfi_offset %ebp, -8
; SSE-X86-NEXT:    movl %esp, %ebp
; SSE-X86-NEXT:    .cfi_def_cfa_register %ebp
; SSE-X86-NEXT:    andl $-8, %esp
; SSE-X86-NEXT:    subl $16, %esp
; SSE-X86-NEXT:    movl 8(%ebp), %eax
; SSE-X86-NEXT:    movl %eax, (%esp)
; SSE-X86-NEXT:    movl $0, {{[0-9]+}}(%esp)
; SSE-X86-NEXT:    fildll (%esp)
; SSE-X86-NEXT:    fstpl {{[0-9]+}}(%esp)
; SSE-X86-NEXT:    fldl {{[0-9]+}}(%esp)
; SSE-X86-NEXT:    wait
; SSE-X86-NEXT:    movl %ebp, %esp
; SSE-X86-NEXT:    popl %ebp
; SSE-X86-NEXT:    .cfi_def_cfa %esp, 4
; SSE-X86-NEXT:    retl
;
; SSE-X64-LABEL: uitofp_i32tof64:
; SSE-X64:       # %bb.0:
; SSE-X64-NEXT:    movl %edi, %eax
; SSE-X64-NEXT:    cvtsi2sd %rax, %xmm0
; SSE-X64-NEXT:    retq
;
; AVX1-X86-LABEL: uitofp_i32tof64:
; AVX1-X86:       # %bb.0:
; AVX1-X86-NEXT:    pushl %ebp
; AVX1-X86-NEXT:    .cfi_def_cfa_offset 8
; AVX1-X86-NEXT:    .cfi_offset %ebp, -8
; AVX1-X86-NEXT:    movl %esp, %ebp
; AVX1-X86-NEXT:    .cfi_def_cfa_register %ebp
; AVX1-X86-NEXT:    andl $-8, %esp
; AVX1-X86-NEXT:    subl $16, %esp
; AVX1-X86-NEXT:    movl 8(%ebp), %eax
; AVX1-X86-NEXT:    movl %eax, (%esp)
; AVX1-X86-NEXT:    movl $0, {{[0-9]+}}(%esp)
; AVX1-X86-NEXT:    fildll (%esp)
; AVX1-X86-NEXT:    fstpl {{[0-9]+}}(%esp)
; AVX1-X86-NEXT:    fldl {{[0-9]+}}(%esp)
; AVX1-X86-NEXT:    wait
; AVX1-X86-NEXT:    movl %ebp, %esp
; AVX1-X86-NEXT:    popl %ebp
; AVX1-X86-NEXT:    .cfi_def_cfa %esp, 4
; AVX1-X86-NEXT:    retl
;
; AVX1-X64-LABEL: uitofp_i32tof64:
; AVX1-X64:       # %bb.0:
; AVX1-X64-NEXT:    movl %edi, %eax
; AVX1-X64-NEXT:    vcvtsi2sd %rax, %xmm0, %xmm0
; AVX1-X64-NEXT:    retq
;
; AVX512-X86-LABEL: uitofp_i32tof64:
; AVX512-X86:       # %bb.0:
; AVX512-X86-NEXT:    pushl %ebp
; AVX512-X86-NEXT:    .cfi_def_cfa_offset 8
; AVX512-X86-NEXT:    .cfi_offset %ebp, -8
; AVX512-X86-NEXT:    movl %esp, %ebp
; AVX512-X86-NEXT:    .cfi_def_cfa_register %ebp
; AVX512-X86-NEXT:    andl $-8, %esp
; AVX512-X86-NEXT:    subl $8, %esp
; AVX512-X86-NEXT:    vcvtusi2sdl 8(%ebp), %xmm0, %xmm0
; AVX512-X86-NEXT:    vmovsd %xmm0, (%esp)
; AVX512-X86-NEXT:    fldl (%esp)
; AVX512-X86-NEXT:    wait
; AVX512-X86-NEXT:    movl %ebp, %esp
; AVX512-X86-NEXT:    popl %ebp
; AVX512-X86-NEXT:    .cfi_def_cfa %esp, 4
; AVX512-X86-NEXT:    retl
;
; AVX512-X64-LABEL: uitofp_i32tof64:
; AVX512-X64:       # %bb.0:
; AVX512-X64-NEXT:    vcvtusi2sd %edi, %xmm0, %xmm0
; AVX512-X64-NEXT:    retq
;
; X87-LABEL: uitofp_i32tof64:
; X87:       # %bb.0:
; X87-NEXT:    pushl %ebp
; X87-NEXT:    .cfi_def_cfa_offset 8
; X87-NEXT:    .cfi_offset %ebp, -8
; X87-NEXT:    movl %esp, %ebp
; X87-NEXT:    .cfi_def_cfa_register %ebp
; X87-NEXT:    andl $-8, %esp
; X87-NEXT:    subl $8, %esp
; X87-NEXT:    movl 8(%ebp), %eax
; X87-NEXT:    movl %eax, (%esp)
; X87-NEXT:    movl $0, {{[0-9]+}}(%esp)
; X87-NEXT:    fildll (%esp)
; X87-NEXT:    wait
; X87-NEXT:    movl %ebp, %esp
; X87-NEXT:    popl %ebp
; X87-NEXT:    .cfi_def_cfa %esp, 4
; X87-NEXT:    retl
  ; Strict semantics: dynamic rounding mode, exceptions not suppressed.
  %result = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 %x,
                                               metadata !"round.dynamic",
                                               metadata !"fpexcept.strict") #0
  ret double %result
}
1269
; Strict (constrained) unsigned i64 -> f64 — the hardest case. 32-bit targets
; do a signed fildll, then fadds a constant selected by the sign bit
; (shrl $31 into the LCPI table index) to compensate when the value was
; >= 2^63. 64-bit SSE/AVX1 use the shift/or/round-to-odd trick with a
; conditional double (addsd %xmm0,%xmm0) on negative inputs. AVX512 converts
; directly with vcvtusi2sd.
define double @uitofp_i64tof64(i64 %x) #0 {
; SSE-X86-LABEL: uitofp_i64tof64:
; SSE-X86:       # %bb.0:
; SSE-X86-NEXT:    pushl %ebp
; SSE-X86-NEXT:    .cfi_def_cfa_offset 8
; SSE-X86-NEXT:    .cfi_offset %ebp, -8
; SSE-X86-NEXT:    movl %esp, %ebp
; SSE-X86-NEXT:    .cfi_def_cfa_register %ebp
; SSE-X86-NEXT:    andl $-8, %esp
; SSE-X86-NEXT:    subl $24, %esp
; SSE-X86-NEXT:    movl 12(%ebp), %eax
; SSE-X86-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; SSE-X86-NEXT:    movlps %xmm0, {{[0-9]+}}(%esp)
; SSE-X86-NEXT:    shrl $31, %eax
; SSE-X86-NEXT:    fildll {{[0-9]+}}(%esp)
; SSE-X86-NEXT:    fadds {{\.?LCPI[0-9]+_[0-9]+}}(,%eax,4)
; SSE-X86-NEXT:    fstpl {{[0-9]+}}(%esp)
; SSE-X86-NEXT:    wait
; SSE-X86-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; SSE-X86-NEXT:    movsd %xmm0, (%esp)
; SSE-X86-NEXT:    fldl (%esp)
; SSE-X86-NEXT:    wait
; SSE-X86-NEXT:    movl %ebp, %esp
; SSE-X86-NEXT:    popl %ebp
; SSE-X86-NEXT:    .cfi_def_cfa %esp, 4
; SSE-X86-NEXT:    retl
;
; SSE-X64-LABEL: uitofp_i64tof64:
; SSE-X64:       # %bb.0:
; SSE-X64-NEXT:    movq %rdi, %rax
; SSE-X64-NEXT:    shrq %rax
; SSE-X64-NEXT:    movl %edi, %ecx
; SSE-X64-NEXT:    andl $1, %ecx
; SSE-X64-NEXT:    orq %rax, %rcx
; SSE-X64-NEXT:    testq %rdi, %rdi
; SSE-X64-NEXT:    cmovnsq %rdi, %rcx
; SSE-X64-NEXT:    cvtsi2sd %rcx, %xmm0
; SSE-X64-NEXT:    jns .LBB18_2
; SSE-X64-NEXT:  # %bb.1:
; SSE-X64-NEXT:    addsd %xmm0, %xmm0
; SSE-X64-NEXT:  .LBB18_2:
; SSE-X64-NEXT:    retq
;
; AVX-X86-LABEL: uitofp_i64tof64:
; AVX-X86:       # %bb.0:
; AVX-X86-NEXT:    pushl %ebp
; AVX-X86-NEXT:    .cfi_def_cfa_offset 8
; AVX-X86-NEXT:    .cfi_offset %ebp, -8
; AVX-X86-NEXT:    movl %esp, %ebp
; AVX-X86-NEXT:    .cfi_def_cfa_register %ebp
; AVX-X86-NEXT:    andl $-8, %esp
; AVX-X86-NEXT:    subl $24, %esp
; AVX-X86-NEXT:    movl 12(%ebp), %eax
; AVX-X86-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-X86-NEXT:    vmovlps %xmm0, {{[0-9]+}}(%esp)
; AVX-X86-NEXT:    shrl $31, %eax
; AVX-X86-NEXT:    fildll {{[0-9]+}}(%esp)
; AVX-X86-NEXT:    fadds {{\.?LCPI[0-9]+_[0-9]+}}(,%eax,4)
; AVX-X86-NEXT:    fstpl {{[0-9]+}}(%esp)
; AVX-X86-NEXT:    wait
; AVX-X86-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-X86-NEXT:    vmovsd %xmm0, (%esp)
; AVX-X86-NEXT:    fldl (%esp)
; AVX-X86-NEXT:    wait
; AVX-X86-NEXT:    movl %ebp, %esp
; AVX-X86-NEXT:    popl %ebp
; AVX-X86-NEXT:    .cfi_def_cfa %esp, 4
; AVX-X86-NEXT:    retl
;
; AVX1-X64-LABEL: uitofp_i64tof64:
; AVX1-X64:       # %bb.0:
; AVX1-X64-NEXT:    movq %rdi, %rax
; AVX1-X64-NEXT:    shrq %rax
; AVX1-X64-NEXT:    movl %edi, %ecx
; AVX1-X64-NEXT:    andl $1, %ecx
; AVX1-X64-NEXT:    orq %rax, %rcx
; AVX1-X64-NEXT:    testq %rdi, %rdi
; AVX1-X64-NEXT:    cmovnsq %rdi, %rcx
; AVX1-X64-NEXT:    vcvtsi2sd %rcx, %xmm0, %xmm0
; AVX1-X64-NEXT:    jns .LBB18_2
; AVX1-X64-NEXT:  # %bb.1:
; AVX1-X64-NEXT:    vaddsd %xmm0, %xmm0, %xmm0
; AVX1-X64-NEXT:  .LBB18_2:
; AVX1-X64-NEXT:    retq
;
; AVX512-X64-LABEL: uitofp_i64tof64:
; AVX512-X64:       # %bb.0:
; AVX512-X64-NEXT:    vcvtusi2sd %rdi, %xmm0, %xmm0
; AVX512-X64-NEXT:    retq
;
; X87-LABEL: uitofp_i64tof64:
; X87:       # %bb.0:
; X87-NEXT:    pushl %ebp
; X87-NEXT:    .cfi_def_cfa_offset 8
; X87-NEXT:    .cfi_offset %ebp, -8
; X87-NEXT:    movl %esp, %ebp
; X87-NEXT:    .cfi_def_cfa_register %ebp
; X87-NEXT:    andl $-8, %esp
; X87-NEXT:    subl $16, %esp
; X87-NEXT:    movl 8(%ebp), %eax
; X87-NEXT:    movl 12(%ebp), %ecx
; X87-NEXT:    movl %ecx, {{[0-9]+}}(%esp)
; X87-NEXT:    movl %eax, (%esp)
; X87-NEXT:    shrl $31, %ecx
; X87-NEXT:    fildll (%esp)
; X87-NEXT:    fadds {{\.?LCPI[0-9]+_[0-9]+}}(,%ecx,4)
; X87-NEXT:    fstpl {{[0-9]+}}(%esp)
; X87-NEXT:    fldl {{[0-9]+}}(%esp)
; X87-NEXT:    wait
; X87-NEXT:    movl %ebp, %esp
; X87-NEXT:    popl %ebp
; X87-NEXT:    .cfi_def_cfa %esp, 4
; X87-NEXT:    retl
  ; Strict semantics: dynamic rounding mode, exceptions not suppressed.
  %result = call double @llvm.experimental.constrained.uitofp.f64.i64(i64 %x,
                                               metadata !"round.dynamic",
                                               metadata !"fpexcept.strict") #0
  ret double %result
}
1388
1389attributes #0 = { strictfp }
1390