; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=aarch64 -o - %s  -mattr=+neon,+fullfp16 | FileCheck %s

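; Round-trip a value through fp -> int -> fp (fptosi/sitofp and the unsigned
; variants). With +fullfp16, the double/float/half cases below should lower to
; a single fcvtz[su]/[su]cvtf pair that stays in FP/SIMD registers, with no
; detour through a GPR.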
define double @t1(double %x) {
; CHECK-LABEL: t1:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    fcvtzs d0, d0
; CHECK-NEXT:    scvtf d0, d0
; CHECK-NEXT:    ret
entry:
  %conv = fptosi double %x to i64
  %conv1 = sitofp i64 %conv to double
  ret double %conv1
}

define float @t2(float %x) {
; CHECK-LABEL: t2:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    fcvtzs s0, s0
; CHECK-NEXT:    scvtf s0, s0
; CHECK-NEXT:    ret
entry:
  %conv = fptosi float %x to i32
  %conv1 = sitofp i32 %conv to float
  ret float %conv1
}

define half @t3(half %x)  {
; CHECK-LABEL: t3:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    fcvtzs h0, h0
; CHECK-NEXT:    scvtf h0, h0
; CHECK-NEXT:    ret
entry:
  %conv = fptosi half %x to i32
  %conv1 = sitofp i32 %conv to half
  ret half %conv1
}

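; Unsigned versions of the same round trips: expect fcvtzu/ucvtf instead of
; fcvtzs/scvtf.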
define double @t4(double %x) {
; CHECK-LABEL: t4:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    fcvtzu d0, d0
; CHECK-NEXT:    ucvtf d0, d0
; CHECK-NEXT:    ret
entry:
  %conv = fptoui double %x to i64
  %conv1 = uitofp i64 %conv to double
  ret double %conv1
}

define float @t5(float %x) {
; CHECK-LABEL: t5:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    fcvtzu s0, s0
; CHECK-NEXT:    ucvtf s0, s0
; CHECK-NEXT:    ret
entry:
  %conv = fptoui float %x to i32
  %conv1 = uitofp i32 %conv to float
  ret float %conv1
}

define half @t6(half %x)  {
; CHECK-LABEL: t6:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    fcvtzu h0, h0
; CHECK-NEXT:    ucvtf h0, h0
; CHECK-NEXT:    ret
entry:
  %conv = fptoui half %x to i32
  %conv1 = uitofp i32 %conv to half
  ret half %conv1
}

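; bf16 has no scalar conversion instructions, so the input is widened to f32
; with a left shift by 16 (shll), the int-to-fp result is produced as f64 and
; narrowed with fcvtxn (round to odd) to avoid double rounding, and the final
; bf16 is formed by hand with round-to-nearest-even (the 0x7fff bias plus the
; low bit of the truncated mantissa).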
define bfloat @t7(bfloat %x)  {
; CHECK-LABEL: t7:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    // kill: def $h0 killed $h0 def $d0
; CHECK-NEXT:    mov w8, #32767 // =0x7fff
; CHECK-NEXT:    shll v0.4s, v0.4h, #16
; CHECK-NEXT:    fcvtzs w9, s0
; CHECK-NEXT:    scvtf d0, w9
; CHECK-NEXT:    fcvtxn s0, d0
; CHECK-NEXT:    fmov w9, s0
; CHECK-NEXT:    ubfx w10, w9, #16, #1
; CHECK-NEXT:    add w8, w9, w8
; CHECK-NEXT:    add w8, w10, w8
; CHECK-NEXT:    lsr w8, w8, #16
; CHECK-NEXT:    fmov s0, w8
; CHECK-NEXT:    // kill: def $h0 killed $h0 killed $s0
; CHECK-NEXT:    ret
entry:
  %conv = fptosi bfloat %x to i32
  %conv1 = sitofp i32 %conv to bfloat
  ret bfloat %conv1
}

define bfloat @t8(bfloat %x)  {
; CHECK-LABEL: t8:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    // kill: def $h0 killed $h0 def $d0
; CHECK-NEXT:    mov w8, #32767 // =0x7fff
; CHECK-NEXT:    shll v0.4s, v0.4h, #16
; CHECK-NEXT:    fcvtzu w9, s0
; CHECK-NEXT:    ucvtf d0, w9
; CHECK-NEXT:    fcvtxn s0, d0
; CHECK-NEXT:    fmov w9, s0
; CHECK-NEXT:    ubfx w10, w9, #16, #1
; CHECK-NEXT:    add w8, w9, w8
; CHECK-NEXT:    add w8, w10, w8
; CHECK-NEXT:    lsr w8, w8, #16
; CHECK-NEXT:    fmov s0, w8
; CHECK-NEXT:    // kill: def $h0 killed $h0 killed $s0
; CHECK-NEXT:    ret
entry:
  %conv = fptoui bfloat %x to i32
  %conv1 = uitofp i32 %conv to bfloat
  ret bfloat %conv1
}

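; The same round trips expressed with constrained intrinsics under strictfp;
; the expected codegen matches the non-strict versions above.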
define double @t1_strict(double %x) #0 {
; CHECK-LABEL: t1_strict:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    fcvtzs d0, d0
; CHECK-NEXT:    scvtf d0, d0
; CHECK-NEXT:    ret
entry:
  %conv = call i64 @llvm.experimental.constrained.fptosi.i64.f64(double %x, metadata !"fpexcept.strict") #0
  %conv1 = call double @llvm.experimental.constrained.sitofp.f64.i64(i64 %conv, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
  ret double %conv1
}

define float @t2_strict(float %x) #0 {
; CHECK-LABEL: t2_strict:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    fcvtzs s0, s0
; CHECK-NEXT:    scvtf s0, s0
; CHECK-NEXT:    ret
entry:
  %conv = call i32 @llvm.experimental.constrained.fptosi.i32.f32(float %x, metadata !"fpexcept.strict") #0
  %conv1 = call float @llvm.experimental.constrained.sitofp.f32.i32(i32 %conv, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
  ret float %conv1
}

define half @t3_strict(half %x) #0 {
; CHECK-LABEL: t3_strict:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    fcvtzs h0, h0
; CHECK-NEXT:    scvtf h0, h0
; CHECK-NEXT:    ret
entry:
  %conv = call i32 @llvm.experimental.constrained.fptosi.i32.f16(half %x, metadata !"fpexcept.strict") #0
  %conv1 = call half @llvm.experimental.constrained.sitofp.f16.i32(i32 %conv, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
  ret half %conv1
}

define double @t4_strict(double %x) #0 {
; CHECK-LABEL: t4_strict:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    fcvtzu d0, d0
; CHECK-NEXT:    ucvtf d0, d0
; CHECK-NEXT:    ret
entry:
  %conv = call i64 @llvm.experimental.constrained.fptoui.i64.f64(double %x, metadata !"fpexcept.strict") #0
  %conv1 = call double @llvm.experimental.constrained.uitofp.f64.i64(i64 %conv, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
  ret double %conv1
}

define float @t5_strict(float %x) #0 {
; CHECK-LABEL: t5_strict:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    fcvtzu s0, s0
; CHECK-NEXT:    ucvtf s0, s0
; CHECK-NEXT:    ret
entry:
  %conv = call i32 @llvm.experimental.constrained.fptoui.i32.f32(float %x, metadata !"fpexcept.strict") #0
  %conv1 = call float @llvm.experimental.constrained.uitofp.f32.i32(i32 %conv, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
  ret float %conv1
}

define half @t6_strict(half %x) #0 {
; CHECK-LABEL: t6_strict:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    fcvtzu h0, h0
; CHECK-NEXT:    ucvtf h0, h0
; CHECK-NEXT:    ret
entry:
  %conv = call i32 @llvm.experimental.constrained.fptoui.i32.f16(half %x, metadata !"fpexcept.strict") #0
  %conv1 = call half @llvm.experimental.constrained.uitofp.f16.i32(i32 %conv, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
  ret half %conv1
}

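; Strict bf16 round trips: same widen/narrow expansion as t7/t8.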
define bfloat @t7_strict(bfloat %x) #0 {
; CHECK-LABEL: t7_strict:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    // kill: def $h0 killed $h0 def $d0
; CHECK-NEXT:    mov w8, #32767 // =0x7fff
; CHECK-NEXT:    shll v0.4s, v0.4h, #16
; CHECK-NEXT:    fcvtzs w9, s0
; CHECK-NEXT:    scvtf d0, w9
; CHECK-NEXT:    fcvtxn s0, d0
; CHECK-NEXT:    fmov w9, s0
; CHECK-NEXT:    ubfx w10, w9, #16, #1
; CHECK-NEXT:    add w8, w9, w8
; CHECK-NEXT:    add w8, w10, w8
; CHECK-NEXT:    lsr w8, w8, #16
; CHECK-NEXT:    fmov s0, w8
; CHECK-NEXT:    // kill: def $h0 killed $h0 killed $s0
; CHECK-NEXT:    ret
entry:
  %conv = call i32 @llvm.experimental.constrained.fptosi.i32.bf16(bfloat %x, metadata !"fpexcept.strict") #0
  %conv1 = call bfloat @llvm.experimental.constrained.sitofp.bf16.i32(i32 %conv, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
  ret bfloat %conv1
}

define bfloat @t8_strict(bfloat %x) #0 {
; CHECK-LABEL: t8_strict:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    // kill: def $h0 killed $h0 def $d0
; CHECK-NEXT:    mov w8, #32767 // =0x7fff
; CHECK-NEXT:    shll v0.4s, v0.4h, #16
; CHECK-NEXT:    fcvtzu w9, s0
; CHECK-NEXT:    ucvtf d0, w9
; CHECK-NEXT:    fcvtxn s0, d0
; CHECK-NEXT:    fmov w9, s0
; CHECK-NEXT:    ubfx w10, w9, #16, #1
; CHECK-NEXT:    add w8, w9, w8
; CHECK-NEXT:    add w8, w10, w8
; CHECK-NEXT:    lsr w8, w8, #16
; CHECK-NEXT:    fmov s0, w8
; CHECK-NEXT:    // kill: def $h0 killed $h0 killed $s0
; CHECK-NEXT:    ret
entry:
  %conv = call i32 @llvm.experimental.constrained.fptoui.i32.bf16(bfloat %x, metadata !"fpexcept.strict") #0
  %conv1 = call bfloat @llvm.experimental.constrained.uitofp.bf16.i32(i32 %conv, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
  ret bfloat %conv1
}

attributes #0 = { strictfp }

declare i32 @llvm.experimental.constrained.fptosi.i32.bf16(bfloat, metadata)
declare i32 @llvm.experimental.constrained.fptoui.i32.bf16(bfloat, metadata)
declare i32 @llvm.experimental.constrained.fptosi.i32.f16(half, metadata)
declare i32 @llvm.experimental.constrained.fptoui.i32.f16(half, metadata)
declare i32 @llvm.experimental.constrained.fptosi.i32.f32(float, metadata)
declare i32 @llvm.experimental.constrained.fptoui.i32.f32(float, metadata)
declare i64 @llvm.experimental.constrained.fptosi.i64.f64(double, metadata)
declare i64 @llvm.experimental.constrained.fptoui.i64.f64(double, metadata)
declare bfloat @llvm.experimental.constrained.sitofp.bf16.i32(i32, metadata, metadata)
declare bfloat @llvm.experimental.constrained.uitofp.bf16.i32(i32, metadata, metadata)
declare half @llvm.experimental.constrained.sitofp.f16.i32(i32, metadata, metadata)
declare half @llvm.experimental.constrained.uitofp.f16.i32(i32, metadata, metadata)
declare float @llvm.experimental.constrained.sitofp.f32.i32(i32, metadata, metadata)
declare float @llvm.experimental.constrained.uitofp.f32.i32(i32, metadata, metadata)
declare double @llvm.experimental.constrained.sitofp.f64.i64(i64, metadata, metadata)
declare double @llvm.experimental.constrained.uitofp.f64.i64(i64, metadata, metadata)