; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; REQUIRES: x86-registered-target,sparc-registered-target
; RUN: opt < %s -passes=instcombine -S -mtriple "i386-pc-linux"     | FileCheck %s --check-prefixes=CHECK,DOUBLE-4BYTE-ALIGN
; RUN: opt < %s -passes=instcombine -S -mtriple "i386-pc-win32"     | FileCheck %s --check-prefixes=CHECK,DOUBLE-8BYTE-ALIGN
; RUN: opt < %s -passes=instcombine -S -mtriple "x86_64-pc-win32"   | FileCheck %s --check-prefixes=CHECK,DOUBLE-8BYTE-ALIGN
; RUN: opt < %s -passes=instcombine -S -mtriple "i386-pc-mingw32"   | FileCheck %s --check-prefixes=CHECK,DOUBLE-8BYTE-ALIGN
; RUN: opt < %s -passes=instcombine -S -mtriple "x86_64-pc-mingw32" | FileCheck %s --check-prefixes=CHECK,DOUBLE-8BYTE-ALIGN
; RUN: opt < %s -passes=instcombine -S -mtriple "sparc-sun-solaris" | FileCheck %s --check-prefixes=CHECK,DOUBLE-8BYTE-ALIGN
; RUN: opt < %s -passes=instcombine -S -mtriple "x86_64-pc-win32" -enable-debugify 2>&1 | FileCheck --check-prefix=DBG-VALID %s
; RUN: opt < %s -passes=instcombine -S -mtriple "x86_64-pc-win32" -enable-debugify 2>&1 --try-experimental-debuginfo-iterators | FileCheck --check-prefix=DBG-VALID %s

; Declarations of the C math-library functions and the matching LLVM
; intrinsics that instcombine may shrink from f64 to narrower types.
declare double @floor(double)
declare double @ceil(double)
declare double @round(double)
declare double @roundeven(double)
declare double @nearbyint(double)
declare double @trunc(double)
declare double @fabs(double)

declare double @llvm.ceil.f64(double)
declare <2 x double> @llvm.ceil.v2f64(<2 x double>)

declare double @llvm.fabs.f64(double)
declare <2 x double> @llvm.fabs.v2f64(<2 x double>)

declare double @llvm.floor.f64(double)
declare <2 x double> @llvm.floor.v2f64(<2 x double>)

declare double @llvm.nearbyint.f64(double)
declare <2 x double> @llvm.nearbyint.v2f64(<2 x double>)

declare float @llvm.rint.f32(float)
declare <2 x float> @llvm.rint.v2f32(<2 x float>)

declare double @llvm.round.f64(double)
declare <2 x double> @llvm.round.v2f64(<2 x double>)

declare double @llvm.roundeven.f64(double)
declare <2 x double> @llvm.roundeven.v2f64(<2 x double>)

declare double @llvm.trunc.f64(double)
declare <2 x double> @llvm.trunc.v2f64(<2 x double>)
43
; fpext+libcall+fptrunc patterns: the double libcall should shrink to the
; equivalent f32 intrinsic when the extend/truncate pair makes it exact.
define float @test_shrink_libcall_floor(float %C) {
; CHECK-LABEL: @test_shrink_libcall_floor(
; CHECK-NEXT:    [[TMP1:%.*]] = call float @llvm.floor.f32(float [[C:%.*]])
; CHECK-NEXT:    ret float [[TMP1]]
;
  %D = fpext float %C to double
  ; --> floorf
  %E = call double @floor(double %D)
  %F = fptrunc double %E to float
  ret float %F
}

define float @test_shrink_libcall_ceil(float %C) {
; CHECK-LABEL: @test_shrink_libcall_ceil(
; CHECK-NEXT:    [[TMP1:%.*]] = call float @llvm.ceil.f32(float [[C:%.*]])
; CHECK-NEXT:    ret float [[TMP1]]
;
  %D = fpext float %C to double
  ; --> ceilf
  %E = call double @ceil(double %D)
  %F = fptrunc double %E to float
  ret float %F
}

define float @test_shrink_libcall_round(float %C) {
; CHECK-LABEL: @test_shrink_libcall_round(
; CHECK-NEXT:    [[TMP1:%.*]] = call float @llvm.round.f32(float [[C:%.*]])
; CHECK-NEXT:    ret float [[TMP1]]
;
  %D = fpext float %C to double
  ; --> roundf
  %E = call double @round(double %D)
  %F = fptrunc double %E to float
  ret float %F
}

define float @test_shrink_libcall_roundeven(float %C) {
; CHECK-LABEL: @test_shrink_libcall_roundeven(
; CHECK-NEXT:    [[TMP1:%.*]] = call float @llvm.roundeven.f32(float [[C:%.*]])
; CHECK-NEXT:    ret float [[TMP1]]
;
  %D = fpext float %C to double
  ; --> roundeven
  %E = call double @roundeven(double %D)
  %F = fptrunc double %E to float
  ret float %F
}

define float @test_shrink_libcall_nearbyint(float %C) {
; CHECK-LABEL: @test_shrink_libcall_nearbyint(
; CHECK-NEXT:    [[TMP1:%.*]] = call float @llvm.nearbyint.f32(float [[C:%.*]])
; CHECK-NEXT:    ret float [[TMP1]]
;
  %D = fpext float %C to double
  ; --> nearbyintf
  %E = call double @nearbyint(double %D)
  %F = fptrunc double %E to float
  ret float %F
}

define float @test_shrink_libcall_trunc(float %C) {
; CHECK-LABEL: @test_shrink_libcall_trunc(
; CHECK-NEXT:    [[TMP1:%.*]] = call float @llvm.trunc.f32(float [[C:%.*]])
; CHECK-NEXT:    ret float [[TMP1]]
;
  %D = fpext float %C to double
  ; --> truncf
  %E = call double @trunc(double %D)
  %F = fptrunc double %E to float
  ret float %F
}

; This is replaced with the intrinsic, which does the right thing on
; tested platforms.
define float @test_shrink_libcall_fabs(float %C) {
; CHECK-LABEL: @test_shrink_libcall_fabs(
; CHECK-NEXT:    [[TMP1:%.*]] = call float @llvm.fabs.f32(float [[C:%.*]])
; CHECK-NEXT:    ret float [[TMP1]]
;
  %D = fpext float %C to double
  %E = call double @fabs(double %D)
  %F = fptrunc double %E to float
  ret float %F
}

; Make sure fast math flags are preserved
define float @test_shrink_libcall_fabs_fast(float %C) {
; CHECK-LABEL: @test_shrink_libcall_fabs_fast(
; CHECK-NEXT:    [[TMP1:%.*]] = call fast float @llvm.fabs.f32(float [[C:%.*]])
; CHECK-NEXT:    ret float [[TMP1]]
;
  %D = fpext float %C to double
  %E = call fast double @fabs(double %D)
  %F = fptrunc double %E to float
  ret float %F
}
140
; Same shrinking, but starting from the f64 intrinsics rather than libcalls.
define float @test_shrink_intrin_ceil(float %C) {
; CHECK-LABEL: @test_shrink_intrin_ceil(
; CHECK-NEXT:    [[TMP1:%.*]] = call float @llvm.ceil.f32(float [[C:%.*]])
; CHECK-NEXT:    ret float [[TMP1]]
;
  %D = fpext float %C to double
  %E = call double @llvm.ceil.f64(double %D)
  %F = fptrunc double %E to float
  ret float %F
}

define float @test_shrink_intrin_fabs(float %C) {
; CHECK-LABEL: @test_shrink_intrin_fabs(
; CHECK-NEXT:    [[TMP1:%.*]] = call float @llvm.fabs.f32(float [[C:%.*]])
; CHECK-NEXT:    ret float [[TMP1]]
;
  %D = fpext float %C to double
  %E = call double @llvm.fabs.f64(double %D)
  %F = fptrunc double %E to float
  ret float %F
}

define float @test_shrink_intrin_floor(float %C) {
; CHECK-LABEL: @test_shrink_intrin_floor(
; CHECK-NEXT:    [[TMP1:%.*]] = call float @llvm.floor.f32(float [[C:%.*]])
; CHECK-NEXT:    ret float [[TMP1]]
;
  %D = fpext float %C to double
  %E = call double @llvm.floor.f64(double %D)
  %F = fptrunc double %E to float
  ret float %F
}

define float @test_shrink_intrin_nearbyint(float %C) {
; CHECK-LABEL: @test_shrink_intrin_nearbyint(
; CHECK-NEXT:    [[TMP1:%.*]] = call float @llvm.nearbyint.f32(float [[C:%.*]])
; CHECK-NEXT:    ret float [[TMP1]]
;
  %D = fpext float %C to double
  %E = call double @llvm.nearbyint.f64(double %D)
  %F = fptrunc double %E to float
  ret float %F
}

define half @test_shrink_intrin_rint(half %C) {
; CHECK-LABEL: @test_shrink_intrin_rint(
; CHECK-NEXT:    [[TMP1:%.*]] = call half @llvm.rint.f16(half [[C:%.*]])
; CHECK-NEXT:    ret half [[TMP1]]
;
  %D = fpext half %C to float
  %E = call float @llvm.rint.f32(float %D)
  %F = fptrunc float %E to half
  ret half %F
}

define float @test_shrink_intrin_round(float %C) {
; CHECK-LABEL: @test_shrink_intrin_round(
; CHECK-NEXT:    [[TMP1:%.*]] = call float @llvm.round.f32(float [[C:%.*]])
; CHECK-NEXT:    ret float [[TMP1]]
;
  %D = fpext float %C to double
  %E = call double @llvm.round.f64(double %D)
  %F = fptrunc double %E to float
  ret float %F
}

define float @test_shrink_intrin_roundeven(float %C) {
; CHECK-LABEL: @test_shrink_intrin_roundeven(
; CHECK-NEXT:    [[TMP1:%.*]] = call float @llvm.roundeven.f32(float [[C:%.*]])
; CHECK-NEXT:    ret float [[TMP1]]
;
  %D = fpext float %C to double
  %E = call double @llvm.roundeven.f64(double %D)
  %F = fptrunc double %E to float
  ret float %F
}

define float @test_shrink_intrin_trunc(float %C) {
; CHECK-LABEL: @test_shrink_intrin_trunc(
; CHECK-NEXT:    [[TMP1:%.*]] = call float @llvm.trunc.f32(float [[C:%.*]])
; CHECK-NEXT:    ret float [[TMP1]]
;
  %D = fpext float %C to double
  %E = call double @llvm.trunc.f64(double %D)
  %F = fptrunc double %E to float
  ret float %F
}
228
declare void @use_v2f64(<2 x double>)
declare void @use_v2f32(<2 x float>)

; Multi-use cases: extra uses of the extended value or the wide result
; constrain when the shrink is still profitable/legal.
define <2 x float> @test_shrink_intrin_ceil_multi_use(<2 x float> %C) {
; CHECK-LABEL: @test_shrink_intrin_ceil_multi_use(
; CHECK-NEXT:    [[D:%.*]] = fpext <2 x float> [[C:%.*]] to <2 x double>
; CHECK-NEXT:    [[E:%.*]] = call <2 x double> @llvm.ceil.v2f64(<2 x double> [[D]])
; CHECK-NEXT:    [[F:%.*]] = fptrunc <2 x double> [[E]] to <2 x float>
; CHECK-NEXT:    call void @use_v2f64(<2 x double> [[D]])
; CHECK-NEXT:    ret <2 x float> [[F]]
;
  %D = fpext <2 x float> %C to <2 x double>
  %E = call <2 x double> @llvm.ceil.v2f64(<2 x double> %D)
  %F = fptrunc <2 x double> %E to <2 x float>
  call void @use_v2f64(<2 x double> %D)
  ret <2 x float> %F
}

define <2 x float> @test_shrink_intrin_fabs_multi_use(<2 x float> %C) {
; CHECK-LABEL: @test_shrink_intrin_fabs_multi_use(
; CHECK-NEXT:    [[TMP1:%.*]] = call <2 x float> @llvm.fabs.v2f32(<2 x float> [[C:%.*]])
; CHECK-NEXT:    [[E:%.*]] = fpext <2 x float> [[TMP1]] to <2 x double>
; CHECK-NEXT:    call void @use_v2f64(<2 x double> [[E]])
; CHECK-NEXT:    ret <2 x float> [[TMP1]]
;
  %D = fpext <2 x float> %C to <2 x double>
  %E = call <2 x double> @llvm.fabs.v2f64(<2 x double> %D)
  %F = fptrunc <2 x double> %E to <2 x float>
  call void @use_v2f64(<2 x double> %E)
  ret <2 x float> %F
}

define <2 x float> @test_shrink_intrin_floor_multi_use(<2 x float> %C) {
; CHECK-LABEL: @test_shrink_intrin_floor_multi_use(
; CHECK-NEXT:    [[D:%.*]] = fpext <2 x float> [[C:%.*]] to <2 x double>
; CHECK-NEXT:    [[E:%.*]] = call <2 x double> @llvm.floor.v2f64(<2 x double> [[D]])
; CHECK-NEXT:    [[F:%.*]] = fptrunc <2 x double> [[E]] to <2 x float>
; CHECK-NEXT:    call void @use_v2f64(<2 x double> [[D]])
; CHECK-NEXT:    call void @use_v2f64(<2 x double> [[E]])
; CHECK-NEXT:    ret <2 x float> [[F]]
;
  %D = fpext <2 x float> %C to <2 x double>
  %E = call <2 x double> @llvm.floor.v2f64(<2 x double> %D)
  %F = fptrunc <2 x double> %E to <2 x float>
  call void @use_v2f64(<2 x double> %D)
  call void @use_v2f64(<2 x double> %E)
  ret <2 x float> %F
}

define <2 x float> @test_shrink_intrin_nearbyint_multi_use(<2 x float> %C) {
; CHECK-LABEL: @test_shrink_intrin_nearbyint_multi_use(
; CHECK-NEXT:    [[D:%.*]] = fpext <2 x float> [[C:%.*]] to <2 x double>
; CHECK-NEXT:    [[E:%.*]] = call <2 x double> @llvm.nearbyint.v2f64(<2 x double> [[D]])
; CHECK-NEXT:    [[F:%.*]] = fptrunc <2 x double> [[E]] to <2 x float>
; CHECK-NEXT:    call void @use_v2f64(<2 x double> [[D]])
; CHECK-NEXT:    ret <2 x float> [[F]]
;
  %D = fpext <2 x float> %C to <2 x double>
  %E = call <2 x double> @llvm.nearbyint.v2f64(<2 x double> %D)
  %F = fptrunc <2 x double> %E to <2 x float>
  call void @use_v2f64(<2 x double> %D)
  ret <2 x float> %F
}

define <2 x half> @test_shrink_intrin_rint_multi_use(<2 x half> %C) {
; CHECK-LABEL: @test_shrink_intrin_rint_multi_use(
; CHECK-NEXT:    [[TMP1:%.*]] = call <2 x half> @llvm.rint.v2f16(<2 x half> [[C:%.*]])
; CHECK-NEXT:    [[E:%.*]] = fpext <2 x half> [[TMP1]] to <2 x float>
; CHECK-NEXT:    call void @use_v2f32(<2 x float> [[E]])
; CHECK-NEXT:    ret <2 x half> [[TMP1]]
;
  %D = fpext <2 x half> %C to <2 x float>
  %E = call <2 x float> @llvm.rint.v2f32(<2 x float> %D)
  %F = fptrunc <2 x float> %E to <2 x half>
  call void @use_v2f32(<2 x float> %E)
  ret <2 x half> %F
}

define <2 x float> @test_shrink_intrin_round_multi_use(<2 x float> %C) {
; CHECK-LABEL: @test_shrink_intrin_round_multi_use(
; CHECK-NEXT:    [[D:%.*]] = fpext <2 x float> [[C:%.*]] to <2 x double>
; CHECK-NEXT:    [[E:%.*]] = call <2 x double> @llvm.round.v2f64(<2 x double> [[D]])
; CHECK-NEXT:    [[F:%.*]] = fptrunc <2 x double> [[E]] to <2 x float>
; CHECK-NEXT:    call void @use_v2f64(<2 x double> [[D]])
; CHECK-NEXT:    call void @use_v2f64(<2 x double> [[E]])
; CHECK-NEXT:    ret <2 x float> [[F]]
;
  %D = fpext <2 x float> %C to <2 x double>
  %E = call <2 x double> @llvm.round.v2f64(<2 x double> %D)
  %F = fptrunc <2 x double> %E to <2 x float>
  call void @use_v2f64(<2 x double> %D)
  call void @use_v2f64(<2 x double> %E)
  ret <2 x float> %F
}

define <2 x float> @test_shrink_intrin_roundeven_multi_use(<2 x float> %C) {
; CHECK-LABEL: @test_shrink_intrin_roundeven_multi_use(
; CHECK-NEXT:    [[D:%.*]] = fpext <2 x float> [[C:%.*]] to <2 x double>
; CHECK-NEXT:    [[E:%.*]] = call <2 x double> @llvm.roundeven.v2f64(<2 x double> [[D]])
; CHECK-NEXT:    [[F:%.*]] = fptrunc <2 x double> [[E]] to <2 x float>
; CHECK-NEXT:    call void @use_v2f64(<2 x double> [[D]])
; CHECK-NEXT:    call void @use_v2f64(<2 x double> [[E]])
; CHECK-NEXT:    ret <2 x float> [[F]]
;
  %D = fpext <2 x float> %C to <2 x double>
  %E = call <2 x double> @llvm.roundeven.v2f64(<2 x double> %D)
  %F = fptrunc <2 x double> %E to <2 x float>
  call void @use_v2f64(<2 x double> %D)
  call void @use_v2f64(<2 x double> %E)
  ret <2 x float> %F
}

define <2 x float> @test_shrink_intrin_trunc_multi_use(<2 x float> %C) {
; CHECK-LABEL: @test_shrink_intrin_trunc_multi_use(
; CHECK-NEXT:    [[D:%.*]] = fpext <2 x float> [[C:%.*]] to <2 x double>
; CHECK-NEXT:    [[E:%.*]] = call <2 x double> @llvm.trunc.v2f64(<2 x double> [[D]])
; CHECK-NEXT:    [[F:%.*]] = fptrunc <2 x double> [[E]] to <2 x float>
; CHECK-NEXT:    call void @use_v2f64(<2 x double> [[D]])
; CHECK-NEXT:    ret <2 x float> [[F]]
;
  %D = fpext <2 x float> %C to <2 x double>
  %E = call <2 x double> @llvm.trunc.v2f64(<2 x double> %D)
  %F = fptrunc <2 x double> %E to <2 x float>
  call void @use_v2f64(<2 x double> %D)
  ret <2 x float> %F
}
355
; Make sure fast math flags are preserved
define float @test_shrink_intrin_fabs_fast(float %C) {
; CHECK-LABEL: @test_shrink_intrin_fabs_fast(
; CHECK-NEXT:    [[TMP1:%.*]] = call fast float @llvm.fabs.f32(float [[C:%.*]])
; CHECK-NEXT:    ret float [[TMP1]]
;
  %D = fpext float %C to double
  %E = call fast double @llvm.fabs.f64(double %D)
  %F = fptrunc double %E to float
  ret float %F
}

; Negative tests: with a genuine double source (no fpext), the rounding
; intrinsics must not be narrowed.
define float @test_no_shrink_intrin_floor(double %D) {
; CHECK-LABEL: @test_no_shrink_intrin_floor(
; CHECK-NEXT:    [[E:%.*]] = call double @llvm.floor.f64(double [[D:%.*]])
; CHECK-NEXT:    [[F:%.*]] = fptrunc double [[E]] to float
; CHECK-NEXT:    ret float [[F]]
;
  %E = call double @llvm.floor.f64(double %D)
  %F = fptrunc double %E to float
  ret float %F
}

define float @test_no_shrink_intrin_ceil(double %D) {
; CHECK-LABEL: @test_no_shrink_intrin_ceil(
; CHECK-NEXT:    [[E:%.*]] = call double @llvm.ceil.f64(double [[D:%.*]])
; CHECK-NEXT:    [[F:%.*]] = fptrunc double [[E]] to float
; CHECK-NEXT:    ret float [[F]]
;
  %E = call double @llvm.ceil.f64(double %D)
  %F = fptrunc double %E to float
  ret float %F
}

define float @test_no_shrink_intrin_round(double %D) {
; CHECK-LABEL: @test_no_shrink_intrin_round(
; CHECK-NEXT:    [[E:%.*]] = call double @llvm.round.f64(double [[D:%.*]])
; CHECK-NEXT:    [[F:%.*]] = fptrunc double [[E]] to float
; CHECK-NEXT:    ret float [[F]]
;
  %E = call double @llvm.round.f64(double %D)
  %F = fptrunc double %E to float
  ret float %F
}

define float @test_no_shrink_intrin_roundeven(double %D) {
; CHECK-LABEL: @test_no_shrink_intrin_roundeven(
; CHECK-NEXT:    [[E:%.*]] = call double @llvm.roundeven.f64(double [[D:%.*]])
; CHECK-NEXT:    [[F:%.*]] = fptrunc double [[E]] to float
; CHECK-NEXT:    ret float [[F]]
;
  %E = call double @llvm.roundeven.f64(double %D)
  %F = fptrunc double %E to float
  ret float %F
}

define float @test_no_shrink_intrin_nearbyint(double %D) {
; CHECK-LABEL: @test_no_shrink_intrin_nearbyint(
; CHECK-NEXT:    [[E:%.*]] = call double @llvm.nearbyint.f64(double [[D:%.*]])
; CHECK-NEXT:    [[F:%.*]] = fptrunc double [[E]] to float
; CHECK-NEXT:    ret float [[F]]
;
  %E = call double @llvm.nearbyint.f64(double %D)
  %F = fptrunc double %E to float
  ret float %F
}

define float @test_no_shrink_intrin_trunc(double %D) {
; CHECK-LABEL: @test_no_shrink_intrin_trunc(
; CHECK-NEXT:    [[E:%.*]] = call double @llvm.trunc.f64(double [[D:%.*]])
; CHECK-NEXT:    [[F:%.*]] = fptrunc double [[E]] to float
; CHECK-NEXT:    ret float [[F]]
;
  %E = call double @llvm.trunc.f64(double %D)
  %F = fptrunc double %E to float
  ret float %F
}
433
; fabs commutes with fptrunc, so it can still shrink with a double source.
define float @test_shrink_intrin_fabs_double_src(double %D) {
; CHECK-LABEL: @test_shrink_intrin_fabs_double_src(
; CHECK-NEXT:    [[TMP1:%.*]] = fptrunc double [[D:%.*]] to float
; CHECK-NEXT:    [[F:%.*]] = call float @llvm.fabs.f32(float [[TMP1]])
; CHECK-NEXT:    ret float [[F]]
;
  %E = call double @llvm.fabs.f64(double %D)
  %F = fptrunc double %E to float
  ret float %F
}

; Make sure fast math flags are preserved
define float @test_shrink_intrin_fabs_fast_double_src(double %D) {
; CHECK-LABEL: @test_shrink_intrin_fabs_fast_double_src(
; CHECK-NEXT:    [[TMP1:%.*]] = fptrunc double [[D:%.*]] to float
; CHECK-NEXT:    [[F:%.*]] = call fast float @llvm.fabs.f32(float [[TMP1]])
; CHECK-NEXT:    ret float [[F]]
;
  %E = call fast double @llvm.fabs.f64(double %D)
  %F = fptrunc double %E to float
  ret float %F
}

; Constant operands: the intrinsic plus fptrunc constant-folds entirely.
define float @test_shrink_float_convertible_constant_intrin_floor() {
; CHECK-LABEL: @test_shrink_float_convertible_constant_intrin_floor(
; CHECK-NEXT:    ret float 2.000000e+00
;
  %E = call double @llvm.floor.f64(double 2.1)
  %F = fptrunc double %E to float
  ret float %F
}

define float @test_shrink_float_convertible_constant_intrin_ceil() {
; CHECK-LABEL: @test_shrink_float_convertible_constant_intrin_ceil(
; CHECK-NEXT:    ret float 3.000000e+00
;
  %E = call double @llvm.ceil.f64(double 2.1)
  %F = fptrunc double %E to float
  ret float %F
}

define float @test_shrink_float_convertible_constant_intrin_round() {
; CHECK-LABEL: @test_shrink_float_convertible_constant_intrin_round(
; CHECK-NEXT:    ret float 2.000000e+00
;
  %E = call double @llvm.round.f64(double 2.1)
  %F = fptrunc double %E to float
  ret float %F
}

define float @test_shrink_float_convertible_constant_intrin_roundeven() {
; CHECK-LABEL: @test_shrink_float_convertible_constant_intrin_roundeven(
; CHECK-NEXT:    ret float 2.000000e+00
;
  %E = call double @llvm.roundeven.f64(double 2.1)
  %F = fptrunc double %E to float
  ret float %F
}

define float @test_shrink_float_convertible_constant_intrin_nearbyint() {
; CHECK-LABEL: @test_shrink_float_convertible_constant_intrin_nearbyint(
; CHECK-NEXT:    ret float 2.000000e+00
;
  %E = call double @llvm.nearbyint.f64(double 2.1)
  %F = fptrunc double %E to float
  ret float %F
}

define float @test_shrink_float_convertible_constant_intrin_trunc() {
; CHECK-LABEL: @test_shrink_float_convertible_constant_intrin_trunc(
; CHECK-NEXT:    ret float 2.000000e+00
;
  %E = call double @llvm.trunc.f64(double 2.1)
  %F = fptrunc double %E to float
  ret float %F
}

define float @test_shrink_float_convertible_constant_intrin_fabs() {
; CHECK-LABEL: @test_shrink_float_convertible_constant_intrin_fabs(
; CHECK-NEXT:    ret float 0x4000CCCCC0000000
;
  %E = call double @llvm.fabs.f64(double 2.1)
  %F = fptrunc double %E to float
  ret float %F
}

; Make sure fast math flags are preserved
define float @test_shrink_float_convertible_constant_intrin_fabs_fast() {
; CHECK-LABEL: @test_shrink_float_convertible_constant_intrin_fabs_fast(
; CHECK-NEXT:    ret float 0x4000CCCCC0000000
;
  %E = call fast double @llvm.fabs.f64(double 2.1)
  %F = fptrunc double %E to float
  ret float %F
}
529
; Truncating double straight to half: rounding intrinsics must not shrink
; (double-rounding would change results), but fabs still commutes.
define half @test_no_shrink_mismatched_type_intrin_floor(double %D) {
; CHECK-LABEL: @test_no_shrink_mismatched_type_intrin_floor(
; CHECK-NEXT:    [[E:%.*]] = call double @llvm.floor.f64(double [[D:%.*]])
; CHECK-NEXT:    [[F:%.*]] = fptrunc double [[E]] to half
; CHECK-NEXT:    ret half [[F]]
;
  %E = call double @llvm.floor.f64(double %D)
  %F = fptrunc double %E to half
  ret half %F
}

define half @test_no_shrink_mismatched_type_intrin_ceil(double %D) {
; CHECK-LABEL: @test_no_shrink_mismatched_type_intrin_ceil(
; CHECK-NEXT:    [[E:%.*]] = call double @llvm.ceil.f64(double [[D:%.*]])
; CHECK-NEXT:    [[F:%.*]] = fptrunc double [[E]] to half
; CHECK-NEXT:    ret half [[F]]
;
  %E = call double @llvm.ceil.f64(double %D)
  %F = fptrunc double %E to half
  ret half %F
}

define half @test_no_shrink_mismatched_type_intrin_round(double %D) {
; CHECK-LABEL: @test_no_shrink_mismatched_type_intrin_round(
; CHECK-NEXT:    [[E:%.*]] = call double @llvm.round.f64(double [[D:%.*]])
; CHECK-NEXT:    [[F:%.*]] = fptrunc double [[E]] to half
; CHECK-NEXT:    ret half [[F]]
;
  %E = call double @llvm.round.f64(double %D)
  %F = fptrunc double %E to half
  ret half %F
}

define half @test_no_shrink_mismatched_type_intrin_roundeven(double %D) {
; CHECK-LABEL: @test_no_shrink_mismatched_type_intrin_roundeven(
; CHECK-NEXT:    [[E:%.*]] = call double @llvm.roundeven.f64(double [[D:%.*]])
; CHECK-NEXT:    [[F:%.*]] = fptrunc double [[E]] to half
; CHECK-NEXT:    ret half [[F]]
;
  %E = call double @llvm.roundeven.f64(double %D)
  %F = fptrunc double %E to half
  ret half %F
}

define half @test_no_shrink_mismatched_type_intrin_nearbyint(double %D) {
; CHECK-LABEL: @test_no_shrink_mismatched_type_intrin_nearbyint(
; CHECK-NEXT:    [[E:%.*]] = call double @llvm.nearbyint.f64(double [[D:%.*]])
; CHECK-NEXT:    [[F:%.*]] = fptrunc double [[E]] to half
; CHECK-NEXT:    ret half [[F]]
;
  %E = call double @llvm.nearbyint.f64(double %D)
  %F = fptrunc double %E to half
  ret half %F
}

define half @test_no_shrink_mismatched_type_intrin_trunc(double %D) {
; CHECK-LABEL: @test_no_shrink_mismatched_type_intrin_trunc(
; CHECK-NEXT:    [[E:%.*]] = call double @llvm.trunc.f64(double [[D:%.*]])
; CHECK-NEXT:    [[F:%.*]] = fptrunc double [[E]] to half
; CHECK-NEXT:    ret half [[F]]
;
  %E = call double @llvm.trunc.f64(double %D)
  %F = fptrunc double %E to half
  ret half %F
}

define half @test_shrink_mismatched_type_intrin_fabs_double_src(double %D) {
; CHECK-LABEL: @test_shrink_mismatched_type_intrin_fabs_double_src(
; CHECK-NEXT:    [[TMP1:%.*]] = fptrunc double [[D:%.*]] to half
; CHECK-NEXT:    [[F:%.*]] = call half @llvm.fabs.f16(half [[TMP1]])
; CHECK-NEXT:    ret half [[F]]
;
  %E = call double @llvm.fabs.f64(double %D)
  %F = fptrunc double %E to half
  ret half %F
}

; Make sure fast math flags are preserved
define half @test_mismatched_type_intrin_fabs_fast_double_src(double %D) {
; CHECK-LABEL: @test_mismatched_type_intrin_fabs_fast_double_src(
; CHECK-NEXT:    [[TMP1:%.*]] = fptrunc double [[D:%.*]] to half
; CHECK-NEXT:    [[F:%.*]] = call fast half @llvm.fabs.f16(half [[TMP1]])
; CHECK-NEXT:    ret half [[F]]
;
  %E = call fast double @llvm.fabs.f64(double %D)
  %F = fptrunc double %E to half
  ret half %F
}
618
; fp16 sources: the intrinsic can shrink all the way to f16 even when the
; final result type is wider than the source.
define <2 x double> @test_shrink_intrin_floor_fp16_vec(<2 x half> %C) {
; CHECK-LABEL: @test_shrink_intrin_floor_fp16_vec(
; CHECK-NEXT:    [[TMP1:%.*]] = call arcp <2 x half> @llvm.floor.v2f16(<2 x half> [[C:%.*]])
; CHECK-NEXT:    [[E:%.*]] = fpext <2 x half> [[TMP1]] to <2 x double>
; CHECK-NEXT:    ret <2 x double> [[E]]
;
  %D = fpext <2 x half> %C to <2 x double>
  %E = call arcp <2 x double> @llvm.floor.v2f64(<2 x double> %D)
  ret <2 x double> %E
}

define float @test_shrink_intrin_ceil_fp16_src(half %C) {
; CHECK-LABEL: @test_shrink_intrin_ceil_fp16_src(
; CHECK-NEXT:    [[TMP1:%.*]] = call half @llvm.ceil.f16(half [[C:%.*]])
; CHECK-NEXT:    [[F:%.*]] = fpext half [[TMP1]] to float
; CHECK-NEXT:    ret float [[F]]
;
  %D = fpext half %C to double
  %E = call double @llvm.ceil.f64(double %D)
  %F = fptrunc double %E to float
  ret float %F
}

define <2 x double> @test_shrink_intrin_round_fp16_vec(<2 x half> %C) {
; CHECK-LABEL: @test_shrink_intrin_round_fp16_vec(
; CHECK-NEXT:    [[TMP1:%.*]] = call <2 x half> @llvm.round.v2f16(<2 x half> [[C:%.*]])
; CHECK-NEXT:    [[E:%.*]] = fpext <2 x half> [[TMP1]] to <2 x double>
; CHECK-NEXT:    ret <2 x double> [[E]]
;
  %D = fpext <2 x  half> %C to <2 x double>
  %E = call <2 x double> @llvm.round.v2f64(<2 x double> %D)
  ret <2 x double> %E
}

define <2 x double> @test_shrink_intrin_roundeven_fp16_vec(<2 x half> %C) {
; CHECK-LABEL: @test_shrink_intrin_roundeven_fp16_vec(
; CHECK-NEXT:    [[TMP1:%.*]] = call <2 x half> @llvm.roundeven.v2f16(<2 x half> [[C:%.*]])
; CHECK-NEXT:    [[E:%.*]] = fpext <2 x half> [[TMP1]] to <2 x double>
; CHECK-NEXT:    ret <2 x double> [[E]]
;
  %D = fpext <2 x  half> %C to <2 x double>
  %E = call <2 x double> @llvm.roundeven.v2f64(<2 x double> %D)
  ret <2 x double> %E
}

define float @test_shrink_intrin_nearbyint_fp16_src(half %C) {
; CHECK-LABEL: @test_shrink_intrin_nearbyint_fp16_src(
; CHECK-NEXT:    [[TMP1:%.*]] = call half @llvm.nearbyint.f16(half [[C:%.*]])
; CHECK-NEXT:    [[F:%.*]] = fpext half [[TMP1]] to float
; CHECK-NEXT:    ret float [[F]]
;
  %D = fpext half %C to double
  %E = call double @llvm.nearbyint.f64(double %D)
  %F = fptrunc double %E to float
  ret float %F
}

define <2 x double> @test_shrink_intrin_trunc_fp16_src(<2 x half> %C) {
; CHECK-LABEL: @test_shrink_intrin_trunc_fp16_src(
; CHECK-NEXT:    [[TMP1:%.*]] = call <2 x half> @llvm.trunc.v2f16(<2 x half> [[C:%.*]])
; CHECK-NEXT:    [[E:%.*]] = fpext <2 x half> [[TMP1]] to <2 x double>
; CHECK-NEXT:    ret <2 x double> [[E]]
;
  %D = fpext <2 x half> %C to <2 x double>
  %E = call <2 x double> @llvm.trunc.v2f64(<2 x double> %D)
  ret <2 x double> %E
}

define float @test_shrink_intrin_fabs_fp16_src(half %C) {
; CHECK-LABEL: @test_shrink_intrin_fabs_fp16_src(
; CHECK-NEXT:    [[TMP1:%.*]] = call half @llvm.fabs.f16(half [[C:%.*]])
; CHECK-NEXT:    [[F:%.*]] = fpext half [[TMP1]] to float
; CHECK-NEXT:    ret float [[F]]
;
  %D = fpext half %C to double
  %E = call double @llvm.fabs.f64(double %D)
  %F = fptrunc double %E to float
  ret float %F
}

; Make sure fast math flags are preserved
define float @test_shrink_intrin_fabs_fast_fp16_src(half %C) {
; CHECK-LABEL: @test_shrink_intrin_fabs_fast_fp16_src(
; CHECK-NEXT:    [[TMP1:%.*]] = call fast half @llvm.fabs.f16(half [[C:%.*]])
; CHECK-NEXT:    [[F:%.*]] = fpext half [[TMP1]] to float
; CHECK-NEXT:    ret float [[F]]
;
  %D = fpext half %C to double
  %E = call fast double @llvm.fabs.f64(double %D)
  %F = fptrunc double %E to float
  ret float %F
}

define float @test_no_shrink_intrin_floor_multi_use_fpext(half %C) {
; DOUBLE-4BYTE-ALIGN-LABEL: @test_no_shrink_intrin_floor_multi_use_fpext(
; DOUBLE-4BYTE-ALIGN-NEXT:    [[D:%.*]] = fpext half [[C:%.*]] to double
; DOUBLE-4BYTE-ALIGN-NEXT:    store volatile double [[D]], ptr undef, align 4
; DOUBLE-4BYTE-ALIGN-NEXT:    [[E:%.*]] = call double @llvm.floor.f64(double [[D]])
; DOUBLE-4BYTE-ALIGN-NEXT:    [[F:%.*]] = fptrunc double [[E]] to float
; DOUBLE-4BYTE-ALIGN-NEXT:    ret float [[F]]
;
; DOUBLE-8BYTE-ALIGN-LABEL: @test_no_shrink_intrin_floor_multi_use_fpext(
; DOUBLE-8BYTE-ALIGN-NEXT:    [[D:%.*]] = fpext half [[C:%.*]] to double
; DOUBLE-8BYTE-ALIGN-NEXT:    store volatile double [[D]], ptr undef, align 8
; DOUBLE-8BYTE-ALIGN-NEXT:    [[E:%.*]] = call double @llvm.floor.f64(double [[D]])
; DOUBLE-8BYTE-ALIGN-NEXT:    [[F:%.*]] = fptrunc double [[E]] to float
; DOUBLE-8BYTE-ALIGN-NEXT:    ret float [[F]]
;
  %D = fpext half %C to double
  store volatile double %D, ptr undef
  %E = call double @llvm.floor.f64(double %D)
  %F = fptrunc double %E to float
  ret float %F
}

define float @test_no_shrink_intrin_fabs_multi_use_fpext(half %C) {
; DOUBLE-4BYTE-ALIGN-LABEL: @test_no_shrink_intrin_fabs_multi_use_fpext(
; DOUBLE-4BYTE-ALIGN-NEXT:    [[D:%.*]] = fpext half [[C:%.*]] to double
; DOUBLE-4BYTE-ALIGN-NEXT:    store volatile double [[D]], ptr undef, align 4
; DOUBLE-4BYTE-ALIGN-NEXT:    [[E:%.*]] = call double @llvm.fabs.f64(double [[D]])
; DOUBLE-4BYTE-ALIGN-NEXT:    [[F:%.*]] = fptrunc double [[E]] to float
; DOUBLE-4BYTE-ALIGN-NEXT:    ret float [[F]]
;
; DOUBLE-8BYTE-ALIGN-LABEL: @test_no_shrink_intrin_fabs_multi_use_fpext(
; DOUBLE-8BYTE-ALIGN-NEXT:    [[D:%.*]] = fpext half [[C:%.*]] to double
; DOUBLE-8BYTE-ALIGN-NEXT:    store volatile double [[D]], ptr undef, align 8
; DOUBLE-8BYTE-ALIGN-NEXT:    [[E:%.*]] = call double @llvm.fabs.f64(double [[D]])
; DOUBLE-8BYTE-ALIGN-NEXT:    [[F:%.*]] = fptrunc double [[E]] to float
; DOUBLE-8BYTE-ALIGN-NEXT:    ret float [[F]]
;
  %D = fpext half %C to double
  store volatile double %D, ptr undef
  %E = call double @llvm.fabs.f64(double %D)
  %F = fptrunc double %E to float
  ret float %F
}
755
; DBG-VALID: CheckModuleDebugify: PASS