// RUN: %clang_cc1 -triple x86_64-linux -ffp-exception-behavior=maytrap -w -o - -emit-llvm %s | FileCheck %s

// Test codegen of constrained math builtins.
//
// Test that the constrained intrinsics are picking up the exception
// metadata from the AST instead of the global default from the command line.

#pragma float_control(except, on)
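// (The RUN line above requests "maytrap" globally; with this pragma in
// effect, every constrained intrinsic below is nevertheless expected to
// carry !"fpexcept.strict".)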

void foo(double *d, float f, float *fp, long double *l, int *i, const char *c, _Float16 h) {
  f = __builtin_fmod(f,f);    f = __builtin_fmodf(f,f);   f =  __builtin_fmodl(f,f); f = __builtin_fmodf128(f,f);

// CHECK: call double @llvm.experimental.constrained.frem.f64(double %{{.*}}, double %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CHECK: call float @llvm.experimental.constrained.frem.f32(float %{{.*}}, float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CHECK: call x86_fp80 @llvm.experimental.constrained.frem.f80(x86_fp80 %{{.*}}, x86_fp80 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CHECK: call fp128 @llvm.experimental.constrained.frem.f128(fp128 %{{.*}}, fp128 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")

  __builtin_pow(f,f);        __builtin_powf(f,f);       __builtin_powl(f,f); __builtin_powf128(f,f);

// CHECK: call double @llvm.experimental.constrained.pow.f64(double %{{.*}}, double %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CHECK: call float @llvm.experimental.constrained.pow.f32(float %{{.*}}, float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CHECK: call x86_fp80 @llvm.experimental.constrained.pow.f80(x86_fp80 %{{.*}}, x86_fp80 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CHECK: call fp128 @llvm.experimental.constrained.pow.f128(fp128 %{{.*}}, fp128 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")

  __builtin_powi(f,f);        __builtin_powif(f,f);       __builtin_powil(f,f);

// CHECK: call double @llvm.experimental.constrained.powi.f64(double %{{.*}}, i32 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CHECK: call float @llvm.experimental.constrained.powi.f32(float %{{.*}}, i32 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CHECK: call x86_fp80 @llvm.experimental.constrained.powi.f80(x86_fp80 %{{.*}}, i32 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
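// Note: __builtin_powi takes an int exponent, so the float second arguments
// above are implicitly converted; the constrained powi intrinsic always uses
// an i32 exponent (see the declarations checked at the end of this file).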


  h = __builtin_ldexpf16(h, *i);  *d = __builtin_ldexp(*d, *i);        f = __builtin_ldexpf(f, *i);       __builtin_ldexpl(*l, *i);

// CHECK: call half @llvm.experimental.constrained.ldexp.f16.i32(half %{{.*}}, i32 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CHECK: call double @llvm.experimental.constrained.ldexp.f64.i32(double %{{.*}}, i32 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CHECK: call float @llvm.experimental.constrained.ldexp.f32.i32(float %{{.*}}, i32 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CHECK: call x86_fp80 @llvm.experimental.constrained.ldexp.f80.i32(x86_fp80 %{{.*}}, i32 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
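// Note: the ldexp intrinsic is overloaded on both the value type and the
// exponent type, which is why the names carry two suffixes (e.g. ".f64.i32").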

  __builtin_acos(f);        __builtin_acosf(f);       __builtin_acosl(f); __builtin_acosf128(f);

// CHECK: call double @llvm.experimental.constrained.acos.f64(double %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CHECK: call float @llvm.experimental.constrained.acos.f32(float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CHECK: call x86_fp80 @llvm.experimental.constrained.acos.f80(x86_fp80 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CHECK: call fp128 @llvm.experimental.constrained.acos.f128(fp128 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")

  __builtin_asin(f);        __builtin_asinf(f);       __builtin_asinl(f); __builtin_asinf128(f);

// CHECK: call double @llvm.experimental.constrained.asin.f64(double %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CHECK: call float @llvm.experimental.constrained.asin.f32(float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CHECK: call x86_fp80 @llvm.experimental.constrained.asin.f80(x86_fp80 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CHECK: call fp128 @llvm.experimental.constrained.asin.f128(fp128 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")

  __builtin_atan(f);        __builtin_atanf(f);       __builtin_atanl(f); __builtin_atanf128(f);

// CHECK: call double @llvm.experimental.constrained.atan.f64(double %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CHECK: call float @llvm.experimental.constrained.atan.f32(float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CHECK: call x86_fp80 @llvm.experimental.constrained.atan.f80(x86_fp80 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CHECK: call fp128 @llvm.experimental.constrained.atan.f128(fp128 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")

  __builtin_atan2(f,f);        __builtin_atan2f(f,f);       __builtin_atan2l(f,f); __builtin_atan2f128(f,f);

// CHECK: call double @llvm.experimental.constrained.atan2.f64(double %{{.*}}, double %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CHECK: call float @llvm.experimental.constrained.atan2.f32(float %{{.*}}, float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CHECK: call x86_fp80 @llvm.experimental.constrained.atan2.f80(x86_fp80 %{{.*}}, x86_fp80 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CHECK: call fp128 @llvm.experimental.constrained.atan2.f128(fp128 %{{.*}}, fp128 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")

  __builtin_ceil(f);       __builtin_ceilf(f);      __builtin_ceill(f); __builtin_ceilf128(f);

// CHECK: call double @llvm.experimental.constrained.ceil.f64(double %{{.*}}, metadata !"fpexcept.strict")
// CHECK: call float @llvm.experimental.constrained.ceil.f32(float %{{.*}}, metadata !"fpexcept.strict")
// CHECK: call x86_fp80 @llvm.experimental.constrained.ceil.f80(x86_fp80 %{{.*}}, metadata !"fpexcept.strict")
// CHECK: call fp128 @llvm.experimental.constrained.ceil.f128(fp128 %{{.*}}, metadata !"fpexcept.strict")
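// Note: ceil (like floor, round, and trunc below) is insensitive to the
// dynamic rounding mode, so its constrained intrinsic takes only the
// exception-behavior metadata and no rounding-mode argument.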

  __builtin_cos(f);        __builtin_cosf(f);       __builtin_cosl(f); __builtin_cosf128(f);

// CHECK: call double @llvm.experimental.constrained.cos.f64(double %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CHECK: call float @llvm.experimental.constrained.cos.f32(float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CHECK: call x86_fp80 @llvm.experimental.constrained.cos.f80(x86_fp80 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CHECK: call fp128 @llvm.experimental.constrained.cos.f128(fp128 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")

  __builtin_cosh(f);        __builtin_coshf(f);       __builtin_coshl(f); __builtin_coshf128(f);

// CHECK: call double @llvm.experimental.constrained.cosh.f64(double %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CHECK: call float @llvm.experimental.constrained.cosh.f32(float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CHECK: call x86_fp80 @llvm.experimental.constrained.cosh.f80(x86_fp80 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CHECK: call fp128 @llvm.experimental.constrained.cosh.f128(fp128 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")

  __builtin_exp(f);        __builtin_expf(f);       __builtin_expl(f); __builtin_expf128(f);

// CHECK: call double @llvm.experimental.constrained.exp.f64(double %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CHECK: call float @llvm.experimental.constrained.exp.f32(float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CHECK: call x86_fp80 @llvm.experimental.constrained.exp.f80(x86_fp80 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CHECK: call fp128 @llvm.experimental.constrained.exp.f128(fp128 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")

  __builtin_exp2(f);       __builtin_exp2f(f);      __builtin_exp2l(f); __builtin_exp2f128(f);

// CHECK: call double @llvm.experimental.constrained.exp2.f64(double %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CHECK: call float @llvm.experimental.constrained.exp2.f32(float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CHECK: call x86_fp80 @llvm.experimental.constrained.exp2.f80(x86_fp80 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CHECK: call fp128 @llvm.experimental.constrained.exp2.f128(fp128 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")

  __builtin_exp10(f);       __builtin_exp10f(f);      __builtin_exp10l(f); __builtin_exp10f128(f);

// CHECK: call double @exp10(double noundef %{{.*}})
// CHECK: call float @exp10f(float noundef %{{.*}})
// CHECK: call x86_fp80 @exp10l(x86_fp80 noundef %{{.*}})
// CHECK: call fp128 @exp10f128(fp128 noundef %{{.*}})
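// Note: exp10 has no constrained intrinsic, so even under strict exception
// semantics it lowers to plain libm calls, as the CHECK lines above verify.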

  __builtin_floor(f);      __builtin_floorf(f);     __builtin_floorl(f); __builtin_floorf128(f);

// CHECK: call double @llvm.experimental.constrained.floor.f64(double %{{.*}}, metadata !"fpexcept.strict")
// CHECK: call float @llvm.experimental.constrained.floor.f32(float %{{.*}}, metadata !"fpexcept.strict")
// CHECK: call x86_fp80 @llvm.experimental.constrained.floor.f80(x86_fp80 %{{.*}}, metadata !"fpexcept.strict")
// CHECK: call fp128 @llvm.experimental.constrained.floor.f128(fp128 %{{.*}}, metadata !"fpexcept.strict")

  __builtin_fma(f,f,f);        __builtin_fmaf(f,f,f);       __builtin_fmal(f,f,f);  __builtin_fmaf128(f,f,f); __builtin_fmaf16(f,f,f);

// CHECK: call double @llvm.experimental.constrained.fma.f64(double %{{.*}}, double %{{.*}}, double %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CHECK: call float @llvm.experimental.constrained.fma.f32(float %{{.*}}, float %{{.*}}, float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CHECK: call x86_fp80 @llvm.experimental.constrained.fma.f80(x86_fp80 %{{.*}}, x86_fp80 %{{.*}}, x86_fp80 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CHECK: call fp128 @llvm.experimental.constrained.fma.f128(fp128 %{{.*}}, fp128 %{{.*}}, fp128 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CHECK: call half @llvm.experimental.constrained.fma.f16(half %{{.*}}, half %{{.*}}, half %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")

  __builtin_fmax(f,f);       __builtin_fmaxf(f,f);      __builtin_fmaxl(f,f); __builtin_fmaxf128(f,f);

// CHECK: call double @llvm.experimental.constrained.maxnum.f64(double %{{.*}}, double %{{.*}}, metadata !"fpexcept.strict")
// CHECK: call float @llvm.experimental.constrained.maxnum.f32(float %{{.*}}, float %{{.*}}, metadata !"fpexcept.strict")
// CHECK: call x86_fp80 @llvm.experimental.constrained.maxnum.f80(x86_fp80 %{{.*}}, x86_fp80 %{{.*}}, metadata !"fpexcept.strict")
// CHECK: call fp128 @llvm.experimental.constrained.maxnum.f128(fp128 %{{.*}}, fp128 %{{.*}}, metadata !"fpexcept.strict")

  __builtin_fmin(f,f);       __builtin_fminf(f,f);      __builtin_fminl(f,f); __builtin_fminf128(f,f);

// CHECK: call double @llvm.experimental.constrained.minnum.f64(double %{{.*}}, double %{{.*}}, metadata !"fpexcept.strict")
// CHECK: call float @llvm.experimental.constrained.minnum.f32(float %{{.*}}, float %{{.*}}, metadata !"fpexcept.strict")
// CHECK: call x86_fp80 @llvm.experimental.constrained.minnum.f80(x86_fp80 %{{.*}}, x86_fp80 %{{.*}}, metadata !"fpexcept.strict")
// CHECK: call fp128 @llvm.experimental.constrained.minnum.f128(fp128 %{{.*}}, fp128 %{{.*}}, metadata !"fpexcept.strict")

  __builtin_llrint(f);     __builtin_llrintf(f);    __builtin_llrintl(f); __builtin_llrintf128(f);

// CHECK: call i64 @llvm.experimental.constrained.llrint.i64.f64(double %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CHECK: call i64 @llvm.experimental.constrained.llrint.i64.f32(float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CHECK: call i64 @llvm.experimental.constrained.llrint.i64.f80(x86_fp80 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CHECK: call i64 @llvm.experimental.constrained.llrint.i64.f128(fp128 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
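// Note: long and long long are both 64 bits on x86_64-linux, so the lrint,
// llrint, lround, and llround families all produce i64 results here.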

  __builtin_llround(f);    __builtin_llroundf(f);   __builtin_llroundl(f); __builtin_llroundf128(f);

// CHECK: call i64 @llvm.experimental.constrained.llround.i64.f64(double %{{.*}}, metadata !"fpexcept.strict")
// CHECK: call i64 @llvm.experimental.constrained.llround.i64.f32(float %{{.*}}, metadata !"fpexcept.strict")
// CHECK: call i64 @llvm.experimental.constrained.llround.i64.f80(x86_fp80 %{{.*}}, metadata !"fpexcept.strict")
// CHECK: call i64 @llvm.experimental.constrained.llround.i64.f128(fp128 %{{.*}}, metadata !"fpexcept.strict")

  __builtin_log(f);        __builtin_logf(f);       __builtin_logl(f); __builtin_logf128(f);

// CHECK: call double @llvm.experimental.constrained.log.f64(double %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CHECK: call float @llvm.experimental.constrained.log.f32(float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CHECK: call x86_fp80 @llvm.experimental.constrained.log.f80(x86_fp80 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CHECK: call fp128 @llvm.experimental.constrained.log.f128(fp128 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")

  __builtin_log10(f);      __builtin_log10f(f);     __builtin_log10l(f); __builtin_log10f128(f);

// CHECK: call double @llvm.experimental.constrained.log10.f64(double %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CHECK: call float @llvm.experimental.constrained.log10.f32(float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CHECK: call x86_fp80 @llvm.experimental.constrained.log10.f80(x86_fp80 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CHECK: call fp128 @llvm.experimental.constrained.log10.f128(fp128 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")

  __builtin_log2(f);       __builtin_log2f(f);      __builtin_log2l(f); __builtin_log2f128(f);

// CHECK: call double @llvm.experimental.constrained.log2.f64(double %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CHECK: call float @llvm.experimental.constrained.log2.f32(float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CHECK: call x86_fp80 @llvm.experimental.constrained.log2.f80(x86_fp80 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CHECK: call fp128 @llvm.experimental.constrained.log2.f128(fp128 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")

  __builtin_lrint(f);      __builtin_lrintf(f);     __builtin_lrintl(f); __builtin_lrintf128(f);

// CHECK: call i64 @llvm.experimental.constrained.lrint.i64.f64(double %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CHECK: call i64 @llvm.experimental.constrained.lrint.i64.f32(float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CHECK: call i64 @llvm.experimental.constrained.lrint.i64.f80(x86_fp80 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CHECK: call i64 @llvm.experimental.constrained.lrint.i64.f128(fp128 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")

  __builtin_lround(f);     __builtin_lroundf(f);    __builtin_lroundl(f); __builtin_lroundf128(f);

// CHECK: call i64 @llvm.experimental.constrained.lround.i64.f64(double %{{.*}}, metadata !"fpexcept.strict")
// CHECK: call i64 @llvm.experimental.constrained.lround.i64.f32(float %{{.*}}, metadata !"fpexcept.strict")
// CHECK: call i64 @llvm.experimental.constrained.lround.i64.f80(x86_fp80 %{{.*}}, metadata !"fpexcept.strict")
// CHECK: call i64 @llvm.experimental.constrained.lround.i64.f128(fp128 %{{.*}}, metadata !"fpexcept.strict")

  __builtin_nearbyint(f);  __builtin_nearbyintf(f); __builtin_nearbyintl(f); __builtin_nearbyintf128(f);

// CHECK: call double @llvm.experimental.constrained.nearbyint.f64(double %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CHECK: call float @llvm.experimental.constrained.nearbyint.f32(float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CHECK: call x86_fp80 @llvm.experimental.constrained.nearbyint.f80(x86_fp80 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CHECK: call fp128 @llvm.experimental.constrained.nearbyint.f128(fp128 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")

  __builtin_rint(f);       __builtin_rintf(f);      __builtin_rintl(f); __builtin_rintf128(f);

// CHECK: call double @llvm.experimental.constrained.rint.f64(double %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CHECK: call float @llvm.experimental.constrained.rint.f32(float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CHECK: call x86_fp80 @llvm.experimental.constrained.rint.f80(x86_fp80 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CHECK: call fp128 @llvm.experimental.constrained.rint.f128(fp128 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")

  __builtin_round(f);      __builtin_roundf(f);     __builtin_roundl(f); __builtin_roundf128(f);

// CHECK: call double @llvm.experimental.constrained.round.f64(double %{{.*}}, metadata !"fpexcept.strict")
// CHECK: call float @llvm.experimental.constrained.round.f32(float %{{.*}}, metadata !"fpexcept.strict")
// CHECK: call x86_fp80 @llvm.experimental.constrained.round.f80(x86_fp80 %{{.*}}, metadata !"fpexcept.strict")
// CHECK: call fp128 @llvm.experimental.constrained.round.f128(fp128 %{{.*}}, metadata !"fpexcept.strict")

  __builtin_sin(f);        __builtin_sinf(f);       __builtin_sinl(f); __builtin_sinf128(f);

// CHECK: call double @llvm.experimental.constrained.sin.f64(double %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CHECK: call float @llvm.experimental.constrained.sin.f32(float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CHECK: call x86_fp80 @llvm.experimental.constrained.sin.f80(x86_fp80 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CHECK: call fp128 @llvm.experimental.constrained.sin.f128(fp128 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")

  __builtin_sinh(f);        __builtin_sinhf(f);       __builtin_sinhl(f); __builtin_sinhf128(f);

// CHECK: call double @llvm.experimental.constrained.sinh.f64(double %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CHECK: call float @llvm.experimental.constrained.sinh.f32(float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CHECK: call x86_fp80 @llvm.experimental.constrained.sinh.f80(x86_fp80 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CHECK: call fp128 @llvm.experimental.constrained.sinh.f128(fp128 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")

  __builtin_sqrt(f);       __builtin_sqrtf(f);      __builtin_sqrtl(f); __builtin_sqrtf128(f);

// CHECK: call double @llvm.experimental.constrained.sqrt.f64(double %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CHECK: call float @llvm.experimental.constrained.sqrt.f32(float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CHECK: call x86_fp80 @llvm.experimental.constrained.sqrt.f80(x86_fp80 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CHECK: call fp128 @llvm.experimental.constrained.sqrt.f128(fp128 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")

  __builtin_tan(f);        __builtin_tanf(f);       __builtin_tanl(f); __builtin_tanf128(f);

// CHECK: call double @llvm.experimental.constrained.tan.f64(double %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CHECK: call float @llvm.experimental.constrained.tan.f32(float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CHECK: call x86_fp80 @llvm.experimental.constrained.tan.f80(x86_fp80 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CHECK: call fp128 @llvm.experimental.constrained.tan.f128(fp128 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")

  __builtin_tanh(f);        __builtin_tanhf(f);       __builtin_tanhl(f); __builtin_tanhf128(f);

// CHECK: call double @llvm.experimental.constrained.tanh.f64(double %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CHECK: call float @llvm.experimental.constrained.tanh.f32(float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CHECK: call x86_fp80 @llvm.experimental.constrained.tanh.f80(x86_fp80 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CHECK: call fp128 @llvm.experimental.constrained.tanh.f128(fp128 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")

  __builtin_trunc(f);      __builtin_truncf(f);     __builtin_truncl(f); __builtin_truncf128(f);

// CHECK: call double @llvm.experimental.constrained.trunc.f64(double %{{.*}}, metadata !"fpexcept.strict")
// CHECK: call float @llvm.experimental.constrained.trunc.f32(float %{{.*}}, metadata !"fpexcept.strict")
// CHECK: call x86_fp80 @llvm.experimental.constrained.trunc.f80(x86_fp80 %{{.*}}, metadata !"fpexcept.strict")
// CHECK: call fp128 @llvm.experimental.constrained.trunc.f128(fp128 %{{.*}}, metadata !"fpexcept.strict")
};

// CHECK: declare double @llvm.experimental.constrained.frem.f64(double, double, metadata, metadata)
// CHECK: declare float @llvm.experimental.constrained.frem.f32(float, float, metadata, metadata)
// CHECK: declare x86_fp80 @llvm.experimental.constrained.frem.f80(x86_fp80, x86_fp80, metadata, metadata)
// CHECK: declare fp128 @llvm.experimental.constrained.frem.f128(fp128, fp128, metadata, metadata)

// CHECK: declare double @llvm.experimental.constrained.pow.f64(double, double, metadata, metadata)
// CHECK: declare float @llvm.experimental.constrained.pow.f32(float, float, metadata, metadata)
// CHECK: declare x86_fp80 @llvm.experimental.constrained.pow.f80(x86_fp80, x86_fp80, metadata, metadata)
// CHECK: declare fp128 @llvm.experimental.constrained.pow.f128(fp128, fp128, metadata, metadata)

// CHECK: declare double @llvm.experimental.constrained.powi.f64(double, i32, metadata, metadata)
// CHECK: declare float @llvm.experimental.constrained.powi.f32(float, i32, metadata, metadata)
// CHECK: declare x86_fp80 @llvm.experimental.constrained.powi.f80(x86_fp80, i32, metadata, metadata)

// CHECK: declare half @llvm.experimental.constrained.ldexp.f16.i32(half, i32, metadata, metadata)
// CHECK: declare double @llvm.experimental.constrained.ldexp.f64.i32(double, i32, metadata, metadata)
// CHECK: declare float @llvm.experimental.constrained.ldexp.f32.i32(float, i32, metadata, metadata)
// CHECK: declare x86_fp80 @llvm.experimental.constrained.ldexp.f80.i32(x86_fp80, i32, metadata, metadata)

// CHECK: declare double @llvm.experimental.constrained.ceil.f64(double, metadata)
// CHECK: declare float @llvm.experimental.constrained.ceil.f32(float, metadata)
// CHECK: declare x86_fp80 @llvm.experimental.constrained.ceil.f80(x86_fp80, metadata)
// CHECK: declare fp128 @llvm.experimental.constrained.ceil.f128(fp128, metadata)

// CHECK: declare double @llvm.experimental.constrained.cos.f64(double, metadata, metadata)
// CHECK: declare float @llvm.experimental.constrained.cos.f32(float, metadata, metadata)
// CHECK: declare x86_fp80 @llvm.experimental.constrained.cos.f80(x86_fp80, metadata, metadata)
// CHECK: declare fp128 @llvm.experimental.constrained.cos.f128(fp128, metadata, metadata)

// CHECK: declare double @llvm.experimental.constrained.exp.f64(double, metadata, metadata)
// CHECK: declare float @llvm.experimental.constrained.exp.f32(float, metadata, metadata)
// CHECK: declare x86_fp80 @llvm.experimental.constrained.exp.f80(x86_fp80, metadata, metadata)
// CHECK: declare fp128 @llvm.experimental.constrained.exp.f128(fp128, metadata, metadata)

// CHECK: declare double @llvm.experimental.constrained.exp2.f64(double, metadata, metadata)
// CHECK: declare float @llvm.experimental.constrained.exp2.f32(float, metadata, metadata)
// CHECK: declare x86_fp80 @llvm.experimental.constrained.exp2.f80(x86_fp80, metadata, metadata)
// CHECK: declare fp128 @llvm.experimental.constrained.exp2.f128(fp128, metadata, metadata)

// CHECK: declare double @exp10(double noundef)
// CHECK: declare float @exp10f(float noundef)
// CHECK: declare x86_fp80 @exp10l(x86_fp80 noundef)
// CHECK: declare fp128 @exp10f128(fp128 noundef)

// CHECK: declare double @llvm.experimental.constrained.floor.f64(double, metadata)
// CHECK: declare float @llvm.experimental.constrained.floor.f32(float, metadata)
// CHECK: declare x86_fp80 @llvm.experimental.constrained.floor.f80(x86_fp80, metadata)
// CHECK: declare fp128 @llvm.experimental.constrained.floor.f128(fp128, metadata)

// CHECK: declare double @llvm.experimental.constrained.fma.f64(double, double, double, metadata, metadata)
// CHECK: declare float @llvm.experimental.constrained.fma.f32(float, float, float, metadata, metadata)
// CHECK: declare x86_fp80 @llvm.experimental.constrained.fma.f80(x86_fp80, x86_fp80, x86_fp80, metadata, metadata)
// CHECK: declare fp128 @llvm.experimental.constrained.fma.f128(fp128, fp128, fp128, metadata, metadata)

// CHECK: declare double @llvm.experimental.constrained.maxnum.f64(double, double, metadata)
// CHECK: declare float @llvm.experimental.constrained.maxnum.f32(float, float, metadata)
// CHECK: declare x86_fp80 @llvm.experimental.constrained.maxnum.f80(x86_fp80, x86_fp80, metadata)
// CHECK: declare fp128 @llvm.experimental.constrained.maxnum.f128(fp128, fp128, metadata)

// CHECK: declare double @llvm.experimental.constrained.minnum.f64(double, double, metadata)
// CHECK: declare float @llvm.experimental.constrained.minnum.f32(float, float, metadata)
// CHECK: declare x86_fp80 @llvm.experimental.constrained.minnum.f80(x86_fp80, x86_fp80, metadata)
// CHECK: declare fp128 @llvm.experimental.constrained.minnum.f128(fp128, fp128, metadata)

// CHECK: declare i64 @llvm.experimental.constrained.llrint.i64.f64(double, metadata, metadata)
// CHECK: declare i64 @llvm.experimental.constrained.llrint.i64.f32(float, metadata, metadata)
// CHECK: declare i64 @llvm.experimental.constrained.llrint.i64.f80(x86_fp80, metadata, metadata)
// CHECK: declare i64 @llvm.experimental.constrained.llrint.i64.f128(fp128, metadata, metadata)

// CHECK: declare i64 @llvm.experimental.constrained.llround.i64.f64(double, metadata)
// CHECK: declare i64 @llvm.experimental.constrained.llround.i64.f32(float, metadata)
// CHECK: declare i64 @llvm.experimental.constrained.llround.i64.f80(x86_fp80, metadata)
// CHECK: declare i64 @llvm.experimental.constrained.llround.i64.f128(fp128, metadata)

// CHECK: declare double @llvm.experimental.constrained.log.f64(double, metadata, metadata)
// CHECK: declare float @llvm.experimental.constrained.log.f32(float, metadata, metadata)
// CHECK: declare x86_fp80 @llvm.experimental.constrained.log.f80(x86_fp80, metadata, metadata)
// CHECK: declare fp128 @llvm.experimental.constrained.log.f128(fp128, metadata, metadata)

// CHECK: declare double @llvm.experimental.constrained.log10.f64(double, metadata, metadata)
// CHECK: declare float @llvm.experimental.constrained.log10.f32(float, metadata, metadata)
// CHECK: declare x86_fp80 @llvm.experimental.constrained.log10.f80(x86_fp80, metadata, metadata)
// CHECK: declare fp128 @llvm.experimental.constrained.log10.f128(fp128, metadata, metadata)

// CHECK: declare double @llvm.experimental.constrained.log2.f64(double, metadata, metadata)
// CHECK: declare float @llvm.experimental.constrained.log2.f32(float, metadata, metadata)
// CHECK: declare x86_fp80 @llvm.experimental.constrained.log2.f80(x86_fp80, metadata, metadata)
// CHECK: declare fp128 @llvm.experimental.constrained.log2.f128(fp128, metadata, metadata)

// CHECK: declare i64 @llvm.experimental.constrained.lrint.i64.f64(double, metadata, metadata)
// CHECK: declare i64 @llvm.experimental.constrained.lrint.i64.f32(float, metadata, metadata)
// CHECK: declare i64 @llvm.experimental.constrained.lrint.i64.f80(x86_fp80, metadata, metadata)
// CHECK: declare i64 @llvm.experimental.constrained.lrint.i64.f128(fp128, metadata, metadata)

// CHECK: declare i64 @llvm.experimental.constrained.lround.i64.f64(double, metadata)
// CHECK: declare i64 @llvm.experimental.constrained.lround.i64.f32(float, metadata)
// CHECK: declare i64 @llvm.experimental.constrained.lround.i64.f80(x86_fp80, metadata)
// CHECK: declare i64 @llvm.experimental.constrained.lround.i64.f128(fp128, metadata)

// CHECK: declare double @llvm.experimental.constrained.nearbyint.f64(double, metadata, metadata)
// CHECK: declare float @llvm.experimental.constrained.nearbyint.f32(float, metadata, metadata)
// CHECK: declare x86_fp80 @llvm.experimental.constrained.nearbyint.f80(x86_fp80, metadata, metadata)
// CHECK: declare fp128 @llvm.experimental.constrained.nearbyint.f128(fp128, metadata, metadata)

// CHECK: declare double @llvm.experimental.constrained.rint.f64(double, metadata, metadata)
// CHECK: declare float @llvm.experimental.constrained.rint.f32(float, metadata, metadata)
// CHECK: declare x86_fp80 @llvm.experimental.constrained.rint.f80(x86_fp80, metadata, metadata)
// CHECK: declare fp128 @llvm.experimental.constrained.rint.f128(fp128, metadata, metadata)

// CHECK: declare double @llvm.experimental.constrained.round.f64(double, metadata)
// CHECK: declare float @llvm.experimental.constrained.round.f32(float, metadata)
// CHECK: declare x86_fp80 @llvm.experimental.constrained.round.f80(x86_fp80, metadata)
// CHECK: declare fp128 @llvm.experimental.constrained.round.f128(fp128, metadata)

// CHECK: declare double @llvm.experimental.constrained.sin.f64(double, metadata, metadata)
// CHECK: declare float @llvm.experimental.constrained.sin.f32(float, metadata, metadata)
// CHECK: declare x86_fp80 @llvm.experimental.constrained.sin.f80(x86_fp80, metadata, metadata)
// CHECK: declare fp128 @llvm.experimental.constrained.sin.f128(fp128, metadata, metadata)

// CHECK: declare double @llvm.experimental.constrained.sqrt.f64(double, metadata, metadata)
// CHECK: declare float @llvm.experimental.constrained.sqrt.f32(float, metadata, metadata)
// CHECK: declare x86_fp80 @llvm.experimental.constrained.sqrt.f80(x86_fp80, metadata, metadata)
// CHECK: declare fp128 @llvm.experimental.constrained.sqrt.f128(fp128, metadata, metadata)

// CHECK: declare double @llvm.experimental.constrained.tan.f64(double, metadata, metadata)
// CHECK: declare float @llvm.experimental.constrained.tan.f32(float, metadata, metadata)
// CHECK: declare x86_fp80 @llvm.experimental.constrained.tan.f80(x86_fp80, metadata, metadata)
// CHECK: declare fp128 @llvm.experimental.constrained.tan.f128(fp128, metadata, metadata)

// CHECK: declare double @llvm.experimental.constrained.trunc.f64(double, metadata)
// CHECK: declare float @llvm.experimental.constrained.trunc.f32(float, metadata)
// CHECK: declare x86_fp80 @llvm.experimental.constrained.trunc.f80(x86_fp80, metadata)
// CHECK: declare fp128 @llvm.experimental.constrained.trunc.f128(fp128, metadata)

#pragma STDC FP_CONTRACT ON
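// With FP_CONTRACT ON, each multiply-add expression below may be contracted;
// under strict exception semantics this shows up as a call to the constrained
// fmuladd intrinsic, with separate fneg instructions for negated operands.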
void bar(float f) {
  f * f + f;
  (double)f * f - f;
  (long double)-f * f + f;
  -(f * f) - f;
  f + -(f * f);

  // CHECK: call float @llvm.experimental.constrained.fmuladd.f32(float %{{.*}}, float %{{.*}}, float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
  // CHECK: fneg
  // CHECK: call double @llvm.experimental.constrained.fmuladd.f64(double %{{.*}}, double %{{.*}}, double %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
  // CHECK: fneg
  // CHECK: call x86_fp80 @llvm.experimental.constrained.fmuladd.f80(x86_fp80 %{{.*}}, x86_fp80 %{{.*}}, x86_fp80 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
  // CHECK: fneg
  // CHECK: fneg
  // CHECK: call float @llvm.experimental.constrained.fmuladd.f32(float %{{.*}}, float %{{.*}}, float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
  // CHECK: fneg
  // CHECK: call float @llvm.experimental.constrained.fmuladd.f32(float %{{.*}}, float %{{.*}}, float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
};
404