/* Compute x * y + z as ternary operation.
   Copyright (C) 2010-2018 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Jakub Jelinek <jakub@redhat.com>, 2010.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

#include "quadmath-imp.h"

/* This implementation uses rounding to odd to avoid problems with
   double rounding.  See a paper by Boldo and Melquiond:
   http://www.lri.fr/~melquion/doc/08-tc.pdf  */
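/* Rounding to odd keeps an exact intermediate result unchanged and
   otherwise picks the neighbouring representable value whose last
   mantissa bit is odd, so the final rounding to the caller's mode
   cannot be damaged by double rounding.  */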

__float128
fmaq (__float128 x, __float128 y, __float128 z)
{
  ieee854_float128 u, v, w;
  int adjust = 0;
  u.value = x;
  v.value = y;
  w.value = z;
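  /* The ieee854_float128 union views let the code below inspect and
     adjust the sign, exponent and mantissa fields of x, y and z
     directly.  */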
  if (__builtin_expect (u.ieee.exponent + v.ieee.exponent
                        >= 0x7fff + IEEE854_FLOAT128_BIAS
                           - FLT128_MANT_DIG, 0)
      || __builtin_expect (u.ieee.exponent >= 0x7fff - FLT128_MANT_DIG, 0)
      || __builtin_expect (v.ieee.exponent >= 0x7fff - FLT128_MANT_DIG, 0)
      || __builtin_expect (w.ieee.exponent >= 0x7fff - FLT128_MANT_DIG, 0)
      || __builtin_expect (u.ieee.exponent + v.ieee.exponent
                           <= IEEE854_FLOAT128_BIAS + FLT128_MANT_DIG, 0))
    {
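      /* Special or extreme operands: x, y or z may be zero, subnormal,
         Inf or NaN, or the exponents of x and y are large or small
         enough that x * y could overflow or underflow.  Handle the
         exceptional cases here and rescale the rest so the exact
         computation below stays in range.  */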
      /* If z is Inf, but x and y are finite, the result should be
         z rather than NaN.  */
      if (w.ieee.exponent == 0x7fff
          && u.ieee.exponent != 0x7fff
          && v.ieee.exponent != 0x7fff)
        return (z + x) + y;
      /* If z is zero and x and y are nonzero, compute the result
         as x * y to avoid the wrong sign of a zero result if x * y
         underflows to 0.  */
      if (z == 0 && x != 0 && y != 0)
        return x * y;
      /* If x or y or z is Inf/NaN, or if x * y is zero, compute as
         x * y + z.  */
      if (u.ieee.exponent == 0x7fff
          || v.ieee.exponent == 0x7fff
          || w.ieee.exponent == 0x7fff
          || x == 0
          || y == 0)
        return x * y + z;
      /* If fma will certainly overflow, compute as x * y.  */
      if (u.ieee.exponent + v.ieee.exponent
          > 0x7fff + IEEE854_FLOAT128_BIAS)
        return x * y;
      /* If x * y is less than 1/4 of FLT128_TRUE_MIN, neither the
         result nor whether there is underflow depends on its exact
         value, only on its sign.  */
      if (u.ieee.exponent + v.ieee.exponent
          < IEEE854_FLOAT128_BIAS - FLT128_MANT_DIG - 2)
        {
          int neg = u.ieee.negative ^ v.ieee.negative;
          __float128 tiny = neg ? -0x1p-16494Q : 0x1p-16494Q;
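          /* 0x1p-16494 is FLT128_TRUE_MIN, the smallest subnormal
             __float128; TINY stands in for x * y and contributes only
             its sign to the sum with z.  */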
          if (w.ieee.exponent >= 3)
            return tiny + z;
          /* Scaling up, adding TINY and scaling down produces the
             correct result, because in round-to-nearest mode adding
             TINY has no effect and in other modes double rounding is
             harmless.  But it may not produce required underflow
             exceptions.  */
          v.value = z * 0x1p114Q + tiny;
          if (TININESS_AFTER_ROUNDING
              ? v.ieee.exponent < 115
              : (w.ieee.exponent == 0
                 || (w.ieee.exponent == 1
                     && w.ieee.negative != neg
                     && w.ieee.mantissa3 == 0
                     && w.ieee.mantissa2 == 0
                     && w.ieee.mantissa1 == 0
                     && w.ieee.mantissa0 == 0)))
            {
              __float128 force_underflow = x * y;
              math_force_eval (force_underflow);
            }
          return v.value * 0x1p-114Q;
        }
      if (u.ieee.exponent + v.ieee.exponent
          >= 0x7fff + IEEE854_FLOAT128_BIAS - FLT128_MANT_DIG)
        {
          /* Compute a 0x1p-113 times smaller result and multiply
             at the end.  */
          if (u.ieee.exponent > v.ieee.exponent)
            u.ieee.exponent -= FLT128_MANT_DIG;
          else
            v.ieee.exponent -= FLT128_MANT_DIG;
          /* If the sum of the x and y exponents is very large and the
             z exponent is very small, it does not matter if z is not
             adjusted.  */
          if (w.ieee.exponent > FLT128_MANT_DIG)
            w.ieee.exponent -= FLT128_MANT_DIG;
          adjust = 1;
        }
      else if (w.ieee.exponent >= 0x7fff - FLT128_MANT_DIG)
        {
          /* Similarly.
             If z exponent is very large and x and y exponents are
             very small, adjust them up to avoid spurious underflows,
             rather than down.  */
          if (u.ieee.exponent + v.ieee.exponent
              <= IEEE854_FLOAT128_BIAS + 2 * FLT128_MANT_DIG)
            {
              if (u.ieee.exponent > v.ieee.exponent)
                u.ieee.exponent += 2 * FLT128_MANT_DIG + 2;
              else
                v.ieee.exponent += 2 * FLT128_MANT_DIG + 2;
            }
          else if (u.ieee.exponent > v.ieee.exponent)
            {
              if (u.ieee.exponent > FLT128_MANT_DIG)
                u.ieee.exponent -= FLT128_MANT_DIG;
            }
          else if (v.ieee.exponent > FLT128_MANT_DIG)
            v.ieee.exponent -= FLT128_MANT_DIG;
          w.ieee.exponent -= FLT128_MANT_DIG;
          adjust = 1;
        }
      else if (u.ieee.exponent >= 0x7fff - FLT128_MANT_DIG)
        {
          u.ieee.exponent -= FLT128_MANT_DIG;
          if (v.ieee.exponent)
            v.ieee.exponent += FLT128_MANT_DIG;
          else
            v.value *= 0x1p113Q;
        }
      else if (v.ieee.exponent >= 0x7fff - FLT128_MANT_DIG)
        {
          v.ieee.exponent -= FLT128_MANT_DIG;
          if (u.ieee.exponent)
            u.ieee.exponent += FLT128_MANT_DIG;
          else
            u.value *= 0x1p113Q;
        }
      else /* if (u.ieee.exponent + v.ieee.exponent
                  <= IEEE854_FLOAT128_BIAS + FLT128_MANT_DIG) */
        {
          if (u.ieee.exponent > v.ieee.exponent)
            u.ieee.exponent += 2 * FLT128_MANT_DIG + 2;
          else
            v.ieee.exponent += 2 * FLT128_MANT_DIG + 2;
          if (w.ieee.exponent <= 4 * FLT128_MANT_DIG + 6)
            {
              if (w.ieee.exponent)
                w.ieee.exponent += 2 * FLT128_MANT_DIG + 2;
              else
                w.value *= 0x1p228Q;
              adjust = -1;
            }
          /* Otherwise x * y should just affect inexact
             and nothing else.  */
        }
      x = u.value;
      y = v.value;
      z = w.value;
    }

  /* Ensure correct sign of exact 0 + 0.  */
  if (__glibc_unlikely ((x == 0 || y == 0) && z == 0))
    {
      x = math_opt_barrier (x);
      return x * y + z;
    }

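  /* Save the caller's floating-point environment and clear the
     exception flags; the error-free transformations below are done in
     round-to-nearest.  */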
  fenv_t env;
  feholdexcept (&env);
  fesetround (FE_TONEAREST);

  /* Multiplication m1 + m2 = x * y using Dekker's algorithm.  */
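  /* C = 2^57 + 1 is the Veltkamp splitting constant for a 113-bit
     significand: x1/x2 and y1/y2 below are the high/low halves of x
     and y, each with roughly half the significand bits, so all four
     partial products are exact and m1 + m2 equals x * y exactly
     (barring overflow or underflow, which the scaling above avoids).  */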
#define C ((1LL << (FLT128_MANT_DIG + 1) / 2) + 1)
  __float128 x1 = x * C;
  __float128 y1 = y * C;
  __float128 m1 = x * y;
  x1 = (x - x1) + x1;
  y1 = (y - y1) + y1;
  __float128 x2 = x - x1;
  __float128 y2 = y - y1;
  __float128 m2 = (((x1 * y1 - m1) + x1 * y2) + x2 * y1) + x2 * y2;

  /* Addition a1 + a2 = z + m1 using Knuth's algorithm.  */
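  /* Knuth's TwoSum: a1 is the rounded sum of z and m1 and a2 its exact
     rounding error, so a1 + a2 equals z + m1 exactly, with no
     requirement on the relative magnitudes of z and m1.  */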
  __float128 a1 = z + m1;
  __float128 t1 = a1 - z;
  __float128 t2 = a1 - t1;
  t1 = m1 - t1;
  t2 = z - t2;
  __float128 a2 = t1 + t2;
  /* Ensure the arithmetic is not scheduled after feclearexcept call.  */
  math_force_eval (m2);
  math_force_eval (a2);
  feclearexcept (FE_INEXACT);

  /* If the result is an exact zero, ensure it has the correct sign.  */
  if (a1 == 0 && m2 == 0)
    {
      feupdateenv (&env);
      /* Ensure that round-to-nearest value of z + m1 is not reused.  */
      z = math_opt_barrier (z);
      return z + m1;
    }

  fesetround (FE_TOWARDZERO);
  /* Perform m2 + a2 addition with round to odd.  */
  u.value = a2 + m2;
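  /* With FE_TOWARDZERO the sum above is truncated; ORing the inexact
     flag into an even low mantissa bit below completes the rounding to
     odd of a2 + m2.  */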

  if (__glibc_likely (adjust == 0))
    {
      if ((u.ieee.mantissa3 & 1) == 0 && u.ieee.exponent != 0x7fff)
        u.ieee.mantissa3 |= fetestexcept (FE_INEXACT) != 0;
      feupdateenv (&env);
      /* Result is a1 + u.value.  */
      return a1 + u.value;
    }
  else if (__glibc_likely (adjust > 0))
    {
      if ((u.ieee.mantissa3 & 1) == 0 && u.ieee.exponent != 0x7fff)
        u.ieee.mantissa3 |= fetestexcept (FE_INEXACT) != 0;
      feupdateenv (&env);
      /* Result is a1 + u.value, scaled up.  */
      return (a1 + u.value) * 0x1p113Q;
    }
  else
    {
      if ((u.ieee.mantissa3 & 1) == 0)
        u.ieee.mantissa3 |= fetestexcept (FE_INEXACT) != 0;
      v.value = a1 + u.value;
      /* Ensure the addition is not scheduled after fetestexcept call.  */
      math_force_eval (v.value);
      int j = fetestexcept (FE_INEXACT) != 0;
      feupdateenv (&env);
      /* Ensure the following computations are performed in default rounding
         mode instead of just reusing the round to zero computation.  */
      asm volatile ("" : "=m" (u) : "m" (u));
      /* If a1 + u.value is exact, the only rounding happens during
         scaling down.  */
      if (j == 0)
        return v.value * 0x1p-228Q;
      /* If result rounded to zero is not subnormal, no double
         rounding will occur.  */
      if (v.ieee.exponent > 228)
        return (a1 + u.value) * 0x1p-228Q;
      /* If v.value * 0x1p-228Q with round to zero is a subnormal at
         least FLT128_MIN / 2, then v.value * 0x1p-228Q shifts the
         mantissa down just by 1 bit, which means v.ieee.mantissa3 |= j
         would change the round bit, not the sticky or guard bit.
         v.value * 0x1p-228Q never normalizes by shifting up,
         so the round bit plus sticky bit are already enough
         for proper rounding.  */
      if (v.ieee.exponent == 228)
        {
          /* If the exponent would be in the normal range when
             rounding to normal precision with unbounded exponent
             range, the exact result is known and spurious underflows
             must be avoided on systems detecting tininess after
             rounding.  */
          if (TININESS_AFTER_ROUNDING)
            {
              w.value = a1 + u.value;
              if (w.ieee.exponent == 229)
                return w.value * 0x1p-228Q;
            }
          /* v.ieee.mantissa3 & 2 is the least significant bit of the
             result before rounding, v.ieee.mantissa3 & 1 is the round
             bit and j is our sticky bit.  */
          w.value = 0;
          w.ieee.mantissa3 = ((v.ieee.mantissa3 & 3) << 1) | j;
          w.ieee.negative = v.ieee.negative;
          v.ieee.mantissa3 &= ~3U;
          v.value *= 0x1p-228Q;
          w.value *= 0x1p-2Q;
          return v.value + w.value;
        }
      v.ieee.mantissa3 |= j;
      return v.value * 0x1p-228Q;
    }
}