/* Compute x * y + z as ternary operation.
   Copyright (C) 2010-2018 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Jakub Jelinek <jakub@redhat.com>, 2010.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

#include "quadmath-imp.h"

/* This implementation uses rounding to odd to avoid problems with
   double rounding.  See a paper by Boldo and Melquiond:
   http://www.lri.fr/~melquion/doc/08-tc.pdf  */
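/* Rounding to odd: compute the low part of the result in a directed
   rounding mode and, if it is inexact, force its least significant
   mantissa bit to 1.  Given enough extra precision in the intermediate
   result, a subsequent rounding to nearest cannot then land on the
   wrong side of a halfway case, so the two rounding steps together
   behave like a single correctly rounded operation; see the paper
   above for the formal argument.  */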

__float128
fmaq (__float128 x, __float128 y, __float128 z)
{
  ieee854_float128 u, v, w;
  int adjust = 0;
  u.value = x;
  v.value = y;
  w.value = z;
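  /* The block below handles the cases where any input is Inf/NaN or
     close to the Inf/NaN exponent range, where the product x * y could
     overflow, or where x * y could be very small (including x or y
     being zero or subnormal): such operands are either dealt with
     specially or have their exponents adjusted so the core algorithm
     stays in range, with the result rescaled at the end.  */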
  if (__builtin_expect (u.ieee.exponent + v.ieee.exponent
                        >= 0x7fff + IEEE854_FLOAT128_BIAS
                           - FLT128_MANT_DIG, 0)
      || __builtin_expect (u.ieee.exponent >= 0x7fff - FLT128_MANT_DIG, 0)
      || __builtin_expect (v.ieee.exponent >= 0x7fff - FLT128_MANT_DIG, 0)
      || __builtin_expect (w.ieee.exponent >= 0x7fff - FLT128_MANT_DIG, 0)
      || __builtin_expect (u.ieee.exponent + v.ieee.exponent
                           <= IEEE854_FLOAT128_BIAS + FLT128_MANT_DIG, 0))
    {
      /* If z is Inf, but x and y are finite, the result should be
         z rather than NaN.  */
      if (w.ieee.exponent == 0x7fff
          && u.ieee.exponent != 0x7fff
          && v.ieee.exponent != 0x7fff)
        return (z + x) + y;
      /* If z is zero and x and y are nonzero, compute the result
         as x * y to avoid the wrong sign of a zero result if x * y
         underflows to 0.  */
      if (z == 0 && x != 0 && y != 0)
        return x * y;
      /* If x or y or z is Inf/NaN, or if x * y is zero, compute as
         x * y + z.  */
      if (u.ieee.exponent == 0x7fff
          || v.ieee.exponent == 0x7fff
          || w.ieee.exponent == 0x7fff
          || x == 0
          || y == 0)
        return x * y + z;
      /* If fma will certainly overflow, compute as x * y.  */
      if (u.ieee.exponent + v.ieee.exponent
          > 0x7fff + IEEE854_FLOAT128_BIAS)
        return x * y;
      /* If x * y is less than 1/4 of FLT128_TRUE_MIN, neither the
         result nor whether there is underflow depends on its exact
         value, only on its sign.  */
      if (u.ieee.exponent + v.ieee.exponent
          < IEEE854_FLOAT128_BIAS - FLT128_MANT_DIG - 2)
        {
          int neg = u.ieee.negative ^ v.ieee.negative;
          __float128 tiny = neg ? -0x1p-16494Q : 0x1p-16494Q;
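          /* TINY is FLT128_TRUE_MIN (the smallest subnormal, 2^-16494)
             with the sign of x * y; it stands in for the true product,
             which is too small to influence the result except through
             its sign and its being nonzero and inexact.  */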
          if (w.ieee.exponent >= 3)
            return tiny + z;
          /* Scaling up, adding TINY and scaling down produces the
             correct result, because in round-to-nearest mode adding
             TINY has no effect and in other modes double rounding is
             harmless.  But it may not produce required underflow
             exceptions.  */
          v.value = z * 0x1p114Q + tiny;
          if (TININESS_AFTER_ROUNDING
              ? v.ieee.exponent < 115
              : (w.ieee.exponent == 0
                 || (w.ieee.exponent == 1
                     && w.ieee.negative != neg
                     && w.ieee.mantissa3 == 0
                     && w.ieee.mantissa2 == 0
                     && w.ieee.mantissa1 == 0
                     && w.ieee.mantissa0 == 0)))
            {
              __float128 force_underflow = x * y;
              math_force_eval (force_underflow);
            }
          return v.value * 0x1p-114Q;
        }
      if (u.ieee.exponent + v.ieee.exponent
          >= 0x7fff + IEEE854_FLOAT128_BIAS - FLT128_MANT_DIG)
        {
          /* Compute 1p-113 times smaller result and multiply
             at the end.  */
          if (u.ieee.exponent > v.ieee.exponent)
            u.ieee.exponent -= FLT128_MANT_DIG;
          else
            v.ieee.exponent -= FLT128_MANT_DIG;
          /* If the x * y exponent is very large and the z exponent is
             very small, it doesn't matter if we don't adjust it.  */
          if (w.ieee.exponent > FLT128_MANT_DIG)
            w.ieee.exponent -= FLT128_MANT_DIG;
          adjust = 1;
        }
      else if (w.ieee.exponent >= 0x7fff - FLT128_MANT_DIG)
        {
          /* Similarly.
             If z exponent is very large and x and y exponents are
             very small, adjust them up to avoid spurious underflows,
             rather than down.  */
          if (u.ieee.exponent + v.ieee.exponent
              <= IEEE854_FLOAT128_BIAS + 2 * FLT128_MANT_DIG)
            {
              if (u.ieee.exponent > v.ieee.exponent)
                u.ieee.exponent += 2 * FLT128_MANT_DIG + 2;
              else
                v.ieee.exponent += 2 * FLT128_MANT_DIG + 2;
            }
          else if (u.ieee.exponent > v.ieee.exponent)
            {
              if (u.ieee.exponent > FLT128_MANT_DIG)
                u.ieee.exponent -= FLT128_MANT_DIG;
            }
          else if (v.ieee.exponent > FLT128_MANT_DIG)
            v.ieee.exponent -= FLT128_MANT_DIG;
          w.ieee.exponent -= FLT128_MANT_DIG;
          adjust = 1;
        }
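      /* x alone is huge: its exponent is within FLT128_MANT_DIG of the
         Inf/NaN range, so multiplying x by the splitting constant in
         Dekker's algorithm below could overflow.  Move a factor of
         2^FLT128_MANT_DIG from x to y, which leaves the product x * y
         unchanged (y is scaled by multiplication if it is subnormal).  */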
      else if (u.ieee.exponent >= 0x7fff - FLT128_MANT_DIG)
        {
          u.ieee.exponent -= FLT128_MANT_DIG;
          if (v.ieee.exponent)
            v.ieee.exponent += FLT128_MANT_DIG;
          else
            v.value *= 0x1p113Q;
        }
      else if (v.ieee.exponent >= 0x7fff - FLT128_MANT_DIG)
        {
          v.ieee.exponent -= FLT128_MANT_DIG;
          if (u.ieee.exponent)
            u.ieee.exponent += FLT128_MANT_DIG;
          else
            u.value *= 0x1p113Q;
        }
      else /* if (u.ieee.exponent + v.ieee.exponent
                  <= IEEE854_FLOAT128_BIAS + FLT128_MANT_DIG) */
        {
          if (u.ieee.exponent > v.ieee.exponent)
            u.ieee.exponent += 2 * FLT128_MANT_DIG + 2;
          else
            v.ieee.exponent += 2 * FLT128_MANT_DIG + 2;
          if (w.ieee.exponent <= 4 * FLT128_MANT_DIG + 6)
            {
              if (w.ieee.exponent)
                w.ieee.exponent += 2 * FLT128_MANT_DIG + 2;
              else
                w.value *= 0x1p228Q;
              adjust = -1;
            }
          /* Otherwise x * y should just affect inexact
             and nothing else.  */
        }
      x = u.value;
      y = v.value;
      z = w.value;
    }

  /* Ensure correct sign of exact 0 + 0.  */
  if (__glibc_unlikely ((x == 0 || y == 0) && z == 0))
    {
      x = math_opt_barrier (x);
      return x * y + z;
    }

  fenv_t env;
  feholdexcept (&env);
  fesetround (FE_TONEAREST);

  /* Multiplication m1 + m2 = x * y using Dekker's algorithm.  */
#define C ((1LL << (FLT128_MANT_DIG + 1) / 2) + 1)
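  /* With FLT128_MANT_DIG == 113, C is 2^57 + 1.  Multiplying by C and
     recombining below (Veltkamp splitting) breaks each factor into a
     high and a low half of roughly half the significand width, so each
     of the partial products x1*y1, x1*y2, x2*y1 and x2*y2 is exactly
     representable in __float128.  */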
  __float128 x1 = x * C;
  __float128 y1 = y * C;
  __float128 m1 = x * y;
  x1 = (x - x1) + x1;
  y1 = (y - y1) + y1;
  __float128 x2 = x - x1;
  __float128 y2 = y - y1;
  __float128 m2 = (((x1 * y1 - m1) + x1 * y2) + x2 * y1) + x2 * y2;
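  /* m2 is the rounding error of m1 = x * y, so m1 + m2 represents the
     product exactly; the exponent adjustments above keep the operands
     in a range where the splitting incurs no intermediate overflow.  */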

  /* Addition a1 + a2 = z + m1 using Knuth's algorithm.  */
  __float128 a1 = z + m1;
  __float128 t1 = a1 - z;
  __float128 t2 = a1 - t1;
  t1 = m1 - t1;
  t2 = z - t2;
  __float128 a2 = t1 + t2;
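  /* a2 is the exact rounding error of a1 = z + m1, so a1 + a2 == z + m1
     exactly; Knuth's two-sum is branch-free and needs no comparison of
     the magnitudes of z and m1.  */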
  /* Ensure the arithmetic is not scheduled after feclearexcept call.  */
  math_force_eval (m2);
  math_force_eval (a2);
  feclearexcept (FE_INEXACT);

  /* If the result is an exact zero, ensure it has the correct sign.  */
  if (a1 == 0 && m2 == 0)
    {
      feupdateenv (&env);
      /* Ensure that round-to-nearest value of z + m1 is not reused.  */
      z = math_opt_barrier (z);
      return z + m1;
    }

  fesetround (FE_TOWARDZERO);
  /* Perform m2 + a2 addition with round to odd.  */
  u.value = a2 + m2;
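  /* u.value now holds a2 + m2 truncated toward zero; the rounding to
     odd is completed below by ORing the inexact flag into the least
     significant mantissa bit, so the final round-to-nearest additions
     cannot be hurt by double rounding.  */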

  if (__glibc_likely (adjust == 0))
    {
      if ((u.ieee.mantissa3 & 1) == 0 && u.ieee.exponent != 0x7fff)
        u.ieee.mantissa3 |= fetestexcept (FE_INEXACT) != 0;
      feupdateenv (&env);
      /* Result is a1 + u.value.  */
      return a1 + u.value;
    }
  else if (__glibc_likely (adjust > 0))
    {
      if ((u.ieee.mantissa3 & 1) == 0 && u.ieee.exponent != 0x7fff)
        u.ieee.mantissa3 |= fetestexcept (FE_INEXACT) != 0;
      feupdateenv (&env);
      /* Result is a1 + u.value, scaled up.  */
      return (a1 + u.value) * 0x1p113Q;
    }
  else
    {
      if ((u.ieee.mantissa3 & 1) == 0)
        u.ieee.mantissa3 |= fetestexcept (FE_INEXACT) != 0;
      v.value = a1 + u.value;
      /* Ensure the addition is not scheduled after fetestexcept call.  */
      math_force_eval (v.value);
      int j = fetestexcept (FE_INEXACT) != 0;
      feupdateenv (&env);
      /* Ensure the following computations are performed in default rounding
         mode instead of just reusing the round to zero computation.  */
      asm volatile ("" : "=m" (u) : "m" (u));
      /* If a1 + u.value is exact, the only rounding happens during
         scaling down.  */
      if (j == 0)
        return v.value * 0x1p-228Q;
      /* If result rounded to zero is not subnormal, no double
         rounding will occur.  */
      if (v.ieee.exponent > 228)
        return (a1 + u.value) * 0x1p-228Q;
      /* If v.value * 0x1p-228Q with round to zero is a subnormal above
         or equal to FLT128_MIN / 2, then v.value * 0x1p-228Q shifts the
         mantissa down by just 1 bit, which means v.ieee.mantissa3 |= j
         would change the round bit, not the sticky or guard bit.
         v.value * 0x1p-228Q never normalizes by shifting up,
         so the round bit plus the sticky bit are already enough
         for proper rounding.  */
      if (v.ieee.exponent == 228)
        {
          /* If the exponent would be in the normal range when
             rounding to normal precision with unbounded exponent
             range, the exact result is known and spurious underflows
             must be avoided on systems detecting tininess after
             rounding.  */
          if (TININESS_AFTER_ROUNDING)
            {
              w.value = a1 + u.value;
              if (w.ieee.exponent == 229)
                return w.value * 0x1p-228Q;
            }
          /* v.ieee.mantissa3 & 2 is the LSB of the result before rounding,
             v.ieee.mantissa3 & 1 is the round bit and j is our sticky
             bit.  */
          w.value = 0;
          w.ieee.mantissa3 = ((v.ieee.mantissa3 & 3) << 1) | j;
          w.ieee.negative = v.ieee.negative;
          v.ieee.mantissa3 &= ~3U;
          v.value *= 0x1p-228Q;
          w.value *= 0x1p-2Q;
          return v.value + w.value;
        }
      v.ieee.mantissa3 |= j;
      return v.value * 0x1p-228Q;
    }
}