xref: /minix3/lib/libm/src/s_fmal.c (revision 84d9c625bfea59e274550651111ae9edfdc40fbd)
/*	$NetBSD: s_fmal.c,v 1.3 2013/02/12 21:40:19 martin Exp $	*/

/*-
 * Copyright (c) 2005-2011 David Schultz <das@FreeBSD.ORG>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#if 0
__FBSDID("$FreeBSD: src/lib/msun/src/s_fmal.c,v 1.7 2011/10/21 06:30:43 das Exp $");
#else
__RCSID("$NetBSD: s_fmal.c,v 1.3 2013/02/12 21:40:19 martin Exp $");
#endif

#include <machine/ieee.h>
#include <fenv.h>
#include <float.h>
#include <math.h>

#include "math_private.h"

#ifdef __HAVE_LONG_DOUBLE
/*
 * A struct dd represents a floating-point number with twice the precision
 * of a long double.  We maintain the invariant that "hi" stores the
 * high-order bits of the result.
 */
struct dd {
	long double hi;
	long double lo;
};
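
/*
 * Illustration (assuming the ld80 format, LDBL_MANT_DIG == 64): the pair
 * represents hi + lo exactly.  For example, 1.0L + 0x1p-100L is not
 * representable in a single long double, but a struct dd holds it exactly
 * as hi = 1.0L, lo = 0x1p-100L.
 */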

/*
 * Compute a+b exactly, returning the exact result in a struct dd.  We assume
 * that both a and b are finite, but make no assumptions about their relative
 * magnitudes.
 */
static inline struct dd
dd_add(long double a, long double b)
{
	struct dd ret;
	long double s;

	ret.hi = a + b;
	s = ret.hi - a;
	ret.lo = (a - (ret.hi - s)) + (b - s);
	return (ret);
}
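
/*
 * Illustration, not part of the library: dd_add is the classic 2Sum step.
 * A hypothetical round-to-nearest example on ld80, where a + b rounds away
 * the small addend and ret.lo recovers it exactly:
 */
#if 0
	struct dd d = dd_add(1.0L, 0x1p-80L);
	/* d.hi == 1.0L (the rounded sum), d.lo == 0x1p-80L (the exact
	   rounding error), so d.hi + d.lo reproduces the exact result. */
#endif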

/*
 * Compute a+b, with a small tweak:  The least significant bit of the
 * result is adjusted into a sticky bit summarizing all the bits that
 * were lost to rounding.  This adjustment negates the effects of double
 * rounding when the result is added to another number with a higher
 * exponent.  For an explanation of round and sticky bits, see any reference
 * on FPU design, e.g.,
 *
 *     J. Coonen.  An Implementation Guide to a Proposed Standard for
 *     Floating-Point Arithmetic.  Computer, vol. 13, no. 1, Jan 1980.
 */
static inline long double
add_adjusted(long double a, long double b)
{
	struct dd sum;
	union ieee_ext_u u;

	sum = dd_add(a, b);
	if (sum.lo != 0) {
		u.extu_ld = sum.hi;
		if ((u.extu_ext.ext_fracl & 1) == 0)
			sum.hi = nextafterl(sum.hi, INFINITY * sum.lo);
	}
	return (sum.hi);
}
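
/*
 * Illustration, assuming ld80, round-to-nearest, and that ext_fracl holds
 * the low-order fraction bits: 1.0 + 2^-80 rounds to 1.0 and discards a
 * nonzero tail, and the low fraction bit of 1.0 is even, so the result is
 * nudged one ulp toward the tail:
 */
#if 0
	long double r = add_adjusted(1.0L, 0x1p-80L);
	/* r == 1.0L + 0x1p-63L; its set low bit acts as the sticky bit, so
	   a later addition with a larger number cannot see a false tie. */
#endif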

/*
 * Compute ldexp(a+b, scale) with a single rounding error. It is assumed
 * that the result will be subnormal, and care is taken to ensure that
 * double rounding does not occur.
 */
static inline long double
add_and_denormalize(long double a, long double b, int scale)
{
	struct dd sum;
	int bits_lost;
	union ieee_ext_u u;

	sum = dd_add(a, b);

	/*
	 * If we are losing at least two bits of accuracy to denormalization,
	 * then the first lost bit becomes a round bit, and we adjust the
	 * lowest bit of sum.hi to make it a sticky bit summarizing all the
	 * bits in sum.lo. With the sticky bit adjusted, the hardware will
	 * break any ties in the correct direction.
	 *
	 * If we are losing only one bit to denormalization, however, we must
	 * break the ties manually.
	 */
	if (sum.lo != 0) {
		u.extu_ld = sum.hi;
		bits_lost = -u.extu_ext.ext_exp - scale + 1;
		if ((bits_lost != 1) ^ (int)(u.extu_ext.ext_fracl & 1))
			sum.hi = nextafterl(sum.hi, INFINITY * sum.lo);
	}
	/* Scale in long double precision; a narrowing ldexp((double)...)
	   here would discard low-order bits and reintroduce a rounding. */
	return (ldexpl(sum.hi, scale));
}
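
/*
 * Worked reading of the fixup above (illustrative, assuming the ld80
 * layout): when bits_lost >= 2, the first lost bit is the hardware's
 * round bit, and forcing the low bit of sum.hi odd makes it a sticky
 * summary of sum.lo, so the single rounding in ldexpl() breaks ties in
 * the right direction.  When bits_lost == 1, the lost bit itself is the
 * round bit and there is no room below it for a sticky bit, so the tie
 * is pre-broken here by nudging sum.hi one ulp toward sum.lo.
 */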

/*
 * Compute a*b exactly, returning the exact result in a struct dd.  We assume
 * that both a and b are normalized, so no underflow or overflow will occur.
 * The current rounding mode must be round-to-nearest.
 */
static inline struct dd
dd_mul(long double a, long double b)
{
#if LDBL_MANT_DIG == 64
	static const long double split = 0x1p32L + 1.0;
#elif LDBL_MANT_DIG == 113
	static const long double split = 0x1p57L + 1.0;
#endif
	struct dd ret;
	long double ha, hb, la, lb, p, q;

	p = a * split;
	ha = a - p;
	ha += p;
	la = a - ha;

	p = b * split;
	hb = b - p;
	hb += p;
	lb = b - hb;

	p = ha * hb;
	q = ha * lb + la * hb;

	ret.hi = p + q;
	ret.lo = p - ret.hi + q + la * lb;
	return (ret);
}
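
/*
 * Illustration, not part of the library (ld80, round-to-nearest): Dekker's
 * split cuts each factor into 32-bit halves whose pairwise products are
 * exact in 64-bit arithmetic.  A hypothetical check:
 */
#if 0
	long double a = 1.0L + 0x1p-63L;	/* 1 + one ulp */
	struct dd m = dd_mul(a, a);
	/* a*a = 1 + 2^-62 + 2^-126 exactly; m.hi == 1.0L + 0x1p-62L (the
	   rounded product) and m.lo == 0x1p-126L, so m.hi + m.lo is exact. */
#endif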

/*
 * Fused multiply-add: Compute x * y + z with a single rounding error.
 *
 * We use scaling to avoid overflow/underflow, along with the
 * canonical precision-doubling technique adapted from:
 *
 *	Dekker, T.  A Floating-Point Technique for Extending the
 *	Available Precision.  Numer. Math. 18, 224-242 (1971).
 */
long double
fmal(long double x, long double y, long double z)
{
	long double xs, ys, zs, adj;
	struct dd xy, r;
	int oround;
	int ex, ey, ez;
	int spread;

	/*
	 * Handle special cases. The order of operations and the particular
	 * return values here are crucial in handling special cases involving
	 * infinities, NaNs, overflows, and signed zeroes correctly.
	 */
	if (x == 0.0 || y == 0.0)
		return (x * y + z);
	if (z == 0.0)
		return (x * y);
	if (!isfinite(x) || !isfinite(y))
		return (x * y + z);
	if (!isfinite(z))
		return (z);

	xs = frexpl(x, &ex);
	ys = frexpl(y, &ey);
	zs = frexpl(z, &ez);
	oround = fegetround();
	spread = ex + ey - ez;

	/*
	 * If x * y and z are many orders of magnitude apart, the scaling
	 * will overflow, so we handle these cases specially.  Rounding
	 * modes other than FE_TONEAREST are painful.
	 */
	if (spread < -LDBL_MANT_DIG) {
		feraiseexcept(FE_INEXACT);
		if (!isnormal(z))
			feraiseexcept(FE_UNDERFLOW);
		switch (oround) {
		case FE_TONEAREST:
			return (z);
		case FE_TOWARDZERO:
			if ((x > 0.0) ^ (y < 0.0) ^ (z < 0.0))
				return (z);
			else
				return (nextafterl(z, 0));
		case FE_DOWNWARD:
			if ((x > 0.0) ^ (y < 0.0))
				return (z);
			else
				return (nextafterl(z, (long double)-INFINITY));
		default:	/* FE_UPWARD */
			if ((x > 0.0) ^ (y < 0.0))
				return (nextafterl(z, (long double)INFINITY));
			else
				return (z);
		}
	}
	if (spread <= LDBL_MANT_DIG * 2)
		zs = ldexpl(zs, -spread);
	else
		zs = copysignl(LDBL_MIN, zs);

	fesetround(FE_TONEAREST);

	/*
	 * Basic approach for round-to-nearest:
	 *
	 *     (xy.hi, xy.lo) = x * y		(exact)
	 *     (r.hi, r.lo)   = xy.hi + z	(exact)
	 *     adj = xy.lo + r.lo		(inexact; low bit is sticky)
	 *     result = r.hi + adj		(correctly rounded)
	 */
	xy = dd_mul(xs, ys);
	r = dd_add(xy.hi, zs);

	spread = ex + ey;

	if (r.hi == 0.0) {
		/*
		 * When the addends cancel to 0, ensure that the result has
		 * the correct sign.
		 */
		fesetround(oround);
		{
		volatile long double vzs = zs; /* XXX gcc CSE bug workaround */
		return (xy.hi + vzs + ldexpl(xy.lo, spread));
		}
	}

	if (oround != FE_TONEAREST) {
		/*
		 * There is no need to worry about double rounding in directed
		 * rounding modes.
		 */
		fesetround(oround);
		adj = r.lo + xy.lo;
		return (ldexpl(r.hi + adj, spread));
	}

	adj = add_adjusted(r.lo, xy.lo);
	if (spread + ilogbl(r.hi) > -16383)
		return (ldexpl(r.hi + adj, spread));
	else
		return (add_and_denormalize(r.hi, adj, spread));
}
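
/*
 * Hypothetical usage (illustration only; assumes ld80 and FE_TONEAREST):
 * because fmal rounds only once, it preserves a product term that a
 * separate multiply-then-add loses.
 */
#if 0
	long double a = 1.0L + 0x1p-32L;
	long double r = fmal(a, a, -1.0L);
	/* Exact: a*a - 1 = 2^-31 + 2^-64.  A separate (a * a) rounds the
	   2^-64 term away (round-to-even tie) before the subtraction;
	   fmal keeps it, so r == 0x1p-31L + 0x1p-64L. */
#endif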
#endif