/*	$NetBSD: s_fma.c,v 1.6 2013/02/14 09:24:50 matt Exp $	*/

/*-
 * Copyright (c) 2005-2011 David Schultz <das@FreeBSD.ORG>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#if 0
__FBSDID("$FreeBSD: src/lib/msun/src/s_fma.c,v 1.8 2011/10/21 06:30:43 das Exp $");
#else
__RCSID("$NetBSD: s_fma.c,v 1.6 2013/02/14 09:24:50 matt Exp $");
#endif

#include <machine/ieee.h>
#include <fenv.h>
#include <float.h>
#include <math.h>

#include "math_private.h"

#ifndef __HAVE_LONG_DOUBLE
__strong_alias(fmal, fma)
#endif

/*
 * A struct dd represents a floating-point number with twice the precision
 * of a double.  We maintain the invariant that "hi" stores the 53 high-order
 * bits of the result.
 */
struct dd {
	double hi;
	double lo;
};
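
/*
 * For example, 2^53 + 1 is not representable as a single double, but it
 * is held exactly as the pair hi = 0x1p53, lo = 1.0.  By the invariant,
 * |lo| is at most half an ulp of hi.
 */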

/*
 * Compute a+b exactly, returning the exact result in a struct dd.  We assume
 * that both a and b are finite, but make no assumptions about their relative
 * magnitudes.
 */
static inline struct dd
dd_add(double a, double b)
{
	struct dd ret;
	double s;

	ret.hi = a + b;
	s = ret.hi - a;
	ret.lo = (a - (ret.hi - s)) + (b - s);
	return (ret);
}
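
/*
 * Illustrative sketch (not compiled): dd_add() is Knuth's TwoSum.  The
 * rounded sum lands in hi and the rounding error is recovered exactly in
 * lo.  The helper name below is hypothetical; it assumes <assert.h> and
 * round-to-nearest.
 */
#if 0
#include <assert.h>

static void
dd_add_example(void)
{
	/* 2^53 + 1 rounds to 2^53; the lost 1.0 is captured in lo. */
	struct dd s = dd_add(0x1p53, 1.0);

	assert(s.hi == 0x1p53);
	assert(s.lo == 1.0);
}
#endif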

/*
 * Compute a+b, with a small tweak:  The least significant bit of the
 * result is adjusted into a sticky bit summarizing all the bits that
 * were lost to rounding.  This adjustment negates the effects of double
 * rounding when the result is added to another number with a higher
 * exponent.  For an explanation of round and sticky bits, see any reference
 * on FPU design, e.g.,
 *
 *     J. Coonen.  An Implementation Guide to a Proposed Standard for
 *     Floating-Point Arithmetic.  Computer, vol. 13, no. 1, Jan 1980.
 */
static inline double
add_adjusted(double a, double b)
{
	struct dd sum;
	uint64_t hibits, lobits;

	sum = dd_add(a, b);
	if (sum.lo != 0) {
		EXTRACT_WORD64(hibits, sum.hi);
		if ((hibits & 1) == 0) {
			/* hibits += (int)copysign(1.0, sum.hi * sum.lo) */
			EXTRACT_WORD64(lobits, sum.lo);
			hibits += 1 - ((hibits ^ lobits) >> 62);
			INSERT_WORD64(sum.hi, hibits);
		}
	}
	return (sum.hi);
}
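
/*
 * Illustrative sketch (not compiled): 1.0 + 2^-53 is an exact tie, so a
 * plain addition rounds to even and returns 1.0, discarding the residual.
 * add_adjusted() instead forces the low bit of the sum on, so the lost
 * bits survive as a sticky bit for a later rounding.  The helper name is
 * hypothetical and assumes round-to-nearest.
 */
#if 0
#include <assert.h>

static void
add_adjusted_example(void)
{
	assert(1.0 + 0x1p-53 == 1.0);		/* tie; rounds to even */
	assert(add_adjusted(1.0, 0x1p-53) == 1.0 + DBL_EPSILON);
}
#endif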

/*
 * Compute ldexp(a+b, scale) with a single rounding error. It is assumed
 * that the result will be subnormal, and care is taken to ensure that
 * double rounding does not occur.
 */
static inline double
add_and_denormalize(double a, double b, int scale)
{
	struct dd sum;
	uint64_t hibits, lobits;
	int bits_lost;

	sum = dd_add(a, b);

	/*
	 * If we are losing at least two bits of accuracy to denormalization,
	 * then the first lost bit becomes a round bit, and we adjust the
	 * lowest bit of sum.hi to make it a sticky bit summarizing all the
	 * bits in sum.lo. With the sticky bit adjusted, the hardware will
	 * break any ties in the correct direction.
	 *
	 * If we are losing only one bit to denormalization, however, we must
	 * break the ties manually.
	 */
	if (sum.lo != 0) {
		EXTRACT_WORD64(hibits, sum.hi);
		bits_lost = -((int)(hibits >> 52) & 0x7ff) - scale + 1;
		if ((bits_lost != 1) ^ (int)(hibits & 1)) {
			/* hibits += (int)copysign(1.0, sum.hi * sum.lo) */
			EXTRACT_WORD64(lobits, sum.lo);
			hibits += 1 - (((hibits ^ lobits) >> 62) & 2);
			INSERT_WORD64(sum.hi, hibits);
		}
	}
	return (ldexp(sum.hi, scale));
}
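
/*
 * Illustrative sketch (not compiled): a case where the naive route rounds
 * twice.  The exact sum (1 + 2^-52) + 2^-105 first rounds to 1 + 2^-52 in
 * double, and scaling that by 2^-1023 then lands on an exact tie that
 * rounds to even, losing the final subnormal bit.  add_and_denormalize()
 * keeps the residual as a tie-breaker and returns the correctly rounded
 * subnormal.  The helper name is hypothetical; assumes round-to-nearest.
 */
#if 0
#include <assert.h>

static void
add_and_denormalize_example(void)
{
	double naive = ldexp((1.0 + 0x1p-52) + 0x1p-105, -1023);

	assert(naive == 0x1p-1023);		/* double-rounded */
	assert(add_and_denormalize(1.0 + 0x1p-52, 0x1p-105, -1023) ==
	    0x1p-1023 + 0x1p-1074);		/* correctly rounded */
}
#endif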

/*
 * Compute a*b exactly, returning the exact result in a struct dd.  We assume
 * that both a and b are normalized, so no underflow or overflow will occur.
 * The current rounding mode must be round-to-nearest.
 */
static inline struct dd
dd_mul(double a, double b)
{
	static const double split = 0x1p27 + 1.0;
	struct dd ret;
	double ha, hb, la, lb, p, q;

	p = a * split;
	ha = a - p;
	ha += p;
	la = a - ha;

	p = b * split;
	hb = b - p;
	hb += p;
	lb = b - hb;

	p = ha * hb;
	q = ha * lb + la * hb;

	ret.hi = p + q;
	ret.lo = p - ret.hi + q + la * lb;
	return (ret);
}
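
/*
 * Illustrative sketch (not compiled): dd_mul() is Dekker's algorithm.
 * The constant split = 2^27 + 1 divides each factor into high and low
 * halves so that every partial product fits exactly in a double; hi gets
 * the rounded product and lo the exact residual.  For a = 1 + 2^-52 the
 * exact square is 1 + 2^-51 + 2^-104, so hi rounds to 1 + 2^-51 and lo
 * recovers the 2^-104 tail.  The helper name is hypothetical; assumes
 * round-to-nearest.
 */
#if 0
#include <assert.h>

static void
dd_mul_example(void)
{
	struct dd p = dd_mul(1.0 + DBL_EPSILON, 1.0 + DBL_EPSILON);

	assert(p.hi == 1.0 + 2.0 * DBL_EPSILON);
	assert(p.lo == DBL_EPSILON * DBL_EPSILON);
}
#endif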

/*
 * Fused multiply-add: Compute x * y + z with a single rounding error.
 *
 * We use scaling to avoid overflow/underflow, along with the
 * canonical precision-doubling technique adapted from:
 *
 *	Dekker, T.  A Floating-Point Technique for Extending the
 *	Available Precision.  Numer. Math. 18, 224-242 (1971).
 *
 * This algorithm is sensitive to the rounding precision.  FPUs such
 * as the i387 must be set in double-precision mode if variables are
 * to be stored in FP registers in order to avoid incorrect results.
 * This is the default on FreeBSD, but not on many other systems.
 *
 * A hardware instruction should be used on architectures that support
 * fma natively, since this implementation will likely be several times
 * slower.
 */
double
fma(double x, double y, double z)
{
	double xs, ys, zs, adj;
	struct dd xy, r;
	int oround;
	int ex, ey, ez;
	int spread;

	/*
	 * Handle special cases. The order of operations and the particular
	 * return values here are crucial in handling special cases involving
	 * infinities, NaNs, overflows, and signed zeroes correctly.
	 */
	if (x == 0.0 || y == 0.0)
		return (x * y + z);
	if (z == 0.0)
		return (x * y);
	if (!isfinite(x) || !isfinite(y))
		return (x * y + z);
	if (!isfinite(z))
		return (z);

	xs = frexp(x, &ex);
	ys = frexp(y, &ey);
	zs = frexp(z, &ez);
	oround = fegetround();
	spread = ex + ey - ez;

	/*
	 * If x * y and z are many orders of magnitude apart, the scaling
	 * will overflow, so we handle these cases specially.  Rounding
	 * modes other than FE_TONEAREST are painful.
	 */
	if (spread < -DBL_MANT_DIG) {
		feraiseexcept(FE_INEXACT);
		if (!isnormal(z))
			feraiseexcept(FE_UNDERFLOW);
		switch (oround) {
		case FE_TONEAREST:
			return (z);
		case FE_TOWARDZERO:
			if ((x > 0.0) ^ (y < 0.0) ^ (z < 0.0))
				return (z);
			else
				return (nextafter(z, 0));
		case FE_DOWNWARD:
			if ((x > 0.0) ^ (y < 0.0))
				return (z);
			else
				return (nextafter(z, -INFINITY));
		default:	/* FE_UPWARD */
			if ((x > 0.0) ^ (y < 0.0))
				return (nextafter(z, INFINITY));
			else
				return (z);
		}
	}
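
	/*
	 * For example (not in the original source): with x = y = 0x1p-600
	 * and z = 1.0, the product 0x1p-1200 is far below half an ulp of z,
	 * so the case above returns 1.0 under FE_TONEAREST but must return
	 * nextafter(1.0, INFINITY) under FE_UPWARD, since the exact result
	 * lies strictly above 1.0.
	 */
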
	if (spread <= DBL_MANT_DIG * 2)
		zs = ldexp(zs, -spread);
	else
		zs = copysign(DBL_MIN, zs);

	fesetround(FE_TONEAREST);

	/*
	 * Basic approach for round-to-nearest:
	 *
	 *     (xy.hi, xy.lo) = x * y		(exact)
	 *     (r.hi, r.lo)   = xy.hi + z	(exact)
	 *     adj = xy.lo + r.lo		(inexact; low bit is sticky)
	 *     result = r.hi + adj		(correctly rounded)
	 */
	xy = dd_mul(xs, ys);
	r = dd_add(xy.hi, zs);

	spread = ex + ey;

	if (r.hi == 0.0) {
		/*
		 * When the addends cancel to 0, ensure that the result has
		 * the correct sign.
		 */
		fesetround(oround);
		{
		volatile double vzs = zs; /* XXX gcc CSE bug workaround */
		return (xy.hi + vzs + ldexp(xy.lo, spread));
		}
	}

	if (oround != FE_TONEAREST) {
		/*
		 * There is no need to worry about double rounding in directed
		 * rounding modes.
		 */
		fesetround(oround);
		adj = r.lo + xy.lo;
		return (ldexp(r.hi + adj, spread));
	}

	adj = add_adjusted(r.lo, xy.lo);
	if (spread + ilogb(r.hi) > -1023)
		return (ldexp(r.hi + adj, spread));
	else
		return (add_and_denormalize(r.hi, adj, spread));
}
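
/*
 * Illustrative sketch (not compiled): because fma() performs only one
 * rounding, fma(a, a, -(a * a)) recovers the error committed when a * a
 * is rounded to double.  For a = 1 + 2^-52 the exact square exceeds the
 * rounded one by 2^-104.  The helper name is hypothetical; assumes
 * round-to-nearest.
 */
#if 0
#include <assert.h>

static void
fma_example(void)
{
	double a = 1.0 + DBL_EPSILON;

	assert(fma(a, a, -(a * a)) == DBL_EPSILON * DBL_EPSILON);
}
#endif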