/* Implementation of gamma function according to ISO C.
   Copyright (C) 1997-2018 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@cygnus.com>, 1997 and
                  Jakub Jelinek <jj@ultra.linux.cz>, 1999.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

#include "quadmath-imp.h"

__float128
tgammaq (__float128 x)
{
  int sign;
  __float128 ret;
  ret = __quadmath_gammaq_r (x, &sign);
  return sign < 0 ? -ret : ret;
}
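
/* A minimal, hypothetical usage sketch (not part of this file): callers
   include <quadmath.h>, which declares tgammaq, link with -lquadmath,
   and can print the result with quadmath_snprintf:

     #include <quadmath.h>
     #include <stdio.h>

     int
     main (void)
     {
       char buf[128];
       // Gamma (5.5) = 4.5 * 3.5 * 2.5 * 1.5 * 0.5 * sqrt (pi) ~= 52.34.
       __float128 g = tgammaq (5.5Q);
       quadmath_snprintf (buf, sizeof buf, "%.30Qg", g);
       puts (buf);
       return 0;
     }

   The 5.5Q literal and the Q length modifier are GCC/libquadmath
   extensions.  */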

/* Coefficients B_2k / 2k(2k-1) of x^-(2k-1) inside exp in Stirling's
   approximation to gamma function.  */

static const __float128 gamma_coeff[] =
  {
    0x1.5555555555555555555555555555p-4Q,
    -0xb.60b60b60b60b60b60b60b60b60b8p-12Q,
    0x3.4034034034034034034034034034p-12Q,
    -0x2.7027027027027027027027027028p-12Q,
    0x3.72a3c5631fe46ae1d4e700dca8f2p-12Q,
    -0x7.daac36664f1f207daac36664f1f4p-12Q,
    0x1.a41a41a41a41a41a41a41a41a41ap-8Q,
    -0x7.90a1b2c3d4e5f708192a3b4c5d7p-8Q,
    0x2.dfd2c703c0cfff430edfd2c703cp-4Q,
    -0x1.6476701181f39edbdb9ce625987dp+0Q,
    0xd.672219167002d3a7a9c886459cp+0Q,
    -0x9.cd9292e6660d55b3f712eb9e07c8p+4Q,
    0x8.911a740da740da740da740da741p+8Q,
    -0x8.d0cc570e255bf59ff6eec24b49p+12Q,
  };

#define NCOEFF (sizeof (gamma_coeff) / sizeof (gamma_coeff[0]))
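
/* These are the coefficients c_k = B_2k / (2k (2k - 1)) of the Stirling
   series

     log (gamma (x)) ~ (x - 1/2) log (x) - x + log (2 * pi) / 2
                       + sum_{k >= 1} c_k * x^-(2k-1),

   truncated after NCOEFF terms; gammal_positive below exponentiates the
   truncated sum.  As a check, gamma_coeff[0] is B_2 / 2 = 1/12
   = 0x1.555...p-4.  */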

/* Return gamma (X), for positive X less than 1775, in the form R *
   2^(*EXP2_ADJ), where R is the return value and *EXP2_ADJ is set to
   avoid overflow or underflow in intermediate calculations.  */

static __float128
gammal_positive (__float128 x, int *exp2_adj)
{
  int local_signgam;
  if (x < 0.5Q)
    {
      *exp2_adj = 0;
      return expq (__quadmath_lgammaq_r (x + 1, &local_signgam)) / x;
    }
  else if (x <= 1.5Q)
    {
      *exp2_adj = 0;
      return expq (__quadmath_lgammaq_r (x, &local_signgam));
    }
  else if (x < 12.5Q)
    {
      /* Adjust into the range for using exp (lgamma).  */
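      /* With n = ceil (x - 1.5) and x_adj = x - n, repeated use of
         gamma (y + 1) = y * gamma (y) gives

           gamma (x) = gamma (x_adj)
                       * (x_adj * (x_adj + 1) * ... * (x_adj + n - 1)),

         where the product is what __quadmath_gamma_productq is assumed
         here to compute (reporting its relative error in *EPS), so that
         x_adj falls in [0.5, 1.5] where exp (lgamma) is accurate.  */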
      *exp2_adj = 0;
      __float128 n = ceilq (x - 1.5Q);
      __float128 x_adj = x - n;
      __float128 eps;
      __float128 prod = __quadmath_gamma_productq (x_adj, 0, n, &eps);
      return (expq (__quadmath_lgammaq_r (x_adj, &local_signgam))
              * prod * (1 + eps));
    }
  else
    {
      __float128 eps = 0;
      __float128 x_eps = 0;
      __float128 x_adj = x;
      __float128 prod = 1;
      if (x < 24)
        {
          /* Adjust into the range for applying Stirling's
             approximation.  */
          __float128 n = ceilq (24 - x);
          x_adj = x + n;
          x_eps = (x - (x_adj - n));
          prod = __quadmath_gamma_productq (x_adj - n, x_eps, n, &eps);
        }
      /* The result is now gamma (X_ADJ + X_EPS) / (PROD * (1 + EPS)).
         Compute gamma (X_ADJ + X_EPS) using Stirling's approximation,
         starting by computing pow (X_ADJ, X_ADJ) with a power of 2
         factored out.  */
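      /* Splitting x_adj = x_adj_int + x_adj_frac and writing
         x_adj = x_adj_mant * 2^x_adj_log2, with x_adj_mant scaled into
         [sqrt(1/2), sqrt(2)), gives

           x_adj^x_adj = x_adj_mant^x_adj
                         * 2^(x_adj_log2 * x_adj_frac)
                         * 2^(x_adj_log2 * x_adj_int);

         the last, integer-exponent factor is deferred to the caller
         through *EXP2_ADJ to keep intermediates in range.  */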
      __float128 exp_adj = -eps;
      __float128 x_adj_int = roundq (x_adj);
      __float128 x_adj_frac = x_adj - x_adj_int;
      int x_adj_log2;
      __float128 x_adj_mant = frexpq (x_adj, &x_adj_log2);
      if (x_adj_mant < M_SQRT1_2q)
        {
          x_adj_log2--;
          x_adj_mant *= 2;
        }
      *exp2_adj = x_adj_log2 * (int) x_adj_int;
      __float128 ret = (powq (x_adj_mant, x_adj)
                        * exp2q (x_adj_log2 * x_adj_frac)
                        * expq (-x_adj)
                        * sqrtq (2 * M_PIq / x_adj)
                        / prod);
      exp_adj += x_eps * logq (x_adj);
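      /* Evaluate the correction series
         sum_{k >= 0} gamma_coeff[k] * x_adj^-(2k+1) by Horner's rule in
         1 / (x_adj * x_adj); folding bsum / x_adj into exp_adj and
         multiplying by expq (exp_adj) (via expm1q below) applies this
         correction together with the accumulated error terms.  */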
      __float128 bsum = gamma_coeff[NCOEFF - 1];
      __float128 x_adj2 = x_adj * x_adj;
      for (size_t i = 1; i <= NCOEFF - 1; i++)
        bsum = bsum / x_adj2 + gamma_coeff[NCOEFF - 1 - i];
      exp_adj += bsum / x_adj;
      return ret + ret * expm1q (exp_adj);
    }
}

__float128
__quadmath_gammaq_r (__float128 x, int *signgamp)
{
  int64_t hx;
  uint64_t lx;
  __float128 ret;

  GET_FLT128_WORDS64 (hx, lx, x);

  if (((hx & 0x7fffffffffffffffLL) | lx) == 0)
    {
      /* Return value for x == 0 is Inf with divide by zero exception.  */
      *signgamp = 0;
      return 1.0 / x;
    }
  if (hx < 0 && (uint64_t) hx < 0xffff000000000000ULL && rintq (x) == x)
    {
      /* Return value for integer x < 0 is NaN with invalid exception.  */
      *signgamp = 0;
      return (x - x) / (x - x);
    }
  if (hx == 0xffff000000000000ULL && lx == 0)
    {
      /* x == -Inf.  According to ISO this is NaN.  */
      *signgamp = 0;
      return x - x;
    }
  if ((hx & 0x7fff000000000000ULL) == 0x7fff000000000000ULL)
    {
      /* Positive infinity (return positive infinity) or NaN (return
         NaN).  */
      *signgamp = 0;
      return x + x;
    }

  if (x >= 1756)
    {
      /* Overflow.  */
      *signgamp = 0;
      return FLT128_MAX * FLT128_MAX;
    }
  else
    {
      SET_RESTORE_ROUNDF128 (FE_TONEAREST);
      if (x > 0)
        {
          *signgamp = 0;
          int exp2_adj;
          ret = gammal_positive (x, &exp2_adj);
          ret = scalbnq (ret, exp2_adj);
        }
      else if (x >= -FLT128_EPSILON / 4)
        {
          *signgamp = 0;
          ret = 1 / x;
        }
      else
        {
          __float128 tx = truncq (x);
          *signgamp = (tx == 2 * truncq (tx / 2)) ? -1 : 1;
          if (x <= -1775)
            /* Underflow.  */
            ret = FLT128_MIN * FLT128_MIN;
          else
            {
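              /* For negative non-integer x, use the reflection formula
                 gamma (x) * gamma (1 - x) = pi / sin (pi * x) together
                 with gamma (1 - x) = -x * gamma (-x), so

                   |gamma (x)| = pi / |x * sin (pi * x) * gamma (-x)|;

                 the sign was already stored in *SIGNGAMP above, and
                 sin (pi * x) is computed from the fractional part of x
                 so the argument stays small and accurate.  */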
              __float128 frac = tx - x;
              if (frac > 0.5Q)
                frac = 1 - frac;
              __float128 sinpix = (frac <= 0.25Q
                                   ? sinq (M_PIq * frac)
                                   : cosq (M_PIq * (0.5Q - frac)));
              int exp2_adj;
              ret = M_PIq / (-x * sinpix
                             * gammal_positive (-x, &exp2_adj));
              ret = scalbnq (ret, -exp2_adj);
              math_check_force_underflow_nonneg (ret);
            }
        }
    }
  if (isinfq (ret) && x != 0)
    {
      if (*signgamp < 0)
        return -(-copysignq (FLT128_MAX, ret) * FLT128_MAX);
      else
        return copysignq (FLT128_MAX, ret) * FLT128_MAX;
    }
  else if (ret == 0)
    {
      if (*signgamp < 0)
        return -(-copysignq (FLT128_MIN, ret) * FLT128_MIN);
      else
        return copysignq (FLT128_MIN, ret) * FLT128_MIN;
    }
  else
    return ret;
}