/* Complex square root of a float type.
   Copyright (C) 1997-2018 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Based on an algorithm by Stephen L. Moshier <moshier@world.std.com>.
   Contributed by Ulrich Drepper <drepper@cygnus.com>, 1997.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

#include "quadmath-imp.h"

__complex128
csqrtq (__complex128 x)
{
  __complex128 res;
  int rcls = fpclassifyq (__real__ x);
  int icls = fpclassifyq (__imag__ x);

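  /* Special cases first: if either part is an infinity or a NaN, the
     result is fully determined by the branches below (an infinite
     imaginary part dominates, an infinite real part gives an exact
     on-axis result, and anything else propagates NaN), so no real
     arithmetic is needed.  */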
  if (__glibc_unlikely (rcls <= QUADFP_INFINITE || icls <= QUADFP_INFINITE))
    {
      if (icls == QUADFP_INFINITE)
        {
          __real__ res = HUGE_VALQ;
          __imag__ res = __imag__ x;
        }
      else if (rcls == QUADFP_INFINITE)
        {
          if (__real__ x < 0)
            {
              __real__ res = icls == QUADFP_NAN ? nanq ("") : 0;
              __imag__ res = copysignq (HUGE_VALQ, __imag__ x);
            }
          else
            {
              __real__ res = __real__ x;
              __imag__ res = (icls == QUADFP_NAN
                              ? nanq ("") : copysignq (0, __imag__ x));
            }
        }
      else
        {
          __real__ res = nanq ("");
          __imag__ res = nanq ("");
        }
    }
  else
    {
      if (__glibc_unlikely (icls == QUADFP_ZERO))
        {
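          /* The imaginary part is an exact zero: for Re x >= 0 the result
             is real, sqrt (Re x), and for Re x < 0 it is purely imaginary,
             i * sqrt (-Re x); the sign of the zero imaginary part is
             preserved in both cases.  */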
          if (__real__ x < 0)
            {
              __real__ res = 0;
              __imag__ res = copysignq (sqrtq (-__real__ x), __imag__ x);
            }
          else
            {
              __real__ res = fabsq (sqrtq (__real__ x));
              __imag__ res = copysignq (0, __imag__ x);
            }
        }
      else if (__glibc_unlikely (rcls == QUADFP_ZERO))
        {
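          /* The real part is an exact zero: sqrt (i y) = sqrt (|y| / 2)
             * (1 +/- i), so both result components share the magnitude r.
             When |y| is close to the subnormal range, halve only after
             taking the square root (0.5 * sqrt (2 |y|)), so that the
             product 0.5 * |y| cannot underflow and lose precision.  */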
          __float128 r;
          if (fabsq (__imag__ x) >= 2 * FLT128_MIN)
            r = sqrtq (0.5Q * fabsq (__imag__ x));
          else
            r = 0.5Q * sqrtq (2 * fabsq (__imag__ x));

          __real__ res = r;
          __imag__ res = copysignq (r, __imag__ x);
        }
      else
        {
          __float128 d, r, s;
          int scale = 0;

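          /* General case.  Pre-scale the argument by a power of 4 so that
             hypotq and the intermediate quotients below can neither
             overflow (either part huge) nor lose precision to underflow
             (both parts tiny); the matching power of 2 is applied to the
             result at the end via scalbnq (..., scale).  */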
          if (fabsq (__real__ x) > FLT128_MAX / 4)
            {
              scale = 1;
              __real__ x = scalbnq (__real__ x, -2 * scale);
              __imag__ x = scalbnq (__imag__ x, -2 * scale);
            }
          else if (fabsq (__imag__ x) > FLT128_MAX / 4)
            {
              scale = 1;
              if (fabsq (__real__ x) >= 4 * FLT128_MIN)
                __real__ x = scalbnq (__real__ x, -2 * scale);
              else
                __real__ x = 0;
              __imag__ x = scalbnq (__imag__ x, -2 * scale);
            }
          else if (fabsq (__real__ x) < 2 * FLT128_MIN
                   && fabsq (__imag__ x) < 2 * FLT128_MIN)
            {
              scale = -((FLT128_MANT_DIG + 1) / 2);
              __real__ x = scalbnq (__real__ x, -2 * scale);
              __imag__ x = scalbnq (__imag__ x, -2 * scale);
            }

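          /* d is the modulus |x|.  The principal root satisfies
             Re res = sqrt ((d + Re x) / 2) and |Im res| = sqrt ((d - Re x) / 2);
             only the larger of the two is computed by that formula.  */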
          d = hypotq (__real__ x, __imag__ x);
          /* Use the identity   2  Re res  Im res = Im x
             to avoid cancellation error in  d +/- Re x.  */
          if (__real__ x > 0)
            {
              r = sqrtq (0.5Q * (d + __real__ x));
              if (scale == 1 && fabsq (__imag__ x) < 1)
                {
                  /* Avoid possible intermediate underflow.  */
                  s = __imag__ x / r;
                  r = scalbnq (r, scale);
                  scale = 0;
                }
              else
                s = 0.5Q * (__imag__ x / r);
            }
          else
            {
              s = sqrtq (0.5Q * (d - __real__ x));
              if (scale == 1 && fabsq (__imag__ x) < 1)
                {
                  /* Avoid possible intermediate underflow.  */
                  r = fabsq (__imag__ x / s);
                  s = scalbnq (s, scale);
                  scale = 0;
                }
              else
                r = fabsq (0.5Q * (__imag__ x / s));
            }

          if (scale)
            {
              r = scalbnq (r, scale);
              s = scalbnq (s, scale);
            }

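          /* Force an underflow exception if either result component is
             tiny (subnormal); the computations above do not always raise
             it on their own.  */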
          math_check_force_underflow (r);
          math_check_force_underflow (s);

          __real__ res = r;
          __imag__ res = copysignq (s, __imag__ x);
        }
    }

  return res;
}
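
/* Illustrative usage sketch, not part of the original libquadmath source:
   a minimal standalone program (assuming <quadmath.h> is available and the
   program is linked with -lquadmath) exercising two easy cases,
   csqrtq (-4 + 0i) == 0 + 2i and csqrtq (0 + 2i) == 1 + 1i.  Kept under
   "#if 0" so that it never affects the build of this file.  */
#if 0
#include <quadmath.h>
#include <stdio.h>

int
main (void)
{
  char re[64], im[64];

  /* sqrt (-4) on the principal branch is 0 + 2i.  */
  __complex128 a = -4;
  __complex128 ra = csqrtq (a);
  quadmath_snprintf (re, sizeof re, "%.3Qf", __real__ ra);
  quadmath_snprintf (im, sizeof im, "%.3Qf", __imag__ ra);
  printf ("csqrtq (-4 + 0i) = %s + %si\n", re, im);

  /* (1 + i)^2 == 2i, so sqrt (2i) is 1 + 1i.  */
  __complex128 b = 0;
  __imag__ b = 2.0Q;
  __complex128 rb = csqrtq (b);
  quadmath_snprintf (re, sizeof re, "%.3Qf", __real__ rb);
  quadmath_snprintf (im, sizeof im, "%.3Qf", __imag__ rb);
  printf ("csqrtq (0 + 2i) = %s + %si\n", re, im);

  return 0;
}
#endif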