/* Complex square root of a float type.
   Copyright (C) 1997-2018 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Based on an algorithm by Stephen L. Moshier <moshier@world.std.com>.
   Contributed by Ulrich Drepper <drepper@cygnus.com>, 1997.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

#include "quadmath-imp.h"

__complex128
csqrtq (__complex128 x)
{
  __complex128 res;
  int rcls = fpclassifyq (__real__ x);
  int icls = fpclassifyq (__imag__ x);

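  /* Handle infinities and NaNs first.  These branches match the usual
     C Annex G special cases for csqrt: an infinite imaginary part gives
     +Inf with the same signed infinite imaginary part; a real part of
     -Inf gives +0 (or NaN) with an infinite imaginary part; a real part
     of +Inf gives +Inf with a zero (or NaN) imaginary part; any other
     input involving a NaN gives NaN + I*NaN.  */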
  if (__glibc_unlikely (rcls <= QUADFP_INFINITE || icls <= QUADFP_INFINITE))
    {
      if (icls == QUADFP_INFINITE)
        {
          __real__ res = HUGE_VALQ;
          __imag__ res = __imag__ x;
        }
      else if (rcls == QUADFP_INFINITE)
        {
          if (__real__ x < 0)
            {
              __real__ res = icls == QUADFP_NAN ? nanq ("") : 0;
              __imag__ res = copysignq (HUGE_VALQ, __imag__ x);
            }
          else
            {
              __real__ res = __real__ x;
              __imag__ res = (icls == QUADFP_NAN
                              ? nanq ("") : copysignq (0, __imag__ x));
            }
        }
      else
        {
          __real__ res = nanq ("");
          __imag__ res = nanq ("");
        }
    }
  else
    {
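      /* Purely real input: the result is the ordinary real square root
         when Re x >= 0, and a purely imaginary value sqrtq (-Re x)
         carrying the sign of Im x when Re x < 0.  */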
      if (__glibc_unlikely (icls == QUADFP_ZERO))
        {
          if (__real__ x < 0)
            {
              __real__ res = 0;
              __imag__ res = copysignq (sqrtq (-__real__ x), __imag__ x);
            }
          else
            {
              __real__ res = fabsq (sqrtq (__real__ x));
              __imag__ res = copysignq (0, __imag__ x);
            }
        }
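      /* Purely imaginary input: sqrt (I*b) = sqrtq (|b| / 2) * (1 + I*sign b).
         When |b| is very small, halving it before the square root would
         drop bits in the subnormal range, so compute 0.5 * sqrtq (2 * |b|)
         instead; the final halving is exact.  */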
      else if (__glibc_unlikely (rcls == QUADFP_ZERO))
        {
          __float128 r;
          if (fabsq (__imag__ x) >= 2 * FLT128_MIN)
            r = sqrtq (0.5Q * fabsq (__imag__ x));
          else
            r = 0.5Q * sqrtq (2 * fabsq (__imag__ x));

          __real__ res = r;
          __imag__ res = copysignq (r, __imag__ x);
        }
      else
        {
          __float128 d, r, s;
          int scale = 0;

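          /* If either component is close to the overflow threshold, or
             both are close to the underflow threshold, pre-scale x by a
             power of 4 so that hypotq and the sums below stay in range;
             the matching power of 2 is applied to the square root at the
             end.  */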
          if (fabsq (__real__ x) > FLT128_MAX / 4)
            {
              scale = 1;
              __real__ x = scalbnq (__real__ x, -2 * scale);
              __imag__ x = scalbnq (__imag__ x, -2 * scale);
            }
          else if (fabsq (__imag__ x) > FLT128_MAX / 4)
            {
              scale = 1;
              if (fabsq (__real__ x) >= 4 * FLT128_MIN)
                __real__ x = scalbnq (__real__ x, -2 * scale);
              else
                __real__ x = 0;
              __imag__ x = scalbnq (__imag__ x, -2 * scale);
            }
          else if (fabsq (__real__ x) < 2 * FLT128_MIN
                   && fabsq (__imag__ x) < 2 * FLT128_MIN)
            {
              scale = -((FLT128_MANT_DIG + 1) / 2);
              __real__ x = scalbnq (__real__ x, -2 * scale);
              __imag__ x = scalbnq (__imag__ x, -2 * scale);
            }

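          /* General case: with d = |x|, the root r + I*s satisfies
             r = sqrtq ((d + Re x) / 2) and s = sqrtq ((d - Re x) / 2)
             (up to signs).  Compute whichever of the two involves an
             addition, then derive the other from the identity noted
             below.  */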
          d = hypotq (__real__ x, __imag__ x);
          /* Use the identity   2  Re res  Im res = Im x
             to avoid cancellation error in  d +/- Re x.  */
          if (__real__ x > 0)
            {
              r = sqrtq (0.5Q * (d + __real__ x));
              if (scale == 1 && fabsq (__imag__ x) < 1)
                {
                  /* Avoid possible intermediate underflow.  */
                  s = __imag__ x / r;
                  r = scalbnq (r, scale);
                  scale = 0;
                }
              else
                s = 0.5Q * (__imag__ x / r);
            }
          else
            {
              s = sqrtq (0.5Q * (d - __real__ x));
              if (scale == 1 && fabsq (__imag__ x) < 1)
                {
                  /* Avoid possible intermediate underflow.  */
                  r = fabsq (__imag__ x / s);
                  s = scalbnq (s, scale);
                  scale = 0;
                }
              else
                r = fabsq (0.5Q * (__imag__ x / s));
            }

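          /* Undo the pre-scaling: x was multiplied by 4^(-scale), so the
             square root must be multiplied by 2^scale (unless the scale
             was already folded into r or s above).  */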
          if (scale)
            {
              r = scalbnq (r, scale);
              s = scalbnq (s, scale);
            }

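          /* If the result components are tiny, make sure the underflow
             exception has been raised; the computation above may have
             avoided it.  */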
          math_check_force_underflow (r);
          math_check_force_underflow (s);

          __real__ res = r;
          __imag__ res = copysignq (s, __imag__ x);
        }
    }

  return res;
}