/* mpc_atan -- arctangent of a complex number.

Copyright (C) 2009, 2010, 2011, 2012, 2013, 2017, 2020, 2022 INRIA

This file is part of GNU MPC.

GNU MPC is free software; you can redistribute it and/or modify it under
the terms of the GNU Lesser General Public License as published by the
Free Software Foundation; either version 3 of the License, or (at your
option) any later version.

GNU MPC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
more details.

You should have received a copy of the GNU Lesser General Public License
along with this program. If not, see http://www.gnu.org/licenses/ .
*/

#include <stdio.h>
#include "mpc-impl.h"

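/* Usage sketch (illustration only, not part of this file): mpc_atan is
   called like any other unary MPC function.  For example, to compute
   atan(1 + 2*i) with 53-bit parts, rounding both parts to nearest:

     mpc_t z, w;
     mpc_init2 (z, 53);
     mpc_init2 (w, 53);
     mpc_set_d_d (z, 1.0, 2.0, MPC_RNDNN);
     mpc_atan (w, z, MPC_RNDNN);
     mpc_out_str (stdout, 10, 0, w, MPC_RNDNN);
     mpc_clear (z);
     mpc_clear (w);
*/
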
/* set rop to
      -pi/2 if s < 0
      +pi/2 else
   rounded in the direction rnd
*/
int
set_pi_over_2 (mpfr_ptr rop, int s, mpfr_rnd_t rnd)
{
  int inex;

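  /* When s < 0 the value is negated below, so pi is first computed in the
     opposite rounding direction (INV_RND), and the ternary value returned
     by mpfr_const_pi is negated as well; the final negated result is then
     correctly rounded in the direction rnd. */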
  inex = mpfr_const_pi (rop, s < 0 ? INV_RND (rnd) : rnd);
  mpfr_div_2ui (rop, rop, 1, MPFR_RNDN);
  if (s < 0)
    {
      inex = -inex;
      mpfr_neg (rop, rop, MPFR_RNDN);
    }

  return inex;
}

int
mpc_atan (mpc_ptr rop, mpc_srcptr op, mpc_rnd_t rnd)
{
  int s_re, s_im;
  int inex_re, inex_im, inex;
  mpfr_exp_t saved_emin, saved_emax;

  inex_re = 0;
  inex_im = 0;
  s_re = mpfr_signbit (mpc_realref (op));
  s_im = mpfr_signbit (mpc_imagref (op));

  /* special values */
  if (mpfr_nan_p (mpc_realref (op)) || mpfr_nan_p (mpc_imagref (op)))
    {
      if (mpfr_nan_p (mpc_realref (op)))
        {
          mpfr_set_nan (mpc_realref (rop));
          if (mpfr_zero_p (mpc_imagref (op)) || mpfr_inf_p (mpc_imagref (op)))
            {
              mpfr_set_ui (mpc_imagref (rop), 0, MPFR_RNDN);
              if (s_im)
                mpc_conj (rop, rop, MPC_RNDNN);
            }
          else
            mpfr_set_nan (mpc_imagref (rop));
        }
      else
        {
          if (mpfr_inf_p (mpc_realref (op)))
            {
              inex_re = set_pi_over_2 (mpc_realref (rop), -s_re, MPC_RND_RE (rnd));
              mpfr_set_ui (mpc_imagref (rop), 0, MPFR_RNDN);
            }
          else
            {
              mpfr_set_nan (mpc_realref (rop));
              mpfr_set_nan (mpc_imagref (rop));
            }
        }
      return MPC_INEX (inex_re, 0);
    }

  if (mpfr_inf_p (mpc_realref (op)) || mpfr_inf_p (mpc_imagref (op)))
    {
      inex_re = set_pi_over_2 (mpc_realref (rop), -s_re, MPC_RND_RE (rnd));

      mpfr_set_ui (mpc_imagref (rop), 0, MPFR_RNDN);
      if (s_im)
        mpc_conj (rop, rop, MPC_RNDNN);

      return MPC_INEX (inex_re, 0);
    }

  /* pure real argument */
  if (mpfr_zero_p (mpc_imagref (op)))
    {
      inex_re = mpfr_atan (mpc_realref (rop), mpc_realref (op), MPC_RND_RE (rnd));

      mpfr_set_ui (mpc_imagref (rop), 0, MPFR_RNDN);
      if (s_im)
        mpc_conj (rop, rop, MPC_RNDNN);

      return MPC_INEX (inex_re, 0);
    }

  /* pure imaginary argument */
  if (mpfr_zero_p (mpc_realref (op)))
    {
      int cmp_1;

      if (s_im)
        cmp_1 = -mpfr_cmp_si (mpc_imagref (op), -1);
      else
        cmp_1 = mpfr_cmp_ui (mpc_imagref (op), +1);

      if (cmp_1 < 0)
        {
          /* atan(+0+iy) = +0 +i*atanh(y), if |y| < 1
             atan(-0+iy) = -0 +i*atanh(y), if |y| < 1 */

          mpfr_set_ui (mpc_realref (rop), 0, MPFR_RNDN);
          if (s_re)
            mpfr_neg (mpc_realref (rop), mpc_realref (rop), MPFR_RNDN);

          inex_im = mpfr_atanh (mpc_imagref (rop), mpc_imagref (op), MPC_RND_IM (rnd));
        }
      else if (cmp_1 == 0)
        {
          /* atan(+/-0 +i) = +/-0 +i*inf
             atan(+/-0 -i) = +/-0 -i*inf */
          mpfr_set_zero (mpc_realref (rop), s_re ? -1 : +1);
          mpfr_set_inf (mpc_imagref (rop), s_im ? -1 : +1);
        }
      else
        {
          /* atan(+0+iy) = +pi/2 +i*atanh(1/y), if |y| > 1
             atan(-0+iy) = -pi/2 +i*atanh(1/y), if |y| > 1 */
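          /* The formula follows from atan(z) + atan(1/z) = +/-pi/2, where
             the sign is that of the (signed zero) real part, together with
             atan(i*t) = i*atanh(t) for |t| < 1: for |y| > 1,
             atan(i*y) = +/-pi/2 - atan(1/(i*y)) = +/-pi/2 + i*atanh(1/y). */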
          mpfr_rnd_t rnd_im;
          mpfr_t y, z;
          mpfr_prec_t p, p_im;
          int ok = 0;

          rnd_im = MPC_RND_IM (rnd);
          mpfr_init (y);
          mpfr_init (z);
          p_im = mpfr_get_prec (mpc_imagref (rop));
          p = p_im;

          /* a = o(1/y) with error(a) < ulp(a), rounded away
             b = o(atanh(a)) with error(b) < ulp(b) + 1/|a^2-1|*ulp(a),
             since if a = 1/y + eps, then atanh(a) = atanh(1/y) + eps * atanh'(t)
             with t in (1/y, a). Since a is rounded away, we have 1/y <= a <= 1
             if y > 1, and -1 <= a <= 1/y if y < -1, thus |atanh'(t)| =
             1/|t^2-1| <= 1/|a^2-1|.

             We round atanh(1/y) away from 0.
          */
          do
            {
              mpfr_exp_t err, exp_a;

              p += mpc_ceil_log2 (p) + 2;
              mpfr_set_prec (y, p);
              mpfr_set_prec (z, p);
              inex_im = mpfr_ui_div (y, 1, mpc_imagref (op), MPFR_RNDA);
              exp_a = mpfr_get_exp (y);
              /* FIXME: should we consider the case with unreasonably huge
                 precision prec(y)>3*exp_min, where atanh(1/Im(op)) could be
                 representable while 1/Im(op) underflows ?
                 This corresponds to |y| = 0.5*2^emin, in which case the
                 result may be wrong. */

              /* We would like to compute a rounded-up error bound 1/|a^2-1|,
                 so we need to round down |a^2-1|, which means rounding up
                 a^2 since |a|<1. */
              mpfr_sqr (z, y, MPFR_RNDU);
              /* since |y| > 1, we should have |a| <= 1, thus a^2 <= 1 */
              MPC_ASSERT (mpfr_cmp_ui (z, 1) <= 0);
              /* in case z=1, we should try again with more precision */
              if (mpfr_cmp_ui (z, 1) == 0)
                continue;
              /* now z < 1 */
              mpfr_ui_sub (z, 1, z, MPFR_RNDZ);

              /* atanh cannot underflow: |atanh(x)| > |x| for |x| < 1 */
              inex_im |= mpfr_atanh (y, y, MPFR_RNDA);

              /* the error is now bounded by ulp(b) + 1/z*ulp(a), thus
                 ulp(b) + 2^(exp(a) - exp(b) + 1 - exp(z)) * ulp(b) */
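              /* Indeed, ulp(a) = 2^(exp(a)-p) and ulp(b) = 2^(exp(b)-p) since
                 both are computed with precision p, and 1/z <= 2^(1-exp(z))
                 because z >= 2^(exp(z)-1), hence
                 1/z * ulp(a) <= 2^(exp(a) - exp(b) + 1 - exp(z)) * ulp(b). */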
              err = exp_a - mpfr_get_exp (y) + 1 - mpfr_get_exp (z);
              if (err >= 0) /* 1 + 2^err <= 2^(err+1) */
                err = err + 1;
              else
                err = 1; /* 1 + 2^err <= 2^1 */

              /* the error is bounded by 2^err ulps */

              ok = inex_im == 0
                || mpfr_can_round (y, p - err, MPFR_RNDA, MPFR_RNDZ,
                                   p_im + (rnd_im == MPFR_RNDN));
            } while (ok == 0);

          inex_re = set_pi_over_2 (mpc_realref (rop), -s_re, MPC_RND_RE (rnd));
          inex_im = mpfr_set (mpc_imagref (rop), y, rnd_im);
          mpfr_clear (y);
          mpfr_clear (z);
        }
      return MPC_INEX (inex_re, inex_im);
    }

  saved_emin = mpfr_get_emin ();
  saved_emax = mpfr_get_emax ();
  mpfr_set_emin (mpfr_get_emin_min ());
  mpfr_set_emax (mpfr_get_emax_max ());

  /* regular number argument */
  {
    mpfr_t a, b, x, y;
    mpfr_prec_t prec, p;
    mpfr_exp_t err, expo;
    int ok = 0;
    mpfr_t minus_op_re;
    mpfr_exp_t op_re_exp, op_im_exp;
    mpfr_rnd_t rnd1, rnd2;

    mpfr_inits2 (MPFR_PREC_MIN, a, b, x, y, (mpfr_ptr) 0);

    /* real part: Re(arctan(x+i*y)) = [arctan2(x,1-y) - arctan2(-x,1+y)]/2 */
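    /* This is the standard logarithmic form of atan: with z = x+i*y,
       atan(z) = [log(1+i*z) - log(1-i*z)] / (2*i), where 1+i*z = (1-y)+i*x
       and 1-i*z = (1+y)-i*x; taking imaginary parts of the two logarithms
       gives arctan2(x,1-y) and arctan2(-x,1+y). */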
    minus_op_re[0] = mpc_realref (op)[0];
    MPFR_CHANGE_SIGN (minus_op_re);
    op_re_exp = mpfr_get_exp (mpc_realref (op));
    op_im_exp = mpfr_get_exp (mpc_imagref (op));

    prec = mpfr_get_prec (mpc_realref (rop)); /* result precision */

    /* a = o(1-y)         error(a) < 1 ulp(a)
       b = o(atan2(x,a))  error(b) < [1+2^{3+Exp(x)-Exp(a)-Exp(b)}] ulp(b)
                                   = kb ulp(b)
       c = o(1+y)         error(c) < 1 ulp(c)
       d = o(atan2(-x,c)) error(d) < [1+2^{3+Exp(x)-Exp(c)-Exp(d)}] ulp(d)
                                   = kd ulp(d)
       e = o(b - d)       error(e) < [1 + kb*2^{Exp(b)-Exp(e)}
                                        + kd*2^{Exp(d)-Exp(e)}] ulp(e)
                          error(e) < [1 + 2^{4+Exp(x)-Exp(a)-Exp(e)}
                                        + 2^{4+Exp(x)-Exp(c)-Exp(e)}] ulp(e)
                                     because |atan(u)| < |u|
                                   < [1 + 2^{5+Exp(x)-min(Exp(a),Exp(c))
                                             -Exp(e)}] ulp(e)
       f = e/2            exact
    */

    /* p: working precision */
    p = (op_im_exp > 0 || prec > SAFE_ABS (mpfr_prec_t, op_im_exp)) ? prec
      : (prec - op_im_exp);
    rnd1 = mpfr_sgn (mpc_realref (op)) > 0 ? MPFR_RNDD : MPFR_RNDU;
    rnd2 = mpfr_sgn (mpc_realref (op)) < 0 ? MPFR_RNDU : MPFR_RNDD;

    do
      {
        p += mpc_ceil_log2 (p) + 2;
        mpfr_set_prec (a, p);
        mpfr_set_prec (b, p);
        mpfr_set_prec (x, p);

        /* x = upper bound for atan (x/(1-y)). Since atan is increasing, we
           need an upper bound on x/(1-y), i.e., a lower bound on 1-y for
           x positive, and an upper bound on 1-y for x negative */
        mpfr_ui_sub (a, 1, mpc_imagref (op), rnd1);
        if (mpfr_sgn (a) == 0) /* y is near 1, thus 1+y is near 2, and
                                  expo will be 1 or 2 below */
          {
            MPC_ASSERT (mpfr_cmp_ui (mpc_imagref(op), 1) == 0);
            /* check for intermediate underflow */
            err = 2; /* ensures err will be expo below */
          }
        else
          err = mpfr_get_exp (a); /* err = Exp(a) with the notations above */
        mpfr_atan2 (x, mpc_realref (op), a, MPFR_RNDU);

        /* b = lower bound for atan (-x/(1+y)): for x negative, we need a
           lower bound on -x/(1+y), i.e., an upper bound on 1+y */
        mpfr_add_ui (a, mpc_imagref(op), 1, rnd2);
        /* if a is exactly zero, i.e., Im(op) = -1, then the error on a is 0,
           and we can simply ignore the terms involving Exp(a) in the error */
        if (mpfr_sgn (a) == 0)
          {
            MPC_ASSERT (mpfr_cmp_si (mpc_imagref(op), -1) == 0);
            /* check for intermediate underflow */
            expo = err; /* will leave err unchanged below */
          }
        else
          expo = mpfr_get_exp (a); /* expo = Exp(c) with the notations above */
        mpfr_atan2 (b, minus_op_re, a, MPFR_RNDD);

        err = err < expo ? err : expo; /* err = min(Exp(a),Exp(c)) */
        mpfr_sub (x, x, b, MPFR_RNDU);

        err = 5 + op_re_exp - err - mpfr_get_exp (x);
        /* error is bounded by [1 + 2^err] ulp(e) */
        err = err < 0 ? 1 : err + 1;

        mpfr_div_2ui (x, x, 1, MPFR_RNDU);

        /* Note: using RND2=RNDD guarantees that if x is exactly representable
           on prec + ... bits, mpfr_can_round will return 0 */
        ok = mpfr_can_round (x, p - err, MPFR_RNDU, MPFR_RNDD,
                             prec + (MPC_RND_RE (rnd) == MPFR_RNDN));
      } while (ok == 0);

    /* Imaginary part
       Im(atan(x+I*y)) = 1/4 * [log(x^2+(1+y)^2) - log(x^2+(1-y)^2)] */
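    /* This follows from the same logarithmic form as for the real part:
       Im(atan(x+i*y)) = -1/2 * [Re(log((1-y)+i*x)) - Re(log((1+y)-i*x))]
                       = 1/4 * [log(x^2+(1+y)^2) - log(x^2+(1-y)^2)]. */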
    prec = mpfr_get_prec (mpc_imagref (rop)); /* result precision */

    /* a = o(1+y)    error(a) < 1 ulp(a)
       b = o(a^2)    error(b) < 5 ulp(b)
       c = o(x^2)    error(c) < 1 ulp(c)
       d = o(b+c)    error(d) < 7 ulp(d)
       e = o(log(d)) error(e) < [1 + 7*2^{2-Exp(e)}] ulp(e) = ke ulp(e)
       f = o(1-y)    error(f) < 1 ulp(f)
       g = o(f^2)    error(g) < 5 ulp(g)
       h = o(c+g)    error(h) < 7 ulp(h)
       i = o(log(h)) error(i) < [1 + 7*2^{2-Exp(i)}] ulp(i) = ki ulp(i)
       j = o(e-i)    error(j) < [1 + ke*2^{Exp(e)-Exp(j)}
                                   + ki*2^{Exp(i)-Exp(j)}] ulp(j)
                     error(j) < [1 + 2^{Exp(e)-Exp(j)} + 2^{Exp(i)-Exp(j)}
                                   + 7*2^{3-Exp(j)}] ulp(j)
                              < [1 + 2^{max(Exp(e),Exp(i))-Exp(j)+1}
                                   + 7*2^{3-Exp(j)}] ulp(j)
       k = j/4       exact
    */
    err = 2;
    p = prec; /* working precision */

    do
      {
        p += mpc_ceil_log2 (p) + err;
        mpfr_set_prec (a, p);
        mpfr_set_prec (b, p);
        mpfr_set_prec (y, p);

        /* a = upper bound for log(x^2 + (1+y)^2) */
        mpfr_add_ui (a, mpc_imagref (op), 1, MPFR_RNDA);
        mpfr_sqr (a, a, MPFR_RNDU);
        mpfr_sqr (y, mpc_realref (op), MPFR_RNDU);
        mpfr_add (a, a, y, MPFR_RNDU);
        mpfr_log (a, a, MPFR_RNDU);

        /* b = lower bound for log(x^2 + (1-y)^2) */
        mpfr_ui_sub (b, 1, mpc_imagref (op), MPFR_RNDZ); /* round to zero */
        mpfr_sqr (b, b, MPFR_RNDZ);
        /* we could write mpfr_sqr (y, mpc_realref (op), MPFR_RNDZ) but it is
           more efficient to reuse the value of y (x^2) above and subtract
           one ulp */
        mpfr_nextbelow (y);
        mpfr_add (b, b, y, MPFR_RNDZ);
        mpfr_log (b, b, MPFR_RNDZ);

        mpfr_sub (y, a, b, MPFR_RNDU);

        if (mpfr_zero_p (y))
          /* FIXME: happens when x and y have very different magnitudes;
             could be handled more efficiently */
          ok = 0;
        else
          {
            expo = MPC_MAX (mpfr_get_exp (a), mpfr_get_exp (b));
            expo = expo - mpfr_get_exp (y) + 1;
            err = 3 - mpfr_get_exp (y);
            /* error(j) <= [1 + 2^expo + 7*2^err] ulp(j) */
            if (expo <= err) /* error(j) <= [1 + 2^{err+1}] ulp(j) */
              err = (err < 0) ? 1 : err + 2;
            else
              err = (expo < 0) ? 1 : expo + 2;

            mpfr_div_2ui (y, y, 2, MPFR_RNDN);
            MPC_ASSERT (!mpfr_zero_p (y));
            /* FIXME: underflow. Since the main term of the Taylor series
               in y=0 is 1/(x^2+1) * y, this means that y is very small
               and/or x very large; but then the mpfr_zero_p (y) above
               should be true. This needs a proof, or better yet,
               special code. */

            ok = mpfr_can_round (y, p - err, MPFR_RNDU, MPFR_RNDD,
                                 prec + (MPC_RND_IM (rnd) == MPFR_RNDN));
          }
      } while (ok == 0);

    inex = mpc_set_fr_fr (rop, x, y, rnd);

    mpfr_clears (a, b, x, y, (mpfr_ptr) 0);

    /* restore the exponent range, and check the range of results */
    mpfr_set_emin (saved_emin);
    mpfr_set_emax (saved_emax);
    inex_re = mpfr_check_range (mpc_realref (rop), MPC_INEX_RE (inex),
                                MPC_RND_RE (rnd));
    inex_im = mpfr_check_range (mpc_imagref (rop), MPC_INEX_IM (inex),
                                MPC_RND_IM (rnd));

    return MPC_INEX (inex_re, inex_im);
  }
}