xref: /netbsd-src/external/lgpl3/mpc/dist/src/radius.c (revision 367b82799ab709709d3c3b541df56a2a14644d3e)
1 /* radius -- Functions for radii of complex balls.
2 
3 Copyright (C) 2022 INRIA
4 
5 This file is part of GNU MPC.
6 
7 GNU MPC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU Lesser General Public License as published by the
9 Free Software Foundation; either version 3 of the License, or (at your
10 option) any later version.
11 
12 GNU MPC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
14 FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
15 more details.
16 
17 You should have received a copy of the GNU Lesser General Public License
18 along with this program. If not, see http://www.gnu.org/licenses/ .
19 */
20 
21 #include <math.h>
22 #include <inttypes.h> /* for the PRIi64 format modifier */
23 #include <stdio.h>    /* for FILE */
24 #include "mpc-impl.h"
25 
/* Field accessors for the radius type. */
#define MPCR_MANT(r) ((r)->mant)
#define MPCR_EXP(r) ((r)->exp)
/* Bounds of a normalised mantissa: MPCR_MANT_MIN = 2^30 <= mant
   < 2^31 = MPCR_MANT_MAX (the upper bound is exclusive). */
#define MPCR_MANT_MIN (((int64_t) 1) << 30)
#define MPCR_MANT_MAX (MPCR_MANT_MIN << 1)
30 
31 /* The radius can take three types of values, represented as follows:
32    infinite: the mantissa is -1 and the exponent is undefined;
33    0: the mantissa and the exponent are 0;
34    positive: the mantissa is a positive integer, and the radius is
35    mantissa*2^exponent. A normalised positive radius "has 31 bits",
36    in the sense that the bits 0 to 29 are arbitrary, bit 30 is 1,
37    and bits 31 to 63 are 0; otherwise said, the mantissa lies between
38    2^30 and 2^31-1.
39    Unless stated otherwise, all functions take normalised inputs and
40    produce normalised output; they compute only upper bounds on the radii,
41    without guaranteeing that these are tight. */
42 
43 
static void mpcr_add_one_ulp (mpcr_ptr r)
   /* Increment the mantissa of the normalised r by one ulp; if it
      overflows the 31 bit range, renormalise by halving it and
      increasing the exponent. */
{
   if (++MPCR_MANT (r) == MPCR_MANT_MAX) {
      MPCR_MANT (r) = MPCR_MANT_MIN;
      MPCR_EXP (r)++;
   }
}
53 
54 
static unsigned int leading_bit (int64_t n)
   /* Assuming that n is a positive integer, return the position
      (from 0 to 62) of the leading bit, that is, the k such that
      n >= 2^k, but n < 2^(k+1). */
{
   uint64_t m = (uint64_t) n;
   unsigned int k = 0;
   unsigned int shift;

   /* Binary reduction: for window sizes 32, 16, 8, 4, 2, 1, whenever
      the upper half of the remaining window is non-empty, shift it down
      and account for the shift in k. Since n is positive, bit 63 is
      clear and the first step looks at bits 32 to 62 only. */
   for (shift = 32; shift > 0; shift >>= 1)
      if (m >> shift) {
         m >>= shift;
         k += shift;
      }

   return k;
}
252 
253 
static void mpcr_normalise_rnd (mpcr_ptr r, mpfr_rnd_t rnd)
   /* Bring the potentially unnormalised r into normalised form; the
      result is rounded up or down depending on whether rnd is MPFR_RNDU
      or MPFR_RNDD. For efficiency reasons, rounding up does not detect
      exact cases and always adds one ulp after a right shift. */
{
   unsigned int k;

   if (mpcr_zero_p (r)) {
      /* Canonical zero also has a zero exponent. */
      MPCR_EXP (r) = 0;
      return;
   }
   if (mpcr_inf_p (r))
      return;

   k = leading_bit (MPCR_MANT (r));
   if (k <= 30) {
      /* Mantissa too small: move the leading bit up into position 30;
         the shift is exact. */
      MPCR_MANT (r) <<= 30 - k;
      MPCR_EXP (r) -= 30 - k;
   }
   else {
      /* Mantissa too large: drop the low bits and compensate in the
         exponent, rounding as requested. */
      MPCR_MANT (r) >>= k - 30;
      MPCR_EXP (r) += k - 30;
      if (rnd == MPFR_RNDU)
         mpcr_add_one_ulp (r);
   }
}
278 
279 
static void mpcr_normalise (mpcr_ptr r)
   /* Normalise the potentially unnormalised r, rounding the result up,
      which is the safe direction for radii. */
{
   mpcr_normalise_rnd (r, MPFR_RNDU);
}
285 
286 
int mpcr_inf_p (mpcr_srcptr r)
   /* Return true if and only if r is infinite, encoded by the mantissa
      value -1. */
{
   return (MPCR_MANT (r) == -1);
}
291 
292 
int mpcr_zero_p (mpcr_srcptr r)
   /* Return true if and only if r is zero, encoded by a zero mantissa. */
{
   return (MPCR_MANT (r) == 0);
}
297 
298 
int mpcr_lt_half_p (mpcr_srcptr r)
   /* Return true if r < 1/2, false otherwise. */
{
   /* A normalised positive radius has mant < 2^31, so it is smaller
      than 1/2 exactly when its exponent is below -31. */
   if (mpcr_zero_p (r))
      return 1;
   return MPCR_EXP (r) < -31;
}
304 
305 
int mpcr_cmp (mpcr_srcptr r, mpcr_srcptr s)
   /* Compare r and s, returning -1, 0 or +1; infinity compares larger
      than any finite radius, and 0 smaller than any positive one.
      Normalised positive radii compare lexicographically by exponent,
      then mantissa. */
{
   int inf_r, inf_s;

   inf_r = mpcr_inf_p (r);
   inf_s = mpcr_inf_p (s);
   if (inf_r || inf_s)
      return inf_r - inf_s;

   if (mpcr_zero_p (r) || mpcr_zero_p (s))
      return mpcr_zero_p (s) - mpcr_zero_p (r);

   if (MPCR_EXP (r) != MPCR_EXP (s))
      return (MPCR_EXP (r) > MPCR_EXP (s) ? +1 : -1);
   if (MPCR_MANT (r) != MPCR_MANT (s))
      return (MPCR_MANT (r) > MPCR_MANT (s) ? +1 : -1);
   return 0;
}
333 
334 
void mpcr_set_inf (mpcr_ptr r)
   /* Mark r as infinite; the exponent is left untouched since it is
      meaningless for an infinite radius. */
{
   MPCR_MANT (r) = -1;
}
339 
340 
void mpcr_set_zero (mpcr_ptr r)
   /* Set r to the canonical representation of 0, with both fields
      cleared. */
{
   MPCR_EXP (r) = 0;
   MPCR_MANT (r) = 0;
}
346 
347 
void mpcr_set_one (mpcr_ptr r)
   /* Set r to the normalised representation of 1, that is, the mantissa
      2^30 with exponent -30. */
{
   MPCR_MANT (r) = MPCR_MANT_MIN;
   MPCR_EXP (r) = -30;
}
353 
354 
void mpcr_set (mpcr_ptr r, mpcr_srcptr s)
   /* Copy s into r by structure assignment. */
{
   *r = *s;
}
359 
360 
void mpcr_set_ui64_2si64 (mpcr_ptr r, uint64_t mant, int64_t exp)
   /* Set r to mant*2^exp, rounded up. */
{
   if (mant == 0)
      mpcr_set_zero (r);
   else {
      if (mant >= ((uint64_t) 1) << 63) {
         /* Halve the mantissa, rounding up, so that it fits into the
            non-negative range of int64_t. */
         mant = mant / 2 + mant % 2;
         exp++;
         if (mant >= ((uint64_t) 1) << 63) {
            /* This happens only for mant == 2^64 - 1, where rounding up
               yields exactly 2^63; casting that to int64_t would produce
               a negative mantissa (implementation-defined conversion)
               and subsequently shift a negative value in normalisation
               (undefined behaviour). Halve once more; this is exact. */
            mant >>= 1;
            exp++;
         }
      }
      MPCR_MANT (r) = (int64_t) mant;
      MPCR_EXP (r) = exp;
      mpcr_normalise (r);
   }
}
379 
380 
void mpcr_max (mpcr_ptr r, mpcr_srcptr s, mpcr_srcptr t)
   /* Set r to the maximum of s and t; if either is infinite, so is the
      result. On ties, s is copied, as in the original ordering of the
      comparisons. */
{
   if (mpcr_inf_p (s) || mpcr_inf_p (t))
      mpcr_set_inf (r);
   else if (mpcr_cmp (s, t) >= 0)
      mpcr_set (r, s);
   else
      mpcr_set (r, t);
}
399 
400 
int64_t mpcr_get_exp (mpcr_srcptr r)
   /* Return the exponent e such that r = m * 2^e with m such that
      0.5 <= m < 1; meaningful only for finite, non-zero r. */
{
   /* The stored mantissa lies in [2^30, 2^31), that is, it equals
      m * 2^31. */
   return 31 + MPCR_EXP (r);
}
407 
void mpcr_out_str (FILE *f, mpcr_srcptr r)
   /* Print a human-readable representation of r to the stream f:
      the infinity sign, "0", or the pair (mantissa, exponent). */
{
   if (mpcr_inf_p (r))
      fprintf (f, "∞");
   else if (mpcr_zero_p (r))
      fprintf (f, "0");
   else
      fprintf (f, "±(%" PRIi64 ", %" PRIi64 ")", MPCR_MANT (r), MPCR_EXP (r));
}
418 
static void mpcr_mul_rnd (mpcr_ptr r, mpcr_srcptr s, mpcr_srcptr t,
    mpfr_rnd_t rnd)
    /* Set r to the product of s and t, rounded according to whether rnd
       is MPFR_RNDU or MPFR_RNDD. */
{
   if (mpcr_inf_p (s) || mpcr_inf_p (t)) {
      mpcr_set_inf (r);
      return;
   }
   if (mpcr_zero_p (s) || mpcr_zero_p (t)) {
      mpcr_set_zero (r);
      return;
   }
   /* The product of two mantissae below 2^31 fits into 62 bits, so it
      cannot overflow into the sign bit; normalisation then rounds. */
   MPCR_MANT (r) = MPCR_MANT (s) * MPCR_MANT (t);
   MPCR_EXP (r) = MPCR_EXP (s) + MPCR_EXP (t);
   mpcr_normalise_rnd (r, rnd);
}
434 
435 
void mpcr_mul (mpcr_ptr r, mpcr_srcptr s, mpcr_srcptr t)
   /* Set r to an upper bound on the product of s and t. */
{
   mpcr_mul_rnd (r, s, t, MPFR_RNDU);
}
440 
441 
void mpcr_mul_2ui (mpcr_ptr r, mpcr_srcptr s, unsigned long int e)
   /* Set r to s * 2^e; this is exact and keeps the mantissa, and thus
      the normalisation, unchanged. */
{
   if (mpcr_inf_p (s))
      mpcr_set_inf (r);
   else if (mpcr_zero_p (s))
      mpcr_set_zero (r);
   else {
      MPCR_EXP (r) = MPCR_EXP (s) + (int64_t) e;
      MPCR_MANT (r) = MPCR_MANT (s);
   }
}
453 
454 
void mpcr_sqr (mpcr_ptr r, mpcr_srcptr s)
   /* Set r to an upper bound on the square of s. */
{
   mpcr_mul_rnd (r, s, s, MPFR_RNDU);
}
459 
460 
static void mpcr_add_rnd (mpcr_ptr r, mpcr_srcptr s, mpcr_srcptr t,
   mpfr_rnd_t rnd)
    /* Set r to the sum of s and t, rounded according to whether rnd
       is MPFR_RNDU or MPFR_RNDD.
       The function also works correctly for certain non-normalised
       arguments s and t as long as the sum of their (potentially shifted
       if the exponents are not the same) mantissae does not flow over into
       the sign bit of the resulting mantissa. This is in particular the
       case when the mantissae of s and t start with the bits 00, that is,
       are less than 2^62, for instance because they are the results of
       multiplying two normalised mantissae together, so that an fmma
       function can be implemented without intermediate normalisation of
       the products. */
{
   mpcr_srcptr big, small;
   int64_t d;

   if (mpcr_inf_p (s) || mpcr_inf_p (t))
      mpcr_set_inf (r);
   else if (mpcr_zero_p (s))
      mpcr_set (r, t);
   else if (mpcr_zero_p (t))
      mpcr_set (r, s);
   else {
      /* Both operands are finite and non-zero; align the operand with
         the smaller exponent to the larger one. */
      if (MPCR_EXP (s) >= MPCR_EXP (t)) {
         big = s;
         small = t;
      }
      else {
         big = t;
         small = s;
      }
      d = MPCR_EXP (big) - MPCR_EXP (small);
      if (d >= 64)
         /* Shifting by the bitlength of the type or more may cause
            compiler warnings and run time errors; the smaller summand
            is then entirely absorbed by the final rounding. */
         MPCR_MANT (r) = MPCR_MANT (big);
      else
         MPCR_MANT (r) = MPCR_MANT (big) + (MPCR_MANT (small) >> d);
      MPCR_EXP (r) = MPCR_EXP (big);
      if (rnd == MPFR_RNDU)
         MPCR_MANT (r)++;
      mpcr_normalise_rnd (r, rnd);
   }
}
507 
508 
void mpcr_add (mpcr_ptr r, mpcr_srcptr s, mpcr_srcptr t)
   /* Set r to an upper bound on the sum of s and t. */
{
   mpcr_add_rnd (r, s, t, MPFR_RNDU);
}
513 
514 
void mpcr_sub_rnd (mpcr_ptr r, mpcr_srcptr s, mpcr_srcptr t, mpfr_rnd_t rnd)
   /* Set r to s - t, rounded according to whether rnd is MPFR_RNDU or
      MPFR_RNDD; if the result were negative, it is set to infinity. */
{
   int cmp;
   int64_t d;

   cmp = mpcr_cmp (s, t);
   if (mpcr_inf_p (s) || mpcr_inf_p (t) || cmp < 0)
      mpcr_set_inf (r);
   else if (cmp == 0)
      mpcr_set_zero (r);
   else if (mpcr_zero_p (t))
      mpcr_set (r, s);
   else {
      /* Both operands are positive and normalised, and s > t, so
         s has the larger exponent and the difference is positive. */
      d = MPCR_EXP (s) - MPCR_EXP (t);
      MPCR_MANT (r) = (d >= 64)
         ? MPCR_MANT (s)
         : MPCR_MANT (s) - (MPCR_MANT (t) >> d);
      MPCR_EXP (r) = MPCR_EXP (s);
      if (rnd == MPFR_RNDD)
         MPCR_MANT (r)--;
      mpcr_normalise_rnd (r, rnd);
   }
}
542 
543 
void mpcr_sub (mpcr_ptr r, mpcr_srcptr s, mpcr_srcptr t)
   /* Set r to an upper bound on s - t, or infinity if s < t. */
{
   mpcr_sub_rnd (r, s, t, MPFR_RNDU);
}
548 
549 
void mpcr_div (mpcr_ptr r, mpcr_srcptr s, mpcr_srcptr t)
   /* Set r to an upper bound on s / t; division by zero yields
      infinity. */
{
   if (mpcr_inf_p (s) || mpcr_inf_p (t) || mpcr_zero_p (t))
      mpcr_set_inf (r);
   else if (mpcr_zero_p (s))
      mpcr_set_zero (r);
   else {
      /* Scale the numerator by 2^32 (fits: mant < 2^31, so the shift
         stays below 2^63) to keep enough significant bits in the
         integer quotient; adding 1 makes it an upper bound. */
      MPCR_MANT (r) = (MPCR_MANT (s) << 32) / MPCR_MANT (t) + 1;
      MPCR_EXP (r) = MPCR_EXP (s) - MPCR_EXP (t) - 32;
      mpcr_normalise (r);
   }
}
562 
563 
void mpcr_div_2ui (mpcr_ptr r, mpcr_srcptr s, unsigned long int e)
   /* Set r to s / 2^e; this is exact and keeps the mantissa, and thus
      the normalisation, unchanged. */
{
   if (mpcr_inf_p (s))
      mpcr_set_inf (r);
   else if (mpcr_zero_p (s))
      mpcr_set_zero (r);
   else {
      MPCR_EXP (r) = MPCR_EXP (s) - (int64_t) e;
      MPCR_MANT (r) = MPCR_MANT (s);
   }
}
575 
576 
int64_t sqrt_int64 (int64_t n)
   /* Assuming that 2^30 <= n < 2^32, return ceil (sqrt (n*2^30)). */
{
   uint64_t N, s;
   int i;

   /* Heron's ("Babylonian") iteration: replacing the geometric mean
      sqrt (N) = sqrt (s * N/s) by the arithmetic mean (s + N/s) / 2,
      rounded up, always yields an upper bound on the square root. */
   N = ((uint64_t) n) << 30;
   s = ((uint64_t) 1) << 31;
   for (i = 0; i < 5; i++)
      s = (s * s + N + 2 * s - 1) / (2 * s);

   /* Exhaustive search over all admissible n shows that after five
      iterations s equals ceil (sqrt (N)), except when N is a perfect
      square, where the iteration stabilises on sqrt (N) + 1; testing
      s - 1 covers that case (and makes a sixth iteration unnecessary). */
   return ((s - 1) * (s - 1) >= N) ? (int64_t) (s - 1) : (int64_t) s;
}
604 
605 
static void mpcr_sqrt_rnd (mpcr_ptr r, mpcr_srcptr s, mpfr_rnd_t rnd)
    /* Set r to the square root of s, rounded according to whether rnd is
       MPFR_RNDU or MPFR_RNDD. */
{
   if (mpcr_inf_p (s)) {
      mpcr_set_inf (r);
      return;
   }
   if (mpcr_zero_p (s)) {
      mpcr_set_zero (r);
      return;
   }
   /* Split off an even power of 2 so that the square root of the
      remaining mantissa can be delegated to sqrt_int64, which returns
      ceil (sqrt (mant * 2^30)). */
   if (MPCR_EXP (s) % 2 == 0) {
      MPCR_MANT (r) = sqrt_int64 (MPCR_MANT (s));
      MPCR_EXP (r) = MPCR_EXP (s) / 2 - 15;
   }
   else {
      /* Odd exponent: fold one factor of 2 into the mantissa; the
         exponent adjustment (exp - 1) / 2 is exact since exp - 1 is
         even. */
      MPCR_MANT (r) = sqrt_int64 (2 * MPCR_MANT (s));
      MPCR_EXP (r) = (MPCR_EXP (s) - 1) / 2 - 15;
   }
   /* Now 2^30 <= mant < 2^31, so r is normalised; if the mantissa is
      exactly 2^30, the square root was exact, and rounding down must
      not subtract an ulp (which would also break normalisation). */
   if (rnd == MPFR_RNDD && MPCR_MANT (r) != MPCR_MANT_MIN)
      MPCR_MANT (r)--;
}
631 
632 
void mpcr_sqrt (mpcr_ptr r, mpcr_srcptr s)
   /* Set r to an upper bound on the square root of s. */
{
   mpcr_sqrt_rnd (r, s, MPFR_RNDU);
}
637 
638 
static void mpcr_set_d_rnd (mpcr_ptr r, double d, mpfr_rnd_t rnd)
   /* Assuming that d is a positive double, set r to d rounded according
      to rnd, which can be one of MPFR_RNDU or MPFR_RNDD. */
{
   int e;
   double frac;

   /* Split d = frac * 2^e with 0.5 <= frac < 1; scaling frac by 2^53
      exposes the full double mantissa as an integer, which the final
      normalisation then rounds to 31 bits. */
   frac = frexp (d, &e);
   MPCR_MANT (r) = (int64_t) (frac * (((int64_t) 1) << 53));
   MPCR_EXP (r) = (int64_t) e - 53;
   mpcr_normalise_rnd (r, rnd);
}
651 
652 
static void mpcr_f_abs_rnd (mpcr_ptr r, mpfr_srcptr z, mpfr_rnd_t rnd)
   /* Set r to the absolute value of z, rounded according to rnd, which
      can be one of MPFR_RNDU or MPFR_RNDD. */
{
   double d;

   if (mpfr_cmp_ui (z, 0) == 0) {
      mpcr_set_zero (r);
      return;
   }
   /* Converting to double away from zero (respectively towards zero)
      yields an upper (lower) bound on |z| once the sign is dropped. */
   d = mpfr_get_d (z, rnd == MPFR_RNDU ? MPFR_RNDA : MPFR_RNDZ);
   if (d < 0)
      d = -d;
   mpcr_set_d_rnd (r, d, rnd);
}
673 
674 
void mpcr_add_rounding_error (mpcr_ptr r, mpfr_prec_t p, mpfr_rnd_t rnd)
   /* Replace r, radius of a complex ball, by the new radius obtained after
      rounding both parts of the centre of the ball in direction rnd at
      precision p.
      Otherwise said:
      r += ldexp (1 + r, -p) for rounding to nearest, adding 0.5 ulp;
      r += ldexp (1 + r, 1-p) for directed rounding, adding 1 ulp. */
{
   mpcr_t s;

   mpcr_set_one (s);
   mpcr_add (s, s, r);   /* s = 1 + r */
   /* Rounding to nearest loses at most 0.5 ulp = 2^-p, directed
      rounding at most 1 ulp = 2^(1-p), relative to the centre. */
   mpcr_div_2ui (s, s, rnd == MPFR_RNDN ? p : p - 1);
   mpcr_add (r, r, s);
}
694 
695 
void mpcr_c_abs_rnd (mpcr_ptr r, mpc_srcptr z, mpfr_rnd_t rnd)
    /* Compute a bound on mpc_abs (z) in r.
       rnd can take either of the values MPFR_RNDU and MPFR_RNDD, and
       the function computes an upper or a lower bound, respectively. */
{
   mpcr_t re, im, sqr;

   mpcr_f_abs_rnd (re, mpc_realref (z), rnd);
   mpcr_f_abs_rnd (im, mpc_imagref (z), rnd);

   if (mpcr_zero_p (re))
      mpcr_set (r, im);
   else if (mpcr_zero_p (im))
      mpcr_set (r, re);
   else {
      /* The 31 bit mantissae can be squared exactly in 62 bits, and
         mpcr_add_rnd copes with the resulting unnormalised operands. */
      MPCR_MANT (sqr) = MPCR_MANT (im) * MPCR_MANT (im);
      MPCR_EXP (sqr) = 2 * MPCR_EXP (im);
      MPCR_MANT (r) = MPCR_MANT (re) * MPCR_MANT (re);
      MPCR_EXP (r) = 2 * MPCR_EXP (re);
      mpcr_add_rnd (r, r, sqr, rnd);
      mpcr_sqrt_rnd (r, r, rnd);
   }
}
721 
722