/* Uniform Interface to GMP.

Copyright 2004-2023 Free Software Foundation, Inc.
Contributed by the AriC and Caramba projects, INRIA.

This file is part of the GNU MPFR Library.

The GNU MPFR Library is free software; you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation; either version 3 of the License, or (at your
option) any later version.

The GNU MPFR Library is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
License for more details.

You should have received a copy of the GNU Lesser General Public License
along with the GNU MPFR Library; see the file COPYING.LESSER.  If not, see
https://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */

#ifndef __GMPFR_GMP_H__
#define __GMPFR_GMP_H__

#ifndef __MPFR_IMPL_H__
# error  "mpfr-impl.h not included"
#endif


/******************************************************
 ******************** C++ Compatibility ***************
 ******************************************************/
#if defined (__cplusplus)
extern "C" {
#endif


/******************************************************
 ******************** Identify GMP ********************
 ******************************************************/

/* Macro to detect the GMP version */
#if defined(__GNU_MP_VERSION) && \
    defined(__GNU_MP_VERSION_MINOR) && \
    defined(__GNU_MP_VERSION_PATCHLEVEL)
# define __MPFR_GMP(a,b,c) \
  (MPFR_VERSION_NUM(__GNU_MP_VERSION,__GNU_MP_VERSION_MINOR,__GNU_MP_VERSION_PATCHLEVEL) >= MPFR_VERSION_NUM(a,b,c))
#else
# define __MPFR_GMP(a,b,c) 0
#endif
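
/* For instance, version-dependent code can be guarded with

     #if __MPFR_GMP(6,2,0)
     ... code relying on GMP 6.2.0 or later ...
     #else
     ... fallback ...
     #endif

   (the version number 6.2.0 above is just an example).  When the GMP
   version macros are not all defined, __MPFR_GMP(a,b,c) expands to 0,
   so the fallback branch is selected. */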



/******************************************************
 ******************** Check GMP ***********************
 ******************************************************/

#if !__MPFR_GMP(5,0,0) && !defined(MPFR_USE_MINI_GMP)
# error "GMP 5.0.0 or newer is required"
#endif

#if GMP_NAIL_BITS != 0
# error "MPFR doesn't support non-zero values of GMP_NAIL_BITS"
#endif

#if (GMP_NUMB_BITS<8) || (GMP_NUMB_BITS & (GMP_NUMB_BITS - 1))
# error "GMP_NUMB_BITS must be a power of 2, and >= 8"
#endif

#if GMP_NUMB_BITS == 8
# define MPFR_LOG2_GMP_NUMB_BITS 3
#elif GMP_NUMB_BITS == 16
# define MPFR_LOG2_GMP_NUMB_BITS 4
#elif GMP_NUMB_BITS == 32
# define MPFR_LOG2_GMP_NUMB_BITS 5
#elif GMP_NUMB_BITS == 64
# define MPFR_LOG2_GMP_NUMB_BITS 6
#elif GMP_NUMB_BITS == 128
# define MPFR_LOG2_GMP_NUMB_BITS 7
#elif GMP_NUMB_BITS == 256
# define MPFR_LOG2_GMP_NUMB_BITS 8
#else
# error "Can't compute log2(GMP_NUMB_BITS)"
#endif
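
/* The test above relies on the usual power-of-two trick: for
   GMP_NUMB_BITS == 64, 64 & 63 == 0, so the test passes and
   MPFR_LOG2_GMP_NUMB_BITS == 6, i.e.
   (1 << MPFR_LOG2_GMP_NUMB_BITS) == GMP_NUMB_BITS.  For a value that is
   not a power of two, such as 48, 48 & 47 == 32 != 0 and the
   preprocessor stops with the error above. */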



/******************************************************
 ************* Define GMP Internal Interface  *********
 ******************************************************/

#ifdef MPFR_HAVE_GMP_IMPL  /* with gmp build */

#define mpfr_allocate_func   (*__gmp_allocate_func)
#define mpfr_reallocate_func (*__gmp_reallocate_func)
#define mpfr_free_func       (*__gmp_free_func)

#else  /* without gmp build (gmp-impl.h replacement) */

/* Define some macros */

#define ULONG_HIGHBIT (ULONG_MAX ^ ((unsigned long) ULONG_MAX >> 1))
#define UINT_HIGHBIT  (UINT_MAX ^ ((unsigned) UINT_MAX >> 1))
#define USHRT_HIGHBIT ((unsigned short) (USHRT_MAX ^ ((unsigned short) USHRT_MAX >> 1)))


#if __GMP_MP_SIZE_T_INT
#define MP_SIZE_T_MAX      INT_MAX
#define MP_SIZE_T_MIN      INT_MIN
#else
#define MP_SIZE_T_MAX      LONG_MAX
#define MP_SIZE_T_MIN      LONG_MIN
#endif

/* MP_LIMB macros.
   Note: GMP now also has the MPN_FILL macro, and GMP's MPN_ZERO(dst,n) is
   defined as "if (n) MPN_FILL(dst, n, 0);". */
#define MPN_ZERO(dst, n) memset((dst), 0, (n)*MPFR_BYTES_PER_MP_LIMB)
#define MPN_COPY(dst,src,n) \
  do                                                                  \
    {                                                                 \
      if ((dst) != (src))                                             \
        {                                                             \
          MPFR_ASSERTD ((char *) (dst) >= (char *) (src) +            \
                                     (n) * MPFR_BYTES_PER_MP_LIMB ||  \
                        (char *) (src) >= (char *) (dst) +            \
                                     (n) * MPFR_BYTES_PER_MP_LIMB);   \
          memcpy ((dst), (src), (n) * MPFR_BYTES_PER_MP_LIMB);        \
        }                                                             \
    }                                                                 \
  while (0)
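
/* Usage sketch (illustrative):

     mp_limb_t tmp[4];
     MPN_ZERO (tmp, 4);       // clear 4 limbs
     MPN_COPY (tmp, sp, 4);   // copy 4 limbs from sp

   where sp is some source limb pointer.  MPN_COPY does nothing when
   dst == src, and otherwise asserts (in debug builds) that the two
   regions do not overlap before calling memcpy. */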

/* MPN macros taken from gmp-impl.h */
#define MPN_NORMALIZE(DST, NLIMBS) \
  do {                                        \
    while (NLIMBS > 0)                        \
      {                                       \
        if ((DST)[(NLIMBS) - 1] != 0)         \
          break;                              \
        NLIMBS--;                             \
      }                                       \
  } while (0)
#define MPN_NORMALIZE_NOT_ZERO(DST, NLIMBS)     \
  do {                                          \
    MPFR_ASSERTD ((NLIMBS) >= 1);               \
    while (1)                                   \
      {                                         \
        if ((DST)[(NLIMBS) - 1] != 0)           \
          break;                                \
        NLIMBS--;                               \
      }                                         \
  } while (0)
#define MPN_OVERLAP_P(xp, xsize, yp, ysize) \
  ((xp) + (xsize) > (yp) && (yp) + (ysize) > (xp))
#define MPN_SAME_OR_INCR2_P(dst, dsize, src, ssize)             \
  ((dst) <= (src) || ! MPN_OVERLAP_P (dst, dsize, src, ssize))
#define MPN_SAME_OR_INCR_P(dst, src, size)      \
  MPN_SAME_OR_INCR2_P(dst, size, src, size)
#define MPN_SAME_OR_DECR2_P(dst, dsize, src, ssize)             \
  ((dst) >= (src) || ! MPN_OVERLAP_P (dst, dsize, src, ssize))
#define MPN_SAME_OR_DECR_P(dst, src, size)      \
  MPN_SAME_OR_DECR2_P(dst, size, src, size)

#ifndef MUL_FFT_THRESHOLD
#define MUL_FFT_THRESHOLD 8448
#endif

/* If mpn_mul_basecase or mpn_sqr_basecase are not exported, use mpn_mul instead. */
#ifndef mpn_mul_basecase
# define mpn_mul_basecase(dst,s1,n1,s2,n2) mpn_mul((dst),(s1),(n1),(s2),(n2))
#endif
#ifndef mpn_sqr_basecase
# define mpn_sqr_basecase(dst,src,n) mpn_mul((dst),(src),(n),(src),(n))
#endif

/* ASSERT */
__MPFR_DECLSPEC void mpfr_assert_fail (const char *, int,
                                       const char *);

#define ASSERT_FAIL(expr)  mpfr_assert_fail (__FILE__, __LINE__, #expr)
/* ASSERT() is for mpfr-longlong.h only. */
#define ASSERT(expr)       MPFR_ASSERTD(expr)

/* Access fields of GMP struct */
#define SIZ(x) ((x)->_mp_size)
#define ABSIZ(x) ABS (SIZ (x))
#define PTR(x) ((x)->_mp_d)
#define ALLOC(x) ((x)->_mp_alloc)
/* For mpf numbers only. */
#ifdef MPFR_NEED_MPF_INTERNALS
/* Note: the EXP macro name is reserved when <errno.h> is included.
   For compatibility with gmp-impl.h (cf --with-gmp-build), we cannot
   change this macro, but let's define it only when we need it, where
   <errno.h> will not be included. */
#define EXP(x) ((x)->_mp_exp)
#define PREC(x) ((x)->_mp_prec)
#endif

/* For longlong.h */
#ifdef HAVE_ATTRIBUTE_MODE
typedef unsigned int UQItype    __attribute__ ((mode (QI)));
typedef          int SItype     __attribute__ ((mode (SI)));
typedef unsigned int USItype    __attribute__ ((mode (SI)));
typedef          int DItype     __attribute__ ((mode (DI)));
typedef unsigned int UDItype    __attribute__ ((mode (DI)));
#else
typedef unsigned char UQItype;
typedef          long SItype;
typedef unsigned long USItype;
#ifdef HAVE_LONG_LONG
typedef long long int DItype;
typedef unsigned long long int UDItype;
#else /* Assume `long' gives us a wide enough type.  Needed for hppa2.0w.  */
typedef long int DItype;
typedef unsigned long int UDItype;
#endif
#endif
typedef mp_limb_t UWtype;
typedef unsigned int UHWtype;
#define W_TYPE_SIZE GMP_NUMB_BITS

/* Remap names of internal mpn functions (for longlong.h).  */
#undef  __clz_tab
#define __clz_tab               mpfr_clz_tab

/* Use (4.0 * ...) instead of (2.0 * ...) to work around buggy compilers
   that don't convert ulong->double correctly (eg. SunOS 4 native cc).  */
#undef MP_BASE_AS_DOUBLE
#define MP_BASE_AS_DOUBLE (4.0 * (MPFR_LIMB_ONE << (GMP_NUMB_BITS - 2)))
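
/* For example, with GMP_NUMB_BITS == 64, MP_BASE_AS_DOUBLE is
   4.0 * 2^62 = 2^64 = 18446744073709551616.0, i.e. the limb base as a
   double, obtained without shifting a limb by its full width. */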

/* Structure for conversion between internal binary format and
   strings in base 2..36.  */
struct bases
{
  /* log(2)/log(conversion_base) */
  double chars_per_bit_exactly;
};
#undef  __mp_bases
#define __mp_bases mpfr_bases
__MPFR_DECLSPEC extern const struct bases mpfr_bases[257];
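
/* For example, mpfr_bases[10].chars_per_bit_exactly is
   log(2)/log(10) = 0.30102..., so an n-bit number needs at most
   floor(n * 0.30102...) + 1 digits in base 10. */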

/* Standard macros */
#undef ABS
#undef MIN
#undef MAX
#define ABS(x) ((x) >= 0 ? (x) : -(x))
#define MIN(l,o) ((l) < (o) ? (l) : (o))
#define MAX(h,i) ((h) > (i) ? (h) : (i))

__MPFR_DECLSPEC void * mpfr_allocate_func (size_t);
__MPFR_DECLSPEC void * mpfr_reallocate_func (void *, size_t, size_t);
__MPFR_DECLSPEC void   mpfr_free_func (void *, size_t);

#if defined(WANT_GMP_INTERNALS) && defined(HAVE___GMPN_SBPI1_DIVAPPR_Q)
#ifndef __gmpn_sbpi1_divappr_q
__MPFR_DECLSPEC mp_limb_t __gmpn_sbpi1_divappr_q (mp_limb_t*,
                mp_limb_t*, mp_size_t, mp_limb_t*, mp_size_t, mp_limb_t);
#endif
#endif

#if defined(WANT_GMP_INTERNALS) && defined(HAVE___GMPN_INVERT_LIMB)
#ifndef __gmpn_invert_limb
__MPFR_DECLSPEC mp_limb_t __gmpn_invert_limb (mp_limb_t);
#endif
#endif

#if defined(WANT_GMP_INTERNALS) && defined(HAVE___GMPN_RSBLSH1_N)
#ifndef __gmpn_rsblsh1_n
__MPFR_DECLSPEC mp_limb_t __gmpn_rsblsh1_n (mp_limb_t*, mp_limb_t*, mp_limb_t*, mp_size_t);
#endif
#endif

/* Definitions related to temporary memory allocation */

struct tmp_marker
{
  void *ptr;
  size_t size;
  struct tmp_marker *next;
};

__MPFR_DECLSPEC void *mpfr_tmp_allocate (struct tmp_marker **,
                                         size_t);
__MPFR_DECLSPEC void mpfr_tmp_free (struct tmp_marker *);

/* Default MPFR_ALLOCA_MAX value. It can be overridden at configure time;
   giving a low value such as 0 is useful for checking buffer overflows
   with some tools, which may not be possible with memory allocated by
   alloca.
   If HAVE_ALLOCA is not defined, then alloca() is not available, so that
   MPFR_ALLOCA_MAX needs to be 0 (see the definition of TMP_ALLOC below);
   if the user has explicitly given a non-zero value, this will probably
   yield an error at link time or at run time. */
#ifndef MPFR_ALLOCA_MAX
# ifdef HAVE_ALLOCA
#  define MPFR_ALLOCA_MAX 16384
# else
#  define MPFR_ALLOCA_MAX 0
# endif
#endif

/* Do not define TMP_SALLOC (see the test in mpfr-impl.h)! */

#if MPFR_ALLOCA_MAX != 0

/* The following tries to get a good version of alloca.
   See gmp-impl.h for implementation details and original version. */
/* FIXME: the autoconf manual gives a different piece of code under the
   documentation of the AC_FUNC_ALLOCA macro. Should we switch to it?
   But note that the HAVE_ALLOCA test in it seems wrong.
   https://lists.gnu.org/archive/html/bug-autoconf/2019-01/msg00009.html */
#ifndef alloca
# if defined ( __GNUC__ )
#  define alloca __builtin_alloca
# elif defined (__DECC)
#  define alloca(x) __ALLOCA(x)
# elif defined (_MSC_VER)
#  include <malloc.h>
#  define alloca _alloca
# elif defined (HAVE_ALLOCA_H)
#  include <alloca.h>
# elif defined (_AIX) || defined (_IBMR2)
#  pragma alloca
# else
void *alloca (size_t);
# endif
#endif

#define TMP_ALLOC(n) (MPFR_ASSERTD ((n) > 0),                      \
                      MPFR_LIKELY ((n) <= MPFR_ALLOCA_MAX) ?       \
                      alloca (n) : mpfr_tmp_allocate (&tmp_marker, (n)))

#else  /* MPFR_ALLOCA_MAX == 0, alloca() not needed */

#define TMP_ALLOC(n) (mpfr_tmp_allocate (&tmp_marker, (n)))

#endif

#define TMP_DECL(m) struct tmp_marker *tmp_marker

#define TMP_MARK(m) (tmp_marker = 0)

/* Note about TMP_FREE: For small precisions, tmp_marker is null as
   the allocation is done on the stack (see TMP_ALLOC above). */
#define TMP_FREE(m) \
  (MPFR_LIKELY (tmp_marker == NULL) ? (void) 0 : mpfr_tmp_free (tmp_marker))
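
/* Typical calling sequence of the TMP_* macros, in a hypothetical
   function (illustrative sketch only):

     void f (size_t bytes)
     {
       mp_limb_t *tp;
       TMP_DECL (marker);

       TMP_MARK (marker);
       tp = (mp_limb_t *) TMP_ALLOC (bytes);
       ... use tp ...
       TMP_FREE (marker);
     }

   Small requests (at most MPFR_ALLOCA_MAX bytes) are served from the
   stack via alloca and need no explicit release; larger ones go through
   mpfr_tmp_allocate and are released by TMP_FREE. */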

#endif  /* gmp-impl.h replacement */



/******************************************************
 ****** GMP interfaces that change between GMP    *****
 ****** versions; missing interfaces are          *****
 ****** added here.                               *****
 ******************************************************/

#ifndef MPFR_LONG_WITHIN_LIMB

/* The following routines assume that an unsigned long is at least twice
   as wide as an mp_limb_t. */

#define umul_ppmm(ph, pl, m0, m1)                                       \
  do {                                                                  \
    unsigned long _p = (unsigned long) (m0) * (unsigned long) (m1);     \
    (ph) = (mp_limb_t) (_p >> GMP_NUMB_BITS);                           \
    (pl) = (mp_limb_t) (_p & MPFR_LIMB_MAX);                            \
  } while (0)
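
/* Worked example: with GMP_NUMB_BITS == 32 and a 64-bit unsigned long,
   umul_ppmm (ph, pl, m0, m1) with m0 = m1 = 0x80000000 computes
   _p = 2^62, hence ph = 0x40000000 and pl = 0, i.e.
   2^31 * 2^31 = 2^30 * 2^32 + 0. */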

#define add_ssaaaa(sh, sl, ah, al, bh, bl)                              \
  do {                                                                  \
    unsigned long _a = ((unsigned long) (ah) << GMP_NUMB_BITS) + (al);  \
    unsigned long _b = ((unsigned long) (bh) << GMP_NUMB_BITS) + (bl);  \
    unsigned long _s = _a + _b;                                         \
    (sh) = (mp_limb_t) (_s >> GMP_NUMB_BITS);                           \
    (sl) = (mp_limb_t) (_s & MPFR_LIMB_MAX);                            \
  } while (0)

#define sub_ddmmss(sh, sl, ah, al, bh, bl)                              \
  do {                                                                  \
    unsigned long _a = ((unsigned long) (ah) << GMP_NUMB_BITS) + (al);  \
    unsigned long _b = ((unsigned long) (bh) << GMP_NUMB_BITS) + (bl);  \
    unsigned long _s = _a - _b;                                         \
    (sh) = (mp_limb_t) (_s >> GMP_NUMB_BITS);                           \
    (sl) = (mp_limb_t) (_s & MPFR_LIMB_MAX);                            \
  } while (0)

#define count_leading_zeros(count,x)                                    \
  do {                                                                  \
    int _c = 0;                                                         \
    mp_limb_t _x = (mp_limb_t) (x);                                     \
    while (GMP_NUMB_BITS > 16 && (_x >> (GMP_NUMB_BITS - 16)) == 0)     \
      {                                                                 \
        _c += 16;                                                       \
        _x = (mp_limb_t) (_x << 16);                                    \
      }                                                                 \
    if (GMP_NUMB_BITS > 8 && (_x >> (GMP_NUMB_BITS - 8)) == 0)          \
      {                                                                 \
        _c += 8;                                                        \
        _x = (mp_limb_t) (_x << 8);                                     \
      }                                                                 \
    if (GMP_NUMB_BITS > 4 && (_x >> (GMP_NUMB_BITS - 4)) == 0)          \
      {                                                                 \
        _c += 4;                                                        \
        _x = (mp_limb_t) (_x << 4);                                     \
      }                                                                 \
    if (GMP_NUMB_BITS > 2 && (_x >> (GMP_NUMB_BITS - 2)) == 0)          \
      {                                                                 \
        _c += 2;                                                        \
        _x = (mp_limb_t) (_x << 2);                                     \
      }                                                                 \
    if ((_x & MPFR_LIMB_HIGHBIT) == 0)                                  \
      _c ++;                                                            \
    (count) = _c;                                                       \
  } while (0)
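
/* Worked example: with GMP_NUMB_BITS == 32 and x = 1, the loop shifts
   by 16 once, then the tests add 8, 4 and 2, and the final test adds 1,
   giving count = 16 + 8 + 4 + 2 + 1 = 31, as expected for the value 1. */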

#define invert_limb(invxl,xl)                                           \
  do {                                                                  \
    unsigned long _num;                                                 \
    MPFR_ASSERTD ((xl) != 0);                                           \
    _num = (unsigned long) (mp_limb_t) ~(xl);                           \
    _num = (_num << GMP_NUMB_BITS) | MPFR_LIMB_MAX;                     \
    (invxl) = _num / (xl);                                              \
  } while (0)

#define udiv_qrnnd(q, r, n1, n0, d)                                     \
  do {                                                                  \
    unsigned long _num;                                                 \
    _num = ((unsigned long) (n1) << GMP_NUMB_BITS) | (n0);              \
    (q) = _num / (d);                                                   \
    (r) = _num % (d);                                                   \
  } while (0)

#endif /* ifndef MPFR_LONG_WITHIN_LIMB */

/* If mpn_sqr is not defined, use mpn_mul_n instead
   (mpn_sqr was called mpn_sqr_n (internal) in older versions of GMP). */
#ifndef mpn_sqr
# define mpn_sqr(dst,src,n) mpn_mul_n((dst),(src),(src),(n))
#endif

/* invert_limb macro, copied from GMP 5.0.2, file gmp-impl.h.
   It returns invxl = floor((B^2-1)/xl)-B, where B=2^BITS_PER_LIMB,
   assuming the most significant bit of xl is set. */
#ifndef invert_limb
#if defined(WANT_GMP_INTERNALS) && defined(HAVE___GMPN_INVERT_LIMB)
#define invert_limb(invxl,xl)                             \
  do {                                                    \
    invxl = __gmpn_invert_limb (xl);                      \
  } while (0)
#else
#define invert_limb(invxl,xl)                             \
  do {                                                    \
    mp_limb_t dummy MPFR_MAYBE_UNUSED;                    \
    MPFR_ASSERTD ((xl) != 0);                             \
    udiv_qrnnd (invxl, dummy, ~(xl), MPFR_LIMB_MAX, xl);  \
  } while (0)
#endif /* defined(WANT_GMP_INTERNALS) && defined(HAVE___GMPN_INVERT_LIMB) */
#endif /* ifndef invert_limb */
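
/* Worked examples for invert_limb, with B = 2^GMP_NUMB_BITS:
   xl = B/2 (minimal normalized divisor) gives
   invxl = floor((B^2-1)/(B/2)) - B = (2*B - 1) - B = B - 1 = MPFR_LIMB_MAX,
   and xl = B-1 (maximal divisor) gives
   invxl = floor((B^2-1)/(B-1)) - B = (B+1) - B = 1. */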

/* udiv_qr_3by2 macro, adapted from GMP 5.0.2, file gmp-impl.h.
   Compute the quotient and remainder for n / d. Requires d >= B^2 / 2
   and n < d B. dinv is the inverse

     floor ((B^3 - 1) / (d0 + d1 B)) - B.

   NOTE: Output variables are updated multiple times. Only some inputs
   and outputs may overlap.
*/
#ifndef udiv_qr_3by2
# ifdef MPFR_USE_MINI_GMP
/* Avoid integer overflow on int in case of integer promotion
   (when mp_limb_t is shorter than int). Note that unsigned long
   may be longer than necessary, but GCC seems to optimize. */
#  define OP_CAST (unsigned long)
# else
#  define OP_CAST
# endif
# define udiv_qr_3by2(q, r1, r0, n2, n1, n0, d1, d0, dinv)              \
  do {                                                                  \
    mp_limb_t _q0, _t1, _t0, _mask;                                     \
    umul_ppmm ((q), _q0, (n2), (dinv));                                 \
    add_ssaaaa ((q), _q0, (q), _q0, (n2), (n1));                        \
                                                                        \
    /* Compute the two most significant limbs of n - q'd */             \
    (r1) = (n1) - OP_CAST (d1) * (q);                                   \
    (r0) = (n0);                                                        \
    sub_ddmmss ((r1), (r0), (r1), (r0), (d1), (d0));                    \
    umul_ppmm (_t1, _t0, (d0), (q));                                    \
    sub_ddmmss ((r1), (r0), (r1), (r0), _t1, _t0);                      \
    (q)++;                                                              \
                                                                        \
    /* Conditionally adjust q and the remainders */                     \
    _mask = - (mp_limb_t) ((r1) >= _q0);                                \
    (q) += _mask;                                                       \
    add_ssaaaa ((r1), (r0), (r1), (r0), _mask & (d1), _mask & (d0));    \
    if (MPFR_UNLIKELY ((r1) >= (d1)))                                   \
      {                                                                 \
        if ((r1) > (d1) || (r0) >= (d0))                                \
          {                                                             \
            (q)++;                                                      \
            sub_ddmmss ((r1), (r0), (r1), (r0), (d1), (d0));            \
          }                                                             \
      }                                                                 \
  } while (0)
#endif
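
/* Here B = 2^GMP_NUMB_BITS, the divisor d = d1*B + d0 is normalized
   (its most significant bit is set, hence d >= B^2/2), and the numerator
   n = n2*B^2 + n1*B + n0 satisfies n < d*B so that the quotient fits in
   one limb.  The dinv argument is typically the inv32 field of an
   mpfr_pi1_t filled by invert_pi1 below. */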

/* invert_pi1 macro adapted from GMP 5; it computes in (dinv).inv32
   the value of floor((beta^3 - 1)/(d1*beta+d0)) - beta,
   cf "Improved Division by Invariant Integers" by Niels Möller and
   Torbjörn Granlund. */
typedef struct {mp_limb_t inv32;} mpfr_pi1_t;
#ifndef invert_pi1
#define invert_pi1(dinv, d1, d0)                                        \
  do {                                                                  \
    mp_limb_t _v, _p, _t1, _t0, _mask;                                  \
    invert_limb (_v, d1);                                               \
    _p = (d1) * _v;                                                     \
    _p += (d0);                                                         \
    if (_p < (d0))                                                      \
      {                                                                 \
        _v--;                                                           \
        _mask = -(mp_limb_t) (_p >= (d1));                              \
        _p -= (d1);                                                     \
        _v += _mask;                                                    \
        _p -= _mask & (d1);                                             \
      }                                                                 \
    umul_ppmm (_t1, _t0, d0, _v);                                       \
    _p += _t1;                                                          \
    if (_p < _t1)                                                       \
      {                                                                 \
        _v--;                                                           \
        if (MPFR_UNLIKELY (_p >= (d1)))                                 \
          {                                                             \
            if (_p > (d1) || _t0 >= (d0))                               \
              _v--;                                                     \
          }                                                             \
      }                                                                 \
    (dinv).inv32 = _v;                                                  \
  } while (0)
#endif
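
/* Note that invert_pi1 generalizes invert_limb to a 2-limb divisor:
   when d0 == 0, floor((beta^3-1)/(d1*beta)) - beta equals
   floor((beta^2-1)/d1) - beta, so invert_pi1 (dinv, d1, 0) and
   invert_limb (v, d1) produce the same value.  The result is what
   udiv_qr_3by2 above expects as its dinv argument. */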

/* The following macro is copied from GMP-6.1.1, file gmp-impl.h,
   macro udiv_qrnnd_preinv.
   It computes q and r such that nh*2^GMP_NUMB_BITS + nl = q*d + r,
   with 0 <= r < d, assuming di = __gmpn_invert_limb (d). */
#define __udiv_qrnnd_preinv(q, r, nh, nl, d, di)                        \
  do {                                                                  \
    mp_limb_t _qh, _ql, _r, _mask;                                      \
    umul_ppmm (_qh, _ql, (nh), (di));                                   \
    if (__builtin_constant_p (nl) && (nl) == 0)                         \
      {                                                                 \
        _qh += (nh) + 1;                                                \
        _r = - _qh * (d);                                               \
        _mask = -(mp_limb_t) (_r > _ql); /* both > and >= are OK */     \
        _qh += _mask;                                                   \
        _r += _mask & (d);                                              \
      }                                                                 \
    else                                                                \
      {                                                                 \
        add_ssaaaa (_qh, _ql, _qh, _ql, (nh) + 1, (nl));                \
        _r = (nl) - _qh * (d);                                          \
        _mask = -(mp_limb_t) (_r > _ql); /* both > and >= are OK */     \
        _qh += _mask;                                                   \
        _r += _mask & (d);                                              \
        if (MPFR_UNLIKELY (_r >= (d)))                                  \
          {                                                             \
            _r -= (d);                                                  \
            _qh++;                                                      \
          }                                                             \
      }                                                                 \
    (r) = _r;                                                           \
    (q) = _qh;                                                          \
  } while (0)
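
/* Worked example: with GMP_NUMB_BITS == 64, d = 2^63 and
   di = floor((2^128-1)/2^63) - 2^64 = 2^64 - 1, the call
   __udiv_qrnnd_preinv (q, r, 1, 0, d, di) yields q = 2 and r = 0,
   matching 1 * 2^64 + 0 = 2 * 2^63 + 0. */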

#if GMP_NUMB_BITS == 64
/* specialized version for nl = 0, with di computed inside */
#define __udiv_qrnd_preinv(q, r, nh, d)                                 \
  do {                                                                  \
    mp_limb_t _di;                                                      \
                                                                        \
    MPFR_ASSERTD ((d) != 0);                                            \
    MPFR_ASSERTD ((nh) < (d));                                          \
    MPFR_ASSERTD ((d) & MPFR_LIMB_HIGHBIT);                             \
                                                                        \
    __gmpfr_invert_limb (_di, d);                                       \
    __udiv_qrnnd_preinv (q, r, nh, 0, d, _di);                          \
  } while (0)
#elif defined(WANT_GMP_INTERNALS) && defined(HAVE___GMPN_INVERT_LIMB)
/* specialized version for nl = 0, with di computed inside */
#define __udiv_qrnd_preinv(q, r, nh, d)                                 \
  do {                                                                  \
    mp_limb_t _di;                                                      \
                                                                        \
    MPFR_ASSERTD ((d) != 0);                                            \
    MPFR_ASSERTD ((nh) < (d));                                          \
    MPFR_ASSERTD ((d) & MPFR_LIMB_HIGHBIT);                             \
                                                                        \
    _di = __gmpn_invert_limb (d);                                       \
    __udiv_qrnnd_preinv (q, r, nh, 0, d, _di);                          \
  } while (0)
#else
/* Same as __udiv_qrnnd_c from longlong.h, but using a single UWtype/UWtype
   division instead of two, and with n0=0. The analysis below assumes a limb
   has 64 bits for simplicity. */
#define __udiv_qrnd_preinv(q, r, n1, d)                                 \
  do {                                                                  \
    UWtype __d1, __d0, __q1, __q0, __r1, __r0, __i;                     \
                                                                        \
    MPFR_ASSERTD ((d) != 0);                                            \
    MPFR_ASSERTD ((n1) < (d));                                          \
    MPFR_ASSERTD ((d) & MPFR_LIMB_HIGHBIT);                             \
                                                                        \
    __d1 = __ll_highpart (d);                                           \
    /* 2^31 <= d1 < 2^32 */                                             \
    __d0 = __ll_lowpart (d);                                            \
    /* 0 <= d0 < 2^32 */                                                \
    __i = ~(UWtype) 0 / __d1;                                           \
    /* 2^32 < i < 2^33 with i < 2^64/d1 */                              \
                                                                        \
    __q1 = (((n1) / __ll_B) * __i) / __ll_B;                            \
    /* Since n1 < d, high(n1) <= d1 = high(d), thus */                  \
    /* q1 <= high(n1) * (2^64/d1) / 2^32 <= 2^32 */                     \
    /* and also q1 <= n1/d1 thus r1 >= 0 below */                       \
    __r1 = (n1) - __q1 * __d1;                                          \
    while (__r1 >= __d1)                                                \
      __q1 ++, __r1 -= __d1;                                            \
    /* by construction, we have n1 = q1*d1+r1, and 0 <= r1 < d1 */      \
    /* thus q1 <= n1/d1 < 2^32+2 */                                     \
    /* q1*d0 <= (2^32+1)*(2^32-1) <= 2^64-1 thus it fits in a limb. */  \
    __r0 = __r1 * __ll_B;                                               \
    __r1 = __r0 - __q1 * __d0;                                          \
    /* At most two corrections are needed like in __udiv_qrnnd_c. */    \
    if (__r1 > __r0) /* borrow when subtracting q1*d0 */                \
      {                                                                 \
        __q1--, __r1 += (d);                                            \
        if (__r1 > (d)) /* no carry when adding d */                    \
          __q1--, __r1 += (d);                                          \
      }                                                                 \
    /* We can have r1 < m here, but in this case the true remainder */  \
    /* is 2^64+r1, which is > m (same analysis as below for r0). */     \
    /* Now we have n1*2^32 = q1*d + r1, with 0 <= r1 < d. */            \
    MPFR_ASSERTD(__r1 < (d));                                           \
                                                                        \
    /* The same analysis as above applies, with n1 replaced by r1, */   \
    /* q1 by q0, r1 by r0. */                                           \
    __q0 = ((__r1 / __ll_B) * __i) / __ll_B;                            \
    __r0 = __r1  - __q0 * __d1;                                         \
    while (__r0 >= __d1)                                                \
      __q0 ++, __r0 -= __d1;                                            \
    __r1 = __r0 * __ll_B;                                               \
    __r0 = __r1 - __q0 * __d0;                                          \
    /* We know the quotient floor(r1*2^64/d) is q0, q0-1 or q0-2.*/     \
    if (__r0 > __r1) /* borrow when subtracting q0*d0 */                \
      {                                                                 \
        /* The quotient is either q0-1 or q0-2. */                      \
        __q0--, __r0 += (d);                                            \
        if (__r0 > (d)) /* no carry when adding d */                    \
          __q0--, __r0 += (d);                                          \
      }                                                                 \
    /* Now we have n1*2^64 = (q1*2^32+q0)*d + r0, with 0 <= r0 < d. */  \
    MPFR_ASSERTD(__r0 < (d));                                           \
                                                                        \
    (q) = __q1 * __ll_B | __q0;                                         \
    (r) = __r0;                                                         \
  } while (0)
#endif

/******************************************************
 ************* GMP Basic Pointer Types ****************
 ******************************************************/

typedef mp_limb_t *mpfr_limb_ptr;
typedef const mp_limb_t *mpfr_limb_srcptr;

/* mpfr_ieee_double_extract structure (copied from GMP 6.1.0, gmp-impl.h, with
   ieee_double_extract changed into mpfr_ieee_double_extract, and
   _GMP_IEEE_FLOATS changed into _MPFR_IEEE_FLOATS). */

/* Define mpfr_ieee_double_extract and _MPFR_IEEE_FLOATS.

   Bit field packing is "implementation defined" according to C99, which
   leaves us at the compiler's mercy here.  For some systems packing is
   defined in the ABI (eg. x86).  In any case so far it seems universal that
   little endian systems pack from low to high, and big endian from high to
   low within the given type.

   Within the fields we rely on the integer endianness being the same as the
   float endianness, this is true everywhere we know of and it'd be a fairly
   strange system that did anything else.  */

/* Note for MPFR: building with "gcc -std=c89 -pedantic -pedantic-errors"
   fails if the bit-field type is unsigned long:

     error: type of bit-field '...' is a GCC extension [-Wpedantic]

   Though with -std=c99 no errors are obtained, this is still an extension
   in C99, which says:

     A bit-field shall have a type that is a qualified or unqualified version
     of _Bool, signed int, unsigned int, or some other implementation-defined
     type.

   So, unsigned int should be better. This will fail with implementations
   having 16-bit int's, but such implementations are not required to
   support bit-fields of size > 16 anyway; if ever an implementation with
   16-bit int's is found, the appropriate minimal changes could still be
   done in the future. See WG14/N2921 (5.16).
*/

#ifndef _MPFR_IEEE_FLOATS

#ifdef HAVE_DOUBLE_IEEE_LITTLE_ENDIAN
#define _MPFR_IEEE_FLOATS 1
union mpfr_ieee_double_extract
{
  struct
    {
      unsigned int manl:32;
      unsigned int manh:20;
      unsigned int exp:11;
      unsigned int sig:1;
    } s;
  double d;
};
#endif

#ifdef HAVE_DOUBLE_IEEE_BIG_ENDIAN
#define _MPFR_IEEE_FLOATS 1
union mpfr_ieee_double_extract
{
  struct
    {
      unsigned int sig:1;
      unsigned int exp:11;
      unsigned int manh:20;
      unsigned int manl:32;
    } s;
  double d;
};
#endif

#endif /* _MPFR_IEEE_FLOATS */
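
/* Worked example: for d = 1.0 (IEEE-754 binary64 encoding
   0x3FF0000000000000), the fields read back as sig = 0, exp = 1023
   (biased exponent, i.e. unbiased exponent 0), manh = 0 and manl = 0.
   For d = -2.5 (0xC004000000000000): sig = 1, exp = 1024, manh = 0x40000
   and manl = 0.  Both unions give the same field values; only the
   packing order differs with the endianness. */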

/******************************************************
 ******************** C++ Compatibility ***************
 ******************************************************/
#if defined (__cplusplus)
}
#endif

#endif /* __GMPFR_GMP_H__ */