/* Uniform Interface to GMP.

Copyright 2004-2023 Free Software Foundation, Inc.
Contributed by the AriC and Caramba projects, INRIA.

This file is part of the GNU MPFR Library.

The GNU MPFR Library is free software; you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation; either version 3 of the License, or (at your
option) any later version.

The GNU MPFR Library is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
License for more details.

You should have received a copy of the GNU Lesser General Public License
along with the GNU MPFR Library; see the file COPYING.LESSER.  If not, see
https://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */

#ifndef __GMPFR_GMP_H__
#define __GMPFR_GMP_H__

#ifndef __MPFR_IMPL_H__
# error  "mpfr-impl.h not included"
#endif


/******************************************************
 ******************** C++ Compatibility ***************
 ******************************************************/
#if defined (__cplusplus)
extern "C" {
#endif


/******************************************************
 ******************** Identify GMP ********************
 ******************************************************/

/* Macro to detect the GMP version */
#if defined(__GNU_MP_VERSION) && \
    defined(__GNU_MP_VERSION_MINOR) && \
    defined(__GNU_MP_VERSION_PATCHLEVEL)
# define __MPFR_GMP(a,b,c) \
  (MPFR_VERSION_NUM(__GNU_MP_VERSION,__GNU_MP_VERSION_MINOR,__GNU_MP_VERSION_PATCHLEVEL) >= MPFR_VERSION_NUM(a,b,c))
#else
# define __MPFR_GMP(a,b,c) 0
#endif
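
/* Example (illustrative sketch, not part of the original header): code that
   needs a feature from a specific GMP release can test it with __MPFR_GMP,
   e.g. to use an interface introduced in GMP 6.0:

     #if __MPFR_GMP(6,0,0)
       ... code relying on a GMP >= 6.0 interface ...
     #else
       ... fallback ...
     #endif

   When the GMP version macros are not available (e.g. with mini-gmp),
   __MPFR_GMP(a,b,c) expands to 0 and the fallback branch is used. */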



/******************************************************
 ******************** Check GMP ***********************
 ******************************************************/

#if !__MPFR_GMP(5,0,0) && !defined(MPFR_USE_MINI_GMP)
# error "GMP 5.0.0 or newer is required"
#endif

#if GMP_NAIL_BITS != 0
# error "MPFR doesn't support non-zero values of GMP_NAIL_BITS"
#endif

#if (GMP_NUMB_BITS<8) || (GMP_NUMB_BITS & (GMP_NUMB_BITS - 1))
# error "GMP_NUMB_BITS must be a power of 2, and >= 8"
#endif

#if GMP_NUMB_BITS == 8
# define MPFR_LOG2_GMP_NUMB_BITS 3
#elif GMP_NUMB_BITS == 16
# define MPFR_LOG2_GMP_NUMB_BITS 4
#elif GMP_NUMB_BITS == 32
# define MPFR_LOG2_GMP_NUMB_BITS 5
#elif GMP_NUMB_BITS == 64
# define MPFR_LOG2_GMP_NUMB_BITS 6
#elif GMP_NUMB_BITS == 128
# define MPFR_LOG2_GMP_NUMB_BITS 7
#elif GMP_NUMB_BITS == 256
# define MPFR_LOG2_GMP_NUMB_BITS 8
#else
# error "Can't compute log2(GMP_NUMB_BITS)"
#endif



/******************************************************
 ************* Define GMP Internal Interface  *********
 ******************************************************/

#ifdef MPFR_HAVE_GMP_IMPL  /* with gmp build */

#define mpfr_allocate_func   (*__gmp_allocate_func)
#define mpfr_reallocate_func (*__gmp_reallocate_func)
#define mpfr_free_func       (*__gmp_free_func)

#else  /* without gmp build (gmp-impl.h replacement) */

/* Define some macros */

#define ULONG_HIGHBIT (ULONG_MAX ^ ((unsigned long) ULONG_MAX >> 1))
#define UINT_HIGHBIT  (UINT_MAX ^ ((unsigned) UINT_MAX >> 1))
#define USHRT_HIGHBIT ((unsigned short) (USHRT_MAX ^ ((unsigned short) USHRT_MAX >> 1)))


#if __GMP_MP_SIZE_T_INT
#define MP_SIZE_T_MAX      INT_MAX
#define MP_SIZE_T_MIN      INT_MIN
#else
#define MP_SIZE_T_MAX      LONG_MAX
#define MP_SIZE_T_MIN      LONG_MIN
#endif

/* MP_LIMB macros.
   Note: GMP now also has the MPN_FILL macro, and GMP's MPN_ZERO(dst,n) is
   defined as "if (n) MPN_FILL(dst, n, 0);". */
#define MPN_ZERO(dst, n) memset((dst), 0, (n)*MPFR_BYTES_PER_MP_LIMB)
#define MPN_COPY(dst,src,n) \
  do                                                                  \
    {                                                                 \
      if ((dst) != (src))                                             \
        {                                                             \
          MPFR_ASSERTD ((char *) (dst) >= (char *) (src) +            \
                                     (n) * MPFR_BYTES_PER_MP_LIMB ||  \
                        (char *) (src) >= (char *) (dst) +            \
                                     (n) * MPFR_BYTES_PER_MP_LIMB);   \
          memcpy ((dst), (src), (n) * MPFR_BYTES_PER_MP_LIMB);        \
        }                                                             \
    }                                                                 \
  while (0)
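
/* Example (illustrative sketch, not from the original header): typical use of
   the MPN_ZERO and MPN_COPY replacements on a buffer of n limbs.  The names
   xp, yp and n below are hypothetical.

     mp_limb_t xp[4], yp[4];
     mp_size_t n = 4;
     MPN_ZERO (xp, n);        // set the n limbs of xp to 0
     MPN_COPY (yp, xp, n);    // copy n limbs; operands must not overlap

   Like the GMP versions, MPN_COPY requires non-overlapping operands (this is
   checked by the MPFR_ASSERTD above in debug builds). */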

/* MPN macros taken from gmp-impl.h */
#define MPN_NORMALIZE(DST, NLIMBS) \
  do {                                        \
    while (NLIMBS > 0)                        \
      {                                       \
        if ((DST)[(NLIMBS) - 1] != 0)         \
          break;                              \
        NLIMBS--;                             \
      }                                       \
  } while (0)
#define MPN_NORMALIZE_NOT_ZERO(DST, NLIMBS)     \
  do {                                          \
    MPFR_ASSERTD ((NLIMBS) >= 1);               \
    while (1)                                   \
      {                                         \
        if ((DST)[(NLIMBS) - 1] != 0)           \
          break;                                \
        NLIMBS--;                               \
      }                                         \
  } while (0)
#define MPN_OVERLAP_P(xp, xsize, yp, ysize) \
  ((xp) + (xsize) > (yp) && (yp) + (ysize) > (xp))
#define MPN_SAME_OR_INCR2_P(dst, dsize, src, ssize)             \
  ((dst) <= (src) || ! MPN_OVERLAP_P (dst, dsize, src, ssize))
#define MPN_SAME_OR_INCR_P(dst, src, size)      \
  MPN_SAME_OR_INCR2_P(dst, size, src, size)
#define MPN_SAME_OR_DECR2_P(dst, dsize, src, ssize)             \
  ((dst) >= (src) || ! MPN_OVERLAP_P (dst, dsize, src, ssize))
#define MPN_SAME_OR_DECR_P(dst, src, size)      \
  MPN_SAME_OR_DECR2_P(dst, size, src, size)

#ifndef MUL_FFT_THRESHOLD
#define MUL_FFT_THRESHOLD 8448
#endif

/* If mpn_mul_basecase or mpn_sqr_basecase are not exported, use mpn_mul instead. */
#ifndef mpn_mul_basecase
# define mpn_mul_basecase(dst,s1,n1,s2,n2) mpn_mul((dst),(s1),(n1),(s2),(n2))
#endif
#ifndef mpn_sqr_basecase
# define mpn_sqr_basecase(dst,src,n) mpn_mul((dst),(src),(n),(src),(n))
#endif

/* ASSERT */
__MPFR_DECLSPEC void mpfr_assert_fail (const char *, int,
                                       const char *);

#define ASSERT_FAIL(expr)  mpfr_assert_fail (__FILE__, __LINE__, #expr)
/* ASSERT() is for mpfr-longlong.h only. */
#define ASSERT(expr)       MPFR_ASSERTD(expr)

/* Access fields of GMP struct */
#define SIZ(x) ((x)->_mp_size)
#define ABSIZ(x) ABS (SIZ (x))
#define PTR(x) ((x)->_mp_d)
#define ALLOC(x) ((x)->_mp_alloc)
/* For mpf numbers only. */
#ifdef MPFR_NEED_MPF_INTERNALS
/* Note: the EXP macro name is reserved when <errno.h> is included.
   For compatibility with gmp-impl.h (cf --with-gmp-build), we cannot
   change this macro, but let's define it only when we need it, where
   <errno.h> will not be included. */
#define EXP(x) ((x)->_mp_exp)
#define PREC(x) ((x)->_mp_prec)
#endif
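
/* Example (illustrative sketch, not from the original header): reading the
   internal representation of an mpz_t with the field-access macros above.
   The variable z is hypothetical.

     mpz_t z;
     mpz_init_set_ui (z, 123456789);
     mp_size_t n = ABSIZ (z);      // number of limbs in use (|_mp_size|)
     mp_limb_t *p = PTR (z);       // pointer to the limb array (_mp_d)
     int neg = SIZ (z) < 0;        // the sign is carried by _mp_size
     mpz_clear (z);

   These macros mirror the SIZ/ABSIZ/PTR/ALLOC macros of gmp-impl.h, so the
   same code works whether MPFR is built with or without --with-gmp-build. */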

/* For longlong.h */
#ifdef HAVE_ATTRIBUTE_MODE
typedef unsigned int UQItype    __attribute__ ((mode (QI)));
typedef          int SItype     __attribute__ ((mode (SI)));
typedef unsigned int USItype    __attribute__ ((mode (SI)));
typedef          int DItype     __attribute__ ((mode (DI)));
typedef unsigned int UDItype    __attribute__ ((mode (DI)));
#else
typedef unsigned char UQItype;
typedef          long SItype;
typedef unsigned long USItype;
#ifdef HAVE_LONG_LONG
typedef long long int DItype;
typedef unsigned long long int UDItype;
#else /* Assume `long' gives us a wide enough type.  Needed for hppa2.0w.  */
typedef long int DItype;
typedef unsigned long int UDItype;
#endif
#endif
typedef mp_limb_t UWtype;
typedef unsigned int UHWtype;
#define W_TYPE_SIZE GMP_NUMB_BITS

/* Remap names of internal mpn functions (for longlong.h).  */
#undef  __clz_tab
#define __clz_tab               mpfr_clz_tab

/* Use (4.0 * ...) instead of (2.0 * ...) to work around buggy compilers
   that don't convert ulong->double correctly (eg. SunOS 4 native cc).  */
#undef MP_BASE_AS_DOUBLE
#define MP_BASE_AS_DOUBLE (4.0 * (MPFR_LIMB_ONE << (GMP_NUMB_BITS - 2)))

/* Structure for conversion between internal binary format and
   strings in base 2..36.  */
struct bases
{
  /* log(2)/log(conversion_base) */
  double chars_per_bit_exactly;
};
#undef  __mp_bases
#define __mp_bases mpfr_bases
__MPFR_DECLSPEC extern const struct bases mpfr_bases[257];

/* Standard macros */
#undef ABS
#undef MIN
#undef MAX
#define ABS(x) ((x) >= 0 ? (x) : -(x))
#define MIN(l,o) ((l) < (o) ? (l) : (o))
#define MAX(h,i) ((h) > (i) ? (h) : (i))

__MPFR_DECLSPEC void * mpfr_allocate_func (size_t);
__MPFR_DECLSPEC void * mpfr_reallocate_func (void *, size_t, size_t);
__MPFR_DECLSPEC void   mpfr_free_func (void *, size_t);

#if defined(WANT_GMP_INTERNALS) && defined(HAVE___GMPN_SBPI1_DIVAPPR_Q)
#ifndef __gmpn_sbpi1_divappr_q
__MPFR_DECLSPEC mp_limb_t __gmpn_sbpi1_divappr_q (mp_limb_t*,
                mp_limb_t*, mp_size_t, mp_limb_t*, mp_size_t, mp_limb_t);
#endif
#endif

#if defined(WANT_GMP_INTERNALS) && defined(HAVE___GMPN_INVERT_LIMB)
#ifndef __gmpn_invert_limb
__MPFR_DECLSPEC mp_limb_t __gmpn_invert_limb (mp_limb_t);
#endif
#endif

#if defined(WANT_GMP_INTERNALS) && defined(HAVE___GMPN_RSBLSH1_N)
#ifndef __gmpn_rsblsh1_n
__MPFR_DECLSPEC mp_limb_t __gmpn_rsblsh1_n (mp_limb_t*, mp_limb_t*, mp_limb_t*, mp_size_t);
#endif
#endif

/* Definitions related to temporary memory allocation */

struct tmp_marker
{
  void *ptr;
  size_t size;
  struct tmp_marker *next;
};

__MPFR_DECLSPEC void *mpfr_tmp_allocate (struct tmp_marker **,
                                         size_t);
__MPFR_DECLSPEC void mpfr_tmp_free (struct tmp_marker *);

/* Default MPFR_ALLOCA_MAX value. It can be overridden at configure time;
   with some tools, by giving a low value such as 0, this is useful for
   checking buffer overflow, which may not be possible with alloca.
   If HAVE_ALLOCA is not defined, then alloca() is not available, so that
   MPFR_ALLOCA_MAX needs to be 0 (see the definition of TMP_ALLOC below);
   if the user has explicitly given a non-zero value, this will probably
   yield an error at link time or at run time. */
#ifndef MPFR_ALLOCA_MAX
# ifdef HAVE_ALLOCA
#  define MPFR_ALLOCA_MAX 16384
# else
#  define MPFR_ALLOCA_MAX 0
# endif
#endif

/* Do not define TMP_SALLOC (see the test in mpfr-impl.h)! */

#if MPFR_ALLOCA_MAX != 0

/* The following tries to get a good version of alloca.
   See gmp-impl.h for implementation details and original version */
/* FIXME: the autoconf manual gives a different piece of code under the
   documentation of the AC_FUNC_ALLOCA macro. Should we switch to it?
   But note that the HAVE_ALLOCA test in it seems wrong.
   https://lists.gnu.org/archive/html/bug-autoconf/2019-01/msg00009.html */
#ifndef alloca
# if defined ( __GNUC__ )
#  define alloca __builtin_alloca
# elif defined (__DECC)
#  define alloca(x) __ALLOCA(x)
# elif defined (_MSC_VER)
#  include <malloc.h>
#  define alloca _alloca
# elif defined (HAVE_ALLOCA_H)
#  include <alloca.h>
# elif defined (_AIX) || defined (_IBMR2)
#  pragma alloca
# else
void *alloca (size_t);
# endif
#endif

#define TMP_ALLOC(n) (MPFR_ASSERTD ((n) > 0),                      \
                      MPFR_LIKELY ((n) <= MPFR_ALLOCA_MAX) ?       \
                      alloca (n) : mpfr_tmp_allocate (&tmp_marker, (n)))

#else  /* MPFR_ALLOCA_MAX == 0, alloca() not needed */

#define TMP_ALLOC(n) (mpfr_tmp_allocate (&tmp_marker, (n)))

#endif

#define TMP_DECL(m) struct tmp_marker *tmp_marker

#define TMP_MARK(m) (tmp_marker = 0)

/* Note about TMP_FREE: For small precisions, tmp_marker is null as
   the allocation is done on the stack (see TMP_ALLOC above). */
#define TMP_FREE(m) \
  (MPFR_LIKELY (tmp_marker == NULL) ? (void) 0 : mpfr_tmp_free (tmp_marker))
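
/* Example (illustrative sketch, not from the original header): the usual
   pattern for temporary limb allocation inside an MPFR function.  The names
   example, tmp and n are hypothetical.

     void example (mp_size_t n)
     {
       mp_limb_t *tmp;
       TMP_DECL (marker);          // declares the local tmp_marker pointer

       TMP_MARK (marker);          // initializes it to NULL
       tmp = TMP_ALLOC (n * MPFR_BYTES_PER_MP_LIMB);
       // ... use the n temporary limbs ...
       TMP_FREE (marker);          // frees heap blocks, if any
     }

   Small requests (at most MPFR_ALLOCA_MAX bytes) come from the stack via
   alloca when available and are released automatically on return; larger
   ones go through mpfr_tmp_allocate and are chained on tmp_marker so that
   TMP_FREE can release them. */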

#endif  /* gmp-impl.h replacement */



/******************************************************
 ****** GMP interfaces that change between GMP   ******
 ****** versions: define any interfaces that     ******
 ****** are missing.                             ******
 ******************************************************/

#ifndef MPFR_LONG_WITHIN_LIMB

/* the following routines assume that an unsigned long has at least twice the
   size of an mp_limb_t */

#define umul_ppmm(ph, pl, m0, m1)                                       \
  do {                                                                  \
    unsigned long _p = (unsigned long) (m0) * (unsigned long) (m1);     \
    (ph) = (mp_limb_t) (_p >> GMP_NUMB_BITS);                           \
    (pl) = (mp_limb_t) (_p & MPFR_LIMB_MAX);                            \
  } while (0)

#define add_ssaaaa(sh, sl, ah, al, bh, bl)                              \
  do {                                                                  \
    unsigned long _a = ((unsigned long) (ah) << GMP_NUMB_BITS) + (al);  \
    unsigned long _b = ((unsigned long) (bh) << GMP_NUMB_BITS) + (bl);  \
    unsigned long _s = _a + _b;                                         \
    (sh) = (mp_limb_t) (_s >> GMP_NUMB_BITS);                           \
    (sl) = (mp_limb_t) (_s & MPFR_LIMB_MAX);                            \
  } while (0)

#define sub_ddmmss(sh, sl, ah, al, bh, bl)                              \
  do {                                                                  \
    unsigned long _a = ((unsigned long) (ah) << GMP_NUMB_BITS) + (al);  \
    unsigned long _b = ((unsigned long) (bh) << GMP_NUMB_BITS) + (bl);  \
    unsigned long _s = _a - _b;                                         \
    (sh) = (mp_limb_t) (_s >> GMP_NUMB_BITS);                           \
    (sl) = (mp_limb_t) (_s & MPFR_LIMB_MAX);                            \
  } while (0)

#define count_leading_zeros(count,x)                                    \
  do {                                                                  \
    int _c = 0;                                                         \
    mp_limb_t _x = (mp_limb_t) (x);                                     \
    while (GMP_NUMB_BITS > 16 && (_x >> (GMP_NUMB_BITS - 16)) == 0)     \
      {                                                                 \
        _c += 16;                                                       \
        _x = (mp_limb_t) (_x << 16);                                    \
      }                                                                 \
    if (GMP_NUMB_BITS > 8 && (_x >> (GMP_NUMB_BITS - 8)) == 0)          \
      {                                                                 \
        _c += 8;                                                        \
        _x = (mp_limb_t) (_x << 8);                                     \
      }                                                                 \
    if (GMP_NUMB_BITS > 4 && (_x >> (GMP_NUMB_BITS - 4)) == 0)          \
      {                                                                 \
        _c += 4;                                                        \
        _x = (mp_limb_t) (_x << 4);                                     \
      }                                                                 \
    if (GMP_NUMB_BITS > 2 && (_x >> (GMP_NUMB_BITS - 2)) == 0)          \
      {                                                                 \
        _c += 2;                                                        \
        _x = (mp_limb_t) (_x << 2);                                     \
      }                                                                 \
    if ((_x & MPFR_LIMB_HIGHBIT) == 0)                                  \
      _c ++;                                                            \
    (count) = _c;                                                       \
  } while (0)

#define invert_limb(invxl,xl)                                           \
  do {                                                                  \
    unsigned long _num;                                                 \
    MPFR_ASSERTD ((xl) != 0);                                           \
    _num = (unsigned long) (mp_limb_t) ~(xl);                           \
    _num = (_num << GMP_NUMB_BITS) | MPFR_LIMB_MAX;                     \
    (invxl) = _num / (xl);                                              \
  } while (0)

#define udiv_qrnnd(q, r, n1, n0, d)                                     \
  do {                                                                  \
    unsigned long _num;                                                 \
    _num = ((unsigned long) (n1) << GMP_NUMB_BITS) | (n0);              \
    (q) = _num / (d);                                                   \
    (r) = _num % (d);                                                   \
  } while (0)

#endif
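
/* Example (illustrative sketch, not from the original header): the longlong.h
   style primitives above all operate on a double-width value split into a
   high and a low limb.  For instance, with GMP_NUMB_BITS == 32 and a 64-bit
   unsigned long,

     mp_limb_t ph, pl;
     umul_ppmm (ph, pl, 0x80000000, 4);   // 2^31 * 4 = 2^33
     // now ph == 2 and pl == 0, i.e. ph*2^32 + pl == 2^33

   These fallback definitions are only used when an unsigned long is at least
   twice as wide as mp_limb_t (MPFR_LONG_WITHIN_LIMB undefined); otherwise
   the corresponding macros come from mpfr-longlong.h. */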

/* If mpn_sqr is not defined, use mpn_mul_n instead
   (mpn_sqr was called mpn_sqr_n (internal) in older versions of GMP). */
#ifndef mpn_sqr
# define mpn_sqr(dst,src,n) mpn_mul_n((dst),(src),(src),(n))
#endif

/* invert_limb macro, copied from GMP 5.0.2, file gmp-impl.h.
   It returns invxl = floor((B^2-1)/xl)-B, where B=2^BITS_PER_LIMB,
   assuming the most significant bit of xl is set. */
#ifndef invert_limb
#if defined(WANT_GMP_INTERNALS) && defined(HAVE___GMPN_INVERT_LIMB)
#define invert_limb(invxl,xl)                             \
  do {                                                    \
    invxl = __gmpn_invert_limb (xl);                      \
  } while (0)
#else
#define invert_limb(invxl,xl)                             \
  do {                                                    \
    mp_limb_t dummy MPFR_MAYBE_UNUSED;                    \
    MPFR_ASSERTD ((xl) != 0);                             \
    udiv_qrnnd (invxl, dummy, ~(xl), MPFR_LIMB_MAX, xl);  \
  } while (0)
#endif /* defined(WANT_GMP_INTERNALS) && defined(HAVE___GMPN_INVERT_LIMB) */
#endif /* ifndef invert_limb */
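
/* Worked example (illustrative, not from the original header): with
   B = 2^GMP_NUMB_BITS and xl = MPFR_LIMB_HIGHBIT = B/2 (the smallest
   normalized limb), invert_limb gives

     invxl = floor ((B^2 - 1) / (B/2)) - B = (2*B - 1) - B = B - 1,

   i.e. invxl == MPFR_LIMB_MAX.  Conversely, for xl = B - 1 (the largest
   limb), invxl = floor ((B^2 - 1) / (B - 1)) - B = (B + 1) - B = 1. */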

/* udiv_qr_3by2 macro, adapted from GMP 5.0.2, file gmp-impl.h.
   Compute the quotient and remainder for n / d. Requires d
   >= B^2 / 2 and n < d B. dinv is the inverse

     floor ((B^3 - 1) / (d0 + d1 B)) - B.

   NOTE: Output variables are updated multiple times. Only some inputs
   and outputs may overlap.
*/
#ifndef udiv_qr_3by2
#define udiv_qr_3by2(q, r1, r0, n2, n1, n0, d1, d0, dinv)               \
  do {                                                                  \
    mp_limb_t _q0, _t1, _t0, _mask;                                     \
    umul_ppmm ((q), _q0, (n2), (dinv));                                 \
    add_ssaaaa ((q), _q0, (q), _q0, (n2), (n1));                        \
                                                                        \
    /* Compute the two most significant limbs of n - q'd */             \
    (r1) = (n1) - (d1) * (q);                                           \
    (r0) = (n0);                                                        \
    sub_ddmmss ((r1), (r0), (r1), (r0), (d1), (d0));                    \
    umul_ppmm (_t1, _t0, (d0), (q));                                    \
    sub_ddmmss ((r1), (r0), (r1), (r0), _t1, _t0);                      \
    (q)++;                                                              \
                                                                        \
    /* Conditionally adjust q and the remainders */                     \
    _mask = - (mp_limb_t) ((r1) >= _q0);                                \
    (q) += _mask;                                                       \
    add_ssaaaa ((r1), (r0), (r1), (r0), _mask & (d1), _mask & (d0));    \
    if (MPFR_UNLIKELY ((r1) >= (d1)))                                   \
      {                                                                 \
        if ((r1) > (d1) || (r0) >= (d0))                                \
          {                                                             \
            (q)++;                                                      \
            sub_ddmmss ((r1), (r0), (r1), (r0), (d1), (d0));            \
          }                                                             \
      }                                                                 \
  } while (0)
#endif
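
/* Illustrative note (not from the original header): udiv_qr_3by2 divides the
   3-limb value n2*B^2 + n1*B + n0 by the normalized 2-limb divisor d1*B + d0
   (B = 2^GMP_NUMB_BITS), producing a 1-limb quotient q and a 2-limb
   remainder r1*B + r0, so that

     n2*B^2 + n1*B + n0 = q * (d1*B + d0) + r1*B + r0,
     0 <= r1*B + r0 < d1*B + d0.

   The requirement d >= B^2/2 means that d1 has its most significant bit set,
   and n < d*B guarantees that q fits in a single limb.  dinv is the
   precomputed 3/2 inverse, e.g. the inv32 field filled by invert_pi1 below,
   following the Möller-Granlund method cited there. */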

/* invert_pi1 macro adapted from GMP 5, this computes in (dinv).inv32
   the value of floor((beta^3 - 1)/(d1*beta+d0)) - beta,
   cf "Improved Division by Invariant Integers" by Niels Möller and
   Torbjörn Granlund */
typedef struct {mp_limb_t inv32;} mpfr_pi1_t;
#ifndef invert_pi1
#define invert_pi1(dinv, d1, d0)                                        \
  do {                                                                  \
    mp_limb_t _v, _p, _t1, _t0, _mask;                                  \
    invert_limb (_v, d1);                                               \
    _p = (d1) * _v;                                                     \
    _p += (d0);                                                         \
    if (_p < (d0))                                                      \
      {                                                                 \
        _v--;                                                           \
        _mask = -(mp_limb_t) (_p >= (d1));                              \
        _p -= (d1);                                                     \
        _v += _mask;                                                    \
        _p -= _mask & (d1);                                             \
      }                                                                 \
    umul_ppmm (_t1, _t0, d0, _v);                                       \
    _p += _t1;                                                          \
    if (_p < _t1)                                                       \
      {                                                                 \
        _v--;                                                           \
        if (MPFR_UNLIKELY (_p >= (d1)))                                 \
          {                                                             \
            if (_p > (d1) || _t0 >= (d0))                               \
              _v--;                                                     \
          }                                                             \
      }                                                                 \
    (dinv).inv32 = _v;                                                  \
  } while (0)
#endif

/* The following macro is copied from GMP-6.1.1, file gmp-impl.h,
   macro udiv_qrnnd_preinv.
   It computes q and r such that nh*2^GMP_NUMB_BITS + nl = q*d + r,
   with 0 <= r < d, assuming di = __gmpn_invert_limb (d). */
#define __udiv_qrnnd_preinv(q, r, nh, nl, d, di)                        \
  do {                                                                  \
    mp_limb_t _qh, _ql, _r, _mask;                                      \
    umul_ppmm (_qh, _ql, (nh), (di));                                   \
    if (__builtin_constant_p (nl) && (nl) == 0)                         \
      {                                                                 \
        _qh += (nh) + 1;                                                \
        _r = - _qh * (d);                                               \
        _mask = -(mp_limb_t) (_r > _ql); /* both > and >= are OK */     \
        _qh += _mask;                                                   \
        _r += _mask & (d);                                              \
      }                                                                 \
    else                                                                \
      {                                                                 \
        add_ssaaaa (_qh, _ql, _qh, _ql, (nh) + 1, (nl));                \
        _r = (nl) - _qh * (d);                                          \
        _mask = -(mp_limb_t) (_r > _ql); /* both > and >= are OK */     \
        _qh += _mask;                                                   \
        _r += _mask & (d);                                              \
        if (MPFR_UNLIKELY (_r >= (d)))                                  \
          {                                                             \
            _r -= (d);                                                  \
            _qh++;                                                      \
          }                                                             \
      }                                                                 \
    (r) = _r;                                                           \
    (q) = _qh;                                                          \
  } while (0)
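
/* Example (illustrative sketch, not from the original header): dividing a
   two-limb value by an invariant normalized divisor d with a single
   precomputed inverse.  The names q, r, di, nh, nl and d are hypothetical.

     mp_limb_t q, r, di;
     invert_limb (di, d);                        // di = floor((B^2-1)/d) - B
     __udiv_qrnnd_preinv (q, r, nh, nl, d, di);  // nh*B + nl = q*d + r, 0 <= r < d

   Requirements: d has its most significant bit set and nh < d, so that the
   quotient fits in a single limb. */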

#if GMP_NUMB_BITS == 64
/* specialized version for nl = 0, with di computed inside */
#define __udiv_qrnd_preinv(q, r, nh, d)                                 \
  do {                                                                  \
    mp_limb_t _di;                                                      \
                                                                        \
    MPFR_ASSERTD ((d) != 0);                                            \
    MPFR_ASSERTD ((nh) < (d));                                          \
    MPFR_ASSERTD ((d) & MPFR_LIMB_HIGHBIT);                             \
                                                                        \
    __gmpfr_invert_limb (_di, d);                                       \
    __udiv_qrnnd_preinv (q, r, nh, 0, d, _di);                          \
  } while (0)
#elif defined(WANT_GMP_INTERNALS) && defined(HAVE___GMPN_INVERT_LIMB)
/* specialized version for nl = 0, with di computed inside */
#define __udiv_qrnd_preinv(q, r, nh, d)                                 \
  do {                                                                  \
    mp_limb_t _di;                                                      \
                                                                        \
    MPFR_ASSERTD ((d) != 0);                                            \
    MPFR_ASSERTD ((nh) < (d));                                          \
    MPFR_ASSERTD ((d) & MPFR_LIMB_HIGHBIT);                             \
                                                                        \
    _di = __gmpn_invert_limb (d);                                       \
    __udiv_qrnnd_preinv (q, r, nh, 0, d, _di);                          \
  } while (0)
#else
/* Same as __udiv_qrnnd_c from longlong.h, but using a single UWtype/UWtype
   division instead of two, and with n0=0. The analysis below assumes a limb
   has 64 bits for simplicity. */
#define __udiv_qrnd_preinv(q, r, n1, d)                                 \
  do {                                                                  \
    UWtype __d1, __d0, __q1, __q0, __r1, __r0, __i;                     \
                                                                        \
    MPFR_ASSERTD ((d) != 0);                                            \
    MPFR_ASSERTD ((n1) < (d));                                          \
    MPFR_ASSERTD ((d) & MPFR_LIMB_HIGHBIT);                             \
                                                                        \
    __d1 = __ll_highpart (d);                                           \
    /* 2^31 <= d1 < 2^32 */                                             \
    __d0 = __ll_lowpart (d);                                            \
    /* 0 <= d0 < 2^32 */                                                \
    __i = ~(UWtype) 0 / __d1;                                           \
    /* 2^32 < i < 2^33 with i < 2^64/d1 */                              \
                                                                        \
    __q1 = (((n1) / __ll_B) * __i) / __ll_B;                            \
    /* Since n1 < d, high(n1) <= d1 = high(d), thus */                  \
    /* q1 <= high(n1) * (2^64/d1) / 2^32 <= 2^32 */                     \
    /* and also q1 <= n1/d1 thus r1 >= 0 below */                       \
    __r1 = (n1) - __q1 * __d1;                                          \
    while (__r1 >= __d1)                                                \
      __q1 ++, __r1 -= __d1;                                            \
    /* by construction, we have n1 = q1*d1+r1, and 0 <= r1 < d1 */      \
    /* thus q1 <= n1/d1 < 2^32+2 */                                     \
    /* q1*d0 <= (2^32+1)*(2^32-1) <= 2^64-1 thus it fits in a limb. */  \
    __r0 = __r1 * __ll_B;                                               \
    __r1 = __r0 - __q1 * __d0;                                          \
    /* At most two corrections are needed like in __udiv_qrnnd_c. */    \
    if (__r1 > __r0) /* borrow when subtracting q1*d0 */                \
      {                                                                 \
        __q1--, __r1 += (d);                                            \
        if (__r1 > (d)) /* no carry when adding d */                    \
          __q1--, __r1 += (d);                                          \
      }                                                                 \
    /* We can have r1 < m here, but in this case the true remainder */  \
    /* is 2^64+r1, which is > m (same analysis as below for r0). */     \
    /* Now we have n1*2^32 = q1*d + r1, with 0 <= r1 < d. */            \
    MPFR_ASSERTD(__r1 < (d));                                           \
                                                                        \
    /* The same analysis as above applies, with n1 replaced by r1, */   \
    /* q1 by q0, r1 by r0. */                                           \
    __q0 = ((__r1 / __ll_B) * __i) / __ll_B;                            \
    __r0 = __r1  - __q0 * __d1;                                         \
    while (__r0 >= __d1)                                                \
      __q0 ++, __r0 -= __d1;                                            \
    __r1 = __r0 * __ll_B;                                               \
    __r0 = __r1 - __q0 * __d0;                                          \
    /* We know the quotient floor(r1*2^64/d) is q0, q0-1 or q0-2.*/     \
    if (__r0 > __r1) /* borrow when subtracting q0*d0 */                \
      {                                                                 \
        /* The quotient is either q0-1 or q0-2. */                      \
        __q0--, __r0 += (d);                                            \
        if (__r0 > (d)) /* no carry when adding d */                    \
          __q0--, __r0 += (d);                                          \
      }                                                                 \
    /* Now we have n1*2^64 = (q1*2^32+q0)*d + r0, with 0 <= r0 < d. */  \
    MPFR_ASSERTD(__r0 < (d));                                           \
                                                                        \
    (q) = __q1 * __ll_B | __q0;                                         \
    (r) = __r0;                                                         \
  } while (0)
#endif

/******************************************************
 ************* GMP Basic Pointer Types ****************
 ******************************************************/

typedef mp_limb_t *mpfr_limb_ptr;
typedef const mp_limb_t *mpfr_limb_srcptr;

/* mpfr_ieee_double_extract structure (copied from GMP 6.1.0, gmp-impl.h, with
   ieee_double_extract changed into mpfr_ieee_double_extract, and
   _GMP_IEEE_FLOATS changed into _MPFR_IEEE_FLOATS). */

/* Define mpfr_ieee_double_extract and _MPFR_IEEE_FLOATS.

   Bit field packing is "implementation defined" according to C99, which
   leaves us at the compiler's mercy here.  For some systems packing is
   defined in the ABI (eg. x86).  In any case so far it seems universal that
   little endian systems pack from low to high, and big endian from high to
   low within the given type.

   Within the fields we rely on the integer endianness being the same as the
   float endianness, this is true everywhere we know of and it'd be a fairly
   strange system that did anything else.  */

/* Note for MPFR: building with "gcc -std=c89 -pedantic -pedantic-errors"
   fails if the bit-field type is unsigned long:

     error: type of bit-field '...' is a GCC extension [-Wpedantic]

   Though with -std=c99 no errors are obtained, this is still an extension
   in C99, which says:

     A bit-field shall have a type that is a qualified or unqualified version
     of _Bool, signed int, unsigned int, or some other implementation-defined
     type.

   So, unsigned int should be better. This will fail with implementations
   having 16-bit int's, but such implementations are not required to
   support bit-fields of size > 16 anyway; if ever an implementation with
   16-bit int's is found, the appropriate minimal changes could still be
   done in the future. See WG14/N2921 (5.16).
*/

#ifndef _MPFR_IEEE_FLOATS

#ifdef HAVE_DOUBLE_IEEE_LITTLE_ENDIAN
#define _MPFR_IEEE_FLOATS 1
union mpfr_ieee_double_extract
{
  struct
    {
      unsigned int manl:32;
      unsigned int manh:20;
      unsigned int exp:11;
      unsigned int sig:1;
    } s;
  double d;
};
#endif

#ifdef HAVE_DOUBLE_IEEE_BIG_ENDIAN
#define _MPFR_IEEE_FLOATS 1
union mpfr_ieee_double_extract
{
  struct
    {
      unsigned int sig:1;
      unsigned int exp:11;
      unsigned int manh:20;
      unsigned int manl:32;
    } s;
  double d;
};
#endif

#endif /* _MPFR_IEEE_FLOATS */
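
/* Example (illustrative sketch, not from the original header): when
   _MPFR_IEEE_FLOATS is 1, the union can be used to pick a double apart
   without arithmetic, e.g. to read the sign, biased exponent and mantissa:

     union mpfr_ieee_double_extract u;
     u.d = -1.5;
     // For -1.5: u.s.sig == 1, u.s.exp == 1023 (bias 1023, unbiased 0),
     // u.s.manh == 0x80000 and u.s.manl == 0 (mantissa 1.1 in binary).

   MPFR's double conversion code uses this layout to extract or build doubles
   independently of the endianness selected above. */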

/******************************************************
 ******************** C++ Compatibility ***************
 ******************************************************/
#if defined (__cplusplus)
}
#endif

#endif /* __GMPFR_GMP_H__ */