/* mpn_gcd_11 -- limb greatest common divisor.

Copyright 1994, 1996, 2000, 2001, 2009, 2012, 2019 Free Software Foundation,
Inc.

This file is part of the GNU MP Library.

The GNU MP Library is free software; you can redistribute it and/or modify
it under the terms of either:

  * the GNU Lesser General Public License as published by the Free
    Software Foundation; either version 3 of the License, or (at your
    option) any later version.

or

  * the GNU General Public License as published by the Free Software
    Foundation; either version 2 of the License, or (at your option) any
    later version.

or both in parallel, as here.

The GNU MP Library is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received copies of the GNU General Public License and the
GNU Lesser General Public License along with the GNU MP Library.  If not,
see https://www.gnu.org/licenses/.  */

#include "gmp-impl.h"
#include "longlong.h"

mp_limb_t
mpn_gcd_11 (mp_limb_t u, mp_limb_t v)
{
  ASSERT (u & v & 1);

  /* In this loop, we represent the odd numbers u and v without the
     redundant least significant one bit.  This reduction in size by
     one bit ensures that the high bit of t, below, is set if and
     only if v > u.  */

  u >>= 1;
  v >>= 1;

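  /* Illustrative example (values chosen for this comment, not from the
     original source): with a hypothetical 8-bit limb, inputs u = 21 and
     v = 9 become u = 10 and v = 4 after the shifts above.  Both now fit
     in 7 bits, so the subtraction t = u - v below wraps around exactly
     when v > u, setting the high bit:

	u = 10, v =  4:  t =   6 = 0b00000110  (high bit clear)
	u =  4, v = 10:  t = 250 = 0b11111010  (high bit set)  */
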
  while (u != v)
    {
      mp_limb_t t;
      mp_limb_t vgtu;
      int c;

      t = u - v;
      vgtu = LIMB_HIGHBIT_TO_MASK (t);

      /* v <-- min (u, v) */
      v += (vgtu & t);

      /* u <-- |u - v| */
      u = (t ^ vgtu) - vgtu;

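      /* A note on the two branch-free steps above (derivable from the
	 code, added for clarity): vgtu is all ones when v > u and zero
	 otherwise.  If v > u, then vgtu & t equals t = u - v modulo
	 2^GMP_LIMB_BITS, so v + t wraps to u, the minimum; otherwise v
	 is left unchanged.  Likewise (t ^ vgtu) - vgtu is t when vgtu
	 is zero, and ~t + 1 = -t when vgtu is all ones (the standard
	 two's-complement negation), giving |u - v| in both cases.  */
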
      count_trailing_zeros (c, t);
      /* We have c <= GMP_LIMB_BITS - 2 here, so that

	 u >>= (c + 1);

	 would be safe.  But unlike the addition c + 1, a separate
	 shift by 1 is independent of c, and can be executed in
	 parallel with count_trailing_zeros.  */
      u = (u >> 1) >> c;
    }
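  /* Restore the suppressed low one bit: u now represents the odd
     value 2*u + 1, the gcd of the original inputs.  */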
  return (u << 1) + 1;
}
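
/* Usage sketch (illustrative, not part of the library source): both
   arguments must be odd, as the ASSERT above requires.  For example,

     mp_limb_t g = mpn_gcd_11 (21, 9);

   yields g == 3.  Callers such as mpn_gcd_1 are expected to strip
   common factors of two and shift each operand to an odd value
   before reaching this function.  */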