/* mpn_addmul_1 -- multiply the N long limb vector pointed to by UP by VL,
   add the N least significant limbs of the product to the limb vector
   pointed to by RP.  Return the most significant limb of the product,
   adjusted for carry-out from the addition.

Copyright 1992, 1993, 1994, 1996, 2000, 2002, 2004 Free Software Foundation,
Inc.

This file is part of the GNU MP Library.

The GNU MP Library is free software; you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation; either version 3 of the License, or (at your
option) any later version.

The GNU MP Library is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
License for more details.

You should have received a copy of the GNU Lesser General Public License
along with the GNU MP Library.  If not, see http://www.gnu.org/licenses/. */

#include "gmp.h"
#include "gmp-impl.h"
#include "longlong.h"

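/* Illustrative sketch, not part of the original file: one common way
   mpn_addmul_1 is combined with mpn_mul_1 to form a schoolbook multiply.
   The function name and the operands {up,un}, {vp,vn} (with un >= vn >= 1,
   rp holding un+vn limbs and not overlapping the inputs) are hypothetical.  */
#if 0
static void
example_mul_basecase (mp_ptr rp, mp_srcptr up, mp_size_t un,
                      mp_srcptr vp, mp_size_t vn)
{
  mp_size_t i;

  /* Low limb of v: a plain multiply writes un limbs plus a high limb.  */
  rp[un] = mpn_mul_1 (rp, up, un, vp[0]);

  /* Remaining limbs of v: accumulate each partial product into rp,
     storing the returned carry as the new high limb.  */
  for (i = 1; i < vn; i++)
    rp[un + i] = mpn_addmul_1 (rp + i, up, un, vp[i]);
}
#endif
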
#if GMP_NAIL_BITS == 0

mp_limb_t
mpn_addmul_1 (mp_ptr rp, mp_srcptr up, mp_size_t n, mp_limb_t vl)
{
  mp_limb_t ul, cl, hpl, lpl, rl;

  ASSERT (n >= 1);
  ASSERT (MPN_SAME_OR_SEPARATE_P (rp, up, n));

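  /* Each step adds rp[i] + up[i]*vl + cl.  With B = 2^GMP_LIMB_BITS that
     sum is at most (B-1) + (B-1)*(B-1) + (B-1) = B*B - 1, so it always
     fits in two limbs and the outgoing carry cl fits in one.  */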
  cl = 0;
  do
    {
      ul = *up++;
      umul_ppmm (hpl, lpl, ul, vl);

      lpl += cl;
      cl = (lpl < cl) + hpl;

      rl = *rp;
      lpl = rl + lpl;
      cl += lpl < rl;
      *rp++ = lpl;
    }
  while (--n != 0);

  return cl;
}

#endif

#if GMP_NAIL_BITS == 1

mp_limb_t
mpn_addmul_1 (mp_ptr rp, mp_srcptr up, mp_size_t n, mp_limb_t vl)
{
  mp_limb_t shifted_vl, ul, rl, lpl, hpl, prev_hpl, cl, xl, c1, c2, c3;

  ASSERT (n >= 1);
  ASSERT (MPN_SAME_OR_SEPARATE_P (rp, up, n));
  ASSERT_MPN (rp, n);
  ASSERT_MPN (up, n);
  ASSERT_LIMB (vl);

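  /* Multiplying by vl << GMP_NAIL_BITS makes umul_ppmm deliver the high
     GMP_NUMB_BITS of ul*vl directly in hpl, already a valid limb with a
     zero nail; the low GMP_NUMB_BITS land in lpl after a right shift.  */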
  shifted_vl = vl << GMP_NAIL_BITS;
  cl = 0;
  prev_hpl = 0;
  do
    {
      ul = *up++;
      rl = *rp;
      umul_ppmm (hpl, lpl, ul, shifted_vl);
      lpl >>= GMP_NAIL_BITS;
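      /* With a single nail bit, prev_hpl + lpl + rl + cl can exceed one
         limb, so accumulate in three steps and collect the carries
         individually.  */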
      ADDC_LIMB (c1, xl, prev_hpl, lpl);
      ADDC_LIMB (c2, xl, xl, rl);
      ADDC_LIMB (c3, xl, xl, cl);
      cl = c1 + c2 + c3;
      *rp++ = xl;
      prev_hpl = hpl;
    }
  while (--n != 0);

  return prev_hpl + cl;
}

#endif

#if GMP_NAIL_BITS >= 2

mp_limb_t
mpn_addmul_1 (mp_ptr rp, mp_srcptr up, mp_size_t n, mp_limb_t vl)
{
  mp_limb_t shifted_vl, ul, rl, lpl, hpl, prev_hpl, xw, cl, xl;

  ASSERT (n >= 1);
  ASSERT (MPN_SAME_OR_SEPARATE_P (rp, up, n));
  ASSERT_MPN (rp, n);
  ASSERT_MPN (up, n);
  ASSERT_LIMB (vl);

  shifted_vl = vl << GMP_NAIL_BITS;
  cl = 0;
  prev_hpl = 0;
  do
    {
      ul = *up++;
      rl = *rp;
      umul_ppmm (hpl, lpl, ul, shifted_vl);
      lpl >>= GMP_NAIL_BITS;
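      /* With at least two nail bits, the four addends are each below
         2^GMP_NUMB_BITS, so their sum stays below 2^(GMP_NUMB_BITS + 2)
         and cannot overflow the full limb xw.  */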
      xw = prev_hpl + lpl + rl + cl;
      cl = xw >> GMP_NUMB_BITS;
      xl = xw & GMP_NUMB_MASK;
      *rp++ = xl;
      prev_hpl = hpl;
    }
  while (--n != 0);

  return prev_hpl + cl;
}

#endif