/* mpn_mul_1 -- Multiply a limb vector with a single limb and store the
   product in a second limb vector.

Copyright 1991, 1992, 1993, 1994, 1996, 2000, 2001, 2002 Free Software
Foundation, Inc.

This file is part of the GNU MP Library.

The GNU MP Library is free software; you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation; either version 3 of the License, or (at your
option) any later version.

The GNU MP Library is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
License for more details.

You should have received a copy of the GNU Lesser General Public License
along with the GNU MP Library.  If not, see http://www.gnu.org/licenses/.  */

#include "gmp.h"
#include "gmp-impl.h"
#include "longlong.h"


#if GMP_NAIL_BITS == 0
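
/* Full-limb variant: with no nail bits, each limb uses all of its bits,
   so one umul_ppmm per limb plus carry propagation is all that is
   needed.  */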

mp_limb_t
mpn_mul_1 (mp_ptr rp, mp_srcptr up, mp_size_t n, mp_limb_t vl)
{
  mp_limb_t ul, cl, hpl, lpl;

  ASSERT (n >= 1);
  ASSERT (MPN_SAME_OR_INCR_P (rp, up, n));

  cl = 0;
  do
    {
      ul = *up++;
      umul_ppmm (hpl, lpl, ul, vl);

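      /* Fold the incoming carry into the low product limb.  If lpl wraps,
         (lpl < cl) is 1; adding it to hpl cannot overflow, since hpl is
         at most 2^GMP_LIMB_BITS - 2 for a full-limb product.  */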
      lpl += cl;
      cl = (lpl < cl) + hpl;

      *rp++ = lpl;
    }
  while (--n != 0);

  return cl;
}

#endif

#if GMP_NAIL_BITS >= 1
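
/* Nail variant: only the low GMP_NUMB_BITS of each limb carry data, so
   the partial products are realigned and each iteration's high half is
   deferred and added into the following result limb.  */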

mp_limb_t
mpn_mul_1 (mp_ptr rp, mp_srcptr up, mp_size_t n, mp_limb_t vl)
{
  mp_limb_t shifted_vl, ul, lpl, hpl, prev_hpl, xw, cl, xl;

  ASSERT (n >= 1);
  ASSERT (MPN_SAME_OR_INCR_P (rp, up, n));
  ASSERT_MPN (up, n);
  ASSERT_LIMB (vl);

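  /* Pre-shifting vl by GMP_NAIL_BITS makes umul_ppmm split the product
     at the numb boundary: hpl receives the high GMP_NUMB_BITS of ul*vl
     directly, and only lpl needs shifting down afterwards.  */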
  shifted_vl = vl << GMP_NAIL_BITS;
  cl = 0;
  prev_hpl = 0;
  do
    {
      ul = *up++;

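      /* Accumulate the previous iteration's high half, this iteration's
         low half, and the running carry.  With at least one nail bit the
         sum xw fits in a limb, so the carry out is simply its high
         (nail) part.  */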
      umul_ppmm (hpl, lpl, ul, shifted_vl);
      lpl >>= GMP_NAIL_BITS;
      xw = prev_hpl + lpl + cl;
      cl = xw >> GMP_NUMB_BITS;
      xl = xw & GMP_NUMB_MASK;
      *rp++ = xl;
      prev_hpl = hpl;
    }
  while (--n != 0);

  return prev_hpl + cl;
}

#endif
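
/* Usage sketch (illustrative only, not part of the library): multiply a
   two-limb operand by a single limb and keep the carry-out limb.

     mp_limb_t up[2] = { 123, 456 };
     mp_limb_t rp[2];
     mp_limb_t carry = mpn_mul_1 (rp, up, 2, 10);

   The full product is the three-limb value {rp[0], rp[1], carry}, least
   significant limb first; carry is zero whenever the product still fits
   in two limbs.  */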