/* mpn_addmul_1 -- multiply the N long limb vector pointed to by UP by V0,
   add the N least significant limbs of the product to the limb vector
   pointed to by RP.  Return the most significant limb of the product,
   adjusted for carry-out from the addition.

Copyright 1992-1994, 1996, 2000, 2002, 2004, 2016 Free Software Foundation,
Inc.

This file is part of the GNU MP Library.

The GNU MP Library is free software; you can redistribute it and/or modify
it under the terms of either:

  * the GNU Lesser General Public License as published by the Free
    Software Foundation; either version 3 of the License, or (at your
    option) any later version.

or

  * the GNU General Public License as published by the Free Software
    Foundation; either version 2 of the License, or (at your option) any
    later version.

or both in parallel, as here.

The GNU MP Library is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received copies of the GNU General Public License and the
GNU Lesser General Public License along with the GNU MP Library.  If not,
see https://www.gnu.org/licenses/.  */

#include "gmp-impl.h"
#include "longlong.h"

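/* Usage sketch (hypothetical caller, for illustration only): the
   operation is {rp,n} += {up,n} * v0, returning the most significant
   limb rather than storing it.  Assuming nail-free 64-bit limbs,

     mp_limb_t a[2] = { 7, 0 };
     mp_limb_t b[2] = { 5, 0 };
     mp_limb_t hi = mpn_addmul_1 (a, b, 2, 3);

   leaves a = {22, 0} and hi = 0, since 7 + 5*3 = 22 fits in the low
   limb; a nonzero hi would be a carry limb the caller must propagate.  */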

#if GMP_NAIL_BITS == 0

mp_limb_t
mpn_addmul_1 (mp_ptr rp, mp_srcptr up, mp_size_t n, mp_limb_t v0)
{
  mp_limb_t u0, crec, c, p1, p0, r0;

  ASSERT (n >= 1);
  ASSERT (MPN_SAME_OR_SEPARATE_P (rp, up, n));

  crec = 0;
  do
    {
      u0 = *up++;
      umul_ppmm (p1, p0, u0, v0);

      r0 = *rp;

      p0 = r0 + p0;		/* add the limb from rp into the low product limb */
      c = r0 > p0;		/* carry out: the sum wrapped iff it is below r0 */

      p1 = p1 + c;		/* fold that carry into the high product limb */

      r0 = p0 + crec;		/* cycle 0, 3, ... */
      c = p0 > r0;		/* cycle 1, 4, ... */

      crec = p1 + c;		/* cycle 2, 5, ... */

      *rp++ = r0;
    }
  while (--n != 0);

  return crec;
}

#endif
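
/* The carry tests in the nail-free version above exploit wraparound:
   after p0 = r0 + p0, the sum is below the addend r0 exactly when the
   addition overflowed.  A worked instance, assuming 64-bit limbs:
   r0 = 2^64 - 1 and p0 = 2 give p0 = 1 after the wrapping add, and
   r0 > p0 then yields the carry bit 1.  */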

#if GMP_NAIL_BITS == 1

mp_limb_t
mpn_addmul_1 (mp_ptr rp, mp_srcptr up, mp_size_t n, mp_limb_t v0)
{
  mp_limb_t shifted_v0, u0, r0, p0, p1, prev_p1, crec, xl, c1, c2, c3;

  ASSERT (n >= 1);
  ASSERT (MPN_SAME_OR_SEPARATE_P (rp, up, n));
  ASSERT_MPN (rp, n);
  ASSERT_MPN (up, n);
  ASSERT_LIMB (v0);

  shifted_v0 = v0 << GMP_NAIL_BITS;	/* pre-shift so the product splits at the numb boundary */
  crec = 0;
  prev_p1 = 0;
  do
    {
      u0 = *up++;
      r0 = *rp;
      umul_ppmm (p1, p0, u0, shifted_v0);
      p0 >>= GMP_NAIL_BITS;		/* realign the low product limb to the numb field */
      /* sum prev_p1, p0, r0 and crec, recording each of the up to
	 three carries out of the numb field */
      ADDC_LIMB (c1, xl, prev_p1, p0);
      ADDC_LIMB (c2, xl, xl, r0);
      ADDC_LIMB (c3, xl, xl, crec);
      crec = c1 + c2 + c3;
      *rp++ = xl;
      prev_p1 = p1;
    }
  while (--n != 0);

  return prev_p1 + crec;
}

#endif
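
/* Why three ADDC_LIMB steps when GMP_NAIL_BITS == 1: limbs carry only
   GMP_NUMB_BITS significant bits, so summing prev_p1, p0, r0 and crec
   can overflow the numb field up to three times, and each carry must be
   recorded separately.  Sketch of the pre-shift, assuming 64-bit limbs
   (GMP_NUMB_BITS == 63): multiplying by v0 << 1 places the high numb
   limb of u0 * v0 directly in p1, so only p0 needs its nail bit shifted
   away.  */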

#if GMP_NAIL_BITS >= 2

mp_limb_t
mpn_addmul_1 (mp_ptr rp, mp_srcptr up, mp_size_t n, mp_limb_t v0)
{
  mp_limb_t shifted_v0, u0, r0, p0, p1, prev_p1, xw, crec, xl;

  ASSERT (n >= 1);
  ASSERT (MPN_SAME_OR_SEPARATE_P (rp, up, n));
  ASSERT_MPN (rp, n);
  ASSERT_MPN (up, n);
  ASSERT_LIMB (v0);

  shifted_v0 = v0 << GMP_NAIL_BITS;	/* pre-shift so the product splits at the numb boundary */
  crec = 0;
  prev_p1 = 0;
  do
    {
      u0 = *up++;
      r0 = *rp;
      umul_ppmm (p1, p0, u0, shifted_v0);
      p0 >>= GMP_NAIL_BITS;		/* realign the low product limb to the numb field */
      xw = prev_p1 + p0 + r0 + crec;	/* four numb-sized terms; two nail bits absorb the carries */
      crec = xw >> GMP_NUMB_BITS;	/* carry out of the numb field */
      xl = xw & GMP_NUMB_MASK;
      *rp++ = xl;
      prev_p1 = p1;
    }
  while (--n != 0);

  return prev_p1 + crec;
}

#endif
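
/* Why a plain sum suffices when GMP_NAIL_BITS >= 2: prev_p1, p0, r0 and
   crec are each below 2^GMP_NUMB_BITS, so xw is below
   2^(GMP_NUMB_BITS + 2) and fits in a limb; the carry is then read off
   with a shift and the stored limb with a mask.  */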