/* mpn_toom42_mul -- Multiply {ap,an} and {bp,bn} where an is nominally twice
   as large as bn.  Or more accurately, (3/2)bn < an < 4bn.

   Contributed to the GNU project by Torbjorn Granlund.
   Additional improvements by Marco Bodrato.

   The idea of applying toom to unbalanced multiplication is due to Marco
   Bodrato and Alberto Zanoni.

   THE FUNCTION IN THIS FILE IS INTERNAL WITH A MUTABLE INTERFACE.  IT IS ONLY
   SAFE TO REACH IT THROUGH DOCUMENTED INTERFACES.  IN FACT, IT IS ALMOST
   GUARANTEED THAT IT WILL CHANGE OR DISAPPEAR IN A FUTURE GNU MP RELEASE.

   Copyright 2006, 2007, 2008 Free Software Foundation, Inc.

   This file is part of the GNU MP Library.

   The GNU MP Library is free software; you can redistribute it and/or modify
   it under the terms of the GNU Lesser General Public License as published by
   the Free Software Foundation; either version 3 of the License, or (at your
   option) any later version.

   The GNU MP Library is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
   License for more details.

   You should have received a copy of the GNU Lesser General Public License
   along with the GNU MP Library.  If not, see http://www.gnu.org/licenses/.  */


#include "gmp.h"
#include "gmp-impl.h"

/* Evaluate in: -1, 0, +1, +2, +inf

  <-s-><--n--><--n--><--n-->
   ___ ______ ______ ______
  |a3_|___a2_|___a1_|___a0_|
               |_b1_|___b0_|
               <-t--><--n-->

  v0  =  a0             * b0      #   A(0)*B(0)
  v1  = (a0+ a1+ a2+ a3)*(b0+ b1) #   A(1)*B(1)    ah  <= 3  bh <= 1
  vm1 = (a0- a1+ a2- a3)*(b0- b1) #  A(-1)*B(-1)  |ah| <= 1  bh  = 0
  v2  = (a0+2a1+4a2+8a3)*(b0+2b1) #   A(2)*B(2)    ah  <= 14 bh <= 2
  vinf=              a3 *     b1  # A(inf)*B(inf)
*/
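/* The five point values determine the coefficients of the product polynomial
   C(x) = A(x)*B(x) = c4*x^4 + c3*x^3 + c2*x^2 + c1*x + c0, which
   mpn_toom_interpolate_5pts recovers at the end of this function.  One way to
   solve the system (the interpolation routine may use a differently ordered
   but equivalent sequence; vm1 carries its sign separately in vm1_neg):

     c0 = v0
     c4 = vinf
     c2 = (v1 + vm1)/2 - v0 - vinf
     c3 = ((v2 - v0)/2 - (v1 - vm1)/2 - 2*c2 - 8*vinf) / 3
     c1 = (v1 - vm1)/2 - c3

   The product is then C evaluated at x = 2^(n*GMP_NUMB_BITS).  */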

#define TOOM42_MUL_N_REC(p, a, b, n, ws)        \
  do {                                          \
    mpn_mul_n (p, a, b, n);                     \
  } while (0)
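/* Note: the ws (scratch) argument of TOOM42_MUL_N_REC is currently unused;
   each recursive product below simply goes through mpn_mul_n, which selects
   its own multiplication algorithm.  */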

void
mpn_toom42_mul (mp_ptr pp,
                mp_srcptr ap, mp_size_t an,
                mp_srcptr bp, mp_size_t bn,
                mp_ptr scratch)
{
  mp_size_t n, s, t;
  int vm1_neg;
  mp_limb_t cy, vinf0;
  mp_ptr a0_a2, a1_a3;
  mp_ptr as1, asm1, as2;
  mp_ptr bs1, bsm1, bs2;
  TMP_DECL;

#define a0  ap
#define a1  (ap + n)
#define a2  (ap + 2*n)
#define a3  (ap + 3*n)
#define b0  bp
#define b1  (bp + n)

  n = an >= 2 * bn ? (an + 3) >> 2 : (bn + 1) >> 1;

  s = an - 3 * n;
  t = bn - n;

  ASSERT (0 < s && s <= n);
  ASSERT (0 < t && t <= n);
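  /* Illustration of the splitting (not part of the original source):
     an = 25, bn = 10 takes the first branch, giving n = (25 + 3) >> 2 = 7,
     s = 25 - 3*7 = 4, t = 10 - 7 = 3; an = 18, bn = 10 takes the second,
     giving n = (10 + 1) >> 1 = 5, s = 3, t = 5.  Both satisfy the asserted
     bounds 0 < s <= n and 0 < t <= n.  */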

  TMP_MARK;

  as1 = TMP_SALLOC_LIMBS (n + 1);
  asm1 = TMP_SALLOC_LIMBS (n + 1);
  as2 = TMP_SALLOC_LIMBS (n + 1);

  bs1 = TMP_SALLOC_LIMBS (n + 1);
  bsm1 = TMP_SALLOC_LIMBS (n);
  bs2 = TMP_SALLOC_LIMBS (n + 1);

  a0_a2 = pp;
  a1_a3 = pp + n + 1;

  /* Compute as1 and asm1.  */
  vm1_neg = mpn_toom_eval_dgr3_pm1 (as1, asm1, ap, n, s, a0_a2) & 1;
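  /* mpn_toom_eval_dgr3_pm1 forms as1 = a0 + a1 + a2 + a3 and
     asm1 = |a0 - a1 + a2 - a3|, using a0_a2 (i.e. pp) as temporary space;
     its return value is non-zero when the -1 evaluation is negative, and
     only the low bit of that result is kept in vm1_neg.  */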

  /* Compute as2.  */
#if HAVE_NATIVE_mpn_addlsh1_n
  cy = mpn_addlsh1_n (as2, a2, a3, s);
  if (s != n)
    cy = mpn_add_1 (as2 + s, a2 + s, n - s, cy);
  cy = 2 * cy + mpn_addlsh1_n (as2, a1, as2, n);
  cy = 2 * cy + mpn_addlsh1_n (as2, a0, as2, n);
#else
  cy = mpn_lshift (as2, a3, s, 1);
  cy += mpn_add_n (as2, a2, as2, s);
  if (s != n)
    cy = mpn_add_1 (as2 + s, a2 + s, n - s, cy);
  cy = 2 * cy + mpn_lshift (as2, as2, n, 1);
  cy += mpn_add_n (as2, a1, as2, n);
  cy = 2 * cy + mpn_lshift (as2, as2, n, 1);
  cy += mpn_add_n (as2, a0, as2, n);
#endif
  as2[n] = cy;
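  /* as2 now holds A(2) = a0 + 2*a1 + 4*a2 + 8*a3 in n+1 limbs: the code
     above folds in a3, a2, a1 and a0 from the top downwards, doubling the
     partial result at each step (Horner's rule), and stores the final
     carry in as2[n].  */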

  /* Compute bs1 and bsm1.  */
  if (t == n)
    {
#if HAVE_NATIVE_mpn_add_n_sub_n
      if (mpn_cmp (b0, b1, n) < 0)
        {
          cy = mpn_add_n_sub_n (bs1, bsm1, b1, b0, n);
          vm1_neg ^= 1;
        }
      else
        {
          cy = mpn_add_n_sub_n (bs1, bsm1, b0, b1, n);
        }
      bs1[n] = cy >> 1;
#else
      bs1[n] = mpn_add_n (bs1, b0, b1, n);

      if (mpn_cmp (b0, b1, n) < 0)
        {
          mpn_sub_n (bsm1, b1, b0, n);
          vm1_neg ^= 1;
        }
      else
        {
          mpn_sub_n (bsm1, b0, b1, n);
        }
#endif
    }
  else
    {
      bs1[n] = mpn_add (bs1, b0, n, b1, t);

      if (mpn_zero_p (b0 + t, n - t) && mpn_cmp (b0, b1, t) < 0)
        {
          mpn_sub_n (bsm1, b1, b0, t);
          MPN_ZERO (bsm1 + t, n - t);
          vm1_neg ^= 1;
        }
      else
        {
          mpn_sub (bsm1, b0, n, b1, t);
        }
    }

  /* Compute bs2 = B(2) = b0 + 2*b1, recycling bs1 = b0 + b1: bs2 = bs1 + b1.  */
  mpn_add (bs2, bs1, n + 1, b1, t);

  ASSERT (as1[n] <= 3);
  ASSERT (bs1[n] <= 1);
  ASSERT (asm1[n] <= 1);
/*ASSERT (bsm1[n] == 0);*/
  ASSERT (as2[n] <= 14);
  ASSERT (bs2[n] <= 2);

#define v0    pp                              /* 2n */
#define v1    (pp + 2 * n)                    /* 2n+1 */
#define vinf  (pp + 4 * n)                    /* s+t */
#define vm1   scratch                         /* 2n+1 */
#define v2    (scratch + 2 * n + 1)           /* 2n+2 */
#define scratch_out  scratch + 4 * n + 4      /* Currently unused. */

  /* vm1, 2n+1 limbs */
  TOOM42_MUL_N_REC (vm1, asm1, bsm1, n, scratch_out);
  cy = 0;
  if (asm1[n] != 0)
    cy = mpn_add_n (vm1 + n, vm1 + n, bsm1, n);
  vm1[2 * n] = cy;
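  /* asm1 has at most a single high limb (asm1[n] <= 1) and bsm1 has none, so
     beyond the n x n product the only missing term of |A(-1)| * |B(-1)| is
     asm1[n] * bsm1 shifted up by n limbs; the conditional mpn_add_n supplies
     it and the resulting carry becomes vm1[2*n].  */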

  TOOM42_MUL_N_REC (v2, as2, bs2, n + 1, scratch_out);   /* v2, 2n+1 limbs */

  /* vinf, s+t limbs */
  if (s > t)  mpn_mul (vinf, a3, s, b1, t);
  else        mpn_mul (vinf, b1, t, a3, s);
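  /* mpn_mul requires its first operand to be at least as long as its second,
     hence the branch on s > t above.  */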

  vinf0 = vinf[0];                            /* v1 overlaps with this */

  /* v1, 2n+1 limbs */
  TOOM42_MUL_N_REC (v1, as1, bs1, n, scratch_out);
  if (as1[n] == 1)
    {
      cy = bs1[n] + mpn_add_n (v1 + n, v1 + n, bs1, n);
    }
  else if (as1[n] == 2)
    {
#if HAVE_NATIVE_mpn_addlsh1_n
      cy = 2 * bs1[n] + mpn_addlsh1_n (v1 + n, v1 + n, bs1, n);
#else
      cy = 2 * bs1[n] + mpn_addmul_1 (v1 + n, bs1, n, CNST_LIMB(2));
#endif
    }
  else if (as1[n] == 3)
    {
      cy = 3 * bs1[n] + mpn_addmul_1 (v1 + n, bs1, n, CNST_LIMB(3));
    }
  else
    cy = 0;
  if (bs1[n] != 0)
    cy += mpn_add_n (v1 + n, v1 + n, as1, n);
  v1[2 * n] = cy;
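  /* Writing as1 = {as1,n} + as1[n]*B^n and bs1 = {bs1,n} + bs1[n]*B^n with
     B = 2^GMP_NUMB_BITS, the recursive call produced only {as1,n}*{bs1,n};
     the corrections above add as1[n]*{bs1,n} and bs1[n]*{as1,n} at offset n,
     while as1[n]*bs1[n] plus the accumulated carries ends up in v1[2*n].  */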

  TOOM42_MUL_N_REC (v0, ap, bp, n, scratch_out);          /* v0, 2n limbs */

  mpn_toom_interpolate_5pts (pp, v2, vm1, n, s + t, vm1_neg, vinf0);

  TMP_FREE;
}