/* mpn_toom42_mul -- Multiply {ap,an} and {bp,bn} where an is nominally twice
   as large as bn.  Or more accurately, (3/2)bn < an < 4bn.

   Contributed to the GNU project by Torbjorn Granlund.
   Additional improvements by Marco Bodrato.

   The idea of applying toom to unbalanced multiplication is due to Marco
   Bodrato and Alberto Zanoni.

   THE FUNCTION IN THIS FILE IS INTERNAL WITH A MUTABLE INTERFACE.  IT IS ONLY
   SAFE TO REACH IT THROUGH DOCUMENTED INTERFACES.  IN FACT, IT IS ALMOST
   GUARANTEED THAT IT WILL CHANGE OR DISAPPEAR IN A FUTURE GNU MP RELEASE.

Copyright 2006, 2007, 2008, 2012 Free Software Foundation, Inc.

This file is part of the GNU MP Library.

The GNU MP Library is free software; you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation; either version 3 of the License, or (at your
option) any later version.

The GNU MP Library is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
License for more details.

You should have received a copy of the GNU Lesser General Public License
along with the GNU MP Library.  If not, see http://www.gnu.org/licenses/.  */


#include "gmp.h"
#include "gmp-impl.h"

/* Evaluate in: -1, 0, +1, +2, +inf

  <-s-><--n--><--n--><--n-->
   ___ ______ ______ ______
  |a3_|___a2_|___a1_|___a0_|
               |_b1_|___b0_|
               <-t--><--n-->

  v0  =  a0             * b0      #   A(0)*B(0)
  v1  = (a0+ a1+ a2+ a3)*(b0+ b1) #   A(1)*B(1)    ah  <= 3   bh <= 1
  vm1 = (a0- a1+ a2- a3)*(b0- b1) #  A(-1)*B(-1)  |ah| <= 1   bh  = 0
  v2  = (a0+2a1+4a2+8a3)*(b0+2b1) #   A(2)*B(2)    ah  <= 14  bh <= 2
  vinf=               a3 *    b1  # A(inf)*B(inf)
*/

#define TOOM42_MUL_N_REC(p, a, b, n, ws)	\
  do {						\
    mpn_mul_n (p, a, b, n);			\
  } while (0)

void
mpn_toom42_mul (mp_ptr pp,
		mp_srcptr ap, mp_size_t an,
		mp_srcptr bp, mp_size_t bn,
		mp_ptr scratch)
{
  mp_size_t n, s, t;
  int vm1_neg;
  mp_limb_t cy, vinf0;
  mp_ptr a0_a2;
  mp_ptr as1, asm1, as2;
  mp_ptr bs1, bsm1, bs2;
  TMP_DECL;

#define a0  ap
#define a1  (ap + n)
#define a2  (ap + 2*n)
#define a3  (ap + 3*n)
#define b0  bp
#define b1  (bp + n)

  n = an >= 2 * bn ? (an + 3) >> 2 : (bn + 1) >> 1;

  s = an - 3 * n;
  t = bn - n;

  ASSERT (0 < s && s <= n);
  ASSERT (0 < t && t <= n);

  TMP_MARK;

  as1  = TMP_SALLOC_LIMBS (n + 1);
  asm1 = TMP_SALLOC_LIMBS (n + 1);
  as2  = TMP_SALLOC_LIMBS (n + 1);

  bs1  = TMP_SALLOC_LIMBS (n + 1);
  bsm1 = TMP_SALLOC_LIMBS (n);
  bs2  = TMP_SALLOC_LIMBS (n + 1);

  a0_a2 = pp;

  /* Compute as1 and asm1.  */
  vm1_neg = mpn_toom_eval_dgr3_pm1 (as1, asm1, ap, n, s, a0_a2) & 1;

  /* Compute as2.  */
#if HAVE_NATIVE_mpn_addlsh1_n
  cy  = mpn_addlsh1_n (as2, a2, a3, s);
  if (s != n)
    cy = mpn_add_1 (as2 + s, a2 + s, n - s, cy);
  cy = 2 * cy + mpn_addlsh1_n (as2, a1, as2, n);
  cy = 2 * cy + mpn_addlsh1_n (as2, a0, as2, n);
#else
  cy  = mpn_lshift (as2, a3, s, 1);
  cy += mpn_add_n (as2, a2, as2, s);
  if (s != n)
    cy = mpn_add_1 (as2 + s, a2 + s, n - s, cy);
  cy = 2 * cy + mpn_lshift (as2, as2, n, 1);
  cy += mpn_add_n (as2, a1, as2, n);
  cy = 2 * cy + mpn_lshift (as2, as2, n, 1);
  cy += mpn_add_n (as2, a0, as2, n);
#endif
  as2[n] = cy;

  /* Compute bs1 and bsm1.  */
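  /* bs1 gets B(+1) = b0 + b1 in n+1 limbs; bsm1 gets |B(-1)| = |b0 - b1| in
     n limbs.  When b0 < b1 the operands of the subtraction are swapped and
     vm1_neg is toggled, so that mpn_toom_interpolate_5pts later sees the
     correct sign of vm1 = A(-1)*B(-1).  */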
  if (t == n)
    {
#if HAVE_NATIVE_mpn_add_n_sub_n
      if (mpn_cmp (b0, b1, n) < 0)
	{
	  cy = mpn_add_n_sub_n (bs1, bsm1, b1, b0, n);
	  vm1_neg ^= 1;
	}
      else
	{
	  cy = mpn_add_n_sub_n (bs1, bsm1, b0, b1, n);
	}
      bs1[n] = cy >> 1;
#else
      bs1[n] = mpn_add_n (bs1, b0, b1, n);

      if (mpn_cmp (b0, b1, n) < 0)
	{
	  mpn_sub_n (bsm1, b1, b0, n);
	  vm1_neg ^= 1;
	}
      else
	{
	  mpn_sub_n (bsm1, b0, b1, n);
	}
#endif
    }
  else
    {
      bs1[n] = mpn_add (bs1, b0, n, b1, t);

      if (mpn_zero_p (b0 + t, n - t) && mpn_cmp (b0, b1, t) < 0)
	{
	  mpn_sub_n (bsm1, b1, b0, t);
	  MPN_ZERO (bsm1 + t, n - t);
	  vm1_neg ^= 1;
	}
      else
	{
	  mpn_sub (bsm1, b0, n, b1, t);
	}
    }

  /* Compute bs2, recycling bs1.  bs2 = bs1 + b1  */
  mpn_add (bs2, bs1, n + 1, b1, t);

  ASSERT (as1[n] <= 3);
  ASSERT (bs1[n] <= 1);
  ASSERT (asm1[n] <= 1);
  /*ASSERT (bsm1[n] == 0);*/
  ASSERT (as2[n] <= 14);
  ASSERT (bs2[n] <= 2);

#define v0    pp				/* 2n */
#define v1    (pp + 2 * n)			/* 2n+1 */
#define vinf  (pp + 4 * n)			/* s+t */
#define vm1   scratch				/* 2n+1 */
#define v2    (scratch + 2 * n + 1)		/* 2n+2 */
#define scratch_out  scratch + 4 * n + 4	/* Currently unused. */

  /* vm1, 2n+1 limbs */
  TOOM42_MUL_N_REC (vm1, asm1, bsm1, n, scratch_out);
  cy = 0;
  if (asm1[n] != 0)
    cy = mpn_add_n (vm1 + n, vm1 + n, bsm1, n);
  vm1[2 * n] = cy;

  TOOM42_MUL_N_REC (v2, as2, bs2, n + 1, scratch_out);	/* v2, 2n+1 limbs */

  /* vinf, s+t limbs */
  if (s > t)  mpn_mul (vinf, a3, s, b1, t);
  else        mpn_mul (vinf, b1, t, a3, s);

  vinf0 = vinf[0];				/* v1 overlaps with this */

  /* v1, 2n+1 limbs */
  TOOM42_MUL_N_REC (v1, as1, bs1, n, scratch_out);
  if (as1[n] == 1)
    {
      cy = bs1[n] + mpn_add_n (v1 + n, v1 + n, bs1, n);
    }
  else if (as1[n] == 2)
    {
#if HAVE_NATIVE_mpn_addlsh1_n
      cy = 2 * bs1[n] + mpn_addlsh1_n (v1 + n, v1 + n, bs1, n);
#else
      cy = 2 * bs1[n] + mpn_addmul_1 (v1 + n, bs1, n, CNST_LIMB(2));
#endif
    }
  else if (as1[n] == 3)
    {
      cy = 3 * bs1[n] + mpn_addmul_1 (v1 + n, bs1, n, CNST_LIMB(3));
    }
  else
    cy = 0;
  if (bs1[n] != 0)
    cy += mpn_add_n (v1 + n, v1 + n, as1, n);
  v1[2 * n] = cy;

  TOOM42_MUL_N_REC (v0, ap, bp, n, scratch_out);	/* v0, 2n limbs */

  mpn_toom_interpolate_5pts (pp, v2, vm1, n, s + t, vm1_neg, vinf0);

  TMP_FREE;
}
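/* Usage sketch (illustration only, not part of the library): this function is
   internal, so application code should reach it only through the documented
   mpn_mul interface, which selects a multiplication algorithm from the operand
   sizes.  The commented fragment below shows that documented call for operands
   shaped roughly like an ~ 2*bn, the case this file targets; whether
   mpn_toom42_mul is actually chosen depends on the configured thresholds.

     #include <gmp.h>

     -- Multiply {up,un} by {vp,vn}, requiring un >= vn >= 1;
     -- {rp,un+vn} receives the product and must not overlap the inputs.
     static void
     example_mul (mp_ptr rp, mp_srcptr up, mp_size_t un,
                  mp_srcptr vp, mp_size_t vn)
     {
       mpn_mul (rp, up, un, vp, vn);
     }
*/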