xref: /dflybsd-src/contrib/gmp/mpn/generic/toom43_mul.c (revision 86d7f5d305c6adaa56ff4582ece9859d73106103)
/* mpn_toom43_mul -- Multiply {ap,an} and {bp,bn} where an is nominally 4/3
   times as large as bn.  Or more accurately, bn < an < 2 bn.

   Contributed to the GNU project by Marco Bodrato.

   The idea of applying toom to unbalanced multiplication is due to Marco
   Bodrato and Alberto Zanoni.

   THE FUNCTION IN THIS FILE IS INTERNAL WITH A MUTABLE INTERFACE.  IT IS ONLY
   SAFE TO REACH IT THROUGH DOCUMENTED INTERFACES.  IN FACT, IT IS ALMOST
   GUARANTEED THAT IT WILL CHANGE OR DISAPPEAR IN A FUTURE GNU MP RELEASE.

Copyright 2009 Free Software Foundation, Inc.

This file is part of the GNU MP Library.

The GNU MP Library is free software; you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation; either version 3 of the License, or (at your
option) any later version.

The GNU MP Library is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
License for more details.

You should have received a copy of the GNU Lesser General Public License
along with the GNU MP Library.  If not, see http://www.gnu.org/licenses/.  */
29*86d7f5d3SJohn Marino 
30*86d7f5d3SJohn Marino 
31*86d7f5d3SJohn Marino #include "gmp.h"
32*86d7f5d3SJohn Marino #include "gmp-impl.h"
33*86d7f5d3SJohn Marino 
/* Evaluate in: -2, -1, 0, +1, +2, +inf

  <-s-><--n--><--n--><--n-->
   ___ ______ ______ ______
  |a3_|___a2_|___a1_|___a0_|
	|_b2_|___b1_|___b0_|
	<-t--><--n--><--n-->

  v0  =  a0             * b0          #   A(0)*B(0)
  v1  = (a0+ a1+ a2+ a3)*(b0+ b1+ b2) #   A(1)*B(1)      ah  <= 3  bh <= 2
  vm1 = (a0- a1+ a2- a3)*(b0- b1+ b2) #  A(-1)*B(-1)    |ah| <= 1 |bh|<= 1
  v2  = (a0+2a1+4a2+8a3)*(b0+2b1+4b2) #   A(2)*B(2)      ah  <= 14 bh <= 6
  vm2 = (a0-2a1+4a2-8a3)*(b0-2b1+4b2) #  A(-2)*B(-2)    |ah| <= 9 |bh|<= 4
  vinf=              a3 *         b2  # A(inf)*B(inf)
*/
49*86d7f5d3SJohn Marino 
50*86d7f5d3SJohn Marino void
mpn_toom43_mul(mp_ptr pp,mp_srcptr ap,mp_size_t an,mp_srcptr bp,mp_size_t bn,mp_ptr scratch)51*86d7f5d3SJohn Marino mpn_toom43_mul (mp_ptr pp,
52*86d7f5d3SJohn Marino 		mp_srcptr ap, mp_size_t an,
53*86d7f5d3SJohn Marino 		mp_srcptr bp, mp_size_t bn, mp_ptr scratch)
54*86d7f5d3SJohn Marino {
55*86d7f5d3SJohn Marino   mp_size_t n, s, t;
56*86d7f5d3SJohn Marino   enum toom6_flags flags;
57*86d7f5d3SJohn Marino   mp_limb_t cy;
58*86d7f5d3SJohn Marino 
59*86d7f5d3SJohn Marino #define a0  ap
60*86d7f5d3SJohn Marino #define a1  (ap + n)
61*86d7f5d3SJohn Marino #define a2  (ap + 2 * n)
62*86d7f5d3SJohn Marino #define a3  (ap + 3 * n)
63*86d7f5d3SJohn Marino #define b0  bp
64*86d7f5d3SJohn Marino #define b1  (bp + n)
65*86d7f5d3SJohn Marino #define b2  (bp + 2 * n)
66*86d7f5d3SJohn Marino 
67*86d7f5d3SJohn Marino   n = 1 + (3 * an >= 4 * bn ? (an - 1) >> 2 : (bn - 1) / (size_t) 3);
68*86d7f5d3SJohn Marino 
69*86d7f5d3SJohn Marino   s = an - 3 * n;
70*86d7f5d3SJohn Marino   t = bn - 2 * n;
71*86d7f5d3SJohn Marino 
72*86d7f5d3SJohn Marino   ASSERT (0 < s && s <= n);
73*86d7f5d3SJohn Marino   ASSERT (0 < t && t <= n);
74*86d7f5d3SJohn Marino 
75*86d7f5d3SJohn Marino   /* This is true whenever an >= 25 or bn >= 19, I think. It
76*86d7f5d3SJohn Marino      guarantees that we can fit 5 values of size n+1 in the product
77*86d7f5d3SJohn Marino      area. */
78*86d7f5d3SJohn Marino   ASSERT (s+t >= 5);
79*86d7f5d3SJohn Marino 
80*86d7f5d3SJohn Marino #define v0    pp				/* 2n */
81*86d7f5d3SJohn Marino #define vm1   (scratch)				/* 2n+1 */
82*86d7f5d3SJohn Marino #define v1    (pp + 2*n)			/* 2n+1 */
83*86d7f5d3SJohn Marino #define vm2   (scratch + 2 * n + 1)		/* 2n+1 */
84*86d7f5d3SJohn Marino #define v2    (scratch + 4 * n + 2)		/* 2n+1 */
85*86d7f5d3SJohn Marino #define vinf  (pp + 5 * n)			/* s+t */
86*86d7f5d3SJohn Marino #define bs1    pp				/* n+1 */
87*86d7f5d3SJohn Marino #define bsm1  (scratch + 2 * n + 2)		/* n+1 */
88*86d7f5d3SJohn Marino #define asm1  (scratch + 3 * n + 3)		/* n+1 */
89*86d7f5d3SJohn Marino #define asm2  (scratch + 4 * n + 4)		/* n+1 */
90*86d7f5d3SJohn Marino #define bsm2  (pp + n + 1)			/* n+1 */
91*86d7f5d3SJohn Marino #define bs2   (pp + 2 * n + 2)			/* n+1 */
92*86d7f5d3SJohn Marino #define as2   (pp + 3 * n + 3)			/* n+1 */
93*86d7f5d3SJohn Marino #define as1   (pp + 4 * n + 4)			/* n+1 */
94*86d7f5d3SJohn Marino 
95*86d7f5d3SJohn Marino   /* Total sccratch need is 6 * n + 3 + 1; we allocate one extra
96*86d7f5d3SJohn Marino      limb, because products will overwrite 2n+2 limbs. */
97*86d7f5d3SJohn Marino 
98*86d7f5d3SJohn Marino #define a0a2  scratch
99*86d7f5d3SJohn Marino #define b0b2  scratch
100*86d7f5d3SJohn Marino #define a1a3  asm1
101*86d7f5d3SJohn Marino #define b1d   bsm1
102*86d7f5d3SJohn Marino 
103*86d7f5d3SJohn Marino   /* Compute as2 and asm2.  */
104*86d7f5d3SJohn Marino   flags = toom6_vm2_neg & mpn_toom_eval_dgr3_pm2 (as2, asm2, ap, n, s, a1a3);
105*86d7f5d3SJohn Marino 
106*86d7f5d3SJohn Marino   /* Compute bs2 and bsm2.  */
107*86d7f5d3SJohn Marino   b1d[n] = mpn_lshift (b1d, b1, n, 1);			/*       2b1      */
108*86d7f5d3SJohn Marino   cy  = mpn_lshift (b0b2, b2, t, 2);			/*  4b2           */
109*86d7f5d3SJohn Marino   cy += mpn_add_n (b0b2, b0b2, b0, t);			/*  4b2      + b0 */
110*86d7f5d3SJohn Marino   if (t != n)
111*86d7f5d3SJohn Marino     cy = mpn_add_1 (b0b2 + t, b0 + t, n - t, cy);
112*86d7f5d3SJohn Marino   b0b2[n] = cy;
113*86d7f5d3SJohn Marino 
114*86d7f5d3SJohn Marino #if HAVE_NATIVE_mpn_add_n_sub_n
115*86d7f5d3SJohn Marino   if (mpn_cmp (b0b2, b1d, n+1) < 0)
116*86d7f5d3SJohn Marino     {
117*86d7f5d3SJohn Marino       mpn_add_n_sub_n (bs2, bsm2, b1d, b0b2, n+1);
118*86d7f5d3SJohn Marino       flags ^= toom6_vm2_neg;
119*86d7f5d3SJohn Marino     }
120*86d7f5d3SJohn Marino   else
121*86d7f5d3SJohn Marino     {
122*86d7f5d3SJohn Marino       mpn_add_n_sub_n (bs2, bsm2, b0b2, b1d, n+1);
123*86d7f5d3SJohn Marino     }
124*86d7f5d3SJohn Marino #else
125*86d7f5d3SJohn Marino   mpn_add_n (bs2, b0b2, b1d, n+1);
126*86d7f5d3SJohn Marino   if (mpn_cmp (b0b2, b1d, n+1) < 0)
127*86d7f5d3SJohn Marino     {
128*86d7f5d3SJohn Marino       mpn_sub_n (bsm2, b1d, b0b2, n+1);
129*86d7f5d3SJohn Marino       flags ^= toom6_vm2_neg;
130*86d7f5d3SJohn Marino     }
131*86d7f5d3SJohn Marino   else
132*86d7f5d3SJohn Marino     {
133*86d7f5d3SJohn Marino       mpn_sub_n (bsm2, b0b2, b1d, n+1);
134*86d7f5d3SJohn Marino     }
135*86d7f5d3SJohn Marino #endif
136*86d7f5d3SJohn Marino 
137*86d7f5d3SJohn Marino   /* Compute as1 and asm1.  */
138*86d7f5d3SJohn Marino   flags ^= toom6_vm1_neg & mpn_toom_eval_dgr3_pm1 (as1, asm1, ap, n, s, a0a2);
139*86d7f5d3SJohn Marino 
140*86d7f5d3SJohn Marino   /* Compute bs1 and bsm1.  */
141*86d7f5d3SJohn Marino   bsm1[n] = mpn_add (bsm1, b0, n, b2, t);
142*86d7f5d3SJohn Marino #if HAVE_NATIVE_mpn_add_n_sub_n
143*86d7f5d3SJohn Marino   if (bsm1[n] == 0 && mpn_cmp (bsm1, b1, n) < 0)
144*86d7f5d3SJohn Marino     {
145*86d7f5d3SJohn Marino       cy = mpn_add_n_sub_n (bs1, bsm1, b1, bsm1, n);
146*86d7f5d3SJohn Marino       bs1[n] = cy >> 1;
147*86d7f5d3SJohn Marino       flags ^= toom6_vm1_neg;
148*86d7f5d3SJohn Marino     }
149*86d7f5d3SJohn Marino   else
150*86d7f5d3SJohn Marino     {
151*86d7f5d3SJohn Marino       cy = mpn_add_n_sub_n (bs1, bsm1, bsm1, b1, n);
152*86d7f5d3SJohn Marino       bs1[n] = bsm1[n] + (cy >> 1);
153*86d7f5d3SJohn Marino       bsm1[n]-= cy & 1;
154*86d7f5d3SJohn Marino     }
155*86d7f5d3SJohn Marino #else
156*86d7f5d3SJohn Marino   bs1[n] = bsm1[n] + mpn_add_n (bs1, bsm1, b1, n);
157*86d7f5d3SJohn Marino   if (bsm1[n] == 0 && mpn_cmp (bsm1, b1, n) < 0)
158*86d7f5d3SJohn Marino     {
159*86d7f5d3SJohn Marino       mpn_sub_n (bsm1, b1, bsm1, n);
160*86d7f5d3SJohn Marino       flags ^= toom6_vm1_neg;
161*86d7f5d3SJohn Marino     }
162*86d7f5d3SJohn Marino   else
163*86d7f5d3SJohn Marino     {
164*86d7f5d3SJohn Marino       bsm1[n] -= mpn_sub_n (bsm1, bsm1, b1, n);
165*86d7f5d3SJohn Marino     }
166*86d7f5d3SJohn Marino #endif
167*86d7f5d3SJohn Marino 
168*86d7f5d3SJohn Marino   ASSERT (as1[n] <= 3);
169*86d7f5d3SJohn Marino   ASSERT (bs1[n] <= 2);
170*86d7f5d3SJohn Marino   ASSERT (asm1[n] <= 1);
171*86d7f5d3SJohn Marino   ASSERT (bsm1[n] <= 1);
172*86d7f5d3SJohn Marino   ASSERT (as2[n] <=14);
173*86d7f5d3SJohn Marino   ASSERT (bs2[n] <= 6);
174*86d7f5d3SJohn Marino   ASSERT (asm2[n] <= 9);
175*86d7f5d3SJohn Marino   ASSERT (bsm2[n] <= 4);
176*86d7f5d3SJohn Marino 
177*86d7f5d3SJohn Marino   /* vm1, 2n+1 limbs */
178*86d7f5d3SJohn Marino   mpn_mul_n (vm1, asm1, bsm1, n+1);  /* W4 */
179*86d7f5d3SJohn Marino 
180*86d7f5d3SJohn Marino   /* vm2, 2n+1 limbs */
181*86d7f5d3SJohn Marino   mpn_mul_n (vm2, asm2, bsm2, n+1);  /* W2 */
182*86d7f5d3SJohn Marino 
183*86d7f5d3SJohn Marino   /* v2, 2n+1 limbs */
184*86d7f5d3SJohn Marino   mpn_mul_n (v2, as2, bs2, n+1);  /* W1 */
185*86d7f5d3SJohn Marino 
186*86d7f5d3SJohn Marino   /* v1, 2n+1 limbs */
187*86d7f5d3SJohn Marino   mpn_mul_n (v1, as1, bs1, n+1);  /* W3 */
188*86d7f5d3SJohn Marino 
189*86d7f5d3SJohn Marino   /* vinf, s+t limbs */   /* W0 */
190*86d7f5d3SJohn Marino   if (s > t)  mpn_mul (vinf, a3, s, b2, t);
191*86d7f5d3SJohn Marino   else        mpn_mul (vinf, b2, t, a3, s);
192*86d7f5d3SJohn Marino 
193*86d7f5d3SJohn Marino   /* v0, 2n limbs */
194*86d7f5d3SJohn Marino   mpn_mul_n (v0, ap, bp, n);  /* W5 */
195*86d7f5d3SJohn Marino 
196*86d7f5d3SJohn Marino   mpn_toom_interpolate_6pts (pp, n, flags, vm1, vm2, v2, t + s);
197*86d7f5d3SJohn Marino 
198*86d7f5d3SJohn Marino #undef v0
199*86d7f5d3SJohn Marino #undef vm1
200*86d7f5d3SJohn Marino #undef v1
201*86d7f5d3SJohn Marino #undef vm2
202*86d7f5d3SJohn Marino #undef v2
203*86d7f5d3SJohn Marino #undef vinf
204*86d7f5d3SJohn Marino #undef bs1
205*86d7f5d3SJohn Marino #undef bs2
206*86d7f5d3SJohn Marino #undef bsm1
207*86d7f5d3SJohn Marino #undef bsm2
208*86d7f5d3SJohn Marino #undef asm1
209*86d7f5d3SJohn Marino #undef asm2
210*86d7f5d3SJohn Marino /* #undef as1 */
211*86d7f5d3SJohn Marino /* #undef as2 */
212*86d7f5d3SJohn Marino #undef a0a2
213*86d7f5d3SJohn Marino #undef b0b2
214*86d7f5d3SJohn Marino #undef a1a3
215*86d7f5d3SJohn Marino #undef b1d
216*86d7f5d3SJohn Marino #undef a0
217*86d7f5d3SJohn Marino #undef a1
218*86d7f5d3SJohn Marino #undef a2
219*86d7f5d3SJohn Marino #undef a3
220*86d7f5d3SJohn Marino #undef b0
221*86d7f5d3SJohn Marino #undef b1
222*86d7f5d3SJohn Marino #undef b2
223*86d7f5d3SJohn Marino }
224