/* mpn_mu_bdiv_qr(qp,rp,np,nn,dp,dn,tp) -- Compute {np,nn} / {dp,dn} mod B^qn,
   where qn = nn-dn, storing the result in {qp,qn}.  Overlap allowed between Q
   and N; all other overlap disallowed.

   Contributed to the GNU project by Torbjorn Granlund.

   THE FUNCTIONS IN THIS FILE ARE INTERNAL WITH MUTABLE INTERFACES.  IT IS ONLY
   SAFE TO REACH THEM THROUGH DOCUMENTED INTERFACES.  IN FACT, IT IS ALMOST
   GUARANTEED THAT THEY WILL CHANGE OR DISAPPEAR IN A FUTURE GMP RELEASE.

Copyright 2005, 2006, 2007, 2009, 2010, 2012 Free Software Foundation, Inc.

This file is part of the GNU MP Library.

The GNU MP Library is free software; you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation; either version 3 of the License, or (at your
option) any later version.

The GNU MP Library is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
License for more details.

You should have received a copy of the GNU Lesser General Public License
along with the GNU MP Library.  If not, see http://www.gnu.org/licenses/.  */


/*
   The idea of the algorithm used herein is to compute a smaller inverted value
   than used in the standard Barrett algorithm, and thus save time in the
   Newton iterations, and pay just a small price when using the inverted value
   for developing quotient bits.  This algorithm was presented at ICMS 2006.
*/

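/* Editorial sketch (not upstream text): with B the limb base and
   qn = nn - dn, the Hensel (2-adic) quotient computed here is

     Q = N * D^{-1} mod B^qn,

   which exists since D is odd.  N - Q*D is then divisible by B^qn; the
   limbs written to {rp,dn}, together with the returned borrow, represent
   (N - Q*D) / B^qn.  Rather than inverting D to a full qn limbs, the code
   inverts it to roughly min(qn,dn) limbs and develops Q in blocks of that
   size.  */
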
#include "gmp.h"
#include "gmp-impl.h"


/* N = {np,nn}
   D = {dp,dn}

   Requirements: N >= D
		 D >= 1
		 D odd
		 dn >= 2
		 nn >= 2
		 scratch space as determined by mpn_mu_bdiv_qr_itch(nn,dn).

   Write quotient to Q = {qp,nn-dn}.

   FIXME: When iterating, perhaps do the small step before loop, not after.
   FIXME: Try to avoid the scalar divisions when computing inverse size.
   FIXME: Trim allocation for (qn > dn) case, 3*dn might be possible.  In
	  particular, when dn==in, tp and rp could use the same space.
*/
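
/* Minimal usage sketch (editorial; a hypothetical caller, not part of this
   file), sizing scratch with mpn_mu_bdiv_qr_itch and the gmp-impl.h TMP_*
   allocators:

     mp_limb_t hi;
     mp_ptr scratch;
     TMP_DECL;
     TMP_MARK;
     scratch = TMP_ALLOC_LIMBS (mpn_mu_bdiv_qr_itch (nn, dn));
     hi = mpn_mu_bdiv_qr (qp, rp, np, nn, dp, dn, scratch);
     TMP_FREE;
*/
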
mp_limb_t
mpn_mu_bdiv_qr (mp_ptr qp,
		mp_ptr rp,
		mp_srcptr np, mp_size_t nn,
		mp_srcptr dp, mp_size_t dn,
		mp_ptr scratch)
{
  mp_size_t qn;
  mp_size_t in;
  mp_limb_t cy, c0;
  mp_size_t tn, wn;

  qn = nn - dn;

  ASSERT (dn >= 2);
  ASSERT (qn >= 2);

  if (qn > dn)
    {
      mp_size_t b;

      /* |_______________________|   dividend
			|________|   divisor  */

#define ip           scratch		/* in */
#define tp           (scratch + in)	/* dn+in or next_size(dn) or rest >= binvert_itch(in) */
#define scratch_out  (scratch + in + tn)/* mulmod_bnm1_itch(next_size(dn)) */

      /* Compute an inverse size that is a nice partition of the quotient.  */
      b = (qn - 1) / dn + 1;	/* ceil(qn/dn), number of blocks */
      in = (qn - 1) / b + 1;	/* ceil(qn/b) = ceil(qn / ceil(qn/dn)) */
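      /* Worked example (editorial): nn = 14, dn = 4 gives qn = 10,
	 b = ceil(10/4) = 3 and in = ceil(10/3) = 4, so the loop below
	 emits quotient limbs in chunks of 4, 4 and finally 2.  */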

      /* Some notes on allocation:

	 When in = dn, R dies when mpn_mullo returns; if in < dn, the low in
	 limbs of R die at that point.  We could save memory by letting T live
	 just under R, and letting the upper part of T expand into R.  These
	 changes should reduce the itch to perhaps 3*dn.
       */

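      /* ip <- D^{-1} mod B^in; D must be odd for this inverse to exist.
	 tp serves as scratch here.  */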
      mpn_binvert (ip, dp, in, tp);

      MPN_COPY (rp, np, dn);
      np += dn;
      cy = 0;

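      /* Informal invariant for the loop below (editorial): {rp,dn} holds
	 the current partial remainder, np points at the next unconsumed
	 dividend limbs, and cy is the borrow pending against them.  */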
      while (qn > in)
	{
	  mpn_mullo_n (qp, rp, ip, in);

	  if (BELOW_THRESHOLD (in, MUL_TO_MULMOD_BNM1_FOR_2NXN_THRESHOLD))
	    mpn_mul (tp, dp, dn, qp, in);	/* mulhi, need tp[dn+in-1...in] */
	  else
	    {
	      tn = mpn_mulmod_bnm1_next_size (dn);
	      mpn_mulmod_bnm1 (tp, tn, dp, dn, qp, in, scratch_out);
	      wn = dn + in - tn;		/* number of wrapped limbs */
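	      /* Editorial note: mpn_mulmod_bnm1 computed D*Q mod (B^tn-1),
		 so the top wn limbs of the full dn+in limb product wrapped
		 around into the low limbs.  Those low limbs of D*Q are
		 known: they equal the corresponding limbs of the partial
		 remainder (Q = R * D^{-1} mod B^in), so subtracting them
		 below recovers the product's top wn limbs at tp + tn.  */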
	      if (wn > 0)
		{
		  c0 = mpn_sub_n (tp + tn, tp, rp, wn);
		  mpn_decr_u (tp + wn, c0);
		}
	    }

	  qp += in;
	  qn -= in;

	  if (dn != in)
	    {
	      /* Subtract tp[dn-1...in] from partial remainder.  */
	      cy += mpn_sub_n (rp, rp + in, tp + in, dn - in);
	      if (cy == 2)
		{
		  mpn_incr_u (tp + dn, 1);
		  cy = 1;
		}
	    }
	  /* Subtract tp[dn+in-1...dn] from dividend.  */
	  cy = mpn_sub_nc (rp + dn - in, np, tp + dn, in, cy);
	  np += in;
	}

      /* Generate last qn limbs.  */
      mpn_mullo_n (qp, rp, ip, qn);

      if (BELOW_THRESHOLD (qn, MUL_TO_MULMOD_BNM1_FOR_2NXN_THRESHOLD))
	mpn_mul (tp, dp, dn, qp, qn);		/* mulhi, need tp[dn+qn-1...qn] */
      else
	{
	  tn = mpn_mulmod_bnm1_next_size (dn);
	  mpn_mulmod_bnm1 (tp, tn, dp, dn, qp, qn, scratch_out);
	  wn = dn + qn - tn;			/* number of wrapped limbs */
	  if (wn > 0)
	    {
	      c0 = mpn_sub_n (tp + tn, tp, rp, wn);
	      mpn_decr_u (tp + wn, c0);
	    }
	}

      if (dn != qn)
	{
	  cy += mpn_sub_n (rp, rp + qn, tp + qn, dn - qn);
	  if (cy == 2)
	    {
	      mpn_incr_u (tp + dn, 1);
	      cy = 1;
	    }
	}
      return mpn_sub_nc (rp + dn - qn, np, tp + dn, qn, cy);

#undef ip
#undef tp
#undef scratch_out
    }
  else
    {
      /* |_______________________|   dividend
		|________________|   divisor  */

#define ip           scratch		/* in */
#define tp           (scratch + in)	/* dn+in or next_size(dn) or rest >= binvert_itch(in) */
#define scratch_out  (scratch + in + tn)/* mulmod_bnm1_itch(next_size(dn)) */

      /* Compute half-sized inverse.  */
      in = qn - (qn >> 1);
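      /* in = ceil(qn/2); e.g. (editorial) qn = 7 gives in = 4 low quotient
	 limbs from the first mullo and qn - in = 3 high limbs after.  */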

      mpn_binvert (ip, dp, in, tp);

      mpn_mullo_n (qp, np, ip, in);		/* low `in' quotient limbs */

      if (BELOW_THRESHOLD (in, MUL_TO_MULMOD_BNM1_FOR_2NXN_THRESHOLD))
	mpn_mul (tp, dp, dn, qp, in);		/* mulhigh */
      else
	{
	  tn = mpn_mulmod_bnm1_next_size (dn);
	  mpn_mulmod_bnm1 (tp, tn, dp, dn, qp, in, scratch_out);
	  wn = dn + in - tn;			/* number of wrapped limbs */
	  if (wn > 0)
	    {
	      c0 = mpn_sub_n (tp + tn, tp, np, wn);
	      mpn_decr_u (tp + wn, c0);
	    }
	}

      qp += in;
      qn -= in;

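      /* The low in limbs of N - D*Q vanish by construction; compute the
	 next dn limbs of the partial remainder, then develop the high qn
	 quotient limbs from them.  */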
      cy = mpn_sub_n (rp, np + in, tp + in, dn);
      mpn_mullo_n (qp, rp, ip, qn);		/* high qn quotient limbs */

      if (BELOW_THRESHOLD (qn, MUL_TO_MULMOD_BNM1_FOR_2NXN_THRESHOLD))
	mpn_mul (tp, dp, dn, qp, qn);		/* mulhigh */
      else
	{
	  tn = mpn_mulmod_bnm1_next_size (dn);
	  mpn_mulmod_bnm1 (tp, tn, dp, dn, qp, qn, scratch_out);
	  wn = dn + qn - tn;			/* number of wrapped limbs */
	  if (wn > 0)
	    {
	      c0 = mpn_sub_n (tp + tn, tp, rp, wn);
	      mpn_decr_u (tp + wn, c0);
	    }
	}

      cy += mpn_sub_n (rp, rp + qn, tp + qn, dn - qn);
      if (cy == 2)
	{
	  mpn_incr_u (tp + dn, 1);
	  cy = 1;
	}
      return mpn_sub_nc (rp + dn - qn, np + dn + in, tp + dn, qn, cy);

#undef ip
#undef tp
#undef scratch_out
    }
}

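/* Return the number of scratch limbs needed by mpn_mu_bdiv_qr for an
   nn-limb dividend and a dn-limb divisor.  This mirrors the allocation
   logic in the function above.  */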
mp_size_t
mpn_mu_bdiv_qr_itch (mp_size_t nn, mp_size_t dn)
{
  mp_size_t qn, in, tn, itch_binvert, itch_out, itches;
  mp_size_t b;

  qn = nn - dn;

  if (qn > dn)
    {
      b = (qn - 1) / dn + 1;	/* ceil(qn/dn), number of blocks */
      in = (qn - 1) / b + 1;	/* ceil(qn/b) = ceil(qn / ceil(qn/dn)) */
      if (BELOW_THRESHOLD (in, MUL_TO_MULMOD_BNM1_FOR_2NXN_THRESHOLD))
	{
	  tn = dn + in;
	  itch_out = 0;
	}
      else
	{
	  tn = mpn_mulmod_bnm1_next_size (dn);
	  itch_out = mpn_mulmod_bnm1_itch (tn, dn, in);
	}
      itch_binvert = mpn_binvert_itch (in);
      itches = tn + itch_out;
      return in + MAX (itches, itch_binvert);
    }
  else
    {
      in = qn - (qn >> 1);
      if (BELOW_THRESHOLD (in, MUL_TO_MULMOD_BNM1_FOR_2NXN_THRESHOLD))
	{
	  tn = dn + in;
	  itch_out = 0;
	}
      else
	{
	  tn = mpn_mulmod_bnm1_next_size (dn);
	  itch_out = mpn_mulmod_bnm1_itch (tn, dn, in);
	}
    }
  itch_binvert = mpn_binvert_itch (in);
  itches = tn + itch_out;
  return in + MAX (itches, itch_binvert);
}