/* mpn_mod_1s_4p (ap, n, b, cps)
   Divide (ap, n) by b.  Return the single-limb remainder.
   Requires that b < B / 4.

   Contributed to the GNU project by Torbjorn Granlund.
   Based on a suggestion by Peter L. Montgomery.

   THE FUNCTIONS IN THIS FILE ARE INTERNAL WITH MUTABLE INTERFACES.  IT IS ONLY
   SAFE TO REACH THEM THROUGH DOCUMENTED INTERFACES.  IN FACT, IT IS ALMOST
   GUARANTEED THAT THEY WILL CHANGE OR DISAPPEAR IN A FUTURE GNU MP RELEASE.

Copyright 2008, 2009, 2010 Free Software Foundation, Inc.

This file is part of the GNU MP Library.

The GNU MP Library is free software; you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation; either version 3 of the License, or (at your
option) any later version.

The GNU MP Library is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
License for more details.

You should have received a copy of the GNU Lesser General Public License
along with the GNU MP Library.  If not, see http://www.gnu.org/licenses/.  */

#include "gmp.h"
#include "gmp-impl.h"
#include "longlong.h"

#include "mpn/sparc64/sparc64.h"
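
/* Illustrative usage sketch, not part of this file: mpn_mod_1s_4p expects the
   divisor pre-shifted left by cps[1], matching the shifted inverse stored in
   cps[0] (this is how the generic mpn_mod_1 dispatcher calls it).  The
   wrapper name below is hypothetical and the code is compiled out.  */
#if 0
static mp_limb_t
example_mod_1s_4p (mp_srcptr ap, mp_size_t n, mp_limb_t b)
{
  mp_limb_t cps[7];

  ASSERT (b <= (~(mp_limb_t) 0) / 4);	/* precondition: b < B / 4 */
  mpn_mod_1s_4p_cps (cps, b);
  return mpn_mod_1s_4p (ap, n, b << cps[1], cps);
}
#endif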

void
mpn_mod_1s_4p_cps (mp_limb_t cps[7], mp_limb_t b)
{
  mp_limb_t bi;
  mp_limb_t B1modb, B2modb, B3modb, B4modb, B5modb;
  int cnt;

  ASSERT (b <= (~(mp_limb_t) 0) / 4);

  count_leading_zeros (cnt, b);

  b <<= cnt;
  invert_limb (bi, b);

  /* B1modb == B << cnt (mod the shifted b); each udiv_rnnd_preinv step below
     multiplies by B and reduces, so Bkmodb == B^k << cnt (mod the shifted b)
     for k = 2..5.  */
  B1modb = -b * ((bi >> (GMP_LIMB_BITS-cnt)) | (CNST_LIMB(1) << cnt));
  ASSERT (B1modb <= b);		/* NB: not fully reduced mod b */
  udiv_rnnd_preinv (B2modb, B1modb, 0, b, bi);
  udiv_rnnd_preinv (B3modb, B2modb, 0, b, bi);
  udiv_rnnd_preinv (B4modb, B3modb, 0, b, bi);
  udiv_rnnd_preinv (B5modb, B4modb, 0, b, bi);

  /* Shift the table entries back down; cps[k+1] then holds B^k mod the
     original (unshifted) divisor, up to the reduction caveat above.  */
  cps[0] = bi;
  cps[1] = cnt;
  cps[2] = B1modb >> cnt;
  cps[3] = B2modb >> cnt;
  cps[4] = B3modb >> cnt;
  cps[5] = B4modb >> cnt;
  cps[6] = B5modb >> cnt;

  /* Sanity-check that the five table entries sum without carrying out of a
     limb.  */
#if WANT_ASSERT
  {
    int i;
    b = cps[2];
    for (i = 3; i <= 6; i++)
      {
	b += cps[i];
	ASSERT (b >= cps[i]);
      }
  }
#endif
}
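
/* Worked example (illustrative, 64-bit limbs assumed): for b = 10 we get
   cnt = 60 and the shifted divisor 10 << 60.  Since 2^64 mod 10 = 6 and
   6 * 6 mod 10 = 6, every B^k mod 10 (k >= 1) is 6, so cps[2] through cps[6]
   all come out as 6, with cps[0] the inverse of 10 << 60 and cps[1] = 60.  */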

mp_limb_t
mpn_mod_1s_4p (mp_srcptr ap, mp_size_t n, mp_limb_t b, mp_limb_t cps[7])
{
  mp_limb_t rh, rl, bi, ph, pl, ch, cl, r;
  mp_limb_t B1modb, B2modb, B3modb, B4modb, B5modb;
  mp_size_t i;
  int cnt;

  ASSERT (n >= 1);

  B1modb = cps[2];
  B2modb = cps[3];
  B3modb = cps[4];
  B4modb = cps[5];
  B5modb = cps[6];

  /* The two arms below are identical, except that the first one uses the
     sparc64-specific umul_ppmm_s (see sparc64.h) when b fits in 32 bits.  */
  if ((b >> 32) == 0)
    {
      /* Fold the leading 1 to 4 limbs (n mod 4, with 0 meaning 4) into
	 (rh,rl), so that the main loop below consumes exactly four limbs per
	 iteration.  */
      switch (n & 3)
	{
	case 0:
	  umul_ppmm_s (ph, pl, ap[n - 3], B1modb);
	  add_ssaaaa (ph, pl, ph, pl, 0, ap[n - 4]);
	  umul_ppmm_s (ch, cl, ap[n - 2], B2modb);
	  add_ssaaaa (ph, pl, ph, pl, ch, cl);
	  umul_ppmm_s (rh, rl, ap[n - 1], B3modb);
	  add_ssaaaa (rh, rl, rh, rl, ph, pl);
	  n -= 4;
	  break;
	case 1:
	  rh = 0;
	  rl = ap[n - 1];
	  n -= 1;
	  break;
	case 2:
	  rh = ap[n - 1];
	  rl = ap[n - 2];
	  n -= 2;
	  break;
	case 3:
	  umul_ppmm_s (ph, pl, ap[n - 2], B1modb);
	  add_ssaaaa (ph, pl, ph, pl, 0, ap[n - 3]);
	  umul_ppmm_s (rh, rl, ap[n - 1], B2modb);
	  add_ssaaaa (rh, rl, rh, rl, ph, pl);
	  n -= 3;
	  break;
	}

      for (i = n - 4; i >= 0; i -= 4)
	{
	  /* rr = ap[i]				< B
		+ ap[i+1] * (B mod b)		<= (B-1)(b-1)
		+ ap[i+2] * (B^2 mod b)		<= (B-1)(b-1)
		+ ap[i+3] * (B^3 mod b)		<= (B-1)(b-1)
		+ LO(rr)  * (B^4 mod b)		<= (B-1)(b-1)
		+ HI(rr)  * (B^5 mod b)		<= (B-1)(b-1)
	  */
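	  /* Equivalently, after each iteration rh * B + rl is congruent, mod
	     the original (unshifted) divisor, to the number formed by the
	     limbs from ap[i] up to the most significant limb.  */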
	  umul_ppmm_s (ph, pl, ap[i + 1], B1modb);
	  add_ssaaaa (ph, pl, ph, pl, 0, ap[i + 0]);

	  umul_ppmm_s (ch, cl, ap[i + 2], B2modb);
	  add_ssaaaa (ph, pl, ph, pl, ch, cl);

	  umul_ppmm_s (ch, cl, ap[i + 3], B3modb);
	  add_ssaaaa (ph, pl, ph, pl, ch, cl);

	  umul_ppmm_s (ch, cl, rl, B4modb);
	  add_ssaaaa (ph, pl, ph, pl, ch, cl);

	  umul_ppmm_s (rh, rl, rh, B5modb);
	  add_ssaaaa (rh, rl, rh, rl, ph, pl);
	}

      /* Fold the high limb: rh * B + rl == rh * (B mod b) + rl (mod b).  */
      umul_ppmm_s (rh, cl, rh, B1modb);
      add_ssaaaa (rh, rl, rh, rl, 0, cl);
    }
  else
    {
      switch (n & 3)
	{
	case 0:
	  umul_ppmm (ph, pl, ap[n - 3], B1modb);
	  add_ssaaaa (ph, pl, ph, pl, 0, ap[n - 4]);
	  umul_ppmm (ch, cl, ap[n - 2], B2modb);
	  add_ssaaaa (ph, pl, ph, pl, ch, cl);
	  umul_ppmm (rh, rl, ap[n - 1], B3modb);
	  add_ssaaaa (rh, rl, rh, rl, ph, pl);
	  n -= 4;
	  break;
	case 1:
	  rh = 0;
	  rl = ap[n - 1];
	  n -= 1;
	  break;
	case 2:
	  rh = ap[n - 1];
	  rl = ap[n - 2];
	  n -= 2;
	  break;
	case 3:
	  umul_ppmm (ph, pl, ap[n - 2], B1modb);
	  add_ssaaaa (ph, pl, ph, pl, 0, ap[n - 3]);
	  umul_ppmm (rh, rl, ap[n - 1], B2modb);
	  add_ssaaaa (rh, rl, rh, rl, ph, pl);
	  n -= 3;
	  break;
	}

      for (i = n - 4; i >= 0; i -= 4)
	{
	  /* rr = ap[i]				< B
		+ ap[i+1] * (B mod b)		<= (B-1)(b-1)
		+ ap[i+2] * (B^2 mod b)		<= (B-1)(b-1)
		+ ap[i+3] * (B^3 mod b)		<= (B-1)(b-1)
		+ LO(rr)  * (B^4 mod b)		<= (B-1)(b-1)
		+ HI(rr)  * (B^5 mod b)		<= (B-1)(b-1)
	  */
	  umul_ppmm (ph, pl, ap[i + 1], B1modb);
	  add_ssaaaa (ph, pl, ph, pl, 0, ap[i + 0]);

	  umul_ppmm (ch, cl, ap[i + 2], B2modb);
	  add_ssaaaa (ph, pl, ph, pl, ch, cl);

	  umul_ppmm (ch, cl, ap[i + 3], B3modb);
	  add_ssaaaa (ph, pl, ph, pl, ch, cl);

	  umul_ppmm (ch, cl, rl, B4modb);
	  add_ssaaaa (ph, pl, ph, pl, ch, cl);

	  umul_ppmm (rh, rl, rh, B5modb);
	  add_ssaaaa (rh, rl, rh, rl, ph, pl);
	}

      umul_ppmm (rh, cl, rh, B1modb);
      add_ssaaaa (rh, rl, rh, rl, 0, cl);
    }

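  /* Final reduction sketch: at this point rh * B + rl is congruent, mod the
     original divisor, to the whole input.  Shifting it left by cnt, reducing
     it by the normalized divisor b (whose precomputed inverse is bi), and
     shifting the result back down yields the remainder mod the original
     divisor.  */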
  bi = cps[0];
  cnt = cps[1];

  r = (rh << cnt) | (rl >> (GMP_LIMB_BITS - cnt));
  udiv_rnnd_preinv (r, r, rl << cnt, b, bi);

  return r >> cnt;
}
222