/*
 * Copyright (c) 2017 Thomas Pornin <pornin@bolet.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "inner.h"

#if BR_INT128 || BR_UMUL128

#if BR_INT128

/*
 * Compute x*y+v1+v2. Operands are 64-bit, and the result is 128-bit, with
 * the high word in "hi" and the low word in "lo". The sum cannot overflow
 * 128 bits, since (2^64-1)^2 + 2*(2^64-1) = 2^128 - 1.
 */
#define FMA1(hi, lo, x, y, v1, v2) do { \
		unsigned __int128 fmaz; \
		fmaz = (unsigned __int128)(x) * (unsigned __int128)(y) \
			+ (unsigned __int128)(v1) + (unsigned __int128)(v2); \
		(hi) = (uint64_t)(fmaz >> 64); \
		(lo) = (uint64_t)fmaz; \
	} while (0)

/*
 * Compute x1*y1+x2*y2+v1+v2. Operands are 64-bit, and the result is
 * 128-bit, with the high word in "hi" and the low word in "lo".
 *
 * Callers should ensure that the two inner products, and the v1 and v2
 * operands, are multiples of 4 (this is not used by this specific
 * definition but may help other implementations, e.g. the BR_UMUL128
 * variant below).
 */
#define FMA2(hi, lo, x1, y1, x2, y2, v1, v2) do { \
		unsigned __int128 fmaz; \
		fmaz = (unsigned __int128)(x1) * (unsigned __int128)(y1) \
			+ (unsigned __int128)(x2) * (unsigned __int128)(y2) \
			+ (unsigned __int128)(v1) + (unsigned __int128)(v2); \
		(hi) = (uint64_t)(fmaz >> 64); \
		(lo) = (uint64_t)fmaz; \
	} while (0)

#elif BR_UMUL128

#include <intrin.h>

/*
 * Compute x*y+v1+v2 (same contract as the BR_INT128 version) using the
 * MSVC intrinsics: _umul128() yields the 128-bit product, then v1 and
 * v2 are added in, with explicit carry propagation into the high word
 * via _addcarry_u64().
 */
#define FMA1(hi, lo, x, y, v1, v2) do { \
		uint64_t fmahi, fmalo; \
		unsigned char fmacc; \
		fmalo = _umul128((x), (y), &fmahi); \
		fmacc = _addcarry_u64(0, fmalo, (v1), &fmalo); \
		_addcarry_u64(fmacc, fmahi, 0, &fmahi); \
		fmacc = _addcarry_u64(0, fmalo, (v2), &(lo)); \
		_addcarry_u64(fmacc, fmahi, 0, &(hi)); \
	} while (0)

/*
 * Normally we should use _addcarry_u64() for FMA2 too, but it makes
 * Visual Studio crash. Instead we use this version, which leverages
 * the fact that the vx operands, and the products, are multiples of 4:
 * since the low two bits of every addend are zero, each addend can be
 * divided by 4 and the quarters summed without losing information; the
 * carries into the high word are then recovered from bits 62-63 of
 * that truncated sum (fmatt >> 62). This is unfortunately slower.
 */
#define FMA2(hi, lo, x1, y1, x2, y2, v1, v2) do { \
		uint64_t fma1hi, fma1lo; \
		uint64_t fma2hi, fma2lo; \
		uint64_t fmatt; \
		fma1lo = _umul128((x1), (y1), &fma1hi); \
		fma2lo = _umul128((x2), (y2), &fma2hi); \
		fmatt = (fma1lo >> 2) + (fma2lo >> 2) \
			+ ((v1) >> 2) + ((v2) >> 2); \
		(lo) = fmatt << 2; \
		(hi) = fma1hi + fma2hi + (fmatt >> 62); \
	} while (0)

/*
 * The FMA2 macro definition we would prefer to use, but it triggers
 * an internal compiler error in Visual Studio 2015.
 *
#define FMA2(hi, lo, x1, y1, x2, y2, v1, v2) do { \
		uint64_t fma1hi, fma1lo; \
		uint64_t fma2hi, fma2lo; \
		unsigned char fmacc; \
		fma1lo = _umul128((x1), (y1), &fma1hi); \
		fma2lo = _umul128((x2), (y2), &fma2hi); \
		fmacc = _addcarry_u64(0, fma1lo, (v1), &fma1lo); \
		_addcarry_u64(fmacc, fma1hi, 0, &fma1hi); \
		fmacc = _addcarry_u64(0, fma2lo, (v2), &fma2lo); \
		_addcarry_u64(fmacc, fma2hi, 0, &fma2hi); \
		fmacc = _addcarry_u64(0, fma1lo, fma2lo, &(lo)); \
		_addcarry_u64(fmacc, fma1hi, fma2hi, &(hi)); \
	} while (0)
 */

#endif

/* Mask that retains the low 62 bits of a 64-bit word. */
#define MASK62 ((uint64_t)0x3FFFFFFFFFFFFFFF)
/* Product of two 64-bit values, truncated to the low 62 bits. */
#define MUL62_lo(x, y) (((uint64_t)(x) * (uint64_t)(y)) & MASK62)

/*
 * Subtract b from a (both arrays of 'num' 62-bit words, little-endian),
 * and return the final borrow. If 'ctl32' is 0, then a[] is kept
 * unmodified, but the final borrow is still computed and returned.
 * The conditional write-back is done with a mask so that the memory
 * access pattern and timing do not depend on 'ctl32'.
 */
static uint32_t
i62_sub(uint64_t *a, const uint64_t *b, size_t num, uint32_t ctl32)
{
	uint64_t borrow, wmask;
	size_t k;

	/* Expand ctl32 (0 or 1) into an all-zeros / all-ones 64-bit mask. */
	ctl32 = -ctl32;
	wmask = (uint64_t)ctl32 | ((uint64_t)ctl32 << 32);
	borrow = 0;
	for (k = 0; k < num; k ++) {
		uint64_t wa, wd;

		wa = a[k];
		wd = wa - b[k] - borrow;
		/* Bit 63 of wd is set exactly when the subtraction wrapped. */
		borrow = wd >> 63;
		wd &= (uint64_t)0x3FFFFFFFFFFFFFFF;
		/* Constant-time select: store wd if wmask is all-ones,
		   otherwise keep the original word wa. */
		a[k] = wa ^ (wmask & (wd ^ wa));
	}
	return (uint32_t)borrow;
}

/*
 * Montgomery multiplication, over arrays of 62-bit values. The
 * destination array (d) must be distinct from the other operands
 * (x, y and m). All arrays are in little-endian format (least
 * significant word comes first) over 'num' words.
 *
 * m0i must be equal to -(1/m[0]) mod 2^62. Each of the 'num' outer
 * iterations folds in one word of x and divides by 2^62, so the result
 * is the Montgomery product x*y/2^(62*num) mod m, fully reduced.
 */
static void
montymul(uint64_t *d, const uint64_t *x, const uint64_t *y,
	const uint64_t *m, size_t num, uint64_t m0i)
{
	uint64_t dh;
	size_t u, num4;

	/*
	 * num4 is the largest integer of the form 4*k+1 that is <= num;
	 * the inner loop is unrolled four-way up to that index, with a
	 * word-at-a-time tail loop afterwards.
	 */
	num4 = 1 + ((num - 1) & ~(size_t)3);
	memset(d, 0, num * sizeof *d);
	dh = 0;
	for (u = 0; u < num; u ++) {
		size_t v;
		uint64_t f, xu;
		uint64_t r, zh;
		uint64_t hi, lo;

		/*
		 * Operands are pre-shifted left by 2 bits so that the
		 * products and addends fed to FMA2 are multiples of 4,
		 * a property the BR_UMUL128 FMA2 implementation relies
		 * upon.
		 */
		xu = x[u] << 2;
		/*
		 * f is the Montgomery factor: chosen so that
		 * d + x[u]*y + f*m is a multiple of 2^62.
		 */
		f = MUL62_lo(d[0] + MUL62_lo(x[u], y[0]), m0i) << 2;

		FMA2(hi, lo, xu, y[0], f, m[0], d[0] << 2, 0);
		r = hi;

		/* Four-way unrolled pass; each step writes the word one
		   slot down, implementing the division by 2^62. */
		for (v = 1; v < num4; v += 4) {
			FMA2(hi, lo, xu, y[v + 0],
				f, m[v + 0], d[v + 0] << 2, r << 2);
			r = hi + (r >> 62);
			d[v - 1] = lo >> 2;
			FMA2(hi, lo, xu, y[v + 1],
				f, m[v + 1], d[v + 1] << 2, r << 2);
			r = hi + (r >> 62);
			d[v + 0] = lo >> 2;
			FMA2(hi, lo, xu, y[v + 2],
				f, m[v + 2], d[v + 2] << 2, r << 2);
			r = hi + (r >> 62);
			d[v + 1] = lo >> 2;
			FMA2(hi, lo, xu, y[v + 3],
				f, m[v + 3], d[v + 3] << 2, r << 2);
			r = hi + (r >> 62);
			d[v + 2] = lo >> 2;
		}
		/* Tail loop for the remaining words (num not 1 mod 4). */
		for (; v < num; v ++) {
			FMA2(hi, lo, xu, y[v], f, m[v], d[v] << 2, r << 2);
			r = hi + (r >> 62);
			d[v - 1] = lo >> 2;
		}

		/* Fold the running carry into the top word. */
		zh = dh + r;
		d[num - 1] = zh & MASK62;
		dh = zh >> 62;
	}
	/*
	 * Final conditional subtraction of m, to bring the result into
	 * the 0..m-1 range. The inner (dry-run) i62_sub() call reports
	 * whether d >= m; subtraction is applied when that holds or when
	 * a top carry remains (dh != 0). Both passes always execute, so
	 * timing does not depend on the data (constant-time).
	 */
	i62_sub(d, m, num, (uint32_t)dh | NOT(i62_sub(d, m, num, 0)));
}

/*
 * Conversion back from Montgomery representation: x is replaced with
 * x/2^(62*num) mod m (one division by 2^62 per outer iteration).
 * m0i must be equal to -(1/m[0]) mod 2^62.
 */
static void
frommonty(uint64_t *x, const uint64_t *m, size_t num, uint64_t m0i)
{
	size_t u, v;

	for (u = 0; u < num; u ++) {
		uint64_t f, cc;

		/*
		 * f makes x + f*m a multiple of 2^62; it is shifted left
		 * by 2 bits so that FMA1 operands are multiples of 4
		 * (see the FMA2 remarks above).
		 */
		f = MUL62_lo(x[0], m0i) << 2;
		cc = 0;
		for (v = 0; v < num; v ++) {
			uint64_t hi, lo;

			FMA1(hi, lo, f, m[v], x[v] << 2, cc);
			cc = hi << 2;
			/* Words shift down one slot: division by 2^62. */
			if (v != 0) {
				x[v - 1] = lo >> 2;
			}
		}
		x[num - 1] = cc >> 2;
	}
	/*
	 * One conditional subtraction of m may be needed for full
	 * reduction: the inner (dry-run) i62_sub() call computes whether
	 * x >= m, and the outer call applies the subtraction only in
	 * that case. Both passes always execute (constant-time).
	 */
	i62_sub(x, m, num, NOT(i62_sub(x, m, num, 0)));
}

/*
 * see inner.h
 *
 * Constant-time modular exponentiation with 62-bit internal words:
 * x31 (i31 format, m31[0] holding the announced bit length) is
 * replaced with x^e mod m31, where e is a big-endian byte string of
 * length elen. tmp[] (twlen 64-bit words) provides all temporary
 * storage. Returns 1 on success, 0 if the temporary area is too small.
 */
uint32_t
br_i62_modpow_opt(uint32_t *x31, const unsigned char *e, size_t elen,
	const uint32_t *m31, uint32_t m0i31, uint64_t *tmp, size_t twlen)
{
	size_t u, mw31num, mw62num;
	uint64_t *x, *m, *t1, *t2;
	uint64_t m0i;
	uint32_t acc;
	int win_len, acc_len;

	/*
	 * Get modulus size, in words: mw31num counts 31-bit words,
	 * mw62num counts the 62-bit words holding the same value.
	 */
	mw31num = (m31[0] + 31) >> 5;
	mw62num = (mw31num + 1) >> 1;

	/*
	 * In order to apply this function, we must have enough room to
	 * copy the operand and modulus into the temporary array, along
	 * with at least two temporaries. If there is not enough room,
	 * switch to br_i31_modpow(). We also use br_i31_modpow() if the
	 * modulus length is not at least four words (94 bits or more).
	 */
	if (mw31num < 4 || (mw62num << 2) > twlen) {
		/*
		 * We assume here that we can split an aligned uint64_t
		 * into two properly aligned uint32_t. Since both types
		 * are supposed to have an exact width with no padding,
		 * then this property must hold.
		 */
		size_t txlen;

		txlen = mw31num + 1;
		if (twlen < txlen) {
			return 0;
		}
		br_i31_modpow(x31, e, elen, m31, m0i31,
			(uint32_t *)tmp, (uint32_t *)tmp + txlen);
		return 1;
	}

	/*
	 * Convert x to Montgomery representation: this means that
	 * we replace x with x*2^z mod m, where z is the smallest multiple
	 * of the word size such that 2^z >= m. We want to reuse the 31-bit
	 * functions here (for constant-time operation), but we need z
	 * for a 62-bit word size; hence the two br_i31_muladd_small()
	 * calls per 62-bit word (NOTE(review): each call appears to
	 * multiply x by 2^31 mod m — confirm against br_i31_muladd_small).
	 */
	for (u = 0; u < mw62num; u ++) {
		br_i31_muladd_small(x31, 0, m31);
		br_i31_muladd_small(x31, 0, m31);
	}

	/*
	 * Assemble operands into arrays of 62-bit words. Note that
	 * all the arrays of 62-bit words that we will handle here
	 * are without any leading size word. Two consecutive 31-bit
	 * words are packed into each 62-bit word.
	 *
	 * We also adjust tmp and twlen to account for the words used
	 * for these extra arrays.
	 */
	m = tmp;
	x = tmp + mw62num;
	tmp += (mw62num << 1);
	twlen -= (mw62num << 1);
	for (u = 0; u < mw31num; u += 2) {
		size_t v;

		v = u >> 1;
		if ((u + 1) == mw31num) {
			m[v] = (uint64_t)m31[u + 1];
			x[v] = (uint64_t)x31[u + 1];
		} else {
			m[v] = (uint64_t)m31[u + 1]
				+ ((uint64_t)m31[u + 2] << 31);
			x[v] = (uint64_t)x31[u + 1]
				+ ((uint64_t)x31[u + 2] << 31);
		}
	}

	/*
	 * Compute window size. We support windows up to 5 bits; for a
	 * window of size k bits, we need 2^k+1 temporaries (for k = 1,
	 * we use special code that uses only 2 temporaries).
	 */
	for (win_len = 5; win_len > 1; win_len --) {
		if ((((uint32_t)1 << win_len) + 1) * mw62num <= twlen) {
			break;
		}
	}

	t1 = tmp;
	t2 = tmp + mw62num;

	/*
	 * Compute m0i, which is equal to -(1/m0) mod 2^62. We were
	 * provided with m0i31, which already fulfills this property
	 * modulo 2^31; the single expression below is then sufficient
	 * (it lifts the inverse from 31 to 62 correct low bits, in the
	 * manner of one Newton/Hensel iteration).
	 */
	m0i = (uint64_t)m0i31;
	m0i = MUL62_lo(m0i, (uint64_t)2 + MUL62_lo(m0i, m[0]));

	/*
	 * Compute window contents. If the window has size one bit only,
	 * then t2 is set to x; otherwise, t2[0] is left untouched, and
	 * t2[k] is set to x^k (for k >= 1).
	 */
	if (win_len == 1) {
		memcpy(t2, x, mw62num * sizeof *x);
	} else {
		uint64_t *base;

		memcpy(t2 + mw62num, x, mw62num * sizeof *x);
		base = t2 + mw62num;
		for (u = 2; u < ((unsigned)1 << win_len); u ++) {
			montymul(base + mw62num, base, x, m, mw62num, m0i);
			base += mw62num;
		}
	}

	/*
	 * Set x to 1, in Montgomery representation. We again use the
	 * 31-bit code, then repack into 62-bit words as above.
	 */
	br_i31_zero(x31, m31[0]);
	x31[(m31[0] + 31) >> 5] = 1;
	br_i31_muladd_small(x31, 0, m31);
	if (mw31num & 1) {
		br_i31_muladd_small(x31, 0, m31);
	}
	for (u = 0; u < mw31num; u += 2) {
		size_t v;

		v = u >> 1;
		if ((u + 1) == mw31num) {
			x[v] = (uint64_t)x31[u + 1];
		} else {
			x[v] = (uint64_t)x31[u + 1]
				+ ((uint64_t)x31[u + 2] << 31);
		}
	}

	/*
	 * We process bits from most to least significant. At each
	 * loop iteration, we have acc_len bits in acc.
	 */
	acc = 0;
	acc_len = 0;
	while (acc_len > 0 || elen > 0) {
		int i, k;
		uint32_t bits;
		uint64_t mask1, mask2;

		/*
		 * Get the next bits (refill the accumulator one exponent
		 * byte at a time).
		 */
		k = win_len;
		if (acc_len < win_len) {
			if (elen > 0) {
				acc = (acc << 8) | *e ++;
				elen --;
				acc_len += 8;
			} else {
				/* Exponent exhausted: shorter last window. */
				k = acc_len;
			}
		}
		bits = (acc >> (acc_len - k)) & (((uint32_t)1 << k) - 1);
		acc_len -= k;

		/*
		 * We could get exactly k bits. Compute k squarings.
		 */
		for (i = 0; i < k; i ++) {
			montymul(t1, x, x, m, mw62num, m0i);
			memcpy(x, t1, mw62num * sizeof *x);
		}

		/*
		 * Window lookup: we want to set t2 to the window
		 * lookup value, assuming the bits are non-zero. If
		 * the window length is 1 bit only, then t2 is
		 * already set; otherwise, we do a constant-time lookup
		 * (every table entry is read; the mask keeps only the
		 * entry whose index equals 'bits').
		 */
		if (win_len > 1) {
			uint64_t *base;

			memset(t2, 0, mw62num * sizeof *t2);
			base = t2 + mw62num;
			for (u = 1; u < ((uint32_t)1 << k); u ++) {
				uint64_t mask;
				size_t v;

				mask = -(uint64_t)EQ(u, bits);
				for (v = 0; v < mw62num; v ++) {
					t2[v] |= mask & base[v];
				}
				base += mw62num;
			}
		}

		/*
		 * Multiply with the looked-up value. We keep the product
		 * only if the exponent bits are not all-zero (again with
		 * masks, to stay constant-time).
		 */
		montymul(t1, x, t2, m, mw62num, m0i);
		mask1 = -(uint64_t)EQ(bits, 0);
		mask2 = ~mask1;
		for (u = 0; u < mw62num; u ++) {
			x[u] = (mask1 & x[u]) | (mask2 & t1[u]);
		}
	}

	/*
	 * Convert back from Montgomery representation.
	 */
	frommonty(x, m, mw62num, m0i);

	/*
	 * Convert result into 31-bit words (undo the packing done at
	 * the start).
	 */
	for (u = 0; u < mw31num; u += 2) {
		uint64_t zw;

		zw = x[u >> 1];
		x31[u + 1] = (uint32_t)zw & 0x7FFFFFFF;
		if ((u + 1) < mw31num) {
			x31[u + 2] = (uint32_t)(zw >> 31);
		}
	}
	return 1;
}


#else

/* see inner.h */
uint32_t
br_i62_modpow_opt(uint32_t *x31, const unsigned char *e, size_t elen,
	const uint32_t *m31, uint32_t m0i31, uint64_t *tmp, size_t twlen)
{
	size_t words_needed;

	/*
	 * Fallback when no 128-bit multiplication primitive is
	 * available: delegate to the generic 31-bit implementation,
	 * reinterpreting the 64-bit temporary area as twice as many
	 * 32-bit words.
	 */
	words_needed = (m31[0] + 63) >> 5;
	if (twlen < words_needed) {
		return 0;
	}
	return br_i31_modpow_opt(x31, e, elen, m31, m0i31,
		(uint32_t *)tmp, twlen << 1);
}

#endif

/* see inner.h */
uint32_t
br_i62_modpow_opt_as_i31(uint32_t *x31, const unsigned char *e, size_t elen,
	const uint32_t *m31, uint32_t m0i31, uint32_t *tmp, size_t twlen)
{
	uint64_t *tmp64;

	/*
	 * As documented, this function expects the 'tmp' argument to be
	 * 64-bit aligned. This is OK since this function is internal (it
	 * is not part of BearSSL's public API). The temporary length is
	 * halved, since it is now counted in 64-bit words.
	 */
	tmp64 = (uint64_t *)tmp;
	return br_i62_modpow_opt(x31, e, elen, m31, m0i31,
		tmp64, twlen >> 1);
}