#include "../bn_lcl.h"
#if !(defined(__GNUC__) && __GNUC__>=2)
# include "../bn_asm.c"	/* kind of dirty hack for Sun Studio */
#else
/*
 * x86_64 BIGNUM accelerator version 0.1, December 2002.
 *
 * Implemented by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
 * project.
 *
 * Rights for redistribution and usage in source and binary forms are
 * granted according to the OpenSSL license. Warranty of any kind is
 * disclaimed.
 *
 * Q. Version 0.1? It doesn't sound like Andy, he used to assign real
 *    versions, like 1.0...
 * A. Well, that's because this code is basically a quick-and-dirty
 *    proof-of-concept hack. As you can see, it's implemented with
 *    inline assembler, which means that you're bound to GCC and that
 *    there is likely plenty of room for further improvement.
 *
 * Q. Why inline assembler?
 * A. x86_64 has its own ABI, which I'm not familiar with. That is
 *    why I decided to let the compiler take care of the subroutine
 *    prologue/epilogue as well as register allocation. For reference,
 *    Win64 implements an ABI for AMD64 that differs from the Linux one.
 *
 * Q. How much faster does it get?
 * A. 'apps/openssl speed rsa dsa' output with no-asm:
 *
 *	                  sign    verify    sign/s verify/s
 *	rsa  512 bits   0.0006s   0.0001s   1683.8  18456.2
 *	rsa 1024 bits   0.0028s   0.0002s    356.0   6407.0
 *	rsa 2048 bits   0.0172s   0.0005s     58.0   1957.8
 *	rsa 4096 bits   0.1155s   0.0018s      8.7    555.6
 *	                  sign    verify    sign/s verify/s
 *	dsa  512 bits   0.0005s   0.0006s   2100.8   1768.3
 *	dsa 1024 bits   0.0014s   0.0018s    692.3    559.2
 *	dsa 2048 bits   0.0049s   0.0061s    204.7    165.0
 *
 *    'apps/openssl speed rsa dsa' output with this module:
 *
 *	                  sign    verify    sign/s verify/s
 *	rsa  512 bits   0.0004s   0.0000s   2767.1  33297.9
 *	rsa 1024 bits   0.0012s   0.0001s    867.4  14674.7
 *	rsa 2048 bits   0.0061s   0.0002s    164.0   5270.0
 *	rsa 4096 bits   0.0384s   0.0006s     26.1   1650.8
 *	                  sign    verify    sign/s verify/s
 *	dsa  512 bits   0.0002s   0.0003s   4442.2   3786.3
 *	dsa 1024 bits   0.0005s   0.0007s   1835.1   1497.4
 *	dsa 2048 bits   0.0016s   0.0020s    620.4    504.6
 *
 *    For reference: the IA-32 assembler implementation performs very
 *    much like 64-bit code compiled with no-asm on the same machine.
 */

#ifdef _WIN64
#define BN_ULONG unsigned long long
#else
#define BN_ULONG unsigned long
#endif
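
/*
 * Win64 is an LLP64 platform ("long" stays 32 bits even on a 64-bit
 * target), hence the "unsigned long long" above; the Unix-like x86_64
 * ABIs are LP64, where plain "unsigned long" is already 64 bits wide.
 */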

#undef mul
#undef mul_add

/*
 * "m"(a), "+m"(r)	is the way to favor DirectPath µ-code;
 * "g"(0)		lets the compiler decide where it wants to
 *			keep the value of zero;
 */
#define mul_add(r,a,word,carry) do {	\
	register BN_ULONG high,low;	\
	asm ("mulq %3"			\
		: "=a"(low),"=d"(high)	\
		: "a"(word),"m"(a)	\
		: "cc");		\
	asm ("addq %2,%0; adcq %3,%1"	\
		: "+r"(carry),"+d"(high)\
		: "a"(low),"g"(0)	\
		: "cc");		\
	asm ("addq %2,%0; adcq %3,%1"	\
		: "+m"(r),"+d"(high)	\
		: "r"(carry),"g"(0)	\
		: "cc");		\
	carry=high;			\
	} while (0)

#define mul(r,a,word,carry) do {	\
	register BN_ULONG high,low;	\
	asm ("mulq %3"			\
		: "=a"(low),"=d"(high)	\
		: "a"(word),"g"(a)	\
		: "cc");		\
	asm ("addq %2,%0; adcq %3,%1"	\
		: "+r"(carry),"+d"(high)\
		: "a"(low),"g"(0)	\
		: "cc");		\
	(r)=carry, carry=high;		\
	} while (0)

#define sqr(r0,r1,a)			\
	asm ("mulq %2"			\
		: "=a"(r0),"=d"(r1)	\
		: "a"(a)		\
		: "cc");
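
/*
 * Reference-only sketch of what the three macros above compute,
 * written with GCC's unsigned __int128 (an assumption: that extension
 * needs a much newer 64-bit GCC than the __GNUC__>=2 guard requires).
 * Kept under #if 0, like the original comba macros further down.
 */
#if 0
#define mul_add(r,a,word,carry) do {				\
	/* (carry_out:r) = a*word + r + carry_in */		\
	unsigned __int128 t;					\
	t = (unsigned __int128)(a)*(word) + (r) + (carry);	\
	(r) = (BN_ULONG)t;					\
	(carry) = (BN_ULONG)(t>>64);				\
	} while (0)

#define mul(r,a,word,carry) do {				\
	/* (carry_out:r) = a*word + carry_in */			\
	unsigned __int128 t;					\
	t = (unsigned __int128)(a)*(word) + (carry);		\
	(r) = (BN_ULONG)t;					\
	(carry) = (BN_ULONG)(t>>64);				\
	} while (0)

#define sqr(r0,r1,a) do {					\
	/* (r1:r0) = a^2 */					\
	unsigned __int128 t = (unsigned __int128)(a)*(a);	\
	(r0) = (BN_ULONG)t;					\
	(r1) = (BN_ULONG)(t>>64);				\
	} while (0)
#endif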

BN_ULONG bn_mul_add_words(BN_ULONG *rp, const BN_ULONG *ap, int num, BN_ULONG w)
	{
	BN_ULONG c1=0;

	if (num <= 0) return(c1);

	/* four words per iteration while at least four words remain */
	while (num&~3)
		{
		mul_add(rp[0],ap[0],w,c1);
		mul_add(rp[1],ap[1],w,c1);
		mul_add(rp[2],ap[2],w,c1);
		mul_add(rp[3],ap[3],w,c1);
		ap+=4; rp+=4; num-=4;
		}
	if (num)	/* mop up the remaining 1..3 words */
		{
		mul_add(rp[0],ap[0],w,c1); if (--num==0) return c1;
		mul_add(rp[1],ap[1],w,c1); if (--num==0) return c1;
		mul_add(rp[2],ap[2],w,c1); return c1;
		}

	return(c1);
	}

BN_ULONG bn_mul_words(BN_ULONG *rp, const BN_ULONG *ap, int num, BN_ULONG w)
	{
	BN_ULONG c1=0;

	if (num <= 0) return(c1);

	while (num&~3)
		{
		mul(rp[0],ap[0],w,c1);
		mul(rp[1],ap[1],w,c1);
		mul(rp[2],ap[2],w,c1);
		mul(rp[3],ap[3],w,c1);
		ap+=4; rp+=4; num-=4;
		}
	if (num)
		{
		mul(rp[0],ap[0],w,c1); if (--num == 0) return c1;
		mul(rp[1],ap[1],w,c1); if (--num == 0) return c1;
		mul(rp[2],ap[2],w,c1);
		}
	return(c1);
	}

void bn_sqr_words(BN_ULONG *r, const BN_ULONG *a, int n)
	{
	if (n <= 0) return;

	while (n&~3)
		{
		sqr(r[0],r[1],a[0]);
		sqr(r[2],r[3],a[1]);
		sqr(r[4],r[5],a[2]);
		sqr(r[6],r[7],a[3]);
		a+=4; r+=8; n-=4;
		}
	if (n)
		{
		sqr(r[0],r[1],a[0]); if (--n == 0) return;
		sqr(r[2],r[3],a[1]); if (--n == 0) return;
		sqr(r[4],r[5],a[2]);
		}
	}

BN_ULONG bn_div_words(BN_ULONG h, BN_ULONG l, BN_ULONG d)
{	BN_ULONG ret,waste;

	asm ("divq	%4"
		: "=a"(ret),"=d"(waste)
		: "a"(l),"d"(h),"g"(d)
		: "cc");

	return ret;
}
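
/*
 * divq computes the 128-by-64-bit quotient floor((h*2^64+l)/d); the
 * remainder lands in %rdx and is discarded into "waste".  The caller
 * must guarantee h < d, otherwise the quotient does not fit in 64
 * bits and the CPU raises a divide-error (#DE) exception.  A
 * reference-only portable equivalent, under the same __int128
 * assumption as above:
 */
#if 0
BN_ULONG bn_div_words(BN_ULONG h, BN_ULONG l, BN_ULONG d)
{
	/* same h < d precondition as the divq version */
	return (BN_ULONG)((((unsigned __int128)h<<64)|l)/d);
}
#endif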

BN_ULONG bn_add_words (BN_ULONG *rp, const BN_ULONG *ap, const BN_ULONG *bp,int n)
{ BN_ULONG ret=0,i=0;

	if (n <= 0) return 0;

	asm (
	"	subq	%2,%2		\n"
	".p2align 4			\n"
	"1:	movq	(%4,%2,8),%0	\n"
	"	adcq	(%5,%2,8),%0	\n"
	"	movq	%0,(%3,%2,8)	\n"
	"	leaq	1(%2),%2	\n"
	"	loop	1b		\n"
	"	sbbq	%0,%0		\n"
		: "=&a"(ret),"+c"(n),"=&r"(i)
		: "r"(rp),"r"(ap),"r"(bp)
		: "cc"
	);

	return ret&1;
}
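
/*
 * The loop above keeps a single carry chain alive across iterations:
 * "subq %2,%2" zeroes the index and clears CF, adcq consumes and
 * regenerates CF, and leaq/loop advance the counters without touching
 * the flags; the final sbbq spreads CF into a whole word, hence
 * "return ret&1".  A reference-only portable rendition of the same
 * contract (rp[] = ap[] + bp[], final carry returned):
 */
#if 0
BN_ULONG bn_add_words(BN_ULONG *rp, const BN_ULONG *ap,
	const BN_ULONG *bp, int n)
{
	BN_ULONG carry=0;
	int i;

	for (i = 0; i < n; i++)
		{
		unsigned __int128 t;
		t = (unsigned __int128)ap[i] + bp[i] + carry;
		rp[i] = (BN_ULONG)t;
		carry = (BN_ULONG)(t>>64);	/* 0 or 1 */
		}
	return carry;
}
#endif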

#ifndef SIMICS
BN_ULONG bn_sub_words (BN_ULONG *rp, const BN_ULONG *ap, const BN_ULONG *bp,int n)
{ BN_ULONG ret=0,i=0;

	if (n <= 0) return 0;

	asm (
	"	subq	%2,%2		\n"
	".p2align 4			\n"
	"1:	movq	(%4,%2,8),%0	\n"
	"	sbbq	(%5,%2,8),%0	\n"
	"	movq	%0,(%3,%2,8)	\n"
	"	leaq	1(%2),%2	\n"
	"	loop	1b		\n"
	"	sbbq	%0,%0		\n"
		: "=&a"(ret),"+c"(n),"=&r"(i)
		: "r"(rp),"r"(ap),"r"(bp)
		: "cc"
	);

	return ret&1;
}
#else
/* Simics 1.4<7 has buggy sbbq:-( */
#define BN_MASK2 0xffffffffffffffffL
BN_ULONG bn_sub_words(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b, int n)
	{
	BN_ULONG t1,t2;
	int c=0;

	if (n <= 0) return((BN_ULONG)0);

	for (;;)
		{
		t1=a[0]; t2=b[0];
		r[0]=(t1-t2-c)&BN_MASK2;
		if (t1 != t2) c=(t1 < t2);
		if (--n <= 0) break;

		t1=a[1]; t2=b[1];
		r[1]=(t1-t2-c)&BN_MASK2;
		if (t1 != t2) c=(t1 < t2);
		if (--n <= 0) break;

		t1=a[2]; t2=b[2];
		r[2]=(t1-t2-c)&BN_MASK2;
		if (t1 != t2) c=(t1 < t2);
		if (--n <= 0) break;

		t1=a[3]; t2=b[3];
		r[3]=(t1-t2-c)&BN_MASK2;
		if (t1 != t2) c=(t1 < t2);
		if (--n <= 0) break;

		a+=4;
		b+=4;
		r+=4;
		}
	return(c);
	}
#endif

/* mul_add_c(a,b,c0,c1,c2)  -- c+=a*b for three-word number c=(c2,c1,c0) */
/* mul_add_c2(a,b,c0,c1,c2) -- c+=2*a*b for three-word number c=(c2,c1,c0) */
/* sqr_add_c(a,i,c0,c1,c2)  -- c+=a[i]^2 for three-word number c=(c2,c1,c0) */
/* sqr_add_c2(a,i,j,c0,c1,c2) -- c+=2*a[i]*a[j] for three-word number c=(c2,c1,c0) */

#if 0
/* original macros are kept for reference purposes */
#define mul_add_c(a,b,c0,c1,c2) {	\
	BN_ULONG ta=(a),tb=(b);		\
	t1 = ta * tb;			\
	t2 = BN_UMULT_HIGH(ta,tb);	\
	c0 += t1; t2 += (c0<t1)?1:0;	\
	c1 += t2; c2 += (c1<t2)?1:0;	\
	}

#define mul_add_c2(a,b,c0,c1,c2) {	\
	BN_ULONG ta=(a),tb=(b),t0;	\
	t1 = BN_UMULT_HIGH(ta,tb);	\
	t0 = ta * tb;			\
	t2 = t1+t1; c2 += (t2<t1)?1:0;	\
	t1 = t0+t0; t2 += (t1<t0)?1:0;	\
	c0 += t1; t2 += (c0<t1)?1:0;	\
	c1 += t2; c2 += (c1<t2)?1:0;	\
	}
#else
#define mul_add_c(a,b,c0,c1,c2)	do {	\
	asm ("mulq %3"			\
		: "=a"(t1),"=d"(t2)	\
		: "a"(a),"m"(b)		\
		: "cc");		\
	asm ("addq %2,%0; adcq %3,%1"	\
		: "+r"(c0),"+d"(t2)	\
		: "a"(t1),"g"(0)	\
		: "cc");		\
	asm ("addq %2,%0; adcq %3,%1"	\
		: "+r"(c1),"+r"(c2)	\
		: "d"(t2),"g"(0)	\
		: "cc");		\
	} while (0)

#define sqr_add_c(a,i,c0,c1,c2)	do {	\
	asm ("mulq %2"			\
		: "=a"(t1),"=d"(t2)	\
		: "a"(a[i])		\
		: "cc");		\
	asm ("addq %2,%0; adcq %3,%1"	\
		: "+r"(c0),"+d"(t2)	\
		: "a"(t1),"g"(0)	\
		: "cc");		\
	asm ("addq %2,%0; adcq %3,%1"	\
		: "+r"(c1),"+r"(c2)	\
		: "d"(t2),"g"(0)	\
		: "cc");		\
	} while (0)

#define mul_add_c2(a,b,c0,c1,c2) do {	\
	asm ("mulq %3"			\
		: "=a"(t1),"=d"(t2)	\
		: "a"(a),"m"(b)		\
		: "cc");		\
	asm ("addq %0,%0; adcq %2,%1"	\
		: "+d"(t2),"+r"(c2)	\
		: "g"(0)		\
		: "cc");		\
	asm ("addq %0,%0; adcq %2,%1"	\
		: "+a"(t1),"+d"(t2)	\
		: "g"(0)		\
		: "cc");		\
	asm ("addq %2,%0; adcq %3,%1"	\
		: "+r"(c0),"+d"(t2)	\
		: "a"(t1),"g"(0)	\
		: "cc");		\
	asm ("addq %2,%0; adcq %3,%1"	\
		: "+r"(c1),"+r"(c2)	\
		: "d"(t2),"g"(0)	\
		: "cc");		\
	} while (0)
#endif

#define sqr_add_c2(a,i,j,c0,c1,c2)	\
	mul_add_c2((a)[i],(a)[j],c0,c1,c2)
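
/*
 * The comba routines below evaluate the product one output column at
 * a time: for r[k] they accumulate every a[i]*b[j] with i+j == k into
 * a three-word accumulator whose roles rotate through (c1,c2,c3),
 * store the low word, and move on.  In the squaring variants,
 * sqr_add_c adds the diagonal term a[i]^2 once while sqr_add_c2 adds
 * each off-diagonal product a[i]*a[j] (i != j) twice.  A reference-only
 * sketch of the same scheme with a generic loop in place of the
 * unrolled code (bn_mul_comba_ref is a hypothetical helper, not part
 * of this file's interface):
 */
#if 0
static void bn_mul_comba_ref(BN_ULONG *r, const BN_ULONG *a,
	const BN_ULONG *b, int n)
{
	BN_ULONG t1,t2;
	BN_ULONG c1=0,c2=0,c3=0;
	int i,k;

	for (k = 0; k < 2*n-1; k++)
		{
		for (i = 0; i < n; i++)
			if (k-i >= 0 && k-i < n)
				mul_add_c(a[i],b[k-i],c1,c2,c3);
		r[k] = c1;
		c1 = c2; c2 = c3; c3 = 0;	/* shift instead of rotating */
		}
	r[2*n-1] = c1;	/* final carry word */
}
#endif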

void bn_mul_comba8(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b)
	{
	BN_ULONG t1,t2;
	BN_ULONG c1,c2,c3;

	c1=0;
	c2=0;
	c3=0;
	mul_add_c(a[0],b[0],c1,c2,c3);
	r[0]=c1;
	c1=0;
	mul_add_c(a[0],b[1],c2,c3,c1);
	mul_add_c(a[1],b[0],c2,c3,c1);
	r[1]=c2;
	c2=0;
	mul_add_c(a[2],b[0],c3,c1,c2);
	mul_add_c(a[1],b[1],c3,c1,c2);
	mul_add_c(a[0],b[2],c3,c1,c2);
	r[2]=c3;
	c3=0;
	mul_add_c(a[0],b[3],c1,c2,c3);
	mul_add_c(a[1],b[2],c1,c2,c3);
	mul_add_c(a[2],b[1],c1,c2,c3);
	mul_add_c(a[3],b[0],c1,c2,c3);
	r[3]=c1;
	c1=0;
	mul_add_c(a[4],b[0],c2,c3,c1);
	mul_add_c(a[3],b[1],c2,c3,c1);
	mul_add_c(a[2],b[2],c2,c3,c1);
	mul_add_c(a[1],b[3],c2,c3,c1);
	mul_add_c(a[0],b[4],c2,c3,c1);
	r[4]=c2;
	c2=0;
	mul_add_c(a[0],b[5],c3,c1,c2);
	mul_add_c(a[1],b[4],c3,c1,c2);
	mul_add_c(a[2],b[3],c3,c1,c2);
	mul_add_c(a[3],b[2],c3,c1,c2);
	mul_add_c(a[4],b[1],c3,c1,c2);
	mul_add_c(a[5],b[0],c3,c1,c2);
	r[5]=c3;
	c3=0;
	mul_add_c(a[6],b[0],c1,c2,c3);
	mul_add_c(a[5],b[1],c1,c2,c3);
	mul_add_c(a[4],b[2],c1,c2,c3);
	mul_add_c(a[3],b[3],c1,c2,c3);
	mul_add_c(a[2],b[4],c1,c2,c3);
	mul_add_c(a[1],b[5],c1,c2,c3);
	mul_add_c(a[0],b[6],c1,c2,c3);
	r[6]=c1;
	c1=0;
	mul_add_c(a[0],b[7],c2,c3,c1);
	mul_add_c(a[1],b[6],c2,c3,c1);
	mul_add_c(a[2],b[5],c2,c3,c1);
	mul_add_c(a[3],b[4],c2,c3,c1);
	mul_add_c(a[4],b[3],c2,c3,c1);
	mul_add_c(a[5],b[2],c2,c3,c1);
	mul_add_c(a[6],b[1],c2,c3,c1);
	mul_add_c(a[7],b[0],c2,c3,c1);
	r[7]=c2;
	c2=0;
	mul_add_c(a[7],b[1],c3,c1,c2);
	mul_add_c(a[6],b[2],c3,c1,c2);
	mul_add_c(a[5],b[3],c3,c1,c2);
	mul_add_c(a[4],b[4],c3,c1,c2);
	mul_add_c(a[3],b[5],c3,c1,c2);
	mul_add_c(a[2],b[6],c3,c1,c2);
	mul_add_c(a[1],b[7],c3,c1,c2);
	r[8]=c3;
	c3=0;
	mul_add_c(a[2],b[7],c1,c2,c3);
	mul_add_c(a[3],b[6],c1,c2,c3);
	mul_add_c(a[4],b[5],c1,c2,c3);
	mul_add_c(a[5],b[4],c1,c2,c3);
	mul_add_c(a[6],b[3],c1,c2,c3);
	mul_add_c(a[7],b[2],c1,c2,c3);
	r[9]=c1;
	c1=0;
	mul_add_c(a[7],b[3],c2,c3,c1);
	mul_add_c(a[6],b[4],c2,c3,c1);
	mul_add_c(a[5],b[5],c2,c3,c1);
	mul_add_c(a[4],b[6],c2,c3,c1);
	mul_add_c(a[3],b[7],c2,c3,c1);
	r[10]=c2;
	c2=0;
	mul_add_c(a[4],b[7],c3,c1,c2);
	mul_add_c(a[5],b[6],c3,c1,c2);
	mul_add_c(a[6],b[5],c3,c1,c2);
	mul_add_c(a[7],b[4],c3,c1,c2);
	r[11]=c3;
	c3=0;
	mul_add_c(a[7],b[5],c1,c2,c3);
	mul_add_c(a[6],b[6],c1,c2,c3);
	mul_add_c(a[5],b[7],c1,c2,c3);
	r[12]=c1;
	c1=0;
	mul_add_c(a[6],b[7],c2,c3,c1);
	mul_add_c(a[7],b[6],c2,c3,c1);
	r[13]=c2;
	c2=0;
	mul_add_c(a[7],b[7],c3,c1,c2);
	r[14]=c3;
	r[15]=c1;
	}

void bn_mul_comba4(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b)
	{
	BN_ULONG t1,t2;
	BN_ULONG c1,c2,c3;

	c1=0;
	c2=0;
	c3=0;
	mul_add_c(a[0],b[0],c1,c2,c3);
	r[0]=c1;
	c1=0;
	mul_add_c(a[0],b[1],c2,c3,c1);
	mul_add_c(a[1],b[0],c2,c3,c1);
	r[1]=c2;
	c2=0;
	mul_add_c(a[2],b[0],c3,c1,c2);
	mul_add_c(a[1],b[1],c3,c1,c2);
	mul_add_c(a[0],b[2],c3,c1,c2);
	r[2]=c3;
	c3=0;
	mul_add_c(a[0],b[3],c1,c2,c3);
	mul_add_c(a[1],b[2],c1,c2,c3);
	mul_add_c(a[2],b[1],c1,c2,c3);
	mul_add_c(a[3],b[0],c1,c2,c3);
	r[3]=c1;
	c1=0;
	mul_add_c(a[3],b[1],c2,c3,c1);
	mul_add_c(a[2],b[2],c2,c3,c1);
	mul_add_c(a[1],b[3],c2,c3,c1);
	r[4]=c2;
	c2=0;
	mul_add_c(a[2],b[3],c3,c1,c2);
	mul_add_c(a[3],b[2],c3,c1,c2);
	r[5]=c3;
	c3=0;
	mul_add_c(a[3],b[3],c1,c2,c3);
	r[6]=c1;
	r[7]=c2;
	}
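
/*
 * A minimal, hypothetical self-test sketch (test_comba4 is not part
 * of this file): cross-check bn_mul_comba4 against a schoolbook
 * product built from bn_mul_words/bn_mul_add_words above; returns
 * non-zero on a match.
 */
#if 0
static int test_comba4(BN_ULONG a[4], BN_ULONG b[4])
{
	BN_ULONG want[8],got[8];
	int i;

	/* schoolbook: want = a*b, one row per word of b */
	want[4] = bn_mul_words(want,a,4,b[0]);
	for (i = 1; i < 4; i++)
		want[4+i] = bn_mul_add_words(want+i,a,4,b[i]);

	bn_mul_comba4(got,a,b);
	for (i = 0; i < 8; i++)
		if (want[i] != got[i])
			return 0;
	return 1;
}
#endif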

void bn_sqr_comba8(BN_ULONG *r, const BN_ULONG *a)
	{
	BN_ULONG t1,t2;
	BN_ULONG c1,c2,c3;

	c1=0;
	c2=0;
	c3=0;
	sqr_add_c(a,0,c1,c2,c3);
	r[0]=c1;
	c1=0;
	sqr_add_c2(a,1,0,c2,c3,c1);
	r[1]=c2;
	c2=0;
	sqr_add_c(a,1,c3,c1,c2);
	sqr_add_c2(a,2,0,c3,c1,c2);
	r[2]=c3;
	c3=0;
	sqr_add_c2(a,3,0,c1,c2,c3);
	sqr_add_c2(a,2,1,c1,c2,c3);
	r[3]=c1;
	c1=0;
	sqr_add_c(a,2,c2,c3,c1);
	sqr_add_c2(a,3,1,c2,c3,c1);
	sqr_add_c2(a,4,0,c2,c3,c1);
	r[4]=c2;
	c2=0;
	sqr_add_c2(a,5,0,c3,c1,c2);
	sqr_add_c2(a,4,1,c3,c1,c2);
	sqr_add_c2(a,3,2,c3,c1,c2);
	r[5]=c3;
	c3=0;
	sqr_add_c(a,3,c1,c2,c3);
	sqr_add_c2(a,4,2,c1,c2,c3);
	sqr_add_c2(a,5,1,c1,c2,c3);
	sqr_add_c2(a,6,0,c1,c2,c3);
	r[6]=c1;
	c1=0;
	sqr_add_c2(a,7,0,c2,c3,c1);
	sqr_add_c2(a,6,1,c2,c3,c1);
	sqr_add_c2(a,5,2,c2,c3,c1);
	sqr_add_c2(a,4,3,c2,c3,c1);
	r[7]=c2;
	c2=0;
	sqr_add_c(a,4,c3,c1,c2);
	sqr_add_c2(a,5,3,c3,c1,c2);
	sqr_add_c2(a,6,2,c3,c1,c2);
	sqr_add_c2(a,7,1,c3,c1,c2);
	r[8]=c3;
	c3=0;
	sqr_add_c2(a,7,2,c1,c2,c3);
	sqr_add_c2(a,6,3,c1,c2,c3);
	sqr_add_c2(a,5,4,c1,c2,c3);
	r[9]=c1;
	c1=0;
	sqr_add_c(a,5,c2,c3,c1);
	sqr_add_c2(a,6,4,c2,c3,c1);
	sqr_add_c2(a,7,3,c2,c3,c1);
	r[10]=c2;
	c2=0;
	sqr_add_c2(a,7,4,c3,c1,c2);
	sqr_add_c2(a,6,5,c3,c1,c2);
	r[11]=c3;
	c3=0;
	sqr_add_c(a,6,c1,c2,c3);
	sqr_add_c2(a,7,5,c1,c2,c3);
	r[12]=c1;
	c1=0;
	sqr_add_c2(a,7,6,c2,c3,c1);
	r[13]=c2;
	c2=0;
	sqr_add_c(a,7,c3,c1,c2);
	r[14]=c3;
	r[15]=c1;
	}

void bn_sqr_comba4(BN_ULONG *r, const BN_ULONG *a)
	{
	BN_ULONG t1,t2;
	BN_ULONG c1,c2,c3;

	c1=0;
	c2=0;
	c3=0;
	sqr_add_c(a,0,c1,c2,c3);
	r[0]=c1;
	c1=0;
	sqr_add_c2(a,1,0,c2,c3,c1);
	r[1]=c2;
	c2=0;
	sqr_add_c(a,1,c3,c1,c2);
	sqr_add_c2(a,2,0,c3,c1,c2);
	r[2]=c3;
	c3=0;
	sqr_add_c2(a,3,0,c1,c2,c3);
	sqr_add_c2(a,2,1,c1,c2,c3);
	r[3]=c1;
	c1=0;
	sqr_add_c(a,2,c2,c3,c1);
	sqr_add_c2(a,3,1,c2,c3,c1);
	r[4]=c2;
	c2=0;
	sqr_add_c2(a,3,2,c3,c1,c2);
	r[5]=c3;
	c3=0;
	sqr_add_c(a,3,c1,c2,c3);
	r[6]=c1;
	r[7]=c2;
	}
#endif