@ ghash-armv4.S
@ /netbsd-src/crypto/external/bsd/openssl/lib/libcrypto/arch/arm/ghash-armv4.S
@ (revision bdc22b2e01993381dcefeff2bc9b56ca75a4235c)
#include "arm_arch.h"

.text
#if defined(__thumb2__) || defined(__clang__)
.syntax	unified
#endif
#if defined(__thumb2__)
.thumb
#else
.code	32
#endif

#ifdef  __clang__
#define ldrplb  ldrbpl
#define ldrneb  ldrbne
#endif

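@ rem_4bit[]: reduction constants for the 4-bit table-driven GHASH;
@ indexed by the nibble shifted out of Z and XORed into the top 16 bits.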
.type	rem_4bit,%object
.align	5
rem_4bit:
.short	0x0000,0x1C20,0x3840,0x2460
.short	0x7080,0x6CA0,0x48C0,0x54E0
.short	0xE100,0xFD20,0xD940,0xC560
.short	0x9180,0x8DA0,0xA9C0,0xB5E0
.size	rem_4bit,.-rem_4bit

.type	rem_4bit_get,%function
rem_4bit_get:
#if defined(__thumb2__)
	adr	r2,rem_4bit
#else
	sub	r2,pc,#8+32	@ &rem_4bit
#endif
	b	.Lrem_4bit_got
	nop
	nop
.size	rem_4bit_get,.-rem_4bit_get

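@ Expected C prototype (as declared in OpenSSL's gcm128.c):
@ void gcm_ghash_4bit(u64 Xi[2], const u128 Htable[16],
@                     const u8 *inp, size_t len);
@ per AAPCS: r0=Xi, r1=Htable, r2=inp, r3=len (len a multiple of 16)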
.globl	gcm_ghash_4bit
.type	gcm_ghash_4bit,%function
.align	4
gcm_ghash_4bit:
#if defined(__thumb2__)
	adr	r12,rem_4bit
#else
	sub	r12,pc,#8+48		@ &rem_4bit
#endif
	add	r3,r2,r3		@ r3 to point at the end
	stmdb	sp!,{r3,r4,r5,r6,r7,r8,r9,r10,r11,lr}		@ save r3/end too

	ldmia	r12,{r4,r5,r6,r7,r8,r9,r10,r11}		@ copy rem_4bit ...
	stmdb	sp!,{r4,r5,r6,r7,r8,r9,r10,r11}		@ ... to stack

	ldrb	r12,[r2,#15]
	ldrb	r14,[r0,#15]
.Louter:
	eor	r12,r12,r14
	and	r14,r12,#0xf0
	and	r12,r12,#0x0f
	mov	r3,#14

	add	r7,r1,r12,lsl#4
	ldmia	r7,{r4,r5,r6,r7}	@ load Htbl[nlo]
	add	r11,r1,r14
	ldrb	r12,[r2,#14]

	and	r14,r4,#0xf		@ rem
	ldmia	r11,{r8,r9,r10,r11}	@ load Htbl[nhi]
	add	r14,r14,r14
	eor	r4,r8,r4,lsr#4
	ldrh	r8,[sp,r14]		@ rem_4bit[rem]
	eor	r4,r4,r5,lsl#28
	ldrb	r14,[r0,#14]
	eor	r5,r9,r5,lsr#4
	eor	r5,r5,r6,lsl#28
	eor	r6,r10,r6,lsr#4
	eor	r6,r6,r7,lsl#28
	eor	r7,r11,r7,lsr#4
	eor	r12,r12,r14
	and	r14,r12,#0xf0
	and	r12,r12,#0x0f
	eor	r7,r7,r8,lsl#16

.Linner:
	add	r11,r1,r12,lsl#4
	and	r12,r4,#0xf		@ rem
	subs	r3,r3,#1
	add	r12,r12,r12
	ldmia	r11,{r8,r9,r10,r11}	@ load Htbl[nlo]
	eor	r4,r8,r4,lsr#4
	eor	r4,r4,r5,lsl#28
	eor	r5,r9,r5,lsr#4
	eor	r5,r5,r6,lsl#28
	ldrh	r8,[sp,r12]		@ rem_4bit[rem]
	eor	r6,r10,r6,lsr#4
#ifdef	__thumb2__
	it	pl
#endif
	ldrplb	r12,[r2,r3]
	eor	r6,r6,r7,lsl#28
	eor	r7,r11,r7,lsr#4

	add	r11,r1,r14
	and	r14,r4,#0xf		@ rem
	eor	r7,r7,r8,lsl#16	@ ^= rem_4bit[rem]
	add	r14,r14,r14
	ldmia	r11,{r8,r9,r10,r11}	@ load Htbl[nhi]
	eor	r4,r8,r4,lsr#4
#ifdef	__thumb2__
	it	pl
#endif
	ldrplb	r8,[r0,r3]
	eor	r4,r4,r5,lsl#28
	eor	r5,r9,r5,lsr#4
	ldrh	r9,[sp,r14]
	eor	r5,r5,r6,lsl#28
	eor	r6,r10,r6,lsr#4
	eor	r6,r6,r7,lsl#28
#ifdef	__thumb2__
	it	pl
#endif
	eorpl	r12,r12,r8
	eor	r7,r11,r7,lsr#4
#ifdef	__thumb2__
	itt	pl
#endif
	andpl	r14,r12,#0xf0
	andpl	r12,r12,#0x0f
	eor	r7,r7,r9,lsl#16	@ ^= rem_4bit[rem]
	bpl	.Linner

	ldr	r3,[sp,#32]		@ re-load r3/end
	add	r2,r2,#16
	mov	r14,r4
#if __ARM_ARCH__>=7 && defined(__ARMEL__)
	rev	r4,r4
	str	r4,[r0,#12]
#elif defined(__ARMEB__)
	str	r4,[r0,#12]
#else
	mov	r9,r4,lsr#8
	strb	r4,[r0,#12+3]
	mov	r10,r4,lsr#16
	strb	r9,[r0,#12+2]
	mov	r11,r4,lsr#24
	strb	r10,[r0,#12+1]
	strb	r11,[r0,#12]
#endif
	cmp	r2,r3
#if __ARM_ARCH__>=7 && defined(__ARMEL__)
	rev	r5,r5
	str	r5,[r0,#8]
#elif defined(__ARMEB__)
	str	r5,[r0,#8]
#else
	mov	r9,r5,lsr#8
	strb	r5,[r0,#8+3]
	mov	r10,r5,lsr#16
	strb	r9,[r0,#8+2]
	mov	r11,r5,lsr#24
	strb	r10,[r0,#8+1]
	strb	r11,[r0,#8]
#endif

#ifdef __thumb2__
	it	ne
#endif
	ldrneb	r12,[r2,#15]
#if __ARM_ARCH__>=7 && defined(__ARMEL__)
	rev	r6,r6
	str	r6,[r0,#4]
#elif defined(__ARMEB__)
	str	r6,[r0,#4]
#else
	mov	r9,r6,lsr#8
	strb	r6,[r0,#4+3]
	mov	r10,r6,lsr#16
	strb	r9,[r0,#4+2]
	mov	r11,r6,lsr#24
	strb	r10,[r0,#4+1]
	strb	r11,[r0,#4]
#endif

#if __ARM_ARCH__>=7 && defined(__ARMEL__)
	rev	r7,r7
	str	r7,[r0,#0]
#elif defined(__ARMEB__)
	str	r7,[r0,#0]
#else
	mov	r9,r7,lsr#8
	strb	r7,[r0,#0+3]
	mov	r10,r7,lsr#16
	strb	r9,[r0,#0+2]
	mov	r11,r7,lsr#24
	strb	r10,[r0,#0+1]
	strb	r11,[r0,#0]
#endif

	bne	.Louter

	add	sp,sp,#36
#if __ARM_ARCH__>=5
	ldmia	sp!,{r4,r5,r6,r7,r8,r9,r10,r11,pc}
#else
	ldmia	sp!,{r4,r5,r6,r7,r8,r9,r10,r11,lr}
	tst	lr,#1
	moveq	pc,lr			@ be binary compatible with V4, yet
.word	0xe12fff1e			@ interoperable with Thumb ISA:-)
#endif
.size	gcm_ghash_4bit,.-gcm_ghash_4bit

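@ Expected C prototype (as declared in OpenSSL's gcm128.c):
@ void gcm_gmult_4bit(u64 Xi[2], const u128 Htable[16]);
@ per AAPCS: r0=Xi, r1=Htable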
.globl	gcm_gmult_4bit
.type	gcm_gmult_4bit,%function
gcm_gmult_4bit:
	stmdb	sp!,{r4,r5,r6,r7,r8,r9,r10,r11,lr}
	ldrb	r12,[r0,#15]
	b	rem_4bit_get
.Lrem_4bit_got:
	and	r14,r12,#0xf0
	and	r12,r12,#0x0f
	mov	r3,#14

	add	r7,r1,r12,lsl#4
	ldmia	r7,{r4,r5,r6,r7}	@ load Htbl[nlo]
	ldrb	r12,[r0,#14]

	add	r11,r1,r14
	and	r14,r4,#0xf		@ rem
	ldmia	r11,{r8,r9,r10,r11}	@ load Htbl[nhi]
	add	r14,r14,r14
	eor	r4,r8,r4,lsr#4
	ldrh	r8,[r2,r14]	@ rem_4bit[rem]
	eor	r4,r4,r5,lsl#28
	eor	r5,r9,r5,lsr#4
	eor	r5,r5,r6,lsl#28
	eor	r6,r10,r6,lsr#4
	eor	r6,r6,r7,lsl#28
	eor	r7,r11,r7,lsr#4
	and	r14,r12,#0xf0
	eor	r7,r7,r8,lsl#16
	and	r12,r12,#0x0f

.Loop:
	add	r11,r1,r12,lsl#4
	and	r12,r4,#0xf		@ rem
	subs	r3,r3,#1
	add	r12,r12,r12
	ldmia	r11,{r8,r9,r10,r11}	@ load Htbl[nlo]
	eor	r4,r8,r4,lsr#4
	eor	r4,r4,r5,lsl#28
	eor	r5,r9,r5,lsr#4
	eor	r5,r5,r6,lsl#28
	ldrh	r8,[r2,r12]	@ rem_4bit[rem]
	eor	r6,r10,r6,lsr#4
#ifdef	__thumb2__
	it	pl
#endif
	ldrplb	r12,[r0,r3]
	eor	r6,r6,r7,lsl#28
	eor	r7,r11,r7,lsr#4

	add	r11,r1,r14
	and	r14,r4,#0xf		@ rem
	eor	r7,r7,r8,lsl#16	@ ^= rem_4bit[rem]
	add	r14,r14,r14
	ldmia	r11,{r8,r9,r10,r11}	@ load Htbl[nhi]
	eor	r4,r8,r4,lsr#4
	eor	r4,r4,r5,lsl#28
	eor	r5,r9,r5,lsr#4
	ldrh	r8,[r2,r14]	@ rem_4bit[rem]
	eor	r5,r5,r6,lsl#28
	eor	r6,r10,r6,lsr#4
	eor	r6,r6,r7,lsl#28
	eor	r7,r11,r7,lsr#4
#ifdef	__thumb2__
	itt	pl
#endif
	andpl	r14,r12,#0xf0
	andpl	r12,r12,#0x0f
	eor	r7,r7,r8,lsl#16	@ ^= rem_4bit[rem]
	bpl	.Loop
#if __ARM_ARCH__>=7 && defined(__ARMEL__)
	rev	r4,r4
	str	r4,[r0,#12]
#elif defined(__ARMEB__)
	str	r4,[r0,#12]
#else
	mov	r9,r4,lsr#8
	strb	r4,[r0,#12+3]
	mov	r10,r4,lsr#16
	strb	r9,[r0,#12+2]
	mov	r11,r4,lsr#24
	strb	r10,[r0,#12+1]
	strb	r11,[r0,#12]
#endif

#if __ARM_ARCH__>=7 && defined(__ARMEL__)
	rev	r5,r5
	str	r5,[r0,#8]
#elif defined(__ARMEB__)
	str	r5,[r0,#8]
#else
	mov	r9,r5,lsr#8
	strb	r5,[r0,#8+3]
	mov	r10,r5,lsr#16
	strb	r9,[r0,#8+2]
	mov	r11,r5,lsr#24
	strb	r10,[r0,#8+1]
	strb	r11,[r0,#8]
#endif

#if __ARM_ARCH__>=7 && defined(__ARMEL__)
	rev	r6,r6
	str	r6,[r0,#4]
#elif defined(__ARMEB__)
	str	r6,[r0,#4]
#else
	mov	r9,r6,lsr#8
	strb	r6,[r0,#4+3]
	mov	r10,r6,lsr#16
	strb	r9,[r0,#4+2]
	mov	r11,r6,lsr#24
	strb	r10,[r0,#4+1]
	strb	r11,[r0,#4]
#endif

#if __ARM_ARCH__>=7 && defined(__ARMEL__)
	rev	r7,r7
	str	r7,[r0,#0]
#elif defined(__ARMEB__)
	str	r7,[r0,#0]
#else
	mov	r9,r7,lsr#8
	strb	r7,[r0,#0+3]
	mov	r10,r7,lsr#16
	strb	r9,[r0,#0+2]
	mov	r11,r7,lsr#24
	strb	r10,[r0,#0+1]
	strb	r11,[r0,#0]
#endif

#if __ARM_ARCH__>=5
	ldmia	sp!,{r4,r5,r6,r7,r8,r9,r10,r11,pc}
#else
	ldmia	sp!,{r4,r5,r6,r7,r8,r9,r10,r11,lr}
	tst	lr,#1
	moveq	pc,lr			@ be binary compatible with V4, yet
.word	0xe12fff1e			@ interoperable with Thumb ISA:-)
#endif
.size	gcm_gmult_4bit,.-gcm_gmult_4bit
#if __ARM_MAX_ARCH__>=7
.arch	armv7-a
.fpu	neon

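@ Expected C prototype (as declared in OpenSSL's gcm128.c):
@ void gcm_init_neon(u128 Htable[16], const u64 H[2]);
@ r0=Htable, r1=H; stores the "twisted" H consumed by the NEON routines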
.globl	gcm_init_neon
.type	gcm_init_neon,%function
.align	4
gcm_init_neon:
	vld1.64	d7,[r1]!		@ load H
	vmov.i8	q8,#0xe1
	vld1.64	d6,[r1]
	vshl.i64	d17,#57
	vshr.u64	d16,#63		@ t0=0xc2....01
	vdup.8	q9,d7[7]
	vshr.u64	d26,d6,#63
	vshr.s8	q9,#7			@ broadcast carry bit
	vshl.i64	q3,q3,#1
	vand	q8,q8,q9
	vorr	d7,d26		@ H<<<=1
	veor	q3,q3,q8		@ twisted H
	vstmia	r0,{q3}

	bx	lr					@ bx lr
.size	gcm_init_neon,.-gcm_init_neon

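@ Expected C prototype (as declared in OpenSSL's gcm128.c):
@ void gcm_gmult_neon(u64 Xi[2], const u128 Htable[16]);
@ r0=Xi, r1=twisted H produced by gcm_init_neon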
.globl	gcm_gmult_neon
.type	gcm_gmult_neon,%function
.align	4
gcm_gmult_neon:
	vld1.64	d7,[r0]!		@ load Xi
	vld1.64	d6,[r0]!
	vmov.i64	d29,#0x0000ffffffffffff
	vldmia	r1,{d26,d27}	@ load twisted H
	vmov.i64	d30,#0x00000000ffffffff
#ifdef __ARMEL__
	vrev64.8	q3,q3
#endif
	vmov.i64	d31,#0x000000000000ffff
	veor	d28,d26,d27		@ Karatsuba pre-processing
	mov	r3,#16
	b	.Lgmult_neon
.size	gcm_gmult_neon,.-gcm_gmult_neon

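@ Expected C prototype (as declared in OpenSSL's gcm128.c):
@ void gcm_ghash_neon(u64 Xi[2], const u128 Htable[16],
@                     const u8 *inp, size_t len);
@ r0=Xi, r1=twisted H, r2=inp, r3=len (len a multiple of 16)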
.globl	gcm_ghash_neon
.type	gcm_ghash_neon,%function
.align	4
gcm_ghash_neon:
	vld1.64	d1,[r0]!		@ load Xi
	vld1.64	d0,[r0]!
	vmov.i64	d29,#0x0000ffffffffffff
	vldmia	r1,{d26,d27}	@ load twisted H
	vmov.i64	d30,#0x00000000ffffffff
#ifdef __ARMEL__
	vrev64.8	q0,q0
#endif
	vmov.i64	d31,#0x000000000000ffff
	veor	d28,d26,d27		@ Karatsuba pre-processing

.Loop_neon:
	vld1.64	d7,[r2]!		@ load inp
	vld1.64	d6,[r2]!
#ifdef __ARMEL__
	vrev64.8	q3,q3
#endif
	veor	q3,q0			@ inp^=Xi
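@ .Lgmult_neon: three 64x64->128 GF(2)[x] products (Karatsuba) assembled
@ from vmull.p8 8x8->16 partial products, then reduced mod the GCM polynomial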
.Lgmult_neon:
	vext.8	d16, d26, d26, #1	@ A1
	vmull.p8	q8, d16, d6		@ F = A1*B
	vext.8	d0, d6, d6, #1	@ B1
	vmull.p8	q0, d26, d0		@ E = A*B1
	vext.8	d18, d26, d26, #2	@ A2
	vmull.p8	q9, d18, d6		@ H = A2*B
	vext.8	d22, d6, d6, #2	@ B2
	vmull.p8	q11, d26, d22		@ G = A*B2
	vext.8	d20, d26, d26, #3	@ A3
	veor	q8, q8, q0		@ L = E + F
	vmull.p8	q10, d20, d6		@ J = A3*B
	vext.8	d0, d6, d6, #3	@ B3
	veor	q9, q9, q11		@ M = G + H
	vmull.p8	q0, d26, d0		@ I = A*B3
	veor	d16, d16, d17	@ t0 = (L) (P0 + P1) << 8
	vand	d17, d17, d29
	vext.8	d22, d6, d6, #4	@ B4
	veor	d18, d18, d19	@ t1 = (M) (P2 + P3) << 16
	vand	d19, d19, d30
	vmull.p8	q11, d26, d22		@ K = A*B4
	veor	q10, q10, q0		@ N = I + J
	veor	d16, d16, d17
	veor	d18, d18, d19
	veor	d20, d20, d21	@ t2 = (N) (P4 + P5) << 24
	vand	d21, d21, d31
	vext.8	q8, q8, q8, #15
	veor	d22, d22, d23	@ t3 = (K) (P6 + P7) << 32
	vmov.i64	d23, #0
	vext.8	q9, q9, q9, #14
	veor	d20, d20, d21
	vmull.p8	q0, d26, d6		@ D = A*B
	vext.8	q11, q11, q11, #12
	vext.8	q10, q10, q10, #13
	veor	q8, q8, q9
	veor	q10, q10, q11
	veor	q0, q0, q8
	veor	q0, q0, q10
	veor	d6,d6,d7	@ Karatsuba pre-processing
	vext.8	d16, d28, d28, #1	@ A1
	vmull.p8	q8, d16, d6		@ F = A1*B
	vext.8	d2, d6, d6, #1	@ B1
	vmull.p8	q1, d28, d2		@ E = A*B1
	vext.8	d18, d28, d28, #2	@ A2
	vmull.p8	q9, d18, d6		@ H = A2*B
	vext.8	d22, d6, d6, #2	@ B2
	vmull.p8	q11, d28, d22		@ G = A*B2
	vext.8	d20, d28, d28, #3	@ A3
	veor	q8, q8, q1		@ L = E + F
	vmull.p8	q10, d20, d6		@ J = A3*B
	vext.8	d2, d6, d6, #3	@ B3
	veor	q9, q9, q11		@ M = G + H
	vmull.p8	q1, d28, d2		@ I = A*B3
	veor	d16, d16, d17	@ t0 = (L) (P0 + P1) << 8
	vand	d17, d17, d29
	vext.8	d22, d6, d6, #4	@ B4
	veor	d18, d18, d19	@ t1 = (M) (P2 + P3) << 16
	vand	d19, d19, d30
	vmull.p8	q11, d28, d22		@ K = A*B4
	veor	q10, q10, q1		@ N = I + J
	veor	d16, d16, d17
	veor	d18, d18, d19
	veor	d20, d20, d21	@ t2 = (N) (P4 + P5) << 24
	vand	d21, d21, d31
	vext.8	q8, q8, q8, #15
	veor	d22, d22, d23	@ t3 = (K) (P6 + P7) << 32
	vmov.i64	d23, #0
	vext.8	q9, q9, q9, #14
	veor	d20, d20, d21
	vmull.p8	q1, d28, d6		@ D = A*B
	vext.8	q11, q11, q11, #12
	vext.8	q10, q10, q10, #13
	veor	q8, q8, q9
	veor	q10, q10, q11
	veor	q1, q1, q8
	veor	q1, q1, q10
	vext.8	d16, d27, d27, #1	@ A1
	vmull.p8	q8, d16, d7		@ F = A1*B
	vext.8	d4, d7, d7, #1	@ B1
	vmull.p8	q2, d27, d4		@ E = A*B1
	vext.8	d18, d27, d27, #2	@ A2
	vmull.p8	q9, d18, d7		@ H = A2*B
	vext.8	d22, d7, d7, #2	@ B2
	vmull.p8	q11, d27, d22		@ G = A*B2
	vext.8	d20, d27, d27, #3	@ A3
	veor	q8, q8, q2		@ L = E + F
	vmull.p8	q10, d20, d7		@ J = A3*B
	vext.8	d4, d7, d7, #3	@ B3
	veor	q9, q9, q11		@ M = G + H
	vmull.p8	q2, d27, d4		@ I = A*B3
	veor	d16, d16, d17	@ t0 = (L) (P0 + P1) << 8
	vand	d17, d17, d29
	vext.8	d22, d7, d7, #4	@ B4
	veor	d18, d18, d19	@ t1 = (M) (P2 + P3) << 16
	vand	d19, d19, d30
	vmull.p8	q11, d27, d22		@ K = A*B4
	veor	q10, q10, q2		@ N = I + J
	veor	d16, d16, d17
	veor	d18, d18, d19
	veor	d20, d20, d21	@ t2 = (N) (P4 + P5) << 24
	vand	d21, d21, d31
	vext.8	q8, q8, q8, #15
	veor	d22, d22, d23	@ t3 = (K) (P6 + P7) << 32
	vmov.i64	d23, #0
	vext.8	q9, q9, q9, #14
	veor	d20, d20, d21
	vmull.p8	q2, d27, d7		@ D = A*B
	vext.8	q11, q11, q11, #12
	vext.8	q10, q10, q10, #13
	veor	q8, q8, q9
	veor	q10, q10, q11
	veor	q2, q2, q8
	veor	q2, q2, q10
	veor	q1,q1,q0		@ Karatsuba post-processing
	veor	q1,q1,q2
	veor	d1,d1,d2
	veor	d4,d4,d3	@ Xh|Xl - 256-bit result

	@ equivalent of reduction_avx from ghash-x86_64.pl
	vshl.i64	q9,q0,#57		@ 1st phase
	vshl.i64	q10,q0,#62
	veor	q10,q10,q9		@
	vshl.i64	q9,q0,#63
	veor	q10, q10, q9		@
	veor	d1,d1,d20	@
	veor	d4,d4,d21

	vshr.u64	q10,q0,#1		@ 2nd phase
	veor	q2,q2,q0
	veor	q0,q0,q10		@
	vshr.u64	q10,q10,#6
	vshr.u64	q0,q0,#1		@
	veor	q0,q0,q2		@
	veor	q0,q0,q10		@

	subs	r3,#16
	bne	.Loop_neon

#ifdef __ARMEL__
	vrev64.8	q0,q0
#endif
	sub	r0,#16
	vst1.64	d1,[r0]!		@ write out Xi
	vst1.64	d0,[r0]

	bx	lr					@ bx lr
.size	gcm_ghash_neon,.-gcm_ghash_neon
#endif
.byte	71,72,65,83,72,32,102,111,114,32,65,82,77,118,52,47,78,69,79,78,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
.align	2
.align	2
