#!/usr/bin/env perl
#
# ====================================================================
# Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
# project. Rights for redistribution and usage in source and binary
# forms are granted according to the OpenSSL license.
# ====================================================================
#
# 2.22x RC4 tune-up:-) It should be noted though that my hand [as in
# "hand-coded assembler"] doesn't account for the whole improvement
# coefficient. It turned out that eliminating RC4_CHAR from the config
# line results in ~40% improvement (yes, even for the C implementation).
# Presumably it has everything to do with AMD cache architecture and
# RAW or whatever penalties. Once again! The module *requires* a config
# line *without* RC4_CHAR! As for the coding "secret," I bet on partial
# register arithmetic. For example, instead of 'inc %r8; and $255,%r8'
# I simply write 'inc %r8b'. Even though the optimization manual
# discourages operating on partial registers, it turned out to be the
# best bet. At least for AMD... How IA32E would perform remains to be
# seen...

# As was shown by Marc Bevand, reordering a couple of load operations
# results in an even higher performance gain of 3.3x:-) At least on
# Opteron... For reference, 1x in this case is RC4_CHAR C-code
# compiled with gcc 3.3.2, which performs at ~54MBps per 1GHz clock.
# The latter means that if you want to *estimate* what to expect from
# *your* Opteron, multiply 54 by 3.3 and by the clock frequency in
# GHz: a 2.2GHz part, for example, would be estimated at 54*3.3*2.2,
# i.e. ~392MBps.

# The Intel P4 EM64T core was found to run the AMD64 code really
# slow... The only way to achieve comparable performance on P4 was to
# keep RC4_CHAR. Kind of ironic, huh? As it's apparently impossible to
# compose blended code which would perform within a 30% margin on both
# AMD and Intel platforms, I implement both cases. See rc4_skey.c for
# further details...

# The P4 EM64T core appears to be "allergic" to 64-bit inc/dec.
# Replacing those with add/sub results in a 50% performance improvement
# in the folded loop (hence the add/sub and lea forms in the RC4_CHAR
# code path below).

# As was shown by Zou Nanhai, loop unrolling can improve Intel EM64T
# performance by >30% [unlike the P4 32-bit case, that is]. But this is
# provided that loads are reordered even more aggressively! Both code
# paths, AMD64 and EM64T, reorder loads in essentially the same manner
# as my IA-64 implementation. On Opteron this resulted in a modest 5%
# improvement [I had to test it], while final Intel P4 performance now
# achieves a respectable 432MBps on a 2.8GHz processor. For reference:
# if executed on Xeon, the current RC4_CHAR code path is 2.7x faster
# than the RC4_INT code path, while if executed on Opteron, it's only
# 25% slower than the RC4_INT one [meaning that if CPU µ-arch detection
# is not implemented, this final RC4_CHAR code path should be
# preferred, as it provides better *all-round* performance].

$output=shift;
open STDOUT,"| $^X ../perlasm/x86_64-xlate.pl $output";
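# The open above pipes everything this script prints through the
# perlasm translator, which turns the "perlasm" text below into real
# assembler; $^X is the path of the currently running perl, and the
# script argument is handed on to the translator untouched.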

$dat="%rdi";	    # arg1
$len="%rsi";	    # arg2
$inp="%rdx";	    # arg3
$out="%rcx";	    # arg4

@XX=("%r8","%r10");
@TX=("%r9","%r11");
$YY="%r12";
$TY="%r13";
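# What follows implements the standard RC4 PRGA, with $XX[0] and $YY
# holding the i/j state indices. As a rough reference sketch (plain
# Perl, not part of the generated code; @S stands for the 256-entry
# key schedule), each output byte is produced like this:
#
#	$i = ($i+1) & 0xff;
#	$j = ($j+$S[$i]) & 0xff;
#	@S[$i,$j] = @S[$j,$i];			# swap S[i] and S[j]
#	$out .= chr($S[($S[$i]+$S[$j]) & 0xff] ^ ord($in));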

$code=<<___;
.text

.globl	RC4
.type	RC4,\@function,4
.align	16
RC4:	or	$len,$len
	jne	.Lentry
	ret
.Lentry:
	push	%r12
	push	%r13

	add	\$8,$dat
	movl	-8($dat),$XX[0]#d
	movl	-4($dat),$YY#d
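	# key schedules laid out byte-per-entry (RC4_CHAR) appear to be
	# tagged by the key set-up with a -1 marker dword at byte offset
	# 256 (see rc4_skey.c); test it here to pick the matching path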
	cmpl	\$-1,256($dat)
	je	.LRC4_CHAR
	inc	$XX[0]#b
	movl	($dat,$XX[0],4),$TX[0]#d
	test	\$-8,$len
	jz	.Lloop1
	jmp	.Lloop8
.align	16
.Lloop8:
___
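# Each pass of the loop below emits one of 8 unrolled rounds; the 8
# keystream bytes are collected in %rax one "movb ...,%al"/"ror $8,%rax"
# pair at a time, so the loop tail can xor and store 8 bytes of input
# with a single 64-bit operation each.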
for ($i=0;$i<8;$i++) {
$code.=<<___;
	add	$TX[0]#b,$YY#b
	mov	$XX[0],$XX[1]
	movl	($dat,$YY,4),$TY#d
	ror	\$8,%rax			# ror is redundant when $i=0
	inc	$XX[1]#b
	movl	($dat,$XX[1],4),$TX[1]#d
	cmp	$XX[1],$YY
	movl	$TX[0]#d,($dat,$YY,4)
	cmove	$TX[0],$TX[1]
	movl	$TY#d,($dat,$XX[0],4)
	add	$TX[0]#b,$TY#b
	movb	($dat,$TY,4),%al
___
push(@TX,shift(@TX)); push(@XX,shift(@XX));	# "rotate" registers
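# Rotating @TX/@XX makes $TX[1]/$XX[1] of this round the $TX[0]/$XX[0]
# of the next one, which is how the load of the next round's S[i] gets
# hoisted ahead of the current round's stores.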
}
$code.=<<___;
	ror	\$8,%rax
	sub	\$8,$len

	xor	($inp),%rax
	add	\$8,$inp
	mov	%rax,($out)
	add	\$8,$out

	test	\$-8,$len
	jnz	.Lloop8
	cmp	\$0,$len
	jne	.Lloop1
___
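# $XX[0] is kept pre-incremented throughout the loops, so .Lexit backs
# it off by one before storing the indices back into the key structure.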
$code.=<<___;
.Lexit:
	sub	\$1,$XX[0]#b
	movl	$XX[0]#d,-8($dat)
	movl	$YY#d,-4($dat)

	pop	%r13
	pop	%r12
	ret
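	# byte-at-a-time tail loop: handles the 1..7 residual bytes left
	# over by .Lloop8 as well as inputs shorter than 8 bytes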
.align	16
.Lloop1:
	add	$TX[0]#b,$YY#b
	movl	($dat,$YY,4),$TY#d
	movl	$TX[0]#d,($dat,$YY,4)
	movl	$TY#d,($dat,$XX[0],4)
	add	$TY#b,$TX[0]#b
	inc	$XX[0]#b
	movl	($dat,$TX[0],4),$TY#d
	movl	($dat,$XX[0],4),$TX[0]#d
	xorb	($inp),$TY#b
	inc	$inp
	movb	$TY#b,($out)
	inc	$out
	dec	$len
	jnz	.Lloop1
	jmp	.Lexit

.align	16
.LRC4_CHAR:
	add	\$1,$XX[0]#b
	movzb	($dat,$XX[0]),$TX[0]#d
	test	\$-8,$len
	jz	.Lcloop1
	push	%rbx
	jmp	.Lcloop8
.align	16
.Lcloop8:
	mov	($inp),%eax
	mov	4($inp),%ebx
___
# unroll 2x4-wise, because 64-bit rotates kill Intel P4...
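# Each 4-round group below works on a preloaded dword of input: "xor
# ($dat,$TY),%al" merges one keystream byte into the low input byte and
# "ror $8,%eax" rotates the next input byte into %al, so after four
# rounds %eax (resp. %ebx for the second group) holds 4 output bytes in
# the right order.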
for ($i=0;$i<4;$i++) {
$code.=<<___;
	add	$TX[0]#b,$YY#b
	lea	1($XX[0]),$XX[1]
	movzb	($dat,$YY),$TY#d
	movzb	$XX[1]#b,$XX[1]#d
	movzb	($dat,$XX[1]),$TX[1]#d
	movb	$TX[0]#b,($dat,$YY)
	cmp	$XX[1],$YY
	movb	$TY#b,($dat,$XX[0])
	jne	.Lcmov$i			# Intel cmov is sloooow...
	mov	$TX[0],$TX[1]
.Lcmov$i:
	add	$TX[0]#b,$TY#b
	xor	($dat,$TY),%al
	ror	\$8,%eax
___
push(@TX,shift(@TX)); push(@XX,shift(@XX));	# "rotate" registers
}
for ($i=4;$i<8;$i++) {
$code.=<<___;
	add	$TX[0]#b,$YY#b
	lea	1($XX[0]),$XX[1]
	movzb	($dat,$YY),$TY#d
	movzb	$XX[1]#b,$XX[1]#d
	movzb	($dat,$XX[1]),$TX[1]#d
	movb	$TX[0]#b,($dat,$YY)
	cmp	$XX[1],$YY
	movb	$TY#b,($dat,$XX[0])
	jne	.Lcmov$i			# Intel cmov is sloooow...
	mov	$TX[0],$TX[1]
.Lcmov$i:
	add	$TX[0]#b,$TY#b
	xor	($dat,$TY),%bl
	ror	\$8,%ebx
___
push(@TX,shift(@TX)); push(@XX,shift(@XX));	# "rotate" registers
}
$code.=<<___;
	lea	-8($len),$len
	mov	%eax,($out)
	lea	8($inp),$inp
	mov	%ebx,4($out)
	lea	8($out),$out

	test	\$-8,$len
	jnz	.Lcloop8
	pop	%rbx
	cmp	\$0,$len
	jne	.Lcloop1
	jmp	.Lexit
___
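# byte-at-a-time tail for the RC4_CHAR path, mirroring .Lloop1 above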
$code.=<<___;
.align	16
.Lcloop1:
	add	$TX[0]#b,$YY#b
	movzb	($dat,$YY),$TY#d
	movb	$TX[0]#b,($dat,$YY)
	movb	$TY#b,($dat,$XX[0])
	add	$TX[0]#b,$TY#b
	add	\$1,$XX[0]#b
	movzb	($dat,$TY),$TY#d
	movzb	($dat,$XX[0]),$TX[0]#d
	xorb	($inp),$TY#b
	lea	1($inp),$inp
	movb	$TY#b,($out)
	lea	1($out),$out
	sub	\$1,$len
	jnz	.Lcloop1
	jmp	.Lexit
.size	RC4,.-RC4
___

$code =~ s/#([bwd])/$1/gm;
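# The substitution above resolves the #b/#w/#d width markers used in
# the perlasm text: e.g. "$YY#b" interpolates to "%r12#b", which then
# becomes the byte register "%r12b" (likewise #w for word and #d for
# dword registers).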

print $code;

close STDOUT;