# xref: /onnv-gate/usr/src/common/openssl/crypto/sha/asm/sha512-sse2.pl (revision 2139:6243c3338933)
#!/usr/bin/env perl
#
# ====================================================================
# Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
# project. Rights for redistribution and usage in source and binary
# forms are granted according to the OpenSSL license.
# ====================================================================
#
# SHA512_Transform_SSE2.
#
# As the name suggests, this is an IA-32 SSE2 implementation of
# SHA512_Transform. Motivating factor for the undertaken effort was that
# SHA512 was observed to *consistently* perform *significantly* poorer
# than SHA256 [2x and slower is common] on 32-bit platforms. On 64-bit
# platforms on the other hand SHA512 tend to outperform SHA256 [~50%
# seem to be common improvement factor]. All this is perfectly natural,
# as SHA512 is a 64-bit algorithm. But isn't IA-32 SSE2 essentially
# a 64-bit instruction set? Is it rich enough to implement SHA512?
# If answer was "no," then you wouldn't have been reading this...
#
# Throughput performance in MBps (larger is better):
#
#		2.4GHz P4	1.4GHz AMD32	1.4GHz AMD64(*)
# SHA256/gcc(*)	54		43		59
# SHA512/gcc	17		23		92
# SHA512/sse2	61(**)		57(**)
# SHA512/icc	26		28
# SHA256/icc(*)	65		54
#
# (*)	AMD64 and SHA256 numbers are presented mostly for amusement or
#	reference purposes.
# (**)	I.e. it gives ~2-3x speed-up if compared with compiler generated
#	code. One can argue that hand-coded *non*-SSE2 implementation
#	would perform better than compiler generated one as well, and
#	that comparison is therefore not exactly fair. Well, as SHA512
#	puts enormous pressure on IA-32 GP register bank, I reckon that
#	hand-coded version wouldn't perform significantly better than
#	one compiled with icc, ~20% perhaps... So that this code would
#	still outperform it with distinguishing marginal. But feel free
#	to prove me wrong:-)
#						<appro@fy.chalmers.se>
42*2139Sjp161948push(@INC,"perlasm","../../perlasm");
43*2139Sjp161948require "x86asm.pl";
44*2139Sjp161948
45*2139Sjp161948&asm_init($ARGV[0],"sha512-sse2.pl",$ARGV[$#ARGV] eq "386");
46*2139Sjp161948
47*2139Sjp161948$K512="esi";	# K512[80] table, found at the end...
48*2139Sjp161948#$W512="esp";	# $W512 is not just W512[16]: it comprises *two* copies
49*2139Sjp161948		# of W512[16] and a copy of A-H variables...
50*2139Sjp161948$W512_SZ=8*(16+16+8);	# see above...
51*2139Sjp161948#$Kidx="ebx";	# index in K512 table, advances from 0 to 80...
52*2139Sjp161948$Widx="edx";	# index in W512, wraps around at 16...
53*2139Sjp161948$data="edi";	# 16 qwords of input data...
54*2139Sjp161948$A="mm0";	# B-D and
55*2139Sjp161948$E="mm1";	# F-H are allocated dynamically...
56*2139Sjp161948$Aoff=256+0;	# A-H offsets relative to $W512...
57*2139Sjp161948$Boff=256+8;
58*2139Sjp161948$Coff=256+16;
59*2139Sjp161948$Doff=256+24;
60*2139Sjp161948$Eoff=256+32;
61*2139Sjp161948$Foff=256+40;
62*2139Sjp161948$Goff=256+48;
63*2139Sjp161948$Hoff=256+56;
64*2139Sjp161948
65*2139Sjp161948sub SHA2_ROUND()
66*2139Sjp161948{ local ($kidx,$widx)=@_;
67*2139Sjp161948
68*2139Sjp161948	# One can argue that one could reorder instructions for better
69*2139Sjp161948	# performance. Well, I tried and it doesn't seem to make any
70*2139Sjp161948	# noticeable difference. Modern out-of-order execution cores
71*2139Sjp161948	# reorder instructions to their liking in either case and they
72*2139Sjp161948	# apparently do decent job. So we can keep the code more
73*2139Sjp161948	# readable/regular/comprehensible:-)
74*2139Sjp161948
75*2139Sjp161948	# I adhere to 64-bit %mmX registers in order to avoid/not care
76*2139Sjp161948	# about #GP exceptions on misaligned 128-bit access, most
77*2139Sjp161948	# notably in paddq with memory operand. Not to mention that
78*2139Sjp161948	# SSE2 intructions operating on %mmX can be scheduled every
79*2139Sjp161948	# cycle [and not every second one if operating on %xmmN].
80*2139Sjp161948
81*2139Sjp161948	&movq	("mm4",&QWP($Foff,$W512));	# load f
82*2139Sjp161948	&movq	("mm5",&QWP($Goff,$W512));	# load g
83*2139Sjp161948	&movq	("mm6",&QWP($Hoff,$W512));	# load h
84*2139Sjp161948
85*2139Sjp161948	&movq	("mm2",$E);			# %mm2 is sliding right
86*2139Sjp161948	&movq	("mm3",$E);			# %mm3 is sliding left
87*2139Sjp161948	&psrlq	("mm2",14);
88*2139Sjp161948	&psllq	("mm3",23);
89*2139Sjp161948	&movq	("mm7","mm2");			# %mm7 is T1
90*2139Sjp161948	&pxor	("mm7","mm3");
91*2139Sjp161948	&psrlq	("mm2",4);
92*2139Sjp161948	&psllq	("mm3",23);
93*2139Sjp161948	&pxor	("mm7","mm2");
94*2139Sjp161948	&pxor	("mm7","mm3");
95*2139Sjp161948	&psrlq	("mm2",23);
96*2139Sjp161948	&psllq	("mm3",4);
97*2139Sjp161948	&pxor	("mm7","mm2");
98*2139Sjp161948	&pxor	("mm7","mm3");			# T1=Sigma1_512(e)
99*2139Sjp161948
100*2139Sjp161948	&movq	(&QWP($Foff,$W512),$E);		# f = e
101*2139Sjp161948	&movq	(&QWP($Goff,$W512),"mm4");	# g = f
102*2139Sjp161948	&movq	(&QWP($Hoff,$W512),"mm5");	# h = g
103*2139Sjp161948
104*2139Sjp161948	&pxor	("mm4","mm5");			# f^=g
105*2139Sjp161948	&pand	("mm4",$E);			# f&=e
106*2139Sjp161948	&pxor	("mm4","mm5");			# f^=g
107*2139Sjp161948	&paddq	("mm7","mm4");			# T1+=Ch(e,f,g)
108*2139Sjp161948
109*2139Sjp161948	&movq	("mm2",&QWP($Boff,$W512));	# load b
110*2139Sjp161948	&movq	("mm3",&QWP($Coff,$W512));	# load c
111*2139Sjp161948	&movq	($E,&QWP($Doff,$W512));		# e = d
112*2139Sjp161948
113*2139Sjp161948	&paddq	("mm7","mm6");			# T1+=h
114*2139Sjp161948	&paddq	("mm7",&QWP(0,$K512,$kidx,8));	# T1+=K512[i]
115*2139Sjp161948	&paddq	("mm7",&QWP(0,$W512,$widx,8));	# T1+=W512[i]
116*2139Sjp161948	&paddq	($E,"mm7");			# e += T1
117*2139Sjp161948
118*2139Sjp161948	&movq	("mm4",$A);			# %mm4 is sliding right
119*2139Sjp161948	&movq	("mm5",$A);			# %mm5 is sliding left
120*2139Sjp161948	&psrlq	("mm4",28);
121*2139Sjp161948	&psllq	("mm5",25);
122*2139Sjp161948	&movq	("mm6","mm4");			# %mm6 is T2
123*2139Sjp161948	&pxor	("mm6","mm5");
124*2139Sjp161948	&psrlq	("mm4",6);
125*2139Sjp161948	&psllq	("mm5",5);
126*2139Sjp161948	&pxor	("mm6","mm4");
127*2139Sjp161948	&pxor	("mm6","mm5");
128*2139Sjp161948	&psrlq	("mm4",5);
129*2139Sjp161948	&psllq	("mm5",6);
130*2139Sjp161948	&pxor	("mm6","mm4");
131*2139Sjp161948	&pxor	("mm6","mm5");			# T2=Sigma0_512(a)
132*2139Sjp161948
133*2139Sjp161948	&movq	(&QWP($Boff,$W512),$A);		# b = a
134*2139Sjp161948	&movq	(&QWP($Coff,$W512),"mm2");	# c = b
135*2139Sjp161948	&movq	(&QWP($Doff,$W512),"mm3");	# d = c
136*2139Sjp161948
137*2139Sjp161948	&movq	("mm4",$A);			# %mm4=a
138*2139Sjp161948	&por	($A,"mm3");			# a=a|c
139*2139Sjp161948	&pand	("mm4","mm3");			# %mm4=a&c
140*2139Sjp161948	&pand	($A,"mm2");			# a=(a|c)&b
141*2139Sjp161948	&por	("mm4",$A);			# %mm4=(a&c)|((a|c)&b)
142*2139Sjp161948	&paddq	("mm6","mm4");			# T2+=Maj(a,b,c)
143*2139Sjp161948
144*2139Sjp161948	&movq	($A,"mm7");			# a=T1
145*2139Sjp161948	&paddq	($A,"mm6");			# a+=T2
146*2139Sjp161948}
147*2139Sjp161948
148*2139Sjp161948$func="sha512_block_sse2";
149*2139Sjp161948
150*2139Sjp161948&function_begin_B($func);
151*2139Sjp161948	if (0) {# Caller is expected to check if it's appropriate to
152*2139Sjp161948		# call this routine. Below 3 lines are retained for
153*2139Sjp161948		# debugging purposes...
154*2139Sjp161948		&picmeup("eax","OPENSSL_ia32cap");
155*2139Sjp161948		&bt	(&DWP(0,"eax"),26);
156*2139Sjp161948		&jnc	("SHA512_Transform");
157*2139Sjp161948	}
158*2139Sjp161948
159*2139Sjp161948	&push	("ebp");
160*2139Sjp161948	&mov	("ebp","esp");
161*2139Sjp161948	&push	("ebx");
162*2139Sjp161948	&push	("esi");
163*2139Sjp161948	&push	("edi");
164*2139Sjp161948
165*2139Sjp161948	&mov	($Widx,&DWP(8,"ebp"));		# A-H state, 1st arg
166*2139Sjp161948	&mov	($data,&DWP(12,"ebp"));		# input data, 2nd arg
167*2139Sjp161948	&call	(&label("pic_point"));		# make it PIC!
168*2139Sjp161948&set_label("pic_point");
169*2139Sjp161948	&blindpop($K512);
170*2139Sjp161948	&lea	($K512,&DWP(&label("K512")."-".&label("pic_point"),$K512));
171*2139Sjp161948
172*2139Sjp161948	$W512 = "esp";			# start using %esp as W512
173*2139Sjp161948	&sub	($W512,$W512_SZ);
174*2139Sjp161948	&and	($W512,-16);		# ensure 128-bit alignment
175*2139Sjp161948
176*2139Sjp161948	# make private copy of A-H
177*2139Sjp161948	#     v assume the worst and stick to unaligned load
178*2139Sjp161948	&movdqu	("xmm0",&QWP(0,$Widx));
179*2139Sjp161948	&movdqu	("xmm1",&QWP(16,$Widx));
180*2139Sjp161948	&movdqu	("xmm2",&QWP(32,$Widx));
181*2139Sjp161948	&movdqu	("xmm3",&QWP(48,$Widx));
182*2139Sjp161948
183*2139Sjp161948&align(8);
184*2139Sjp161948&set_label("_chunk_loop");
185*2139Sjp161948
186*2139Sjp161948	&movdqa	(&QWP($Aoff,$W512),"xmm0");	# a,b
187*2139Sjp161948	&movdqa	(&QWP($Coff,$W512),"xmm1");	# c,d
188*2139Sjp161948	&movdqa	(&QWP($Eoff,$W512),"xmm2");	# e,f
189*2139Sjp161948	&movdqa	(&QWP($Goff,$W512),"xmm3");	# g,h
190*2139Sjp161948
191*2139Sjp161948	&xor	($Widx,$Widx);
192*2139Sjp161948
193*2139Sjp161948	&movdq2q($A,"xmm0");			# load a
194*2139Sjp161948	&movdq2q($E,"xmm2");			# load e
195*2139Sjp161948
196*2139Sjp161948	# Why aren't loops unrolled? It makes sense to unroll if
197*2139Sjp161948	# execution time for loop body is comparable with branch
198*2139Sjp161948	# penalties and/or if whole data-set resides in register bank.
199*2139Sjp161948	# Neither is case here... Well, it would be possible to
200*2139Sjp161948	# eliminate few store operations, but it would hardly affect
201*2139Sjp161948	# so to say stop-watch performance, as there is a lot of
202*2139Sjp161948	# available memory slots to fill. It will only relieve some
203*2139Sjp161948	# pressure off memory bus...
204*2139Sjp161948
205*2139Sjp161948	# flip input stream byte order...
206*2139Sjp161948	&mov	("eax",&DWP(0,$data,$Widx,8));
207*2139Sjp161948	&mov	("ebx",&DWP(4,$data,$Widx,8));
208*2139Sjp161948	&bswap	("eax");
209*2139Sjp161948	&bswap	("ebx");
210*2139Sjp161948	&mov	(&DWP(0,$W512,$Widx,8),"ebx");		# W512[i]
211*2139Sjp161948	&mov	(&DWP(4,$W512,$Widx,8),"eax");
212*2139Sjp161948	&mov	(&DWP(128+0,$W512,$Widx,8),"ebx");	# copy of W512[i]
213*2139Sjp161948	&mov	(&DWP(128+4,$W512,$Widx,8),"eax");
214*2139Sjp161948
215*2139Sjp161948&align(8);
216*2139Sjp161948&set_label("_1st_loop");		# 0-15
217*2139Sjp161948	# flip input stream byte order...
218*2139Sjp161948	&mov	("eax",&DWP(0+8,$data,$Widx,8));
219*2139Sjp161948	&mov	("ebx",&DWP(4+8,$data,$Widx,8));
220*2139Sjp161948	&bswap	("eax");
221*2139Sjp161948	&bswap	("ebx");
222*2139Sjp161948	&mov	(&DWP(0+8,$W512,$Widx,8),"ebx");	# W512[i]
223*2139Sjp161948	&mov	(&DWP(4+8,$W512,$Widx,8),"eax");
224*2139Sjp161948	&mov	(&DWP(128+0+8,$W512,$Widx,8),"ebx");	# copy of W512[i]
225*2139Sjp161948	&mov	(&DWP(128+4+8,$W512,$Widx,8),"eax");
226*2139Sjp161948&set_label("_1st_looplet");
227*2139Sjp161948	&SHA2_ROUND($Widx,$Widx); &inc($Widx);
228*2139Sjp161948
229*2139Sjp161948&cmp	($Widx,15)
230*2139Sjp161948&jl	(&label("_1st_loop"));
231*2139Sjp161948&je	(&label("_1st_looplet"));	# playing similar trick on 2nd loop
232*2139Sjp161948					# does not improve performance...
233*2139Sjp161948
234*2139Sjp161948	$Kidx = "ebx";			# start using %ebx as Kidx
235*2139Sjp161948	&mov	($Kidx,$Widx);
236*2139Sjp161948
237*2139Sjp161948&align(8);
238*2139Sjp161948&set_label("_2nd_loop");		# 16-79
239*2139Sjp161948	&and($Widx,0xf);
240*2139Sjp161948
241*2139Sjp161948	# 128-bit fragment! I update W512[i] and W512[i+1] in
242*2139Sjp161948	# parallel:-) Note that I refer to W512[(i&0xf)+N] and not to
243*2139Sjp161948	# W512[(i+N)&0xf]! This is exactly what I maintain the second
244*2139Sjp161948	# copy of W512[16] for...
245*2139Sjp161948	&movdqu	("xmm0",&QWP(8*1,$W512,$Widx,8));	# s0=W512[i+1]
246*2139Sjp161948	&movdqa	("xmm2","xmm0");		# %xmm2 is sliding right
247*2139Sjp161948	&movdqa	("xmm3","xmm0");		# %xmm3 is sliding left
248*2139Sjp161948	&psrlq	("xmm2",1);
249*2139Sjp161948	&psllq	("xmm3",56);
250*2139Sjp161948	&movdqa	("xmm0","xmm2");
251*2139Sjp161948	&pxor	("xmm0","xmm3");
252*2139Sjp161948	&psrlq	("xmm2",6);
253*2139Sjp161948	&psllq	("xmm3",7);
254*2139Sjp161948	&pxor	("xmm0","xmm2");
255*2139Sjp161948	&pxor	("xmm0","xmm3");
256*2139Sjp161948	&psrlq	("xmm2",1);
257*2139Sjp161948	&pxor	("xmm0","xmm2");		# s0 = sigma0_512(s0);
258*2139Sjp161948
259*2139Sjp161948	&movdqa	("xmm1",&QWP(8*14,$W512,$Widx,8));	# s1=W512[i+14]
260*2139Sjp161948	&movdqa	("xmm4","xmm1");		# %xmm4 is sliding right
261*2139Sjp161948	&movdqa	("xmm5","xmm1");		# %xmm5 is sliding left
262*2139Sjp161948	&psrlq	("xmm4",6);
263*2139Sjp161948	&psllq	("xmm5",3);
264*2139Sjp161948	&movdqa	("xmm1","xmm4");
265*2139Sjp161948	&pxor	("xmm1","xmm5");
266*2139Sjp161948	&psrlq	("xmm4",13);
267*2139Sjp161948	&psllq	("xmm5",42);
268*2139Sjp161948	&pxor	("xmm1","xmm4");
269*2139Sjp161948	&pxor	("xmm1","xmm5");
270*2139Sjp161948	&psrlq	("xmm4",42);
271*2139Sjp161948	&pxor	("xmm1","xmm4");		# s1 = sigma1_512(s1);
272*2139Sjp161948
273*2139Sjp161948	#     + have to explictly load W512[i+9] as it's not 128-bit
274*2139Sjp161948	#     v	aligned and paddq would throw an exception...
275*2139Sjp161948	&movdqu	("xmm6",&QWP(8*9,$W512,$Widx,8));
276*2139Sjp161948	&paddq	("xmm0","xmm1");		# s0 += s1
277*2139Sjp161948	&paddq	("xmm0","xmm6");		# s0 += W512[i+9]
278*2139Sjp161948	&paddq	("xmm0",&QWP(0,$W512,$Widx,8));	# s0 += W512[i]
279*2139Sjp161948
280*2139Sjp161948	&movdqa	(&QWP(0,$W512,$Widx,8),"xmm0");		# W512[i] = s0
281*2139Sjp161948	&movdqa	(&QWP(16*8,$W512,$Widx,8),"xmm0");	# copy of W512[i]
282*2139Sjp161948
283*2139Sjp161948	# as the above fragment was 128-bit, we "owe" 2 rounds...
284*2139Sjp161948	&SHA2_ROUND($Kidx,$Widx); &inc($Kidx); &inc($Widx);
285*2139Sjp161948	&SHA2_ROUND($Kidx,$Widx); &inc($Kidx); &inc($Widx);
286*2139Sjp161948
287*2139Sjp161948&cmp	($Kidx,80);
288*2139Sjp161948&jl	(&label("_2nd_loop"));
289*2139Sjp161948
290*2139Sjp161948	# update A-H state
291*2139Sjp161948	&mov	($Widx,&DWP(8,"ebp"));		# A-H state, 1st arg
292*2139Sjp161948	&movq	(&QWP($Aoff,$W512),$A);		# write out a
293*2139Sjp161948	&movq	(&QWP($Eoff,$W512),$E);		# write out e
294*2139Sjp161948	&movdqu	("xmm0",&QWP(0,$Widx));
295*2139Sjp161948	&movdqu	("xmm1",&QWP(16,$Widx));
296*2139Sjp161948	&movdqu	("xmm2",&QWP(32,$Widx));
297*2139Sjp161948	&movdqu	("xmm3",&QWP(48,$Widx));
298*2139Sjp161948	&paddq	("xmm0",&QWP($Aoff,$W512));	# 128-bit additions...
299*2139Sjp161948	&paddq	("xmm1",&QWP($Coff,$W512));
300*2139Sjp161948	&paddq	("xmm2",&QWP($Eoff,$W512));
301*2139Sjp161948	&paddq	("xmm3",&QWP($Goff,$W512));
302*2139Sjp161948	&movdqu	(&QWP(0,$Widx),"xmm0");
303*2139Sjp161948	&movdqu	(&QWP(16,$Widx),"xmm1");
304*2139Sjp161948	&movdqu	(&QWP(32,$Widx),"xmm2");
305*2139Sjp161948	&movdqu	(&QWP(48,$Widx),"xmm3");
306*2139Sjp161948
307*2139Sjp161948&add	($data,16*8);				# advance input data pointer
308*2139Sjp161948&dec	(&DWP(16,"ebp"));			# decrement 3rd arg
309*2139Sjp161948&jnz	(&label("_chunk_loop"));
310*2139Sjp161948
311*2139Sjp161948	# epilogue
312*2139Sjp161948	&emms	();	# required for at least ELF and Win32 ABIs
313*2139Sjp161948	&mov	("edi",&DWP(-12,"ebp"));
314*2139Sjp161948	&mov	("esi",&DWP(-8,"ebp"));
315*2139Sjp161948	&mov	("ebx",&DWP(-4,"ebp"));
316*2139Sjp161948	&leave	();
317*2139Sjp161948&ret	();
318*2139Sjp161948
319*2139Sjp161948&align(64);
320*2139Sjp161948&set_label("K512");	# Yes! I keep it in the code segment!
321*2139Sjp161948	&data_word(0xd728ae22,0x428a2f98);	# u64
322*2139Sjp161948	&data_word(0x23ef65cd,0x71374491);	# u64
323*2139Sjp161948	&data_word(0xec4d3b2f,0xb5c0fbcf);	# u64
324*2139Sjp161948	&data_word(0x8189dbbc,0xe9b5dba5);	# u64
325*2139Sjp161948	&data_word(0xf348b538,0x3956c25b);	# u64
326*2139Sjp161948	&data_word(0xb605d019,0x59f111f1);	# u64
327*2139Sjp161948	&data_word(0xaf194f9b,0x923f82a4);	# u64
328*2139Sjp161948	&data_word(0xda6d8118,0xab1c5ed5);	# u64
329*2139Sjp161948	&data_word(0xa3030242,0xd807aa98);	# u64
330*2139Sjp161948	&data_word(0x45706fbe,0x12835b01);	# u64
331*2139Sjp161948	&data_word(0x4ee4b28c,0x243185be);	# u64
332*2139Sjp161948	&data_word(0xd5ffb4e2,0x550c7dc3);	# u64
333*2139Sjp161948	&data_word(0xf27b896f,0x72be5d74);	# u64
334*2139Sjp161948	&data_word(0x3b1696b1,0x80deb1fe);	# u64
335*2139Sjp161948	&data_word(0x25c71235,0x9bdc06a7);	# u64
336*2139Sjp161948	&data_word(0xcf692694,0xc19bf174);	# u64
337*2139Sjp161948	&data_word(0x9ef14ad2,0xe49b69c1);	# u64
338*2139Sjp161948	&data_word(0x384f25e3,0xefbe4786);	# u64
339*2139Sjp161948	&data_word(0x8b8cd5b5,0x0fc19dc6);	# u64
340*2139Sjp161948	&data_word(0x77ac9c65,0x240ca1cc);	# u64
341*2139Sjp161948	&data_word(0x592b0275,0x2de92c6f);	# u64
342*2139Sjp161948	&data_word(0x6ea6e483,0x4a7484aa);	# u64
343*2139Sjp161948	&data_word(0xbd41fbd4,0x5cb0a9dc);	# u64
344*2139Sjp161948	&data_word(0x831153b5,0x76f988da);	# u64
345*2139Sjp161948	&data_word(0xee66dfab,0x983e5152);	# u64
346*2139Sjp161948	&data_word(0x2db43210,0xa831c66d);	# u64
347*2139Sjp161948	&data_word(0x98fb213f,0xb00327c8);	# u64
348*2139Sjp161948	&data_word(0xbeef0ee4,0xbf597fc7);	# u64
349*2139Sjp161948	&data_word(0x3da88fc2,0xc6e00bf3);	# u64
350*2139Sjp161948	&data_word(0x930aa725,0xd5a79147);	# u64
351*2139Sjp161948	&data_word(0xe003826f,0x06ca6351);	# u64
352*2139Sjp161948	&data_word(0x0a0e6e70,0x14292967);	# u64
353*2139Sjp161948	&data_word(0x46d22ffc,0x27b70a85);	# u64
354*2139Sjp161948	&data_word(0x5c26c926,0x2e1b2138);	# u64
355*2139Sjp161948	&data_word(0x5ac42aed,0x4d2c6dfc);	# u64
356*2139Sjp161948	&data_word(0x9d95b3df,0x53380d13);	# u64
357*2139Sjp161948	&data_word(0x8baf63de,0x650a7354);	# u64
358*2139Sjp161948	&data_word(0x3c77b2a8,0x766a0abb);	# u64
359*2139Sjp161948	&data_word(0x47edaee6,0x81c2c92e);	# u64
360*2139Sjp161948	&data_word(0x1482353b,0x92722c85);	# u64
361*2139Sjp161948	&data_word(0x4cf10364,0xa2bfe8a1);	# u64
362*2139Sjp161948	&data_word(0xbc423001,0xa81a664b);	# u64
363*2139Sjp161948	&data_word(0xd0f89791,0xc24b8b70);	# u64
364*2139Sjp161948	&data_word(0x0654be30,0xc76c51a3);	# u64
365*2139Sjp161948	&data_word(0xd6ef5218,0xd192e819);	# u64
366*2139Sjp161948	&data_word(0x5565a910,0xd6990624);	# u64
367*2139Sjp161948	&data_word(0x5771202a,0xf40e3585);	# u64
368*2139Sjp161948	&data_word(0x32bbd1b8,0x106aa070);	# u64
369*2139Sjp161948	&data_word(0xb8d2d0c8,0x19a4c116);	# u64
370*2139Sjp161948	&data_word(0x5141ab53,0x1e376c08);	# u64
371*2139Sjp161948	&data_word(0xdf8eeb99,0x2748774c);	# u64
372*2139Sjp161948	&data_word(0xe19b48a8,0x34b0bcb5);	# u64
373*2139Sjp161948	&data_word(0xc5c95a63,0x391c0cb3);	# u64
374*2139Sjp161948	&data_word(0xe3418acb,0x4ed8aa4a);	# u64
375*2139Sjp161948	&data_word(0x7763e373,0x5b9cca4f);	# u64
376*2139Sjp161948	&data_word(0xd6b2b8a3,0x682e6ff3);	# u64
377*2139Sjp161948	&data_word(0x5defb2fc,0x748f82ee);	# u64
378*2139Sjp161948	&data_word(0x43172f60,0x78a5636f);	# u64
379*2139Sjp161948	&data_word(0xa1f0ab72,0x84c87814);	# u64
380*2139Sjp161948	&data_word(0x1a6439ec,0x8cc70208);	# u64
381*2139Sjp161948	&data_word(0x23631e28,0x90befffa);	# u64
382*2139Sjp161948	&data_word(0xde82bde9,0xa4506ceb);	# u64
383*2139Sjp161948	&data_word(0xb2c67915,0xbef9a3f7);	# u64
384*2139Sjp161948	&data_word(0xe372532b,0xc67178f2);	# u64
385*2139Sjp161948	&data_word(0xea26619c,0xca273ece);	# u64
386*2139Sjp161948	&data_word(0x21c0c207,0xd186b8c7);	# u64
387*2139Sjp161948	&data_word(0xcde0eb1e,0xeada7dd6);	# u64
388*2139Sjp161948	&data_word(0xee6ed178,0xf57d4f7f);	# u64
389*2139Sjp161948	&data_word(0x72176fba,0x06f067aa);	# u64
390*2139Sjp161948	&data_word(0xa2c898a6,0x0a637dc5);	# u64
391*2139Sjp161948	&data_word(0xbef90dae,0x113f9804);	# u64
392*2139Sjp161948	&data_word(0x131c471b,0x1b710b35);	# u64
393*2139Sjp161948	&data_word(0x23047d84,0x28db77f5);	# u64
394*2139Sjp161948	&data_word(0x40c72493,0x32caab7b);	# u64
395*2139Sjp161948	&data_word(0x15c9bebc,0x3c9ebe0a);	# u64
396*2139Sjp161948	&data_word(0x9c100d4c,0x431d67c4);	# u64
397*2139Sjp161948	&data_word(0xcb3e42b6,0x4cc5d4be);	# u64
398*2139Sjp161948	&data_word(0xfc657e2a,0x597f299c);	# u64
399*2139Sjp161948	&data_word(0x3ad6faec,0x5fcb6fab);	# u64
400*2139Sjp161948	&data_word(0x4a475817,0x6c44198c);	# u64
401*2139Sjp161948
402*2139Sjp161948&function_end_B($func);
403*2139Sjp161948
404*2139Sjp161948&asm_finish();
405