xref: /isa-l_crypto/mh_sha256/mh_sha256_block_sse.asm (revision a922b2eb9b797dc02b9708397f9fc5ba4b325e5d)
1;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
2;  Copyright(c) 2011-2017 Intel Corporation All rights reserved.
3;
4;  Redistribution and use in source and binary forms, with or without
5;  modification, are permitted provided that the following conditions
6;  are met:
7;    * Redistributions of source code must retain the above copyright
8;      notice, this list of conditions and the following disclaimer.
9;    * Redistributions in binary form must reproduce the above copyright
10;      notice, this list of conditions and the following disclaimer in
11;      the documentation and/or other materials provided with the
12;      distribution.
13;    * Neither the name of Intel Corporation nor the names of its
14;      contributors may be used to endorse or promote products derived
15;      from this software without specific prior written permission.
16;
17;  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
18;  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
19;  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
20;  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
21;  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
22;  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
23;  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24;  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25;  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26;  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
27;  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
29
30;; code to compute 16 SHA256 using SSE
31;;
32
33%include "reg_sizes.asm"
34
35[bits 64]
36default rel
37section .text
38
39;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
40;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
41;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; ---------------------------------------------------------------------
;; ABI shim: map the platform calling convention onto neutral names
;; (arg0..arg5, tmp1..tmp6, return) so the kernel body is ABI-agnostic.
;;  - elf64 (System V): integer args in rdi/rsi/rdx/rcx/r8/r9; r12-r15
;;    are callee-saved, so FUNC_SAVE/FUNC_RESTORE push/pop them.
;;  - win64 (Microsoft x64): args in rcx/rdx/r8/r9; r12-r15, rdi, rsi
;;    AND xmm6-xmm15 are callee-saved, so FUNC_SAVE spills them to a
;;    frame and emits SEH unwind info (proc_frame/alloc_stack/
;;    save_xmm128/save_reg/end_prolog come from the included macro set).
;; ---------------------------------------------------------------------
42%ifidn __OUTPUT_FORMAT__, elf64
43 ; Linux
44 %define arg0  rdi
45 %define arg1  rsi
46 %define arg2  rdx
47 %define arg3  rcx
48
49 %define arg4  r8
50 %define arg5  r9
51
52 %define tmp1  r10
53 %define tmp2  r11
54 %define tmp3  r12		; must be saved and restored
55 %define tmp4  r13		; must be saved and restored
56 %define tmp5  r14		; must be saved and restored
57 %define tmp6  r15		; must be saved and restored
58 %define return rax
59
60 %define func(x) x:
61 %macro FUNC_SAVE 0
62	push	r12
63	push	r13
64	push	r14
65	push	r15
66 %endmacro
 ; Pops must mirror the pushes above in reverse order.
67 %macro FUNC_RESTORE 0
68	pop	r15
69	pop	r14
70	pop	r13
71	pop	r12
72 %endmacro
73%else
74 ; Windows
75 %define arg0   rcx
76 %define arg1   rdx
77 %define arg2   r8
78 %define arg3   r9
79
80 %define arg4   r10
81 %define arg5   r11
82 %define tmp1   r12		; must be saved and restored
83 %define tmp2   r13		; must be saved and restored
84 %define tmp3   r14		; must be saved and restored
85 %define tmp4   r15		; must be saved and restored
86 %define tmp5   rdi		; must be saved and restored
87 %define tmp6   rsi		; must be saved and restored
88 %define return rax
89
 ; Frame: 10 xmm slots (16 B each) + 6 GPR slots + 1 pad slot.
 ; An odd multiple of 8 keeps rsp 16-byte aligned after the call
 ; pushed the return address (entry rsp % 16 == 8).
90 %define stack_size  10*16 + 7*8		; must be an odd multiple of 8
91 %define func(x) proc_frame x
92 %macro FUNC_SAVE 0
93	alloc_stack	stack_size
94	save_xmm128	xmm6, 0*16
95	save_xmm128	xmm7, 1*16
96	save_xmm128	xmm8, 2*16
97	save_xmm128	xmm9, 3*16
98	save_xmm128	xmm10, 4*16
99	save_xmm128	xmm11, 5*16
100	save_xmm128	xmm12, 6*16
101	save_xmm128	xmm13, 7*16
102	save_xmm128	xmm14, 8*16
103	save_xmm128	xmm15, 9*16
104	save_reg	r12,  10*16 + 0*8
105	save_reg	r13,  10*16 + 1*8
106	save_reg	r14,  10*16 + 2*8
107	save_reg	r15,  10*16 + 3*8
108	save_reg	rdi,  10*16 + 4*8
109	save_reg	rsi,  10*16 + 5*8
110	end_prolog
111 %endmacro
112
 ; Reload every register from the same frame offsets used in FUNC_SAVE,
 ; then release the frame.
113 %macro FUNC_RESTORE 0
114	movdqa	xmm6, [rsp + 0*16]
115	movdqa	xmm7, [rsp + 1*16]
116	movdqa	xmm8, [rsp + 2*16]
117	movdqa	xmm9, [rsp + 3*16]
118	movdqa	xmm10, [rsp + 4*16]
119	movdqa	xmm11, [rsp + 5*16]
120	movdqa	xmm12, [rsp + 6*16]
121	movdqa	xmm13, [rsp + 7*16]
122	movdqa	xmm14, [rsp + 8*16]
123	movdqa	xmm15, [rsp + 9*16]
124	mov	r12,  [rsp + 10*16 + 0*8]
125	mov	r13,  [rsp + 10*16 + 1*8]
126	mov	r14,  [rsp + 10*16 + 2*8]
127	mov	r15,  [rsp + 10*16 + 3*8]
128	mov	rdi,  [rsp + 10*16 + 4*8]
129	mov	rsi,  [rsp + 10*16 + 5*8]
130	add	rsp, stack_size
131 %endmacro
132%endif
133;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
 ; loops = arg3 = num_blocks (number of 1 KB multi-hash blocks to process)
134%define loops 		arg3
135;variables of mh_sha256
136%define mh_in_p  	arg0
137%define mh_digests_p 	arg1
138%define mh_data_p	arg2
 ; byte offset (0,16,32,48) selecting which group of 4 segments is active
139%define mh_segs  	tmp1
140;variables used by storing segs_digests on stack
141%define RSP_SAVE	tmp2
 ; 512-byte on-stack copy of the digests: 4 bytes * 8 state words * 16 segs
142%define FRAMESZ 	4*8*16		;BYTES*DWORDS*SEGS
143
144; Common definitions
 ; ROUND = running byte offset into TABLE (one 16-byte K entry per round)
145%define ROUND	tmp4
146%define TBL	tmp5
147
 ; pref = prefetch offset into the input stream
148%define pref	tmp3
149%macro PREFETCH_X 1
150%define %%mem  %1
151	prefetcht1  %%mem
152%endmacro
153;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
 ; Unaligned load/store; digests pointer is not guaranteed 16-B aligned.
154%define MOVPS  movups
155
 ; SZ = bytes per SHA-256 word; SZ4 = bytes per 4-lane vector of words.
156%define SZ	4
157%define SZ4	4*SZ
158%define ROUNDS 64*SZ4
159
 ; Working state a..h: one 32-bit lane per segment, 4 segments per pass.
160%define a xmm0
161%define b xmm1
162%define c xmm2
163%define d xmm3
164%define e xmm4
165%define f xmm5
166%define g xmm6
167%define h xmm7
168
 ; a0/a1/a2: scratch for sigma/ch/maj computations inside the round macros.
169%define a0 xmm8
170%define a1 xmm9
171%define a2 xmm10
172
 ; TT0..TT5 are scratch for the byte-swap stage; note TT4 aliases a2 and
 ; TT5 aliases a1 — safe because the two usage phases never overlap.
173%define TT0 xmm14
174%define TT1 xmm13
175%define TT2 xmm12
176%define TT3 xmm11
177%define TT4 xmm10
178%define TT5 xmm9
179
 ; T1 aliases TT0 (xmm14); TMP is the shared rotate/byte-flip scratch.
180%define T1  xmm14
181%define TMP xmm15
182
183;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
 ; Rotate the a..h register *names* one step (h<-g<-...<-a<-old h) via
 ; %xdefine, so each round macro can be written as if it were round 0.
 ; No instructions are emitted — this is a pure assemble-time rename.
184%macro ROTATE_ARGS 0
185%xdefine TMP_ h
186%xdefine h g
187%xdefine g f
188%xdefine f e
189%xdefine e d
190%xdefine d c
191%xdefine c b
192%xdefine b a
193%xdefine a TMP_
194%endm
195
196
197; PRORD reg, imm, tmp
 ; Packed rotate-right of each 32-bit lane by %%imm bits.
 ; SSE has no rotate instruction, so emulate with shift-right,
 ; shift-left of the complement amount, then OR. Clobbers %%tmp.
198%macro PRORD 3
199%define %%reg %1
200%define %%imm %2
201%define %%tmp %3
202	movdqa  %%tmp, %%reg
203	psrld   %%reg, %%imm
204	pslld   %%tmp, (32-(%%imm))
205	por     %%reg, %%tmp
206%endmacro
207
208; PRORD dst/src, amt
 ; Two-operand overload: uses the global TMP register (xmm15) as scratch.
209%macro PRORD 2
210	PRORD	%1, %2, TMP
211%endmacro
212
213;; arguments passed implicitly in preprocessor symbols i, a...h
 ;; One SHA-256 round for rounds 0-15: the message word W[i] is *Read*
 ;; from %%data (already byte-swapped). Processes 4 segments in parallel,
 ;; one per 32-bit lane. In the comments below ">>" denotes a 32-bit
 ;; rotate-right (implemented by PRORD), matching FIPS 180-4:
 ;;   Sigma1(e) = (e>>>6)^(e>>>11)^(e>>>25), Sigma0(a) = (a>>>2)^(a>>>13)^(a>>>22)
 ;;   ch = (e&f)^(~e&g), maj = (a&b)^(a&c)^(b&c)
 ;; Side effects: advances ROUND by SZ4; ends with ROTATE_ARGS renaming.
214%macro ROUND_00_15_R 3
215%define %%T1 %1
216%define %%i  %2
217%define %%data %3
218
219	movdqa	a0, e		; sig1: a0 = e
220	movdqa	a1, e		; sig1: a1 = e
221	PRORD	a0, (11-6)	; sig1: a0 = (e >> 5)
222
223	movdqa	a2, f		; ch: a2 = f
224	pxor	a2, g		; ch: a2 = f^g
225	pand	a2, e		; ch: a2 = (f^g)&e
226	pxor	a2, g		; a2 = ch
227
228	PRORD	a1, 25		; sig1: a1 = (e >> 25)
229	movdqa	%%T1,[SZ4*(%%i&0xf) + %%data]
230	paddd	%%T1,[TBL + ROUND]	; T1 = W + K
231	pxor	a0, e		; sig1: a0 = e ^ (e >> 5)
232	PRORD	a0, 6		; sig1: a0 = (e >> 6) ^ (e >> 11)
233	paddd	h, a2		; h = h + ch
234	movdqa	a2, a		; sig0: a2 = a
235	PRORD	a2, (13-2)	; sig0: a2 = (a >> 11)
236	paddd	h, %%T1		; h = h + ch + W + K
237	pxor	a0, a1		; a0 = sigma1
238	movdqa	a1, a		; sig0: a1 = a
239	movdqa	%%T1, a		; maj: T1 = a
240	PRORD	a1, 22		; sig0: a1 = (a >> 22)
241	pxor	%%T1, c		; maj: T1 = a^c
242	add	ROUND, SZ4	; ROUND++
243	pand	%%T1, b		; maj: T1 = (a^c)&b
244	paddd	h, a0		; h = h + ch + W + K + sigma1
245
246	paddd	d, h		; d = d + T1 (h now holds T1 of FIPS 180-4)
247
248	pxor	a2, a		; sig0: a2 = a ^ (a >> 11)
249	PRORD	a2, 2		; sig0: a2 = (a >> 2) ^ (a >> 13)
250	pxor	a2, a1		; a2 = sig0
251	movdqa	a1, a		; maj: a1 = a
252	pand	a1, c		; maj: a1 = a&c
253	por	a1, %%T1	; a1 = maj
254	paddd	h, a1		; h = h + ch + W + K + maj
255	paddd	h, a2		; h = h + ch + W + K + maj + sigma0
256
257	ROTATE_ARGS
258%endm
259
260;; arguments passed implicitly in preprocessor symbols i, a...h
 ;; Same round body as ROUND_00_15_R with one difference: %%T1 already
 ;; holds the freshly scheduled W[i] (computed by ROUND_16_XX), so it is
 ;; *Written* back into the circular 16-word buffer at %%data instead of
 ;; being loaded from it. Used for rounds 16-63.
 ;; Side effects: advances ROUND by SZ4; ends with ROTATE_ARGS renaming.
261%macro ROUND_00_15_W 3
262%define %%T1 %1
263%define %%i  %2
264%define %%data %3
265
266	movdqa	a0, e		; sig1: a0 = e
267	movdqa	a1, e		; sig1: a1 = e
268	PRORD	a0, (11-6)	; sig1: a0 = (e >> 5)
269
270	movdqa	a2, f		; ch: a2 = f
271	pxor	a2, g		; ch: a2 = f^g
272	pand	a2, e		; ch: a2 = (f^g)&e
273	pxor	a2, g		; a2 = ch
274
275	PRORD	a1, 25		; sig1: a1 = (e >> 25)
276	movdqa	[SZ4*(%%i&0xf) + %%data], %%T1	; store scheduled W[i]
277	paddd	%%T1,[TBL + ROUND]	; T1 = W + K
278	pxor	a0, e		; sig1: a0 = e ^ (e >> 5)
279	PRORD	a0, 6		; sig1: a0 = (e >> 6) ^ (e >> 11)
280	paddd	h, a2		; h = h + ch
281	movdqa	a2, a		; sig0: a2 = a
282	PRORD	a2, (13-2)	; sig0: a2 = (a >> 11)
283	paddd	h, %%T1		; h = h + ch + W + K
284	pxor	a0, a1		; a0 = sigma1
285	movdqa	a1, a		; sig0: a1 = a
286	movdqa	%%T1, a		; maj: T1 = a
287	PRORD	a1, 22		; sig0: a1 = (a >> 22)
288	pxor	%%T1, c		; maj: T1 = a^c
289	add	ROUND, SZ4	; ROUND++
290	pand	%%T1, b		; maj: T1 = (a^c)&b
291	paddd	h, a0		; h = h + ch + W + K + sigma1
292
293	paddd	d, h		; d = d + T1 (h now holds T1 of FIPS 180-4)
294
295	pxor	a2, a		; sig0: a2 = a ^ (a >> 11)
296	PRORD	a2, 2		; sig0: a2 = (a >> 2) ^ (a >> 13)
297	pxor	a2, a1		; a2 = sig0
298	movdqa	a1, a		; maj: a1 = a
299	pand	a1, c		; maj: a1 = a&c
300	por	a1, %%T1	; a1 = maj
301	paddd	h, a1		; h = h + ch + W + K + maj
302	paddd	h, a2		; h = h + ch + W + K + maj + sigma0
303
304	ROTATE_ARGS
305%endm
306;; arguments passed implicitly in preprocessor symbols i, a...h
 ;; Message schedule + round for rounds 16-63:
 ;;   W[i] = sigma0(W[i-15]) + sigma1(W[i-2]) + W[i-16] + W[i-7]
 ;; with sigma0(x) = (x>>>7)^(x>>>18)^(x>>3)  (rotates + logical shift)
 ;; and  sigma1(x) = (x>>>17)^(x>>>19)^(x>>10),
 ;; then feeds W[i] to ROUND_00_15_W, which also stores it back into the
 ;; 16-entry circular buffer at %%data (indexed by i & 0xf).
307%macro ROUND_16_XX 3
308%define %%T1 %1
309%define %%i  %2
310%define %%data %3
311
312	movdqa	%%T1, [SZ4*((%%i-15)&0xf) + %%data]	; W[i-15]
313	movdqa	a1, [SZ4*((%%i-2)&0xf) + %%data]	; W[i-2]
314	movdqa	a0, %%T1				; keep W[i-15] for shr 3
315	PRORD	%%T1, 18-7		; W[i-15] >>> 11
316	movdqa	a2, a1					; keep W[i-2] for shr 10
317	PRORD	a1, 19-17		; W[i-2] >>> 2
318	pxor	%%T1, a0		; (x >>> 11) ^ x
319	PRORD	%%T1, 7			; (x >>> 18) ^ (x >>> 7)
320	pxor	a1, a2			; (y >>> 2) ^ y
321	PRORD	a1, 17			; (y >>> 19) ^ (y >>> 17)
322	psrld	a0, 3			; x >> 3
323	pxor	%%T1, a0		; T1 = sigma0(W[i-15])
324	psrld	a2, 10			; y >> 10
325	pxor	a1, a2			; a1 = sigma1(W[i-2])
326	paddd	%%T1, [SZ4*((%%i-16)&0xf) + %%data]	; + W[i-16]
327	paddd	a1, [SZ4*((%%i-7)&0xf) + %%data]	; + W[i-7]
328	paddd	%%T1, a1		; T1 = W[i]
329
330	ROUND_00_15_W %%T1, %%i, %%data
331
332%endm
333
334;init hash digests
335; segs_digests:low addr-> high_addr
336; a  | b  |  c | ...|  p | (16)
337; h0 | h0 | h0 | ...| h0 |    | Aa| Ab | Ac |...| Ap |
338; h1 | h1 | h1 | ...| h1 |    | Ba| Bb | Bc |...| Bp |
339; ....
340; h7 | h7 | h7 | ...| h7 |    | Ha| Hb | Hc |...| Hp |
341
342align 32
343
344;void _mh_sha256_block_sse(const uint8_t * input_data, uint32_t digests[ISAL_SHA256_DIGEST_WORDS][ISAL_HASH_SEGS],
345;		uint8_t frame_buffer[ISAL_MH_SHA256_BLOCK_SIZE], uint32_t num_blocks);
346; arg 0 pointer to input data
347; arg 1 pointer to digests, include segments digests(uint32_t digests[8][16])
348; arg 2 pointer to aligned_frame_buffer which is used to save the big_endian data.
349; arg 3 number of 1KB blocks
350;
; Processes num_blocks 1 KB blocks. Each 1 KB block carries 16 interleaved
; 64-byte SHA-256 segments; the digests are copied to a 512-byte aligned
; stack frame, updated there (4 segments per .segs_loop pass, one per
; xmm lane), and copied back at the end.
; Register roles: mh_in_p/mh_digests_p/mh_data_p = args 0-2, loops = arg 3,
; RSP_SAVE = caller rsp, TBL = K table, ROUND = K offset, pref = prefetch
; offset, mh_segs = active segment-group offset (0/16/32/48).
351mk_global _mh_sha256_block_sse, function, internal
352func(_mh_sha256_block_sse)
353	endbranch
354	FUNC_SAVE
355	; save rsp
356	mov	RSP_SAVE, rsp
357
	; nothing to do for num_blocks == 0
358	cmp	loops, 0
359	jle	.return
360
361	; leave enough space to store segs_digests
362	sub     rsp, FRAMESZ
363	; align rsp to 16 Bytes needed by sse
364	and	rsp, ~0x0F
365	lea	TBL,[TABLE]
366
367 %assign I 0					; copy segs_digests into stack
368 %rep 8
369	MOVPS  a, [mh_digests_p + I*64 + 16*0]
370	MOVPS  b, [mh_digests_p + I*64 + 16*1]
371	MOVPS  c, [mh_digests_p + I*64 + 16*2]
372	MOVPS  d, [mh_digests_p + I*64 + 16*3]
373
374	movdqa [rsp + I*64 + 16*0], a
375	movdqa [rsp + I*64 + 16*1], b
376	movdqa [rsp + I*64 + 16*2], c
377	movdqa [rsp + I*64 + 16*3], d
378 %assign I (I+1)
379 %endrep
380
381.block_loop:
382	;transform to big-endian data and store on aligned_frame
383	movdqa  TMP, [PSHUFFLE_BYTE_FLIP_MASK]
384	;transform input data from DWORD*16_SEGS*8 to DWORD*4_SEGS*8*4
385 %assign I 0
386 %rep 16
387	MOVPS   TT0,[mh_in_p + I*64+0*16]
388	MOVPS   TT1,[mh_in_p + I*64+1*16]
389	MOVPS   TT2,[mh_in_p + I*64+2*16]
390	MOVPS   TT3,[mh_in_p + I*64+3*16]
391
	; byte-swap each dword to big-endian and regroup by 4-segment lane
392	pshufb  TT0, TMP
393	movdqa  [mh_data_p +(I)*16 +0*256],TT0
394	pshufb  TT1, TMP
395	movdqa  [mh_data_p +(I)*16 +1*256],TT1
396	pshufb  TT2, TMP
397	movdqa  [mh_data_p +(I)*16 +2*256],TT2
398	pshufb  TT3, TMP
399	movdqa  [mh_data_p +(I)*16 +3*256],TT3
400 %assign I (I+1)
401 %endrep
402
403	mov	mh_segs, 0			;start from the first 4 segments
404	mov	pref, 1024			;avoid prefetching repeatedly
 ; One pass per group of 4 segments (mh_segs = 0,16,32,48).
405 .segs_loop:
406	xor	ROUND, ROUND
407	;; Initialize digests
408	movdqa  a, [rsp + 0*64 + mh_segs]
409	movdqa  b, [rsp + 1*64 + mh_segs]
410	movdqa  c, [rsp + 2*64 + mh_segs]
411	movdqa  d, [rsp + 3*64 + mh_segs]
412	movdqa  e, [rsp + 4*64 + mh_segs]
413	movdqa  f, [rsp + 5*64 + mh_segs]
414	movdqa  g, [rsp + 6*64 + mh_segs]
415	movdqa  h, [rsp + 7*64 + mh_segs]
416
	;; rounds 0-15: W read directly from the byte-swapped frame buffer
417  %assign i 0
418  %rep 4
419	ROUND_00_15_R	TT0, (i*4+0), mh_data_p
420	ROUND_00_15_R	TT1, (i*4+1), mh_data_p
421	ROUND_00_15_R	TT2, (i*4+2), mh_data_p
422	ROUND_00_15_R	TT3, (i*4+3), mh_data_p
423  %assign i (i+1)
424  %endrep
425	PREFETCH_X [mh_in_p + pref+128*0]
426
	;; rounds 16-63: message schedule computed on the fly
427  %assign i 16
428  %rep 48
429	%if i = 48
430		PREFETCH_X [mh_in_p + pref+128*1]
431	%endif
432	ROUND_16_XX	T1, i, mh_data_p
433  %assign i (i+1)
434  %endrep
435
436	;; add old digest
437	paddd	a, [rsp + 0*64 + mh_segs]
438	paddd	b, [rsp + 1*64 + mh_segs]
439	paddd	c, [rsp + 2*64 + mh_segs]
440	paddd	d, [rsp + 3*64 + mh_segs]
441	paddd	e, [rsp + 4*64 + mh_segs]
442	paddd	f, [rsp + 5*64 + mh_segs]
443	paddd	g, [rsp + 6*64 + mh_segs]
444	paddd	h, [rsp + 7*64 + mh_segs]
445
446	; write out digests
447	movdqa  [rsp + 0*64 + mh_segs], a
448	movdqa  [rsp + 1*64 + mh_segs], b
449	movdqa  [rsp + 2*64 + mh_segs], c
450	movdqa  [rsp + 3*64 + mh_segs], d
451	movdqa  [rsp + 4*64 + mh_segs], e
452	movdqa  [rsp + 5*64 + mh_segs], f
453	movdqa  [rsp + 6*64 + mh_segs], g
454	movdqa  [rsp + 7*64 + mh_segs], h
455
	; next 4-segment group; mh_data_p advances 256 B into frame_buffer
456	add	pref,      256
457	add	mh_data_p, 256
458	add 	mh_segs,   16
459	cmp	mh_segs,   64
460	jc 	.segs_loop
461
	; rewind frame buffer, advance input by one 1 KB block
462	sub	mh_data_p, (1024)
463	add 	mh_in_p,   (1024)
464	sub     loops,     1
465	jne     .block_loop
466
467 %assign I 0					; copy segs_digests back to mh_digests_p
468 %rep 8
469	movdqa a, [rsp + I*64 + 16*0]
470	movdqa b, [rsp + I*64 + 16*1]
471	movdqa c, [rsp + I*64 + 16*2]
472	movdqa d, [rsp + I*64 + 16*3]
473
474	MOVPS  [mh_digests_p + I*64 + 16*0], a
475	MOVPS  [mh_digests_p + I*64 + 16*1], b
476	MOVPS  [mh_digests_p + I*64 + 16*2], c
477	MOVPS  [mh_digests_p + I*64 + 16*3], d
478 %assign I (I+1)
479 %endrep
480	mov	rsp, RSP_SAVE			; restore rsp
481
482.return:
483	FUNC_RESTORE
484	ret
485
486endproc_frame
487
488section .data align=16
489
490align 16
; SHA-256 round constants K[0..63] (FIPS 180-4). Each 32-bit constant is
; replicated 4 times (two per dq) so one 16-byte entry applies the same K
; to all 4 segment lanes; ROUND indexes this table in 16-byte steps.
491TABLE:
492	dq	0x428a2f98428a2f98, 0x428a2f98428a2f98
493	dq	0x7137449171374491, 0x7137449171374491
494	dq	0xb5c0fbcfb5c0fbcf, 0xb5c0fbcfb5c0fbcf
495	dq	0xe9b5dba5e9b5dba5, 0xe9b5dba5e9b5dba5
496	dq	0x3956c25b3956c25b, 0x3956c25b3956c25b
497	dq	0x59f111f159f111f1, 0x59f111f159f111f1
498	dq	0x923f82a4923f82a4, 0x923f82a4923f82a4
499	dq	0xab1c5ed5ab1c5ed5, 0xab1c5ed5ab1c5ed5
500	dq	0xd807aa98d807aa98, 0xd807aa98d807aa98
501	dq	0x12835b0112835b01, 0x12835b0112835b01
502	dq	0x243185be243185be, 0x243185be243185be
503	dq	0x550c7dc3550c7dc3, 0x550c7dc3550c7dc3
504	dq	0x72be5d7472be5d74, 0x72be5d7472be5d74
505	dq	0x80deb1fe80deb1fe, 0x80deb1fe80deb1fe
506	dq	0x9bdc06a79bdc06a7, 0x9bdc06a79bdc06a7
507	dq	0xc19bf174c19bf174, 0xc19bf174c19bf174
508	dq	0xe49b69c1e49b69c1, 0xe49b69c1e49b69c1
509	dq	0xefbe4786efbe4786, 0xefbe4786efbe4786
510	dq	0x0fc19dc60fc19dc6, 0x0fc19dc60fc19dc6
511	dq	0x240ca1cc240ca1cc, 0x240ca1cc240ca1cc
512	dq	0x2de92c6f2de92c6f, 0x2de92c6f2de92c6f
513	dq	0x4a7484aa4a7484aa, 0x4a7484aa4a7484aa
514	dq	0x5cb0a9dc5cb0a9dc, 0x5cb0a9dc5cb0a9dc
515	dq	0x76f988da76f988da, 0x76f988da76f988da
516	dq	0x983e5152983e5152, 0x983e5152983e5152
517	dq	0xa831c66da831c66d, 0xa831c66da831c66d
518	dq	0xb00327c8b00327c8, 0xb00327c8b00327c8
519	dq	0xbf597fc7bf597fc7, 0xbf597fc7bf597fc7
520	dq	0xc6e00bf3c6e00bf3, 0xc6e00bf3c6e00bf3
521	dq	0xd5a79147d5a79147, 0xd5a79147d5a79147
522	dq	0x06ca635106ca6351, 0x06ca635106ca6351
523	dq	0x1429296714292967, 0x1429296714292967
524	dq	0x27b70a8527b70a85, 0x27b70a8527b70a85
525	dq	0x2e1b21382e1b2138, 0x2e1b21382e1b2138
526	dq	0x4d2c6dfc4d2c6dfc, 0x4d2c6dfc4d2c6dfc
527	dq	0x53380d1353380d13, 0x53380d1353380d13
528	dq	0x650a7354650a7354, 0x650a7354650a7354
529	dq	0x766a0abb766a0abb, 0x766a0abb766a0abb
530	dq	0x81c2c92e81c2c92e, 0x81c2c92e81c2c92e
531	dq	0x92722c8592722c85, 0x92722c8592722c85
532	dq	0xa2bfe8a1a2bfe8a1, 0xa2bfe8a1a2bfe8a1
533	dq	0xa81a664ba81a664b, 0xa81a664ba81a664b
534	dq	0xc24b8b70c24b8b70, 0xc24b8b70c24b8b70
535	dq	0xc76c51a3c76c51a3, 0xc76c51a3c76c51a3
536	dq	0xd192e819d192e819, 0xd192e819d192e819
537	dq	0xd6990624d6990624, 0xd6990624d6990624
538	dq	0xf40e3585f40e3585, 0xf40e3585f40e3585
539	dq	0x106aa070106aa070, 0x106aa070106aa070
540	dq	0x19a4c11619a4c116, 0x19a4c11619a4c116
541	dq	0x1e376c081e376c08, 0x1e376c081e376c08
542	dq	0x2748774c2748774c, 0x2748774c2748774c
543	dq	0x34b0bcb534b0bcb5, 0x34b0bcb534b0bcb5
544	dq	0x391c0cb3391c0cb3, 0x391c0cb3391c0cb3
545	dq	0x4ed8aa4a4ed8aa4a, 0x4ed8aa4a4ed8aa4a
546	dq	0x5b9cca4f5b9cca4f, 0x5b9cca4f5b9cca4f
547	dq	0x682e6ff3682e6ff3, 0x682e6ff3682e6ff3
548	dq	0x748f82ee748f82ee, 0x748f82ee748f82ee
549	dq	0x78a5636f78a5636f, 0x78a5636f78a5636f
550	dq	0x84c8781484c87814, 0x84c8781484c87814
551	dq	0x8cc702088cc70208, 0x8cc702088cc70208
552	dq	0x90befffa90befffa, 0x90befffa90befffa
553	dq	0xa4506ceba4506ceb, 0xa4506ceba4506ceb
554	dq	0xbef9a3f7bef9a3f7, 0xbef9a3f7bef9a3f7
555	dq	0xc67178f2c67178f2, 0xc67178f2c67178f2
; pshufb control mask: reverses the bytes within each 32-bit dword
; (little-endian -> big-endian) across the whole 128-bit register.
556PSHUFFLE_BYTE_FLIP_MASK: dq 0x0405060700010203, 0x0c0d0e0f08090a0b
557
558