;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;  Copyright(c) 2011-2017 Intel Corporation All rights reserved.
;
;  Redistribution and use in source and binary forms, with or without
;  modification, are permitted provided that the following conditions
;  are met:
;    * Redistributions of source code must retain the above copyright
;      notice, this list of conditions and the following disclaimer.
;    * Redistributions in binary form must reproduce the above copyright
;      notice, this list of conditions and the following disclaimer in
;      the documentation and/or other materials provided with the
;      distribution.
;    * Neither the name of Intel Corporation nor the names of its
;      contributors may be used to endorse or promote products derived
;      from this software without specific prior written permission.
;
;  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
;  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
;  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
;  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
;  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
;  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
;  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
;  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
;  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
;  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;

;; code to compute 16 SHA256 digests using AVX
;;

%include "reg_sizes.asm"

[bits 64]
default rel
section .text

;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
%ifidn __OUTPUT_FORMAT__, elf64
 ; Linux
 %define arg0  rdi
 %define arg1  rsi
 %define arg2  rdx
 %define arg3  rcx

 %define arg4  r8
 %define arg5  r9

 %define tmp1  r10
 %define tmp2  r11
 %define tmp3  r12		; must be saved and restored
 %define tmp4  r13		; must be saved and restored
 %define tmp5  r14		; must be saved and restored
 %define tmp6  r15		; must be saved and restored
 %define return rax

 %define func(x) x:
 %macro FUNC_SAVE 0
	push	r12
	push	r13
	push	r14
	push	r15
 %endmacro
 %macro FUNC_RESTORE 0
	pop	r15
	pop	r14
	pop	r13
	pop	r12
 %endmacro
%else
 ; Windows
 %define arg0   rcx
 %define arg1   rdx
 %define arg2   r8
 %define arg3   r9

 %define arg4   r10
 %define arg5   r11
 %define tmp1   r12		; must be saved and restored
 %define tmp2   r13		; must be saved and restored
 %define tmp3   r14		; must be saved and restored
 %define tmp4   r15		; must be saved and restored
 %define tmp5   rdi		; must be saved and restored
 %define tmp6   rsi		; must be saved and restored
 %define return rax

 %define stack_size  10*16 + 7*8		; must be an odd multiple of 8
 %define func(x) proc_frame x
 %macro FUNC_SAVE 0
	alloc_stack	stack_size
	save_xmm128	xmm6, 0*16
	save_xmm128	xmm7, 1*16
	save_xmm128	xmm8, 2*16
	save_xmm128	xmm9, 3*16
	save_xmm128	xmm10, 4*16
	save_xmm128	xmm11, 5*16
	save_xmm128	xmm12, 6*16
	save_xmm128	xmm13, 7*16
	save_xmm128	xmm14, 8*16
	save_xmm128	xmm15, 9*16
	save_reg	r12,  10*16 + 0*8
	save_reg	r13,  10*16 + 1*8
	save_reg	r14,  10*16 + 2*8
	save_reg	r15,  10*16 + 3*8
	save_reg	rdi,  10*16 + 4*8
	save_reg	rsi,  10*16 + 5*8
	end_prolog
 %endmacro

 %macro FUNC_RESTORE 0
	movdqa	xmm6, [rsp + 0*16]
	movdqa	xmm7, [rsp + 1*16]
	movdqa	xmm8, [rsp + 2*16]
	movdqa	xmm9, [rsp + 3*16]
	movdqa	xmm10, [rsp + 4*16]
	movdqa	xmm11, [rsp + 5*16]
	movdqa	xmm12, [rsp + 6*16]
	movdqa	xmm13, [rsp + 7*16]
	movdqa	xmm14, [rsp + 8*16]
	movdqa	xmm15, [rsp + 9*16]
	mov	r12,  [rsp + 10*16 + 0*8]
	mov	r13,  [rsp + 10*16 + 1*8]
	mov	r14,  [rsp + 10*16 + 2*8]
	mov	r15,  [rsp + 10*16 + 3*8]
	mov	rdi,  [rsp + 10*16 + 4*8]
	mov	rsi,  [rsp + 10*16 + 5*8]
	add	rsp, stack_size
 %endmacro
%endif
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
%define loops 		arg3
;variables of mh_sha256
%define mh_in_p  	arg0
%define mh_digests_p 	arg1
%define mh_data_p	arg2
%define mh_segs  	tmp1
;variables used for storing segs_digests on the stack
%define RSP_SAVE	tmp2
%define FRAMESZ 	4*8*16		;BYTES*DWORDS*SEGS

; Common definitions
%define ROUND	tmp4
%define TBL	tmp5

%define pref	tmp3
%macro PREFETCH_X 1
%define %%mem  %1
	prefetcht1  %%mem
%endmacro
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
%define VMOVPS  vmovups

%define SZ	4
%define SZ4	4*SZ
%define ROUNDS 64*SZ4

%define a xmm0
%define b xmm1
%define c xmm2
%define d xmm3
%define e xmm4
%define f xmm5
%define g xmm6
%define h xmm7

%define a0 xmm8
%define a1 xmm9
%define a2 xmm10

%define TT0 xmm14
%define TT1 xmm13
%define TT2 xmm12
%define TT3 xmm11
%define TT4 xmm10
%define TT5 xmm9

%define T1  xmm14
%define TMP xmm15

;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
%macro ROTATE_ARGS 0
%xdefine TMP_ h
%xdefine h g
%xdefine g f
%xdefine f e
%xdefine e d
%xdefine d c
%xdefine c b
%xdefine b a
%xdefine a TMP_
%endm
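; ROTATE_ARGS renames the a..h register aliases so each round's results land in
; the correct working variables without any register moves; eight consecutive
; rotations return the aliases to their original assignment.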

; PRORD reg, imm, tmp
%macro PRORD 3
%define %%reg %1
%define %%imm %2
%define %%tmp %3
	vpslld	%%tmp, %%reg, (32-(%%imm))
	vpsrld	%%reg, %%reg, %%imm
	vpor	%%reg, %%reg, %%tmp
%endmacro

; non-destructive
; PRORD_nd reg, imm, tmp, src
%macro PRORD_nd 4
%define %%reg %1
%define %%imm %2
%define %%tmp %3
%define %%src %4
	vpslld	%%tmp, %%src, (32-(%%imm))
	vpsrld	%%reg, %%src, %%imm
	vpor	%%reg, %%reg, %%tmp
%endmacro

; PRORD dst/src, amt
%macro PRORD 2
	PRORD	%1, %2, TMP
%endmacro

; PRORD_nd dst, src, amt
%macro PRORD_nd 3
	PRORD_nd	%1, %3, TMP, %2
%endmacro
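; PRORD/PRORD_nd emulate a 32-bit rotate right: AVX has no packed dword-rotate
; instruction, so the rotate is built from two shifts and an OR.  For example,
; "PRORD_nd a1, e, 25" expands to
;	vpslld	TMP, e, 7
;	vpsrld	a1, e, 25
;	vpor	a1, a1, TMP
; i.e. a1 = ROTR32(e, 25).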
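; Both ROUND_00_15 variants below perform one SHA256 round on four interleaved
; segments at once (one 32-bit lane per segment).  Per FIPS 180-4:
;	T1 = h + Sigma1(e) + Ch(e,f,g) + K[i] + W[i]
;	T2 = Sigma0(a) + Maj(a,b,c)
;	new e = d + T1,  new a = T1 + T2   (the other registers shift down)
; with
;	Sigma1(e) = ROTR(e,6)  ^ ROTR(e,11) ^ ROTR(e,25)
;	Sigma0(a) = ROTR(a,2)  ^ ROTR(a,13) ^ ROTR(a,22)
;	Ch(e,f,g) = (e & f) ^ (~e & g)
;	Maj(a,b,c) = (a & b) ^ (a & c) ^ (b & c)
; The code accumulates T1 and T2 into h and relies on ROTATE_ARGS for the
; register shift.  ROUND_00_15_R loads W[i] from the frame buffer, while
; ROUND_00_15_W (used by ROUND_16_XX) first stores the freshly computed W[i]
; back into the schedule.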
;; arguments passed implicitly in preprocessor symbols i, a...h
%macro ROUND_00_15_R 3
%define %%T1 %1
%define %%i  %2
%define %%data %3

	PRORD_nd	a0, e, (11-6)	; sig1: a0 = (e >> 5)

	vpxor	a2, f, g	; ch: a2 = f^g
	vpand	a2, e		; ch: a2 = (f^g)&e
	vpxor	a2, g		; a2 = ch

	PRORD_nd	a1, e, 25		; sig1: a1 = (e >> 25)
	vmovdqa	%%T1, [SZ4*(%%i&0xf) + %%data]
	vpaddd	%%T1, %%T1, [TBL + ROUND]	; T1 = W + K
	vpxor	a0, a0, e	; sig1: a0 = e ^ (e >> 5)
	PRORD	a0, 6		; sig1: a0 = (e >> 6) ^ (e >> 11)
	vpaddd	h, h, a2	; h = h + ch
	PRORD_nd	a2, a, (13-2)	; sig0: a2 = (a >> 11)
	vpaddd	h, h, %%T1	; h = h + ch + W + K
	vpxor	a0, a0, a1	; a0 = sigma1
	PRORD_nd	a1, a, 22	; sig0: a1 = (a >> 22)
	vpxor	%%T1, a, c	; maj: T1 = a^c
	add	ROUND, SZ4	; ROUND++
	vpand	%%T1, %%T1, b	; maj: T1 = (a^c)&b
	vpaddd	h, h, a0

	vpaddd	d, d, h

	vpxor	a2, a2, a	; sig0: a2 = a ^ (a >> 11)
	PRORD	a2, 2		; sig0: a2 = (a >> 2) ^ (a >> 13)
	vpxor	a2, a2, a1	; a2 = sig0
	vpand	a1, a, c	; maj: a1 = a&c
	vpor	a1, a1, %%T1	; a1 = maj
	vpaddd	h, h, a1	; h = h + ch + W + K + maj
	vpaddd	h, h, a2	; h = h + ch + W + K + maj + sigma0

	ROTATE_ARGS
%endm
;; arguments passed implicitly in preprocessor symbols i, a...h
%macro ROUND_00_15_W 3
%define %%T1 %1
%define %%i  %2
%define %%data %3

	PRORD_nd	a0, e, (11-6)	; sig1: a0 = (e >> 5)

	vpxor	a2, f, g	; ch: a2 = f^g
	vpand	a2, e		; ch: a2 = (f^g)&e
	vpxor	a2, g		; a2 = ch

	PRORD_nd	a1, e, 25		; sig1: a1 = (e >> 25)
	vmovdqa	[SZ4*(%%i&0xf) + %%data], %%T1
	vpaddd	%%T1, %%T1, [TBL + ROUND]	; T1 = W + K
	vpxor	a0, a0, e	; sig1: a0 = e ^ (e >> 5)
	PRORD	a0, 6		; sig1: a0 = (e >> 6) ^ (e >> 11)
	vpaddd	h, h, a2	; h = h + ch
	PRORD_nd	a2, a, (13-2)	; sig0: a2 = (a >> 11)
	vpaddd	h, h, %%T1	; h = h + ch + W + K
	vpxor	a0, a0, a1	; a0 = sigma1
	PRORD_nd	a1, a, 22	; sig0: a1 = (a >> 22)
	vpxor	%%T1, a, c	; maj: T1 = a^c
	add	ROUND, SZ4	; ROUND++
	vpand	%%T1, %%T1, b	; maj: T1 = (a^c)&b
	vpaddd	h, h, a0

	vpaddd	d, d, h

	vpxor	a2, a2, a	; sig0: a2 = a ^ (a >> 11)
	PRORD	a2, 2		; sig0: a2 = (a >> 2) ^ (a >> 13)
	vpxor	a2, a2, a1	; a2 = sig0
	vpand	a1, a, c	; maj: a1 = a&c
	vpor	a1, a1, %%T1	; a1 = maj
	vpaddd	h, h, a1	; h = h + ch + W + K + maj
	vpaddd	h, h, a2	; h = h + ch + W + K + maj + sigma0

	ROTATE_ARGS
%endm

;; arguments passed implicitly in preprocessor symbols i, a...h
%macro ROUND_16_XX 3
%define %%T1 %1
%define %%i  %2
%define %%data %3

	vmovdqa	%%T1, [SZ4*((%%i-15)&0xf) + %%data]
	vmovdqa	a1, [SZ4*((%%i-2)&0xf) + %%data]
	vmovdqa	a0, %%T1
	PRORD	%%T1, 18-7
	vmovdqa	a2, a1
	PRORD	a1, 19-17
	vpxor	%%T1, %%T1, a0
	PRORD	%%T1, 7
	vpxor	a1, a1, a2
	PRORD	a1, 17
	vpsrld	a0, a0, 3
	vpxor	%%T1, %%T1, a0
	vpsrld	a2, a2, 10
	vpxor	a1, a1, a2
	vpaddd	%%T1, %%T1, [SZ4*((%%i-16)&0xf) + %%data]
	vpaddd	a1, a1, [SZ4*((%%i-7)&0xf) + %%data]
	vpaddd	%%T1, %%T1, a1

	ROUND_00_15_W %%T1, %%i, %%data
%endm
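; Message schedule extension used by ROUND_16_XX (FIPS 180-4):
;	W[i] = sigma1(W[i-2]) + W[i-7] + sigma0(W[i-15]) + W[i-16]
;	sigma0(x) = ROTR(x,7)  ^ ROTR(x,18) ^ (x >> 3)
;	sigma1(x) = ROTR(x,17) ^ ROTR(x,19) ^ (x >> 10)
; The schedule is kept as a 16-entry circular buffer in the frame buffer, hence
; the (&0xf) indexing above.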

;init hash digests
; segs_digests:low addr-> high_addr
; a  | b  |  c | ...|  p | (16)
; h0 | h0 | h0 | ...| h0 |    | Aa| Ab | Ac |...| Ap |
; h1 | h1 | h1 | ...| h1 |    | Ba| Bb | Bc |...| Bp |
; ....
; h7 | h7 | h7 | ...| h7 |    | Ha| Hb | Hc |...| Hp |

align 32

;void _mh_sha256_block_avx(const uint8_t * input_data, uint32_t digests[ISAL_SHA256_DIGEST_WORDS][ISAL_HASH_SEGS],
;		uint8_t frame_buffer[ISAL_MH_SHA256_BLOCK_SIZE], uint32_t num_blocks);
; arg 0 pointer to input data
; arg 1 pointer to digests, includes the segment digests (uint32_t digests[8][16])
; arg 2 pointer to aligned_frame_buffer, which is used to hold the big-endian data
; arg 3 number of 1KB blocks
;
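; Each .block_loop iteration consumes 1KB of input: the block is viewed as 16
; rows of 16 dwords, where dword j of every row belongs to segment j, so every
; segment gets one 64-byte SHA256 block.  The rows are byte-swapped into the
; frame buffer grouped four segments at a time (4 x 256 bytes), and .segs_loop
; then runs the 64 rounds on four segments per pass (one dword lane per segment
; in each xmm register), four passes per 1KB block.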
mk_global _mh_sha256_block_avx, function, internal
func(_mh_sha256_block_avx)
	endbranch
	FUNC_SAVE
	; save rsp
	mov	RSP_SAVE, rsp

	cmp	loops, 0
	jle	.return

	; leave enough space to store segs_digests
	sub     rsp, FRAMESZ
	; align rsp to 16 Bytes needed by avx
	and	rsp, ~0x0F
	lea	TBL,[TABLE]

 %assign I 0					; copy segs_digests into stack
 %rep 8
	VMOVPS  a, [mh_digests_p + I*64 + 16*0]
	VMOVPS  b, [mh_digests_p + I*64 + 16*1]
	VMOVPS  c, [mh_digests_p + I*64 + 16*2]
	VMOVPS  d, [mh_digests_p + I*64 + 16*3]

	vmovdqa [rsp + I*64 + 16*0], a
	vmovdqa [rsp + I*64 + 16*1], b
	vmovdqa [rsp + I*64 + 16*2], c
	vmovdqa [rsp + I*64 + 16*3], d
 %assign I (I+1)
 %endrep

.block_loop:
	;transform to big-endian data and store in the aligned frame buffer
	vmovdqa  TMP, [PSHUFFLE_BYTE_FLIP_MASK]
	;transform input data from DWORD*16_SEGS*8 to DWORD*4_SEGS*8*4
 %assign I 0
 %rep 16
	VMOVPS   TT0,[mh_in_p + I*64+0*16]
	VMOVPS   TT1,[mh_in_p + I*64+1*16]
	VMOVPS   TT2,[mh_in_p + I*64+2*16]
	VMOVPS   TT3,[mh_in_p + I*64+3*16]

	vpshufb  TT0, TMP
	vmovdqa  [mh_data_p +(I)*16 +0*256],TT0
	vpshufb  TT1, TMP
	vmovdqa  [mh_data_p +(I)*16 +1*256],TT1
	vpshufb  TT2, TMP
	vmovdqa  [mh_data_p +(I)*16 +2*256],TT2
	vpshufb  TT3, TMP
	vmovdqa  [mh_data_p +(I)*16 +3*256],TT3
 %assign I (I+1)
 %endrep

	mov	mh_segs, 0			;start from the first 4 segments
	mov	pref, 1024			;avoid prefetching the same data repeatedly
 .segs_loop:
	xor	ROUND, ROUND
	;; Initialize digests
	vmovdqa  a, [rsp + 0*64 + mh_segs]
	vmovdqa  b, [rsp + 1*64 + mh_segs]
	vmovdqa  c, [rsp + 2*64 + mh_segs]
	vmovdqa  d, [rsp + 3*64 + mh_segs]
	vmovdqa  e, [rsp + 4*64 + mh_segs]
	vmovdqa  f, [rsp + 5*64 + mh_segs]
	vmovdqa  g, [rsp + 6*64 + mh_segs]
	vmovdqa  h, [rsp + 7*64 + mh_segs]

  %assign i 0
  %rep 4
	ROUND_00_15_R	TT0, (i*4+0), mh_data_p
	ROUND_00_15_R	TT1, (i*4+1), mh_data_p
	ROUND_00_15_R	TT2, (i*4+2), mh_data_p
	ROUND_00_15_R	TT3, (i*4+3), mh_data_p
  %assign i (i+1)
  %endrep
	PREFETCH_X [mh_in_p + pref+128*0]

  %assign i 16
  %rep 48
	%if i = 48
		PREFETCH_X [mh_in_p + pref+128*1]
	%endif
	ROUND_16_XX	T1, i, mh_data_p
  %assign i (i+1)
  %endrep

	;; add old digest
	vpaddd	a, a, [rsp + 0*64 + mh_segs]
	vpaddd	b, b, [rsp + 1*64 + mh_segs]
	vpaddd	c, c, [rsp + 2*64 + mh_segs]
	vpaddd	d, d, [rsp + 3*64 + mh_segs]
	vpaddd	e, e, [rsp + 4*64 + mh_segs]
	vpaddd	f, f, [rsp + 5*64 + mh_segs]
	vpaddd	g, g, [rsp + 6*64 + mh_segs]
	vpaddd	h, h, [rsp + 7*64 + mh_segs]

	; write out digests
	vmovdqa  [rsp + 0*64 + mh_segs], a
	vmovdqa  [rsp + 1*64 + mh_segs], b
	vmovdqa  [rsp + 2*64 + mh_segs], c
	vmovdqa  [rsp + 3*64 + mh_segs], d
	vmovdqa  [rsp + 4*64 + mh_segs], e
	vmovdqa  [rsp + 5*64 + mh_segs], f
	vmovdqa  [rsp + 6*64 + mh_segs], g
	vmovdqa  [rsp + 7*64 + mh_segs], h

	add	pref,      256
	add	mh_data_p, 256
	add 	mh_segs,   16
	cmp	mh_segs,   64
	jc 	.segs_loop

	sub	mh_data_p, (1024)
	add 	mh_in_p,   (1024)
	sub     loops,     1
	jne     .block_loop

 %assign I 0					; copy segs_digests back to mh_digests_p
 %rep 8
	vmovdqa a, [rsp + I*64 + 16*0]
	vmovdqa b, [rsp + I*64 + 16*1]
	vmovdqa c, [rsp + I*64 + 16*2]
	vmovdqa d, [rsp + I*64 + 16*3]

	VMOVPS  [mh_digests_p + I*64 + 16*0], a
	VMOVPS  [mh_digests_p + I*64 + 16*1], b
	VMOVPS  [mh_digests_p + I*64 + 16*2], c
	VMOVPS  [mh_digests_p + I*64 + 16*3], d
 %assign I (I+1)
 %endrep
	mov	rsp, RSP_SAVE			; restore rsp

.return:
	FUNC_RESTORE
	ret

endproc_frame

section .data align=64

align 64
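; The 64 SHA256 round constants K[0..63], each dword replicated four times so a
; single vpaddd adds K[i] to all four segment lanes at once.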
TABLE:
	dq	0x428a2f98428a2f98, 0x428a2f98428a2f98
	dq	0x7137449171374491, 0x7137449171374491
	dq	0xb5c0fbcfb5c0fbcf, 0xb5c0fbcfb5c0fbcf
	dq	0xe9b5dba5e9b5dba5, 0xe9b5dba5e9b5dba5
	dq	0x3956c25b3956c25b, 0x3956c25b3956c25b
	dq	0x59f111f159f111f1, 0x59f111f159f111f1
	dq	0x923f82a4923f82a4, 0x923f82a4923f82a4
	dq	0xab1c5ed5ab1c5ed5, 0xab1c5ed5ab1c5ed5
	dq	0xd807aa98d807aa98, 0xd807aa98d807aa98
	dq	0x12835b0112835b01, 0x12835b0112835b01
	dq	0x243185be243185be, 0x243185be243185be
	dq	0x550c7dc3550c7dc3, 0x550c7dc3550c7dc3
	dq	0x72be5d7472be5d74, 0x72be5d7472be5d74
	dq	0x80deb1fe80deb1fe, 0x80deb1fe80deb1fe
	dq	0x9bdc06a79bdc06a7, 0x9bdc06a79bdc06a7
	dq	0xc19bf174c19bf174, 0xc19bf174c19bf174
	dq	0xe49b69c1e49b69c1, 0xe49b69c1e49b69c1
	dq	0xefbe4786efbe4786, 0xefbe4786efbe4786
	dq	0x0fc19dc60fc19dc6, 0x0fc19dc60fc19dc6
	dq	0x240ca1cc240ca1cc, 0x240ca1cc240ca1cc
	dq	0x2de92c6f2de92c6f, 0x2de92c6f2de92c6f
	dq	0x4a7484aa4a7484aa, 0x4a7484aa4a7484aa
	dq	0x5cb0a9dc5cb0a9dc, 0x5cb0a9dc5cb0a9dc
	dq	0x76f988da76f988da, 0x76f988da76f988da
	dq	0x983e5152983e5152, 0x983e5152983e5152
	dq	0xa831c66da831c66d, 0xa831c66da831c66d
	dq	0xb00327c8b00327c8, 0xb00327c8b00327c8
	dq	0xbf597fc7bf597fc7, 0xbf597fc7bf597fc7
	dq	0xc6e00bf3c6e00bf3, 0xc6e00bf3c6e00bf3
	dq	0xd5a79147d5a79147, 0xd5a79147d5a79147
	dq	0x06ca635106ca6351, 0x06ca635106ca6351
	dq	0x1429296714292967, 0x1429296714292967
	dq	0x27b70a8527b70a85, 0x27b70a8527b70a85
	dq	0x2e1b21382e1b2138, 0x2e1b21382e1b2138
	dq	0x4d2c6dfc4d2c6dfc, 0x4d2c6dfc4d2c6dfc
	dq	0x53380d1353380d13, 0x53380d1353380d13
	dq	0x650a7354650a7354, 0x650a7354650a7354
	dq	0x766a0abb766a0abb, 0x766a0abb766a0abb
	dq	0x81c2c92e81c2c92e, 0x81c2c92e81c2c92e
	dq	0x92722c8592722c85, 0x92722c8592722c85
	dq	0xa2bfe8a1a2bfe8a1, 0xa2bfe8a1a2bfe8a1
	dq	0xa81a664ba81a664b, 0xa81a664ba81a664b
	dq	0xc24b8b70c24b8b70, 0xc24b8b70c24b8b70
	dq	0xc76c51a3c76c51a3, 0xc76c51a3c76c51a3
	dq	0xd192e819d192e819, 0xd192e819d192e819
	dq	0xd6990624d6990624, 0xd6990624d6990624
	dq	0xf40e3585f40e3585, 0xf40e3585f40e3585
	dq	0x106aa070106aa070, 0x106aa070106aa070
	dq	0x19a4c11619a4c116, 0x19a4c11619a4c116
	dq	0x1e376c081e376c08, 0x1e376c081e376c08
	dq	0x2748774c2748774c, 0x2748774c2748774c
	dq	0x34b0bcb534b0bcb5, 0x34b0bcb534b0bcb5
	dq	0x391c0cb3391c0cb3, 0x391c0cb3391c0cb3
	dq	0x4ed8aa4a4ed8aa4a, 0x4ed8aa4a4ed8aa4a
	dq	0x5b9cca4f5b9cca4f, 0x5b9cca4f5b9cca4f
	dq	0x682e6ff3682e6ff3, 0x682e6ff3682e6ff3
	dq	0x748f82ee748f82ee, 0x748f82ee748f82ee
	dq	0x78a5636f78a5636f, 0x78a5636f78a5636f
	dq	0x84c8781484c87814, 0x84c8781484c87814
	dq	0x8cc702088cc70208, 0x8cc702088cc70208
	dq	0x90befffa90befffa, 0x90befffa90befffa
	dq	0xa4506ceba4506ceb, 0xa4506ceba4506ceb
	dq	0xbef9a3f7bef9a3f7, 0xbef9a3f7bef9a3f7
	dq	0xc67178f2c67178f2, 0xc67178f2c67178f2
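; vpshufb with this mask reverses the bytes within each dword of an xmm
; register, converting little-endian input words to the big-endian order
; SHA256 operates on.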
PSHUFFLE_BYTE_FLIP_MASK: dq 0x0405060700010203, 0x0c0d0e0f08090a0b