; /isa-l_crypto/sha1_mb/sha1_mb_mgr_flush_avx512.asm (revision d28f1034f736e3eb791c3cf6bff3e2fa81fb5331)
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;  Copyright(c) 2011-2016 Intel Corporation All rights reserved.
;
;  Redistribution and use in source and binary forms, with or without
;  modification, are permitted provided that the following conditions
;  are met:
;    * Redistributions of source code must retain the above copyright
;      notice, this list of conditions and the following disclaimer.
;    * Redistributions in binary form must reproduce the above copyright
;      notice, this list of conditions and the following disclaimer in
;      the documentation and/or other materials provided with the
;      distribution.
;    * Neither the name of Intel Corporation nor the names of its
;      contributors may be used to endorse or promote products derived
;      from this software without specific prior written permission.
;
;  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
;  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
;  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
;  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
;  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
;  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
;  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
;  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
;  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
;  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;

%include "sha1_job.asm"
%include "sha1_mb_mgr_datastruct.asm"
%include "reg_sizes.asm"

extern sha1_mb_x16_avx512
extern sha1_opt_x1

[bits 64]
default rel
section .text

%ifidn __OUTPUT_FORMAT__, elf64
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
; LINUX register definitions
%define arg1    rdi ; rcx
%define arg2    rsi ; rdx
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
%else
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
; WINDOWS register definitions
%define arg1    rcx
%define arg2    rdx
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
%endif

; Common definitions. Each register below is tagged with its status across
; the hash call: unused, covered, or unchanged.
%define state   arg1	; unchanged
%define job     arg2	; unused
%define len2    arg2	; unused

; idx must be a register not clobbered by sha1_mb_x16_avx512
%define idx             rbp	; unchanged

%define unused_lanes    rbx	; covered
%define lane_data       rbx	; covered
%define tmp2            rbx	; covered

%define num_lanes_inuse r9	; covered

%define job_rax         rax	; covered
%define tmp             rax	; unused

; STACK_SPACE needs to be an odd multiple of 8
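; (rsp is 8 bytes off a 16-byte boundary at function entry, so subtracting an
; odd multiple of 8 leaves the _XMM_SAVE area 16-byte aligned for vmovdqa)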
_XMM_SAVE_SIZE  equ 10*16
_GPR_SAVE_SIZE  equ 8*8
_ALIGN_SIZE     equ 8

_XMM_SAVE       equ 0
_GPR_SAVE       equ _XMM_SAVE + _XMM_SAVE_SIZE
STACK_SPACE     equ _GPR_SAVE + _GPR_SAVE_SIZE + _ALIGN_SIZE

%define APPEND(a,b) a %+ b

; SHA1_JOB* _sha1_mb_mgr_flush_avx512(SHA1_MB_JOB_MGR *state)
; arg 1 : rcx (Windows) / rdi (Linux) : state
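; Flush completes one outstanding job even though fewer than 16 lanes are
; filled: idle lanes are pointed at a live lane's data so the x16 kernel can
; run, then the lane with the shortest remaining length is completed and its
; job returned (NULL if no jobs are queued).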
mk_global _sha1_mb_mgr_flush_avx512, function, internal
_sha1_mb_mgr_flush_avx512:
	endbranch
	sub     rsp, STACK_SPACE
	mov     [rsp + _GPR_SAVE + 8*0], rbx
	mov     [rsp + _GPR_SAVE + 8*3], rbp
	mov     [rsp + _GPR_SAVE + 8*4], r12
	mov     [rsp + _GPR_SAVE + 8*5], r13
	mov     [rsp + _GPR_SAVE + 8*6], r14
	mov     [rsp + _GPR_SAVE + 8*7], r15
%ifidn __OUTPUT_FORMAT__, win64
	mov     [rsp + _GPR_SAVE + 8*1], rsi
	mov     [rsp + _GPR_SAVE + 8*2], rdi
	vmovdqa  [rsp + _XMM_SAVE + 16*0], xmm6
	vmovdqa  [rsp + _XMM_SAVE + 16*1], xmm7
	vmovdqa  [rsp + _XMM_SAVE + 16*2], xmm8
	vmovdqa  [rsp + _XMM_SAVE + 16*3], xmm9
	vmovdqa  [rsp + _XMM_SAVE + 16*4], xmm10
	vmovdqa  [rsp + _XMM_SAVE + 16*5], xmm11
	vmovdqa  [rsp + _XMM_SAVE + 16*6], xmm12
	vmovdqa  [rsp + _XMM_SAVE + 16*7], xmm13
	vmovdqa  [rsp + _XMM_SAVE + 16*8], xmm14
	vmovdqa  [rsp + _XMM_SAVE + 16*9], xmm15
%endif

	mov	DWORD(num_lanes_inuse), [state + _num_lanes_inuse]
	cmp	num_lanes_inuse, 0
	jz	return_null

	; find a lane with a non-null job
	xor	idx, idx
%assign I 1
%rep 15
	cmp	qword [state + _ldata + I * _LANE_DATA_size + _job_in_lane], 0
	cmovne	idx, [APPEND(lane_,I)]
%assign I (I+1)
%endrep
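	; (cmov has no immediate form, so the candidate lane numbers are loaded
	; from the lane_1..lane_15 constants in the .data section)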

	; copy idx to empty lanes
copy_lane_data:
	mov	tmp, [state + _args + _data_ptr + 8*idx]

%assign I 0
%rep 16
	cmp	qword [state + _ldata + I * _LANE_DATA_size + _job_in_lane], 0
	jne	APPEND(skip_,I)
	mov	[state + _args + _data_ptr + 8*I], tmp
	mov	dword [state + _lens + 4*I], 0xFFFFFFFF
APPEND(skip_,I):
%assign I (I+1)
%endrep
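	; empty lanes now point at valid data and carry the maximum length value,
	; so the x16 kernel can hash them harmlessly and they never win the
	; min-length search below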

	; Find min length
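	; each 32-bit _lens entry packs the remaining length in blocks in its
	; upper 28 bits and the lane index in its low 4 bits, so a dword minimum
	; yields both the shortest length and the lane that holds it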
	vmovdqu ymm0, [state + _lens + 0*32]
	vmovdqu ymm1, [state + _lens + 1*32]

	vpminud ymm2, ymm0, ymm1        ; ymm2 has {H1,G1,F1,E1,D1,C1,B1,A1}
	vpalignr ymm3, ymm3, ymm2, 8    ; ymm3 has {x,x,H1,G1,x,x,D1,C1}
	vpminud ymm2, ymm2, ymm3        ; ymm2 has {x,x,H2,G2,x,x,D2,C2}
	vpalignr ymm3, ymm3, ymm2, 4    ; ymm3 has {x,x, x,H2,x,x, x,D2}
	vpminud ymm2, ymm2, ymm3        ; ymm2 has {x,x, x,G3,x,x, x,C3}
	vperm2i128 ymm3, ymm2, ymm2, 1	; ymm3 has {x,x, x, x,x,x, x,C3}
	vpminud ymm2, ymm2, ymm3        ; ymm2 has min value in low dword

	vmovd   DWORD(idx), xmm2
	mov	len2, idx
	and	idx, 0xF
	shr	len2, 4
	jz	len_is_0
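	; len2 is the minimum remaining length in blocks; if it is zero, the job
	; in lane idx is already complete and no hashing is needed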
	; compare num_lanes_inuse with the single-buffer threshold; if it is at or
	; below the threshold, use the single-buffer (sb) function
	cmp	dword [state + _num_lanes_inuse], SHA1_SB_THRESHOLD_AVX512
	ja	mb_processing

	; reset lens[idx] to length 0, keeping the lane index in the low nibble
	mov     [state + _lens + idx*4], DWORD(idx)
	mov	r10, idx
	or	r10, 0x4000	; 16 lanes * 4 bytes: r10b is idx, the next byte of r10 is 64
163	; "state" and "args" are the same address, arg1
164	; len is arg2, idx and nlane in r10
165	call    sha1_opt_x1
166	; state and idx are intact
167	jmp	len_is_0
168
mb_processing:

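	; clear the lane-index nibble from the min value, broadcast it to every
	; dword, and subtract it from all lens entries: every lane is hashed for
	; len2 blocks and the winning lane's remaining length drops to zero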
	vpand   ymm2, ymm2, [rel clear_low_nibble]
	vpshufd ymm2, ymm2, 0

	vpsubd  ymm0, ymm0, ymm2
	vpsubd  ymm1, ymm1, ymm2

	vmovdqu [state + _lens + 0*32], ymm0
	vmovdqu [state + _lens + 1*32], ymm1

	; "state" and "args" are the same address, arg1
	; len is arg2
	call	sha1_mb_x16_avx512
	; state and idx are intact

len_is_0:
	; process completed job "idx"
	imul	lane_data, idx, _LANE_DATA_size
	lea	lane_data, [state + _ldata + lane_data]

	mov	job_rax, [lane_data + _job_in_lane]
	mov	qword [lane_data + _job_in_lane], 0
	mov	dword [job_rax + _status], ISAL_STS_COMPLETED
	mov	unused_lanes, [state + _unused_lanes]
	shl	unused_lanes, 4
	or	unused_lanes, idx
	mov	[state + _unused_lanes], unused_lanes
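	; unused_lanes is a stack of free 4-bit lane indices packed into one
	; register; the shl/or above pushes the freed lane onto it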

	mov     DWORD(num_lanes_inuse), [state + _num_lanes_inuse]
	sub     num_lanes_inuse, 1
	mov     [state + _num_lanes_inuse], DWORD(num_lanes_inuse)

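	; gather the five digest words for lane idx; _args_digest is stored
	; transposed, with a 64-byte stride (16 lanes * 4 bytes) between the
	; A, B, C, D and E rows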
	vmovd	xmm0, [state + _args_digest + 4*idx + 0*64]
	vpinsrd	xmm0, [state + _args_digest + 4*idx + 1*64], 1
	vpinsrd	xmm0, [state + _args_digest + 4*idx + 2*64], 2
	vpinsrd	xmm0, [state + _args_digest + 4*idx + 3*64], 3
	mov	DWORD(tmp2), [state + _args_digest + 4*idx + 4*64]

	vmovdqa	[job_rax + _result_digest + 0*16], xmm0
	mov	[job_rax + _result_digest + 1*16], DWORD(tmp2)

return:

%ifidn __OUTPUT_FORMAT__, win64
	vmovdqa  xmm6, [rsp + _XMM_SAVE + 16*0]
	vmovdqa  xmm7, [rsp + _XMM_SAVE + 16*1]
	vmovdqa  xmm8, [rsp + _XMM_SAVE + 16*2]
	vmovdqa  xmm9, [rsp + _XMM_SAVE + 16*3]
	vmovdqa  xmm10, [rsp + _XMM_SAVE + 16*4]
	vmovdqa  xmm11, [rsp + _XMM_SAVE + 16*5]
	vmovdqa  xmm12, [rsp + _XMM_SAVE + 16*6]
	vmovdqa  xmm13, [rsp + _XMM_SAVE + 16*7]
	vmovdqa  xmm14, [rsp + _XMM_SAVE + 16*8]
	vmovdqa  xmm15, [rsp + _XMM_SAVE + 16*9]
	mov     rsi, [rsp + _GPR_SAVE + 8*1]
	mov     rdi, [rsp + _GPR_SAVE + 8*2]
%endif
	mov     rbx, [rsp + _GPR_SAVE + 8*0]
	mov     rbp, [rsp + _GPR_SAVE + 8*3]
	mov     r12, [rsp + _GPR_SAVE + 8*4]
	mov     r13, [rsp + _GPR_SAVE + 8*5]
	mov     r14, [rsp + _GPR_SAVE + 8*6]
	mov     r15, [rsp + _GPR_SAVE + 8*7]
	add     rsp, STACK_SPACE

	ret

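	; no lanes in use: nothing to flush, return NULL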
return_null:
	xor	job_rax, job_rax
	jmp	return

section .data align=16

align 16
clear_low_nibble:
	dq 0x00000000FFFFFFF0, 0x0000000000000000
	dq 0x00000000FFFFFFF0, 0x0000000000000000
lane_1:     dq  1
lane_2:     dq  2
lane_3:     dq  3
lane_4:     dq  4
lane_5:     dq  5
lane_6:     dq  6
lane_7:     dq  7
lane_8:     dq  8
lane_9:     dq  9
lane_10:    dq  10
lane_11:    dq  11
lane_12:    dq  12
lane_13:    dq  13
lane_14:    dq  14
lane_15:    dq  15