;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;  Copyright(c) 2011-2016 Intel Corporation All rights reserved.
;
;  Redistribution and use in source and binary forms, with or without
;  modification, are permitted provided that the following conditions
;  are met:
;    * Redistributions of source code must retain the above copyright
;      notice, this list of conditions and the following disclaimer.
;    * Redistributions in binary form must reproduce the above copyright
;      notice, this list of conditions and the following disclaimer in
;      the documentation and/or other materials provided with the
;      distribution.
;    * Neither the name of Intel Corporation nor the names of its
;      contributors may be used to endorse or promote products derived
;      from this software without specific prior written permission.
;
;  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
;  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
;  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
;  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
;  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
;  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
;  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
;  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
;  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
;  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;

%include "sha1_job.asm"
%include "memcpy.asm"
%include "sha1_mb_mgr_datastruct.asm"

%include "reg_sizes.asm"

extern sha1_mb_x8_avx2

[bits 64]
default rel
section .text

%ifidn __OUTPUT_FORMAT__, elf64
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
; LINUX register definitions
%define arg1    rdi ; rcx
%define arg2    rsi ; rdx

%define size_offset     rcx ; rdi
%define tmp2            rcx ; rdi

%define extra_blocks    rdx
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;

%else

;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
; WINDOWS register definitions
%define arg1    rcx
%define arg2    rdx

%define size_offset     rdi
%define tmp2            rdi

%define extra_blocks    rsi
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
%endif

; Common definitions
%define state   arg1
%define job     arg2
%define len2    arg2
%define p2      arg2

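; Several symbolic names intentionally share one physical register
; (len2/p2 on arg2, idx/last_len on r8, etc.); their live ranges never
; overlap.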
; idx must be a register not clobbered by sha1_mb_x8_avx2
%define idx             r8
%define last_len        r8

%define p               r11
%define start_offset    r11

%define unused_lanes    rbx

%define job_rax         rax
%define len             rax

%define lane            rbp
%define tmp3            rbp

%define tmp             r9

%define lane_data       r10

; STACK_SPACE needs to be an odd multiple of 8
%define STACK_SPACE	8*8 + 16*10 + 8
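; At function entry rsp % 16 == 8 (the call pushed a return address), so
; subtracting an odd multiple of 8 (8*8 + 16*10 + 8 = 232) re-aligns rsp
; to 16 bytes, as required by the vmovdqa XMM saves at [rsp + 8*8 + ...].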

; JOB* _sha1_mb_mgr_submit_avx2(MB_MGR *state, JOB_SHA1 *job)
; arg 1 : rcx (win64) / rdi (elf64) : state
; arg 2 : rdx (win64) / rsi (elf64) : job
mk_global _sha1_mb_mgr_submit_avx2, function, internal
_sha1_mb_mgr_submit_avx2:
	endbranch

	sub     rsp, STACK_SPACE
	mov     [rsp + 8*0], rbx
	mov     [rsp + 8*3], rbp
	mov     [rsp + 8*4], r12
	mov     [rsp + 8*5], r13
	mov     [rsp + 8*6], r14
	mov     [rsp + 8*7], r15
%ifidn __OUTPUT_FORMAT__, win64
	mov     [rsp + 8*1], rsi
	mov     [rsp + 8*2], rdi
	vmovdqa  [rsp + 8*8 + 16*0], xmm6
	vmovdqa  [rsp + 8*8 + 16*1], xmm7
	vmovdqa  [rsp + 8*8 + 16*2], xmm8
	vmovdqa  [rsp + 8*8 + 16*3], xmm9
	vmovdqa  [rsp + 8*8 + 16*4], xmm10
	vmovdqa  [rsp + 8*8 + 16*5], xmm11
	vmovdqa  [rsp + 8*8 + 16*6], xmm12
	vmovdqa  [rsp + 8*8 + 16*7], xmm13
	vmovdqa  [rsp + 8*8 + 16*8], xmm14
	vmovdqa  [rsp + 8*8 + 16*9], xmm15
%endif

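	; unused_lanes is a stack of 4-bit lane ids packed into one register;
	; pop the next free lane id from the low nibble.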
	mov	unused_lanes, [state + _unused_lanes]
	mov	lane, unused_lanes
	and	lane, 0xF
	shr	unused_lanes, 4
	imul	lane_data, lane, _LANE_DATA_size
	mov	dword [job + _status], ISAL_STS_BEING_PROCESSED
	lea	lane_data, [state + _ldata + lane_data]
	mov	[state + _unused_lanes], unused_lanes
	mov	DWORD(len), [job + _len]

	mov	[lane_data + _job_in_lane], job

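	; Encode the lens entry as (block_count << 4) | lane so the vector
	; minimum search below yields the shortest job and its lane id together.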
	shl	len, 4
	or	len, lane
	mov	[state + _lens + 4*lane], DWORD(len)
	; Load digest words from result_digest
	vmovdqu	xmm0, [job + _result_digest + 0*16]
	mov	DWORD(tmp), [job + _result_digest + 1*16]

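	; args_digest is stored transposed: digest word i of lane n lives at
	; [_args_digest + 4*n + i*32] (8 lanes * 4 bytes = 32-byte row stride).
	; Scatter this job's five SHA-1 words H0..H4 into its lane's column.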
	vmovd   [state + _args_digest + 4*lane + 0*32], xmm0
	vpextrd [state + _args_digest + 4*lane + 1*32], xmm0, 1
	vpextrd [state + _args_digest + 4*lane + 2*32], xmm0, 2
	vpextrd [state + _args_digest + 4*lane + 3*32], xmm0, 3
	mov     [state + _args_digest + 4*lane + 4*32], DWORD(tmp)

	mov	p, [job + _buffer]
	mov	[state + _args_data_ptr + 8*lane], p

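	; Hashing starts only once all 8 lanes are occupied; until then return
	; NULL (a partially filled manager is drained by the flush routine).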
	add	dword [state + _num_lanes_inuse], 1
	cmp	unused_lanes, 0xf
	jne	return_null

start_loop:
	; Find min length
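	; Reduce the eight packed (block_count<<4)|lane words to their minimum
	; in the low dword of xmm2; its low nibble is the winning lane id.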
	vmovdqa xmm0, [state + _lens + 0*16]
	vmovdqa xmm1, [state + _lens + 1*16]

	vpminud xmm2, xmm0, xmm1        ; xmm2 has {D,C,B,A}
	vpalignr xmm3, xmm3, xmm2, 8    ; xmm3 has {x,x,D,C}
	vpminud xmm2, xmm2, xmm3        ; xmm2 has {x,x,E,F}
	vpalignr xmm3, xmm3, xmm2, 4    ; xmm3 has {x,x,x,E}
	vpminud xmm2, xmm2, xmm3        ; xmm2 has min value in low dword

	vmovd   DWORD(idx), xmm2
	mov	len2, idx
	and	idx, 0xF
	shr	len2, 4
	jz	len_is_0

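	; Clear the lane nibble of the minimum, broadcast it, and subtract it
	; from every lens entry; block counts become relative to the work being
	; started and the lane ids in the low nibbles are preserved.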
	vpand   xmm2, xmm2, [rel clear_low_nibble]
	vpshufd xmm2, xmm2, 0

	vpsubd  xmm0, xmm0, xmm2
	vpsubd  xmm1, xmm1, xmm2

	vmovdqa [state + _lens + 0*16], xmm0
	vmovdqa [state + _lens + 1*16], xmm1

	; "state" and "args" are the same address, arg1
	; len is arg2
	call	sha1_mb_x8_avx2

	; state and idx are intact

len_is_0:
	; process completed job "idx"
	imul	lane_data, idx, _LANE_DATA_size
	lea	lane_data, [state + _ldata + lane_data]

	mov	job_rax, [lane_data + _job_in_lane]
	mov	unused_lanes, [state + _unused_lanes]
	mov	qword [lane_data + _job_in_lane], 0
	mov	dword [job_rax + _status], ISAL_STS_COMPLETED
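	; Push the freed lane id back onto the unused_lanes stack.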
	shl	unused_lanes, 4
	or	unused_lanes, idx
	mov	[state + _unused_lanes], unused_lanes

	sub     dword [state + _num_lanes_inuse], 1

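	; Gather the five transposed digest words for lane "idx" back into a
	; contiguous result_digest.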
	vmovd	xmm0, [state + _args_digest + 4*idx + 0*32]
	vpinsrd	xmm0, [state + _args_digest + 4*idx + 1*32], 1
	vpinsrd	xmm0, [state + _args_digest + 4*idx + 2*32], 2
	vpinsrd	xmm0, [state + _args_digest + 4*idx + 3*32], 3
	mov	DWORD(tmp), [state + _args_digest + 4*idx + 4*32]

	vmovdqa	[job_rax + _result_digest + 0*16], xmm0
	mov	[job_rax + _result_digest + 1*16], DWORD(tmp)

return:

%ifidn __OUTPUT_FORMAT__, win64
	vmovdqa  xmm6, [rsp + 8*8 + 16*0]
	vmovdqa  xmm7, [rsp + 8*8 + 16*1]
	vmovdqa  xmm8, [rsp + 8*8 + 16*2]
	vmovdqa  xmm9, [rsp + 8*8 + 16*3]
	vmovdqa  xmm10, [rsp + 8*8 + 16*4]
	vmovdqa  xmm11, [rsp + 8*8 + 16*5]
	vmovdqa  xmm12, [rsp + 8*8 + 16*6]
	vmovdqa  xmm13, [rsp + 8*8 + 16*7]
	vmovdqa  xmm14, [rsp + 8*8 + 16*8]
	vmovdqa  xmm15, [rsp + 8*8 + 16*9]
	mov     rsi, [rsp + 8*1]
	mov     rdi, [rsp + 8*2]
%endif
	mov     rbx, [rsp + 8*0]
	mov     rbp, [rsp + 8*3]
	mov     r12, [rsp + 8*4]
	mov     r13, [rsp + 8*5]
	mov     r14, [rsp + 8*6]
	mov     r15, [rsp + 8*7]
	add     rsp, STACK_SPACE

	ret


return_null:
	xor	job_rax, job_rax
	jmp	return

section .data align=16

align 16
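; AND mask for the min-length search: keeps the low dword of the minimum
; lens value with its lane-id nibble (bits 3:0) cleared and zeroes the
; upper dwords ahead of the vpshufd broadcast.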
clear_low_nibble:
	dq 0x00000000FFFFFFF0, 0x0000000000000000
251