;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;  Copyright(c) 2011-2020 Intel Corporation All rights reserved.
;
;  Redistribution and use in source and binary forms, with or without
;  modification, are permitted provided that the following conditions
;  are met:
;    * Redistributions of source code must retain the above copyright
;      notice, this list of conditions and the following disclaimer.
;    * Redistributions in binary form must reproduce the above copyright
;      notice, this list of conditions and the following disclaimer in
;      the documentation and/or other materials provided with the
;      distribution.
;    * Neither the name of Intel Corporation nor the names of its
;      contributors may be used to endorse or promote products derived
;      from this software without specific prior written permission.
;
;  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
;  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
;  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
;  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
;  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
;  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
;  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
;  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
;  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
;  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;

%include "sm3_job.asm"
%include "sm3_mb_mgr_datastruct.asm"
%include "reg_sizes.asm"

extern sm3_mb_x16_avx512
;extern sm3_opt_x1

[bits 64]
default rel
section .text

%ifidn __OUTPUT_FORMAT__, elf64
	%define arg1    rdi ; rcx
	%define arg2    rsi ; rdx
	%define tmp4    rdx
%else
	%define arg1    rcx
	%define arg2    rdx
	%define tmp4    rsi
%endif


; Common register definitions

%define state   arg1
%define job     arg2
%define len2    arg2

%define idx             rbp

%define num_lanes_inuse r9
%define unused_lanes    rbx
%define lane_data       rbx
%define tmp2            rbx

%define job_rax         rax
%define tmp1            rax
%define size_offset     rax
%define tmp             rax
%define start_offset    rax

%define tmp3            arg1

%define extra_blocks    arg2
%define p               arg2



; STACK_SPACE needs to be an odd multiple of 8
_XMM_SAVE_SIZE  equ 10*16
_GPR_SAVE_SIZE  equ 8*8
_ALIGN_SIZE     equ 8

_XMM_SAVE       equ 0
_GPR_SAVE       equ _XMM_SAVE + _XMM_SAVE_SIZE
STACK_SPACE     equ _GPR_SAVE + _GPR_SAVE_SIZE + _ALIGN_SIZE

%define APPEND(a,b) a %+ b


; ISAL_SM3_JOB* _sm3_mb_mgr_flush_avx512(ISAL_SM3_MB_JOB_MGR *state)
; arg 1 : rcx : state
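;
; Flush: if no lanes are in use, return NULL. Otherwise pick a lane that still
; holds a job, point every idle lane at that lane's data (with length 0xFFFFFFFF
; so idle lanes never win the min-length search), run sm3_mb_x16_avx512 for the
; minimum remaining length, and return the job from the lane that completed.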
mk_global _sm3_mb_mgr_flush_avx512, function, internal
_sm3_mb_mgr_flush_avx512:
	endbranch

	; Save callee-saved registers onto the stack
	sub     rsp, STACK_SPACE
	mov     [rsp + _GPR_SAVE + 8*0], rbx
	mov     [rsp + _GPR_SAVE + 8*3], rbp
	mov     [rsp + _GPR_SAVE + 8*4], r12
	mov     [rsp + _GPR_SAVE + 8*5], r13
	mov     [rsp + _GPR_SAVE + 8*6], r14
	mov     [rsp + _GPR_SAVE + 8*7], r15
%ifidn __OUTPUT_FORMAT__, win64
	mov     [rsp + _GPR_SAVE + 8*1], rsi
	mov     [rsp + _GPR_SAVE + 8*2], rdi
	vmovdqa  [rsp + _XMM_SAVE + 16*0], xmm6
	vmovdqa  [rsp + _XMM_SAVE + 16*1], xmm7
	vmovdqa  [rsp + _XMM_SAVE + 16*2], xmm8
	vmovdqa  [rsp + _XMM_SAVE + 16*3], xmm9
	vmovdqa  [rsp + _XMM_SAVE + 16*4], xmm10
	vmovdqa  [rsp + _XMM_SAVE + 16*5], xmm11
	vmovdqa  [rsp + _XMM_SAVE + 16*6], xmm12
	vmovdqa  [rsp + _XMM_SAVE + 16*7], xmm13
	vmovdqa  [rsp + _XMM_SAVE + 16*8], xmm14
	vmovdqa  [rsp + _XMM_SAVE + 16*9], xmm15
%endif

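	; bail out with NULL when no lanes hold a job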
	mov	DWORD(num_lanes_inuse), [state + _num_lanes_inuse]
	cmp	num_lanes_inuse, 0
	jz	return_null

	; find a lane with a non-null job
	xor	idx, idx
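	; scan lanes 1..15 (lane 0 is the default); cmovne loads the index of
	; any lane with a non-null job from the lane_N table in .data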
%assign I 1
%rep 15
	cmp	qword [state + _ldata + I * _LANE_DATA_size + _job_in_lane], 0
	cmovne	idx, [APPEND(lane_,I)]
%assign I (I+1)
%endrep


	; copy idx to empty lanes
copy_lane_data:
	mov	tmp, [state + _args + _data_ptr + 8*idx]

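	; idle lanes reuse this data pointer and get length 0xFFFFFFFF so they
	; never become the minimum below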
%assign I 0
%rep 16
	cmp	qword [state + _ldata + I * _LANE_DATA_size + _job_in_lane], 0
	jne	APPEND(skip_,I)
	mov	[state + _args + _data_ptr + 8*I], tmp
	mov	dword [state + _lens + 4*I], 0xFFFFFFFF
APPEND(skip_,I):
%assign I (I+1)
%endrep

	; Find min length
	vmovdqu ymm0, [state + _lens + 0*32]
	vmovdqu ymm1, [state + _lens + 1*32]

	vpminud ymm2, ymm0, ymm1        ; ymm2 has {H1,G1,F1,E1,D1,C1,B1,A1}
	vpalignr ymm3, ymm3, ymm2, 8    ; ymm3 has {x,x,H1,G1,x,x,D1,C1}
	vpminud ymm2, ymm2, ymm3        ; ymm2 has {x,x,H2,G2,x,x,D2,C2}
	vpalignr ymm3, ymm3, ymm2, 4    ; ymm3 has {x,x, x,H2,x,x, x,D2}
	vpminud ymm2, ymm2, ymm3        ; ymm2 has {x,x, x,G3,x,x, x,C3}
	vperm2i128 ymm3, ymm2, ymm2, 1  ; ymm3 has {x,x, x,C3,x,x, x,G3}
	vpminud ymm2, ymm2, ymm3        ; ymm2 has min value in low dword

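	; each lens entry is (length << 4) | lane, so the low nibble of the
	; minimum selects the lane and the upper bits give its length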
	vmovd   DWORD(idx), xmm2
	mov	len2, idx
	and	idx, 0xF
	shr	len2, 4
	jz	len_is_0

	; flush may check here and call x1

mb_processing:

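	; clear the lane-index nibble of the min, broadcast it, and subtract
	; it from every lane's length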
	vpand ymm2, ymm2, [rel clear_low_nibble]
	vpshufd ymm2, ymm2, 0
	vpsubd ymm0, ymm0, ymm2
	vpsubd ymm1, ymm1, ymm2

	vmovdqu [state + _lens + 0*32], ymm0
	vmovdqu [state + _lens + 1*32], ymm1

	; "state" and "args" are the same address, arg1
	; len is arg2
	call	sm3_mb_x16_avx512
	; state and idx are intact


len_is_0:
	; process completed job "idx"
	imul	lane_data, idx, _LANE_DATA_size
	lea	lane_data, [state + _ldata + lane_data]

	mov	job_rax, [lane_data + _job_in_lane]
	mov	qword [lane_data + _job_in_lane], 0
	mov	dword [job_rax + _status], ISAL_STS_COMPLETED
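	; push the freed lane back onto the unused_lanes nibble stack and
	; decrement the in-use count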
	mov	unused_lanes, [state + _unused_lanes]
	shl	unused_lanes, 4
	or	unused_lanes, idx
	mov	[state + _unused_lanes], unused_lanes

	mov     DWORD(num_lanes_inuse), [state + _num_lanes_inuse]
	sub     num_lanes_inuse, 1
	mov     [state + _num_lanes_inuse], DWORD(num_lanes_inuse)

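	; _args_digest is transposed: each of the 8 SM3 state words is stored
	; as 16 lane dwords, so lane idx's words are 4*16 bytes apart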
	vmovd	xmm0, [state + _args_digest + 4*idx + 0*4*16]
	vpinsrd	xmm0, [state + _args_digest + 4*idx + 1*4*16], 1
	vpinsrd	xmm0, [state + _args_digest + 4*idx + 2*4*16], 2
	vpinsrd	xmm0, [state + _args_digest + 4*idx + 3*4*16], 3
	vmovd	xmm1, [state + _args_digest + 4*idx + 4*4*16]
	vpinsrd	xmm1, [state + _args_digest + 4*idx + 5*4*16], 1
	vpinsrd	xmm1, [state + _args_digest + 4*idx + 6*4*16], 2
	vpinsrd	xmm1, [state + _args_digest + 4*idx + 7*4*16], 3

	vmovdqa	[job_rax + _result_digest + 0*16], xmm0
	vmovdqa	[job_rax + _result_digest + 1*16], xmm1


; restore saved registers and stack, then return
return:
%ifidn __OUTPUT_FORMAT__, win64
	vmovdqa  xmm6, [rsp + _XMM_SAVE + 16*0]
	vmovdqa  xmm7, [rsp + _XMM_SAVE + 16*1]
	vmovdqa  xmm8, [rsp + _XMM_SAVE + 16*2]
	vmovdqa  xmm9, [rsp + _XMM_SAVE + 16*3]
	vmovdqa  xmm10, [rsp + _XMM_SAVE + 16*4]
	vmovdqa  xmm11, [rsp + _XMM_SAVE + 16*5]
	vmovdqa  xmm12, [rsp + _XMM_SAVE + 16*6]
	vmovdqa  xmm13, [rsp + _XMM_SAVE + 16*7]
	vmovdqa  xmm14, [rsp + _XMM_SAVE + 16*8]
	vmovdqa  xmm15, [rsp + _XMM_SAVE + 16*9]
	mov     rsi, [rsp + _GPR_SAVE + 8*1]
	mov     rdi, [rsp + _GPR_SAVE + 8*2]
%endif
	mov     rbx, [rsp + _GPR_SAVE + 8*0]
	mov     rbp, [rsp + _GPR_SAVE + 8*3]
	mov     r12, [rsp + _GPR_SAVE + 8*4]
	mov     r13, [rsp + _GPR_SAVE + 8*5]
	mov     r14, [rsp + _GPR_SAVE + 8*6]
	mov     r15, [rsp + _GPR_SAVE + 8*7]
	add     rsp, STACK_SPACE

	ret


return_null:
	xor	job_rax, job_rax
	jmp	return

section .data align=16

align 16
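; mask used to clear the lane-index nibble of the packed minimum length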
clear_low_nibble:
	dq 0x00000000FFFFFFF0, 0x0000000000000000
	dq 0x00000000FFFFFFF0, 0x0000000000000000
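; lane index table read by the cmovne scan in the flush routine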
lane_1:     dq  1
lane_2:     dq  2
lane_3:     dq  3
lane_4:     dq  4
lane_5:     dq  5
lane_6:     dq  6
lane_7:     dq  7
lane_8:     dq  8
lane_9:     dq  9
lane_10:    dq  10
lane_11:    dq  11
lane_12:    dq  12
lane_13:    dq  13
lane_14:    dq  14
lane_15:    dq  15