;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;  Copyright(c) 2011-2020 Intel Corporation All rights reserved.
;
;  Redistribution and use in source and binary forms, with or without
;  modification, are permitted provided that the following conditions
;  are met:
;    * Redistributions of source code must retain the above copyright
;      notice, this list of conditions and the following disclaimer.
;    * Redistributions in binary form must reproduce the above copyright
;      notice, this list of conditions and the following disclaimer in
;      the documentation and/or other materials provided with the
;      distribution.
;    * Neither the name of Intel Corporation nor the names of its
;      contributors may be used to endorse or promote products derived
;      from this software without specific prior written permission.
;
;  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
;  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
;  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
;  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
;  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
;  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
;  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
;  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
;  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
;  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;

%include "sm3_job.asm"
%include "memcpy.asm"
%include "sm3_mb_mgr_datastruct.asm"
%include "reg_sizes.asm"

;
; ISAL_SM3_JOB* _sm3_mb_mgr_submit_avx512 (ISAL_SM3_MB_JOB_MGR *state, ISAL_SM3_JOB* job);
;

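; Behavior sketch (inferred from the code below, not a separate spec):
; the job is parked in a free lane and NULL is returned while fewer than
; 16 lanes are in use; once all 16 lanes are busy, sm3_mb_x16_avx512 is
; invoked and the job with the fewest remaining blocks is returned with
; status ISAL_STS_COMPLETED.
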
extern sm3_mb_x16_avx512

[bits 64]
default rel
section .text

%ifidn __OUTPUT_FORMAT__, elf64
%define arg1    rdi ; state
%define arg2    rsi ; job

%define size_offset     rcx
%define tmp2            rcx

%else
; WINDOWS register definitions
%define arg1    rcx ; state
%define arg2    rdx ; job

%define size_offset     rdi
%define tmp2            rdi

%endif

; Common definitions
%define state   arg1
%define job     arg2
%define len2    arg2 ; aliases job; only valid once the job fields are read
%define p2      arg2 ; aliases job

%define idx             r8
%define last_len        r8
%define p               r11
%define start_offset    r11
%define num_lanes_inuse r11

%define unused_lanes    rbx

%define job_rax         rax
%define len             rax

%define lane            rbp
%define tmp3            rbp

%define tmp             r9

%define lane_data       r10

; STACK_SPACE must be an odd multiple of 8: rsp is 8 mod 16 at function
; entry (the call pushed the return address), so subtracting an odd
; multiple of 8 makes rsp 16-byte aligned for the vmovdqa xmm saves below.
%define STACK_SPACE	8*8 + 16*10 + 8

mk_global _sm3_mb_mgr_submit_avx512, function, internal
_sm3_mb_mgr_submit_avx512:
	endbranch

	; save callee-saved registers
	sub     rsp, STACK_SPACE
	mov     [rsp + 8*0], rbx
	mov     [rsp + 8*3], rbp ; slots 8*1 and 8*2 hold rsi/rdi on win64
	mov     [rsp + 8*4], r12
	mov     [rsp + 8*5], r13
	mov     [rsp + 8*6], r14
	mov     [rsp + 8*7], r15
%ifidn __OUTPUT_FORMAT__, win64
	mov     [rsp + 8*1], rsi
	mov     [rsp + 8*2], rdi
	vmovdqa  [rsp + 8*8 + 16*0], xmm6
	vmovdqa  [rsp + 8*8 + 16*1], xmm7
	vmovdqa  [rsp + 8*8 + 16*2], xmm8
	vmovdqa  [rsp + 8*8 + 16*3], xmm9
	vmovdqa  [rsp + 8*8 + 16*4], xmm10
	vmovdqa  [rsp + 8*8 + 16*5], xmm11
	vmovdqa  [rsp + 8*8 + 16*6], xmm12
	vmovdqa  [rsp + 8*8 + 16*7], xmm13
	vmovdqa  [rsp + 8*8 + 16*8], xmm14
	vmovdqa  [rsp + 8*8 + 16*9], xmm15
%endif
	mov	unused_lanes, [state + _unused_lanes]
	mov	lane, unused_lanes
	; _unused_lanes is a packed stack of 4-bit lane indices;
	; pop the low nibble to pick the lane for this job
	and	lane, 0xF

	shr	unused_lanes, 4
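	; Example (illustrative values): with unused_lanes =
	; 0xfedcba9876543210, the pop above selects lane 0 and leaves
	; unused_lanes = 0x0fedcba987654321 for the next submit.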
	imul	lane_data, lane, _LANE_DATA_size
	mov	dword [job + _status], ISAL_STS_BEING_PROCESSED
	lea	lane_data, [state + _ldata + lane_data]
	mov	[state + _unused_lanes], unused_lanes
	mov	DWORD(len), [job + _len]

	shl	len, 4
	or	len, lane
	mov	[state + _lens + 4*lane], DWORD(len)
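	; The packed per-lane length is (len << 4) | lane. For example
	; (illustrative), a job of length 4 in lane 3 is stored as
	; (4 << 4) | 3 = 0x43; the scheduler below recovers the lane
	; with "& 0xF" and the length with ">> 4".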

	mov	[lane_data + _job_in_lane], job

	; Load the digest from the job and scatter it into the transposed
	; args_digest array (digest word w of lane l at offset 4*l + 4*16*w)
	vmovdqu	xmm0, [job + _result_digest + 0*16]
	vmovdqu	xmm1, [job + _result_digest + 1*16]
	vmovd	[state + _args_digest + 4*lane + 0*4*16], xmm0
	vpextrd	[state + _args_digest + 4*lane + 1*4*16], xmm0, 1
	vpextrd	[state + _args_digest + 4*lane + 2*4*16], xmm0, 2
	vpextrd	[state + _args_digest + 4*lane + 3*4*16], xmm0, 3
	vmovd	[state + _args_digest + 4*lane + 4*4*16], xmm1
	vpextrd	[state + _args_digest + 4*lane + 5*4*16], xmm1, 1
	vpextrd	[state + _args_digest + 4*lane + 6*4*16], xmm1, 2
	vpextrd	[state + _args_digest + 4*lane + 7*4*16], xmm1, 3
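	; Example (illustrative): for lane 3, digest word 2 lands at byte
	; offset 4*3 + 64*2 = 140 within args_digest, i.e. the layout is
	; word-major, with all 16 lanes of one digest word contiguous.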
	mov	p, [job + _buffer]
	mov	[state + _args_data_ptr + 8*lane], p

	mov	DWORD(num_lanes_inuse), [state + _num_lanes_inuse]
	add	num_lanes_inuse, 1
	mov	[state + _num_lanes_inuse], DWORD(num_lanes_inuse)
	; return NULL until all 16 lanes are in use
	cmp	num_lanes_inuse, 16
	jne	return_null

start_loop:
	; Find min length: ymm0 holds lens for lanes 0-7, ymm1 for lanes 8-15
	vmovdqu ymm0, [state + _lens + 0*32]
	vmovdqu ymm1, [state + _lens + 1*32]

	vpminud ymm2, ymm0, ymm1        ; ymm2 has {H1,G1,F1,E1,D1,C1,B1,A1}
	vpalignr ymm3, ymm3, ymm2, 8    ; ymm3 has {x,x,H1,G1,x,x,D1,C1}
	vpminud ymm2, ymm2, ymm3        ; ymm2 has {x,x,H2,G2,x,x,D2,C2}
	vpalignr ymm3, ymm3, ymm2, 4    ; ymm3 has {x,x, x,H2,x,x, x,D2}
	vpminud ymm2, ymm2, ymm3        ; ymm2 has {x,x, x,G3,x,x, x,C3}
	vperm2i128 ymm3, ymm2, ymm2, 1	; ymm3 has {x,x, x,C3,x,x, x,G3}
	vpminud ymm2, ymm2, ymm3        ; ymm2 has min value in low dword

	vmovd	DWORD(idx), xmm2
	mov	len2, idx
	and	idx, 0xF	; lane index of the min-length job
	shr	len2, 4		; min length, lane nibble stripped
	jz	len_is_0	; min length is zero: that job is already done

	; clear the lane-index nibble from the packed min, broadcast it
	; to all dwords, and subtract it from every lane's packed length
	vpand	ymm2, ymm2, [rel clear_low_nibble]
	vpshufd	ymm2, ymm2, 0

	vpsubd	ymm0, ymm0, ymm2
	vpsubd	ymm1, ymm1, ymm2

	vmovdqu	[state + _lens + 0*32], ymm0
	vmovdqu	[state + _lens + 1*32], ymm1
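	; Example (illustrative): packed lens {0x43, 0x81} minus the
	; cleared min 0x40 give {0x03, 0x41}; the lane nibbles survive
	; because the min's low nibble was masked off first.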
	; "state" and "args" are the same address, arg1
	; len is arg2
	call	sm3_mb_x16_avx512

	; state and idx are intact

len_is_0:
	; process completed job "idx"
	imul	lane_data, idx, _LANE_DATA_size
	lea	lane_data, [state + _ldata + lane_data]

	mov	job_rax, [lane_data + _job_in_lane]
	mov	unused_lanes, [state + _unused_lanes]
	mov	qword [lane_data + _job_in_lane], 0
	mov	dword [job_rax + _status], ISAL_STS_COMPLETED
	; push the freed lane index back onto the unused-lanes nibble stack
	shl	unused_lanes, 4
	or	unused_lanes, idx
	mov	[state + _unused_lanes], unused_lanes

	mov	DWORD(num_lanes_inuse), [state + _num_lanes_inuse]
	sub	num_lanes_inuse, 1
	mov	[state + _num_lanes_inuse], DWORD(num_lanes_inuse)
	; gather the finished digest from the transposed args_digest array
	vmovd	xmm0, [state + _args_digest + 4*idx + 0*4*16]
	vpinsrd	xmm0, [state + _args_digest + 4*idx + 1*4*16], 1
	vpinsrd	xmm0, [state + _args_digest + 4*idx + 2*4*16], 2
	vpinsrd	xmm0, [state + _args_digest + 4*idx + 3*4*16], 3
	vmovd	xmm1, [state + _args_digest + 4*idx + 4*4*16]
	vpinsrd	xmm1, [state + _args_digest + 4*idx + 5*4*16], 1
	vpinsrd	xmm1, [state + _args_digest + 4*idx + 6*4*16], 2
	vpinsrd	xmm1, [state + _args_digest + 4*idx + 7*4*16], 3

	vmovdqa	[job_rax + _result_digest + 0*16], xmm0
	vmovdqa	[job_rax + _result_digest + 1*16], xmm1

; restore registers and return
return:

%ifidn __OUTPUT_FORMAT__, win64
	vmovdqa  xmm6, [rsp + 8*8 + 16*0]
	vmovdqa  xmm7, [rsp + 8*8 + 16*1]
	vmovdqa  xmm8, [rsp + 8*8 + 16*2]
	vmovdqa  xmm9, [rsp + 8*8 + 16*3]
	vmovdqa  xmm10, [rsp + 8*8 + 16*4]
	vmovdqa  xmm11, [rsp + 8*8 + 16*5]
	vmovdqa  xmm12, [rsp + 8*8 + 16*6]
	vmovdqa  xmm13, [rsp + 8*8 + 16*7]
	vmovdqa  xmm14, [rsp + 8*8 + 16*8]
	vmovdqa  xmm15, [rsp + 8*8 + 16*9]
	mov     rsi, [rsp + 8*1]
	mov     rdi, [rsp + 8*2]
%endif
	mov     rbx, [rsp + 8*0]
	mov     rbp, [rsp + 8*3]
	mov     r12, [rsp + 8*4]
	mov     r13, [rsp + 8*5]
	mov     r14, [rsp + 8*6]
	mov     r15, [rsp + 8*7]
	add     rsp, STACK_SPACE

	ret

return_null:
	xor     job_rax, job_rax
	jmp     return

section .data align=32

align 32
; per-128-bit-lane mask: keeps only dword 0 of each half, with its low
; (lane-index) nibble cleared; strips the lane index from the packed min
clear_low_nibble:
	dq 0x00000000FFFFFFF0, 0x0000000000000000
	dq 0x00000000FFFFFFF0, 0x0000000000000000