;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;  Copyright(c) 2011-2020 Intel Corporation All rights reserved.
;
;  Redistribution and use in source and binary forms, with or without
;  modification, are permitted provided that the following conditions
;  are met:
;    * Redistributions of source code must retain the above copyright
;      notice, this list of conditions and the following disclaimer.
;    * Redistributions in binary form must reproduce the above copyright
;      notice, this list of conditions and the following disclaimer in
;      the documentation and/or other materials provided with the
;      distribution.
;    * Neither the name of Intel Corporation nor the names of its
;      contributors may be used to endorse or promote products derived
;      from this software without specific prior written permission.
;
;  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
;  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
;  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
;  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
;  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
;  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
;  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
;  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
;  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
;  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;

%include "sm3_job.asm"
%include "sm3_mb_mgr_datastruct.asm"

%include "reg_sizes.asm"

extern sm3_mb_x8_avx2

[bits 64]
default rel
section .text

%ifidn __OUTPUT_FORMAT__, elf64
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
; LINUX register definitions
%define arg1    rdi ; rcx
%define arg2    rsi ; rdx

%define tmp4    rdx
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;

%else

;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
; WINDOWS register definitions
%define arg1    rcx
%define arg2    rdx

%define tmp4    rsi
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
%endif

; Common register definitions

%define state   arg1
%define job     arg2
%define len2    arg2

; idx must be a register not clobbered by sm3_mb_x8_avx2
%define idx             rbp

%define unused_lanes    rbx
%define lane_data       rbx
%define tmp2            rbx

%define job_rax         rax
%define tmp1            rax
%define size_offset     rax
%define tmp             rax
%define start_offset    rax

%define tmp3            arg1

%define extra_blocks    arg2
%define p               arg2


; STACK_SPACE needs to be an odd multiple of 8
_XMM_SAVE_SIZE  equ 10*16
_GPR_SAVE_SIZE  equ 8*8
_ALIGN_SIZE     equ 8

_XMM_SAVE       equ 0
_GPR_SAVE       equ _XMM_SAVE + _XMM_SAVE_SIZE
STACK_SPACE     equ _GPR_SAVE + _GPR_SAVE_SIZE + _ALIGN_SIZE
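; 10*16 + 8*8 + 8 = 232 bytes, an odd multiple of 8: the CALL into this
; function leaves rsp at 8 mod 16, so subtracting STACK_SPACE restores the
; 16-byte alignment required by the vmovdqa saves of xmm6-xmm15 below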

%define APPEND(a,b) a %+ b
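; %+ concatenates tokens after expansion, e.g. APPEND(skip_,3) yields the
; label skip_3; used below to stamp out one skip label per lane in %rep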

; ISAL_SM3_JOB* _sm3_mb_mgr_flush_avx2(ISAL_SM3_MB_JOB_MGR *state)
; arg 1 : rcx (Windows) / rdi (Linux) : state
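;
; Flush overview: pick any lane still holding a job, point the idle lanes
; at that lane's buffer (their lens are set to max, so their results are
; never read), hash the minimum remaining length across all 8 lanes, then
; complete and return the job in the minimum-length lane.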
mk_global _sm3_mb_mgr_flush_avx2, function, internal
_sm3_mb_mgr_flush_avx2:
	endbranch
	sub     rsp, STACK_SPACE
	mov     [rsp + _GPR_SAVE + 8*0], rbx
	mov     [rsp + _GPR_SAVE + 8*3], rbp
	mov     [rsp + _GPR_SAVE + 8*4], r12
	mov     [rsp + _GPR_SAVE + 8*5], r13
	mov     [rsp + _GPR_SAVE + 8*6], r14
	mov     [rsp + _GPR_SAVE + 8*7], r15
%ifidn __OUTPUT_FORMAT__, win64
	mov     [rsp + _GPR_SAVE + 8*1], rsi
	mov     [rsp + _GPR_SAVE + 8*2], rdi
	vmovdqa  [rsp + _XMM_SAVE + 16*0], xmm6
	vmovdqa  [rsp + _XMM_SAVE + 16*1], xmm7
	vmovdqa  [rsp + _XMM_SAVE + 16*2], xmm8
	vmovdqa  [rsp + _XMM_SAVE + 16*3], xmm9
	vmovdqa  [rsp + _XMM_SAVE + 16*4], xmm10
	vmovdqa  [rsp + _XMM_SAVE + 16*5], xmm11
	vmovdqa  [rsp + _XMM_SAVE + 16*6], xmm12
	vmovdqa  [rsp + _XMM_SAVE + 16*7], xmm13
	vmovdqa  [rsp + _XMM_SAVE + 16*8], xmm14
	vmovdqa  [rsp + _XMM_SAVE + 16*9], xmm15
%endif

	; use num_lanes_inuse to check whether all lanes are empty
	cmp	dword [state + _num_lanes_inuse], 0
	jz	return_null

	; find a lane with a non-null job
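	; (cmov has no immediate form, so the lane numbers are loaded from
	; the one..seven qword constants in the .data section below)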
	xor	idx, idx
	cmp	qword [state + _ldata + 1 * _LANE_DATA_size + _job_in_lane], 0
	cmovne	idx, [one]
	cmp	qword [state + _ldata + 2 * _LANE_DATA_size + _job_in_lane], 0
	cmovne	idx, [two]
	cmp	qword [state + _ldata + 3 * _LANE_DATA_size + _job_in_lane], 0
	cmovne	idx, [three]
	cmp	qword [state + _ldata + 4 * _LANE_DATA_size + _job_in_lane], 0
	cmovne	idx, [four]
	cmp	qword [state + _ldata + 5 * _LANE_DATA_size + _job_in_lane], 0
	cmovne	idx, [five]
	cmp	qword [state + _ldata + 6 * _LANE_DATA_size + _job_in_lane], 0
	cmovne	idx, [six]
	cmp	qword [state + _ldata + 7 * _LANE_DATA_size + _job_in_lane], 0
	cmovne	idx, [seven]

	; copy lane idx's data pointer into the empty lanes and mark their
	; lens as max so they never win the min-length search below
copy_lane_data:
	mov	tmp, [state + _args + _data_ptr + 8*idx]

%assign I 0
%rep 8
	cmp	qword [state + _ldata + I * _LANE_DATA_size + _job_in_lane], 0
	jne	APPEND(skip_,I)
	mov	[state + _args + _data_ptr + 8*I], tmp
	mov	dword [state + _lens + 4*I], 0xFFFFFFFF
APPEND(skip_,I):
%assign I (I+1)
%endrep

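	; each lens entry encodes (blocks_remaining << 4) | lane_index, so
	; one unsigned min over _lens yields both the shortest job and its lane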
	; Find min length
	vmovdqa xmm0, [state + _lens + 0*16]
	vmovdqa xmm1, [state + _lens + 1*16]

	vpminud xmm2, xmm0, xmm1        ; xmm2 has {D,C,B,A}, the pairwise minima
	vpalignr xmm3, xmm3, xmm2, 8    ; xmm3 has {x,x,D,C}
	vpminud xmm2, xmm2, xmm3        ; xmm2 has {x,x,min(D,B),min(C,A)}
	vpalignr xmm3, xmm3, xmm2, 4    ; xmm3 has {x,x,x,min(D,B)}
	vpminud xmm2, xmm2, xmm3        ; xmm2 has min value in low dword

	vmovd   DWORD(idx), xmm2
	mov	len2, idx
	and	idx, 0xF	; idx  = lane index of the shortest job
	shr	len2, 4		; len2 = number of blocks to hash
	jz	len_is_0	; shortest job has no blocks left to hash

mb_processing:

	vpand   xmm2, xmm2, [rel clear_low_nibble]	; clear the lane-index nibble
	vpshufd xmm2, xmm2, 0				; broadcast min len to all dwords

	vpsubd  xmm0, xmm0, xmm2	; subtract the blocks about to be hashed
	vpsubd  xmm1, xmm1, xmm2	; from every lane's remaining length

	vmovdqa [state + _lens + 0*16], xmm0
	vmovdqa [state + _lens + 1*16], xmm1

	; "state" and "args" are the same address, arg1
	; len is arg2
	call	sm3_mb_x8_avx2
	; state and idx are intact

len_is_0:
	; process completed job "idx"
	imul	lane_data, idx, _LANE_DATA_size
	lea	lane_data, [state + _ldata + lane_data]

	mov	job_rax, [lane_data + _job_in_lane]
	mov	qword [lane_data + _job_in_lane], 0
	mov	dword [job_rax + _status], ISAL_STS_COMPLETED
	mov	unused_lanes, [state + _unused_lanes]
	shl	unused_lanes, 4		; push the freed lane onto the
	or	unused_lanes, idx	; nibble stack of unused lanes
	mov	[state + _unused_lanes], unused_lanes

	sub     dword [state + _num_lanes_inuse], 1

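	; args_digest is stored transposed: row r holds digest word r for
	; all 8 lanes, so lane idx's words are 4*8 = 32 bytes apart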
	vmovd	xmm0, [state + _args_digest + 4*idx + 0*4*8]
	vpinsrd	xmm0, [state + _args_digest + 4*idx + 1*4*8], 1
	vpinsrd	xmm0, [state + _args_digest + 4*idx + 2*4*8], 2
	vpinsrd	xmm0, [state + _args_digest + 4*idx + 3*4*8], 3
	vmovd	xmm1, [state + _args_digest + 4*idx + 4*4*8]
	vpinsrd	xmm1, [state + _args_digest + 4*idx + 5*4*8], 1
	vpinsrd	xmm1, [state + _args_digest + 4*idx + 6*4*8], 2
	vpinsrd	xmm1, [state + _args_digest + 4*idx + 7*4*8], 3

	vmovdqa	[job_rax + _result_digest + 0*16], xmm0
	vmovdqa	[job_rax + _result_digest + 1*16], xmm1

return:
%ifidn __OUTPUT_FORMAT__, win64
	vmovdqa  xmm6, [rsp + _XMM_SAVE + 16*0]
	vmovdqa  xmm7, [rsp + _XMM_SAVE + 16*1]
	vmovdqa  xmm8, [rsp + _XMM_SAVE + 16*2]
	vmovdqa  xmm9, [rsp + _XMM_SAVE + 16*3]
	vmovdqa  xmm10, [rsp + _XMM_SAVE + 16*4]
	vmovdqa  xmm11, [rsp + _XMM_SAVE + 16*5]
	vmovdqa  xmm12, [rsp + _XMM_SAVE + 16*6]
	vmovdqa  xmm13, [rsp + _XMM_SAVE + 16*7]
	vmovdqa  xmm14, [rsp + _XMM_SAVE + 16*8]
	vmovdqa  xmm15, [rsp + _XMM_SAVE + 16*9]
	mov     rsi, [rsp + _GPR_SAVE + 8*1]
	mov     rdi, [rsp + _GPR_SAVE + 8*2]
%endif
	mov     rbx, [rsp + _GPR_SAVE + 8*0]
	mov     rbp, [rsp + _GPR_SAVE + 8*3]
	mov     r12, [rsp + _GPR_SAVE + 8*4]
	mov     r13, [rsp + _GPR_SAVE + 8*5]
	mov     r14, [rsp + _GPR_SAVE + 8*6]
	mov     r15, [rsp + _GPR_SAVE + 8*7]
	add     rsp, STACK_SPACE

	ret

return_null:
	xor	job_rax, job_rax
	jmp	return

section .data align=16

align 16
clear_low_nibble:
	dq 0x00000000FFFFFFF0, 0x0000000000000000
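	; only the low dword matters: vpand clears the lane-index nibble of
	; the packed min value, then vpshufd broadcasts dword 0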
one:	dq  1
two:	dq  2
three:	dq  3
four:	dq  4
five:	dq  5
six:	dq  6
seven:	dq  7
