;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
; Copyright(c) 2011-2016 Intel Corporation All rights reserved.
;
; Redistribution and use in source and binary forms, with or without
; modification, are permitted provided that the following conditions
; are met:
;   * Redistributions of source code must retain the above copyright
;     notice, this list of conditions and the following disclaimer.
;   * Redistributions in binary form must reproduce the above copyright
;     notice, this list of conditions and the following disclaimer in
;     the documentation and/or other materials provided with the
;     distribution.
;   * Neither the name of Intel Corporation nor the names of its
;     contributors may be used to endorse or promote products derived
;     from this software without specific prior written permission.
;
; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
; "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
; LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
; A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
; OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
; SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
; LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
; DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
; THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
; (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;

%include "sha512_job.asm"
%include "sha512_mb_mgr_datastruct.asm"

%include "reg_sizes.asm"

extern sha512_mb_x2_sse

[bits 64]
default rel
section .text

%ifidn __OUTPUT_FORMAT__, elf64
; LINUX register definitions
%define arg1    rdi ; rcx
%define arg2    rsi ; rdx

; idx needs to be other than arg1, arg2, rbx, r12
%define idx     rdx ; rsi
%else
; WINDOWS register definitions
%define arg1    rcx
%define arg2    rdx

; idx needs to be other than arg1, arg2, rbx, r12
%define idx     rsi
%endif

; Common definitions
%define state   arg1
%define job     arg2
%define len2    arg2

%define unused_lanes    rbx
%define lane_data       rbx
%define tmp2            rbx

%define job_rax         rax
%define tmp1            rax
%define size_offset     rax
%define tmp             rax
%define start_offset    rax

%define tmp3            arg1

%define extra_blocks    arg2
%define p               arg2

%define tmp4            r8
%define lens0           r8

%define lens1           r9
%define lens2           r10
%define lens3           r11

; STACK_SPACE needs to be an odd multiple of 8
_XMM_SAVE_SIZE  equ 10*16
_GPR_SAVE_SIZE  equ 8*3
_ALIGN_SIZE     equ 0

_XMM_SAVE       equ 0
_GPR_SAVE       equ _XMM_SAVE + _XMM_SAVE_SIZE
STACK_SPACE     equ _GPR_SAVE + _GPR_SAVE_SIZE + _ALIGN_SIZE

%define APPEND(a,b) a %+ b

; ISAL_SHA512_JOB* _sha512_mb_mgr_flush_sse(ISAL_SHA512_MB_JOB_MGR *state)
; arg 1 : rcx : state
mk_global _sha512_mb_mgr_flush_sse, function, internal
_sha512_mb_mgr_flush_sse:
	endbranch

	sub     rsp, STACK_SPACE
	mov     [rsp + _GPR_SAVE + 8*0], rbx
	mov     [rsp + _GPR_SAVE + 8*1], r12
%ifidn __OUTPUT_FORMAT__, win64
	mov     [rsp + _GPR_SAVE + 8*2], rsi
	movdqa  [rsp + _XMM_SAVE + 16*0], xmm6
	movdqa  [rsp + _XMM_SAVE + 16*1], xmm7
	movdqa  [rsp + _XMM_SAVE + 16*2], xmm8
	movdqa  [rsp + _XMM_SAVE + 16*3], xmm9
	movdqa  [rsp + _XMM_SAVE + 16*4], xmm10
	movdqa  [rsp + _XMM_SAVE + 16*5], xmm11
	movdqa  [rsp + _XMM_SAVE + 16*6], xmm12
	movdqa  [rsp + _XMM_SAVE + 16*7], xmm13
	movdqa  [rsp + _XMM_SAVE + 16*8], xmm14
	movdqa  [rsp + _XMM_SAVE + 16*9], xmm15
%endif

	mov     unused_lanes, [state + _unused_lanes]
	bt      unused_lanes, 16+7
	jc      return_null

	; find a lane with a non-null job
	xor     idx, idx
	cmp     qword [state + _ldata + 1 * _LANE_DATA_size + _job_in_lane], 0
	cmovne  idx, [one]

	; copy idx to empty lanes
copy_lane_data:
	mov     tmp, [state + _args + _data_ptr + 8*idx]

%assign I 0
%rep 2
	cmp     qword [state + _ldata + I * _LANE_DATA_size + _job_in_lane], 0
	jne     APPEND(skip_,I)
	mov     [state + _args + _data_ptr + 8*I], tmp
	mov     dword [state + _lens + 4 + 8*I], 0xFFFFFFFF
APPEND(skip_,I):
%assign I (I+1)
%endrep

	; Find min length
	mov     lens0, [state + _lens + 0*8]
	mov     idx, lens0
	mov     lens1, [state + _lens + 1*8]
	cmp     lens1, idx
	cmovb   idx, lens1

	mov     len2, idx
	and     idx, 0xF
	and     len2, ~0xFF
	jz      len_is_0

	sub     lens0, len2
	sub     lens1, len2
	shr     len2, 32
	mov     [state + _lens + 0*8], lens0
	mov     [state + _lens + 1*8], lens1

	; "state" and "args" are the same address, arg1
	; len is arg2
	call    sha512_mb_x2_sse
	; state and idx are intact

len_is_0:
	; process completed job "idx"
	imul    lane_data, idx, _LANE_DATA_size
	lea     lane_data, [state + _ldata + lane_data]

	mov     job_rax, [lane_data + _job_in_lane]
	mov     qword [lane_data + _job_in_lane], 0
	mov     dword [job_rax + _status], ISAL_STS_COMPLETED
	mov     unused_lanes, [state + _unused_lanes]
	shl     unused_lanes, 8
	or      unused_lanes, idx
	mov     [state + _unused_lanes], unused_lanes

	sub     dword [state + _num_lanes_inuse], 1

	movq    xmm0, [state + _args_digest + 8*idx + 0*32]
	pinsrq  xmm0, [state + _args_digest + 8*idx + 1*32], 1
	movq    xmm1, [state + _args_digest + 8*idx + 2*32]
	pinsrq  xmm1, [state + _args_digest + 8*idx + 3*32], 1
	movq    xmm2, [state + _args_digest + 8*idx + 4*32]
	pinsrq  xmm2, [state + _args_digest + 8*idx + 5*32], 1
	movq    xmm3, [state + _args_digest + 8*idx + 6*32]
	pinsrq  xmm3, [state + _args_digest + 8*idx + 7*32], 1

	movdqa  [job_rax + _result_digest + 0*16], xmm0
	movdqa  [job_rax + _result_digest + 1*16], xmm1
	movdqa  [job_rax + _result_digest + 2*16], xmm2
	movdqa  [job_rax + _result_digest + 3*16], xmm3

return:

%ifidn __OUTPUT_FORMAT__, win64
	movdqa  xmm6,  [rsp + _XMM_SAVE + 16*0]
	movdqa  xmm7,  [rsp + _XMM_SAVE + 16*1]
	movdqa  xmm8,  [rsp + _XMM_SAVE + 16*2]
	movdqa  xmm9,  [rsp + _XMM_SAVE + 16*3]
	movdqa  xmm10, [rsp + _XMM_SAVE + 16*4]
	movdqa  xmm11, [rsp + _XMM_SAVE + 16*5]
	movdqa  xmm12, [rsp + _XMM_SAVE + 16*6]
	movdqa  xmm13, [rsp + _XMM_SAVE + 16*7]
	movdqa  xmm14, [rsp + _XMM_SAVE + 16*8]
	movdqa  xmm15, [rsp + _XMM_SAVE + 16*9]
	mov     rsi, [rsp + _GPR_SAVE + 8*2]
%endif
	mov     rbx, [rsp + _GPR_SAVE + 8*0]
	mov     r12, [rsp + _GPR_SAVE + 8*1]
	add     rsp, STACK_SPACE

	ret

return_null:
	xor     job_rax, job_rax
	jmp     return

section .data align=16

align 16
one:	dq  1
two:	dq  2
three:	dq  3