/* $OpenBSD: sha1.c,v 1.15 2024/06/01 07:36:16 tb Exp $ */
/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com)
 * All rights reserved.
 *
 * This package is an SSL implementation written
 * by Eric Young (eay@cryptsoft.com).
 * The implementation was written so as to conform with Netscapes SSL.
 *
 * This library is free for commercial and non-commercial use as long as
 * the following conditions are aheared to.  The following conditions
 * apply to all code found in this distribution, be it the RC4, RSA,
 * lhash, DES, etc., code; not just the SSL code.  The SSL documentation
 * included with this distribution is covered by the same copyright terms
 * except that the holder is Tim Hudson (tjh@cryptsoft.com).
 *
 * Copyright remains Eric Young's, and as such any Copyright notices in
 * the code are not to be removed.
 * If this package is used in a product, Eric Young should be given attribution
 * as the author of the parts of the library used.
 * This can be in the form of a textual message at program startup or
 * in documentation (online or textual) provided with the package.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *    "This product includes cryptographic software written by
 *     Eric Young (eay@cryptsoft.com)"
 *    The word 'cryptographic' can be left out if the rouines from the library
 *    being used are not cryptographic related :-).
 * 4. If you include any Windows specific code (or a derivative thereof) from
 *    the apps directory (application code) you must include an acknowledgement:
 *    "This product includes software written by Tim Hudson (tjh@cryptsoft.com)"
 *
 * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * The licence and distribution terms for any publically available version or
 * derivative of this code cannot be changed.  i.e. this code cannot simply be
 * copied and put under another distribution licence
 * [including the GNU Public Licence.]
 */

#include <stdlib.h>
#include <string.h>

#include <openssl/opensslconf.h>

#include <openssl/crypto.h>
#include <openssl/sha.h>

#include "crypto_internal.h"

#if !defined(OPENSSL_NO_SHA1) && !defined(OPENSSL_NO_SHA)

/* Ensure that SHA_LONG and uint32_t are equivalent sizes. */
CTASSERT(sizeof(SHA_LONG) == sizeof(uint32_t));

#ifdef SHA1_ASM
void sha1_block_data_order(SHA_CTX *ctx, const void *p, size_t num);
#endif

#ifndef SHA1_ASM
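/*
 * Ch, Parity and Maj are the SHA-1 logical functions f_t from FIPS 180-4:
 * Ch is used for rounds 0-19, Parity for rounds 20-39 and 60-79, and Maj
 * for rounds 40-59.
 */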
static inline SHA_LONG
Ch(SHA_LONG x, SHA_LONG y, SHA_LONG z)
{
	return (x & y) ^ (~x & z);
}

static inline SHA_LONG
Parity(SHA_LONG x, SHA_LONG y, SHA_LONG z)
{
	return x ^ y ^ z;
}

static inline SHA_LONG
Maj(SHA_LONG x, SHA_LONG y, SHA_LONG z)
{
	return (x & y) ^ (x & z) ^ (y & z);
}

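/*
 * Message schedule recurrence from FIPS 180-4:
 *
 *	W_t = ROTL^1(W_{t-3} ^ W_{t-8} ^ W_{t-14} ^ W_{t-16})
 *
 * The schedule is kept in a sliding 16-word window and updated in place,
 * so the caller passes &W[t-16] together with W[t-14], W[t-8] and W[t-3].
 */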
static inline void
sha1_msg_schedule_update(SHA_LONG *W0, SHA_LONG W2, SHA_LONG W8, SHA_LONG W13)
{
	*W0 = crypto_rol_u32(W13 ^ W8 ^ W2 ^ *W0, 1);
}

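/*
 * Each of the four round helpers performs the common SHA-1 round step
 *
 *	T = ROTL^5(a) + f(b, c, d) + e + K_t + W_t
 *	e = d; d = c; c = ROTL^30(b); b = a; a = T
 *
 * and differs only in the round function f and constant K_t:
 * 0x5a827999 with Ch (rounds 0-19), 0x6ed9eba1 with Parity (20-39),
 * 0x8f1bbcdc with Maj (40-59) and 0xca62c1d6 with Parity (60-79).
 */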
static inline void
sha1_round1(SHA_LONG *a, SHA_LONG *b, SHA_LONG *c, SHA_LONG *d, SHA_LONG *e,
    SHA_LONG Wt)
{
	SHA_LONG Kt, T;

	Kt = 0x5a827999UL;
	T = crypto_rol_u32(*a, 5) + Ch(*b, *c, *d) + *e + Kt + Wt;

	*e = *d;
	*d = *c;
	*c = crypto_rol_u32(*b, 30);
	*b = *a;
	*a = T;
}

static inline void
sha1_round2(SHA_LONG *a, SHA_LONG *b, SHA_LONG *c, SHA_LONG *d, SHA_LONG *e,
    SHA_LONG Wt)
{
	SHA_LONG Kt, T;

	Kt = 0x6ed9eba1UL;
	T = crypto_rol_u32(*a, 5) + Parity(*b, *c, *d) + *e + Kt + Wt;

	*e = *d;
	*d = *c;
	*c = crypto_rol_u32(*b, 30);
	*b = *a;
	*a = T;
}

static inline void
sha1_round3(SHA_LONG *a, SHA_LONG *b, SHA_LONG *c, SHA_LONG *d, SHA_LONG *e,
    SHA_LONG Wt)
{
	SHA_LONG Kt, T;

	Kt = 0x8f1bbcdcUL;
	T = crypto_rol_u32(*a, 5) + Maj(*b, *c, *d) + *e + Kt + Wt;

	*e = *d;
	*d = *c;
	*c = crypto_rol_u32(*b, 30);
	*b = *a;
	*a = T;
}

static inline void
sha1_round4(SHA_LONG *a, SHA_LONG *b, SHA_LONG *c, SHA_LONG *d, SHA_LONG *e,
    SHA_LONG Wt)
{
	SHA_LONG Kt, T;

	Kt = 0xca62c1d6UL;
	T = crypto_rol_u32(*a, 5) + Parity(*b, *c, *d) + *e + Kt + Wt;

	*e = *d;
	*d = *c;
	*c = crypto_rol_u32(*b, 30);
	*b = *a;
	*a = T;
}

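/*
 * Portable SHA-1 compression function: processes num complete 64-byte
 * blocks.  The full 80-word message schedule is never stored; instead
 * X0..X15 form a sliding 16-word window that is expanded in place before
 * each subsequent group of 16 rounds.
 */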
static void
sha1_block_data_order(SHA_CTX *ctx, const void *_in, size_t num)
{
	const uint8_t *in = _in;
	const SHA_LONG *in32;
	unsigned int a, b, c, d, e;
	unsigned int X0, X1, X2, X3, X4, X5, X6, X7,
	    X8, X9, X10, X11, X12, X13, X14, X15;

	while (num--) {
		a = ctx->h0;
		b = ctx->h1;
		c = ctx->h2;
		d = ctx->h3;
		e = ctx->h4;

		if ((size_t)in % 4 == 0) {
			/* Input is 32 bit aligned. */
			in32 = (const SHA_LONG *)in;
			X0 = be32toh(in32[0]);
			X1 = be32toh(in32[1]);
			X2 = be32toh(in32[2]);
			X3 = be32toh(in32[3]);
			X4 = be32toh(in32[4]);
			X5 = be32toh(in32[5]);
			X6 = be32toh(in32[6]);
			X7 = be32toh(in32[7]);
			X8 = be32toh(in32[8]);
			X9 = be32toh(in32[9]);
			X10 = be32toh(in32[10]);
			X11 = be32toh(in32[11]);
			X12 = be32toh(in32[12]);
			X13 = be32toh(in32[13]);
			X14 = be32toh(in32[14]);
			X15 = be32toh(in32[15]);
		} else {
			/* Input is not 32 bit aligned. */
			X0 = crypto_load_be32toh(&in[0 * 4]);
			X1 = crypto_load_be32toh(&in[1 * 4]);
			X2 = crypto_load_be32toh(&in[2 * 4]);
			X3 = crypto_load_be32toh(&in[3 * 4]);
			X4 = crypto_load_be32toh(&in[4 * 4]);
			X5 = crypto_load_be32toh(&in[5 * 4]);
			X6 = crypto_load_be32toh(&in[6 * 4]);
			X7 = crypto_load_be32toh(&in[7 * 4]);
			X8 = crypto_load_be32toh(&in[8 * 4]);
			X9 = crypto_load_be32toh(&in[9 * 4]);
			X10 = crypto_load_be32toh(&in[10 * 4]);
			X11 = crypto_load_be32toh(&in[11 * 4]);
			X12 = crypto_load_be32toh(&in[12 * 4]);
			X13 = crypto_load_be32toh(&in[13 * 4]);
			X14 = crypto_load_be32toh(&in[14 * 4]);
			X15 = crypto_load_be32toh(&in[15 * 4]);
		}
		in += SHA_CBLOCK;

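		/* Rounds 0-15. */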
		sha1_round1(&a, &b, &c, &d, &e, X0);
		sha1_round1(&a, &b, &c, &d, &e, X1);
		sha1_round1(&a, &b, &c, &d, &e, X2);
		sha1_round1(&a, &b, &c, &d, &e, X3);
		sha1_round1(&a, &b, &c, &d, &e, X4);
		sha1_round1(&a, &b, &c, &d, &e, X5);
		sha1_round1(&a, &b, &c, &d, &e, X6);
		sha1_round1(&a, &b, &c, &d, &e, X7);
		sha1_round1(&a, &b, &c, &d, &e, X8);
		sha1_round1(&a, &b, &c, &d, &e, X9);
		sha1_round1(&a, &b, &c, &d, &e, X10);
		sha1_round1(&a, &b, &c, &d, &e, X11);
		sha1_round1(&a, &b, &c, &d, &e, X12);
		sha1_round1(&a, &b, &c, &d, &e, X13);
		sha1_round1(&a, &b, &c, &d, &e, X14);
		sha1_round1(&a, &b, &c, &d, &e, X15);

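		/* Expand the message schedule to cover rounds 16-31. */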
		sha1_msg_schedule_update(&X0, X2, X8, X13);
		sha1_msg_schedule_update(&X1, X3, X9, X14);
		sha1_msg_schedule_update(&X2, X4, X10, X15);
		sha1_msg_schedule_update(&X3, X5, X11, X0);
		sha1_msg_schedule_update(&X4, X6, X12, X1);
		sha1_msg_schedule_update(&X5, X7, X13, X2);
		sha1_msg_schedule_update(&X6, X8, X14, X3);
		sha1_msg_schedule_update(&X7, X9, X15, X4);
		sha1_msg_schedule_update(&X8, X10, X0, X5);
		sha1_msg_schedule_update(&X9, X11, X1, X6);
		sha1_msg_schedule_update(&X10, X12, X2, X7);
		sha1_msg_schedule_update(&X11, X13, X3, X8);
		sha1_msg_schedule_update(&X12, X14, X4, X9);
		sha1_msg_schedule_update(&X13, X15, X5, X10);
		sha1_msg_schedule_update(&X14, X0, X6, X11);
		sha1_msg_schedule_update(&X15, X1, X7, X12);

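		/* Rounds 16-31: f switches from Ch to Parity at t = 20. */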
		sha1_round1(&a, &b, &c, &d, &e, X0);
		sha1_round1(&a, &b, &c, &d, &e, X1);
		sha1_round1(&a, &b, &c, &d, &e, X2);
		sha1_round1(&a, &b, &c, &d, &e, X3);
		sha1_round2(&a, &b, &c, &d, &e, X4);
		sha1_round2(&a, &b, &c, &d, &e, X5);
		sha1_round2(&a, &b, &c, &d, &e, X6);
		sha1_round2(&a, &b, &c, &d, &e, X7);
		sha1_round2(&a, &b, &c, &d, &e, X8);
		sha1_round2(&a, &b, &c, &d, &e, X9);
		sha1_round2(&a, &b, &c, &d, &e, X10);
		sha1_round2(&a, &b, &c, &d, &e, X11);
		sha1_round2(&a, &b, &c, &d, &e, X12);
		sha1_round2(&a, &b, &c, &d, &e, X13);
		sha1_round2(&a, &b, &c, &d, &e, X14);
		sha1_round2(&a, &b, &c, &d, &e, X15);

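		/* Expand the message schedule to cover rounds 32-47. */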
		sha1_msg_schedule_update(&X0, X2, X8, X13);
		sha1_msg_schedule_update(&X1, X3, X9, X14);
		sha1_msg_schedule_update(&X2, X4, X10, X15);
		sha1_msg_schedule_update(&X3, X5, X11, X0);
		sha1_msg_schedule_update(&X4, X6, X12, X1);
		sha1_msg_schedule_update(&X5, X7, X13, X2);
		sha1_msg_schedule_update(&X6, X8, X14, X3);
		sha1_msg_schedule_update(&X7, X9, X15, X4);
		sha1_msg_schedule_update(&X8, X10, X0, X5);
		sha1_msg_schedule_update(&X9, X11, X1, X6);
		sha1_msg_schedule_update(&X10, X12, X2, X7);
		sha1_msg_schedule_update(&X11, X13, X3, X8);
		sha1_msg_schedule_update(&X12, X14, X4, X9);
		sha1_msg_schedule_update(&X13, X15, X5, X10);
		sha1_msg_schedule_update(&X14, X0, X6, X11);
		sha1_msg_schedule_update(&X15, X1, X7, X12);

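		/* Rounds 32-47: f switches from Parity to Maj at t = 40. */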
		sha1_round2(&a, &b, &c, &d, &e, X0);
		sha1_round2(&a, &b, &c, &d, &e, X1);
		sha1_round2(&a, &b, &c, &d, &e, X2);
		sha1_round2(&a, &b, &c, &d, &e, X3);
		sha1_round2(&a, &b, &c, &d, &e, X4);
		sha1_round2(&a, &b, &c, &d, &e, X5);
		sha1_round2(&a, &b, &c, &d, &e, X6);
		sha1_round2(&a, &b, &c, &d, &e, X7);
		sha1_round3(&a, &b, &c, &d, &e, X8);
		sha1_round3(&a, &b, &c, &d, &e, X9);
		sha1_round3(&a, &b, &c, &d, &e, X10);
		sha1_round3(&a, &b, &c, &d, &e, X11);
		sha1_round3(&a, &b, &c, &d, &e, X12);
		sha1_round3(&a, &b, &c, &d, &e, X13);
		sha1_round3(&a, &b, &c, &d, &e, X14);
		sha1_round3(&a, &b, &c, &d, &e, X15);

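		/* Expand the message schedule to cover rounds 48-63. */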
		sha1_msg_schedule_update(&X0, X2, X8, X13);
		sha1_msg_schedule_update(&X1, X3, X9, X14);
		sha1_msg_schedule_update(&X2, X4, X10, X15);
		sha1_msg_schedule_update(&X3, X5, X11, X0);
		sha1_msg_schedule_update(&X4, X6, X12, X1);
		sha1_msg_schedule_update(&X5, X7, X13, X2);
		sha1_msg_schedule_update(&X6, X8, X14, X3);
		sha1_msg_schedule_update(&X7, X9, X15, X4);
		sha1_msg_schedule_update(&X8, X10, X0, X5);
		sha1_msg_schedule_update(&X9, X11, X1, X6);
		sha1_msg_schedule_update(&X10, X12, X2, X7);
		sha1_msg_schedule_update(&X11, X13, X3, X8);
		sha1_msg_schedule_update(&X12, X14, X4, X9);
		sha1_msg_schedule_update(&X13, X15, X5, X10);
		sha1_msg_schedule_update(&X14, X0, X6, X11);
		sha1_msg_schedule_update(&X15, X1, X7, X12);

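		/* Rounds 48-63: f switches from Maj back to Parity at t = 60. */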
		sha1_round3(&a, &b, &c, &d, &e, X0);
		sha1_round3(&a, &b, &c, &d, &e, X1);
		sha1_round3(&a, &b, &c, &d, &e, X2);
		sha1_round3(&a, &b, &c, &d, &e, X3);
		sha1_round3(&a, &b, &c, &d, &e, X4);
		sha1_round3(&a, &b, &c, &d, &e, X5);
		sha1_round3(&a, &b, &c, &d, &e, X6);
		sha1_round3(&a, &b, &c, &d, &e, X7);
		sha1_round3(&a, &b, &c, &d, &e, X8);
		sha1_round3(&a, &b, &c, &d, &e, X9);
		sha1_round3(&a, &b, &c, &d, &e, X10);
		sha1_round3(&a, &b, &c, &d, &e, X11);
		sha1_round4(&a, &b, &c, &d, &e, X12);
		sha1_round4(&a, &b, &c, &d, &e, X13);
		sha1_round4(&a, &b, &c, &d, &e, X14);
		sha1_round4(&a, &b, &c, &d, &e, X15);

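		/* Expand the message schedule to cover rounds 64-79. */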
		sha1_msg_schedule_update(&X0, X2, X8, X13);
		sha1_msg_schedule_update(&X1, X3, X9, X14);
		sha1_msg_schedule_update(&X2, X4, X10, X15);
		sha1_msg_schedule_update(&X3, X5, X11, X0);
		sha1_msg_schedule_update(&X4, X6, X12, X1);
		sha1_msg_schedule_update(&X5, X7, X13, X2);
		sha1_msg_schedule_update(&X6, X8, X14, X3);
		sha1_msg_schedule_update(&X7, X9, X15, X4);
		sha1_msg_schedule_update(&X8, X10, X0, X5);
		sha1_msg_schedule_update(&X9, X11, X1, X6);
		sha1_msg_schedule_update(&X10, X12, X2, X7);
		sha1_msg_schedule_update(&X11, X13, X3, X8);
		sha1_msg_schedule_update(&X12, X14, X4, X9);
		sha1_msg_schedule_update(&X13, X15, X5, X10);
		sha1_msg_schedule_update(&X14, X0, X6, X11);
		sha1_msg_schedule_update(&X15, X1, X7, X12);

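		/* Rounds 64-79. */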
		sha1_round4(&a, &b, &c, &d, &e, X0);
		sha1_round4(&a, &b, &c, &d, &e, X1);
		sha1_round4(&a, &b, &c, &d, &e, X2);
		sha1_round4(&a, &b, &c, &d, &e, X3);
		sha1_round4(&a, &b, &c, &d, &e, X4);
		sha1_round4(&a, &b, &c, &d, &e, X5);
		sha1_round4(&a, &b, &c, &d, &e, X6);
		sha1_round4(&a, &b, &c, &d, &e, X7);
		sha1_round4(&a, &b, &c, &d, &e, X8);
		sha1_round4(&a, &b, &c, &d, &e, X9);
		sha1_round4(&a, &b, &c, &d, &e, X10);
		sha1_round4(&a, &b, &c, &d, &e, X11);
		sha1_round4(&a, &b, &c, &d, &e, X12);
		sha1_round4(&a, &b, &c, &d, &e, X13);
		sha1_round4(&a, &b, &c, &d, &e, X14);
		sha1_round4(&a, &b, &c, &d, &e, X15);

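		/* Feed the working variables back into the chaining state. */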
		ctx->h0 += a;
		ctx->h1 += b;
		ctx->h2 += c;
		ctx->h3 += d;
		ctx->h4 += e;
	}
}
#endif

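/*
 * The initial hash value H(0) is the constant from FIPS 180-4, section 5.3.1.
 */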
int
SHA1_Init(SHA_CTX *c)
{
	memset(c, 0, sizeof(*c));

	c->h0 = 0x67452301UL;
	c->h1 = 0xefcdab89UL;
	c->h2 = 0x98badcfeUL;
	c->h3 = 0x10325476UL;
	c->h4 = 0xc3d2e1f0UL;

	return 1;
}
LCRYPTO_ALIAS(SHA1_Init);

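/*
 * SHA1_Update buffers partial blocks in c->data and hands complete 64-byte
 * blocks to sha1_block_data_order.  Nl/Nh together hold the total message
 * length in bits as a 64-bit value: len is counted as len << 3, with the
 * carry propagated into Nh.
 */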
int
SHA1_Update(SHA_CTX *c, const void *data_, size_t len)
{
	const unsigned char *data = data_;
	unsigned char *p;
	SHA_LONG l;
	size_t n;

	if (len == 0)
		return 1;

	l = (c->Nl + (((SHA_LONG)len) << 3)) & 0xffffffffUL;
	/* 95-05-24 eay Fixed a bug with the overflow handling, thanks to
	 * Wei Dai <weidai@eskimo.com> for pointing it out. */
	if (l < c->Nl) /* overflow */
		c->Nh++;
	c->Nh += (SHA_LONG)(len >> 29);	/* might cause compiler warning on 16-bit */
	c->Nl = l;

	n = c->num;
	if (n != 0) {
		p = (unsigned char *)c->data;

		if (len >= SHA_CBLOCK || len + n >= SHA_CBLOCK) {
			memcpy(p + n, data, SHA_CBLOCK - n);
			sha1_block_data_order(c, p, 1);
			n = SHA_CBLOCK - n;
			data += n;
			len -= n;
			c->num = 0;
			memset(p, 0, SHA_CBLOCK);	/* keep it zeroed */
		} else {
			memcpy(p + n, data, len);
			c->num += (unsigned int)len;
			return 1;
		}
	}

	n = len / SHA_CBLOCK;
	if (n > 0) {
		sha1_block_data_order(c, data, n);
		n *= SHA_CBLOCK;
		data += n;
		len -= n;
	}

	if (len != 0) {
		p = (unsigned char *)c->data;
		c->num = (unsigned int)len;
		memcpy(p, data, len);
	}
	return 1;
}
LCRYPTO_ALIAS(SHA1_Update);

void
SHA1_Transform(SHA_CTX *c, const unsigned char *data)
{
	sha1_block_data_order(c, data, 1);
}
LCRYPTO_ALIAS(SHA1_Transform);

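/*
 * SHA1_Final applies the FIPS 180-4 padding: a 0x80 byte, zero bytes until
 * the block length is 56 mod 64, then the 64-bit big-endian bit count
 * (Nh, Nl), before compressing the final block and serialising the state.
 */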
int
SHA1_Final(unsigned char *md, SHA_CTX *c)
{
	unsigned char *p = (unsigned char *)c->data;
	size_t n = c->num;

	p[n] = 0x80; /* there is always room for one */
	n++;

	if (n > (SHA_CBLOCK - 8)) {
		memset(p + n, 0, SHA_CBLOCK - n);
		n = 0;
		sha1_block_data_order(c, p, 1);
	}

	memset(p + n, 0, SHA_CBLOCK - 8 - n);
	c->data[SHA_LBLOCK - 2] = htobe32(c->Nh);
	c->data[SHA_LBLOCK - 1] = htobe32(c->Nl);

	sha1_block_data_order(c, p, 1);
	c->num = 0;
	memset(p, 0, SHA_CBLOCK);

	crypto_store_htobe32(&md[0 * 4], c->h0);
	crypto_store_htobe32(&md[1 * 4], c->h1);
	crypto_store_htobe32(&md[2 * 4], c->h2);
	crypto_store_htobe32(&md[3 * 4], c->h3);
	crypto_store_htobe32(&md[4 * 4], c->h4);

	return 1;
}
LCRYPTO_ALIAS(SHA1_Final);

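/*
 * One-shot interface: md must be non-NULL and point to at least
 * SHA_DIGEST_LENGTH (20) bytes of output space.  Illustrative caller
 * usage (not part of this file):
 *
 *	unsigned char digest[SHA_DIGEST_LENGTH];
 *
 *	SHA1((const unsigned char *)"abc", 3, digest);
 */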
unsigned char *
SHA1(const unsigned char *d, size_t n, unsigned char *md)
{
	SHA_CTX c;

	if (!SHA1_Init(&c))
		return NULL;
	SHA1_Update(&c, d, n);
	SHA1_Final(md, &c);

	explicit_bzero(&c, sizeof(c));

	return (md);
}
LCRYPTO_ALIAS(SHA1);

#endif