xref: /openbsd-src/lib/libcrypto/sha/sha256.c (revision ff0e7be1ebbcc809ea8ad2b6dafe215824da9e46)
1 /* $OpenBSD: sha256.c,v 1.22 2023/05/28 14:54:37 jsing Exp $ */
2 /* ====================================================================
3  * Copyright (c) 1998-2011 The OpenSSL Project.  All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  *
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  *
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in
14  *    the documentation and/or other materials provided with the
15  *    distribution.
16  *
17  * 3. All advertising materials mentioning features or use of this
18  *    software must display the following acknowledgment:
19  *    "This product includes software developed by the OpenSSL Project
20  *    for use in the OpenSSL Toolkit. (http://www.openssl.org/)"
21  *
22  * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to
23  *    endorse or promote products derived from this software without
24  *    prior written permission. For written permission, please contact
25  *    openssl-core@openssl.org.
26  *
27  * 5. Products derived from this software may not be called "OpenSSL"
28  *    nor may "OpenSSL" appear in their names without prior written
29  *    permission of the OpenSSL Project.
30  *
31  * 6. Redistributions of any form whatsoever must retain the following
32  *    acknowledgment:
33  *    "This product includes software developed by the OpenSSL Project
34  *    for use in the OpenSSL Toolkit (http://www.openssl.org/)"
35  *
36  * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY
37  * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
38  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
39  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE OpenSSL PROJECT OR
40  * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
41  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
42  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
43  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
44  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
45  * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
46  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
47  * OF THE POSSIBILITY OF SUCH DAMAGE.
48  * ====================================================================
49  *
50  * This product includes cryptographic software written by Eric Young
51  * (eay@cryptsoft.com).  This product includes software written by Tim
52  * Hudson (tjh@cryptsoft.com).
53  */
54 
55 #include <endian.h>
56 #include <stdlib.h>
57 #include <string.h>
58 
59 #include <openssl/opensslconf.h>
60 
61 #include <openssl/crypto.h>
62 #include <openssl/sha.h>
63 
64 #if !defined(OPENSSL_NO_SHA) && !defined(OPENSSL_NO_SHA256)
65 
66 #define	DATA_ORDER_IS_BIG_ENDIAN
67 
68 #define	HASH_LONG		SHA_LONG
69 #define	HASH_CTX		SHA256_CTX
70 #define	HASH_CBLOCK		SHA_CBLOCK
71 
72 #define	HASH_BLOCK_DATA_ORDER	sha256_block_data_order
73 
74 #ifndef SHA256_ASM
75 static
76 #endif
77 void sha256_block_data_order(SHA256_CTX *ctx, const void *in, size_t num);
78 
79 #define HASH_NO_UPDATE
80 #define HASH_NO_TRANSFORM
81 #define HASH_NO_FINAL
82 
83 #include "md32_common.h"
84 
#ifndef SHA256_ASM
/*
 * SHA-256 round constants (FIPS 180-4, sec. 4.2.2): the first 32 bits of
 * the fractional parts of the cube roots of the first 64 primes.
 */
static const SHA_LONG K256[64] = {
	0x428a2f98UL, 0x71374491UL, 0xb5c0fbcfUL, 0xe9b5dba5UL,
	0x3956c25bUL, 0x59f111f1UL, 0x923f82a4UL, 0xab1c5ed5UL,
	0xd807aa98UL, 0x12835b01UL, 0x243185beUL, 0x550c7dc3UL,
	0x72be5d74UL, 0x80deb1feUL, 0x9bdc06a7UL, 0xc19bf174UL,
	0xe49b69c1UL, 0xefbe4786UL, 0x0fc19dc6UL, 0x240ca1ccUL,
	0x2de92c6fUL, 0x4a7484aaUL, 0x5cb0a9dcUL, 0x76f988daUL,
	0x983e5152UL, 0xa831c66dUL, 0xb00327c8UL, 0xbf597fc7UL,
	0xc6e00bf3UL, 0xd5a79147UL, 0x06ca6351UL, 0x14292967UL,
	0x27b70a85UL, 0x2e1b2138UL, 0x4d2c6dfcUL, 0x53380d13UL,
	0x650a7354UL, 0x766a0abbUL, 0x81c2c92eUL, 0x92722c85UL,
	0xa2bfe8a1UL, 0xa81a664bUL, 0xc24b8b70UL, 0xc76c51a3UL,
	0xd192e819UL, 0xd6990624UL, 0xf40e3585UL, 0x106aa070UL,
	0x19a4c116UL, 0x1e376c08UL, 0x2748774cUL, 0x34b0bcb5UL,
	0x391c0cb3UL, 0x4ed8aa4aUL, 0x5b9cca4fUL, 0x682e6ff3UL,
	0x748f82eeUL, 0x78a5636fUL, 0x84c87814UL, 0x8cc70208UL,
	0x90befffaUL, 0xa4506cebUL, 0xbef9a3f7UL, 0xc67178f2UL,
};
104 
105 /*
106  * FIPS specification refers to right rotations, while our ROTATE macro
107  * is left one. This is why you might notice that rotation coefficients
108  * differ from those observed in FIPS document by 32-N...
109  */
#define Sigma0(x)	(ROTATE((x),30) ^ ROTATE((x),19) ^ ROTATE((x),10))
#define Sigma1(x)	(ROTATE((x),26) ^ ROTATE((x),21) ^ ROTATE((x),7))
#define sigma0(x)	(ROTATE((x),25) ^ ROTATE((x),14) ^ ((x)>>3))
#define sigma1(x)	(ROTATE((x),15) ^ ROTATE((x),13) ^ ((x)>>10))

/* Ch: x chooses between y and z; Maj: bitwise majority vote of x, y, z. */
#define Ch(x, y, z)	(((x) & (y)) ^ ((~(x)) & (z)))
#define Maj(x, y, z)	(((x) & (y)) ^ ((x) & (z)) ^ ((y) & (z)))
117 
118 #ifdef OPENSSL_SMALL_FOOTPRINT
119 
/*
 * Process num consecutive 64-byte blocks from in, updating ctx->h.
 * Compact variant: the 16-word message schedule X[] is maintained as a
 * ring and expanded one word per round, trading speed for code size.
 */
static void
sha256_block_data_order(SHA256_CTX *ctx, const void *in, size_t num)
{
	unsigned MD32_REG_T a, b, c, d, e, f, g, h, s0, s1, T1, T2;
	SHA_LONG	X[16], l;
	int i;
	const unsigned char *data = in;

	while (num--) {

		/* Load the current chaining value into the working variables. */
		a = ctx->h[0];
		b = ctx->h[1];
		c = ctx->h[2];
		d = ctx->h[3];
		e = ctx->h[4];
		f = ctx->h[5];
		g = ctx->h[6];
		h = ctx->h[7];

		/* Rounds 0-15: message words read big-endian from the input. */
		for (i = 0; i < 16; i++) {
			HOST_c2l(data, l);
			T1 = X[i] = l;
			T1 += h + Sigma1(e) + Ch(e, f, g) + K256[i];
			T2 = Sigma0(a) + Maj(a, b, c);
			h = g;
			g = f;
			f = e;
			e = d + T1;
			d = c;
			c = b;
			b = a;
			a = T1 + T2;
		}

		/* Rounds 16-63: expand the schedule in place in the 16-word ring. */
		for (; i < 64; i++) {
			s0 = X[(i + 1)&0x0f];
			s0 = sigma0(s0);
			s1 = X[(i + 14)&0x0f];
			s1 = sigma1(s1);

			T1 = X[i&0xf] += s0 + s1 + X[(i + 9)&0xf];
			T1 += h + Sigma1(e) + Ch(e, f, g) + K256[i];
			T2 = Sigma0(a) + Maj(a, b, c);
			h = g;
			g = f;
			f = e;
			e = d + T1;
			d = c;
			c = b;
			b = a;
			a = T1 + T2;
		}

		/* Fold this block's result back into the chaining value. */
		ctx->h[0] += a;
		ctx->h[1] += b;
		ctx->h[2] += c;
		ctx->h[3] += d;
		ctx->h[4] += e;
		ctx->h[5] += f;
		ctx->h[6] += g;
		ctx->h[7] += h;
	}
}
183 
184 #else
185 
/*
 * One round for i in [0,15].  On entry T1 must already hold the message
 * word X[i]; h is fully recomputed and d gets the usual e-update.  The
 * working-variable "rotation" is done by the caller permuting the macro
 * arguments rather than by moving values between registers.
 */
#define	ROUND_00_15(i, a, b, c, d, e, f, g, h)		do {	\
	T1 += h + Sigma1(e) + Ch(e, f, g) + K256[i];	\
	h = Sigma0(a) + Maj(a, b, c);			\
	d += T1;	h += T1;		} while (0)

/*
 * One round for i in [16,63]: expands the next schedule word into the
 * 16-entry ring X[] (also leaving it in T1), then runs ROUND_00_15.
 */
#define	ROUND_16_63(i, a, b, c, d, e, f, g, h, X)	do {	\
	s0 = X[(i+1)&0x0f];	s0 = sigma0(s0);	\
	s1 = X[(i+14)&0x0f];	s1 = sigma1(s1);	\
	T1 = X[(i)&0x0f] += s0 + s1 + X[(i+9)&0x0f];	\
	ROUND_00_15(i, a, b, c, d, e, f, g, h);		} while (0)
196 
/*
 * Process num consecutive 64-byte blocks from in, updating ctx->h.
 * Unrolled variant: all 64 rounds are expanded and the eight working
 * variables rotate via argument permutation in the ROUND_* macros.
 */
static void
sha256_block_data_order(SHA256_CTX *ctx, const void *in, size_t num)
{
	unsigned MD32_REG_T a, b, c, d, e, f, g, h, s0, s1, T1;
	SHA_LONG X[16];
	int i;
	const unsigned char *data = in;

	while (num--) {

		/* Load the current chaining value into the working variables. */
		a = ctx->h[0];
		b = ctx->h[1];
		c = ctx->h[2];
		d = ctx->h[3];
		e = ctx->h[4];
		f = ctx->h[5];
		g = ctx->h[6];
		h = ctx->h[7];

		/*
		 * Fast path: on big-endian hosts with 4-byte-aligned input the
		 * message words can be read directly.  Alignment of `in` implies
		 * alignment of every later block since SHA256_CBLOCK (64) is a
		 * multiple of 4.
		 */
		if (BYTE_ORDER != LITTLE_ENDIAN &&
		    sizeof(SHA_LONG) == 4 && ((size_t)in % 4) == 0) {
			const SHA_LONG *W = (const SHA_LONG *)data;

			T1 = X[0] = W[0];
			ROUND_00_15(0, a, b, c, d, e, f, g, h);
			T1 = X[1] = W[1];
			ROUND_00_15(1, h, a, b, c, d, e, f, g);
			T1 = X[2] = W[2];
			ROUND_00_15(2, g, h, a, b, c, d, e, f);
			T1 = X[3] = W[3];
			ROUND_00_15(3, f, g, h, a, b, c, d, e);
			T1 = X[4] = W[4];
			ROUND_00_15(4, e, f, g, h, a, b, c, d);
			T1 = X[5] = W[5];
			ROUND_00_15(5, d, e, f, g, h, a, b, c);
			T1 = X[6] = W[6];
			ROUND_00_15(6, c, d, e, f, g, h, a, b);
			T1 = X[7] = W[7];
			ROUND_00_15(7, b, c, d, e, f, g, h, a);
			T1 = X[8] = W[8];
			ROUND_00_15(8, a, b, c, d, e, f, g, h);
			T1 = X[9] = W[9];
			ROUND_00_15(9, h, a, b, c, d, e, f, g);
			T1 = X[10] = W[10];
			ROUND_00_15(10, g, h, a, b, c, d, e, f);
			T1 = X[11] = W[11];
			ROUND_00_15(11, f, g, h, a, b, c, d, e);
			T1 = X[12] = W[12];
			ROUND_00_15(12, e, f, g, h, a, b, c, d);
			T1 = X[13] = W[13];
			ROUND_00_15(13, d, e, f, g, h, a, b, c);
			T1 = X[14] = W[14];
			ROUND_00_15(14, c, d, e, f, g, h, a, b);
			T1 = X[15] = W[15];
			ROUND_00_15(15, b, c, d, e, f, g, h, a);

			data += SHA256_CBLOCK;
		} else {
			/* Generic path: assemble each big-endian word byte by byte. */
			SHA_LONG l;

			HOST_c2l(data, l);
			T1 = X[0] = l;
			ROUND_00_15(0, a, b, c, d, e, f, g, h);
			HOST_c2l(data, l);
			T1 = X[1] = l;
			ROUND_00_15(1, h, a, b, c, d, e, f, g);
			HOST_c2l(data, l);
			T1 = X[2] = l;
			ROUND_00_15(2, g, h, a, b, c, d, e, f);
			HOST_c2l(data, l);
			T1 = X[3] = l;
			ROUND_00_15(3, f, g, h, a, b, c, d, e);
			HOST_c2l(data, l);
			T1 = X[4] = l;
			ROUND_00_15(4, e, f, g, h, a, b, c, d);
			HOST_c2l(data, l);
			T1 = X[5] = l;
			ROUND_00_15(5, d, e, f, g, h, a, b, c);
			HOST_c2l(data, l);
			T1 = X[6] = l;
			ROUND_00_15(6, c, d, e, f, g, h, a, b);
			HOST_c2l(data, l);
			T1 = X[7] = l;
			ROUND_00_15(7, b, c, d, e, f, g, h, a);
			HOST_c2l(data, l);
			T1 = X[8] = l;
			ROUND_00_15(8, a, b, c, d, e, f, g, h);
			HOST_c2l(data, l);
			T1 = X[9] = l;
			ROUND_00_15(9, h, a, b, c, d, e, f, g);
			HOST_c2l(data, l);
			T1 = X[10] = l;
			ROUND_00_15(10, g, h, a, b, c, d, e, f);
			HOST_c2l(data, l);
			T1 = X[11] = l;
			ROUND_00_15(11, f, g, h, a, b, c, d, e);
			HOST_c2l(data, l);
			T1 = X[12] = l;
			ROUND_00_15(12, e, f, g, h, a, b, c, d);
			HOST_c2l(data, l);
			T1 = X[13] = l;
			ROUND_00_15(13, d, e, f, g, h, a, b, c);
			HOST_c2l(data, l);
			T1 = X[14] = l;
			ROUND_00_15(14, c, d, e, f, g, h, a, b);
			HOST_c2l(data, l);
			T1 = X[15] = l;
			ROUND_00_15(15, b, c, d, e, f, g, h, a);
		}

		/* Rounds 16-63, eight per iteration to complete the rotation. */
		for (i = 16; i < 64; i += 8) {
			ROUND_16_63(i + 0, a, b, c, d, e, f, g, h, X);
			ROUND_16_63(i + 1, h, a, b, c, d, e, f, g, X);
			ROUND_16_63(i + 2, g, h, a, b, c, d, e, f, X);
			ROUND_16_63(i + 3, f, g, h, a, b, c, d, e, X);
			ROUND_16_63(i + 4, e, f, g, h, a, b, c, d, X);
			ROUND_16_63(i + 5, d, e, f, g, h, a, b, c, X);
			ROUND_16_63(i + 6, c, d, e, f, g, h, a, b, X);
			ROUND_16_63(i + 7, b, c, d, e, f, g, h, a, X);
		}

		/* Fold this block's result back into the chaining value. */
		ctx->h[0] += a;
		ctx->h[1] += b;
		ctx->h[2] += c;
		ctx->h[3] += d;
		ctx->h[4] += e;
		ctx->h[5] += f;
		ctx->h[6] += g;
		ctx->h[7] += h;
	}
}
328 
329 #endif
330 #endif /* SHA256_ASM */
331 
332 int
333 SHA224_Init(SHA256_CTX *c)
334 {
335 	memset(c, 0, sizeof(*c));
336 
337 	c->h[0] = 0xc1059ed8UL;
338 	c->h[1] = 0x367cd507UL;
339 	c->h[2] = 0x3070dd17UL;
340 	c->h[3] = 0xf70e5939UL;
341 	c->h[4] = 0xffc00b31UL;
342 	c->h[5] = 0x68581511UL;
343 	c->h[6] = 0x64f98fa7UL;
344 	c->h[7] = 0xbefa4fa4UL;
345 
346 	c->md_len = SHA224_DIGEST_LENGTH;
347 
348 	return 1;
349 }
350 
/*
 * Absorb len bytes from data.  SHA-224 shares the SHA-256 compression
 * and buffering path; only the IV and output length differ.
 */
int
SHA224_Update(SHA256_CTX *c, const void *data, size_t len)
{
	return SHA256_Update(c, data, len);
}
356 
/*
 * Finalize and write the digest to md.  SHA256_Final truncates to
 * 28 bytes because c->md_len was set by SHA224_Init.
 */
int
SHA224_Final(unsigned char *md, SHA256_CTX *c)
{
	return SHA256_Final(md, c);
}
362 
363 unsigned char *
364 SHA224(const unsigned char *d, size_t n, unsigned char *md)
365 {
366 	SHA256_CTX c;
367 	static unsigned char m[SHA224_DIGEST_LENGTH];
368 
369 	if (md == NULL)
370 		md = m;
371 
372 	SHA224_Init(&c);
373 	SHA256_Update(&c, d, n);
374 	SHA256_Final(md, &c);
375 
376 	explicit_bzero(&c, sizeof(c));
377 
378 	return (md);
379 }
380 
381 int
382 SHA256_Init(SHA256_CTX *c)
383 {
384 	memset(c, 0, sizeof(*c));
385 
386 	c->h[0] = 0x6a09e667UL;
387 	c->h[1] = 0xbb67ae85UL;
388 	c->h[2] = 0x3c6ef372UL;
389 	c->h[3] = 0xa54ff53aUL;
390 	c->h[4] = 0x510e527fUL;
391 	c->h[5] = 0x9b05688cUL;
392 	c->h[6] = 0x1f83d9abUL;
393 	c->h[7] = 0x5be0cd19UL;
394 
395 	c->md_len = SHA256_DIGEST_LENGTH;
396 
397 	return 1;
398 }
399 
/*
 * Absorb len bytes from data_ into the running hash.  Maintains the
 * 64-bit message bit count in Nh:Nl, buffers partial blocks in c->data
 * and hashes whole 64-byte blocks directly from the caller's buffer.
 * Always returns 1.
 */
int
SHA256_Update(SHA256_CTX *c, const void *data_, size_t len)
{
	const unsigned char *data = data_;
	unsigned char *p;
	SHA_LONG l;
	size_t n;

	if (len == 0)
		return 1;

	/* Add len bytes (as bits) to the 64-bit count kept in Nh:Nl. */
	l = (c->Nl + (((SHA_LONG)len) << 3)) & 0xffffffffUL;
	/* 95-05-24 eay Fixed a bug with the overflow handling, thanks to
	 * Wei Dai <weidai@eskimo.com> for pointing it out. */
	if (l < c->Nl) /* overflow */
		c->Nh++;
	c->Nh += (SHA_LONG)(len >> 29);	/* might cause compiler warning on 16-bit */
	c->Nl = l;

	/* Complete a partially filled block left over from a previous call. */
	n = c->num;
	if (n != 0) {
		p = (unsigned char *)c->data;

		if (len >= SHA_CBLOCK || len + n >= SHA_CBLOCK) {
			memcpy(p + n, data, SHA_CBLOCK - n);
			sha256_block_data_order(c, p, 1);
			n = SHA_CBLOCK - n;
			data += n;
			len -= n;
			c->num = 0;
			memset(p, 0, SHA_CBLOCK);	/* keep it zeroed */
		} else {
			/* Still less than one block: buffer and return. */
			memcpy(p + n, data, len);
			c->num += (unsigned int)len;
			return 1;
		}
	}

	/* Hash as many whole blocks as possible straight from data. */
	n = len/SHA_CBLOCK;
	if (n > 0) {
		sha256_block_data_order(c, data, n);
		n *= SHA_CBLOCK;
		data += n;
		len -= n;
	}

	/* Stash any trailing bytes for the next call. */
	if (len != 0) {
		p = (unsigned char *)c->data;
		c->num = (unsigned int)len;
		memcpy(p, data, len);
	}
	return 1;
}
453 
/*
 * Run the compression function on exactly one 64-byte block, without
 * touching the length or buffer state.
 */
void
SHA256_Transform(SHA256_CTX *c, const unsigned char *data)
{
	sha256_block_data_order(c, data, 1);
}
459 
/*
 * Finalize the hash: append the 0x80 pad byte, zero-fill, write the
 * 64-bit message length (big-endian) into the last 8 bytes, compress
 * the final block(s) and serialize c->md_len bytes of digest to md.
 * Returns 1 on success, 0 if c->md_len exceeds SHA256_DIGEST_LENGTH.
 */
int
SHA256_Final(unsigned char *md, SHA256_CTX *c)
{
	unsigned char *p = (unsigned char *)c->data;
	size_t n = c->num;
	unsigned long ll;
	unsigned int nn;

	p[n] = 0x80; /* there is always room for one */
	n++;

	/* No room left for the 8-byte length: flush this block first. */
	if (n > (SHA_CBLOCK - 8)) {
		memset(p + n, 0, SHA_CBLOCK - n);
		n = 0;
		sha256_block_data_order(c, p, 1);
	}
	memset(p + n, 0, SHA_CBLOCK - 8 - n);

	/* Append the message length in bits as two big-endian words. */
	p += SHA_CBLOCK - 8;
#if   defined(DATA_ORDER_IS_BIG_ENDIAN)
	HOST_l2c(c->Nh, p);
	HOST_l2c(c->Nl, p);
#elif defined(DATA_ORDER_IS_LITTLE_ENDIAN)
	HOST_l2c(c->Nl, p);
	HOST_l2c(c->Nh, p);
#endif
	p -= SHA_CBLOCK;
	sha256_block_data_order(c, p, 1);
	c->num = 0;
	memset(p, 0, SHA_CBLOCK); /* scrub the padded block from the context */

	/*
	 * Note that FIPS180-2 discusses "Truncation of the Hash Function Output."
	 * default: case below covers for it. It's not clear however if it's
	 * permitted to truncate to amount of bytes not divisible by 4. I bet not,
	 * but if it is, then default: case shall be extended. For reference.
	 * Idea behind separate cases for pre-defined lengths is to let the
	 * compiler decide if it's appropriate to unroll small loops.
	 */
	switch (c->md_len) {
	case SHA224_DIGEST_LENGTH:
		for (nn = 0; nn < SHA224_DIGEST_LENGTH / 4; nn++) {
			ll = c->h[nn];
			HOST_l2c(ll, md);
		}
		break;

	case SHA256_DIGEST_LENGTH:
		for (nn = 0; nn < SHA256_DIGEST_LENGTH / 4; nn++) {
			ll = c->h[nn];
			HOST_l2c(ll, md);
		}
		break;

	default:
		if (c->md_len > SHA256_DIGEST_LENGTH)
			return 0;
		for (nn = 0; nn < c->md_len / 4; nn++) {
			ll = c->h[nn];
			HOST_l2c(ll, md);
		}
		break;
	}

	return 1;
}
526 
527 unsigned char *
528 SHA256(const unsigned char *d, size_t n, unsigned char *md)
529 {
530 	SHA256_CTX c;
531 	static unsigned char m[SHA256_DIGEST_LENGTH];
532 
533 	if (md == NULL)
534 		md = m;
535 
536 	SHA256_Init(&c);
537 	SHA256_Update(&c, d, n);
538 	SHA256_Final(md, &c);
539 
540 	explicit_bzero(&c, sizeof(c));
541 
542 	return (md);
543 }
544 
545 #endif /* OPENSSL_NO_SHA256 */
546