/* $OpenBSD: sha256.c,v 1.30 2023/08/11 15:27:28 jsing Exp $ */
/* ====================================================================
 * Copyright (c) 1998-2011 The OpenSSL Project. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * 3. All advertising materials mentioning features or use of this
 *    software must display the following acknowledgment:
 *    "This product includes software developed by the OpenSSL Project
 *    for use in the OpenSSL Toolkit. (http://www.openssl.org/)"
 *
 * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to
 *    endorse or promote products derived from this software without
 *    prior written permission. For written permission, please contact
 *    openssl-core@openssl.org.
 *
 * 5. Products derived from this software may not be called "OpenSSL"
 *    nor may "OpenSSL" appear in their names without prior written
 *    permission of the OpenSSL Project.
 *
 * 6. Redistributions of any form whatsoever must retain the following
 *    acknowledgment:
 *    "This product includes software developed by the OpenSSL Project
 *    for use in the OpenSSL Toolkit (http://www.openssl.org/)"
 *
 * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY
 * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE OpenSSL PROJECT OR
 * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 * OF THE POSSIBILITY OF SUCH DAMAGE.
 * ====================================================================
 *
 * This product includes cryptographic software written by Eric Young
 * (eay@cryptsoft.com). This product includes software written by Tim
 * Hudson (tjh@cryptsoft.com).
 */

#include <endian.h>
#include <stdlib.h>
#include <string.h>

#include <openssl/opensslconf.h>

#include <openssl/crypto.h>
#include <openssl/sha.h>

#include "crypto_internal.h"

#if !defined(OPENSSL_NO_SHA) && !defined(OPENSSL_NO_SHA256)

/* Ensure that SHA_LONG and uint32_t are equivalent. */
CTASSERT(sizeof(SHA_LONG) == sizeof(uint32_t));

#ifdef SHA256_ASM
void sha256_block_data_order(SHA256_CTX *ctx, const void *_in, size_t num);
#endif

#ifndef SHA256_ASM
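/*
 * Round constants: per FIPS 180-4, section 4.2.2, these are the first
 * 32 bits of the fractional parts of the cube roots of the first 64
 * prime numbers.
 */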
static const SHA_LONG K256[64] = {
	0x428a2f98UL, 0x71374491UL, 0xb5c0fbcfUL, 0xe9b5dba5UL,
	0x3956c25bUL, 0x59f111f1UL, 0x923f82a4UL, 0xab1c5ed5UL,
	0xd807aa98UL, 0x12835b01UL, 0x243185beUL, 0x550c7dc3UL,
	0x72be5d74UL, 0x80deb1feUL, 0x9bdc06a7UL, 0xc19bf174UL,
	0xe49b69c1UL, 0xefbe4786UL, 0x0fc19dc6UL, 0x240ca1ccUL,
	0x2de92c6fUL, 0x4a7484aaUL, 0x5cb0a9dcUL, 0x76f988daUL,
	0x983e5152UL, 0xa831c66dUL, 0xb00327c8UL, 0xbf597fc7UL,
	0xc6e00bf3UL, 0xd5a79147UL, 0x06ca6351UL, 0x14292967UL,
	0x27b70a85UL, 0x2e1b2138UL, 0x4d2c6dfcUL, 0x53380d13UL,
	0x650a7354UL, 0x766a0abbUL, 0x81c2c92eUL, 0x92722c85UL,
	0xa2bfe8a1UL, 0xa81a664bUL, 0xc24b8b70UL, 0xc76c51a3UL,
	0xd192e819UL, 0xd6990624UL, 0xf40e3585UL, 0x106aa070UL,
	0x19a4c116UL, 0x1e376c08UL, 0x2748774cUL, 0x34b0bcb5UL,
	0x391c0cb3UL, 0x4ed8aa4aUL, 0x5b9cca4fUL, 0x682e6ff3UL,
	0x748f82eeUL, 0x78a5636fUL, 0x84c87814UL, 0x8cc70208UL,
	0x90befffaUL, 0xa4506cebUL, 0xbef9a3f7UL, 0xc67178f2UL,
};

/* The Sigma/sigma and Ch/Maj functions of FIPS 180-4, section 4.1.2. */
static inline SHA_LONG
Sigma0(SHA_LONG x)
{
	return crypto_ror_u32(x, 2) ^ crypto_ror_u32(x, 13) ^
	    crypto_ror_u32(x, 22);
}

static inline SHA_LONG
Sigma1(SHA_LONG x)
{
	return crypto_ror_u32(x, 6) ^ crypto_ror_u32(x, 11) ^
	    crypto_ror_u32(x, 25);
}

static inline SHA_LONG
sigma0(SHA_LONG x)
{
	return crypto_ror_u32(x, 7) ^ crypto_ror_u32(x, 18) ^ (x >> 3);
}

static inline SHA_LONG
sigma1(SHA_LONG x)
{
	return crypto_ror_u32(x, 17) ^ crypto_ror_u32(x, 19) ^ (x >> 10);
}

static inline SHA_LONG
Ch(SHA_LONG x, SHA_LONG y, SHA_LONG z)
{
	return (x & y) ^ (~x & z);
}

static inline SHA_LONG
Maj(SHA_LONG x, SHA_LONG y, SHA_LONG z)
{
	return (x & y) ^ (x & z) ^ (y & z);
}

/*
 * Message schedule update: with the schedule kept in a 16-word circular
 * buffer, W[t] = sigma1(W[t-2]) + W[t-7] + sigma0(W[t-15]) + W[t-16]
 * (FIPS 180-4, section 6.2.2).
 */
static inline void
sha256_msg_schedule_update(SHA_LONG *W0, SHA_LONG W1,
    SHA_LONG W9, SHA_LONG W14)
{
	*W0 = sigma1(W14) + W9 + sigma0(W1) + *W0;
}

/* One SHA-256 round: compute T1 and T2, then rotate the working variables. */
static inline void
sha256_round(SHA_LONG *a, SHA_LONG *b, SHA_LONG *c, SHA_LONG *d,
    SHA_LONG *e, SHA_LONG *f, SHA_LONG *g, SHA_LONG *h,
    SHA_LONG Kt, SHA_LONG Wt)
{
	SHA_LONG T1, T2;

	T1 = *h + Sigma1(*e) + Ch(*e, *f, *g) + Kt + Wt;
	T2 = Sigma0(*a) + Maj(*a, *b, *c);

	*h = *g;
	*g = *f;
	*f = *e;
	*e = *d + T1;
	*d = *c;
	*c = *b;
	*b = *a;
	*a = T1 + T2;
}
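
/*
 * Process num complete 64-byte blocks: load each block as 16 big-endian
 * 32-bit words, run the 64 rounds with the message schedule computed in
 * place, then add the working variables back into the chaining state.
 */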
static void
sha256_block_data_order(SHA256_CTX *ctx, const void *_in, size_t num)
{
	const uint8_t *in = _in;
	const SHA_LONG *in32;
	SHA_LONG a, b, c, d, e, f, g, h;
	SHA_LONG X[16];
	int i;

	while (num--) {
		a = ctx->h[0];
		b = ctx->h[1];
		c = ctx->h[2];
		d = ctx->h[3];
		e = ctx->h[4];
		f = ctx->h[5];
		g = ctx->h[6];
		h = ctx->h[7];

		if ((size_t)in % 4 == 0) {
			/* Input is 32 bit aligned. */
			in32 = (const SHA_LONG *)in;
			X[0] = be32toh(in32[0]);
			X[1] = be32toh(in32[1]);
			X[2] = be32toh(in32[2]);
			X[3] = be32toh(in32[3]);
			X[4] = be32toh(in32[4]);
			X[5] = be32toh(in32[5]);
			X[6] = be32toh(in32[6]);
			X[7] = be32toh(in32[7]);
			X[8] = be32toh(in32[8]);
			X[9] = be32toh(in32[9]);
			X[10] = be32toh(in32[10]);
			X[11] = be32toh(in32[11]);
			X[12] = be32toh(in32[12]);
			X[13] = be32toh(in32[13]);
			X[14] = be32toh(in32[14]);
			X[15] = be32toh(in32[15]);
		} else {
			/* Input is not 32 bit aligned. */
			X[0] = crypto_load_be32toh(&in[0 * 4]);
			X[1] = crypto_load_be32toh(&in[1 * 4]);
			X[2] = crypto_load_be32toh(&in[2 * 4]);
			X[3] = crypto_load_be32toh(&in[3 * 4]);
			X[4] = crypto_load_be32toh(&in[4 * 4]);
			X[5] = crypto_load_be32toh(&in[5 * 4]);
			X[6] = crypto_load_be32toh(&in[6 * 4]);
			X[7] = crypto_load_be32toh(&in[7 * 4]);
			X[8] = crypto_load_be32toh(&in[8 * 4]);
			X[9] = crypto_load_be32toh(&in[9 * 4]);
			X[10] = crypto_load_be32toh(&in[10 * 4]);
			X[11] = crypto_load_be32toh(&in[11 * 4]);
			X[12] = crypto_load_be32toh(&in[12 * 4]);
			X[13] = crypto_load_be32toh(&in[13 * 4]);
			X[14] = crypto_load_be32toh(&in[14 * 4]);
			X[15] = crypto_load_be32toh(&in[15 * 4]);
		}
		in += SHA256_CBLOCK;

		sha256_round(&a, &b, &c, &d, &e, &f, &g, &h, K256[0], X[0]);
		sha256_round(&a, &b, &c, &d, &e, &f, &g, &h, K256[1], X[1]);
		sha256_round(&a, &b, &c, &d, &e, &f, &g, &h, K256[2], X[2]);
		sha256_round(&a, &b, &c, &d, &e, &f, &g, &h, K256[3], X[3]);
		sha256_round(&a, &b, &c, &d, &e, &f, &g, &h, K256[4], X[4]);
		sha256_round(&a, &b, &c, &d, &e, &f, &g, &h, K256[5], X[5]);
		sha256_round(&a, &b, &c, &d, &e, &f, &g, &h, K256[6], X[6]);
		sha256_round(&a, &b, &c, &d, &e, &f, &g, &h, K256[7], X[7]);
		sha256_round(&a, &b, &c, &d, &e, &f, &g, &h, K256[8], X[8]);
		sha256_round(&a, &b, &c, &d, &e, &f, &g, &h, K256[9], X[9]);
		sha256_round(&a, &b, &c, &d, &e, &f, &g, &h, K256[10], X[10]);
		sha256_round(&a, &b, &c, &d, &e, &f, &g, &h, K256[11], X[11]);
		sha256_round(&a, &b, &c, &d, &e, &f, &g, &h, K256[12], X[12]);
		sha256_round(&a, &b, &c, &d, &e, &f, &g, &h, K256[13], X[13]);
		sha256_round(&a, &b, &c, &d, &e, &f, &g, &h, K256[14], X[14]);
		sha256_round(&a, &b, &c, &d, &e, &f, &g, &h, K256[15], X[15]);
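
		/*
		 * Remaining 48 rounds, 16 at a time: X[t % 16] is updated
		 * in place to W[t] before each round consumes it.
		 */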
		for (i = 16; i < 64; i += 16) {
			sha256_msg_schedule_update(&X[0], X[1], X[9], X[14]);
			sha256_msg_schedule_update(&X[1], X[2], X[10], X[15]);
			sha256_msg_schedule_update(&X[2], X[3], X[11], X[0]);
			sha256_msg_schedule_update(&X[3], X[4], X[12], X[1]);
			sha256_msg_schedule_update(&X[4], X[5], X[13], X[2]);
			sha256_msg_schedule_update(&X[5], X[6], X[14], X[3]);
			sha256_msg_schedule_update(&X[6], X[7], X[15], X[4]);
			sha256_msg_schedule_update(&X[7], X[8], X[0], X[5]);
			sha256_msg_schedule_update(&X[8], X[9], X[1], X[6]);
			sha256_msg_schedule_update(&X[9], X[10], X[2], X[7]);
			sha256_msg_schedule_update(&X[10], X[11], X[3], X[8]);
			sha256_msg_schedule_update(&X[11], X[12], X[4], X[9]);
			sha256_msg_schedule_update(&X[12], X[13], X[5], X[10]);
			sha256_msg_schedule_update(&X[13], X[14], X[6], X[11]);
			sha256_msg_schedule_update(&X[14], X[15], X[7], X[12]);
			sha256_msg_schedule_update(&X[15], X[0], X[8], X[13]);

			sha256_round(&a, &b, &c, &d, &e, &f, &g, &h, K256[i + 0], X[0]);
			sha256_round(&a, &b, &c, &d, &e, &f, &g, &h, K256[i + 1], X[1]);
			sha256_round(&a, &b, &c, &d, &e, &f, &g, &h, K256[i + 2], X[2]);
			sha256_round(&a, &b, &c, &d, &e, &f, &g, &h, K256[i + 3], X[3]);
			sha256_round(&a, &b, &c, &d, &e, &f, &g, &h, K256[i + 4], X[4]);
			sha256_round(&a, &b, &c, &d, &e, &f, &g, &h, K256[i + 5], X[5]);
			sha256_round(&a, &b, &c, &d, &e, &f, &g, &h, K256[i + 6], X[6]);
			sha256_round(&a, &b, &c, &d, &e, &f, &g, &h, K256[i + 7], X[7]);
			sha256_round(&a, &b, &c, &d, &e, &f, &g, &h, K256[i + 8], X[8]);
			sha256_round(&a, &b, &c, &d, &e, &f, &g, &h, K256[i + 9], X[9]);
			sha256_round(&a, &b, &c, &d, &e, &f, &g, &h, K256[i + 10], X[10]);
			sha256_round(&a, &b, &c, &d, &e, &f, &g, &h, K256[i + 11], X[11]);
			sha256_round(&a, &b, &c, &d, &e, &f, &g, &h, K256[i + 12], X[12]);
			sha256_round(&a, &b, &c, &d, &e, &f, &g, &h, K256[i + 13], X[13]);
			sha256_round(&a, &b, &c, &d, &e, &f, &g, &h, K256[i + 14], X[14]);
			sha256_round(&a, &b, &c, &d, &e, &f, &g, &h, K256[i + 15], X[15]);
		}

		ctx->h[0] += a;
		ctx->h[1] += b;
		ctx->h[2] += c;
		ctx->h[3] += d;
		ctx->h[4] += e;
		ctx->h[5] += f;
		ctx->h[6] += g;
		ctx->h[7] += h;
	}
}
#endif /* SHA256_ASM */

/*
 * SHA-224 initial hash value: per FIPS 180-4, section 5.3.2, the second
 * 32 bits of the fractional parts of the square roots of the ninth
 * through sixteenth prime numbers.
 */
int
SHA224_Init(SHA256_CTX *c)
{
	memset(c, 0, sizeof(*c));

	c->h[0] = 0xc1059ed8UL;
	c->h[1] = 0x367cd507UL;
	c->h[2] = 0x3070dd17UL;
	c->h[3] = 0xf70e5939UL;
	c->h[4] = 0xffc00b31UL;
	c->h[5] = 0x68581511UL;
	c->h[6] = 0x64f98fa7UL;
	c->h[7] = 0xbefa4fa4UL;

	c->md_len = SHA224_DIGEST_LENGTH;

	return 1;
}
LCRYPTO_ALIAS(SHA224_Init);

int
SHA224_Update(SHA256_CTX *c, const void *data, size_t len)
{
	return SHA256_Update(c, data, len);
}
LCRYPTO_ALIAS(SHA224_Update);

int
SHA224_Final(unsigned char *md, SHA256_CTX *c)
{
	return SHA256_Final(md, c);
}
LCRYPTO_ALIAS(SHA224_Final);

unsigned char *
SHA224(const unsigned char *d, size_t n, unsigned char *md)
{
	SHA256_CTX c;
	static unsigned char m[SHA224_DIGEST_LENGTH];

	if (md == NULL)
		md = m;

	SHA224_Init(&c);
	SHA256_Update(&c, d, n);
	SHA256_Final(md, &c);

	explicit_bzero(&c, sizeof(c));

	return (md);
}
LCRYPTO_ALIAS(SHA224);

/*
 * SHA-256 initial hash value: per FIPS 180-4, section 5.3.3, the first
 * 32 bits of the fractional parts of the square roots of the first
 * eight prime numbers.
 */
int
SHA256_Init(SHA256_CTX *c)
{
	memset(c, 0, sizeof(*c));

	c->h[0] = 0x6a09e667UL;
	c->h[1] = 0xbb67ae85UL;
	c->h[2] = 0x3c6ef372UL;
	c->h[3] = 0xa54ff53aUL;
	c->h[4] = 0x510e527fUL;
	c->h[5] = 0x9b05688cUL;
	c->h[6] = 0x1f83d9abUL;
	c->h[7] = 0x5be0cd19UL;

	c->md_len = SHA256_DIGEST_LENGTH;

	return 1;
}
LCRYPTO_ALIAS(SHA256_Init);
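
/*
 * Nl and Nh together hold the running message length in bits as a 64-bit
 * value: each update folds in len << 3 (bytes to bits), with any carry
 * out of Nl propagated into Nh.
 */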
int
SHA256_Update(SHA256_CTX *c, const void *data_, size_t len)
{
	const unsigned char *data = data_;
	unsigned char *p;
	SHA_LONG l;
	size_t n;

	if (len == 0)
		return 1;

	l = (c->Nl + (((SHA_LONG)len) << 3)) & 0xffffffffUL;
	/* 95-05-24 eay Fixed a bug with the overflow handling, thanks to
	 * Wei Dai <weidai@eskimo.com> for pointing it out. */
	if (l < c->Nl) /* overflow */
		c->Nh++;
	c->Nh += (SHA_LONG)(len >> 29);	/* might cause compiler warning on 16-bit */
	c->Nl = l;

	n = c->num;
	if (n != 0) {
		p = (unsigned char *)c->data;

		if (len >= SHA_CBLOCK || len + n >= SHA_CBLOCK) {
			memcpy(p + n, data, SHA_CBLOCK - n);
			sha256_block_data_order(c, p, 1);
			n = SHA_CBLOCK - n;
			data += n;
			len -= n;
			c->num = 0;
			memset(p, 0, SHA_CBLOCK);	/* keep it zeroed */
		} else {
			memcpy(p + n, data, len);
			c->num += (unsigned int)len;
			return 1;
		}
	}

	n = len / SHA_CBLOCK;
	if (n > 0) {
		sha256_block_data_order(c, data, n);
		n *= SHA_CBLOCK;
		data += n;
		len -= n;
	}

	if (len != 0) {
		p = (unsigned char *)c->data;
		c->num = (unsigned int)len;
		memcpy(p, data, len);
	}
	return 1;
}
LCRYPTO_ALIAS(SHA256_Update);

void
SHA256_Transform(SHA256_CTX *c, const unsigned char *data)
{
	sha256_block_data_order(c, data, 1);
}
LCRYPTO_ALIAS(SHA256_Transform);

int
SHA256_Final(unsigned char *md, SHA256_CTX *c)
{
	unsigned char *p = (unsigned char *)c->data;
	size_t n = c->num;
	unsigned int nn;

	p[n] = 0x80;	/* there is always room for one */
	n++;

	if (n > (SHA_CBLOCK - 8)) {
		memset(p + n, 0, SHA_CBLOCK - n);
		n = 0;
		sha256_block_data_order(c, p, 1);
	}

	memset(p + n, 0, SHA_CBLOCK - 8 - n);
	c->data[SHA_LBLOCK - 2] = htobe32(c->Nh);
	c->data[SHA_LBLOCK - 1] = htobe32(c->Nl);

	sha256_block_data_order(c, p, 1);
	c->num = 0;
	memset(p, 0, SHA_CBLOCK);

	/*
	 * FIPS 180-2 discusses "Truncation of the Hash Function Output";
	 * the default: case below covers this. It is unclear whether
	 * truncation to a number of bytes not divisible by 4 is permitted
	 * (most likely not); if it is, the default: case will need to be
	 * extended. The separate cases for the pre-defined lengths let the
	 * compiler decide whether to unroll the small loops.
	 */
	switch (c->md_len) {
	case SHA224_DIGEST_LENGTH:
		for (nn = 0; nn < SHA224_DIGEST_LENGTH / 4; nn++) {
			crypto_store_htobe32(md, c->h[nn]);
			md += 4;
		}
		break;

	case SHA256_DIGEST_LENGTH:
		for (nn = 0; nn < SHA256_DIGEST_LENGTH / 4; nn++) {
			crypto_store_htobe32(md, c->h[nn]);
			md += 4;
		}
		break;

	default:
		if (c->md_len > SHA256_DIGEST_LENGTH)
			return 0;
		for (nn = 0; nn < c->md_len / 4; nn++) {
			crypto_store_htobe32(md, c->h[nn]);
			md += 4;
		}
		break;
	}

	return 1;
}
LCRYPTO_ALIAS(SHA256_Final);

unsigned char *
SHA256(const unsigned char *d, size_t n, unsigned char *md)
{
	SHA256_CTX c;
	static unsigned char m[SHA256_DIGEST_LENGTH];

	if (md == NULL)
		md = m;

	SHA256_Init(&c);
	SHA256_Update(&c, d, n);
	SHA256_Final(md, &c);

	explicit_bzero(&c, sizeof(c));

	return (md);
}
LCRYPTO_ALIAS(SHA256);

#endif /* !OPENSSL_NO_SHA && !OPENSSL_NO_SHA256 */
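
/*
 * Minimal usage sketch of the streaming interface above (buf and buf_len
 * are illustrative; the one-shot SHA256() wraps the same sequence):
 *
 *	SHA256_CTX ctx;
 *	unsigned char digest[SHA256_DIGEST_LENGTH];
 *
 *	SHA256_Init(&ctx);
 *	SHA256_Update(&ctx, buf, buf_len);
 *	SHA256_Final(digest, &ctx);
 */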