/*	$NetBSD: cryptosoft.c,v 1.26 2010/08/02 19:59:35 jakllsch Exp $ */
/*	$FreeBSD: src/sys/opencrypto/cryptosoft.c,v 1.2.2.1 2002/11/21 23:34:23 sam Exp $	*/
/*	$OpenBSD: cryptosoft.c,v 1.35 2002/04/26 08:43:50 deraadt Exp $	*/

/*
 * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu)
 *
 * This code was written by Angelos D. Keromytis in Athens, Greece, in
 * February 2000. Network Security Technologies Inc. (NSTI) kindly
 * supported the development of this code.
 *
 * Copyright (c) 2000, 2001 Angelos D. Keromytis
 *
 * Permission to use, copy, and modify this software with or without fee
 * is hereby granted, provided that this entire notice is included in
 * all source code copies of any software which is or includes a copy or
 * modification of this software.
 *
 * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
 * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
 * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
 * PURPOSE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: cryptosoft.c,v 1.26 2010/08/02 19:59:35 jakllsch Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/sysctl.h>
#include <sys/errno.h>

#include "opt_ocf.h"
#include <opencrypto/cryptodev.h>
#include <opencrypto/cryptosoft.h>
#include <opencrypto/xform.h>

#include <opencrypto/cryptosoft_xform.c>

union authctx {
	MD5_CTX md5ctx;
	SHA1_CTX sha1ctx;
	RMD160_CTX rmd160ctx;
	SHA256_CTX sha256ctx;
	SHA384_CTX sha384ctx;
	SHA512_CTX sha512ctx;
};

struct swcr_data **swcr_sessions = NULL;
u_int32_t swcr_sesnum = 0;
int32_t swcr_id = -1;

#define COPYBACK(x, a, b, c, d) \
	(x) == CRYPTO_BUF_MBUF ? m_copyback((struct mbuf *)a,b,c,d) \
	: cuio_copyback((struct uio *)a,b,c,d)
#define COPYDATA(x, a, b, c, d) \
	(x) == CRYPTO_BUF_MBUF ? m_copydata((struct mbuf *)a,b,c,d) \
	: cuio_copydata((struct uio *)a,b,c,d)

static int swcr_encdec(struct cryptodesc *, struct swcr_data *, void *, int);
static int swcr_compdec(struct cryptodesc *, struct swcr_data *, void *, int);
static int swcr_process(void *, struct cryptop *, int);
static int swcr_newsession(void *, u_int32_t *, struct cryptoini *);
static int swcr_freesession(void *, u_int64_t);

/*
 * Apply a symmetric encryption/decryption algorithm.
 */
static int
swcr_encdec(struct cryptodesc *crd, struct swcr_data *sw, void *bufv,
    int outtype)
{
	char *buf = bufv;
	unsigned char iv[EALG_MAX_BLOCK_LEN], blk[EALG_MAX_BLOCK_LEN], *idat;
	unsigned char *ivp, piv[EALG_MAX_BLOCK_LEN];
	const struct swcr_enc_xform *exf;
	int i, k, j, blks;
	int count, ind;

	exf = sw->sw_exf;
	blks = exf->enc_xform->blocksize;

	/* Check for non-padded data */
	if (crd->crd_len % blks)
		return EINVAL;

	/* Initialize the IV */
	if (crd->crd_flags & CRD_F_ENCRYPT) {
		/* IV explicitly provided ? */
		if (crd->crd_flags & CRD_F_IV_EXPLICIT)
			memcpy(iv, crd->crd_iv, blks);
		else {
			/* Get random IV */
			for (i = 0;
			    i + sizeof (u_int32_t) < EALG_MAX_BLOCK_LEN;
			    i += sizeof (u_int32_t)) {
				u_int32_t temp = arc4random();

				memcpy(iv + i, &temp, sizeof(u_int32_t));
			}
			/*
			 * What if the block size is not a multiple
			 * of sizeof (u_int32_t), which is the size of
			 * what arc4random() returns ?
			 */
			if (EALG_MAX_BLOCK_LEN % sizeof (u_int32_t) != 0) {
				u_int32_t temp = arc4random();

				bcopy (&temp, iv + i,
				    EALG_MAX_BLOCK_LEN - i);
			}
		}

		/* Do we need to write the IV */
		if (!(crd->crd_flags & CRD_F_IV_PRESENT)) {
			COPYBACK(outtype, buf, crd->crd_inject, blks, iv);
		}

	} else {	/* Decryption */
		/* IV explicitly provided ? */
		if (crd->crd_flags & CRD_F_IV_EXPLICIT)
			memcpy(iv, crd->crd_iv, blks);
		else {
			/* Get IV off buf */
			COPYDATA(outtype, buf, crd->crd_inject, blks, iv);
		}
	}

	ivp = iv;

	if (outtype == CRYPTO_BUF_CONTIG) {
		if (crd->crd_flags & CRD_F_ENCRYPT) {
			for (i = crd->crd_skip;
			    i < crd->crd_skip + crd->crd_len; i += blks) {
				/* XOR with the IV/previous block, as appropriate. */
				if (i == crd->crd_skip)
					for (k = 0; k < blks; k++)
						buf[i + k] ^= ivp[k];
				else
					for (k = 0; k < blks; k++)
						buf[i + k] ^= buf[i + k - blks];
				exf->encrypt(sw->sw_kschedule, buf + i);
			}
		} else {	/* Decrypt */
			/*
			 * Start at the end, so we don't need to keep the encrypted
			 * block as the IV for the next block.
			 */
			for (i = crd->crd_skip + crd->crd_len - blks;
			    i >= crd->crd_skip; i -= blks) {
				exf->decrypt(sw->sw_kschedule, buf + i);

				/* XOR with the IV/previous block, as appropriate */
				if (i == crd->crd_skip)
					for (k = 0; k < blks; k++)
						buf[i + k] ^= ivp[k];
				else
					for (k = 0; k < blks; k++)
						buf[i + k] ^= buf[i + k - blks];
			}
		}

		return 0;
	} else if (outtype == CRYPTO_BUF_MBUF) {
		struct mbuf *m = (struct mbuf *) buf;

		/* Find beginning of data */
		m = m_getptr(m, crd->crd_skip, &k);
		if (m == NULL)
			return EINVAL;

		i = crd->crd_len;

		while (i > 0) {
			/*
			 * If there's insufficient data at the end of
			 * an mbuf, we have to do some copying.
			 */
			if (m->m_len < k + blks && m->m_len != k) {
				m_copydata(m, k, blks, blk);

				/* Actual encryption/decryption */
				if (crd->crd_flags & CRD_F_ENCRYPT) {
					/* XOR with previous block */
					for (j = 0; j < blks; j++)
						blk[j] ^= ivp[j];

					exf->encrypt(sw->sw_kschedule, blk);

					/*
					 * Keep encrypted block for XOR'ing
					 * with next block
					 */
					memcpy(iv, blk, blks);
					ivp = iv;
				} else {	/* decrypt */
					/*
					 * Keep encrypted block for XOR'ing
					 * with next block
					 */
					if (ivp == iv)
						memcpy(piv, blk, blks);
					else
						memcpy(iv, blk, blks);

					exf->decrypt(sw->sw_kschedule, blk);

					/* XOR with previous block */
					for (j = 0; j < blks; j++)
						blk[j] ^= ivp[j];

					if (ivp == iv)
						memcpy(iv, piv, blks);
					else
						ivp = iv;
				}

				/* Copy back decrypted block */
				m_copyback(m, k, blks, blk);

				/* Advance pointer */
				m = m_getptr(m, k + blks, &k);
				if (m == NULL)
					return EINVAL;

				i -= blks;

				/* Could be done... */
				if (i == 0)
					break;
			}

			/* Skip possibly empty mbufs */
			if (k == m->m_len) {
				for (m = m->m_next; m && m->m_len == 0;
				    m = m->m_next)
					;
				k = 0;
			}

			/* Sanity check */
			if (m == NULL)
				return EINVAL;

			/*
			 * Warning: idat may point to garbage here, but
			 * we only use it in the while() loop, only if
			 * there are indeed enough data.
			 */
			idat = mtod(m, unsigned char *) + k;

			while (m->m_len >= k + blks && i > 0) {
				if (crd->crd_flags & CRD_F_ENCRYPT) {
					/* XOR with previous block/IV */
					for (j = 0; j < blks; j++)
						idat[j] ^= ivp[j];

					exf->encrypt(sw->sw_kschedule, idat);
					ivp = idat;
				} else {	/* decrypt */
					/*
					 * Keep encrypted block to be used
					 * in next block's processing.
					 */
					if (ivp == iv)
						memcpy(piv, idat, blks);
					else
						memcpy(iv, idat, blks);

					exf->decrypt(sw->sw_kschedule, idat);

					/* XOR with previous block/IV */
					for (j = 0; j < blks; j++)
						idat[j] ^= ivp[j];

					if (ivp == iv)
						memcpy(iv, piv, blks);
					else
						ivp = iv;
				}

				idat += blks;
				k += blks;
				i -= blks;
			}
		}

		return 0;	/* Done with mbuf encryption/decryption */
	} else if (outtype == CRYPTO_BUF_IOV) {
		struct uio *uio = (struct uio *) buf;

		/* Find beginning of data */
		count = crd->crd_skip;
		ind = cuio_getptr(uio, count, &k);
		if (ind == -1)
			return EINVAL;

		i = crd->crd_len;

		while (i > 0) {
			/*
			 * If there's insufficient data at the end,
			 * we have to do some copying.
			 */
			if (uio->uio_iov[ind].iov_len < k + blks &&
			    uio->uio_iov[ind].iov_len != k) {
				cuio_copydata(uio, k, blks, blk);

				/* Actual encryption/decryption */
				if (crd->crd_flags & CRD_F_ENCRYPT) {
					/* XOR with previous block */
					for (j = 0; j < blks; j++)
						blk[j] ^= ivp[j];

					exf->encrypt(sw->sw_kschedule, blk);

					/*
					 * Keep encrypted block for XOR'ing
					 * with next block
					 */
					memcpy(iv, blk, blks);
					ivp = iv;
				} else {	/* decrypt */
					/*
					 * Keep encrypted block for XOR'ing
					 * with next block
					 */
					if (ivp == iv)
						memcpy(piv, blk, blks);
					else
						memcpy(iv, blk, blks);

					exf->decrypt(sw->sw_kschedule, blk);

					/* XOR with previous block */
					for (j = 0; j < blks; j++)
						blk[j] ^= ivp[j];

					if (ivp == iv)
						memcpy(iv, piv, blks);
					else
						ivp = iv;
				}

				/* Copy back decrypted block */
				cuio_copyback(uio, k, blks, blk);

				count += blks;

				/* Advance pointer */
				ind = cuio_getptr(uio, count, &k);
				if (ind == -1)
					return (EINVAL);

				i -= blks;

				/* Could be done... */
				if (i == 0)
					break;
			}

			/*
			 * Warning: idat may point to garbage here, but
			 * we only use it in the while() loop, only if
			 * there are indeed enough data.
			 */
			idat = ((char *)uio->uio_iov[ind].iov_base) + k;

			while (uio->uio_iov[ind].iov_len >= k + blks &&
			    i > 0) {
				if (crd->crd_flags & CRD_F_ENCRYPT) {
					/* XOR with previous block/IV */
					for (j = 0; j < blks; j++)
						idat[j] ^= ivp[j];

					exf->encrypt(sw->sw_kschedule, idat);
					ivp = idat;
				} else {	/* decrypt */
					/*
					 * Keep encrypted block to be used
					 * in next block's processing.
					 */
					if (ivp == iv)
						memcpy(piv, idat, blks);
					else
						memcpy(iv, idat, blks);

					exf->decrypt(sw->sw_kschedule, idat);

					/* XOR with previous block/IV */
					for (j = 0; j < blks; j++)
						idat[j] ^= ivp[j];

					if (ivp == iv)
						memcpy(iv, piv, blks);
					else
						ivp = iv;
				}

				idat += blks;
				count += blks;
				k += blks;
				i -= blks;
			}
		}
		return 0;	/* Done with iov encryption/decryption */
	}

	/* Unreachable */
	return EINVAL;
}

/*
 * Compute keyed-hash authenticator.
 */
int
swcr_authcompute(struct cryptop *crp, struct cryptodesc *crd,
    struct swcr_data *sw, void *buf, int outtype)
{
	unsigned char aalg[AALG_MAX_RESULT_LEN];
	const struct swcr_auth_hash *axf;
	union authctx ctx;
	int err;

	if (sw->sw_ictx == 0)
		return EINVAL;

	axf = sw->sw_axf;

	memcpy(&ctx, sw->sw_ictx, axf->auth_hash->ctxsize);

	switch (outtype) {
	case CRYPTO_BUF_CONTIG:
		axf->Update(&ctx, (char *)buf + crd->crd_skip, crd->crd_len);
		break;
	case CRYPTO_BUF_MBUF:
		err = m_apply((struct mbuf *) buf, crd->crd_skip, crd->crd_len,
		    (int (*)(void*, void *, unsigned int)) axf->Update,
		    (void *) &ctx);
		if (err)
			return err;
		break;
	case CRYPTO_BUF_IOV:
		err = cuio_apply((struct uio *) buf, crd->crd_skip,
		    crd->crd_len,
		    (int (*)(void *, void *, unsigned int)) axf->Update,
		    (void *) &ctx);
		if (err) {
			return err;
		}
		break;
	default:
		return EINVAL;
	}

	switch (sw->sw_alg) {
	case CRYPTO_MD5_HMAC:
	case CRYPTO_MD5_HMAC_96:
	case CRYPTO_SHA1_HMAC:
	case CRYPTO_SHA1_HMAC_96:
	case CRYPTO_SHA2_HMAC:
	case CRYPTO_RIPEMD160_HMAC:
	case CRYPTO_RIPEMD160_HMAC_96:
		if (sw->sw_octx == NULL)
			return EINVAL;

		axf->Final(aalg, &ctx);
		memcpy(&ctx, sw->sw_octx, axf->auth_hash->ctxsize);
		axf->Update(&ctx, aalg, axf->auth_hash->hashsize);
		axf->Final(aalg, &ctx);
		break;

	case CRYPTO_MD5_KPDK:
	case CRYPTO_SHA1_KPDK:
		if (sw->sw_octx == NULL)
			return EINVAL;

		axf->Update(&ctx, sw->sw_octx, sw->sw_klen);
		axf->Final(aalg, &ctx);
		break;

	case CRYPTO_NULL_HMAC:
	case CRYPTO_MD5:
	case CRYPTO_SHA1:
		axf->Final(aalg, &ctx);
		break;
	}

	/* Inject the authentication data */
	switch (outtype) {
	case CRYPTO_BUF_CONTIG:
		(void)memcpy((char *)buf + crd->crd_inject, aalg,
		    axf->auth_hash->authsize);
		break;
	case CRYPTO_BUF_MBUF:
		m_copyback((struct mbuf *) buf, crd->crd_inject,
		    axf->auth_hash->authsize, aalg);
		break;
	case CRYPTO_BUF_IOV:
		memcpy(crp->crp_mac, aalg, axf->auth_hash->authsize);
		break;
	default:
		return EINVAL;
	}
	return 0;
}

/*
 * Apply a compression/decompression algorithm
 */
static int
swcr_compdec(struct cryptodesc *crd, struct swcr_data *sw,
    void *buf, int outtype)
{
	u_int8_t *data, *out;
	const struct swcr_comp_algo *cxf;
	int adj;
	u_int32_t result;

	cxf = sw->sw_cxf;

	/*
	 * We must handle the whole buffer of data in one go, so if the
	 * data is not contiguous in the mbuf, copy it into a temporary
	 * buffer first.
	 */
	data = malloc(crd->crd_len, M_CRYPTO_DATA, M_NOWAIT);
	if (data == NULL)
		return (EINVAL);
	COPYDATA(outtype, buf, crd->crd_skip, crd->crd_len, data);

	if (crd->crd_flags & CRD_F_COMP)
		result = cxf->compress(data, crd->crd_len, &out);
	else
		result = cxf->decompress(data, crd->crd_len, &out);

	free(data, M_CRYPTO_DATA);
	if (result == 0)
		return EINVAL;

	/*
	 * Copy back the (de)compressed data. m_copyback extends the
	 * mbuf as necessary.
	 */
	sw->sw_size = result;
	/* Check the compressed size when doing compression */
	if (crd->crd_flags & CRD_F_COMP) {
		if (result > crd->crd_len) {
			/* Compression was useless, we lost time */
			free(out, M_CRYPTO_DATA);
			return 0;
		}
	}

	COPYBACK(outtype, buf, crd->crd_skip, result, out);
	if (result < crd->crd_len) {
		adj = result - crd->crd_len;
		if (outtype == CRYPTO_BUF_MBUF) {
			adj = result - crd->crd_len;
			m_adj((struct mbuf *)buf, adj);
		}
		/* Don't adjust the iov_len, it breaks the kmem_free */
	}
	free(out, M_CRYPTO_DATA);
	return 0;
}

/*
 * Generate a new software session.
 */
static int
swcr_newsession(void *arg, u_int32_t *sid, struct cryptoini *cri)
{
	struct swcr_data **swd;
	const struct swcr_auth_hash *axf;
	const struct swcr_enc_xform *txf;
	const struct swcr_comp_algo *cxf;
	u_int32_t i;
	int k, error;

	if (sid == NULL || cri == NULL)
		return EINVAL;

	if (swcr_sessions) {
		for (i = 1; i < swcr_sesnum; i++)
			if (swcr_sessions[i] == NULL)
				break;
	} else
		i = 1;		/* NB: to silence compiler warning */

	if (swcr_sessions == NULL || i == swcr_sesnum) {
		if (swcr_sessions == NULL) {
			i = 1; /* We leave swcr_sessions[0] empty */
			swcr_sesnum = CRYPTO_SW_SESSIONS;
		} else
			swcr_sesnum *= 2;

		swd = malloc(swcr_sesnum * sizeof(struct swcr_data *),
		    M_CRYPTO_DATA, M_NOWAIT);
		if (swd == NULL) {
			/* Reset session number */
			if (swcr_sesnum == CRYPTO_SW_SESSIONS)
				swcr_sesnum = 0;
			else
				swcr_sesnum /= 2;
			return ENOBUFS;
		}

		memset(swd, 0, swcr_sesnum * sizeof(struct swcr_data *));

		/* Copy existing sessions */
		if (swcr_sessions) {
			memcpy(swd, swcr_sessions,
			    (swcr_sesnum / 2) * sizeof(struct swcr_data *));
			free(swcr_sessions, M_CRYPTO_DATA);
		}

		swcr_sessions = swd;
	}

	swd = &swcr_sessions[i];
	*sid = i;

	while (cri) {
		*swd = malloc(sizeof **swd, M_CRYPTO_DATA, M_NOWAIT);
		if (*swd == NULL) {
			swcr_freesession(NULL, i);
			return ENOBUFS;
		}
		memset(*swd, 0, sizeof(struct swcr_data));

		switch (cri->cri_alg) {
		case CRYPTO_DES_CBC:
			txf = &swcr_enc_xform_des;
			goto enccommon;
		case CRYPTO_3DES_CBC:
			txf = &swcr_enc_xform_3des;
			goto enccommon;
		case CRYPTO_BLF_CBC:
			txf = &swcr_enc_xform_blf;
			goto enccommon;
		case CRYPTO_CAST_CBC:
			txf = &swcr_enc_xform_cast5;
			goto enccommon;
		case CRYPTO_SKIPJACK_CBC:
			txf = &swcr_enc_xform_skipjack;
			goto enccommon;
		case CRYPTO_RIJNDAEL128_CBC:
			txf = &swcr_enc_xform_rijndael128;
			goto enccommon;
		case CRYPTO_NULL_CBC:
			txf = &swcr_enc_xform_null;
			goto enccommon;
		enccommon:
			error = txf->setkey(&((*swd)->sw_kschedule),
			    cri->cri_key, cri->cri_klen / 8);
			if (error) {
				swcr_freesession(NULL, i);
				return error;
			}
			(*swd)->sw_exf = txf;
			break;

		case CRYPTO_MD5_HMAC:
			axf = &swcr_auth_hash_hmac_md5;
			goto authcommon;
		case CRYPTO_MD5_HMAC_96:
			axf = &swcr_auth_hash_hmac_md5_96;
			goto authcommon;
		case CRYPTO_SHA1_HMAC:
			axf = &swcr_auth_hash_hmac_sha1;
			goto authcommon;
		case CRYPTO_SHA1_HMAC_96:
			axf = &swcr_auth_hash_hmac_sha1_96;
			goto authcommon;
		case CRYPTO_SHA2_HMAC:
			if (cri->cri_klen == 256)
				axf = &swcr_auth_hash_hmac_sha2_256;
			else if (cri->cri_klen == 384)
				axf = &swcr_auth_hash_hmac_sha2_384;
			else if (cri->cri_klen == 512)
				axf = &swcr_auth_hash_hmac_sha2_512;
			else {
				swcr_freesession(NULL, i);
				return EINVAL;
			}
			goto authcommon;
		case CRYPTO_NULL_HMAC:
			axf = &swcr_auth_hash_null;
			goto authcommon;
		case CRYPTO_RIPEMD160_HMAC:
			axf = &swcr_auth_hash_hmac_ripemd_160;
			goto authcommon;
		case CRYPTO_RIPEMD160_HMAC_96:
			axf = &swcr_auth_hash_hmac_ripemd_160_96;
			goto authcommon;	/* leave this for safety */
		authcommon:
			(*swd)->sw_ictx = malloc(axf->auth_hash->ctxsize,
			    M_CRYPTO_DATA, M_NOWAIT);
			if ((*swd)->sw_ictx == NULL) {
				swcr_freesession(NULL, i);
				return ENOBUFS;
			}

			(*swd)->sw_octx = malloc(axf->auth_hash->ctxsize,
			    M_CRYPTO_DATA, M_NOWAIT);
			if ((*swd)->sw_octx == NULL) {
				swcr_freesession(NULL, i);
				return ENOBUFS;
			}

			for (k = 0; k < cri->cri_klen / 8; k++)
				cri->cri_key[k] ^= HMAC_IPAD_VAL;

			axf->Init((*swd)->sw_ictx);
			axf->Update((*swd)->sw_ictx, cri->cri_key,
			    cri->cri_klen / 8);
			axf->Update((*swd)->sw_ictx, hmac_ipad_buffer,
			    HMAC_BLOCK_LEN - (cri->cri_klen / 8));

			for (k = 0; k < cri->cri_klen / 8; k++)
				cri->cri_key[k] ^= (HMAC_IPAD_VAL ^ HMAC_OPAD_VAL);

			axf->Init((*swd)->sw_octx);
			axf->Update((*swd)->sw_octx, cri->cri_key,
			    cri->cri_klen / 8);
			axf->Update((*swd)->sw_octx, hmac_opad_buffer,
			    HMAC_BLOCK_LEN - (cri->cri_klen / 8));

			for (k = 0; k < cri->cri_klen / 8; k++)
				cri->cri_key[k] ^= HMAC_OPAD_VAL;
			(*swd)->sw_axf = axf;
			break;

		case CRYPTO_MD5_KPDK:
			axf = &swcr_auth_hash_key_md5;
			goto auth2common;

		case CRYPTO_SHA1_KPDK:
			axf = &swcr_auth_hash_key_sha1;
		auth2common:
			(*swd)->sw_ictx = malloc(axf->auth_hash->ctxsize,
			    M_CRYPTO_DATA, M_NOWAIT);
			if ((*swd)->sw_ictx == NULL) {
				swcr_freesession(NULL, i);
				return ENOBUFS;
			}

			/* Store the key so we can "append" it to the payload */
			(*swd)->sw_octx = malloc(cri->cri_klen / 8, M_CRYPTO_DATA,
			    M_NOWAIT);
			if ((*swd)->sw_octx == NULL) {
				swcr_freesession(NULL, i);
				return ENOBUFS;
			}

			(*swd)->sw_klen = cri->cri_klen / 8;
			memcpy((*swd)->sw_octx, cri->cri_key, cri->cri_klen / 8);
			axf->Init((*swd)->sw_ictx);
			axf->Update((*swd)->sw_ictx, cri->cri_key,
			    cri->cri_klen / 8);
			axf->Final(NULL, (*swd)->sw_ictx);
			(*swd)->sw_axf = axf;
			break;

		case CRYPTO_MD5:
			axf = &swcr_auth_hash_md5;
			goto auth3common;

		case CRYPTO_SHA1:
			axf = &swcr_auth_hash_sha1;
		auth3common:
			(*swd)->sw_ictx = malloc(axf->auth_hash->ctxsize,
			    M_CRYPTO_DATA, M_NOWAIT);
			if ((*swd)->sw_ictx == NULL) {
				swcr_freesession(NULL, i);
				return ENOBUFS;
			}

			axf->Init((*swd)->sw_ictx);
			(*swd)->sw_axf = axf;
			break;

		case CRYPTO_DEFLATE_COMP:
			cxf = &swcr_comp_algo_deflate;
			(*swd)->sw_cxf = cxf;
			break;

		case CRYPTO_GZIP_COMP:
			cxf = &swcr_comp_algo_gzip;
			(*swd)->sw_cxf = cxf;
			break;
		default:
			swcr_freesession(NULL, i);
			return EINVAL;
		}

		(*swd)->sw_alg = cri->cri_alg;
		cri = cri->cri_next;
		swd = &((*swd)->sw_next);
	}
	return 0;
}

/*
 * Free a session.
 */
static int
swcr_freesession(void *arg, u_int64_t tid)
{
	struct swcr_data *swd;
	const struct swcr_enc_xform *txf;
	const struct swcr_auth_hash *axf;
	const struct swcr_comp_algo *cxf;
	u_int32_t sid = ((u_int32_t) tid) & 0xffffffff;

	if (sid > swcr_sesnum || swcr_sessions == NULL ||
	    swcr_sessions[sid] == NULL)
		return EINVAL;

	/* Silently accept and return */
	if (sid == 0)
		return 0;

	while ((swd = swcr_sessions[sid]) != NULL) {
		swcr_sessions[sid] = swd->sw_next;

		switch (swd->sw_alg) {
		case CRYPTO_DES_CBC:
		case CRYPTO_3DES_CBC:
		case CRYPTO_BLF_CBC:
		case CRYPTO_CAST_CBC:
		case CRYPTO_SKIPJACK_CBC:
		case CRYPTO_RIJNDAEL128_CBC:
		case CRYPTO_NULL_CBC:
			txf = swd->sw_exf;

			if (swd->sw_kschedule)
				txf->zerokey(&(swd->sw_kschedule));
			break;

		case CRYPTO_MD5_HMAC:
		case CRYPTO_MD5_HMAC_96:
		case CRYPTO_SHA1_HMAC:
		case CRYPTO_SHA1_HMAC_96:
		case CRYPTO_SHA2_HMAC:
		case CRYPTO_RIPEMD160_HMAC:
		case CRYPTO_RIPEMD160_HMAC_96:
		case CRYPTO_NULL_HMAC:
			axf = swd->sw_axf;

			if (swd->sw_ictx) {
				memset(swd->sw_ictx, 0, axf->auth_hash->ctxsize);
				free(swd->sw_ictx, M_CRYPTO_DATA);
			}
			if (swd->sw_octx) {
				memset(swd->sw_octx, 0, axf->auth_hash->ctxsize);
				free(swd->sw_octx, M_CRYPTO_DATA);
			}
			break;

		case CRYPTO_MD5_KPDK:
		case CRYPTO_SHA1_KPDK:
			axf = swd->sw_axf;

			if (swd->sw_ictx) {
				memset(swd->sw_ictx, 0, axf->auth_hash->ctxsize);
				free(swd->sw_ictx, M_CRYPTO_DATA);
			}
			if (swd->sw_octx) {
				memset(swd->sw_octx, 0, swd->sw_klen);
				free(swd->sw_octx, M_CRYPTO_DATA);
			}
			break;

		case CRYPTO_MD5:
		case CRYPTO_SHA1:
			axf = swd->sw_axf;

			if (swd->sw_ictx)
				free(swd->sw_ictx, M_CRYPTO_DATA);
			break;

		case CRYPTO_DEFLATE_COMP:
		case CRYPTO_GZIP_COMP:
			cxf = swd->sw_cxf;
			break;
		}

		free(swd, M_CRYPTO_DATA);
	}
	return 0;
}

/*
 * Process a software request.
 */
static int
swcr_process(void *arg, struct cryptop *crp, int hint)
{
	struct cryptodesc *crd;
	struct swcr_data *sw;
	u_int32_t lid;
	int type;

	/* Sanity check */
	if (crp == NULL)
		return EINVAL;

	if (crp->crp_desc == NULL || crp->crp_buf == NULL) {
		crp->crp_etype = EINVAL;
		goto done;
	}

	lid = crp->crp_sid & 0xffffffff;
	if (lid >= swcr_sesnum || lid == 0 || swcr_sessions[lid] == NULL) {
		crp->crp_etype = ENOENT;
		goto done;
	}

	if (crp->crp_flags & CRYPTO_F_IMBUF) {
		type = CRYPTO_BUF_MBUF;
	} else if (crp->crp_flags & CRYPTO_F_IOV) {
		type = CRYPTO_BUF_IOV;
	} else {
		type = CRYPTO_BUF_CONTIG;
	}

	/* Go through crypto descriptors, processing as we go */
	for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
		/*
		 * Find the crypto context.
		 *
		 * XXX Note that the logic here prevents us from having
		 * XXX the same algorithm multiple times in a session
		 * XXX (or rather, we can but it won't give us the right
		 * XXX results). To do that, we'd need some way of differentiating
		 * XXX between the various instances of an algorithm (so we can
		 * XXX locate the correct crypto context).
		 */
		for (sw = swcr_sessions[lid];
		    sw && sw->sw_alg != crd->crd_alg;
		    sw = sw->sw_next)
			;

		/* No such context ? */
		if (sw == NULL) {
			crp->crp_etype = EINVAL;
			goto done;
		}

		switch (sw->sw_alg) {
		case CRYPTO_DES_CBC:
		case CRYPTO_3DES_CBC:
		case CRYPTO_BLF_CBC:
		case CRYPTO_CAST_CBC:
		case CRYPTO_SKIPJACK_CBC:
		case CRYPTO_RIJNDAEL128_CBC:
			if ((crp->crp_etype = swcr_encdec(crd, sw,
			    crp->crp_buf, type)) != 0)
				goto done;
			break;
		case CRYPTO_NULL_CBC:
			crp->crp_etype = 0;
			break;
		case CRYPTO_MD5_HMAC:
		case CRYPTO_MD5_HMAC_96:
		case CRYPTO_SHA1_HMAC:
		case CRYPTO_SHA1_HMAC_96:
		case CRYPTO_SHA2_HMAC:
		case CRYPTO_RIPEMD160_HMAC:
		case CRYPTO_RIPEMD160_HMAC_96:
		case CRYPTO_NULL_HMAC:
		case CRYPTO_MD5_KPDK:
		case CRYPTO_SHA1_KPDK:
		case CRYPTO_MD5:
		case CRYPTO_SHA1:
			if ((crp->crp_etype = swcr_authcompute(crp, crd, sw,
			    crp->crp_buf, type)) != 0)
				goto done;
			break;

		case CRYPTO_DEFLATE_COMP:
		case CRYPTO_GZIP_COMP:
			DPRINTF(("swcr_process: compdec for %d\n", sw->sw_alg));
			if ((crp->crp_etype = swcr_compdec(crd, sw,
			    crp->crp_buf, type)) != 0)
				goto done;
			else
				crp->crp_olen = (int)sw->sw_size;
			break;

		default:
			/* Unknown/unsupported algorithm */
			crp->crp_etype = EINVAL;
			goto done;
		}
	}

done:
	DPRINTF(("request %p done\n", crp));
	crypto_done(crp);
	return 0;
}

static void
swcr_init(void)
{
	swcr_id = crypto_get_driverid(CRYPTOCAP_F_SOFTWARE);
	if (swcr_id < 0) {
		/* This should never happen */
		panic("Software crypto device cannot initialize!");
	}

	crypto_register(swcr_id, CRYPTO_DES_CBC,
	    0, 0, swcr_newsession, swcr_freesession, swcr_process, NULL);
#define	REGISTER(alg) \
	crypto_register(swcr_id, alg, 0, 0, NULL, NULL, NULL, NULL)

	REGISTER(CRYPTO_3DES_CBC);
	REGISTER(CRYPTO_BLF_CBC);
	REGISTER(CRYPTO_CAST_CBC);
	REGISTER(CRYPTO_SKIPJACK_CBC);
	REGISTER(CRYPTO_NULL_CBC);
	REGISTER(CRYPTO_MD5_HMAC);
	REGISTER(CRYPTO_MD5_HMAC_96);
	REGISTER(CRYPTO_SHA1_HMAC);
	REGISTER(CRYPTO_SHA1_HMAC_96);
	REGISTER(CRYPTO_SHA2_HMAC);
	REGISTER(CRYPTO_RIPEMD160_HMAC);
	REGISTER(CRYPTO_RIPEMD160_HMAC_96);
	REGISTER(CRYPTO_NULL_HMAC);
	REGISTER(CRYPTO_MD5_KPDK);
	REGISTER(CRYPTO_SHA1_KPDK);
	REGISTER(CRYPTO_MD5);
	REGISTER(CRYPTO_SHA1);
	REGISTER(CRYPTO_RIJNDAEL128_CBC);
	REGISTER(CRYPTO_DEFLATE_COMP);
	REGISTER(CRYPTO_GZIP_COMP);
#undef REGISTER
}


/*
 * Pseudo-device init routine for software crypto.
 */
void	swcryptoattach(int);

void
swcryptoattach(int num)
{

	swcr_init();
}
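
/*
 * Illustrative usage sketch (not part of the original driver): how a kernel
 * consumer might reach swcr_process() for one of the algorithms registered
 * above, going through the opencrypto framework rather than calling this
 * file directly.  This assumes the consumer API declared in
 * <opencrypto/cryptodev.h> (crypto_newsession(), crypto_getreq(),
 * crypto_dispatch()); the variables "key", "m", "len" and the callback
 * "example_cb" are hypothetical.
 *
 *	struct cryptoini cri;
 *	struct cryptop *crp;
 *	struct cryptodesc *crd;
 *	u_int64_t sid;
 *
 *	memset(&cri, 0, sizeof(cri));
 *	cri.cri_alg = CRYPTO_SHA1_HMAC_96;
 *	cri.cri_key = key;
 *	cri.cri_klen = 160;			key length in bits
 *	if (crypto_newsession(&sid, &cri, 0))	0: a software driver is fine
 *		return;
 *
 *	crp = crypto_getreq(1);			one descriptor
 *	crp->crp_sid = sid;
 *	crp->crp_ilen = len;
 *	crp->crp_flags = CRYPTO_F_IMBUF;	crp_buf is an mbuf chain
 *	crp->crp_buf = (void *)m;
 *	crp->crp_callback = example_cb;
 *
 *	crd = crp->crp_desc;
 *	crd->crd_alg = CRYPTO_SHA1_HMAC_96;
 *	crd->crd_skip = 0;
 *	crd->crd_len = len;
 *	crd->crd_inject = len;			where swcr_authcompute() writes the MAC
 *
 *	crypto_dispatch(crp);			eventually invokes swcr_process()
 */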