/*-
 * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu)
 * Copyright (c) 2002-2006 Sam Leffler, Errno Consulting
 *
 * This code was written by Angelos D. Keromytis in Athens, Greece, in
 * February 2000. Network Security Technologies Inc. (NSTI) kindly
 * supported the development of this code.
 *
 * Copyright (c) 2000, 2001 Angelos D. Keromytis
 *
 * SMP modifications by Matthew Dillon for the DragonFlyBSD Project
 *
 * Permission to use, copy, and modify this software with or without fee
 * is hereby granted, provided that this entire notice is included in
 * all source code copies of any software which is or includes a copy or
 * modification of this software.
 *
 * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
 * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
 * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
 * PURPOSE.
 *
 * $FreeBSD: src/sys/opencrypto/cryptosoft.c,v 1.23 2009/02/05 17:43:12 imp Exp $
 * $OpenBSD: cryptosoft.c,v 1.35 2002/04/26 08:43:50 deraadt Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/sysctl.h>
#include <sys/errno.h>
#include <sys/endian.h>
#include <sys/random.h>
#include <sys/kernel.h>
#include <sys/uio.h>
#include <sys/spinlock2.h>

#include <crypto/blowfish/blowfish.h>
#include <crypto/sha1.h>
#include <opencrypto/rmd160.h>
#include <opencrypto/cast.h>
#include <opencrypto/skipjack.h>
#include <sys/md5.h>

#include <opencrypto/cryptodev.h>
#include <opencrypto/cryptosoft.h>
#include <opencrypto/xform.h>

#include <sys/kobj.h>
#include <sys/bus.h>
#include "cryptodev_if.h"

static int32_t swcr_id;
static struct swcr_data **swcr_sessions = NULL;
static u_int32_t swcr_sesnum;
static u_int32_t swcr_minsesnum = 1;

static struct spinlock swcr_spin = SPINLOCK_INITIALIZER(swcr_spin);

u_int8_t hmac_ipad_buffer[HMAC_MAX_BLOCK_LEN];
u_int8_t hmac_opad_buffer[HMAC_MAX_BLOCK_LEN];

static int swcr_encdec(struct cryptodesc *, struct swcr_data *, caddr_t, int);
static int swcr_authcompute(struct cryptodesc *, struct swcr_data *, caddr_t,
	    int);
static int swcr_combined(struct cryptop *);
static int swcr_compdec(struct cryptodesc *, struct swcr_data *, caddr_t, int);
static int swcr_freesession(device_t dev, u_int64_t tid);
static int swcr_freesession_slot(struct swcr_data **swdp, u_int32_t sid);

/*
 * Apply a symmetric encryption/decryption algorithm.
 */
static int
swcr_encdec(struct cryptodesc *crd, struct swcr_data *sw, caddr_t buf,
    int flags)
{
	unsigned char iv[EALG_MAX_BLOCK_LEN], blk[EALG_MAX_BLOCK_LEN], *idat;
	unsigned char *ivp, *nivp, iv2[EALG_MAX_BLOCK_LEN];
	u_int8_t *kschedule;
	u_int8_t *okschedule;
	struct enc_xform *exf;
	int i, k, j, blks, ivlen;
	int error;
	int explicit_kschedule;

	exf = sw->sw_exf;
	blks = exf->blocksize;
	ivlen = exf->ivsize;

	/* Check for non-padded data */
	if (crd->crd_len % blks)
		return EINVAL;

	/* Initialize the IV */
	if (crd->crd_flags & CRD_F_ENCRYPT) {
		/* IV explicitly provided? */
		if (crd->crd_flags & CRD_F_IV_EXPLICIT)
			bcopy(crd->crd_iv, iv, ivlen);
		else
			karc4rand(iv, ivlen);

		/* Do we need to write the IV? */
		if (!(crd->crd_flags & CRD_F_IV_PRESENT))
			crypto_copyback(flags, buf, crd->crd_inject, ivlen, iv);
	} else {	/* Decryption */
		/* IV explicitly provided? */
		if (crd->crd_flags & CRD_F_IV_EXPLICIT) {
			bcopy(crd->crd_iv, iv, ivlen);
		} else {
			/* Get IV off buf */
			crypto_copydata(flags, buf, crd->crd_inject, ivlen, iv);
		}
	}

	ivp = iv;
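
	/*
	 * A note on the IV flag semantics used above (and again in
	 * swcr_combined()): CRD_F_IV_EXPLICIT means the caller passed
	 * the IV in crd_iv; otherwise the IV is generated here on
	 * encryption, or read from the buffer at crd_inject on
	 * decryption.  CRD_F_IV_PRESENT means the IV already sits in
	 * the buffer, so it must not be written out a second time.
	 */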
	/*
	 * The semantics are seriously broken because the session key
	 * storage was never designed for concurrent ops.
	 */
	if (crd->crd_flags & CRD_F_KEY_EXPLICIT) {
		kschedule = NULL;
		explicit_kschedule = 1;
		error = exf->setkey(&kschedule,
				    crd->crd_key, crd->crd_klen / 8);
		if (error)
			goto done;
	} else {
		spin_lock(&swcr_spin);
		kschedule = sw->sw_kschedule;
		++sw->sw_kschedule_refs;
		spin_unlock(&swcr_spin);
		explicit_kschedule = 0;
	}

	/*
	 * xforms that provide a reinit method perform all IV
	 * handling themselves.
	 */
	if (exf->reinit)
		exf->reinit(kschedule, iv);

	if (flags & CRYPTO_F_IMBUF) {
		struct mbuf *m = (struct mbuf *) buf;

		/* Find beginning of data */
		m = m_getptr(m, crd->crd_skip, &k);
		if (m == NULL) {
			error = EINVAL;
			goto done;
		}

		i = crd->crd_len;

		while (i > 0) {
			/*
			 * If there's insufficient data at the end of
			 * an mbuf, we have to do some copying.
			 */
			if (m->m_len < k + blks && m->m_len != k) {
				m_copydata(m, k, blks, blk);

				/* Actual encryption/decryption */
				if (exf->reinit) {
					if (crd->crd_flags & CRD_F_ENCRYPT) {
						exf->encrypt(kschedule,
							     blk, iv);
					} else {
						exf->decrypt(kschedule,
							     blk, iv);
					}
				} else if (crd->crd_flags & CRD_F_ENCRYPT) {
					/* XOR with previous block */
					for (j = 0; j < blks; j++)
						blk[j] ^= ivp[j];

					exf->encrypt(kschedule, blk, iv);

					/*
					 * Keep encrypted block for XOR'ing
					 * with next block
					 */
					bcopy(blk, iv, blks);
					ivp = iv;
				} else {	/* decrypt */
					/*
					 * Keep encrypted block for XOR'ing
					 * with next block
					 */
					nivp = (ivp == iv) ? iv2 : iv;
					bcopy(blk, nivp, blks);

					exf->decrypt(kschedule, blk, iv);

					/* XOR with previous block */
					for (j = 0; j < blks; j++)
						blk[j] ^= ivp[j];

					ivp = nivp;
				}

				/* Copy back decrypted block */
				m_copyback(m, k, blks, blk);

				/* Advance pointer */
				m = m_getptr(m, k + blks, &k);
				if (m == NULL) {
					error = EINVAL;
					goto done;
				}

				i -= blks;

				/* Could be done... */
				if (i == 0)
					break;
			}

			/* Skip possibly empty mbufs */
			if (k == m->m_len) {
				for (m = m->m_next; m && m->m_len == 0;
				     m = m->m_next)
					;
				k = 0;
			}

			/* Sanity check */
			if (m == NULL) {
				error = EINVAL;
				goto done;
			}
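
			/*
			 * Fast path: when the remainder of this mbuf is
			 * block aligned, the blocks are transformed in
			 * place through idat, avoiding the copy-out and
			 * copy-back needed above for blocks straddling
			 * an mbuf boundary.
			 */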
			/*
			 * Warning: idat may point to garbage here, but
			 * we only use it in the while() loop, and only
			 * if there is enough data.
			 */
			idat = mtod(m, unsigned char *) + k;

			while (m->m_len >= k + blks && i > 0) {
				if (exf->reinit) {
					if (crd->crd_flags & CRD_F_ENCRYPT) {
						exf->encrypt(kschedule,
							     idat, iv);
					} else {
						exf->decrypt(kschedule,
							     idat, iv);
					}
				} else if (crd->crd_flags & CRD_F_ENCRYPT) {
					/* XOR with previous block/IV */
					for (j = 0; j < blks; j++)
						idat[j] ^= ivp[j];

					exf->encrypt(kschedule, idat, iv);
					ivp = idat;
				} else {	/* decrypt */
					/*
					 * Keep encrypted block to be used
					 * in next block's processing.
					 */
					nivp = (ivp == iv) ? iv2 : iv;
					bcopy(idat, nivp, blks);

					exf->decrypt(kschedule, idat, iv);

					/* XOR with previous block/IV */
					for (j = 0; j < blks; j++)
						idat[j] ^= ivp[j];

					ivp = nivp;
				}

				idat += blks;
				k += blks;
				i -= blks;
			}
		}
		error = 0;	/* Done with mbuf encryption/decryption */
	} else if (flags & CRYPTO_F_IOV) {
		struct uio *uio = (struct uio *) buf;
		struct iovec *iov;
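
		/*
		 * The uio path below mirrors the mbuf path above:
		 * blocks that straddle an iovec boundary are bounced
		 * through blk[], while aligned runs are transformed in
		 * place.
		 */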
		/* Find beginning of data */
		iov = cuio_getptr(uio, crd->crd_skip, &k);
		if (iov == NULL) {
			error = EINVAL;
			goto done;
		}

		i = crd->crd_len;

		while (i > 0) {
			/*
			 * If there's insufficient data at the end of
			 * an iovec, we have to do some copying.
			 */
			if (iov->iov_len < k + blks && iov->iov_len != k) {
				cuio_copydata(uio, k, blks, blk);

				/* Actual encryption/decryption */
				if (exf->reinit) {
					if (crd->crd_flags & CRD_F_ENCRYPT) {
						exf->encrypt(kschedule,
							     blk, iv);
					} else {
						exf->decrypt(kschedule,
							     blk, iv);
					}
				} else if (crd->crd_flags & CRD_F_ENCRYPT) {
					/* XOR with previous block */
					for (j = 0; j < blks; j++)
						blk[j] ^= ivp[j];

					exf->encrypt(kschedule, blk, iv);

					/*
					 * Keep encrypted block for XOR'ing
					 * with next block
					 */
					bcopy(blk, iv, blks);
					ivp = iv;
				} else {	/* decrypt */
					/*
					 * Keep encrypted block for XOR'ing
					 * with next block
					 */
					nivp = (ivp == iv) ? iv2 : iv;
					bcopy(blk, nivp, blks);

					exf->decrypt(kschedule, blk, iv);

					/* XOR with previous block */
					for (j = 0; j < blks; j++)
						blk[j] ^= ivp[j];

					ivp = nivp;
				}

				/* Copy back decrypted block */
				cuio_copyback(uio, k, blks, blk);

				/* Advance pointer */
				iov = cuio_getptr(uio, k + blks, &k);
				if (iov == NULL) {
					error = EINVAL;
					goto done;
				}

				i -= blks;

				/* Could be done... */
				if (i == 0)
					break;
			}

			/*
			 * Warning: idat may point to garbage here, but
			 * we only use it in the while() loop, and only
			 * if there is enough data.
			 */
			idat = (char *)iov->iov_base + k;

			while (iov->iov_len >= k + blks && i > 0) {
				if (exf->reinit) {
					if (crd->crd_flags & CRD_F_ENCRYPT) {
						exf->encrypt(kschedule,
							     idat, iv);
					} else {
						exf->decrypt(kschedule,
							     idat, iv);
					}
				} else if (crd->crd_flags & CRD_F_ENCRYPT) {
					/* XOR with previous block/IV */
					for (j = 0; j < blks; j++)
						idat[j] ^= ivp[j];

					exf->encrypt(kschedule, idat, iv);
					ivp = idat;
				} else {	/* decrypt */
					/*
					 * Keep encrypted block to be used
					 * in next block's processing.
					 */
					nivp = (ivp == iv) ? iv2 : iv;
					bcopy(idat, nivp, blks);

					exf->decrypt(kschedule, idat, iv);

					/* XOR with previous block/IV */
					for (j = 0; j < blks; j++)
						idat[j] ^= ivp[j];

					ivp = nivp;
				}

				idat += blks;
				k += blks;
				i -= blks;
			}
			if (k == iov->iov_len) {
				iov++;
				k = 0;
			}
		}
		error = 0;	/* Done with iovec encryption/decryption */
	} else {
		/*
		 * Contiguous buffer.
		 */
		if (exf->reinit) {
			for (i = crd->crd_skip;
			     i < crd->crd_skip + crd->crd_len; i += blks) {
				if (crd->crd_flags & CRD_F_ENCRYPT)
					exf->encrypt(kschedule, buf + i, iv);
				else
					exf->decrypt(kschedule, buf + i, iv);
			}
		} else if (crd->crd_flags & CRD_F_ENCRYPT) {
			for (i = crd->crd_skip;
			     i < crd->crd_skip + crd->crd_len; i += blks) {
				/* XOR with the IV/previous block, as appropriate. */
				if (i == crd->crd_skip)
					for (k = 0; k < blks; k++)
						buf[i + k] ^= ivp[k];
				else
					for (k = 0; k < blks; k++)
						buf[i + k] ^= buf[i + k - blks];
				exf->encrypt(kschedule, buf + i, iv);
			}
		} else {	/* Decrypt */
			/*
			 * Start at the end, so we don't need to keep the
			 * encrypted block as the IV for the next block.
			 */
			for (i = crd->crd_skip + crd->crd_len - blks;
			     i >= crd->crd_skip; i -= blks) {
				exf->decrypt(kschedule, buf + i, iv);

				/* XOR with the IV/previous block, as appropriate. */
				if (i == crd->crd_skip)
					for (k = 0; k < blks; k++)
						buf[i + k] ^= ivp[k];
				else
					for (k = 0; k < blks; k++)
						buf[i + k] ^= buf[i + k - blks];
			}
		}
		error = 0;	/* Done with contiguous buffer encrypt/decrypt */
	}
done:
	/*
	 * Cleanup - explicitly replace the session key if requested
	 * (horrible semantics for concurrent operation).
	 */
	if (explicit_kschedule) {
		spin_lock(&swcr_spin);
		if (sw->sw_kschedule && sw->sw_kschedule_refs == 0) {
			okschedule = sw->sw_kschedule;
			sw->sw_kschedule = kschedule;
		} else {
			okschedule = NULL;
		}
		spin_unlock(&swcr_spin);
		if (okschedule)
			exf->zerokey(&okschedule);
	} else {
		spin_lock(&swcr_spin);
		--sw->sw_kschedule_refs;
		spin_unlock(&swcr_spin);
	}
	return error;
}
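
/*
 * swcr_authprepare() below precomputes the two HMAC contexts.  As a
 * reminder, HMAC (RFC 2104) is defined as
 *
 *	HMAC(K, m) = H((K ^ opad) || H((K ^ ipad) || m))
 *
 * sw_ictx caches the hash state after absorbing (K ^ ipad) and sw_octx
 * the state after absorbing (K ^ opad), so each request only has to
 * hash the message itself plus one extra block.
 */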
static void
swcr_authprepare(struct auth_hash *axf, struct swcr_data *sw, u_char *key,
    int klen)
{
	int k;

	klen /= 8;

	switch (axf->type) {
	case CRYPTO_MD5_HMAC:
	case CRYPTO_SHA1_HMAC:
	case CRYPTO_SHA2_256_HMAC:
	case CRYPTO_SHA2_384_HMAC:
	case CRYPTO_SHA2_512_HMAC:
	case CRYPTO_NULL_HMAC:
	case CRYPTO_RIPEMD160_HMAC:
		for (k = 0; k < klen; k++)
			key[k] ^= HMAC_IPAD_VAL;

		axf->Init(sw->sw_ictx);
		axf->Update(sw->sw_ictx, key, klen);
		axf->Update(sw->sw_ictx, hmac_ipad_buffer,
			    axf->blocksize - klen);

		for (k = 0; k < klen; k++)
			key[k] ^= (HMAC_IPAD_VAL ^ HMAC_OPAD_VAL);

		axf->Init(sw->sw_octx);
		axf->Update(sw->sw_octx, key, klen);
		axf->Update(sw->sw_octx, hmac_opad_buffer,
			    axf->blocksize - klen);

		for (k = 0; k < klen; k++)
			key[k] ^= HMAC_OPAD_VAL;
		break;
	case CRYPTO_MD5_KPDK:
	case CRYPTO_SHA1_KPDK:
	{
		/*
		 * We need a buffer that can hold an MD5 or a SHA1
		 * result; SHA1_RESULTLEN is large enough for both.
		 */
		u_char buf[SHA1_RESULTLEN];

		sw->sw_klen = klen;
		bcopy(key, sw->sw_octx, klen);
		axf->Init(sw->sw_ictx);
		axf->Update(sw->sw_ictx, key, klen);
		axf->Final(buf, sw->sw_ictx);
		break;
	}
	default:
		kprintf("%s: CRD_F_KEY_EXPLICIT flag given, but algorithm %d "
			"doesn't use keys.\n", __func__, axf->type);
	}
}

/*
 * Compute keyed-hash authenticator.
 */
static int
swcr_authcompute(struct cryptodesc *crd, struct swcr_data *sw, caddr_t buf,
    int flags)
{
	unsigned char aalg[HASH_MAX_LEN];
	struct auth_hash *axf;
	union authctx ctx;
	int err;

	if (sw->sw_ictx == NULL)
		return EINVAL;

	axf = sw->sw_axf;

	if (crd->crd_flags & CRD_F_KEY_EXPLICIT)
		swcr_authprepare(axf, sw, crd->crd_key, crd->crd_klen);

	bcopy(sw->sw_ictx, &ctx, axf->ctxsize);

	err = crypto_apply(flags, buf, crd->crd_skip, crd->crd_len,
	    (int (*)(void *, void *, unsigned int))axf->Update, (caddr_t)&ctx);
	if (err)
		return err;

	switch (sw->sw_alg) {
	case CRYPTO_MD5_HMAC:
	case CRYPTO_SHA1_HMAC:
	case CRYPTO_SHA2_256_HMAC:
	case CRYPTO_SHA2_384_HMAC:
	case CRYPTO_SHA2_512_HMAC:
	case CRYPTO_RIPEMD160_HMAC:
		if (sw->sw_octx == NULL)
			return EINVAL;

		axf->Final(aalg, &ctx);
		bcopy(sw->sw_octx, &ctx, axf->ctxsize);
		axf->Update(&ctx, aalg, axf->hashsize);
		axf->Final(aalg, &ctx);
		break;

	case CRYPTO_MD5_KPDK:
	case CRYPTO_SHA1_KPDK:
		if (sw->sw_octx == NULL)
			return EINVAL;

		axf->Update(&ctx, sw->sw_octx, sw->sw_klen);
		axf->Final(aalg, &ctx);
		break;

	case CRYPTO_NULL_HMAC:
		axf->Final(aalg, &ctx);
		break;
	}

	/* Inject the authentication data */
	crypto_copyback(flags, buf, crd->crd_inject,
	    sw->sw_mlen == 0 ? axf->hashsize : sw->sw_mlen, aalg);
	return 0;
}
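
/*
 * Note on the AEAD loop in swcr_combined() below: on encryption the
 * MAC is computed over the ciphertext (encrypt-then-authenticate),
 * while on decryption the MAC is taken before the block is decrypted.
 * That is why the axf->Update()/exf->encrypt() calls are swapped
 * between the two arms of the main loop.
 */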
/*
 * Apply a combined encryption-authentication transformation.
 */
static int
swcr_combined(struct cryptop *crp)
{
	uint32_t blkbuf[howmany(EALG_MAX_BLOCK_LEN, sizeof(uint32_t))];
	u_char *blk = (u_char *)blkbuf;
	u_char aalg[HASH_MAX_LEN];
	u_char iv[EALG_MAX_BLOCK_LEN];
	uint8_t *kschedule;
	union authctx ctx;
	struct cryptodesc *crd, *crda = NULL, *crde = NULL;
	struct swcr_data *sw, *swa, *swe = NULL;
	struct auth_hash *axf = NULL;
	struct enc_xform *exf = NULL;
	struct mbuf *m = NULL;
	struct uio *uio = NULL;
	caddr_t buf = (caddr_t)crp->crp_buf;
	uint32_t *blkp;
	int i, blksz, ivlen, outtype, len;

	blksz = 0;
	ivlen = 0;

	for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
		for (sw = swcr_sessions[crp->crp_sid & 0xffffffff];
		     sw && sw->sw_alg != crd->crd_alg;
		     sw = sw->sw_next)
			;
		if (sw == NULL)
			return (EINVAL);

		switch (sw->sw_alg) {
		case CRYPTO_AES_GCM_16:
		case CRYPTO_AES_GMAC:
			swe = sw;
			crde = crd;
			exf = swe->sw_exf;
			ivlen = exf->ivsize;
			break;
		case CRYPTO_AES_128_GMAC:
		case CRYPTO_AES_192_GMAC:
		case CRYPTO_AES_256_GMAC:
			swa = sw;
			crda = crd;
			axf = swa->sw_axf;
			if (swa->sw_ictx == NULL)
				return (EINVAL);
			bcopy(swa->sw_ictx, &ctx, axf->ctxsize);
			blksz = axf->blocksize;
			break;
		default:
			return (EINVAL);
		}
	}
	if (crde == NULL || crda == NULL)
		return (EINVAL);

	if (crp->crp_flags & CRYPTO_F_IMBUF) {
		outtype = CRYPTO_BUF_MBUF;
		m = (struct mbuf *)buf;
	} else {
		outtype = CRYPTO_BUF_IOV;
		uio = (struct uio *)buf;
	}

	/* Initialize the IV */
	if (crde->crd_flags & CRD_F_ENCRYPT) {
		/* IV explicitly provided? */
		if (crde->crd_flags & CRD_F_IV_EXPLICIT)
			bcopy(crde->crd_iv, iv, ivlen);
		else
			karc4rand(iv, ivlen);

		/* Do we need to write the IV? */
		if (!(crde->crd_flags & CRD_F_IV_PRESENT))
			crypto_copyback(crde->crd_flags, buf, crde->crd_inject,
			    ivlen, iv);
	} else {	/* Decryption */
		/* IV explicitly provided? */
		if (crde->crd_flags & CRD_F_IV_EXPLICIT) {
			bcopy(crde->crd_iv, iv, ivlen);
		} else {
			/* Get IV off buf */
			crypto_copydata(crde->crd_flags, buf, crde->crd_inject,
			    ivlen, iv);
		}
	}

	/* Supply MAC with IV */
	if (axf->Reinit)
		axf->Reinit(&ctx, iv, ivlen);

	/* Supply MAC with AAD */
	for (i = 0; i < crda->crd_len; i += blksz) {
		len = MIN(crda->crd_len - i, blksz);
		crypto_copydata(crde->crd_flags, buf, crda->crd_skip + i, len,
		    blk);
		axf->Update(&ctx, blk, len);
	}

	/*
	 * The key schedule belongs to the encryption session (swe),
	 * not to whichever session happened to match last above.
	 */
	spin_lock(&swcr_spin);
	kschedule = swe->sw_kschedule;
	++swe->sw_kschedule_refs;
	spin_unlock(&swcr_spin);

	if (exf->reinit)
		exf->reinit(kschedule, iv);

	/* Do encryption/decryption with MAC */
	for (i = 0; i < crde->crd_len; i += blksz) {
		len = MIN(crde->crd_len - i, blksz);
		if (len < blksz)
			bzero(blk, blksz);
		crypto_copydata(crde->crd_flags, buf, crde->crd_skip + i, len,
		    blk);
		if (crde->crd_flags & CRD_F_ENCRYPT) {
			exf->encrypt(kschedule, blk, iv);
			axf->Update(&ctx, blk, len);
		} else {
			axf->Update(&ctx, blk, len);
			exf->decrypt(kschedule, blk, iv);
		}
		crypto_copyback(crde->crd_flags, buf, crde->crd_skip + i, len,
		    blk);
	}

	/* Do any required special finalization */
	switch (crda->crd_alg) {
	case CRYPTO_AES_128_GMAC:
	case CRYPTO_AES_192_GMAC:
	case CRYPTO_AES_256_GMAC:
		/* length block */
		bzero(blk, blksz);
		blkp = (uint32_t *)blk + 1;
		*blkp = htobe32(crda->crd_len * 8);
		blkp = (uint32_t *)blk + 3;
		*blkp = htobe32(crde->crd_len * 8);
		axf->Update(&ctx, blk, blksz);
		break;
	}

	/* Finalize MAC */
	axf->Final(aalg, &ctx);

	/* Inject the authentication data */
	crypto_copyback(crda->crd_flags, crp->crp_buf, crda->crd_inject,
	    axf->blocksize, aalg);

	spin_lock(&swcr_spin);
	--swe->sw_kschedule_refs;
	spin_unlock(&swcr_spin);

	return (0);
}
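
/*
 * Contract assumed of the compression xforms used by swcr_compdec()
 * below: cxf->compress()/cxf->decompress() allocate the output buffer
 * themselves (returned through *out and freed here with
 * kfree(..., M_CRYPTO_DATA)) and return the output length in bytes,
 * or 0 on failure.
 */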
/*
 * Apply a compression/decompression algorithm.
 */
static int
swcr_compdec(struct cryptodesc *crd, struct swcr_data *sw,
    caddr_t buf, int flags)
{
	u_int8_t *data, *out;
	struct comp_algo *cxf;
	int adj;
	u_int32_t result;

	cxf = sw->sw_cxf;

	/*
	 * The compression xform operates on the whole buffer at once,
	 * so if the data is not contiguous we must copy it into a
	 * private buffer first.
	 */
	data = kmalloc(crd->crd_len, M_CRYPTO_DATA, M_INTWAIT);
	crypto_copydata(flags, buf, crd->crd_skip, crd->crd_len, data);

	if (crd->crd_flags & CRD_F_COMP)
		result = cxf->compress(data, crd->crd_len, &out);
	else
		result = cxf->decompress(data, crd->crd_len, &out);

	kfree(data, M_CRYPTO_DATA);
	if (result == 0)
		return EINVAL;

	/*
	 * Copy back the (de)compressed data.  m_copyback will extend
	 * the mbuf as necessary.
	 */
	sw->sw_size = result;
	/* Check the compressed size when doing compression */
	if (crd->crd_flags & CRD_F_COMP) {
		if (result >= crd->crd_len) {
			/* Compression was useless, we lost time */
			kfree(out, M_CRYPTO_DATA);
			return 0;
		}
	}

	crypto_copyback(flags, buf, crd->crd_skip, result, out);
	if (result < crd->crd_len) {
		if (flags & CRYPTO_F_IMBUF) {
			/* adj is negative: trim from the mbuf tail */
			adj = result - crd->crd_len;
			m_adj((struct mbuf *)buf, adj);
		} else if (flags & CRYPTO_F_IOV) {
			struct uio *uio = (struct uio *)buf;
			int ind;

			adj = crd->crd_len - result;
			ind = uio->uio_iovcnt - 1;

			while (adj > 0 && ind >= 0) {
				if (adj < uio->uio_iov[ind].iov_len) {
					uio->uio_iov[ind].iov_len -= adj;
					break;
				}

				adj -= uio->uio_iov[ind].iov_len;
				uio->uio_iov[ind].iov_len = 0;
				ind--;
				uio->uio_iovcnt--;
			}
		}
	}
	kfree(out, M_CRYPTO_DATA);
	return 0;
}
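
/*
 * A session is described by a chain of cryptoini structures, one per
 * transform.  A hypothetical encrypt+authenticate setup might look
 * like the following (sketch only; the identifiers and key lengths
 * are illustrative, cri_klen is in bits, and the exact
 * crypto_newsession() calling convention is documented in
 * opencrypto(9)):
 *
 *	struct cryptoini crie, cria;
 *	u_int64_t sid;
 *	int error;
 *
 *	bzero(&crie, sizeof(crie));
 *	crie.cri_alg = CRYPTO_RIJNDAEL128_CBC;
 *	crie.cri_klen = 128;
 *	crie.cri_key = enc_key;
 *	crie.cri_next = &cria;
 *
 *	bzero(&cria, sizeof(cria));
 *	cria.cri_alg = CRYPTO_SHA1_HMAC;
 *	cria.cri_klen = 160;
 *	cria.cri_key = auth_key;
 *
 *	error = crypto_newsession(&sid, &crie, 0);
 */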
/*
 * Generate a new software session.
 */
static int
swcr_newsession(device_t dev, u_int32_t *sid, struct cryptoini *cri)
{
	struct swcr_data *swd_base;
	struct swcr_data **swd;
	struct swcr_data **oswd;
	struct auth_hash *axf;
	struct enc_xform *txf;
	struct comp_algo *cxf;
	u_int32_t i;
	u_int32_t n;
	int error;

	if (sid == NULL || cri == NULL)
		return EINVAL;

	swd_base = NULL;
	swd = &swd_base;

	while (cri) {
		*swd = kmalloc(sizeof(struct swcr_data),
			       M_CRYPTO_DATA, M_WAITOK | M_ZERO);

		switch (cri->cri_alg) {
		case CRYPTO_DES_CBC:
			txf = &enc_xform_des;
			goto enccommon;
		case CRYPTO_3DES_CBC:
			txf = &enc_xform_3des;
			goto enccommon;
		case CRYPTO_BLF_CBC:
			txf = &enc_xform_blf;
			goto enccommon;
		case CRYPTO_CAST_CBC:
			txf = &enc_xform_cast5;
			goto enccommon;
		case CRYPTO_SKIPJACK_CBC:
			txf = &enc_xform_skipjack;
			goto enccommon;
		case CRYPTO_RIJNDAEL128_CBC:
			txf = &enc_xform_rijndael128;
			goto enccommon;
		case CRYPTO_AES_XTS:
			txf = &enc_xform_aes_xts;
			goto enccommon;
		case CRYPTO_AES_CTR:
			txf = &enc_xform_aes_ctr;
			goto enccommon;
		case CRYPTO_AES_GCM_16:
			txf = &enc_xform_aes_gcm;
			goto enccommon;
		case CRYPTO_AES_GMAC:
			txf = &enc_xform_aes_gmac;
			(*swd)->sw_exf = txf;
			break;
		case CRYPTO_CAMELLIA_CBC:
			txf = &enc_xform_camellia;
			goto enccommon;
		case CRYPTO_TWOFISH_CBC:
			txf = &enc_xform_twofish;
			goto enccommon;
		case CRYPTO_SERPENT_CBC:
			txf = &enc_xform_serpent;
			goto enccommon;
		case CRYPTO_TWOFISH_XTS:
			txf = &enc_xform_twofish_xts;
			goto enccommon;
		case CRYPTO_SERPENT_XTS:
			txf = &enc_xform_serpent_xts;
			goto enccommon;
		case CRYPTO_NULL_CBC:
			txf = &enc_xform_null;
			goto enccommon;
		enccommon:
			if (cri->cri_key != NULL) {
				error = txf->setkey(&((*swd)->sw_kschedule),
						    cri->cri_key,
						    cri->cri_klen / 8);
				if (error) {
					swcr_freesession_slot(&swd_base, 0);
					return error;
				}
			}
			(*swd)->sw_exf = txf;
			break;
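
		/*
		 * Note: when cri_key is NULL the key schedule is left
		 * unset here; the caller is then expected to supply a
		 * key with each request via CRD_F_KEY_EXPLICIT, which
		 * swcr_encdec() handles.
		 */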
		case CRYPTO_MD5_HMAC:
			axf = &auth_hash_hmac_md5;
			goto authcommon;
		case CRYPTO_SHA1_HMAC:
			axf = &auth_hash_hmac_sha1;
			goto authcommon;
		case CRYPTO_SHA2_256_HMAC:
			axf = &auth_hash_hmac_sha2_256;
			goto authcommon;
		case CRYPTO_SHA2_384_HMAC:
			axf = &auth_hash_hmac_sha2_384;
			goto authcommon;
		case CRYPTO_SHA2_512_HMAC:
			axf = &auth_hash_hmac_sha2_512;
			goto authcommon;
		case CRYPTO_NULL_HMAC:
			axf = &auth_hash_null;
			goto authcommon;
		case CRYPTO_RIPEMD160_HMAC:
			axf = &auth_hash_hmac_ripemd_160;
		authcommon:
			(*swd)->sw_ictx = kmalloc(axf->ctxsize, M_CRYPTO_DATA,
						  M_WAITOK);
			(*swd)->sw_octx = kmalloc(axf->ctxsize, M_CRYPTO_DATA,
						  M_WAITOK);

			if (cri->cri_key != NULL) {
				swcr_authprepare(axf, *swd, cri->cri_key,
						 cri->cri_klen);
			}

			(*swd)->sw_mlen = cri->cri_mlen;
			(*swd)->sw_axf = axf;
			break;

		case CRYPTO_MD5_KPDK:
			axf = &auth_hash_key_md5;
			goto auth2common;

		case CRYPTO_SHA1_KPDK:
			axf = &auth_hash_key_sha1;
		auth2common:
			(*swd)->sw_ictx = kmalloc(axf->ctxsize, M_CRYPTO_DATA,
						  M_WAITOK);
			(*swd)->sw_octx = kmalloc(cri->cri_klen / 8,
						  M_CRYPTO_DATA, M_WAITOK);

			/* Store the key so we can "append" it to the payload */
			if (cri->cri_key != NULL) {
				swcr_authprepare(axf, *swd, cri->cri_key,
						 cri->cri_klen);
			}

			(*swd)->sw_mlen = cri->cri_mlen;
			(*swd)->sw_axf = axf;
			break;
#ifdef notdef
		case CRYPTO_MD5:
			axf = &auth_hash_md5;
			goto auth3common;

		case CRYPTO_SHA1:
			axf = &auth_hash_sha1;
		auth3common:
			(*swd)->sw_ictx = kmalloc(axf->ctxsize, M_CRYPTO_DATA,
						  M_WAITOK);

			axf->Init((*swd)->sw_ictx);
			(*swd)->sw_mlen = cri->cri_mlen;
			(*swd)->sw_axf = axf;
			break;
#endif
		case CRYPTO_AES_128_GMAC:
			axf = &auth_hash_gmac_aes_128;
			goto auth4common;

		case CRYPTO_AES_192_GMAC:
			axf = &auth_hash_gmac_aes_192;
			goto auth4common;

		case CRYPTO_AES_256_GMAC:
			axf = &auth_hash_gmac_aes_256;
		auth4common:
			(*swd)->sw_ictx = kmalloc(axf->ctxsize, M_CRYPTO_DATA,
						  M_NOWAIT);
			if ((*swd)->sw_ictx == NULL) {
				swcr_freesession_slot(&swd_base, 0);
				return ENOBUFS;
			}

			axf->Init((*swd)->sw_ictx);
			axf->Setkey((*swd)->sw_ictx, cri->cri_key,
				    cri->cri_klen / 8);
			(*swd)->sw_axf = axf;
			break;

		case CRYPTO_DEFLATE_COMP:
			cxf = &comp_algo_deflate;
			(*swd)->sw_cxf = cxf;
			break;
		default:
			swcr_freesession_slot(&swd_base, 0);
			return EINVAL;
		}

		(*swd)->sw_alg = cri->cri_alg;
		cri = cri->cri_next;
		swd = &((*swd)->sw_next);
	}
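
	/*
	 * Session id allocation strategy: scan for a free slot while
	 * holding the spinlock, starting at the swcr_minsesnum hint.
	 * If the table is full, drop the lock, allocate a larger table
	 * (minimum CRYPTO_SW_SESSIONS, then 3/2 growth), and retry,
	 * installing the new table only if no other cpu grew it first.
	 */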
	for (;;) {
		/*
		 * Atomically allocate a session.
		 */
		spin_lock(&swcr_spin);
		for (i = swcr_minsesnum; i < swcr_sesnum; ++i) {
			if (swcr_sessions[i] == NULL)
				break;
		}
		if (i < swcr_sesnum) {
			swcr_sessions[i] = swd_base;
			swcr_minsesnum = i + 1;
			spin_unlock(&swcr_spin);
			break;
		}
		n = swcr_sesnum;
		spin_unlock(&swcr_spin);

		/*
		 * A larger allocation is required, reallocate the array
		 * and replace it, checking for SMP races.
		 */
		if (n < CRYPTO_SW_SESSIONS)
			n = CRYPTO_SW_SESSIONS;
		else
			n = n * 3 / 2;
		swd = kmalloc(n * sizeof(struct swcr_data *),
			      M_CRYPTO_DATA, M_WAITOK | M_ZERO);

		spin_lock(&swcr_spin);
		if (swcr_sesnum >= n) {
			spin_unlock(&swcr_spin);
			kfree(swd, M_CRYPTO_DATA);
		} else if (swcr_sesnum) {
			bcopy(swcr_sessions, swd,
			      swcr_sesnum * sizeof(struct swcr_data *));
			oswd = swcr_sessions;
			swcr_sessions = swd;
			swcr_sesnum = n;
			spin_unlock(&swcr_spin);
			kfree(oswd, M_CRYPTO_DATA);
		} else {
			swcr_sessions = swd;
			swcr_sesnum = n;
			spin_unlock(&swcr_spin);
		}
	}

	*sid = i;
	return 0;
}

/*
 * Free a session.
 */
static int
swcr_freesession(device_t dev, u_int64_t tid)
{
	u_int32_t sid = CRYPTO_SESID2LID(tid);

	if (sid >= swcr_sesnum || swcr_sessions == NULL ||
	    swcr_sessions[sid] == NULL) {
		return EINVAL;
	}

	/* Silently accept and return */
	if (sid == 0)
		return 0;

	return (swcr_freesession_slot(&swcr_sessions[sid], sid));
}

static int
swcr_freesession_slot(struct swcr_data **swdp, u_int32_t sid)
{
	struct enc_xform *txf;
	struct auth_hash *axf;
	struct comp_algo *cxf;
	struct swcr_data *swd;
	struct swcr_data *swnext;

	/*
	 * Protect session detachment with the spinlock.
	 */
	spin_lock(&swcr_spin);
	swnext = *swdp;
	*swdp = NULL;
	if (sid && swcr_minsesnum > sid)
		swcr_minsesnum = sid;
	spin_unlock(&swcr_spin);

	/*
	 * Clean up at our leisure.
	 */
	while ((swd = swnext) != NULL) {
		swnext = swd->sw_next;
		swd->sw_next = NULL;

		switch (swd->sw_alg) {
		case CRYPTO_DES_CBC:
		case CRYPTO_3DES_CBC:
		case CRYPTO_BLF_CBC:
		case CRYPTO_CAST_CBC:
		case CRYPTO_SKIPJACK_CBC:
		case CRYPTO_RIJNDAEL128_CBC:
		case CRYPTO_AES_XTS:
		case CRYPTO_AES_CTR:
		case CRYPTO_AES_GCM_16:
		case CRYPTO_AES_GMAC:
		case CRYPTO_CAMELLIA_CBC:
		case CRYPTO_TWOFISH_CBC:
		case CRYPTO_SERPENT_CBC:
		case CRYPTO_TWOFISH_XTS:
		case CRYPTO_SERPENT_XTS:
		case CRYPTO_NULL_CBC:
			txf = swd->sw_exf;

			if (swd->sw_kschedule)
				txf->zerokey(&(swd->sw_kschedule));
			break;

		case CRYPTO_MD5_HMAC:
		case CRYPTO_SHA1_HMAC:
		case CRYPTO_SHA2_256_HMAC:
		case CRYPTO_SHA2_384_HMAC:
		case CRYPTO_SHA2_512_HMAC:
		case CRYPTO_RIPEMD160_HMAC:
		case CRYPTO_NULL_HMAC:
			axf = swd->sw_axf;

			if (swd->sw_ictx) {
				bzero(swd->sw_ictx, axf->ctxsize);
				kfree(swd->sw_ictx, M_CRYPTO_DATA);
			}
			if (swd->sw_octx) {
				bzero(swd->sw_octx, axf->ctxsize);
				kfree(swd->sw_octx, M_CRYPTO_DATA);
			}
			break;

		case CRYPTO_MD5_KPDK:
		case CRYPTO_SHA1_KPDK:
			axf = swd->sw_axf;

			if (swd->sw_ictx) {
				bzero(swd->sw_ictx, axf->ctxsize);
				kfree(swd->sw_ictx, M_CRYPTO_DATA);
			}
			if (swd->sw_octx) {
				bzero(swd->sw_octx, swd->sw_klen);
				kfree(swd->sw_octx, M_CRYPTO_DATA);
			}
			break;

		case CRYPTO_AES_128_GMAC:
		case CRYPTO_AES_192_GMAC:
		case CRYPTO_AES_256_GMAC:
		case CRYPTO_MD5:
		case CRYPTO_SHA1:
			axf = swd->sw_axf;

			if (swd->sw_ictx) {
				bzero(swd->sw_ictx, axf->ctxsize);
				kfree(swd->sw_ictx, M_CRYPTO_DATA);
			}
			break;

		case CRYPTO_DEFLATE_COMP:
			cxf = swd->sw_cxf;
			break;
		}

		kfree(swd, M_CRYPTO_DATA);
	}
	return 0;
}
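
/*
 * Note that swcr_freesession_slot() wipes key material (key schedules
 * via zerokey(), hash contexts via bzero()) before freeing it, so
 * session keys are not left behind in freed kernel memory.
 */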
break; 1198 } 1199 1200 //FREE(swd, M_CRYPTO_DATA); 1201 kfree(swd, M_CRYPTO_DATA); 1202 } 1203 return 0; 1204 } 1205 1206 /* 1207 * Process a software request. 1208 */ 1209 static int 1210 swcr_process(device_t dev, struct cryptop *crp, int hint) 1211 { 1212 struct cryptodesc *crd; 1213 struct swcr_data *sw; 1214 u_int32_t lid; 1215 1216 /* Sanity check */ 1217 if (crp == NULL) 1218 return EINVAL; 1219 1220 if (crp->crp_desc == NULL || crp->crp_buf == NULL) { 1221 crp->crp_etype = EINVAL; 1222 goto done; 1223 } 1224 1225 lid = crp->crp_sid & 0xffffffff; 1226 if (lid >= swcr_sesnum || lid == 0 || swcr_sessions[lid] == NULL) { 1227 crp->crp_etype = ENOENT; 1228 goto done; 1229 } 1230 1231 /* Go through crypto descriptors, processing as we go */ 1232 for (crd = crp->crp_desc; crd; crd = crd->crd_next) { 1233 /* 1234 * Find the crypto context. 1235 * 1236 * XXX Note that the logic here prevents us from having 1237 * XXX the same algorithm multiple times in a session 1238 * XXX (or rather, we can but it won't give us the right 1239 * XXX results). To do that, we'd need some way of differentiating 1240 * XXX between the various instances of an algorithm (so we can 1241 * XXX locate the correct crypto context). 1242 */ 1243 for (sw = swcr_sessions[lid]; 1244 sw && sw->sw_alg != crd->crd_alg; 1245 sw = sw->sw_next) 1246 ; 1247 1248 /* No such context ? */ 1249 if (sw == NULL) { 1250 crp->crp_etype = EINVAL; 1251 goto done; 1252 } 1253 switch (sw->sw_alg) { 1254 case CRYPTO_DES_CBC: 1255 case CRYPTO_3DES_CBC: 1256 case CRYPTO_BLF_CBC: 1257 case CRYPTO_CAST_CBC: 1258 case CRYPTO_SKIPJACK_CBC: 1259 case CRYPTO_RIJNDAEL128_CBC: 1260 case CRYPTO_AES_XTS: 1261 case CRYPTO_AES_CTR: 1262 case CRYPTO_CAMELLIA_CBC: 1263 case CRYPTO_TWOFISH_CBC: 1264 case CRYPTO_SERPENT_CBC: 1265 case CRYPTO_TWOFISH_XTS: 1266 case CRYPTO_SERPENT_XTS: 1267 if ((crp->crp_etype = swcr_encdec(crd, sw, 1268 crp->crp_buf, crp->crp_flags)) != 0) 1269 goto done; 1270 break; 1271 case CRYPTO_NULL_CBC: 1272 crp->crp_etype = 0; 1273 break; 1274 case CRYPTO_MD5_HMAC: 1275 case CRYPTO_SHA1_HMAC: 1276 case CRYPTO_SHA2_256_HMAC: 1277 case CRYPTO_SHA2_384_HMAC: 1278 case CRYPTO_SHA2_512_HMAC: 1279 case CRYPTO_RIPEMD160_HMAC: 1280 case CRYPTO_NULL_HMAC: 1281 case CRYPTO_MD5_KPDK: 1282 case CRYPTO_SHA1_KPDK: 1283 case CRYPTO_MD5: 1284 case CRYPTO_SHA1: 1285 if ((crp->crp_etype = swcr_authcompute(crd, sw, 1286 crp->crp_buf, crp->crp_flags)) != 0) 1287 goto done; 1288 break; 1289 1290 case CRYPTO_AES_GCM_16: 1291 case CRYPTO_AES_GMAC: 1292 case CRYPTO_AES_128_GMAC: 1293 case CRYPTO_AES_192_GMAC: 1294 case CRYPTO_AES_256_GMAC: 1295 crp->crp_etype = swcr_combined(crp); 1296 goto done; 1297 1298 case CRYPTO_DEFLATE_COMP: 1299 if ((crp->crp_etype = swcr_compdec(crd, sw, 1300 crp->crp_buf, crp->crp_flags)) != 0) 1301 goto done; 1302 else 1303 crp->crp_olen = (int)sw->sw_size; 1304 break; 1305 1306 default: 1307 /* Unknown/unsupported algorithm */ 1308 crp->crp_etype = EINVAL; 1309 goto done; 1310 } 1311 } 1312 1313 done: 1314 crypto_done(crp); 1315 lwkt_yield(); 1316 return 0; 1317 } 1318 1319 static void 1320 swcr_identify(driver_t *drv, device_t parent) 1321 { 1322 /* NB: order 10 is so we get attached after h/w devices */ 1323 /* XXX: wouldn't bet about this BUS_ADD_CHILD correctness */ 1324 if (device_find_child(parent, "cryptosoft", -1) == NULL && 1325 BUS_ADD_CHILD(parent, parent, 10, "cryptosoft", -1) == 0) 1326 panic("cryptosoft: could not attach"); 1327 } 1328 1329 static int 1330 swcr_probe(device_t dev) 1331 { 1332 device_set_desc(dev, 
"software crypto"); 1333 return (0); 1334 } 1335 1336 static int 1337 swcr_attach(device_t dev) 1338 { 1339 memset(hmac_ipad_buffer, HMAC_IPAD_VAL, HMAC_MAX_BLOCK_LEN); 1340 memset(hmac_opad_buffer, HMAC_OPAD_VAL, HMAC_MAX_BLOCK_LEN); 1341 1342 swcr_id = crypto_get_driverid(dev, CRYPTOCAP_F_SOFTWARE | 1343 CRYPTOCAP_F_SYNC | 1344 CRYPTOCAP_F_SMP); 1345 if (swcr_id < 0) { 1346 device_printf(dev, "cannot initialize!"); 1347 return ENOMEM; 1348 } 1349 #define REGISTER(alg) \ 1350 crypto_register(swcr_id, alg, 0,0) 1351 REGISTER(CRYPTO_DES_CBC); 1352 REGISTER(CRYPTO_3DES_CBC); 1353 REGISTER(CRYPTO_BLF_CBC); 1354 REGISTER(CRYPTO_CAST_CBC); 1355 REGISTER(CRYPTO_SKIPJACK_CBC); 1356 REGISTER(CRYPTO_NULL_CBC); 1357 REGISTER(CRYPTO_MD5_HMAC); 1358 REGISTER(CRYPTO_SHA1_HMAC); 1359 REGISTER(CRYPTO_SHA2_256_HMAC); 1360 REGISTER(CRYPTO_SHA2_384_HMAC); 1361 REGISTER(CRYPTO_SHA2_512_HMAC); 1362 REGISTER(CRYPTO_RIPEMD160_HMAC); 1363 REGISTER(CRYPTO_NULL_HMAC); 1364 REGISTER(CRYPTO_MD5_KPDK); 1365 REGISTER(CRYPTO_SHA1_KPDK); 1366 REGISTER(CRYPTO_MD5); 1367 REGISTER(CRYPTO_SHA1); 1368 REGISTER(CRYPTO_RIJNDAEL128_CBC); 1369 REGISTER(CRYPTO_AES_XTS); 1370 REGISTER(CRYPTO_AES_CTR); 1371 REGISTER(CRYPTO_AES_GCM_16); 1372 REGISTER(CRYPTO_AES_GMAC); 1373 REGISTER(CRYPTO_AES_128_GMAC); 1374 REGISTER(CRYPTO_AES_192_GMAC); 1375 REGISTER(CRYPTO_AES_256_GMAC); 1376 REGISTER(CRYPTO_CAMELLIA_CBC); 1377 REGISTER(CRYPTO_TWOFISH_CBC); 1378 REGISTER(CRYPTO_SERPENT_CBC); 1379 REGISTER(CRYPTO_TWOFISH_XTS); 1380 REGISTER(CRYPTO_SERPENT_XTS); 1381 REGISTER(CRYPTO_DEFLATE_COMP); 1382 #undef REGISTER 1383 1384 return 0; 1385 } 1386 1387 static int 1388 swcr_detach(device_t dev) 1389 { 1390 crypto_unregister_all(swcr_id); 1391 if (swcr_sessions != NULL) 1392 kfree(swcr_sessions, M_CRYPTO_DATA); 1393 return 0; 1394 } 1395 1396 static device_method_t swcr_methods[] = { 1397 DEVMETHOD(device_identify, swcr_identify), 1398 DEVMETHOD(device_probe, swcr_probe), 1399 DEVMETHOD(device_attach, swcr_attach), 1400 DEVMETHOD(device_detach, swcr_detach), 1401 1402 DEVMETHOD(cryptodev_newsession, swcr_newsession), 1403 DEVMETHOD(cryptodev_freesession,swcr_freesession), 1404 DEVMETHOD(cryptodev_process, swcr_process), 1405 1406 {0, 0}, 1407 }; 1408 1409 static driver_t swcr_driver = { 1410 "cryptosoft", 1411 swcr_methods, 1412 0, /* NB: no softc */ 1413 }; 1414 static devclass_t swcr_devclass; 1415 1416 /* 1417 * NB: We explicitly reference the crypto module so we 1418 * get the necessary ordering when built as a loadable 1419 * module. This is required because we bundle the crypto 1420 * module code together with the cryptosoft driver (otherwise 1421 * normal module dependencies would handle things). 1422 */ 1423 extern int crypto_modevent(struct module *, int, void *); 1424 /* XXX where to attach */ 1425 DRIVER_MODULE(cryptosoft, nexus, swcr_driver, swcr_devclass, crypto_modevent,NULL); 1426 MODULE_VERSION(cryptosoft, 1); 1427 MODULE_DEPEND(cryptosoft, crypto, 1, 1, 1); 1428