/*	$NetBSD: cryptosoft.c,v 1.40 2012/08/30 12:16:49 drochner Exp $ */
/*	$FreeBSD: src/sys/opencrypto/cryptosoft.c,v 1.2.2.1 2002/11/21 23:34:23 sam Exp $	*/
/*	$OpenBSD: cryptosoft.c,v 1.35 2002/04/26 08:43:50 deraadt Exp $	*/

/*
 * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu)
 *
 * This code was written by Angelos D. Keromytis in Athens, Greece, in
 * February 2000. Network Security Technologies Inc. (NSTI) kindly
 * supported the development of this code.
 *
 * Copyright (c) 2000, 2001 Angelos D. Keromytis
 *
 * Permission to use, copy, and modify this software with or without fee
 * is hereby granted, provided that this entire notice is included in
 * all source code copies of any software which is or includes a copy or
 * modification of this software.
 *
 * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
 * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
 * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
 * PURPOSE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: cryptosoft.c,v 1.40 2012/08/30 12:16:49 drochner Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/sysctl.h>
#include <sys/errno.h>

#include "opt_ocf.h"
#include <opencrypto/cryptodev.h>
#include <opencrypto/cryptosoft.h>
#include <opencrypto/xform.h>

#include <opencrypto/cryptosoft_xform.c>

/*
 * Scratch hash context, sized by the union to hold the state of any
 * supported auth transform (each swcr_auth_hash's ctxsize fits here).
 */
union authctx {
	MD5_CTX md5ctx;
	SHA1_CTX sha1ctx;
	RMD160_CTX rmd160ctx;
	SHA256_CTX sha256ctx;
	SHA384_CTX sha384ctx;
	SHA512_CTX sha512ctx;
	aesxcbc_ctx aesxcbcctx;
	AES_GMAC_CTX aesgmacctx;
};

/*
 * Session table: swcr_sessions[sid] is the head of a linked list of
 * swcr_data, one node per algorithm in the session.  Slot 0 is never
 * used.  swcr_sesnum is the allocated table size.
 */
struct swcr_data **swcr_sessions = NULL;
u_int32_t swcr_sesnum = 0;
int32_t swcr_id = -1;

/*
 * Dispatch copy in/out by buffer type; only MBUF and IOV buffers reach
 * these macros (CONTIG is handled separately by the callers).
 */
#define COPYBACK(x, a, b, c, d) \
	(x) == CRYPTO_BUF_MBUF ? m_copyback((struct mbuf *)a,b,c,d) \
	: cuio_copyback((struct uio *)a,b,c,d)
#define COPYDATA(x, a, b, c, d) \
	(x) == CRYPTO_BUF_MBUF ? m_copydata((struct mbuf *)a,b,c,d) \
	: cuio_copydata((struct uio *)a,b,c,d)

static	int swcr_encdec(struct cryptodesc *, const struct swcr_data *, void *, int);
static	int swcr_compdec(struct cryptodesc *, const struct swcr_data *, void *, int, int *);
static	int swcr_combined(struct cryptop *, int);
static	int swcr_process(void *, struct cryptop *, int);
static	int swcr_newsession(void *, u_int32_t *, struct cryptoini *);
static	int swcr_freesession(void *, u_int64_t);

/*
 * Apply a symmetric encryption/decryption algorithm.
 *
 * crd describes the region (crd_skip/crd_len) and direction; sw holds
 * the key schedule and transform ops; bufv is a contiguous buffer,
 * mbuf chain or uio, according to outtype.  crd_len must be a multiple
 * of the cipher block size.  Returns 0 or EINVAL.
 *
 * Transforms with a reinit hook (e.g. counter modes) manage their own
 * chaining; for the others CBC chaining is done here by hand, with ivp
 * tracking the previous ciphertext block across fragment boundaries.
 */
static int
swcr_encdec(struct cryptodesc *crd, const struct swcr_data *sw, void *bufv,
    int outtype)
{
	char *buf = bufv;
	unsigned char iv[EALG_MAX_BLOCK_LEN], blk[EALG_MAX_BLOCK_LEN], *idat;
	unsigned char *ivp, piv[EALG_MAX_BLOCK_LEN];
	const struct swcr_enc_xform *exf;
	int i, k, j, blks, ivlen;
	int count, ind;

	exf = sw->sw_exf;
	blks = exf->enc_xform->blocksize;
	ivlen = exf->enc_xform->ivsize;
	KASSERT(exf->reinit ? ivlen <= blks : ivlen == blks);

	/* Check for non-padded data */
	if (crd->crd_len % blks)
		return EINVAL;

	/* Initialize the IV */
	if (crd->crd_flags & CRD_F_ENCRYPT) {
		/* IV explicitly provided ? */
		if (crd->crd_flags & CRD_F_IV_EXPLICIT) {
			memcpy(iv, crd->crd_iv, ivlen);
			if (exf->reinit)
				exf->reinit(sw->sw_kschedule, iv, 0);
		} else if (exf->reinit) {
			/* The transform generates the IV itself. */
			exf->reinit(sw->sw_kschedule, 0, iv);
		} else {
			/* Get random IV */
			for (i = 0;
			    i + sizeof (u_int32_t) <= EALG_MAX_BLOCK_LEN;
			    i += sizeof (u_int32_t)) {
				u_int32_t temp = cprng_fast32();

				memcpy(iv + i, &temp, sizeof(u_int32_t));
			}
			/*
			 * What if the block size is not a multiple
			 * of sizeof (u_int32_t), which is the size of
			 * what arc4random() returns ?
			 */
			if (EALG_MAX_BLOCK_LEN % sizeof (u_int32_t) != 0) {
				u_int32_t temp = cprng_fast32();

				bcopy (&temp, iv + i,
				    EALG_MAX_BLOCK_LEN - i);
			}
		}

		/* Do we need to write the IV */
		if (!(crd->crd_flags & CRD_F_IV_PRESENT)) {
			COPYBACK(outtype, buf, crd->crd_inject, ivlen, iv);
		}

	} else {	/* Decryption */
			/* IV explicitly provided ? */
		if (crd->crd_flags & CRD_F_IV_EXPLICIT)
			memcpy(iv, crd->crd_iv, ivlen);
		else {
			/* Get IV off buf */
			COPYDATA(outtype, buf, crd->crd_inject, ivlen, iv);
		}
		if (exf->reinit)
			exf->reinit(sw->sw_kschedule, iv, 0);
	}

	ivp = iv;

	if (outtype == CRYPTO_BUF_CONTIG) {
		if (exf->reinit) {
			for (i = crd->crd_skip;
			    i < crd->crd_skip + crd->crd_len; i += blks) {
				if (crd->crd_flags & CRD_F_ENCRYPT) {
					exf->encrypt(sw->sw_kschedule, buf + i);
				} else {
					exf->decrypt(sw->sw_kschedule, buf + i);
				}
			}
		} else if (crd->crd_flags & CRD_F_ENCRYPT) {
			for (i = crd->crd_skip;
			    i < crd->crd_skip + crd->crd_len; i += blks) {
				/* XOR with the IV/previous block, as appropriate. */
				if (i == crd->crd_skip)
					for (k = 0; k < blks; k++)
						buf[i + k] ^= ivp[k];
				else
					for (k = 0; k < blks; k++)
						buf[i + k] ^= buf[i + k - blks];
				exf->encrypt(sw->sw_kschedule, buf + i);
			}
		} else {	/* Decrypt */
			/*
			 * Start at the end, so we don't need to keep the encrypted
			 * block as the IV for the next block.
			 */
			for (i = crd->crd_skip + crd->crd_len - blks;
			    i >= crd->crd_skip; i -= blks) {
				exf->decrypt(sw->sw_kschedule, buf + i);

				/* XOR with the IV/previous block, as appropriate */
				if (i == crd->crd_skip)
					for (k = 0; k < blks; k++)
						buf[i + k] ^= ivp[k];
				else
					for (k = 0; k < blks; k++)
						buf[i + k] ^= buf[i + k - blks];
			}
		}

		return 0;
	} else if (outtype == CRYPTO_BUF_MBUF) {
		struct mbuf *m = (struct mbuf *) buf;

		/* Find beginning of data */
		m = m_getptr(m, crd->crd_skip, &k);
		if (m == NULL)
			return EINVAL;

		i = crd->crd_len;

		while (i > 0) {
			/*
			 * If there's insufficient data at the end of
			 * an mbuf, we have to do some copying.
			 */
			if (m->m_len < k + blks && m->m_len != k) {
				/* Block straddles mbufs: bounce through blk[]. */
				m_copydata(m, k, blks, blk);

				/* Actual encryption/decryption */
				if (exf->reinit) {
					if (crd->crd_flags & CRD_F_ENCRYPT) {
						exf->encrypt(sw->sw_kschedule,
						    blk);
					} else {
						exf->decrypt(sw->sw_kschedule,
						    blk);
					}
				} else if (crd->crd_flags & CRD_F_ENCRYPT) {
					/* XOR with previous block */
					for (j = 0; j < blks; j++)
						blk[j] ^= ivp[j];

					exf->encrypt(sw->sw_kschedule, blk);

					/*
					 * Keep encrypted block for XOR'ing
					 * with next block
					 */
					memcpy(iv, blk, blks);
					ivp = iv;
				} else {	/* decrypt */
					/*
					 * Keep encrypted block for XOR'ing
					 * with next block.  piv is the
					 * staging copy while ivp still
					 * points at the live IV.
					 */
					if (ivp == iv)
						memcpy(piv, blk, blks);
					else
						memcpy(iv, blk, blks);

					exf->decrypt(sw->sw_kschedule, blk);

					/* XOR with previous block */
					for (j = 0; j < blks; j++)
						blk[j] ^= ivp[j];

					if (ivp == iv)
						memcpy(iv, piv, blks);
					else
						ivp = iv;
				}

				/* Copy back decrypted block */
				m_copyback(m, k, blks, blk);

				/* Advance pointer */
				m = m_getptr(m, k + blks, &k);
				if (m == NULL)
					return EINVAL;

				i -= blks;

				/* Could be done... */
				if (i == 0)
					break;
			}

			/* Skip possibly empty mbufs */
			if (k == m->m_len) {
				for (m = m->m_next; m && m->m_len == 0;
				    m = m->m_next)
					;
				k = 0;
			}

			/* Sanity check */
			if (m == NULL)
				return EINVAL;

			/*
			 * Warning: idat may point to garbage here, but
			 * we only use it in the while() loop, only if
			 * there are indeed enough data.
			 */
			idat = mtod(m, unsigned char *) + k;

			/* Fast path: whole blocks within this mbuf, in place. */
			while (m->m_len >= k + blks && i > 0) {
				if (exf->reinit) {
					if (crd->crd_flags & CRD_F_ENCRYPT) {
						exf->encrypt(sw->sw_kschedule,
						    idat);
					} else {
						exf->decrypt(sw->sw_kschedule,
						    idat);
					}
				} else if (crd->crd_flags & CRD_F_ENCRYPT) {
					/* XOR with previous block/IV */
					for (j = 0; j < blks; j++)
						idat[j] ^= ivp[j];

					exf->encrypt(sw->sw_kschedule, idat);
					ivp = idat;
				} else {	/* decrypt */
					/*
					 * Keep encrypted block to be used
					 * in next block's processing.
					 */
					if (ivp == iv)
						memcpy(piv, idat, blks);
					else
						memcpy(iv, idat, blks);

					exf->decrypt(sw->sw_kschedule, idat);

					/* XOR with previous block/IV */
					for (j = 0; j < blks; j++)
						idat[j] ^= ivp[j];

					if (ivp == iv)
						memcpy(iv, piv, blks);
					else
						ivp = iv;
				}

				idat += blks;
				k += blks;
				i -= blks;
			}
		}

		return 0; /* Done with mbuf encryption/decryption */
	} else if (outtype == CRYPTO_BUF_IOV) {
		struct uio *uio = (struct uio *) buf;

		/* Find beginning of data */
		count = crd->crd_skip;
		ind = cuio_getptr(uio, count, &k);
		if (ind == -1)
			return EINVAL;

		i = crd->crd_len;

		while (i > 0) {
			/*
			 * If there's insufficient data at the end,
			 * we have to do some copying.
			 */
			if (uio->uio_iov[ind].iov_len < k + blks &&
			    uio->uio_iov[ind].iov_len != k) {
				/* Block straddles iovecs: bounce through blk[]. */
				cuio_copydata(uio, k, blks, blk);

				/* Actual encryption/decryption */
				if (exf->reinit) {
					if (crd->crd_flags & CRD_F_ENCRYPT) {
						exf->encrypt(sw->sw_kschedule,
						    blk);
					} else {
						exf->decrypt(sw->sw_kschedule,
						    blk);
					}
				} else if (crd->crd_flags & CRD_F_ENCRYPT) {
					/* XOR with previous block */
					for (j = 0; j < blks; j++)
						blk[j] ^= ivp[j];

					exf->encrypt(sw->sw_kschedule, blk);

					/*
					 * Keep encrypted block for XOR'ing
					 * with next block
					 */
					memcpy(iv, blk, blks);
					ivp = iv;
				} else {	/* decrypt */
					/*
					 * Keep encrypted block for XOR'ing
					 * with next block
					 */
					if (ivp == iv)
						memcpy(piv, blk, blks);
					else
						memcpy(iv, blk, blks);

					exf->decrypt(sw->sw_kschedule, blk);

					/* XOR with previous block */
					for (j = 0; j < blks; j++)
						blk[j] ^= ivp[j];

					if (ivp == iv)
						memcpy(iv, piv, blks);
					else
						ivp = iv;
				}

				/* Copy back decrypted block */
				cuio_copyback(uio, k, blks, blk);

				count += blks;

				/* Advance pointer */
				ind = cuio_getptr(uio, count, &k);
				if (ind == -1)
					return (EINVAL);

				i -= blks;

				/* Could be done... */
				if (i == 0)
					break;
			}

			/*
			 * Warning: idat may point to garbage here, but
			 * we only use it in the while() loop, only if
			 * there are indeed enough data.
			 */
			idat = ((char *)uio->uio_iov[ind].iov_base) + k;

			/* Fast path: whole blocks within this iovec, in place. */
			while (uio->uio_iov[ind].iov_len >= k + blks &&
			    i > 0) {
				if (exf->reinit) {
					if (crd->crd_flags & CRD_F_ENCRYPT) {
						exf->encrypt(sw->sw_kschedule,
						    idat);
					} else {
						exf->decrypt(sw->sw_kschedule,
						    idat);
					}
				} else if (crd->crd_flags & CRD_F_ENCRYPT) {
					/* XOR with previous block/IV */
					for (j = 0; j < blks; j++)
						idat[j] ^= ivp[j];

					exf->encrypt(sw->sw_kschedule, idat);
					ivp = idat;
				} else {	/* decrypt */
					/*
					 * Keep encrypted block to be used
					 * in next block's processing.
					 */
					if (ivp == iv)
						memcpy(piv, idat, blks);
					else
						memcpy(iv, idat, blks);

					exf->decrypt(sw->sw_kschedule, idat);

					/* XOR with previous block/IV */
					for (j = 0; j < blks; j++)
						idat[j] ^= ivp[j];

					if (ivp == iv)
						memcpy(iv, piv, blks);
					else
						ivp = iv;
				}

				idat += blks;
				count += blks;
				k += blks;
				i -= blks;
			}
		}
		return 0; /* Done with mbuf encryption/decryption */
	}

	/* Unreachable */
	return EINVAL;
}

/*
 * Compute keyed-hash authenticator.
470 */ 471 int 472 swcr_authcompute(struct cryptop *crp, struct cryptodesc *crd, 473 const struct swcr_data *sw, void *buf, int outtype) 474 { 475 unsigned char aalg[AALG_MAX_RESULT_LEN]; 476 const struct swcr_auth_hash *axf; 477 union authctx ctx; 478 int err; 479 480 if (sw->sw_ictx == 0) 481 return EINVAL; 482 483 axf = sw->sw_axf; 484 485 memcpy(&ctx, sw->sw_ictx, axf->ctxsize); 486 487 switch (outtype) { 488 case CRYPTO_BUF_CONTIG: 489 axf->Update(&ctx, (char *)buf + crd->crd_skip, crd->crd_len); 490 break; 491 case CRYPTO_BUF_MBUF: 492 err = m_apply((struct mbuf *) buf, crd->crd_skip, crd->crd_len, 493 (int (*)(void*, void *, unsigned int)) axf->Update, 494 (void *) &ctx); 495 if (err) 496 return err; 497 break; 498 case CRYPTO_BUF_IOV: 499 err = cuio_apply((struct uio *) buf, crd->crd_skip, 500 crd->crd_len, 501 (int (*)(void *, void *, unsigned int)) axf->Update, 502 (void *) &ctx); 503 if (err) { 504 return err; 505 } 506 break; 507 default: 508 return EINVAL; 509 } 510 511 switch (sw->sw_alg) { 512 case CRYPTO_MD5_HMAC: 513 case CRYPTO_MD5_HMAC_96: 514 case CRYPTO_SHA1_HMAC: 515 case CRYPTO_SHA1_HMAC_96: 516 case CRYPTO_SHA2_256_HMAC: 517 case CRYPTO_SHA2_384_HMAC: 518 case CRYPTO_SHA2_512_HMAC: 519 case CRYPTO_RIPEMD160_HMAC: 520 case CRYPTO_RIPEMD160_HMAC_96: 521 if (sw->sw_octx == NULL) 522 return EINVAL; 523 524 axf->Final(aalg, &ctx); 525 memcpy(&ctx, sw->sw_octx, axf->ctxsize); 526 axf->Update(&ctx, aalg, axf->auth_hash->hashsize); 527 axf->Final(aalg, &ctx); 528 break; 529 530 case CRYPTO_MD5_KPDK: 531 case CRYPTO_SHA1_KPDK: 532 if (sw->sw_octx == NULL) 533 return EINVAL; 534 535 axf->Update(&ctx, sw->sw_octx, sw->sw_klen); 536 axf->Final(aalg, &ctx); 537 break; 538 539 case CRYPTO_NULL_HMAC: 540 case CRYPTO_MD5: 541 case CRYPTO_SHA1: 542 case CRYPTO_AES_XCBC_MAC_96: 543 axf->Final(aalg, &ctx); 544 break; 545 } 546 547 /* Inject the authentication data */ 548 switch (outtype) { 549 case CRYPTO_BUF_CONTIG: 550 (void)memcpy((char *)buf + 
crd->crd_inject, aalg, 551 axf->auth_hash->authsize); 552 break; 553 case CRYPTO_BUF_MBUF: 554 m_copyback((struct mbuf *) buf, crd->crd_inject, 555 axf->auth_hash->authsize, aalg); 556 break; 557 case CRYPTO_BUF_IOV: 558 memcpy(crp->crp_mac, aalg, axf->auth_hash->authsize); 559 break; 560 default: 561 return EINVAL; 562 } 563 return 0; 564 } 565 566 /* 567 * Apply a combined encryption-authentication transformation 568 */ 569 static int 570 swcr_combined(struct cryptop *crp, int outtype) 571 { 572 uint32_t blkbuf[howmany(EALG_MAX_BLOCK_LEN, sizeof(uint32_t))]; 573 u_char *blk = (u_char *)blkbuf; 574 u_char aalg[AALG_MAX_RESULT_LEN]; 575 u_char iv[EALG_MAX_BLOCK_LEN]; 576 union authctx ctx; 577 struct cryptodesc *crd, *crda = NULL, *crde = NULL; 578 struct swcr_data *sw, *swa, *swe = NULL; 579 const struct swcr_auth_hash *axf = NULL; 580 const struct swcr_enc_xform *exf = NULL; 581 void *buf = (void *)crp->crp_buf; 582 uint32_t *blkp; 583 int i, blksz = 0, ivlen = 0, len; 584 585 for (crd = crp->crp_desc; crd; crd = crd->crd_next) { 586 for (sw = swcr_sessions[crp->crp_sid & 0xffffffff]; 587 sw && sw->sw_alg != crd->crd_alg; 588 sw = sw->sw_next) 589 ; 590 if (sw == NULL) 591 return (EINVAL); 592 593 switch (sw->sw_alg) { 594 case CRYPTO_AES_GCM_16: 595 case CRYPTO_AES_GMAC: 596 swe = sw; 597 crde = crd; 598 exf = swe->sw_exf; 599 ivlen = exf->enc_xform->ivsize; 600 break; 601 case CRYPTO_AES_128_GMAC: 602 case CRYPTO_AES_192_GMAC: 603 case CRYPTO_AES_256_GMAC: 604 swa = sw; 605 crda = crd; 606 axf = swa->sw_axf; 607 if (swa->sw_ictx == 0) 608 return (EINVAL); 609 memcpy(&ctx, swa->sw_ictx, axf->ctxsize); 610 blksz = axf->auth_hash->blocksize; 611 break; 612 default: 613 return (EINVAL); 614 } 615 } 616 if (crde == NULL || crda == NULL) 617 return (EINVAL); 618 if (outtype == CRYPTO_BUF_CONTIG) 619 return (EINVAL); 620 621 /* Initialize the IV */ 622 if (crde->crd_flags & CRD_F_ENCRYPT) { 623 /* IV explicitly provided ? 
*/ 624 if (crde->crd_flags & CRD_F_IV_EXPLICIT) { 625 memcpy(iv, crde->crd_iv, ivlen); 626 if (exf->reinit) 627 exf->reinit(swe->sw_kschedule, iv, 0); 628 } else if (exf->reinit) 629 exf->reinit(swe->sw_kschedule, 0, iv); 630 else 631 cprng_fast(iv, ivlen); 632 633 /* Do we need to write the IV */ 634 if (!(crde->crd_flags & CRD_F_IV_PRESENT)) 635 COPYBACK(outtype, buf, crde->crd_inject, ivlen, iv); 636 637 } else { /* Decryption */ 638 /* IV explicitly provided ? */ 639 if (crde->crd_flags & CRD_F_IV_EXPLICIT) 640 memcpy(iv, crde->crd_iv, ivlen); 641 else { 642 /* Get IV off buf */ 643 COPYDATA(outtype, buf, crde->crd_inject, ivlen, iv); 644 } 645 if (exf->reinit) 646 exf->reinit(swe->sw_kschedule, iv, 0); 647 } 648 649 /* Supply MAC with IV */ 650 if (axf->Reinit) 651 axf->Reinit(&ctx, iv, ivlen); 652 653 /* Supply MAC with AAD */ 654 for (i = 0; i < crda->crd_len; i += blksz) { 655 len = MIN(crda->crd_len - i, blksz); 656 COPYDATA(outtype, buf, crda->crd_skip + i, len, blk); 657 axf->Update(&ctx, blk, len); 658 } 659 660 /* Do encryption/decryption with MAC */ 661 for (i = 0; i < crde->crd_len; i += blksz) { 662 len = MIN(crde->crd_len - i, blksz); 663 if (len < blksz) 664 memset(blk, 0, blksz); 665 COPYDATA(outtype, buf, crde->crd_skip + i, len, blk); 666 if (crde->crd_flags & CRD_F_ENCRYPT) { 667 exf->encrypt(swe->sw_kschedule, blk); 668 axf->Update(&ctx, blk, len); 669 } else { 670 axf->Update(&ctx, blk, len); 671 exf->decrypt(swe->sw_kschedule, blk); 672 } 673 COPYBACK(outtype, buf, crde->crd_skip + i, len, blk); 674 } 675 676 /* Do any required special finalization */ 677 switch (crda->crd_alg) { 678 case CRYPTO_AES_128_GMAC: 679 case CRYPTO_AES_192_GMAC: 680 case CRYPTO_AES_256_GMAC: 681 /* length block */ 682 memset(blk, 0, blksz); 683 blkp = (uint32_t *)blk + 1; 684 *blkp = htobe32(crda->crd_len * 8); 685 blkp = (uint32_t *)blk + 3; 686 *blkp = htobe32(crde->crd_len * 8); 687 axf->Update(&ctx, blk, blksz); 688 break; 689 } 690 691 /* Finalize MAC */ 692 
axf->Final(aalg, &ctx); 693 694 /* Inject the authentication data */ 695 if (outtype == CRYPTO_BUF_MBUF) 696 COPYBACK(outtype, buf, crda->crd_inject, axf->auth_hash->authsize, aalg); 697 else 698 memcpy(crp->crp_mac, aalg, axf->auth_hash->authsize); 699 700 return (0); 701 } 702 703 /* 704 * Apply a compression/decompression algorithm 705 */ 706 static int 707 swcr_compdec(struct cryptodesc *crd, const struct swcr_data *sw, 708 void *buf, int outtype, int *res_size) 709 { 710 u_int8_t *data, *out; 711 const struct swcr_comp_algo *cxf; 712 int adj; 713 u_int32_t result; 714 715 cxf = sw->sw_cxf; 716 717 /* We must handle the whole buffer of data in one time 718 * then if there is not all the data in the mbuf, we must 719 * copy in a buffer. 720 */ 721 722 data = malloc(crd->crd_len, M_CRYPTO_DATA, M_NOWAIT); 723 if (data == NULL) 724 return (EINVAL); 725 COPYDATA(outtype, buf, crd->crd_skip, crd->crd_len, data); 726 727 if (crd->crd_flags & CRD_F_COMP) 728 result = cxf->compress(data, crd->crd_len, &out); 729 else 730 result = cxf->decompress(data, crd->crd_len, &out, 731 *res_size); 732 733 free(data, M_CRYPTO_DATA); 734 if (result == 0) 735 return EINVAL; 736 737 /* Copy back the (de)compressed data. m_copyback is 738 * extending the mbuf as necessary. 739 */ 740 *res_size = (int)result; 741 /* Check the compressed size when doing compression */ 742 if (crd->crd_flags & CRD_F_COMP && 743 sw->sw_alg == CRYPTO_DEFLATE_COMP_NOGROW && 744 result >= crd->crd_len) { 745 /* Compression was useless, we lost time */ 746 free(out, M_CRYPTO_DATA); 747 return 0; 748 } 749 750 COPYBACK(outtype, buf, crd->crd_skip, result, out); 751 if (result < crd->crd_len) { 752 adj = result - crd->crd_len; 753 if (outtype == CRYPTO_BUF_MBUF) { 754 adj = result - crd->crd_len; 755 m_adj((struct mbuf *)buf, adj); 756 } 757 /* Don't adjust the iov_len, it breaks the kmem_free */ 758 } 759 free(out, M_CRYPTO_DATA); 760 return 0; 761 } 762 763 /* 764 * Generate a new software session. 
 */
static int
swcr_newsession(void *arg, u_int32_t *sid, struct cryptoini *cri)
{
	struct swcr_data **swd;
	const struct swcr_auth_hash *axf;
	const struct swcr_enc_xform *txf;
	const struct swcr_comp_algo *cxf;
	u_int32_t i;
	int k, error;

	if (sid == NULL || cri == NULL)
		return EINVAL;

	/* Find the first free session slot (slot 0 is reserved). */
	if (swcr_sessions) {
		for (i = 1; i < swcr_sesnum; i++)
			if (swcr_sessions[i] == NULL)
				break;
	} else
		i = 1;		/* NB: to silence compiler warning */

	/* No free slot: allocate the table, or double its size. */
	if (swcr_sessions == NULL || i == swcr_sesnum) {
		if (swcr_sessions == NULL) {
			i = 1; /* We leave swcr_sessions[0] empty */
			swcr_sesnum = CRYPTO_SW_SESSIONS;
		} else
			swcr_sesnum *= 2;

		swd = malloc(swcr_sesnum * sizeof(struct swcr_data *),
		    M_CRYPTO_DATA, M_NOWAIT);
		if (swd == NULL) {
			/* Reset session number */
			if (swcr_sesnum == CRYPTO_SW_SESSIONS)
				swcr_sesnum = 0;
			else
				swcr_sesnum /= 2;
			return ENOBUFS;
		}

		memset(swd, 0, swcr_sesnum * sizeof(struct swcr_data *));

		/* Copy existing sessions */
		if (swcr_sessions) {
			memcpy(swd, swcr_sessions,
			    (swcr_sesnum / 2) * sizeof(struct swcr_data *));
			free(swcr_sessions, M_CRYPTO_DATA);
		}

		swcr_sessions = swd;
	}

	swd = &swcr_sessions[i];
	*sid = i;

	/*
	 * Build one swcr_data node per cryptoini in the chain; on any
	 * failure, swcr_freesession() tears down the partial list.
	 */
	while (cri) {
		*swd = malloc(sizeof **swd, M_CRYPTO_DATA, M_NOWAIT);
		if (*swd == NULL) {
			swcr_freesession(NULL, i);
			return ENOBUFS;
		}
		memset(*swd, 0, sizeof(struct swcr_data));

		switch (cri->cri_alg) {
		case CRYPTO_DES_CBC:
			txf = &swcr_enc_xform_des;
			goto enccommon;
		case CRYPTO_3DES_CBC:
			txf = &swcr_enc_xform_3des;
			goto enccommon;
		case CRYPTO_BLF_CBC:
			txf = &swcr_enc_xform_blf;
			goto enccommon;
		case CRYPTO_CAST_CBC:
			txf = &swcr_enc_xform_cast5;
			goto enccommon;
		case CRYPTO_SKIPJACK_CBC:
			txf = &swcr_enc_xform_skipjack;
			goto enccommon;
		case CRYPTO_RIJNDAEL128_CBC:
			txf = &swcr_enc_xform_rijndael128;
			goto enccommon;
		case CRYPTO_CAMELLIA_CBC:
			txf = &swcr_enc_xform_camellia;
			goto enccommon;
		case CRYPTO_AES_CTR:
			txf = &swcr_enc_xform_aes_ctr;
			goto enccommon;
		case CRYPTO_AES_GCM_16:
			txf = &swcr_enc_xform_aes_gcm;
			goto enccommon;
		case CRYPTO_AES_GMAC:
			txf = &swcr_enc_xform_aes_gmac;
			goto enccommon;
		case CRYPTO_NULL_CBC:
			txf = &swcr_enc_xform_null;
			goto enccommon;
		enccommon:
			/* Expand the key schedule for the chosen cipher. */
			error = txf->setkey(&((*swd)->sw_kschedule),
			    cri->cri_key, cri->cri_klen / 8);
			if (error) {
				swcr_freesession(NULL, i);
				return error;
			}
			(*swd)->sw_exf = txf;
			break;

		case CRYPTO_MD5_HMAC:
			axf = &swcr_auth_hash_hmac_md5;
			goto authcommon;
		case CRYPTO_MD5_HMAC_96:
			axf = &swcr_auth_hash_hmac_md5_96;
			goto authcommon;
		case CRYPTO_SHA1_HMAC:
			axf = &swcr_auth_hash_hmac_sha1;
			goto authcommon;
		case CRYPTO_SHA1_HMAC_96:
			axf = &swcr_auth_hash_hmac_sha1_96;
			goto authcommon;
		case CRYPTO_SHA2_256_HMAC:
			axf = &swcr_auth_hash_hmac_sha2_256;
			goto authcommon;
		case CRYPTO_SHA2_384_HMAC:
			axf = &swcr_auth_hash_hmac_sha2_384;
			goto authcommon;
		case CRYPTO_SHA2_512_HMAC:
			axf = &swcr_auth_hash_hmac_sha2_512;
			goto authcommon;
		case CRYPTO_NULL_HMAC:
			axf = &swcr_auth_hash_null;
			goto authcommon;
		case CRYPTO_RIPEMD160_HMAC:
			axf = &swcr_auth_hash_hmac_ripemd_160;
			goto authcommon;
		case CRYPTO_RIPEMD160_HMAC_96:
			axf = &swcr_auth_hash_hmac_ripemd_160_96;
			goto authcommon;	/* leave this for safety */
		authcommon:
			/*
			 * Precompute the HMAC inner and outer contexts
			 * (key XOR ipad / key XOR opad), restoring the
			 * caller's key bytes afterwards.
			 */
			(*swd)->sw_ictx = malloc(axf->ctxsize,
			    M_CRYPTO_DATA, M_NOWAIT);
			if ((*swd)->sw_ictx == NULL) {
				swcr_freesession(NULL, i);
				return ENOBUFS;
			}

			(*swd)->sw_octx = malloc(axf->ctxsize,
			    M_CRYPTO_DATA, M_NOWAIT);
			if ((*swd)->sw_octx == NULL) {
				swcr_freesession(NULL, i);
				return ENOBUFS;
			}

			for (k = 0; k < cri->cri_klen / 8; k++)
				cri->cri_key[k] ^= HMAC_IPAD_VAL;

			axf->Init((*swd)->sw_ictx);
			axf->Update((*swd)->sw_ictx, cri->cri_key,
			    cri->cri_klen / 8);
			axf->Update((*swd)->sw_ictx, hmac_ipad_buffer,
			    axf->auth_hash->blocksize - (cri->cri_klen / 8));

			for (k = 0; k < cri->cri_klen / 8; k++)
				cri->cri_key[k] ^= (HMAC_IPAD_VAL ^ HMAC_OPAD_VAL);

			axf->Init((*swd)->sw_octx);
			axf->Update((*swd)->sw_octx, cri->cri_key,
			    cri->cri_klen / 8);
			axf->Update((*swd)->sw_octx, hmac_opad_buffer,
			    axf->auth_hash->blocksize - (cri->cri_klen / 8));

			for (k = 0; k < cri->cri_klen / 8; k++)
				cri->cri_key[k] ^= HMAC_OPAD_VAL;
			(*swd)->sw_axf = axf;
			break;

		case CRYPTO_MD5_KPDK:
			axf = &swcr_auth_hash_key_md5;
			goto auth2common;

		case CRYPTO_SHA1_KPDK:
			axf = &swcr_auth_hash_key_sha1;
		auth2common:
			(*swd)->sw_ictx = malloc(axf->ctxsize,
			    M_CRYPTO_DATA, M_NOWAIT);
			if ((*swd)->sw_ictx == NULL) {
				swcr_freesession(NULL, i);
				return ENOBUFS;
			}

			/* Store the key so we can "append" it to the payload */
			(*swd)->sw_octx = malloc(cri->cri_klen / 8, M_CRYPTO_DATA,
			    M_NOWAIT);
			if ((*swd)->sw_octx == NULL) {
				swcr_freesession(NULL, i);
				return ENOBUFS;
			}

			(*swd)->sw_klen = cri->cri_klen / 8;
			memcpy((*swd)->sw_octx, cri->cri_key, cri->cri_klen / 8);
			axf->Init((*swd)->sw_ictx);
			axf->Update((*swd)->sw_ictx, cri->cri_key,
			    cri->cri_klen / 8);
			axf->Final(NULL, (*swd)->sw_ictx);
			(*swd)->sw_axf = axf;
			break;

		case CRYPTO_MD5:
			axf = &swcr_auth_hash_md5;
			goto auth3common;

		case CRYPTO_SHA1:
			axf = &swcr_auth_hash_sha1;
		auth3common:
			/* Plain (unkeyed) hash. */
			(*swd)->sw_ictx = malloc(axf->ctxsize,
			    M_CRYPTO_DATA, M_NOWAIT);
			if ((*swd)->sw_ictx == NULL) {
				swcr_freesession(NULL, i);
				return ENOBUFS;
			}

			axf->Init((*swd)->sw_ictx);
			(*swd)->sw_axf = axf;
			break;

		case CRYPTO_AES_XCBC_MAC_96:
			axf = &swcr_auth_hash_aes_xcbc_mac;
			goto auth4common;
		case CRYPTO_AES_128_GMAC:
			axf = &swcr_auth_hash_gmac_aes_128;
			goto auth4common;
		case CRYPTO_AES_192_GMAC:
			axf = &swcr_auth_hash_gmac_aes_192;
			goto auth4common;
		case CRYPTO_AES_256_GMAC:
			axf = &swcr_auth_hash_gmac_aes_256;
		auth4common:
			/* Keyed MACs with a transform-specific Setkey hook. */
			(*swd)->sw_ictx = malloc(axf->ctxsize,
			    M_CRYPTO_DATA, M_NOWAIT);
			if ((*swd)->sw_ictx == NULL) {
				swcr_freesession(NULL, i);
				return ENOBUFS;
			}
			axf->Init((*swd)->sw_ictx);
			axf->Setkey((*swd)->sw_ictx,
			    cri->cri_key, cri->cri_klen / 8);
			(*swd)->sw_axf = axf;
			break;

		case CRYPTO_DEFLATE_COMP:
			cxf = &swcr_comp_algo_deflate;
			(*swd)->sw_cxf = cxf;
			break;

		case CRYPTO_DEFLATE_COMP_NOGROW:
			cxf = &swcr_comp_algo_deflate_nogrow;
			(*swd)->sw_cxf = cxf;
			break;

		case CRYPTO_GZIP_COMP:
			cxf = &swcr_comp_algo_gzip;
			(*swd)->sw_cxf = cxf;
			break;
		default:
			swcr_freesession(NULL, i);
			return EINVAL;
		}

		(*swd)->sw_alg = cri->cri_alg;
		cri = cri->cri_next;
		swd = &((*swd)->sw_next);
	}
	return 0;
}

/*
 * Free a session.
1040 */ 1041 static int 1042 swcr_freesession(void *arg, u_int64_t tid) 1043 { 1044 struct swcr_data *swd; 1045 const struct swcr_enc_xform *txf; 1046 const struct swcr_auth_hash *axf; 1047 const struct swcr_comp_algo *cxf; 1048 u_int32_t sid = ((u_int32_t) tid) & 0xffffffff; 1049 1050 if (sid > swcr_sesnum || swcr_sessions == NULL || 1051 swcr_sessions[sid] == NULL) 1052 return EINVAL; 1053 1054 /* Silently accept and return */ 1055 if (sid == 0) 1056 return 0; 1057 1058 while ((swd = swcr_sessions[sid]) != NULL) { 1059 swcr_sessions[sid] = swd->sw_next; 1060 1061 switch (swd->sw_alg) { 1062 case CRYPTO_DES_CBC: 1063 case CRYPTO_3DES_CBC: 1064 case CRYPTO_BLF_CBC: 1065 case CRYPTO_CAST_CBC: 1066 case CRYPTO_SKIPJACK_CBC: 1067 case CRYPTO_RIJNDAEL128_CBC: 1068 case CRYPTO_CAMELLIA_CBC: 1069 case CRYPTO_AES_CTR: 1070 case CRYPTO_AES_GCM_16: 1071 case CRYPTO_AES_GMAC: 1072 case CRYPTO_NULL_CBC: 1073 txf = swd->sw_exf; 1074 1075 if (swd->sw_kschedule) 1076 txf->zerokey(&(swd->sw_kschedule)); 1077 break; 1078 1079 case CRYPTO_MD5_HMAC: 1080 case CRYPTO_MD5_HMAC_96: 1081 case CRYPTO_SHA1_HMAC: 1082 case CRYPTO_SHA1_HMAC_96: 1083 case CRYPTO_SHA2_256_HMAC: 1084 case CRYPTO_SHA2_384_HMAC: 1085 case CRYPTO_SHA2_512_HMAC: 1086 case CRYPTO_RIPEMD160_HMAC: 1087 case CRYPTO_RIPEMD160_HMAC_96: 1088 case CRYPTO_NULL_HMAC: 1089 axf = swd->sw_axf; 1090 1091 if (swd->sw_ictx) { 1092 explicit_bzero(swd->sw_ictx, axf->ctxsize); 1093 free(swd->sw_ictx, M_CRYPTO_DATA); 1094 } 1095 if (swd->sw_octx) { 1096 explicit_bzero(swd->sw_octx, axf->ctxsize); 1097 free(swd->sw_octx, M_CRYPTO_DATA); 1098 } 1099 break; 1100 1101 case CRYPTO_MD5_KPDK: 1102 case CRYPTO_SHA1_KPDK: 1103 axf = swd->sw_axf; 1104 1105 if (swd->sw_ictx) { 1106 explicit_bzero(swd->sw_ictx, axf->ctxsize); 1107 free(swd->sw_ictx, M_CRYPTO_DATA); 1108 } 1109 if (swd->sw_octx) { 1110 explicit_bzero(swd->sw_octx, swd->sw_klen); 1111 free(swd->sw_octx, M_CRYPTO_DATA); 1112 } 1113 break; 1114 1115 case CRYPTO_MD5: 1116 case 
CRYPTO_SHA1: 1117 case CRYPTO_AES_XCBC_MAC_96: 1118 case CRYPTO_AES_128_GMAC: 1119 case CRYPTO_AES_192_GMAC: 1120 case CRYPTO_AES_256_GMAC: 1121 axf = swd->sw_axf; 1122 1123 if (swd->sw_ictx) { 1124 explicit_bzero(swd->sw_ictx, axf->ctxsize); 1125 free(swd->sw_ictx, M_CRYPTO_DATA); 1126 } 1127 break; 1128 1129 case CRYPTO_DEFLATE_COMP: 1130 case CRYPTO_DEFLATE_COMP_NOGROW: 1131 case CRYPTO_GZIP_COMP: 1132 cxf = swd->sw_cxf; 1133 break; 1134 } 1135 1136 free(swd, M_CRYPTO_DATA); 1137 } 1138 return 0; 1139 } 1140 1141 /* 1142 * Process a software request. 1143 */ 1144 static int 1145 swcr_process(void *arg, struct cryptop *crp, int hint) 1146 { 1147 struct cryptodesc *crd; 1148 struct swcr_data *sw; 1149 u_int32_t lid; 1150 int type; 1151 1152 /* Sanity check */ 1153 if (crp == NULL) 1154 return EINVAL; 1155 1156 if (crp->crp_desc == NULL || crp->crp_buf == NULL) { 1157 crp->crp_etype = EINVAL; 1158 goto done; 1159 } 1160 1161 lid = crp->crp_sid & 0xffffffff; 1162 if (lid >= swcr_sesnum || lid == 0 || swcr_sessions[lid] == NULL) { 1163 crp->crp_etype = ENOENT; 1164 goto done; 1165 } 1166 1167 if (crp->crp_flags & CRYPTO_F_IMBUF) { 1168 type = CRYPTO_BUF_MBUF; 1169 } else if (crp->crp_flags & CRYPTO_F_IOV) { 1170 type = CRYPTO_BUF_IOV; 1171 } else { 1172 type = CRYPTO_BUF_CONTIG; 1173 } 1174 1175 /* Go through crypto descriptors, processing as we go */ 1176 for (crd = crp->crp_desc; crd; crd = crd->crd_next) { 1177 /* 1178 * Find the crypto context. 1179 * 1180 * XXX Note that the logic here prevents us from having 1181 * XXX the same algorithm multiple times in a session 1182 * XXX (or rather, we can but it won't give us the right 1183 * XXX results). To do that, we'd need some way of differentiating 1184 * XXX between the various instances of an algorithm (so we can 1185 * XXX locate the correct crypto context). 1186 */ 1187 for (sw = swcr_sessions[lid]; 1188 sw && sw->sw_alg != crd->crd_alg; 1189 sw = sw->sw_next) 1190 ; 1191 1192 /* No such context ? 
*/ 1193 if (sw == NULL) { 1194 crp->crp_etype = EINVAL; 1195 goto done; 1196 } 1197 1198 switch (sw->sw_alg) { 1199 case CRYPTO_DES_CBC: 1200 case CRYPTO_3DES_CBC: 1201 case CRYPTO_BLF_CBC: 1202 case CRYPTO_CAST_CBC: 1203 case CRYPTO_SKIPJACK_CBC: 1204 case CRYPTO_RIJNDAEL128_CBC: 1205 case CRYPTO_CAMELLIA_CBC: 1206 case CRYPTO_AES_CTR: 1207 if ((crp->crp_etype = swcr_encdec(crd, sw, 1208 crp->crp_buf, type)) != 0) 1209 goto done; 1210 break; 1211 case CRYPTO_NULL_CBC: 1212 crp->crp_etype = 0; 1213 break; 1214 case CRYPTO_MD5_HMAC: 1215 case CRYPTO_MD5_HMAC_96: 1216 case CRYPTO_SHA1_HMAC: 1217 case CRYPTO_SHA1_HMAC_96: 1218 case CRYPTO_SHA2_256_HMAC: 1219 case CRYPTO_SHA2_384_HMAC: 1220 case CRYPTO_SHA2_512_HMAC: 1221 case CRYPTO_RIPEMD160_HMAC: 1222 case CRYPTO_RIPEMD160_HMAC_96: 1223 case CRYPTO_NULL_HMAC: 1224 case CRYPTO_MD5_KPDK: 1225 case CRYPTO_SHA1_KPDK: 1226 case CRYPTO_MD5: 1227 case CRYPTO_SHA1: 1228 case CRYPTO_AES_XCBC_MAC_96: 1229 if ((crp->crp_etype = swcr_authcompute(crp, crd, sw, 1230 crp->crp_buf, type)) != 0) 1231 goto done; 1232 break; 1233 1234 case CRYPTO_AES_GCM_16: 1235 case CRYPTO_AES_GMAC: 1236 case CRYPTO_AES_128_GMAC: 1237 case CRYPTO_AES_192_GMAC: 1238 case CRYPTO_AES_256_GMAC: 1239 crp->crp_etype = swcr_combined(crp, type); 1240 goto done; 1241 1242 case CRYPTO_DEFLATE_COMP: 1243 case CRYPTO_DEFLATE_COMP_NOGROW: 1244 case CRYPTO_GZIP_COMP: 1245 DPRINTF(("swcr_process: compdec for %d\n", sw->sw_alg)); 1246 if ((crp->crp_etype = swcr_compdec(crd, sw, 1247 crp->crp_buf, type, &crp->crp_olen)) != 0) 1248 goto done; 1249 break; 1250 1251 default: 1252 /* Unknown/unsupported algorithm */ 1253 crp->crp_etype = EINVAL; 1254 goto done; 1255 } 1256 } 1257 1258 done: 1259 DPRINTF(("request %p done\n", crp)); 1260 crypto_done(crp); 1261 return 0; 1262 } 1263 1264 static void 1265 swcr_init(void) 1266 { 1267 swcr_id = crypto_get_driverid(CRYPTOCAP_F_SOFTWARE); 1268 if (swcr_id < 0) { 1269 /* This should never happen */ 1270 panic("Software crypto 
device cannot initialize!"); 1271 } 1272 1273 crypto_register(swcr_id, CRYPTO_DES_CBC, 1274 0, 0, swcr_newsession, swcr_freesession, swcr_process, NULL); 1275 #define REGISTER(alg) \ 1276 crypto_register(swcr_id, alg, 0, 0, NULL, NULL, NULL, NULL) 1277 1278 REGISTER(CRYPTO_3DES_CBC); 1279 REGISTER(CRYPTO_BLF_CBC); 1280 REGISTER(CRYPTO_CAST_CBC); 1281 REGISTER(CRYPTO_SKIPJACK_CBC); 1282 REGISTER(CRYPTO_CAMELLIA_CBC); 1283 REGISTER(CRYPTO_AES_CTR); 1284 REGISTER(CRYPTO_AES_GCM_16); 1285 REGISTER(CRYPTO_AES_GMAC); 1286 REGISTER(CRYPTO_NULL_CBC); 1287 REGISTER(CRYPTO_MD5_HMAC); 1288 REGISTER(CRYPTO_MD5_HMAC_96); 1289 REGISTER(CRYPTO_SHA1_HMAC); 1290 REGISTER(CRYPTO_SHA1_HMAC_96); 1291 REGISTER(CRYPTO_SHA2_256_HMAC); 1292 REGISTER(CRYPTO_SHA2_384_HMAC); 1293 REGISTER(CRYPTO_SHA2_512_HMAC); 1294 REGISTER(CRYPTO_RIPEMD160_HMAC); 1295 REGISTER(CRYPTO_RIPEMD160_HMAC_96); 1296 REGISTER(CRYPTO_NULL_HMAC); 1297 REGISTER(CRYPTO_MD5_KPDK); 1298 REGISTER(CRYPTO_SHA1_KPDK); 1299 REGISTER(CRYPTO_MD5); 1300 REGISTER(CRYPTO_SHA1); 1301 REGISTER(CRYPTO_AES_XCBC_MAC_96); 1302 REGISTER(CRYPTO_AES_128_GMAC); 1303 REGISTER(CRYPTO_AES_192_GMAC); 1304 REGISTER(CRYPTO_AES_256_GMAC); 1305 REGISTER(CRYPTO_RIJNDAEL128_CBC); 1306 REGISTER(CRYPTO_DEFLATE_COMP); 1307 REGISTER(CRYPTO_DEFLATE_COMP_NOGROW); 1308 REGISTER(CRYPTO_GZIP_COMP); 1309 #undef REGISTER 1310 } 1311 1312 1313 /* 1314 * Pseudo-device init routine for software crypto. 1315 */ 1316 void swcryptoattach(int); 1317 1318 void 1319 swcryptoattach(int num) 1320 { 1321 1322 swcr_init(); 1323 } 1324