1 /* $OpenBSD: crypto.c,v 1.44 2003/06/03 15:28:06 beck Exp $ */ 2 /* 3 * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu) 4 * 5 * This code was written by Angelos D. Keromytis in Athens, Greece, in 6 * February 2000. Network Security Technologies Inc. (NSTI) kindly 7 * supported the development of this code. 8 * 9 * Copyright (c) 2000, 2001 Angelos D. Keromytis 10 * 11 * Permission to use, copy, and modify this software with or without fee 12 * is hereby granted, provided that this entire notice is included in 13 * all source code copies of any software which is or includes a copy or 14 * modification of this software. 15 * 16 * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR 17 * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY 18 * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE 19 * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR 20 * PURPOSE. 21 */ 22 23 #include <sys/param.h> 24 #include <sys/systm.h> 25 #include <sys/malloc.h> 26 #include <sys/proc.h> 27 #include <sys/pool.h> 28 #include <crypto/cryptodev.h> 29 30 struct cryptocap *crypto_drivers = NULL; 31 int crypto_drivers_num = 0; 32 33 struct pool cryptop_pool; 34 struct pool cryptodesc_pool; 35 int crypto_pool_initialized = 0; 36 37 struct cryptop *crp_req_queue = NULL; 38 struct cryptop **crp_req_queue_tail = NULL; 39 40 struct cryptop *crp_ret_queue = NULL; 41 struct cryptop **crp_ret_queue_tail = NULL; 42 43 struct cryptkop *krp_req_queue = NULL; 44 struct cryptkop **krp_req_queue_tail = NULL; 45 46 struct cryptkop *krp_ret_queue = NULL; 47 struct cryptkop **krp_ret_queue_tail = NULL; 48 49 /* 50 * Create a new session. 51 */ 52 int 53 crypto_newsession(u_int64_t *sid, struct cryptoini *cri, int hard) 54 { 55 u_int32_t hid, lid, hid2 = -1; 56 struct cryptocap *cpc; 57 struct cryptoini *cr; 58 int err, s, turn = 0; 59 60 if (crypto_drivers == NULL) 61 return EINVAL; 62 63 s = splimp(); 64 65 /* 66 * The algorithm we use here is pretty stupid; just use the 67 * first driver that supports all the algorithms we need. Do 68 * a double-pass over all the drivers, ignoring software ones 69 * at first, to deal with cases of drivers that register after 70 * the software one(s) --- e.g., PCMCIA crypto cards. 71 * 72 * XXX We need more smarts here (in real life too, but that's 73 * XXX another story altogether). 74 */ 75 do { 76 for (hid = 0; hid < crypto_drivers_num; hid++) { 77 cpc = &crypto_drivers[hid]; 78 79 /* 80 * If it's not initialized or has remaining sessions 81 * referencing it, skip. 82 */ 83 if (cpc->cc_newsession == NULL || 84 (cpc->cc_flags & CRYPTOCAP_F_CLEANUP)) 85 continue; 86 87 if (cpc->cc_flags & CRYPTOCAP_F_SOFTWARE) { 88 /* 89 * First round of search, ignore 90 * software drivers. 91 */ 92 if (turn == 0) 93 continue; 94 } else { /* !CRYPTOCAP_F_SOFTWARE */ 95 /* Second round of search, only software. */ 96 if (turn == 1) 97 continue; 98 } 99 100 /* See if all the algorithms are supported. */ 101 for (cr = cri; cr; cr = cr->cri_next) { 102 if (cpc->cc_alg[cr->cri_alg] == 0) 103 break; 104 } 105 106 /* 107 * If even one algorithm is not supported, 108 * keep searching. 109 */ 110 if (cr != NULL) 111 continue; 112 113 /* 114 * If we had a previous match, see how it compares 115 * to this one. Keep "remembering" whichever is 116 * the best of the two. 117 */ 118 if (hid2 != -1) { 119 /* 120 * Compare session numbers, pick the one 121 * with the lowest. 122 * XXX Need better metrics, this will 123 * XXX just do un-weighted round-robin. 124 */ 125 if (crypto_drivers[hid].cc_sessions <= 126 crypto_drivers[hid2].cc_sessions) 127 hid2 = hid; 128 } else { 129 /* 130 * Remember this one, for future 131 * comparisons. 132 */ 133 hid2 = hid; 134 } 135 } 136 137 /* 138 * If we found something worth remembering, leave. The 139 * side-effect is that we will always prefer a hardware 140 * driver over the software one. 141 */ 142 if (hid2 != -1) 143 break; 144 145 turn++; 146 147 /* If we only want hardware drivers, don't do second pass. */ 148 } while (turn <= 2 && hard == 0); 149 150 hid = hid2; 151 152 /* 153 * Can't do everything in one session. 154 * 155 * XXX Fix this. We need to inject a "virtual" session 156 * XXX layer right about here. 157 */ 158 159 if (hid == -1) { 160 splx(s); 161 return EINVAL; 162 } 163 164 /* Call the driver initialization routine. */ 165 lid = hid; /* Pass the driver ID. */ 166 err = crypto_drivers[hid].cc_newsession(&lid, cri); 167 if (err == 0) { 168 (*sid) = hid; 169 (*sid) <<= 32; 170 (*sid) |= (lid & 0xffffffff); 171 crypto_drivers[hid].cc_sessions++; 172 } 173 174 splx(s); 175 return err; 176 } 177 178 /* 179 * Delete an existing session (or a reserved session on an unregistered 180 * driver). 181 */ 182 int 183 crypto_freesession(u_int64_t sid) 184 { 185 int err = 0, s; 186 u_int32_t hid; 187 188 if (crypto_drivers == NULL) 189 return EINVAL; 190 191 /* Determine two IDs. */ 192 hid = (sid >> 32) & 0xffffffff; 193 194 if (hid >= crypto_drivers_num) 195 return ENOENT; 196 197 s = splimp(); 198 199 if (crypto_drivers[hid].cc_sessions) 200 crypto_drivers[hid].cc_sessions--; 201 202 /* Call the driver cleanup routine, if available. */ 203 if (crypto_drivers[hid].cc_freesession) 204 err = crypto_drivers[hid].cc_freesession(sid); 205 206 /* 207 * If this was the last session of a driver marked as invalid, 208 * make the entry available for reuse. 209 */ 210 if ((crypto_drivers[hid].cc_flags & CRYPTOCAP_F_CLEANUP) && 211 crypto_drivers[hid].cc_sessions == 0) 212 bzero(&crypto_drivers[hid], sizeof(struct cryptocap)); 213 214 splx(s); 215 return err; 216 } 217 218 /* 219 * Find an empty slot. 220 */ 221 int32_t 222 crypto_get_driverid(u_int8_t flags) 223 { 224 struct cryptocap *newdrv; 225 int i, s = splimp(); 226 227 if (crypto_drivers_num == 0) { 228 crypto_drivers_num = CRYPTO_DRIVERS_INITIAL; 229 crypto_drivers = malloc(crypto_drivers_num * 230 sizeof(struct cryptocap), M_CRYPTO_DATA, M_NOWAIT); 231 if (crypto_drivers == NULL) { 232 splx(s); 233 crypto_drivers_num = 0; 234 return -1; 235 } 236 237 bzero(crypto_drivers, crypto_drivers_num * 238 sizeof(struct cryptocap)); 239 } 240 241 for (i = 0; i < crypto_drivers_num; i++) { 242 if (crypto_drivers[i].cc_process == NULL && 243 !(crypto_drivers[i].cc_flags & CRYPTOCAP_F_CLEANUP) && 244 crypto_drivers[i].cc_sessions == 0) { 245 crypto_drivers[i].cc_sessions = 1; /* Mark */ 246 crypto_drivers[i].cc_flags = flags; 247 splx(s); 248 return i; 249 } 250 } 251 252 /* Out of entries, allocate some more. */ 253 if (i == crypto_drivers_num) { 254 /* Be careful about wrap-around. */ 255 if (2 * crypto_drivers_num <= crypto_drivers_num) { 256 splx(s); 257 return -1; 258 } 259 260 newdrv = malloc(2 * crypto_drivers_num * 261 sizeof(struct cryptocap), M_CRYPTO_DATA, M_NOWAIT); 262 if (newdrv == NULL) { 263 splx(s); 264 return -1; 265 } 266 267 bcopy(crypto_drivers, newdrv, 268 crypto_drivers_num * sizeof(struct cryptocap)); 269 bzero(&newdrv[crypto_drivers_num], 270 crypto_drivers_num * sizeof(struct cryptocap)); 271 272 newdrv[i].cc_sessions = 1; /* Mark */ 273 newdrv[i].cc_flags = flags; 274 crypto_drivers_num *= 2; 275 276 free(crypto_drivers, M_CRYPTO_DATA); 277 crypto_drivers = newdrv; 278 splx(s); 279 return i; 280 } 281 282 /* Shouldn't really get here... */ 283 splx(s); 284 return -1; 285 } 286 287 /* 288 * Register a crypto driver. It should be called once for each algorithm 289 * supported by the driver. 290 */ 291 int 292 crypto_kregister(u_int32_t driverid, int *kalg, 293 int (*kprocess)(struct cryptkop *)) 294 { 295 int s, i; 296 297 if (driverid >= crypto_drivers_num || kalg == NULL || 298 crypto_drivers == NULL) 299 return EINVAL; 300 301 s = splimp(); 302 303 for (i = 0; i < CRK_ALGORITHM_MAX; i++) { 304 /* 305 * XXX Do some performance testing to determine 306 * placing. We probably need an auxiliary data 307 * structure that describes relative performances. 308 */ 309 310 crypto_drivers[driverid].cc_kalg[i] = kalg[i]; 311 } 312 313 crypto_drivers[driverid].cc_kprocess = kprocess; 314 315 splx(s); 316 return 0; 317 } 318 319 /* Register a crypto driver. */ 320 int 321 crypto_register(u_int32_t driverid, int *alg, 322 int (*newses)(u_int32_t *, struct cryptoini *), 323 int (*freeses)(u_int64_t), int (*process)(struct cryptop *)) 324 { 325 int s, i; 326 327 328 if (driverid >= crypto_drivers_num || alg == NULL || 329 crypto_drivers == NULL) 330 return EINVAL; 331 332 s = splimp(); 333 334 for (i = 0; i < CRYPTO_ALGORITHM_ALL; i++) { 335 /* 336 * XXX Do some performance testing to determine 337 * placing. We probably need an auxiliary data 338 * structure that describes relative performances. 339 */ 340 341 crypto_drivers[driverid].cc_alg[i] = alg[i]; 342 } 343 344 345 crypto_drivers[driverid].cc_newsession = newses; 346 crypto_drivers[driverid].cc_process = process; 347 crypto_drivers[driverid].cc_freesession = freeses; 348 crypto_drivers[driverid].cc_sessions = 0; /* Unmark */ 349 350 splx(s); 351 352 return 0; 353 } 354 355 /* 356 * Unregister a crypto driver. If there are pending sessions using it, 357 * leave enough information around so that subsequent calls using those 358 * sessions will correctly detect the driver being unregistered and reroute 359 * the request. 360 */ 361 int 362 crypto_unregister(u_int32_t driverid, int alg) 363 { 364 int i = CRYPTO_ALGORITHM_MAX + 1, s = splimp(); 365 u_int32_t ses; 366 367 /* Sanity checks. */ 368 if (driverid >= crypto_drivers_num || crypto_drivers == NULL || 369 ((alg <= 0 || alg > CRYPTO_ALGORITHM_MAX) && 370 alg != CRYPTO_ALGORITHM_ALL) || 371 crypto_drivers[driverid].cc_alg[alg] == 0) { 372 splx(s); 373 return EINVAL; 374 } 375 376 if (alg != CRYPTO_ALGORITHM_ALL) { 377 crypto_drivers[driverid].cc_alg[alg] = 0; 378 379 /* Was this the last algorithm ? */ 380 for (i = 1; i <= CRYPTO_ALGORITHM_MAX; i++) 381 if (crypto_drivers[driverid].cc_alg[i] != 0) 382 break; 383 } 384 385 /* 386 * If a driver unregistered its last algorithm or all of them 387 * (alg == CRYPTO_ALGORITHM_ALL), cleanup its entry. 388 */ 389 if (i == CRYPTO_ALGORITHM_MAX + 1 || alg == CRYPTO_ALGORITHM_ALL) { 390 ses = crypto_drivers[driverid].cc_sessions; 391 bzero(&crypto_drivers[driverid], sizeof(struct cryptocap)); 392 if (ses != 0) { 393 /* 394 * If there are pending sessions, just mark as invalid. 395 */ 396 crypto_drivers[driverid].cc_flags |= CRYPTOCAP_F_CLEANUP; 397 crypto_drivers[driverid].cc_sessions = ses; 398 } 399 } 400 splx(s); 401 return 0; 402 } 403 404 /* 405 * Add crypto request to a queue, to be processed by a kernel thread. 406 */ 407 int 408 crypto_dispatch(struct cryptop *crp) 409 { 410 int s = splimp(); 411 u_int32_t hid; 412 413 /* 414 * Keep track of ops per driver, for coallescing purposes. If 415 * we have been given an invalid hid, we'll deal with in the 416 * crypto_invoke(), through session migration. 417 */ 418 hid = (crp->crp_sid >> 32) & 0xffffffff; 419 if (hid < crypto_drivers_num) 420 crypto_drivers[hid].cc_queued++; 421 422 crp->crp_next = NULL; 423 if (crp_req_queue == NULL) { 424 crp_req_queue = crp; 425 crp_req_queue_tail = &(crp->crp_next); 426 splx(s); 427 wakeup((caddr_t) &crp_req_queue); /* Shared wait channel. */ 428 } else { 429 *crp_req_queue_tail = crp; 430 crp_req_queue_tail = &(crp->crp_next); 431 splx(s); 432 } 433 return 0; 434 } 435 436 int 437 crypto_kdispatch(struct cryptkop *krp) 438 { 439 int s = splimp(); 440 441 krp->krp_next = NULL; 442 if (krp_req_queue == NULL) { 443 krp_req_queue = krp; 444 krp_req_queue_tail = &(krp->krp_next); 445 splx(s); 446 wakeup((caddr_t) &crp_req_queue); /* Shared wait channel. */ 447 } else { 448 *krp_req_queue_tail = krp; 449 krp_req_queue_tail = &(krp->krp_next); 450 splx(s); 451 } 452 return 0; 453 } 454 455 /* 456 * Dispatch an asymmetric crypto request to the appropriate crypto devices. 457 */ 458 int 459 crypto_kinvoke(struct cryptkop *krp) 460 { 461 extern int cryptodevallowsoft; 462 u_int32_t hid; 463 int error; 464 465 /* Sanity checks. */ 466 if (krp == NULL || krp->krp_callback == NULL) 467 return (EINVAL); 468 469 for (hid = 0; hid < crypto_drivers_num; hid++) { 470 if ((crypto_drivers[hid].cc_flags & CRYPTOCAP_F_SOFTWARE) && 471 cryptodevallowsoft == 0) 472 continue; 473 if (crypto_drivers[hid].cc_kprocess == NULL) 474 continue; 475 if ((crypto_drivers[hid].cc_kalg[krp->krp_op] & 476 CRYPTO_ALG_FLAG_SUPPORTED) == 0) 477 continue; 478 break; 479 } 480 481 if (hid == crypto_drivers_num) { 482 krp->krp_status = ENODEV; 483 crypto_kdone(krp); 484 return (0); 485 } 486 487 krp->krp_hid = hid; 488 489 crypto_drivers[hid].cc_koperations++; 490 491 error = crypto_drivers[hid].cc_kprocess(krp); 492 if (error) { 493 krp->krp_status = error; 494 crypto_kdone(krp); 495 } 496 return (0); 497 } 498 499 /* 500 * Dispatch a crypto request to the appropriate crypto devices. 501 */ 502 int 503 crypto_invoke(struct cryptop *crp) 504 { 505 struct cryptodesc *crd; 506 u_int64_t nid; 507 u_int32_t hid; 508 int error; 509 510 /* Sanity checks. */ 511 if (crp == NULL || crp->crp_callback == NULL) 512 return EINVAL; 513 514 if (crp->crp_desc == NULL || crypto_drivers == NULL) { 515 crp->crp_etype = EINVAL; 516 crypto_done(crp); 517 return 0; 518 } 519 520 hid = (crp->crp_sid >> 32) & 0xffffffff; 521 if (hid >= crypto_drivers_num) 522 goto migrate; 523 524 crypto_drivers[hid].cc_queued--; 525 526 if (crypto_drivers[hid].cc_flags & CRYPTOCAP_F_CLEANUP) { 527 crypto_freesession(crp->crp_sid); 528 goto migrate; 529 } 530 531 if (crypto_drivers[hid].cc_process == NULL) 532 goto migrate; 533 534 crypto_drivers[hid].cc_operations++; 535 crypto_drivers[hid].cc_bytes += crp->crp_ilen; 536 537 error = crypto_drivers[hid].cc_process(crp); 538 if (error) { 539 if (error == ERESTART) { 540 /* Unregister driver and migrate session. */ 541 crypto_unregister(hid, CRYPTO_ALGORITHM_ALL); 542 goto migrate; 543 } else { 544 crp->crp_etype = error; 545 crypto_done(crp); 546 } 547 } 548 549 return 0; 550 551 migrate: 552 /* Migrate session. */ 553 for (crd = crp->crp_desc; crd->crd_next; crd = crd->crd_next) 554 crd->CRD_INI.cri_next = &(crd->crd_next->CRD_INI); 555 556 if (crypto_newsession(&nid, &(crp->crp_desc->CRD_INI), 0) == 0) 557 crp->crp_sid = nid; 558 559 crp->crp_etype = EAGAIN; 560 crypto_done(crp); 561 return 0; 562 } 563 564 /* 565 * Release a set of crypto descriptors. 566 */ 567 void 568 crypto_freereq(struct cryptop *crp) 569 { 570 struct cryptodesc *crd; 571 int s; 572 573 if (crp == NULL) 574 return; 575 576 s = splimp(); 577 578 while ((crd = crp->crp_desc) != NULL) { 579 crp->crp_desc = crd->crd_next; 580 pool_put(&cryptodesc_pool, crd); 581 } 582 583 pool_put(&cryptop_pool, crp); 584 splx(s); 585 } 586 587 /* 588 * Acquire a set of crypto descriptors. 589 */ 590 struct cryptop * 591 crypto_getreq(int num) 592 { 593 struct cryptodesc *crd; 594 struct cryptop *crp; 595 int s = splimp(); 596 597 if (crypto_pool_initialized == 0) { 598 pool_init(&cryptop_pool, sizeof(struct cryptop), 0, 0, 599 0, "cryptop", NULL); 600 pool_init(&cryptodesc_pool, sizeof(struct cryptodesc), 0, 0, 601 0, "cryptodesc", NULL); 602 crypto_pool_initialized = 1; 603 } 604 605 crp = pool_get(&cryptop_pool, 0); 606 if (crp == NULL) { 607 splx(s); 608 return NULL; 609 } 610 bzero(crp, sizeof(struct cryptop)); 611 612 while (num--) { 613 crd = pool_get(&cryptodesc_pool, 0); 614 if (crd == NULL) { 615 splx(s); 616 crypto_freereq(crp); 617 return NULL; 618 } 619 620 bzero(crd, sizeof(struct cryptodesc)); 621 crd->crd_next = crp->crp_desc; 622 crp->crp_desc = crd; 623 } 624 625 splx(s); 626 return crp; 627 } 628 629 /* 630 * Crypto thread, runs as a kernel thread to process crypto requests. 631 */ 632 void 633 crypto_thread(void) 634 { 635 struct cryptop *crp, *crpt; 636 struct cryptkop *krp, *krpt; 637 int s; 638 639 s = splimp(); 640 641 for (;;) { 642 crp = crp_req_queue; 643 krp = krp_req_queue; 644 crpt = crp_ret_queue; 645 krpt = krp_ret_queue; 646 if (crp == NULL && krp == NULL && 647 crpt == NULL && krpt == NULL) { 648 (void) tsleep(&crp_req_queue, PLOCK, "crypto_wait", 0); 649 continue; 650 } 651 652 if (crp) { 653 /* Remove from the queue. */ 654 crp_req_queue = crp->crp_next; 655 crypto_invoke(crp); 656 } 657 if (krp) { 658 /* Remove from the queue. */ 659 krp_req_queue = krp->krp_next; 660 crypto_kinvoke(krp); 661 } 662 if (crpt) { 663 /* Remove from the queue. */ 664 crp_ret_queue = crpt->crp_next; 665 splx(s); 666 crpt->crp_callback(crpt); 667 splimp(); 668 } 669 if (krpt) { 670 /* Remove from the queue. */ 671 krp_ret_queue = krpt->krp_next; 672 /* 673 * Cheat. For public key ops, we know that 674 * all that's done is a wakeup() for the 675 * userland process, so don't bother to 676 * change the processor priority. 677 */ 678 krpt->krp_callback(krpt); 679 } 680 } 681 } 682 683 /* 684 * Invoke the callback on behalf of the driver. 685 */ 686 void 687 crypto_done(struct cryptop *crp) 688 { 689 int s; 690 691 if (crp->crp_flags & CRYPTO_F_NOQUEUE) { 692 /* not from the crypto queue, wakeup the userland 693 * process 694 */ 695 crp->crp_flags |= CRYPTO_F_DONE; 696 crp->crp_callback(crp); 697 } else { 698 s = splimp(); 699 crp->crp_flags |= CRYPTO_F_DONE; 700 crp->crp_next = NULL; 701 if (crp_ret_queue == NULL) { 702 crp_ret_queue = crp; 703 crp_ret_queue_tail = &(crp->crp_next); 704 splx(s); 705 wakeup((caddr_t) &crp_req_queue); /* Shared wait channel. */ 706 } else { 707 *crp_ret_queue_tail = crp; 708 crp_ret_queue_tail = &(crp->crp_next); 709 splx(s); 710 } 711 } 712 } 713 714 /* 715 * Invoke the callback on behalf of the driver. 716 */ 717 void 718 crypto_kdone(struct cryptkop *krp) 719 { 720 int s = splimp(); 721 722 krp->krp_next = NULL; 723 if (krp_ret_queue == NULL) { 724 krp_ret_queue = krp; 725 krp_ret_queue_tail = &(krp->krp_next); 726 splx(s); 727 wakeup((caddr_t) &crp_req_queue); /* Shared wait channel. */ 728 } else { 729 *krp_ret_queue_tail = krp; 730 krp_ret_queue_tail = &(krp->krp_next); 731 splx(s); 732 } 733 } 734 735 int 736 crypto_getfeat(int *featp) 737 { 738 extern int cryptodevallowsoft, userasymcrypto; 739 int hid, kalg, feat = 0; 740 741 if (userasymcrypto == 0) 742 goto out; 743 for (hid = 0; hid < crypto_drivers_num; hid++) { 744 if ((crypto_drivers[hid].cc_flags & CRYPTOCAP_F_SOFTWARE) && 745 cryptodevallowsoft == 0) { 746 continue; 747 } 748 if (crypto_drivers[hid].cc_kprocess == NULL) 749 continue; 750 for (kalg = 0; kalg < CRK_ALGORITHM_MAX; kalg++) 751 if ((crypto_drivers[hid].cc_kalg[kalg] & 752 CRYPTO_ALG_FLAG_SUPPORTED) != 0) 753 feat |= 1 << kalg; 754 } 755 out: 756 *featp = feat; 757 return (0); 758 } 759