/*	$OpenBSD: crypto.c,v 1.53 2009/09/03 07:47:27 dlg Exp $	*/
/*
 * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu)
 *
 * This code was written by Angelos D. Keromytis in Athens, Greece, in
 * February 2000. Network Security Technologies Inc. (NSTI) kindly
 * supported the development of this code.
 *
 * Copyright (c) 2000, 2001 Angelos D. Keromytis
 *
 * Permission to use, copy, and modify this software with or without fee
 * is hereby granted, provided that this entire notice is included in
 * all source code copies of any software which is or includes a copy or
 * modification of this software.
 *
 * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
 * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
 * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
 * PURPOSE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/pool.h>

#include <crypto/cryptodev.h>

void init_crypto(void);

struct cryptocap *crypto_drivers = NULL;
int crypto_drivers_num = 0;

struct pool cryptop_pool;
struct pool cryptodesc_pool;
int crypto_pool_initialized = 0;

struct workq *crypto_workq;

/*
 * Create a new session.
 */
int
crypto_newsession(u_int64_t *sid, struct cryptoini *cri, int hard)
{
	u_int32_t hid, lid, hid2 = -1;
	struct cryptocap *cpc;
	struct cryptoini *cr;
	int err, s, turn = 0;

	if (crypto_drivers == NULL)
		return EINVAL;

	s = splvm();

	/*
	 * The algorithm we use here is pretty stupid; just use the
	 * first driver that supports all the algorithms we need. Do
	 * a double-pass over all the drivers, ignoring software ones
	 * at first, to deal with cases of drivers that register after
	 * the software one(s) --- e.g., PCMCIA crypto cards.
	 *
	 * XXX We need more smarts here (in real life too, but that's
	 * XXX another story altogether).
	 */
	do {
		for (hid = 0; hid < crypto_drivers_num; hid++) {
			cpc = &crypto_drivers[hid];

			/*
			 * If it's not initialized or has remaining sessions
			 * referencing it, skip.
			 */
			if (cpc->cc_newsession == NULL ||
			    (cpc->cc_flags & CRYPTOCAP_F_CLEANUP))
				continue;

			if (cpc->cc_flags & CRYPTOCAP_F_SOFTWARE) {
				/*
				 * First round of search, ignore
				 * software drivers.
				 */
				if (turn == 0)
					continue;
			} else { /* !CRYPTOCAP_F_SOFTWARE */
				/* Second round of search, only software. */
				if (turn == 1)
					continue;
			}

			/* See if all the algorithms are supported. */
			for (cr = cri; cr; cr = cr->cri_next) {
				if (cpc->cc_alg[cr->cri_alg] == 0)
					break;
			}

			/*
			 * If even one algorithm is not supported,
			 * keep searching.
			 */
			if (cr != NULL)
				continue;

			/*
			 * If we had a previous match, see how it compares
			 * to this one. Keep "remembering" whichever is
			 * the best of the two.
			 */
			if (hid2 != -1) {
				/*
				 * Compare session numbers, pick the one
				 * with the lowest.
				 * XXX Need better metrics, this will
				 * XXX just do un-weighted round-robin.
				 */
				if (crypto_drivers[hid].cc_sessions <=
				    crypto_drivers[hid2].cc_sessions)
					hid2 = hid;
			} else {
				/*
				 * Remember this one, for future
				 * comparisons.
				 */
				hid2 = hid;
			}
		}

		/*
		 * If we found something worth remembering, leave. The
		 * side-effect is that we will always prefer a hardware
		 * driver over the software one.
		 */
		if (hid2 != -1)
			break;

		turn++;

		/* If we only want hardware drivers, don't do second pass. */
	} while (turn <= 2 && hard == 0);

	hid = hid2;

	/*
	 * Can't do everything in one session.
	 *
	 * XXX Fix this. We need to inject a "virtual" session
	 * XXX layer right about here.
	 */

	if (hid == -1) {
		splx(s);
		return EINVAL;
	}

	/* Call the driver initialization routine. */
	lid = hid;		/* Pass the driver ID. */
	err = crypto_drivers[hid].cc_newsession(&lid, cri);
	if (err == 0) {
		(*sid) = hid;
		(*sid) <<= 32;
		(*sid) |= (lid & 0xffffffff);
		crypto_drivers[hid].cc_sessions++;
	}

	splx(s);
	return err;
}
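
/*
 * Illustrative sketch (not part of the original file): how a consumer
 * such as the IPsec stack might chain two cryptoini structures to ask
 * for encryption plus authentication in a single session.  The key
 * material and algorithm constants below are placeholders; any pair of
 * algorithms advertised by a registered driver works the same way.
 * The returned 64-bit sid packs the driver ID in the upper 32 bits and
 * the driver's own session ID in the lower 32 bits, as set up above.
 */
#if 0
int
example_session_setup(u_int64_t *sidp, caddr_t enckey, caddr_t mackey)
{
	struct cryptoini crie, cria;

	bzero(&crie, sizeof(crie));
	bzero(&cria, sizeof(cria));

	crie.cri_alg = CRYPTO_3DES_CBC;		/* placeholder cipher */
	crie.cri_klen = 192;			/* key length in bits */
	crie.cri_key = enckey;
	crie.cri_next = &cria;			/* chain the MAC request */

	cria.cri_alg = CRYPTO_SHA1_HMAC;	/* placeholder MAC */
	cria.cri_klen = 160;
	cria.cri_key = mackey;
	cria.cri_next = NULL;

	/* Prefer hardware, but allow software fallback (hard == 0). */
	return crypto_newsession(sidp, &crie, 0);
}
#endif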

/*
 * Delete an existing session (or a reserved session on an unregistered
 * driver).
 */
int
crypto_freesession(u_int64_t sid)
{
	int err = 0, s;
	u_int32_t hid;

	if (crypto_drivers == NULL)
		return EINVAL;

	/* Determine two IDs. */
	hid = (sid >> 32) & 0xffffffff;

	if (hid >= crypto_drivers_num)
		return ENOENT;

	s = splvm();

	if (crypto_drivers[hid].cc_sessions)
		crypto_drivers[hid].cc_sessions--;

	/* Call the driver cleanup routine, if available. */
	if (crypto_drivers[hid].cc_freesession)
		err = crypto_drivers[hid].cc_freesession(sid);

	/*
	 * If this was the last session of a driver marked as invalid,
	 * make the entry available for reuse.
	 */
	if ((crypto_drivers[hid].cc_flags & CRYPTOCAP_F_CLEANUP) &&
	    crypto_drivers[hid].cc_sessions == 0)
		bzero(&crypto_drivers[hid], sizeof(struct cryptocap));

	splx(s);
	return err;
}

/*
 * Find an empty slot.
 */
int32_t
crypto_get_driverid(u_int8_t flags)
{
	struct cryptocap *newdrv;
	int i, s;

	s = splvm();

	if (crypto_drivers_num == 0) {
		crypto_drivers_num = CRYPTO_DRIVERS_INITIAL;
		crypto_drivers = malloc(crypto_drivers_num *
		    sizeof(struct cryptocap), M_CRYPTO_DATA, M_NOWAIT);
		if (crypto_drivers == NULL) {
			crypto_drivers_num = 0;
			splx(s);
			return -1;
		}

		bzero(crypto_drivers, crypto_drivers_num *
		    sizeof(struct cryptocap));
	}

	for (i = 0; i < crypto_drivers_num; i++) {
		if (crypto_drivers[i].cc_process == NULL &&
		    !(crypto_drivers[i].cc_flags & CRYPTOCAP_F_CLEANUP) &&
		    crypto_drivers[i].cc_sessions == 0) {
			crypto_drivers[i].cc_sessions = 1; /* Mark */
			crypto_drivers[i].cc_flags = flags;
			splx(s);
			return i;
		}
	}

	/* Out of entries, allocate some more. */
	if (i == crypto_drivers_num) {
		/* Be careful about wrap-around. */
		if (2 * crypto_drivers_num <= crypto_drivers_num) {
			splx(s);
			return -1;
		}

		newdrv = malloc(2 * crypto_drivers_num *
		    sizeof(struct cryptocap), M_CRYPTO_DATA, M_NOWAIT);
		if (newdrv == NULL) {
			splx(s);
			return -1;
		}

		bcopy(crypto_drivers, newdrv,
		    crypto_drivers_num * sizeof(struct cryptocap));
		bzero(&newdrv[crypto_drivers_num],
		    crypto_drivers_num * sizeof(struct cryptocap));

		newdrv[i].cc_sessions = 1; /* Mark */
		newdrv[i].cc_flags = flags;
		crypto_drivers_num *= 2;

		free(crypto_drivers, M_CRYPTO_DATA);
		crypto_drivers = newdrv;
		splx(s);
		return i;
	}

	/* Shouldn't really get here... */
	splx(s);
	return -1;
}

/*
 * Register a crypto driver. It should be called once for each algorithm
 * supported by the driver.
 */
int
crypto_kregister(u_int32_t driverid, int *kalg,
    int (*kprocess)(struct cryptkop *))
{
	int s, i;

	if (driverid >= crypto_drivers_num || kalg == NULL ||
	    crypto_drivers == NULL)
		return EINVAL;

	s = splvm();

	for (i = 0; i < CRK_ALGORITHM_MAX; i++) {
		/*
		 * XXX Do some performance testing to determine
		 * placing. We probably need an auxiliary data
		 * structure that describes relative performances.
		 */

		crypto_drivers[driverid].cc_kalg[i] = kalg[i];
	}

	crypto_drivers[driverid].cc_kprocess = kprocess;

	splx(s);
	return 0;
}

/* Register a crypto driver. */
int
crypto_register(u_int32_t driverid, int *alg,
    int (*newses)(u_int32_t *, struct cryptoini *),
    int (*freeses)(u_int64_t), int (*process)(struct cryptop *))
{
	int s, i;

	if (driverid >= crypto_drivers_num || alg == NULL ||
	    crypto_drivers == NULL)
		return EINVAL;

	s = splvm();

	for (i = 0; i < CRYPTO_ALGORITHM_ALL; i++) {
		/*
		 * XXX Do some performance testing to determine
		 * placing. We probably need an auxiliary data
		 * structure that describes relative performances.
		 */

		crypto_drivers[driverid].cc_alg[i] = alg[i];
	}

	crypto_drivers[driverid].cc_newsession = newses;
	crypto_drivers[driverid].cc_process = process;
	crypto_drivers[driverid].cc_freesession = freeses;
	crypto_drivers[driverid].cc_sessions = 0; /* Unmark */

	splx(s);

	return 0;
}

/*
 * Unregister a crypto driver. If there are pending sessions using it,
 * leave enough information around so that subsequent calls using those
 * sessions will correctly detect the driver being unregistered and reroute
 * the request.
 */
int
crypto_unregister(u_int32_t driverid, int alg)
{
	int i = CRYPTO_ALGORITHM_MAX + 1, s;
	u_int32_t ses;

	s = splvm();

	/* Sanity checks. */
	if (driverid >= crypto_drivers_num || crypto_drivers == NULL ||
	    ((alg <= 0 || alg > CRYPTO_ALGORITHM_MAX) &&
	    alg != CRYPTO_ALGORITHM_ALL) ||
	    crypto_drivers[driverid].cc_alg[alg] == 0) {
		splx(s);
		return EINVAL;
	}

	if (alg != CRYPTO_ALGORITHM_ALL) {
		crypto_drivers[driverid].cc_alg[alg] = 0;

		/* Was this the last algorithm? */
		for (i = 1; i <= CRYPTO_ALGORITHM_MAX; i++)
			if (crypto_drivers[driverid].cc_alg[i] != 0)
				break;
	}

	/*
	 * If a driver unregistered its last algorithm or all of them
	 * (alg == CRYPTO_ALGORITHM_ALL), cleanup its entry.
	 */
	if (i == CRYPTO_ALGORITHM_MAX + 1 || alg == CRYPTO_ALGORITHM_ALL) {
		ses = crypto_drivers[driverid].cc_sessions;
		bzero(&crypto_drivers[driverid], sizeof(struct cryptocap));
		if (ses != 0) {
			/*
			 * If there are pending sessions, just mark as invalid.
			 */
			crypto_drivers[driverid].cc_flags |= CRYPTOCAP_F_CLEANUP;
			crypto_drivers[driverid].cc_sessions = ses;
		}
	}
	splx(s);
	return 0;
}

/*
 * Add crypto request to a queue, to be processed by a kernel thread.
 */
int
crypto_dispatch(struct cryptop *crp)
{
	int s;
	u_int32_t hid;

	s = splvm();
	/*
	 * Keep track of ops per driver, for coalescing purposes. If
	 * we have been given an invalid hid, we'll deal with it in
	 * crypto_invoke(), through session migration.
	 */
	hid = (crp->crp_sid >> 32) & 0xffffffff;
	if (hid < crypto_drivers_num)
		crypto_drivers[hid].cc_queued++;
	splx(s);

	if (crypto_workq) {
		workq_queue_task(crypto_workq, &crp->crp_wqt, 0,
		    (workq_fn)crypto_invoke, crp, NULL);
	} else {
		crypto_invoke(crp);
	}

	return 0;
}

int
crypto_kdispatch(struct cryptkop *krp)
{
	if (crypto_workq) {
		workq_queue_task(crypto_workq, &krp->krp_wqt, 0,
		    (workq_fn)crypto_kinvoke, krp, NULL);
	} else {
		crypto_kinvoke(krp);
	}

	return 0;
}
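
/*
 * Illustrative sketch (not part of the original file): the registration
 * calls a hardware driver would typically make from its attach routine.
 * The example_* names and the newsession/freesession/process callbacks
 * are hypothetical; the entry points, flag values, and algorithm
 * constants come from cryptodev.h.
 */
#if 0
int example_newsession(u_int32_t *, struct cryptoini *);
int example_freesession(u_int64_t);
int example_process(struct cryptop *);

void
example_driver_attach(void)
{
	int32_t driverid;
	int algs[CRYPTO_ALGORITHM_MAX + 1];

	bzero(algs, sizeof(algs));
	algs[CRYPTO_3DES_CBC] = CRYPTO_ALG_FLAG_SUPPORTED;
	algs[CRYPTO_SHA1_HMAC] = CRYPTO_ALG_FLAG_SUPPORTED;

	/* Reserve a slot in the crypto_drivers table. */
	driverid = crypto_get_driverid(0);
	if (driverid < 0)
		return;

	/* Hand our algorithm list and callbacks to the framework. */
	crypto_register(driverid, algs, example_newsession,
	    example_freesession, example_process);
}
#endif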

/*
 * Dispatch an asymmetric crypto request to the appropriate crypto devices.
 */
int
crypto_kinvoke(struct cryptkop *krp)
{
	extern int cryptodevallowsoft;
	u_int32_t hid;
	int error;
	int s;

	/* Sanity checks. */
	if (krp == NULL || krp->krp_callback == NULL)
		return (EINVAL);

	s = splvm();
	for (hid = 0; hid < crypto_drivers_num; hid++) {
		if ((crypto_drivers[hid].cc_flags & CRYPTOCAP_F_SOFTWARE) &&
		    cryptodevallowsoft == 0)
			continue;
		if (crypto_drivers[hid].cc_kprocess == NULL)
			continue;
		if ((crypto_drivers[hid].cc_kalg[krp->krp_op] &
		    CRYPTO_ALG_FLAG_SUPPORTED) == 0)
			continue;
		break;
	}

	if (hid == crypto_drivers_num) {
		krp->krp_status = ENODEV;
		crypto_kdone(krp);
		splx(s);
		return (0);
	}

	krp->krp_hid = hid;

	crypto_drivers[hid].cc_koperations++;

	error = crypto_drivers[hid].cc_kprocess(krp);
	if (error) {
		krp->krp_status = error;
		crypto_kdone(krp);
	}
	splx(s);
	return (0);
}

/*
 * Dispatch a crypto request to the appropriate crypto devices.
 */
int
crypto_invoke(struct cryptop *crp)
{
	struct cryptodesc *crd;
	u_int64_t nid;
	u_int32_t hid;
	int error;
	int s;

	/* Sanity checks. */
	if (crp == NULL || crp->crp_callback == NULL)
		return EINVAL;

	s = splvm();
	if (crp->crp_desc == NULL || crypto_drivers == NULL) {
		crp->crp_etype = EINVAL;
		crypto_done(crp);
		splx(s);
		return 0;
	}

	hid = (crp->crp_sid >> 32) & 0xffffffff;
	if (hid >= crypto_drivers_num)
		goto migrate;

	crypto_drivers[hid].cc_queued--;

	if (crypto_drivers[hid].cc_flags & CRYPTOCAP_F_CLEANUP) {
		crypto_freesession(crp->crp_sid);
		goto migrate;
	}

	if (crypto_drivers[hid].cc_process == NULL)
		goto migrate;

	crypto_drivers[hid].cc_operations++;
	crypto_drivers[hid].cc_bytes += crp->crp_ilen;

	error = crypto_drivers[hid].cc_process(crp);
	if (error) {
		if (error == ERESTART) {
			/* Unregister driver and migrate session. */
			crypto_unregister(hid, CRYPTO_ALGORITHM_ALL);
			goto migrate;
		} else {
			crp->crp_etype = error;
		}
	}

	splx(s);
	return 0;

 migrate:
	/* Migrate session. */
	for (crd = crp->crp_desc; crd->crd_next; crd = crd->crd_next)
		crd->CRD_INI.cri_next = &(crd->crd_next->CRD_INI);

	if (crypto_newsession(&nid, &(crp->crp_desc->CRD_INI), 0) == 0)
		crp->crp_sid = nid;

	crp->crp_etype = EAGAIN;
	crypto_done(crp);
	splx(s);
	return 0;
}
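
/*
 * Illustrative sketch (not part of the original file): submitting an
 * asymmetric operation through crypto_kdispatch().  The callback runs
 * from the crypto workqueue once a driver (or the ENODEV path above)
 * has set krp_status.  The example_* names are hypothetical; the
 * cryptkop and crparam field names are as declared in cryptodev.h, and
 * the CRK_MOD_EXP parameter layout (base, exponent, modulus in; result
 * out) is an assumption of this sketch.
 */
#if 0
int
example_mod_exp_done(struct cryptkop *krp)
{
	if (krp->krp_status != 0)
		printf("mod_exp failed: %d\n", krp->krp_status);
	/* ... consume krp->krp_param[3], then free krp ... */
	return 0;
}

int
example_mod_exp(struct crparam *base, struct crparam *exp,
    struct crparam *mod, struct crparam *res)
{
	struct cryptkop *krp;

	krp = malloc(sizeof(*krp), M_CRYPTO_DATA, M_NOWAIT | M_ZERO);
	if (krp == NULL)
		return ENOMEM;

	krp->krp_op = CRK_MOD_EXP;
	krp->krp_iparams = 3;		/* base, exponent, modulus */
	krp->krp_oparams = 1;		/* result */
	krp->krp_param[0] = *base;
	krp->krp_param[1] = *exp;
	krp->krp_param[2] = *mod;
	krp->krp_param[3] = *res;
	krp->krp_callback = example_mod_exp_done;

	return crypto_kdispatch(krp);
}
#endif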

/*
 * Release a set of crypto descriptors.
 */
void
crypto_freereq(struct cryptop *crp)
{
	struct cryptodesc *crd;
	int s;

	if (crp == NULL)
		return;

	s = splvm();

	while ((crd = crp->crp_desc) != NULL) {
		crp->crp_desc = crd->crd_next;
		pool_put(&cryptodesc_pool, crd);
	}

	pool_put(&cryptop_pool, crp);
	splx(s);
}

/*
 * Acquire a set of crypto descriptors.
 */
struct cryptop *
crypto_getreq(int num)
{
	struct cryptodesc *crd;
	struct cryptop *crp;
	int s;

	s = splvm();

	if (crypto_pool_initialized == 0) {
		pool_init(&cryptop_pool, sizeof(struct cryptop), 0, 0,
		    0, "cryptop", NULL);
		pool_init(&cryptodesc_pool, sizeof(struct cryptodesc), 0, 0,
		    0, "cryptodesc", NULL);
		crypto_pool_initialized = 1;
	}

	crp = pool_get(&cryptop_pool, PR_NOWAIT);
	if (crp == NULL) {
		splx(s);
		return NULL;
	}
	bzero(crp, sizeof(struct cryptop));

	while (num--) {
		crd = pool_get(&cryptodesc_pool, PR_NOWAIT);
		if (crd == NULL) {
			splx(s);
			crypto_freereq(crp);
			return NULL;
		}

		bzero(crd, sizeof(struct cryptodesc));
		crd->crd_next = crp->crp_desc;
		crp->crp_desc = crd;
	}

	splx(s);
	return crp;
}

void
init_crypto(void)
{
	crypto_workq = workq_create("crypto", 1, IPL_HIGH);
}

/*
 * Invoke the callback on behalf of the driver.
 */
void
crypto_done(struct cryptop *crp)
{
	crp->crp_flags |= CRYPTO_F_DONE;
	if (crp->crp_flags & CRYPTO_F_NOQUEUE) {
		/* Not from the crypto queue, wake up the userland process. */
		crp->crp_callback(crp);
	} else {
		workq_queue_task(crypto_workq, &crp->crp_wqt, 0,
		    (workq_fn)crp->crp_callback, crp, NULL);
	}
}

/*
 * Invoke the callback on behalf of the driver.
 */
void
crypto_kdone(struct cryptkop *krp)
{
	workq_queue_task(crypto_workq, &krp->krp_wqt, 0,
	    (workq_fn)krp->krp_callback, krp, NULL);
}

int
crypto_getfeat(int *featp)
{
	extern int cryptodevallowsoft, userasymcrypto;
	int hid, kalg, feat = 0;

	if (userasymcrypto == 0)
		goto out;
	for (hid = 0; hid < crypto_drivers_num; hid++) {
		if ((crypto_drivers[hid].cc_flags & CRYPTOCAP_F_SOFTWARE) &&
		    cryptodevallowsoft == 0) {
			continue;
		}
		if (crypto_drivers[hid].cc_kprocess == NULL)
			continue;
		for (kalg = 0; kalg < CRK_ALGORITHM_MAX; kalg++)
			if ((crypto_drivers[hid].cc_kalg[kalg] &
			    CRYPTO_ALG_FLAG_SUPPORTED) != 0)
				feat |= 1 << kalg;
	}
 out:
	*featp = feat;
	return (0);
}
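
/*
 * Illustrative sketch (not part of the original file): the life cycle of
 * a symmetric request as seen by a consumer.  A descriptor list is taken
 * from the pools above, filled in, and handed to crypto_dispatch(); the
 * framework later runs the callback from the crypto workqueue (or
 * directly, via crypto_done(), for CRYPTO_F_NOQUEUE requests).  The
 * example_* names, the algorithm constant, and the mbuf/session
 * arguments are hypothetical; <sys/mbuf.h> would also be needed.
 */
#if 0
int
example_encrypt_done(struct cryptop *crp)
{
	if (crp->crp_etype == EAGAIN) {
		/* Session was migrated; resubmit with the new crp_sid. */
		crp->crp_etype = 0;
		return crypto_dispatch(crp);
	}
	/* ... hand the (now encrypted) mbuf back to the caller ... */
	crypto_freereq(crp);
	return 0;
}

int
example_encrypt(struct mbuf *m, u_int64_t sid, int len)
{
	struct cryptop *crp;
	struct cryptodesc *crd;

	crp = crypto_getreq(1);		/* one descriptor: encryption */
	if (crp == NULL)
		return ENOBUFS;

	crd = crp->crp_desc;
	crd->crd_alg = CRYPTO_3DES_CBC;	/* must match the session */
	crd->crd_skip = 0;		/* offset of payload in the mbuf */
	crd->crd_len = len;		/* bytes to encrypt */
	crd->crd_inject = 0;		/* where the IV lives */
	crd->crd_flags = CRD_F_ENCRYPT;

	crp->crp_sid = sid;		/* from crypto_newsession() */
	crp->crp_ilen = len;
	crp->crp_flags = CRYPTO_F_IMBUF;
	crp->crp_buf = (caddr_t)m;
	crp->crp_callback = example_encrypt_done;

	return crypto_dispatch(crp);
}
#endif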