/* $NetBSD: crypto.c,v 1.28 2008/04/28 20:24:10 martin Exp $ */
/* $FreeBSD: src/sys/opencrypto/crypto.c,v 1.4.2.5 2003/02/26 00:14:05 sam Exp $ */
/* $OpenBSD: crypto.c,v 1.41 2002/07/17 23:52:38 art Exp $ */

/*-
 * Copyright (c) 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Coyote Point Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu)
 *
 * This code was written by Angelos D. Keromytis in Athens, Greece, in
 * February 2000.  Network Security Technologies Inc. (NSTI) kindly
 * supported the development of this code.
 *
 * Copyright (c) 2000, 2001 Angelos D. Keromytis
 *
 * Permission to use, copy, and modify this software with or without fee
 * is hereby granted, provided that this entire notice is included in
 * all source code copies of any software which is or includes a copy or
 * modification of this software.
 *
 * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTY.  IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
 * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
 * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
 * PURPOSE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: crypto.c,v 1.28 2008/04/28 20:24:10 martin Exp $");

#include <sys/param.h>
#include <sys/reboot.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/pool.h>
#include <sys/kthread.h>
#include <sys/once.h>
#include <sys/sysctl.h>
#include <sys/intr.h>

#include "opt_ocf.h"
#include <opencrypto/cryptodev.h>
#include <opencrypto/xform.h>		/* XXX for M_XDATA */

kcondvar_t cryptoret_cv;
kmutex_t crypto_mtx;

/* below are kludges for residual code written to FreeBSD interfaces */
#define SWI_CRYPTO 17
#define register_swi(lvl, fn) \
        softint_establish(SOFTINT_NET, (void (*)(void *))fn, NULL)
#define unregister_swi(lvl, fn) softint_disestablish(softintr_cookie)
#define setsoftcrypto(x) softint_schedule(x)

#define SESID2HID(sid) (((sid) >> 32) & 0xffffffff)

/*
 * Crypto drivers register themselves by allocating a slot in the
 * crypto_drivers table with crypto_get_driverid() and then registering
 * each algorithm they support with crypto_register() and crypto_kregister().
 */
static struct cryptocap *crypto_drivers;
static int crypto_drivers_num;
static void *softintr_cookie;

/*
 * There are two queues for crypto requests; one for symmetric (e.g.
 * cipher) operations and one for asymmetric (e.g. MOD) operations.
 * See below for how synchronization is handled.
 */
static TAILQ_HEAD(,cryptop) crp_q =		/* request queues */
        TAILQ_HEAD_INITIALIZER(crp_q);
static TAILQ_HEAD(,cryptkop) crp_kq =
        TAILQ_HEAD_INITIALIZER(crp_kq);

/*
 * There are two queues for processing completed crypto requests; one
 * for the symmetric and one for the asymmetric ops.  We only need one
 * but have two to avoid type futzing (cryptop vs. cryptkop).  See below
 * for how synchronization is handled.
 */
static TAILQ_HEAD(crprethead, cryptop) crp_ret_q =	/* callback queues */
        TAILQ_HEAD_INITIALIZER(crp_ret_q);
static TAILQ_HEAD(krprethead, cryptkop) crp_ret_kq =
        TAILQ_HEAD_INITIALIZER(crp_ret_kq);

/*
 * XXX these functions are ghastly hacks for when the submission
 * XXX routines discover a request that was not CBIMM is already
 * XXX done, and must be yanked from the retq (where _done() put it)
 * XXX as cryptoret won't get the chance.  The queue is walked backwards
 * XXX as the request is generally the last one queued.
 *
 * call with the lock held, or else.
 */
int
crypto_ret_q_remove(struct cryptop *crp)
{
        struct cryptop *acrp;

        TAILQ_FOREACH_REVERSE(acrp, &crp_ret_q, crprethead, crp_next) {
                if (acrp == crp) {
                        TAILQ_REMOVE(&crp_ret_q, crp, crp_next);
                        crp->crp_flags &= (~CRYPTO_F_ONRETQ);
                        return 1;
                }
        }
        return 0;
}

int
crypto_ret_kq_remove(struct cryptkop *krp)
{
        struct cryptkop *akrp;

        TAILQ_FOREACH_REVERSE(akrp, &crp_ret_kq, krprethead, krp_next) {
                if (akrp == krp) {
                        TAILQ_REMOVE(&crp_ret_kq, krp, krp_next);
                        krp->krp_flags &= (~CRYPTO_F_ONRETQ);
                        return 1;
                }
        }
        return 0;
}

/*
 * Crypto op and descriptor data structures are allocated
 * from separate private zones (FreeBSD) / pools (NetBSD/OpenBSD).
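 *
 * An illustrative allocate/free pairing (a hedged sketch only;
 * descriptor setup and error paths are elided):
 *
 *	struct cryptop *crp = crypto_getreq(1);    op plus one descriptor
 *	if (crp == NULL)
 *		return ENOMEM;                     pool is empty
 *	... fill in crp->crp_desc, crp->crp_callback, etc. ...
 *	crypto_freereq(crp);                       releases the desc chain too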
 */
struct pool cryptop_pool;
struct pool cryptodesc_pool;
struct pool cryptkop_pool;

int crypto_usercrypto = 1;	/* userland may open /dev/crypto */
int crypto_userasymcrypto = 1;	/* userland may do asym crypto reqs */
/*
 * crypto_devallowsoft is (intended to be) sysctl'able, controlling
 * access to hardware versus software transforms as below:
 *
 * crypto_devallowsoft < 0:  Force userlevel requests to use software
 *                           transforms, always
 * crypto_devallowsoft = 0:  Use hardware if present, grant userlevel
 *                           requests for non-accelerated transforms
 *                           (handling the latter in software)
 * crypto_devallowsoft > 0:  Allow user requests only for transforms which
 *                           are hardware-accelerated.
 */
int crypto_devallowsoft = 1;	/* only use hardware crypto */

SYSCTL_SETUP(sysctl_opencrypto_setup, "sysctl opencrypto subtree setup")
{
        sysctl_createv(clog, 0, NULL, NULL,
            CTLFLAG_PERMANENT,
            CTLTYPE_NODE, "kern", NULL,
            NULL, 0, NULL, 0,
            CTL_KERN, CTL_EOL);
        sysctl_createv(clog, 0, NULL, NULL,
            CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
            CTLTYPE_INT, "usercrypto",
            SYSCTL_DESCR("Enable/disable user-mode access to "
                "crypto support"),
            NULL, 0, &crypto_usercrypto, 0,
            CTL_KERN, CTL_CREATE, CTL_EOL);
        sysctl_createv(clog, 0, NULL, NULL,
            CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
            CTLTYPE_INT, "userasymcrypto",
            SYSCTL_DESCR("Enable/disable user-mode access to "
                "asymmetric crypto support"),
            NULL, 0, &crypto_userasymcrypto, 0,
            CTL_KERN, CTL_CREATE, CTL_EOL);
        sysctl_createv(clog, 0, NULL, NULL,
            CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
            CTLTYPE_INT, "cryptodevallowsoft",
            SYSCTL_DESCR("Enable/disable use of software "
                "asymmetric crypto support"),
            NULL, 0, &crypto_devallowsoft, 0,
            CTL_KERN, CTL_CREATE, CTL_EOL);
}

MALLOC_DEFINE(M_CRYPTO_DATA, "crypto", "crypto session records");

/*
 * Synchronization: read carefully, this is non-trivial.
 *
 * Crypto requests are submitted via crypto_dispatch.  Typically
 * these come in from network protocols at spl0 (output path) or
 * spl[,soft]net (input path).
 *
 * Requests are typically passed on to the driver directly, but they
 * may also be queued for processing by a software interrupt thread,
 * cryptointr, that runs at splsoftcrypto.  This thread dispatches
 * the requests to crypto drivers (h/w or s/w) who call crypto_done
 * when a request is complete.  Hardware crypto drivers are assumed
 * to register their IRQs as network devices so their interrupt handlers
 * and subsequent "done callbacks" happen at spl[imp,net].
 *
 * Completed crypto ops are queued for a separate kernel thread that
 * handles the callbacks at spl0.  This decoupling ensures the crypto
 * driver interrupt service routine is not delayed while the callback
 * takes place and that callbacks are delivered after a context switch
 * (as opposed to a software interrupt that clients must block).
 *
 * This scheme is not intended for SMP machines.
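 *
 * In outline (a summary of the code below, not a new interface):
 *
 *	crypto_dispatch(crp)		caller submits a request
 *	  -> crypto_invoke(crp, hint)	directly, or via the cryptointr swi
 *	    -> driver cc_process()	h/w or s/w transform runs
 *	      -> crypto_done(crp)	driver marks the op complete
 *	        -> cryptoret thread	dequeues the op, runs crp_callback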
 */
static void cryptointr(void);		/* swi thread to dispatch ops */
static void cryptoret(void);		/* kernel thread for callbacks */
static struct lwp *cryptothread;
static void crypto_destroy(void);
static int crypto_invoke(struct cryptop *crp, int hint);
static int crypto_kinvoke(struct cryptkop *krp, int hint);

static struct cryptostats cryptostats;
#ifdef CRYPTO_TIMING
static int crypto_timing = 0;
#endif

static int
crypto_init0(void)
{
        int error;

        mutex_init(&crypto_mtx, MUTEX_DEFAULT, IPL_NET);
        cv_init(&cryptoret_cv, "crypto_wait");
        pool_init(&cryptop_pool, sizeof(struct cryptop), 0, 0,
            0, "cryptop", NULL, IPL_NET);
        pool_init(&cryptodesc_pool, sizeof(struct cryptodesc), 0, 0,
            0, "cryptodesc", NULL, IPL_NET);
        pool_init(&cryptkop_pool, sizeof(struct cryptkop), 0, 0,
            0, "cryptkop", NULL, IPL_NET);

        crypto_drivers = malloc(CRYPTO_DRIVERS_INITIAL *
            sizeof(struct cryptocap), M_CRYPTO_DATA, M_NOWAIT | M_ZERO);
        if (crypto_drivers == NULL) {
                printf("crypto_init: cannot malloc driver table\n");
                return ENOMEM;
        }
        crypto_drivers_num = CRYPTO_DRIVERS_INITIAL;

        softintr_cookie = register_swi(SWI_CRYPTO, cryptointr);
        error = kthread_create(PRI_NONE, KTHREAD_MPSAFE, NULL,
            (void (*)(void *))cryptoret, NULL, &cryptothread, "cryptoret");
        if (error) {
                printf("crypto_init: cannot start cryptoret thread; "
                    "error %d\n", error);
                crypto_destroy();
                return error;
        }

        return 0;
}

void
crypto_init(void)
{
        static ONCE_DECL(crypto_init_once);

        RUN_ONCE(&crypto_init_once, crypto_init0);
}

static void
crypto_destroy(void)
{
        /* XXX no way to reclaim zones */
        if (crypto_drivers != NULL)
                free(crypto_drivers, M_CRYPTO_DATA);
        unregister_swi(SWI_CRYPTO, cryptointr);
}

/*
 * Create a new session.  Must be called with crypto_mtx held.
 */
int
crypto_newsession(u_int64_t *sid, struct cryptoini *cri, int hard)
{
        struct cryptoini *cr;
        u_int32_t hid, lid;
        int err = EINVAL;

        KASSERT(mutex_owned(&crypto_mtx));

        if (crypto_drivers == NULL)
                goto done;

        /*
         * The algorithm we use here is pretty stupid; just use the
         * first driver that supports all the algorithms we need.
         *
         * XXX We need more smarts here (in real life too, but that's
         * XXX another story altogether).
         */

        for (hid = 0; hid < crypto_drivers_num; hid++) {
                /*
                 * If it's not initialized or has remaining sessions
                 * referencing it, skip.
                 */
                if (crypto_drivers[hid].cc_newsession == NULL ||
                    (crypto_drivers[hid].cc_flags & CRYPTOCAP_F_CLEANUP))
                        continue;

                /* Hardware required -- ignore software drivers. */
                if (hard > 0 &&
                    (crypto_drivers[hid].cc_flags & CRYPTOCAP_F_SOFTWARE))
                        continue;
                /* Software required -- ignore hardware drivers. */
                if (hard < 0 &&
                    (crypto_drivers[hid].cc_flags & CRYPTOCAP_F_SOFTWARE) == 0)
                        continue;

                /* See if all the algorithms are supported. */
                for (cr = cri; cr; cr = cr->cri_next)
                        if (crypto_drivers[hid].cc_alg[cr->cri_alg] == 0)
                                break;

                if (cr == NULL) {
                        /* Ok, all algorithms are supported. */

                        /*
                         * Can't do everything in one session.
                         *
                         * XXX Fix this.  We need to inject a "virtual"
                         * XXX session layer right about here.
                         */

                        /* Call the driver initialization routine. */
                        lid = hid;		/* Pass the driver ID. */
                        err = crypto_drivers[hid].cc_newsession(
                            crypto_drivers[hid].cc_arg, &lid, cri);
                        if (err == 0) {
                                (*sid) = hid;
                                (*sid) <<= 32;
                                (*sid) |= (lid & 0xffffffff);
                                crypto_drivers[hid].cc_sessions++;
                        }
                        goto done;
                        /*break;*/
                }
        }
done:
        return err;
}

/*
 * Delete an existing session (or a reserved session on an unregistered
 * driver).  Must be called with crypto_mtx held.
 */
int
crypto_freesession(u_int64_t sid)
{
        u_int32_t hid;
        int err = 0;

        KASSERT(mutex_owned(&crypto_mtx));

        if (crypto_drivers == NULL) {
                err = EINVAL;
                goto done;
        }

        /* Determine the driver ID. */
        hid = SESID2HID(sid);

        if (hid >= crypto_drivers_num) {
                err = ENOENT;
                goto done;
        }

        if (crypto_drivers[hid].cc_sessions)
                crypto_drivers[hid].cc_sessions--;

        /* Call the driver cleanup routine, if available. */
        if (crypto_drivers[hid].cc_freesession) {
                err = crypto_drivers[hid].cc_freesession(
                    crypto_drivers[hid].cc_arg, sid);
        } else
                err = 0;

        /*
         * If this was the last session of a driver marked as invalid,
         * make the entry available for reuse.
         */
        if ((crypto_drivers[hid].cc_flags & CRYPTOCAP_F_CLEANUP) &&
            crypto_drivers[hid].cc_sessions == 0)
                bzero(&crypto_drivers[hid], sizeof(struct cryptocap));

done:
        return err;
}

/*
 * Return an unused driver id.  Used by drivers prior to registering
 * support for the algorithms they handle.
 */
int32_t
crypto_get_driverid(u_int32_t flags)
{
        struct cryptocap *newdrv;
        int i;

        crypto_init();		/* XXX oh, this is foul! */

        mutex_spin_enter(&crypto_mtx);
        for (i = 0; i < crypto_drivers_num; i++)
                if (crypto_drivers[i].cc_process == NULL &&
                    (crypto_drivers[i].cc_flags & CRYPTOCAP_F_CLEANUP) == 0 &&
                    crypto_drivers[i].cc_sessions == 0)
                        break;

        /* Out of entries, allocate some more. */
        if (i == crypto_drivers_num) {
                /* Be careful about wrap-around. */
                if (2 * crypto_drivers_num <= crypto_drivers_num) {
                        mutex_spin_exit(&crypto_mtx);
                        printf("crypto: driver count wraparound!\n");
                        return -1;
                }

                newdrv = malloc(2 * crypto_drivers_num *
                    sizeof(struct cryptocap), M_CRYPTO_DATA, M_NOWAIT|M_ZERO);
                if (newdrv == NULL) {
                        mutex_spin_exit(&crypto_mtx);
                        printf("crypto: no space to expand driver table!\n");
                        return -1;
                }

                bcopy(crypto_drivers, newdrv,
                    crypto_drivers_num * sizeof(struct cryptocap));

                crypto_drivers_num *= 2;

                free(crypto_drivers, M_CRYPTO_DATA);
                crypto_drivers = newdrv;
        }

        /* NB: state is zero'd on free */
        crypto_drivers[i].cc_sessions = 1;	/* Mark */
        crypto_drivers[i].cc_flags = flags;

        if (bootverbose)
                printf("crypto: assign driver %u, flags %u\n", i, flags);

        mutex_spin_exit(&crypto_mtx);

        return i;
}

static struct cryptocap *
crypto_checkdriver(u_int32_t hid)
{
        if (crypto_drivers == NULL)
                return NULL;
        return (hid >= crypto_drivers_num ? NULL : &crypto_drivers[hid]);
}

/*
 * Register support for a key-related algorithm.  This routine
 * is called once for each algorithm supported by a driver.
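 *
 * Illustrative driver-side registration (a hedged sketch; the xyz_*
 * callbacks and the softc pointer sc are hypothetical, not part of
 * this file):
 *
 *	int32_t drvid = crypto_get_driverid(0);
 *	if (drvid >= 0) {
 *		crypto_register(drvid, CRYPTO_DES_CBC, 0, 0,
 *		    xyz_newsession, xyz_freesession, xyz_process, sc);
 *		crypto_kregister(drvid, CRK_MOD_EXP, 0, xyz_kprocess, sc);
 *	}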
 */
int
crypto_kregister(u_int32_t driverid, int kalg, u_int32_t flags,
    int (*kprocess)(void *, struct cryptkop *, int),
    void *karg)
{
        struct cryptocap *cap;
        int err;

        mutex_spin_enter(&crypto_mtx);

        cap = crypto_checkdriver(driverid);
        if (cap != NULL &&
            (CRK_ALGORITM_MIN <= kalg && kalg <= CRK_ALGORITHM_MAX)) {
                /*
                 * XXX Do some performance testing to determine placing.
                 * XXX We probably need an auxiliary data structure that
                 * XXX describes relative performances.
                 */

                cap->cc_kalg[kalg] = flags | CRYPTO_ALG_FLAG_SUPPORTED;
                if (bootverbose) {
                        printf("crypto: driver %u registers key alg %u "
                            "flags %u\n", driverid, kalg, flags);
                }

                if (cap->cc_kprocess == NULL) {
                        cap->cc_karg = karg;
                        cap->cc_kprocess = kprocess;
                }
                err = 0;
        } else
                err = EINVAL;

        mutex_spin_exit(&crypto_mtx);
        return err;
}

/*
 * Register support for a non-key-related algorithm.  This routine
 * is called once for each such algorithm supported by a driver.
 */
int
crypto_register(u_int32_t driverid, int alg, u_int16_t maxoplen,
    u_int32_t flags,
    int (*newses)(void *, u_int32_t *, struct cryptoini *),
    int (*freeses)(void *, u_int64_t),
    int (*process)(void *, struct cryptop *, int),
    void *arg)
{
        struct cryptocap *cap;
        int err;

        mutex_spin_enter(&crypto_mtx);

        cap = crypto_checkdriver(driverid);
        /* NB: algorithms are in the range [1..max] */
        if (cap != NULL &&
            (CRYPTO_ALGORITHM_MIN <= alg && alg <= CRYPTO_ALGORITHM_MAX)) {
                /*
                 * XXX Do some performance testing to determine placing.
                 * XXX We probably need an auxiliary data structure that
                 * XXX describes relative performances.
                 */

                cap->cc_alg[alg] = flags | CRYPTO_ALG_FLAG_SUPPORTED;
                cap->cc_max_op_len[alg] = maxoplen;
                if (bootverbose) {
                        printf("crypto: driver %u registers alg %u "
                            "flags %u maxoplen %u\n", driverid, alg, flags,
                            maxoplen);
                }

                if (cap->cc_process == NULL) {
                        cap->cc_arg = arg;
                        cap->cc_newsession = newses;
                        cap->cc_process = process;
                        cap->cc_freesession = freeses;
                        cap->cc_sessions = 0;		/* Unmark */
                }
                err = 0;
        } else
                err = EINVAL;

        mutex_spin_exit(&crypto_mtx);
        return err;
}

/*
 * Unregister a crypto driver.  If there are pending sessions using it,
 * leave enough information around so that subsequent calls using those
 * sessions will correctly detect the driver has been unregistered and
 * reroute requests.
 */
int
crypto_unregister(u_int32_t driverid, int alg)
{
        int i, err;
        u_int32_t ses;
        struct cryptocap *cap;

        mutex_spin_enter(&crypto_mtx);

        cap = crypto_checkdriver(driverid);
        if (cap != NULL &&
            (CRYPTO_ALGORITHM_MIN <= alg && alg <= CRYPTO_ALGORITHM_MAX) &&
            cap->cc_alg[alg] != 0) {
                cap->cc_alg[alg] = 0;
                cap->cc_max_op_len[alg] = 0;

                /* Was this the last algorithm ? */
                for (i = 1; i <= CRYPTO_ALGORITHM_MAX; i++)
                        if (cap->cc_alg[i] != 0)
                                break;

                if (i == CRYPTO_ALGORITHM_MAX + 1) {
                        ses = cap->cc_sessions;
                        bzero(cap, sizeof(struct cryptocap));
                        if (ses != 0) {
                                /*
                                 * If there are pending sessions, just
                                 * mark as invalid.
                                 */
                                cap->cc_flags |= CRYPTOCAP_F_CLEANUP;
                                cap->cc_sessions = ses;
                        }
                }
                err = 0;
        } else
                err = EINVAL;

        mutex_spin_exit(&crypto_mtx);
        return err;
}

/*
 * Unregister all algorithms associated with a crypto driver.
 * If there are pending sessions using it, leave enough information
 * around so that subsequent calls using those sessions will
 * correctly detect the driver has been unregistered and reroute
 * requests.
 *
 * XXX careful.  Don't change this to call crypto_unregister() for each
 * XXX registered algorithm unless you drop the mutex across the calls;
 * XXX you can't take it recursively.
 */
int
crypto_unregister_all(u_int32_t driverid)
{
        int i, err;
        u_int32_t ses;
        struct cryptocap *cap;

        mutex_spin_enter(&crypto_mtx);
        cap = crypto_checkdriver(driverid);
        if (cap != NULL) {
                for (i = CRYPTO_ALGORITHM_MIN; i <= CRYPTO_ALGORITHM_MAX;
                    i++) {
                        cap->cc_alg[i] = 0;
                        cap->cc_max_op_len[i] = 0;
                }
                ses = cap->cc_sessions;
                bzero(cap, sizeof(struct cryptocap));
                if (ses != 0) {
                        /*
                         * If there are pending sessions, just mark as
                         * invalid.
                         */
                        cap->cc_flags |= CRYPTOCAP_F_CLEANUP;
                        cap->cc_sessions = ses;
                }
                err = 0;
        } else
                err = EINVAL;

        mutex_spin_exit(&crypto_mtx);
        return err;
}

/*
 * Clear blockage on a driver.  The what parameter indicates whether
 * the driver is now ready for cryptop's and/or cryptokop's.
 */
int
crypto_unblock(u_int32_t driverid, int what)
{
        struct cryptocap *cap;
        int needwakeup, err;

        mutex_spin_enter(&crypto_mtx);
        cap = crypto_checkdriver(driverid);
        if (cap != NULL) {
                needwakeup = 0;
                if (what & CRYPTO_SYMQ) {
                        needwakeup |= cap->cc_qblocked;
                        cap->cc_qblocked = 0;
                }
                if (what & CRYPTO_ASYMQ) {
                        needwakeup |= cap->cc_kqblocked;
                        cap->cc_kqblocked = 0;
                }
                err = 0;
                mutex_spin_exit(&crypto_mtx);
                if (needwakeup)
                        setsoftcrypto(softintr_cookie);
        } else {
                err = EINVAL;
                mutex_spin_exit(&crypto_mtx);
        }

        return err;
}

/*
 * Dispatch a crypto request to a driver or queue
 * it, to be processed by the kernel thread.
 */
int
crypto_dispatch(struct cryptop *crp)
{
        u_int32_t hid = SESID2HID(crp->crp_sid);
        int result;

        mutex_spin_enter(&crypto_mtx);

        cryptostats.cs_ops++;

#ifdef CRYPTO_TIMING
        if (crypto_timing)
                nanouptime(&crp->crp_tstamp);
#endif
        if ((crp->crp_flags & CRYPTO_F_BATCH) == 0) {
                struct cryptocap *cap;
                /*
                 * Caller marked the request to be processed
                 * immediately; dispatch it directly to the
                 * driver unless the driver is currently blocked.
                 */
                cap = crypto_checkdriver(hid);
                if (cap && !cap->cc_qblocked) {
                        mutex_spin_exit(&crypto_mtx);
                        result = crypto_invoke(crp, 0);
                        if (result == ERESTART) {
                                /*
                                 * The driver ran out of resources, mark the
                                 * driver ``blocked'' for cryptop's and put
                                 * the op on the queue.
                                 */
                                mutex_spin_enter(&crypto_mtx);
                                crypto_drivers[hid].cc_qblocked = 1;
                                TAILQ_INSERT_HEAD(&crp_q, crp, crp_next);
                                cryptostats.cs_blocks++;
                                mutex_spin_exit(&crypto_mtx);
                        }
                        goto out_released;
                } else {
                        /*
                         * The driver is blocked, just queue the op until
                         * it unblocks and the swi thread gets kicked.
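                         * (A driver becomes blocked when its process
                         * routine returns ERESTART; crypto_unblock()
                         * reschedules the swi thread once the driver can
                         * take work again.)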
                         */
                        TAILQ_INSERT_TAIL(&crp_q, crp, crp_next);
                        result = 0;
                }
        } else {
                int wasempty = TAILQ_EMPTY(&crp_q);
                /*
                 * Caller marked the request as ``ok to delay'';
                 * queue it for the swi thread.  This is desirable
                 * when the operation is low priority and/or suitable
                 * for batching.
                 */
                TAILQ_INSERT_TAIL(&crp_q, crp, crp_next);
                if (wasempty) {
                        mutex_spin_exit(&crypto_mtx);
                        setsoftcrypto(softintr_cookie);
                        result = 0;
                        goto out_released;
                }

                result = 0;
        }

        mutex_spin_exit(&crypto_mtx);
out_released:
        return result;
}

/*
 * Add an asymmetric crypto request to a queue,
 * to be processed by the kernel thread.
 */
int
crypto_kdispatch(struct cryptkop *krp)
{
        struct cryptocap *cap;
        int result;

        mutex_spin_enter(&crypto_mtx);
        cryptostats.cs_kops++;

        cap = crypto_checkdriver(krp->krp_hid);
        if (cap && !cap->cc_kqblocked) {
                mutex_spin_exit(&crypto_mtx);
                result = crypto_kinvoke(krp, 0);
                if (result == ERESTART) {
                        /*
                         * The driver ran out of resources, mark the
                         * driver ``blocked'' for cryptkop's and put
                         * the op on the queue.
                         */
                        mutex_spin_enter(&crypto_mtx);
                        crypto_drivers[krp->krp_hid].cc_kqblocked = 1;
                        TAILQ_INSERT_HEAD(&crp_kq, krp, krp_next);
                        cryptostats.cs_kblocks++;
                        mutex_spin_exit(&crypto_mtx);
                }
        } else {
                /*
                 * The driver is blocked, just queue the op until
                 * it unblocks and the swi thread gets kicked.
                 */
                TAILQ_INSERT_TAIL(&crp_kq, krp, krp_next);
                result = 0;
                mutex_spin_exit(&crypto_mtx);
        }

        return result;
}

/*
 * Dispatch an asymmetric crypto request to the appropriate crypto devices.
 */
static int
crypto_kinvoke(struct cryptkop *krp, int hint)
{
        u_int32_t hid;
        int error;

        /* Sanity checks. */
        if (krp == NULL)
                return EINVAL;
        if (krp->krp_callback == NULL) {
                pool_put(&cryptkop_pool, krp);
                return EINVAL;
        }

        for (hid = 0; hid < crypto_drivers_num; hid++) {
                if ((crypto_drivers[hid].cc_flags & CRYPTOCAP_F_SOFTWARE) &&
                    crypto_devallowsoft == 0)
                        continue;
                if (crypto_drivers[hid].cc_kprocess == NULL)
                        continue;
                if ((crypto_drivers[hid].cc_kalg[krp->krp_op] &
                    CRYPTO_ALG_FLAG_SUPPORTED) == 0)
                        continue;
                break;
        }
        if (hid < crypto_drivers_num) {
                krp->krp_hid = hid;
                error = crypto_drivers[hid].cc_kprocess(
                    crypto_drivers[hid].cc_karg, krp, hint);
        } else {
                error = ENODEV;
        }

        if (error) {
                krp->krp_status = error;
                crypto_kdone(krp);
        }
        return 0;
}

#ifdef CRYPTO_TIMING
static void
crypto_tstat(struct cryptotstat *ts, struct timespec *tv)
{
        struct timespec now, t;

        nanouptime(&now);
        t.tv_sec = now.tv_sec - tv->tv_sec;
        t.tv_nsec = now.tv_nsec - tv->tv_nsec;
        if (t.tv_nsec < 0) {
                t.tv_sec--;
                t.tv_nsec += 1000000000;
        }
        timespecadd(&ts->acc, &t, &t);
        if (timespeccmp(&t, &ts->min, <))
                ts->min = t;
        if (timespeccmp(&t, &ts->max, >))
                ts->max = t;
        ts->count++;

        *tv = now;
}
#endif

/*
 * Dispatch a crypto request to the appropriate crypto devices.
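 *
 * (If the session's driver has unregistered, the code below migrates
 * the session and completes the op with crp_etype set to EAGAIN; the
 * caller's callback is expected to resubmit the request using the
 * updated crp_sid.)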
 */
static int
crypto_invoke(struct cryptop *crp, int hint)
{
        u_int32_t hid;
        int (*process)(void *, struct cryptop *, int);

#ifdef CRYPTO_TIMING
        if (crypto_timing)
                crypto_tstat(&cryptostats.cs_invoke, &crp->crp_tstamp);
#endif
        /* Sanity checks. */
        if (crp == NULL)
                return EINVAL;
        if (crp->crp_callback == NULL) {
                crypto_freereq(crp);
                return EINVAL;
        }
        if (crp->crp_desc == NULL) {
                crp->crp_etype = EINVAL;
                crypto_done(crp);
                return 0;
        }

        hid = SESID2HID(crp->crp_sid);
        if (hid < crypto_drivers_num) {
                mutex_enter(&crypto_mtx);
                if (crypto_drivers[hid].cc_flags & CRYPTOCAP_F_CLEANUP)
                        crypto_freesession(crp->crp_sid);
                process = crypto_drivers[hid].cc_process;
                mutex_exit(&crypto_mtx);
        } else {
                process = NULL;
        }

        if (process == NULL) {
                struct cryptodesc *crd;
                u_int64_t nid = 0;

                /*
                 * Driver has unregistered; migrate the session and return
                 * an error to the caller so they'll resubmit the op.
                 */
                mutex_enter(&crypto_mtx);
                for (crd = crp->crp_desc; crd->crd_next; crd = crd->crd_next)
                        crd->CRD_INI.cri_next = &(crd->crd_next->CRD_INI);

                if (crypto_newsession(&nid, &(crp->crp_desc->CRD_INI), 0) == 0)
                        crp->crp_sid = nid;

                crp->crp_etype = EAGAIN;
                mutex_exit(&crypto_mtx);

                crypto_done(crp);
                return 0;
        } else {
                /*
                 * Invoke the driver to process the request.
                 */
                DPRINTF(("calling process for %08x\n", (uint32_t)crp));
                return (*process)(crypto_drivers[hid].cc_arg, crp, hint);
        }
}

/*
 * Release a set of crypto descriptors.
 */
void
crypto_freereq(struct cryptop *crp)
{
        struct cryptodesc *crd;

        if (crp == NULL)
                return;

        while ((crd = crp->crp_desc) != NULL) {
                crp->crp_desc = crd->crd_next;
                pool_put(&cryptodesc_pool, crd);
        }
        pool_put(&cryptop_pool, crp);
}

/*
 * Acquire a set of crypto descriptors.
 */
struct cryptop *
crypto_getreq(int num)
{
        struct cryptodesc *crd;
        struct cryptop *crp;

        crp = pool_get(&cryptop_pool, 0);
        if (crp == NULL) {
                return NULL;
        }
        bzero(crp, sizeof(struct cryptop));
        cv_init(&crp->crp_cv, "crydev");

        while (num--) {
                crd = pool_get(&cryptodesc_pool, 0);
                if (crd == NULL) {
                        crypto_freereq(crp);
                        return NULL;
                }

                bzero(crd, sizeof(struct cryptodesc));
                crd->crd_next = crp->crp_desc;
                crp->crp_desc = crd;
        }

        return crp;
}

/*
 * Invoke the callback on behalf of the driver.
 */
void
crypto_done(struct cryptop *crp)
{
        int wasempty;

        if (crp->crp_etype != 0)
                cryptostats.cs_errs++;
#ifdef CRYPTO_TIMING
        if (crypto_timing)
                crypto_tstat(&cryptostats.cs_done, &crp->crp_tstamp);
#endif

        crp->crp_flags |= CRYPTO_F_DONE;

        /*
         * Normal case; queue the callback for the thread.
         *
         * The return queue is manipulated by the swi thread
         * and, potentially, by crypto device drivers calling
         * back to mark operations completed.  Thus we need
         * to mask both while manipulating the return queue.
         */
        if (crp->crp_flags & CRYPTO_F_CBIMM) {
                /*
                 * Do the callback directly.  This is ok when the
                 * callback routine does very little (e.g. the
                 * /dev/crypto callback method just does a wakeup).
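                 * A hypothetical synchronous user might set
                 * CRYPTO_F_CBIMM and supply a callback that merely
                 * cv_signal()s a waiter.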
                 */
#ifdef CRYPTO_TIMING
                if (crypto_timing) {
                        /*
                         * NB: We must copy the timestamp before
                         * doing the callback as the cryptop is
                         * likely to be reclaimed.
                         */
                        struct timespec t = crp->crp_tstamp;
                        crypto_tstat(&cryptostats.cs_cb, &t);
                        crp->crp_callback(crp);
                        crypto_tstat(&cryptostats.cs_finis, &t);
                } else
#endif
                        crp->crp_callback(crp);
        } else {
                mutex_spin_enter(&crypto_mtx);
                wasempty = TAILQ_EMPTY(&crp_ret_q);
                DPRINTF(("crypto_done: queueing %08x\n", (uint32_t)crp));
                crp->crp_flags |= CRYPTO_F_ONRETQ;
                TAILQ_INSERT_TAIL(&crp_ret_q, crp, crp_next);
                if (wasempty) {
                        DPRINTF(("crypto_done: waking cryptoret, %08x "
                            "hit empty queue.\n", (uint32_t)crp));
                        cv_signal(&cryptoret_cv);
                }
                mutex_spin_exit(&crypto_mtx);
        }
}

/*
 * Invoke the callback on behalf of the driver.
 */
void
crypto_kdone(struct cryptkop *krp)
{
        int wasempty;

        if (krp->krp_status != 0)
                cryptostats.cs_kerrs++;

        krp->krp_flags |= CRYPTO_F_DONE;

        /*
         * The return queue is manipulated by the swi thread
         * and, potentially, by crypto device drivers calling
         * back to mark operations completed.  Thus we need
         * to mask both while manipulating the return queue.
         */
        if (krp->krp_flags & CRYPTO_F_CBIMM) {
                krp->krp_callback(krp);
        } else {
                mutex_spin_enter(&crypto_mtx);
                wasempty = TAILQ_EMPTY(&crp_ret_kq);
                krp->krp_flags |= CRYPTO_F_ONRETQ;
                TAILQ_INSERT_TAIL(&crp_ret_kq, krp, krp_next);
                if (wasempty)
                        cv_signal(&cryptoret_cv);
                mutex_spin_exit(&crypto_mtx);
        }
}

int
crypto_getfeat(int *featp)
{
        int hid, kalg, feat = 0;

        mutex_spin_enter(&crypto_mtx);

        if (crypto_userasymcrypto == 0)
                goto out;

        for (hid = 0; hid < crypto_drivers_num; hid++) {
                if ((crypto_drivers[hid].cc_flags & CRYPTOCAP_F_SOFTWARE) &&
                    crypto_devallowsoft == 0) {
                        continue;
                }
                if (crypto_drivers[hid].cc_kprocess == NULL)
                        continue;
                for (kalg = 0; kalg < CRK_ALGORITHM_MAX; kalg++)
                        if ((crypto_drivers[hid].cc_kalg[kalg] &
                            CRYPTO_ALG_FLAG_SUPPORTED) != 0)
                                feat |= 1 << kalg;
        }
out:
        mutex_spin_exit(&crypto_mtx);
        *featp = feat;
        return (0);
}

/*
 * Software interrupt thread to dispatch crypto requests.
 */
static void
cryptointr(void)
{
        struct cryptop *crp, *submit;
        struct cryptkop *krp;
        struct cryptocap *cap;
        int result, hint;

        DPRINTF(("crypto softint\n"));
        cryptostats.cs_intrs++;
        mutex_spin_enter(&crypto_mtx);
        do {
                /*
                 * Find the first element in the queue that can be
                 * processed and look-ahead to see if multiple ops
                 * are ready for the same driver.
                 */
                submit = NULL;
                hint = 0;
                TAILQ_FOREACH(crp, &crp_q, crp_next) {
                        u_int32_t hid = SESID2HID(crp->crp_sid);
                        cap = crypto_checkdriver(hid);
                        if (cap == NULL || cap->cc_process == NULL) {
                                /* Op needs to be migrated, process it. */
                                if (submit == NULL)
                                        submit = crp;
                                break;
                        }
                        if (!cap->cc_qblocked) {
                                if (submit != NULL) {
                                        /*
                                         * We stop on finding another op,
                                         * regardless of whether it's for
                                         * the same driver or not.  We could
                                         * keep searching the queue but it
                                         * might be better to just use a
                                         * per-driver queue instead.
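                                         * (A per-driver queue would also
                                         * keep one blocked driver from
                                         * stalling ops bound for other
                                         * drivers.)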
                                         */
                                        if (SESID2HID(submit->crp_sid) == hid)
                                                hint = CRYPTO_HINT_MORE;
                                        break;
                                } else {
                                        submit = crp;
                                        if ((submit->crp_flags & CRYPTO_F_BATCH) == 0)
                                                break;
                                        /* keep scanning in case more are queued */
                                }
                        }
                }
                if (submit != NULL) {
                        TAILQ_REMOVE(&crp_q, submit, crp_next);
                        mutex_spin_exit(&crypto_mtx);
                        result = crypto_invoke(submit, hint);
                        /*
                         * We must retake the mutex here as the TAILQ op
                         * or kinvoke below may need it.  Sigh.
                         */
                        mutex_spin_enter(&crypto_mtx);
                        if (result == ERESTART) {
                                /*
                                 * The driver ran out of resources, mark the
                                 * driver ``blocked'' for cryptop's and put
                                 * the request back in the queue.  It would
                                 * be best to put the request back where we
                                 * got it but that's hard so for now we put
                                 * it at the front.  This should be ok;
                                 * putting it at the end does not work.
                                 */
                                /* XXX validate sid again? */
                                crypto_drivers[SESID2HID(submit->crp_sid)].cc_qblocked = 1;
                                TAILQ_INSERT_HEAD(&crp_q, submit, crp_next);
                                cryptostats.cs_blocks++;
                        }
                }

                /* As above, but for key ops */
                TAILQ_FOREACH(krp, &crp_kq, krp_next) {
                        cap = crypto_checkdriver(krp->krp_hid);
                        if (cap == NULL || cap->cc_kprocess == NULL) {
                                /* Op needs to be migrated, process it. */
                                break;
                        }
                        if (!cap->cc_kqblocked)
                                break;
                }
                if (krp != NULL) {
                        TAILQ_REMOVE(&crp_kq, krp, krp_next);
                        mutex_spin_exit(&crypto_mtx);
                        result = crypto_kinvoke(krp, 0);
                        /* the next iteration will want the mutex. :-/ */
                        mutex_spin_enter(&crypto_mtx);
                        if (result == ERESTART) {
                                /*
                                 * The driver ran out of resources, mark the
                                 * driver ``blocked'' for cryptkop's and put
                                 * the request back in the queue.  It would
                                 * be best to put the request back where we
                                 * got it but that's hard so for now we put
                                 * it at the front.  This should be ok;
                                 * putting it at the end does not work.
                                 */
                                /* XXX validate sid again? */
                                crypto_drivers[krp->krp_hid].cc_kqblocked = 1;
                                TAILQ_INSERT_HEAD(&crp_kq, krp, krp_next);
                                cryptostats.cs_kblocks++;
                        }
                }
        } while (submit != NULL || krp != NULL);
        mutex_spin_exit(&crypto_mtx);
}

/*
 * Kernel thread to do callbacks.
 */
static void
cryptoret(void)
{
        struct cryptop *crp;
        struct cryptkop *krp;

        mutex_spin_enter(&crypto_mtx);
        for (;;) {
                crp = TAILQ_FIRST(&crp_ret_q);
                if (crp != NULL) {
                        TAILQ_REMOVE(&crp_ret_q, crp, crp_next);
                        crp->crp_flags &= ~CRYPTO_F_ONRETQ;
                }
                krp = TAILQ_FIRST(&crp_ret_kq);
                if (krp != NULL) {
                        TAILQ_REMOVE(&crp_ret_kq, krp, krp_next);
                        krp->krp_flags &= ~CRYPTO_F_ONRETQ;
                }

                /* drop before calling any callbacks. */
                if (crp == NULL && krp == NULL) {
                        cryptostats.cs_rets++;
                        cv_wait(&cryptoret_cv, &crypto_mtx);
                        continue;
                }

                mutex_spin_exit(&crypto_mtx);

                if (crp != NULL) {
#ifdef CRYPTO_TIMING
                        if (crypto_timing) {
                                /*
                                 * NB: We must copy the timestamp before
                                 * doing the callback as the cryptop is
                                 * likely to be reclaimed.
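                                 * (Callbacks commonly release the op with
                                 * crypto_freereq().)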
                                 */
                                struct timespec t = crp->crp_tstamp;
                                crypto_tstat(&cryptostats.cs_cb, &t);
                                crp->crp_callback(crp);
                                crypto_tstat(&cryptostats.cs_finis, &t);
                        } else
#endif
                        {
                                crp->crp_callback(crp);
                        }
                }
                if (krp != NULL)
                        krp->krp_callback(krp);

                mutex_spin_enter(&crypto_mtx);
        }
}
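
/*
 * Illustrative end-to-end usage by a hypothetical in-kernel consumer
 * (a hedged sketch only; the my* names do not exist in this file and
 * error handling is elided):
 *
 *	struct cryptoini cri;
 *	u_int64_t sid;
 *	struct cryptop *crp;
 *	int err;
 *
 *	memset(&cri, 0, sizeof(cri));
 *	cri.cri_alg = CRYPTO_3DES_CBC;
 *	cri.cri_klen = 192;			key length in bits
 *	cri.cri_key = mykey;
 *	mutex_spin_enter(&crypto_mtx);		newsession wants the mutex
 *	err = crypto_newsession(&sid, &cri, 0);
 *	mutex_spin_exit(&crypto_mtx);
 *	crp = crypto_getreq(1);			one descriptor
 *	crp->crp_sid = sid;
 *	crp->crp_callback = mydone;		runs in the cryptoret thread
 *	... fill in crp->crp_desc, buffers, flags ...
 *	crypto_dispatch(crp);
 */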