/*	$NetBSD: crypto.c,v 1.114 2020/04/08 15:27:18 pgoyette Exp $ */
/*	$FreeBSD: src/sys/opencrypto/crypto.c,v 1.4.2.5 2003/02/26 00:14:05 sam Exp $	*/
/*	$OpenBSD: crypto.c,v 1.41 2002/07/17 23:52:38 art Exp $	*/

/*-
 * Copyright (c) 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Coyote Point Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu)
 *
 * This code was written by Angelos D. Keromytis in Athens, Greece, in
 * February 2000.  Network Security Technologies Inc. (NSTI) kindly
 * supported the development of this code.
 *
 * Copyright (c) 2000, 2001 Angelos D. Keromytis
 *
 * Permission to use, copy, and modify this software with or without fee
 * is hereby granted, provided that this entire notice is included in
 * all source code copies of any software which is or includes a copy or
 * modification of this software.
 *
 * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
 * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
 * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
 * PURPOSE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: crypto.c,v 1.114 2020/04/08 15:27:18 pgoyette Exp $");

#include <sys/param.h>
#include <sys/reboot.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/pool.h>
#include <sys/kthread.h>
#include <sys/once.h>
#include <sys/sysctl.h>
#include <sys/intr.h>
#include <sys/errno.h>
#include <sys/module.h>
#include <sys/xcall.h>
#include <sys/device.h>
#include <sys/cpu.h>
#include <sys/percpu.h>
#include <sys/kmem.h>

#if defined(_KERNEL_OPT)
#include "opt_ocf.h"
#endif

#include <opencrypto/cryptodev.h>
#include <opencrypto/xform.h>		/* XXX for M_XDATA */

/*
 * Crypto drivers register themselves by allocating a slot in the
 * crypto_drivers table with crypto_get_driverid() and then registering
 * each algorithm they support with crypto_register() and crypto_kregister().
 */
/* Don't directly access crypto_drivers[i]; use crypto_checkdriver(i). */
static struct {
	kmutex_t mtx;
	int num;
	struct cryptocap *list;
} crypto_drv __cacheline_aligned;
#define crypto_drv_mtx		(crypto_drv.mtx)
#define crypto_drivers_num	(crypto_drv.num)
#define crypto_drivers		(crypto_drv.list)

static void *crypto_q_si;
static void *crypto_ret_si;

/*
 * There are two queues for crypto requests; one for symmetric (e.g.
 * cipher) operations and one for asymmetric (e.g. MOD) operations.
 * See below for how synchronization is handled.
 */
TAILQ_HEAD(crypto_crp_q, cryptop);
TAILQ_HEAD(crypto_crp_kq, cryptkop);
struct crypto_crp_qs {
	struct crypto_crp_q *crp_q;
	struct crypto_crp_kq *crp_kq;
};
static percpu_t *crypto_crp_qs_percpu;

static inline struct crypto_crp_qs *
crypto_get_crp_qs(int *s)
{

	KASSERT(s != NULL);

	*s = splsoftnet();
	return percpu_getref(crypto_crp_qs_percpu);
}

static inline void
crypto_put_crp_qs(int *s)
{

	KASSERT(s != NULL);

	percpu_putref(crypto_crp_qs_percpu);
	splx(*s);
}

static void
crypto_crp_q_is_busy_pc(void *p, void *arg, struct cpu_info *ci __unused)
{
	struct crypto_crp_qs *qs_pc = p;
	bool *isbusy = arg;

	if (!TAILQ_EMPTY(qs_pc->crp_q) || !TAILQ_EMPTY(qs_pc->crp_kq))
		*isbusy = true;
}

static void
crypto_crp_qs_init_pc(void *p, void *arg __unused, struct cpu_info *ci __unused)
{
	struct crypto_crp_qs *qs = p;

	qs->crp_q = kmem_alloc(sizeof(struct crypto_crp_q), KM_SLEEP);
	qs->crp_kq = kmem_alloc(sizeof(struct crypto_crp_kq), KM_SLEEP);

	TAILQ_INIT(qs->crp_q);
	TAILQ_INIT(qs->crp_kq);
}

/*
 * There are two queues for processing completed crypto requests; one
 * for the symmetric and one for the asymmetric ops.  We only need one
 * but have two to avoid type futzing (cryptop vs. cryptkop).  See below
 * for how synchronization is handled.
 */
TAILQ_HEAD(crypto_crp_ret_q, cryptop);
TAILQ_HEAD(crypto_crp_ret_kq, cryptkop);
struct crypto_crp_ret_qs {
	kmutex_t crp_ret_q_mtx;
	bool crp_ret_q_exit_flag;

	struct crypto_crp_ret_q crp_ret_q;
	int crp_ret_q_len;
	int crp_ret_q_maxlen; /* queue length limit. <=0 means unlimited. */
	int crp_ret_q_drops;

	struct crypto_crp_ret_kq crp_ret_kq;
	int crp_ret_kq_len;
	int crp_ret_kq_maxlen; /* queue length limit. <=0 means unlimited. */
	int crp_ret_kq_drops;
};
struct crypto_crp_ret_qs **crypto_crp_ret_qs_list;


static inline struct crypto_crp_ret_qs *
crypto_get_crp_ret_qs(struct cpu_info *ci)
{
	u_int cpuid;
	struct crypto_crp_ret_qs *qs;

	KASSERT(ci != NULL);

	cpuid = cpu_index(ci);
	qs = crypto_crp_ret_qs_list[cpuid];
	mutex_enter(&qs->crp_ret_q_mtx);
	return qs;
}

static inline void
crypto_put_crp_ret_qs(struct cpu_info *ci)
{
	u_int cpuid;
	struct crypto_crp_ret_qs *qs;

	KASSERT(ci != NULL);

	cpuid = cpu_index(ci);
	qs = crypto_crp_ret_qs_list[cpuid];
	mutex_exit(&qs->crp_ret_q_mtx);
}
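/*
 * Usage sketch (mirrors crypto_dispatch() and crypto_done() below):
 * the submit queues are reached through the percpu pair, the return
 * queues through the per-CPU mutex taken in crypto_get_crp_ret_qs():
 *
 *	int s;
 *	struct crypto_crp_qs *qs = crypto_get_crp_qs(&s);
 *	TAILQ_INSERT_TAIL(qs->crp_q, crp, crp_next);
 *	crypto_put_crp_qs(&s);
 *
 *	struct crypto_crp_ret_qs *rqs = crypto_get_crp_ret_qs(curcpu());
 *	... inspect or modify rqs->crp_ret_q / rqs->crp_ret_kq ...
 *	crypto_put_crp_ret_qs(curcpu());
 */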

#ifndef CRYPTO_RET_Q_MAXLEN
#define CRYPTO_RET_Q_MAXLEN 0
#endif
#ifndef CRYPTO_RET_KQ_MAXLEN
#define CRYPTO_RET_KQ_MAXLEN 0
#endif

static int
sysctl_opencrypto_q_len(SYSCTLFN_ARGS)
{
	int error, len = 0;
	struct sysctlnode node = *rnode;

	for (int i = 0; i < ncpu; i++) {
		struct crypto_crp_ret_qs *qs;
		struct cpu_info *ci = cpu_lookup(i);

		qs = crypto_get_crp_ret_qs(ci);
		len += qs->crp_ret_q_len;
		crypto_put_crp_ret_qs(ci);
	}

	node.sysctl_data = &len;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return error;

	return 0;
}

static int
sysctl_opencrypto_q_drops(SYSCTLFN_ARGS)
{
	int error, drops = 0;
	struct sysctlnode node = *rnode;

	for (int i = 0; i < ncpu; i++) {
		struct crypto_crp_ret_qs *qs;
		struct cpu_info *ci = cpu_lookup(i);

		qs = crypto_get_crp_ret_qs(ci);
		drops += qs->crp_ret_q_drops;
		crypto_put_crp_ret_qs(ci);
	}

	node.sysctl_data = &drops;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return error;

	return 0;
}
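/*
 * Note: these handlers are attached by sysctl_opencrypto_setup() below
 * under the "opencrypto" node (e.g. opencrypto.crypto_ret_q.len).  The
 * read-only "len" and "drops" nodes aggregate the per-CPU return
 * queues; writing "maxlen" propagates the single value to every CPU.
 */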

static int
sysctl_opencrypto_q_maxlen(SYSCTLFN_ARGS)
{
	int error, maxlen;
	struct crypto_crp_ret_qs *qs;
	struct sysctlnode node = *rnode;

	/* Every CPU's crp_ret_q_maxlen is the same. */
	qs = crypto_get_crp_ret_qs(curcpu());
	maxlen = qs->crp_ret_q_maxlen;
	crypto_put_crp_ret_qs(curcpu());

	node.sysctl_data = &maxlen;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return error;

	for (int i = 0; i < ncpu; i++) {
		struct cpu_info *ci = cpu_lookup(i);

		qs = crypto_get_crp_ret_qs(ci);
		qs->crp_ret_q_maxlen = maxlen;
		crypto_put_crp_ret_qs(ci);
	}

	return 0;
}

static int
sysctl_opencrypto_kq_len(SYSCTLFN_ARGS)
{
	int error, len = 0;
	struct sysctlnode node = *rnode;

	for (int i = 0; i < ncpu; i++) {
		struct crypto_crp_ret_qs *qs;
		struct cpu_info *ci = cpu_lookup(i);

		qs = crypto_get_crp_ret_qs(ci);
		len += qs->crp_ret_kq_len;
		crypto_put_crp_ret_qs(ci);
	}

	node.sysctl_data = &len;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return error;

	return 0;
}

static int
sysctl_opencrypto_kq_drops(SYSCTLFN_ARGS)
{
	int error, drops = 0;
	struct sysctlnode node = *rnode;

	for (int i = 0; i < ncpu; i++) {
		struct crypto_crp_ret_qs *qs;
		struct cpu_info *ci = cpu_lookup(i);

		qs = crypto_get_crp_ret_qs(ci);
		drops += qs->crp_ret_kq_drops;
		crypto_put_crp_ret_qs(ci);
	}

	node.sysctl_data = &drops;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return error;

	return 0;
}

static int
sysctl_opencrypto_kq_maxlen(SYSCTLFN_ARGS)
{
	int error, maxlen;
	struct crypto_crp_ret_qs *qs;
	struct sysctlnode node = *rnode;

	/* Every CPU's crp_ret_kq_maxlen is the same. */
	qs = crypto_get_crp_ret_qs(curcpu());
	maxlen = qs->crp_ret_kq_maxlen;
	crypto_put_crp_ret_qs(curcpu());

	node.sysctl_data = &maxlen;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return error;

	for (int i = 0; i < ncpu; i++) {
		struct cpu_info *ci = cpu_lookup(i);

		qs = crypto_get_crp_ret_qs(ci);
		qs->crp_ret_kq_maxlen = maxlen;
		crypto_put_crp_ret_qs(ci);
	}

	return 0;
}

/*
 * Crypto op and descriptor data structures are allocated
 * from separate private zones (FreeBSD) / pools (NetBSD/OpenBSD).
 */
static pool_cache_t cryptop_cache;
static pool_cache_t cryptodesc_cache;
static pool_cache_t cryptkop_cache;

int	crypto_usercrypto = 1;		/* userland may open /dev/crypto */
int	crypto_userasymcrypto = 1;	/* userland may do asym crypto reqs */
/*
 * crypto_devallowsoft is (intended to be) sysctl'able, controlling
 * access to hardware versus software transforms as below:
 *
 * crypto_devallowsoft < 0:  Force userlevel requests to use software
 *                           transforms, always
 * crypto_devallowsoft = 0:  Use hardware if present, grant userlevel
 *                           requests for non-accelerated transforms
 *                           (handling the latter in software)
 * crypto_devallowsoft > 0:  Allow user requests only for transforms which
 *                           are hardware-accelerated.
 */
int	crypto_devallowsoft = 1;	/* only use hardware crypto */

static void
sysctl_opencrypto_setup(struct sysctllog **clog)
{
	const struct sysctlnode *ocnode;
	const struct sysctlnode *retqnode, *retkqnode;

	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		       CTLTYPE_INT, "usercrypto",
		       SYSCTL_DESCR("Enable/disable user-mode access to "
			   "crypto support"),
		       NULL, 0, &crypto_usercrypto, 0,
		       CTL_KERN, CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		       CTLTYPE_INT, "userasymcrypto",
		       SYSCTL_DESCR("Enable/disable user-mode access to "
			   "asymmetric crypto support"),
		       NULL, 0, &crypto_userasymcrypto, 0,
		       CTL_KERN, CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		       CTLTYPE_INT, "cryptodevallowsoft",
		       SYSCTL_DESCR("Enable/disable use of software "
			   "asymmetric crypto support"),
		       NULL, 0, &crypto_devallowsoft, 0,
		       CTL_KERN, CTL_CREATE, CTL_EOL);

	sysctl_createv(clog, 0, NULL, &ocnode,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_NODE, "opencrypto",
		       SYSCTL_DESCR("opencrypto related entries"),
		       NULL, 0, NULL, 0,
		       CTL_CREATE, CTL_EOL);

	sysctl_createv(clog, 0, &ocnode, &retqnode,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_NODE, "crypto_ret_q",
		       SYSCTL_DESCR("crypto_ret_q related entries"),
		       NULL, 0, NULL, 0,
		       CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, &retqnode, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READONLY,
		       CTLTYPE_INT, "len",
		       SYSCTL_DESCR("Current queue length"),
		       sysctl_opencrypto_q_len, 0,
		       NULL, 0,
		       CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, &retqnode, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READONLY,
		       CTLTYPE_INT, "drops",
		       SYSCTL_DESCR("Crypto requests dropped due to full ret queue"),
		       sysctl_opencrypto_q_drops, 0,
		       NULL, 0,
		       CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, &retqnode, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		       CTLTYPE_INT, "maxlen",
		       SYSCTL_DESCR("Maximum allowed queue length"),
		       sysctl_opencrypto_q_maxlen, 0,
		       NULL, 0,
		       CTL_CREATE, CTL_EOL);

	sysctl_createv(clog, 0, &ocnode, &retkqnode,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_NODE, "crypto_ret_kq",
		       SYSCTL_DESCR("crypto_ret_kq related entries"),
		       NULL, 0, NULL, 0,
		       CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, &retkqnode, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READONLY,
		       CTLTYPE_INT, "len",
		       SYSCTL_DESCR("Current queue length"),
		       sysctl_opencrypto_kq_len, 0,
		       NULL, 0,
		       CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, &retkqnode, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READONLY,
		       CTLTYPE_INT, "drops",
		       SYSCTL_DESCR("Crypto requests dropped due to full ret queue"),
		       sysctl_opencrypto_kq_drops, 0,
		       NULL, 0,
		       CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, &retkqnode, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		       CTLTYPE_INT, "maxlen",
		       SYSCTL_DESCR("Maximum allowed queue length"),
		       sysctl_opencrypto_kq_maxlen, 0,
		       NULL, 0,
		       CTL_CREATE, CTL_EOL);
}

/*
 * Synchronization: read carefully, this is non-trivial.
 *
 * Crypto requests are submitted via crypto_dispatch.  Typically
 * these come in from network protocols at spl0 (output path) or
 * spl[,soft]net (input path).
 *
 * Requests are typically passed on to the driver directly, but they
 * may also be queued for processing by a software interrupt thread,
 * cryptointr, that runs at splsoftcrypto.  This thread dispatches
 * the requests to crypto drivers (h/w or s/w) who call crypto_done
 * when a request is complete.  Hardware crypto drivers are assumed
 * to register their IRQ's as network devices so their interrupt handlers
 * and subsequent "done callbacks" happen at spl[imp,net].
 *
 * Completed crypto ops are queued for a separate kernel thread that
 * handles the callbacks at spl0.  This decoupling ensures the crypto
 * driver interrupt service routine is not delayed while the callback
 * takes place and that callbacks are delivered after a context switch
 * (as opposed to a software interrupt that clients must block).
 *
 * This scheme is not intended for SMP machines.
 */
static	void cryptointr(void *);	/* swi thread to dispatch ops */
static	void cryptoret_softint(void *);	/* softint to do callbacks */
static	int crypto_destroy(bool);
static	int crypto_invoke(struct cryptop *crp, int hint);
static	int crypto_kinvoke(struct cryptkop *krp, int hint);

static struct cryptocap *crypto_checkdriver_lock(u_int32_t);
static struct cryptocap *crypto_checkdriver_uninit(u_int32_t);
static struct cryptocap *crypto_checkdriver(u_int32_t);
static void crypto_driver_lock(struct cryptocap *);
static void crypto_driver_unlock(struct cryptocap *);
static void crypto_driver_clear(struct cryptocap *);

static int crypto_init_finalize(device_t);

static struct cryptostats cryptostats;
#ifdef CRYPTO_TIMING
static	int crypto_timing = 0;
#endif

static struct sysctllog *sysctl_opencrypto_clog;

static void
crypto_crp_ret_qs_init(void)
{
	int i;

	crypto_crp_ret_qs_list = kmem_alloc(sizeof(struct crypto_crp_ret_qs *) * ncpu,
	    KM_SLEEP);

	for (i = 0; i < ncpu; i++) {
		struct crypto_crp_ret_qs *qs;

		qs = kmem_alloc(sizeof(struct crypto_crp_ret_qs), KM_SLEEP);
		mutex_init(&qs->crp_ret_q_mtx, MUTEX_DEFAULT, IPL_NET);
		qs->crp_ret_q_exit_flag = false;

		TAILQ_INIT(&qs->crp_ret_q);
		qs->crp_ret_q_len = 0;
		qs->crp_ret_q_maxlen = CRYPTO_RET_Q_MAXLEN;
		qs->crp_ret_q_drops = 0;

		TAILQ_INIT(&qs->crp_ret_kq);
		qs->crp_ret_kq_len = 0;
		qs->crp_ret_kq_maxlen = CRYPTO_RET_KQ_MAXLEN;
		qs->crp_ret_kq_drops = 0;

		crypto_crp_ret_qs_list[i] = qs;
	}
}

static int
crypto_init0(void)
{

	mutex_init(&crypto_drv_mtx, MUTEX_DEFAULT, IPL_NONE);
	cryptop_cache = pool_cache_init(sizeof(struct cryptop),
	    coherency_unit, 0, 0, "cryptop", NULL, IPL_NET, NULL, NULL, NULL);
	cryptodesc_cache = pool_cache_init(sizeof(struct cryptodesc),
	    coherency_unit, 0, 0, "cryptdesc", NULL, IPL_NET, NULL, NULL, NULL);
	cryptkop_cache = pool_cache_init(sizeof(struct cryptkop),
	    coherency_unit, 0, 0, "cryptkop", NULL, IPL_NET, NULL, NULL, NULL);

	crypto_crp_qs_percpu = percpu_create(sizeof(struct crypto_crp_qs),
	    crypto_crp_qs_init_pc, /*XXX*/NULL, NULL);

	crypto_crp_ret_qs_init();

	crypto_drivers = kmem_zalloc(CRYPTO_DRIVERS_INITIAL *
	    sizeof(struct cryptocap), KM_SLEEP);
	crypto_drivers_num = CRYPTO_DRIVERS_INITIAL;

	crypto_q_si = softint_establish(SOFTINT_NET|SOFTINT_MPSAFE, cryptointr, NULL);
	if (crypto_q_si == NULL) {
		printf("crypto_init: cannot establish request queue handler\n");
		return crypto_destroy(false);
	}

	/*
	 * Some encryption devices (such as mvcesa) are attached before
	 * ipi_sysinit().  That would cause an assertion in ipi_register(),
	 * as the crypto_ret_si softint uses SOFTINT_RCPU, so establishing
	 * it is deferred to a config_finalize(9) callback.
	 */
	if (config_finalize_register(NULL, crypto_init_finalize) != 0) {
		printf("crypto_init: cannot register crypto_init_finalize\n");
		return crypto_destroy(false);
	}

	sysctl_opencrypto_setup(&sysctl_opencrypto_clog);

	return 0;
}

static int
crypto_init_finalize(device_t self __unused)
{

	crypto_ret_si = softint_establish(SOFTINT_NET|SOFTINT_MPSAFE|SOFTINT_RCPU,
	    &cryptoret_softint, NULL);
	KASSERT(crypto_ret_si != NULL);

	return 0;
}

int
crypto_init(void)
{
	static ONCE_DECL(crypto_init_once);

	return RUN_ONCE(&crypto_init_once, crypto_init0);
}

static int
crypto_destroy(bool exit_kthread)
{
	int i;

	if (exit_kthread) {
		struct cryptocap *cap = NULL;
		bool is_busy = false;

		/* if we have any in-progress requests, don't unload */
		percpu_foreach(crypto_crp_qs_percpu, crypto_crp_q_is_busy_pc,
		    &is_busy);
		if (is_busy)
			return EBUSY;
		/* FIXME:
		 * prohibit enqueueing to crp_q and crp_kq after here.
		 */

		mutex_enter(&crypto_drv_mtx);
		for (i = 0; i < crypto_drivers_num; i++) {
			cap = crypto_checkdriver(i);
			if (cap == NULL)
				continue;
			if (cap->cc_sessions != 0) {
				mutex_exit(&crypto_drv_mtx);
				return EBUSY;
			}
		}
		mutex_exit(&crypto_drv_mtx);
		/* FIXME:
		 * prohibit touching crypto_drivers[] and each element
		 * after here.
		 */

		/* Ensure cryptoret_softint() is never scheduled again. */
		for (i = 0; i < ncpu; i++) {
			struct crypto_crp_ret_qs *qs;
			struct cpu_info *ci = cpu_lookup(i);

			qs = crypto_get_crp_ret_qs(ci);
			qs->crp_ret_q_exit_flag = true;
			crypto_put_crp_ret_qs(ci);
		}
	}

	if (sysctl_opencrypto_clog != NULL)
		sysctl_teardown(&sysctl_opencrypto_clog);

	if (crypto_ret_si != NULL)
		softint_disestablish(crypto_ret_si);

	if (crypto_q_si != NULL)
		softint_disestablish(crypto_q_si);

	mutex_enter(&crypto_drv_mtx);
	if (crypto_drivers != NULL)
		kmem_free(crypto_drivers,
		    crypto_drivers_num * sizeof(struct cryptocap));
	mutex_exit(&crypto_drv_mtx);

	percpu_free(crypto_crp_qs_percpu, sizeof(struct crypto_crp_qs));

	pool_cache_destroy(cryptop_cache);
	pool_cache_destroy(cryptodesc_cache);
	pool_cache_destroy(cryptkop_cache);

	mutex_destroy(&crypto_drv_mtx);

	return 0;
}

static bool
crypto_driver_suitable(struct cryptocap *cap, struct cryptoini *cri)
{
	struct cryptoini *cr;

	for (cr = cri; cr; cr = cr->cri_next)
		if (cap->cc_alg[cr->cri_alg] == 0) {
			DPRINTF("alg %d not supported\n", cr->cri_alg);
			return false;
		}

	return true;
}

#define CRYPTO_ACCEPT_HARDWARE 0x1
#define CRYPTO_ACCEPT_SOFTWARE 0x2
/*
 * The algorithm we use here is pretty stupid; just use the
 * first driver that supports all the algorithms we need.
 * If there are multiple drivers we choose the driver with
 * the fewest active sessions.  We prefer hardware-backed
 * drivers to software ones.
 *
 * XXX We need more smarts here (in real life too, but that's
 * XXX another story altogether).
 */
static struct cryptocap *
crypto_select_driver_lock(struct cryptoini *cri, int hard)
{
	u_int32_t hid;
	int accept;
	struct cryptocap *cap, *best;
	int error = 0;

	best = NULL;
	/*
	 * hard == 0 can use both hardware and software drivers.
	 * We prefer hardware drivers over software drivers, so search
	 * the hardware drivers first.
	 */
	if (hard >= 0)
		accept = CRYPTO_ACCEPT_HARDWARE;
	else
		accept = CRYPTO_ACCEPT_SOFTWARE;
again:
	for (hid = 0; hid < crypto_drivers_num; hid++) {
		cap = crypto_checkdriver(hid);
		if (cap == NULL)
			continue;

		crypto_driver_lock(cap);

		/*
		 * If it's not initialized or has remaining sessions
		 * referencing it, skip.
		 */
		if (cap->cc_newsession == NULL ||
		    (cap->cc_flags & CRYPTOCAP_F_CLEANUP)) {
			crypto_driver_unlock(cap);
			continue;
		}

		/* Hardware required -- ignore software drivers. */
		if ((accept & CRYPTO_ACCEPT_SOFTWARE) == 0
		    && (cap->cc_flags & CRYPTOCAP_F_SOFTWARE)) {
			crypto_driver_unlock(cap);
			continue;
		}
		/* Software required -- ignore hardware drivers. */
		if ((accept & CRYPTO_ACCEPT_HARDWARE) == 0
		    && (cap->cc_flags & CRYPTOCAP_F_SOFTWARE) == 0) {
			crypto_driver_unlock(cap);
			continue;
		}

		/* See if all the algorithms are supported. */
		if (crypto_driver_suitable(cap, cri)) {
			if (best == NULL) {
				/* keep holding crypto_driver_lock(cap) */
				best = cap;
				continue;
			} else if (cap->cc_sessions < best->cc_sessions) {
				crypto_driver_unlock(best);
				/* keep holding crypto_driver_lock(cap) */
				best = cap;
				continue;
			}
		}

		crypto_driver_unlock(cap);
	}
	if (best == NULL && hard == 0
	    && (accept & CRYPTO_ACCEPT_SOFTWARE) == 0) {
		accept = CRYPTO_ACCEPT_SOFTWARE;
		goto again;
	}

	if (best == NULL && hard == 0 && error == 0) {
		mutex_exit(&crypto_drv_mtx);
		error = module_autoload("swcrypto", MODULE_CLASS_DRIVER);
		mutex_enter(&crypto_drv_mtx);
		if (error == 0) {
			error = EINVAL;
			goto again;
		}
	}

	return best;
}

/*
 * Create a new session.
 */
int
crypto_newsession(u_int64_t *sid, struct cryptoini *cri, int hard)
{
	struct cryptocap *cap;
	int err = EINVAL;

	mutex_enter(&crypto_drv_mtx);

	cap = crypto_select_driver_lock(cri, hard);
	if (cap != NULL) {
		u_int32_t hid, lid;

		hid = cap - crypto_drivers;
		/*
		 * Can't do everything in one session.
		 *
		 * XXX Fix this. We need to inject a "virtual" session
		 * XXX layer right about here.
		 */

		/* Call the driver initialization routine. */
		lid = hid;		/* Pass the driver ID. */
		crypto_driver_unlock(cap);
		err = cap->cc_newsession(cap->cc_arg, &lid, cri);
		crypto_driver_lock(cap);
		if (err == 0) {
			(*sid) = hid;
			(*sid) <<= 32;
			(*sid) |= (lid & 0xffffffff);
			(cap->cc_sessions)++;
		} else {
			DPRINTF("crypto_drivers[%d].cc_newsession() failed. error=%d\n",
			    hid, err);
		}
		crypto_driver_unlock(cap);
	}

	mutex_exit(&crypto_drv_mtx);

	return err;
}
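/*
 * Note: crypto_newsession() packs the driver index (hid) into the
 * upper 32 bits of the session ID and the driver-local session ID
 * (lid) into the lower 32 bits, i.e.:
 *
 *	sid = ((u_int64_t)hid << 32) | (lid & 0xffffffff);
 *
 * CRYPTO_SESID2HID() and CRYPTO_SESID2LID() recover the two halves.
 */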
/*
 * Delete an existing session (or a reserved session on an unregistered
 * driver).
 */
int
crypto_freesession(u_int64_t sid)
{
	struct cryptocap *cap;
	int err = 0;

	/* Determine the driver from the hid half of the session ID. */
	cap = crypto_checkdriver_lock(CRYPTO_SESID2HID(sid));
	if (cap == NULL)
		return ENOENT;

	if (cap->cc_sessions)
		(cap->cc_sessions)--;

	/* Call the driver cleanup routine, if available. */
	if (cap->cc_freesession)
		err = cap->cc_freesession(cap->cc_arg, sid);
	else
		err = 0;

	/*
	 * If this was the last session of a driver marked as invalid,
	 * make the entry available for reuse.
	 */
	if ((cap->cc_flags & CRYPTOCAP_F_CLEANUP) && cap->cc_sessions == 0)
		crypto_driver_clear(cap);

	crypto_driver_unlock(cap);
	return err;
}

static bool
crypto_checkdriver_initialized(const struct cryptocap *cap)
{

	return cap->cc_process != NULL ||
	    (cap->cc_flags & CRYPTOCAP_F_CLEANUP) != 0 ||
	    cap->cc_sessions != 0;
}

/*
 * Return an unused driver id.  Used by drivers prior to registering
 * support for the algorithms they handle.
 */
int32_t
crypto_get_driverid(u_int32_t flags)
{
	struct cryptocap *newdrv;
	struct cryptocap *cap = NULL;
	int i;

	(void)crypto_init();		/* XXX oh, this is foul! */

	mutex_enter(&crypto_drv_mtx);
	for (i = 0; i < crypto_drivers_num; i++) {
		cap = crypto_checkdriver_uninit(i);
		if (cap == NULL || crypto_checkdriver_initialized(cap)) {
			cap = NULL;	/* this slot is not free */
			continue;
		}
		break;
	}

	/* Out of entries, allocate some more. */
	if (cap == NULL) {
		/* Be careful about wrap-around. */
		if (2 * crypto_drivers_num <= crypto_drivers_num) {
			mutex_exit(&crypto_drv_mtx);
			printf("crypto: driver count wraparound!\n");
			return -1;
		}

		newdrv = kmem_zalloc(2 * crypto_drivers_num *
		    sizeof(struct cryptocap), KM_SLEEP);
		memcpy(newdrv, crypto_drivers,
		    crypto_drivers_num * sizeof(struct cryptocap));
		kmem_free(crypto_drivers,
		    crypto_drivers_num * sizeof(struct cryptocap));

		crypto_drivers_num *= 2;
		crypto_drivers = newdrv;

		cap = crypto_checkdriver_uninit(i);
		KASSERT(cap != NULL);
	}

	/* NB: state is zero'd on free */
	cap->cc_sessions = 1;	/* Mark */
	cap->cc_flags = flags;
	mutex_init(&cap->cc_lock, MUTEX_DEFAULT, IPL_NET);

	if (bootverbose)
		printf("crypto: assign driver %u, flags %u\n", i, flags);

	mutex_exit(&crypto_drv_mtx);

	return i;
}

static struct cryptocap *
crypto_checkdriver_lock(u_int32_t hid)
{
	struct cryptocap *cap;

	KASSERT(crypto_drivers != NULL);

	if (hid >= crypto_drivers_num)
		return NULL;

	cap = &crypto_drivers[hid];
	mutex_enter(&cap->cc_lock);
	return cap;
}
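/*
 * Three lookup flavours for crypto_drivers[]: crypto_checkdriver_lock()
 * above returns the slot with cc_lock held and performs no
 * initialization check; crypto_checkdriver() (crypto_drv_mtx held)
 * returns only initialized slots; crypto_checkdriver_uninit() returns
 * the raw slot even if it has never been initialized.
 */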
/*
 * Use crypto_checkdriver_uninit() instead of crypto_checkdriver() in the
 * following two situations:
 * - crypto_drivers[] may not be allocated
 * - crypto_drivers[hid] may not be initialized
 */
static struct cryptocap *
crypto_checkdriver_uninit(u_int32_t hid)
{

	KASSERT(mutex_owned(&crypto_drv_mtx));

	if (crypto_drivers == NULL)
		return NULL;

	return (hid >= crypto_drivers_num ? NULL : &crypto_drivers[hid]);
}

/*
 * Unlike crypto_checkdriver_uninit(), this returns the slot only if it
 * has been initialized; see the two situations listed above.
 */
static struct cryptocap *
crypto_checkdriver(u_int32_t hid)
{

	KASSERT(mutex_owned(&crypto_drv_mtx));

	if (crypto_drivers == NULL || hid >= crypto_drivers_num)
		return NULL;

	struct cryptocap *cap = &crypto_drivers[hid];
	return crypto_checkdriver_initialized(cap) ? cap : NULL;
}

static inline void
crypto_driver_lock(struct cryptocap *cap)
{

	KASSERT(cap != NULL);

	mutex_enter(&cap->cc_lock);
}

static inline void
crypto_driver_unlock(struct cryptocap *cap)
{

	KASSERT(cap != NULL);

	mutex_exit(&cap->cc_lock);
}

static void
crypto_driver_clear(struct cryptocap *cap)
{

	if (cap == NULL)
		return;

	KASSERT(mutex_owned(&cap->cc_lock));

	cap->cc_sessions = 0;
	memset(&cap->cc_max_op_len, 0, sizeof(cap->cc_max_op_len));
	memset(&cap->cc_alg, 0, sizeof(cap->cc_alg));
	memset(&cap->cc_kalg, 0, sizeof(cap->cc_kalg));
	cap->cc_flags = 0;
	cap->cc_qblocked = 0;
	cap->cc_kqblocked = 0;

	cap->cc_arg = NULL;
	cap->cc_newsession = NULL;
	cap->cc_process = NULL;
	cap->cc_freesession = NULL;
	cap->cc_kprocess = NULL;
}

/*
 * Register support for a key-related algorithm.  This routine
 * is called once for each algorithm supported by a driver.
 */
int
crypto_kregister(u_int32_t driverid, int kalg, u_int32_t flags,
    int (*kprocess)(void *, struct cryptkop *, int),
    void *karg)
{
	struct cryptocap *cap;
	int err;

	mutex_enter(&crypto_drv_mtx);

	cap = crypto_checkdriver_lock(driverid);
	if (cap != NULL &&
	    (CRK_ALGORITM_MIN <= kalg && kalg <= CRK_ALGORITHM_MAX)) {
		/*
		 * XXX Do some performance testing to determine placing.
		 * XXX We probably need an auxiliary data structure that
		 * XXX describes relative performances.
		 */

		cap->cc_kalg[kalg] = flags | CRYPTO_ALG_FLAG_SUPPORTED;
		if (bootverbose) {
			printf("crypto: driver %u registers key alg %u "
			    "flags %u\n",
			    driverid,
			    kalg,
			    flags
			);
		}

		if (cap->cc_kprocess == NULL) {
			cap->cc_karg = karg;
			cap->cc_kprocess = kprocess;
		}
		err = 0;
	} else
		err = EINVAL;

	if (cap != NULL)
		crypto_driver_unlock(cap);
	mutex_exit(&crypto_drv_mtx);
	return err;
}

/*
 * Register support for a non-key-related algorithm.  This routine
 * is called once for each such algorithm supported by a driver.
 */
int
crypto_register(u_int32_t driverid, int alg, u_int16_t maxoplen,
    u_int32_t flags,
    int (*newses)(void *, u_int32_t*, struct cryptoini*),
    int (*freeses)(void *, u_int64_t),
    int (*process)(void *, struct cryptop *, int),
    void *arg)
{
	struct cryptocap *cap;
	int err;

	cap = crypto_checkdriver_lock(driverid);
	if (cap == NULL)
		return EINVAL;

	/* NB: algorithms are in the range [1..max] */
	if (CRYPTO_ALGORITHM_MIN <= alg && alg <= CRYPTO_ALGORITHM_MAX) {
		/*
		 * XXX Do some performance testing to determine placing.
		 * XXX We probably need an auxiliary data structure that
		 * XXX describes relative performances.
		 */

		cap->cc_alg[alg] = flags | CRYPTO_ALG_FLAG_SUPPORTED;
		cap->cc_max_op_len[alg] = maxoplen;
		if (bootverbose) {
			printf("crypto: driver %u registers alg %u "
			    "flags %u maxoplen %u\n",
			    driverid,
			    alg,
			    flags,
			    maxoplen
			);
		}

		if (cap->cc_process == NULL) {
			cap->cc_arg = arg;
			cap->cc_newsession = newses;
			cap->cc_process = process;
			cap->cc_freesession = freeses;
			cap->cc_sessions = 0;		/* Unmark */
		}
		err = 0;
	} else
		err = EINVAL;

	crypto_driver_unlock(cap);

	return err;
}

static int
crypto_unregister_locked(struct cryptocap *cap, int alg, bool all)
{
	int i;
	u_int32_t ses;
	bool lastalg = true;

	KASSERT(cap != NULL);
	KASSERT(mutex_owned(&cap->cc_lock));

	if (alg < CRYPTO_ALGORITHM_MIN || CRYPTO_ALGORITHM_MAX < alg)
		return EINVAL;

	if (!all && cap->cc_alg[alg] == 0)
		return EINVAL;

	cap->cc_alg[alg] = 0;
	cap->cc_max_op_len[alg] = 0;

	if (all) {
		if (alg != CRYPTO_ALGORITHM_MAX)
			lastalg = false;
	} else {
		/* Was this the last algorithm? */
		for (i = CRYPTO_ALGORITHM_MIN; i <= CRYPTO_ALGORITHM_MAX; i++)
			if (cap->cc_alg[i] != 0) {
				lastalg = false;
				break;
			}
	}
	if (lastalg) {
		ses = cap->cc_sessions;
		crypto_driver_clear(cap);
		if (ses != 0) {
			/*
			 * If there are pending sessions, just mark as invalid.
			 */
			cap->cc_flags |= CRYPTOCAP_F_CLEANUP;
			cap->cc_sessions = ses;
		}
	}

	return 0;
}

/*
 * Unregister a crypto driver.  If there are pending sessions using it,
 * leave enough information around so that subsequent calls using those
 * sessions will correctly detect the driver has been unregistered and
 * reroute requests.
 */
int
crypto_unregister(u_int32_t driverid, int alg)
{
	int err;
	struct cryptocap *cap;

	cap = crypto_checkdriver_lock(driverid);
	if (cap == NULL)
		return EINVAL;
	err = crypto_unregister_locked(cap, alg, false);
	crypto_driver_unlock(cap);

	return err;
}

/*
 * Unregister all algorithms associated with a crypto driver.
 * If there are pending sessions using it, leave enough information
 * around so that subsequent calls using those sessions will
 * correctly detect the driver has been unregistered and reroute
 * requests.
 */
int
crypto_unregister_all(u_int32_t driverid)
{
	int err, i;
	struct cryptocap *cap;

	cap = crypto_checkdriver_lock(driverid);
	if (cap == NULL)
		return EINVAL;
	for (i = CRYPTO_ALGORITHM_MIN; i <= CRYPTO_ALGORITHM_MAX; i++) {
		err = crypto_unregister_locked(cap, i, true);
		if (err)
			break;
	}
	crypto_driver_unlock(cap);

	return err;
}
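/*
 * Example (sketch, hypothetical "xyz" driver): the registration
 * sequence a driver typically performs at attach time; CRYPTO_AES_CBC
 * is just an illustrative algorithm:
 *
 *	int32_t hid = crypto_get_driverid(0);
 *	if (hid < 0)
 *		return;
 *	crypto_register(hid, CRYPTO_AES_CBC, 0, 0,
 *	    xyz_newsession, xyz_freesession, xyz_process, sc);
 *
 * At detach time the driver calls crypto_unregister_all(hid).
 */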
/*
 * Clear blockage on a driver.  The what parameter indicates whether
 * the driver is now ready for cryptop's and/or cryptkop's.
 */
int
crypto_unblock(u_int32_t driverid, int what)
{
	struct cryptocap *cap;
	int needwakeup = 0;

	cap = crypto_checkdriver_lock(driverid);
	if (cap == NULL)
		return EINVAL;

	if (what & CRYPTO_SYMQ) {
		needwakeup |= cap->cc_qblocked;
		cap->cc_qblocked = 0;
	}
	if (what & CRYPTO_ASYMQ) {
		needwakeup |= cap->cc_kqblocked;
		cap->cc_kqblocked = 0;
	}
	crypto_driver_unlock(cap);
	if (needwakeup) {
		kpreempt_disable();
		softint_schedule(crypto_q_si);
		kpreempt_enable();
	}

	return 0;
}

/*
 * Dispatch a crypto request to a driver or queue
 * it, to be processed by the kernel thread.
 */
int
crypto_dispatch(struct cryptop *crp)
{
	int result, s;
	struct cryptocap *cap;
	struct crypto_crp_qs *crp_qs;
	struct crypto_crp_q *crp_q;

	KASSERT(crp != NULL);

	DPRINTF("crp %p, alg %d\n", crp, crp->crp_desc->crd_alg);

	cryptostats.cs_ops++;

#ifdef CRYPTO_TIMING
	if (crypto_timing)
		nanouptime(&crp->crp_tstamp);
#endif

	if ((crp->crp_flags & CRYPTO_F_BATCH) != 0) {
		int wasempty;
		/*
		 * Caller marked the request as ``ok to delay'';
		 * queue it for the swi thread.  This is desirable
		 * when the operation is low priority and/or suitable
		 * for batching.
		 *
		 * The list order does not matter for batched jobs.
		 */
		crp_qs = crypto_get_crp_qs(&s);
		crp_q = crp_qs->crp_q;
		wasempty = TAILQ_EMPTY(crp_q);
		TAILQ_INSERT_TAIL(crp_q, crp, crp_next);
		crypto_put_crp_qs(&s);
		crp_q = NULL;
		if (wasempty) {
			kpreempt_disable();
			softint_schedule(crypto_q_si);
			kpreempt_enable();
		}

		return 0;
	}

	crp_qs = crypto_get_crp_qs(&s);
	crp_q = crp_qs->crp_q;
	cap = crypto_checkdriver_lock(CRYPTO_SESID2HID(crp->crp_sid));
	/*
	 * TODO:
	 * If we could ensure the driver stays valid until
	 * crypto_unregister() is done, this migrate operation would
	 * not be required.
	 */
	if (cap == NULL) {
		/*
		 * The driver must have been detached, so this request
		 * will migrate to another driver in cryptointr() later.
		 */
		TAILQ_INSERT_TAIL(crp_q, crp, crp_next);
		result = 0;
		goto out;
	}

	if (cap->cc_qblocked != 0) {
		crypto_driver_unlock(cap);
		/*
		 * The driver is blocked, just queue the op until
		 * it unblocks and the swi thread gets kicked.
		 */
		TAILQ_INSERT_TAIL(crp_q, crp, crp_next);
		result = 0;
		goto out;
	}

	/*
	 * Caller marked the request to be processed
	 * immediately; dispatch it directly to the
	 * driver unless the driver is currently blocked.
	 */
	crypto_driver_unlock(cap);
	result = crypto_invoke(crp, 0);
	if (result == ERESTART) {
		/*
		 * The driver ran out of resources, mark the
		 * driver ``blocked'' for cryptop's and put
		 * the op on the queue.
		 */
		crypto_driver_lock(cap);
		cap->cc_qblocked = 1;
		crypto_driver_unlock(cap);
		TAILQ_INSERT_HEAD(crp_q, crp, crp_next);
		cryptostats.cs_blocks++;

		/*
		 * The crp is enqueued to crp_q, that is,
		 * no error occurs.  So, this function should
		 * not return an error.
		 */
		result = 0;
	}

out:
	crypto_put_crp_qs(&s);
	return result;
}
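/*
 * Usage sketch: a caller that can tolerate latency marks the request
 * ``ok to delay'' and lets the softint thread batch it:
 *
 *	crp->crp_flags |= CRYPTO_F_BATCH;
 *	error = crypto_dispatch(crp);
 *
 * Requests without the flag are handed to the driver immediately
 * (unless the driver is blocked or detached) and fall back to the
 * queue only on ERESTART.
 */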
/*
 * Add an asymmetric crypto request to a queue,
 * to be processed by the kernel thread.
 */
int
crypto_kdispatch(struct cryptkop *krp)
{
	int result, s;
	struct cryptocap *cap;
	struct crypto_crp_qs *crp_qs;
	struct crypto_crp_kq *crp_kq;

	KASSERT(krp != NULL);

	cryptostats.cs_kops++;

	crp_qs = crypto_get_crp_qs(&s);
	crp_kq = crp_qs->crp_kq;
	cap = crypto_checkdriver_lock(krp->krp_hid);
	/*
	 * TODO:
	 * If we could ensure the driver stays valid until
	 * crypto_unregister() is done, this migrate operation would
	 * not be required.
	 */
	if (cap == NULL) {
		TAILQ_INSERT_TAIL(crp_kq, krp, krp_next);
		result = 0;
		goto out;
	}

	if (cap->cc_kqblocked != 0) {
		crypto_driver_unlock(cap);
		/*
		 * The driver is blocked, just queue the op until
		 * it unblocks and the swi thread gets kicked.
		 */
		TAILQ_INSERT_TAIL(crp_kq, krp, krp_next);
		result = 0;
		goto out;
	}

	crypto_driver_unlock(cap);
	result = crypto_kinvoke(krp, 0);
	if (result == ERESTART) {
		/*
		 * The driver ran out of resources, mark the
		 * driver ``blocked'' for cryptkop's and put
		 * the op on the queue.
		 */
		crypto_driver_lock(cap);
		cap->cc_kqblocked = 1;
		crypto_driver_unlock(cap);
		TAILQ_INSERT_HEAD(crp_kq, krp, krp_next);
		cryptostats.cs_kblocks++;

		/*
		 * The krp is enqueued to crp_kq, that is,
		 * no error occurs.  So, this function should
		 * not return an error.
		 */
		result = 0;
	}

out:
	crypto_put_crp_qs(&s);
	return result;
}
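/*
 * Example (sketch, illustrative field values and hypothetical xyz_kcb
 * callback): an asymmetric request is described by a cryptkop before
 * being handed to crypto_kdispatch():
 *
 *	krp = crypto_kgetreq(1, PR_NOWAIT);
 *	cv_init(&krp->krp_cv, "xyzkrp");
 *	krp->krp_op = CRK_MOD_EXP;
 *	krp->krp_iparams = 3;		// base, exponent, modulus
 *	krp->krp_oparams = 1;		// result
 *	krp->krp_callback = xyz_kcb;
 *	error = crypto_kdispatch(krp);
 */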
/*
 * Dispatch an asymmetric crypto request to the appropriate crypto devices.
 */
static int
crypto_kinvoke(struct cryptkop *krp, int hint)
{
	struct cryptocap *cap = NULL;
	u_int32_t hid;
	int error;

	KASSERT(krp != NULL);

	/* Sanity checks. */
	if (krp->krp_callback == NULL) {
		cv_destroy(&krp->krp_cv);
		crypto_kfreereq(krp);
		return EINVAL;
	}

	mutex_enter(&crypto_drv_mtx);
	for (hid = 0; hid < crypto_drivers_num; hid++) {
		cap = crypto_checkdriver(hid);
		if (cap == NULL)
			continue;
		crypto_driver_lock(cap);
		if ((cap->cc_flags & CRYPTOCAP_F_SOFTWARE) &&
		    crypto_devallowsoft == 0) {
			crypto_driver_unlock(cap);
			continue;
		}
		if (cap->cc_kprocess == NULL) {
			crypto_driver_unlock(cap);
			continue;
		}
		if ((cap->cc_kalg[krp->krp_op] &
		    CRYPTO_ALG_FLAG_SUPPORTED) == 0) {
			crypto_driver_unlock(cap);
			continue;
		}
		break;
	}
	if (hid == crypto_drivers_num)
		cap = NULL;	/* the search was exhausted; no match */
	mutex_exit(&crypto_drv_mtx);
	if (cap != NULL) {
		int (*process)(void *, struct cryptkop *, int);
		void *arg;

		process = cap->cc_kprocess;
		arg = cap->cc_karg;
		krp->krp_hid = hid;
		krp->reqcpu = curcpu();
		crypto_driver_unlock(cap);
		error = (*process)(arg, krp, hint);
	} else {
		error = ENODEV;
	}

	if (error) {
		krp->krp_status = error;
		crypto_kdone(krp);
	}
	return 0;
}

#ifdef CRYPTO_TIMING
static void
crypto_tstat(struct cryptotstat *ts, struct timespec *tv)
{
	struct timespec now, t;

	nanouptime(&now);
	t.tv_sec = now.tv_sec - tv->tv_sec;
	t.tv_nsec = now.tv_nsec - tv->tv_nsec;
	if (t.tv_nsec < 0) {
		t.tv_sec--;
		t.tv_nsec += 1000000000;
	}
	timespecadd(&ts->acc, &t, &t);
	if (timespeccmp(&t, &ts->min, <))
		ts->min = t;
	if (timespeccmp(&t, &ts->max, >))
		ts->max = t;
	ts->count++;

	*tv = now;
}
#endif

/*
 * Dispatch a crypto request to the appropriate crypto devices.
 */
static int
crypto_invoke(struct cryptop *crp, int hint)
{
	struct cryptocap *cap;

	KASSERT(crp != NULL);

#ifdef CRYPTO_TIMING
	if (crypto_timing)
		crypto_tstat(&cryptostats.cs_invoke, &crp->crp_tstamp);
#endif
	/* Sanity checks. */
	if (crp->crp_callback == NULL) {
		return EINVAL;
	}
	if (crp->crp_desc == NULL) {
		crp->crp_etype = EINVAL;
		crypto_done(crp);
		return 0;
	}

	cap = crypto_checkdriver_lock(CRYPTO_SESID2HID(crp->crp_sid));
	if (cap != NULL && (cap->cc_flags & CRYPTOCAP_F_CLEANUP) == 0) {
		int (*process)(void *, struct cryptop *, int);
		void *arg;

		process = cap->cc_process;
		arg = cap->cc_arg;
		crp->reqcpu = curcpu();

		/*
		 * Invoke the driver to process the request.
		 */
		DPRINTF("calling process for %p\n", crp);
		crypto_driver_unlock(cap);
		return (*process)(arg, crp, hint);
	} else {
		struct cryptodesc *crd;
		u_int64_t nid = 0;

		if (cap != NULL)
			crypto_driver_unlock(cap);

		/*
		 * Driver has unregistered; migrate the session and return
		 * an error to the caller so they'll resubmit the op.
		 */
		crypto_freesession(crp->crp_sid);

		for (crd = crp->crp_desc; crd->crd_next; crd = crd->crd_next)
			crd->CRD_INI.cri_next = &(crd->crd_next->CRD_INI);

		if (crypto_newsession(&nid, &(crp->crp_desc->CRD_INI), 0) == 0)
			crp->crp_sid = nid;

		crp->crp_etype = EAGAIN;

		crypto_done(crp);
		return 0;
	}
}
/*
 * Release a set of crypto descriptors.
 */
void
crypto_freereq(struct cryptop *crp)
{
	struct cryptodesc *crd;

	if (crp == NULL)
		return;
	DPRINTF("lid[%u]: crp %p\n", CRYPTO_SESID2LID(crp->crp_sid), crp);

	/* sanity check */
	if (crp->crp_flags & CRYPTO_F_ONRETQ) {
		panic("crypto_freereq() freeing crp on RETQ\n");
	}

	while ((crd = crp->crp_desc) != NULL) {
		crp->crp_desc = crd->crd_next;
		pool_cache_put(cryptodesc_cache, crd);
	}
	pool_cache_put(cryptop_cache, crp);
}

/*
 * Acquire a set of crypto descriptors.
 */
struct cryptop *
crypto_getreq(int num)
{
	struct cryptodesc *crd;
	struct cryptop *crp;
	struct crypto_crp_ret_qs *qs;

	/*
	 * If crp_ret_q is already full, refuse to allocate here so that
	 * an error callback cannot overflow crp_ret_q.
	 */
	qs = crypto_get_crp_ret_qs(curcpu());
	if (qs->crp_ret_q_maxlen > 0
	    && qs->crp_ret_q_len > qs->crp_ret_q_maxlen) {
		qs->crp_ret_q_drops++;
		crypto_put_crp_ret_qs(curcpu());
		return NULL;
	}
	crypto_put_crp_ret_qs(curcpu());

	crp = pool_cache_get(cryptop_cache, PR_NOWAIT);
	if (crp == NULL) {
		return NULL;
	}
	memset(crp, 0, sizeof(struct cryptop));

	while (num--) {
		crd = pool_cache_get(cryptodesc_cache, PR_NOWAIT);
		if (crd == NULL) {
			crypto_freereq(crp);
			return NULL;
		}

		memset(crd, 0, sizeof(struct cryptodesc));
		crd->crd_next = crp->crp_desc;
		crp->crp_desc = crd;
	}

	return crp;
}

/*
 * Release a set of asymmetric crypto descriptors.
 * Currently, only one descriptor is supported.
 */
void
crypto_kfreereq(struct cryptkop *krp)
{

	if (krp == NULL)
		return;

	DPRINTF("krp %p\n", krp);

	/* sanity check */
	if (krp->krp_flags & CRYPTO_F_ONRETQ) {
		panic("crypto_kfreereq() freeing krp on RETQ\n");
	}

	pool_cache_put(cryptkop_cache, krp);
}

/*
 * Acquire a set of asymmetric crypto descriptors.
 * Currently, only one descriptor is supported.
 */
struct cryptkop *
crypto_kgetreq(int num __unused, int prflags)
{
	struct cryptkop *krp;
	struct crypto_crp_ret_qs *qs;

	/*
	 * If crp_ret_kq is already full, refuse to allocate here so that
	 * an error callback cannot overflow crp_ret_kq.
	 */
	qs = crypto_get_crp_ret_qs(curcpu());
	if (qs->crp_ret_kq_maxlen > 0
	    && qs->crp_ret_kq_len > qs->crp_ret_kq_maxlen) {
		qs->crp_ret_kq_drops++;
		crypto_put_crp_ret_qs(curcpu());
		return NULL;
	}
	crypto_put_crp_ret_qs(curcpu());

	krp = pool_cache_get(cryptkop_cache, prflags);
	if (krp == NULL) {
		return NULL;
	}
	memset(krp, 0, sizeof(struct cryptkop));

	return krp;
}
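/*
 * Example (sketch): the request lifecycle as seen by a caller; xyz_cb
 * is a hypothetical callback that consumes the result and eventually
 * calls crypto_freereq():
 *
 *	struct cryptop *crp = crypto_getreq(1);	// one descriptor
 *	if (crp == NULL)
 *		return ENOMEM;
 *	crp->crp_sid = sid;			// from crypto_newsession()
 *	crp->crp_callback = xyz_cb;
 *	// ... fill in crp->crp_desc ...
 *	error = crypto_dispatch(crp);
 */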
/*
 * Invoke the callback on behalf of the driver.
 */
void
crypto_done(struct cryptop *crp)
{

	KASSERT(crp != NULL);

	if (crp->crp_etype != 0)
		cryptostats.cs_errs++;
#ifdef CRYPTO_TIMING
	if (crypto_timing)
		crypto_tstat(&cryptostats.cs_done, &crp->crp_tstamp);
#endif
	DPRINTF("lid[%u]: crp %p\n", CRYPTO_SESID2LID(crp->crp_sid), crp);

	/*
	 * Normal case; queue the callback for the thread.
	 *
	 * The return queue is manipulated by the swi thread
	 * and, potentially, by crypto device drivers calling
	 * back to mark operations completed.  Thus we need
	 * to mask both while manipulating the return queue.
	 */
	if (crp->crp_flags & CRYPTO_F_CBIMM) {
		/*
		 * Do the callback directly.  This is ok when the
		 * callback routine does very little (e.g. the
		 * /dev/crypto callback method just does a wakeup).
		 */
		crp->crp_flags |= CRYPTO_F_DONE;

#ifdef CRYPTO_TIMING
		if (crypto_timing) {
			/*
			 * NB: We must copy the timestamp before
			 * doing the callback as the cryptop is
			 * likely to be reclaimed.
			 */
			struct timespec t = crp->crp_tstamp;
			crypto_tstat(&cryptostats.cs_cb, &t);
			crp->crp_callback(crp);
			crypto_tstat(&cryptostats.cs_finis, &t);
		} else
#endif
			crp->crp_callback(crp);
	} else {
		crp->crp_flags |= CRYPTO_F_DONE;
#if 0
		if (crp->crp_flags & CRYPTO_F_USER) {
			/*
			 * TODO:
			 * If crp->crp_flags & CRYPTO_F_USER and the used
			 * encryption driver does all the processing in
			 * the same context, we can skip enqueueing crp_ret_q
			 * and softint_schedule(crypto_ret_si).
			 */
			DPRINTF("lid[%u]: crp %p CRYPTO_F_USER\n",
			    CRYPTO_SESID2LID(crp->crp_sid), crp);
		} else
#endif
		{
			int wasempty;
			struct crypto_crp_ret_qs *qs;
			struct crypto_crp_ret_q *crp_ret_q;

			qs = crypto_get_crp_ret_qs(crp->reqcpu);
			crp_ret_q = &qs->crp_ret_q;
			wasempty = TAILQ_EMPTY(crp_ret_q);
			DPRINTF("lid[%u]: queueing %p\n",
			    CRYPTO_SESID2LID(crp->crp_sid), crp);
			crp->crp_flags |= CRYPTO_F_ONRETQ;
			TAILQ_INSERT_TAIL(crp_ret_q, crp, crp_next);
			qs->crp_ret_q_len++;
			if (wasempty && !qs->crp_ret_q_exit_flag) {
				DPRINTF("lid[%u]: waking cryptoret, "
				    "crp %p hit empty queue.\n",
				    CRYPTO_SESID2LID(crp->crp_sid), crp);
				softint_schedule_cpu(crypto_ret_si, crp->reqcpu);
			}
			crypto_put_crp_ret_qs(crp->reqcpu);
		}
	}
}
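/*
 * Note: a driver completes a request by setting crp->crp_etype (0 on
 * success) and calling crypto_done(); CRYPTO_F_CBIMM short-circuits
 * the return queue and runs the callback in the driver's context:
 *
 *	crp->crp_etype = 0;
 *	crypto_done(crp);
 */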

/*
 * Invoke the callback on behalf of the driver.
 */
void
crypto_kdone(struct cryptkop *krp)
{

	KASSERT(krp != NULL);

	if (krp->krp_status != 0)
		cryptostats.cs_kerrs++;

	krp->krp_flags |= CRYPTO_F_DONE;

	/*
	 * The return queue is manipulated by the swi thread
	 * and, potentially, by crypto device drivers calling
	 * back to mark operations completed.  Thus we need
	 * to mask both while manipulating the return queue.
	 */
	if (krp->krp_flags & CRYPTO_F_CBIMM) {
		krp->krp_callback(krp);
	} else {
		int wasempty;
		struct crypto_crp_ret_qs *qs;
		struct crypto_crp_ret_kq *crp_ret_kq;

		qs = crypto_get_crp_ret_qs(krp->reqcpu);
		crp_ret_kq = &qs->crp_ret_kq;

		wasempty = TAILQ_EMPTY(crp_ret_kq);
		krp->krp_flags |= CRYPTO_F_ONRETQ;
		TAILQ_INSERT_TAIL(crp_ret_kq, krp, krp_next);
		qs->crp_ret_kq_len++;
		if (wasempty && !qs->crp_ret_q_exit_flag)
			softint_schedule_cpu(crypto_ret_si, krp->reqcpu);
		crypto_put_crp_ret_qs(krp->reqcpu);
	}
}

int
crypto_getfeat(int *featp)
{

	if (crypto_userasymcrypto == 0) {
		*featp = 0;
		return 0;
	}

	mutex_enter(&crypto_drv_mtx);

	int feat = 0;
	for (int hid = 0; hid < crypto_drivers_num; hid++) {
		struct cryptocap *cap;
		cap = crypto_checkdriver(hid);
		if (cap == NULL)
			continue;

		crypto_driver_lock(cap);

		if ((cap->cc_flags & CRYPTOCAP_F_SOFTWARE) &&
		    crypto_devallowsoft == 0)
			goto unlock;

		if (cap->cc_kprocess == NULL)
			goto unlock;

		for (int kalg = 0; kalg < CRK_ALGORITHM_MAX; kalg++)
			if ((cap->cc_kalg[kalg] &
			    CRYPTO_ALG_FLAG_SUPPORTED) != 0)
				feat |= 1 << kalg;

unlock:		crypto_driver_unlock(cap);
	}

	mutex_exit(&crypto_drv_mtx);
	*featp = feat;
	return (0);
}

/*
 * Software interrupt thread to dispatch crypto requests.
 */
static void
cryptointr(void *arg __unused)
{
	struct cryptop *crp, *submit, *cnext;
	struct cryptkop *krp, *knext;
	struct cryptocap *cap;
	struct crypto_crp_qs *crp_qs;
	struct crypto_crp_q *crp_q;
	struct crypto_crp_kq *crp_kq;
	int result, hint, s;

	cryptostats.cs_intrs++;
	crp_qs = crypto_get_crp_qs(&s);
	crp_q = crp_qs->crp_q;
	crp_kq = crp_qs->crp_kq;
	do {
		/*
		 * Find the first element in the queue that can be
		 * processed and look-ahead to see if multiple ops
		 * are ready for the same driver.
		 */
		submit = NULL;
		hint = 0;
		TAILQ_FOREACH_SAFE(crp, crp_q, crp_next, cnext) {
			u_int32_t hid = CRYPTO_SESID2HID(crp->crp_sid);
			cap = crypto_checkdriver_lock(hid);
			if (cap == NULL || cap->cc_process == NULL) {
				if (cap != NULL)
					crypto_driver_unlock(cap);
				/* Op needs to be migrated, process it. */
				submit = crp;
				break;
			}

			/*
			 * skip blocked crp regardless of CRYPTO_F_BATCH
			 */
			if (cap->cc_qblocked != 0) {
				crypto_driver_unlock(cap);
				continue;
			}
			crypto_driver_unlock(cap);

			/*
			 * skip batched crp's until the end of crp_q
			 */
			if ((crp->crp_flags & CRYPTO_F_BATCH) != 0) {
				if (submit == NULL) {
					submit = crp;
				} else {
					if (CRYPTO_SESID2HID(submit->crp_sid)
					    == hid)
						hint = CRYPTO_HINT_MORE;
				}

				continue;
			}

			/*
			 * found the first crp which is neither blocked
			 * nor batched.
			 */
			submit = crp;
			/*
			 * batched crp's can be processed much later, so
			 * clear the hint.
			 */
			hint = 0;
			break;
		}
		if (submit != NULL) {
			TAILQ_REMOVE(crp_q, submit, crp_next);
			result = crypto_invoke(submit, hint);
			/* we must take here as the TAILQ op or kinvoke
			   may need this mutex below.  sigh. */
			if (result == ERESTART) {
				/*
				 * The driver ran out of resources, mark the
				 * driver ``blocked'' for cryptop's and put
				 * the request back in the queue.  It would
				 * be best to put the request back where we
				 * got it but that's hard so for now we put
				 * it at the front.  This should be ok;
				 * putting it at the end does not work.
				 */
				/* validate sid again */
				cap = crypto_checkdriver_lock(CRYPTO_SESID2HID(submit->crp_sid));
				if (cap == NULL) {
					/* migrate again, sigh... */
					TAILQ_INSERT_TAIL(crp_q, submit, crp_next);
				} else {
					cap->cc_qblocked = 1;
					crypto_driver_unlock(cap);
					TAILQ_INSERT_HEAD(crp_q, submit, crp_next);
					cryptostats.cs_blocks++;
				}
			}
		}

		/* As above, but for key ops */
		TAILQ_FOREACH_SAFE(krp, crp_kq, krp_next, knext) {
			cap = crypto_checkdriver_lock(krp->krp_hid);
			if (cap == NULL || cap->cc_kprocess == NULL) {
				if (cap != NULL)
					crypto_driver_unlock(cap);
				/* Op needs to be migrated, process it. */
				break;
			}
			if (!cap->cc_kqblocked) {
				crypto_driver_unlock(cap);
				break;
			}
			crypto_driver_unlock(cap);
		}
		if (krp != NULL) {
			TAILQ_REMOVE(crp_kq, krp, krp_next);
			result = crypto_kinvoke(krp, 0);
			/* the next iteration will want the mutex. :-/ */
			if (result == ERESTART) {
				/*
				 * The driver ran out of resources, mark the
				 * driver ``blocked'' for cryptkop's and put
				 * the request back in the queue.  It would
				 * be best to put the request back where we
				 * got it but that's hard so for now we put
				 * it at the front.  This should be ok;
				 * putting it at the end does not work.
				 */
				/* validate sid again */
				cap = crypto_checkdriver_lock(krp->krp_hid);
				if (cap == NULL) {
					/* migrate again, sigh... */
					TAILQ_INSERT_TAIL(crp_kq, krp, krp_next);
				} else {
					cap->cc_kqblocked = 1;
					crypto_driver_unlock(cap);
					TAILQ_INSERT_HEAD(crp_kq, krp, krp_next);
					cryptostats.cs_kblocks++;
				}
			}
		}
	} while (submit != NULL || krp != NULL);
	crypto_put_crp_qs(&s);
}

/*
 * softint handler to do callbacks.
 */
static void
cryptoret_softint(void *arg __unused)
{
	struct crypto_crp_ret_qs *qs;
	struct crypto_crp_ret_q *crp_ret_q;
	struct crypto_crp_ret_kq *crp_ret_kq;

	qs = crypto_get_crp_ret_qs(curcpu());
	crp_ret_q = &qs->crp_ret_q;
	crp_ret_kq = &qs->crp_ret_kq;
	for (;;) {
		struct cryptop *crp;
		struct cryptkop *krp;

		crp = TAILQ_FIRST(crp_ret_q);
		if (crp != NULL) {
			TAILQ_REMOVE(crp_ret_q, crp, crp_next);
			qs->crp_ret_q_len--;
			crp->crp_flags &= ~CRYPTO_F_ONRETQ;
		}
		krp = TAILQ_FIRST(crp_ret_kq);
		if (krp != NULL) {
			TAILQ_REMOVE(crp_ret_kq, krp, krp_next);
			qs->crp_ret_kq_len--;
			krp->krp_flags &= ~CRYPTO_F_ONRETQ;
		}

		/* drop the lock before calling any callbacks. */
		if (crp == NULL && krp == NULL)
			break;

		mutex_spin_exit(&qs->crp_ret_q_mtx);
		if (crp != NULL) {
#ifdef CRYPTO_TIMING
			if (crypto_timing) {
				/*
				 * NB: We must copy the timestamp before
				 * doing the callback as the cryptop is
				 * likely to be reclaimed.
				 */
				struct timespec t = crp->crp_tstamp;
				crypto_tstat(&cryptostats.cs_cb, &t);
				crp->crp_callback(crp);
				crypto_tstat(&cryptostats.cs_finis, &t);
			} else
#endif
			{
				crp->crp_callback(crp);
			}
		}
		if (krp != NULL)
			krp->krp_callback(krp);

		mutex_spin_enter(&qs->crp_ret_q_mtx);
	}
	crypto_put_crp_ret_qs(curcpu());
}

/* NetBSD module interface */

MODULE(MODULE_CLASS_MISC, opencrypto, NULL);

static int
opencrypto_modcmd(modcmd_t cmd, void *opaque)
{
	int error = 0;

	switch (cmd) {
	case MODULE_CMD_INIT:
#ifdef _MODULE
		error = crypto_init();
#endif
		break;
	case MODULE_CMD_FINI:
#ifdef _MODULE
		error = crypto_destroy(true);
#endif
		break;
	default:
		error = ENOTTY;
	}
	return error;
}