/*	$NetBSD: crypto.c,v 1.113 2020/03/16 21:20:12 pgoyette Exp $ */
/*	$FreeBSD: src/sys/opencrypto/crypto.c,v 1.4.2.5 2003/02/26 00:14:05 sam Exp $	*/
/*	$OpenBSD: crypto.c,v 1.41 2002/07/17 23:52:38 art Exp $	*/

/*-
 * Copyright (c) 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Coyote Point Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu)
 *
 * This code was written by Angelos D. Keromytis in Athens, Greece, in
 * February 2000. Network Security Technologies Inc. (NSTI) kindly
 * supported the development of this code.
 *
 * Copyright (c) 2000, 2001 Angelos D. Keromytis
 *
 * Permission to use, copy, and modify this software with or without fee
 * is hereby granted, provided that this entire notice is included in
 * all source code copies of any software which is or includes a copy or
 * modification of this software.
 *
 * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
 * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
 * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
 * PURPOSE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: crypto.c,v 1.113 2020/03/16 21:20:12 pgoyette Exp $");

#include <sys/param.h>
#include <sys/reboot.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/pool.h>
#include <sys/kthread.h>
#include <sys/once.h>
#include <sys/sysctl.h>
#include <sys/intr.h>
#include <sys/errno.h>
#include <sys/module.h>
#include <sys/xcall.h>
#include <sys/device.h>
#include <sys/cpu.h>
#include <sys/percpu.h>
#include <sys/kmem.h>

#if defined(_KERNEL_OPT)
#include "opt_ocf.h"
#endif

#include <opencrypto/cryptodev.h>
#include <opencrypto/xform.h>		/* XXX for M_XDATA */

/*
 * Crypto drivers register themselves by allocating a slot in the
 * crypto_drivers table with crypto_get_driverid() and then registering
 * each algorithm they support with crypto_register() and crypto_kregister().
 */
/* Don't access crypto_drivers[i] directly; use crypto_checkdriver(i). */
static struct {
	kmutex_t mtx;
	int num;
	struct cryptocap *list;
} crypto_drv __cacheline_aligned;
#define crypto_drv_mtx		(crypto_drv.mtx)
#define crypto_drivers_num	(crypto_drv.num)
#define crypto_drivers		(crypto_drv.list)

static void *crypto_q_si;
static void *crypto_ret_si;

/*
 * There are two queues for crypto requests; one for symmetric (e.g.
 * cipher) operations and one for asymmetric (e.g. MOD) operations.
 * See below for how synchronization is handled.
 */
TAILQ_HEAD(crypto_crp_q, cryptop);
TAILQ_HEAD(crypto_crp_kq, cryptkop);
struct crypto_crp_qs {
	struct crypto_crp_q *crp_q;
	struct crypto_crp_kq *crp_kq;
};
static percpu_t *crypto_crp_qs_percpu;

static inline struct crypto_crp_qs *
crypto_get_crp_qs(int *s)
{

	KASSERT(s != NULL);

	*s = splsoftnet();
	return percpu_getref(crypto_crp_qs_percpu);
}

static inline void
crypto_put_crp_qs(int *s)
{

	KASSERT(s != NULL);

	percpu_putref(crypto_crp_qs_percpu);
	splx(*s);
}

static void
crypto_crp_q_is_busy_pc(void *p, void *arg, struct cpu_info *ci __unused)
{
	struct crypto_crp_qs *qs_pc = p;
	bool *isbusy = arg;

	if (!TAILQ_EMPTY(qs_pc->crp_q) || !TAILQ_EMPTY(qs_pc->crp_kq))
		*isbusy = true;
}

static void
crypto_crp_qs_init_pc(void *p, void *arg __unused, struct cpu_info *ci __unused)
{
	struct crypto_crp_qs *qs = p;

	qs->crp_q = kmem_alloc(sizeof(struct crypto_crp_q), KM_SLEEP);
	qs->crp_kq = kmem_alloc(sizeof(struct crypto_crp_kq), KM_SLEEP);

	TAILQ_INIT(qs->crp_q);
	TAILQ_INIT(qs->crp_kq);
}
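
/*
 * A minimal usage sketch for the per-CPU request queues above (this is
 * the pattern crypto_dispatch() and cryptointr() follow below): every
 * access is bracketed by crypto_get_crp_qs()/crypto_put_crp_qs(), which
 * raise the SPL and take a reference on the percpu data.
 *
 *	int s;
 *	struct crypto_crp_qs *qs = crypto_get_crp_qs(&s);
 *
 *	TAILQ_INSERT_TAIL(qs->crp_q, crp, crp_next);
 *	crypto_put_crp_qs(&s);
 */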

/*
 * There are two queues for processing completed crypto requests; one
 * for the symmetric and one for the asymmetric ops.  We only need one
 * but have two to avoid type futzing (cryptop vs. cryptkop).  See below
 * for how synchronization is handled.
 */
TAILQ_HEAD(crypto_crp_ret_q, cryptop);
TAILQ_HEAD(crypto_crp_ret_kq, cryptkop);
struct crypto_crp_ret_qs {
	kmutex_t crp_ret_q_mtx;
	bool crp_ret_q_exit_flag;

	struct crypto_crp_ret_q crp_ret_q;
	int crp_ret_q_len;
	int crp_ret_q_maxlen;	/* queue length limit. <=0 means unlimited. */
	int crp_ret_q_drops;

	struct crypto_crp_ret_kq crp_ret_kq;
	int crp_ret_kq_len;
	int crp_ret_kq_maxlen;	/* queue length limit. <=0 means unlimited. */
	int crp_ret_kq_drops;
};
struct crypto_crp_ret_qs **crypto_crp_ret_qs_list;

static inline struct crypto_crp_ret_qs *
crypto_get_crp_ret_qs(struct cpu_info *ci)
{
	u_int cpuid;
	struct crypto_crp_ret_qs *qs;

	KASSERT(ci != NULL);

	cpuid = cpu_index(ci);
	qs = crypto_crp_ret_qs_list[cpuid];
	mutex_enter(&qs->crp_ret_q_mtx);
	return qs;
}

static inline void
crypto_put_crp_ret_qs(struct cpu_info *ci)
{
	u_int cpuid;
	struct crypto_crp_ret_qs *qs;

	KASSERT(ci != NULL);

	cpuid = cpu_index(ci);
	qs = crypto_crp_ret_qs_list[cpuid];
	mutex_exit(&qs->crp_ret_q_mtx);
}

#ifndef CRYPTO_RET_Q_MAXLEN
#define CRYPTO_RET_Q_MAXLEN 0
#endif
#ifndef CRYPTO_RET_KQ_MAXLEN
#define CRYPTO_RET_KQ_MAXLEN 0
#endif

static int
sysctl_opencrypto_q_len(SYSCTLFN_ARGS)
{
	int error, len = 0;
	struct sysctlnode node = *rnode;

	for (int i = 0; i < ncpu; i++) {
		struct crypto_crp_ret_qs *qs;
		struct cpu_info *ci = cpu_lookup(i);

		qs = crypto_get_crp_ret_qs(ci);
		len += qs->crp_ret_q_len;
		crypto_put_crp_ret_qs(ci);
	}

	node.sysctl_data = &len;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return error;

	return 0;
}

static int
sysctl_opencrypto_q_drops(SYSCTLFN_ARGS)
{
	int error, drops = 0;
	struct sysctlnode node = *rnode;

	for (int i = 0; i < ncpu; i++) {
		struct crypto_crp_ret_qs *qs;
		struct cpu_info *ci = cpu_lookup(i);

		qs = crypto_get_crp_ret_qs(ci);
		drops += qs->crp_ret_q_drops;
		crypto_put_crp_ret_qs(ci);
	}

	node.sysctl_data = &drops;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return error;

	return 0;
}

static int
sysctl_opencrypto_q_maxlen(SYSCTLFN_ARGS)
{
	int error, maxlen;
	struct crypto_crp_ret_qs *qs;
	struct sysctlnode node = *rnode;

	/* Every CPU's crp_ret_q_maxlen is the same. */
	qs = crypto_get_crp_ret_qs(curcpu());
	maxlen = qs->crp_ret_q_maxlen;
	crypto_put_crp_ret_qs(curcpu());

	node.sysctl_data = &maxlen;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return error;

	for (int i = 0; i < ncpu; i++) {
		struct cpu_info *ci = cpu_lookup(i);

		qs = crypto_get_crp_ret_qs(ci);
		qs->crp_ret_q_maxlen = maxlen;
		crypto_put_crp_ret_qs(ci);
	}

	return 0;
}

static int
sysctl_opencrypto_kq_len(SYSCTLFN_ARGS)
{
	int error, len = 0;
	struct sysctlnode node = *rnode;

	for (int i = 0; i < ncpu; i++) {
		struct crypto_crp_ret_qs *qs;
		struct cpu_info *ci = cpu_lookup(i);

		qs = crypto_get_crp_ret_qs(ci);
		len += qs->crp_ret_kq_len;
		crypto_put_crp_ret_qs(ci);
	}

	node.sysctl_data = &len;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return error;

	return 0;
}

static int
sysctl_opencrypto_kq_drops(SYSCTLFN_ARGS)
{
	int error, drops = 0;
	struct sysctlnode node = *rnode;

	for (int i = 0; i < ncpu; i++) {
		struct crypto_crp_ret_qs *qs;
		struct cpu_info *ci = cpu_lookup(i);

		qs = crypto_get_crp_ret_qs(ci);
		drops += qs->crp_ret_kq_drops;
		crypto_put_crp_ret_qs(ci);
	}

	node.sysctl_data = &drops;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return error;

	return 0;
}

static int
sysctl_opencrypto_kq_maxlen(SYSCTLFN_ARGS)
{
	int error, maxlen;
	struct crypto_crp_ret_qs *qs;
	struct sysctlnode node = *rnode;

	/* Every CPU's crp_ret_kq_maxlen is the same. */
	qs = crypto_get_crp_ret_qs(curcpu());
	maxlen = qs->crp_ret_kq_maxlen;
	crypto_put_crp_ret_qs(curcpu());

	node.sysctl_data = &maxlen;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return error;

	for (int i = 0; i < ncpu; i++) {
		struct cpu_info *ci = cpu_lookup(i);

		qs = crypto_get_crp_ret_qs(ci);
		qs->crp_ret_kq_maxlen = maxlen;
		crypto_put_crp_ret_qs(ci);
	}

	return 0;
}

/*
 * Crypto op and descriptor data structures are allocated
 * from separate private zones (FreeBSD) / pools (NetBSD/OpenBSD).
 */
static pool_cache_t cryptop_cache;
static pool_cache_t cryptodesc_cache;
static pool_cache_t cryptkop_cache;

int	crypto_usercrypto = 1;		/* userland may open /dev/crypto */
int	crypto_userasymcrypto = 1;	/* userland may do asym crypto reqs */
/*
 * crypto_devallowsoft is (intended to be) sysctl'able, controlling
 * access to hardware versus software transforms as below:
 *
 * crypto_devallowsoft < 0:  Force userlevel requests to use software
 *                           transforms, always
 * crypto_devallowsoft = 0:  Use hardware if present, grant userlevel
 *                           requests for non-accelerated transforms
 *                           (handling the latter in software)
 * crypto_devallowsoft > 0:  Allow user requests only for transforms which
 *                           are hardware-accelerated.
 */
int	crypto_devallowsoft = 1;	/* only use hardware crypto */
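
/*
 * The three knobs above are exported under CTL_KERN by the SYSCTL_SETUP
 * below.  Illustrative shell usage (not part of this file):
 *
 *	sysctl -w kern.cryptodevallowsoft=0
 *
 * would let userlevel requests for non-accelerated transforms be handled
 * in software.
 */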

SYSCTL_SETUP(sysctl_opencrypto_setup, "opencrypto sysctl")
{
	const struct sysctlnode *ocnode;
	const struct sysctlnode *retqnode, *retkqnode;

	sysctl_createv(clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
	    CTLTYPE_INT, "usercrypto",
	    SYSCTL_DESCR("Enable/disable user-mode access to "
		"crypto support"),
	    NULL, 0, &crypto_usercrypto, 0,
	    CTL_KERN, CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
	    CTLTYPE_INT, "userasymcrypto",
	    SYSCTL_DESCR("Enable/disable user-mode access to "
		"asymmetric crypto support"),
	    NULL, 0, &crypto_userasymcrypto, 0,
	    CTL_KERN, CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
	    CTLTYPE_INT, "cryptodevallowsoft",
	    SYSCTL_DESCR("Enable/disable use of software "
		"asymmetric crypto support"),
	    NULL, 0, &crypto_devallowsoft, 0,
	    CTL_KERN, CTL_CREATE, CTL_EOL);

	sysctl_createv(clog, 0, NULL, &ocnode,
	    CTLFLAG_PERMANENT,
	    CTLTYPE_NODE, "opencrypto",
	    SYSCTL_DESCR("opencrypto related entries"),
	    NULL, 0, NULL, 0,
	    CTL_CREATE, CTL_EOL);

	sysctl_createv(clog, 0, &ocnode, &retqnode,
	    CTLFLAG_PERMANENT,
	    CTLTYPE_NODE, "crypto_ret_q",
	    SYSCTL_DESCR("crypto_ret_q related entries"),
	    NULL, 0, NULL, 0,
	    CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, &retqnode, NULL,
	    CTLFLAG_PERMANENT|CTLFLAG_READONLY,
	    CTLTYPE_INT, "len",
	    SYSCTL_DESCR("Current queue length"),
	    sysctl_opencrypto_q_len, 0,
	    NULL, 0,
	    CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, &retqnode, NULL,
	    CTLFLAG_PERMANENT|CTLFLAG_READONLY,
	    CTLTYPE_INT, "drops",
	    SYSCTL_DESCR("Crypto requests dropped due to full ret queue"),
	    sysctl_opencrypto_q_drops, 0,
	    NULL, 0,
	    CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, &retqnode, NULL,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
	    CTLTYPE_INT, "maxlen",
	    SYSCTL_DESCR("Maximum allowed queue length"),
	    sysctl_opencrypto_q_maxlen, 0,
	    NULL, 0,
	    CTL_CREATE, CTL_EOL);

	sysctl_createv(clog, 0, &ocnode, &retkqnode,
	    CTLFLAG_PERMANENT,
	    CTLTYPE_NODE, "crypto_ret_kq",
	    SYSCTL_DESCR("crypto_ret_kq related entries"),
	    NULL, 0, NULL, 0,
	    CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, &retkqnode, NULL,
	    CTLFLAG_PERMANENT|CTLFLAG_READONLY,
	    CTLTYPE_INT, "len",
	    SYSCTL_DESCR("Current queue length"),
	    sysctl_opencrypto_kq_len, 0,
	    NULL, 0,
	    CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, &retkqnode, NULL,
	    CTLFLAG_PERMANENT|CTLFLAG_READONLY,
	    CTLTYPE_INT, "drops",
	    SYSCTL_DESCR("Crypto requests dropped due to full ret queue"),
	    sysctl_opencrypto_kq_drops, 0,
	    NULL, 0,
	    CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, &retkqnode, NULL,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
	    CTLTYPE_INT, "maxlen",
	    SYSCTL_DESCR("Maximum allowed queue length"),
	    sysctl_opencrypto_kq_maxlen, 0,
	    NULL, 0,
	    CTL_CREATE, CTL_EOL);
}

/*
 * Synchronization: read carefully, this is non-trivial.
 *
 * Crypto requests are submitted via crypto_dispatch.  Typically
 * these come in from network protocols at spl0 (output path) or
 * spl[,soft]net (input path).
 *
 * Requests are typically passed on to the driver directly, but they
 * may also be queued for processing by a software interrupt thread,
 * cryptointr, that runs at splsoftcrypto.  This thread dispatches
 * the requests to crypto drivers (h/w or s/w) who call crypto_done
 * when a request is complete.  Hardware crypto drivers are assumed
 * to register their IRQs as network devices so their interrupt handlers
 * and subsequent "done callbacks" happen at spl[imp,net].
 *
 * Completed crypto ops are queued for a separate kernel thread that
 * handles the callbacks at spl0.  This decoupling ensures the crypto
 * driver interrupt service routine is not delayed while the callback
 * takes place and that callbacks are delivered after a context switch
 * (as opposed to a software interrupt that clients must block).
 *
 * This scheme is not intended for SMP machines.
 */
static void cryptointr(void *);		/* swi thread to dispatch ops */
static void cryptoret_softint(void *);	/* softint to do callbacks */
static int crypto_destroy(bool);
static int crypto_invoke(struct cryptop *crp, int hint);
static int crypto_kinvoke(struct cryptkop *krp, int hint);

static struct cryptocap *crypto_checkdriver_lock(u_int32_t);
static struct cryptocap *crypto_checkdriver_uninit(u_int32_t);
static struct cryptocap *crypto_checkdriver(u_int32_t);
static void crypto_driver_lock(struct cryptocap *);
static void crypto_driver_unlock(struct cryptocap *);
static void crypto_driver_clear(struct cryptocap *);

static int crypto_init_finalize(device_t);

static struct cryptostats cryptostats;
#ifdef CRYPTO_TIMING
static int crypto_timing = 0;
#endif

static void
crypto_crp_ret_qs_init(void)
{
	int i;

	crypto_crp_ret_qs_list = kmem_alloc(sizeof(struct crypto_crp_ret_qs *) * ncpu,
	    KM_SLEEP);

	for (i = 0; i < ncpu; i++) {
		struct crypto_crp_ret_qs *qs;

		qs = kmem_alloc(sizeof(struct crypto_crp_ret_qs), KM_SLEEP);
		mutex_init(&qs->crp_ret_q_mtx, MUTEX_DEFAULT, IPL_NET);
		qs->crp_ret_q_exit_flag = false;

		TAILQ_INIT(&qs->crp_ret_q);
		qs->crp_ret_q_len = 0;
		qs->crp_ret_q_maxlen = CRYPTO_RET_Q_MAXLEN;
		qs->crp_ret_q_drops = 0;

		TAILQ_INIT(&qs->crp_ret_kq);
		qs->crp_ret_kq_len = 0;
		qs->crp_ret_kq_maxlen = CRYPTO_RET_KQ_MAXLEN;
		qs->crp_ret_kq_drops = 0;

		crypto_crp_ret_qs_list[i] = qs;
	}
}

static int
crypto_init0(void)
{

	mutex_init(&crypto_drv_mtx, MUTEX_DEFAULT, IPL_NONE);
	cryptop_cache = pool_cache_init(sizeof(struct cryptop),
	    coherency_unit, 0, 0, "cryptop", NULL, IPL_NET, NULL, NULL, NULL);
	cryptodesc_cache = pool_cache_init(sizeof(struct cryptodesc),
	    coherency_unit, 0, 0, "cryptdesc", NULL, IPL_NET, NULL, NULL, NULL);
	cryptkop_cache = pool_cache_init(sizeof(struct cryptkop),
	    coherency_unit, 0, 0, "cryptkop", NULL, IPL_NET, NULL, NULL, NULL);

	crypto_crp_qs_percpu = percpu_create(sizeof(struct crypto_crp_qs),
	    crypto_crp_qs_init_pc, /*XXX*/NULL, NULL);

	crypto_crp_ret_qs_init();

	crypto_drivers = kmem_zalloc(CRYPTO_DRIVERS_INITIAL *
	    sizeof(struct cryptocap), KM_SLEEP);
	crypto_drivers_num = CRYPTO_DRIVERS_INITIAL;

	crypto_q_si = softint_establish(SOFTINT_NET|SOFTINT_MPSAFE, cryptointr, NULL);
	if (crypto_q_si == NULL) {
		printf("crypto_init: cannot establish request queue handler\n");
		return crypto_destroy(false);
	}

	/*
	 * Some encryption devices (such as mvcesa) are attached before
	 * ipi_sysinit().  That causes an assertion in ipi_register() as
	 * the crypto_ret_si softint uses SOFTINT_RCPU.
	 */
	if (config_finalize_register(NULL, crypto_init_finalize) != 0) {
		printf("crypto_init: cannot register crypto_init_finalize\n");
		return crypto_destroy(false);
	}

	return 0;
}

static int
crypto_init_finalize(device_t self __unused)
{

	crypto_ret_si = softint_establish(SOFTINT_NET|SOFTINT_MPSAFE|SOFTINT_RCPU,
	    &cryptoret_softint, NULL);
	KASSERT(crypto_ret_si != NULL);

	return 0;
}

int
crypto_init(void)
{
	static ONCE_DECL(crypto_init_once);

	return RUN_ONCE(&crypto_init_once, crypto_init0);
}

static int
crypto_destroy(bool exit_kthread)
{
	int i;

	if (exit_kthread) {
		struct cryptocap *cap = NULL;
		bool is_busy = false;

		/* if we have any in-progress requests, don't unload */
		percpu_foreach(crypto_crp_qs_percpu, crypto_crp_q_is_busy_pc,
		    &is_busy);
		if (is_busy)
			return EBUSY;
		/* FIXME:
		 * prohibit enqueue to crp_q and crp_kq after here.
		 */

		mutex_enter(&crypto_drv_mtx);
		for (i = 0; i < crypto_drivers_num; i++) {
			cap = crypto_checkdriver(i);
			if (cap == NULL)
				continue;
			if (cap->cc_sessions != 0) {
				mutex_exit(&crypto_drv_mtx);
				return EBUSY;
			}
		}
		mutex_exit(&crypto_drv_mtx);
		/* FIXME:
		 * prohibit touching crypto_drivers[] and each element after here.
		 */

		/* Ensure cryptoret_softint() is never scheduled again. */
		for (i = 0; i < ncpu; i++) {
			struct crypto_crp_ret_qs *qs;
			struct cpu_info *ci = cpu_lookup(i);

			qs = crypto_get_crp_ret_qs(ci);
			qs->crp_ret_q_exit_flag = true;
			crypto_put_crp_ret_qs(ci);
		}
	}

	if (crypto_ret_si != NULL)
		softint_disestablish(crypto_ret_si);

	if (crypto_q_si != NULL)
		softint_disestablish(crypto_q_si);

	mutex_enter(&crypto_drv_mtx);
	if (crypto_drivers != NULL)
		kmem_free(crypto_drivers,
		    crypto_drivers_num * sizeof(struct cryptocap));
	mutex_exit(&crypto_drv_mtx);

	percpu_free(crypto_crp_qs_percpu, sizeof(struct crypto_crp_qs));

	pool_cache_destroy(cryptop_cache);
	pool_cache_destroy(cryptodesc_cache);
	pool_cache_destroy(cryptkop_cache);

	mutex_destroy(&crypto_drv_mtx);

	return 0;
}

static bool
crypto_driver_suitable(struct cryptocap *cap, struct cryptoini *cri)
{
	struct cryptoini *cr;

	for (cr = cri; cr; cr = cr->cri_next)
		if (cap->cc_alg[cr->cri_alg] == 0) {
			DPRINTF("alg %d not supported\n", cr->cri_alg);
			return false;
		}

	return true;
}

#define CRYPTO_ACCEPT_HARDWARE 0x1
#define CRYPTO_ACCEPT_SOFTWARE 0x2
/*
 * The algorithm we use here is pretty stupid; just use the
 * first driver that supports all the algorithms we need.
 * If there are multiple drivers we choose the driver with
 * the fewest active sessions.  We prefer hardware-backed
 * drivers to software ones.
 *
 * XXX We need more smarts here (in real life too, but that's
 * XXX another story altogether).
 */
static struct cryptocap *
crypto_select_driver_lock(struct cryptoini *cri, int hard)
{
	u_int32_t hid;
	int accept;
	struct cryptocap *cap, *best;
	int error = 0;

	best = NULL;
	/*
	 * hard == 0 can use both hardware and software drivers.
	 * We prefer hardware drivers to software ones, so search
	 * the hardware drivers first.
	 */
	if (hard >= 0)
		accept = CRYPTO_ACCEPT_HARDWARE;
	else
		accept = CRYPTO_ACCEPT_SOFTWARE;
again:
	for (hid = 0; hid < crypto_drivers_num; hid++) {
		cap = crypto_checkdriver(hid);
		if (cap == NULL)
			continue;

		crypto_driver_lock(cap);

		/*
		 * If it's not initialized or has remaining sessions
		 * referencing it, skip.
		 */
		if (cap->cc_newsession == NULL ||
		    (cap->cc_flags & CRYPTOCAP_F_CLEANUP)) {
			crypto_driver_unlock(cap);
			continue;
		}

		/* Hardware required -- ignore software drivers. */
		if ((accept & CRYPTO_ACCEPT_SOFTWARE) == 0
		    && (cap->cc_flags & CRYPTOCAP_F_SOFTWARE)) {
			crypto_driver_unlock(cap);
			continue;
		}
		/* Software required -- ignore hardware drivers. */
		if ((accept & CRYPTO_ACCEPT_HARDWARE) == 0
		    && (cap->cc_flags & CRYPTOCAP_F_SOFTWARE) == 0) {
			crypto_driver_unlock(cap);
			continue;
		}

		/* See if all the algorithms are supported. */
		if (crypto_driver_suitable(cap, cri)) {
			if (best == NULL) {
				/* keep holding crypto_driver_lock(cap) */
				best = cap;
				continue;
			} else if (cap->cc_sessions < best->cc_sessions) {
				crypto_driver_unlock(best);
				/* keep holding crypto_driver_lock(cap) */
				best = cap;
				continue;
			}
		}

		crypto_driver_unlock(cap);
	}
	if (best == NULL && hard == 0
	    && (accept & CRYPTO_ACCEPT_SOFTWARE) == 0) {
		accept = CRYPTO_ACCEPT_SOFTWARE;
		goto again;
	}

	if (best == NULL && hard == 0 && error == 0) {
		mutex_exit(&crypto_drv_mtx);
		error = module_autoload("swcrypto", MODULE_CLASS_DRIVER);
		mutex_enter(&crypto_drv_mtx);
		if (error == 0) {
			error = EINVAL;
			goto again;
		}
	}

	return best;
}

/*
 * Create a new session.
 */
int
crypto_newsession(u_int64_t *sid, struct cryptoini *cri, int hard)
{
	struct cryptocap *cap;
	int err = EINVAL;

	mutex_enter(&crypto_drv_mtx);

	cap = crypto_select_driver_lock(cri, hard);
	if (cap != NULL) {
		u_int32_t hid, lid;

		hid = cap - crypto_drivers;
		/*
		 * Can't do everything in one session.
		 *
		 * XXX Fix this.  We need to inject a "virtual" session
		 * XXX layer right about here.
		 */

		/* Call the driver initialization routine. */
		lid = hid;		/* Pass the driver ID. */
		crypto_driver_unlock(cap);
		err = cap->cc_newsession(cap->cc_arg, &lid, cri);
		crypto_driver_lock(cap);
		if (err == 0) {
			(*sid) = hid;
			(*sid) <<= 32;
			(*sid) |= (lid & 0xffffffff);
			(cap->cc_sessions)++;
		} else {
			DPRINTF("crypto_drivers[%d].cc_newsession() failed. error=%d\n",
			    hid, err);
		}
		crypto_driver_unlock(cap);
	}

	mutex_exit(&crypto_drv_mtx);

	return err;
}
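
/*
 * The session id returned above packs the driver slot into the upper
 * 32 bits and the driver-local session id into the lower 32 bits.  A
 * caller-side sketch (the example_key variable is hypothetical; the
 * rest is this file's API):
 *
 *	struct cryptoini cri;
 *	u_int64_t sid;
 *
 *	memset(&cri, 0, sizeof(cri));
 *	cri.cri_alg = CRYPTO_3DES_CBC;
 *	cri.cri_klen = 192;		(key length in bits)
 *	cri.cri_key = example_key;
 *	if (crypto_newsession(&sid, &cri, 0) == 0)
 *		... CRYPTO_SESID2HID(sid) recovers the driver slot ...
 */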

/*
 * Delete an existing session (or a reserved session on an unregistered
 * driver).
 */
int
crypto_freesession(u_int64_t sid)
{
	struct cryptocap *cap;
	int err = 0;

	/* Determine two IDs. */
	cap = crypto_checkdriver_lock(CRYPTO_SESID2HID(sid));
	if (cap == NULL)
		return ENOENT;

	if (cap->cc_sessions)
		(cap->cc_sessions)--;

	/* Call the driver cleanup routine, if available. */
	if (cap->cc_freesession)
		err = cap->cc_freesession(cap->cc_arg, sid);
	else
		err = 0;

	/*
	 * If this was the last session of a driver marked as invalid,
	 * make the entry available for reuse.
	 */
	if ((cap->cc_flags & CRYPTOCAP_F_CLEANUP) && cap->cc_sessions == 0)
		crypto_driver_clear(cap);

	crypto_driver_unlock(cap);
	return err;
}

static bool
crypto_checkdriver_initialized(const struct cryptocap *cap)
{

	return cap->cc_process != NULL ||
	    (cap->cc_flags & CRYPTOCAP_F_CLEANUP) != 0 ||
	    cap->cc_sessions != 0;
}

/*
 * Return an unused driver id.  Used by drivers prior to registering
 * support for the algorithms they handle.
 */
int32_t
crypto_get_driverid(u_int32_t flags)
{
	struct cryptocap *newdrv;
	struct cryptocap *cap = NULL;
	int i;

	(void)crypto_init();		/* XXX oh, this is foul! */

	mutex_enter(&crypto_drv_mtx);
	for (i = 0; i < crypto_drivers_num; i++) {
		cap = crypto_checkdriver_uninit(i);
		if (cap == NULL || crypto_checkdriver_initialized(cap))
			continue;
		break;
	}

	/* Out of entries, allocate some more. */
	if (i == crypto_drivers_num) {
		/* Be careful about wrap-around. */
		if (2 * crypto_drivers_num <= crypto_drivers_num) {
			mutex_exit(&crypto_drv_mtx);
			printf("crypto: driver count wraparound!\n");
			return -1;
		}

		newdrv = kmem_zalloc(2 * crypto_drivers_num *
		    sizeof(struct cryptocap), KM_SLEEP);
		memcpy(newdrv, crypto_drivers,
		    crypto_drivers_num * sizeof(struct cryptocap));
		kmem_free(crypto_drivers,
		    crypto_drivers_num * sizeof(struct cryptocap));

		crypto_drivers_num *= 2;
		crypto_drivers = newdrv;

		cap = crypto_checkdriver_uninit(i);
		KASSERT(cap != NULL);
	}

	/* NB: state is zero'd on free */
	cap->cc_sessions = 1;		/* Mark */
	cap->cc_flags = flags;
	mutex_init(&cap->cc_lock, MUTEX_DEFAULT, IPL_NET);

	if (bootverbose)
		printf("crypto: assign driver %u, flags %u\n", i, flags);

	mutex_exit(&crypto_drv_mtx);

	return i;
}
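
/*
 * Typical driver bootstrap built on crypto_get_driverid() and the
 * registration routines below (sketch; the xyz_* callbacks and the
 * softc pointer sc are hypothetical):
 *
 *	int32_t hid = crypto_get_driverid(0);
 *	if (hid < 0)
 *		return;
 *	crypto_register(hid, CRYPTO_3DES_CBC, 0, 0,
 *	    xyz_newsession, xyz_freesession, xyz_process, sc);
 *
 * At detach time the driver undoes this with crypto_unregister_all(hid).
 */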

static struct cryptocap *
crypto_checkdriver_lock(u_int32_t hid)
{
	struct cryptocap *cap;

	KASSERT(crypto_drivers != NULL);

	if (hid >= crypto_drivers_num)
		return NULL;

	cap = &crypto_drivers[hid];
	mutex_enter(&cap->cc_lock);
	return cap;
}

/*
 * Use crypto_checkdriver_uninit() instead of crypto_checkdriver() in the
 * following two situations:
 * - crypto_drivers[] may not be allocated
 * - crypto_drivers[hid] may not be initialized
 */
static struct cryptocap *
crypto_checkdriver_uninit(u_int32_t hid)
{

	KASSERT(mutex_owned(&crypto_drv_mtx));

	if (crypto_drivers == NULL)
		return NULL;

	return (hid >= crypto_drivers_num ? NULL : &crypto_drivers[hid]);
}

/*
 * Unlike crypto_checkdriver_uninit(), this returns NULL for entries that
 * have not been initialized, so callers only ever see usable driver slots.
 */
static struct cryptocap *
crypto_checkdriver(u_int32_t hid)
{

	KASSERT(mutex_owned(&crypto_drv_mtx));

	if (crypto_drivers == NULL || hid >= crypto_drivers_num)
		return NULL;

	struct cryptocap *cap = &crypto_drivers[hid];
	return crypto_checkdriver_initialized(cap) ? cap : NULL;
}

static inline void
crypto_driver_lock(struct cryptocap *cap)
{

	KASSERT(cap != NULL);

	mutex_enter(&cap->cc_lock);
}

static inline void
crypto_driver_unlock(struct cryptocap *cap)
{

	KASSERT(cap != NULL);

	mutex_exit(&cap->cc_lock);
}

static void
crypto_driver_clear(struct cryptocap *cap)
{

	if (cap == NULL)
		return;

	KASSERT(mutex_owned(&cap->cc_lock));

	cap->cc_sessions = 0;
	memset(&cap->cc_max_op_len, 0, sizeof(cap->cc_max_op_len));
	memset(&cap->cc_alg, 0, sizeof(cap->cc_alg));
	memset(&cap->cc_kalg, 0, sizeof(cap->cc_kalg));
	cap->cc_flags = 0;
	cap->cc_qblocked = 0;
	cap->cc_kqblocked = 0;

	cap->cc_arg = NULL;
	cap->cc_newsession = NULL;
	cap->cc_process = NULL;
	cap->cc_freesession = NULL;
	cap->cc_kprocess = NULL;
}

/*
 * Register support for a key-related algorithm.  This routine
 * is called once for each algorithm supported by a driver.
 */
int
crypto_kregister(u_int32_t driverid, int kalg, u_int32_t flags,
    int (*kprocess)(void *, struct cryptkop *, int),
    void *karg)
{
	struct cryptocap *cap;
	int err;

	mutex_enter(&crypto_drv_mtx);

	cap = crypto_checkdriver_lock(driverid);
	if (cap != NULL &&
	    (CRK_ALGORITM_MIN <= kalg && kalg <= CRK_ALGORITHM_MAX)) {
		/*
		 * XXX Do some performance testing to determine placing.
		 * XXX We probably need an auxiliary data structure that
		 * XXX describes relative performances.
		 */

		cap->cc_kalg[kalg] = flags | CRYPTO_ALG_FLAG_SUPPORTED;
		if (bootverbose) {
			printf("crypto: driver %u registers key alg %u "
			    "flags %u\n",
			    driverid,
			    kalg,
			    flags
			);
		}

		if (cap->cc_kprocess == NULL) {
			cap->cc_karg = karg;
			cap->cc_kprocess = kprocess;
		}
		err = 0;
	} else
		err = EINVAL;

	if (cap != NULL)
		crypto_driver_unlock(cap);
	mutex_exit(&crypto_drv_mtx);
	return err;
}

/*
 * Register support for a non-key-related algorithm.  This routine
 * is called once for each such algorithm supported by a driver.
 */
int
crypto_register(u_int32_t driverid, int alg, u_int16_t maxoplen,
    u_int32_t flags,
    int (*newses)(void *, u_int32_t*, struct cryptoini*),
    int (*freeses)(void *, u_int64_t),
    int (*process)(void *, struct cryptop *, int),
    void *arg)
{
	struct cryptocap *cap;
	int err;

	cap = crypto_checkdriver_lock(driverid);
	if (cap == NULL)
		return EINVAL;

	/* NB: algorithms are in the range [1..max] */
	if (CRYPTO_ALGORITHM_MIN <= alg && alg <= CRYPTO_ALGORITHM_MAX) {
		/*
		 * XXX Do some performance testing to determine placing.
		 * XXX We probably need an auxiliary data structure that
		 * XXX describes relative performances.
		 */

		cap->cc_alg[alg] = flags | CRYPTO_ALG_FLAG_SUPPORTED;
		cap->cc_max_op_len[alg] = maxoplen;
		if (bootverbose) {
			printf("crypto: driver %u registers alg %u "
			    "flags %u maxoplen %u\n",
			    driverid,
			    alg,
			    flags,
			    maxoplen
			);
		}

		if (cap->cc_process == NULL) {
			cap->cc_arg = arg;
			cap->cc_newsession = newses;
			cap->cc_process = process;
			cap->cc_freesession = freeses;
			cap->cc_sessions = 0;		/* Unmark */
		}
		err = 0;
	} else
		err = EINVAL;

	crypto_driver_unlock(cap);

	return err;
}

static int
crypto_unregister_locked(struct cryptocap *cap, int alg, bool all)
{
	int i;
	u_int32_t ses;
	bool lastalg = true;

	KASSERT(cap != NULL);
	KASSERT(mutex_owned(&cap->cc_lock));

	if (alg < CRYPTO_ALGORITHM_MIN || CRYPTO_ALGORITHM_MAX < alg)
		return EINVAL;

	if (!all && cap->cc_alg[alg] == 0)
		return EINVAL;

	cap->cc_alg[alg] = 0;
	cap->cc_max_op_len[alg] = 0;

	if (all) {
		if (alg != CRYPTO_ALGORITHM_MAX)
			lastalg = false;
	} else {
		/* Was this the last algorithm ? */
		for (i = CRYPTO_ALGORITHM_MIN; i <= CRYPTO_ALGORITHM_MAX; i++)
			if (cap->cc_alg[i] != 0) {
				lastalg = false;
				break;
			}
	}
	if (lastalg) {
		ses = cap->cc_sessions;
		crypto_driver_clear(cap);
		if (ses != 0) {
			/*
			 * If there are pending sessions, just mark as invalid.
			 */
			cap->cc_flags |= CRYPTOCAP_F_CLEANUP;
			cap->cc_sessions = ses;
		}
	}

	return 0;
}

/*
 * Unregister a crypto driver.  If there are pending sessions using it,
 * leave enough information around so that subsequent calls using those
 * sessions will correctly detect the driver has been unregistered and
 * reroute requests.
 */
int
crypto_unregister(u_int32_t driverid, int alg)
{
	int err;
	struct cryptocap *cap;

	cap = crypto_checkdriver_lock(driverid);
	if (cap == NULL)
		return EINVAL;

	err = crypto_unregister_locked(cap, alg, false);
	crypto_driver_unlock(cap);

	return err;
}

/*
 * Unregister all algorithms associated with a crypto driver.
 * If there are pending sessions using it, leave enough information
 * around so that subsequent calls using those sessions will
 * correctly detect the driver has been unregistered and reroute
 * requests.
 */
int
crypto_unregister_all(u_int32_t driverid)
{
	int err, i;
	struct cryptocap *cap;

	cap = crypto_checkdriver_lock(driverid);
	if (cap == NULL)
		return EINVAL;

	for (i = CRYPTO_ALGORITHM_MIN; i <= CRYPTO_ALGORITHM_MAX; i++) {
		err = crypto_unregister_locked(cap, i, true);
		if (err)
			break;
	}
	crypto_driver_unlock(cap);

	return err;
}

/*
 * Clear blockage on a driver.  The what parameter indicates whether
 * the driver is now ready for cryptop's and/or cryptokop's.
 */
int
crypto_unblock(u_int32_t driverid, int what)
{
	struct cryptocap *cap;
	int needwakeup = 0;

	cap = crypto_checkdriver_lock(driverid);
	if (cap == NULL)
		return EINVAL;

	if (what & CRYPTO_SYMQ) {
		needwakeup |= cap->cc_qblocked;
		cap->cc_qblocked = 0;
	}
	if (what & CRYPTO_ASYMQ) {
		needwakeup |= cap->cc_kqblocked;
		cap->cc_kqblocked = 0;
	}
	crypto_driver_unlock(cap);
	if (needwakeup) {
		kpreempt_disable();
		softint_schedule(crypto_q_si);
		kpreempt_enable();
	}

	return 0;
}

/*
 * Dispatch a crypto request to a driver or queue
 * it, to be processed by the kernel thread.
 */
int
crypto_dispatch(struct cryptop *crp)
{
	int result, s;
	struct cryptocap *cap;
	struct crypto_crp_qs *crp_qs;
	struct crypto_crp_q *crp_q;

	KASSERT(crp != NULL);

	DPRINTF("crp %p, alg %d\n", crp, crp->crp_desc->crd_alg);

	cryptostats.cs_ops++;

#ifdef CRYPTO_TIMING
	if (crypto_timing)
		nanouptime(&crp->crp_tstamp);
#endif

	if ((crp->crp_flags & CRYPTO_F_BATCH) != 0) {
		int wasempty;
		/*
		 * Caller marked the request as ``ok to delay'';
		 * queue it for the swi thread.  This is desirable
		 * when the operation is low priority and/or suitable
		 * for batching.
		 *
		 * List order does not matter for batched jobs.
		 */
		crp_qs = crypto_get_crp_qs(&s);
		crp_q = crp_qs->crp_q;
		wasempty = TAILQ_EMPTY(crp_q);
		TAILQ_INSERT_TAIL(crp_q, crp, crp_next);
		crypto_put_crp_qs(&s);
		crp_q = NULL;
		if (wasempty) {
			kpreempt_disable();
			softint_schedule(crypto_q_si);
			kpreempt_enable();
		}

		return 0;
	}

	crp_qs = crypto_get_crp_qs(&s);
	crp_q = crp_qs->crp_q;
	cap = crypto_checkdriver_lock(CRYPTO_SESID2HID(crp->crp_sid));
	/*
	 * TODO:
	 * If we could ensure the driver stays valid until
	 * crypto_unregister() is done, this migrate operation would not
	 * be required.
	 */
	if (cap == NULL) {
		/*
		 * The driver must have been detached, so this request will
		 * migrate to other drivers in cryptointr() later.
		 */
		TAILQ_INSERT_TAIL(crp_q, crp, crp_next);
		result = 0;
		goto out;
	}

	if (cap->cc_qblocked != 0) {
		crypto_driver_unlock(cap);
		/*
		 * The driver is blocked, just queue the op until
		 * it unblocks and the swi thread gets kicked.
		 */
		TAILQ_INSERT_TAIL(crp_q, crp, crp_next);
		result = 0;
		goto out;
	}

	/*
	 * Caller marked the request to be processed
	 * immediately; dispatch it directly to the
	 * driver unless the driver is currently blocked.
	 */
	crypto_driver_unlock(cap);
	result = crypto_invoke(crp, 0);
	if (result == ERESTART) {
		/*
		 * The driver ran out of resources, mark the
		 * driver ``blocked'' for cryptop's and put
		 * the op on the queue.
		 */
		crypto_driver_lock(cap);
		cap->cc_qblocked = 1;
		crypto_driver_unlock(cap);
		TAILQ_INSERT_HEAD(crp_q, crp, crp_next);
		cryptostats.cs_blocks++;

		/*
		 * The crp is enqueued to crp_q, that is,
		 * no error occurs.  So, this function should
		 * not return an error.
		 */
		result = 0;
	}

out:
	crypto_put_crp_qs(&s);
	return result;
}
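
/*
 * Caller-side sketch for the symmetric path (buffer and descriptor setup
 * elided; the xyz_done callback is hypothetical): build a request with
 * crypto_getreq() below, mark it batchable, and hand it off.
 *
 *	struct cryptop *crp = crypto_getreq(1);
 *	if (crp == NULL)
 *		return ENOMEM;
 *	crp->crp_sid = sid;
 *	crp->crp_flags = CRYPTO_F_BATCH;
 *	crp->crp_callback = xyz_done;
 *	(void)crypto_dispatch(crp);
 */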

/*
 * Add an asymmetric crypto request to a queue,
 * to be processed by the kernel thread.
 */
int
crypto_kdispatch(struct cryptkop *krp)
{
	int result, s;
	struct cryptocap *cap;
	struct crypto_crp_qs *crp_qs;
	struct crypto_crp_kq *crp_kq;

	KASSERT(krp != NULL);

	cryptostats.cs_kops++;

	crp_qs = crypto_get_crp_qs(&s);
	crp_kq = crp_qs->crp_kq;
	cap = crypto_checkdriver_lock(krp->krp_hid);
	/*
	 * TODO:
	 * If we could ensure the driver stays valid until
	 * crypto_unregister() is done, this migrate operation would not
	 * be required.
	 */
	if (cap == NULL) {
		TAILQ_INSERT_TAIL(crp_kq, krp, krp_next);
		result = 0;
		goto out;
	}

	if (cap->cc_kqblocked != 0) {
		crypto_driver_unlock(cap);
		/*
		 * The driver is blocked, just queue the op until
		 * it unblocks and the swi thread gets kicked.
		 */
		TAILQ_INSERT_TAIL(crp_kq, krp, krp_next);
		result = 0;
		goto out;
	}

	crypto_driver_unlock(cap);
	result = crypto_kinvoke(krp, 0);
	if (result == ERESTART) {
		/*
		 * The driver ran out of resources, mark the
		 * driver ``blocked'' for cryptkop's and put
		 * the op on the queue.
		 */
		crypto_driver_lock(cap);
		cap->cc_kqblocked = 1;
		crypto_driver_unlock(cap);
		TAILQ_INSERT_HEAD(crp_kq, krp, krp_next);
		cryptostats.cs_kblocks++;

		/*
		 * The krp is enqueued to crp_kq, that is,
		 * no error occurs.  So, this function should
		 * not return an error.
		 */
		result = 0;
	}

out:
	crypto_put_crp_qs(&s);
	return result;
}

/*
 * Dispatch an asymmetric crypto request to the appropriate crypto devices.
 */
static int
crypto_kinvoke(struct cryptkop *krp, int hint)
{
	struct cryptocap *cap = NULL;
	u_int32_t hid;
	int error;

	KASSERT(krp != NULL);

	/* Sanity checks. */
	if (krp->krp_callback == NULL) {
		cv_destroy(&krp->krp_cv);
		crypto_kfreereq(krp);
		return EINVAL;
	}

	mutex_enter(&crypto_drv_mtx);
	for (hid = 0; hid < crypto_drivers_num; hid++) {
		cap = crypto_checkdriver(hid);
		if (cap == NULL)
			continue;
		crypto_driver_lock(cap);
		if ((cap->cc_flags & CRYPTOCAP_F_SOFTWARE) &&
		    crypto_devallowsoft == 0) {
			crypto_driver_unlock(cap);
			continue;
		}
		if (cap->cc_kprocess == NULL) {
			crypto_driver_unlock(cap);
			continue;
		}
		if ((cap->cc_kalg[krp->krp_op] &
		    CRYPTO_ALG_FLAG_SUPPORTED) == 0) {
			crypto_driver_unlock(cap);
			continue;
		}
		break;
	}
	if (hid == crypto_drivers_num)
		cap = NULL;	/* no usable driver was found */
	mutex_exit(&crypto_drv_mtx);
	if (cap != NULL) {
		int (*process)(void *, struct cryptkop *, int);
		void *arg;

		process = cap->cc_kprocess;
		arg = cap->cc_karg;
		krp->krp_hid = hid;
		krp->reqcpu = curcpu();
		crypto_driver_unlock(cap);
		error = (*process)(arg, krp, hint);
	} else {
		error = ENODEV;
	}

	if (error) {
		krp->krp_status = error;
		crypto_kdone(krp);
	}
	return 0;
}

#ifdef CRYPTO_TIMING
static void
crypto_tstat(struct cryptotstat *ts, struct timespec *tv)
{
	struct timespec now, t;

	nanouptime(&now);
	t.tv_sec = now.tv_sec - tv->tv_sec;
	t.tv_nsec = now.tv_nsec - tv->tv_nsec;
	if (t.tv_nsec < 0) {
		t.tv_sec--;
		t.tv_nsec += 1000000000;
	}
	timespecadd(&ts->acc, &t, &t);
	if (timespeccmp(&t, &ts->min, <))
		ts->min = t;
	if (timespeccmp(&t, &ts->max, >))
		ts->max = t;
	ts->count++;

	*tv = now;
}
#endif

/*
 * Dispatch a crypto request to the appropriate crypto devices.
 */
static int
crypto_invoke(struct cryptop *crp, int hint)
{
	struct cryptocap *cap;

	KASSERT(crp != NULL);

#ifdef CRYPTO_TIMING
	if (crypto_timing)
		crypto_tstat(&cryptostats.cs_invoke, &crp->crp_tstamp);
#endif
	/* Sanity checks. */
	if (crp->crp_callback == NULL) {
		return EINVAL;
	}
	if (crp->crp_desc == NULL) {
		crp->crp_etype = EINVAL;
		crypto_done(crp);
		return 0;
	}

	cap = crypto_checkdriver_lock(CRYPTO_SESID2HID(crp->crp_sid));
	if (cap != NULL && (cap->cc_flags & CRYPTOCAP_F_CLEANUP) == 0) {
		int (*process)(void *, struct cryptop *, int);
		void *arg;

		process = cap->cc_process;
		arg = cap->cc_arg;
		crp->reqcpu = curcpu();

		/*
		 * Invoke the driver to process the request.
		 */
		DPRINTF("calling process for %p\n", crp);
		crypto_driver_unlock(cap);
		return (*process)(arg, crp, hint);
	} else {
		struct cryptodesc *crd;
		u_int64_t nid = 0;

		if (cap != NULL)
			crypto_driver_unlock(cap);

		/*
		 * Driver has unregistered; migrate the session and return
		 * an error to the caller so they'll resubmit the op.
		 */
		crypto_freesession(crp->crp_sid);

		for (crd = crp->crp_desc; crd->crd_next; crd = crd->crd_next)
			crd->CRD_INI.cri_next = &(crd->crd_next->CRD_INI);

		if (crypto_newsession(&nid, &(crp->crp_desc->CRD_INI), 0) == 0)
			crp->crp_sid = nid;

		crp->crp_etype = EAGAIN;

		crypto_done(crp);
		return 0;
	}
}

/*
 * Release a set of crypto descriptors.
 */
void
crypto_freereq(struct cryptop *crp)
{
	struct cryptodesc *crd;

	if (crp == NULL)
		return;
	DPRINTF("lid[%u]: crp %p\n", CRYPTO_SESID2LID(crp->crp_sid), crp);

	/* sanity check */
	if (crp->crp_flags & CRYPTO_F_ONRETQ) {
		panic("crypto_freereq() freeing crp on RETQ\n");
	}

	while ((crd = crp->crp_desc) != NULL) {
		crp->crp_desc = crd->crd_next;
		pool_cache_put(cryptodesc_cache, crd);
	}
	pool_cache_put(cryptop_cache, crp);
}

/*
 * Acquire a set of crypto descriptors.
 */
struct cryptop *
crypto_getreq(int num)
{
	struct cryptodesc *crd;
	struct cryptop *crp;
	struct crypto_crp_ret_qs *qs;

	/*
	 * When crp_ret_q is full, we restrict here to avoid crp_ret_q
	 * overflow by error callback.
	 */
	qs = crypto_get_crp_ret_qs(curcpu());
	if (qs->crp_ret_q_maxlen > 0
	    && qs->crp_ret_q_len > qs->crp_ret_q_maxlen) {
		qs->crp_ret_q_drops++;
		crypto_put_crp_ret_qs(curcpu());
		return NULL;
	}
	crypto_put_crp_ret_qs(curcpu());

	crp = pool_cache_get(cryptop_cache, PR_NOWAIT);
	if (crp == NULL) {
		return NULL;
	}
	memset(crp, 0, sizeof(struct cryptop));

	while (num--) {
		crd = pool_cache_get(cryptodesc_cache, PR_NOWAIT);
		if (crd == NULL) {
			crypto_freereq(crp);
			return NULL;
		}

		memset(crd, 0, sizeof(struct cryptodesc));
		crd->crd_next = crp->crp_desc;
		crp->crp_desc = crd;
	}

	return crp;
}

/*
 * Release a set of asymmetric crypto descriptors.
 * Currently, only one descriptor is supported.
 */
void
crypto_kfreereq(struct cryptkop *krp)
{

	if (krp == NULL)
		return;

	DPRINTF("krp %p\n", krp);

	/* sanity check */
	if (krp->krp_flags & CRYPTO_F_ONRETQ) {
		panic("crypto_kfreereq() freeing krp on RETQ\n");
	}

	pool_cache_put(cryptkop_cache, krp);
}

/*
 * Acquire a set of asymmetric crypto descriptors.
 * Currently, only one descriptor is supported.
 */
struct cryptkop *
crypto_kgetreq(int num __unused, int prflags)
{
	struct cryptkop *krp;
	struct crypto_crp_ret_qs *qs;

	/*
	 * When crp_ret_kq is full, we restrict here to avoid crp_ret_kq
	 * overflow by error callback.
	 */
	qs = crypto_get_crp_ret_qs(curcpu());
	if (qs->crp_ret_kq_maxlen > 0
	    && qs->crp_ret_kq_len > qs->crp_ret_kq_maxlen) {
		qs->crp_ret_kq_drops++;
		crypto_put_crp_ret_qs(curcpu());
		return NULL;
	}
	crypto_put_crp_ret_qs(curcpu());

	krp = pool_cache_get(cryptkop_cache, prflags);
	if (krp == NULL) {
		return NULL;
	}
	memset(krp, 0, sizeof(struct cryptkop));

	return krp;
}
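
/*
 * Asymmetric-path counterpart of the symmetric sketch earlier (parameter
 * setup elided; the xyz_kdone callback is hypothetical):
 *
 *	struct cryptkop *krp = crypto_kgetreq(1, PR_NOWAIT);
 *	if (krp == NULL)
 *		return ENOMEM;
 *	krp->krp_op = CRK_MOD_EXP;
 *	krp->krp_callback = xyz_kdone;
 *	(void)crypto_kdispatch(krp);
 */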

/*
 * Invoke the callback on behalf of the driver.
 */
void
crypto_done(struct cryptop *crp)
{

	KASSERT(crp != NULL);

	if (crp->crp_etype != 0)
		cryptostats.cs_errs++;
#ifdef CRYPTO_TIMING
	if (crypto_timing)
		crypto_tstat(&cryptostats.cs_done, &crp->crp_tstamp);
#endif
	DPRINTF("lid[%u]: crp %p\n", CRYPTO_SESID2LID(crp->crp_sid), crp);

	/*
	 * Normal case; queue the callback for the thread.
	 *
	 * The return queue is manipulated by the swi thread
	 * and, potentially, by crypto device drivers calling
	 * back to mark operations completed.  Thus we need
	 * to mask both while manipulating the return queue.
	 */
	if (crp->crp_flags & CRYPTO_F_CBIMM) {
		/*
		 * Do the callback directly.  This is ok when the
		 * callback routine does very little (e.g. the
		 * /dev/crypto callback method just does a wakeup).
		 */
		crp->crp_flags |= CRYPTO_F_DONE;

#ifdef CRYPTO_TIMING
		if (crypto_timing) {
			/*
			 * NB: We must copy the timestamp before
			 * doing the callback as the cryptop is
			 * likely to be reclaimed.
			 */
			struct timespec t = crp->crp_tstamp;
			crypto_tstat(&cryptostats.cs_cb, &t);
			crp->crp_callback(crp);
			crypto_tstat(&cryptostats.cs_finis, &t);
		} else
#endif
			crp->crp_callback(crp);
	} else {
		crp->crp_flags |= CRYPTO_F_DONE;
#if 0
		if (crp->crp_flags & CRYPTO_F_USER) {
			/*
			 * TODO:
			 * If crp->crp_flags & CRYPTO_F_USER and the used
			 * encryption driver does all the processing in
			 * the same context, we can skip enqueueing crp_ret_q
			 * and softint_schedule(crypto_ret_si).
			 */
			DPRINTF("lid[%u]: crp %p CRYPTO_F_USER\n",
			    CRYPTO_SESID2LID(crp->crp_sid), crp);
		} else
#endif
		{
			int wasempty;
			struct crypto_crp_ret_qs *qs;
			struct crypto_crp_ret_q *crp_ret_q;

			qs = crypto_get_crp_ret_qs(crp->reqcpu);
			crp_ret_q = &qs->crp_ret_q;
			wasempty = TAILQ_EMPTY(crp_ret_q);
			DPRINTF("lid[%u]: queueing %p\n",
			    CRYPTO_SESID2LID(crp->crp_sid), crp);
			crp->crp_flags |= CRYPTO_F_ONRETQ;
			TAILQ_INSERT_TAIL(crp_ret_q, crp, crp_next);
			qs->crp_ret_q_len++;
			if (wasempty && !qs->crp_ret_q_exit_flag) {
				DPRINTF("lid[%u]: waking cryptoret, "
				    "crp %p hit empty queue.\n",
				    CRYPTO_SESID2LID(crp->crp_sid), crp);
				softint_schedule_cpu(crypto_ret_si,
				    crp->reqcpu);
			}
			crypto_put_crp_ret_qs(crp->reqcpu);
		}
	}
}

/*
 * Invoke the callback on behalf of the driver.
 */
void
crypto_kdone(struct cryptkop *krp)
{

	KASSERT(krp != NULL);

	if (krp->krp_status != 0)
		cryptostats.cs_kerrs++;

	krp->krp_flags |= CRYPTO_F_DONE;

	/*
	 * The return queue is manipulated by the swi thread
	 * and, potentially, by crypto device drivers calling
	 * back to mark operations completed.  Thus we need
	 * to mask both while manipulating the return queue.
	 */
	if (krp->krp_flags & CRYPTO_F_CBIMM) {
		krp->krp_callback(krp);
	} else {
		int wasempty;
		struct crypto_crp_ret_qs *qs;
		struct crypto_crp_ret_kq *crp_ret_kq;

		qs = crypto_get_crp_ret_qs(krp->reqcpu);
		crp_ret_kq = &qs->crp_ret_kq;

		wasempty = TAILQ_EMPTY(crp_ret_kq);
		krp->krp_flags |= CRYPTO_F_ONRETQ;
		TAILQ_INSERT_TAIL(crp_ret_kq, krp, krp_next);
		qs->crp_ret_kq_len++;
		if (wasempty && !qs->crp_ret_q_exit_flag)
			softint_schedule_cpu(crypto_ret_si, krp->reqcpu);
		crypto_put_crp_ret_qs(krp->reqcpu);
	}
}

int
crypto_getfeat(int *featp)
{

	if (crypto_userasymcrypto == 0) {
		*featp = 0;
		return 0;
	}

	mutex_enter(&crypto_drv_mtx);

	int feat = 0;
	for (int hid = 0; hid < crypto_drivers_num; hid++) {
		struct cryptocap *cap;
		cap = crypto_checkdriver(hid);
		if (cap == NULL)
			continue;

		crypto_driver_lock(cap);

		if ((cap->cc_flags & CRYPTOCAP_F_SOFTWARE) &&
		    crypto_devallowsoft == 0)
			goto unlock;

		if (cap->cc_kprocess == NULL)
			goto unlock;

		for (int kalg = 0; kalg < CRK_ALGORITHM_MAX; kalg++)
			if ((cap->cc_kalg[kalg] &
			    CRYPTO_ALG_FLAG_SUPPORTED) != 0)
				feat |= 1 << kalg;

unlock:		crypto_driver_unlock(cap);
	}

	mutex_exit(&crypto_drv_mtx);
	*featp = feat;
	return (0);
}
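
/*
 * crypto_getfeat() reports one bit per supported key algorithm, so a
 * caller can test for, e.g., modular exponentiation support (sketch):
 *
 *	int feat;
 *
 *	if (crypto_getfeat(&feat) == 0 && (feat & (1 << CRK_MOD_EXP)))
 *		... asymmetric offload is available ...
 */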

/*
 * Software interrupt thread to dispatch crypto requests.
 */
static void
cryptointr(void *arg __unused)
{
	struct cryptop *crp, *submit, *cnext;
	struct cryptkop *krp, *knext;
	struct cryptocap *cap;
	struct crypto_crp_qs *crp_qs;
	struct crypto_crp_q *crp_q;
	struct crypto_crp_kq *crp_kq;
	int result, hint, s;

	cryptostats.cs_intrs++;
	crp_qs = crypto_get_crp_qs(&s);
	crp_q = crp_qs->crp_q;
	crp_kq = crp_qs->crp_kq;
	do {
		/*
		 * Find the first element in the queue that can be
		 * processed and look-ahead to see if multiple ops
		 * are ready for the same driver.
		 */
		submit = NULL;
		hint = 0;
		TAILQ_FOREACH_SAFE(crp, crp_q, crp_next, cnext) {
			u_int32_t hid = CRYPTO_SESID2HID(crp->crp_sid);
			cap = crypto_checkdriver_lock(hid);
			if (cap == NULL || cap->cc_process == NULL) {
				if (cap != NULL)
					crypto_driver_unlock(cap);
				/* Op needs to be migrated, process it. */
				submit = crp;
				break;
			}

			/*
			 * Skip a blocked crp regardless of CRYPTO_F_BATCH.
			 */
			if (cap->cc_qblocked != 0) {
				crypto_driver_unlock(cap);
				continue;
			}
			crypto_driver_unlock(cap);

			/*
			 * Skip batched crps until the end of crp_q.
			 */
			if ((crp->crp_flags & CRYPTO_F_BATCH) != 0) {
				if (submit == NULL) {
					submit = crp;
				} else {
					if (CRYPTO_SESID2HID(submit->crp_sid)
					    == hid)
						hint = CRYPTO_HINT_MORE;
				}

				continue;
			}

			/*
			 * Found the first crp which is neither blocked
			 * nor batched.
			 */
			submit = crp;
			/*
			 * A batched crp can be processed much later, so
			 * clear the hint.
			 */
			hint = 0;
			break;
		}
		if (submit != NULL) {
			TAILQ_REMOVE(crp_q, submit, crp_next);
			result = crypto_invoke(submit, hint);
			/*
			 * Note: on ERESTART we reinsert into crp_q below,
			 * so the queue reference obtained above must still
			 * be held here.  sigh.
			 */
			if (result == ERESTART) {
				/*
				 * The driver ran out of resources, mark the
				 * driver ``blocked'' for cryptop's and put
				 * the request back in the queue.  It would
				 * be best to put the request back where we
				 * got it but that's hard so for now we put
				 * it at the front.  This should be ok;
				 * putting it at the end does not work.
				 */
				/* validate sid again */
				cap = crypto_checkdriver_lock(CRYPTO_SESID2HID(submit->crp_sid));
				if (cap == NULL) {
					/* migrate again, sigh... */
					TAILQ_INSERT_TAIL(crp_q, submit, crp_next);
				} else {
					cap->cc_qblocked = 1;
					crypto_driver_unlock(cap);
					TAILQ_INSERT_HEAD(crp_q, submit, crp_next);
					cryptostats.cs_blocks++;
				}
			}
		}

		/* As above, but for key ops */
		TAILQ_FOREACH_SAFE(krp, crp_kq, krp_next, knext) {
			cap = crypto_checkdriver_lock(krp->krp_hid);
			if (cap == NULL || cap->cc_kprocess == NULL) {
				if (cap != NULL)
					crypto_driver_unlock(cap);
				/* Op needs to be migrated, process it. */
				break;
			}
			if (!cap->cc_kqblocked) {
				crypto_driver_unlock(cap);
				break;
			}
			crypto_driver_unlock(cap);
		}
		if (krp != NULL) {
			TAILQ_REMOVE(crp_kq, krp, krp_next);
			result = crypto_kinvoke(krp, 0);
			/* the next iteration will want the mutex. :-/ */
			if (result == ERESTART) {
				/*
				 * The driver ran out of resources, mark the
				 * driver ``blocked'' for cryptkop's and put
				 * the request back in the queue.  It would
				 * be best to put the request back where we
				 * got it but that's hard so for now we put
				 * it at the front.  This should be ok;
				 * putting it at the end does not work.
				 */
				/* validate sid again */
				cap = crypto_checkdriver_lock(krp->krp_hid);
				if (cap == NULL) {
					/* migrate again, sigh... */
					TAILQ_INSERT_TAIL(crp_kq, krp, krp_next);
				} else {
					cap->cc_kqblocked = 1;
					crypto_driver_unlock(cap);
					TAILQ_INSERT_HEAD(crp_kq, krp, krp_next);
					cryptostats.cs_kblocks++;
				}
			}
		}
	} while (submit != NULL || krp != NULL);
	crypto_put_crp_qs(&s);
}

/*
 * Softint handler to do callbacks.
 */
static void
cryptoret_softint(void *arg __unused)
{
	struct crypto_crp_ret_qs *qs;
	struct crypto_crp_ret_q *crp_ret_q;
	struct crypto_crp_ret_kq *crp_ret_kq;

	qs = crypto_get_crp_ret_qs(curcpu());
	crp_ret_q = &qs->crp_ret_q;
	crp_ret_kq = &qs->crp_ret_kq;
	for (;;) {
		struct cryptop *crp;
		struct cryptkop *krp;

		crp = TAILQ_FIRST(crp_ret_q);
		if (crp != NULL) {
			TAILQ_REMOVE(crp_ret_q, crp, crp_next);
			qs->crp_ret_q_len--;
			crp->crp_flags &= ~CRYPTO_F_ONRETQ;
		}
		krp = TAILQ_FIRST(crp_ret_kq);
		if (krp != NULL) {
			TAILQ_REMOVE(crp_ret_kq, krp, krp_next);
			qs->crp_ret_kq_len--;
			krp->krp_flags &= ~CRYPTO_F_ONRETQ;
		}

		/* drop the mutex before calling any callbacks. */
		if (crp == NULL && krp == NULL)
			break;

		mutex_spin_exit(&qs->crp_ret_q_mtx);
		if (crp != NULL) {
#ifdef CRYPTO_TIMING
			if (crypto_timing) {
				/*
				 * NB: We must copy the timestamp before
				 * doing the callback as the cryptop is
				 * likely to be reclaimed.
				 */
				struct timespec t = crp->crp_tstamp;
				crypto_tstat(&cryptostats.cs_cb, &t);
				crp->crp_callback(crp);
				crypto_tstat(&cryptostats.cs_finis, &t);
			} else
#endif
			{
				crp->crp_callback(crp);
			}
		}
		if (krp != NULL)
			krp->krp_callback(krp);

		mutex_spin_enter(&qs->crp_ret_q_mtx);
	}
	crypto_put_crp_ret_qs(curcpu());
}

/* NetBSD module interface */

MODULE(MODULE_CLASS_MISC, opencrypto, NULL);

static int
opencrypto_modcmd(modcmd_t cmd, void *opaque)
{
	int error = 0;

	switch (cmd) {
	case MODULE_CMD_INIT:
#ifdef _MODULE
		error = crypto_init();
#endif
		break;
	case MODULE_CMD_FINI:
#ifdef _MODULE
		error = crypto_destroy(true);
#endif
		break;
	default:
		error = ENOTTY;
	}
	return error;
}