1 /* $NetBSD: crypto.c,v 1.106 2018/06/06 01:49:09 maya Exp $ */ 2 /* $FreeBSD: src/sys/opencrypto/crypto.c,v 1.4.2.5 2003/02/26 00:14:05 sam Exp $ */ 3 /* $OpenBSD: crypto.c,v 1.41 2002/07/17 23:52:38 art Exp $ */ 4 5 /*- 6 * Copyright (c) 2008 The NetBSD Foundation, Inc. 7 * All rights reserved. 8 * 9 * This code is derived from software contributed to The NetBSD Foundation 10 * by Coyote Point Systems, Inc. 11 * 12 * Redistribution and use in source and binary forms, with or without 13 * modification, are permitted provided that the following conditions 14 * are met: 15 * 1. Redistributions of source code must retain the above copyright 16 * notice, this list of conditions and the following disclaimer. 17 * 2. Redistributions in binary form must reproduce the above copyright 18 * notice, this list of conditions and the following disclaimer in the 19 * documentation and/or other materials provided with the distribution. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 23 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 24 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 25 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 31 * POSSIBILITY OF SUCH DAMAGE. 32 */ 33 34 /* 35 * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu) 36 * 37 * This code was written by Angelos D. Keromytis in Athens, Greece, in 38 * February 2000. Network Security Technologies Inc. (NSTI) kindly 39 * supported the development of this code. 40 * 41 * Copyright (c) 2000, 2001 Angelos D. Keromytis 42 * 43 * Permission to use, copy, and modify this software with or without fee 44 * is hereby granted, provided that this entire notice is included in 45 * all source code copies of any software which is or includes a copy or 46 * modification of this software. 47 * 48 * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR 49 * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY 50 * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE 51 * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR 52 * PURPOSE. 
53 */ 54 55 #include <sys/cdefs.h> 56 __KERNEL_RCSID(0, "$NetBSD: crypto.c,v 1.106 2018/06/06 01:49:09 maya Exp $"); 57 58 #include <sys/param.h> 59 #include <sys/reboot.h> 60 #include <sys/systm.h> 61 #include <sys/proc.h> 62 #include <sys/pool.h> 63 #include <sys/kthread.h> 64 #include <sys/once.h> 65 #include <sys/sysctl.h> 66 #include <sys/intr.h> 67 #include <sys/errno.h> 68 #include <sys/module.h> 69 #include <sys/xcall.h> 70 #include <sys/device.h> 71 #include <sys/cpu.h> 72 #include <sys/percpu.h> 73 #include <sys/kmem.h> 74 75 #if defined(_KERNEL_OPT) 76 #include "opt_ocf.h" 77 #endif 78 79 #include <opencrypto/cryptodev.h> 80 #include <opencrypto/xform.h> /* XXX for M_XDATA */ 81 82 /* 83 * Crypto drivers register themselves by allocating a slot in the 84 * crypto_drivers table with crypto_get_driverid() and then registering 85 * each algorithm they support with crypto_register() and crypto_kregister(). 86 */ 87 /* Don't directly access crypto_drivers[i], use crypto_checkdriver(i). */ 88 static struct { 89 kmutex_t mtx; 90 int num; 91 struct cryptocap *list; 92 } crypto_drv __cacheline_aligned; 93 #define crypto_drv_mtx (crypto_drv.mtx) 94 #define crypto_drivers_num (crypto_drv.num) 95 #define crypto_drivers (crypto_drv.list) 96 97 static void *crypto_q_si; 98 static void *crypto_ret_si; 99 100 /* 101 * There are two queues for crypto requests; one for symmetric (e.g. 102 * cipher) operations and one for asymmetric (e.g. MOD) operations. 103 * See below for how synchronization is handled. 104 */ 105 TAILQ_HEAD(crypto_crp_q, cryptop); 106 TAILQ_HEAD(crypto_crp_kq, cryptkop); 107 struct crypto_crp_qs { 108 struct crypto_crp_q *crp_q; 109 struct crypto_crp_kq *crp_kq; 110 }; 111 static percpu_t *crypto_crp_qs_percpu; 112 113 static inline struct crypto_crp_qs * 114 crypto_get_crp_qs(int *s) 115 { 116 117 KASSERT(s != NULL); 118 119 *s = splsoftnet(); 120 return percpu_getref(crypto_crp_qs_percpu); 121 } 122 123 static inline void 124 crypto_put_crp_qs(int *s) 125 { 126 127 KASSERT(s != NULL); 128 129 percpu_putref(crypto_crp_qs_percpu); 130 splx(*s); 131 } 132 133 static void 134 crypto_crp_q_is_busy_pc(void *p, void *arg, struct cpu_info *ci __unused) 135 { 136 struct crypto_crp_qs *qs_pc = p; 137 bool *isempty = arg; 138 139 if (!TAILQ_EMPTY(qs_pc->crp_q) || !TAILQ_EMPTY(qs_pc->crp_kq)) 140 *isempty = true; 141 } 142 143 static void 144 crypto_crp_qs_init_pc(void *p, void *arg __unused, struct cpu_info *ci __unused) 145 { 146 struct crypto_crp_qs *qs = p; 147 148 qs->crp_q = kmem_alloc(sizeof(struct crypto_crp_q), KM_SLEEP); 149 qs->crp_kq = kmem_alloc(sizeof(struct crypto_crp_kq), KM_SLEEP); 150 151 TAILQ_INIT(qs->crp_q); 152 TAILQ_INIT(qs->crp_kq); 153 } 154 155 /* 156 * There are two queues for processing completed crypto requests; one 157 * for the symmetric and one for the asymmetric ops. We only need one 158 * but have two to avoid type futzing (cryptop vs. cryptkop). See below 159 * for how synchronization is handled. 160 */ 161 TAILQ_HEAD(crypto_crp_ret_q, cryptop); 162 TAILQ_HEAD(crypto_crp_ret_kq, cryptkop); 163 struct crypto_crp_ret_qs { 164 kmutex_t crp_ret_q_mtx; 165 bool crp_ret_q_exit_flag; 166 167 struct crypto_crp_ret_q crp_ret_q; 168 int crp_ret_q_len; 169 int crp_ret_q_maxlen; /* queue length limit. <=0 means unlimited. */ 170 int crp_ret_q_drops; 171 172 struct crypto_crp_ret_kq crp_ret_kq; 173 int crp_ret_kq_len; 174 int crp_ret_kq_maxlen; /* queue length limit. <=0 means unlimited. 
*/ 175 int crp_ret_kq_drops; 176 }; 177 struct crypto_crp_ret_qs **crypto_crp_ret_qs_list; 178 179 180 static inline struct crypto_crp_ret_qs * 181 crypto_get_crp_ret_qs(struct cpu_info *ci) 182 { 183 u_int cpuid; 184 struct crypto_crp_ret_qs *qs; 185 186 KASSERT(ci != NULL); 187 188 cpuid = cpu_index(ci); 189 qs = crypto_crp_ret_qs_list[cpuid]; 190 mutex_enter(&qs->crp_ret_q_mtx); 191 return qs; 192 } 193 194 static inline void 195 crypto_put_crp_ret_qs(struct cpu_info *ci) 196 { 197 u_int cpuid; 198 struct crypto_crp_ret_qs *qs; 199 200 KASSERT(ci != NULL); 201 202 cpuid = cpu_index(ci); 203 qs = crypto_crp_ret_qs_list[cpuid]; 204 mutex_exit(&qs->crp_ret_q_mtx); 205 } 206 207 #ifndef CRYPTO_RET_Q_MAXLEN 208 #define CRYPTO_RET_Q_MAXLEN 0 209 #endif 210 #ifndef CRYPTO_RET_KQ_MAXLEN 211 #define CRYPTO_RET_KQ_MAXLEN 0 212 #endif 213 214 static int 215 sysctl_opencrypto_q_len(SYSCTLFN_ARGS) 216 { 217 int error, len = 0; 218 struct sysctlnode node = *rnode; 219 220 for (int i = 0; i < ncpu; i++) { 221 struct crypto_crp_ret_qs *qs; 222 struct cpu_info *ci = cpu_lookup(i); 223 224 qs = crypto_get_crp_ret_qs(ci); 225 len += qs->crp_ret_q_len; 226 crypto_put_crp_ret_qs(ci); 227 } 228 229 node.sysctl_data = &len; 230 error = sysctl_lookup(SYSCTLFN_CALL(&node)); 231 if (error || newp == NULL) 232 return error; 233 234 return 0; 235 } 236 237 static int 238 sysctl_opencrypto_q_drops(SYSCTLFN_ARGS) 239 { 240 int error, drops = 0; 241 struct sysctlnode node = *rnode; 242 243 for (int i = 0; i < ncpu; i++) { 244 struct crypto_crp_ret_qs *qs; 245 struct cpu_info *ci = cpu_lookup(i); 246 247 qs = crypto_get_crp_ret_qs(ci); 248 drops += qs->crp_ret_q_drops; 249 crypto_put_crp_ret_qs(ci); 250 } 251 252 node.sysctl_data = &drops; 253 error = sysctl_lookup(SYSCTLFN_CALL(&node)); 254 if (error || newp == NULL) 255 return error; 256 257 return 0; 258 } 259 260 static int 261 sysctl_opencrypto_q_maxlen(SYSCTLFN_ARGS) 262 { 263 int error, maxlen; 264 struct crypto_crp_ret_qs *qs; 265 struct sysctlnode node = *rnode; 266 267 /* each crp_ret_kq_maxlen is the same. 
*/ 268 qs = crypto_get_crp_ret_qs(curcpu()); 269 maxlen = qs->crp_ret_q_maxlen; 270 crypto_put_crp_ret_qs(curcpu()); 271 272 node.sysctl_data = &maxlen; 273 error = sysctl_lookup(SYSCTLFN_CALL(&node)); 274 if (error || newp == NULL) 275 return error; 276 277 for (int i = 0; i < ncpu; i++) { 278 struct cpu_info *ci = cpu_lookup(i); 279 280 qs = crypto_get_crp_ret_qs(ci); 281 qs->crp_ret_q_maxlen = maxlen; 282 crypto_put_crp_ret_qs(ci); 283 } 284 285 return 0; 286 } 287 288 static int 289 sysctl_opencrypto_kq_len(SYSCTLFN_ARGS) 290 { 291 int error, len = 0; 292 struct sysctlnode node = *rnode; 293 294 for (int i = 0; i < ncpu; i++) { 295 struct crypto_crp_ret_qs *qs; 296 struct cpu_info *ci = cpu_lookup(i); 297 298 qs = crypto_get_crp_ret_qs(ci); 299 len += qs->crp_ret_kq_len; 300 crypto_put_crp_ret_qs(ci); 301 } 302 303 node.sysctl_data = &len; 304 error = sysctl_lookup(SYSCTLFN_CALL(&node)); 305 if (error || newp == NULL) 306 return error; 307 308 return 0; 309 } 310 311 static int 312 sysctl_opencrypto_kq_drops(SYSCTLFN_ARGS) 313 { 314 int error, drops = 0; 315 struct sysctlnode node = *rnode; 316 317 for (int i = 0; i < ncpu; i++) { 318 struct crypto_crp_ret_qs *qs; 319 struct cpu_info *ci = cpu_lookup(i); 320 321 qs = crypto_get_crp_ret_qs(ci); 322 drops += qs->crp_ret_kq_drops; 323 crypto_put_crp_ret_qs(ci); 324 } 325 326 node.sysctl_data = &drops; 327 error = sysctl_lookup(SYSCTLFN_CALL(&node)); 328 if (error || newp == NULL) 329 return error; 330 331 return 0; 332 } 333 334 static int 335 sysctl_opencrypto_kq_maxlen(SYSCTLFN_ARGS) 336 { 337 int error, maxlen; 338 struct crypto_crp_ret_qs *qs; 339 struct sysctlnode node = *rnode; 340 341 /* each crp_ret_kq_maxlen is the same. */ 342 qs = crypto_get_crp_ret_qs(curcpu()); 343 maxlen = qs->crp_ret_kq_maxlen; 344 crypto_put_crp_ret_qs(curcpu()); 345 346 node.sysctl_data = &maxlen; 347 error = sysctl_lookup(SYSCTLFN_CALL(&node)); 348 if (error || newp == NULL) 349 return error; 350 351 for (int i = 0; i < ncpu; i++) { 352 struct cpu_info *ci = cpu_lookup(i); 353 354 qs = crypto_get_crp_ret_qs(ci); 355 qs->crp_ret_kq_maxlen = maxlen; 356 crypto_put_crp_ret_qs(ci); 357 } 358 359 return 0; 360 } 361 362 /* 363 * Crypto op and descriptor data structures are allocated 364 * from separate private zones(FreeBSD)/pools(netBSD/OpenBSD) . 365 */ 366 static pool_cache_t cryptop_cache; 367 static pool_cache_t cryptodesc_cache; 368 static pool_cache_t cryptkop_cache; 369 370 int crypto_usercrypto = 1; /* userland may open /dev/crypto */ 371 int crypto_userasymcrypto = 1; /* userland may do asym crypto reqs */ 372 /* 373 * cryptodevallowsoft is (intended to be) sysctl'able, controlling 374 * access to hardware versus software transforms as below: 375 * 376 * crypto_devallowsoft < 0: Force userlevel requests to use software 377 * transforms, always 378 * crypto_devallowsoft = 0: Use hardware if present, grant userlevel 379 * requests for non-accelerated transforms 380 * (handling the latter in software) 381 * crypto_devallowsoft > 0: Allow user requests only for transforms which 382 * are hardware-accelerated. 
 */
int	crypto_devallowsoft = 1;	/* only use hardware crypto */

static void
sysctl_opencrypto_setup(struct sysctllog **clog)
{
	const struct sysctlnode *ocnode;
	const struct sysctlnode *retqnode, *retkqnode;

	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		       CTLTYPE_INT, "usercrypto",
		       SYSCTL_DESCR("Enable/disable user-mode access to "
			   "crypto support"),
		       NULL, 0, &crypto_usercrypto, 0,
		       CTL_KERN, CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		       CTLTYPE_INT, "userasymcrypto",
		       SYSCTL_DESCR("Enable/disable user-mode access to "
			   "asymmetric crypto support"),
		       NULL, 0, &crypto_userasymcrypto, 0,
		       CTL_KERN, CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		       CTLTYPE_INT, "cryptodevallowsoft",
		       SYSCTL_DESCR("Enable/disable use of software "
			   "asymmetric crypto support"),
		       NULL, 0, &crypto_devallowsoft, 0,
		       CTL_KERN, CTL_CREATE, CTL_EOL);

	sysctl_createv(clog, 0, NULL, &ocnode,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_NODE, "opencrypto",
		       SYSCTL_DESCR("opencrypto related entries"),
		       NULL, 0, NULL, 0,
		       CTL_CREATE, CTL_EOL);

	sysctl_createv(clog, 0, &ocnode, &retqnode,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_NODE, "crypto_ret_q",
		       SYSCTL_DESCR("crypto_ret_q related entries"),
		       NULL, 0, NULL, 0,
		       CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, &retqnode, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READONLY,
		       CTLTYPE_INT, "len",
		       SYSCTL_DESCR("Current queue length"),
		       sysctl_opencrypto_q_len, 0,
		       NULL, 0,
		       CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, &retqnode, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READONLY,
		       CTLTYPE_INT, "drops",
		       SYSCTL_DESCR("Crypto requests dropped due to full ret queue"),
		       sysctl_opencrypto_q_drops, 0,
		       NULL, 0,
		       CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, &retqnode, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		       CTLTYPE_INT, "maxlen",
		       SYSCTL_DESCR("Maximum allowed queue length"),
		       sysctl_opencrypto_q_maxlen, 0,
		       NULL, 0,
		       CTL_CREATE, CTL_EOL);

	sysctl_createv(clog, 0, &ocnode, &retkqnode,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_NODE, "crypto_ret_kq",
		       SYSCTL_DESCR("crypto_ret_kq related entries"),
		       NULL, 0, NULL, 0,
		       CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, &retkqnode, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READONLY,
		       CTLTYPE_INT, "len",
		       SYSCTL_DESCR("Current queue length"),
		       sysctl_opencrypto_kq_len, 0,
		       NULL, 0,
		       CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, &retkqnode, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READONLY,
		       CTLTYPE_INT, "drops",
		       SYSCTL_DESCR("Crypto requests dropped due to full ret queue"),
		       sysctl_opencrypto_kq_drops, 0,
		       NULL, 0,
		       CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, &retkqnode, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		       CTLTYPE_INT, "maxlen",
		       SYSCTL_DESCR("Maximum allowed queue length"),
		       sysctl_opencrypto_kq_maxlen, 0,
		       NULL, 0,
		       CTL_CREATE, CTL_EOL);
}

/*
 * Synchronization: read carefully, this is non-trivial.
 *
 * Crypto requests are submitted via crypto_dispatch.  Typically
 * these come in from network protocols at spl0 (output path) or
 * spl[,soft]net (input path).
 *
 * Requests are typically passed to the driver directly, but they
 * may also be queued for processing by a software interrupt thread,
 * cryptointr, that runs at splsoftcrypto.  This thread dispatches
 * the requests to crypto drivers (h/w or s/w) who call crypto_done
 * when a request is complete.  Hardware crypto drivers are assumed
 * to register their IRQs as network devices so their interrupt handlers
 * and subsequent "done callbacks" happen at spl[imp,net].
 *
 * Completed crypto ops are queued for a separate kernel thread that
 * handles the callbacks at spl0.  This decoupling ensures the crypto
 * driver interrupt service routine is not delayed while the callback
 * takes place and that callbacks are delivered after a context switch
 * (as opposed to a software interrupt that clients must block).
 *
 * This scheme is not intended for SMP machines.
 */
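/*
 * Illustrative sketch only (not part of the original code): how a consumer
 * typically builds and submits a symmetric request against an existing
 * session "sid", following the dispatch/callback flow described above.
 * The names example_cb()/example_encrypt(), the mbuf argument (assumes
 * <sys/mbuf.h>) and the algorithm choice are hypothetical; error handling
 * is abbreviated and the block is disabled with #if 0.
 */
#if 0
static int
example_cb(struct cryptop *crp)
{

	if (crp->crp_etype == EAGAIN) {
		/* Session was migrated by crypto_invoke(); resubmit. */
		crp->crp_etype = 0;
		crp->crp_flags &= ~CRYPTO_F_DONE;
		return crypto_dispatch(crp);
	}
	if (crp->crp_etype != 0)
		printf("example: request failed: %d\n", crp->crp_etype);
	crypto_freereq(crp);
	return 0;
}

static int
example_encrypt(u_int64_t sid, struct mbuf *m, int len)
{
	struct cryptop *crp;
	struct cryptodesc *crd;

	crp = crypto_getreq(1);			/* one descriptor */
	if (crp == NULL)
		return ENOMEM;

	crd = crp->crp_desc;
	crd->crd_alg = CRYPTO_AES_CBC;
	crd->crd_skip = 0;
	crd->crd_len = len;
	crd->crd_flags = CRD_F_ENCRYPT;

	crp->crp_sid = sid;
	crp->crp_ilen = len;
	crp->crp_flags = CRYPTO_F_IMBUF;	/* crp_buf is an mbuf chain */
	crp->crp_buf = (void *)m;
	crp->crp_callback = example_cb;

	/* Queued or handed to the driver; completion arrives via example_cb. */
	return crypto_dispatch(crp);
}
#endif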
static	void cryptointr(void *);	/* swi thread to dispatch ops */
static	void cryptoret_softint(void *);	/* softint to do callbacks */
static	int crypto_destroy(bool);
static	int crypto_invoke(struct cryptop *crp, int hint);
static	int crypto_kinvoke(struct cryptkop *krp, int hint);

static struct cryptocap *crypto_checkdriver_lock(u_int32_t);
static struct cryptocap *crypto_checkdriver_uninit(u_int32_t);
static struct cryptocap *crypto_checkdriver(u_int32_t);
static void crypto_driver_lock(struct cryptocap *);
static void crypto_driver_unlock(struct cryptocap *);
static void crypto_driver_clear(struct cryptocap *);

static int crypto_init_finalize(device_t);

static struct cryptostats cryptostats;
#ifdef CRYPTO_TIMING
static	int crypto_timing = 0;
#endif

static struct sysctllog *sysctl_opencrypto_clog;

static int
crypto_crp_ret_qs_init(void)
{
	int i, j;

	crypto_crp_ret_qs_list = kmem_alloc(sizeof(struct crypto_crp_ret_qs *) * ncpu,
	    KM_NOSLEEP);
	if (crypto_crp_ret_qs_list == NULL) {
		printf("crypto_init: cannot allocate ret queue list\n");
		return ENOMEM;
	}

	for (i = 0; i < ncpu; i++) {
		struct crypto_crp_ret_qs *qs;
		qs = kmem_alloc(sizeof(struct crypto_crp_ret_qs), KM_NOSLEEP);
		if (qs == NULL)
			break;

		mutex_init(&qs->crp_ret_q_mtx, MUTEX_DEFAULT, IPL_NET);
		qs->crp_ret_q_exit_flag = false;

		TAILQ_INIT(&qs->crp_ret_q);
		qs->crp_ret_q_len = 0;
		qs->crp_ret_q_maxlen = CRYPTO_RET_Q_MAXLEN;
		qs->crp_ret_q_drops = 0;

		TAILQ_INIT(&qs->crp_ret_kq);
		qs->crp_ret_kq_len = 0;
		qs->crp_ret_kq_maxlen = CRYPTO_RET_KQ_MAXLEN;
		qs->crp_ret_kq_drops = 0;

		crypto_crp_ret_qs_list[i] = qs;
	}
	if (i == ncpu)
		return 0;

	for (j = 0; j < i; j++) {
		struct crypto_crp_ret_qs *qs = crypto_crp_ret_qs_list[j];

		mutex_destroy(&qs->crp_ret_q_mtx);
		kmem_free(qs, sizeof(struct crypto_crp_ret_qs));
	}
	kmem_free(crypto_crp_ret_qs_list, sizeof(struct crypto_crp_ret_qs *) * ncpu);

	return ENOMEM;
}

static int
crypto_init0(void)
{
	int error;

	mutex_init(&crypto_drv_mtx, MUTEX_DEFAULT, IPL_NONE);
	cryptop_cache = pool_cache_init(sizeof(struct cryptop),
	    coherency_unit, 0, 0, "cryptop", NULL, IPL_NET, NULL, NULL, NULL);
	cryptodesc_cache = pool_cache_init(sizeof(struct cryptodesc),
	    coherency_unit, 0, 0, "cryptdesc", NULL, IPL_NET, NULL, NULL, NULL);
	cryptkop_cache = pool_cache_init(sizeof(struct cryptkop),
	    coherency_unit, 0, 0, "cryptkop", NULL, IPL_NET, NULL, NULL, NULL);

	crypto_crp_qs_percpu = percpu_alloc(sizeof(struct crypto_crp_qs));
	percpu_foreach(crypto_crp_qs_percpu, crypto_crp_qs_init_pc, NULL);

	error = crypto_crp_ret_qs_init();
	if (error) {
		printf("crypto_init: cannot malloc ret_q list\n");
		return ENOMEM;
	}

	crypto_drivers = kmem_zalloc(CRYPTO_DRIVERS_INITIAL *
	    sizeof(struct cryptocap), KM_NOSLEEP);
	if (crypto_drivers == NULL) {
		printf("crypto_init: cannot malloc driver table\n");
		return ENOMEM;
	}
	crypto_drivers_num = CRYPTO_DRIVERS_INITIAL;

	crypto_q_si = softint_establish(SOFTINT_NET|SOFTINT_MPSAFE, cryptointr, NULL);
	if (crypto_q_si == NULL) {
		printf("crypto_init: cannot establish request queue handler\n");
		return crypto_destroy(false);
	}

	/*
	 * Some encryption devices (such as mvcesa) are attached before
	 * ipi_sysinit().  That causes an assertion in ipi_register() as
	 * the crypto_ret_si softint uses SOFTINT_RCPU.
	 */
	if (config_finalize_register(NULL, crypto_init_finalize) != 0) {
		printf("crypto_init: cannot register crypto_init_finalize\n");
		return crypto_destroy(false);
	}

	sysctl_opencrypto_setup(&sysctl_opencrypto_clog);

	return 0;
}

static int
crypto_init_finalize(device_t self __unused)
{

	crypto_ret_si = softint_establish(SOFTINT_NET|SOFTINT_MPSAFE|SOFTINT_RCPU,
	    &cryptoret_softint, NULL);
	KASSERT(crypto_ret_si != NULL);

	return 0;
}

int
crypto_init(void)
{
	static ONCE_DECL(crypto_init_once);

	return RUN_ONCE(&crypto_init_once, crypto_init0);
}

static int
crypto_destroy(bool exit_kthread)
{
	int i;

	if (exit_kthread) {
		struct cryptocap *cap = NULL;
		uint64_t where;
		bool is_busy = false;

		/* if we have any in-progress requests, don't unload */
		percpu_foreach(crypto_crp_qs_percpu, crypto_crp_q_is_busy_pc,
		    &is_busy);
		if (is_busy)
			return EBUSY;
		/* FIXME:
		 * prohibit enqueueing to crp_q and crp_kq beyond this point.
		 */

		mutex_enter(&crypto_drv_mtx);
		for (i = 0; i < crypto_drivers_num; i++) {
			cap = crypto_checkdriver(i);
			if (cap == NULL)
				continue;
			if (cap->cc_sessions != 0) {
				mutex_exit(&crypto_drv_mtx);
				return EBUSY;
			}
		}
		mutex_exit(&crypto_drv_mtx);
		/* FIXME:
		 * prohibit touching crypto_drivers[] and its elements beyond
		 * this point.
		 */

		/*
		 * Ensure cryptoret_softint() is never scheduled and then wait
		 * for the last softint_execute().
		 */
		for (i = 0; i < ncpu; i++) {
			struct crypto_crp_ret_qs *qs;
			struct cpu_info *ci = cpu_lookup(i);

			qs = crypto_get_crp_ret_qs(ci);
			qs->crp_ret_q_exit_flag = true;
			crypto_put_crp_ret_qs(ci);
		}
		where = xc_broadcast(0, (xcfunc_t)nullop, NULL, NULL);
		xc_wait(where);
	}

	if (sysctl_opencrypto_clog != NULL)
		sysctl_teardown(&sysctl_opencrypto_clog);

	if (crypto_ret_si != NULL)
		softint_disestablish(crypto_ret_si);

	if (crypto_q_si != NULL)
		softint_disestablish(crypto_q_si);

	mutex_enter(&crypto_drv_mtx);
	if (crypto_drivers != NULL)
		kmem_free(crypto_drivers,
		    crypto_drivers_num * sizeof(struct cryptocap));
	mutex_exit(&crypto_drv_mtx);

	percpu_free(crypto_crp_qs_percpu, sizeof(struct crypto_crp_qs));

	pool_cache_destroy(cryptop_cache);
	pool_cache_destroy(cryptodesc_cache);
	pool_cache_destroy(cryptkop_cache);

	mutex_destroy(&crypto_drv_mtx);

	return 0;
}

static bool
crypto_driver_suitable(struct cryptocap *cap, struct cryptoini *cri)
{
	struct cryptoini *cr;

	for (cr = cri; cr; cr = cr->cri_next)
		if (cap->cc_alg[cr->cri_alg] == 0) {
			DPRINTF("alg %d not supported\n", cr->cri_alg);
			return false;
		}

	return true;
}

#define CRYPTO_ACCEPT_HARDWARE 0x1
#define CRYPTO_ACCEPT_SOFTWARE 0x2
/*
 * The algorithm we use here is pretty stupid; just use the
 * first driver that supports all the algorithms we need.
 * If there are multiple drivers we choose the driver with
 * the fewest active sessions.  We prefer hardware-backed
 * drivers to software ones.
 *
 * XXX We need more smarts here (in real life too, but that's
 * XXX another story altogether).
 */
static struct cryptocap *
crypto_select_driver_lock(struct cryptoini *cri, int hard)
{
	u_int32_t hid;
	int accept;
	struct cryptocap *cap, *best;

	best = NULL;
	/*
	 * When hard == 0 both hardware and software drivers may be used.
	 * Hardware drivers are preferred over software drivers, so search
	 * the hardware drivers first.
	 */
	if (hard >= 0)
		accept = CRYPTO_ACCEPT_HARDWARE;
	else
		accept = CRYPTO_ACCEPT_SOFTWARE;
again:
	for (hid = 0; hid < crypto_drivers_num; hid++) {
		cap = crypto_checkdriver(hid);
		if (cap == NULL)
			continue;

		crypto_driver_lock(cap);

		/*
		 * If it's not initialized or has remaining sessions
		 * referencing it, skip.
		 */
		if (cap->cc_newsession == NULL ||
		    (cap->cc_flags & CRYPTOCAP_F_CLEANUP)) {
			crypto_driver_unlock(cap);
			continue;
		}

		/* Hardware required -- ignore software drivers. */
		if ((accept & CRYPTO_ACCEPT_SOFTWARE) == 0
		    && (cap->cc_flags & CRYPTOCAP_F_SOFTWARE)) {
			crypto_driver_unlock(cap);
			continue;
		}
		/* Software required -- ignore hardware drivers. */
		if ((accept & CRYPTO_ACCEPT_HARDWARE) == 0
		    && (cap->cc_flags & CRYPTOCAP_F_SOFTWARE) == 0) {
			crypto_driver_unlock(cap);
			continue;
		}

		/* See if all the algorithms are supported. */
		if (crypto_driver_suitable(cap, cri)) {
			if (best == NULL) {
				/* keep holding crypto_driver_lock(cap) */
				best = cap;
				continue;
			} else if (cap->cc_sessions < best->cc_sessions) {
				crypto_driver_unlock(best);
				/* keep holding crypto_driver_lock(cap) */
				best = cap;
				continue;
			}
		}

		crypto_driver_unlock(cap);
	}
	if (best == NULL && hard == 0
	    && (accept & CRYPTO_ACCEPT_SOFTWARE) == 0) {
		accept = CRYPTO_ACCEPT_SOFTWARE;
		goto again;
	}

	return best;
}

/*
 * Create a new session.
 */
int
crypto_newsession(u_int64_t *sid, struct cryptoini *cri, int hard)
{
	struct cryptocap *cap;
	int err = EINVAL;

	mutex_enter(&crypto_drv_mtx);

	cap = crypto_select_driver_lock(cri, hard);
	if (cap != NULL) {
		u_int32_t hid, lid;

		hid = cap - crypto_drivers;
		/*
		 * Can't do everything in one session.
		 *
		 * XXX Fix this. We need to inject a "virtual" session layer right
		 * XXX about here.
		 */

		/* Call the driver initialization routine. */
		lid = hid;		/* Pass the driver ID. */
		crypto_driver_unlock(cap);
		err = cap->cc_newsession(cap->cc_arg, &lid, cri);
		crypto_driver_lock(cap);
		if (err == 0) {
			(*sid) = hid;
			(*sid) <<= 32;
			(*sid) |= (lid & 0xffffffff);
			(cap->cc_sessions)++;
		} else {
			DPRINTF("crypto_drivers[%d].cc_newsession() failed. error=%d\n",
			    hid, err);
		}
		crypto_driver_unlock(cap);
	}

	mutex_exit(&crypto_drv_mtx);

	return err;
}
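/*
 * Illustrative sketch only (not part of the original code): creating a
 * session that both encrypts and authenticates by chaining two cryptoini
 * structures, as consumed by crypto_newsession() above.  On success *sidp
 * carries the driver id in the upper 32 bits and the driver's session id
 * in the lower 32 bits.  The function name and key arguments are
 * hypothetical; cri_klen is in bits.
 */
#if 0
static int
example_newsession(u_int64_t *sidp, void *enckey, void *mackey)
{
	struct cryptoini crie, cria;

	memset(&crie, 0, sizeof(crie));
	memset(&cria, 0, sizeof(cria));

	crie.cri_alg = CRYPTO_AES_CBC;
	crie.cri_klen = 128;			/* bits */
	crie.cri_key = enckey;
	crie.cri_next = &cria;

	cria.cri_alg = CRYPTO_SHA1_HMAC;
	cria.cri_klen = 160;			/* bits */
	cria.cri_key = mackey;
	cria.cri_next = NULL;

	/* hard == 0: prefer hardware drivers, fall back to software ones */
	return crypto_newsession(sidp, &crie, 0);
}
#endif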
/*
 * Delete an existing session (or a reserved session on an unregistered
 * driver).
 */
int
crypto_freesession(u_int64_t sid)
{
	struct cryptocap *cap;
	int err = 0;

	/* Determine two IDs. */
	cap = crypto_checkdriver_lock(CRYPTO_SESID2HID(sid));
	if (cap == NULL)
		return ENOENT;

	if (cap->cc_sessions)
		(cap->cc_sessions)--;

	/* Call the driver cleanup routine, if available. */
	if (cap->cc_freesession)
		err = cap->cc_freesession(cap->cc_arg, sid);
	else
		err = 0;

	/*
	 * If this was the last session of a driver marked as invalid,
	 * make the entry available for reuse.
	 */
	if ((cap->cc_flags & CRYPTOCAP_F_CLEANUP) && cap->cc_sessions == 0)
		crypto_driver_clear(cap);

	crypto_driver_unlock(cap);
	return err;
}

static bool
crypto_checkdriver_initialized(const struct cryptocap *cap)
{

	return cap->cc_process != NULL ||
	    (cap->cc_flags & CRYPTOCAP_F_CLEANUP) != 0 ||
	    cap->cc_sessions != 0;
}

/*
 * Return an unused driver id.  Used by drivers prior to registering
 * support for the algorithms they handle.
 */
int32_t
crypto_get_driverid(u_int32_t flags)
{
	struct cryptocap *newdrv;
	struct cryptocap *cap = NULL;
	int i;

	(void)crypto_init();		/* XXX oh, this is foul! */

	mutex_enter(&crypto_drv_mtx);
	for (i = 0; i < crypto_drivers_num; i++) {
		cap = crypto_checkdriver_uninit(i);
		if (cap == NULL || crypto_checkdriver_initialized(cap))
			continue;
		break;
	}

	/* Out of entries, allocate some more. */
	if (cap == NULL || crypto_checkdriver_initialized(cap)) {
		/* Be careful about wrap-around. */
		if (2 * crypto_drivers_num <= crypto_drivers_num) {
			mutex_exit(&crypto_drv_mtx);
			printf("crypto: driver count wraparound!\n");
			return -1;
		}

		newdrv = kmem_zalloc(2 * crypto_drivers_num *
		    sizeof(struct cryptocap), KM_NOSLEEP);
		if (newdrv == NULL) {
			mutex_exit(&crypto_drv_mtx);
			printf("crypto: no space to expand driver table!\n");
			return -1;
		}

		memcpy(newdrv, crypto_drivers,
		    crypto_drivers_num * sizeof(struct cryptocap));
		kmem_free(crypto_drivers,
		    crypto_drivers_num * sizeof(struct cryptocap));

		crypto_drivers_num *= 2;
		crypto_drivers = newdrv;

		cap = crypto_checkdriver_uninit(i);
		KASSERT(cap != NULL);
	}

	/* NB: state is zero'd on free */
	cap->cc_sessions = 1;		/* Mark */
	cap->cc_flags = flags;
	mutex_init(&cap->cc_lock, MUTEX_DEFAULT, IPL_NET);

	if (bootverbose)
		printf("crypto: assign driver %u, flags %u\n", i, flags);

	mutex_exit(&crypto_drv_mtx);

	return i;
}
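/*
 * Illustrative sketch only (not part of the original code): the
 * registration sequence a driver typically performs at attach time,
 * matching the description at the top of this file.  The exdrv_* names
 * and softc type are hypothetical; a real driver registers every
 * algorithm it supports and calls crypto_done() on each completed
 * request from its processing routine.
 */
#if 0
static int exdrv_newsession(void *, u_int32_t *, struct cryptoini *);
static int exdrv_freesession(void *, u_int64_t);
static int exdrv_process(void *, struct cryptop *, int);

static void
exdrv_crypto_attach(struct exdrv_softc *sc)
{
	int32_t driverid;

	/* 0 == hardware driver; software drivers pass CRYPTOCAP_F_SOFTWARE */
	driverid = crypto_get_driverid(0);
	if (driverid < 0)
		return;

	crypto_register(driverid, CRYPTO_AES_CBC, 0, 0,
	    exdrv_newsession, exdrv_freesession, exdrv_process, sc);
	crypto_register(driverid, CRYPTO_SHA1_HMAC, 0, 0,
	    exdrv_newsession, exdrv_freesession, exdrv_process, sc);
}
#endif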
static struct cryptocap *
crypto_checkdriver_lock(u_int32_t hid)
{
	struct cryptocap *cap;

	KASSERT(crypto_drivers != NULL);

	if (hid >= crypto_drivers_num)
		return NULL;

	cap = &crypto_drivers[hid];
	mutex_enter(&cap->cc_lock);
	return cap;
}

/*
 * Use crypto_checkdriver_uninit() instead of crypto_checkdriver() in the
 * following two situations:
 * - crypto_drivers[] may not be allocated yet
 * - crypto_drivers[hid] may not be initialized yet
 */
static struct cryptocap *
crypto_checkdriver_uninit(u_int32_t hid)
{

	KASSERT(mutex_owned(&crypto_drv_mtx));

	if (crypto_drivers == NULL)
		return NULL;

	return (hid >= crypto_drivers_num ? NULL : &crypto_drivers[hid]);
}

/*
 * Return crypto_drivers[hid] only when it is allocated and initialized;
 * otherwise return NULL.  Use crypto_checkdriver_uninit() in the two
 * situations listed above.
 */
static struct cryptocap *
crypto_checkdriver(u_int32_t hid)
{

	KASSERT(mutex_owned(&crypto_drv_mtx));

	if (crypto_drivers == NULL || hid >= crypto_drivers_num)
		return NULL;

	struct cryptocap *cap = &crypto_drivers[hid];
	return crypto_checkdriver_initialized(cap) ? cap : NULL;
}

static inline void
crypto_driver_lock(struct cryptocap *cap)
{

	KASSERT(cap != NULL);

	mutex_enter(&cap->cc_lock);
}

static inline void
crypto_driver_unlock(struct cryptocap *cap)
{

	KASSERT(cap != NULL);

	mutex_exit(&cap->cc_lock);
}

static void
crypto_driver_clear(struct cryptocap *cap)
{

	if (cap == NULL)
		return;

	KASSERT(mutex_owned(&cap->cc_lock));

	cap->cc_sessions = 0;
	memset(&cap->cc_max_op_len, 0, sizeof(cap->cc_max_op_len));
	memset(&cap->cc_alg, 0, sizeof(cap->cc_alg));
	memset(&cap->cc_kalg, 0, sizeof(cap->cc_kalg));
	cap->cc_flags = 0;
	cap->cc_qblocked = 0;
	cap->cc_kqblocked = 0;

	cap->cc_arg = NULL;
	cap->cc_newsession = NULL;
	cap->cc_process = NULL;
	cap->cc_freesession = NULL;
	cap->cc_kprocess = NULL;
}

/*
 * Register support for a key-related algorithm.  This routine
 * is called once for each algorithm supported by a driver.
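 *
 * For example (illustrative only), a driver exposing modular
 * exponentiation would typically call, once at attach time:
 *
 *	crypto_kregister(driverid, CRK_MOD_EXP, 0, exdrv_kprocess, sc);
 *
 * where exdrv_kprocess() is a hypothetical asymmetric-op handler in
 * the driver.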
1065 */ 1066 int 1067 crypto_kregister(u_int32_t driverid, int kalg, u_int32_t flags, 1068 int (*kprocess)(void *, struct cryptkop *, int), 1069 void *karg) 1070 { 1071 struct cryptocap *cap; 1072 int err; 1073 1074 mutex_enter(&crypto_drv_mtx); 1075 1076 cap = crypto_checkdriver_lock(driverid); 1077 if (cap != NULL && 1078 (CRK_ALGORITM_MIN <= kalg && kalg <= CRK_ALGORITHM_MAX)) { 1079 /* 1080 * XXX Do some performance testing to determine placing. 1081 * XXX We probably need an auxiliary data structure that 1082 * XXX describes relative performances. 1083 */ 1084 1085 cap->cc_kalg[kalg] = flags | CRYPTO_ALG_FLAG_SUPPORTED; 1086 if (bootverbose) { 1087 printf("crypto: driver %u registers key alg %u " 1088 " flags %u\n", 1089 driverid, 1090 kalg, 1091 flags 1092 ); 1093 } 1094 1095 if (cap->cc_kprocess == NULL) { 1096 cap->cc_karg = karg; 1097 cap->cc_kprocess = kprocess; 1098 } 1099 err = 0; 1100 } else 1101 err = EINVAL; 1102 1103 mutex_exit(&crypto_drv_mtx); 1104 return err; 1105 } 1106 1107 /* 1108 * Register support for a non-key-related algorithm. This routine 1109 * is called once for each such algorithm supported by a driver. 1110 */ 1111 int 1112 crypto_register(u_int32_t driverid, int alg, u_int16_t maxoplen, 1113 u_int32_t flags, 1114 int (*newses)(void *, u_int32_t*, struct cryptoini*), 1115 int (*freeses)(void *, u_int64_t), 1116 int (*process)(void *, struct cryptop *, int), 1117 void *arg) 1118 { 1119 struct cryptocap *cap; 1120 int err; 1121 1122 cap = crypto_checkdriver_lock(driverid); 1123 if (cap == NULL) 1124 return EINVAL; 1125 1126 /* NB: algorithms are in the range [1..max] */ 1127 if (CRYPTO_ALGORITHM_MIN <= alg && alg <= CRYPTO_ALGORITHM_MAX) { 1128 /* 1129 * XXX Do some performance testing to determine placing. 1130 * XXX We probably need an auxiliary data structure that 1131 * XXX describes relative performances. 1132 */ 1133 1134 cap->cc_alg[alg] = flags | CRYPTO_ALG_FLAG_SUPPORTED; 1135 cap->cc_max_op_len[alg] = maxoplen; 1136 if (bootverbose) { 1137 printf("crypto: driver %u registers alg %u " 1138 "flags %u maxoplen %u\n", 1139 driverid, 1140 alg, 1141 flags, 1142 maxoplen 1143 ); 1144 } 1145 1146 if (cap->cc_process == NULL) { 1147 cap->cc_arg = arg; 1148 cap->cc_newsession = newses; 1149 cap->cc_process = process; 1150 cap->cc_freesession = freeses; 1151 cap->cc_sessions = 0; /* Unmark */ 1152 } 1153 err = 0; 1154 } else 1155 err = EINVAL; 1156 1157 crypto_driver_unlock(cap); 1158 1159 return err; 1160 } 1161 1162 static int 1163 crypto_unregister_locked(struct cryptocap *cap, int alg, bool all) 1164 { 1165 int i; 1166 u_int32_t ses; 1167 bool lastalg = true; 1168 1169 KASSERT(cap != NULL); 1170 KASSERT(mutex_owned(&cap->cc_lock)); 1171 1172 if (alg < CRYPTO_ALGORITHM_MIN || CRYPTO_ALGORITHM_MAX < alg) 1173 return EINVAL; 1174 1175 if (!all && cap->cc_alg[alg] == 0) 1176 return EINVAL; 1177 1178 cap->cc_alg[alg] = 0; 1179 cap->cc_max_op_len[alg] = 0; 1180 1181 if (all) { 1182 if (alg != CRYPTO_ALGORITHM_MAX) 1183 lastalg = false; 1184 } else { 1185 /* Was this the last algorithm ? */ 1186 for (i = CRYPTO_ALGORITHM_MIN; i <= CRYPTO_ALGORITHM_MAX; i++) 1187 if (cap->cc_alg[i] != 0) { 1188 lastalg = false; 1189 break; 1190 } 1191 } 1192 if (lastalg) { 1193 ses = cap->cc_sessions; 1194 crypto_driver_clear(cap); 1195 if (ses != 0) { 1196 /* 1197 * If there are pending sessions, just mark as invalid. 
1198 */ 1199 cap->cc_flags |= CRYPTOCAP_F_CLEANUP; 1200 cap->cc_sessions = ses; 1201 } 1202 } 1203 1204 return 0; 1205 } 1206 1207 /* 1208 * Unregister a crypto driver. If there are pending sessions using it, 1209 * leave enough information around so that subsequent calls using those 1210 * sessions will correctly detect the driver has been unregistered and 1211 * reroute requests. 1212 */ 1213 int 1214 crypto_unregister(u_int32_t driverid, int alg) 1215 { 1216 int err; 1217 struct cryptocap *cap; 1218 1219 cap = crypto_checkdriver_lock(driverid); 1220 err = crypto_unregister_locked(cap, alg, false); 1221 crypto_driver_unlock(cap); 1222 1223 return err; 1224 } 1225 1226 /* 1227 * Unregister all algorithms associated with a crypto driver. 1228 * If there are pending sessions using it, leave enough information 1229 * around so that subsequent calls using those sessions will 1230 * correctly detect the driver has been unregistered and reroute 1231 * requests. 1232 */ 1233 int 1234 crypto_unregister_all(u_int32_t driverid) 1235 { 1236 int err, i; 1237 struct cryptocap *cap; 1238 1239 cap = crypto_checkdriver_lock(driverid); 1240 for (i = CRYPTO_ALGORITHM_MIN; i <= CRYPTO_ALGORITHM_MAX; i++) { 1241 err = crypto_unregister_locked(cap, i, true); 1242 if (err) 1243 break; 1244 } 1245 crypto_driver_unlock(cap); 1246 1247 return err; 1248 } 1249 1250 /* 1251 * Clear blockage on a driver. The what parameter indicates whether 1252 * the driver is now ready for cryptop's and/or cryptokop's. 1253 */ 1254 int 1255 crypto_unblock(u_int32_t driverid, int what) 1256 { 1257 struct cryptocap *cap; 1258 int needwakeup = 0; 1259 1260 cap = crypto_checkdriver_lock(driverid); 1261 if (cap == NULL) 1262 return EINVAL; 1263 1264 if (what & CRYPTO_SYMQ) { 1265 needwakeup |= cap->cc_qblocked; 1266 cap->cc_qblocked = 0; 1267 } 1268 if (what & CRYPTO_ASYMQ) { 1269 needwakeup |= cap->cc_kqblocked; 1270 cap->cc_kqblocked = 0; 1271 } 1272 crypto_driver_unlock(cap); 1273 if (needwakeup) { 1274 kpreempt_disable(); 1275 softint_schedule(crypto_q_si); 1276 kpreempt_enable(); 1277 } 1278 1279 return 0; 1280 } 1281 1282 /* 1283 * Dispatch a crypto request to a driver or queue 1284 * it, to be processed by the kernel thread. 1285 */ 1286 int 1287 crypto_dispatch(struct cryptop *crp) 1288 { 1289 int result, s; 1290 struct cryptocap *cap; 1291 struct crypto_crp_qs *crp_qs; 1292 struct crypto_crp_q *crp_q; 1293 1294 KASSERT(crp != NULL); 1295 1296 DPRINTF("crp %p, alg %d\n", crp, crp->crp_desc->crd_alg); 1297 1298 cryptostats.cs_ops++; 1299 1300 #ifdef CRYPTO_TIMING 1301 if (crypto_timing) 1302 nanouptime(&crp->crp_tstamp); 1303 #endif 1304 1305 if ((crp->crp_flags & CRYPTO_F_BATCH) != 0) { 1306 int wasempty; 1307 /* 1308 * Caller marked the request as ``ok to delay''; 1309 * queue it for the swi thread. This is desirable 1310 * when the operation is low priority and/or suitable 1311 * for batching. 1312 * 1313 * don't care list order in batch job. 
1314 */ 1315 crp_qs = crypto_get_crp_qs(&s); 1316 crp_q = crp_qs->crp_q; 1317 wasempty = TAILQ_EMPTY(crp_q); 1318 TAILQ_INSERT_TAIL(crp_q, crp, crp_next); 1319 crypto_put_crp_qs(&s); 1320 crp_q = NULL; 1321 if (wasempty) { 1322 kpreempt_disable(); 1323 softint_schedule(crypto_q_si); 1324 kpreempt_enable(); 1325 } 1326 1327 return 0; 1328 } 1329 1330 crp_qs = crypto_get_crp_qs(&s); 1331 crp_q = crp_qs->crp_q; 1332 cap = crypto_checkdriver_lock(CRYPTO_SESID2HID(crp->crp_sid)); 1333 /* 1334 * TODO: 1335 * If we can ensure the driver has been valid until the driver is 1336 * done crypto_unregister(), this migrate operation is not required. 1337 */ 1338 if (cap == NULL) { 1339 /* 1340 * The driver must be detached, so this request will migrate 1341 * to other drivers in cryptointr() later. 1342 */ 1343 TAILQ_INSERT_TAIL(crp_q, crp, crp_next); 1344 result = 0; 1345 goto out; 1346 } 1347 1348 if (cap->cc_qblocked != 0) { 1349 crypto_driver_unlock(cap); 1350 /* 1351 * The driver is blocked, just queue the op until 1352 * it unblocks and the swi thread gets kicked. 1353 */ 1354 TAILQ_INSERT_TAIL(crp_q, crp, crp_next); 1355 result = 0; 1356 goto out; 1357 } 1358 1359 /* 1360 * Caller marked the request to be processed 1361 * immediately; dispatch it directly to the 1362 * driver unless the driver is currently blocked. 1363 */ 1364 crypto_driver_unlock(cap); 1365 result = crypto_invoke(crp, 0); 1366 if (result == ERESTART) { 1367 /* 1368 * The driver ran out of resources, mark the 1369 * driver ``blocked'' for cryptop's and put 1370 * the op on the queue. 1371 */ 1372 crypto_driver_lock(cap); 1373 cap->cc_qblocked = 1; 1374 crypto_driver_unlock(cap); 1375 TAILQ_INSERT_HEAD(crp_q, crp, crp_next); 1376 cryptostats.cs_blocks++; 1377 1378 /* 1379 * The crp is enqueued to crp_q, that is, 1380 * no error occurs. So, this function should 1381 * not return error. 1382 */ 1383 result = 0; 1384 } 1385 1386 out: 1387 crypto_put_crp_qs(&s); 1388 return result; 1389 } 1390 1391 /* 1392 * Add an asymetric crypto request to a queue, 1393 * to be processed by the kernel thread. 1394 */ 1395 int 1396 crypto_kdispatch(struct cryptkop *krp) 1397 { 1398 int result, s; 1399 struct cryptocap *cap; 1400 struct crypto_crp_qs *crp_qs; 1401 struct crypto_crp_kq *crp_kq; 1402 1403 KASSERT(krp != NULL); 1404 1405 cryptostats.cs_kops++; 1406 1407 crp_qs = crypto_get_crp_qs(&s); 1408 crp_kq = crp_qs->crp_kq; 1409 cap = crypto_checkdriver_lock(krp->krp_hid); 1410 /* 1411 * TODO: 1412 * If we can ensure the driver has been valid until the driver is 1413 * done crypto_unregister(), this migrate operation is not required. 1414 */ 1415 if (cap == NULL) { 1416 TAILQ_INSERT_TAIL(crp_kq, krp, krp_next); 1417 result = 0; 1418 goto out; 1419 } 1420 1421 if (cap->cc_kqblocked != 0) { 1422 crypto_driver_unlock(cap); 1423 /* 1424 * The driver is blocked, just queue the op until 1425 * it unblocks and the swi thread gets kicked. 1426 */ 1427 TAILQ_INSERT_TAIL(crp_kq, krp, krp_next); 1428 result = 0; 1429 goto out; 1430 } 1431 1432 crypto_driver_unlock(cap); 1433 result = crypto_kinvoke(krp, 0); 1434 if (result == ERESTART) { 1435 /* 1436 * The driver ran out of resources, mark the 1437 * driver ``blocked'' for cryptop's and put 1438 * the op on the queue. 1439 */ 1440 crypto_driver_lock(cap); 1441 cap->cc_kqblocked = 1; 1442 crypto_driver_unlock(cap); 1443 TAILQ_INSERT_HEAD(crp_kq, krp, krp_next); 1444 cryptostats.cs_kblocks++; 1445 1446 /* 1447 * The krp is enqueued to crp_kq, that is, 1448 * no error occurs. 
So, this function should 1449 * not return error. 1450 */ 1451 result = 0; 1452 } 1453 1454 out: 1455 crypto_put_crp_qs(&s); 1456 return result; 1457 } 1458 1459 /* 1460 * Dispatch an assymetric crypto request to the appropriate crypto devices. 1461 */ 1462 static int 1463 crypto_kinvoke(struct cryptkop *krp, int hint) 1464 { 1465 struct cryptocap *cap = NULL; 1466 u_int32_t hid; 1467 int error; 1468 1469 KASSERT(krp != NULL); 1470 1471 /* Sanity checks. */ 1472 if (krp->krp_callback == NULL) { 1473 cv_destroy(&krp->krp_cv); 1474 crypto_kfreereq(krp); 1475 return EINVAL; 1476 } 1477 1478 mutex_enter(&crypto_drv_mtx); 1479 for (hid = 0; hid < crypto_drivers_num; hid++) { 1480 cap = crypto_checkdriver(hid); 1481 if (cap == NULL) 1482 continue; 1483 crypto_driver_lock(cap); 1484 if ((cap->cc_flags & CRYPTOCAP_F_SOFTWARE) && 1485 crypto_devallowsoft == 0) { 1486 crypto_driver_unlock(cap); 1487 continue; 1488 } 1489 if (cap->cc_kprocess == NULL) { 1490 crypto_driver_unlock(cap); 1491 continue; 1492 } 1493 if ((cap->cc_kalg[krp->krp_op] & 1494 CRYPTO_ALG_FLAG_SUPPORTED) == 0) { 1495 crypto_driver_unlock(cap); 1496 continue; 1497 } 1498 break; 1499 } 1500 mutex_exit(&crypto_drv_mtx); 1501 if (cap != NULL) { 1502 int (*process)(void *, struct cryptkop *, int); 1503 void *arg; 1504 1505 process = cap->cc_kprocess; 1506 arg = cap->cc_karg; 1507 krp->krp_hid = hid; 1508 krp->reqcpu = curcpu(); 1509 crypto_driver_unlock(cap); 1510 error = (*process)(arg, krp, hint); 1511 } else { 1512 error = ENODEV; 1513 } 1514 1515 if (error) { 1516 krp->krp_status = error; 1517 crypto_kdone(krp); 1518 } 1519 return 0; 1520 } 1521 1522 #ifdef CRYPTO_TIMING 1523 static void 1524 crypto_tstat(struct cryptotstat *ts, struct timespec *tv) 1525 { 1526 struct timespec now, t; 1527 1528 nanouptime(&now); 1529 t.tv_sec = now.tv_sec - tv->tv_sec; 1530 t.tv_nsec = now.tv_nsec - tv->tv_nsec; 1531 if (t.tv_nsec < 0) { 1532 t.tv_sec--; 1533 t.tv_nsec += 1000000000; 1534 } 1535 timespecadd(&ts->acc, &t, &t); 1536 if (timespeccmp(&t, &ts->min, <)) 1537 ts->min = t; 1538 if (timespeccmp(&t, &ts->max, >)) 1539 ts->max = t; 1540 ts->count++; 1541 1542 *tv = now; 1543 } 1544 #endif 1545 1546 /* 1547 * Dispatch a crypto request to the appropriate crypto devices. 1548 */ 1549 static int 1550 crypto_invoke(struct cryptop *crp, int hint) 1551 { 1552 struct cryptocap *cap; 1553 1554 KASSERT(crp != NULL); 1555 1556 #ifdef CRYPTO_TIMING 1557 if (crypto_timing) 1558 crypto_tstat(&cryptostats.cs_invoke, &crp->crp_tstamp); 1559 #endif 1560 /* Sanity checks. */ 1561 if (crp->crp_callback == NULL) { 1562 return EINVAL; 1563 } 1564 if (crp->crp_desc == NULL) { 1565 crp->crp_etype = EINVAL; 1566 crypto_done(crp); 1567 return 0; 1568 } 1569 1570 cap = crypto_checkdriver_lock(CRYPTO_SESID2HID(crp->crp_sid)); 1571 if (cap != NULL && (cap->cc_flags & CRYPTOCAP_F_CLEANUP) == 0) { 1572 int (*process)(void *, struct cryptop *, int); 1573 void *arg; 1574 1575 process = cap->cc_process; 1576 arg = cap->cc_arg; 1577 crp->reqcpu = curcpu(); 1578 1579 /* 1580 * Invoke the driver to process the request. 1581 */ 1582 DPRINTF("calling process for %p\n", crp); 1583 crypto_driver_unlock(cap); 1584 return (*process)(arg, crp, hint); 1585 } else { 1586 struct cryptodesc *crd; 1587 u_int64_t nid = 0; 1588 1589 if (cap != NULL) 1590 crypto_driver_unlock(cap); 1591 1592 /* 1593 * Driver has unregistered; migrate the session and return 1594 * an error to the caller so they'll resubmit the op. 
1595 */ 1596 crypto_freesession(crp->crp_sid); 1597 1598 for (crd = crp->crp_desc; crd->crd_next; crd = crd->crd_next) 1599 crd->CRD_INI.cri_next = &(crd->crd_next->CRD_INI); 1600 1601 if (crypto_newsession(&nid, &(crp->crp_desc->CRD_INI), 0) == 0) 1602 crp->crp_sid = nid; 1603 1604 crp->crp_etype = EAGAIN; 1605 1606 crypto_done(crp); 1607 return 0; 1608 } 1609 } 1610 1611 /* 1612 * Release a set of crypto descriptors. 1613 */ 1614 void 1615 crypto_freereq(struct cryptop *crp) 1616 { 1617 struct cryptodesc *crd; 1618 1619 if (crp == NULL) 1620 return; 1621 DPRINTF("lid[%u]: crp %p\n", CRYPTO_SESID2LID(crp->crp_sid), crp); 1622 1623 /* sanity check */ 1624 if (crp->crp_flags & CRYPTO_F_ONRETQ) { 1625 panic("crypto_freereq() freeing crp on RETQ\n"); 1626 } 1627 1628 while ((crd = crp->crp_desc) != NULL) { 1629 crp->crp_desc = crd->crd_next; 1630 pool_cache_put(cryptodesc_cache, crd); 1631 } 1632 pool_cache_put(cryptop_cache, crp); 1633 } 1634 1635 /* 1636 * Acquire a set of crypto descriptors. 1637 */ 1638 struct cryptop * 1639 crypto_getreq(int num) 1640 { 1641 struct cryptodesc *crd; 1642 struct cryptop *crp; 1643 struct crypto_crp_ret_qs *qs; 1644 1645 /* 1646 * When crp_ret_q is full, we restrict here to avoid crp_ret_q overflow 1647 * by error callback. 1648 */ 1649 qs = crypto_get_crp_ret_qs(curcpu()); 1650 if (qs->crp_ret_q_maxlen > 0 1651 && qs->crp_ret_q_len > qs->crp_ret_q_maxlen) { 1652 qs->crp_ret_q_drops++; 1653 crypto_put_crp_ret_qs(curcpu()); 1654 return NULL; 1655 } 1656 crypto_put_crp_ret_qs(curcpu()); 1657 1658 crp = pool_cache_get(cryptop_cache, PR_NOWAIT); 1659 if (crp == NULL) { 1660 return NULL; 1661 } 1662 memset(crp, 0, sizeof(struct cryptop)); 1663 1664 while (num--) { 1665 crd = pool_cache_get(cryptodesc_cache, PR_NOWAIT); 1666 if (crd == NULL) { 1667 crypto_freereq(crp); 1668 return NULL; 1669 } 1670 1671 memset(crd, 0, sizeof(struct cryptodesc)); 1672 crd->crd_next = crp->crp_desc; 1673 crp->crp_desc = crd; 1674 } 1675 1676 return crp; 1677 } 1678 1679 /* 1680 * Release a set of asymmetric crypto descriptors. 1681 * Currently, support one descriptor only. 1682 */ 1683 void 1684 crypto_kfreereq(struct cryptkop *krp) 1685 { 1686 1687 if (krp == NULL) 1688 return; 1689 1690 DPRINTF("krp %p\n", krp); 1691 1692 /* sanity check */ 1693 if (krp->krp_flags & CRYPTO_F_ONRETQ) { 1694 panic("crypto_kfreereq() freeing krp on RETQ\n"); 1695 } 1696 1697 pool_cache_put(cryptkop_cache, krp); 1698 } 1699 1700 /* 1701 * Acquire a set of asymmetric crypto descriptors. 1702 * Currently, support one descriptor only. 1703 */ 1704 struct cryptkop * 1705 crypto_kgetreq(int num __unused, int prflags) 1706 { 1707 struct cryptkop *krp; 1708 struct crypto_crp_ret_qs *qs; 1709 1710 /* 1711 * When crp_ret_kq is full, we restrict here to avoid crp_ret_kq 1712 * overflow by error callback. 1713 */ 1714 qs = crypto_get_crp_ret_qs(curcpu()); 1715 if (qs->crp_ret_kq_maxlen > 0 1716 && qs->crp_ret_kq_len > qs->crp_ret_kq_maxlen) { 1717 qs->crp_ret_kq_drops++; 1718 crypto_put_crp_ret_qs(curcpu()); 1719 return NULL; 1720 } 1721 crypto_put_crp_ret_qs(curcpu()); 1722 1723 krp = pool_cache_get(cryptkop_cache, prflags); 1724 if (krp == NULL) { 1725 return NULL; 1726 } 1727 memset(krp, 0, sizeof(struct cryptkop)); 1728 1729 return krp; 1730 } 1731 1732 /* 1733 * Invoke the callback on behalf of the driver. 
1734 */ 1735 void 1736 crypto_done(struct cryptop *crp) 1737 { 1738 1739 KASSERT(crp != NULL); 1740 1741 if (crp->crp_etype != 0) 1742 cryptostats.cs_errs++; 1743 #ifdef CRYPTO_TIMING 1744 if (crypto_timing) 1745 crypto_tstat(&cryptostats.cs_done, &crp->crp_tstamp); 1746 #endif 1747 DPRINTF("lid[%u]: crp %p\n", CRYPTO_SESID2LID(crp->crp_sid), crp); 1748 1749 /* 1750 * Normal case; queue the callback for the thread. 1751 * 1752 * The return queue is manipulated by the swi thread 1753 * and, potentially, by crypto device drivers calling 1754 * back to mark operations completed. Thus we need 1755 * to mask both while manipulating the return queue. 1756 */ 1757 if (crp->crp_flags & CRYPTO_F_CBIMM) { 1758 /* 1759 * Do the callback directly. This is ok when the 1760 * callback routine does very little (e.g. the 1761 * /dev/crypto callback method just does a wakeup). 1762 */ 1763 crp->crp_flags |= CRYPTO_F_DONE; 1764 1765 #ifdef CRYPTO_TIMING 1766 if (crypto_timing) { 1767 /* 1768 * NB: We must copy the timestamp before 1769 * doing the callback as the cryptop is 1770 * likely to be reclaimed. 1771 */ 1772 struct timespec t = crp->crp_tstamp; 1773 crypto_tstat(&cryptostats.cs_cb, &t); 1774 crp->crp_callback(crp); 1775 crypto_tstat(&cryptostats.cs_finis, &t); 1776 } else 1777 #endif 1778 crp->crp_callback(crp); 1779 } else { 1780 crp->crp_flags |= CRYPTO_F_DONE; 1781 #if 0 1782 if (crp->crp_flags & CRYPTO_F_USER) { 1783 /* 1784 * TODO: 1785 * If crp->crp_flags & CRYPTO_F_USER and the used 1786 * encryption driver does all the processing in 1787 * the same context, we can skip enqueueing crp_ret_q 1788 * and softint_schedule(crypto_ret_si). 1789 */ 1790 DPRINTF("lid[%u]: crp %p CRYPTO_F_USER\n", 1791 CRYPTO_SESID2LID(crp->crp_sid), crp); 1792 } else 1793 #endif 1794 { 1795 int wasempty; 1796 struct crypto_crp_ret_qs *qs; 1797 struct crypto_crp_ret_q *crp_ret_q; 1798 1799 qs = crypto_get_crp_ret_qs(crp->reqcpu); 1800 crp_ret_q = &qs->crp_ret_q; 1801 wasempty = TAILQ_EMPTY(crp_ret_q); 1802 DPRINTF("lid[%u]: queueing %p\n", 1803 CRYPTO_SESID2LID(crp->crp_sid), crp); 1804 crp->crp_flags |= CRYPTO_F_ONRETQ; 1805 TAILQ_INSERT_TAIL(crp_ret_q, crp, crp_next); 1806 qs->crp_ret_q_len++; 1807 if (wasempty && !qs->crp_ret_q_exit_flag) { 1808 DPRINTF("lid[%u]: waking cryptoret," 1809 "crp %p hit empty queue\n.", 1810 CRYPTO_SESID2LID(crp->crp_sid), crp); 1811 softint_schedule_cpu(crypto_ret_si, crp->reqcpu); 1812 } 1813 crypto_put_crp_ret_qs(crp->reqcpu); 1814 } 1815 } 1816 } 1817 1818 /* 1819 * Invoke the callback on behalf of the driver. 1820 */ 1821 void 1822 crypto_kdone(struct cryptkop *krp) 1823 { 1824 1825 KASSERT(krp != NULL); 1826 1827 if (krp->krp_status != 0) 1828 cryptostats.cs_kerrs++; 1829 1830 krp->krp_flags |= CRYPTO_F_DONE; 1831 1832 /* 1833 * The return queue is manipulated by the swi thread 1834 * and, potentially, by crypto device drivers calling 1835 * back to mark operations completed. Thus we need 1836 * to mask both while manipulating the return queue. 
1837 */ 1838 if (krp->krp_flags & CRYPTO_F_CBIMM) { 1839 krp->krp_callback(krp); 1840 } else { 1841 int wasempty; 1842 struct crypto_crp_ret_qs *qs; 1843 struct crypto_crp_ret_kq *crp_ret_kq; 1844 1845 qs = crypto_get_crp_ret_qs(krp->reqcpu); 1846 crp_ret_kq = &qs->crp_ret_kq; 1847 1848 wasempty = TAILQ_EMPTY(crp_ret_kq); 1849 krp->krp_flags |= CRYPTO_F_ONRETQ; 1850 TAILQ_INSERT_TAIL(crp_ret_kq, krp, krp_next); 1851 qs->crp_ret_kq_len++; 1852 if (wasempty && !qs->crp_ret_q_exit_flag) 1853 softint_schedule_cpu(crypto_ret_si, krp->reqcpu); 1854 crypto_put_crp_ret_qs(krp->reqcpu); 1855 } 1856 } 1857 1858 int 1859 crypto_getfeat(int *featp) 1860 { 1861 1862 if (crypto_userasymcrypto == 0) { 1863 *featp = 0; 1864 return 0; 1865 } 1866 1867 mutex_enter(&crypto_drv_mtx); 1868 1869 int feat = 0; 1870 for (int hid = 0; hid < crypto_drivers_num; hid++) { 1871 struct cryptocap *cap; 1872 cap = crypto_checkdriver(hid); 1873 if (cap == NULL) 1874 continue; 1875 1876 crypto_driver_lock(cap); 1877 1878 if ((cap->cc_flags & CRYPTOCAP_F_SOFTWARE) && 1879 crypto_devallowsoft == 0) 1880 goto unlock; 1881 1882 if (cap->cc_kprocess == NULL) 1883 goto unlock; 1884 1885 for (int kalg = 0; kalg < CRK_ALGORITHM_MAX; kalg++) 1886 if ((cap->cc_kalg[kalg] & 1887 CRYPTO_ALG_FLAG_SUPPORTED) != 0) 1888 feat |= 1 << kalg; 1889 1890 unlock: crypto_driver_unlock(cap); 1891 } 1892 1893 mutex_exit(&crypto_drv_mtx); 1894 *featp = feat; 1895 return (0); 1896 } 1897 1898 /* 1899 * Software interrupt thread to dispatch crypto requests. 1900 */ 1901 static void 1902 cryptointr(void *arg __unused) 1903 { 1904 struct cryptop *crp, *submit, *cnext; 1905 struct cryptkop *krp, *knext; 1906 struct cryptocap *cap; 1907 struct crypto_crp_qs *crp_qs; 1908 struct crypto_crp_q *crp_q; 1909 struct crypto_crp_kq *crp_kq; 1910 int result, hint, s; 1911 1912 cryptostats.cs_intrs++; 1913 crp_qs = crypto_get_crp_qs(&s); 1914 crp_q = crp_qs->crp_q; 1915 crp_kq = crp_qs->crp_kq; 1916 do { 1917 /* 1918 * Find the first element in the queue that can be 1919 * processed and look-ahead to see if multiple ops 1920 * are ready for the same driver. 1921 */ 1922 submit = NULL; 1923 hint = 0; 1924 TAILQ_FOREACH_SAFE(crp, crp_q, crp_next, cnext) { 1925 u_int32_t hid = CRYPTO_SESID2HID(crp->crp_sid); 1926 cap = crypto_checkdriver_lock(hid); 1927 if (cap == NULL || cap->cc_process == NULL) { 1928 if (cap != NULL) 1929 crypto_driver_unlock(cap); 1930 /* Op needs to be migrated, process it. */ 1931 submit = crp; 1932 break; 1933 } 1934 1935 /* 1936 * skip blocked crp regardless of CRYPTO_F_BATCH 1937 */ 1938 if (cap->cc_qblocked != 0) { 1939 crypto_driver_unlock(cap); 1940 continue; 1941 } 1942 crypto_driver_unlock(cap); 1943 1944 /* 1945 * skip batch crp until the end of crp_q 1946 */ 1947 if ((crp->crp_flags & CRYPTO_F_BATCH) != 0) { 1948 if (submit == NULL) { 1949 submit = crp; 1950 } else { 1951 if (CRYPTO_SESID2HID(submit->crp_sid) 1952 == hid) 1953 hint = CRYPTO_HINT_MORE; 1954 } 1955 1956 continue; 1957 } 1958 1959 /* 1960 * found first crp which is neither blocked nor batch. 1961 */ 1962 submit = crp; 1963 /* 1964 * batch crp can be processed much later, so clear hint. 1965 */ 1966 hint = 0; 1967 break; 1968 } 1969 if (submit != NULL) { 1970 TAILQ_REMOVE(crp_q, submit, crp_next); 1971 result = crypto_invoke(submit, hint); 1972 /* we must take here as the TAILQ op or kinvoke 1973 may need this mutex below. sigh. 
			 */
			if (result == ERESTART) {
				/*
				 * The driver ran out of resources, mark the
				 * driver ``blocked'' for cryptop's and put
				 * the request back in the queue.  It would
				 * be best to put the request back where we
				 * got it but that's hard so for now we put
				 * it at the front.  This should be ok;
				 * putting it at the end does not work.
				 */
				/* validate sid again */
				cap = crypto_checkdriver_lock(CRYPTO_SESID2HID(submit->crp_sid));
				if (cap == NULL) {
					/* migrate again, sigh... */
					TAILQ_INSERT_TAIL(crp_q, submit, crp_next);
				} else {
					cap->cc_qblocked = 1;
					crypto_driver_unlock(cap);
					TAILQ_INSERT_HEAD(crp_q, submit, crp_next);
					cryptostats.cs_blocks++;
				}
			}
		}

		/* As above, but for key ops */
		TAILQ_FOREACH_SAFE(krp, crp_kq, krp_next, knext) {
			cap = crypto_checkdriver_lock(krp->krp_hid);
			if (cap == NULL || cap->cc_kprocess == NULL) {
				if (cap != NULL)
					crypto_driver_unlock(cap);
				/* Op needs to be migrated, process it. */
				break;
			}
			if (!cap->cc_kqblocked) {
				crypto_driver_unlock(cap);
				break;
			}
			crypto_driver_unlock(cap);
		}
		if (krp != NULL) {
			TAILQ_REMOVE(crp_kq, krp, krp_next);
			result = crypto_kinvoke(krp, 0);
			/* the next iteration will want the mutex. :-/ */
			if (result == ERESTART) {
				/*
				 * The driver ran out of resources, mark the
				 * driver ``blocked'' for cryptkop's and put
				 * the request back in the queue.  It would
				 * be best to put the request back where we
				 * got it but that's hard so for now we put
				 * it at the front.  This should be ok;
				 * putting it at the end does not work.
				 */
				/* validate sid again */
				cap = crypto_checkdriver_lock(krp->krp_hid);
				if (cap == NULL) {
					/* migrate again, sigh... */
					TAILQ_INSERT_TAIL(crp_kq, krp, krp_next);
				} else {
					cap->cc_kqblocked = 1;
					crypto_driver_unlock(cap);
					TAILQ_INSERT_HEAD(crp_kq, krp, krp_next);
					cryptostats.cs_kblocks++;
				}
			}
		}
	} while (submit != NULL || krp != NULL);
	crypto_put_crp_qs(&s);
}

/*
 * softint handler to do callbacks.
 */
static void
cryptoret_softint(void *arg __unused)
{
	struct crypto_crp_ret_qs *qs;
	struct crypto_crp_ret_q *crp_ret_q;
	struct crypto_crp_ret_kq *crp_ret_kq;

	qs = crypto_get_crp_ret_qs(curcpu());
	crp_ret_q = &qs->crp_ret_q;
	crp_ret_kq = &qs->crp_ret_kq;
	for (;;) {
		struct cryptop *crp;
		struct cryptkop *krp;

		crp = TAILQ_FIRST(crp_ret_q);
		if (crp != NULL) {
			TAILQ_REMOVE(crp_ret_q, crp, crp_next);
			qs->crp_ret_q_len--;
			crp->crp_flags &= ~CRYPTO_F_ONRETQ;
		}
		krp = TAILQ_FIRST(crp_ret_kq);
		if (krp != NULL) {
			TAILQ_REMOVE(crp_ret_kq, krp, krp_next);
			qs->crp_ret_kq_len--;
			krp->krp_flags &= ~CRYPTO_F_ONRETQ;
		}

		/* drop before calling any callbacks. */
		if (crp == NULL && krp == NULL)
			break;

		mutex_spin_exit(&qs->crp_ret_q_mtx);
		if (crp != NULL) {
#ifdef CRYPTO_TIMING
			if (crypto_timing) {
				/*
				 * NB: We must copy the timestamp before
				 * doing the callback as the cryptop is
				 * likely to be reclaimed.
2086 */ 2087 struct timespec t = crp->crp_tstamp; 2088 crypto_tstat(&cryptostats.cs_cb, &t); 2089 crp->crp_callback(crp); 2090 crypto_tstat(&cryptostats.cs_finis, &t); 2091 } else 2092 #endif 2093 { 2094 crp->crp_callback(crp); 2095 } 2096 } 2097 if (krp != NULL) 2098 krp->krp_callback(krp); 2099 2100 mutex_spin_enter(&qs->crp_ret_q_mtx); 2101 } 2102 crypto_put_crp_ret_qs(curcpu()); 2103 } 2104 2105 /* NetBSD module interface */ 2106 2107 MODULE(MODULE_CLASS_MISC, opencrypto, NULL); 2108 2109 static int 2110 opencrypto_modcmd(modcmd_t cmd, void *opaque) 2111 { 2112 int error = 0; 2113 2114 switch (cmd) { 2115 case MODULE_CMD_INIT: 2116 #ifdef _MODULE 2117 error = crypto_init(); 2118 #endif 2119 break; 2120 case MODULE_CMD_FINI: 2121 #ifdef _MODULE 2122 error = crypto_destroy(true); 2123 #endif 2124 break; 2125 default: 2126 error = ENOTTY; 2127 } 2128 return error; 2129 } 2130