16810ad6fSSam Leffler /*- 26810ad6fSSam Leffler * Copyright (c) 2002-2006 Sam Leffler. All rights reserved. 3ae18720dSJohn Baldwin * Copyright (c) 2021 The FreeBSD Foundation 4ae18720dSJohn Baldwin * 5ae18720dSJohn Baldwin * Portions of this software were developed by Ararat River 6ae18720dSJohn Baldwin * Consulting, LLC under sponsorship of the FreeBSD Foundation. 76810ad6fSSam Leffler * 86810ad6fSSam Leffler * Redistribution and use in source and binary forms, with or without 96810ad6fSSam Leffler * modification, are permitted provided that the following conditions 106810ad6fSSam Leffler * are met: 116810ad6fSSam Leffler * 1. Redistributions of source code must retain the above copyright 126810ad6fSSam Leffler * notice, this list of conditions and the following disclaimer. 136810ad6fSSam Leffler * 2. Redistributions in binary form must reproduce the above copyright 146810ad6fSSam Leffler * notice, this list of conditions and the following disclaimer in the 156810ad6fSSam Leffler * documentation and/or other materials provided with the distribution. 166810ad6fSSam Leffler * 176810ad6fSSam Leffler * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 186810ad6fSSam Leffler * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 196810ad6fSSam Leffler * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 206810ad6fSSam Leffler * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 216810ad6fSSam Leffler * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 226810ad6fSSam Leffler * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 236810ad6fSSam Leffler * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 246810ad6fSSam Leffler * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 256810ad6fSSam Leffler * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 266810ad6fSSam Leffler * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 276810ad6fSSam Leffler */ 286810ad6fSSam Leffler 296810ad6fSSam Leffler #include <sys/cdefs.h> 306810ad6fSSam Leffler /* 316810ad6fSSam Leffler * Cryptographic Subsystem. 326810ad6fSSam Leffler * 336810ad6fSSam Leffler * This code is derived from the Openbsd Cryptographic Framework (OCF) 346810ad6fSSam Leffler * that has the copyright shown below. Very little of the original 356810ad6fSSam Leffler * code remains. 366810ad6fSSam Leffler */ 376810ad6fSSam Leffler 3860727d8bSWarner Losh /*- 39091d81d1SSam Leffler * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu) 40091d81d1SSam Leffler * 41091d81d1SSam Leffler * This code was written by Angelos D. Keromytis in Athens, Greece, in 42091d81d1SSam Leffler * February 2000. Network Security Technologies Inc. (NSTI) kindly 43091d81d1SSam Leffler * supported the development of this code. 44091d81d1SSam Leffler * 45091d81d1SSam Leffler * Copyright (c) 2000, 2001 Angelos D. Keromytis 46091d81d1SSam Leffler * 47091d81d1SSam Leffler * Permission to use, copy, and modify this software with or without fee 48091d81d1SSam Leffler * is hereby granted, provided that this entire notice is included in 49091d81d1SSam Leffler * all source code copies of any software which is or includes a copy or 50091d81d1SSam Leffler * modification of this software. 51091d81d1SSam Leffler * 52091d81d1SSam Leffler * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR 53091d81d1SSam Leffler * IMPLIED WARRANTY. 
IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY 54091d81d1SSam Leffler * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE 55091d81d1SSam Leffler * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR 56091d81d1SSam Leffler * PURPOSE. 57091d81d1SSam Leffler */ 582c446514SDavid E. O'Brien 596810ad6fSSam Leffler #include "opt_ddb.h" 606810ad6fSSam Leffler 61091d81d1SSam Leffler #include <sys/param.h> 62091d81d1SSam Leffler #include <sys/systm.h> 637290cb47SMark Johnston #include <sys/counter.h> 64091d81d1SSam Leffler #include <sys/kernel.h> 65091d81d1SSam Leffler #include <sys/kthread.h> 66ec5c0e5bSAllan Jude #include <sys/linker.h> 67091d81d1SSam Leffler #include <sys/lock.h> 685dba30f1SPoul-Henning Kamp #include <sys/module.h> 69091d81d1SSam Leffler #include <sys/mutex.h> 70091d81d1SSam Leffler #include <sys/malloc.h> 719c0e3d3aSJohn Baldwin #include <sys/mbuf.h> 72091d81d1SSam Leffler #include <sys/proc.h> 73c0341432SJohn Baldwin #include <sys/refcount.h> 74df21ad6eSBjoern A. Zeeb #include <sys/sdt.h> 7539bbca6fSFabien Thomas #include <sys/smp.h> 76091d81d1SSam Leffler #include <sys/sysctl.h> 7739bbca6fSFabien Thomas #include <sys/taskqueue.h> 789c0e3d3aSJohn Baldwin #include <sys/uio.h> 79091d81d1SSam Leffler 806810ad6fSSam Leffler #include <ddb/ddb.h> 816810ad6fSSam Leffler 82e6f6d0c9SAlan Somers #include <machine/vmparam.h> 83091d81d1SSam Leffler #include <vm/uma.h> 84e6f6d0c9SAlan Somers 85ec5c0e5bSAllan Jude #include <crypto/intake.h> 86091d81d1SSam Leffler #include <opencrypto/cryptodev.h> 87c0341432SJohn Baldwin #include <opencrypto/xform_auth.h> 88c0341432SJohn Baldwin #include <opencrypto/xform_enc.h> 89091d81d1SSam Leffler 906810ad6fSSam Leffler #include <sys/kobj.h> 916810ad6fSSam Leffler #include <sys/bus.h> 926810ad6fSSam Leffler #include "cryptodev_if.h" 936810ad6fSSam Leffler 946ed982a2SAndrew Turner #if defined(__i386__) || defined(__amd64__) || defined(__aarch64__) 9504c49e68SKonstantin Belousov #include <machine/pcb.h> 9604c49e68SKonstantin Belousov #endif 9704c49e68SKonstantin Belousov 98df21ad6eSBjoern A. Zeeb SDT_PROVIDER_DEFINE(opencrypto); 99df21ad6eSBjoern A. Zeeb 100091d81d1SSam Leffler /* 101091d81d1SSam Leffler * Crypto drivers register themselves by allocating a slot in the 10276681661SJohn Baldwin * crypto_drivers table with crypto_get_driverid(). 103091d81d1SSam Leffler */ 104091d81d1SSam Leffler static struct mtx crypto_drivers_mtx; /* lock on driver table */ 105091d81d1SSam Leffler #define CRYPTO_DRIVER_LOCK() mtx_lock(&crypto_drivers_mtx) 106091d81d1SSam Leffler #define CRYPTO_DRIVER_UNLOCK() mtx_unlock(&crypto_drivers_mtx) 1076810ad6fSSam Leffler #define CRYPTO_DRIVER_ASSERT() mtx_assert(&crypto_drivers_mtx, MA_OWNED) 1086810ad6fSSam Leffler 1096810ad6fSSam Leffler /* 1106810ad6fSSam Leffler * Crypto device/driver capabilities structure. 1116810ad6fSSam Leffler * 1126810ad6fSSam Leffler * Synchronization: 1136810ad6fSSam Leffler * (d) - protected by CRYPTO_DRIVER_LOCK() 1146810ad6fSSam Leffler * (q) - protected by CRYPTO_Q_LOCK() 1156810ad6fSSam Leffler * Not tagged fields are read-only. 
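 * cc_refs is a reference count, taken with cap_ref() and dropped with
 * cap_rele() below; the structure is freed when the last reference goes away.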
1166810ad6fSSam Leffler */ 1176810ad6fSSam Leffler struct cryptocap { 118c0341432SJohn Baldwin device_t cc_dev; 119c0341432SJohn Baldwin uint32_t cc_hid; 120d3d79e96SJohn Baldwin uint32_t cc_sessions; /* (d) # of sessions */ 1216810ad6fSSam Leffler 1226810ad6fSSam Leffler int cc_flags; /* (d) flags */ 1236810ad6fSSam Leffler #define CRYPTOCAP_F_CLEANUP 0x80000000 /* needs resource cleanup */ 1246810ad6fSSam Leffler int cc_qblocked; /* (q) symmetric q blocked */ 1251b0909d5SConrad Meyer size_t cc_session_size; 126c0341432SJohn Baldwin volatile int cc_refs; 1276810ad6fSSam Leffler }; 128c0341432SJohn Baldwin 129c0341432SJohn Baldwin static struct cryptocap **crypto_drivers = NULL; 130c0341432SJohn Baldwin static int crypto_drivers_size = 0; 131c0341432SJohn Baldwin 132c0341432SJohn Baldwin struct crypto_session { 133c0341432SJohn Baldwin struct cryptocap *cap; 134c0341432SJohn Baldwin struct crypto_session_params csp; 13598d788c8SMark Johnston uint64_t id; 1368adcc757SMark Johnston /* Driver softc follows. */ 137c0341432SJohn Baldwin }; 138091d81d1SSam Leffler 1393a865c82SPawel Jakub Dawidek static int crp_sleep = 0; 14039bbca6fSFabien Thomas static TAILQ_HEAD(cryptop_q ,cryptop) crp_q; /* request queues */ 141091d81d1SSam Leffler static struct mtx crypto_q_mtx; 142091d81d1SSam Leffler #define CRYPTO_Q_LOCK() mtx_lock(&crypto_q_mtx) 143091d81d1SSam Leffler #define CRYPTO_Q_UNLOCK() mtx_unlock(&crypto_q_mtx) 144091d81d1SSam Leffler 14533f3bad3SJohn Baldwin SYSCTL_NODE(_kern, OID_AUTO, crypto, CTLFLAG_RW, 0, 146c0341432SJohn Baldwin "In-kernel cryptography"); 147c0341432SJohn Baldwin 148091d81d1SSam Leffler /* 149002c47baSAlan Somers * Taskqueue used to dispatch the crypto requests submitted with 150002c47baSAlan Somers * crypto_dispatch_async . 151091d81d1SSam Leffler */ 15239bbca6fSFabien Thomas static struct taskqueue *crypto_tq; 15339bbca6fSFabien Thomas 15439bbca6fSFabien Thomas /* 15539bbca6fSFabien Thomas * Crypto seq numbers are operated on with modular arithmetic 15639bbca6fSFabien Thomas */ 15739bbca6fSFabien Thomas #define CRYPTO_SEQ_GT(a,b) ((int)((a)-(b)) > 0) 15839bbca6fSFabien Thomas 15939bbca6fSFabien Thomas struct crypto_ret_worker { 16039bbca6fSFabien Thomas struct mtx crypto_ret_mtx; 16139bbca6fSFabien Thomas 16239bbca6fSFabien Thomas TAILQ_HEAD(,cryptop) crp_ordered_ret_q; /* ordered callback queue for symetric jobs */ 16339bbca6fSFabien Thomas TAILQ_HEAD(,cryptop) crp_ret_q; /* callback queue for symetric jobs */ 16439bbca6fSFabien Thomas 165d3d79e96SJohn Baldwin uint32_t reorder_ops; /* total ordered sym jobs received */ 166d3d79e96SJohn Baldwin uint32_t reorder_cur_seq; /* current sym job dispatched */ 16739bbca6fSFabien Thomas 16871785781SJohn Baldwin struct thread *td; 16939bbca6fSFabien Thomas }; 17039bbca6fSFabien Thomas static struct crypto_ret_worker *crypto_ret_workers = NULL; 17139bbca6fSFabien Thomas 17239bbca6fSFabien Thomas #define CRYPTO_RETW(i) (&crypto_ret_workers[i]) 17339bbca6fSFabien Thomas #define CRYPTO_RETW_ID(w) ((w) - crypto_ret_workers) 17439bbca6fSFabien Thomas #define FOREACH_CRYPTO_RETW(w) \ 17539bbca6fSFabien Thomas for (w = crypto_ret_workers; w < crypto_ret_workers + crypto_workers_num; ++w) 17639bbca6fSFabien Thomas 17739bbca6fSFabien Thomas #define CRYPTO_RETW_LOCK(w) mtx_lock(&w->crypto_ret_mtx) 17839bbca6fSFabien Thomas #define CRYPTO_RETW_UNLOCK(w) mtx_unlock(&w->crypto_ret_mtx) 17939bbca6fSFabien Thomas 18039bbca6fSFabien Thomas static int crypto_workers_num = 0; 181c0341432SJohn Baldwin SYSCTL_INT(_kern_crypto, OID_AUTO, num_workers, 
CTLFLAG_RDTUN, 182c0341432SJohn Baldwin &crypto_workers_num, 0, 183c0341432SJohn Baldwin "Number of crypto workers used to dispatch crypto jobs"); 184c0341432SJohn Baldwin #ifdef COMPAT_FREEBSD12 18539bbca6fSFabien Thomas SYSCTL_INT(_kern, OID_AUTO, crypto_workers_num, CTLFLAG_RDTUN, 18639bbca6fSFabien Thomas &crypto_workers_num, 0, 18739bbca6fSFabien Thomas "Number of crypto workers used to dispatch crypto jobs"); 188c0341432SJohn Baldwin #endif 189091d81d1SSam Leffler 190091d81d1SSam Leffler static uma_zone_t cryptop_zone; 191091d81d1SSam Leffler 192c0341432SJohn Baldwin int crypto_devallowsoft = 0; 1939e0c0512SMark Johnston SYSCTL_INT(_kern_crypto, OID_AUTO, allow_soft, CTLFLAG_RWTUN, 194c0341432SJohn Baldwin &crypto_devallowsoft, 0, 195c0341432SJohn Baldwin "Enable use of software crypto by /dev/crypto"); 196c0341432SJohn Baldwin #ifdef COMPAT_FREEBSD12 1979e0c0512SMark Johnston SYSCTL_INT(_kern, OID_AUTO, cryptodevallowsoft, CTLFLAG_RWTUN, 198091d81d1SSam Leffler &crypto_devallowsoft, 0, 1996c20d7a3SJohn-Mark Gurney "Enable/disable use of software crypto by /dev/crypto"); 200c0341432SJohn Baldwin #endif 201091d81d1SSam Leffler 20270439285SMateusz Guzik #ifdef DIAGNOSTIC 20370439285SMateusz Guzik bool crypto_destroyreq_check; 20470439285SMateusz Guzik SYSCTL_BOOL(_kern_crypto, OID_AUTO, destroyreq_check, CTLFLAG_RWTUN, 20570439285SMateusz Guzik &crypto_destroyreq_check, 0, 20670439285SMateusz Guzik "Enable checks when destroying a request"); 20770439285SMateusz Guzik #endif 20870439285SMateusz Guzik 209091d81d1SSam Leffler MALLOC_DEFINE(M_CRYPTO_DATA, "crypto", "crypto session records"); 210091d81d1SSam Leffler 21171785781SJohn Baldwin static void crypto_dispatch_thread(void *arg); 21271785781SJohn Baldwin static struct thread *cryptotd; 21371785781SJohn Baldwin static void crypto_ret_thread(void *arg); 21451e45326SSam Leffler static void crypto_destroy(void); 2154acae0acSPawel Jakub Dawidek static int crypto_invoke(struct cryptocap *cap, struct cryptop *crp, int hint); 21639bbca6fSFabien Thomas static void crypto_task_invoke(void *ctx, int pending); 21739bbca6fSFabien Thomas static void crypto_batch_enqueue(struct cryptop *crp); 21851e45326SSam Leffler 2197290cb47SMark Johnston static counter_u64_t cryptostats[sizeof(struct cryptostats) / sizeof(uint64_t)]; 2207290cb47SMark Johnston SYSCTL_COUNTER_U64_ARRAY(_kern_crypto, OID_AUTO, stats, CTLFLAG_RW, 2217290cb47SMark Johnston cryptostats, nitems(cryptostats), 2227290cb47SMark Johnston "Crypto system statistics"); 2237290cb47SMark Johnston 2247290cb47SMark Johnston #define CRYPTOSTAT_INC(stat) do { \ 2257290cb47SMark Johnston counter_u64_add( \ 2267290cb47SMark Johnston cryptostats[offsetof(struct cryptostats, stat) / sizeof(uint64_t)],\ 2277290cb47SMark Johnston 1); \ 2287290cb47SMark Johnston } while (0) 2297290cb47SMark Johnston 2307290cb47SMark Johnston static void 2317290cb47SMark Johnston cryptostats_init(void *arg __unused) 2327290cb47SMark Johnston { 2337290cb47SMark Johnston COUNTER_ARRAY_ALLOC(cryptostats, nitems(cryptostats), M_WAITOK); 2347290cb47SMark Johnston } 2357290cb47SMark Johnston SYSINIT(cryptostats_init, SI_SUB_COUNTER, SI_ORDER_ANY, cryptostats_init, NULL); 2367290cb47SMark Johnston 2377290cb47SMark Johnston static void 2387290cb47SMark Johnston cryptostats_fini(void *arg __unused) 2397290cb47SMark Johnston { 2407290cb47SMark Johnston COUNTER_ARRAY_FREE(cryptostats, nitems(cryptostats)); 2417290cb47SMark Johnston } 2427290cb47SMark Johnston SYSUNINIT(cryptostats_fini, SI_SUB_COUNTER, SI_ORDER_ANY, 
cryptostats_fini, 2437290cb47SMark Johnston NULL); 2447d1853eeSSam Leffler 245ec5c0e5bSAllan Jude /* Try to avoid directly exposing the key buffer as a symbol */ 246ec5c0e5bSAllan Jude static struct keybuf *keybuf; 247ec5c0e5bSAllan Jude 248ec5c0e5bSAllan Jude static struct keybuf empty_keybuf = { 249ec5c0e5bSAllan Jude .kb_nents = 0 250ec5c0e5bSAllan Jude }; 251ec5c0e5bSAllan Jude 252ec5c0e5bSAllan Jude /* Obtain the key buffer from boot metadata */ 253ec5c0e5bSAllan Jude static void 254ec5c0e5bSAllan Jude keybuf_init(void) 255ec5c0e5bSAllan Jude { 256*b72ae900SAhmad Khalifa keybuf = (struct keybuf *)preload_search_info(preload_kmdp, 257ec5c0e5bSAllan Jude MODINFO_METADATA | MODINFOMD_KEYBUF); 258ec5c0e5bSAllan Jude 259ec5c0e5bSAllan Jude if (keybuf == NULL) 260ec5c0e5bSAllan Jude keybuf = &empty_keybuf; 261ec5c0e5bSAllan Jude } 262ec5c0e5bSAllan Jude 263ec5c0e5bSAllan Jude /* It'd be nice if we could store these in some kind of secure memory... */ 2645973f492SJohn Baldwin struct keybuf * 2655973f492SJohn Baldwin get_keybuf(void) 2665973f492SJohn Baldwin { 267ec5c0e5bSAllan Jude 268ec5c0e5bSAllan Jude return (keybuf); 269ec5c0e5bSAllan Jude } 270ec5c0e5bSAllan Jude 271c0341432SJohn Baldwin static struct cryptocap * 272c0341432SJohn Baldwin cap_ref(struct cryptocap *cap) 273c0341432SJohn Baldwin { 274c0341432SJohn Baldwin 275c0341432SJohn Baldwin refcount_acquire(&cap->cc_refs); 276c0341432SJohn Baldwin return (cap); 277c0341432SJohn Baldwin } 278c0341432SJohn Baldwin 279c0341432SJohn Baldwin static void 280c0341432SJohn Baldwin cap_rele(struct cryptocap *cap) 281c0341432SJohn Baldwin { 282c0341432SJohn Baldwin 283c0341432SJohn Baldwin if (refcount_release(&cap->cc_refs) == 0) 284c0341432SJohn Baldwin return; 285c0341432SJohn Baldwin 286c0341432SJohn Baldwin KASSERT(cap->cc_sessions == 0, 287c0341432SJohn Baldwin ("freeing crypto driver with active sessions")); 288c0341432SJohn Baldwin 289c0341432SJohn Baldwin free(cap, M_CRYPTO_DATA); 290c0341432SJohn Baldwin } 291c0341432SJohn Baldwin 29251e45326SSam Leffler static int 293091d81d1SSam Leffler crypto_init(void) 294091d81d1SSam Leffler { 29539bbca6fSFabien Thomas struct crypto_ret_worker *ret_worker; 29671785781SJohn Baldwin struct proc *p; 29751e45326SSam Leffler int error; 298091d81d1SSam Leffler 2994e057806SJohn Baldwin mtx_init(&crypto_drivers_mtx, "crypto driver table", NULL, MTX_DEF); 300091d81d1SSam Leffler 301091d81d1SSam Leffler TAILQ_INIT(&crp_q); 3024e057806SJohn Baldwin mtx_init(&crypto_q_mtx, "crypto op queues", NULL, MTX_DEF); 303091d81d1SSam Leffler 304e5587cbbSMark Johnston cryptop_zone = uma_zcreate("cryptop", 305e5587cbbSMark Johnston sizeof(struct cryptop), NULL, NULL, NULL, NULL, 30651e45326SSam Leffler UMA_ALIGN_PTR, UMA_ZONE_ZINIT); 3071b0909d5SConrad Meyer 308c0341432SJohn Baldwin crypto_drivers_size = CRYPTO_DRIVERS_INITIAL; 309c0341432SJohn Baldwin crypto_drivers = malloc(crypto_drivers_size * 310e5587cbbSMark Johnston sizeof(struct cryptocap), M_CRYPTO_DATA, M_WAITOK | M_ZERO); 31151e45326SSam Leffler 31239bbca6fSFabien Thomas if (crypto_workers_num < 1 || crypto_workers_num > mp_ncpus) 31339bbca6fSFabien Thomas crypto_workers_num = mp_ncpus; 31439bbca6fSFabien Thomas 31539bbca6fSFabien Thomas crypto_tq = taskqueue_create("crypto", M_WAITOK | M_ZERO, 31639bbca6fSFabien Thomas taskqueue_thread_enqueue, &crypto_tq); 31739bbca6fSFabien Thomas 31839bbca6fSFabien Thomas taskqueue_start_threads(&crypto_tq, crypto_workers_num, PRI_MIN_KERN, 31939bbca6fSFabien Thomas "crypto"); 32039bbca6fSFabien Thomas 32171785781SJohn 
Baldwin p = NULL; 32271785781SJohn Baldwin error = kproc_kthread_add(crypto_dispatch_thread, NULL, &p, &cryptotd, 32371785781SJohn Baldwin 0, 0, "crypto", "crypto"); 32451e45326SSam Leffler if (error) { 32551e45326SSam Leffler printf("crypto_init: cannot start crypto thread; error %d", 32651e45326SSam Leffler error); 32751e45326SSam Leffler goto bad; 32851e45326SSam Leffler } 32951e45326SSam Leffler 330e5587cbbSMark Johnston crypto_ret_workers = mallocarray(crypto_workers_num, 331e5587cbbSMark Johnston sizeof(struct crypto_ret_worker), M_CRYPTO_DATA, M_WAITOK | M_ZERO); 33239bbca6fSFabien Thomas 33339bbca6fSFabien Thomas FOREACH_CRYPTO_RETW(ret_worker) { 33439bbca6fSFabien Thomas TAILQ_INIT(&ret_worker->crp_ordered_ret_q); 33539bbca6fSFabien Thomas TAILQ_INIT(&ret_worker->crp_ret_q); 33639bbca6fSFabien Thomas 33739bbca6fSFabien Thomas ret_worker->reorder_ops = 0; 33839bbca6fSFabien Thomas ret_worker->reorder_cur_seq = 0; 33939bbca6fSFabien Thomas 3404e057806SJohn Baldwin mtx_init(&ret_worker->crypto_ret_mtx, "crypto return queues", 3414e057806SJohn Baldwin NULL, MTX_DEF); 34239bbca6fSFabien Thomas 34371785781SJohn Baldwin error = kthread_add(crypto_ret_thread, ret_worker, p, 34471785781SJohn Baldwin &ret_worker->td, 0, 0, "crypto returns %td", 34571785781SJohn Baldwin CRYPTO_RETW_ID(ret_worker)); 34651e45326SSam Leffler if (error) { 34751e45326SSam Leffler printf("crypto_init: cannot start cryptoret thread; error %d", 34851e45326SSam Leffler error); 34951e45326SSam Leffler goto bad; 35051e45326SSam Leffler } 35139bbca6fSFabien Thomas } 352ec5c0e5bSAllan Jude 353ec5c0e5bSAllan Jude keybuf_init(); 354ec5c0e5bSAllan Jude 35551e45326SSam Leffler return 0; 35651e45326SSam Leffler bad: 35751e45326SSam Leffler crypto_destroy(); 35851e45326SSam Leffler return error; 35951e45326SSam Leffler } 36051e45326SSam Leffler 36151e45326SSam Leffler /* 36251e45326SSam Leffler * Signal a crypto thread to terminate. We use the driver 36351e45326SSam Leffler * table lock to synchronize the sleep/wakeups so that we 36451e45326SSam Leffler * are sure the threads have terminated before we release 36551e45326SSam Leffler * the data structures they use. See crypto_finis below 36651e45326SSam Leffler * for the other half of this song-and-dance. 36751e45326SSam Leffler */ 36851e45326SSam Leffler static void 36971785781SJohn Baldwin crypto_terminate(struct thread **tdp, void *q) 37051e45326SSam Leffler { 37171785781SJohn Baldwin struct thread *td; 37251e45326SSam Leffler 37351e45326SSam Leffler mtx_assert(&crypto_drivers_mtx, MA_OWNED); 37471785781SJohn Baldwin td = *tdp; 37571785781SJohn Baldwin *tdp = NULL; 37671785781SJohn Baldwin if (td != NULL) { 37751e45326SSam Leffler wakeup_one(q); 37871785781SJohn Baldwin mtx_sleep(td, &crypto_drivers_mtx, PWAIT, "crypto_destroy", 0); 37951e45326SSam Leffler } 38051e45326SSam Leffler } 38151e45326SSam Leffler 38251e45326SSam Leffler static void 383d588dc7dSMark Johnston hmac_init_pad(const struct auth_hash *axf, const char *key, int klen, 384d588dc7dSMark Johnston void *auth_ctx, uint8_t padval) 385c0341432SJohn Baldwin { 386c0341432SJohn Baldwin uint8_t hmac_key[HMAC_MAX_BLOCK_LEN]; 387c0341432SJohn Baldwin u_int i; 388c0341432SJohn Baldwin 389c0341432SJohn Baldwin KASSERT(axf->blocksize <= sizeof(hmac_key), 390c0341432SJohn Baldwin ("Invalid HMAC block size %d", axf->blocksize)); 391c0341432SJohn Baldwin 392c0341432SJohn Baldwin /* 393c0341432SJohn Baldwin * If the key is larger than the block size, use the digest of 394c0341432SJohn Baldwin * the key as the key instead. 
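 * This is the standard HMAC key preprocessing (RFC 2104): the key is
 * zero-padded out to the block size, hashed down first if it is longer
 * than the block size, and then XORed with the ipad/opad byte.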
395c0341432SJohn Baldwin */ 396c0341432SJohn Baldwin memset(hmac_key, 0, sizeof(hmac_key)); 397c0341432SJohn Baldwin if (klen > axf->blocksize) { 398c0341432SJohn Baldwin axf->Init(auth_ctx); 399c0341432SJohn Baldwin axf->Update(auth_ctx, key, klen); 400c0341432SJohn Baldwin axf->Final(hmac_key, auth_ctx); 401c0341432SJohn Baldwin klen = axf->hashsize; 402c0341432SJohn Baldwin } else 403c0341432SJohn Baldwin memcpy(hmac_key, key, klen); 404c0341432SJohn Baldwin 405c0341432SJohn Baldwin for (i = 0; i < axf->blocksize; i++) 406c0341432SJohn Baldwin hmac_key[i] ^= padval; 407c0341432SJohn Baldwin 408c0341432SJohn Baldwin axf->Init(auth_ctx); 409c0341432SJohn Baldwin axf->Update(auth_ctx, hmac_key, axf->blocksize); 41017a831eaSJohn Baldwin explicit_bzero(hmac_key, sizeof(hmac_key)); 411c0341432SJohn Baldwin } 412c0341432SJohn Baldwin 413c0341432SJohn Baldwin void 414d588dc7dSMark Johnston hmac_init_ipad(const struct auth_hash *axf, const char *key, int klen, 415c0341432SJohn Baldwin void *auth_ctx) 416c0341432SJohn Baldwin { 417c0341432SJohn Baldwin 418c0341432SJohn Baldwin hmac_init_pad(axf, key, klen, auth_ctx, HMAC_IPAD_VAL); 419c0341432SJohn Baldwin } 420c0341432SJohn Baldwin 421c0341432SJohn Baldwin void 422d588dc7dSMark Johnston hmac_init_opad(const struct auth_hash *axf, const char *key, int klen, 423c0341432SJohn Baldwin void *auth_ctx) 424c0341432SJohn Baldwin { 425c0341432SJohn Baldwin 426c0341432SJohn Baldwin hmac_init_pad(axf, key, klen, auth_ctx, HMAC_OPAD_VAL); 427c0341432SJohn Baldwin } 428c0341432SJohn Baldwin 429c0341432SJohn Baldwin static void 43051e45326SSam Leffler crypto_destroy(void) 43151e45326SSam Leffler { 43239bbca6fSFabien Thomas struct crypto_ret_worker *ret_worker; 433c0341432SJohn Baldwin int i; 43439bbca6fSFabien Thomas 43551e45326SSam Leffler /* 43651e45326SSam Leffler * Terminate any crypto threads. 43751e45326SSam Leffler */ 43839bbca6fSFabien Thomas if (crypto_tq != NULL) 43939bbca6fSFabien Thomas taskqueue_drain_all(crypto_tq); 44051e45326SSam Leffler CRYPTO_DRIVER_LOCK(); 44171785781SJohn Baldwin crypto_terminate(&cryptotd, &crp_q); 44239bbca6fSFabien Thomas FOREACH_CRYPTO_RETW(ret_worker) 44371785781SJohn Baldwin crypto_terminate(&ret_worker->td, &ret_worker->crp_ret_q); 44451e45326SSam Leffler CRYPTO_DRIVER_UNLOCK(); 44551e45326SSam Leffler 44651e45326SSam Leffler /* XXX flush queues??? */ 44751e45326SSam Leffler 44851e45326SSam Leffler /* 44951e45326SSam Leffler * Reclaim dynamically allocated resources. 
45051e45326SSam Leffler */ 451c0341432SJohn Baldwin for (i = 0; i < crypto_drivers_size; i++) { 452c0341432SJohn Baldwin if (crypto_drivers[i] != NULL) 453c0341432SJohn Baldwin cap_rele(crypto_drivers[i]); 454c0341432SJohn Baldwin } 45551e45326SSam Leffler free(crypto_drivers, M_CRYPTO_DATA); 45651e45326SSam Leffler 45751e45326SSam Leffler if (cryptop_zone != NULL) 45851e45326SSam Leffler uma_zdestroy(cryptop_zone); 45951e45326SSam Leffler mtx_destroy(&crypto_q_mtx); 46039bbca6fSFabien Thomas FOREACH_CRYPTO_RETW(ret_worker) 46139bbca6fSFabien Thomas mtx_destroy(&ret_worker->crypto_ret_mtx); 46239bbca6fSFabien Thomas free(crypto_ret_workers, M_CRYPTO_DATA); 46339bbca6fSFabien Thomas if (crypto_tq != NULL) 46439bbca6fSFabien Thomas taskqueue_free(crypto_tq); 46551e45326SSam Leffler mtx_destroy(&crypto_drivers_mtx); 466091d81d1SSam Leffler } 467f544a528SMark Murray 4681b0909d5SConrad Meyer uint32_t 4691b0909d5SConrad Meyer crypto_ses2hid(crypto_session_t crypto_session) 4701b0909d5SConrad Meyer { 471c0341432SJohn Baldwin return (crypto_session->cap->cc_hid); 4721b0909d5SConrad Meyer } 4731b0909d5SConrad Meyer 4741b0909d5SConrad Meyer uint32_t 4751b0909d5SConrad Meyer crypto_ses2caps(crypto_session_t crypto_session) 4761b0909d5SConrad Meyer { 477c0341432SJohn Baldwin return (crypto_session->cap->cc_flags & 0xff000000); 4781b0909d5SConrad Meyer } 4791b0909d5SConrad Meyer 4801b0909d5SConrad Meyer void * 4811b0909d5SConrad Meyer crypto_get_driver_session(crypto_session_t crypto_session) 4821b0909d5SConrad Meyer { 483d1816248SMark Johnston return (crypto_session + 1); 4841b0909d5SConrad Meyer } 4851b0909d5SConrad Meyer 486c0341432SJohn Baldwin const struct crypto_session_params * 487c0341432SJohn Baldwin crypto_get_params(crypto_session_t crypto_session) 488c0341432SJohn Baldwin { 489c0341432SJohn Baldwin return (&crypto_session->csp); 490c0341432SJohn Baldwin } 491c0341432SJohn Baldwin 492d8787d4fSMark Johnston const struct auth_hash * 493c0341432SJohn Baldwin crypto_auth_hash(const struct crypto_session_params *csp) 494c0341432SJohn Baldwin { 495c0341432SJohn Baldwin 496c0341432SJohn Baldwin switch (csp->csp_auth_alg) { 497c0341432SJohn Baldwin case CRYPTO_SHA1_HMAC: 498c0341432SJohn Baldwin return (&auth_hash_hmac_sha1); 499c0341432SJohn Baldwin case CRYPTO_SHA2_224_HMAC: 500c0341432SJohn Baldwin return (&auth_hash_hmac_sha2_224); 501c0341432SJohn Baldwin case CRYPTO_SHA2_256_HMAC: 502c0341432SJohn Baldwin return (&auth_hash_hmac_sha2_256); 503c0341432SJohn Baldwin case CRYPTO_SHA2_384_HMAC: 504c0341432SJohn Baldwin return (&auth_hash_hmac_sha2_384); 505c0341432SJohn Baldwin case CRYPTO_SHA2_512_HMAC: 506c0341432SJohn Baldwin return (&auth_hash_hmac_sha2_512); 507c0341432SJohn Baldwin case CRYPTO_NULL_HMAC: 508c0341432SJohn Baldwin return (&auth_hash_null); 509c0341432SJohn Baldwin case CRYPTO_RIPEMD160_HMAC: 510c0341432SJohn Baldwin return (&auth_hash_hmac_ripemd_160); 511c3a688efSJohn Baldwin case CRYPTO_RIPEMD160: 512c3a688efSJohn Baldwin return (&auth_hash_ripemd_160); 513c0341432SJohn Baldwin case CRYPTO_SHA1: 514c0341432SJohn Baldwin return (&auth_hash_sha1); 515c0341432SJohn Baldwin case CRYPTO_SHA2_224: 516c0341432SJohn Baldwin return (&auth_hash_sha2_224); 517c0341432SJohn Baldwin case CRYPTO_SHA2_256: 518c0341432SJohn Baldwin return (&auth_hash_sha2_256); 519c0341432SJohn Baldwin case CRYPTO_SHA2_384: 520c0341432SJohn Baldwin return (&auth_hash_sha2_384); 521c0341432SJohn Baldwin case CRYPTO_SHA2_512: 522c0341432SJohn Baldwin return (&auth_hash_sha2_512); 523c0341432SJohn Baldwin case 
CRYPTO_AES_NIST_GMAC: 524c0341432SJohn Baldwin switch (csp->csp_auth_klen) { 525c0341432SJohn Baldwin case 128 / 8: 526c0341432SJohn Baldwin return (&auth_hash_nist_gmac_aes_128); 527c0341432SJohn Baldwin case 192 / 8: 528c0341432SJohn Baldwin return (&auth_hash_nist_gmac_aes_192); 529c0341432SJohn Baldwin case 256 / 8: 530c0341432SJohn Baldwin return (&auth_hash_nist_gmac_aes_256); 531c0341432SJohn Baldwin default: 532c0341432SJohn Baldwin return (NULL); 533c0341432SJohn Baldwin } 534c0341432SJohn Baldwin case CRYPTO_BLAKE2B: 535c0341432SJohn Baldwin return (&auth_hash_blake2b); 536c0341432SJohn Baldwin case CRYPTO_BLAKE2S: 537c0341432SJohn Baldwin return (&auth_hash_blake2s); 538c0341432SJohn Baldwin case CRYPTO_POLY1305: 539c0341432SJohn Baldwin return (&auth_hash_poly1305); 540c0341432SJohn Baldwin case CRYPTO_AES_CCM_CBC_MAC: 541c0341432SJohn Baldwin switch (csp->csp_auth_klen) { 542c0341432SJohn Baldwin case 128 / 8: 543c0341432SJohn Baldwin return (&auth_hash_ccm_cbc_mac_128); 544c0341432SJohn Baldwin case 192 / 8: 545c0341432SJohn Baldwin return (&auth_hash_ccm_cbc_mac_192); 546c0341432SJohn Baldwin case 256 / 8: 547c0341432SJohn Baldwin return (&auth_hash_ccm_cbc_mac_256); 548c0341432SJohn Baldwin default: 549c0341432SJohn Baldwin return (NULL); 550c0341432SJohn Baldwin } 551c0341432SJohn Baldwin default: 552c0341432SJohn Baldwin return (NULL); 553c0341432SJohn Baldwin } 554c0341432SJohn Baldwin } 555c0341432SJohn Baldwin 556d8787d4fSMark Johnston const struct enc_xform * 557c0341432SJohn Baldwin crypto_cipher(const struct crypto_session_params *csp) 558c0341432SJohn Baldwin { 559c0341432SJohn Baldwin 560c0341432SJohn Baldwin switch (csp->csp_cipher_alg) { 561246982c1SJohn Baldwin case CRYPTO_AES_CBC: 562246982c1SJohn Baldwin return (&enc_xform_aes_cbc); 563c0341432SJohn Baldwin case CRYPTO_AES_XTS: 564c0341432SJohn Baldwin return (&enc_xform_aes_xts); 565c0341432SJohn Baldwin case CRYPTO_AES_ICM: 566c0341432SJohn Baldwin return (&enc_xform_aes_icm); 567c0341432SJohn Baldwin case CRYPTO_AES_NIST_GCM_16: 568c0341432SJohn Baldwin return (&enc_xform_aes_nist_gcm); 569c0341432SJohn Baldwin case CRYPTO_CAMELLIA_CBC: 570c0341432SJohn Baldwin return (&enc_xform_camellia); 571c0341432SJohn Baldwin case CRYPTO_NULL_CBC: 572c0341432SJohn Baldwin return (&enc_xform_null); 573c0341432SJohn Baldwin case CRYPTO_CHACHA20: 574c0341432SJohn Baldwin return (&enc_xform_chacha20); 575c0341432SJohn Baldwin case CRYPTO_AES_CCM_16: 576c0341432SJohn Baldwin return (&enc_xform_ccm); 577fc8fc743SJohn Baldwin case CRYPTO_CHACHA20_POLY1305: 578fc8fc743SJohn Baldwin return (&enc_xform_chacha20_poly1305); 5798f35841fSJohn Baldwin case CRYPTO_XCHACHA20_POLY1305: 5808f35841fSJohn Baldwin return (&enc_xform_xchacha20_poly1305); 581c0341432SJohn Baldwin default: 582c0341432SJohn Baldwin return (NULL); 583c0341432SJohn Baldwin } 584c0341432SJohn Baldwin } 585c0341432SJohn Baldwin 5866810ad6fSSam Leffler static struct cryptocap * 587d3d79e96SJohn Baldwin crypto_checkdriver(uint32_t hid) 5886810ad6fSSam Leffler { 5896810ad6fSSam Leffler 590c0341432SJohn Baldwin return (hid >= crypto_drivers_size ? NULL : crypto_drivers[hid]); 591f544a528SMark Murray } 592f544a528SMark Murray 593091d81d1SSam Leffler /* 5946810ad6fSSam Leffler * Select a driver for a new session that supports the specified 5956810ad6fSSam Leffler * algorithms and, optionally, is constrained according to the flags. 
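 * A driver reports support by returning a negative value from its
 * CRYPTODEV_PROBESESSION method; the driver with the greatest (least
 * negative) probe value is chosen, and ties are broken in favor of the
 * driver with the fewest active sessions.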
596091d81d1SSam Leffler */ 5976810ad6fSSam Leffler static struct cryptocap * 598c0341432SJohn Baldwin crypto_select_driver(const struct crypto_session_params *csp, int flags) 5996810ad6fSSam Leffler { 6006810ad6fSSam Leffler struct cryptocap *cap, *best; 601c0341432SJohn Baldwin int best_match, error, hid; 6026810ad6fSSam Leffler 6036810ad6fSSam Leffler CRYPTO_DRIVER_ASSERT(); 604091d81d1SSam Leffler 6056810ad6fSSam Leffler best = NULL; 606c0341432SJohn Baldwin for (hid = 0; hid < crypto_drivers_size; hid++) { 607091d81d1SSam Leffler /* 608c0341432SJohn Baldwin * If there is no driver for this slot, or the driver 609c0341432SJohn Baldwin * is not appropriate (hardware or software based on 610c0341432SJohn Baldwin * match), then skip. 611091d81d1SSam Leffler */ 612c0341432SJohn Baldwin cap = crypto_drivers[hid]; 613c0341432SJohn Baldwin if (cap == NULL || 614c0341432SJohn Baldwin (cap->cc_flags & flags) == 0) 615091d81d1SSam Leffler continue; 616091d81d1SSam Leffler 617c0341432SJohn Baldwin error = CRYPTODEV_PROBESESSION(cap->cc_dev, csp); 618c0341432SJohn Baldwin if (error >= 0) 619c0341432SJohn Baldwin continue; 620c0341432SJohn Baldwin 621c0341432SJohn Baldwin /* 622c0341432SJohn Baldwin * Use the driver with the highest probe value. 623c0341432SJohn Baldwin * Hardware drivers use a higher probe value than 624c0341432SJohn Baldwin * software. In case of a tie, prefer the driver with 625c0341432SJohn Baldwin * the fewest active sessions. 626c0341432SJohn Baldwin */ 627c0341432SJohn Baldwin if (best == NULL || error > best_match || 628c0341432SJohn Baldwin (error == best_match && 629c0341432SJohn Baldwin cap->cc_sessions < best->cc_sessions)) { 6306810ad6fSSam Leffler best = cap; 631c0341432SJohn Baldwin best_match = error; 6326810ad6fSSam Leffler } 6336810ad6fSSam Leffler } 6346810ad6fSSam Leffler return best; 6356810ad6fSSam Leffler } 636091d81d1SSam Leffler 637ad557055SJohn Baldwin static enum alg_type { 638ad557055SJohn Baldwin ALG_NONE = 0, 639ad557055SJohn Baldwin ALG_CIPHER, 640ad557055SJohn Baldwin ALG_DIGEST, 641ad557055SJohn Baldwin ALG_KEYED_DIGEST, 642ad557055SJohn Baldwin ALG_COMPRESSION, 643ad557055SJohn Baldwin ALG_AEAD 644ad557055SJohn Baldwin } alg_types[] = { 645ad557055SJohn Baldwin [CRYPTO_SHA1_HMAC] = ALG_KEYED_DIGEST, 646ad557055SJohn Baldwin [CRYPTO_RIPEMD160_HMAC] = ALG_KEYED_DIGEST, 647ad557055SJohn Baldwin [CRYPTO_AES_CBC] = ALG_CIPHER, 648ad557055SJohn Baldwin [CRYPTO_SHA1] = ALG_DIGEST, 649ad557055SJohn Baldwin [CRYPTO_NULL_HMAC] = ALG_DIGEST, 650ad557055SJohn Baldwin [CRYPTO_NULL_CBC] = ALG_CIPHER, 651ad557055SJohn Baldwin [CRYPTO_DEFLATE_COMP] = ALG_COMPRESSION, 652ad557055SJohn Baldwin [CRYPTO_SHA2_256_HMAC] = ALG_KEYED_DIGEST, 653ad557055SJohn Baldwin [CRYPTO_SHA2_384_HMAC] = ALG_KEYED_DIGEST, 654ad557055SJohn Baldwin [CRYPTO_SHA2_512_HMAC] = ALG_KEYED_DIGEST, 655ad557055SJohn Baldwin [CRYPTO_CAMELLIA_CBC] = ALG_CIPHER, 656ad557055SJohn Baldwin [CRYPTO_AES_XTS] = ALG_CIPHER, 657ad557055SJohn Baldwin [CRYPTO_AES_ICM] = ALG_CIPHER, 658ad557055SJohn Baldwin [CRYPTO_AES_NIST_GMAC] = ALG_KEYED_DIGEST, 659ad557055SJohn Baldwin [CRYPTO_AES_NIST_GCM_16] = ALG_AEAD, 660ad557055SJohn Baldwin [CRYPTO_BLAKE2B] = ALG_KEYED_DIGEST, 661ad557055SJohn Baldwin [CRYPTO_BLAKE2S] = ALG_KEYED_DIGEST, 662ad557055SJohn Baldwin [CRYPTO_CHACHA20] = ALG_CIPHER, 663ad557055SJohn Baldwin [CRYPTO_SHA2_224_HMAC] = ALG_KEYED_DIGEST, 664ad557055SJohn Baldwin [CRYPTO_RIPEMD160] = ALG_DIGEST, 665ad557055SJohn Baldwin [CRYPTO_SHA2_224] = ALG_DIGEST, 666ad557055SJohn Baldwin [CRYPTO_SHA2_256] = 
ALG_DIGEST, 667ad557055SJohn Baldwin [CRYPTO_SHA2_384] = ALG_DIGEST, 668ad557055SJohn Baldwin [CRYPTO_SHA2_512] = ALG_DIGEST, 669ad557055SJohn Baldwin [CRYPTO_POLY1305] = ALG_KEYED_DIGEST, 670ad557055SJohn Baldwin [CRYPTO_AES_CCM_CBC_MAC] = ALG_KEYED_DIGEST, 671ad557055SJohn Baldwin [CRYPTO_AES_CCM_16] = ALG_AEAD, 672fc8fc743SJohn Baldwin [CRYPTO_CHACHA20_POLY1305] = ALG_AEAD, 6738f35841fSJohn Baldwin [CRYPTO_XCHACHA20_POLY1305] = ALG_AEAD, 674ad557055SJohn Baldwin }; 675ad557055SJohn Baldwin 676ad557055SJohn Baldwin static enum alg_type 677ad557055SJohn Baldwin alg_type(int alg) 678ad557055SJohn Baldwin { 679ad557055SJohn Baldwin 680ad557055SJohn Baldwin if (alg < nitems(alg_types)) 681ad557055SJohn Baldwin return (alg_types[alg]); 682ad557055SJohn Baldwin return (ALG_NONE); 683ad557055SJohn Baldwin } 684ad557055SJohn Baldwin 685c0341432SJohn Baldwin static bool 686c0341432SJohn Baldwin alg_is_compression(int alg) 687c0341432SJohn Baldwin { 688c0341432SJohn Baldwin 689ad557055SJohn Baldwin return (alg_type(alg) == ALG_COMPRESSION); 690c0341432SJohn Baldwin } 691c0341432SJohn Baldwin 692c0341432SJohn Baldwin static bool 693c0341432SJohn Baldwin alg_is_cipher(int alg) 694c0341432SJohn Baldwin { 695c0341432SJohn Baldwin 696ad557055SJohn Baldwin return (alg_type(alg) == ALG_CIPHER); 697c0341432SJohn Baldwin } 698c0341432SJohn Baldwin 699c0341432SJohn Baldwin static bool 700c0341432SJohn Baldwin alg_is_digest(int alg) 701c0341432SJohn Baldwin { 702c0341432SJohn Baldwin 703ad557055SJohn Baldwin return (alg_type(alg) == ALG_DIGEST || 704ad557055SJohn Baldwin alg_type(alg) == ALG_KEYED_DIGEST); 705c0341432SJohn Baldwin } 706c0341432SJohn Baldwin 707c0341432SJohn Baldwin static bool 708c0341432SJohn Baldwin alg_is_keyed_digest(int alg) 709c0341432SJohn Baldwin { 710c0341432SJohn Baldwin 711ad557055SJohn Baldwin return (alg_type(alg) == ALG_KEYED_DIGEST); 712c0341432SJohn Baldwin } 713c0341432SJohn Baldwin 714c0341432SJohn Baldwin static bool 715c0341432SJohn Baldwin alg_is_aead(int alg) 716c0341432SJohn Baldwin { 717c0341432SJohn Baldwin 718ad557055SJohn Baldwin return (alg_type(alg) == ALG_AEAD); 719c0341432SJohn Baldwin } 720c0341432SJohn Baldwin 721ae18720dSJohn Baldwin static bool 722ae18720dSJohn Baldwin ccm_tag_length_valid(int len) 723ae18720dSJohn Baldwin { 724ae18720dSJohn Baldwin /* RFC 3610 */ 725ae18720dSJohn Baldwin switch (len) { 726ae18720dSJohn Baldwin case 4: 727ae18720dSJohn Baldwin case 6: 728ae18720dSJohn Baldwin case 8: 729ae18720dSJohn Baldwin case 10: 730ae18720dSJohn Baldwin case 12: 731ae18720dSJohn Baldwin case 14: 732ae18720dSJohn Baldwin case 16: 733ae18720dSJohn Baldwin return (true); 734ae18720dSJohn Baldwin default: 735ae18720dSJohn Baldwin return (false); 736ae18720dSJohn Baldwin } 737ae18720dSJohn Baldwin } 738ae18720dSJohn Baldwin 7397e89ae49SMarcin Wojtas #define SUPPORTED_SES (CSP_F_SEPARATE_OUTPUT | CSP_F_SEPARATE_AAD | CSP_F_ESN) 7407e89ae49SMarcin Wojtas 741c0341432SJohn Baldwin /* Various sanity checks on crypto session parameters. */ 742c0341432SJohn Baldwin static bool 743c0341432SJohn Baldwin check_csp(const struct crypto_session_params *csp) 744c0341432SJohn Baldwin { 745d8787d4fSMark Johnston const struct auth_hash *axf; 746c0341432SJohn Baldwin 747c0341432SJohn Baldwin /* Mode-independent checks. 
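 * All modes require the flags to be a subset of SUPPORTED_SES, the length
 * fields to be non-negative, and any non-NULL key pointer to be paired
 * with a non-zero key length.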
*/ 7487e89ae49SMarcin Wojtas if ((csp->csp_flags & ~(SUPPORTED_SES)) != 0) 749c0341432SJohn Baldwin return (false); 750c0341432SJohn Baldwin if (csp->csp_ivlen < 0 || csp->csp_cipher_klen < 0 || 751c0341432SJohn Baldwin csp->csp_auth_klen < 0 || csp->csp_auth_mlen < 0) 752c0341432SJohn Baldwin return (false); 753c0341432SJohn Baldwin if (csp->csp_auth_key != NULL && csp->csp_auth_klen == 0) 754c0341432SJohn Baldwin return (false); 755c0341432SJohn Baldwin if (csp->csp_cipher_key != NULL && csp->csp_cipher_klen == 0) 756c0341432SJohn Baldwin return (false); 757c0341432SJohn Baldwin 758c0341432SJohn Baldwin switch (csp->csp_mode) { 759c0341432SJohn Baldwin case CSP_MODE_COMPRESS: 760c0341432SJohn Baldwin if (!alg_is_compression(csp->csp_cipher_alg)) 761c0341432SJohn Baldwin return (false); 7629c0e3d3aSJohn Baldwin if (csp->csp_flags & CSP_F_SEPARATE_OUTPUT) 763c0341432SJohn Baldwin return (false); 7649b774dc0SJohn Baldwin if (csp->csp_flags & CSP_F_SEPARATE_AAD) 7659b774dc0SJohn Baldwin return (false); 766c0341432SJohn Baldwin if (csp->csp_cipher_klen != 0 || csp->csp_ivlen != 0 || 767c0341432SJohn Baldwin csp->csp_auth_alg != 0 || csp->csp_auth_klen != 0 || 768c0341432SJohn Baldwin csp->csp_auth_mlen != 0) 769c0341432SJohn Baldwin return (false); 770c0341432SJohn Baldwin break; 771c0341432SJohn Baldwin case CSP_MODE_CIPHER: 772c0341432SJohn Baldwin if (!alg_is_cipher(csp->csp_cipher_alg)) 773c0341432SJohn Baldwin return (false); 7749b774dc0SJohn Baldwin if (csp->csp_flags & CSP_F_SEPARATE_AAD) 7759b774dc0SJohn Baldwin return (false); 776c0341432SJohn Baldwin if (csp->csp_cipher_alg != CRYPTO_NULL_CBC) { 777c0341432SJohn Baldwin if (csp->csp_cipher_klen == 0) 778c0341432SJohn Baldwin return (false); 779c0341432SJohn Baldwin if (csp->csp_ivlen == 0) 780c0341432SJohn Baldwin return (false); 781c0341432SJohn Baldwin } 782c0341432SJohn Baldwin if (csp->csp_ivlen >= EALG_MAX_BLOCK_LEN) 783c0341432SJohn Baldwin return (false); 784c0341432SJohn Baldwin if (csp->csp_auth_alg != 0 || csp->csp_auth_klen != 0 || 785c0341432SJohn Baldwin csp->csp_auth_mlen != 0) 786c0341432SJohn Baldwin return (false); 787c0341432SJohn Baldwin break; 788c0341432SJohn Baldwin case CSP_MODE_DIGEST: 789c0341432SJohn Baldwin if (csp->csp_cipher_alg != 0 || csp->csp_cipher_klen != 0) 790c0341432SJohn Baldwin return (false); 791c0341432SJohn Baldwin 7929b774dc0SJohn Baldwin if (csp->csp_flags & CSP_F_SEPARATE_AAD) 7939b774dc0SJohn Baldwin return (false); 7949b774dc0SJohn Baldwin 795c0341432SJohn Baldwin /* IV is optional for digests (e.g. GMAC). */ 796ae18720dSJohn Baldwin switch (csp->csp_auth_alg) { 797ae18720dSJohn Baldwin case CRYPTO_AES_CCM_CBC_MAC: 798ae18720dSJohn Baldwin if (csp->csp_ivlen < 7 || csp->csp_ivlen > 13) 799c0341432SJohn Baldwin return (false); 800ae18720dSJohn Baldwin break; 801ae18720dSJohn Baldwin case CRYPTO_AES_NIST_GMAC: 802ae18720dSJohn Baldwin if (csp->csp_ivlen != AES_GCM_IV_LEN) 803ae18720dSJohn Baldwin return (false); 804ae18720dSJohn Baldwin break; 805ae18720dSJohn Baldwin default: 806ae18720dSJohn Baldwin if (csp->csp_ivlen != 0) 807ae18720dSJohn Baldwin return (false); 808ae18720dSJohn Baldwin break; 809ae18720dSJohn Baldwin } 810ae18720dSJohn Baldwin 811c0341432SJohn Baldwin if (!alg_is_digest(csp->csp_auth_alg)) 812c0341432SJohn Baldwin return (false); 813c0341432SJohn Baldwin 814c0341432SJohn Baldwin /* Key is optional for BLAKE2 digests. 
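 * (BLAKE2B and BLAKE2S operate either keyed or unkeyed, so a missing key
 * is accepted for them.)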
*/ 815c0341432SJohn Baldwin if (csp->csp_auth_alg == CRYPTO_BLAKE2B || 816c0341432SJohn Baldwin csp->csp_auth_alg == CRYPTO_BLAKE2S) 817c0341432SJohn Baldwin ; 818c0341432SJohn Baldwin else if (alg_is_keyed_digest(csp->csp_auth_alg)) { 819c0341432SJohn Baldwin if (csp->csp_auth_klen == 0) 820c0341432SJohn Baldwin return (false); 821c0341432SJohn Baldwin } else { 822c0341432SJohn Baldwin if (csp->csp_auth_klen != 0) 823c0341432SJohn Baldwin return (false); 824c0341432SJohn Baldwin } 825c0341432SJohn Baldwin if (csp->csp_auth_mlen != 0) { 826c0341432SJohn Baldwin axf = crypto_auth_hash(csp); 827c0341432SJohn Baldwin if (axf == NULL || csp->csp_auth_mlen > axf->hashsize) 828c0341432SJohn Baldwin return (false); 829ae18720dSJohn Baldwin 830ae18720dSJohn Baldwin if (csp->csp_auth_alg == CRYPTO_AES_CCM_CBC_MAC && 831ae18720dSJohn Baldwin !ccm_tag_length_valid(csp->csp_auth_mlen)) 832ae18720dSJohn Baldwin return (false); 833c0341432SJohn Baldwin } 834c0341432SJohn Baldwin break; 835c0341432SJohn Baldwin case CSP_MODE_AEAD: 836c0341432SJohn Baldwin if (!alg_is_aead(csp->csp_cipher_alg)) 837c0341432SJohn Baldwin return (false); 838c0341432SJohn Baldwin if (csp->csp_cipher_klen == 0) 839c0341432SJohn Baldwin return (false); 840c0341432SJohn Baldwin if (csp->csp_ivlen == 0 || 841c0341432SJohn Baldwin csp->csp_ivlen >= EALG_MAX_BLOCK_LEN) 842c0341432SJohn Baldwin return (false); 843c0341432SJohn Baldwin if (csp->csp_auth_alg != 0 || csp->csp_auth_klen != 0) 844c0341432SJohn Baldwin return (false); 845c0341432SJohn Baldwin 846c0341432SJohn Baldwin switch (csp->csp_cipher_alg) { 847c0341432SJohn Baldwin case CRYPTO_AES_CCM_16: 848ae18720dSJohn Baldwin if (csp->csp_auth_mlen != 0 && 849ae18720dSJohn Baldwin !ccm_tag_length_valid(csp->csp_auth_mlen)) 850ae18720dSJohn Baldwin return (false); 851ae18720dSJohn Baldwin 852ae18720dSJohn Baldwin if (csp->csp_ivlen < 7 || csp->csp_ivlen > 13) 853ae18720dSJohn Baldwin return (false); 854ae18720dSJohn Baldwin break; 855ae18720dSJohn Baldwin case CRYPTO_AES_NIST_GCM_16: 8566e17a2e0SJohn Baldwin if (csp->csp_auth_mlen > AES_GMAC_HASH_LEN) 8576e17a2e0SJohn Baldwin return (false); 8586e17a2e0SJohn Baldwin 8596e17a2e0SJohn Baldwin if (csp->csp_ivlen != AES_GCM_IV_LEN) 860c0341432SJohn Baldwin return (false); 861c0341432SJohn Baldwin break; 86242dcd395SJohn Baldwin case CRYPTO_CHACHA20_POLY1305: 86342dcd395SJohn Baldwin if (csp->csp_ivlen != 8 && csp->csp_ivlen != 12) 86442dcd395SJohn Baldwin return (false); 86542dcd395SJohn Baldwin if (csp->csp_auth_mlen > POLY1305_HASH_LEN) 86642dcd395SJohn Baldwin return (false); 86742dcd395SJohn Baldwin break; 8688f35841fSJohn Baldwin case CRYPTO_XCHACHA20_POLY1305: 8698f35841fSJohn Baldwin if (csp->csp_ivlen != XCHACHA20_POLY1305_IV_LEN) 8708f35841fSJohn Baldwin return (false); 8718f35841fSJohn Baldwin if (csp->csp_auth_mlen > POLY1305_HASH_LEN) 8728f35841fSJohn Baldwin return (false); 8738f35841fSJohn Baldwin break; 874c0341432SJohn Baldwin } 875c0341432SJohn Baldwin break; 876c0341432SJohn Baldwin case CSP_MODE_ETA: 877c0341432SJohn Baldwin if (!alg_is_cipher(csp->csp_cipher_alg)) 878c0341432SJohn Baldwin return (false); 879c0341432SJohn Baldwin if (csp->csp_cipher_alg != CRYPTO_NULL_CBC) { 880c0341432SJohn Baldwin if (csp->csp_cipher_klen == 0) 881c0341432SJohn Baldwin return (false); 882c0341432SJohn Baldwin if (csp->csp_ivlen == 0) 883c0341432SJohn Baldwin return (false); 884c0341432SJohn Baldwin } 885c0341432SJohn Baldwin if (csp->csp_ivlen >= EALG_MAX_BLOCK_LEN) 886c0341432SJohn Baldwin return (false); 887c0341432SJohn 
Baldwin if (!alg_is_digest(csp->csp_auth_alg)) 888c0341432SJohn Baldwin return (false); 889c0341432SJohn Baldwin 890c0341432SJohn Baldwin /* Key is optional for BLAKE2 digests. */ 891c0341432SJohn Baldwin if (csp->csp_auth_alg == CRYPTO_BLAKE2B || 892c0341432SJohn Baldwin csp->csp_auth_alg == CRYPTO_BLAKE2S) 893c0341432SJohn Baldwin ; 894c0341432SJohn Baldwin else if (alg_is_keyed_digest(csp->csp_auth_alg)) { 895c0341432SJohn Baldwin if (csp->csp_auth_klen == 0) 896c0341432SJohn Baldwin return (false); 897c0341432SJohn Baldwin } else { 898c0341432SJohn Baldwin if (csp->csp_auth_klen != 0) 899c0341432SJohn Baldwin return (false); 900c0341432SJohn Baldwin } 901c0341432SJohn Baldwin if (csp->csp_auth_mlen != 0) { 902c0341432SJohn Baldwin axf = crypto_auth_hash(csp); 903c0341432SJohn Baldwin if (axf == NULL || csp->csp_auth_mlen > axf->hashsize) 904c0341432SJohn Baldwin return (false); 905c0341432SJohn Baldwin } 906c0341432SJohn Baldwin break; 907c0341432SJohn Baldwin default: 908c0341432SJohn Baldwin return (false); 909c0341432SJohn Baldwin } 910c0341432SJohn Baldwin 911c0341432SJohn Baldwin return (true); 912c0341432SJohn Baldwin } 913c0341432SJohn Baldwin 914c0341432SJohn Baldwin /* 915c0341432SJohn Baldwin * Delete a session after it has been detached from its driver. 916c0341432SJohn Baldwin */ 917c0341432SJohn Baldwin static void 918c0341432SJohn Baldwin crypto_deletesession(crypto_session_t cses) 919c0341432SJohn Baldwin { 920c0341432SJohn Baldwin struct cryptocap *cap; 921c0341432SJohn Baldwin 922c0341432SJohn Baldwin cap = cses->cap; 923c0341432SJohn Baldwin 924d1816248SMark Johnston zfree(cses, M_CRYPTO_DATA); 925c0341432SJohn Baldwin 926c0341432SJohn Baldwin CRYPTO_DRIVER_LOCK(); 927c0341432SJohn Baldwin cap->cc_sessions--; 928c0341432SJohn Baldwin if (cap->cc_sessions == 0 && cap->cc_flags & CRYPTOCAP_F_CLEANUP) 929c0341432SJohn Baldwin wakeup(cap); 930c0341432SJohn Baldwin CRYPTO_DRIVER_UNLOCK(); 931c0341432SJohn Baldwin cap_rele(cap); 932c0341432SJohn Baldwin } 933c0341432SJohn Baldwin 934694e0113SPawel Jakub Dawidek /* 9356810ad6fSSam Leffler * Create a new session. The crid argument specifies a crypto 9366810ad6fSSam Leffler * driver to use or constraints on a driver to select (hardware 9376810ad6fSSam Leffler * only, software only, either). Whatever driver is selected 9386810ad6fSSam Leffler * must be capable of the requested crypto algorithms. 939694e0113SPawel Jakub Dawidek */ 9406810ad6fSSam Leffler int 941c0341432SJohn Baldwin crypto_newsession(crypto_session_t *cses, 942c0341432SJohn Baldwin const struct crypto_session_params *csp, int crid) 9436810ad6fSSam Leffler { 94498d788c8SMark Johnston static uint64_t sessid = 0; 9451b0909d5SConrad Meyer crypto_session_t res; 9466810ad6fSSam Leffler struct cryptocap *cap; 9476810ad6fSSam Leffler int err; 9486810ad6fSSam Leffler 949c0341432SJohn Baldwin if (!check_csp(csp)) 950c0341432SJohn Baldwin return (EINVAL); 951c0341432SJohn Baldwin 9521b0909d5SConrad Meyer res = NULL; 9531b0909d5SConrad Meyer 9546810ad6fSSam Leffler CRYPTO_DRIVER_LOCK(); 9556810ad6fSSam Leffler if ((crid & (CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE)) == 0) { 956694e0113SPawel Jakub Dawidek /* 9576810ad6fSSam Leffler * Use specified driver; verify it is capable. 
958694e0113SPawel Jakub Dawidek */ 9596810ad6fSSam Leffler cap = crypto_checkdriver(crid); 960c0341432SJohn Baldwin if (cap != NULL && CRYPTODEV_PROBESESSION(cap->cc_dev, csp) > 0) 961694e0113SPawel Jakub Dawidek cap = NULL; 9626810ad6fSSam Leffler } else { 9636810ad6fSSam Leffler /* 9646810ad6fSSam Leffler * No requested driver; select based on crid flags. 9656810ad6fSSam Leffler */ 966c0341432SJohn Baldwin cap = crypto_select_driver(csp, crid); 967694e0113SPawel Jakub Dawidek } 9681b0909d5SConrad Meyer if (cap == NULL) { 969c0341432SJohn Baldwin CRYPTO_DRIVER_UNLOCK(); 97008fca7a5SJohn-Mark Gurney CRYPTDEB("no driver"); 971c0341432SJohn Baldwin return (EOPNOTSUPP); 97208fca7a5SJohn-Mark Gurney } 973c0341432SJohn Baldwin cap_ref(cap); 9741b0909d5SConrad Meyer cap->cc_sessions++; 975091d81d1SSam Leffler CRYPTO_DRIVER_UNLOCK(); 9761b0909d5SConrad Meyer 9778adcc757SMark Johnston /* Allocate a single block for the generic session and driver softc. */ 978d1816248SMark Johnston res = malloc(sizeof(*res) + cap->cc_session_size, M_CRYPTO_DATA, 979d1816248SMark Johnston M_WAITOK | M_ZERO); 980c0341432SJohn Baldwin res->cap = cap; 981c0341432SJohn Baldwin res->csp = *csp; 98298d788c8SMark Johnston res->id = atomic_fetchadd_64(&sessid, 1); 9831b0909d5SConrad Meyer 9841b0909d5SConrad Meyer /* Call the driver initialization routine. */ 985c0341432SJohn Baldwin err = CRYPTODEV_NEWSESSION(cap->cc_dev, res, csp); 9861b0909d5SConrad Meyer if (err != 0) { 9871b0909d5SConrad Meyer CRYPTDEB("dev newsession failed: %d", err); 988c0341432SJohn Baldwin crypto_deletesession(res); 989c0341432SJohn Baldwin return (err); 9901b0909d5SConrad Meyer } 9911b0909d5SConrad Meyer 9921b0909d5SConrad Meyer *cses = res; 993c0341432SJohn Baldwin return (0); 9944acae0acSPawel Jakub Dawidek } 9954acae0acSPawel Jakub Dawidek 996091d81d1SSam Leffler /* 997091d81d1SSam Leffler * Delete an existing session (or a reserved session on an unregistered 998091d81d1SSam Leffler * driver). 999091d81d1SSam Leffler */ 10001b0909d5SConrad Meyer void 10011b0909d5SConrad Meyer crypto_freesession(crypto_session_t cses) 1002091d81d1SSam Leffler { 10034acae0acSPawel Jakub Dawidek struct cryptocap *cap; 10041b0909d5SConrad Meyer 10051b0909d5SConrad Meyer if (cses == NULL) 10061b0909d5SConrad Meyer return; 1007091d81d1SSam Leffler 1008c0341432SJohn Baldwin cap = cses->cap; 1009091d81d1SSam Leffler 1010091d81d1SSam Leffler /* Call the driver cleanup routine, if available. */ 10111b0909d5SConrad Meyer CRYPTODEV_FREESESSION(cap->cc_dev, cses); 10121b0909d5SConrad Meyer 1013c0341432SJohn Baldwin crypto_deletesession(cses); 1014091d81d1SSam Leffler } 1015091d81d1SSam Leffler 1016091d81d1SSam Leffler /* 1017c0341432SJohn Baldwin * Return a new driver id. Registers a driver with the system so that 1018c0341432SJohn Baldwin * it can be probed by subsequent sessions. 
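 * Drivers typically call this from their attach routine, passing the size
 * of their per-session state; the returned id is later used with
 * crypto_unblock() and crypto_unregister_all().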
1019091d81d1SSam Leffler */ 1020091d81d1SSam Leffler int32_t 10211b0909d5SConrad Meyer crypto_get_driverid(device_t dev, size_t sessionsize, int flags) 1022091d81d1SSam Leffler { 1023c0341432SJohn Baldwin struct cryptocap *cap, **newdrv; 1024091d81d1SSam Leffler int i; 1025091d81d1SSam Leffler 10266810ad6fSSam Leffler if ((flags & (CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE)) == 0) { 1027c0341432SJohn Baldwin device_printf(dev, 1028c0341432SJohn Baldwin "no flags specified when registering driver\n"); 10296810ad6fSSam Leffler return -1; 10306810ad6fSSam Leffler } 10316810ad6fSSam Leffler 1032c0341432SJohn Baldwin cap = malloc(sizeof(*cap), M_CRYPTO_DATA, M_WAITOK | M_ZERO); 1033c0341432SJohn Baldwin cap->cc_dev = dev; 1034c0341432SJohn Baldwin cap->cc_session_size = sessionsize; 1035c0341432SJohn Baldwin cap->cc_flags = flags; 1036c0341432SJohn Baldwin refcount_init(&cap->cc_refs, 1); 1037c0341432SJohn Baldwin 1038091d81d1SSam Leffler CRYPTO_DRIVER_LOCK(); 1039c0341432SJohn Baldwin for (;;) { 1040c0341432SJohn Baldwin for (i = 0; i < crypto_drivers_size; i++) { 1041c0341432SJohn Baldwin if (crypto_drivers[i] == NULL) 1042091d81d1SSam Leffler break; 10434acae0acSPawel Jakub Dawidek } 1044c0341432SJohn Baldwin 1045c0341432SJohn Baldwin if (i < crypto_drivers_size) 1046c0341432SJohn Baldwin break; 1047091d81d1SSam Leffler 1048091d81d1SSam Leffler /* Out of entries, allocate some more. */ 1049c0341432SJohn Baldwin 1050c0341432SJohn Baldwin if (2 * crypto_drivers_size <= crypto_drivers_size) { 1051091d81d1SSam Leffler CRYPTO_DRIVER_UNLOCK(); 1052091d81d1SSam Leffler printf("crypto: driver count wraparound!\n"); 1053c0341432SJohn Baldwin cap_rele(cap); 1054c0341432SJohn Baldwin return (-1); 1055091d81d1SSam Leffler } 1056091d81d1SSam Leffler CRYPTO_DRIVER_UNLOCK(); 1057091d81d1SSam Leffler 1058c0341432SJohn Baldwin newdrv = malloc(2 * crypto_drivers_size * 1059c0341432SJohn Baldwin sizeof(*crypto_drivers), M_CRYPTO_DATA, M_WAITOK | M_ZERO); 1060091d81d1SSam Leffler 1061c0341432SJohn Baldwin CRYPTO_DRIVER_LOCK(); 1062c0341432SJohn Baldwin memcpy(newdrv, crypto_drivers, 1063c0341432SJohn Baldwin crypto_drivers_size * sizeof(*crypto_drivers)); 1064c0341432SJohn Baldwin 1065c0341432SJohn Baldwin crypto_drivers_size *= 2; 1066091d81d1SSam Leffler 1067091d81d1SSam Leffler free(crypto_drivers, M_CRYPTO_DATA); 1068091d81d1SSam Leffler crypto_drivers = newdrv; 1069091d81d1SSam Leffler } 1070091d81d1SSam Leffler 1071c0341432SJohn Baldwin cap->cc_hid = i; 1072c0341432SJohn Baldwin crypto_drivers[i] = cap; 1073c0341432SJohn Baldwin CRYPTO_DRIVER_UNLOCK(); 1074c0341432SJohn Baldwin 1075091d81d1SSam Leffler if (bootverbose) 1076d7d2f0d4SConrad Meyer printf("crypto: assign %s driver id %u, flags 0x%x\n", 10776810ad6fSSam Leffler device_get_nameunit(dev), i, flags); 1078091d81d1SSam Leffler 1079091d81d1SSam Leffler return i; 1080091d81d1SSam Leffler } 1081091d81d1SSam Leffler 10826810ad6fSSam Leffler /* 10836810ad6fSSam Leffler * Lookup a driver by name. We match against the full device 10846810ad6fSSam Leffler * name and unit, and against just the name. The latter gives 10856810ad6fSSam Leffler * us a simple widlcarding by device name. On success return the 10866810ad6fSSam Leffler * driver/hardware identifier; otherwise return -1. 
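 * For example, a full name such as "aesni0" matches only that unit, while
 * a bare driver name such as "aesni" matches the first registered unit of
 * that driver.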
10876810ad6fSSam Leffler */ 10886810ad6fSSam Leffler int 10896810ad6fSSam Leffler crypto_find_driver(const char *match) 1090091d81d1SSam Leffler { 1091c0341432SJohn Baldwin struct cryptocap *cap; 10926810ad6fSSam Leffler int i, len = strlen(match); 10936810ad6fSSam Leffler 10946810ad6fSSam Leffler CRYPTO_DRIVER_LOCK(); 1095c0341432SJohn Baldwin for (i = 0; i < crypto_drivers_size; i++) { 1096c0341432SJohn Baldwin if (crypto_drivers[i] == NULL) 10976810ad6fSSam Leffler continue; 1098c0341432SJohn Baldwin cap = crypto_drivers[i]; 1099c0341432SJohn Baldwin if (strncmp(match, device_get_nameunit(cap->cc_dev), len) == 0 || 1100c0341432SJohn Baldwin strncmp(match, device_get_name(cap->cc_dev), len) == 0) { 1101c0341432SJohn Baldwin CRYPTO_DRIVER_UNLOCK(); 1102c0341432SJohn Baldwin return (i); 1103c0341432SJohn Baldwin } 11046810ad6fSSam Leffler } 11056810ad6fSSam Leffler CRYPTO_DRIVER_UNLOCK(); 1106c0341432SJohn Baldwin return (-1); 11076810ad6fSSam Leffler } 11086810ad6fSSam Leffler 11096810ad6fSSam Leffler /* 11106810ad6fSSam Leffler * Return the device_t for the specified driver or NULL 11116810ad6fSSam Leffler * if the driver identifier is invalid. 11126810ad6fSSam Leffler */ 11136810ad6fSSam Leffler device_t 11146810ad6fSSam Leffler crypto_find_device_byhid(int hid) 11156810ad6fSSam Leffler { 1116c0341432SJohn Baldwin struct cryptocap *cap; 1117c0341432SJohn Baldwin device_t dev; 1118c0341432SJohn Baldwin 1119c0341432SJohn Baldwin dev = NULL; 1120c0341432SJohn Baldwin CRYPTO_DRIVER_LOCK(); 1121c0341432SJohn Baldwin cap = crypto_checkdriver(hid); 1122c0341432SJohn Baldwin if (cap != NULL) 1123c0341432SJohn Baldwin dev = cap->cc_dev; 1124c0341432SJohn Baldwin CRYPTO_DRIVER_UNLOCK(); 1125c0341432SJohn Baldwin return (dev); 11266810ad6fSSam Leffler } 11276810ad6fSSam Leffler 11286810ad6fSSam Leffler /* 11296810ad6fSSam Leffler * Return the device/driver capabilities. 11306810ad6fSSam Leffler */ 11316810ad6fSSam Leffler int 11326810ad6fSSam Leffler crypto_getcaps(int hid) 11336810ad6fSSam Leffler { 1134c0341432SJohn Baldwin struct cryptocap *cap; 1135c0341432SJohn Baldwin int flags; 1136c0341432SJohn Baldwin 1137c0341432SJohn Baldwin flags = 0; 1138c0341432SJohn Baldwin CRYPTO_DRIVER_LOCK(); 1139c0341432SJohn Baldwin cap = crypto_checkdriver(hid); 1140c0341432SJohn Baldwin if (cap != NULL) 1141c0341432SJohn Baldwin flags = cap->cc_flags; 1142c0341432SJohn Baldwin CRYPTO_DRIVER_UNLOCK(); 1143c0341432SJohn Baldwin return (flags); 1144091d81d1SSam Leffler } 1145091d81d1SSam Leffler 1146091d81d1SSam Leffler /* 1147091d81d1SSam Leffler * Unregister all algorithms associated with a crypto driver. 1148091d81d1SSam Leffler * If there are pending sessions using it, leave enough information 1149091d81d1SSam Leffler * around so that subsequent calls using those sessions will 1150091d81d1SSam Leffler * correctly detect the driver has been unregistered and reroute 1151091d81d1SSam Leffler * requests. 
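 * This sleeps until all of the driver's sessions have been freed, so it
 * must be called from a context that can sleep (typically device detach).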
1152091d81d1SSam Leffler */ 1153091d81d1SSam Leffler int 1154d3d79e96SJohn Baldwin crypto_unregister_all(uint32_t driverid) 1155091d81d1SSam Leffler { 1156091d81d1SSam Leffler struct cryptocap *cap; 1157091d81d1SSam Leffler 1158091d81d1SSam Leffler CRYPTO_DRIVER_LOCK(); 1159091d81d1SSam Leffler cap = crypto_checkdriver(driverid); 1160c0341432SJohn Baldwin if (cap == NULL) { 1161091d81d1SSam Leffler CRYPTO_DRIVER_UNLOCK(); 1162c0341432SJohn Baldwin return (EINVAL); 1163c0341432SJohn Baldwin } 11646810ad6fSSam Leffler 1165c0341432SJohn Baldwin cap->cc_flags |= CRYPTOCAP_F_CLEANUP; 1166c0341432SJohn Baldwin crypto_drivers[driverid] = NULL; 1167c0341432SJohn Baldwin 1168c0341432SJohn Baldwin /* 1169c0341432SJohn Baldwin * XXX: This doesn't do anything to kick sessions that 1170c0341432SJohn Baldwin * have no pending operations. 1171c0341432SJohn Baldwin */ 117276681661SJohn Baldwin while (cap->cc_sessions != 0) 1173c0341432SJohn Baldwin mtx_sleep(cap, &crypto_drivers_mtx, 0, "cryunreg", 0); 1174c0341432SJohn Baldwin CRYPTO_DRIVER_UNLOCK(); 1175c0341432SJohn Baldwin cap_rele(cap); 1176c0341432SJohn Baldwin 1177c0341432SJohn Baldwin return (0); 1178091d81d1SSam Leffler } 1179091d81d1SSam Leffler 1180091d81d1SSam Leffler /* 1181091d81d1SSam Leffler * Clear blockage on a driver. The what parameter indicates whether 1182091d81d1SSam Leffler * the driver is now ready for cryptop's and/or cryptokop's. 1183091d81d1SSam Leffler */ 1184091d81d1SSam Leffler int 1185d3d79e96SJohn Baldwin crypto_unblock(uint32_t driverid, int what) 1186091d81d1SSam Leffler { 1187091d81d1SSam Leffler struct cryptocap *cap; 11883a865c82SPawel Jakub Dawidek int err; 1189091d81d1SSam Leffler 1190091d81d1SSam Leffler CRYPTO_Q_LOCK(); 1191091d81d1SSam Leffler cap = crypto_checkdriver(driverid); 1192091d81d1SSam Leffler if (cap != NULL) { 11933a865c82SPawel Jakub Dawidek if (what & CRYPTO_SYMQ) 1194091d81d1SSam Leffler cap->cc_qblocked = 0; 11953a865c82SPawel Jakub Dawidek if (crp_sleep) 11961a91ccccSSam Leffler wakeup_one(&crp_q); 1197091d81d1SSam Leffler err = 0; 1198091d81d1SSam Leffler } else 1199091d81d1SSam Leffler err = EINVAL; 1200091d81d1SSam Leffler CRYPTO_Q_UNLOCK(); 1201091d81d1SSam Leffler 1202091d81d1SSam Leffler return err; 1203091d81d1SSam Leffler } 1204091d81d1SSam Leffler 12059c0e3d3aSJohn Baldwin size_t 12069c0e3d3aSJohn Baldwin crypto_buffer_len(struct crypto_buffer *cb) 12079c0e3d3aSJohn Baldwin { 12089c0e3d3aSJohn Baldwin switch (cb->cb_type) { 12099c0e3d3aSJohn Baldwin case CRYPTO_BUF_CONTIG: 12109c0e3d3aSJohn Baldwin return (cb->cb_buf_len); 12119c0e3d3aSJohn Baldwin case CRYPTO_BUF_MBUF: 12129c0e3d3aSJohn Baldwin if (cb->cb_mbuf->m_flags & M_PKTHDR) 12139c0e3d3aSJohn Baldwin return (cb->cb_mbuf->m_pkthdr.len); 12149c0e3d3aSJohn Baldwin return (m_length(cb->cb_mbuf, NULL)); 1215883a0196SJohn Baldwin case CRYPTO_BUF_SINGLE_MBUF: 1216883a0196SJohn Baldwin return (cb->cb_mbuf->m_len); 1217e6f6d0c9SAlan Somers case CRYPTO_BUF_VMPAGE: 1218e6f6d0c9SAlan Somers return (cb->cb_vm_page_len); 12199c0e3d3aSJohn Baldwin case CRYPTO_BUF_UIO: 12209c0e3d3aSJohn Baldwin return (cb->cb_uio->uio_resid); 12219c0e3d3aSJohn Baldwin default: 12229c0e3d3aSJohn Baldwin return (0); 12239c0e3d3aSJohn Baldwin } 12249c0e3d3aSJohn Baldwin } 12259c0e3d3aSJohn Baldwin 1226c0341432SJohn Baldwin #ifdef INVARIANTS 1227c0341432SJohn Baldwin /* Various sanity checks on crypto requests. 
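 * These helpers exist only in INVARIANTS kernels and assert that a
 * submitted request is consistent with its session: valid buffer types,
 * op flags matching the session mode, and sane AAD/IV placement.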
*/ 1228c0341432SJohn Baldwin static void 12299c0e3d3aSJohn Baldwin cb_sanity(struct crypto_buffer *cb, const char *name) 12309c0e3d3aSJohn Baldwin { 12319c0e3d3aSJohn Baldwin KASSERT(cb->cb_type > CRYPTO_BUF_NONE && cb->cb_type <= CRYPTO_BUF_LAST, 12329c0e3d3aSJohn Baldwin ("incoming crp with invalid %s buffer type", name)); 1233e6f6d0c9SAlan Somers switch (cb->cb_type) { 1234e6f6d0c9SAlan Somers case CRYPTO_BUF_CONTIG: 12359c0e3d3aSJohn Baldwin KASSERT(cb->cb_buf_len >= 0, 12369c0e3d3aSJohn Baldwin ("incoming crp with -ve %s buffer length", name)); 1237e6f6d0c9SAlan Somers break; 1238e6f6d0c9SAlan Somers case CRYPTO_BUF_VMPAGE: 1239e6f6d0c9SAlan Somers KASSERT(CRYPTO_HAS_VMPAGE, 1240e6f6d0c9SAlan Somers ("incoming crp uses dmap on supported arch")); 1241e6f6d0c9SAlan Somers KASSERT(cb->cb_vm_page_len >= 0, 1242e6f6d0c9SAlan Somers ("incoming crp with -ve %s buffer length", name)); 1243e6f6d0c9SAlan Somers KASSERT(cb->cb_vm_page_offset >= 0, 1244e6f6d0c9SAlan Somers ("incoming crp with -ve %s buffer offset", name)); 1245e6f6d0c9SAlan Somers KASSERT(cb->cb_vm_page_offset < PAGE_SIZE, 1246e6f6d0c9SAlan Somers ("incoming crp with %s buffer offset greater than page size" 1247e6f6d0c9SAlan Somers , name)); 1248e6f6d0c9SAlan Somers break; 1249e6f6d0c9SAlan Somers default: 1250e6f6d0c9SAlan Somers break; 1251e6f6d0c9SAlan Somers } 12529c0e3d3aSJohn Baldwin } 12539c0e3d3aSJohn Baldwin 12549c0e3d3aSJohn Baldwin static void 1255c0341432SJohn Baldwin crp_sanity(struct cryptop *crp) 1256c0341432SJohn Baldwin { 1257c0341432SJohn Baldwin struct crypto_session_params *csp; 12589c0e3d3aSJohn Baldwin struct crypto_buffer *out; 12599c0e3d3aSJohn Baldwin size_t ilen, len, olen; 1260c0341432SJohn Baldwin 1261c0341432SJohn Baldwin KASSERT(crp->crp_session != NULL, ("incoming crp without a session")); 12629c0e3d3aSJohn Baldwin KASSERT(crp->crp_obuf.cb_type >= CRYPTO_BUF_NONE && 12639c0e3d3aSJohn Baldwin crp->crp_obuf.cb_type <= CRYPTO_BUF_LAST, 12649c0e3d3aSJohn Baldwin ("incoming crp with invalid output buffer type")); 1265c0341432SJohn Baldwin KASSERT(crp->crp_etype == 0, ("incoming crp with error")); 1266c0341432SJohn Baldwin KASSERT(!(crp->crp_flags & CRYPTO_F_DONE), 1267c0341432SJohn Baldwin ("incoming crp already done")); 1268c0341432SJohn Baldwin 1269c0341432SJohn Baldwin csp = &crp->crp_session->csp; 12709c0e3d3aSJohn Baldwin cb_sanity(&crp->crp_buf, "input"); 12719c0e3d3aSJohn Baldwin ilen = crypto_buffer_len(&crp->crp_buf); 12729c0e3d3aSJohn Baldwin olen = ilen; 12739c0e3d3aSJohn Baldwin out = NULL; 12749c0e3d3aSJohn Baldwin if (csp->csp_flags & CSP_F_SEPARATE_OUTPUT) { 12759c0e3d3aSJohn Baldwin if (crp->crp_obuf.cb_type != CRYPTO_BUF_NONE) { 12769c0e3d3aSJohn Baldwin cb_sanity(&crp->crp_obuf, "output"); 12779c0e3d3aSJohn Baldwin out = &crp->crp_obuf; 12789c0e3d3aSJohn Baldwin olen = crypto_buffer_len(out); 12799c0e3d3aSJohn Baldwin } 12809c0e3d3aSJohn Baldwin } else 12819c0e3d3aSJohn Baldwin KASSERT(crp->crp_obuf.cb_type == CRYPTO_BUF_NONE, 12829c0e3d3aSJohn Baldwin ("incoming crp with separate output buffer " 12839c0e3d3aSJohn Baldwin "but no session support")); 12849c0e3d3aSJohn Baldwin 1285c0341432SJohn Baldwin switch (csp->csp_mode) { 1286c0341432SJohn Baldwin case CSP_MODE_COMPRESS: 1287c0341432SJohn Baldwin KASSERT(crp->crp_op == CRYPTO_OP_COMPRESS || 1288c0341432SJohn Baldwin crp->crp_op == CRYPTO_OP_DECOMPRESS, 1289c0341432SJohn Baldwin ("invalid compression op %x", crp->crp_op)); 1290c0341432SJohn Baldwin break; 1291c0341432SJohn Baldwin case CSP_MODE_CIPHER: 1292c0341432SJohn Baldwin 
KASSERT(crp->crp_op == CRYPTO_OP_ENCRYPT || 1293c0341432SJohn Baldwin crp->crp_op == CRYPTO_OP_DECRYPT, 1294c0341432SJohn Baldwin ("invalid cipher op %x", crp->crp_op)); 1295c0341432SJohn Baldwin break; 1296c0341432SJohn Baldwin case CSP_MODE_DIGEST: 1297c0341432SJohn Baldwin KASSERT(crp->crp_op == CRYPTO_OP_COMPUTE_DIGEST || 1298c0341432SJohn Baldwin crp->crp_op == CRYPTO_OP_VERIFY_DIGEST, 1299c0341432SJohn Baldwin ("invalid digest op %x", crp->crp_op)); 1300c0341432SJohn Baldwin break; 1301c0341432SJohn Baldwin case CSP_MODE_AEAD: 1302c0341432SJohn Baldwin KASSERT(crp->crp_op == 1303c0341432SJohn Baldwin (CRYPTO_OP_ENCRYPT | CRYPTO_OP_COMPUTE_DIGEST) || 1304c0341432SJohn Baldwin crp->crp_op == 1305c0341432SJohn Baldwin (CRYPTO_OP_DECRYPT | CRYPTO_OP_VERIFY_DIGEST), 1306c0341432SJohn Baldwin ("invalid AEAD op %x", crp->crp_op)); 1307c0341432SJohn Baldwin KASSERT(crp->crp_flags & CRYPTO_F_IV_SEPARATE, 1308fc8fc743SJohn Baldwin ("AEAD without a separate IV")); 1309c0341432SJohn Baldwin break; 1310c0341432SJohn Baldwin case CSP_MODE_ETA: 1311c0341432SJohn Baldwin KASSERT(crp->crp_op == 1312c0341432SJohn Baldwin (CRYPTO_OP_ENCRYPT | CRYPTO_OP_COMPUTE_DIGEST) || 1313c0341432SJohn Baldwin crp->crp_op == 1314c0341432SJohn Baldwin (CRYPTO_OP_DECRYPT | CRYPTO_OP_VERIFY_DIGEST), 1315c0341432SJohn Baldwin ("invalid ETA op %x", crp->crp_op)); 1316c0341432SJohn Baldwin break; 1317c0341432SJohn Baldwin } 1318c0341432SJohn Baldwin if (csp->csp_mode == CSP_MODE_AEAD || csp->csp_mode == CSP_MODE_ETA) { 13199b774dc0SJohn Baldwin if (crp->crp_aad == NULL) { 1320c0341432SJohn Baldwin KASSERT(crp->crp_aad_start == 0 || 13219c0e3d3aSJohn Baldwin crp->crp_aad_start < ilen, 1322c0341432SJohn Baldwin ("invalid AAD start")); 13239b774dc0SJohn Baldwin KASSERT(crp->crp_aad_length != 0 || 13249b774dc0SJohn Baldwin crp->crp_aad_start == 0, 1325c0341432SJohn Baldwin ("AAD with zero length and non-zero start")); 1326c0341432SJohn Baldwin KASSERT(crp->crp_aad_length == 0 || 13279c0e3d3aSJohn Baldwin crp->crp_aad_start + crp->crp_aad_length <= ilen, 1328c0341432SJohn Baldwin ("AAD outside input length")); 1329c0341432SJohn Baldwin } else { 13309b774dc0SJohn Baldwin KASSERT(csp->csp_flags & CSP_F_SEPARATE_AAD, 13319b774dc0SJohn Baldwin ("session doesn't support separate AAD buffer")); 13329b774dc0SJohn Baldwin KASSERT(crp->crp_aad_start == 0, 13339b774dc0SJohn Baldwin ("separate AAD buffer with non-zero AAD start")); 13349b774dc0SJohn Baldwin KASSERT(crp->crp_aad_length != 0, 13359b774dc0SJohn Baldwin ("separate AAD buffer with zero length")); 13369b774dc0SJohn Baldwin } 13379b774dc0SJohn Baldwin } else { 13389b774dc0SJohn Baldwin KASSERT(crp->crp_aad == NULL && crp->crp_aad_start == 0 && 13399b774dc0SJohn Baldwin crp->crp_aad_length == 0, 1340c0341432SJohn Baldwin ("AAD region in request not supporting AAD")); 1341c0341432SJohn Baldwin } 1342c0341432SJohn Baldwin if (csp->csp_ivlen == 0) { 134329fe41ddSJohn Baldwin KASSERT((crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0, 134429fe41ddSJohn Baldwin ("IV_SEPARATE set when IV isn't used")); 1345c0341432SJohn Baldwin KASSERT(crp->crp_iv_start == 0, 1346c0341432SJohn Baldwin ("crp_iv_start set when IV isn't used")); 1347c0341432SJohn Baldwin } else if (crp->crp_flags & CRYPTO_F_IV_SEPARATE) { 1348c0341432SJohn Baldwin KASSERT(crp->crp_iv_start == 0, 1349c0341432SJohn Baldwin ("IV_SEPARATE used with non-zero IV start")); 1350c0341432SJohn Baldwin } else { 13519c0e3d3aSJohn Baldwin KASSERT(crp->crp_iv_start < ilen, 1352c0341432SJohn Baldwin ("invalid IV start")); 13539c0e3d3aSJohn 
Baldwin KASSERT(crp->crp_iv_start + csp->csp_ivlen <= ilen, 13549c0e3d3aSJohn Baldwin ("IV outside buffer length")); 1355c0341432SJohn Baldwin } 13569c0e3d3aSJohn Baldwin /* XXX: payload_start of 0 should always be < ilen? */ 1357c0341432SJohn Baldwin KASSERT(crp->crp_payload_start == 0 || 13589c0e3d3aSJohn Baldwin crp->crp_payload_start < ilen, 1359c0341432SJohn Baldwin ("invalid payload start")); 1360c0341432SJohn Baldwin KASSERT(crp->crp_payload_start + crp->crp_payload_length <= 13619c0e3d3aSJohn Baldwin ilen, ("payload outside input buffer")); 13629c0e3d3aSJohn Baldwin if (out == NULL) { 13639c0e3d3aSJohn Baldwin KASSERT(crp->crp_payload_output_start == 0, 13649c0e3d3aSJohn Baldwin ("payload output start non-zero without output buffer")); 1365c71f2370SJohn Baldwin } else if (csp->csp_mode == CSP_MODE_DIGEST) { 1366c71f2370SJohn Baldwin KASSERT(!(crp->crp_op & CRYPTO_OP_VERIFY_DIGEST), 1367c71f2370SJohn Baldwin ("digest verify with separate output buffer")); 1368c71f2370SJohn Baldwin KASSERT(crp->crp_payload_output_start == 0, 1369c71f2370SJohn Baldwin ("digest operation with non-zero payload output start")); 13709c0e3d3aSJohn Baldwin } else { 1371ec498562SJohn Baldwin KASSERT(crp->crp_payload_output_start == 0 || 1372ec498562SJohn Baldwin crp->crp_payload_output_start < olen, 13739c0e3d3aSJohn Baldwin ("invalid payload output start")); 13749c0e3d3aSJohn Baldwin KASSERT(crp->crp_payload_output_start + 13759c0e3d3aSJohn Baldwin crp->crp_payload_length <= olen, 13769c0e3d3aSJohn Baldwin ("payload outside output buffer")); 13779c0e3d3aSJohn Baldwin } 1378c0341432SJohn Baldwin if (csp->csp_mode == CSP_MODE_DIGEST || 1379c0341432SJohn Baldwin csp->csp_mode == CSP_MODE_AEAD || csp->csp_mode == CSP_MODE_ETA) { 13809c0e3d3aSJohn Baldwin if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) 13819c0e3d3aSJohn Baldwin len = ilen; 13829c0e3d3aSJohn Baldwin else 13839c0e3d3aSJohn Baldwin len = olen; 1384c0341432SJohn Baldwin KASSERT(crp->crp_digest_start == 0 || 13859c0e3d3aSJohn Baldwin crp->crp_digest_start < len, 1386c0341432SJohn Baldwin ("invalid digest start")); 1387c0341432SJohn Baldwin /* XXX: For the mlen == 0 case this check isn't perfect. 
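		 * When csp_auth_mlen is zero the driver falls back to the
		 * transform's full digest length, which this check does not
		 * take into account.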
*/ 13889c0e3d3aSJohn Baldwin KASSERT(crp->crp_digest_start + csp->csp_auth_mlen <= len, 13899c0e3d3aSJohn Baldwin ("digest outside buffer")); 1390c0341432SJohn Baldwin } else { 1391c0341432SJohn Baldwin KASSERT(crp->crp_digest_start == 0, 1392c0341432SJohn Baldwin ("non-zero digest start for request without a digest")); 1393c0341432SJohn Baldwin } 1394c0341432SJohn Baldwin if (csp->csp_cipher_klen != 0) 1395c0341432SJohn Baldwin KASSERT(csp->csp_cipher_key != NULL || 1396c0341432SJohn Baldwin crp->crp_cipher_key != NULL, 1397c0341432SJohn Baldwin ("cipher request without a key")); 1398c0341432SJohn Baldwin if (csp->csp_auth_klen != 0) 1399c0341432SJohn Baldwin KASSERT(csp->csp_auth_key != NULL || crp->crp_auth_key != NULL, 1400c0341432SJohn Baldwin ("auth request without a key")); 1401c0341432SJohn Baldwin KASSERT(crp->crp_callback != NULL, ("incoming crp without callback")); 1402c0341432SJohn Baldwin } 1403c0341432SJohn Baldwin #endif 1404c0341432SJohn Baldwin 140568f6800cSMark Johnston static int 140668f6800cSMark Johnston crypto_dispatch_one(struct cryptop *crp, int hint) 1407091d81d1SSam Leffler { 14084acae0acSPawel Jakub Dawidek struct cryptocap *cap; 14094acae0acSPawel Jakub Dawidek int result; 1410091d81d1SSam Leffler 1411c0341432SJohn Baldwin #ifdef INVARIANTS 1412c0341432SJohn Baldwin crp_sanity(crp); 1413c0341432SJohn Baldwin #endif 14147290cb47SMark Johnston CRYPTOSTAT_INC(cs_ops); 14157d1853eeSSam Leffler 141698d788c8SMark Johnston crp->crp_retw_id = crp->crp_session->id % crypto_workers_num; 1417de2b2c90SFabien Thomas 141868f6800cSMark Johnston /* 141968f6800cSMark Johnston * Caller marked the request to be processed immediately; dispatch it 142068f6800cSMark Johnston * directly to the driver unless the driver is currently blocked, in 142168f6800cSMark Johnston * which case it is queued for deferred dispatch. 142268f6800cSMark Johnston */ 142368f6800cSMark Johnston cap = crp->crp_session->cap; 142468f6800cSMark Johnston if (!atomic_load_int(&cap->cc_qblocked)) { 142568f6800cSMark Johnston result = crypto_invoke(cap, crp, hint); 142668f6800cSMark Johnston if (result != ERESTART) 142768f6800cSMark Johnston return (result); 142868f6800cSMark Johnston 142968f6800cSMark Johnston /* 143068f6800cSMark Johnston * The driver ran out of resources, put the request on the 143168f6800cSMark Johnston * queue. 143268f6800cSMark Johnston */ 143368f6800cSMark Johnston } 143468f6800cSMark Johnston crypto_batch_enqueue(crp); 143568f6800cSMark Johnston return (0); 143668f6800cSMark Johnston } 143768f6800cSMark Johnston 143868f6800cSMark Johnston int 143968f6800cSMark Johnston crypto_dispatch(struct cryptop *crp) 144068f6800cSMark Johnston { 144168f6800cSMark Johnston return (crypto_dispatch_one(crp, 0)); 144268f6800cSMark Johnston } 144368f6800cSMark Johnston 144468f6800cSMark Johnston int 144568f6800cSMark Johnston crypto_dispatch_async(struct cryptop *crp, int flags) 144668f6800cSMark Johnston { 144739bbca6fSFabien Thomas struct crypto_ret_worker *ret_worker; 144839bbca6fSFabien Thomas 144968f6800cSMark Johnston if (!CRYPTO_SESS_SYNC(crp->crp_session)) { 145068f6800cSMark Johnston /* 145168f6800cSMark Johnston * The driver issues completions asynchronously; don't bother 145268f6800cSMark Johnston * deferring dispatch to a worker thread.
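		 * Synchronous sessions, by contrast, are queued to the crypto
		 * taskqueue below so that the submitting thread does not end
		 * up running the transform itself.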
145368f6800cSMark Johnston */ 145468f6800cSMark Johnston return (crypto_dispatch(crp)); 145568f6800cSMark Johnston } 145639bbca6fSFabien Thomas 145768f6800cSMark Johnston #ifdef INVARIANTS 145868f6800cSMark Johnston crp_sanity(crp); 145968f6800cSMark Johnston #endif 146068f6800cSMark Johnston CRYPTOSTAT_INC(cs_ops); 146168f6800cSMark Johnston 146268f6800cSMark Johnston crp->crp_retw_id = crp->crp_session->id % crypto_workers_num; 146368f6800cSMark Johnston if ((flags & CRYPTO_ASYNC_ORDERED) != 0) { 146468f6800cSMark Johnston crp->crp_flags |= CRYPTO_F_ASYNC_ORDERED; 146568f6800cSMark Johnston ret_worker = CRYPTO_RETW(crp->crp_retw_id); 146639bbca6fSFabien Thomas CRYPTO_RETW_LOCK(ret_worker); 146739bbca6fSFabien Thomas crp->crp_seq = ret_worker->reorder_ops++; 146839bbca6fSFabien Thomas CRYPTO_RETW_UNLOCK(ret_worker); 146939bbca6fSFabien Thomas } 147039bbca6fSFabien Thomas TASK_INIT(&crp->crp_task, 0, crypto_task_invoke, crp); 147139bbca6fSFabien Thomas taskqueue_enqueue(crypto_tq, &crp->crp_task); 147239bbca6fSFabien Thomas return (0); 147339bbca6fSFabien Thomas } 14744acae0acSPawel Jakub Dawidek 147568f6800cSMark Johnston void 147668f6800cSMark Johnston crypto_dispatch_batch(struct cryptopq *crpq, int flags) 147768f6800cSMark Johnston { 147868f6800cSMark Johnston struct cryptop *crp; 147968f6800cSMark Johnston int hint; 148068f6800cSMark Johnston 148168f6800cSMark Johnston while ((crp = TAILQ_FIRST(crpq)) != NULL) { 148268f6800cSMark Johnston hint = TAILQ_NEXT(crp, crp_next) != NULL ? CRYPTO_HINT_MORE : 0; 148368f6800cSMark Johnston TAILQ_REMOVE(crpq, crp, crp_next); 148468f6800cSMark Johnston if (crypto_dispatch_one(crp, hint) != 0) 148539bbca6fSFabien Thomas crypto_batch_enqueue(crp); 148668f6800cSMark Johnston } 148739bbca6fSFabien Thomas } 148839bbca6fSFabien Thomas 148968f6800cSMark Johnston static void 149039bbca6fSFabien Thomas crypto_batch_enqueue(struct cryptop *crp) 149139bbca6fSFabien Thomas { 149239bbca6fSFabien Thomas 14934acae0acSPawel Jakub Dawidek CRYPTO_Q_LOCK(); 14944acae0acSPawel Jakub Dawidek TAILQ_INSERT_TAIL(&crp_q, crp, crp_next); 14953a865c82SPawel Jakub Dawidek if (crp_sleep) 14963a865c82SPawel Jakub Dawidek wakeup_one(&crp_q); 14973569ae7fSSam Leffler CRYPTO_Q_UNLOCK(); 1498091d81d1SSam Leffler } 1499091d81d1SSam Leffler 150039bbca6fSFabien Thomas static void 150139bbca6fSFabien Thomas crypto_task_invoke(void *ctx, int pending) 150239bbca6fSFabien Thomas { 150339bbca6fSFabien Thomas struct cryptocap *cap; 150439bbca6fSFabien Thomas struct cryptop *crp; 1505c0341432SJohn Baldwin int result; 150639bbca6fSFabien Thomas 150739bbca6fSFabien Thomas crp = (struct cryptop *)ctx; 1508c0341432SJohn Baldwin cap = crp->crp_session->cap; 150939bbca6fSFabien Thomas result = crypto_invoke(cap, crp, 0); 151039bbca6fSFabien Thomas if (result == ERESTART) 151139bbca6fSFabien Thomas crypto_batch_enqueue(crp); 151239bbca6fSFabien Thomas } 151339bbca6fSFabien Thomas 1514091d81d1SSam Leffler /* 1515091d81d1SSam Leffler * Dispatch a crypto request to the appropriate crypto devices. 
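 * This is an internal helper; consumers hand requests to the framework
 * through crypto_dispatch() and friends, which call crypto_invoke() either
 * directly or from the dispatch thread and taskqueue.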
1516091d81d1SSam Leffler */ 1517091d81d1SSam Leffler static int 15184acae0acSPawel Jakub Dawidek crypto_invoke(struct cryptocap *cap, struct cryptop *crp, int hint) 1519091d81d1SSam Leffler { 152099df9148SMark Johnston int error; 15214acae0acSPawel Jakub Dawidek 15224acae0acSPawel Jakub Dawidek KASSERT(crp != NULL, ("%s: crp == NULL", __func__)); 15234acae0acSPawel Jakub Dawidek KASSERT(crp->crp_callback != NULL, 15244acae0acSPawel Jakub Dawidek ("%s: crp->crp_callback == NULL", __func__)); 1525c0341432SJohn Baldwin KASSERT(crp->crp_session != NULL, 1526c0341432SJohn Baldwin ("%s: crp->crp_session == NULL", __func__)); 1527091d81d1SSam Leffler 15284acae0acSPawel Jakub Dawidek if (cap->cc_flags & CRYPTOCAP_F_CLEANUP) { 1529c0341432SJohn Baldwin struct crypto_session_params csp; 15301b0909d5SConrad Meyer crypto_session_t nses; 1531091d81d1SSam Leffler 1532091d81d1SSam Leffler /* 1533091d81d1SSam Leffler * Driver has unregistered; migrate the session and return 1534091d81d1SSam Leffler * an error to the caller so they'll resubmit the op. 15354acae0acSPawel Jakub Dawidek * 15364acae0acSPawel Jakub Dawidek * XXX: What if there are more already queued requests for this 15374acae0acSPawel Jakub Dawidek * session? 1538c0341432SJohn Baldwin * 1539c0341432SJohn Baldwin * XXX: Real solution is to make sessions refcounted 1540c0341432SJohn Baldwin * and force callers to hold a reference when 1541c0341432SJohn Baldwin * assigning to crp_session. Could maybe change 1542c0341432SJohn Baldwin * crypto_getreq to accept a session pointer to make 1543c0341432SJohn Baldwin * that work. Alternatively, we could abandon the 1544c0341432SJohn Baldwin * notion of rewriting crp_session in requests forcing 1545c0341432SJohn Baldwin * the caller to deal with allocating a new session. 1546c0341432SJohn Baldwin * Perhaps provide a method to allow a crp's session to 1547c0341432SJohn Baldwin * be swapped that callers could use. 1548091d81d1SSam Leffler */ 1549c0341432SJohn Baldwin csp = crp->crp_session->csp; 15501b0909d5SConrad Meyer crypto_freesession(crp->crp_session); 15514acae0acSPawel Jakub Dawidek 1552c0341432SJohn Baldwin /* 1553c0341432SJohn Baldwin * XXX: Key pointers may no longer be valid. If we 1554c0341432SJohn Baldwin * really want to support this we need to define the 1555c0341432SJohn Baldwin * KPI such that 'csp' is required to be valid for the 1556c0341432SJohn Baldwin * duration of a session by the caller perhaps. 1557c0341432SJohn Baldwin * 1558c0341432SJohn Baldwin * XXX: If the keys have been changed this will reuse 1559c0341432SJohn Baldwin * the old keys. This probably suggests making 1560c0341432SJohn Baldwin * rekeying more explicit and updating the key 1561c0341432SJohn Baldwin * pointers in 'csp' when the keys change. 1562c0341432SJohn Baldwin */ 1563c0341432SJohn Baldwin if (crypto_newsession(&nses, &csp, 15646810ad6fSSam Leffler CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE) == 0) 15651b0909d5SConrad Meyer crp->crp_session = nses; 1566091d81d1SSam Leffler 1567091d81d1SSam Leffler crp->crp_etype = EAGAIN; 15681a91ccccSSam Leffler crypto_done(crp); 156999df9148SMark Johnston error = 0; 1570091d81d1SSam Leffler } else { 1571091d81d1SSam Leffler /* 157299df9148SMark Johnston * Invoke the driver to process the request. Errors are 157399df9148SMark Johnston * signaled by setting crp_etype before invoking the completion 157499df9148SMark Johnston * callback. 
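		 * CRYPTODEV_PROCESS() itself may only return 0 or ERESTART;
		 * any other failure must be reported through crp_etype.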
1575091d81d1SSam Leffler */ 157699df9148SMark Johnston error = CRYPTODEV_PROCESS(cap->cc_dev, crp, hint); 157799df9148SMark Johnston KASSERT(error == 0 || error == ERESTART, 157899df9148SMark Johnston ("%s: invalid error %d from CRYPTODEV_PROCESS", 157999df9148SMark Johnston __func__, error)); 1580091d81d1SSam Leffler } 158199df9148SMark Johnston return (error); 1582091d81d1SSam Leffler } 1583091d81d1SSam Leffler 1584091d81d1SSam Leffler void 1585946b8f6fSJohn Baldwin crypto_destroyreq(struct cryptop *crp) 1586091d81d1SSam Leffler { 15870d5c337bSPawel Jakub Dawidek #ifdef DIAGNOSTIC 15880d5c337bSPawel Jakub Dawidek { 15890d5c337bSPawel Jakub Dawidek struct cryptop *crp2; 159039bbca6fSFabien Thomas struct crypto_ret_worker *ret_worker; 15910d5c337bSPawel Jakub Dawidek 159270439285SMateusz Guzik if (!crypto_destroyreq_check) 159370439285SMateusz Guzik return; 159470439285SMateusz Guzik 15950d5c337bSPawel Jakub Dawidek CRYPTO_Q_LOCK(); 15960d5c337bSPawel Jakub Dawidek TAILQ_FOREACH(crp2, &crp_q, crp_next) { 15970d5c337bSPawel Jakub Dawidek KASSERT(crp2 != crp, 15980d5c337bSPawel Jakub Dawidek ("Freeing cryptop from the crypto queue (%p).", 15990d5c337bSPawel Jakub Dawidek crp)); 16000d5c337bSPawel Jakub Dawidek } 16010d5c337bSPawel Jakub Dawidek CRYPTO_Q_UNLOCK(); 160239bbca6fSFabien Thomas 160339bbca6fSFabien Thomas FOREACH_CRYPTO_RETW(ret_worker) { 160439bbca6fSFabien Thomas CRYPTO_RETW_LOCK(ret_worker); 160539bbca6fSFabien Thomas TAILQ_FOREACH(crp2, &ret_worker->crp_ret_q, crp_next) { 16060d5c337bSPawel Jakub Dawidek KASSERT(crp2 != crp, 16070d5c337bSPawel Jakub Dawidek ("Freeing cryptop from the return queue (%p).", 16080d5c337bSPawel Jakub Dawidek crp)); 16090d5c337bSPawel Jakub Dawidek } 161039bbca6fSFabien Thomas CRYPTO_RETW_UNLOCK(ret_worker); 161139bbca6fSFabien Thomas } 16120d5c337bSPawel Jakub Dawidek } 16130d5c337bSPawel Jakub Dawidek #endif 1614946b8f6fSJohn Baldwin } 16150d5c337bSPawel Jakub Dawidek 1616946b8f6fSJohn Baldwin void 1617946b8f6fSJohn Baldwin crypto_freereq(struct cryptop *crp) 1618946b8f6fSJohn Baldwin { 1619946b8f6fSJohn Baldwin if (crp == NULL) 1620946b8f6fSJohn Baldwin return; 1621946b8f6fSJohn Baldwin 1622946b8f6fSJohn Baldwin crypto_destroyreq(crp); 1623091d81d1SSam Leffler uma_zfree(cryptop_zone, crp); 1624091d81d1SSam Leffler } 1625091d81d1SSam Leffler 1626946b8f6fSJohn Baldwin void 1627946b8f6fSJohn Baldwin crypto_initreq(struct cryptop *crp, crypto_session_t cses) 1628946b8f6fSJohn Baldwin { 1629946b8f6fSJohn Baldwin memset(crp, 0, sizeof(*crp)); 163051754757SMateusz Guzik crp->crp_session = cses; 1631946b8f6fSJohn Baldwin } 1632946b8f6fSJohn Baldwin 1633091d81d1SSam Leffler struct cryptop * 1634c0341432SJohn Baldwin crypto_getreq(crypto_session_t cses, int how) 1635091d81d1SSam Leffler { 1636091d81d1SSam Leffler struct cryptop *crp; 1637091d81d1SSam Leffler 1638c0341432SJohn Baldwin MPASS(how == M_WAITOK || how == M_NOWAIT); 163951754757SMateusz Guzik crp = uma_zalloc(cryptop_zone, how); 1640946b8f6fSJohn Baldwin if (crp != NULL) 164151754757SMateusz Guzik crypto_initreq(crp, cses); 1642c0341432SJohn Baldwin return (crp); 1643091d81d1SSam Leffler } 1644091d81d1SSam Leffler 1645091d81d1SSam Leffler /* 164674d3f1b6SJohn Baldwin * Clone a crypto request, but associate it with the specified session 164774d3f1b6SJohn Baldwin * rather than inheriting the session from the original request. The 164874d3f1b6SJohn Baldwin * fields describing the request buffers are copied, but not the 164974d3f1b6SJohn Baldwin * opaque field or callback function. 
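 * The caller must therefore set crp_callback (and crp_opaque, if used) on
 * the clone before dispatching it.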
165074d3f1b6SJohn Baldwin */ 165174d3f1b6SJohn Baldwin struct cryptop * 165274d3f1b6SJohn Baldwin crypto_clonereq(struct cryptop *crp, crypto_session_t cses, int how) 165374d3f1b6SJohn Baldwin { 165474d3f1b6SJohn Baldwin struct cryptop *new; 165574d3f1b6SJohn Baldwin 165674d3f1b6SJohn Baldwin MPASS((crp->crp_flags & CRYPTO_F_DONE) == 0); 165774d3f1b6SJohn Baldwin new = crypto_getreq(cses, how); 165874d3f1b6SJohn Baldwin if (new == NULL) 165974d3f1b6SJohn Baldwin return (NULL); 166074d3f1b6SJohn Baldwin 166174d3f1b6SJohn Baldwin memcpy(&new->crp_startcopy, &crp->crp_startcopy, 166274d3f1b6SJohn Baldwin __rangeof(struct cryptop, crp_startcopy, crp_endcopy)); 166374d3f1b6SJohn Baldwin return (new); 166474d3f1b6SJohn Baldwin } 166574d3f1b6SJohn Baldwin 166674d3f1b6SJohn Baldwin /* 1667091d81d1SSam Leffler * Invoke the callback on behalf of the driver. 1668091d81d1SSam Leffler */ 1669091d81d1SSam Leffler void 1670091d81d1SSam Leffler crypto_done(struct cryptop *crp) 1671091d81d1SSam Leffler { 16723569ae7fSSam Leffler KASSERT((crp->crp_flags & CRYPTO_F_DONE) == 0, 16733569ae7fSSam Leffler ("crypto_done: op already done, flags 0x%x", crp->crp_flags)); 16743569ae7fSSam Leffler crp->crp_flags |= CRYPTO_F_DONE; 16757d1853eeSSam Leffler if (crp->crp_etype != 0) 16767290cb47SMark Johnston CRYPTOSTAT_INC(cs_errs); 1677a5c053f5SMark Johnston 1678d8409aafSSam Leffler /* 1679d8409aafSSam Leffler * CBIMM means unconditionally do the callback immediately; 1680d8409aafSSam Leffler * CBIFSYNC means do the callback immediately only if the 1681d8409aafSSam Leffler * operation was done synchronously. Both are used to avoid 1682d8409aafSSam Leffler * doing extraneous context switches; the latter is mostly 1683d8409aafSSam Leffler * used with the software crypto driver. 1684d8409aafSSam Leffler */ 168568f6800cSMark Johnston if ((crp->crp_flags & CRYPTO_F_ASYNC_ORDERED) == 0 && 168668f6800cSMark Johnston ((crp->crp_flags & CRYPTO_F_CBIMM) != 0 || 168768f6800cSMark Johnston ((crp->crp_flags & CRYPTO_F_CBIFSYNC) != 0 && 168868f6800cSMark Johnston CRYPTO_SESS_SYNC(crp->crp_session)))) { 1689eb73a605SSam Leffler /* 1690eb73a605SSam Leffler * Do the callback directly. This is ok when the 1691eb73a605SSam Leffler * callback routine does very little (e.g. the 1692eb73a605SSam Leffler * /dev/crypto callback method just does a wakeup). 1693eb73a605SSam Leffler */ 1694eb73a605SSam Leffler crp->crp_callback(crp); 1695eb73a605SSam Leffler } else { 169639bbca6fSFabien Thomas struct crypto_ret_worker *ret_worker; 169739bbca6fSFabien Thomas bool wake; 169839bbca6fSFabien Thomas 169939bbca6fSFabien Thomas ret_worker = CRYPTO_RETW(crp->crp_retw_id); 170039bbca6fSFabien Thomas 1701eb73a605SSam Leffler /* 1702eb73a605SSam Leffler * Normal case; queue the callback for the thread. 
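		 * Requests dispatched with CRYPTO_F_ASYNC_ORDERED are inserted
		 * into the per-worker ordered queue by sequence number; all
		 * other requests are appended to the plain return queue.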
1703eb73a605SSam Leffler */ 170439bbca6fSFabien Thomas CRYPTO_RETW_LOCK(ret_worker); 170568f6800cSMark Johnston if ((crp->crp_flags & CRYPTO_F_ASYNC_ORDERED) != 0) { 170639bbca6fSFabien Thomas struct cryptop *tmp; 170739bbca6fSFabien Thomas 170868f6800cSMark Johnston TAILQ_FOREACH_REVERSE(tmp, 170968f6800cSMark Johnston &ret_worker->crp_ordered_ret_q, cryptop_q, 171068f6800cSMark Johnston crp_next) { 171139bbca6fSFabien Thomas if (CRYPTO_SEQ_GT(crp->crp_seq, tmp->crp_seq)) { 171268f6800cSMark Johnston TAILQ_INSERT_AFTER( 171368f6800cSMark Johnston &ret_worker->crp_ordered_ret_q, tmp, 171468f6800cSMark Johnston crp, crp_next); 171539bbca6fSFabien Thomas break; 171639bbca6fSFabien Thomas } 171739bbca6fSFabien Thomas } 171839bbca6fSFabien Thomas if (tmp == NULL) { 171968f6800cSMark Johnston TAILQ_INSERT_HEAD( 172068f6800cSMark Johnston &ret_worker->crp_ordered_ret_q, crp, 172168f6800cSMark Johnston crp_next); 172239bbca6fSFabien Thomas } 172339bbca6fSFabien Thomas 172468f6800cSMark Johnston wake = crp->crp_seq == ret_worker->reorder_cur_seq; 172568f6800cSMark Johnston } else { 172668f6800cSMark Johnston wake = TAILQ_EMPTY(&ret_worker->crp_ret_q); 172768f6800cSMark Johnston TAILQ_INSERT_TAIL(&ret_worker->crp_ret_q, crp, 172868f6800cSMark Johnston crp_next); 172939bbca6fSFabien Thomas } 173039bbca6fSFabien Thomas 173139bbca6fSFabien Thomas if (wake) 173239bbca6fSFabien Thomas wakeup_one(&ret_worker->crp_ret_q); /* shared wait channel */ 173339bbca6fSFabien Thomas CRYPTO_RETW_UNLOCK(ret_worker); 1734091d81d1SSam Leffler } 1735eb73a605SSam Leffler } 1736091d81d1SSam Leffler 1737091d81d1SSam Leffler /* 173851e45326SSam Leffler * Terminate a thread at module unload. The process that 173951e45326SSam Leffler * initiated this is waiting for us to signal that we're gone; 174051e45326SSam Leffler * wake it up and exit. We use the driver table lock to insure 174151e45326SSam Leffler * we don't do the wakeup before they're waiting. There is no 174251e45326SSam Leffler * race here because the waiter sleeps on the proc lock for the 174351e45326SSam Leffler * thread so it gets notified at the right time because of an 174451e45326SSam Leffler * extra wakeup that's done in exit1(). 174551e45326SSam Leffler */ 1746091d81d1SSam Leffler static void 174751e45326SSam Leffler crypto_finis(void *chan) 1748091d81d1SSam Leffler { 174951e45326SSam Leffler CRYPTO_DRIVER_LOCK(); 175051e45326SSam Leffler wakeup_one(chan); 175151e45326SSam Leffler CRYPTO_DRIVER_UNLOCK(); 175271785781SJohn Baldwin kthread_exit(); 1753091d81d1SSam Leffler } 1754091d81d1SSam Leffler 1755091d81d1SSam Leffler /* 17561a91ccccSSam Leffler * Crypto thread, dispatches crypto requests. 1757091d81d1SSam Leffler */ 1758091d81d1SSam Leffler static void 175971785781SJohn Baldwin crypto_dispatch_thread(void *arg __unused) 1760091d81d1SSam Leffler { 17611a91ccccSSam Leffler struct cryptop *crp, *submit; 1762091d81d1SSam Leffler struct cryptocap *cap; 1763091d81d1SSam Leffler int result, hint; 1764091d81d1SSam Leffler 17656ed982a2SAndrew Turner #if defined(__i386__) || defined(__amd64__) || defined(__aarch64__) 176604c49e68SKonstantin Belousov fpu_kern_thread(FPU_KERN_NORMAL); 176704c49e68SKonstantin Belousov #endif 176804c49e68SKonstantin Belousov 17691a91ccccSSam Leffler CRYPTO_Q_LOCK(); 1770091d81d1SSam Leffler for (;;) { 1771091d81d1SSam Leffler /* 1772091d81d1SSam Leffler * Find the first element in the queue that can be 1773091d81d1SSam Leffler * processed and look-ahead to see if multiple ops 1774091d81d1SSam Leffler * are ready for the same driver. 
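		 * Requests whose driver is being torn down
		 * (CRYPTOCAP_F_CLEANUP) are taken immediately so that they
		 * can be migrated to another driver.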
1775091d81d1SSam Leffler */ 1776091d81d1SSam Leffler submit = NULL; 1777091d81d1SSam Leffler hint = 0; 1778091d81d1SSam Leffler TAILQ_FOREACH(crp, &crp_q, crp_next) { 1779c0341432SJohn Baldwin cap = crp->crp_session->cap; 17804acae0acSPawel Jakub Dawidek /* 17814acae0acSPawel Jakub Dawidek * The driver cannot disappear when there is an active 17824acae0acSPawel Jakub Dawidek * session. 17834acae0acSPawel Jakub Dawidek */ 1784c3c82036SPawel Jakub Dawidek KASSERT(cap != NULL, ("%s:%u Driver disappeared.", 1785c3c82036SPawel Jakub Dawidek __func__, __LINE__)); 1786c0341432SJohn Baldwin if (cap->cc_flags & CRYPTOCAP_F_CLEANUP) { 1787091d81d1SSam Leffler /* Op needs to be migrated, process it. */ 1788091d81d1SSam Leffler if (submit == NULL) 1789091d81d1SSam Leffler submit = crp; 1790091d81d1SSam Leffler break; 1791091d81d1SSam Leffler } 1792091d81d1SSam Leffler if (!cap->cc_qblocked) { 1793091d81d1SSam Leffler if (submit != NULL) { 1794091d81d1SSam Leffler /* 1795091d81d1SSam Leffler * We stop on finding another op, 1796091d81d1SSam Leffler * regardless of whether it's for the same 1797091d81d1SSam Leffler * driver or not. We could keep 1798091d81d1SSam Leffler * searching the queue but it might be 1799091d81d1SSam Leffler * better to just use a per-driver 1800091d81d1SSam Leffler * queue instead. 1801091d81d1SSam Leffler */ 1802c0341432SJohn Baldwin if (submit->crp_session->cap == cap) 1803091d81d1SSam Leffler hint = CRYPTO_HINT_MORE; 1804091d81d1SSam Leffler } else { 1805091d81d1SSam Leffler submit = crp; 1806091d81d1SSam Leffler } 180768f6800cSMark Johnston break; 1808091d81d1SSam Leffler } 1809091d81d1SSam Leffler } 1810091d81d1SSam Leffler if (submit != NULL) { 1811091d81d1SSam Leffler TAILQ_REMOVE(&crp_q, submit, crp_next); 1812c0341432SJohn Baldwin cap = submit->crp_session->cap; 1813c3c82036SPawel Jakub Dawidek KASSERT(cap != NULL, ("%s:%u Driver disappeared.", 1814c3c82036SPawel Jakub Dawidek __func__, __LINE__)); 1815c0341432SJohn Baldwin CRYPTO_Q_UNLOCK(); 18164acae0acSPawel Jakub Dawidek result = crypto_invoke(cap, submit, hint); 1817c0341432SJohn Baldwin CRYPTO_Q_LOCK(); 1818091d81d1SSam Leffler if (result == ERESTART) { 1819091d81d1SSam Leffler /* 1820091d81d1SSam Leffler * The driver ran out of resources, mark the 1821091d81d1SSam Leffler * driver ``blocked'' for cryptop's and put 1822091d81d1SSam Leffler * the request back in the queue. It would be 1823091d81d1SSam Leffler * best to put the request back where we got 1824091d81d1SSam Leffler * it but that's hard so for now we put it 1825091d81d1SSam Leffler * at the front. This should be ok; putting 1826091d81d1SSam Leffler * it at the end does not work. 1827091d81d1SSam Leffler */ 1828c0341432SJohn Baldwin cap->cc_qblocked = 1; 1829091d81d1SSam Leffler TAILQ_INSERT_HEAD(&crp_q, submit, crp_next); 18307290cb47SMark Johnston CRYPTOSTAT_INC(cs_blocks); 1831091d81d1SSam Leffler } 183276681661SJohn Baldwin } else { 1833091d81d1SSam Leffler /* 1834091d81d1SSam Leffler * Nothing more to be processed. Sleep until we're 1835091d81d1SSam Leffler * woken because there are more ops to process. 1836091d81d1SSam Leffler * This happens either by submission or by a driver 1837091d81d1SSam Leffler * becoming unblocked and notifying us through 1838091d81d1SSam Leffler * crypto_unblock. Note that when we wake up we 1839091d81d1SSam Leffler * start processing each queue again from the 1840091d81d1SSam Leffler * front.
It's not clear that it's important to 1841091d81d1SSam Leffler * preserve this ordering since ops may finish 1842091d81d1SSam Leffler * out of order if dispatched to different devices 1843091d81d1SSam Leffler * and some become blocked while others do not. 1844091d81d1SSam Leffler */ 18453a865c82SPawel Jakub Dawidek crp_sleep = 1; 18461a91ccccSSam Leffler msleep(&crp_q, &crypto_q_mtx, PWAIT, "crypto_wait", 0); 18473a865c82SPawel Jakub Dawidek crp_sleep = 0; 184871785781SJohn Baldwin if (cryptotd == NULL) 184951e45326SSam Leffler break; 18507290cb47SMark Johnston CRYPTOSTAT_INC(cs_intrs); 1851091d81d1SSam Leffler } 1852091d81d1SSam Leffler } 185351e45326SSam Leffler CRYPTO_Q_UNLOCK(); 18541a91ccccSSam Leffler 185551e45326SSam Leffler crypto_finis(&crp_q); 18561a91ccccSSam Leffler } 18571a91ccccSSam Leffler 18581a91ccccSSam Leffler /* 18591a91ccccSSam Leffler * Crypto returns thread, does callbacks for processed crypto requests. 18601a91ccccSSam Leffler * Callbacks are done here, rather than in the crypto drivers, because 18611a91ccccSSam Leffler * callbacks typically are expensive and would slow interrupt handling. 18621a91ccccSSam Leffler */ 18631a91ccccSSam Leffler static void 186471785781SJohn Baldwin crypto_ret_thread(void *arg) 18651a91ccccSSam Leffler { 186671785781SJohn Baldwin struct crypto_ret_worker *ret_worker = arg; 18671a91ccccSSam Leffler struct cryptop *crpt; 18681a91ccccSSam Leffler 186939bbca6fSFabien Thomas CRYPTO_RETW_LOCK(ret_worker); 18701a91ccccSSam Leffler for (;;) { 18711a91ccccSSam Leffler /* Harvest return q's for completed ops */ 187239bbca6fSFabien Thomas crpt = TAILQ_FIRST(&ret_worker->crp_ordered_ret_q); 187339bbca6fSFabien Thomas if (crpt != NULL) { 187439bbca6fSFabien Thomas if (crpt->crp_seq == ret_worker->reorder_cur_seq) { 187539bbca6fSFabien Thomas TAILQ_REMOVE(&ret_worker->crp_ordered_ret_q, crpt, crp_next); 187639bbca6fSFabien Thomas ret_worker->reorder_cur_seq++; 187739bbca6fSFabien Thomas } else { 187839bbca6fSFabien Thomas crpt = NULL; 187939bbca6fSFabien Thomas } 188039bbca6fSFabien Thomas } 18811a91ccccSSam Leffler 188239bbca6fSFabien Thomas if (crpt == NULL) { 188339bbca6fSFabien Thomas crpt = TAILQ_FIRST(&ret_worker->crp_ret_q); 188439bbca6fSFabien Thomas if (crpt != NULL) 188539bbca6fSFabien Thomas TAILQ_REMOVE(&ret_worker->crp_ret_q, crpt, crp_next); 188639bbca6fSFabien Thomas } 188739bbca6fSFabien Thomas 188876681661SJohn Baldwin if (crpt != NULL) { 188939bbca6fSFabien Thomas CRYPTO_RETW_UNLOCK(ret_worker); 18901a91ccccSSam Leffler /* 18911a91ccccSSam Leffler * Run callbacks unlocked. 18921a91ccccSSam Leffler */ 1893a5c053f5SMark Johnston if (crpt != NULL) 18941a91ccccSSam Leffler crpt->crp_callback(crpt); 189539bbca6fSFabien Thomas CRYPTO_RETW_LOCK(ret_worker); 18961a91ccccSSam Leffler } else { 18971a91ccccSSam Leffler /* 18981a91ccccSSam Leffler * Nothing more to be processed. Sleep until we're 18991a91ccccSSam Leffler * woken because there are more returns to process. 
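			 * Both return queues share this wait channel, so a
			 * single wakeup from crypto_done() covers either kind
			 * of completion.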
19001a91ccccSSam Leffler */ 190139bbca6fSFabien Thomas msleep(&ret_worker->crp_ret_q, &ret_worker->crypto_ret_mtx, PWAIT, 19021a91ccccSSam Leffler "crypto_ret_wait", 0); 190371785781SJohn Baldwin if (ret_worker->td == NULL) 190451e45326SSam Leffler break; 19057290cb47SMark Johnston CRYPTOSTAT_INC(cs_rets); 19061a91ccccSSam Leffler } 19071a91ccccSSam Leffler } 190839bbca6fSFabien Thomas CRYPTO_RETW_UNLOCK(ret_worker); 190951e45326SSam Leffler 191039bbca6fSFabien Thomas crypto_finis(&ret_worker->crp_ret_q); 19111a91ccccSSam Leffler } 19126810ad6fSSam Leffler 19136810ad6fSSam Leffler #ifdef DDB 19146810ad6fSSam Leffler static void 19156810ad6fSSam Leffler db_show_drivers(void) 19166810ad6fSSam Leffler { 19176810ad6fSSam Leffler int hid; 19186810ad6fSSam Leffler 191976681661SJohn Baldwin db_printf("%12s %4s %8s %2s\n" 19206810ad6fSSam Leffler , "Device" 19216810ad6fSSam Leffler , "Ses" 19226810ad6fSSam Leffler , "Flags" 19236810ad6fSSam Leffler , "QB" 19246810ad6fSSam Leffler ); 1925c0341432SJohn Baldwin for (hid = 0; hid < crypto_drivers_size; hid++) { 1926c0341432SJohn Baldwin const struct cryptocap *cap = crypto_drivers[hid]; 1927c0341432SJohn Baldwin if (cap == NULL) 19286810ad6fSSam Leffler continue; 192976681661SJohn Baldwin db_printf("%-12s %4u %08x %2u\n" 19306810ad6fSSam Leffler , device_get_nameunit(cap->cc_dev) 19316810ad6fSSam Leffler , cap->cc_sessions 19326810ad6fSSam Leffler , cap->cc_flags 19336810ad6fSSam Leffler , cap->cc_qblocked 19346810ad6fSSam Leffler ); 19356810ad6fSSam Leffler } 19366810ad6fSSam Leffler } 19376810ad6fSSam Leffler 1938c84c5e00SMitchell Horne DB_SHOW_COMMAND_FLAGS(crypto, db_show_crypto, DB_CMD_MEMSAFE) 19396810ad6fSSam Leffler { 19406810ad6fSSam Leffler struct cryptop *crp; 194139bbca6fSFabien Thomas struct crypto_ret_worker *ret_worker; 19426810ad6fSSam Leffler 19436810ad6fSSam Leffler db_show_drivers(); 19446810ad6fSSam Leffler db_printf("\n"); 19456810ad6fSSam Leffler 19466810ad6fSSam Leffler db_printf("%4s %8s %4s %4s %4s %4s %8s %8s\n", 19476810ad6fSSam Leffler "HID", "Caps", "Ilen", "Olen", "Etype", "Flags", 1948c0341432SJohn Baldwin "Device", "Callback"); 19496810ad6fSSam Leffler TAILQ_FOREACH(crp, &crp_q, crp_next) { 19509c0e3d3aSJohn Baldwin db_printf("%4u %08x %4u %4u %04x %8p %8p\n" 1951c0341432SJohn Baldwin , crp->crp_session->cap->cc_hid 19521b0909d5SConrad Meyer , (int) crypto_ses2caps(crp->crp_session) 19539c0e3d3aSJohn Baldwin , crp->crp_olen 19546810ad6fSSam Leffler , crp->crp_etype 19556810ad6fSSam Leffler , crp->crp_flags 1956c0341432SJohn Baldwin , device_get_nameunit(crp->crp_session->cap->cc_dev) 19576810ad6fSSam Leffler , crp->crp_callback 19586810ad6fSSam Leffler ); 19596810ad6fSSam Leffler } 196039bbca6fSFabien Thomas FOREACH_CRYPTO_RETW(ret_worker) { 196139bbca6fSFabien Thomas db_printf("\n%8s %4s %4s %4s %8s\n", 196239bbca6fSFabien Thomas "ret_worker", "HID", "Etype", "Flags", "Callback"); 196339bbca6fSFabien Thomas if (!TAILQ_EMPTY(&ret_worker->crp_ret_q)) { 196439bbca6fSFabien Thomas TAILQ_FOREACH(crp, &ret_worker->crp_ret_q, crp_next) { 196539bbca6fSFabien Thomas db_printf("%8td %4u %4u %04x %8p\n" 196639bbca6fSFabien Thomas , CRYPTO_RETW_ID(ret_worker) 1967c0341432SJohn Baldwin , crp->crp_session->cap->cc_hid 19686810ad6fSSam Leffler , crp->crp_etype 19696810ad6fSSam Leffler , crp->crp_flags 19706810ad6fSSam Leffler , crp->crp_callback 19716810ad6fSSam Leffler ); 19726810ad6fSSam Leffler } 19736810ad6fSSam Leffler } 19746810ad6fSSam Leffler } 197539bbca6fSFabien Thomas } 19766810ad6fSSam Leffler #endif 19776810ad6fSSam 
Leffler 19786810ad6fSSam Leffler int crypto_modevent(module_t mod, int type, void *unused); 19796810ad6fSSam Leffler 19806810ad6fSSam Leffler /* 19816810ad6fSSam Leffler * Initialization code, both for static and dynamic loading. 19826810ad6fSSam Leffler * Note this is not invoked with the usual DECLARE_MODULE 19836810ad6fSSam Leffler * mechanism but instead is listed as a dependency by the 19846810ad6fSSam Leffler * cryptosoft driver. This guarantees proper ordering of 19856810ad6fSSam Leffler * calls on module load/unload. 19866810ad6fSSam Leffler */ 19876810ad6fSSam Leffler int 19886810ad6fSSam Leffler crypto_modevent(module_t mod, int type, void *unused) 19896810ad6fSSam Leffler { 19906810ad6fSSam Leffler int error = EINVAL; 19916810ad6fSSam Leffler 19926810ad6fSSam Leffler switch (type) { 19936810ad6fSSam Leffler case MOD_LOAD: 19946810ad6fSSam Leffler error = crypto_init(); 19956810ad6fSSam Leffler if (error == 0 && bootverbose) 19966810ad6fSSam Leffler printf("crypto: <crypto core>\n"); 19976810ad6fSSam Leffler break; 19986810ad6fSSam Leffler case MOD_UNLOAD: 19996810ad6fSSam Leffler /*XXX disallow if active sessions */ 20006810ad6fSSam Leffler error = 0; 20016810ad6fSSam Leffler crypto_destroy(); 20026810ad6fSSam Leffler return 0; 20036810ad6fSSam Leffler } 20046810ad6fSSam Leffler return error; 20056810ad6fSSam Leffler } 20066810ad6fSSam Leffler MODULE_VERSION(crypto, 1); 20076810ad6fSSam Leffler MODULE_DEPEND(crypto, zlib, 1, 1, 1); 2008
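
/*
 * Example: a rough sketch of how an in-kernel consumer might drive this
 * framework.  This is illustrative only (the foo_* names are hypothetical,
 * and error handling and completion synchronization are elided); it is not
 * part of the framework itself.
 *
 *	static int
 *	foo_encrypt_done(struct cryptop *crp)
 *	{
 *		// Record crp->crp_etype and wake up the waiting thread.
 *		wakeup(crp);
 *		return (0);
 *	}
 *
 *	static int
 *	foo_encrypt(void *buf, size_t len, const void *key, size_t klen)
 *	{
 *		struct crypto_session_params csp;
 *		crypto_session_t cses;
 *		struct cryptop *crp;
 *		int error;
 *
 *		// Describe the transform and create a session.
 *		memset(&csp, 0, sizeof(csp));
 *		csp.csp_mode = CSP_MODE_CIPHER;
 *		csp.csp_cipher_alg = CRYPTO_AES_CBC;
 *		csp.csp_cipher_key = key;
 *		csp.csp_cipher_klen = klen;
 *		csp.csp_ivlen = AES_BLOCK_LEN;
 *		error = crypto_newsession(&cses, &csp,
 *		    CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE);
 *		if (error != 0)
 *			return (error);
 *
 *		// Build a request: buf holds the IV followed by the payload.
 *		crp = crypto_getreq(cses, M_WAITOK);
 *		crp->crp_op = CRYPTO_OP_ENCRYPT;
 *		crp->crp_flags = CRYPTO_F_CBIMM;
 *		crypto_use_buf(crp, buf, len);
 *		crp->crp_iv_start = 0;
 *		crp->crp_payload_start = AES_BLOCK_LEN;
 *		crp->crp_payload_length = len - AES_BLOCK_LEN;
 *		crp->crp_callback = foo_encrypt_done;
 *
 *		error = crypto_dispatch(crp);
 *		// ... wait for foo_encrypt_done() to run, collect
 *		// crp->crp_etype, then crypto_freereq(crp) and
 *		// crypto_freesession(cses) ...
 *		return (error);
 *	}
 */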