1*ff733a25Sriastradh /* $NetBSD: crypto.c,v 1.131 2022/06/26 22:52:30 riastradh Exp $ */
2cdfce9ceSjonathan /* $FreeBSD: src/sys/opencrypto/crypto.c,v 1.4.2.5 2003/02/26 00:14:05 sam Exp $ */
3cdfce9ceSjonathan /* $OpenBSD: crypto.c,v 1.41 2002/07/17 23:52:38 art Exp $ */
4cdfce9ceSjonathan
5597ee710Stls /*-
6597ee710Stls * Copyright (c) 2008 The NetBSD Foundation, Inc.
7597ee710Stls * All rights reserved.
8597ee710Stls *
9597ee710Stls * This code is derived from software contributed to The NetBSD Foundation
10597ee710Stls * by Coyote Point Systems, Inc.
11597ee710Stls *
12597ee710Stls * Redistribution and use in source and binary forms, with or without
13597ee710Stls * modification, are permitted provided that the following conditions
14597ee710Stls * are met:
15597ee710Stls * 1. Redistributions of source code must retain the above copyright
16597ee710Stls * notice, this list of conditions and the following disclaimer.
17597ee710Stls * 2. Redistributions in binary form must reproduce the above copyright
18597ee710Stls * notice, this list of conditions and the following disclaimer in the
19597ee710Stls * documentation and/or other materials provided with the distribution.
20597ee710Stls *
21597ee710Stls * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
22597ee710Stls * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
23597ee710Stls * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
24597ee710Stls * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
25597ee710Stls * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26597ee710Stls * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27597ee710Stls * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28597ee710Stls * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29597ee710Stls * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30597ee710Stls * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31597ee710Stls * POSSIBILITY OF SUCH DAMAGE.
32597ee710Stls */
33597ee710Stls
34cdfce9ceSjonathan /*
35cdfce9ceSjonathan * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu)
36cdfce9ceSjonathan *
37cdfce9ceSjonathan * This code was written by Angelos D. Keromytis in Athens, Greece, in
38cdfce9ceSjonathan * February 2000. Network Security Technologies Inc. (NSTI) kindly
39cdfce9ceSjonathan * supported the development of this code.
40cdfce9ceSjonathan *
41cdfce9ceSjonathan * Copyright (c) 2000, 2001 Angelos D. Keromytis
42cdfce9ceSjonathan *
43cdfce9ceSjonathan * Permission to use, copy, and modify this software with or without fee
44cdfce9ceSjonathan * is hereby granted, provided that this entire notice is included in
45cdfce9ceSjonathan * all source code copies of any software which is or includes a copy or
46cdfce9ceSjonathan * modification of this software.
47cdfce9ceSjonathan *
48cdfce9ceSjonathan * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
49cdfce9ceSjonathan * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
50cdfce9ceSjonathan * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
51cdfce9ceSjonathan * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
52cdfce9ceSjonathan * PURPOSE.
53cdfce9ceSjonathan */
54cdfce9ceSjonathan
55cdfce9ceSjonathan #include <sys/cdefs.h>
56*ff733a25Sriastradh __KERNEL_RCSID(0, "$NetBSD: crypto.c,v 1.131 2022/06/26 22:52:30 riastradh Exp $");
57cdfce9ceSjonathan
58cdfce9ceSjonathan #include <sys/param.h>
59cdfce9ceSjonathan #include <sys/reboot.h>
60cdfce9ceSjonathan #include <sys/systm.h>
61cdfce9ceSjonathan #include <sys/proc.h>
62cdfce9ceSjonathan #include <sys/pool.h>
63cdfce9ceSjonathan #include <sys/kthread.h>
647bc6d90cSthorpej #include <sys/once.h>
6503d1230dSchristos #include <sys/sysctl.h>
6646ed8f7dSad #include <sys/intr.h>
672dd4f4d9Spgoyette #include <sys/errno.h>
682dd4f4d9Spgoyette #include <sys/module.h>
69024c4368Sknakahara #include <sys/xcall.h>
70f2a05a61Sknakahara #include <sys/device.h>
71b6082029Sknakahara #include <sys/cpu.h>
72100d0481Sknakahara #include <sys/percpu.h>
73b6082029Sknakahara #include <sys/kmem.h>
74cdfce9ceSjonathan
752dd4f4d9Spgoyette #if defined(_KERNEL_OPT)
76e5bd2a12Stls #include "opt_ocf.h"
772dd4f4d9Spgoyette #endif
782dd4f4d9Spgoyette
7946ed8f7dSad #include <opencrypto/cryptodev.h>
80cdfce9ceSjonathan #include <opencrypto/xform.h> /* XXX for M_XDATA */
81cdfce9ceSjonathan
82cdfce9ceSjonathan /*
83cdfce9ceSjonathan * Crypto drivers register themselves by allocating a slot in the
84cdfce9ceSjonathan * crypto_drivers table with crypto_get_driverid() and then registering
85cdfce9ceSjonathan * each algorithm they support with crypto_register() and crypto_kregister().
86cdfce9ceSjonathan */
87a3f2d30dSknakahara /* Don't directly access crypto_drivers[i], use crypto_checkdriver(i). */
/*
 * Registered crypto drivers.  The table, its length and its mutex are
 * packed into one cacheline-aligned struct; the macros below preserve
 * the historical names used throughout this file.
 */
88cd14ce8cSknakahara static struct {
89cd14ce8cSknakahara kmutex_t mtx;
90cd14ce8cSknakahara int num;
91cd14ce8cSknakahara struct cryptocap *list;
92cd14ce8cSknakahara } crypto_drv __cacheline_aligned;
93cd14ce8cSknakahara #define crypto_drv_mtx (crypto_drv.mtx)
94cd14ce8cSknakahara #define crypto_drivers_num (crypto_drv.num)
95cd14ce8cSknakahara #define crypto_drivers (crypto_drv.list)
96cd14ce8cSknakahara
/*
 * Softint handles: crypto_q_si runs cryptointr() to dispatch queued
 * requests, crypto_ret_si runs cryptoret_softint() for completions.
 */
9782f82d0bSknakahara static void *crypto_q_si;
98024c4368Sknakahara static void *crypto_ret_si;
99024c4368Sknakahara
100cdfce9ceSjonathan /*
101cdfce9ceSjonathan * There are two queues for crypto requests; one for symmetric (e.g.
102cdfce9ceSjonathan * cipher) operations and one for asymmetric (e.g. MOD) operations.
103cdfce9ceSjonathan * See below for how synchronization is handled.
104cdfce9ceSjonathan */
105100d0481Sknakahara TAILQ_HEAD(crypto_crp_q, cryptop);
106100d0481Sknakahara TAILQ_HEAD(crypto_crp_kq, cryptkop);
/*
 * Per-CPU pair of pending-request queues: crp_q for symmetric ops,
 * crp_kq for asymmetric ops.  The tail queues themselves are allocated
 * in crypto_crp_qs_init_pc().
 */
107100d0481Sknakahara struct crypto_crp_qs {
108d33bbbcbSknakahara struct crypto_crp_q *crp_q;
109d33bbbcbSknakahara struct crypto_crp_kq *crp_kq;
110100d0481Sknakahara };
111100d0481Sknakahara static percpu_t *crypto_crp_qs_percpu;
112100d0481Sknakahara
113100d0481Sknakahara static inline struct crypto_crp_qs *
crypto_get_crp_qs(int * s)114100d0481Sknakahara crypto_get_crp_qs(int *s)
115100d0481Sknakahara {
116100d0481Sknakahara
117100d0481Sknakahara KASSERT(s != NULL);
118100d0481Sknakahara
119100d0481Sknakahara *s = splsoftnet();
120100d0481Sknakahara return percpu_getref(crypto_crp_qs_percpu);
121100d0481Sknakahara }
122100d0481Sknakahara
123100d0481Sknakahara static inline void
crypto_put_crp_qs(int * s)124100d0481Sknakahara crypto_put_crp_qs(int *s)
125100d0481Sknakahara {
126100d0481Sknakahara
127100d0481Sknakahara KASSERT(s != NULL);
128100d0481Sknakahara
129100d0481Sknakahara percpu_putref(crypto_crp_qs_percpu);
130100d0481Sknakahara splx(*s);
131100d0481Sknakahara }
132100d0481Sknakahara
133100d0481Sknakahara static void
crypto_crp_q_is_busy_pc(void * p,void * arg,struct cpu_info * ci __unused)134100d0481Sknakahara crypto_crp_q_is_busy_pc(void *p, void *arg, struct cpu_info *ci __unused)
135100d0481Sknakahara {
136100d0481Sknakahara struct crypto_crp_qs *qs_pc = p;
137100d0481Sknakahara bool *isempty = arg;
138100d0481Sknakahara
139d33bbbcbSknakahara if (!TAILQ_EMPTY(qs_pc->crp_q) || !TAILQ_EMPTY(qs_pc->crp_kq))
140100d0481Sknakahara *isempty = true;
141100d0481Sknakahara }
142100d0481Sknakahara
143100d0481Sknakahara static void
crypto_crp_qs_init_pc(void * p,void * arg __unused,struct cpu_info * ci __unused)144100d0481Sknakahara crypto_crp_qs_init_pc(void *p, void *arg __unused, struct cpu_info *ci __unused)
145100d0481Sknakahara {
146100d0481Sknakahara struct crypto_crp_qs *qs = p;
147100d0481Sknakahara
148d33bbbcbSknakahara qs->crp_q = kmem_alloc(sizeof(struct crypto_crp_q), KM_SLEEP);
149d33bbbcbSknakahara qs->crp_kq = kmem_alloc(sizeof(struct crypto_crp_kq), KM_SLEEP);
150d33bbbcbSknakahara
151d33bbbcbSknakahara TAILQ_INIT(qs->crp_q);
152d33bbbcbSknakahara TAILQ_INIT(qs->crp_kq);
153100d0481Sknakahara }
154cdfce9ceSjonathan
155cdfce9ceSjonathan /*
156cdfce9ceSjonathan * There are two queues for processing completed crypto requests; one
157cdfce9ceSjonathan * for the symmetric and one for the asymmetric ops. We only need one
158cdfce9ceSjonathan * but have two to avoid type futzing (cryptop vs. cryptkop). See below
159cdfce9ceSjonathan * for how synchronization is handled.
160cdfce9ceSjonathan */
161b6082029Sknakahara TAILQ_HEAD(crypto_crp_ret_q, cryptop);
162b6082029Sknakahara TAILQ_HEAD(crypto_crp_ret_kq, cryptkop);
/*
 * Per-CPU completed-request queues, guarded by crp_ret_q_mtx.  One
 * such struct per CPU lives in crypto_crp_ret_qs_list[cpu_index(ci)];
 * access it through crypto_get_crp_ret_qs()/crypto_put_crp_ret_qs().
 * crp_ret_q_exit_flag is set during teardown (see crypto_destroy()).
 */
163b6082029Sknakahara struct crypto_crp_ret_qs {
164b6082029Sknakahara kmutex_t crp_ret_q_mtx;
165b6082029Sknakahara bool crp_ret_q_exit_flag;
166cdfce9ceSjonathan
167b6082029Sknakahara struct crypto_crp_ret_q crp_ret_q;
168b6082029Sknakahara int crp_ret_q_len;
169b6082029Sknakahara int crp_ret_q_maxlen; /* queue length limit. <=0 means unlimited. */
170b6082029Sknakahara int crp_ret_q_drops;
1716b65b884Sknakahara
172b6082029Sknakahara struct crypto_crp_ret_kq crp_ret_kq;
173b6082029Sknakahara int crp_ret_kq_len;
174b6082029Sknakahara int crp_ret_kq_maxlen; /* queue length limit. <=0 means unlimited. */
175b6082029Sknakahara int crp_ret_kq_drops;
176b6082029Sknakahara };
177b6082029Sknakahara struct crypto_crp_ret_qs **crypto_crp_ret_qs_list;
1786b65b884Sknakahara
1796b65b884Sknakahara
180b6082029Sknakahara static inline struct crypto_crp_ret_qs *
crypto_get_crp_ret_qs(struct cpu_info * ci)181b6082029Sknakahara crypto_get_crp_ret_qs(struct cpu_info *ci)
182b6082029Sknakahara {
183b6082029Sknakahara u_int cpuid;
184b6082029Sknakahara struct crypto_crp_ret_qs *qs;
1856b65b884Sknakahara
186b6082029Sknakahara KASSERT(ci != NULL);
1876b65b884Sknakahara
188b6082029Sknakahara cpuid = cpu_index(ci);
189b6082029Sknakahara qs = crypto_crp_ret_qs_list[cpuid];
190b6082029Sknakahara mutex_enter(&qs->crp_ret_q_mtx);
191b6082029Sknakahara return qs;
192b6082029Sknakahara }
19348886aafSknakahara
194b6082029Sknakahara static inline void
crypto_put_crp_ret_qs(struct cpu_info * ci)195b6082029Sknakahara crypto_put_crp_ret_qs(struct cpu_info *ci)
196b6082029Sknakahara {
197b6082029Sknakahara u_int cpuid;
198b6082029Sknakahara struct crypto_crp_ret_qs *qs;
19948886aafSknakahara
200b6082029Sknakahara KASSERT(ci != NULL);
2016b65b884Sknakahara
202b6082029Sknakahara cpuid = cpu_index(ci);
203b6082029Sknakahara qs = crypto_crp_ret_qs_list[cpuid];
204b6082029Sknakahara mutex_exit(&qs->crp_ret_q_mtx);
205b6082029Sknakahara }
2066b65b884Sknakahara
/*
 * Default length limits for the per-CPU return queues; 0 (i.e. <= 0)
 * means unlimited.  Overridable from the kernel config.
 */
207c7023dc1Sknakahara #ifndef CRYPTO_RET_Q_MAXLEN
208c7023dc1Sknakahara #define CRYPTO_RET_Q_MAXLEN 0
209c7023dc1Sknakahara #endif
210c7023dc1Sknakahara #ifndef CRYPTO_RET_KQ_MAXLEN
211c7023dc1Sknakahara #define CRYPTO_RET_KQ_MAXLEN 0
212c7023dc1Sknakahara #endif
2136b65b884Sknakahara
2146b65b884Sknakahara static int
sysctl_opencrypto_q_len(SYSCTLFN_ARGS)2156b65b884Sknakahara sysctl_opencrypto_q_len(SYSCTLFN_ARGS)
2166b65b884Sknakahara {
217b6082029Sknakahara int error, len = 0;
218b6082029Sknakahara struct sysctlnode node = *rnode;
2196b65b884Sknakahara
220b6082029Sknakahara for (int i = 0; i < ncpu; i++) {
221b6082029Sknakahara struct crypto_crp_ret_qs *qs;
222b6082029Sknakahara struct cpu_info *ci = cpu_lookup(i);
223b6082029Sknakahara
224b6082029Sknakahara qs = crypto_get_crp_ret_qs(ci);
225b6082029Sknakahara len += qs->crp_ret_q_len;
226b6082029Sknakahara crypto_put_crp_ret_qs(ci);
227b6082029Sknakahara }
228b6082029Sknakahara
229b6082029Sknakahara node.sysctl_data = &len;
230b6082029Sknakahara error = sysctl_lookup(SYSCTLFN_CALL(&node));
2316b65b884Sknakahara if (error || newp == NULL)
2326b65b884Sknakahara return error;
2336b65b884Sknakahara
2346b65b884Sknakahara return 0;
2356b65b884Sknakahara }
2366b65b884Sknakahara
2376b65b884Sknakahara static int
sysctl_opencrypto_q_drops(SYSCTLFN_ARGS)2386b65b884Sknakahara sysctl_opencrypto_q_drops(SYSCTLFN_ARGS)
2396b65b884Sknakahara {
240b6082029Sknakahara int error, drops = 0;
241b6082029Sknakahara struct sysctlnode node = *rnode;
2426b65b884Sknakahara
243b6082029Sknakahara for (int i = 0; i < ncpu; i++) {
244b6082029Sknakahara struct crypto_crp_ret_qs *qs;
245b6082029Sknakahara struct cpu_info *ci = cpu_lookup(i);
246b6082029Sknakahara
247b6082029Sknakahara qs = crypto_get_crp_ret_qs(ci);
248b6082029Sknakahara drops += qs->crp_ret_q_drops;
249b6082029Sknakahara crypto_put_crp_ret_qs(ci);
250b6082029Sknakahara }
251b6082029Sknakahara
252b6082029Sknakahara node.sysctl_data = &drops;
253b6082029Sknakahara error = sysctl_lookup(SYSCTLFN_CALL(&node));
2546b65b884Sknakahara if (error || newp == NULL)
2556b65b884Sknakahara return error;
2566b65b884Sknakahara
2576b65b884Sknakahara return 0;
2586b65b884Sknakahara }
2596b65b884Sknakahara
2606b65b884Sknakahara static int
sysctl_opencrypto_q_maxlen(SYSCTLFN_ARGS)2616b65b884Sknakahara sysctl_opencrypto_q_maxlen(SYSCTLFN_ARGS)
2626b65b884Sknakahara {
263b6082029Sknakahara int error, maxlen;
264b6082029Sknakahara struct crypto_crp_ret_qs *qs;
265b6082029Sknakahara struct sysctlnode node = *rnode;
2666b65b884Sknakahara
267b6082029Sknakahara /* each crp_ret_kq_maxlen is the same. */
268b6082029Sknakahara qs = crypto_get_crp_ret_qs(curcpu());
269b6082029Sknakahara maxlen = qs->crp_ret_q_maxlen;
270b6082029Sknakahara crypto_put_crp_ret_qs(curcpu());
271b6082029Sknakahara
272b6082029Sknakahara node.sysctl_data = &maxlen;
273b6082029Sknakahara error = sysctl_lookup(SYSCTLFN_CALL(&node));
2746b65b884Sknakahara if (error || newp == NULL)
2756b65b884Sknakahara return error;
2766b65b884Sknakahara
277b6082029Sknakahara for (int i = 0; i < ncpu; i++) {
278b6082029Sknakahara struct cpu_info *ci = cpu_lookup(i);
279b6082029Sknakahara
280b6082029Sknakahara qs = crypto_get_crp_ret_qs(ci);
281b6082029Sknakahara qs->crp_ret_q_maxlen = maxlen;
282b6082029Sknakahara crypto_put_crp_ret_qs(ci);
283b6082029Sknakahara }
284b6082029Sknakahara
285b6082029Sknakahara return 0;
286b6082029Sknakahara }
287b6082029Sknakahara
288b6082029Sknakahara static int
sysctl_opencrypto_kq_len(SYSCTLFN_ARGS)289b6082029Sknakahara sysctl_opencrypto_kq_len(SYSCTLFN_ARGS)
290b6082029Sknakahara {
291b6082029Sknakahara int error, len = 0;
292b6082029Sknakahara struct sysctlnode node = *rnode;
293b6082029Sknakahara
294b6082029Sknakahara for (int i = 0; i < ncpu; i++) {
295b6082029Sknakahara struct crypto_crp_ret_qs *qs;
296b6082029Sknakahara struct cpu_info *ci = cpu_lookup(i);
297b6082029Sknakahara
298b6082029Sknakahara qs = crypto_get_crp_ret_qs(ci);
299b6082029Sknakahara len += qs->crp_ret_kq_len;
300b6082029Sknakahara crypto_put_crp_ret_qs(ci);
301b6082029Sknakahara }
302b6082029Sknakahara
303b6082029Sknakahara node.sysctl_data = &len;
304b6082029Sknakahara error = sysctl_lookup(SYSCTLFN_CALL(&node));
305b6082029Sknakahara if (error || newp == NULL)
306b6082029Sknakahara return error;
307b6082029Sknakahara
308b6082029Sknakahara return 0;
309b6082029Sknakahara }
310b6082029Sknakahara
311b6082029Sknakahara static int
sysctl_opencrypto_kq_drops(SYSCTLFN_ARGS)312b6082029Sknakahara sysctl_opencrypto_kq_drops(SYSCTLFN_ARGS)
313b6082029Sknakahara {
314b6082029Sknakahara int error, drops = 0;
315b6082029Sknakahara struct sysctlnode node = *rnode;
316b6082029Sknakahara
317b6082029Sknakahara for (int i = 0; i < ncpu; i++) {
318b6082029Sknakahara struct crypto_crp_ret_qs *qs;
319b6082029Sknakahara struct cpu_info *ci = cpu_lookup(i);
320b6082029Sknakahara
321b6082029Sknakahara qs = crypto_get_crp_ret_qs(ci);
322b6082029Sknakahara drops += qs->crp_ret_kq_drops;
323b6082029Sknakahara crypto_put_crp_ret_qs(ci);
324b6082029Sknakahara }
325b6082029Sknakahara
326b6082029Sknakahara node.sysctl_data = &drops;
327b6082029Sknakahara error = sysctl_lookup(SYSCTLFN_CALL(&node));
328b6082029Sknakahara if (error || newp == NULL)
329b6082029Sknakahara return error;
330b6082029Sknakahara
331b6082029Sknakahara return 0;
332b6082029Sknakahara }
333b6082029Sknakahara
334b6082029Sknakahara static int
sysctl_opencrypto_kq_maxlen(SYSCTLFN_ARGS)335b6082029Sknakahara sysctl_opencrypto_kq_maxlen(SYSCTLFN_ARGS)
336b6082029Sknakahara {
337b6082029Sknakahara int error, maxlen;
338b6082029Sknakahara struct crypto_crp_ret_qs *qs;
339b6082029Sknakahara struct sysctlnode node = *rnode;
340b6082029Sknakahara
341b6082029Sknakahara /* each crp_ret_kq_maxlen is the same. */
342b6082029Sknakahara qs = crypto_get_crp_ret_qs(curcpu());
343b6082029Sknakahara maxlen = qs->crp_ret_kq_maxlen;
344b6082029Sknakahara crypto_put_crp_ret_qs(curcpu());
345b6082029Sknakahara
346b6082029Sknakahara node.sysctl_data = &maxlen;
347b6082029Sknakahara error = sysctl_lookup(SYSCTLFN_CALL(&node));
348b6082029Sknakahara if (error || newp == NULL)
349b6082029Sknakahara return error;
350b6082029Sknakahara
351b6082029Sknakahara for (int i = 0; i < ncpu; i++) {
352b6082029Sknakahara struct cpu_info *ci = cpu_lookup(i);
353b6082029Sknakahara
354b6082029Sknakahara qs = crypto_get_crp_ret_qs(ci);
355b6082029Sknakahara qs->crp_ret_kq_maxlen = maxlen;
356b6082029Sknakahara crypto_put_crp_ret_qs(ci);
357b6082029Sknakahara }
358b6082029Sknakahara
3596b65b884Sknakahara return 0;
3606b65b884Sknakahara }
3616b65b884Sknakahara
362cdfce9ceSjonathan /*
3638a37362bSknakahara * Crypto op and descriptor data structures are allocated
364cdfce9ceSjonathan * from separate private zones(FreeBSD)/pools(netBSD/OpenBSD) .
365cdfce9ceSjonathan */
/* Pool caches for cryptop/cryptodesc/cryptkop; created in crypto_init0(). */
366aeb50172Sknakahara static pool_cache_t cryptop_cache;
367aeb50172Sknakahara static pool_cache_t cryptodesc_cache;
368aeb50172Sknakahara static pool_cache_t cryptkop_cache;
369cdfce9ceSjonathan
/* Sysctl-exported policy knobs (see sysctl_opencrypto_setup()). */
370cdfce9ceSjonathan int crypto_usercrypto = 1; /* userland may open /dev/crypto */
371cdfce9ceSjonathan int crypto_userasymcrypto = 1; /* userland may do asym crypto reqs */
37222b38bb1Sjonathan /*
37322b38bb1Sjonathan * cryptodevallowsoft is (intended to be) sysctl'able, controlling
37422b38bb1Sjonathan * access to hardware versus software transforms as below:
37522b38bb1Sjonathan *
37622b38bb1Sjonathan * crypto_devallowsoft < 0: Force userlevel requests to use software
37722b38bb1Sjonathan * transforms, always
37822b38bb1Sjonathan * crypto_devallowsoft = 0: Use hardware if present, grant userlevel
37922b38bb1Sjonathan * requests for non-accelerated transforms
38022b38bb1Sjonathan * (handling the latter in software)
38122b38bb1Sjonathan * crypto_devallowsoft > 0: Allow user requests only for transforms which
38222b38bb1Sjonathan * are hardware-accelerated.
38322b38bb1Sjonathan */
384a9d00c08Sjonathan int crypto_devallowsoft = 1; /* only use hardware crypto */
38522b38bb1Sjonathan
3861352bf26Spgoyette static void
sysctl_opencrypto_setup(struct sysctllog ** clog)3871352bf26Spgoyette sysctl_opencrypto_setup(struct sysctllog **clog)
38803d1230dSchristos {
3896b65b884Sknakahara const struct sysctlnode *ocnode;
3906b65b884Sknakahara const struct sysctlnode *retqnode, *retkqnode;
3914f6fb3bfSpooka
39203d1230dSchristos sysctl_createv(clog, 0, NULL, NULL,
39303d1230dSchristos CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
39403d1230dSchristos CTLTYPE_INT, "usercrypto",
39503d1230dSchristos SYSCTL_DESCR("Enable/disable user-mode access to "
39603d1230dSchristos "crypto support"),
39703d1230dSchristos NULL, 0, &crypto_usercrypto, 0,
39803d1230dSchristos CTL_KERN, CTL_CREATE, CTL_EOL);
39903d1230dSchristos sysctl_createv(clog, 0, NULL, NULL,
40003d1230dSchristos CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
40103d1230dSchristos CTLTYPE_INT, "userasymcrypto",
40203d1230dSchristos SYSCTL_DESCR("Enable/disable user-mode access to "
40303d1230dSchristos "asymmetric crypto support"),
40403d1230dSchristos NULL, 0, &crypto_userasymcrypto, 0,
40503d1230dSchristos CTL_KERN, CTL_CREATE, CTL_EOL);
40603d1230dSchristos sysctl_createv(clog, 0, NULL, NULL,
40703d1230dSchristos CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
40803d1230dSchristos CTLTYPE_INT, "cryptodevallowsoft",
40903d1230dSchristos SYSCTL_DESCR("Enable/disable use of software "
41003d1230dSchristos "asymmetric crypto support"),
41103d1230dSchristos NULL, 0, &crypto_devallowsoft, 0,
41203d1230dSchristos CTL_KERN, CTL_CREATE, CTL_EOL);
4136b65b884Sknakahara
4146b65b884Sknakahara sysctl_createv(clog, 0, NULL, &ocnode,
4156b65b884Sknakahara CTLFLAG_PERMANENT,
4166b65b884Sknakahara CTLTYPE_NODE, "opencrypto",
4176b65b884Sknakahara SYSCTL_DESCR("opencrypto related entries"),
4186b65b884Sknakahara NULL, 0, NULL, 0,
4196b65b884Sknakahara CTL_CREATE, CTL_EOL);
4206b65b884Sknakahara
4216b65b884Sknakahara sysctl_createv(clog, 0, &ocnode, &retqnode,
4226b65b884Sknakahara CTLFLAG_PERMANENT,
4236b65b884Sknakahara CTLTYPE_NODE, "crypto_ret_q",
4246b65b884Sknakahara SYSCTL_DESCR("crypto_ret_q related entries"),
4256b65b884Sknakahara NULL, 0, NULL, 0,
4266b65b884Sknakahara CTL_CREATE, CTL_EOL);
4276b65b884Sknakahara sysctl_createv(clog, 0, &retqnode, NULL,
4286b65b884Sknakahara CTLFLAG_PERMANENT|CTLFLAG_READONLY,
4296b65b884Sknakahara CTLTYPE_INT, "len",
4306b65b884Sknakahara SYSCTL_DESCR("Current queue length"),
4316b65b884Sknakahara sysctl_opencrypto_q_len, 0,
432b6082029Sknakahara NULL, 0,
4336b65b884Sknakahara CTL_CREATE, CTL_EOL);
4346b65b884Sknakahara sysctl_createv(clog, 0, &retqnode, NULL,
4356b65b884Sknakahara CTLFLAG_PERMANENT|CTLFLAG_READONLY,
4366b65b884Sknakahara CTLTYPE_INT, "drops",
4376b65b884Sknakahara SYSCTL_DESCR("Crypto requests dropped due to full ret queue"),
4386b65b884Sknakahara sysctl_opencrypto_q_drops, 0,
439b6082029Sknakahara NULL, 0,
4406b65b884Sknakahara CTL_CREATE, CTL_EOL);
4416b65b884Sknakahara sysctl_createv(clog, 0, &retqnode, NULL,
4426b65b884Sknakahara CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
4436b65b884Sknakahara CTLTYPE_INT, "maxlen",
4446b65b884Sknakahara SYSCTL_DESCR("Maximum allowed queue length"),
4456b65b884Sknakahara sysctl_opencrypto_q_maxlen, 0,
446b6082029Sknakahara NULL, 0,
4476b65b884Sknakahara CTL_CREATE, CTL_EOL);
4486b65b884Sknakahara
449b6082029Sknakahara
4506b65b884Sknakahara sysctl_createv(clog, 0, &ocnode, &retkqnode,
4516b65b884Sknakahara CTLFLAG_PERMANENT,
4526b65b884Sknakahara CTLTYPE_NODE, "crypto_ret_kq",
4536b65b884Sknakahara SYSCTL_DESCR("crypto_ret_kq related entries"),
4546b65b884Sknakahara NULL, 0, NULL, 0,
4556b65b884Sknakahara CTL_CREATE, CTL_EOL);
4566b65b884Sknakahara sysctl_createv(clog, 0, &retkqnode, NULL,
4576b65b884Sknakahara CTLFLAG_PERMANENT|CTLFLAG_READONLY,
4586b65b884Sknakahara CTLTYPE_INT, "len",
4596b65b884Sknakahara SYSCTL_DESCR("Current queue length"),
460b6082029Sknakahara sysctl_opencrypto_kq_len, 0,
461b6082029Sknakahara NULL, 0,
4626b65b884Sknakahara CTL_CREATE, CTL_EOL);
4636b65b884Sknakahara sysctl_createv(clog, 0, &retkqnode, NULL,
4646b65b884Sknakahara CTLFLAG_PERMANENT|CTLFLAG_READONLY,
4656b65b884Sknakahara CTLTYPE_INT, "drops",
4666b65b884Sknakahara SYSCTL_DESCR("Crypto requests dropped due to full ret queue"),
467b6082029Sknakahara sysctl_opencrypto_kq_drops, 0,
468b6082029Sknakahara NULL, 0,
4696b65b884Sknakahara CTL_CREATE, CTL_EOL);
4706b65b884Sknakahara sysctl_createv(clog, 0, &retkqnode, NULL,
4716b65b884Sknakahara CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
4726b65b884Sknakahara CTLTYPE_INT, "maxlen",
4736b65b884Sknakahara SYSCTL_DESCR("Maximum allowed queue length"),
474b6082029Sknakahara sysctl_opencrypto_kq_maxlen, 0,
475b6082029Sknakahara NULL, 0,
4766b65b884Sknakahara CTL_CREATE, CTL_EOL);
47703d1230dSchristos }
478cdfce9ceSjonathan
479cdfce9ceSjonathan /*
480cdfce9ceSjonathan * Synchronization: read carefully, this is non-trivial.
481cdfce9ceSjonathan *
482cdfce9ceSjonathan * Crypto requests are submitted via crypto_dispatch. Typically
483cdfce9ceSjonathan * these come in from network protocols at spl0 (output path) or
484cdfce9ceSjonathan * spl[,soft]net (input path).
485cdfce9ceSjonathan *
486cdfce9ceSjonathan * Requests are typically passed on the driver directly, but they
487cdfce9ceSjonathan * may also be queued for processing by a software interrupt thread,
488cdfce9ceSjonathan * cryptointr, that runs at splsoftcrypto. This thread dispatches
489cdfce9ceSjonathan * the requests to crypto drivers (h/w or s/w) who call crypto_done
490cdfce9ceSjonathan * when a request is complete. Hardware crypto drivers are assumed
491cdfce9ceSjonathan * to register their IRQ's as network devices so their interrupt handlers
492cdfce9ceSjonathan * and subsequent "done callbacks" happen at spl[imp,net].
493cdfce9ceSjonathan *
494cdfce9ceSjonathan * Completed crypto ops are queued for a separate kernel thread that
495cdfce9ceSjonathan * handles the callbacks at spl0. This decoupling insures the crypto
496cdfce9ceSjonathan * driver interrupt service routine is not delayed while the callback
497cdfce9ceSjonathan * takes place and that callbacks are delivered after a context switch
498cdfce9ceSjonathan * (as opposed to a software interrupt that clients must block).
499cdfce9ceSjonathan *
500cdfce9ceSjonathan * This scheme is not intended for SMP machines.
501cdfce9ceSjonathan */
/* Forward declarations for the dispatch/completion machinery below. */
50282f82d0bSknakahara static void cryptointr(void *); /* swi thread to dispatch ops */
503024c4368Sknakahara static void cryptoret_softint(void *); /* kernel thread for callbacks*/
504aa36e9a1Spgoyette static int crypto_destroy(bool);
505cdfce9ceSjonathan static int crypto_invoke(struct cryptop *crp, int hint);
506cdfce9ceSjonathan static int crypto_kinvoke(struct cryptkop *krp, int hint);
507cdfce9ceSjonathan
/* Driver-table lookup/locking helpers. */
508bd79c5acSknakahara static struct cryptocap *crypto_checkdriver_lock(u_int32_t);
50954c9a772Sknakahara static struct cryptocap *crypto_checkdriver_uninit(u_int32_t);
510da7de9a4Schristos static struct cryptocap *crypto_checkdriver(u_int32_t);
511bd79c5acSknakahara static void crypto_driver_lock(struct cryptocap *);
512bd79c5acSknakahara static void crypto_driver_unlock(struct cryptocap *);
513bd79c5acSknakahara static void crypto_driver_clear(struct cryptocap *);
514a3f2d30dSknakahara
515f2a05a61Sknakahara static int crypto_init_finalize(device_t);
516f2a05a61Sknakahara
517cdfce9ceSjonathan static struct cryptostats cryptostats;
518e5bd2a12Stls #ifdef CRYPTO_TIMING
519cdfce9ceSjonathan static int crypto_timing = 0;
520e5bd2a12Stls #endif
521cdfce9ceSjonathan
5221352bf26Spgoyette static struct sysctllog *sysctl_opencrypto_clog;
5231352bf26Spgoyette
52402991323Schs static void
crypto_crp_ret_qs_init(void)525b6082029Sknakahara crypto_crp_ret_qs_init(void)
526b6082029Sknakahara {
52702991323Schs int i;
528b6082029Sknakahara
529b6082029Sknakahara crypto_crp_ret_qs_list = kmem_alloc(sizeof(struct crypto_crp_ret_qs *) * ncpu,
53002991323Schs KM_SLEEP);
531b6082029Sknakahara
532b6082029Sknakahara for (i = 0; i < ncpu; i++) {
533b6082029Sknakahara struct crypto_crp_ret_qs *qs;
534b6082029Sknakahara
53502991323Schs qs = kmem_alloc(sizeof(struct crypto_crp_ret_qs), KM_SLEEP);
536b6082029Sknakahara mutex_init(&qs->crp_ret_q_mtx, MUTEX_DEFAULT, IPL_NET);
537b6082029Sknakahara qs->crp_ret_q_exit_flag = false;
538b6082029Sknakahara
539b6082029Sknakahara TAILQ_INIT(&qs->crp_ret_q);
540b6082029Sknakahara qs->crp_ret_q_len = 0;
541b6082029Sknakahara qs->crp_ret_q_maxlen = CRYPTO_RET_Q_MAXLEN;
542b6082029Sknakahara qs->crp_ret_q_drops = 0;
543b6082029Sknakahara
544b6082029Sknakahara TAILQ_INIT(&qs->crp_ret_kq);
545b6082029Sknakahara qs->crp_ret_kq_len = 0;
546b6082029Sknakahara qs->crp_ret_kq_maxlen = CRYPTO_RET_KQ_MAXLEN;
547b6082029Sknakahara qs->crp_ret_kq_drops = 0;
548b6082029Sknakahara
549b6082029Sknakahara crypto_crp_ret_qs_list[i] = qs;
550b6082029Sknakahara }
551b6082029Sknakahara }
552b6082029Sknakahara
553b6082029Sknakahara static int
crypto_init0(void)5547bc6d90cSthorpej crypto_init0(void)
555cdfce9ceSjonathan {
556cdfce9ceSjonathan
557396579fbSknakahara mutex_init(&crypto_drv_mtx, MUTEX_DEFAULT, IPL_NONE);
558aeb50172Sknakahara cryptop_cache = pool_cache_init(sizeof(struct cryptop),
559aeb50172Sknakahara coherency_unit, 0, 0, "cryptop", NULL, IPL_NET, NULL, NULL, NULL);
560aeb50172Sknakahara cryptodesc_cache = pool_cache_init(sizeof(struct cryptodesc),
561aeb50172Sknakahara coherency_unit, 0, 0, "cryptdesc", NULL, IPL_NET, NULL, NULL, NULL);
562aeb50172Sknakahara cryptkop_cache = pool_cache_init(sizeof(struct cryptkop),
563aeb50172Sknakahara coherency_unit, 0, 0, "cryptkop", NULL, IPL_NET, NULL, NULL, NULL);
564cdfce9ceSjonathan
5653d6eb800Sriastradh crypto_crp_qs_percpu = percpu_create(sizeof(struct crypto_crp_qs),
5663d6eb800Sriastradh crypto_crp_qs_init_pc, /*XXX*/NULL, NULL);
567100d0481Sknakahara
56802991323Schs crypto_crp_ret_qs_init();
569b6082029Sknakahara
570a70fedd6Sknakahara crypto_drivers = kmem_zalloc(CRYPTO_DRIVERS_INITIAL *
57102991323Schs sizeof(struct cryptocap), KM_SLEEP);
5727bc6d90cSthorpej crypto_drivers_num = CRYPTO_DRIVERS_INITIAL;
573cdfce9ceSjonathan
57482f82d0bSknakahara crypto_q_si = softint_establish(SOFTINT_NET|SOFTINT_MPSAFE, cryptointr, NULL);
57582f82d0bSknakahara if (crypto_q_si == NULL) {
576024c4368Sknakahara printf("crypto_init: cannot establish request queue handler\n");
577024c4368Sknakahara return crypto_destroy(false);
578024c4368Sknakahara }
579024c4368Sknakahara
580f2a05a61Sknakahara /*
58121f43ca3Sknakahara * Some encryption devices (such as mvcesa) are attached before
58221f43ca3Sknakahara * ipi_sysinit(). That causes an assertion in ipi_register() as
583f2a05a61Sknakahara * crypto_ret_si softint uses SOFTINT_RCPU.
584f2a05a61Sknakahara */
585f2a05a61Sknakahara if (config_finalize_register(NULL, crypto_init_finalize) != 0) {
586f2a05a61Sknakahara printf("crypto_init: cannot register crypto_init_finalize\n");
587aa36e9a1Spgoyette return crypto_destroy(false);
588cdfce9ceSjonathan }
58988ab7da9Sad
5901352bf26Spgoyette sysctl_opencrypto_setup(&sysctl_opencrypto_clog);
5911352bf26Spgoyette
592dae53410Syamt return 0;
5937bc6d90cSthorpej }
5947bc6d90cSthorpej
595f2a05a61Sknakahara static int
crypto_init_finalize(device_t self __unused)596f2a05a61Sknakahara crypto_init_finalize(device_t self __unused)
597f2a05a61Sknakahara {
598f2a05a61Sknakahara
599f2a05a61Sknakahara crypto_ret_si = softint_establish(SOFTINT_NET|SOFTINT_MPSAFE|SOFTINT_RCPU,
600f2a05a61Sknakahara &cryptoret_softint, NULL);
601f2a05a61Sknakahara KASSERT(crypto_ret_si != NULL);
602f2a05a61Sknakahara
603f2a05a61Sknakahara return 0;
604f2a05a61Sknakahara }
605f2a05a61Sknakahara
606aa36e9a1Spgoyette int
crypto_init(void)6077bc6d90cSthorpej crypto_init(void)
6087bc6d90cSthorpej {
6099a3687b7Sdaniel static ONCE_DECL(crypto_init_once);
6107bc6d90cSthorpej
611aa36e9a1Spgoyette return RUN_ONCE(&crypto_init_once, crypto_init0);
612cdfce9ceSjonathan }
613cdfce9ceSjonathan
/*
 * Tear down the opencrypto framework (module unload path).
 *
 * exit_kthread: when true, first verify nothing is in flight -- any
 * queued request or live driver session makes us bail with EBUSY --
 * and then tell every per-CPU return queue to stop scheduling
 * cryptoret_softint().
 *
 * Returns 0 on success, EBUSY if requests or sessions are still live.
 */
static int
crypto_destroy(bool exit_kthread)
{
	int i;

	if (exit_kthread) {
		struct cryptocap *cap = NULL;
		bool is_busy = false;

		/* if we have any in-progress requests, don't unload */
		percpu_foreach(crypto_crp_qs_percpu, crypto_crp_q_is_busy_pc,
		    &is_busy);
		if (is_busy)
			return EBUSY;
		/*
		 * FIXME: from this point on, enqueueing to crp_q and
		 * crp_kq should be prohibited; a request slipping in
		 * here would race with the teardown below.
		 */

		/* Refuse to unload while any driver still has sessions. */
		mutex_enter(&crypto_drv_mtx);
		for (i = 0; i < crypto_drivers_num; i++) {
			cap = crypto_checkdriver(i);
			if (cap == NULL)
				continue;
			if (cap->cc_sessions != 0) {
				mutex_exit(&crypto_drv_mtx);
				return EBUSY;
			}
		}
		mutex_exit(&crypto_drv_mtx);
		/*
		 * FIXME: access to crypto_drivers[] and its elements
		 * should be prohibited after this point.
		 */

		/* Ensure cryptoret_softint() is never scheduled again. */
		for (i = 0; i < ncpu; i++) {
			struct crypto_crp_ret_qs *qs;
			struct cpu_info *ci = cpu_lookup(i);

			qs = crypto_get_crp_ret_qs(ci);
			qs->crp_ret_q_exit_flag = true;
			crypto_put_crp_ret_qs(ci);
		}
	}

	if (sysctl_opencrypto_clog != NULL)
		sysctl_teardown(&sysctl_opencrypto_clog);

	/* Disestablish softints before freeing the data they touch. */
	if (crypto_ret_si != NULL)
		softint_disestablish(crypto_ret_si);

	if (crypto_q_si != NULL)
		softint_disestablish(crypto_q_si);

	mutex_enter(&crypto_drv_mtx);
	if (crypto_drivers != NULL)
		kmem_free(crypto_drivers,
		    crypto_drivers_num * sizeof(struct cryptocap));
	mutex_exit(&crypto_drv_mtx);

	percpu_free(crypto_crp_qs_percpu, sizeof(struct crypto_crp_qs));

	pool_cache_destroy(cryptop_cache);
	pool_cache_destroy(cryptodesc_cache);
	pool_cache_destroy(cryptkop_cache);

	mutex_destroy(&crypto_drv_mtx);

	return 0;
}
683cdfce9ceSjonathan
684ad7b969cSknakahara static bool
crypto_driver_suitable(struct cryptocap * cap,struct cryptoini * cri)685ad7b969cSknakahara crypto_driver_suitable(struct cryptocap *cap, struct cryptoini *cri)
686cdfce9ceSjonathan {
687cdfce9ceSjonathan struct cryptoini *cr;
688cdfce9ceSjonathan
689ad7b969cSknakahara for (cr = cri; cr; cr = cr->cri_next)
690ad7b969cSknakahara if (cap->cc_alg[cr->cri_alg] == 0) {
691ad7b969cSknakahara DPRINTF("alg %d not supported\n", cr->cri_alg);
692ad7b969cSknakahara return false;
693ad7b969cSknakahara }
694ad7b969cSknakahara
695ad7b969cSknakahara return true;
696ad7b969cSknakahara }
697cdfce9ceSjonathan
698a15414e5Sknakahara #define CRYPTO_ACCEPT_HARDWARE 0x1
699a15414e5Sknakahara #define CRYPTO_ACCEPT_SOFTWARE 0x2
700cdfce9ceSjonathan /*
701cdfce9ceSjonathan * The algorithm we use here is pretty stupid; just use the
702cdfce9ceSjonathan * first driver that supports all the algorithms we need.
703a15414e5Sknakahara * If there are multiple drivers we choose the driver with
704a15414e5Sknakahara * the fewest active sessions. We prefer hardware-backed
705a15414e5Sknakahara * drivers to software ones.
706cdfce9ceSjonathan *
707cdfce9ceSjonathan * XXX We need more smarts here (in real life too, but that's
708cdfce9ceSjonathan * XXX another story altogether).
709cdfce9ceSjonathan */
/*
 * Pick a driver for a new session.
 *
 * Caller must hold crypto_drv_mtx (we drop and retake it only around
 * module_autoload()).  On success the chosen driver is returned with
 * its cc_lock HELD; the caller is responsible for unlocking it.
 * Returns NULL when no suitable driver exists.
 *
 * hard > 0: hardware drivers only; hard == 0: prefer hardware, fall
 * back to software; hard < 0: software only.
 */
static struct cryptocap *
crypto_select_driver_lock(struct cryptoini *cri, int hard)
{
	u_int32_t hid;
	int accept;
	struct cryptocap *cap, *best;
	int error = 0;

	best = NULL;
	/*
	 * hard == 0 can use both hardware and software drivers.
	 * We use hardware drivers prior to software drivers, so search
	 * hardware drivers at first time.
	 */
	if (hard >= 0)
		accept = CRYPTO_ACCEPT_HARDWARE;
	else
		accept = CRYPTO_ACCEPT_SOFTWARE;
again:
	for (hid = 0; hid < crypto_drivers_num; hid++) {
		cap = crypto_checkdriver(hid);
		if (cap == NULL)
			continue;

		crypto_driver_lock(cap);

		/*
		 * If it's not initialized or has remaining sessions
		 * referencing it, skip.
		 */
		if (cap->cc_newsession == NULL ||
		    (cap->cc_flags & CRYPTOCAP_F_CLEANUP)) {
			crypto_driver_unlock(cap);
			continue;
		}

		/* Hardware required -- ignore software drivers. */
		if ((accept & CRYPTO_ACCEPT_SOFTWARE) == 0
		    && (cap->cc_flags & CRYPTOCAP_F_SOFTWARE)) {
			crypto_driver_unlock(cap);
			continue;
		}
		/* Software required -- ignore hardware drivers. */
		if ((accept & CRYPTO_ACCEPT_HARDWARE) == 0
		    && (cap->cc_flags & CRYPTOCAP_F_SOFTWARE) == 0) {
			crypto_driver_unlock(cap);
			continue;
		}

		/* See if all the algorithms are supported. */
		if (crypto_driver_suitable(cap, cri)) {
			/*
			 * Keep the least-loaded suitable driver locked;
			 * unlock whichever candidate loses.
			 */
			if (best == NULL) {
				/* keep holding crypto_driver_lock(cap) */
				best = cap;
				continue;
			} else if (cap->cc_sessions < best->cc_sessions) {
				crypto_driver_unlock(best);
				/* keep holding crypto_driver_lock(cap) */
				best = cap;
				continue;
			}
		}

		crypto_driver_unlock(cap);
	}
	/* First (hardware-only) pass failed: retry accepting software. */
	if (best == NULL && hard == 0
	    && (accept & CRYPTO_ACCEPT_SOFTWARE) == 0) {
		accept = CRYPTO_ACCEPT_SOFTWARE;
		goto again;
	}

	/*
	 * Still nothing: try to autoload the software crypto module
	 * and rescan once.  Setting error = EINVAL before the retry
	 * guarantees we do not attempt the autoload a second time.
	 */
	if (best == NULL && hard == 0 && error == 0) {
		mutex_exit(&crypto_drv_mtx);
		error = module_autoload("swcrypto", MODULE_CLASS_DRIVER);
		mutex_enter(&crypto_drv_mtx);
		if (error == 0) {
			error = EINVAL;
			goto again;
		}
	}

	return best;
}
793ad7b969cSknakahara
794ad7b969cSknakahara /*
795ad7b969cSknakahara * Create a new session.
796ad7b969cSknakahara */
/*
 * Create a new session on some driver that supports every algorithm
 * in `cri'.  On success *sid encodes the driver (hid+1, upper 32 bits)
 * and the driver-local session id (lower 32 bits), so a valid sid is
 * never 0.  Returns 0 or an errno (EINVAL if no driver fits).
 */
int
crypto_newsession(u_int64_t *sid, struct cryptoini *cri, int hard)
{
	struct cryptocap *cap;
	int err = EINVAL;

	/*
	 * On failure, leave *sid initialized to a sentinel value that
	 * crypto_freesession will ignore. This is the same as what
	 * you get from zero-initialized memory -- some callers (I'm
	 * looking at you, netipsec!) have paths that lead from
	 * zero-initialized memory into crypto_freesession without any
	 * crypto_newsession.
	 */
	*sid = 0;

	mutex_enter(&crypto_drv_mtx);

	/* On success cap is returned with its cc_lock held. */
	cap = crypto_select_driver_lock(cri, hard);
	if (cap != NULL) {
		u_int32_t hid, lid;

		hid = cap - crypto_drivers;
		/* hid+1 must fit the sid encoding below. */
		KASSERT(hid < 0xffffff);
		/*
		 * Can't do everything in one session.
		 *
		 * XXX Fix this. We need to inject a "virtual" session layer right
		 * XXX about here.
		 */

		/* Call the driver initialization routine. */
		lid = hid;		/* Pass the driver ID. */
		/*
		 * Drop cc_lock across the driver callback -- presumably
		 * because cc_newsession may sleep or take its own locks;
		 * NOTE(review): confirm against driver contracts.
		 */
		crypto_driver_unlock(cap);
		err = cap->cc_newsession(cap->cc_arg, &lid, cri);
		crypto_driver_lock(cap);
		if (err == 0) {
			(*sid) = hid + 1;
			(*sid) <<= 32;
			(*sid) |= (lid & 0xffffffff);
			KASSERT(*sid != 0);
			cap->cc_sessions++;
		} else {
			DPRINTF("crypto_drivers[%d].cc_newsession() failed. error=%d\n",
			    hid, err);
		}
		crypto_driver_unlock(cap);
	}

	mutex_exit(&crypto_drv_mtx);

	return err;
}
850cdfce9ceSjonathan
851cdfce9ceSjonathan /*
852cdfce9ceSjonathan * Delete an existing session (or a reserved session on an unregistered
853396579fbSknakahara * driver).
854cdfce9ceSjonathan */
/*
 * Release a session previously created by crypto_newsession().
 * sid == 0 (the sentinel crypto_newsession leaves on failure) is
 * silently ignored; any other sid must name a live driver.
 */
void
crypto_freesession(u_int64_t sid)
{
	struct cryptocap *cap;

	/*
	 * crypto_newsession never returns 0 as a sid (by virtue of
	 * never returning 0 as a hid, which is part of the sid).
	 * However, some callers assume that freeing zero is safe.
	 * Previously this relied on all drivers to agree that freeing
	 * invalid sids is a no-op, but that's a terrible API contract
	 * that we're getting rid of.
	 */
	if (sid == 0)
		return;

	/* Determine two IDs. */
	cap = crypto_checkdriver_lock(CRYPTO_SESID2HID(sid));
	KASSERTMSG(cap != NULL, "sid=%"PRIx64, sid);

	KASSERT(cap->cc_sessions > 0);
	cap->cc_sessions--;

	/* Call the driver cleanup routine, if available. */
	if (cap->cc_freesession)
		cap->cc_freesession(cap->cc_arg, sid);

	/*
	 * If this was the last session of a driver marked as invalid,
	 * make the entry available for reuse.
	 */
	if ((cap->cc_flags & CRYPTOCAP_F_CLEANUP) && cap->cc_sessions == 0)
		crypto_driver_clear(cap);

	crypto_driver_unlock(cap);
}
891cdfce9ceSjonathan
892da7de9a4Schristos static bool
crypto_checkdriver_initialized(const struct cryptocap * cap)893da7de9a4Schristos crypto_checkdriver_initialized(const struct cryptocap *cap)
894da7de9a4Schristos {
895da7de9a4Schristos
896da7de9a4Schristos return cap->cc_process != NULL ||
897da7de9a4Schristos (cap->cc_flags & CRYPTOCAP_F_CLEANUP) != 0 ||
898da7de9a4Schristos cap->cc_sessions != 0;
899da7de9a4Schristos }
900da7de9a4Schristos
901cdfce9ceSjonathan /*
902cdfce9ceSjonathan * Return an unused driver id. Used by drivers prior to registering
903cdfce9ceSjonathan * support for the algorithms they handle.
904cdfce9ceSjonathan */
905cdfce9ceSjonathan int32_t
crypto_get_driverid(u_int32_t flags)906cdfce9ceSjonathan crypto_get_driverid(u_int32_t flags)
907cdfce9ceSjonathan {
908cdfce9ceSjonathan struct cryptocap *newdrv;
909a3f2d30dSknakahara struct cryptocap *cap = NULL;
910e5bd2a12Stls int i;
911cdfce9ceSjonathan
912aa36e9a1Spgoyette (void)crypto_init(); /* XXX oh, this is foul! */
9137bc6d90cSthorpej
914396579fbSknakahara mutex_enter(&crypto_drv_mtx);
915a3f2d30dSknakahara for (i = 0; i < crypto_drivers_num; i++) {
91654c9a772Sknakahara cap = crypto_checkdriver_uninit(i);
917da7de9a4Schristos if (cap == NULL || crypto_checkdriver_initialized(cap))
918a3f2d30dSknakahara continue;
919cdfce9ceSjonathan break;
920a3f2d30dSknakahara }
921cdfce9ceSjonathan
922cdfce9ceSjonathan /* Out of entries, allocate some more. */
923a3f2d30dSknakahara if (cap == NULL) {
924cdfce9ceSjonathan /* Be careful about wrap-around. */
925cdfce9ceSjonathan if (2 * crypto_drivers_num <= crypto_drivers_num) {
926396579fbSknakahara mutex_exit(&crypto_drv_mtx);
927cdfce9ceSjonathan printf("crypto: driver count wraparound!\n");
928cdfce9ceSjonathan return -1;
929cdfce9ceSjonathan }
930cdfce9ceSjonathan
931a70fedd6Sknakahara newdrv = kmem_zalloc(2 * crypto_drivers_num *
93202991323Schs sizeof(struct cryptocap), KM_SLEEP);
933e2cb8590Scegger memcpy(newdrv, crypto_drivers,
934cdfce9ceSjonathan crypto_drivers_num * sizeof(struct cryptocap));
935a70fedd6Sknakahara kmem_free(crypto_drivers,
936a70fedd6Sknakahara crypto_drivers_num * sizeof(struct cryptocap));
937cdfce9ceSjonathan
938cdfce9ceSjonathan crypto_drivers_num *= 2;
939cdfce9ceSjonathan crypto_drivers = newdrv;
940a3f2d30dSknakahara
94154c9a772Sknakahara cap = crypto_checkdriver_uninit(i);
942a3f2d30dSknakahara KASSERT(cap != NULL);
943cdfce9ceSjonathan }
944cdfce9ceSjonathan
945cdfce9ceSjonathan /* NB: state is zero'd on free */
946a3f2d30dSknakahara cap->cc_sessions = 1; /* Mark */
947a3f2d30dSknakahara cap->cc_flags = flags;
948bd79c5acSknakahara mutex_init(&cap->cc_lock, MUTEX_DEFAULT, IPL_NET);
949cdfce9ceSjonathan
950cdfce9ceSjonathan if (bootverbose)
951cdfce9ceSjonathan printf("crypto: assign driver %u, flags %u\n", i, flags);
952cdfce9ceSjonathan
953396579fbSknakahara mutex_exit(&crypto_drv_mtx);
954cdfce9ceSjonathan
955cdfce9ceSjonathan return i;
956cdfce9ceSjonathan }
957cdfce9ceSjonathan
958cdfce9ceSjonathan static struct cryptocap *
crypto_checkdriver_lock(u_int32_t hid)959bd79c5acSknakahara crypto_checkdriver_lock(u_int32_t hid)
960cdfce9ceSjonathan {
961bd79c5acSknakahara struct cryptocap *cap;
96254c9a772Sknakahara
96354c9a772Sknakahara KASSERT(crypto_drivers != NULL);
96454c9a772Sknakahara
965bd79c5acSknakahara if (hid >= crypto_drivers_num)
966bd79c5acSknakahara return NULL;
967bd79c5acSknakahara
968bd79c5acSknakahara cap = &crypto_drivers[hid];
969bd79c5acSknakahara mutex_enter(&cap->cc_lock);
970bd79c5acSknakahara return cap;
97154c9a772Sknakahara }
97254c9a772Sknakahara
/*
 * Use crypto_checkdriver_uninit() instead of crypto_checkdriver() in
 * the following two situations:
 * - crypto_drivers[] may not be allocated yet
 * - crypto_drivers[hid] may not be initialized yet
 */
97954c9a772Sknakahara static struct cryptocap *
crypto_checkdriver_uninit(u_int32_t hid)98054c9a772Sknakahara crypto_checkdriver_uninit(u_int32_t hid)
98154c9a772Sknakahara {
98254c9a772Sknakahara
983bd79c5acSknakahara KASSERT(mutex_owned(&crypto_drv_mtx));
984bd79c5acSknakahara
985cdfce9ceSjonathan if (crypto_drivers == NULL)
986cdfce9ceSjonathan return NULL;
98754c9a772Sknakahara
988cdfce9ceSjonathan return (hid >= crypto_drivers_num ? NULL : &crypto_drivers[hid]);
989cdfce9ceSjonathan }
990cdfce9ceSjonathan
/*
 * Look up driver `hid' without taking its lock.  Unlike
 * crypto_checkdriver_uninit(), this returns NULL for slots that are
 * allocated but not yet initialized.  Caller must hold crypto_drv_mtx.
 */
997da7de9a4Schristos static struct cryptocap *
crypto_checkdriver(u_int32_t hid)998da7de9a4Schristos crypto_checkdriver(u_int32_t hid)
999da7de9a4Schristos {
1000da7de9a4Schristos
1001da7de9a4Schristos KASSERT(mutex_owned(&crypto_drv_mtx));
1002da7de9a4Schristos
1003da7de9a4Schristos if (crypto_drivers == NULL || hid >= crypto_drivers_num)
1004da7de9a4Schristos return NULL;
1005da7de9a4Schristos
1006da7de9a4Schristos struct cryptocap *cap = &crypto_drivers[hid];
1007da7de9a4Schristos return crypto_checkdriver_initialized(cap) ? cap : NULL;
1008da7de9a4Schristos }
1009da7de9a4Schristos
/* Acquire the per-driver mutex protecting a cryptocap entry. */
static inline void
crypto_driver_lock(struct cryptocap *cap)
{

	KASSERT(cap != NULL);

	mutex_enter(&cap->cc_lock);
}
1018bd79c5acSknakahara
/* Release the per-driver mutex taken by crypto_driver_lock(). */
static inline void
crypto_driver_unlock(struct cryptocap *cap)
{

	KASSERT(cap != NULL);

	mutex_exit(&cap->cc_lock);
}
1027bd79c5acSknakahara
/*
 * Reset a driver slot to its unused state so it can be handed out
 * again by crypto_get_driverid().  Caller holds cap->cc_lock; the
 * lock itself is deliberately left initialized.
 */
static void
crypto_driver_clear(struct cryptocap *cap)
{

	if (cap == NULL)
		return;

	KASSERT(mutex_owned(&cap->cc_lock));

	/* Per-algorithm capability tables. */
	cap->cc_sessions = 0;
	memset(&cap->cc_max_op_len, 0, sizeof(cap->cc_max_op_len));
	memset(&cap->cc_alg, 0, sizeof(cap->cc_alg));
	memset(&cap->cc_kalg, 0, sizeof(cap->cc_kalg));
	cap->cc_flags = 0;
	cap->cc_qblocked = 0;
	cap->cc_kqblocked = 0;

	/* Driver callbacks; cc_process == NULL marks the slot free. */
	cap->cc_arg = NULL;
	cap->cc_newsession = NULL;
	cap->cc_process = NULL;
	cap->cc_freesession = NULL;
	cap->cc_kprocess = NULL;
}
1051bd79c5acSknakahara
/*
 * Register support for a key-related algorithm. This routine
 * is called once for each such algorithm supported by a driver.
 */
1056cdfce9ceSjonathan int
crypto_kregister(u_int32_t driverid,int kalg,u_int32_t flags,int (* kprocess)(void *,struct cryptkop *,int),void * karg)1057cdfce9ceSjonathan crypto_kregister(u_int32_t driverid, int kalg, u_int32_t flags,
1058cdfce9ceSjonathan int (*kprocess)(void *, struct cryptkop *, int),
1059cdfce9ceSjonathan void *karg)
1060cdfce9ceSjonathan {
1061cdfce9ceSjonathan struct cryptocap *cap;
1062cdfce9ceSjonathan int err;
1063cdfce9ceSjonathan
1064396579fbSknakahara mutex_enter(&crypto_drv_mtx);
1065cdfce9ceSjonathan
1066bd79c5acSknakahara cap = crypto_checkdriver_lock(driverid);
1067cdfce9ceSjonathan if (cap != NULL &&
106853f067a3Sandvar (CRK_ALGORITHM_MIN <= kalg && kalg <= CRK_ALGORITHM_MAX)) {
1069cdfce9ceSjonathan /*
1070cdfce9ceSjonathan * XXX Do some performance testing to determine placing.
1071cdfce9ceSjonathan * XXX We probably need an auxiliary data structure that
1072cdfce9ceSjonathan * XXX describes relative performances.
1073cdfce9ceSjonathan */
1074cdfce9ceSjonathan
1075cdfce9ceSjonathan cap->cc_kalg[kalg] = flags | CRYPTO_ALG_FLAG_SUPPORTED;
1076e5bd2a12Stls if (bootverbose) {
1077e5bd2a12Stls printf("crypto: driver %u registers key alg %u "
1078e5bd2a12Stls " flags %u\n",
1079e5bd2a12Stls driverid,
1080e5bd2a12Stls kalg,
1081e5bd2a12Stls flags
1082cdfce9ceSjonathan );
1083e5bd2a12Stls }
1084cdfce9ceSjonathan
1085cdfce9ceSjonathan if (cap->cc_kprocess == NULL) {
1086cdfce9ceSjonathan cap->cc_karg = karg;
1087cdfce9ceSjonathan cap->cc_kprocess = kprocess;
1088cdfce9ceSjonathan }
1089cdfce9ceSjonathan err = 0;
1090cdfce9ceSjonathan } else
1091cdfce9ceSjonathan err = EINVAL;
1092cdfce9ceSjonathan
1093396579fbSknakahara mutex_exit(&crypto_drv_mtx);
1094cdfce9ceSjonathan return err;
1095cdfce9ceSjonathan }
1096cdfce9ceSjonathan
1097cdfce9ceSjonathan /*
1098cdfce9ceSjonathan * Register support for a non-key-related algorithm. This routine
1099cdfce9ceSjonathan * is called once for each such algorithm supported by a driver.
1100cdfce9ceSjonathan */
/*
 * Register a symmetric algorithm `alg' (with max operation length
 * `maxoplen') for driver `driverid'.  The first registration also
 * installs the driver callbacks and unmarks the slot reserved by
 * crypto_get_driverid().  Returns 0 or EINVAL.
 */
int
crypto_register(u_int32_t driverid, int alg, u_int16_t maxoplen,
    u_int32_t flags,
    int (*newses)(void *, u_int32_t*, struct cryptoini*),
    void (*freeses)(void *, u_int64_t),
    int (*process)(void *, struct cryptop *, int),
    void *arg)
{
	struct cryptocap *cap;
	int err;

	cap = crypto_checkdriver_lock(driverid);
	if (cap == NULL)
		return EINVAL;

	/* NB: algorithms are in the range [1..max] */
	if (CRYPTO_ALGORITHM_MIN <= alg && alg <= CRYPTO_ALGORITHM_MAX) {
		/*
		 * XXX Do some performance testing to determine placing.
		 * XXX We probably need an auxiliary data structure that
		 * XXX describes relative performances.
		 */

		cap->cc_alg[alg] = flags | CRYPTO_ALG_FLAG_SUPPORTED;
		cap->cc_max_op_len[alg] = maxoplen;
		if (bootverbose) {
			printf("crypto: driver %u registers alg %u "
			    "flags %u maxoplen %u\n",
			    driverid,
			    alg,
			    flags,
			    maxoplen
			);
		}

		/* First registration installs callbacks and unmarks slot. */
		if (cap->cc_process == NULL) {
			cap->cc_arg = arg;
			cap->cc_newsession = newses;
			cap->cc_process = process;
			cap->cc_freesession = freeses;
			cap->cc_sessions = 0;		/* Unmark */
		}
		err = 0;
	} else
		err = EINVAL;

	crypto_driver_unlock(cap);

	return err;
}
1151cdfce9ceSjonathan
/*
 * Remove algorithm `alg' from driver `cap' (cc_lock held by caller).
 * `all' is set when called from crypto_unregister_all(); in that mode
 * the "was this the last algorithm" scan is skipped and the slot is
 * cleared when alg reaches CRYPTO_ALGORITHM_MAX.  If sessions remain,
 * the slot is marked CRYPTOCAP_F_CLEANUP instead of being freed.
 * Returns 0 or EINVAL (bad alg, or alg not registered when !all).
 */
static int
crypto_unregister_locked(struct cryptocap *cap, int alg, bool all)
{
	int i;
	u_int32_t ses;
	bool lastalg = true;

	KASSERT(cap != NULL);
	KASSERT(mutex_owned(&cap->cc_lock));

	if (alg < CRYPTO_ALGORITHM_MIN || CRYPTO_ALGORITHM_MAX < alg)
		return EINVAL;

	if (!all && cap->cc_alg[alg] == 0)
		return EINVAL;

	cap->cc_alg[alg] = 0;
	cap->cc_max_op_len[alg] = 0;

	if (all) {
		/* In `all' mode only the final alg triggers the clear. */
		if (alg != CRYPTO_ALGORITHM_MAX)
			lastalg = false;
	} else {
		/* Was this the last algorithm ? */
		for (i = CRYPTO_ALGORITHM_MIN; i <= CRYPTO_ALGORITHM_MAX; i++)
			if (cap->cc_alg[i] != 0) {
				lastalg = false;
				break;
			}
	}
	if (lastalg) {
		/* crypto_driver_clear() zeroes cc_sessions; preserve it. */
		ses = cap->cc_sessions;
		crypto_driver_clear(cap);
		if (ses != 0) {
			/*
			 * If there are pending sessions, just mark as invalid.
			 */
			cap->cc_flags |= CRYPTOCAP_F_CLEANUP;
			cap->cc_sessions = ses;
		}
	}

	return 0;
}
1196f7fc02afSknakahara
1197f7fc02afSknakahara /*
1198f7fc02afSknakahara * Unregister a crypto driver. If there are pending sessions using it,
1199f7fc02afSknakahara * leave enough information around so that subsequent calls using those
1200f7fc02afSknakahara * sessions will correctly detect the driver has been unregistered and
1201f7fc02afSknakahara * reroute requests.
1202f7fc02afSknakahara */
1203f7fc02afSknakahara int
crypto_unregister(u_int32_t driverid,int alg)1204f7fc02afSknakahara crypto_unregister(u_int32_t driverid, int alg)
1205f7fc02afSknakahara {
1206f7fc02afSknakahara int err;
1207bd79c5acSknakahara struct cryptocap *cap;
1208f7fc02afSknakahara
1209bd79c5acSknakahara cap = crypto_checkdriver_lock(driverid);
1210bd79c5acSknakahara err = crypto_unregister_locked(cap, alg, false);
1211bd79c5acSknakahara crypto_driver_unlock(cap);
1212f7fc02afSknakahara
1213cdfce9ceSjonathan return err;
1214cdfce9ceSjonathan }
1215cdfce9ceSjonathan
1216cdfce9ceSjonathan /*
1217cdfce9ceSjonathan * Unregister all algorithms associated with a crypto driver.
1218cdfce9ceSjonathan * If there are pending sessions using it, leave enough information
1219cdfce9ceSjonathan * around so that subsequent calls using those sessions will
1220cdfce9ceSjonathan * correctly detect the driver has been unregistered and reroute
1221cdfce9ceSjonathan * requests.
1222cdfce9ceSjonathan */
1223cdfce9ceSjonathan int
crypto_unregister_all(u_int32_t driverid)1224cdfce9ceSjonathan crypto_unregister_all(u_int32_t driverid)
1225cdfce9ceSjonathan {
1226408cf952Sknakahara int err, i;
1227bd79c5acSknakahara struct cryptocap *cap;
1228cdfce9ceSjonathan
1229bd79c5acSknakahara cap = crypto_checkdriver_lock(driverid);
1230cdfce9ceSjonathan for (i = CRYPTO_ALGORITHM_MIN; i <= CRYPTO_ALGORITHM_MAX; i++) {
1231bd79c5acSknakahara err = crypto_unregister_locked(cap, i, true);
1232408cf952Sknakahara if (err)
1233408cf952Sknakahara break;
1234cdfce9ceSjonathan }
1235bd79c5acSknakahara crypto_driver_unlock(cap);
1236408cf952Sknakahara
1237cdfce9ceSjonathan return err;
1238cdfce9ceSjonathan }
1239cdfce9ceSjonathan
1240cdfce9ceSjonathan /*
1241cdfce9ceSjonathan * Clear blockage on a driver. The what parameter indicates whether
1242cdfce9ceSjonathan * the driver is now ready for cryptop's and/or cryptokop's.
1243cdfce9ceSjonathan */
1244cdfce9ceSjonathan int
crypto_unblock(u_int32_t driverid,int what)1245cdfce9ceSjonathan crypto_unblock(u_int32_t driverid, int what)
1246cdfce9ceSjonathan {
1247cdfce9ceSjonathan struct cryptocap *cap;
12483cc51cb9Sknakahara int needwakeup = 0;
1249cdfce9ceSjonathan
1250bd79c5acSknakahara cap = crypto_checkdriver_lock(driverid);
1251bd79c5acSknakahara if (cap == NULL)
12523cc51cb9Sknakahara return EINVAL;
12533cc51cb9Sknakahara
1254cdfce9ceSjonathan if (what & CRYPTO_SYMQ) {
1255cdfce9ceSjonathan needwakeup |= cap->cc_qblocked;
1256cdfce9ceSjonathan cap->cc_qblocked = 0;
1257cdfce9ceSjonathan }
1258cdfce9ceSjonathan if (what & CRYPTO_ASYMQ) {
1259cdfce9ceSjonathan needwakeup |= cap->cc_kqblocked;
1260cdfce9ceSjonathan cap->cc_kqblocked = 0;
1261cdfce9ceSjonathan }
1262bd79c5acSknakahara crypto_driver_unlock(cap);
126382f82d0bSknakahara if (needwakeup) {
126482f82d0bSknakahara kpreempt_disable();
126582f82d0bSknakahara softint_schedule(crypto_q_si);
126682f82d0bSknakahara kpreempt_enable();
126782f82d0bSknakahara }
1268cdfce9ceSjonathan
12693cc51cb9Sknakahara return 0;
1270cdfce9ceSjonathan }
1271cdfce9ceSjonathan
1272cdfce9ceSjonathan /*
1273cdfce9ceSjonathan * Dispatch a crypto request to a driver or queue
1274cdfce9ceSjonathan * it, to be processed by the kernel thread.
1275cdfce9ceSjonathan */
/*
 * Dispatch a symmetric crypto request: either hand it directly to its
 * driver, or enqueue it on the per-CPU crp_q for the dispatch softint
 * (CRYPTO_F_BATCH, unknown driver, blocked driver, or ERESTART).
 * Must not be called from hard interrupt context.
 */
void
crypto_dispatch(struct cryptop *crp)
{
	int result, s;
	struct cryptocap *cap;
	struct crypto_crp_qs *crp_qs;
	struct crypto_crp_q *crp_q;

	KASSERT(crp != NULL);
	KASSERT(crp->crp_callback != NULL);
	KASSERT(crp->crp_desc != NULL);
	KASSERT(crp->crp_buf != NULL);
	KASSERT(!cpu_intr_p());

	DPRINTF("crp %p, alg %d\n", crp, crp->crp_desc->crd_alg);

	cryptostats.cs_ops++;

#ifdef CRYPTO_TIMING
	if (crypto_timing)
		nanouptime(&crp->crp_tstamp);
#endif

	if ((crp->crp_flags & CRYPTO_F_BATCH) != 0) {
		int wasempty;
		/*
		 * Caller marked the request as ``ok to delay'';
		 * queue it for the swi thread. This is desirable
		 * when the operation is low priority and/or suitable
		 * for batching.
		 *
		 * don't care list order in batch job.
		 */
		crp_qs = crypto_get_crp_qs(&s);
		crp_q = crp_qs->crp_q;
		/* Only an empty->non-empty transition needs a kick. */
		wasempty = TAILQ_EMPTY(crp_q);
		TAILQ_INSERT_TAIL(crp_q, crp, crp_next);
		crypto_put_crp_qs(&s);
		crp_q = NULL;
		if (wasempty) {
			kpreempt_disable();
			softint_schedule(crypto_q_si);
			kpreempt_enable();
		}
		return;
	}

	crp_qs = crypto_get_crp_qs(&s);
	crp_q = crp_qs->crp_q;
	cap = crypto_checkdriver_lock(CRYPTO_SESID2HID(crp->crp_sid));
	/*
	 * TODO:
	 * If we can ensure the driver has been valid until the driver is
	 * done crypto_unregister(), this migrate operation is not required.
	 */
	if (cap == NULL) {
		/*
		 * The driver must be detached, so this request will migrate
		 * to other drivers in cryptointr() later.
		 */
		TAILQ_INSERT_TAIL(crp_q, crp, crp_next);
		goto out;
	}

	if (cap->cc_qblocked != 0) {
		crypto_driver_unlock(cap);
		/*
		 * The driver is blocked, just queue the op until
		 * it unblocks and the swi thread gets kicked.
		 */
		TAILQ_INSERT_TAIL(crp_q, crp, crp_next);
		goto out;
	}

	/*
	 * Caller marked the request to be processed
	 * immediately; dispatch it directly to the
	 * driver unless the driver is currently blocked.
	 */
	crypto_driver_unlock(cap);
	result = crypto_invoke(crp, 0);
	KASSERTMSG(result == 0 || result == ERESTART, "result=%d", result);
	if (result == ERESTART) {
		/*
		 * The driver ran out of resources, mark the
		 * driver ``blocked'' for cryptop's and put
		 * the op on the queue.
		 */
		crypto_driver_lock(cap);
		cap->cc_qblocked = 1;
		crypto_driver_unlock(cap);
		/* Head insert: retry this request before newer ones. */
		TAILQ_INSERT_HEAD(crp_q, crp, crp_next);
		cryptostats.cs_blocks++;
	}

out:
	crypto_put_crp_qs(&s);
}
1374cdfce9ceSjonathan
1375cdfce9ceSjonathan /*
1376d7fca1abSandvar * Add an asymmetric crypto request to a queue,
1377cdfce9ceSjonathan * to be processed by the kernel thread.
1378cdfce9ceSjonathan */
/*
 * Queue the asymmetric request on the per-CPU crp_kq, or hand it
 * directly to its bound driver when that driver is present and not
 * blocked.  Must be called from thread context (asserted below).
 */
void
crypto_kdispatch(struct cryptkop *krp)
{
	int result, s;
	struct cryptocap *cap;
	struct crypto_crp_qs *crp_qs;
	struct crypto_crp_kq *crp_kq;

	KASSERT(krp != NULL);
	KASSERT(krp->krp_callback != NULL);
	KASSERT(!cpu_intr_p());

	cryptostats.cs_kops++;

	/* Pin this CPU's request queues; released at "out". */
	crp_qs = crypto_get_crp_qs(&s);
	crp_kq = crp_qs->crp_kq;
	cap = crypto_checkdriver_lock(krp->krp_hid);
	/*
	 * TODO:
	 * If we can ensure the driver has been valid until the driver is
	 * done crypto_unregister(), this migrate operation is not required.
	 */
	if (cap == NULL) {
		/*
		 * The driver has been detached; queue the request so
		 * cryptointr() can migrate it to another driver later.
		 */
		TAILQ_INSERT_TAIL(crp_kq, krp, krp_next);
		goto out;
	}

	if (cap->cc_kqblocked != 0) {
		crypto_driver_unlock(cap);
		/*
		 * The driver is blocked, just queue the op until
		 * it unblocks and the swi thread gets kicked.
		 */
		TAILQ_INSERT_TAIL(crp_kq, krp, krp_next);
		goto out;
	}

	/* Driver looks usable: drop its lock and invoke it directly. */
	crypto_driver_unlock(cap);
	result = crypto_kinvoke(krp, 0);
	KASSERTMSG(result == 0 || result == ERESTART, "result=%d", result);
	if (result == ERESTART) {
		/*
		 * The driver ran out of resources, mark the
		 * driver ``blocked'' for cryptop's and put
		 * the op on the queue.
		 *
		 * NOTE(review): `cap' was unlocked above and is assumed
		 * to remain valid across crypto_kinvoke() -- confirm
		 * against crypto_unregister() teardown ordering.
		 */
		crypto_driver_lock(cap);
		cap->cc_kqblocked = 1;
		crypto_driver_unlock(cap);
		TAILQ_INSERT_HEAD(crp_kq, krp, krp_next);
		cryptostats.cs_kblocks++;
	}

out:
	crypto_put_crp_qs(&s);
}
1435cdfce9ceSjonathan
1436cdfce9ceSjonathan /*
1437d7fca1abSandvar * Dispatch an asymmetric crypto request to the appropriate crypto devices.
1438cdfce9ceSjonathan */
1439cdfce9ceSjonathan static int
crypto_kinvoke(struct cryptkop * krp,int hint)1440cdfce9ceSjonathan crypto_kinvoke(struct cryptkop *krp, int hint)
1441cdfce9ceSjonathan {
1442a3f2d30dSknakahara struct cryptocap *cap = NULL;
1443cdfce9ceSjonathan u_int32_t hid;
1444cdfce9ceSjonathan int error;
1445cdfce9ceSjonathan
1446108266a5Sknakahara KASSERT(krp != NULL);
1447faea2b2fSriastradh KASSERT(krp->krp_callback != NULL);
1448d31c8656Sriastradh KASSERT(!cpu_intr_p());
1449108266a5Sknakahara
1450396579fbSknakahara mutex_enter(&crypto_drv_mtx);
1451cdfce9ceSjonathan for (hid = 0; hid < crypto_drivers_num; hid++) {
1452da7de9a4Schristos cap = crypto_checkdriver(hid);
1453a3f2d30dSknakahara if (cap == NULL)
1454a3f2d30dSknakahara continue;
1455bd79c5acSknakahara crypto_driver_lock(cap);
1456a3f2d30dSknakahara if ((cap->cc_flags & CRYPTOCAP_F_SOFTWARE) &&
1457bd79c5acSknakahara crypto_devallowsoft == 0) {
1458bd79c5acSknakahara crypto_driver_unlock(cap);
1459cdfce9ceSjonathan continue;
1460bd79c5acSknakahara }
1461bd79c5acSknakahara if (cap->cc_kprocess == NULL) {
1462bd79c5acSknakahara crypto_driver_unlock(cap);
1463cdfce9ceSjonathan continue;
1464bd79c5acSknakahara }
1465a3f2d30dSknakahara if ((cap->cc_kalg[krp->krp_op] &
1466bd79c5acSknakahara CRYPTO_ALG_FLAG_SUPPORTED) == 0) {
1467bd79c5acSknakahara crypto_driver_unlock(cap);
1468cdfce9ceSjonathan continue;
1469bd79c5acSknakahara }
1470cdfce9ceSjonathan break;
1471cdfce9ceSjonathan }
1472bd79c5acSknakahara mutex_exit(&crypto_drv_mtx);
1473a3f2d30dSknakahara if (cap != NULL) {
14744e0632c1Schristos int (*process)(void *, struct cryptkop *, int);
14754e0632c1Schristos void *arg;
14764e0632c1Schristos
1477a3f2d30dSknakahara process = cap->cc_kprocess;
1478a3f2d30dSknakahara arg = cap->cc_karg;
1479cdfce9ceSjonathan krp->krp_hid = hid;
1480024c4368Sknakahara krp->reqcpu = curcpu();
1481bd79c5acSknakahara crypto_driver_unlock(cap);
14824e0632c1Schristos error = (*process)(arg, krp, hint);
1483bcc6b1ebSriastradh KASSERTMSG(error == 0 || error == ERESTART, "error=%d",
1484bcc6b1ebSriastradh error);
1485bcc6b1ebSriastradh return error;
1486cdfce9ceSjonathan } else {
1487bcc6b1ebSriastradh krp->krp_status = ENODEV;
1488*ff733a25Sriastradh krp->reqcpu = curcpu();
1489cdfce9ceSjonathan crypto_kdone(krp);
1490cdfce9ceSjonathan return 0;
1491cdfce9ceSjonathan }
1492bcc6b1ebSriastradh }
1493cdfce9ceSjonathan
1494cdfce9ceSjonathan #ifdef CRYPTO_TIMING
1495cdfce9ceSjonathan static void
crypto_tstat(struct cryptotstat * ts,struct timespec * tv)1496cdfce9ceSjonathan crypto_tstat(struct cryptotstat *ts, struct timespec *tv)
1497cdfce9ceSjonathan {
1498cdfce9ceSjonathan struct timespec now, t;
1499cdfce9ceSjonathan
1500cdfce9ceSjonathan nanouptime(&now);
1501cdfce9ceSjonathan t.tv_sec = now.tv_sec - tv->tv_sec;
1502cdfce9ceSjonathan t.tv_nsec = now.tv_nsec - tv->tv_nsec;
1503cdfce9ceSjonathan if (t.tv_nsec < 0) {
1504cdfce9ceSjonathan t.tv_sec--;
1505cdfce9ceSjonathan t.tv_nsec += 1000000000;
1506cdfce9ceSjonathan }
1507cdfce9ceSjonathan timespecadd(&ts->acc, &t, &t);
1508cdfce9ceSjonathan if (timespeccmp(&t, &ts->min, <))
1509cdfce9ceSjonathan ts->min = t;
1510cdfce9ceSjonathan if (timespeccmp(&t, &ts->max, >))
1511cdfce9ceSjonathan ts->max = t;
1512cdfce9ceSjonathan ts->count++;
1513cdfce9ceSjonathan
1514cdfce9ceSjonathan *tv = now;
1515cdfce9ceSjonathan }
1516cdfce9ceSjonathan #endif
1517cdfce9ceSjonathan
1518cdfce9ceSjonathan /*
1519cdfce9ceSjonathan * Dispatch a crypto request to the appropriate crypto devices.
1520cdfce9ceSjonathan */
1521cdfce9ceSjonathan static int
crypto_invoke(struct cryptop * crp,int hint)1522cdfce9ceSjonathan crypto_invoke(struct cryptop *crp, int hint)
1523cdfce9ceSjonathan {
1524a3f2d30dSknakahara struct cryptocap *cap;
1525bcc6b1ebSriastradh int error;
1526cdfce9ceSjonathan
1527108266a5Sknakahara KASSERT(crp != NULL);
1528faea2b2fSriastradh KASSERT(crp->crp_callback != NULL);
1529faea2b2fSriastradh KASSERT(crp->crp_desc != NULL);
1530d31c8656Sriastradh KASSERT(!cpu_intr_p());
1531108266a5Sknakahara
1532cdfce9ceSjonathan #ifdef CRYPTO_TIMING
1533cdfce9ceSjonathan if (crypto_timing)
1534cdfce9ceSjonathan crypto_tstat(&cryptostats.cs_invoke, &crp->crp_tstamp);
1535cdfce9ceSjonathan #endif
1536cdfce9ceSjonathan
1537bd79c5acSknakahara cap = crypto_checkdriver_lock(CRYPTO_SESID2HID(crp->crp_sid));
1538a3f2d30dSknakahara if (cap != NULL && (cap->cc_flags & CRYPTOCAP_F_CLEANUP) == 0) {
15394e0632c1Schristos int (*process)(void *, struct cryptop *, int);
15404e0632c1Schristos void *arg;
15414e0632c1Schristos
1542a3f2d30dSknakahara process = cap->cc_process;
1543a3f2d30dSknakahara arg = cap->cc_arg;
1544024c4368Sknakahara crp->reqcpu = curcpu();
1545cdfce9ceSjonathan
15464e0632c1Schristos /*
15474e0632c1Schristos * Invoke the driver to process the request.
15484e0632c1Schristos */
1549c4e549c7Sknakahara DPRINTF("calling process for %p\n", crp);
1550bd79c5acSknakahara crypto_driver_unlock(cap);
1551bcc6b1ebSriastradh error = (*process)(arg, crp, hint);
1552bcc6b1ebSriastradh KASSERTMSG(error == 0 || error == ERESTART, "error=%d",
1553bcc6b1ebSriastradh error);
1554bcc6b1ebSriastradh return error;
15554e0632c1Schristos } else {
15563bbdee24Sriastradh if (cap != NULL) {
1557bd79c5acSknakahara crypto_driver_unlock(cap);
15586168db0fSknakahara crypto_freesession(crp->crp_sid);
15593bbdee24Sriastradh }
15603bbdee24Sriastradh crp->crp_etype = ENODEV;
1561cdfce9ceSjonathan crypto_done(crp);
1562cdfce9ceSjonathan return 0;
1563cdfce9ceSjonathan }
1564cdfce9ceSjonathan }
1565cdfce9ceSjonathan
1566cdfce9ceSjonathan /*
1567cdfce9ceSjonathan * Release a set of crypto descriptors.
1568cdfce9ceSjonathan */
1569cdfce9ceSjonathan void
crypto_freereq(struct cryptop * crp)1570cdfce9ceSjonathan crypto_freereq(struct cryptop *crp)
1571cdfce9ceSjonathan {
1572cdfce9ceSjonathan struct cryptodesc *crd;
1573cdfce9ceSjonathan
1574cdfce9ceSjonathan if (crp == NULL)
1575cdfce9ceSjonathan return;
1576c4e549c7Sknakahara DPRINTF("lid[%u]: crp %p\n", CRYPTO_SESID2LID(crp->crp_sid), crp);
1577cdfce9ceSjonathan
1578d6a1889dSdarran /* sanity check */
1579d6a1889dSdarran if (crp->crp_flags & CRYPTO_F_ONRETQ) {
1580d6a1889dSdarran panic("crypto_freereq() freeing crp on RETQ\n");
1581d6a1889dSdarran }
1582d6a1889dSdarran
1583cdfce9ceSjonathan while ((crd = crp->crp_desc) != NULL) {
1584cdfce9ceSjonathan crp->crp_desc = crd->crd_next;
1585aeb50172Sknakahara pool_cache_put(cryptodesc_cache, crd);
1586cdfce9ceSjonathan }
1587aeb50172Sknakahara pool_cache_put(cryptop_cache, crp);
1588cdfce9ceSjonathan }
1589cdfce9ceSjonathan
1590cdfce9ceSjonathan /*
1591cdfce9ceSjonathan * Acquire a set of crypto descriptors.
1592cdfce9ceSjonathan */
1593cdfce9ceSjonathan struct cryptop *
crypto_getreq(int num)1594cdfce9ceSjonathan crypto_getreq(int num)
1595cdfce9ceSjonathan {
1596cdfce9ceSjonathan struct cryptodesc *crd;
1597cdfce9ceSjonathan struct cryptop *crp;
1598b6082029Sknakahara struct crypto_crp_ret_qs *qs;
1599cdfce9ceSjonathan
1600a56f0ba8Sriastradh KASSERT(num > 0);
1601a56f0ba8Sriastradh
160248886aafSknakahara /*
160348886aafSknakahara * When crp_ret_q is full, we restrict here to avoid crp_ret_q overflow
160448886aafSknakahara * by error callback.
160548886aafSknakahara */
1606b6082029Sknakahara qs = crypto_get_crp_ret_qs(curcpu());
1607b6082029Sknakahara if (qs->crp_ret_q_maxlen > 0
1608b6082029Sknakahara && qs->crp_ret_q_len > qs->crp_ret_q_maxlen) {
1609b6082029Sknakahara qs->crp_ret_q_drops++;
1610b6082029Sknakahara crypto_put_crp_ret_qs(curcpu());
161148886aafSknakahara return NULL;
161248886aafSknakahara }
1613b6082029Sknakahara crypto_put_crp_ret_qs(curcpu());
161448886aafSknakahara
16150fddc7f0Schristos crp = pool_cache_get(cryptop_cache, PR_NOWAIT);
1616cdfce9ceSjonathan if (crp == NULL) {
1617cdfce9ceSjonathan return NULL;
1618cdfce9ceSjonathan }
1619c363a9cbScegger memset(crp, 0, sizeof(struct cryptop));
1620cdfce9ceSjonathan
1621cdfce9ceSjonathan while (num--) {
16220fddc7f0Schristos crd = pool_cache_get(cryptodesc_cache, PR_NOWAIT);
1623cdfce9ceSjonathan if (crd == NULL) {
1624cdfce9ceSjonathan crypto_freereq(crp);
1625cdfce9ceSjonathan return NULL;
1626cdfce9ceSjonathan }
1627cdfce9ceSjonathan
1628c363a9cbScegger memset(crd, 0, sizeof(struct cryptodesc));
1629cdfce9ceSjonathan crd->crd_next = crp->crp_desc;
1630cdfce9ceSjonathan crp->crp_desc = crd;
1631cdfce9ceSjonathan }
1632cdfce9ceSjonathan
1633cdfce9ceSjonathan return crp;
1634cdfce9ceSjonathan }
1635cdfce9ceSjonathan
1636cdfce9ceSjonathan /*
1637ecc8a111Sknakahara * Release a set of asymmetric crypto descriptors.
1638ecc8a111Sknakahara * Currently, support one descriptor only.
1639ecc8a111Sknakahara */
1640ecc8a111Sknakahara void
crypto_kfreereq(struct cryptkop * krp)1641ecc8a111Sknakahara crypto_kfreereq(struct cryptkop *krp)
1642ecc8a111Sknakahara {
1643ecc8a111Sknakahara
1644ecc8a111Sknakahara if (krp == NULL)
1645ecc8a111Sknakahara return;
1646ecc8a111Sknakahara
1647ecc8a111Sknakahara DPRINTF("krp %p\n", krp);
1648ecc8a111Sknakahara
1649ecc8a111Sknakahara /* sanity check */
1650ecc8a111Sknakahara if (krp->krp_flags & CRYPTO_F_ONRETQ) {
1651ecc8a111Sknakahara panic("crypto_kfreereq() freeing krp on RETQ\n");
1652ecc8a111Sknakahara }
1653ecc8a111Sknakahara
1654aeb50172Sknakahara pool_cache_put(cryptkop_cache, krp);
1655ecc8a111Sknakahara }
1656ecc8a111Sknakahara
1657ecc8a111Sknakahara /*
1658ecc8a111Sknakahara * Acquire a set of asymmetric crypto descriptors.
1659ecc8a111Sknakahara * Currently, support one descriptor only.
1660ecc8a111Sknakahara */
1661ecc8a111Sknakahara struct cryptkop *
crypto_kgetreq(int num __diagused,int prflags)1662a56f0ba8Sriastradh crypto_kgetreq(int num __diagused, int prflags)
1663ecc8a111Sknakahara {
1664ecc8a111Sknakahara struct cryptkop *krp;
1665b6082029Sknakahara struct crypto_crp_ret_qs *qs;
1666ecc8a111Sknakahara
1667a56f0ba8Sriastradh KASSERTMSG(num == 1, "num=%d not supported", num);
1668a56f0ba8Sriastradh
1669ecc8a111Sknakahara /*
1670ecc8a111Sknakahara * When crp_ret_kq is full, we restrict here to avoid crp_ret_kq
1671ecc8a111Sknakahara * overflow by error callback.
1672ecc8a111Sknakahara */
1673b6082029Sknakahara qs = crypto_get_crp_ret_qs(curcpu());
1674b6082029Sknakahara if (qs->crp_ret_kq_maxlen > 0
1675b6082029Sknakahara && qs->crp_ret_kq_len > qs->crp_ret_kq_maxlen) {
1676b6082029Sknakahara qs->crp_ret_kq_drops++;
1677b6082029Sknakahara crypto_put_crp_ret_qs(curcpu());
1678ecc8a111Sknakahara return NULL;
1679ecc8a111Sknakahara }
1680b6082029Sknakahara crypto_put_crp_ret_qs(curcpu());
1681ecc8a111Sknakahara
1682aeb50172Sknakahara krp = pool_cache_get(cryptkop_cache, prflags);
1683ecc8a111Sknakahara if (krp == NULL) {
1684ecc8a111Sknakahara return NULL;
1685ecc8a111Sknakahara }
1686ecc8a111Sknakahara memset(krp, 0, sizeof(struct cryptkop));
1687ecc8a111Sknakahara
1688ecc8a111Sknakahara return krp;
1689ecc8a111Sknakahara }
1690ecc8a111Sknakahara
1691ecc8a111Sknakahara /*
1692cdfce9ceSjonathan * Invoke the callback on behalf of the driver.
1693cdfce9ceSjonathan */
/*
 * Complete a symmetric request on behalf of the driver: append it to
 * the return queue of the CPU it was dispatched from (crp->reqcpu) and
 * schedule that CPU's return softint when the queue goes
 * empty -> nonempty.
 */
void
crypto_done(struct cryptop *crp)
{
	int wasempty;
	struct crypto_crp_ret_qs *qs;
	struct crypto_crp_ret_q *crp_ret_q;

	KASSERT(crp != NULL);

	if (crp->crp_etype != 0)
		cryptostats.cs_errs++;
#ifdef CRYPTO_TIMING
	if (crypto_timing)
		crypto_tstat(&cryptostats.cs_done, &crp->crp_tstamp);
#endif
	DPRINTF("lid[%u]: crp %p\n", CRYPTO_SESID2LID(crp->crp_sid), crp);

	/* Queue on the dispatching CPU's return queue, under its lock. */
	qs = crypto_get_crp_ret_qs(crp->reqcpu);
	crp_ret_q = &qs->crp_ret_q;
	wasempty = TAILQ_EMPTY(crp_ret_q);
	DPRINTF("lid[%u]: queueing %p\n", CRYPTO_SESID2LID(crp->crp_sid), crp);
	crp->crp_flags |= CRYPTO_F_ONRETQ;
	TAILQ_INSERT_TAIL(crp_ret_q, crp, crp_next);
	qs->crp_ret_q_len++;
	/*
	 * Only kick the softint on the empty->nonempty transition, and
	 * never while the queue is being torn down (exit_flag set).
	 */
	if (wasempty && !qs->crp_ret_q_exit_flag) {
		DPRINTF("lid[%u]: waking cryptoret, crp %p hit empty queue\n.",
		    CRYPTO_SESID2LID(crp->crp_sid), crp);
		softint_schedule_cpu(crypto_ret_si, crp->reqcpu);
	}
	crypto_put_crp_ret_qs(crp->reqcpu);
}
1725cdfce9ceSjonathan
1726cdfce9ceSjonathan /*
1727cdfce9ceSjonathan * Invoke the callback on behalf of the driver.
1728cdfce9ceSjonathan */
/*
 * Complete an asymmetric request on behalf of the driver: append it to
 * the asymmetric return queue of the CPU it was dispatched from
 * (krp->reqcpu) and schedule that CPU's return softint when the queue
 * goes empty -> nonempty.
 */
void
crypto_kdone(struct cryptkop *krp)
{
	int wasempty;
	struct crypto_crp_ret_qs *qs;
	struct crypto_crp_ret_kq *crp_ret_kq;

	KASSERT(krp != NULL);

	if (krp->krp_status != 0)
		cryptostats.cs_kerrs++;

	/* Queue on the dispatching CPU's return queue, under its lock. */
	qs = crypto_get_crp_ret_qs(krp->reqcpu);
	crp_ret_kq = &qs->crp_ret_kq;

	wasempty = TAILQ_EMPTY(crp_ret_kq);
	krp->krp_flags |= CRYPTO_F_ONRETQ;
	TAILQ_INSERT_TAIL(crp_ret_kq, krp, krp_next);
	qs->crp_ret_kq_len++;
	/* Kick the softint only on the empty->nonempty transition. */
	if (wasempty && !qs->crp_ret_q_exit_flag)
		softint_schedule_cpu(crypto_ret_si, krp->reqcpu);
	crypto_put_crp_ret_qs(krp->reqcpu);
}
1752cdfce9ceSjonathan
1753cdfce9ceSjonathan int
crypto_getfeat(int * featp)1754cdfce9ceSjonathan crypto_getfeat(int *featp)
1755cdfce9ceSjonathan {
1756cdfce9ceSjonathan
17578bbf9e0cSchristos if (crypto_userasymcrypto == 0) {
17588bbf9e0cSchristos *featp = 0;
1759396579fbSknakahara return 0;
17608bbf9e0cSchristos }
1761396579fbSknakahara
1762396579fbSknakahara mutex_enter(&crypto_drv_mtx);
1763cdfce9ceSjonathan
17648bbf9e0cSchristos int feat = 0;
17658bbf9e0cSchristos for (int hid = 0; hid < crypto_drivers_num; hid++) {
1766a3f2d30dSknakahara struct cryptocap *cap;
1767da7de9a4Schristos cap = crypto_checkdriver(hid);
1768a3f2d30dSknakahara if (cap == NULL)
1769a3f2d30dSknakahara continue;
1770a3f2d30dSknakahara
17718bbf9e0cSchristos crypto_driver_lock(cap);
17728bbf9e0cSchristos
1773a3f2d30dSknakahara if ((cap->cc_flags & CRYPTOCAP_F_SOFTWARE) &&
17748bbf9e0cSchristos crypto_devallowsoft == 0)
17758bbf9e0cSchristos goto unlock;
17768bbf9e0cSchristos
17778bbf9e0cSchristos if (cap->cc_kprocess == NULL)
17788bbf9e0cSchristos goto unlock;
17798bbf9e0cSchristos
17808bbf9e0cSchristos for (int kalg = 0; kalg < CRK_ALGORITHM_MAX; kalg++)
1781a3f2d30dSknakahara if ((cap->cc_kalg[kalg] &
1782cdfce9ceSjonathan CRYPTO_ALG_FLAG_SUPPORTED) != 0)
1783cdfce9ceSjonathan feat |= 1 << kalg;
1784bd79c5acSknakahara
17858bbf9e0cSchristos unlock: crypto_driver_unlock(cap);
1786cdfce9ceSjonathan }
1787396579fbSknakahara
1788396579fbSknakahara mutex_exit(&crypto_drv_mtx);
1789cdfce9ceSjonathan *featp = feat;
1790cdfce9ceSjonathan return (0);
1791cdfce9ceSjonathan }
1792cdfce9ceSjonathan
1793cdfce9ceSjonathan /*
1794cdfce9ceSjonathan * Software interrupt thread to dispatch crypto requests.
1795cdfce9ceSjonathan */
/*
 * Software interrupt handler: drain this CPU's symmetric (crp_q) and
 * asymmetric (crp_kq) dispatch queues, invoking drivers.  Requests
 * whose driver is gone are "migrated" by re-invoking, and drivers that
 * return ERESTART are marked blocked with the request requeued at the
 * head.  Loops until neither queue yields a runnable request.
 */
static void
cryptointr(void *arg __unused)
{
	struct cryptop *crp, *submit, *cnext;
	struct cryptkop *krp, *knext;
	struct cryptocap *cap;
	struct crypto_crp_qs *crp_qs;
	struct crypto_crp_q *crp_q;
	struct crypto_crp_kq *crp_kq;
	int result, hint, s;

	cryptostats.cs_intrs++;
	/* Pin this CPU's queues for the duration of the scan. */
	crp_qs = crypto_get_crp_qs(&s);
	crp_q = crp_qs->crp_q;
	crp_kq = crp_qs->crp_kq;
	do {
		/*
		 * Find the first element in the queue that can be
		 * processed and look-ahead to see if multiple ops
		 * are ready for the same driver.
		 */
		submit = NULL;
		hint = 0;
		TAILQ_FOREACH_SAFE(crp, crp_q, crp_next, cnext) {
			u_int32_t hid = CRYPTO_SESID2HID(crp->crp_sid);
			cap = crypto_checkdriver_lock(hid);
			if (cap == NULL || cap->cc_process == NULL) {
				if (cap != NULL)
					crypto_driver_unlock(cap);
				/* Op needs to be migrated, process it. */
				submit = crp;
				break;
			}

			/*
			 * skip blocked crp regardless of CRYPTO_F_BATCH
			 */
			if (cap->cc_qblocked != 0) {
				crypto_driver_unlock(cap);
				continue;
			}
			crypto_driver_unlock(cap);

			/*
			 * skip batch crp until the end of crp_q
			 */
			if ((crp->crp_flags & CRYPTO_F_BATCH) != 0) {
				if (submit == NULL) {
					/* Remember the first batch op. */
					submit = crp;
				} else {
					/*
					 * Another batch op for the same
					 * driver: hint that more work
					 * follows.
					 */
					if (CRYPTO_SESID2HID(submit->crp_sid)
					    == hid)
						hint = CRYPTO_HINT_MORE;
				}

				continue;
			}

			/*
			 * found first crp which is neither blocked nor batch.
			 */
			submit = crp;
			/*
			 * batch crp can be processed much later, so clear hint.
			 */
			hint = 0;
			break;
		}
		if (submit != NULL) {
			TAILQ_REMOVE(crp_q, submit, crp_next);
			result = crypto_invoke(submit, hint);
			KASSERTMSG(result == 0 || result == ERESTART,
			    "result=%d", result);
			/* we must take here as the TAILQ op or kinvoke
			   may need this mutex below. sigh. */
			if (result == ERESTART) {
				/*
				 * The driver ran out of resources, mark the
				 * driver ``blocked'' for cryptop's and put
				 * the request back in the queue. It would
				 * best to put the request back where we got
				 * it but that's hard so for now we put it
				 * at the front. This should be ok; putting
				 * it at the end does not work.
				 */
				/* validate sid again */
				cap = crypto_checkdriver_lock(CRYPTO_SESID2HID(submit->crp_sid));
				if (cap == NULL) {
					/* migrate again, sigh... */
					TAILQ_INSERT_TAIL(crp_q, submit, crp_next);
				} else {
					cap->cc_qblocked = 1;
					crypto_driver_unlock(cap);
					TAILQ_INSERT_HEAD(crp_q, submit, crp_next);
					cryptostats.cs_blocks++;
				}
			}
		}

		/* As above, but for key ops */
		TAILQ_FOREACH_SAFE(krp, crp_kq, krp_next, knext) {
			cap = crypto_checkdriver_lock(krp->krp_hid);
			if (cap == NULL || cap->cc_kprocess == NULL) {
				if (cap != NULL)
					crypto_driver_unlock(cap);
				/* Op needs to be migrated, process it. */
				break;
			}
			if (!cap->cc_kqblocked) {
				/* Runnable key op found. */
				crypto_driver_unlock(cap);
				break;
			}
			crypto_driver_unlock(cap);
		}
		if (krp != NULL) {
			TAILQ_REMOVE(crp_kq, krp, krp_next);
			result = crypto_kinvoke(krp, 0);
			KASSERTMSG(result == 0 || result == ERESTART,
			    "result=%d", result);
			/* the next iteration will want the mutex. :-/ */
			if (result == ERESTART) {
				/*
				 * The driver ran out of resources, mark the
				 * driver ``blocked'' for cryptkop's and put
				 * the request back in the queue. It would
				 * best to put the request back where we got
				 * it but that's hard so for now we put it
				 * at the front. This should be ok; putting
				 * it at the end does not work.
				 */
				/* validate sid again */
				cap = crypto_checkdriver_lock(krp->krp_hid);
				if (cap == NULL) {
					/* migrate again, sigh... */
					TAILQ_INSERT_TAIL(crp_kq, krp, krp_next);
				} else {
					cap->cc_kqblocked = 1;
					crypto_driver_unlock(cap);
					TAILQ_INSERT_HEAD(crp_kq, krp, krp_next);
					cryptostats.cs_kblocks++;
				}
			}
		}
	} while (submit != NULL || krp != NULL);
	crypto_put_crp_qs(&s);
}
1942cdfce9ceSjonathan
1943cdfce9ceSjonathan /*
1944024c4368Sknakahara * softint handler to do callbacks.
1945cdfce9ceSjonathan */
1946cdfce9ceSjonathan static void
cryptoret_softint(void * arg __unused)1947024c4368Sknakahara cryptoret_softint(void *arg __unused)
1948cdfce9ceSjonathan {
1949b6082029Sknakahara struct crypto_crp_ret_qs *qs;
1950a50c0670Smaya struct crypto_crp_ret_q *crp_ret_q;
1951a50c0670Smaya struct crypto_crp_ret_kq *crp_ret_kq;
1952cdfce9ceSjonathan
1953b6082029Sknakahara qs = crypto_get_crp_ret_qs(curcpu());
1954b6082029Sknakahara crp_ret_q = &qs->crp_ret_q;
1955b6082029Sknakahara crp_ret_kq = &qs->crp_ret_kq;
1956ed830de9Sad for (;;) {
1957024c4368Sknakahara struct cryptop *crp;
1958024c4368Sknakahara struct cryptkop *krp;
1959024c4368Sknakahara
1960b6082029Sknakahara crp = TAILQ_FIRST(crp_ret_q);
1961e5bd2a12Stls if (crp != NULL) {
1962b6082029Sknakahara TAILQ_REMOVE(crp_ret_q, crp, crp_next);
1963b6082029Sknakahara qs->crp_ret_q_len--;
1964e5bd2a12Stls crp->crp_flags &= ~CRYPTO_F_ONRETQ;
1965e5bd2a12Stls }
1966b6082029Sknakahara krp = TAILQ_FIRST(crp_ret_kq);
1967e5bd2a12Stls if (krp != NULL) {
1968b6082029Sknakahara TAILQ_REMOVE(crp_ret_kq, krp, krp_next);
1969b6082029Sknakahara qs->crp_ret_q_len--;
1970e5bd2a12Stls krp->krp_flags &= ~CRYPTO_F_ONRETQ;
1971e5bd2a12Stls }
1972e5bd2a12Stls
1973e5bd2a12Stls /* drop before calling any callbacks. */
1974024c4368Sknakahara if (crp == NULL && krp == NULL)
1975024c4368Sknakahara break;
1976ed830de9Sad
1977b6082029Sknakahara mutex_spin_exit(&qs->crp_ret_q_mtx);
1978cdfce9ceSjonathan if (crp != NULL) {
1979cdfce9ceSjonathan #ifdef CRYPTO_TIMING
1980cdfce9ceSjonathan if (crypto_timing) {
1981cdfce9ceSjonathan /*
1982cdfce9ceSjonathan * NB: We must copy the timestamp before
1983cdfce9ceSjonathan * doing the callback as the cryptop is
1984cdfce9ceSjonathan * likely to be reclaimed.
1985cdfce9ceSjonathan */
1986cdfce9ceSjonathan struct timespec t = crp->crp_tstamp;
1987cdfce9ceSjonathan crypto_tstat(&cryptostats.cs_cb, &t);
1988cdfce9ceSjonathan crp->crp_callback(crp);
1989cdfce9ceSjonathan crypto_tstat(&cryptostats.cs_finis, &t);
1990cdfce9ceSjonathan } else
1991cdfce9ceSjonathan #endif
1992e5bd2a12Stls {
1993cdfce9ceSjonathan crp->crp_callback(crp);
1994cdfce9ceSjonathan }
1995e5bd2a12Stls }
1996cdfce9ceSjonathan if (krp != NULL)
1997cdfce9ceSjonathan krp->krp_callback(krp);
1998ed830de9Sad
1999b6082029Sknakahara mutex_spin_enter(&qs->crp_ret_q_mtx);
2000cdfce9ceSjonathan }
2001b6082029Sknakahara crypto_put_crp_ret_qs(curcpu());
2002cdfce9ceSjonathan }
20032dd4f4d9Spgoyette
20042dd4f4d9Spgoyette /* NetBSD module interface */
20052dd4f4d9Spgoyette
20062dd4f4d9Spgoyette MODULE(MODULE_CLASS_MISC, opencrypto, NULL);
20072dd4f4d9Spgoyette
20082dd4f4d9Spgoyette static int
opencrypto_modcmd(modcmd_t cmd,void * opaque)20092dd4f4d9Spgoyette opencrypto_modcmd(modcmd_t cmd, void *opaque)
20102dd4f4d9Spgoyette {
2011aa36e9a1Spgoyette int error = 0;
20122dd4f4d9Spgoyette
20132dd4f4d9Spgoyette switch (cmd) {
20142dd4f4d9Spgoyette case MODULE_CMD_INIT:
2015c01db8ecSpgoyette #ifdef _MODULE
2016aa36e9a1Spgoyette error = crypto_init();
2017c01db8ecSpgoyette #endif
2018aa36e9a1Spgoyette break;
20192dd4f4d9Spgoyette case MODULE_CMD_FINI:
2020c01db8ecSpgoyette #ifdef _MODULE
2021aa36e9a1Spgoyette error = crypto_destroy(true);
2022c01db8ecSpgoyette #endif
2023aa36e9a1Spgoyette break;
20242dd4f4d9Spgoyette default:
2025aa36e9a1Spgoyette error = ENOTTY;
20262dd4f4d9Spgoyette }
2027aa36e9a1Spgoyette return error;
20282dd4f4d9Spgoyette }
2029