xref: /netbsd-src/sys/opencrypto/crypto.c (revision d48f14661dda8638fee055ba15d35bdfb29b9fa8)
1 /*	$NetBSD: crypto.c,v 1.14 2006/06/07 22:34:18 kardel Exp $ */
2 /*	$FreeBSD: src/sys/opencrypto/crypto.c,v 1.4.2.5 2003/02/26 00:14:05 sam Exp $	*/
3 /*	$OpenBSD: crypto.c,v 1.41 2002/07/17 23:52:38 art Exp $	*/
4 
5 /*
6  * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu)
7  *
8  * This code was written by Angelos D. Keromytis in Athens, Greece, in
9  * February 2000. Network Security Technologies Inc. (NSTI) kindly
10  * supported the development of this code.
11  *
12  * Copyright (c) 2000, 2001 Angelos D. Keromytis
13  *
14  * Permission to use, copy, and modify this software with or without fee
15  * is hereby granted, provided that this entire notice is included in
16  * all source code copies of any software which is or includes a copy or
17  * modification of this software.
18  *
19  * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
20  * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
21  * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
22  * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
23  * PURPOSE.
24  */
25 
26 #include <sys/cdefs.h>
27 __KERNEL_RCSID(0, "$NetBSD: crypto.c,v 1.14 2006/06/07 22:34:18 kardel Exp $");
28 
29 /* XXX FIXME: should be defopt'ed */
30 #define CRYPTO_TIMING			/* enable cryptop timing stuff */
31 
32 #include <sys/param.h>
33 #include <sys/reboot.h>
34 #include <sys/systm.h>
35 #include <sys/malloc.h>
36 #include <sys/proc.h>
37 #include <sys/pool.h>
38 #include <opencrypto/cryptodev.h>
39 #include <sys/kthread.h>
40 #include <sys/once.h>
41 #include <sys/sysctl.h>
42 
43 #include <opencrypto/xform.h>			/* XXX for M_XDATA */
44 
45 #ifdef __NetBSD__
46   #define splcrypto splnet
  /* below are kludges to check what's still missing */
48   #define SWI_CRYPTO 17
49   #define register_swi(lvl, fn)  \
50   softintr_establish(IPL_SOFTNET, (void (*)(void*))fn, NULL)
51   #define unregister_swi(lvl, fn)  softintr_disestablish(softintr_cookie)
52   #define setsoftcrypto(x) softintr_schedule(x)
53 #endif
54 
55 #define	SESID2HID(sid)	(((sid) >> 32) & 0xffffffff)
56 
57 /*
58  * Crypto drivers register themselves by allocating a slot in the
59  * crypto_drivers table with crypto_get_driverid() and then registering
60  * each algorithm they support with crypto_register() and crypto_kregister().
61  */
62 static	struct cryptocap *crypto_drivers;
63 static	int crypto_drivers_num;
64 static	void* softintr_cookie;
65 
66 /*
67  * There are two queues for crypto requests; one for symmetric (e.g.
68  * cipher) operations and one for asymmetric (e.g. MOD) operations.
69  * See below for how synchronization is handled.
70  */
71 static	TAILQ_HEAD(,cryptop) crp_q =		/* request queues */
72 		TAILQ_HEAD_INITIALIZER(crp_q);
73 static	TAILQ_HEAD(,cryptkop) crp_kq =
74 		TAILQ_HEAD_INITIALIZER(crp_kq);
75 
76 /*
77  * There are two queues for processing completed crypto requests; one
78  * for the symmetric and one for the asymmetric ops.  We only need one
79  * but have two to avoid type futzing (cryptop vs. cryptkop).  See below
80  * for how synchronization is handled.
81  */
82 static	TAILQ_HEAD(,cryptop) crp_ret_q =	/* callback queues */
83 		TAILQ_HEAD_INITIALIZER(crp_ret_q);
84 static	TAILQ_HEAD(,cryptkop) crp_ret_kq =
85 		TAILQ_HEAD_INITIALIZER(crp_ret_kq);
86 
/*
 * Crypto op and descriptor data structures are allocated
 * from separate private zones (FreeBSD) / pools (NetBSD/OpenBSD).
 */
91 struct pool cryptop_pool;
92 struct pool cryptodesc_pool;
93 int crypto_pool_initialized = 0;
94 
95 #ifdef __NetBSD__
96 static void deferred_crypto_thread(void *arg);
97 #endif
98 
99 int	crypto_usercrypto = 1;		/* userland may open /dev/crypto */
100 int	crypto_userasymcrypto = 1;	/* userland may do asym crypto reqs */
101 /*
102  * cryptodevallowsoft is (intended to be) sysctl'able, controlling
103  * access to hardware versus software transforms as below:
104  *
105  * crypto_devallowsoft < 0:  Force userlevel requests to use software
106  *                              transforms, always
107  * crypto_devallowsoft = 0:  Use hardware if present, grant userlevel
108  *                              requests for non-accelerated transforms
109  *                              (handling the latter in software)
110  * crypto_devallowsoft > 0:  Allow user requests only for transforms which
111  *                               are hardware-accelerated.
112  */
113 int	crypto_devallowsoft = 1;	/* only use hardware crypto */
114 
115 #ifdef __FreeBSD__
116 SYSCTL_INT(_kern, OID_AUTO, usercrypto, CTLFLAG_RW,
117 	   &crypto_usercrypto, 0,
118 	   "Enable/disable user-mode access to crypto support");
119 SYSCTL_INT(_kern, OID_AUTO, userasymcrypto, CTLFLAG_RW,
120 	   &crypto_userasymcrypto, 0,
121 	   "Enable/disable user-mode access to asymmetric crypto support");
122 SYSCTL_INT(_kern, OID_AUTO, cryptodevallowsoft, CTLFLAG_RW,
123 	   &crypto_devallowsoft, 0,
124 	   "Enable/disable use of software asym crypto support");
125 #endif
126 #ifdef __NetBSD__
/*
 * Attach the opencrypto tunables directly under the kern.* sysctl
 * node: kern.usercrypto, kern.userasymcrypto and
 * kern.cryptodevallowsoft (see the policy comments on the backing
 * variables above).
 */
SYSCTL_SETUP(sysctl_opencrypto_setup, "sysctl opencrypto subtree setup")
{
	/* Ensure the kern node exists before hanging leaves off it. */
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_NODE, "kern", NULL,
		       NULL, 0, NULL, 0,
		       CTL_KERN, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		       CTLTYPE_INT, "usercrypto",
		       SYSCTL_DESCR("Enable/disable user-mode access to "
			   "crypto support"),
		       NULL, 0, &crypto_usercrypto, 0,
		       CTL_KERN, CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		       CTLTYPE_INT, "userasymcrypto",
		       SYSCTL_DESCR("Enable/disable user-mode access to "
			   "asymmetric crypto support"),
		       NULL, 0, &crypto_userasymcrypto, 0,
		       CTL_KERN, CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		       CTLTYPE_INT, "cryptodevallowsoft",
		       SYSCTL_DESCR("Enable/disable use of software "
			   "asymmetric crypto support"),
		       NULL, 0, &crypto_devallowsoft, 0,
		       CTL_KERN, CTL_CREATE, CTL_EOL);
}
156 #endif
157 
158 MALLOC_DEFINE(M_CRYPTO_DATA, "crypto", "crypto session records");
159 
160 /*
161  * Synchronization: read carefully, this is non-trivial.
162  *
163  * Crypto requests are submitted via crypto_dispatch.  Typically
164  * these come in from network protocols at spl0 (output path) or
165  * spl[,soft]net (input path).
166  *
167  * Requests are typically passed on the driver directly, but they
168  * may also be queued for processing by a software interrupt thread,
169  * cryptointr, that runs at splsoftcrypto.  This thread dispatches
170  * the requests to crypto drivers (h/w or s/w) who call crypto_done
171  * when a request is complete.  Hardware crypto drivers are assumed
172  * to register their IRQ's as network devices so their interrupt handlers
173  * and subsequent "done callbacks" happen at spl[imp,net].
174  *
175  * Completed crypto ops are queued for a separate kernel thread that
176  * handles the callbacks at spl0.  This decoupling insures the crypto
177  * driver interrupt service routine is not delayed while the callback
178  * takes place and that callbacks are delivered after a context switch
179  * (as opposed to a software interrupt that clients must block).
180  *
181  * This scheme is not intended for SMP machines.
182  */
183 static	void cryptointr(void);		/* swi thread to dispatch ops */
184 static	void cryptoret(void);		/* kernel thread for callbacks*/
185 static	struct proc *cryptoproc;
186 static	void crypto_destroy(void);
187 static	int crypto_invoke(struct cryptop *crp, int hint);
188 static	int crypto_kinvoke(struct cryptkop *krp, int hint);
189 
190 static struct cryptostats cryptostats;
191 static	int crypto_timing = 0;
192 
#ifdef __FreeBSD__
/*
 * FreeBSD sysctl glue.  NB: the original code declared the
 * kern.crypto_stats SYSCTL_STRUCT twice, which registers a duplicate
 * OID (and duplicate linker symbol) on FreeBSD builds; only one
 * declaration is kept here.
 */
SYSCTL_STRUCT(_kern, OID_AUTO, crypto_stats, CTLFLAG_RW, &cryptostats,
	    cryptostats, "Crypto system statistics");

SYSCTL_INT(_debug, OID_AUTO, crypto_timing, CTLFLAG_RW,
	   &crypto_timing, 0, "Enable/disable crypto timing support");
#endif /* __FreeBSD__ */
202 
203 static int
204 crypto_init0(void)
205 {
206 #ifdef __FreeBSD__
207 	int error;
208 
209 	cryptop_zone = zinit("cryptop", sizeof (struct cryptop), 0, 0, 1);
210 	cryptodesc_zone = zinit("cryptodesc", sizeof (struct cryptodesc),
211 				0, 0, 1);
212 	if (cryptodesc_zone == NULL || cryptop_zone == NULL) {
213 		printf("crypto_init: cannot setup crypto zones\n");
214 		return;
215 	}
216 #endif
217 
218 	crypto_drivers = malloc(CRYPTO_DRIVERS_INITIAL *
219 	    sizeof(struct cryptocap), M_CRYPTO_DATA, M_NOWAIT | M_ZERO);
220 	if (crypto_drivers == NULL) {
221 		printf("crypto_init: cannot malloc driver table\n");
222 		return 0;
223 	}
224 	crypto_drivers_num = CRYPTO_DRIVERS_INITIAL;
225 
226 	softintr_cookie = register_swi(SWI_CRYPTO, cryptointr);
227 #ifdef __FreeBSD__
228 	error = kthread_create((void (*)(void *)) cryptoret, NULL,
229 		    &cryptoproc, "cryptoret");
230 	if (error) {
231 		printf("crypto_init: cannot start cryptoret thread; error %d",
232 			error);
233 		crypto_destroy();
234 	}
235 #else
236 	/* defer thread creation until after boot */
237 	kthread_create( deferred_crypto_thread, NULL);
238 #endif
239 	return 0;
240 }
241 
/*
 * Public initialization entry point; safe to call repeatedly.
 * The real setup in crypto_init0() runs at most once via RUN_ONCE()
 * (its error result is discarded here).
 */
void
crypto_init(void)
{
	ONCE_DECL(crypto_init_once);

	RUN_ONCE(&crypto_init_once, crypto_init0);
}
249 
/*
 * Tear down state created by crypto_init0(): free the driver table
 * and unhook the software interrupt.  The request pools/zones are
 * deliberately not reclaimed (see XXX below).
 */
static void
crypto_destroy(void)
{
	/* XXX no wait to reclaim zones */
	if (crypto_drivers != NULL)
		free(crypto_drivers, M_CRYPTO_DATA);
	unregister_swi(SWI_CRYPTO, cryptointr);
}
258 
/*
 * Create a new session: pick the first eligible driver that supports
 * every algorithm in the cri chain and ask it to set up a session.
 *
 * sid:  out parameter; on success holds (driver id << 32) | driver lid.
 * cri:  linked list of cryptoini describing the transforms required.
 * hard: >0 require hardware, <0 require software, 0 accept either.
 *
 * Returns 0 on success, or an errno (EINVAL when no driver matches).
 */
int
crypto_newsession(u_int64_t *sid, struct cryptoini *cri, int hard)
{
	struct cryptoini *cr;
	u_int32_t hid, lid;
	int err = EINVAL;
	int s;

	s = splcrypto();

	if (crypto_drivers == NULL)
		goto done;

	/*
	 * The algorithm we use here is pretty stupid; just use the
	 * first driver that supports all the algorithms we need.
	 *
	 * XXX We need more smarts here (in real life too, but that's
	 * XXX another story altogether).
	 */

	for (hid = 0; hid < crypto_drivers_num; hid++) {
		/*
		 * If it's not initialized or has remaining sessions
		 * referencing it, skip.
		 */
		if (crypto_drivers[hid].cc_newsession == NULL ||
		    (crypto_drivers[hid].cc_flags & CRYPTOCAP_F_CLEANUP))
			continue;

		/* Hardware required -- ignore software drivers. */
		if (hard > 0 &&
		    (crypto_drivers[hid].cc_flags & CRYPTOCAP_F_SOFTWARE))
			continue;
		/* Software required -- ignore hardware drivers. */
		if (hard < 0 &&
		    (crypto_drivers[hid].cc_flags & CRYPTOCAP_F_SOFTWARE) == 0)
			continue;

		/* See if all the algorithms are supported. */
		for (cr = cri; cr; cr = cr->cri_next)
			if (crypto_drivers[hid].cc_alg[cr->cri_alg] == 0)
				break;

		/* cr == NULL iff the scan above matched every algorithm. */
		if (cr == NULL) {
			/* Ok, all algorithms are supported. */

			/*
			 * Can't do everything in one session.
			 *
			 * XXX Fix this. We need to inject a "virtual" session layer right
			 * XXX about here.
			 */

			/* Call the driver initialization routine. */
			lid = hid;		/* Pass the driver ID. */
			err = crypto_drivers[hid].cc_newsession(
					crypto_drivers[hid].cc_arg, &lid, cri);
			if (err == 0) {
				/*
				 * Compose the 64-bit sid: hid in the
				 * upper 32 bits, driver-local lid in
				 * the lower 32 (see SESID2HID).
				 */
				(*sid) = hid;
				(*sid) <<= 32;
				(*sid) |= (lid & 0xffffffff);
				crypto_drivers[hid].cc_sessions++;
			}
			goto done;
			/*break;*/
		}
	}
done:
	splx(s);
	return err;
}
334 
335 /*
336  * Delete an existing session (or a reserved session on an unregistered
337  * driver).
338  */
339 int
340 crypto_freesession(u_int64_t sid)
341 {
342 	u_int32_t hid;
343 	int err = 0;
344 	int s;
345 
346 	s = splcrypto();
347 
348 	if (crypto_drivers == NULL) {
349 		err = EINVAL;
350 		goto done;
351 	}
352 
353 	/* Determine two IDs. */
354 	hid = SESID2HID(sid);
355 
356 	if (hid >= crypto_drivers_num) {
357 		err = ENOENT;
358 		goto done;
359 	}
360 
361 	if (crypto_drivers[hid].cc_sessions)
362 		crypto_drivers[hid].cc_sessions--;
363 
364 	/* Call the driver cleanup routine, if available. */
365 	if (crypto_drivers[hid].cc_freesession)
366 		err = crypto_drivers[hid].cc_freesession(
367 				crypto_drivers[hid].cc_arg, sid);
368 	else
369 		err = 0;
370 
371 	/*
372 	 * If this was the last session of a driver marked as invalid,
373 	 * make the entry available for reuse.
374 	 */
375 	if ((crypto_drivers[hid].cc_flags & CRYPTOCAP_F_CLEANUP) &&
376 	    crypto_drivers[hid].cc_sessions == 0)
377 		bzero(&crypto_drivers[hid], sizeof(struct cryptocap));
378 
379 done:
380 	splx(s);
381 	return err;
382 }
383 
/*
 * Return an unused driver id.  Used by drivers prior to registering
 * support for the algorithms they handle.
 *
 * Scans crypto_drivers for a free slot, doubling the table when it
 * is full.  The slot is reserved by setting cc_sessions = 1 ("Mark");
 * crypto_register() resets it to 0 once the driver is usable.
 * Returns the slot index, or -1 on allocation failure or counter
 * wrap-around.
 */
int32_t
crypto_get_driverid(u_int32_t flags)
{
	struct cryptocap *newdrv;
	int i, s;

	/* Make sure the subsystem (and thus the table) is initialized. */
	crypto_init();

	s = splcrypto();
	for (i = 0; i < crypto_drivers_num; i++)
		if (crypto_drivers[i].cc_process == NULL &&
		    (crypto_drivers[i].cc_flags & CRYPTOCAP_F_CLEANUP) == 0 &&
		    crypto_drivers[i].cc_sessions == 0)
			break;

	/* Out of entries, allocate some more. */
	if (i == crypto_drivers_num) {
		/* Be careful about wrap-around. */
		if (2 * crypto_drivers_num <= crypto_drivers_num) {
			splx(s);
			printf("crypto: driver count wraparound!\n");
			return -1;
		}

		newdrv = malloc(2 * crypto_drivers_num *
		    sizeof(struct cryptocap), M_CRYPTO_DATA, M_NOWAIT|M_ZERO);
		if (newdrv == NULL) {
			splx(s);
			printf("crypto: no space to expand driver table!\n");
			return -1;
		}

		bcopy(crypto_drivers, newdrv,
		    crypto_drivers_num * sizeof(struct cryptocap));

		crypto_drivers_num *= 2;

		free(crypto_drivers, M_CRYPTO_DATA);
		crypto_drivers = newdrv;
	}

	/* NB: state is zero'd on free */
	crypto_drivers[i].cc_sessions = 1;	/* Mark */
	crypto_drivers[i].cc_flags = flags;

	if (bootverbose)
		printf("crypto: assign driver %u, flags %u\n", i, flags);

	splx(s);

	return i;
}
440 
441 static struct cryptocap *
442 crypto_checkdriver(u_int32_t hid)
443 {
444 	if (crypto_drivers == NULL)
445 		return NULL;
446 	return (hid >= crypto_drivers_num ? NULL : &crypto_drivers[hid]);
447 }
448 
/*
 * Register support for a key-related algorithm.  This routine
 * is called once for each algorithm supported by a driver.
 *
 * Records the capability flags for kalg in the driver's cc_kalg
 * table; the first call also installs the driver's kprocess handler
 * and opaque argument.  Returns 0, or EINVAL for a bad driver id or
 * out-of-range algorithm.
 */
int
crypto_kregister(u_int32_t driverid, int kalg, u_int32_t flags,
    int (*kprocess)(void*, struct cryptkop *, int),
    void *karg)
{
	int s;
	struct cryptocap *cap;
	int err;

	s = splcrypto();

	cap = crypto_checkdriver(driverid);
	/* NB: "CRK_ALGORITM_MIN" is the spelling used by cryptodev.h. */
	if (cap != NULL &&
	    (CRK_ALGORITM_MIN <= kalg && kalg <= CRK_ALGORITHM_MAX)) {
		/*
		 * XXX Do some performance testing to determine placing.
		 * XXX We probably need an auxiliary data structure that
		 * XXX describes relative performances.
		 */

		cap->cc_kalg[kalg] = flags | CRYPTO_ALG_FLAG_SUPPORTED;
		if (bootverbose)
			printf("crypto: driver %u registers key alg %u flags %u\n"
				, driverid
				, kalg
				, flags
			);

		/* Only the first registration installs the handler. */
		if (cap->cc_kprocess == NULL) {
			cap->cc_karg = karg;
			cap->cc_kprocess = kprocess;
		}
		err = 0;
	} else
		err = EINVAL;

	splx(s);
	return err;
}
492 
/*
 * Register support for a non-key-related algorithm.  This routine
 * is called once for each such algorithm supported by a driver.
 *
 * Records the capability flags and maximum operation length for alg;
 * the first call also installs the driver's session/process entry
 * points and clears the cc_sessions "Mark" left by
 * crypto_get_driverid().  Returns 0 or EINVAL.
 */
int
crypto_register(u_int32_t driverid, int alg, u_int16_t maxoplen,
    u_int32_t flags,
    int (*newses)(void*, u_int32_t*, struct cryptoini*),
    int (*freeses)(void*, u_int64_t),
    int (*process)(void*, struct cryptop *, int),
    void *arg)
{
	struct cryptocap *cap;
	int s, err;

	s = splcrypto();

	cap = crypto_checkdriver(driverid);
	/* NB: algorithms are in the range [1..max] */
	if (cap != NULL &&
	    (CRYPTO_ALGORITHM_MIN <= alg && alg <= CRYPTO_ALGORITHM_MAX)) {
		/*
		 * XXX Do some performance testing to determine placing.
		 * XXX We probably need an auxiliary data structure that
		 * XXX describes relative performances.
		 */

		cap->cc_alg[alg] = flags | CRYPTO_ALG_FLAG_SUPPORTED;
		cap->cc_max_op_len[alg] = maxoplen;
		if (bootverbose)
			printf("crypto: driver %u registers alg %u flags %u maxoplen %u\n"
				, driverid
				, alg
				, flags
				, maxoplen
			);

		/* Only the first registration installs the entry points. */
		if (cap->cc_process == NULL) {
			cap->cc_arg = arg;
			cap->cc_newsession = newses;
			cap->cc_process = process;
			cap->cc_freesession = freeses;
			cap->cc_sessions = 0;		/* Unmark */
		}
		err = 0;
	} else
		err = EINVAL;

	splx(s);
	return err;
}
544 
/*
 * Unregister a crypto driver. If there are pending sessions using it,
 * leave enough information around so that subsequent calls using those
 * sessions will correctly detect the driver has been unregistered and
 * reroute requests.
 *
 * Returns 0, or EINVAL if the driver/algorithm pair is not registered.
 */
int
crypto_unregister(u_int32_t driverid, int alg)
{
	int i, err, s;
	u_int32_t ses;
	struct cryptocap *cap;

	s = splcrypto();

	cap = crypto_checkdriver(driverid);
	if (cap != NULL &&
	    (CRYPTO_ALGORITHM_MIN <= alg && alg <= CRYPTO_ALGORITHM_MAX) &&
	    cap->cc_alg[alg] != 0) {
		cap->cc_alg[alg] = 0;
		cap->cc_max_op_len[alg] = 0;

		/* Was this the last algorithm ? */
		for (i = 1; i <= CRYPTO_ALGORITHM_MAX; i++)
			if (cap->cc_alg[i] != 0)
				break;

		if (i == CRYPTO_ALGORITHM_MAX + 1) {
			/*
			 * Zero the whole slot, then restore the session
			 * count and flag it for cleanup when sessions
			 * are still outstanding.
			 */
			ses = cap->cc_sessions;
			bzero(cap, sizeof(struct cryptocap));
			if (ses != 0) {
				/*
				 * If there are pending sessions, just mark as invalid.
				 */
				cap->cc_flags |= CRYPTOCAP_F_CLEANUP;
				cap->cc_sessions = ses;
			}
		}
		err = 0;
	} else
		err = EINVAL;

	splx(s);
	return err;
}
590 
591 /*
592  * Unregister all algorithms associated with a crypto driver.
593  * If there are pending sessions using it, leave enough information
594  * around so that subsequent calls using those sessions will
595  * correctly detect the driver has been unregistered and reroute
596  * requests.
597  */
598 int
599 crypto_unregister_all(u_int32_t driverid)
600 {
601 	int i, err, s = splcrypto();
602 	u_int32_t ses;
603 	struct cryptocap *cap;
604 
605 	cap = crypto_checkdriver(driverid);
606 	if (cap != NULL) {
607 		for (i = CRYPTO_ALGORITHM_MIN; i <= CRYPTO_ALGORITHM_MAX; i++) {
608 			cap->cc_alg[i] = 0;
609 			cap->cc_max_op_len[i] = 0;
610 		}
611 		ses = cap->cc_sessions;
612 		bzero(cap, sizeof(struct cryptocap));
613 		if (ses != 0) {
614 			/*
615 			 * If there are pending sessions, just mark as invalid.
616 			 */
617 			cap->cc_flags |= CRYPTOCAP_F_CLEANUP;
618 			cap->cc_sessions = ses;
619 		}
620 		err = 0;
621 	} else
622 		err = EINVAL;
623 
624 	splx(s);
625 	return err;
626 }
627 
628 /*
629  * Clear blockage on a driver.  The what parameter indicates whether
630  * the driver is now ready for cryptop's and/or cryptokop's.
631  */
632 int
633 crypto_unblock(u_int32_t driverid, int what)
634 {
635 	struct cryptocap *cap;
636 	int needwakeup, err, s;
637 
638 	s = splcrypto();
639 	cap = crypto_checkdriver(driverid);
640 	if (cap != NULL) {
641 		needwakeup = 0;
642 		if (what & CRYPTO_SYMQ) {
643 			needwakeup |= cap->cc_qblocked;
644 			cap->cc_qblocked = 0;
645 		}
646 		if (what & CRYPTO_ASYMQ) {
647 			needwakeup |= cap->cc_kqblocked;
648 			cap->cc_kqblocked = 0;
649 		}
650 		if (needwakeup) {
651 			setsoftcrypto(softintr_cookie);
652 		}
653 		err = 0;
654 	} else
655 		err = EINVAL;
656 	splx(s);
657 
658 	return err;
659 }
660 
/*
 * Dispatch a crypto request to a driver or queue
 * it, to be processed by the kernel thread.
 *
 * Without CRYPTO_F_BATCH the request goes straight to the driver
 * (unless it is blocked); with it, the request is queued on crp_q
 * for the swi thread.  Returns 0 or the result of crypto_invoke().
 */
int
crypto_dispatch(struct cryptop *crp)
{
	u_int32_t hid = SESID2HID(crp->crp_sid);
	int s, result;

	s = splcrypto();

	cryptostats.cs_ops++;

#ifdef CRYPTO_TIMING
	if (crypto_timing)
		nanouptime(&crp->crp_tstamp);
#endif
	if ((crp->crp_flags & CRYPTO_F_BATCH) == 0) {
		struct cryptocap *cap;
		/*
		 * Caller marked the request to be processed
		 * immediately; dispatch it directly to the
		 * driver unless the driver is currently blocked.
		 */
		cap = crypto_checkdriver(hid);
		if (cap && !cap->cc_qblocked) {
			result = crypto_invoke(crp, 0);
			if (result == ERESTART) {
				/*
				 * The driver ran out of resources, mark the
				 * driver ``blocked'' for cryptop's and put
				 * the op on the queue.
				 */
				crypto_drivers[hid].cc_qblocked = 1;
				TAILQ_INSERT_HEAD(&crp_q, crp, crp_next);
				cryptostats.cs_blocks++;
			}
		} else {
			/*
			 * The driver is blocked (or unknown); just queue
			 * the op until it unblocks and the swi thread
			 * gets kicked.
			 */
			TAILQ_INSERT_TAIL(&crp_q, crp, crp_next);
			result = 0;
		}
	} else {
		int wasempty = TAILQ_EMPTY(&crp_q);
		/*
		 * Caller marked the request as ``ok to delay'';
		 * queue it for the swi thread.  This is desirable
		 * when the operation is low priority and/or suitable
		 * for batching.
		 */
		TAILQ_INSERT_TAIL(&crp_q, crp, crp_next);
		/*
		 * Only schedule the swi on an empty->non-empty
		 * transition; otherwise it is already pending.
		 */
		if (wasempty) {
			setsoftcrypto(softintr_cookie);
		}

		result = 0;
	}
	splx(s);

	return result;
}
726 
/*
 * Add an asymmetric crypto request to a queue,
 * to be processed by the kernel thread.
 *
 * If the target driver exists and is not blocked the request is
 * handed to it directly via crypto_kinvoke(); otherwise it is queued
 * on crp_kq for the swi thread to retry later.
 */
int
crypto_kdispatch(struct cryptkop *krp)
{
	struct cryptocap *cap;
	int s, result;

	s = splcrypto();
	cryptostats.cs_kops++;

	cap = crypto_checkdriver(krp->krp_hid);
	if (cap && !cap->cc_kqblocked) {
		result = crypto_kinvoke(krp, 0);
		if (result == ERESTART) {
			/*
			 * The driver ran out of resources, mark the
			 * driver ``blocked'' for cryptkop's and put
			 * the op on the queue.
			 */
			crypto_drivers[krp->krp_hid].cc_kqblocked = 1;
			TAILQ_INSERT_HEAD(&crp_kq, krp, krp_next);
			cryptostats.cs_kblocks++;
		}
	} else {
		/*
		 * The driver is blocked (or not registered); just queue
		 * the op until it unblocks and the swi thread gets
		 * kicked.
		 */
		TAILQ_INSERT_TAIL(&crp_kq, krp, krp_next);
		result = 0;
	}
	splx(s);

	return result;
}
765 
/*
 * Dispatch an asymmetric crypto request to the appropriate crypto device.
 *
 * Scans the driver table for the first driver advertising support for
 * krp->krp_op (skipping software drivers when crypto_devallowsoft is
 * 0) and invokes its kprocess routine.  Failures -- including ENODEV
 * when no driver matches -- are reported through krp->krp_status and
 * crypto_kdone(); the function returns EINVAL only for the sanity
 * checks, otherwise 0.
 */
static int
crypto_kinvoke(struct cryptkop *krp, int hint)
{
	u_int32_t hid;
	int error;

	/* Sanity checks. */
	if (krp == NULL)
		return EINVAL;
	if (krp->krp_callback == NULL) {
		free(krp, M_XDATA);		/* XXX allocated in cryptodev */
		return EINVAL;
	}

	for (hid = 0; hid < crypto_drivers_num; hid++) {
		if ((crypto_drivers[hid].cc_flags & CRYPTOCAP_F_SOFTWARE) &&
		    crypto_devallowsoft == 0)
			continue;
		if (crypto_drivers[hid].cc_kprocess == NULL)
			continue;
		if ((crypto_drivers[hid].cc_kalg[krp->krp_op] &
		    CRYPTO_ALG_FLAG_SUPPORTED) == 0)
			continue;
		break;
	}
	if (hid < crypto_drivers_num) {
		/* Record the chosen driver for crypto_kdispatch() retries. */
		krp->krp_hid = hid;
		error = crypto_drivers[hid].cc_kprocess(
				crypto_drivers[hid].cc_karg, krp, hint);
	} else {
		error = ENODEV;
	}

	if (error) {
		krp->krp_status = error;
		crypto_kdone(krp);
	}
	return 0;
}
808 
#ifdef CRYPTO_TIMING
/*
 * Fold the time elapsed since *tv into the statistics record ts:
 * add it to the running total and update the observed min/max.
 * On return *tv holds the current time, so successive stages can be
 * measured with back-to-back calls.
 *
 * NOTE(review): ts->min starts at zero, so the '<' comparison never
 * replaces it after the first sample -- confirm whether minimum
 * tracking is intended to special-case the first measurement.
 */
static void
crypto_tstat(struct cryptotstat *ts, struct timespec *tv)
{
	struct timespec now, t;

	nanouptime(&now);
	t.tv_sec = now.tv_sec - tv->tv_sec;
	t.tv_nsec = now.tv_nsec - tv->tv_nsec;
	/* Normalize so 0 <= tv_nsec < 1e9. */
	if (t.tv_nsec < 0) {
		t.tv_sec--;
		t.tv_nsec += 1000000000;
	}
	/*
	 * Accumulate into ts->acc.  The previous code wrote the sum
	 * back into the local 't' (timespecadd(&ts->acc, &t, &t)),
	 * so the accumulator was never updated.
	 */
	timespecadd(&ts->acc, &t, &ts->acc);
	if (timespeccmp(&t, &ts->min, <))
		ts->min = t;
	if (timespeccmp(&t, &ts->max, >))
		ts->max = t;
	ts->count++;

	*tv = now;
}
#endif
832 
/*
 * Dispatch a crypto request to the appropriate crypto device.
 *
 * Returns EINVAL for the sanity-check failures; otherwise returns 0
 * (errors delivered via crp_etype/crypto_done()) or whatever the
 * driver's process routine returns -- notably ERESTART when it is
 * out of resources.
 */
static int
crypto_invoke(struct cryptop *crp, int hint)
{
	u_int32_t hid;
	int (*process)(void*, struct cryptop *, int);

#ifdef CRYPTO_TIMING
	if (crypto_timing)
		crypto_tstat(&cryptostats.cs_invoke, &crp->crp_tstamp);
#endif
	/* Sanity checks. */
	if (crp == NULL)
		return EINVAL;
	if (crp->crp_callback == NULL) {
		crypto_freereq(crp);
		return EINVAL;
	}
	if (crp->crp_desc == NULL) {
		crp->crp_etype = EINVAL;
		crypto_done(crp);
		return 0;
	}

	hid = SESID2HID(crp->crp_sid);
	if (hid < crypto_drivers_num) {
		/* Driver is going away: release the session first. */
		if (crypto_drivers[hid].cc_flags & CRYPTOCAP_F_CLEANUP)
			crypto_freesession(crp->crp_sid);
		process = crypto_drivers[hid].cc_process;
	} else {
		process = NULL;
	}

	if (process == NULL) {
		struct cryptodesc *crd;
		u_int64_t nid;

		/*
		 * Driver has unregistered; migrate the session and return
		 * an error to the caller so they'll resubmit the op.
		 */
		/*
		 * Chain the descriptors' embedded cryptoini records so
		 * they form an init list for crypto_newsession().
		 */
		for (crd = crp->crp_desc; crd->crd_next; crd = crd->crd_next)
			crd->CRD_INI.cri_next = &(crd->crd_next->CRD_INI);

		if (crypto_newsession(&nid, &(crp->crp_desc->CRD_INI), 0) == 0)
			crp->crp_sid = nid;

		crp->crp_etype = EAGAIN;
		crypto_done(crp);
		return 0;
	} else {
		/*
		 * Invoke the driver to process the request.
		 */
		return (*process)(crypto_drivers[hid].cc_arg, crp, hint);
	}
}
892 
893 /*
894  * Release a set of crypto descriptors.
895  */
896 void
897 crypto_freereq(struct cryptop *crp)
898 {
899 	struct cryptodesc *crd;
900 	int s;
901 
902 	if (crp == NULL)
903 		return;
904 
905 	s = splcrypto();
906 
907 	while ((crd = crp->crp_desc) != NULL) {
908 		crp->crp_desc = crd->crd_next;
909 		pool_put(&cryptodesc_pool, crd);
910 	}
911 
912 	pool_put(&cryptop_pool, crp);
913 	splx(s);
914 }
915 
/*
 * Acquire a request structure with num descriptors chained onto it.
 *
 * The backing pools are created lazily on the first call.  Allocation
 * is non-blocking (pool_get flags 0); returns NULL on shortage, with
 * any partially built chain released via crypto_freereq().
 */
struct cryptop *
crypto_getreq(int num)
{
	struct cryptodesc *crd;
	struct cryptop *crp;
	int s;

	s = splcrypto();

	/* First caller sets up the pools. */
	if (crypto_pool_initialized == 0) {
		pool_init(&cryptop_pool, sizeof(struct cryptop), 0, 0,
		    0, "cryptop", NULL);
		pool_init(&cryptodesc_pool, sizeof(struct cryptodesc), 0, 0,
		    0, "cryptodesc", NULL);
		crypto_pool_initialized = 1;
	}

	crp = pool_get(&cryptop_pool, 0);
	if (crp == NULL) {
		splx(s);
		return NULL;
	}
	bzero(crp, sizeof(struct cryptop));

	/* Push each descriptor onto the crp_desc list (LIFO order). */
	while (num--) {
		crd = pool_get(&cryptodesc_pool, 0);
		if (crd == NULL) {
			splx(s);
			crypto_freereq(crp);
			return NULL;
		}

		bzero(crd, sizeof(struct cryptodesc));
		crd->crd_next = crp->crp_desc;
		crp->crp_desc = crd;
	}

	splx(s);
	return crp;
}
959 
/*
 * Invoke the callback on behalf of the driver (symmetric ops).
 *
 * Normally the completed request is queued on crp_ret_q for the
 * callback kernel thread; on non-NetBSD systems a CRYPTO_F_CBIMM
 * request is called back directly from here (see comment below for
 * why NetBSD always takes the queued path).
 */
void
crypto_done(struct cryptop *crp)
{
	if (crp->crp_etype != 0)
		cryptostats.cs_errs++;
#ifdef CRYPTO_TIMING
	if (crypto_timing)
		crypto_tstat(&cryptostats.cs_done, &crp->crp_tstamp);
#endif
	/*
	 * On netbsd 1.6O, CBIMM does its wake_one() before the requestor
	 * has done its tsleep().
	 */
#ifndef __NetBSD__
	if (crp->crp_flags & CRYPTO_F_CBIMM) {
		/*
		 * Do the callback directly.  This is ok when the
		 * callback routine does very little (e.g. the
		 * /dev/crypto callback method just does a wakeup).
		 */
#ifdef CRYPTO_TIMING
		if (crypto_timing) {
			/*
			 * NB: We must copy the timestamp before
			 * doing the callback as the cryptop is
			 * likely to be reclaimed.
			 */
			struct timespec t = crp->crp_tstamp;
			crypto_tstat(&cryptostats.cs_cb, &t);
			crp->crp_callback(crp);
			crypto_tstat(&cryptostats.cs_finis, &t);
		} else
#endif
			crp->crp_callback(crp);
	} else
#endif /* __NetBSD__ */
	{
		int s, wasempty;
		/*
		 * Normal case; queue the callback for the thread.
		 *
		 * The return queue is manipulated by the swi thread
		 * and, potentially, by crypto device drivers calling
		 * back to mark operations completed.  Thus we need
		 * to mask both while manipulating the return queue.
		 */
		s = splcrypto();
		wasempty = TAILQ_EMPTY(&crp_ret_q);
		TAILQ_INSERT_TAIL(&crp_ret_q, crp, crp_next);
		/* Wake the callback thread only on empty->non-empty. */
		if (wasempty)
			wakeup_one(&crp_ret_q);
		splx(s);
	}
}
1017 
/*
 * Invoke the callback on behalf of the driver (asymmetric ops).
 * Queues the completed request on crp_ret_kq for the callback thread.
 */
void
crypto_kdone(struct cryptkop *krp)
{
	int s, wasempty;

	if (krp->krp_status != 0)
		cryptostats.cs_kerrs++;
	/*
	 * The return queue is manipulated by the swi thread
	 * and, potentially, by crypto device drivers calling
	 * back to mark operations completed.  Thus we need
	 * to mask both while manipulating the return queue.
	 */
	s = splcrypto();
	wasempty = TAILQ_EMPTY(&crp_ret_kq);
	TAILQ_INSERT_TAIL(&crp_ret_kq, krp, krp_next);
	/*
	 * NB: wakes the crp_ret_q channel, not crp_ret_kq --
	 * presumably the callback thread sleeps on crp_ret_q for
	 * both queues; confirm against cryptoret().
	 */
	if (wasempty)
		wakeup_one(&crp_ret_q);
	splx(s);
}
1041 
1042 int
1043 crypto_getfeat(int *featp)
1044 {
1045 	int hid, kalg, feat = 0;
1046 	int s;
1047 
1048 	s = splcrypto();
1049 
1050 	if (crypto_userasymcrypto == 0)
1051 		goto out;
1052 
1053 	for (hid = 0; hid < crypto_drivers_num; hid++) {
1054 		if ((crypto_drivers[hid].cc_flags & CRYPTOCAP_F_SOFTWARE) &&
1055 		    crypto_devallowsoft == 0) {
1056 			continue;
1057 		}
1058 		if (crypto_drivers[hid].cc_kprocess == NULL)
1059 			continue;
1060 		for (kalg = 0; kalg < CRK_ALGORITHM_MAX; kalg++)
1061 			if ((crypto_drivers[hid].cc_kalg[kalg] &
1062 			    CRYPTO_ALG_FLAG_SUPPORTED) != 0)
1063 				feat |=  1 << kalg;
1064 	}
1065 out:
1066 	splx(s);
1067 	*featp = feat;
1068 	return (0);
1069 }
1070 
1071 /*
1072  * Software interrupt thread to dispatch crypto requests.
1073  */
1074 static void
1075 cryptointr(void)
1076 {
1077 	struct cryptop *crp, *submit;
1078 	struct cryptkop *krp;
1079 	struct cryptocap *cap;
1080 	int result, hint, s;
1081 
1082 	printf("crypto softint\n");
1083 	cryptostats.cs_intrs++;
1084 	s = splcrypto();
1085 	do {
1086 		/*
1087 		 * Find the first element in the queue that can be
1088 		 * processed and look-ahead to see if multiple ops
1089 		 * are ready for the same driver.
1090 		 */
1091 		submit = NULL;
1092 		hint = 0;
1093 		TAILQ_FOREACH(crp, &crp_q, crp_next) {
1094 			u_int32_t hid = SESID2HID(crp->crp_sid);
1095 			cap = crypto_checkdriver(hid);
1096 			if (cap == NULL || cap->cc_process == NULL) {
1097 				/* Op needs to be migrated, process it. */
1098 				if (submit == NULL)
1099 					submit = crp;
1100 				break;
1101 			}
1102 			if (!cap->cc_qblocked) {
1103 				if (submit != NULL) {
1104 					/*
1105 					 * We stop on finding another op,
1106 					 * regardless whether its for the same
1107 					 * driver or not.  We could keep
1108 					 * searching the queue but it might be
1109 					 * better to just use a per-driver
1110 					 * queue instead.
1111 					 */
1112 					if (SESID2HID(submit->crp_sid) == hid)
1113 						hint = CRYPTO_HINT_MORE;
1114 					break;
1115 				} else {
1116 					submit = crp;
1117 					if ((submit->crp_flags & CRYPTO_F_BATCH) == 0)
1118 						break;
1119 					/* keep scanning for more are q'd */
1120 				}
1121 			}
1122 		}
1123 		if (submit != NULL) {
1124 			TAILQ_REMOVE(&crp_q, submit, crp_next);
1125 			result = crypto_invoke(submit, hint);
1126 			if (result == ERESTART) {
1127 				/*
1128 				 * The driver ran out of resources, mark the
1129 				 * driver ``blocked'' for cryptop's and put
1130 				 * the request back in the queue.  It would
1131 				 * best to put the request back where we got
1132 				 * it but that's hard so for now we put it
1133 				 * at the front.  This should be ok; putting
1134 				 * it at the end does not work.
1135 				 */
1136 				/* XXX validate sid again? */
1137 				crypto_drivers[SESID2HID(submit->crp_sid)].cc_qblocked = 1;
1138 				TAILQ_INSERT_HEAD(&crp_q, submit, crp_next);
1139 				cryptostats.cs_blocks++;
1140 			}
1141 		}
1142 
1143 		/* As above, but for key ops */
1144 		TAILQ_FOREACH(krp, &crp_kq, krp_next) {
1145 			cap = crypto_checkdriver(krp->krp_hid);
1146 			if (cap == NULL || cap->cc_kprocess == NULL) {
1147 				/* Op needs to be migrated, process it. */
1148 				break;
1149 			}
1150 			if (!cap->cc_kqblocked)
1151 				break;
1152 		}
1153 		if (krp != NULL) {
1154 			TAILQ_REMOVE(&crp_kq, krp, krp_next);
1155 			result = crypto_kinvoke(krp, 0);
1156 			if (result == ERESTART) {
1157 				/*
1158 				 * The driver ran out of resources, mark the
1159 				 * driver ``blocked'' for cryptkop's and put
1160 				 * the request back in the queue.  It would
1161 				 * best to put the request back where we got
1162 				 * it but that's hard so for now we put it
1163 				 * at the front.  This should be ok; putting
1164 				 * it at the end does not work.
1165 				 */
1166 				/* XXX validate sid again? */
1167 				crypto_drivers[krp->krp_hid].cc_kqblocked = 1;
1168 				TAILQ_INSERT_HEAD(&crp_kq, krp, krp_next);
1169 				cryptostats.cs_kblocks++;
1170 			}
1171 		}
1172 	} while (submit != NULL || krp != NULL);
1173 	splx(s);
1174 }
1175 
1176 /*
1177  * Kernel thread to do callbacks.
1178  */
1179 static void
1180 cryptoret(void)
1181 {
1182 	struct cryptop *crp;
1183 	struct cryptkop *krp;
1184 	int s;
1185 
1186 	s = splcrypto();
1187 	for (;;) {
1188 		crp = TAILQ_FIRST(&crp_ret_q);
1189 		if (crp != NULL)
1190 			TAILQ_REMOVE(&crp_ret_q, crp, crp_next);
1191 		krp = TAILQ_FIRST(&crp_ret_kq);
1192 		if (krp != NULL)
1193 			TAILQ_REMOVE(&crp_ret_kq, krp, krp_next);
1194 
1195 		if (crp != NULL || krp != NULL) {
1196 			splx(s);		/* lower ipl for callbacks */
1197 			if (crp != NULL) {
1198 #ifdef CRYPTO_TIMING
1199 				if (crypto_timing) {
1200 					/*
1201 					 * NB: We must copy the timestamp before
1202 					 * doing the callback as the cryptop is
1203 					 * likely to be reclaimed.
1204 					 */
1205 					struct timespec t = crp->crp_tstamp;
1206 					crypto_tstat(&cryptostats.cs_cb, &t);
1207 					crp->crp_callback(crp);
1208 					crypto_tstat(&cryptostats.cs_finis, &t);
1209 				} else
1210 #endif
1211 					crp->crp_callback(crp);
1212 			}
1213 			if (krp != NULL)
1214 				krp->krp_callback(krp);
1215 			s  = splcrypto();
1216 		} else {
1217 			(void) tsleep(&crp_ret_q, PLOCK, "crypto_wait", 0);
1218 			cryptostats.cs_rets++;
1219 		}
1220 	}
1221 }
1222 
1223 static void
1224 deferred_crypto_thread(void *arg)
1225 {
1226 	int error;
1227 
1228 	error = kthread_create1((void (*)(void*)) cryptoret, NULL,
1229 				&cryptoproc, "cryptoret");
1230 	if (error) {
1231 		printf("crypto_init: cannot start cryptoret thread; error %d",
1232 		    error);
1233 		crypto_destroy();
1234 	}
1235 }
1236 
1237 #ifdef __FreeBSD__
1238 /*
1239  * Initialization code, both for static and dynamic loading.
1240  */
1241 static int
1242 crypto_modevent(module_t mod, int type, void *unused)
1243 {
1244 	int error = EINVAL;
1245 
1246 	switch (type) {
1247 	case MOD_LOAD:
1248 		error = crypto_init();
1249 		if (error == 0 && bootverbose)
1250 			printf("crypto: <crypto core>\n");
1251 		break;
1252 	case MOD_UNLOAD:
1253 		/*XXX disallow if active sessions */
1254 		error = 0;
1255 		crypto_destroy();
1256 		break;
1257 	}
1258 	return error;
1259 }
/* FreeBSD module glue: register the crypto core at driver-init time. */
static moduledata_t crypto_mod = {
	"crypto",		/* module name */
	crypto_modevent,	/* event handler */
	0			/* extra data (unused) */
};

MODULE_VERSION(crypto, 1);
DECLARE_MODULE(crypto, crypto_mod, SI_SUB_DRIVERS, SI_ORDER_FIRST);
1268 #endif /* __FreeBSD__ */
1269 
1270 
1271