1 /*	$NetBSD: cryptodev.c,v 1.49 2009/04/11 23:05:26 christos Exp $ */
2 /*	$FreeBSD: src/sys/opencrypto/cryptodev.c,v 1.4.2.4 2003/06/03 00:09:02 sam Exp $	*/
3 /*	$OpenBSD: cryptodev.c,v 1.53 2002/07/10 22:21:30 mickey Exp $	*/
4 
5 /*-
6  * Copyright (c) 2008 The NetBSD Foundation, Inc.
7  * All rights reserved.
8  *
9  * This code is derived from software contributed to The NetBSD Foundation
10  * by Coyote Point Systems, Inc.
11  *
12  * Redistribution and use in source and binary forms, with or without
13  * modification, are permitted provided that the following conditions
14  * are met:
15  * 1. Redistributions of source code must retain the above copyright
16  *    notice, this list of conditions and the following disclaimer.
17  * 2. Redistributions in binary form must reproduce the above copyright
18  *    notice, this list of conditions and the following disclaimer in the
19  *    documentation and/or other materials provided with the distribution.
20  *
21  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
22  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
23  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
24  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
25  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31  * POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 /*
35  * Copyright (c) 2001 Theo de Raadt
36  *
37  * Redistribution and use in source and binary forms, with or without
38  * modification, are permitted provided that the following conditions
39  * are met:
40  *
41  * 1. Redistributions of source code must retain the above copyright
42  *   notice, this list of conditions and the following disclaimer.
43  * 2. Redistributions in binary form must reproduce the above copyright
44  *   notice, this list of conditions and the following disclaimer in the
45  *   documentation and/or other materials provided with the distribution.
46  * 3. The name of the author may not be used to endorse or promote products
47  *   derived from this software without specific prior written permission.
48  *
49  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
50  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
51  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
52  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
53  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
54  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
55  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
56  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
57  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
58  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
59  *
60  * Effort sponsored in part by the Defense Advanced Research Projects
61  * Agency (DARPA) and Air Force Research Laboratory, Air Force
62  * Materiel Command, USAF, under agreement number F30602-01-2-0537.
63  *
64  */
65 
66 #include <sys/cdefs.h>
67 __KERNEL_RCSID(0, "$NetBSD: cryptodev.c,v 1.49 2009/04/11 23:05:26 christos Exp $");
68 
69 #include <sys/param.h>
70 #include <sys/systm.h>
71 #include <sys/kmem.h>
72 #include <sys/malloc.h>
73 #include <sys/mbuf.h>
74 #include <sys/pool.h>
75 #include <sys/sysctl.h>
76 #include <sys/file.h>
77 #include <sys/filedesc.h>
78 #include <sys/errno.h>
79 #include <sys/md5.h>
80 #include <sys/sha1.h>
81 #include <sys/conf.h>
82 #include <sys/device.h>
83 #include <sys/kauth.h>
84 #include <sys/select.h>
85 #include <sys/poll.h>
86 #include <sys/atomic.h>
87 #include <sys/stat.h>
88 
89 #include "opt_ocf.h"
90 #include <opencrypto/cryptodev.h>
91 #include <opencrypto/ocryptodev.h>
92 #include <opencrypto/xform.h>
93 
94 struct csession {
95 	TAILQ_ENTRY(csession) next;
96 	u_int64_t	sid;
97 	u_int32_t	ses;
98 
99 	u_int32_t	cipher;		/* note: shares name space in crd_alg */
100 	struct enc_xform *txform;
101 	u_int32_t	mac;		/* note: shares name space in crd_alg */
102 	struct auth_hash *thash;
103 	u_int32_t	comp_alg;	/* note: shares name space in crd_alg */
104 	struct comp_algo *tcomp;
105 
106 	void *		key;
107 	int		keylen;
108 	u_char		tmp_iv[EALG_MAX_BLOCK_LEN];
109 
110 	void *		mackey;
111 	int		mackeylen;
112 	u_char		tmp_mac[CRYPTO_MAX_MAC_LEN];
113 
114 	struct iovec	iovec[1];	/* user requests never have more */
115 	struct uio	uio;
116 	int		error;
117 };
118 
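/*
 * Per-open state for /dev/crypto.  Each cloned file descriptor gets one
 * fcrypt, which holds its list of sessions plus the return queues used
 * by the asynchronous (multi-request) interface below.
 */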
119 struct fcrypt {
120 	TAILQ_HEAD(csessionlist, csession) csessions;
121 	TAILQ_HEAD(crprethead, cryptop) crp_ret_mq;
122 	TAILQ_HEAD(krprethead, cryptkop) crp_ret_mkq;
123 	int		sesn;
124 	struct selinfo	sinfo;
125 	u_int32_t	requestid;
126 	struct timespec atime;
127 	struct timespec mtime;
128 	struct timespec btime;
129 };
130 
131 /* For our fixed-size allocations */
132 static struct pool fcrpl;
133 static struct pool csepl;
134 
135 /* Declaration of master device (fd-cloning/ctxt-allocating) entrypoints */
136 static int	cryptoopen(dev_t dev, int flag, int mode, struct lwp *l);
137 static int	cryptoread(dev_t dev, struct uio *uio, int ioflag);
138 static int	cryptowrite(dev_t dev, struct uio *uio, int ioflag);
139 static int	cryptoselect(dev_t dev, int rw, struct lwp *l);
140 
141 /* Declaration of cloned-device (per-ctxt) entrypoints */
142 static int	cryptof_read(struct file *, off_t *, struct uio *,
143     kauth_cred_t, int);
144 static int	cryptof_write(struct file *, off_t *, struct uio *,
145     kauth_cred_t, int);
146 static int	cryptof_ioctl(struct file *, u_long, void *);
147 static int	cryptof_close(struct file *);
148 static int 	cryptof_poll(struct file *, int);
149 static int 	cryptof_stat(struct file *, struct stat *);
150 
151 static const struct fileops cryptofops = {
152 	.fo_read = cryptof_read,
153 	.fo_write = cryptof_write,
154 	.fo_ioctl = cryptof_ioctl,
155 	.fo_fcntl = fnullop_fcntl,
156 	.fo_poll = cryptof_poll,
157 	.fo_stat = cryptof_stat,
158 	.fo_close = cryptof_close,
159 	.fo_kqfilter = fnullop_kqfilter,
160 	.fo_drain = fnullop_drain,
161 };
162 
163 struct csession *cryptodev_csefind(struct fcrypt *, u_int);
164 static struct	csession *csefind(struct fcrypt *, u_int);
165 static int	csedelete(struct fcrypt *, struct csession *);
166 static struct	csession *cseadd(struct fcrypt *, struct csession *);
167 static struct	csession *csecreate(struct fcrypt *, u_int64_t, void *,
168     u_int64_t, void *, u_int64_t, u_int32_t, u_int32_t, u_int32_t,
169     struct enc_xform *, struct auth_hash *, struct comp_algo *);
170 static int	csefree(struct csession *);
171 
172 static int	cryptodev_key(struct crypt_kop *);
173 static int	cryptodev_mkey(struct fcrypt *, struct crypt_n_kop *, int);
174 static int	cryptodev_msessionfin(struct fcrypt *, int, u_int32_t *);
175 
176 static int	cryptodev_cb(void *);
177 static int	cryptodevkey_cb(void *);
178 
179 static int	cryptodev_mcb(void *);
180 static int	cryptodevkey_mcb(void *);
181 
182 static int 	cryptodev_getmstatus(struct fcrypt *, struct crypt_result *,
183     int);
184 static int	cryptodev_getstatus(struct fcrypt *, struct crypt_result *);
185 
186 extern int	ocryptof_ioctl(struct file *, u_long, void *);
187 
188 /*
189  * sysctl-able control variables for /dev/crypto now defined in crypto.c:
190  * crypto_usercrypto, crypto_userasmcrypto, crypto_devallowsoft.
191  */
192 
193 /* ARGSUSED */
194 int
195 cryptof_read(file_t *fp, off_t *poff,
196     struct uio *uio, kauth_cred_t cred, int flags)
197 {
198 	return EIO;
199 }
200 
201 /* ARGSUSED */
202 int
203 cryptof_write(file_t *fp, off_t *poff,
204     struct uio *uio, kauth_cred_t cred, int flags)
205 {
206 	return EIO;
207 }
208 
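/*
 * The ioctl interface below is easiest to follow from the caller's
 * side.  A minimal, illustrative userland sketch (not part of this
 * driver; buffer names, key material, and the choice of AES-CBC are
 * placeholders) might look like:
 *
 *	int fd = open("/dev/crypto", O_RDWR);
 *
 *	struct session_op sop;
 *	memset(&sop, 0, sizeof(sop));
 *	sop.cipher = CRYPTO_AES_CBC;
 *	sop.keylen = 16;
 *	sop.key = key;			// caller-supplied key buffer
 *	if (ioctl(fd, CIOCGSESSION, &sop) == -1)
 *		err(1, "CIOCGSESSION");
 *
 *	struct crypt_op cop;
 *	memset(&cop, 0, sizeof(cop));
 *	cop.ses = sop.ses;
 *	cop.op = COP_ENCRYPT;
 *	cop.len = len;			// a multiple of the block size
 *	cop.src = buf;
 *	cop.dst = buf;
 *	cop.iv = iv;			// blocksize-sized IV
 *	if (ioctl(fd, CIOCCRYPT, &cop) == -1)
 *		err(1, "CIOCCRYPT");
 *
 *	ioctl(fd, CIOCFSESSION, &sop.ses);
 *
 * CIOCCRYPT runs synchronously in the ioctl thread; the CIOCNCRYPTM
 * and CIOCNCRYPTRET(M) commands below provide the asynchronous,
 * batched variant.
 */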
209 /* ARGSUSED */
210 int
211 cryptof_ioctl(struct file *fp, u_long cmd, void *data)
212 {
213 	struct fcrypt *fcr = fp->f_data;
214 	struct csession *cse;
215 	struct session_op *sop;
216 	struct session_n_op *snop;
217 	struct crypt_op *cop;
218 	struct crypt_mop *mop;
219 	struct crypt_mkop *mkop;
220 	struct crypt_n_op *cnop;
221 	struct crypt_n_kop *knop;
222 	struct crypt_sgop *sgop;
223 	struct crypt_sfop *sfop;
224 	struct cryptret *crypt_ret;
225 	struct crypt_result *crypt_res;
226 	u_int32_t ses;
227 	u_int32_t *sesid;
228 	int error = 0;
229 	size_t count;
230 
231 	/* backwards compatibility */
232 	file_t *criofp;
233 	struct fcrypt *criofcr;
234 	int criofd;
235 
236 	mutex_spin_enter(&crypto_mtx);
237 	getnanotime(&fcr->atime);
238 	mutex_spin_exit(&crypto_mtx);
239 
240 	switch (cmd) {
241 	case CRIOGET:	/* XXX deprecated, remove after 5.0 */
242 		if ((error = fd_allocfile(&criofp, &criofd)) != 0)
243 			return error;
244 		criofcr = pool_get(&fcrpl, PR_WAITOK);
245 		mutex_spin_enter(&crypto_mtx);
246 		TAILQ_INIT(&criofcr->csessions);
247 		TAILQ_INIT(&criofcr->crp_ret_mq);
248 		TAILQ_INIT(&criofcr->crp_ret_mkq);
249 		selinit(&criofcr->sinfo);
250 
251 		/*
252 		 * Don't ever return session 0, to allow detection of
253 		 * failed creation attempts with multi-create ioctl.
254 		 */
255 		criofcr->sesn = 1;
256 		criofcr->requestid = 1;
257 		mutex_spin_exit(&crypto_mtx);
258 		(void)fd_clone(criofp, criofd, (FREAD|FWRITE),
259 			      &cryptofops, criofcr);
260 		*(u_int32_t *)data = criofd;
261 		return error;
262 		break;
263 	case CIOCGSESSION:
264 		sop = (struct session_op *)data;
265 		error = cryptodev_session(fcr, sop);
266 		break;
267 	case CIOCNGSESSION:
268 		sgop = (struct crypt_sgop *)data;
269 		snop = kmem_alloc((sgop->count *
270 				  sizeof(struct session_n_op)), KM_SLEEP);
271 		error = copyin(sgop->sessions, snop, sgop->count *
272 			       sizeof(struct session_n_op));
273 		if (error) {
274 			goto mbail;
275 		}
276 
277 		mutex_spin_enter(&crypto_mtx);
278 		fcr->mtime = fcr->atime;
279 		mutex_spin_exit(&crypto_mtx);
280 		error = cryptodev_msession(fcr, snop, sgop->count);
281 		if (error) {
282 			goto mbail;
283 		}
284 
285 		error = copyout(snop, sgop->sessions, sgop->count *
286 		    sizeof(struct session_n_op));
287 mbail:
288 		kmem_free(snop, sgop->count * sizeof(struct session_n_op));
289 		break;
290 	case CIOCFSESSION:
291 		mutex_spin_enter(&crypto_mtx);
292 		fcr->mtime = fcr->atime;
293 		ses = *(u_int32_t *)data;
294 		cse = csefind(fcr, ses);
295 		if (cse == NULL) {
			mutex_spin_exit(&crypto_mtx);
296 			return EINVAL;
		}
297 		csedelete(fcr, cse);
298 		error = csefree(cse);
299 		mutex_spin_exit(&crypto_mtx);
300 		break;
301 	case CIOCNFSESSION:
302 		mutex_spin_enter(&crypto_mtx);
303 		fcr->mtime = fcr->atime;
304 		mutex_spin_exit(&crypto_mtx);
305 		sfop = (struct crypt_sfop *)data;
306 		sesid = kmem_alloc((sfop->count * sizeof(u_int32_t)),
307 		    KM_SLEEP);
308 		error = copyin(sfop->sesid, sesid,
309 		    (sfop->count * sizeof(u_int32_t)));
310 		if (!error) {
311 			error = cryptodev_msessionfin(fcr, sfop->count, sesid);
312 		}
313 		kmem_free(sesid, (sfop->count * sizeof(u_int32_t)));
314 		break;
315 	case CIOCCRYPT:
316 		mutex_spin_enter(&crypto_mtx);
317 		fcr->mtime = fcr->atime;
318 		cop = (struct crypt_op *)data;
319 		cse = csefind(fcr, cop->ses);
320 		mutex_spin_exit(&crypto_mtx);
321 		if (cse == NULL) {
322 			DPRINTF(("csefind failed\n"));
323 			return EINVAL;
324 		}
325 		error = cryptodev_op(cse, cop, curlwp);
326 		DPRINTF(("cryptodev_op error = %d\n", error));
327 		break;
328 	case CIOCNCRYPTM:
329 		mutex_spin_enter(&crypto_mtx);
330 		fcr->mtime = fcr->atime;
331 		mutex_spin_exit(&crypto_mtx);
332 		mop = (struct crypt_mop *)data;
333 		cnop = kmem_alloc((mop->count * sizeof(struct crypt_n_op)),
334 		    KM_SLEEP);
335 		error = copyin(mop->reqs, cnop,
336 		    (mop->count * sizeof(struct crypt_n_op)));
337 		if (!error) {
338 			error = cryptodev_mop(fcr, cnop, mop->count, curlwp);
339 			if (!error) {
340 				error = copyout(cnop, mop->reqs,
341 				    (mop->count * sizeof(struct crypt_n_op)));
342 			}
343 		}
344 		kmem_free(cnop, (mop->count * sizeof(struct crypt_n_op)));
345 		break;
346 	case CIOCKEY:
347 		error = cryptodev_key((struct crypt_kop *)data);
348 		DPRINTF(("cryptodev_key error = %d\n", error));
349 		break;
350 	case CIOCNFKEYM:
351 		mutex_spin_enter(&crypto_mtx);
352 		fcr->mtime = fcr->atime;
353 		mutex_spin_exit(&crypto_mtx);
354 		mkop = (struct crypt_mkop *)data;
355 		knop = kmem_alloc((mkop->count * sizeof(struct crypt_n_kop)),
356 		    KM_SLEEP);
357 		error = copyin(mkop->reqs, knop,
358 		    (mkop->count * sizeof(struct crypt_n_kop)));
359 		if (!error) {
360 			error = cryptodev_mkey(fcr, knop, mkop->count);
361 			if (!error)
362 				error = copyout(knop, mkop->reqs,
363 				    (mkop->count * sizeof(struct crypt_n_kop)));
364 		}
365 		kmem_free(knop, (mkop->count * sizeof(struct crypt_n_kop)));
366 		break;
367 	case CIOCASYMFEAT:
368 		error = crypto_getfeat((int *)data);
369 		break;
370 	case CIOCNCRYPTRETM:
371 		mutex_spin_enter(&crypto_mtx);
372 		fcr->mtime = fcr->atime;
373 		mutex_spin_exit(&crypto_mtx);
374 		crypt_ret = (struct cryptret *)data;
375 		count = crypt_ret->count;
376 		crypt_res = kmem_alloc((count * sizeof(struct crypt_result)),
377 		    KM_SLEEP);
378 		error = copyin(crypt_ret->results, crypt_res,
379 		    (count * sizeof(struct crypt_result)));
380 		if (error)
381 			goto reterr;
382 		crypt_ret->count = cryptodev_getmstatus(fcr, crypt_res,
383 		    crypt_ret->count);
384 		/* sanity check count */
385 		if (crypt_ret->count > count) {
386 			printf("%s.%d: error returned count %zd > original "
387 			    "count %zd\n",
388 			    __FILE__, __LINE__, crypt_ret->count, count);
389 			crypt_ret->count = count;
390 
391 		}
392 		error = copyout(crypt_res, crypt_ret->results,
393 		    (crypt_ret->count * sizeof(struct crypt_result)));
394 reterr:
395 		kmem_free(crypt_res, (count * sizeof(struct crypt_result)));
396 		break;
397 	case CIOCNCRYPTRET:
398 		error = cryptodev_getstatus(fcr, (struct crypt_result *)data);
399 		break;
400 	default:
401 		/* Check for backward compatible commands */
402 		error = ocryptof_ioctl(fp, cmd, data);
403 	}
404 	return error;
405 }
406 
407 int
408 cryptodev_op(struct csession *cse, struct crypt_op *cop, struct lwp *l)
409 {
410 	struct cryptop *crp = NULL;
411 	struct cryptodesc *crde = NULL, *crda = NULL, *crdc = NULL;
412 	int error;
413 	int iov_len = cop->len;
414 	int flags=0;
415 	int dst_len;	/* copyout size */
416 
417 	if (cop->len > 256*1024-4)
418 		return E2BIG;
419 
420 	if (cse->txform) {
421 		if (cop->len == 0 || (cop->len % cse->txform->blocksize) != 0)
422 			return EINVAL;
423 	}
424 
425 	DPRINTF(("cryptodev_op[%d]: iov_len %d\n", (uint32_t)cse->sid, iov_len));
426 	if ((cse->tcomp) && cop->dst_len) {
427 		if (iov_len < cop->dst_len) {
428 			/* Need larger iov to deal with decompress */
429 			iov_len = cop->dst_len;
430 		}
431 		DPRINTF(("cryptodev_op: iov_len -> %d for decompress\n", iov_len));
432 	}
433 
434 	(void)memset(&cse->uio, 0, sizeof(cse->uio));
435 	cse->uio.uio_iovcnt = 1;
436 	cse->uio.uio_resid = 0;
437 	cse->uio.uio_rw = UIO_WRITE;
438 	cse->uio.uio_iov = cse->iovec;
439 	UIO_SETUP_SYSSPACE(&cse->uio);
440 	memset(&cse->iovec, 0, sizeof(cse->iovec));
441 
442 	/* The iov needs to be big enough to hold the uncompressed data. */
444 	cse->uio.uio_iov[0].iov_len = iov_len;
445 	cse->uio.uio_iov[0].iov_base = kmem_alloc(iov_len, KM_SLEEP);
446 	cse->uio.uio_resid = cse->uio.uio_iov[0].iov_len;
447 	DPRINTF(("cryptodev_op[%d]: uio.iov_base %p malloced %d bytes\n",
448 		(uint32_t)cse->sid, cse->uio.uio_iov[0].iov_base, iov_len));
449 
450 	crp = crypto_getreq((cse->tcomp != NULL) + (cse->txform != NULL) + (cse->thash != NULL));
451 	if (crp == NULL) {
452 		error = ENOMEM;
453 		goto bail;
454 	}
455 	DPRINTF(("cryptodev_op[%d]: crp %p\n", (uint32_t)cse->sid, crp));
456 
457 	/* crds are always ordered tcomp, thash, then txform */
458 	/* with optional missing links */
459 
460 	/* XXX: If we're going to compress then hash or encrypt, we need
461 	 * to be able to pass on the new size of the data.
462 	 */
463 
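	/*
	 * Illustrative layout of the descriptor chain when all three
	 * transforms are configured for the session:
	 *
	 *	crp->crp_desc -> crdc (compress) -> crda (MAC) -> crde (cipher)
	 *
	 * Any transform the session lacks simply drops out of the chain.
	 */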
464 	if (cse->tcomp) {
465 		crdc = crp->crp_desc;
466 	}
467 
468 	if (cse->thash) {
469 		crda = crdc ? crdc->crd_next : crp->crp_desc;
470 		if (cse->txform && crda)
471 			crde = crda->crd_next;
472 	} else {
473 		if (cse->txform) {
474 			crde = crdc ? crdc->crd_next : crp->crp_desc;
475 		} else if (!cse->tcomp) {
476 			error = EINVAL;
477 			goto bail;
478 		}
479 	}
480 
481 	DPRINTF(("ocf[%d]: iov_len %d, cop->len %d\n",
482 			(uint32_t)cse->sid,
483 			cse->uio.uio_iov[0].iov_len,
484 			cop->len));
485 
486 	if ((error = copyin(cop->src, cse->uio.uio_iov[0].iov_base, cop->len)))
487 	{
488 		printf("copyin failed: src %p, error %d\n", (void *)cop->src, error);
489 		goto bail;
490 	}
491 
492 	if (crdc) {
493 		switch (cop->op) {
494 		case COP_COMP:
495 			crdc->crd_flags |= CRD_F_COMP;
496 			break;
497 		case COP_DECOMP:
498 			crdc->crd_flags &= ~CRD_F_COMP;
499 			break;
500 		default:
501 			break;
502 		}
503 		/* more data to follow? */
504 		if (cop->flags & COP_F_MORE) {
505 			flags |= CRYPTO_F_MORE;
506 		}
507 		crdc->crd_len = cop->len;
508 		crdc->crd_inject = 0;
509 
510 		crdc->crd_alg = cse->comp_alg;
511 		crdc->crd_key = NULL;
512 		crdc->crd_klen = 0;
513 		DPRINTF(("cryptodev_op[%d]: crdc setup for comp_alg %d.\n",
514 					(uint32_t)cse->sid, crdc->crd_alg));
515 	}
516 
517 	if (crda) {
518 		crda->crd_skip = 0;
519 		crda->crd_len = cop->len;
520 		crda->crd_inject = 0;	/* ??? */
521 
522 		crda->crd_alg = cse->mac;
523 		crda->crd_key = cse->mackey;
524 		crda->crd_klen = cse->mackeylen * 8;
525 		DPRINTF(("cryptodev_op: crda setup for mac %d.\n", crda->crd_alg));
526 	}
527 
528 	if (crde) {
529 		switch (cop->op) {
530 		case COP_ENCRYPT:
531 			crde->crd_flags |= CRD_F_ENCRYPT;
532 			break;
533 		case COP_DECRYPT:
534 			crde->crd_flags &= ~CRD_F_ENCRYPT;
535 			break;
536 		default:
537 			break;
538 		}
539 		crde->crd_len = cop->len;
540 		crde->crd_inject = 0;
541 
542 		crde->crd_alg = cse->cipher;
543 		crde->crd_key = cse->key;
544 		crde->crd_klen = cse->keylen * 8;
545 		DPRINTF(("cryptodev_op: crde setup for cipher %d.\n", crde->crd_alg));
546 	}
547 
548 
549 	crp->crp_ilen = cop->len;
550 	/* The request is flagged CRYPTO_F_USER as long as it is running in
551 	 * the user ioctl thread.  This flag lets us skip the return queue
552 	 * when the request completes immediately; if the request is delayed
553 	 * or does not complete immediately, the flag is removed.
554 	 */
555 	crp->crp_flags = CRYPTO_F_IOV | (cop->flags & COP_F_BATCH) | CRYPTO_F_USER |
556 			flags;
557 	crp->crp_buf = (void *)&cse->uio;
558 	crp->crp_callback = (int (*) (struct cryptop *)) cryptodev_cb;
559 	crp->crp_sid = cse->sid;
560 	crp->crp_opaque = (void *)cse;
561 
562 	if (cop->iv) {
563 		if (crde == NULL) {
564 			error = EINVAL;
565 			goto bail;
566 		}
567 		if (cse->cipher == CRYPTO_ARC4) { /* XXX use flag? */
568 			error = EINVAL;
569 			goto bail;
570 		}
571 		if ((error = copyin(cop->iv, cse->tmp_iv,
572 		    cse->txform->blocksize)))
573 			goto bail;
574 		(void)memcpy(crde->crd_iv, cse->tmp_iv, cse->txform->blocksize);
575 		crde->crd_flags |= CRD_F_IV_EXPLICIT | CRD_F_IV_PRESENT;
576 		crde->crd_skip = 0;
577 	} else if (crde) {
578 		if (cse->cipher == CRYPTO_ARC4) { /* XXX use flag? */
579 			crde->crd_skip = 0;
580 		} else {
581 			crde->crd_flags |= CRD_F_IV_PRESENT;
582 			crde->crd_skip = cse->txform->blocksize;
583 			crde->crd_len -= cse->txform->blocksize;
584 		}
585 	}
586 
587 	if (cop->mac) {
588 		if (crda == NULL) {
589 			error = EINVAL;
590 			goto bail;
591 		}
592 		crp->crp_mac=cse->tmp_mac;
593 	}
594 
595 	/*
596 	 * XXX There was a comment here which said that we went to
597 	 * XXX splcrypto(), but needed to only if CRYPTO_F_CBIMM --
598 	 * XXX disabled on NetBSD since 1.6O due to a race condition.
599 	 * XXX But crypto_dispatch went to splcrypto() itself!  (And
600 	 * XXX now takes the crypto_mtx mutex itself.)  We do, however,
601 	 * XXX need to hold the mutex across the call to cv_wait().
602 	 * XXX     (Should we arrange for crypto_dispatch to return to
603 	 * XXX      us with it held?  It seems quite ugly to do so.)
604 	 */
606 #ifdef notyet
607 eagain:
608 #endif
609 	error = crypto_dispatch(crp);
610 	mutex_spin_enter(&crypto_mtx);
611 
612 	/*
613 	 * If the request was going to be completed by the
614 	 * ioctl thread then it would have been done by now.
615 	 * Remove the F_USER flag so that crypto_done() is not confused
616 	 * if the crypto device calls it after this point.
617 	 */
618 	crp->crp_flags &= ~(CRYPTO_F_USER);
619 
620 	switch (error) {
621 #ifdef notyet	/* don't loop forever -- but EAGAIN not possible here yet */
622 	case EAGAIN:
623 		mutex_spin_exit(&crypto_mtx);
624 		goto eagain;
625 		break;
626 #endif
627 	case 0:
628 		break;
629 	default:
630 		DPRINTF(("cryptodev_op: not waiting, error.\n"));
631 		mutex_spin_exit(&crypto_mtx);
632 		goto bail;
633 	}
634 
635 	while (!(crp->crp_flags & CRYPTO_F_DONE)) {
636 		DPRINTF(("cryptodev_op[%d]: sleeping on cv %08x for crp %08x\n",
637 			(uint32_t)cse->sid, (uint32_t)&crp->crp_cv,
638 			(uint32_t)crp));
639 		cv_wait(&crp->crp_cv, &crypto_mtx);	/* XXX cv_wait_sig? */
640 	}
641 	if (crp->crp_flags & CRYPTO_F_ONRETQ) {
642 		/* XXX this should never happen now with the CRYPTO_F_USER flag
643 		 * changes.
644 		 */
645 		DPRINTF(("cryptodev_op: DONE, not woken by cryptoret.\n"));
646 		(void)crypto_ret_q_remove(crp);
647 	}
648 	mutex_spin_exit(&crypto_mtx);
649 
650 	if (crp->crp_etype != 0) {
651 		DPRINTF(("cryptodev_op: crp_etype %d\n", crp->crp_etype));
652 		error = crp->crp_etype;
653 		goto bail;
654 	}
655 
656 	if (cse->error) {
657 		DPRINTF(("cryptodev_op: cse->error %d\n", cse->error));
658 		error = cse->error;
659 		goto bail;
660 	}
661 
662 	dst_len = crp->crp_ilen;
663 	/* let the user know how much data was returned */
664 	if (crp->crp_olen) {
665 		dst_len = cop->dst_len = crp->crp_olen;
666 	}
667 	crp->len = dst_len;
668 
669 	if (cop->dst) {
670 		DPRINTF(("cryptodev_op: copyout %d bytes to %p\n", dst_len, cop->dst));
671 	}
672 	if (cop->dst &&
673 	    (error = copyout(cse->uio.uio_iov[0].iov_base, cop->dst, dst_len)))
674 	{
675 		DPRINTF(("cryptodev_op: copyout error %d\n", error));
676 		goto bail;
677 	}
678 
679 	if (cop->mac &&
680 	    (error = copyout(crp->crp_mac, cop->mac, cse->thash->authsize))) {
681 		DPRINTF(("cryptodev_op: mac copyout error %d\n", error));
682 		goto bail;
683 	}
684 
685 
686 bail:
687 	if (crp) {
688 		crypto_freereq(crp);
689 	}
690 	if (cse->uio.uio_iov[0].iov_base) {
691 		kmem_free(cse->uio.uio_iov[0].iov_base,iov_len);
692 	}
693 
694 	return error;
695 }
696 
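/*
 * Completion callback for requests submitted synchronously from the
 * ioctl thread (cryptodev_op).  It records the completion status in the
 * session, redispatches the request if the driver returned EAGAIN, and
 * wakes the waiter sleeping on crp_cv.
 */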
697 static int
698 cryptodev_cb(void *op)
699 {
700 	struct cryptop *crp = (struct cryptop *) op;
701 	struct csession *cse = (struct csession *)crp->crp_opaque;
702 	int error = 0;
703 
704 	mutex_spin_enter(&crypto_mtx);
705 	cse->error = crp->crp_etype;
706 	if (crp->crp_etype == EAGAIN) {
707 		/* always drop mutex to call dispatch routine */
708 		mutex_spin_exit(&crypto_mtx);
709 		error = crypto_dispatch(crp);
710 		mutex_spin_enter(&crypto_mtx);
711 	}
712 	if (error != 0 || (crp->crp_flags & CRYPTO_F_DONE)) {
713 		cv_signal(&crp->crp_cv);
714 	}
715 	mutex_spin_exit(&crypto_mtx);
716 	return 0;
717 }
718 
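/*
 * Completion callback for batched (CIOCNCRYPTM) requests.  Besides
 * waking any waiter, the finished request is queued on the owning
 * fcrypt's return queue and pollers are notified, so the result can be
 * collected later with CIOCNCRYPTRET(M).
 */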
719 static int
720 cryptodev_mcb(void *op)
721 {
722 	struct cryptop *crp = (struct cryptop *) op;
723 	struct csession *cse = (struct csession *)crp->crp_opaque;
724 	int  error=0;
725 
726 	mutex_spin_enter(&crypto_mtx);
727 	cse->error = crp->crp_etype;
728 	if (crp->crp_etype == EAGAIN) {
729 		mutex_spin_exit(&crypto_mtx);
730 		error = crypto_dispatch(crp);
731 		mutex_spin_enter(&crypto_mtx);
732 	}
733 	if (error != 0 || (crp->crp_flags & CRYPTO_F_DONE)) {
734 		cv_signal(&crp->crp_cv);
735 	}
736 
737 	TAILQ_INSERT_TAIL(&crp->fcrp->crp_ret_mq, crp, crp_next);
738 	selnotify(&crp->fcrp->sinfo, 0, 0);
739 	mutex_spin_exit(&crypto_mtx);
740 	return 0;
741 }
742 
743 static int
744 cryptodevkey_cb(void *op)
745 {
746 	struct cryptkop *krp = op;
747 
748 	mutex_spin_enter(&crypto_mtx);
749 	cv_signal(&krp->krp_cv);
750 	mutex_spin_exit(&crypto_mtx);
751 	return 0;
752 }
753 
754 static int
755 cryptodevkey_mcb(void *op)
756 {
757 	struct cryptkop *krp = op;
758 
759 	mutex_spin_enter(&crypto_mtx);
760 	cv_signal(&krp->krp_cv);
761 	TAILQ_INSERT_TAIL(&krp->fcrp->crp_ret_mkq, krp, krp_next);
762 	selnotify(&krp->fcrp->sinfo, 0, 0);
763 	mutex_spin_exit(&crypto_mtx);
764 	return 0;
765 }
766 
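/*
 * Synchronous asymmetric-key operation (CIOCKEY): validate the
 * parameter counts for the requested operation, copy the input
 * parameters in, dispatch the request, sleep on krp_cv until it
 * completes, then copy the output parameters back out.
 */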
767 static int
768 cryptodev_key(struct crypt_kop *kop)
769 {
770 	struct cryptkop *krp = NULL;
771 	int error = EINVAL;
772 	int in, out, size, i;
773 
774 	if (kop->crk_iparams + kop->crk_oparams > CRK_MAXPARAM)
775 		return EFBIG;
776 
777 	in = kop->crk_iparams;
778 	out = kop->crk_oparams;
779 	switch (kop->crk_op) {
780 	case CRK_MOD_EXP:
781 		if (in == 3 && out == 1)
782 			break;
783 		return EINVAL;
784 	case CRK_MOD_EXP_CRT:
785 		if (in == 6 && out == 1)
786 			break;
787 		return EINVAL;
788 	case CRK_DSA_SIGN:
789 		if (in == 5 && out == 2)
790 			break;
791 		return EINVAL;
792 	case CRK_DSA_VERIFY:
793 		if (in == 7 && out == 0)
794 			break;
795 		return EINVAL;
796 	case CRK_DH_COMPUTE_KEY:
797 		if (in == 3 && out == 1)
798 			break;
799 		return EINVAL;
800 	case CRK_MOD_ADD:
801 		if (in == 3 && out == 1)
802 			break;
803 		return EINVAL;
804 	case CRK_MOD_ADDINV:
805 		if (in == 2 && out == 1)
806 			break;
807 		return EINVAL;
808 	case CRK_MOD_SUB:
809 		if (in == 3 && out == 1)
810 			break;
811 		return EINVAL;
812 	case CRK_MOD_MULT:
813 		if (in == 3 && out == 1)
814 			break;
815 		return EINVAL;
816 	case CRK_MOD_MULTINV:
817 		if (in == 2 && out == 1)
818 			break;
819 		return EINVAL;
820 	case CRK_MOD:
821 		if (in == 2 && out == 1)
822 			break;
823 		return EINVAL;
824 	default:
825 		return EINVAL;
826 	}
827 
828 	krp = pool_get(&cryptkop_pool, PR_WAITOK);
829 	(void)memset(krp, 0, sizeof *krp);
830 	cv_init(&krp->krp_cv, "crykdev");
831 	krp->krp_op = kop->crk_op;
832 	krp->krp_status = kop->crk_status;
833 	krp->krp_iparams = kop->crk_iparams;
834 	krp->krp_oparams = kop->crk_oparams;
835 	krp->krp_status = 0;
836 	krp->krp_callback = (int (*) (struct cryptkop *)) cryptodevkey_cb;
837 
838 	for (i = 0; i < CRK_MAXPARAM; i++)
839 		krp->krp_param[i].crp_nbits = kop->crk_param[i].crp_nbits;
840 	for (i = 0; i < krp->krp_iparams + krp->krp_oparams; i++) {
841 		size = (krp->krp_param[i].crp_nbits + 7) / 8;
842 		if (size == 0)
843 			continue;
844 		krp->krp_param[i].crp_p = kmem_alloc(size, KM_SLEEP);
845 		if (i >= krp->krp_iparams)
846 			continue;
847 		error = copyin(kop->crk_param[i].crp_p,
848 		    krp->krp_param[i].crp_p, size);
849 		if (error)
850 			goto fail;
851 	}
852 
853 	error = crypto_kdispatch(krp);
854 	if (error != 0) {
855 		goto fail;
856 	}
857 
858 	mutex_spin_enter(&crypto_mtx);
859 	while (!(krp->krp_flags & CRYPTO_F_DONE)) {
860 		cv_wait(&krp->krp_cv, &crypto_mtx);	/* XXX cv_wait_sig? */
861 	}
862 	if (krp->krp_flags & CRYPTO_F_ONRETQ) {
863 		DPRINTF(("cryptodev_key: DONE early, not via cryptoret.\n"));
864 		(void)crypto_ret_kq_remove(krp);
865 	}
866 	mutex_spin_exit(&crypto_mtx);
867 
868 	if (krp->krp_status != 0) {
869 		DPRINTF(("cryptodev_key: krp->krp_status 0x%08x\n",
870 		    krp->krp_status));
871 		error = krp->krp_status;
872 		goto fail;
873 	}
874 
875 	for (i = krp->krp_iparams; i < krp->krp_iparams + krp->krp_oparams;
876 	    i++) {
877 		size = (krp->krp_param[i].crp_nbits + 7) / 8;
878 		if (size == 0)
879 			continue;
880 		error = copyout(krp->krp_param[i].crp_p,
881 		    kop->crk_param[i].crp_p, size);
882 		if (error) {
883 			DPRINTF(("cryptodev_key: copyout oparam %d failed, "
884 			    "error=%d\n", i-krp->krp_iparams, error));
885 			goto fail;
886 		}
887 	}
888 
889 fail:
890 	kop->crk_status = krp->krp_status;
891 	for (i = 0; i < CRK_MAXPARAM; i++) {
892 		struct crparam *kp = &(krp->krp_param[i]);
893 		if (krp->krp_param[i].crp_p) {
894 			size = (kp->crp_nbits + 7)  / 8;
895 			KASSERT(size > 0);
896 			(void)memset(kp->crp_p, 0, size);
897 			kmem_free(kp->crp_p, size);
898 		}
899 	}
900 	cv_destroy(&krp->krp_cv);
901 	pool_put(&cryptkop_pool, krp);
902 	DPRINTF(("cryptodev_key: error=0x%08x\n", error));
903 	return error;
904 }
905 
906 /* ARGSUSED */
907 static int
908 cryptof_close(struct file *fp)
909 {
910 	struct fcrypt *fcr = fp->f_data;
911 	struct csession *cse;
912 
913 	mutex_spin_enter(&crypto_mtx);
914 	while ((cse = TAILQ_FIRST(&fcr->csessions))) {
915 		TAILQ_REMOVE(&fcr->csessions, cse, next);
916 		(void)csefree(cse);
917 	}
918 	seldestroy(&fcr->sinfo);
919 	fp->f_data = NULL;
920 	mutex_spin_exit(&crypto_mtx);
921 
922 	pool_put(&fcrpl, fcr);
923 	return 0;
924 }
925 
926 /* needed for compatibility module */
927 struct	csession *cryptodev_csefind(struct fcrypt *fcr, u_int ses)
928 {
929 	return csefind(fcr, ses);
930 }
931 
932 /* csefind: call with crypto_mtx held. */
933 static struct csession *
934 csefind(struct fcrypt *fcr, u_int ses)
935 {
936 	struct csession *cse, *cnext, *ret = NULL;
937 
938 	KASSERT(mutex_owned(&crypto_mtx));
939 	TAILQ_FOREACH_SAFE(cse, &fcr->csessions, next, cnext)
940 		if (cse->ses == ses)
941 			ret = cse;
942 
943 	return ret;
944 }
945 
946 /* csedelete: call with crypto_mtx held. */
947 static int
948 csedelete(struct fcrypt *fcr, struct csession *cse_del)
949 {
950 	struct csession *cse, *cnext;
951 	int ret = 0;
952 
953 	KASSERT(mutex_owned(&crypto_mtx));
954 	TAILQ_FOREACH_SAFE(cse, &fcr->csessions, next, cnext) {
955 		if (cse == cse_del) {
956 			TAILQ_REMOVE(&fcr->csessions, cse, next);
957 			ret = 1;
958 		}
959 	}
960 	return ret;
961 }
962 
963 /* cseadd: call with crypto_mtx held. */
964 static struct csession *
965 cseadd(struct fcrypt *fcr, struct csession *cse)
966 {
967 	KASSERT(mutex_owned(&crypto_mtx));
968 	/* don't let session ID wrap! */
969 	if (fcr->sesn + 1 == 0) return NULL;
970 	TAILQ_INSERT_TAIL(&fcr->csessions, cse, next);
971 	cse->ses = fcr->sesn++;
972 	return cse;
973 }
974 
975 /* csecreate: call with crypto_mtx held. */
976 static struct csession *
977 csecreate(struct fcrypt *fcr, u_int64_t sid, void *key, u_int64_t keylen,
978     void *mackey, u_int64_t mackeylen, u_int32_t cipher, u_int32_t mac,
979     u_int32_t comp_alg, struct enc_xform *txform, struct auth_hash *thash,
980     struct comp_algo *tcomp)
981 {
982 	struct csession *cse;
983 
984 	KASSERT(mutex_owned(&crypto_mtx));
985 	cse = pool_get(&csepl, PR_NOWAIT);
986 	if (cse == NULL)
987 		return NULL;
988 	cse->key = key;
989 	cse->keylen = keylen/8;
990 	cse->mackey = mackey;
991 	cse->mackeylen = mackeylen/8;
992 	cse->sid = sid;
993 	cse->cipher = cipher;
994 	cse->mac = mac;
995 	cse->comp_alg = comp_alg;
996 	cse->txform = txform;
997 	cse->thash = thash;
998 	cse->tcomp = tcomp;
999 	cse->error = 0;
1000 	if (cseadd(fcr, cse))
1001 		return cse;
1002 	else {
1003 		pool_put(&csepl, cse);
1004 		return NULL;
1005 	}
1006 }
1007 
1008 /* csefree: call with crypto_mtx held. */
1009 static int
1010 csefree(struct csession *cse)
1011 {
1012 	int error;
1013 
1014 	KASSERT(mutex_owned(&crypto_mtx));
1015 	error = crypto_freesession(cse->sid);
1016 	if (cse->key)
1017 		free(cse->key, M_XDATA);
1018 	if (cse->mackey)
1019 		free(cse->mackey, M_XDATA);
1020 	pool_put(&csepl, cse);
1021 	return error;
1022 }
1023 
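/*
 * open() of the master /dev/crypto node.  Each open allocates a fresh
 * fcrypt and clones a new file descriptor bound to cryptofops, so all
 * subsequent operations go through the per-context entrypoints above
 * rather than through the cdevsw.
 */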
1024 static int
1025 cryptoopen(dev_t dev, int flag, int mode,
1026     struct lwp *l)
1027 {
1028 	file_t *fp;
1029 	struct fcrypt *fcr;
1030 	int fd, error;
1031 
1032 	if (crypto_usercrypto == 0)
1033 		return ENXIO;
1034 
1035 	if ((error = fd_allocfile(&fp, &fd)) != 0)
1036 		return error;
1037 
1038 	fcr = pool_get(&fcrpl, PR_WAITOK);
1039 	getnanotime(&fcr->btime);
1040 	fcr->atime = fcr->mtime = fcr->btime;
1041 	mutex_spin_enter(&crypto_mtx);
1042 	TAILQ_INIT(&fcr->csessions);
1043 	TAILQ_INIT(&fcr->crp_ret_mq);
1044 	TAILQ_INIT(&fcr->crp_ret_mkq);
1045 	selinit(&fcr->sinfo);
1046 	/*
1047 	 * Don't ever return session 0, to allow detection of
1048 	 * failed creation attempts with multi-create ioctl.
1049 	 */
1050 	fcr->sesn = 1;
1051 	fcr->requestid = 1;
1052 	mutex_spin_exit(&crypto_mtx);
1053 	return fd_clone(fp, fd, flag, &cryptofops, fcr);
1054 }
1055 
1056 static int
1057 cryptoread(dev_t dev, struct uio *uio, int ioflag)
1058 {
1059 	return EIO;
1060 }
1061 
1062 static int
1063 cryptowrite(dev_t dev, struct uio *uio, int ioflag)
1064 {
1065 	return EIO;
1066 }
1067 
1068 int
1069 cryptoselect(dev_t dev, int rw, struct lwp *l)
1070 {
1071 	return 0;
1072 }
1073 
1074 /*static*/
1075 struct cdevsw crypto_cdevsw = {
1076 	/* open */	cryptoopen,
1077 	/* close */	noclose,
1078 	/* read */	cryptoread,
1079 	/* write */	cryptowrite,
1080 	/* ioctl */	noioctl,
1081 	/* ttstop?*/	nostop,
1082 	/* ??*/		notty,
1083 	/* poll */	cryptoselect /*nopoll*/,
1084 	/* mmap */	nommap,
1085 	/* kqfilter */	nokqfilter,
1086 	/* type */	D_OTHER,
1087 };
1088 
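/*
 * Batched symmetric operation (CIOCNCRYPTM).  Each element of cnop[] is
 * dispatched with CRYPTO_F_CBIMM and completes through cryptodev_mcb(),
 * which parks the finished request on fcr->crp_ret_mq.  A hypothetical
 * userland caller (sketch only; variable names are placeholders) would
 * drive it roughly as follows:
 *
 *	struct crypt_mop mop = { .count = n, .reqs = reqs };
 *	ioctl(fd, CIOCNCRYPTM, &mop);		// submit n requests
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *	poll(&pfd, 1, INFTIM);			// wait for completions
 *
 *	struct cryptret ret = { .count = n, .results = results };
 *	ioctl(fd, CIOCNCRYPTRETM, &ret);	// collect what finished
 */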
1089 int
1090 cryptodev_mop(struct fcrypt *fcr,
1091               struct crypt_n_op * cnop,
1092               int count, struct lwp *l)
1093 {
1094 	struct cryptop *crp = NULL;
1095 	struct cryptodesc *crde = NULL, *crda = NULL, *crdc = NULL;
1096 	int req, error=0;
1097 	struct csession *cse;
1098 	int flags=0;
1099 	int iov_len;
1100 
1101 	for (req = 0; req < count; req++) {
1102 		mutex_spin_enter(&crypto_mtx);
1103 		cse = csefind(fcr, cnop[req].ses);
1104 		if (cse == NULL) {
1105 			DPRINTF(("csefind failed\n"));
1106 			cnop[req].status = EINVAL;
1107 			mutex_spin_exit(&crypto_mtx);
1108 			continue;
1109 		}
1110 		mutex_spin_exit(&crypto_mtx);
1111 
1112 		if (cnop[req].len > 256*1024-4) {
1113 			DPRINTF(("length failed\n"));
1114 			cnop[req].status = EINVAL;
1115 			continue;
1116 		}
1117 		if (cse->txform) {
1118 			if (cnop[req].len == 0 ||
1119 			    (cnop[req].len % cse->txform->blocksize) != 0) {
1120 				cnop[req].status = EINVAL;
1121 				continue;
1122 			}
1123 		}
1124 
1125 		crp = crypto_getreq((cse->txform != NULL) +
1126 				    (cse->thash != NULL) +
1127 				    (cse->tcomp != NULL));
1128 		if (crp == NULL) {
1129 			cnop[req].status = ENOMEM;
1130 			goto bail;
1131 		}
1132 
1133 		iov_len = cnop[req].len;
1134 		/* got a compression/decompression max size? */
1135 		if ((cse->tcomp) && cnop[req].dst_len) {
1136 			if (iov_len < cnop[req].dst_len) {
1137 				/* Need larger iov to deal with decompress */
1138 				iov_len = cnop[req].dst_len;
1139 			}
1140 			DPRINTF(("cryptodev_mop: iov_len -> %d for decompress\n", iov_len));
1141 		}
1142 
1143 		(void)memset(&crp->uio, 0, sizeof(crp->uio));
1144 		crp->uio.uio_iovcnt = 1;
1145 		crp->uio.uio_resid = 0;
1146 		crp->uio.uio_rw = UIO_WRITE;
1147 		crp->uio.uio_iov = crp->iovec;
1148 		UIO_SETUP_SYSSPACE(&crp->uio);
1149 		memset(&crp->iovec, 0, sizeof(crp->iovec));
1150 		crp->uio.uio_iov[0].iov_len = iov_len;
1151 		DPRINTF(("cryptodev_mop: kmem_alloc(%d) for iov \n", iov_len));
1152 		crp->uio.uio_iov[0].iov_base = kmem_alloc(iov_len, KM_SLEEP);
1153 		crp->uio.uio_resid = crp->uio.uio_iov[0].iov_len;
1154 
1155 		if (cse->tcomp) {
1156 			crdc = crp->crp_desc;
1157 		}
1158 
1159 		if (cse->thash) {
1160 			crda = crdc ? crdc->crd_next : crp->crp_desc;
1161 			if (cse->txform && crda)
1162 				crde = crda->crd_next;
1163 		} else {
1164 			if (cse->txform) {
1165 				crde = crdc ? crdc->crd_next : crp->crp_desc;
1166 			} else if (!cse->tcomp) {
1167 				error = EINVAL;
1168 				goto bail;
1169 			}
1170 		}
1171 
1172 		if ((copyin(cnop[req].src,
1173 		    crp->uio.uio_iov[0].iov_base, cnop[req].len))) {
1174 			cnop[req].status = EINVAL;
1175 			goto bail;
1176 		}
1177 
1178 		if (crdc) {
1179 			switch (cnop[req].op) {
1180 			case COP_COMP:
1181 				crdc->crd_flags |= CRD_F_COMP;
1182 				break;
1183 			case COP_DECOMP:
1184 				crdc->crd_flags &= ~CRD_F_COMP;
1185 				break;
1186 			default:
1187 				break;
1188 			}
1189 			/* more data to follow? */
1190 			if (cnop[req].flags & COP_F_MORE) {
1191 				flags |= CRYPTO_F_MORE;
1192 			}
1193 			crdc->crd_len = cnop[req].len;
1194 			crdc->crd_inject = 0;
1195 
1196 			crdc->crd_alg = cse->comp_alg;
1197 			crdc->crd_key = NULL;
1198 			crdc->crd_klen = 0;
1199 			DPRINTF(("cryptodev_mop[%d]: crdc setup for comp_alg %d"
1200 				 " len %d.\n",
1201 				(uint32_t)cse->sid, crdc->crd_alg,
1202 				crdc->crd_len));
1203 		}
1204 
1205 		if (crda) {
1206 			crda->crd_skip = 0;
1207 			crda->crd_len = cnop[req].len;
1208 			crda->crd_inject = 0;	/* ??? */
1209 
1210 			crda->crd_alg = cse->mac;
1211 			crda->crd_key = cse->mackey;
1212 			crda->crd_klen = cse->mackeylen * 8;
1213 		}
1214 
1215 		if (crde) {
1216 			if (cnop[req].op == COP_ENCRYPT)
1217 				crde->crd_flags |= CRD_F_ENCRYPT;
1218 			else
1219 				crde->crd_flags &= ~CRD_F_ENCRYPT;
1220 			crde->crd_len = cnop[req].len;
1221 			crde->crd_inject = 0;
1222 
1223 			crde->crd_alg = cse->cipher;
1224 #ifdef notyet		/* XXX must notify h/w driver new key, drain */
1225 			if(cnop[req].key && cnop[req].keylen) {
1226 				crde->crd_key = malloc(cnop[req].keylen,
1227 						    M_XDATA, M_WAITOK);
1228 				if((error = copyin(cnop[req].key,
1229 				    crde->crd_key, cnop[req].keylen))) {
1230 					cnop[req].status = EINVAL;
1231 					goto bail;
1232 				}
1233 				crde->crd_klen =  cnop[req].keylen * 8;
1234 			} else { ... }
1235 #endif
1236 			crde->crd_key = cse->key;
1237 			crde->crd_klen = cse->keylen * 8;
1238 		}
1239 
1240 		crp->crp_ilen = cnop[req].len;
1241 		crp->crp_flags = CRYPTO_F_IOV | CRYPTO_F_CBIMM |
1242 		    (cnop[req].flags & COP_F_BATCH) | flags;
1243 		crp->crp_buf = (void *)&crp->uio;
1244 		crp->crp_callback = (int (*) (struct cryptop *)) cryptodev_mcb;
1245 		crp->crp_sid = cse->sid;
1246 		crp->crp_opaque = (void *)cse;
1247 		crp->fcrp = fcr;
1248 		crp->dst = cnop[req].dst;
1249 		crp->len = cnop[req].len; /* input len, iov may be larger */
1250 		crp->mac = cnop[req].mac;
1251 		DPRINTF(("cryptodev_mop: iov_base %p dst %p len %d mac %p\n",
1252 			    crp->uio.uio_iov[0].iov_base, crp->dst, crp->len,
1253 			    crp->mac));
1254 
1255 		if (cnop[req].iv) {
1256 			if (crde == NULL) {
1257 				cnop[req].status = EINVAL;
1258 				goto bail;
1259 			}
1260 			if (cse->cipher == CRYPTO_ARC4) { /* XXX use flag? */
1261 				cnop[req].status = EINVAL;
1262 				goto bail;
1263 			}
1264 			if ((error = copyin(cnop[req].iv, crp->tmp_iv,
1265 			    cse->txform->blocksize))) {
1266 				cnop[req].status = EINVAL;
1267 				goto bail;
1268 			}
1269 			(void)memcpy(crde->crd_iv, crp->tmp_iv,
1270 			    cse->txform->blocksize);
1271 			crde->crd_flags |= CRD_F_IV_EXPLICIT | CRD_F_IV_PRESENT;
1272 			crde->crd_skip = 0;
1273 		} else if (crde) {
1274 			if (cse->cipher == CRYPTO_ARC4) { /* XXX use flag? */
1275 				crde->crd_skip = 0;
1276 			} else {
1277 				crde->crd_flags |= CRD_F_IV_PRESENT;
1278 				crde->crd_skip = cse->txform->blocksize;
1279 				crde->crd_len -= cse->txform->blocksize;
1280 			}
1281 		}
1282 
1283 		if (cnop[req].mac) {
1284 			if (crda == NULL) {
1285 				cnop[req].status = EINVAL;
1286 				goto bail;
1287 			}
1288 			crp->crp_mac=cse->tmp_mac;
1289 		}
1290 		cnop[req].reqid = atomic_inc_32_nv(&(fcr->requestid));
1291 		crp->crp_reqid = cnop[req].reqid;
1292 		crp->crp_usropaque = cnop[req].opaque;
1293 #ifdef notyet
1294 eagain:
1295 #endif
1296 		cnop[req].status = crypto_dispatch(crp);
1297 		mutex_spin_enter(&crypto_mtx);	/* XXX why mutex? */
1298 
1299 		switch (cnop[req].status) {
1300 #ifdef notyet	/* don't loop forever -- but EAGAIN not possible here yet */
1301 		case EAGAIN:
1302 			mutex_spin_exit(&crypto_mtx);
1303 			goto eagain;
1304 			break;
1305 #endif
1306 		case 0:
1307 			break;
1308 		default:
1309 			DPRINTF(("cryptodev_op: not waiting, error.\n"));
1310 			mutex_spin_exit(&crypto_mtx);
1311 			goto bail;
1312 		}
1313 
1314 		mutex_spin_exit(&crypto_mtx);
1315 bail:
1316 		if (cnop[req].status) {
1317 			if (crp) {
1318 				if (crp->uio.uio_iov[0].iov_base) {
1319 					kmem_free(crp->uio.uio_iov[0].iov_base,
1320 					    crp->uio.uio_iov[0].iov_len);
1321 				}
1322 				crypto_freereq(crp);
1323 			}
1324 			error = 0;
1325 		}
1326 	}
1327 	return error;
1328 }
1329 
1330 static int
1331 cryptodev_mkey(struct fcrypt *fcr, struct crypt_n_kop *kop, int count)
1332 {
1333 	struct cryptkop *krp = NULL;
1334 	int error = EINVAL;
1335 	int in, out, size, i, req;
1336 
1337 	for (req = 0; req < count; req++) {
1338 		if (kop[req].crk_iparams + kop[req].crk_oparams > CRK_MAXPARAM)
1339 			return EFBIG;
1340 
1341 		in = kop[req].crk_iparams;
1342 		out = kop[req].crk_oparams;
1343 		switch (kop[req].crk_op) {
1344 		case CRK_MOD_EXP:
1345 			if (in == 3 && out == 1)
1346 				break;
1347 			kop[req].crk_status = EINVAL;
1348 			continue;
1349 		case CRK_MOD_EXP_CRT:
1350 			if (in == 6 && out == 1)
1351 				break;
1352 			kop[req].crk_status = EINVAL;
1353 			continue;
1354 		case CRK_DSA_SIGN:
1355 			if (in == 5 && out == 2)
1356 				break;
1357 			kop[req].crk_status = EINVAL;
1358 			continue;
1359 		case CRK_DSA_VERIFY:
1360 			if (in == 7 && out == 0)
1361 				break;
1362 			kop[req].crk_status = EINVAL;
1363 			continue;
1364 		case CRK_DH_COMPUTE_KEY:
1365 			if (in == 3 && out == 1)
1366 				break;
1367 			kop[req].crk_status = EINVAL;
1368 			continue;
1369 		case CRK_MOD_ADD:
1370 			if (in == 3 && out == 1)
1371 				break;
1372 			kop[req].crk_status = EINVAL;
1373 			continue;
1374 		case CRK_MOD_ADDINV:
1375 			if (in == 2 && out == 1)
1376 				break;
1377 			kop[req].crk_status = EINVAL;
1378 			continue;
1379 		case CRK_MOD_SUB:
1380 			if (in == 3 && out == 1)
1381 				break;
1382 			kop[req].crk_status = EINVAL;
1383 			continue;
1384 		case CRK_MOD_MULT:
1385 			if (in == 3 && out == 1)
1386 				break;
1387 			kop[req].crk_status = EINVAL;
1388 			continue;
1389 		case CRK_MOD_MULTINV:
1390 			if (in == 2 && out == 1)
1391 				break;
1392 			kop[req].crk_status = EINVAL;
1393 			continue;
1394 		case CRK_MOD:
1395 			if (in == 2 && out == 1)
1396 				break;
1397 			kop[req].crk_status = EINVAL;
1398 			continue;
1399 		default:
1400 			kop[req].crk_status = EINVAL;
1401 			continue;
1402 		}
1403 
1404 		krp = pool_get(&cryptkop_pool, PR_WAITOK);
1405 		(void)memset(krp, 0, sizeof *krp);
1406 		cv_init(&krp->krp_cv, "crykdev");
1407 		krp->krp_op = kop[req].crk_op;
1408 		krp->krp_status = kop[req].crk_status;
1409 		krp->krp_iparams = kop[req].crk_iparams;
1410 		krp->krp_oparams = kop[req].crk_oparams;
1411 		krp->krp_status = 0;
1412 		krp->krp_callback =
1413 		    (int (*) (struct cryptkop *)) cryptodevkey_mcb;
1414 		(void)memcpy(krp->crk_param, kop[req].crk_param,
1415 		    sizeof(kop[req].crk_param));
1416 
1417 		krp->krp_flags = CRYPTO_F_CBIMM;
1418 
1419 		for (i = 0; i < CRK_MAXPARAM; i++)
1420 			krp->krp_param[i].crp_nbits =
1421 			    kop[req].crk_param[i].crp_nbits;
1422 		for (i = 0; i < krp->krp_iparams + krp->krp_oparams; i++) {
1423 			size = (krp->krp_param[i].crp_nbits + 7) / 8;
1424 			if (size == 0)
1425 				continue;
1426 			krp->krp_param[i].crp_p =
1427 			    kmem_alloc(size, KM_SLEEP);
1428 			if (i >= krp->krp_iparams)
1429 				continue;
1430 			kop[req].crk_status =
1431 			    copyin(kop[req].crk_param[i].crp_p,
1432 			    krp->krp_param[i].crp_p, size);
1433 			if (kop[req].crk_status)
1434 				goto fail;
1435 		}
1436 		krp->fcrp = fcr;
1437 
1438 		kop[req].crk_reqid = atomic_inc_32_nv(&(fcr->requestid));
1439 		krp->krp_reqid = kop[req].crk_reqid;
1440 		krp->krp_usropaque = kop[req].crk_opaque;
1441 
1442 		kop[req].crk_status = crypto_kdispatch(krp);
1443 		if (kop[req].crk_status != 0) {
1444 			goto fail;
1445 		}
1446 
1447 fail:
1448 		if (kop[req].crk_status) {
1449 			if (krp) {
1450 				kop[req].crk_status = krp->krp_status;
1451 				for (i = 0; i < CRK_MAXPARAM; i++) {
1452 					struct crparam *kp =
1453 						&(krp->krp_param[i]);
1454 					if (kp->crp_p) {
1455 						size = (kp->crp_nbits + 7) / 8;
1456 						KASSERT(size > 0);
1457 						memset(kp->crp_p, 0, size);
1458 						kmem_free(kp->crp_p, size);
1459 					}
1460 				}
1461 				cv_destroy(&krp->krp_cv);
1462 				pool_put(&cryptkop_pool, krp);
1463 			}
1464 		}
1465 		error = 0;
1466 	}
1467 	DPRINTF(("cryptodev_key: error=0x%08x\n", error));
1468 	return error;
1469 }
1470 
1471 int
1472 cryptodev_session(struct fcrypt *fcr, struct session_op *sop)
1473 {
1474 	struct cryptoini cria, crie;
1475 	struct cryptoini cric;		/* compressor */
1476 	struct cryptoini *crihead = NULL;
1477 	struct enc_xform *txform = NULL;
1478 	struct auth_hash *thash = NULL;
1479 	struct comp_algo *tcomp = NULL;
1480 	struct csession *cse;
1481 	u_int64_t sid;
1482 	int error = 0;
1483 
1484 	DPRINTF(("cryptodev_session() cipher=%d, mac=%d\n", sop->cipher, sop->mac));
1485 
1486 	/* XXX there must be a way to not embed the list of xforms here */
1487 	switch (sop->cipher) {
1488 	case 0:
1489 		break;
1490 	case CRYPTO_DES_CBC:
1491 		txform = &enc_xform_des;
1492 		break;
1493 	case CRYPTO_3DES_CBC:
1494 		txform = &enc_xform_3des;
1495 		break;
1496 	case CRYPTO_BLF_CBC:
1497 		txform = &enc_xform_blf;
1498 		break;
1499 	case CRYPTO_CAST_CBC:
1500 		txform = &enc_xform_cast5;
		break;
1501 	case CRYPTO_SKIPJACK_CBC:
1502 		txform = &enc_xform_skipjack;
1503 		break;
1504 	case CRYPTO_AES_CBC:
1505 		txform = &enc_xform_rijndael128;
1506 		break;
1507 	case CRYPTO_NULL_CBC:
1508 		txform = &enc_xform_null;
1509 		break;
1510 	case CRYPTO_ARC4:
1511 		txform = &enc_xform_arc4;
1512 		break;
1513 	default:
1514 		DPRINTF(("Invalid cipher %d\n", sop->cipher));
1515 		return EINVAL;
1516 	}
1517 
1518 	switch (sop->comp_alg) {
1519 	case 0:
1520 		break;
1521 	case CRYPTO_DEFLATE_COMP:
1522 		tcomp = &comp_algo_deflate;
1523 		break;
1524 	case CRYPTO_GZIP_COMP:
1525 		tcomp = &comp_algo_gzip;
1526 		DPRINTF(("cryptodev_session() tcomp for GZIP\n"));
1527 		break;
1528 	default:
1529 		DPRINTF(("Invalid compression alg %d\n", sop->comp_alg));
1530 		return EINVAL;
1531 	}
1532 
1533 	switch (sop->mac) {
1534 	case 0:
1535 		break;
1536 	case CRYPTO_MD5_HMAC:
1537 		thash = &auth_hash_hmac_md5;
1538 		break;
1539 	case CRYPTO_SHA1_HMAC:
1540 		thash = &auth_hash_hmac_sha1;
1541 		break;
1542 	case CRYPTO_MD5_HMAC_96:
1543 		thash = &auth_hash_hmac_md5_96;
1544 		break;
1545 	case CRYPTO_SHA1_HMAC_96:
1546 		thash = &auth_hash_hmac_sha1_96;
1547 		break;
1548 	case CRYPTO_SHA2_HMAC:
1549 		/* XXX switching on key length seems questionable */
1550 		if (sop->mackeylen == auth_hash_hmac_sha2_256.keysize) {
1551 			thash = &auth_hash_hmac_sha2_256;
1552 		} else if (sop->mackeylen == auth_hash_hmac_sha2_384.keysize) {
1553 			thash = &auth_hash_hmac_sha2_384;
1554 		} else if (sop->mackeylen == auth_hash_hmac_sha2_512.keysize) {
1555 			thash = &auth_hash_hmac_sha2_512;
1556 		} else {
1557 			DPRINTF(("Invalid mackeylen %d\n", sop->mackeylen));
1558 			return EINVAL;
1559 		}
1560 		break;
1561 	case CRYPTO_RIPEMD160_HMAC:
1562 		thash = &auth_hash_hmac_ripemd_160;
1563 		break;
1564 	case CRYPTO_RIPEMD160_HMAC_96:
1565 		thash = &auth_hash_hmac_ripemd_160_96;
1566 		break;
1567 	case CRYPTO_MD5:
1568 		thash = &auth_hash_md5;
1569 		break;
1570 	case CRYPTO_SHA1:
1571 		thash = &auth_hash_sha1;
1572 		break;
1573 	case CRYPTO_NULL_HMAC:
1574 		thash = &auth_hash_null;
1575 		break;
1576 	default:
1577 		DPRINTF(("Invalid mac %d\n", sop->mac));
1578 		return EINVAL;
1579 	}
1580 
1581 	memset(&crie, 0, sizeof(crie));
1582 	memset(&cria, 0, sizeof(cria));
1583 	memset(&cric, 0, sizeof(cric));
1584 
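	/*
	 * Build the cryptoini chain in the same order the descriptors are
	 * consumed in cryptodev_op()/cryptodev_mop(): compressor first (if
	 * any), then the MAC, then the cipher.  Absent transforms are
	 * skipped, and crihead ends up pointing at the first one present.
	 */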
1585 	if (tcomp) {
1586 		cric.cri_alg = tcomp->type;
1587 		cric.cri_klen = 0;
1588 		DPRINTF(("tcomp->type = %d\n", tcomp->type));
1589 
1590 		crihead = &cric;
1591 		if (thash) {
1592 			cric.cri_next = &cria;
1593 		} else if (txform) {
1594 			cric.cri_next = &crie;
1595 		}
1596 	}
1597 
1598 	if (txform) {
1599 		crie.cri_alg = txform->type;
1600 		crie.cri_klen = sop->keylen * 8;
1601 		if (sop->keylen > txform->maxkey ||
1602 		    sop->keylen < txform->minkey) {
1603 			DPRINTF(("keylen %d not in [%d,%d]\n",
1604 			    sop->keylen, txform->minkey, txform->maxkey));
1605 			error = EINVAL;
1606 			goto bail;
1607 		}
1608 
1609 		crie.cri_key = malloc(crie.cri_klen / 8, M_XDATA, M_WAITOK);
1610 		if ((error = copyin(sop->key, crie.cri_key, crie.cri_klen / 8)))
1611 			goto bail;
1612 		if (!crihead) {
1613 			crihead = &crie;
1614 		}
1615 	}
1616 
1617 	if (thash) {
1618 		cria.cri_alg = thash->type;
1619 		cria.cri_klen = sop->mackeylen * 8;
1620 		if (sop->mackeylen != thash->keysize) {
1621 			DPRINTF(("mackeylen %d != keysize %d\n",
1622 			    sop->mackeylen, thash->keysize));
1623 			error = EINVAL;
1624 			goto bail;
1625 		}
1626 		if (cria.cri_klen) {
1627 			cria.cri_key = malloc(cria.cri_klen / 8, M_XDATA,
1628 			    M_WAITOK);
1629 			if ((error = copyin(sop->mackey, cria.cri_key,
1630 			    cria.cri_klen / 8))) {
1631 				goto bail;
1632 			}
1633 		}
1634 		if (txform)
1635 			cria.cri_next = &crie;	/* XXX forces enc then hash? */
1636 		if (!crihead) {
1637 			crihead = &cria;
1638 		}
1639 	}
1640 
1641 	/* crypto_newsession requires that we hold the mutex. */
1642 	mutex_spin_enter(&crypto_mtx);
1643 	error = crypto_newsession(&sid, crihead, crypto_devallowsoft);
1644 	if (!error) {
1645 		DPRINTF(("cryptodev_session: got session %d\n", (uint32_t)sid));
1646 		cse = csecreate(fcr, sid, crie.cri_key, crie.cri_klen,
1647 		    cria.cri_key, cria.cri_klen, (txform ? sop->cipher : 0), sop->mac,
1648 		    (tcomp ? sop->comp_alg : 0), txform, thash, tcomp);
1649 		if (cse != NULL) {
1650 			sop->ses = cse->ses;
1651 		} else {
1652 			DPRINTF(("csecreate failed\n"));
1653 			crypto_freesession(sid);
1654 			error = EINVAL;
1655 		}
1656 	} else {
1657 		DPRINTF(("SIOCSESSION violates kernel parameters %d\n",
1658 		    error));
1659 	}
1660 	mutex_spin_exit(&crypto_mtx);
1661 bail:
1662 	if (error) {
1663 		if (crie.cri_key) {
1664 			memset(crie.cri_key, 0, crie.cri_klen / 8);
1665 			free(crie.cri_key, M_XDATA);
1666 		}
1667 		if (cria.cri_key) {
1668 			memset(cria.cri_key, 0, cria.cri_klen / 8);
1669 			free(cria.cri_key, M_XDATA);
1670 		}
1671 	}
1672 	return error;
1673 }
1674 
1675 int
1676 cryptodev_msession(struct fcrypt *fcr, struct session_n_op *sn_ops,
1677 		   int count)
1678 {
1679 	int i;
1680 
1681 	for (i = 0; i < count; i++, sn_ops++) {
1682 		struct session_op s_op;
1683 		s_op.cipher =		sn_ops->cipher;
1684 		s_op.mac =		sn_ops->mac;
1685 		s_op.keylen =		sn_ops->keylen;
1686 		s_op.key =		sn_ops->key;
1687 		s_op.mackeylen =	sn_ops->mackeylen;
1688 		s_op.mackey =		sn_ops->mackey;
1689 
1690 		sn_ops->status = cryptodev_session(fcr, &s_op);
1691 		sn_ops->ses =		s_op.ses;
1692 	}
1693 
1694 	return 0;
1695 }
1696 
1697 static int
1698 cryptodev_msessionfin(struct fcrypt *fcr, int count, u_int32_t *sesid)
1699 {
1700 	struct csession *cse;
1701 	int req, error = 0;
1702 
1703 	mutex_spin_enter(&crypto_mtx);
1704 	for (req = 0; req < count; req++) {
1705 		cse = csefind(fcr, sesid[req]);
1706 		if (cse == NULL)
1707 			continue;
1708 		csedelete(fcr, cse);
1709 		error = csefree(cse);
1710 	}
1711 	mutex_spin_exit(&crypto_mtx);
1712 	return 0;
1713 }
1714 
1715 /*
1716  * Collect as many completed requests as are available, up to at most
1717  * 'count' of them.
1718  * Return the number of requests collected.
1719  */
1720 static int
1721 cryptodev_getmstatus(struct fcrypt *fcr, struct crypt_result *crypt_res,
1722     int count)
1723 {
1724 	struct cryptop *crp = NULL;
1725 	struct cryptkop *krp = NULL;
1726 	struct csession *cse;
1727 	int i, size, req = 0;
1728 	int completed=0;
1729 
1730 	/* Completed requests are moved onto these local queues so nobody
1731 	 * else can grab them and the copyout can be done without locking. */
1732 	TAILQ_HEAD(, cryptop) crp_delfree_q =
1733 		TAILQ_HEAD_INITIALIZER(crp_delfree_q);
1734 	TAILQ_HEAD(, cryptkop) krp_delfree_q =
1735 		TAILQ_HEAD_INITIALIZER(krp_delfree_q);
1736 
1737 	/* At this point we do not know whether the user wants symmetric or
1738 	 * asymmetric responses, so we alternate between the two queues: for
1739 	 * a count of 2 we take one from each, for a count of 3 we take two
1740 	 * symmetric and one asymmetric, and so on. */
1741 
1742 	/* pull off a list of requests while protected from changes */
1743 	mutex_spin_enter(&crypto_mtx);
1744 	while (req < count) {
1745 		crp = TAILQ_FIRST(&fcr->crp_ret_mq);
1746 		if (crp) {
1747 			TAILQ_REMOVE(&fcr->crp_ret_mq, crp, crp_next);
1748 			TAILQ_INSERT_TAIL(&crp_delfree_q, crp, crp_next);
1749 			cse = (struct csession *)crp->crp_opaque;
1750 
1751 			/* see if the session is still valid */
1752 			cse = csefind(fcr, cse->ses);
1753 			if (cse != NULL) {
1754 				crypt_res[req].status = 0;
1755 			} else {
1756 				DPRINTF(("csefind failed\n"));
1757 				crypt_res[req].status = EINVAL;
1758 			}
1759 			req++;
1760 		}
1761 		if (req < count) {
1762 			crypt_res[req].status = 0;
1763 			krp = TAILQ_FIRST(&fcr->crp_ret_mkq);
1764 			if (krp) {
1765 				TAILQ_REMOVE(&fcr->crp_ret_mkq, krp, krp_next);
1766 				TAILQ_INSERT_TAIL(&krp_delfree_q, krp, krp_next);
1767 			req++;
1768 			}
1769 		}
1770 	}
1771 	mutex_spin_exit(&crypto_mtx);
1772 
1773 	/* now do all the work outside the mutex */
1774 	for (req = 0; req < count;) {
1775 		crp = TAILQ_FIRST(&crp_delfree_q);
1776 		if (crp) {
1777 			if (crypt_res[req].status != 0) {
1778 				/* csefind failed during collection */
1779 				goto bail;
1780 			}
1781 			cse = (struct csession *)crp->crp_opaque;
1782 			crypt_res[req].reqid = crp->crp_reqid;
1783 			crypt_res[req].opaque = crp->crp_usropaque;
1784 			completed++;
1785 
1786 			if (crp->crp_etype != 0) {
1787 				crypt_res[req].status = crp->crp_etype;
1788 				goto bail;
1789 			}
1790 
1791 			if (cse->error) {
1792 				crypt_res[req].status = cse->error;
1793 				goto bail;
1794 			}
1795 
1796 			if (crp->dst && (crypt_res[req].status =
1797 			    copyout(crp->uio.uio_iov[0].iov_base, crp->dst,
1798 			    crp->len)))
1799 				goto bail;
1800 
1801 			if (crp->mac && (crypt_res[req].status =
1802 			    copyout(crp->crp_mac, crp->mac,
1803 			    cse->thash->authsize)))
1804 				goto bail;
1805 
1806 bail:
1807 			TAILQ_REMOVE(&crp_delfree_q, crp, crp_next);
1808 			kmem_free(crp->uio.uio_iov[0].iov_base,
1809 			    crp->uio.uio_iov[0].iov_len);
1810 			crypto_freereq(crp);
1811 			req++;
1812 		}
1813 
1814 		if (req < count) {
1815 			krp = TAILQ_FIRST(&krp_delfree_q);
1816 			if (krp) {
1817 				crypt_res[req].reqid = krp->krp_reqid;
1818 				crypt_res[req].opaque = krp->krp_usropaque;
1819 				completed++;
1820 				if (krp->krp_status != 0) {
1821 					DPRINTF(("cryptodev_key: "
1822 					    "krp->krp_status 0x%08x\n",
1823 					    krp->krp_status));
1824 					crypt_res[req].status = krp->krp_status;
1825 					goto fail;
1826 				}
1827 
1828 				for (i = krp->krp_iparams; i < krp->krp_iparams
1829 				    + krp->krp_oparams; i++) {
1830 					size = (krp->krp_param[i].crp_nbits
1831 					    + 7) / 8;
1832 					if (size == 0)
1833 						continue;
1834 					crypt_res[req].status = copyout
1835 					    (krp->krp_param[i].crp_p,
1836 					    krp->crk_param[i].crp_p, size);
1837 					if (crypt_res[req].status) {
1838 						DPRINTF(("cryptodev_key: "
1839 						    "copyout oparam %d failed, "
1840 						    "error=%d\n",
1841 						    i - krp->krp_iparams,
1842 						    crypt_res[req].status));
1843 						goto fail;
1844 					}
1845 				}
1846 fail:
1847 				TAILQ_REMOVE(&krp_delfree_q, krp, krp_next);
1848 				/* not sure what to do for this */
1849 				/* kop[req].crk_status = krp->krp_status; */
1850 				for (i = 0; i < CRK_MAXPARAM; i++) {
1851 					struct crparam *kp = &(krp->krp_param[i]);
1852 					if (kp->crp_p) {
1853 						size = (kp->crp_nbits + 7) / 8;
1854 						KASSERT(size > 0);
1855 						(void)memset(kp->crp_p, 0, size);
1856 						kmem_free(kp->crp_p, size);
1857 					}
1858 				}
1859 				cv_destroy(&krp->krp_cv);
1860 				pool_put(&cryptkop_pool, krp);
1861 				req++;
1862 			}
1863 		}
1864 	}
1865 
1866 	return completed;
1867 }
1868 
1869 static int
1870 cryptodev_getstatus (struct fcrypt *fcr, struct crypt_result *crypt_res)
1871 {
1872         struct cryptop *crp = NULL, *cnext;
1873         struct cryptkop *krp = NULL, *knext;
1874         struct csession *cse;
1875         int i, size, req = 0;
1876 
1877 	mutex_spin_enter(&crypto_mtx);
1878 	/* We do not know which queue holds the request whose response the
1879 	 * user is asking for, so check both queues. */
1880 	TAILQ_FOREACH_SAFE(crp, &fcr->crp_ret_mq, crp_next, cnext) {
1881 		if (crp && (crp->crp_reqid == crypt_res->reqid)) {
1882 			cse = (struct csession *)crp->crp_opaque;
1883 		        crypt_res->opaque = crp->crp_usropaque;
1884 			cse = csefind(fcr, cse->ses);
1885 			if (cse == NULL) {
1886 				DPRINTF(("csefind failed\n"));
1887 				crypt_res->status = EINVAL;
1888 				goto bail;
1889 			}
1890 
1891 			if (crp->crp_etype != 0) {
1892 				crypt_res->status = crp->crp_etype;
1893 				goto bail;
1894 			}
1895 
1896 			if (cse->error) {
1897 				crypt_res->status = cse->error;
1898 				goto bail;
1899 			}
1900 
1901 			if (crp->dst && (crypt_res->status =
1902 			    copyout(crp->uio.uio_iov[0].iov_base,
1903 			    crp->dst, crp->len)))
1904 				goto bail;
1905 
1906 			if (crp->mac && (crypt_res->status =
1907 			    copyout(crp->crp_mac, crp->mac,
1908 			    cse->thash->authsize)))
1909 				goto bail;
1910 bail:
1911 			TAILQ_REMOVE(&fcr->crp_ret_mq, crp, crp_next);
1912 
1913 			mutex_spin_exit(&crypto_mtx);
1914 			crypto_freereq(crp);
1915 			return 0;
1916 		}
1917 	}
1918 
1919 	TAILQ_FOREACH_SAFE(krp, &fcr->crp_ret_mkq, krp_next, knext) {
1920 		if (krp && (krp->krp_reqid == crypt_res->reqid)) {
1921 			crypt_res[req].opaque = krp->krp_usropaque;
1922 			if (krp->krp_status != 0) {
1923 				DPRINTF(("cryptodev_key: "
1924 				    "krp->krp_status 0x%08x\n",
1925 				    krp->krp_status));
1926 				crypt_res[req].status = krp->krp_status;
1927 				goto fail;
1928 			}
1929 
1930 			for (i = krp->krp_iparams; i < krp->krp_iparams +
1931 			    krp->krp_oparams; i++) {
1932 				size = (krp->krp_param[i].crp_nbits + 7) / 8;
1933 				if (size == 0)
1934 					continue;
1935 				crypt_res[req].status = copyout(
1936 				    krp->krp_param[i].crp_p,
1937 				    krp->crk_param[i].crp_p, size);
1938 				if (crypt_res[req].status) {
1939 					DPRINTF(("cryptodev_key: copyout oparam "
1940 					    "%d failed, error=%d\n",
1941 					    i - krp->krp_iparams,
1942 					    crypt_res[req].status));
1943 					goto fail;
1944 				}
1945 			}
1946 fail:
1947 			TAILQ_REMOVE(&fcr->crp_ret_mkq, krp, krp_next);
1948 			mutex_spin_exit(&crypto_mtx);
1949 			/* not sure what to do for this */
1950 			/* kop[req].crk_status = krp->krp_status; */
1951 			for (i = 0; i < CRK_MAXPARAM; i++) {
1952 				struct crparam *kp = &(krp->krp_param[i]);
1953 				if (kp->crp_p) {
1954 					size = (kp->crp_nbits + 7) / 8;
1955 					KASSERT(size > 0);
1956 					memset(kp->crp_p, 0, size);
1957 					kmem_free(kp->crp_p, size);
1958 				}
1959 			}
1960 			cv_destroy(&krp->krp_cv);
1961 			pool_put(&cryptkop_pool, krp);
1962 			return 0;
1963 		}
1964 	}
1965 	mutex_spin_exit(&crypto_mtx);
1966 	return EINPROGRESS;
1967 }
1968 
1969 static int
1970 cryptof_stat(struct file *fp, struct stat *st)
1971 {
1972 	struct fcrypt *fcr = fp->f_data;
1973 
1974 	(void)memset(st, 0, sizeof(*st));
1975 
1976 	mutex_spin_enter(&crypto_mtx);
1977 	st->st_dev = makedev(cdevsw_lookup_major(&crypto_cdevsw), fcr->sesn);
1978 	st->st_atimespec = fcr->atime;
1979 	st->st_mtimespec = fcr->mtime;
1980 	st->st_ctimespec = st->st_birthtimespec = fcr->btime;
1981 	st->st_uid = kauth_cred_geteuid(fp->f_cred);
1982 	st->st_gid = kauth_cred_getegid(fp->f_cred);
1983 	mutex_spin_exit(&crypto_mtx);
1984 
1985 	return 0;
1986 }
1987 
1988 static int
1989 cryptof_poll(struct file *fp, int events)
1990 {
1991 	struct fcrypt *fcr = (struct fcrypt *)fp->f_data;
1992 	int revents = 0;
1993 
1994 	if (!(events & (POLLIN | POLLRDNORM))) {
1995 		/* only support read and POLLIN */
1996 		return 0;
1997 	}
1998 
1999 	mutex_spin_enter(&crypto_mtx);
2000 	if (TAILQ_EMPTY(&fcr->crp_ret_mq) && TAILQ_EMPTY(&fcr->crp_ret_mkq)) {
2001 		/* no completed requests pending, save the poll for later */
2002 		selrecord(curlwp, &fcr->sinfo);
2003 	} else {
2004 		/* let the app(s) know that there are completed requests */
2005 		revents = events & (POLLIN | POLLRDNORM);
2006 	}
2007 	mutex_spin_exit(&crypto_mtx);
2008 
2009 	return revents;
2010 }
2011 
2012 /*
2013  * Pseudo-device initialization routine for /dev/crypto
2014  */
2015 void	cryptoattach(int);
2016 
2017 void
2018 cryptoattach(int num)
2019 {
2020 	pool_init(&fcrpl, sizeof(struct fcrypt), 0, 0, 0, "fcrpl",
2021 	    NULL, IPL_NET);	/* XXX IPL_NET ("splcrypto") */
2022 	pool_init(&csepl, sizeof(struct csession), 0, 0, 0, "csepl",
2023 	    NULL, IPL_NET);	/* XXX IPL_NET ("splcrypto") */
2024 
2025 	/*
2026 	 * Preallocate space for 64 users, with 5 sessions each.
2027 	 * (Consider that a TLS protocol session requires at least
2028 	 * 3DES, MD5, and SHA1 -- both hashes are used in the PRF -- for
2029 	 * the negotiation, plus HMAC_SHA1 for the actual SSL records,
2030 	 * consuming one session here for each algorithm.)
2031 	 */
2032 	pool_prime(&fcrpl, 64);
2033 	pool_prime(&csepl, 64 * 5);
2034 }
2035