1 /*	$NetBSD: cryptosoft.c,v 1.47 2015/08/20 14:40:19 christos Exp $ */
2 /*	$FreeBSD: src/sys/opencrypto/cryptosoft.c,v 1.2.2.1 2002/11/21 23:34:23 sam Exp $	*/
3 /*	$OpenBSD: cryptosoft.c,v 1.35 2002/04/26 08:43:50 deraadt Exp $	*/
4 
5 /*
6  * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu)
7  *
8  * This code was written by Angelos D. Keromytis in Athens, Greece, in
9  * February 2000. Network Security Technologies Inc. (NSTI) kindly
10  * supported the development of this code.
11  *
12  * Copyright (c) 2000, 2001 Angelos D. Keromytis
13  *
14  * Permission to use, copy, and modify this software with or without fee
15  * is hereby granted, provided that this entire notice is included in
16  * all source code copies of any software which is or includes a copy or
17  * modification of this software.
18  *
19  * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
20  * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
21  * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
22  * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
23  * PURPOSE.
24  */
25 
26 #include <sys/cdefs.h>
27 __KERNEL_RCSID(0, "$NetBSD: cryptosoft.c,v 1.47 2015/08/20 14:40:19 christos Exp $");
28 
29 #include <sys/param.h>
30 #include <sys/systm.h>
31 #include <sys/malloc.h>
32 #include <sys/mbuf.h>
33 #include <sys/sysctl.h>
34 #include <sys/errno.h>
35 #include <sys/cprng.h>
36 #include <sys/module.h>
37 #include <sys/device.h>
38 
39 #ifdef _KERNEL_OPT
40 #include "opt_ocf.h"
41 #endif
42 
43 #include <opencrypto/cryptodev.h>
44 #include <opencrypto/cryptosoft.h>
45 #include <opencrypto/xform.h>
46 
47 #include <opencrypto/cryptosoft_xform.c>
48 
49 #include "ioconf.h"
50 
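/*
 * Scratch context large enough for any supported hash transform, so a
 * single on-stack allocation can be reused across algorithms.
 */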
51 union authctx {
52 	MD5_CTX md5ctx;
53 	SHA1_CTX sha1ctx;
54 	RMD160_CTX rmd160ctx;
55 	SHA256_CTX sha256ctx;
56 	SHA384_CTX sha384ctx;
57 	SHA512_CTX sha512ctx;
58 	aesxcbc_ctx aesxcbcctx;
59 	AES_GMAC_CTX aesgmacctx;
60 };
61 
62 struct swcr_data **swcr_sessions = NULL;
63 u_int32_t swcr_sesnum = 0;
64 int32_t swcr_id = -1;
65 
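/*
 * Copy data to or from the request buffer: an mbuf chain when the
 * buffer type is CRYPTO_BUF_MBUF, otherwise a struct uio.
 */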
66 #define COPYBACK(x, a, b, c, d) \
67 	(x) == CRYPTO_BUF_MBUF ? m_copyback((struct mbuf *)a,b,c,d) \
68 	: cuio_copyback((struct uio *)a,b,c,d)
69 #define COPYDATA(x, a, b, c, d) \
70 	(x) == CRYPTO_BUF_MBUF ? m_copydata((struct mbuf *)a,b,c,d) \
71 	: cuio_copydata((struct uio *)a,b,c,d)
72 
73 static	int swcr_encdec(struct cryptodesc *, const struct swcr_data *, void *, int);
74 static	int swcr_compdec(struct cryptodesc *, const struct swcr_data *, void *, int, int *);
75 static	int swcr_combined(struct cryptop *, int);
76 static	int swcr_process(void *, struct cryptop *, int);
77 static	int swcr_newsession(void *, u_int32_t *, struct cryptoini *);
78 static	int swcr_freesession(void *, u_int64_t);
79 
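/*
 * Call flow (sketch): crypto_newsession() reaches swcr_newsession(),
 * which builds one swcr_data record per cryptoini in the session; each
 * submitted cryptop is later handed to swcr_process(), which walks the
 * request's cryptodesc list and dispatches to swcr_encdec(),
 * swcr_authcompute(), swcr_combined() or swcr_compdec().
 */
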
80 /*
81  * Apply a symmetric encryption/decryption algorithm.
82  */
83 static int
84 swcr_encdec(struct cryptodesc *crd, const struct swcr_data *sw, void *bufv,
85     int outtype)
86 {
87 	char *buf = bufv;
88 	unsigned char iv[EALG_MAX_BLOCK_LEN], blk[EALG_MAX_BLOCK_LEN], *idat;
89 	unsigned char *ivp, piv[EALG_MAX_BLOCK_LEN];
90 	const struct swcr_enc_xform *exf;
91 	int i, k, j, blks, ivlen;
92 	int count, ind;
93 
94 	exf = sw->sw_exf;
95 	blks = exf->enc_xform->blocksize;
96 	ivlen = exf->enc_xform->ivsize;
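	/*
	 * Transforms that provide a reinit hook (counter/GCM style
	 * modes) may use an IV shorter than the block size; plain
	 * block chaining requires the IV to be exactly one block.
	 */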
97 	KASSERT(exf->reinit ? ivlen <= blks : ivlen == blks);
98 
99 	/* Check for non-padded data */
100 	if (crd->crd_len % blks)
101 		return EINVAL;
102 
103 	/* Initialize the IV */
104 	if (crd->crd_flags & CRD_F_ENCRYPT) {
105 		/* IV explicitly provided ? */
106 		if (crd->crd_flags & CRD_F_IV_EXPLICIT) {
107 			memcpy(iv, crd->crd_iv, ivlen);
108 			if (exf->reinit)
109 				exf->reinit(sw->sw_kschedule, iv, 0);
110 		} else if (exf->reinit) {
111 			exf->reinit(sw->sw_kschedule, 0, iv);
112 		} else {
113 			/* Get random IV */
114 			for (i = 0;
115 			    i + sizeof (u_int32_t) <= EALG_MAX_BLOCK_LEN;
116 			    i += sizeof (u_int32_t)) {
117 				u_int32_t temp = cprng_fast32();
118 
119 				memcpy(iv + i, &temp, sizeof(u_int32_t));
120 			}
121 			/*
122 			 * Handle the case where the block size is not
123 			 * a multiple of sizeof (u_int32_t), the size
124 			 * of what cprng_fast32() returns.
125 			 */
126 			if (EALG_MAX_BLOCK_LEN % sizeof (u_int32_t) != 0) {
127 				u_int32_t temp = cprng_fast32();
128 
129 				bcopy (&temp, iv + i,
130 				    EALG_MAX_BLOCK_LEN - i);
131 			}
132 		}
133 
134 		/* Do we need to write the IV */
135 		if (!(crd->crd_flags & CRD_F_IV_PRESENT)) {
136 			COPYBACK(outtype, buf, crd->crd_inject, ivlen, iv);
137 		}
138 
139 	} else {	/* Decryption */
140 			/* IV explicitly provided ? */
141 		if (crd->crd_flags & CRD_F_IV_EXPLICIT)
142 			memcpy(iv, crd->crd_iv, ivlen);
143 		else {
144 			/* Get IV off buf */
145 			COPYDATA(outtype, buf, crd->crd_inject, ivlen, iv);
146 		}
147 		if (exf->reinit)
148 			exf->reinit(sw->sw_kschedule, iv, 0);
149 	}
150 
151 	ivp = iv;
152 
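	/*
	 * The cipher work below is identical for every buffer layout;
	 * only the block gathering differs between flat contiguous
	 * buffers, mbuf chains and uio/iovec buffers.
	 */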
153 	if (outtype == CRYPTO_BUF_CONTIG) {
154 		if (exf->reinit) {
155 			for (i = crd->crd_skip;
156 			     i < crd->crd_skip + crd->crd_len; i += blks) {
157 				if (crd->crd_flags & CRD_F_ENCRYPT) {
158 					exf->encrypt(sw->sw_kschedule, buf + i);
159 				} else {
160 					exf->decrypt(sw->sw_kschedule, buf + i);
161 				}
162 			}
163 		} else if (crd->crd_flags & CRD_F_ENCRYPT) {
164 			for (i = crd->crd_skip;
165 			    i < crd->crd_skip + crd->crd_len; i += blks) {
166 				/* XOR with the IV/previous block, as appropriate. */
167 				if (i == crd->crd_skip)
168 					for (k = 0; k < blks; k++)
169 						buf[i + k] ^= ivp[k];
170 				else
171 					for (k = 0; k < blks; k++)
172 						buf[i + k] ^= buf[i + k - blks];
173 				exf->encrypt(sw->sw_kschedule, buf + i);
174 			}
175 		} else {		/* Decrypt */
176 			/*
177 			 * Start at the end, so we don't need to keep the encrypted
178 			 * block as the IV for the next block.
179 			 */
180 			for (i = crd->crd_skip + crd->crd_len - blks;
181 			    i >= crd->crd_skip; i -= blks) {
182 				exf->decrypt(sw->sw_kschedule, buf + i);
183 
184 				/* XOR with the IV/previous block, as appropriate */
185 				if (i == crd->crd_skip)
186 					for (k = 0; k < blks; k++)
187 						buf[i + k] ^= ivp[k];
188 				else
189 					for (k = 0; k < blks; k++)
190 						buf[i + k] ^= buf[i + k - blks];
191 			}
192 		}
193 
194 		return 0;
195 	} else if (outtype == CRYPTO_BUF_MBUF) {
196 		struct mbuf *m = (struct mbuf *) buf;
197 
198 		/* Find beginning of data */
199 		m = m_getptr(m, crd->crd_skip, &k);
200 		if (m == NULL)
201 			return EINVAL;
202 
203 		i = crd->crd_len;
204 
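		/*
		 * Walk the chain one cipher block at a time.  Blocks
		 * that straddle an mbuf boundary are bounced through
		 * blk[]; blocks contiguous within an mbuf are
		 * transformed in place through idat.
		 */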
205 		while (i > 0) {
206 			/*
207 			 * If there's insufficient data at the end of
208 			 * an mbuf, we have to do some copying.
209 			 */
210 			if (m->m_len < k + blks && m->m_len != k) {
211 				m_copydata(m, k, blks, blk);
212 
213 				/* Actual encryption/decryption */
214 				if (exf->reinit) {
215 					if (crd->crd_flags & CRD_F_ENCRYPT) {
216 						exf->encrypt(sw->sw_kschedule,
217 							     blk);
218 					} else {
219 						exf->decrypt(sw->sw_kschedule,
220 							     blk);
221 					}
222 				} else if (crd->crd_flags & CRD_F_ENCRYPT) {
223 					/* XOR with previous block */
224 					for (j = 0; j < blks; j++)
225 						blk[j] ^= ivp[j];
226 
227 					exf->encrypt(sw->sw_kschedule, blk);
228 
229 					/*
230 					 * Keep encrypted block for XOR'ing
231 					 * with next block
232 					 */
233 					memcpy(iv, blk, blks);
234 					ivp = iv;
235 				} else {	/* decrypt */
236 					/*
237 					 * Keep encrypted block for XOR'ing
238 					 * with next block
239 					 */
240 					if (ivp == iv)
241 						memcpy(piv, blk, blks);
242 					else
243 						memcpy(iv, blk, blks);
244 
245 					exf->decrypt(sw->sw_kschedule, blk);
246 
247 					/* XOR with previous block */
248 					for (j = 0; j < blks; j++)
249 						blk[j] ^= ivp[j];
250 
251 					if (ivp == iv)
252 						memcpy(iv, piv, blks);
253 					else
254 						ivp = iv;
255 				}
256 
257 				/* Copy back decrypted block */
258 				m_copyback(m, k, blks, blk);
259 
260 				/* Advance pointer */
261 				m = m_getptr(m, k + blks, &k);
262 				if (m == NULL)
263 					return EINVAL;
264 
265 				i -= blks;
266 
267 				/* Could be done... */
268 				if (i == 0)
269 					break;
270 			}
271 
272 			/* Skip possibly empty mbufs */
273 			if (k == m->m_len) {
274 				for (m = m->m_next; m && m->m_len == 0;
275 				    m = m->m_next)
276 					;
277 				k = 0;
278 			}
279 
280 			/* Sanity check */
281 			if (m == NULL)
282 				return EINVAL;
283 
284 			/*
285 			 * Warning: idat may point to garbage here, but
286 			 * we only use it in the while() loop below, and
287 			 * only if there is indeed enough data.
288 			 */
289 			idat = mtod(m, unsigned char *) + k;
290 
291 			while (m->m_len >= k + blks && i > 0) {
292 				if (exf->reinit) {
293 					if (crd->crd_flags & CRD_F_ENCRYPT) {
294 						exf->encrypt(sw->sw_kschedule,
295 							     idat);
296 					} else {
297 						exf->decrypt(sw->sw_kschedule,
298 							     idat);
299 					}
300 				} else if (crd->crd_flags & CRD_F_ENCRYPT) {
301 					/* XOR with previous block/IV */
302 					for (j = 0; j < blks; j++)
303 						idat[j] ^= ivp[j];
304 
305 					exf->encrypt(sw->sw_kschedule, idat);
306 					ivp = idat;
307 				} else {	/* decrypt */
308 					/*
309 					 * Keep encrypted block to be used
310 					 * in next block's processing.
311 					 */
312 					if (ivp == iv)
313 						memcpy(piv, idat, blks);
314 					else
315 						memcpy(iv, idat, blks);
316 
317 					exf->decrypt(sw->sw_kschedule, idat);
318 
319 					/* XOR with previous block/IV */
320 					for (j = 0; j < blks; j++)
321 						idat[j] ^= ivp[j];
322 
323 					if (ivp == iv)
324 						memcpy(iv, piv, blks);
325 					else
326 						ivp = iv;
327 				}
328 
329 				idat += blks;
330 				k += blks;
331 				i -= blks;
332 			}
333 		}
334 
335 		return 0; /* Done with mbuf encryption/decryption */
336 	} else if (outtype == CRYPTO_BUF_IOV) {
337 		struct uio *uio = (struct uio *) buf;
338 
339 		/* Find beginning of data */
340 		count = crd->crd_skip;
341 		ind = cuio_getptr(uio, count, &k);
342 		if (ind == -1)
343 			return EINVAL;
344 
345 		i = crd->crd_len;
346 
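		/*
		 * Same strategy as the mbuf case: blocks straddling an
		 * iovec boundary are bounced through blk[], the rest
		 * are transformed in place through idat.
		 */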
347 		while (i > 0) {
348 			/*
349 			 * If there's insufficient data at the end,
350 			 * we have to do some copying.
351 			 */
352 			if (uio->uio_iov[ind].iov_len < k + blks &&
353 			    uio->uio_iov[ind].iov_len != k) {
354 				cuio_copydata(uio, k, blks, blk);
355 
356 				/* Actual encryption/decryption */
357 				if (exf->reinit) {
358 					if (crd->crd_flags & CRD_F_ENCRYPT) {
359 						exf->encrypt(sw->sw_kschedule,
360 							     blk);
361 					} else {
362 						exf->decrypt(sw->sw_kschedule,
363 							     blk);
364 					}
365 				} else if (crd->crd_flags & CRD_F_ENCRYPT) {
366 					/* XOR with previous block */
367 					for (j = 0; j < blks; j++)
368 						blk[j] ^= ivp[j];
369 
370 					exf->encrypt(sw->sw_kschedule, blk);
371 
372 					/*
373 					 * Keep encrypted block for XOR'ing
374 					 * with next block
375 					 */
376 					memcpy(iv, blk, blks);
377 					ivp = iv;
378 				} else {	/* decrypt */
379 					/*
380 					 * Keep encrypted block for XOR'ing
381 					 * with next block
382 					 */
383 					if (ivp == iv)
384 						memcpy(piv, blk, blks);
385 					else
386 						memcpy(iv, blk, blks);
387 
388 					exf->decrypt(sw->sw_kschedule, blk);
389 
390 					/* XOR with previous block */
391 					for (j = 0; j < blks; j++)
392 						blk[j] ^= ivp[j];
393 
394 					if (ivp == iv)
395 						memcpy(iv, piv, blks);
396 					else
397 						ivp = iv;
398 				}
399 
400 				/* Copy back decrypted block */
401 				cuio_copyback(uio, k, blks, blk);
402 
403 				count += blks;
404 
405 				/* Advance pointer */
406 				ind = cuio_getptr(uio, count, &k);
407 				if (ind == -1)
408 					return (EINVAL);
409 
410 				i -= blks;
411 
412 				/* Could be done... */
413 				if (i == 0)
414 					break;
415 			}
416 
417 			/*
418 			 * Warning: idat may point to garbage here, but
419 			 * we only use it in the while() loop below, and
420 			 * only if there is indeed enough data.
421 			 */
422 			idat = ((char *)uio->uio_iov[ind].iov_base) + k;
423 
424 			while (uio->uio_iov[ind].iov_len >= k + blks &&
425 			    i > 0) {
426 				if (exf->reinit) {
427 					if (crd->crd_flags & CRD_F_ENCRYPT) {
428 						exf->encrypt(sw->sw_kschedule,
429 							    idat);
430 					} else {
431 						exf->decrypt(sw->sw_kschedule,
432 							    idat);
433 					}
434 				} else if (crd->crd_flags & CRD_F_ENCRYPT) {
435 					/* XOR with previous block/IV */
436 					for (j = 0; j < blks; j++)
437 						idat[j] ^= ivp[j];
438 
439 					exf->encrypt(sw->sw_kschedule, idat);
440 					ivp = idat;
441 				} else {	/* decrypt */
442 					/*
443 					 * Keep encrypted block to be used
444 					 * in next block's processing.
445 					 */
446 					if (ivp == iv)
447 						memcpy(piv, idat, blks);
448 					else
449 						memcpy(iv, idat, blks);
450 
451 					exf->decrypt(sw->sw_kschedule, idat);
452 
453 					/* XOR with previous block/IV */
454 					for (j = 0; j < blks; j++)
455 						idat[j] ^= ivp[j];
456 
457 					if (ivp == iv)
458 						memcpy(iv, piv, blks);
459 					else
460 						ivp = iv;
461 				}
462 
463 				idat += blks;
464 				count += blks;
465 				k += blks;
466 				i -= blks;
467 			}
468 		}
469 		return 0; /* Done with iov encryption/decryption */
470 	}
471 
472 	/* Unreachable */
473 	return EINVAL;
474 }
475 
476 /*
477  * Compute keyed-hash authenticator.
478  */
479 int
480 swcr_authcompute(struct cryptop *crp, struct cryptodesc *crd,
481     const struct swcr_data *sw, void *buf, int outtype)
482 {
483 	unsigned char aalg[AALG_MAX_RESULT_LEN];
484 	const struct swcr_auth_hash *axf;
485 	union authctx ctx;
486 	int err;
487 
488 	if (sw->sw_ictx == NULL)
489 		return EINVAL;
490 
491 	axf = sw->sw_axf;
492 
493 	memcpy(&ctx, sw->sw_ictx, axf->ctxsize);
494 
495 	switch (outtype) {
496 	case CRYPTO_BUF_CONTIG:
497 		axf->Update(&ctx, (char *)buf + crd->crd_skip, crd->crd_len);
498 		break;
499 	case CRYPTO_BUF_MBUF:
500 		err = m_apply((struct mbuf *) buf, crd->crd_skip, crd->crd_len,
501 		    (int (*)(void*, void *, unsigned int)) axf->Update,
502 		    (void *) &ctx);
503 		if (err)
504 			return err;
505 		break;
506 	case CRYPTO_BUF_IOV:
507 		err = cuio_apply((struct uio *) buf, crd->crd_skip,
508 		    crd->crd_len,
509 		    (int (*)(void *, void *, unsigned int)) axf->Update,
510 		    (void *) &ctx);
511 		if (err) {
512 			return err;
513 		}
514 		break;
515 	default:
516 		return EINVAL;
517 	}
518 
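	/*
	 * Finalize according to the algorithm family: HMAC completes
	 * the outer hash from the precomputed opad context, KPDK
	 * appends the stored key before the final pass, and plain
	 * digests (and AES-XCBC) simply finalize.
	 */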
519 	switch (sw->sw_alg) {
520 	case CRYPTO_MD5_HMAC:
521 	case CRYPTO_MD5_HMAC_96:
522 	case CRYPTO_SHA1_HMAC:
523 	case CRYPTO_SHA1_HMAC_96:
524 	case CRYPTO_SHA2_256_HMAC:
525 	case CRYPTO_SHA2_384_HMAC:
526 	case CRYPTO_SHA2_512_HMAC:
527 	case CRYPTO_RIPEMD160_HMAC:
528 	case CRYPTO_RIPEMD160_HMAC_96:
529 		if (sw->sw_octx == NULL)
530 			return EINVAL;
531 
532 		axf->Final(aalg, &ctx);
533 		memcpy(&ctx, sw->sw_octx, axf->ctxsize);
534 		axf->Update(&ctx, aalg, axf->auth_hash->hashsize);
535 		axf->Final(aalg, &ctx);
536 		break;
537 
538 	case CRYPTO_MD5_KPDK:
539 	case CRYPTO_SHA1_KPDK:
540 		if (sw->sw_octx == NULL)
541 			return EINVAL;
542 
543 		axf->Update(&ctx, sw->sw_octx, sw->sw_klen);
544 		axf->Final(aalg, &ctx);
545 		break;
546 
547 	case CRYPTO_NULL_HMAC:
548 	case CRYPTO_MD5:
549 	case CRYPTO_SHA1:
550 	case CRYPTO_AES_XCBC_MAC_96:
551 		axf->Final(aalg, &ctx);
552 		break;
553 	}
554 
555 	/* Inject the authentication data */
556 	switch (outtype) {
557 	case CRYPTO_BUF_CONTIG:
558 		(void)memcpy((char *)buf + crd->crd_inject, aalg,
559 		    axf->auth_hash->authsize);
560 		break;
561 	case CRYPTO_BUF_MBUF:
562 		m_copyback((struct mbuf *) buf, crd->crd_inject,
563 		    axf->auth_hash->authsize, aalg);
564 		break;
565 	case CRYPTO_BUF_IOV:
566 		memcpy(crp->crp_mac, aalg, axf->auth_hash->authsize);
567 		break;
568 	default:
569 		return EINVAL;
570 	}
571 	return 0;
572 }
573 
574 /*
575  * Apply a combined encryption-authentication transformation
576  */
577 static int
578 swcr_combined(struct cryptop *crp, int outtype)
579 {
580 	uint32_t blkbuf[howmany(EALG_MAX_BLOCK_LEN, sizeof(uint32_t))];
581 	u_char *blk = (u_char *)blkbuf;
582 	u_char aalg[AALG_MAX_RESULT_LEN];
583 	u_char iv[EALG_MAX_BLOCK_LEN];
584 	union authctx ctx;
585 	struct cryptodesc *crd, *crda = NULL, *crde = NULL;
586 	struct swcr_data *sw, *swa, *swe = NULL;
587 	const struct swcr_auth_hash *axf = NULL;
588 	const struct swcr_enc_xform *exf = NULL;
589 	void *buf = (void *)crp->crp_buf;
590 	uint32_t *blkp;
591 	int i, blksz = 0, ivlen = 0, len;
592 
593 	for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
594 		for (sw = swcr_sessions[crp->crp_sid & 0xffffffff];
595 		     sw && sw->sw_alg != crd->crd_alg;
596 		     sw = sw->sw_next)
597 			;
598 		if (sw == NULL)
599 			return (EINVAL);
600 
601 		switch (sw->sw_alg) {
602 		case CRYPTO_AES_GCM_16:
603 		case CRYPTO_AES_GMAC:
604 			swe = sw;
605 			crde = crd;
606 			exf = swe->sw_exf;
607 			ivlen = exf->enc_xform->ivsize;
608 			break;
609 		case CRYPTO_AES_128_GMAC:
610 		case CRYPTO_AES_192_GMAC:
611 		case CRYPTO_AES_256_GMAC:
612 			swa = sw;
613 			crda = crd;
614 			axf = swa->sw_axf;
615 			if (swa->sw_ictx == NULL)
616 				return (EINVAL);
617 			memcpy(&ctx, swa->sw_ictx, axf->ctxsize);
618 			blksz = axf->auth_hash->blocksize;
619 			break;
620 		default:
621 			return (EINVAL);
622 		}
623 	}
624 	if (crde == NULL || crda == NULL)
625 		return (EINVAL);
626 	if (outtype == CRYPTO_BUF_CONTIG)
627 		return (EINVAL);
628 
629 	/* Initialize the IV */
630 	if (crde->crd_flags & CRD_F_ENCRYPT) {
631 		/* IV explicitly provided ? */
632 		if (crde->crd_flags & CRD_F_IV_EXPLICIT) {
633 			memcpy(iv, crde->crd_iv, ivlen);
634 			if (exf->reinit)
635 				exf->reinit(swe->sw_kschedule, iv, 0);
636 		} else if (exf->reinit)
637 			exf->reinit(swe->sw_kschedule, 0, iv);
638 		else
639 			cprng_fast(iv, ivlen);
640 
641 		/* Do we need to write the IV */
642 		if (!(crde->crd_flags & CRD_F_IV_PRESENT))
643 			COPYBACK(outtype, buf, crde->crd_inject, ivlen, iv);
644 
645 	} else {	/* Decryption */
646 			/* IV explicitly provided ? */
647 		if (crde->crd_flags & CRD_F_IV_EXPLICIT)
648 			memcpy(iv, crde->crd_iv, ivlen);
649 		else {
650 			/* Get IV off buf */
651 			COPYDATA(outtype, buf, crde->crd_inject, ivlen, iv);
652 		}
653 		if (exf->reinit)
654 			exf->reinit(swe->sw_kschedule, iv, 0);
655 	}
656 
657 	/* Supply MAC with IV */
658 	if (axf->Reinit)
659 		axf->Reinit(&ctx, iv, ivlen);
660 
661 	/* Supply MAC with AAD */
662 	for (i = 0; i < crda->crd_len; i += blksz) {
663 		len = MIN(crda->crd_len - i, blksz);
664 		COPYDATA(outtype, buf, crda->crd_skip + i, len, blk);
665 		axf->Update(&ctx, blk, len);
666 	}
667 
668 	/* Do encryption/decryption with MAC */
669 	for (i = 0; i < crde->crd_len; i += blksz) {
670 		len = MIN(crde->crd_len - i, blksz);
671 		if (len < blksz)
672 			memset(blk, 0, blksz);
673 		COPYDATA(outtype, buf, crde->crd_skip + i, len, blk);
674 		if (crde->crd_flags & CRD_F_ENCRYPT) {
675 			exf->encrypt(swe->sw_kschedule, blk);
676 			axf->Update(&ctx, blk, len);
677 		} else {
678 			axf->Update(&ctx, blk, len);
679 			exf->decrypt(swe->sw_kschedule, blk);
680 		}
681 		COPYBACK(outtype, buf, crde->crd_skip + i, len, blk);
682 	}
683 
684 	/* Do any required special finalization */
685 	switch (crda->crd_alg) {
686 		case CRYPTO_AES_128_GMAC:
687 		case CRYPTO_AES_192_GMAC:
688 		case CRYPTO_AES_256_GMAC:
689 			/* length block */
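			/*
			 * The block holds the 64-bit big-endian bit
			 * lengths of the AAD and of the ciphertext.
			 * Only the low 32 bits can be non-zero here,
			 * so they are written at word offsets 1 and 3.
			 */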
690 			memset(blk, 0, blksz);
691 			blkp = (uint32_t *)blk + 1;
692 			*blkp = htobe32(crda->crd_len * 8);
693 			blkp = (uint32_t *)blk + 3;
694 			*blkp = htobe32(crde->crd_len * 8);
695 			axf->Update(&ctx, blk, blksz);
696 			break;
697 	}
698 
699 	/* Finalize MAC */
700 	axf->Final(aalg, &ctx);
701 
702 	/* Inject the authentication data */
703 	if (outtype == CRYPTO_BUF_MBUF)
704 		COPYBACK(outtype, buf, crda->crd_inject, axf->auth_hash->authsize, aalg);
705 	else
706 		memcpy(crp->crp_mac, aalg, axf->auth_hash->authsize);
707 
708 	return (0);
709 }
710 
711 /*
712  * Apply a compression/decompression algorithm
713  */
714 static int
715 swcr_compdec(struct cryptodesc *crd, const struct swcr_data *sw,
716     void *buf, int outtype, int *res_size)
717 {
718 	u_int8_t *data, *out;
719 	const struct swcr_comp_algo *cxf;
720 	int adj;
721 	u_int32_t result;
722 
723 	cxf = sw->sw_cxf;
724 
725 	/* We must handle the whole buffer of data at once, so copy
726 	 * it into a contiguous temporary buffer before handing it
727 	 * to the (de)compression routine.
728 	 */
729 
730 	data = malloc(crd->crd_len, M_CRYPTO_DATA, M_NOWAIT);
731 	if (data == NULL)
732 		return (EINVAL);
733 	COPYDATA(outtype, buf, crd->crd_skip, crd->crd_len, data);
734 
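	/*
	 * The transform allocates the output buffer; it is freed below
	 * once the result has been copied back (or discarded).
	 */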
735 	if (crd->crd_flags & CRD_F_COMP)
736 		result = cxf->compress(data, crd->crd_len, &out);
737 	else
738 		result = cxf->decompress(data, crd->crd_len, &out,
739 					 *res_size);
740 
741 	free(data, M_CRYPTO_DATA);
742 	if (result == 0)
743 		return EINVAL;
744 
745 	/* Copy back the (de)compressed data. m_copyback will
746 	 * extend the mbuf as necessary.
747 	 */
748 	*res_size = (int)result;
749 	/* Check the compressed size when doing compression */
750 	if (crd->crd_flags & CRD_F_COMP &&
751 	    sw->sw_alg == CRYPTO_DEFLATE_COMP_NOGROW &&
752 	    result >= crd->crd_len) {
753 			/* Compression was useless, we lost time */
754 			free(out, M_CRYPTO_DATA);
755 			return 0;
756 	}
757 
758 	COPYBACK(outtype, buf, crd->crd_skip, result, out);
759 	if (result < crd->crd_len) {
760 		adj = result - crd->crd_len;
761 		if (outtype == CRYPTO_BUF_MBUF) {
762 			/* Trim the now-unused tail of the mbuf chain. */
763 			m_adj((struct mbuf *)buf, adj);
764 		}
765 		/* Don't adjust the iov_len, it breaks the kmem_free */
766 	}
767 	free(out, M_CRYPTO_DATA);
768 	return 0;
769 }
770 
771 /*
772  * Generate a new software session.
773  */
774 static int
775 swcr_newsession(void *arg, u_int32_t *sid, struct cryptoini *cri)
776 {
777 	struct swcr_data **swd;
778 	const struct swcr_auth_hash *axf;
779 	const struct swcr_enc_xform *txf;
780 	const struct swcr_comp_algo *cxf;
781 	u_int32_t i;
782 	int k, error;
783 
784 	if (sid == NULL || cri == NULL)
785 		return EINVAL;
786 
787 	if (swcr_sessions) {
788 		for (i = 1; i < swcr_sesnum; i++)
789 			if (swcr_sessions[i] == NULL)
790 				break;
791 	} else
792 		i = 1;		/* NB: to silence compiler warning */
793 
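	/*
	 * No free slot: allocate the session table, or double its
	 * size.  Slot 0 is left unused so a session id of 0 is never
	 * handed out.
	 */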
794 	if (swcr_sessions == NULL || i == swcr_sesnum) {
795 		if (swcr_sessions == NULL) {
796 			i = 1; /* We leave swcr_sessions[0] empty */
797 			swcr_sesnum = CRYPTO_SW_SESSIONS;
798 		} else
799 			swcr_sesnum *= 2;
800 
801 		swd = malloc(swcr_sesnum * sizeof(struct swcr_data *),
802 		    M_CRYPTO_DATA, M_NOWAIT);
803 		if (swd == NULL) {
804 			/* Reset session number */
805 			if (swcr_sesnum == CRYPTO_SW_SESSIONS)
806 				swcr_sesnum = 0;
807 			else
808 				swcr_sesnum /= 2;
809 			return ENOBUFS;
810 		}
811 
812 		memset(swd, 0, swcr_sesnum * sizeof(struct swcr_data *));
813 
814 		/* Copy existing sessions */
815 		if (swcr_sessions) {
816 			memcpy(swd, swcr_sessions,
817 			    (swcr_sesnum / 2) * sizeof(struct swcr_data *));
818 			free(swcr_sessions, M_CRYPTO_DATA);
819 		}
820 
821 		swcr_sessions = swd;
822 	}
823 
824 	swd = &swcr_sessions[i];
825 	*sid = i;
826 
827 	while (cri) {
828 		*swd = malloc(sizeof **swd, M_CRYPTO_DATA, M_NOWAIT);
829 		if (*swd == NULL) {
830 			swcr_freesession(NULL, i);
831 			return ENOBUFS;
832 		}
833 		memset(*swd, 0, sizeof(struct swcr_data));
834 
835 		switch (cri->cri_alg) {
836 		case CRYPTO_DES_CBC:
837 			txf = &swcr_enc_xform_des;
838 			goto enccommon;
839 		case CRYPTO_3DES_CBC:
840 			txf = &swcr_enc_xform_3des;
841 			goto enccommon;
842 		case CRYPTO_BLF_CBC:
843 			txf = &swcr_enc_xform_blf;
844 			goto enccommon;
845 		case CRYPTO_CAST_CBC:
846 			txf = &swcr_enc_xform_cast5;
847 			goto enccommon;
848 		case CRYPTO_SKIPJACK_CBC:
849 			txf = &swcr_enc_xform_skipjack;
850 			goto enccommon;
851 		case CRYPTO_RIJNDAEL128_CBC:
852 			txf = &swcr_enc_xform_rijndael128;
853 			goto enccommon;
854 		case CRYPTO_CAMELLIA_CBC:
855 			txf = &swcr_enc_xform_camellia;
856 			goto enccommon;
857 		case CRYPTO_AES_CTR:
858 			txf = &swcr_enc_xform_aes_ctr;
859 			goto enccommon;
860 		case CRYPTO_AES_GCM_16:
861 			txf = &swcr_enc_xform_aes_gcm;
862 			goto enccommon;
863 		case CRYPTO_AES_GMAC:
864 			txf = &swcr_enc_xform_aes_gmac;
865 			goto enccommon;
866 		case CRYPTO_NULL_CBC:
867 			txf = &swcr_enc_xform_null;
868 			goto enccommon;
869 		enccommon:
870 			error = txf->setkey(&((*swd)->sw_kschedule),
871 					cri->cri_key, cri->cri_klen / 8);
872 			if (error) {
873 				swcr_freesession(NULL, i);
874 				return error;
875 			}
876 			(*swd)->sw_exf = txf;
877 			break;
878 
879 		case CRYPTO_MD5_HMAC:
880 			axf = &swcr_auth_hash_hmac_md5;
881 			goto authcommon;
882 		case CRYPTO_MD5_HMAC_96:
883 			axf = &swcr_auth_hash_hmac_md5_96;
884 			goto authcommon;
885 		case CRYPTO_SHA1_HMAC:
886 			axf = &swcr_auth_hash_hmac_sha1;
887 			goto authcommon;
888 		case CRYPTO_SHA1_HMAC_96:
889 			axf = &swcr_auth_hash_hmac_sha1_96;
890 			goto authcommon;
891 		case CRYPTO_SHA2_256_HMAC:
892 			axf = &swcr_auth_hash_hmac_sha2_256;
893 			goto authcommon;
894 		case CRYPTO_SHA2_384_HMAC:
895 			axf = &swcr_auth_hash_hmac_sha2_384;
896 			goto authcommon;
897 		case CRYPTO_SHA2_512_HMAC:
898 			axf = &swcr_auth_hash_hmac_sha2_512;
899 			goto authcommon;
900 		case CRYPTO_NULL_HMAC:
901 			axf = &swcr_auth_hash_null;
902 			goto authcommon;
903 		case CRYPTO_RIPEMD160_HMAC:
904 			axf = &swcr_auth_hash_hmac_ripemd_160;
905 			goto authcommon;
906 		case CRYPTO_RIPEMD160_HMAC_96:
907 			axf = &swcr_auth_hash_hmac_ripemd_160_96;
908 			goto authcommon;	/* leave this for safety */
909 		authcommon:
910 			(*swd)->sw_ictx = malloc(axf->ctxsize,
911 			    M_CRYPTO_DATA, M_NOWAIT);
912 			if ((*swd)->sw_ictx == NULL) {
913 				swcr_freesession(NULL, i);
914 				return ENOBUFS;
915 			}
916 
917 			(*swd)->sw_octx = malloc(axf->ctxsize,
918 			    M_CRYPTO_DATA, M_NOWAIT);
919 			if ((*swd)->sw_octx == NULL) {
920 				swcr_freesession(NULL, i);
921 				return ENOBUFS;
922 			}
923 
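			/*
			 * Precompute the HMAC inner and outer contexts:
			 * hash (key ^ ipad) into sw_ictx and
			 * (key ^ opad) into sw_octx, restoring the
			 * caller's key by XOR when done.
			 */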
924 			for (k = 0; k < cri->cri_klen / 8; k++)
925 				cri->cri_key[k] ^= HMAC_IPAD_VAL;
926 
927 			axf->Init((*swd)->sw_ictx);
928 			axf->Update((*swd)->sw_ictx, cri->cri_key,
929 			    cri->cri_klen / 8);
930 			axf->Update((*swd)->sw_ictx, hmac_ipad_buffer,
931 			    axf->auth_hash->blocksize - (cri->cri_klen / 8));
932 
933 			for (k = 0; k < cri->cri_klen / 8; k++)
934 				cri->cri_key[k] ^= (HMAC_IPAD_VAL ^ HMAC_OPAD_VAL);
935 
936 			axf->Init((*swd)->sw_octx);
937 			axf->Update((*swd)->sw_octx, cri->cri_key,
938 			    cri->cri_klen / 8);
939 			axf->Update((*swd)->sw_octx, hmac_opad_buffer,
940 			    axf->auth_hash->blocksize - (cri->cri_klen / 8));
941 
942 			for (k = 0; k < cri->cri_klen / 8; k++)
943 				cri->cri_key[k] ^= HMAC_OPAD_VAL;
944 			(*swd)->sw_axf = axf;
945 			break;
946 
947 		case CRYPTO_MD5_KPDK:
948 			axf = &swcr_auth_hash_key_md5;
949 			goto auth2common;
950 
951 		case CRYPTO_SHA1_KPDK:
952 			axf = &swcr_auth_hash_key_sha1;
953 		auth2common:
954 			(*swd)->sw_ictx = malloc(axf->ctxsize,
955 			    M_CRYPTO_DATA, M_NOWAIT);
956 			if ((*swd)->sw_ictx == NULL) {
957 				swcr_freesession(NULL, i);
958 				return ENOBUFS;
959 			}
960 
961 			/* Store the key so we can "append" it to the payload */
962 			(*swd)->sw_octx = malloc(cri->cri_klen / 8, M_CRYPTO_DATA,
963 			    M_NOWAIT);
964 			if ((*swd)->sw_octx == NULL) {
965 				swcr_freesession(NULL, i);
966 				return ENOBUFS;
967 			}
968 
969 			(*swd)->sw_klen = cri->cri_klen / 8;
970 			memcpy((*swd)->sw_octx, cri->cri_key, cri->cri_klen / 8);
971 			axf->Init((*swd)->sw_ictx);
972 			axf->Update((*swd)->sw_ictx, cri->cri_key,
973 			    cri->cri_klen / 8);
974 			axf->Final(NULL, (*swd)->sw_ictx);
975 			(*swd)->sw_axf = axf;
976 			break;
977 
978 		case CRYPTO_MD5:
979 			axf = &swcr_auth_hash_md5;
980 			goto auth3common;
981 
982 		case CRYPTO_SHA1:
983 			axf = &swcr_auth_hash_sha1;
984 		auth3common:
985 			(*swd)->sw_ictx = malloc(axf->ctxsize,
986 			    M_CRYPTO_DATA, M_NOWAIT);
987 			if ((*swd)->sw_ictx == NULL) {
988 				swcr_freesession(NULL, i);
989 				return ENOBUFS;
990 			}
991 
992 			axf->Init((*swd)->sw_ictx);
993 			(*swd)->sw_axf = axf;
994 			break;
995 
996 		case CRYPTO_AES_XCBC_MAC_96:
997 			axf = &swcr_auth_hash_aes_xcbc_mac;
998 			goto auth4common;
999 		case CRYPTO_AES_128_GMAC:
1000 			axf = &swcr_auth_hash_gmac_aes_128;
1001 			goto auth4common;
1002 		case CRYPTO_AES_192_GMAC:
1003 			axf = &swcr_auth_hash_gmac_aes_192;
1004 			goto auth4common;
1005 		case CRYPTO_AES_256_GMAC:
1006 			axf = &swcr_auth_hash_gmac_aes_256;
1007 		auth4common:
1008 			(*swd)->sw_ictx = malloc(axf->ctxsize,
1009 			    M_CRYPTO_DATA, M_NOWAIT);
1010 			if ((*swd)->sw_ictx == NULL) {
1011 				swcr_freesession(NULL, i);
1012 				return ENOBUFS;
1013 			}
1014 			axf->Init((*swd)->sw_ictx);
1015 			axf->Setkey((*swd)->sw_ictx,
1016 				cri->cri_key, cri->cri_klen / 8);
1017 			(*swd)->sw_axf = axf;
1018 			break;
1019 
1020 		case CRYPTO_DEFLATE_COMP:
1021 			cxf = &swcr_comp_algo_deflate;
1022 			(*swd)->sw_cxf = cxf;
1023 			break;
1024 
1025 		case CRYPTO_DEFLATE_COMP_NOGROW:
1026 			cxf = &swcr_comp_algo_deflate_nogrow;
1027 			(*swd)->sw_cxf = cxf;
1028 			break;
1029 
1030 		case CRYPTO_GZIP_COMP:
1031 			cxf = &swcr_comp_algo_gzip;
1032 			(*swd)->sw_cxf = cxf;
1033 			break;
1034 		default:
1035 			swcr_freesession(NULL, i);
1036 			return EINVAL;
1037 		}
1038 
1039 		(*swd)->sw_alg = cri->cri_alg;
1040 		cri = cri->cri_next;
1041 		swd = &((*swd)->sw_next);
1042 	}
1043 	return 0;
1044 }
1045 
1046 /*
1047  * Free a session.
1048  */
1049 static int
1050 swcr_freesession(void *arg, u_int64_t tid)
1051 {
1052 	struct swcr_data *swd;
1053 	const struct swcr_enc_xform *txf;
1054 	const struct swcr_auth_hash *axf;
1055 	u_int32_t sid = ((u_int32_t) tid) & 0xffffffff;
1056 
1057 	if (sid >= swcr_sesnum || swcr_sessions == NULL ||
1058 	    swcr_sessions[sid] == NULL)
1059 		return EINVAL;
1060 
1061 	/* Silently accept and return */
1062 	if (sid == 0)
1063 		return 0;
1064 
1065 	while ((swd = swcr_sessions[sid]) != NULL) {
1066 		swcr_sessions[sid] = swd->sw_next;
1067 
1068 		switch (swd->sw_alg) {
1069 		case CRYPTO_DES_CBC:
1070 		case CRYPTO_3DES_CBC:
1071 		case CRYPTO_BLF_CBC:
1072 		case CRYPTO_CAST_CBC:
1073 		case CRYPTO_SKIPJACK_CBC:
1074 		case CRYPTO_RIJNDAEL128_CBC:
1075 		case CRYPTO_CAMELLIA_CBC:
1076 		case CRYPTO_AES_CTR:
1077 		case CRYPTO_AES_GCM_16:
1078 		case CRYPTO_AES_GMAC:
1079 		case CRYPTO_NULL_CBC:
1080 			txf = swd->sw_exf;
1081 
1082 			if (swd->sw_kschedule)
1083 				txf->zerokey(&(swd->sw_kschedule));
1084 			break;
1085 
1086 		case CRYPTO_MD5_HMAC:
1087 		case CRYPTO_MD5_HMAC_96:
1088 		case CRYPTO_SHA1_HMAC:
1089 		case CRYPTO_SHA1_HMAC_96:
1090 		case CRYPTO_SHA2_256_HMAC:
1091 		case CRYPTO_SHA2_384_HMAC:
1092 		case CRYPTO_SHA2_512_HMAC:
1093 		case CRYPTO_RIPEMD160_HMAC:
1094 		case CRYPTO_RIPEMD160_HMAC_96:
1095 		case CRYPTO_NULL_HMAC:
1096 			axf = swd->sw_axf;
1097 
1098 			if (swd->sw_ictx) {
1099 				explicit_memset(swd->sw_ictx, 0, axf->ctxsize);
1100 				free(swd->sw_ictx, M_CRYPTO_DATA);
1101 			}
1102 			if (swd->sw_octx) {
1103 				explicit_memset(swd->sw_octx, 0, axf->ctxsize);
1104 				free(swd->sw_octx, M_CRYPTO_DATA);
1105 			}
1106 			break;
1107 
1108 		case CRYPTO_MD5_KPDK:
1109 		case CRYPTO_SHA1_KPDK:
1110 			axf = swd->sw_axf;
1111 
1112 			if (swd->sw_ictx) {
1113 				explicit_memset(swd->sw_ictx, 0, axf->ctxsize);
1114 				free(swd->sw_ictx, M_CRYPTO_DATA);
1115 			}
1116 			if (swd->sw_octx) {
1117 				explicit_memset(swd->sw_octx, 0, swd->sw_klen);
1118 				free(swd->sw_octx, M_CRYPTO_DATA);
1119 			}
1120 			break;
1121 
1122 		case CRYPTO_MD5:
1123 		case CRYPTO_SHA1:
1124 		case CRYPTO_AES_XCBC_MAC_96:
1125 		case CRYPTO_AES_128_GMAC:
1126 		case CRYPTO_AES_192_GMAC:
1127 		case CRYPTO_AES_256_GMAC:
1128 			axf = swd->sw_axf;
1129 
1130 			if (swd->sw_ictx) {
1131 				explicit_memset(swd->sw_ictx, 0, axf->ctxsize);
1132 				free(swd->sw_ictx, M_CRYPTO_DATA);
1133 			}
1134 			break;
1135 
1136 		case CRYPTO_DEFLATE_COMP:
1137 		case CRYPTO_DEFLATE_COMP_NOGROW:
1138 		case CRYPTO_GZIP_COMP:
1139 			break;
1140 		}
1141 
1142 		free(swd, M_CRYPTO_DATA);
1143 	}
1144 	return 0;
1145 }
1146 
1147 /*
1148  * Process a software request.
1149  */
1150 static int
1151 swcr_process(void *arg, struct cryptop *crp, int hint)
1152 {
1153 	struct cryptodesc *crd;
1154 	struct swcr_data *sw;
1155 	u_int32_t lid;
1156 	int type;
1157 
1158 	/* Sanity check */
1159 	if (crp == NULL)
1160 		return EINVAL;
1161 
1162 	if (crp->crp_desc == NULL || crp->crp_buf == NULL) {
1163 		crp->crp_etype = EINVAL;
1164 		goto done;
1165 	}
1166 
1167 	lid = crp->crp_sid & 0xffffffff;
1168 	if (lid >= swcr_sesnum || lid == 0 || swcr_sessions[lid] == NULL) {
1169 		crp->crp_etype = ENOENT;
1170 		goto done;
1171 	}
1172 
1173 	if (crp->crp_flags & CRYPTO_F_IMBUF) {
1174 		type = CRYPTO_BUF_MBUF;
1175 	} else if (crp->crp_flags & CRYPTO_F_IOV) {
1176 		type = CRYPTO_BUF_IOV;
1177 	} else {
1178 		type = CRYPTO_BUF_CONTIG;
1179 	}
1180 
1181 	/* Go through crypto descriptors, processing as we go */
1182 	for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
1183 		/*
1184 		 * Find the crypto context.
1185 		 *
1186 		 * XXX Note that the logic here prevents us from having
1187 		 * XXX the same algorithm multiple times in a session
1188 		 * XXX (or rather, we can but it won't give us the right
1189 		 * XXX results). To do that, we'd need some way of differentiating
1190 		 * XXX between the various instances of an algorithm (so we can
1191 		 * XXX locate the correct crypto context).
1192 		 */
1193 		for (sw = swcr_sessions[lid];
1194 		    sw && sw->sw_alg != crd->crd_alg;
1195 		    sw = sw->sw_next)
1196 			;
1197 
1198 		/* No such context ? */
1199 		if (sw == NULL) {
1200 			crp->crp_etype = EINVAL;
1201 			goto done;
1202 		}
1203 
1204 		switch (sw->sw_alg) {
1205 		case CRYPTO_DES_CBC:
1206 		case CRYPTO_3DES_CBC:
1207 		case CRYPTO_BLF_CBC:
1208 		case CRYPTO_CAST_CBC:
1209 		case CRYPTO_SKIPJACK_CBC:
1210 		case CRYPTO_RIJNDAEL128_CBC:
1211 		case CRYPTO_CAMELLIA_CBC:
1212 		case CRYPTO_AES_CTR:
1213 			if ((crp->crp_etype = swcr_encdec(crd, sw,
1214 			    crp->crp_buf, type)) != 0)
1215 				goto done;
1216 			break;
1217 		case CRYPTO_NULL_CBC:
1218 			crp->crp_etype = 0;
1219 			break;
1220 		case CRYPTO_MD5_HMAC:
1221 		case CRYPTO_MD5_HMAC_96:
1222 		case CRYPTO_SHA1_HMAC:
1223 		case CRYPTO_SHA1_HMAC_96:
1224 		case CRYPTO_SHA2_256_HMAC:
1225 		case CRYPTO_SHA2_384_HMAC:
1226 		case CRYPTO_SHA2_512_HMAC:
1227 		case CRYPTO_RIPEMD160_HMAC:
1228 		case CRYPTO_RIPEMD160_HMAC_96:
1229 		case CRYPTO_NULL_HMAC:
1230 		case CRYPTO_MD5_KPDK:
1231 		case CRYPTO_SHA1_KPDK:
1232 		case CRYPTO_MD5:
1233 		case CRYPTO_SHA1:
1234 		case CRYPTO_AES_XCBC_MAC_96:
1235 			if ((crp->crp_etype = swcr_authcompute(crp, crd, sw,
1236 			    crp->crp_buf, type)) != 0)
1237 				goto done;
1238 			break;
1239 
1240 		case CRYPTO_AES_GCM_16:
1241 		case CRYPTO_AES_GMAC:
1242 		case CRYPTO_AES_128_GMAC:
1243 		case CRYPTO_AES_192_GMAC:
1244 		case CRYPTO_AES_256_GMAC:
1245 			crp->crp_etype = swcr_combined(crp, type);
1246 			goto done;
1247 
1248 		case CRYPTO_DEFLATE_COMP:
1249 		case CRYPTO_DEFLATE_COMP_NOGROW:
1250 		case CRYPTO_GZIP_COMP:
1251 			DPRINTF(("swcr_process: compdec for %d\n", sw->sw_alg));
1252 			if ((crp->crp_etype = swcr_compdec(crd, sw,
1253 			    crp->crp_buf, type, &crp->crp_olen)) != 0)
1254 				goto done;
1255 			break;
1256 
1257 		default:
1258 			/* Unknown/unsupported algorithm */
1259 			crp->crp_etype = EINVAL;
1260 			goto done;
1261 		}
1262 	}
1263 
1264 done:
1265 	DPRINTF(("request %p done\n", crp));
1266 	crypto_done(crp);
1267 	return 0;
1268 }
1269 
1270 static void
1271 swcr_init(void)
1272 {
1273 	swcr_id = crypto_get_driverid(CRYPTOCAP_F_SOFTWARE);
1274 	if (swcr_id < 0) {
1275 		/* This should never happen */
1276 		panic("Software crypto device cannot initialize!");
1277 	}
1278 
1279 	crypto_register(swcr_id, CRYPTO_DES_CBC,
1280 	    0, 0, swcr_newsession, swcr_freesession, swcr_process, NULL);
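	/*
	 * The session and process callbacks are supplied only with the
	 * first registration above; REGISTER() just advertises the
	 * remaining algorithms under the same driver id.
	 */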
1281 #define	REGISTER(alg) \
1282 	crypto_register(swcr_id, alg, 0, 0, NULL, NULL, NULL, NULL)
1283 
1284 	REGISTER(CRYPTO_3DES_CBC);
1285 	REGISTER(CRYPTO_BLF_CBC);
1286 	REGISTER(CRYPTO_CAST_CBC);
1287 	REGISTER(CRYPTO_SKIPJACK_CBC);
1288 	REGISTER(CRYPTO_CAMELLIA_CBC);
1289 	REGISTER(CRYPTO_AES_CTR);
1290 	REGISTER(CRYPTO_AES_GCM_16);
1291 	REGISTER(CRYPTO_AES_GMAC);
1292 	REGISTER(CRYPTO_NULL_CBC);
1293 	REGISTER(CRYPTO_MD5_HMAC);
1294 	REGISTER(CRYPTO_MD5_HMAC_96);
1295 	REGISTER(CRYPTO_SHA1_HMAC);
1296 	REGISTER(CRYPTO_SHA1_HMAC_96);
1297 	REGISTER(CRYPTO_SHA2_256_HMAC);
1298 	REGISTER(CRYPTO_SHA2_384_HMAC);
1299 	REGISTER(CRYPTO_SHA2_512_HMAC);
1300 	REGISTER(CRYPTO_RIPEMD160_HMAC);
1301 	REGISTER(CRYPTO_RIPEMD160_HMAC_96);
1302 	REGISTER(CRYPTO_NULL_HMAC);
1303 	REGISTER(CRYPTO_MD5_KPDK);
1304 	REGISTER(CRYPTO_SHA1_KPDK);
1305 	REGISTER(CRYPTO_MD5);
1306 	REGISTER(CRYPTO_SHA1);
1307 	REGISTER(CRYPTO_AES_XCBC_MAC_96);
1308 	REGISTER(CRYPTO_AES_128_GMAC);
1309 	REGISTER(CRYPTO_AES_192_GMAC);
1310 	REGISTER(CRYPTO_AES_256_GMAC);
1311 	REGISTER(CRYPTO_RIJNDAEL128_CBC);
1312 	REGISTER(CRYPTO_DEFLATE_COMP);
1313 	REGISTER(CRYPTO_DEFLATE_COMP_NOGROW);
1314 	REGISTER(CRYPTO_GZIP_COMP);
1315 #undef REGISTER
1316 }
1317 
1318 
1319 /*
1320  * Pseudo-device init routine for software crypto.
1321  */
1322 
1323 void
1324 swcryptoattach(int num)
1325 {
1326 
1327 	swcr_init();
1328 }
1329 
1330 void	swcrypto_attach(device_t, device_t, void *);
1331 
1332 void
1333 swcrypto_attach(device_t parent, device_t self, void *opaque)
1334 {
1335 
1336 	swcr_init();
1337 
1338 	if (!pmf_device_register(self, NULL, NULL))
1339 		aprint_error_dev(self, "couldn't establish power handler\n");
1340 }
1341 
1342 int	swcrypto_detach(device_t, int);
1343 
1344 int
1345 swcrypto_detach(device_t self, int flag)
1346 {
1347 	pmf_device_deregister(self);
1348 	if (swcr_id >= 0)
1349 		crypto_unregister_all(swcr_id);
1350 	return 0;
1351 }
1352 
1353 int	swcrypto_match(device_t, cfdata_t, void *);
1354 
1355 int
1356 swcrypto_match(device_t parent, cfdata_t data, void *opaque)
1357 {
1358 
1359 	return 1;
1360 }
1361 
1362 MODULE(MODULE_CLASS_DRIVER, swcrypto,
1363 	"opencrypto,zlib,blowfish,des,cast128,camellia,skipjack");
1364 
1365 CFDRIVER_DECL(swcrypto, DV_DULL, NULL);
1366 
1367 CFATTACH_DECL2_NEW(swcrypto, 0, swcrypto_match, swcrypto_attach,
1368     swcrypto_detach, NULL, NULL, NULL);
1369 
1370 static int swcryptoloc[] = { -1, -1 };
1371 
1372 static struct cfdata swcrypto_cfdata[] = {
1373 	{
1374 		.cf_name = "swcrypto",
1375 		.cf_atname = "swcrypto",
1376 		.cf_unit = 0,
1377 		.cf_fstate = 0,
1378 		.cf_loc = swcryptoloc,
1379 		.cf_flags = 0,
1380 		.cf_pspec = NULL,
1381 	},
1382 	{ NULL, NULL, 0, 0, NULL, 0, NULL }
1383 };
1384 
1385 static int
1386 swcrypto_modcmd(modcmd_t cmd, void *arg)
1387 {
1388 	int error;
1389 
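	/*
	 * MODULE_CMD_INIT wires the pseudo-device into autoconf
	 * (cfdriver, cfattach, cfdata, then a pseudo-device attach);
	 * MODULE_CMD_FINI unwinds the same steps in reverse order.
	 */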
1390 	switch (cmd) {
1391 	case MODULE_CMD_INIT:
1392 		error = config_cfdriver_attach(&swcrypto_cd);
1393 		if (error) {
1394 			return error;
1395 		}
1396 
1397 		error = config_cfattach_attach(swcrypto_cd.cd_name,
1398 		    &swcrypto_ca);
1399 		if (error) {
1400 			config_cfdriver_detach(&swcrypto_cd);
1401 			aprint_error("%s: unable to register cfattach\n",
1402 				swcrypto_cd.cd_name);
1403 
1404 			return error;
1405 		}
1406 
1407 		error = config_cfdata_attach(swcrypto_cfdata, 1);
1408 		if (error) {
1409 			config_cfattach_detach(swcrypto_cd.cd_name,
1410 			    &swcrypto_ca);
1411 			config_cfdriver_detach(&swcrypto_cd);
1412 			aprint_error("%s: unable to register cfdata\n",
1413 				swcrypto_cd.cd_name);
1414 
1415 			return error;
1416 		}
1417 
1418 		(void)config_attach_pseudo(swcrypto_cfdata);
1419 
1420 		return 0;
1421 	case MODULE_CMD_FINI:
1422 		error = config_cfdata_detach(swcrypto_cfdata);
1423 		if (error) {
1424 			return error;
1425 		}
1426 
1427 		config_cfattach_detach(swcrypto_cd.cd_name, &swcrypto_ca);
1428 		config_cfdriver_detach(&swcrypto_cd);
1429 
1430 		return 0;
1431 	default:
1432 		return ENOTTY;
1433 	}
1434 }
1435