xref: /netbsd-src/sys/opencrypto/cryptosoft.c (revision 181254a7b1bdde6873432bffef2d2decc4b5c22f)
1 /*	$NetBSD: cryptosoft.c,v 1.57 2020/07/04 18:07:31 riastradh Exp $ */
2 /*	$FreeBSD: src/sys/opencrypto/cryptosoft.c,v 1.2.2.1 2002/11/21 23:34:23 sam Exp $	*/
3 /*	$OpenBSD: cryptosoft.c,v 1.35 2002/04/26 08:43:50 deraadt Exp $	*/
4 
5 /*
6  * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu)
7  *
8  * This code was written by Angelos D. Keromytis in Athens, Greece, in
9  * February 2000. Network Security Technologies Inc. (NSTI) kindly
10  * supported the development of this code.
11  *
12  * Copyright (c) 2000, 2001 Angelos D. Keromytis
13  *
14  * Permission to use, copy, and modify this software with or without fee
15  * is hereby granted, provided that this entire notice is included in
16  * all source code copies of any software which is or includes a copy or
17  * modification of this software.
18  *
19  * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
20  * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
21  * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
22  * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
23  * PURPOSE.
24  */
25 
26 #include <sys/cdefs.h>
27 __KERNEL_RCSID(0, "$NetBSD: cryptosoft.c,v 1.57 2020/07/04 18:07:31 riastradh Exp $");
28 
29 #include <sys/param.h>
30 #include <sys/systm.h>
31 #include <sys/malloc.h>
32 #include <sys/mbuf.h>
33 #include <sys/sysctl.h>
34 #include <sys/errno.h>
35 #include <sys/cprng.h>
36 #include <sys/module.h>
37 #include <sys/device.h>
38 
39 #ifdef _KERNEL_OPT
40 #include "opt_ocf.h"
41 #endif
42 
43 #include <opencrypto/cryptodev.h>
44 #include <opencrypto/cryptosoft.h>
45 #include <opencrypto/xform.h>
46 
47 #include <opencrypto/cryptosoft_xform.c>
48 
49 #include "ioconf.h"
50 
/*
 * Scratch context big enough to hold the state of any supported
 * software authentication transform; the active member is implied by
 * the session's axf ops (Init/Update/Final) and axf->ctxsize.
 */
union authctx {
	MD5_CTX md5ctx;
	SHA1_CTX sha1ctx;
	RMD160_CTX rmd160ctx;
	SHA256_CTX sha256ctx;
	SHA384_CTX sha384ctx;
	SHA512_CTX sha512ctx;
	aesxcbc_ctx aesxcbcctx;
	AES_GMAC_CTX aesgmacctx;
};
61 
/* Table of active software sessions, indexed by session id; slot 0 is
 * deliberately left empty (see swcr_newsession). */
struct swcr_data **swcr_sessions = NULL;
/* Number of slots currently allocated in swcr_sessions. */
u_int32_t swcr_sesnum = 0;
/* Driver id of the software crypto driver; -1 until registered. */
int32_t swcr_id = -1;
65 
/*
 * Copy data to/from the request buffer regardless of its type:
 * x selects the routine (CRYPTO_BUF_MBUF -> mbuf routines, anything
 * else -> uio routines), a is the buffer, b the offset, c the length
 * and d the data pointer.  Arguments and the whole expansion are
 * parenthesized so the macros behave as single expressions even when
 * embedded in larger expressions or unbraced if/else bodies.
 */
#define COPYBACK(x, a, b, c, d) \
	((x) == CRYPTO_BUF_MBUF ? m_copyback((struct mbuf *)(a),(b),(c),(d)) \
	: cuio_copyback((struct uio *)(a),(b),(c),(d)))
#define COPYDATA(x, a, b, c, d) \
	((x) == CRYPTO_BUF_MBUF ? m_copydata((struct mbuf *)(a),(b),(c),(d)) \
	: cuio_copydata((struct uio *)(a),(b),(c),(d)))
72 
/* Per-descriptor workers. */
static	int swcr_encdec(struct cryptodesc *, const struct swcr_data *, void *, int);
static	int swcr_compdec(struct cryptodesc *, const struct swcr_data *, void *, int, int *);
static	int swcr_combined(struct cryptop *, int);
/* opencrypto driver entry points registered at attach time. */
static	int swcr_process(void *, struct cryptop *, int);
static	int swcr_newsession(void *, u_int32_t *, struct cryptoini *);
static	int swcr_freesession(void *, u_int64_t);

static	int swcryptoattach_internal(void);
81 
/*
 * Apply a symmetric encryption/decryption algorithm.
 *
 * crd     - descriptor: skip/len give the region to transform, inject
 *           is where the IV lives in the buffer, crd_flags select
 *           direction (CRD_F_ENCRYPT) and IV handling.
 * sw      - per-session state: transform ops (sw_exf) + key schedule.
 * bufv    - the data buffer; actual type depends on outtype.
 * outtype - CRYPTO_BUF_CONTIG (flat buffer), CRYPTO_BUF_MBUF (mbuf
 *           chain) or CRYPTO_BUF_IOV (struct uio).
 *
 * Transforms with a reinit hook keep their own chaining state in the
 * key schedule; without one, CBC chaining is done in-line here.
 * Returns 0 on success, EINVAL on a length that is not a multiple of
 * the block size or on a buffer that runs out of data.
 */
static int
swcr_encdec(struct cryptodesc *crd, const struct swcr_data *sw, void *bufv,
    int outtype)
{
	char *buf = bufv;
	unsigned char iv[EALG_MAX_BLOCK_LEN], blk[EALG_MAX_BLOCK_LEN], *idat;
	unsigned char *ivp, piv[EALG_MAX_BLOCK_LEN];
	const struct swcr_enc_xform *exf;
	int i, k, j, blks, ivlen;
	int count, ind;

	exf = sw->sw_exf;
	blks = exf->enc_xform->blocksize;
	ivlen = exf->enc_xform->ivsize;
	/*
	 * reinit-style transforms may use a shorter IV than the block
	 * size; the in-line CBC chaining below requires ivlen == blks.
	 */
	KASSERT(exf->reinit ? ivlen <= blks : ivlen == blks);

	/* Check for non-padded data */
	if (crd->crd_len % blks)
		return EINVAL;

	/* Initialize the IV */
	if (crd->crd_flags & CRD_F_ENCRYPT) {
		/* IV explicitly provided ? */
		if (crd->crd_flags & CRD_F_IV_EXPLICIT) {
			memcpy(iv, crd->crd_iv, ivlen);
			if (exf->reinit)
				exf->reinit(sw->sw_kschedule, iv, 0);
		} else if (exf->reinit) {
			/* Transform generates the IV itself into iv[] */
			exf->reinit(sw->sw_kschedule, 0, iv);
		} else {
			/* Fresh random IV for CBC-style modes */
			cprng_fast(iv, EALG_MAX_BLOCK_LEN);
		}

		/* Do we need to write the IV */
		if (!(crd->crd_flags & CRD_F_IV_PRESENT)) {
			COPYBACK(outtype, buf, crd->crd_inject, ivlen, iv);
		}

	} else {	/* Decryption */
			/* IV explicitly provided ? */
		if (crd->crd_flags & CRD_F_IV_EXPLICIT)
			memcpy(iv, crd->crd_iv, ivlen);
		else {
			/* Get IV off buf */
			COPYDATA(outtype, buf, crd->crd_inject, ivlen, iv);
		}
		if (exf->reinit)
			exf->reinit(sw->sw_kschedule, iv, 0);
	}

	ivp = iv;

	if (outtype == CRYPTO_BUF_CONTIG) {
		if (exf->reinit) {
			/* Chaining handled inside the transform. */
			for (i = crd->crd_skip;
			     i < crd->crd_skip + crd->crd_len; i += blks) {
				if (crd->crd_flags & CRD_F_ENCRYPT) {
					exf->encrypt(sw->sw_kschedule, buf + i);
				} else {
					exf->decrypt(sw->sw_kschedule, buf + i);
				}
			}
		} else if (crd->crd_flags & CRD_F_ENCRYPT) {
			for (i = crd->crd_skip;
			    i < crd->crd_skip + crd->crd_len; i += blks) {
				/* XOR with the IV/previous block, as appropriate. */
				if (i == crd->crd_skip)
					for (k = 0; k < blks; k++)
						buf[i + k] ^= ivp[k];
				else
					for (k = 0; k < blks; k++)
						buf[i + k] ^= buf[i + k - blks];
				exf->encrypt(sw->sw_kschedule, buf + i);
			}
		} else {		/* Decrypt */
			/*
			 * Start at the end, so we don't need to keep the encrypted
			 * block as the IV for the next block.
			 */
			for (i = crd->crd_skip + crd->crd_len - blks;
			    i >= crd->crd_skip; i -= blks) {
				exf->decrypt(sw->sw_kschedule, buf + i);

				/* XOR with the IV/previous block, as appropriate */
				if (i == crd->crd_skip)
					for (k = 0; k < blks; k++)
						buf[i + k] ^= ivp[k];
				else
					for (k = 0; k < blks; k++)
						buf[i + k] ^= buf[i + k - blks];
			}
		}

		return 0;
	} else if (outtype == CRYPTO_BUF_MBUF) {
		struct mbuf *m = (struct mbuf *) buf;

		/* Find beginning of data */
		m = m_getptr(m, crd->crd_skip, &k);
		if (m == NULL)
			return EINVAL;

		i = crd->crd_len;

		while (i > 0) {
			/*
			 * If there's insufficient data at the end of
			 * an mbuf, we have to do some copying.
			 */
			if (m->m_len < k + blks && m->m_len != k) {
				/* Block straddles mbufs: bounce via blk[]. */
				m_copydata(m, k, blks, blk);

				/* Actual encryption/decryption */
				if (exf->reinit) {
					if (crd->crd_flags & CRD_F_ENCRYPT) {
						exf->encrypt(sw->sw_kschedule,
							     blk);
					} else {
						exf->decrypt(sw->sw_kschedule,
							     blk);
					}
				} else if (crd->crd_flags & CRD_F_ENCRYPT) {
					/* XOR with previous block */
					for (j = 0; j < blks; j++)
						blk[j] ^= ivp[j];

					exf->encrypt(sw->sw_kschedule, blk);

					/*
					 * Keep encrypted block for XOR'ing
					 * with next block
					 */
					memcpy(iv, blk, blks);
					ivp = iv;
				} else {	/* decrypt */
					/*
					 * Keep encrypted block for XOR'ing
					 * with next block
					 */
					if (ivp == iv)
						memcpy(piv, blk, blks);
					else
						memcpy(iv, blk, blks);

					exf->decrypt(sw->sw_kschedule, blk);

					/* XOR with previous block */
					for (j = 0; j < blks; j++)
						blk[j] ^= ivp[j];

					if (ivp == iv)
						memcpy(iv, piv, blks);
					else
						ivp = iv;
				}

				/* Copy back decrypted block */
				m_copyback(m, k, blks, blk);

				/* Advance pointer */
				m = m_getptr(m, k + blks, &k);
				if (m == NULL)
					return EINVAL;

				i -= blks;

				/* Could be done... */
				if (i == 0)
					break;
			}

			/* Skip possibly empty mbufs */
			if (k == m->m_len) {
				for (m = m->m_next; m && m->m_len == 0;
				    m = m->m_next)
					;
				k = 0;
			}

			/* Sanity check */
			if (m == NULL)
				return EINVAL;

			/*
			 * Warning: idat may point to garbage here, but
			 * we only use it in the while() loop, only if
			 * there are indeed enough data.
			 */
			idat = mtod(m, unsigned char *) + k;

			/* Fast path: whole blocks in-place in this mbuf. */
			while (m->m_len >= k + blks && i > 0) {
				if (exf->reinit) {
					if (crd->crd_flags & CRD_F_ENCRYPT) {
						exf->encrypt(sw->sw_kschedule,
							     idat);
					} else {
						exf->decrypt(sw->sw_kschedule,
							     idat);
					}
				} else if (crd->crd_flags & CRD_F_ENCRYPT) {
					/* XOR with previous block/IV */
					for (j = 0; j < blks; j++)
						idat[j] ^= ivp[j];

					exf->encrypt(sw->sw_kschedule, idat);
					ivp = idat;
				} else {	/* decrypt */
					/*
					 * Keep encrypted block to be used
					 * in next block's processing.
					 */
					if (ivp == iv)
						memcpy(piv, idat, blks);
					else
						memcpy(iv, idat, blks);

					exf->decrypt(sw->sw_kschedule, idat);

					/* XOR with previous block/IV */
					for (j = 0; j < blks; j++)
						idat[j] ^= ivp[j];

					if (ivp == iv)
						memcpy(iv, piv, blks);
					else
						ivp = iv;
				}

				idat += blks;
				k += blks;
				i -= blks;
			}
		}

		return 0; /* Done with mbuf encryption/decryption */
	} else if (outtype == CRYPTO_BUF_IOV) {
		struct uio *uio = (struct uio *) buf;

		/* Find beginning of data */
		count = crd->crd_skip;
		ind = cuio_getptr(uio, count, &k);
		if (ind == -1)
			return EINVAL;

		i = crd->crd_len;

		while (i > 0) {
			/*
			 * If there's insufficient data at the end,
			 * we have to do some copying.
			 */
			if (uio->uio_iov[ind].iov_len < k + blks &&
			    uio->uio_iov[ind].iov_len != k) {
				/* Block straddles iovecs: bounce via blk[]. */
				cuio_copydata(uio, k, blks, blk);

				/* Actual encryption/decryption */
				if (exf->reinit) {
					if (crd->crd_flags & CRD_F_ENCRYPT) {
						exf->encrypt(sw->sw_kschedule,
							     blk);
					} else {
						exf->decrypt(sw->sw_kschedule,
							     blk);
					}
				} else if (crd->crd_flags & CRD_F_ENCRYPT) {
					/* XOR with previous block */
					for (j = 0; j < blks; j++)
						blk[j] ^= ivp[j];

					exf->encrypt(sw->sw_kschedule, blk);

					/*
					 * Keep encrypted block for XOR'ing
					 * with next block
					 */
					memcpy(iv, blk, blks);
					ivp = iv;
				} else {	/* decrypt */
					/*
					 * Keep encrypted block for XOR'ing
					 * with next block
					 */
					if (ivp == iv)
						memcpy(piv, blk, blks);
					else
						memcpy(iv, blk, blks);

					exf->decrypt(sw->sw_kschedule, blk);

					/* XOR with previous block */
					for (j = 0; j < blks; j++)
						blk[j] ^= ivp[j];

					if (ivp == iv)
						memcpy(iv, piv, blks);
					else
						ivp = iv;
				}

				/* Copy back decrypted block */
				cuio_copyback(uio, k, blks, blk);

				count += blks;

				/* Advance pointer */
				ind = cuio_getptr(uio, count, &k);
				if (ind == -1)
					return (EINVAL);

				i -= blks;

				/* Could be done... */
				if (i == 0)
					break;
			}

			/*
			 * Warning: idat may point to garbage here, but
			 * we only use it in the while() loop, only if
			 * there are indeed enough data.
			 */
			idat = ((char *)uio->uio_iov[ind].iov_base) + k;

			/* Fast path: whole blocks in-place in this iovec. */
			while (uio->uio_iov[ind].iov_len >= k + blks &&
			    i > 0) {
				if (exf->reinit) {
					if (crd->crd_flags & CRD_F_ENCRYPT) {
						exf->encrypt(sw->sw_kschedule,
							    idat);
					} else {
						exf->decrypt(sw->sw_kschedule,
							    idat);
					}
				} else if (crd->crd_flags & CRD_F_ENCRYPT) {
					/* XOR with previous block/IV */
					for (j = 0; j < blks; j++)
						idat[j] ^= ivp[j];

					exf->encrypt(sw->sw_kschedule, idat);
					ivp = idat;
				} else {	/* decrypt */
					/*
					 * Keep encrypted block to be used
					 * in next block's processing.
					 */
					if (ivp == iv)
						memcpy(piv, idat, blks);
					else
						memcpy(iv, idat, blks);

					exf->decrypt(sw->sw_kschedule, idat);

					/* XOR with previous block/IV */
					for (j = 0; j < blks; j++)
						idat[j] ^= ivp[j];

					if (ivp == iv)
						memcpy(iv, piv, blks);
					else
						ivp = iv;
				}

				idat += blks;
				count += blks;
				k += blks;
				i -= blks;
			}
		}
		return 0; /* Done with iov encryption/decryption */
	}

	/* Unreachable */
	return EINVAL;
}
459 
/*
 * Compute keyed-hash authenticator.
 *
 * Hashes crd_len bytes starting at crd_skip of the request buffer and
 * injects the result: for contiguous buffers and mbufs the MAC is
 * written into the buffer at crd_inject; for iovecs it is stored in
 * crp->crp_mac instead.
 *
 * Returns 0 on success, EINVAL on missing context/unknown buffer type
 * or an error propagated from m_apply/cuio_apply.
 */
int
swcr_authcompute(struct cryptop *crp, struct cryptodesc *crd,
    const struct swcr_data *sw, void *buf, int outtype)
{
	unsigned char aalg[AALG_MAX_RESULT_LEN];
	const struct swcr_auth_hash *axf;
	union authctx ctx;
	int err;

	if (sw->sw_ictx == 0)
		return EINVAL;

	axf = sw->sw_axf;

	/* Start from the precomputed inner (key-dependent) context. */
	memcpy(&ctx, sw->sw_ictx, axf->ctxsize);

	switch (outtype) {
	case CRYPTO_BUF_CONTIG:
		axf->Update(&ctx, (char *)buf + crd->crd_skip, crd->crd_len);
		break;
	case CRYPTO_BUF_MBUF:
		err = m_apply((struct mbuf *) buf, crd->crd_skip, crd->crd_len,
		    (int (*)(void*, void *, unsigned int))(void *)axf->Update,
		    (void *) &ctx);
		if (err)
			return err;
		break;
	case CRYPTO_BUF_IOV:
		err = cuio_apply((struct uio *) buf, crd->crd_skip,
		    crd->crd_len,
		    (int (*)(void *, void *, unsigned int))(void *)axf->Update,
		    (void *) &ctx);
		if (err) {
			return err;
		}
		break;
	default:
		return EINVAL;
	}

	switch (sw->sw_alg) {
	case CRYPTO_MD5_HMAC:
	case CRYPTO_MD5_HMAC_96:
	case CRYPTO_SHA1_HMAC:
	case CRYPTO_SHA1_HMAC_96:
	case CRYPTO_SHA2_256_HMAC:
	case CRYPTO_SHA2_384_HMAC:
	case CRYPTO_SHA2_512_HMAC:
	case CRYPTO_RIPEMD160_HMAC:
	case CRYPTO_RIPEMD160_HMAC_96:
		if (sw->sw_octx == NULL)
			return EINVAL;

		/* HMAC: outer hash over the inner digest (RFC 2104). */
		axf->Final(aalg, &ctx);
		memcpy(&ctx, sw->sw_octx, axf->ctxsize);
		axf->Update(&ctx, aalg, axf->auth_hash->hashsize);
		axf->Final(aalg, &ctx);
		break;

	case CRYPTO_MD5_KPDK:
	case CRYPTO_SHA1_KPDK:
		if (sw->sw_octx == NULL)
			return EINVAL;

		/* KPDK: append the raw key (stored in sw_octx) and finish. */
		axf->Update(&ctx, sw->sw_octx, sw->sw_klen);
		axf->Final(aalg, &ctx);
		break;

	case CRYPTO_NULL_HMAC:
	case CRYPTO_MD5:
	case CRYPTO_SHA1:
	case CRYPTO_AES_XCBC_MAC_96:
		/* Plain digest / self-contained MAC: no outer pass. */
		axf->Final(aalg, &ctx);
		break;
	}

	/* Inject the authentication data */
	switch (outtype) {
	case CRYPTO_BUF_CONTIG:
		(void)memcpy((char *)buf + crd->crd_inject, aalg,
		    axf->auth_hash->authsize);
		break;
	case CRYPTO_BUF_MBUF:
		m_copyback((struct mbuf *) buf, crd->crd_inject,
		    axf->auth_hash->authsize, aalg);
		break;
	case CRYPTO_BUF_IOV:
		/* iov requests return the MAC out of band. */
		memcpy(crp->crp_mac, aalg, axf->auth_hash->authsize);
		break;
	default:
		return EINVAL;
	}
	return 0;
}
557 
/*
 * Apply a combined encryption-authentication transformation
 * (AES-GCM / AES-GMAC).  The session must pair an encryption
 * descriptor (AES_GCM_16 or AES_GMAC) with its matching GMAC auth
 * descriptor; contiguous buffers are not supported here.
 *
 * Returns 0 on success, EINVAL on a malformed descriptor chain.
 */
static int
swcr_combined(struct cryptop *crp, int outtype)
{
	uint32_t blkbuf[howmany(EALG_MAX_BLOCK_LEN, sizeof(uint32_t))];
	u_char *blk = (u_char *)blkbuf;
	u_char aalg[AALG_MAX_RESULT_LEN];
	u_char iv[EALG_MAX_BLOCK_LEN];
	union authctx ctx;
	struct cryptodesc *crd, *crda = NULL, *crde = NULL;
	struct swcr_data *sw, *swa, *swe = NULL;
	const struct swcr_auth_hash *axf = NULL;
	const struct swcr_enc_xform *exf = NULL;
	void *buf = (void *)crp->crp_buf;
	uint32_t *blkp;
	int i, blksz = 0, ivlen = 0, len;

	/* Sort the descriptors into the cipher (crde) and MAC (crda) halves. */
	for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
		for (sw = swcr_sessions[crp->crp_sid & 0xffffffff];
		     sw && sw->sw_alg != crd->crd_alg;
		     sw = sw->sw_next)
			;
		if (sw == NULL)
			return (EINVAL);

		switch (sw->sw_alg) {
		case CRYPTO_AES_GCM_16:
		case CRYPTO_AES_GMAC:
			swe = sw;
			crde = crd;
			exf = swe->sw_exf;
			ivlen = exf->enc_xform->ivsize;
			break;
		case CRYPTO_AES_128_GMAC:
		case CRYPTO_AES_192_GMAC:
		case CRYPTO_AES_256_GMAC:
			swa = sw;
			crda = crd;
			axf = swa->sw_axf;
			if (swa->sw_ictx == 0)
				return (EINVAL);
			memcpy(&ctx, swa->sw_ictx, axf->ctxsize);
			blksz = axf->auth_hash->blocksize;
			break;
		default:
			return (EINVAL);
		}
	}
	if (crde == NULL || crda == NULL)
		return (EINVAL);
	if (outtype == CRYPTO_BUF_CONTIG)
		return (EINVAL);

	/* Initialize the IV */
	if (crde->crd_flags & CRD_F_ENCRYPT) {
		/* IV explicitly provided ? */
		if (crde->crd_flags & CRD_F_IV_EXPLICIT) {
			memcpy(iv, crde->crd_iv, ivlen);
			if (exf->reinit)
				exf->reinit(swe->sw_kschedule, iv, 0);
		} else if (exf->reinit)
			exf->reinit(swe->sw_kschedule, 0, iv);
		else
			cprng_fast(iv, ivlen);

		/* Do we need to write the IV */
		if (!(crde->crd_flags & CRD_F_IV_PRESENT))
			COPYBACK(outtype, buf, crde->crd_inject, ivlen, iv);

	} else {	/* Decryption */
			/* IV explicitly provided ? */
		if (crde->crd_flags & CRD_F_IV_EXPLICIT)
			memcpy(iv, crde->crd_iv, ivlen);
		else {
			/* Get IV off buf */
			COPYDATA(outtype, buf, crde->crd_inject, ivlen, iv);
		}
		if (exf->reinit)
			exf->reinit(swe->sw_kschedule, iv, 0);
	}

	/* Supply MAC with IV */
	if (axf->Reinit)
		axf->Reinit(&ctx, iv, ivlen);

	/* Supply MAC with AAD */
	for (i = 0; i < crda->crd_len; i += blksz) {
		len = MIN(crda->crd_len - i, blksz);
		COPYDATA(outtype, buf, crda->crd_skip + i, len, blk);
		axf->Update(&ctx, blk, len);
	}

	/*
	 * Do encryption/decryption with MAC.  On encrypt the MAC covers
	 * the ciphertext, so hash after encrypting; on decrypt, before.
	 */
	for (i = 0; i < crde->crd_len; i += blksz) {
		len = MIN(crde->crd_len - i, blksz);
		if (len < blksz)
			memset(blk, 0, blksz);
		COPYDATA(outtype, buf, crde->crd_skip + i, len, blk);
		if (crde->crd_flags & CRD_F_ENCRYPT) {
			exf->encrypt(swe->sw_kschedule, blk);
			axf->Update(&ctx, blk, len);
		} else {
			axf->Update(&ctx, blk, len);
			exf->decrypt(swe->sw_kschedule, blk);
		}
		COPYBACK(outtype, buf, crde->crd_skip + i, len, blk);
	}

	/* Do any required special finalization */
	switch (crda->crd_alg) {
		case CRYPTO_AES_128_GMAC:
		case CRYPTO_AES_192_GMAC:
		case CRYPTO_AES_256_GMAC:
			/* GCM length block: AAD and ciphertext bit lengths,
			 * big-endian, in the 2nd and 4th 32-bit words. */
			memset(blk, 0, blksz);
			blkp = (uint32_t *)blk + 1;
			*blkp = htobe32(crda->crd_len * 8);
			blkp = (uint32_t *)blk + 3;
			*blkp = htobe32(crde->crd_len * 8);
			axf->Update(&ctx, blk, blksz);
			break;
	}

	/* Finalize MAC */
	axf->Final(aalg, &ctx);

	/* Inject the authentication data */
	if (outtype == CRYPTO_BUF_MBUF)
		COPYBACK(outtype, buf, crda->crd_inject, axf->auth_hash->authsize, aalg);
	else
		memcpy(crp->crp_mac, aalg, axf->auth_hash->authsize);

	return (0);
}
694 
/*
 * Apply a compression/decompression algorithm.
 *
 * res_size is in/out: on entry (decompression) it bounds the output
 * size passed to the decompressor; on success it receives the number
 * of bytes produced.  For CRYPTO_DEFLATE_COMP_NOGROW, compression
 * that does not shrink the data is discarded (returns 0 without
 * copying back, res_size left at the unhelpful result length).
 *
 * Returns 0 on success, EINVAL on allocation or transform failure.
 */
static int
swcr_compdec(struct cryptodesc *crd, const struct swcr_data *sw,
    void *buf, int outtype, int *res_size)
{
	u_int8_t *data, *out;
	const struct swcr_comp_algo *cxf;
	int adj;
	u_int32_t result;

	cxf = sw->sw_cxf;

	/* We must handle the whole buffer of data in one time
	 * then if there is not all the data in the mbuf, we must
	 * copy in a buffer.
	 */

	/* NOTE(review): allocation failure yields EINVAL rather than
	 * ENOBUFS/ENOMEM — kept as-is for caller compatibility. */
	data = malloc(crd->crd_len, M_CRYPTO_DATA, M_NOWAIT);
	if (data == NULL)
		return (EINVAL);
	COPYDATA(outtype, buf, crd->crd_skip, crd->crd_len, data);

	if (crd->crd_flags & CRD_F_COMP)
		result = cxf->compress(data, crd->crd_len, &out);
	else
		result = cxf->decompress(data, crd->crd_len, &out,
					 *res_size);

	free(data, M_CRYPTO_DATA);
	if (result == 0)
		return EINVAL;

	/* Copy back the (de)compressed data. m_copyback is
	 * extending the mbuf as necessary.
	 */
	*res_size = (int)result;
	/* Check the compressed size when doing compression */
	if (crd->crd_flags & CRD_F_COMP &&
	    sw->sw_alg == CRYPTO_DEFLATE_COMP_NOGROW &&
	    result >= crd->crd_len) {
			/* Compression was useless, we lost time */
			free(out, M_CRYPTO_DATA);
			return 0;
	}

	COPYBACK(outtype, buf, crd->crd_skip, result, out);
	if (result < crd->crd_len) {
		/* Output shrank: trim the trailing stale bytes. */
		adj = result - crd->crd_len;
		if (outtype == CRYPTO_BUF_MBUF) {
			m_adj((struct mbuf *)buf, adj);
		}
		/* Don't adjust the iov_len, it breaks the kmem_free */
	}
	free(out, M_CRYPTO_DATA);
	return 0;
}
753 
/*
 * Generate a new software session.
 *
 * Walks the cryptoini chain, allocating one swcr_data per requested
 * algorithm and chaining them via sw_next off swcr_sessions[i].  On
 * success *sid receives the new session index.
 *
 * Returns 0 on success, EINVAL on bad arguments or an unsupported
 * algorithm, ENOBUFS when allocation fails; any partially-built
 * session is torn down via swcr_freesession before returning.
 */
static int
swcr_newsession(void *arg, u_int32_t *sid, struct cryptoini *cri)
{
	struct swcr_data **swd;
	const struct swcr_auth_hash *axf;
	const struct swcr_enc_xform *txf;
	const struct swcr_comp_algo *cxf;
	u_int32_t i;
	int k, error;

	if (sid == NULL || cri == NULL)
		return EINVAL;

	/* Find a free slot; slot 0 is reserved/never used. */
	if (swcr_sessions) {
		for (i = 1; i < swcr_sesnum; i++)
			if (swcr_sessions[i] == NULL)
				break;
	} else
		i = 1;		/* NB: to silence compiler warning */

	/* Create or double the session table when it is full. */
	if (swcr_sessions == NULL || i == swcr_sesnum) {
		if (swcr_sessions == NULL) {
			i = 1; /* We leave swcr_sessions[0] empty */
			swcr_sesnum = CRYPTO_SW_SESSIONS;
		} else
			swcr_sesnum *= 2;

		swd = malloc(swcr_sesnum * sizeof(struct swcr_data *),
		    M_CRYPTO_DATA, M_NOWAIT);
		if (swd == NULL) {
			/* Reset session number */
			if (swcr_sesnum == CRYPTO_SW_SESSIONS)
				swcr_sesnum = 0;
			else
				swcr_sesnum /= 2;
			return ENOBUFS;
		}

		memset(swd, 0, swcr_sesnum * sizeof(struct swcr_data *));

		/* Copy existing sessions */
		if (swcr_sessions) {
			memcpy(swd, swcr_sessions,
			    (swcr_sesnum / 2) * sizeof(struct swcr_data *));
			free(swcr_sessions, M_CRYPTO_DATA);
		}

		swcr_sessions = swd;
	}

	swd = &swcr_sessions[i];
	*sid = i;

	while (cri) {
		*swd = malloc(sizeof **swd, M_CRYPTO_DATA, M_NOWAIT);
		if (*swd == NULL) {
			swcr_freesession(NULL, i);
			return ENOBUFS;
		}
		memset(*swd, 0, sizeof(struct swcr_data));

		switch (cri->cri_alg) {
		case CRYPTO_DES_CBC:
			txf = &swcr_enc_xform_des;
			goto enccommon;
		case CRYPTO_3DES_CBC:
			txf = &swcr_enc_xform_3des;
			goto enccommon;
		case CRYPTO_BLF_CBC:
			txf = &swcr_enc_xform_blf;
			goto enccommon;
		case CRYPTO_CAST_CBC:
			txf = &swcr_enc_xform_cast5;
			goto enccommon;
		case CRYPTO_SKIPJACK_CBC:
			txf = &swcr_enc_xform_skipjack;
			goto enccommon;
		case CRYPTO_AES_CBC:
			txf = &swcr_enc_xform_aes;
			goto enccommon;
		case CRYPTO_CAMELLIA_CBC:
			txf = &swcr_enc_xform_camellia;
			goto enccommon;
		case CRYPTO_AES_CTR:
			txf = &swcr_enc_xform_aes_ctr;
			goto enccommon;
		case CRYPTO_AES_GCM_16:
			txf = &swcr_enc_xform_aes_gcm;
			goto enccommon;
		case CRYPTO_AES_GMAC:
			txf = &swcr_enc_xform_aes_gmac;
			goto enccommon;
		case CRYPTO_NULL_CBC:
			txf = &swcr_enc_xform_null;
			goto enccommon;
		enccommon:
			/* Expand the key schedule for the chosen cipher. */
			error = txf->setkey(&((*swd)->sw_kschedule),
					cri->cri_key, cri->cri_klen / 8);
			if (error) {
				swcr_freesession(NULL, i);
				return error;
			}
			(*swd)->sw_exf = txf;
			break;

		case CRYPTO_MD5_HMAC:
			axf = &swcr_auth_hash_hmac_md5;
			goto authcommon;
		case CRYPTO_MD5_HMAC_96:
			axf = &swcr_auth_hash_hmac_md5_96;
			goto authcommon;
		case CRYPTO_SHA1_HMAC:
			axf = &swcr_auth_hash_hmac_sha1;
			goto authcommon;
		case CRYPTO_SHA1_HMAC_96:
			axf = &swcr_auth_hash_hmac_sha1_96;
			goto authcommon;
		case CRYPTO_SHA2_256_HMAC:
			axf = &swcr_auth_hash_hmac_sha2_256;
			goto authcommon;
		case CRYPTO_SHA2_384_HMAC:
			axf = &swcr_auth_hash_hmac_sha2_384;
			goto authcommon;
		case CRYPTO_SHA2_512_HMAC:
			axf = &swcr_auth_hash_hmac_sha2_512;
			goto authcommon;
		case CRYPTO_NULL_HMAC:
			axf = &swcr_auth_hash_null;
			goto authcommon;
		case CRYPTO_RIPEMD160_HMAC:
			axf = &swcr_auth_hash_hmac_ripemd_160;
			goto authcommon;
		case CRYPTO_RIPEMD160_HMAC_96:
			axf = &swcr_auth_hash_hmac_ripemd_160_96;
			goto authcommon;	/* leave this for safety */
		authcommon:
			(*swd)->sw_ictx = kmem_alloc(axf->ctxsize, KM_NOSLEEP);
			if ((*swd)->sw_ictx == NULL) {
				swcr_freesession(NULL, i);
				return ENOBUFS;
			}

			(*swd)->sw_octx = kmem_alloc(axf->ctxsize, KM_NOSLEEP);
			if ((*swd)->sw_octx == NULL) {
				swcr_freesession(NULL, i);
				return ENOBUFS;
			}

			/*
			 * Precompute the HMAC inner/outer contexts
			 * (RFC 2104): XOR the key with IPAD in place,
			 * hash it into sw_ictx, flip to OPAD for
			 * sw_octx, then restore the caller's key.
			 */
			for (k = 0; k < cri->cri_klen / 8; k++)
				cri->cri_key[k] ^= HMAC_IPAD_VAL;

			axf->Init((*swd)->sw_ictx);
			axf->Update((*swd)->sw_ictx, cri->cri_key,
			    cri->cri_klen / 8);
			axf->Update((*swd)->sw_ictx, hmac_ipad_buffer,
			    axf->auth_hash->blocksize - (cri->cri_klen / 8));

			for (k = 0; k < cri->cri_klen / 8; k++)
				cri->cri_key[k] ^= (HMAC_IPAD_VAL ^ HMAC_OPAD_VAL);

			axf->Init((*swd)->sw_octx);
			axf->Update((*swd)->sw_octx, cri->cri_key,
			    cri->cri_klen / 8);
			axf->Update((*swd)->sw_octx, hmac_opad_buffer,
			    axf->auth_hash->blocksize - (cri->cri_klen / 8));

			for (k = 0; k < cri->cri_klen / 8; k++)
				cri->cri_key[k] ^= HMAC_OPAD_VAL;
			(*swd)->sw_axf = axf;
			break;

		case CRYPTO_MD5_KPDK:
			axf = &swcr_auth_hash_key_md5;
			goto auth2common;

		case CRYPTO_SHA1_KPDK: {
			unsigned char digest[SHA1_DIGEST_LENGTH];
			CTASSERT(SHA1_DIGEST_LENGTH >= MD5_DIGEST_LENGTH);
			axf = &swcr_auth_hash_key_sha1;
		auth2common:
			(*swd)->sw_ictx = kmem_alloc(axf->ctxsize, KM_NOSLEEP);
			if ((*swd)->sw_ictx == NULL) {
				swcr_freesession(NULL, i);
				return ENOBUFS;
			}

			/* Store the key so we can "append" it to the payload */
			(*swd)->sw_octx = kmem_alloc(cri->cri_klen / 8,
			    KM_NOSLEEP);
			if ((*swd)->sw_octx == NULL) {
				swcr_freesession(NULL, i);
				return ENOBUFS;
			}

			(*swd)->sw_klen = cri->cri_klen / 8;
			memcpy((*swd)->sw_octx, cri->cri_key, cri->cri_klen / 8);
			axf->Init((*swd)->sw_ictx);
			axf->Update((*swd)->sw_ictx, cri->cri_key,
			    cri->cri_klen / 8);
			axf->Final(digest, (*swd)->sw_ictx);
			(*swd)->sw_axf = axf;
			break;
		    }

		case CRYPTO_MD5:
			axf = &swcr_auth_hash_md5;
			goto auth3common;

		case CRYPTO_SHA1:
			axf = &swcr_auth_hash_sha1;
		auth3common:
			/* Plain (unkeyed) digest: just an initialized context. */
			(*swd)->sw_ictx = kmem_alloc(axf->ctxsize, KM_NOSLEEP);
			if ((*swd)->sw_ictx == NULL) {
				swcr_freesession(NULL, i);
				return ENOBUFS;
			}

			axf->Init((*swd)->sw_ictx);
			(*swd)->sw_axf = axf;
			break;

		case CRYPTO_AES_XCBC_MAC_96:
			axf = &swcr_auth_hash_aes_xcbc_mac;
			goto auth4common;
		case CRYPTO_AES_128_GMAC:
			axf = &swcr_auth_hash_gmac_aes_128;
			goto auth4common;
		case CRYPTO_AES_192_GMAC:
			axf = &swcr_auth_hash_gmac_aes_192;
			goto auth4common;
		case CRYPTO_AES_256_GMAC:
			axf = &swcr_auth_hash_gmac_aes_256;
		auth4common:
			/* Keyed MAC with its own Setkey hook. */
			(*swd)->sw_ictx = kmem_alloc(axf->ctxsize, KM_NOSLEEP);
			if ((*swd)->sw_ictx == NULL) {
				swcr_freesession(NULL, i);
				return ENOBUFS;
			}
			axf->Init((*swd)->sw_ictx);
			axf->Setkey((*swd)->sw_ictx,
				cri->cri_key, cri->cri_klen / 8);
			(*swd)->sw_axf = axf;
			break;

		case CRYPTO_DEFLATE_COMP:
			cxf = &swcr_comp_algo_deflate;
			(*swd)->sw_cxf = cxf;
			break;

		case CRYPTO_DEFLATE_COMP_NOGROW:
			cxf = &swcr_comp_algo_deflate_nogrow;
			(*swd)->sw_cxf = cxf;
			break;

		case CRYPTO_GZIP_COMP:
			cxf = &swcr_comp_algo_gzip;
			(*swd)->sw_cxf = cxf;
			break;
		default:
			swcr_freesession(NULL, i);
			return EINVAL;
		}

		(*swd)->sw_alg = cri->cri_alg;
		cri = cri->cri_next;
		swd = &((*swd)->sw_next);
	}
	return 0;
}
1026 
1027 /*
1028  * Free a session.
1029  */
1030 static int
1031 swcr_freesession(void *arg, u_int64_t tid)
1032 {
1033 	struct swcr_data *swd;
1034 	const struct swcr_enc_xform *txf;
1035 	const struct swcr_auth_hash *axf;
1036 	u_int32_t sid = ((u_int32_t) tid) & 0xffffffff;
1037 
1038 	if (sid > swcr_sesnum || swcr_sessions == NULL ||
1039 	    swcr_sessions[sid] == NULL)
1040 		return EINVAL;
1041 
1042 	/* Silently accept and return */
1043 	if (sid == 0)
1044 		return 0;
1045 
1046 	while ((swd = swcr_sessions[sid]) != NULL) {
1047 		swcr_sessions[sid] = swd->sw_next;
1048 
1049 		switch (swd->sw_alg) {
1050 		case CRYPTO_DES_CBC:
1051 		case CRYPTO_3DES_CBC:
1052 		case CRYPTO_BLF_CBC:
1053 		case CRYPTO_CAST_CBC:
1054 		case CRYPTO_SKIPJACK_CBC:
1055 		case CRYPTO_AES_CBC:
1056 		case CRYPTO_CAMELLIA_CBC:
1057 		case CRYPTO_AES_CTR:
1058 		case CRYPTO_AES_GCM_16:
1059 		case CRYPTO_AES_GMAC:
1060 		case CRYPTO_NULL_CBC:
1061 			txf = swd->sw_exf;
1062 
1063 			if (swd->sw_kschedule)
1064 				txf->zerokey(&(swd->sw_kschedule));
1065 			break;
1066 
1067 		case CRYPTO_MD5_HMAC:
1068 		case CRYPTO_MD5_HMAC_96:
1069 		case CRYPTO_SHA1_HMAC:
1070 		case CRYPTO_SHA1_HMAC_96:
1071 		case CRYPTO_SHA2_256_HMAC:
1072 		case CRYPTO_SHA2_384_HMAC:
1073 		case CRYPTO_SHA2_512_HMAC:
1074 		case CRYPTO_RIPEMD160_HMAC:
1075 		case CRYPTO_RIPEMD160_HMAC_96:
1076 		case CRYPTO_NULL_HMAC:
1077 			axf = swd->sw_axf;
1078 
1079 			if (swd->sw_ictx) {
1080 				explicit_memset(swd->sw_ictx, 0, axf->ctxsize);
1081 				kmem_free(swd->sw_ictx, axf->ctxsize);
1082 			}
1083 			if (swd->sw_octx) {
1084 				explicit_memset(swd->sw_octx, 0, axf->ctxsize);
1085 				kmem_free(swd->sw_octx, axf->ctxsize);
1086 			}
1087 			break;
1088 
1089 		case CRYPTO_MD5_KPDK:
1090 		case CRYPTO_SHA1_KPDK:
1091 			axf = swd->sw_axf;
1092 
1093 			if (swd->sw_ictx) {
1094 				explicit_memset(swd->sw_ictx, 0, axf->ctxsize);
1095 				kmem_free(swd->sw_ictx, axf->ctxsize);
1096 			}
1097 			if (swd->sw_octx) {
1098 				explicit_memset(swd->sw_octx, 0, swd->sw_klen);
1099 				kmem_free(swd->sw_octx, swd->sw_klen);
1100 			}
1101 			break;
1102 
1103 		case CRYPTO_MD5:
1104 		case CRYPTO_SHA1:
1105 		case CRYPTO_AES_XCBC_MAC_96:
1106 		case CRYPTO_AES_128_GMAC:
1107 		case CRYPTO_AES_192_GMAC:
1108 		case CRYPTO_AES_256_GMAC:
1109 			axf = swd->sw_axf;
1110 
1111 			if (swd->sw_ictx) {
1112 				explicit_memset(swd->sw_ictx, 0, axf->ctxsize);
1113 				kmem_free(swd->sw_ictx, axf->ctxsize);
1114 			}
1115 			break;
1116 
1117 		case CRYPTO_DEFLATE_COMP:
1118 		case CRYPTO_DEFLATE_COMP_NOGROW:
1119 		case CRYPTO_GZIP_COMP:
1120 			break;
1121 		}
1122 
1123 		free(swd, M_CRYPTO_DATA);
1124 	}
1125 	return 0;
1126 }
1127 
1128 /*
1129  * Process a software request.
1130  */
1131 static int
1132 swcr_process(void *arg, struct cryptop *crp, int hint)
1133 {
1134 	struct cryptodesc *crd;
1135 	struct swcr_data *sw;
1136 	u_int32_t lid;
1137 	int type;
1138 
1139 	/* Sanity check */
1140 	if (crp == NULL)
1141 		return EINVAL;
1142 
1143 	if (crp->crp_desc == NULL || crp->crp_buf == NULL) {
1144 		crp->crp_etype = EINVAL;
1145 		goto done;
1146 	}
1147 
1148 	lid = crp->crp_sid & 0xffffffff;
1149 	if (lid >= swcr_sesnum || lid == 0 || swcr_sessions[lid] == NULL) {
1150 		crp->crp_etype = ENOENT;
1151 		goto done;
1152 	}
1153 
1154 	if (crp->crp_flags & CRYPTO_F_IMBUF) {
1155 		type = CRYPTO_BUF_MBUF;
1156 	} else if (crp->crp_flags & CRYPTO_F_IOV) {
1157 		type = CRYPTO_BUF_IOV;
1158 	} else {
1159 		type = CRYPTO_BUF_CONTIG;
1160 	}
1161 
1162 	/* Go through crypto descriptors, processing as we go */
1163 	for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
1164 		/*
1165 		 * Find the crypto context.
1166 		 *
1167 		 * XXX Note that the logic here prevents us from having
1168 		 * XXX the same algorithm multiple times in a session
1169 		 * XXX (or rather, we can but it won't give us the right
1170 		 * XXX results). To do that, we'd need some way of differentiating
1171 		 * XXX between the various instances of an algorithm (so we can
1172 		 * XXX locate the correct crypto context).
1173 		 */
1174 		for (sw = swcr_sessions[lid];
1175 		    sw && sw->sw_alg != crd->crd_alg;
1176 		    sw = sw->sw_next)
1177 			;
1178 
1179 		/* No such context ? */
1180 		if (sw == NULL) {
1181 			crp->crp_etype = EINVAL;
1182 			goto done;
1183 		}
1184 
1185 		switch (sw->sw_alg) {
1186 		case CRYPTO_DES_CBC:
1187 		case CRYPTO_3DES_CBC:
1188 		case CRYPTO_BLF_CBC:
1189 		case CRYPTO_CAST_CBC:
1190 		case CRYPTO_SKIPJACK_CBC:
1191 		case CRYPTO_AES_CBC:
1192 		case CRYPTO_CAMELLIA_CBC:
1193 		case CRYPTO_AES_CTR:
1194 			if ((crp->crp_etype = swcr_encdec(crd, sw,
1195 			    crp->crp_buf, type)) != 0)
1196 				goto done;
1197 			break;
1198 		case CRYPTO_NULL_CBC:
1199 			crp->crp_etype = 0;
1200 			break;
1201 		case CRYPTO_MD5_HMAC:
1202 		case CRYPTO_MD5_HMAC_96:
1203 		case CRYPTO_SHA1_HMAC:
1204 		case CRYPTO_SHA1_HMAC_96:
1205 		case CRYPTO_SHA2_256_HMAC:
1206 		case CRYPTO_SHA2_384_HMAC:
1207 		case CRYPTO_SHA2_512_HMAC:
1208 		case CRYPTO_RIPEMD160_HMAC:
1209 		case CRYPTO_RIPEMD160_HMAC_96:
1210 		case CRYPTO_NULL_HMAC:
1211 		case CRYPTO_MD5_KPDK:
1212 		case CRYPTO_SHA1_KPDK:
1213 		case CRYPTO_MD5:
1214 		case CRYPTO_SHA1:
1215 		case CRYPTO_AES_XCBC_MAC_96:
1216 			if ((crp->crp_etype = swcr_authcompute(crp, crd, sw,
1217 			    crp->crp_buf, type)) != 0)
1218 				goto done;
1219 			break;
1220 
1221 		case CRYPTO_AES_GCM_16:
1222 		case CRYPTO_AES_GMAC:
1223 		case CRYPTO_AES_128_GMAC:
1224 		case CRYPTO_AES_192_GMAC:
1225 		case CRYPTO_AES_256_GMAC:
1226 			crp->crp_etype = swcr_combined(crp, type);
1227 			goto done;
1228 
1229 		case CRYPTO_DEFLATE_COMP:
1230 		case CRYPTO_DEFLATE_COMP_NOGROW:
1231 		case CRYPTO_GZIP_COMP:
1232 			DPRINTF("compdec for %d\n", sw->sw_alg);
1233 			if ((crp->crp_etype = swcr_compdec(crd, sw,
1234 			    crp->crp_buf, type, &crp->crp_olen)) != 0)
1235 				goto done;
1236 			break;
1237 
1238 		default:
1239 			/* Unknown/unsupported algorithm */
1240 			crp->crp_etype = EINVAL;
1241 			goto done;
1242 		}
1243 	}
1244 
1245 done:
1246 	DPRINTF("request %p done\n", crp);
1247 	crypto_done(crp);
1248 	return 0;
1249 }
1250 
1251 static void
1252 swcr_init(void)
1253 {
1254 	swcr_id = crypto_get_driverid(CRYPTOCAP_F_SOFTWARE);
1255 	if (swcr_id < 0) {
1256 		/* This should never happen */
1257 		panic("Software crypto device cannot initialize!");
1258 	}
1259 
1260 	crypto_register(swcr_id, CRYPTO_DES_CBC,
1261 	    0, 0, swcr_newsession, swcr_freesession, swcr_process, NULL);
1262 #define	REGISTER(alg) \
1263 	crypto_register(swcr_id, alg, 0, 0, NULL, NULL, NULL, NULL)
1264 
1265 	REGISTER(CRYPTO_3DES_CBC);
1266 	REGISTER(CRYPTO_BLF_CBC);
1267 	REGISTER(CRYPTO_CAST_CBC);
1268 	REGISTER(CRYPTO_SKIPJACK_CBC);
1269 	REGISTER(CRYPTO_CAMELLIA_CBC);
1270 	REGISTER(CRYPTO_AES_CTR);
1271 	REGISTER(CRYPTO_AES_GCM_16);
1272 	REGISTER(CRYPTO_AES_GMAC);
1273 	REGISTER(CRYPTO_NULL_CBC);
1274 	REGISTER(CRYPTO_MD5_HMAC);
1275 	REGISTER(CRYPTO_MD5_HMAC_96);
1276 	REGISTER(CRYPTO_SHA1_HMAC);
1277 	REGISTER(CRYPTO_SHA1_HMAC_96);
1278 	REGISTER(CRYPTO_SHA2_256_HMAC);
1279 	REGISTER(CRYPTO_SHA2_384_HMAC);
1280 	REGISTER(CRYPTO_SHA2_512_HMAC);
1281 	REGISTER(CRYPTO_RIPEMD160_HMAC);
1282 	REGISTER(CRYPTO_RIPEMD160_HMAC_96);
1283 	REGISTER(CRYPTO_NULL_HMAC);
1284 	REGISTER(CRYPTO_MD5_KPDK);
1285 	REGISTER(CRYPTO_SHA1_KPDK);
1286 	REGISTER(CRYPTO_MD5);
1287 	REGISTER(CRYPTO_SHA1);
1288 	REGISTER(CRYPTO_AES_XCBC_MAC_96);
1289 	REGISTER(CRYPTO_AES_128_GMAC);
1290 	REGISTER(CRYPTO_AES_192_GMAC);
1291 	REGISTER(CRYPTO_AES_256_GMAC);
1292 	REGISTER(CRYPTO_AES_CBC);
1293 	REGISTER(CRYPTO_DEFLATE_COMP);
1294 	REGISTER(CRYPTO_DEFLATE_COMP_NOGROW);
1295 	REGISTER(CRYPTO_GZIP_COMP);
1296 #undef REGISTER
1297 }
1298 
1299 
1300 /*
1301  * Pseudo-device init routine for software crypto.
1302  */
1303 
1304 void
1305 swcryptoattach(int num)
1306 {
1307 	/*
1308 	 * swcrypto_attach() must be called after attached cpus, because
1309 	 * it calls softint_establish() through below call path.
1310 	 *     swcr_init() => crypto_get_driverid() => crypto_init()
1311 	 *         => crypto_init0()
1312 	 * If softint_establish() is called before attached cpus that ncpu == 0,
1313 	 * the softint handler is established to CPU#0 only.
1314 	 *
1315 	 * So, swcrypto_attach() must be called from not module_init_class()
1316 	 * but config_finalize() when it is built as builtin module.
1317 	 */
1318 	swcryptoattach_internal();
1319 }
1320 
void	swcrypto_attach(device_t, device_t, void *);

/*
 * Autoconf attach routine: register the software transforms with the
 * opencrypto framework and hook the device into pmf(9).
 */
void
swcrypto_attach(device_t parent, device_t self, void *opaque)
{

	swcr_init();

	/* No suspend/resume handlers are needed for a pseudo-device. */
	if (!pmf_device_register(self, NULL, NULL))
		aprint_error_dev(self, "couldn't establish power handler\n");
}
1332 
int	swcrypto_detach(device_t, int);

/*
 * Autoconf detach routine: deregister from pmf(9) and withdraw every
 * algorithm previously registered under swcr_id.  Always succeeds.
 */
int
swcrypto_detach(device_t self, int flag)
{
	pmf_device_deregister(self);
	if (swcr_id >= 0)
		crypto_unregister_all(swcr_id);
	return 0;
}
1343 
1344 int	swcrypto_match(device_t, cfdata_t, void *);
1345 
1346 int
1347 swcrypto_match(device_t parent, cfdata_t data, void *opaque)
1348 {
1349 
1350         return 1;
1351 }
1352 
/*
 * Module glue: swcrypto depends on the opencrypto framework and on the
 * cipher/compression modules whose transforms it registers.
 */
MODULE(MODULE_CLASS_DRIVER, swcrypto,
	"opencrypto,zlib,blowfish,des,cast128,camellia,skipjack");

/* Driver and attachment declarations for autoconf(9). */
CFDRIVER_DECL(swcrypto, DV_DULL, NULL);

CFATTACH_DECL2_NEW(swcrypto, 0, swcrypto_match, swcrypto_attach,
    swcrypto_detach, NULL, NULL, NULL);
1360 
/* Locator array for the pseudo-device instance (no locators used). */
static int swcryptoloc[] = { -1, -1 };

/* cfdata for the single swcrypto pseudo-device; NULL entry terminates. */
static struct cfdata swcrypto_cfdata[] = {
	{
		.cf_name = "swcrypto",
		.cf_atname = "swcrypto",
		.cf_unit = 0,
		.cf_fstate = 0,
		.cf_loc = swcryptoloc,
		.cf_flags = 0,
		.cf_pspec = NULL,
	},
	{ NULL, NULL, 0, 0, NULL, 0, NULL }
};
1375 
1376 /*
1377  * Internal attach routine.
1378  * Don't call before attached cpus.
1379  */
1380 static int
1381 swcryptoattach_internal(void)
1382 {
1383 	int error;
1384 
1385 	error = config_cfdriver_attach(&swcrypto_cd);
1386 	if (error) {
1387 		return error;
1388 	}
1389 
1390 	error = config_cfattach_attach(swcrypto_cd.cd_name, &swcrypto_ca);
1391 	if (error) {
1392 		config_cfdriver_detach(&swcrypto_cd);
1393 		aprint_error("%s: unable to register cfattach\n",
1394 		    swcrypto_cd.cd_name);
1395 
1396 		return error;
1397 	}
1398 
1399 	error = config_cfdata_attach(swcrypto_cfdata, 1);
1400 	if (error) {
1401 		config_cfattach_detach(swcrypto_cd.cd_name,
1402 		    &swcrypto_ca);
1403 		config_cfdriver_detach(&swcrypto_cd);
1404 		aprint_error("%s: unable to register cfdata\n",
1405 		    swcrypto_cd.cd_name);
1406 
1407 		return error;
1408 	}
1409 
1410 	(void)config_attach_pseudo(swcrypto_cfdata);
1411 
1412 	return 0;
1413 }
1414 
/*
 * Module control entry point: handle load/unload commands.
 * Unload is currently refused (ENOTTY) because in-use tracking is
 * not implemented; the #else branch shows the intended teardown.
 */
static int
swcrypto_modcmd(modcmd_t cmd, void *arg)
{
	int error = 0;

	switch (cmd) {
	case MODULE_CMD_INIT:
#ifdef _MODULE
		/* Builtin case attaches via config_finalize() instead. */
		error = swcryptoattach_internal();
#endif
		return error;
	case MODULE_CMD_FINI:
#if 1
		// XXX: Need to keep track if we are in use.
		return ENOTTY;
#else
		/* Currently unreached: tear down the autoconf glue. */
		error = config_cfdata_detach(swcrypto_cfdata);
		if (error) {
			return error;
		}

		config_cfattach_detach(swcrypto_cd.cd_name, &swcrypto_ca);
		config_cfdriver_detach(&swcrypto_cd);

		return 0;
#endif
	default:
		return ENOTTY;
	}
}
1445