1 /*	$NetBSD: cryptosoft.c,v 1.43 2013/09/12 13:12:35 martin Exp $ */
2 /*	$FreeBSD: src/sys/opencrypto/cryptosoft.c,v 1.2.2.1 2002/11/21 23:34:23 sam Exp $	*/
3 /*	$OpenBSD: cryptosoft.c,v 1.35 2002/04/26 08:43:50 deraadt Exp $	*/
4 
5 /*
6  * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu)
7  *
8  * This code was written by Angelos D. Keromytis in Athens, Greece, in
9  * February 2000. Network Security Technologies Inc. (NSTI) kindly
10  * supported the development of this code.
11  *
12  * Copyright (c) 2000, 2001 Angelos D. Keromytis
13  *
14  * Permission to use, copy, and modify this software with or without fee
15  * is hereby granted, provided that this entire notice is included in
16  * all source code copies of any software which is or includes a copy or
17  * modification of this software.
18  *
19  * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
20  * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
21  * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
22  * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
23  * PURPOSE.
24  */
25 
26 #include <sys/cdefs.h>
27 __KERNEL_RCSID(0, "$NetBSD: cryptosoft.c,v 1.43 2013/09/12 13:12:35 martin Exp $");
28 
29 #include <sys/param.h>
30 #include <sys/systm.h>
31 #include <sys/malloc.h>
32 #include <sys/mbuf.h>
33 #include <sys/sysctl.h>
34 #include <sys/errno.h>
35 #include <sys/cprng.h>
36 
37 #include "opt_ocf.h"
38 #include <opencrypto/cryptodev.h>
39 #include <opencrypto/cryptosoft.h>
40 #include <opencrypto/xform.h>
41 
42 #include <opencrypto/cryptosoft_xform.c>
43 
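/*
 * Scratch space large enough to hold the context of any supported
 * hash transform; the precomputed per-session contexts are copied
 * into it before each operation.
 */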
44 union authctx {
45 	MD5_CTX md5ctx;
46 	SHA1_CTX sha1ctx;
47 	RMD160_CTX rmd160ctx;
48 	SHA256_CTX sha256ctx;
49 	SHA384_CTX sha384ctx;
50 	SHA512_CTX sha512ctx;
51 	aesxcbc_ctx aesxcbcctx;
52 	AES_GMAC_CTX aesgmacctx;
53 };
54 
55 struct swcr_data **swcr_sessions = NULL;
56 u_int32_t swcr_sesnum = 0;
57 int32_t swcr_id = -1;
58 
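/*
 * Copy data to or from the request buffer: an mbuf chain when the
 * buffer type is CRYPTO_BUF_MBUF, otherwise a struct uio.
 */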
59 #define COPYBACK(x, a, b, c, d) \
60 	(((x) == CRYPTO_BUF_MBUF) ? m_copyback((struct mbuf *)(a),(b),(c),(d)) \
61 	: cuio_copyback((struct uio *)(a),(b),(c),(d)))
62 #define COPYDATA(x, a, b, c, d) \
63 	(((x) == CRYPTO_BUF_MBUF) ? m_copydata((struct mbuf *)(a),(b),(c),(d)) \
64 	: cuio_copydata((struct uio *)(a),(b),(c),(d)))
65 
66 static	int swcr_encdec(struct cryptodesc *, const struct swcr_data *, void *, int);
67 static	int swcr_compdec(struct cryptodesc *, const struct swcr_data *, void *, int, int *);
68 static	int swcr_combined(struct cryptop *, int);
69 static	int swcr_process(void *, struct cryptop *, int);
70 static	int swcr_newsession(void *, u_int32_t *, struct cryptoini *);
71 static	int swcr_freesession(void *, u_int64_t);
72 
73 /*
74  * Apply a symmetric encryption/decryption algorithm.
75  */
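/*
 * For plain CBC transforms (no reinit hook) the chaining is done here:
 * on encryption C[i] = encrypt(P[i] ^ C[i-1]) with C[-1] = IV, and on
 * decryption P[i] = decrypt(C[i]) ^ C[i-1].  Transforms that supply a
 * reinit hook (such as the CTR and GCM transforms) keep their own
 * chaining or counter state, so their blocks are simply passed to
 * encrypt/decrypt in order.
 */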
76 static int
77 swcr_encdec(struct cryptodesc *crd, const struct swcr_data *sw, void *bufv,
78     int outtype)
79 {
80 	char *buf = bufv;
81 	unsigned char iv[EALG_MAX_BLOCK_LEN], blk[EALG_MAX_BLOCK_LEN], *idat;
82 	unsigned char *ivp, piv[EALG_MAX_BLOCK_LEN];
83 	const struct swcr_enc_xform *exf;
84 	int i, k, j, blks, ivlen;
85 	int count, ind;
86 
87 	exf = sw->sw_exf;
88 	blks = exf->enc_xform->blocksize;
89 	ivlen = exf->enc_xform->ivsize;
90 	KASSERT(exf->reinit ? ivlen <= blks : ivlen == blks);
91 
92 	/* Check for non-padded data */
93 	if (crd->crd_len % blks)
94 		return EINVAL;
95 
96 	/* Initialize the IV */
97 	if (crd->crd_flags & CRD_F_ENCRYPT) {
98 		/* IV explicitly provided ? */
99 		if (crd->crd_flags & CRD_F_IV_EXPLICIT) {
100 			memcpy(iv, crd->crd_iv, ivlen);
101 			if (exf->reinit)
102 				exf->reinit(sw->sw_kschedule, iv, 0);
103 		} else if (exf->reinit) {
104 			exf->reinit(sw->sw_kschedule, 0, iv);
105 		} else {
106 			/* Get random IV */
107 			for (i = 0;
108 			    i + sizeof (u_int32_t) <= EALG_MAX_BLOCK_LEN;
109 			    i += sizeof (u_int32_t)) {
110 				u_int32_t temp = cprng_fast32();
111 
112 				memcpy(iv + i, &temp, sizeof(u_int32_t));
113 			}
114 			/*
115 			 * Handle the case where the block size is not a
116 			 * multiple of sizeof (u_int32_t), which is the
117 			 * size of what cprng_fast32() returns.
118 			 */
119 			if (EALG_MAX_BLOCK_LEN % sizeof (u_int32_t) != 0) {
120 				u_int32_t temp = cprng_fast32();
121 
122 				bcopy (&temp, iv + i,
123 				memcpy(iv + i, &temp,
124 				    EALG_MAX_BLOCK_LEN - i);
125 		}
126 
127 		/* Do we need to write the IV */
128 		if (!(crd->crd_flags & CRD_F_IV_PRESENT)) {
129 			COPYBACK(outtype, buf, crd->crd_inject, ivlen, iv);
130 		}
131 
132 	} else {	/* Decryption */
133 			/* IV explicitly provided ? */
134 		if (crd->crd_flags & CRD_F_IV_EXPLICIT)
135 			memcpy(iv, crd->crd_iv, ivlen);
136 		else {
137 			/* Get IV off buf */
138 			COPYDATA(outtype, buf, crd->crd_inject, ivlen, iv);
139 		}
140 		if (exf->reinit)
141 			exf->reinit(sw->sw_kschedule, iv, 0);
142 	}
143 
144 	ivp = iv;
145 
146 	if (outtype == CRYPTO_BUF_CONTIG) {
147 		if (exf->reinit) {
148 			for (i = crd->crd_skip;
149 			     i < crd->crd_skip + crd->crd_len; i += blks) {
150 				if (crd->crd_flags & CRD_F_ENCRYPT) {
151 					exf->encrypt(sw->sw_kschedule, buf + i);
152 				} else {
153 					exf->decrypt(sw->sw_kschedule, buf + i);
154 				}
155 			}
156 		} else if (crd->crd_flags & CRD_F_ENCRYPT) {
157 			for (i = crd->crd_skip;
158 			    i < crd->crd_skip + crd->crd_len; i += blks) {
159 				/* XOR with the IV/previous block, as appropriate. */
160 				if (i == crd->crd_skip)
161 					for (k = 0; k < blks; k++)
162 						buf[i + k] ^= ivp[k];
163 				else
164 					for (k = 0; k < blks; k++)
165 						buf[i + k] ^= buf[i + k - blks];
166 				exf->encrypt(sw->sw_kschedule, buf + i);
167 			}
168 		} else {		/* Decrypt */
169 			/*
170 			 * Start at the end, so we don't need to keep the encrypted
171 			 * block as the IV for the next block.
172 			 */
173 			for (i = crd->crd_skip + crd->crd_len - blks;
174 			    i >= crd->crd_skip; i -= blks) {
175 				exf->decrypt(sw->sw_kschedule, buf + i);
176 
177 				/* XOR with the IV/previous block, as appropriate */
178 				if (i == crd->crd_skip)
179 					for (k = 0; k < blks; k++)
180 						buf[i + k] ^= ivp[k];
181 				else
182 					for (k = 0; k < blks; k++)
183 						buf[i + k] ^= buf[i + k - blks];
184 			}
185 		}
186 
187 		return 0;
188 	} else if (outtype == CRYPTO_BUF_MBUF) {
189 		struct mbuf *m = (struct mbuf *) buf;
190 
191 		/* Find beginning of data */
192 		m = m_getptr(m, crd->crd_skip, &k);
193 		if (m == NULL)
194 			return EINVAL;
195 
196 		i = crd->crd_len;
197 
198 		while (i > 0) {
199 			/*
200 			 * If there's insufficient data at the end of
201 			 * an mbuf, we have to do some copying.
202 			 */
203 			if (m->m_len < k + blks && m->m_len != k) {
204 				m_copydata(m, k, blks, blk);
205 
206 				/* Actual encryption/decryption */
207 				if (exf->reinit) {
208 					if (crd->crd_flags & CRD_F_ENCRYPT) {
209 						exf->encrypt(sw->sw_kschedule,
210 							     blk);
211 					} else {
212 						exf->decrypt(sw->sw_kschedule,
213 							     blk);
214 					}
215 				} else if (crd->crd_flags & CRD_F_ENCRYPT) {
216 					/* XOR with previous block */
217 					for (j = 0; j < blks; j++)
218 						blk[j] ^= ivp[j];
219 
220 					exf->encrypt(sw->sw_kschedule, blk);
221 
222 					/*
223 					 * Keep encrypted block for XOR'ing
224 					 * with next block
225 					 */
226 					memcpy(iv, blk, blks);
227 					ivp = iv;
228 				} else {	/* decrypt */
229 					/*
230 					 * Keep encrypted block for XOR'ing
231 					 * with next block
232 					 */
233 					if (ivp == iv)
234 						memcpy(piv, blk, blks);
235 					else
236 						memcpy(iv, blk, blks);
237 
238 					exf->decrypt(sw->sw_kschedule, blk);
239 
240 					/* XOR with previous block */
241 					for (j = 0; j < blks; j++)
242 						blk[j] ^= ivp[j];
243 
244 					if (ivp == iv)
245 						memcpy(iv, piv, blks);
246 					else
247 						ivp = iv;
248 				}
249 
250 				/* Copy back the (en/de)crypted block */
251 				m_copyback(m, k, blks, blk);
252 
253 				/* Advance pointer */
254 				m = m_getptr(m, k + blks, &k);
255 				if (m == NULL)
256 					return EINVAL;
257 
258 				i -= blks;
259 
260 				/* Could be done... */
261 				if (i == 0)
262 					break;
263 			}
264 
265 			/* Skip possibly empty mbufs */
266 			if (k == m->m_len) {
267 				for (m = m->m_next; m && m->m_len == 0;
268 				    m = m->m_next)
269 					;
270 				k = 0;
271 			}
272 
273 			/* Sanity check */
274 			if (m == NULL)
275 				return EINVAL;
276 
277 			/*
278 			 * Warning: idat may point to garbage here, but
279 			 * we only use it in the while() loop below, and
280 			 * only if there is indeed enough data.
281 			 */
282 			idat = mtod(m, unsigned char *) + k;
283 
284 			while (m->m_len >= k + blks && i > 0) {
285 				if (exf->reinit) {
286 					if (crd->crd_flags & CRD_F_ENCRYPT) {
287 						exf->encrypt(sw->sw_kschedule,
288 							     idat);
289 					} else {
290 						exf->decrypt(sw->sw_kschedule,
291 							     idat);
292 					}
293 				} else if (crd->crd_flags & CRD_F_ENCRYPT) {
294 					/* XOR with previous block/IV */
295 					for (j = 0; j < blks; j++)
296 						idat[j] ^= ivp[j];
297 
298 					exf->encrypt(sw->sw_kschedule, idat);
299 					ivp = idat;
300 				} else {	/* decrypt */
301 					/*
302 					 * Keep encrypted block to be used
303 					 * in next block's processing.
304 					 */
305 					if (ivp == iv)
306 						memcpy(piv, idat, blks);
307 					else
308 						memcpy(iv, idat, blks);
309 
310 					exf->decrypt(sw->sw_kschedule, idat);
311 
312 					/* XOR with previous block/IV */
313 					for (j = 0; j < blks; j++)
314 						idat[j] ^= ivp[j];
315 
316 					if (ivp == iv)
317 						memcpy(iv, piv, blks);
318 					else
319 						ivp = iv;
320 				}
321 
322 				idat += blks;
323 				k += blks;
324 				i -= blks;
325 			}
326 		}
327 
328 		return 0; /* Done with mbuf encryption/decryption */
329 	} else if (outtype == CRYPTO_BUF_IOV) {
330 		struct uio *uio = (struct uio *) buf;
331 
332 		/* Find beginning of data */
333 		count = crd->crd_skip;
334 		ind = cuio_getptr(uio, count, &k);
335 		if (ind == -1)
336 			return EINVAL;
337 
338 		i = crd->crd_len;
339 
340 		while (i > 0) {
341 			/*
342 			 * If there's insufficient data at the end,
343 			 * we have to do some copying.
344 			 */
345 			if (uio->uio_iov[ind].iov_len < k + blks &&
346 			    uio->uio_iov[ind].iov_len != k) {
347 				cuio_copydata(uio, k, blks, blk);
348 
349 				/* Actual encryption/decryption */
350 				if (exf->reinit) {
351 					if (crd->crd_flags & CRD_F_ENCRYPT) {
352 						exf->encrypt(sw->sw_kschedule,
353 							     blk);
354 					} else {
355 						exf->decrypt(sw->sw_kschedule,
356 							     blk);
357 					}
358 				} else if (crd->crd_flags & CRD_F_ENCRYPT) {
359 					/* XOR with previous block */
360 					for (j = 0; j < blks; j++)
361 						blk[j] ^= ivp[j];
362 
363 					exf->encrypt(sw->sw_kschedule, blk);
364 
365 					/*
366 					 * Keep encrypted block for XOR'ing
367 					 * with next block
368 					 */
369 					memcpy(iv, blk, blks);
370 					ivp = iv;
371 				} else {	/* decrypt */
372 					/*
373 					 * Keep encrypted block for XOR'ing
374 					 * with next block
375 					 */
376 					if (ivp == iv)
377 						memcpy(piv, blk, blks);
378 					else
379 						memcpy(iv, blk, blks);
380 
381 					exf->decrypt(sw->sw_kschedule, blk);
382 
383 					/* XOR with previous block */
384 					for (j = 0; j < blks; j++)
385 						blk[j] ^= ivp[j];
386 
387 					if (ivp == iv)
388 						memcpy(iv, piv, blks);
389 					else
390 						ivp = iv;
391 				}
392 
393 				/* Copy back the (en/de)crypted block */
394 				cuio_copyback(uio, k, blks, blk);
395 
396 				count += blks;
397 
398 				/* Advance pointer */
399 				ind = cuio_getptr(uio, count, &k);
400 				if (ind == -1)
401 					return (EINVAL);
402 
403 				i -= blks;
404 
405 				/* Could be done... */
406 				if (i == 0)
407 					break;
408 			}
409 
410 			/*
411 			 * Warning: idat may point to garbage here, but
412 			 * we only use it in the while() loop below, and
413 			 * only if there is indeed enough data.
414 			 */
415 			idat = ((char *)uio->uio_iov[ind].iov_base) + k;
416 
417 			while (uio->uio_iov[ind].iov_len >= k + blks &&
418 			    i > 0) {
419 				if (exf->reinit) {
420 					if (crd->crd_flags & CRD_F_ENCRYPT) {
421 						exf->encrypt(sw->sw_kschedule,
422 							    idat);
423 					} else {
424 						exf->decrypt(sw->sw_kschedule,
425 							    idat);
426 					}
427 				} else if (crd->crd_flags & CRD_F_ENCRYPT) {
428 					/* XOR with previous block/IV */
429 					for (j = 0; j < blks; j++)
430 						idat[j] ^= ivp[j];
431 
432 					exf->encrypt(sw->sw_kschedule, idat);
433 					ivp = idat;
434 				} else {	/* decrypt */
435 					/*
436 					 * Keep encrypted block to be used
437 					 * in next block's processing.
438 					 */
439 					if (ivp == iv)
440 						memcpy(piv, idat, blks);
441 					else
442 						memcpy(iv, idat, blks);
443 
444 					exf->decrypt(sw->sw_kschedule, idat);
445 
446 					/* XOR with previous block/IV */
447 					for (j = 0; j < blks; j++)
448 						idat[j] ^= ivp[j];
449 
450 					if (ivp == iv)
451 						memcpy(iv, piv, blks);
452 					else
453 						ivp = iv;
454 				}
455 
456 				idat += blks;
457 				count += blks;
458 				k += blks;
459 				i -= blks;
460 			}
461 		}
462 		return 0; /* Done with iov encryption/decryption */
463 	}
464 
465 	/* Unreachable */
466 	return EINVAL;
467 }
468 
469 /*
470  * Compute keyed-hash authenticator.
471  */
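/*
 * The per-session state holds a precomputed hash context (sw_ictx).
 * It is copied into a local context, the selected range of the buffer
 * is hashed, and the result is finalized according to the algorithm
 * (HMAC outer hash, KPDK key append, or plain digest) before being
 * written at crd_inject (or to crp_mac for iov requests).
 */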
472 int
473 swcr_authcompute(struct cryptop *crp, struct cryptodesc *crd,
474     const struct swcr_data *sw, void *buf, int outtype)
475 {
476 	unsigned char aalg[AALG_MAX_RESULT_LEN];
477 	const struct swcr_auth_hash *axf;
478 	union authctx ctx;
479 	int err;
480 
481 	if (sw->sw_ictx == NULL)
482 		return EINVAL;
483 
484 	axf = sw->sw_axf;
485 
486 	memcpy(&ctx, sw->sw_ictx, axf->ctxsize);
487 
488 	switch (outtype) {
489 	case CRYPTO_BUF_CONTIG:
490 		axf->Update(&ctx, (char *)buf + crd->crd_skip, crd->crd_len);
491 		break;
492 	case CRYPTO_BUF_MBUF:
493 		err = m_apply((struct mbuf *) buf, crd->crd_skip, crd->crd_len,
494 		    (int (*)(void*, void *, unsigned int)) axf->Update,
495 		    (void *) &ctx);
496 		if (err)
497 			return err;
498 		break;
499 	case CRYPTO_BUF_IOV:
500 		err = cuio_apply((struct uio *) buf, crd->crd_skip,
501 		    crd->crd_len,
502 		    (int (*)(void *, void *, unsigned int)) axf->Update,
503 		    (void *) &ctx);
504 		if (err) {
505 			return err;
506 		}
507 		break;
508 	default:
509 		return EINVAL;
510 	}
511 
512 	switch (sw->sw_alg) {
513 	case CRYPTO_MD5_HMAC:
514 	case CRYPTO_MD5_HMAC_96:
515 	case CRYPTO_SHA1_HMAC:
516 	case CRYPTO_SHA1_HMAC_96:
517 	case CRYPTO_SHA2_256_HMAC:
518 	case CRYPTO_SHA2_384_HMAC:
519 	case CRYPTO_SHA2_512_HMAC:
520 	case CRYPTO_RIPEMD160_HMAC:
521 	case CRYPTO_RIPEMD160_HMAC_96:
522 		if (sw->sw_octx == NULL)
523 			return EINVAL;
524 
525 		axf->Final(aalg, &ctx);
526 		memcpy(&ctx, sw->sw_octx, axf->ctxsize);
527 		axf->Update(&ctx, aalg, axf->auth_hash->hashsize);
528 		axf->Final(aalg, &ctx);
529 		break;
530 
531 	case CRYPTO_MD5_KPDK:
532 	case CRYPTO_SHA1_KPDK:
533 		if (sw->sw_octx == NULL)
534 			return EINVAL;
535 
536 		axf->Update(&ctx, sw->sw_octx, sw->sw_klen);
537 		axf->Final(aalg, &ctx);
538 		break;
539 
540 	case CRYPTO_NULL_HMAC:
541 	case CRYPTO_MD5:
542 	case CRYPTO_SHA1:
543 	case CRYPTO_AES_XCBC_MAC_96:
544 		axf->Final(aalg, &ctx);
545 		break;
546 	}
547 
548 	/* Inject the authentication data */
549 	switch (outtype) {
550 	case CRYPTO_BUF_CONTIG:
551 		(void)memcpy((char *)buf + crd->crd_inject, aalg,
552 		    axf->auth_hash->authsize);
553 		break;
554 	case CRYPTO_BUF_MBUF:
555 		m_copyback((struct mbuf *) buf, crd->crd_inject,
556 		    axf->auth_hash->authsize, aalg);
557 		break;
558 	case CRYPTO_BUF_IOV:
559 		memcpy(crp->crp_mac, aalg, axf->auth_hash->authsize);
560 		break;
561 	default:
562 		return EINVAL;
563 	}
564 	return 0;
565 }
566 
567 /*
568  * Apply a combined encryption-authentication transformation
569  */
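/*
 * Used for the AES-GCM/GMAC algorithms, where encryption and
 * authentication must be interleaved: the descriptor list has to carry
 * both a cipher descriptor (CRYPTO_AES_GCM_16 or CRYPTO_AES_GMAC) and
 * a MAC descriptor (CRYPTO_AES_128/192/256_GMAC).  The AAD is fed to
 * the MAC first, then each payload block is run through the cipher and
 * the MAC, and finally the GHASH length block is processed and the tag
 * written out.  Contiguous buffers are not supported here.
 */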
570 static int
571 swcr_combined(struct cryptop *crp, int outtype)
572 {
573 	uint32_t blkbuf[howmany(EALG_MAX_BLOCK_LEN, sizeof(uint32_t))];
574 	u_char *blk = (u_char *)blkbuf;
575 	u_char aalg[AALG_MAX_RESULT_LEN];
576 	u_char iv[EALG_MAX_BLOCK_LEN];
577 	union authctx ctx;
578 	struct cryptodesc *crd, *crda = NULL, *crde = NULL;
579 	struct swcr_data *sw, *swa, *swe = NULL;
580 	const struct swcr_auth_hash *axf = NULL;
581 	const struct swcr_enc_xform *exf = NULL;
582 	void *buf = (void *)crp->crp_buf;
583 	uint32_t *blkp;
584 	int i, blksz = 0, ivlen = 0, len;
585 
586 	for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
587 		for (sw = swcr_sessions[crp->crp_sid & 0xffffffff];
588 		     sw && sw->sw_alg != crd->crd_alg;
589 		     sw = sw->sw_next)
590 			;
591 		if (sw == NULL)
592 			return (EINVAL);
593 
594 		switch (sw->sw_alg) {
595 		case CRYPTO_AES_GCM_16:
596 		case CRYPTO_AES_GMAC:
597 			swe = sw;
598 			crde = crd;
599 			exf = swe->sw_exf;
600 			ivlen = exf->enc_xform->ivsize;
601 			break;
602 		case CRYPTO_AES_128_GMAC:
603 		case CRYPTO_AES_192_GMAC:
604 		case CRYPTO_AES_256_GMAC:
605 			swa = sw;
606 			crda = crd;
607 			axf = swa->sw_axf;
608 			if (swa->sw_ictx == NULL)
609 				return (EINVAL);
610 			memcpy(&ctx, swa->sw_ictx, axf->ctxsize);
611 			blksz = axf->auth_hash->blocksize;
612 			break;
613 		default:
614 			return (EINVAL);
615 		}
616 	}
617 	if (crde == NULL || crda == NULL)
618 		return (EINVAL);
619 	if (outtype == CRYPTO_BUF_CONTIG)
620 		return (EINVAL);
621 
622 	/* Initialize the IV */
623 	if (crde->crd_flags & CRD_F_ENCRYPT) {
624 		/* IV explicitly provided ? */
625 		if (crde->crd_flags & CRD_F_IV_EXPLICIT) {
626 			memcpy(iv, crde->crd_iv, ivlen);
627 			if (exf->reinit)
628 				exf->reinit(swe->sw_kschedule, iv, 0);
629 		} else if (exf->reinit)
630 			exf->reinit(swe->sw_kschedule, 0, iv);
631 		else
632 			cprng_fast(iv, ivlen);
633 
634 		/* Do we need to write the IV */
635 		if (!(crde->crd_flags & CRD_F_IV_PRESENT))
636 			COPYBACK(outtype, buf, crde->crd_inject, ivlen, iv);
637 
638 	} else {	/* Decryption */
639 			/* IV explicitly provided ? */
640 		if (crde->crd_flags & CRD_F_IV_EXPLICIT)
641 			memcpy(iv, crde->crd_iv, ivlen);
642 		else {
643 			/* Get IV off buf */
644 			COPYDATA(outtype, buf, crde->crd_inject, ivlen, iv);
645 		}
646 		if (exf->reinit)
647 			exf->reinit(swe->sw_kschedule, iv, 0);
648 	}
649 
650 	/* Supply MAC with IV */
651 	if (axf->Reinit)
652 		axf->Reinit(&ctx, iv, ivlen);
653 
654 	/* Supply MAC with AAD */
655 	for (i = 0; i < crda->crd_len; i += blksz) {
656 		len = MIN(crda->crd_len - i, blksz);
657 		COPYDATA(outtype, buf, crda->crd_skip + i, len, blk);
658 		axf->Update(&ctx, blk, len);
659 	}
660 
661 	/* Do encryption/decryption with MAC */
662 	for (i = 0; i < crde->crd_len; i += blksz) {
663 		len = MIN(crde->crd_len - i, blksz);
664 		if (len < blksz)
665 			memset(blk, 0, blksz);
666 		COPYDATA(outtype, buf, crde->crd_skip + i, len, blk);
667 		if (crde->crd_flags & CRD_F_ENCRYPT) {
668 			exf->encrypt(swe->sw_kschedule, blk);
669 			axf->Update(&ctx, blk, len);
670 		} else {
671 			axf->Update(&ctx, blk, len);
672 			exf->decrypt(swe->sw_kschedule, blk);
673 		}
674 		COPYBACK(outtype, buf, crde->crd_skip + i, len, blk);
675 	}
676 
677 	/* Do any required special finalization */
678 	switch (crda->crd_alg) {
679 		case CRYPTO_AES_128_GMAC:
680 		case CRYPTO_AES_192_GMAC:
681 		case CRYPTO_AES_256_GMAC:
682 			/* length block */
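			/*
			 * GHASH length block layout: the 64-bit big-endian
			 * AAD bit length followed by the 64-bit big-endian
			 * payload bit length.  crd_len fits in 32 bits, so
			 * only the low word of each half is written.
			 */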
683 			memset(blk, 0, blksz);
684 			blkp = (uint32_t *)blk + 1;
685 			*blkp = htobe32(crda->crd_len * 8);
686 			blkp = (uint32_t *)blk + 3;
687 			*blkp = htobe32(crde->crd_len * 8);
688 			axf->Update(&ctx, blk, blksz);
689 			break;
690 	}
691 
692 	/* Finalize MAC */
693 	axf->Final(aalg, &ctx);
694 
695 	/* Inject the authentication data */
696 	if (outtype == CRYPTO_BUF_MBUF)
697 		COPYBACK(outtype, buf, crda->crd_inject, axf->auth_hash->authsize, aalg);
698 	else
699 		memcpy(crp->crp_mac, aalg, axf->auth_hash->authsize);
700 
701 	return (0);
702 }
703 
704 /*
705  * Apply a compression/decompression algorithm
706  */
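/*
 * The data (crd_len bytes starting at crd_skip) is copied into a
 * contiguous buffer, (de)compressed in one shot and copied back, and
 * the resulting size is returned through *res_size.  If
 * CRYPTO_DEFLATE_COMP_NOGROW compression does not make the data
 * smaller, the output is discarded and the buffer is left unchanged.
 */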
707 static int
708 swcr_compdec(struct cryptodesc *crd, const struct swcr_data *sw,
709     void *buf, int outtype, int *res_size)
710 {
711 	u_int8_t *data, *out;
712 	const struct swcr_comp_algo *cxf;
713 	int adj;
714 	u_int32_t result;
715 
716 	cxf = sw->sw_cxf;
717 
718 	/* We must process the whole buffer of data at once, so if the
719 	 * data is not contiguous (e.g. spread across an mbuf chain),
720 	 * we copy it into a temporary buffer first.
721 	 */
722 
723 	data = malloc(crd->crd_len, M_CRYPTO_DATA, M_NOWAIT);
724 	if (data == NULL)
725 		return (EINVAL);
726 	COPYDATA(outtype, buf, crd->crd_skip, crd->crd_len, data);
727 
728 	if (crd->crd_flags & CRD_F_COMP)
729 		result = cxf->compress(data, crd->crd_len, &out);
730 	else
731 		result = cxf->decompress(data, crd->crd_len, &out,
732 					 *res_size);
733 
734 	free(data, M_CRYPTO_DATA);
735 	if (result == 0)
736 		return EINVAL;
737 
738 	/* Copy back the (de)compressed data; m_copyback will extend
739 	 * the mbuf as necessary.
740 	 */
741 	*res_size = (int)result;
742 	/* Check the compressed size when doing compression */
743 	if (crd->crd_flags & CRD_F_COMP &&
744 	    sw->sw_alg == CRYPTO_DEFLATE_COMP_NOGROW &&
745 	    result >= crd->crd_len) {
746 			/* Compression was useless, we lost time */
747 			free(out, M_CRYPTO_DATA);
748 			return 0;
749 	}
750 
751 	COPYBACK(outtype, buf, crd->crd_skip, result, out);
752 	if (result < crd->crd_len) {
753 		adj = result - crd->crd_len;
754 		if (outtype == CRYPTO_BUF_MBUF) {
755 			/* Trim the now-unused tail of the mbuf chain. */
756 			m_adj((struct mbuf *)buf, adj);
757 		}
758 		/* Don't adjust the iov_len, it breaks the kmem_free */
759 	}
760 	free(out, M_CRYPTO_DATA);
761 	return 0;
762 }
763 
764 /*
765  * Generate a new software session.
766  */
767 static int
768 swcr_newsession(void *arg, u_int32_t *sid, struct cryptoini *cri)
769 {
770 	struct swcr_data **swd;
771 	const struct swcr_auth_hash *axf;
772 	const struct swcr_enc_xform *txf;
773 	const struct swcr_comp_algo *cxf;
774 	u_int32_t i;
775 	int k, error;
776 
777 	if (sid == NULL || cri == NULL)
778 		return EINVAL;
779 
780 	if (swcr_sessions) {
781 		for (i = 1; i < swcr_sesnum; i++)
782 			if (swcr_sessions[i] == NULL)
783 				break;
784 	} else
785 		i = 1;		/* NB: to silence compiler warning */
786 
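	/*
	 * Grow (or initially allocate) the session table: start with
	 * CRYPTO_SW_SESSIONS slots and double the size each time it
	 * fills up.  Slot 0 is left empty so that a session id of 0 is
	 * never valid.
	 */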
787 	if (swcr_sessions == NULL || i == swcr_sesnum) {
788 		if (swcr_sessions == NULL) {
789 			i = 1; /* We leave swcr_sessions[0] empty */
790 			swcr_sesnum = CRYPTO_SW_SESSIONS;
791 		} else
792 			swcr_sesnum *= 2;
793 
794 		swd = malloc(swcr_sesnum * sizeof(struct swcr_data *),
795 		    M_CRYPTO_DATA, M_NOWAIT);
796 		if (swd == NULL) {
797 			/* Reset session number */
798 			if (swcr_sesnum == CRYPTO_SW_SESSIONS)
799 				swcr_sesnum = 0;
800 			else
801 				swcr_sesnum /= 2;
802 			return ENOBUFS;
803 		}
804 
805 		memset(swd, 0, swcr_sesnum * sizeof(struct swcr_data *));
806 
807 		/* Copy existing sessions */
808 		if (swcr_sessions) {
809 			memcpy(swd, swcr_sessions,
810 			    (swcr_sesnum / 2) * sizeof(struct swcr_data *));
811 			free(swcr_sessions, M_CRYPTO_DATA);
812 		}
813 
814 		swcr_sessions = swd;
815 	}
816 
817 	swd = &swcr_sessions[i];
818 	*sid = i;
819 
820 	while (cri) {
821 		*swd = malloc(sizeof **swd, M_CRYPTO_DATA, M_NOWAIT);
822 		if (*swd == NULL) {
823 			swcr_freesession(NULL, i);
824 			return ENOBUFS;
825 		}
826 		memset(*swd, 0, sizeof(struct swcr_data));
827 
828 		switch (cri->cri_alg) {
829 		case CRYPTO_DES_CBC:
830 			txf = &swcr_enc_xform_des;
831 			goto enccommon;
832 		case CRYPTO_3DES_CBC:
833 			txf = &swcr_enc_xform_3des;
834 			goto enccommon;
835 		case CRYPTO_BLF_CBC:
836 			txf = &swcr_enc_xform_blf;
837 			goto enccommon;
838 		case CRYPTO_CAST_CBC:
839 			txf = &swcr_enc_xform_cast5;
840 			goto enccommon;
841 		case CRYPTO_SKIPJACK_CBC:
842 			txf = &swcr_enc_xform_skipjack;
843 			goto enccommon;
844 		case CRYPTO_RIJNDAEL128_CBC:
845 			txf = &swcr_enc_xform_rijndael128;
846 			goto enccommon;
847 		case CRYPTO_CAMELLIA_CBC:
848 			txf = &swcr_enc_xform_camellia;
849 			goto enccommon;
850 		case CRYPTO_AES_CTR:
851 			txf = &swcr_enc_xform_aes_ctr;
852 			goto enccommon;
853 		case CRYPTO_AES_GCM_16:
854 			txf = &swcr_enc_xform_aes_gcm;
855 			goto enccommon;
856 		case CRYPTO_AES_GMAC:
857 			txf = &swcr_enc_xform_aes_gmac;
858 			goto enccommon;
859 		case CRYPTO_NULL_CBC:
860 			txf = &swcr_enc_xform_null;
861 			goto enccommon;
862 		enccommon:
863 			error = txf->setkey(&((*swd)->sw_kschedule),
864 					cri->cri_key, cri->cri_klen / 8);
865 			if (error) {
866 				swcr_freesession(NULL, i);
867 				return error;
868 			}
869 			(*swd)->sw_exf = txf;
870 			break;
871 
872 		case CRYPTO_MD5_HMAC:
873 			axf = &swcr_auth_hash_hmac_md5;
874 			goto authcommon;
875 		case CRYPTO_MD5_HMAC_96:
876 			axf = &swcr_auth_hash_hmac_md5_96;
877 			goto authcommon;
878 		case CRYPTO_SHA1_HMAC:
879 			axf = &swcr_auth_hash_hmac_sha1;
880 			goto authcommon;
881 		case CRYPTO_SHA1_HMAC_96:
882 			axf = &swcr_auth_hash_hmac_sha1_96;
883 			goto authcommon;
884 		case CRYPTO_SHA2_256_HMAC:
885 			axf = &swcr_auth_hash_hmac_sha2_256;
886 			goto authcommon;
887 		case CRYPTO_SHA2_384_HMAC:
888 			axf = &swcr_auth_hash_hmac_sha2_384;
889 			goto authcommon;
890 		case CRYPTO_SHA2_512_HMAC:
891 			axf = &swcr_auth_hash_hmac_sha2_512;
892 			goto authcommon;
893 		case CRYPTO_NULL_HMAC:
894 			axf = &swcr_auth_hash_null;
895 			goto authcommon;
896 		case CRYPTO_RIPEMD160_HMAC:
897 			axf = &swcr_auth_hash_hmac_ripemd_160;
898 			goto authcommon;
899 		case CRYPTO_RIPEMD160_HMAC_96:
900 			axf = &swcr_auth_hash_hmac_ripemd_160_96;
901 			goto authcommon;	/* leave this for safety */
902 		authcommon:
903 			(*swd)->sw_ictx = malloc(axf->ctxsize,
904 			    M_CRYPTO_DATA, M_NOWAIT);
905 			if ((*swd)->sw_ictx == NULL) {
906 				swcr_freesession(NULL, i);
907 				return ENOBUFS;
908 			}
909 
910 			(*swd)->sw_octx = malloc(axf->ctxsize,
911 			    M_CRYPTO_DATA, M_NOWAIT);
912 			if ((*swd)->sw_octx == NULL) {
913 				swcr_freesession(NULL, i);
914 				return ENOBUFS;
915 			}
916 
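			/*
			 * Precompute the HMAC inner and outer contexts:
			 * sw_ictx is seeded with (key ^ ipad) and sw_octx
			 * with (key ^ opad), each padded to the hash block
			 * size.  The key is XORed in place and restored
			 * below, so cri_key is unchanged on return.
			 */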
917 			for (k = 0; k < cri->cri_klen / 8; k++)
918 				cri->cri_key[k] ^= HMAC_IPAD_VAL;
919 
920 			axf->Init((*swd)->sw_ictx);
921 			axf->Update((*swd)->sw_ictx, cri->cri_key,
922 			    cri->cri_klen / 8);
923 			axf->Update((*swd)->sw_ictx, hmac_ipad_buffer,
924 			    axf->auth_hash->blocksize - (cri->cri_klen / 8));
925 
926 			for (k = 0; k < cri->cri_klen / 8; k++)
927 				cri->cri_key[k] ^= (HMAC_IPAD_VAL ^ HMAC_OPAD_VAL);
928 
929 			axf->Init((*swd)->sw_octx);
930 			axf->Update((*swd)->sw_octx, cri->cri_key,
931 			    cri->cri_klen / 8);
932 			axf->Update((*swd)->sw_octx, hmac_opad_buffer,
933 			    axf->auth_hash->blocksize - (cri->cri_klen / 8));
934 
935 			for (k = 0; k < cri->cri_klen / 8; k++)
936 				cri->cri_key[k] ^= HMAC_OPAD_VAL;
937 			(*swd)->sw_axf = axf;
938 			break;
939 
940 		case CRYPTO_MD5_KPDK:
941 			axf = &swcr_auth_hash_key_md5;
942 			goto auth2common;
943 
944 		case CRYPTO_SHA1_KPDK:
945 			axf = &swcr_auth_hash_key_sha1;
946 		auth2common:
947 			(*swd)->sw_ictx = malloc(axf->ctxsize,
948 			    M_CRYPTO_DATA, M_NOWAIT);
949 			if ((*swd)->sw_ictx == NULL) {
950 				swcr_freesession(NULL, i);
951 				return ENOBUFS;
952 			}
953 
954 			/* Store the key so we can "append" it to the payload */
955 			(*swd)->sw_octx = malloc(cri->cri_klen / 8, M_CRYPTO_DATA,
956 			    M_NOWAIT);
957 			if ((*swd)->sw_octx == NULL) {
958 				swcr_freesession(NULL, i);
959 				return ENOBUFS;
960 			}
961 
962 			(*swd)->sw_klen = cri->cri_klen / 8;
963 			memcpy((*swd)->sw_octx, cri->cri_key, cri->cri_klen / 8);
964 			axf->Init((*swd)->sw_ictx);
965 			axf->Update((*swd)->sw_ictx, cri->cri_key,
966 			    cri->cri_klen / 8);
967 			axf->Final(NULL, (*swd)->sw_ictx);
968 			(*swd)->sw_axf = axf;
969 			break;
970 
971 		case CRYPTO_MD5:
972 			axf = &swcr_auth_hash_md5;
973 			goto auth3common;
974 
975 		case CRYPTO_SHA1:
976 			axf = &swcr_auth_hash_sha1;
977 		auth3common:
978 			(*swd)->sw_ictx = malloc(axf->ctxsize,
979 			    M_CRYPTO_DATA, M_NOWAIT);
980 			if ((*swd)->sw_ictx == NULL) {
981 				swcr_freesession(NULL, i);
982 				return ENOBUFS;
983 			}
984 
985 			axf->Init((*swd)->sw_ictx);
986 			(*swd)->sw_axf = axf;
987 			break;
988 
989 		case CRYPTO_AES_XCBC_MAC_96:
990 			axf = &swcr_auth_hash_aes_xcbc_mac;
991 			goto auth4common;
992 		case CRYPTO_AES_128_GMAC:
993 			axf = &swcr_auth_hash_gmac_aes_128;
994 			goto auth4common;
995 		case CRYPTO_AES_192_GMAC:
996 			axf = &swcr_auth_hash_gmac_aes_192;
997 			goto auth4common;
998 		case CRYPTO_AES_256_GMAC:
999 			axf = &swcr_auth_hash_gmac_aes_256;
1000 		auth4common:
1001 			(*swd)->sw_ictx = malloc(axf->ctxsize,
1002 			    M_CRYPTO_DATA, M_NOWAIT);
1003 			if ((*swd)->sw_ictx == NULL) {
1004 				swcr_freesession(NULL, i);
1005 				return ENOBUFS;
1006 			}
1007 			axf->Init((*swd)->sw_ictx);
1008 			axf->Setkey((*swd)->sw_ictx,
1009 				cri->cri_key, cri->cri_klen / 8);
1010 			(*swd)->sw_axf = axf;
1011 			break;
1012 
1013 		case CRYPTO_DEFLATE_COMP:
1014 			cxf = &swcr_comp_algo_deflate;
1015 			(*swd)->sw_cxf = cxf;
1016 			break;
1017 
1018 		case CRYPTO_DEFLATE_COMP_NOGROW:
1019 			cxf = &swcr_comp_algo_deflate_nogrow;
1020 			(*swd)->sw_cxf = cxf;
1021 			break;
1022 
1023 		case CRYPTO_GZIP_COMP:
1024 			cxf = &swcr_comp_algo_gzip;
1025 			(*swd)->sw_cxf = cxf;
1026 			break;
1027 		default:
1028 			swcr_freesession(NULL, i);
1029 			return EINVAL;
1030 		}
1031 
1032 		(*swd)->sw_alg = cri->cri_alg;
1033 		cri = cri->cri_next;
1034 		swd = &((*swd)->sw_next);
1035 	}
1036 	return 0;
1037 }
1038 
1039 /*
1040  * Free a session.
1041  */
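/*
 * Walk the per-algorithm swcr_data chain for this session, zeroing key
 * schedules and hash contexts before freeing them so that no key
 * material is left behind.
 */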
1042 static int
1043 swcr_freesession(void *arg, u_int64_t tid)
1044 {
1045 	struct swcr_data *swd;
1046 	const struct swcr_enc_xform *txf;
1047 	const struct swcr_auth_hash *axf;
1048 	u_int32_t sid = ((u_int32_t) tid) & 0xffffffff;
1049 
1050 	if (sid >= swcr_sesnum || swcr_sessions == NULL ||
1051 	    swcr_sessions[sid] == NULL)
1052 		return EINVAL;
1053 
1054 	/* Silently accept and return */
1055 	if (sid == 0)
1056 		return 0;
1057 
1058 	while ((swd = swcr_sessions[sid]) != NULL) {
1059 		swcr_sessions[sid] = swd->sw_next;
1060 
1061 		switch (swd->sw_alg) {
1062 		case CRYPTO_DES_CBC:
1063 		case CRYPTO_3DES_CBC:
1064 		case CRYPTO_BLF_CBC:
1065 		case CRYPTO_CAST_CBC:
1066 		case CRYPTO_SKIPJACK_CBC:
1067 		case CRYPTO_RIJNDAEL128_CBC:
1068 		case CRYPTO_CAMELLIA_CBC:
1069 		case CRYPTO_AES_CTR:
1070 		case CRYPTO_AES_GCM_16:
1071 		case CRYPTO_AES_GMAC:
1072 		case CRYPTO_NULL_CBC:
1073 			txf = swd->sw_exf;
1074 
1075 			if (swd->sw_kschedule)
1076 				txf->zerokey(&(swd->sw_kschedule));
1077 			break;
1078 
1079 		case CRYPTO_MD5_HMAC:
1080 		case CRYPTO_MD5_HMAC_96:
1081 		case CRYPTO_SHA1_HMAC:
1082 		case CRYPTO_SHA1_HMAC_96:
1083 		case CRYPTO_SHA2_256_HMAC:
1084 		case CRYPTO_SHA2_384_HMAC:
1085 		case CRYPTO_SHA2_512_HMAC:
1086 		case CRYPTO_RIPEMD160_HMAC:
1087 		case CRYPTO_RIPEMD160_HMAC_96:
1088 		case CRYPTO_NULL_HMAC:
1089 			axf = swd->sw_axf;
1090 
1091 			if (swd->sw_ictx) {
1092 				explicit_memset(swd->sw_ictx, 0, axf->ctxsize);
1093 				free(swd->sw_ictx, M_CRYPTO_DATA);
1094 			}
1095 			if (swd->sw_octx) {
1096 				explicit_memset(swd->sw_octx, 0, axf->ctxsize);
1097 				free(swd->sw_octx, M_CRYPTO_DATA);
1098 			}
1099 			break;
1100 
1101 		case CRYPTO_MD5_KPDK:
1102 		case CRYPTO_SHA1_KPDK:
1103 			axf = swd->sw_axf;
1104 
1105 			if (swd->sw_ictx) {
1106 				explicit_memset(swd->sw_ictx, 0, axf->ctxsize);
1107 				free(swd->sw_ictx, M_CRYPTO_DATA);
1108 			}
1109 			if (swd->sw_octx) {
1110 				explicit_memset(swd->sw_octx, 0, swd->sw_klen);
1111 				free(swd->sw_octx, M_CRYPTO_DATA);
1112 			}
1113 			break;
1114 
1115 		case CRYPTO_MD5:
1116 		case CRYPTO_SHA1:
1117 		case CRYPTO_AES_XCBC_MAC_96:
1118 		case CRYPTO_AES_128_GMAC:
1119 		case CRYPTO_AES_192_GMAC:
1120 		case CRYPTO_AES_256_GMAC:
1121 			axf = swd->sw_axf;
1122 
1123 			if (swd->sw_ictx) {
1124 				explicit_memset(swd->sw_ictx, 0, axf->ctxsize);
1125 				free(swd->sw_ictx, M_CRYPTO_DATA);
1126 			}
1127 			break;
1128 
1129 		case CRYPTO_DEFLATE_COMP:
1130 		case CRYPTO_DEFLATE_COMP_NOGROW:
1131 		case CRYPTO_GZIP_COMP:
1132 			break;
1133 		}
1134 
1135 		free(swd, M_CRYPTO_DATA);
1136 	}
1137 	return 0;
1138 }
1139 
1140 /*
1141  * Process a software request.
1142  */
1143 static int
1144 swcr_process(void *arg, struct cryptop *crp, int hint)
1145 {
1146 	struct cryptodesc *crd;
1147 	struct swcr_data *sw;
1148 	u_int32_t lid;
1149 	int type;
1150 
1151 	/* Sanity check */
1152 	if (crp == NULL)
1153 		return EINVAL;
1154 
1155 	if (crp->crp_desc == NULL || crp->crp_buf == NULL) {
1156 		crp->crp_etype = EINVAL;
1157 		goto done;
1158 	}
1159 
1160 	lid = crp->crp_sid & 0xffffffff;
1161 	if (lid >= swcr_sesnum || lid == 0 || swcr_sessions[lid] == NULL) {
1162 		crp->crp_etype = ENOENT;
1163 		goto done;
1164 	}
1165 
1166 	if (crp->crp_flags & CRYPTO_F_IMBUF) {
1167 		type = CRYPTO_BUF_MBUF;
1168 	} else if (crp->crp_flags & CRYPTO_F_IOV) {
1169 		type = CRYPTO_BUF_IOV;
1170 	} else {
1171 		type = CRYPTO_BUF_CONTIG;
1172 	}
1173 
1174 	/* Go through crypto descriptors, processing as we go */
1175 	for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
1176 		/*
1177 		 * Find the crypto context.
1178 		 *
1179 		 * XXX Note that the logic here prevents us from having
1180 		 * XXX the same algorithm multiple times in a session
1181 		 * XXX (or rather, we can but it won't give us the right
1182 		 * XXX results). To do that, we'd need some way of differentiating
1183 		 * XXX between the various instances of an algorithm (so we can
1184 		 * XXX locate the correct crypto context).
1185 		 */
1186 		for (sw = swcr_sessions[lid];
1187 		    sw && sw->sw_alg != crd->crd_alg;
1188 		    sw = sw->sw_next)
1189 			;
1190 
1191 		/* No such context ? */
1192 		if (sw == NULL) {
1193 			crp->crp_etype = EINVAL;
1194 			goto done;
1195 		}
1196 
1197 		switch (sw->sw_alg) {
1198 		case CRYPTO_DES_CBC:
1199 		case CRYPTO_3DES_CBC:
1200 		case CRYPTO_BLF_CBC:
1201 		case CRYPTO_CAST_CBC:
1202 		case CRYPTO_SKIPJACK_CBC:
1203 		case CRYPTO_RIJNDAEL128_CBC:
1204 		case CRYPTO_CAMELLIA_CBC:
1205 		case CRYPTO_AES_CTR:
1206 			if ((crp->crp_etype = swcr_encdec(crd, sw,
1207 			    crp->crp_buf, type)) != 0)
1208 				goto done;
1209 			break;
1210 		case CRYPTO_NULL_CBC:
1211 			crp->crp_etype = 0;
1212 			break;
1213 		case CRYPTO_MD5_HMAC:
1214 		case CRYPTO_MD5_HMAC_96:
1215 		case CRYPTO_SHA1_HMAC:
1216 		case CRYPTO_SHA1_HMAC_96:
1217 		case CRYPTO_SHA2_256_HMAC:
1218 		case CRYPTO_SHA2_384_HMAC:
1219 		case CRYPTO_SHA2_512_HMAC:
1220 		case CRYPTO_RIPEMD160_HMAC:
1221 		case CRYPTO_RIPEMD160_HMAC_96:
1222 		case CRYPTO_NULL_HMAC:
1223 		case CRYPTO_MD5_KPDK:
1224 		case CRYPTO_SHA1_KPDK:
1225 		case CRYPTO_MD5:
1226 		case CRYPTO_SHA1:
1227 		case CRYPTO_AES_XCBC_MAC_96:
1228 			if ((crp->crp_etype = swcr_authcompute(crp, crd, sw,
1229 			    crp->crp_buf, type)) != 0)
1230 				goto done;
1231 			break;
1232 
1233 		case CRYPTO_AES_GCM_16:
1234 		case CRYPTO_AES_GMAC:
1235 		case CRYPTO_AES_128_GMAC:
1236 		case CRYPTO_AES_192_GMAC:
1237 		case CRYPTO_AES_256_GMAC:
1238 			crp->crp_etype = swcr_combined(crp, type);
1239 			goto done;
1240 
1241 		case CRYPTO_DEFLATE_COMP:
1242 		case CRYPTO_DEFLATE_COMP_NOGROW:
1243 		case CRYPTO_GZIP_COMP:
1244 			DPRINTF(("swcr_process: compdec for %d\n", sw->sw_alg));
1245 			if ((crp->crp_etype = swcr_compdec(crd, sw,
1246 			    crp->crp_buf, type, &crp->crp_olen)) != 0)
1247 				goto done;
1248 			break;
1249 
1250 		default:
1251 			/* Unknown/unsupported algorithm */
1252 			crp->crp_etype = EINVAL;
1253 			goto done;
1254 		}
1255 	}
1256 
1257 done:
1258 	DPRINTF(("request %p done\n", crp));
1259 	crypto_done(crp);
1260 	return 0;
1261 }
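
/*
 * Illustrative sketch (not part of this driver): how a kernel consumer
 * typically reaches swcr_newsession()/swcr_process() through the
 * opencrypto framework.  The calls and fields below are the classic
 * OCF interfaces declared in <opencrypto/cryptodev.h>; error handling
 * is omitted and my_done_cb is a hypothetical completion callback.
 *
 *	struct cryptoini cria;
 *	struct cryptop *crp;
 *	u_int64_t sid;
 *
 *	memset(&cria, 0, sizeof(cria));
 *	cria.cri_alg = CRYPTO_SHA1_HMAC;
 *	cria.cri_key = key;
 *	cria.cri_klen = 20 * 8;			// key length in bits
 *	crypto_newsession(&sid, &cria, 0);	// 0: software is fine
 *
 *	crp = crypto_getreq(1);			// one descriptor
 *	crp->crp_sid = sid;
 *	crp->crp_ilen = m->m_pkthdr.len;
 *	crp->crp_flags = CRYPTO_F_IMBUF;
 *	crp->crp_buf = (void *)m;
 *	crp->crp_callback = my_done_cb;
 *	crp->crp_desc->crd_alg = CRYPTO_SHA1_HMAC;
 *	crp->crp_desc->crd_skip = 0;
 *	crp->crp_desc->crd_len = m->m_pkthdr.len;
 *	crp->crp_desc->crd_inject = 0;		// where to write the MAC
 *	crypto_dispatch(crp);
 */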
1262 
1263 static void
1264 swcr_init(void)
1265 {
1266 	swcr_id = crypto_get_driverid(CRYPTOCAP_F_SOFTWARE);
1267 	if (swcr_id < 0) {
1268 		/* This should never happen */
1269 		panic("Software crypto device cannot initialize!");
1270 	}
1271 
1272 	crypto_register(swcr_id, CRYPTO_DES_CBC,
1273 	    0, 0, swcr_newsession, swcr_freesession, swcr_process, NULL);
1274 #define	REGISTER(alg) \
1275 	crypto_register(swcr_id, alg, 0, 0, NULL, NULL, NULL, NULL)
1276 
1277 	REGISTER(CRYPTO_3DES_CBC);
1278 	REGISTER(CRYPTO_BLF_CBC);
1279 	REGISTER(CRYPTO_CAST_CBC);
1280 	REGISTER(CRYPTO_SKIPJACK_CBC);
1281 	REGISTER(CRYPTO_CAMELLIA_CBC);
1282 	REGISTER(CRYPTO_AES_CTR);
1283 	REGISTER(CRYPTO_AES_GCM_16);
1284 	REGISTER(CRYPTO_AES_GMAC);
1285 	REGISTER(CRYPTO_NULL_CBC);
1286 	REGISTER(CRYPTO_MD5_HMAC);
1287 	REGISTER(CRYPTO_MD5_HMAC_96);
1288 	REGISTER(CRYPTO_SHA1_HMAC);
1289 	REGISTER(CRYPTO_SHA1_HMAC_96);
1290 	REGISTER(CRYPTO_SHA2_256_HMAC);
1291 	REGISTER(CRYPTO_SHA2_384_HMAC);
1292 	REGISTER(CRYPTO_SHA2_512_HMAC);
1293 	REGISTER(CRYPTO_RIPEMD160_HMAC);
1294 	REGISTER(CRYPTO_RIPEMD160_HMAC_96);
1295 	REGISTER(CRYPTO_NULL_HMAC);
1296 	REGISTER(CRYPTO_MD5_KPDK);
1297 	REGISTER(CRYPTO_SHA1_KPDK);
1298 	REGISTER(CRYPTO_MD5);
1299 	REGISTER(CRYPTO_SHA1);
1300 	REGISTER(CRYPTO_AES_XCBC_MAC_96);
1301 	REGISTER(CRYPTO_AES_128_GMAC);
1302 	REGISTER(CRYPTO_AES_192_GMAC);
1303 	REGISTER(CRYPTO_AES_256_GMAC);
1304 	REGISTER(CRYPTO_RIJNDAEL128_CBC);
1305 	REGISTER(CRYPTO_DEFLATE_COMP);
1306 	REGISTER(CRYPTO_DEFLATE_COMP_NOGROW);
1307 	REGISTER(CRYPTO_GZIP_COMP);
1308 #undef REGISTER
1309 }
1310 
1311 
1312 /*
1313  * Pseudo-device init routine for software crypto.
1314  */
1315 void	swcryptoattach(int);
1316 
1317 void
1318 swcryptoattach(int num)
1319 {
1320 
1321 	swcr_init();
1322 }
1323