xref: /openbsd-src/sys/crypto/cryptosoft.c (revision ac9b4aacc1da35008afea06a5d23c2f2dea9b93e)
1 /*	$OpenBSD: cryptosoft.c,v 1.64 2012/06/29 14:48:04 mikeb Exp $	*/
2 
3 /*
4  * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu)
5  *
6  * This code was written by Angelos D. Keromytis in Athens, Greece, in
7  * February 2000. Network Security Technologies Inc. (NSTI) kindly
8  * supported the development of this code.
9  *
10  * Copyright (c) 2000, 2001 Angelos D. Keromytis
11  *
12  * Permission to use, copy, and modify this software with or without fee
13  * is hereby granted, provided that this entire notice is included in
14  * all source code copies of any software which is or includes a copy or
15  * modification of this software.
16  *
17  * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
18  * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
19  * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
20  * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
21  * PURPOSE.
22  */
23 
24 #include <sys/param.h>
25 #include <sys/systm.h>
26 #include <sys/malloc.h>
27 #include <sys/mbuf.h>
28 #include <sys/errno.h>
29 #include <dev/rndvar.h>
30 #include <crypto/md5.h>
31 #include <crypto/sha1.h>
32 #include <crypto/rmd160.h>
33 #include <crypto/cast.h>
34 #include <crypto/blf.h>
35 #include <crypto/cryptodev.h>
36 #include <crypto/cryptosoft.h>
37 #include <crypto/xform.h>
38 
/* HMAC inner-pad constant: 0x36 repeated, per RFC 2104 (key XOR ipad). */
const u_int8_t hmac_ipad_buffer[HMAC_MAX_BLOCK_LEN] = {
	0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
	0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
	0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
	0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
	0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
	0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
	0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
	0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
	0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
	0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
	0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
	0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
	0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
	0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
	0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
	0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36
};
57 
/* HMAC outer-pad constant: 0x5C repeated, per RFC 2104 (key XOR opad). */
const u_int8_t hmac_opad_buffer[HMAC_MAX_BLOCK_LEN] = {
	0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
	0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
	0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
	0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
	0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
	0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
	0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
	0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
	0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
	0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
	0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
	0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
	0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
	0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
	0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
	0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C
};
76 
77 
/*
 * Global software-session table, indexed by session id.  Slot 0 is
 * deliberately left unused (see swcr_newsession).  Each slot holds a
 * linked list of swcr_data, one node per transform in the session.
 */
struct swcr_data **swcr_sessions = NULL;
u_int32_t swcr_sesnum = 0;	/* number of slots in swcr_sessions[] */
int32_t swcr_id = -1;		/* driver id from crypto_get_driverid() */

/*
 * Copy data to/from a crypto buffer, dispatching on the buffer type:
 * an mbuf chain (CRYPTO_BUF_MBUF) or a struct uio (CRYPTO_BUF_IOV).
 */
#define COPYBACK(x, a, b, c, d) \
	do { \
		if ((x) == CRYPTO_BUF_MBUF) \
			m_copyback((struct mbuf *)a,b,c,d,M_NOWAIT); \
		else \
			cuio_copyback((struct uio *)a,b,c,d); \
	} while (0)
#define COPYDATA(x, a, b, c, d) \
	do { \
		if ((x) == CRYPTO_BUF_MBUF) \
			m_copydata((struct mbuf *)a,b,c,d); \
		else \
			cuio_copydata((struct uio *)a,b,c,d); \
	} while (0)
96 
/*
 * Apply a symmetric encryption/decryption algorithm.
 *
 * crd:     descriptor with the algorithm, direction (CRD_F_ENCRYPT),
 *          IV flags, and the skip/len region of the buffer to process.
 * sw:      per-transform session state (key schedule, enc_xform ops).
 * buf:     data buffer, either an mbuf chain or a struct uio as
 *          selected by outtype.
 * outtype: CRYPTO_BUF_MBUF or CRYPTO_BUF_IOV.
 *
 * CBC chaining is implemented here for transforms without a reinit
 * method; transforms that provide reinit (e.g. CTR/XTS/GCM modes) do
 * all IV handling themselves and are fed raw blocks.  The data is
 * processed in place.  Returns 0 on success or EINVAL.
 */
int
swcr_encdec(struct cryptodesc *crd, struct swcr_data *sw, caddr_t buf,
    int outtype)
{
	unsigned char iv[EALG_MAX_BLOCK_LEN], blk[EALG_MAX_BLOCK_LEN], *idat;
	unsigned char *ivp, *nivp, iv2[EALG_MAX_BLOCK_LEN];
	struct enc_xform *exf;
	int i, k, j, blks, ind, count, ivlen;
	struct mbuf *m = NULL;
	struct uio *uio = NULL;

	exf = sw->sw_exf;
	blks = exf->blocksize;
	ivlen = exf->ivsize;

	/* Check for non-padded data */
	if (crd->crd_len % blks)
		return EINVAL;

	if (outtype == CRYPTO_BUF_MBUF)
		m = (struct mbuf *) buf;
	else
		uio = (struct uio *) buf;

	/* Initialize the IV */
	if (crd->crd_flags & CRD_F_ENCRYPT) {
		/* IV explicitly provided ? */
		if (crd->crd_flags & CRD_F_IV_EXPLICIT)
			bcopy(crd->crd_iv, iv, ivlen);
		else
			arc4random_buf(iv, ivlen);

		/* Do we need to write the IV */
		if (!(crd->crd_flags & CRD_F_IV_PRESENT))
			COPYBACK(outtype, buf, crd->crd_inject, ivlen, iv);

	} else {	/* Decryption */
			/* IV explicitly provided ? */
		if (crd->crd_flags & CRD_F_IV_EXPLICIT)
			bcopy(crd->crd_iv, iv, ivlen);
		else {
			/* Get IV off buf */
			COPYDATA(outtype, buf, crd->crd_inject, ivlen, iv);
		}
	}

	/* ivp always points at the chaining block for the next step. */
	ivp = iv;

	/*
	 * xforms that provide a reinit method perform all IV
	 * handling themselves.
	 */
	if (exf->reinit)
		exf->reinit(sw->sw_kschedule, iv);

	if (outtype == CRYPTO_BUF_MBUF) {
		/* Find beginning of data; k is the offset within mbuf m. */
		m = m_getptr(m, crd->crd_skip, &k);
		if (m == NULL)
			return EINVAL;

		i = crd->crd_len;	/* bytes remaining */

		while (i > 0) {
			/*
			 * If there's insufficient data at the end of
			 * an mbuf, we have to do some copying.
			 */
			if (m->m_len < k + blks && m->m_len != k) {
				/* Block straddles mbufs: bounce through blk[]. */
				m_copydata(m, k, blks, blk);

				/* Actual encryption/decryption */
				if (exf->reinit) {
					if (crd->crd_flags & CRD_F_ENCRYPT) {
						exf->encrypt(sw->sw_kschedule,
						    blk);
					} else {
						exf->decrypt(sw->sw_kschedule,
						    blk);
					}
				} else if (crd->crd_flags & CRD_F_ENCRYPT) {
					/* XOR with previous block */
					for (j = 0; j < blks; j++)
						blk[j] ^= ivp[j];

					exf->encrypt(sw->sw_kschedule, blk);

					/*
					 * Keep encrypted block for XOR'ing
					 * with next block
					 */
					bcopy(blk, iv, blks);
					ivp = iv;
				} else {	/* decrypt */
					/*
					 * Keep encrypted block for XOR'ing
					 * with next block.  iv/iv2 are used
					 * alternately so the saved ciphertext
					 * is not clobbered while ivp still
					 * points at it.
					 */
					nivp = (ivp == iv) ? iv2 : iv;
					bcopy(blk, nivp, blks);

					exf->decrypt(sw->sw_kschedule, blk);

					/* XOR with previous block */
					for (j = 0; j < blks; j++)
						blk[j] ^= ivp[j];
					ivp = nivp;
				}

				/* Copy back decrypted block */
				m_copyback(m, k, blks, blk, M_NOWAIT);

				/* Advance pointer */
				m = m_getptr(m, k + blks, &k);
				if (m == NULL)
					return EINVAL;

				i -= blks;

				/* Could be done... */
				if (i == 0)
					break;
			}

			/* Skip possibly empty mbufs */
			if (k == m->m_len) {
				for (m = m->m_next; m && m->m_len == 0;
				    m = m->m_next)
					;
				k = 0;
			}

			/* Sanity check */
			if (m == NULL)
				return EINVAL;

			/*
			 * Warning: idat may point to garbage here, but
			 * we only use it in the while() loop, only if
			 * there are indeed enough data.
			 */
			idat = mtod(m, unsigned char *) + k;

			/* Fast path: process blocks in place inside this mbuf. */
			while (m->m_len >= k + blks && i > 0) {
				if (exf->reinit) {
					if (crd->crd_flags & CRD_F_ENCRYPT) {
						exf->encrypt(sw->sw_kschedule,
						    idat);
					} else {
						exf->decrypt(sw->sw_kschedule,
						    idat);
					}
				} else if (crd->crd_flags & CRD_F_ENCRYPT) {
					/* XOR with previous block/IV */
					for (j = 0; j < blks; j++)
						idat[j] ^= ivp[j];

					exf->encrypt(sw->sw_kschedule, idat);
					ivp = idat;
				} else {	/* decrypt */
					/*
					 * Keep encrypted block to be used
					 * in next block's processing.
					 */
					nivp = (ivp == iv) ? iv2 : iv;
					bcopy(idat, nivp, blks);

					exf->decrypt(sw->sw_kschedule, idat);

					/* XOR with previous block/IV */
					for (j = 0; j < blks; j++)
						idat[j] ^= ivp[j];
					ivp = nivp;
				}

				idat += blks;
				k += blks;
				i -= blks;
			}
		}
	} else {
		/* Find beginning of data; ind/k index into the iovec array. */
		count = crd->crd_skip;
		ind = cuio_getptr(uio, count, &k);
		if (ind == -1)
			return EINVAL;

		i = crd->crd_len;	/* bytes remaining */

		while (i > 0) {
			/*
			 * If there's insufficient data at the end,
			 * we have to do some copying.
			 */
			if (uio->uio_iov[ind].iov_len < k + blks &&
			    uio->uio_iov[ind].iov_len != k) {
				/* Block straddles iovecs: bounce through blk[]. */
				cuio_copydata(uio, count, blks, blk);

				/* Actual encryption/decryption */
				if (exf->reinit) {
					if (crd->crd_flags & CRD_F_ENCRYPT) {
						exf->encrypt(sw->sw_kschedule,
						    blk);
					} else {
						exf->decrypt(sw->sw_kschedule,
						    blk);
					}
				} else if (crd->crd_flags & CRD_F_ENCRYPT) {
					/* XOR with previous block */
					for (j = 0; j < blks; j++)
						blk[j] ^= ivp[j];

					exf->encrypt(sw->sw_kschedule, blk);

					/*
					 * Keep encrypted block for XOR'ing
					 * with next block
					 */
					bcopy(blk, iv, blks);
					ivp = iv;
				} else {	/* decrypt */
					/*
					 * Keep encrypted block for XOR'ing
					 * with next block.  Alternate between
					 * iv and iv2 (see mbuf path above).
					 */
					nivp = (ivp == iv) ? iv2 : iv;
					bcopy(blk, nivp, blks);

					exf->decrypt(sw->sw_kschedule, blk);

					/* XOR with previous block */
					for (j = 0; j < blks; j++)
						blk[j] ^= ivp[j];
					ivp = nivp;
				}

				/* Copy back decrypted block */
				cuio_copyback(uio, count, blks, blk);

				count += blks;

				/* Advance pointer */
				ind = cuio_getptr(uio, count, &k);
				if (ind == -1)
					return (EINVAL);

				i -= blks;

				/* Could be done... */
				if (i == 0)
					break;
			}

			/*
			 * Warning: idat may point to garbage here, but
			 * we only use it in the while() loop, only if
			 * there are indeed enough data.
			 */
			idat = (char *)uio->uio_iov[ind].iov_base + k;

			/* Fast path: process blocks in place inside this iovec. */
			while (uio->uio_iov[ind].iov_len >= k + blks &&
			    i > 0) {
				if (exf->reinit) {
					if (crd->crd_flags & CRD_F_ENCRYPT) {
						exf->encrypt(sw->sw_kschedule,
						    idat);
					} else {
						exf->decrypt(sw->sw_kschedule,
						    idat);
					}
				} else if (crd->crd_flags & CRD_F_ENCRYPT) {
					/* XOR with previous block/IV */
					for (j = 0; j < blks; j++)
						idat[j] ^= ivp[j];

					exf->encrypt(sw->sw_kschedule, idat);
					ivp = idat;
				} else {	/* decrypt */
					/*
					 * Keep encrypted block to be used
					 * in next block's processing.
					 */
					nivp = (ivp == iv) ? iv2 : iv;
					bcopy(idat, nivp, blks);

					exf->decrypt(sw->sw_kschedule, idat);

					/* XOR with previous block/IV */
					for (j = 0; j < blks; j++)
						idat[j] ^= ivp[j];
					ivp = nivp;
				}

				idat += blks;
				count += blks;
				k += blks;
				i -= blks;
			}

			/*
			 * Advance to the next iov if the end of the current iov
			 * is aligned with the end of a cipher block.
			 * Note that the code is equivalent to calling:
			 *	ind = cuio_getptr(uio, count, &k);
			 */
			if (i > 0 && k == uio->uio_iov[ind].iov_len) {
				k = 0;
				ind++;
				if (ind >= uio->uio_iovcnt)
					return (EINVAL);
			}
		}
	}

	return 0; /* Done with encryption/decryption */
}
416 
/*
 * Compute keyed-hash authenticator.
 *
 * Runs the session's hash over the crd_skip/crd_len region of the
 * buffer (mbuf chain or uio, per outtype), finalizes it according to
 * the algorithm family (HMAC, KPDK, or plain hash), and injects the
 * result: into the buffer at crd_inject for mbufs, or into
 * crp->crp_mac otherwise.  Returns 0 or an errno.
 */
int
swcr_authcompute(struct cryptop *crp, struct cryptodesc *crd,
    struct swcr_data *sw, caddr_t buf, int outtype)
{
	unsigned char aalg[AALG_MAX_RESULT_LEN];
	struct auth_hash *axf;
	union authctx ctx;
	int err;

	if (sw->sw_ictx == 0)
		return EINVAL;

	axf = sw->sw_axf;

	/* Start from the precomputed (keyed) inner context. */
	bcopy(sw->sw_ictx, &ctx, axf->ctxsize);

	if (outtype == CRYPTO_BUF_MBUF)
		err = m_apply((struct mbuf *) buf, crd->crd_skip, crd->crd_len,
		    (int (*)(caddr_t, caddr_t, unsigned int)) axf->Update,
		    (caddr_t) &ctx);
	else
		err = cuio_apply((struct uio *) buf, crd->crd_skip,
		    crd->crd_len,
		    (int (*)(caddr_t, caddr_t, unsigned int)) axf->Update,
		    (caddr_t) &ctx);

	if (err)
		return err;

	/* For extended sequence numbers, mix in the high-order 32 bits. */
	if (crd->crd_flags & CRD_F_ESN)
		axf->Update(&ctx, crd->crd_esn, 4);

	switch (sw->sw_alg) {
	case CRYPTO_MD5_HMAC:
	case CRYPTO_SHA1_HMAC:
	case CRYPTO_RIPEMD160_HMAC:
	case CRYPTO_SHA2_256_HMAC:
	case CRYPTO_SHA2_384_HMAC:
	case CRYPTO_SHA2_512_HMAC:
		if (sw->sw_octx == NULL)
			return EINVAL;

		/* HMAC: outer hash over the inner digest. */
		axf->Final(aalg, &ctx);
		bcopy(sw->sw_octx, &ctx, axf->ctxsize);
		axf->Update(&ctx, aalg, axf->hashsize);
		axf->Final(aalg, &ctx);
		break;

	case CRYPTO_MD5_KPDK:
	case CRYPTO_SHA1_KPDK:
		if (sw->sw_octx == NULL)
			return EINVAL;

		/* KPDK: append the raw key (stored in sw_octx) and finalize. */
		axf->Update(&ctx, sw->sw_octx, sw->sw_klen);
		axf->Final(aalg, &ctx);
		break;

	case CRYPTO_MD5:
	case CRYPTO_SHA1:
		/* Plain (unkeyed) hash. */
		axf->Final(aalg, &ctx);
		break;
	}

	/* Inject the authentication data */
	if (outtype == CRYPTO_BUF_MBUF)
		COPYBACK(outtype, buf, crd->crd_inject, axf->authsize, aalg);
	else
		bcopy(aalg, crp->crp_mac, axf->authsize);

	return 0;
}
491 
/*
 * Apply a combined encryption-authentication transformation
 * (AES-GCM / AES-GMAC).  The request must carry exactly one cipher
 * descriptor (AES_GCM_16 or AES_GMAC) and one MAC descriptor
 * (AES_{128,192,256}_GMAC); both are located in the session's
 * swcr_data list.  AAD and payload are fed to the MAC in blksz-sized
 * chunks via a bounce buffer, the payload is transformed in place,
 * and the tag is injected at crda->crd_inject (mbuf) or into
 * crp->crp_mac (iov).  Returns 0 or an errno.
 */
int
swcr_combined(struct cryptop *crp)
{
	uint32_t blkbuf[howmany(EALG_MAX_BLOCK_LEN, sizeof(uint32_t))];
	u_char *blk = (u_char *)blkbuf;	/* 32-bit aligned bounce buffer */
	u_char aalg[AALG_MAX_RESULT_LEN];
	u_char iv[EALG_MAX_BLOCK_LEN];
	union authctx ctx;
	struct cryptodesc *crd, *crda = NULL, *crde = NULL;
	struct swcr_data *sw, *swa, *swe;
	struct auth_hash *axf = NULL;
	struct enc_xform *exf = NULL;
	struct mbuf *m = NULL;
	struct uio *uio = NULL;
	caddr_t buf = (caddr_t)crp->crp_buf;
	uint32_t *blkp;
	int aadlen, blksz, i, ivlen, outtype, left, len;

	/* Pair up each descriptor with its session transform state. */
	for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
		for (sw = swcr_sessions[crp->crp_sid & 0xffffffff];
		     sw && sw->sw_alg != crd->crd_alg;
		     sw = sw->sw_next)
			;
		if (sw == NULL)
			return (EINVAL);

		switch (sw->sw_alg) {
		case CRYPTO_AES_GCM_16:
		case CRYPTO_AES_GMAC:
			swe = sw;
			crde = crd;
			exf = swe->sw_exf;
			ivlen = exf->ivsize;
			break;
		case CRYPTO_AES_128_GMAC:
		case CRYPTO_AES_192_GMAC:
		case CRYPTO_AES_256_GMAC:
			swa = sw;
			crda = crd;
			axf = swa->sw_axf;
			if (swa->sw_ictx == 0)
				return (EINVAL);
			bcopy(swa->sw_ictx, &ctx, axf->ctxsize);
			blksz = axf->blocksize;
			break;
		default:
			return (EINVAL);
		}
	}
	/* Both halves of the combined transform are required. */
	if (crde == NULL || crda == NULL)
		return (EINVAL);

	if (crp->crp_flags & CRYPTO_F_IMBUF) {
		outtype = CRYPTO_BUF_MBUF;
		m = (struct mbuf *)buf;
	} else {
		outtype = CRYPTO_BUF_IOV;
		uio = (struct uio *)buf;
	}

	/* Initialize the IV */
	if (crde->crd_flags & CRD_F_ENCRYPT) {
		/* IV explicitly provided ? */
		if (crde->crd_flags & CRD_F_IV_EXPLICIT)
			bcopy(crde->crd_iv, iv, ivlen);
		else
			arc4random_buf(iv, ivlen);

		/* Do we need to write the IV */
		if (!(crde->crd_flags & CRD_F_IV_PRESENT))
			COPYBACK(outtype, buf, crde->crd_inject, ivlen, iv);

	} else {	/* Decryption */
			/* IV explicitly provided ? */
		if (crde->crd_flags & CRD_F_IV_EXPLICIT)
			bcopy(crde->crd_iv, iv, ivlen);
		else {
			/* Get IV off buf */
			COPYDATA(outtype, buf, crde->crd_inject, ivlen, iv);
		}
	}

	/* Supply MAC with IV */
	if (axf->Reinit)
		axf->Reinit(&ctx, iv, ivlen);

	/* Supply MAC with AAD */
	aadlen = crda->crd_len;
	/* The ESN high-order word trails the AAD proper. */
	if (crda->crd_flags & CRD_F_ESN)
		aadlen += 4;
	for (i = 0; i < aadlen; i += blksz) {
		len = 0;
		if (i < crda->crd_len) {
			len = MIN(crda->crd_len - i, blksz);
			COPYDATA(outtype, buf, crda->crd_skip + i, len, blk);
		}
		left = blksz - len;
		if (crda->crd_flags & CRD_F_ESN && left > 0) {
			bcopy(crda->crd_esn, blk + len, MIN(left, aadlen - i));
			len += MIN(left, aadlen - i);
		}
		/* Zero-pad the final partial block. */
		bzero(blk + len, blksz - len);
		axf->Update(&ctx, blk, blksz);
	}

	if (exf->reinit)
		exf->reinit(swe->sw_kschedule, iv);

	/*
	 * Do encryption/decryption with MAC.  The MAC always sees the
	 * ciphertext: after encrypt when encrypting, before decrypt
	 * when decrypting.
	 */
	for (i = 0; i < crde->crd_len; i += blksz) {
		len = MIN(crde->crd_len - i, blksz);
		if (len < blksz)
			bzero(blk, blksz);
		COPYDATA(outtype, buf, crde->crd_skip + i, len, blk);
		if (crde->crd_flags & CRD_F_ENCRYPT) {
			exf->encrypt(swe->sw_kschedule, blk);
			axf->Update(&ctx, blk, len);
		} else {
			axf->Update(&ctx, blk, len);
			exf->decrypt(swe->sw_kschedule, blk);
		}
		COPYBACK(outtype, buf, crde->crd_skip + i, len, blk);
	}

	/* Do any required special finalization */
	switch (crda->crd_alg) {
		case CRYPTO_AES_128_GMAC:
		case CRYPTO_AES_192_GMAC:
		case CRYPTO_AES_256_GMAC:
			/* length block: [len(AAD) in bits, len(C) in bits] */
			bzero(blk, blksz);
			blkp = (uint32_t *)blk + 1;
			*blkp = htobe32(crda->crd_len * 8);
			blkp = (uint32_t *)blk + 3;
			*blkp = htobe32(crde->crd_len * 8);
			axf->Update(&ctx, blk, blksz);
			break;
	}

	/* Finalize MAC */
	axf->Final(aalg, &ctx);

	/* Inject the authentication data */
	if (outtype == CRYPTO_BUF_MBUF)
		COPYBACK(outtype, buf, crda->crd_inject, axf->authsize, aalg);
	else
		bcopy(aalg, crp->crp_mac, axf->authsize);

	return (0);
}
645 
646 /*
647  * Apply a compression/decompression algorithm
648  */
649 int
650 swcr_compdec(struct cryptodesc *crd, struct swcr_data *sw,
651     caddr_t buf, int outtype)
652 {
653 	u_int8_t *data, *out;
654 	struct comp_algo *cxf;
655 	int adj;
656 	u_int32_t result;
657 
658 	cxf = sw->sw_cxf;
659 
660 	/* We must handle the whole buffer of data in one time
661 	 * then if there is not all the data in the mbuf, we must
662 	 * copy in a buffer.
663 	 */
664 
665 	data = malloc(crd->crd_len, M_CRYPTO_DATA, M_NOWAIT);
666 	if (data == NULL)
667 		return (EINVAL);
668 	COPYDATA(outtype, buf, crd->crd_skip, crd->crd_len, data);
669 
670 	if (crd->crd_flags & CRD_F_COMP)
671 		result = cxf->compress(data, crd->crd_len, &out);
672 	else
673 		result = cxf->decompress(data, crd->crd_len, &out);
674 
675 	free(data, M_CRYPTO_DATA);
676 	if (result == 0)
677 		return EINVAL;
678 
679 	/* Copy back the (de)compressed data. m_copyback is
680 	 * extending the mbuf as necessary.
681 	 */
682 	sw->sw_size = result;
683 	/* Check the compressed size when doing compression */
684 	if (crd->crd_flags & CRD_F_COMP) {
685 		if (result > crd->crd_len) {
686 			/* Compression was useless, we lost time */
687 			free(out, M_CRYPTO_DATA);
688 			return 0;
689 		}
690 	}
691 
692 	COPYBACK(outtype, buf, crd->crd_skip, result, out);
693 	if (result < crd->crd_len) {
694 		adj = result - crd->crd_len;
695 		if (outtype == CRYPTO_BUF_MBUF) {
696 			adj = result - crd->crd_len;
697 			m_adj((struct mbuf *)buf, adj);
698 		} else {
699 			struct uio *uio = (struct uio *)buf;
700 			int ind;
701 
702 			adj = crd->crd_len - result;
703 			ind = uio->uio_iovcnt - 1;
704 
705 			while (adj > 0 && ind >= 0) {
706 				if (adj < uio->uio_iov[ind].iov_len) {
707 					uio->uio_iov[ind].iov_len -= adj;
708 					break;
709 				}
710 
711 				adj -= uio->uio_iov[ind].iov_len;
712 				uio->uio_iov[ind].iov_len = 0;
713 				ind--;
714 				uio->uio_iovcnt--;
715 			}
716 		}
717 	}
718 	free(out, M_CRYPTO_DATA);
719 	return 0;
720 }
721 
/*
 * Generate a new software session.
 *
 * Walks the cryptoini chain, allocating one swcr_data node per
 * requested transform and initializing its key schedule or hash
 * context.  The session table is grown (doubled) on demand; slot 0
 * is never used.  On success the slot index is returned through *sid.
 * Returns 0, EINVAL or ENOBUFS; partial allocations are torn down
 * via swcr_freesession() on failure.
 */
int
swcr_newsession(u_int32_t *sid, struct cryptoini *cri)
{
	struct swcr_data **swd;
	struct auth_hash *axf;
	struct enc_xform *txf;
	struct comp_algo *cxf;
	u_int32_t i;
	int k;

	if (sid == NULL || cri == NULL)
		return EINVAL;

	/* Look for a free slot (slot 0 is reserved). */
	if (swcr_sessions) {
		for (i = 1; i < swcr_sesnum; i++)
			if (swcr_sessions[i] == NULL)
				break;
	}

	/* No free slot: create or double the session table. */
	if (swcr_sessions == NULL || i == swcr_sesnum) {
		if (swcr_sessions == NULL) {
			i = 1; /* We leave swcr_sessions[0] empty */
			swcr_sesnum = CRYPTO_SW_SESSIONS;
		} else
			swcr_sesnum *= 2;

		swd = malloc(swcr_sesnum * sizeof(struct swcr_data *),
		    M_CRYPTO_DATA, M_NOWAIT | M_ZERO);
		if (swd == NULL) {
			/* Reset session number */
			if (swcr_sesnum == CRYPTO_SW_SESSIONS)
				swcr_sesnum = 0;
			else
				swcr_sesnum /= 2;
			return ENOBUFS;
		}

		/* Copy existing sessions */
		if (swcr_sessions) {
			bcopy(swcr_sessions, swd,
			    (swcr_sesnum / 2) * sizeof(struct swcr_data *));
			free(swcr_sessions, M_CRYPTO_DATA);
		}

		swcr_sessions = swd;
	}

	swd = &swcr_sessions[i];
	*sid = i;

	/* One swcr_data per transform, linked via sw_next. */
	while (cri) {
		*swd = malloc(sizeof(struct swcr_data), M_CRYPTO_DATA,
		    M_NOWAIT | M_ZERO);
		if (*swd == NULL) {
			swcr_freesession(i);
			return ENOBUFS;
		}

		switch (cri->cri_alg) {
		case CRYPTO_DES_CBC:
			txf = &enc_xform_des;
			goto enccommon;
		case CRYPTO_3DES_CBC:
			txf = &enc_xform_3des;
			goto enccommon;
		case CRYPTO_BLF_CBC:
			txf = &enc_xform_blf;
			goto enccommon;
		case CRYPTO_CAST_CBC:
			txf = &enc_xform_cast5;
			goto enccommon;
		case CRYPTO_RIJNDAEL128_CBC:
			txf = &enc_xform_rijndael128;
			goto enccommon;
		case CRYPTO_AES_CTR:
			txf = &enc_xform_aes_ctr;
			goto enccommon;
		case CRYPTO_AES_XTS:
			txf = &enc_xform_aes_xts;
			goto enccommon;
		case CRYPTO_AES_GCM_16:
			txf = &enc_xform_aes_gcm;
			goto enccommon;
		case CRYPTO_AES_GMAC:
			/* GMAC is keyed via its auth side; no setkey here. */
			txf = &enc_xform_aes_gmac;
			(*swd)->sw_exf = txf;
			break;
		case CRYPTO_NULL:
			txf = &enc_xform_null;
			goto enccommon;
		enccommon:
			if (txf->setkey(&((*swd)->sw_kschedule), cri->cri_key,
			    cri->cri_klen / 8) < 0) {
				swcr_freesession(i);
				return EINVAL;
			}
			(*swd)->sw_exf = txf;
			break;

		case CRYPTO_MD5_HMAC:
			axf = &auth_hash_hmac_md5_96;
			goto authcommon;
		case CRYPTO_SHA1_HMAC:
			axf = &auth_hash_hmac_sha1_96;
			goto authcommon;
		case CRYPTO_RIPEMD160_HMAC:
			axf = &auth_hash_hmac_ripemd_160_96;
			goto authcommon;
		case CRYPTO_SHA2_256_HMAC:
			axf = &auth_hash_hmac_sha2_256_128;
			goto authcommon;
		case CRYPTO_SHA2_384_HMAC:
			axf = &auth_hash_hmac_sha2_384_192;
			goto authcommon;
		case CRYPTO_SHA2_512_HMAC:
			axf = &auth_hash_hmac_sha2_512_256;
		authcommon:
			/*
			 * Precompute the HMAC inner and outer contexts
			 * (RFC 2104): hash keyed with key^ipad and
			 * key^opad respectively.  The key is XOR-restored
			 * to its original value before we return.
			 */
			(*swd)->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA,
			    M_NOWAIT);
			if ((*swd)->sw_ictx == NULL) {
				swcr_freesession(i);
				return ENOBUFS;
			}

			(*swd)->sw_octx = malloc(axf->ctxsize, M_CRYPTO_DATA,
			    M_NOWAIT);
			if ((*swd)->sw_octx == NULL) {
				swcr_freesession(i);
				return ENOBUFS;
			}

			for (k = 0; k < cri->cri_klen / 8; k++)
				cri->cri_key[k] ^= HMAC_IPAD_VAL;

			axf->Init((*swd)->sw_ictx);
			axf->Update((*swd)->sw_ictx, cri->cri_key,
			    cri->cri_klen / 8);
			axf->Update((*swd)->sw_ictx, hmac_ipad_buffer,
			    axf->blocksize - (cri->cri_klen / 8));

			/* Flip ipad bits off, opad bits on, in one pass. */
			for (k = 0; k < cri->cri_klen / 8; k++)
				cri->cri_key[k] ^= (HMAC_IPAD_VAL ^ HMAC_OPAD_VAL);

			axf->Init((*swd)->sw_octx);
			axf->Update((*swd)->sw_octx, cri->cri_key,
			    cri->cri_klen / 8);
			axf->Update((*swd)->sw_octx, hmac_opad_buffer,
			    axf->blocksize - (cri->cri_klen / 8));

			/* Restore the caller's key. */
			for (k = 0; k < cri->cri_klen / 8; k++)
				cri->cri_key[k] ^= HMAC_OPAD_VAL;
			(*swd)->sw_axf = axf;
			break;

		case CRYPTO_MD5_KPDK:
			axf = &auth_hash_key_md5;
			goto auth2common;

		case CRYPTO_SHA1_KPDK:
			axf = &auth_hash_key_sha1;
		auth2common:
			(*swd)->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA,
			    M_NOWAIT);
			if ((*swd)->sw_ictx == NULL) {
				swcr_freesession(i);
				return ENOBUFS;
			}

			/* Store the key so we can "append" it to the payload */
			(*swd)->sw_octx = malloc(cri->cri_klen / 8, M_CRYPTO_DATA,
			    M_NOWAIT);
			if ((*swd)->sw_octx == NULL) {
				swcr_freesession(i);
				return ENOBUFS;
			}

			(*swd)->sw_klen = cri->cri_klen / 8;
			bcopy(cri->cri_key, (*swd)->sw_octx, cri->cri_klen / 8);
			axf->Init((*swd)->sw_ictx);
			axf->Update((*swd)->sw_ictx, cri->cri_key,
			    cri->cri_klen / 8);
			axf->Final(NULL, (*swd)->sw_ictx);
			(*swd)->sw_axf = axf;
			break;

		case CRYPTO_MD5:
			axf = &auth_hash_md5;
			goto auth3common;

		case CRYPTO_SHA1:
			axf = &auth_hash_sha1;
		auth3common:
			/* Plain (unkeyed) hash: just an initialized context. */
			(*swd)->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA,
			    M_NOWAIT);
			if ((*swd)->sw_ictx == NULL) {
				swcr_freesession(i);
				return ENOBUFS;
			}

			axf->Init((*swd)->sw_ictx);
			(*swd)->sw_axf = axf;
			break;

		case CRYPTO_AES_128_GMAC:
			axf = &auth_hash_gmac_aes_128;
			goto auth4common;

		case CRYPTO_AES_192_GMAC:
			axf = &auth_hash_gmac_aes_192;
			goto auth4common;

		case CRYPTO_AES_256_GMAC:
			axf = &auth_hash_gmac_aes_256;
		auth4common:
			(*swd)->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA,
			    M_NOWAIT);
			if ((*swd)->sw_ictx == NULL) {
				swcr_freesession(i);
				return ENOBUFS;
			}
			axf->Init((*swd)->sw_ictx);
			axf->Setkey((*swd)->sw_ictx, cri->cri_key,
			    cri->cri_klen / 8);
			(*swd)->sw_axf = axf;
			break;

		case CRYPTO_DEFLATE_COMP:
			cxf = &comp_algo_deflate;
			(*swd)->sw_cxf = cxf;
			break;
		case CRYPTO_ESN:
			/* nothing to do */
			break;
		default:
			swcr_freesession(i);
			return EINVAL;
		}

		(*swd)->sw_alg = cri->cri_alg;
		cri = cri->cri_next;
		swd = &((*swd)->sw_next);
	}
	return 0;
}
969 
970 /*
971  * Free a session.
972  */
973 int
974 swcr_freesession(u_int64_t tid)
975 {
976 	struct swcr_data *swd;
977 	struct enc_xform *txf;
978 	struct auth_hash *axf;
979 	u_int32_t sid = ((u_int32_t) tid) & 0xffffffff;
980 
981 	if (sid > swcr_sesnum || swcr_sessions == NULL ||
982 	    swcr_sessions[sid] == NULL)
983 		return EINVAL;
984 
985 	/* Silently accept and return */
986 	if (sid == 0)
987 		return 0;
988 
989 	while ((swd = swcr_sessions[sid]) != NULL) {
990 		swcr_sessions[sid] = swd->sw_next;
991 
992 		switch (swd->sw_alg) {
993 		case CRYPTO_DES_CBC:
994 		case CRYPTO_3DES_CBC:
995 		case CRYPTO_BLF_CBC:
996 		case CRYPTO_CAST_CBC:
997 		case CRYPTO_RIJNDAEL128_CBC:
998 		case CRYPTO_AES_CTR:
999 		case CRYPTO_AES_XTS:
1000 		case CRYPTO_AES_GCM_16:
1001 		case CRYPTO_AES_GMAC:
1002 		case CRYPTO_NULL:
1003 			txf = swd->sw_exf;
1004 
1005 			if (swd->sw_kschedule)
1006 				txf->zerokey(&(swd->sw_kschedule));
1007 			break;
1008 
1009 		case CRYPTO_MD5_HMAC:
1010 		case CRYPTO_SHA1_HMAC:
1011 		case CRYPTO_RIPEMD160_HMAC:
1012 		case CRYPTO_SHA2_256_HMAC:
1013 		case CRYPTO_SHA2_384_HMAC:
1014 		case CRYPTO_SHA2_512_HMAC:
1015 			axf = swd->sw_axf;
1016 
1017 			if (swd->sw_ictx) {
1018 				explicit_bzero(swd->sw_ictx, axf->ctxsize);
1019 				free(swd->sw_ictx, M_CRYPTO_DATA);
1020 			}
1021 			if (swd->sw_octx) {
1022 				explicit_bzero(swd->sw_octx, axf->ctxsize);
1023 				free(swd->sw_octx, M_CRYPTO_DATA);
1024 			}
1025 			break;
1026 
1027 		case CRYPTO_MD5_KPDK:
1028 		case CRYPTO_SHA1_KPDK:
1029 			axf = swd->sw_axf;
1030 
1031 			if (swd->sw_ictx) {
1032 				explicit_bzero(swd->sw_ictx, axf->ctxsize);
1033 				free(swd->sw_ictx, M_CRYPTO_DATA);
1034 			}
1035 			if (swd->sw_octx) {
1036 				explicit_bzero(swd->sw_octx, swd->sw_klen);
1037 				free(swd->sw_octx, M_CRYPTO_DATA);
1038 			}
1039 			break;
1040 
1041 		case CRYPTO_AES_128_GMAC:
1042 		case CRYPTO_AES_192_GMAC:
1043 		case CRYPTO_AES_256_GMAC:
1044 		case CRYPTO_MD5:
1045 		case CRYPTO_SHA1:
1046 			axf = swd->sw_axf;
1047 
1048 			if (swd->sw_ictx) {
1049 				explicit_bzero(swd->sw_ictx, axf->ctxsize);
1050 				free(swd->sw_ictx, M_CRYPTO_DATA);
1051 			}
1052 			break;
1053 		}
1054 
1055 		free(swd, M_CRYPTO_DATA);
1056 	}
1057 	return 0;
1058 }
1059 
/*
 * Process a software request.
 *
 * Entry point registered with the crypto framework.  Looks up the
 * session for crp->crp_sid and dispatches each crypto descriptor to
 * the matching software implementation (cipher, MAC, combined
 * AEAD, or compression).  Completion status is reported through
 * crp->crp_etype and crypto_done(); the function itself always
 * returns 0 except for a NULL crp.
 */
int
swcr_process(struct cryptop *crp)
{
	struct cryptodesc *crd;
	struct swcr_data *sw;
	u_int32_t lid;
	int type;

	/* Sanity check */
	if (crp == NULL)
		return EINVAL;

	if (crp->crp_desc == NULL || crp->crp_buf == NULL) {
		crp->crp_etype = EINVAL;
		goto done;
	}

	/* Low 32 bits of the sid select the session slot. */
	lid = crp->crp_sid & 0xffffffff;
	if (lid >= swcr_sesnum || lid == 0 || swcr_sessions[lid] == NULL) {
		crp->crp_etype = ENOENT;
		goto done;
	}

	if (crp->crp_flags & CRYPTO_F_IMBUF)
		type = CRYPTO_BUF_MBUF;
	else
		type = CRYPTO_BUF_IOV;

	/* Go through crypto descriptors, processing as we go */
	for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
		/*
		 * Find the crypto context.
		 *
		 * XXX Note that the logic here prevents us from having
		 * XXX the same algorithm multiple times in a session
		 * XXX (or rather, we can but it won't give us the right
		 * XXX results). To do that, we'd need some way of differentiating
		 * XXX between the various instances of an algorithm (so we can
		 * XXX locate the correct crypto context).
		 */
		for (sw = swcr_sessions[lid];
		    sw && sw->sw_alg != crd->crd_alg;
		    sw = sw->sw_next)
			;

		/* No such context ? */
		if (sw == NULL) {
			crp->crp_etype = EINVAL;
			goto done;
		}

		switch (sw->sw_alg) {
		case CRYPTO_NULL:
			/* No-op transform. */
			break;
		case CRYPTO_DES_CBC:
		case CRYPTO_3DES_CBC:
		case CRYPTO_BLF_CBC:
		case CRYPTO_CAST_CBC:
		case CRYPTO_RIJNDAEL128_CBC:
		case CRYPTO_AES_CTR:
		case CRYPTO_AES_XTS:
			if ((crp->crp_etype = swcr_encdec(crd, sw,
			    crp->crp_buf, type)) != 0)
				goto done;
			break;
		case CRYPTO_MD5_HMAC:
		case CRYPTO_SHA1_HMAC:
		case CRYPTO_RIPEMD160_HMAC:
		case CRYPTO_SHA2_256_HMAC:
		case CRYPTO_SHA2_384_HMAC:
		case CRYPTO_SHA2_512_HMAC:
		case CRYPTO_MD5_KPDK:
		case CRYPTO_SHA1_KPDK:
		case CRYPTO_MD5:
		case CRYPTO_SHA1:
			if ((crp->crp_etype = swcr_authcompute(crp, crd, sw,
			    crp->crp_buf, type)) != 0)
				goto done;
			break;

		case CRYPTO_AES_GCM_16:
		case CRYPTO_AES_GMAC:
		case CRYPTO_AES_128_GMAC:
		case CRYPTO_AES_192_GMAC:
		case CRYPTO_AES_256_GMAC:
			/* AEAD path consumes the whole request at once. */
			crp->crp_etype = swcr_combined(crp);
			goto done;

		case CRYPTO_DEFLATE_COMP:
			if ((crp->crp_etype = swcr_compdec(crd, sw,
			    crp->crp_buf, type)) != 0)
				goto done;
			else
				crp->crp_olen = (int)sw->sw_size;
			break;

		default:
			/* Unknown/unsupported algorithm */
			crp->crp_etype = EINVAL;
			goto done;
		}
	}

done:
	crypto_done(crp);
	return 0;
}
1170 
1171 /*
1172  * Initialize the driver, called from the kernel main().
1173  */
1174 void
1175 swcr_init(void)
1176 {
1177 	int algs[CRYPTO_ALGORITHM_MAX + 1];
1178 	int flags = CRYPTOCAP_F_SOFTWARE | CRYPTOCAP_F_ENCRYPT_MAC |
1179 	    CRYPTOCAP_F_MAC_ENCRYPT;
1180 
1181 	swcr_id = crypto_get_driverid(flags);
1182 	if (swcr_id < 0) {
1183 		/* This should never happen */
1184 		panic("Software crypto device cannot initialize!");
1185 	}
1186 
1187 	bzero(algs, sizeof(algs));
1188 
1189 	algs[CRYPTO_DES_CBC] = CRYPTO_ALG_FLAG_SUPPORTED;
1190 	algs[CRYPTO_3DES_CBC] = CRYPTO_ALG_FLAG_SUPPORTED;
1191 	algs[CRYPTO_BLF_CBC] = CRYPTO_ALG_FLAG_SUPPORTED;
1192 	algs[CRYPTO_CAST_CBC] = CRYPTO_ALG_FLAG_SUPPORTED;
1193 	algs[CRYPTO_MD5_HMAC] = CRYPTO_ALG_FLAG_SUPPORTED;
1194 	algs[CRYPTO_SHA1_HMAC] = CRYPTO_ALG_FLAG_SUPPORTED;
1195 	algs[CRYPTO_RIPEMD160_HMAC] = CRYPTO_ALG_FLAG_SUPPORTED;
1196 	algs[CRYPTO_MD5_KPDK] = CRYPTO_ALG_FLAG_SUPPORTED;
1197 	algs[CRYPTO_SHA1_KPDK] = CRYPTO_ALG_FLAG_SUPPORTED;
1198 	algs[CRYPTO_MD5] = CRYPTO_ALG_FLAG_SUPPORTED;
1199 	algs[CRYPTO_SHA1] = CRYPTO_ALG_FLAG_SUPPORTED;
1200 	algs[CRYPTO_RIJNDAEL128_CBC] = CRYPTO_ALG_FLAG_SUPPORTED;
1201 	algs[CRYPTO_AES_CTR] = CRYPTO_ALG_FLAG_SUPPORTED;
1202 	algs[CRYPTO_AES_XTS] = CRYPTO_ALG_FLAG_SUPPORTED;
1203 	algs[CRYPTO_AES_GCM_16] = CRYPTO_ALG_FLAG_SUPPORTED;
1204 	algs[CRYPTO_AES_GMAC] = CRYPTO_ALG_FLAG_SUPPORTED;
1205 	algs[CRYPTO_DEFLATE_COMP] = CRYPTO_ALG_FLAG_SUPPORTED;
1206 	algs[CRYPTO_NULL] = CRYPTO_ALG_FLAG_SUPPORTED;
1207 	algs[CRYPTO_SHA2_256_HMAC] = CRYPTO_ALG_FLAG_SUPPORTED;
1208 	algs[CRYPTO_SHA2_384_HMAC] = CRYPTO_ALG_FLAG_SUPPORTED;
1209 	algs[CRYPTO_SHA2_512_HMAC] = CRYPTO_ALG_FLAG_SUPPORTED;
1210 	algs[CRYPTO_AES_128_GMAC] = CRYPTO_ALG_FLAG_SUPPORTED;
1211 	algs[CRYPTO_AES_192_GMAC] = CRYPTO_ALG_FLAG_SUPPORTED;
1212 	algs[CRYPTO_AES_256_GMAC] = CRYPTO_ALG_FLAG_SUPPORTED;
1213 	algs[CRYPTO_ESN] = CRYPTO_ALG_FLAG_SUPPORTED;
1214 
1215 	crypto_register(swcr_id, algs, swcr_newsession,
1216 	    swcr_freesession, swcr_process);
1217 }
1218