xref: /openbsd-src/sys/crypto/cryptosoft.c (revision db3296cf5c1dd9058ceecc3a29fe4aaa0bd26000)
1 /*	$OpenBSD: cryptosoft.c,v 1.39 2003/07/24 08:03:19 itojun Exp $	*/
2 
3 /*
4  * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu)
5  *
6  * This code was written by Angelos D. Keromytis in Athens, Greece, in
7  * February 2000. Network Security Technologies Inc. (NSTI) kindly
8  * supported the development of this code.
9  *
10  * Copyright (c) 2000, 2001 Angelos D. Keromytis
11  *
12  * Permission to use, copy, and modify this software with or without fee
13  * is hereby granted, provided that this entire notice is included in
14  * all source code copies of any software which is or includes a copy or
15  * modification of this software.
16  *
17  * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
18  * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
19  * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
20  * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
21  * PURPOSE.
22  */
23 
24 #include <sys/param.h>
25 #include <sys/systm.h>
26 #include <sys/malloc.h>
27 #include <sys/mbuf.h>
28 #include <sys/sysctl.h>
29 #include <sys/errno.h>
30 #include <sys/md5k.h>
31 #include <dev/rndvar.h>
32 #include <crypto/sha1.h>
33 #include <crypto/rmd160.h>
34 #include <crypto/cast.h>
35 #include <crypto/skipjack.h>
36 #include <crypto/blf.h>
37 #include <crypto/cryptodev.h>
38 #include <crypto/cryptosoft.h>
39 #include <crypto/xform.h>
40 
/*
 * HMAC inner-pad constant: one full 64-byte block of 0x36 (the ipad
 * value from RFC 2104).  swcr_newsession() hashes this after the
 * XOR'ed key to pad the inner context out to HMAC_BLOCK_LEN.
 */
u_int8_t hmac_ipad_buffer[64] = {
	0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
	0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
	0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
	0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
	0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
	0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
	0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
	0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36
};
51 
/*
 * HMAC outer-pad constant: one full 64-byte block of 0x5C (the opad
 * value from RFC 2104).  Used by swcr_newsession() to pad the outer
 * context out to HMAC_BLOCK_LEN.
 */
u_int8_t hmac_opad_buffer[64] = {
	0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
	0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
	0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
	0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
	0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
	0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
	0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
	0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C
};
62 
63 
/*
 * Global session table, grown on demand by swcr_newsession().
 * Slot 0 is deliberately left empty (see swcr_newsession).
 */
struct swcr_data **swcr_sessions = NULL;
u_int32_t swcr_sesnum = 0;	/* number of slots in swcr_sessions */
int32_t swcr_id = -1;		/* driver id from crypto_get_driverid() */
67 
/*
 * Dispatch a data copy to the routine matching the buffer type:
 * m_copyback/m_copydata for an mbuf chain (CRYPTO_BUF_MBUF),
 * cuio_copyback/cuio_copydata for a struct uio.
 *
 * Each argument and the entire expansion are parenthesized so the
 * macros behave as single expressions regardless of the context or
 * argument expressions they are used with.
 */
#define COPYBACK(x, a, b, c, d) \
	((x) == CRYPTO_BUF_MBUF ? m_copyback((struct mbuf *)(a),(b),(c),(d)) \
	: cuio_copyback((struct uio *)(a),(b),(c),(d)))
#define COPYDATA(x, a, b, c, d) \
	((x) == CRYPTO_BUF_MBUF ? m_copydata((struct mbuf *)(a),(b),(c),(d)) \
	: cuio_copydata((struct uio *)(a),(b),(c),(d)))
74 
75 /*
76  * Apply a symmetric encryption/decryption algorithm.
77  */
78 int
79 swcr_encdec(struct cryptodesc *crd, struct swcr_data *sw, caddr_t buf,
80     int outtype)
81 {
82 	unsigned char iv[EALG_MAX_BLOCK_LEN], blk[EALG_MAX_BLOCK_LEN], *idat;
83 	unsigned char *ivp, piv[EALG_MAX_BLOCK_LEN];
84 	struct enc_xform *exf;
85 	int i, k, j, blks, ind, count;
86 	struct mbuf *m = NULL;
87 	struct uio *uio = NULL;
88 
89 
90 	exf = sw->sw_exf;
91 	blks = exf->blocksize;
92 
93 	/* Check for non-padded data */
94 	if (crd->crd_len % blks)
95 		return EINVAL;
96 
97 	if (outtype == CRYPTO_BUF_MBUF)
98 		m = (struct mbuf *) buf;
99 	else
100 		uio = (struct uio *) buf;
101 
102 	/* Initialize the IV */
103 	if (crd->crd_flags & CRD_F_ENCRYPT) {
104 		/* IV explicitly provided ? */
105 		if (crd->crd_flags & CRD_F_IV_EXPLICIT)
106 			bcopy(crd->crd_iv, iv, blks);
107 		else {
108 			/* Get random IV */
109 			for (i = 0;
110 			    i + sizeof (u_int32_t) < EALG_MAX_BLOCK_LEN;
111 			    i += sizeof (u_int32_t)) {
112 				u_int32_t temp = arc4random();
113 
114 				bcopy(&temp, iv + i, sizeof(u_int32_t));
115 			}
116 			/*
117 			 * What if the block size is not a multiple
118 			 * of sizeof (u_int32_t), which is the size of
119 			 * what arc4random() returns ?
120 			 */
121 			if (EALG_MAX_BLOCK_LEN % sizeof (u_int32_t) != 0) {
122 				u_int32_t temp = arc4random();
123 
124 				bcopy (&temp, iv + i,
125 				    EALG_MAX_BLOCK_LEN - i);
126 			}
127 		}
128 
129 		/* Do we need to write the IV */
130 		if (!(crd->crd_flags & CRD_F_IV_PRESENT)) {
131 			COPYBACK(outtype, buf, crd->crd_inject, blks, iv);
132 		}
133 
134 	} else {	/* Decryption */
135 			/* IV explicitly provided ? */
136 		if (crd->crd_flags & CRD_F_IV_EXPLICIT)
137 			bcopy(crd->crd_iv, iv, blks);
138 		else {
139 			/* Get IV off buf */
140 			COPYDATA(outtype, buf, crd->crd_inject, blks, iv);
141 		}
142 	}
143 
144 	ivp = iv;
145 
146 	if (outtype == CRYPTO_BUF_MBUF) {
147 		/* Find beginning of data */
148 		m = m_getptr(m, crd->crd_skip, &k);
149 		if (m == NULL)
150 			return EINVAL;
151 
152 		i = crd->crd_len;
153 
154 		while (i > 0) {
155 			/*
156 			 * If there's insufficient data at the end of
157 			 * an mbuf, we have to do some copying.
158 			 */
159 			if (m->m_len < k + blks && m->m_len != k) {
160 				m_copydata(m, k, blks, blk);
161 
162 				/* Actual encryption/decryption */
163 				if (crd->crd_flags & CRD_F_ENCRYPT) {
164 					/* XOR with previous block */
165 					for (j = 0; j < blks; j++)
166 						blk[j] ^= ivp[j];
167 
168 					exf->encrypt(sw->sw_kschedule, blk);
169 
170 					/*
171 					 * Keep encrypted block for XOR'ing
172 					 * with next block
173 					 */
174 					bcopy(blk, iv, blks);
175 					ivp = iv;
176 				} else {	/* decrypt */
177 					/*
178 					 * Keep encrypted block for XOR'ing
179 					 * with next block
180 					 */
181 					if (ivp == iv)
182 						bcopy(blk, piv, blks);
183 					else
184 						bcopy(blk, iv, blks);
185 
186 					exf->decrypt(sw->sw_kschedule, blk);
187 
188 					/* XOR with previous block */
189 					for (j = 0; j < blks; j++)
190 						blk[j] ^= ivp[j];
191 
192 					if (ivp == iv)
193 						bcopy(piv, iv, blks);
194 					else
195 						ivp = iv;
196 				}
197 
198 				/* Copy back decrypted block */
199 				m_copyback(m, k, blks, blk);
200 
201 				/* Advance pointer */
202 				m = m_getptr(m, k + blks, &k);
203 				if (m == NULL)
204 					return EINVAL;
205 
206 				i -= blks;
207 
208 				/* Could be done... */
209 				if (i == 0)
210 					break;
211 			}
212 
213 			/* Skip possibly empty mbufs */
214 			if (k == m->m_len) {
215 				for (m = m->m_next; m && m->m_len == 0;
216 				    m = m->m_next)
217 					;
218 				k = 0;
219 			}
220 
221 			/* Sanity check */
222 			if (m == NULL)
223 				return EINVAL;
224 
225 			/*
226 			 * Warning: idat may point to garbage here, but
227 			 * we only use it in the while() loop, only if
228 			 * there are indeed enough data.
229 			 */
230 			idat = mtod(m, unsigned char *) + k;
231 
232 			while (m->m_len >= k + blks && i > 0) {
233 				if (crd->crd_flags & CRD_F_ENCRYPT) {
234 					/* XOR with previous block/IV */
235 					for (j = 0; j < blks; j++)
236 						idat[j] ^= ivp[j];
237 
238 					exf->encrypt(sw->sw_kschedule, idat);
239 					ivp = idat;
240 				} else {	/* decrypt */
241 					/*
242 					 * Keep encrypted block to be used
243 					 * in next block's processing.
244 					 */
245 					if (ivp == iv)
246 						bcopy(idat, piv, blks);
247 					else
248 						bcopy(idat, iv, blks);
249 
250 					exf->decrypt(sw->sw_kschedule, idat);
251 
252 					/* XOR with previous block/IV */
253 					for (j = 0; j < blks; j++)
254 						idat[j] ^= ivp[j];
255 
256 					if (ivp == iv)
257 						bcopy(piv, iv, blks);
258 					else
259 						ivp = iv;
260 				}
261 
262 				idat += blks;
263 				k += blks;
264 				i -= blks;
265 			}
266 		}
267 	} else {
268 		/* Find beginning of data */
269 		count = crd->crd_skip;
270 		ind = cuio_getptr(uio, count, &k);
271 		if (ind == -1)
272 			return EINVAL;
273 
274 		i = crd->crd_len;
275 
276 		while (i > 0) {
277 			/*
278 			 * If there's insufficient data at the end,
279 			 * we have to do some copying.
280 			 */
281 			if (uio->uio_iov[ind].iov_len < k + blks &&
282 			    uio->uio_iov[ind].iov_len != k) {
283 				cuio_copydata(uio, k, blks, blk);
284 
285 				/* Actual encryption/decryption */
286 				if (crd->crd_flags & CRD_F_ENCRYPT) {
287 					/* XOR with previous block */
288 					for (j = 0; j < blks; j++)
289 						blk[j] ^= ivp[j];
290 
291 					exf->encrypt(sw->sw_kschedule, blk);
292 
293 					/*
294 					 * Keep encrypted block for XOR'ing
295 					 * with next block
296 					 */
297 					bcopy(blk, iv, blks);
298 					ivp = iv;
299 				} else {	/* decrypt */
300 					/*
301 					 * Keep encrypted block for XOR'ing
302 					 * with next block
303 					 */
304 					if (ivp == iv)
305 						bcopy(blk, piv, blks);
306 					else
307 						bcopy(blk, iv, blks);
308 
309 					exf->decrypt(sw->sw_kschedule, blk);
310 
311 					/* XOR with previous block */
312 					for (j = 0; j < blks; j++)
313 						blk[j] ^= ivp[j];
314 
315 					if (ivp == iv)
316 						bcopy(piv, iv, blks);
317 					else
318 						ivp = iv;
319 				}
320 
321 				/* Copy back decrypted block */
322 				cuio_copyback(uio, k, blks, blk);
323 
324 				count += blks;
325 
326 				/* Advance pointer */
327 				ind = cuio_getptr(uio, count, &k);
328 				if (ind == -1)
329 					return (EINVAL);
330 
331 				i -= blks;
332 
333 				/* Could be done... */
334 				if (i == 0)
335 					break;
336 			}
337 
338 			/*
339 			 * Warning: idat may point to garbage here, but
340 			 * we only use it in the while() loop, only if
341 			 * there are indeed enough data.
342 			 */
343 			idat = uio->uio_iov[ind].iov_base + k;
344 
345 			while (uio->uio_iov[ind].iov_len >= k + blks &&
346 			    i > 0) {
347 				if (crd->crd_flags & CRD_F_ENCRYPT) {
348 					/* XOR with previous block/IV */
349 					for (j = 0; j < blks; j++)
350 						idat[j] ^= ivp[j];
351 
352 					exf->encrypt(sw->sw_kschedule, idat);
353 					ivp = idat;
354 				} else {	/* decrypt */
355 					/*
356 					 * Keep encrypted block to be used
357 					 * in next block's processing.
358 					 */
359 					if (ivp == iv)
360 						bcopy(idat, piv, blks);
361 					else
362 						bcopy(idat, iv, blks);
363 
364 					exf->decrypt(sw->sw_kschedule, idat);
365 
366 					/* XOR with previous block/IV */
367 					for (j = 0; j < blks; j++)
368 						idat[j] ^= ivp[j];
369 
370 					if (ivp == iv)
371 						bcopy(piv, iv, blks);
372 					else
373 						ivp = iv;
374 				}
375 
376 				idat += blks;
377 				count += blks;
378 				k += blks;
379 				i -= blks;
380 			}
381 		}
382 	}
383 
384 	return 0; /* Done with encryption/decryption */
385 }
386 
387 /*
388  * Compute keyed-hash authenticator.
389  */
390 int
391 swcr_authcompute(struct cryptop *crp, struct cryptodesc *crd,
392     struct swcr_data *sw, caddr_t buf, int outtype)
393 {
394 	unsigned char aalg[AALG_MAX_RESULT_LEN];
395 	struct auth_hash *axf;
396 	union authctx ctx;
397 	int err;
398 
399 	if (sw->sw_ictx == 0)
400 		return EINVAL;
401 
402 	axf = sw->sw_axf;
403 
404 	bcopy(sw->sw_ictx, &ctx, axf->ctxsize);
405 
406 	if (outtype == CRYPTO_BUF_MBUF)
407 		err = m_apply((struct mbuf *) buf, crd->crd_skip, crd->crd_len,
408 		    (int (*)(caddr_t, caddr_t, unsigned int)) axf->Update,
409 		    (caddr_t) &ctx);
410 	else
411 		err = cuio_apply((struct uio *) buf, crd->crd_skip,
412 		    crd->crd_len,
413 		    (int (*)(caddr_t, caddr_t, unsigned int)) axf->Update,
414 		    (caddr_t) &ctx);
415 
416 	if (err)
417 		return err;
418 
419 	switch (sw->sw_alg) {
420 	case CRYPTO_MD5_HMAC:
421 	case CRYPTO_SHA1_HMAC:
422 	case CRYPTO_RIPEMD160_HMAC:
423 	case CRYPTO_SHA2_256_HMAC:
424 	case CRYPTO_SHA2_384_HMAC:
425 	case CRYPTO_SHA2_512_HMAC:
426 		if (sw->sw_octx == NULL)
427 			return EINVAL;
428 
429 		axf->Final(aalg, &ctx);
430 		bcopy(sw->sw_octx, &ctx, axf->ctxsize);
431 		axf->Update(&ctx, aalg, axf->hashsize);
432 		axf->Final(aalg, &ctx);
433 		break;
434 
435 	case CRYPTO_MD5_KPDK:
436 	case CRYPTO_SHA1_KPDK:
437 		if (sw->sw_octx == NULL)
438 			return EINVAL;
439 
440 		axf->Update(&ctx, sw->sw_octx, sw->sw_klen);
441 		axf->Final(aalg, &ctx);
442 		break;
443 
444 	case CRYPTO_MD5:
445 	case CRYPTO_SHA1:
446 		axf->Final(aalg, &ctx);
447 		break;
448 	}
449 
450 	/* Inject the authentication data */
451 	if (outtype == CRYPTO_BUF_MBUF)
452 		COPYBACK(outtype, buf, crd->crd_inject, axf->authsize, aalg);
453 	else
454 		bcopy(aalg, crp->crp_mac, axf->authsize);
455 
456 	return 0;
457 }
458 
459 /*
460  * Apply a compression/decompression algorithm
461  */
462 int
463 swcr_compdec(struct cryptodesc *crd, struct swcr_data *sw,
464     caddr_t buf, int outtype)
465 {
466 	u_int8_t *data, *out;
467 	struct comp_algo *cxf;
468 	int adj;
469 	u_int32_t result;
470 
471 	cxf = sw->sw_cxf;
472 
473 	/* We must handle the whole buffer of data in one time
474 	 * then if there is not all the data in the mbuf, we must
475 	 * copy in a buffer.
476 	 */
477 
478 	MALLOC(data, u_int8_t *, crd->crd_len, M_CRYPTO_DATA,  M_NOWAIT);
479 	if (data == NULL)
480 		return (EINVAL);
481 	COPYDATA(outtype, buf, crd->crd_skip, crd->crd_len, data);
482 
483 	if (crd->crd_flags & CRD_F_COMP)
484 		result = cxf->compress(data, crd->crd_len, &out);
485 	else
486 		result = cxf->decompress(data, crd->crd_len, &out);
487 
488 	FREE(data, M_CRYPTO_DATA);
489 	if (result == 0)
490 		return EINVAL;
491 
492 	/* Copy back the (de)compressed data. m_copyback is
493 	 * extending the mbuf as necessary.
494 	 */
495 	sw->sw_size = result;
496 	/* Check the compressed size when doing compression */
497 	if (crd->crd_flags & CRD_F_COMP) {
498 		if (result > crd->crd_len) {
499 			/* Compression was useless, we lost time */
500 			FREE(out, M_CRYPTO_DATA);
501 			return 0;
502 		}
503 	}
504 
505 	COPYBACK(outtype, buf, crd->crd_skip, result, out);
506 	if (result < crd->crd_len) {
507 		adj = result - crd->crd_len;
508 		if (outtype == CRYPTO_BUF_MBUF) {
509 			adj = result - crd->crd_len;
510 			m_adj((struct mbuf *)buf, adj);
511 		} else {
512 			struct uio *uio = (struct uio *)buf;
513 			int ind;
514 
515 			adj = crd->crd_len - result;
516 			ind = uio->uio_iovcnt - 1;
517 
518 			while (adj > 0 && ind >= 0) {
519 				if (adj < uio->uio_iov[ind].iov_len) {
520 					uio->uio_iov[ind].iov_len -= adj;
521 					break;
522 				}
523 
524 				adj -= uio->uio_iov[ind].iov_len;
525 				uio->uio_iov[ind].iov_len = 0;
526 				ind--;
527 				uio->uio_iovcnt--;
528 			}
529 		}
530 	}
531 	FREE(out, M_CRYPTO_DATA);
532 	return 0;
533 }
534 
/*
 * Generate a new software session.
 *
 * Allocates (or grows) the global session table, picks the first free
 * slot, and builds one swcr_data context for each entry in the
 * cryptoini chain.  On success the slot index is returned via *sid.
 * Returns EINVAL on bad arguments or unknown algorithm, ENOBUFS on
 * allocation failure; partially built sessions are torn down via
 * swcr_freesession().
 */
int
swcr_newsession(u_int32_t *sid, struct cryptoini *cri)
{
	struct swcr_data **swd;
	struct auth_hash *axf;
	struct enc_xform *txf;
	struct comp_algo *cxf;
	u_int32_t i;
	int k;

	if (sid == NULL || cri == NULL)
		return EINVAL;

	/* Look for a free slot (slot 0 is reserved). */
	if (swcr_sessions) {
		for (i = 1; i < swcr_sesnum; i++)
			if (swcr_sessions[i] == NULL)
				break;
	}

	/* No table yet, or the table is full: (re)allocate it. */
	if (swcr_sessions == NULL || i == swcr_sesnum) {
		if (swcr_sessions == NULL) {
			i = 1; /* We leave swcr_sessions[0] empty */
			swcr_sesnum = CRYPTO_SW_SESSIONS;
		} else
			swcr_sesnum *= 2;

		swd = malloc(swcr_sesnum * sizeof(struct swcr_data *),
		    M_CRYPTO_DATA, M_NOWAIT);
		if (swd == NULL) {
			/* Reset session number */
			if (swcr_sesnum == CRYPTO_SW_SESSIONS)
				swcr_sesnum = 0;
			else
				swcr_sesnum /= 2;
			return ENOBUFS;
		}

		bzero(swd, swcr_sesnum * sizeof(struct swcr_data *));

		/* Copy existing sessions */
		if (swcr_sessions) {
			bcopy(swcr_sessions, swd,
			    (swcr_sesnum / 2) * sizeof(struct swcr_data *));
			free(swcr_sessions, M_CRYPTO_DATA);
		}

		swcr_sessions = swd;
	}

	swd = &swcr_sessions[i];
	*sid = i;

	/* Build one chained context per algorithm in the cryptoini list. */
	while (cri) {
		MALLOC(*swd, struct swcr_data *, sizeof(struct swcr_data),
		    M_CRYPTO_DATA, M_NOWAIT);
		if (*swd == NULL) {
			swcr_freesession(i);
			return ENOBUFS;
		}
		bzero(*swd, sizeof(struct swcr_data));

		switch (cri->cri_alg) {
		case CRYPTO_DES_CBC:
			txf = &enc_xform_des;
			goto enccommon;
		case CRYPTO_3DES_CBC:
			txf = &enc_xform_3des;
			goto enccommon;
		case CRYPTO_BLF_CBC:
			txf = &enc_xform_blf;
			goto enccommon;
		case CRYPTO_CAST_CBC:
			txf = &enc_xform_cast5;
			goto enccommon;
		case CRYPTO_SKIPJACK_CBC:
			txf = &enc_xform_skipjack;
			goto enccommon;
		case CRYPTO_RIJNDAEL128_CBC:
			txf = &enc_xform_rijndael128;
			goto enccommon;
		case CRYPTO_NULL:
			txf = &enc_xform_null;
			goto enccommon;
		enccommon:
			/* Expand the key schedule for the chosen cipher. */
			txf->setkey(&((*swd)->sw_kschedule), cri->cri_key,
			    cri->cri_klen / 8);
			(*swd)->sw_exf = txf;
			break;

		case CRYPTO_MD5_HMAC:
			axf = &auth_hash_hmac_md5_96;
			goto authcommon;
		case CRYPTO_SHA1_HMAC:
			axf = &auth_hash_hmac_sha1_96;
			goto authcommon;
		case CRYPTO_RIPEMD160_HMAC:
			axf = &auth_hash_hmac_ripemd_160_96;
			goto authcommon;
		case CRYPTO_SHA2_256_HMAC:
			axf = &auth_hash_hmac_sha2_256_96;
			goto authcommon;
		case CRYPTO_SHA2_384_HMAC:
			axf = &auth_hash_hmac_sha2_384_96;
			goto authcommon;
		case CRYPTO_SHA2_512_HMAC:
			axf = &auth_hash_hmac_sha2_512_96;
			/* FALLTHROUGH */
		authcommon:
			(*swd)->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA,
			    M_NOWAIT);
			if ((*swd)->sw_ictx == NULL) {
				swcr_freesession(i);
				return ENOBUFS;
			}

			(*swd)->sw_octx = malloc(axf->ctxsize, M_CRYPTO_DATA,
			    M_NOWAIT);
			if ((*swd)->sw_octx == NULL) {
				swcr_freesession(i);
				return ENOBUFS;
			}

			/*
			 * Precompute the HMAC inner context: hash
			 * (key ^ ipad), padding the key block out to
			 * HMAC_BLOCK_LEN with the ipad constant.
			 */
			for (k = 0; k < cri->cri_klen / 8; k++)
				cri->cri_key[k] ^= HMAC_IPAD_VAL;

			axf->Init((*swd)->sw_ictx);
			axf->Update((*swd)->sw_ictx, cri->cri_key,
			    cri->cri_klen / 8);
			axf->Update((*swd)->sw_ictx, hmac_ipad_buffer,
			    HMAC_BLOCK_LEN - (cri->cri_klen / 8));

			/*
			 * Flip the key from (key ^ ipad) to (key ^ opad)
			 * and precompute the outer context the same way.
			 */
			for (k = 0; k < cri->cri_klen / 8; k++)
				cri->cri_key[k] ^= (HMAC_IPAD_VAL ^ HMAC_OPAD_VAL);

			axf->Init((*swd)->sw_octx);
			axf->Update((*swd)->sw_octx, cri->cri_key,
			    cri->cri_klen / 8);
			axf->Update((*swd)->sw_octx, hmac_opad_buffer,
			    HMAC_BLOCK_LEN - (cri->cri_klen / 8));

			/* Restore the caller's key to its original value. */
			for (k = 0; k < cri->cri_klen / 8; k++)
				cri->cri_key[k] ^= HMAC_OPAD_VAL;
			(*swd)->sw_axf = axf;
			break;

		case CRYPTO_MD5_KPDK:
			axf = &auth_hash_key_md5;
			goto auth2common;

		case CRYPTO_SHA1_KPDK:
			axf = &auth_hash_key_sha1;
			/* FALLTHROUGH */
		auth2common:
			(*swd)->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA,
			    M_NOWAIT);
			if ((*swd)->sw_ictx == NULL) {
				swcr_freesession(i);
				return ENOBUFS;
			}

			/* Store the key so we can "append" it to the payload */
			(*swd)->sw_octx = malloc(cri->cri_klen / 8, M_CRYPTO_DATA,
			    M_NOWAIT);
			if ((*swd)->sw_octx == NULL) {
				swcr_freesession(i);
				return ENOBUFS;
			}

			(*swd)->sw_klen = cri->cri_klen / 8;
			bcopy(cri->cri_key, (*swd)->sw_octx, cri->cri_klen / 8);
			axf->Init((*swd)->sw_ictx);
			axf->Update((*swd)->sw_ictx, cri->cri_key,
			    cri->cri_klen / 8);
			axf->Final(NULL, (*swd)->sw_ictx);
			(*swd)->sw_axf = axf;
			break;

		case CRYPTO_MD5:
			axf = &auth_hash_md5;
			goto auth3common;

		case CRYPTO_SHA1:
			axf = &auth_hash_sha1;
			/* FALLTHROUGH */
		auth3common:
			/* Plain hash: only an (empty) inner context. */
			(*swd)->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA,
			    M_NOWAIT);
			if ((*swd)->sw_ictx == NULL) {
				swcr_freesession(i);
				return ENOBUFS;
			}

			axf->Init((*swd)->sw_ictx);
			(*swd)->sw_axf = axf;
			break;

		case CRYPTO_DEFLATE_COMP:
			cxf = &comp_algo_deflate;
			(*swd)->sw_cxf = cxf;
			break;
		default:
			swcr_freesession(i);
			return EINVAL;
		}

		(*swd)->sw_alg = cri->cri_alg;
		cri = cri->cri_next;
		swd = &((*swd)->sw_next);
	}
	return 0;
}
746 
747 /*
748  * Free a session.
749  */
750 int
751 swcr_freesession(u_int64_t tid)
752 {
753 	struct swcr_data *swd;
754 	struct enc_xform *txf;
755 	struct auth_hash *axf;
756 	u_int32_t sid = ((u_int32_t) tid) & 0xffffffff;
757 
758 	if (sid > swcr_sesnum || swcr_sessions == NULL ||
759 	    swcr_sessions[sid] == NULL)
760 		return EINVAL;
761 
762 	/* Silently accept and return */
763 	if (sid == 0)
764 		return 0;
765 
766 	while ((swd = swcr_sessions[sid]) != NULL) {
767 		swcr_sessions[sid] = swd->sw_next;
768 
769 		switch (swd->sw_alg) {
770 		case CRYPTO_DES_CBC:
771 		case CRYPTO_3DES_CBC:
772 		case CRYPTO_BLF_CBC:
773 		case CRYPTO_CAST_CBC:
774 		case CRYPTO_SKIPJACK_CBC:
775 		case CRYPTO_RIJNDAEL128_CBC:
776 		case CRYPTO_NULL:
777 			txf = swd->sw_exf;
778 
779 			if (swd->sw_kschedule)
780 				txf->zerokey(&(swd->sw_kschedule));
781 			break;
782 
783 		case CRYPTO_MD5_HMAC:
784 		case CRYPTO_SHA1_HMAC:
785 		case CRYPTO_RIPEMD160_HMAC:
786 			axf = swd->sw_axf;
787 
788 			if (swd->sw_ictx) {
789 				bzero(swd->sw_ictx, axf->ctxsize);
790 				free(swd->sw_ictx, M_CRYPTO_DATA);
791 			}
792 			if (swd->sw_octx) {
793 				bzero(swd->sw_octx, axf->ctxsize);
794 				free(swd->sw_octx, M_CRYPTO_DATA);
795 			}
796 			break;
797 
798 		case CRYPTO_MD5_KPDK:
799 		case CRYPTO_SHA1_KPDK:
800 			axf = swd->sw_axf;
801 
802 			if (swd->sw_ictx) {
803 				bzero(swd->sw_ictx, axf->ctxsize);
804 				free(swd->sw_ictx, M_CRYPTO_DATA);
805 			}
806 			if (swd->sw_octx) {
807 				bzero(swd->sw_octx, swd->sw_klen);
808 				free(swd->sw_octx, M_CRYPTO_DATA);
809 			}
810 			break;
811 
812 		case CRYPTO_MD5:
813 		case CRYPTO_SHA1:
814 			axf = swd->sw_axf;
815 
816 			if (swd->sw_ictx)
817 				free(swd->sw_ictx, M_CRYPTO_DATA);
818 			break;
819 		}
820 
821 		FREE(swd, M_CRYPTO_DATA);
822 	}
823 	return 0;
824 }
825 
826 /*
827  * Process a software request.
828  */
829 int
830 swcr_process(struct cryptop *crp)
831 {
832 	struct cryptodesc *crd;
833 	struct swcr_data *sw;
834 	u_int32_t lid;
835 	int type;
836 
837 	/* Sanity check */
838 	if (crp == NULL)
839 		return EINVAL;
840 
841 	if (crp->crp_desc == NULL || crp->crp_buf == NULL) {
842 		crp->crp_etype = EINVAL;
843 		goto done;
844 	}
845 
846 	lid = crp->crp_sid & 0xffffffff;
847 	if (lid >= swcr_sesnum || lid == 0 || swcr_sessions[lid] == NULL) {
848 		crp->crp_etype = ENOENT;
849 		goto done;
850 	}
851 
852 	if (crp->crp_flags & CRYPTO_F_IMBUF)
853 		type = CRYPTO_BUF_MBUF;
854 	else
855 		type = CRYPTO_BUF_IOV;
856 
857 	/* Go through crypto descriptors, processing as we go */
858 	for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
859 		/*
860 		 * Find the crypto context.
861 		 *
862 		 * XXX Note that the logic here prevents us from having
863 		 * XXX the same algorithm multiple times in a session
864 		 * XXX (or rather, we can but it won't give us the right
865 		 * XXX results). To do that, we'd need some way of differentiating
866 		 * XXX between the various instances of an algorithm (so we can
867 		 * XXX locate the correct crypto context).
868 		 */
869 		for (sw = swcr_sessions[lid];
870 		    sw && sw->sw_alg != crd->crd_alg;
871 		    sw = sw->sw_next)
872 			;
873 
874 		/* No such context ? */
875 		if (sw == NULL) {
876 			crp->crp_etype = EINVAL;
877 			goto done;
878 		}
879 
880 		switch (sw->sw_alg) {
881 		case CRYPTO_DES_CBC:
882 		case CRYPTO_3DES_CBC:
883 		case CRYPTO_BLF_CBC:
884 		case CRYPTO_CAST_CBC:
885 		case CRYPTO_SKIPJACK_CBC:
886 		case CRYPTO_RIJNDAEL128_CBC:
887 		case CRYPTO_NULL:
888 			if ((crp->crp_etype = swcr_encdec(crd, sw,
889 			    crp->crp_buf, type)) != 0)
890 				goto done;
891 			break;
892 		case CRYPTO_MD5_HMAC:
893 		case CRYPTO_SHA1_HMAC:
894 		case CRYPTO_RIPEMD160_HMAC:
895 		case CRYPTO_SHA2_256_HMAC:
896 		case CRYPTO_SHA2_384_HMAC:
897 		case CRYPTO_SHA2_512_HMAC:
898 		case CRYPTO_MD5_KPDK:
899 		case CRYPTO_SHA1_KPDK:
900 		case CRYPTO_MD5:
901 		case CRYPTO_SHA1:
902 			if ((crp->crp_etype = swcr_authcompute(crp, crd, sw,
903 			    crp->crp_buf, type)) != 0)
904 				goto done;
905 			break;
906 
907 		case CRYPTO_DEFLATE_COMP:
908 			if ((crp->crp_etype = swcr_compdec(crd, sw,
909 			    crp->crp_buf, type)) != 0)
910 				goto done;
911 			else
912 				crp->crp_olen = (int)sw->sw_size;
913 			break;
914 
915 		default:
916 			/* Unknown/unsupported algorithm */
917 			crp->crp_etype = EINVAL;
918 			goto done;
919 		}
920 	}
921 
922 done:
923 	crypto_done(crp);
924 	return 0;
925 }
926 
927 /*
928  * Initialize the driver, called from the kernel main().
929  */
930 void
931 swcr_init(void)
932 {
933 	int algs[CRYPTO_ALGORITHM_MAX + 1];
934 	int flags = CRYPTOCAP_F_SOFTWARE | CRYPTOCAP_F_ENCRYPT_MAC |
935 	    CRYPTOCAP_F_MAC_ENCRYPT;
936 
937 	swcr_id = crypto_get_driverid(flags);
938 	if (swcr_id < 0) {
939 		/* This should never happen */
940 		panic("Software crypto device cannot initialize!");
941 	}
942 
943 	bzero(algs, sizeof(algs));
944 
945 	algs[CRYPTO_DES_CBC] = CRYPTO_ALG_FLAG_SUPPORTED;
946 	algs[CRYPTO_3DES_CBC] = CRYPTO_ALG_FLAG_SUPPORTED;
947 	algs[CRYPTO_BLF_CBC] = CRYPTO_ALG_FLAG_SUPPORTED;
948 	algs[CRYPTO_CAST_CBC] = CRYPTO_ALG_FLAG_SUPPORTED;
949 	algs[CRYPTO_SKIPJACK_CBC] = CRYPTO_ALG_FLAG_SUPPORTED;
950 	algs[CRYPTO_MD5_HMAC] = CRYPTO_ALG_FLAG_SUPPORTED;
951 	algs[CRYPTO_SHA1_HMAC] = CRYPTO_ALG_FLAG_SUPPORTED;
952 	algs[CRYPTO_RIPEMD160_HMAC] = CRYPTO_ALG_FLAG_SUPPORTED;
953 	algs[CRYPTO_MD5_KPDK] = CRYPTO_ALG_FLAG_SUPPORTED;
954 	algs[CRYPTO_SHA1_KPDK] = CRYPTO_ALG_FLAG_SUPPORTED;
955 	algs[CRYPTO_MD5] = CRYPTO_ALG_FLAG_SUPPORTED;
956 	algs[CRYPTO_SHA1] = CRYPTO_ALG_FLAG_SUPPORTED;
957 	algs[CRYPTO_RIJNDAEL128_CBC] = CRYPTO_ALG_FLAG_SUPPORTED;
958 	algs[CRYPTO_DEFLATE_COMP] = CRYPTO_ALG_FLAG_SUPPORTED;
959 	algs[CRYPTO_NULL] = CRYPTO_ALG_FLAG_SUPPORTED;
960 	algs[CRYPTO_SHA2_256_HMAC] = CRYPTO_ALG_FLAG_SUPPORTED;
961 	algs[CRYPTO_SHA2_384_HMAC] = CRYPTO_ALG_FLAG_SUPPORTED;
962 	algs[CRYPTO_SHA2_512_HMAC] = CRYPTO_ALG_FLAG_SUPPORTED;
963 
964 	crypto_register(swcr_id, algs, swcr_newsession,
965 	    swcr_freesession, swcr_process);
966 }
967