xref: /openbsd-src/sys/crypto/cryptosoft.c (revision b33a8d55775a3013157d6b0e3602c1fe178b48a6)
1 /*	$OpenBSD: cryptosoft.c,v 1.74 2015/08/31 18:13:27 deraadt Exp $	*/
2 
3 /*
4  * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu)
5  *
6  * This code was written by Angelos D. Keromytis in Athens, Greece, in
7  * February 2000. Network Security Technologies Inc. (NSTI) kindly
8  * supported the development of this code.
9  *
10  * Copyright (c) 2000, 2001 Angelos D. Keromytis
11  *
12  * Permission to use, copy, and modify this software with or without fee
13  * is hereby granted, provided that this entire notice is included in
14  * all source code copies of any software which is or includes a copy or
15  * modification of this software.
16  *
17  * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
18  * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
19  * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
20  * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
21  * PURPOSE.
22  */
23 
24 #include <sys/param.h>
25 #include <sys/systm.h>
26 #include <sys/malloc.h>
27 #include <sys/mbuf.h>
28 #include <sys/errno.h>
29 #include <dev/rndvar.h>
30 #include <crypto/md5.h>
31 #include <crypto/sha1.h>
32 #include <crypto/rmd160.h>
33 #include <crypto/cast.h>
34 #include <crypto/cryptodev.h>
35 #include <crypto/cryptosoft.h>
36 #include <crypto/xform.h>
37 
/*
 * HMAC inner pad (RFC 2104): a full HMAC_MAX_BLOCK_LEN block of the
 * 0x36 pad byte.  Used in swcr_newsession() to pad a short key out to
 * the hash block size when precomputing the keyed inner context.
 */
const u_int8_t hmac_ipad_buffer[HMAC_MAX_BLOCK_LEN] = {
	0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
	0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
	0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
	0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
	0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
	0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
	0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
	0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
	0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
	0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
	0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
	0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
	0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
	0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
	0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
	0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36
};
56 
/*
 * HMAC outer pad (RFC 2104): a full HMAC_MAX_BLOCK_LEN block of the
 * 0x5C pad byte.  Counterpart of hmac_ipad_buffer, used to precompute
 * the keyed outer context.
 */
const u_int8_t hmac_opad_buffer[HMAC_MAX_BLOCK_LEN] = {
	0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
	0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
	0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
	0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
	0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
	0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
	0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
	0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
	0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
	0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
	0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
	0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
	0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
	0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
	0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
	0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C
};
75 
76 
/* Session table: one chain of swcr_data per session id; grown on demand. */
struct swcr_data **swcr_sessions = NULL;
/* Number of slots allocated in swcr_sessions (0 until first session). */
u_int32_t swcr_sesnum = 0;
/* Driver id assigned by crypto_get_driverid(); -1 until swcr_init(). */
int32_t swcr_id = -1;
80 
/*
 * Copy data into (COPYBACK) or out of (COPYDATA) the request buffer,
 * dispatching on the buffer type x: an mbuf chain (CRYPTO_BUF_MBUF)
 * or an iovec-based uio.  a = buffer, b = offset, c = length,
 * d = data pointer.  Arguments are parenthesized so expression
 * arguments expand safely.
 */
#define COPYBACK(x, a, b, c, d) \
	do { \
		if ((x) == CRYPTO_BUF_MBUF) \
			m_copyback((struct mbuf *)(a), (b), (c), (d), \
			    M_NOWAIT); \
		else \
			cuio_copyback((struct uio *)(a), (b), (c), (d)); \
	} while (0)
#define COPYDATA(x, a, b, c, d) \
	do { \
		if ((x) == CRYPTO_BUF_MBUF) \
			m_copydata((struct mbuf *)(a), (b), (c), (d)); \
		else \
			cuio_copydata((struct uio *)(a), (b), (c), (d)); \
	} while (0)
95 
/*
 * Apply a symmetric encryption/decryption algorithm in place to the
 * region [crd_skip, crd_skip + crd_len) of the request buffer.
 *
 * buf is either an mbuf chain or a struct uio, selected by outtype
 * (CRYPTO_BUF_MBUF vs. anything else).  Transforms that supply a
 * reinit method (counter/tweak style modes) manage their own IV
 * handling; for the rest, this routine performs CBC chaining in
 * software.  Returns 0 on success, EINVAL on malformed input (length
 * not a multiple of the block size, or a buffer shorter than the
 * described region).
 */
int
swcr_encdec(struct cryptodesc *crd, struct swcr_data *sw, caddr_t buf,
    int outtype)
{
	unsigned char iv[EALG_MAX_BLOCK_LEN], blk[EALG_MAX_BLOCK_LEN], *idat;
	unsigned char *ivp, *nivp, iv2[EALG_MAX_BLOCK_LEN];
	struct enc_xform *exf;
	int i, k, j, blks, ind, count, ivlen;
	struct mbuf *m = NULL;
	struct uio *uio = NULL;

	exf = sw->sw_exf;
	blks = exf->blocksize;
	ivlen = exf->ivsize;

	/* Check for non-padded data */
	if (crd->crd_len % blks)
		return EINVAL;

	if (outtype == CRYPTO_BUF_MBUF)
		m = (struct mbuf *) buf;
	else
		uio = (struct uio *) buf;

	/* Initialize the IV */
	if (crd->crd_flags & CRD_F_ENCRYPT) {
		/* IV explicitly provided ? */
		if (crd->crd_flags & CRD_F_IV_EXPLICIT)
			bcopy(crd->crd_iv, iv, ivlen);
		else
			arc4random_buf(iv, ivlen);

		/* Do we need to write the IV */
		if (!(crd->crd_flags & CRD_F_IV_PRESENT))
			COPYBACK(outtype, buf, crd->crd_inject, ivlen, iv);

	} else {	/* Decryption */
			/* IV explicitly provided ? */
		if (crd->crd_flags & CRD_F_IV_EXPLICIT)
			bcopy(crd->crd_iv, iv, ivlen);
		else {
			/* Get IV off buf */
			COPYDATA(outtype, buf, crd->crd_inject, ivlen, iv);
		}
	}

	/*
	 * ivp always points at the block to XOR into (or out of) the
	 * next data block for CBC.  Because decryption happens in
	 * place, each ciphertext block must be saved before being
	 * overwritten; iv and iv2 are ping-ponged for that purpose
	 * (see the nivp assignments below).
	 */
	ivp = iv;

	/*
	 * xforms that provide a reinit method perform all IV
	 * handling themselves.
	 */
	if (exf->reinit)
		exf->reinit(sw->sw_kschedule, iv);

	if (outtype == CRYPTO_BUF_MBUF) {
		/* Find beginning of data */
		m = m_getptr(m, crd->crd_skip, &k);
		if (m == NULL)
			return EINVAL;

		i = crd->crd_len;

		while (i > 0) {
			/*
			 * If there's insufficient data at the end of
			 * an mbuf, we have to do some copying.
			 */
			if (m->m_len < k + blks && m->m_len != k) {
				/* Gather one block spanning mbufs. */
				m_copydata(m, k, blks, blk);

				/* Actual encryption/decryption */
				if (exf->reinit) {
					if (crd->crd_flags & CRD_F_ENCRYPT) {
						exf->encrypt(sw->sw_kschedule,
						    blk);
					} else {
						exf->decrypt(sw->sw_kschedule,
						    blk);
					}
				} else if (crd->crd_flags & CRD_F_ENCRYPT) {
					/* XOR with previous block */
					for (j = 0; j < blks; j++)
						blk[j] ^= ivp[j];

					exf->encrypt(sw->sw_kschedule, blk);

					/*
					 * Keep encrypted block for XOR'ing
					 * with next block
					 */
					bcopy(blk, iv, blks);
					ivp = iv;
				} else {	/* decrypt */
					/*
					 * Keep encrypted block for XOR'ing
					 * with next block
					 */
					nivp = (ivp == iv) ? iv2 : iv;
					bcopy(blk, nivp, blks);

					exf->decrypt(sw->sw_kschedule, blk);

					/* XOR with previous block */
					for (j = 0; j < blks; j++)
						blk[j] ^= ivp[j];
					ivp = nivp;
				}

				/* Copy back decrypted block */
				m_copyback(m, k, blks, blk, M_NOWAIT);

				/* Advance pointer */
				m = m_getptr(m, k + blks, &k);
				if (m == NULL)
					return EINVAL;

				i -= blks;

				/* Could be done... */
				if (i == 0)
					break;
			}

			/* Skip possibly empty mbufs */
			if (k == m->m_len) {
				for (m = m->m_next; m && m->m_len == 0;
				    m = m->m_next)
					;
				k = 0;
			}

			/* Sanity check */
			if (m == NULL)
				return EINVAL;

			/*
			 * Warning: idat may point to garbage here, but
			 * we only use it in the while() loop, only if
			 * there are indeed enough data.
			 */
			idat = mtod(m, unsigned char *) + k;

			/* Fast path: whole blocks inside one mbuf. */
			while (m->m_len >= k + blks && i > 0) {
				if (exf->reinit) {
					if (crd->crd_flags & CRD_F_ENCRYPT) {
						exf->encrypt(sw->sw_kschedule,
						    idat);
					} else {
						exf->decrypt(sw->sw_kschedule,
						    idat);
					}
				} else if (crd->crd_flags & CRD_F_ENCRYPT) {
					/* XOR with previous block/IV */
					for (j = 0; j < blks; j++)
						idat[j] ^= ivp[j];

					exf->encrypt(sw->sw_kschedule, idat);
					ivp = idat;
				} else {	/* decrypt */
					/*
					 * Keep encrypted block to be used
					 * in next block's processing.
					 */
					nivp = (ivp == iv) ? iv2 : iv;
					bcopy(idat, nivp, blks);

					exf->decrypt(sw->sw_kschedule, idat);

					/* XOR with previous block/IV */
					for (j = 0; j < blks; j++)
						idat[j] ^= ivp[j];
					ivp = nivp;
				}

				idat += blks;
				k += blks;
				i -= blks;
			}
		}
	} else {
		/* Find beginning of data */
		count = crd->crd_skip;
		ind = cuio_getptr(uio, count, &k);
		if (ind == -1)
			return EINVAL;

		i = crd->crd_len;

		while (i > 0) {
			/*
			 * If there's insufficient data at the end,
			 * we have to do some copying.
			 */
			if (uio->uio_iov[ind].iov_len < k + blks &&
			    uio->uio_iov[ind].iov_len != k) {
				/* Gather one block spanning iovecs. */
				cuio_copydata(uio, count, blks, blk);

				/* Actual encryption/decryption */
				if (exf->reinit) {
					if (crd->crd_flags & CRD_F_ENCRYPT) {
						exf->encrypt(sw->sw_kschedule,
						    blk);
					} else {
						exf->decrypt(sw->sw_kschedule,
						    blk);
					}
				} else if (crd->crd_flags & CRD_F_ENCRYPT) {
					/* XOR with previous block */
					for (j = 0; j < blks; j++)
						blk[j] ^= ivp[j];

					exf->encrypt(sw->sw_kschedule, blk);

					/*
					 * Keep encrypted block for XOR'ing
					 * with next block
					 */
					bcopy(blk, iv, blks);
					ivp = iv;
				} else {	/* decrypt */
					/*
					 * Keep encrypted block for XOR'ing
					 * with next block
					 */
					nivp = (ivp == iv) ? iv2 : iv;
					bcopy(blk, nivp, blks);

					exf->decrypt(sw->sw_kschedule, blk);

					/* XOR with previous block */
					for (j = 0; j < blks; j++)
						blk[j] ^= ivp[j];
					ivp = nivp;
				}

				/* Copy back decrypted block */
				cuio_copyback(uio, count, blks, blk);

				count += blks;

				/* Advance pointer */
				ind = cuio_getptr(uio, count, &k);
				if (ind == -1)
					return (EINVAL);

				i -= blks;

				/* Could be done... */
				if (i == 0)
					break;
			}

			/*
			 * Warning: idat may point to garbage here, but
			 * we only use it in the while() loop, only if
			 * there are indeed enough data.
			 */
			idat = (char *)uio->uio_iov[ind].iov_base + k;

			/* Fast path: whole blocks inside one iovec. */
			while (uio->uio_iov[ind].iov_len >= k + blks &&
			    i > 0) {
				if (exf->reinit) {
					if (crd->crd_flags & CRD_F_ENCRYPT) {
						exf->encrypt(sw->sw_kschedule,
						    idat);
					} else {
						exf->decrypt(sw->sw_kschedule,
						    idat);
					}
				} else if (crd->crd_flags & CRD_F_ENCRYPT) {
					/* XOR with previous block/IV */
					for (j = 0; j < blks; j++)
						idat[j] ^= ivp[j];

					exf->encrypt(sw->sw_kschedule, idat);
					ivp = idat;
				} else {	/* decrypt */
					/*
					 * Keep encrypted block to be used
					 * in next block's processing.
					 */
					nivp = (ivp == iv) ? iv2 : iv;
					bcopy(idat, nivp, blks);

					exf->decrypt(sw->sw_kschedule, idat);

					/* XOR with previous block/IV */
					for (j = 0; j < blks; j++)
						idat[j] ^= ivp[j];
					ivp = nivp;
				}

				idat += blks;
				count += blks;
				k += blks;
				i -= blks;
			}

			/*
			 * Advance to the next iov if the end of the current iov
			 * is aligned with the end of a cipher block.
			 * Note that the code is equivalent to calling:
			 *	ind = cuio_getptr(uio, count, &k);
			 */
			if (i > 0 && k == uio->uio_iov[ind].iov_len) {
				k = 0;
				ind++;
				if (ind >= uio->uio_iovcnt)
					return (EINVAL);
			}
		}
	}

	return 0; /* Done with encryption/decryption */
}
415 
416 /*
417  * Compute keyed-hash authenticator.
418  */
419 int
420 swcr_authcompute(struct cryptop *crp, struct cryptodesc *crd,
421     struct swcr_data *sw, caddr_t buf, int outtype)
422 {
423 	unsigned char aalg[AALG_MAX_RESULT_LEN];
424 	struct auth_hash *axf;
425 	union authctx ctx;
426 	int err;
427 
428 	if (sw->sw_ictx == 0)
429 		return EINVAL;
430 
431 	axf = sw->sw_axf;
432 
433 	bcopy(sw->sw_ictx, &ctx, axf->ctxsize);
434 
435 	if (outtype == CRYPTO_BUF_MBUF)
436 		err = m_apply((struct mbuf *) buf, crd->crd_skip, crd->crd_len,
437 		    (int (*)(caddr_t, caddr_t, unsigned int)) axf->Update,
438 		    (caddr_t) &ctx);
439 	else
440 		err = cuio_apply((struct uio *) buf, crd->crd_skip,
441 		    crd->crd_len,
442 		    (int (*)(caddr_t, caddr_t, unsigned int)) axf->Update,
443 		    (caddr_t) &ctx);
444 
445 	if (err)
446 		return err;
447 
448 	if (crd->crd_flags & CRD_F_ESN)
449 		axf->Update(&ctx, crd->crd_esn, 4);
450 
451 	switch (sw->sw_alg) {
452 	case CRYPTO_MD5_HMAC:
453 	case CRYPTO_SHA1_HMAC:
454 	case CRYPTO_RIPEMD160_HMAC:
455 	case CRYPTO_SHA2_256_HMAC:
456 	case CRYPTO_SHA2_384_HMAC:
457 	case CRYPTO_SHA2_512_HMAC:
458 		if (sw->sw_octx == NULL)
459 			return EINVAL;
460 
461 		axf->Final(aalg, &ctx);
462 		bcopy(sw->sw_octx, &ctx, axf->ctxsize);
463 		axf->Update(&ctx, aalg, axf->hashsize);
464 		axf->Final(aalg, &ctx);
465 		break;
466 
467 	case CRYPTO_MD5:
468 	case CRYPTO_SHA1:
469 		axf->Final(aalg, &ctx);
470 		break;
471 	}
472 
473 	/* Inject the authentication data */
474 	if (outtype == CRYPTO_BUF_MBUF)
475 		COPYBACK(outtype, buf, crd->crd_inject, axf->authsize, aalg);
476 	else
477 		bcopy(aalg, crp->crp_mac, axf->authsize);
478 
479 	return 0;
480 }
481 
/*
 * Apply a combined encryption-authentication transformation
 * (AES-GCM / AES-GMAC).  The session must contain exactly one
 * encryption descriptor (AES_GCM_16 or AES_GMAC) and one
 * authentication descriptor (AES_{128,192,256}_GMAC); the AAD region
 * is described by crda and the payload by crde.  Returns 0 on success
 * or an errno value.
 */
int
swcr_authenc(struct cryptop *crp)
{
	uint32_t blkbuf[howmany(EALG_MAX_BLOCK_LEN, sizeof(uint32_t))];
	u_char *blk = (u_char *)blkbuf;
	u_char aalg[AALG_MAX_RESULT_LEN];
	u_char iv[EALG_MAX_BLOCK_LEN];
	union authctx ctx;
	struct cryptodesc *crd, *crda = NULL, *crde = NULL;
	struct swcr_data *sw, *swa, *swe = NULL;
	struct auth_hash *axf = NULL;
	struct enc_xform *exf = NULL;
	struct mbuf *m = NULL;
	struct uio *uio = NULL;
	caddr_t buf = (caddr_t)crp->crp_buf;
	uint32_t *blkp;
	int aadlen, blksz, i, ivlen, outtype, len, iskip, oskip;

	ivlen = blksz = iskip = oskip = 0;

	/*
	 * Pair each descriptor with its session context: swe/crde for
	 * the cipher, swa/crda for the MAC.
	 */
	for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
		for (sw = swcr_sessions[crp->crp_sid & 0xffffffff];
		     sw && sw->sw_alg != crd->crd_alg;
		     sw = sw->sw_next)
			;
		if (sw == NULL)
			return (EINVAL);

		switch (sw->sw_alg) {
		case CRYPTO_AES_GCM_16:
		case CRYPTO_AES_GMAC:
			swe = sw;
			crde = crd;
			exf = swe->sw_exf;
			ivlen = exf->ivsize;
			break;
		case CRYPTO_AES_128_GMAC:
		case CRYPTO_AES_192_GMAC:
		case CRYPTO_AES_256_GMAC:
			swa = sw;
			crda = crd;
			axf = swa->sw_axf;
			if (swa->sw_ictx == 0)
				return (EINVAL);
			bcopy(swa->sw_ictx, &ctx, axf->ctxsize);
			blksz = axf->blocksize;
			break;
		default:
			return (EINVAL);
		}
	}
	/* Both halves are mandatory for a combined transform. */
	if (crde == NULL || crda == NULL)
		return (EINVAL);

	if (crp->crp_flags & CRYPTO_F_IMBUF) {
		outtype = CRYPTO_BUF_MBUF;
		m = (struct mbuf *)buf;
	} else {
		outtype = CRYPTO_BUF_IOV;
		uio = (struct uio *)buf;
	}

	/* Initialize the IV */
	if (crde->crd_flags & CRD_F_ENCRYPT) {
		/* IV explicitly provided ? */
		if (crde->crd_flags & CRD_F_IV_EXPLICIT)
			bcopy(crde->crd_iv, iv, ivlen);
		else
			arc4random_buf(iv, ivlen);

		/* Do we need to write the IV */
		if (!(crde->crd_flags & CRD_F_IV_PRESENT))
			COPYBACK(outtype, buf, crde->crd_inject, ivlen, iv);

	} else {	/* Decryption */
			/* IV explicitly provided ? */
		if (crde->crd_flags & CRD_F_IV_EXPLICIT)
			bcopy(crde->crd_iv, iv, ivlen);
		else {
			/* Get IV off buf */
			COPYDATA(outtype, buf, crde->crd_inject, ivlen, iv);
		}
	}

	/* Supply MAC with IV */
	if (axf->Reinit)
		axf->Reinit(&ctx, iv, ivlen);

	/* Supply MAC with AAD */
	aadlen = crda->crd_len;
	/*
	 * Section 5 of RFC 4106 specifies that AAD construction consists of
	 * {SPI, ESN, SN} whereas the real packet contains only {SPI, SN}.
	 * Unfortunately it doesn't follow a good example set in the Section
	 * 3.3.2.1 of RFC 4303 where upper part of the ESN, located in the
	 * external (to the packet) memory buffer, is processed by the hash
	 * function in the end thus allowing to retain simple programming
	 * interfaces and avoid kludges like the one below.
	 */
	if (crda->crd_flags & CRD_F_ESN) {
		aadlen += 4;
		/* SPI */
		COPYDATA(outtype, buf, crda->crd_skip, 4, blk);
		iskip = 4; /* loop below will start with an offset of 4 */
		/* ESN */
		bcopy(crda->crd_esn, blk + 4, 4);
		oskip = iskip + 4; /* offset output buffer blk by 8 */
	}
	/* Hash the AAD one MAC block at a time, zero-padding the tail. */
	for (i = iskip; i < crda->crd_len; i += blksz) {
		len = MIN(crda->crd_len - i, blksz - oskip);
		COPYDATA(outtype, buf, crda->crd_skip + i, len, blk + oskip);
		bzero(blk + len + oskip, blksz - len - oskip);
		axf->Update(&ctx, blk, blksz);
		oskip = 0; /* reset initial output offset */
	}

	if (exf->reinit)
		exf->reinit(swe->sw_kschedule, iv);

	/*
	 * Do encryption/decryption with MAC.  Encrypt-then-MAC on the
	 * way out, MAC-then-decrypt on the way in, so the MAC always
	 * covers the ciphertext.
	 */
	for (i = 0; i < crde->crd_len; i += blksz) {
		len = MIN(crde->crd_len - i, blksz);
		if (len < blksz)
			bzero(blk, blksz);
		COPYDATA(outtype, buf, crde->crd_skip + i, len, blk);
		if (crde->crd_flags & CRD_F_ENCRYPT) {
			exf->encrypt(swe->sw_kschedule, blk);
			axf->Update(&ctx, blk, len);
		} else {
			axf->Update(&ctx, blk, len);
			exf->decrypt(swe->sw_kschedule, blk);
		}
		COPYBACK(outtype, buf, crde->crd_skip + i, len, blk);
	}

	/* Do any required special finalization */
	switch (crda->crd_alg) {
		case CRYPTO_AES_128_GMAC:
		case CRYPTO_AES_192_GMAC:
		case CRYPTO_AES_256_GMAC:
			/*
			 * GHASH length block: bit lengths of the AAD and
			 * of the ciphertext as big-endian values in the
			 * 2nd and 4th 32-bit words of the block.
			 */
			bzero(blk, blksz);
			blkp = (uint32_t *)blk + 1;
			*blkp = htobe32(aadlen * 8);
			blkp = (uint32_t *)blk + 3;
			*blkp = htobe32(crde->crd_len * 8);
			axf->Update(&ctx, blk, blksz);
			break;
	}

	/* Finalize MAC */
	axf->Final(aalg, &ctx);

	/* Inject the authentication data */
	if (outtype == CRYPTO_BUF_MBUF)
		COPYBACK(outtype, buf, crda->crd_inject, axf->authsize, aalg);
	else
		bcopy(aalg, crp->crp_mac, axf->authsize);

	return (0);
}
646 
647 /*
648  * Apply a compression/decompression algorithm
649  */
650 int
651 swcr_compdec(struct cryptodesc *crd, struct swcr_data *sw,
652     caddr_t buf, int outtype)
653 {
654 	u_int8_t *data, *out;
655 	struct comp_algo *cxf;
656 	int adj;
657 	u_int32_t result;
658 
659 	cxf = sw->sw_cxf;
660 
661 	/* We must handle the whole buffer of data in one time
662 	 * then if there is not all the data in the mbuf, we must
663 	 * copy in a buffer.
664 	 */
665 
666 	data = malloc(crd->crd_len, M_CRYPTO_DATA, M_NOWAIT);
667 	if (data == NULL)
668 		return (EINVAL);
669 	COPYDATA(outtype, buf, crd->crd_skip, crd->crd_len, data);
670 
671 	if (crd->crd_flags & CRD_F_COMP)
672 		result = cxf->compress(data, crd->crd_len, &out);
673 	else
674 		result = cxf->decompress(data, crd->crd_len, &out);
675 
676 	free(data, M_CRYPTO_DATA, crd->crd_len);
677 	if (result == 0)
678 		return EINVAL;
679 
680 	/* Copy back the (de)compressed data. m_copyback is
681 	 * extending the mbuf as necessary.
682 	 */
683 	sw->sw_size = result;
684 	/* Check the compressed size when doing compression */
685 	if (crd->crd_flags & CRD_F_COMP) {
686 		if (result > crd->crd_len) {
687 			/* Compression was useless, we lost time */
688 			free(out, M_CRYPTO_DATA, 0);
689 			return 0;
690 		}
691 	}
692 
693 	COPYBACK(outtype, buf, crd->crd_skip, result, out);
694 	if (result < crd->crd_len) {
695 		adj = result - crd->crd_len;
696 		if (outtype == CRYPTO_BUF_MBUF) {
697 			adj = result - crd->crd_len;
698 			m_adj((struct mbuf *)buf, adj);
699 		} else {
700 			struct uio *uio = (struct uio *)buf;
701 			int ind;
702 
703 			adj = crd->crd_len - result;
704 			ind = uio->uio_iovcnt - 1;
705 
706 			while (adj > 0 && ind >= 0) {
707 				if (adj < uio->uio_iov[ind].iov_len) {
708 					uio->uio_iov[ind].iov_len -= adj;
709 					break;
710 				}
711 
712 				adj -= uio->uio_iov[ind].iov_len;
713 				uio->uio_iov[ind].iov_len = 0;
714 				ind--;
715 				uio->uio_iovcnt--;
716 			}
717 		}
718 	}
719 	free(out, M_CRYPTO_DATA, 0);
720 	return 0;
721 }
722 
/*
 * Generate a new software session.
 *
 * Allocates a slot in the session table (growing it as needed) and
 * builds a chain of swcr_data contexts, one per cryptoini in the cri
 * list, precomputing key schedules and HMAC inner/outer contexts.
 * On success the session id is stored in *sid and 0 is returned;
 * otherwise EINVAL or ENOBUFS, with any partial state freed.
 */
int
swcr_newsession(u_int32_t *sid, struct cryptoini *cri)
{
	struct swcr_data **swd;
	struct auth_hash *axf;
	struct enc_xform *txf;
	struct comp_algo *cxf;
	u_int32_t i;
	int k;

	if (sid == NULL || cri == NULL)
		return EINVAL;

	/* Look for a free slot (slot 0 is never handed out). */
	if (swcr_sessions) {
		for (i = 1; i < swcr_sesnum; i++)
			if (swcr_sessions[i] == NULL)
				break;
	}

	/* No free slot: create the table, or double its size. */
	if (swcr_sessions == NULL || i == swcr_sesnum) {
		if (swcr_sessions == NULL) {
			i = 1; /* We leave swcr_sessions[0] empty */
			swcr_sesnum = CRYPTO_SW_SESSIONS;
		} else
			swcr_sesnum *= 2;

		swd = mallocarray(swcr_sesnum, sizeof(struct swcr_data *),
		    M_CRYPTO_DATA, M_NOWAIT | M_ZERO);
		if (swd == NULL) {
			/* Reset session number */
			if (swcr_sesnum == CRYPTO_SW_SESSIONS)
				swcr_sesnum = 0;
			else
				swcr_sesnum /= 2;
			return ENOBUFS;
		}

		/* Copy existing sessions */
		if (swcr_sessions) {
			bcopy(swcr_sessions, swd,
			    (swcr_sesnum / 2) * sizeof(struct swcr_data *));
			free(swcr_sessions, M_CRYPTO_DATA,
			    (swcr_sesnum / 2) * sizeof(struct swcr_data *));
		}

		swcr_sessions = swd;
	}

	swd = &swcr_sessions[i];
	*sid = i;

	/* Build one swcr_data per algorithm requested in the cri chain. */
	while (cri) {
		*swd = malloc(sizeof(struct swcr_data), M_CRYPTO_DATA,
		    M_NOWAIT | M_ZERO);
		if (*swd == NULL) {
			swcr_freesession(i);
			return ENOBUFS;
		}

		switch (cri->cri_alg) {
		case CRYPTO_DES_CBC:
			txf = &enc_xform_des;
			goto enccommon;
		case CRYPTO_3DES_CBC:
			txf = &enc_xform_3des;
			goto enccommon;
		case CRYPTO_BLF_CBC:
			txf = &enc_xform_blf;
			goto enccommon;
		case CRYPTO_CAST_CBC:
			txf = &enc_xform_cast5;
			goto enccommon;
		case CRYPTO_RIJNDAEL128_CBC:
			txf = &enc_xform_rijndael128;
			goto enccommon;
		case CRYPTO_AES_CTR:
			txf = &enc_xform_aes_ctr;
			goto enccommon;
		case CRYPTO_AES_XTS:
			txf = &enc_xform_aes_xts;
			goto enccommon;
		case CRYPTO_AES_GCM_16:
			txf = &enc_xform_aes_gcm;
			goto enccommon;
		case CRYPTO_AES_GMAC:
			/* GMAC uses no cipher key schedule here. */
			txf = &enc_xform_aes_gmac;
			(*swd)->sw_exf = txf;
			break;
		case CRYPTO_NULL:
			txf = &enc_xform_null;
			goto enccommon;
		enccommon:
			/* Allocate and initialize the key schedule. */
			if (txf->ctxsize > 0) {
				(*swd)->sw_kschedule = malloc(txf->ctxsize,
				    M_CRYPTO_DATA, M_NOWAIT | M_ZERO);
				if ((*swd)->sw_kschedule == NULL) {
					swcr_freesession(i);
					return EINVAL;
				}
			}
			if (txf->setkey((*swd)->sw_kschedule, cri->cri_key,
			    cri->cri_klen / 8) < 0) {
				swcr_freesession(i);
				return EINVAL;
			}
			(*swd)->sw_exf = txf;
			break;

		case CRYPTO_MD5_HMAC:
			axf = &auth_hash_hmac_md5_96;
			goto authcommon;
		case CRYPTO_SHA1_HMAC:
			axf = &auth_hash_hmac_sha1_96;
			goto authcommon;
		case CRYPTO_RIPEMD160_HMAC:
			axf = &auth_hash_hmac_ripemd_160_96;
			goto authcommon;
		case CRYPTO_SHA2_256_HMAC:
			axf = &auth_hash_hmac_sha2_256_128;
			goto authcommon;
		case CRYPTO_SHA2_384_HMAC:
			axf = &auth_hash_hmac_sha2_384_192;
			goto authcommon;
		case CRYPTO_SHA2_512_HMAC:
			axf = &auth_hash_hmac_sha2_512_256;
		authcommon:
			(*swd)->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA,
			    M_NOWAIT);
			if ((*swd)->sw_ictx == NULL) {
				swcr_freesession(i);
				return ENOBUFS;
			}

			(*swd)->sw_octx = malloc(axf->ctxsize, M_CRYPTO_DATA,
			    M_NOWAIT);
			if ((*swd)->sw_octx == NULL) {
				swcr_freesession(i);
				return ENOBUFS;
			}

			/*
			 * Precompute the keyed inner and outer contexts
			 * (RFC 2104).  The key is XORed in place with the
			 * ipad value, then flipped to the opad value, then
			 * restored to its original bytes at the end.
			 */
			for (k = 0; k < cri->cri_klen / 8; k++)
				cri->cri_key[k] ^= HMAC_IPAD_VAL;

			axf->Init((*swd)->sw_ictx);
			axf->Update((*swd)->sw_ictx, cri->cri_key,
			    cri->cri_klen / 8);
			axf->Update((*swd)->sw_ictx, hmac_ipad_buffer,
			    axf->blocksize - (cri->cri_klen / 8));

			for (k = 0; k < cri->cri_klen / 8; k++)
				cri->cri_key[k] ^= (HMAC_IPAD_VAL ^ HMAC_OPAD_VAL);

			axf->Init((*swd)->sw_octx);
			axf->Update((*swd)->sw_octx, cri->cri_key,
			    cri->cri_klen / 8);
			axf->Update((*swd)->sw_octx, hmac_opad_buffer,
			    axf->blocksize - (cri->cri_klen / 8));

			/* Restore the caller's key bytes. */
			for (k = 0; k < cri->cri_klen / 8; k++)
				cri->cri_key[k] ^= HMAC_OPAD_VAL;
			(*swd)->sw_axf = axf;
			break;

		case CRYPTO_MD5:
			axf = &auth_hash_md5;
			goto auth3common;

		case CRYPTO_SHA1:
			axf = &auth_hash_sha1;
		auth3common:
			/* Unkeyed digest: only an initialized context. */
			(*swd)->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA,
			    M_NOWAIT);
			if ((*swd)->sw_ictx == NULL) {
				swcr_freesession(i);
				return ENOBUFS;
			}

			axf->Init((*swd)->sw_ictx);
			(*swd)->sw_axf = axf;
			break;

		case CRYPTO_AES_128_GMAC:
			axf = &auth_hash_gmac_aes_128;
			goto auth4common;

		case CRYPTO_AES_192_GMAC:
			axf = &auth_hash_gmac_aes_192;
			goto auth4common;

		case CRYPTO_AES_256_GMAC:
			axf = &auth_hash_gmac_aes_256;
		auth4common:
			/* GMAC: the key lives in the hash context. */
			(*swd)->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA,
			    M_NOWAIT);
			if ((*swd)->sw_ictx == NULL) {
				swcr_freesession(i);
				return ENOBUFS;
			}
			axf->Init((*swd)->sw_ictx);
			axf->Setkey((*swd)->sw_ictx, cri->cri_key,
			    cri->cri_klen / 8);
			(*swd)->sw_axf = axf;
			break;

		case CRYPTO_DEFLATE_COMP:
			cxf = &comp_algo_deflate;
			(*swd)->sw_cxf = cxf;
			break;
		case CRYPTO_ESN:
			/* nothing to do */
			break;
		default:
			swcr_freesession(i);
			return EINVAL;
		}

		(*swd)->sw_alg = cri->cri_alg;
		cri = cri->cri_next;
		swd = &((*swd)->sw_next);
	}
	return 0;
}
948 
949 /*
950  * Free a session.
951  */
952 int
953 swcr_freesession(u_int64_t tid)
954 {
955 	struct swcr_data *swd;
956 	struct enc_xform *txf;
957 	struct auth_hash *axf;
958 	u_int32_t sid = ((u_int32_t) tid) & 0xffffffff;
959 
960 	if (sid > swcr_sesnum || swcr_sessions == NULL ||
961 	    swcr_sessions[sid] == NULL)
962 		return EINVAL;
963 
964 	/* Silently accept and return */
965 	if (sid == 0)
966 		return 0;
967 
968 	while ((swd = swcr_sessions[sid]) != NULL) {
969 		swcr_sessions[sid] = swd->sw_next;
970 
971 		switch (swd->sw_alg) {
972 		case CRYPTO_DES_CBC:
973 		case CRYPTO_3DES_CBC:
974 		case CRYPTO_BLF_CBC:
975 		case CRYPTO_CAST_CBC:
976 		case CRYPTO_RIJNDAEL128_CBC:
977 		case CRYPTO_AES_CTR:
978 		case CRYPTO_AES_XTS:
979 		case CRYPTO_AES_GCM_16:
980 		case CRYPTO_AES_GMAC:
981 		case CRYPTO_NULL:
982 			txf = swd->sw_exf;
983 
984 			if (swd->sw_kschedule) {
985 				explicit_bzero(swd->sw_kschedule, txf->ctxsize);
986 				free(swd->sw_kschedule, M_CRYPTO_DATA, 0);
987 			}
988 			break;
989 
990 		case CRYPTO_MD5_HMAC:
991 		case CRYPTO_SHA1_HMAC:
992 		case CRYPTO_RIPEMD160_HMAC:
993 		case CRYPTO_SHA2_256_HMAC:
994 		case CRYPTO_SHA2_384_HMAC:
995 		case CRYPTO_SHA2_512_HMAC:
996 			axf = swd->sw_axf;
997 
998 			if (swd->sw_ictx) {
999 				explicit_bzero(swd->sw_ictx, axf->ctxsize);
1000 				free(swd->sw_ictx, M_CRYPTO_DATA, 0);
1001 			}
1002 			if (swd->sw_octx) {
1003 				explicit_bzero(swd->sw_octx, axf->ctxsize);
1004 				free(swd->sw_octx, M_CRYPTO_DATA, 0);
1005 			}
1006 			break;
1007 
1008 		case CRYPTO_AES_128_GMAC:
1009 		case CRYPTO_AES_192_GMAC:
1010 		case CRYPTO_AES_256_GMAC:
1011 		case CRYPTO_MD5:
1012 		case CRYPTO_SHA1:
1013 			axf = swd->sw_axf;
1014 
1015 			if (swd->sw_ictx) {
1016 				explicit_bzero(swd->sw_ictx, axf->ctxsize);
1017 				free(swd->sw_ictx, M_CRYPTO_DATA, 0);
1018 			}
1019 			break;
1020 		}
1021 
1022 		free(swd, M_CRYPTO_DATA, 0);
1023 	}
1024 	return 0;
1025 }
1026 
/*
 * Process a software request (the crypto framework's process callback).
 *
 * Walks the request's descriptor chain, finds the matching per-algorithm
 * context in the session, and dispatches to the encryption, MAC,
 * combined, or compression handler.  Errors are reported via
 * crp->crp_etype; crypto_done() is always called.  Always returns 0.
 */
int
swcr_process(struct cryptop *crp)
{
	struct cryptodesc *crd;
	struct swcr_data *sw;
	u_int32_t lid;
	int type;

	/* Sanity check */
	if (crp == NULL)
		return EINVAL;

	if (crp->crp_desc == NULL || crp->crp_buf == NULL) {
		crp->crp_etype = EINVAL;
		goto done;
	}

	/* Low 32 bits of the session id index the session table. */
	lid = crp->crp_sid & 0xffffffff;
	if (lid >= swcr_sesnum || lid == 0 || swcr_sessions[lid] == NULL) {
		crp->crp_etype = ENOENT;
		goto done;
	}

	if (crp->crp_flags & CRYPTO_F_IMBUF)
		type = CRYPTO_BUF_MBUF;
	else
		type = CRYPTO_BUF_IOV;

	/* Go through crypto descriptors, processing as we go */
	for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
		/*
		 * Find the crypto context.
		 *
		 * XXX Note that the logic here prevents us from having
		 * XXX the same algorithm multiple times in a session
		 * XXX (or rather, we can but it won't give us the right
		 * XXX results). To do that, we'd need some way of differentiating
		 * XXX between the various instances of an algorithm (so we can
		 * XXX locate the correct crypto context).
		 */
		for (sw = swcr_sessions[lid];
		    sw && sw->sw_alg != crd->crd_alg;
		    sw = sw->sw_next)
			;

		/* No such context ? */
		if (sw == NULL) {
			crp->crp_etype = EINVAL;
			goto done;
		}

		switch (sw->sw_alg) {
		case CRYPTO_NULL:
			break;
		case CRYPTO_DES_CBC:
		case CRYPTO_3DES_CBC:
		case CRYPTO_BLF_CBC:
		case CRYPTO_CAST_CBC:
		case CRYPTO_RIJNDAEL128_CBC:
		case CRYPTO_AES_CTR:
		case CRYPTO_AES_XTS:
			if ((crp->crp_etype = swcr_encdec(crd, sw,
			    crp->crp_buf, type)) != 0)
				goto done;
			break;
		case CRYPTO_MD5_HMAC:
		case CRYPTO_SHA1_HMAC:
		case CRYPTO_RIPEMD160_HMAC:
		case CRYPTO_SHA2_256_HMAC:
		case CRYPTO_SHA2_384_HMAC:
		case CRYPTO_SHA2_512_HMAC:
		case CRYPTO_MD5:
		case CRYPTO_SHA1:
			if ((crp->crp_etype = swcr_authcompute(crp, crd, sw,
			    crp->crp_buf, type)) != 0)
				goto done;
			break;

		case CRYPTO_AES_GCM_16:
		case CRYPTO_AES_GMAC:
		case CRYPTO_AES_128_GMAC:
		case CRYPTO_AES_192_GMAC:
		case CRYPTO_AES_256_GMAC:
			/* Combined mode consumes the whole chain itself. */
			crp->crp_etype = swcr_authenc(crp);
			goto done;

		case CRYPTO_DEFLATE_COMP:
			if ((crp->crp_etype = swcr_compdec(crd, sw,
			    crp->crp_buf, type)) != 0)
				goto done;
			else
				crp->crp_olen = (int)sw->sw_size;
			break;

		default:
			/* Unknown/unsupported algorithm */
			crp->crp_etype = EINVAL;
			goto done;
		}
	}

done:
	crypto_done(crp);
	return 0;
}
1135 
1136 /*
1137  * Initialize the driver, called from the kernel main().
1138  */
1139 void
1140 swcr_init(void)
1141 {
1142 	int algs[CRYPTO_ALGORITHM_MAX + 1];
1143 	int flags = CRYPTOCAP_F_SOFTWARE | CRYPTOCAP_F_ENCRYPT_MAC |
1144 	    CRYPTOCAP_F_MAC_ENCRYPT;
1145 
1146 	swcr_id = crypto_get_driverid(flags);
1147 	if (swcr_id < 0) {
1148 		/* This should never happen */
1149 		panic("Software crypto device cannot initialize!");
1150 	}
1151 
1152 	bzero(algs, sizeof(algs));
1153 
1154 	algs[CRYPTO_DES_CBC] = CRYPTO_ALG_FLAG_SUPPORTED;
1155 	algs[CRYPTO_3DES_CBC] = CRYPTO_ALG_FLAG_SUPPORTED;
1156 	algs[CRYPTO_BLF_CBC] = CRYPTO_ALG_FLAG_SUPPORTED;
1157 	algs[CRYPTO_CAST_CBC] = CRYPTO_ALG_FLAG_SUPPORTED;
1158 	algs[CRYPTO_MD5_HMAC] = CRYPTO_ALG_FLAG_SUPPORTED;
1159 	algs[CRYPTO_SHA1_HMAC] = CRYPTO_ALG_FLAG_SUPPORTED;
1160 	algs[CRYPTO_RIPEMD160_HMAC] = CRYPTO_ALG_FLAG_SUPPORTED;
1161 	algs[CRYPTO_MD5] = CRYPTO_ALG_FLAG_SUPPORTED;
1162 	algs[CRYPTO_SHA1] = CRYPTO_ALG_FLAG_SUPPORTED;
1163 	algs[CRYPTO_RIJNDAEL128_CBC] = CRYPTO_ALG_FLAG_SUPPORTED;
1164 	algs[CRYPTO_AES_CTR] = CRYPTO_ALG_FLAG_SUPPORTED;
1165 	algs[CRYPTO_AES_XTS] = CRYPTO_ALG_FLAG_SUPPORTED;
1166 	algs[CRYPTO_AES_GCM_16] = CRYPTO_ALG_FLAG_SUPPORTED;
1167 	algs[CRYPTO_AES_GMAC] = CRYPTO_ALG_FLAG_SUPPORTED;
1168 	algs[CRYPTO_DEFLATE_COMP] = CRYPTO_ALG_FLAG_SUPPORTED;
1169 	algs[CRYPTO_NULL] = CRYPTO_ALG_FLAG_SUPPORTED;
1170 	algs[CRYPTO_SHA2_256_HMAC] = CRYPTO_ALG_FLAG_SUPPORTED;
1171 	algs[CRYPTO_SHA2_384_HMAC] = CRYPTO_ALG_FLAG_SUPPORTED;
1172 	algs[CRYPTO_SHA2_512_HMAC] = CRYPTO_ALG_FLAG_SUPPORTED;
1173 	algs[CRYPTO_AES_128_GMAC] = CRYPTO_ALG_FLAG_SUPPORTED;
1174 	algs[CRYPTO_AES_192_GMAC] = CRYPTO_ALG_FLAG_SUPPORTED;
1175 	algs[CRYPTO_AES_256_GMAC] = CRYPTO_ALG_FLAG_SUPPORTED;
1176 	algs[CRYPTO_ESN] = CRYPTO_ALG_FLAG_SUPPORTED;
1177 
1178 	crypto_register(swcr_id, algs, swcr_newsession,
1179 	    swcr_freesession, swcr_process);
1180 }
1181