/*-
 * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu)
 * Copyright (c) 2002-2006 Sam Leffler, Errno Consulting
 *
 * This code was written by Angelos D. Keromytis in Athens, Greece, in
 * February 2000. Network Security Technologies Inc. (NSTI) kindly
 * supported the development of this code.
 *
 * Copyright (c) 2000, 2001 Angelos D. Keromytis
 *
 * SMP modifications by Matthew Dillon for the DragonFlyBSD Project
 *
 * Permission to use, copy, and modify this software with or without fee
 * is hereby granted, provided that this entire notice is included in
 * all source code copies of any software which is or includes a copy or
 * modification of this software.
 *
 * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
 * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
 * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
 * PURPOSE.
 *
 * $FreeBSD: src/sys/opencrypto/cryptosoft.c,v 1.23 2009/02/05 17:43:12 imp Exp $
 * $OpenBSD: cryptosoft.c,v 1.35 2002/04/26 08:43:50 deraadt Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/sysctl.h>
#include <sys/errno.h>
#include <sys/endian.h>
#include <sys/random.h>
#include <sys/kernel.h>
#include <sys/uio.h>
#include <sys/spinlock2.h>

#include <crypto/blowfish/blowfish.h>
#include <crypto/sha1.h>
#include <opencrypto/rmd160.h>
#include <opencrypto/cast.h>
#include <opencrypto/skipjack.h>
#include <sys/md5.h>

#include <opencrypto/cryptodev.h>
#include <opencrypto/cryptosoft.h>
#include <opencrypto/xform.h>

#include <sys/kobj.h>
#include <sys/bus.h>
#include "cryptodev_if.h"

static	int32_t swcr_id;
static	struct swcr_data **swcr_sessions = NULL;
static	u_int32_t swcr_sesnum;
static	u_int32_t swcr_minsesnum = 1;

static struct spinlock swcr_spin = SPINLOCK_INITIALIZER(swcr_spin, "swcr_spin");

u_int8_t hmac_ipad_buffer[HMAC_MAX_BLOCK_LEN];
u_int8_t hmac_opad_buffer[HMAC_MAX_BLOCK_LEN];

static	int swcr_encdec(struct cryptodesc *, struct swcr_data *, caddr_t, int);
static	int swcr_authcompute(struct cryptodesc *, struct swcr_data *, caddr_t, int);
static	int swcr_combined(struct cryptop *);
static	int swcr_compdec(struct cryptodesc *, struct swcr_data *, caddr_t, int);
static	int swcr_freesession(device_t dev, u_int64_t tid);
static	int swcr_freesession_slot(struct swcr_data **swdp, u_int32_t sid);

/*
 * Apply a symmetric encryption/decryption algorithm.
 */
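/*
 * For the classical block-cipher modes handled below, blocks are
 * chained CBC-style:
 *
 *	C[i] = E_k(P[i] ^ C[i-1])	(C[0] chained off the IV)
 *	P[i] = D_k(C[i]) ^ C[i-1]
 *
 * Because decryption is done in place, each ciphertext block must be
 * saved *before* it is decrypted so it can serve as the chaining
 * value for the next block; that is what the iv/iv2 ping-pong
 * buffers below implement.
 */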
static int
swcr_encdec(struct cryptodesc *crd, struct swcr_data *sw, caddr_t buf,
    int flags)
{
	unsigned char iv[EALG_MAX_BLOCK_LEN], blk[EALG_MAX_BLOCK_LEN], *idat;
	unsigned char *ivp, *nivp, iv2[EALG_MAX_BLOCK_LEN];
	u_int8_t *kschedule;
	u_int8_t *okschedule;
	struct enc_xform *exf;
	int i, k, j, blks, ivlen;
	int error;
	int explicit_kschedule;

	exf = sw->sw_exf;
	blks = exf->blocksize;
	ivlen = exf->ivsize;

	/* Reject data that is not a multiple of the block size */
	if (crd->crd_len % blks)
		return EINVAL;

	/* Initialize the IV */
	if (crd->crd_flags & CRD_F_ENCRYPT) {
		/* IV explicitly provided? */
		if (crd->crd_flags & CRD_F_IV_EXPLICIT)
			bcopy(crd->crd_iv, iv, ivlen);
		else
			karc4random_buf(iv, ivlen);

		/* Do we need to write the IV? */
		if (!(crd->crd_flags & CRD_F_IV_PRESENT))
			crypto_copyback(flags, buf, crd->crd_inject, ivlen, iv);

	} else {	/* Decryption */
		/* IV explicitly provided? */
		if (crd->crd_flags & CRD_F_IV_EXPLICIT)
			bcopy(crd->crd_iv, iv, ivlen);
		else {
			/* Get IV off buf */
			crypto_copydata(flags, buf, crd->crd_inject, ivlen, iv);
		}
	}

	ivp = iv;

	/*
	 * The semantics are seriously broken because the session key
	 * storage was never designed for concurrent ops.
	 */
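	/*
	 * Two cases: with CRD_F_KEY_EXPLICIT a private key schedule is
	 * built for this operation only and swapped into the session
	 * during cleanup (when no other operation holds a reference).
	 * Otherwise the shared session schedule is used, with a
	 * reference count taken under the spinlock so a concurrent
	 * rekey cannot replace and free it mid-operation.
	 */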
	if (crd->crd_flags & CRD_F_KEY_EXPLICIT) {
		kschedule = kmalloc(exf->ctxsize, M_CRYPTO_DATA,
				    M_NOWAIT | M_ZERO);
		if (kschedule == NULL) {
			error = ENOMEM;
			goto out;
		}
		error = exf->setkey(kschedule, crd->crd_key,
				    crd->crd_klen / 8);
		if (error) {
			kfree(kschedule, M_CRYPTO_DATA);
			goto out;
		}
		explicit_kschedule = 1;
	} else {
		spin_lock(&swcr_spin);
		kschedule = sw->sw_kschedule;
		++sw->sw_kschedule_refs;
		spin_unlock(&swcr_spin);
		explicit_kschedule = 0;
	}

	/*
	 * xforms that provide a reinit method (e.g. the CTR, XTS and
	 * GCM transforms) perform all IV handling themselves.
	 */
	if (exf->reinit)
		exf->reinit(kschedule, iv);

	if (flags & CRYPTO_F_IMBUF) {
		struct mbuf *m = (struct mbuf *) buf;

		/* Find beginning of data */
		m = m_getptr(m, crd->crd_skip, &k);
		if (m == NULL) {
			error = EINVAL;
			goto done;
		}

		i = crd->crd_len;

		while (i > 0) {
			/*
			 * If there's insufficient data at the end of
			 * an mbuf, we have to do some copying.
			 */
			if (m->m_len < k + blks && m->m_len != k) {
				m_copydata(m, k, blks, blk);

				/* Actual encryption/decryption */
				if (exf->reinit) {
					if (crd->crd_flags & CRD_F_ENCRYPT) {
						exf->encrypt(kschedule,
						    blk, iv);
					} else {
						exf->decrypt(kschedule,
						    blk, iv);
					}
				} else if (crd->crd_flags & CRD_F_ENCRYPT) {
					/* XOR with previous block */
					for (j = 0; j < blks; j++)
						blk[j] ^= ivp[j];

					exf->encrypt(kschedule, blk, iv);

					/*
					 * Keep encrypted block for XOR'ing
					 * with next block
					 */
					bcopy(blk, iv, blks);
					ivp = iv;
				} else {	/* decrypt */
					/*
					 * Keep encrypted block for XOR'ing
					 * with next block
					 */
					nivp = (ivp == iv) ? iv2 : iv;
					bcopy(blk, nivp, blks);

					exf->decrypt(kschedule, blk, iv);

					/* XOR with previous block */
					for (j = 0; j < blks; j++)
						blk[j] ^= ivp[j];

					ivp = nivp;
				}

				/* Copy back the processed block */
				m_copyback(m, k, blks, blk);

				/* Advance pointer */
				m = m_getptr(m, k + blks, &k);
				if (m == NULL) {
					error = EINVAL;
					goto done;
				}

				i -= blks;

				/* Could be done... */
				if (i == 0)
					break;
			}

			/* Skip possibly empty mbufs */
			if (k == m->m_len) {
				for (m = m->m_next; m && m->m_len == 0;
				    m = m->m_next)
					;
				k = 0;
			}

			/* Sanity check */
			if (m == NULL) {
				error = EINVAL;
				goto done;
			}

			/*
			 * Warning: idat may point to garbage here, but
			 * we only use it in the while() loop below, and
			 * only when there is indeed enough data.
			 */
			idat = mtod(m, unsigned char *) + k;

			while (m->m_len >= k + blks && i > 0) {
				if (exf->reinit) {
					if (crd->crd_flags & CRD_F_ENCRYPT) {
						exf->encrypt(kschedule,
						    idat, iv);
					} else {
						exf->decrypt(kschedule,
						    idat, iv);
					}
				} else if (crd->crd_flags & CRD_F_ENCRYPT) {
					/* XOR with previous block/IV */
					for (j = 0; j < blks; j++)
						idat[j] ^= ivp[j];

					exf->encrypt(kschedule, idat, iv);
					ivp = idat;
				} else {	/* decrypt */
					/*
					 * Keep encrypted block to be used
					 * in next block's processing.
					 */
					nivp = (ivp == iv) ? iv2 : iv;
					bcopy(idat, nivp, blks);

					exf->decrypt(kschedule, idat, iv);

					/* XOR with previous block/IV */
					for (j = 0; j < blks; j++)
						idat[j] ^= ivp[j];

					ivp = nivp;
				}

				idat += blks;
				k += blks;
				i -= blks;
			}
		}
		error = 0;	/* Done with mbuf encryption/decryption */
	} else if (flags & CRYPTO_F_IOV) {
		struct uio *uio = (struct uio *) buf;
		struct iovec *iov;

		/* Find beginning of data */
		iov = cuio_getptr(uio, crd->crd_skip, &k);
		if (iov == NULL) {
			error = EINVAL;
			goto done;
		}

		i = crd->crd_len;

		while (i > 0) {
			/*
			 * If there's insufficient data at the end of
			 * an iovec, we have to do some copying.
			 */
			if (iov->iov_len < k + blks && iov->iov_len != k) {
				cuio_copydata(uio, k, blks, blk);

				/* Actual encryption/decryption */
				if (exf->reinit) {
					if (crd->crd_flags & CRD_F_ENCRYPT) {
						exf->encrypt(kschedule,
						    blk, iv);
					} else {
						exf->decrypt(kschedule,
						    blk, iv);
					}
				} else if (crd->crd_flags & CRD_F_ENCRYPT) {
					/* XOR with previous block */
					for (j = 0; j < blks; j++)
						blk[j] ^= ivp[j];

					exf->encrypt(kschedule, blk, iv);

					/*
					 * Keep encrypted block for XOR'ing
					 * with next block
					 */
					bcopy(blk, iv, blks);
					ivp = iv;
				} else {	/* decrypt */
					/*
					 * Keep encrypted block for XOR'ing
					 * with next block
					 */
					nivp = (ivp == iv) ? iv2 : iv;
					bcopy(blk, nivp, blks);

					exf->decrypt(kschedule, blk, iv);

					/* XOR with previous block */
					for (j = 0; j < blks; j++)
						blk[j] ^= ivp[j];

					ivp = nivp;
				}

				/* Copy back the processed block */
				cuio_copyback(uio, k, blks, blk);

				/* Advance pointer */
				iov = cuio_getptr(uio, k + blks, &k);
				if (iov == NULL) {
					error = EINVAL;
					goto done;
				}

				i -= blks;

				/* Could be done... */
				if (i == 0)
					break;
			}

			/*
			 * Warning: idat may point to garbage here, but
			 * we only use it in the while() loop below, and
			 * only when there is indeed enough data.
			 */
			idat = (char *)iov->iov_base + k;

			while (iov->iov_len >= k + blks && i > 0) {
				if (exf->reinit) {
					if (crd->crd_flags & CRD_F_ENCRYPT) {
						exf->encrypt(kschedule,
						    idat, iv);
					} else {
						exf->decrypt(kschedule,
						    idat, iv);
					}
				} else if (crd->crd_flags & CRD_F_ENCRYPT) {
					/* XOR with previous block/IV */
					for (j = 0; j < blks; j++)
						idat[j] ^= ivp[j];

					exf->encrypt(kschedule, idat, iv);
					ivp = idat;
				} else {	/* decrypt */
					/*
					 * Keep encrypted block to be used
					 * in next block's processing.
					 */
					nivp = (ivp == iv) ? iv2 : iv;
					bcopy(idat, nivp, blks);

					exf->decrypt(kschedule, idat, iv);

					/* XOR with previous block/IV */
					for (j = 0; j < blks; j++)
						idat[j] ^= ivp[j];

					ivp = nivp;
				}

				idat += blks;
				k += blks;
				i -= blks;
			}
			if (k == iov->iov_len) {
				iov++;
				k = 0;
			}
		}
		error = 0;	/* Done with iovec encryption/decryption */
	} else {
		/*
		 * contiguous buffer
		 */
		if (exf->reinit) {
			for (i = crd->crd_skip;
			    i < crd->crd_skip + crd->crd_len; i += blks) {
				if (crd->crd_flags & CRD_F_ENCRYPT) {
					exf->encrypt(kschedule, buf + i, iv);
				} else {
					exf->decrypt(kschedule, buf + i, iv);
				}
			}
		} else if (crd->crd_flags & CRD_F_ENCRYPT) {
			for (i = crd->crd_skip;
			    i < crd->crd_skip + crd->crd_len; i += blks) {
				/* XOR with the IV/previous block, as appropriate. */
				if (i == crd->crd_skip)
					for (k = 0; k < blks; k++)
						buf[i + k] ^= ivp[k];
				else
					for (k = 0; k < blks; k++)
						buf[i + k] ^= buf[i + k - blks];
				exf->encrypt(kschedule, buf + i, iv);
			}
		} else {		/* Decrypt */
			/*
			 * Start at the end, so we don't need to keep the
			 * encrypted block as the IV for the next block.
			 */
			for (i = crd->crd_skip + crd->crd_len - blks;
			    i >= crd->crd_skip; i -= blks) {
				exf->decrypt(kschedule, buf + i, iv);

				/* XOR with the IV/previous block, as appropriate */
				if (i == crd->crd_skip)
					for (k = 0; k < blks; k++)
						buf[i + k] ^= ivp[k];
				else
					for (k = 0; k < blks; k++)
						buf[i + k] ^= buf[i + k - blks];
			}
		}
		error = 0; /* Done w/contiguous buffer encrypt/decrypt */
	}

done:
	/*
	 * Cleanup - explicitly replace the session key if requested
	 *	     (horrible semantics for concurrent operation)
	 */
	if (explicit_kschedule) {
		okschedule = NULL;
		spin_lock(&swcr_spin);
		if (sw->sw_kschedule && sw->sw_kschedule_refs == 0) {
			okschedule = sw->sw_kschedule;
			sw->sw_kschedule = kschedule;
		} else {
			/* could not install it, discard our private copy */
			okschedule = kschedule;
		}
		spin_unlock(&swcr_spin);
		if (okschedule) {
			bzero(okschedule, exf->ctxsize);
			kfree(okschedule, M_CRYPTO_DATA);
		}
	} else {
		spin_lock(&swcr_spin);
		--sw->sw_kschedule_refs;
		spin_unlock(&swcr_spin);
	}

out:
	return error;
}

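/*
 * Prime the per-session HMAC contexts from the raw key.  HMAC is
 * defined as
 *
 *	HMAC(k, m) = H((k ^ opad) || H((k ^ ipad) || m))
 *
 * so the digest states over the two padded keys can be precomputed
 * once per key (sw_ictx and sw_octx) and merely copied for each
 * request.  The key buffer is XOR'd in place and restored to its
 * original value before returning.  For the KPDK variants the key is
 * instead hashed as a prefix, and saved in sw_octx so it can be
 * appended to the payload later.
 */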
static void
swcr_authprepare(struct auth_hash *axf, struct swcr_data *sw, u_char *key,
    int klen)
{
	int k;

	klen /= 8;

	switch (axf->type) {
	case CRYPTO_MD5_HMAC:
	case CRYPTO_SHA1_HMAC:
	case CRYPTO_SHA2_256_HMAC:
	case CRYPTO_SHA2_384_HMAC:
	case CRYPTO_SHA2_512_HMAC:
	case CRYPTO_NULL_HMAC:
	case CRYPTO_RIPEMD160_HMAC:
		for (k = 0; k < klen; k++)
			key[k] ^= HMAC_IPAD_VAL;

		axf->Init(sw->sw_ictx);
		axf->Update(sw->sw_ictx, key, klen);
		axf->Update(sw->sw_ictx, hmac_ipad_buffer, axf->blocksize - klen);

		for (k = 0; k < klen; k++)
			key[k] ^= (HMAC_IPAD_VAL ^ HMAC_OPAD_VAL);

		axf->Init(sw->sw_octx);
		axf->Update(sw->sw_octx, key, klen);
		axf->Update(sw->sw_octx, hmac_opad_buffer, axf->blocksize - klen);

		for (k = 0; k < klen; k++)
			key[k] ^= HMAC_OPAD_VAL;
		break;
	case CRYPTO_MD5_KPDK:
	case CRYPTO_SHA1_KPDK:
	{
		/* We need a buffer that can hold an md5 and a sha1 result. */
		u_char buf[SHA1_RESULTLEN];

		sw->sw_klen = klen;
		bcopy(key, sw->sw_octx, klen);
		axf->Init(sw->sw_ictx);
		axf->Update(sw->sw_ictx, key, klen);
		axf->Final(buf, sw->sw_ictx);
		break;
	}
	default:
		kprintf("%s: CRD_F_KEY_EXPLICIT flag given, but algorithm %d "
		    "doesn't use keys.\n", __func__, axf->type);
	}
}

/*
 * Compute keyed-hash authenticator.
 */
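/*
 * Thanks to the precomputed ipad/opad contexts, the per-request HMAC
 * cost is one pass over the data plus a short outer pass:
 *
 *	ctx = *sw_ictx;  Update(&ctx, data);   Final(inner, &ctx);
 *	ctx = *sw_octx;  Update(&ctx, inner);  Final(mac, &ctx);
 */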
static int
swcr_authcompute(struct cryptodesc *crd, struct swcr_data *sw, caddr_t buf,
    int flags)
{
	unsigned char aalg[HASH_MAX_LEN];
	struct auth_hash *axf;
	union authctx ctx;
	int err;

	if (sw->sw_ictx == NULL)
		return EINVAL;

	axf = sw->sw_axf;

	if (crd->crd_flags & CRD_F_KEY_EXPLICIT)
		swcr_authprepare(axf, sw, crd->crd_key, crd->crd_klen);

	bcopy(sw->sw_ictx, &ctx, axf->ctxsize);

	err = crypto_apply(flags, buf, crd->crd_skip, crd->crd_len,
	    (int (*)(void *, void *, unsigned int))axf->Update, (caddr_t)&ctx);
	if (err)
		return err;

	switch (sw->sw_alg) {
	case CRYPTO_MD5_HMAC:
	case CRYPTO_SHA1_HMAC:
	case CRYPTO_SHA2_256_HMAC:
	case CRYPTO_SHA2_384_HMAC:
	case CRYPTO_SHA2_512_HMAC:
	case CRYPTO_RIPEMD160_HMAC:
		if (sw->sw_octx == NULL)
			return EINVAL;

		axf->Final(aalg, &ctx);
		bcopy(sw->sw_octx, &ctx, axf->ctxsize);
		axf->Update(&ctx, aalg, axf->hashsize);
		axf->Final(aalg, &ctx);
		break;

	case CRYPTO_MD5_KPDK:
	case CRYPTO_SHA1_KPDK:
		if (sw->sw_octx == NULL)
			return EINVAL;

		axf->Update(&ctx, sw->sw_octx, sw->sw_klen);
		axf->Final(aalg, &ctx);
		break;

	case CRYPTO_NULL_HMAC:
		axf->Final(aalg, &ctx);
		break;
	}

	/* Inject the authentication data */
	crypto_copyback(flags, buf, crd->crd_inject,
	    sw->sw_mlen == 0 ? axf->hashsize : sw->sw_mlen, aalg);
	return 0;
}

/*
 * Apply a combined encryption-authentication transformation
 */
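/*
 * This path services the AES GCM/GMAC modes, which interleave the
 * cipher and the MAC in a single pass: the MAC is fed the AAD first,
 * then each payload block is MAC'd after encryption (or before
 * decryption, so the MAC always covers ciphertext), and finally a
 * block encoding the AAD and payload bit lengths is hashed before
 * the tag is produced.
 */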
static int
swcr_combined(struct cryptop *crp)
{
	uint32_t blkbuf[howmany(EALG_MAX_BLOCK_LEN, sizeof(uint32_t))];
	u_char *blk = (u_char *)blkbuf;
	u_char aalg[HASH_MAX_LEN];
	u_char iv[EALG_MAX_BLOCK_LEN];
	uint8_t *kschedule;
	union authctx ctx;
	struct cryptodesc *crd, *crda = NULL, *crde = NULL;
	struct swcr_data *sw, *swa = NULL, *swe = NULL;
	struct auth_hash *axf = NULL;
	struct enc_xform *exf = NULL;
	caddr_t buf = (caddr_t)crp->crp_buf;
	uint32_t *blkp;
	int i, blksz, ivlen, len;

	blksz = 0;
	ivlen = 0;

	for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
		for (sw = swcr_sessions[crp->crp_sid & 0xffffffff];
		     sw && sw->sw_alg != crd->crd_alg;
		     sw = sw->sw_next)
			;
		if (sw == NULL)
			return (EINVAL);

		switch (sw->sw_alg) {
		case CRYPTO_AES_GCM_16:
		case CRYPTO_AES_GMAC:
			swe = sw;
			crde = crd;
			exf = swe->sw_exf;
			ivlen = exf->ivsize;
			break;
		case CRYPTO_AES_128_GMAC:
		case CRYPTO_AES_192_GMAC:
		case CRYPTO_AES_256_GMAC:
			swa = sw;
			crda = crd;
			axf = swa->sw_axf;
			if (swa->sw_ictx == NULL)
				return (EINVAL);
			bcopy(swa->sw_ictx, &ctx, axf->ctxsize);
			blksz = axf->blocksize;
			break;
		default:
			return (EINVAL);
		}
	}
	if (crde == NULL || crda == NULL)
		return (EINVAL);

	/* Initialize the IV */
	if (crde->crd_flags & CRD_F_ENCRYPT) {
		/* IV explicitly provided? */
		if (crde->crd_flags & CRD_F_IV_EXPLICIT)
			bcopy(crde->crd_iv, iv, ivlen);
		else
			karc4random_buf(iv, ivlen);

		/* Do we need to write the IV? */
		if (!(crde->crd_flags & CRD_F_IV_PRESENT))
			crypto_copyback(crde->crd_flags, buf, crde->crd_inject,
			    ivlen, iv);

	} else {	/* Decryption */
		/* IV explicitly provided? */
		if (crde->crd_flags & CRD_F_IV_EXPLICIT)
			bcopy(crde->crd_iv, iv, ivlen);
		else
			/* Get IV off buf */
			crypto_copydata(crde->crd_flags, buf, crde->crd_inject,
			    ivlen, iv);
	}

	/* Supply MAC with IV */
	if (axf->Reinit)
		axf->Reinit(&ctx, iv, ivlen);

	/* Supply MAC with AAD */
	for (i = 0; i < crda->crd_len; i += blksz) {
		len = MIN(crda->crd_len - i, blksz);
		crypto_copydata(crde->crd_flags, buf, crda->crd_skip + i, len,
		    blk);
		axf->Update(&ctx, blk, len);
	}

	spin_lock(&swcr_spin);
	kschedule = swe->sw_kschedule;
	++swe->sw_kschedule_refs;
	spin_unlock(&swcr_spin);

	if (exf->reinit)
		exf->reinit(kschedule, iv);

	/* Do encryption/decryption with MAC */
	for (i = 0; i < crde->crd_len; i += blksz) {
		len = MIN(crde->crd_len - i, blksz);
		if (len < blksz)
			bzero(blk, blksz);
		crypto_copydata(crde->crd_flags, buf, crde->crd_skip + i, len,
		    blk);
		if (crde->crd_flags & CRD_F_ENCRYPT) {
			exf->encrypt(kschedule, blk, iv);
			axf->Update(&ctx, blk, len);
		} else {
			axf->Update(&ctx, blk, len);
			exf->decrypt(kschedule, blk, iv);
		}
		crypto_copyback(crde->crd_flags, buf, crde->crd_skip + i, len,
		    blk);
	}

	/* Do any required special finalization */
	switch (crda->crd_alg) {
	case CRYPTO_AES_128_GMAC:
	case CRYPTO_AES_192_GMAC:
	case CRYPTO_AES_256_GMAC:
		/* length block */
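		/*
		 * GHASH's final block is two big-endian 64-bit bit
		 * counts: len(AAD) || len(C).  Only the low 32 bits of
		 * each count (uint32_t slots 1 and 3) are written; the
		 * high words stay zero courtesy of the bzero() below,
		 * which assumes both lengths are below 2^32 bits.
		 */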
		bzero(blk, blksz);
		blkp = (uint32_t *)blk + 1;
		*blkp = htobe32(crda->crd_len * 8);
		blkp = (uint32_t *)blk + 3;
		*blkp = htobe32(crde->crd_len * 8);
		axf->Update(&ctx, blk, blksz);
		break;
	}

	/* Finalize MAC */
	axf->Final(aalg, &ctx);

	/* Inject the authentication data */
	crypto_copyback(crda->crd_flags, crp->crp_buf, crda->crd_inject,
	    axf->blocksize, aalg);

	spin_lock(&swcr_spin);
	--swe->sw_kschedule_refs;
	spin_unlock(&swcr_spin);

	return (0);
}

/*
 * Apply a compression/decompression algorithm
 */
static int
swcr_compdec(struct cryptodesc *crd, struct swcr_data *sw,
	     caddr_t buf, int flags)
{
	u_int8_t *data, *out;
	struct comp_algo *cxf;
	int adj;
	u_int32_t result;

	cxf = sw->sw_cxf;

	/*
	 * The (de)compressor needs to see the whole payload at once,
	 * so if the data is split across mbufs or iovecs it must first
	 * be gathered into a contiguous buffer.
	 */
	data = kmalloc(crd->crd_len, M_CRYPTO_DATA, M_INTWAIT);
	crypto_copydata(flags, buf, crd->crd_skip, crd->crd_len, data);

	if (crd->crd_flags & CRD_F_COMP)
		result = cxf->compress(data, crd->crd_len, &out);
	else
		result = cxf->decompress(data, crd->crd_len, &out);

	kfree(data, M_CRYPTO_DATA);
	if (result == 0)
		return EINVAL;

	sw->sw_size = result;
	/* Check the compressed size when doing compression */
	if (crd->crd_flags & CRD_F_COMP) {
		if (result >= crd->crd_len) {
			/* Compression was useless, we lost time */
			kfree(out, M_CRYPTO_DATA);
			return 0;
		}
	}

	/*
	 * Copy back the (de)compressed data.
	 * If CRYPTO_F_IMBUF, the mbuf will be extended as necessary.
	 */
	crypto_copyback(flags, buf, crd->crd_skip, result, out);
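	/*
	 * If the result shrank, trim the excess off the tail of the
	 * buffer: m_adj() with a negative length trims from the end of
	 * an mbuf chain, while for iovecs the trailing entries are
	 * shortened (or emptied) by hand below.
	 */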
	if (result < crd->crd_len) {
		adj = result - crd->crd_len;
		if (flags & CRYPTO_F_IMBUF) {
			m_adj((struct mbuf *)buf, adj);
		} else if (flags & CRYPTO_F_IOV) {
			struct uio *uio = (struct uio *)buf;
			int ind;

			adj = crd->crd_len - result;
			ind = uio->uio_iovcnt - 1;

			while (adj > 0 && ind >= 0) {
				if (adj < uio->uio_iov[ind].iov_len) {
					uio->uio_iov[ind].iov_len -= adj;
					break;
				}

				adj -= uio->uio_iov[ind].iov_len;
				uio->uio_iov[ind].iov_len = 0;
				ind--;
				uio->uio_iovcnt--;
			}
		}
	}
	kfree(out, M_CRYPTO_DATA);
	return 0;
}

/*
 * Generate a new software session.
 */
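/*
 * One swcr_data is allocated per cryptoini in the request chain and
 * linked through sw_next; the finished chain is then installed in the
 * first free swcr_sessions[] slot, growing the table if necessary.
 */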
static int
swcr_newsession(device_t dev, u_int32_t *sid, struct cryptoini *cri)
{
831  	struct swcr_data *swd_base;
832  	struct swcr_data **swd;
833  	struct swcr_data **oswd;
834  	struct auth_hash *axf;
835  	struct enc_xform *txf;
836  	struct comp_algo *cxf;
837  	u_int32_t i;
838  	u_int32_t n;
839  	int error;
840  
841  	if (sid == NULL || cri == NULL)
842  		return EINVAL;
843  
844  	swd_base = NULL;
845  	swd = &swd_base;
846  
847  	while (cri) {
848  		*swd = kmalloc(sizeof(struct swcr_data),
849  			       M_CRYPTO_DATA, M_WAITOK | M_ZERO);
850  
851  		switch (cri->cri_alg) {
852  		case CRYPTO_DES_CBC:
853  			txf = &enc_xform_des;
854  			goto enccommon;
855  		case CRYPTO_3DES_CBC:
856  			txf = &enc_xform_3des;
857  			goto enccommon;
858  		case CRYPTO_BLF_CBC:
859  			txf = &enc_xform_blf;
860  			goto enccommon;
861  		case CRYPTO_CAST_CBC:
862  			txf = &enc_xform_cast5;
863  			goto enccommon;
864  		case CRYPTO_SKIPJACK_CBC:
865  			txf = &enc_xform_skipjack;
866  			goto enccommon;
867  		case CRYPTO_RIJNDAEL128_CBC:
868  			txf = &enc_xform_rijndael128;
869  			goto enccommon;
870  		case CRYPTO_AES_XTS:
871  			txf = &enc_xform_aes_xts;
872  			goto enccommon;
873  		case CRYPTO_AES_CTR:
874  			txf = &enc_xform_aes_ctr;
875  			goto enccommon;
876  		case CRYPTO_AES_GCM_16:
877  			txf = &enc_xform_aes_gcm;
878  			goto enccommon;
879  		case CRYPTO_AES_GMAC:
880  			txf = &enc_xform_aes_gmac;
881  			(*swd)->sw_exf = txf;
882  			break;
883  		case CRYPTO_CAMELLIA_CBC:
884  			txf = &enc_xform_camellia;
885  			goto enccommon;
886  		case CRYPTO_TWOFISH_CBC:
887  			txf = &enc_xform_twofish;
888  			goto enccommon;
889  		case CRYPTO_SERPENT_CBC:
890  			txf = &enc_xform_serpent;
891  			goto enccommon;
892  		case CRYPTO_TWOFISH_XTS:
893  			txf = &enc_xform_twofish_xts;
894  			goto enccommon;
895  		case CRYPTO_SERPENT_XTS:
896  			txf = &enc_xform_serpent_xts;
897  			goto enccommon;
898  		case CRYPTO_NULL_CBC:
899  			txf = &enc_xform_null;
900  			goto enccommon;
901  		enccommon:
902  			KKASSERT(txf->ctxsize > 0);
903  			(*swd)->sw_kschedule = kmalloc(txf->ctxsize,
904  						       M_CRYPTO_DATA,
905  						       M_WAITOK | M_ZERO);
906  			if (cri->cri_key != NULL) {
907  				error = txf->setkey((*swd)->sw_kschedule,
908  						    cri->cri_key,
909  						    cri->cri_klen / 8);
910  				if (error) {
911  					swcr_freesession_slot(&swd_base, 0);
912  					return error;
913  				}
914  			}
915  			(*swd)->sw_exf = txf;
916  			break;
917  
918  		case CRYPTO_MD5_HMAC:
919  			axf = &auth_hash_hmac_md5;
920  			goto authcommon;
921  		case CRYPTO_SHA1_HMAC:
922  			axf = &auth_hash_hmac_sha1;
923  			goto authcommon;
924  		case CRYPTO_SHA2_256_HMAC:
925  			axf = &auth_hash_hmac_sha2_256;
926  			goto authcommon;
927  		case CRYPTO_SHA2_384_HMAC:
928  			axf = &auth_hash_hmac_sha2_384;
929  			goto authcommon;
930  		case CRYPTO_SHA2_512_HMAC:
931  			axf = &auth_hash_hmac_sha2_512;
932  			goto authcommon;
933  		case CRYPTO_NULL_HMAC:
934  			axf = &auth_hash_null;
935  			goto authcommon;
936  		case CRYPTO_RIPEMD160_HMAC:
937  			axf = &auth_hash_hmac_ripemd_160;
938  		authcommon:
939  			(*swd)->sw_ictx = kmalloc(axf->ctxsize, M_CRYPTO_DATA,
940  						  M_WAITOK);
941  			(*swd)->sw_octx = kmalloc(axf->ctxsize, M_CRYPTO_DATA,
942  						  M_WAITOK);
943  
944  			if (cri->cri_key != NULL) {
945  				swcr_authprepare(axf, *swd, cri->cri_key,
946  				    cri->cri_klen);
947  			}
948  
949  			(*swd)->sw_mlen = cri->cri_mlen;
950  			(*swd)->sw_axf = axf;
951  			break;
952  
953  		case CRYPTO_MD5_KPDK:
954  			axf = &auth_hash_key_md5;
955  			goto auth2common;
956  
957  		case CRYPTO_SHA1_KPDK:
958  			axf = &auth_hash_key_sha1;
959  		auth2common:
960  			(*swd)->sw_ictx = kmalloc(axf->ctxsize, M_CRYPTO_DATA,
961  						  M_WAITOK);
962  			(*swd)->sw_octx = kmalloc(cri->cri_klen / 8,
963  						  M_CRYPTO_DATA, M_WAITOK);
964  
965  			/* Store the key so we can "append" it to the payload */
966  			if (cri->cri_key != NULL) {
967  				swcr_authprepare(axf, *swd, cri->cri_key,
968  						 cri->cri_klen);
969  			}
970  
971  			(*swd)->sw_mlen = cri->cri_mlen;
972  			(*swd)->sw_axf = axf;
973  			break;
974  #ifdef notdef
975  		case CRYPTO_MD5:
976  			axf = &auth_hash_md5;
977  			goto auth3common;
978  
979  		case CRYPTO_SHA1:
980  			axf = &auth_hash_sha1;
981  		auth3common:
982  			(*swd)->sw_ictx = kmalloc(axf->ctxsize, M_CRYPTO_DATA,
983  						  M_WAITOK);
984  
985  			axf->Init((*swd)->sw_ictx);
986  			(*swd)->sw_mlen = cri->cri_mlen;
987  			(*swd)->sw_axf = axf;
988  			break;
989  #endif
990  		case CRYPTO_AES_128_GMAC:
991  			axf = &auth_hash_gmac_aes_128;
992  			goto auth4common;
993  
994  		case CRYPTO_AES_192_GMAC:
995  			axf = &auth_hash_gmac_aes_192;
996  			goto auth4common;
997  
998  		case CRYPTO_AES_256_GMAC:
999  			axf = &auth_hash_gmac_aes_256;
1000  		auth4common:
1001  			(*swd)->sw_ictx = kmalloc(axf->ctxsize, M_CRYPTO_DATA,
1002  						  M_NOWAIT);
1003  			if ((*swd)->sw_ictx == NULL) {
1004  				swcr_freesession_slot(&swd_base, 0);
1005  				return ENOBUFS;
1006  			}
1007  
1008  			axf->Init((*swd)->sw_ictx);
1009  			error = axf->Setkey((*swd)->sw_ictx, cri->cri_key,
1010  					    cri->cri_klen / 8);
1011  			if (error) {
1012  				swcr_freesession_slot(&swd_base, 0);
1013  				return error;
1014  			}
1015  			(*swd)->sw_axf = axf;
1016  			break;
1017  
1018  		case CRYPTO_DEFLATE_COMP:
1019  			cxf = &comp_algo_deflate;
1020  			(*swd)->sw_cxf = cxf;
1021  			break;
1022  		default:
1023  			swcr_freesession_slot(&swd_base, 0);
1024  			return EINVAL;
1025  		}
1026  
1027  		(*swd)->sw_alg = cri->cri_alg;
1028  		cri = cri->cri_next;
1029  		swd = &((*swd)->sw_next);
1030  	}
1031  
	for (;;) {
		/*
		 * Atomically allocate a session
		 */
		spin_lock(&swcr_spin);
		for (i = swcr_minsesnum; i < swcr_sesnum; ++i) {
			if (swcr_sessions[i] == NULL)
				break;
		}
		if (i < swcr_sesnum) {
			swcr_sessions[i] = swd_base;
			swcr_minsesnum = i + 1;
			spin_unlock(&swcr_spin);
			break;
		}
		n = swcr_sesnum;
		spin_unlock(&swcr_spin);

		/*
		 * A larger allocation is required, reallocate the array
		 * and replace, checking for SMP races.
		 */
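		/*
		 * Classic grow-without-holding-the-lock pattern: the
		 * larger table is allocated with the spinlock dropped,
		 * then re-checked under the lock.  If another thread
		 * grew the table first ours is discarded; otherwise the
		 * old entries are copied over, the new table installed,
		 * and the old one freed.  The outer loop then retries
		 * the slot allocation.
		 */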
		if (n < CRYPTO_SW_SESSIONS)
			n = CRYPTO_SW_SESSIONS;
		else
			n = n * 3 / 2;
		swd = kmalloc(n * sizeof(struct swcr_data *),
			      M_CRYPTO_DATA, M_WAITOK | M_ZERO);

		spin_lock(&swcr_spin);
		if (swcr_sesnum >= n) {
			spin_unlock(&swcr_spin);
			kfree(swd, M_CRYPTO_DATA);
		} else if (swcr_sesnum) {
			bcopy(swcr_sessions, swd,
			      swcr_sesnum * sizeof(struct swcr_data *));
			oswd = swcr_sessions;
			swcr_sessions = swd;
			swcr_sesnum = n;
			spin_unlock(&swcr_spin);
			kfree(oswd, M_CRYPTO_DATA);
		} else {
			swcr_sessions = swd;
			swcr_sesnum = n;
			spin_unlock(&swcr_spin);
		}
	}

	*sid = i;
	return 0;
}

/*
 * Free a session.
 */
static int
swcr_freesession(device_t dev, u_int64_t tid)
{
	u_int32_t sid = CRYPTO_SESID2LID(tid);

	/* Silently accept and return (slot 0 is never populated) */
	if (sid == 0)
		return 0;

	if (sid >= swcr_sesnum || swcr_sessions == NULL ||
	    swcr_sessions[sid] == NULL) {
		return EINVAL;
	}

	return (swcr_freesession_slot(&swcr_sessions[sid], sid));
}

static
int
swcr_freesession_slot(struct swcr_data **swdp, u_int32_t sid)
{
	struct enc_xform *txf;
	struct auth_hash *axf;
	struct swcr_data *swd;
	struct swcr_data *swnext;

	/*
	 * Protect session detachment with the spinlock.
	 */
	spin_lock(&swcr_spin);
	swnext = *swdp;
	*swdp = NULL;
	if (sid && swcr_minsesnum > sid)
		swcr_minsesnum = sid;
	spin_unlock(&swcr_spin);

	/*
	 * Clean up at our leisure.
	 */
	while ((swd = swnext) != NULL) {
		swnext = swd->sw_next;

		swd->sw_next = NULL;

		switch (swd->sw_alg) {
		case CRYPTO_DES_CBC:
		case CRYPTO_3DES_CBC:
		case CRYPTO_BLF_CBC:
		case CRYPTO_CAST_CBC:
		case CRYPTO_SKIPJACK_CBC:
		case CRYPTO_RIJNDAEL128_CBC:
		case CRYPTO_AES_XTS:
		case CRYPTO_AES_CTR:
		case CRYPTO_AES_GCM_16:
		case CRYPTO_AES_GMAC:
		case CRYPTO_CAMELLIA_CBC:
		case CRYPTO_TWOFISH_CBC:
		case CRYPTO_SERPENT_CBC:
		case CRYPTO_TWOFISH_XTS:
		case CRYPTO_SERPENT_XTS:
		case CRYPTO_NULL_CBC:
			txf = swd->sw_exf;

			if (swd->sw_kschedule) {
				bzero(swd->sw_kschedule, txf->ctxsize);
				kfree(swd->sw_kschedule, M_CRYPTO_DATA);
			}
			break;

		case CRYPTO_MD5_HMAC:
		case CRYPTO_SHA1_HMAC:
		case CRYPTO_SHA2_256_HMAC:
		case CRYPTO_SHA2_384_HMAC:
		case CRYPTO_SHA2_512_HMAC:
		case CRYPTO_RIPEMD160_HMAC:
		case CRYPTO_NULL_HMAC:
			axf = swd->sw_axf;

			if (swd->sw_ictx) {
				bzero(swd->sw_ictx, axf->ctxsize);
				kfree(swd->sw_ictx, M_CRYPTO_DATA);
			}
			if (swd->sw_octx) {
				bzero(swd->sw_octx, axf->ctxsize);
				kfree(swd->sw_octx, M_CRYPTO_DATA);
			}
			break;

		case CRYPTO_MD5_KPDK:
		case CRYPTO_SHA1_KPDK:
			axf = swd->sw_axf;

			if (swd->sw_ictx) {
				bzero(swd->sw_ictx, axf->ctxsize);
				kfree(swd->sw_ictx, M_CRYPTO_DATA);
			}
			if (swd->sw_octx) {
				bzero(swd->sw_octx, swd->sw_klen);
				kfree(swd->sw_octx, M_CRYPTO_DATA);
			}
			break;

		case CRYPTO_AES_128_GMAC:
		case CRYPTO_AES_192_GMAC:
		case CRYPTO_AES_256_GMAC:
		case CRYPTO_MD5:
		case CRYPTO_SHA1:
			axf = swd->sw_axf;

			if (swd->sw_ictx) {
				bzero(swd->sw_ictx, axf->ctxsize);
				kfree(swd->sw_ictx, M_CRYPTO_DATA);
			}
			break;

		case CRYPTO_DEFLATE_COMP:
			break;
		}

		kfree(swd, M_CRYPTO_DATA);
	}
	return 0;
}

/*
 * Process a software request.
 */
static int
swcr_process(device_t dev, struct cryptop *crp, int hint)
{
	struct cryptodesc *crd;
	struct swcr_data *sw;
	u_int32_t lid;

	/* Sanity check */
	if (crp == NULL)
		return EINVAL;

	if (crp->crp_desc == NULL || crp->crp_buf == NULL) {
		crp->crp_etype = EINVAL;
		goto done;
	}

	lid = crp->crp_sid & 0xffffffff;
	if (lid >= swcr_sesnum || lid == 0 || swcr_sessions[lid] == NULL) {
		crp->crp_etype = ENOENT;
		goto done;
	}

	/* Go through crypto descriptors, processing as we go */
	for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
		/*
		 * Find the crypto context.
		 *
		 * XXX Note that the logic here prevents us from having
		 * XXX the same algorithm multiple times in a session
		 * XXX (or rather, we can but it won't give us the right
		 * XXX results). To do that, we'd need some way of
		 * XXX differentiating between the various instances of
		 * XXX an algorithm (so we can locate the correct crypto
		 * XXX context).
		 */
		for (sw = swcr_sessions[lid];
		    sw && sw->sw_alg != crd->crd_alg;
		    sw = sw->sw_next)
			;

		/* No such context? */
		if (sw == NULL) {
			crp->crp_etype = EINVAL;
			goto done;
		}
		switch (sw->sw_alg) {
		case CRYPTO_DES_CBC:
		case CRYPTO_3DES_CBC:
		case CRYPTO_BLF_CBC:
		case CRYPTO_CAST_CBC:
		case CRYPTO_SKIPJACK_CBC:
		case CRYPTO_RIJNDAEL128_CBC:
		case CRYPTO_AES_XTS:
		case CRYPTO_AES_CTR:
		case CRYPTO_CAMELLIA_CBC:
		case CRYPTO_TWOFISH_CBC:
		case CRYPTO_SERPENT_CBC:
		case CRYPTO_TWOFISH_XTS:
		case CRYPTO_SERPENT_XTS:
			if ((crp->crp_etype = swcr_encdec(crd, sw,
			    crp->crp_buf, crp->crp_flags)) != 0)
				goto done;
			break;
		case CRYPTO_NULL_CBC:
			crp->crp_etype = 0;
			break;
		case CRYPTO_MD5_HMAC:
		case CRYPTO_SHA1_HMAC:
		case CRYPTO_SHA2_256_HMAC:
		case CRYPTO_SHA2_384_HMAC:
		case CRYPTO_SHA2_512_HMAC:
		case CRYPTO_RIPEMD160_HMAC:
		case CRYPTO_NULL_HMAC:
		case CRYPTO_MD5_KPDK:
		case CRYPTO_SHA1_KPDK:
		case CRYPTO_MD5:
		case CRYPTO_SHA1:
			if ((crp->crp_etype = swcr_authcompute(crd, sw,
			    crp->crp_buf, crp->crp_flags)) != 0)
				goto done;
			break;

		case CRYPTO_AES_GCM_16:
		case CRYPTO_AES_GMAC:
		case CRYPTO_AES_128_GMAC:
		case CRYPTO_AES_192_GMAC:
		case CRYPTO_AES_256_GMAC:
			crp->crp_etype = swcr_combined(crp);
			goto done;

		case CRYPTO_DEFLATE_COMP:
			if ((crp->crp_etype = swcr_compdec(crd, sw,
			    crp->crp_buf, crp->crp_flags)) != 0)
				goto done;
			else
				crp->crp_olen = (int)sw->sw_size;
			break;

		default:
			/* Unknown/unsupported algorithm */
			crp->crp_etype = EINVAL;
			goto done;
		}
	}

done:
	crypto_done(crp);
	lwkt_yield();
	return 0;
}

static void
swcr_identify(driver_t *drv, device_t parent)
{
	/* NB: order 10 is so we get attached after h/w devices */
	/* XXX: not certain this use of BUS_ADD_CHILD is correct */
	if (device_find_child(parent, "cryptosoft", -1) == NULL &&
	    BUS_ADD_CHILD(parent, parent, 10, "cryptosoft", -1) == NULL)
		panic("cryptosoft: could not attach");
}

static int
swcr_probe(device_t dev)
{
	device_set_desc(dev, "software crypto");
	return (0);
}

static int
swcr_attach(device_t dev)
{
	memset(hmac_ipad_buffer, HMAC_IPAD_VAL, HMAC_MAX_BLOCK_LEN);
	memset(hmac_opad_buffer, HMAC_OPAD_VAL, HMAC_MAX_BLOCK_LEN);

	swcr_id = crypto_get_driverid(dev, CRYPTOCAP_F_SOFTWARE |
					   CRYPTOCAP_F_SYNC |
					   CRYPTOCAP_F_SMP);
	if (swcr_id < 0) {
		device_printf(dev, "cannot initialize!\n");
		return ENOMEM;
	}
#define	REGISTER(alg) \
	crypto_register(swcr_id, alg, 0, 0)
	REGISTER(CRYPTO_DES_CBC);
	REGISTER(CRYPTO_3DES_CBC);
	REGISTER(CRYPTO_BLF_CBC);
	REGISTER(CRYPTO_CAST_CBC);
	REGISTER(CRYPTO_SKIPJACK_CBC);
	REGISTER(CRYPTO_NULL_CBC);
	REGISTER(CRYPTO_MD5_HMAC);
	REGISTER(CRYPTO_SHA1_HMAC);
	REGISTER(CRYPTO_SHA2_256_HMAC);
	REGISTER(CRYPTO_SHA2_384_HMAC);
	REGISTER(CRYPTO_SHA2_512_HMAC);
	REGISTER(CRYPTO_RIPEMD160_HMAC);
	REGISTER(CRYPTO_NULL_HMAC);
	REGISTER(CRYPTO_MD5_KPDK);
	REGISTER(CRYPTO_SHA1_KPDK);
	REGISTER(CRYPTO_MD5);
	REGISTER(CRYPTO_SHA1);
	REGISTER(CRYPTO_RIJNDAEL128_CBC);
	REGISTER(CRYPTO_AES_XTS);
	REGISTER(CRYPTO_AES_CTR);
	REGISTER(CRYPTO_AES_GCM_16);
	REGISTER(CRYPTO_AES_GMAC);
	REGISTER(CRYPTO_AES_128_GMAC);
	REGISTER(CRYPTO_AES_192_GMAC);
	REGISTER(CRYPTO_AES_256_GMAC);
	REGISTER(CRYPTO_CAMELLIA_CBC);
	REGISTER(CRYPTO_TWOFISH_CBC);
	REGISTER(CRYPTO_SERPENT_CBC);
	REGISTER(CRYPTO_TWOFISH_XTS);
	REGISTER(CRYPTO_SERPENT_XTS);
	REGISTER(CRYPTO_DEFLATE_COMP);
#undef REGISTER

	return 0;
}

static int
swcr_detach(device_t dev)
{
	crypto_unregister_all(swcr_id);
	if (swcr_sessions != NULL)
		kfree(swcr_sessions, M_CRYPTO_DATA);
	return 0;
}

static device_method_t swcr_methods[] = {
	DEVMETHOD(device_identify,	swcr_identify),
	DEVMETHOD(device_probe,		swcr_probe),
	DEVMETHOD(device_attach,	swcr_attach),
	DEVMETHOD(device_detach,	swcr_detach),

	DEVMETHOD(cryptodev_newsession,	swcr_newsession),
	DEVMETHOD(cryptodev_freesession, swcr_freesession),
	DEVMETHOD(cryptodev_process,	swcr_process),

	DEVMETHOD_END
};

static driver_t swcr_driver = {
	"cryptosoft",
	swcr_methods,
	0,		/* NB: no softc */
};
static devclass_t swcr_devclass;

/*
 * NB: We explicitly reference the crypto module so we
 * get the necessary ordering when built as a loadable
 * module.  This is required because we bundle the crypto
 * module code together with the cryptosoft driver (otherwise
 * normal module dependencies would handle things).
 */
extern int crypto_modevent(struct module *, int, void *);
/* XXX where to attach */
DRIVER_MODULE(cryptosoft, nexus, swcr_driver, swcr_devclass, crypto_modevent, NULL);
MODULE_VERSION(cryptosoft, 1);
MODULE_DEPEND(cryptosoft, crypto, 1, 1, 1);
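/*
 * Illustrative only: kernel consumers do not call this driver
 * directly, they reach it through the generic opencrypto API.  A
 * hedged sketch (see opencrypto/cryptodev.h for the authoritative
 * prototypes):
 *
 *	struct cryptoini cri;
 *	u_int64_t sid;
 *
 *	bzero(&cri, sizeof(cri));
 *	cri.cri_alg = CRYPTO_SHA1_HMAC;
 *	cri.cri_key = key;
 *	cri.cri_klen = keylen * 8;	(cri_klen is in bits)
 *	if (crypto_newsession(&sid, &cri, 0) == 0) {
 *		... build a struct cryptop, crypto_dispatch() it,
 *		    then crypto_freesession(sid) when finished ...
 *	}
 */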
1434