1 /* $NetBSD: cryptosoft.c,v 1.64 2022/05/22 11:39:27 riastradh Exp $ */
2 /* $FreeBSD: src/sys/opencrypto/cryptosoft.c,v 1.2.2.1 2002/11/21 23:34:23 sam Exp $ */
3 /* $OpenBSD: cryptosoft.c,v 1.35 2002/04/26 08:43:50 deraadt Exp $ */
4
5 /*
6 * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu)
7 *
8 * This code was written by Angelos D. Keromytis in Athens, Greece, in
9 * February 2000. Network Security Technologies Inc. (NSTI) kindly
10 * supported the development of this code.
11 *
12 * Copyright (c) 2000, 2001 Angelos D. Keromytis
13 *
14 * Permission to use, copy, and modify this software with or without fee
15 * is hereby granted, provided that this entire notice is included in
16 * all source code copies of any software which is or includes a copy or
17 * modification of this software.
18 *
19 * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
20 * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
21 * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
22 * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
23 * PURPOSE.
24 */
25
26 #include <sys/cdefs.h>
27 __KERNEL_RCSID(0, "$NetBSD: cryptosoft.c,v 1.64 2022/05/22 11:39:27 riastradh Exp $");
28
29 #include <sys/param.h>
30 #include <sys/systm.h>
31 #include <sys/kmem.h>
32 #include <sys/mbuf.h>
33 #include <sys/sysctl.h>
34 #include <sys/errno.h>
35 #include <sys/cprng.h>
36 #include <sys/module.h>
37 #include <sys/device.h>
38
39 #ifdef _KERNEL_OPT
40 #include "opt_ocf.h"
41 #endif
42
43 #include <opencrypto/cryptodev.h>
44 #include <opencrypto/cryptosoft.h>
45 #include <opencrypto/xform.h>
46
47 #include <opencrypto/cryptosoft_xform.c>
48
49 #include "ioconf.h"
50
/*
 * Scratch hash context big enough for any supported auth transform, so
 * per-request contexts can live on the stack (see swcr_authcompute()).
 */
union authctx {
	MD5_CTX md5ctx;
	SHA1_CTX sha1ctx;
	RMD160_CTX rmd160ctx;
	SHA256_CTX sha256ctx;
	SHA384_CTX sha384ctx;
	SHA512_CTX sha512ctx;
	aesxcbc_ctx aesxcbcctx;
	AES_GMAC_CTX aesgmacctx;
};

/*
 * Session table: swcr_sessions[sid] heads a linked list of per-algorithm
 * swcr_data entries for that session.  Slot 0 is deliberately unused.
 */
struct swcr_data **swcr_sessions = NULL;
u_int32_t swcr_sesnum = 0;	/* current capacity of swcr_sessions[] */
int32_t swcr_id = -1;		/* driver id from crypto_get_driverid() */
65
/*
 * Copy data to/from the request buffer regardless of its layout:
 * 'x' is the buffer type (CRYPTO_BUF_MBUF or CRYPTO_BUF_IOV), 'a' the
 * buffer, 'b' the offset, 'c' the length and 'd' the bounce buffer.
 * All arguments and the full expansion are parenthesized so the macros
 * are safe with expression arguments and in larger expressions.
 */
#define COPYBACK(x, a, b, c, d) \
	(((x) == CRYPTO_BUF_MBUF) ? \
	    m_copyback((struct mbuf *)(a), (b), (c), (d)) \
	    : cuio_copyback((struct uio *)(a), (b), (c), (d)))
#define COPYDATA(x, a, b, c, d) \
	(((x) == CRYPTO_BUF_MBUF) ? \
	    m_copydata((struct mbuf *)(a), (b), (c), (d)) \
	    : cuio_copydata((struct uio *)(a), (b), (c), (d)))
72
/* Forward declarations for the opencrypto driver entry points and helpers. */
static int swcr_encdec(struct cryptodesc *, const struct swcr_data *, void *, int);
static int swcr_compdec(struct cryptodesc *, const struct swcr_data *, void *, int, int *);
static int swcr_combined(struct cryptop *, int);
static int swcr_process(void *, struct cryptop *, int);
static int swcr_newsession(void *, u_int32_t *, struct cryptoini *);
static void swcr_freesession(void *, u_int64_t);
static void swcr_freesession_internal(struct swcr_data *);

static int swcryptoattach_internal(void);
82
83 /*
84 * Apply a symmetric encryption/decryption algorithm.
85 */
/*
 * Apply a symmetric encryption/decryption algorithm to the range
 * [crd_skip, crd_skip + crd_len) of the request buffer.  outtype selects
 * the buffer layout (contiguous, mbuf chain, or iovec).
 *
 * Transforms with a reinit hook (e.g. CTR/GCM style modes) keep their own
 * chaining state inside the key schedule; for the others, CBC chaining is
 * done by hand here, with ivp always pointing at the block to XOR with.
 *
 * Returns 0 on success, EINVAL on malformed input (length not a multiple
 * of the block size, or the buffer runs out mid-request).
 */
static int
swcr_encdec(struct cryptodesc *crd, const struct swcr_data *sw, void *bufv,
    int outtype)
{
	char *buf = bufv;
	unsigned char iv[EALG_MAX_BLOCK_LEN], blk[EALG_MAX_BLOCK_LEN], *idat;
	unsigned char *ivp, piv[EALG_MAX_BLOCK_LEN];
	const struct swcr_enc_xform *exf;
	int i, k, j, blks, ivlen;
	int count, ind;

	exf = sw->sw_exf;
	blks = exf->enc_xform->blocksize;
	ivlen = exf->enc_xform->ivsize;
	KASSERT(exf->reinit ? ivlen <= blks : ivlen == blks);

	/* Check for non-padded data */
	if (crd->crd_len % blks)
		return EINVAL;

	/* Initialize the IV */
	if (crd->crd_flags & CRD_F_ENCRYPT) {
		/* IV explicitly provided ? */
		if (crd->crd_flags & CRD_F_IV_EXPLICIT) {
			memcpy(iv, crd->crd_iv, ivlen);
			if (exf->reinit)
				exf->reinit(sw->sw_kschedule, iv, 0);
		} else if (exf->reinit) {
			/* The xform generates the IV itself. */
			exf->reinit(sw->sw_kschedule, 0, iv);
		} else {
			/*
			 * Pick a random IV.  The full EALG_MAX_BLOCK_LEN is
			 * filled even though only ivlen bytes are used.
			 */
			cprng_fast(iv, EALG_MAX_BLOCK_LEN);
		}

		/* Do we need to write the IV */
		if (!(crd->crd_flags & CRD_F_IV_PRESENT)) {
			COPYBACK(outtype, buf, crd->crd_inject, ivlen, iv);
		}

	} else {	/* Decryption */
		/* IV explicitly provided ? */
		if (crd->crd_flags & CRD_F_IV_EXPLICIT)
			memcpy(iv, crd->crd_iv, ivlen);
		else {
			/* Get IV off buf */
			COPYDATA(outtype, buf, crd->crd_inject, ivlen, iv);
		}
		if (exf->reinit)
			exf->reinit(sw->sw_kschedule, iv, 0);
	}

	/* ivp tracks the current chaining block for manual CBC. */
	ivp = iv;

	if (outtype == CRYPTO_BUF_CONTIG) {
		if (exf->reinit) {
			for (i = crd->crd_skip;
			    i < crd->crd_skip + crd->crd_len; i += blks) {
				if (crd->crd_flags & CRD_F_ENCRYPT) {
					exf->encrypt(sw->sw_kschedule, buf + i);
				} else {
					exf->decrypt(sw->sw_kschedule, buf + i);
				}
			}
		} else if (crd->crd_flags & CRD_F_ENCRYPT) {
			for (i = crd->crd_skip;
			    i < crd->crd_skip + crd->crd_len; i += blks) {
				/* XOR with the IV/previous block, as appropriate. */
				if (i == crd->crd_skip)
					for (k = 0; k < blks; k++)
						buf[i + k] ^= ivp[k];
				else
					for (k = 0; k < blks; k++)
						buf[i + k] ^= buf[i + k - blks];
				exf->encrypt(sw->sw_kschedule, buf + i);
			}
		} else {	/* Decrypt */
			/*
			 * Start at the end, so we don't need to keep the encrypted
			 * block as the IV for the next block.
			 */
			for (i = crd->crd_skip + crd->crd_len - blks;
			    i >= crd->crd_skip; i -= blks) {
				exf->decrypt(sw->sw_kschedule, buf + i);

				/* XOR with the IV/previous block, as appropriate */
				if (i == crd->crd_skip)
					for (k = 0; k < blks; k++)
						buf[i + k] ^= ivp[k];
				else
					for (k = 0; k < blks; k++)
						buf[i + k] ^= buf[i + k - blks];
			}
		}

		return 0;
	} else if (outtype == CRYPTO_BUF_MBUF) {
		struct mbuf *m = (struct mbuf *) buf;

		/* Find beginning of data */
		m = m_getptr(m, crd->crd_skip, &k);
		if (m == NULL)
			return EINVAL;

		i = crd->crd_len;

		while (i > 0) {
			/*
			 * If there's insufficient data at the end of
			 * an mbuf, we have to do some copying.
			 */
			if (m->m_len < k + blks && m->m_len != k) {
				m_copydata(m, k, blks, blk);

				/* Actual encryption/decryption */
				if (exf->reinit) {
					if (crd->crd_flags & CRD_F_ENCRYPT) {
						exf->encrypt(sw->sw_kschedule,
						    blk);
					} else {
						exf->decrypt(sw->sw_kschedule,
						    blk);
					}
				} else if (crd->crd_flags & CRD_F_ENCRYPT) {
					/* XOR with previous block */
					for (j = 0; j < blks; j++)
						blk[j] ^= ivp[j];

					exf->encrypt(sw->sw_kschedule, blk);

					/*
					 * Keep encrypted block for XOR'ing
					 * with next block
					 */
					memcpy(iv, blk, blks);
					ivp = iv;
				} else {	/* decrypt */
					/*
					 * Keep encrypted block for XOR'ing
					 * with next block.  piv is used when
					 * ivp still points into iv so the
					 * current chain value isn't clobbered
					 * before the XOR below.
					 */
					if (ivp == iv)
						memcpy(piv, blk, blks);
					else
						memcpy(iv, blk, blks);

					exf->decrypt(sw->sw_kschedule, blk);

					/* XOR with previous block */
					for (j = 0; j < blks; j++)
						blk[j] ^= ivp[j];

					if (ivp == iv)
						memcpy(iv, piv, blks);
					else
						ivp = iv;
				}

				/* Copy back decrypted block */
				m_copyback(m, k, blks, blk);

				/* Advance pointer */
				m = m_getptr(m, k + blks, &k);
				if (m == NULL)
					return EINVAL;

				i -= blks;

				/* Could be done... */
				if (i == 0)
					break;
			}

			/* Skip possibly empty mbufs */
			if (k == m->m_len) {
				for (m = m->m_next; m && m->m_len == 0;
				    m = m->m_next)
					;
				k = 0;
			}

			/* Sanity check */
			if (m == NULL)
				return EINVAL;

			/*
			 * Warning: idat may point to garbage here, but
			 * we only use it in the while() loop, only if
			 * there are indeed enough data.
			 */
			idat = mtod(m, unsigned char *) + k;

			/* Fast path: whole blocks available in this mbuf. */
			while (m->m_len >= k + blks && i > 0) {
				if (exf->reinit) {
					if (crd->crd_flags & CRD_F_ENCRYPT) {
						exf->encrypt(sw->sw_kschedule,
						    idat);
					} else {
						exf->decrypt(sw->sw_kschedule,
						    idat);
					}
				} else if (crd->crd_flags & CRD_F_ENCRYPT) {
					/* XOR with previous block/IV */
					for (j = 0; j < blks; j++)
						idat[j] ^= ivp[j];

					exf->encrypt(sw->sw_kschedule, idat);
					ivp = idat;
				} else {	/* decrypt */
					/*
					 * Keep encrypted block to be used
					 * in next block's processing.
					 */
					if (ivp == iv)
						memcpy(piv, idat, blks);
					else
						memcpy(iv, idat, blks);

					exf->decrypt(sw->sw_kschedule, idat);

					/* XOR with previous block/IV */
					for (j = 0; j < blks; j++)
						idat[j] ^= ivp[j];

					if (ivp == iv)
						memcpy(iv, piv, blks);
					else
						ivp = iv;
				}

				idat += blks;
				k += blks;
				i -= blks;
			}
		}

		return 0;	/* Done with mbuf encryption/decryption */
	} else if (outtype == CRYPTO_BUF_IOV) {
		struct uio *uio = (struct uio *) buf;

		/* Find beginning of data */
		count = crd->crd_skip;
		ind = cuio_getptr(uio, count, &k);
		if (ind == -1)
			return EINVAL;

		i = crd->crd_len;

		while (i > 0) {
			/*
			 * If there's insufficient data at the end,
			 * we have to do some copying.
			 */
			if (uio->uio_iov[ind].iov_len < k + blks &&
			    uio->uio_iov[ind].iov_len != k) {
				cuio_copydata(uio, k, blks, blk);

				/* Actual encryption/decryption */
				if (exf->reinit) {
					if (crd->crd_flags & CRD_F_ENCRYPT) {
						exf->encrypt(sw->sw_kschedule,
						    blk);
					} else {
						exf->decrypt(sw->sw_kschedule,
						    blk);
					}
				} else if (crd->crd_flags & CRD_F_ENCRYPT) {
					/* XOR with previous block */
					for (j = 0; j < blks; j++)
						blk[j] ^= ivp[j];

					exf->encrypt(sw->sw_kschedule, blk);

					/*
					 * Keep encrypted block for XOR'ing
					 * with next block
					 */
					memcpy(iv, blk, blks);
					ivp = iv;
				} else {	/* decrypt */
					/*
					 * Keep encrypted block for XOR'ing
					 * with next block
					 */
					if (ivp == iv)
						memcpy(piv, blk, blks);
					else
						memcpy(iv, blk, blks);

					exf->decrypt(sw->sw_kschedule, blk);

					/* XOR with previous block */
					for (j = 0; j < blks; j++)
						blk[j] ^= ivp[j];

					if (ivp == iv)
						memcpy(iv, piv, blks);
					else
						ivp = iv;
				}

				/* Copy back decrypted block */
				cuio_copyback(uio, k, blks, blk);

				count += blks;

				/* Advance pointer */
				ind = cuio_getptr(uio, count, &k);
				if (ind == -1)
					return (EINVAL);

				i -= blks;

				/* Could be done... */
				if (i == 0)
					break;
			}

			/*
			 * Warning: idat may point to garbage here, but
			 * we only use it in the while() loop, only if
			 * there are indeed enough data.
			 */
			idat = ((char *)uio->uio_iov[ind].iov_base) + k;

			/* Fast path: whole blocks available in this iovec. */
			while (uio->uio_iov[ind].iov_len >= k + blks &&
			    i > 0) {
				if (exf->reinit) {
					if (crd->crd_flags & CRD_F_ENCRYPT) {
						exf->encrypt(sw->sw_kschedule,
						    idat);
					} else {
						exf->decrypt(sw->sw_kschedule,
						    idat);
					}
				} else if (crd->crd_flags & CRD_F_ENCRYPT) {
					/* XOR with previous block/IV */
					for (j = 0; j < blks; j++)
						idat[j] ^= ivp[j];

					exf->encrypt(sw->sw_kschedule, idat);
					ivp = idat;
				} else {	/* decrypt */
					/*
					 * Keep encrypted block to be used
					 * in next block's processing.
					 */
					if (ivp == iv)
						memcpy(piv, idat, blks);
					else
						memcpy(iv, idat, blks);

					exf->decrypt(sw->sw_kschedule, idat);

					/* XOR with previous block/IV */
					for (j = 0; j < blks; j++)
						idat[j] ^= ivp[j];

					if (ivp == iv)
						memcpy(iv, piv, blks);
					else
						ivp = iv;
				}

				idat += blks;
				count += blks;
				k += blks;
				i -= blks;
			}
		}
		return 0;	/* Done with iov encryption/decryption */
	}

	/* Unreachable */
	return EINVAL;
}
460
461 /*
462 * Compute keyed-hash authenticator.
463 */
/*
 * Compute keyed-hash authenticator over crd's range of the request
 * buffer and inject it at crd_inject (or into crp->crp_mac for iovec
 * requests).  The stack context is seeded from the precomputed inner
 * context (sw_ictx); HMAC variants finish with the outer context
 * (sw_octx), KPDK variants append the stored key.
 *
 * Returns 0 on success or EINVAL (missing context / bad buffer type).
 */
int
swcr_authcompute(struct cryptop *crp, struct cryptodesc *crd,
    const struct swcr_data *sw, void *buf, int outtype)
{
	unsigned char aalg[AALG_MAX_RESULT_LEN];
	const struct swcr_auth_hash *axf;
	union authctx ctx;
	int err;

	if (sw->sw_ictx == 0)
		return EINVAL;

	axf = sw->sw_axf;

	/* Start from the precomputed (keyed) inner state. */
	memcpy(&ctx, sw->sw_ictx, axf->ctxsize);

	switch (outtype) {
	case CRYPTO_BUF_CONTIG:
		axf->Update(&ctx, (char *)buf + crd->crd_skip, crd->crd_len);
		break;
	case CRYPTO_BUF_MBUF:
		/*
		 * m_apply() walks the chain; axf->Update is cast to the
		 * m_apply callback signature (it always returns 0).
		 */
		err = m_apply((struct mbuf *) buf, crd->crd_skip, crd->crd_len,
		    (int (*)(void*, void *, unsigned int))(void *)axf->Update,
		    (void *) &ctx);
		if (err)
			return err;
		break;
	case CRYPTO_BUF_IOV:
		err = cuio_apply((struct uio *) buf, crd->crd_skip,
		    crd->crd_len,
		    (int (*)(void *, void *, unsigned int))(void *)axf->Update,
		    (void *) &ctx);
		if (err) {
			return err;
		}
		break;
	default:
		return EINVAL;
	}

	/*
	 * Finalize.  Algorithms not listed here are never routed to this
	 * function by swcr_process(), so aalg is always written below.
	 */
	switch (sw->sw_alg) {
	case CRYPTO_MD5_HMAC:
	case CRYPTO_MD5_HMAC_96:
	case CRYPTO_SHA1_HMAC:
	case CRYPTO_SHA1_HMAC_96:
	case CRYPTO_SHA2_256_HMAC:
	case CRYPTO_SHA2_384_HMAC:
	case CRYPTO_SHA2_512_HMAC:
	case CRYPTO_RIPEMD160_HMAC:
	case CRYPTO_RIPEMD160_HMAC_96:
		if (sw->sw_octx == NULL)
			return EINVAL;

		/* HMAC: outer hash over the inner digest. */
		axf->Final(aalg, &ctx);
		memcpy(&ctx, sw->sw_octx, axf->ctxsize);
		axf->Update(&ctx, aalg, axf->auth_hash->hashsize);
		axf->Final(aalg, &ctx);
		break;

	case CRYPTO_MD5_KPDK:
	case CRYPTO_SHA1_KPDK:
		if (sw->sw_octx == NULL)
			return EINVAL;

		/* KPDK: append the raw key stored in sw_octx. */
		axf->Update(&ctx, sw->sw_octx, sw->sw_klen);
		axf->Final(aalg, &ctx);
		break;

	case CRYPTO_NULL_HMAC:
	case CRYPTO_MD5:
	case CRYPTO_SHA1:
	case CRYPTO_AES_XCBC_MAC_96:
		axf->Final(aalg, &ctx);
		break;
	}

	/* Inject the authentication data */
	switch (outtype) {
	case CRYPTO_BUF_CONTIG:
		(void)memcpy((char *)buf + crd->crd_inject, aalg,
		    axf->auth_hash->authsize);
		break;
	case CRYPTO_BUF_MBUF:
		m_copyback((struct mbuf *) buf, crd->crd_inject,
		    axf->auth_hash->authsize, aalg);
		break;
	case CRYPTO_BUF_IOV:
		/* iovec requests return the MAC out-of-band. */
		memcpy(crp->crp_mac, aalg, axf->auth_hash->authsize);
		break;
	default:
		return EINVAL;
	}
	return 0;
}
558
559 /*
560 * Apply a combined encryption-authentication transformation
561 */
/*
 * Apply a combined encryption-authentication transformation (AES-GCM /
 * AES-GMAC).  The request must contain exactly one cipher descriptor
 * (crde) and one auth descriptor (crda); contiguous buffers are not
 * supported.  Encryption/decryption and the MAC are computed in one pass
 * over the payload, then the GMAC length block is appended and the tag
 * injected (in-buffer for mbufs, crp->crp_mac otherwise).
 */
static int
swcr_combined(struct cryptop *crp, int outtype)
{
	uint32_t blkbuf[howmany(EALG_MAX_BLOCK_LEN, sizeof(uint32_t))];
	u_char *blk = (u_char *)blkbuf;
	u_char aalg[AALG_MAX_RESULT_LEN];
	u_char iv[EALG_MAX_BLOCK_LEN];
	union authctx ctx;
	struct cryptodesc *crd, *crda = NULL, *crde = NULL;
	struct swcr_data *sw, *swa, *swe = NULL;
	const struct swcr_auth_hash *axf = NULL;
	const struct swcr_enc_xform *exf = NULL;
	void *buf = (void *)crp->crp_buf;
	uint32_t *blkp;
	int i, blksz = 0, ivlen = 0, len;

	/* Sort the descriptors into cipher (crde) and auth (crda) roles. */
	for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
		for (sw = swcr_sessions[crp->crp_sid & 0xffffffff];
		    sw && sw->sw_alg != crd->crd_alg;
		    sw = sw->sw_next)
			;
		if (sw == NULL)
			return (EINVAL);

		switch (sw->sw_alg) {
		case CRYPTO_AES_GCM_16:
		case CRYPTO_AES_GMAC:
			swe = sw;
			crde = crd;
			exf = swe->sw_exf;
			ivlen = exf->enc_xform->ivsize;
			break;
		case CRYPTO_AES_128_GMAC:
		case CRYPTO_AES_192_GMAC:
		case CRYPTO_AES_256_GMAC:
			swa = sw;
			crda = crd;
			axf = swa->sw_axf;
			if (swa->sw_ictx == 0)
				return (EINVAL);
			memcpy(&ctx, swa->sw_ictx, axf->ctxsize);
			blksz = axf->auth_hash->blocksize;
			break;
		default:
			return (EINVAL);
		}
	}
	if (crde == NULL || crda == NULL)
		return (EINVAL);
	if (outtype == CRYPTO_BUF_CONTIG)
		return (EINVAL);

	/* Initialize the IV */
	if (crde->crd_flags & CRD_F_ENCRYPT) {
		/* IV explicitly provided ? */
		if (crde->crd_flags & CRD_F_IV_EXPLICIT) {
			memcpy(iv, crde->crd_iv, ivlen);
			if (exf->reinit)
				exf->reinit(swe->sw_kschedule, iv, 0);
		} else if (exf->reinit)
			exf->reinit(swe->sw_kschedule, 0, iv);
		else
			cprng_fast(iv, ivlen);

		/* Do we need to write the IV */
		if (!(crde->crd_flags & CRD_F_IV_PRESENT))
			COPYBACK(outtype, buf, crde->crd_inject, ivlen, iv);

	} else {	/* Decryption */
		/* IV explicitly provided ? */
		if (crde->crd_flags & CRD_F_IV_EXPLICIT)
			memcpy(iv, crde->crd_iv, ivlen);
		else {
			/* Get IV off buf */
			COPYDATA(outtype, buf, crde->crd_inject, ivlen, iv);
		}
		if (exf->reinit)
			exf->reinit(swe->sw_kschedule, iv, 0);
	}

	/* Supply MAC with IV */
	if (axf->Reinit)
		axf->Reinit(&ctx, iv, ivlen);

	/* Supply MAC with AAD */
	for (i = 0; i < crda->crd_len; i += blksz) {
		len = MIN(crda->crd_len - i, blksz);
		COPYDATA(outtype, buf, crda->crd_skip + i, len, blk);
		axf->Update(&ctx, blk, len);
	}

	/*
	 * Do encryption/decryption with MAC.  On encrypt the MAC covers
	 * the ciphertext, so hash after encrypting; on decrypt, before.
	 */
	for (i = 0; i < crde->crd_len; i += blksz) {
		len = MIN(crde->crd_len - i, blksz);
		if (len < blksz)
			memset(blk, 0, blksz);
		COPYDATA(outtype, buf, crde->crd_skip + i, len, blk);
		if (crde->crd_flags & CRD_F_ENCRYPT) {
			exf->encrypt(swe->sw_kschedule, blk);
			axf->Update(&ctx, blk, len);
		} else {
			axf->Update(&ctx, blk, len);
			exf->decrypt(swe->sw_kschedule, blk);
		}
		COPYBACK(outtype, buf, crde->crd_skip + i, len, blk);
	}

	/* Do any required special finalization */
	switch (crda->crd_alg) {
	case CRYPTO_AES_128_GMAC:
	case CRYPTO_AES_192_GMAC:
	case CRYPTO_AES_256_GMAC:
		/* length block: bit lengths of AAD and ciphertext, big-endian */
		memset(blk, 0, blksz);
		blkp = (uint32_t *)blk + 1;
		*blkp = htobe32(crda->crd_len * 8);
		blkp = (uint32_t *)blk + 3;
		*blkp = htobe32(crde->crd_len * 8);
		axf->Update(&ctx, blk, blksz);
		break;
	}

	/* Finalize MAC */
	axf->Final(aalg, &ctx);

	/* Inject the authentication data */
	if (outtype == CRYPTO_BUF_MBUF)
		COPYBACK(outtype, buf, crda->crd_inject, axf->auth_hash->authsize, aalg);
	else
		memcpy(crp->crp_mac, aalg, axf->auth_hash->authsize);

	return (0);
}
695
696 /*
697 * Apply a compression/decompression algorithm
698 */
/*
 * Apply a compression/decompression algorithm to crd's range of the
 * buffer.  The whole range is copied into a temporary buffer first,
 * since the (de)compressor needs contiguous input.  On success the
 * result replaces the input in place and *res_size is set to the
 * output size.
 *
 * NOTE(review): *res_size is in/out — it is read as the expected
 * decompressed size (passed to cxf->decompress) before being set;
 * callers pass &crp->crp_olen.  Also note allocation failure returns
 * EINVAL (not ENOMEM) — historical behavior, kept as-is.
 */
static int
swcr_compdec(struct cryptodesc *crd, const struct swcr_data *sw,
    void *buf, int outtype, int *res_size)
{
	u_int8_t *data, *out;
	const struct swcr_comp_algo *cxf;
	int adj;
	u_int32_t result;

	cxf = sw->sw_cxf;

	/* We must handle the whole buffer of data in one time
	 * then if there is not all the data in the mbuf, we must
	 * copy in a buffer.
	 */

	data = malloc(crd->crd_len, M_CRYPTO_DATA, M_NOWAIT);
	if (data == NULL)
		return (EINVAL);
	COPYDATA(outtype, buf, crd->crd_skip, crd->crd_len, data);

	if (crd->crd_flags & CRD_F_COMP)
		result = cxf->compress(data, crd->crd_len, &out);
	else
		result = cxf->decompress(data, crd->crd_len, &out,
		    *res_size);

	free(data, M_CRYPTO_DATA);
	if (result == 0)
		return EINVAL;

	/* Copy back the (de)compressed data. m_copyback is
	 * extending the mbuf as necessary.
	 */
	*res_size = (int)result;
	/* Check the compressed size when doing compression */
	if (crd->crd_flags & CRD_F_COMP &&
	    sw->sw_alg == CRYPTO_DEFLATE_COMP_NOGROW &&
	    result >= crd->crd_len) {
		/* Compression was useless, we lost time */
		free(out, M_CRYPTO_DATA);
		return 0;
	}

	COPYBACK(outtype, buf, crd->crd_skip, result, out);
	if (result < crd->crd_len) {
		/* Output shrank: trim the buffer to the new length. */
		adj = result - crd->crd_len;
		if (outtype == CRYPTO_BUF_MBUF) {
			m_adj((struct mbuf *)buf, adj);
		}
		/* Don't adjust the iov_len, it breaks the kmem_free */
	}
	free(out, M_CRYPTO_DATA);
	return 0;
}
754
755 /*
756 * Generate a new software session.
757 */
758 static int
swcr_newsession(void * arg,u_int32_t * sid,struct cryptoini * cri)759 swcr_newsession(void *arg, u_int32_t *sid, struct cryptoini *cri)
760 {
761 struct swcr_data **swd;
762 struct swcr_data *first, *tmp;
763 const struct swcr_auth_hash *axf;
764 const struct swcr_enc_xform *txf;
765 const struct swcr_comp_algo *cxf;
766 u_int32_t i;
767 int k, error;
768
769 if (swcr_sessions) {
770 for (i = 1; i < swcr_sesnum; i++)
771 if (swcr_sessions[i] == NULL)
772 break;
773 } else
774 i = 1; /* NB: to silence compiler warning */
775
776 if (swcr_sessions == NULL || i == swcr_sesnum) {
777 u_int32_t newnum;
778 struct swcr_data **newsessions;
779
780 if (swcr_sessions == NULL) {
781 i = 1; /* We leave swcr_sessions[0] empty */
782 newnum = CRYPTO_SW_SESSIONS;
783 } else
784 newnum = swcr_sesnum *= 2;
785
786 newsessions = kmem_zalloc(newnum * sizeof(struct swcr_data *),
787 KM_NOSLEEP);
788 if (newsessions == NULL) {
789 return ENOBUFS;
790 }
791
792 /* Copy existing sessions */
793 if (swcr_sessions) {
794 memcpy(newsessions, swcr_sessions,
795 swcr_sesnum * sizeof(struct swcr_data *));
796 kmem_free(swcr_sessions,
797 swcr_sesnum * sizeof(struct swcr_data *));
798 }
799
800 swcr_sesnum = newnum;
801 swcr_sessions = newsessions;
802 }
803
804 first = NULL;
805 swd = &tmp;
806 while (cri) {
807 *swd = kmem_zalloc(sizeof **swd, KM_NOSLEEP);
808 if (*swd == NULL) {
809 if (first != NULL)
810 swcr_freesession_internal(first);
811 return ENOBUFS;
812 } else if (first == NULL)
813 first = *swd;
814
815 switch (cri->cri_alg) {
816 case CRYPTO_DES_CBC:
817 txf = &swcr_enc_xform_des;
818 goto enccommon;
819 case CRYPTO_3DES_CBC:
820 txf = &swcr_enc_xform_3des;
821 goto enccommon;
822 case CRYPTO_BLF_CBC:
823 txf = &swcr_enc_xform_blf;
824 goto enccommon;
825 case CRYPTO_CAST_CBC:
826 txf = &swcr_enc_xform_cast5;
827 goto enccommon;
828 case CRYPTO_SKIPJACK_CBC:
829 txf = &swcr_enc_xform_skipjack;
830 goto enccommon;
831 case CRYPTO_AES_CBC:
832 txf = &swcr_enc_xform_aes;
833 goto enccommon;
834 case CRYPTO_CAMELLIA_CBC:
835 txf = &swcr_enc_xform_camellia;
836 goto enccommon;
837 case CRYPTO_AES_CTR:
838 txf = &swcr_enc_xform_aes_ctr;
839 goto enccommon;
840 case CRYPTO_AES_GCM_16:
841 txf = &swcr_enc_xform_aes_gcm;
842 goto enccommon;
843 case CRYPTO_AES_GMAC:
844 txf = &swcr_enc_xform_aes_gmac;
845 goto enccommon;
846 case CRYPTO_NULL_CBC:
847 txf = &swcr_enc_xform_null;
848 goto enccommon;
849 enccommon:
850 error = txf->setkey(&((*swd)->sw_kschedule),
851 cri->cri_key, cri->cri_klen / 8);
852 if (error) {
853 swcr_freesession_internal(first);
854 return error;
855 }
856 (*swd)->sw_exf = txf;
857 break;
858
859 case CRYPTO_MD5_HMAC:
860 axf = &swcr_auth_hash_hmac_md5;
861 goto authcommon;
862 case CRYPTO_MD5_HMAC_96:
863 axf = &swcr_auth_hash_hmac_md5_96;
864 goto authcommon;
865 case CRYPTO_SHA1_HMAC:
866 axf = &swcr_auth_hash_hmac_sha1;
867 goto authcommon;
868 case CRYPTO_SHA1_HMAC_96:
869 axf = &swcr_auth_hash_hmac_sha1_96;
870 goto authcommon;
871 case CRYPTO_SHA2_256_HMAC:
872 axf = &swcr_auth_hash_hmac_sha2_256;
873 goto authcommon;
874 case CRYPTO_SHA2_384_HMAC:
875 axf = &swcr_auth_hash_hmac_sha2_384;
876 goto authcommon;
877 case CRYPTO_SHA2_512_HMAC:
878 axf = &swcr_auth_hash_hmac_sha2_512;
879 goto authcommon;
880 case CRYPTO_NULL_HMAC:
881 axf = &swcr_auth_hash_null;
882 goto authcommon;
883 case CRYPTO_RIPEMD160_HMAC:
884 axf = &swcr_auth_hash_hmac_ripemd_160;
885 goto authcommon;
886 case CRYPTO_RIPEMD160_HMAC_96:
887 axf = &swcr_auth_hash_hmac_ripemd_160_96;
888 goto authcommon; /* leave this for safety */
889 authcommon:
890 (*swd)->sw_ictx = kmem_alloc(axf->ctxsize, KM_NOSLEEP);
891 if ((*swd)->sw_ictx == NULL) {
892 swcr_freesession_internal(first);
893 return ENOBUFS;
894 }
895
896 (*swd)->sw_octx = kmem_alloc(axf->ctxsize, KM_NOSLEEP);
897 if ((*swd)->sw_octx == NULL) {
898 swcr_freesession_internal(first);
899 return ENOBUFS;
900 }
901
902 for (k = 0; k < cri->cri_klen / 8; k++)
903 cri->cri_key[k] ^= HMAC_IPAD_VAL;
904
905 axf->Init((*swd)->sw_ictx);
906 axf->Update((*swd)->sw_ictx, cri->cri_key,
907 cri->cri_klen / 8);
908 axf->Update((*swd)->sw_ictx, hmac_ipad_buffer,
909 axf->auth_hash->blocksize - (cri->cri_klen / 8));
910
911 for (k = 0; k < cri->cri_klen / 8; k++)
912 cri->cri_key[k] ^= (HMAC_IPAD_VAL ^ HMAC_OPAD_VAL);
913
914 axf->Init((*swd)->sw_octx);
915 axf->Update((*swd)->sw_octx, cri->cri_key,
916 cri->cri_klen / 8);
917 axf->Update((*swd)->sw_octx, hmac_opad_buffer,
918 axf->auth_hash->blocksize - (cri->cri_klen / 8));
919
920 for (k = 0; k < cri->cri_klen / 8; k++)
921 cri->cri_key[k] ^= HMAC_OPAD_VAL;
922 (*swd)->sw_axf = axf;
923 break;
924
925 case CRYPTO_MD5_KPDK:
926 axf = &swcr_auth_hash_key_md5;
927 goto auth2common;
928
929 case CRYPTO_SHA1_KPDK: {
930 unsigned char digest[SHA1_DIGEST_LENGTH];
931 CTASSERT(SHA1_DIGEST_LENGTH >= MD5_DIGEST_LENGTH);
932 axf = &swcr_auth_hash_key_sha1;
933 auth2common:
934 (*swd)->sw_ictx = kmem_alloc(axf->ctxsize, KM_NOSLEEP);
935 if ((*swd)->sw_ictx == NULL) {
936 swcr_freesession_internal(first);
937 return ENOBUFS;
938 }
939
940 /* Store the key so we can "append" it to the payload */
941 (*swd)->sw_octx = kmem_alloc(cri->cri_klen / 8,
942 KM_NOSLEEP);
943 if ((*swd)->sw_octx == NULL) {
944 swcr_freesession_internal(first);
945 return ENOBUFS;
946 }
947
948 (*swd)->sw_klen = cri->cri_klen / 8;
949 memcpy((*swd)->sw_octx, cri->cri_key, cri->cri_klen / 8);
950 axf->Init((*swd)->sw_ictx);
951 axf->Update((*swd)->sw_ictx, cri->cri_key,
952 cri->cri_klen / 8);
953 axf->Final(digest, (*swd)->sw_ictx);
954 (*swd)->sw_axf = axf;
955 break;
956 }
957
958 case CRYPTO_MD5:
959 axf = &swcr_auth_hash_md5;
960 goto auth3common;
961
962 case CRYPTO_SHA1:
963 axf = &swcr_auth_hash_sha1;
964 auth3common:
965 (*swd)->sw_ictx = kmem_alloc(axf->ctxsize, KM_NOSLEEP);
966 if ((*swd)->sw_ictx == NULL) {
967 swcr_freesession_internal(first);
968 return ENOBUFS;
969 }
970
971 axf->Init((*swd)->sw_ictx);
972 (*swd)->sw_axf = axf;
973 break;
974
975 case CRYPTO_AES_XCBC_MAC_96:
976 axf = &swcr_auth_hash_aes_xcbc_mac;
977 goto auth4common;
978 case CRYPTO_AES_128_GMAC:
979 axf = &swcr_auth_hash_gmac_aes_128;
980 goto auth4common;
981 case CRYPTO_AES_192_GMAC:
982 axf = &swcr_auth_hash_gmac_aes_192;
983 goto auth4common;
984 case CRYPTO_AES_256_GMAC:
985 axf = &swcr_auth_hash_gmac_aes_256;
986 auth4common:
987 (*swd)->sw_ictx = kmem_alloc(axf->ctxsize, KM_NOSLEEP);
988 if ((*swd)->sw_ictx == NULL) {
989 swcr_freesession_internal(first);
990 return ENOBUFS;
991 }
992 axf->Init((*swd)->sw_ictx);
993 axf->Setkey((*swd)->sw_ictx,
994 cri->cri_key, cri->cri_klen / 8);
995 (*swd)->sw_axf = axf;
996 break;
997
998 case CRYPTO_DEFLATE_COMP:
999 cxf = &swcr_comp_algo_deflate;
1000 (*swd)->sw_cxf = cxf;
1001 break;
1002
1003 case CRYPTO_DEFLATE_COMP_NOGROW:
1004 cxf = &swcr_comp_algo_deflate_nogrow;
1005 (*swd)->sw_cxf = cxf;
1006 break;
1007
1008 case CRYPTO_GZIP_COMP:
1009 cxf = &swcr_comp_algo_gzip;
1010 (*swd)->sw_cxf = cxf;
1011 break;
1012 default:
1013 swcr_freesession_internal(first);
1014 return EINVAL;
1015 }
1016
1017 (*swd)->sw_alg = cri->cri_alg;
1018 cri = cri->cri_next;
1019 swd = &((*swd)->sw_next);
1020 }
1021
1022 swcr_sessions[i] = first;
1023 *sid = i;
1024 return 0;
1025 }
1026
/*
 * Tear down a session's chain of swcr_data entries: zeroize and free
 * key schedules and hash contexts per algorithm class, then free each
 * link.  Secrets are wiped with explicit_memset() (or the xform's
 * zerokey hook) before freeing so the compiler cannot elide the wipe.
 * Safe to call with NULL or a partially-constructed chain (used on
 * swcr_newsession() error paths).
 */
static void
swcr_freesession_internal(struct swcr_data *arg)
{
	struct swcr_data *swd, *swd0;
	const struct swcr_enc_xform *txf;
	const struct swcr_auth_hash *axf;

	if (arg == NULL)
		return;

	swd0 = arg;
	while ((swd = swd0) != NULL) {
		swd0 = swd->sw_next;

		switch (swd->sw_alg) {
		case CRYPTO_DES_CBC:
		case CRYPTO_3DES_CBC:
		case CRYPTO_BLF_CBC:
		case CRYPTO_CAST_CBC:
		case CRYPTO_SKIPJACK_CBC:
		case CRYPTO_AES_CBC:
		case CRYPTO_CAMELLIA_CBC:
		case CRYPTO_AES_CTR:
		case CRYPTO_AES_GCM_16:
		case CRYPTO_AES_GMAC:
		case CRYPTO_NULL_CBC:
			/* Ciphers: zerokey wipes and frees the schedule. */
			txf = swd->sw_exf;

			if (swd->sw_kschedule)
				txf->zerokey(&(swd->sw_kschedule));
			break;

		case CRYPTO_MD5_HMAC:
		case CRYPTO_MD5_HMAC_96:
		case CRYPTO_SHA1_HMAC:
		case CRYPTO_SHA1_HMAC_96:
		case CRYPTO_SHA2_256_HMAC:
		case CRYPTO_SHA2_384_HMAC:
		case CRYPTO_SHA2_512_HMAC:
		case CRYPTO_RIPEMD160_HMAC:
		case CRYPTO_RIPEMD160_HMAC_96:
		case CRYPTO_NULL_HMAC:
			/* HMAC: both contexts are ctxsize bytes. */
			axf = swd->sw_axf;

			if (swd->sw_ictx) {
				explicit_memset(swd->sw_ictx, 0, axf->ctxsize);
				kmem_free(swd->sw_ictx, axf->ctxsize);
			}
			if (swd->sw_octx) {
				explicit_memset(swd->sw_octx, 0, axf->ctxsize);
				kmem_free(swd->sw_octx, axf->ctxsize);
			}
			break;

		case CRYPTO_MD5_KPDK:
		case CRYPTO_SHA1_KPDK:
			/* KPDK: sw_octx holds the raw key (sw_klen bytes). */
			axf = swd->sw_axf;

			if (swd->sw_ictx) {
				explicit_memset(swd->sw_ictx, 0, axf->ctxsize);
				kmem_free(swd->sw_ictx, axf->ctxsize);
			}
			if (swd->sw_octx) {
				explicit_memset(swd->sw_octx, 0, swd->sw_klen);
				kmem_free(swd->sw_octx, swd->sw_klen);
			}
			break;

		case CRYPTO_MD5:
		case CRYPTO_SHA1:
		case CRYPTO_AES_XCBC_MAC_96:
		case CRYPTO_AES_128_GMAC:
		case CRYPTO_AES_192_GMAC:
		case CRYPTO_AES_256_GMAC:
			/* Single-context MACs. */
			axf = swd->sw_axf;

			if (swd->sw_ictx) {
				explicit_memset(swd->sw_ictx, 0, axf->ctxsize);
				kmem_free(swd->sw_ictx, axf->ctxsize);
			}
			break;

		case CRYPTO_DEFLATE_COMP:
		case CRYPTO_DEFLATE_COMP_NOGROW:
		case CRYPTO_GZIP_COMP:
			/* Compression holds no per-session allocations. */
			break;
		}

		kmem_free(swd, sizeof(*swd));
	}
}
1118
1119 /*
1120 * Free a session.
1121 */
1122 static void
swcr_freesession(void * arg,u_int64_t tid)1123 swcr_freesession(void *arg, u_int64_t tid)
1124 {
1125 struct swcr_data *swd;
1126 u_int32_t sid = ((u_int32_t) tid) & 0xffffffff;
1127
1128 KASSERTMSG(sid < swcr_sesnum, "sid=%"PRIu32" swcr_sesnum=%"PRIu32,
1129 sid, swcr_sesnum);
1130 KASSERT(swcr_sessions[sid]);
1131
1132 swd = swcr_sessions[sid];
1133 swcr_sessions[sid] = NULL;
1134 swcr_freesession_internal(swd);
1135 }
1136
1137 /*
1138 * Process a software request.
1139 */
/*
 * Process a software request: look up the session, determine the buffer
 * layout from crp_flags, and walk the descriptor chain dispatching each
 * descriptor to the matching handler (encdec / authcompute / combined /
 * compdec).  Per-descriptor errors are reported via crp->crp_etype;
 * crypto_done() is always called before returning.
 */
static int
swcr_process(void *arg, struct cryptop *crp, int hint)
{
	struct cryptodesc *crd;
	struct swcr_data *sw;
	u_int32_t lid;
	int type;

	/* Sanity check */
	if (crp == NULL)
		return EINVAL;

	if (crp->crp_desc == NULL || crp->crp_buf == NULL) {
		crp->crp_etype = EINVAL;
		goto done;
	}

	lid = crp->crp_sid & 0xffffffff;
	if (lid >= swcr_sesnum || lid == 0 || swcr_sessions[lid] == NULL) {
		crp->crp_etype = ENOENT;
		goto done;
	}

	/* Map request flags to the internal buffer-layout tag. */
	if (crp->crp_flags & CRYPTO_F_IMBUF) {
		type = CRYPTO_BUF_MBUF;
	} else if (crp->crp_flags & CRYPTO_F_IOV) {
		type = CRYPTO_BUF_IOV;
	} else {
		type = CRYPTO_BUF_CONTIG;
	}

	/* Go through crypto descriptors, processing as we go */
	for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
		/*
		 * Find the crypto context.
		 *
		 * XXX Note that the logic here prevents us from having
		 * XXX the same algorithm multiple times in a session
		 * XXX (or rather, we can but it won't give us the right
		 * XXX results). To do that, we'd need some way of differentiating
		 * XXX between the various instances of an algorithm (so we can
		 * XXX locate the correct crypto context).
		 */
		for (sw = swcr_sessions[lid];
		    sw && sw->sw_alg != crd->crd_alg;
		    sw = sw->sw_next)
			;

		/* No such context ? */
		if (sw == NULL) {
			crp->crp_etype = EINVAL;
			goto done;
		}

		switch (sw->sw_alg) {
		case CRYPTO_DES_CBC:
		case CRYPTO_3DES_CBC:
		case CRYPTO_BLF_CBC:
		case CRYPTO_CAST_CBC:
		case CRYPTO_SKIPJACK_CBC:
		case CRYPTO_AES_CBC:
		case CRYPTO_CAMELLIA_CBC:
		case CRYPTO_AES_CTR:
			if ((crp->crp_etype = swcr_encdec(crd, sw,
			    crp->crp_buf, type)) != 0)
				goto done;
			break;
		case CRYPTO_NULL_CBC:
			/* No-op cipher. */
			crp->crp_etype = 0;
			break;
		case CRYPTO_MD5_HMAC:
		case CRYPTO_MD5_HMAC_96:
		case CRYPTO_SHA1_HMAC:
		case CRYPTO_SHA1_HMAC_96:
		case CRYPTO_SHA2_256_HMAC:
		case CRYPTO_SHA2_384_HMAC:
		case CRYPTO_SHA2_512_HMAC:
		case CRYPTO_RIPEMD160_HMAC:
		case CRYPTO_RIPEMD160_HMAC_96:
		case CRYPTO_NULL_HMAC:
		case CRYPTO_MD5_KPDK:
		case CRYPTO_SHA1_KPDK:
		case CRYPTO_MD5:
		case CRYPTO_SHA1:
		case CRYPTO_AES_XCBC_MAC_96:
			if ((crp->crp_etype = swcr_authcompute(crp, crd, sw,
			    crp->crp_buf, type)) != 0)
				goto done;
			break;

		case CRYPTO_AES_GCM_16:
		case CRYPTO_AES_GMAC:
		case CRYPTO_AES_128_GMAC:
		case CRYPTO_AES_192_GMAC:
		case CRYPTO_AES_256_GMAC:
			/* Combined mode consumes the whole request at once. */
			crp->crp_etype = swcr_combined(crp, type);
			goto done;

		case CRYPTO_DEFLATE_COMP:
		case CRYPTO_DEFLATE_COMP_NOGROW:
		case CRYPTO_GZIP_COMP:
			DPRINTF("compdec for %d\n", sw->sw_alg);
			if ((crp->crp_etype = swcr_compdec(crd, sw,
			    crp->crp_buf, type, &crp->crp_olen)) != 0)
				goto done;
			break;

		default:
			/* Unknown/unsupported algorithm */
			crp->crp_etype = EINVAL;
			goto done;
		}
	}

done:
	DPRINTF("request %p done\n", crp);
	crypto_done(crp);
	return 0;
}
1259
1260 static void
swcr_init(void)1261 swcr_init(void)
1262 {
1263 swcr_id = crypto_get_driverid(CRYPTOCAP_F_SOFTWARE);
1264 if (swcr_id < 0) {
1265 /* This should never happen */
1266 panic("Software crypto device cannot initialize!");
1267 }
1268
1269 crypto_register(swcr_id, CRYPTO_DES_CBC,
1270 0, 0, swcr_newsession, swcr_freesession, swcr_process, NULL);
1271 #define REGISTER(alg) \
1272 crypto_register(swcr_id, alg, 0, 0, NULL, NULL, NULL, NULL)
1273
1274 REGISTER(CRYPTO_3DES_CBC);
1275 REGISTER(CRYPTO_BLF_CBC);
1276 REGISTER(CRYPTO_CAST_CBC);
1277 REGISTER(CRYPTO_SKIPJACK_CBC);
1278 REGISTER(CRYPTO_CAMELLIA_CBC);
1279 REGISTER(CRYPTO_AES_CTR);
1280 REGISTER(CRYPTO_AES_GCM_16);
1281 REGISTER(CRYPTO_AES_GMAC);
1282 REGISTER(CRYPTO_NULL_CBC);
1283 REGISTER(CRYPTO_MD5_HMAC);
1284 REGISTER(CRYPTO_MD5_HMAC_96);
1285 REGISTER(CRYPTO_SHA1_HMAC);
1286 REGISTER(CRYPTO_SHA1_HMAC_96);
1287 REGISTER(CRYPTO_SHA2_256_HMAC);
1288 REGISTER(CRYPTO_SHA2_384_HMAC);
1289 REGISTER(CRYPTO_SHA2_512_HMAC);
1290 REGISTER(CRYPTO_RIPEMD160_HMAC);
1291 REGISTER(CRYPTO_RIPEMD160_HMAC_96);
1292 REGISTER(CRYPTO_NULL_HMAC);
1293 REGISTER(CRYPTO_MD5_KPDK);
1294 REGISTER(CRYPTO_SHA1_KPDK);
1295 REGISTER(CRYPTO_MD5);
1296 REGISTER(CRYPTO_SHA1);
1297 REGISTER(CRYPTO_AES_XCBC_MAC_96);
1298 REGISTER(CRYPTO_AES_128_GMAC);
1299 REGISTER(CRYPTO_AES_192_GMAC);
1300 REGISTER(CRYPTO_AES_256_GMAC);
1301 REGISTER(CRYPTO_AES_CBC);
1302 REGISTER(CRYPTO_DEFLATE_COMP);
1303 REGISTER(CRYPTO_DEFLATE_COMP_NOGROW);
1304 REGISTER(CRYPTO_GZIP_COMP);
1305 #undef REGISTER
1306 }
1307
1308
1309 /*
1310 * Pseudo-device init routine for software crypto.
1311 */
1312
void
swcryptoattach(int num)
{
	/*
	 * swcrypto_attach() may run only after all CPUs have attached:
	 * swcr_init() reaches softint_establish() via
	 *   crypto_get_driverid() => crypto_init() => crypto_init0()
	 * and establishing the soft interrupt while ncpu == 0 would bind
	 * the handler to CPU#0 only.
	 *
	 * Hence, when built as a builtin module, swcrypto_attach() must
	 * be driven from config_finalize(), not module_init_class().
	 */
	(void)swcryptoattach_internal();
}
1329
1330 void swcrypto_attach(device_t, device_t, void *);
1331
1332 void
swcrypto_attach(device_t parent,device_t self,void * opaque)1333 swcrypto_attach(device_t parent, device_t self, void *opaque)
1334 {
1335
1336 swcr_init();
1337
1338 if (!pmf_device_register(self, NULL, NULL))
1339 aprint_error_dev(self, "couldn't establish power handler\n");
1340 }
1341
1342 int swcrypto_detach(device_t, int);
1343
1344 int
swcrypto_detach(device_t self,int flag)1345 swcrypto_detach(device_t self, int flag)
1346 {
1347 pmf_device_deregister(self);
1348 if (swcr_id >= 0)
1349 crypto_unregister_all(swcr_id);
1350 return 0;
1351 }
1352
1353 int swcrypto_match(device_t, cfdata_t, void *);
1354
1355 int
swcrypto_match(device_t parent,cfdata_t data,void * opaque)1356 swcrypto_match(device_t parent, cfdata_t data, void *opaque)
1357 {
1358
1359 return 1;
1360 }
1361
/*
 * Module declaration: swcrypto depends on the opencrypto framework and
 * on the cipher/compression modules whose algorithms it wraps.
 */
MODULE(MODULE_CLASS_DRIVER, swcrypto,
	"opencrypto,zlib,blowfish,des,cast128,camellia,skipjack");

/* Autoconfiguration glue for the swcrypto pseudo-device. */
CFDRIVER_DECL(swcrypto, DV_DULL, NULL);

CFATTACH_DECL2_NEW(swcrypto, 0, swcrypto_match, swcrypto_attach,
    swcrypto_detach, NULL, NULL, NULL);

/* Locator array: no locators for this pseudo-device. */
static int swcryptoloc[] = { -1, -1 };

/* cfdata used by config_cfdata_attach()/config_attach_pseudo(). */
static struct cfdata swcrypto_cfdata[] = {
	{
		.cf_name = "swcrypto",
		.cf_atname = "swcrypto",
		.cf_unit = 0,
		.cf_fstate = 0,
		.cf_loc = swcryptoloc,
		.cf_flags = 0,
		.cf_pspec = NULL,
	},
	{ NULL, NULL, 0, 0, NULL, 0, NULL } /* terminator */
};
1384
1385 /*
1386 * Internal attach routine.
1387 * Don't call before attached cpus.
1388 */
1389 static int
swcryptoattach_internal(void)1390 swcryptoattach_internal(void)
1391 {
1392 int error;
1393
1394 error = config_cfdriver_attach(&swcrypto_cd);
1395 if (error) {
1396 return error;
1397 }
1398
1399 error = config_cfattach_attach(swcrypto_cd.cd_name, &swcrypto_ca);
1400 if (error) {
1401 config_cfdriver_detach(&swcrypto_cd);
1402 aprint_error("%s: unable to register cfattach\n",
1403 swcrypto_cd.cd_name);
1404
1405 return error;
1406 }
1407
1408 error = config_cfdata_attach(swcrypto_cfdata, 1);
1409 if (error) {
1410 config_cfattach_detach(swcrypto_cd.cd_name,
1411 &swcrypto_ca);
1412 config_cfdriver_detach(&swcrypto_cd);
1413 aprint_error("%s: unable to register cfdata\n",
1414 swcrypto_cd.cd_name);
1415
1416 return error;
1417 }
1418
1419 (void)config_attach_pseudo(swcrypto_cfdata);
1420
1421 return 0;
1422 }
1423
/*
 * Module control entry point for the swcrypto driver module.
 */
static int
swcrypto_modcmd(modcmd_t cmd, void *arg)
{
	int error = 0;

	switch (cmd) {
	case MODULE_CMD_INIT:
#ifdef _MODULE
		/*
		 * Loadable module: attach now.  The builtin case attaches
		 * later, from config_finalize() via swcryptoattach().
		 */
		error = swcryptoattach_internal();
#endif
		return error;
	case MODULE_CMD_FINI:
#if 1
		/* Unload disabled until active-use tracking exists. */
		// XXX: Need to keep track if we are in use.
		return ENOTTY;
#else
		/* Intended teardown path, reverse of attach. */
		error = config_cfdata_detach(swcrypto_cfdata);
		if (error) {
			return error;
		}

		config_cfattach_detach(swcrypto_cd.cd_name, &swcrypto_ca);
		config_cfdriver_detach(&swcrypto_cd);

		return 0;
#endif
	default:
		return ENOTTY;
	}
}
1454