1 /* $NetBSD: cryptodev.c,v 1.39 2008/04/21 19:05:41 tls Exp $ */ 2 /* $FreeBSD: src/sys/opencrypto/cryptodev.c,v 1.4.2.4 2003/06/03 00:09:02 sam Exp $ */ 3 /* $OpenBSD: cryptodev.c,v 1.53 2002/07/10 22:21:30 mickey Exp $ */ 4 5 /*- 6 * Copyright (c) 2008 The NetBSD Foundation, Inc. 7 * All rights reserved. 8 * 9 * This code is derived from software contributed to The NetBSD Foundation 10 * by Coyote Point Systems, Inc. 11 * 12 * Redistribution and use in source and binary forms, with or without 13 * modification, are permitted provided that the following conditions 14 * are met: 15 * 1. Redistributions of source code must retain the above copyright 16 * notice, this list of conditions and the following disclaimer. 17 * 2. Redistributions in binary form must reproduce the above copyright 18 * notice, this list of conditions and the following disclaimer in the 19 * documentation and/or other materials provided with the distribution. 20 * 3. All advertising materials mentioning features or use of this software 21 * must display the following acknowledgement: 22 * This product includes software developed by the NetBSD 23 * Foundation, Inc. and its contributors. 24 * 4. Neither the name of The NetBSD Foundation nor the names of its 25 * contributors may be used to endorse or promote products derived 26 * from this software without specific prior written permission. 27 * 28 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 29 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 30 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 31 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 32 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 33 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 34 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 35 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 36 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 37 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 38 * POSSIBILITY OF SUCH DAMAGE. 39 */ 40 41 /* 42 * Copyright (c) 2001 Theo de Raadt 43 * 44 * Redistribution and use in source and binary forms, with or without 45 * modification, are permitted provided that the following conditions 46 * are met: 47 * 48 * 1. Redistributions of source code must retain the above copyright 49 * notice, this list of conditions and the following disclaimer. 50 * 2. Redistributions in binary form must reproduce the above copyright 51 * notice, this list of conditions and the following disclaimer in the 52 * documentation and/or other materials provided with the distribution. 53 * 3. The name of the author may not be used to endorse or promote products 54 * derived from this software without specific prior written permission. 55 * 56 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 57 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 58 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
59 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 60 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 61 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 62 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 63 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 64 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 65 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 66 * 67 * Effort sponsored in part by the Defense Advanced Research Projects 68 * Agency (DARPA) and Air Force Research Laboratory, Air Force 69 * Materiel Command, USAF, under agreement number F30602-01-2-0537. 70 * 71 */ 72 73 #include <sys/cdefs.h> 74 __KERNEL_RCSID(0, "$NetBSD: cryptodev.c,v 1.39 2008/04/21 19:05:41 tls Exp $"); 75 76 #include <sys/param.h> 77 #include <sys/systm.h> 78 #include <sys/kmem.h> 79 #include <sys/malloc.h> 80 #include <sys/mbuf.h> 81 #include <sys/pool.h> 82 #include <sys/sysctl.h> 83 #include <sys/file.h> 84 #include <sys/filedesc.h> 85 #include <sys/errno.h> 86 #include <sys/md5.h> 87 #include <sys/sha1.h> 88 #include <sys/conf.h> 89 #include <sys/device.h> 90 #include <sys/kauth.h> 91 #include <sys/select.h> 92 #include <sys/poll.h> 93 #include <sys/atomic.h> 94 95 #include "opt_ocf.h" 96 #include <opencrypto/cryptodev.h> 97 #include <opencrypto/xform.h> 98 99 struct csession { 100 TAILQ_ENTRY(csession) next; 101 u_int64_t sid; 102 u_int32_t ses; 103 104 u_int32_t cipher; 105 struct enc_xform *txform; 106 u_int32_t mac; 107 struct auth_hash *thash; 108 109 void * key; 110 int keylen; 111 u_char tmp_iv[EALG_MAX_BLOCK_LEN]; 112 113 void * mackey; 114 int mackeylen; 115 u_char tmp_mac[CRYPTO_MAX_MAC_LEN]; 116 117 struct iovec iovec[1]; /* user requests never have more */ 118 struct uio uio; 119 int error; 120 }; 121 122 struct fcrypt { 123 TAILQ_HEAD(csessionlist, csession) csessions; 124 TAILQ_HEAD(crprethead, cryptop) crp_ret_mq; 125 TAILQ_HEAD(krprethead, cryptkop) crp_ret_mkq; 126 int sesn; 127 struct selinfo sinfo; 128 u_int32_t requestid; 129 }; 130 131 /* For our fixed-size allocations */ 132 static struct pool fcrpl; 133 static struct pool csepl; 134 135 /* Declaration of master device (fd-cloning/ctxt-allocating) entrypoints */ 136 static int cryptoopen(dev_t dev, int flag, int mode, struct lwp *l); 137 static int cryptoread(dev_t dev, struct uio *uio, int ioflag); 138 static int cryptowrite(dev_t dev, struct uio *uio, int ioflag); 139 static int cryptoselect(dev_t dev, int rw, struct lwp *l); 140 141 /* Declaration of cloned-device (per-ctxt) entrypoints */ 142 static int cryptof_read(struct file *, off_t *, struct uio *, 143 kauth_cred_t, int); 144 static int cryptof_write(struct file *, off_t *, struct uio *, 145 kauth_cred_t, int); 146 static int cryptof_ioctl(struct file *, u_long, void *); 147 static int cryptof_close(struct file *); 148 static int cryptof_poll(struct file *, int); 149 150 static const struct fileops cryptofops = { 151 cryptof_read, 152 cryptof_write, 153 cryptof_ioctl, 154 fnullop_fcntl, 155 cryptof_poll, 156 fbadop_stat, 157 cryptof_close, 158 fnullop_kqfilter 159 }; 160 161 static struct csession *csefind(struct fcrypt *, u_int); 162 static int csedelete(struct fcrypt *, struct csession *); 163 static struct csession *cseadd(struct fcrypt *, struct csession *); 164 static struct csession *csecreate(struct fcrypt *, u_int64_t, void *, u_int64_t, 165 void *, u_int64_t, u_int32_t, u_int32_t, struct enc_xform *, 
166 struct auth_hash *); 167 static int csefree(struct csession *); 168 169 static int cryptodev_op(struct csession *, struct crypt_op *, struct lwp *); 170 static int cryptodev_mop(struct fcrypt *, struct crypt_n_op *, int, struct 171 lwp *); 172 static int cryptodev_key(struct crypt_kop *); 173 static int cryptodev_mkey(struct fcrypt *, struct crypt_n_kop *, int); 174 static int cryptodev_session(struct fcrypt *, struct session_op *); 175 static int cryptodev_msession(struct fcrypt *, struct session_n_op *, 176 int); 177 static int cryptodev_msessionfin(struct fcrypt *, int, u_int32_t *); 178 179 static int cryptodev_cb(void *); 180 static int cryptodevkey_cb(void *); 181 182 static int cryptodev_mcb(void *); 183 static int cryptodevkey_mcb(void *); 184 185 static int cryptodev_getmstatus(struct fcrypt *, struct crypt_result *, 186 int); 187 static int cryptodev_getstatus(struct fcrypt *, struct crypt_result *); 188 189 /* 190 * sysctl-able control variables for /dev/crypto now defined in crypto.c: 191 * crypto_usercrypto, crypto_userasmcrypto, crypto_devallowsoft. 192 */ 193 194 /* ARGSUSED */ 195 int 196 cryptof_read(file_t *fp, off_t *poff, 197 struct uio *uio, kauth_cred_t cred, int flags) 198 { 199 return (EIO); 200 } 201 202 /* ARGSUSED */ 203 int 204 cryptof_write(file_t *fp, off_t *poff, 205 struct uio *uio, kauth_cred_t cred, int flags) 206 { 207 return (EIO); 208 } 209 210 /* ARGSUSED */ 211 int 212 cryptof_ioctl(struct file *fp, u_long cmd, void *data) 213 { 214 struct fcrypt *fcr = fp->f_data; 215 struct csession *cse; 216 struct session_op *sop; 217 struct session_n_op *snop; 218 struct crypt_op *cop; 219 struct crypt_mop *mop; 220 struct crypt_mkop *mkop; 221 struct crypt_n_op *cnop; 222 struct crypt_n_kop *knop; 223 struct crypt_sgop *sgop; 224 struct crypt_sfop *sfop; 225 struct cryptret *crypt_ret; 226 struct crypt_result *crypt_res; 227 u_int32_t ses; 228 u_int32_t *sesid; 229 int error = 0; 230 size_t count; 231 232 /* backwards compatibility */ 233 file_t *criofp; 234 struct fcrypt *criofcr; 235 int criofd; 236 237 switch (cmd) { 238 case CRIOGET: /* XXX deprecated, remove after 5.0 */ 239 if ((error = fd_allocfile(&criofp, &criofd)) != 0) 240 return error; 241 criofcr = pool_get(&fcrpl, PR_WAITOK); 242 mutex_spin_enter(&crypto_mtx); 243 TAILQ_INIT(&criofcr->csessions); 244 TAILQ_INIT(&criofcr->crp_ret_mq); 245 TAILQ_INIT(&criofcr->crp_ret_mkq); 246 selinit(&criofcr->sinfo); 247 248 /* 249 * Don't ever return session 0, to allow detection of 250 * failed creation attempts with multi-create ioctl. 
251 */ 252 criofcr->sesn = 1; 253 criofcr->requestid = 1; 254 mutex_spin_exit(&crypto_mtx); 255 (void)fd_clone(criofp, criofd, (FREAD|FWRITE), 256 &cryptofops, criofcr); 257 *(u_int32_t *)data = criofd; 258 return error; 259 break; 260 case CIOCGSESSION: 261 sop = (struct session_op *)data; 262 error = cryptodev_session(fcr, sop); 263 break; 264 case CIOCNGSESSION: 265 sgop = (struct crypt_sgop *)data; 266 snop = kmem_alloc((sgop->count * 267 sizeof(struct session_n_op)), KM_SLEEP); 268 error = copyin(sgop->sessions, snop, sgop->count * 269 sizeof(struct session_n_op)); 270 if (error) { 271 goto mbail; 272 } 273 274 error = cryptodev_msession(fcr, snop, sgop->count); 275 if (error) { 276 goto mbail; 277 } 278 279 error = copyout(snop, sgop->sessions, sgop->count * 280 sizeof(struct session_n_op)); 281 mbail: 282 kmem_free(snop, sgop->count * sizeof(struct session_n_op)); 283 break; 284 case CIOCFSESSION: 285 mutex_spin_enter(&crypto_mtx); 286 ses = *(u_int32_t *)data; 287 cse = csefind(fcr, ses); 288 if (cse == NULL) { 289 mutex_spin_exit(&crypto_mtx); return (EINVAL); } 290 csedelete(fcr, cse); 291 error = csefree(cse); 292 mutex_spin_exit(&crypto_mtx); 293 break; 294 case CIOCNFSESSION: 295 sfop = (struct crypt_sfop *)data; 296 sesid = kmem_alloc((sfop->count * sizeof(u_int32_t)), 297 KM_SLEEP); 298 error = copyin(sfop->sesid, sesid, 299 (sfop->count * sizeof(u_int32_t))); 300 if (!error) { 301 error = cryptodev_msessionfin(fcr, sfop->count, sesid); 302 } 303 kmem_free(sesid, (sfop->count * sizeof(u_int32_t))); 304 break; 305 case CIOCCRYPT: 306 mutex_spin_enter(&crypto_mtx); 307 cop = (struct crypt_op *)data; 308 cse = csefind(fcr, cop->ses); 309 mutex_spin_exit(&crypto_mtx); 310 if (cse == NULL) { 311 DPRINTF(("csefind failed\n")); 312 return (EINVAL); 313 } 314 error = cryptodev_op(cse, cop, curlwp); 315 DPRINTF(("cryptodev_op error = %d\n", error)); 316 break; 317 case CIOCNCRYPTM: 318 mop = (struct crypt_mop *)data; 319 cnop = kmem_alloc((mop->count * sizeof(struct crypt_n_op)), 320 KM_SLEEP); 321 error = copyin(mop->reqs, cnop, 322 (mop->count * sizeof(struct crypt_n_op))); 323 if(!error) { 324 error = cryptodev_mop(fcr, cnop, mop->count, 325 curlwp); 326 if (!error) { 327 error = copyout(cnop, mop->reqs, 328 (mop->count * 329 sizeof(struct crypt_n_op))); 330 } 331 } 332 kmem_free(cnop, (mop->count * sizeof(struct crypt_n_op))); 333 break; 334 case CIOCKEY: 335 error = cryptodev_key((struct crypt_kop *)data); 336 DPRINTF(("cryptodev_key error = %d\n", error)); 337 break; 338 case CIOCNFKEYM: 339 mkop = (struct crypt_mkop *)data; 340 knop = kmem_alloc((mkop->count * sizeof(struct crypt_n_kop)), 341 KM_SLEEP); 342 error = copyin(mkop->reqs, knop, 343 (mkop->count * sizeof(struct crypt_n_kop))); 344 if (!error) { 345 error = cryptodev_mkey(fcr, knop, mkop->count); 346 if (!error) 347 error = copyout(knop, mkop->reqs, 348 (mkop->count * 349 sizeof(struct crypt_n_kop))); 350 } 351 kmem_free(knop, (mkop->count * sizeof(struct crypt_n_kop))); 352 break; 353 case CIOCASYMFEAT: 354 error = crypto_getfeat((int *)data); 355 break; 356 case CIOCNCRYPTRETM: 357 crypt_ret = (struct cryptret *)data; 358 count = crypt_ret->count; 359 crypt_res = kmem_alloc((count * 360 sizeof(struct crypt_result)), 361 KM_SLEEP); 362 error = copyin(crypt_ret->results, crypt_res, 363 (count * sizeof(struct crypt_result))); 364 if (error) 365 goto reterr; 366 crypt_ret->count = cryptodev_getmstatus(fcr, crypt_res, 367 crypt_ret->count); 368 /* sanity check count */ 369 if (crypt_ret->count > count) { 370 printf("%s.%d: error returned count %zd > 
original " 371 " count %zd\n", 372 __FILE__, __LINE__, crypt_ret->count, count); 373 crypt_ret->count = count; 374 375 } 376 error = copyout(crypt_res, crypt_ret->results, 377 (crypt_ret->count * sizeof(struct crypt_result))); 378 reterr: 379 kmem_free(crypt_res, 380 (count * sizeof(struct crypt_result))); 381 break; 382 case CIOCNCRYPTRET: 383 error = cryptodev_getstatus(fcr, (struct crypt_result *)data); 384 break; 385 default: 386 DPRINTF(("invalid ioctl cmd %ld\n", cmd)); 387 error = EINVAL; 388 } 389 return (error); 390 } 391 392 static int 393 cryptodev_op(struct csession *cse, struct crypt_op *cop, struct lwp *l) 394 { 395 struct cryptop *crp = NULL; 396 struct cryptodesc *crde = NULL, *crda = NULL; 397 int error; 398 399 if (cop->len > 256*1024-4) 400 return (E2BIG); 401 402 if (cse->txform) { 403 if (cop->len == 0 || (cop->len % cse->txform->blocksize) != 0) 404 return (EINVAL); 405 } 406 407 bzero(&cse->uio, sizeof(cse->uio)); 408 cse->uio.uio_iovcnt = 1; 409 cse->uio.uio_resid = 0; 410 cse->uio.uio_rw = UIO_WRITE; 411 cse->uio.uio_iov = cse->iovec; 412 UIO_SETUP_SYSSPACE(&cse->uio); 413 memset(&cse->iovec, 0, sizeof(cse->iovec)); 414 cse->uio.uio_iov[0].iov_len = cop->len; 415 cse->uio.uio_iov[0].iov_base = kmem_alloc(cop->len, KM_SLEEP); 416 cse->uio.uio_resid = cse->uio.uio_iov[0].iov_len; 417 418 crp = crypto_getreq((cse->txform != NULL) + (cse->thash != NULL)); 419 if (crp == NULL) { 420 error = ENOMEM; 421 goto bail; 422 } 423 424 if (cse->thash) { 425 crda = crp->crp_desc; 426 if (cse->txform) 427 crde = crda->crd_next; 428 } else { 429 if (cse->txform) 430 crde = crp->crp_desc; 431 else { 432 error = EINVAL; 433 goto bail; 434 } 435 } 436 437 if ((error = copyin(cop->src, cse->uio.uio_iov[0].iov_base, cop->len))) 438 { 439 printf("copyin failed %s %d \n", (char *)cop->src, error); 440 goto bail; 441 } 442 443 if (crda) { 444 crda->crd_skip = 0; 445 crda->crd_len = cop->len; 446 crda->crd_inject = 0; /* ??? */ 447 448 crda->crd_alg = cse->mac; 449 crda->crd_key = cse->mackey; 450 crda->crd_klen = cse->mackeylen * 8; 451 } 452 453 if (crde) { 454 if (cop->op == COP_ENCRYPT) 455 crde->crd_flags |= CRD_F_ENCRYPT; 456 else 457 crde->crd_flags &= ~CRD_F_ENCRYPT; 458 crde->crd_len = cop->len; 459 crde->crd_inject = 0; 460 461 crde->crd_alg = cse->cipher; 462 crde->crd_key = cse->key; 463 crde->crd_klen = cse->keylen * 8; 464 } 465 466 crp->crp_ilen = cop->len; 467 crp->crp_flags = CRYPTO_F_IOV | (cop->flags & COP_F_BATCH); 468 crp->crp_buf = (void *)&cse->uio; 469 crp->crp_callback = (int (*) (struct cryptop *)) cryptodev_cb; 470 crp->crp_sid = cse->sid; 471 crp->crp_opaque = (void *)cse; 472 473 if (cop->iv) { 474 if (crde == NULL) { 475 error = EINVAL; 476 goto bail; 477 } 478 if (cse->cipher == CRYPTO_ARC4) { /* XXX use flag? */ 479 error = EINVAL; 480 goto bail; 481 } 482 if ((error = copyin(cop->iv, cse->tmp_iv, 483 cse->txform->blocksize))) 484 goto bail; 485 bcopy(cse->tmp_iv, crde->crd_iv, cse->txform->blocksize); 486 crde->crd_flags |= CRD_F_IV_EXPLICIT | CRD_F_IV_PRESENT; 487 crde->crd_skip = 0; 488 } else if (crde) { 489 if (cse->cipher == CRYPTO_ARC4) { /* XXX use flag? 
*/ 490 crde->crd_skip = 0; 491 } else { 492 crde->crd_flags |= CRD_F_IV_PRESENT; 493 crde->crd_skip = cse->txform->blocksize; 494 crde->crd_len -= cse->txform->blocksize; 495 } 496 } 497 498 if (cop->mac) { 499 if (crda == NULL) { 500 error = EINVAL; 501 goto bail; 502 } 503 crp->crp_mac=cse->tmp_mac; 504 } 505 506 /* 507 * XXX there was a comment here which said that we went to 508 * XXX splcrypto() but needed to only if CRYPTO_F_CBIMM, 509 * XXX disabled on NetBSD since 1.6O due to a race condition. 510 * XXX But crypto_dispatch went to splcrypto() itself! (And 511 * XXX now takes the crypto_mtx mutex itself). We do, however, 512 * XXX need to hold the mutex across the call to cv_wait(). 513 * XXX (should we arrange for crypto_dispatch to return to 514 * XXX us with it held? it seems quite ugly to do so.) 515 */ 516 #ifdef notyet 517 eagain: 518 #endif 519 error = crypto_dispatch(crp); 520 mutex_spin_enter(&crypto_mtx); 521 522 switch (error) { 523 #ifdef notyet /* don't loop forever -- but EAGAIN not possible here yet */ 524 case EAGAIN: 525 mutex_spin_exit(&crypto_mtx); 526 goto eagain; 527 break; 528 #endif 529 case 0: 530 break; 531 default: 532 DPRINTF(("cryptodev_op: not waiting, error.\n")); 533 mutex_spin_exit(&crypto_mtx); 534 goto bail; 535 } 536 537 while (!(crp->crp_flags & CRYPTO_F_DONE)) { 538 DPRINTF(("cryptodev_op: sleeping on cv %08x for crp %08x\n", \ 539 (uint32_t)&crp->crp_cv, (uint32_t)crp)); 540 cv_wait(&crp->crp_cv, &crypto_mtx); /* XXX cv_wait_sig? */ 541 } 542 if (crp->crp_flags & CRYPTO_F_ONRETQ) { 543 DPRINTF(("cryptodev_op: DONE, not woken by cryptoret.\n")); 544 (void)crypto_ret_q_remove(crp); 545 } 546 mutex_spin_exit(&crypto_mtx); 547 548 if (crp->crp_etype != 0) { 549 DPRINTF(("cryptodev_op: crp_etype %d\n", crp->crp_etype)); 550 error = crp->crp_etype; 551 goto bail; 552 } 553 554 if (cse->error) { 555 DPRINTF(("cryptodev_op: cse->error %d\n", cse->error)); 556 error = cse->error; 557 goto bail; 558 } 559 560 if (cop->dst && 561 (error = copyout(cse->uio.uio_iov[0].iov_base, cop->dst, cop->len))) 562 { 563 DPRINTF(("cryptodev_op: copyout error %d\n", error)); 564 goto bail; 565 } 566 567 if (cop->mac && 568 (error = copyout(crp->crp_mac, cop->mac, cse->thash->authsize))) { 569 DPRINTF(("cryptodev_op: mac copyout error %d\n", error)); 570 goto bail; 571 } 572 573 bail: 574 if (crp) 575 crypto_freereq(crp); 576 if (cse->uio.uio_iov[0].iov_base) 577 kmem_free(cse->uio.uio_iov[0].iov_base, 578 cse->uio.uio_iov[0].iov_len); 579 580 return (error); 581 } 582 583 static int 584 cryptodev_cb(void *op) 585 { 586 struct cryptop *crp = (struct cryptop *) op; 587 struct csession *cse = (struct csession *)crp->crp_opaque; 588 int error = 0; 589 590 mutex_spin_enter(&crypto_mtx); 591 cse->error = crp->crp_etype; 592 if (crp->crp_etype == EAGAIN) { 593 /* always drop mutex to call dispatch routine */ 594 mutex_spin_exit(&crypto_mtx); 595 error = crypto_dispatch(crp); 596 mutex_spin_enter(&crypto_mtx); 597 } 598 if (error != 0 || (crp->crp_flags & CRYPTO_F_DONE)) { 599 cv_signal(&crp->crp_cv); 600 } 601 mutex_spin_exit(&crypto_mtx); 602 return (0); 603 } 604 605 static int 606 cryptodev_mcb(void *op) 607 { 608 struct cryptop *crp = (struct cryptop *) op; 609 struct csession *cse = (struct csession *)crp->crp_opaque; 610 int error=0; 611 612 mutex_spin_enter(&crypto_mtx); 613 cse->error = crp->crp_etype; 614 if (crp->crp_etype == EAGAIN) { 615 mutex_spin_exit(&crypto_mtx); 616 error = crypto_dispatch(crp); 617 mutex_spin_enter(&crypto_mtx); 618 } 619 if (error != 0 || 
(crp->crp_flags & CRYPTO_F_DONE)) { 620 cv_signal(&crp->crp_cv); 621 } 622 623 TAILQ_INSERT_TAIL(&crp->fcrp->crp_ret_mq, crp, crp_next); 624 selnotify(&crp->fcrp->sinfo, 0, 0); 625 mutex_spin_exit(&crypto_mtx); 626 return (0); 627 } 628 629 static int 630 cryptodevkey_cb(void *op) 631 { 632 struct cryptkop *krp = (struct cryptkop *) op; 633 634 mutex_spin_enter(&crypto_mtx); 635 cv_signal(&krp->krp_cv); 636 mutex_spin_exit(&crypto_mtx); 637 return (0); 638 } 639 640 static int 641 cryptodevkey_mcb(void *op) 642 { 643 struct cryptkop *krp = (struct cryptkop *) op; 644 645 mutex_spin_enter(&crypto_mtx); 646 cv_signal(&krp->krp_cv); 647 TAILQ_INSERT_TAIL(&krp->fcrp->crp_ret_mkq, krp, krp_next); 648 selnotify(&krp->fcrp->sinfo, 0, 0); 649 mutex_spin_exit(&crypto_mtx); 650 return (0); 651 } 652 653 static int 654 cryptodev_key(struct crypt_kop *kop) 655 { 656 struct cryptkop *krp = NULL; 657 int error = EINVAL; 658 int in, out, size, i; 659 660 if (kop->crk_iparams + kop->crk_oparams > CRK_MAXPARAM) { 661 return (EFBIG); 662 } 663 664 in = kop->crk_iparams; 665 out = kop->crk_oparams; 666 switch (kop->crk_op) { 667 case CRK_MOD_EXP: 668 if (in == 3 && out == 1) 669 break; 670 return (EINVAL); 671 case CRK_MOD_EXP_CRT: 672 if (in == 6 && out == 1) 673 break; 674 return (EINVAL); 675 case CRK_DSA_SIGN: 676 if (in == 5 && out == 2) 677 break; 678 return (EINVAL); 679 case CRK_DSA_VERIFY: 680 if (in == 7 && out == 0) 681 break; 682 return (EINVAL); 683 case CRK_DH_COMPUTE_KEY: 684 if (in == 3 && out == 1) 685 break; 686 return (EINVAL); 687 case CRK_MOD_ADD: 688 if (in == 3 && out == 1) 689 break; 690 return (EINVAL); 691 case CRK_MOD_ADDINV: 692 if (in == 2 && out == 1) 693 break; 694 return (EINVAL); 695 case CRK_MOD_SUB: 696 if (in == 3 && out == 1) 697 break; 698 return (EINVAL); 699 case CRK_MOD_MULT: 700 if (in == 3 && out == 1) 701 break; 702 return (EINVAL); 703 case CRK_MOD_MULTINV: 704 if (in == 2 && out == 1) 705 break; 706 return (EINVAL); 707 case CRK_MOD: 708 if (in == 2 && out == 1) 709 break; 710 return (EINVAL); 711 default: 712 return (EINVAL); 713 } 714 715 krp = pool_get(&cryptkop_pool, PR_WAITOK); 716 bzero(krp, sizeof *krp); 717 cv_init(&krp->krp_cv, "crykdev"); 718 krp->krp_op = kop->crk_op; 719 krp->krp_status = kop->crk_status; 720 krp->krp_iparams = kop->crk_iparams; 721 krp->krp_oparams = kop->crk_oparams; 722 krp->krp_status = 0; 723 krp->krp_callback = (int (*) (struct cryptkop *)) cryptodevkey_cb; 724 725 for (i = 0; i < CRK_MAXPARAM; i++) 726 krp->krp_param[i].crp_nbits = kop->crk_param[i].crp_nbits; 727 for (i = 0; i < krp->krp_iparams + krp->krp_oparams; i++) { 728 size = (krp->krp_param[i].crp_nbits + 7) / 8; 729 if (size == 0) 730 continue; 731 krp->krp_param[i].crp_p = kmem_alloc(size, KM_SLEEP); 732 if (i >= krp->krp_iparams) 733 continue; 734 error = copyin(kop->crk_param[i].crp_p, krp->krp_param[i].crp_p, size); 735 if (error) 736 goto fail; 737 } 738 739 error = crypto_kdispatch(krp); 740 if (error != 0) { 741 goto fail; 742 } 743 744 mutex_spin_enter(&crypto_mtx); 745 while (!(krp->krp_flags & CRYPTO_F_DONE)) { 746 cv_wait(&krp->krp_cv, &crypto_mtx); /* XXX cv_wait_sig? 
*/ 747 } 748 if (krp->krp_flags & CRYPTO_F_ONRETQ) { 749 DPRINTF(("cryptodev_key: DONE early, not via cryptoret.\n")); 750 (void)crypto_ret_kq_remove(krp); 751 } 752 mutex_spin_exit(&crypto_mtx); 753 754 if (krp->krp_status != 0) { 755 DPRINTF(("cryptodev_key: krp->krp_status 0x%08x\n", krp->krp_status)); 756 error = krp->krp_status; 757 goto fail; 758 } 759 760 for (i = krp->krp_iparams; i < krp->krp_iparams + krp->krp_oparams; i++) { 761 size = (krp->krp_param[i].crp_nbits + 7) / 8; 762 if (size == 0) 763 continue; 764 error = copyout(krp->krp_param[i].crp_p, kop->crk_param[i].crp_p, size); 765 if (error) { 766 DPRINTF(("cryptodev_key: copyout oparam %d failed, error=%d\n", i-krp->krp_iparams, error)); 767 goto fail; 768 } 769 } 770 771 fail: 772 if (krp) { 773 kop->crk_status = krp->krp_status; 774 for (i = 0; i < CRK_MAXPARAM; i++) { 775 struct crparam *kp = &(krp->krp_param[i]); 776 if (krp->krp_param[i].crp_p) { 777 size = (kp->crp_nbits + 7) / 8; 778 KASSERT(size > 0); 779 memset(kp->crp_p, 0, size); 780 kmem_free(kp->crp_p, size); 781 } 782 } 783 pool_put(&cryptkop_pool, krp); 784 } 785 DPRINTF(("cryptodev_key: error=0x%08x\n", error)); 786 return (error); 787 } 788 789 /* ARGSUSED */ 790 static int 791 cryptof_close(struct file *fp) 792 { 793 struct fcrypt *fcr = fp->f_data; 794 struct csession *cse; 795 796 mutex_spin_enter(&crypto_mtx); 797 while ((cse = TAILQ_FIRST(&fcr->csessions))) { 798 TAILQ_REMOVE(&fcr->csessions, cse, next); 799 (void)csefree(cse); 800 } 801 seldestroy(&fcr->sinfo); 802 fp->f_data = NULL; 803 mutex_spin_exit(&crypto_mtx); 804 805 pool_put(&fcrpl, fcr); 806 return 0; 807 } 808 809 /* csefind: call with crypto_mtx held. */ 810 static struct csession * 811 csefind(struct fcrypt *fcr, u_int ses) 812 { 813 struct csession *cse, *ret = NULL; 814 815 KASSERT(mutex_owned(&crypto_mtx)); 816 TAILQ_FOREACH(cse, &fcr->csessions, next) 817 if (cse->ses == ses) 818 ret = cse; 819 820 return (ret); 821 } 822 823 /* csedelete: call with crypto_mtx held. */ 824 static int 825 csedelete(struct fcrypt *fcr, struct csession *cse_del) 826 { 827 struct csession *cse; 828 int ret = 0; 829 830 KASSERT(mutex_owned(&crypto_mtx)); 831 TAILQ_FOREACH(cse, &fcr->csessions, next) { 832 if (cse == cse_del) { 833 TAILQ_REMOVE(&fcr->csessions, cse, next); 834 ret = 1; 835 } 836 } 837 return (ret); 838 } 839 840 /* cseadd: call with crypto_mtx held. */ 841 static struct csession * 842 cseadd(struct fcrypt *fcr, struct csession *cse) 843 { 844 KASSERT(mutex_owned(&crypto_mtx)); 845 /* don't let session ID wrap! */ 846 if (fcr->sesn + 1 == 0) return NULL; 847 TAILQ_INSERT_TAIL(&fcr->csessions, cse, next); 848 cse->ses = fcr->sesn++; 849 return (cse); 850 } 851 852 /* csecreate: call with crypto_mtx held. */ 853 static struct csession * 854 csecreate(struct fcrypt *fcr, u_int64_t sid, void *key, u_int64_t keylen, 855 void *mackey, u_int64_t mackeylen, u_int32_t cipher, u_int32_t mac, 856 struct enc_xform *txform, struct auth_hash *thash) 857 { 858 struct csession *cse; 859 860 KASSERT(mutex_owned(&crypto_mtx)); 861 cse = pool_get(&csepl, PR_NOWAIT); 862 if (cse == NULL) 863 return NULL; 864 cse->key = key; 865 cse->keylen = keylen/8; 866 cse->mackey = mackey; 867 cse->mackeylen = mackeylen/8; 868 cse->sid = sid; 869 cse->cipher = cipher; 870 cse->mac = mac; 871 cse->txform = txform; 872 cse->thash = thash; 873 cse->error = 0; 874 if (cseadd(fcr, cse)) 875 return (cse); 876 else { 877 pool_put(&csepl, cse); 878 return NULL; 879 } 880 } 881 882 /* csefree: call with crypto_mtx held. 
*/ 883 static int 884 csefree(struct csession *cse) 885 { 886 int error; 887 888 KASSERT(mutex_owned(&crypto_mtx)); 889 error = crypto_freesession(cse->sid); 890 if (cse->key) 891 free(cse->key, M_XDATA); 892 if (cse->mackey) 893 free(cse->mackey, M_XDATA); 894 pool_put(&csepl, cse); 895 return (error); 896 } 897 898 static int 899 cryptoopen(dev_t dev, int flag, int mode, 900 struct lwp *l) 901 { 902 file_t *fp; 903 struct fcrypt *fcr; 904 int fd, error; 905 906 if (crypto_usercrypto == 0) 907 return (ENXIO); 908 909 if ((error = fd_allocfile(&fp, &fd)) != 0) 910 return error; 911 912 fcr = pool_get(&fcrpl, PR_WAITOK); 913 mutex_spin_enter(&crypto_mtx); 914 TAILQ_INIT(&fcr->csessions); 915 TAILQ_INIT(&fcr->crp_ret_mq); 916 TAILQ_INIT(&fcr->crp_ret_mkq); 917 selinit(&fcr->sinfo); 918 /* 919 * Don't ever return session 0, to allow detection of 920 * failed creation attempts with multi-create ioctl. 921 */ 922 fcr->sesn = 1; 923 fcr->requestid = 1; 924 mutex_spin_exit(&crypto_mtx); 925 return fd_clone(fp, fd, flag, &cryptofops, fcr); 926 } 927 928 static int 929 cryptoread(dev_t dev, struct uio *uio, int ioflag) 930 { 931 return (EIO); 932 } 933 934 static int 935 cryptowrite(dev_t dev, struct uio *uio, int ioflag) 936 { 937 return (EIO); 938 } 939 940 int 941 cryptoselect(dev_t dev, int rw, struct lwp *l) 942 { 943 return (0); 944 } 945 946 /*static*/ 947 struct cdevsw crypto_cdevsw = { 948 /* open */ cryptoopen, 949 /* close */ noclose, 950 /* read */ cryptoread, 951 /* write */ cryptowrite, 952 /* ioctl */ noioctl, 953 /* ttstop?*/ nostop, 954 /* ??*/ notty, 955 /* poll */ cryptoselect /*nopoll*/, 956 /* mmap */ nommap, 957 /* kqfilter */ nokqfilter, 958 /* type */ D_OTHER, 959 }; 960 961 static int 962 cryptodev_mop(struct fcrypt *fcr, 963 struct crypt_n_op * cnop, 964 int count, struct lwp *l) 965 { 966 struct cryptop *crp = NULL; 967 struct cryptodesc *crde = NULL, *crda = NULL; 968 int req, error=0; 969 struct csession *cse; 970 971 for (req = 0; req < count; req++) { 972 mutex_spin_enter(&crypto_mtx); 973 cse = csefind(fcr, cnop[req].ses); 974 if (cse == NULL) { 975 DPRINTF(("csefind failed\n")); 976 cnop[req].status = EINVAL; 977 mutex_spin_exit(&crypto_mtx); 978 continue; 979 } 980 mutex_spin_exit(&crypto_mtx); 981 982 if (cnop[req].len > 256*1024-4) { 983 DPRINTF(("length failed\n")); 984 cnop[req].status = EINVAL; 985 continue; 986 } 987 if (cse->txform) { 988 if (cnop[req].len == 0 || 989 (cnop[req].len % cse->txform->blocksize) != 0) { 990 cnop[req].status = EINVAL; 991 continue; 992 } 993 } 994 995 crp = crypto_getreq((cse->txform != NULL) + (cse->thash != NULL)); 996 if (crp == NULL) { 997 cnop[req].status = ENOMEM; 998 goto bail; 999 } 1000 1001 bzero(&crp->uio, sizeof(crp->uio)); 1002 crp->uio.uio_iovcnt = 1; 1003 crp->uio.uio_resid = 0; 1004 crp->uio.uio_rw = UIO_WRITE; 1005 crp->uio.uio_iov = crp->iovec; 1006 UIO_SETUP_SYSSPACE(&crp->uio); 1007 memset(&crp->iovec, 0, sizeof(crp->iovec)); 1008 crp->uio.uio_iov[0].iov_len = cnop[req].len; 1009 crp->uio.uio_iov[0].iov_base = kmem_alloc(cnop[req].len, 1010 KM_SLEEP); 1011 crp->uio.uio_resid = crp->uio.uio_iov[0].iov_len; 1012 1013 if (cse->thash) { 1014 crda = crp->crp_desc; 1015 if (cse->txform) 1016 crde = crda->crd_next; 1017 } else { 1018 if (cse->txform) 1019 crde = crp->crp_desc; 1020 else { 1021 cnop[req].status = EINVAL; 1022 goto bail; 1023 } 1024 } 1025 1026 if ((copyin(cnop[req].src, 1027 crp->uio.uio_iov[0].iov_base, cnop[req].len))) { 1028 cnop[req].status = EINVAL; 1029 goto bail; 1030 } 1031 1032 if (crda) { 
1033 crda->crd_skip = 0; 1034 crda->crd_len = cnop[req].len; 1035 crda->crd_inject = 0; /* ??? */ 1036 1037 crda->crd_alg = cse->mac; 1038 crda->crd_key = cse->mackey; 1039 crda->crd_klen = cse->mackeylen * 8; 1040 } 1041 1042 if (crde) { 1043 if (cnop[req].op == COP_ENCRYPT) 1044 crde->crd_flags |= CRD_F_ENCRYPT; 1045 else 1046 crde->crd_flags &= ~CRD_F_ENCRYPT; 1047 crde->crd_len = cnop[req].len; 1048 crde->crd_inject = 0; 1049 1050 crde->crd_alg = cse->cipher; 1051 #ifdef notyet /* XXX must notify h/w driver new key, drain */ 1052 if(cnop[req].key && cnop[req].keylen) { 1053 crde->crd_key = malloc(cnop[req].keylen, 1054 M_XDATA, M_WAITOK); 1055 if((error = copyin(cnop[req].key, 1056 crde->crd_key, cnop[req].keylen))) { 1057 cnop[req].status = EINVAL; 1058 goto bail; 1059 } 1060 crde->crd_klen = cnop[req].keylen * 8; 1061 } else { ... } 1062 #endif 1063 crde->crd_key = cse->key; 1064 crde->crd_klen = cse->keylen * 8; 1065 } 1066 1067 crp->crp_ilen = cnop[req].len; 1068 crp->crp_flags = CRYPTO_F_IOV | CRYPTO_F_CBIMM 1069 | (cnop[req].flags & COP_F_BATCH); 1070 crp->crp_buf = (void *)&crp->uio; 1071 crp->crp_callback = (int (*) (struct cryptop *)) cryptodev_mcb; 1072 crp->crp_sid = cse->sid; 1073 crp->crp_opaque = (void *)cse; 1074 crp->fcrp = fcr; 1075 crp->dst = cnop[req].dst; 1076 /* we can use the crp_ilen in cryptop(crp) for this */ 1077 crp->len = cnop[req].len; 1078 crp->mac = cnop[req].mac; 1079 1080 if (cnop[req].iv) { 1081 if (crde == NULL) { 1082 cnop[req].status = EINVAL; 1083 goto bail; 1084 } 1085 if (cse->cipher == CRYPTO_ARC4) { /* XXX use flag? */ 1086 cnop[req].status = EINVAL; 1087 goto bail; 1088 } 1089 if ((error = copyin(cnop[req].iv, crp->tmp_iv, 1090 cse->txform->blocksize))) { 1091 cnop[req].status = EINVAL; 1092 goto bail; 1093 } 1094 bcopy(crp->tmp_iv, crde->crd_iv, cse->txform->blocksize); 1095 crde->crd_flags |= CRD_F_IV_EXPLICIT | CRD_F_IV_PRESENT; 1096 crde->crd_skip = 0; 1097 } else if (crde) { 1098 if (cse->cipher == CRYPTO_ARC4) { /* XXX use flag? */ 1099 crde->crd_skip = 0; 1100 } else { 1101 crde->crd_flags |= CRD_F_IV_PRESENT; 1102 crde->crd_skip = cse->txform->blocksize; 1103 crde->crd_len -= cse->txform->blocksize; 1104 } 1105 } 1106 1107 if (cnop[req].mac) { 1108 if (crda == NULL) { 1109 cnop[req].status = EINVAL; 1110 goto bail; 1111 } 1112 crp->crp_mac=cse->tmp_mac; 1113 } 1114 cnop[req].reqid = atomic_inc_32_nv(&(fcr->requestid)); 1115 crp->crp_reqid = cnop[req].reqid; 1116 crp->crp_usropaque = cnop[req].opaque; 1117 #ifdef notyet 1118 eagain: 1119 #endif 1120 cnop[req].status = crypto_dispatch(crp); 1121 mutex_spin_enter(&crypto_mtx); /* XXX why mutex? 
*/ 1122 1123 switch (cnop[req].status) { 1124 #ifdef notyet /* don't loop forever -- but EAGAIN not possible here yet */ 1125 case EAGAIN: 1126 mutex_spin_exit(&crypto_mtx); 1127 goto eagain; 1128 break; 1129 #endif 1130 case 0: 1131 break; 1132 default: 1133 DPRINTF(("cryptodev_op: not waiting, error.\n")); 1134 mutex_spin_exit(&crypto_mtx); 1135 goto bail; 1136 } 1137 1138 mutex_spin_exit(&crypto_mtx); 1139 bail: 1140 if (cnop[req].status) { 1141 if (crp) { 1142 if (crp->uio.uio_iov[0].iov_base) { 1143 kmem_free(crp->uio.uio_iov[0].iov_base, 1144 crp->uio.uio_iov[0].iov_len); 1145 } 1146 crypto_freereq(crp); 1147 } 1148 error = 0; 1149 } 1150 } 1151 return (error); 1152 } 1153 1154 static int 1155 cryptodev_mkey(struct fcrypt *fcr, struct crypt_n_kop *kop, int count) 1156 { 1157 struct cryptkop *krp = NULL; 1158 int error = EINVAL; 1159 int in, out, size, i, req; 1160 1161 for (req = 0; req < count; req++) { 1162 if (kop[req].crk_iparams + kop[req].crk_oparams > CRK_MAXPARAM) { 1163 return (EFBIG); 1164 } 1165 1166 in = kop[req].crk_iparams; 1167 out = kop[req].crk_oparams; 1168 switch (kop[req].crk_op) { 1169 case CRK_MOD_EXP: 1170 if (in == 3 && out == 1) 1171 break; 1172 kop[req].crk_status = EINVAL; 1173 continue; 1174 case CRK_MOD_EXP_CRT: 1175 if (in == 6 && out == 1) 1176 break; 1177 kop[req].crk_status = EINVAL; 1178 continue; 1179 case CRK_DSA_SIGN: 1180 if (in == 5 && out == 2) 1181 break; 1182 kop[req].crk_status = EINVAL; 1183 continue; 1184 case CRK_DSA_VERIFY: 1185 if (in == 7 && out == 0) 1186 break; 1187 kop[req].crk_status = EINVAL; 1188 continue; 1189 case CRK_DH_COMPUTE_KEY: 1190 if (in == 3 && out == 1) 1191 break; 1192 kop[req].crk_status = EINVAL; 1193 continue; 1194 case CRK_MOD_ADD: 1195 if (in == 3 && out == 1) 1196 break; 1197 kop[req].crk_status = EINVAL; 1198 continue; 1199 case CRK_MOD_ADDINV: 1200 if (in == 2 && out == 1) 1201 break; 1202 kop[req].crk_status = EINVAL; 1203 continue; 1204 case CRK_MOD_SUB: 1205 if (in == 3 && out == 1) 1206 break; 1207 kop[req].crk_status = EINVAL; 1208 continue; 1209 case CRK_MOD_MULT: 1210 if (in == 3 && out == 1) 1211 break; 1212 kop[req].crk_status = EINVAL; 1213 continue; 1214 case CRK_MOD_MULTINV: 1215 if (in == 2 && out == 1) 1216 break; 1217 kop[req].crk_status = EINVAL; 1218 continue; 1219 case CRK_MOD: 1220 if (in == 2 && out == 1) 1221 break; 1222 kop[req].crk_status = EINVAL; 1223 continue; 1224 default: 1225 kop[req].crk_status = EINVAL; 1226 continue; 1227 } 1228 1229 krp = pool_get(&cryptkop_pool, PR_WAITOK); 1230 bzero(krp, sizeof *krp); 1231 cv_init(&krp->krp_cv, "crykdev"); 1232 krp->krp_op = kop[req].crk_op; 1233 krp->krp_status = kop[req].crk_status; 1234 krp->krp_iparams = kop[req].crk_iparams; 1235 krp->krp_oparams = kop[req].crk_oparams; 1236 krp->krp_status = 0; 1237 krp->krp_callback = 1238 (int (*) (struct cryptkop *)) cryptodevkey_mcb; 1239 bcopy(kop[req].crk_param, 1240 krp->crk_param, 1241 sizeof(kop[req].crk_param)); 1242 1243 krp->krp_flags = CRYPTO_F_CBIMM; 1244 1245 for (i = 0; i < CRK_MAXPARAM; i++) 1246 krp->krp_param[i].crp_nbits = 1247 kop[req].crk_param[i].crp_nbits; 1248 for (i = 0; i < krp->krp_iparams + krp->krp_oparams; i++) { 1249 size = (krp->krp_param[i].crp_nbits + 7) / 8; 1250 if (size == 0) 1251 continue; 1252 krp->krp_param[i].crp_p = 1253 kmem_alloc(size, KM_SLEEP); 1254 if (i >= krp->krp_iparams) 1255 continue; 1256 kop[req].crk_status = copyin(kop[req].crk_param[i].crp_p, 1257 krp->krp_param[i].crp_p, size); 1258 if (kop[req].crk_status) 1259 goto fail; 1260 } 1261 krp->fcrp
= fcr; 1262 1263 kop[req].crk_reqid = atomic_inc_32_nv(&(fcr->requestid)); 1264 krp->krp_reqid = kop[req].crk_reqid; 1265 krp->krp_usropaque = kop[req].crk_opaque; 1266 1267 kop[req].crk_status = crypto_kdispatch(krp); 1268 if (kop[req].crk_status != 0) { 1269 goto fail; 1270 } 1271 1272 fail: 1273 if(kop[req].crk_status) { 1274 if (krp) { 1275 kop[req].crk_status = krp->krp_status; 1276 for (i = 0; i < CRK_MAXPARAM; i++) { 1277 struct crparam *kp = 1278 &(krp->krp_param[i]); 1279 if (kp->crp_p) { 1280 size = (kp->crp_nbits + 7) / 8; 1281 KASSERT(size > 0); 1282 memset(kp->crp_p, 0, size); 1283 kmem_free(kp->crp_p, size); 1284 } 1285 } 1286 pool_put(&cryptkop_pool, krp); 1287 } 1288 } 1289 error = 0; 1290 } 1291 DPRINTF(("cryptodev_key: error=0x%08x\n", error)); 1292 return (error); 1293 } 1294 1295 static int 1296 cryptodev_session(struct fcrypt *fcr, struct session_op *sop) { 1297 struct cryptoini cria, crie; 1298 struct enc_xform *txform = NULL; 1299 struct auth_hash *thash = NULL; 1300 struct csession *cse; 1301 u_int64_t sid; 1302 int error = 0; 1303 1304 /* XXX there must be a way to not embed the list of xforms here */ 1305 switch (sop->cipher) { 1306 case 0: 1307 break; 1308 case CRYPTO_DES_CBC: 1309 txform = &enc_xform_des; 1310 break; 1311 case CRYPTO_3DES_CBC: 1312 txform = &enc_xform_3des; 1313 break; 1314 case CRYPTO_BLF_CBC: 1315 txform = &enc_xform_blf; 1316 break; 1317 case CRYPTO_CAST_CBC: 1318 txform = &enc_xform_cast5; break; 1319 case CRYPTO_SKIPJACK_CBC: 1320 txform = &enc_xform_skipjack; 1321 break; 1322 case CRYPTO_AES_CBC: 1323 txform = &enc_xform_rijndael128; 1324 break; 1325 case CRYPTO_NULL_CBC: 1326 txform = &enc_xform_null; 1327 break; 1328 case CRYPTO_ARC4: 1329 txform = &enc_xform_arc4; 1330 break; 1331 default: 1332 DPRINTF(("Invalid cipher %d\n", sop->cipher)); 1333 return EINVAL; 1334 } 1335 1336 switch (sop->mac) { 1337 case 0: 1338 break; 1339 case CRYPTO_MD5_HMAC: 1340 thash = &auth_hash_hmac_md5; 1341 break; 1342 case CRYPTO_SHA1_HMAC: 1343 thash = &auth_hash_hmac_sha1; 1344 break; 1345 case CRYPTO_MD5_HMAC_96: 1346 thash = &auth_hash_hmac_md5_96; 1347 break; 1348 case CRYPTO_SHA1_HMAC_96: 1349 thash = &auth_hash_hmac_sha1_96; 1350 break; 1351 case CRYPTO_SHA2_HMAC: 1352 /* XXX switching on key length seems questionable */ 1353 if (sop->mackeylen == auth_hash_hmac_sha2_256.keysize) { 1354 thash = &auth_hash_hmac_sha2_256; 1355 } else if (sop->mackeylen == auth_hash_hmac_sha2_384.keysize) { 1356 thash = &auth_hash_hmac_sha2_384; 1357 } else if (sop->mackeylen == auth_hash_hmac_sha2_512.keysize) { 1358 thash = &auth_hash_hmac_sha2_512; 1359 } else { 1360 DPRINTF(("Invalid mackeylen %d\n", sop->mackeylen)); 1361 return EINVAL; 1362 } 1363 break; 1364 case CRYPTO_RIPEMD160_HMAC: 1365 thash = &auth_hash_hmac_ripemd_160; 1366 break; 1367 case CRYPTO_RIPEMD160_HMAC_96: 1368 thash = &auth_hash_hmac_ripemd_160_96; 1369 break; 1370 case CRYPTO_MD5: 1371 thash = &auth_hash_md5; 1372 break; 1373 case CRYPTO_SHA1: 1374 thash = &auth_hash_sha1; 1375 break; 1376 case CRYPTO_NULL_HMAC: 1377 thash = &auth_hash_null; 1378 break; 1379 default: 1380 DPRINTF(("Invalid mac %d\n", sop->mac)); 1381 return (EINVAL); 1382 } 1383 1384 memset(&crie, 0, sizeof(crie)); 1385 memset(&cria, 0, sizeof(cria)); 1386 1387 if (txform) { 1388 crie.cri_alg = txform->type; 1389 crie.cri_klen = sop->keylen * 8; 1390 if (sop->keylen > txform->maxkey || 1391 sop->keylen < txform->minkey) { 1392 DPRINTF(("keylen %d not in [%d,%d]\n", 1393 sop->keylen, txform->minkey, 1394 txform->maxkey)); 1395 error =
EINVAL ; 1396 goto bail; 1397 } 1398 1399 crie.cri_key = malloc(crie.cri_klen / 8, M_XDATA, M_WAITOK); 1400 if ((error = copyin(sop->key, crie.cri_key, 1401 crie.cri_klen / 8))) { 1402 goto bail; 1403 } 1404 if (thash) { 1405 crie.cri_next = &cria; /* XXX forces enc then hash? */ 1406 } 1407 } 1408 1409 if (thash) { 1410 cria.cri_alg = thash->type; 1411 cria.cri_klen = sop->mackeylen * 8; 1412 if (sop->mackeylen != thash->keysize) { 1413 DPRINTF(("mackeylen %d != keysize %d\n", 1414 sop->mackeylen, thash->keysize)); 1415 error = EINVAL; 1416 goto bail; 1417 } 1418 if (cria.cri_klen) { 1419 cria.cri_key = malloc(cria.cri_klen / 8, M_XDATA, 1420 M_WAITOK); 1421 if ((error = copyin(sop->mackey, cria.cri_key, 1422 cria.cri_klen / 8))) { 1423 goto bail; 1424 } 1425 } 1426 } 1427 /* crypto_newsession requires that we hold the mutex. */ 1428 mutex_spin_enter(&crypto_mtx); 1429 error = crypto_newsession(&sid, (txform ? &crie : &cria), 1430 crypto_devallowsoft); 1431 if (!error) { 1432 cse = csecreate(fcr, sid, crie.cri_key, crie.cri_klen, 1433 cria.cri_key, cria.cri_klen, sop->cipher, 1434 sop->mac, txform, thash); 1435 if (cse != NULL) { 1436 sop->ses = cse->ses; 1437 } else { 1438 DPRINTF(("csecreate failed\n")); 1439 crypto_freesession(sid); 1440 error = EINVAL; 1441 } 1442 } else { 1443 DPRINTF(("SIOCSESSION violates kernel parameters %d\n", 1444 error)); 1445 } 1446 mutex_spin_exit(&crypto_mtx); 1447 bail: 1448 if (error) { 1449 if (crie.cri_key) { 1450 memset(crie.cri_key, 0, crie.cri_klen / 8); 1451 free(crie.cri_key, M_XDATA); 1452 } 1453 if (cria.cri_key) { 1454 memset(cria.cri_key, 0, cria.cri_klen / 8); 1455 free(cria.cri_key, M_XDATA); 1456 } 1457 } 1458 return error; 1459 } 1460 1461 static int 1462 cryptodev_msession(struct fcrypt *fcr, struct session_n_op *sn_ops, 1463 int count) 1464 { 1465 int i; 1466 1467 for (i = 0; i < count; i++, sn_ops++) { 1468 struct session_op s_op; 1469 s_op.cipher = sn_ops->cipher; 1470 s_op.mac = sn_ops->mac; 1471 s_op.keylen = sn_ops->keylen; 1472 s_op.key = sn_ops->key; 1473 s_op.mackeylen = sn_ops->mackeylen; 1474 s_op.mackey = sn_ops->mackey; 1475 1476 sn_ops->status = cryptodev_session(fcr, &s_op); 1477 sn_ops->ses = s_op.ses; 1478 } 1479 1480 return 0; 1481 } 1482 1483 static int 1484 cryptodev_msessionfin(struct fcrypt *fcr, int count, u_int32_t *sesid) 1485 { 1486 struct csession *cse; 1487 int req, error = 0; 1488 1489 mutex_spin_enter(&crypto_mtx); 1490 for(req = 0; req < count; req++) { 1491 cse = csefind(fcr, sesid[req]); 1492 if (cse == NULL) 1493 continue; 1494 csedelete(fcr, cse); 1495 error = csefree(cse); 1496 } 1497 mutex_spin_exit(&crypto_mtx); 1498 return 0; 1499 } 1500 1501 /* 1502 * collect as many completed requests as are availble, or count completed requests 1503 * whichever is less. 1504 * return the number of requests. 
1505 */ 1506 static int 1507 cryptodev_getmstatus(struct fcrypt *fcr, struct crypt_result *crypt_res, 1508 int count) 1509 { 1510 struct cryptop *crp = NULL; 1511 struct cryptkop *krp = NULL; 1512 struct csession *cse; 1513 int i, size, req = 0; 1514 int completed=0; 1515 1516 /* On stack so nobody else can grab them -- no locking */ 1517 SLIST_HEAD(, cryptop) crp_delfree_l = 1518 SLIST_HEAD_INITIALIZER(crp_delfree_l); 1519 SLIST_HEAD(, cryptkop) krp_delfree_l = 1520 SLIST_HEAD_INITIALIZER(krp_delfree_l); 1521 1522 mutex_spin_enter(&crypto_mtx); 1523 1524 /* at this point we do not know which response user is requesting for 1525 * (symmetric or asymmetric) so we copyout one from each i.e if the 1526 * count is 2 then 1 from symmetric and 1 from asymmetric queue and 1527 * if 3 then 2 symmetric and 1 asymmetric and so on */ 1528 for(; req < count ;) { 1529 crp = TAILQ_FIRST(&fcr->crp_ret_mq); 1530 if(crp) { 1531 cse = (struct csession *)crp->crp_opaque; 1532 crypt_res[req].reqid = crp->crp_reqid; 1533 crypt_res[req].opaque = crp->crp_usropaque; 1534 completed++; 1535 cse = csefind(fcr, cse->ses); 1536 if (cse == NULL) { 1537 DPRINTF(("csefind failed\n")); 1538 crypt_res[req].status = EINVAL; 1539 goto bail; 1540 } 1541 1542 if (crp->crp_etype != 0) { 1543 crypt_res[req].status = crp->crp_etype; 1544 goto bail; 1545 } 1546 1547 if (cse->error) { 1548 crypt_res[req].status = cse->error; 1549 goto bail; 1550 } 1551 1552 if (crp->dst && 1553 (crypt_res[req].status = copyout 1554 (crp->uio.uio_iov[0].iov_base, 1555 crp->dst, crp->len))) 1556 goto bail; 1557 1558 if (crp->mac && 1559 (crypt_res[req].status = copyout 1560 (crp->crp_mac, crp->mac, 1561 cse->thash->authsize))) 1562 goto bail; 1563 bail: 1564 TAILQ_REMOVE(&fcr->crp_ret_mq, crp, crp_next); 1565 SLIST_INSERT_HEAD(&crp_delfree_l, crp, 1566 crp_qun.crp_lnext); 1567 req++; 1568 } 1569 1570 if(req < count) { 1571 krp = TAILQ_FIRST(&fcr->crp_ret_mkq); 1572 if (krp) { 1573 crypt_res[req].reqid = krp->krp_reqid; 1574 crypt_res[req].opaque = krp->krp_usropaque; 1575 completed++; 1576 if (krp->krp_status != 0) { 1577 DPRINTF(("cryptodev_key: " 1578 "krp->krp_status" 1579 "0x%08x\n", krp->krp_status)); 1580 crypt_res[req].status = 1581 krp->krp_status; 1582 goto fail; 1583 } 1584 1585 for (i = krp->krp_iparams; i < krp->krp_iparams 1586 + krp->krp_oparams; i++) { 1587 size = (krp->krp_param[i].crp_nbits 1588 + 7) / 8; 1589 if (size == 0) 1590 continue; 1591 crypt_res[req].status = copyout 1592 (krp->krp_param[i].crp_p, 1593 krp->crk_param[i].crp_p, size); 1594 if (crypt_res[req].status) { 1595 DPRINTF(("cryptodev_key: " 1596 "copyout oparam " 1597 "%d failed, " 1598 "error=%d\n", 1599 i-krp->krp_iparams, 1600 crypt_res[req].status)); 1601 goto fail; 1602 } 1603 } 1604 fail: 1605 TAILQ_REMOVE(&fcr->crp_ret_mkq, krp, krp_next); 1606 /* not sure what to do for this */ 1607 /* kop[req].crk_status = krp->krp_status; */ 1608 SLIST_INSERT_HEAD(&krp_delfree_l, krp, 1609 krp_qun.krp_lnext); 1610 } 1611 req++; 1612 } 1613 } 1614 mutex_spin_exit(&crypto_mtx); 1615 1616 while(!SLIST_EMPTY(&crp_delfree_l)) { 1617 crp = SLIST_FIRST(&crp_delfree_l); 1618 SLIST_REMOVE_HEAD(&crp_delfree_l, crp_qun.crp_lnext); 1619 kmem_free(crp->uio.uio_iov[0].iov_base, 1620 crp->uio.uio_iov[0].iov_len); 1621 crypto_freereq(crp); 1622 } 1623 1624 while(!SLIST_EMPTY(&krp_delfree_l)) { 1625 krp = SLIST_FIRST(&krp_delfree_l); 1626 for (i = 0; i < CRK_MAXPARAM; i++) { 1627 struct crparam *kp = &(krp->krp_param[i]); 1628 if (kp->crp_p) { 1629 size = (kp->crp_nbits + 7) / 8; 1630 
KASSERT(size > 0); 1631 memset(kp->crp_p, 0, size); 1632 kmem_free(kp->crp_p, size); 1633 } 1634 } 1635 SLIST_REMOVE_HEAD(&krp_delfree_l, krp_qun.krp_lnext); 1636 pool_put(&cryptkop_pool, krp); 1637 } 1638 return completed; 1639 } 1640 1641 static int 1642 cryptodev_getstatus (struct fcrypt *fcr, struct crypt_result *crypt_res) 1643 { 1644 struct cryptop *crp = NULL; 1645 struct cryptkop *krp = NULL; 1646 struct csession *cse; 1647 int i, size, req = 0; 1648 1649 mutex_spin_enter(&crypto_mtx); 1650 /* Here we don't know which request the user wants the 1651 * response for, so check both queues */ 1652 TAILQ_FOREACH(crp, &fcr->crp_ret_mq, crp_next) { 1653 if(crp && (crp->crp_reqid == crypt_res->reqid)) { 1654 cse = (struct csession *)crp->crp_opaque; 1655 crypt_res->opaque = crp->crp_usropaque; 1656 cse = csefind(fcr, cse->ses); 1657 if (cse == NULL) { 1658 DPRINTF(("csefind failed\n")); 1659 crypt_res->status = EINVAL; 1660 goto bail; 1661 } 1662 1663 if (crp->crp_etype != 0) { 1664 crypt_res->status = crp->crp_etype; 1665 goto bail; 1666 } 1667 1668 if (cse->error) { 1669 crypt_res->status = cse->error; 1670 goto bail; 1671 } 1672 1673 if (crp->dst && 1674 (crypt_res->status = copyout 1675 (crp->uio.uio_iov[0].iov_base, 1676 crp->dst, crp->len))) 1677 goto bail; 1678 1679 if (crp->mac && 1680 (crypt_res->status = copyout(crp->crp_mac, 1681 crp->mac, cse->thash->authsize))) 1682 goto bail; 1683 bail: 1684 TAILQ_REMOVE(&fcr->crp_ret_mq, crp, crp_next); 1685 1686 mutex_spin_exit(&crypto_mtx); 1687 kmem_free(crp->uio.uio_iov[0].iov_base, crp->uio.uio_iov[0].iov_len); crypto_freereq(crp); 1688 return 0; 1689 } 1690 } 1691 1692 TAILQ_FOREACH(krp, &fcr->crp_ret_mkq, krp_next) { 1693 if(krp && (krp->krp_reqid == crypt_res->reqid)) { 1694 crypt_res[req].opaque = krp->krp_usropaque; 1695 if (krp->krp_status != 0) { 1696 DPRINTF(("cryptodev_key: " 1697 "krp->krp_status 0x%08x\n", 1698 krp->krp_status)); 1699 crypt_res[req].status = krp->krp_status; 1700 goto fail; 1701 } 1702 1703 for (i = krp->krp_iparams; i < krp->krp_iparams 1704 + krp->krp_oparams; i++) { 1705 size = (krp->krp_param[i].crp_nbits + 7) / 8; 1706 if (size == 0) 1707 continue; 1708 crypt_res[req].status = copyout 1709 (krp->krp_param[i].crp_p, 1710 krp->crk_param[i].crp_p, size); 1711 if (crypt_res[req].status) { 1712 DPRINTF(("cryptodev_key: copyout oparam " 1713 "%d failed, error=%d\n", 1714 i-krp->krp_iparams, 1715 crypt_res[req].status)); 1716 goto fail; 1717 } 1718 } 1719 fail: 1720 TAILQ_REMOVE(&fcr->crp_ret_mkq, krp, krp_next); 1721 mutex_spin_exit(&crypto_mtx); 1722 /* not sure what to do for this */ 1723 /* kop[req].crk_status = krp->krp_status; */ 1724 for (i = 0; i < CRK_MAXPARAM; i++) { 1725 struct crparam *kp = &(krp->krp_param[i]); 1726 if (kp->crp_p) { 1727 size = (kp->crp_nbits + 7) / 8; 1728 KASSERT(size > 0); 1729 memset(kp->crp_p, 0, size); 1730 kmem_free(kp->crp_p, size); 1731 } 1732 } 1733 pool_put(&cryptkop_pool, krp); 1734 return 0; 1735 } 1736 } 1737 mutex_spin_exit(&crypto_mtx); 1738 return EINPROGRESS; 1739 } 1740 1741 static int 1742 cryptof_poll(struct file *fp, int events) 1743 { 1744 struct fcrypt *fcr = (struct fcrypt *)fp->f_data; 1745 int revents = 0; 1746 1747 if (!(events & (POLLIN | POLLRDNORM))) { 1748 /* only support read and POLLIN */ 1749 return 0; 1750 } 1751 1752 mutex_spin_enter(&crypto_mtx); 1753 if (TAILQ_EMPTY(&fcr->crp_ret_mq) && TAILQ_EMPTY(&fcr->crp_ret_mkq)) { 1754 /* no completed requests pending, save the poll for later */ 1755 selrecord(curlwp, &fcr->sinfo); 1756 } else { 1757 /* let the app(s) know that there are completed
requests */ 1758 revents = events & (POLLIN | POLLRDNORM); 1759 } 1760 mutex_spin_exit(&crypto_mtx); 1761 1762 return revents; 1763 } 1764 1765 /* 1766 * Pseudo-device initialization routine for /dev/crypto 1767 */ 1768 void cryptoattach(int); 1769 1770 void 1771 cryptoattach(int num) 1772 { 1773 pool_init(&fcrpl, sizeof(struct fcrypt), 0, 0, 0, "fcrpl", 1774 NULL, IPL_NET); /* XXX IPL_NET ("splcrypto") */ 1775 pool_init(&csepl, sizeof(struct csession), 0, 0, 0, "csepl", 1776 NULL, IPL_NET); /* XXX IPL_NET ("splcrypto") */ 1777 1778 /* 1779 * Preallocate space for 64 users, with 5 sessions each. 1780 * (consider that a TLS protocol session requires at least 1781 * 3DES, MD5, and SHA1 (both hashes are used in the PRF) for 1782 * the negotiation, plus HMAC_SHA1 for the actual SSL records, 1783 * consuming one session here for each algorithm. 1784 */ 1785 pool_prime(&fcrpl, 64); 1786 pool_prime(&csepl, 64 * 5); 1787 } 1788
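/*
 * Illustrative userland sketch (example only, never compiled as part of
 * this file): the sequence below shows the intended synchronous use of the
 * /dev/crypto interface implemented above -- CIOCGSESSION binds a cipher
 * session via cryptodev_session(), CIOCCRYPT runs one request through
 * cryptodev_op(), and CIOCFSESSION tears the session down.  The function
 * name, key/IV contents and the abbreviated error handling are placeholder
 * assumptions; see crypto(4) and <crypto/cryptodev.h> for the authoritative
 * userland interface.
 */
#if 0	/* illustrative example only */
#include <sys/ioctl.h>
#include <crypto/cryptodev.h>	/* struct session_op, struct crypt_op */
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

static int
example_3des_cbc_encrypt(unsigned char key[24], unsigned char iv[8],
    unsigned char *buf, unsigned int len)	/* len: multiple of the 8-byte blocksize */
{
	struct session_op sop;
	struct crypt_op cop;
	int fd, error;

	fd = open("/dev/crypto", O_RDWR);
	if (fd == -1)
		return -1;

	/* Create a 3DES-CBC session; sop.ses is filled in by the kernel. */
	memset(&sop, 0, sizeof(sop));
	sop.cipher = CRYPTO_3DES_CBC;
	sop.keylen = 24;
	sop.key = key;
	if (ioctl(fd, CIOCGSESSION, &sop) == -1) {	/* -> cryptodev_session() */
		close(fd);
		return -1;
	}

	/* One synchronous encryption request against that session. */
	memset(&cop, 0, sizeof(cop));
	cop.ses = sop.ses;
	cop.op = COP_ENCRYPT;
	cop.len = len;
	cop.src = buf;
	cop.dst = buf;		/* in-place; the kernel bounces through its own buffer */
	cop.iv = iv;
	error = ioctl(fd, CIOCCRYPT, &cop);		/* -> cryptodev_op() */

	/* Release the session and the descriptor. */
	(void)ioctl(fd, CIOCFSESSION, &sop.ses);	/* -> csefree() */
	close(fd);
	return error;
}
#endif	/* illustrative example only */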