1 /* $FreeBSD: src/sys/dev/ubsec/ubsec.c,v 1.6.2.12 2003/06/04 17:56:59 sam Exp $ */ 2 /* $DragonFly: src/sys/dev/crypto/ubsec/ubsec.c,v 1.13 2006/12/22 23:26:15 swildner Exp $ */ 3 /* $OpenBSD: ubsec.c,v 1.115 2002/09/24 18:33:26 jason Exp $ */ 4 5 /* 6 * Copyright (c) 2000 Jason L. Wright (jason@thought.net) 7 * Copyright (c) 2000 Theo de Raadt (deraadt@openbsd.org) 8 * Copyright (c) 2001 Patrik Lindergren (patrik@ipunplugged.com) 9 * 10 * All rights reserved. 11 * 12 * Redistribution and use in source and binary forms, with or without 13 * modification, are permitted provided that the following conditions 14 * are met: 15 * 1. Redistributions of source code must retain the above copyright 16 * notice, this list of conditions and the following disclaimer. 17 * 2. Redistributions in binary form must reproduce the above copyright 18 * notice, this list of conditions and the following disclaimer in the 19 * documentation and/or other materials provided with the distribution. 20 * 3. All advertising materials mentioning features or use of this software 21 * must display the following acknowledgement: 22 * This product includes software developed by Jason L. Wright 23 * 4. The name of the author may not be used to endorse or promote products 24 * derived from this software without specific prior written permission. 25 * 26 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 27 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 28 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 29 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, 30 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 31 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 32 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 33 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, 34 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN 35 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 36 * POSSIBILITY OF SUCH DAMAGE. 37 * 38 * Effort sponsored in part by the Defense Advanced Research Projects 39 * Agency (DARPA) and Air Force Research Laboratory, Air Force 40 * Materiel Command, USAF, under agreement number F30602-01-2-0537. 
41 * 42 */ 43 44 /* 45 * uBsec 5[56]01, 58xx hardware crypto accelerator 46 */ 47 48 #include "opt_ubsec.h" 49 50 #include <sys/param.h> 51 #include <sys/systm.h> 52 #include <sys/proc.h> 53 #include <sys/errno.h> 54 #include <sys/malloc.h> 55 #include <sys/kernel.h> 56 #include <sys/mbuf.h> 57 #include <sys/sysctl.h> 58 #include <sys/endian.h> 59 #include <sys/bus.h> 60 #include <sys/rman.h> 61 #include <sys/md5.h> 62 #include <sys/random.h> 63 #include <sys/thread2.h> 64 65 #include <vm/vm.h> 66 #include <vm/pmap.h> 67 68 #include <machine/clock.h> 69 70 #include <crypto/sha1.h> 71 #include <opencrypto/cryptodev.h> 72 #include <opencrypto/cryptosoft.h> 73 74 #include "cryptodev_if.h" 75 76 #include <bus/pci/pcivar.h> 77 #include <bus/pci/pcireg.h> 78 79 /* grr, #defines for gratuitous incompatibility in queue.h */ 80 #define SIMPLEQ_HEAD STAILQ_HEAD 81 #define SIMPLEQ_ENTRY STAILQ_ENTRY 82 #define SIMPLEQ_INIT STAILQ_INIT 83 #define SIMPLEQ_INSERT_TAIL STAILQ_INSERT_TAIL 84 #define SIMPLEQ_EMPTY STAILQ_EMPTY 85 #define SIMPLEQ_FIRST STAILQ_FIRST 86 #define SIMPLEQ_REMOVE_HEAD STAILQ_REMOVE_HEAD 87 #define SIMPLEQ_FOREACH STAILQ_FOREACH 88 /* ditto for endian.h */ 89 #define letoh16(x) le16toh(x) 90 #define letoh32(x) le32toh(x) 91 92 #ifdef UBSEC_RNDTEST 93 #include "../rndtest/rndtest.h" 94 #endif 95 #include "ubsecreg.h" 96 #include "ubsecvar.h" 97 98 /* 99 * Prototypes and count for the pci_device structure 100 */ 101 static int ubsec_probe(device_t); 102 static int ubsec_attach(device_t); 103 static int ubsec_detach(device_t); 104 static int ubsec_suspend(device_t); 105 static int ubsec_resume(device_t); 106 static void ubsec_shutdown(device_t); 107 static void ubsec_intr(void *); 108 static int ubsec_newsession(void *, u_int32_t *, struct cryptoini *); 109 static int ubsec_freesession(void *, u_int64_t); 110 static int ubsec_process(void *, struct cryptop *, int); 111 static void ubsec_callback(struct ubsec_softc *, struct ubsec_q *); 112 static void ubsec_feed(struct ubsec_softc *); 113 static void ubsec_mcopy(struct mbuf *, struct mbuf *, int, int); 114 static void ubsec_callback2(struct ubsec_softc *, struct ubsec_q2 *); 115 static int ubsec_feed2(struct ubsec_softc *); 116 static void ubsec_rng(void *); 117 static int ubsec_dma_malloc(struct ubsec_softc *, bus_size_t, 118 struct ubsec_dma_alloc *, int); 119 #define ubsec_dma_sync(_dma, _flags) \ 120 bus_dmamap_sync((_dma)->dma_tag, (_dma)->dma_map, (_flags)) 121 static void ubsec_dma_free(struct ubsec_softc *, struct ubsec_dma_alloc *); 122 static int ubsec_dmamap_aligned(struct ubsec_operand *op); 123 124 static void ubsec_reset_board(struct ubsec_softc *sc); 125 static void ubsec_init_board(struct ubsec_softc *sc); 126 static void ubsec_init_pciregs(device_t dev); 127 static void ubsec_totalreset(struct ubsec_softc *sc); 128 129 static int ubsec_free_q(struct ubsec_softc *sc, struct ubsec_q *q); 130 131 static int ubsec_kprocess(void*, struct cryptkop *, int); 132 static int ubsec_kprocess_modexp_hw(struct ubsec_softc *, struct cryptkop *, int); 133 static int ubsec_kprocess_modexp_sw(struct ubsec_softc *, struct cryptkop *, int); 134 static int ubsec_kprocess_rsapriv(struct ubsec_softc *, struct cryptkop *, int); 135 static void ubsec_kfree(struct ubsec_softc *, struct ubsec_q2 *); 136 static int ubsec_ksigbits(struct crparam *); 137 static void ubsec_kshift_r(u_int, u_int8_t *, u_int, u_int8_t *, u_int); 138 static void ubsec_kshift_l(u_int, u_int8_t *, u_int, u_int8_t *, u_int); 139 140 141 static device_method_t 
ubsec_methods[] = { 142 /* Device interface */ 143 DEVMETHOD(device_probe, ubsec_probe), 144 DEVMETHOD(device_attach, ubsec_attach), 145 DEVMETHOD(device_detach, ubsec_detach), 146 DEVMETHOD(device_suspend, ubsec_suspend), 147 DEVMETHOD(device_resume, ubsec_resume), 148 DEVMETHOD(device_shutdown, ubsec_shutdown), 149 150 /* bus interface */ 151 DEVMETHOD(bus_print_child, bus_generic_print_child), 152 DEVMETHOD(bus_driver_added, bus_generic_driver_added), 153 154 /* crypto device methods */ 155 DEVMETHOD(cryptodev_newsession, ubsec_newsession), 156 DEVMETHOD(cryptodev_freesession,ubsec_freesession), 157 DEVMETHOD(cryptodev_process, ubsec_process), 158 DEVMETHOD(cryptodev_kprocess, ubsec_kprocess), 159 160 { 0, 0 } 161 }; 162 static driver_t ubsec_driver = { 163 "ubsec", 164 ubsec_methods, 165 sizeof (struct ubsec_softc) 166 }; 167 static devclass_t ubsec_devclass; 168 169 DECLARE_DUMMY_MODULE(ubsec); 170 DRIVER_MODULE(ubsec, pci, ubsec_driver, ubsec_devclass, 0, 0); 171 MODULE_DEPEND(ubsec, crypto, 1, 1, 1); 172 #ifdef UBSEC_RNDTEST 173 MODULE_DEPEND(ubsec, rndtest, 1, 1, 1); 174 #endif 175 176 SYSCTL_NODE(_hw, OID_AUTO, ubsec, CTLFLAG_RD, 0, "Broadcom driver parameters"); 177 178 #ifdef UBSEC_DEBUG 179 static void ubsec_dump_pb(volatile struct ubsec_pktbuf *); 180 static void ubsec_dump_mcr(struct ubsec_mcr *); 181 static void ubsec_dump_ctx2(struct ubsec_ctx_keyop *); 182 183 static int ubsec_debug = 0; 184 SYSCTL_INT(_hw_ubsec, OID_AUTO, debug, CTLFLAG_RW, &ubsec_debug, 185 0, "control debugging msgs"); 186 #endif 187 188 #define READ_REG(sc,r) \ 189 bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (r)) 190 191 #define WRITE_REG(sc,reg,val) \ 192 bus_space_write_4((sc)->sc_st, (sc)->sc_sh, reg, val) 193 194 #define SWAP32(x) (x) = htole32(ntohl((x))) 195 #define HTOLE32(x) (x) = htole32(x) 196 197 198 struct ubsec_stats ubsecstats; 199 SYSCTL_STRUCT(_hw_ubsec, OID_AUTO, stats, CTLFLAG_RD, &ubsecstats, 200 ubsec_stats, "driver statistics"); 201 202 static int 203 ubsec_probe(device_t dev) 204 { 205 if (pci_get_vendor(dev) == PCI_VENDOR_SUN && 206 (pci_get_device(dev) == PCI_PRODUCT_SUN_5821 || 207 pci_get_device(dev) == PCI_PRODUCT_SUN_SCA1K)) 208 return (0); 209 if (pci_get_vendor(dev) == PCI_VENDOR_BLUESTEEL && 210 (pci_get_device(dev) == PCI_PRODUCT_BLUESTEEL_5501 || 211 pci_get_device(dev) == PCI_PRODUCT_BLUESTEEL_5601)) 212 return (0); 213 if (pci_get_vendor(dev) == PCI_VENDOR_BROADCOM && 214 (pci_get_device(dev) == PCI_PRODUCT_BROADCOM_5801 || 215 pci_get_device(dev) == PCI_PRODUCT_BROADCOM_5802 || 216 pci_get_device(dev) == PCI_PRODUCT_BROADCOM_5805 || 217 pci_get_device(dev) == PCI_PRODUCT_BROADCOM_5820 || 218 pci_get_device(dev) == PCI_PRODUCT_BROADCOM_5821 || 219 pci_get_device(dev) == PCI_PRODUCT_BROADCOM_5822 || 220 pci_get_device(dev) == PCI_PRODUCT_BROADCOM_5823 221 )) 222 return (0); 223 return (ENXIO); 224 } 225 226 static const char* 227 ubsec_partname(struct ubsec_softc *sc) 228 { 229 /* XXX sprintf numbers when not decoded */ 230 switch (pci_get_vendor(sc->sc_dev)) { 231 case PCI_VENDOR_BROADCOM: 232 switch (pci_get_device(sc->sc_dev)) { 233 case PCI_PRODUCT_BROADCOM_5801: return "Broadcom 5801"; 234 case PCI_PRODUCT_BROADCOM_5802: return "Broadcom 5802"; 235 case PCI_PRODUCT_BROADCOM_5805: return "Broadcom 5805"; 236 case PCI_PRODUCT_BROADCOM_5820: return "Broadcom 5820"; 237 case PCI_PRODUCT_BROADCOM_5821: return "Broadcom 5821"; 238 case PCI_PRODUCT_BROADCOM_5822: return "Broadcom 5822"; 239 case PCI_PRODUCT_BROADCOM_5823: return "Broadcom 5823"; 240 } 241 return "Broadcom 
unknown-part"; 242 case PCI_VENDOR_BLUESTEEL: 243 switch (pci_get_device(sc->sc_dev)) { 244 case PCI_PRODUCT_BLUESTEEL_5601: return "Bluesteel 5601"; 245 } 246 return "Bluesteel unknown-part"; 247 case PCI_VENDOR_SUN: 248 switch (pci_get_device(sc->sc_dev)) { 249 case PCI_PRODUCT_SUN_5821: return "Sun Crypto 5821"; 250 case PCI_PRODUCT_SUN_SCA1K: return "Sun Crypto 1K"; 251 } 252 return "Sun unknown-part"; 253 } 254 return "Unknown-vendor unknown-part"; 255 } 256 257 static void 258 default_harvest(struct rndtest_state *rsp, void *buf, u_int count) 259 { 260 u_int32_t *p = (u_int32_t *)buf; 261 for (count /= sizeof (u_int32_t); count; count--) 262 add_true_randomness(*p++); 263 } 264 265 static int 266 ubsec_attach(device_t dev) 267 { 268 struct ubsec_softc *sc = device_get_softc(dev); 269 struct ubsec_dma *dmap; 270 u_int32_t cmd, i; 271 int rid; 272 273 KASSERT(sc != NULL, ("ubsec_attach: null software carrier!")); 274 bzero(sc, sizeof (*sc)); 275 sc->sc_dev = dev; 276 277 SIMPLEQ_INIT(&sc->sc_queue); 278 SIMPLEQ_INIT(&sc->sc_qchip); 279 SIMPLEQ_INIT(&sc->sc_queue2); 280 SIMPLEQ_INIT(&sc->sc_qchip2); 281 SIMPLEQ_INIT(&sc->sc_q2free); 282 283 /* XXX handle power management */ 284 285 sc->sc_statmask = BS_STAT_MCR1_DONE | BS_STAT_DMAERR; 286 287 if (pci_get_vendor(dev) == PCI_VENDOR_BLUESTEEL && 288 pci_get_device(dev) == PCI_PRODUCT_BLUESTEEL_5601) 289 sc->sc_flags |= UBS_FLAGS_KEY | UBS_FLAGS_RNG; 290 291 if (pci_get_vendor(dev) == PCI_VENDOR_BROADCOM && 292 (pci_get_device(dev) == PCI_PRODUCT_BROADCOM_5802 || 293 pci_get_device(dev) == PCI_PRODUCT_BROADCOM_5805)) 294 sc->sc_flags |= UBS_FLAGS_KEY | UBS_FLAGS_RNG; 295 296 if (pci_get_vendor(dev) == PCI_VENDOR_BROADCOM && 297 pci_get_device(dev) == PCI_PRODUCT_BROADCOM_5820) 298 sc->sc_flags |= UBS_FLAGS_KEY | UBS_FLAGS_RNG | 299 UBS_FLAGS_LONGCTX | UBS_FLAGS_HWNORM | UBS_FLAGS_BIGKEY; 300 301 if ((pci_get_vendor(dev) == PCI_VENDOR_BROADCOM && 302 (pci_get_device(dev) == PCI_PRODUCT_BROADCOM_5821 || 303 pci_get_device(dev) == PCI_PRODUCT_BROADCOM_5822 || 304 pci_get_device(dev) == PCI_PRODUCT_BROADCOM_5823)) || 305 (pci_get_vendor(dev) == PCI_VENDOR_SUN && 306 (pci_get_device(dev) == PCI_PRODUCT_SUN_SCA1K || 307 pci_get_device(dev) == PCI_PRODUCT_SUN_5821))) { 308 /* NB: the 5821/5822 defines some additional status bits */ 309 sc->sc_statmask |= BS_STAT_MCR1_ALLEMPTY | 310 BS_STAT_MCR2_ALLEMPTY; 311 sc->sc_flags |= UBS_FLAGS_KEY | UBS_FLAGS_RNG | 312 UBS_FLAGS_LONGCTX | UBS_FLAGS_HWNORM | UBS_FLAGS_BIGKEY; 313 } 314 315 cmd = pci_read_config(dev, PCIR_COMMAND, 4); 316 cmd |= PCIM_CMD_MEMEN | PCIM_CMD_BUSMASTEREN; 317 pci_write_config(dev, PCIR_COMMAND, cmd, 4); 318 cmd = pci_read_config(dev, PCIR_COMMAND, 4); 319 320 if (!(cmd & PCIM_CMD_MEMEN)) { 321 device_printf(dev, "failed to enable memory mapping\n"); 322 goto bad; 323 } 324 325 if (!(cmd & PCIM_CMD_BUSMASTEREN)) { 326 device_printf(dev, "failed to enable bus mastering\n"); 327 goto bad; 328 } 329 330 /* 331 * Setup memory-mapping of PCI registers. 332 */ 333 rid = BS_BAR; 334 sc->sc_sr = bus_alloc_resource(dev, SYS_RES_MEMORY, &rid, 335 0, ~0, 1, RF_ACTIVE); 336 if (sc->sc_sr == NULL) { 337 device_printf(dev, "cannot map register space\n"); 338 goto bad; 339 } 340 sc->sc_st = rman_get_bustag(sc->sc_sr); 341 sc->sc_sh = rman_get_bushandle(sc->sc_sr); 342 343 /* 344 * Arrange interrupt line. 
345 */ 346 rid = 0; 347 sc->sc_irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid, 348 0, ~0, 1, RF_SHAREABLE|RF_ACTIVE); 349 if (sc->sc_irq == NULL) { 350 device_printf(dev, "could not map interrupt\n"); 351 goto bad1; 352 } 353 /* 354 * NB: Network code assumes we are blocked with splimp() 355 * so make sure the IRQ is mapped appropriately. 356 */ 357 if (bus_setup_intr(dev, sc->sc_irq, 0, 358 ubsec_intr, sc, 359 &sc->sc_ih, NULL)) { 360 device_printf(dev, "could not establish interrupt\n"); 361 goto bad2; 362 } 363 364 sc->sc_cid = crypto_get_driverid(dev, CRYPTOCAP_F_HARDWARE); 365 if (sc->sc_cid < 0) { 366 device_printf(dev, "could not get crypto driver id\n"); 367 goto bad3; 368 } 369 370 /* 371 * Setup DMA descriptor area. 372 */ 373 if (bus_dma_tag_create(NULL, /* parent */ 374 1, 0, /* alignment, bounds */ 375 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ 376 BUS_SPACE_MAXADDR, /* highaddr */ 377 NULL, NULL, /* filter, filterarg */ 378 0x3ffff, /* maxsize */ 379 UBS_MAX_SCATTER, /* nsegments */ 380 0xffff, /* maxsegsize */ 381 BUS_DMA_ALLOCNOW, /* flags */ 382 &sc->sc_dmat)) { 383 device_printf(dev, "cannot allocate DMA tag\n"); 384 goto bad4; 385 } 386 SIMPLEQ_INIT(&sc->sc_freequeue); 387 dmap = sc->sc_dmaa; 388 for (i = 0; i < UBS_MAX_NQUEUE; i++, dmap++) { 389 struct ubsec_q *q; 390 391 q = kmalloc(sizeof(struct ubsec_q), M_DEVBUF, M_WAITOK); 392 if (ubsec_dma_malloc(sc, sizeof(struct ubsec_dmachunk), 393 &dmap->d_alloc, 0)) { 394 device_printf(dev, "cannot allocate dma buffers\n"); 395 kfree(q, M_DEVBUF); 396 break; 397 } 398 dmap->d_dma = (struct ubsec_dmachunk *)dmap->d_alloc.dma_vaddr; 399 400 q->q_dma = dmap; 401 sc->sc_queuea[i] = q; 402 403 SIMPLEQ_INSERT_TAIL(&sc->sc_freequeue, q, q_next); 404 } 405 406 device_printf(sc->sc_dev, "%s\n", ubsec_partname(sc)); 407 408 crypto_register(sc->sc_cid, CRYPTO_3DES_CBC, 0, 0); 409 crypto_register(sc->sc_cid, CRYPTO_DES_CBC, 0, 0); 410 crypto_register(sc->sc_cid, CRYPTO_MD5_HMAC, 0, 0); 411 crypto_register(sc->sc_cid, CRYPTO_SHA1_HMAC, 0, 0); 412 413 /* 414 * Reset Broadcom chip 415 */ 416 ubsec_reset_board(sc); 417 418 /* 419 * Init Broadcom specific PCI settings 420 */ 421 ubsec_init_pciregs(dev); 422 423 /* 424 * Init Broadcom chip 425 */ 426 ubsec_init_board(sc); 427 428 #ifndef UBSEC_NO_RNG 429 if (sc->sc_flags & UBS_FLAGS_RNG) { 430 sc->sc_statmask |= BS_STAT_MCR2_DONE; 431 #ifdef UBSEC_RNDTEST 432 sc->sc_rndtest = rndtest_attach(dev); 433 if (sc->sc_rndtest) 434 sc->sc_harvest = rndtest_harvest; 435 else 436 sc->sc_harvest = default_harvest; 437 #else 438 sc->sc_harvest = default_harvest; 439 #endif 440 441 if (ubsec_dma_malloc(sc, sizeof(struct ubsec_mcr), 442 &sc->sc_rng.rng_q.q_mcr, 0)) 443 goto skip_rng; 444 445 if (ubsec_dma_malloc(sc, sizeof(struct ubsec_ctx_rngbypass), 446 &sc->sc_rng.rng_q.q_ctx, 0)) { 447 ubsec_dma_free(sc, &sc->sc_rng.rng_q.q_mcr); 448 goto skip_rng; 449 } 450 451 if (ubsec_dma_malloc(sc, sizeof(u_int32_t) * 452 UBSEC_RNG_BUFSIZ, &sc->sc_rng.rng_buf, 0)) { 453 ubsec_dma_free(sc, &sc->sc_rng.rng_q.q_ctx); 454 ubsec_dma_free(sc, &sc->sc_rng.rng_q.q_mcr); 455 goto skip_rng; 456 } 457 458 if (hz >= 100) 459 sc->sc_rnghz = hz / 100; 460 else 461 sc->sc_rnghz = 1; 462 callout_init(&sc->sc_rngto); 463 callout_reset(&sc->sc_rngto, sc->sc_rnghz, ubsec_rng, sc); 464 skip_rng: 465 ; 466 } 467 #endif /* UBSEC_NO_RNG */ 468 469 if (sc->sc_flags & UBS_FLAGS_KEY) { 470 sc->sc_statmask |= BS_STAT_MCR2_DONE; 471 472 crypto_kregister(sc->sc_cid, CRK_MOD_EXP, 0); 473 #if 0 474 crypto_kregister(sc->sc_cid, CRK_MOD_EXP_CRT, 0); 
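		/*
		 * NB: CRK_MOD_EXP_CRT registration is compiled out above.
		 * If it were enabled, ubsec_kprocess() below would dispatch
		 * such requests to ubsec_kprocess_rsapriv().
		 */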
475 #endif 476 } 477 return (0); 478 bad4: 479 crypto_unregister_all(sc->sc_cid); 480 bad3: 481 bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih); 482 bad2: 483 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq); 484 bad1: 485 bus_release_resource(dev, SYS_RES_MEMORY, BS_BAR, sc->sc_sr); 486 bad: 487 return (ENXIO); 488 } 489 490 /* 491 * Detach a device that successfully probed. 492 */ 493 static int 494 ubsec_detach(device_t dev) 495 { 496 struct ubsec_softc *sc = device_get_softc(dev); 497 498 KASSERT(sc != NULL, ("ubsec_detach: null software carrier")); 499 500 /* XXX wait/abort active ops */ 501 502 crit_enter(); 503 504 callout_stop(&sc->sc_rngto); 505 506 crypto_unregister_all(sc->sc_cid); 507 508 #ifdef UBSEC_RNDTEST 509 if (sc->sc_rndtest) 510 rndtest_detach(sc->sc_rndtest); 511 #endif 512 513 while (!SIMPLEQ_EMPTY(&sc->sc_freequeue)) { 514 struct ubsec_q *q; 515 516 q = SIMPLEQ_FIRST(&sc->sc_freequeue); 517 SIMPLEQ_REMOVE_HEAD(&sc->sc_freequeue, q_next); 518 ubsec_dma_free(sc, &q->q_dma->d_alloc); 519 kfree(q, M_DEVBUF); 520 } 521 #ifndef UBSEC_NO_RNG 522 if (sc->sc_flags & UBS_FLAGS_RNG) { 523 ubsec_dma_free(sc, &sc->sc_rng.rng_q.q_mcr); 524 ubsec_dma_free(sc, &sc->sc_rng.rng_q.q_ctx); 525 ubsec_dma_free(sc, &sc->sc_rng.rng_buf); 526 } 527 #endif /* UBSEC_NO_RNG */ 528 529 bus_generic_detach(dev); 530 bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih); 531 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq); 532 533 bus_dma_tag_destroy(sc->sc_dmat); 534 bus_release_resource(dev, SYS_RES_MEMORY, BS_BAR, sc->sc_sr); 535 536 crit_exit(); 537 538 return (0); 539 } 540 541 /* 542 * Stop all chip i/o so that the kernel's probe routines don't 543 * get confused by errant DMAs when rebooting. 544 */ 545 static void 546 ubsec_shutdown(device_t dev) 547 { 548 #ifdef notyet 549 ubsec_stop(device_get_softc(dev)); 550 #endif 551 } 552 553 /* 554 * Device suspend routine. 555 */ 556 static int 557 ubsec_suspend(device_t dev) 558 { 559 struct ubsec_softc *sc = device_get_softc(dev); 560 561 KASSERT(sc != NULL, ("ubsec_suspend: null software carrier")); 562 #ifdef notyet 563 /* XXX stop the device and save PCI settings */ 564 #endif 565 sc->sc_suspended = 1; 566 567 return (0); 568 } 569 570 static int 571 ubsec_resume(device_t dev) 572 { 573 struct ubsec_softc *sc = device_get_softc(dev); 574 575 KASSERT(sc != NULL, ("ubsec_resume: null software carrier")); 576 #ifdef notyet 577 /* XXX retore PCI settings and start the device */ 578 #endif 579 sc->sc_suspended = 0; 580 return (0); 581 } 582 583 /* 584 * UBSEC Interrupt routine 585 */ 586 static void 587 ubsec_intr(void *arg) 588 { 589 struct ubsec_softc *sc = arg; 590 volatile u_int32_t stat; 591 struct ubsec_q *q; 592 struct ubsec_dma *dmap; 593 int npkts = 0, i; 594 595 stat = READ_REG(sc, BS_STAT); 596 stat &= sc->sc_statmask; 597 if (stat == 0) { 598 return; 599 } 600 601 WRITE_REG(sc, BS_STAT, stat); /* IACK */ 602 603 /* 604 * Check to see if we have any packets waiting for us 605 */ 606 if ((stat & BS_STAT_MCR1_DONE)) { 607 while (!SIMPLEQ_EMPTY(&sc->sc_qchip)) { 608 q = SIMPLEQ_FIRST(&sc->sc_qchip); 609 dmap = q->q_dma; 610 611 if ((dmap->d_dma->d_mcr.mcr_flags & htole16(UBS_MCR_DONE)) == 0) 612 break; 613 614 SIMPLEQ_REMOVE_HEAD(&sc->sc_qchip, q_next); 615 616 npkts = q->q_nstacked_mcrs; 617 sc->sc_nqchip -= 1+npkts; 618 /* 619 * search for further sc_qchip ubsec_q's that share 620 * the same MCR, and complete them too, they must be 621 * at the top. 
622 */ 623 for (i = 0; i < npkts; i++) { 624 if(q->q_stacked_mcr[i]) { 625 ubsec_callback(sc, q->q_stacked_mcr[i]); 626 } else { 627 break; 628 } 629 } 630 ubsec_callback(sc, q); 631 } 632 633 /* 634 * Don't send any more packet to chip if there has been 635 * a DMAERR. 636 */ 637 if (!(stat & BS_STAT_DMAERR)) 638 ubsec_feed(sc); 639 } 640 641 /* 642 * Check to see if we have any key setups/rng's waiting for us 643 */ 644 if ((sc->sc_flags & (UBS_FLAGS_KEY|UBS_FLAGS_RNG)) && 645 (stat & BS_STAT_MCR2_DONE)) { 646 struct ubsec_q2 *q2; 647 struct ubsec_mcr *mcr; 648 649 while (!SIMPLEQ_EMPTY(&sc->sc_qchip2)) { 650 q2 = SIMPLEQ_FIRST(&sc->sc_qchip2); 651 652 ubsec_dma_sync(&q2->q_mcr, 653 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 654 655 mcr = (struct ubsec_mcr *)q2->q_mcr.dma_vaddr; 656 if ((mcr->mcr_flags & htole16(UBS_MCR_DONE)) == 0) { 657 ubsec_dma_sync(&q2->q_mcr, 658 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 659 break; 660 } 661 SIMPLEQ_REMOVE_HEAD(&sc->sc_qchip2, q_next); 662 ubsec_callback2(sc, q2); 663 /* 664 * Don't send any more packet to chip if there has been 665 * a DMAERR. 666 */ 667 if (!(stat & BS_STAT_DMAERR)) 668 ubsec_feed2(sc); 669 } 670 } 671 672 /* 673 * Check to see if we got any DMA Error 674 */ 675 if (stat & BS_STAT_DMAERR) { 676 #ifdef UBSEC_DEBUG 677 if (ubsec_debug) { 678 volatile u_int32_t a = READ_REG(sc, BS_ERR); 679 680 kprintf("dmaerr %s@%08x\n", 681 (a & BS_ERR_READ) ? "read" : "write", 682 a & BS_ERR_ADDR); 683 } 684 #endif /* UBSEC_DEBUG */ 685 ubsecstats.hst_dmaerr++; 686 ubsec_totalreset(sc); 687 ubsec_feed(sc); 688 } 689 690 if (sc->sc_needwakeup) { /* XXX check high watermark */ 691 int wakeup = sc->sc_needwakeup & (CRYPTO_SYMQ|CRYPTO_ASYMQ); 692 #ifdef UBSEC_DEBUG 693 if (ubsec_debug) 694 device_printf(sc->sc_dev, "wakeup crypto (%x)\n", 695 sc->sc_needwakeup); 696 #endif /* UBSEC_DEBUG */ 697 sc->sc_needwakeup &= ~wakeup; 698 crypto_unblock(sc->sc_cid, wakeup); 699 } 700 } 701 702 /* 703 * ubsec_feed() - aggregate and post requests to chip 704 */ 705 static void 706 ubsec_feed(struct ubsec_softc *sc) 707 { 708 struct ubsec_q *q, *q2; 709 int npkts, i; 710 void *v; 711 u_int32_t stat; 712 713 /* 714 * Decide how many ops to combine in a single MCR. We cannot 715 * aggregate more than UBS_MAX_AGGR because this is the number 716 * of slots defined in the data structure. Note that 717 * aggregation only happens if ops are marked batch'able. 718 * Aggregating ops reduces the number of interrupts to the host 719 * but also (potentially) increases the latency for processing 720 * completed ops as we only get an interrupt when all aggregated 721 * ops have completed. 722 */ 723 if (sc->sc_nqueue == 0) 724 return; 725 if (sc->sc_nqueue > 1) { 726 npkts = 0; 727 SIMPLEQ_FOREACH(q, &sc->sc_queue, q_next) { 728 npkts++; 729 if ((q->q_crp->crp_flags & CRYPTO_F_BATCH) == 0) 730 break; 731 } 732 } else 733 npkts = 1; 734 /* 735 * Check device status before going any further. 
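	 * If MCR1 is already full, just return and let a later interrupt
	 * or submission retry; a pending DMA error triggers a full chip
	 * reset instead.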
736 */ 737 if ((stat = READ_REG(sc, BS_STAT)) & (BS_STAT_MCR1_FULL | BS_STAT_DMAERR)) { 738 if (stat & BS_STAT_DMAERR) { 739 ubsec_totalreset(sc); 740 ubsecstats.hst_dmaerr++; 741 } else 742 ubsecstats.hst_mcr1full++; 743 return; 744 } 745 if (sc->sc_nqueue > ubsecstats.hst_maxqueue) 746 ubsecstats.hst_maxqueue = sc->sc_nqueue; 747 if (npkts > UBS_MAX_AGGR) 748 npkts = UBS_MAX_AGGR; 749 if (npkts < 2) /* special case 1 op */ 750 goto feed1; 751 752 ubsecstats.hst_totbatch += npkts-1; 753 #ifdef UBSEC_DEBUG 754 if (ubsec_debug) 755 kprintf("merging %d records\n", npkts); 756 #endif /* UBSEC_DEBUG */ 757 758 q = SIMPLEQ_FIRST(&sc->sc_queue); 759 SIMPLEQ_REMOVE_HEAD(&sc->sc_queue, q_next); 760 --sc->sc_nqueue; 761 762 bus_dmamap_sync(sc->sc_dmat, q->q_src_map, BUS_DMASYNC_PREWRITE); 763 if (q->q_dst_map != NULL) 764 bus_dmamap_sync(sc->sc_dmat, q->q_dst_map, BUS_DMASYNC_PREREAD); 765 766 q->q_nstacked_mcrs = npkts - 1; /* Number of packets stacked */ 767 768 for (i = 0; i < q->q_nstacked_mcrs; i++) { 769 q2 = SIMPLEQ_FIRST(&sc->sc_queue); 770 bus_dmamap_sync(sc->sc_dmat, q2->q_src_map, 771 BUS_DMASYNC_PREWRITE); 772 if (q2->q_dst_map != NULL) 773 bus_dmamap_sync(sc->sc_dmat, q2->q_dst_map, 774 BUS_DMASYNC_PREREAD); 775 SIMPLEQ_REMOVE_HEAD(&sc->sc_queue, q_next); 776 --sc->sc_nqueue; 777 778 v = (void*)(((char *)&q2->q_dma->d_dma->d_mcr) + sizeof(struct ubsec_mcr) - 779 sizeof(struct ubsec_mcr_add)); 780 bcopy(v, &q->q_dma->d_dma->d_mcradd[i], sizeof(struct ubsec_mcr_add)); 781 q->q_stacked_mcr[i] = q2; 782 } 783 q->q_dma->d_dma->d_mcr.mcr_pkts = htole16(npkts); 784 SIMPLEQ_INSERT_TAIL(&sc->sc_qchip, q, q_next); 785 sc->sc_nqchip += npkts; 786 if (sc->sc_nqchip > ubsecstats.hst_maxqchip) 787 ubsecstats.hst_maxqchip = sc->sc_nqchip; 788 ubsec_dma_sync(&q->q_dma->d_alloc, 789 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 790 WRITE_REG(sc, BS_MCR1, q->q_dma->d_alloc.dma_paddr + 791 offsetof(struct ubsec_dmachunk, d_mcr)); 792 return; 793 794 feed1: 795 q = SIMPLEQ_FIRST(&sc->sc_queue); 796 797 bus_dmamap_sync(sc->sc_dmat, q->q_src_map, BUS_DMASYNC_PREWRITE); 798 if (q->q_dst_map != NULL) 799 bus_dmamap_sync(sc->sc_dmat, q->q_dst_map, BUS_DMASYNC_PREREAD); 800 ubsec_dma_sync(&q->q_dma->d_alloc, 801 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 802 803 WRITE_REG(sc, BS_MCR1, q->q_dma->d_alloc.dma_paddr + 804 offsetof(struct ubsec_dmachunk, d_mcr)); 805 #ifdef UBSEC_DEBUG 806 if (ubsec_debug) 807 kprintf("feed1: q->chip %p %08x stat %08x\n", 808 q, (u_int32_t)vtophys(&q->q_dma->d_dma->d_mcr), 809 stat); 810 #endif /* UBSEC_DEBUG */ 811 SIMPLEQ_REMOVE_HEAD(&sc->sc_queue, q_next); 812 --sc->sc_nqueue; 813 SIMPLEQ_INSERT_TAIL(&sc->sc_qchip, q, q_next); 814 sc->sc_nqchip++; 815 if (sc->sc_nqchip > ubsecstats.hst_maxqchip) 816 ubsecstats.hst_maxqchip = sc->sc_nqchip; 817 return; 818 } 819 820 static void 821 ubsec_setup_enckey(struct ubsec_session *ses, int algo, caddr_t key) 822 { 823 824 /* Go ahead and compute key in ubsec's byte order */ 825 if (algo == CRYPTO_DES_CBC) { 826 bcopy(key, &ses->ses_deskey[0], 8); 827 bcopy(key, &ses->ses_deskey[2], 8); 828 bcopy(key, &ses->ses_deskey[4], 8); 829 } else 830 bcopy(key, ses->ses_deskey, 24); 831 832 SWAP32(ses->ses_deskey[0]); 833 SWAP32(ses->ses_deskey[1]); 834 SWAP32(ses->ses_deskey[2]); 835 SWAP32(ses->ses_deskey[3]); 836 SWAP32(ses->ses_deskey[4]); 837 SWAP32(ses->ses_deskey[5]); 838 } 839 840 static void 841 ubsec_setup_mackey(struct ubsec_session *ses, int algo, caddr_t key, int klen) 842 { 843 MD5_CTX md5ctx; 844 SHA1_CTX sha1ctx; 845 int i; 846 847 
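	/*
	 * Precompute the HMAC inner and outer hash states so the chip only
	 * has to continue the digests over the packet data:
	 *
	 *	inner state = H(key ^ ipad),  outer state = H(key ^ opad)
	 *
	 * The key is XORed in place below; the final XOR with HMAC_OPAD_VAL
	 * restores it, so the caller's copy is unchanged on return.
	 */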
for (i = 0; i < klen; i++) 848 key[i] ^= HMAC_IPAD_VAL; 849 850 if (algo == CRYPTO_MD5_HMAC) { 851 MD5Init(&md5ctx); 852 MD5Update(&md5ctx, key, klen); 853 MD5Update(&md5ctx, hmac_ipad_buffer, MD5_HMAC_BLOCK_LEN - klen); 854 bcopy(md5ctx.state, ses->ses_hminner, sizeof(md5ctx.state)); 855 } else { 856 SHA1Init(&sha1ctx); 857 SHA1Update(&sha1ctx, key, klen); 858 SHA1Update(&sha1ctx, hmac_ipad_buffer, 859 SHA1_HMAC_BLOCK_LEN - klen); 860 bcopy(sha1ctx.h.b32, ses->ses_hminner, sizeof(sha1ctx.h.b32)); 861 } 862 863 for (i = 0; i < klen; i++) 864 key[i] ^= (HMAC_IPAD_VAL ^ HMAC_OPAD_VAL); 865 866 if (algo == CRYPTO_MD5_HMAC) { 867 MD5Init(&md5ctx); 868 MD5Update(&md5ctx, key, klen); 869 MD5Update(&md5ctx, hmac_opad_buffer, MD5_HMAC_BLOCK_LEN - klen); 870 bcopy(md5ctx.state, ses->ses_hmouter, sizeof(md5ctx.state)); 871 } else { 872 SHA1Init(&sha1ctx); 873 SHA1Update(&sha1ctx, key, klen); 874 SHA1Update(&sha1ctx, hmac_opad_buffer, 875 SHA1_HMAC_BLOCK_LEN - klen); 876 bcopy(sha1ctx.h.b32, ses->ses_hmouter, sizeof(sha1ctx.h.b32)); 877 } 878 879 for (i = 0; i < klen; i++) 880 key[i] ^= HMAC_OPAD_VAL; 881 } 882 883 /* 884 * Allocate a new 'session' and return an encoded session id. 'sidp' 885 * contains our registration id, and should contain an encoded session 886 * id on successful allocation. 887 */ 888 static int 889 ubsec_newsession(void *arg, u_int32_t *sidp, struct cryptoini *cri) 890 { 891 struct cryptoini *c, *encini = NULL, *macini = NULL; 892 struct ubsec_softc *sc = arg; 893 struct ubsec_session *ses = NULL; 894 MD5_CTX md5ctx; 895 SHA1_CTX sha1ctx; 896 int i, sesn; 897 898 KASSERT(sc != NULL, ("ubsec_newsession: null softc")); 899 if (sidp == NULL || cri == NULL || sc == NULL) 900 return (EINVAL); 901 902 for (c = cri; c != NULL; c = c->cri_next) { 903 if (c->cri_alg == CRYPTO_MD5_HMAC || 904 c->cri_alg == CRYPTO_SHA1_HMAC) { 905 if (macini) 906 return (EINVAL); 907 macini = c; 908 } else if (c->cri_alg == CRYPTO_DES_CBC || 909 c->cri_alg == CRYPTO_3DES_CBC) { 910 if (encini) 911 return (EINVAL); 912 encini = c; 913 } else 914 return (EINVAL); 915 } 916 if (encini == NULL && macini == NULL) 917 return (EINVAL); 918 919 if (sc->sc_sessions == NULL) { 920 ses = sc->sc_sessions = kmalloc(sizeof(struct ubsec_session), 921 M_DEVBUF, M_INTWAIT); 922 sesn = 0; 923 sc->sc_nsessions = 1; 924 } else { 925 for (sesn = 0; sesn < sc->sc_nsessions; sesn++) { 926 if (sc->sc_sessions[sesn].ses_used == 0) { 927 ses = &sc->sc_sessions[sesn]; 928 break; 929 } 930 } 931 932 if (ses == NULL) { 933 sesn = sc->sc_nsessions; 934 ses = kmalloc((sesn + 1) * sizeof(struct ubsec_session), 935 M_DEVBUF, M_INTWAIT); 936 bcopy(sc->sc_sessions, ses, sesn * 937 sizeof(struct ubsec_session)); 938 bzero(sc->sc_sessions, sesn * 939 sizeof(struct ubsec_session)); 940 kfree(sc->sc_sessions, M_DEVBUF); 941 sc->sc_sessions = ses; 942 ses = &sc->sc_sessions[sesn]; 943 sc->sc_nsessions++; 944 } 945 } 946 947 bzero(ses, sizeof(struct ubsec_session)); 948 ses->ses_used = 1; 949 if (encini) { 950 read_random(ses->ses_iv, sizeof(ses->ses_iv)); 951 if (encini->cri_key != NULL) { 952 ubsec_setup_enckey(ses, encini->cri_alg, 953 encini->cri_key); 954 } 955 #if 0 956 /* get an IV, network byte order */ 957 /* XXX may read fewer than requested */ 958 read_random(ses->ses_iv, sizeof(ses->ses_iv)); 959 960 /* Go ahead and compute key in ubsec's byte order */ 961 if (encini->cri_alg == CRYPTO_DES_CBC) { 962 bcopy(encini->cri_key, &ses->ses_deskey[0], 8); 963 bcopy(encini->cri_key, &ses->ses_deskey[2], 8); 964 bcopy(encini->cri_key, 
&ses->ses_deskey[4], 8); 965 } else 966 bcopy(encini->cri_key, ses->ses_deskey, 24); 967 968 SWAP32(ses->ses_deskey[0]); 969 SWAP32(ses->ses_deskey[1]); 970 SWAP32(ses->ses_deskey[2]); 971 SWAP32(ses->ses_deskey[3]); 972 SWAP32(ses->ses_deskey[4]); 973 SWAP32(ses->ses_deskey[5]); 974 #endif 975 } 976 977 if (macini) { 978 ses->ses_mlen = macini->cri_mlen; 979 if (ses->ses_mlen == 0) { 980 if (macini->cri_alg == CRYPTO_MD5_HMAC) 981 ses->ses_mlen = MD5_HASH_LEN; 982 else 983 ses->ses_mlen = SHA1_HASH_LEN; 984 } 985 986 if (macini->cri_key != NULL) { 987 ubsec_setup_mackey(ses, macini->cri_alg, 988 macini->cri_key, macini->cri_klen/8); 989 } 990 #if 0 991 for (i = 0; i < macini->cri_klen / 8; i++) 992 macini->cri_key[i] ^= HMAC_IPAD_VAL; 993 994 if (macini->cri_alg == CRYPTO_MD5_HMAC) { 995 MD5Init(&md5ctx); 996 MD5Update(&md5ctx, macini->cri_key, 997 macini->cri_klen / 8); 998 MD5Update(&md5ctx, hmac_ipad_buffer, 999 MD5_HMAC_BLOCK_LEN - (macini->cri_klen / 8)); 1000 bcopy(md5ctx.state, ses->ses_hminner, 1001 sizeof(md5ctx.state)); 1002 } else { 1003 SHA1Init(&sha1ctx); 1004 SHA1Update(&sha1ctx, macini->cri_key, 1005 macini->cri_klen / 8); 1006 SHA1Update(&sha1ctx, hmac_ipad_buffer, 1007 SHA1_HMAC_BLOCK_LEN - (macini->cri_klen / 8)); 1008 bcopy(sha1ctx.h.b32, ses->ses_hminner, 1009 sizeof(sha1ctx.h.b32)); 1010 } 1011 1012 for (i = 0; i < macini->cri_klen / 8; i++) 1013 macini->cri_key[i] ^= (HMAC_IPAD_VAL ^ HMAC_OPAD_VAL); 1014 1015 if (macini->cri_alg == CRYPTO_MD5_HMAC) { 1016 MD5Init(&md5ctx); 1017 MD5Update(&md5ctx, macini->cri_key, 1018 macini->cri_klen / 8); 1019 MD5Update(&md5ctx, hmac_opad_buffer, 1020 MD5_HMAC_BLOCK_LEN - (macini->cri_klen / 8)); 1021 bcopy(md5ctx.state, ses->ses_hmouter, 1022 sizeof(md5ctx.state)); 1023 } else { 1024 SHA1Init(&sha1ctx); 1025 SHA1Update(&sha1ctx, macini->cri_key, 1026 macini->cri_klen / 8); 1027 SHA1Update(&sha1ctx, hmac_opad_buffer, 1028 SHA1_HMAC_BLOCK_LEN - (macini->cri_klen / 8)); 1029 bcopy(sha1ctx.h.b32, ses->ses_hmouter, 1030 sizeof(sha1ctx.h.b32)); 1031 } 1032 1033 for (i = 0; i < macini->cri_klen / 8; i++) 1034 macini->cri_key[i] ^= HMAC_OPAD_VAL; 1035 #endif 1036 } 1037 1038 *sidp = UBSEC_SID(device_get_unit(sc->sc_dev), sesn); 1039 return (0); 1040 } 1041 1042 /* 1043 * Deallocate a session. 
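 * The session slot is simply zeroed; ses_used == 0 then marks it free
 * for reuse by ubsec_newsession().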
1044 */ 1045 static int 1046 ubsec_freesession(void *arg, u_int64_t tid) 1047 { 1048 struct ubsec_softc *sc = arg; 1049 int session; 1050 u_int32_t sid = ((u_int32_t) tid) & 0xffffffff; 1051 1052 KASSERT(sc != NULL, ("ubsec_freesession: null softc")); 1053 if (sc == NULL) 1054 return (EINVAL); 1055 1056 session = UBSEC_SESSION(sid); 1057 if (session >= sc->sc_nsessions) 1058 return (EINVAL); 1059 1060 bzero(&sc->sc_sessions[session], sizeof(sc->sc_sessions[session])); 1061 return (0); 1062 } 1063 1064 static void 1065 ubsec_op_cb(void *arg, bus_dma_segment_t *seg, int nsegs, bus_size_t mapsize, int error) 1066 { 1067 struct ubsec_operand *op = arg; 1068 1069 KASSERT(nsegs <= UBS_MAX_SCATTER, 1070 ("Too many DMA segments returned when mapping operand")); 1071 #ifdef UBSEC_DEBUG 1072 if (ubsec_debug) 1073 kprintf("ubsec_op_cb: mapsize %u nsegs %d\n", 1074 (u_int) mapsize, nsegs); 1075 #endif 1076 op->mapsize = mapsize; 1077 op->nsegs = nsegs; 1078 bcopy(seg, op->segs, nsegs * sizeof (seg[0])); 1079 } 1080 1081 static int 1082 ubsec_process(void *arg, struct cryptop *crp, int hint) 1083 { 1084 struct ubsec_q *q = NULL; 1085 int err = 0, i, j, nicealign; 1086 struct ubsec_softc *sc = arg; 1087 struct cryptodesc *crd1, *crd2, *maccrd, *enccrd; 1088 int encoffset = 0, macoffset = 0, cpskip, cpoffset; 1089 int sskip, dskip, stheend, dtheend; 1090 int16_t coffset; 1091 struct ubsec_session *ses; 1092 struct ubsec_pktctx ctx; 1093 struct ubsec_dma *dmap = NULL; 1094 1095 if (crp == NULL || crp->crp_callback == NULL || sc == NULL) { 1096 ubsecstats.hst_invalid++; 1097 return (EINVAL); 1098 } 1099 if (UBSEC_SESSION(crp->crp_sid) >= sc->sc_nsessions) { 1100 ubsecstats.hst_badsession++; 1101 return (EINVAL); 1102 } 1103 1104 crit_enter(); 1105 1106 if (SIMPLEQ_EMPTY(&sc->sc_freequeue)) { 1107 ubsecstats.hst_queuefull++; 1108 sc->sc_needwakeup |= CRYPTO_SYMQ; 1109 crit_exit(); 1110 return (ERESTART); 1111 } 1112 q = SIMPLEQ_FIRST(&sc->sc_freequeue); 1113 SIMPLEQ_REMOVE_HEAD(&sc->sc_freequeue, q_next); 1114 crit_exit(); 1115 1116 dmap = q->q_dma; /* Save dma pointer */ 1117 bzero(q, sizeof(struct ubsec_q)); 1118 bzero(&ctx, sizeof(ctx)); 1119 1120 q->q_sesn = UBSEC_SESSION(crp->crp_sid); 1121 q->q_dma = dmap; 1122 ses = &sc->sc_sessions[q->q_sesn]; 1123 1124 if (crp->crp_flags & CRYPTO_F_IMBUF) { 1125 q->q_src_m = (struct mbuf *)crp->crp_buf; 1126 q->q_dst_m = (struct mbuf *)crp->crp_buf; 1127 } else if (crp->crp_flags & CRYPTO_F_IOV) { 1128 q->q_src_io = (struct uio *)crp->crp_buf; 1129 q->q_dst_io = (struct uio *)crp->crp_buf; 1130 } else { 1131 ubsecstats.hst_badflags++; 1132 err = EINVAL; 1133 goto errout; /* XXX we don't handle contiguous blocks! 
*/ 1134 } 1135 1136 bzero(&dmap->d_dma->d_mcr, sizeof(struct ubsec_mcr)); 1137 1138 dmap->d_dma->d_mcr.mcr_pkts = htole16(1); 1139 dmap->d_dma->d_mcr.mcr_flags = 0; 1140 q->q_crp = crp; 1141 1142 crd1 = crp->crp_desc; 1143 if (crd1 == NULL) { 1144 ubsecstats.hst_nodesc++; 1145 err = EINVAL; 1146 goto errout; 1147 } 1148 crd2 = crd1->crd_next; 1149 1150 if (crd2 == NULL) { 1151 if (crd1->crd_alg == CRYPTO_MD5_HMAC || 1152 crd1->crd_alg == CRYPTO_SHA1_HMAC) { 1153 maccrd = crd1; 1154 enccrd = NULL; 1155 } else if (crd1->crd_alg == CRYPTO_DES_CBC || 1156 crd1->crd_alg == CRYPTO_3DES_CBC) { 1157 maccrd = NULL; 1158 enccrd = crd1; 1159 } else { 1160 ubsecstats.hst_badalg++; 1161 err = EINVAL; 1162 goto errout; 1163 } 1164 } else { 1165 if ((crd1->crd_alg == CRYPTO_MD5_HMAC || 1166 crd1->crd_alg == CRYPTO_SHA1_HMAC) && 1167 (crd2->crd_alg == CRYPTO_DES_CBC || 1168 crd2->crd_alg == CRYPTO_3DES_CBC) && 1169 ((crd2->crd_flags & CRD_F_ENCRYPT) == 0)) { 1170 maccrd = crd1; 1171 enccrd = crd2; 1172 } else if ((crd1->crd_alg == CRYPTO_DES_CBC || 1173 crd1->crd_alg == CRYPTO_3DES_CBC) && 1174 (crd2->crd_alg == CRYPTO_MD5_HMAC || 1175 crd2->crd_alg == CRYPTO_SHA1_HMAC) && 1176 (crd1->crd_flags & CRD_F_ENCRYPT)) { 1177 enccrd = crd1; 1178 maccrd = crd2; 1179 } else { 1180 /* 1181 * We cannot order the ubsec as requested 1182 */ 1183 ubsecstats.hst_badalg++; 1184 err = EINVAL; 1185 goto errout; 1186 } 1187 } 1188 1189 if (enccrd) { 1190 encoffset = enccrd->crd_skip; 1191 ctx.pc_flags |= htole16(UBS_PKTCTX_ENC_3DES); 1192 1193 if (enccrd->crd_flags & CRD_F_ENCRYPT) { 1194 q->q_flags |= UBSEC_QFLAGS_COPYOUTIV; 1195 1196 if (enccrd->crd_flags & CRD_F_IV_EXPLICIT) 1197 bcopy(enccrd->crd_iv, ctx.pc_iv, 8); 1198 else { 1199 ctx.pc_iv[0] = ses->ses_iv[0]; 1200 ctx.pc_iv[1] = ses->ses_iv[1]; 1201 } 1202 1203 if ((enccrd->crd_flags & CRD_F_IV_PRESENT) == 0) { 1204 if (crp->crp_flags & CRYPTO_F_IMBUF) 1205 m_copyback(q->q_src_m, 1206 enccrd->crd_inject, 1207 8, (caddr_t)ctx.pc_iv); 1208 else if (crp->crp_flags & CRYPTO_F_IOV) 1209 cuio_copyback(q->q_src_io, 1210 enccrd->crd_inject, 1211 8, (caddr_t)ctx.pc_iv); 1212 } 1213 } else { 1214 ctx.pc_flags |= htole16(UBS_PKTCTX_INBOUND); 1215 1216 if (enccrd->crd_flags & CRD_F_IV_EXPLICIT) 1217 bcopy(enccrd->crd_iv, ctx.pc_iv, 8); 1218 else if (crp->crp_flags & CRYPTO_F_IMBUF) 1219 m_copydata(q->q_src_m, enccrd->crd_inject, 1220 8, (caddr_t)ctx.pc_iv); 1221 else if (crp->crp_flags & CRYPTO_F_IOV) 1222 cuio_copydata(q->q_src_io, 1223 enccrd->crd_inject, 8, 1224 (caddr_t)ctx.pc_iv); 1225 } 1226 1227 ctx.pc_deskey[0] = ses->ses_deskey[0]; 1228 ctx.pc_deskey[1] = ses->ses_deskey[1]; 1229 ctx.pc_deskey[2] = ses->ses_deskey[2]; 1230 ctx.pc_deskey[3] = ses->ses_deskey[3]; 1231 ctx.pc_deskey[4] = ses->ses_deskey[4]; 1232 ctx.pc_deskey[5] = ses->ses_deskey[5]; 1233 SWAP32(ctx.pc_iv[0]); 1234 SWAP32(ctx.pc_iv[1]); 1235 } 1236 1237 if (maccrd) { 1238 macoffset = maccrd->crd_skip; 1239 1240 if (maccrd->crd_alg == CRYPTO_MD5_HMAC) 1241 ctx.pc_flags |= htole16(UBS_PKTCTX_AUTH_MD5); 1242 else 1243 ctx.pc_flags |= htole16(UBS_PKTCTX_AUTH_SHA1); 1244 1245 for (i = 0; i < 5; i++) { 1246 ctx.pc_hminner[i] = ses->ses_hminner[i]; 1247 ctx.pc_hmouter[i] = ses->ses_hmouter[i]; 1248 1249 HTOLE32(ctx.pc_hminner[i]); 1250 HTOLE32(ctx.pc_hmouter[i]); 1251 } 1252 } 1253 1254 if (enccrd && maccrd) { 1255 /* 1256 * ubsec cannot handle packets where the end of encryption 1257 * and authentication are not the same, or where the 1258 * encrypted part begins before the authenticated part. 
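		 * In other words we require
		 *	enccrd->crd_skip >= maccrd->crd_skip
		 *	encoffset + enccrd->crd_len == macoffset + maccrd->crd_len
		 * which matches the usual IPsec ESP layout, where the
		 * authenticated region starts first and both regions end at
		 * the same byte.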
1259 */ 1260 if ((encoffset + enccrd->crd_len) != 1261 (macoffset + maccrd->crd_len)) { 1262 ubsecstats.hst_lenmismatch++; 1263 err = EINVAL; 1264 goto errout; 1265 } 1266 if (enccrd->crd_skip < maccrd->crd_skip) { 1267 ubsecstats.hst_skipmismatch++; 1268 err = EINVAL; 1269 goto errout; 1270 } 1271 sskip = maccrd->crd_skip; 1272 cpskip = dskip = enccrd->crd_skip; 1273 stheend = maccrd->crd_len; 1274 dtheend = enccrd->crd_len; 1275 coffset = enccrd->crd_skip - maccrd->crd_skip; 1276 cpoffset = cpskip + dtheend; 1277 #ifdef UBSEC_DEBUG 1278 if (ubsec_debug) { 1279 kprintf("mac: skip %d, len %d, inject %d\n", 1280 maccrd->crd_skip, maccrd->crd_len, maccrd->crd_inject); 1281 kprintf("enc: skip %d, len %d, inject %d\n", 1282 enccrd->crd_skip, enccrd->crd_len, enccrd->crd_inject); 1283 kprintf("src: skip %d, len %d\n", sskip, stheend); 1284 kprintf("dst: skip %d, len %d\n", dskip, dtheend); 1285 kprintf("ubs: coffset %d, pktlen %d, cpskip %d, cpoffset %d\n", 1286 coffset, stheend, cpskip, cpoffset); 1287 } 1288 #endif 1289 } else { 1290 cpskip = dskip = sskip = macoffset + encoffset; 1291 dtheend = stheend = (enccrd)?enccrd->crd_len:maccrd->crd_len; 1292 cpoffset = cpskip + dtheend; 1293 coffset = 0; 1294 } 1295 ctx.pc_offset = htole16(coffset >> 2); 1296 1297 if (bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT, &q->q_src_map)) { 1298 ubsecstats.hst_nomap++; 1299 err = ENOMEM; 1300 goto errout; 1301 } 1302 if (crp->crp_flags & CRYPTO_F_IMBUF) { 1303 if (bus_dmamap_load_mbuf(sc->sc_dmat, q->q_src_map, 1304 q->q_src_m, ubsec_op_cb, &q->q_src, BUS_DMA_NOWAIT) != 0) { 1305 bus_dmamap_destroy(sc->sc_dmat, q->q_src_map); 1306 q->q_src_map = NULL; 1307 ubsecstats.hst_noload++; 1308 err = ENOMEM; 1309 goto errout; 1310 } 1311 } else if (crp->crp_flags & CRYPTO_F_IOV) { 1312 if (bus_dmamap_load_uio(sc->sc_dmat, q->q_src_map, 1313 q->q_src_io, ubsec_op_cb, &q->q_src, BUS_DMA_NOWAIT) != 0) { 1314 bus_dmamap_destroy(sc->sc_dmat, q->q_src_map); 1315 q->q_src_map = NULL; 1316 ubsecstats.hst_noload++; 1317 err = ENOMEM; 1318 goto errout; 1319 } 1320 } 1321 nicealign = ubsec_dmamap_aligned(&q->q_src); 1322 1323 dmap->d_dma->d_mcr.mcr_pktlen = htole16(stheend); 1324 1325 #ifdef UBSEC_DEBUG 1326 if (ubsec_debug) 1327 kprintf("src skip: %d nicealign: %u\n", sskip, nicealign); 1328 #endif 1329 for (i = j = 0; i < q->q_src_nsegs; i++) { 1330 struct ubsec_pktbuf *pb; 1331 bus_size_t packl = q->q_src_segs[i].ds_len; 1332 bus_addr_t packp = q->q_src_segs[i].ds_addr; 1333 1334 if (sskip >= packl) { 1335 sskip -= packl; 1336 continue; 1337 } 1338 1339 packl -= sskip; 1340 packp += sskip; 1341 sskip = 0; 1342 1343 if (packl > 0xfffc) { 1344 err = EIO; 1345 goto errout; 1346 } 1347 1348 if (j == 0) 1349 pb = &dmap->d_dma->d_mcr.mcr_ipktbuf; 1350 else 1351 pb = &dmap->d_dma->d_sbuf[j - 1]; 1352 1353 pb->pb_addr = htole32(packp); 1354 1355 if (stheend) { 1356 if (packl > stheend) { 1357 pb->pb_len = htole32(stheend); 1358 stheend = 0; 1359 } else { 1360 pb->pb_len = htole32(packl); 1361 stheend -= packl; 1362 } 1363 } else 1364 pb->pb_len = htole32(packl); 1365 1366 if ((i + 1) == q->q_src_nsegs) 1367 pb->pb_next = 0; 1368 else 1369 pb->pb_next = htole32(dmap->d_alloc.dma_paddr + 1370 offsetof(struct ubsec_dmachunk, d_sbuf[j])); 1371 j++; 1372 } 1373 1374 if (enccrd == NULL && maccrd != NULL) { 1375 dmap->d_dma->d_mcr.mcr_opktbuf.pb_addr = 0; 1376 dmap->d_dma->d_mcr.mcr_opktbuf.pb_len = 0; 1377 dmap->d_dma->d_mcr.mcr_opktbuf.pb_next = htole32(dmap->d_alloc.dma_paddr + 1378 offsetof(struct ubsec_dmachunk, d_macbuf[0])); 1379 
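		/*
		 * With no encryption output, the single output link above
		 * points the chip directly at d_macbuf so only the computed
		 * digest is written back.
		 */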
#ifdef UBSEC_DEBUG 1380 if (ubsec_debug) 1381 kprintf("opkt: %x %x %x\n", 1382 dmap->d_dma->d_mcr.mcr_opktbuf.pb_addr, 1383 dmap->d_dma->d_mcr.mcr_opktbuf.pb_len, 1384 dmap->d_dma->d_mcr.mcr_opktbuf.pb_next); 1385 #endif 1386 } else { 1387 if (crp->crp_flags & CRYPTO_F_IOV) { 1388 if (!nicealign) { 1389 ubsecstats.hst_iovmisaligned++; 1390 err = EINVAL; 1391 goto errout; 1392 } 1393 if (bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT, 1394 &q->q_dst_map)) { 1395 ubsecstats.hst_nomap++; 1396 err = ENOMEM; 1397 goto errout; 1398 } 1399 if (bus_dmamap_load_uio(sc->sc_dmat, q->q_dst_map, 1400 q->q_dst_io, ubsec_op_cb, &q->q_dst, BUS_DMA_NOWAIT) != 0) { 1401 bus_dmamap_destroy(sc->sc_dmat, q->q_dst_map); 1402 q->q_dst_map = NULL; 1403 ubsecstats.hst_noload++; 1404 err = ENOMEM; 1405 goto errout; 1406 } 1407 } else if (crp->crp_flags & CRYPTO_F_IMBUF) { 1408 if (nicealign) { 1409 q->q_dst = q->q_src; 1410 } else { 1411 int totlen, len; 1412 struct mbuf *m, *top, **mp; 1413 1414 ubsecstats.hst_unaligned++; 1415 totlen = q->q_src_mapsize; 1416 if (q->q_src_m->m_flags & M_PKTHDR) { 1417 len = MHLEN; 1418 MGETHDR(m, MB_DONTWAIT, MT_DATA); 1419 if (m && !m_dup_pkthdr(m, q->q_src_m, MB_DONTWAIT)) { 1420 m_free(m); 1421 m = NULL; 1422 } 1423 } else { 1424 len = MLEN; 1425 MGET(m, MB_DONTWAIT, MT_DATA); 1426 } 1427 if (m == NULL) { 1428 ubsecstats.hst_nombuf++; 1429 err = sc->sc_nqueue ? ERESTART : ENOMEM; 1430 goto errout; 1431 } 1432 if (totlen >= MINCLSIZE) { 1433 MCLGET(m, MB_DONTWAIT); 1434 if ((m->m_flags & M_EXT) == 0) { 1435 m_free(m); 1436 ubsecstats.hst_nomcl++; 1437 err = sc->sc_nqueue ? ERESTART : ENOMEM; 1438 goto errout; 1439 } 1440 len = MCLBYTES; 1441 } 1442 m->m_len = len; 1443 top = NULL; 1444 mp = ⊤ 1445 1446 while (totlen > 0) { 1447 if (top) { 1448 MGET(m, MB_DONTWAIT, MT_DATA); 1449 if (m == NULL) { 1450 m_freem(top); 1451 ubsecstats.hst_nombuf++; 1452 err = sc->sc_nqueue ? ERESTART : ENOMEM; 1453 goto errout; 1454 } 1455 len = MLEN; 1456 } 1457 if (top && totlen >= MINCLSIZE) { 1458 MCLGET(m, MB_DONTWAIT); 1459 if ((m->m_flags & M_EXT) == 0) { 1460 *mp = m; 1461 m_freem(top); 1462 ubsecstats.hst_nomcl++; 1463 err = sc->sc_nqueue ? 
ERESTART : ENOMEM; 1464 goto errout; 1465 } 1466 len = MCLBYTES; 1467 } 1468 m->m_len = len = min(totlen, len); 1469 totlen -= len; 1470 *mp = m; 1471 mp = &m->m_next; 1472 } 1473 q->q_dst_m = top; 1474 ubsec_mcopy(q->q_src_m, q->q_dst_m, 1475 cpskip, cpoffset); 1476 if (bus_dmamap_create(sc->sc_dmat, 1477 BUS_DMA_NOWAIT, &q->q_dst_map) != 0) { 1478 ubsecstats.hst_nomap++; 1479 err = ENOMEM; 1480 goto errout; 1481 } 1482 if (bus_dmamap_load_mbuf(sc->sc_dmat, 1483 q->q_dst_map, q->q_dst_m, 1484 ubsec_op_cb, &q->q_dst, 1485 BUS_DMA_NOWAIT) != 0) { 1486 bus_dmamap_destroy(sc->sc_dmat, 1487 q->q_dst_map); 1488 q->q_dst_map = NULL; 1489 ubsecstats.hst_noload++; 1490 err = ENOMEM; 1491 goto errout; 1492 } 1493 } 1494 } else { 1495 ubsecstats.hst_badflags++; 1496 err = EINVAL; 1497 goto errout; 1498 } 1499 1500 #ifdef UBSEC_DEBUG 1501 if (ubsec_debug) 1502 kprintf("dst skip: %d\n", dskip); 1503 #endif 1504 for (i = j = 0; i < q->q_dst_nsegs; i++) { 1505 struct ubsec_pktbuf *pb; 1506 bus_size_t packl = q->q_dst_segs[i].ds_len; 1507 bus_addr_t packp = q->q_dst_segs[i].ds_addr; 1508 1509 if (dskip >= packl) { 1510 dskip -= packl; 1511 continue; 1512 } 1513 1514 packl -= dskip; 1515 packp += dskip; 1516 dskip = 0; 1517 1518 if (packl > 0xfffc) { 1519 err = EIO; 1520 goto errout; 1521 } 1522 1523 if (j == 0) 1524 pb = &dmap->d_dma->d_mcr.mcr_opktbuf; 1525 else 1526 pb = &dmap->d_dma->d_dbuf[j - 1]; 1527 1528 pb->pb_addr = htole32(packp); 1529 1530 if (dtheend) { 1531 if (packl > dtheend) { 1532 pb->pb_len = htole32(dtheend); 1533 dtheend = 0; 1534 } else { 1535 pb->pb_len = htole32(packl); 1536 dtheend -= packl; 1537 } 1538 } else 1539 pb->pb_len = htole32(packl); 1540 1541 if ((i + 1) == q->q_dst_nsegs) { 1542 if (maccrd) 1543 pb->pb_next = htole32(dmap->d_alloc.dma_paddr + 1544 offsetof(struct ubsec_dmachunk, d_macbuf[0])); 1545 else 1546 pb->pb_next = 0; 1547 } else 1548 pb->pb_next = htole32(dmap->d_alloc.dma_paddr + 1549 offsetof(struct ubsec_dmachunk, d_dbuf[j])); 1550 j++; 1551 } 1552 } 1553 1554 dmap->d_dma->d_mcr.mcr_cmdctxp = htole32(dmap->d_alloc.dma_paddr + 1555 offsetof(struct ubsec_dmachunk, d_ctx)); 1556 1557 if (sc->sc_flags & UBS_FLAGS_LONGCTX) { 1558 struct ubsec_pktctx_long *ctxl; 1559 1560 ctxl = (struct ubsec_pktctx_long *)(dmap->d_alloc.dma_vaddr + 1561 offsetof(struct ubsec_dmachunk, d_ctx)); 1562 1563 /* transform small context into long context */ 1564 ctxl->pc_len = htole16(sizeof(struct ubsec_pktctx_long)); 1565 ctxl->pc_type = htole16(UBS_PKTCTX_TYPE_IPSEC); 1566 ctxl->pc_flags = ctx.pc_flags; 1567 ctxl->pc_offset = ctx.pc_offset; 1568 for (i = 0; i < 6; i++) 1569 ctxl->pc_deskey[i] = ctx.pc_deskey[i]; 1570 for (i = 0; i < 5; i++) 1571 ctxl->pc_hminner[i] = ctx.pc_hminner[i]; 1572 for (i = 0; i < 5; i++) 1573 ctxl->pc_hmouter[i] = ctx.pc_hmouter[i]; 1574 ctxl->pc_iv[0] = ctx.pc_iv[0]; 1575 ctxl->pc_iv[1] = ctx.pc_iv[1]; 1576 } else 1577 bcopy(&ctx, dmap->d_alloc.dma_vaddr + 1578 offsetof(struct ubsec_dmachunk, d_ctx), 1579 sizeof(struct ubsec_pktctx)); 1580 1581 crit_enter(); 1582 SIMPLEQ_INSERT_TAIL(&sc->sc_queue, q, q_next); 1583 sc->sc_nqueue++; 1584 ubsecstats.hst_ipackets++; 1585 ubsecstats.hst_ibytes += dmap->d_alloc.dma_size; 1586 if ((hint & CRYPTO_HINT_MORE) == 0 || sc->sc_nqueue >= UBS_MAX_AGGR) 1587 ubsec_feed(sc); 1588 crit_exit(); 1589 return (0); 1590 1591 errout: 1592 if (q != NULL) { 1593 if ((q->q_dst_m != NULL) && (q->q_src_m != q->q_dst_m)) 1594 m_freem(q->q_dst_m); 1595 1596 if (q->q_dst_map != NULL && q->q_dst_map != q->q_src_map) { 1597 
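			/*
			 * Only tear down the destination map when it is
			 * distinct from the source map (i.e. a separate
			 * destination mapping was created above).
			 */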
bus_dmamap_unload(sc->sc_dmat, q->q_dst_map); 1598 bus_dmamap_destroy(sc->sc_dmat, q->q_dst_map); 1599 } 1600 if (q->q_src_map != NULL) { 1601 bus_dmamap_unload(sc->sc_dmat, q->q_src_map); 1602 bus_dmamap_destroy(sc->sc_dmat, q->q_src_map); 1603 } 1604 1605 crit_enter(); 1606 SIMPLEQ_INSERT_TAIL(&sc->sc_freequeue, q, q_next); 1607 crit_exit(); 1608 } 1609 if (err != ERESTART) { 1610 crp->crp_etype = err; 1611 crypto_done(crp); 1612 } else { 1613 sc->sc_needwakeup |= CRYPTO_SYMQ; 1614 } 1615 return (err); 1616 } 1617 1618 static void 1619 ubsec_callback(struct ubsec_softc *sc, struct ubsec_q *q) 1620 { 1621 struct cryptop *crp = (struct cryptop *)q->q_crp; 1622 struct cryptodesc *crd; 1623 struct ubsec_dma *dmap = q->q_dma; 1624 1625 ubsecstats.hst_opackets++; 1626 ubsecstats.hst_obytes += dmap->d_alloc.dma_size; 1627 1628 ubsec_dma_sync(&dmap->d_alloc, 1629 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 1630 if (q->q_dst_map != NULL && q->q_dst_map != q->q_src_map) { 1631 bus_dmamap_sync(sc->sc_dmat, q->q_dst_map, 1632 BUS_DMASYNC_POSTREAD); 1633 bus_dmamap_unload(sc->sc_dmat, q->q_dst_map); 1634 bus_dmamap_destroy(sc->sc_dmat, q->q_dst_map); 1635 } 1636 bus_dmamap_sync(sc->sc_dmat, q->q_src_map, BUS_DMASYNC_POSTWRITE); 1637 bus_dmamap_unload(sc->sc_dmat, q->q_src_map); 1638 bus_dmamap_destroy(sc->sc_dmat, q->q_src_map); 1639 1640 if ((crp->crp_flags & CRYPTO_F_IMBUF) && (q->q_src_m != q->q_dst_m)) { 1641 m_freem(q->q_src_m); 1642 crp->crp_buf = (caddr_t)q->q_dst_m; 1643 } 1644 ubsecstats.hst_obytes += ((struct mbuf *)crp->crp_buf)->m_len; 1645 1646 /* copy out IV for future use */ 1647 if (q->q_flags & UBSEC_QFLAGS_COPYOUTIV) { 1648 for (crd = crp->crp_desc; crd; crd = crd->crd_next) { 1649 if (crd->crd_alg != CRYPTO_DES_CBC && 1650 crd->crd_alg != CRYPTO_3DES_CBC) 1651 continue; 1652 if (crp->crp_flags & CRYPTO_F_IMBUF) 1653 m_copydata((struct mbuf *)crp->crp_buf, 1654 crd->crd_skip + crd->crd_len - 8, 8, 1655 (caddr_t)sc->sc_sessions[q->q_sesn].ses_iv); 1656 else if (crp->crp_flags & CRYPTO_F_IOV) { 1657 cuio_copydata((struct uio *)crp->crp_buf, 1658 crd->crd_skip + crd->crd_len - 8, 8, 1659 (caddr_t)sc->sc_sessions[q->q_sesn].ses_iv); 1660 } 1661 break; 1662 } 1663 } 1664 1665 for (crd = crp->crp_desc; crd; crd = crd->crd_next) { 1666 if (crd->crd_alg != CRYPTO_MD5_HMAC && 1667 crd->crd_alg != CRYPTO_SHA1_HMAC) 1668 continue; 1669 crypto_copyback(crp->crp_flags, crp->crp_buf, crd->crd_inject, 1670 sc->sc_sessions[q->q_sesn].ses_mlen, 1671 (caddr_t)dmap->d_dma->d_macbuf); 1672 #if 0 1673 if (crp->crp_flags & CRYPTO_F_IMBUF) 1674 m_copyback((struct mbuf *)crp->crp_buf, 1675 crd->crd_inject, 12, 1676 (caddr_t)dmap->d_dma->d_macbuf); 1677 else if (crp->crp_flags & CRYPTO_F_IOV && crp->crp_mac) 1678 bcopy((caddr_t)dmap->d_dma->d_macbuf, 1679 crp->crp_mac, 12); 1680 break; 1681 #endif 1682 } 1683 SIMPLEQ_INSERT_TAIL(&sc->sc_freequeue, q, q_next); 1684 crypto_done(crp); 1685 } 1686 1687 static void 1688 ubsec_mcopy(struct mbuf *srcm, struct mbuf *dstm, int hoffset, int toffset) 1689 { 1690 int i, j, dlen, slen; 1691 caddr_t dptr, sptr; 1692 1693 j = 0; 1694 sptr = srcm->m_data; 1695 slen = srcm->m_len; 1696 dptr = dstm->m_data; 1697 dlen = dstm->m_len; 1698 1699 while (1) { 1700 for (i = 0; i < min(slen, dlen); i++) { 1701 if (j < hoffset || j >= toffset) 1702 *dptr++ = *sptr++; 1703 slen--; 1704 dlen--; 1705 j++; 1706 } 1707 if (slen == 0) { 1708 srcm = srcm->m_next; 1709 if (srcm == NULL) 1710 return; 1711 sptr = srcm->m_data; 1712 slen = srcm->m_len; 1713 } 1714 if (dlen == 0) { 1715 dstm = 
dstm->m_next; 1716 if (dstm == NULL) 1717 return; 1718 dptr = dstm->m_data; 1719 dlen = dstm->m_len; 1720 } 1721 } 1722 } 1723 1724 /* 1725 * feed the key generator, must be called at splimp() or higher. 1726 */ 1727 static int 1728 ubsec_feed2(struct ubsec_softc *sc) 1729 { 1730 struct ubsec_q2 *q; 1731 1732 while (!SIMPLEQ_EMPTY(&sc->sc_queue2)) { 1733 if (READ_REG(sc, BS_STAT) & BS_STAT_MCR2_FULL) 1734 break; 1735 q = SIMPLEQ_FIRST(&sc->sc_queue2); 1736 1737 ubsec_dma_sync(&q->q_mcr, 1738 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1739 ubsec_dma_sync(&q->q_ctx, BUS_DMASYNC_PREWRITE); 1740 1741 WRITE_REG(sc, BS_MCR2, q->q_mcr.dma_paddr); 1742 SIMPLEQ_REMOVE_HEAD(&sc->sc_queue2, q_next); 1743 --sc->sc_nqueue2; 1744 SIMPLEQ_INSERT_TAIL(&sc->sc_qchip2, q, q_next); 1745 } 1746 return (0); 1747 } 1748 1749 /* 1750 * Callback for handling random numbers 1751 */ 1752 static void 1753 ubsec_callback2(struct ubsec_softc *sc, struct ubsec_q2 *q) 1754 { 1755 struct cryptkop *krp; 1756 struct ubsec_ctx_keyop *ctx; 1757 1758 ctx = (struct ubsec_ctx_keyop *)q->q_ctx.dma_vaddr; 1759 ubsec_dma_sync(&q->q_ctx, BUS_DMASYNC_POSTWRITE); 1760 1761 switch (q->q_type) { 1762 #ifndef UBSEC_NO_RNG 1763 case UBS_CTXOP_RNGBYPASS: { 1764 struct ubsec_q2_rng *rng = (struct ubsec_q2_rng *)q; 1765 1766 ubsec_dma_sync(&rng->rng_buf, BUS_DMASYNC_POSTREAD); 1767 (*sc->sc_harvest)(sc->sc_rndtest, 1768 rng->rng_buf.dma_vaddr, 1769 UBSEC_RNG_BUFSIZ*sizeof (u_int32_t)); 1770 rng->rng_used = 0; 1771 callout_reset(&sc->sc_rngto, sc->sc_rnghz, ubsec_rng, sc); 1772 break; 1773 } 1774 #endif 1775 case UBS_CTXOP_MODEXP: { 1776 struct ubsec_q2_modexp *me = (struct ubsec_q2_modexp *)q; 1777 u_int rlen, clen; 1778 1779 krp = me->me_krp; 1780 rlen = (me->me_modbits + 7) / 8; 1781 clen = (krp->krp_param[krp->krp_iparams].crp_nbits + 7) / 8; 1782 1783 ubsec_dma_sync(&me->me_M, BUS_DMASYNC_POSTWRITE); 1784 ubsec_dma_sync(&me->me_E, BUS_DMASYNC_POSTWRITE); 1785 ubsec_dma_sync(&me->me_C, BUS_DMASYNC_POSTREAD); 1786 ubsec_dma_sync(&me->me_epb, BUS_DMASYNC_POSTWRITE); 1787 1788 if (clen < rlen) 1789 krp->krp_status = E2BIG; 1790 else { 1791 if (sc->sc_flags & UBS_FLAGS_HWNORM) { 1792 bzero(krp->krp_param[krp->krp_iparams].crp_p, 1793 (krp->krp_param[krp->krp_iparams].crp_nbits 1794 + 7) / 8); 1795 bcopy(me->me_C.dma_vaddr, 1796 krp->krp_param[krp->krp_iparams].crp_p, 1797 (me->me_modbits + 7) / 8); 1798 } else 1799 ubsec_kshift_l(me->me_shiftbits, 1800 me->me_C.dma_vaddr, me->me_normbits, 1801 krp->krp_param[krp->krp_iparams].crp_p, 1802 krp->krp_param[krp->krp_iparams].crp_nbits); 1803 } 1804 1805 crypto_kdone(krp); 1806 1807 /* bzero all potentially sensitive data */ 1808 bzero(me->me_E.dma_vaddr, me->me_E.dma_size); 1809 bzero(me->me_M.dma_vaddr, me->me_M.dma_size); 1810 bzero(me->me_C.dma_vaddr, me->me_C.dma_size); 1811 bzero(me->me_q.q_ctx.dma_vaddr, me->me_q.q_ctx.dma_size); 1812 1813 /* Can't free here, so put us on the free list. 
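		 * The DMA buffers are reclaimed later by ubsec_kfree();
		 * ubsec_kprocess() drains sc_q2free before queueing new key
		 * operations.
		 */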
*/ 1814 SIMPLEQ_INSERT_TAIL(&sc->sc_q2free, &me->me_q, q_next); 1815 break; 1816 } 1817 case UBS_CTXOP_RSAPRIV: { 1818 struct ubsec_q2_rsapriv *rp = (struct ubsec_q2_rsapriv *)q; 1819 u_int len; 1820 1821 krp = rp->rpr_krp; 1822 ubsec_dma_sync(&rp->rpr_msgin, BUS_DMASYNC_POSTWRITE); 1823 ubsec_dma_sync(&rp->rpr_msgout, BUS_DMASYNC_POSTREAD); 1824 1825 len = (krp->krp_param[UBS_RSAPRIV_PAR_MSGOUT].crp_nbits + 7) / 8; 1826 bcopy(rp->rpr_msgout.dma_vaddr, 1827 krp->krp_param[UBS_RSAPRIV_PAR_MSGOUT].crp_p, len); 1828 1829 crypto_kdone(krp); 1830 1831 bzero(rp->rpr_msgin.dma_vaddr, rp->rpr_msgin.dma_size); 1832 bzero(rp->rpr_msgout.dma_vaddr, rp->rpr_msgout.dma_size); 1833 bzero(rp->rpr_q.q_ctx.dma_vaddr, rp->rpr_q.q_ctx.dma_size); 1834 1835 /* Can't free here, so put us on the free list. */ 1836 SIMPLEQ_INSERT_TAIL(&sc->sc_q2free, &rp->rpr_q, q_next); 1837 break; 1838 } 1839 default: 1840 device_printf(sc->sc_dev, "unknown ctx op: %x\n", 1841 letoh16(ctx->ctx_op)); 1842 break; 1843 } 1844 } 1845 1846 #ifndef UBSEC_NO_RNG 1847 static void 1848 ubsec_rng(void *vsc) 1849 { 1850 struct ubsec_softc *sc = vsc; 1851 struct ubsec_q2_rng *rng = &sc->sc_rng; 1852 struct ubsec_mcr *mcr; 1853 struct ubsec_ctx_rngbypass *ctx; 1854 1855 crit_enter(); 1856 if (rng->rng_used) { 1857 crit_exit(); 1858 return; 1859 } 1860 sc->sc_nqueue2++; 1861 if (sc->sc_nqueue2 >= UBS_MAX_NQUEUE) 1862 goto out; 1863 1864 mcr = (struct ubsec_mcr *)rng->rng_q.q_mcr.dma_vaddr; 1865 ctx = (struct ubsec_ctx_rngbypass *)rng->rng_q.q_ctx.dma_vaddr; 1866 1867 mcr->mcr_pkts = htole16(1); 1868 mcr->mcr_flags = 0; 1869 mcr->mcr_cmdctxp = htole32(rng->rng_q.q_ctx.dma_paddr); 1870 mcr->mcr_ipktbuf.pb_addr = mcr->mcr_ipktbuf.pb_next = 0; 1871 mcr->mcr_ipktbuf.pb_len = 0; 1872 mcr->mcr_reserved = mcr->mcr_pktlen = 0; 1873 mcr->mcr_opktbuf.pb_addr = htole32(rng->rng_buf.dma_paddr); 1874 mcr->mcr_opktbuf.pb_len = htole32(((sizeof(u_int32_t) * UBSEC_RNG_BUFSIZ)) & 1875 UBS_PKTBUF_LEN); 1876 mcr->mcr_opktbuf.pb_next = 0; 1877 1878 ctx->rbp_len = htole16(sizeof(struct ubsec_ctx_rngbypass)); 1879 ctx->rbp_op = htole16(UBS_CTXOP_RNGBYPASS); 1880 rng->rng_q.q_type = UBS_CTXOP_RNGBYPASS; 1881 1882 ubsec_dma_sync(&rng->rng_buf, BUS_DMASYNC_PREREAD); 1883 1884 SIMPLEQ_INSERT_TAIL(&sc->sc_queue2, &rng->rng_q, q_next); 1885 rng->rng_used = 1; 1886 ubsec_feed2(sc); 1887 ubsecstats.hst_rng++; 1888 crit_exit(); 1889 1890 return; 1891 1892 out: 1893 /* 1894 * Something weird happened, generate our own call back. 
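	 * (the second queue already holds UBS_MAX_NQUEUE entries, so the
	 * RNG request is abandoned and the callout below simply retries
	 * later)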
1895 */ 1896 sc->sc_nqueue2--; 1897 crit_exit(); 1898 callout_reset(&sc->sc_rngto, sc->sc_rnghz, ubsec_rng, sc); 1899 } 1900 #endif /* UBSEC_NO_RNG */ 1901 1902 static void 1903 ubsec_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error) 1904 { 1905 bus_addr_t *paddr = (bus_addr_t*) arg; 1906 *paddr = segs->ds_addr; 1907 } 1908 1909 static int 1910 ubsec_dma_malloc( 1911 struct ubsec_softc *sc, 1912 bus_size_t size, 1913 struct ubsec_dma_alloc *dma, 1914 int mapflags 1915 ) 1916 { 1917 int r; 1918 1919 /* XXX could specify sc_dmat as parent but that just adds overhead */ 1920 r = bus_dma_tag_create(NULL, /* parent */ 1921 1, 0, /* alignment, bounds */ 1922 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ 1923 BUS_SPACE_MAXADDR, /* highaddr */ 1924 NULL, NULL, /* filter, filterarg */ 1925 size, /* maxsize */ 1926 1, /* nsegments */ 1927 size, /* maxsegsize */ 1928 BUS_DMA_ALLOCNOW, /* flags */ 1929 &dma->dma_tag); 1930 if (r != 0) { 1931 device_printf(sc->sc_dev, "ubsec_dma_malloc: " 1932 "bus_dma_tag_create failed; error %u\n", r); 1933 goto fail_0; 1934 } 1935 1936 r = bus_dmamap_create(dma->dma_tag, BUS_DMA_NOWAIT, &dma->dma_map); 1937 if (r != 0) { 1938 device_printf(sc->sc_dev, "ubsec_dma_malloc: " 1939 "bus_dmamap_create failed; error %u\n", r); 1940 goto fail_1; 1941 } 1942 1943 r = bus_dmamem_alloc(dma->dma_tag, (void**) &dma->dma_vaddr, 1944 BUS_DMA_NOWAIT, &dma->dma_map); 1945 if (r != 0) { 1946 device_printf(sc->sc_dev, "ubsec_dma_malloc: " 1947 "bus_dmammem_alloc failed; size %ju, error %u\n", 1948 (intmax_t)size, r); 1949 goto fail_2; 1950 } 1951 1952 r = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr, 1953 size, 1954 ubsec_dmamap_cb, 1955 &dma->dma_paddr, 1956 mapflags | BUS_DMA_NOWAIT); 1957 if (r != 0) { 1958 device_printf(sc->sc_dev, "ubsec_dma_malloc: " 1959 "bus_dmamap_load failed; error %u\n", r); 1960 goto fail_3; 1961 } 1962 1963 dma->dma_size = size; 1964 return (0); 1965 1966 fail_3: 1967 bus_dmamap_unload(dma->dma_tag, dma->dma_map); 1968 fail_2: 1969 bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map); 1970 fail_1: 1971 bus_dmamap_destroy(dma->dma_tag, dma->dma_map); 1972 bus_dma_tag_destroy(dma->dma_tag); 1973 fail_0: 1974 dma->dma_map = NULL; 1975 dma->dma_tag = NULL; 1976 return (r); 1977 } 1978 1979 static void 1980 ubsec_dma_free(struct ubsec_softc *sc, struct ubsec_dma_alloc *dma) 1981 { 1982 bus_dmamap_unload(dma->dma_tag, dma->dma_map); 1983 bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map); 1984 bus_dmamap_destroy(dma->dma_tag, dma->dma_map); 1985 bus_dma_tag_destroy(dma->dma_tag); 1986 } 1987 1988 /* 1989 * Resets the board. Values in the regesters are left as is 1990 * from the reset (i.e. initial values are assigned elsewhere). 1991 */ 1992 static void 1993 ubsec_reset_board(struct ubsec_softc *sc) 1994 { 1995 volatile u_int32_t ctrl; 1996 1997 ctrl = READ_REG(sc, BS_CTRL); 1998 ctrl |= BS_CTRL_RESET; 1999 WRITE_REG(sc, BS_CTRL, ctrl); 2000 2001 /* 2002 * Wait aprox. 
/*
 * Resets the board.  Values in the registers are left as is
 * from the reset (i.e. initial values are assigned elsewhere).
 */
static void
ubsec_reset_board(struct ubsec_softc *sc)
{
	volatile u_int32_t ctrl;

	ctrl = READ_REG(sc, BS_CTRL);
	ctrl |= BS_CTRL_RESET;
	WRITE_REG(sc, BS_CTRL, ctrl);

	/*
	 * Wait approx. 30 PCI clocks = 900 ns = 0.9 us
	 */
	DELAY(10);
}

/*
 * Init Broadcom registers
 */
static void
ubsec_init_board(struct ubsec_softc *sc)
{
	u_int32_t ctrl;

	ctrl = READ_REG(sc, BS_CTRL);
	ctrl &= ~(BS_CTRL_BE32 | BS_CTRL_BE64);
	ctrl |= BS_CTRL_LITTLE_ENDIAN | BS_CTRL_MCR1INT;

	if (sc->sc_flags & (UBS_FLAGS_KEY|UBS_FLAGS_RNG))
		ctrl |= BS_CTRL_MCR2INT;
	else
		ctrl &= ~BS_CTRL_MCR2INT;

	if (sc->sc_flags & UBS_FLAGS_HWNORM)
		ctrl &= ~BS_CTRL_SWNORM;

	WRITE_REG(sc, BS_CTRL, ctrl);
}

/*
 * Init Broadcom PCI registers
 */
static void
ubsec_init_pciregs(device_t dev)
{
#if 0
	u_int32_t misc;

	misc = pci_conf_read(pc, pa->pa_tag, BS_RTY_TOUT);
	misc = (misc & ~(UBS_PCI_RTY_MASK << UBS_PCI_RTY_SHIFT))
	    | ((UBS_DEF_RTY & 0xff) << UBS_PCI_RTY_SHIFT);
	misc = (misc & ~(UBS_PCI_TOUT_MASK << UBS_PCI_TOUT_SHIFT))
	    | ((UBS_DEF_TOUT & 0xff) << UBS_PCI_TOUT_SHIFT);
	pci_conf_write(pc, pa->pa_tag, BS_RTY_TOUT, misc);
#endif

	/*
	 * This sets the cache line size to 1, which forces the
	 * BCM58xx chip to do only burst read/writes.
	 * Cache line read/writes are too slow.
	 */
	pci_write_config(dev, PCIR_CACHELNSZ, UBS_DEF_CACHELINE, 1);
}

/*
 * Clean up after a chip crash.
 * It is assumed that the caller is in splimp().
 */
static void
ubsec_cleanchip(struct ubsec_softc *sc)
{
	struct ubsec_q *q;

	while (!SIMPLEQ_EMPTY(&sc->sc_qchip)) {
		q = SIMPLEQ_FIRST(&sc->sc_qchip);
		SIMPLEQ_REMOVE_HEAD(&sc->sc_qchip, q_next);
		ubsec_free_q(sc, q);
	}
	sc->sc_nqchip = 0;
}

/*
 * Free a ubsec_q.
 * It is assumed that the caller is within splimp().
 */
static int
ubsec_free_q(struct ubsec_softc *sc, struct ubsec_q *q)
{
	struct ubsec_q *q2;
	struct cryptop *crp;
	int npkts;
	int i;

	npkts = q->q_nstacked_mcrs;

	for (i = 0; i < npkts; i++) {
		if (q->q_stacked_mcr[i]) {
			q2 = q->q_stacked_mcr[i];

			if ((q2->q_dst_m != NULL) && (q2->q_src_m != q2->q_dst_m))
				m_freem(q2->q_dst_m);

			crp = (struct cryptop *)q2->q_crp;

			SIMPLEQ_INSERT_TAIL(&sc->sc_freequeue, q2, q_next);

			crp->crp_etype = EFAULT;
			crypto_done(crp);
		} else {
			break;
		}
	}

	/*
	 * Free header MCR
	 */
	if ((q->q_dst_m != NULL) && (q->q_src_m != q->q_dst_m))
		m_freem(q->q_dst_m);

	crp = (struct cryptop *)q->q_crp;

	SIMPLEQ_INSERT_TAIL(&sc->sc_freequeue, q, q_next);

	crp->crp_etype = EFAULT;
	crypto_done(crp);
	return (0);
}

/*
 * Routine to reset the chip and clean up.
2121 * It is assumed that the caller is in splimp() 2122 */ 2123 static void 2124 ubsec_totalreset(struct ubsec_softc *sc) 2125 { 2126 ubsec_reset_board(sc); 2127 ubsec_init_board(sc); 2128 ubsec_cleanchip(sc); 2129 } 2130 2131 static int 2132 ubsec_dmamap_aligned(struct ubsec_operand *op) 2133 { 2134 int i; 2135 2136 for (i = 0; i < op->nsegs; i++) { 2137 if (op->segs[i].ds_addr & 3) 2138 return (0); 2139 if ((i != (op->nsegs - 1)) && 2140 (op->segs[i].ds_len & 3)) 2141 return (0); 2142 } 2143 return (1); 2144 } 2145 2146 static void 2147 ubsec_kfree(struct ubsec_softc *sc, struct ubsec_q2 *q) 2148 { 2149 switch (q->q_type) { 2150 case UBS_CTXOP_MODEXP: { 2151 struct ubsec_q2_modexp *me = (struct ubsec_q2_modexp *)q; 2152 2153 ubsec_dma_free(sc, &me->me_q.q_mcr); 2154 ubsec_dma_free(sc, &me->me_q.q_ctx); 2155 ubsec_dma_free(sc, &me->me_M); 2156 ubsec_dma_free(sc, &me->me_E); 2157 ubsec_dma_free(sc, &me->me_C); 2158 ubsec_dma_free(sc, &me->me_epb); 2159 kfree(me, M_DEVBUF); 2160 break; 2161 } 2162 case UBS_CTXOP_RSAPRIV: { 2163 struct ubsec_q2_rsapriv *rp = (struct ubsec_q2_rsapriv *)q; 2164 2165 ubsec_dma_free(sc, &rp->rpr_q.q_mcr); 2166 ubsec_dma_free(sc, &rp->rpr_q.q_ctx); 2167 ubsec_dma_free(sc, &rp->rpr_msgin); 2168 ubsec_dma_free(sc, &rp->rpr_msgout); 2169 kfree(rp, M_DEVBUF); 2170 break; 2171 } 2172 default: 2173 device_printf(sc->sc_dev, "invalid kfree 0x%x\n", q->q_type); 2174 break; 2175 } 2176 } 2177 2178 static int 2179 ubsec_kprocess(void *arg, struct cryptkop *krp, int hint) 2180 { 2181 struct ubsec_softc *sc = arg; 2182 int r; 2183 2184 if (krp == NULL || krp->krp_callback == NULL) 2185 return (EINVAL); 2186 2187 while (!SIMPLEQ_EMPTY(&sc->sc_q2free)) { 2188 struct ubsec_q2 *q; 2189 2190 q = SIMPLEQ_FIRST(&sc->sc_q2free); 2191 SIMPLEQ_REMOVE_HEAD(&sc->sc_q2free, q_next); 2192 ubsec_kfree(sc, q); 2193 } 2194 2195 switch (krp->krp_op) { 2196 case CRK_MOD_EXP: 2197 if (sc->sc_flags & UBS_FLAGS_HWNORM) 2198 r = ubsec_kprocess_modexp_hw(sc, krp, hint); 2199 else 2200 r = ubsec_kprocess_modexp_sw(sc, krp, hint); 2201 break; 2202 case CRK_MOD_EXP_CRT: 2203 return (ubsec_kprocess_rsapriv(sc, krp, hint)); 2204 default: 2205 device_printf(sc->sc_dev, "kprocess: invalid op 0x%x\n", 2206 krp->krp_op); 2207 krp->krp_status = EOPNOTSUPP; 2208 crypto_kdone(krp); 2209 return (0); 2210 } 2211 return (0); /* silence compiler */ 2212 } 2213 2214 /* 2215 * Start computation of cr[C] = (cr[M] ^ cr[E]) mod cr[N] (sw normalization) 2216 */ 2217 static int 2218 ubsec_kprocess_modexp_sw(struct ubsec_softc *sc, struct cryptkop *krp, int hint) 2219 { 2220 struct ubsec_q2_modexp *me; 2221 struct ubsec_mcr *mcr; 2222 struct ubsec_ctx_modexp *ctx; 2223 struct ubsec_pktbuf *epb; 2224 int err = 0; 2225 u_int nbits, normbits, mbits, shiftbits, ebits; 2226 2227 me = kmalloc(sizeof *me, M_DEVBUF, M_INTWAIT | M_ZERO); 2228 me->me_krp = krp; 2229 me->me_q.q_type = UBS_CTXOP_MODEXP; 2230 2231 nbits = ubsec_ksigbits(&krp->krp_param[UBS_MODEXP_PAR_N]); 2232 if (nbits <= 512) 2233 normbits = 512; 2234 else if (nbits <= 768) 2235 normbits = 768; 2236 else if (nbits <= 1024) 2237 normbits = 1024; 2238 else if (sc->sc_flags & UBS_FLAGS_BIGKEY && nbits <= 1536) 2239 normbits = 1536; 2240 else if (sc->sc_flags & UBS_FLAGS_BIGKEY && nbits <= 2048) 2241 normbits = 2048; 2242 else { 2243 err = E2BIG; 2244 goto errout; 2245 } 2246 2247 shiftbits = normbits - nbits; 2248 2249 me->me_modbits = nbits; 2250 me->me_shiftbits = shiftbits; 2251 me->me_normbits = normbits; 2252 2253 /* Sanity check: result bits must be >= true 
modulus bits. */ 2254 if (krp->krp_param[krp->krp_iparams].crp_nbits < nbits) { 2255 err = ERANGE; 2256 goto errout; 2257 } 2258 2259 if (ubsec_dma_malloc(sc, sizeof(struct ubsec_mcr), 2260 &me->me_q.q_mcr, 0)) { 2261 err = ENOMEM; 2262 goto errout; 2263 } 2264 mcr = (struct ubsec_mcr *)me->me_q.q_mcr.dma_vaddr; 2265 2266 if (ubsec_dma_malloc(sc, sizeof(struct ubsec_ctx_modexp), 2267 &me->me_q.q_ctx, 0)) { 2268 err = ENOMEM; 2269 goto errout; 2270 } 2271 2272 mbits = ubsec_ksigbits(&krp->krp_param[UBS_MODEXP_PAR_M]); 2273 if (mbits > nbits) { 2274 err = E2BIG; 2275 goto errout; 2276 } 2277 if (ubsec_dma_malloc(sc, normbits / 8, &me->me_M, 0)) { 2278 err = ENOMEM; 2279 goto errout; 2280 } 2281 ubsec_kshift_r(shiftbits, 2282 krp->krp_param[UBS_MODEXP_PAR_M].crp_p, mbits, 2283 me->me_M.dma_vaddr, normbits); 2284 2285 if (ubsec_dma_malloc(sc, normbits / 8, &me->me_C, 0)) { 2286 err = ENOMEM; 2287 goto errout; 2288 } 2289 bzero(me->me_C.dma_vaddr, me->me_C.dma_size); 2290 2291 ebits = ubsec_ksigbits(&krp->krp_param[UBS_MODEXP_PAR_E]); 2292 if (ebits > nbits) { 2293 err = E2BIG; 2294 goto errout; 2295 } 2296 if (ubsec_dma_malloc(sc, normbits / 8, &me->me_E, 0)) { 2297 err = ENOMEM; 2298 goto errout; 2299 } 2300 ubsec_kshift_r(shiftbits, 2301 krp->krp_param[UBS_MODEXP_PAR_E].crp_p, ebits, 2302 me->me_E.dma_vaddr, normbits); 2303 2304 if (ubsec_dma_malloc(sc, sizeof(struct ubsec_pktbuf), 2305 &me->me_epb, 0)) { 2306 err = ENOMEM; 2307 goto errout; 2308 } 2309 epb = (struct ubsec_pktbuf *)me->me_epb.dma_vaddr; 2310 epb->pb_addr = htole32(me->me_E.dma_paddr); 2311 epb->pb_next = 0; 2312 epb->pb_len = htole32(normbits / 8); 2313 2314 #ifdef UBSEC_DEBUG 2315 if (ubsec_debug) { 2316 kprintf("Epb "); 2317 ubsec_dump_pb(epb); 2318 } 2319 #endif 2320 2321 mcr->mcr_pkts = htole16(1); 2322 mcr->mcr_flags = 0; 2323 mcr->mcr_cmdctxp = htole32(me->me_q.q_ctx.dma_paddr); 2324 mcr->mcr_reserved = 0; 2325 mcr->mcr_pktlen = 0; 2326 2327 mcr->mcr_ipktbuf.pb_addr = htole32(me->me_M.dma_paddr); 2328 mcr->mcr_ipktbuf.pb_len = htole32(normbits / 8); 2329 mcr->mcr_ipktbuf.pb_next = htole32(me->me_epb.dma_paddr); 2330 2331 mcr->mcr_opktbuf.pb_addr = htole32(me->me_C.dma_paddr); 2332 mcr->mcr_opktbuf.pb_next = 0; 2333 mcr->mcr_opktbuf.pb_len = htole32(normbits / 8); 2334 2335 #ifdef DIAGNOSTIC 2336 /* Misaligned output buffer will hang the chip. */ 2337 if ((letoh32(mcr->mcr_opktbuf.pb_addr) & 3) != 0) 2338 panic("%s: modexp invalid addr 0x%x\n", 2339 device_get_nameunit(sc->sc_dev), 2340 letoh32(mcr->mcr_opktbuf.pb_addr)); 2341 if ((letoh32(mcr->mcr_opktbuf.pb_len) & 3) != 0) 2342 panic("%s: modexp invalid len 0x%x\n", 2343 device_get_nameunit(sc->sc_dev), 2344 letoh32(mcr->mcr_opktbuf.pb_len)); 2345 #endif 2346 2347 ctx = (struct ubsec_ctx_modexp *)me->me_q.q_ctx.dma_vaddr; 2348 bzero(ctx, sizeof(*ctx)); 2349 ubsec_kshift_r(shiftbits, 2350 krp->krp_param[UBS_MODEXP_PAR_N].crp_p, nbits, 2351 ctx->me_N, normbits); 2352 ctx->me_len = htole16((normbits / 8) + (4 * sizeof(u_int16_t))); 2353 ctx->me_op = htole16(UBS_CTXOP_MODEXP); 2354 ctx->me_E_len = htole16(nbits); 2355 ctx->me_N_len = htole16(nbits); 2356 2357 #ifdef UBSEC_DEBUG 2358 if (ubsec_debug) { 2359 ubsec_dump_mcr(mcr); 2360 ubsec_dump_ctx2((struct ubsec_ctx_keyop *)ctx); 2361 } 2362 #endif 2363 2364 /* 2365 * ubsec_feed2 will sync mcr and ctx, we just need to sync 2366 * everything else. 
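	 * (M, E and the exponent pktbuf are read by the device, hence
	 * PREWRITE; C receives the result, hence PREREAD.)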
2367 */ 2368 ubsec_dma_sync(&me->me_M, BUS_DMASYNC_PREWRITE); 2369 ubsec_dma_sync(&me->me_E, BUS_DMASYNC_PREWRITE); 2370 ubsec_dma_sync(&me->me_C, BUS_DMASYNC_PREREAD); 2371 ubsec_dma_sync(&me->me_epb, BUS_DMASYNC_PREWRITE); 2372 2373 /* Enqueue and we're done... */ 2374 crit_enter(); 2375 SIMPLEQ_INSERT_TAIL(&sc->sc_queue2, &me->me_q, q_next); 2376 ubsec_feed2(sc); 2377 ubsecstats.hst_modexp++; 2378 crit_exit(); 2379 2380 return (0); 2381 2382 errout: 2383 if (me != NULL) { 2384 if (me->me_q.q_mcr.dma_map != NULL) 2385 ubsec_dma_free(sc, &me->me_q.q_mcr); 2386 if (me->me_q.q_ctx.dma_map != NULL) { 2387 bzero(me->me_q.q_ctx.dma_vaddr, me->me_q.q_ctx.dma_size); 2388 ubsec_dma_free(sc, &me->me_q.q_ctx); 2389 } 2390 if (me->me_M.dma_map != NULL) { 2391 bzero(me->me_M.dma_vaddr, me->me_M.dma_size); 2392 ubsec_dma_free(sc, &me->me_M); 2393 } 2394 if (me->me_E.dma_map != NULL) { 2395 bzero(me->me_E.dma_vaddr, me->me_E.dma_size); 2396 ubsec_dma_free(sc, &me->me_E); 2397 } 2398 if (me->me_C.dma_map != NULL) { 2399 bzero(me->me_C.dma_vaddr, me->me_C.dma_size); 2400 ubsec_dma_free(sc, &me->me_C); 2401 } 2402 if (me->me_epb.dma_map != NULL) 2403 ubsec_dma_free(sc, &me->me_epb); 2404 kfree(me, M_DEVBUF); 2405 } 2406 krp->krp_status = err; 2407 crypto_kdone(krp); 2408 return (0); 2409 } 2410 2411 /* 2412 * Start computation of cr[C] = (cr[M] ^ cr[E]) mod cr[N] (hw normalization) 2413 */ 2414 static int 2415 ubsec_kprocess_modexp_hw(struct ubsec_softc *sc, struct cryptkop *krp, int hint) 2416 { 2417 struct ubsec_q2_modexp *me; 2418 struct ubsec_mcr *mcr; 2419 struct ubsec_ctx_modexp *ctx; 2420 struct ubsec_pktbuf *epb; 2421 int err = 0; 2422 u_int nbits, normbits, mbits, shiftbits, ebits; 2423 2424 me = kmalloc(sizeof *me, M_DEVBUF, M_INTWAIT | M_ZERO); 2425 me->me_krp = krp; 2426 me->me_q.q_type = UBS_CTXOP_MODEXP; 2427 2428 nbits = ubsec_ksigbits(&krp->krp_param[UBS_MODEXP_PAR_N]); 2429 if (nbits <= 512) 2430 normbits = 512; 2431 else if (nbits <= 768) 2432 normbits = 768; 2433 else if (nbits <= 1024) 2434 normbits = 1024; 2435 else if (sc->sc_flags & UBS_FLAGS_BIGKEY && nbits <= 1536) 2436 normbits = 1536; 2437 else if (sc->sc_flags & UBS_FLAGS_BIGKEY && nbits <= 2048) 2438 normbits = 2048; 2439 else { 2440 err = E2BIG; 2441 goto errout; 2442 } 2443 2444 shiftbits = normbits - nbits; 2445 2446 /* XXX ??? */ 2447 me->me_modbits = nbits; 2448 me->me_shiftbits = shiftbits; 2449 me->me_normbits = normbits; 2450 2451 /* Sanity check: result bits must be >= true modulus bits. 
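	 * (krp_param[krp_iparams] is the caller-supplied output parameter.)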
*/ 2452 if (krp->krp_param[krp->krp_iparams].crp_nbits < nbits) { 2453 err = ERANGE; 2454 goto errout; 2455 } 2456 2457 if (ubsec_dma_malloc(sc, sizeof(struct ubsec_mcr), 2458 &me->me_q.q_mcr, 0)) { 2459 err = ENOMEM; 2460 goto errout; 2461 } 2462 mcr = (struct ubsec_mcr *)me->me_q.q_mcr.dma_vaddr; 2463 2464 if (ubsec_dma_malloc(sc, sizeof(struct ubsec_ctx_modexp), 2465 &me->me_q.q_ctx, 0)) { 2466 err = ENOMEM; 2467 goto errout; 2468 } 2469 2470 mbits = ubsec_ksigbits(&krp->krp_param[UBS_MODEXP_PAR_M]); 2471 if (mbits > nbits) { 2472 err = E2BIG; 2473 goto errout; 2474 } 2475 if (ubsec_dma_malloc(sc, normbits / 8, &me->me_M, 0)) { 2476 err = ENOMEM; 2477 goto errout; 2478 } 2479 bzero(me->me_M.dma_vaddr, normbits / 8); 2480 bcopy(krp->krp_param[UBS_MODEXP_PAR_M].crp_p, 2481 me->me_M.dma_vaddr, (mbits + 7) / 8); 2482 2483 if (ubsec_dma_malloc(sc, normbits / 8, &me->me_C, 0)) { 2484 err = ENOMEM; 2485 goto errout; 2486 } 2487 bzero(me->me_C.dma_vaddr, me->me_C.dma_size); 2488 2489 ebits = ubsec_ksigbits(&krp->krp_param[UBS_MODEXP_PAR_E]); 2490 if (ebits > nbits) { 2491 err = E2BIG; 2492 goto errout; 2493 } 2494 if (ubsec_dma_malloc(sc, normbits / 8, &me->me_E, 0)) { 2495 err = ENOMEM; 2496 goto errout; 2497 } 2498 bzero(me->me_E.dma_vaddr, normbits / 8); 2499 bcopy(krp->krp_param[UBS_MODEXP_PAR_E].crp_p, 2500 me->me_E.dma_vaddr, (ebits + 7) / 8); 2501 2502 if (ubsec_dma_malloc(sc, sizeof(struct ubsec_pktbuf), 2503 &me->me_epb, 0)) { 2504 err = ENOMEM; 2505 goto errout; 2506 } 2507 epb = (struct ubsec_pktbuf *)me->me_epb.dma_vaddr; 2508 epb->pb_addr = htole32(me->me_E.dma_paddr); 2509 epb->pb_next = 0; 2510 epb->pb_len = htole32((ebits + 7) / 8); 2511 2512 #ifdef UBSEC_DEBUG 2513 if (ubsec_debug) { 2514 kprintf("Epb "); 2515 ubsec_dump_pb(epb); 2516 } 2517 #endif 2518 2519 mcr->mcr_pkts = htole16(1); 2520 mcr->mcr_flags = 0; 2521 mcr->mcr_cmdctxp = htole32(me->me_q.q_ctx.dma_paddr); 2522 mcr->mcr_reserved = 0; 2523 mcr->mcr_pktlen = 0; 2524 2525 mcr->mcr_ipktbuf.pb_addr = htole32(me->me_M.dma_paddr); 2526 mcr->mcr_ipktbuf.pb_len = htole32(normbits / 8); 2527 mcr->mcr_ipktbuf.pb_next = htole32(me->me_epb.dma_paddr); 2528 2529 mcr->mcr_opktbuf.pb_addr = htole32(me->me_C.dma_paddr); 2530 mcr->mcr_opktbuf.pb_next = 0; 2531 mcr->mcr_opktbuf.pb_len = htole32(normbits / 8); 2532 2533 #ifdef DIAGNOSTIC 2534 /* Misaligned output buffer will hang the chip. */ 2535 if ((letoh32(mcr->mcr_opktbuf.pb_addr) & 3) != 0) 2536 panic("%s: modexp invalid addr 0x%x\n", 2537 device_get_nameunit(sc->sc_dev), 2538 letoh32(mcr->mcr_opktbuf.pb_addr)); 2539 if ((letoh32(mcr->mcr_opktbuf.pb_len) & 3) != 0) 2540 panic("%s: modexp invalid len 0x%x\n", 2541 device_get_nameunit(sc->sc_dev), 2542 letoh32(mcr->mcr_opktbuf.pb_len)); 2543 #endif 2544 2545 ctx = (struct ubsec_ctx_modexp *)me->me_q.q_ctx.dma_vaddr; 2546 bzero(ctx, sizeof(*ctx)); 2547 bcopy(krp->krp_param[UBS_MODEXP_PAR_N].crp_p, ctx->me_N, 2548 (nbits + 7) / 8); 2549 ctx->me_len = htole16((normbits / 8) + (4 * sizeof(u_int16_t))); 2550 ctx->me_op = htole16(UBS_CTXOP_MODEXP); 2551 ctx->me_E_len = htole16(ebits); 2552 ctx->me_N_len = htole16(nbits); 2553 2554 #ifdef UBSEC_DEBUG 2555 if (ubsec_debug) { 2556 ubsec_dump_mcr(mcr); 2557 ubsec_dump_ctx2((struct ubsec_ctx_keyop *)ctx); 2558 } 2559 #endif 2560 2561 /* 2562 * ubsec_feed2 will sync mcr and ctx, we just need to sync 2563 * everything else. 
2564 */ 2565 ubsec_dma_sync(&me->me_M, BUS_DMASYNC_PREWRITE); 2566 ubsec_dma_sync(&me->me_E, BUS_DMASYNC_PREWRITE); 2567 ubsec_dma_sync(&me->me_C, BUS_DMASYNC_PREREAD); 2568 ubsec_dma_sync(&me->me_epb, BUS_DMASYNC_PREWRITE); 2569 2570 /* Enqueue and we're done... */ 2571 crit_enter(); 2572 SIMPLEQ_INSERT_TAIL(&sc->sc_queue2, &me->me_q, q_next); 2573 ubsec_feed2(sc); 2574 crit_exit(); 2575 2576 return (0); 2577 2578 errout: 2579 if (me != NULL) { 2580 if (me->me_q.q_mcr.dma_map != NULL) 2581 ubsec_dma_free(sc, &me->me_q.q_mcr); 2582 if (me->me_q.q_ctx.dma_map != NULL) { 2583 bzero(me->me_q.q_ctx.dma_vaddr, me->me_q.q_ctx.dma_size); 2584 ubsec_dma_free(sc, &me->me_q.q_ctx); 2585 } 2586 if (me->me_M.dma_map != NULL) { 2587 bzero(me->me_M.dma_vaddr, me->me_M.dma_size); 2588 ubsec_dma_free(sc, &me->me_M); 2589 } 2590 if (me->me_E.dma_map != NULL) { 2591 bzero(me->me_E.dma_vaddr, me->me_E.dma_size); 2592 ubsec_dma_free(sc, &me->me_E); 2593 } 2594 if (me->me_C.dma_map != NULL) { 2595 bzero(me->me_C.dma_vaddr, me->me_C.dma_size); 2596 ubsec_dma_free(sc, &me->me_C); 2597 } 2598 if (me->me_epb.dma_map != NULL) 2599 ubsec_dma_free(sc, &me->me_epb); 2600 kfree(me, M_DEVBUF); 2601 } 2602 krp->krp_status = err; 2603 crypto_kdone(krp); 2604 return (0); 2605 } 2606 2607 static int 2608 ubsec_kprocess_rsapriv(struct ubsec_softc *sc, struct cryptkop *krp, int hint) 2609 { 2610 struct ubsec_q2_rsapriv *rp = NULL; 2611 struct ubsec_mcr *mcr; 2612 struct ubsec_ctx_rsapriv *ctx; 2613 int err = 0; 2614 u_int padlen, msglen; 2615 2616 msglen = ubsec_ksigbits(&krp->krp_param[UBS_RSAPRIV_PAR_P]); 2617 padlen = ubsec_ksigbits(&krp->krp_param[UBS_RSAPRIV_PAR_Q]); 2618 if (msglen > padlen) 2619 padlen = msglen; 2620 2621 if (padlen <= 256) 2622 padlen = 256; 2623 else if (padlen <= 384) 2624 padlen = 384; 2625 else if (padlen <= 512) 2626 padlen = 512; 2627 else if (sc->sc_flags & UBS_FLAGS_BIGKEY && padlen <= 768) 2628 padlen = 768; 2629 else if (sc->sc_flags & UBS_FLAGS_BIGKEY && padlen <= 1024) 2630 padlen = 1024; 2631 else { 2632 err = E2BIG; 2633 goto errout; 2634 } 2635 2636 if (ubsec_ksigbits(&krp->krp_param[UBS_RSAPRIV_PAR_DP]) > padlen) { 2637 err = E2BIG; 2638 goto errout; 2639 } 2640 2641 if (ubsec_ksigbits(&krp->krp_param[UBS_RSAPRIV_PAR_DQ]) > padlen) { 2642 err = E2BIG; 2643 goto errout; 2644 } 2645 2646 if (ubsec_ksigbits(&krp->krp_param[UBS_RSAPRIV_PAR_PINV]) > padlen) { 2647 err = E2BIG; 2648 goto errout; 2649 } 2650 2651 rp = kmalloc(sizeof *rp, M_DEVBUF, M_INTWAIT | M_ZERO); 2652 rp->rpr_krp = krp; 2653 rp->rpr_q.q_type = UBS_CTXOP_RSAPRIV; 2654 2655 if (ubsec_dma_malloc(sc, sizeof(struct ubsec_mcr), 2656 &rp->rpr_q.q_mcr, 0)) { 2657 err = ENOMEM; 2658 goto errout; 2659 } 2660 mcr = (struct ubsec_mcr *)rp->rpr_q.q_mcr.dma_vaddr; 2661 2662 if (ubsec_dma_malloc(sc, sizeof(struct ubsec_ctx_rsapriv), 2663 &rp->rpr_q.q_ctx, 0)) { 2664 err = ENOMEM; 2665 goto errout; 2666 } 2667 ctx = (struct ubsec_ctx_rsapriv *)rp->rpr_q.q_ctx.dma_vaddr; 2668 bzero(ctx, sizeof *ctx); 2669 2670 /* Copy in p */ 2671 bcopy(krp->krp_param[UBS_RSAPRIV_PAR_P].crp_p, 2672 &ctx->rpr_buf[0 * (padlen / 8)], 2673 (krp->krp_param[UBS_RSAPRIV_PAR_P].crp_nbits + 7) / 8); 2674 2675 /* Copy in q */ 2676 bcopy(krp->krp_param[UBS_RSAPRIV_PAR_Q].crp_p, 2677 &ctx->rpr_buf[1 * (padlen / 8)], 2678 (krp->krp_param[UBS_RSAPRIV_PAR_Q].crp_nbits + 7) / 8); 2679 2680 /* Copy in dp */ 2681 bcopy(krp->krp_param[UBS_RSAPRIV_PAR_DP].crp_p, 2682 &ctx->rpr_buf[2 * (padlen / 8)], 2683 (krp->krp_param[UBS_RSAPRIV_PAR_DP].crp_nbits + 7) / 8); 2684 2685 /* 
Copy in dq */ 2686 bcopy(krp->krp_param[UBS_RSAPRIV_PAR_DQ].crp_p, 2687 &ctx->rpr_buf[3 * (padlen / 8)], 2688 (krp->krp_param[UBS_RSAPRIV_PAR_DQ].crp_nbits + 7) / 8); 2689 2690 /* Copy in pinv */ 2691 bcopy(krp->krp_param[UBS_RSAPRIV_PAR_PINV].crp_p, 2692 &ctx->rpr_buf[4 * (padlen / 8)], 2693 (krp->krp_param[UBS_RSAPRIV_PAR_PINV].crp_nbits + 7) / 8); 2694 2695 msglen = padlen * 2; 2696 2697 /* Copy in input message (aligned buffer/length). */ 2698 if (ubsec_ksigbits(&krp->krp_param[UBS_RSAPRIV_PAR_MSGIN]) > msglen) { 2699 /* Is this likely? */ 2700 err = E2BIG; 2701 goto errout; 2702 } 2703 if (ubsec_dma_malloc(sc, (msglen + 7) / 8, &rp->rpr_msgin, 0)) { 2704 err = ENOMEM; 2705 goto errout; 2706 } 2707 bzero(rp->rpr_msgin.dma_vaddr, (msglen + 7) / 8); 2708 bcopy(krp->krp_param[UBS_RSAPRIV_PAR_MSGIN].crp_p, 2709 rp->rpr_msgin.dma_vaddr, 2710 (krp->krp_param[UBS_RSAPRIV_PAR_MSGIN].crp_nbits + 7) / 8); 2711 2712 /* Prepare space for output message (aligned buffer/length). */ 2713 if (ubsec_ksigbits(&krp->krp_param[UBS_RSAPRIV_PAR_MSGOUT]) < msglen) { 2714 /* Is this likely? */ 2715 err = E2BIG; 2716 goto errout; 2717 } 2718 if (ubsec_dma_malloc(sc, (msglen + 7) / 8, &rp->rpr_msgout, 0)) { 2719 err = ENOMEM; 2720 goto errout; 2721 } 2722 bzero(rp->rpr_msgout.dma_vaddr, (msglen + 7) / 8); 2723 2724 mcr->mcr_pkts = htole16(1); 2725 mcr->mcr_flags = 0; 2726 mcr->mcr_cmdctxp = htole32(rp->rpr_q.q_ctx.dma_paddr); 2727 mcr->mcr_ipktbuf.pb_addr = htole32(rp->rpr_msgin.dma_paddr); 2728 mcr->mcr_ipktbuf.pb_next = 0; 2729 mcr->mcr_ipktbuf.pb_len = htole32(rp->rpr_msgin.dma_size); 2730 mcr->mcr_reserved = 0; 2731 mcr->mcr_pktlen = htole16(msglen); 2732 mcr->mcr_opktbuf.pb_addr = htole32(rp->rpr_msgout.dma_paddr); 2733 mcr->mcr_opktbuf.pb_next = 0; 2734 mcr->mcr_opktbuf.pb_len = htole32(rp->rpr_msgout.dma_size); 2735 2736 #ifdef DIAGNOSTIC 2737 if (rp->rpr_msgin.dma_paddr & 3 || rp->rpr_msgin.dma_size & 3) { 2738 panic("%s: rsapriv: invalid msgin %x(0x%x)", 2739 device_get_nameunit(sc->sc_dev), 2740 rp->rpr_msgin.dma_paddr, rp->rpr_msgin.dma_size); 2741 } 2742 if (rp->rpr_msgout.dma_paddr & 3 || rp->rpr_msgout.dma_size & 3) { 2743 panic("%s: rsapriv: invalid msgout %x(0x%x)", 2744 device_get_nameunit(sc->sc_dev), 2745 rp->rpr_msgout.dma_paddr, rp->rpr_msgout.dma_size); 2746 } 2747 #endif 2748 2749 ctx->rpr_len = (sizeof(u_int16_t) * 4) + (5 * (padlen / 8)); 2750 ctx->rpr_op = htole16(UBS_CTXOP_RSAPRIV); 2751 ctx->rpr_q_len = htole16(padlen); 2752 ctx->rpr_p_len = htole16(padlen); 2753 2754 /* 2755 * ubsec_feed2 will sync mcr and ctx, we just need to sync 2756 * everything else. 2757 */ 2758 ubsec_dma_sync(&rp->rpr_msgin, BUS_DMASYNC_PREWRITE); 2759 ubsec_dma_sync(&rp->rpr_msgout, BUS_DMASYNC_PREREAD); 2760 2761 /* Enqueue and we're done... 
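	 * Completion is handled in the MCR2 callback: the result is copied
	 * back out of rpr_msgout and this request is placed on sc_q2free
	 * for later release.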
*/ 2762 crit_enter(); 2763 SIMPLEQ_INSERT_TAIL(&sc->sc_queue2, &rp->rpr_q, q_next); 2764 ubsec_feed2(sc); 2765 ubsecstats.hst_modexpcrt++; 2766 crit_exit(); 2767 return (0); 2768 2769 errout: 2770 if (rp != NULL) { 2771 if (rp->rpr_q.q_mcr.dma_map != NULL) 2772 ubsec_dma_free(sc, &rp->rpr_q.q_mcr); 2773 if (rp->rpr_msgin.dma_map != NULL) { 2774 bzero(rp->rpr_msgin.dma_vaddr, rp->rpr_msgin.dma_size); 2775 ubsec_dma_free(sc, &rp->rpr_msgin); 2776 } 2777 if (rp->rpr_msgout.dma_map != NULL) { 2778 bzero(rp->rpr_msgout.dma_vaddr, rp->rpr_msgout.dma_size); 2779 ubsec_dma_free(sc, &rp->rpr_msgout); 2780 } 2781 kfree(rp, M_DEVBUF); 2782 } 2783 krp->krp_status = err; 2784 crypto_kdone(krp); 2785 return (0); 2786 } 2787 2788 #ifdef UBSEC_DEBUG 2789 static void 2790 ubsec_dump_pb(volatile struct ubsec_pktbuf *pb) 2791 { 2792 kprintf("addr 0x%x (0x%x) next 0x%x\n", 2793 pb->pb_addr, pb->pb_len, pb->pb_next); 2794 } 2795 2796 static void 2797 ubsec_dump_ctx2(struct ubsec_ctx_keyop *c) 2798 { 2799 kprintf("CTX (0x%x):\n", c->ctx_len); 2800 switch (letoh16(c->ctx_op)) { 2801 case UBS_CTXOP_RNGBYPASS: 2802 case UBS_CTXOP_RNGSHA1: 2803 break; 2804 case UBS_CTXOP_MODEXP: 2805 { 2806 struct ubsec_ctx_modexp *cx = (void *)c; 2807 int i, len; 2808 2809 kprintf(" Elen %u, Nlen %u\n", 2810 letoh16(cx->me_E_len), letoh16(cx->me_N_len)); 2811 len = (cx->me_N_len + 7)/8; 2812 for (i = 0; i < len; i++) 2813 kprintf("%s%02x", (i == 0) ? " N: " : ":", cx->me_N[i]); 2814 kprintf("\n"); 2815 break; 2816 } 2817 default: 2818 kprintf("unknown context: %x\n", c->ctx_op); 2819 } 2820 kprintf("END CTX\n"); 2821 } 2822 2823 static void 2824 ubsec_dump_mcr(struct ubsec_mcr *mcr) 2825 { 2826 volatile struct ubsec_mcr_add *ma; 2827 int i; 2828 2829 kprintf("MCR:\n"); 2830 kprintf(" pkts: %u, flags 0x%x\n", 2831 letoh16(mcr->mcr_pkts), letoh16(mcr->mcr_flags)); 2832 ma = (volatile struct ubsec_mcr_add *)&mcr->mcr_cmdctxp; 2833 for (i = 0; i < letoh16(mcr->mcr_pkts); i++) { 2834 kprintf(" %d: ctx 0x%x len 0x%x rsvd 0x%x\n", i, 2835 letoh32(ma->mcr_cmdctxp), letoh16(ma->mcr_pktlen), 2836 letoh16(ma->mcr_reserved)); 2837 kprintf(" %d: ipkt ", i); 2838 ubsec_dump_pb(&ma->mcr_ipktbuf); 2839 kprintf(" %d: opkt ", i); 2840 ubsec_dump_pb(&ma->mcr_opktbuf); 2841 ma++; 2842 } 2843 kprintf("END MCR\n"); 2844 } 2845 #endif /* UBSEC_DEBUG */ 2846 2847 /* 2848 * Return the number of significant bits of a big number. 
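 * For example, a 1024-bit (128-byte) parameter whose most-significant
 * nonzero byte is 0x05 at index plen - 1 has 1016 + 3 = 1019
 * significant bits.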
2849 */ 2850 static int 2851 ubsec_ksigbits(struct crparam *cr) 2852 { 2853 u_int plen = (cr->crp_nbits + 7) / 8; 2854 int i, sig = plen * 8; 2855 u_int8_t c, *p = cr->crp_p; 2856 2857 for (i = plen - 1; i >= 0; i--) { 2858 c = p[i]; 2859 if (c != 0) { 2860 while ((c & 0x80) == 0) { 2861 sig--; 2862 c <<= 1; 2863 } 2864 break; 2865 } 2866 sig -= 8; 2867 } 2868 return (sig); 2869 } 2870 2871 static void 2872 ubsec_kshift_r( 2873 u_int shiftbits, 2874 u_int8_t *src, u_int srcbits, 2875 u_int8_t *dst, u_int dstbits) 2876 { 2877 u_int slen, dlen; 2878 int i, si, di, n; 2879 2880 slen = (srcbits + 7) / 8; 2881 dlen = (dstbits + 7) / 8; 2882 2883 for (i = 0; i < slen; i++) 2884 dst[i] = src[i]; 2885 for (i = 0; i < dlen - slen; i++) 2886 dst[slen + i] = 0; 2887 2888 n = shiftbits / 8; 2889 if (n != 0) { 2890 si = dlen - n - 1; 2891 di = dlen - 1; 2892 while (si >= 0) 2893 dst[di--] = dst[si--]; 2894 while (di >= 0) 2895 dst[di--] = 0; 2896 } 2897 2898 n = shiftbits % 8; 2899 if (n != 0) { 2900 for (i = dlen - 1; i > 0; i--) 2901 dst[i] = (dst[i] << n) | 2902 (dst[i - 1] >> (8 - n)); 2903 dst[0] = dst[0] << n; 2904 } 2905 } 2906 2907 static void 2908 ubsec_kshift_l( 2909 u_int shiftbits, 2910 u_int8_t *src, u_int srcbits, 2911 u_int8_t *dst, u_int dstbits) 2912 { 2913 int slen, dlen, i, n; 2914 2915 slen = (srcbits + 7) / 8; 2916 dlen = (dstbits + 7) / 8; 2917 2918 n = shiftbits / 8; 2919 for (i = 0; i < slen; i++) 2920 dst[i] = src[i + n]; 2921 for (i = 0; i < dlen - slen; i++) 2922 dst[slen + i] = 0; 2923 2924 n = shiftbits % 8; 2925 if (n != 0) { 2926 for (i = 0; i < (dlen - 1); i++) 2927 dst[i] = (dst[i] >> n) | (dst[i + 1] << (8 - n)); 2928 dst[dlen - 1] = dst[dlen - 1] >> n; 2929 } 2930 } 2931
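/*
 * Note on the shift helpers above (illustrative; the crparam big
 * numbers are stored least-significant byte first): ubsec_kshift_r()
 * places a srcbits-wide value into a dstbits-wide buffer and shifts it
 * up by shiftbits, i.e. it left-aligns the operand to the normalized
 * width expected by the chip, while ubsec_kshift_l() performs the
 * inverse shift.  A small worked example with shiftbits = 4,
 * src = { 0xab, 0x01 } (the 9-bit value 0x1ab) and a 3-byte
 * destination:
 *
 *	dst after copy/zero-fill:  { 0xab, 0x01, 0x00 }
 *	dst after the 4-bit shift: { 0xb0, 0x1a, 0x00 }   (= 0x1ab << 4)
 */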