/* $FreeBSD: src/sys/dev/ubsec/ubsec.c,v 1.6.2.12 2003/06/04 17:56:59 sam Exp $ */
/* $DragonFly: src/sys/dev/crypto/ubsec/ubsec.c,v 1.7 2005/05/24 20:58:59 dillon Exp $ */
/* $OpenBSD: ubsec.c,v 1.115 2002/09/24 18:33:26 jason Exp $ */

/*
 * Copyright (c) 2000 Jason L. Wright (jason@thought.net)
 * Copyright (c) 2000 Theo de Raadt (deraadt@openbsd.org)
 * Copyright (c) 2001 Patrik Lindergren (patrik@ipunplugged.com)
 *
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Jason L. Wright
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * Effort sponsored in part by the Defense Advanced Research Projects
 * Agency (DARPA) and Air Force Research Laboratory, Air Force
 * Materiel Command, USAF, under agreement number F30602-01-2-0537.
 *
 */

/*
 * uBsec 5[56]01, 58xx hardware crypto accelerator
 */

#include "opt_ubsec.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/errno.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/mbuf.h>
#include <sys/sysctl.h>
#include <sys/endian.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/clock.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include <crypto/sha1.h>
#include <opencrypto/cryptodev.h>
#include <opencrypto/cryptosoft.h>
#include <sys/md5.h>
#include <sys/random.h>

#include <bus/pci/pcivar.h>
#include <bus/pci/pcireg.h>

/* grr, #defines for gratuitous incompatibility in queue.h */
#define	SIMPLEQ_HEAD		STAILQ_HEAD
#define	SIMPLEQ_ENTRY		STAILQ_ENTRY
#define	SIMPLEQ_INIT		STAILQ_INIT
#define	SIMPLEQ_INSERT_TAIL	STAILQ_INSERT_TAIL
#define	SIMPLEQ_EMPTY		STAILQ_EMPTY
#define	SIMPLEQ_FIRST		STAILQ_FIRST
#define	SIMPLEQ_REMOVE_HEAD	STAILQ_REMOVE_HEAD_UNTIL
#define	SIMPLEQ_FOREACH		STAILQ_FOREACH
/* ditto for endian.h */
#define	letoh16(x)		le16toh(x)
#define	letoh32(x)		le32toh(x)

#ifdef UBSEC_RNDTEST
#include "../rndtest/rndtest.h"
#endif
#include "ubsecreg.h"
#include "ubsecvar.h"

/*
 * Prototypes and count for the pci_device structure
 */
static	int ubsec_probe(device_t);
static	int ubsec_attach(device_t);
static	int ubsec_detach(device_t);
static	int ubsec_suspend(device_t);
static	int ubsec_resume(device_t);
static	void ubsec_shutdown(device_t);

static device_method_t ubsec_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		ubsec_probe),
	DEVMETHOD(device_attach,	ubsec_attach),
	DEVMETHOD(device_detach,	ubsec_detach),
	DEVMETHOD(device_suspend,	ubsec_suspend),
	DEVMETHOD(device_resume,	ubsec_resume),
	DEVMETHOD(device_shutdown,	ubsec_shutdown),

	/* bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	{ 0, 0 }
};
static driver_t ubsec_driver = {
	"ubsec",
	ubsec_methods,
	sizeof (struct ubsec_softc)
};
static devclass_t ubsec_devclass;

DECLARE_DUMMY_MODULE(ubsec);
DRIVER_MODULE(ubsec, pci, ubsec_driver, ubsec_devclass, 0, 0);
MODULE_DEPEND(ubsec, crypto, 1, 1, 1);
#ifdef UBSEC_RNDTEST
MODULE_DEPEND(ubsec, rndtest, 1, 1, 1);
#endif

static	void ubsec_intr(void *);
static	int ubsec_newsession(void *, u_int32_t *, struct cryptoini *);
static	int ubsec_freesession(void *, u_int64_t);
static	int ubsec_process(void *, struct cryptop *, int);
static	void ubsec_callback(struct ubsec_softc *, struct ubsec_q *);
static	void ubsec_feed(struct ubsec_softc *);
static	void ubsec_mcopy(struct mbuf *, struct mbuf *, int, int);
static	void ubsec_callback2(struct ubsec_softc *, struct ubsec_q2 *);
static	int ubsec_feed2(struct ubsec_softc *);
static	void ubsec_rng(void *);
static	int ubsec_dma_malloc(struct ubsec_softc *, bus_size_t,
	    struct ubsec_dma_alloc *, int);
#define	ubsec_dma_sync(_dma, _flags) \
	bus_dmamap_sync((_dma)->dma_tag, (_dma)->dma_map, (_flags))
static	void ubsec_dma_free(struct ubsec_softc *, struct ubsec_dma_alloc *);
static	int ubsec_dmamap_aligned(struct ubsec_operand *op);

static	void ubsec_reset_board(struct ubsec_softc *sc);
static	void ubsec_init_board(struct ubsec_softc *sc);
static	void ubsec_init_pciregs(device_t dev);
static	void ubsec_totalreset(struct ubsec_softc *sc);

static	int ubsec_free_q(struct ubsec_softc *sc, struct ubsec_q *q);

static	int ubsec_kprocess(void*, struct cryptkop *, int);
static	int ubsec_kprocess_modexp_hw(struct ubsec_softc *, struct cryptkop *, int);
static	int ubsec_kprocess_modexp_sw(struct ubsec_softc *, struct cryptkop *, int);
static	int ubsec_kprocess_rsapriv(struct ubsec_softc *, struct cryptkop *, int);
static	void ubsec_kfree(struct ubsec_softc *, struct ubsec_q2 *);
static	int ubsec_ksigbits(struct crparam *);
static	void ubsec_kshift_r(u_int, u_int8_t *, u_int, u_int8_t *, u_int);
static	void ubsec_kshift_l(u_int, u_int8_t *, u_int, u_int8_t *, u_int);

SYSCTL_NODE(_hw, OID_AUTO, ubsec, CTLFLAG_RD, 0, "Broadcom driver parameters");

#ifdef UBSEC_DEBUG
static	void ubsec_dump_pb(volatile struct ubsec_pktbuf *);
static	void ubsec_dump_mcr(struct ubsec_mcr *);
static	void ubsec_dump_ctx2(struct ubsec_ctx_keyop *);

static	int ubsec_debug = 0;
SYSCTL_INT(_hw_ubsec, OID_AUTO, debug, CTLFLAG_RW, &ubsec_debug,
	    0, "control debugging msgs");
#endif

#define	READ_REG(sc,r) \
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (r))

#define WRITE_REG(sc,reg,val) \
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, reg, val)

#define	SWAP32(x) (x) = htole32(ntohl((x)))
#define	HTOLE32(x) (x) = htole32(x)


struct ubsec_stats ubsecstats;
SYSCTL_STRUCT(_hw_ubsec, OID_AUTO, stats, CTLFLAG_RD, &ubsecstats,
	    ubsec_stats, "driver statistics");

static int
ubsec_probe(device_t dev)
{
	if (pci_get_vendor(dev) == PCI_VENDOR_SUN &&
	    (pci_get_device(dev) == PCI_PRODUCT_SUN_5821 ||
	     pci_get_device(dev) == PCI_PRODUCT_SUN_SCA1K))
		return (0);
	if (pci_get_vendor(dev) == PCI_VENDOR_BLUESTEEL &&
	    (pci_get_device(dev) == PCI_PRODUCT_BLUESTEEL_5501 ||
	     pci_get_device(dev) == PCI_PRODUCT_BLUESTEEL_5601))
		return (0);
	if (pci_get_vendor(dev) == PCI_VENDOR_BROADCOM &&
	    (pci_get_device(dev) == PCI_PRODUCT_BROADCOM_5801 ||
	     pci_get_device(dev) == PCI_PRODUCT_BROADCOM_5802 ||
	     pci_get_device(dev) == PCI_PRODUCT_BROADCOM_5805 ||
	     pci_get_device(dev) == PCI_PRODUCT_BROADCOM_5820 ||
	     pci_get_device(dev) == PCI_PRODUCT_BROADCOM_5821 ||
	     pci_get_device(dev) == PCI_PRODUCT_BROADCOM_5822 ||
	     pci_get_device(dev) == PCI_PRODUCT_BROADCOM_5823
	     ))
		return (0);
	return (ENXIO);
}

static const char*
ubsec_partname(struct ubsec_softc *sc)
{
	/* XXX sprintf numbers when not decoded */
	switch (pci_get_vendor(sc->sc_dev)) {
	case PCI_VENDOR_BROADCOM:
		switch (pci_get_device(sc->sc_dev)) {
		case PCI_PRODUCT_BROADCOM_5801:	return "Broadcom 5801";
		case PCI_PRODUCT_BROADCOM_5802:	return "Broadcom 5802";
		case PCI_PRODUCT_BROADCOM_5805:	return "Broadcom 5805";
		case PCI_PRODUCT_BROADCOM_5820:	return "Broadcom 5820";
		case PCI_PRODUCT_BROADCOM_5821:	return "Broadcom 5821";
		case PCI_PRODUCT_BROADCOM_5822:	return "Broadcom 5822";
		case PCI_PRODUCT_BROADCOM_5823:	return "Broadcom 5823";
		}
		return "Broadcom unknown-part";
	case PCI_VENDOR_BLUESTEEL:
		switch (pci_get_device(sc->sc_dev)) {
		case PCI_PRODUCT_BLUESTEEL_5601: return "Bluesteel 5601";
		}
		return "Bluesteel unknown-part";
	case PCI_VENDOR_SUN:
		switch (pci_get_device(sc->sc_dev)) {
		case PCI_PRODUCT_SUN_5821: return "Sun Crypto 5821";
		case PCI_PRODUCT_SUN_SCA1K: return "Sun Crypto 1K";
		}
		return "Sun unknown-part";
	}
	return "Unknown-vendor unknown-part";
}

static void
default_harvest(struct rndtest_state *rsp, void *buf, u_int count)
{
	u_int32_t *p = (u_int32_t *)buf;
	for (count /= sizeof (u_int32_t); count; count--)
		add_true_randomness(*p++);
}

static int
ubsec_attach(device_t dev)
{
	struct ubsec_softc *sc = device_get_softc(dev);
	struct ubsec_dma *dmap;
	u_int32_t cmd, i;
	int rid;

	KASSERT(sc != NULL, ("ubsec_attach: null software carrier!"));
	bzero(sc, sizeof (*sc));
	sc->sc_dev = dev;

	SIMPLEQ_INIT(&sc->sc_queue);
	SIMPLEQ_INIT(&sc->sc_qchip);
	SIMPLEQ_INIT(&sc->sc_queue2);
	SIMPLEQ_INIT(&sc->sc_qchip2);
	SIMPLEQ_INIT(&sc->sc_q2free);

	/* XXX handle power management */

	sc->sc_statmask = BS_STAT_MCR1_DONE | BS_STAT_DMAERR;

	if (pci_get_vendor(dev) == PCI_VENDOR_BLUESTEEL &&
	    pci_get_device(dev) == PCI_PRODUCT_BLUESTEEL_5601)
		sc->sc_flags |= UBS_FLAGS_KEY | UBS_FLAGS_RNG;

	if (pci_get_vendor(dev) == PCI_VENDOR_BROADCOM &&
	    (pci_get_device(dev) == PCI_PRODUCT_BROADCOM_5802 ||
	     pci_get_device(dev) == PCI_PRODUCT_BROADCOM_5805))
		sc->sc_flags |= UBS_FLAGS_KEY | UBS_FLAGS_RNG;

	if (pci_get_vendor(dev) == PCI_VENDOR_BROADCOM &&
	    pci_get_device(dev) == PCI_PRODUCT_BROADCOM_5820)
		sc->sc_flags |= UBS_FLAGS_KEY | UBS_FLAGS_RNG |
		    UBS_FLAGS_LONGCTX | UBS_FLAGS_HWNORM | UBS_FLAGS_BIGKEY;

	if ((pci_get_vendor(dev) == PCI_VENDOR_BROADCOM &&
	     (pci_get_device(dev) == PCI_PRODUCT_BROADCOM_5821 ||
	      pci_get_device(dev) == PCI_PRODUCT_BROADCOM_5822 ||
	      pci_get_device(dev) == PCI_PRODUCT_BROADCOM_5823)) ||
	    (pci_get_vendor(dev) == PCI_VENDOR_SUN &&
	     (pci_get_device(dev) == PCI_PRODUCT_SUN_SCA1K ||
	      pci_get_device(dev) == PCI_PRODUCT_SUN_5821))) {
		/* NB: the 5821/5822 defines some additional status bits */
		sc->sc_statmask |= BS_STAT_MCR1_ALLEMPTY |
		    BS_STAT_MCR2_ALLEMPTY;
		sc->sc_flags |= UBS_FLAGS_KEY | UBS_FLAGS_RNG |
		    UBS_FLAGS_LONGCTX | UBS_FLAGS_HWNORM | UBS_FLAGS_BIGKEY;
	}

	cmd = pci_read_config(dev, PCIR_COMMAND, 4);
	cmd |= PCIM_CMD_MEMEN | PCIM_CMD_BUSMASTEREN;
	pci_write_config(dev, PCIR_COMMAND, cmd, 4);
	cmd = pci_read_config(dev, PCIR_COMMAND, 4);

	if (!(cmd & PCIM_CMD_MEMEN)) {
		device_printf(dev, "failed to enable memory mapping\n");
		goto bad;
	}

	if (!(cmd & PCIM_CMD_BUSMASTEREN)) {
		device_printf(dev, "failed to enable bus mastering\n");
		goto bad;
	}

	/*
	 * Setup memory-mapping of PCI registers.
	 */
	rid = BS_BAR;
	sc->sc_sr = bus_alloc_resource(dev, SYS_RES_MEMORY, &rid,
				       0, ~0, 1, RF_ACTIVE);
	if (sc->sc_sr == NULL) {
		device_printf(dev, "cannot map register space\n");
		goto bad;
	}
	sc->sc_st = rman_get_bustag(sc->sc_sr);
	sc->sc_sh = rman_get_bushandle(sc->sc_sr);

	/*
	 * Arrange interrupt line.
	 */
	rid = 0;
	sc->sc_irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid,
					0, ~0, 1, RF_SHAREABLE|RF_ACTIVE);
	if (sc->sc_irq == NULL) {
		device_printf(dev, "could not map interrupt\n");
		goto bad1;
	}
	/*
	 * NB: Network code assumes we are blocked with splimp()
	 *     so make sure the IRQ is mapped appropriately.
	 */
	if (bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET,
			   ubsec_intr, sc,
			   &sc->sc_ih, NULL)) {
		device_printf(dev, "could not establish interrupt\n");
		goto bad2;
	}

	sc->sc_cid = crypto_get_driverid(0);
	if (sc->sc_cid < 0) {
		device_printf(dev, "could not get crypto driver id\n");
		goto bad3;
	}

	/*
	 * Setup DMA descriptor area.
	 */
	if (bus_dma_tag_create(NULL,			/* parent */
			       1, 0,			/* alignment, bounds */
			       BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
			       BUS_SPACE_MAXADDR,	/* highaddr */
			       NULL, NULL,		/* filter, filterarg */
			       0x3ffff,			/* maxsize */
			       UBS_MAX_SCATTER,		/* nsegments */
			       0xffff,			/* maxsegsize */
			       BUS_DMA_ALLOCNOW,	/* flags */
			       &sc->sc_dmat)) {
		device_printf(dev, "cannot allocate DMA tag\n");
		goto bad4;
	}
	SIMPLEQ_INIT(&sc->sc_freequeue);
	dmap = sc->sc_dmaa;
	for (i = 0; i < UBS_MAX_NQUEUE; i++, dmap++) {
		struct ubsec_q *q;

		q = malloc(sizeof(struct ubsec_q), M_DEVBUF, M_WAITOK);
		if (ubsec_dma_malloc(sc, sizeof(struct ubsec_dmachunk),
		    &dmap->d_alloc, 0)) {
			device_printf(dev, "cannot allocate dma buffers\n");
			free(q, M_DEVBUF);
			break;
		}
		dmap->d_dma = (struct ubsec_dmachunk *)dmap->d_alloc.dma_vaddr;

		q->q_dma = dmap;
		sc->sc_queuea[i] = q;

		SIMPLEQ_INSERT_TAIL(&sc->sc_freequeue, q, q_next);
	}

	device_printf(sc->sc_dev, "%s\n", ubsec_partname(sc));

	crypto_register(sc->sc_cid, CRYPTO_3DES_CBC, 0, 0,
	    ubsec_newsession, ubsec_freesession, ubsec_process, sc);
	crypto_register(sc->sc_cid, CRYPTO_DES_CBC, 0, 0,
	    ubsec_newsession, ubsec_freesession, ubsec_process, sc);
	crypto_register(sc->sc_cid, CRYPTO_MD5_HMAC, 0, 0,
	    ubsec_newsession, ubsec_freesession, ubsec_process, sc);
	crypto_register(sc->sc_cid, CRYPTO_SHA1_HMAC, 0, 0,
	    ubsec_newsession, ubsec_freesession, ubsec_process, sc);

	/*
	 * Reset Broadcom chip
	 */
	ubsec_reset_board(sc);

	/*
	 * Init Broadcom specific PCI settings
	 */
	ubsec_init_pciregs(dev);

	/*
	 * Init Broadcom chip
	 */
	ubsec_init_board(sc);

#ifndef UBSEC_NO_RNG
	if (sc->sc_flags & UBS_FLAGS_RNG) {
		sc->sc_statmask |= BS_STAT_MCR2_DONE;
#ifdef UBSEC_RNDTEST
		sc->sc_rndtest = rndtest_attach(dev);
		if (sc->sc_rndtest)
			sc->sc_harvest = rndtest_harvest;
		else
			sc->sc_harvest = default_harvest;
#else
		sc->sc_harvest = default_harvest;
#endif

		if (ubsec_dma_malloc(sc, sizeof(struct ubsec_mcr),
		    &sc->sc_rng.rng_q.q_mcr, 0))
			goto skip_rng;

		if (ubsec_dma_malloc(sc, sizeof(struct ubsec_ctx_rngbypass),
		    &sc->sc_rng.rng_q.q_ctx, 0)) {
			ubsec_dma_free(sc, &sc->sc_rng.rng_q.q_mcr);
			goto skip_rng;
		}

		if (ubsec_dma_malloc(sc, sizeof(u_int32_t) *
		    UBSEC_RNG_BUFSIZ, &sc->sc_rng.rng_buf, 0)) {
			ubsec_dma_free(sc, &sc->sc_rng.rng_q.q_ctx);
			ubsec_dma_free(sc, &sc->sc_rng.rng_q.q_mcr);
			goto skip_rng;
		}

		if (hz >= 100)
			sc->sc_rnghz = hz / 100;
		else
			sc->sc_rnghz = 1;
		callout_init(&sc->sc_rngto);
		callout_reset(&sc->sc_rngto, sc->sc_rnghz, ubsec_rng, sc);
skip_rng:
		;
	}
#endif /* UBSEC_NO_RNG */

	if (sc->sc_flags & UBS_FLAGS_KEY) {
		sc->sc_statmask |= BS_STAT_MCR2_DONE;

		crypto_kregister(sc->sc_cid, CRK_MOD_EXP, 0,
		    ubsec_kprocess, sc);
#if 0
		crypto_kregister(sc->sc_cid, CRK_MOD_EXP_CRT, 0,
		    ubsec_kprocess, sc);
#endif
	}
	return (0);
bad4:
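	/* Unwind: release everything acquired above, in reverse order. */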
	crypto_unregister_all(sc->sc_cid);
bad3:
	bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih);
bad2:
	bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq);
bad1:
	bus_release_resource(dev, SYS_RES_MEMORY, BS_BAR, sc->sc_sr);
bad:
	return (ENXIO);
}

/*
 * Detach a device that successfully probed.
 */
static int
ubsec_detach(device_t dev)
{
	struct ubsec_softc *sc = device_get_softc(dev);
	int s;

	KASSERT(sc != NULL, ("ubsec_detach: null software carrier"));

	/* XXX wait/abort active ops */

	s = splimp();

	callout_stop(&sc->sc_rngto);

	crypto_unregister_all(sc->sc_cid);

#ifdef UBSEC_RNDTEST
	if (sc->sc_rndtest)
		rndtest_detach(sc->sc_rndtest);
#endif

	while (!SIMPLEQ_EMPTY(&sc->sc_freequeue)) {
		struct ubsec_q *q;

		q = SIMPLEQ_FIRST(&sc->sc_freequeue);
		SIMPLEQ_REMOVE_HEAD(&sc->sc_freequeue, q, q_next);
		ubsec_dma_free(sc, &q->q_dma->d_alloc);
		free(q, M_DEVBUF);
	}
#ifndef UBSEC_NO_RNG
	if (sc->sc_flags & UBS_FLAGS_RNG) {
		ubsec_dma_free(sc, &sc->sc_rng.rng_q.q_mcr);
		ubsec_dma_free(sc, &sc->sc_rng.rng_q.q_ctx);
		ubsec_dma_free(sc, &sc->sc_rng.rng_buf);
	}
#endif /* UBSEC_NO_RNG */

	bus_generic_detach(dev);
	bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih);
	bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq);

	bus_dma_tag_destroy(sc->sc_dmat);
	bus_release_resource(dev, SYS_RES_MEMORY, BS_BAR, sc->sc_sr);

	splx(s);

	return (0);
}

/*
 * Stop all chip i/o so that the kernel's probe routines don't
 * get confused by errant DMAs when rebooting.
 */
static void
ubsec_shutdown(device_t dev)
{
#ifdef notyet
	ubsec_stop(device_get_softc(dev));
#endif
}

/*
 * Device suspend routine.
 */
static int
ubsec_suspend(device_t dev)
{
	struct ubsec_softc *sc = device_get_softc(dev);

	KASSERT(sc != NULL, ("ubsec_suspend: null software carrier"));
#ifdef notyet
	/* XXX stop the device and save PCI settings */
#endif
	sc->sc_suspended = 1;

	return (0);
}

static int
ubsec_resume(device_t dev)
{
	struct ubsec_softc *sc = device_get_softc(dev);

	KASSERT(sc != NULL, ("ubsec_resume: null software carrier"));
#ifdef notyet
	/* XXX restore PCI settings and start the device */
#endif
	sc->sc_suspended = 0;
	return (0);
}

/*
 * UBSEC Interrupt routine
 */
static void
ubsec_intr(void *arg)
{
	struct ubsec_softc *sc = arg;
	volatile u_int32_t stat;
	struct ubsec_q *q;
	struct ubsec_dma *dmap;
	int npkts = 0, i;

	stat = READ_REG(sc, BS_STAT);
	stat &= sc->sc_statmask;
	if (stat == 0) {
		return;
	}

	WRITE_REG(sc, BS_STAT, stat);		/* IACK */

	/*
	 * Check to see if we have any packets waiting for us
	 */
	if ((stat & BS_STAT_MCR1_DONE)) {
		while (!SIMPLEQ_EMPTY(&sc->sc_qchip)) {
			q = SIMPLEQ_FIRST(&sc->sc_qchip);
			dmap = q->q_dma;

			if ((dmap->d_dma->d_mcr.mcr_flags & htole16(UBS_MCR_DONE)) == 0)
				break;

			SIMPLEQ_REMOVE_HEAD(&sc->sc_qchip, q, q_next);

			npkts = q->q_nstacked_mcrs;
			sc->sc_nqchip -= 1+npkts;
			/*
			 * search for further sc_qchip ubsec_q's that share
			 * the same MCR, and complete them too; they must be
			 * at the top.
			 */
			for (i = 0; i < npkts; i++) {
				if(q->q_stacked_mcr[i]) {
					ubsec_callback(sc, q->q_stacked_mcr[i]);
				} else {
					break;
				}
			}
			ubsec_callback(sc, q);
		}

		/*
		 * Don't send any more packets to the chip if there has been
		 * a DMAERR.
		 */
		if (!(stat & BS_STAT_DMAERR))
			ubsec_feed(sc);
	}

	/*
	 * Check to see if we have any key setups/rng's waiting for us
	 */
	if ((sc->sc_flags & (UBS_FLAGS_KEY|UBS_FLAGS_RNG)) &&
	    (stat & BS_STAT_MCR2_DONE)) {
		struct ubsec_q2 *q2;
		struct ubsec_mcr *mcr;

		while (!SIMPLEQ_EMPTY(&sc->sc_qchip2)) {
			q2 = SIMPLEQ_FIRST(&sc->sc_qchip2);

			ubsec_dma_sync(&q2->q_mcr,
			    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

			mcr = (struct ubsec_mcr *)q2->q_mcr.dma_vaddr;
			if ((mcr->mcr_flags & htole16(UBS_MCR_DONE)) == 0) {
				ubsec_dma_sync(&q2->q_mcr,
				    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
				break;
			}
			SIMPLEQ_REMOVE_HEAD(&sc->sc_qchip2, q2, q_next);
			ubsec_callback2(sc, q2);
			/*
			 * Don't send any more packets to the chip if there
			 * has been a DMAERR.
			 */
			if (!(stat & BS_STAT_DMAERR))
				ubsec_feed2(sc);
		}
	}

	/*
	 * Check to see if we got any DMA Error
	 */
	if (stat & BS_STAT_DMAERR) {
#ifdef UBSEC_DEBUG
		if (ubsec_debug) {
			volatile u_int32_t a = READ_REG(sc, BS_ERR);

			printf("dmaerr %s@%08x\n",
			    (a & BS_ERR_READ) ? "read" : "write",
			    a & BS_ERR_ADDR);
		}
#endif /* UBSEC_DEBUG */
		ubsecstats.hst_dmaerr++;
		ubsec_totalreset(sc);
		ubsec_feed(sc);
	}

	if (sc->sc_needwakeup) {		/* XXX check high watermark */
		int wakeup = sc->sc_needwakeup & (CRYPTO_SYMQ|CRYPTO_ASYMQ);
#ifdef UBSEC_DEBUG
		if (ubsec_debug)
			device_printf(sc->sc_dev, "wakeup crypto (%x)\n",
				sc->sc_needwakeup);
#endif /* UBSEC_DEBUG */
		sc->sc_needwakeup &= ~wakeup;
		crypto_unblock(sc->sc_cid, wakeup);
	}
}

/*
 * ubsec_feed() - aggregate and post requests to chip
 */
static void
ubsec_feed(struct ubsec_softc *sc)
{
	struct ubsec_q *q, *q2;
	int npkts, i;
	void *v;
	u_int32_t stat;

	/*
	 * Decide how many ops to combine in a single MCR.  We cannot
	 * aggregate more than UBS_MAX_AGGR because this is the number
	 * of slots defined in the data structure.  Note that
	 * aggregation only happens if ops are marked batch'able.
	 * Aggregating ops reduces the number of interrupts to the host
	 * but also (potentially) increases the latency for processing
	 * completed ops as we only get an interrupt when all aggregated
	 * ops have completed.
	 */
	if (sc->sc_nqueue == 0)
		return;
	if (sc->sc_nqueue > 1) {
		npkts = 0;
		SIMPLEQ_FOREACH(q, &sc->sc_queue, q_next) {
			npkts++;
			if ((q->q_crp->crp_flags & CRYPTO_F_BATCH) == 0)
				break;
		}
	} else
		npkts = 1;
	/*
	 * Check device status before going any further.
	 */
	if ((stat = READ_REG(sc, BS_STAT)) & (BS_STAT_MCR1_FULL | BS_STAT_DMAERR)) {
		if (stat & BS_STAT_DMAERR) {
			ubsec_totalreset(sc);
			ubsecstats.hst_dmaerr++;
		} else
			ubsecstats.hst_mcr1full++;
		return;
	}
	if (sc->sc_nqueue > ubsecstats.hst_maxqueue)
		ubsecstats.hst_maxqueue = sc->sc_nqueue;
	if (npkts > UBS_MAX_AGGR)
		npkts = UBS_MAX_AGGR;
	if (npkts < 2)				/* special case 1 op */
		goto feed1;

	ubsecstats.hst_totbatch += npkts-1;
#ifdef UBSEC_DEBUG
	if (ubsec_debug)
		printf("merging %d records\n", npkts);
#endif /* UBSEC_DEBUG */

	q = SIMPLEQ_FIRST(&sc->sc_queue);
	SIMPLEQ_REMOVE_HEAD(&sc->sc_queue, q, q_next);
	--sc->sc_nqueue;

	bus_dmamap_sync(sc->sc_dmat, q->q_src_map, BUS_DMASYNC_PREWRITE);
	if (q->q_dst_map != NULL)
		bus_dmamap_sync(sc->sc_dmat, q->q_dst_map, BUS_DMASYNC_PREREAD);

	q->q_nstacked_mcrs = npkts - 1;		/* Number of packets stacked */

	for (i = 0; i < q->q_nstacked_mcrs; i++) {
		q2 = SIMPLEQ_FIRST(&sc->sc_queue);
		bus_dmamap_sync(sc->sc_dmat, q2->q_src_map,
		    BUS_DMASYNC_PREWRITE);
		if (q2->q_dst_map != NULL)
			bus_dmamap_sync(sc->sc_dmat, q2->q_dst_map,
			    BUS_DMASYNC_PREREAD);
		SIMPLEQ_REMOVE_HEAD(&sc->sc_queue, q2, q_next);
		--sc->sc_nqueue;

		v = (void*)(((char *)&q2->q_dma->d_dma->d_mcr) + sizeof(struct ubsec_mcr) -
		    sizeof(struct ubsec_mcr_add));
		bcopy(v, &q->q_dma->d_dma->d_mcradd[i], sizeof(struct ubsec_mcr_add));
		q->q_stacked_mcr[i] = q2;
	}
	q->q_dma->d_dma->d_mcr.mcr_pkts = htole16(npkts);
	SIMPLEQ_INSERT_TAIL(&sc->sc_qchip, q, q_next);
	sc->sc_nqchip += npkts;
	if (sc->sc_nqchip > ubsecstats.hst_maxqchip)
		ubsecstats.hst_maxqchip = sc->sc_nqchip;
	ubsec_dma_sync(&q->q_dma->d_alloc,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	WRITE_REG(sc, BS_MCR1, q->q_dma->d_alloc.dma_paddr +
	    offsetof(struct ubsec_dmachunk, d_mcr));
	return;

feed1:
	q = SIMPLEQ_FIRST(&sc->sc_queue);

	bus_dmamap_sync(sc->sc_dmat, q->q_src_map, BUS_DMASYNC_PREWRITE);
	if (q->q_dst_map != NULL)
		bus_dmamap_sync(sc->sc_dmat, q->q_dst_map, BUS_DMASYNC_PREREAD);
	ubsec_dma_sync(&q->q_dma->d_alloc,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	WRITE_REG(sc, BS_MCR1, q->q_dma->d_alloc.dma_paddr +
	    offsetof(struct ubsec_dmachunk, d_mcr));
#ifdef UBSEC_DEBUG
	if (ubsec_debug)
		printf("feed1: q->chip %p %08x stat %08x\n",
		    q, (u_int32_t)vtophys(&q->q_dma->d_dma->d_mcr),
		    stat);
#endif /* UBSEC_DEBUG */
	SIMPLEQ_REMOVE_HEAD(&sc->sc_queue, q, q_next);
	--sc->sc_nqueue;
	SIMPLEQ_INSERT_TAIL(&sc->sc_qchip, q, q_next);
	sc->sc_nqchip++;
	if (sc->sc_nqchip > ubsecstats.hst_maxqchip)
		ubsecstats.hst_maxqchip = sc->sc_nqchip;
	return;
}

/*
 * Allocate a new 'session' and return an encoded session id.  'sidp'
 * contains our registration id, and should contain an encoded session
 * id on successful allocation.
 */
static int
ubsec_newsession(void *arg, u_int32_t *sidp, struct cryptoini *cri)
{
	struct cryptoini *c, *encini = NULL, *macini = NULL;
	struct ubsec_softc *sc = arg;
	struct ubsec_session *ses = NULL;
	MD5_CTX md5ctx;
	SHA1_CTX sha1ctx;
	int i, sesn;

	KASSERT(sc != NULL, ("ubsec_newsession: null softc"));
	if (sidp == NULL || cri == NULL || sc == NULL)
		return (EINVAL);

	for (c = cri; c != NULL; c = c->cri_next) {
		if (c->cri_alg == CRYPTO_MD5_HMAC ||
		    c->cri_alg == CRYPTO_SHA1_HMAC) {
			if (macini)
				return (EINVAL);
			macini = c;
		} else if (c->cri_alg == CRYPTO_DES_CBC ||
		    c->cri_alg == CRYPTO_3DES_CBC) {
			if (encini)
				return (EINVAL);
			encini = c;
		} else
			return (EINVAL);
	}
	if (encini == NULL && macini == NULL)
		return (EINVAL);

	if (sc->sc_sessions == NULL) {
		ses = sc->sc_sessions = malloc(sizeof(struct ubsec_session),
		    M_DEVBUF, M_INTWAIT);
		sesn = 0;
		sc->sc_nsessions = 1;
	} else {
		for (sesn = 0; sesn < sc->sc_nsessions; sesn++) {
			if (sc->sc_sessions[sesn].ses_used == 0) {
				ses = &sc->sc_sessions[sesn];
				break;
			}
		}

		if (ses == NULL) {
			sesn = sc->sc_nsessions;
			ses = malloc((sesn + 1) * sizeof(struct ubsec_session),
			    M_DEVBUF, M_INTWAIT);
			bcopy(sc->sc_sessions, ses, sesn *
			    sizeof(struct ubsec_session));
			bzero(sc->sc_sessions, sesn *
			    sizeof(struct ubsec_session));
			free(sc->sc_sessions, M_DEVBUF);
			sc->sc_sessions = ses;
			ses = &sc->sc_sessions[sesn];
			sc->sc_nsessions++;
		}
	}

	bzero(ses, sizeof(struct ubsec_session));
	ses->ses_used = 1;
	if (encini) {
		/* get an IV, network byte order */
		/* XXX may read fewer than requested */
		read_random(ses->ses_iv, sizeof(ses->ses_iv));

		/* Go ahead and compute key in ubsec's byte order */
		if (encini->cri_alg == CRYPTO_DES_CBC) {
			bcopy(encini->cri_key, &ses->ses_deskey[0], 8);
			bcopy(encini->cri_key, &ses->ses_deskey[2], 8);
			bcopy(encini->cri_key, &ses->ses_deskey[4], 8);
		} else
			bcopy(encini->cri_key, ses->ses_deskey, 24);

		SWAP32(ses->ses_deskey[0]);
		SWAP32(ses->ses_deskey[1]);
		SWAP32(ses->ses_deskey[2]);
		SWAP32(ses->ses_deskey[3]);
		SWAP32(ses->ses_deskey[4]);
		SWAP32(ses->ses_deskey[5]);
	}

	if (macini) {
		for (i = 0; i < macini->cri_klen / 8; i++)
			macini->cri_key[i] ^= HMAC_IPAD_VAL;

		if (macini->cri_alg == CRYPTO_MD5_HMAC) {
			MD5Init(&md5ctx);
			MD5Update(&md5ctx, macini->cri_key,
			    macini->cri_klen / 8);
			MD5Update(&md5ctx, hmac_ipad_buffer,
			    HMAC_BLOCK_LEN - (macini->cri_klen / 8));
			bcopy(md5ctx.state, ses->ses_hminner,
			    sizeof(md5ctx.state));
		} else {
			SHA1Init(&sha1ctx);
			SHA1Update(&sha1ctx, macini->cri_key,
			    macini->cri_klen / 8);
			SHA1Update(&sha1ctx, hmac_ipad_buffer,
			    HMAC_BLOCK_LEN - (macini->cri_klen / 8));
			bcopy(sha1ctx.h.b32, ses->ses_hminner,
			    sizeof(sha1ctx.h.b32));
		}

		for (i = 0; i < macini->cri_klen / 8; i++)
			macini->cri_key[i] ^= (HMAC_IPAD_VAL ^ HMAC_OPAD_VAL);

		if (macini->cri_alg == CRYPTO_MD5_HMAC) {
			MD5Init(&md5ctx);
			MD5Update(&md5ctx, macini->cri_key,
			    macini->cri_klen / 8);
			MD5Update(&md5ctx, hmac_opad_buffer,
			    HMAC_BLOCK_LEN - (macini->cri_klen / 8));
			bcopy(md5ctx.state, ses->ses_hmouter,
			    sizeof(md5ctx.state));
		} else {
			SHA1Init(&sha1ctx);
			SHA1Update(&sha1ctx, macini->cri_key,
			    macini->cri_klen / 8);
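			/* hash opad filler to pad the key out to a full HMAC block */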
			SHA1Update(&sha1ctx, hmac_opad_buffer,
			    HMAC_BLOCK_LEN - (macini->cri_klen / 8));
			bcopy(sha1ctx.h.b32, ses->ses_hmouter,
			    sizeof(sha1ctx.h.b32));
		}

		for (i = 0; i < macini->cri_klen / 8; i++)
			macini->cri_key[i] ^= HMAC_OPAD_VAL;
	}

	*sidp = UBSEC_SID(device_get_unit(sc->sc_dev), sesn);
	return (0);
}

/*
 * Deallocate a session.
 */
static int
ubsec_freesession(void *arg, u_int64_t tid)
{
	struct ubsec_softc *sc = arg;
	int session;
	u_int32_t sid = ((u_int32_t) tid) & 0xffffffff;

	KASSERT(sc != NULL, ("ubsec_freesession: null softc"));
	if (sc == NULL)
		return (EINVAL);

	session = UBSEC_SESSION(sid);
	if (session >= sc->sc_nsessions)
		return (EINVAL);

	bzero(&sc->sc_sessions[session], sizeof(sc->sc_sessions[session]));
	return (0);
}

static void
ubsec_op_cb(void *arg, bus_dma_segment_t *seg, int nsegs, bus_size_t mapsize, int error)
{
	struct ubsec_operand *op = arg;

	KASSERT(nsegs <= UBS_MAX_SCATTER,
		("Too many DMA segments returned when mapping operand"));
#ifdef UBSEC_DEBUG
	if (ubsec_debug)
		printf("ubsec_op_cb: mapsize %u nsegs %d\n",
			(u_int) mapsize, nsegs);
#endif
	op->mapsize = mapsize;
	op->nsegs = nsegs;
	bcopy(seg, op->segs, nsegs * sizeof (seg[0]));
}

static int
ubsec_process(void *arg, struct cryptop *crp, int hint)
{
	struct ubsec_q *q = NULL;
	int err = 0, i, j, s, nicealign;
	struct ubsec_softc *sc = arg;
	struct cryptodesc *crd1, *crd2, *maccrd, *enccrd;
	int encoffset = 0, macoffset = 0, cpskip, cpoffset;
	int sskip, dskip, stheend, dtheend;
	int16_t coffset;
	struct ubsec_session *ses;
	struct ubsec_pktctx ctx;
	struct ubsec_dma *dmap = NULL;

	if (crp == NULL || crp->crp_callback == NULL || sc == NULL) {
		ubsecstats.hst_invalid++;
		return (EINVAL);
	}
	if (UBSEC_SESSION(crp->crp_sid) >= sc->sc_nsessions) {
		ubsecstats.hst_badsession++;
		return (EINVAL);
	}

	s = splimp();

	if (SIMPLEQ_EMPTY(&sc->sc_freequeue)) {
		ubsecstats.hst_queuefull++;
		sc->sc_needwakeup |= CRYPTO_SYMQ;
		splx(s);
		return (ERESTART);
	}
	q = SIMPLEQ_FIRST(&sc->sc_freequeue);
	SIMPLEQ_REMOVE_HEAD(&sc->sc_freequeue, q, q_next);
	splx(s);

	dmap = q->q_dma;		/* Save dma pointer */
	bzero(q, sizeof(struct ubsec_q));
	bzero(&ctx, sizeof(ctx));

	q->q_sesn = UBSEC_SESSION(crp->crp_sid);
	q->q_dma = dmap;
	ses = &sc->sc_sessions[q->q_sesn];

	if (crp->crp_flags & CRYPTO_F_IMBUF) {
		q->q_src_m = (struct mbuf *)crp->crp_buf;
		q->q_dst_m = (struct mbuf *)crp->crp_buf;
	} else if (crp->crp_flags & CRYPTO_F_IOV) {
		q->q_src_io = (struct uio *)crp->crp_buf;
		q->q_dst_io = (struct uio *)crp->crp_buf;
	} else {
		ubsecstats.hst_badflags++;
		err = EINVAL;
		goto errout;	/* XXX we don't handle contiguous blocks! */
	}

	bzero(&dmap->d_dma->d_mcr, sizeof(struct ubsec_mcr));

	dmap->d_dma->d_mcr.mcr_pkts = htole16(1);
	dmap->d_dma->d_mcr.mcr_flags = 0;
	q->q_crp = crp;

	crd1 = crp->crp_desc;
	if (crd1 == NULL) {
		ubsecstats.hst_nodesc++;
		err = EINVAL;
		goto errout;
	}
	crd2 = crd1->crd_next;

	if (crd2 == NULL) {
		if (crd1->crd_alg == CRYPTO_MD5_HMAC ||
		    crd1->crd_alg == CRYPTO_SHA1_HMAC) {
			maccrd = crd1;
			enccrd = NULL;
		} else if (crd1->crd_alg == CRYPTO_DES_CBC ||
		    crd1->crd_alg == CRYPTO_3DES_CBC) {
			maccrd = NULL;
			enccrd = crd1;
		} else {
			ubsecstats.hst_badalg++;
			err = EINVAL;
			goto errout;
		}
	} else {
		if ((crd1->crd_alg == CRYPTO_MD5_HMAC ||
		    crd1->crd_alg == CRYPTO_SHA1_HMAC) &&
		    (crd2->crd_alg == CRYPTO_DES_CBC ||
		    crd2->crd_alg == CRYPTO_3DES_CBC) &&
		    ((crd2->crd_flags & CRD_F_ENCRYPT) == 0)) {
			maccrd = crd1;
			enccrd = crd2;
		} else if ((crd1->crd_alg == CRYPTO_DES_CBC ||
		    crd1->crd_alg == CRYPTO_3DES_CBC) &&
		    (crd2->crd_alg == CRYPTO_MD5_HMAC ||
		    crd2->crd_alg == CRYPTO_SHA1_HMAC) &&
		    (crd1->crd_flags & CRD_F_ENCRYPT)) {
			enccrd = crd1;
			maccrd = crd2;
		} else {
			/*
			 * We cannot order the ubsec as requested
			 */
			ubsecstats.hst_badalg++;
			err = EINVAL;
			goto errout;
		}
	}

	if (enccrd) {
		encoffset = enccrd->crd_skip;
		ctx.pc_flags |= htole16(UBS_PKTCTX_ENC_3DES);

		if (enccrd->crd_flags & CRD_F_ENCRYPT) {
			q->q_flags |= UBSEC_QFLAGS_COPYOUTIV;

			if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
				bcopy(enccrd->crd_iv, ctx.pc_iv, 8);
			else {
				ctx.pc_iv[0] = ses->ses_iv[0];
				ctx.pc_iv[1] = ses->ses_iv[1];
			}

			if ((enccrd->crd_flags & CRD_F_IV_PRESENT) == 0) {
				if (crp->crp_flags & CRYPTO_F_IMBUF)
					m_copyback(q->q_src_m,
					    enccrd->crd_inject,
					    8, (caddr_t)ctx.pc_iv);
				else if (crp->crp_flags & CRYPTO_F_IOV)
					cuio_copyback(q->q_src_io,
					    enccrd->crd_inject,
					    8, (caddr_t)ctx.pc_iv);
			}
		} else {
			ctx.pc_flags |= htole16(UBS_PKTCTX_INBOUND);

			if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
				bcopy(enccrd->crd_iv, ctx.pc_iv, 8);
			else if (crp->crp_flags & CRYPTO_F_IMBUF)
				m_copydata(q->q_src_m, enccrd->crd_inject,
				    8, (caddr_t)ctx.pc_iv);
			else if (crp->crp_flags & CRYPTO_F_IOV)
				cuio_copydata(q->q_src_io,
				    enccrd->crd_inject, 8,
				    (caddr_t)ctx.pc_iv);
		}

		ctx.pc_deskey[0] = ses->ses_deskey[0];
		ctx.pc_deskey[1] = ses->ses_deskey[1];
		ctx.pc_deskey[2] = ses->ses_deskey[2];
		ctx.pc_deskey[3] = ses->ses_deskey[3];
		ctx.pc_deskey[4] = ses->ses_deskey[4];
		ctx.pc_deskey[5] = ses->ses_deskey[5];
		SWAP32(ctx.pc_iv[0]);
		SWAP32(ctx.pc_iv[1]);
	}

	if (maccrd) {
		macoffset = maccrd->crd_skip;

		if (maccrd->crd_alg == CRYPTO_MD5_HMAC)
			ctx.pc_flags |= htole16(UBS_PKTCTX_AUTH_MD5);
		else
			ctx.pc_flags |= htole16(UBS_PKTCTX_AUTH_SHA1);

		for (i = 0; i < 5; i++) {
			ctx.pc_hminner[i] = ses->ses_hminner[i];
			ctx.pc_hmouter[i] = ses->ses_hmouter[i];

			HTOLE32(ctx.pc_hminner[i]);
			HTOLE32(ctx.pc_hmouter[i]);
		}
	}
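	/*
	 * Compute the packet layout the chip needs: sskip/dskip are the
	 * bytes to skip before hashing/encrypting, stheend/dtheend the
	 * corresponding lengths, and coffset the offset from the start of
	 * the authenticated data to the start of the encrypted data
	 * (stored below as a count of 32-bit words).
	 */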
	if (enccrd && maccrd) {
		/*
		 * ubsec cannot handle packets where the end of encryption
		 * and authentication are not the same, or where the
		 * encrypted part begins before the authenticated part.
		 */
		if ((encoffset + enccrd->crd_len) !=
		    (macoffset + maccrd->crd_len)) {
			ubsecstats.hst_lenmismatch++;
			err = EINVAL;
			goto errout;
		}
		if (enccrd->crd_skip < maccrd->crd_skip) {
			ubsecstats.hst_skipmismatch++;
			err = EINVAL;
			goto errout;
		}
		sskip = maccrd->crd_skip;
		cpskip = dskip = enccrd->crd_skip;
		stheend = maccrd->crd_len;
		dtheend = enccrd->crd_len;
		coffset = enccrd->crd_skip - maccrd->crd_skip;
		cpoffset = cpskip + dtheend;
#ifdef UBSEC_DEBUG
		if (ubsec_debug) {
			printf("mac: skip %d, len %d, inject %d\n",
			    maccrd->crd_skip, maccrd->crd_len, maccrd->crd_inject);
			printf("enc: skip %d, len %d, inject %d\n",
			    enccrd->crd_skip, enccrd->crd_len, enccrd->crd_inject);
			printf("src: skip %d, len %d\n", sskip, stheend);
			printf("dst: skip %d, len %d\n", dskip, dtheend);
			printf("ubs: coffset %d, pktlen %d, cpskip %d, cpoffset %d\n",
			    coffset, stheend, cpskip, cpoffset);
		}
#endif
	} else {
		cpskip = dskip = sskip = macoffset + encoffset;
		dtheend = stheend = (enccrd)?enccrd->crd_len:maccrd->crd_len;
		cpoffset = cpskip + dtheend;
		coffset = 0;
	}
	ctx.pc_offset = htole16(coffset >> 2);

	if (bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT, &q->q_src_map)) {
		ubsecstats.hst_nomap++;
		err = ENOMEM;
		goto errout;
	}
	if (crp->crp_flags & CRYPTO_F_IMBUF) {
		if (bus_dmamap_load_mbuf(sc->sc_dmat, q->q_src_map,
		    q->q_src_m, ubsec_op_cb, &q->q_src, BUS_DMA_NOWAIT) != 0) {
			bus_dmamap_destroy(sc->sc_dmat, q->q_src_map);
			q->q_src_map = NULL;
			ubsecstats.hst_noload++;
			err = ENOMEM;
			goto errout;
		}
	} else if (crp->crp_flags & CRYPTO_F_IOV) {
		if (bus_dmamap_load_uio(sc->sc_dmat, q->q_src_map,
		    q->q_src_io, ubsec_op_cb, &q->q_src, BUS_DMA_NOWAIT) != 0) {
			bus_dmamap_destroy(sc->sc_dmat, q->q_src_map);
			q->q_src_map = NULL;
			ubsecstats.hst_noload++;
			err = ENOMEM;
			goto errout;
		}
	}
	nicealign = ubsec_dmamap_aligned(&q->q_src);

	dmap->d_dma->d_mcr.mcr_pktlen = htole16(stheend);

#ifdef UBSEC_DEBUG
	if (ubsec_debug)
		printf("src skip: %d nicealign: %u\n", sskip, nicealign);
#endif
	for (i = j = 0; i < q->q_src_nsegs; i++) {
		struct ubsec_pktbuf *pb;
		bus_size_t packl = q->q_src_segs[i].ds_len;
		bus_addr_t packp = q->q_src_segs[i].ds_addr;

		if (sskip >= packl) {
			sskip -= packl;
			continue;
		}

		packl -= sskip;
		packp += sskip;
		sskip = 0;

		if (packl > 0xfffc) {
			err = EIO;
			goto errout;
		}

		if (j == 0)
			pb = &dmap->d_dma->d_mcr.mcr_ipktbuf;
		else
			pb = &dmap->d_dma->d_sbuf[j - 1];

		pb->pb_addr = htole32(packp);

		if (stheend) {
			if (packl > stheend) {
				pb->pb_len = htole32(stheend);
				stheend = 0;
			} else {
				pb->pb_len = htole32(packl);
				stheend -= packl;
			}
		} else
			pb->pb_len = htole32(packl);

		if ((i + 1) == q->q_src_nsegs)
			pb->pb_next = 0;
		else
			pb->pb_next = htole32(dmap->d_alloc.dma_paddr +
			    offsetof(struct ubsec_dmachunk, d_sbuf[j]));
		j++;
	}
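	/*
	 * Set up the output packet buffer chain.  A MAC-only op produces
	 * no output data, so its chain points straight at the MAC buffer;
	 * otherwise the destination segments are chained like the source
	 * segments above.
	 */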
	if (enccrd == NULL && maccrd != NULL) {
		dmap->d_dma->d_mcr.mcr_opktbuf.pb_addr = 0;
		dmap->d_dma->d_mcr.mcr_opktbuf.pb_len = 0;
		dmap->d_dma->d_mcr.mcr_opktbuf.pb_next = htole32(dmap->d_alloc.dma_paddr +
		    offsetof(struct ubsec_dmachunk, d_macbuf[0]));
#ifdef UBSEC_DEBUG
		if (ubsec_debug)
			printf("opkt: %x %x %x\n",
			    dmap->d_dma->d_mcr.mcr_opktbuf.pb_addr,
			    dmap->d_dma->d_mcr.mcr_opktbuf.pb_len,
			    dmap->d_dma->d_mcr.mcr_opktbuf.pb_next);
#endif
	} else {
		if (crp->crp_flags & CRYPTO_F_IOV) {
			if (!nicealign) {
				ubsecstats.hst_iovmisaligned++;
				err = EINVAL;
				goto errout;
			}
			if (bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT,
			    &q->q_dst_map)) {
				ubsecstats.hst_nomap++;
				err = ENOMEM;
				goto errout;
			}
			if (bus_dmamap_load_uio(sc->sc_dmat, q->q_dst_map,
			    q->q_dst_io, ubsec_op_cb, &q->q_dst, BUS_DMA_NOWAIT) != 0) {
				bus_dmamap_destroy(sc->sc_dmat, q->q_dst_map);
				q->q_dst_map = NULL;
				ubsecstats.hst_noload++;
				err = ENOMEM;
				goto errout;
			}
		} else if (crp->crp_flags & CRYPTO_F_IMBUF) {
			if (nicealign) {
				q->q_dst = q->q_src;
			} else {
				int totlen, len;
				struct mbuf *m, *top, **mp;

				ubsecstats.hst_unaligned++;
				totlen = q->q_src_mapsize;
				if (q->q_src_m->m_flags & M_PKTHDR) {
					len = MHLEN;
					MGETHDR(m, MB_DONTWAIT, MT_DATA);
					if (m && !m_dup_pkthdr(m, q->q_src_m, MB_DONTWAIT)) {
						m_free(m);
						m = NULL;
					}
				} else {
					len = MLEN;
					MGET(m, MB_DONTWAIT, MT_DATA);
				}
				if (m == NULL) {
					ubsecstats.hst_nombuf++;
					err = sc->sc_nqueue ? ERESTART : ENOMEM;
					goto errout;
				}
				if (totlen >= MINCLSIZE) {
					MCLGET(m, MB_DONTWAIT);
					if ((m->m_flags & M_EXT) == 0) {
						m_free(m);
						ubsecstats.hst_nomcl++;
						err = sc->sc_nqueue ? ERESTART : ENOMEM;
						goto errout;
					}
					len = MCLBYTES;
				}
				m->m_len = len;
				top = NULL;
				mp = &top;

				while (totlen > 0) {
					if (top) {
						MGET(m, MB_DONTWAIT, MT_DATA);
						if (m == NULL) {
							m_freem(top);
							ubsecstats.hst_nombuf++;
							err = sc->sc_nqueue ? ERESTART : ENOMEM;
							goto errout;
						}
						len = MLEN;
					}
					if (top && totlen >= MINCLSIZE) {
						MCLGET(m, MB_DONTWAIT);
						if ((m->m_flags & M_EXT) == 0) {
							*mp = m;
							m_freem(top);
							ubsecstats.hst_nomcl++;
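							/* retry later if other ops are queued; otherwise fail */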
							err = sc->sc_nqueue ? ERESTART : ENOMEM;
							goto errout;
						}
						len = MCLBYTES;
					}
					m->m_len = len = min(totlen, len);
					totlen -= len;
					*mp = m;
					mp = &m->m_next;
				}
				q->q_dst_m = top;
				ubsec_mcopy(q->q_src_m, q->q_dst_m,
				    cpskip, cpoffset);
				if (bus_dmamap_create(sc->sc_dmat,
				    BUS_DMA_NOWAIT, &q->q_dst_map) != 0) {
					ubsecstats.hst_nomap++;
					err = ENOMEM;
					goto errout;
				}
				if (bus_dmamap_load_mbuf(sc->sc_dmat,
				    q->q_dst_map, q->q_dst_m,
				    ubsec_op_cb, &q->q_dst,
				    BUS_DMA_NOWAIT) != 0) {
					bus_dmamap_destroy(sc->sc_dmat,
					    q->q_dst_map);
					q->q_dst_map = NULL;
					ubsecstats.hst_noload++;
					err = ENOMEM;
					goto errout;
				}
			}
		} else {
			ubsecstats.hst_badflags++;
			err = EINVAL;
			goto errout;
		}

#ifdef UBSEC_DEBUG
		if (ubsec_debug)
			printf("dst skip: %d\n", dskip);
#endif
		for (i = j = 0; i < q->q_dst_nsegs; i++) {
			struct ubsec_pktbuf *pb;
			bus_size_t packl = q->q_dst_segs[i].ds_len;
			bus_addr_t packp = q->q_dst_segs[i].ds_addr;

			if (dskip >= packl) {
				dskip -= packl;
				continue;
			}

			packl -= dskip;
			packp += dskip;
			dskip = 0;

			if (packl > 0xfffc) {
				err = EIO;
				goto errout;
			}

			if (j == 0)
				pb = &dmap->d_dma->d_mcr.mcr_opktbuf;
			else
				pb = &dmap->d_dma->d_dbuf[j - 1];

			pb->pb_addr = htole32(packp);

			if (dtheend) {
				if (packl > dtheend) {
					pb->pb_len = htole32(dtheend);
					dtheend = 0;
				} else {
					pb->pb_len = htole32(packl);
					dtheend -= packl;
				}
			} else
				pb->pb_len = htole32(packl);

			if ((i + 1) == q->q_dst_nsegs) {
				if (maccrd)
					pb->pb_next = htole32(dmap->d_alloc.dma_paddr +
					    offsetof(struct ubsec_dmachunk, d_macbuf[0]));
				else
					pb->pb_next = 0;
			} else
				pb->pb_next = htole32(dmap->d_alloc.dma_paddr +
				    offsetof(struct ubsec_dmachunk, d_dbuf[j]));
			j++;
		}
	}

	dmap->d_dma->d_mcr.mcr_cmdctxp = htole32(dmap->d_alloc.dma_paddr +
	    offsetof(struct ubsec_dmachunk, d_ctx));

	if (sc->sc_flags & UBS_FLAGS_LONGCTX) {
		struct ubsec_pktctx_long *ctxl;

		ctxl = (struct ubsec_pktctx_long *)(dmap->d_alloc.dma_vaddr +
		    offsetof(struct ubsec_dmachunk, d_ctx));

		/* transform small context into long context */
		ctxl->pc_len = htole16(sizeof(struct ubsec_pktctx_long));
		ctxl->pc_type = htole16(UBS_PKTCTX_TYPE_IPSEC);
		ctxl->pc_flags = ctx.pc_flags;
		ctxl->pc_offset = ctx.pc_offset;
		for (i = 0; i < 6; i++)
			ctxl->pc_deskey[i] = ctx.pc_deskey[i];
		for (i = 0; i < 5; i++)
			ctxl->pc_hminner[i] = ctx.pc_hminner[i];
		for (i = 0; i < 5; i++)
			ctxl->pc_hmouter[i] = ctx.pc_hmouter[i];
		ctxl->pc_iv[0] = ctx.pc_iv[0];
		ctxl->pc_iv[1] = ctx.pc_iv[1];
	} else
		bcopy(&ctx, dmap->d_alloc.dma_vaddr +
		    offsetof(struct ubsec_dmachunk, d_ctx),
		    sizeof(struct ubsec_pktctx));

	s = splimp();
	SIMPLEQ_INSERT_TAIL(&sc->sc_queue, q, q_next);
	sc->sc_nqueue++;
	ubsecstats.hst_ipackets++;
	ubsecstats.hst_ibytes += dmap->d_alloc.dma_size;
	if ((hint & CRYPTO_HINT_MORE) == 0 || sc->sc_nqueue >= UBS_MAX_AGGR)
		ubsec_feed(sc);
	splx(s);
	return (0);

errout:
	if (q != NULL) {
		if ((q->q_dst_m != NULL) && (q->q_src_m != q->q_dst_m))
			m_freem(q->q_dst_m);

		if (q->q_dst_map != NULL && q->q_dst_map != q->q_src_map) {
			bus_dmamap_unload(sc->sc_dmat, q->q_dst_map);
			bus_dmamap_destroy(sc->sc_dmat, q->q_dst_map);
		}
		if (q->q_src_map != NULL) {
			bus_dmamap_unload(sc->sc_dmat, q->q_src_map);
			bus_dmamap_destroy(sc->sc_dmat, q->q_src_map);
		}

		s = splimp();
		SIMPLEQ_INSERT_TAIL(&sc->sc_freequeue, q, q_next);
		splx(s);
	}
	if (err != ERESTART) {
		crp->crp_etype = err;
		crypto_done(crp);
	} else {
		sc->sc_needwakeup |= CRYPTO_SYMQ;
	}
	return (err);
}

static void
ubsec_callback(struct ubsec_softc *sc, struct ubsec_q *q)
{
	struct cryptop *crp = (struct cryptop *)q->q_crp;
	struct cryptodesc *crd;
	struct ubsec_dma *dmap = q->q_dma;

	ubsecstats.hst_opackets++;
	ubsecstats.hst_obytes += dmap->d_alloc.dma_size;

	ubsec_dma_sync(&dmap->d_alloc,
	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	if (q->q_dst_map != NULL && q->q_dst_map != q->q_src_map) {
		bus_dmamap_sync(sc->sc_dmat, q->q_dst_map,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, q->q_dst_map);
		bus_dmamap_destroy(sc->sc_dmat, q->q_dst_map);
	}
	bus_dmamap_sync(sc->sc_dmat, q->q_src_map, BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->sc_dmat, q->q_src_map);
	bus_dmamap_destroy(sc->sc_dmat, q->q_src_map);

	if ((crp->crp_flags & CRYPTO_F_IMBUF) && (q->q_src_m != q->q_dst_m)) {
		m_freem(q->q_src_m);
		crp->crp_buf = (caddr_t)q->q_dst_m;
	}
	ubsecstats.hst_obytes += ((struct mbuf *)crp->crp_buf)->m_len;

	/* copy out IV for future use */
	if (q->q_flags & UBSEC_QFLAGS_COPYOUTIV) {
		for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
			if (crd->crd_alg != CRYPTO_DES_CBC &&
			    crd->crd_alg != CRYPTO_3DES_CBC)
				continue;
			if (crp->crp_flags & CRYPTO_F_IMBUF)
				m_copydata((struct mbuf *)crp->crp_buf,
				    crd->crd_skip + crd->crd_len - 8, 8,
				    (caddr_t)sc->sc_sessions[q->q_sesn].ses_iv);
			else if (crp->crp_flags & CRYPTO_F_IOV) {
				cuio_copydata((struct uio *)crp->crp_buf,
				    crd->crd_skip + crd->crd_len - 8, 8,
				    (caddr_t)sc->sc_sessions[q->q_sesn].ses_iv);
			}
			break;
		}
	}

	for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
		if (crd->crd_alg != CRYPTO_MD5_HMAC &&
		    crd->crd_alg != CRYPTO_SHA1_HMAC)
			continue;
		if (crp->crp_flags & CRYPTO_F_IMBUF)
			m_copyback((struct mbuf *)crp->crp_buf,
			    crd->crd_inject, 12,
			    (caddr_t)dmap->d_dma->d_macbuf);
		else if (crp->crp_flags & CRYPTO_F_IOV && crp->crp_mac)
			bcopy((caddr_t)dmap->d_dma->d_macbuf,
			    crp->crp_mac, 12);
		break;
	}
	SIMPLEQ_INSERT_TAIL(&sc->sc_freequeue, q, q_next);
	crypto_done(crp);
}

static void
ubsec_mcopy(struct mbuf *srcm, struct mbuf *dstm, int hoffset, int toffset)
{
	int i, j, dlen, slen;
	caddr_t dptr, sptr;

	j = 0;
	sptr = srcm->m_data;
	slen = srcm->m_len;
	dptr = dstm->m_data;
	dlen = dstm->m_len;

	while (1) {
		for (i = 0; i < min(slen, dlen); i++) {
			if (j < hoffset || j >= toffset)
				*dptr++ = *sptr++;
			slen--;
			dlen--;
			j++;
		}
		if (slen == 0) {
			srcm = srcm->m_next;
			if (srcm == NULL)
				return;
			sptr = srcm->m_data;
			slen = srcm->m_len;
		}
		if (dlen == 0) {
			dstm = dstm->m_next;
			if (dstm == NULL)
				return;
			dptr = dstm->m_data;
			dlen = dstm->m_len;
		}
	}
}

/*
 * feed the key generator, must be called at splimp() or higher.
 */
static int
ubsec_feed2(struct ubsec_softc *sc)
{
	struct ubsec_q2 *q;

	while (!SIMPLEQ_EMPTY(&sc->sc_queue2)) {
		if (READ_REG(sc, BS_STAT) & BS_STAT_MCR2_FULL)
			break;
		q = SIMPLEQ_FIRST(&sc->sc_queue2);

		ubsec_dma_sync(&q->q_mcr,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		ubsec_dma_sync(&q->q_ctx, BUS_DMASYNC_PREWRITE);

		WRITE_REG(sc, BS_MCR2, q->q_mcr.dma_paddr);
		SIMPLEQ_REMOVE_HEAD(&sc->sc_queue2, q, q_next);
		--sc->sc_nqueue2;
		SIMPLEQ_INSERT_TAIL(&sc->sc_qchip2, q, q_next);
	}
	return (0);
}

/*
 * Callback for handling random numbers
 */
static void
ubsec_callback2(struct ubsec_softc *sc, struct ubsec_q2 *q)
{
	struct cryptkop *krp;
	struct ubsec_ctx_keyop *ctx;

	ctx = (struct ubsec_ctx_keyop *)q->q_ctx.dma_vaddr;
	ubsec_dma_sync(&q->q_ctx, BUS_DMASYNC_POSTWRITE);

	switch (q->q_type) {
#ifndef UBSEC_NO_RNG
	case UBS_CTXOP_RNGBYPASS: {
		struct ubsec_q2_rng *rng = (struct ubsec_q2_rng *)q;

		ubsec_dma_sync(&rng->rng_buf, BUS_DMASYNC_POSTREAD);
		(*sc->sc_harvest)(sc->sc_rndtest,
			rng->rng_buf.dma_vaddr,
			UBSEC_RNG_BUFSIZ*sizeof (u_int32_t));
		rng->rng_used = 0;
		callout_reset(&sc->sc_rngto, sc->sc_rnghz, ubsec_rng, sc);
		break;
	}
#endif
	case UBS_CTXOP_MODEXP: {
		struct ubsec_q2_modexp *me = (struct ubsec_q2_modexp *)q;
		u_int rlen, clen;

		krp = me->me_krp;
		rlen = (me->me_modbits + 7) / 8;
		clen = (krp->krp_param[krp->krp_iparams].crp_nbits + 7) / 8;

		ubsec_dma_sync(&me->me_M, BUS_DMASYNC_POSTWRITE);
		ubsec_dma_sync(&me->me_E, BUS_DMASYNC_POSTWRITE);
		ubsec_dma_sync(&me->me_C, BUS_DMASYNC_POSTREAD);
		ubsec_dma_sync(&me->me_epb, BUS_DMASYNC_POSTWRITE);

		if (clen < rlen)
			krp->krp_status = E2BIG;
		else {
			if (sc->sc_flags & UBS_FLAGS_HWNORM) {
				bzero(krp->krp_param[krp->krp_iparams].crp_p,
				    (krp->krp_param[krp->krp_iparams].crp_nbits
					+ 7) / 8);
				bcopy(me->me_C.dma_vaddr,
				    krp->krp_param[krp->krp_iparams].crp_p,
				    (me->me_modbits + 7) / 8);
			} else
				ubsec_kshift_l(me->me_shiftbits,
				    me->me_C.dma_vaddr, me->me_normbits,
				    krp->krp_param[krp->krp_iparams].crp_p,
				    krp->krp_param[krp->krp_iparams].crp_nbits);
		}

		crypto_kdone(krp);

		/* bzero all potentially sensitive data */
		bzero(me->me_E.dma_vaddr, me->me_E.dma_size);
		bzero(me->me_M.dma_vaddr, me->me_M.dma_size);
		bzero(me->me_C.dma_vaddr, me->me_C.dma_size);
		bzero(me->me_q.q_ctx.dma_vaddr, me->me_q.q_ctx.dma_size);

		/* Can't free here, so put us on the free list. */
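		/* ubsec_kprocess() reclaims entries from sc_q2free the next time it runs. */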
		SIMPLEQ_INSERT_TAIL(&sc->sc_q2free, &me->me_q, q_next);
		break;
	}
	case UBS_CTXOP_RSAPRIV: {
		struct ubsec_q2_rsapriv *rp = (struct ubsec_q2_rsapriv *)q;
		u_int len;

		krp = rp->rpr_krp;
		ubsec_dma_sync(&rp->rpr_msgin, BUS_DMASYNC_POSTWRITE);
		ubsec_dma_sync(&rp->rpr_msgout, BUS_DMASYNC_POSTREAD);

		len = (krp->krp_param[UBS_RSAPRIV_PAR_MSGOUT].crp_nbits + 7) / 8;
		bcopy(rp->rpr_msgout.dma_vaddr,
		    krp->krp_param[UBS_RSAPRIV_PAR_MSGOUT].crp_p, len);

		crypto_kdone(krp);

		bzero(rp->rpr_msgin.dma_vaddr, rp->rpr_msgin.dma_size);
		bzero(rp->rpr_msgout.dma_vaddr, rp->rpr_msgout.dma_size);
		bzero(rp->rpr_q.q_ctx.dma_vaddr, rp->rpr_q.q_ctx.dma_size);

		/* Can't free here, so put us on the free list. */
		SIMPLEQ_INSERT_TAIL(&sc->sc_q2free, &rp->rpr_q, q_next);
		break;
	}
	default:
		device_printf(sc->sc_dev, "unknown ctx op: %x\n",
		    letoh16(ctx->ctx_op));
		break;
	}
}

#ifndef UBSEC_NO_RNG
static void
ubsec_rng(void *vsc)
{
	struct ubsec_softc *sc = vsc;
	struct ubsec_q2_rng *rng = &sc->sc_rng;
	struct ubsec_mcr *mcr;
	struct ubsec_ctx_rngbypass *ctx;
	int s;

	s = splimp();
	if (rng->rng_used) {
		splx(s);
		return;
	}
	sc->sc_nqueue2++;
	if (sc->sc_nqueue2 >= UBS_MAX_NQUEUE)
		goto out;

	mcr = (struct ubsec_mcr *)rng->rng_q.q_mcr.dma_vaddr;
	ctx = (struct ubsec_ctx_rngbypass *)rng->rng_q.q_ctx.dma_vaddr;

	mcr->mcr_pkts = htole16(1);
	mcr->mcr_flags = 0;
	mcr->mcr_cmdctxp = htole32(rng->rng_q.q_ctx.dma_paddr);
	mcr->mcr_ipktbuf.pb_addr = mcr->mcr_ipktbuf.pb_next = 0;
	mcr->mcr_ipktbuf.pb_len = 0;
	mcr->mcr_reserved = mcr->mcr_pktlen = 0;
	mcr->mcr_opktbuf.pb_addr = htole32(rng->rng_buf.dma_paddr);
	mcr->mcr_opktbuf.pb_len = htole32(((sizeof(u_int32_t) * UBSEC_RNG_BUFSIZ)) &
	    UBS_PKTBUF_LEN);
	mcr->mcr_opktbuf.pb_next = 0;

	ctx->rbp_len = htole16(sizeof(struct ubsec_ctx_rngbypass));
	ctx->rbp_op = htole16(UBS_CTXOP_RNGBYPASS);
	rng->rng_q.q_type = UBS_CTXOP_RNGBYPASS;

	ubsec_dma_sync(&rng->rng_buf, BUS_DMASYNC_PREREAD);

	SIMPLEQ_INSERT_TAIL(&sc->sc_queue2, &rng->rng_q, q_next);
	rng->rng_used = 1;
	ubsec_feed2(sc);
	ubsecstats.hst_rng++;
	splx(s);

	return;

out:
	/*
	 * Something weird happened, generate our own call back.
	 */
	sc->sc_nqueue2--;
	splx(s);
	callout_reset(&sc->sc_rngto, sc->sc_rnghz, ubsec_rng, sc);
}
#endif /* UBSEC_NO_RNG */

static void
ubsec_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	bus_addr_t *paddr = (bus_addr_t*) arg;
	*paddr = segs->ds_addr;
}

static int
ubsec_dma_malloc(
	struct ubsec_softc *sc,
	bus_size_t size,
	struct ubsec_dma_alloc *dma,
	int mapflags
)
{
	int r;

	/* XXX could specify sc_dmat as parent but that just adds overhead */
	r = bus_dma_tag_create(NULL,			/* parent */
			       1, 0,			/* alignment, bounds */
			       BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
			       BUS_SPACE_MAXADDR,	/* highaddr */
			       NULL, NULL,		/* filter, filterarg */
			       size,			/* maxsize */
			       1,			/* nsegments */
			       size,			/* maxsegsize */
			       BUS_DMA_ALLOCNOW,	/* flags */
			       &dma->dma_tag);
	if (r != 0) {
		device_printf(sc->sc_dev, "ubsec_dma_malloc: "
			"bus_dma_tag_create failed; error %u\n", r);
		goto fail_0;
	}

	r = bus_dmamap_create(dma->dma_tag, BUS_DMA_NOWAIT, &dma->dma_map);
	if (r != 0) {
		device_printf(sc->sc_dev, "ubsec_dma_malloc: "
			"bus_dmamap_create failed; error %u\n", r);
		goto fail_1;
	}

	r = bus_dmamem_alloc(dma->dma_tag, (void**) &dma->dma_vaddr,
			     BUS_DMA_NOWAIT, &dma->dma_map);
	if (r != 0) {
		device_printf(sc->sc_dev, "ubsec_dma_malloc: "
			"bus_dmamem_alloc failed; size %u, error %u\n",
			size, r);
		goto fail_2;
	}

	r = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
			    size,
			    ubsec_dmamap_cb,
			    &dma->dma_paddr,
			    mapflags | BUS_DMA_NOWAIT);
	if (r != 0) {
		device_printf(sc->sc_dev, "ubsec_dma_malloc: "
			"bus_dmamap_load failed; error %u\n", r);
		goto fail_3;
	}

	dma->dma_size = size;
	return (0);

fail_3:
	bus_dmamap_unload(dma->dma_tag, dma->dma_map);
fail_2:
	bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
fail_1:
	bus_dmamap_destroy(dma->dma_tag, dma->dma_map);
	bus_dma_tag_destroy(dma->dma_tag);
fail_0:
	dma->dma_map = NULL;
	dma->dma_tag = NULL;
	return (r);
}

static void
ubsec_dma_free(struct ubsec_softc *sc, struct ubsec_dma_alloc *dma)
{
	bus_dmamap_unload(dma->dma_tag, dma->dma_map);
	bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
	bus_dmamap_destroy(dma->dma_tag, dma->dma_map);
	bus_dma_tag_destroy(dma->dma_tag);
}

/*
 * Resets the board.  Values in the registers are left as is
 * from the reset (i.e. initial values are assigned elsewhere).
 */
static void
ubsec_reset_board(struct ubsec_softc *sc)
{
	volatile u_int32_t ctrl;

	ctrl = READ_REG(sc, BS_CTRL);
	ctrl |= BS_CTRL_RESET;
	WRITE_REG(sc, BS_CTRL, ctrl);

	/*
	 * Wait approx. 30 PCI clocks = 900 ns = 0.9 us
	 */
1900 /* 1901 * Resets the board. Values in the registers are left as-is 1902 * from the reset (i.e. initial values are assigned elsewhere). 1903 */ 1904 static void 1905 ubsec_reset_board(struct ubsec_softc *sc) 1906 { 1907 volatile u_int32_t ctrl; 1908 1909 ctrl = READ_REG(sc, BS_CTRL); 1910 ctrl |= BS_CTRL_RESET; 1911 WRITE_REG(sc, BS_CTRL, ctrl); 1912 1913 /* 1914 * Wait approx. 30 PCI clocks = 900 ns = 0.9 us 1915 */ 1916 DELAY(10); 1917 } 1918 1919 /* 1920 * Init Broadcom registers 1921 */ 1922 static void 1923 ubsec_init_board(struct ubsec_softc *sc) 1924 { 1925 u_int32_t ctrl; 1926 1927 ctrl = READ_REG(sc, BS_CTRL); 1928 ctrl &= ~(BS_CTRL_BE32 | BS_CTRL_BE64); 1929 ctrl |= BS_CTRL_LITTLE_ENDIAN | BS_CTRL_MCR1INT; 1930 1931 if (sc->sc_flags & (UBS_FLAGS_KEY|UBS_FLAGS_RNG)) 1932 ctrl |= BS_CTRL_MCR2INT; 1933 else 1934 ctrl &= ~BS_CTRL_MCR2INT; 1935 1936 if (sc->sc_flags & UBS_FLAGS_HWNORM) 1937 ctrl &= ~BS_CTRL_SWNORM; 1938 1939 WRITE_REG(sc, BS_CTRL, ctrl); 1940 } 1941 1942 /* 1943 * Init Broadcom PCI registers 1944 */ 1945 static void 1946 ubsec_init_pciregs(device_t dev) 1947 { 1948 #if 0 1949 u_int32_t misc; 1950 1951 misc = pci_conf_read(pc, pa->pa_tag, BS_RTY_TOUT); 1952 misc = (misc & ~(UBS_PCI_RTY_MASK << UBS_PCI_RTY_SHIFT)) 1953 | ((UBS_DEF_RTY & 0xff) << UBS_PCI_RTY_SHIFT); 1954 misc = (misc & ~(UBS_PCI_TOUT_MASK << UBS_PCI_TOUT_SHIFT)) 1955 | ((UBS_DEF_TOUT & 0xff) << UBS_PCI_TOUT_SHIFT); 1956 pci_conf_write(pc, pa->pa_tag, BS_RTY_TOUT, misc); 1957 #endif 1958 1959 /* 1960 * This sets the cache line size to 1, which forces the 1961 * BCM58xx chip to do burst read/writes only. 1962 * Cache line read/writes are too slow. 1963 */ 1964 pci_write_config(dev, PCIR_CACHELNSZ, UBS_DEF_CACHELINE, 1); 1965 } 1966 1967 /* 1968 * Clean up after a chip crash. 1969 * It is assumed that the caller is in splimp(). 1970 */ 1971 static void 1972 ubsec_cleanchip(struct ubsec_softc *sc) 1973 { 1974 struct ubsec_q *q; 1975 1976 while (!SIMPLEQ_EMPTY(&sc->sc_qchip)) { 1977 q = SIMPLEQ_FIRST(&sc->sc_qchip); 1978 SIMPLEQ_REMOVE_HEAD(&sc->sc_qchip, q, q_next); 1979 ubsec_free_q(sc, q); 1980 } 1981 sc->sc_nqchip = 0; 1982 } 1983 1984 /* 1985 * Free a ubsec_q. 1986 * It is assumed that the caller is within splimp(). 1987 */ 1988 static int 1989 ubsec_free_q(struct ubsec_softc *sc, struct ubsec_q *q) 1990 { 1991 struct ubsec_q *q2; 1992 struct cryptop *crp; 1993 int npkts; 1994 int i; 1995 1996 npkts = q->q_nstacked_mcrs; 1997 1998 for (i = 0; i < npkts; i++) { 1999 if(q->q_stacked_mcr[i]) { 2000 q2 = q->q_stacked_mcr[i]; 2001 2002 if ((q2->q_dst_m != NULL) && (q2->q_src_m != q2->q_dst_m)) 2003 m_freem(q2->q_dst_m); 2004 2005 crp = (struct cryptop *)q2->q_crp; 2006 2007 SIMPLEQ_INSERT_TAIL(&sc->sc_freequeue, q2, q_next); 2008 2009 crp->crp_etype = EFAULT; 2010 crypto_done(crp); 2011 } else { 2012 break; 2013 } 2014 } 2015 2016 /* 2017 * Free header MCR 2018 */ 2019 if ((q->q_dst_m != NULL) && (q->q_src_m != q->q_dst_m)) 2020 m_freem(q->q_dst_m); 2021 2022 crp = (struct cryptop *)q->q_crp; 2023 2024 SIMPLEQ_INSERT_TAIL(&sc->sc_freequeue, q, q_next); 2025 2026 crp->crp_etype = EFAULT; 2027 crypto_done(crp); 2028 return(0); 2029 } 2030 2031 /* 2032 * Routine to reset the chip and clean up.
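 * (Resets and reinitializes the board, then fails any requests that were
 * still on the chip with EFAULT.)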
2033 * It is assumed that the caller is in splimp() 2034 */ 2035 static void 2036 ubsec_totalreset(struct ubsec_softc *sc) 2037 { 2038 ubsec_reset_board(sc); 2039 ubsec_init_board(sc); 2040 ubsec_cleanchip(sc); 2041 } 2042 2043 static int 2044 ubsec_dmamap_aligned(struct ubsec_operand *op) 2045 { 2046 int i; 2047 2048 for (i = 0; i < op->nsegs; i++) { 2049 if (op->segs[i].ds_addr & 3) 2050 return (0); 2051 if ((i != (op->nsegs - 1)) && 2052 (op->segs[i].ds_len & 3)) 2053 return (0); 2054 } 2055 return (1); 2056 } 2057 2058 static void 2059 ubsec_kfree(struct ubsec_softc *sc, struct ubsec_q2 *q) 2060 { 2061 switch (q->q_type) { 2062 case UBS_CTXOP_MODEXP: { 2063 struct ubsec_q2_modexp *me = (struct ubsec_q2_modexp *)q; 2064 2065 ubsec_dma_free(sc, &me->me_q.q_mcr); 2066 ubsec_dma_free(sc, &me->me_q.q_ctx); 2067 ubsec_dma_free(sc, &me->me_M); 2068 ubsec_dma_free(sc, &me->me_E); 2069 ubsec_dma_free(sc, &me->me_C); 2070 ubsec_dma_free(sc, &me->me_epb); 2071 free(me, M_DEVBUF); 2072 break; 2073 } 2074 case UBS_CTXOP_RSAPRIV: { 2075 struct ubsec_q2_rsapriv *rp = (struct ubsec_q2_rsapriv *)q; 2076 2077 ubsec_dma_free(sc, &rp->rpr_q.q_mcr); 2078 ubsec_dma_free(sc, &rp->rpr_q.q_ctx); 2079 ubsec_dma_free(sc, &rp->rpr_msgin); 2080 ubsec_dma_free(sc, &rp->rpr_msgout); 2081 free(rp, M_DEVBUF); 2082 break; 2083 } 2084 default: 2085 device_printf(sc->sc_dev, "invalid kfree 0x%x\n", q->q_type); 2086 break; 2087 } 2088 } 2089 2090 static int 2091 ubsec_kprocess(void *arg, struct cryptkop *krp, int hint) 2092 { 2093 struct ubsec_softc *sc = arg; 2094 int r; 2095 2096 if (krp == NULL || krp->krp_callback == NULL) 2097 return (EINVAL); 2098 2099 while (!SIMPLEQ_EMPTY(&sc->sc_q2free)) { 2100 struct ubsec_q2 *q; 2101 2102 q = SIMPLEQ_FIRST(&sc->sc_q2free); 2103 SIMPLEQ_REMOVE_HEAD(&sc->sc_q2free, q, q_next); 2104 ubsec_kfree(sc, q); 2105 } 2106 2107 switch (krp->krp_op) { 2108 case CRK_MOD_EXP: 2109 if (sc->sc_flags & UBS_FLAGS_HWNORM) 2110 r = ubsec_kprocess_modexp_hw(sc, krp, hint); 2111 else 2112 r = ubsec_kprocess_modexp_sw(sc, krp, hint); 2113 break; 2114 case CRK_MOD_EXP_CRT: 2115 return (ubsec_kprocess_rsapriv(sc, krp, hint)); 2116 default: 2117 device_printf(sc->sc_dev, "kprocess: invalid op 0x%x\n", 2118 krp->krp_op); 2119 krp->krp_status = EOPNOTSUPP; 2120 crypto_kdone(krp); 2121 return (0); 2122 } 2123 return (0); /* silence compiler */ 2124 } 2125 2126 /* 2127 * Start computation of cr[C] = (cr[M] ^ cr[E]) mod cr[N] (sw normalization) 2128 */ 2129 static int 2130 ubsec_kprocess_modexp_sw(struct ubsec_softc *sc, struct cryptkop *krp, int hint) 2131 { 2132 struct ubsec_q2_modexp *me; 2133 struct ubsec_mcr *mcr; 2134 struct ubsec_ctx_modexp *ctx; 2135 struct ubsec_pktbuf *epb; 2136 int s, err = 0; 2137 u_int nbits, normbits, mbits, shiftbits, ebits; 2138 2139 me = malloc(sizeof *me, M_DEVBUF, M_INTWAIT | M_ZERO); 2140 me->me_krp = krp; 2141 me->me_q.q_type = UBS_CTXOP_MODEXP; 2142 2143 nbits = ubsec_ksigbits(&krp->krp_param[UBS_MODEXP_PAR_N]); 2144 if (nbits <= 512) 2145 normbits = 512; 2146 else if (nbits <= 768) 2147 normbits = 768; 2148 else if (nbits <= 1024) 2149 normbits = 1024; 2150 else if (sc->sc_flags & UBS_FLAGS_BIGKEY && nbits <= 1536) 2151 normbits = 1536; 2152 else if (sc->sc_flags & UBS_FLAGS_BIGKEY && nbits <= 2048) 2153 normbits = 2048; 2154 else { 2155 err = E2BIG; 2156 goto errout; 2157 } 2158 2159 shiftbits = normbits - nbits; 2160 2161 me->me_modbits = nbits; 2162 me->me_shiftbits = shiftbits; 2163 me->me_normbits = normbits; 2164 2165 /* Sanity check: result bits must be >= true 
modulus bits. */ 2166 if (krp->krp_param[krp->krp_iparams].crp_nbits < nbits) { 2167 err = ERANGE; 2168 goto errout; 2169 } 2170 2171 if (ubsec_dma_malloc(sc, sizeof(struct ubsec_mcr), 2172 &me->me_q.q_mcr, 0)) { 2173 err = ENOMEM; 2174 goto errout; 2175 } 2176 mcr = (struct ubsec_mcr *)me->me_q.q_mcr.dma_vaddr; 2177 2178 if (ubsec_dma_malloc(sc, sizeof(struct ubsec_ctx_modexp), 2179 &me->me_q.q_ctx, 0)) { 2180 err = ENOMEM; 2181 goto errout; 2182 } 2183 2184 mbits = ubsec_ksigbits(&krp->krp_param[UBS_MODEXP_PAR_M]); 2185 if (mbits > nbits) { 2186 err = E2BIG; 2187 goto errout; 2188 } 2189 if (ubsec_dma_malloc(sc, normbits / 8, &me->me_M, 0)) { 2190 err = ENOMEM; 2191 goto errout; 2192 } 2193 ubsec_kshift_r(shiftbits, 2194 krp->krp_param[UBS_MODEXP_PAR_M].crp_p, mbits, 2195 me->me_M.dma_vaddr, normbits); 2196 2197 if (ubsec_dma_malloc(sc, normbits / 8, &me->me_C, 0)) { 2198 err = ENOMEM; 2199 goto errout; 2200 } 2201 bzero(me->me_C.dma_vaddr, me->me_C.dma_size); 2202 2203 ebits = ubsec_ksigbits(&krp->krp_param[UBS_MODEXP_PAR_E]); 2204 if (ebits > nbits) { 2205 err = E2BIG; 2206 goto errout; 2207 } 2208 if (ubsec_dma_malloc(sc, normbits / 8, &me->me_E, 0)) { 2209 err = ENOMEM; 2210 goto errout; 2211 } 2212 ubsec_kshift_r(shiftbits, 2213 krp->krp_param[UBS_MODEXP_PAR_E].crp_p, ebits, 2214 me->me_E.dma_vaddr, normbits); 2215 2216 if (ubsec_dma_malloc(sc, sizeof(struct ubsec_pktbuf), 2217 &me->me_epb, 0)) { 2218 err = ENOMEM; 2219 goto errout; 2220 } 2221 epb = (struct ubsec_pktbuf *)me->me_epb.dma_vaddr; 2222 epb->pb_addr = htole32(me->me_E.dma_paddr); 2223 epb->pb_next = 0; 2224 epb->pb_len = htole32(normbits / 8); 2225 2226 #ifdef UBSEC_DEBUG 2227 if (ubsec_debug) { 2228 printf("Epb "); 2229 ubsec_dump_pb(epb); 2230 } 2231 #endif 2232 2233 mcr->mcr_pkts = htole16(1); 2234 mcr->mcr_flags = 0; 2235 mcr->mcr_cmdctxp = htole32(me->me_q.q_ctx.dma_paddr); 2236 mcr->mcr_reserved = 0; 2237 mcr->mcr_pktlen = 0; 2238 2239 mcr->mcr_ipktbuf.pb_addr = htole32(me->me_M.dma_paddr); 2240 mcr->mcr_ipktbuf.pb_len = htole32(normbits / 8); 2241 mcr->mcr_ipktbuf.pb_next = htole32(me->me_epb.dma_paddr); 2242 2243 mcr->mcr_opktbuf.pb_addr = htole32(me->me_C.dma_paddr); 2244 mcr->mcr_opktbuf.pb_next = 0; 2245 mcr->mcr_opktbuf.pb_len = htole32(normbits / 8); 2246 2247 #ifdef DIAGNOSTIC 2248 /* Misaligned output buffer will hang the chip. */ 2249 if ((letoh32(mcr->mcr_opktbuf.pb_addr) & 3) != 0) 2250 panic("%s: modexp invalid addr 0x%x\n", 2251 device_get_nameunit(sc->sc_dev), 2252 letoh32(mcr->mcr_opktbuf.pb_addr)); 2253 if ((letoh32(mcr->mcr_opktbuf.pb_len) & 3) != 0) 2254 panic("%s: modexp invalid len 0x%x\n", 2255 device_get_nameunit(sc->sc_dev), 2256 letoh32(mcr->mcr_opktbuf.pb_len)); 2257 #endif 2258 2259 ctx = (struct ubsec_ctx_modexp *)me->me_q.q_ctx.dma_vaddr; 2260 bzero(ctx, sizeof(*ctx)); 2261 ubsec_kshift_r(shiftbits, 2262 krp->krp_param[UBS_MODEXP_PAR_N].crp_p, nbits, 2263 ctx->me_N, normbits); 2264 ctx->me_len = htole16((normbits / 8) + (4 * sizeof(u_int16_t))); 2265 ctx->me_op = htole16(UBS_CTXOP_MODEXP); 2266 ctx->me_E_len = htole16(nbits); 2267 ctx->me_N_len = htole16(nbits); 2268 2269 #ifdef UBSEC_DEBUG 2270 if (ubsec_debug) { 2271 ubsec_dump_mcr(mcr); 2272 ubsec_dump_ctx2((struct ubsec_ctx_keyop *)ctx); 2273 } 2274 #endif 2275 2276 /* 2277 * ubsec_feed2 will sync mcr and ctx, we just need to sync 2278 * everything else. 
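 * (Here that is the M, E and C operand buffers and the exponent pktbuf.)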
2279 */ 2280 ubsec_dma_sync(&me->me_M, BUS_DMASYNC_PREWRITE); 2281 ubsec_dma_sync(&me->me_E, BUS_DMASYNC_PREWRITE); 2282 ubsec_dma_sync(&me->me_C, BUS_DMASYNC_PREREAD); 2283 ubsec_dma_sync(&me->me_epb, BUS_DMASYNC_PREWRITE); 2284 2285 /* Enqueue and we're done... */ 2286 s = splimp(); 2287 SIMPLEQ_INSERT_TAIL(&sc->sc_queue2, &me->me_q, q_next); 2288 ubsec_feed2(sc); 2289 ubsecstats.hst_modexp++; 2290 splx(s); 2291 2292 return (0); 2293 2294 errout: 2295 if (me != NULL) { 2296 if (me->me_q.q_mcr.dma_map != NULL) 2297 ubsec_dma_free(sc, &me->me_q.q_mcr); 2298 if (me->me_q.q_ctx.dma_map != NULL) { 2299 bzero(me->me_q.q_ctx.dma_vaddr, me->me_q.q_ctx.dma_size); 2300 ubsec_dma_free(sc, &me->me_q.q_ctx); 2301 } 2302 if (me->me_M.dma_map != NULL) { 2303 bzero(me->me_M.dma_vaddr, me->me_M.dma_size); 2304 ubsec_dma_free(sc, &me->me_M); 2305 } 2306 if (me->me_E.dma_map != NULL) { 2307 bzero(me->me_E.dma_vaddr, me->me_E.dma_size); 2308 ubsec_dma_free(sc, &me->me_E); 2309 } 2310 if (me->me_C.dma_map != NULL) { 2311 bzero(me->me_C.dma_vaddr, me->me_C.dma_size); 2312 ubsec_dma_free(sc, &me->me_C); 2313 } 2314 if (me->me_epb.dma_map != NULL) 2315 ubsec_dma_free(sc, &me->me_epb); 2316 free(me, M_DEVBUF); 2317 } 2318 krp->krp_status = err; 2319 crypto_kdone(krp); 2320 return (0); 2321 } 2322 2323 /* 2324 * Start computation of cr[C] = (cr[M] ^ cr[E]) mod cr[N] (hw normalization) 2325 */ 2326 static int 2327 ubsec_kprocess_modexp_hw(struct ubsec_softc *sc, struct cryptkop *krp, int hint) 2328 { 2329 struct ubsec_q2_modexp *me; 2330 struct ubsec_mcr *mcr; 2331 struct ubsec_ctx_modexp *ctx; 2332 struct ubsec_pktbuf *epb; 2333 int s, err = 0; 2334 u_int nbits, normbits, mbits, shiftbits, ebits; 2335 2336 me = malloc(sizeof *me, M_DEVBUF, M_INTWAIT | M_ZERO); 2337 me->me_krp = krp; 2338 me->me_q.q_type = UBS_CTXOP_MODEXP; 2339 2340 nbits = ubsec_ksigbits(&krp->krp_param[UBS_MODEXP_PAR_N]); 2341 if (nbits <= 512) 2342 normbits = 512; 2343 else if (nbits <= 768) 2344 normbits = 768; 2345 else if (nbits <= 1024) 2346 normbits = 1024; 2347 else if (sc->sc_flags & UBS_FLAGS_BIGKEY && nbits <= 1536) 2348 normbits = 1536; 2349 else if (sc->sc_flags & UBS_FLAGS_BIGKEY && nbits <= 2048) 2350 normbits = 2048; 2351 else { 2352 err = E2BIG; 2353 goto errout; 2354 } 2355 2356 shiftbits = normbits - nbits; 2357 2358 /* XXX ??? */ 2359 me->me_modbits = nbits; 2360 me->me_shiftbits = shiftbits; 2361 me->me_normbits = normbits; 2362 2363 /* Sanity check: result bits must be >= true modulus bits. 
*/ 2364 if (krp->krp_param[krp->krp_iparams].crp_nbits < nbits) { 2365 err = ERANGE; 2366 goto errout; 2367 } 2368 2369 if (ubsec_dma_malloc(sc, sizeof(struct ubsec_mcr), 2370 &me->me_q.q_mcr, 0)) { 2371 err = ENOMEM; 2372 goto errout; 2373 } 2374 mcr = (struct ubsec_mcr *)me->me_q.q_mcr.dma_vaddr; 2375 2376 if (ubsec_dma_malloc(sc, sizeof(struct ubsec_ctx_modexp), 2377 &me->me_q.q_ctx, 0)) { 2378 err = ENOMEM; 2379 goto errout; 2380 } 2381 2382 mbits = ubsec_ksigbits(&krp->krp_param[UBS_MODEXP_PAR_M]); 2383 if (mbits > nbits) { 2384 err = E2BIG; 2385 goto errout; 2386 } 2387 if (ubsec_dma_malloc(sc, normbits / 8, &me->me_M, 0)) { 2388 err = ENOMEM; 2389 goto errout; 2390 } 2391 bzero(me->me_M.dma_vaddr, normbits / 8); 2392 bcopy(krp->krp_param[UBS_MODEXP_PAR_M].crp_p, 2393 me->me_M.dma_vaddr, (mbits + 7) / 8); 2394 2395 if (ubsec_dma_malloc(sc, normbits / 8, &me->me_C, 0)) { 2396 err = ENOMEM; 2397 goto errout; 2398 } 2399 bzero(me->me_C.dma_vaddr, me->me_C.dma_size); 2400 2401 ebits = ubsec_ksigbits(&krp->krp_param[UBS_MODEXP_PAR_E]); 2402 if (ebits > nbits) { 2403 err = E2BIG; 2404 goto errout; 2405 } 2406 if (ubsec_dma_malloc(sc, normbits / 8, &me->me_E, 0)) { 2407 err = ENOMEM; 2408 goto errout; 2409 } 2410 bzero(me->me_E.dma_vaddr, normbits / 8); 2411 bcopy(krp->krp_param[UBS_MODEXP_PAR_E].crp_p, 2412 me->me_E.dma_vaddr, (ebits + 7) / 8); 2413 2414 if (ubsec_dma_malloc(sc, sizeof(struct ubsec_pktbuf), 2415 &me->me_epb, 0)) { 2416 err = ENOMEM; 2417 goto errout; 2418 } 2419 epb = (struct ubsec_pktbuf *)me->me_epb.dma_vaddr; 2420 epb->pb_addr = htole32(me->me_E.dma_paddr); 2421 epb->pb_next = 0; 2422 epb->pb_len = htole32((ebits + 7) / 8); 2423 2424 #ifdef UBSEC_DEBUG 2425 if (ubsec_debug) { 2426 printf("Epb "); 2427 ubsec_dump_pb(epb); 2428 } 2429 #endif 2430 2431 mcr->mcr_pkts = htole16(1); 2432 mcr->mcr_flags = 0; 2433 mcr->mcr_cmdctxp = htole32(me->me_q.q_ctx.dma_paddr); 2434 mcr->mcr_reserved = 0; 2435 mcr->mcr_pktlen = 0; 2436 2437 mcr->mcr_ipktbuf.pb_addr = htole32(me->me_M.dma_paddr); 2438 mcr->mcr_ipktbuf.pb_len = htole32(normbits / 8); 2439 mcr->mcr_ipktbuf.pb_next = htole32(me->me_epb.dma_paddr); 2440 2441 mcr->mcr_opktbuf.pb_addr = htole32(me->me_C.dma_paddr); 2442 mcr->mcr_opktbuf.pb_next = 0; 2443 mcr->mcr_opktbuf.pb_len = htole32(normbits / 8); 2444 2445 #ifdef DIAGNOSTIC 2446 /* Misaligned output buffer will hang the chip. */ 2447 if ((letoh32(mcr->mcr_opktbuf.pb_addr) & 3) != 0) 2448 panic("%s: modexp invalid addr 0x%x\n", 2449 device_get_nameunit(sc->sc_dev), 2450 letoh32(mcr->mcr_opktbuf.pb_addr)); 2451 if ((letoh32(mcr->mcr_opktbuf.pb_len) & 3) != 0) 2452 panic("%s: modexp invalid len 0x%x\n", 2453 device_get_nameunit(sc->sc_dev), 2454 letoh32(mcr->mcr_opktbuf.pb_len)); 2455 #endif 2456 2457 ctx = (struct ubsec_ctx_modexp *)me->me_q.q_ctx.dma_vaddr; 2458 bzero(ctx, sizeof(*ctx)); 2459 bcopy(krp->krp_param[UBS_MODEXP_PAR_N].crp_p, ctx->me_N, 2460 (nbits + 7) / 8); 2461 ctx->me_len = htole16((normbits / 8) + (4 * sizeof(u_int16_t))); 2462 ctx->me_op = htole16(UBS_CTXOP_MODEXP); 2463 ctx->me_E_len = htole16(ebits); 2464 ctx->me_N_len = htole16(nbits); 2465 2466 #ifdef UBSEC_DEBUG 2467 if (ubsec_debug) { 2468 ubsec_dump_mcr(mcr); 2469 ubsec_dump_ctx2((struct ubsec_ctx_keyop *)ctx); 2470 } 2471 #endif 2472 2473 /* 2474 * ubsec_feed2 will sync mcr and ctx, we just need to sync 2475 * everything else. 
2476 */ 2477 ubsec_dma_sync(&me->me_M, BUS_DMASYNC_PREWRITE); 2478 ubsec_dma_sync(&me->me_E, BUS_DMASYNC_PREWRITE); 2479 ubsec_dma_sync(&me->me_C, BUS_DMASYNC_PREREAD); 2480 ubsec_dma_sync(&me->me_epb, BUS_DMASYNC_PREWRITE); 2481 2482 /* Enqueue and we're done... */ 2483 s = splimp(); 2484 SIMPLEQ_INSERT_TAIL(&sc->sc_queue2, &me->me_q, q_next); 2485 ubsec_feed2(sc); 2486 splx(s); 2487 2488 return (0); 2489 2490 errout: 2491 if (me != NULL) { 2492 if (me->me_q.q_mcr.dma_map != NULL) 2493 ubsec_dma_free(sc, &me->me_q.q_mcr); 2494 if (me->me_q.q_ctx.dma_map != NULL) { 2495 bzero(me->me_q.q_ctx.dma_vaddr, me->me_q.q_ctx.dma_size); 2496 ubsec_dma_free(sc, &me->me_q.q_ctx); 2497 } 2498 if (me->me_M.dma_map != NULL) { 2499 bzero(me->me_M.dma_vaddr, me->me_M.dma_size); 2500 ubsec_dma_free(sc, &me->me_M); 2501 } 2502 if (me->me_E.dma_map != NULL) { 2503 bzero(me->me_E.dma_vaddr, me->me_E.dma_size); 2504 ubsec_dma_free(sc, &me->me_E); 2505 } 2506 if (me->me_C.dma_map != NULL) { 2507 bzero(me->me_C.dma_vaddr, me->me_C.dma_size); 2508 ubsec_dma_free(sc, &me->me_C); 2509 } 2510 if (me->me_epb.dma_map != NULL) 2511 ubsec_dma_free(sc, &me->me_epb); 2512 free(me, M_DEVBUF); 2513 } 2514 krp->krp_status = err; 2515 crypto_kdone(krp); 2516 return (0); 2517 } 2518 2519 static int 2520 ubsec_kprocess_rsapriv(struct ubsec_softc *sc, struct cryptkop *krp, int hint) 2521 { 2522 struct ubsec_q2_rsapriv *rp = NULL; 2523 struct ubsec_mcr *mcr; 2524 struct ubsec_ctx_rsapriv *ctx; 2525 int s, err = 0; 2526 u_int padlen, msglen; 2527 2528 msglen = ubsec_ksigbits(&krp->krp_param[UBS_RSAPRIV_PAR_P]); 2529 padlen = ubsec_ksigbits(&krp->krp_param[UBS_RSAPRIV_PAR_Q]); 2530 if (msglen > padlen) 2531 padlen = msglen; 2532 2533 if (padlen <= 256) 2534 padlen = 256; 2535 else if (padlen <= 384) 2536 padlen = 384; 2537 else if (padlen <= 512) 2538 padlen = 512; 2539 else if (sc->sc_flags & UBS_FLAGS_BIGKEY && padlen <= 768) 2540 padlen = 768; 2541 else if (sc->sc_flags & UBS_FLAGS_BIGKEY && padlen <= 1024) 2542 padlen = 1024; 2543 else { 2544 err = E2BIG; 2545 goto errout; 2546 } 2547 2548 if (ubsec_ksigbits(&krp->krp_param[UBS_RSAPRIV_PAR_DP]) > padlen) { 2549 err = E2BIG; 2550 goto errout; 2551 } 2552 2553 if (ubsec_ksigbits(&krp->krp_param[UBS_RSAPRIV_PAR_DQ]) > padlen) { 2554 err = E2BIG; 2555 goto errout; 2556 } 2557 2558 if (ubsec_ksigbits(&krp->krp_param[UBS_RSAPRIV_PAR_PINV]) > padlen) { 2559 err = E2BIG; 2560 goto errout; 2561 } 2562 2563 rp = malloc(sizeof *rp, M_DEVBUF, M_INTWAIT | M_ZERO); 2564 rp->rpr_krp = krp; 2565 rp->rpr_q.q_type = UBS_CTXOP_RSAPRIV; 2566 2567 if (ubsec_dma_malloc(sc, sizeof(struct ubsec_mcr), 2568 &rp->rpr_q.q_mcr, 0)) { 2569 err = ENOMEM; 2570 goto errout; 2571 } 2572 mcr = (struct ubsec_mcr *)rp->rpr_q.q_mcr.dma_vaddr; 2573 2574 if (ubsec_dma_malloc(sc, sizeof(struct ubsec_ctx_rsapriv), 2575 &rp->rpr_q.q_ctx, 0)) { 2576 err = ENOMEM; 2577 goto errout; 2578 } 2579 ctx = (struct ubsec_ctx_rsapriv *)rp->rpr_q.q_ctx.dma_vaddr; 2580 bzero(ctx, sizeof *ctx); 2581 2582 /* Copy in p */ 2583 bcopy(krp->krp_param[UBS_RSAPRIV_PAR_P].crp_p, 2584 &ctx->rpr_buf[0 * (padlen / 8)], 2585 (krp->krp_param[UBS_RSAPRIV_PAR_P].crp_nbits + 7) / 8); 2586 2587 /* Copy in q */ 2588 bcopy(krp->krp_param[UBS_RSAPRIV_PAR_Q].crp_p, 2589 &ctx->rpr_buf[1 * (padlen / 8)], 2590 (krp->krp_param[UBS_RSAPRIV_PAR_Q].crp_nbits + 7) / 8); 2591 2592 /* Copy in dp */ 2593 bcopy(krp->krp_param[UBS_RSAPRIV_PAR_DP].crp_p, 2594 &ctx->rpr_buf[2 * (padlen / 8)], 2595 (krp->krp_param[UBS_RSAPRIV_PAR_DP].crp_nbits + 7) / 8); 2596 2597 /* 
Copy in dq */ 2598 bcopy(krp->krp_param[UBS_RSAPRIV_PAR_DQ].crp_p, 2599 &ctx->rpr_buf[3 * (padlen / 8)], 2600 (krp->krp_param[UBS_RSAPRIV_PAR_DQ].crp_nbits + 7) / 8); 2601 2602 /* Copy in pinv */ 2603 bcopy(krp->krp_param[UBS_RSAPRIV_PAR_PINV].crp_p, 2604 &ctx->rpr_buf[4 * (padlen / 8)], 2605 (krp->krp_param[UBS_RSAPRIV_PAR_PINV].crp_nbits + 7) / 8); 2606 2607 msglen = padlen * 2; 2608 2609 /* Copy in input message (aligned buffer/length). */ 2610 if (ubsec_ksigbits(&krp->krp_param[UBS_RSAPRIV_PAR_MSGIN]) > msglen) { 2611 /* Is this likely? */ 2612 err = E2BIG; 2613 goto errout; 2614 } 2615 if (ubsec_dma_malloc(sc, (msglen + 7) / 8, &rp->rpr_msgin, 0)) { 2616 err = ENOMEM; 2617 goto errout; 2618 } 2619 bzero(rp->rpr_msgin.dma_vaddr, (msglen + 7) / 8); 2620 bcopy(krp->krp_param[UBS_RSAPRIV_PAR_MSGIN].crp_p, 2621 rp->rpr_msgin.dma_vaddr, 2622 (krp->krp_param[UBS_RSAPRIV_PAR_MSGIN].crp_nbits + 7) / 8); 2623 2624 /* Prepare space for output message (aligned buffer/length). */ 2625 if (ubsec_ksigbits(&krp->krp_param[UBS_RSAPRIV_PAR_MSGOUT]) < msglen) { 2626 /* Is this likely? */ 2627 err = E2BIG; 2628 goto errout; 2629 } 2630 if (ubsec_dma_malloc(sc, (msglen + 7) / 8, &rp->rpr_msgout, 0)) { 2631 err = ENOMEM; 2632 goto errout; 2633 } 2634 bzero(rp->rpr_msgout.dma_vaddr, (msglen + 7) / 8); 2635 2636 mcr->mcr_pkts = htole16(1); 2637 mcr->mcr_flags = 0; 2638 mcr->mcr_cmdctxp = htole32(rp->rpr_q.q_ctx.dma_paddr); 2639 mcr->mcr_ipktbuf.pb_addr = htole32(rp->rpr_msgin.dma_paddr); 2640 mcr->mcr_ipktbuf.pb_next = 0; 2641 mcr->mcr_ipktbuf.pb_len = htole32(rp->rpr_msgin.dma_size); 2642 mcr->mcr_reserved = 0; 2643 mcr->mcr_pktlen = htole16(msglen); 2644 mcr->mcr_opktbuf.pb_addr = htole32(rp->rpr_msgout.dma_paddr); 2645 mcr->mcr_opktbuf.pb_next = 0; 2646 mcr->mcr_opktbuf.pb_len = htole32(rp->rpr_msgout.dma_size); 2647 2648 #ifdef DIAGNOSTIC 2649 if (rp->rpr_msgin.dma_paddr & 3 || rp->rpr_msgin.dma_size & 3) { 2650 panic("%s: rsapriv: invalid msgin %x(0x%x)", 2651 device_get_nameunit(sc->sc_dev), 2652 rp->rpr_msgin.dma_paddr, rp->rpr_msgin.dma_size); 2653 } 2654 if (rp->rpr_msgout.dma_paddr & 3 || rp->rpr_msgout.dma_size & 3) { 2655 panic("%s: rsapriv: invalid msgout %x(0x%x)", 2656 device_get_nameunit(sc->sc_dev), 2657 rp->rpr_msgout.dma_paddr, rp->rpr_msgout.dma_size); 2658 } 2659 #endif 2660 2661 ctx->rpr_len = (sizeof(u_int16_t) * 4) + (5 * (padlen / 8)); 2662 ctx->rpr_op = htole16(UBS_CTXOP_RSAPRIV); 2663 ctx->rpr_q_len = htole16(padlen); 2664 ctx->rpr_p_len = htole16(padlen); 2665 2666 /* 2667 * ubsec_feed2 will sync mcr and ctx, we just need to sync 2668 * everything else. 2669 */ 2670 ubsec_dma_sync(&rp->rpr_msgin, BUS_DMASYNC_PREWRITE); 2671 ubsec_dma_sync(&rp->rpr_msgout, BUS_DMASYNC_PREREAD); 2672 2673 /* Enqueue and we're done... 
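 * (The request goes on sc_queue2; ubsec_feed2() pushes it to the chip.)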
*/ 2674 s = splimp(); 2675 SIMPLEQ_INSERT_TAIL(&sc->sc_queue2, &rp->rpr_q, q_next); 2676 ubsec_feed2(sc); 2677 ubsecstats.hst_modexpcrt++; 2678 splx(s); 2679 return (0); 2680 2681 errout: 2682 if (rp != NULL) { 2683 if (rp->rpr_q.q_mcr.dma_map != NULL) 2684 ubsec_dma_free(sc, &rp->rpr_q.q_mcr); 2685 if (rp->rpr_msgin.dma_map != NULL) { 2686 bzero(rp->rpr_msgin.dma_vaddr, rp->rpr_msgin.dma_size); 2687 ubsec_dma_free(sc, &rp->rpr_msgin); 2688 } 2689 if (rp->rpr_msgout.dma_map != NULL) { 2690 bzero(rp->rpr_msgout.dma_vaddr, rp->rpr_msgout.dma_size); 2691 ubsec_dma_free(sc, &rp->rpr_msgout); 2692 } 2693 free(rp, M_DEVBUF); 2694 } 2695 krp->krp_status = err; 2696 crypto_kdone(krp); 2697 return (0); 2698 } 2699 2700 #ifdef UBSEC_DEBUG 2701 static void 2702 ubsec_dump_pb(volatile struct ubsec_pktbuf *pb) 2703 { 2704 printf("addr 0x%x (0x%x) next 0x%x\n", 2705 pb->pb_addr, pb->pb_len, pb->pb_next); 2706 } 2707 2708 static void 2709 ubsec_dump_ctx2(struct ubsec_ctx_keyop *c) 2710 { 2711 printf("CTX (0x%x):\n", c->ctx_len); 2712 switch (letoh16(c->ctx_op)) { 2713 case UBS_CTXOP_RNGBYPASS: 2714 case UBS_CTXOP_RNGSHA1: 2715 break; 2716 case UBS_CTXOP_MODEXP: 2717 { 2718 struct ubsec_ctx_modexp *cx = (void *)c; 2719 int i, len; 2720 2721 printf(" Elen %u, Nlen %u\n", 2722 letoh16(cx->me_E_len), letoh16(cx->me_N_len)); 2723 len = (cx->me_N_len + 7)/8; 2724 for (i = 0; i < len; i++) 2725 printf("%s%02x", (i == 0) ? " N: " : ":", cx->me_N[i]); 2726 printf("\n"); 2727 break; 2728 } 2729 default: 2730 printf("unknown context: %x\n", c->ctx_op); 2731 } 2732 printf("END CTX\n"); 2733 } 2734 2735 static void 2736 ubsec_dump_mcr(struct ubsec_mcr *mcr) 2737 { 2738 volatile struct ubsec_mcr_add *ma; 2739 int i; 2740 2741 printf("MCR:\n"); 2742 printf(" pkts: %u, flags 0x%x\n", 2743 letoh16(mcr->mcr_pkts), letoh16(mcr->mcr_flags)); 2744 ma = (volatile struct ubsec_mcr_add *)&mcr->mcr_cmdctxp; 2745 for (i = 0; i < letoh16(mcr->mcr_pkts); i++) { 2746 printf(" %d: ctx 0x%x len 0x%x rsvd 0x%x\n", i, 2747 letoh32(ma->mcr_cmdctxp), letoh16(ma->mcr_pktlen), 2748 letoh16(ma->mcr_reserved)); 2749 printf(" %d: ipkt ", i); 2750 ubsec_dump_pb(&ma->mcr_ipktbuf); 2751 printf(" %d: opkt ", i); 2752 ubsec_dump_pb(&ma->mcr_opktbuf); 2753 ma++; 2754 } 2755 printf("END MCR\n"); 2756 } 2757 #endif /* UBSEC_DEBUG */ 2758 2759 /* 2760 * Return the number of significant bits of a big number. 
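 * The bytes are interpreted least significant byte first (p[plen - 1]
 * holds the top bits); for example, a two-byte parameter with bytes
 * { 0x00, 0x03 } (the value 0x0300) has 10 significant bits.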
2761 */ 2762 static int 2763 ubsec_ksigbits(struct crparam *cr) 2764 { 2765 u_int plen = (cr->crp_nbits + 7) / 8; 2766 int i, sig = plen * 8; 2767 u_int8_t c, *p = cr->crp_p; 2768 2769 for (i = plen - 1; i >= 0; i--) { 2770 c = p[i]; 2771 if (c != 0) { 2772 while ((c & 0x80) == 0) { 2773 sig--; 2774 c <<= 1; 2775 } 2776 break; 2777 } 2778 sig -= 8; 2779 } 2780 return (sig); 2781 } 2782 2783 static void 2784 ubsec_kshift_r( 2785 u_int shiftbits, 2786 u_int8_t *src, u_int srcbits, 2787 u_int8_t *dst, u_int dstbits) 2788 { 2789 u_int slen, dlen; 2790 int i, si, di, n; 2791 2792 slen = (srcbits + 7) / 8; 2793 dlen = (dstbits + 7) / 8; 2794 2795 for (i = 0; i < slen; i++) 2796 dst[i] = src[i]; 2797 for (i = 0; i < dlen - slen; i++) 2798 dst[slen + i] = 0; 2799 2800 n = shiftbits / 8; 2801 if (n != 0) { 2802 si = dlen - n - 1; 2803 di = dlen - 1; 2804 while (si >= 0) 2805 dst[di--] = dst[si--]; 2806 while (di >= 0) 2807 dst[di--] = 0; 2808 } 2809 2810 n = shiftbits % 8; 2811 if (n != 0) { 2812 for (i = dlen - 1; i > 0; i--) 2813 dst[i] = (dst[i] << n) | 2814 (dst[i - 1] >> (8 - n)); 2815 dst[0] = dst[0] << n; 2816 } 2817 } 2818 2819 static void 2820 ubsec_kshift_l( 2821 u_int shiftbits, 2822 u_int8_t *src, u_int srcbits, 2823 u_int8_t *dst, u_int dstbits) 2824 { 2825 int slen, dlen, i, n; 2826 2827 slen = (srcbits + 7) / 8; 2828 dlen = (dstbits + 7) / 8; 2829 2830 n = shiftbits / 8; 2831 for (i = 0; i < slen; i++) 2832 dst[i] = src[i + n]; 2833 for (i = 0; i < dlen - slen; i++) 2834 dst[slen + i] = 0; 2835 2836 n = shiftbits % 8; 2837 if (n != 0) { 2838 for (i = 0; i < (dlen - 1); i++) 2839 dst[i] = (dst[i] >> n) | (dst[i + 1] << (8 - n)); 2840 dst[dlen - 1] = dst[dlen - 1] >> n; 2841 } 2842 } 2843