/*	$NetBSD: ubsec.c,v 1.27 2012/01/30 19:41:23 drochner Exp $	*/
/* $FreeBSD: src/sys/dev/ubsec/ubsec.c,v 1.6.2.6 2003/01/23 21:06:43 sam Exp $ */
/*	$OpenBSD: ubsec.c,v 1.127 2003/06/04 14:04:58 jason Exp $	*/

/*
 * Copyright (c) 2000 Jason L. Wright (jason@thought.net)
 * Copyright (c) 2000 Theo de Raadt (deraadt@openbsd.org)
 * Copyright (c) 2001 Patrik Lindergren (patrik@ipunplugged.com)
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * Effort sponsored in part by the Defense Advanced Research Projects
 * Agency (DARPA) and Air Force Research Laboratory, Air Force
 * Materiel Command, USAF, under agreement number F30602-01-2-0537.
 *
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ubsec.c,v 1.27 2012/01/30 19:41:23 drochner Exp $");

#undef UBSEC_DEBUG

/*
 * uBsec 5[56]01, bcm580xx, bcm582x hardware crypto accelerator
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/endian.h>
#ifdef __NetBSD__
#define letoh16 htole16
#define letoh32 htole32
#define UBSEC_NO_RNG		/* until statistically tested */
#endif
#include <sys/errno.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/mbuf.h>
#include <sys/device.h>
#include <sys/queue.h>

#include <opencrypto/cryptodev.h>
#include <opencrypto/xform.h>
#ifdef __OpenBSD__
#include <dev/rndvar.h>
#include <sys/md5k.h>
#else
#include <sys/cprng.h>
#include <sys/md5.h>
#endif
#include <sys/sha1.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/ubsecreg.h>
#include <dev/pci/ubsecvar.h>

/*
 * Prototypes and count for the pci_device structure
 */
static	int  ubsec_probe(device_t, cfdata_t, void *);
static	void ubsec_attach(device_t, device_t, void *);
static	void ubsec_reset_board(struct ubsec_softc *);
static	void ubsec_init_board(struct ubsec_softc *);
static	void ubsec_init_pciregs(struct pci_attach_args *pa);
static	void ubsec_cleanchip(struct ubsec_softc *);
static	void ubsec_totalreset(struct ubsec_softc *);
static	int  ubsec_free_q(struct ubsec_softc*, struct ubsec_q *);

#ifdef __OpenBSD__
struct cfattach ubsec_ca = {
	sizeof(struct ubsec_softc), ubsec_probe, ubsec_attach,
};

struct cfdriver ubsec_cd = {
	0, "ubsec", DV_DULL
};
#else
CFATTACH_DECL(ubsec, sizeof(struct ubsec_softc), ubsec_probe, ubsec_attach,
	      NULL, NULL);
extern struct cfdriver ubsec_cd;
#endif

/* patchable */
#ifdef UBSEC_DEBUG
extern int ubsec_debug;
int ubsec_debug=1;
#endif

static	int	ubsec_intr(void *);
static	int	ubsec_newsession(void*, u_int32_t *, struct cryptoini *);
static	int	ubsec_freesession(void*, u_int64_t);
static	int	ubsec_process(void*, struct cryptop *, int hint);
static	void	ubsec_callback(struct ubsec_softc *, struct ubsec_q *);
static	void	ubsec_feed(struct ubsec_softc *);
static	void	ubsec_mcopy(struct mbuf *, struct mbuf *, int, int);
static	void	ubsec_callback2(struct ubsec_softc *, struct ubsec_q2 *);
static	void	ubsec_feed2(struct ubsec_softc *);
#ifndef UBSEC_NO_RNG
static	void	ubsec_rng(void *);
#endif /* UBSEC_NO_RNG */
static	int	ubsec_dma_malloc(struct ubsec_softc *, bus_size_t,
				 struct ubsec_dma_alloc *, int);
static	void	ubsec_dma_free(struct ubsec_softc *, struct ubsec_dma_alloc *);
static	int	ubsec_dmamap_aligned(bus_dmamap_t);

static	int	ubsec_kprocess(void*, struct cryptkop *, int);
static	int	ubsec_kprocess_modexp_sw(struct ubsec_softc *,
					 struct cryptkop *, int);
static	int	ubsec_kprocess_modexp_hw(struct ubsec_softc *,
					 struct cryptkop *, int);
static	int	ubsec_kprocess_rsapriv(struct ubsec_softc *,
				       struct cryptkop *, int);
static	void	ubsec_kfree(struct ubsec_softc *, struct ubsec_q2 *);
static	int	ubsec_ksigbits(struct crparam *);
static	void	ubsec_kshift_r(u_int, u_int8_t *, u_int, u_int8_t *, u_int);
static	void	ubsec_kshift_l(u_int, u_int8_t *, u_int, u_int8_t *, u_int);

#ifdef UBSEC_DEBUG
static	void	ubsec_dump_pb(volatile struct ubsec_pktbuf *);
static	void	ubsec_dump_mcr(struct ubsec_mcr *);
static	void	ubsec_dump_ctx2(volatile struct ubsec_ctx_keyop *);
#endif

#define	READ_REG(sc,r) \
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (r))

#define WRITE_REG(sc,reg,val) \
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, reg, val)

#define	SWAP32(x) (x) = htole32(ntohl((x)))
#ifndef HTOLE32
#define	HTOLE32(x) (x) = htole32(x)
#endif
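
/*
 * SWAP32 re-orders a 32-bit word from network (big-endian) byte order
 * into the little-endian order the chip consumes: the in-memory bytes
 * 01 23 45 67 become 67 45 23 01.  HTOLE32 converts a host-order word
 * in place.  Both macros expand their argument twice, so they must not
 * be handed expressions with side effects.
 */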

struct ubsec_stats ubsecstats;

/*
 * ubsec_maxbatch controls the number of crypto ops to voluntarily
 * collect into one submission to the hardware.  This batching happens
 * when ops are dispatched from the crypto subsystem with a hint that
 * more are to follow immediately.  These ops must also not be marked
 * with a ``no delay'' flag.
 */
static	int ubsec_maxbatch = 1;
#ifdef SYSCTL_INT
SYSCTL_INT(_kern, OID_AUTO, ubsec_maxbatch, CTLFLAG_RW, &ubsec_maxbatch,
	    0, "Broadcom driver: max ops to batch w/o interrupt");
#endif

/*
 * ubsec_maxaggr controls the number of crypto ops to submit to the
 * hardware as a unit.  This aggregation reduces the number of interrupts
 * to the host at the expense of increased latency (for all but the last
 * operation).  For network traffic setting this to one yields the highest
 * performance but at the expense of more interrupt processing.
 */
static	int ubsec_maxaggr = 1;
#ifdef SYSCTL_INT
SYSCTL_INT(_kern, OID_AUTO, ubsec_maxaggr, CTLFLAG_RW, &ubsec_maxaggr,
	    0, "Broadcom driver: max ops to aggregate under one interrupt");
#endif
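
/*
 * Tuning sketch (assumes a kernel where SYSCTL_INT is compiled in, e.g.
 * FreeBSD; on NetBSD these stay compile-time defaults): the knobs above
 * would then appear as kern.ubsec_maxbatch and kern.ubsec_maxaggr, e.g.
 * "sysctl -w kern.ubsec_maxbatch=4" to trade per-op latency for fewer
 * interrupts under bursty IPsec load.
 */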

static const struct ubsec_product {
	pci_vendor_id_t		ubsec_vendor;
	pci_product_id_t	ubsec_product;
	int			ubsec_flags;
	int			ubsec_statmask;
	const char		*ubsec_name;
} ubsec_products[] = {
	{ PCI_VENDOR_BLUESTEEL, PCI_PRODUCT_BLUESTEEL_5501,
	  0,
	  BS_STAT_MCR1_DONE | BS_STAT_DMAERR,
	  "Bluesteel 5501"
	},
	{ PCI_VENDOR_BLUESTEEL, PCI_PRODUCT_BLUESTEEL_5601,
	  UBS_FLAGS_KEY | UBS_FLAGS_RNG,
	  BS_STAT_MCR1_DONE | BS_STAT_DMAERR,
	  "Bluesteel 5601"
	},

	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_5801,
	  0,
	  BS_STAT_MCR1_DONE | BS_STAT_DMAERR,
	  "Broadcom BCM5801"
	},

	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_5802,
	  UBS_FLAGS_KEY | UBS_FLAGS_RNG,
	  BS_STAT_MCR1_DONE | BS_STAT_DMAERR,
	  "Broadcom BCM5802"
	},

	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_5805,
	  UBS_FLAGS_KEY | UBS_FLAGS_RNG,
	  BS_STAT_MCR1_DONE | BS_STAT_DMAERR,
	  "Broadcom BCM5805"
	},

	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_5820,
	  UBS_FLAGS_KEY | UBS_FLAGS_RNG | UBS_FLAGS_LONGCTX |
	      UBS_FLAGS_HWNORM | UBS_FLAGS_BIGKEY,
	  BS_STAT_MCR1_DONE | BS_STAT_DMAERR,
	  "Broadcom BCM5820"
	},

	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_5821,
	  UBS_FLAGS_KEY | UBS_FLAGS_RNG | UBS_FLAGS_LONGCTX |
	      UBS_FLAGS_HWNORM | UBS_FLAGS_BIGKEY,
	  BS_STAT_MCR1_DONE | BS_STAT_DMAERR |
	      BS_STAT_MCR1_ALLEMPTY | BS_STAT_MCR2_ALLEMPTY,
	  "Broadcom BCM5821"
	},
	{ PCI_VENDOR_SUN, PCI_PRODUCT_SUN_SCA1K,
	  UBS_FLAGS_KEY | UBS_FLAGS_RNG | UBS_FLAGS_LONGCTX |
	      UBS_FLAGS_HWNORM | UBS_FLAGS_BIGKEY,
	  BS_STAT_MCR1_DONE | BS_STAT_DMAERR |
	      BS_STAT_MCR1_ALLEMPTY | BS_STAT_MCR2_ALLEMPTY,
	  "Sun Crypto Accelerator 1000"
	},
	{ PCI_VENDOR_SUN, PCI_PRODUCT_SUN_5821,
	  UBS_FLAGS_KEY | UBS_FLAGS_RNG | UBS_FLAGS_LONGCTX |
	      UBS_FLAGS_HWNORM | UBS_FLAGS_BIGKEY,
	  BS_STAT_MCR1_DONE | BS_STAT_DMAERR |
	      BS_STAT_MCR1_ALLEMPTY | BS_STAT_MCR2_ALLEMPTY,
	  "Broadcom BCM5821 (Sun)"
	},

	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_5822,
	  UBS_FLAGS_KEY | UBS_FLAGS_RNG | UBS_FLAGS_LONGCTX |
	      UBS_FLAGS_HWNORM | UBS_FLAGS_BIGKEY,
	  BS_STAT_MCR1_DONE | BS_STAT_DMAERR |
	      BS_STAT_MCR1_ALLEMPTY | BS_STAT_MCR2_ALLEMPTY,
	  "Broadcom BCM5822"
	},

	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_5823,
	  UBS_FLAGS_KEY | UBS_FLAGS_RNG | UBS_FLAGS_LONGCTX |
	      UBS_FLAGS_HWNORM | UBS_FLAGS_BIGKEY,
	  BS_STAT_MCR1_DONE | BS_STAT_DMAERR |
	      BS_STAT_MCR1_ALLEMPTY | BS_STAT_MCR2_ALLEMPTY,
	  "Broadcom BCM5823"
	},

	{ 0,			0,
	  0,
	  0,
	  NULL
	}
};

static const struct ubsec_product *
ubsec_lookup(const struct pci_attach_args *pa)
{
	const struct ubsec_product *up;

	for (up = ubsec_products; up->ubsec_name != NULL; up++) {
		if (PCI_VENDOR(pa->pa_id) == up->ubsec_vendor &&
		    PCI_PRODUCT(pa->pa_id) == up->ubsec_product)
			return (up);
	}
	return (NULL);
}

static int
ubsec_probe(device_t parent, cfdata_t match, void *aux)
{
	struct pci_attach_args *pa = (struct pci_attach_args *)aux;

	if (ubsec_lookup(pa) != NULL)
		return (1);

	return (0);
}

static void
ubsec_attach(device_t parent, device_t self, void *aux)
{
	struct ubsec_softc *sc = device_private(self);
	struct pci_attach_args *pa = aux;
	const struct ubsec_product *up;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	struct ubsec_dma *dmap;
	u_int32_t cmd, i;

	up = ubsec_lookup(pa);
	if (up == NULL) {
		printf("\n");
		panic("ubsec_attach: impossible");
	}

	pci_aprint_devinfo_fancy(pa, "Crypto processor", up->ubsec_name, 1);

	SIMPLEQ_INIT(&sc->sc_queue);
	SIMPLEQ_INIT(&sc->sc_qchip);
	SIMPLEQ_INIT(&sc->sc_queue2);
	SIMPLEQ_INIT(&sc->sc_qchip2);
	SIMPLEQ_INIT(&sc->sc_q2free);

	sc->sc_flags = up->ubsec_flags;
	sc->sc_statmask = up->ubsec_statmask;

	cmd = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	cmd |= PCI_COMMAND_MASTER_ENABLE;
	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, cmd);

	if (pci_mapreg_map(pa, BS_BAR, PCI_MAPREG_TYPE_MEM, 0,
	    &sc->sc_st, &sc->sc_sh, NULL, NULL)) {
		aprint_error_dev(&sc->sc_dv, "can't find mem space");
		return;
	}

	sc->sc_dmat = pa->pa_dmat;

	if (pci_intr_map(pa, &ih)) {
		aprint_error_dev(&sc->sc_dv, "couldn't map interrupt\n");
		return;
	}
	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, ubsec_intr, sc);
	if (sc->sc_ih == NULL) {
		aprint_error_dev(&sc->sc_dv, "couldn't establish interrupt");
		if (intrstr != NULL)
			aprint_error(" at %s", intrstr);
		aprint_error("\n");
		return;
	}
	aprint_normal_dev(&sc->sc_dv, "interrupting at %s\n", intrstr);

	sc->sc_cid = crypto_get_driverid(0);
	if (sc->sc_cid < 0) {
		aprint_error_dev(&sc->sc_dv, "couldn't get crypto driver id\n");
		pci_intr_disestablish(pc, sc->sc_ih);
		return;
	}

	SIMPLEQ_INIT(&sc->sc_freequeue);
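	/*
	 * Pre-allocate the whole pool of request structures and their
	 * DMA-able chunks up front: one ubsec_q plus one ubsec_dmachunk
	 * per hardware queue slot.  ubsec_process() then only ever pops
	 * entries off sc_freequeue, so the data path never has to
	 * allocate memory.
	 */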
	dmap = sc->sc_dmaa;
	for (i = 0; i < UBS_MAX_NQUEUE; i++, dmap++) {
		struct ubsec_q *q;

		q = (struct ubsec_q *)malloc(sizeof(struct ubsec_q),
		    M_DEVBUF, M_NOWAIT);
		if (q == NULL) {
			aprint_error_dev(&sc->sc_dv, "can't allocate queue buffers\n");
			break;
		}

		if (ubsec_dma_malloc(sc, sizeof(struct ubsec_dmachunk),
		    &dmap->d_alloc, 0)) {
			aprint_error_dev(&sc->sc_dv, "can't allocate dma buffers\n");
			free(q, M_DEVBUF);
			break;
		}
		dmap->d_dma = (struct ubsec_dmachunk *)dmap->d_alloc.dma_vaddr;

		q->q_dma = dmap;
		sc->sc_queuea[i] = q;

		SIMPLEQ_INSERT_TAIL(&sc->sc_freequeue, q, q_next);
	}

	crypto_register(sc->sc_cid, CRYPTO_3DES_CBC, 0, 0,
	    ubsec_newsession, ubsec_freesession, ubsec_process, sc);
	crypto_register(sc->sc_cid, CRYPTO_DES_CBC, 0, 0,
	    ubsec_newsession, ubsec_freesession, ubsec_process, sc);
	crypto_register(sc->sc_cid, CRYPTO_MD5_HMAC_96, 0, 0,
	    ubsec_newsession, ubsec_freesession, ubsec_process, sc);
	crypto_register(sc->sc_cid, CRYPTO_SHA1_HMAC_96, 0, 0,
	    ubsec_newsession, ubsec_freesession, ubsec_process, sc);

	/*
	 * Reset Broadcom chip
	 */
	ubsec_reset_board(sc);

	/*
	 * Init Broadcom specific PCI settings
	 */
	ubsec_init_pciregs(pa);

	/*
	 * Init Broadcom chip
	 */
	ubsec_init_board(sc);

#ifndef UBSEC_NO_RNG
	if (sc->sc_flags & UBS_FLAGS_RNG) {
		sc->sc_statmask |= BS_STAT_MCR2_DONE;

		if (ubsec_dma_malloc(sc, sizeof(struct ubsec_mcr),
		    &sc->sc_rng.rng_q.q_mcr, 0))
			goto skip_rng;

		if (ubsec_dma_malloc(sc, sizeof(struct ubsec_ctx_rngbypass),
		    &sc->sc_rng.rng_q.q_ctx, 0)) {
			ubsec_dma_free(sc, &sc->sc_rng.rng_q.q_mcr);
			goto skip_rng;
		}

		if (ubsec_dma_malloc(sc, sizeof(u_int32_t) *
		    UBSEC_RNG_BUFSIZ, &sc->sc_rng.rng_buf, 0)) {
			ubsec_dma_free(sc, &sc->sc_rng.rng_q.q_ctx);
			ubsec_dma_free(sc, &sc->sc_rng.rng_q.q_mcr);
			goto skip_rng;
		}

		if (hz >= 100)
			sc->sc_rnghz = hz / 100;
		else
			sc->sc_rnghz = 1;
#ifdef __OpenBSD__
		timeout_set(&sc->sc_rngto, ubsec_rng, sc);
		timeout_add(&sc->sc_rngto, sc->sc_rnghz);
#else
		callout_init(&sc->sc_rngto, 0);
		callout_reset(&sc->sc_rngto, sc->sc_rnghz, ubsec_rng, sc);
#endif
 skip_rng:
		if (sc->sc_rnghz)
			aprint_normal_dev(&sc->sc_dv, "random number generator enabled\n");
		else
			aprint_error_dev(&sc->sc_dv, "WARNING: random number generator "
			    "disabled\n");
	}
#endif /* UBSEC_NO_RNG */

	if (sc->sc_flags & UBS_FLAGS_KEY) {
		sc->sc_statmask |= BS_STAT_MCR2_DONE;

		crypto_kregister(sc->sc_cid, CRK_MOD_EXP, 0,
		    ubsec_kprocess, sc);
#if 0
		crypto_kregister(sc->sc_cid, CRK_MOD_EXP_CRT, 0,
		    ubsec_kprocess, sc);
#endif
	}
}

/*
 * UBSEC Interrupt routine
 */
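/*
 * The chip signals completion by setting UBS_MCR_DONE in the MCR flags
 * word in host memory and raising the corresponding BS_STAT bit.  The
 * handler acks the status register (IACK), then walks sc_qchip (and
 * sc_qchip2 for key/RNG work) in FIFO order, stopping at the first MCR
 * the hardware has not finished yet.
 */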
static int
ubsec_intr(void *arg)
{
	struct ubsec_softc *sc = arg;
	volatile u_int32_t stat;
	struct ubsec_q *q;
	struct ubsec_dma *dmap;
	int npkts = 0, i;

	stat = READ_REG(sc, BS_STAT);
	stat &= sc->sc_statmask;
	if (stat == 0) {
		return (0);
	}

	WRITE_REG(sc, BS_STAT, stat);		/* IACK */

	/*
	 * Check to see if we have any packets waiting for us
	 */
	if ((stat & BS_STAT_MCR1_DONE)) {
		while (!SIMPLEQ_EMPTY(&sc->sc_qchip)) {
			q = SIMPLEQ_FIRST(&sc->sc_qchip);
			dmap = q->q_dma;

			if ((dmap->d_dma->d_mcr.mcr_flags & htole16(UBS_MCR_DONE)) == 0)
				break;

			q = SIMPLEQ_FIRST(&sc->sc_qchip);
			SIMPLEQ_REMOVE_HEAD(&sc->sc_qchip, /*q,*/ q_next);

			npkts = q->q_nstacked_mcrs;
			sc->sc_nqchip -= 1+npkts;
			/*
			 * search for further sc_qchip ubsec_q's that share
			 * the same MCR, and complete them too; they must be
			 * at the top.
			 */
			for (i = 0; i < npkts; i++) {
				if(q->q_stacked_mcr[i])
					ubsec_callback(sc, q->q_stacked_mcr[i]);
				else
					break;
			}
			ubsec_callback(sc, q);
		}

		/*
		 * Don't send any more packets to the chip if there has
		 * been a DMAERR.
		 */
		if (!(stat & BS_STAT_DMAERR))
			ubsec_feed(sc);
	}

	/*
	 * Check to see if we have any key setups/rng's waiting for us
	 */
	if ((sc->sc_flags & (UBS_FLAGS_KEY|UBS_FLAGS_RNG)) &&
	    (stat & BS_STAT_MCR2_DONE)) {
		struct ubsec_q2 *q2;
		struct ubsec_mcr *mcr;

		while (!SIMPLEQ_EMPTY(&sc->sc_qchip2)) {
			q2 = SIMPLEQ_FIRST(&sc->sc_qchip2);

			bus_dmamap_sync(sc->sc_dmat, q2->q_mcr.dma_map,
			    0, q2->q_mcr.dma_map->dm_mapsize,
			    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

			mcr = (struct ubsec_mcr *)q2->q_mcr.dma_vaddr;
			if ((mcr->mcr_flags & htole16(UBS_MCR_DONE)) == 0) {
				bus_dmamap_sync(sc->sc_dmat,
				    q2->q_mcr.dma_map, 0,
				    q2->q_mcr.dma_map->dm_mapsize,
				    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
				break;
			}
			q2 = SIMPLEQ_FIRST(&sc->sc_qchip2);
			SIMPLEQ_REMOVE_HEAD(&sc->sc_qchip2, /*q2,*/ q_next);
			ubsec_callback2(sc, q2);
			/*
			 * Don't send any more packets to the chip if there
			 * has been a DMAERR.
			 */
			if (!(stat & BS_STAT_DMAERR))
				ubsec_feed2(sc);
		}
	}

	/*
	 * Check to see if we got any DMA Error
	 */
	if (stat & BS_STAT_DMAERR) {
#ifdef UBSEC_DEBUG
		if (ubsec_debug) {
			volatile u_int32_t a = READ_REG(sc, BS_ERR);

			printf("%s: dmaerr %s@%08x\n", device_xname(&sc->sc_dv),
			    (a & BS_ERR_READ) ? "read" : "write",
			    a & BS_ERR_ADDR);
		}
#endif /* UBSEC_DEBUG */
		ubsecstats.hst_dmaerr++;
		ubsec_totalreset(sc);
		ubsec_feed(sc);
	}

	if (sc->sc_needwakeup) {		/* XXX check high watermark */
		int wkeup = sc->sc_needwakeup & (CRYPTO_SYMQ|CRYPTO_ASYMQ);
#ifdef UBSEC_DEBUG
		if (ubsec_debug)
			printf("%s: wakeup crypto (%x)\n", device_xname(&sc->sc_dv),
			    sc->sc_needwakeup);
#endif /* UBSEC_DEBUG */
		sc->sc_needwakeup &= ~wkeup;
		crypto_unblock(sc->sc_cid, wkeup);
	}
	return (1);
}

/*
 * ubsec_feed() - aggregate and post requests to chip
 * OpenBSD comments:
 *   It is assumed that the caller set splnet()
 */
static void
ubsec_feed(struct ubsec_softc *sc)
{
	struct ubsec_q *q, *q2;
	int npkts, i;
	void *v;
	u_int32_t stat;
#ifdef UBSEC_DEBUG
	static int max;
#endif /* UBSEC_DEBUG */

	npkts = sc->sc_nqueue;
	if (npkts > ubsecstats.hst_maxqueue)
		ubsecstats.hst_maxqueue = npkts;
	if (npkts < 2)
		goto feed1;

	/*
	 * Decide how many ops to combine in a single MCR.  We cannot
	 * aggregate more than UBS_MAX_AGGR because this is the number
	 * of slots defined in the data structure.  Otherwise we clamp
	 * based on the tunable parameter ubsec_maxaggr.  Note that
	 * aggregation can happen in two ways: either by batching ops
	 * from above or because the h/w backs up and throttles us.
	 * Aggregating ops reduces the number of interrupts to the host
	 * but also (potentially) increases the latency for processing
	 * completed ops as we only get an interrupt when all aggregated
	 * ops have completed.
	 */
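	/*
	 * An aggregated submission is one MCR header whose mcr_pkts
	 * count covers npkts requests: the first request's MCR is used
	 * as-is, and the per-packet tail (struct ubsec_mcr_add) of each
	 * additional request is copied into d_mcradd[] directly behind
	 * it, so the chip sees a single contiguous command block.
	 */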
	if (npkts > UBS_MAX_AGGR)
		npkts = UBS_MAX_AGGR;
	if (npkts > ubsec_maxaggr)
		npkts = ubsec_maxaggr;
	if (npkts > ubsecstats.hst_maxbatch)
		ubsecstats.hst_maxbatch = npkts;
	if (npkts < 2)
		goto feed1;
	ubsecstats.hst_totbatch += npkts-1;

	if ((stat = READ_REG(sc, BS_STAT)) & (BS_STAT_MCR1_FULL | BS_STAT_DMAERR)) {
		if (stat & BS_STAT_DMAERR) {
			ubsec_totalreset(sc);
			ubsecstats.hst_dmaerr++;
		} else {
			ubsecstats.hst_mcr1full++;
		}
		return;
	}

#ifdef UBSEC_DEBUG
	if (ubsec_debug)
		printf("merging %d records\n", npkts);
	/* XXX temporary aggregation statistics reporting code */
	if (max < npkts) {
		max = npkts;
		printf("%s: new max aggregate %d\n", device_xname(&sc->sc_dv), max);
	}
#endif /* UBSEC_DEBUG */

	q = SIMPLEQ_FIRST(&sc->sc_queue);
	SIMPLEQ_REMOVE_HEAD(&sc->sc_queue, /*q,*/ q_next);
	--sc->sc_nqueue;

	bus_dmamap_sync(sc->sc_dmat, q->q_src_map,
	    0, q->q_src_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
	if (q->q_dst_map != NULL)
		bus_dmamap_sync(sc->sc_dmat, q->q_dst_map,
		    0, q->q_dst_map->dm_mapsize, BUS_DMASYNC_PREREAD);

	q->q_nstacked_mcrs = npkts - 1;		/* Number of packets stacked */

	for (i = 0; i < q->q_nstacked_mcrs; i++) {
		q2 = SIMPLEQ_FIRST(&sc->sc_queue);
		bus_dmamap_sync(sc->sc_dmat, q2->q_src_map,
		    0, q2->q_src_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
		if (q2->q_dst_map != NULL)
			bus_dmamap_sync(sc->sc_dmat, q2->q_dst_map,
			    0, q2->q_dst_map->dm_mapsize, BUS_DMASYNC_PREREAD);
		q2 = SIMPLEQ_FIRST(&sc->sc_queue);
		SIMPLEQ_REMOVE_HEAD(&sc->sc_queue, /*q2,*/ q_next);
		--sc->sc_nqueue;

		v = ((void *)&q2->q_dma->d_dma->d_mcr);
		v = (char*)v + (sizeof(struct ubsec_mcr) -
		    sizeof(struct ubsec_mcr_add));
		memcpy(&q->q_dma->d_dma->d_mcradd[i], v, sizeof(struct ubsec_mcr_add));
		q->q_stacked_mcr[i] = q2;
	}
	q->q_dma->d_dma->d_mcr.mcr_pkts = htole16(npkts);
	SIMPLEQ_INSERT_TAIL(&sc->sc_qchip, q, q_next);
	sc->sc_nqchip += npkts;
	if (sc->sc_nqchip > ubsecstats.hst_maxqchip)
		ubsecstats.hst_maxqchip = sc->sc_nqchip;
	bus_dmamap_sync(sc->sc_dmat, q->q_dma->d_alloc.dma_map,
	    0, q->q_dma->d_alloc.dma_map->dm_mapsize,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	WRITE_REG(sc, BS_MCR1, q->q_dma->d_alloc.dma_paddr +
	    offsetof(struct ubsec_dmachunk, d_mcr));
	return;

feed1:
	while (!SIMPLEQ_EMPTY(&sc->sc_queue)) {
		if ((stat = READ_REG(sc, BS_STAT)) & (BS_STAT_MCR1_FULL | BS_STAT_DMAERR)) {
			if (stat & BS_STAT_DMAERR) {
				ubsec_totalreset(sc);
				ubsecstats.hst_dmaerr++;
			} else {
				ubsecstats.hst_mcr1full++;
			}
			break;
		}

		q = SIMPLEQ_FIRST(&sc->sc_queue);

		bus_dmamap_sync(sc->sc_dmat, q->q_src_map,
		    0, q->q_src_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
		if (q->q_dst_map != NULL)
			bus_dmamap_sync(sc->sc_dmat, q->q_dst_map,
			    0, q->q_dst_map->dm_mapsize, BUS_DMASYNC_PREREAD);
		bus_dmamap_sync(sc->sc_dmat, q->q_dma->d_alloc.dma_map,
		    0, q->q_dma->d_alloc.dma_map->dm_mapsize,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		WRITE_REG(sc, BS_MCR1, q->q_dma->d_alloc.dma_paddr +
		    offsetof(struct ubsec_dmachunk, d_mcr));
#ifdef UBSEC_DEBUG
		if (ubsec_debug)
			printf("feed: q->chip %p %08x stat %08x\n",
			    q, (u_int32_t)q->q_dma->d_alloc.dma_paddr,
			    stat);
#endif /* UBSEC_DEBUG */
		q = SIMPLEQ_FIRST(&sc->sc_queue);
		SIMPLEQ_REMOVE_HEAD(&sc->sc_queue, /*q,*/ q_next);
		--sc->sc_nqueue;
		SIMPLEQ_INSERT_TAIL(&sc->sc_qchip, q, q_next);
		sc->sc_nqchip++;
	}
	if (sc->sc_nqchip > ubsecstats.hst_maxqchip)
		ubsecstats.hst_maxqchip = sc->sc_nqchip;
}
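
/*
 * Note that ubsec_feed() has two exits into hardware: the aggregated
 * path above (npkts >= 2) posts one combined MCR, while the feed1 loop
 * posts single MCRs until BS_STAT_MCR1_FULL throttles it.  Both paths
 * leave completion bookkeeping to ubsec_intr()/ubsec_callback().
 */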

/*
 * Allocate a new 'session' and return an encoded session id.  'sidp'
 * contains our registration id, and should contain an encoded session
 * id on successful allocation.
 */
static int
ubsec_newsession(void *arg, u_int32_t *sidp, struct cryptoini *cri)
{
	struct cryptoini *c, *encini = NULL, *macini = NULL;
	struct ubsec_softc *sc;
	struct ubsec_session *ses = NULL;
	MD5_CTX md5ctx;
	SHA1_CTX sha1ctx;
	int i, sesn;

	sc = arg;
	KASSERT(sc != NULL /*, ("ubsec_newsession: null softc")*/);

	if (sidp == NULL || cri == NULL || sc == NULL)
		return (EINVAL);

	for (c = cri; c != NULL; c = c->cri_next) {
		if (c->cri_alg == CRYPTO_MD5_HMAC_96 ||
		    c->cri_alg == CRYPTO_SHA1_HMAC_96) {
			if (macini)
				return (EINVAL);
			macini = c;
		} else if (c->cri_alg == CRYPTO_DES_CBC ||
		    c->cri_alg == CRYPTO_3DES_CBC) {
			if (encini)
				return (EINVAL);
			encini = c;
		} else
			return (EINVAL);
	}
	if (encini == NULL && macini == NULL)
		return (EINVAL);

	if (sc->sc_sessions == NULL) {
		ses = sc->sc_sessions = (struct ubsec_session *)malloc(
		    sizeof(struct ubsec_session), M_DEVBUF, M_NOWAIT);
		if (ses == NULL)
			return (ENOMEM);
		sesn = 0;
		sc->sc_nsessions = 1;
	} else {
		for (sesn = 0; sesn < sc->sc_nsessions; sesn++) {
			if (sc->sc_sessions[sesn].ses_used == 0) {
				ses = &sc->sc_sessions[sesn];
				break;
			}
		}

		if (ses == NULL) {
			sesn = sc->sc_nsessions;
			ses = (struct ubsec_session *)malloc((sesn + 1) *
			    sizeof(struct ubsec_session), M_DEVBUF, M_NOWAIT);
			if (ses == NULL)
				return (ENOMEM);
			memcpy(ses, sc->sc_sessions, sesn *
			    sizeof(struct ubsec_session));
			memset(sc->sc_sessions, 0, sesn *
			    sizeof(struct ubsec_session));
			free(sc->sc_sessions, M_DEVBUF);
			sc->sc_sessions = ses;
			ses = &sc->sc_sessions[sesn];
			sc->sc_nsessions++;
		}
	}

	memset(ses, 0, sizeof(struct ubsec_session));
	ses->ses_used = 1;
	if (encini) {
		/* get an IV, network byte order */
#ifdef __NetBSD__
		cprng_fast(ses->ses_iv, sizeof(ses->ses_iv));
#else
		get_random_bytes(ses->ses_iv, sizeof(ses->ses_iv));
#endif

		/* Go ahead and compute key in ubsec's byte order */
		if (encini->cri_alg == CRYPTO_DES_CBC) {
			memcpy(&ses->ses_deskey[0], encini->cri_key, 8);
			memcpy(&ses->ses_deskey[2], encini->cri_key, 8);
			memcpy(&ses->ses_deskey[4], encini->cri_key, 8);
		} else
			memcpy(ses->ses_deskey, encini->cri_key, 24);

		SWAP32(ses->ses_deskey[0]);
		SWAP32(ses->ses_deskey[1]);
		SWAP32(ses->ses_deskey[2]);
		SWAP32(ses->ses_deskey[3]);
		SWAP32(ses->ses_deskey[4]);
		SWAP32(ses->ses_deskey[5]);
	}

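	/*
	 * HMAC precomputation: HMAC(K, m) = H((K^opad) || H((K^ipad) || m)).
	 * Since K^ipad and K^opad each fill exactly one hash block, the
	 * chip only needs the two intermediate chaining states: we hash
	 * one block of K^ipad and save the midstate as ses_hminner, then
	 * one block of K^opad as ses_hmouter.  The key is XORed with
	 * IPAD, then with IPAD^OPAD, then with OPAD, which restores it
	 * unchanged.  (Likewise above, a single-DES key is replicated
	 * into all three 3DES slots: EDE with K1 == K2 == K3 degenerates
	 * to plain DES, so one 3DES engine serves both algorithms.)
	 */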
	if (macini) {
		for (i = 0; i < macini->cri_klen / 8; i++)
			macini->cri_key[i] ^= HMAC_IPAD_VAL;

		if (macini->cri_alg == CRYPTO_MD5_HMAC_96) {
			MD5Init(&md5ctx);
			MD5Update(&md5ctx, macini->cri_key,
			    macini->cri_klen / 8);
			MD5Update(&md5ctx, hmac_ipad_buffer,
			    HMAC_BLOCK_LEN - (macini->cri_klen / 8));
			memcpy(ses->ses_hminner, md5ctx.state,
			    sizeof(md5ctx.state));
		} else {
			SHA1Init(&sha1ctx);
			SHA1Update(&sha1ctx, macini->cri_key,
			    macini->cri_klen / 8);
			SHA1Update(&sha1ctx, hmac_ipad_buffer,
			    HMAC_BLOCK_LEN - (macini->cri_klen / 8));
			memcpy(ses->ses_hminner, sha1ctx.state,
			    sizeof(sha1ctx.state));
		}

		for (i = 0; i < macini->cri_klen / 8; i++)
			macini->cri_key[i] ^= (HMAC_IPAD_VAL ^ HMAC_OPAD_VAL);

		if (macini->cri_alg == CRYPTO_MD5_HMAC_96) {
			MD5Init(&md5ctx);
			MD5Update(&md5ctx, macini->cri_key,
			    macini->cri_klen / 8);
			MD5Update(&md5ctx, hmac_opad_buffer,
			    HMAC_BLOCK_LEN - (macini->cri_klen / 8));
			memcpy(ses->ses_hmouter, md5ctx.state,
			    sizeof(md5ctx.state));
		} else {
			SHA1Init(&sha1ctx);
			SHA1Update(&sha1ctx, macini->cri_key,
			    macini->cri_klen / 8);
			SHA1Update(&sha1ctx, hmac_opad_buffer,
			    HMAC_BLOCK_LEN - (macini->cri_klen / 8));
			memcpy(ses->ses_hmouter, sha1ctx.state,
			    sizeof(sha1ctx.state));
		}

		for (i = 0; i < macini->cri_klen / 8; i++)
			macini->cri_key[i] ^= HMAC_OPAD_VAL;
	}

	*sidp = UBSEC_SID(device_unit(&sc->sc_dv), sesn);
	return (0);
}
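
/*
 * UBSEC_SID packs the device unit and the session index into the 32-bit
 * session id, and UBSEC_SESSION recovers the index; the exact bit layout
 * lives in ubsecvar.h.  Schematically (assuming the usual unit-in-the-
 * high-bits arrangement):
 *
 *	sid = UBSEC_SID(unit, sesn);	/- e.g. unit 0, session 3 -/
 *	sesn = UBSEC_SESSION(sid);	/- yields 3 again -/
 */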

/*
 * Deallocate a session.
 */
static int
ubsec_freesession(void *arg, u_int64_t tid)
{
	struct ubsec_softc *sc;
	int session;
	u_int32_t sid = ((u_int32_t) tid) & 0xffffffff;

	sc = arg;
	KASSERT(sc != NULL /*, ("ubsec_freesession: null softc")*/);

	session = UBSEC_SESSION(sid);
	if (session >= sc->sc_nsessions)
		return (EINVAL);

	memset(&sc->sc_sessions[session], 0, sizeof(sc->sc_sessions[session]));
	return (0);
}

#ifdef __FreeBSD__	/* Ugly gratuitous changes to bus_dma */
static void
ubsec_op_cb(void *arg, bus_dma_segment_t *seg, int nsegs, bus_size_t mapsize, int error)
{
	struct ubsec_operand *op = arg;

	KASSERT(nsegs <= UBS_MAX_SCATTER
		/*, ("Too many DMA segments returned when mapping operand")*/);
#ifdef UBSEC_DEBUG
	if (ubsec_debug)
		printf("ubsec_op_cb: mapsize %u nsegs %d\n",
		    (u_int) mapsize, nsegs);
#endif
	op->mapsize = mapsize;
	op->nsegs = nsegs;
	memcpy(op->segs, seg, nsegs * sizeof (seg[0]));
}
#endif

static int
ubsec_process(void *arg, struct cryptop *crp, int hint)
{
	struct ubsec_q *q = NULL;
#ifdef	__OpenBSD__
	int card;
#endif
	int err = 0, i, j, s, nicealign;
	struct ubsec_softc *sc;
	struct cryptodesc *crd1, *crd2, *maccrd, *enccrd;
	int encoffset = 0, macoffset = 0, cpskip, cpoffset;
	int sskip, dskip, stheend, dtheend;
	int16_t coffset;
	struct ubsec_session *ses;
	struct ubsec_pktctx ctx;
	struct ubsec_dma *dmap = NULL;

	sc = arg;
	KASSERT(sc != NULL /*, ("ubsec_process: null softc")*/);

	if (crp == NULL || crp->crp_callback == NULL || sc == NULL) {
		ubsecstats.hst_invalid++;
		return (EINVAL);
	}
	if (UBSEC_SESSION(crp->crp_sid) >= sc->sc_nsessions) {
		ubsecstats.hst_badsession++;
		return (EINVAL);
	}

	s = splnet();

	if (SIMPLEQ_EMPTY(&sc->sc_freequeue)) {
		ubsecstats.hst_queuefull++;
		sc->sc_needwakeup |= CRYPTO_SYMQ;
		splx(s);
		return(ERESTART);
	}

	q = SIMPLEQ_FIRST(&sc->sc_freequeue);
	SIMPLEQ_REMOVE_HEAD(&sc->sc_freequeue, /*q,*/ q_next);
	splx(s);

	dmap = q->q_dma; /* Save dma pointer */
	memset(q, 0, sizeof(struct ubsec_q));
	memset(&ctx, 0, sizeof(ctx));

	q->q_sesn = UBSEC_SESSION(crp->crp_sid);
	q->q_dma = dmap;
	ses = &sc->sc_sessions[q->q_sesn];

	if (crp->crp_flags & CRYPTO_F_IMBUF) {
		q->q_src_m = (struct mbuf *)crp->crp_buf;
		q->q_dst_m = (struct mbuf *)crp->crp_buf;
	} else if (crp->crp_flags & CRYPTO_F_IOV) {
		q->q_src_io = (struct uio *)crp->crp_buf;
		q->q_dst_io = (struct uio *)crp->crp_buf;
	} else {
		ubsecstats.hst_badflags++;
		err = EINVAL;
		goto errout;	/* XXX we don't handle contiguous blocks! */
	}

	memset(&dmap->d_dma->d_mcr, 0, sizeof(struct ubsec_mcr));

	dmap->d_dma->d_mcr.mcr_pkts = htole16(1);
	dmap->d_dma->d_mcr.mcr_flags = 0;
	q->q_crp = crp;

	crd1 = crp->crp_desc;
	if (crd1 == NULL) {
		ubsecstats.hst_nodesc++;
		err = EINVAL;
		goto errout;
	}
	crd2 = crd1->crd_next;

	if (crd2 == NULL) {
		if (crd1->crd_alg == CRYPTO_MD5_HMAC_96 ||
		    crd1->crd_alg == CRYPTO_SHA1_HMAC_96) {
			maccrd = crd1;
			enccrd = NULL;
		} else if (crd1->crd_alg == CRYPTO_DES_CBC ||
		    crd1->crd_alg == CRYPTO_3DES_CBC) {
			maccrd = NULL;
			enccrd = crd1;
		} else {
			ubsecstats.hst_badalg++;
			err = EINVAL;
			goto errout;
		}
	} else {
		if ((crd1->crd_alg == CRYPTO_MD5_HMAC_96 ||
		    crd1->crd_alg == CRYPTO_SHA1_HMAC_96) &&
		    (crd2->crd_alg == CRYPTO_DES_CBC ||
			crd2->crd_alg == CRYPTO_3DES_CBC) &&
		    ((crd2->crd_flags & CRD_F_ENCRYPT) == 0)) {
			maccrd = crd1;
			enccrd = crd2;
		} else if ((crd1->crd_alg == CRYPTO_DES_CBC ||
		    crd1->crd_alg == CRYPTO_3DES_CBC) &&
		    (crd2->crd_alg == CRYPTO_MD5_HMAC_96 ||
			crd2->crd_alg == CRYPTO_SHA1_HMAC_96) &&
		    (crd1->crd_flags & CRD_F_ENCRYPT)) {
			enccrd = crd1;
			maccrd = crd2;
		} else {
			/*
			 * We cannot order the ubsec as requested
			 */
			ubsecstats.hst_badalg++;
			err = EINVAL;
			goto errout;
		}
	}

	if (enccrd) {
		encoffset = enccrd->crd_skip;
		ctx.pc_flags |= htole16(UBS_PKTCTX_ENC_3DES);

		if (enccrd->crd_flags & CRD_F_ENCRYPT) {
			q->q_flags |= UBSEC_QFLAGS_COPYOUTIV;

			if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
				memcpy(ctx.pc_iv, enccrd->crd_iv, 8);
			else {
				ctx.pc_iv[0] = ses->ses_iv[0];
				ctx.pc_iv[1] = ses->ses_iv[1];
			}

			if ((enccrd->crd_flags & CRD_F_IV_PRESENT) == 0) {
				if (crp->crp_flags & CRYPTO_F_IMBUF)
					m_copyback(q->q_src_m,
					    enccrd->crd_inject,
					    8, (void *)ctx.pc_iv);
				else if (crp->crp_flags & CRYPTO_F_IOV)
					cuio_copyback(q->q_src_io,
					    enccrd->crd_inject,
					    8, (void *)ctx.pc_iv);
			}
		} else {
			ctx.pc_flags |= htole16(UBS_PKTCTX_INBOUND);

			if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
				memcpy(ctx.pc_iv, enccrd->crd_iv, 8);
			else if (crp->crp_flags & CRYPTO_F_IMBUF)
				m_copydata(q->q_src_m, enccrd->crd_inject,
				    8, (void *)ctx.pc_iv);
			else if (crp->crp_flags & CRYPTO_F_IOV)
				cuio_copydata(q->q_src_io,
				    enccrd->crd_inject, 8,
				    (void *)ctx.pc_iv);
		}

		ctx.pc_deskey[0] = ses->ses_deskey[0];
		ctx.pc_deskey[1] = ses->ses_deskey[1];
		ctx.pc_deskey[2] = ses->ses_deskey[2];
		ctx.pc_deskey[3] = ses->ses_deskey[3];
		ctx.pc_deskey[4] = ses->ses_deskey[4];
		ctx.pc_deskey[5] = ses->ses_deskey[5];
		SWAP32(ctx.pc_iv[0]);
		SWAP32(ctx.pc_iv[1]);
	}

	if (maccrd) {
		macoffset = maccrd->crd_skip;

		if (maccrd->crd_alg == CRYPTO_MD5_HMAC_96)
			ctx.pc_flags |= htole16(UBS_PKTCTX_AUTH_MD5);
		else
			ctx.pc_flags |= htole16(UBS_PKTCTX_AUTH_SHA1);

		for (i = 0; i < 5; i++) {
			ctx.pc_hminner[i] = ses->ses_hminner[i];
			ctx.pc_hmouter[i] = ses->ses_hmouter[i];

			HTOLE32(ctx.pc_hminner[i]);
			HTOLE32(ctx.pc_hmouter[i]);
		}
	}

	if (enccrd && maccrd) {
		/*
		 * ubsec cannot handle packets where the end of encryption
		 * and authentication are not the same, or where the
		 * encrypted part begins before the authenticated part.
		 */
		if ((encoffset + enccrd->crd_len) !=
		    (macoffset + maccrd->crd_len)) {
			ubsecstats.hst_lenmismatch++;
			err = EINVAL;
			goto errout;
		}
		if (enccrd->crd_skip < maccrd->crd_skip) {
			ubsecstats.hst_skipmismatch++;
			err = EINVAL;
			goto errout;
		}
		sskip = maccrd->crd_skip;
		cpskip = dskip = enccrd->crd_skip;
		stheend = maccrd->crd_len;
		dtheend = enccrd->crd_len;
		coffset = enccrd->crd_skip - maccrd->crd_skip;
		cpoffset = cpskip + dtheend;
#ifdef UBSEC_DEBUG
		if (ubsec_debug) {
			printf("mac: skip %d, len %d, inject %d\n",
			    maccrd->crd_skip, maccrd->crd_len, maccrd->crd_inject);
			printf("enc: skip %d, len %d, inject %d\n",
			    enccrd->crd_skip, enccrd->crd_len, enccrd->crd_inject);
			printf("src: skip %d, len %d\n", sskip, stheend);
			printf("dst: skip %d, len %d\n", dskip, dtheend);
			printf("ubs: coffset %d, pktlen %d, cpskip %d, cpoffset %d\n",
			    coffset, stheend, cpskip, cpoffset);
		}
#endif
	} else {
		cpskip = dskip = sskip = macoffset + encoffset;
		dtheend = stheend = (enccrd)?enccrd->crd_len:maccrd->crd_len;
		cpoffset = cpskip + dtheend;
		coffset = 0;
	}
	ctx.pc_offset = htole16(coffset >> 2);

	/* XXX FIXME: jonathan asks, what the heck's that 0xfff0? */
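	/*
	 * (A guess, not confirmed by hardware docs: the pb_len field of
	 * a ubsec_pktbuf is 16 bits and the loops below refuse segments
	 * over 0xfffc, so 0xfff0 looks like "largest 16-byte-aligned
	 * size that fits in 16 bits", used both as the map size and as
	 * maxsegsz.)
	 */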
	if (bus_dmamap_create(sc->sc_dmat, 0xfff0, UBS_MAX_SCATTER,
	    0xfff0, 0, BUS_DMA_NOWAIT, &q->q_src_map) != 0) {
		err = ENOMEM;
		goto errout;
	}
	if (crp->crp_flags & CRYPTO_F_IMBUF) {
		if (bus_dmamap_load_mbuf(sc->sc_dmat, q->q_src_map,
		    q->q_src_m, BUS_DMA_NOWAIT) != 0) {
			bus_dmamap_destroy(sc->sc_dmat, q->q_src_map);
			q->q_src_map = NULL;
			ubsecstats.hst_noload++;
			err = ENOMEM;
			goto errout;
		}
	} else if (crp->crp_flags & CRYPTO_F_IOV) {
		if (bus_dmamap_load_uio(sc->sc_dmat, q->q_src_map,
		    q->q_src_io, BUS_DMA_NOWAIT) != 0) {
			bus_dmamap_destroy(sc->sc_dmat, q->q_src_map);
			q->q_src_map = NULL;
			ubsecstats.hst_noload++;
			err = ENOMEM;
			goto errout;
		}
	}
	nicealign = ubsec_dmamap_aligned(q->q_src_map);

	dmap->d_dma->d_mcr.mcr_pktlen = htole16(stheend);

#ifdef UBSEC_DEBUG
	if (ubsec_debug)
		printf("src skip: %d nicealign: %u\n", sskip, nicealign);
#endif
	for (i = j = 0; i < q->q_src_map->dm_nsegs; i++) {
		struct ubsec_pktbuf *pb;
		bus_size_t packl = q->q_src_map->dm_segs[i].ds_len;
		bus_addr_t packp = q->q_src_map->dm_segs[i].ds_addr;

		if (sskip >= packl) {
			sskip -= packl;
			continue;
		}

		packl -= sskip;
		packp += sskip;
		sskip = 0;

		if (packl > 0xfffc) {
			err = EIO;
			goto errout;
		}

		if (j == 0)
			pb = &dmap->d_dma->d_mcr.mcr_ipktbuf;
		else
			pb = &dmap->d_dma->d_sbuf[j - 1];

		pb->pb_addr = htole32(packp);

		if (stheend) {
			if (packl > stheend) {
				pb->pb_len = htole32(stheend);
				stheend = 0;
			} else {
				pb->pb_len = htole32(packl);
				stheend -= packl;
			}
		} else
			pb->pb_len = htole32(packl);

		if ((i + 1) == q->q_src_map->dm_nsegs)
			pb->pb_next = 0;
		else
			pb->pb_next = htole32(dmap->d_alloc.dma_paddr +
			    offsetof(struct ubsec_dmachunk, d_sbuf[j]));
		j++;
	}
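
	/*
	 * At this point the input scatter list is in place: the first
	 * usable segment sits in the MCR header's mcr_ipktbuf and each
	 * following segment in d_sbuf[], chained through pb_next with
	 * *physical* addresses inside the same DMA chunk, so the chip
	 * can walk the list without host help.  sskip trimmed off the
	 * leading bytes the operation should not cover; stheend capped
	 * the total length at the authenticated payload.
	 */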

	if (enccrd == NULL && maccrd != NULL) {
		dmap->d_dma->d_mcr.mcr_opktbuf.pb_addr = 0;
		dmap->d_dma->d_mcr.mcr_opktbuf.pb_len = 0;
		dmap->d_dma->d_mcr.mcr_opktbuf.pb_next = htole32(dmap->d_alloc.dma_paddr +
		    offsetof(struct ubsec_dmachunk, d_macbuf[0]));
#ifdef UBSEC_DEBUG
		if (ubsec_debug)
			printf("opkt: %x %x %x\n",
			    dmap->d_dma->d_mcr.mcr_opktbuf.pb_addr,
			    dmap->d_dma->d_mcr.mcr_opktbuf.pb_len,
			    dmap->d_dma->d_mcr.mcr_opktbuf.pb_next);

#endif
	} else {
		if (crp->crp_flags & CRYPTO_F_IOV) {
			if (!nicealign) {
				ubsecstats.hst_iovmisaligned++;
				err = EINVAL;
				goto errout;
			}
			/* XXX: ``what the heck's that'' 0xfff0? */
			if (bus_dmamap_create(sc->sc_dmat, 0xfff0,
			    UBS_MAX_SCATTER, 0xfff0, 0, BUS_DMA_NOWAIT,
			    &q->q_dst_map) != 0) {
				ubsecstats.hst_nomap++;
				err = ENOMEM;
				goto errout;
			}
			if (bus_dmamap_load_uio(sc->sc_dmat, q->q_dst_map,
			    q->q_dst_io, BUS_DMA_NOWAIT) != 0) {
				bus_dmamap_destroy(sc->sc_dmat, q->q_dst_map);
				q->q_dst_map = NULL;
				ubsecstats.hst_noload++;
				err = ENOMEM;
				goto errout;
			}
		} else if (crp->crp_flags & CRYPTO_F_IMBUF) {
			if (nicealign) {
				q->q_dst_m = q->q_src_m;
				q->q_dst_map = q->q_src_map;
			} else {
				int totlen, len;
				struct mbuf *m, *top, **mp;

				ubsecstats.hst_unaligned++;
				totlen = q->q_src_map->dm_mapsize;
				if (q->q_src_m->m_flags & M_PKTHDR) {
					len = MHLEN;
					MGETHDR(m, M_DONTWAIT, MT_DATA);
					/*XXX FIXME: m_dup_pkthdr */
					if (m && 1 /*!m_dup_pkthdr(m, q->q_src_m, M_DONTWAIT)*/) {
						m_free(m);
						m = NULL;
					}
				} else {
					len = MLEN;
					MGET(m, M_DONTWAIT, MT_DATA);
				}
				if (m == NULL) {
					ubsecstats.hst_nombuf++;
					err = sc->sc_nqueue ? ERESTART : ENOMEM;
					goto errout;
				}
				if (len == MHLEN)
					/*XXX was M_DUP_PKTHDR*/
					M_COPY_PKTHDR(m, q->q_src_m);
				if (totlen >= MINCLSIZE) {
					MCLGET(m, M_DONTWAIT);
					if ((m->m_flags & M_EXT) == 0) {
						m_free(m);
						ubsecstats.hst_nomcl++;
						err = sc->sc_nqueue ? ERESTART : ENOMEM;
						goto errout;
					}
					len = MCLBYTES;
				}
				m->m_len = len;
				top = NULL;
				mp = &top;

				while (totlen > 0) {
					if (top) {
						MGET(m, M_DONTWAIT, MT_DATA);
						if (m == NULL) {
							m_freem(top);
							ubsecstats.hst_nombuf++;
							err = sc->sc_nqueue ? ERESTART : ENOMEM;
							goto errout;
						}
						len = MLEN;
					}
					if (top && totlen >= MINCLSIZE) {
						MCLGET(m, M_DONTWAIT);
						if ((m->m_flags & M_EXT) == 0) {
							*mp = m;
							m_freem(top);
							ubsecstats.hst_nomcl++;
							err = sc->sc_nqueue ? ERESTART : ENOMEM;
							goto errout;
						}
						len = MCLBYTES;
					}
					m->m_len = len = min(totlen, len);
					totlen -= len;
					*mp = m;
					mp = &m->m_next;
				}
				q->q_dst_m = top;
				ubsec_mcopy(q->q_src_m, q->q_dst_m,
				    cpskip, cpoffset);
				/* XXX again, what the heck is that 0xfff0? */
				if (bus_dmamap_create(sc->sc_dmat, 0xfff0,
				    UBS_MAX_SCATTER, 0xfff0, 0, BUS_DMA_NOWAIT,
				    &q->q_dst_map) != 0) {
					ubsecstats.hst_nomap++;
					err = ENOMEM;
					goto errout;
				}
				if (bus_dmamap_load_mbuf(sc->sc_dmat,
				    q->q_dst_map, q->q_dst_m,
				    BUS_DMA_NOWAIT) != 0) {
					bus_dmamap_destroy(sc->sc_dmat,
					    q->q_dst_map);
					q->q_dst_map = NULL;
					ubsecstats.hst_noload++;
					err = ENOMEM;
					goto errout;
				}
			}
		} else {
			ubsecstats.hst_badflags++;
			err = EINVAL;
			goto errout;
		}

#ifdef UBSEC_DEBUG
		if (ubsec_debug)
			printf("dst skip: %d\n", dskip);
#endif
		for (i = j = 0; i < q->q_dst_map->dm_nsegs; i++) {
			struct ubsec_pktbuf *pb;
			bus_size_t packl = q->q_dst_map->dm_segs[i].ds_len;
			bus_addr_t packp = q->q_dst_map->dm_segs[i].ds_addr;

			if (dskip >= packl) {
				dskip -= packl;
				continue;
			}

			packl -= dskip;
			packp += dskip;
			dskip = 0;

			if (packl > 0xfffc) {
				err = EIO;
				goto errout;
			}

			if (j == 0)
				pb = &dmap->d_dma->d_mcr.mcr_opktbuf;
			else
				pb = &dmap->d_dma->d_dbuf[j - 1];

			pb->pb_addr = htole32(packp);

			if (dtheend) {
				if (packl > dtheend) {
					pb->pb_len = htole32(dtheend);
					dtheend = 0;
				} else {
					pb->pb_len = htole32(packl);
					dtheend -= packl;
				}
			} else
				pb->pb_len = htole32(packl);

			if ((i + 1) == q->q_dst_map->dm_nsegs) {
				if (maccrd)
					pb->pb_next = htole32(dmap->d_alloc.dma_paddr +
					    offsetof(struct ubsec_dmachunk, d_macbuf[0]));
				else
					pb->pb_next = 0;
			} else
				pb->pb_next = htole32(dmap->d_alloc.dma_paddr +
				    offsetof(struct ubsec_dmachunk, d_dbuf[j]));
			j++;
		}
	}

	dmap->d_dma->d_mcr.mcr_cmdctxp = htole32(dmap->d_alloc.dma_paddr +
	    offsetof(struct ubsec_dmachunk, d_ctx));

	if (sc->sc_flags & UBS_FLAGS_LONGCTX) {
		struct ubsec_pktctx_long *ctxl;

		ctxl = (struct ubsec_pktctx_long *)((char *)dmap->d_alloc.dma_vaddr +
		    offsetof(struct ubsec_dmachunk, d_ctx));

		/* transform small context into long context */
		ctxl->pc_len = htole16(sizeof(struct ubsec_pktctx_long));
		ctxl->pc_type = htole16(UBS_PKTCTX_TYPE_IPSEC);
		ctxl->pc_flags = ctx.pc_flags;
		ctxl->pc_offset = ctx.pc_offset;
		for (i = 0; i < 6; i++)
			ctxl->pc_deskey[i] = ctx.pc_deskey[i];
		for (i = 0; i < 5; i++)
			ctxl->pc_hminner[i] = ctx.pc_hminner[i];
		for (i = 0; i < 5; i++)
			ctxl->pc_hmouter[i] = ctx.pc_hmouter[i];
		ctxl->pc_iv[0] = ctx.pc_iv[0];
		ctxl->pc_iv[1] = ctx.pc_iv[1];
	} else
		memcpy((char *)dmap->d_alloc.dma_vaddr +
		    offsetof(struct ubsec_dmachunk, d_ctx), &ctx,
		    sizeof(struct ubsec_pktctx));

	s = splnet();
	SIMPLEQ_INSERT_TAIL(&sc->sc_queue, q, q_next);
	sc->sc_nqueue++;
	ubsecstats.hst_ipackets++;
	ubsecstats.hst_ibytes += dmap->d_alloc.dma_map->dm_mapsize;
	if ((hint & CRYPTO_HINT_MORE) == 0 || sc->sc_nqueue >= ubsec_maxbatch)
		ubsec_feed(sc);
	splx(s);
	return (0);

errout:
	if (q != NULL) {
		if ((q->q_dst_m != NULL) && (q->q_src_m != q->q_dst_m))
			m_freem(q->q_dst_m);

		if (q->q_dst_map != NULL && q->q_dst_map != q->q_src_map) {
			bus_dmamap_unload(sc->sc_dmat, q->q_dst_map);
			bus_dmamap_destroy(sc->sc_dmat, q->q_dst_map);
		}
		if (q->q_src_map != NULL) {
			bus_dmamap_unload(sc->sc_dmat, q->q_src_map);
			bus_dmamap_destroy(sc->sc_dmat, q->q_src_map);
		}

		s = splnet();
		SIMPLEQ_INSERT_TAIL(&sc->sc_freequeue, q, q_next);
		splx(s);
	}
#if 0 /* jonathan says: this openbsd code seems to be subsumed elsewhere */
	if (err == EINVAL)
		ubsecstats.hst_invalid++;
	else
		ubsecstats.hst_nomem++;
#endif
	if (err != ERESTART) {
		crp->crp_etype = err;
		crypto_done(crp);
	} else {
		sc->sc_needwakeup |= CRYPTO_SYMQ;
	}
	return (err);
}
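
/*
 * Completion path: ubsec_callback() post-syncs and tears down the DMA
 * maps, saves the last 8 ciphertext bytes as the session IV so the
 * next outbound packet chains CBC state across requests, and copies
 * the 96-bit (12-byte) truncated HMAC out of d_macbuf into the packet
 * (or crp_mac).  It runs from ubsec_intr(), so it only recycles q onto
 * sc_freequeue rather than freeing anything.
 */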
static void
ubsec_callback(struct ubsec_softc *sc, struct ubsec_q *q)
{
	struct cryptop *crp = (struct cryptop *)q->q_crp;
	struct cryptodesc *crd;
	struct ubsec_dma *dmap = q->q_dma;

	ubsecstats.hst_opackets++;
	ubsecstats.hst_obytes += dmap->d_alloc.dma_size;

	bus_dmamap_sync(sc->sc_dmat, dmap->d_alloc.dma_map, 0,
	    dmap->d_alloc.dma_map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	if (q->q_dst_map != NULL && q->q_dst_map != q->q_src_map) {
		bus_dmamap_sync(sc->sc_dmat, q->q_dst_map,
		    0, q->q_dst_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, q->q_dst_map);
		bus_dmamap_destroy(sc->sc_dmat, q->q_dst_map);
	}
	bus_dmamap_sync(sc->sc_dmat, q->q_src_map,
	    0, q->q_src_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->sc_dmat, q->q_src_map);
	bus_dmamap_destroy(sc->sc_dmat, q->q_src_map);

	if ((crp->crp_flags & CRYPTO_F_IMBUF) && (q->q_src_m != q->q_dst_m)) {
		m_freem(q->q_src_m);
		crp->crp_buf = (void *)q->q_dst_m;
	}

	/* copy out IV for future use */
	if (q->q_flags & UBSEC_QFLAGS_COPYOUTIV) {
		for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
			if (crd->crd_alg != CRYPTO_DES_CBC &&
			    crd->crd_alg != CRYPTO_3DES_CBC)
				continue;
			if (crp->crp_flags & CRYPTO_F_IMBUF)
				m_copydata((struct mbuf *)crp->crp_buf,
				    crd->crd_skip + crd->crd_len - 8, 8,
				    (void *)sc->sc_sessions[q->q_sesn].ses_iv);
			else if (crp->crp_flags & CRYPTO_F_IOV) {
				cuio_copydata((struct uio *)crp->crp_buf,
				    crd->crd_skip + crd->crd_len - 8, 8,
				    (void *)sc->sc_sessions[q->q_sesn].ses_iv);
			}
			break;
		}
	}

	for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
		if (crd->crd_alg != CRYPTO_MD5_HMAC_96 &&
		    crd->crd_alg != CRYPTO_SHA1_HMAC_96)
			continue;
		if (crp->crp_flags & CRYPTO_F_IMBUF)
			m_copyback((struct mbuf *)crp->crp_buf,
			    crd->crd_inject, 12,
			    (void *)dmap->d_dma->d_macbuf);
		else if (crp->crp_flags & CRYPTO_F_IOV && crp->crp_mac)
			bcopy((void *)dmap->d_dma->d_macbuf,
			    crp->crp_mac, 12);
		break;
	}
	SIMPLEQ_INSERT_TAIL(&sc->sc_freequeue, q, q_next);
	crypto_done(crp);
}

static void
ubsec_mcopy(struct mbuf *srcm, struct mbuf *dstm, int hoffset, int toffset)
{
	int i, j, dlen, slen;
	char *dptr, *sptr;

	j = 0;
	sptr = srcm->m_data;
	slen = srcm->m_len;
	dptr = dstm->m_data;
	dlen = dstm->m_len;

	while (1) {
		for (i = 0; i < min(slen, dlen); i++) {
			if (j < hoffset || j >= toffset)
				*dptr++ = *sptr++;
			slen--;
			dlen--;
			j++;
		}
		if (slen == 0) {
			srcm = srcm->m_next;
			if (srcm == NULL)
				return;
			sptr = srcm->m_data;
			slen = srcm->m_len;
		}
		if (dlen == 0) {
			dstm = dstm->m_next;
			if (dstm == NULL)
				return;
			dptr = dstm->m_data;
			dlen = dstm->m_len;
		}
	}
}
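
/*
 * Key-op and RNG requests travel a second, parallel path: ubsec_feed2()
 * pushes ubsec_q2 entries at the chip through the BS_MCR2 register, and
 * ubsec_callback2() dispatches their completions by ctx_op type.
 */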

/*
 * feed the key generator, must be called at splnet() or higher.
 */
static void
ubsec_feed2(struct ubsec_softc *sc)
{
	struct ubsec_q2 *q;

	while (!SIMPLEQ_EMPTY(&sc->sc_queue2)) {
		if (READ_REG(sc, BS_STAT) & BS_STAT_MCR2_FULL)
			break;
		q = SIMPLEQ_FIRST(&sc->sc_queue2);

		bus_dmamap_sync(sc->sc_dmat, q->q_mcr.dma_map, 0,
		    q->q_mcr.dma_map->dm_mapsize,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		bus_dmamap_sync(sc->sc_dmat, q->q_ctx.dma_map, 0,
		    q->q_ctx.dma_map->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		WRITE_REG(sc, BS_MCR2, q->q_mcr.dma_paddr);
		q = SIMPLEQ_FIRST(&sc->sc_queue2);
		SIMPLEQ_REMOVE_HEAD(&sc->sc_queue2, /*q,*/ q_next);
		--sc->sc_nqueue2;
		SIMPLEQ_INSERT_TAIL(&sc->sc_qchip2, q, q_next);
	}
}

/*
 * Callback for handling key generation and random number operations.
 */
static void
ubsec_callback2(struct ubsec_softc *sc, struct ubsec_q2 *q)
{
	struct cryptkop *krp;
	struct ubsec_ctx_keyop *ctx;

	ctx = (struct ubsec_ctx_keyop *)q->q_ctx.dma_vaddr;
	bus_dmamap_sync(sc->sc_dmat, q->q_ctx.dma_map, 0,
	    q->q_ctx.dma_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);

	switch (q->q_type) {
#ifndef UBSEC_NO_RNG
	case UBS_CTXOP_RNGSHA1:
	case UBS_CTXOP_RNGBYPASS: {
		struct ubsec_q2_rng *rng = (struct ubsec_q2_rng *)q;
		u_int32_t *p;
		int i;

		bus_dmamap_sync(sc->sc_dmat, rng->rng_buf.dma_map, 0,
		    rng->rng_buf.dma_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
		p = (u_int32_t *)rng->rng_buf.dma_vaddr;
#ifndef __NetBSD__
		for (i = 0; i < UBSEC_RNG_BUFSIZ; p++, i++)
			add_true_randomness(letoh32(*p));
		rng->rng_used = 0;
#else
		/* XXX NetBSD rnd subsystem too weak */
		i = 0; (void)i;	/* shut off gcc warnings */
#endif
#ifdef __OpenBSD__
		timeout_add(&sc->sc_rngto, sc->sc_rnghz);
#else
		callout_reset(&sc->sc_rngto, sc->sc_rnghz, ubsec_rng, sc);
#endif
		break;
	}
#endif
	case UBS_CTXOP_MODEXP: {
		struct ubsec_q2_modexp *me = (struct ubsec_q2_modexp *)q;
		u_int rlen, clen;

		krp = me->me_krp;
		rlen = (me->me_modbits + 7) / 8;
		clen = (krp->krp_param[krp->krp_iparams].crp_nbits + 7) / 8;

		bus_dmamap_sync(sc->sc_dmat, me->me_M.dma_map,
		    0, me->me_M.dma_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_sync(sc->sc_dmat, me->me_E.dma_map,
		    0, me->me_E.dma_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_sync(sc->sc_dmat, me->me_C.dma_map,
		    0, me->me_C.dma_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_sync(sc->sc_dmat, me->me_epb.dma_map,
		    0, me->me_epb.dma_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);

		if (clen < rlen)
			krp->krp_status = E2BIG;
		else {
			if (sc->sc_flags & UBS_FLAGS_HWNORM) {
				memset(krp->krp_param[krp->krp_iparams].crp_p, 0,
				    (krp->krp_param[krp->krp_iparams].crp_nbits
					+ 7) / 8);
				bcopy(me->me_C.dma_vaddr,
				    krp->krp_param[krp->krp_iparams].crp_p,
				    (me->me_modbits + 7) / 8);
			} else
				ubsec_kshift_l(me->me_shiftbits,
				    me->me_C.dma_vaddr, me->me_normbits,
				    krp->krp_param[krp->krp_iparams].crp_p,
				    krp->krp_param[krp->krp_iparams].crp_nbits);
		}

		crypto_kdone(krp);

		/* bzero all potentially sensitive data */
		memset(me->me_E.dma_vaddr, 0, me->me_E.dma_size);
		memset(me->me_M.dma_vaddr, 0, me->me_M.dma_size);
		memset(me->me_C.dma_vaddr, 0, me->me_C.dma_size);
		memset(me->me_q.q_ctx.dma_vaddr, 0, me->me_q.q_ctx.dma_size);

		/* Can't free here, so put us on the free list. */
		SIMPLEQ_INSERT_TAIL(&sc->sc_q2free, &me->me_q, q_next);
		break;
	}
	case UBS_CTXOP_RSAPRIV: {
		struct ubsec_q2_rsapriv *rp = (struct ubsec_q2_rsapriv *)q;
		u_int len;

		krp = rp->rpr_krp;
		bus_dmamap_sync(sc->sc_dmat, rp->rpr_msgin.dma_map, 0,
		    rp->rpr_msgin.dma_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_sync(sc->sc_dmat, rp->rpr_msgout.dma_map, 0,
		    rp->rpr_msgout.dma_map->dm_mapsize, BUS_DMASYNC_POSTREAD);

		len = (krp->krp_param[UBS_RSAPRIV_PAR_MSGOUT].crp_nbits + 7) / 8;
		bcopy(rp->rpr_msgout.dma_vaddr,
		    krp->krp_param[UBS_RSAPRIV_PAR_MSGOUT].crp_p, len);

		crypto_kdone(krp);

		memset(rp->rpr_msgin.dma_vaddr, 0, rp->rpr_msgin.dma_size);
		memset(rp->rpr_msgout.dma_vaddr, 0, rp->rpr_msgout.dma_size);
		memset(rp->rpr_q.q_ctx.dma_vaddr, 0, rp->rpr_q.q_ctx.dma_size);

		/* Can't free here, so put us on the free list. */
		SIMPLEQ_INSERT_TAIL(&sc->sc_q2free, &rp->rpr_q, q_next);
		break;
	}
	default:
		printf("%s: unknown ctx op: %x\n", device_xname(&sc->sc_dv),
		    letoh16(ctx->ctx_op));
		break;
	}
}

#ifndef UBSEC_NO_RNG
static void
ubsec_rng(void *vsc)
{
	struct ubsec_softc *sc = vsc;
	struct ubsec_q2_rng *rng = &sc->sc_rng;
	struct ubsec_mcr *mcr;
	struct ubsec_ctx_rngbypass *ctx;
	int s;

	s = splnet();
	if (rng->rng_used) {
		splx(s);
		return;
	}
	sc->sc_nqueue2++;
	if (sc->sc_nqueue2 >= UBS_MAX_NQUEUE)
		goto out;

	mcr = (struct ubsec_mcr *)rng->rng_q.q_mcr.dma_vaddr;
	ctx = (struct ubsec_ctx_rngbypass *)rng->rng_q.q_ctx.dma_vaddr;

	mcr->mcr_pkts = htole16(1);
	mcr->mcr_flags = 0;
	mcr->mcr_cmdctxp = htole32(rng->rng_q.q_ctx.dma_paddr);
	mcr->mcr_ipktbuf.pb_addr = mcr->mcr_ipktbuf.pb_next = 0;
	mcr->mcr_ipktbuf.pb_len = 0;
	mcr->mcr_reserved = mcr->mcr_pktlen = 0;
	mcr->mcr_opktbuf.pb_addr = htole32(rng->rng_buf.dma_paddr);
	mcr->mcr_opktbuf.pb_len = htole32(((sizeof(u_int32_t) * UBSEC_RNG_BUFSIZ)) &
	    UBS_PKTBUF_LEN);
	mcr->mcr_opktbuf.pb_next = 0;

	ctx->rbp_len = htole16(sizeof(struct ubsec_ctx_rngbypass));
	ctx->rbp_op = htole16(UBS_CTXOP_RNGSHA1);
	rng->rng_q.q_type = UBS_CTXOP_RNGSHA1;

	bus_dmamap_sync(sc->sc_dmat, rng->rng_buf.dma_map, 0,
	    rng->rng_buf.dma_map->dm_mapsize, BUS_DMASYNC_PREREAD);

	SIMPLEQ_INSERT_TAIL(&sc->sc_queue2, &rng->rng_q, q_next);
	rng->rng_used = 1;
	ubsec_feed2(sc);
	ubsecstats.hst_rng++;
	splx(s);

	return;

out:
	/*
	 * Something weird happened, generate our own callback.
	 */
	sc->sc_nqueue2--;
	splx(s);
#ifdef __OpenBSD__
	timeout_add(&sc->sc_rngto, sc->sc_rnghz);
#else
	callout_reset(&sc->sc_rngto, sc->sc_rnghz, ubsec_rng, sc);
#endif
}
#endif /* UBSEC_NO_RNG */
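
/*
 * ubsec_dma_malloc() below is the standard four-step bus_dma dance for
 * a chunk that both the CPU and the chip will touch:
 *
 *	bus_dmamem_alloc()  -> raw DMA-safe pages (dma_seg)
 *	bus_dmamem_map()    -> kernel virtual mapping (dma_vaddr)
 *	bus_dmamap_create() -> a map object for loading
 *	bus_dmamap_load()   -> bus/physical address (dma_paddr)
 *
 * with each failure unwinding the steps before it.
 */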
static int
ubsec_dma_malloc(struct ubsec_softc *sc, bus_size_t size,
		 struct ubsec_dma_alloc *dma, int mapflags)
{
	int r;

	if ((r = bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0,
	    &dma->dma_seg, 1, &dma->dma_nseg, BUS_DMA_NOWAIT)) != 0)
		goto fail_0;

	if ((r = bus_dmamem_map(sc->sc_dmat, &dma->dma_seg, dma->dma_nseg,
	    size, &dma->dma_vaddr, mapflags | BUS_DMA_NOWAIT)) != 0)
		goto fail_1;

	if ((r = bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
	    BUS_DMA_NOWAIT, &dma->dma_map)) != 0)
		goto fail_2;

	if ((r = bus_dmamap_load(sc->sc_dmat, dma->dma_map, dma->dma_vaddr,
	    size, NULL, BUS_DMA_NOWAIT)) != 0)
		goto fail_3;

	dma->dma_paddr = dma->dma_map->dm_segs[0].ds_addr;
	dma->dma_size = size;
	return (0);

fail_3:
	bus_dmamap_destroy(sc->sc_dmat, dma->dma_map);
fail_2:
	bus_dmamem_unmap(sc->sc_dmat, dma->dma_vaddr, size);
fail_1:
	bus_dmamem_free(sc->sc_dmat, &dma->dma_seg, dma->dma_nseg);
fail_0:
	dma->dma_map = NULL;
	return (r);
}

static void
ubsec_dma_free(struct ubsec_softc *sc, struct ubsec_dma_alloc *dma)
{
	bus_dmamap_unload(sc->sc_dmat, dma->dma_map);
	bus_dmamem_unmap(sc->sc_dmat, dma->dma_vaddr, dma->dma_size);
	bus_dmamem_free(sc->sc_dmat, &dma->dma_seg, dma->dma_nseg);
	bus_dmamap_destroy(sc->sc_dmat, dma->dma_map);
}

/*
 * Resets the board.  Values in the registers are left as is
 * from the reset (i.e. initial values are assigned elsewhere).
 */
static void
ubsec_reset_board(struct ubsec_softc *sc)
{
	volatile u_int32_t ctrl;

	ctrl = READ_REG(sc, BS_CTRL);
	ctrl |= BS_CTRL_RESET;
	WRITE_REG(sc, BS_CTRL, ctrl);

	/*
	 * Wait approx. 30 PCI clocks = 900 ns = 0.9 us
	 */
	DELAY(10);
}

/*
 * Init Broadcom registers
 */
static void
ubsec_init_board(struct ubsec_softc *sc)
{
	u_int32_t ctrl;

	ctrl = READ_REG(sc, BS_CTRL);
	ctrl &= ~(BS_CTRL_BE32 | BS_CTRL_BE64);
	ctrl |= BS_CTRL_LITTLE_ENDIAN | BS_CTRL_MCR1INT;

	/*
	 * XXX: Sam Leffler's code has (UBS_FLAGS_KEY|UBS_FLAGS_RNG)).
	 * anyone got hw docs?
	 */
	if (sc->sc_flags & UBS_FLAGS_KEY)
		ctrl |= BS_CTRL_MCR2INT;
	else
		ctrl &= ~BS_CTRL_MCR2INT;

	if (sc->sc_flags & UBS_FLAGS_HWNORM)
		ctrl &= ~BS_CTRL_SWNORM;

	WRITE_REG(sc, BS_CTRL, ctrl);
}

/*
 * Init Broadcom PCI registers
 */
static void
ubsec_init_pciregs(struct pci_attach_args *pa)
{
	pci_chipset_tag_t pc = pa->pa_pc;
	u_int32_t misc;

	/*
	 * This sets the cache line size (to UBS_DEF_CACHELINE), which
	 * forces the BCM58xx chip to do burst reads/writes only;
	 * cache line read/writes are too slow.
	 */
	misc = pci_conf_read(pc, pa->pa_tag, PCI_BHLC_REG);
	misc = (misc & ~(PCI_CACHELINE_MASK << PCI_CACHELINE_SHIFT))
	    | ((UBS_DEF_CACHELINE & 0xff) << PCI_CACHELINE_SHIFT);
	pci_conf_write(pc, pa->pa_tag, PCI_BHLC_REG, misc);
}
 * It is assumed that the caller is in splnet().
 */
static void
ubsec_cleanchip(struct ubsec_softc *sc)
{
	struct ubsec_q *q;

	while (!SIMPLEQ_EMPTY(&sc->sc_qchip)) {
		q = SIMPLEQ_FIRST(&sc->sc_qchip);
		SIMPLEQ_REMOVE_HEAD(&sc->sc_qchip, /*q,*/ q_next);
		ubsec_free_q(sc, q);
	}
	sc->sc_nqchip = 0;
}

/*
 * Free a ubsec_q.
 * It is assumed that the caller is within splnet().
 */
static int
ubsec_free_q(struct ubsec_softc *sc, struct ubsec_q *q)
{
	struct ubsec_q *q2;
	struct cryptop *crp;
	int npkts;
	int i;

	npkts = q->q_nstacked_mcrs;

	for (i = 0; i < npkts; i++) {
		if (q->q_stacked_mcr[i]) {
			q2 = q->q_stacked_mcr[i];

			if ((q2->q_dst_m != NULL) && (q2->q_src_m != q2->q_dst_m))
				m_freem(q2->q_dst_m);

			crp = (struct cryptop *)q2->q_crp;

			SIMPLEQ_INSERT_TAIL(&sc->sc_freequeue, q2, q_next);

			crp->crp_etype = EFAULT;
			crypto_done(crp);
		} else {
			break;
		}
	}

	/*
	 * Free header MCR
	 */
	if ((q->q_dst_m != NULL) && (q->q_src_m != q->q_dst_m))
		m_freem(q->q_dst_m);

	crp = (struct cryptop *)q->q_crp;

	SIMPLEQ_INSERT_TAIL(&sc->sc_freequeue, q, q_next);

	crp->crp_etype = EFAULT;
	crypto_done(crp);
	return (0);
}

/*
 * Routine to reset the chip and clean up.
 * It is assumed that the caller is in splnet().
 */
static void
ubsec_totalreset(struct ubsec_softc *sc)
{
	ubsec_reset_board(sc);
	ubsec_init_board(sc);
	ubsec_cleanchip(sc);
}

static int
ubsec_dmamap_aligned(bus_dmamap_t map)
{
	int i;

	for (i = 0; i < map->dm_nsegs; i++) {
		if (map->dm_segs[i].ds_addr & 3)
			return (0);
		if ((i != (map->dm_nsegs - 1)) &&
		    (map->dm_segs[i].ds_len & 3))
			return (0);
	}
	return (1);
}

#ifdef __OpenBSD__
struct ubsec_softc *
ubsec_kfind(struct cryptkop *krp)
{
	struct ubsec_softc *sc;
	int i;

	for (i = 0; i < ubsec_cd.cd_ndevs; i++) {
		sc = ubsec_cd.cd_devs[i];
		if (sc == NULL)
			continue;
		if (sc->sc_cid == krp->krp_hid)
			return (sc);
	}
	return (NULL);
}
#endif

static void
ubsec_kfree(struct ubsec_softc *sc, struct ubsec_q2 *q)
{
	switch (q->q_type) {
	case UBS_CTXOP_MODEXP: {
		struct ubsec_q2_modexp *me = (struct ubsec_q2_modexp *)q;

		ubsec_dma_free(sc, &me->me_q.q_mcr);
		ubsec_dma_free(sc, &me->me_q.q_ctx);
		ubsec_dma_free(sc, &me->me_M);
		ubsec_dma_free(sc, &me->me_E);
		ubsec_dma_free(sc, &me->me_C);
		ubsec_dma_free(sc, &me->me_epb);
		free(me, M_DEVBUF);
		break;
	}
	case UBS_CTXOP_RSAPRIV: {
		struct ubsec_q2_rsapriv *rp = (struct ubsec_q2_rsapriv *)q;

		ubsec_dma_free(sc, &rp->rpr_q.q_mcr);
		ubsec_dma_free(sc, &rp->rpr_q.q_ctx);
		ubsec_dma_free(sc, &rp->rpr_msgin);
		ubsec_dma_free(sc, &rp->rpr_msgout);
		free(rp, M_DEVBUF);
		break;
	}
	default:
		printf("%s: invalid kfree 0x%x\n", device_xname(&sc->sc_dv),
		    q->q_type);
		break;
	}
}

static int
ubsec_kprocess(void *arg, struct cryptkop *krp, int hint)
{
	struct ubsec_softc *sc;
	int r;

	if (krp == NULL || krp->krp_callback == NULL)
		return (EINVAL);
#ifdef __OpenBSD__
	if ((sc = ubsec_kfind(krp)) == NULL)
		return (EINVAL);
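	/*
	 * (On OpenBSD the softc is looked up from krp->krp_hid via
	 * ubsec_kfind() above; NetBSD's opencrypto hands it to us
	 * directly as the callback argument.)
	 */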
#else
	sc = arg;
	KASSERT(sc != NULL /*, ("ubsec_kprocess: null softc")*/);
#endif

	while (!SIMPLEQ_EMPTY(&sc->sc_q2free)) {
		struct ubsec_q2 *q;

		q = SIMPLEQ_FIRST(&sc->sc_q2free);
		SIMPLEQ_REMOVE_HEAD(&sc->sc_q2free, /*q,*/ q_next);
		ubsec_kfree(sc, q);
	}

	switch (krp->krp_op) {
	case CRK_MOD_EXP:
		if (sc->sc_flags & UBS_FLAGS_HWNORM)
			r = ubsec_kprocess_modexp_hw(sc, krp, hint);
		else
			r = ubsec_kprocess_modexp_sw(sc, krp, hint);
		break;
	case CRK_MOD_EXP_CRT:
		r = ubsec_kprocess_rsapriv(sc, krp, hint);
		break;
	default:
		printf("%s: kprocess: invalid op 0x%x\n",
		    device_xname(&sc->sc_dv), krp->krp_op);
		krp->krp_status = EOPNOTSUPP;
		crypto_kdone(krp);
		r = 0;
	}
	return (r);
}

/*
 * Start computation of cr[C] = (cr[M] ^ cr[E]) mod cr[N] (sw normalization)
 */
static int
ubsec_kprocess_modexp_sw(struct ubsec_softc *sc, struct cryptkop *krp,
    int hint)
{
	struct ubsec_q2_modexp *me;
	struct ubsec_mcr *mcr;
	struct ubsec_ctx_modexp *ctx;
	struct ubsec_pktbuf *epb;
	int s, err = 0;
	u_int nbits, normbits, mbits, shiftbits, ebits;

	me = (struct ubsec_q2_modexp *)malloc(sizeof *me, M_DEVBUF, M_NOWAIT);
	if (me == NULL) {
		err = ENOMEM;
		goto errout;
	}
	memset(me, 0, sizeof *me);
	me->me_krp = krp;
	me->me_q.q_type = UBS_CTXOP_MODEXP;

	nbits = ubsec_ksigbits(&krp->krp_param[UBS_MODEXP_PAR_N]);
	if (nbits <= 512)
		normbits = 512;
	else if (nbits <= 768)
		normbits = 768;
	else if (nbits <= 1024)
		normbits = 1024;
	else if (sc->sc_flags & UBS_FLAGS_BIGKEY && nbits <= 1536)
		normbits = 1536;
	else if (sc->sc_flags & UBS_FLAGS_BIGKEY && nbits <= 2048)
		normbits = 2048;
	else {
		err = E2BIG;
		goto errout;
	}

	shiftbits = normbits - nbits;

	me->me_modbits = nbits;
	me->me_shiftbits = shiftbits;
	me->me_normbits = normbits;

	/*
	 * Sanity check: result bits must be >= true modulus bits.
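	 * E.g. for a 768-bit modulus, the output crparam
	 * krp_param[krp_iparams] (C) must advertise crp_nbits >= 768,
	 * i.e. at least 96 bytes of result storage.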
	 */
	if (krp->krp_param[krp->krp_iparams].crp_nbits < nbits) {
		err = ERANGE;
		goto errout;
	}

	if (ubsec_dma_malloc(sc, sizeof(struct ubsec_mcr),
	    &me->me_q.q_mcr, 0)) {
		err = ENOMEM;
		goto errout;
	}
	mcr = (struct ubsec_mcr *)me->me_q.q_mcr.dma_vaddr;

	if (ubsec_dma_malloc(sc, sizeof(struct ubsec_ctx_modexp),
	    &me->me_q.q_ctx, 0)) {
		err = ENOMEM;
		goto errout;
	}

	mbits = ubsec_ksigbits(&krp->krp_param[UBS_MODEXP_PAR_M]);
	if (mbits > nbits) {
		err = E2BIG;
		goto errout;
	}
	if (ubsec_dma_malloc(sc, normbits / 8, &me->me_M, 0)) {
		err = ENOMEM;
		goto errout;
	}
	ubsec_kshift_r(shiftbits,
	    krp->krp_param[UBS_MODEXP_PAR_M].crp_p, mbits,
	    me->me_M.dma_vaddr, normbits);

	if (ubsec_dma_malloc(sc, normbits / 8, &me->me_C, 0)) {
		err = ENOMEM;
		goto errout;
	}
	memset(me->me_C.dma_vaddr, 0, me->me_C.dma_size);

	ebits = ubsec_ksigbits(&krp->krp_param[UBS_MODEXP_PAR_E]);
	if (ebits > nbits) {
		err = E2BIG;
		goto errout;
	}
	if (ubsec_dma_malloc(sc, normbits / 8, &me->me_E, 0)) {
		err = ENOMEM;
		goto errout;
	}
	ubsec_kshift_r(shiftbits,
	    krp->krp_param[UBS_MODEXP_PAR_E].crp_p, ebits,
	    me->me_E.dma_vaddr, normbits);

	if (ubsec_dma_malloc(sc, sizeof(struct ubsec_pktbuf),
	    &me->me_epb, 0)) {
		err = ENOMEM;
		goto errout;
	}
	epb = (struct ubsec_pktbuf *)me->me_epb.dma_vaddr;
	epb->pb_addr = htole32(me->me_E.dma_paddr);
	epb->pb_next = 0;
	epb->pb_len = htole32(normbits / 8);

#ifdef UBSEC_DEBUG
	if (ubsec_debug) {
		printf("Epb ");
		ubsec_dump_pb(epb);
	}
#endif

	mcr->mcr_pkts = htole16(1);
	mcr->mcr_flags = 0;
	mcr->mcr_cmdctxp = htole32(me->me_q.q_ctx.dma_paddr);
	mcr->mcr_reserved = 0;
	mcr->mcr_pktlen = 0;

	mcr->mcr_ipktbuf.pb_addr = htole32(me->me_M.dma_paddr);
	mcr->mcr_ipktbuf.pb_len = htole32(normbits / 8);
	mcr->mcr_ipktbuf.pb_next = htole32(me->me_epb.dma_paddr);

	mcr->mcr_opktbuf.pb_addr = htole32(me->me_C.dma_paddr);
	mcr->mcr_opktbuf.pb_next = 0;
	mcr->mcr_opktbuf.pb_len = htole32(normbits / 8);

#ifdef DIAGNOSTIC
	/* Misaligned output buffer will hang the chip. */
	if ((letoh32(mcr->mcr_opktbuf.pb_addr) & 3) != 0)
		panic("%s: modexp invalid addr 0x%x",
		    device_xname(&sc->sc_dv), letoh32(mcr->mcr_opktbuf.pb_addr));
	if ((letoh32(mcr->mcr_opktbuf.pb_len) & 3) != 0)
		panic("%s: modexp invalid len 0x%x",
		    device_xname(&sc->sc_dv), letoh32(mcr->mcr_opktbuf.pb_len));
#endif

	ctx = (struct ubsec_ctx_modexp *)me->me_q.q_ctx.dma_vaddr;
	memset(ctx, 0, sizeof(*ctx));
	ubsec_kshift_r(shiftbits,
	    krp->krp_param[UBS_MODEXP_PAR_N].crp_p, nbits,
	    ctx->me_N, normbits);
	ctx->me_len = htole16((normbits / 8) + (4 * sizeof(u_int16_t)));
	ctx->me_op = htole16(UBS_CTXOP_MODEXP);
	ctx->me_E_len = htole16(nbits);
	ctx->me_N_len = htole16(nbits);

#ifdef UBSEC_DEBUG
	if (ubsec_debug) {
		ubsec_dump_mcr(mcr);
		ubsec_dump_ctx2((struct ubsec_ctx_keyop *)ctx);
	}
#endif

	/*
	 * ubsec_feed2 will sync mcr and ctx, we just need to sync
	 * everything else.
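	 * "Everything else" is: M and E (PREWRITE: operand buffers the
	 * chip will read), C (PREREAD: the chip writes the result
	 * there), and the chained E packet-buffer descriptor epb.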
2245 */ 2246 bus_dmamap_sync(sc->sc_dmat, me->me_M.dma_map, 2247 0, me->me_M.dma_map->dm_mapsize, BUS_DMASYNC_PREWRITE); 2248 bus_dmamap_sync(sc->sc_dmat, me->me_E.dma_map, 2249 0, me->me_E.dma_map->dm_mapsize, BUS_DMASYNC_PREWRITE); 2250 bus_dmamap_sync(sc->sc_dmat, me->me_C.dma_map, 2251 0, me->me_C.dma_map->dm_mapsize, BUS_DMASYNC_PREREAD); 2252 bus_dmamap_sync(sc->sc_dmat, me->me_epb.dma_map, 2253 0, me->me_epb.dma_map->dm_mapsize, BUS_DMASYNC_PREWRITE); 2254 2255 /* Enqueue and we're done... */ 2256 s = splnet(); 2257 SIMPLEQ_INSERT_TAIL(&sc->sc_queue2, &me->me_q, q_next); 2258 ubsec_feed2(sc); 2259 ubsecstats.hst_modexp++; 2260 splx(s); 2261 2262 return (0); 2263 2264 errout: 2265 if (me != NULL) { 2266 if (me->me_q.q_mcr.dma_map != NULL) 2267 ubsec_dma_free(sc, &me->me_q.q_mcr); 2268 if (me->me_q.q_ctx.dma_map != NULL) { 2269 memset(me->me_q.q_ctx.dma_vaddr, 0, me->me_q.q_ctx.dma_size); 2270 ubsec_dma_free(sc, &me->me_q.q_ctx); 2271 } 2272 if (me->me_M.dma_map != NULL) { 2273 memset(me->me_M.dma_vaddr, 0, me->me_M.dma_size); 2274 ubsec_dma_free(sc, &me->me_M); 2275 } 2276 if (me->me_E.dma_map != NULL) { 2277 memset(me->me_E.dma_vaddr, 0, me->me_E.dma_size); 2278 ubsec_dma_free(sc, &me->me_E); 2279 } 2280 if (me->me_C.dma_map != NULL) { 2281 memset(me->me_C.dma_vaddr, 0, me->me_C.dma_size); 2282 ubsec_dma_free(sc, &me->me_C); 2283 } 2284 if (me->me_epb.dma_map != NULL) 2285 ubsec_dma_free(sc, &me->me_epb); 2286 free(me, M_DEVBUF); 2287 } 2288 krp->krp_status = err; 2289 crypto_kdone(krp); 2290 return (0); 2291 } 2292 2293 /* 2294 * Start computation of cr[C] = (cr[M] ^ cr[E]) mod cr[N] (hw normalization) 2295 */ 2296 static int 2297 ubsec_kprocess_modexp_hw(struct ubsec_softc *sc, struct cryptkop *krp, 2298 int hint) 2299 { 2300 struct ubsec_q2_modexp *me; 2301 struct ubsec_mcr *mcr; 2302 struct ubsec_ctx_modexp *ctx; 2303 struct ubsec_pktbuf *epb; 2304 int s, err = 0; 2305 u_int nbits, normbits, mbits, shiftbits, ebits; 2306 2307 me = (struct ubsec_q2_modexp *)malloc(sizeof *me, M_DEVBUF, M_NOWAIT); 2308 if (me == NULL) { 2309 err = ENOMEM; 2310 goto errout; 2311 } 2312 memset(me, 0, sizeof *me); 2313 me->me_krp = krp; 2314 me->me_q.q_type = UBS_CTXOP_MODEXP; 2315 2316 nbits = ubsec_ksigbits(&krp->krp_param[UBS_MODEXP_PAR_N]); 2317 if (nbits <= 512) 2318 normbits = 512; 2319 else if (nbits <= 768) 2320 normbits = 768; 2321 else if (nbits <= 1024) 2322 normbits = 1024; 2323 else if (sc->sc_flags & UBS_FLAGS_BIGKEY && nbits <= 1536) 2324 normbits = 1536; 2325 else if (sc->sc_flags & UBS_FLAGS_BIGKEY && nbits <= 2048) 2326 normbits = 2048; 2327 else { 2328 err = E2BIG; 2329 goto errout; 2330 } 2331 2332 shiftbits = normbits - nbits; 2333 2334 /* XXX ??? */ 2335 me->me_modbits = nbits; 2336 me->me_shiftbits = shiftbits; 2337 me->me_normbits = normbits; 2338 2339 /* Sanity check: result bits must be >= true modulus bits. 
	 */
	if (krp->krp_param[krp->krp_iparams].crp_nbits < nbits) {
		err = ERANGE;
		goto errout;
	}

	if (ubsec_dma_malloc(sc, sizeof(struct ubsec_mcr),
	    &me->me_q.q_mcr, 0)) {
		err = ENOMEM;
		goto errout;
	}
	mcr = (struct ubsec_mcr *)me->me_q.q_mcr.dma_vaddr;

	if (ubsec_dma_malloc(sc, sizeof(struct ubsec_ctx_modexp),
	    &me->me_q.q_ctx, 0)) {
		err = ENOMEM;
		goto errout;
	}

	mbits = ubsec_ksigbits(&krp->krp_param[UBS_MODEXP_PAR_M]);
	if (mbits > nbits) {
		err = E2BIG;
		goto errout;
	}
	if (ubsec_dma_malloc(sc, normbits / 8, &me->me_M, 0)) {
		err = ENOMEM;
		goto errout;
	}
	memset(me->me_M.dma_vaddr, 0, normbits / 8);
	bcopy(krp->krp_param[UBS_MODEXP_PAR_M].crp_p,
	    me->me_M.dma_vaddr, (mbits + 7) / 8);

	if (ubsec_dma_malloc(sc, normbits / 8, &me->me_C, 0)) {
		err = ENOMEM;
		goto errout;
	}
	memset(me->me_C.dma_vaddr, 0, me->me_C.dma_size);

	ebits = ubsec_ksigbits(&krp->krp_param[UBS_MODEXP_PAR_E]);
	if (ebits > nbits) {
		err = E2BIG;
		goto errout;
	}
	if (ubsec_dma_malloc(sc, normbits / 8, &me->me_E, 0)) {
		err = ENOMEM;
		goto errout;
	}
	memset(me->me_E.dma_vaddr, 0, normbits / 8);
	bcopy(krp->krp_param[UBS_MODEXP_PAR_E].crp_p,
	    me->me_E.dma_vaddr, (ebits + 7) / 8);

	if (ubsec_dma_malloc(sc, sizeof(struct ubsec_pktbuf),
	    &me->me_epb, 0)) {
		err = ENOMEM;
		goto errout;
	}
	epb = (struct ubsec_pktbuf *)me->me_epb.dma_vaddr;
	epb->pb_addr = htole32(me->me_E.dma_paddr);
	epb->pb_next = 0;
	epb->pb_len = htole32((ebits + 7) / 8);

#ifdef UBSEC_DEBUG
	if (ubsec_debug) {
		printf("Epb ");
		ubsec_dump_pb(epb);
	}
#endif

	mcr->mcr_pkts = htole16(1);
	mcr->mcr_flags = 0;
	mcr->mcr_cmdctxp = htole32(me->me_q.q_ctx.dma_paddr);
	mcr->mcr_reserved = 0;
	mcr->mcr_pktlen = 0;

	mcr->mcr_ipktbuf.pb_addr = htole32(me->me_M.dma_paddr);
	mcr->mcr_ipktbuf.pb_len = htole32(normbits / 8);
	mcr->mcr_ipktbuf.pb_next = htole32(me->me_epb.dma_paddr);

	mcr->mcr_opktbuf.pb_addr = htole32(me->me_C.dma_paddr);
	mcr->mcr_opktbuf.pb_next = 0;
	mcr->mcr_opktbuf.pb_len = htole32(normbits / 8);

#ifdef DIAGNOSTIC
	/* Misaligned output buffer will hang the chip. */
	if ((letoh32(mcr->mcr_opktbuf.pb_addr) & 3) != 0)
		panic("%s: modexp invalid addr 0x%x",
		    device_xname(&sc->sc_dv), letoh32(mcr->mcr_opktbuf.pb_addr));
	if ((letoh32(mcr->mcr_opktbuf.pb_len) & 3) != 0)
		panic("%s: modexp invalid len 0x%x",
		    device_xname(&sc->sc_dv), letoh32(mcr->mcr_opktbuf.pb_len));
#endif

	ctx = (struct ubsec_ctx_modexp *)me->me_q.q_ctx.dma_vaddr;
	memset(ctx, 0, sizeof(*ctx));
	memcpy(ctx->me_N, krp->krp_param[UBS_MODEXP_PAR_N].crp_p,
	    (nbits + 7) / 8);
	ctx->me_len = htole16((normbits / 8) + (4 * sizeof(u_int16_t)));
	ctx->me_op = htole16(UBS_CTXOP_MODEXP);
	ctx->me_E_len = htole16(ebits);
	ctx->me_N_len = htole16(nbits);

#ifdef UBSEC_DEBUG
	if (ubsec_debug) {
		ubsec_dump_mcr(mcr);
		ubsec_dump_ctx2((struct ubsec_ctx_keyop *)ctx);
	}
#endif

	/*
	 * ubsec_feed2 will sync mcr and ctx, we just need to sync
	 * everything else.
2450 */ 2451 bus_dmamap_sync(sc->sc_dmat, me->me_M.dma_map, 2452 0, me->me_M.dma_map->dm_mapsize, BUS_DMASYNC_PREWRITE); 2453 bus_dmamap_sync(sc->sc_dmat, me->me_E.dma_map, 2454 0, me->me_E.dma_map->dm_mapsize, BUS_DMASYNC_PREWRITE); 2455 bus_dmamap_sync(sc->sc_dmat, me->me_C.dma_map, 2456 0, me->me_C.dma_map->dm_mapsize, BUS_DMASYNC_PREREAD); 2457 bus_dmamap_sync(sc->sc_dmat, me->me_epb.dma_map, 2458 0, me->me_epb.dma_map->dm_mapsize, BUS_DMASYNC_PREWRITE); 2459 2460 /* Enqueue and we're done... */ 2461 s = splnet(); 2462 SIMPLEQ_INSERT_TAIL(&sc->sc_queue2, &me->me_q, q_next); 2463 ubsec_feed2(sc); 2464 splx(s); 2465 2466 return (0); 2467 2468 errout: 2469 if (me != NULL) { 2470 if (me->me_q.q_mcr.dma_map != NULL) 2471 ubsec_dma_free(sc, &me->me_q.q_mcr); 2472 if (me->me_q.q_ctx.dma_map != NULL) { 2473 memset(me->me_q.q_ctx.dma_vaddr, 0, me->me_q.q_ctx.dma_size); 2474 ubsec_dma_free(sc, &me->me_q.q_ctx); 2475 } 2476 if (me->me_M.dma_map != NULL) { 2477 memset(me->me_M.dma_vaddr, 0, me->me_M.dma_size); 2478 ubsec_dma_free(sc, &me->me_M); 2479 } 2480 if (me->me_E.dma_map != NULL) { 2481 memset(me->me_E.dma_vaddr, 0, me->me_E.dma_size); 2482 ubsec_dma_free(sc, &me->me_E); 2483 } 2484 if (me->me_C.dma_map != NULL) { 2485 memset(me->me_C.dma_vaddr, 0, me->me_C.dma_size); 2486 ubsec_dma_free(sc, &me->me_C); 2487 } 2488 if (me->me_epb.dma_map != NULL) 2489 ubsec_dma_free(sc, &me->me_epb); 2490 free(me, M_DEVBUF); 2491 } 2492 krp->krp_status = err; 2493 crypto_kdone(krp); 2494 return (0); 2495 } 2496 2497 static int 2498 ubsec_kprocess_rsapriv(struct ubsec_softc *sc, struct cryptkop *krp, 2499 int hint) 2500 { 2501 struct ubsec_q2_rsapriv *rp = NULL; 2502 struct ubsec_mcr *mcr; 2503 struct ubsec_ctx_rsapriv *ctx; 2504 int s, err = 0; 2505 u_int padlen, msglen; 2506 2507 msglen = ubsec_ksigbits(&krp->krp_param[UBS_RSAPRIV_PAR_P]); 2508 padlen = ubsec_ksigbits(&krp->krp_param[UBS_RSAPRIV_PAR_Q]); 2509 if (msglen > padlen) 2510 padlen = msglen; 2511 2512 if (padlen <= 256) 2513 padlen = 256; 2514 else if (padlen <= 384) 2515 padlen = 384; 2516 else if (padlen <= 512) 2517 padlen = 512; 2518 else if (sc->sc_flags & UBS_FLAGS_BIGKEY && padlen <= 768) 2519 padlen = 768; 2520 else if (sc->sc_flags & UBS_FLAGS_BIGKEY && padlen <= 1024) 2521 padlen = 1024; 2522 else { 2523 err = E2BIG; 2524 goto errout; 2525 } 2526 2527 if (ubsec_ksigbits(&krp->krp_param[UBS_RSAPRIV_PAR_DP]) > padlen) { 2528 err = E2BIG; 2529 goto errout; 2530 } 2531 2532 if (ubsec_ksigbits(&krp->krp_param[UBS_RSAPRIV_PAR_DQ]) > padlen) { 2533 err = E2BIG; 2534 goto errout; 2535 } 2536 2537 if (ubsec_ksigbits(&krp->krp_param[UBS_RSAPRIV_PAR_PINV]) > padlen) { 2538 err = E2BIG; 2539 goto errout; 2540 } 2541 2542 rp = malloc(sizeof *rp, M_DEVBUF, M_NOWAIT|M_ZERO); 2543 if (rp == NULL) 2544 return (ENOMEM); 2545 rp->rpr_krp = krp; 2546 rp->rpr_q.q_type = UBS_CTXOP_RSAPRIV; 2547 2548 if (ubsec_dma_malloc(sc, sizeof(struct ubsec_mcr), 2549 &rp->rpr_q.q_mcr, 0)) { 2550 err = ENOMEM; 2551 goto errout; 2552 } 2553 mcr = (struct ubsec_mcr *)rp->rpr_q.q_mcr.dma_vaddr; 2554 2555 if (ubsec_dma_malloc(sc, sizeof(struct ubsec_ctx_rsapriv), 2556 &rp->rpr_q.q_ctx, 0)) { 2557 err = ENOMEM; 2558 goto errout; 2559 } 2560 ctx = (struct ubsec_ctx_rsapriv *)rp->rpr_q.q_ctx.dma_vaddr; 2561 memset(ctx, 0, sizeof *ctx); 2562 2563 /* Copy in p */ 2564 bcopy(krp->krp_param[UBS_RSAPRIV_PAR_P].crp_p, 2565 &ctx->rpr_buf[0 * (padlen / 8)], 2566 (krp->krp_param[UBS_RSAPRIV_PAR_P].crp_nbits + 7) / 8); 2567 2568 /* Copy in q */ 2569 
	/* Copy in q */
	bcopy(krp->krp_param[UBS_RSAPRIV_PAR_Q].crp_p,
	    &ctx->rpr_buf[1 * (padlen / 8)],
	    (krp->krp_param[UBS_RSAPRIV_PAR_Q].crp_nbits + 7) / 8);

	/* Copy in dp */
	bcopy(krp->krp_param[UBS_RSAPRIV_PAR_DP].crp_p,
	    &ctx->rpr_buf[2 * (padlen / 8)],
	    (krp->krp_param[UBS_RSAPRIV_PAR_DP].crp_nbits + 7) / 8);

	/* Copy in dq */
	bcopy(krp->krp_param[UBS_RSAPRIV_PAR_DQ].crp_p,
	    &ctx->rpr_buf[3 * (padlen / 8)],
	    (krp->krp_param[UBS_RSAPRIV_PAR_DQ].crp_nbits + 7) / 8);

	/* Copy in pinv */
	bcopy(krp->krp_param[UBS_RSAPRIV_PAR_PINV].crp_p,
	    &ctx->rpr_buf[4 * (padlen / 8)],
	    (krp->krp_param[UBS_RSAPRIV_PAR_PINV].crp_nbits + 7) / 8);

	msglen = padlen * 2;

	/* Copy in input message (aligned buffer/length). */
	if (ubsec_ksigbits(&krp->krp_param[UBS_RSAPRIV_PAR_MSGIN]) > msglen) {
		/* Is this likely? */
		err = E2BIG;
		goto errout;
	}
	if (ubsec_dma_malloc(sc, (msglen + 7) / 8, &rp->rpr_msgin, 0)) {
		err = ENOMEM;
		goto errout;
	}
	memset(rp->rpr_msgin.dma_vaddr, 0, (msglen + 7) / 8);
	bcopy(krp->krp_param[UBS_RSAPRIV_PAR_MSGIN].crp_p,
	    rp->rpr_msgin.dma_vaddr,
	    (krp->krp_param[UBS_RSAPRIV_PAR_MSGIN].crp_nbits + 7) / 8);

	/* Prepare space for output message (aligned buffer/length). */
	if (ubsec_ksigbits(&krp->krp_param[UBS_RSAPRIV_PAR_MSGOUT]) < msglen) {
		/* Is this likely? */
		err = E2BIG;
		goto errout;
	}
	if (ubsec_dma_malloc(sc, (msglen + 7) / 8, &rp->rpr_msgout, 0)) {
		err = ENOMEM;
		goto errout;
	}
	memset(rp->rpr_msgout.dma_vaddr, 0, (msglen + 7) / 8);

	mcr->mcr_pkts = htole16(1);
	mcr->mcr_flags = 0;
	mcr->mcr_cmdctxp = htole32(rp->rpr_q.q_ctx.dma_paddr);
	mcr->mcr_ipktbuf.pb_addr = htole32(rp->rpr_msgin.dma_paddr);
	mcr->mcr_ipktbuf.pb_next = 0;
	mcr->mcr_ipktbuf.pb_len = htole32(rp->rpr_msgin.dma_size);
	mcr->mcr_reserved = 0;
	mcr->mcr_pktlen = htole16(msglen);
	mcr->mcr_opktbuf.pb_addr = htole32(rp->rpr_msgout.dma_paddr);
	mcr->mcr_opktbuf.pb_next = 0;
	mcr->mcr_opktbuf.pb_len = htole32(rp->rpr_msgout.dma_size);

#ifdef DIAGNOSTIC
	if (rp->rpr_msgin.dma_paddr & 3 || rp->rpr_msgin.dma_size & 3) {
		panic("%s: rsapriv: invalid msgin 0x%lx(0x%lx)",
		    device_xname(&sc->sc_dv), (u_long) rp->rpr_msgin.dma_paddr,
		    (u_long) rp->rpr_msgin.dma_size);
	}
	if (rp->rpr_msgout.dma_paddr & 3 || rp->rpr_msgout.dma_size & 3) {
		panic("%s: rsapriv: invalid msgout 0x%lx(0x%lx)",
		    device_xname(&sc->sc_dv), (u_long) rp->rpr_msgout.dma_paddr,
		    (u_long) rp->rpr_msgout.dma_size);
	}
#endif

	ctx->rpr_len = htole16((sizeof(u_int16_t) * 4) + (5 * (padlen / 8)));
	ctx->rpr_op = htole16(UBS_CTXOP_RSAPRIV);
	ctx->rpr_q_len = htole16(padlen);
	ctx->rpr_p_len = htole16(padlen);

	/*
	 * ubsec_feed2 will sync mcr and ctx, we just need to sync
	 * everything else.
	 */
	bus_dmamap_sync(sc->sc_dmat, rp->rpr_msgin.dma_map,
	    0, rp->rpr_msgin.dma_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->sc_dmat, rp->rpr_msgout.dma_map,
	    0, rp->rpr_msgout.dma_map->dm_mapsize, BUS_DMASYNC_PREREAD);

	/*
	 * Enqueue and we're done...
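	 * ubsec_feed2() pushes sc_queue2 at the chip as MCR2 slots free
	 * up; on completion, the UBS_CTXOP_RSAPRIV case in the MCR2
	 * callback above copies rpr_msgout back into
	 * krp_param[UBS_RSAPRIV_PAR_MSGOUT] and calls crypto_kdone().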
	 */
	s = splnet();
	SIMPLEQ_INSERT_TAIL(&sc->sc_queue2, &rp->rpr_q, q_next);
	ubsec_feed2(sc);
	ubsecstats.hst_modexpcrt++;
	splx(s);
	return (0);

errout:
	if (rp != NULL) {
		if (rp->rpr_q.q_mcr.dma_map != NULL)
			ubsec_dma_free(sc, &rp->rpr_q.q_mcr);
		if (rp->rpr_q.q_ctx.dma_map != NULL) {
			memset(rp->rpr_q.q_ctx.dma_vaddr, 0, rp->rpr_q.q_ctx.dma_size);
			ubsec_dma_free(sc, &rp->rpr_q.q_ctx);
		}
		if (rp->rpr_msgin.dma_map != NULL) {
			memset(rp->rpr_msgin.dma_vaddr, 0, rp->rpr_msgin.dma_size);
			ubsec_dma_free(sc, &rp->rpr_msgin);
		}
		if (rp->rpr_msgout.dma_map != NULL) {
			memset(rp->rpr_msgout.dma_vaddr, 0, rp->rpr_msgout.dma_size);
			ubsec_dma_free(sc, &rp->rpr_msgout);
		}
		free(rp, M_DEVBUF);
	}
	krp->krp_status = err;
	crypto_kdone(krp);
	return (0);
}

#ifdef UBSEC_DEBUG
static void
ubsec_dump_pb(volatile struct ubsec_pktbuf *pb)
{
	printf("addr 0x%x (0x%x) next 0x%x\n",
	    letoh32(pb->pb_addr), letoh32(pb->pb_len), letoh32(pb->pb_next));
}

static void
ubsec_dump_ctx2(volatile struct ubsec_ctx_keyop *c)
{
	printf("CTX (0x%x):\n", letoh16(c->ctx_len));
	switch (letoh16(c->ctx_op)) {
	case UBS_CTXOP_RNGBYPASS:
	case UBS_CTXOP_RNGSHA1:
		break;
	case UBS_CTXOP_MODEXP:
	{
		struct ubsec_ctx_modexp *cx = (void *)c;
		int i, len;

		printf(" Elen %u, Nlen %u\n",
		    letoh16(cx->me_E_len), letoh16(cx->me_N_len));
		len = (letoh16(cx->me_N_len) + 7) / 8;
		for (i = 0; i < len; i++)
			printf("%s%02x", (i == 0) ? " N: " : ":", cx->me_N[i]);
		printf("\n");
		break;
	}
	default:
		printf("unknown context: %x\n", letoh16(c->ctx_op));
	}
	printf("END CTX\n");
}

static void
ubsec_dump_mcr(struct ubsec_mcr *mcr)
{
	volatile struct ubsec_mcr_add *ma;
	int i;

	printf("MCR:\n");
	printf(" pkts: %u, flags 0x%x\n",
	    letoh16(mcr->mcr_pkts), letoh16(mcr->mcr_flags));
	ma = (volatile struct ubsec_mcr_add *)&mcr->mcr_cmdctxp;
	for (i = 0; i < letoh16(mcr->mcr_pkts); i++) {
		printf(" %d: ctx 0x%x len 0x%x rsvd 0x%x\n", i,
		    letoh32(ma->mcr_cmdctxp), letoh16(ma->mcr_pktlen),
		    letoh16(ma->mcr_reserved));
		printf(" %d: ipkt ", i);
		ubsec_dump_pb(&ma->mcr_ipktbuf);
		printf(" %d: opkt ", i);
		ubsec_dump_pb(&ma->mcr_opktbuf);
		ma++;
	}
	printf("END MCR\n");
}
#endif /* UBSEC_DEBUG */

/*
 * Return the number of significant bits of a big number.
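 * The crparam byte string is least-significant-byte first, so scan
 * from the top.  E.g. crp_p = { 0x01, 0x00 } with crp_nbits = 16:
 * the high byte (p[1]) is 0, drop 8 bits; the next byte is 0x01,
 * drop 7 leading zero bits; 1 significant bit remains.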
2744 */ 2745 static int 2746 ubsec_ksigbits(struct crparam *cr) 2747 { 2748 u_int plen = (cr->crp_nbits + 7) / 8; 2749 int i, sig = plen * 8; 2750 u_int8_t c, *p = cr->crp_p; 2751 2752 for (i = plen - 1; i >= 0; i--) { 2753 c = p[i]; 2754 if (c != 0) { 2755 while ((c & 0x80) == 0) { 2756 sig--; 2757 c <<= 1; 2758 } 2759 break; 2760 } 2761 sig -= 8; 2762 } 2763 return (sig); 2764 } 2765 2766 static void 2767 ubsec_kshift_r(u_int shiftbits, u_int8_t *src, u_int srcbits, 2768 u_int8_t *dst, u_int dstbits) 2769 { 2770 u_int slen, dlen; 2771 int i, si, di, n; 2772 2773 slen = (srcbits + 7) / 8; 2774 dlen = (dstbits + 7) / 8; 2775 2776 for (i = 0; i < slen; i++) 2777 dst[i] = src[i]; 2778 for (i = 0; i < dlen - slen; i++) 2779 dst[slen + i] = 0; 2780 2781 n = shiftbits / 8; 2782 if (n != 0) { 2783 si = dlen - n - 1; 2784 di = dlen - 1; 2785 while (si >= 0) 2786 dst[di--] = dst[si--]; 2787 while (di >= 0) 2788 dst[di--] = 0; 2789 } 2790 2791 n = shiftbits % 8; 2792 if (n != 0) { 2793 for (i = dlen - 1; i > 0; i--) 2794 dst[i] = (dst[i] << n) | 2795 (dst[i - 1] >> (8 - n)); 2796 dst[0] = dst[0] << n; 2797 } 2798 } 2799 2800 static void 2801 ubsec_kshift_l(u_int shiftbits, u_int8_t *src, u_int srcbits, 2802 u_int8_t *dst, u_int dstbits) 2803 { 2804 int slen, dlen, i, n; 2805 2806 slen = (srcbits + 7) / 8; 2807 dlen = (dstbits + 7) / 8; 2808 2809 n = shiftbits / 8; 2810 for (i = 0; i < slen; i++) 2811 dst[i] = src[i + n]; 2812 for (i = 0; i < dlen - slen; i++) 2813 dst[slen + i] = 0; 2814 2815 n = shiftbits % 8; 2816 if (n != 0) { 2817 for (i = 0; i < (dlen - 1); i++) 2818 dst[i] = (dst[i] >> n) | (dst[i + 1] << (8 - n)); 2819 dst[dlen - 1] = dst[dlen - 1] >> n; 2820 } 2821 } 2822