1 /* $NetBSD: ubsec.c,v 1.4 2003/08/28 19:00:52 thorpej Exp $ */ 2 /* $FreeBSD: src/sys/dev/ubsec/ubsec.c,v 1.6.2.6 2003/01/23 21:06:43 sam Exp $ */ 3 /* $OpenBSD: ubsec.c,v 1.127 2003/06/04 14:04:58 jason Exp $ */ 4 5 /* 6 * Copyright (c) 2000 Jason L. Wright (jason@thought.net) 7 * Copyright (c) 2000 Theo de Raadt (deraadt@openbsd.org) 8 * Copyright (c) 2001 Patrik Lindergren (patrik@ipunplugged.com) 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in the 17 * documentation and/or other materials provided with the distribution. 18 * 19 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 20 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 21 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 22 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, 23 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 24 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 25 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, 27 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN 28 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 29 * POSSIBILITY OF SUCH DAMAGE. 30 * 31 * Effort sponsored in part by the Defense Advanced Research Projects 32 * Agency (DARPA) and Air Force Research Laboratory, Air Force 33 * Materiel Command, USAF, under agreement number F30602-01-2-0537. 34 * 35 */ 36 37 #undef UBSEC_DEBUG 38 39 /* 40 * uBsec 5[56]01, bcm580xx, bcm582x hardware crypto accelerator 41 */ 42 43 #include <sys/param.h> 44 #include <sys/systm.h> 45 #include <sys/proc.h> 46 #include <sys/endian.h> 47 #ifdef __NetBSD__ 48 #define letoh16 htole16 49 #define letoh32 htole32 50 #define UBSEC_NO_RNG /* until statistically tested */ 51 #endif 52 #include <sys/errno.h> 53 #include <sys/malloc.h> 54 #include <sys/kernel.h> 55 #include <sys/mbuf.h> 56 #include <sys/device.h> 57 #include <sys/queue.h> 58 59 #include <uvm/uvm_extern.h> 60 61 #include <opencrypto/cryptodev.h> 62 #include <opencrypto/cryptosoft.h> 63 #ifdef __OpenBSD__ 64 #include <dev/rndvar.h> 65 #include <sys/md5k.h> 66 #else 67 #include <sys/rnd.h> 68 #include <sys/md5.h> 69 #endif 70 #include <sys/sha1.h> 71 72 #include <dev/pci/pcireg.h> 73 #include <dev/pci/pcivar.h> 74 #include <dev/pci/pcidevs.h> 75 76 #include <dev/pci/ubsecreg.h> 77 #include <dev/pci/ubsecvar.h> 78 79 /* 80 * Prototypes and count for the pci_device structure 81 */ 82 static int ubsec_probe(struct device *, struct cfdata *, void *); 83 static void ubsec_attach(struct device *, struct device *, void *); 84 static void ubsec_reset_board(struct ubsec_softc *); 85 static void ubsec_init_board(struct ubsec_softc *); 86 static void ubsec_init_pciregs(struct pci_attach_args *pa); 87 static void ubsec_cleanchip(struct ubsec_softc *); 88 static void ubsec_totalreset(struct ubsec_softc *); 89 static int ubsec_free_q(struct ubsec_softc*, struct ubsec_q *); 90 91 #ifdef __OpenBSD__ 92 struct cfattach ubsec_ca = { 93 sizeof(struct ubsec_softc), ubsec_probe, ubsec_attach, 94 }; 95 96 struct cfdriver ubsec_cd = { 97 0, "ubsec", DV_DULL 98 }; 99 #else 100 CFATTACH_DECL(ubsec, sizeof(struct ubsec_softc), ubsec_probe, ubsec_attach, 101 NULL, NULL); 102 extern struct cfdriver ubsec_cd; 103 #endif 104 105 /* patchable */ 106 #ifdef UBSEC_DEBUG 107 extern int ubsec_debug; 108 int ubsec_debug=1; 109 #endif 110 111 static int ubsec_intr(void *); 112 static int ubsec_newsession(void*, u_int32_t *, struct cryptoini *); 113 static int ubsec_freesession(void*, u_int64_t); 114 static int ubsec_process(void*, struct cryptop *, int hint); 115 static void ubsec_callback(struct ubsec_softc *, struct ubsec_q *); 116 static void ubsec_feed(struct ubsec_softc *); 117 static void ubsec_mcopy(struct mbuf *, struct mbuf *, int, int); 118 static void ubsec_callback2(struct ubsec_softc *, struct ubsec_q2 *); 119 static void ubsec_feed2(struct ubsec_softc *); 120 #ifndef UBSEC_NO_RNG 121 static void ubsec_rng(void *); 122 #endif /* UBSEC_NO_RNG */ 123 static int ubsec_dma_malloc(struct ubsec_softc *, bus_size_t, 124 struct ubsec_dma_alloc *, int); 125 static void ubsec_dma_free(struct ubsec_softc *, struct ubsec_dma_alloc *); 126 static int ubsec_dmamap_aligned(bus_dmamap_t); 127 128 static int ubsec_kprocess(void*, struct cryptkop *, int); 129 static int ubsec_kprocess_modexp_sw(struct ubsec_softc *, 130 struct cryptkop *, int); 131 static int ubsec_kprocess_modexp_hw(struct ubsec_softc *, 132 struct cryptkop *, int); 133 static int ubsec_kprocess_rsapriv(struct ubsec_softc *, 134 struct cryptkop *, int); 135 static void ubsec_kfree(struct ubsec_softc *, struct ubsec_q2 *); 136 static int ubsec_ksigbits(struct crparam *); 137 static void ubsec_kshift_r(u_int, u_int8_t *, u_int, u_int8_t *, u_int); 138 static void ubsec_kshift_l(u_int, u_int8_t *, u_int, u_int8_t *, u_int); 139 140 #ifdef UBSEC_DEBUG 141 static void ubsec_dump_pb(volatile struct ubsec_pktbuf *); 142 static void ubsec_dump_mcr(struct ubsec_mcr *); 143 static void ubsec_dump_ctx2(volatile struct ubsec_ctx_keyop *); 144 #endif 145 146 #define READ_REG(sc,r) \ 147 bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (r)) 148 149 #define WRITE_REG(sc,reg,val) \ 150 bus_space_write_4((sc)->sc_st, (sc)->sc_sh, reg, val) 151 152 #define SWAP32(x) (x) = htole32(ntohl((x))) 153 #ifndef HTOLE32 154 #define HTOLE32(x) (x) = htole32(x) 155 #endif 156 157 struct ubsec_stats ubsecstats; 158 159 /* 160 * ubsec_maxbatch controls the number of crypto ops to voluntarily 161 * collect into one submission to the hardware. This batching happens 162 * when ops are dispatched from the crypto subsystem with a hint that 163 * more are to follow immediately. These ops must also not be marked 164 * with a ``no delay'' flag. 165 */ 166 static int ubsec_maxbatch = 1; 167 #ifdef SYSCTL_INT 168 SYSCTL_INT(_kern, OID_AUTO, ubsec_maxbatch, CTLFLAG_RW, &ubsec_maxbatch, 169 0, "Broadcom driver: max ops to batch w/o interrupt"); 170 #endif 171 172 /* 173 * ubsec_maxaggr controls the number of crypto ops to submit to the 174 * hardware as a unit. This aggregation reduces the number of interrupts 175 * to the host at the expense of increased latency (for all but the last 176 * operation). For network traffic setting this to one yields the highest 177 * performance but at the expense of more interrupt processing. 178 */ 179 static int ubsec_maxaggr = 1; 180 #ifdef SYSCTL_INT 181 SYSCTL_INT(_kern, OID_AUTO, ubsec_maxaggr, CTLFLAG_RW, &ubsec_maxaggr, 182 0, "Broadcom driver: max ops to aggregate under one interrupt"); 183 #endif 184 185 static const struct ubsec_product { 186 pci_vendor_id_t ubsec_vendor; 187 pci_product_id_t ubsec_product; 188 int ubsec_flags; 189 int ubsec_statmask; 190 const char *ubsec_name; 191 } ubsec_products[] = { 192 { PCI_VENDOR_BLUESTEEL, PCI_PRODUCT_BLUESTEEL_5501, 193 0, 194 BS_STAT_MCR1_DONE | BS_STAT_DMAERR, 195 "Bluesteel 5501" 196 }, 197 { PCI_VENDOR_BLUESTEEL, PCI_PRODUCT_BLUESTEEL_5601, 198 UBS_FLAGS_KEY | UBS_FLAGS_RNG, 199 BS_STAT_MCR1_DONE | BS_STAT_DMAERR, 200 "Bluesteel 5601" 201 }, 202 203 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_5801, 204 0, 205 BS_STAT_MCR1_DONE | BS_STAT_DMAERR, 206 "Broadcom BCM5801" 207 }, 208 209 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_5802, 210 UBS_FLAGS_KEY | UBS_FLAGS_RNG, 211 BS_STAT_MCR1_DONE | BS_STAT_DMAERR, 212 "Broadcom BCM5802" 213 }, 214 215 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_5805, 216 UBS_FLAGS_KEY | UBS_FLAGS_RNG, 217 BS_STAT_MCR1_DONE | BS_STAT_DMAERR, 218 "Broadcom BCM5805" 219 }, 220 221 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_5820, 222 UBS_FLAGS_KEY | UBS_FLAGS_RNG | UBS_FLAGS_LONGCTX | 223 UBS_FLAGS_HWNORM | UBS_FLAGS_BIGKEY, 224 BS_STAT_MCR1_DONE | BS_STAT_DMAERR, 225 "Broadcom BCM5820" 226 }, 227 228 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_5821, 229 UBS_FLAGS_KEY | UBS_FLAGS_RNG | UBS_FLAGS_LONGCTX | 230 UBS_FLAGS_HWNORM | UBS_FLAGS_BIGKEY, 231 BS_STAT_MCR1_DONE | BS_STAT_DMAERR | 232 BS_STAT_MCR1_ALLEMPTY | BS_STAT_MCR2_ALLEMPTY, 233 "Broadcom BCM5821" 234 }, 235 { PCI_VENDOR_SUN, PCI_PRODUCT_SUN_SCA1K, 236 UBS_FLAGS_KEY | UBS_FLAGS_RNG | UBS_FLAGS_LONGCTX | 237 UBS_FLAGS_HWNORM | UBS_FLAGS_BIGKEY, 238 BS_STAT_MCR1_DONE | BS_STAT_DMAERR | 239 BS_STAT_MCR1_ALLEMPTY | BS_STAT_MCR2_ALLEMPTY, 240 "Sun Crypto Accelerator 1000" 241 }, 242 { PCI_VENDOR_SUN, PCI_PRODUCT_SUN_5821, 243 UBS_FLAGS_KEY | UBS_FLAGS_RNG | UBS_FLAGS_LONGCTX | 244 UBS_FLAGS_HWNORM | UBS_FLAGS_BIGKEY, 245 BS_STAT_MCR1_DONE | BS_STAT_DMAERR | 246 BS_STAT_MCR1_ALLEMPTY | BS_STAT_MCR2_ALLEMPTY, 247 "Broadcom BCM5821 (Sun)" 248 }, 249 250 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_5822, 251 UBS_FLAGS_KEY | UBS_FLAGS_RNG | UBS_FLAGS_LONGCTX | 252 UBS_FLAGS_HWNORM | UBS_FLAGS_BIGKEY, 253 BS_STAT_MCR1_DONE | BS_STAT_DMAERR | 254 BS_STAT_MCR1_ALLEMPTY | BS_STAT_MCR2_ALLEMPTY, 255 "Broadcom BCM5822" 256 }, 257 258 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_5823, 259 UBS_FLAGS_KEY | UBS_FLAGS_RNG | UBS_FLAGS_LONGCTX | 260 UBS_FLAGS_HWNORM | UBS_FLAGS_BIGKEY, 261 BS_STAT_MCR1_DONE | BS_STAT_DMAERR | 262 BS_STAT_MCR1_ALLEMPTY | BS_STAT_MCR2_ALLEMPTY, 263 "Broadcom BCM5823" 264 }, 265 266 { 0, 0, 267 0, 268 0, 269 NULL 270 } 271 }; 272 273 static const struct ubsec_product * 274 ubsec_lookup(const struct pci_attach_args *pa) 275 { 276 const struct ubsec_product *up; 277 278 for (up = ubsec_products; up->ubsec_name != NULL; up++) { 279 if (PCI_VENDOR(pa->pa_id) == up->ubsec_vendor && 280 PCI_PRODUCT(pa->pa_id) == up->ubsec_product) 281 return (up); 282 } 283 return (NULL); 284 } 285 286 static int 287 ubsec_probe(parent, match, aux) 288 struct device *parent; 289 struct cfdata *match; 290 void *aux; 291 { 292 struct pci_attach_args *pa = (struct pci_attach_args *)aux; 293 294 if (ubsec_lookup(pa) != NULL) 295 return (1); 296 297 return (0); 298 } 299 300 void 301 ubsec_attach(parent, self, aux) 302 struct device *parent, *self; 303 void *aux; 304 { 305 struct ubsec_softc *sc = (struct ubsec_softc *)self; 306 struct pci_attach_args *pa = aux; 307 const struct ubsec_product *up; 308 pci_chipset_tag_t pc = pa->pa_pc; 309 pci_intr_handle_t ih; 310 const char *intrstr = NULL; 311 struct ubsec_dma *dmap; 312 u_int32_t cmd, i; 313 314 up = ubsec_lookup(pa); 315 if (up == NULL) { 316 printf("\n"); 317 panic("ubsec_attach: impossible"); 318 } 319 320 aprint_naive(": Crypto processor\n"); 321 aprint_normal(": %s, rev. %d\n", up->ubsec_name, 322 PCI_REVISION(pa->pa_class)); 323 324 SIMPLEQ_INIT(&sc->sc_queue); 325 SIMPLEQ_INIT(&sc->sc_qchip); 326 SIMPLEQ_INIT(&sc->sc_queue2); 327 SIMPLEQ_INIT(&sc->sc_qchip2); 328 SIMPLEQ_INIT(&sc->sc_q2free); 329 330 sc->sc_flags = up->ubsec_flags; 331 sc->sc_statmask = up->ubsec_statmask; 332 333 cmd = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG); 334 cmd |= PCI_COMMAND_MASTER_ENABLE; 335 pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, cmd); 336 337 if (pci_mapreg_map(pa, BS_BAR, PCI_MAPREG_TYPE_MEM, 0, 338 &sc->sc_st, &sc->sc_sh, NULL, NULL)) { 339 aprint_error("%s: can't find mem space", 340 sc->sc_dv.dv_xname); 341 return; 342 } 343 344 sc->sc_dmat = pa->pa_dmat; 345 346 if (pci_intr_map(pa, &ih)) { 347 aprint_error("%s: couldn't map interrupt\n", 348 sc->sc_dv.dv_xname); 349 return; 350 } 351 intrstr = pci_intr_string(pc, ih); 352 sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, ubsec_intr, sc); 353 if (sc->sc_ih == NULL) { 354 aprint_error("%s: couldn't establish interrupt", 355 sc->sc_dv.dv_xname); 356 if (intrstr != NULL) 357 aprint_normal(" at %s", intrstr); 358 aprint_normal("\n"); 359 return; 360 } 361 aprint_normal("%s: interrupting at %s\n", sc->sc_dv.dv_xname, intrstr); 362 363 sc->sc_cid = crypto_get_driverid(0); 364 if (sc->sc_cid < 0) { 365 aprint_error("%s: couldn't get crypto driver id\n", 366 sc->sc_dv.dv_xname); 367 pci_intr_disestablish(pc, sc->sc_ih); 368 return; 369 } 370 371 SIMPLEQ_INIT(&sc->sc_freequeue); 372 dmap = sc->sc_dmaa; 373 for (i = 0; i < UBS_MAX_NQUEUE; i++, dmap++) { 374 struct ubsec_q *q; 375 376 q = (struct ubsec_q *)malloc(sizeof(struct ubsec_q), 377 M_DEVBUF, M_NOWAIT); 378 if (q == NULL) { 379 aprint_error("%s: can't allocate queue buffers\n", 380 sc->sc_dv.dv_xname); 381 break; 382 } 383 384 if (ubsec_dma_malloc(sc, sizeof(struct ubsec_dmachunk), 385 &dmap->d_alloc, 0)) { 386 aprint_error("%s: can't allocate dma buffers\n", 387 sc->sc_dv.dv_xname); 388 free(q, M_DEVBUF); 389 break; 390 } 391 dmap->d_dma = (struct ubsec_dmachunk *)dmap->d_alloc.dma_vaddr; 392 393 q->q_dma = dmap; 394 sc->sc_queuea[i] = q; 395 396 SIMPLEQ_INSERT_TAIL(&sc->sc_freequeue, q, q_next); 397 } 398 399 crypto_register(sc->sc_cid, CRYPTO_3DES_CBC, 0, 0, 400 ubsec_newsession, ubsec_freesession, ubsec_process, sc); 401 crypto_register(sc->sc_cid, CRYPTO_DES_CBC, 0, 0, 402 ubsec_newsession, ubsec_freesession, ubsec_process, sc); 403 crypto_register(sc->sc_cid, CRYPTO_MD5_HMAC, 0, 0, 404 ubsec_newsession, ubsec_freesession, ubsec_process, sc); 405 crypto_register(sc->sc_cid, CRYPTO_SHA1_HMAC, 0, 0, 406 ubsec_newsession, ubsec_freesession, ubsec_process, sc); 407 408 /* 409 * Reset Broadcom chip 410 */ 411 ubsec_reset_board(sc); 412 413 /* 414 * Init Broadcom specific PCI settings 415 */ 416 ubsec_init_pciregs(pa); 417 418 /* 419 * Init Broadcom chip 420 */ 421 ubsec_init_board(sc); 422 423 #ifndef UBSEC_NO_RNG 424 if (sc->sc_flags & UBS_FLAGS_RNG) { 425 sc->sc_statmask |= BS_STAT_MCR2_DONE; 426 427 if (ubsec_dma_malloc(sc, sizeof(struct ubsec_mcr), 428 &sc->sc_rng.rng_q.q_mcr, 0)) 429 goto skip_rng; 430 431 if (ubsec_dma_malloc(sc, sizeof(struct ubsec_ctx_rngbypass), 432 &sc->sc_rng.rng_q.q_ctx, 0)) { 433 ubsec_dma_free(sc, &sc->sc_rng.rng_q.q_mcr); 434 goto skip_rng; 435 } 436 437 if (ubsec_dma_malloc(sc, sizeof(u_int32_t) * 438 UBSEC_RNG_BUFSIZ, &sc->sc_rng.rng_buf, 0)) { 439 ubsec_dma_free(sc, &sc->sc_rng.rng_q.q_ctx); 440 ubsec_dma_free(sc, &sc->sc_rng.rng_q.q_mcr); 441 goto skip_rng; 442 } 443 444 if (hz >= 100) 445 sc->sc_rnghz = hz / 100; 446 else 447 sc->sc_rnghz = 1; 448 #ifdef __OpenBSD__ 449 timeout_set(&sc->sc_rngto, ubsec_rng, sc); 450 timeout_add(&sc->sc_rngto, sc->sc_rnghz); 451 #else 452 callout_init(&sc->sc_rngto); 453 callout_reset(&sc->sc_rngto, sc->sc_rnghz, ubsec_rng, sc); 454 #endif 455 skip_rng: 456 if (sc->sc_rnghz) 457 aprint_normal("%s: random number generator enabled\n", 458 sc->sc_dv.dv_xname); 459 else 460 aprint_error("%s: WARNING: random number generator " 461 "disabled\n", sc->sc_dv.dv_xname); 462 } 463 #endif /* UBSEC_NO_RNG */ 464 465 if (sc->sc_flags & UBS_FLAGS_KEY) { 466 sc->sc_statmask |= BS_STAT_MCR2_DONE; 467 468 crypto_kregister(sc->sc_cid, CRK_MOD_EXP, 0, 469 ubsec_kprocess, sc); 470 #if 0 471 crypto_kregister(sc->sc_cid, CRK_MOD_EXP_CRT, 0, 472 ubsec_kprocess, sc); 473 #endif 474 } 475 } 476 477 /* 478 * UBSEC Interrupt routine 479 */ 480 int 481 ubsec_intr(void *arg) 482 { 483 struct ubsec_softc *sc = arg; 484 volatile u_int32_t stat; 485 struct ubsec_q *q; 486 struct ubsec_dma *dmap; 487 int npkts = 0, i; 488 489 stat = READ_REG(sc, BS_STAT); 490 stat &= sc->sc_statmask; 491 if (stat == 0) { 492 return (0); 493 } 494 495 WRITE_REG(sc, BS_STAT, stat); /* IACK */ 496 497 /* 498 * Check to see if we have any packets waiting for us 499 */ 500 if ((stat & BS_STAT_MCR1_DONE)) { 501 while (!SIMPLEQ_EMPTY(&sc->sc_qchip)) { 502 q = SIMPLEQ_FIRST(&sc->sc_qchip); 503 dmap = q->q_dma; 504 505 if ((dmap->d_dma->d_mcr.mcr_flags & htole16(UBS_MCR_DONE)) == 0) 506 break; 507 508 q = SIMPLEQ_FIRST(&sc->sc_qchip); 509 SIMPLEQ_REMOVE_HEAD(&sc->sc_qchip, /*q,*/ q_next); 510 511 npkts = q->q_nstacked_mcrs; 512 sc->sc_nqchip -= 1+npkts; 513 /* 514 * search for further sc_qchip ubsec_q's that share 515 * the same MCR, and complete them too, they must be 516 * at the top. 517 */ 518 for (i = 0; i < npkts; i++) { 519 if(q->q_stacked_mcr[i]) 520 ubsec_callback(sc, q->q_stacked_mcr[i]); 521 else 522 break; 523 } 524 ubsec_callback(sc, q); 525 } 526 527 /* 528 * Don't send any more packet to chip if there has been 529 * a DMAERR. 530 */ 531 if (!(stat & BS_STAT_DMAERR)) 532 ubsec_feed(sc); 533 } 534 535 /* 536 * Check to see if we have any key setups/rng's waiting for us 537 */ 538 if ((sc->sc_flags & (UBS_FLAGS_KEY|UBS_FLAGS_RNG)) && 539 (stat & BS_STAT_MCR2_DONE)) { 540 struct ubsec_q2 *q2; 541 struct ubsec_mcr *mcr; 542 543 while (!SIMPLEQ_EMPTY(&sc->sc_qchip2)) { 544 q2 = SIMPLEQ_FIRST(&sc->sc_qchip2); 545 546 bus_dmamap_sync(sc->sc_dmat, q2->q_mcr.dma_map, 547 0, q2->q_mcr.dma_map->dm_mapsize, 548 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 549 550 mcr = (struct ubsec_mcr *)q2->q_mcr.dma_vaddr; 551 if ((mcr->mcr_flags & htole16(UBS_MCR_DONE)) == 0) { 552 bus_dmamap_sync(sc->sc_dmat, 553 q2->q_mcr.dma_map, 0, 554 q2->q_mcr.dma_map->dm_mapsize, 555 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 556 break; 557 } 558 q2 = SIMPLEQ_FIRST(&sc->sc_qchip2); 559 SIMPLEQ_REMOVE_HEAD(&sc->sc_qchip2, /*q2,*/ q_next); 560 ubsec_callback2(sc, q2); 561 /* 562 * Don't send any more packet to chip if there has been 563 * a DMAERR. 564 */ 565 if (!(stat & BS_STAT_DMAERR)) 566 ubsec_feed2(sc); 567 } 568 } 569 570 /* 571 * Check to see if we got any DMA Error 572 */ 573 if (stat & BS_STAT_DMAERR) { 574 #ifdef UBSEC_DEBUG 575 if (ubsec_debug) { 576 volatile u_int32_t a = READ_REG(sc, BS_ERR); 577 578 printf("%s: dmaerr %s@%08x\n", sc->sc_dv.dv_xname, 579 (a & BS_ERR_READ) ? "read" : "write", 580 a & BS_ERR_ADDR); 581 } 582 #endif /* UBSEC_DEBUG */ 583 ubsecstats.hst_dmaerr++; 584 ubsec_totalreset(sc); 585 ubsec_feed(sc); 586 } 587 588 if (sc->sc_needwakeup) { /* XXX check high watermark */ 589 int wakeup = sc->sc_needwakeup & (CRYPTO_SYMQ|CRYPTO_ASYMQ); 590 #ifdef UBSEC_DEBUG 591 if (ubsec_debug) 592 printf("%s: wakeup crypto (%x)\n", sc->sc_dv.dv_xname, 593 sc->sc_needwakeup); 594 #endif /* UBSEC_DEBUG */ 595 sc->sc_needwakeup &= ~wakeup; 596 crypto_unblock(sc->sc_cid, wakeup); 597 } 598 return (1); 599 } 600 601 /* 602 * ubsec_feed() - aggregate and post requests to chip 603 * OpenBSD comments: 604 * It is assumed that the caller set splnet() 605 */ 606 static void 607 ubsec_feed(struct ubsec_softc *sc) 608 { 609 struct ubsec_q *q, *q2; 610 int npkts, i; 611 void *v; 612 u_int32_t stat; 613 #ifdef UBSEC_DEBUG 614 static int max; 615 #endif /* UBSEC_DEBUG */ 616 617 npkts = sc->sc_nqueue; 618 if (npkts > ubsecstats.hst_maxqueue) 619 ubsecstats.hst_maxqueue = npkts; 620 if (npkts < 2) 621 goto feed1; 622 623 /* 624 * Decide how many ops to combine in a single MCR. We cannot 625 * aggregate more than UBS_MAX_AGGR because this is the number 626 * of slots defined in the data structure. Otherwise we clamp 627 * based on the tunable parameter ubsec_maxaggr. Note that 628 * aggregation can happen in two ways: either by batching ops 629 * from above or because the h/w backs up and throttles us. 630 * Aggregating ops reduces the number of interrupts to the host 631 * but also (potentially) increases the latency for processing 632 * completed ops as we only get an interrupt when all aggregated 633 * ops have completed. 634 */ 635 if (npkts > UBS_MAX_AGGR) 636 npkts = UBS_MAX_AGGR; 637 if (npkts > ubsec_maxaggr) 638 npkts = ubsec_maxaggr; 639 if (npkts > ubsecstats.hst_maxbatch) 640 ubsecstats.hst_maxbatch = npkts; 641 if (npkts < 2) 642 goto feed1; 643 ubsecstats.hst_totbatch += npkts-1; 644 645 if ((stat = READ_REG(sc, BS_STAT)) & (BS_STAT_MCR1_FULL | BS_STAT_DMAERR)) { 646 if (stat & BS_STAT_DMAERR) { 647 ubsec_totalreset(sc); 648 ubsecstats.hst_dmaerr++; 649 } else { 650 ubsecstats.hst_mcr1full++; 651 } 652 return; 653 } 654 655 #ifdef UBSEC_DEBUG 656 if (ubsec_debug) 657 printf("merging %d records\n", npkts); 658 /* XXX temporary aggregation statistics reporting code */ 659 if (max < npkts) { 660 max = npkts; 661 printf("%s: new max aggregate %d\n", sc->sc_dv.dv_xname, max); 662 } 663 #endif /* UBSEC_DEBUG */ 664 665 q = SIMPLEQ_FIRST(&sc->sc_queue); 666 SIMPLEQ_REMOVE_HEAD(&sc->sc_queue, /*q,*/ q_next); 667 --sc->sc_nqueue; 668 669 bus_dmamap_sync(sc->sc_dmat, q->q_src_map, 670 0, q->q_src_map->dm_mapsize, BUS_DMASYNC_PREWRITE); 671 if (q->q_dst_map != NULL) 672 bus_dmamap_sync(sc->sc_dmat, q->q_dst_map, 673 0, q->q_dst_map->dm_mapsize, BUS_DMASYNC_PREREAD); 674 675 q->q_nstacked_mcrs = npkts - 1; /* Number of packets stacked */ 676 677 for (i = 0; i < q->q_nstacked_mcrs; i++) { 678 q2 = SIMPLEQ_FIRST(&sc->sc_queue); 679 bus_dmamap_sync(sc->sc_dmat, q2->q_src_map, 680 0, q2->q_src_map->dm_mapsize, BUS_DMASYNC_PREWRITE); 681 if (q2->q_dst_map != NULL) 682 bus_dmamap_sync(sc->sc_dmat, q2->q_dst_map, 683 0, q2->q_dst_map->dm_mapsize, BUS_DMASYNC_PREREAD); 684 q2= SIMPLEQ_FIRST(&sc->sc_queue); 685 SIMPLEQ_REMOVE_HEAD(&sc->sc_queue, /*q2,*/ q_next); 686 --sc->sc_nqueue; 687 688 v = ((void *)&q2->q_dma->d_dma->d_mcr); 689 v = (char*)v + (sizeof(struct ubsec_mcr) - 690 sizeof(struct ubsec_mcr_add)); 691 bcopy(v, &q->q_dma->d_dma->d_mcradd[i], sizeof(struct ubsec_mcr_add)); 692 q->q_stacked_mcr[i] = q2; 693 } 694 q->q_dma->d_dma->d_mcr.mcr_pkts = htole16(npkts); 695 SIMPLEQ_INSERT_TAIL(&sc->sc_qchip, q, q_next); 696 sc->sc_nqchip += npkts; 697 if (sc->sc_nqchip > ubsecstats.hst_maxqchip) 698 ubsecstats.hst_maxqchip = sc->sc_nqchip; 699 bus_dmamap_sync(sc->sc_dmat, q->q_dma->d_alloc.dma_map, 700 0, q->q_dma->d_alloc.dma_map->dm_mapsize, 701 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 702 WRITE_REG(sc, BS_MCR1, q->q_dma->d_alloc.dma_paddr + 703 offsetof(struct ubsec_dmachunk, d_mcr)); 704 return; 705 706 feed1: 707 while (!SIMPLEQ_EMPTY(&sc->sc_queue)) { 708 if ((stat = READ_REG(sc, BS_STAT)) & (BS_STAT_MCR1_FULL | BS_STAT_DMAERR)) { 709 if (stat & BS_STAT_DMAERR) { 710 ubsec_totalreset(sc); 711 ubsecstats.hst_dmaerr++; 712 } else { 713 ubsecstats.hst_mcr1full++; 714 } 715 break; 716 } 717 718 q = SIMPLEQ_FIRST(&sc->sc_queue); 719 720 bus_dmamap_sync(sc->sc_dmat, q->q_src_map, 721 0, q->q_src_map->dm_mapsize, BUS_DMASYNC_PREWRITE); 722 if (q->q_dst_map != NULL) 723 bus_dmamap_sync(sc->sc_dmat, q->q_dst_map, 724 0, q->q_dst_map->dm_mapsize, BUS_DMASYNC_PREREAD); 725 bus_dmamap_sync(sc->sc_dmat, q->q_dma->d_alloc.dma_map, 726 0, q->q_dma->d_alloc.dma_map->dm_mapsize, 727 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 728 729 WRITE_REG(sc, BS_MCR1, q->q_dma->d_alloc.dma_paddr + 730 offsetof(struct ubsec_dmachunk, d_mcr)); 731 #ifdef UBSEC_DEBUG 732 if (ubsec_debug) 733 printf("feed: q->chip %p %08x stat %08x\n", 734 q, (u_int32_t)q->q_dma->d_alloc.dma_paddr, 735 stat); 736 #endif /* UBSEC_DEBUG */ 737 q = SIMPLEQ_FIRST(&sc->sc_queue); 738 SIMPLEQ_REMOVE_HEAD(&sc->sc_queue, /*q,*/ q_next); 739 --sc->sc_nqueue; 740 SIMPLEQ_INSERT_TAIL(&sc->sc_qchip, q, q_next); 741 sc->sc_nqchip++; 742 } 743 if (sc->sc_nqchip > ubsecstats.hst_maxqchip) 744 ubsecstats.hst_maxqchip = sc->sc_nqchip; 745 } 746 747 /* 748 * Allocate a new 'session' and return an encoded session id. 'sidp' 749 * contains our registration id, and should contain an encoded session 750 * id on successful allocation. 751 */ 752 static int 753 ubsec_newsession(void *arg, u_int32_t *sidp, struct cryptoini *cri) 754 { 755 struct cryptoini *c, *encini = NULL, *macini = NULL; 756 struct ubsec_softc *sc; 757 struct ubsec_session *ses = NULL; 758 MD5_CTX md5ctx; 759 SHA1_CTX sha1ctx; 760 int i, sesn; 761 762 sc = arg; 763 KASSERT(sc != NULL /*, ("ubsec_newsession: null softc")*/); 764 765 if (sidp == NULL || cri == NULL || sc == NULL) 766 return (EINVAL); 767 768 for (c = cri; c != NULL; c = c->cri_next) { 769 if (c->cri_alg == CRYPTO_MD5_HMAC || 770 c->cri_alg == CRYPTO_SHA1_HMAC) { 771 if (macini) 772 return (EINVAL); 773 macini = c; 774 } else if (c->cri_alg == CRYPTO_DES_CBC || 775 c->cri_alg == CRYPTO_3DES_CBC) { 776 if (encini) 777 return (EINVAL); 778 encini = c; 779 } else 780 return (EINVAL); 781 } 782 if (encini == NULL && macini == NULL) 783 return (EINVAL); 784 785 if (sc->sc_sessions == NULL) { 786 ses = sc->sc_sessions = (struct ubsec_session *)malloc( 787 sizeof(struct ubsec_session), M_DEVBUF, M_NOWAIT); 788 if (ses == NULL) 789 return (ENOMEM); 790 sesn = 0; 791 sc->sc_nsessions = 1; 792 } else { 793 for (sesn = 0; sesn < sc->sc_nsessions; sesn++) { 794 if (sc->sc_sessions[sesn].ses_used == 0) { 795 ses = &sc->sc_sessions[sesn]; 796 break; 797 } 798 } 799 800 if (ses == NULL) { 801 sesn = sc->sc_nsessions; 802 ses = (struct ubsec_session *)malloc((sesn + 1) * 803 sizeof(struct ubsec_session), M_DEVBUF, M_NOWAIT); 804 if (ses == NULL) 805 return (ENOMEM); 806 bcopy(sc->sc_sessions, ses, sesn * 807 sizeof(struct ubsec_session)); 808 bzero(sc->sc_sessions, sesn * 809 sizeof(struct ubsec_session)); 810 free(sc->sc_sessions, M_DEVBUF); 811 sc->sc_sessions = ses; 812 ses = &sc->sc_sessions[sesn]; 813 sc->sc_nsessions++; 814 } 815 } 816 817 bzero(ses, sizeof(struct ubsec_session)); 818 ses->ses_used = 1; 819 if (encini) { 820 /* get an IV, network byte order */ 821 #ifdef __NetBSD__ 822 rnd_extract_data(ses->ses_iv, 823 sizeof(ses->ses_iv), RND_EXTRACT_ANY); 824 #else 825 get_random_bytes(ses->ses_iv, sizeof(ses->ses_iv)); 826 #endif 827 828 /* Go ahead and compute key in ubsec's byte order */ 829 if (encini->cri_alg == CRYPTO_DES_CBC) { 830 bcopy(encini->cri_key, &ses->ses_deskey[0], 8); 831 bcopy(encini->cri_key, &ses->ses_deskey[2], 8); 832 bcopy(encini->cri_key, &ses->ses_deskey[4], 8); 833 } else 834 bcopy(encini->cri_key, ses->ses_deskey, 24); 835 836 SWAP32(ses->ses_deskey[0]); 837 SWAP32(ses->ses_deskey[1]); 838 SWAP32(ses->ses_deskey[2]); 839 SWAP32(ses->ses_deskey[3]); 840 SWAP32(ses->ses_deskey[4]); 841 SWAP32(ses->ses_deskey[5]); 842 } 843 844 if (macini) { 845 for (i = 0; i < macini->cri_klen / 8; i++) 846 macini->cri_key[i] ^= HMAC_IPAD_VAL; 847 848 if (macini->cri_alg == CRYPTO_MD5_HMAC) { 849 MD5Init(&md5ctx); 850 MD5Update(&md5ctx, macini->cri_key, 851 macini->cri_klen / 8); 852 MD5Update(&md5ctx, hmac_ipad_buffer, 853 HMAC_BLOCK_LEN - (macini->cri_klen / 8)); 854 bcopy(md5ctx.state, ses->ses_hminner, 855 sizeof(md5ctx.state)); 856 } else { 857 SHA1Init(&sha1ctx); 858 SHA1Update(&sha1ctx, macini->cri_key, 859 macini->cri_klen / 8); 860 SHA1Update(&sha1ctx, hmac_ipad_buffer, 861 HMAC_BLOCK_LEN - (macini->cri_klen / 8)); 862 bcopy(sha1ctx.state, ses->ses_hminner, 863 sizeof(sha1ctx.state)); 864 } 865 866 for (i = 0; i < macini->cri_klen / 8; i++) 867 macini->cri_key[i] ^= (HMAC_IPAD_VAL ^ HMAC_OPAD_VAL); 868 869 if (macini->cri_alg == CRYPTO_MD5_HMAC) { 870 MD5Init(&md5ctx); 871 MD5Update(&md5ctx, macini->cri_key, 872 macini->cri_klen / 8); 873 MD5Update(&md5ctx, hmac_opad_buffer, 874 HMAC_BLOCK_LEN - (macini->cri_klen / 8)); 875 bcopy(md5ctx.state, ses->ses_hmouter, 876 sizeof(md5ctx.state)); 877 } else { 878 SHA1Init(&sha1ctx); 879 SHA1Update(&sha1ctx, macini->cri_key, 880 macini->cri_klen / 8); 881 SHA1Update(&sha1ctx, hmac_opad_buffer, 882 HMAC_BLOCK_LEN - (macini->cri_klen / 8)); 883 bcopy(sha1ctx.state, ses->ses_hmouter, 884 sizeof(sha1ctx.state)); 885 } 886 887 for (i = 0; i < macini->cri_klen / 8; i++) 888 macini->cri_key[i] ^= HMAC_OPAD_VAL; 889 } 890 891 *sidp = UBSEC_SID(sc->sc_dv.dv_unit, sesn); 892 return (0); 893 } 894 895 /* 896 * Deallocate a session. 897 */ 898 static int 899 ubsec_freesession(void *arg, u_int64_t tid) 900 { 901 struct ubsec_softc *sc; 902 int session; 903 u_int32_t sid = ((u_int32_t) tid) & 0xffffffff; 904 905 sc = arg; 906 KASSERT(sc != NULL /*, ("ubsec_freesession: null softc")*/); 907 908 session = UBSEC_SESSION(sid); 909 if (session >= sc->sc_nsessions) 910 return (EINVAL); 911 912 bzero(&sc->sc_sessions[session], sizeof(sc->sc_sessions[session])); 913 return (0); 914 } 915 916 #ifdef __FreeBSD__ /* Ugly gratuitous changes to bus_dma */ 917 static void 918 ubsec_op_cb(void *arg, bus_dma_segment_t *seg, int nsegs, bus_size_t mapsize, int error) 919 { 920 struct ubsec_operand *op = arg; 921 922 KASSERT(nsegs <= UBS_MAX_SCATTER 923 /*, ("Too many DMA segments returned when mapping operand")*/); 924 #ifdef UBSEC_DEBUG 925 if (ubsec_debug) 926 printf("ubsec_op_cb: mapsize %u nsegs %d\n", 927 (u_int) mapsize, nsegs); 928 #endif 929 op->mapsize = mapsize; 930 op->nsegs = nsegs; 931 bcopy(seg, op->segs, nsegs * sizeof (seg[0])); 932 } 933 #endif 934 935 static int 936 ubsec_process(void *arg, struct cryptop *crp, int hint) 937 { 938 struct ubsec_q *q = NULL; 939 #ifdef __OpenBSD__ 940 int card; 941 #endif 942 int err = 0, i, j, s, nicealign; 943 struct ubsec_softc *sc; 944 struct cryptodesc *crd1, *crd2, *maccrd, *enccrd; 945 int encoffset = 0, macoffset = 0, cpskip, cpoffset; 946 int sskip, dskip, stheend, dtheend; 947 int16_t coffset; 948 struct ubsec_session *ses; 949 struct ubsec_pktctx ctx; 950 struct ubsec_dma *dmap = NULL; 951 952 sc = arg; 953 KASSERT(sc != NULL /*, ("ubsec_process: null softc")*/); 954 955 if (crp == NULL || crp->crp_callback == NULL || sc == NULL) { 956 ubsecstats.hst_invalid++; 957 return (EINVAL); 958 } 959 if (UBSEC_SESSION(crp->crp_sid) >= sc->sc_nsessions) { 960 ubsecstats.hst_badsession++; 961 return (EINVAL); 962 } 963 964 s = splnet(); 965 966 if (SIMPLEQ_EMPTY(&sc->sc_freequeue)) { 967 ubsecstats.hst_queuefull++; 968 sc->sc_needwakeup |= CRYPTO_SYMQ; 969 splx(s); 970 return(ERESTART); 971 } 972 973 q = SIMPLEQ_FIRST(&sc->sc_freequeue); 974 SIMPLEQ_REMOVE_HEAD(&sc->sc_freequeue, /*q,*/ q_next); 975 splx(s); 976 977 dmap = q->q_dma; /* Save dma pointer */ 978 bzero(q, sizeof(struct ubsec_q)); 979 bzero(&ctx, sizeof(ctx)); 980 981 q->q_sesn = UBSEC_SESSION(crp->crp_sid); 982 q->q_dma = dmap; 983 ses = &sc->sc_sessions[q->q_sesn]; 984 985 if (crp->crp_flags & CRYPTO_F_IMBUF) { 986 q->q_src_m = (struct mbuf *)crp->crp_buf; 987 q->q_dst_m = (struct mbuf *)crp->crp_buf; 988 } else if (crp->crp_flags & CRYPTO_F_IOV) { 989 q->q_src_io = (struct uio *)crp->crp_buf; 990 q->q_dst_io = (struct uio *)crp->crp_buf; 991 } else { 992 ubsecstats.hst_badflags++; 993 err = EINVAL; 994 goto errout; /* XXX we don't handle contiguous blocks! */ 995 } 996 997 bzero(&dmap->d_dma->d_mcr, sizeof(struct ubsec_mcr)); 998 999 dmap->d_dma->d_mcr.mcr_pkts = htole16(1); 1000 dmap->d_dma->d_mcr.mcr_flags = 0; 1001 q->q_crp = crp; 1002 1003 crd1 = crp->crp_desc; 1004 if (crd1 == NULL) { 1005 ubsecstats.hst_nodesc++; 1006 err = EINVAL; 1007 goto errout; 1008 } 1009 crd2 = crd1->crd_next; 1010 1011 if (crd2 == NULL) { 1012 if (crd1->crd_alg == CRYPTO_MD5_HMAC || 1013 crd1->crd_alg == CRYPTO_SHA1_HMAC) { 1014 maccrd = crd1; 1015 enccrd = NULL; 1016 } else if (crd1->crd_alg == CRYPTO_DES_CBC || 1017 crd1->crd_alg == CRYPTO_3DES_CBC) { 1018 maccrd = NULL; 1019 enccrd = crd1; 1020 } else { 1021 ubsecstats.hst_badalg++; 1022 err = EINVAL; 1023 goto errout; 1024 } 1025 } else { 1026 if ((crd1->crd_alg == CRYPTO_MD5_HMAC || 1027 crd1->crd_alg == CRYPTO_SHA1_HMAC) && 1028 (crd2->crd_alg == CRYPTO_DES_CBC || 1029 crd2->crd_alg == CRYPTO_3DES_CBC) && 1030 ((crd2->crd_flags & CRD_F_ENCRYPT) == 0)) { 1031 maccrd = crd1; 1032 enccrd = crd2; 1033 } else if ((crd1->crd_alg == CRYPTO_DES_CBC || 1034 crd1->crd_alg == CRYPTO_3DES_CBC) && 1035 (crd2->crd_alg == CRYPTO_MD5_HMAC || 1036 crd2->crd_alg == CRYPTO_SHA1_HMAC) && 1037 (crd1->crd_flags & CRD_F_ENCRYPT)) { 1038 enccrd = crd1; 1039 maccrd = crd2; 1040 } else { 1041 /* 1042 * We cannot order the ubsec as requested 1043 */ 1044 ubsecstats.hst_badalg++; 1045 err = EINVAL; 1046 goto errout; 1047 } 1048 } 1049 1050 if (enccrd) { 1051 encoffset = enccrd->crd_skip; 1052 ctx.pc_flags |= htole16(UBS_PKTCTX_ENC_3DES); 1053 1054 if (enccrd->crd_flags & CRD_F_ENCRYPT) { 1055 q->q_flags |= UBSEC_QFLAGS_COPYOUTIV; 1056 1057 if (enccrd->crd_flags & CRD_F_IV_EXPLICIT) 1058 bcopy(enccrd->crd_iv, ctx.pc_iv, 8); 1059 else { 1060 ctx.pc_iv[0] = ses->ses_iv[0]; 1061 ctx.pc_iv[1] = ses->ses_iv[1]; 1062 } 1063 1064 if ((enccrd->crd_flags & CRD_F_IV_PRESENT) == 0) { 1065 if (crp->crp_flags & CRYPTO_F_IMBUF) 1066 m_copyback(q->q_src_m, 1067 enccrd->crd_inject, 1068 8, (caddr_t)ctx.pc_iv); 1069 else if (crp->crp_flags & CRYPTO_F_IOV) 1070 cuio_copyback(q->q_src_io, 1071 enccrd->crd_inject, 1072 8, (caddr_t)ctx.pc_iv); 1073 } 1074 } else { 1075 ctx.pc_flags |= htole16(UBS_PKTCTX_INBOUND); 1076 1077 if (enccrd->crd_flags & CRD_F_IV_EXPLICIT) 1078 bcopy(enccrd->crd_iv, ctx.pc_iv, 8); 1079 else if (crp->crp_flags & CRYPTO_F_IMBUF) 1080 m_copydata(q->q_src_m, enccrd->crd_inject, 1081 8, (caddr_t)ctx.pc_iv); 1082 else if (crp->crp_flags & CRYPTO_F_IOV) 1083 cuio_copydata(q->q_src_io, 1084 enccrd->crd_inject, 8, 1085 (caddr_t)ctx.pc_iv); 1086 } 1087 1088 ctx.pc_deskey[0] = ses->ses_deskey[0]; 1089 ctx.pc_deskey[1] = ses->ses_deskey[1]; 1090 ctx.pc_deskey[2] = ses->ses_deskey[2]; 1091 ctx.pc_deskey[3] = ses->ses_deskey[3]; 1092 ctx.pc_deskey[4] = ses->ses_deskey[4]; 1093 ctx.pc_deskey[5] = ses->ses_deskey[5]; 1094 SWAP32(ctx.pc_iv[0]); 1095 SWAP32(ctx.pc_iv[1]); 1096 } 1097 1098 if (maccrd) { 1099 macoffset = maccrd->crd_skip; 1100 1101 if (maccrd->crd_alg == CRYPTO_MD5_HMAC) 1102 ctx.pc_flags |= htole16(UBS_PKTCTX_AUTH_MD5); 1103 else 1104 ctx.pc_flags |= htole16(UBS_PKTCTX_AUTH_SHA1); 1105 1106 for (i = 0; i < 5; i++) { 1107 ctx.pc_hminner[i] = ses->ses_hminner[i]; 1108 ctx.pc_hmouter[i] = ses->ses_hmouter[i]; 1109 1110 HTOLE32(ctx.pc_hminner[i]); 1111 HTOLE32(ctx.pc_hmouter[i]); 1112 } 1113 } 1114 1115 if (enccrd && maccrd) { 1116 /* 1117 * ubsec cannot handle packets where the end of encryption 1118 * and authentication are not the same, or where the 1119 * encrypted part begins before the authenticated part. 1120 */ 1121 if ((encoffset + enccrd->crd_len) != 1122 (macoffset + maccrd->crd_len)) { 1123 ubsecstats.hst_lenmismatch++; 1124 err = EINVAL; 1125 goto errout; 1126 } 1127 if (enccrd->crd_skip < maccrd->crd_skip) { 1128 ubsecstats.hst_skipmismatch++; 1129 err = EINVAL; 1130 goto errout; 1131 } 1132 sskip = maccrd->crd_skip; 1133 cpskip = dskip = enccrd->crd_skip; 1134 stheend = maccrd->crd_len; 1135 dtheend = enccrd->crd_len; 1136 coffset = enccrd->crd_skip - maccrd->crd_skip; 1137 cpoffset = cpskip + dtheend; 1138 #ifdef UBSEC_DEBUG 1139 if (ubsec_debug) { 1140 printf("mac: skip %d, len %d, inject %d\n", 1141 maccrd->crd_skip, maccrd->crd_len, maccrd->crd_inject); 1142 printf("enc: skip %d, len %d, inject %d\n", 1143 enccrd->crd_skip, enccrd->crd_len, enccrd->crd_inject); 1144 printf("src: skip %d, len %d\n", sskip, stheend); 1145 printf("dst: skip %d, len %d\n", dskip, dtheend); 1146 printf("ubs: coffset %d, pktlen %d, cpskip %d, cpoffset %d\n", 1147 coffset, stheend, cpskip, cpoffset); 1148 } 1149 #endif 1150 } else { 1151 cpskip = dskip = sskip = macoffset + encoffset; 1152 dtheend = stheend = (enccrd)?enccrd->crd_len:maccrd->crd_len; 1153 cpoffset = cpskip + dtheend; 1154 coffset = 0; 1155 } 1156 ctx.pc_offset = htole16(coffset >> 2); 1157 1158 /* XXX FIXME: jonathan asks, what the heck's that 0xfff0? */ 1159 if (bus_dmamap_create(sc->sc_dmat, 0xfff0, UBS_MAX_SCATTER, 1160 0xfff0, 0, BUS_DMA_NOWAIT, &q->q_src_map) != 0) { 1161 err = ENOMEM; 1162 goto errout; 1163 } 1164 if (crp->crp_flags & CRYPTO_F_IMBUF) { 1165 if (bus_dmamap_load_mbuf(sc->sc_dmat, q->q_src_map, 1166 q->q_src_m, BUS_DMA_NOWAIT) != 0) { 1167 bus_dmamap_destroy(sc->sc_dmat, q->q_src_map); 1168 q->q_src_map = NULL; 1169 ubsecstats.hst_noload++; 1170 err = ENOMEM; 1171 goto errout; 1172 } 1173 } else if (crp->crp_flags & CRYPTO_F_IOV) { 1174 if (bus_dmamap_load_uio(sc->sc_dmat, q->q_src_map, 1175 q->q_src_io, BUS_DMA_NOWAIT) != 0) { 1176 bus_dmamap_destroy(sc->sc_dmat, q->q_src_map); 1177 q->q_src_map = NULL; 1178 ubsecstats.hst_noload++; 1179 err = ENOMEM; 1180 goto errout; 1181 } 1182 } 1183 nicealign = ubsec_dmamap_aligned(q->q_src_map); 1184 1185 dmap->d_dma->d_mcr.mcr_pktlen = htole16(stheend); 1186 1187 #ifdef UBSEC_DEBUG 1188 if (ubsec_debug) 1189 printf("src skip: %d nicealign: %u\n", sskip, nicealign); 1190 #endif 1191 for (i = j = 0; i < q->q_src_map->dm_nsegs; i++) { 1192 struct ubsec_pktbuf *pb; 1193 bus_size_t packl = q->q_src_map->dm_segs[i].ds_len; 1194 bus_addr_t packp = q->q_src_map->dm_segs[i].ds_addr; 1195 1196 if (sskip >= packl) { 1197 sskip -= packl; 1198 continue; 1199 } 1200 1201 packl -= sskip; 1202 packp += sskip; 1203 sskip = 0; 1204 1205 if (packl > 0xfffc) { 1206 err = EIO; 1207 goto errout; 1208 } 1209 1210 if (j == 0) 1211 pb = &dmap->d_dma->d_mcr.mcr_ipktbuf; 1212 else 1213 pb = &dmap->d_dma->d_sbuf[j - 1]; 1214 1215 pb->pb_addr = htole32(packp); 1216 1217 if (stheend) { 1218 if (packl > stheend) { 1219 pb->pb_len = htole32(stheend); 1220 stheend = 0; 1221 } else { 1222 pb->pb_len = htole32(packl); 1223 stheend -= packl; 1224 } 1225 } else 1226 pb->pb_len = htole32(packl); 1227 1228 if ((i + 1) == q->q_src_map->dm_nsegs) 1229 pb->pb_next = 0; 1230 else 1231 pb->pb_next = htole32(dmap->d_alloc.dma_paddr + 1232 offsetof(struct ubsec_dmachunk, d_sbuf[j])); 1233 j++; 1234 } 1235 1236 if (enccrd == NULL && maccrd != NULL) { 1237 dmap->d_dma->d_mcr.mcr_opktbuf.pb_addr = 0; 1238 dmap->d_dma->d_mcr.mcr_opktbuf.pb_len = 0; 1239 dmap->d_dma->d_mcr.mcr_opktbuf.pb_next = htole32(dmap->d_alloc.dma_paddr + 1240 offsetof(struct ubsec_dmachunk, d_macbuf[0])); 1241 #ifdef UBSEC_DEBUG 1242 if (ubsec_debug) 1243 printf("opkt: %x %x %x\n", 1244 dmap->d_dma->d_mcr.mcr_opktbuf.pb_addr, 1245 dmap->d_dma->d_mcr.mcr_opktbuf.pb_len, 1246 dmap->d_dma->d_mcr.mcr_opktbuf.pb_next); 1247 1248 #endif 1249 } else { 1250 if (crp->crp_flags & CRYPTO_F_IOV) { 1251 if (!nicealign) { 1252 ubsecstats.hst_iovmisaligned++; 1253 err = EINVAL; 1254 goto errout; 1255 } 1256 /* XXX: ``what the heck's that'' 0xfff0? */ 1257 if (bus_dmamap_create(sc->sc_dmat, 0xfff0, 1258 UBS_MAX_SCATTER, 0xfff0, 0, BUS_DMA_NOWAIT, 1259 &q->q_dst_map) != 0) { 1260 ubsecstats.hst_nomap++; 1261 err = ENOMEM; 1262 goto errout; 1263 } 1264 if (bus_dmamap_load_uio(sc->sc_dmat, q->q_dst_map, 1265 q->q_dst_io, BUS_DMA_NOWAIT) != 0) { 1266 bus_dmamap_destroy(sc->sc_dmat, q->q_dst_map); 1267 q->q_dst_map = NULL; 1268 ubsecstats.hst_noload++; 1269 err = ENOMEM; 1270 goto errout; 1271 } 1272 } else if (crp->crp_flags & CRYPTO_F_IMBUF) { 1273 if (nicealign) { 1274 q->q_dst_m = q->q_src_m; 1275 q->q_dst_map = q->q_src_map; 1276 } else { 1277 int totlen, len; 1278 struct mbuf *m, *top, **mp; 1279 1280 ubsecstats.hst_unaligned++; 1281 totlen = q->q_src_map->dm_mapsize; 1282 if (q->q_src_m->m_flags & M_PKTHDR) { 1283 len = MHLEN; 1284 MGETHDR(m, M_DONTWAIT, MT_DATA); 1285 /*XXX FIXME: m_dup_pkthdr */ 1286 if (m && 1 /*!m_dup_pkthdr(m, q->q_src_m, M_DONTWAIT)*/) { 1287 m_free(m); 1288 m = NULL; 1289 } 1290 } else { 1291 len = MLEN; 1292 MGET(m, M_DONTWAIT, MT_DATA); 1293 } 1294 if (m == NULL) { 1295 ubsecstats.hst_nombuf++; 1296 err = sc->sc_nqueue ? ERESTART : ENOMEM; 1297 goto errout; 1298 } 1299 if (len == MHLEN) 1300 /*XXX was M_DUP_PKTHDR*/ 1301 M_COPY_PKTHDR(m, q->q_src_m); 1302 if (totlen >= MINCLSIZE) { 1303 MCLGET(m, M_DONTWAIT); 1304 if ((m->m_flags & M_EXT) == 0) { 1305 m_free(m); 1306 ubsecstats.hst_nomcl++; 1307 err = sc->sc_nqueue ? ERESTART : ENOMEM; 1308 goto errout; 1309 } 1310 len = MCLBYTES; 1311 } 1312 m->m_len = len; 1313 top = NULL; 1314 mp = ⊤ 1315 1316 while (totlen > 0) { 1317 if (top) { 1318 MGET(m, M_DONTWAIT, MT_DATA); 1319 if (m == NULL) { 1320 m_freem(top); 1321 ubsecstats.hst_nombuf++; 1322 err = sc->sc_nqueue ? ERESTART : ENOMEM; 1323 goto errout; 1324 } 1325 len = MLEN; 1326 } 1327 if (top && totlen >= MINCLSIZE) { 1328 MCLGET(m, M_DONTWAIT); 1329 if ((m->m_flags & M_EXT) == 0) { 1330 *mp = m; 1331 m_freem(top); 1332 ubsecstats.hst_nomcl++; 1333 err = sc->sc_nqueue ? ERESTART : ENOMEM; 1334 goto errout; 1335 } 1336 len = MCLBYTES; 1337 } 1338 m->m_len = len = min(totlen, len); 1339 totlen -= len; 1340 *mp = m; 1341 mp = &m->m_next; 1342 } 1343 q->q_dst_m = top; 1344 ubsec_mcopy(q->q_src_m, q->q_dst_m, 1345 cpskip, cpoffset); 1346 /* XXX again, what the heck is that 0xfff0? */ 1347 if (bus_dmamap_create(sc->sc_dmat, 0xfff0, 1348 UBS_MAX_SCATTER, 0xfff0, 0, BUS_DMA_NOWAIT, 1349 &q->q_dst_map) != 0) { 1350 ubsecstats.hst_nomap++; 1351 err = ENOMEM; 1352 goto errout; 1353 } 1354 if (bus_dmamap_load_mbuf(sc->sc_dmat, 1355 q->q_dst_map, q->q_dst_m, 1356 BUS_DMA_NOWAIT) != 0) { 1357 bus_dmamap_destroy(sc->sc_dmat, 1358 q->q_dst_map); 1359 q->q_dst_map = NULL; 1360 ubsecstats.hst_noload++; 1361 err = ENOMEM; 1362 goto errout; 1363 } 1364 } 1365 } else { 1366 ubsecstats.hst_badflags++; 1367 err = EINVAL; 1368 goto errout; 1369 } 1370 1371 #ifdef UBSEC_DEBUG 1372 if (ubsec_debug) 1373 printf("dst skip: %d\n", dskip); 1374 #endif 1375 for (i = j = 0; i < q->q_dst_map->dm_nsegs; i++) { 1376 struct ubsec_pktbuf *pb; 1377 bus_size_t packl = q->q_dst_map->dm_segs[i].ds_len; 1378 bus_addr_t packp = q->q_dst_map->dm_segs[i].ds_addr; 1379 1380 if (dskip >= packl) { 1381 dskip -= packl; 1382 continue; 1383 } 1384 1385 packl -= dskip; 1386 packp += dskip; 1387 dskip = 0; 1388 1389 if (packl > 0xfffc) { 1390 err = EIO; 1391 goto errout; 1392 } 1393 1394 if (j == 0) 1395 pb = &dmap->d_dma->d_mcr.mcr_opktbuf; 1396 else 1397 pb = &dmap->d_dma->d_dbuf[j - 1]; 1398 1399 pb->pb_addr = htole32(packp); 1400 1401 if (dtheend) { 1402 if (packl > dtheend) { 1403 pb->pb_len = htole32(dtheend); 1404 dtheend = 0; 1405 } else { 1406 pb->pb_len = htole32(packl); 1407 dtheend -= packl; 1408 } 1409 } else 1410 pb->pb_len = htole32(packl); 1411 1412 if ((i + 1) == q->q_dst_map->dm_nsegs) { 1413 if (maccrd) 1414 pb->pb_next = htole32(dmap->d_alloc.dma_paddr + 1415 offsetof(struct ubsec_dmachunk, d_macbuf[0])); 1416 else 1417 pb->pb_next = 0; 1418 } else 1419 pb->pb_next = htole32(dmap->d_alloc.dma_paddr + 1420 offsetof(struct ubsec_dmachunk, d_dbuf[j])); 1421 j++; 1422 } 1423 } 1424 1425 dmap->d_dma->d_mcr.mcr_cmdctxp = htole32(dmap->d_alloc.dma_paddr + 1426 offsetof(struct ubsec_dmachunk, d_ctx)); 1427 1428 if (sc->sc_flags & UBS_FLAGS_LONGCTX) { 1429 struct ubsec_pktctx_long *ctxl; 1430 1431 ctxl = (struct ubsec_pktctx_long *)(dmap->d_alloc.dma_vaddr + 1432 offsetof(struct ubsec_dmachunk, d_ctx)); 1433 1434 /* transform small context into long context */ 1435 ctxl->pc_len = htole16(sizeof(struct ubsec_pktctx_long)); 1436 ctxl->pc_type = htole16(UBS_PKTCTX_TYPE_IPSEC); 1437 ctxl->pc_flags = ctx.pc_flags; 1438 ctxl->pc_offset = ctx.pc_offset; 1439 for (i = 0; i < 6; i++) 1440 ctxl->pc_deskey[i] = ctx.pc_deskey[i]; 1441 for (i = 0; i < 5; i++) 1442 ctxl->pc_hminner[i] = ctx.pc_hminner[i]; 1443 for (i = 0; i < 5; i++) 1444 ctxl->pc_hmouter[i] = ctx.pc_hmouter[i]; 1445 ctxl->pc_iv[0] = ctx.pc_iv[0]; 1446 ctxl->pc_iv[1] = ctx.pc_iv[1]; 1447 } else 1448 bcopy(&ctx, dmap->d_alloc.dma_vaddr + 1449 offsetof(struct ubsec_dmachunk, d_ctx), 1450 sizeof(struct ubsec_pktctx)); 1451 1452 s = splnet(); 1453 SIMPLEQ_INSERT_TAIL(&sc->sc_queue, q, q_next); 1454 sc->sc_nqueue++; 1455 ubsecstats.hst_ipackets++; 1456 ubsecstats.hst_ibytes += dmap->d_alloc.dma_map->dm_mapsize; 1457 if ((hint & CRYPTO_HINT_MORE) == 0 || sc->sc_nqueue >= ubsec_maxbatch) 1458 ubsec_feed(sc); 1459 splx(s); 1460 return (0); 1461 1462 errout: 1463 if (q != NULL) { 1464 if ((q->q_dst_m != NULL) && (q->q_src_m != q->q_dst_m)) 1465 m_freem(q->q_dst_m); 1466 1467 if (q->q_dst_map != NULL && q->q_dst_map != q->q_src_map) { 1468 bus_dmamap_unload(sc->sc_dmat, q->q_dst_map); 1469 bus_dmamap_destroy(sc->sc_dmat, q->q_dst_map); 1470 } 1471 if (q->q_src_map != NULL) { 1472 bus_dmamap_unload(sc->sc_dmat, q->q_src_map); 1473 bus_dmamap_destroy(sc->sc_dmat, q->q_src_map); 1474 } 1475 1476 s = splnet(); 1477 SIMPLEQ_INSERT_TAIL(&sc->sc_freequeue, q, q_next); 1478 splx(s); 1479 } 1480 #if 0 /* jonathan says: this openbsd code seems to be subsumed elsewhere */ 1481 if (err == EINVAL) 1482 ubsecstats.hst_invalid++; 1483 else 1484 ubsecstats.hst_nomem++; 1485 #endif 1486 if (err != ERESTART) { 1487 crp->crp_etype = err; 1488 crypto_done(crp); 1489 } else { 1490 sc->sc_needwakeup |= CRYPTO_SYMQ; 1491 } 1492 return (err); 1493 } 1494 1495 void 1496 ubsec_callback(sc, q) 1497 struct ubsec_softc *sc; 1498 struct ubsec_q *q; 1499 { 1500 struct cryptop *crp = (struct cryptop *)q->q_crp; 1501 struct cryptodesc *crd; 1502 struct ubsec_dma *dmap = q->q_dma; 1503 1504 ubsecstats.hst_opackets++; 1505 ubsecstats.hst_obytes += dmap->d_alloc.dma_size; 1506 1507 bus_dmamap_sync(sc->sc_dmat, dmap->d_alloc.dma_map, 0, 1508 dmap->d_alloc.dma_map->dm_mapsize, 1509 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 1510 if (q->q_dst_map != NULL && q->q_dst_map != q->q_src_map) { 1511 bus_dmamap_sync(sc->sc_dmat, q->q_dst_map, 1512 0, q->q_dst_map->dm_mapsize, BUS_DMASYNC_POSTREAD); 1513 bus_dmamap_unload(sc->sc_dmat, q->q_dst_map); 1514 bus_dmamap_destroy(sc->sc_dmat, q->q_dst_map); 1515 } 1516 bus_dmamap_sync(sc->sc_dmat, q->q_src_map, 1517 0, q->q_src_map->dm_mapsize, BUS_DMASYNC_POSTWRITE); 1518 bus_dmamap_unload(sc->sc_dmat, q->q_src_map); 1519 bus_dmamap_destroy(sc->sc_dmat, q->q_src_map); 1520 1521 if ((crp->crp_flags & CRYPTO_F_IMBUF) && (q->q_src_m != q->q_dst_m)) { 1522 m_freem(q->q_src_m); 1523 crp->crp_buf = (caddr_t)q->q_dst_m; 1524 } 1525 1526 /* copy out IV for future use */ 1527 if (q->q_flags & UBSEC_QFLAGS_COPYOUTIV) { 1528 for (crd = crp->crp_desc; crd; crd = crd->crd_next) { 1529 if (crd->crd_alg != CRYPTO_DES_CBC && 1530 crd->crd_alg != CRYPTO_3DES_CBC) 1531 continue; 1532 if (crp->crp_flags & CRYPTO_F_IMBUF) 1533 m_copydata((struct mbuf *)crp->crp_buf, 1534 crd->crd_skip + crd->crd_len - 8, 8, 1535 (caddr_t)sc->sc_sessions[q->q_sesn].ses_iv); 1536 else if (crp->crp_flags & CRYPTO_F_IOV) { 1537 cuio_copydata((struct uio *)crp->crp_buf, 1538 crd->crd_skip + crd->crd_len - 8, 8, 1539 (caddr_t)sc->sc_sessions[q->q_sesn].ses_iv); 1540 } 1541 break; 1542 } 1543 } 1544 1545 for (crd = crp->crp_desc; crd; crd = crd->crd_next) { 1546 if (crd->crd_alg != CRYPTO_MD5_HMAC && 1547 crd->crd_alg != CRYPTO_SHA1_HMAC) 1548 continue; 1549 if (crp->crp_flags & CRYPTO_F_IMBUF) 1550 m_copyback((struct mbuf *)crp->crp_buf, 1551 crd->crd_inject, 12, 1552 (caddr_t)dmap->d_dma->d_macbuf); 1553 else if (crp->crp_flags & CRYPTO_F_IOV && crp->crp_mac) 1554 bcopy((caddr_t)dmap->d_dma->d_macbuf, 1555 crp->crp_mac, 12); 1556 break; 1557 } 1558 SIMPLEQ_INSERT_TAIL(&sc->sc_freequeue, q, q_next); 1559 crypto_done(crp); 1560 } 1561 1562 static void 1563 ubsec_mcopy(struct mbuf *srcm, struct mbuf *dstm, int hoffset, int toffset) 1564 { 1565 int i, j, dlen, slen; 1566 caddr_t dptr, sptr; 1567 1568 j = 0; 1569 sptr = srcm->m_data; 1570 slen = srcm->m_len; 1571 dptr = dstm->m_data; 1572 dlen = dstm->m_len; 1573 1574 while (1) { 1575 for (i = 0; i < min(slen, dlen); i++) { 1576 if (j < hoffset || j >= toffset) 1577 *dptr++ = *sptr++; 1578 slen--; 1579 dlen--; 1580 j++; 1581 } 1582 if (slen == 0) { 1583 srcm = srcm->m_next; 1584 if (srcm == NULL) 1585 return; 1586 sptr = srcm->m_data; 1587 slen = srcm->m_len; 1588 } 1589 if (dlen == 0) { 1590 dstm = dstm->m_next; 1591 if (dstm == NULL) 1592 return; 1593 dptr = dstm->m_data; 1594 dlen = dstm->m_len; 1595 } 1596 } 1597 } 1598 1599 /* 1600 * feed the key generator, must be called at splnet() or higher. 1601 */ 1602 static void 1603 ubsec_feed2(struct ubsec_softc *sc) 1604 { 1605 struct ubsec_q2 *q; 1606 1607 while (!SIMPLEQ_EMPTY(&sc->sc_queue2)) { 1608 if (READ_REG(sc, BS_STAT) & BS_STAT_MCR2_FULL) 1609 break; 1610 q = SIMPLEQ_FIRST(&sc->sc_queue2); 1611 1612 bus_dmamap_sync(sc->sc_dmat, q->q_mcr.dma_map, 0, 1613 q->q_mcr.dma_map->dm_mapsize, 1614 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1615 bus_dmamap_sync(sc->sc_dmat, q->q_ctx.dma_map, 0, 1616 q->q_ctx.dma_map->dm_mapsize, 1617 BUS_DMASYNC_PREWRITE); 1618 1619 WRITE_REG(sc, BS_MCR2, q->q_mcr.dma_paddr); 1620 q = SIMPLEQ_FIRST(&sc->sc_queue2); 1621 SIMPLEQ_REMOVE_HEAD(&sc->sc_queue2, /*q,*/ q_next); 1622 --sc->sc_nqueue2; 1623 SIMPLEQ_INSERT_TAIL(&sc->sc_qchip2, q, q_next); 1624 } 1625 } 1626 1627 /* 1628 * Callback for handling random numbers 1629 */ 1630 static void 1631 ubsec_callback2(struct ubsec_softc *sc, struct ubsec_q2 *q) 1632 { 1633 struct cryptkop *krp; 1634 struct ubsec_ctx_keyop *ctx; 1635 1636 ctx = (struct ubsec_ctx_keyop *)q->q_ctx.dma_vaddr; 1637 bus_dmamap_sync(sc->sc_dmat, q->q_ctx.dma_map, 0, 1638 q->q_ctx.dma_map->dm_mapsize, BUS_DMASYNC_POSTWRITE); 1639 1640 switch (q->q_type) { 1641 #ifndef UBSEC_NO_RNG 1642 case UBS_CTXOP_RNGSHA1: 1643 case UBS_CTXOP_RNGBYPASS: { 1644 struct ubsec_q2_rng *rng = (struct ubsec_q2_rng *)q; 1645 u_int32_t *p; 1646 int i; 1647 1648 bus_dmamap_sync(sc->sc_dmat, rng->rng_buf.dma_map, 0, 1649 rng->rng_buf.dma_map->dm_mapsize, BUS_DMASYNC_POSTREAD); 1650 p = (u_int32_t *)rng->rng_buf.dma_vaddr; 1651 #ifndef __NetBSD__ 1652 for (i = 0; i < UBSEC_RNG_BUFSIZ; p++, i++) 1653 add_true_randomness(letoh32(*p)); 1654 rng->rng_used = 0; 1655 #else 1656 /* XXX NetBSD rnd subsystem too weak */ 1657 i = 0; (void)i; /* shut off gcc warnings */ 1658 #endif 1659 #ifdef __OpenBSD__ 1660 timeout_add(&sc->sc_rngto, sc->sc_rnghz); 1661 #else 1662 callout_reset(&sc->sc_rngto, sc->sc_rnghz, ubsec_rng, sc); 1663 #endif 1664 break; 1665 } 1666 #endif 1667 case UBS_CTXOP_MODEXP: { 1668 struct ubsec_q2_modexp *me = (struct ubsec_q2_modexp *)q; 1669 u_int rlen, clen; 1670 1671 krp = me->me_krp; 1672 rlen = (me->me_modbits + 7) / 8; 1673 clen = (krp->krp_param[krp->krp_iparams].crp_nbits + 7) / 8; 1674 1675 bus_dmamap_sync(sc->sc_dmat, me->me_M.dma_map, 1676 0, me->me_M.dma_map->dm_mapsize, BUS_DMASYNC_POSTWRITE); 1677 bus_dmamap_sync(sc->sc_dmat, me->me_E.dma_map, 1678 0, me->me_E.dma_map->dm_mapsize, BUS_DMASYNC_POSTWRITE); 1679 bus_dmamap_sync(sc->sc_dmat, me->me_C.dma_map, 1680 0, me->me_C.dma_map->dm_mapsize, BUS_DMASYNC_POSTREAD); 1681 bus_dmamap_sync(sc->sc_dmat, me->me_epb.dma_map, 1682 0, me->me_epb.dma_map->dm_mapsize, BUS_DMASYNC_POSTWRITE); 1683 1684 if (clen < rlen) 1685 krp->krp_status = E2BIG; 1686 else { 1687 if (sc->sc_flags & UBS_FLAGS_HWNORM) { 1688 bzero(krp->krp_param[krp->krp_iparams].crp_p, 1689 (krp->krp_param[krp->krp_iparams].crp_nbits 1690 + 7) / 8); 1691 bcopy(me->me_C.dma_vaddr, 1692 krp->krp_param[krp->krp_iparams].crp_p, 1693 (me->me_modbits + 7) / 8); 1694 } else 1695 ubsec_kshift_l(me->me_shiftbits, 1696 me->me_C.dma_vaddr, me->me_normbits, 1697 krp->krp_param[krp->krp_iparams].crp_p, 1698 krp->krp_param[krp->krp_iparams].crp_nbits); 1699 } 1700 1701 crypto_kdone(krp); 1702 1703 /* bzero all potentially sensitive data */ 1704 bzero(me->me_E.dma_vaddr, me->me_E.dma_size); 1705 bzero(me->me_M.dma_vaddr, me->me_M.dma_size); 1706 bzero(me->me_C.dma_vaddr, me->me_C.dma_size); 1707 bzero(me->me_q.q_ctx.dma_vaddr, me->me_q.q_ctx.dma_size); 1708 1709 /* Can't free here, so put us on the free list. */ 1710 SIMPLEQ_INSERT_TAIL(&sc->sc_q2free, &me->me_q, q_next); 1711 break; 1712 } 1713 case UBS_CTXOP_RSAPRIV: { 1714 struct ubsec_q2_rsapriv *rp = (struct ubsec_q2_rsapriv *)q; 1715 u_int len; 1716 1717 krp = rp->rpr_krp; 1718 bus_dmamap_sync(sc->sc_dmat, rp->rpr_msgin.dma_map, 0, 1719 rp->rpr_msgin.dma_map->dm_mapsize, BUS_DMASYNC_POSTWRITE); 1720 bus_dmamap_sync(sc->sc_dmat, rp->rpr_msgout.dma_map, 0, 1721 rp->rpr_msgout.dma_map->dm_mapsize, BUS_DMASYNC_POSTREAD); 1722 1723 len = (krp->krp_param[UBS_RSAPRIV_PAR_MSGOUT].crp_nbits + 7) / 8; 1724 bcopy(rp->rpr_msgout.dma_vaddr, 1725 krp->krp_param[UBS_RSAPRIV_PAR_MSGOUT].crp_p, len); 1726 1727 crypto_kdone(krp); 1728 1729 bzero(rp->rpr_msgin.dma_vaddr, rp->rpr_msgin.dma_size); 1730 bzero(rp->rpr_msgout.dma_vaddr, rp->rpr_msgout.dma_size); 1731 bzero(rp->rpr_q.q_ctx.dma_vaddr, rp->rpr_q.q_ctx.dma_size); 1732 1733 /* Can't free here, so put us on the free list. */ 1734 SIMPLEQ_INSERT_TAIL(&sc->sc_q2free, &rp->rpr_q, q_next); 1735 break; 1736 } 1737 default: 1738 printf("%s: unknown ctx op: %x\n", sc->sc_dv.dv_xname, 1739 letoh16(ctx->ctx_op)); 1740 break; 1741 } 1742 } 1743 1744 #ifndef UBSEC_NO_RNG 1745 static void 1746 ubsec_rng(void *vsc) 1747 { 1748 struct ubsec_softc *sc = vsc; 1749 struct ubsec_q2_rng *rng = &sc->sc_rng; 1750 struct ubsec_mcr *mcr; 1751 struct ubsec_ctx_rngbypass *ctx; 1752 int s; 1753 1754 s = splnet(); 1755 if (rng->rng_used) { 1756 splx(s); 1757 return; 1758 } 1759 sc->sc_nqueue2++; 1760 if (sc->sc_nqueue2 >= UBS_MAX_NQUEUE) 1761 goto out; 1762 1763 mcr = (struct ubsec_mcr *)rng->rng_q.q_mcr.dma_vaddr; 1764 ctx = (struct ubsec_ctx_rngbypass *)rng->rng_q.q_ctx.dma_vaddr; 1765 1766 mcr->mcr_pkts = htole16(1); 1767 mcr->mcr_flags = 0; 1768 mcr->mcr_cmdctxp = htole32(rng->rng_q.q_ctx.dma_paddr); 1769 mcr->mcr_ipktbuf.pb_addr = mcr->mcr_ipktbuf.pb_next = 0; 1770 mcr->mcr_ipktbuf.pb_len = 0; 1771 mcr->mcr_reserved = mcr->mcr_pktlen = 0; 1772 mcr->mcr_opktbuf.pb_addr = htole32(rng->rng_buf.dma_paddr); 1773 mcr->mcr_opktbuf.pb_len = htole32(((sizeof(u_int32_t) * UBSEC_RNG_BUFSIZ)) & 1774 UBS_PKTBUF_LEN); 1775 mcr->mcr_opktbuf.pb_next = 0; 1776 1777 ctx->rbp_len = htole16(sizeof(struct ubsec_ctx_rngbypass)); 1778 ctx->rbp_op = htole16(UBS_CTXOP_RNGSHA1); 1779 rng->rng_q.q_type = UBS_CTXOP_RNGSHA1; 1780 1781 bus_dmamap_sync(sc->sc_dmat, rng->rng_buf.dma_map, 0, 1782 rng->rng_buf.dma_map->dm_mapsize, BUS_DMASYNC_PREREAD); 1783 1784 SIMPLEQ_INSERT_TAIL(&sc->sc_queue2, &rng->rng_q, q_next); 1785 rng->rng_used = 1; 1786 ubsec_feed2(sc); 1787 ubsecstats.hst_rng++; 1788 splx(s); 1789 1790 return; 1791 1792 out: 1793 /* 1794 * Something weird happened, generate our own call back. 1795 */ 1796 sc->sc_nqueue2--; 1797 splx(s); 1798 #ifdef __OpenBSD__ 1799 timeout_add(&sc->sc_rngto, sc->sc_rnghz); 1800 #else 1801 callout_reset(&sc->sc_rngto, sc->sc_rnghz, ubsec_rng, sc); 1802 #endif 1803 } 1804 #endif /* UBSEC_NO_RNG */ 1805 1806 static int 1807 ubsec_dma_malloc(struct ubsec_softc *sc, bus_size_t size, 1808 struct ubsec_dma_alloc *dma,int mapflags) 1809 { 1810 int r; 1811 1812 if ((r = bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, 1813 &dma->dma_seg, 1, &dma->dma_nseg, BUS_DMA_NOWAIT)) != 0) 1814 goto fail_0; 1815 1816 if ((r = bus_dmamem_map(sc->sc_dmat, &dma->dma_seg, dma->dma_nseg, 1817 size, &dma->dma_vaddr, mapflags | BUS_DMA_NOWAIT)) != 0) 1818 goto fail_1; 1819 1820 if ((r = bus_dmamap_create(sc->sc_dmat, size, 1, size, 0, 1821 BUS_DMA_NOWAIT, &dma->dma_map)) != 0) 1822 goto fail_2; 1823 1824 if ((r = bus_dmamap_load(sc->sc_dmat, dma->dma_map, dma->dma_vaddr, 1825 size, NULL, BUS_DMA_NOWAIT)) != 0) 1826 goto fail_3; 1827 1828 dma->dma_paddr = dma->dma_map->dm_segs[0].ds_addr; 1829 dma->dma_size = size; 1830 return (0); 1831 1832 fail_3: 1833 bus_dmamap_destroy(sc->sc_dmat, dma->dma_map); 1834 fail_2: 1835 bus_dmamem_unmap(sc->sc_dmat, dma->dma_vaddr, size); 1836 fail_1: 1837 bus_dmamem_free(sc->sc_dmat, &dma->dma_seg, dma->dma_nseg); 1838 fail_0: 1839 dma->dma_map = NULL; 1840 return (r); 1841 } 1842 1843 static void 1844 ubsec_dma_free(struct ubsec_softc *sc, struct ubsec_dma_alloc *dma) 1845 { 1846 bus_dmamap_unload(sc->sc_dmat, dma->dma_map); 1847 bus_dmamem_unmap(sc->sc_dmat, dma->dma_vaddr, dma->dma_size); 1848 bus_dmamem_free(sc->sc_dmat, &dma->dma_seg, dma->dma_nseg); 1849 bus_dmamap_destroy(sc->sc_dmat, dma->dma_map); 1850 } 1851 1852 /* 1853 * Resets the board. Values in the regesters are left as is 1854 * from the reset (i.e. initial values are assigned elsewhere). 1855 */ 1856 static void 1857 ubsec_reset_board(struct ubsec_softc *sc) 1858 { 1859 volatile u_int32_t ctrl; 1860 1861 ctrl = READ_REG(sc, BS_CTRL); 1862 ctrl |= BS_CTRL_RESET; 1863 WRITE_REG(sc, BS_CTRL, ctrl); 1864 1865 /* 1866 * Wait aprox. 30 PCI clocks = 900 ns = 0.9 us 1867 */ 1868 DELAY(10); 1869 } 1870 1871 /* 1872 * Init Broadcom registers 1873 */ 1874 static void 1875 ubsec_init_board(struct ubsec_softc *sc) 1876 { 1877 u_int32_t ctrl; 1878 1879 ctrl = READ_REG(sc, BS_CTRL); 1880 ctrl &= ~(BS_CTRL_BE32 | BS_CTRL_BE64); 1881 ctrl |= BS_CTRL_LITTLE_ENDIAN | BS_CTRL_MCR1INT; 1882 1883 /* 1884 * XXX: Sam Leffler's code has (UBS_FLAGS_KEY|UBS_FLAGS_RNG)). 1885 * anyone got hw docs? 1886 */ 1887 if (sc->sc_flags & UBS_FLAGS_KEY) 1888 ctrl |= BS_CTRL_MCR2INT; 1889 else 1890 ctrl &= ~BS_CTRL_MCR2INT; 1891 1892 if (sc->sc_flags & UBS_FLAGS_HWNORM) 1893 ctrl &= ~BS_CTRL_SWNORM; 1894 1895 WRITE_REG(sc, BS_CTRL, ctrl); 1896 } 1897 1898 /* 1899 * Init Broadcom PCI registers 1900 */ 1901 static void 1902 ubsec_init_pciregs(pa) 1903 struct pci_attach_args *pa; 1904 { 1905 pci_chipset_tag_t pc = pa->pa_pc; 1906 u_int32_t misc; 1907 1908 /* 1909 * This will set the cache line size to 1, this will 1910 * force the BCM58xx chip just to do burst read/writes. 1911 * Cache line read/writes are to slow 1912 */ 1913 misc = pci_conf_read(pc, pa->pa_tag, PCI_BHLC_REG); 1914 misc = (misc & ~(PCI_CACHELINE_MASK << PCI_CACHELINE_SHIFT)) 1915 | ((UBS_DEF_CACHELINE & 0xff) << PCI_CACHELINE_SHIFT); 1916 pci_conf_write(pc, pa->pa_tag, PCI_BHLC_REG, misc); 1917 } 1918 1919 /* 1920 * Clean up after a chip crash. 1921 * It is assumed that the caller in splnet() 1922 */ 1923 static void 1924 ubsec_cleanchip(struct ubsec_softc *sc) 1925 { 1926 struct ubsec_q *q; 1927 1928 while (!SIMPLEQ_EMPTY(&sc->sc_qchip)) { 1929 q = SIMPLEQ_FIRST(&sc->sc_qchip); 1930 SIMPLEQ_REMOVE_HEAD(&sc->sc_qchip, /*q,*/ q_next); 1931 ubsec_free_q(sc, q); 1932 } 1933 sc->sc_nqchip = 0; 1934 } 1935 1936 /* 1937 * free a ubsec_q 1938 * It is assumed that the caller is within splnet() 1939 */ 1940 static int 1941 ubsec_free_q(struct ubsec_softc *sc, struct ubsec_q *q) 1942 { 1943 struct ubsec_q *q2; 1944 struct cryptop *crp; 1945 int npkts; 1946 int i; 1947 1948 npkts = q->q_nstacked_mcrs; 1949 1950 for (i = 0; i < npkts; i++) { 1951 if(q->q_stacked_mcr[i]) { 1952 q2 = q->q_stacked_mcr[i]; 1953 1954 if ((q2->q_dst_m != NULL) && (q2->q_src_m != q2->q_dst_m)) 1955 m_freem(q2->q_dst_m); 1956 1957 crp = (struct cryptop *)q2->q_crp; 1958 1959 SIMPLEQ_INSERT_TAIL(&sc->sc_freequeue, q2, q_next); 1960 1961 crp->crp_etype = EFAULT; 1962 crypto_done(crp); 1963 } else { 1964 break; 1965 } 1966 } 1967 1968 /* 1969 * Free header MCR 1970 */ 1971 if ((q->q_dst_m != NULL) && (q->q_src_m != q->q_dst_m)) 1972 m_freem(q->q_dst_m); 1973 1974 crp = (struct cryptop *)q->q_crp; 1975 1976 SIMPLEQ_INSERT_TAIL(&sc->sc_freequeue, q, q_next); 1977 1978 crp->crp_etype = EFAULT; 1979 crypto_done(crp); 1980 return(0); 1981 } 1982 1983 /* 1984 * Routine to reset the chip and clean up. 1985 * It is assumed that the caller is in splnet() 1986 */ 1987 static void 1988 ubsec_totalreset(struct ubsec_softc *sc) 1989 { 1990 ubsec_reset_board(sc); 1991 ubsec_init_board(sc); 1992 ubsec_cleanchip(sc); 1993 } 1994 1995 static int 1996 ubsec_dmamap_aligned(bus_dmamap_t map) 1997 { 1998 int i; 1999 2000 for (i = 0; i < map->dm_nsegs; i++) { 2001 if (map->dm_segs[i].ds_addr & 3) 2002 return (0); 2003 if ((i != (map->dm_nsegs - 1)) && 2004 (map->dm_segs[i].ds_len & 3)) 2005 return (0); 2006 } 2007 return (1); 2008 } 2009 2010 #ifdef __OpenBSD__ 2011 struct ubsec_softc * 2012 ubsec_kfind(krp) 2013 struct cryptkop *krp; 2014 { 2015 struct ubsec_softc *sc; 2016 int i; 2017 2018 for (i = 0; i < ubsec_cd.cd_ndevs; i++) { 2019 sc = ubsec_cd.cd_devs[i]; 2020 if (sc == NULL) 2021 continue; 2022 if (sc->sc_cid == krp->krp_hid) 2023 return (sc); 2024 } 2025 return (NULL); 2026 } 2027 #endif 2028 2029 static void 2030 ubsec_kfree(struct ubsec_softc *sc, struct ubsec_q2 *q) 2031 { 2032 switch (q->q_type) { 2033 case UBS_CTXOP_MODEXP: { 2034 struct ubsec_q2_modexp *me = (struct ubsec_q2_modexp *)q; 2035 2036 ubsec_dma_free(sc, &me->me_q.q_mcr); 2037 ubsec_dma_free(sc, &me->me_q.q_ctx); 2038 ubsec_dma_free(sc, &me->me_M); 2039 ubsec_dma_free(sc, &me->me_E); 2040 ubsec_dma_free(sc, &me->me_C); 2041 ubsec_dma_free(sc, &me->me_epb); 2042 free(me, M_DEVBUF); 2043 break; 2044 } 2045 case UBS_CTXOP_RSAPRIV: { 2046 struct ubsec_q2_rsapriv *rp = (struct ubsec_q2_rsapriv *)q; 2047 2048 ubsec_dma_free(sc, &rp->rpr_q.q_mcr); 2049 ubsec_dma_free(sc, &rp->rpr_q.q_ctx); 2050 ubsec_dma_free(sc, &rp->rpr_msgin); 2051 ubsec_dma_free(sc, &rp->rpr_msgout); 2052 free(rp, M_DEVBUF); 2053 break; 2054 } 2055 default: 2056 printf("%s: invalid kfree 0x%x\n", sc->sc_dv.dv_xname, 2057 q->q_type); 2058 break; 2059 } 2060 } 2061 2062 static int 2063 ubsec_kprocess(void *arg, struct cryptkop *krp, int hint) 2064 { 2065 struct ubsec_softc *sc; 2066 int r; 2067 2068 if (krp == NULL || krp->krp_callback == NULL) 2069 return (EINVAL); 2070 #ifdef __OpenBSD__ 2071 if ((sc = ubsec_kfind(krp)) == NULL) 2072 return (EINVAL); 2073 #else 2074 sc = arg; 2075 KASSERT(sc != NULL /*, ("ubsec_kprocess: null softc")*/); 2076 #endif 2077 2078 while (!SIMPLEQ_EMPTY(&sc->sc_q2free)) { 2079 struct ubsec_q2 *q; 2080 2081 q = SIMPLEQ_FIRST(&sc->sc_q2free); 2082 SIMPLEQ_REMOVE_HEAD(&sc->sc_q2free, /*q,*/ q_next); 2083 ubsec_kfree(sc, q); 2084 } 2085 2086 switch (krp->krp_op) { 2087 case CRK_MOD_EXP: 2088 if (sc->sc_flags & UBS_FLAGS_HWNORM) 2089 r = ubsec_kprocess_modexp_hw(sc, krp, hint); 2090 else 2091 r = ubsec_kprocess_modexp_sw(sc, krp, hint); 2092 break; 2093 case CRK_MOD_EXP_CRT: 2094 r = ubsec_kprocess_rsapriv(sc, krp, hint); 2095 break; 2096 default: 2097 printf("%s: kprocess: invalid op 0x%x\n", 2098 sc->sc_dv.dv_xname, krp->krp_op); 2099 krp->krp_status = EOPNOTSUPP; 2100 crypto_kdone(krp); 2101 r = 0; 2102 } 2103 return (r); 2104 } 2105 2106 /* 2107 * Start computation of cr[C] = (cr[M] ^ cr[E]) mod cr[N] (sw normalization) 2108 */ 2109 static int 2110 ubsec_kprocess_modexp_sw(struct ubsec_softc *sc, struct cryptkop *krp, 2111 int hint) 2112 { 2113 struct ubsec_q2_modexp *me; 2114 struct ubsec_mcr *mcr; 2115 struct ubsec_ctx_modexp *ctx; 2116 struct ubsec_pktbuf *epb; 2117 int s, err = 0; 2118 u_int nbits, normbits, mbits, shiftbits, ebits; 2119 2120 me = (struct ubsec_q2_modexp *)malloc(sizeof *me, M_DEVBUF, M_NOWAIT); 2121 if (me == NULL) { 2122 err = ENOMEM; 2123 goto errout; 2124 } 2125 bzero(me, sizeof *me); 2126 me->me_krp = krp; 2127 me->me_q.q_type = UBS_CTXOP_MODEXP; 2128 2129 nbits = ubsec_ksigbits(&krp->krp_param[UBS_MODEXP_PAR_N]); 2130 if (nbits <= 512) 2131 normbits = 512; 2132 else if (nbits <= 768) 2133 normbits = 768; 2134 else if (nbits <= 1024) 2135 normbits = 1024; 2136 else if (sc->sc_flags & UBS_FLAGS_BIGKEY && nbits <= 1536) 2137 normbits = 1536; 2138 else if (sc->sc_flags & UBS_FLAGS_BIGKEY && nbits <= 2048) 2139 normbits = 2048; 2140 else { 2141 err = E2BIG; 2142 goto errout; 2143 } 2144 2145 shiftbits = normbits - nbits; 2146 2147 me->me_modbits = nbits; 2148 me->me_shiftbits = shiftbits; 2149 me->me_normbits = normbits; 2150 2151 /* Sanity check: result bits must be >= true modulus bits. */ 2152 if (krp->krp_param[krp->krp_iparams].crp_nbits < nbits) { 2153 err = ERANGE; 2154 goto errout; 2155 } 2156 2157 if (ubsec_dma_malloc(sc, sizeof(struct ubsec_mcr), 2158 &me->me_q.q_mcr, 0)) { 2159 err = ENOMEM; 2160 goto errout; 2161 } 2162 mcr = (struct ubsec_mcr *)me->me_q.q_mcr.dma_vaddr; 2163 2164 if (ubsec_dma_malloc(sc, sizeof(struct ubsec_ctx_modexp), 2165 &me->me_q.q_ctx, 0)) { 2166 err = ENOMEM; 2167 goto errout; 2168 } 2169 2170 mbits = ubsec_ksigbits(&krp->krp_param[UBS_MODEXP_PAR_M]); 2171 if (mbits > nbits) { 2172 err = E2BIG; 2173 goto errout; 2174 } 2175 if (ubsec_dma_malloc(sc, normbits / 8, &me->me_M, 0)) { 2176 err = ENOMEM; 2177 goto errout; 2178 } 2179 ubsec_kshift_r(shiftbits, 2180 krp->krp_param[UBS_MODEXP_PAR_M].crp_p, mbits, 2181 me->me_M.dma_vaddr, normbits); 2182 2183 if (ubsec_dma_malloc(sc, normbits / 8, &me->me_C, 0)) { 2184 err = ENOMEM; 2185 goto errout; 2186 } 2187 bzero(me->me_C.dma_vaddr, me->me_C.dma_size); 2188 2189 ebits = ubsec_ksigbits(&krp->krp_param[UBS_MODEXP_PAR_E]); 2190 if (ebits > nbits) { 2191 err = E2BIG; 2192 goto errout; 2193 } 2194 if (ubsec_dma_malloc(sc, normbits / 8, &me->me_E, 0)) { 2195 err = ENOMEM; 2196 goto errout; 2197 } 2198 ubsec_kshift_r(shiftbits, 2199 krp->krp_param[UBS_MODEXP_PAR_E].crp_p, ebits, 2200 me->me_E.dma_vaddr, normbits); 2201 2202 if (ubsec_dma_malloc(sc, sizeof(struct ubsec_pktbuf), 2203 &me->me_epb, 0)) { 2204 err = ENOMEM; 2205 goto errout; 2206 } 2207 epb = (struct ubsec_pktbuf *)me->me_epb.dma_vaddr; 2208 epb->pb_addr = htole32(me->me_E.dma_paddr); 2209 epb->pb_next = 0; 2210 epb->pb_len = htole32(normbits / 8); 2211 2212 #ifdef UBSEC_DEBUG 2213 if (ubsec_debug) { 2214 printf("Epb "); 2215 ubsec_dump_pb(epb); 2216 } 2217 #endif 2218 2219 mcr->mcr_pkts = htole16(1); 2220 mcr->mcr_flags = 0; 2221 mcr->mcr_cmdctxp = htole32(me->me_q.q_ctx.dma_paddr); 2222 mcr->mcr_reserved = 0; 2223 mcr->mcr_pktlen = 0; 2224 2225 mcr->mcr_ipktbuf.pb_addr = htole32(me->me_M.dma_paddr); 2226 mcr->mcr_ipktbuf.pb_len = htole32(normbits / 8); 2227 mcr->mcr_ipktbuf.pb_next = htole32(me->me_epb.dma_paddr); 2228 2229 mcr->mcr_opktbuf.pb_addr = htole32(me->me_C.dma_paddr); 2230 mcr->mcr_opktbuf.pb_next = 0; 2231 mcr->mcr_opktbuf.pb_len = htole32(normbits / 8); 2232 2233 #ifdef DIAGNOSTIC 2234 /* Misaligned output buffer will hang the chip. */ 2235 if ((letoh32(mcr->mcr_opktbuf.pb_addr) & 3) != 0) 2236 panic("%s: modexp invalid addr 0x%x", 2237 sc->sc_dv.dv_xname, letoh32(mcr->mcr_opktbuf.pb_addr)); 2238 if ((letoh32(mcr->mcr_opktbuf.pb_len) & 3) != 0) 2239 panic("%s: modexp invalid len 0x%x", 2240 sc->sc_dv.dv_xname, letoh32(mcr->mcr_opktbuf.pb_len)); 2241 #endif 2242 2243 ctx = (struct ubsec_ctx_modexp *)me->me_q.q_ctx.dma_vaddr; 2244 bzero(ctx, sizeof(*ctx)); 2245 ubsec_kshift_r(shiftbits, 2246 krp->krp_param[UBS_MODEXP_PAR_N].crp_p, nbits, 2247 ctx->me_N, normbits); 2248 ctx->me_len = htole16((normbits / 8) + (4 * sizeof(u_int16_t))); 2249 ctx->me_op = htole16(UBS_CTXOP_MODEXP); 2250 ctx->me_E_len = htole16(nbits); 2251 ctx->me_N_len = htole16(nbits); 2252 2253 #ifdef UBSEC_DEBUG 2254 if (ubsec_debug) { 2255 ubsec_dump_mcr(mcr); 2256 ubsec_dump_ctx2((struct ubsec_ctx_keyop *)ctx); 2257 } 2258 #endif 2259 2260 /* 2261 * ubsec_feed2 will sync mcr and ctx, we just need to sync 2262 * everything else. 2263 */ 2264 bus_dmamap_sync(sc->sc_dmat, me->me_M.dma_map, 2265 0, me->me_M.dma_map->dm_mapsize, BUS_DMASYNC_PREWRITE); 2266 bus_dmamap_sync(sc->sc_dmat, me->me_E.dma_map, 2267 0, me->me_E.dma_map->dm_mapsize, BUS_DMASYNC_PREWRITE); 2268 bus_dmamap_sync(sc->sc_dmat, me->me_C.dma_map, 2269 0, me->me_C.dma_map->dm_mapsize, BUS_DMASYNC_PREREAD); 2270 bus_dmamap_sync(sc->sc_dmat, me->me_epb.dma_map, 2271 0, me->me_epb.dma_map->dm_mapsize, BUS_DMASYNC_PREWRITE); 2272 2273 /* Enqueue and we're done... */ 2274 s = splnet(); 2275 SIMPLEQ_INSERT_TAIL(&sc->sc_queue2, &me->me_q, q_next); 2276 ubsec_feed2(sc); 2277 ubsecstats.hst_modexp++; 2278 splx(s); 2279 2280 return (0); 2281 2282 errout: 2283 if (me != NULL) { 2284 if (me->me_q.q_mcr.dma_map != NULL) 2285 ubsec_dma_free(sc, &me->me_q.q_mcr); 2286 if (me->me_q.q_ctx.dma_map != NULL) { 2287 bzero(me->me_q.q_ctx.dma_vaddr, me->me_q.q_ctx.dma_size); 2288 ubsec_dma_free(sc, &me->me_q.q_ctx); 2289 } 2290 if (me->me_M.dma_map != NULL) { 2291 bzero(me->me_M.dma_vaddr, me->me_M.dma_size); 2292 ubsec_dma_free(sc, &me->me_M); 2293 } 2294 if (me->me_E.dma_map != NULL) { 2295 bzero(me->me_E.dma_vaddr, me->me_E.dma_size); 2296 ubsec_dma_free(sc, &me->me_E); 2297 } 2298 if (me->me_C.dma_map != NULL) { 2299 bzero(me->me_C.dma_vaddr, me->me_C.dma_size); 2300 ubsec_dma_free(sc, &me->me_C); 2301 } 2302 if (me->me_epb.dma_map != NULL) 2303 ubsec_dma_free(sc, &me->me_epb); 2304 free(me, M_DEVBUF); 2305 } 2306 krp->krp_status = err; 2307 crypto_kdone(krp); 2308 return (0); 2309 } 2310 2311 /* 2312 * Start computation of cr[C] = (cr[M] ^ cr[E]) mod cr[N] (hw normalization) 2313 */ 2314 static int 2315 ubsec_kprocess_modexp_hw(struct ubsec_softc *sc, struct cryptkop *krp, 2316 int hint) 2317 { 2318 struct ubsec_q2_modexp *me; 2319 struct ubsec_mcr *mcr; 2320 struct ubsec_ctx_modexp *ctx; 2321 struct ubsec_pktbuf *epb; 2322 int s, err = 0; 2323 u_int nbits, normbits, mbits, shiftbits, ebits; 2324 2325 me = (struct ubsec_q2_modexp *)malloc(sizeof *me, M_DEVBUF, M_NOWAIT); 2326 if (me == NULL) { 2327 err = ENOMEM; 2328 goto errout; 2329 } 2330 bzero(me, sizeof *me); 2331 me->me_krp = krp; 2332 me->me_q.q_type = UBS_CTXOP_MODEXP; 2333 2334 nbits = ubsec_ksigbits(&krp->krp_param[UBS_MODEXP_PAR_N]); 2335 if (nbits <= 512) 2336 normbits = 512; 2337 else if (nbits <= 768) 2338 normbits = 768; 2339 else if (nbits <= 1024) 2340 normbits = 1024; 2341 else if (sc->sc_flags & UBS_FLAGS_BIGKEY && nbits <= 1536) 2342 normbits = 1536; 2343 else if (sc->sc_flags & UBS_FLAGS_BIGKEY && nbits <= 2048) 2344 normbits = 2048; 2345 else { 2346 err = E2BIG; 2347 goto errout; 2348 } 2349 2350 shiftbits = normbits - nbits; 2351 2352 /* XXX ??? */ 2353 me->me_modbits = nbits; 2354 me->me_shiftbits = shiftbits; 2355 me->me_normbits = normbits; 2356 2357 /* Sanity check: result bits must be >= true modulus bits. */ 2358 if (krp->krp_param[krp->krp_iparams].crp_nbits < nbits) { 2359 err = ERANGE; 2360 goto errout; 2361 } 2362 2363 if (ubsec_dma_malloc(sc, sizeof(struct ubsec_mcr), 2364 &me->me_q.q_mcr, 0)) { 2365 err = ENOMEM; 2366 goto errout; 2367 } 2368 mcr = (struct ubsec_mcr *)me->me_q.q_mcr.dma_vaddr; 2369 2370 if (ubsec_dma_malloc(sc, sizeof(struct ubsec_ctx_modexp), 2371 &me->me_q.q_ctx, 0)) { 2372 err = ENOMEM; 2373 goto errout; 2374 } 2375 2376 mbits = ubsec_ksigbits(&krp->krp_param[UBS_MODEXP_PAR_M]); 2377 if (mbits > nbits) { 2378 err = E2BIG; 2379 goto errout; 2380 } 2381 if (ubsec_dma_malloc(sc, normbits / 8, &me->me_M, 0)) { 2382 err = ENOMEM; 2383 goto errout; 2384 } 2385 bzero(me->me_M.dma_vaddr, normbits / 8); 2386 bcopy(krp->krp_param[UBS_MODEXP_PAR_M].crp_p, 2387 me->me_M.dma_vaddr, (mbits + 7) / 8); 2388 2389 if (ubsec_dma_malloc(sc, normbits / 8, &me->me_C, 0)) { 2390 err = ENOMEM; 2391 goto errout; 2392 } 2393 bzero(me->me_C.dma_vaddr, me->me_C.dma_size); 2394 2395 ebits = ubsec_ksigbits(&krp->krp_param[UBS_MODEXP_PAR_E]); 2396 if (ebits > nbits) { 2397 err = E2BIG; 2398 goto errout; 2399 } 2400 if (ubsec_dma_malloc(sc, normbits / 8, &me->me_E, 0)) { 2401 err = ENOMEM; 2402 goto errout; 2403 } 2404 bzero(me->me_E.dma_vaddr, normbits / 8); 2405 bcopy(krp->krp_param[UBS_MODEXP_PAR_E].crp_p, 2406 me->me_E.dma_vaddr, (ebits + 7) / 8); 2407 2408 if (ubsec_dma_malloc(sc, sizeof(struct ubsec_pktbuf), 2409 &me->me_epb, 0)) { 2410 err = ENOMEM; 2411 goto errout; 2412 } 2413 epb = (struct ubsec_pktbuf *)me->me_epb.dma_vaddr; 2414 epb->pb_addr = htole32(me->me_E.dma_paddr); 2415 epb->pb_next = 0; 2416 epb->pb_len = htole32((ebits + 7) / 8); 2417 2418 #ifdef UBSEC_DEBUG 2419 if (ubsec_debug) { 2420 printf("Epb "); 2421 ubsec_dump_pb(epb); 2422 } 2423 #endif 2424 2425 mcr->mcr_pkts = htole16(1); 2426 mcr->mcr_flags = 0; 2427 mcr->mcr_cmdctxp = htole32(me->me_q.q_ctx.dma_paddr); 2428 mcr->mcr_reserved = 0; 2429 mcr->mcr_pktlen = 0; 2430 2431 mcr->mcr_ipktbuf.pb_addr = htole32(me->me_M.dma_paddr); 2432 mcr->mcr_ipktbuf.pb_len = htole32(normbits / 8); 2433 mcr->mcr_ipktbuf.pb_next = htole32(me->me_epb.dma_paddr); 2434 2435 mcr->mcr_opktbuf.pb_addr = htole32(me->me_C.dma_paddr); 2436 mcr->mcr_opktbuf.pb_next = 0; 2437 mcr->mcr_opktbuf.pb_len = htole32(normbits / 8); 2438 2439 #ifdef DIAGNOSTIC 2440 /* Misaligned output buffer will hang the chip. */ 2441 if ((letoh32(mcr->mcr_opktbuf.pb_addr) & 3) != 0) 2442 panic("%s: modexp invalid addr 0x%x", 2443 sc->sc_dv.dv_xname, letoh32(mcr->mcr_opktbuf.pb_addr)); 2444 if ((letoh32(mcr->mcr_opktbuf.pb_len) & 3) != 0) 2445 panic("%s: modexp invalid len 0x%x", 2446 sc->sc_dv.dv_xname, letoh32(mcr->mcr_opktbuf.pb_len)); 2447 #endif 2448 2449 ctx = (struct ubsec_ctx_modexp *)me->me_q.q_ctx.dma_vaddr; 2450 bzero(ctx, sizeof(*ctx)); 2451 bcopy(krp->krp_param[UBS_MODEXP_PAR_N].crp_p, ctx->me_N, 2452 (nbits + 7) / 8); 2453 ctx->me_len = htole16((normbits / 8) + (4 * sizeof(u_int16_t))); 2454 ctx->me_op = htole16(UBS_CTXOP_MODEXP); 2455 ctx->me_E_len = htole16(ebits); 2456 ctx->me_N_len = htole16(nbits); 2457 2458 #ifdef UBSEC_DEBUG 2459 if (ubsec_debug) { 2460 ubsec_dump_mcr(mcr); 2461 ubsec_dump_ctx2((struct ubsec_ctx_keyop *)ctx); 2462 } 2463 #endif 2464 2465 /* 2466 * ubsec_feed2 will sync mcr and ctx, we just need to sync 2467 * everything else. 2468 */ 2469 bus_dmamap_sync(sc->sc_dmat, me->me_M.dma_map, 2470 0, me->me_M.dma_map->dm_mapsize, BUS_DMASYNC_PREWRITE); 2471 bus_dmamap_sync(sc->sc_dmat, me->me_E.dma_map, 2472 0, me->me_E.dma_map->dm_mapsize, BUS_DMASYNC_PREWRITE); 2473 bus_dmamap_sync(sc->sc_dmat, me->me_C.dma_map, 2474 0, me->me_C.dma_map->dm_mapsize, BUS_DMASYNC_PREREAD); 2475 bus_dmamap_sync(sc->sc_dmat, me->me_epb.dma_map, 2476 0, me->me_epb.dma_map->dm_mapsize, BUS_DMASYNC_PREWRITE); 2477 2478 /* Enqueue and we're done... */ 2479 s = splnet(); 2480 SIMPLEQ_INSERT_TAIL(&sc->sc_queue2, &me->me_q, q_next); 2481 ubsec_feed2(sc); 2482 splx(s); 2483 2484 return (0); 2485 2486 errout: 2487 if (me != NULL) { 2488 if (me->me_q.q_mcr.dma_map != NULL) 2489 ubsec_dma_free(sc, &me->me_q.q_mcr); 2490 if (me->me_q.q_ctx.dma_map != NULL) { 2491 bzero(me->me_q.q_ctx.dma_vaddr, me->me_q.q_ctx.dma_size); 2492 ubsec_dma_free(sc, &me->me_q.q_ctx); 2493 } 2494 if (me->me_M.dma_map != NULL) { 2495 bzero(me->me_M.dma_vaddr, me->me_M.dma_size); 2496 ubsec_dma_free(sc, &me->me_M); 2497 } 2498 if (me->me_E.dma_map != NULL) { 2499 bzero(me->me_E.dma_vaddr, me->me_E.dma_size); 2500 ubsec_dma_free(sc, &me->me_E); 2501 } 2502 if (me->me_C.dma_map != NULL) { 2503 bzero(me->me_C.dma_vaddr, me->me_C.dma_size); 2504 ubsec_dma_free(sc, &me->me_C); 2505 } 2506 if (me->me_epb.dma_map != NULL) 2507 ubsec_dma_free(sc, &me->me_epb); 2508 free(me, M_DEVBUF); 2509 } 2510 krp->krp_status = err; 2511 crypto_kdone(krp); 2512 return (0); 2513 } 2514 2515 static int 2516 ubsec_kprocess_rsapriv(struct ubsec_softc *sc, struct cryptkop *krp, 2517 int hint) 2518 { 2519 struct ubsec_q2_rsapriv *rp = NULL; 2520 struct ubsec_mcr *mcr; 2521 struct ubsec_ctx_rsapriv *ctx; 2522 int s, err = 0; 2523 u_int padlen, msglen; 2524 2525 msglen = ubsec_ksigbits(&krp->krp_param[UBS_RSAPRIV_PAR_P]); 2526 padlen = ubsec_ksigbits(&krp->krp_param[UBS_RSAPRIV_PAR_Q]); 2527 if (msglen > padlen) 2528 padlen = msglen; 2529 2530 if (padlen <= 256) 2531 padlen = 256; 2532 else if (padlen <= 384) 2533 padlen = 384; 2534 else if (padlen <= 512) 2535 padlen = 512; 2536 else if (sc->sc_flags & UBS_FLAGS_BIGKEY && padlen <= 768) 2537 padlen = 768; 2538 else if (sc->sc_flags & UBS_FLAGS_BIGKEY && padlen <= 1024) 2539 padlen = 1024; 2540 else { 2541 err = E2BIG; 2542 goto errout; 2543 } 2544 2545 if (ubsec_ksigbits(&krp->krp_param[UBS_RSAPRIV_PAR_DP]) > padlen) { 2546 err = E2BIG; 2547 goto errout; 2548 } 2549 2550 if (ubsec_ksigbits(&krp->krp_param[UBS_RSAPRIV_PAR_DQ]) > padlen) { 2551 err = E2BIG; 2552 goto errout; 2553 } 2554 2555 if (ubsec_ksigbits(&krp->krp_param[UBS_RSAPRIV_PAR_PINV]) > padlen) { 2556 err = E2BIG; 2557 goto errout; 2558 } 2559 2560 rp = (struct ubsec_q2_rsapriv *)malloc(sizeof *rp, M_DEVBUF, M_NOWAIT); 2561 if (rp == NULL) 2562 return (ENOMEM); 2563 bzero(rp, sizeof *rp); 2564 rp->rpr_krp = krp; 2565 rp->rpr_q.q_type = UBS_CTXOP_RSAPRIV; 2566 2567 if (ubsec_dma_malloc(sc, sizeof(struct ubsec_mcr), 2568 &rp->rpr_q.q_mcr, 0)) { 2569 err = ENOMEM; 2570 goto errout; 2571 } 2572 mcr = (struct ubsec_mcr *)rp->rpr_q.q_mcr.dma_vaddr; 2573 2574 if (ubsec_dma_malloc(sc, sizeof(struct ubsec_ctx_rsapriv), 2575 &rp->rpr_q.q_ctx, 0)) { 2576 err = ENOMEM; 2577 goto errout; 2578 } 2579 ctx = (struct ubsec_ctx_rsapriv *)rp->rpr_q.q_ctx.dma_vaddr; 2580 bzero(ctx, sizeof *ctx); 2581 2582 /* Copy in p */ 2583 bcopy(krp->krp_param[UBS_RSAPRIV_PAR_P].crp_p, 2584 &ctx->rpr_buf[0 * (padlen / 8)], 2585 (krp->krp_param[UBS_RSAPRIV_PAR_P].crp_nbits + 7) / 8); 2586 2587 /* Copy in q */ 2588 bcopy(krp->krp_param[UBS_RSAPRIV_PAR_Q].crp_p, 2589 &ctx->rpr_buf[1 * (padlen / 8)], 2590 (krp->krp_param[UBS_RSAPRIV_PAR_Q].crp_nbits + 7) / 8); 2591 2592 /* Copy in dp */ 2593 bcopy(krp->krp_param[UBS_RSAPRIV_PAR_DP].crp_p, 2594 &ctx->rpr_buf[2 * (padlen / 8)], 2595 (krp->krp_param[UBS_RSAPRIV_PAR_DP].crp_nbits + 7) / 8); 2596 2597 /* Copy in dq */ 2598 bcopy(krp->krp_param[UBS_RSAPRIV_PAR_DQ].crp_p, 2599 &ctx->rpr_buf[3 * (padlen / 8)], 2600 (krp->krp_param[UBS_RSAPRIV_PAR_DQ].crp_nbits + 7) / 8); 2601 2602 /* Copy in pinv */ 2603 bcopy(krp->krp_param[UBS_RSAPRIV_PAR_PINV].crp_p, 2604 &ctx->rpr_buf[4 * (padlen / 8)], 2605 (krp->krp_param[UBS_RSAPRIV_PAR_PINV].crp_nbits + 7) / 8); 2606 2607 msglen = padlen * 2; 2608 2609 /* Copy in input message (aligned buffer/length). */ 2610 if (ubsec_ksigbits(&krp->krp_param[UBS_RSAPRIV_PAR_MSGIN]) > msglen) { 2611 /* Is this likely? */ 2612 err = E2BIG; 2613 goto errout; 2614 } 2615 if (ubsec_dma_malloc(sc, (msglen + 7) / 8, &rp->rpr_msgin, 0)) { 2616 err = ENOMEM; 2617 goto errout; 2618 } 2619 bzero(rp->rpr_msgin.dma_vaddr, (msglen + 7) / 8); 2620 bcopy(krp->krp_param[UBS_RSAPRIV_PAR_MSGIN].crp_p, 2621 rp->rpr_msgin.dma_vaddr, 2622 (krp->krp_param[UBS_RSAPRIV_PAR_MSGIN].crp_nbits + 7) / 8); 2623 2624 /* Prepare space for output message (aligned buffer/length). */ 2625 if (ubsec_ksigbits(&krp->krp_param[UBS_RSAPRIV_PAR_MSGOUT]) < msglen) { 2626 /* Is this likely? */ 2627 err = E2BIG; 2628 goto errout; 2629 } 2630 if (ubsec_dma_malloc(sc, (msglen + 7) / 8, &rp->rpr_msgout, 0)) { 2631 err = ENOMEM; 2632 goto errout; 2633 } 2634 bzero(rp->rpr_msgout.dma_vaddr, (msglen + 7) / 8); 2635 2636 mcr->mcr_pkts = htole16(1); 2637 mcr->mcr_flags = 0; 2638 mcr->mcr_cmdctxp = htole32(rp->rpr_q.q_ctx.dma_paddr); 2639 mcr->mcr_ipktbuf.pb_addr = htole32(rp->rpr_msgin.dma_paddr); 2640 mcr->mcr_ipktbuf.pb_next = 0; 2641 mcr->mcr_ipktbuf.pb_len = htole32(rp->rpr_msgin.dma_size); 2642 mcr->mcr_reserved = 0; 2643 mcr->mcr_pktlen = htole16(msglen); 2644 mcr->mcr_opktbuf.pb_addr = htole32(rp->rpr_msgout.dma_paddr); 2645 mcr->mcr_opktbuf.pb_next = 0; 2646 mcr->mcr_opktbuf.pb_len = htole32(rp->rpr_msgout.dma_size); 2647 2648 #ifdef DIAGNOSTIC 2649 if (rp->rpr_msgin.dma_paddr & 3 || rp->rpr_msgin.dma_size & 3) { 2650 panic("%s: rsapriv: invalid msgin 0x%lx(0x%lx)", 2651 sc->sc_dv.dv_xname, (u_long) rp->rpr_msgin.dma_paddr, 2652 (u_long) rp->rpr_msgin.dma_size); 2653 } 2654 if (rp->rpr_msgout.dma_paddr & 3 || rp->rpr_msgout.dma_size & 3) { 2655 panic("%s: rsapriv: invalid msgout 0x%lx(0x%lx)", 2656 sc->sc_dv.dv_xname, (u_long) rp->rpr_msgout.dma_paddr, 2657 (u_long) rp->rpr_msgout.dma_size); 2658 } 2659 #endif 2660 2661 ctx->rpr_len = (sizeof(u_int16_t) * 4) + (5 * (padlen / 8)); 2662 ctx->rpr_op = htole16(UBS_CTXOP_RSAPRIV); 2663 ctx->rpr_q_len = htole16(padlen); 2664 ctx->rpr_p_len = htole16(padlen); 2665 2666 /* 2667 * ubsec_feed2 will sync mcr and ctx, we just need to sync 2668 * everything else. 2669 */ 2670 bus_dmamap_sync(sc->sc_dmat, rp->rpr_msgin.dma_map, 2671 0, rp->rpr_msgin.dma_map->dm_mapsize, BUS_DMASYNC_PREWRITE); 2672 bus_dmamap_sync(sc->sc_dmat, rp->rpr_msgout.dma_map, 2673 0, rp->rpr_msgout.dma_map->dm_mapsize, BUS_DMASYNC_PREREAD); 2674 2675 /* Enqueue and we're done... */ 2676 s = splnet(); 2677 SIMPLEQ_INSERT_TAIL(&sc->sc_queue2, &rp->rpr_q, q_next); 2678 ubsec_feed2(sc); 2679 ubsecstats.hst_modexpcrt++; 2680 splx(s); 2681 return (0); 2682 2683 errout: 2684 if (rp != NULL) { 2685 if (rp->rpr_q.q_mcr.dma_map != NULL) 2686 ubsec_dma_free(sc, &rp->rpr_q.q_mcr); 2687 if (rp->rpr_msgin.dma_map != NULL) { 2688 bzero(rp->rpr_msgin.dma_vaddr, rp->rpr_msgin.dma_size); 2689 ubsec_dma_free(sc, &rp->rpr_msgin); 2690 } 2691 if (rp->rpr_msgout.dma_map != NULL) { 2692 bzero(rp->rpr_msgout.dma_vaddr, rp->rpr_msgout.dma_size); 2693 ubsec_dma_free(sc, &rp->rpr_msgout); 2694 } 2695 free(rp, M_DEVBUF); 2696 } 2697 krp->krp_status = err; 2698 crypto_kdone(krp); 2699 return (0); 2700 } 2701 2702 #ifdef UBSEC_DEBUG 2703 static void 2704 ubsec_dump_pb(volatile struct ubsec_pktbuf *pb) 2705 { 2706 printf("addr 0x%x (0x%x) next 0x%x\n", 2707 pb->pb_addr, pb->pb_len, pb->pb_next); 2708 } 2709 2710 static void 2711 ubsec_dump_ctx2(volatile struct ubsec_ctx_keyop *c) 2712 { 2713 printf("CTX (0x%x):\n", c->ctx_len); 2714 switch (letoh16(c->ctx_op)) { 2715 case UBS_CTXOP_RNGBYPASS: 2716 case UBS_CTXOP_RNGSHA1: 2717 break; 2718 case UBS_CTXOP_MODEXP: 2719 { 2720 struct ubsec_ctx_modexp *cx = (void *)c; 2721 int i, len; 2722 2723 printf(" Elen %u, Nlen %u\n", 2724 letoh16(cx->me_E_len), letoh16(cx->me_N_len)); 2725 len = (cx->me_N_len + 7)/8; 2726 for (i = 0; i < len; i++) 2727 printf("%s%02x", (i == 0) ? " N: " : ":", cx->me_N[i]); 2728 printf("\n"); 2729 break; 2730 } 2731 default: 2732 printf("unknown context: %x\n", c->ctx_op); 2733 } 2734 printf("END CTX\n"); 2735 } 2736 2737 static void 2738 ubsec_dump_mcr(struct ubsec_mcr *mcr) 2739 { 2740 volatile struct ubsec_mcr_add *ma; 2741 int i; 2742 2743 printf("MCR:\n"); 2744 printf(" pkts: %u, flags 0x%x\n", 2745 letoh16(mcr->mcr_pkts), letoh16(mcr->mcr_flags)); 2746 ma = (volatile struct ubsec_mcr_add *)&mcr->mcr_cmdctxp; 2747 for (i = 0; i < letoh16(mcr->mcr_pkts); i++) { 2748 printf(" %d: ctx 0x%x len 0x%x rsvd 0x%x\n", i, 2749 letoh32(ma->mcr_cmdctxp), letoh16(ma->mcr_pktlen), 2750 letoh16(ma->mcr_reserved)); 2751 printf(" %d: ipkt ", i); 2752 ubsec_dump_pb(&ma->mcr_ipktbuf); 2753 printf(" %d: opkt ", i); 2754 ubsec_dump_pb(&ma->mcr_opktbuf); 2755 ma++; 2756 } 2757 printf("END MCR\n"); 2758 } 2759 #endif /* UBSEC_DEBUG */ 2760 2761 /* 2762 * Return the number of significant bits of a big number. 2763 */ 2764 static int 2765 ubsec_ksigbits(struct crparam *cr) 2766 { 2767 u_int plen = (cr->crp_nbits + 7) / 8; 2768 int i, sig = plen * 8; 2769 u_int8_t c, *p = cr->crp_p; 2770 2771 for (i = plen - 1; i >= 0; i--) { 2772 c = p[i]; 2773 if (c != 0) { 2774 while ((c & 0x80) == 0) { 2775 sig--; 2776 c <<= 1; 2777 } 2778 break; 2779 } 2780 sig -= 8; 2781 } 2782 return (sig); 2783 } 2784 2785 static void 2786 ubsec_kshift_r(shiftbits, src, srcbits, dst, dstbits) 2787 u_int shiftbits, srcbits, dstbits; 2788 u_int8_t *src, *dst; 2789 { 2790 u_int slen, dlen; 2791 int i, si, di, n; 2792 2793 slen = (srcbits + 7) / 8; 2794 dlen = (dstbits + 7) / 8; 2795 2796 for (i = 0; i < slen; i++) 2797 dst[i] = src[i]; 2798 for (i = 0; i < dlen - slen; i++) 2799 dst[slen + i] = 0; 2800 2801 n = shiftbits / 8; 2802 if (n != 0) { 2803 si = dlen - n - 1; 2804 di = dlen - 1; 2805 while (si >= 0) 2806 dst[di--] = dst[si--]; 2807 while (di >= 0) 2808 dst[di--] = 0; 2809 } 2810 2811 n = shiftbits % 8; 2812 if (n != 0) { 2813 for (i = dlen - 1; i > 0; i--) 2814 dst[i] = (dst[i] << n) | 2815 (dst[i - 1] >> (8 - n)); 2816 dst[0] = dst[0] << n; 2817 } 2818 } 2819 2820 static void 2821 ubsec_kshift_l(shiftbits, src, srcbits, dst, dstbits) 2822 u_int shiftbits, srcbits, dstbits; 2823 u_int8_t *src, *dst; 2824 { 2825 int slen, dlen, i, n; 2826 2827 slen = (srcbits + 7) / 8; 2828 dlen = (dstbits + 7) / 8; 2829 2830 n = shiftbits / 8; 2831 for (i = 0; i < slen; i++) 2832 dst[i] = src[i + n]; 2833 for (i = 0; i < dlen - slen; i++) 2834 dst[slen + i] = 0; 2835 2836 n = shiftbits % 8; 2837 if (n != 0) { 2838 for (i = 0; i < (dlen - 1); i++) 2839 dst[i] = (dst[i] >> n) | (dst[i + 1] << (8 - n)); 2840 dst[dlen - 1] = dst[dlen - 1] >> n; 2841 } 2842 } 2843