/*	$NetBSD: ubsec.c,v 1.28 2012/10/27 17:18:35 chs Exp $	*/
/* $FreeBSD: src/sys/dev/ubsec/ubsec.c,v 1.6.2.6 2003/01/23 21:06:43 sam Exp $ */
/*	$OpenBSD: ubsec.c,v 1.127 2003/06/04 14:04:58 jason Exp $	*/

/*
 * Copyright (c) 2000 Jason L. Wright (jason@thought.net)
 * Copyright (c) 2000 Theo de Raadt (deraadt@openbsd.org)
 * Copyright (c) 2001 Patrik Lindergren (patrik@ipunplugged.com)
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * Effort sponsored in part by the Defense Advanced Research Projects
 * Agency (DARPA) and Air Force Research Laboratory, Air Force
 * Materiel Command, USAF, under agreement number F30602-01-2-0537.
 *
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ubsec.c,v 1.28 2012/10/27 17:18:35 chs Exp $");

#undef UBSEC_DEBUG

/*
 * uBsec 5[56]01, bcm580xx, bcm582x hardware crypto accelerator
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/endian.h>
#ifdef __NetBSD__
#define letoh16 htole16
#define letoh32 htole32
#define UBSEC_NO_RNG	/* until statistically tested */
#endif
#include <sys/errno.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/mbuf.h>
#include <sys/device.h>
#include <sys/queue.h>

#include <opencrypto/cryptodev.h>
#include <opencrypto/xform.h>
#ifdef __OpenBSD__
#include <dev/rndvar.h>
#include <sys/md5k.h>
#else
#include <sys/cprng.h>
#include <sys/md5.h>
#endif
#include <sys/sha1.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/ubsecreg.h>
#include <dev/pci/ubsecvar.h>

/*
 * Prototypes and count for the pci_device structure
 */
static int ubsec_probe(device_t, cfdata_t, void *);
static void ubsec_attach(device_t, device_t, void *);
static void ubsec_reset_board(struct ubsec_softc *);
static void ubsec_init_board(struct ubsec_softc *);
static void ubsec_init_pciregs(struct pci_attach_args *pa);
static void ubsec_cleanchip(struct ubsec_softc *);
static void ubsec_totalreset(struct ubsec_softc *);
static int ubsec_free_q(struct ubsec_softc*, struct ubsec_q *);

#ifdef __OpenBSD__
struct cfattach ubsec_ca = {
	sizeof(struct ubsec_softc), ubsec_probe, ubsec_attach,
};

struct cfdriver ubsec_cd = {
	0, "ubsec", DV_DULL
};
#else
CFATTACH_DECL_NEW(ubsec, sizeof(struct ubsec_softc), ubsec_probe, ubsec_attach,
    NULL, NULL);
extern struct cfdriver ubsec_cd;
#endif

/* patchable */
#ifdef UBSEC_DEBUG
extern int ubsec_debug;
int ubsec_debug=1;
#endif

static int ubsec_intr(void *);
static int ubsec_newsession(void*, u_int32_t *, struct cryptoini *);
static int ubsec_freesession(void*, u_int64_t);
static int ubsec_process(void*, struct cryptop *, int hint);
static void ubsec_callback(struct ubsec_softc *, struct ubsec_q *);
static void ubsec_feed(struct ubsec_softc *);
static void ubsec_mcopy(struct mbuf *, struct mbuf *, int, int);
static void ubsec_callback2(struct ubsec_softc *, struct ubsec_q2 *);
static void ubsec_feed2(struct ubsec_softc *);
#ifndef UBSEC_NO_RNG
static void ubsec_rng(void *);
#endif /* UBSEC_NO_RNG */
static int ubsec_dma_malloc(struct ubsec_softc *, bus_size_t,
    struct ubsec_dma_alloc *, int);
static void ubsec_dma_free(struct ubsec_softc *, struct ubsec_dma_alloc *);
static int ubsec_dmamap_aligned(bus_dmamap_t);

static int ubsec_kprocess(void*, struct cryptkop *, int);
static int ubsec_kprocess_modexp_sw(struct ubsec_softc *,
    struct cryptkop *, int);
static int ubsec_kprocess_modexp_hw(struct ubsec_softc *,
    struct cryptkop *, int);
static int ubsec_kprocess_rsapriv(struct ubsec_softc *,
    struct cryptkop *, int);
static void ubsec_kfree(struct ubsec_softc *, struct ubsec_q2 *);
static int ubsec_ksigbits(struct crparam *);
static void ubsec_kshift_r(u_int, u_int8_t *, u_int, u_int8_t *, u_int);
static void ubsec_kshift_l(u_int, u_int8_t *, u_int, u_int8_t *, u_int);

#ifdef UBSEC_DEBUG
static void ubsec_dump_pb(volatile struct ubsec_pktbuf *);
static void ubsec_dump_mcr(struct ubsec_mcr *);
static void ubsec_dump_ctx2(volatile struct ubsec_ctx_keyop *);
#endif

#define	READ_REG(sc,r) \
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (r))

#define WRITE_REG(sc,reg,val) \
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, reg, val)

#define	SWAP32(x) (x) = htole32(ntohl((x)))
#ifndef HTOLE32
#define	HTOLE32(x) (x) = htole32(x)
#endif

struct ubsec_stats ubsecstats;

/*
 * ubsec_maxbatch controls the number of crypto ops to voluntarily
 * collect into one submission to the hardware. This batching happens
 * when ops are dispatched from the crypto subsystem with a hint that
 * more are to follow immediately. These ops must also not be marked
 * with a ``no delay'' flag.
 */
static int ubsec_maxbatch = 1;
#ifdef SYSCTL_INT
SYSCTL_INT(_kern, OID_AUTO, ubsec_maxbatch, CTLFLAG_RW, &ubsec_maxbatch,
    0, "Broadcom driver: max ops to batch w/o interrupt");
#endif

/*
 * ubsec_maxaggr controls the number of crypto ops to submit to the
 * hardware as a unit. This aggregation reduces the number of interrupts
 * to the host at the expense of increased latency (for all but the last
 * operation). For network traffic setting this to one yields the highest
 * performance but at the expense of more interrupt processing.
 */
static int ubsec_maxaggr = 1;
#ifdef SYSCTL_INT
SYSCTL_INT(_kern, OID_AUTO, ubsec_maxaggr, CTLFLAG_RW, &ubsec_maxaggr,
    0, "Broadcom driver: max ops to aggregate under one interrupt");
#endif

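/*
 * Table of devices this driver attaches to. ubsec_flags selects the
 * optional features of each part (key unit, RNG, long-form packet
 * context, hardware normalization, >1024-bit keys) and ubsec_statmask
 * lists the BS_STAT interrupt bits the handler should look at.
 */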
static const struct ubsec_product {
	pci_vendor_id_t		ubsec_vendor;
	pci_product_id_t	ubsec_product;
	int			ubsec_flags;
	int			ubsec_statmask;
	const char		*ubsec_name;
} ubsec_products[] = {
	{ PCI_VENDOR_BLUESTEEL,	PCI_PRODUCT_BLUESTEEL_5501,
	  0,
	  BS_STAT_MCR1_DONE | BS_STAT_DMAERR,
	  "Bluesteel 5501"
	},
	{ PCI_VENDOR_BLUESTEEL,	PCI_PRODUCT_BLUESTEEL_5601,
	  UBS_FLAGS_KEY | UBS_FLAGS_RNG,
	  BS_STAT_MCR1_DONE | BS_STAT_DMAERR,
	  "Bluesteel 5601"
	},

	{ PCI_VENDOR_BROADCOM,	PCI_PRODUCT_BROADCOM_5801,
	  0,
	  BS_STAT_MCR1_DONE | BS_STAT_DMAERR,
	  "Broadcom BCM5801"
	},

	{ PCI_VENDOR_BROADCOM,	PCI_PRODUCT_BROADCOM_5802,
	  UBS_FLAGS_KEY | UBS_FLAGS_RNG,
	  BS_STAT_MCR1_DONE | BS_STAT_DMAERR,
	  "Broadcom BCM5802"
	},

	{ PCI_VENDOR_BROADCOM,	PCI_PRODUCT_BROADCOM_5805,
	  UBS_FLAGS_KEY | UBS_FLAGS_RNG,
	  BS_STAT_MCR1_DONE | BS_STAT_DMAERR,
	  "Broadcom BCM5805"
	},

	{ PCI_VENDOR_BROADCOM,	PCI_PRODUCT_BROADCOM_5820,
	  UBS_FLAGS_KEY | UBS_FLAGS_RNG | UBS_FLAGS_LONGCTX |
	      UBS_FLAGS_HWNORM | UBS_FLAGS_BIGKEY,
	  BS_STAT_MCR1_DONE | BS_STAT_DMAERR,
	  "Broadcom BCM5820"
	},

	{ PCI_VENDOR_BROADCOM,	PCI_PRODUCT_BROADCOM_5821,
	  UBS_FLAGS_KEY | UBS_FLAGS_RNG | UBS_FLAGS_LONGCTX |
	      UBS_FLAGS_HWNORM | UBS_FLAGS_BIGKEY,
	  BS_STAT_MCR1_DONE | BS_STAT_DMAERR |
	      BS_STAT_MCR1_ALLEMPTY | BS_STAT_MCR2_ALLEMPTY,
	  "Broadcom BCM5821"
	},
	{ PCI_VENDOR_SUN,	PCI_PRODUCT_SUN_SCA1K,
	  UBS_FLAGS_KEY | UBS_FLAGS_RNG | UBS_FLAGS_LONGCTX |
	      UBS_FLAGS_HWNORM | UBS_FLAGS_BIGKEY,
	  BS_STAT_MCR1_DONE | BS_STAT_DMAERR |
	      BS_STAT_MCR1_ALLEMPTY | BS_STAT_MCR2_ALLEMPTY,
	  "Sun Crypto Accelerator 1000"
	},
	{ PCI_VENDOR_SUN,	PCI_PRODUCT_SUN_5821,
	  UBS_FLAGS_KEY | UBS_FLAGS_RNG | UBS_FLAGS_LONGCTX |
	      UBS_FLAGS_HWNORM | UBS_FLAGS_BIGKEY,
	  BS_STAT_MCR1_DONE | BS_STAT_DMAERR |
	      BS_STAT_MCR1_ALLEMPTY | BS_STAT_MCR2_ALLEMPTY,
	  "Broadcom BCM5821 (Sun)"
	},

	{ PCI_VENDOR_BROADCOM,	PCI_PRODUCT_BROADCOM_5822,
	  UBS_FLAGS_KEY | UBS_FLAGS_RNG | UBS_FLAGS_LONGCTX |
	      UBS_FLAGS_HWNORM | UBS_FLAGS_BIGKEY,
	  BS_STAT_MCR1_DONE | BS_STAT_DMAERR |
	      BS_STAT_MCR1_ALLEMPTY | BS_STAT_MCR2_ALLEMPTY,
	  "Broadcom BCM5822"
	},

	{ PCI_VENDOR_BROADCOM,	PCI_PRODUCT_BROADCOM_5823,
	  UBS_FLAGS_KEY | UBS_FLAGS_RNG | UBS_FLAGS_LONGCTX |
	      UBS_FLAGS_HWNORM | UBS_FLAGS_BIGKEY,
	  BS_STAT_MCR1_DONE | BS_STAT_DMAERR |
	      BS_STAT_MCR1_ALLEMPTY | BS_STAT_MCR2_ALLEMPTY,
	  "Broadcom BCM5823"
	},

	{ 0,			0,
	  0,
	  0,
	  NULL
	}
};

static const struct ubsec_product *
ubsec_lookup(const struct pci_attach_args *pa)
{
	const struct ubsec_product *up;

	for (up = ubsec_products; up->ubsec_name != NULL; up++) {
		if (PCI_VENDOR(pa->pa_id) == up->ubsec_vendor &&
		    PCI_PRODUCT(pa->pa_id) == up->ubsec_product)
			return (up);
	}
	return (NULL);
}

static int
ubsec_probe(device_t parent, cfdata_t match, void *aux)
{
	struct pci_attach_args *pa = (struct pci_attach_args *)aux;

	if (ubsec_lookup(pa) != NULL)
		return (1);

	return (0);
}

static void
ubsec_attach(device_t parent, device_t self, void *aux)
{
	struct ubsec_softc *sc = device_private(self);
	struct pci_attach_args *pa = aux;
	const struct ubsec_product *up;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	struct ubsec_dma *dmap;
	u_int32_t cmd, i;

	sc->sc_dev = self;
	up = ubsec_lookup(pa);
	if (up == NULL) {
		printf("\n");
		panic("ubsec_attach: impossible");
	}

	pci_aprint_devinfo_fancy(pa, "Crypto processor", up->ubsec_name, 1);

	SIMPLEQ_INIT(&sc->sc_queue);
	SIMPLEQ_INIT(&sc->sc_qchip);
	SIMPLEQ_INIT(&sc->sc_queue2);
	SIMPLEQ_INIT(&sc->sc_qchip2);
	SIMPLEQ_INIT(&sc->sc_q2free);

	sc->sc_flags = up->ubsec_flags;
	sc->sc_statmask = up->ubsec_statmask;

	cmd = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	cmd |= PCI_COMMAND_MASTER_ENABLE;
	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, cmd);

	if (pci_mapreg_map(pa, BS_BAR, PCI_MAPREG_TYPE_MEM, 0,
	    &sc->sc_st, &sc->sc_sh, NULL, NULL)) {
		aprint_error_dev(self, "can't find mem space");
		return;
	}

	sc->sc_dmat = pa->pa_dmat;

	if (pci_intr_map(pa, &ih)) {
		aprint_error_dev(self, "couldn't map interrupt\n");
		return;
	}
	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, ubsec_intr, sc);
	if (sc->sc_ih == NULL) {
		aprint_error_dev(self, "couldn't establish interrupt");
		if (intrstr != NULL)
			aprint_error(" at %s", intrstr);
		aprint_error("\n");
		return;
	}
	aprint_normal_dev(self, "interrupting at %s\n", intrstr);

	sc->sc_cid = crypto_get_driverid(0);
	if (sc->sc_cid < 0) {
		aprint_error_dev(self, "couldn't get crypto driver id\n");
		pci_intr_disestablish(pc, sc->sc_ih);
		return;
	}

	SIMPLEQ_INIT(&sc->sc_freequeue);
	dmap = sc->sc_dmaa;
	for (i = 0; i < UBS_MAX_NQUEUE; i++, dmap++) {
		struct ubsec_q *q;

		q = (struct ubsec_q *)malloc(sizeof(struct ubsec_q),
		    M_DEVBUF, M_NOWAIT);
		if (q == NULL) {
			aprint_error_dev(self, "can't allocate queue buffers\n");
			break;
		}

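		/*
		 * Each queue entry owns a "dmachunk": one DMA-safe
		 * allocation holding the MCR header together with the
		 * packet context and scatter/gather descriptor arrays
		 * (d_mcr, d_mcradd, d_sbuf, d_dbuf, d_macbuf, d_ctx)
		 * that the chip reads and writes directly.
		 */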
		if (ubsec_dma_malloc(sc, sizeof(struct ubsec_dmachunk),
		    &dmap->d_alloc, 0)) {
			aprint_error_dev(self, "can't allocate dma buffers\n");
			free(q, M_DEVBUF);
			break;
		}
		dmap->d_dma = (struct ubsec_dmachunk *)dmap->d_alloc.dma_vaddr;

		q->q_dma = dmap;
		sc->sc_queuea[i] = q;

		SIMPLEQ_INSERT_TAIL(&sc->sc_freequeue, q, q_next);
	}

	crypto_register(sc->sc_cid, CRYPTO_3DES_CBC, 0, 0,
	    ubsec_newsession, ubsec_freesession, ubsec_process, sc);
	crypto_register(sc->sc_cid, CRYPTO_DES_CBC, 0, 0,
	    ubsec_newsession, ubsec_freesession, ubsec_process, sc);
	crypto_register(sc->sc_cid, CRYPTO_MD5_HMAC_96, 0, 0,
	    ubsec_newsession, ubsec_freesession, ubsec_process, sc);
	crypto_register(sc->sc_cid, CRYPTO_SHA1_HMAC_96, 0, 0,
	    ubsec_newsession, ubsec_freesession, ubsec_process, sc);

	/*
	 * Reset Broadcom chip
	 */
	ubsec_reset_board(sc);

	/*
	 * Init Broadcom specific PCI settings
	 */
	ubsec_init_pciregs(pa);

	/*
	 * Init Broadcom chip
	 */
	ubsec_init_board(sc);

#ifndef UBSEC_NO_RNG
	if (sc->sc_flags & UBS_FLAGS_RNG) {
		sc->sc_statmask |= BS_STAT_MCR2_DONE;

		if (ubsec_dma_malloc(sc, sizeof(struct ubsec_mcr),
		    &sc->sc_rng.rng_q.q_mcr, 0))
			goto skip_rng;

		if (ubsec_dma_malloc(sc, sizeof(struct ubsec_ctx_rngbypass),
		    &sc->sc_rng.rng_q.q_ctx, 0)) {
			ubsec_dma_free(sc, &sc->sc_rng.rng_q.q_mcr);
			goto skip_rng;
		}

		if (ubsec_dma_malloc(sc, sizeof(u_int32_t) *
		    UBSEC_RNG_BUFSIZ, &sc->sc_rng.rng_buf, 0)) {
			ubsec_dma_free(sc, &sc->sc_rng.rng_q.q_ctx);
			ubsec_dma_free(sc, &sc->sc_rng.rng_q.q_mcr);
			goto skip_rng;
		}

		if (hz >= 100)
			sc->sc_rnghz = hz / 100;
		else
			sc->sc_rnghz = 1;
#ifdef __OpenBSD__
		timeout_set(&sc->sc_rngto, ubsec_rng, sc);
		timeout_add(&sc->sc_rngto, sc->sc_rnghz);
#else
		callout_init(&sc->sc_rngto, 0);
		callout_reset(&sc->sc_rngto, sc->sc_rnghz, ubsec_rng, sc);
#endif
skip_rng:
		if (sc->sc_rnghz)
			aprint_normal_dev(self, "random number generator enabled\n");
		else
			aprint_error_dev(self, "WARNING: random number generator "
			    "disabled\n");
	}
#endif /* UBSEC_NO_RNG */

	if (sc->sc_flags & UBS_FLAGS_KEY) {
		sc->sc_statmask |= BS_STAT_MCR2_DONE;

		crypto_kregister(sc->sc_cid, CRK_MOD_EXP, 0,
		    ubsec_kprocess, sc);
#if 0
		crypto_kregister(sc->sc_cid, CRK_MOD_EXP_CRT, 0,
		    ubsec_kprocess, sc);
#endif
	}
}

/*
 * UBSEC Interrupt routine
 */
static int
ubsec_intr(void *arg)
{
	struct ubsec_softc *sc = arg;
	volatile u_int32_t stat;
	struct ubsec_q *q;
	struct ubsec_dma *dmap;
	int npkts = 0, i;

	stat = READ_REG(sc, BS_STAT);
	stat &= sc->sc_statmask;
	if (stat == 0) {
		return (0);
	}

	WRITE_REG(sc, BS_STAT, stat);		/* IACK */

	/*
	 * Check to see if we have any packets waiting for us
	 */
	if ((stat & BS_STAT_MCR1_DONE)) {
		while (!SIMPLEQ_EMPTY(&sc->sc_qchip)) {
			q = SIMPLEQ_FIRST(&sc->sc_qchip);
			dmap = q->q_dma;

			if ((dmap->d_dma->d_mcr.mcr_flags & htole16(UBS_MCR_DONE)) == 0)
				break;

			q = SIMPLEQ_FIRST(&sc->sc_qchip);
			SIMPLEQ_REMOVE_HEAD(&sc->sc_qchip, /*q,*/ q_next);

			npkts = q->q_nstacked_mcrs;
			sc->sc_nqchip -= 1+npkts;
			/*
			 * search for further sc_qchip ubsec_q's that share
			 * the same MCR, and complete them too; they must be
			 * at the top.
			 */
			for (i = 0; i < npkts; i++) {
				if (q->q_stacked_mcr[i])
					ubsec_callback(sc, q->q_stacked_mcr[i]);
				else
					break;
			}
			ubsec_callback(sc, q);
		}

		/*
		 * Don't send any more packets to the chip if there has been
		 * a DMAERR.
		 */
		if (!(stat & BS_STAT_DMAERR))
			ubsec_feed(sc);
	}

	/*
	 * Check to see if we have any key setups/rng's waiting for us
	 */
	if ((sc->sc_flags & (UBS_FLAGS_KEY|UBS_FLAGS_RNG)) &&
	    (stat & BS_STAT_MCR2_DONE)) {
		struct ubsec_q2 *q2;
		struct ubsec_mcr *mcr;

		while (!SIMPLEQ_EMPTY(&sc->sc_qchip2)) {
			q2 = SIMPLEQ_FIRST(&sc->sc_qchip2);

			bus_dmamap_sync(sc->sc_dmat, q2->q_mcr.dma_map,
			    0, q2->q_mcr.dma_map->dm_mapsize,
			    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

			mcr = (struct ubsec_mcr *)q2->q_mcr.dma_vaddr;
			if ((mcr->mcr_flags & htole16(UBS_MCR_DONE)) == 0) {
				bus_dmamap_sync(sc->sc_dmat,
				    q2->q_mcr.dma_map, 0,
				    q2->q_mcr.dma_map->dm_mapsize,
				    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
				break;
			}
			q2 = SIMPLEQ_FIRST(&sc->sc_qchip2);
			SIMPLEQ_REMOVE_HEAD(&sc->sc_qchip2, /*q2,*/ q_next);
			ubsec_callback2(sc, q2);
			/*
			 * Don't send any more packets to the chip if there
			 * has been a DMAERR.
			 */
			if (!(stat & BS_STAT_DMAERR))
				ubsec_feed2(sc);
		}
	}

	/*
	 * Check to see if we got any DMA Error
	 */
	if (stat & BS_STAT_DMAERR) {
#ifdef UBSEC_DEBUG
		if (ubsec_debug) {
			volatile u_int32_t a = READ_REG(sc, BS_ERR);

			printf("%s: dmaerr %s@%08x\n", device_xname(sc->sc_dev),
			    (a & BS_ERR_READ) ? "read" : "write",
			    a & BS_ERR_ADDR);
		}
#endif /* UBSEC_DEBUG */
		ubsecstats.hst_dmaerr++;
		ubsec_totalreset(sc);
		ubsec_feed(sc);
	}

	if (sc->sc_needwakeup) {		/* XXX check high watermark */
		int wkeup = sc->sc_needwakeup & (CRYPTO_SYMQ|CRYPTO_ASYMQ);
#ifdef UBSEC_DEBUG
		if (ubsec_debug)
			printf("%s: wakeup crypto (%x)\n", device_xname(sc->sc_dev),
			    sc->sc_needwakeup);
#endif /* UBSEC_DEBUG */
		sc->sc_needwakeup &= ~wkeup;
		crypto_unblock(sc->sc_cid, wkeup);
	}
	return (1);
}

/*
 * ubsec_feed() - aggregate and post requests to chip
 * OpenBSD comments:
 * It is assumed that the caller is at splnet()
 */
static void
ubsec_feed(struct ubsec_softc *sc)
{
	struct ubsec_q *q, *q2;
	int npkts, i;
	void *v;
	u_int32_t stat;
#ifdef UBSEC_DEBUG
	static int max;
#endif /* UBSEC_DEBUG */

	npkts = sc->sc_nqueue;
	if (npkts > ubsecstats.hst_maxqueue)
		ubsecstats.hst_maxqueue = npkts;
	if (npkts < 2)
		goto feed1;

	/*
	 * Decide how many ops to combine in a single MCR. We cannot
	 * aggregate more than UBS_MAX_AGGR because this is the number
	 * of slots defined in the data structure. Otherwise we clamp
	 * based on the tunable parameter ubsec_maxaggr. Note that
	 * aggregation can happen in two ways: either by batching ops
	 * from above or because the h/w backs up and throttles us.
	 * Aggregating ops reduces the number of interrupts to the host
	 * but also (potentially) increases the latency for processing
	 * completed ops as we only get an interrupt when all aggregated
	 * ops have completed.
	 */
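	/*
	 * Illustrative numbers: with sc_nqueue == 5 and both limits at
	 * 4 or more, npkts is clamped to 4, so one MCR carries four ops
	 * (one header plus three stacked) and the fifth op stays queued
	 * for the next feed. (The shipped defaults of 1 always take the
	 * single-op path below.)
	 */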
	if (npkts > UBS_MAX_AGGR)
		npkts = UBS_MAX_AGGR;
	if (npkts > ubsec_maxaggr)
		npkts = ubsec_maxaggr;
	if (npkts > ubsecstats.hst_maxbatch)
		ubsecstats.hst_maxbatch = npkts;
	if (npkts < 2)
		goto feed1;
	ubsecstats.hst_totbatch += npkts-1;

	if ((stat = READ_REG(sc, BS_STAT)) & (BS_STAT_MCR1_FULL | BS_STAT_DMAERR)) {
		if (stat & BS_STAT_DMAERR) {
			ubsec_totalreset(sc);
			ubsecstats.hst_dmaerr++;
		} else {
			ubsecstats.hst_mcr1full++;
		}
		return;
	}

#ifdef UBSEC_DEBUG
	if (ubsec_debug)
		printf("merging %d records\n", npkts);
	/* XXX temporary aggregation statistics reporting code */
	if (max < npkts) {
		max = npkts;
		printf("%s: new max aggregate %d\n", device_xname(sc->sc_dev), max);
	}
#endif /* UBSEC_DEBUG */

	q = SIMPLEQ_FIRST(&sc->sc_queue);
	SIMPLEQ_REMOVE_HEAD(&sc->sc_queue, /*q,*/ q_next);
	--sc->sc_nqueue;

	bus_dmamap_sync(sc->sc_dmat, q->q_src_map,
	    0, q->q_src_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
	if (q->q_dst_map != NULL)
		bus_dmamap_sync(sc->sc_dmat, q->q_dst_map,
		    0, q->q_dst_map->dm_mapsize, BUS_DMASYNC_PREREAD);

	q->q_nstacked_mcrs = npkts - 1;		/* Number of packets stacked */

	for (i = 0; i < q->q_nstacked_mcrs; i++) {
		q2 = SIMPLEQ_FIRST(&sc->sc_queue);
		bus_dmamap_sync(sc->sc_dmat, q2->q_src_map,
		    0, q2->q_src_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
		if (q2->q_dst_map != NULL)
			bus_dmamap_sync(sc->sc_dmat, q2->q_dst_map,
			    0, q2->q_dst_map->dm_mapsize, BUS_DMASYNC_PREREAD);
		q2 = SIMPLEQ_FIRST(&sc->sc_queue);
		SIMPLEQ_REMOVE_HEAD(&sc->sc_queue, /*q2,*/ q_next);
		--sc->sc_nqueue;

		v = ((void *)&q2->q_dma->d_dma->d_mcr);
		v = (char*)v + (sizeof(struct ubsec_mcr) -
		    sizeof(struct ubsec_mcr_add));
		memcpy(&q->q_dma->d_dma->d_mcradd[i], v, sizeof(struct ubsec_mcr_add));
		q->q_stacked_mcr[i] = q2;
	}
	q->q_dma->d_dma->d_mcr.mcr_pkts = htole16(npkts);
	SIMPLEQ_INSERT_TAIL(&sc->sc_qchip, q, q_next);
	sc->sc_nqchip += npkts;
	if (sc->sc_nqchip > ubsecstats.hst_maxqchip)
		ubsecstats.hst_maxqchip = sc->sc_nqchip;
	bus_dmamap_sync(sc->sc_dmat, q->q_dma->d_alloc.dma_map,
	    0, q->q_dma->d_alloc.dma_map->dm_mapsize,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	WRITE_REG(sc, BS_MCR1, q->q_dma->d_alloc.dma_paddr +
	    offsetof(struct ubsec_dmachunk, d_mcr));
	return;

feed1:
	while (!SIMPLEQ_EMPTY(&sc->sc_queue)) {
		if ((stat = READ_REG(sc, BS_STAT)) & (BS_STAT_MCR1_FULL | BS_STAT_DMAERR)) {
			if (stat & BS_STAT_DMAERR) {
				ubsec_totalreset(sc);
				ubsecstats.hst_dmaerr++;
			} else {
				ubsecstats.hst_mcr1full++;
			}
			break;
		}

		q = SIMPLEQ_FIRST(&sc->sc_queue);

		bus_dmamap_sync(sc->sc_dmat, q->q_src_map,
		    0, q->q_src_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
		if (q->q_dst_map != NULL)
			bus_dmamap_sync(sc->sc_dmat, q->q_dst_map,
			    0, q->q_dst_map->dm_mapsize, BUS_DMASYNC_PREREAD);
		bus_dmamap_sync(sc->sc_dmat, q->q_dma->d_alloc.dma_map,
		    0, q->q_dma->d_alloc.dma_map->dm_mapsize,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		WRITE_REG(sc, BS_MCR1, q->q_dma->d_alloc.dma_paddr +
		    offsetof(struct ubsec_dmachunk, d_mcr));
#ifdef UBSEC_DEBUG
		if (ubsec_debug)
			printf("feed: q->chip %p %08x stat %08x\n",
			    q, (u_int32_t)q->q_dma->d_alloc.dma_paddr,
			    stat);
#endif /* UBSEC_DEBUG */
		q = SIMPLEQ_FIRST(&sc->sc_queue);
		SIMPLEQ_REMOVE_HEAD(&sc->sc_queue, /*q,*/ q_next);
		--sc->sc_nqueue;
		SIMPLEQ_INSERT_TAIL(&sc->sc_qchip, q, q_next);
		sc->sc_nqchip++;
	}
	if (sc->sc_nqchip > ubsecstats.hst_maxqchip)
		ubsecstats.hst_maxqchip = sc->sc_nqchip;
}

/*
 * Allocate a new 'session' and return an encoded session id. 'sidp'
 * contains our registration id, and should contain an encoded session
 * id on successful allocation.
 */
static int
ubsec_newsession(void *arg, u_int32_t *sidp, struct cryptoini *cri)
{
	struct cryptoini *c, *encini = NULL, *macini = NULL;
	struct ubsec_softc *sc;
	struct ubsec_session *ses = NULL;
	MD5_CTX md5ctx;
	SHA1_CTX sha1ctx;
	int i, sesn;

	sc = arg;
	KASSERT(sc != NULL /*, ("ubsec_newsession: null softc")*/);

	if (sidp == NULL || cri == NULL || sc == NULL)
		return (EINVAL);

	for (c = cri; c != NULL; c = c->cri_next) {
		if (c->cri_alg == CRYPTO_MD5_HMAC_96 ||
		    c->cri_alg == CRYPTO_SHA1_HMAC_96) {
			if (macini)
				return (EINVAL);
			macini = c;
		} else if (c->cri_alg == CRYPTO_DES_CBC ||
		    c->cri_alg == CRYPTO_3DES_CBC) {
			if (encini)
				return (EINVAL);
			encini = c;
		} else
			return (EINVAL);
	}
	if (encini == NULL && macini == NULL)
		return (EINVAL);

	if (sc->sc_sessions == NULL) {
		ses = sc->sc_sessions = (struct ubsec_session *)malloc(
		    sizeof(struct ubsec_session), M_DEVBUF, M_NOWAIT);
		if (ses == NULL)
			return (ENOMEM);
		sesn = 0;
		sc->sc_nsessions = 1;
	} else {
		for (sesn = 0; sesn < sc->sc_nsessions; sesn++) {
			if (sc->sc_sessions[sesn].ses_used == 0) {
				ses = &sc->sc_sessions[sesn];
				break;
			}
		}

		if (ses == NULL) {
			sesn = sc->sc_nsessions;
			ses = (struct ubsec_session *)malloc((sesn + 1) *
			    sizeof(struct ubsec_session), M_DEVBUF, M_NOWAIT);
			if (ses == NULL)
				return (ENOMEM);
			memcpy(ses, sc->sc_sessions, sesn *
			    sizeof(struct ubsec_session));
			memset(sc->sc_sessions, 0, sesn *
			    sizeof(struct ubsec_session));
			free(sc->sc_sessions, M_DEVBUF);
			sc->sc_sessions = ses;
			ses = &sc->sc_sessions[sesn];
			sc->sc_nsessions++;
		}
	}

	memset(ses, 0, sizeof(struct ubsec_session));
	ses->ses_used = 1;
	if (encini) {
		/* get an IV, network byte order */
#ifdef __NetBSD__
		cprng_fast(ses->ses_iv, sizeof(ses->ses_iv));
#else
		get_random_bytes(ses->ses_iv, sizeof(ses->ses_iv));
#endif

		/* Go ahead and compute key in ubsec's byte order */
		if (encini->cri_alg == CRYPTO_DES_CBC) {
			memcpy(&ses->ses_deskey[0], encini->cri_key, 8);
			memcpy(&ses->ses_deskey[2], encini->cri_key, 8);
			memcpy(&ses->ses_deskey[4], encini->cri_key, 8);
		} else
			memcpy(ses->ses_deskey, encini->cri_key, 24);

		SWAP32(ses->ses_deskey[0]);
		SWAP32(ses->ses_deskey[1]);
		SWAP32(ses->ses_deskey[2]);
		SWAP32(ses->ses_deskey[3]);
		SWAP32(ses->ses_deskey[4]);
		SWAP32(ses->ses_deskey[5]);
	}

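	/*
	 * Standard HMAC key precomputation: hash (key XOR ipad) and
	 * (key XOR opad), each padded out to the hash block size, and
	 * keep only the raw intermediate digest states. The chip
	 * resumes from these precomputed inner/outer states on every
	 * packet, so the raw key never accompanies a request.
	 */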
	if (macini) {
		for (i = 0; i < macini->cri_klen / 8; i++)
			macini->cri_key[i] ^= HMAC_IPAD_VAL;

		if (macini->cri_alg == CRYPTO_MD5_HMAC_96) {
			MD5Init(&md5ctx);
			MD5Update(&md5ctx, macini->cri_key,
			    macini->cri_klen / 8);
			MD5Update(&md5ctx, hmac_ipad_buffer,
			    HMAC_BLOCK_LEN - (macini->cri_klen / 8));
			memcpy(ses->ses_hminner, md5ctx.state,
			    sizeof(md5ctx.state));
		} else {
			SHA1Init(&sha1ctx);
			SHA1Update(&sha1ctx, macini->cri_key,
			    macini->cri_klen / 8);
			SHA1Update(&sha1ctx, hmac_ipad_buffer,
			    HMAC_BLOCK_LEN - (macini->cri_klen / 8));
			memcpy(ses->ses_hminner, sha1ctx.state,
			    sizeof(sha1ctx.state));
		}

		for (i = 0; i < macini->cri_klen / 8; i++)
			macini->cri_key[i] ^= (HMAC_IPAD_VAL ^ HMAC_OPAD_VAL);

		if (macini->cri_alg == CRYPTO_MD5_HMAC_96) {
			MD5Init(&md5ctx);
			MD5Update(&md5ctx, macini->cri_key,
			    macini->cri_klen / 8);
			MD5Update(&md5ctx, hmac_opad_buffer,
			    HMAC_BLOCK_LEN - (macini->cri_klen / 8));
			memcpy(ses->ses_hmouter, md5ctx.state,
			    sizeof(md5ctx.state));
		} else {
			SHA1Init(&sha1ctx);
			SHA1Update(&sha1ctx, macini->cri_key,
			    macini->cri_klen / 8);
			SHA1Update(&sha1ctx, hmac_opad_buffer,
			    HMAC_BLOCK_LEN - (macini->cri_klen / 8));
			memcpy(ses->ses_hmouter, sha1ctx.state,
			    sizeof(sha1ctx.state));
		}

		for (i = 0; i < macini->cri_klen / 8; i++)
			macini->cri_key[i] ^= HMAC_OPAD_VAL;
	}

	*sidp = UBSEC_SID(device_unit(sc->sc_dev), sesn);
	return (0);
}

/*
 * Deallocate a session.
 */
static int
ubsec_freesession(void *arg, u_int64_t tid)
{
	struct ubsec_softc *sc;
	int session;
	u_int32_t sid = ((u_int32_t) tid) & 0xffffffff;

	sc = arg;
	KASSERT(sc != NULL /*, ("ubsec_freesession: null softc")*/);

	session = UBSEC_SESSION(sid);
	if (session >= sc->sc_nsessions)
		return (EINVAL);

	memset(&sc->sc_sessions[session], 0, sizeof(sc->sc_sessions[session]));
	return (0);
}

#ifdef __FreeBSD__ /* Ugly gratuitous changes to bus_dma */
static void
ubsec_op_cb(void *arg, bus_dma_segment_t *seg, int nsegs, bus_size_t mapsize, int error)
{
	struct ubsec_operand *op = arg;

	KASSERT(nsegs <= UBS_MAX_SCATTER
	    /*, ("Too many DMA segments returned when mapping operand")*/);
#ifdef UBSEC_DEBUG
	if (ubsec_debug)
		printf("ubsec_op_cb: mapsize %u nsegs %d\n",
		    (u_int) mapsize, nsegs);
#endif
	op->mapsize = mapsize;
	op->nsegs = nsegs;
	memcpy(op->segs, seg, nsegs * sizeof (seg[0]));
}
#endif

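/*
 * Main symmetric-crypto entry point: build a single MCR describing the
 * request (cipher and/or HMAC context, IV, and the input and output
 * scatter/gather chains), queue it, and let ubsec_feed() post it to the
 * chip. Completion is reported via ubsec_callback() from the interrupt
 * handler.
 */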
static int
ubsec_process(void *arg, struct cryptop *crp, int hint)
{
	struct ubsec_q *q = NULL;
#ifdef __OpenBSD__
	int card;
#endif
	int err = 0, i, j, s, nicealign;
	struct ubsec_softc *sc;
	struct cryptodesc *crd1, *crd2, *maccrd, *enccrd;
	int encoffset = 0, macoffset = 0, cpskip, cpoffset;
	int sskip, dskip, stheend, dtheend;
	int16_t coffset;
	struct ubsec_session *ses;
	struct ubsec_pktctx ctx;
	struct ubsec_dma *dmap = NULL;

	sc = arg;
	KASSERT(sc != NULL /*, ("ubsec_process: null softc")*/);

	if (crp == NULL || crp->crp_callback == NULL || sc == NULL) {
		ubsecstats.hst_invalid++;
		return (EINVAL);
	}
	if (UBSEC_SESSION(crp->crp_sid) >= sc->sc_nsessions) {
		ubsecstats.hst_badsession++;
		return (EINVAL);
	}

	s = splnet();

	if (SIMPLEQ_EMPTY(&sc->sc_freequeue)) {
		ubsecstats.hst_queuefull++;
		sc->sc_needwakeup |= CRYPTO_SYMQ;
		splx(s);
		return (ERESTART);
	}

	q = SIMPLEQ_FIRST(&sc->sc_freequeue);
	SIMPLEQ_REMOVE_HEAD(&sc->sc_freequeue, /*q,*/ q_next);
	splx(s);

	dmap = q->q_dma; /* Save dma pointer */
	memset(q, 0, sizeof(struct ubsec_q));
	memset(&ctx, 0, sizeof(ctx));

	q->q_sesn = UBSEC_SESSION(crp->crp_sid);
	q->q_dma = dmap;
	ses = &sc->sc_sessions[q->q_sesn];

	if (crp->crp_flags & CRYPTO_F_IMBUF) {
		q->q_src_m = (struct mbuf *)crp->crp_buf;
		q->q_dst_m = (struct mbuf *)crp->crp_buf;
	} else if (crp->crp_flags & CRYPTO_F_IOV) {
		q->q_src_io = (struct uio *)crp->crp_buf;
		q->q_dst_io = (struct uio *)crp->crp_buf;
	} else {
		ubsecstats.hst_badflags++;
		err = EINVAL;
		goto errout;	/* XXX we don't handle contiguous blocks! */
	}

	memset(&dmap->d_dma->d_mcr, 0, sizeof(struct ubsec_mcr));

	dmap->d_dma->d_mcr.mcr_pkts = htole16(1);
	dmap->d_dma->d_mcr.mcr_flags = 0;
	q->q_crp = crp;

	crd1 = crp->crp_desc;
	if (crd1 == NULL) {
		ubsecstats.hst_nodesc++;
		err = EINVAL;
		goto errout;
	}
	crd2 = crd1->crd_next;

	if (crd2 == NULL) {
		if (crd1->crd_alg == CRYPTO_MD5_HMAC_96 ||
		    crd1->crd_alg == CRYPTO_SHA1_HMAC_96) {
			maccrd = crd1;
			enccrd = NULL;
		} else if (crd1->crd_alg == CRYPTO_DES_CBC ||
		    crd1->crd_alg == CRYPTO_3DES_CBC) {
			maccrd = NULL;
			enccrd = crd1;
		} else {
			ubsecstats.hst_badalg++;
			err = EINVAL;
			goto errout;
		}
	} else {
		if ((crd1->crd_alg == CRYPTO_MD5_HMAC_96 ||
		    crd1->crd_alg == CRYPTO_SHA1_HMAC_96) &&
		    (crd2->crd_alg == CRYPTO_DES_CBC ||
		    crd2->crd_alg == CRYPTO_3DES_CBC) &&
		    ((crd2->crd_flags & CRD_F_ENCRYPT) == 0)) {
			maccrd = crd1;
			enccrd = crd2;
		} else if ((crd1->crd_alg == CRYPTO_DES_CBC ||
		    crd1->crd_alg == CRYPTO_3DES_CBC) &&
		    (crd2->crd_alg == CRYPTO_MD5_HMAC_96 ||
		    crd2->crd_alg == CRYPTO_SHA1_HMAC_96) &&
		    (crd1->crd_flags & CRD_F_ENCRYPT)) {
			enccrd = crd1;
			maccrd = crd2;
		} else {
			/*
			 * We cannot order the operations as requested.
			 */
			ubsecstats.hst_badalg++;
			err = EINVAL;
			goto errout;
		}
	}

	if (enccrd) {
		encoffset = enccrd->crd_skip;
		ctx.pc_flags |= htole16(UBS_PKTCTX_ENC_3DES);

		if (enccrd->crd_flags & CRD_F_ENCRYPT) {
			q->q_flags |= UBSEC_QFLAGS_COPYOUTIV;

			if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
				memcpy(ctx.pc_iv, enccrd->crd_iv, 8);
			else {
				ctx.pc_iv[0] = ses->ses_iv[0];
				ctx.pc_iv[1] = ses->ses_iv[1];
			}

			if ((enccrd->crd_flags & CRD_F_IV_PRESENT) == 0) {
				if (crp->crp_flags & CRYPTO_F_IMBUF)
					m_copyback(q->q_src_m,
					    enccrd->crd_inject,
					    8, (void *)ctx.pc_iv);
				else if (crp->crp_flags & CRYPTO_F_IOV)
					cuio_copyback(q->q_src_io,
					    enccrd->crd_inject,
					    8, (void *)ctx.pc_iv);
			}
		} else {
			ctx.pc_flags |= htole16(UBS_PKTCTX_INBOUND);

			if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
				memcpy(ctx.pc_iv, enccrd->crd_iv, 8);
			else if (crp->crp_flags & CRYPTO_F_IMBUF)
				m_copydata(q->q_src_m, enccrd->crd_inject,
				    8, (void *)ctx.pc_iv);
			else if (crp->crp_flags & CRYPTO_F_IOV)
				cuio_copydata(q->q_src_io,
				    enccrd->crd_inject, 8,
				    (void *)ctx.pc_iv);
		}

		ctx.pc_deskey[0] = ses->ses_deskey[0];
		ctx.pc_deskey[1] = ses->ses_deskey[1];
		ctx.pc_deskey[2] = ses->ses_deskey[2];
		ctx.pc_deskey[3] = ses->ses_deskey[3];
		ctx.pc_deskey[4] = ses->ses_deskey[4];
		ctx.pc_deskey[5] = ses->ses_deskey[5];
		SWAP32(ctx.pc_iv[0]);
		SWAP32(ctx.pc_iv[1]);
	}

	if (maccrd) {
		macoffset = maccrd->crd_skip;

		if (maccrd->crd_alg == CRYPTO_MD5_HMAC_96)
			ctx.pc_flags |= htole16(UBS_PKTCTX_AUTH_MD5);
		else
			ctx.pc_flags |= htole16(UBS_PKTCTX_AUTH_SHA1);

		for (i = 0; i < 5; i++) {
			ctx.pc_hminner[i] = ses->ses_hminner[i];
			ctx.pc_hmouter[i] = ses->ses_hmouter[i];

			HTOLE32(ctx.pc_hminner[i]);
			HTOLE32(ctx.pc_hmouter[i]);
		}
	}

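	/*
	 * Figure out the buffer geometry. sskip/dskip are where the chip
	 * starts reading/writing, stheend/dtheend the corresponding
	 * lengths, and coffset the distance from the start of
	 * authentication to the start of encryption (carried in the
	 * context in 32-bit words, hence the >> 2 below). For example,
	 * on an IPsec ESP packet authentication starts at the ESP header
	 * while encryption starts later, at the payload.
	 */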
	if (enccrd && maccrd) {
		/*
		 * ubsec cannot handle packets where the end of encryption
		 * and authentication are not the same, or where the
		 * encrypted part begins before the authenticated part.
		 */
		if ((encoffset + enccrd->crd_len) !=
		    (macoffset + maccrd->crd_len)) {
			ubsecstats.hst_lenmismatch++;
			err = EINVAL;
			goto errout;
		}
		if (enccrd->crd_skip < maccrd->crd_skip) {
			ubsecstats.hst_skipmismatch++;
			err = EINVAL;
			goto errout;
		}
		sskip = maccrd->crd_skip;
		cpskip = dskip = enccrd->crd_skip;
		stheend = maccrd->crd_len;
		dtheend = enccrd->crd_len;
		coffset = enccrd->crd_skip - maccrd->crd_skip;
		cpoffset = cpskip + dtheend;
#ifdef UBSEC_DEBUG
		if (ubsec_debug) {
			printf("mac: skip %d, len %d, inject %d\n",
			    maccrd->crd_skip, maccrd->crd_len, maccrd->crd_inject);
			printf("enc: skip %d, len %d, inject %d\n",
			    enccrd->crd_skip, enccrd->crd_len, enccrd->crd_inject);
			printf("src: skip %d, len %d\n", sskip, stheend);
			printf("dst: skip %d, len %d\n", dskip, dtheend);
			printf("ubs: coffset %d, pktlen %d, cpskip %d, cpoffset %d\n",
			    coffset, stheend, cpskip, cpoffset);
		}
#endif
	} else {
		cpskip = dskip = sskip = macoffset + encoffset;
		dtheend = stheend = (enccrd)?enccrd->crd_len:maccrd->crd_len;
		cpoffset = cpskip + dtheend;
		coffset = 0;
	}
	ctx.pc_offset = htole16(coffset >> 2);

	/* XXX FIXME: jonathan asks, what the heck's that 0xfff0? */
	if (bus_dmamap_create(sc->sc_dmat, 0xfff0, UBS_MAX_SCATTER,
	    0xfff0, 0, BUS_DMA_NOWAIT, &q->q_src_map) != 0) {
		err = ENOMEM;
		goto errout;
	}
	if (crp->crp_flags & CRYPTO_F_IMBUF) {
		if (bus_dmamap_load_mbuf(sc->sc_dmat, q->q_src_map,
		    q->q_src_m, BUS_DMA_NOWAIT) != 0) {
			bus_dmamap_destroy(sc->sc_dmat, q->q_src_map);
			q->q_src_map = NULL;
			ubsecstats.hst_noload++;
			err = ENOMEM;
			goto errout;
		}
	} else if (crp->crp_flags & CRYPTO_F_IOV) {
		if (bus_dmamap_load_uio(sc->sc_dmat, q->q_src_map,
		    q->q_src_io, BUS_DMA_NOWAIT) != 0) {
			bus_dmamap_destroy(sc->sc_dmat, q->q_src_map);
			q->q_src_map = NULL;
			ubsecstats.hst_noload++;
			err = ENOMEM;
			goto errout;
		}
	}
	nicealign = ubsec_dmamap_aligned(q->q_src_map);

	dmap->d_dma->d_mcr.mcr_pktlen = htole16(stheend);

#ifdef UBSEC_DEBUG
	if (ubsec_debug)
		printf("src skip: %d nicealign: %u\n", sskip, nicealign);
#endif
	for (i = j = 0; i < q->q_src_map->dm_nsegs; i++) {
		struct ubsec_pktbuf *pb;
		bus_size_t packl = q->q_src_map->dm_segs[i].ds_len;
		bus_addr_t packp = q->q_src_map->dm_segs[i].ds_addr;

		if (sskip >= packl) {
			sskip -= packl;
			continue;
		}

		packl -= sskip;
		packp += sskip;
		sskip = 0;

		if (packl > 0xfffc) {
			err = EIO;
			goto errout;
		}

		if (j == 0)
			pb = &dmap->d_dma->d_mcr.mcr_ipktbuf;
		else
			pb = &dmap->d_dma->d_sbuf[j - 1];

		pb->pb_addr = htole32(packp);

		if (stheend) {
			if (packl > stheend) {
				pb->pb_len = htole32(stheend);
				stheend = 0;
			} else {
				pb->pb_len = htole32(packl);
				stheend -= packl;
			}
		} else
			pb->pb_len = htole32(packl);

		if ((i + 1) == q->q_src_map->dm_nsegs)
			pb->pb_next = 0;
		else
			pb->pb_next = htole32(dmap->d_alloc.dma_paddr +
			    offsetof(struct ubsec_dmachunk, d_sbuf[j]));
		j++;
	}

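	/*
	 * Now the output side. If only authentication was requested
	 * there is no output payload: point the output chain directly at
	 * d_macbuf so the chip deposits the HMAC there. Otherwise an
	 * output scatter/gather chain is built just like the input one,
	 * with the MAC buffer (if any) linked after the last segment.
	 */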
	if (enccrd == NULL && maccrd != NULL) {
		dmap->d_dma->d_mcr.mcr_opktbuf.pb_addr = 0;
		dmap->d_dma->d_mcr.mcr_opktbuf.pb_len = 0;
		dmap->d_dma->d_mcr.mcr_opktbuf.pb_next = htole32(dmap->d_alloc.dma_paddr +
		    offsetof(struct ubsec_dmachunk, d_macbuf[0]));
#ifdef UBSEC_DEBUG
		if (ubsec_debug)
			printf("opkt: %x %x %x\n",
			    dmap->d_dma->d_mcr.mcr_opktbuf.pb_addr,
			    dmap->d_dma->d_mcr.mcr_opktbuf.pb_len,
			    dmap->d_dma->d_mcr.mcr_opktbuf.pb_next);
#endif
	} else {
		if (crp->crp_flags & CRYPTO_F_IOV) {
			if (!nicealign) {
				ubsecstats.hst_iovmisaligned++;
				err = EINVAL;
				goto errout;
			}
			/* XXX: ``what the heck's that'' 0xfff0? */
			if (bus_dmamap_create(sc->sc_dmat, 0xfff0,
			    UBS_MAX_SCATTER, 0xfff0, 0, BUS_DMA_NOWAIT,
			    &q->q_dst_map) != 0) {
				ubsecstats.hst_nomap++;
				err = ENOMEM;
				goto errout;
			}
			if (bus_dmamap_load_uio(sc->sc_dmat, q->q_dst_map,
			    q->q_dst_io, BUS_DMA_NOWAIT) != 0) {
				bus_dmamap_destroy(sc->sc_dmat, q->q_dst_map);
				q->q_dst_map = NULL;
				ubsecstats.hst_noload++;
				err = ENOMEM;
				goto errout;
			}
		} else if (crp->crp_flags & CRYPTO_F_IMBUF) {
			if (nicealign) {
				q->q_dst_m = q->q_src_m;
				q->q_dst_map = q->q_src_map;
			} else {
				int totlen, len;
				struct mbuf *m, *top, **mp;

				ubsecstats.hst_unaligned++;
				totlen = q->q_src_map->dm_mapsize;
				if (q->q_src_m->m_flags & M_PKTHDR) {
					len = MHLEN;
					MGETHDR(m, M_DONTWAIT, MT_DATA);
					/*XXX FIXME: m_dup_pkthdr */
					if (m && 1 /*!m_dup_pkthdr(m, q->q_src_m, M_DONTWAIT)*/) {
						m_free(m);
						m = NULL;
					}
				} else {
					len = MLEN;
					MGET(m, M_DONTWAIT, MT_DATA);
				}
				if (m == NULL) {
					ubsecstats.hst_nombuf++;
					err = sc->sc_nqueue ? ERESTART : ENOMEM;
					goto errout;
				}
				if (len == MHLEN)
					/*XXX was M_DUP_PKTHDR*/
					M_COPY_PKTHDR(m, q->q_src_m);
				if (totlen >= MINCLSIZE) {
					MCLGET(m, M_DONTWAIT);
					if ((m->m_flags & M_EXT) == 0) {
						m_free(m);
						ubsecstats.hst_nomcl++;
						err = sc->sc_nqueue ? ERESTART : ENOMEM;
						goto errout;
					}
					len = MCLBYTES;
				}
				m->m_len = len;
				top = NULL;
				mp = &top;

				while (totlen > 0) {
					if (top) {
						MGET(m, M_DONTWAIT, MT_DATA);
						if (m == NULL) {
							m_freem(top);
							ubsecstats.hst_nombuf++;
							err = sc->sc_nqueue ? ERESTART : ENOMEM;
							goto errout;
						}
						len = MLEN;
					}
					if (top && totlen >= MINCLSIZE) {
						MCLGET(m, M_DONTWAIT);
						if ((m->m_flags & M_EXT) == 0) {
							*mp = m;
							m_freem(top);
							ubsecstats.hst_nomcl++;
							err = sc->sc_nqueue ? ERESTART : ENOMEM;
							goto errout;
						}
						len = MCLBYTES;
					}
					m->m_len = len = min(totlen, len);
					totlen -= len;
					*mp = m;
					mp = &m->m_next;
				}
				q->q_dst_m = top;
				ubsec_mcopy(q->q_src_m, q->q_dst_m,
				    cpskip, cpoffset);
				/* XXX again, what the heck is that 0xfff0? */
				if (bus_dmamap_create(sc->sc_dmat, 0xfff0,
				    UBS_MAX_SCATTER, 0xfff0, 0, BUS_DMA_NOWAIT,
				    &q->q_dst_map) != 0) {
					ubsecstats.hst_nomap++;
					err = ENOMEM;
					goto errout;
				}
				if (bus_dmamap_load_mbuf(sc->sc_dmat,
				    q->q_dst_map, q->q_dst_m,
				    BUS_DMA_NOWAIT) != 0) {
					bus_dmamap_destroy(sc->sc_dmat,
					    q->q_dst_map);
					q->q_dst_map = NULL;
					ubsecstats.hst_noload++;
					err = ENOMEM;
					goto errout;
				}
			}
		} else {
			ubsecstats.hst_badflags++;
			err = EINVAL;
			goto errout;
		}

#ifdef UBSEC_DEBUG
		if (ubsec_debug)
			printf("dst skip: %d\n", dskip);
#endif
		for (i = j = 0; i < q->q_dst_map->dm_nsegs; i++) {
			struct ubsec_pktbuf *pb;
			bus_size_t packl = q->q_dst_map->dm_segs[i].ds_len;
			bus_addr_t packp = q->q_dst_map->dm_segs[i].ds_addr;

			if (dskip >= packl) {
				dskip -= packl;
				continue;
			}

			packl -= dskip;
			packp += dskip;
			dskip = 0;

			if (packl > 0xfffc) {
				err = EIO;
				goto errout;
			}

			if (j == 0)
				pb = &dmap->d_dma->d_mcr.mcr_opktbuf;
			else
				pb = &dmap->d_dma->d_dbuf[j - 1];

			pb->pb_addr = htole32(packp);

			if (dtheend) {
				if (packl > dtheend) {
					pb->pb_len = htole32(dtheend);
					dtheend = 0;
				} else {
					pb->pb_len = htole32(packl);
					dtheend -= packl;
				}
			} else
				pb->pb_len = htole32(packl);

			if ((i + 1) == q->q_dst_map->dm_nsegs) {
				if (maccrd)
					pb->pb_next = htole32(dmap->d_alloc.dma_paddr +
					    offsetof(struct ubsec_dmachunk, d_macbuf[0]));
				else
					pb->pb_next = 0;
			} else
				pb->pb_next = htole32(dmap->d_alloc.dma_paddr +
				    offsetof(struct ubsec_dmachunk, d_dbuf[j]));
			j++;
		}
	}

	dmap->d_dma->d_mcr.mcr_cmdctxp = htole32(dmap->d_alloc.dma_paddr +
	    offsetof(struct ubsec_dmachunk, d_ctx));

	if (sc->sc_flags & UBS_FLAGS_LONGCTX) {
		struct ubsec_pktctx_long *ctxl;

		ctxl = (struct ubsec_pktctx_long *)((char *)dmap->d_alloc.dma_vaddr +
		    offsetof(struct ubsec_dmachunk, d_ctx));

		/* transform small context into long context */
		ctxl->pc_len = htole16(sizeof(struct ubsec_pktctx_long));
		ctxl->pc_type = htole16(UBS_PKTCTX_TYPE_IPSEC);
		ctxl->pc_flags = ctx.pc_flags;
		ctxl->pc_offset = ctx.pc_offset;
		for (i = 0; i < 6; i++)
			ctxl->pc_deskey[i] = ctx.pc_deskey[i];
		for (i = 0; i < 5; i++)
			ctxl->pc_hminner[i] = ctx.pc_hminner[i];
		for (i = 0; i < 5; i++)
			ctxl->pc_hmouter[i] = ctx.pc_hmouter[i];
		ctxl->pc_iv[0] = ctx.pc_iv[0];
		ctxl->pc_iv[1] = ctx.pc_iv[1];
	} else
		memcpy((char *)dmap->d_alloc.dma_vaddr +
		    offsetof(struct ubsec_dmachunk, d_ctx), &ctx,
		    sizeof(struct ubsec_pktctx));

	s = splnet();
	SIMPLEQ_INSERT_TAIL(&sc->sc_queue, q, q_next);
	sc->sc_nqueue++;
	ubsecstats.hst_ipackets++;
	ubsecstats.hst_ibytes += dmap->d_alloc.dma_map->dm_mapsize;
	if ((hint & CRYPTO_HINT_MORE) == 0 || sc->sc_nqueue >= ubsec_maxbatch)
		ubsec_feed(sc);
	splx(s);
	return (0);

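/*
 * Error exit: release whatever was built (replacement mbuf chain, DMA
 * maps), return the queue entry to the free list, and either complete
 * the request with the error or, for ERESTART, leave it for the
 * framework to resubmit once we drain.
 */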
errout:
	if (q != NULL) {
		if ((q->q_dst_m != NULL) && (q->q_src_m != q->q_dst_m))
			m_freem(q->q_dst_m);

		if (q->q_dst_map != NULL && q->q_dst_map != q->q_src_map) {
			bus_dmamap_unload(sc->sc_dmat, q->q_dst_map);
			bus_dmamap_destroy(sc->sc_dmat, q->q_dst_map);
		}
		if (q->q_src_map != NULL) {
			bus_dmamap_unload(sc->sc_dmat, q->q_src_map);
			bus_dmamap_destroy(sc->sc_dmat, q->q_src_map);
		}

		s = splnet();
		SIMPLEQ_INSERT_TAIL(&sc->sc_freequeue, q, q_next);
		splx(s);
	}
#if 0 /* jonathan says: this openbsd code seems to be subsumed elsewhere */
	if (err == EINVAL)
		ubsecstats.hst_invalid++;
	else
		ubsecstats.hst_nomem++;
#endif
	if (err != ERESTART) {
		crp->crp_etype = err;
		crypto_done(crp);
	} else {
		sc->sc_needwakeup |= CRYPTO_SYMQ;
	}
	return (err);
}

static void
ubsec_callback(struct ubsec_softc *sc, struct ubsec_q *q)
{
	struct cryptop *crp = (struct cryptop *)q->q_crp;
	struct cryptodesc *crd;
	struct ubsec_dma *dmap = q->q_dma;

	ubsecstats.hst_opackets++;
	ubsecstats.hst_obytes += dmap->d_alloc.dma_size;

	bus_dmamap_sync(sc->sc_dmat, dmap->d_alloc.dma_map, 0,
	    dmap->d_alloc.dma_map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	if (q->q_dst_map != NULL && q->q_dst_map != q->q_src_map) {
		bus_dmamap_sync(sc->sc_dmat, q->q_dst_map,
		    0, q->q_dst_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, q->q_dst_map);
		bus_dmamap_destroy(sc->sc_dmat, q->q_dst_map);
	}
	bus_dmamap_sync(sc->sc_dmat, q->q_src_map,
	    0, q->q_src_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->sc_dmat, q->q_src_map);
	bus_dmamap_destroy(sc->sc_dmat, q->q_src_map);

	if ((crp->crp_flags & CRYPTO_F_IMBUF) && (q->q_src_m != q->q_dst_m)) {
		m_freem(q->q_src_m);
		crp->crp_buf = (void *)q->q_dst_m;
	}

	/* copy out IV for future use */
	if (q->q_flags & UBSEC_QFLAGS_COPYOUTIV) {
		for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
			if (crd->crd_alg != CRYPTO_DES_CBC &&
			    crd->crd_alg != CRYPTO_3DES_CBC)
				continue;
			if (crp->crp_flags & CRYPTO_F_IMBUF)
				m_copydata((struct mbuf *)crp->crp_buf,
				    crd->crd_skip + crd->crd_len - 8, 8,
				    (void *)sc->sc_sessions[q->q_sesn].ses_iv);
			else if (crp->crp_flags & CRYPTO_F_IOV) {
				cuio_copydata((struct uio *)crp->crp_buf,
				    crd->crd_skip + crd->crd_len - 8, 8,
				    (void *)sc->sc_sessions[q->q_sesn].ses_iv);
			}
			break;
		}
	}

	for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
		if (crd->crd_alg != CRYPTO_MD5_HMAC_96 &&
		    crd->crd_alg != CRYPTO_SHA1_HMAC_96)
			continue;
		if (crp->crp_flags & CRYPTO_F_IMBUF)
			m_copyback((struct mbuf *)crp->crp_buf,
			    crd->crd_inject, 12,
			    (void *)dmap->d_dma->d_macbuf);
		else if (crp->crp_flags & CRYPTO_F_IOV && crp->crp_mac)
			bcopy((void *)dmap->d_dma->d_macbuf,
			    crp->crp_mac, 12);
		break;
	}
	SIMPLEQ_INSERT_TAIL(&sc->sc_freequeue, q, q_next);
	crypto_done(crp);
}

static void
ubsec_mcopy(struct mbuf *srcm, struct mbuf *dstm, int hoffset, int toffset)
{
	int i, j, dlen, slen;
	char *dptr, *sptr;

	j = 0;
	sptr = srcm->m_data;
	slen = srcm->m_len;
	dptr = dstm->m_data;
	dlen = dstm->m_len;

	while (1) {
		for (i = 0; i < min(slen, dlen); i++) {
			if (j < hoffset || j >= toffset)
				*dptr++ = *sptr++;
			slen--;
			dlen--;
			j++;
		}
		if (slen == 0) {
			srcm = srcm->m_next;
			if (srcm == NULL)
				return;
			sptr = srcm->m_data;
			slen = srcm->m_len;
		}
		if (dlen == 0) {
			dstm = dstm->m_next;
			if (dstm == NULL)
				return;
			dptr = dstm->m_data;
			dlen = dstm->m_len;
		}
	}
}
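
/*
 * Note that the MCR2 (key unit/RNG) path below differs from MCR1:
 * requests are posted one MCR at a time with no aggregation, and
 * completion is signalled by BS_STAT_MCR2_DONE.
 */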

/*
 * feed the key generator, must be called at splnet() or higher.
 */
static void
ubsec_feed2(struct ubsec_softc *sc)
{
	struct ubsec_q2 *q;

	while (!SIMPLEQ_EMPTY(&sc->sc_queue2)) {
		if (READ_REG(sc, BS_STAT) & BS_STAT_MCR2_FULL)
			break;
		q = SIMPLEQ_FIRST(&sc->sc_queue2);

		bus_dmamap_sync(sc->sc_dmat, q->q_mcr.dma_map, 0,
		    q->q_mcr.dma_map->dm_mapsize,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		bus_dmamap_sync(sc->sc_dmat, q->q_ctx.dma_map, 0,
		    q->q_ctx.dma_map->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		WRITE_REG(sc, BS_MCR2, q->q_mcr.dma_paddr);
		q = SIMPLEQ_FIRST(&sc->sc_queue2);
		SIMPLEQ_REMOVE_HEAD(&sc->sc_queue2, /*q,*/ q_next);
		--sc->sc_nqueue2;
		SIMPLEQ_INSERT_TAIL(&sc->sc_qchip2, q, q_next);
	}
}

/*
 * Callback for handling random number and key operations.
 */
static void
ubsec_callback2(struct ubsec_softc *sc, struct ubsec_q2 *q)
{
	struct cryptkop *krp;
	struct ubsec_ctx_keyop *ctx;

	ctx = (struct ubsec_ctx_keyop *)q->q_ctx.dma_vaddr;
	bus_dmamap_sync(sc->sc_dmat, q->q_ctx.dma_map, 0,
	    q->q_ctx.dma_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);

	switch (q->q_type) {
#ifndef UBSEC_NO_RNG
	case UBS_CTXOP_RNGSHA1:
	case UBS_CTXOP_RNGBYPASS: {
		struct ubsec_q2_rng *rng = (struct ubsec_q2_rng *)q;
		u_int32_t *p;
		int i;

		bus_dmamap_sync(sc->sc_dmat, rng->rng_buf.dma_map, 0,
		    rng->rng_buf.dma_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
		p = (u_int32_t *)rng->rng_buf.dma_vaddr;
#ifndef __NetBSD__
		for (i = 0; i < UBSEC_RNG_BUFSIZ; p++, i++)
			add_true_randomness(letoh32(*p));
		rng->rng_used = 0;
#else
		/* XXX NetBSD rnd subsystem too weak */
		i = 0; (void)i; /* shut off gcc warnings */
#endif
#ifdef __OpenBSD__
		timeout_add(&sc->sc_rngto, sc->sc_rnghz);
#else
		callout_reset(&sc->sc_rngto, sc->sc_rnghz, ubsec_rng, sc);
#endif
		break;
	}
#endif
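	/*
	 * Modexp results come back normalized: parts with UBS_FLAGS_HWNORM
	 * right-align the value in hardware and it is copied out as is,
	 * while older parts return it shifted left by the normalization
	 * amount and ubsec_kshift_l() undoes the shift.
	 */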
	case UBS_CTXOP_MODEXP: {
		struct ubsec_q2_modexp *me = (struct ubsec_q2_modexp *)q;
		u_int rlen, clen;

		krp = me->me_krp;
		rlen = (me->me_modbits + 7) / 8;
		clen = (krp->krp_param[krp->krp_iparams].crp_nbits + 7) / 8;

		bus_dmamap_sync(sc->sc_dmat, me->me_M.dma_map,
		    0, me->me_M.dma_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_sync(sc->sc_dmat, me->me_E.dma_map,
		    0, me->me_E.dma_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_sync(sc->sc_dmat, me->me_C.dma_map,
		    0, me->me_C.dma_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_sync(sc->sc_dmat, me->me_epb.dma_map,
		    0, me->me_epb.dma_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);

		if (clen < rlen)
			krp->krp_status = E2BIG;
		else {
			if (sc->sc_flags & UBS_FLAGS_HWNORM) {
				memset(krp->krp_param[krp->krp_iparams].crp_p, 0,
				    (krp->krp_param[krp->krp_iparams].crp_nbits
					+ 7) / 8);
				bcopy(me->me_C.dma_vaddr,
				    krp->krp_param[krp->krp_iparams].crp_p,
				    (me->me_modbits + 7) / 8);
			} else
				ubsec_kshift_l(me->me_shiftbits,
				    me->me_C.dma_vaddr, me->me_normbits,
				    krp->krp_param[krp->krp_iparams].crp_p,
				    krp->krp_param[krp->krp_iparams].crp_nbits);
		}

		crypto_kdone(krp);

		/* bzero all potentially sensitive data */
		memset(me->me_E.dma_vaddr, 0, me->me_E.dma_size);
		memset(me->me_M.dma_vaddr, 0, me->me_M.dma_size);
		memset(me->me_C.dma_vaddr, 0, me->me_C.dma_size);
		memset(me->me_q.q_ctx.dma_vaddr, 0, me->me_q.q_ctx.dma_size);

		/* Can't free here, so put us on the free list. */
		SIMPLEQ_INSERT_TAIL(&sc->sc_q2free, &me->me_q, q_next);
		break;
	}
	case UBS_CTXOP_RSAPRIV: {
		struct ubsec_q2_rsapriv *rp = (struct ubsec_q2_rsapriv *)q;
		u_int len;

		krp = rp->rpr_krp;
		bus_dmamap_sync(sc->sc_dmat, rp->rpr_msgin.dma_map, 0,
		    rp->rpr_msgin.dma_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_sync(sc->sc_dmat, rp->rpr_msgout.dma_map, 0,
		    rp->rpr_msgout.dma_map->dm_mapsize, BUS_DMASYNC_POSTREAD);

		len = (krp->krp_param[UBS_RSAPRIV_PAR_MSGOUT].crp_nbits + 7) / 8;
		bcopy(rp->rpr_msgout.dma_vaddr,
		    krp->krp_param[UBS_RSAPRIV_PAR_MSGOUT].crp_p, len);

		crypto_kdone(krp);

		memset(rp->rpr_msgin.dma_vaddr, 0, rp->rpr_msgin.dma_size);
		memset(rp->rpr_msgout.dma_vaddr, 0, rp->rpr_msgout.dma_size);
		memset(rp->rpr_q.q_ctx.dma_vaddr, 0, rp->rpr_q.q_ctx.dma_size);

		/* Can't free here, so put us on the free list. */
		SIMPLEQ_INSERT_TAIL(&sc->sc_q2free, &rp->rpr_q, q_next);
		break;
	}
	default:
		printf("%s: unknown ctx op: %x\n", device_xname(sc->sc_dev),
		    letoh16(ctx->ctx_op));
		break;
	}
}

#ifndef UBSEC_NO_RNG
static void
ubsec_rng(void *vsc)
{
	struct ubsec_softc *sc = vsc;
	struct ubsec_q2_rng *rng = &sc->sc_rng;
	struct ubsec_mcr *mcr;
	struct ubsec_ctx_rngbypass *ctx;
	int s;

	s = splnet();
	if (rng->rng_used) {
		splx(s);
		return;
	}
	sc->sc_nqueue2++;
	if (sc->sc_nqueue2 >= UBS_MAX_NQUEUE)
		goto out;

	mcr = (struct ubsec_mcr *)rng->rng_q.q_mcr.dma_vaddr;
	ctx = (struct ubsec_ctx_rngbypass *)rng->rng_q.q_ctx.dma_vaddr;

	mcr->mcr_pkts = htole16(1);
	mcr->mcr_flags = 0;
	mcr->mcr_cmdctxp = htole32(rng->rng_q.q_ctx.dma_paddr);
	mcr->mcr_ipktbuf.pb_addr = mcr->mcr_ipktbuf.pb_next = 0;
	mcr->mcr_ipktbuf.pb_len = 0;
	mcr->mcr_reserved = mcr->mcr_pktlen = 0;
	mcr->mcr_opktbuf.pb_addr = htole32(rng->rng_buf.dma_paddr);
	mcr->mcr_opktbuf.pb_len = htole32(((sizeof(u_int32_t) * UBSEC_RNG_BUFSIZ)) &
	    UBS_PKTBUF_LEN);
	mcr->mcr_opktbuf.pb_next = 0;

	ctx->rbp_len = htole16(sizeof(struct ubsec_ctx_rngbypass));
	ctx->rbp_op = htole16(UBS_CTXOP_RNGSHA1);
	rng->rng_q.q_type = UBS_CTXOP_RNGSHA1;

	bus_dmamap_sync(sc->sc_dmat, rng->rng_buf.dma_map, 0,
	    rng->rng_buf.dma_map->dm_mapsize, BUS_DMASYNC_PREREAD);

	SIMPLEQ_INSERT_TAIL(&sc->sc_queue2, &rng->rng_q, q_next);
	rng->rng_used = 1;
	ubsec_feed2(sc);
	ubsecstats.hst_rng++;
	splx(s);

	return;

out:
	/*
	 * Something weird happened; generate our own callback.
	 */
	sc->sc_nqueue2--;
	splx(s);
#ifdef __OpenBSD__
	timeout_add(&sc->sc_rngto, sc->sc_rnghz);
#else
	callout_reset(&sc->sc_rngto, sc->sc_rnghz, ubsec_rng, sc);
#endif
}
#endif /* UBSEC_NO_RNG */

static int
ubsec_dma_malloc(struct ubsec_softc *sc, bus_size_t size,
    struct ubsec_dma_alloc *dma, int mapflags)
{
	int r;

	if ((r = bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0,
	    &dma->dma_seg, 1, &dma->dma_nseg, BUS_DMA_NOWAIT)) != 0)
		goto fail_0;

	if ((r = bus_dmamem_map(sc->sc_dmat, &dma->dma_seg, dma->dma_nseg,
	    size, &dma->dma_vaddr, mapflags | BUS_DMA_NOWAIT)) != 0)
		goto fail_1;

	if ((r = bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
	    BUS_DMA_NOWAIT, &dma->dma_map)) != 0)
		goto fail_2;

	if ((r = bus_dmamap_load(sc->sc_dmat, dma->dma_map, dma->dma_vaddr,
	    size, NULL, BUS_DMA_NOWAIT)) != 0)
		goto fail_3;

	dma->dma_paddr = dma->dma_map->dm_segs[0].ds_addr;
	dma->dma_size = size;
	return (0);

fail_3:
	bus_dmamap_destroy(sc->sc_dmat, dma->dma_map);
fail_2:
	bus_dmamem_unmap(sc->sc_dmat, dma->dma_vaddr, size);
fail_1:
	bus_dmamem_free(sc->sc_dmat, &dma->dma_seg, dma->dma_nseg);
fail_0:
	dma->dma_map = NULL;
	return (r);
}

static void
ubsec_dma_free(struct ubsec_softc *sc, struct ubsec_dma_alloc *dma)
{
	bus_dmamap_unload(sc->sc_dmat, dma->dma_map);
	bus_dmamem_unmap(sc->sc_dmat, dma->dma_vaddr, dma->dma_size);
	bus_dmamem_free(sc->sc_dmat, &dma->dma_seg, dma->dma_nseg);
	bus_dmamap_destroy(sc->sc_dmat, dma->dma_map);
}

/*
 * Resets the board. Values in the registers are left as is
 * from the reset (i.e. initial values are assigned elsewhere).
 */
static void
ubsec_reset_board(struct ubsec_softc *sc)
{
	volatile u_int32_t ctrl;

	ctrl = READ_REG(sc, BS_CTRL);
	ctrl |= BS_CTRL_RESET;
	WRITE_REG(sc, BS_CTRL, ctrl);

	/*
	 * Wait approx. 30 PCI clocks = 900 ns = 0.9 us
	 */
	DELAY(10);
}

/*
 * Init Broadcom registers
 */
static void
ubsec_init_board(struct ubsec_softc *sc)
{
	u_int32_t ctrl;

	ctrl = READ_REG(sc, BS_CTRL);
	ctrl &= ~(BS_CTRL_BE32 | BS_CTRL_BE64);
	ctrl |= BS_CTRL_LITTLE_ENDIAN | BS_CTRL_MCR1INT;

	/*
	 * XXX: Sam Leffler's code has (UBS_FLAGS_KEY|UBS_FLAGS_RNG)).
	 * anyone got hw docs?
	 */
	if (sc->sc_flags & UBS_FLAGS_KEY)
		ctrl |= BS_CTRL_MCR2INT;
	else
		ctrl &= ~BS_CTRL_MCR2INT;

	if (sc->sc_flags & UBS_FLAGS_HWNORM)
		ctrl &= ~BS_CTRL_SWNORM;

	WRITE_REG(sc, BS_CTRL, ctrl);
}

/*
 * Init Broadcom PCI registers
 */
static void
ubsec_init_pciregs(struct pci_attach_args *pa)
{
	pci_chipset_tag_t pc = pa->pa_pc;
	u_int32_t misc;

	/*
	 * This sets the cache line size to 1, which forces the
	 * BCM58xx chip to do only burst read/writes; cache line
	 * read/writes are too slow.
	 */
	misc = pci_conf_read(pc, pa->pa_tag, PCI_BHLC_REG);
	misc = (misc & ~(PCI_CACHELINE_MASK << PCI_CACHELINE_SHIFT))
	    | ((UBS_DEF_CACHELINE & 0xff) << PCI_CACHELINE_SHIFT);
	pci_conf_write(pc, pa->pa_tag, PCI_BHLC_REG, misc);
}

/*
 * Clean up after a chip crash.

/*
 * Reset the board.  Values in the registers are left as is
 * from the reset (i.e. initial values are assigned elsewhere).
 */
static void
ubsec_reset_board(struct ubsec_softc *sc)
{
	volatile u_int32_t ctrl;

	ctrl = READ_REG(sc, BS_CTRL);
	ctrl |= BS_CTRL_RESET;
	WRITE_REG(sc, BS_CTRL, ctrl);

	/*
	 * Wait approx. 30 PCI clocks = 900 ns = 0.9 us
	 */
	DELAY(10);
}

/*
 * Init Broadcom registers
 */
static void
ubsec_init_board(struct ubsec_softc *sc)
{
	u_int32_t ctrl;

	ctrl = READ_REG(sc, BS_CTRL);
	ctrl &= ~(BS_CTRL_BE32 | BS_CTRL_BE64);
	ctrl |= BS_CTRL_LITTLE_ENDIAN | BS_CTRL_MCR1INT;

	/*
	 * XXX: Sam Leffler's code has (UBS_FLAGS_KEY|UBS_FLAGS_RNG).
	 * anyone got hw docs?
	 */
	if (sc->sc_flags & UBS_FLAGS_KEY)
		ctrl |= BS_CTRL_MCR2INT;
	else
		ctrl &= ~BS_CTRL_MCR2INT;

	if (sc->sc_flags & UBS_FLAGS_HWNORM)
		ctrl &= ~BS_CTRL_SWNORM;

	WRITE_REG(sc, BS_CTRL, ctrl);
}

/*
 * Init Broadcom PCI registers
 */
static void
ubsec_init_pciregs(struct pci_attach_args *pa)
{
	pci_chipset_tag_t pc = pa->pa_pc;
	u_int32_t misc;

	/*
	 * This sets the cache line size to 1, which forces the
	 * BCM58xx chip to do only burst read/writes; cache line
	 * read/writes are too slow.
	 */
	misc = pci_conf_read(pc, pa->pa_tag, PCI_BHLC_REG);
	misc = (misc & ~(PCI_CACHELINE_MASK << PCI_CACHELINE_SHIFT))
	    | ((UBS_DEF_CACHELINE & 0xff) << PCI_CACHELINE_SHIFT);
	pci_conf_write(pc, pa->pa_tag, PCI_BHLC_REG, misc);
}

/*
 * Clean up after a chip crash.
 * It is assumed that the caller is in splnet().
 */
static void
ubsec_cleanchip(struct ubsec_softc *sc)
{
	struct ubsec_q *q;

	while (!SIMPLEQ_EMPTY(&sc->sc_qchip)) {
		q = SIMPLEQ_FIRST(&sc->sc_qchip);
		SIMPLEQ_REMOVE_HEAD(&sc->sc_qchip, /*q,*/ q_next);
		ubsec_free_q(sc, q);
	}
	sc->sc_nqchip = 0;
}

/*
 * Free a ubsec_q.
 * It is assumed that the caller is within splnet().
 */
static int
ubsec_free_q(struct ubsec_softc *sc, struct ubsec_q *q)
{
	struct ubsec_q *q2;
	struct cryptop *crp;
	int npkts;
	int i;

	npkts = q->q_nstacked_mcrs;

	for (i = 0; i < npkts; i++) {
		if (q->q_stacked_mcr[i]) {
			q2 = q->q_stacked_mcr[i];

			if ((q2->q_dst_m != NULL) && (q2->q_src_m != q2->q_dst_m))
				m_freem(q2->q_dst_m);

			crp = (struct cryptop *)q2->q_crp;

			SIMPLEQ_INSERT_TAIL(&sc->sc_freequeue, q2, q_next);

			crp->crp_etype = EFAULT;
			crypto_done(crp);
		} else {
			break;
		}
	}

	/*
	 * Free header MCR
	 */
	if ((q->q_dst_m != NULL) && (q->q_src_m != q->q_dst_m))
		m_freem(q->q_dst_m);

	crp = (struct cryptop *)q->q_crp;

	SIMPLEQ_INSERT_TAIL(&sc->sc_freequeue, q, q_next);

	crp->crp_etype = EFAULT;
	crypto_done(crp);
	return (0);
}

/*
 * Routine to reset the chip and clean up.
 * It is assumed that the caller is in splnet().
 */
static void
ubsec_totalreset(struct ubsec_softc *sc)
{
	ubsec_reset_board(sc);
	ubsec_init_board(sc);
	ubsec_cleanchip(sc);
}

static int
ubsec_dmamap_aligned(bus_dmamap_t map)
{
	int i;

	for (i = 0; i < map->dm_nsegs; i++) {
		if (map->dm_segs[i].ds_addr & 3)
			return (0);
		if ((i != (map->dm_nsegs - 1)) &&
		    (map->dm_segs[i].ds_len & 3))
			return (0);
	}
	return (1);
}
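
/*
 * Worked example for ubsec_dmamap_aligned() above (illustrative): a map
 * with segments { addr 0x1000, len 0x40 }, { addr 0x2000, len 0x31 } is
 * accepted, since every address is 4-byte aligned and only the last
 * segment may have an unaligned length; a segment at addr 0x1002 would
 * be rejected.  The chip requires word-aligned buffers (cf. the
 * DIAGNOSTIC panics in the key routines below).
 */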

#ifdef __OpenBSD__
struct ubsec_softc *
ubsec_kfind(struct cryptkop *krp)
{
	struct ubsec_softc *sc;
	int i;

	for (i = 0; i < ubsec_cd.cd_ndevs; i++) {
		sc = ubsec_cd.cd_devs[i];
		if (sc == NULL)
			continue;
		if (sc->sc_cid == krp->krp_hid)
			return (sc);
	}
	return (NULL);
}
#endif

static void
ubsec_kfree(struct ubsec_softc *sc, struct ubsec_q2 *q)
{
	switch (q->q_type) {
	case UBS_CTXOP_MODEXP: {
		struct ubsec_q2_modexp *me = (struct ubsec_q2_modexp *)q;

		ubsec_dma_free(sc, &me->me_q.q_mcr);
		ubsec_dma_free(sc, &me->me_q.q_ctx);
		ubsec_dma_free(sc, &me->me_M);
		ubsec_dma_free(sc, &me->me_E);
		ubsec_dma_free(sc, &me->me_C);
		ubsec_dma_free(sc, &me->me_epb);
		free(me, M_DEVBUF);
		break;
	}
	case UBS_CTXOP_RSAPRIV: {
		struct ubsec_q2_rsapriv *rp = (struct ubsec_q2_rsapriv *)q;

		ubsec_dma_free(sc, &rp->rpr_q.q_mcr);
		ubsec_dma_free(sc, &rp->rpr_q.q_ctx);
		ubsec_dma_free(sc, &rp->rpr_msgin);
		ubsec_dma_free(sc, &rp->rpr_msgout);
		free(rp, M_DEVBUF);
		break;
	}
	default:
		printf("%s: invalid kfree 0x%x\n", device_xname(sc->sc_dev),
		    q->q_type);
		break;
	}
}

static int
ubsec_kprocess(void *arg, struct cryptkop *krp, int hint)
{
	struct ubsec_softc *sc;
	int r;

	if (krp == NULL || krp->krp_callback == NULL)
		return (EINVAL);
#ifdef __OpenBSD__
	if ((sc = ubsec_kfind(krp)) == NULL)
		return (EINVAL);
#else
	sc = arg;
	KASSERT(sc != NULL /*, ("ubsec_kprocess: null softc")*/);
#endif

	while (!SIMPLEQ_EMPTY(&sc->sc_q2free)) {
		struct ubsec_q2 *q;

		q = SIMPLEQ_FIRST(&sc->sc_q2free);
		SIMPLEQ_REMOVE_HEAD(&sc->sc_q2free, /*q,*/ q_next);
		ubsec_kfree(sc, q);
	}

	switch (krp->krp_op) {
	case CRK_MOD_EXP:
		if (sc->sc_flags & UBS_FLAGS_HWNORM)
			r = ubsec_kprocess_modexp_hw(sc, krp, hint);
		else
			r = ubsec_kprocess_modexp_sw(sc, krp, hint);
		break;
	case CRK_MOD_EXP_CRT:
		r = ubsec_kprocess_rsapriv(sc, krp, hint);
		break;
	default:
		printf("%s: kprocess: invalid op 0x%x\n",
		    device_xname(sc->sc_dev), krp->krp_op);
		krp->krp_status = EOPNOTSUPP;
		crypto_kdone(krp);
		r = 0;
	}
	return (r);
}
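
/*
 * Descriptive note (derived from the code below, not from hw docs):
 * for CRK_MOD_EXP the operands arrive in krp_param[UBS_MODEXP_PAR_M],
 * [UBS_MODEXP_PAR_E] and [UBS_MODEXP_PAR_N], stored least-significant
 * byte first (cf. ubsec_ksigbits()), and the result is written to
 * krp_param[krp->krp_iparams].  For CRK_MOD_EXP_CRT the CRT key parts
 * arrive in krp_param[UBS_RSAPRIV_PAR_P/Q/DP/DQ/PINV], the input in
 * UBS_RSAPRIV_PAR_MSGIN, and the result goes to UBS_RSAPRIV_PAR_MSGOUT.
 */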

/*
 * Start computation of cr[C] = (cr[M] ^ cr[E]) mod cr[N] (sw normalization)
 */
static int
ubsec_kprocess_modexp_sw(struct ubsec_softc *sc, struct cryptkop *krp,
    int hint)
{
	struct ubsec_q2_modexp *me;
	struct ubsec_mcr *mcr;
	struct ubsec_ctx_modexp *ctx;
	struct ubsec_pktbuf *epb;
	int s, err = 0;
	u_int nbits, normbits, mbits, shiftbits, ebits;

	me = (struct ubsec_q2_modexp *)malloc(sizeof *me, M_DEVBUF, M_NOWAIT);
	if (me == NULL) {
		err = ENOMEM;
		goto errout;
	}
	memset(me, 0, sizeof *me);
	me->me_krp = krp;
	me->me_q.q_type = UBS_CTXOP_MODEXP;

	nbits = ubsec_ksigbits(&krp->krp_param[UBS_MODEXP_PAR_N]);
	if (nbits <= 512)
		normbits = 512;
	else if (nbits <= 768)
		normbits = 768;
	else if (nbits <= 1024)
		normbits = 1024;
	else if (sc->sc_flags & UBS_FLAGS_BIGKEY && nbits <= 1536)
		normbits = 1536;
	else if (sc->sc_flags & UBS_FLAGS_BIGKEY && nbits <= 2048)
		normbits = 2048;
	else {
		err = E2BIG;
		goto errout;
	}

	shiftbits = normbits - nbits;

	me->me_modbits = nbits;
	me->me_shiftbits = shiftbits;
	me->me_normbits = normbits;
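
	/*
	 * Worked example (illustrative): a 1000-bit modulus gives
	 * normbits = 1024 and shiftbits = 24.  M, E and N are each
	 * shifted up by shiftbits below via ubsec_kshift_r(), so their
	 * most-significant bit lands at bit normbits - 1 of a 128-byte
	 * operand; this is the "sw normalization" done on behalf of
	 * chips without UBS_FLAGS_HWNORM.
	 */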

	/* Sanity check: result bits must be >= true modulus bits. */
	if (krp->krp_param[krp->krp_iparams].crp_nbits < nbits) {
		err = ERANGE;
		goto errout;
	}

	if (ubsec_dma_malloc(sc, sizeof(struct ubsec_mcr),
	    &me->me_q.q_mcr, 0)) {
		err = ENOMEM;
		goto errout;
	}
	mcr = (struct ubsec_mcr *)me->me_q.q_mcr.dma_vaddr;

	if (ubsec_dma_malloc(sc, sizeof(struct ubsec_ctx_modexp),
	    &me->me_q.q_ctx, 0)) {
		err = ENOMEM;
		goto errout;
	}

	mbits = ubsec_ksigbits(&krp->krp_param[UBS_MODEXP_PAR_M]);
	if (mbits > nbits) {
		err = E2BIG;
		goto errout;
	}
	if (ubsec_dma_malloc(sc, normbits / 8, &me->me_M, 0)) {
		err = ENOMEM;
		goto errout;
	}
	ubsec_kshift_r(shiftbits,
	    krp->krp_param[UBS_MODEXP_PAR_M].crp_p, mbits,
	    me->me_M.dma_vaddr, normbits);

	if (ubsec_dma_malloc(sc, normbits / 8, &me->me_C, 0)) {
		err = ENOMEM;
		goto errout;
	}
	memset(me->me_C.dma_vaddr, 0, me->me_C.dma_size);

	ebits = ubsec_ksigbits(&krp->krp_param[UBS_MODEXP_PAR_E]);
	if (ebits > nbits) {
		err = E2BIG;
		goto errout;
	}
	if (ubsec_dma_malloc(sc, normbits / 8, &me->me_E, 0)) {
		err = ENOMEM;
		goto errout;
	}
	ubsec_kshift_r(shiftbits,
	    krp->krp_param[UBS_MODEXP_PAR_E].crp_p, ebits,
	    me->me_E.dma_vaddr, normbits);

	if (ubsec_dma_malloc(sc, sizeof(struct ubsec_pktbuf),
	    &me->me_epb, 0)) {
		err = ENOMEM;
		goto errout;
	}
	epb = (struct ubsec_pktbuf *)me->me_epb.dma_vaddr;
	epb->pb_addr = htole32(me->me_E.dma_paddr);
	epb->pb_next = 0;
	epb->pb_len = htole32(normbits / 8);

#ifdef UBSEC_DEBUG
	if (ubsec_debug) {
		printf("Epb ");
		ubsec_dump_pb(epb);
	}
#endif

	mcr->mcr_pkts = htole16(1);
	mcr->mcr_flags = 0;
	mcr->mcr_cmdctxp = htole32(me->me_q.q_ctx.dma_paddr);
	mcr->mcr_reserved = 0;
	mcr->mcr_pktlen = 0;

	mcr->mcr_ipktbuf.pb_addr = htole32(me->me_M.dma_paddr);
	mcr->mcr_ipktbuf.pb_len = htole32(normbits / 8);
	mcr->mcr_ipktbuf.pb_next = htole32(me->me_epb.dma_paddr);

	mcr->mcr_opktbuf.pb_addr = htole32(me->me_C.dma_paddr);
	mcr->mcr_opktbuf.pb_next = 0;
	mcr->mcr_opktbuf.pb_len = htole32(normbits / 8);

#ifdef DIAGNOSTIC
	/* Misaligned output buffer will hang the chip. */
	if ((letoh32(mcr->mcr_opktbuf.pb_addr) & 3) != 0)
		panic("%s: modexp invalid addr 0x%x",
		    device_xname(sc->sc_dev), letoh32(mcr->mcr_opktbuf.pb_addr));
	if ((letoh32(mcr->mcr_opktbuf.pb_len) & 3) != 0)
		panic("%s: modexp invalid len 0x%x",
		    device_xname(sc->sc_dev), letoh32(mcr->mcr_opktbuf.pb_len));
#endif

	ctx = (struct ubsec_ctx_modexp *)me->me_q.q_ctx.dma_vaddr;
	memset(ctx, 0, sizeof(*ctx));
	ubsec_kshift_r(shiftbits,
	    krp->krp_param[UBS_MODEXP_PAR_N].crp_p, nbits,
	    ctx->me_N, normbits);
	ctx->me_len = htole16((normbits / 8) + (4 * sizeof(u_int16_t)));
	ctx->me_op = htole16(UBS_CTXOP_MODEXP);
	ctx->me_E_len = htole16(nbits);
	ctx->me_N_len = htole16(nbits);

#ifdef UBSEC_DEBUG
	if (ubsec_debug) {
		ubsec_dump_mcr(mcr);
		ubsec_dump_ctx2((struct ubsec_ctx_keyop *)ctx);
	}
#endif

	/*
	 * ubsec_feed2 will sync mcr and ctx, we just need to sync
	 * everything else.
	 */
	bus_dmamap_sync(sc->sc_dmat, me->me_M.dma_map,
	    0, me->me_M.dma_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->sc_dmat, me->me_E.dma_map,
	    0, me->me_E.dma_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->sc_dmat, me->me_C.dma_map,
	    0, me->me_C.dma_map->dm_mapsize, BUS_DMASYNC_PREREAD);
	bus_dmamap_sync(sc->sc_dmat, me->me_epb.dma_map,
	    0, me->me_epb.dma_map->dm_mapsize, BUS_DMASYNC_PREWRITE);

	/* Enqueue and we're done... */
	s = splnet();
	SIMPLEQ_INSERT_TAIL(&sc->sc_queue2, &me->me_q, q_next);
	ubsec_feed2(sc);
	ubsecstats.hst_modexp++;
	splx(s);

	return (0);

errout:
	if (me != NULL) {
		if (me->me_q.q_mcr.dma_map != NULL)
			ubsec_dma_free(sc, &me->me_q.q_mcr);
		if (me->me_q.q_ctx.dma_map != NULL) {
			memset(me->me_q.q_ctx.dma_vaddr, 0, me->me_q.q_ctx.dma_size);
			ubsec_dma_free(sc, &me->me_q.q_ctx);
		}
		if (me->me_M.dma_map != NULL) {
			memset(me->me_M.dma_vaddr, 0, me->me_M.dma_size);
			ubsec_dma_free(sc, &me->me_M);
		}
		if (me->me_E.dma_map != NULL) {
			memset(me->me_E.dma_vaddr, 0, me->me_E.dma_size);
			ubsec_dma_free(sc, &me->me_E);
		}
		if (me->me_C.dma_map != NULL) {
			memset(me->me_C.dma_vaddr, 0, me->me_C.dma_size);
			ubsec_dma_free(sc, &me->me_C);
		}
		if (me->me_epb.dma_map != NULL)
			ubsec_dma_free(sc, &me->me_epb);
		free(me, M_DEVBUF);
	}
	krp->krp_status = err;
	crypto_kdone(krp);
	return (0);
}
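
/*
 * The hardware-normalization variant below mirrors
 * ubsec_kprocess_modexp_sw() above, but copies the operands in
 * unshifted (a chip with UBS_FLAGS_HWNORM normalizes them itself)
 * and programs me_E_len with the true exponent length instead of the
 * modulus length.
 */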

/*
 * Start computation of cr[C] = (cr[M] ^ cr[E]) mod cr[N] (hw normalization)
 */
static int
ubsec_kprocess_modexp_hw(struct ubsec_softc *sc, struct cryptkop *krp,
    int hint)
{
	struct ubsec_q2_modexp *me;
	struct ubsec_mcr *mcr;
	struct ubsec_ctx_modexp *ctx;
	struct ubsec_pktbuf *epb;
	int s, err = 0;
	u_int nbits, normbits, mbits, shiftbits, ebits;

	me = (struct ubsec_q2_modexp *)malloc(sizeof *me, M_DEVBUF, M_NOWAIT);
	if (me == NULL) {
		err = ENOMEM;
		goto errout;
	}
	memset(me, 0, sizeof *me);
	me->me_krp = krp;
	me->me_q.q_type = UBS_CTXOP_MODEXP;

	nbits = ubsec_ksigbits(&krp->krp_param[UBS_MODEXP_PAR_N]);
	if (nbits <= 512)
		normbits = 512;
	else if (nbits <= 768)
		normbits = 768;
	else if (nbits <= 1024)
		normbits = 1024;
	else if (sc->sc_flags & UBS_FLAGS_BIGKEY && nbits <= 1536)
		normbits = 1536;
	else if (sc->sc_flags & UBS_FLAGS_BIGKEY && nbits <= 2048)
		normbits = 2048;
	else {
		err = E2BIG;
		goto errout;
	}

	shiftbits = normbits - nbits;

	/* XXX ??? */
	me->me_modbits = nbits;
	me->me_shiftbits = shiftbits;
	me->me_normbits = normbits;

	/* Sanity check: result bits must be >= true modulus bits. */
	if (krp->krp_param[krp->krp_iparams].crp_nbits < nbits) {
		err = ERANGE;
		goto errout;
	}

	if (ubsec_dma_malloc(sc, sizeof(struct ubsec_mcr),
	    &me->me_q.q_mcr, 0)) {
		err = ENOMEM;
		goto errout;
	}
	mcr = (struct ubsec_mcr *)me->me_q.q_mcr.dma_vaddr;

	if (ubsec_dma_malloc(sc, sizeof(struct ubsec_ctx_modexp),
	    &me->me_q.q_ctx, 0)) {
		err = ENOMEM;
		goto errout;
	}

	mbits = ubsec_ksigbits(&krp->krp_param[UBS_MODEXP_PAR_M]);
	if (mbits > nbits) {
		err = E2BIG;
		goto errout;
	}
	if (ubsec_dma_malloc(sc, normbits / 8, &me->me_M, 0)) {
		err = ENOMEM;
		goto errout;
	}
	memset(me->me_M.dma_vaddr, 0, normbits / 8);
	bcopy(krp->krp_param[UBS_MODEXP_PAR_M].crp_p,
	    me->me_M.dma_vaddr, (mbits + 7) / 8);

	if (ubsec_dma_malloc(sc, normbits / 8, &me->me_C, 0)) {
		err = ENOMEM;
		goto errout;
	}
	memset(me->me_C.dma_vaddr, 0, me->me_C.dma_size);

	ebits = ubsec_ksigbits(&krp->krp_param[UBS_MODEXP_PAR_E]);
	if (ebits > nbits) {
		err = E2BIG;
		goto errout;
	}
	if (ubsec_dma_malloc(sc, normbits / 8, &me->me_E, 0)) {
		err = ENOMEM;
		goto errout;
	}
	memset(me->me_E.dma_vaddr, 0, normbits / 8);
	bcopy(krp->krp_param[UBS_MODEXP_PAR_E].crp_p,
	    me->me_E.dma_vaddr, (ebits + 7) / 8);

	if (ubsec_dma_malloc(sc, sizeof(struct ubsec_pktbuf),
	    &me->me_epb, 0)) {
		err = ENOMEM;
		goto errout;
	}
	epb = (struct ubsec_pktbuf *)me->me_epb.dma_vaddr;
	epb->pb_addr = htole32(me->me_E.dma_paddr);
	epb->pb_next = 0;
	epb->pb_len = htole32((ebits + 7) / 8);

#ifdef UBSEC_DEBUG
	if (ubsec_debug) {
		printf("Epb ");
		ubsec_dump_pb(epb);
	}
#endif

	mcr->mcr_pkts = htole16(1);
	mcr->mcr_flags = 0;
	mcr->mcr_cmdctxp = htole32(me->me_q.q_ctx.dma_paddr);
	mcr->mcr_reserved = 0;
	mcr->mcr_pktlen = 0;

	mcr->mcr_ipktbuf.pb_addr = htole32(me->me_M.dma_paddr);
	mcr->mcr_ipktbuf.pb_len = htole32(normbits / 8);
	mcr->mcr_ipktbuf.pb_next = htole32(me->me_epb.dma_paddr);

	mcr->mcr_opktbuf.pb_addr = htole32(me->me_C.dma_paddr);
	mcr->mcr_opktbuf.pb_next = 0;
	mcr->mcr_opktbuf.pb_len = htole32(normbits / 8);
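
	/*
	 * At this point the request forms the following descriptor
	 * chain (illustrative sketch):
	 *
	 *	MCR --cmdctxp--> modexp context (carries N)
	 *	  ipktbuf: addr = M buffer --next--> epb: addr = E buffer
	 *	  opktbuf: addr = C buffer (result), next = 0
	 */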

#ifdef DIAGNOSTIC
	/* Misaligned output buffer will hang the chip. */
	if ((letoh32(mcr->mcr_opktbuf.pb_addr) & 3) != 0)
		panic("%s: modexp invalid addr 0x%x",
		    device_xname(sc->sc_dev), letoh32(mcr->mcr_opktbuf.pb_addr));
	if ((letoh32(mcr->mcr_opktbuf.pb_len) & 3) != 0)
		panic("%s: modexp invalid len 0x%x",
		    device_xname(sc->sc_dev), letoh32(mcr->mcr_opktbuf.pb_len));
#endif

	ctx = (struct ubsec_ctx_modexp *)me->me_q.q_ctx.dma_vaddr;
	memset(ctx, 0, sizeof(*ctx));
	memcpy(ctx->me_N, krp->krp_param[UBS_MODEXP_PAR_N].crp_p,
	    (nbits + 7) / 8);
	ctx->me_len = htole16((normbits / 8) + (4 * sizeof(u_int16_t)));
	ctx->me_op = htole16(UBS_CTXOP_MODEXP);
	ctx->me_E_len = htole16(ebits);
	ctx->me_N_len = htole16(nbits);

#ifdef UBSEC_DEBUG
	if (ubsec_debug) {
		ubsec_dump_mcr(mcr);
		ubsec_dump_ctx2((struct ubsec_ctx_keyop *)ctx);
	}
#endif

	/*
	 * ubsec_feed2 will sync mcr and ctx, we just need to sync
	 * everything else.
	 */
	bus_dmamap_sync(sc->sc_dmat, me->me_M.dma_map,
	    0, me->me_M.dma_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->sc_dmat, me->me_E.dma_map,
	    0, me->me_E.dma_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->sc_dmat, me->me_C.dma_map,
	    0, me->me_C.dma_map->dm_mapsize, BUS_DMASYNC_PREREAD);
	bus_dmamap_sync(sc->sc_dmat, me->me_epb.dma_map,
	    0, me->me_epb.dma_map->dm_mapsize, BUS_DMASYNC_PREWRITE);

	/* Enqueue and we're done... */
	s = splnet();
	SIMPLEQ_INSERT_TAIL(&sc->sc_queue2, &me->me_q, q_next);
	ubsec_feed2(sc);
	splx(s);

	return (0);

errout:
	if (me != NULL) {
		if (me->me_q.q_mcr.dma_map != NULL)
			ubsec_dma_free(sc, &me->me_q.q_mcr);
		if (me->me_q.q_ctx.dma_map != NULL) {
			memset(me->me_q.q_ctx.dma_vaddr, 0, me->me_q.q_ctx.dma_size);
			ubsec_dma_free(sc, &me->me_q.q_ctx);
		}
		if (me->me_M.dma_map != NULL) {
			memset(me->me_M.dma_vaddr, 0, me->me_M.dma_size);
			ubsec_dma_free(sc, &me->me_M);
		}
		if (me->me_E.dma_map != NULL) {
			memset(me->me_E.dma_vaddr, 0, me->me_E.dma_size);
			ubsec_dma_free(sc, &me->me_E);
		}
		if (me->me_C.dma_map != NULL) {
			memset(me->me_C.dma_vaddr, 0, me->me_C.dma_size);
			ubsec_dma_free(sc, &me->me_C);
		}
		if (me->me_epb.dma_map != NULL)
			ubsec_dma_free(sc, &me->me_epb);
		free(me, M_DEVBUF);
	}
	krp->krp_status = err;
	crypto_kdone(krp);
	return (0);
}

static int
ubsec_kprocess_rsapriv(struct ubsec_softc *sc, struct cryptkop *krp,
    int hint)
{
	struct ubsec_q2_rsapriv *rp = NULL;
	struct ubsec_mcr *mcr;
	struct ubsec_ctx_rsapriv *ctx;
	int s, err = 0;
	u_int padlen, msglen;

	msglen = ubsec_ksigbits(&krp->krp_param[UBS_RSAPRIV_PAR_P]);
	padlen = ubsec_ksigbits(&krp->krp_param[UBS_RSAPRIV_PAR_Q]);
	if (msglen > padlen)
		padlen = msglen;

	if (padlen <= 256)
		padlen = 256;
	else if (padlen <= 384)
		padlen = 384;
	else if (padlen <= 512)
		padlen = 512;
	else if (sc->sc_flags & UBS_FLAGS_BIGKEY && padlen <= 768)
		padlen = 768;
	else if (sc->sc_flags & UBS_FLAGS_BIGKEY && padlen <= 1024)
		padlen = 1024;
	else {
		err = E2BIG;
		goto errout;
	}

	if (ubsec_ksigbits(&krp->krp_param[UBS_RSAPRIV_PAR_DP]) > padlen) {
		err = E2BIG;
		goto errout;
	}

	if (ubsec_ksigbits(&krp->krp_param[UBS_RSAPRIV_PAR_DQ]) > padlen) {
		err = E2BIG;
		goto errout;
	}

	if (ubsec_ksigbits(&krp->krp_param[UBS_RSAPRIV_PAR_PINV]) > padlen) {
		err = E2BIG;
		goto errout;
	}

	rp = malloc(sizeof *rp, M_DEVBUF, M_NOWAIT|M_ZERO);
	if (rp == NULL)
		return (ENOMEM);
	rp->rpr_krp = krp;
	rp->rpr_q.q_type = UBS_CTXOP_RSAPRIV;

	if (ubsec_dma_malloc(sc, sizeof(struct ubsec_mcr),
	    &rp->rpr_q.q_mcr, 0)) {
		err = ENOMEM;
		goto errout;
	}
	mcr = (struct ubsec_mcr *)rp->rpr_q.q_mcr.dma_vaddr;

	if (ubsec_dma_malloc(sc, sizeof(struct ubsec_ctx_rsapriv),
	    &rp->rpr_q.q_ctx, 0)) {
		err = ENOMEM;
		goto errout;
	}
	ctx = (struct ubsec_ctx_rsapriv *)rp->rpr_q.q_ctx.dma_vaddr;
	memset(ctx, 0, sizeof *ctx);
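
	/*
	 * The context buffer rpr_buf is carved into five slots of
	 * padlen/8 bytes each, filled below in the order p, q, dp, dq,
	 * pinv.  E.g. with padlen = 512 each slot is 64 bytes and
	 * rpr_len works out to 4 * sizeof(u_int16_t) + 5 * 64 = 328.
	 */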

	/* Copy in p */
	bcopy(krp->krp_param[UBS_RSAPRIV_PAR_P].crp_p,
	    &ctx->rpr_buf[0 * (padlen / 8)],
	    (krp->krp_param[UBS_RSAPRIV_PAR_P].crp_nbits + 7) / 8);

	/* Copy in q */
	bcopy(krp->krp_param[UBS_RSAPRIV_PAR_Q].crp_p,
	    &ctx->rpr_buf[1 * (padlen / 8)],
	    (krp->krp_param[UBS_RSAPRIV_PAR_Q].crp_nbits + 7) / 8);

	/* Copy in dp */
	bcopy(krp->krp_param[UBS_RSAPRIV_PAR_DP].crp_p,
	    &ctx->rpr_buf[2 * (padlen / 8)],
	    (krp->krp_param[UBS_RSAPRIV_PAR_DP].crp_nbits + 7) / 8);

	/* Copy in dq */
	bcopy(krp->krp_param[UBS_RSAPRIV_PAR_DQ].crp_p,
	    &ctx->rpr_buf[3 * (padlen / 8)],
	    (krp->krp_param[UBS_RSAPRIV_PAR_DQ].crp_nbits + 7) / 8);

	/* Copy in pinv */
	bcopy(krp->krp_param[UBS_RSAPRIV_PAR_PINV].crp_p,
	    &ctx->rpr_buf[4 * (padlen / 8)],
	    (krp->krp_param[UBS_RSAPRIV_PAR_PINV].crp_nbits + 7) / 8);

	msglen = padlen * 2;
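
	/*
	 * Worked sizing example (illustrative): 512-bit p and q give
	 * padlen = 512, hence msglen = 1024 bits, and the msgin/msgout
	 * DMA buffers allocated below are (1024 + 7) / 8 = 128 bytes.
	 */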

	/* Copy in input message (aligned buffer/length). */
	if (ubsec_ksigbits(&krp->krp_param[UBS_RSAPRIV_PAR_MSGIN]) > msglen) {
		/* Is this likely? */
		err = E2BIG;
		goto errout;
	}
	if (ubsec_dma_malloc(sc, (msglen + 7) / 8, &rp->rpr_msgin, 0)) {
		err = ENOMEM;
		goto errout;
	}
	memset(rp->rpr_msgin.dma_vaddr, 0, (msglen + 7) / 8);
	bcopy(krp->krp_param[UBS_RSAPRIV_PAR_MSGIN].crp_p,
	    rp->rpr_msgin.dma_vaddr,
	    (krp->krp_param[UBS_RSAPRIV_PAR_MSGIN].crp_nbits + 7) / 8);

	/* Prepare space for output message (aligned buffer/length). */
	if (ubsec_ksigbits(&krp->krp_param[UBS_RSAPRIV_PAR_MSGOUT]) < msglen) {
		/* Is this likely? */
		err = E2BIG;
		goto errout;
	}
	if (ubsec_dma_malloc(sc, (msglen + 7) / 8, &rp->rpr_msgout, 0)) {
		err = ENOMEM;
		goto errout;
	}
	memset(rp->rpr_msgout.dma_vaddr, 0, (msglen + 7) / 8);

	mcr->mcr_pkts = htole16(1);
	mcr->mcr_flags = 0;
	mcr->mcr_cmdctxp = htole32(rp->rpr_q.q_ctx.dma_paddr);
	mcr->mcr_ipktbuf.pb_addr = htole32(rp->rpr_msgin.dma_paddr);
	mcr->mcr_ipktbuf.pb_next = 0;
	mcr->mcr_ipktbuf.pb_len = htole32(rp->rpr_msgin.dma_size);
	mcr->mcr_reserved = 0;
	mcr->mcr_pktlen = htole16(msglen);
	mcr->mcr_opktbuf.pb_addr = htole32(rp->rpr_msgout.dma_paddr);
	mcr->mcr_opktbuf.pb_next = 0;
	mcr->mcr_opktbuf.pb_len = htole32(rp->rpr_msgout.dma_size);

#ifdef DIAGNOSTIC
	if (rp->rpr_msgin.dma_paddr & 3 || rp->rpr_msgin.dma_size & 3) {
		panic("%s: rsapriv: invalid msgin 0x%lx(0x%lx)",
		    device_xname(sc->sc_dev), (u_long) rp->rpr_msgin.dma_paddr,
		    (u_long) rp->rpr_msgin.dma_size);
	}
	if (rp->rpr_msgout.dma_paddr & 3 || rp->rpr_msgout.dma_size & 3) {
		panic("%s: rsapriv: invalid msgout 0x%lx(0x%lx)",
		    device_xname(sc->sc_dev), (u_long) rp->rpr_msgout.dma_paddr,
		    (u_long) rp->rpr_msgout.dma_size);
	}
#endif

	ctx->rpr_len = (sizeof(u_int16_t) * 4) + (5 * (padlen / 8));
	ctx->rpr_op = htole16(UBS_CTXOP_RSAPRIV);
	ctx->rpr_q_len = htole16(padlen);
	ctx->rpr_p_len = htole16(padlen);

	/*
	 * ubsec_feed2 will sync mcr and ctx, we just need to sync
	 * everything else.
	 */
	bus_dmamap_sync(sc->sc_dmat, rp->rpr_msgin.dma_map,
	    0, rp->rpr_msgin.dma_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->sc_dmat, rp->rpr_msgout.dma_map,
	    0, rp->rpr_msgout.dma_map->dm_mapsize, BUS_DMASYNC_PREREAD);

	/* Enqueue and we're done... */
	s = splnet();
	SIMPLEQ_INSERT_TAIL(&sc->sc_queue2, &rp->rpr_q, q_next);
	ubsec_feed2(sc);
	ubsecstats.hst_modexpcrt++;
	splx(s);
	return (0);

errout:
	if (rp != NULL) {
		if (rp->rpr_q.q_mcr.dma_map != NULL)
			ubsec_dma_free(sc, &rp->rpr_q.q_mcr);
		if (rp->rpr_msgin.dma_map != NULL) {
			memset(rp->rpr_msgin.dma_vaddr, 0, rp->rpr_msgin.dma_size);
			ubsec_dma_free(sc, &rp->rpr_msgin);
		}
		if (rp->rpr_msgout.dma_map != NULL) {
			memset(rp->rpr_msgout.dma_vaddr, 0, rp->rpr_msgout.dma_size);
			ubsec_dma_free(sc, &rp->rpr_msgout);
		}
		free(rp, M_DEVBUF);
	}
	krp->krp_status = err;
	crypto_kdone(krp);
	return (0);
}
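
/*
 * Debug helpers: pretty-printers for packet buffers, key-op contexts
 * and MCRs.  They are compiled in only under UBSEC_DEBUG and consulted
 * at run time through the ubsec_debug flag.
 */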

#ifdef UBSEC_DEBUG
static void
ubsec_dump_pb(volatile struct ubsec_pktbuf *pb)
{
	printf("addr 0x%x (0x%x) next 0x%x\n",
	    pb->pb_addr, pb->pb_len, pb->pb_next);
}

static void
ubsec_dump_ctx2(volatile struct ubsec_ctx_keyop *c)
{
	printf("CTX (0x%x):\n", c->ctx_len);
	switch (letoh16(c->ctx_op)) {
	case UBS_CTXOP_RNGBYPASS:
	case UBS_CTXOP_RNGSHA1:
		break;
	case UBS_CTXOP_MODEXP:
	{
		struct ubsec_ctx_modexp *cx = (void *)c;
		int i, len;

		printf(" Elen %u, Nlen %u\n",
		    letoh16(cx->me_E_len), letoh16(cx->me_N_len));
		len = (cx->me_N_len + 7)/8;
		for (i = 0; i < len; i++)
			printf("%s%02x", (i == 0) ? " N: " : ":", cx->me_N[i]);
		printf("\n");
		break;
	}
	default:
		printf("unknown context: %x\n", c->ctx_op);
	}
	printf("END CTX\n");
}

static void
ubsec_dump_mcr(struct ubsec_mcr *mcr)
{
	volatile struct ubsec_mcr_add *ma;
	int i;

	printf("MCR:\n");
	printf(" pkts: %u, flags 0x%x\n",
	    letoh16(mcr->mcr_pkts), letoh16(mcr->mcr_flags));
	ma = (volatile struct ubsec_mcr_add *)&mcr->mcr_cmdctxp;
	for (i = 0; i < letoh16(mcr->mcr_pkts); i++) {
		printf(" %d: ctx 0x%x len 0x%x rsvd 0x%x\n", i,
		    letoh32(ma->mcr_cmdctxp), letoh16(ma->mcr_pktlen),
		    letoh16(ma->mcr_reserved));
		printf(" %d: ipkt ", i);
		ubsec_dump_pb(&ma->mcr_ipktbuf);
		printf(" %d: opkt ", i);
		ubsec_dump_pb(&ma->mcr_opktbuf);
		ma++;
	}
	printf("END MCR\n");
}
#endif /* UBSEC_DEBUG */

/*
 * Return the number of significant bits of a big number.
 */
static int
ubsec_ksigbits(struct crparam *cr)
{
	u_int plen = (cr->crp_nbits + 7) / 8;
	int i, sig = plen * 8;
	u_int8_t c, *p = cr->crp_p;

	for (i = plen - 1; i >= 0; i--) {
		c = p[i];
		if (c != 0) {
			while ((c & 0x80) == 0) {
				sig--;
				c <<= 1;
			}
			break;
		}
		sig -= 8;
	}
	return (sig);
}

static void
ubsec_kshift_r(u_int shiftbits, u_int8_t *src, u_int srcbits,
    u_int8_t *dst, u_int dstbits)
{
	u_int slen, dlen;
	int i, si, di, n;

	slen = (srcbits + 7) / 8;
	dlen = (dstbits + 7) / 8;

	for (i = 0; i < slen; i++)
		dst[i] = src[i];
	for (i = 0; i < dlen - slen; i++)
		dst[slen + i] = 0;

	n = shiftbits / 8;
	if (n != 0) {
		si = dlen - n - 1;
		di = dlen - 1;
		while (si >= 0)
			dst[di--] = dst[si--];
		while (di >= 0)
			dst[di--] = 0;
	}

	n = shiftbits % 8;
	if (n != 0) {
		for (i = dlen - 1; i > 0; i--)
			dst[i] = (dst[i] << n) |
			    (dst[i - 1] >> (8 - n));
		dst[0] = dst[0] << n;
	}
}

static void
ubsec_kshift_l(u_int shiftbits, u_int8_t *src, u_int srcbits,
    u_int8_t *dst, u_int dstbits)
{
	int slen, dlen, i, n;

	slen = (srcbits + 7) / 8;
	dlen = (dstbits + 7) / 8;

	n = shiftbits / 8;
	for (i = 0; i < slen; i++)
		dst[i] = src[i + n];
	for (i = 0; i < dlen - slen; i++)
		dst[slen + i] = 0;

	n = shiftbits % 8;
	if (n != 0) {
		for (i = 0; i < (dlen - 1); i++)
			dst[i] = (dst[i] >> n) | (dst[i + 1] << (8 - n));
		dst[dlen - 1] = dst[dlen - 1] >> n;
	}
}
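
/*
 * Worked example for the shift helpers above (illustrative).  Numbers
 * are stored least-significant byte first, so ubsec_kshift_r() moves
 * bits toward the most-significant end and ubsec_kshift_l() moves them
 * back:
 *
 *	u_int8_t src[] = { 0x01 };	(the value 1, srcbits = 1)
 *	u_int8_t dst[2];
 *
 *	ubsec_kshift_r(4, src, 1, dst, 16);	-> dst = { 0x10, 0x00 }
 *	ubsec_kshift_l(4, dst, 16, dst, 16);	-> dst = { 0x01, 0x00 }
 */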