1 /* $NetBSD: hifn7751.c,v 1.23 2005/06/28 00:28:42 thorpej Exp $ */ 2 /* $FreeBSD: hifn7751.c,v 1.5.2.7 2003/10/08 23:52:00 sam Exp $ */ 3 /* $OpenBSD: hifn7751.c,v 1.140 2003/08/01 17:55:54 deraadt Exp $ */ 4 5 /* 6 * Invertex AEON / Hifn 7751 driver 7 * Copyright (c) 1999 Invertex Inc. All rights reserved. 8 * Copyright (c) 1999 Theo de Raadt 9 * Copyright (c) 2000-2001 Network Security Technologies, Inc. 10 * http://www.netsec.net 11 * Copyright (c) 2003 Hifn Inc. 12 * 13 * This driver is based on a previous driver by Invertex, for which they 14 * requested: Please send any comments, feedback, bug-fixes, or feature 15 * requests to software@invertex.com. 16 * 17 * Redistribution and use in source and binary forms, with or without 18 * modification, are permitted provided that the following conditions 19 * are met: 20 * 21 * 1. Redistributions of source code must retain the above copyright 22 * notice, this list of conditions and the following disclaimer. 23 * 2. Redistributions in binary form must reproduce the above copyright 24 * notice, this list of conditions and the following disclaimer in the 25 * documentation and/or other materials provided with the distribution. 26 * 3. The name of the author may not be used to endorse or promote products 27 * derived from this software without specific prior written permission. 28 * 29 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 30 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 31 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
32 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 33 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 34 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 35 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 36 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 37 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 38 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 39 * 40 * Effort sponsored in part by the Defense Advanced Research Projects 41 * Agency (DARPA) and Air Force Research Laboratory, Air Force 42 * Materiel Command, USAF, under agreement number F30602-01-2-0537. 43 * 44 */ 45 46 /* 47 * Driver for various Hifn pre-HIPP encryption processors. 48 */ 49 50 #include <sys/cdefs.h> 51 __KERNEL_RCSID(0, "$NetBSD: hifn7751.c,v 1.23 2005/06/28 00:28:42 thorpej Exp $"); 52 53 #include "rnd.h" 54 #include "opencrypto.h" 55 56 #if NRND == 0 || NOPENCRYPTO == 0 57 #error hifn7751 requires rnd and opencrypto pseudo-devices 58 #endif 59 60 61 #include <sys/param.h> 62 #include <sys/systm.h> 63 #include <sys/proc.h> 64 #include <sys/errno.h> 65 #include <sys/malloc.h> 66 #include <sys/kernel.h> 67 #include <sys/mbuf.h> 68 #include <sys/device.h> 69 70 #include <uvm/uvm_extern.h> 71 72 73 #ifdef __OpenBSD__ 74 #include <crypto/crypto.h> 75 #include <dev/rndvar.h> 76 #else 77 #include <opencrypto/cryptodev.h> 78 #include <sys/rnd.h> 79 #endif 80 81 #include <dev/pci/pcireg.h> 82 #include <dev/pci/pcivar.h> 83 #include <dev/pci/pcidevs.h> 84 85 #include <dev/pci/hifn7751reg.h> 86 #include <dev/pci/hifn7751var.h> 87 88 #undef HIFN_DEBUG 89 90 #ifdef __NetBSD__ 91 #define HIFN_NO_RNG /* until statistically tested */ 92 #define M_DUP_PKTHDR M_COPY_PKTHDR /* XXX */ 93 #endif 94 95 #ifdef HIFN_DEBUG 96 extern int hifn_debug; /* patchable */ 97 int hifn_debug = 1; 98 #endif 99 100 #ifdef __OpenBSD__ 101 #define 
HAVE_CRYPTO_LZS /* OpenBSD OCF supports CRYPTO_COMP_LZS */ 102 #endif 103 104 /* 105 * Prototypes and count for the pci_device structure 106 */ 107 #ifdef __OpenBSD__ 108 static int hifn_probe((struct device *, void *, void *); 109 #else 110 static int hifn_probe(struct device *, struct cfdata *, void *); 111 #endif 112 static void hifn_attach(struct device *, struct device *, void *); 113 114 CFATTACH_DECL(hifn, sizeof(struct hifn_softc), 115 hifn_probe, hifn_attach, NULL, NULL); 116 117 #ifdef __OpenBSD__ 118 struct cfdriver hifn_cd = { 119 0, "hifn", DV_DULL 120 }; 121 #endif 122 123 static void hifn_reset_board(struct hifn_softc *, int); 124 static void hifn_reset_puc(struct hifn_softc *); 125 static void hifn_puc_wait(struct hifn_softc *); 126 static const char *hifn_enable_crypto(struct hifn_softc *, pcireg_t); 127 static void hifn_set_retry(struct hifn_softc *); 128 static void hifn_init_dma(struct hifn_softc *); 129 static void hifn_init_pci_registers(struct hifn_softc *); 130 static int hifn_sramsize(struct hifn_softc *); 131 static int hifn_dramsize(struct hifn_softc *); 132 static int hifn_ramtype(struct hifn_softc *); 133 static void hifn_sessions(struct hifn_softc *); 134 static int hifn_intr(void *); 135 static u_int hifn_write_command(struct hifn_command *, u_int8_t *); 136 static u_int32_t hifn_next_signature(u_int32_t a, u_int cnt); 137 static int hifn_newsession(void*, u_int32_t *, struct cryptoini *); 138 static int hifn_freesession(void*, u_int64_t); 139 static int hifn_process(void*, struct cryptop *, int); 140 static void hifn_callback(struct hifn_softc *, struct hifn_command *, 141 u_int8_t *); 142 static int hifn_crypto(struct hifn_softc *, struct hifn_command *, 143 struct cryptop*, int); 144 static int hifn_readramaddr(struct hifn_softc *, int, u_int8_t *); 145 static int hifn_writeramaddr(struct hifn_softc *, int, u_int8_t *); 146 static int hifn_dmamap_aligned(bus_dmamap_t); 147 static int hifn_dmamap_load_src(struct hifn_softc *, 148 
struct hifn_command *); 149 static int hifn_dmamap_load_dst(struct hifn_softc *, 150 struct hifn_command *); 151 static int hifn_init_pubrng(struct hifn_softc *); 152 #ifndef HIFN_NO_RNG 153 static static void hifn_rng(void *); 154 #endif 155 static void hifn_tick(void *); 156 static void hifn_abort(struct hifn_softc *); 157 static void hifn_alloc_slot(struct hifn_softc *, int *, int *, int *, 158 int *); 159 static void hifn_write_4(struct hifn_softc *, int, bus_size_t, u_int32_t); 160 static u_int32_t hifn_read_4(struct hifn_softc *, int, bus_size_t); 161 #ifdef HAVE_CRYPTO_LZS 162 static int hifn_compression(struct hifn_softc *, struct cryptop *, 163 struct hifn_command *); 164 static struct mbuf *hifn_mkmbuf_chain(int, struct mbuf *); 165 static int hifn_compress_enter(struct hifn_softc *, struct hifn_command *); 166 static void hifn_callback_comp(struct hifn_softc *, struct hifn_command *, 167 u_int8_t *); 168 #endif /* HAVE_CRYPTO_LZS */ 169 170 171 struct hifn_stats hifnstats; 172 173 static const struct hifn_product { 174 pci_vendor_id_t hifn_vendor; 175 pci_product_id_t hifn_product; 176 int hifn_flags; 177 const char *hifn_name; 178 } hifn_products[] = { 179 { PCI_VENDOR_INVERTEX, PCI_PRODUCT_INVERTEX_AEON, 180 0, 181 "Invertex AEON", 182 }, 183 184 { PCI_VENDOR_HIFN, PCI_PRODUCT_HIFN_7751, 185 0, 186 "Hifn 7751", 187 }, 188 { PCI_VENDOR_NETSEC, PCI_PRODUCT_NETSEC_7751, 189 0, 190 "Hifn 7751 (NetSec)" 191 }, 192 193 { PCI_VENDOR_HIFN, PCI_PRODUCT_HIFN_7811, 194 HIFN_IS_7811 | HIFN_HAS_RNG | HIFN_HAS_LEDS | HIFN_NO_BURSTWRITE, 195 "Hifn 7811", 196 }, 197 198 { PCI_VENDOR_HIFN, PCI_PRODUCT_HIFN_7951, 199 HIFN_HAS_RNG | HIFN_HAS_PUBLIC, 200 "Hifn 7951", 201 }, 202 203 { PCI_VENDOR_HIFN, PCI_PRODUCT_HIFN_7955, 204 HIFN_HAS_RNG | HIFN_HAS_PUBLIC | HIFN_IS_7956 | HIFN_HAS_AES, 205 "Hifn 7955", 206 }, 207 208 { PCI_VENDOR_HIFN, PCI_PRODUCT_HIFN_7956, 209 HIFN_HAS_RNG | HIFN_HAS_PUBLIC | HIFN_IS_7956 | HIFN_HAS_AES, 210 "Hifn 7956", 211 }, 212 213 214 { 0, 0, 215 
	  0,
	  NULL
	}
};

/*
 * Look a device up in hifn_products[] by PCI vendor/product ID.
 * Returns the matching table entry, or NULL if the device is not ours.
 */
static const struct hifn_product *
hifn_lookup(const struct pci_attach_args *pa)
{
	const struct hifn_product *hp;

	for (hp = hifn_products; hp->hifn_name != NULL; hp++) {
		if (PCI_VENDOR(pa->pa_id) == hp->hifn_vendor &&
		    PCI_PRODUCT(pa->pa_id) == hp->hifn_product)
			return (hp);
	}
	return (NULL);
}

/* Autoconfiguration match: claim the device iff it is in our table. */
static int
hifn_probe(struct device *parent, struct cfdata *match, void *aux)
{
	struct pci_attach_args *pa = (struct pci_attach_args *) aux;

	if (hifn_lookup(pa) != NULL)
		return (1);

	return (0);
}

/*
 * Autoconfiguration attach: map both register BARs, allocate and map the
 * shared DMA descriptor area, unlock the crypto engine, size the on-board
 * RAM, hook the interrupt, and register our algorithms with opencrypto.
 * On failure, tears down in reverse order via the fail_* labels.
 */
static void
hifn_attach(struct device *parent, struct device *self, void *aux)
{
	struct hifn_softc *sc = (struct hifn_softc *)self;
	struct pci_attach_args *pa = aux;
	const struct hifn_product *hp;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	const char *hifncap;
	char rbase;
	bus_size_t iosize0, iosize1;
	u_int32_t cmd;
	u_int16_t ena;
	bus_dma_segment_t seg;
	bus_dmamap_t dmamap;
	int rseg;
	caddr_t kva;

	hp = hifn_lookup(pa);
	if (hp == NULL) {
		/* probe matched, so the table lookup cannot fail */
		printf("\n");
		panic("hifn_attach: impossible");
	}

	aprint_naive(": Crypto processor\n");
	aprint_normal(": %s, rev. %d\n", hp->hifn_name,
	    PCI_REVISION(pa->pa_class));

	sc->sc_pci_pc = pa->pa_pc;
	sc->sc_pci_tag = pa->pa_tag;

	sc->sc_flags = hp->hifn_flags;

	/* The chip is a bus master; make sure that is enabled. */
	cmd = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	cmd |= PCI_COMMAND_MASTER_ENABLE;
	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, cmd);

	/* BAR0: "group 0" registers; BAR1: "group 1" (DMA) registers. */
	if (pci_mapreg_map(pa, HIFN_BAR0, PCI_MAPREG_TYPE_MEM, 0,
	    &sc->sc_st0, &sc->sc_sh0, NULL, &iosize0)) {
		aprint_error("%s: can't map mem space %d\n",
		    sc->sc_dv.dv_xname, 0);
		return;
	}

	if (pci_mapreg_map(pa, HIFN_BAR1, PCI_MAPREG_TYPE_MEM, 0,
	    &sc->sc_st1, &sc->sc_sh1, NULL, &iosize1)) {
		aprint_error("%s: can't find mem space %d\n",
		    sc->sc_dv.dv_xname, 1);
		goto fail_io0;
	}

	hifn_set_retry(sc);

	/* 7811 cannot take back-to-back writes; prime the write-cache state */
	if (sc->sc_flags & HIFN_NO_BURSTWRITE) {
		sc->sc_waw_lastgroup = -1;
		sc->sc_waw_lastreg = 1;
	}

	/*
	 * Allocate one physically contiguous, page-aligned chunk holding
	 * all four descriptor rings plus command/result buffers
	 * (struct hifn_dma), map it into KVA, and load it into a DMA map.
	 */
	sc->sc_dmat = pa->pa_dmat;
	if (bus_dmamem_alloc(sc->sc_dmat, sizeof(*sc->sc_dma), PAGE_SIZE, 0,
	    &seg, 1, &rseg, BUS_DMA_NOWAIT)) {
		aprint_error("%s: can't alloc DMA buffer\n",
		    sc->sc_dv.dv_xname);
		goto fail_io1;
	}
	if (bus_dmamem_map(sc->sc_dmat, &seg, rseg, sizeof(*sc->sc_dma), &kva,
	    BUS_DMA_NOWAIT)) {
		aprint_error("%s: can't map DMA buffers (%lu bytes)\n",
		    sc->sc_dv.dv_xname, (u_long)sizeof(*sc->sc_dma));
		bus_dmamem_free(sc->sc_dmat, &seg, rseg);
		goto fail_io1;
	}
	if (bus_dmamap_create(sc->sc_dmat, sizeof(*sc->sc_dma), 1,
	    sizeof(*sc->sc_dma), 0, BUS_DMA_NOWAIT, &dmamap)) {
		aprint_error("%s: can't create DMA map\n",
		    sc->sc_dv.dv_xname);
		bus_dmamem_unmap(sc->sc_dmat, kva, sizeof(*sc->sc_dma));
		bus_dmamem_free(sc->sc_dmat, &seg, rseg);
		goto fail_io1;
	}
	if (bus_dmamap_load(sc->sc_dmat, dmamap, kva, sizeof(*sc->sc_dma),
	    NULL, BUS_DMA_NOWAIT)) {
		aprint_error("%s: can't load DMA map\n",
		    sc->sc_dv.dv_xname);
		bus_dmamap_destroy(sc->sc_dmat, dmamap);
		bus_dmamem_unmap(sc->sc_dmat, kva, sizeof(*sc->sc_dma));
		bus_dmamem_free(sc->sc_dmat, &seg, rseg);
		goto fail_io1;
	}
	sc->sc_dmamap = dmamap;
	sc->sc_dma = (struct hifn_dma *)kva;
	bzero(sc->sc_dma, sizeof(*sc->sc_dma));

	hifn_reset_board(sc, 0);

	/* Unlock the crypto engine; hifncap names the enabled capability. */
	if ((hifncap = hifn_enable_crypto(sc, pa->pa_id)) == NULL) {
		aprint_error("%s: crypto enabling failed\n",
		    sc->sc_dv.dv_xname);
		goto fail_mem;
	}
	hifn_reset_puc(sc);

	hifn_init_dma(sc);
	hifn_init_pci_registers(sc);

	/* XXX can't dynamically determine ram type for 795x; force dram */
	if (sc->sc_flags & HIFN_IS_7956)
		sc->sc_drammodel = 1;
	else if (hifn_ramtype(sc))
		goto fail_mem;

	if (sc->sc_drammodel == 0)
		hifn_sramsize(sc);
	else
		hifn_dramsize(sc);

	/*
	 * Workaround for NetSec 7751 rev A: half ram size because two
	 * of the address lines were left floating
	 */
	if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_NETSEC &&
	    PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_NETSEC_7751 &&
	    PCI_REVISION(pa->pa_class) == 0x61)
		sc->sc_ramsize >>= 1;

	if (pci_intr_map(pa, &ih)) {
		aprint_error("%s: couldn't map interrupt\n",
		    sc->sc_dv.dv_xname);
		goto fail_mem;
	}
	intrstr = pci_intr_string(pc, ih);
#ifdef	__OpenBSD__
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, hifn_intr, sc,
	    self->dv_xname);
#else
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, hifn_intr, sc);
#endif
	if (sc->sc_ih == NULL) {
		aprint_error("%s: couldn't establish interrupt\n",
		    sc->sc_dv.dv_xname);
		if (intrstr != NULL)
			aprint_normal(" at %s", intrstr);
		aprint_normal("\n");
		goto fail_mem;
	}

	hifn_sessions(sc);

	/* rseg is reused here purely for pretty-printing the RAM size. */
	rseg = sc->sc_ramsize / 1024;
	rbase = 'K';
	if (sc->sc_ramsize >= (1024 * 1024)) {
		rbase = 'M';
		rseg /= 1024;
	}
	aprint_normal("%s: %s, %d%cB %cram, interrupting at %s\n",
	    sc->sc_dv.dv_xname, hifncap, rseg, rbase,
	    sc->sc_drammodel ? 'd' : 's', intrstr);

	sc->sc_cid = crypto_get_driverid(0);
	if (sc->sc_cid < 0) {
		aprint_error("%s: couldn't get crypto driver id\n",
		    sc->sc_dv.dv_xname);
		goto fail_intr;
	}

	/* Re-read the enable level to decide which algorithms to offer. */
	WRITE_REG_0(sc, HIFN_0_PUCNFG,
	    READ_REG_0(sc, HIFN_0_PUCNFG) | HIFN_PUCNFG_CHIPID);
	ena = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA;

	switch (ena) {
	case HIFN_PUSTAT_ENA_2:
		/* Strong crypto: 3DES/ARC4 (and AES on 7955/7956)... */
		crypto_register(sc->sc_cid, CRYPTO_3DES_CBC, 0, 0,
		    hifn_newsession, hifn_freesession, hifn_process, sc);
		crypto_register(sc->sc_cid, CRYPTO_ARC4, 0, 0,
		    hifn_newsession, hifn_freesession, hifn_process, sc);
		if (sc->sc_flags & HIFN_HAS_AES)
			crypto_register(sc->sc_cid, CRYPTO_AES_CBC, 0, 0,
			    hifn_newsession, hifn_freesession,
			    hifn_process, sc);
		/*FALLTHROUGH*/
	case HIFN_PUSTAT_ENA_1:
		/* ...plus the base DES/hash set available at level 1. */
		crypto_register(sc->sc_cid, CRYPTO_MD5, 0, 0,
		    hifn_newsession, hifn_freesession, hifn_process, sc);
		crypto_register(sc->sc_cid, CRYPTO_SHA1, 0, 0,
		    hifn_newsession, hifn_freesession, hifn_process, sc);
		crypto_register(sc->sc_cid, CRYPTO_MD5_HMAC, 0, 0,
		    hifn_newsession, hifn_freesession, hifn_process, sc);
		crypto_register(sc->sc_cid, CRYPTO_SHA1_HMAC, 0, 0,
		    hifn_newsession, hifn_freesession, hifn_process, sc);
		crypto_register(sc->sc_cid, CRYPTO_DES_CBC, 0, 0,
		    hifn_newsession, hifn_freesession, hifn_process, sc);
		break;
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, 0,
	    sc->sc_dmamap->dm_mapsize,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	if (sc->sc_flags & (HIFN_HAS_PUBLIC | HIFN_HAS_RNG))
		hifn_init_pubrng(sc);

#ifdef	__OpenBSD__
	timeout_set(&sc->sc_tickto, hifn_tick, sc);
	timeout_add(&sc->sc_tickto, hz);
#else
	callout_init(&sc->sc_tickto);
	callout_reset(&sc->sc_tickto, hz, hifn_tick, sc);
#endif
	return;

fail_intr:
	pci_intr_disestablish(pc, sc->sc_ih);
fail_mem:
	bus_dmamap_unload(sc->sc_dmat, dmamap);
	bus_dmamap_destroy(sc->sc_dmat, dmamap);
	bus_dmamem_unmap(sc->sc_dmat, kva, sizeof(*sc->sc_dma));
	bus_dmamem_free(sc->sc_dmat, &seg, rseg);

	/* Turn off DMA polling */
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
	    HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);

fail_io1:
	bus_space_unmap(sc->sc_st1, sc->sc_sh1, iosize1);
fail_io0:
	bus_space_unmap(sc->sc_st0, sc->sc_sh0, iosize0);
}

/*
 * Reset and enable the public-key engine and the RNG, as present.
 * Returns 0 on success, 1 if the public-key engine fails to come
 * out of reset.
 */
static int
hifn_init_pubrng(struct hifn_softc *sc)
{
	u_int32_t r;
	int i;

	if ((sc->sc_flags & HIFN_IS_7811) == 0) {
		/* Reset 7951 public key/rng engine */
		WRITE_REG_1(sc, HIFN_1_PUB_RESET,
		    READ_REG_1(sc, HIFN_1_PUB_RESET) | HIFN_PUBRST_RESET);

		/* Poll up to 100ms for the reset bit to self-clear. */
		for (i = 0; i < 100; i++) {
			DELAY(1000);
			if ((READ_REG_1(sc, HIFN_1_PUB_RESET) &
			    HIFN_PUBRST_RESET) == 0)
				break;
		}

		if (i == 100) {
			printf("%s: public key init failed\n",
			    sc->sc_dv.dv_xname);
			return (1);
		}
	}

	/* Enable the rng, if available */
	if (sc->sc_flags & HIFN_HAS_RNG) {
		if (sc->sc_flags & HIFN_IS_7811) {
			/* 7811: disable, reconfigure, then re-enable. */
			r = READ_REG_1(sc, HIFN_1_7811_RNGENA);
			if (r & HIFN_7811_RNGENA_ENA) {
				r &= ~HIFN_7811_RNGENA_ENA;
				WRITE_REG_1(sc, HIFN_1_7811_RNGENA, r);
			}
			WRITE_REG_1(sc, HIFN_1_7811_RNGCFG,
			    HIFN_7811_RNGCFG_DEFL);
			r |= HIFN_7811_RNGENA_ENA;
			WRITE_REG_1(sc, HIFN_1_7811_RNGENA, r);
		} else
			WRITE_REG_1(sc, HIFN_1_RNG_CONFIG,
			    READ_REG_1(sc, HIFN_1_RNG_CONFIG) |
			    HIFN_RNGCFG_ENA);

		/* Harvest roughly 100 times a second (at least 1 tick). */
		sc->sc_rngfirst = 1;
		if (hz >= 100)
			sc->sc_rnghz = hz / 100;
		else
			sc->sc_rnghz = 1;
#ifndef	HIFN_NO_RNG
#ifdef	__OpenBSD__
		timeout_set(&sc->sc_rngto, hifn_rng, sc);
		timeout_add(&sc->sc_rngto, sc->sc_rnghz);
#else	/* !__OpenBSD__ */
		callout_init(&sc->sc_rngto);
		callout_reset(&sc->sc_rngto, sc->sc_rnghz, hifn_rng, sc);
#endif	/* !__OpenBSD__ */
#endif	/* HIFN_NO_RNG */
	}

	/* Enable public key engine, if available */
	if (sc->sc_flags & HIFN_HAS_PUBLIC) {
		WRITE_REG_1(sc, HIFN_1_PUB_IEN, HIFN_PUBIEN_DONE);
		sc->sc_dmaier |= HIFN_DMAIER_PUBDONE;
		WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
	}

	return (0);
}

#ifndef HIFN_NO_RNG
/*
 * Periodic (callout-driven) RNG harvester.  Reads entropy words from the
 * chip and feeds them to the kernel entropy pool, discarding the very
 * first sample.  NOTE: the entire body is compiled out on NetBSD
 * (see HIFN_NO_RNG above -- RNG output not yet statistically tested),
 * leaving this an empty function there.
 */
static void
hifn_rng(void *vsc)
{
#ifndef __NetBSD__
	struct hifn_softc *sc = vsc;
	u_int32_t num1, sts, num2;
	int i;

	if (sc->sc_flags & HIFN_IS_7811) {
		/* Drain up to 5 pairs per invocation from the RNG FIFO. */
		for (i = 0; i < 5; i++) {
			sts = READ_REG_1(sc, HIFN_1_7811_RNGSTS);
			if (sts & HIFN_7811_RNGSTS_UFL) {
				/* Underflow: stop harvesting permanently. */
				printf("%s: RNG underflow: disabling\n",
				    sc->sc_dv.dv_xname);
				return;
			}
			if ((sts & HIFN_7811_RNGSTS_RDY) == 0)
				break;

			/*
			 * There are at least two words in the RNG FIFO
			 * at this point.
			 */
			num1 = READ_REG_1(sc, HIFN_1_7811_RNGDAT);
			num2 = READ_REG_1(sc, HIFN_1_7811_RNGDAT);
			if (sc->sc_rngfirst)
				sc->sc_rngfirst = 0;
			else {
				add_true_randomness(num1);
				add_true_randomness(num2);
			}
		}
	} else {
		num1 = READ_REG_1(sc, HIFN_1_RNG_DATA);

		if (sc->sc_rngfirst)
			sc->sc_rngfirst = 0;
		else
			add_true_randomness(num1);
	}

	/* Re-arm ourselves for the next harvest interval. */
#ifdef	__OpenBSD__
	timeout_add(&sc->sc_rngto, sc->sc_rnghz);
#else
	callout_reset(&sc->sc_rngto, sc->sc_rnghz, hifn_rng, sc);
#endif
#endif	/*!__NetBSD__*/
}
#endif

/*
 * Busy-wait (up to 5ms) for the processing unit's RESET bit to clear;
 * complain if it never does.
 */
static void
hifn_puc_wait(struct hifn_softc *sc)
{
	int i;

	for (i = 5000; i > 0; i--) {
		DELAY(1);
		if (!(READ_REG_0(sc, HIFN_0_PUCTRL) & HIFN_PUCTRL_RESET))
			break;
	}
	if (!i)
		printf("%s: proc unit did not reset\n", sc->sc_dv.dv_xname);
}

/*
 * Reset the processing unit.
 */
static void
hifn_reset_puc(struct hifn_softc *sc)
{
	/* Reset processing unit */
	WRITE_REG_0(sc, HIFN_0_PUCTRL, HIFN_PUCTRL_DMAENA);
	hifn_puc_wait(sc);
}

/*
 * Clear the PCI TRDY-timeout / retry-timeout fields so the chip never
 * gives up on a target that deasserts TRDY for a long time.
 */
static void
hifn_set_retry(struct hifn_softc *sc)
{
	u_int32_t r;

	r = pci_conf_read(sc->sc_pci_pc, sc->sc_pci_tag, HIFN_TRDY_TIMEOUT);
	r &= 0xffff0000;	/* zero the two low-byte timeout fields */
	pci_conf_write(sc->sc_pci_pc, sc->sc_pci_tag, HIFN_TRDY_TIMEOUT, r);
}

/*
 * Resets the board.  Values in the registers are left as is
 * from the reset (i.e. initial values are assigned elsewhere).
 * "full" selects a full master+DMA reset; otherwise only a master
 * reset plus processing-unit reset is done.
 */
static void
hifn_reset_board(struct hifn_softc *sc, int full)
{
	u_int32_t reg;

	/*
	 * Set polling in the DMA configuration register to zero.  0x7 avoids
	 * resetting the board and zeros out the other fields.
	 */
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
	    HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);

	/*
	 * Now that polling has been disabled, we have to wait 1 ms
	 * before resetting the board.
	 */
	DELAY(1000);

	/* Reset the DMA unit */
	if (full) {
		WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MODE);
		DELAY(1000);
	} else {
		WRITE_REG_1(sc, HIFN_1_DMA_CNFG,
		    HIFN_DMACNFG_MODE | HIFN_DMACNFG_MSTRESET);
		hifn_reset_puc(sc);
	}

	/* The descriptor rings are now stale; wipe them. */
	bzero(sc->sc_dma, sizeof(*sc->sc_dma));

	/* Bring dma unit out of reset */
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
	    HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);

	hifn_puc_wait(sc);

	hifn_set_retry(sc);

	if (sc->sc_flags & HIFN_IS_7811) {
		/* Wait up to 1s for the 7811's context RAM init to finish. */
		for (reg = 0; reg < 1000; reg++) {
			if (READ_REG_1(sc, HIFN_1_7811_MIPSRST) &
			    HIFN_MIPSRST_CRAMINIT)
				break;
			DELAY(1000);
		}
		if (reg == 1000)
			printf(": cram init timeout\n");
	}
}

/*
 * One step of the unlock-signature LFSR: shift 'a' left by 'cnt' bits,
 * feeding back the parity of the bits selected by the 0x80080125 tap mask.
 */
static u_int32_t
hifn_next_signature(u_int32_t a, u_int cnt)
{
	int i;
	u_int32_t v;

	for (i = 0; i < cnt; i++) {

		/* get the parity */
		v = a & 0x80080125;
		v ^= v >> 16;
		v ^= v >> 8;
		v ^= v >> 4;
		v ^= v >> 2;
		v ^= v >> 1;

		a = (v & 1) ^ (a << 1);
	}

	return a;
}

/*
 * Per-device 13-byte unlock keys, indexed by PCI vendor/product.
 * All known cards ship with the all-zeros key.
 */
struct pci2id {
	u_short		pci_vendor;
	u_short		pci_prod;
	char		card_id[13];
} static const pci2id[] = {
	{
		PCI_VENDOR_HIFN,
		PCI_PRODUCT_HIFN_7951,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	}, {
		PCI_VENDOR_HIFN,
		PCI_PRODUCT_HIFN_7955,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	}, {
		PCI_VENDOR_HIFN,
		PCI_PRODUCT_HIFN_7956,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	}, {
		PCI_VENDOR_NETSEC,
		PCI_PRODUCT_NETSEC_7751,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	}, {
		PCI_VENDOR_INVERTEX,
		PCI_PRODUCT_INVERTEX_AEON,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	}, {
		PCI_VENDOR_HIFN,
		PCI_PRODUCT_HIFN_7811,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	}, {
		/*
		 * Other vendors share this PCI ID as well, such as
		 * http://www.powercrypt.com, and obviously they also
		 * use the same key.
		 */
		PCI_VENDOR_HIFN,
		PCI_PRODUCT_HIFN_7751,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	},
};

/*
 * Checks to see if crypto is already enabled.  If crypto isn't enable,
 * "hifn_enable_crypto" is called to enable it.  The check is important,
 * as enabling crypto twice will lock the board.
 * Returns a human-readable capability string on success, NULL on failure.
 */
static const char *
hifn_enable_crypto(struct hifn_softc *sc, pcireg_t pciid)
{
	u_int32_t dmacfg, ramcfg, encl, addr, i;
	const char *offtbl = NULL;

	/* Find the unlock key for this exact vendor/product. */
	for (i = 0; i < sizeof(pci2id)/sizeof(pci2id[0]); i++) {
		if (pci2id[i].pci_vendor == PCI_VENDOR(pciid) &&
		    pci2id[i].pci_prod == PCI_PRODUCT(pciid)) {
			offtbl = pci2id[i].card_id;
			break;
		}
	}

	if (offtbl == NULL) {
#ifdef HIFN_DEBUG
		aprint_debug("%s: Unknown card!\n", sc->sc_dv.dv_xname);
#endif
		return (NULL);
	}

	/* Save registers we scribble on; restored at "report:" below. */
	ramcfg = READ_REG_0(sc, HIFN_0_PUCNFG);
	dmacfg = READ_REG_1(sc, HIFN_1_DMA_CNFG);

	/*
	 * The RAM config register's encrypt level bit needs to be set before
	 * every read performed on the encryption level register.
	 */
	WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg | HIFN_PUCNFG_CHIPID);

	encl = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA;

	/*
	 * Make sure we don't re-unlock.  Two unlocks kills chip until the
	 * next reboot.
	 */
	if (encl == HIFN_PUSTAT_ENA_1 || encl == HIFN_PUSTAT_ENA_2) {
#ifdef HIFN_DEBUG
		aprint_debug("%s: Strong Crypto already enabled!\n",
		    sc->sc_dv.dv_xname);
#endif
		goto report;
	}

	if (encl != 0 && encl != HIFN_PUSTAT_ENA_0) {
#ifdef HIFN_DEBUG
		aprint_debug("%s: Unknown encryption level\n",
		    sc->sc_dv.dv_xname);
#endif
		return (NULL);
	}

	/*
	 * Unlock sequence: read the secret seed, then feed the evolving
	 * signature back for each of the 13 key bytes.  The 1ms delays
	 * between register accesses are required by the hardware.
	 */
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_UNLOCK |
	    HIFN_DMACNFG_MSTRESET | HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
	DELAY(1000);
	addr = READ_REG_1(sc, HIFN_1_UNLOCK_SECRET1);
	DELAY(1000);
	WRITE_REG_1(sc, HIFN_1_UNLOCK_SECRET2, 0);
	DELAY(1000);

	for (i = 0; i <= 12; i++) {
		addr = hifn_next_signature(addr, offtbl[i] + 0x101);
		WRITE_REG_1(sc, HIFN_1_UNLOCK_SECRET2, addr);

		DELAY(1000);
	}

	/* Re-read the enable level to see whether the unlock took. */
	WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg | HIFN_PUCNFG_CHIPID);
	encl = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA;

#ifdef HIFN_DEBUG
	if (encl != HIFN_PUSTAT_ENA_1 && encl != HIFN_PUSTAT_ENA_2)
		aprint_debug("Encryption engine is permanently locked until next system reset.");
	else
		aprint_debug("Encryption engine enabled successfully!");
#endif

report:
	WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg);
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, dmacfg);

	switch (encl) {
	case HIFN_PUSTAT_ENA_0:
		return ("LZS-only (no encr/auth)");

	case HIFN_PUSTAT_ENA_1:
		return ("DES");

	case HIFN_PUSTAT_ENA_2:
		if (sc->sc_flags & HIFN_HAS_AES)
			return ("3DES/AES");
		else
			return ("3DES");

	default:
		return ("disabled");
	}
	/* NOTREACHED */
}

/*
 * Give initial values to the registers listed in the "Register Space"
 * section of the HIFN Software Development reference manual.
 */
static void
hifn_init_pci_registers(struct hifn_softc *sc)
{
	/* write fixed values needed by the Initialization registers */
	WRITE_REG_0(sc, HIFN_0_PUCTRL, HIFN_PUCTRL_DMAENA);
	WRITE_REG_0(sc, HIFN_0_FIFOCNFG, HIFN_FIFOCNFG_THRESHOLD);
	WRITE_REG_0(sc, HIFN_0_PUIER, HIFN_PUIER_DSTOVER);

	/* write all 4 ring address registers */
	WRITE_REG_1(sc, HIFN_1_DMA_CRAR, sc->sc_dmamap->dm_segs[0].ds_addr +
	    offsetof(struct hifn_dma, cmdr[0]));
	WRITE_REG_1(sc, HIFN_1_DMA_SRAR, sc->sc_dmamap->dm_segs[0].ds_addr +
	    offsetof(struct hifn_dma, srcr[0]));
	WRITE_REG_1(sc, HIFN_1_DMA_DRAR, sc->sc_dmamap->dm_segs[0].ds_addr +
	    offsetof(struct hifn_dma, dstr[0]));
	WRITE_REG_1(sc, HIFN_1_DMA_RRAR, sc->sc_dmamap->dm_segs[0].ds_addr +
	    offsetof(struct hifn_dma, resr[0]));

	DELAY(2000);

	/* write status register: disable all four rings, ack everything */
	WRITE_REG_1(sc, HIFN_1_DMA_CSR,
	    HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS |
	    HIFN_DMACSR_S_CTRL_DIS | HIFN_DMACSR_C_CTRL_DIS |
	    HIFN_DMACSR_D_ABORT | HIFN_DMACSR_D_DONE | HIFN_DMACSR_D_LAST |
	    HIFN_DMACSR_D_WAIT | HIFN_DMACSR_D_OVER |
	    HIFN_DMACSR_R_ABORT | HIFN_DMACSR_R_DONE | HIFN_DMACSR_R_LAST |
	    HIFN_DMACSR_R_WAIT | HIFN_DMACSR_R_OVER |
	    HIFN_DMACSR_S_ABORT | HIFN_DMACSR_S_DONE | HIFN_DMACSR_S_LAST |
	    HIFN_DMACSR_S_WAIT |
	    HIFN_DMACSR_C_ABORT | HIFN_DMACSR_C_DONE | HIFN_DMACSR_C_LAST |
	    HIFN_DMACSR_C_WAIT |
	    HIFN_DMACSR_ENGINE |
	    ((sc->sc_flags & HIFN_HAS_PUBLIC) ?
		HIFN_DMACSR_PUBDONE : 0) |
	    ((sc->sc_flags & HIFN_IS_7811) ?
		HIFN_DMACSR_ILLW | HIFN_DMACSR_ILLR : 0));

	sc->sc_d_busy = sc->sc_r_busy = sc->sc_s_busy = sc->sc_c_busy = 0;
	/* Interrupt on result-done and all error conditions. */
	sc->sc_dmaier |= HIFN_DMAIER_R_DONE | HIFN_DMAIER_C_ABORT |
	    HIFN_DMAIER_D_OVER | HIFN_DMAIER_R_OVER |
	    HIFN_DMAIER_S_ABORT | HIFN_DMAIER_D_ABORT | HIFN_DMAIER_R_ABORT |
	    HIFN_DMAIER_ENGINE |
	    ((sc->sc_flags & HIFN_IS_7811) ?
		HIFN_DMAIER_ILLW | HIFN_DMAIER_ILLR : 0);
	sc->sc_dmaier &= ~HIFN_DMAIER_C_WAIT;
	WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
	CLR_LED(sc, HIFN_MIPSRST_LED0 | HIFN_MIPSRST_LED1 | HIFN_MIPSRST_LED2);

	if (sc->sc_flags & HIFN_IS_7956) {
		/* 795x: internal RAM only; clock comes from the PLL. */
		WRITE_REG_0(sc, HIFN_0_PUCNFG, HIFN_PUCNFG_COMPSING |
		    HIFN_PUCNFG_TCALLPHASES |
		    HIFN_PUCNFG_TCDRVTOTEM | HIFN_PUCNFG_BUS32);
		WRITE_REG_1(sc, HIFN_1_PLL, HIFN_PLL_7956);
	} else {
		WRITE_REG_0(sc, HIFN_0_PUCNFG, HIFN_PUCNFG_COMPSING |
		    HIFN_PUCNFG_DRFR_128 | HIFN_PUCNFG_TCALLPHASES |
		    HIFN_PUCNFG_TCDRVTOTEM | HIFN_PUCNFG_BUS32 |
		    (sc->sc_drammodel ? HIFN_PUCNFG_DRAM : HIFN_PUCNFG_SRAM));
	}

	WRITE_REG_0(sc, HIFN_0_PUISR, HIFN_PUISR_DSTOVER);
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
	    HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE | HIFN_DMACNFG_LAST |
	    ((HIFN_POLL_FREQUENCY << 16 ) & HIFN_DMACNFG_POLLFREQ) |
	    ((HIFN_POLL_SCALAR << 8) & HIFN_DMACNFG_POLLINVAL));
}

/*
 * The maximum number of sessions supported by the card
 * is dependent on the amount of context ram, which
 * encryption algorithms are enabled, and how compression
 * is configured.  This should be configured before this
 * routine is called.
 */
static void
hifn_sessions(struct hifn_softc *sc)
{
	u_int32_t pucnfg;
	int ctxsize;

	pucnfg = READ_REG_0(sc, HIFN_0_PUCNFG);

	if (pucnfg & HIFN_PUCNFG_COMPSING) {
		if (pucnfg & HIFN_PUCNFG_ENCCNFG)
			ctxsize = 128;
		else
			ctxsize = 512;
		/*
		 * 7955/7956 has internal context memory of 32K
		 */
		if (sc->sc_flags & HIFN_IS_7956)
			sc->sc_maxses = 32768 / ctxsize;
		else
			sc->sc_maxses = 1 +
			    ((sc->sc_ramsize - 32768) / ctxsize);
	}
	else
		sc->sc_maxses = sc->sc_ramsize / 16384;

	/* Cap at 2048 sessions regardless of available context RAM. */
	if (sc->sc_maxses > 2048)
		sc->sc_maxses = 2048;
}

/*
 * Determine ram type (sram or dram).
Board should be just out of a reset
 * state when this is called.
 */
static int
hifn_ramtype(struct hifn_softc *sc)
{
	u_int8_t data[8], dataexpect[8];
	int i;

	/*
	 * Write a test pattern and read it back; DRAM (without refresh
	 * configured yet) will not hold the data, so a mismatch means the
	 * board carries DRAM.  Two complementary patterns are tried.
	 */
	for (i = 0; i < sizeof(data); i++)
		data[i] = dataexpect[i] = 0x55;
	if (hifn_writeramaddr(sc, 0, data))
		return (-1);
	if (hifn_readramaddr(sc, 0, data))
		return (-1);
	if (bcmp(data, dataexpect, sizeof(data)) != 0) {
		sc->sc_drammodel = 1;
		return (0);
	}

	for (i = 0; i < sizeof(data); i++)
		data[i] = dataexpect[i] = 0xaa;
	if (hifn_writeramaddr(sc, 0, data))
		return (-1);
	if (hifn_readramaddr(sc, 0, data))
		return (-1);
	if (bcmp(data, dataexpect, sizeof(data)) != 0) {
		sc->sc_drammodel = 1;
		return (0);
	}

	/* Both patterns held: SRAM (sc_drammodel stays 0). */
	return (0);
}

#define	HIFN_SRAM_MAX		(32 << 20)
#define	HIFN_SRAM_STEP_SIZE	16384
#define	HIFN_SRAM_GRANULARITY	(HIFN_SRAM_MAX / HIFN_SRAM_STEP_SIZE)

/*
 * Probe the SRAM size by tagging each 16KB step with its index (from the
 * top down, so aliased addresses get overwritten by lower indices), then
 * reading back from the bottom up until the tag no longer matches.
 * Leaves the detected size in sc->sc_ramsize; always returns 0.
 */
static int
hifn_sramsize(struct hifn_softc *sc)
{
	u_int32_t a;
	u_int8_t data[8];
	u_int8_t dataexpect[sizeof(data)];
	int32_t i;

	for (i = 0; i < sizeof(data); i++)
		data[i] = dataexpect[i] = i ^ 0x5a;

	/* Tag each step with its index, highest address first. */
	for (i = HIFN_SRAM_GRANULARITY - 1; i >= 0; i--) {
		a = i * HIFN_SRAM_STEP_SIZE;
		bcopy(&i, data, sizeof(i));
		hifn_writeramaddr(sc, a, data);
	}

	/* Verify from the bottom; stop growing at the first mismatch. */
	for (i = 0; i < HIFN_SRAM_GRANULARITY; i++) {
		a = i * HIFN_SRAM_STEP_SIZE;
		bcopy(&i, dataexpect, sizeof(i));
		if (hifn_readramaddr(sc, a, data) < 0)
			return (0);
		if (bcmp(data, dataexpect, sizeof(data)) != 0)
			return (0);
		sc->sc_ramsize = a + HIFN_SRAM_STEP_SIZE;
	}

	return (0);
}

/*
 * XXX For dram boards, one should really try all of the
 * HIFN_PUCNFG_DSZ_*'s.  This just assumes that PUCNFG
 * is already set up correctly.
 */
static int
hifn_dramsize(struct hifn_softc *sc)
{
	u_int32_t cnfg;

	if (sc->sc_flags & HIFN_IS_7956) {
		/*
		 * 7955/7956 have a fixed internal ram of only 32K.
		 */
		sc->sc_ramsize = 32768;
	} else {
		/* Decode the DRAM size field straight out of PUCNFG. */
		cnfg = READ_REG_0(sc, HIFN_0_PUCNFG) &
		    HIFN_PUCNFG_DRAMMASK;
		sc->sc_ramsize = 1 << ((cnfg >> 13) + 18);
	}
	return (0);
}

/*
 * Claim the next free slot on each of the four descriptor rings
 * (command, source, destination, result), returning the indices via
 * the out parameters.  When a ring's write index reaches the end, a
 * JUMP descriptor is planted there and the index wraps to 0.
 */
static void
hifn_alloc_slot(struct hifn_softc *sc, int *cmdp, int *srcp, int *dstp,
    int *resp)
{
	struct hifn_dma *dma = sc->sc_dma;

	if (dma->cmdi == HIFN_D_CMD_RSIZE) {
		dma->cmdi = 0;
		dma->cmdr[HIFN_D_CMD_RSIZE].l = htole32(HIFN_D_VALID |
		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
		HIFN_CMDR_SYNC(sc, HIFN_D_CMD_RSIZE,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	}
	*cmdp = dma->cmdi++;
	dma->cmdk = dma->cmdi;

	if (dma->srci == HIFN_D_SRC_RSIZE) {
		dma->srci = 0;
		dma->srcr[HIFN_D_SRC_RSIZE].l = htole32(HIFN_D_VALID |
		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
		HIFN_SRCR_SYNC(sc, HIFN_D_SRC_RSIZE,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	}
	*srcp = dma->srci++;
	dma->srck = dma->srci;

	if (dma->dsti == HIFN_D_DST_RSIZE) {
		dma->dsti = 0;
		dma->dstr[HIFN_D_DST_RSIZE].l = htole32(HIFN_D_VALID |
		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
		HIFN_DSTR_SYNC(sc, HIFN_D_DST_RSIZE,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	}
	*dstp = dma->dsti++;
	dma->dstk = dma->dsti;

	if (dma->resi == HIFN_D_RES_RSIZE) {
		dma->resi = 0;
		dma->resr[HIFN_D_RES_RSIZE].l = htole32(HIFN_D_VALID |
		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
		HIFN_RESR_SYNC(sc, HIFN_D_RES_RSIZE,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	}
	*resp = dma->resi++;
	dma->resk = dma->resi;
}

/*
 * Write 8 bytes of 'data' to on-board RAM at 'addr' by issuing a
 * type-3 (RAM write) base command through the DMA rings and polling
 * for completion.  Returns 0 on success, -1 on timeout.
 */
static int
hifn_writeramaddr(struct hifn_softc *sc, int addr, u_int8_t *data)
{
	struct hifn_dma *dma = sc->sc_dma;
struct hifn_base_command wc; 1108 const u_int32_t masks = HIFN_D_VALID | HIFN_D_LAST | HIFN_D_MASKDONEIRQ; 1109 int r, cmdi, resi, srci, dsti; 1110 1111 wc.masks = htole16(3 << 13); 1112 wc.session_num = htole16(addr >> 14); 1113 wc.total_source_count = htole16(8); 1114 wc.total_dest_count = htole16(addr & 0x3fff); 1115 1116 hifn_alloc_slot(sc, &cmdi, &srci, &dsti, &resi); 1117 1118 WRITE_REG_1(sc, HIFN_1_DMA_CSR, 1119 HIFN_DMACSR_C_CTRL_ENA | HIFN_DMACSR_S_CTRL_ENA | 1120 HIFN_DMACSR_D_CTRL_ENA | HIFN_DMACSR_R_CTRL_ENA); 1121 1122 /* build write command */ 1123 bzero(dma->command_bufs[cmdi], HIFN_MAX_COMMAND); 1124 *(struct hifn_base_command *)dma->command_bufs[cmdi] = wc; 1125 bcopy(data, &dma->test_src, sizeof(dma->test_src)); 1126 1127 dma->srcr[srci].p = htole32(sc->sc_dmamap->dm_segs[0].ds_addr 1128 + offsetof(struct hifn_dma, test_src)); 1129 dma->dstr[dsti].p = htole32(sc->sc_dmamap->dm_segs[0].ds_addr 1130 + offsetof(struct hifn_dma, test_dst)); 1131 1132 dma->cmdr[cmdi].l = htole32(16 | masks); 1133 dma->srcr[srci].l = htole32(8 | masks); 1134 dma->dstr[dsti].l = htole32(4 | masks); 1135 dma->resr[resi].l = htole32(4 | masks); 1136 1137 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, 1138 0, sc->sc_dmamap->dm_mapsize, 1139 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1140 1141 for (r = 10000; r >= 0; r--) { 1142 DELAY(10); 1143 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, 1144 0, sc->sc_dmamap->dm_mapsize, 1145 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1146 if ((dma->resr[resi].l & htole32(HIFN_D_VALID)) == 0) 1147 break; 1148 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, 1149 0, sc->sc_dmamap->dm_mapsize, 1150 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1151 } 1152 if (r == 0) { 1153 printf("%s: writeramaddr -- " 1154 "result[%d](addr %d) still valid\n", 1155 sc->sc_dv.dv_xname, resi, addr); 1156 r = -1; 1157 return (-1); 1158 } else 1159 r = 0; 1160 1161 WRITE_REG_1(sc, HIFN_1_DMA_CSR, 1162 HIFN_DMACSR_C_CTRL_DIS | HIFN_DMACSR_S_CTRL_DIS | 1163 
HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS); 1164 1165 return (r); 1166 } 1167 1168 static int 1169 hifn_readramaddr(struct hifn_softc *sc, int addr, u_int8_t *data) 1170 { 1171 struct hifn_dma *dma = sc->sc_dma; 1172 struct hifn_base_command rc; 1173 const u_int32_t masks = HIFN_D_VALID | HIFN_D_LAST | HIFN_D_MASKDONEIRQ; 1174 int r, cmdi, srci, dsti, resi; 1175 1176 rc.masks = htole16(2 << 13); 1177 rc.session_num = htole16(addr >> 14); 1178 rc.total_source_count = htole16(addr & 0x3fff); 1179 rc.total_dest_count = htole16(8); 1180 1181 hifn_alloc_slot(sc, &cmdi, &srci, &dsti, &resi); 1182 1183 WRITE_REG_1(sc, HIFN_1_DMA_CSR, 1184 HIFN_DMACSR_C_CTRL_ENA | HIFN_DMACSR_S_CTRL_ENA | 1185 HIFN_DMACSR_D_CTRL_ENA | HIFN_DMACSR_R_CTRL_ENA); 1186 1187 bzero(dma->command_bufs[cmdi], HIFN_MAX_COMMAND); 1188 *(struct hifn_base_command *)dma->command_bufs[cmdi] = rc; 1189 1190 dma->srcr[srci].p = htole32(sc->sc_dmamap->dm_segs[0].ds_addr + 1191 offsetof(struct hifn_dma, test_src)); 1192 dma->test_src = 0; 1193 dma->dstr[dsti].p = htole32(sc->sc_dmamap->dm_segs[0].ds_addr + 1194 offsetof(struct hifn_dma, test_dst)); 1195 dma->test_dst = 0; 1196 dma->cmdr[cmdi].l = htole32(8 | masks); 1197 dma->srcr[srci].l = htole32(8 | masks); 1198 dma->dstr[dsti].l = htole32(8 | masks); 1199 dma->resr[resi].l = htole32(HIFN_MAX_RESULT | masks); 1200 1201 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, 1202 0, sc->sc_dmamap->dm_mapsize, 1203 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1204 1205 for (r = 10000; r >= 0; r--) { 1206 DELAY(10); 1207 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, 1208 0, sc->sc_dmamap->dm_mapsize, 1209 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1210 if ((dma->resr[resi].l & htole32(HIFN_D_VALID)) == 0) 1211 break; 1212 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, 1213 0, sc->sc_dmamap->dm_mapsize, 1214 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1215 } 1216 if (r == 0) { 1217 printf("%s: readramaddr -- " 1218 "result[%d](addr %d) still valid\n", 1219 
sc->sc_dv.dv_xname, resi, addr); 1220 r = -1; 1221 } else { 1222 r = 0; 1223 bcopy(&dma->test_dst, data, sizeof(dma->test_dst)); 1224 } 1225 1226 WRITE_REG_1(sc, HIFN_1_DMA_CSR, 1227 HIFN_DMACSR_C_CTRL_DIS | HIFN_DMACSR_S_CTRL_DIS | 1228 HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS); 1229 1230 return (r); 1231 } 1232 1233 /* 1234 * Initialize the descriptor rings. 1235 */ 1236 static void 1237 hifn_init_dma(struct hifn_softc *sc) 1238 { 1239 struct hifn_dma *dma = sc->sc_dma; 1240 int i; 1241 1242 hifn_set_retry(sc); 1243 1244 /* initialize static pointer values */ 1245 for (i = 0; i < HIFN_D_CMD_RSIZE; i++) 1246 dma->cmdr[i].p = htole32(sc->sc_dmamap->dm_segs[0].ds_addr + 1247 offsetof(struct hifn_dma, command_bufs[i][0])); 1248 for (i = 0; i < HIFN_D_RES_RSIZE; i++) 1249 dma->resr[i].p = htole32(sc->sc_dmamap->dm_segs[0].ds_addr + 1250 offsetof(struct hifn_dma, result_bufs[i][0])); 1251 1252 dma->cmdr[HIFN_D_CMD_RSIZE].p = 1253 htole32(sc->sc_dmamap->dm_segs[0].ds_addr + 1254 offsetof(struct hifn_dma, cmdr[0])); 1255 dma->srcr[HIFN_D_SRC_RSIZE].p = 1256 htole32(sc->sc_dmamap->dm_segs[0].ds_addr + 1257 offsetof(struct hifn_dma, srcr[0])); 1258 dma->dstr[HIFN_D_DST_RSIZE].p = 1259 htole32(sc->sc_dmamap->dm_segs[0].ds_addr + 1260 offsetof(struct hifn_dma, dstr[0])); 1261 dma->resr[HIFN_D_RES_RSIZE].p = 1262 htole32(sc->sc_dmamap->dm_segs[0].ds_addr + 1263 offsetof(struct hifn_dma, resr[0])); 1264 1265 dma->cmdu = dma->srcu = dma->dstu = dma->resu = 0; 1266 dma->cmdi = dma->srci = dma->dsti = dma->resi = 0; 1267 dma->cmdk = dma->srck = dma->dstk = dma->resk = 0; 1268 } 1269 1270 /* 1271 * Writes out the raw command buffer space. Returns the 1272 * command buffer size. 
 */
static u_int
hifn_write_command(struct hifn_command *cmd, u_int8_t *buf)
{
	u_int8_t *buf_pos;
	struct hifn_base_command *base_cmd;
	struct hifn_mac_command *mac_cmd;
	struct hifn_crypt_command *cry_cmd;
	struct hifn_comp_command *comp_cmd;
	int using_mac, using_crypt, using_comp, len, ivlen;
	u_int32_t dlen, slen;

	buf_pos = buf;
	using_mac = cmd->base_masks & HIFN_BASE_CMD_MAC;
	using_crypt = cmd->base_masks & HIFN_BASE_CMD_CRYPT;
	using_comp = cmd->base_masks & HIFN_BASE_CMD_COMP;

	/*
	 * Base command first: low 16 bits of the source/dest lengths
	 * go in the count fields, the high bits are folded into
	 * session_num.  When slop is in use, the destination length
	 * is rounded up to cover the 4-byte slop word.
	 */
	base_cmd = (struct hifn_base_command *)buf_pos;
	base_cmd->masks = htole16(cmd->base_masks);
	slen = cmd->src_map->dm_mapsize;
	if (cmd->sloplen)
		dlen = cmd->dst_map->dm_mapsize - cmd->sloplen +
		    sizeof(u_int32_t);
	else
		dlen = cmd->dst_map->dm_mapsize;
	base_cmd->total_source_count = htole16(slen & HIFN_BASE_CMD_LENMASK_LO);
	base_cmd->total_dest_count = htole16(dlen & HIFN_BASE_CMD_LENMASK_LO);
	dlen >>= 16;
	slen >>= 16;
	base_cmd->session_num = htole16(cmd->session_num |
	    ((slen << HIFN_BASE_CMD_SRCLEN_S) & HIFN_BASE_CMD_SRCLEN_M) |
	    ((dlen << HIFN_BASE_CMD_DSTLEN_S) & HIFN_BASE_CMD_DSTLEN_M));
	buf_pos += sizeof(struct hifn_base_command);

	/* Optional per-engine sub-commands, in the order comp, mac, crypt. */
	if (using_comp) {
		comp_cmd = (struct hifn_comp_command *)buf_pos;
		dlen = cmd->compcrd->crd_len;
		comp_cmd->source_count = htole16(dlen & 0xffff);
		dlen >>= 16;
		comp_cmd->masks = htole16(cmd->comp_masks |
		    ((dlen << HIFN_COMP_CMD_SRCLEN_S) & HIFN_COMP_CMD_SRCLEN_M));
		comp_cmd->header_skip = htole16(cmd->compcrd->crd_skip);
		comp_cmd->reserved = 0;
		buf_pos += sizeof(struct hifn_comp_command);
	}

	if (using_mac) {
		mac_cmd = (struct hifn_mac_command *)buf_pos;
		dlen = cmd->maccrd->crd_len;
		mac_cmd->source_count = htole16(dlen & 0xffff);
		dlen >>= 16;
		mac_cmd->masks = htole16(cmd->mac_masks |
		    ((dlen << HIFN_MAC_CMD_SRCLEN_S) & HIFN_MAC_CMD_SRCLEN_M));
		mac_cmd->header_skip = htole16(cmd->maccrd->crd_skip);
		mac_cmd->reserved = 0;
		buf_pos += sizeof(struct hifn_mac_command);
	}

	if (using_crypt) {
		cry_cmd = (struct hifn_crypt_command *)buf_pos;
		dlen = cmd->enccrd->crd_len;
		cry_cmd->source_count = htole16(dlen & 0xffff);
		dlen >>= 16;
		cry_cmd->masks = htole16(cmd->cry_masks |
		    ((dlen << HIFN_CRYPT_CMD_SRCLEN_S) & HIFN_CRYPT_CMD_SRCLEN_M));
		cry_cmd->header_skip = htole16(cmd->enccrd->crd_skip);
		cry_cmd->reserved = 0;
		buf_pos += sizeof(struct hifn_crypt_command);
	}

	/* Key material follows the sub-commands when being (re)loaded. */
	if (using_mac && cmd->mac_masks & HIFN_MAC_CMD_NEW_KEY) {
		bcopy(cmd->mac, buf_pos, HIFN_MAC_KEY_LENGTH);
		buf_pos += HIFN_MAC_KEY_LENGTH;
	}

	if (using_crypt && cmd->cry_masks & HIFN_CRYPT_CMD_NEW_KEY) {
		switch (cmd->cry_masks & HIFN_CRYPT_CMD_ALG_MASK) {
		case HIFN_CRYPT_CMD_ALG_3DES:
			bcopy(cmd->ck, buf_pos, HIFN_3DES_KEY_LENGTH);
			buf_pos += HIFN_3DES_KEY_LENGTH;
			break;
		case HIFN_CRYPT_CMD_ALG_DES:
			bcopy(cmd->ck, buf_pos, HIFN_DES_KEY_LENGTH);
			buf_pos += HIFN_DES_KEY_LENGTH;
			break;
		case HIFN_CRYPT_CMD_ALG_RC4:
			/*
			 * RC4 key space is a fixed 256 bytes: repeat the
			 * key to fill it, then a 4-byte zero trailer.
			 */
			len = 256;
			do {
				int clen;

				clen = MIN(cmd->cklen, len);
				bcopy(cmd->ck, buf_pos, clen);
				len -= clen;
				buf_pos += clen;
			} while (len > 0);
			bzero(buf_pos, 4);
			buf_pos += 4;
			break;
		case HIFN_CRYPT_CMD_ALG_AES:
			/*
			 * AES keys are variable 128, 192 and
			 * 256 bits (16, 24 and 32 bytes).
			 */
			bcopy(cmd->ck, buf_pos, cmd->cklen);
			buf_pos += cmd->cklen;
			break;
		}
	}

	/* IV (when a new one is supplied) follows the key material. */
	if (using_crypt && cmd->cry_masks & HIFN_CRYPT_CMD_NEW_IV) {
		switch (cmd->cry_masks & HIFN_CRYPT_CMD_ALG_MASK) {
		case HIFN_CRYPT_CMD_ALG_AES:
			ivlen = HIFN_AES_IV_LENGTH;
			break;
		default:
			ivlen = HIFN_IV_LENGTH;
			break;
		}
		bcopy(cmd->iv, buf_pos, ivlen);
		buf_pos += ivlen;
	}

	/* A command using no engine at all still carries 8 zero bytes. */
	if ((cmd->base_masks & (HIFN_BASE_CMD_MAC | HIFN_BASE_CMD_CRYPT |
	    HIFN_BASE_CMD_COMP)) == 0) {
		bzero(buf_pos, 8);
		buf_pos += 8;
	}

	return (buf_pos - buf);
}

/*
 * Check that every segment of the map is 32-bit aligned: all segment
 * start addresses, and the lengths of all but the last segment, must
 * be multiples of 4 for the chip to DMA them directly.
 */
static int
hifn_dmamap_aligned(bus_dmamap_t map)
{
	int i;

	for (i = 0; i < map->dm_nsegs; i++) {
		if (map->dm_segs[i].ds_addr & 3)
			return (0);
		if ((i != (map->dm_nsegs - 1)) &&
		    (map->dm_segs[i].ds_len & 3))
			return (0);
	}
	return (1);
}

/*
 * Load the destination DMA map into the destination descriptor ring.
 * All segments but the last are queued as-is; the last segment is
 * either queued directly (no slop) or split so that the final 1-3
 * "slop" bytes are redirected into the shared slop[] word.
 */
static int
hifn_dmamap_load_dst(struct hifn_softc *sc, struct hifn_command *cmd)
{
	struct hifn_dma *dma = sc->sc_dma;
	bus_dmamap_t map = cmd->dst_map;
	u_int32_t p, l;
	int idx, used = 0, i;

	idx = dma->dsti;
	for (i = 0; i < map->dm_nsegs - 1; i++) {
		dma->dstr[idx].p = htole32(map->dm_segs[i].ds_addr);
		dma->dstr[idx].l = htole32(HIFN_D_VALID |
		    HIFN_D_MASKDONEIRQ | map->dm_segs[i].ds_len);
		HIFN_DSTR_SYNC(sc, idx,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		used++;

		if (++idx == HIFN_D_DST_RSIZE) {
			dma->dstr[idx].l = htole32(HIFN_D_VALID |
			    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
			HIFN_DSTR_SYNC(sc, idx,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			idx = 0;
		}
	}

	if (cmd->sloplen == 0) {
		p = map->dm_segs[i].ds_addr;
		l = HIFN_D_VALID | HIFN_D_MASKDONEIRQ | HIFN_D_LAST |
		    map->dm_segs[i].ds_len;
	} else {
		/* Last descriptor lands in the shared slop word instead. */
		p = sc->sc_dmamap->dm_segs[0].ds_addr +
		    offsetof(struct hifn_dma,
		    slop[cmd->slopidx]);
		l = HIFN_D_VALID | HIFN_D_MASKDONEIRQ | HIFN_D_LAST |
		    sizeof(u_int32_t);

		/* Queue the aligned front of the last segment, if any. */
		if ((map->dm_segs[i].ds_len - cmd->sloplen) != 0) {
			dma->dstr[idx].p = htole32(map->dm_segs[i].ds_addr);
			dma->dstr[idx].l = htole32(HIFN_D_VALID |
			    HIFN_D_MASKDONEIRQ |
			    (map->dm_segs[i].ds_len - cmd->sloplen));
			HIFN_DSTR_SYNC(sc, idx,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			used++;

			if (++idx == HIFN_D_DST_RSIZE) {
				dma->dstr[idx].l = htole32(HIFN_D_VALID |
				    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
				HIFN_DSTR_SYNC(sc, idx,
				    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
				idx = 0;
			}
		}
	}
	/* Final (LAST-flagged) destination descriptor. */
	dma->dstr[idx].p = htole32(p);
	dma->dstr[idx].l = htole32(l);
	HIFN_DSTR_SYNC(sc, idx, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	used++;

	if (++idx == HIFN_D_DST_RSIZE) {
		dma->dstr[idx].l = htole32(HIFN_D_VALID | HIFN_D_JUMP |
		    HIFN_D_MASKDONEIRQ);
		HIFN_DSTR_SYNC(sc, idx,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		idx = 0;
	}

	dma->dsti = idx;
	dma->dstu += used;
	return (idx);
}

/*
 * Load the source DMA map into the source descriptor ring, one
 * descriptor per segment; the last segment gets HIFN_D_LAST.
 */
static int
hifn_dmamap_load_src(struct hifn_softc *sc, struct hifn_command *cmd)
{
	struct hifn_dma *dma = sc->sc_dma;
	bus_dmamap_t map = cmd->src_map;
	int idx, i;
	u_int32_t last = 0;

	idx = dma->srci;
	for (i = 0; i < map->dm_nsegs; i++) {
		if (i == map->dm_nsegs - 1)
			last = HIFN_D_LAST;

		dma->srcr[idx].p = htole32(map->dm_segs[i].ds_addr);
		dma->srcr[idx].l = htole32(map->dm_segs[i].ds_len |
		    HIFN_D_VALID | HIFN_D_MASKDONEIRQ | last);
		HIFN_SRCR_SYNC(sc, idx,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);

		if (++idx == HIFN_D_SRC_RSIZE) {
			dma->srcr[idx].l = htole32(HIFN_D_VALID |
			    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
			HIFN_SRCR_SYNC(sc, HIFN_D_SRC_RSIZE,
			    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
			idx = 0;
		}
	}

	dma->srci = idx;
	dma->srcu += map->dm_nsegs;
	return (idx);
}

/*
 * Queue one crypto operation on the chip: build DMA maps for the
 * source and destination buffers, write the command, and fill the
 * four descriptor rings.  Returns 0 on success or an errno; on
 * failure all maps/mbufs allocated here are released via the
 * goto-cleanup chain at the bottom.
 */
static int
hifn_crypto(struct hifn_softc *sc, struct hifn_command *cmd,
    struct cryptop *crp, int hint)
{
	struct hifn_dma *dma = sc->sc_dma;
	u_int32_t cmdlen;
	int cmdi, resi, s, err = 0;

	if (bus_dmamap_create(sc->sc_dmat, HIFN_MAX_DMALEN, MAX_SCATTER,
	    HIFN_MAX_SEGLEN, 0, BUS_DMA_NOWAIT, &cmd->src_map))
		return (ENOMEM);

	if (crp->crp_flags & CRYPTO_F_IMBUF) {
		if (bus_dmamap_load_mbuf(sc->sc_dmat, cmd->src_map,
		    cmd->srcu.src_m, BUS_DMA_NOWAIT)) {
			err = ENOMEM;
			goto err_srcmap1;
		}
	} else if (crp->crp_flags & CRYPTO_F_IOV) {
		if (bus_dmamap_load_uio(sc->sc_dmat, cmd->src_map,
		    cmd->srcu.src_io, BUS_DMA_NOWAIT)) {
			err = ENOMEM;
			goto err_srcmap1;
		}
	} else {
		/* Contiguous buffers are not supported. */
		err = EINVAL;
		goto err_srcmap1;
	}

	if (hifn_dmamap_aligned(cmd->src_map)) {
		/* Aligned source: operate in place, dst == src. */
		cmd->sloplen = cmd->src_map->dm_mapsize & 3;
		if (crp->crp_flags & CRYPTO_F_IOV)
			cmd->dstu.dst_io = cmd->srcu.src_io;
		else if (crp->crp_flags & CRYPTO_F_IMBUF)
			cmd->dstu.dst_m = cmd->srcu.src_m;
		cmd->dst_map = cmd->src_map;
	} else {
		/* Unaligned mbuf source: build a fresh destination chain. */
		if (crp->crp_flags & CRYPTO_F_IOV) {
			err = EINVAL;
			goto err_srcmap;
		} else if (crp->crp_flags & CRYPTO_F_IMBUF) {
			int totlen, len;
			struct mbuf *m, *m0, *mlast;

			totlen = cmd->src_map->dm_mapsize;
			if (cmd->srcu.src_m->m_flags & M_PKTHDR) {
				len = MHLEN;
				MGETHDR(m0, M_DONTWAIT, MT_DATA);
			} else {
				len = MLEN;
				MGET(m0, M_DONTWAIT, MT_DATA);
			}
			if (m0 == NULL) {
				err = ENOMEM;
				goto err_srcmap;
			}
			if (len == MHLEN)
				M_DUP_PKTHDR(m0, cmd->srcu.src_m);
			if (totlen >= MINCLSIZE) {
				MCLGET(m0, M_DONTWAIT);
				if (m0->m_flags & M_EXT)
					len = MCLBYTES;
			}
			totlen -= len;
			m0->m_pkthdr.len = m0->m_len = len;
			mlast = m0;

			while (totlen >
0) {
				MGET(m, M_DONTWAIT, MT_DATA);
				if (m == NULL) {
					err = ENOMEM;
					m_freem(m0);
					goto err_srcmap;
				}
				len = MLEN;
				if (totlen >= MINCLSIZE) {
					MCLGET(m, M_DONTWAIT);
					if (m->m_flags & M_EXT)
						len = MCLBYTES;
				}

				m->m_len = len;
				if (m0->m_flags & M_PKTHDR)
					m0->m_pkthdr.len += len;
				totlen -= len;

				mlast->m_next = m;
				mlast = m;
			}
			cmd->dstu.dst_m = m0;
		}
	}

	/* Separate destination: create and load its own DMA map. */
	if (cmd->dst_map == NULL) {
		if (bus_dmamap_create(sc->sc_dmat,
		    HIFN_MAX_SEGLEN * MAX_SCATTER, MAX_SCATTER,
		    HIFN_MAX_SEGLEN, 0, BUS_DMA_NOWAIT, &cmd->dst_map)) {
			err = ENOMEM;
			goto err_srcmap;
		}
		if (crp->crp_flags & CRYPTO_F_IMBUF) {
			if (bus_dmamap_load_mbuf(sc->sc_dmat, cmd->dst_map,
			    cmd->dstu.dst_m, BUS_DMA_NOWAIT)) {
				err = ENOMEM;
				goto err_dstmap1;
			}
		} else if (crp->crp_flags & CRYPTO_F_IOV) {
			if (bus_dmamap_load_uio(sc->sc_dmat, cmd->dst_map,
			    cmd->dstu.dst_io, BUS_DMA_NOWAIT)) {
				err = ENOMEM;
				goto err_dstmap1;
			}
		}
	}

#ifdef HIFN_DEBUG
	if (hifn_debug)
		printf("%s: Entering cmd: stat %8x ien %8x u %d/%d/%d/%d n %d/%d\n",
		    sc->sc_dv.dv_xname,
		    READ_REG_1(sc, HIFN_1_DMA_CSR),
		    READ_REG_1(sc, HIFN_1_DMA_IER),
		    dma->cmdu, dma->srcu, dma->dstu, dma->resu,
		    cmd->src_map->dm_nsegs, cmd->dst_map->dm_nsegs);
#endif

	if (cmd->src_map == cmd->dst_map)
		bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
		    0, cmd->src_map->dm_mapsize,
		    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
	else {
		bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
		    0, cmd->src_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
		bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
		    0, cmd->dst_map->dm_mapsize, BUS_DMASYNC_PREREAD);
	}

	/* Ring bookkeeping below must not race the interrupt handler. */
	s = splnet();

	/*
	 * need 1 cmd, and 1 res
	 * need N src, and N dst
	 */
	if ((dma->cmdu + 1) > HIFN_D_CMD_RSIZE ||
	    (dma->resu + 1) > HIFN_D_RES_RSIZE) {
		splx(s);
		err = ENOMEM;
		goto err_dstmap;
	}
	if ((dma->srcu + cmd->src_map->dm_nsegs) > HIFN_D_SRC_RSIZE ||
	    (dma->dstu + cmd->dst_map->dm_nsegs + 1) > HIFN_D_DST_RSIZE) {
		splx(s);
		err = ENOMEM;
		goto err_dstmap;
	}

	/* Take a command slot (wrapping via the jump descriptor). */
	if (dma->cmdi == HIFN_D_CMD_RSIZE) {
		dma->cmdi = 0;
		dma->cmdr[HIFN_D_CMD_RSIZE].l = htole32(HIFN_D_VALID |
		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
		HIFN_CMDR_SYNC(sc, HIFN_D_CMD_RSIZE,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	}
	cmdi = dma->cmdi++;
	cmdlen = hifn_write_command(cmd, dma->command_bufs[cmdi]);
	HIFN_CMD_SYNC(sc, cmdi, BUS_DMASYNC_PREWRITE);

	/* .p for command/result already set */
	dma->cmdr[cmdi].l = htole32(cmdlen | HIFN_D_VALID | HIFN_D_LAST |
	    HIFN_D_MASKDONEIRQ);
	HIFN_CMDR_SYNC(sc, cmdi,
	    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	dma->cmdu++;
	if (sc->sc_c_busy == 0) {
		WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_C_CTRL_ENA);
		sc->sc_c_busy = 1;
		SET_LED(sc, HIFN_MIPSRST_LED0);
	}

	/*
	 * We don't worry about missing an interrupt (which a "command wait"
	 * interrupt salvages us from), unless there is more than one command
	 * in the queue.
	 */
	if (dma->cmdu > 1) {
		sc->sc_dmaier |= HIFN_DMAIER_C_WAIT;
		WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
	}

	hifnstats.hst_ipackets++;
	hifnstats.hst_ibytes += cmd->src_map->dm_mapsize;

	hifn_dmamap_load_src(sc, cmd);
	if (sc->sc_s_busy == 0) {
		WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_S_CTRL_ENA);
		sc->sc_s_busy = 1;
		SET_LED(sc, HIFN_MIPSRST_LED1);
	}

	/*
	 * Unlike other descriptors, we don't mask done interrupt from
	 * result descriptor.
	 */
#ifdef HIFN_DEBUG
	if (hifn_debug)
		printf("load res\n");
#endif
	if (dma->resi == HIFN_D_RES_RSIZE) {
		dma->resi = 0;
		dma->resr[HIFN_D_RES_RSIZE].l = htole32(HIFN_D_VALID |
		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
		HIFN_RESR_SYNC(sc, HIFN_D_RES_RSIZE,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}
	resi = dma->resi++;
	/* Remember the command so the interrupt handler can complete it. */
	dma->hifn_commands[resi] = cmd;
	HIFN_RES_SYNC(sc, resi, BUS_DMASYNC_PREREAD);
	dma->resr[resi].l = htole32(HIFN_MAX_RESULT |
	    HIFN_D_VALID | HIFN_D_LAST);
	HIFN_RESR_SYNC(sc, resi,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	dma->resu++;
	if (sc->sc_r_busy == 0) {
		WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_R_CTRL_ENA);
		sc->sc_r_busy = 1;
		SET_LED(sc, HIFN_MIPSRST_LED2);
	}

	if (cmd->sloplen)
		cmd->slopidx = resi;

	hifn_dmamap_load_dst(sc, cmd);

	if (sc->sc_d_busy == 0) {
		WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_D_CTRL_ENA);
		sc->sc_d_busy = 1;
	}

#ifdef HIFN_DEBUG
	if (hifn_debug)
		printf("%s: command: stat %8x ier %8x\n",
		    sc->sc_dv.dv_xname,
		    READ_REG_1(sc, HIFN_1_DMA_CSR), READ_REG_1(sc, HIFN_1_DMA_IER));
#endif

	sc->sc_active = 5;
	splx(s);
	return (err);		/* success */

err_dstmap:
	if (cmd->src_map != cmd->dst_map)
		bus_dmamap_unload(sc->sc_dmat, cmd->dst_map);
err_dstmap1:
	if (cmd->src_map != cmd->dst_map)
		bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map);
err_srcmap:
	if (crp->crp_flags & CRYPTO_F_IMBUF &&
	    cmd->srcu.src_m != cmd->dstu.dst_m)
		m_freem(cmd->dstu.dst_m);
	bus_dmamap_unload(sc->sc_dmat, cmd->src_map);
err_srcmap1:
	bus_dmamap_destroy(sc->sc_dmat, cmd->src_map);
	return (err);
}

/*
 * Periodic (1Hz) housekeeping: once the device has been idle for a
 * few ticks (sc_active counts down to 0), disable any DMA engines
 * whose rings have drained and turn their LEDs off.
 */
static void
hifn_tick(void *vsc)
{
	struct hifn_softc *sc = vsc;
	int s;

	s = splnet();
	if (sc->sc_active == 0) {
		struct hifn_dma *dma =
sc->sc_dma;
		u_int32_t r = 0;

		if (dma->cmdu == 0 && sc->sc_c_busy) {
			sc->sc_c_busy = 0;
			r |= HIFN_DMACSR_C_CTRL_DIS;
			CLR_LED(sc, HIFN_MIPSRST_LED0);
		}
		if (dma->srcu == 0 && sc->sc_s_busy) {
			sc->sc_s_busy = 0;
			r |= HIFN_DMACSR_S_CTRL_DIS;
			CLR_LED(sc, HIFN_MIPSRST_LED1);
		}
		if (dma->dstu == 0 && sc->sc_d_busy) {
			sc->sc_d_busy = 0;
			r |= HIFN_DMACSR_D_CTRL_DIS;
		}
		if (dma->resu == 0 && sc->sc_r_busy) {
			sc->sc_r_busy = 0;
			r |= HIFN_DMACSR_R_CTRL_DIS;
			CLR_LED(sc, HIFN_MIPSRST_LED2);
		}
		if (r)
			WRITE_REG_1(sc, HIFN_1_DMA_CSR, r);
	}
	else
		sc->sc_active--;
	splx(s);
	/* Re-arm for another tick one second from now. */
#ifdef	__OpenBSD__
	timeout_add(&sc->sc_tickto, hz);
#else
	callout_reset(&sc->sc_tickto, hz, hifn_tick, sc);
#endif
}

/*
 * Interrupt handler: acknowledge the DMA status bits we own, report
 * overruns/illegal accesses, reset on engine aborts, and then drain
 * the result, source and command rings, completing finished commands
 * via hifn_callback().  Returns 1 if the interrupt was ours, 0 if not.
 */
static int
hifn_intr(void *arg)
{
	struct hifn_softc *sc = arg;
	struct hifn_dma *dma = sc->sc_dma;
	u_int32_t dmacsr, restart;
	int i, u;

	dmacsr = READ_REG_1(sc, HIFN_1_DMA_CSR);

#ifdef HIFN_DEBUG
	if (hifn_debug)
		printf("%s: irq: stat %08x ien %08x u %d/%d/%d/%d\n",
		    sc->sc_dv.dv_xname,
		    dmacsr, READ_REG_1(sc, HIFN_1_DMA_IER),
		    dma->cmdu, dma->srcu, dma->dstu, dma->resu);
#endif

	/* Nothing in the DMA unit interrupted */
	if ((dmacsr & sc->sc_dmaier) == 0)
		return (0);

	/* Acknowledge only the conditions we have enabled. */
	WRITE_REG_1(sc, HIFN_1_DMA_CSR, dmacsr & sc->sc_dmaier);

	if (dmacsr & HIFN_DMACSR_ENGINE)
		WRITE_REG_0(sc, HIFN_0_PUISR, READ_REG_0(sc, HIFN_0_PUISR));

	if ((sc->sc_flags & HIFN_HAS_PUBLIC) &&
	    (dmacsr & HIFN_DMACSR_PUBDONE))
		WRITE_REG_1(sc, HIFN_1_PUB_STATUS,
		    READ_REG_1(sc, HIFN_1_PUB_STATUS) | HIFN_PUBSTS_DONE);

	restart = dmacsr & (HIFN_DMACSR_R_OVER | HIFN_DMACSR_D_OVER);
	if (restart)
		printf("%s: overrun %x\n", sc->sc_dv.dv_xname, dmacsr);

	if (sc->sc_flags & HIFN_IS_7811) {
		if (dmacsr & HIFN_DMACSR_ILLR)
			printf("%s: illegal read\n", sc->sc_dv.dv_xname);
		if (dmacsr & HIFN_DMACSR_ILLW)
			printf("%s: illegal write\n", sc->sc_dv.dv_xname);
	}

	restart = dmacsr & (HIFN_DMACSR_C_ABORT | HIFN_DMACSR_S_ABORT |
	    HIFN_DMACSR_D_ABORT | HIFN_DMACSR_R_ABORT);
	if (restart) {
		printf("%s: abort, resetting.\n", sc->sc_dv.dv_xname);
		hifnstats.hst_abort++;
		hifn_abort(sc);
		return (1);
	}

	if ((dmacsr & HIFN_DMACSR_C_WAIT) && (dma->resu == 0)) {
		/*
		 * If no slots to process and we receive a "waiting on
		 * command" interrupt, we disable the "waiting on command"
		 * (by clearing it).
		 */
		sc->sc_dmaier &= ~HIFN_DMAIER_C_WAIT;
		WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
	}

	/* clear the rings */
	i = dma->resk;
	while (dma->resu != 0) {
		HIFN_RESR_SYNC(sc, i,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		if (dma->resr[i].l & htole32(HIFN_D_VALID)) {
			/* Still owned by the chip; stop draining. */
			HIFN_RESR_SYNC(sc, i,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			break;
		}

		/* Slot HIFN_D_RES_RSIZE is the jump descriptor, not a result. */
		if (i != HIFN_D_RES_RSIZE) {
			struct hifn_command *cmd;
			u_int8_t *macbuf = NULL;

			HIFN_RES_SYNC(sc, i, BUS_DMASYNC_POSTREAD);
			cmd = dma->hifn_commands[i];
			KASSERT(cmd != NULL
				/*("hifn_intr: null command slot %u", i)*/);
			dma->hifn_commands[i] = NULL;

			if (cmd->base_masks & HIFN_BASE_CMD_MAC) {
				/* MAC result starts 12 bytes into the buffer. */
				macbuf = dma->result_bufs[i];
				macbuf += 12;
			}

			hifn_callback(sc, cmd, macbuf);
			hifnstats.hst_opackets++;
		}

		if (++i == (HIFN_D_RES_RSIZE + 1))
			i = 0;
		else
			dma->resu--;
	}
	dma->resk = i;

	i = dma->srck; u = dma->srcu;
	while (u != 0) {
		HIFN_SRCR_SYNC(sc, i,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		if (dma->srcr[i].l & htole32(HIFN_D_VALID)) {
			HIFN_SRCR_SYNC(sc, i,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			break;
		}
		if (++i == (HIFN_D_SRC_RSIZE + 1))
			i = 0;
		else
			u--;
	}
	dma->srck = i; dma->srcu = u;

	i = dma->cmdk; u = dma->cmdu;
	while (u != 0) {
		HIFN_CMDR_SYNC(sc, i,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		if (dma->cmdr[i].l & htole32(HIFN_D_VALID)) {
			HIFN_CMDR_SYNC(sc, i,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			break;
		}
		if (i != HIFN_D_CMD_RSIZE) {
			u--;
			HIFN_CMD_SYNC(sc, i, BUS_DMASYNC_POSTWRITE);
		}
		if (++i == (HIFN_D_CMD_RSIZE + 1))
			i = 0;
	}
	dma->cmdk = i; dma->cmdu = u;

	return (1);
}

/*
 * Allocate a new 'session' and return an encoded session id.  'sidp'
 * contains our registration id, and should contain an encoded session
 * id on successful allocation.
 */
static int
hifn_newsession(void *arg, u_int32_t *sidp, struct cryptoini *cri)
{
	struct cryptoini *c;
	struct hifn_softc *sc = arg;
	int i, mac = 0, cry = 0, comp = 0;

	KASSERT(sc != NULL /*, ("hifn_newsession: null softc")*/);
	if (sidp == NULL || cri == NULL || sc == NULL)
		return (EINVAL);

	/* Find a free session slot. */
	for (i = 0; i < sc->sc_maxses; i++)
		if (sc->sc_sessions[i].hs_state == HS_STATE_FREE)
			break;
	if (i == sc->sc_maxses)
		return (ENOMEM);

	/* At most one MAC, one cipher and one compressor per session. */
	for (c = cri; c != NULL; c = c->cri_next) {
		switch (c->cri_alg) {
		case CRYPTO_MD5:
		case CRYPTO_SHA1:
		case CRYPTO_MD5_HMAC:
		case CRYPTO_SHA1_HMAC:
			if (mac)
				return (EINVAL);
			mac = 1;
			break;
		case CRYPTO_DES_CBC:
		case CRYPTO_3DES_CBC:
		case CRYPTO_AES_CBC:
			/* Seed the per-session IV now. */
#ifdef __NetBSD__
			rnd_extract_data(sc->sc_sessions[i].hs_iv,
			    c->cri_alg == CRYPTO_AES_CBC ?
				HIFN_AES_IV_LENGTH : HIFN_IV_LENGTH,
			    RND_EXTRACT_ANY);
#else	/* FreeBSD and OpenBSD have get_random_bytes */
			/* XXX this may read fewer, does it matter?
			 */
			get_random_bytes(sc->sc_sessions[i].hs_iv,
			    c->cri_alg == CRYPTO_AES_CBC ?
				HIFN_AES_IV_LENGTH : HIFN_IV_LENGTH);
#endif
			/*FALLTHROUGH*/
		case CRYPTO_ARC4:
			if (cry)
				return (EINVAL);
			cry = 1;
			break;
#ifdef HAVE_CRYPTO_LSZ
		case CRYPTO_LZS_COMP:
			if (comp)
				return (EINVAL);
			comp = 1;
			break;
#endif
		default:
			return (EINVAL);
		}
	}
	if (mac == 0 && cry == 0 && comp == 0)
		return (EINVAL);

	/*
	 * XXX only want to support compression without chaining to
	 * MAC/crypt engine right now
	 */
	if ((comp && mac) || (comp && cry))
		return (EINVAL);

	*sidp = HIFN_SID(sc->sc_dv.dv_unit, i);
	sc->sc_sessions[i].hs_state = HS_STATE_USED;

	return (0);
}

/*
 * Deallocate a session.
 * XXX this routine should run a zero'd mac/encrypt key into context ram.
 * XXX to blow away any keys already stored there.
 */
static int
hifn_freesession(void *arg, u_int64_t tid)
{
	struct hifn_softc *sc = arg;
	int session;
	u_int32_t sid = ((u_int32_t) tid) & 0xffffffff;

	KASSERT(sc != NULL /*, ("hifn_freesession: null softc")*/);
	if (sc == NULL)
		return (EINVAL);

	session = HIFN_SESSION(sid);
	if (session >= sc->sc_maxses)
		return (EINVAL);

	/* Clears state, stored IV and prev-op tracking for the slot. */
	bzero(&sc->sc_sessions[session], sizeof(sc->sc_sessions[session]));
	return (0);
}

/*
 * opencrypto dispatch entry point: validate the request, sort its
 * descriptors into MAC/cipher roles, build a hifn_command and hand
 * it to hifn_crypto().
 */
static int
hifn_process(void *arg, struct cryptop *crp, int hint)
{
	struct hifn_softc *sc = arg;
	struct hifn_command *cmd = NULL;
	int session, err, ivlen;
	struct cryptodesc *crd1, *crd2, *maccrd, *enccrd;

	if (crp == NULL || crp->crp_callback == NULL) {
		hifnstats.hst_invalid++;
		return (EINVAL);
	}
	session = HIFN_SESSION(crp->crp_sid);

	if (sc == NULL || session >= sc->sc_maxses) {
		err = EINVAL;
		goto errout;
	}

	cmd =
(struct hifn_command *)malloc(sizeof(struct hifn_command), 2089 M_DEVBUF, M_NOWAIT|M_ZERO); 2090 if (cmd == NULL) { 2091 hifnstats.hst_nomem++; 2092 err = ENOMEM; 2093 goto errout; 2094 } 2095 2096 if (crp->crp_flags & CRYPTO_F_IMBUF) { 2097 cmd->srcu.src_m = (struct mbuf *)crp->crp_buf; 2098 cmd->dstu.dst_m = (struct mbuf *)crp->crp_buf; 2099 } else if (crp->crp_flags & CRYPTO_F_IOV) { 2100 cmd->srcu.src_io = (struct uio *)crp->crp_buf; 2101 cmd->dstu.dst_io = (struct uio *)crp->crp_buf; 2102 } else { 2103 err = EINVAL; 2104 goto errout; /* XXX we don't handle contiguous buffers! */ 2105 } 2106 2107 crd1 = crp->crp_desc; 2108 if (crd1 == NULL) { 2109 err = EINVAL; 2110 goto errout; 2111 } 2112 crd2 = crd1->crd_next; 2113 2114 if (crd2 == NULL) { 2115 if (crd1->crd_alg == CRYPTO_MD5_HMAC || 2116 crd1->crd_alg == CRYPTO_SHA1_HMAC || 2117 crd1->crd_alg == CRYPTO_SHA1 || 2118 crd1->crd_alg == CRYPTO_MD5) { 2119 maccrd = crd1; 2120 enccrd = NULL; 2121 } else if (crd1->crd_alg == CRYPTO_DES_CBC || 2122 crd1->crd_alg == CRYPTO_3DES_CBC || 2123 crd1->crd_alg == CRYPTO_AES_CBC || 2124 crd1->crd_alg == CRYPTO_ARC4) { 2125 if ((crd1->crd_flags & CRD_F_ENCRYPT) == 0) 2126 cmd->base_masks |= HIFN_BASE_CMD_DECODE; 2127 maccrd = NULL; 2128 enccrd = crd1; 2129 #ifdef HAVE_CRYPTO_LSZ 2130 } else if (crd1->crd_alg == CRYPTO_LZS_COMP) { 2131 return (hifn_compression(sc, crp, cmd)); 2132 #endif 2133 } else { 2134 err = EINVAL; 2135 goto errout; 2136 } 2137 } else { 2138 if ((crd1->crd_alg == CRYPTO_MD5_HMAC || 2139 crd1->crd_alg == CRYPTO_SHA1_HMAC || 2140 crd1->crd_alg == CRYPTO_MD5 || 2141 crd1->crd_alg == CRYPTO_SHA1) && 2142 (crd2->crd_alg == CRYPTO_DES_CBC || 2143 crd2->crd_alg == CRYPTO_3DES_CBC || 2144 crd2->crd_alg == CRYPTO_AES_CBC || 2145 crd2->crd_alg == CRYPTO_ARC4) && 2146 ((crd2->crd_flags & CRD_F_ENCRYPT) == 0)) { 2147 cmd->base_masks = HIFN_BASE_CMD_DECODE; 2148 maccrd = crd1; 2149 enccrd = crd2; 2150 } else if ((crd1->crd_alg == CRYPTO_DES_CBC || 2151 crd1->crd_alg 
== CRYPTO_ARC4 || 2152 crd1->crd_alg == CRYPTO_3DES_CBC || 2153 crd1->crd_alg == CRYPTO_AES_CBC) && 2154 (crd2->crd_alg == CRYPTO_MD5_HMAC || 2155 crd2->crd_alg == CRYPTO_SHA1_HMAC || 2156 crd2->crd_alg == CRYPTO_MD5 || 2157 crd2->crd_alg == CRYPTO_SHA1) && 2158 (crd1->crd_flags & CRD_F_ENCRYPT)) { 2159 enccrd = crd1; 2160 maccrd = crd2; 2161 } else { 2162 /* 2163 * We cannot order the 7751 as requested 2164 */ 2165 err = EINVAL; 2166 goto errout; 2167 } 2168 } 2169 2170 if (enccrd) { 2171 cmd->enccrd = enccrd; 2172 cmd->base_masks |= HIFN_BASE_CMD_CRYPT; 2173 switch (enccrd->crd_alg) { 2174 case CRYPTO_ARC4: 2175 cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_RC4; 2176 if ((enccrd->crd_flags & CRD_F_ENCRYPT) 2177 != sc->sc_sessions[session].hs_prev_op) 2178 sc->sc_sessions[session].hs_state = 2179 HS_STATE_USED; 2180 break; 2181 case CRYPTO_DES_CBC: 2182 cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_DES | 2183 HIFN_CRYPT_CMD_MODE_CBC | 2184 HIFN_CRYPT_CMD_NEW_IV; 2185 break; 2186 case CRYPTO_3DES_CBC: 2187 cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_3DES | 2188 HIFN_CRYPT_CMD_MODE_CBC | 2189 HIFN_CRYPT_CMD_NEW_IV; 2190 break; 2191 case CRYPTO_AES_CBC: 2192 cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_AES | 2193 HIFN_CRYPT_CMD_MODE_CBC | 2194 HIFN_CRYPT_CMD_NEW_IV; 2195 break; 2196 default: 2197 err = EINVAL; 2198 goto errout; 2199 } 2200 if (enccrd->crd_alg != CRYPTO_ARC4) { 2201 ivlen = ((enccrd->crd_alg == CRYPTO_AES_CBC) ? 
2202 HIFN_AES_IV_LENGTH : HIFN_IV_LENGTH); 2203 if (enccrd->crd_flags & CRD_F_ENCRYPT) { 2204 if (enccrd->crd_flags & CRD_F_IV_EXPLICIT) 2205 bcopy(enccrd->crd_iv, cmd->iv, ivlen); 2206 else 2207 bcopy(sc->sc_sessions[session].hs_iv, 2208 cmd->iv, ivlen); 2209 2210 if ((enccrd->crd_flags & CRD_F_IV_PRESENT) 2211 == 0) { 2212 if (crp->crp_flags & CRYPTO_F_IMBUF) 2213 m_copyback(cmd->srcu.src_m, 2214 enccrd->crd_inject, 2215 ivlen, cmd->iv); 2216 else if (crp->crp_flags & CRYPTO_F_IOV) 2217 cuio_copyback(cmd->srcu.src_io, 2218 enccrd->crd_inject, 2219 ivlen, cmd->iv); 2220 } 2221 } else { 2222 if (enccrd->crd_flags & CRD_F_IV_EXPLICIT) 2223 bcopy(enccrd->crd_iv, cmd->iv, ivlen); 2224 else if (crp->crp_flags & CRYPTO_F_IMBUF) 2225 m_copydata(cmd->srcu.src_m, 2226 enccrd->crd_inject, ivlen, cmd->iv); 2227 else if (crp->crp_flags & CRYPTO_F_IOV) 2228 cuio_copydata(cmd->srcu.src_io, 2229 enccrd->crd_inject, ivlen, cmd->iv); 2230 } 2231 } 2232 2233 cmd->ck = enccrd->crd_key; 2234 cmd->cklen = enccrd->crd_klen >> 3; 2235 2236 /* 2237 * Need to specify the size for the AES key in the masks. 
2238 */ 2239 if ((cmd->cry_masks & HIFN_CRYPT_CMD_ALG_MASK) == 2240 HIFN_CRYPT_CMD_ALG_AES) { 2241 switch (cmd->cklen) { 2242 case 16: 2243 cmd->cry_masks |= HIFN_CRYPT_CMD_KSZ_128; 2244 break; 2245 case 24: 2246 cmd->cry_masks |= HIFN_CRYPT_CMD_KSZ_192; 2247 break; 2248 case 32: 2249 cmd->cry_masks |= HIFN_CRYPT_CMD_KSZ_256; 2250 break; 2251 default: 2252 err = EINVAL; 2253 goto errout; 2254 } 2255 } 2256 2257 if (sc->sc_sessions[session].hs_state == HS_STATE_USED) 2258 cmd->cry_masks |= HIFN_CRYPT_CMD_NEW_KEY; 2259 } 2260 2261 if (maccrd) { 2262 cmd->maccrd = maccrd; 2263 cmd->base_masks |= HIFN_BASE_CMD_MAC; 2264 2265 switch (maccrd->crd_alg) { 2266 case CRYPTO_MD5: 2267 cmd->mac_masks |= HIFN_MAC_CMD_ALG_MD5 | 2268 HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HASH | 2269 HIFN_MAC_CMD_POS_IPSEC; 2270 break; 2271 case CRYPTO_MD5_HMAC: 2272 cmd->mac_masks |= HIFN_MAC_CMD_ALG_MD5 | 2273 HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HMAC | 2274 HIFN_MAC_CMD_POS_IPSEC | HIFN_MAC_CMD_TRUNC; 2275 break; 2276 case CRYPTO_SHA1: 2277 cmd->mac_masks |= HIFN_MAC_CMD_ALG_SHA1 | 2278 HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HASH | 2279 HIFN_MAC_CMD_POS_IPSEC; 2280 break; 2281 case CRYPTO_SHA1_HMAC: 2282 cmd->mac_masks |= HIFN_MAC_CMD_ALG_SHA1 | 2283 HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HMAC | 2284 HIFN_MAC_CMD_POS_IPSEC | HIFN_MAC_CMD_TRUNC; 2285 break; 2286 } 2287 2288 if ((maccrd->crd_alg == CRYPTO_SHA1_HMAC || 2289 maccrd->crd_alg == CRYPTO_MD5_HMAC) && 2290 sc->sc_sessions[session].hs_state == HS_STATE_USED) { 2291 cmd->mac_masks |= HIFN_MAC_CMD_NEW_KEY; 2292 bcopy(maccrd->crd_key, cmd->mac, maccrd->crd_klen >> 3); 2293 bzero(cmd->mac + (maccrd->crd_klen >> 3), 2294 HIFN_MAC_KEY_LENGTH - (maccrd->crd_klen >> 3)); 2295 } 2296 } 2297 2298 cmd->crp = crp; 2299 cmd->session_num = session; 2300 cmd->softc = sc; 2301 2302 err = hifn_crypto(sc, cmd, crp, hint); 2303 if (err == 0) { 2304 if (enccrd) 2305 sc->sc_sessions[session].hs_prev_op = 2306 enccrd->crd_flags & CRD_F_ENCRYPT; 2307 if 
(sc->sc_sessions[session].hs_state == HS_STATE_USED) 2308 sc->sc_sessions[session].hs_state = HS_STATE_KEY; 2309 return 0; 2310 } else if (err == ERESTART) { 2311 /* 2312 * There weren't enough resources to dispatch the request 2313 * to the part. Notify the caller so they'll requeue this 2314 * request and resubmit it again soon. 2315 */ 2316 #ifdef HIFN_DEBUG 2317 if (hifn_debug) 2318 printf(sc->sc_dv.dv_xname, "requeue request\n"); 2319 #endif 2320 free(cmd, M_DEVBUF); 2321 sc->sc_needwakeup |= CRYPTO_SYMQ; 2322 return (err); 2323 } 2324 2325 errout: 2326 if (cmd != NULL) 2327 free(cmd, M_DEVBUF); 2328 if (err == EINVAL) 2329 hifnstats.hst_invalid++; 2330 else 2331 hifnstats.hst_nomem++; 2332 crp->crp_etype = err; 2333 crypto_done(crp); 2334 return (0); 2335 } 2336 2337 static void 2338 hifn_abort(struct hifn_softc *sc) 2339 { 2340 struct hifn_dma *dma = sc->sc_dma; 2341 struct hifn_command *cmd; 2342 struct cryptop *crp; 2343 int i, u; 2344 2345 i = dma->resk; u = dma->resu; 2346 while (u != 0) { 2347 cmd = dma->hifn_commands[i]; 2348 KASSERT(cmd != NULL /*, ("hifn_abort: null cmd slot %u", i)*/); 2349 dma->hifn_commands[i] = NULL; 2350 crp = cmd->crp; 2351 2352 if ((dma->resr[i].l & htole32(HIFN_D_VALID)) == 0) { 2353 /* Salvage what we can. 
*/ 2354 u_int8_t *macbuf; 2355 2356 if (cmd->base_masks & HIFN_BASE_CMD_MAC) { 2357 macbuf = dma->result_bufs[i]; 2358 macbuf += 12; 2359 } else 2360 macbuf = NULL; 2361 hifnstats.hst_opackets++; 2362 hifn_callback(sc, cmd, macbuf); 2363 } else { 2364 if (cmd->src_map == cmd->dst_map) { 2365 bus_dmamap_sync(sc->sc_dmat, cmd->src_map, 2366 0, cmd->src_map->dm_mapsize, 2367 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 2368 } else { 2369 bus_dmamap_sync(sc->sc_dmat, cmd->src_map, 2370 0, cmd->src_map->dm_mapsize, 2371 BUS_DMASYNC_POSTWRITE); 2372 bus_dmamap_sync(sc->sc_dmat, cmd->dst_map, 2373 0, cmd->dst_map->dm_mapsize, 2374 BUS_DMASYNC_POSTREAD); 2375 } 2376 2377 if (cmd->srcu.src_m != cmd->dstu.dst_m) { 2378 m_freem(cmd->srcu.src_m); 2379 crp->crp_buf = (caddr_t)cmd->dstu.dst_m; 2380 } 2381 2382 /* non-shared buffers cannot be restarted */ 2383 if (cmd->src_map != cmd->dst_map) { 2384 /* 2385 * XXX should be EAGAIN, delayed until 2386 * after the reset. 2387 */ 2388 crp->crp_etype = ENOMEM; 2389 bus_dmamap_unload(sc->sc_dmat, cmd->dst_map); 2390 bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map); 2391 } else 2392 crp->crp_etype = ENOMEM; 2393 2394 bus_dmamap_unload(sc->sc_dmat, cmd->src_map); 2395 bus_dmamap_destroy(sc->sc_dmat, cmd->src_map); 2396 2397 free(cmd, M_DEVBUF); 2398 if (crp->crp_etype != EAGAIN) 2399 crypto_done(crp); 2400 } 2401 2402 if (++i == HIFN_D_RES_RSIZE) 2403 i = 0; 2404 u--; 2405 } 2406 dma->resk = i; dma->resu = u; 2407 2408 /* Force upload of key next time */ 2409 for (i = 0; i < sc->sc_maxses; i++) 2410 if (sc->sc_sessions[i].hs_state == HS_STATE_KEY) 2411 sc->sc_sessions[i].hs_state = HS_STATE_USED; 2412 2413 hifn_reset_board(sc, 1); 2414 hifn_init_dma(sc); 2415 hifn_init_pci_registers(sc); 2416 } 2417 2418 static void 2419 hifn_callback(struct hifn_softc *sc, struct hifn_command *cmd, u_int8_t *resbuf) 2420 { 2421 struct hifn_dma *dma = sc->sc_dma; 2422 struct cryptop *crp = cmd->crp; 2423 struct cryptodesc *crd; 2424 struct mbuf *m; 2425 
int totlen, i, u, ivlen; 2426 2427 if (cmd->src_map == cmd->dst_map) 2428 bus_dmamap_sync(sc->sc_dmat, cmd->src_map, 2429 0, cmd->src_map->dm_mapsize, 2430 BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD); 2431 else { 2432 bus_dmamap_sync(sc->sc_dmat, cmd->src_map, 2433 0, cmd->src_map->dm_mapsize, BUS_DMASYNC_POSTWRITE); 2434 bus_dmamap_sync(sc->sc_dmat, cmd->dst_map, 2435 0, cmd->dst_map->dm_mapsize, BUS_DMASYNC_POSTREAD); 2436 } 2437 2438 if (crp->crp_flags & CRYPTO_F_IMBUF) { 2439 if (cmd->srcu.src_m != cmd->dstu.dst_m) { 2440 crp->crp_buf = (caddr_t)cmd->dstu.dst_m; 2441 totlen = cmd->src_map->dm_mapsize; 2442 for (m = cmd->dstu.dst_m; m != NULL; m = m->m_next) { 2443 if (totlen < m->m_len) { 2444 m->m_len = totlen; 2445 totlen = 0; 2446 } else 2447 totlen -= m->m_len; 2448 } 2449 cmd->dstu.dst_m->m_pkthdr.len = 2450 cmd->srcu.src_m->m_pkthdr.len; 2451 m_freem(cmd->srcu.src_m); 2452 } 2453 } 2454 2455 if (cmd->sloplen != 0) { 2456 if (crp->crp_flags & CRYPTO_F_IMBUF) 2457 m_copyback((struct mbuf *)crp->crp_buf, 2458 cmd->src_map->dm_mapsize - cmd->sloplen, 2459 cmd->sloplen, (caddr_t)&dma->slop[cmd->slopidx]); 2460 else if (crp->crp_flags & CRYPTO_F_IOV) 2461 cuio_copyback((struct uio *)crp->crp_buf, 2462 cmd->src_map->dm_mapsize - cmd->sloplen, 2463 cmd->sloplen, (caddr_t)&dma->slop[cmd->slopidx]); 2464 } 2465 2466 i = dma->dstk; u = dma->dstu; 2467 while (u != 0) { 2468 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, 2469 offsetof(struct hifn_dma, dstr[i]), sizeof(struct hifn_desc), 2470 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 2471 if (dma->dstr[i].l & htole32(HIFN_D_VALID)) { 2472 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, 2473 offsetof(struct hifn_dma, dstr[i]), 2474 sizeof(struct hifn_desc), 2475 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 2476 break; 2477 } 2478 if (++i == (HIFN_D_DST_RSIZE + 1)) 2479 i = 0; 2480 else 2481 u--; 2482 } 2483 dma->dstk = i; dma->dstu = u; 2484 2485 hifnstats.hst_obytes += cmd->dst_map->dm_mapsize; 2486 2487 if 
((cmd->base_masks & (HIFN_BASE_CMD_CRYPT | HIFN_BASE_CMD_DECODE)) == 2488 HIFN_BASE_CMD_CRYPT) { 2489 for (crd = crp->crp_desc; crd; crd = crd->crd_next) { 2490 if (crd->crd_alg != CRYPTO_DES_CBC && 2491 crd->crd_alg != CRYPTO_3DES_CBC && 2492 crd->crd_alg != CRYPTO_AES_CBC) 2493 continue; 2494 ivlen = ((crd->crd_alg == CRYPTO_AES_CBC) ? 2495 HIFN_AES_IV_LENGTH : HIFN_IV_LENGTH); 2496 if (crp->crp_flags & CRYPTO_F_IMBUF) 2497 m_copydata((struct mbuf *)crp->crp_buf, 2498 crd->crd_skip + crd->crd_len - ivlen, 2499 ivlen, 2500 cmd->softc->sc_sessions[cmd->session_num].hs_iv); 2501 else if (crp->crp_flags & CRYPTO_F_IOV) { 2502 cuio_copydata((struct uio *)crp->crp_buf, 2503 crd->crd_skip + crd->crd_len - ivlen, 2504 ivlen, 2505 cmd->softc->sc_sessions[cmd->session_num].hs_iv); 2506 } 2507 /* XXX We do not handle contig data */ 2508 break; 2509 } 2510 } 2511 2512 if (cmd->base_masks & HIFN_BASE_CMD_MAC) { 2513 u_int8_t *macbuf; 2514 2515 macbuf = resbuf + sizeof(struct hifn_base_result); 2516 if (cmd->base_masks & HIFN_BASE_CMD_COMP) 2517 macbuf += sizeof(struct hifn_comp_result); 2518 macbuf += sizeof(struct hifn_mac_result); 2519 2520 for (crd = crp->crp_desc; crd; crd = crd->crd_next) { 2521 int len; 2522 2523 if (crd->crd_alg == CRYPTO_MD5) 2524 len = 16; 2525 else if (crd->crd_alg == CRYPTO_SHA1) 2526 len = 20; 2527 else if (crd->crd_alg == CRYPTO_MD5_HMAC || 2528 crd->crd_alg == CRYPTO_SHA1_HMAC) 2529 len = 12; 2530 else 2531 continue; 2532 2533 if (crp->crp_flags & CRYPTO_F_IMBUF) 2534 m_copyback((struct mbuf *)crp->crp_buf, 2535 crd->crd_inject, len, macbuf); 2536 else if ((crp->crp_flags & CRYPTO_F_IOV) && crp->crp_mac) 2537 bcopy((caddr_t)macbuf, crp->crp_mac, len); 2538 break; 2539 } 2540 } 2541 2542 if (cmd->src_map != cmd->dst_map) { 2543 bus_dmamap_unload(sc->sc_dmat, cmd->dst_map); 2544 bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map); 2545 } 2546 bus_dmamap_unload(sc->sc_dmat, cmd->src_map); 2547 bus_dmamap_destroy(sc->sc_dmat, cmd->src_map); 2548 free(cmd, 
M_DEVBUF); 2549 crypto_done(crp); 2550 } 2551 2552 #ifdef HAVE_CRYPTO_LSZ 2553 2554 static int 2555 hifn_compression(struct hifn_softc *sc, struct cryptop *crp, 2556 struct hifn_command *cmd) 2557 { 2558 struct cryptodesc *crd = crp->crp_desc; 2559 int s, err = 0; 2560 2561 cmd->compcrd = crd; 2562 cmd->base_masks |= HIFN_BASE_CMD_COMP; 2563 2564 if ((crp->crp_flags & CRYPTO_F_IMBUF) == 0) { 2565 /* 2566 * XXX can only handle mbufs right now since we can 2567 * XXX dynamically resize them. 2568 */ 2569 err = EINVAL; 2570 return (ENOMEM); 2571 } 2572 2573 if ((crd->crd_flags & CRD_F_COMP) == 0) 2574 cmd->base_masks |= HIFN_BASE_CMD_DECODE; 2575 if (crd->crd_alg == CRYPTO_LZS_COMP) 2576 cmd->comp_masks |= HIFN_COMP_CMD_ALG_LZS | 2577 HIFN_COMP_CMD_CLEARHIST; 2578 2579 if (bus_dmamap_create(sc->sc_dmat, HIFN_MAX_DMALEN, MAX_SCATTER, 2580 HIFN_MAX_SEGLEN, 0, BUS_DMA_NOWAIT, &cmd->src_map)) { 2581 err = ENOMEM; 2582 goto fail; 2583 } 2584 2585 if (bus_dmamap_create(sc->sc_dmat, HIFN_MAX_DMALEN, MAX_SCATTER, 2586 HIFN_MAX_SEGLEN, 0, BUS_DMA_NOWAIT, &cmd->dst_map)) { 2587 err = ENOMEM; 2588 goto fail; 2589 } 2590 2591 if (crp->crp_flags & CRYPTO_F_IMBUF) { 2592 int len; 2593 2594 if (bus_dmamap_load_mbuf(sc->sc_dmat, cmd->src_map, 2595 cmd->srcu.src_m, BUS_DMA_NOWAIT)) { 2596 err = ENOMEM; 2597 goto fail; 2598 } 2599 2600 len = cmd->src_map->dm_mapsize / MCLBYTES; 2601 if ((cmd->src_map->dm_mapsize % MCLBYTES) != 0) 2602 len++; 2603 len *= MCLBYTES; 2604 2605 if ((crd->crd_flags & CRD_F_COMP) == 0) 2606 len *= 4; 2607 2608 if (len > HIFN_MAX_DMALEN) 2609 len = HIFN_MAX_DMALEN; 2610 2611 cmd->dstu.dst_m = hifn_mkmbuf_chain(len, cmd->srcu.src_m); 2612 if (cmd->dstu.dst_m == NULL) { 2613 err = ENOMEM; 2614 goto fail; 2615 } 2616 2617 if (bus_dmamap_load_mbuf(sc->sc_dmat, cmd->dst_map, 2618 cmd->dstu.dst_m, BUS_DMA_NOWAIT)) { 2619 err = ENOMEM; 2620 goto fail; 2621 } 2622 } else if (crp->crp_flags & CRYPTO_F_IOV) { 2623 if (bus_dmamap_load_uio(sc->sc_dmat, cmd->src_map, 2624 
		    cmd->srcu.src_io, BUS_DMA_NOWAIT)) {
			err = ENOMEM;
			goto fail;
		}
		if (bus_dmamap_load_uio(sc->sc_dmat, cmd->dst_map,
		    cmd->dstu.dst_io, BUS_DMA_NOWAIT)) {
			err = ENOMEM;
			goto fail;
		}
	}

	if (cmd->src_map == cmd->dst_map)
		bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
		    0, cmd->src_map->dm_mapsize,
		    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
	else {
		bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
		    0, cmd->src_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
		bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
		    0, cmd->dst_map->dm_mapsize, BUS_DMASYNC_PREREAD);
	}

	cmd->crp = crp;
	/*
	 * Always use session 0. The modes of compression we use are
	 * stateless and there is always at least one compression
	 * context, zero.
	 */
	cmd->session_num = 0;
	cmd->softc = sc;

	s = splnet();
	err = hifn_compress_enter(sc, cmd);
	splx(s);

	if (err != 0)
		goto fail;
	return (0);

fail:
	/* Unload only maps that were actually loaded (dm_nsegs > 0). */
	if (cmd->dst_map != NULL) {
		if (cmd->dst_map->dm_nsegs > 0)
			bus_dmamap_unload(sc->sc_dmat, cmd->dst_map);
		bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map);
	}
	if (cmd->src_map != NULL) {
		if (cmd->src_map->dm_nsegs > 0)
			bus_dmamap_unload(sc->sc_dmat, cmd->src_map);
		bus_dmamap_destroy(sc->sc_dmat, cmd->src_map);
	}
	free(cmd, M_DEVBUF);
	if (err == EINVAL)
		hifnstats.hst_invalid++;
	else
		hifnstats.hst_nomem++;
	crp->crp_etype = err;
	crypto_done(crp);
	return (0);
}

/*
 * Queue a prepared compression command on the chip's command, source,
 * result and destination rings, kicking each DMA engine as needed.
 *
 * must be called at splnet()
 */
static int
hifn_compress_enter(struct hifn_softc *sc, struct hifn_command *cmd)
{
	struct hifn_dma *dma = sc->sc_dma;
	int cmdi, resi;
	u_int32_t cmdlen;

	/*
	 * Reject the request if any ring lacks room.
	 * NOTE(review): the result-ring bound below uses HIFN_D_CMD_RSIZE
	 * rather than HIFN_D_RES_RSIZE — benign only if the two ring
	 * sizes are equal; verify against hifn7751reg.h.
	 */
	if ((dma->cmdu + 1) > HIFN_D_CMD_RSIZE ||
	    (dma->resu + 1) > HIFN_D_CMD_RSIZE)
		return (ENOMEM);

	if ((dma->srcu + cmd->src_map->dm_nsegs) > HIFN_D_SRC_RSIZE ||
	    (dma->dstu + cmd->dst_map->dm_nsegs) > HIFN_D_DST_RSIZE)
		return (ENOMEM);

	/* Wrap the command ring via its jump descriptor if at the end. */
	if (dma->cmdi == HIFN_D_CMD_RSIZE) {
		dma->cmdi = 0;
		dma->cmdr[HIFN_D_CMD_RSIZE].l = htole32(HIFN_D_VALID |
		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
		HIFN_CMDR_SYNC(sc, HIFN_D_CMD_RSIZE,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	}
	cmdi = dma->cmdi++;
	cmdlen = hifn_write_command(cmd, dma->command_bufs[cmdi]);
	HIFN_CMD_SYNC(sc, cmdi, BUS_DMASYNC_PREWRITE);

	/* .p for command/result already set */
	dma->cmdr[cmdi].l = htole32(cmdlen | HIFN_D_VALID | HIFN_D_LAST |
	    HIFN_D_MASKDONEIRQ);
	HIFN_CMDR_SYNC(sc, cmdi,
	    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	dma->cmdu++;
	if (sc->sc_c_busy == 0) {
		WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_C_CTRL_ENA);
		sc->sc_c_busy = 1;
		SET_LED(sc, HIFN_MIPSRST_LED0);
	}

	/*
	 * We don't worry about missing an interrupt (which a "command wait"
	 * interrupt salvages us from), unless there is more than one command
	 * in the queue.
	 */
	if (dma->cmdu > 1) {
		sc->sc_dmaier |= HIFN_DMAIER_C_WAIT;
		WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
	}

	hifnstats.hst_ipackets++;
	hifnstats.hst_ibytes += cmd->src_map->dm_mapsize;

	hifn_dmamap_load_src(sc, cmd);
	if (sc->sc_s_busy == 0) {
		WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_S_CTRL_ENA);
		sc->sc_s_busy = 1;
		SET_LED(sc, HIFN_MIPSRST_LED1);
	}

	/*
	 * Unlike other descriptors, we don't mask done interrupt from
	 * result descriptor.
	 */
	if (dma->resi == HIFN_D_RES_RSIZE) {
		dma->resi = 0;
		dma->resr[HIFN_D_RES_RSIZE].l = htole32(HIFN_D_VALID |
		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
		HIFN_RESR_SYNC(sc, HIFN_D_RES_RSIZE,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}
	resi = dma->resi++;
	dma->hifn_commands[resi] = cmd;
	HIFN_RES_SYNC(sc, resi, BUS_DMASYNC_PREREAD);
	dma->resr[resi].l = htole32(HIFN_MAX_RESULT |
	    HIFN_D_VALID | HIFN_D_LAST);
	HIFN_RESR_SYNC(sc, resi,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	dma->resu++;
	if (sc->sc_r_busy == 0) {
		WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_R_CTRL_ENA);
		sc->sc_r_busy = 1;
		SET_LED(sc, HIFN_MIPSRST_LED2);
	}

	/* Remember which result slot holds this command's slop bytes. */
	if (cmd->sloplen)
		cmd->slopidx = resi;

	hifn_dmamap_load_dst(sc, cmd);

	if (sc->sc_d_busy == 0) {
		WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_D_CTRL_ENA);
		sc->sc_d_busy = 1;
	}
	/* Arm the watchdog tick counter. */
	sc->sc_active = 5;
	cmd->cmd_callback = hifn_callback_comp;
	return (0);
}

/*
 * Completion handler for compression requests.  On destination
 * overrun, grow the output chain and resubmit; otherwise compute the
 * output length from the result descriptor and finish the request.
 */
static void
hifn_callback_comp(struct hifn_softc *sc, struct hifn_command *cmd,
    u_int8_t *resbuf)
{
	struct hifn_base_result baseres;
	struct cryptop *crp = cmd->crp;
	struct hifn_dma *dma = sc->sc_dma;
	struct mbuf *m;
	int err = 0, i, u;
	u_int32_t olen;
	bus_size_t dstsize;

	bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
	    0, cmd->src_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
	bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
	    0, cmd->dst_map->dm_mapsize, BUS_DMASYNC_POSTREAD);

	dstsize = cmd->dst_map->dm_mapsize;
	bus_dmamap_unload(sc->sc_dmat, cmd->dst_map);

	bcopy(resbuf, &baseres, sizeof(struct hifn_base_result));

	/* Retire completed destination descriptors. */
	i = dma->dstk; u = dma->dstu;
	while (u != 0) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
		    offsetof(struct hifn_dma, dstr[i]), sizeof(struct hifn_desc),
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		if (dma->dstr[i].l & htole32(HIFN_D_VALID)) {
			bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
			    offsetof(struct hifn_dma, dstr[i]),
			    sizeof(struct hifn_desc),
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			break;
		}
		/* Slot HIFN_D_DST_RSIZE is the jump descriptor: not counted. */
		if (++i == (HIFN_D_DST_RSIZE + 1))
			i = 0;
		else
			u--;
	}
	dma->dstk = i; dma->dstu = u;

	if (baseres.flags & htole16(HIFN_BASE_RES_DSTOVERRUN)) {
		/* Output did not fit; retry with a larger chain. */
		bus_size_t xlen;

		xlen = dstsize;

		m_freem(cmd->dstu.dst_m);

		if (xlen == HIFN_MAX_DMALEN) {
			/* We've done all we can. */
			err = E2BIG;
			goto out;
		}

		xlen += MCLBYTES;

		if (xlen > HIFN_MAX_DMALEN)
			xlen = HIFN_MAX_DMALEN;

		cmd->dstu.dst_m = hifn_mkmbuf_chain(xlen,
		    cmd->srcu.src_m);
		if (cmd->dstu.dst_m == NULL) {
			err = ENOMEM;
			goto out;
		}
		if (bus_dmamap_load_mbuf(sc->sc_dmat, cmd->dst_map,
		    cmd->dstu.dst_m, BUS_DMA_NOWAIT)) {
			err = ENOMEM;
			goto out;
		}

		bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
		    0, cmd->src_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
		bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
		    0, cmd->dst_map->dm_mapsize, BUS_DMASYNC_PREREAD);

		/* already at splnet...
*/ 2861 err = hifn_compress_enter(sc, cmd); 2862 if (err != 0) 2863 goto out; 2864 return; 2865 } 2866 2867 olen = dstsize - (letoh16(baseres.dst_cnt) | 2868 (((letoh16(baseres.session) & HIFN_BASE_RES_DSTLEN_M) >> 2869 HIFN_BASE_RES_DSTLEN_S) << 16)); 2870 2871 crp->crp_olen = olen - cmd->compcrd->crd_skip; 2872 2873 bus_dmamap_unload(sc->sc_dmat, cmd->src_map); 2874 bus_dmamap_destroy(sc->sc_dmat, cmd->src_map); 2875 bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map); 2876 2877 m = cmd->dstu.dst_m; 2878 if (m->m_flags & M_PKTHDR) 2879 m->m_pkthdr.len = olen; 2880 crp->crp_buf = (caddr_t)m; 2881 for (; m != NULL; m = m->m_next) { 2882 if (olen >= m->m_len) 2883 olen -= m->m_len; 2884 else { 2885 m->m_len = olen; 2886 olen = 0; 2887 } 2888 } 2889 2890 m_freem(cmd->srcu.src_m); 2891 free(cmd, M_DEVBUF); 2892 crp->crp_etype = 0; 2893 crypto_done(crp); 2894 return; 2895 2896 out: 2897 if (cmd->dst_map != NULL) { 2898 if (cmd->src_map->dm_nsegs != 0) 2899 bus_dmamap_unload(sc->sc_dmat, cmd->src_map); 2900 bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map); 2901 } 2902 if (cmd->src_map != NULL) { 2903 if (cmd->src_map->dm_nsegs != 0) 2904 bus_dmamap_unload(sc->sc_dmat, cmd->src_map); 2905 bus_dmamap_destroy(sc->sc_dmat, cmd->src_map); 2906 } 2907 if (cmd->dstu.dst_m != NULL) 2908 m_freem(cmd->dstu.dst_m); 2909 free(cmd, M_DEVBUF); 2910 crp->crp_etype = err; 2911 crypto_done(crp); 2912 } 2913 2914 static struct mbuf * 2915 hifn_mkmbuf_chain(int totlen, struct mbuf *mtemplate) 2916 { 2917 int len; 2918 struct mbuf *m, *m0, *mlast; 2919 2920 if (mtemplate->m_flags & M_PKTHDR) { 2921 len = MHLEN; 2922 MGETHDR(m0, M_DONTWAIT, MT_DATA); 2923 } else { 2924 len = MLEN; 2925 MGET(m0, M_DONTWAIT, MT_DATA); 2926 } 2927 if (m0 == NULL) 2928 return (NULL); 2929 if (len == MHLEN) 2930 M_DUP_PKTHDR(m0, mtemplate); 2931 MCLGET(m0, M_DONTWAIT); 2932 if (!(m0->m_flags & M_EXT)) 2933 m_freem(m0); 2934 len = MCLBYTES; 2935 2936 totlen -= len; 2937 m0->m_pkthdr.len = m0->m_len = len; 2938 mlast = m0; 
2939 2940 while (totlen > 0) { 2941 MGET(m, M_DONTWAIT, MT_DATA); 2942 if (m == NULL) { 2943 m_freem(m0); 2944 return (NULL); 2945 } 2946 MCLGET(m, M_DONTWAIT); 2947 if (!(m->m_flags & M_EXT)) { 2948 m_freem(m0); 2949 return (NULL); 2950 } 2951 len = MCLBYTES; 2952 m->m_len = len; 2953 if (m0->m_flags & M_PKTHDR) 2954 m0->m_pkthdr.len += len; 2955 totlen -= len; 2956 2957 mlast->m_next = m; 2958 mlast = m; 2959 } 2960 2961 return (m0); 2962 } 2963 #endif /* HAVE_CRYPTO_LSZ */ 2964 2965 static void 2966 hifn_write_4(struct hifn_softc *sc, int reggrp, bus_size_t reg, u_int32_t val) 2967 { 2968 /* 2969 * 7811 PB3 rev/2 parts lock-up on burst writes to Group 0 2970 * and Group 1 registers; avoid conditions that could create 2971 * burst writes by doing a read in between the writes. 2972 */ 2973 if (sc->sc_flags & HIFN_NO_BURSTWRITE) { 2974 if (sc->sc_waw_lastgroup == reggrp && 2975 sc->sc_waw_lastreg == reg - 4) { 2976 bus_space_read_4(sc->sc_st1, sc->sc_sh1, HIFN_1_REVID); 2977 } 2978 sc->sc_waw_lastgroup = reggrp; 2979 sc->sc_waw_lastreg = reg; 2980 } 2981 if (reggrp == 0) 2982 bus_space_write_4(sc->sc_st0, sc->sc_sh0, reg, val); 2983 else 2984 bus_space_write_4(sc->sc_st1, sc->sc_sh1, reg, val); 2985 2986 } 2987 2988 static u_int32_t 2989 hifn_read_4(struct hifn_softc *sc, int reggrp, bus_size_t reg) 2990 { 2991 if (sc->sc_flags & HIFN_NO_BURSTWRITE) { 2992 sc->sc_waw_lastgroup = -1; 2993 sc->sc_waw_lastreg = 1; 2994 } 2995 if (reggrp == 0) 2996 return (bus_space_read_4(sc->sc_st0, sc->sc_sh0, reg)); 2997 return (bus_space_read_4(sc->sc_st1, sc->sc_sh1, reg)); 2998 } 2999