1 /* $NetBSD: hifn7751.c,v 1.21 2004/04/29 01:46:06 jonathan Exp $ */ 2 /* $FreeBSD: hifn7751.c,v 1.5.2.7 2003/10/08 23:52:00 sam Exp $ */ 3 /* $OpenBSD: hifn7751.c,v 1.140 2003/08/01 17:55:54 deraadt Exp $ */ 4 5 /* 6 * Invertex AEON / Hifn 7751 driver 7 * Copyright (c) 1999 Invertex Inc. All rights reserved. 8 * Copyright (c) 1999 Theo de Raadt 9 * Copyright (c) 2000-2001 Network Security Technologies, Inc. 10 * http://www.netsec.net 11 * Copyright (c) 2003 Hifn Inc. 12 * 13 * This driver is based on a previous driver by Invertex, for which they 14 * requested: Please send any comments, feedback, bug-fixes, or feature 15 * requests to software@invertex.com. 16 * 17 * Redistribution and use in source and binary forms, with or without 18 * modification, are permitted provided that the following conditions 19 * are met: 20 * 21 * 1. Redistributions of source code must retain the above copyright 22 * notice, this list of conditions and the following disclaimer. 23 * 2. Redistributions in binary form must reproduce the above copyright 24 * notice, this list of conditions and the following disclaimer in the 25 * documentation and/or other materials provided with the distribution. 26 * 3. The name of the author may not be used to endorse or promote products 27 * derived from this software without specific prior written permission. 28 * 29 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 30 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 31 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
32 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 33 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 34 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 35 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 36 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 37 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 38 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 39 * 40 * Effort sponsored in part by the Defense Advanced Research Projects 41 * Agency (DARPA) and Air Force Research Laboratory, Air Force 42 * Materiel Command, USAF, under agreement number F30602-01-2-0537. 43 * 44 */ 45 46 /* 47 * Driver for various Hifn pre-HIPP encryption processors. 48 */ 49 50 #include <sys/cdefs.h> 51 __KERNEL_RCSID(0, "$NetBSD: hifn7751.c,v 1.21 2004/04/29 01:46:06 jonathan Exp $"); 52 53 #include "rnd.h" 54 #include "opencrypto.h" 55 56 #if NRND == 0 || NOPENCRYPTO == 0 57 #error hifn7751 requires rnd and opencrypto pseudo-devices 58 #endif 59 60 61 #include <sys/param.h> 62 #include <sys/systm.h> 63 #include <sys/proc.h> 64 #include <sys/errno.h> 65 #include <sys/malloc.h> 66 #include <sys/kernel.h> 67 #include <sys/mbuf.h> 68 #include <sys/device.h> 69 70 #include <uvm/uvm_extern.h> 71 72 73 #ifdef __OpenBSD__ 74 #include <crypto/crypto.h> 75 #include <dev/rndvar.h> 76 #else 77 #include <opencrypto/cryptodev.h> 78 #include <sys/rnd.h> 79 #endif 80 81 #include <dev/pci/pcireg.h> 82 #include <dev/pci/pcivar.h> 83 #include <dev/pci/pcidevs.h> 84 85 #include <dev/pci/hifn7751reg.h> 86 #include <dev/pci/hifn7751var.h> 87 88 #undef HIFN_DEBUG 89 90 #ifdef __NetBSD__ 91 #define HIFN_NO_RNG /* until statistically tested */ 92 #define M_DUP_PKTHDR M_COPY_PKTHDR /* XXX */ 93 #endif 94 95 #ifdef HIFN_DEBUG 96 extern int hifn_debug; /* patchable */ 97 int hifn_debug = 1; 98 #endif 99 100 #ifdef __OpenBSD__ 101 #define 
HAVE_CRYPTO_LZS /* OpenBSD OCF supports CRYPTO_COMP_LZS */ 102 #endif 103 104 /* 105 * Prototypes and count for the pci_device structure 106 */ 107 #ifdef __OpenBSD__ 108 int hifn_probe((struct device *, void *, void *); 109 #else 110 int hifn_probe(struct device *, struct cfdata *, void *); 111 #endif 112 void hifn_attach(struct device *, struct device *, void *); 113 114 CFATTACH_DECL(hifn, sizeof(struct hifn_softc), 115 hifn_probe, hifn_attach, NULL, NULL); 116 117 #ifdef __OpenBSD__ 118 struct cfdriver hifn_cd = { 119 0, "hifn", DV_DULL 120 }; 121 #endif 122 123 void hifn_reset_board(struct hifn_softc *, int); 124 void hifn_reset_puc(struct hifn_softc *); 125 void hifn_puc_wait(struct hifn_softc *); 126 const char *hifn_enable_crypto(struct hifn_softc *, pcireg_t); 127 void hifn_set_retry(struct hifn_softc *); 128 void hifn_init_dma(struct hifn_softc *); 129 void hifn_init_pci_registers(struct hifn_softc *); 130 int hifn_sramsize(struct hifn_softc *); 131 int hifn_dramsize(struct hifn_softc *); 132 int hifn_ramtype(struct hifn_softc *); 133 void hifn_sessions(struct hifn_softc *); 134 int hifn_intr(void *); 135 u_int hifn_write_command(struct hifn_command *, u_int8_t *); 136 u_int32_t hifn_next_signature(u_int32_t a, u_int cnt); 137 int hifn_newsession(void*, u_int32_t *, struct cryptoini *); 138 int hifn_freesession(void*, u_int64_t); 139 int hifn_process(void*, struct cryptop *, int); 140 void hifn_callback(struct hifn_softc *, struct hifn_command *, u_int8_t *); 141 int hifn_crypto(struct hifn_softc *, struct hifn_command *, 142 struct cryptop*, int); 143 int hifn_readramaddr(struct hifn_softc *, int, u_int8_t *); 144 int hifn_writeramaddr(struct hifn_softc *, int, u_int8_t *); 145 int hifn_dmamap_aligned(bus_dmamap_t); 146 int hifn_dmamap_load_src(struct hifn_softc *, struct hifn_command *); 147 int hifn_dmamap_load_dst(struct hifn_softc *, struct hifn_command *); 148 int hifn_init_pubrng(struct hifn_softc *); 149 #ifndef HIFN_NO_RNG 150 static void 
hifn_rng(void *); 151 #endif 152 void hifn_tick(void *); 153 void hifn_abort(struct hifn_softc *); 154 void hifn_alloc_slot(struct hifn_softc *, int *, int *, int *, int *); 155 void hifn_write_4(struct hifn_softc *, int, bus_size_t, u_int32_t); 156 u_int32_t hifn_read_4(struct hifn_softc *, int, bus_size_t); 157 #ifdef HAVE_CRYPTO_LZS 158 int hifn_compression(struct hifn_softc *, struct cryptop *, 159 struct hifn_command *); 160 struct mbuf *hifn_mkmbuf_chain(int, struct mbuf *); 161 int hifn_compress_enter(struct hifn_softc *, struct hifn_command *); 162 void hifn_callback_comp(struct hifn_softc *, struct hifn_command *, 163 u_int8_t *); 164 #endif /* HAVE_CRYPTO_LZS */ 165 166 167 #ifdef notyet 168 int hifn_compression(struct hifn_softc *, struct cryptop *, 169 struct hifn_command *); 170 struct mbuf *hifn_mkmbuf_chain(int, struct mbuf *); 171 int hifn_compress_enter(struct hifn_softc *, struct hifn_command *); 172 void hifn_callback_comp(struct hifn_softc *, struct hifn_command *, 173 u_int8_t *); 174 #endif 175 176 struct hifn_stats hifnstats; 177 178 static const struct hifn_product { 179 pci_vendor_id_t hifn_vendor; 180 pci_product_id_t hifn_product; 181 int hifn_flags; 182 const char *hifn_name; 183 } hifn_products[] = { 184 { PCI_VENDOR_INVERTEX, PCI_PRODUCT_INVERTEX_AEON, 185 0, 186 "Invertex AEON", 187 }, 188 189 { PCI_VENDOR_HIFN, PCI_PRODUCT_HIFN_7751, 190 0, 191 "Hifn 7751", 192 }, 193 { PCI_VENDOR_NETSEC, PCI_PRODUCT_NETSEC_7751, 194 0, 195 "Hifn 7751 (NetSec)" 196 }, 197 198 { PCI_VENDOR_HIFN, PCI_PRODUCT_HIFN_7811, 199 HIFN_IS_7811 | HIFN_HAS_RNG | HIFN_HAS_LEDS | HIFN_NO_BURSTWRITE, 200 "Hifn 7811", 201 }, 202 203 { PCI_VENDOR_HIFN, PCI_PRODUCT_HIFN_7951, 204 HIFN_HAS_RNG | HIFN_HAS_PUBLIC, 205 "Hifn 7951", 206 }, 207 208 { PCI_VENDOR_HIFN, PCI_PRODUCT_HIFN_7955, 209 HIFN_HAS_RNG | HIFN_HAS_PUBLIC | HIFN_IS_7956 | HIFN_HAS_AES, 210 "Hifn 7955", 211 }, 212 213 { PCI_VENDOR_HIFN, PCI_PRODUCT_HIFN_7956, 214 HIFN_HAS_RNG | HIFN_HAS_PUBLIC | 
HIFN_IS_7956 | HIFN_HAS_AES,
	    "Hifn 7956",
	},

	/* Sentinel: hifn_lookup() stops at the NULL name. */
	{ 0,			0,
	    0,
	    NULL
	}
};

/*
 * Match the attach arguments against the product table above.
 * Returns the matching table entry, or NULL if the PCI
 * vendor/product pair is not a chip we drive.
 */
static const struct hifn_product *
hifn_lookup(const struct pci_attach_args *pa)
{
	const struct hifn_product *hp;

	for (hp = hifn_products; hp->hifn_name != NULL; hp++) {
		if (PCI_VENDOR(pa->pa_id) == hp->hifn_vendor &&
		    PCI_PRODUCT(pa->pa_id) == hp->hifn_product)
			return (hp);
	}
	return (NULL);
}

/*
 * Autoconfiguration match routine: succeed for any device listed
 * in hifn_products[].
 */
int
hifn_probe(struct device *parent, struct cfdata *match, void *aux)
{
	struct pci_attach_args *pa = (struct pci_attach_args *) aux;

	if (hifn_lookup(pa) != NULL)
		return (1);

	return (0);
}

/*
 * Autoconfiguration attach routine.  Maps the two memory BARs,
 * allocates and wires down the single page-aligned DMA structure
 * (struct hifn_dma) that holds all four descriptor rings, unlocks
 * the crypto engine, sizes the board RAM, establishes the interrupt,
 * and registers the supported algorithms with opencrypto.  On
 * failure, resources acquired so far are released in reverse order
 * through the fail_* labels at the bottom.
 */
void
hifn_attach(struct device *parent, struct device *self, void *aux)
{
	struct hifn_softc *sc = (struct hifn_softc *)self;
	struct pci_attach_args *pa = aux;
	const struct hifn_product *hp;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	const char *hifncap;	/* capability string from hifn_enable_crypto() */
	char rbase;		/* 'K' or 'M' for the RAM size printout */
	bus_size_t iosize0, iosize1;
	u_int32_t cmd;
	u_int16_t ena;
	bus_dma_segment_t seg;
	bus_dmamap_t dmamap;
	int rseg;
	caddr_t kva;

	hp = hifn_lookup(pa);
	if (hp == NULL) {
		/* hifn_probe() already matched, so this cannot happen. */
		printf("\n");
		panic("hifn_attach: impossible");
	}

	aprint_naive(": Crypto processor\n");
	aprint_normal(": %s, rev. %d\n", hp->hifn_name,
	    PCI_REVISION(pa->pa_class));

	sc->sc_pci_pc = pa->pa_pc;
	sc->sc_pci_tag = pa->pa_tag;

	sc->sc_flags = hp->hifn_flags;

	/* Enable bus mastering so the chip can DMA. */
	cmd = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	cmd |= PCI_COMMAND_MASTER_ENABLE;
	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, cmd);

	/* BAR0/BAR1 are the two register groups (sc_st0/sc_st1). */
	if (pci_mapreg_map(pa, HIFN_BAR0, PCI_MAPREG_TYPE_MEM, 0,
	    &sc->sc_st0, &sc->sc_sh0, NULL, &iosize0)) {
		aprint_error("%s: can't map mem space %d\n",
		    sc->sc_dv.dv_xname, 0);
		return;
	}

	if (pci_mapreg_map(pa, HIFN_BAR1, PCI_MAPREG_TYPE_MEM, 0,
	    &sc->sc_st1, &sc->sc_sh1, NULL, &iosize1)) {
		aprint_error("%s: can't find mem space %d\n",
		    sc->sc_dv.dv_xname, 1);
		goto fail_io0;
	}

	hifn_set_retry(sc);

	if (sc->sc_flags & HIFN_NO_BURSTWRITE) {
		/* Write-after-write workaround state, see hifn_write_4(). */
		sc->sc_waw_lastgroup = -1;
		sc->sc_waw_lastreg = 1;
	}

	/*
	 * Allocate, map, and load the single contiguous DMA area that
	 * holds the command/source/dest/result rings and bounce buffers.
	 */
	sc->sc_dmat = pa->pa_dmat;
	if (bus_dmamem_alloc(sc->sc_dmat, sizeof(*sc->sc_dma), PAGE_SIZE, 0,
	    &seg, 1, &rseg, BUS_DMA_NOWAIT)) {
		aprint_error("%s: can't alloc DMA buffer\n",
		    sc->sc_dv.dv_xname);
		goto fail_io1;
	}
	if (bus_dmamem_map(sc->sc_dmat, &seg, rseg, sizeof(*sc->sc_dma), &kva,
	    BUS_DMA_NOWAIT)) {
		aprint_error("%s: can't map DMA buffers (%lu bytes)\n",
		    sc->sc_dv.dv_xname, (u_long)sizeof(*sc->sc_dma));
		bus_dmamem_free(sc->sc_dmat, &seg, rseg);
		goto fail_io1;
	}
	if (bus_dmamap_create(sc->sc_dmat, sizeof(*sc->sc_dma), 1,
	    sizeof(*sc->sc_dma), 0, BUS_DMA_NOWAIT, &dmamap)) {
		aprint_error("%s: can't create DMA map\n",
		    sc->sc_dv.dv_xname);
		bus_dmamem_unmap(sc->sc_dmat, kva, sizeof(*sc->sc_dma));
		bus_dmamem_free(sc->sc_dmat, &seg, rseg);
		goto fail_io1;
	}
	if (bus_dmamap_load(sc->sc_dmat, dmamap, kva, sizeof(*sc->sc_dma),
	    NULL, BUS_DMA_NOWAIT)) {
		aprint_error("%s: can't load DMA map\n",
		    sc->sc_dv.dv_xname);
		bus_dmamap_destroy(sc->sc_dmat, dmamap);
		bus_dmamem_unmap(sc->sc_dmat, kva, sizeof(*sc->sc_dma));
		bus_dmamem_free(sc->sc_dmat, &seg, rseg);
		goto fail_io1;
	}
	sc->sc_dmamap = dmamap;
	sc->sc_dma = (struct hifn_dma *)kva;
	bzero(sc->sc_dma, sizeof(*sc->sc_dma));

	hifn_reset_board(sc, 0);

	if ((hifncap = hifn_enable_crypto(sc, pa->pa_id)) == NULL) {
		aprint_error("%s: crypto enabling failed\n",
		    sc->sc_dv.dv_xname);
		goto fail_mem;
	}
	hifn_reset_puc(sc);

	hifn_init_dma(sc);
	hifn_init_pci_registers(sc);

	/* XXX can't dynamically determine ram type for 795x; force dram */
	if (sc->sc_flags & HIFN_IS_7956)
		sc->sc_drammodel = 1;
	else if (hifn_ramtype(sc))
		goto fail_mem;

	if (sc->sc_drammodel == 0)
		hifn_sramsize(sc);
	else
		hifn_dramsize(sc);

	/*
	 * Workaround for NetSec 7751 rev A: half ram size because two
	 * of the address lines were left floating
	 */
	if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_NETSEC &&
	    PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_NETSEC_7751 &&
	    PCI_REVISION(pa->pa_class) == 0x61)
		sc->sc_ramsize >>= 1;

	if (pci_intr_map(pa, &ih)) {
		aprint_error("%s: couldn't map interrupt\n",
		    sc->sc_dv.dv_xname);
		goto fail_mem;
	}
	intrstr = pci_intr_string(pc, ih);
#ifdef	__OpenBSD__
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, hifn_intr, sc,
	    self->dv_xname);
#else
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, hifn_intr, sc);
#endif
	if (sc->sc_ih == NULL) {
		aprint_error("%s: couldn't establish interrupt\n",
		    sc->sc_dv.dv_xname);
		if (intrstr != NULL)
			aprint_normal(" at %s", intrstr);
		aprint_normal("\n");
		goto fail_mem;
	}

	hifn_sessions(sc);

	/* rseg is re-used here as scratch for the RAM size printout. */
	rseg = sc->sc_ramsize / 1024;
	rbase = 'K';
	if (sc->sc_ramsize >= (1024 * 1024)) {
		rbase = 'M';
		rseg /= 1024;
	}
	aprint_normal("%s: %s, %d%cB %cram, interrupting at %s\n",
	    sc->sc_dv.dv_xname, hifncap, rseg, rbase,
	    sc->sc_drammodel ? 'd' : 's', intrstr);

	sc->sc_cid = crypto_get_driverid(0);
	if (sc->sc_cid < 0) {
		aprint_error("%s: couldn't get crypto driver id\n",
		    sc->sc_dv.dv_xname);
		goto fail_intr;
	}

	/*
	 * Read the enabled-algorithm level; HIFN_PUCNFG_CHIPID must be
	 * set before reading HIFN_0_PUSTAT (cf. hifn_enable_crypto()).
	 */
	WRITE_REG_0(sc, HIFN_0_PUCNFG,
	    READ_REG_0(sc, HIFN_0_PUCNFG) | HIFN_PUCNFG_CHIPID);
	ena = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA;

	/* Register algorithms with opencrypto per the enable level. */
	switch (ena) {
	case HIFN_PUSTAT_ENA_2:
		crypto_register(sc->sc_cid, CRYPTO_3DES_CBC, 0, 0,
		    hifn_newsession, hifn_freesession, hifn_process, sc);
		crypto_register(sc->sc_cid, CRYPTO_ARC4, 0, 0,
		    hifn_newsession, hifn_freesession, hifn_process, sc);
		if (sc->sc_flags & HIFN_HAS_AES)
			crypto_register(sc->sc_cid, CRYPTO_AES_CBC, 0, 0,
			    hifn_newsession, hifn_freesession,
			    hifn_process, sc);
		/*FALLTHROUGH*/
	case HIFN_PUSTAT_ENA_1:
		crypto_register(sc->sc_cid, CRYPTO_MD5, 0, 0,
		    hifn_newsession, hifn_freesession, hifn_process, sc);
		crypto_register(sc->sc_cid, CRYPTO_SHA1, 0, 0,
		    hifn_newsession, hifn_freesession, hifn_process, sc);
		crypto_register(sc->sc_cid, CRYPTO_MD5_HMAC, 0, 0,
		    hifn_newsession, hifn_freesession, hifn_process, sc);
		crypto_register(sc->sc_cid, CRYPTO_SHA1_HMAC, 0, 0,
		    hifn_newsession, hifn_freesession, hifn_process, sc);
		crypto_register(sc->sc_cid, CRYPTO_DES_CBC, 0, 0,
		    hifn_newsession, hifn_freesession, hifn_process, sc);
		break;
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, 0,
	    sc->sc_dmamap->dm_mapsize,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	if (sc->sc_flags & (HIFN_HAS_PUBLIC | HIFN_HAS_RNG))
		hifn_init_pubrng(sc);

	/* Start the once-per-second housekeeping tick. */
#ifdef	__OpenBSD__
	timeout_set(&sc->sc_tickto, hifn_tick, sc);
	timeout_add(&sc->sc_tickto, hz);
#else
	callout_init(&sc->sc_tickto);
	callout_reset(&sc->sc_tickto, hz, hifn_tick, sc);
#endif
	return;

fail_intr:
	pci_intr_disestablish(pc, sc->sc_ih);
fail_mem:
	bus_dmamap_unload(sc->sc_dmat, dmamap);
	bus_dmamap_destroy(sc->sc_dmat, dmamap);
	bus_dmamem_unmap(sc->sc_dmat, kva, sizeof(*sc->sc_dma));
	bus_dmamem_free(sc->sc_dmat, &seg, rseg);

	/* Turn off DMA polling */
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
	    HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);

fail_io1:
	bus_space_unmap(sc->sc_st1, sc->sc_sh1, iosize1);
fail_io0:
	bus_space_unmap(sc->sc_st0, sc->sc_sh0, iosize0);
}

/*
 * Bring up the public-key unit and/or the RNG, as advertised in
 * sc_flags.  Returns 0 on success, 1 if the public-key unit fails to
 * come out of reset.  On NetBSD HIFN_NO_RNG is defined above, so the
 * periodic hifn_rng() callout is never started.
 */
int
hifn_init_pubrng(struct hifn_softc *sc)
{
	u_int32_t r;
	int i;

	if ((sc->sc_flags & HIFN_IS_7811) == 0) {
		/* Reset 7951 public key/rng engine */
		WRITE_REG_1(sc, HIFN_1_PUB_RESET,
		    READ_REG_1(sc, HIFN_1_PUB_RESET) | HIFN_PUBRST_RESET);

		/* Wait up to ~100 ms for the reset bit to self-clear. */
		for (i = 0; i < 100; i++) {
			DELAY(1000);
			if ((READ_REG_1(sc, HIFN_1_PUB_RESET) &
			    HIFN_PUBRST_RESET) == 0)
				break;
		}

		if (i == 100) {
			printf("%s: public key init failed\n",
			    sc->sc_dv.dv_xname);
			return (1);
		}
	}

	/* Enable the rng, if available */
	if (sc->sc_flags & HIFN_HAS_RNG) {
		if (sc->sc_flags & HIFN_IS_7811) {
			/* 7811: disable, reconfigure, then re-enable. */
			r = READ_REG_1(sc, HIFN_1_7811_RNGENA);
			if (r & HIFN_7811_RNGENA_ENA) {
				r &= ~HIFN_7811_RNGENA_ENA;
				WRITE_REG_1(sc, HIFN_1_7811_RNGENA, r);
			}
			WRITE_REG_1(sc, HIFN_1_7811_RNGCFG,
			    HIFN_7811_RNGCFG_DEFL);
			r |= HIFN_7811_RNGENA_ENA;
			WRITE_REG_1(sc, HIFN_1_7811_RNGENA, r);
		} else
			WRITE_REG_1(sc, HIFN_1_RNG_CONFIG,
			    READ_REG_1(sc, HIFN_1_RNG_CONFIG) |
			    HIFN_RNGCFG_ENA);

		/*
		 * First sample after enable is discarded (sc_rngfirst);
		 * harvest at ~100 Hz, or every tick if hz < 100.
		 */
		sc->sc_rngfirst = 1;
		if (hz >= 100)
			sc->sc_rnghz = hz / 100;
		else
			sc->sc_rnghz = 1;
#ifndef HIFN_NO_RNG
#ifdef	__OpenBSD__
		timeout_set(&sc->sc_rngto, hifn_rng, sc);
		timeout_add(&sc->sc_rngto, sc->sc_rnghz);
#else	/* !__OpenBSD__ */
		callout_init(&sc->sc_rngto);
		callout_reset(&sc->sc_rngto, sc->sc_rnghz, hifn_rng, sc);
#endif	/* !__OpenBSD__ */
#endif	/* HIFN_NO_RNG */
	}

	/* Enable public key engine, if available */
	if (sc->sc_flags & HIFN_HAS_PUBLIC) {
		WRITE_REG_1(sc, HIFN_1_PUB_IEN, HIFN_PUBIEN_DONE);
		sc->sc_dmaier |= HIFN_DMAIER_PUBDONE;
		WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
	}

	return (0);
}

#ifndef HIFN_NO_RNG
/*
 * Periodic RNG harvester: pull words from the hardware RNG and feed
 * them to the entropy pool, then reschedule itself at sc_rnghz.  The
 * very first batch after enabling is discarded (sc_rngfirst).  The
 * entire body is compiled out on NetBSD (#ifndef __NetBSD__), where
 * the function is additionally never built (HIFN_NO_RNG).
 */
static void
hifn_rng(void *vsc)
{
#ifndef __NetBSD__
	struct hifn_softc *sc = vsc;
	u_int32_t num1, sts, num2;
	int i;

	if (sc->sc_flags & HIFN_IS_7811) {
		/* Drain up to 5 pairs of words from the 7811 FIFO. */
		for (i = 0; i < 5; i++) {
			sts = READ_REG_1(sc, HIFN_1_7811_RNGSTS);
			if (sts & HIFN_7811_RNGSTS_UFL) {
				/* Underflow: stop harvesting for good
				 * (callout is not rescheduled). */
				printf("%s: RNG underflow: disabling\n",
				    sc->sc_dv.dv_xname);
				return;
			}
			if ((sts & HIFN_7811_RNGSTS_RDY) == 0)
				break;

			/*
			 * There are at least two words in the RNG FIFO
			 * at this point.
			 */
			num1 = READ_REG_1(sc, HIFN_1_7811_RNGDAT);
			num2 = READ_REG_1(sc, HIFN_1_7811_RNGDAT);
			if (sc->sc_rngfirst)
				sc->sc_rngfirst = 0;
			else {
				add_true_randomness(num1);
				add_true_randomness(num2);
			}
		}
	} else {
		num1 = READ_REG_1(sc, HIFN_1_RNG_DATA);

		if (sc->sc_rngfirst)
			sc->sc_rngfirst = 0;
		else
			add_true_randomness(num1);
	}

#ifdef	__OpenBSD__
	timeout_add(&sc->sc_rngto, sc->sc_rnghz);
#else
	callout_reset(&sc->sc_rngto, sc->sc_rnghz, hifn_rng, sc);
#endif
#endif /*!__NetBSD__*/
}
#endif

/*
 * Spin (up to 5000 x 1 us) waiting for the processing unit's RESET
 * bit to clear; warn on the console if it never does.
 */
void
hifn_puc_wait(struct hifn_softc *sc)
{
	int i;

	for (i = 5000; i > 0; i--) {
		DELAY(1);
		if (!(READ_REG_0(sc, HIFN_0_PUCTRL) & HIFN_PUCTRL_RESET))
			break;
	}
	if (!i)
		printf("%s: proc unit did not reset\n", sc->sc_dv.dv_xname);
}

/*
 * Reset the processing unit.
 */
void
hifn_reset_puc(struct hifn_softc *sc)
{
	/* Reset processing unit */
	WRITE_REG_0(sc, HIFN_0_PUCTRL, HIFN_PUCTRL_DMAENA);
	hifn_puc_wait(sc);
}

/*
 * Clear the low 16 bits (retry/TRDY timeout fields) of the
 * HIFN_TRDY_TIMEOUT PCI config register, leaving the upper half
 * untouched.
 */
void
hifn_set_retry(struct hifn_softc *sc)
{
	u_int32_t r;

	r = pci_conf_read(sc->sc_pci_pc, sc->sc_pci_tag, HIFN_TRDY_TIMEOUT);
	r &= 0xffff0000;
	pci_conf_write(sc->sc_pci_pc, sc->sc_pci_tag, HIFN_TRDY_TIMEOUT, r);
}

/*
 * Resets the board.  Values in the registers are left as is
 * from the reset (i.e. initial values are assigned elsewhere).
 * A `full' reset also pulses MSTRESET; otherwise only the DMA
 * unit is cycled and the processing unit reset separately.
 */
void
hifn_reset_board(struct hifn_softc *sc, int full)
{
	u_int32_t reg;

	/*
	 * Set polling in the DMA configuration register to zero.  0x7 avoids
	 * resetting the board and zeros out the other fields.
	 */
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
	    HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);

	/*
	 * Now that polling has been disabled, we have to wait 1 ms
	 * before resetting the board.
	 */
	DELAY(1000);

	/* Reset the DMA unit */
	if (full) {
		WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MODE);
		DELAY(1000);
	} else {
		WRITE_REG_1(sc, HIFN_1_DMA_CNFG,
		    HIFN_DMACNFG_MODE | HIFN_DMACNFG_MSTRESET);
		hifn_reset_puc(sc);
	}

	/* Host-side ring state must be cleared along with the chip. */
	bzero(sc->sc_dma, sizeof(*sc->sc_dma));

	/* Bring dma unit out of reset */
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
	    HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);

	hifn_puc_wait(sc);

	hifn_set_retry(sc);

	if (sc->sc_flags & HIFN_IS_7811) {
		/* 7811: wait up to ~1 s for context RAM initialization. */
		for (reg = 0; reg < 1000; reg++) {
			if (READ_REG_1(sc, HIFN_1_7811_MIPSRST) &
			    HIFN_MIPSRST_CRAMINIT)
				break;
			DELAY(1000);
		}
		if (reg == 1000)
			printf(": cram init timeout\n");
	}
}

/*
 * Advance the unlock-signature value by `cnt' steps of a linear
 * feedback shift: each step shifts `a' left one bit and feeds back
 * the parity of (a & 0x80080125).  Used by hifn_enable_crypto() to
 * derive the unlock sequence from the per-card key bytes.
 */
u_int32_t
hifn_next_signature(u_int32_t a, u_int cnt)
{
	int i;
	u_int32_t v;

	for (i = 0; i < cnt; i++) {

		/* get the parity */
		v = a & 0x80080125;
		v ^= v >> 16;
		v ^= v >> 8;
		v ^= v >> 4;
		v ^= v >> 2;
		v ^= v >> 1;

		a = (v & 1) ^ (a << 1);
	}

	return a;
}

/*
 * Per-card 13-byte unlock keys, looked up by PCI vendor/product.
 * All entries here use the all-zero key.
 */
struct pci2id {
	u_short		pci_vendor;
	u_short		pci_prod;
	char		card_id[13];
} pci2id[] = {
	{
		PCI_VENDOR_HIFN,
		PCI_PRODUCT_HIFN_7951,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	}, {
		PCI_VENDOR_HIFN,
		PCI_PRODUCT_HIFN_7955,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	}, {
		PCI_VENDOR_HIFN,
		PCI_PRODUCT_HIFN_7956,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	}, {
		PCI_VENDOR_NETSEC,
		PCI_PRODUCT_NETSEC_7751,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	}, {
		PCI_VENDOR_INVERTEX,
		PCI_PRODUCT_INVERTEX_AEON,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	}, {
		PCI_VENDOR_HIFN,
		PCI_PRODUCT_HIFN_7811,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	}, {
		/*
		 * Other vendors share this PCI ID as well, such as
		 * http://www.powercrypt.com, and obviously they also
		 * use the same key.
		 */
		PCI_VENDOR_HIFN,
		PCI_PRODUCT_HIFN_7751,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	},
};

/*
 * Unlock ("enable") the crypto engine if it is not already unlocked,
 * using the per-card key from pci2id[].  The already-enabled check is
 * important, as enabling crypto twice will lock the board until the
 * next reboot.  Returns a capability string describing the enabled
 * level, or NULL for an unknown card or encryption level.
 */
const char *
hifn_enable_crypto(struct hifn_softc *sc, pcireg_t pciid)
{
	u_int32_t dmacfg, ramcfg, encl, addr, i;
	char *offtbl = NULL;

	for (i = 0; i < sizeof(pci2id)/sizeof(pci2id[0]); i++) {
		if (pci2id[i].pci_vendor == PCI_VENDOR(pciid) &&
		    pci2id[i].pci_prod == PCI_PRODUCT(pciid)) {
			offtbl = pci2id[i].card_id;
			break;
		}
	}

	if (offtbl == NULL) {
#ifdef HIFN_DEBUG
		aprint_debug("%s: Unknown card!\n", sc->sc_dv.dv_xname);
#endif
		return (NULL);
	}

	/* Save these so they can be restored at "report:" below. */
	ramcfg = READ_REG_0(sc, HIFN_0_PUCNFG);
	dmacfg = READ_REG_1(sc, HIFN_1_DMA_CNFG);

	/*
	 * The RAM config register's encrypt level bit needs to be set before
	 * every read performed on the encryption level register.
	 */
	WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg | HIFN_PUCNFG_CHIPID);

	encl = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA;

	/*
	 * Make sure we don't re-unlock.  Two unlocks kills chip until the
	 * next reboot.
	 */
	if (encl == HIFN_PUSTAT_ENA_1 || encl == HIFN_PUSTAT_ENA_2) {
#ifdef HIFN_DEBUG
		aprint_debug("%s: Strong Crypto already enabled!\n",
		    sc->sc_dv.dv_xname);
#endif
		goto report;
	}

	if (encl != 0 && encl != HIFN_PUSTAT_ENA_0) {
#ifdef HIFN_DEBUG
		aprint_debug("%s: Unknown encryption level\n",
		    sc->sc_dv.dv_xname);
#endif
		return (NULL);
	}

	/* Put the chip in unlock mode and feed it the signature stream. */
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_UNLOCK |
	    HIFN_DMACNFG_MSTRESET | HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
	DELAY(1000);
	addr = READ_REG_1(sc, HIFN_1_UNLOCK_SECRET1);
	DELAY(1000);
	WRITE_REG_1(sc, HIFN_1_UNLOCK_SECRET2, 0);
	DELAY(1000);

	/* One signature word per key byte (13 bytes, indices 0..12). */
	for (i = 0; i <= 12; i++) {
		addr = hifn_next_signature(addr, offtbl[i] + 0x101);
		WRITE_REG_1(sc, HIFN_1_UNLOCK_SECRET2, addr);

		DELAY(1000);
	}

	/* Re-read the enable level to see whether the unlock took. */
	WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg | HIFN_PUCNFG_CHIPID);
	encl = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA;

#ifdef HIFN_DEBUG
	if (encl != HIFN_PUSTAT_ENA_1 && encl != HIFN_PUSTAT_ENA_2)
		aprint_debug("Encryption engine is permanently locked until next system reset.");
	else
		aprint_debug("Encryption engine enabled successfully!");
#endif

report:
	WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg);
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, dmacfg);

	switch (encl) {
	case HIFN_PUSTAT_ENA_0:
		return ("LZS-only (no encr/auth)");

	case HIFN_PUSTAT_ENA_1:
		return ("DES");

	case HIFN_PUSTAT_ENA_2:
		if (sc->sc_flags & HIFN_HAS_AES)
			return ("3DES/AES");
		else
			return ("3DES");

	default:
		return ("disabled");
	}
	/* NOTREACHED */
}

/*
 * Give initial values to the registers listed in the "Register Space"
 * section of the HIFN Software Development reference manual.
 */
void
hifn_init_pci_registers(struct hifn_softc *sc)
{
	/* write fixed values needed by the Initialization registers */
	WRITE_REG_0(sc, HIFN_0_PUCTRL, HIFN_PUCTRL_DMAENA);
	WRITE_REG_0(sc, HIFN_0_FIFOCNFG, HIFN_FIFOCNFG_THRESHOLD);
	WRITE_REG_0(sc, HIFN_0_PUIER, HIFN_PUIER_DSTOVER);

	/* write all 4 ring address registers */
	WRITE_REG_1(sc, HIFN_1_DMA_CRAR, sc->sc_dmamap->dm_segs[0].ds_addr +
	    offsetof(struct hifn_dma, cmdr[0]));
	WRITE_REG_1(sc, HIFN_1_DMA_SRAR, sc->sc_dmamap->dm_segs[0].ds_addr +
	    offsetof(struct hifn_dma, srcr[0]));
	WRITE_REG_1(sc, HIFN_1_DMA_DRAR, sc->sc_dmamap->dm_segs[0].ds_addr +
	    offsetof(struct hifn_dma, dstr[0]));
	WRITE_REG_1(sc, HIFN_1_DMA_RRAR, sc->sc_dmamap->dm_segs[0].ds_addr +
	    offsetof(struct hifn_dma, resr[0]));

	DELAY(2000);

	/*
	 * write status register: disable all four ring engines and
	 * write 1 to every status bit to clear it.
	 */
	WRITE_REG_1(sc, HIFN_1_DMA_CSR,
	    HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS |
	    HIFN_DMACSR_S_CTRL_DIS | HIFN_DMACSR_C_CTRL_DIS |
	    HIFN_DMACSR_D_ABORT | HIFN_DMACSR_D_DONE | HIFN_DMACSR_D_LAST |
	    HIFN_DMACSR_D_WAIT | HIFN_DMACSR_D_OVER |
	    HIFN_DMACSR_R_ABORT | HIFN_DMACSR_R_DONE | HIFN_DMACSR_R_LAST |
	    HIFN_DMACSR_R_WAIT | HIFN_DMACSR_R_OVER |
	    HIFN_DMACSR_S_ABORT | HIFN_DMACSR_S_DONE | HIFN_DMACSR_S_LAST |
	    HIFN_DMACSR_S_WAIT |
	    HIFN_DMACSR_C_ABORT | HIFN_DMACSR_C_DONE | HIFN_DMACSR_C_LAST |
	    HIFN_DMACSR_C_WAIT |
	    HIFN_DMACSR_ENGINE |
	    ((sc->sc_flags & HIFN_HAS_PUBLIC) ?
		HIFN_DMACSR_PUBDONE : 0) |
	    ((sc->sc_flags & HIFN_IS_7811) ?
		HIFN_DMACSR_ILLW | HIFN_DMACSR_ILLR : 0));

	sc->sc_d_busy = sc->sc_r_busy = sc->sc_s_busy = sc->sc_c_busy = 0;
	/* Interrupt mask: result-done plus the error/abort conditions. */
	sc->sc_dmaier |= HIFN_DMAIER_R_DONE | HIFN_DMAIER_C_ABORT |
	    HIFN_DMAIER_D_OVER | HIFN_DMAIER_R_OVER |
	    HIFN_DMAIER_S_ABORT | HIFN_DMAIER_D_ABORT | HIFN_DMAIER_R_ABORT |
	    HIFN_DMAIER_ENGINE |
	    ((sc->sc_flags & HIFN_IS_7811) ?
		HIFN_DMAIER_ILLW | HIFN_DMAIER_ILLR : 0);
	sc->sc_dmaier &= ~HIFN_DMAIER_C_WAIT;
	WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
	CLR_LED(sc, HIFN_MIPSRST_LED0 | HIFN_MIPSRST_LED1 | HIFN_MIPSRST_LED2);

	if (sc->sc_flags & HIFN_IS_7956) {
		/* 795x: no external RAM config bits, but PLL setup. */
		WRITE_REG_0(sc, HIFN_0_PUCNFG, HIFN_PUCNFG_COMPSING |
		    HIFN_PUCNFG_TCALLPHASES |
		    HIFN_PUCNFG_TCDRVTOTEM | HIFN_PUCNFG_BUS32);
		WRITE_REG_1(sc, HIFN_1_PLL, HIFN_PLL_7956);
	} else {
		WRITE_REG_0(sc, HIFN_0_PUCNFG, HIFN_PUCNFG_COMPSING |
		    HIFN_PUCNFG_DRFR_128 | HIFN_PUCNFG_TCALLPHASES |
		    HIFN_PUCNFG_TCDRVTOTEM | HIFN_PUCNFG_BUS32 |
		    (sc->sc_drammodel ? HIFN_PUCNFG_DRAM : HIFN_PUCNFG_SRAM));
	}

	WRITE_REG_0(sc, HIFN_0_PUISR, HIFN_PUISR_DSTOVER);
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
	    HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE | HIFN_DMACNFG_LAST |
	    ((HIFN_POLL_FREQUENCY << 16) & HIFN_DMACNFG_POLLFREQ) |
	    ((HIFN_POLL_SCALAR << 8) & HIFN_DMACNFG_POLLINVAL));
}

/*
 * The maximum number of sessions supported by the card
 * is dependent on the amount of context ram, which
 * encryption algorithms are enabled, and how compression
 * is configured.  This should be configured before this
 * routine is called.
 */
void
hifn_sessions(struct hifn_softc *sc)
{
	u_int32_t pucnfg;
	int ctxsize;

	pucnfg = READ_REG_0(sc, HIFN_0_PUCNFG);

	if (pucnfg & HIFN_PUCNFG_COMPSING) {
		if (pucnfg & HIFN_PUCNFG_ENCCNFG)
			ctxsize = 128;
		else
			ctxsize = 512;
		/*
		 * 7955/7956 has internal context memory of 32K
		 */
		if (sc->sc_flags & HIFN_IS_7956)
			sc->sc_maxses = 32768 / ctxsize;
		else
			sc->sc_maxses = 1 +
			    ((sc->sc_ramsize - 32768) / ctxsize);
	}
	else
		sc->sc_maxses = sc->sc_ramsize / 16384;

	/* Cap the session count regardless of available context RAM. */
	if (sc->sc_maxses > 2048)
		sc->sc_maxses = 2048;
}

/*
 * Determine ram type (sram or dram).
 * Board should be just out of a reset
 * state when this is called.  Sets sc_drammodel to 1 when the
 * board appears to have DRAM; returns 0 on a completed probe,
 * -1 if the RAM test transactions themselves fail.
 */
int
hifn_ramtype(struct hifn_softc *sc)
{
	u_int8_t data[8], dataexpect[8];
	int i;

	/*
	 * Write a 0x55 pattern to address 0 and read it back; a
	 * readback mismatch is taken to mean the board has DRAM.
	 */
	for (i = 0; i < sizeof(data); i++)
		data[i] = dataexpect[i] = 0x55;
	if (hifn_writeramaddr(sc, 0, data))
		return (-1);
	if (hifn_readramaddr(sc, 0, data))
		return (-1);
	if (bcmp(data, dataexpect, sizeof(data)) != 0) {
		sc->sc_drammodel = 1;
		return (0);
	}

	/* Repeat with the complementary 0xaa pattern. */
	for (i = 0; i < sizeof(data); i++)
		data[i] = dataexpect[i] = 0xaa;
	if (hifn_writeramaddr(sc, 0, data))
		return (-1);
	if (hifn_readramaddr(sc, 0, data))
		return (-1);
	if (bcmp(data, dataexpect, sizeof(data)) != 0) {
		sc->sc_drammodel = 1;
		return (0);
	}

	return (0);
}

/* SRAM probing parameters: up to 32MB scanned in 16KB steps. */
#define	HIFN_SRAM_MAX		(32 << 20)
#define	HIFN_SRAM_STEP_SIZE	16384
#define	HIFN_SRAM_GRANULARITY	(HIFN_SRAM_MAX / HIFN_SRAM_STEP_SIZE)

/*
 * Probe the amount of SRAM fitted.  Each 16KB step is stamped with
 * its own index (written from the top down, presumably so that any
 * address-wrapped locations end up holding a lower index -- TODO
 * confirm against hardware docs), then the stamps are verified from
 * the bottom up; sc_ramsize grows for as long as they verify.
 * Always returns 0.
 */
int
hifn_sramsize(struct hifn_softc *sc)
{
	u_int32_t a;
	u_int8_t data[8];
	u_int8_t dataexpect[sizeof(data)];
	int32_t i;

	for (i = 0; i < sizeof(data); i++)
		data[i] = dataexpect[i] = i ^ 0x5a;

	for (i = HIFN_SRAM_GRANULARITY - 1; i >= 0; i--) {
		a = i * HIFN_SRAM_STEP_SIZE;
		/* Stamp the step index into the first 4 bytes. */
		bcopy(&i, data, sizeof(i));
		hifn_writeramaddr(sc, a, data);
	}

	for (i = 0; i < HIFN_SRAM_GRANULARITY; i++) {
		a = i * HIFN_SRAM_STEP_SIZE;
		bcopy(&i, dataexpect, sizeof(i));
		if (hifn_readramaddr(sc, a, data) < 0)
			return (0);
		if (bcmp(data, dataexpect, sizeof(data)) != 0)
			return (0);
		sc->sc_ramsize = a + HIFN_SRAM_STEP_SIZE;
	}

	return (0);
}

/*
 * XXX For dram boards, one should really try all of the
 * HIFN_PUCNFG_DSZ_*'s.  This just assumes that PUCNFG
 * is already set up correctly.
 */
int
hifn_dramsize(struct hifn_softc *sc)
{
	u_int32_t cnfg;

	if (sc->sc_flags & HIFN_IS_7956) {
		/*
		 * 7955/7956 have a fixed internal ram of only 32K.
		 */
		sc->sc_ramsize = 32768;
	} else {
		/* Decode the DRAM size field of PUCNFG (power of two). */
		cnfg = READ_REG_0(sc, HIFN_0_PUCNFG) &
		    HIFN_PUCNFG_DRAMMASK;
		sc->sc_ramsize = 1 << ((cnfg >> 13) + 18);
	}
	return (0);
}

/*
 * Claim the next slot in each of the four descriptor rings (command,
 * source, destination, result), returning the claimed indices
 * through the out-pointers.  When a ring's producer index reaches
 * the end, the terminal element is turned into a JUMP descriptor
 * back to element 0 and the index wraps.
 */
void
hifn_alloc_slot(struct hifn_softc *sc, int *cmdp, int *srcp, int *dstp,
    int *resp)
{
	struct hifn_dma *dma = sc->sc_dma;

	if (dma->cmdi == HIFN_D_CMD_RSIZE) {
		dma->cmdi = 0;
		dma->cmdr[HIFN_D_CMD_RSIZE].l = htole32(HIFN_D_VALID |
		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
		HIFN_CMDR_SYNC(sc, HIFN_D_CMD_RSIZE,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	}
	*cmdp = dma->cmdi++;
	dma->cmdk = dma->cmdi;

	if (dma->srci == HIFN_D_SRC_RSIZE) {
		dma->srci = 0;
		dma->srcr[HIFN_D_SRC_RSIZE].l = htole32(HIFN_D_VALID |
		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
		HIFN_SRCR_SYNC(sc, HIFN_D_SRC_RSIZE,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	}
	*srcp = dma->srci++;
	dma->srck = dma->srci;

	if (dma->dsti == HIFN_D_DST_RSIZE) {
		dma->dsti = 0;
		dma->dstr[HIFN_D_DST_RSIZE].l = htole32(HIFN_D_VALID |
		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
		HIFN_DSTR_SYNC(sc, HIFN_D_DST_RSIZE,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	}
	*dstp = dma->dsti++;
	dma->dstk = dma->dsti;

	if (dma->resi == HIFN_D_RES_RSIZE) {
		dma->resi = 0;
		dma->resr[HIFN_D_RES_RSIZE].l = htole32(HIFN_D_VALID |
		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
		HIFN_RESR_SYNC(sc, HIFN_D_RES_RSIZE,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	}
	*resp = dma->resi++;
	dma->resk = dma->resi;
}

/*
 * Write the 8 bytes at `data' to external RAM address `addr' by
 * pushing a write-RAM base command through the descriptor rings
 * and polling for completion.  Intended to return 0 on success
 * and -1 on timeout, but see the NOTE on the timeout test below.
 */
int
hifn_writeramaddr(struct hifn_softc *sc, int addr, u_int8_t *data)
{
	struct hifn_dma *dma = sc->sc_dma;
	struct hifn_base_command wc;
	const u_int32_t masks = HIFN_D_VALID | HIFN_D_LAST | HIFN_D_MASKDONEIRQ;
	int r, cmdi, resi, srci, dsti;

	/*
	 * Command code 3 in the high bits of `masks' (cf. code 2 used
	 * by hifn_readramaddr()); the RAM address is split across
	 * session_num (high bits) and total_dest_count (low 14 bits)
	 * -- presumably per the base-command encoding; verify against
	 * the 7751 reference manual.
	 */
	wc.masks = htole16(3 << 13);
	wc.session_num = htole16(addr >> 14);
	wc.total_source_count = htole16(8);
	wc.total_dest_count = htole16(addr & 0x3fff);

	hifn_alloc_slot(sc, &cmdi, &srci, &dsti, &resi);

	WRITE_REG_1(sc, HIFN_1_DMA_CSR,
	    HIFN_DMACSR_C_CTRL_ENA | HIFN_DMACSR_S_CTRL_ENA |
	    HIFN_DMACSR_D_CTRL_ENA | HIFN_DMACSR_R_CTRL_ENA);

	/* build write command */
	bzero(dma->command_bufs[cmdi], HIFN_MAX_COMMAND);
	*(struct hifn_base_command *)dma->command_bufs[cmdi] = wc;
	bcopy(data, &dma->test_src, sizeof(dma->test_src));

	dma->srcr[srci].p = htole32(sc->sc_dmamap->dm_segs[0].ds_addr
	    + offsetof(struct hifn_dma, test_src));
	dma->dstr[dsti].p = htole32(sc->sc_dmamap->dm_segs[0].ds_addr
	    + offsetof(struct hifn_dma, test_dst));

	dma->cmdr[cmdi].l = htole32(16 | masks);
	dma->srcr[srci].l = htole32(8 | masks);
	dma->dstr[dsti].l = htole32(4 | masks);
	dma->resr[resi].l = htole32(4 | masks);

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
	    0, sc->sc_dmamap->dm_mapsize,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/*
	 * Poll (up to 10000 x 10 us) for the chip to clear HIFN_D_VALID
	 * in the result descriptor.
	 */
	for (r = 10000; r >= 0; r--) {
		DELAY(10);
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
		    0, sc->sc_dmamap->dm_mapsize,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		if ((dma->resr[resi].l & htole32(HIFN_D_VALID)) == 0)
			break;
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
		    0, sc->sc_dmamap->dm_mapsize,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}
	/*
	 * NOTE(review): if the loop above exhausts, r is -1 (not 0), so
	 * this test never detects an actual timeout (which is instead
	 * reported as success), while a break on the final iteration
	 * (r == 0) is misreported as a timeout.  The `r = -1' store is
	 * also dead, as the return follows immediately.
	 * hifn_readramaddr() shares the same polling pattern.
	 */
	if (r == 0) {
		printf("%s: writeramaddr -- "
		    "result[%d](addr %d) still valid\n",
		    sc->sc_dv.dv_xname, resi, addr);
		r = -1;
		return (-1);
	} else
		r = 0;

	WRITE_REG_1(sc, HIFN_1_DMA_CSR,
	    HIFN_DMACSR_C_CTRL_DIS | HIFN_DMACSR_S_CTRL_DIS |
HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS); 1169 1170 return (r); 1171 } 1172 1173 int 1174 hifn_readramaddr(struct hifn_softc *sc, int addr, u_int8_t *data) 1175 { 1176 struct hifn_dma *dma = sc->sc_dma; 1177 struct hifn_base_command rc; 1178 const u_int32_t masks = HIFN_D_VALID | HIFN_D_LAST | HIFN_D_MASKDONEIRQ; 1179 int r, cmdi, srci, dsti, resi; 1180 1181 rc.masks = htole16(2 << 13); 1182 rc.session_num = htole16(addr >> 14); 1183 rc.total_source_count = htole16(addr & 0x3fff); 1184 rc.total_dest_count = htole16(8); 1185 1186 hifn_alloc_slot(sc, &cmdi, &srci, &dsti, &resi); 1187 1188 WRITE_REG_1(sc, HIFN_1_DMA_CSR, 1189 HIFN_DMACSR_C_CTRL_ENA | HIFN_DMACSR_S_CTRL_ENA | 1190 HIFN_DMACSR_D_CTRL_ENA | HIFN_DMACSR_R_CTRL_ENA); 1191 1192 bzero(dma->command_bufs[cmdi], HIFN_MAX_COMMAND); 1193 *(struct hifn_base_command *)dma->command_bufs[cmdi] = rc; 1194 1195 dma->srcr[srci].p = htole32(sc->sc_dmamap->dm_segs[0].ds_addr + 1196 offsetof(struct hifn_dma, test_src)); 1197 dma->test_src = 0; 1198 dma->dstr[dsti].p = htole32(sc->sc_dmamap->dm_segs[0].ds_addr + 1199 offsetof(struct hifn_dma, test_dst)); 1200 dma->test_dst = 0; 1201 dma->cmdr[cmdi].l = htole32(8 | masks); 1202 dma->srcr[srci].l = htole32(8 | masks); 1203 dma->dstr[dsti].l = htole32(8 | masks); 1204 dma->resr[resi].l = htole32(HIFN_MAX_RESULT | masks); 1205 1206 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, 1207 0, sc->sc_dmamap->dm_mapsize, 1208 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1209 1210 for (r = 10000; r >= 0; r--) { 1211 DELAY(10); 1212 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, 1213 0, sc->sc_dmamap->dm_mapsize, 1214 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1215 if ((dma->resr[resi].l & htole32(HIFN_D_VALID)) == 0) 1216 break; 1217 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, 1218 0, sc->sc_dmamap->dm_mapsize, 1219 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1220 } 1221 if (r == 0) { 1222 printf("%s: readramaddr -- " 1223 "result[%d](addr %d) still valid\n", 1224 
sc->sc_dv.dv_xname, resi, addr); 1225 r = -1; 1226 } else { 1227 r = 0; 1228 bcopy(&dma->test_dst, data, sizeof(dma->test_dst)); 1229 } 1230 1231 WRITE_REG_1(sc, HIFN_1_DMA_CSR, 1232 HIFN_DMACSR_C_CTRL_DIS | HIFN_DMACSR_S_CTRL_DIS | 1233 HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS); 1234 1235 return (r); 1236 } 1237 1238 /* 1239 * Initialize the descriptor rings. 1240 */ 1241 void 1242 hifn_init_dma(struct hifn_softc *sc) 1243 { 1244 struct hifn_dma *dma = sc->sc_dma; 1245 int i; 1246 1247 hifn_set_retry(sc); 1248 1249 /* initialize static pointer values */ 1250 for (i = 0; i < HIFN_D_CMD_RSIZE; i++) 1251 dma->cmdr[i].p = htole32(sc->sc_dmamap->dm_segs[0].ds_addr + 1252 offsetof(struct hifn_dma, command_bufs[i][0])); 1253 for (i = 0; i < HIFN_D_RES_RSIZE; i++) 1254 dma->resr[i].p = htole32(sc->sc_dmamap->dm_segs[0].ds_addr + 1255 offsetof(struct hifn_dma, result_bufs[i][0])); 1256 1257 dma->cmdr[HIFN_D_CMD_RSIZE].p = 1258 htole32(sc->sc_dmamap->dm_segs[0].ds_addr + 1259 offsetof(struct hifn_dma, cmdr[0])); 1260 dma->srcr[HIFN_D_SRC_RSIZE].p = 1261 htole32(sc->sc_dmamap->dm_segs[0].ds_addr + 1262 offsetof(struct hifn_dma, srcr[0])); 1263 dma->dstr[HIFN_D_DST_RSIZE].p = 1264 htole32(sc->sc_dmamap->dm_segs[0].ds_addr + 1265 offsetof(struct hifn_dma, dstr[0])); 1266 dma->resr[HIFN_D_RES_RSIZE].p = 1267 htole32(sc->sc_dmamap->dm_segs[0].ds_addr + 1268 offsetof(struct hifn_dma, resr[0])); 1269 1270 dma->cmdu = dma->srcu = dma->dstu = dma->resu = 0; 1271 dma->cmdi = dma->srci = dma->dsti = dma->resi = 0; 1272 dma->cmdk = dma->srck = dma->dstk = dma->resk = 0; 1273 } 1274 1275 /* 1276 * Writes out the raw command buffer space. Returns the 1277 * command buffer size. 
 */
u_int
hifn_write_command(struct hifn_command *cmd, u_int8_t *buf)
{
	u_int8_t *buf_pos;
	struct hifn_base_command *base_cmd;
	struct hifn_mac_command *mac_cmd;
	struct hifn_crypt_command *cry_cmd;
	struct hifn_comp_command *comp_cmd;
	int using_mac, using_crypt, using_comp, len, ivlen;
	u_int32_t dlen, slen;

	buf_pos = buf;
	using_mac = cmd->base_masks & HIFN_BASE_CMD_MAC;
	using_crypt = cmd->base_masks & HIFN_BASE_CMD_CRYPT;
	using_comp = cmd->base_masks & HIFN_BASE_CMD_COMP;

	/* Base command: total source/dest byte counts split into a low
	 * 16-bit field and high bits folded into session_num. */
	base_cmd = (struct hifn_base_command *)buf_pos;
	base_cmd->masks = htole16(cmd->base_masks);
	slen = cmd->src_map->dm_mapsize;
	if (cmd->sloplen)
		/* Unaligned tail is redirected to a 4-byte slop word. */
		dlen = cmd->dst_map->dm_mapsize - cmd->sloplen +
		    sizeof(u_int32_t);
	else
		dlen = cmd->dst_map->dm_mapsize;
	base_cmd->total_source_count = htole16(slen & HIFN_BASE_CMD_LENMASK_LO);
	base_cmd->total_dest_count = htole16(dlen & HIFN_BASE_CMD_LENMASK_LO);
	dlen >>= 16;
	slen >>= 16;
	base_cmd->session_num = htole16(cmd->session_num |
	    ((slen << HIFN_BASE_CMD_SRCLEN_S) & HIFN_BASE_CMD_SRCLEN_M) |
	    ((dlen << HIFN_BASE_CMD_DSTLEN_S) & HIFN_BASE_CMD_DSTLEN_M));
	buf_pos += sizeof(struct hifn_base_command);

	/* Optional per-engine subcommands follow the base command, in
	 * the fixed order: compression, MAC, crypt. */
	if (using_comp) {
		comp_cmd = (struct hifn_comp_command *)buf_pos;
		dlen = cmd->compcrd->crd_len;
		comp_cmd->source_count = htole16(dlen & 0xffff);
		dlen >>= 16;
		comp_cmd->masks = htole16(cmd->comp_masks |
		    ((dlen << HIFN_COMP_CMD_SRCLEN_S) & HIFN_COMP_CMD_SRCLEN_M));
		comp_cmd->header_skip = htole16(cmd->compcrd->crd_skip);
		comp_cmd->reserved = 0;
		buf_pos += sizeof(struct hifn_comp_command);
	}

	if (using_mac) {
		mac_cmd = (struct hifn_mac_command *)buf_pos;
		dlen = cmd->maccrd->crd_len;
		mac_cmd->source_count = htole16(dlen & 0xffff);
		dlen >>= 16;
		mac_cmd->masks = htole16(cmd->mac_masks |
		    ((dlen << HIFN_MAC_CMD_SRCLEN_S) & HIFN_MAC_CMD_SRCLEN_M));
		mac_cmd->header_skip = htole16(cmd->maccrd->crd_skip);
		mac_cmd->reserved = 0;
		buf_pos += sizeof(struct hifn_mac_command);
	}

	if (using_crypt) {
		cry_cmd = (struct hifn_crypt_command *)buf_pos;
		dlen = cmd->enccrd->crd_len;
		cry_cmd->source_count = htole16(dlen & 0xffff);
		dlen >>= 16;
		cry_cmd->masks = htole16(cmd->cry_masks |
		    ((dlen << HIFN_CRYPT_CMD_SRCLEN_S) & HIFN_CRYPT_CMD_SRCLEN_M));
		cry_cmd->header_skip = htole16(cmd->enccrd->crd_skip);
		cry_cmd->reserved = 0;
		buf_pos += sizeof(struct hifn_crypt_command);
	}

	/* Key/IV material trails the subcommands: MAC key first. */
	if (using_mac && cmd->mac_masks & HIFN_MAC_CMD_NEW_KEY) {
		bcopy(cmd->mac, buf_pos, HIFN_MAC_KEY_LENGTH);
		buf_pos += HIFN_MAC_KEY_LENGTH;
	}

	if (using_crypt && cmd->cry_masks & HIFN_CRYPT_CMD_NEW_KEY) {
		switch (cmd->cry_masks & HIFN_CRYPT_CMD_ALG_MASK) {
		case HIFN_CRYPT_CMD_ALG_3DES:
			bcopy(cmd->ck, buf_pos, HIFN_3DES_KEY_LENGTH);
			buf_pos += HIFN_3DES_KEY_LENGTH;
			break;
		case HIFN_CRYPT_CMD_ALG_DES:
			bcopy(cmd->ck, buf_pos, HIFN_DES_KEY_LENGTH);
			buf_pos += HIFN_DES_KEY_LENGTH;
			break;
		case HIFN_CRYPT_CMD_ALG_RC4:
			/* RC4 key area is 256 bytes: repeat the user key
			 * to fill it, then a 4-byte zero terminator. */
			len = 256;
			do {
				int clen;

				clen = MIN(cmd->cklen, len);
				bcopy(cmd->ck, buf_pos, clen);
				len -= clen;
				buf_pos += clen;
			} while (len > 0);
			bzero(buf_pos, 4);
			buf_pos += 4;
			break;
		case HIFN_CRYPT_CMD_ALG_AES:
			/*
			 * AES keys are variable 128, 192 and
			 * 256 bits (16, 24 and 32 bytes).
			 */
			bcopy(cmd->ck, buf_pos, cmd->cklen);
			buf_pos += cmd->cklen;
			break;
		}
	}

	if (using_crypt && cmd->cry_masks & HIFN_CRYPT_CMD_NEW_IV) {
		switch (cmd->cry_masks & HIFN_CRYPT_CMD_ALG_MASK) {
		case HIFN_CRYPT_CMD_ALG_AES:
			ivlen = HIFN_AES_IV_LENGTH;
			break;
		default:
			ivlen = HIFN_IV_LENGTH;
			break;
		}
		bcopy(cmd->iv, buf_pos, ivlen);
		buf_pos += ivlen;
	}

	/* Pure pass-through command: pad with 8 zero bytes. */
	if ((cmd->base_masks & (HIFN_BASE_CMD_MAC | HIFN_BASE_CMD_CRYPT |
	    HIFN_BASE_CMD_COMP)) == 0) {
		bzero(buf_pos, 8);
		buf_pos += 8;
	}

	return (buf_pos - buf);
}

/*
 * Return non-zero iff every DMA segment is 32-bit aligned in address,
 * and every segment but the last has a length that is a multiple of 4
 * (the chip's descriptor alignment requirement).
 */
int
hifn_dmamap_aligned(bus_dmamap_t map)
{
	int i;

	for (i = 0; i < map->dm_nsegs; i++) {
		if (map->dm_segs[i].ds_addr & 3)
			return (0);
		if ((i != (map->dm_nsegs - 1)) &&
		    (map->dm_segs[i].ds_len & 3))
			return (0);
	}
	return (1);
}

/*
 * Fill the destination descriptor ring from cmd->dst_map.  If the
 * mapping has an unaligned tail (cmd->sloplen != 0), the final bytes
 * are redirected into the shared "slop" word so the last real
 * descriptor stays 4-byte sized.  Updates dma->dsti/dstu and returns
 * the new ring index.
 */
int
hifn_dmamap_load_dst(struct hifn_softc *sc, struct hifn_command *cmd)
{
	struct hifn_dma *dma = sc->sc_dma;
	bus_dmamap_t map = cmd->dst_map;
	u_int32_t p, l;
	int idx, used = 0, i;

	idx = dma->dsti;
	/* All segments except the last get plain VALID descriptors. */
	for (i = 0; i < map->dm_nsegs - 1; i++) {
		dma->dstr[idx].p = htole32(map->dm_segs[i].ds_addr);
		dma->dstr[idx].l = htole32(HIFN_D_VALID |
		    HIFN_D_MASKDONEIRQ | map->dm_segs[i].ds_len);
		HIFN_DSTR_SYNC(sc, idx,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		used++;

		if (++idx == HIFN_D_DST_RSIZE) {
			/* Re-arm the wrap (JUMP) descriptor. */
			dma->dstr[idx].l = htole32(HIFN_D_VALID |
			    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
			HIFN_DSTR_SYNC(sc, idx,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			idx = 0;
		}
	}

	if (cmd->sloplen == 0) {
		/* Aligned: last segment is the LAST descriptor itself. */
		p = map->dm_segs[i].ds_addr;
		l = HIFN_D_VALID | HIFN_D_MASKDONEIRQ | HIFN_D_LAST |
		    map->dm_segs[i].ds_len;
	} else {
		/* Unaligned tail: LAST descriptor points at the slop
		 * word; the aligned part of the final segment (if any)
		 * gets its own descriptor first. */
		p = sc->sc_dmamap->dm_segs[0].ds_addr +
		    offsetof(struct hifn_dma, slop[cmd->slopidx]);
		l = HIFN_D_VALID | HIFN_D_MASKDONEIRQ | HIFN_D_LAST |
		    sizeof(u_int32_t);

		if ((map->dm_segs[i].ds_len - cmd->sloplen) != 0) {
			dma->dstr[idx].p = htole32(map->dm_segs[i].ds_addr);
			dma->dstr[idx].l = htole32(HIFN_D_VALID |
			    HIFN_D_MASKDONEIRQ |
			    (map->dm_segs[i].ds_len - cmd->sloplen));
			HIFN_DSTR_SYNC(sc, idx,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			used++;

			if (++idx == HIFN_D_DST_RSIZE) {
				dma->dstr[idx].l = htole32(HIFN_D_VALID |
				    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
				HIFN_DSTR_SYNC(sc, idx,
				    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
				idx = 0;
			}
		}
	}
	dma->dstr[idx].p = htole32(p);
	dma->dstr[idx].l = htole32(l);
	HIFN_DSTR_SYNC(sc, idx, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	used++;

	if (++idx == HIFN_D_DST_RSIZE) {
		dma->dstr[idx].l = htole32(HIFN_D_VALID | HIFN_D_JUMP |
		    HIFN_D_MASKDONEIRQ);
		HIFN_DSTR_SYNC(sc, idx,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		idx = 0;
	}

	dma->dsti = idx;
	dma->dstu += used;
	return (idx);
}

/*
 * Fill the source descriptor ring from cmd->src_map, marking the final
 * segment with HIFN_D_LAST.  Updates dma->srci/srcu and returns the
 * new ring index.
 */
int
hifn_dmamap_load_src(struct hifn_softc *sc, struct hifn_command *cmd)
{
	struct hifn_dma *dma = sc->sc_dma;
	bus_dmamap_t map = cmd->src_map;
	int idx, i;
	u_int32_t last = 0;

	idx = dma->srci;
	for (i = 0; i < map->dm_nsegs; i++) {
		if (i == map->dm_nsegs - 1)
			last = HIFN_D_LAST;

		dma->srcr[idx].p = htole32(map->dm_segs[i].ds_addr);
		dma->srcr[idx].l = htole32(map->dm_segs[i].ds_len |
		    HIFN_D_VALID | HIFN_D_MASKDONEIRQ | last);
		HIFN_SRCR_SYNC(sc, idx,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);

		if (++idx == HIFN_D_SRC_RSIZE) {
			/* Re-arm the wrap (JUMP) descriptor. */
			dma->srcr[idx].l = htole32(HIFN_D_VALID |
			    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
			HIFN_SRCR_SYNC(sc, HIFN_D_SRC_RSIZE,
			    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
			idx = 0;
		}
	}
	dma->srci = idx;
	dma->srcu += map->dm_nsegs;
	return (idx);
}

/*
 * Queue one crypto operation on the chip: map the source (and, if the
 * source is not 4-byte aligned, build a fresh aligned mbuf chain for
 * the destination), write the command descriptor, and load the
 * src/dst/result rings, enabling each DMA engine as it first becomes
 * needed.  Returns 0 on success or an errno; on error all maps/mbufs
 * allocated here are released via the goto cleanup chain.
 */
int
hifn_crypto(struct hifn_softc *sc, struct hifn_command *cmd,
    struct cryptop *crp, int hint)
{
	struct hifn_dma *dma = sc->sc_dma;
	u_int32_t cmdlen;
	int cmdi, resi, s, err = 0;

	if (bus_dmamap_create(sc->sc_dmat, HIFN_MAX_DMALEN, MAX_SCATTER,
	    HIFN_MAX_SEGLEN, 0, BUS_DMA_NOWAIT, &cmd->src_map))
		return (ENOMEM);

	if (crp->crp_flags & CRYPTO_F_IMBUF) {
		if (bus_dmamap_load_mbuf(sc->sc_dmat, cmd->src_map,
		    cmd->srcu.src_m, BUS_DMA_NOWAIT)) {
			err = ENOMEM;
			goto err_srcmap1;
		}
	} else if (crp->crp_flags & CRYPTO_F_IOV) {
		if (bus_dmamap_load_uio(sc->sc_dmat, cmd->src_map,
		    cmd->srcu.src_io, BUS_DMA_NOWAIT)) {
			err = ENOMEM;
			goto err_srcmap1;
		}
	} else {
		err = EINVAL;
		goto err_srcmap1;
	}

	if (hifn_dmamap_aligned(cmd->src_map)) {
		/* Source is usable in place; operate in-place. */
		cmd->sloplen = cmd->src_map->dm_mapsize & 3;
		if (crp->crp_flags & CRYPTO_F_IOV)
			cmd->dstu.dst_io = cmd->srcu.src_io;
		else if (crp->crp_flags & CRYPTO_F_IMBUF)
			cmd->dstu.dst_m = cmd->srcu.src_m;
		cmd->dst_map = cmd->src_map;
	} else {
		if (crp->crp_flags & CRYPTO_F_IOV) {
			/* No realignment path for uio buffers. */
			err = EINVAL;
			goto err_srcmap;
		} else if (crp->crp_flags & CRYPTO_F_IMBUF) {
			/* Build a new, well-aligned mbuf chain of the
			 * same total length for the destination. */
			int totlen, len;
			struct mbuf *m, *m0, *mlast;

			totlen = cmd->src_map->dm_mapsize;
			if (cmd->srcu.src_m->m_flags & M_PKTHDR) {
				len = MHLEN;
				MGETHDR(m0, M_DONTWAIT, MT_DATA);
			} else {
				len = MLEN;
				MGET(m0, M_DONTWAIT, MT_DATA);
			}
			if (m0 == NULL) {
				err = ENOMEM;
				goto err_srcmap;
			}
			if (len == MHLEN)
				M_DUP_PKTHDR(m0, cmd->srcu.src_m);
			if (totlen >= MINCLSIZE) {
				MCLGET(m0, M_DONTWAIT);
				if (m0->m_flags & M_EXT)
					len = MCLBYTES;
			}
			totlen -= len;
			m0->m_pkthdr.len = m0->m_len = len;
			mlast = m0;

			while (totlen > 0) {
				MGET(m, M_DONTWAIT, MT_DATA);
				if (m == NULL) {
					err = ENOMEM;
					m_freem(m0);
					goto err_srcmap;
				}
				len = MLEN;
				if (totlen >= MINCLSIZE) {
					MCLGET(m, M_DONTWAIT);
					if (m->m_flags & M_EXT)
						len = MCLBYTES;
				}

				m->m_len = len;
				if (m0->m_flags & M_PKTHDR)
					m0->m_pkthdr.len += len;
				totlen -= len;

				mlast->m_next = m;
				mlast = m;
			}
			cmd->dstu.dst_m = m0;
		}
	}

	/* Separate destination needs its own map (NULL unless in-place). */
	if (cmd->dst_map == NULL) {
		if (bus_dmamap_create(sc->sc_dmat,
		    HIFN_MAX_SEGLEN * MAX_SCATTER, MAX_SCATTER,
		    HIFN_MAX_SEGLEN, 0, BUS_DMA_NOWAIT, &cmd->dst_map)) {
			err = ENOMEM;
			goto err_srcmap;
		}
		if (crp->crp_flags & CRYPTO_F_IMBUF) {
			if (bus_dmamap_load_mbuf(sc->sc_dmat, cmd->dst_map,
			    cmd->dstu.dst_m, BUS_DMA_NOWAIT)) {
				err = ENOMEM;
				goto err_dstmap1;
			}
		} else if (crp->crp_flags & CRYPTO_F_IOV) {
			if (bus_dmamap_load_uio(sc->sc_dmat, cmd->dst_map,
			    cmd->dstu.dst_io, BUS_DMA_NOWAIT)) {
				err = ENOMEM;
				goto err_dstmap1;
			}
		}
	}

#ifdef HIFN_DEBUG
	if (hifn_debug)
		printf("%s: Entering cmd: stat %8x ien %8x u %d/%d/%d/%d n %d/%d\n",
		    sc->sc_dv.dv_xname,
		    READ_REG_1(sc, HIFN_1_DMA_CSR),
		    READ_REG_1(sc, HIFN_1_DMA_IER),
		    dma->cmdu, dma->srcu, dma->dstu, dma->resu,
		    cmd->src_map->dm_nsegs, cmd->dst_map->dm_nsegs);
#endif

	if (cmd->src_map == cmd->dst_map)
		bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
		    0, cmd->src_map->dm_mapsize,
		    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
	else {
		bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
		    0, cmd->src_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
		bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
		    0, cmd->dst_map->dm_mapsize, BUS_DMASYNC_PREREAD);
	}

	/* Ring bookkeeping below is protected against the interrupt
	 * handler by splnet(). */
	s = splnet();

	/*
	 * need 1 cmd, and 1 res
	 * need N src, and N dst
	 */
	if ((dma->cmdu + 1) > HIFN_D_CMD_RSIZE ||
	    (dma->resu + 1) > HIFN_D_RES_RSIZE) {
		splx(s);
		err = ENOMEM;
		goto err_dstmap;
	}
	if ((dma->srcu + cmd->src_map->dm_nsegs) > HIFN_D_SRC_RSIZE ||
	    (dma->dstu + cmd->dst_map->dm_nsegs + 1) > HIFN_D_DST_RSIZE) {
		splx(s);
		err = ENOMEM;
		goto err_dstmap;
	}

	if (dma->cmdi == HIFN_D_CMD_RSIZE) {
		dma->cmdi = 0;
		dma->cmdr[HIFN_D_CMD_RSIZE].l = htole32(HIFN_D_VALID |
		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
		HIFN_CMDR_SYNC(sc, HIFN_D_CMD_RSIZE,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	}
	cmdi = dma->cmdi++;
	cmdlen = hifn_write_command(cmd, dma->command_bufs[cmdi]);
	HIFN_CMD_SYNC(sc, cmdi, BUS_DMASYNC_PREWRITE);

	/* .p for command/result already set */
	dma->cmdr[cmdi].l = htole32(cmdlen | HIFN_D_VALID | HIFN_D_LAST |
	    HIFN_D_MASKDONEIRQ);
	HIFN_CMDR_SYNC(sc, cmdi,
	    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	dma->cmdu++;
	if (sc->sc_c_busy == 0) {
		WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_C_CTRL_ENA);
		sc->sc_c_busy = 1;
		SET_LED(sc, HIFN_MIPSRST_LED0);
	}

	/*
	 * We don't worry about missing an interrupt (which a "command wait"
	 * interrupt salvages us from), unless there is more than one command
	 * in the queue.
	 */
	if (dma->cmdu > 1) {
		sc->sc_dmaier |= HIFN_DMAIER_C_WAIT;
		WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
	}

	hifnstats.hst_ipackets++;
	hifnstats.hst_ibytes += cmd->src_map->dm_mapsize;

	hifn_dmamap_load_src(sc, cmd);
	if (sc->sc_s_busy == 0) {
		WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_S_CTRL_ENA);
		sc->sc_s_busy = 1;
		SET_LED(sc, HIFN_MIPSRST_LED1);
	}

	/*
	 * Unlike other descriptors, we don't mask done interrupt from
	 * result descriptor.
	 */
#ifdef HIFN_DEBUG
	if (hifn_debug)
		printf("load res\n");
#endif
	if (dma->resi == HIFN_D_RES_RSIZE) {
		dma->resi = 0;
		dma->resr[HIFN_D_RES_RSIZE].l = htole32(HIFN_D_VALID |
		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
		HIFN_RESR_SYNC(sc, HIFN_D_RES_RSIZE,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}
	resi = dma->resi++;
	dma->hifn_commands[resi] = cmd;
	HIFN_RES_SYNC(sc, resi, BUS_DMASYNC_PREREAD);
	dma->resr[resi].l = htole32(HIFN_MAX_RESULT |
	    HIFN_D_VALID | HIFN_D_LAST);
	HIFN_RESR_SYNC(sc, resi,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	dma->resu++;
	if (sc->sc_r_busy == 0) {
		WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_R_CTRL_ENA);
		sc->sc_r_busy = 1;
		SET_LED(sc, HIFN_MIPSRST_LED2);
	}

	/* The slop word shares the result slot's index. */
	if (cmd->sloplen)
		cmd->slopidx = resi;

	hifn_dmamap_load_dst(sc, cmd);

	if (sc->sc_d_busy == 0) {
		WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_D_CTRL_ENA);
		sc->sc_d_busy = 1;
	}

#ifdef HIFN_DEBUG
	if (hifn_debug)
		printf("%s: command: stat %8x ier %8x\n",
		    sc->sc_dv.dv_xname,
		    READ_REG_1(sc, HIFN_1_DMA_CSR), READ_REG_1(sc, HIFN_1_DMA_IER));
#endif

	/* Keep the engines considered "active" for 5 ticks of hifn_tick. */
	sc->sc_active = 5;
	splx(s);
	return (err);		/* success */

err_dstmap:
	if (cmd->src_map != cmd->dst_map)
		bus_dmamap_unload(sc->sc_dmat, cmd->dst_map);
err_dstmap1:
	if (cmd->src_map != cmd->dst_map)
		bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map);
err_srcmap:
	if (crp->crp_flags & CRYPTO_F_IMBUF &&
	    cmd->srcu.src_m != cmd->dstu.dst_m)
		m_freem(cmd->dstu.dst_m);
	bus_dmamap_unload(sc->sc_dmat, cmd->src_map);
err_srcmap1:
	bus_dmamap_destroy(sc->sc_dmat, cmd->src_map);
	return (err);
}

/*
 * Once-a-second housekeeping: if no command has been queued recently
 * (sc_active has counted down to 0), disable any DMA engine whose ring
 * has drained, then re-arm the callout.
 */
void
hifn_tick(void *vsc)
{
	struct hifn_softc *sc = vsc;
	int s;

	s = splnet();
	if (sc->sc_active == 0) {
		struct hifn_dma *dma = sc->sc_dma;
		u_int32_t r = 0;

		if (dma->cmdu == 0 && sc->sc_c_busy) {
			sc->sc_c_busy = 0;
			r |= HIFN_DMACSR_C_CTRL_DIS;
			CLR_LED(sc, HIFN_MIPSRST_LED0);
		}
		if (dma->srcu == 0 && sc->sc_s_busy) {
			sc->sc_s_busy = 0;
			r |= HIFN_DMACSR_S_CTRL_DIS;
			CLR_LED(sc, HIFN_MIPSRST_LED1);
		}
		if (dma->dstu == 0 && sc->sc_d_busy) {
			sc->sc_d_busy = 0;
			r |= HIFN_DMACSR_D_CTRL_DIS;
		}
		if (dma->resu == 0 && sc->sc_r_busy) {
			sc->sc_r_busy = 0;
			r |= HIFN_DMACSR_R_CTRL_DIS;
			CLR_LED(sc, HIFN_MIPSRST_LED2);
		}
		if (r)
			WRITE_REG_1(sc, HIFN_1_DMA_CSR, r);
	}
	else
		sc->sc_active--;
	splx(s);
#ifdef	__OpenBSD__
	timeout_add(&sc->sc_tickto, hz);
#else
	callout_reset(&sc->sc_tickto, hz, hifn_tick, sc);
#endif
}

/*
 * Interrupt handler: acknowledge the DMA status bits we own, handle
 * overrun/abort conditions, then reap completed descriptors from the
 * result, source and command rings (completion is signalled by the
 * chip clearing HIFN_D_VALID).  Returns 1 if the interrupt was ours.
 */
int
hifn_intr(void *arg)
{
	struct hifn_softc *sc = arg;
	struct hifn_dma *dma = sc->sc_dma;
	u_int32_t dmacsr, restart;
	int i, u;

	dmacsr = READ_REG_1(sc, HIFN_1_DMA_CSR);

#ifdef HIFN_DEBUG
	if (hifn_debug)
		printf("%s: irq: stat %08x ien %08x u %d/%d/%d/%d\n",
		    sc->sc_dv.dv_xname,
		    dmacsr, READ_REG_1(sc, HIFN_1_DMA_IER),
		    dma->cmdu, dma->srcu, dma->dstu, dma->resu);
#endif

	/* Nothing in the DMA unit interrupted */
	if ((dmacsr & sc->sc_dmaier) == 0)
		return (0);

	/* Ack only the bits we have enabled. */
	WRITE_REG_1(sc, HIFN_1_DMA_CSR, dmacsr & sc->sc_dmaier);

	if (dmacsr & HIFN_DMACSR_ENGINE)
		WRITE_REG_0(sc, HIFN_0_PUISR, READ_REG_0(sc, HIFN_0_PUISR));

	if ((sc->sc_flags & HIFN_HAS_PUBLIC) &&
	    (dmacsr & HIFN_DMACSR_PUBDONE))
		WRITE_REG_1(sc, HIFN_1_PUB_STATUS,
		    READ_REG_1(sc, HIFN_1_PUB_STATUS) | HIFN_PUBSTS_DONE);

	restart = dmacsr & (HIFN_DMACSR_R_OVER | HIFN_DMACSR_D_OVER);
	if (restart)
		printf("%s: overrun %x\n", sc->sc_dv.dv_xname, dmacsr);

	if (sc->sc_flags & HIFN_IS_7811) {
		if (dmacsr & HIFN_DMACSR_ILLR)
			printf("%s: illegal read\n", sc->sc_dv.dv_xname);
		if (dmacsr & HIFN_DMACSR_ILLW)
			printf("%s: illegal write\n", sc->sc_dv.dv_xname);
	}

	restart = dmacsr & (HIFN_DMACSR_C_ABORT | HIFN_DMACSR_S_ABORT |
	    HIFN_DMACSR_D_ABORT | HIFN_DMACSR_R_ABORT);
	if (restart) {
		printf("%s: abort, resetting.\n", sc->sc_dv.dv_xname);
		hifnstats.hst_abort++;
		hifn_abort(sc);
		return (1);
	}

	if ((dmacsr & HIFN_DMACSR_C_WAIT) && (dma->resu == 0)) {
		/*
		 * If no slots to process and we receive a "waiting on
		 * command" interrupt, we disable the "waiting on command"
		 * (by clearing it).
		 */
		sc->sc_dmaier &= ~HIFN_DMAIER_C_WAIT;
		WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
	}

	/* clear the rings */
	i = dma->resk;
	while (dma->resu != 0) {
		HIFN_RESR_SYNC(sc, i,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		if (dma->resr[i].l & htole32(HIFN_D_VALID)) {
			/* Still owned by the chip; stop here. */
			HIFN_RESR_SYNC(sc, i,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			break;
		}

		/* Index RSIZE is the JUMP descriptor, not a real result. */
		if (i != HIFN_D_RES_RSIZE) {
			struct hifn_command *cmd;
			u_int8_t *macbuf = NULL;

			HIFN_RES_SYNC(sc, i, BUS_DMASYNC_POSTREAD);
			cmd = dma->hifn_commands[i];
			KASSERT(cmd != NULL
				/*("hifn_intr: null command slot %u", i)*/);
			dma->hifn_commands[i] = NULL;

			if (cmd->base_masks & HIFN_BASE_CMD_MAC) {
				/* MAC digest sits 12 bytes into the
				 * result buffer. */
				macbuf = dma->result_bufs[i];
				macbuf += 12;
			}

			hifn_callback(sc, cmd, macbuf);
			hifnstats.hst_opackets++;
		}

		/* Wrapping past the JUMP slot consumes no resu. */
		if (++i == (HIFN_D_RES_RSIZE + 1))
			i = 0;
		else
			dma->resu--;
	}
	dma->resk = i;

	i = dma->srck; u = dma->srcu;
	while (u != 0) {
		HIFN_SRCR_SYNC(sc, i,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		if (dma->srcr[i].l & htole32(HIFN_D_VALID)) {
			HIFN_SRCR_SYNC(sc, i,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			break;
		}
		if (++i == (HIFN_D_SRC_RSIZE + 1))
			i = 0;
		else
			u--;
	}
	dma->srck = i; dma->srcu = u;

	i = dma->cmdk; u = dma->cmdu;
	while (u != 0) {
		HIFN_CMDR_SYNC(sc, i,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		if (dma->cmdr[i].l & htole32(HIFN_D_VALID)) {
			HIFN_CMDR_SYNC(sc, i,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			break;
		}
		if (i != HIFN_D_CMD_RSIZE) {
			u--;
			HIFN_CMD_SYNC(sc, i, BUS_DMASYNC_POSTWRITE);
		}
		if (++i == (HIFN_D_CMD_RSIZE + 1))
			i = 0;
	}
	dma->cmdk = i; dma->cmdu = u;

	return (1);
}

/*
 * Allocate a new 'session' and return an encoded session id. 'sidp'
 * contains our registration id, and should contain an encoded session
 * id on successful allocation.
 */
int
hifn_newsession(void *arg, u_int32_t *sidp, struct cryptoini *cri)
{
	struct cryptoini *c;
	struct hifn_softc *sc = arg;
	int i, mac = 0, cry = 0, comp = 0;

	KASSERT(sc != NULL /*, ("hifn_newsession: null softc")*/);
	if (sidp == NULL || cri == NULL || sc == NULL)
		return (EINVAL);

	/* Find a free session slot. */
	for (i = 0; i < sc->sc_maxses; i++)
		if (sc->sc_sessions[i].hs_state == HS_STATE_FREE)
			break;
	if (i == sc->sc_maxses)
		return (ENOMEM);

	/* At most one MAC, one cipher and one compression op per session. */
	for (c = cri; c != NULL; c = c->cri_next) {
		switch (c->cri_alg) {
		case CRYPTO_MD5:
		case CRYPTO_SHA1:
		case CRYPTO_MD5_HMAC:
		case CRYPTO_SHA1_HMAC:
			if (mac)
				return (EINVAL);
			mac = 1;
			break;
		case CRYPTO_DES_CBC:
		case CRYPTO_3DES_CBC:
		case CRYPTO_AES_CBC:
			/* Seed the per-session IV for CBC modes. */
#ifdef __NetBSD__
			rnd_extract_data(sc->sc_sessions[i].hs_iv,
			    c->cri_alg == CRYPTO_AES_CBC ?
				HIFN_AES_IV_LENGTH : HIFN_IV_LENGTH,
			    RND_EXTRACT_ANY);
#else	/* FreeBSD and OpenBSD have get_random_bytes */
			/* XXX this may read fewer, does it matter? */
			get_random_bytes(sc->sc_sessions[i].hs_iv,
			    c->cri_alg == CRYPTO_AES_CBC ?
				HIFN_AES_IV_LENGTH : HIFN_IV_LENGTH);
#endif
			/*FALLTHROUGH*/
		case CRYPTO_ARC4:
			if (cry)
				return (EINVAL);
			cry = 1;
			break;
#ifdef HAVE_CRYPTO_LSZ
		case CRYPTO_LZS_COMP:
			if (comp)
				return (EINVAL);
			comp = 1;
			break;
#endif
		default:
			return (EINVAL);
		}
	}
	if (mac == 0 && cry == 0 && comp == 0)
		return (EINVAL);

	/*
	 * XXX only want to support compression without chaining to
	 * MAC/crypt engine right now
	 */
	if ((comp && mac) || (comp && cry))
		return (EINVAL);

	*sidp = HIFN_SID(sc->sc_dv.dv_unit, i);
	sc->sc_sessions[i].hs_state = HS_STATE_USED;

	return (0);
}

/*
 * Deallocate a session.
 * XXX this routine should run a zero'd mac/encrypt key into context ram.
 * XXX to blow away any keys already stored there.
 */
int
hifn_freesession(void *arg, u_int64_t tid)
{
	struct hifn_softc *sc = arg;
	int session;
	u_int32_t sid = ((u_int32_t) tid) & 0xffffffff;

	KASSERT(sc != NULL /*, ("hifn_freesession: null softc")*/);
	if (sc == NULL)
		return (EINVAL);

	session = HIFN_SESSION(sid);
	if (session >= sc->sc_maxses)
		return (EINVAL);

	/* Clearing the slot also wipes the stored IV/key material and
	 * returns it to HS_STATE_FREE. */
	bzero(&sc->sc_sessions[session], sizeof(sc->sc_sessions[session]));
	return (0);
}

int
hifn_process(void *arg, struct cryptop *crp, int hint)
{
	struct hifn_softc *sc = arg;
	struct hifn_command *cmd = NULL;
	int session, err, ivlen;
	struct cryptodesc *crd1, *crd2, *maccrd, *enccrd;

	if (crp == NULL || crp->crp_callback == NULL) {
		hifnstats.hst_invalid++;
		return (EINVAL);
	}
	session = HIFN_SESSION(crp->crp_sid);

	if (sc == NULL || session >= sc->sc_maxses) {
		err = EINVAL;
		goto errout;
	}

	cmd = (struct hifn_command *)malloc(sizeof(struct hifn_command),
	    M_DEVBUF, M_NOWAIT|M_ZERO);
	if
(cmd == NULL) { 2096 hifnstats.hst_nomem++; 2097 err = ENOMEM; 2098 goto errout; 2099 } 2100 2101 if (crp->crp_flags & CRYPTO_F_IMBUF) { 2102 cmd->srcu.src_m = (struct mbuf *)crp->crp_buf; 2103 cmd->dstu.dst_m = (struct mbuf *)crp->crp_buf; 2104 } else if (crp->crp_flags & CRYPTO_F_IOV) { 2105 cmd->srcu.src_io = (struct uio *)crp->crp_buf; 2106 cmd->dstu.dst_io = (struct uio *)crp->crp_buf; 2107 } else { 2108 err = EINVAL; 2109 goto errout; /* XXX we don't handle contiguous buffers! */ 2110 } 2111 2112 crd1 = crp->crp_desc; 2113 if (crd1 == NULL) { 2114 err = EINVAL; 2115 goto errout; 2116 } 2117 crd2 = crd1->crd_next; 2118 2119 if (crd2 == NULL) { 2120 if (crd1->crd_alg == CRYPTO_MD5_HMAC || 2121 crd1->crd_alg == CRYPTO_SHA1_HMAC || 2122 crd1->crd_alg == CRYPTO_SHA1 || 2123 crd1->crd_alg == CRYPTO_MD5) { 2124 maccrd = crd1; 2125 enccrd = NULL; 2126 } else if (crd1->crd_alg == CRYPTO_DES_CBC || 2127 crd1->crd_alg == CRYPTO_3DES_CBC || 2128 crd1->crd_alg == CRYPTO_AES_CBC || 2129 crd1->crd_alg == CRYPTO_ARC4) { 2130 if ((crd1->crd_flags & CRD_F_ENCRYPT) == 0) 2131 cmd->base_masks |= HIFN_BASE_CMD_DECODE; 2132 maccrd = NULL; 2133 enccrd = crd1; 2134 #ifdef HAVE_CRYPTO_LSZ 2135 } else if (crd1->crd_alg == CRYPTO_LZS_COMP) { 2136 return (hifn_compression(sc, crp, cmd)); 2137 #endif 2138 } else { 2139 err = EINVAL; 2140 goto errout; 2141 } 2142 } else { 2143 if ((crd1->crd_alg == CRYPTO_MD5_HMAC || 2144 crd1->crd_alg == CRYPTO_SHA1_HMAC || 2145 crd1->crd_alg == CRYPTO_MD5 || 2146 crd1->crd_alg == CRYPTO_SHA1) && 2147 (crd2->crd_alg == CRYPTO_DES_CBC || 2148 crd2->crd_alg == CRYPTO_3DES_CBC || 2149 crd2->crd_alg == CRYPTO_AES_CBC || 2150 crd2->crd_alg == CRYPTO_ARC4) && 2151 ((crd2->crd_flags & CRD_F_ENCRYPT) == 0)) { 2152 cmd->base_masks = HIFN_BASE_CMD_DECODE; 2153 maccrd = crd1; 2154 enccrd = crd2; 2155 } else if ((crd1->crd_alg == CRYPTO_DES_CBC || 2156 crd1->crd_alg == CRYPTO_ARC4 || 2157 crd1->crd_alg == CRYPTO_3DES_CBC || 2158 crd1->crd_alg == CRYPTO_AES_CBC) && 
2159 (crd2->crd_alg == CRYPTO_MD5_HMAC || 2160 crd2->crd_alg == CRYPTO_SHA1_HMAC || 2161 crd2->crd_alg == CRYPTO_MD5 || 2162 crd2->crd_alg == CRYPTO_SHA1) && 2163 (crd1->crd_flags & CRD_F_ENCRYPT)) { 2164 enccrd = crd1; 2165 maccrd = crd2; 2166 } else { 2167 /* 2168 * We cannot order the 7751 as requested 2169 */ 2170 err = EINVAL; 2171 goto errout; 2172 } 2173 } 2174 2175 if (enccrd) { 2176 cmd->enccrd = enccrd; 2177 cmd->base_masks |= HIFN_BASE_CMD_CRYPT; 2178 switch (enccrd->crd_alg) { 2179 case CRYPTO_ARC4: 2180 cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_RC4; 2181 if ((enccrd->crd_flags & CRD_F_ENCRYPT) 2182 != sc->sc_sessions[session].hs_prev_op) 2183 sc->sc_sessions[session].hs_state = 2184 HS_STATE_USED; 2185 break; 2186 case CRYPTO_DES_CBC: 2187 cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_DES | 2188 HIFN_CRYPT_CMD_MODE_CBC | 2189 HIFN_CRYPT_CMD_NEW_IV; 2190 break; 2191 case CRYPTO_3DES_CBC: 2192 cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_3DES | 2193 HIFN_CRYPT_CMD_MODE_CBC | 2194 HIFN_CRYPT_CMD_NEW_IV; 2195 break; 2196 case CRYPTO_AES_CBC: 2197 cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_AES | 2198 HIFN_CRYPT_CMD_MODE_CBC | 2199 HIFN_CRYPT_CMD_NEW_IV; 2200 break; 2201 default: 2202 err = EINVAL; 2203 goto errout; 2204 } 2205 if (enccrd->crd_alg != CRYPTO_ARC4) { 2206 ivlen = ((enccrd->crd_alg == CRYPTO_AES_CBC) ? 
2207 HIFN_AES_IV_LENGTH : HIFN_IV_LENGTH); 2208 if (enccrd->crd_flags & CRD_F_ENCRYPT) { 2209 if (enccrd->crd_flags & CRD_F_IV_EXPLICIT) 2210 bcopy(enccrd->crd_iv, cmd->iv, ivlen); 2211 else 2212 bcopy(sc->sc_sessions[session].hs_iv, 2213 cmd->iv, ivlen); 2214 2215 if ((enccrd->crd_flags & CRD_F_IV_PRESENT) 2216 == 0) { 2217 if (crp->crp_flags & CRYPTO_F_IMBUF) 2218 m_copyback(cmd->srcu.src_m, 2219 enccrd->crd_inject, 2220 ivlen, cmd->iv); 2221 else if (crp->crp_flags & CRYPTO_F_IOV) 2222 cuio_copyback(cmd->srcu.src_io, 2223 enccrd->crd_inject, 2224 ivlen, cmd->iv); 2225 } 2226 } else { 2227 if (enccrd->crd_flags & CRD_F_IV_EXPLICIT) 2228 bcopy(enccrd->crd_iv, cmd->iv, ivlen); 2229 else if (crp->crp_flags & CRYPTO_F_IMBUF) 2230 m_copydata(cmd->srcu.src_m, 2231 enccrd->crd_inject, ivlen, cmd->iv); 2232 else if (crp->crp_flags & CRYPTO_F_IOV) 2233 cuio_copydata(cmd->srcu.src_io, 2234 enccrd->crd_inject, ivlen, cmd->iv); 2235 } 2236 } 2237 2238 cmd->ck = enccrd->crd_key; 2239 cmd->cklen = enccrd->crd_klen >> 3; 2240 2241 /* 2242 * Need to specify the size for the AES key in the masks. 
2243 */ 2244 if ((cmd->cry_masks & HIFN_CRYPT_CMD_ALG_MASK) == 2245 HIFN_CRYPT_CMD_ALG_AES) { 2246 switch (cmd->cklen) { 2247 case 16: 2248 cmd->cry_masks |= HIFN_CRYPT_CMD_KSZ_128; 2249 break; 2250 case 24: 2251 cmd->cry_masks |= HIFN_CRYPT_CMD_KSZ_192; 2252 break; 2253 case 32: 2254 cmd->cry_masks |= HIFN_CRYPT_CMD_KSZ_256; 2255 break; 2256 default: 2257 err = EINVAL; 2258 goto errout; 2259 } 2260 } 2261 2262 if (sc->sc_sessions[session].hs_state == HS_STATE_USED) 2263 cmd->cry_masks |= HIFN_CRYPT_CMD_NEW_KEY; 2264 } 2265 2266 if (maccrd) { 2267 cmd->maccrd = maccrd; 2268 cmd->base_masks |= HIFN_BASE_CMD_MAC; 2269 2270 switch (maccrd->crd_alg) { 2271 case CRYPTO_MD5: 2272 cmd->mac_masks |= HIFN_MAC_CMD_ALG_MD5 | 2273 HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HASH | 2274 HIFN_MAC_CMD_POS_IPSEC; 2275 break; 2276 case CRYPTO_MD5_HMAC: 2277 cmd->mac_masks |= HIFN_MAC_CMD_ALG_MD5 | 2278 HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HMAC | 2279 HIFN_MAC_CMD_POS_IPSEC | HIFN_MAC_CMD_TRUNC; 2280 break; 2281 case CRYPTO_SHA1: 2282 cmd->mac_masks |= HIFN_MAC_CMD_ALG_SHA1 | 2283 HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HASH | 2284 HIFN_MAC_CMD_POS_IPSEC; 2285 break; 2286 case CRYPTO_SHA1_HMAC: 2287 cmd->mac_masks |= HIFN_MAC_CMD_ALG_SHA1 | 2288 HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HMAC | 2289 HIFN_MAC_CMD_POS_IPSEC | HIFN_MAC_CMD_TRUNC; 2290 break; 2291 } 2292 2293 if ((maccrd->crd_alg == CRYPTO_SHA1_HMAC || 2294 maccrd->crd_alg == CRYPTO_MD5_HMAC) && 2295 sc->sc_sessions[session].hs_state == HS_STATE_USED) { 2296 cmd->mac_masks |= HIFN_MAC_CMD_NEW_KEY; 2297 bcopy(maccrd->crd_key, cmd->mac, maccrd->crd_klen >> 3); 2298 bzero(cmd->mac + (maccrd->crd_klen >> 3), 2299 HIFN_MAC_KEY_LENGTH - (maccrd->crd_klen >> 3)); 2300 } 2301 } 2302 2303 cmd->crp = crp; 2304 cmd->session_num = session; 2305 cmd->softc = sc; 2306 2307 err = hifn_crypto(sc, cmd, crp, hint); 2308 if (err == 0) { 2309 if (enccrd) 2310 sc->sc_sessions[session].hs_prev_op = 2311 enccrd->crd_flags & CRD_F_ENCRYPT; 2312 if 
/*
 * Abort all requests currently outstanding on the result ring after a
 * hardware fault.  Completed entries (VALID bit clear) are salvaged and
 * delivered through hifn_callback(); incomplete ones are torn down and
 * their cryptop completed with an error.  Finally the board is reset and
 * the DMA state reinitialized.
 */
void
hifn_abort(struct hifn_softc *sc)
{
	struct hifn_dma *dma = sc->sc_dma;
	struct hifn_command *cmd;
	struct cryptop *crp;
	int i, u;

	/* Walk the result ring from the oldest outstanding entry. */
	i = dma->resk; u = dma->resu;
	while (u != 0) {
		cmd = dma->hifn_commands[i];
		KASSERT(cmd != NULL /*, ("hifn_abort: null cmd slot %u", i)*/);
		dma->hifn_commands[i] = NULL;
		crp = cmd->crp;

		if ((dma->resr[i].l & htole32(HIFN_D_VALID)) == 0) {
			/* Salvage what we can. */
			u_int8_t *macbuf;

			if (cmd->base_masks & HIFN_BASE_CMD_MAC) {
				/* MAC digest starts 12 bytes into the result
				 * buffer for this slot. */
				macbuf = dma->result_bufs[i];
				macbuf += 12;
			} else
				macbuf = NULL;
			hifnstats.hst_opackets++;
			hifn_callback(sc, cmd, macbuf);
		} else {
			/* Request never completed: sync, tear down maps. */
			if (cmd->src_map == cmd->dst_map) {
				bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
				    0, cmd->src_map->dm_mapsize,
				    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
			} else {
				bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
				    0, cmd->src_map->dm_mapsize,
				    BUS_DMASYNC_POSTWRITE);
				bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
				    0, cmd->dst_map->dm_mapsize,
				    BUS_DMASYNC_POSTREAD);
			}

			/* If a separate dst chain was allocated, the caller
			 * gets it back in crp_buf and the src is freed. */
			if (cmd->srcu.src_m != cmd->dstu.dst_m) {
				m_freem(cmd->srcu.src_m);
				crp->crp_buf = (caddr_t)cmd->dstu.dst_m;
			}

			/* non-shared buffers cannot be restarted */
			if (cmd->src_map != cmd->dst_map) {
				/*
				 * XXX should be EAGAIN, delayed until
				 * after the reset.
				 */
				crp->crp_etype = ENOMEM;
				bus_dmamap_unload(sc->sc_dmat, cmd->dst_map);
				bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map);
			} else
				crp->crp_etype = ENOMEM;

			bus_dmamap_unload(sc->sc_dmat, cmd->src_map);
			bus_dmamap_destroy(sc->sc_dmat, cmd->src_map);

			free(cmd, M_DEVBUF);
			/* EAGAIN means the framework will retry; everything
			 * else is reported to the caller now. */
			if (crp->crp_etype != EAGAIN)
				crypto_done(crp);
		}

		if (++i == HIFN_D_RES_RSIZE)
			i = 0;
		u--;
	}
	dma->resk = i; dma->resu = u;

	/* Force upload of key next time */
	for (i = 0; i < sc->sc_maxses; i++)
		if (sc->sc_sessions[i].hs_state == HS_STATE_KEY)
			sc->sc_sessions[i].hs_state = HS_STATE_USED;

	hifn_reset_board(sc, 1);
	hifn_init_dma(sc);
	hifn_init_pci_registers(sc);
}
/*
 * Completion handler for a symmetric cipher/MAC request.  Syncs and
 * unloads the DMA maps, copies slop bytes and the MAC digest back into
 * the caller's buffer, saves the last cipher block as the next IV for
 * encrypt operations, retires finished dst-ring descriptors, and
 * completes the cryptop.
 *
 * resbuf points at the raw result buffer for this slot (may carry a MAC
 * result when HIFN_BASE_CMD_MAC is set).
 */
void
hifn_callback(struct hifn_softc *sc, struct hifn_command *cmd, u_int8_t *resbuf)
{
	struct hifn_dma *dma = sc->sc_dma;
	struct cryptop *crp = cmd->crp;
	struct cryptodesc *crd;
	struct mbuf *m;
	int totlen, i, u, ivlen;

	/* Post-DMA sync: shared map gets both directions at once. */
	if (cmd->src_map == cmd->dst_map)
		bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
		    0, cmd->src_map->dm_mapsize,
		    BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
	else {
		bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
		    0, cmd->src_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
		    0, cmd->dst_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
	}

	if (crp->crp_flags & CRYPTO_F_IMBUF) {
		if (cmd->srcu.src_m != cmd->dstu.dst_m) {
			/*
			 * Output went into a freshly allocated chain: hand
			 * it to the caller, trim it to the source length,
			 * and free the original source chain.
			 */
			crp->crp_buf = (caddr_t)cmd->dstu.dst_m;
			totlen = cmd->src_map->dm_mapsize;
			for (m = cmd->dstu.dst_m; m != NULL; m = m->m_next) {
				if (totlen < m->m_len) {
					m->m_len = totlen;
					totlen = 0;
				} else
					totlen -= m->m_len;
			}
			cmd->dstu.dst_m->m_pkthdr.len =
			    cmd->srcu.src_m->m_pkthdr.len;
			m_freem(cmd->srcu.src_m);
		}
	}

	if (cmd->sloplen != 0) {
		/* Tail bytes were staged in the shared DMA slop area;
		 * copy them back to the end of the caller's buffer. */
		if (crp->crp_flags & CRYPTO_F_IMBUF)
			m_copyback((struct mbuf *)crp->crp_buf,
			    cmd->src_map->dm_mapsize - cmd->sloplen,
			    cmd->sloplen, (caddr_t)&dma->slop[cmd->slopidx]);
		else if (crp->crp_flags & CRYPTO_F_IOV)
			cuio_copyback((struct uio *)crp->crp_buf,
			    cmd->src_map->dm_mapsize - cmd->sloplen,
			    cmd->sloplen, (caddr_t)&dma->slop[cmd->slopidx]);
	}

	/* Retire dst descriptors the chip has finished with (VALID
	 * cleared); stop at the first still-valid one. */
	i = dma->dstk; u = dma->dstu;
	while (u != 0) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
		    offsetof(struct hifn_dma, dstr[i]), sizeof(struct hifn_desc),
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		if (dma->dstr[i].l & htole32(HIFN_D_VALID)) {
			bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
			    offsetof(struct hifn_dma, dstr[i]),
			    sizeof(struct hifn_desc),
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			break;
		}
		/* Slot HIFN_D_DST_RSIZE is the jump descriptor and does
		 * not count against the in-use total. */
		if (++i == (HIFN_D_DST_RSIZE + 1))
			i = 0;
		else
			u--;
	}
	dma->dstk = i; dma->dstu = u;

	hifnstats.hst_obytes += cmd->dst_map->dm_mapsize;

	/* Encrypt (CRYPT set, DECODE clear): save the last cipher block
	 * as the session's next IV. */
	if ((cmd->base_masks & (HIFN_BASE_CMD_CRYPT | HIFN_BASE_CMD_DECODE)) ==
	    HIFN_BASE_CMD_CRYPT) {
		for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
			if (crd->crd_alg != CRYPTO_DES_CBC &&
			    crd->crd_alg != CRYPTO_3DES_CBC &&
			    crd->crd_alg != CRYPTO_AES_CBC)
				continue;
			ivlen = ((crd->crd_alg == CRYPTO_AES_CBC) ?
			    HIFN_AES_IV_LENGTH : HIFN_IV_LENGTH);
			if (crp->crp_flags & CRYPTO_F_IMBUF)
				m_copydata((struct mbuf *)crp->crp_buf,
				    crd->crd_skip + crd->crd_len - ivlen,
				    ivlen,
				    cmd->softc->sc_sessions[cmd->session_num].hs_iv);
			else if (crp->crp_flags & CRYPTO_F_IOV) {
				cuio_copydata((struct uio *)crp->crp_buf,
				    crd->crd_skip + crd->crd_len - ivlen,
				    ivlen,
				    cmd->softc->sc_sessions[cmd->session_num].hs_iv);
			}
			/* XXX We do not handle contig data */
			break;
		}
	}

	if (cmd->base_masks & HIFN_BASE_CMD_MAC) {
		u_int8_t *macbuf;

		/* Digest location within the result buffer; offsets are
		 * presumably per hifn7751reg.h result layout — the digest
		 * appears to follow the mac_result header.  TODO confirm. */
		macbuf = resbuf + sizeof(struct hifn_base_result);
		if (cmd->base_masks & HIFN_BASE_CMD_COMP)
			macbuf += sizeof(struct hifn_comp_result);
		macbuf += sizeof(struct hifn_mac_result);

		for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
			int len;

			/* Digest lengths: full MD5/SHA1 hashes, or the
			 * 96-bit truncation used for the HMAC modes. */
			if (crd->crd_alg == CRYPTO_MD5)
				len = 16;
			else if (crd->crd_alg == CRYPTO_SHA1)
				len = 20;
			else if (crd->crd_alg == CRYPTO_MD5_HMAC ||
			    crd->crd_alg == CRYPTO_SHA1_HMAC)
				len = 12;
			else
				continue;

			if (crp->crp_flags & CRYPTO_F_IMBUF)
				m_copyback((struct mbuf *)crp->crp_buf,
				    crd->crd_inject, len, macbuf);
			else if ((crp->crp_flags & CRYPTO_F_IOV) && crp->crp_mac)
				bcopy((caddr_t)macbuf, crp->crp_mac, len);
			break;
		}
	}

	/* Tear down the maps (dst first when distinct) and finish. */
	if (cmd->src_map != cmd->dst_map) {
		bus_dmamap_unload(sc->sc_dmat, cmd->dst_map);
		bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map);
	}
	bus_dmamap_unload(sc->sc_dmat, cmd->src_map);
	bus_dmamap_destroy(sc->sc_dmat, cmd->src_map);
	free(cmd, M_DEVBUF);
	crypto_done(crp);
}
M_DEVBUF); 2554 crypto_done(crp); 2555 } 2556 2557 #ifdef HAVE_CRYPTO_LSZ 2558 2559 int 2560 hifn_compression(struct hifn_softc *sc, struct cryptop *crp, 2561 struct hifn_command *cmd) 2562 { 2563 struct cryptodesc *crd = crp->crp_desc; 2564 int s, err = 0; 2565 2566 cmd->compcrd = crd; 2567 cmd->base_masks |= HIFN_BASE_CMD_COMP; 2568 2569 if ((crp->crp_flags & CRYPTO_F_IMBUF) == 0) { 2570 /* 2571 * XXX can only handle mbufs right now since we can 2572 * XXX dynamically resize them. 2573 */ 2574 err = EINVAL; 2575 return (ENOMEM); 2576 } 2577 2578 if ((crd->crd_flags & CRD_F_COMP) == 0) 2579 cmd->base_masks |= HIFN_BASE_CMD_DECODE; 2580 if (crd->crd_alg == CRYPTO_LZS_COMP) 2581 cmd->comp_masks |= HIFN_COMP_CMD_ALG_LZS | 2582 HIFN_COMP_CMD_CLEARHIST; 2583 2584 if (bus_dmamap_create(sc->sc_dmat, HIFN_MAX_DMALEN, MAX_SCATTER, 2585 HIFN_MAX_SEGLEN, 0, BUS_DMA_NOWAIT, &cmd->src_map)) { 2586 err = ENOMEM; 2587 goto fail; 2588 } 2589 2590 if (bus_dmamap_create(sc->sc_dmat, HIFN_MAX_DMALEN, MAX_SCATTER, 2591 HIFN_MAX_SEGLEN, 0, BUS_DMA_NOWAIT, &cmd->dst_map)) { 2592 err = ENOMEM; 2593 goto fail; 2594 } 2595 2596 if (crp->crp_flags & CRYPTO_F_IMBUF) { 2597 int len; 2598 2599 if (bus_dmamap_load_mbuf(sc->sc_dmat, cmd->src_map, 2600 cmd->srcu.src_m, BUS_DMA_NOWAIT)) { 2601 err = ENOMEM; 2602 goto fail; 2603 } 2604 2605 len = cmd->src_map->dm_mapsize / MCLBYTES; 2606 if ((cmd->src_map->dm_mapsize % MCLBYTES) != 0) 2607 len++; 2608 len *= MCLBYTES; 2609 2610 if ((crd->crd_flags & CRD_F_COMP) == 0) 2611 len *= 4; 2612 2613 if (len > HIFN_MAX_DMALEN) 2614 len = HIFN_MAX_DMALEN; 2615 2616 cmd->dstu.dst_m = hifn_mkmbuf_chain(len, cmd->srcu.src_m); 2617 if (cmd->dstu.dst_m == NULL) { 2618 err = ENOMEM; 2619 goto fail; 2620 } 2621 2622 if (bus_dmamap_load_mbuf(sc->sc_dmat, cmd->dst_map, 2623 cmd->dstu.dst_m, BUS_DMA_NOWAIT)) { 2624 err = ENOMEM; 2625 goto fail; 2626 } 2627 } else if (crp->crp_flags & CRYPTO_F_IOV) { 2628 if (bus_dmamap_load_uio(sc->sc_dmat, cmd->src_map, 2629 
cmd->srcu.src_io, BUS_DMA_NOWAIT)) { 2630 err = ENOMEM; 2631 goto fail; 2632 } 2633 if (bus_dmamap_load_uio(sc->sc_dmat, cmd->dst_map, 2634 cmd->dstu.dst_io, BUS_DMA_NOWAIT)) { 2635 err = ENOMEM; 2636 goto fail; 2637 } 2638 } 2639 2640 if (cmd->src_map == cmd->dst_map) 2641 bus_dmamap_sync(sc->sc_dmat, cmd->src_map, 2642 0, cmd->src_map->dm_mapsize, 2643 BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD); 2644 else { 2645 bus_dmamap_sync(sc->sc_dmat, cmd->src_map, 2646 0, cmd->src_map->dm_mapsize, BUS_DMASYNC_PREWRITE); 2647 bus_dmamap_sync(sc->sc_dmat, cmd->dst_map, 2648 0, cmd->dst_map->dm_mapsize, BUS_DMASYNC_PREREAD); 2649 } 2650 2651 cmd->crp = crp; 2652 /* 2653 * Always use session 0. The modes of compression we use are 2654 * stateless and there is always at least one compression 2655 * context, zero. 2656 */ 2657 cmd->session_num = 0; 2658 cmd->softc = sc; 2659 2660 s = splnet(); 2661 err = hifn_compress_enter(sc, cmd); 2662 splx(s); 2663 2664 if (err != 0) 2665 goto fail; 2666 return (0); 2667 2668 fail: 2669 if (cmd->dst_map != NULL) { 2670 if (cmd->dst_map->dm_nsegs > 0) 2671 bus_dmamap_unload(sc->sc_dmat, cmd->dst_map); 2672 bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map); 2673 } 2674 if (cmd->src_map != NULL) { 2675 if (cmd->src_map->dm_nsegs > 0) 2676 bus_dmamap_unload(sc->sc_dmat, cmd->src_map); 2677 bus_dmamap_destroy(sc->sc_dmat, cmd->src_map); 2678 } 2679 free(cmd, M_DEVBUF); 2680 if (err == EINVAL) 2681 hifnstats.hst_invalid++; 2682 else 2683 hifnstats.hst_nomem++; 2684 crp->crp_etype = err; 2685 crypto_done(crp); 2686 return (0); 2687 } 2688 2689 /* 2690 * must be called at splnet() 2691 */ 2692 int 2693 hifn_compress_enter(struct hifn_softc *sc, struct hifn_command *cmd) 2694 { 2695 struct hifn_dma *dma = sc->sc_dma; 2696 int cmdi, resi; 2697 u_int32_t cmdlen; 2698 2699 if ((dma->cmdu + 1) > HIFN_D_CMD_RSIZE || 2700 (dma->resu + 1) > HIFN_D_CMD_RSIZE) 2701 return (ENOMEM); 2702 2703 if ((dma->srcu + cmd->src_map->dm_nsegs) > HIFN_D_SRC_RSIZE || 2704 
/*
 * Enqueue a prepared compression command on the hardware rings:
 * one command descriptor, the source scatter list, one result
 * descriptor, and the destination scatter list, enabling each DMA
 * engine as needed.  Returns ENOMEM when any ring lacks room (the
 * caller requeues), 0 on success.
 *
 * must be called at splnet()
 */
int
hifn_compress_enter(struct hifn_softc *sc, struct hifn_command *cmd)
{
	struct hifn_dma *dma = sc->sc_dma;
	int cmdi, resi;
	u_int32_t cmdlen;

	/* NOTE(review): the resu bound is checked against
	 * HIFN_D_CMD_RSIZE, not HIFN_D_RES_RSIZE — looks suspicious;
	 * confirm against the ring sizes in hifn7751reg.h. */
	if ((dma->cmdu + 1) > HIFN_D_CMD_RSIZE ||
	    (dma->resu + 1) > HIFN_D_CMD_RSIZE)
		return (ENOMEM);

	if ((dma->srcu + cmd->src_map->dm_nsegs) > HIFN_D_SRC_RSIZE ||
	    (dma->dstu + cmd->dst_map->dm_nsegs) > HIFN_D_DST_RSIZE)
		return (ENOMEM);

	/* Wrap the command ring via its jump descriptor if needed. */
	if (dma->cmdi == HIFN_D_CMD_RSIZE) {
		dma->cmdi = 0;
		dma->cmdr[HIFN_D_CMD_RSIZE].l = htole32(HIFN_D_VALID |
		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
		HIFN_CMDR_SYNC(sc, HIFN_D_CMD_RSIZE,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	}
	cmdi = dma->cmdi++;
	cmdlen = hifn_write_command(cmd, dma->command_bufs[cmdi]);
	HIFN_CMD_SYNC(sc, cmdi, BUS_DMASYNC_PREWRITE);

	/* .p for command/result already set */
	dma->cmdr[cmdi].l = htole32(cmdlen | HIFN_D_VALID | HIFN_D_LAST |
	    HIFN_D_MASKDONEIRQ);
	HIFN_CMDR_SYNC(sc, cmdi,
	    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	dma->cmdu++;
	/* Kick the command DMA engine on first use. */
	if (sc->sc_c_busy == 0) {
		WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_C_CTRL_ENA);
		sc->sc_c_busy = 1;
		SET_LED(sc, HIFN_MIPSRST_LED0);
	}

	/*
	 * We don't worry about missing an interrupt (which a "command wait"
	 * interrupt salvages us from), unless there is more than one command
	 * in the queue.
	 */
	if (dma->cmdu > 1) {
		sc->sc_dmaier |= HIFN_DMAIER_C_WAIT;
		WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
	}

	hifnstats.hst_ipackets++;
	hifnstats.hst_ibytes += cmd->src_map->dm_mapsize;

	hifn_dmamap_load_src(sc, cmd);
	if (sc->sc_s_busy == 0) {
		WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_S_CTRL_ENA);
		sc->sc_s_busy = 1;
		SET_LED(sc, HIFN_MIPSRST_LED1);
	}

	/*
	 * Unlike other descriptors, we don't mask done interrupt from
	 * result descriptor.
	 */
	if (dma->resi == HIFN_D_RES_RSIZE) {
		dma->resi = 0;
		dma->resr[HIFN_D_RES_RSIZE].l = htole32(HIFN_D_VALID |
		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
		HIFN_RESR_SYNC(sc, HIFN_D_RES_RSIZE,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}
	resi = dma->resi++;
	/* Remember which command owns this result slot. */
	dma->hifn_commands[resi] = cmd;
	HIFN_RES_SYNC(sc, resi, BUS_DMASYNC_PREREAD);
	dma->resr[resi].l = htole32(HIFN_MAX_RESULT |
	    HIFN_D_VALID | HIFN_D_LAST);
	HIFN_RESR_SYNC(sc, resi,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	dma->resu++;
	if (sc->sc_r_busy == 0) {
		WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_R_CTRL_ENA);
		sc->sc_r_busy = 1;
		SET_LED(sc, HIFN_MIPSRST_LED2);
	}

	/* Slop bytes, if any, are retrieved via this result index. */
	if (cmd->sloplen)
		cmd->slopidx = resi;

	hifn_dmamap_load_dst(sc, cmd);

	if (sc->sc_d_busy == 0) {
		WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_D_CTRL_ENA);
		sc->sc_d_busy = 1;
	}
	/* Watchdog credit; decremented elsewhere to detect stalls. */
	sc->sc_active = 5;
	cmd->cmd_callback = hifn_callback_comp;
	return (0);
}
2753 */ 2754 if (dma->resi == HIFN_D_RES_RSIZE) { 2755 dma->resi = 0; 2756 dma->resr[HIFN_D_RES_RSIZE].l = htole32(HIFN_D_VALID | 2757 HIFN_D_JUMP | HIFN_D_MASKDONEIRQ); 2758 HIFN_RESR_SYNC(sc, HIFN_D_RES_RSIZE, 2759 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 2760 } 2761 resi = dma->resi++; 2762 dma->hifn_commands[resi] = cmd; 2763 HIFN_RES_SYNC(sc, resi, BUS_DMASYNC_PREREAD); 2764 dma->resr[resi].l = htole32(HIFN_MAX_RESULT | 2765 HIFN_D_VALID | HIFN_D_LAST); 2766 HIFN_RESR_SYNC(sc, resi, 2767 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 2768 dma->resu++; 2769 if (sc->sc_r_busy == 0) { 2770 WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_R_CTRL_ENA); 2771 sc->sc_r_busy = 1; 2772 SET_LED(sc, HIFN_MIPSRST_LED2); 2773 } 2774 2775 if (cmd->sloplen) 2776 cmd->slopidx = resi; 2777 2778 hifn_dmamap_load_dst(sc, cmd); 2779 2780 if (sc->sc_d_busy == 0) { 2781 WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_D_CTRL_ENA); 2782 sc->sc_d_busy = 1; 2783 } 2784 sc->sc_active = 5; 2785 cmd->cmd_callback = hifn_callback_comp; 2786 return (0); 2787 } 2788 2789 void 2790 hifn_callback_comp(struct hifn_softc *sc, struct hifn_command *cmd, 2791 u_int8_t *resbuf) 2792 { 2793 struct hifn_base_result baseres; 2794 struct cryptop *crp = cmd->crp; 2795 struct hifn_dma *dma = sc->sc_dma; 2796 struct mbuf *m; 2797 int err = 0, i, u; 2798 u_int32_t olen; 2799 bus_size_t dstsize; 2800 2801 bus_dmamap_sync(sc->sc_dmat, cmd->src_map, 2802 0, cmd->src_map->dm_mapsize, BUS_DMASYNC_POSTWRITE); 2803 bus_dmamap_sync(sc->sc_dmat, cmd->dst_map, 2804 0, cmd->dst_map->dm_mapsize, BUS_DMASYNC_POSTREAD); 2805 2806 dstsize = cmd->dst_map->dm_mapsize; 2807 bus_dmamap_unload(sc->sc_dmat, cmd->dst_map); 2808 2809 bcopy(resbuf, &baseres, sizeof(struct hifn_base_result)); 2810 2811 i = dma->dstk; u = dma->dstu; 2812 while (u != 0) { 2813 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, 2814 offsetof(struct hifn_dma, dstr[i]), sizeof(struct hifn_desc), 2815 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 2816 if 
(dma->dstr[i].l & htole32(HIFN_D_VALID)) { 2817 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, 2818 offsetof(struct hifn_dma, dstr[i]), 2819 sizeof(struct hifn_desc), 2820 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 2821 break; 2822 } 2823 if (++i == (HIFN_D_DST_RSIZE + 1)) 2824 i = 0; 2825 else 2826 u--; 2827 } 2828 dma->dstk = i; dma->dstu = u; 2829 2830 if (baseres.flags & htole16(HIFN_BASE_RES_DSTOVERRUN)) { 2831 bus_size_t xlen; 2832 2833 xlen = dstsize; 2834 2835 m_freem(cmd->dstu.dst_m); 2836 2837 if (xlen == HIFN_MAX_DMALEN) { 2838 /* We've done all we can. */ 2839 err = E2BIG; 2840 goto out; 2841 } 2842 2843 xlen += MCLBYTES; 2844 2845 if (xlen > HIFN_MAX_DMALEN) 2846 xlen = HIFN_MAX_DMALEN; 2847 2848 cmd->dstu.dst_m = hifn_mkmbuf_chain(xlen, 2849 cmd->srcu.src_m); 2850 if (cmd->dstu.dst_m == NULL) { 2851 err = ENOMEM; 2852 goto out; 2853 } 2854 if (bus_dmamap_load_mbuf(sc->sc_dmat, cmd->dst_map, 2855 cmd->dstu.dst_m, BUS_DMA_NOWAIT)) { 2856 err = ENOMEM; 2857 goto out; 2858 } 2859 2860 bus_dmamap_sync(sc->sc_dmat, cmd->src_map, 2861 0, cmd->src_map->dm_mapsize, BUS_DMASYNC_PREWRITE); 2862 bus_dmamap_sync(sc->sc_dmat, cmd->dst_map, 2863 0, cmd->dst_map->dm_mapsize, BUS_DMASYNC_PREREAD); 2864 2865 /* already at splnet... 
*/ 2866 err = hifn_compress_enter(sc, cmd); 2867 if (err != 0) 2868 goto out; 2869 return; 2870 } 2871 2872 olen = dstsize - (letoh16(baseres.dst_cnt) | 2873 (((letoh16(baseres.session) & HIFN_BASE_RES_DSTLEN_M) >> 2874 HIFN_BASE_RES_DSTLEN_S) << 16)); 2875 2876 crp->crp_olen = olen - cmd->compcrd->crd_skip; 2877 2878 bus_dmamap_unload(sc->sc_dmat, cmd->src_map); 2879 bus_dmamap_destroy(sc->sc_dmat, cmd->src_map); 2880 bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map); 2881 2882 m = cmd->dstu.dst_m; 2883 if (m->m_flags & M_PKTHDR) 2884 m->m_pkthdr.len = olen; 2885 crp->crp_buf = (caddr_t)m; 2886 for (; m != NULL; m = m->m_next) { 2887 if (olen >= m->m_len) 2888 olen -= m->m_len; 2889 else { 2890 m->m_len = olen; 2891 olen = 0; 2892 } 2893 } 2894 2895 m_freem(cmd->srcu.src_m); 2896 free(cmd, M_DEVBUF); 2897 crp->crp_etype = 0; 2898 crypto_done(crp); 2899 return; 2900 2901 out: 2902 if (cmd->dst_map != NULL) { 2903 if (cmd->src_map->dm_nsegs != 0) 2904 bus_dmamap_unload(sc->sc_dmat, cmd->src_map); 2905 bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map); 2906 } 2907 if (cmd->src_map != NULL) { 2908 if (cmd->src_map->dm_nsegs != 0) 2909 bus_dmamap_unload(sc->sc_dmat, cmd->src_map); 2910 bus_dmamap_destroy(sc->sc_dmat, cmd->src_map); 2911 } 2912 if (cmd->dstu.dst_m != NULL) 2913 m_freem(cmd->dstu.dst_m); 2914 free(cmd, M_DEVBUF); 2915 crp->crp_etype = err; 2916 crypto_done(crp); 2917 } 2918 2919 struct mbuf * 2920 hifn_mkmbuf_chain(int totlen, struct mbuf *mtemplate) 2921 { 2922 int len; 2923 struct mbuf *m, *m0, *mlast; 2924 2925 if (mtemplate->m_flags & M_PKTHDR) { 2926 len = MHLEN; 2927 MGETHDR(m0, M_DONTWAIT, MT_DATA); 2928 } else { 2929 len = MLEN; 2930 MGET(m0, M_DONTWAIT, MT_DATA); 2931 } 2932 if (m0 == NULL) 2933 return (NULL); 2934 if (len == MHLEN) 2935 M_DUP_PKTHDR(m0, mtemplate); 2936 MCLGET(m0, M_DONTWAIT); 2937 if (!(m0->m_flags & M_EXT)) 2938 m_freem(m0); 2939 len = MCLBYTES; 2940 2941 totlen -= len; 2942 m0->m_pkthdr.len = m0->m_len = len; 2943 mlast = m0; 2944 
2945 while (totlen > 0) { 2946 MGET(m, M_DONTWAIT, MT_DATA); 2947 if (m == NULL) { 2948 m_freem(m0); 2949 return (NULL); 2950 } 2951 MCLGET(m, M_DONTWAIT); 2952 if (!(m->m_flags & M_EXT)) { 2953 m_freem(m0); 2954 return (NULL); 2955 } 2956 len = MCLBYTES; 2957 m->m_len = len; 2958 if (m0->m_flags & M_PKTHDR) 2959 m0->m_pkthdr.len += len; 2960 totlen -= len; 2961 2962 mlast->m_next = m; 2963 mlast = m; 2964 } 2965 2966 return (m0); 2967 } 2968 #endif /* HAVE_CRYPTO_LSZ */ 2969 2970 void 2971 hifn_write_4(struct hifn_softc *sc, int reggrp, bus_size_t reg, u_int32_t val) 2972 { 2973 /* 2974 * 7811 PB3 rev/2 parts lock-up on burst writes to Group 0 2975 * and Group 1 registers; avoid conditions that could create 2976 * burst writes by doing a read in between the writes. 2977 */ 2978 if (sc->sc_flags & HIFN_NO_BURSTWRITE) { 2979 if (sc->sc_waw_lastgroup == reggrp && 2980 sc->sc_waw_lastreg == reg - 4) { 2981 bus_space_read_4(sc->sc_st1, sc->sc_sh1, HIFN_1_REVID); 2982 } 2983 sc->sc_waw_lastgroup = reggrp; 2984 sc->sc_waw_lastreg = reg; 2985 } 2986 if (reggrp == 0) 2987 bus_space_write_4(sc->sc_st0, sc->sc_sh0, reg, val); 2988 else 2989 bus_space_write_4(sc->sc_st1, sc->sc_sh1, reg, val); 2990 2991 } 2992 2993 u_int32_t 2994 hifn_read_4(struct hifn_softc *sc, int reggrp, bus_size_t reg) 2995 { 2996 if (sc->sc_flags & HIFN_NO_BURSTWRITE) { 2997 sc->sc_waw_lastgroup = -1; 2998 sc->sc_waw_lastreg = 1; 2999 } 3000 if (reggrp == 0) 3001 return (bus_space_read_4(sc->sc_st0, sc->sc_sh0, reg)); 3002 return (bus_space_read_4(sc->sc_st1, sc->sc_sh1, reg)); 3003 } 3004