1 /* $NetBSD: hifn7751.c,v 1.30 2006/03/28 17:38:34 thorpej Exp $ */ 2 /* $FreeBSD: hifn7751.c,v 1.5.2.7 2003/10/08 23:52:00 sam Exp $ */ 3 /* $OpenBSD: hifn7751.c,v 1.140 2003/08/01 17:55:54 deraadt Exp $ */ 4 5 /* 6 * Invertex AEON / Hifn 7751 driver 7 * Copyright (c) 1999 Invertex Inc. All rights reserved. 8 * Copyright (c) 1999 Theo de Raadt 9 * Copyright (c) 2000-2001 Network Security Technologies, Inc. 10 * http://www.netsec.net 11 * Copyright (c) 2003 Hifn Inc. 12 * 13 * This driver is based on a previous driver by Invertex, for which they 14 * requested: Please send any comments, feedback, bug-fixes, or feature 15 * requests to software@invertex.com. 16 * 17 * Redistribution and use in source and binary forms, with or without 18 * modification, are permitted provided that the following conditions 19 * are met: 20 * 21 * 1. Redistributions of source code must retain the above copyright 22 * notice, this list of conditions and the following disclaimer. 23 * 2. Redistributions in binary form must reproduce the above copyright 24 * notice, this list of conditions and the following disclaimer in the 25 * documentation and/or other materials provided with the distribution. 26 * 3. The name of the author may not be used to endorse or promote products 27 * derived from this software without specific prior written permission. 28 * 29 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 30 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 31 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
32 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 33 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 34 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 35 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 36 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 37 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 38 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 39 * 40 * Effort sponsored in part by the Defense Advanced Research Projects 41 * Agency (DARPA) and Air Force Research Laboratory, Air Force 42 * Materiel Command, USAF, under agreement number F30602-01-2-0537. 43 * 44 */ 45 46 /* 47 * Driver for various Hifn pre-HIPP encryption processors. 48 */ 49 50 #include <sys/cdefs.h> 51 __KERNEL_RCSID(0, "$NetBSD: hifn7751.c,v 1.30 2006/03/28 17:38:34 thorpej Exp $"); 52 53 #include "rnd.h" 54 55 #if NRND == 0 56 #error hifn7751 requires rnd pseudo-devices 57 #endif 58 59 60 #include <sys/param.h> 61 #include <sys/systm.h> 62 #include <sys/proc.h> 63 #include <sys/errno.h> 64 #include <sys/malloc.h> 65 #include <sys/kernel.h> 66 #include <sys/mbuf.h> 67 #include <sys/device.h> 68 69 #include <uvm/uvm_extern.h> 70 71 72 #ifdef __OpenBSD__ 73 #include <crypto/crypto.h> 74 #include <dev/rndvar.h> 75 #else 76 #include <opencrypto/cryptodev.h> 77 #include <sys/rnd.h> 78 #endif 79 80 #include <dev/pci/pcireg.h> 81 #include <dev/pci/pcivar.h> 82 #include <dev/pci/pcidevs.h> 83 84 #include <dev/pci/hifn7751reg.h> 85 #include <dev/pci/hifn7751var.h> 86 87 #undef HIFN_DEBUG 88 89 #ifdef __NetBSD__ 90 #define M_DUP_PKTHDR M_COPY_PKTHDR /* XXX */ 91 #endif 92 93 #ifdef HIFN_DEBUG 94 extern int hifn_debug; /* patchable */ 95 int hifn_debug = 1; 96 #endif 97 98 #ifdef __OpenBSD__ 99 #define HAVE_CRYPTO_LZS /* OpenBSD OCF supports CRYPTO_COMP_LZS */ 100 #endif 101 102 /* 103 * Prototypes and count for the pci_device 
structure 104 */ 105 #ifdef __OpenBSD__ 106 static int hifn_probe((struct device *, void *, void *); 107 #else 108 static int hifn_probe(struct device *, struct cfdata *, void *); 109 #endif 110 static void hifn_attach(struct device *, struct device *, void *); 111 112 CFATTACH_DECL(hifn, sizeof(struct hifn_softc), 113 hifn_probe, hifn_attach, NULL, NULL); 114 115 #ifdef __OpenBSD__ 116 struct cfdriver hifn_cd = { 117 0, "hifn", DV_DULL 118 }; 119 #endif 120 121 static void hifn_reset_board(struct hifn_softc *, int); 122 static void hifn_reset_puc(struct hifn_softc *); 123 static void hifn_puc_wait(struct hifn_softc *); 124 static const char *hifn_enable_crypto(struct hifn_softc *, pcireg_t); 125 static void hifn_set_retry(struct hifn_softc *); 126 static void hifn_init_dma(struct hifn_softc *); 127 static void hifn_init_pci_registers(struct hifn_softc *); 128 static int hifn_sramsize(struct hifn_softc *); 129 static int hifn_dramsize(struct hifn_softc *); 130 static int hifn_ramtype(struct hifn_softc *); 131 static void hifn_sessions(struct hifn_softc *); 132 static int hifn_intr(void *); 133 static u_int hifn_write_command(struct hifn_command *, u_int8_t *); 134 static u_int32_t hifn_next_signature(u_int32_t a, u_int cnt); 135 static int hifn_newsession(void*, u_int32_t *, struct cryptoini *); 136 static int hifn_freesession(void*, u_int64_t); 137 static int hifn_process(void*, struct cryptop *, int); 138 static void hifn_callback(struct hifn_softc *, struct hifn_command *, 139 u_int8_t *); 140 static int hifn_crypto(struct hifn_softc *, struct hifn_command *, 141 struct cryptop*, int); 142 static int hifn_readramaddr(struct hifn_softc *, int, u_int8_t *); 143 static int hifn_writeramaddr(struct hifn_softc *, int, u_int8_t *); 144 static int hifn_dmamap_aligned(bus_dmamap_t); 145 static int hifn_dmamap_load_src(struct hifn_softc *, 146 struct hifn_command *); 147 static int hifn_dmamap_load_dst(struct hifn_softc *, 148 struct hifn_command *); 149 static int 
hifn_init_pubrng(struct hifn_softc *); 150 static void hifn_rng(void *); 151 static void hifn_tick(void *); 152 static void hifn_abort(struct hifn_softc *); 153 static void hifn_alloc_slot(struct hifn_softc *, int *, int *, int *, 154 int *); 155 static void hifn_write_4(struct hifn_softc *, int, bus_size_t, u_int32_t); 156 static u_int32_t hifn_read_4(struct hifn_softc *, int, bus_size_t); 157 #ifdef HAVE_CRYPTO_LZS 158 static int hifn_compression(struct hifn_softc *, struct cryptop *, 159 struct hifn_command *); 160 static struct mbuf *hifn_mkmbuf_chain(int, struct mbuf *); 161 static int hifn_compress_enter(struct hifn_softc *, struct hifn_command *); 162 static void hifn_callback_comp(struct hifn_softc *, struct hifn_command *, 163 u_int8_t *); 164 #endif /* HAVE_CRYPTO_LZS */ 165 166 167 struct hifn_stats hifnstats; 168 169 static const struct hifn_product { 170 pci_vendor_id_t hifn_vendor; 171 pci_product_id_t hifn_product; 172 int hifn_flags; 173 const char *hifn_name; 174 } hifn_products[] = { 175 { PCI_VENDOR_INVERTEX, PCI_PRODUCT_INVERTEX_AEON, 176 0, 177 "Invertex AEON", 178 }, 179 180 { PCI_VENDOR_HIFN, PCI_PRODUCT_HIFN_7751, 181 0, 182 "Hifn 7751", 183 }, 184 { PCI_VENDOR_NETSEC, PCI_PRODUCT_NETSEC_7751, 185 0, 186 "Hifn 7751 (NetSec)" 187 }, 188 189 { PCI_VENDOR_HIFN, PCI_PRODUCT_HIFN_7811, 190 HIFN_IS_7811 | HIFN_HAS_RNG | HIFN_HAS_LEDS | HIFN_NO_BURSTWRITE, 191 "Hifn 7811", 192 }, 193 194 { PCI_VENDOR_HIFN, PCI_PRODUCT_HIFN_7951, 195 HIFN_HAS_RNG | HIFN_HAS_PUBLIC, 196 "Hifn 7951", 197 }, 198 199 { PCI_VENDOR_HIFN, PCI_PRODUCT_HIFN_7955, 200 HIFN_HAS_RNG | HIFN_HAS_PUBLIC | HIFN_IS_7956 | HIFN_HAS_AES, 201 "Hifn 7955", 202 }, 203 204 { PCI_VENDOR_HIFN, PCI_PRODUCT_HIFN_7956, 205 HIFN_HAS_RNG | HIFN_HAS_PUBLIC | HIFN_IS_7956 | HIFN_HAS_AES, 206 "Hifn 7956", 207 }, 208 209 210 { 0, 0, 211 0, 212 NULL 213 } 214 }; 215 216 static const struct hifn_product * 217 hifn_lookup(const struct pci_attach_args *pa) 218 { 219 const struct hifn_product *hp; 220 
221 for (hp = hifn_products; hp->hifn_name != NULL; hp++) { 222 if (PCI_VENDOR(pa->pa_id) == hp->hifn_vendor && 223 PCI_PRODUCT(pa->pa_id) == hp->hifn_product) 224 return (hp); 225 } 226 return (NULL); 227 } 228 229 static int 230 hifn_probe(struct device *parent, struct cfdata *match, void *aux) 231 { 232 struct pci_attach_args *pa = (struct pci_attach_args *) aux; 233 234 if (hifn_lookup(pa) != NULL) 235 return (1); 236 237 return (0); 238 } 239 240 static void 241 hifn_attach(struct device *parent, struct device *self, void *aux) 242 { 243 struct hifn_softc *sc = (struct hifn_softc *)self; 244 struct pci_attach_args *pa = aux; 245 const struct hifn_product *hp; 246 pci_chipset_tag_t pc = pa->pa_pc; 247 pci_intr_handle_t ih; 248 const char *intrstr = NULL; 249 const char *hifncap; 250 char rbase; 251 bus_size_t iosize0, iosize1; 252 u_int32_t cmd; 253 u_int16_t ena; 254 bus_dma_segment_t seg; 255 bus_dmamap_t dmamap; 256 int rseg; 257 caddr_t kva; 258 259 hp = hifn_lookup(pa); 260 if (hp == NULL) { 261 printf("\n"); 262 panic("hifn_attach: impossible"); 263 } 264 265 aprint_naive(": Crypto processor\n"); 266 aprint_normal(": %s, rev. 
%d\n", hp->hifn_name, 267 PCI_REVISION(pa->pa_class)); 268 269 sc->sc_pci_pc = pa->pa_pc; 270 sc->sc_pci_tag = pa->pa_tag; 271 272 sc->sc_flags = hp->hifn_flags; 273 274 cmd = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG); 275 cmd |= PCI_COMMAND_MASTER_ENABLE; 276 pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, cmd); 277 278 if (pci_mapreg_map(pa, HIFN_BAR0, PCI_MAPREG_TYPE_MEM, 0, 279 &sc->sc_st0, &sc->sc_sh0, NULL, &iosize0)) { 280 aprint_error("%s: can't map mem space %d\n", 281 sc->sc_dv.dv_xname, 0); 282 return; 283 } 284 285 if (pci_mapreg_map(pa, HIFN_BAR1, PCI_MAPREG_TYPE_MEM, 0, 286 &sc->sc_st1, &sc->sc_sh1, NULL, &iosize1)) { 287 aprint_error("%s: can't find mem space %d\n", 288 sc->sc_dv.dv_xname, 1); 289 goto fail_io0; 290 } 291 292 hifn_set_retry(sc); 293 294 if (sc->sc_flags & HIFN_NO_BURSTWRITE) { 295 sc->sc_waw_lastgroup = -1; 296 sc->sc_waw_lastreg = 1; 297 } 298 299 sc->sc_dmat = pa->pa_dmat; 300 if (bus_dmamem_alloc(sc->sc_dmat, sizeof(*sc->sc_dma), PAGE_SIZE, 0, 301 &seg, 1, &rseg, BUS_DMA_NOWAIT)) { 302 aprint_error("%s: can't alloc DMA buffer\n", 303 sc->sc_dv.dv_xname); 304 goto fail_io1; 305 } 306 if (bus_dmamem_map(sc->sc_dmat, &seg, rseg, sizeof(*sc->sc_dma), &kva, 307 BUS_DMA_NOWAIT)) { 308 aprint_error("%s: can't map DMA buffers (%lu bytes)\n", 309 sc->sc_dv.dv_xname, (u_long)sizeof(*sc->sc_dma)); 310 bus_dmamem_free(sc->sc_dmat, &seg, rseg); 311 goto fail_io1; 312 } 313 if (bus_dmamap_create(sc->sc_dmat, sizeof(*sc->sc_dma), 1, 314 sizeof(*sc->sc_dma), 0, BUS_DMA_NOWAIT, &dmamap)) { 315 aprint_error("%s: can't create DMA map\n", 316 sc->sc_dv.dv_xname); 317 bus_dmamem_unmap(sc->sc_dmat, kva, sizeof(*sc->sc_dma)); 318 bus_dmamem_free(sc->sc_dmat, &seg, rseg); 319 goto fail_io1; 320 } 321 if (bus_dmamap_load(sc->sc_dmat, dmamap, kva, sizeof(*sc->sc_dma), 322 NULL, BUS_DMA_NOWAIT)) { 323 aprint_error("%s: can't load DMA map\n", 324 sc->sc_dv.dv_xname); 325 bus_dmamap_destroy(sc->sc_dmat, dmamap); 326 
bus_dmamem_unmap(sc->sc_dmat, kva, sizeof(*sc->sc_dma)); 327 bus_dmamem_free(sc->sc_dmat, &seg, rseg); 328 goto fail_io1; 329 } 330 sc->sc_dmamap = dmamap; 331 sc->sc_dma = (struct hifn_dma *)kva; 332 bzero(sc->sc_dma, sizeof(*sc->sc_dma)); 333 334 hifn_reset_board(sc, 0); 335 336 if ((hifncap = hifn_enable_crypto(sc, pa->pa_id)) == NULL) { 337 aprint_error("%s: crypto enabling failed\n", 338 sc->sc_dv.dv_xname); 339 goto fail_mem; 340 } 341 hifn_reset_puc(sc); 342 343 hifn_init_dma(sc); 344 hifn_init_pci_registers(sc); 345 346 /* XXX can't dynamically determine ram type for 795x; force dram */ 347 if (sc->sc_flags & HIFN_IS_7956) 348 sc->sc_drammodel = 1; 349 else if (hifn_ramtype(sc)) 350 goto fail_mem; 351 352 if (sc->sc_drammodel == 0) 353 hifn_sramsize(sc); 354 else 355 hifn_dramsize(sc); 356 357 /* 358 * Workaround for NetSec 7751 rev A: half ram size because two 359 * of the address lines were left floating 360 */ 361 if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_NETSEC && 362 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_NETSEC_7751 && 363 PCI_REVISION(pa->pa_class) == 0x61) 364 sc->sc_ramsize >>= 1; 365 366 if (pci_intr_map(pa, &ih)) { 367 aprint_error("%s: couldn't map interrupt\n", 368 sc->sc_dv.dv_xname); 369 goto fail_mem; 370 } 371 intrstr = pci_intr_string(pc, ih); 372 #ifdef __OpenBSD__ 373 sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, hifn_intr, sc, 374 self->dv_xname); 375 #else 376 sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, hifn_intr, sc); 377 #endif 378 if (sc->sc_ih == NULL) { 379 aprint_error("%s: couldn't establish interrupt\n", 380 sc->sc_dv.dv_xname); 381 if (intrstr != NULL) 382 aprint_normal(" at %s", intrstr); 383 aprint_normal("\n"); 384 goto fail_mem; 385 } 386 387 hifn_sessions(sc); 388 389 rseg = sc->sc_ramsize / 1024; 390 rbase = 'K'; 391 if (sc->sc_ramsize >= (1024 * 1024)) { 392 rbase = 'M'; 393 rseg /= 1024; 394 } 395 aprint_normal("%s: %s, %d%cB %cram, interrupting at %s\n", 396 sc->sc_dv.dv_xname, hifncap, rseg, rbase, 397 
sc->sc_drammodel ? 'd' : 's', intrstr); 398 399 sc->sc_cid = crypto_get_driverid(0); 400 if (sc->sc_cid < 0) { 401 aprint_error("%s: couldn't get crypto driver id\n", 402 sc->sc_dv.dv_xname); 403 goto fail_intr; 404 } 405 406 WRITE_REG_0(sc, HIFN_0_PUCNFG, 407 READ_REG_0(sc, HIFN_0_PUCNFG) | HIFN_PUCNFG_CHIPID); 408 ena = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA; 409 410 switch (ena) { 411 case HIFN_PUSTAT_ENA_2: 412 crypto_register(sc->sc_cid, CRYPTO_3DES_CBC, 0, 0, 413 hifn_newsession, hifn_freesession, hifn_process, sc); 414 crypto_register(sc->sc_cid, CRYPTO_ARC4, 0, 0, 415 hifn_newsession, hifn_freesession, hifn_process, sc); 416 if (sc->sc_flags & HIFN_HAS_AES) 417 crypto_register(sc->sc_cid, CRYPTO_AES_CBC, 0, 0, 418 hifn_newsession, hifn_freesession, 419 hifn_process, sc); 420 /*FALLTHROUGH*/ 421 case HIFN_PUSTAT_ENA_1: 422 crypto_register(sc->sc_cid, CRYPTO_MD5, 0, 0, 423 hifn_newsession, hifn_freesession, hifn_process, sc); 424 crypto_register(sc->sc_cid, CRYPTO_SHA1, 0, 0, 425 hifn_newsession, hifn_freesession, hifn_process, sc); 426 crypto_register(sc->sc_cid, CRYPTO_MD5_HMAC, 0, 0, 427 hifn_newsession, hifn_freesession, hifn_process, sc); 428 crypto_register(sc->sc_cid, CRYPTO_SHA1_HMAC, 0, 0, 429 hifn_newsession, hifn_freesession, hifn_process, sc); 430 crypto_register(sc->sc_cid, CRYPTO_DES_CBC, 0, 0, 431 hifn_newsession, hifn_freesession, hifn_process, sc); 432 break; 433 } 434 435 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, 0, 436 sc->sc_dmamap->dm_mapsize, 437 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 438 439 if (sc->sc_flags & (HIFN_HAS_PUBLIC | HIFN_HAS_RNG)) 440 hifn_init_pubrng(sc); 441 442 #ifdef __OpenBSD__ 443 timeout_set(&sc->sc_tickto, hifn_tick, sc); 444 timeout_add(&sc->sc_tickto, hz); 445 #else 446 callout_init(&sc->sc_tickto); 447 callout_reset(&sc->sc_tickto, hz, hifn_tick, sc); 448 #endif 449 return; 450 451 fail_intr: 452 pci_intr_disestablish(pc, sc->sc_ih); 453 fail_mem: 454 bus_dmamap_unload(sc->sc_dmat, dmamap); 
455 bus_dmamap_destroy(sc->sc_dmat, dmamap); 456 bus_dmamem_unmap(sc->sc_dmat, kva, sizeof(*sc->sc_dma)); 457 bus_dmamem_free(sc->sc_dmat, &seg, rseg); 458 459 /* Turn off DMA polling */ 460 WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET | 461 HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE); 462 463 fail_io1: 464 bus_space_unmap(sc->sc_st1, sc->sc_sh1, iosize1); 465 fail_io0: 466 bus_space_unmap(sc->sc_st0, sc->sc_sh0, iosize0); 467 } 468 469 static int 470 hifn_init_pubrng(struct hifn_softc *sc) 471 { 472 u_int32_t r; 473 int i; 474 475 if ((sc->sc_flags & HIFN_IS_7811) == 0) { 476 /* Reset 7951 public key/rng engine */ 477 WRITE_REG_1(sc, HIFN_1_PUB_RESET, 478 READ_REG_1(sc, HIFN_1_PUB_RESET) | HIFN_PUBRST_RESET); 479 480 for (i = 0; i < 100; i++) { 481 DELAY(1000); 482 if ((READ_REG_1(sc, HIFN_1_PUB_RESET) & 483 HIFN_PUBRST_RESET) == 0) 484 break; 485 } 486 487 if (i == 100) { 488 printf("%s: public key init failed\n", 489 sc->sc_dv.dv_xname); 490 return (1); 491 } 492 } 493 494 /* Enable the rng, if available */ 495 if (sc->sc_flags & HIFN_HAS_RNG) { 496 if (sc->sc_flags & HIFN_IS_7811) { 497 r = READ_REG_1(sc, HIFN_1_7811_RNGENA); 498 if (r & HIFN_7811_RNGENA_ENA) { 499 r &= ~HIFN_7811_RNGENA_ENA; 500 WRITE_REG_1(sc, HIFN_1_7811_RNGENA, r); 501 } 502 WRITE_REG_1(sc, HIFN_1_7811_RNGCFG, 503 HIFN_7811_RNGCFG_DEFL); 504 r |= HIFN_7811_RNGENA_ENA; 505 WRITE_REG_1(sc, HIFN_1_7811_RNGENA, r); 506 } else 507 WRITE_REG_1(sc, HIFN_1_RNG_CONFIG, 508 READ_REG_1(sc, HIFN_1_RNG_CONFIG) | 509 HIFN_RNGCFG_ENA); 510 511 /* 512 * The Hifn RNG documentation states that at their 513 * recommended "conservative" RNG config values, 514 * the RNG must warm up for 0.4s before providing 515 * data that meet their worst-case estimate of 0.06 516 * bits of random data per output register bit. 517 */ 518 DELAY(4000); 519 520 #ifdef __NetBSD__ 521 /* 522 * XXX Careful! 
The use of RND_FLAG_NO_ESTIMATE
		 * XXX here is unobvious: we later feed raw bits
		 * XXX into the "entropy pool" with rnd_add_data,
		 * XXX explicitly supplying an entropy estimate.
		 * XXX In this context, NO_ESTIMATE serves only
		 * XXX to prevent rnd_add_data from trying to
		 * XXX use the *time at which we added the data*
		 * XXX as entropy, which is not a good idea since
		 * XXX we add data periodically from a callout.
		 */
		rnd_attach_source(&sc->sc_rnd_source, sc->sc_dv.dv_xname,
		    RND_TYPE_RNG, RND_FLAG_NO_ESTIMATE);
#endif

		/* First harvest after this will try to prime the pool. */
		sc->sc_rngfirst = 1;
		/* Poll the RNG roughly 100 times per second. */
		if (hz >= 100)
			sc->sc_rnghz = hz / 100;
		else
			sc->sc_rnghz = 1;
#ifdef	__OpenBSD__
		timeout_set(&sc->sc_rngto, hifn_rng, sc);
#else	/* !__OpenBSD__ */
		callout_init(&sc->sc_rngto);
#endif	/* !__OpenBSD__ */
	}

	/* Enable public key engine, if available */
	if (sc->sc_flags & HIFN_HAS_PUBLIC) {
		WRITE_REG_1(sc, HIFN_1_PUB_IEN, HIFN_PUBIEN_DONE);
		sc->sc_dmaier |= HIFN_DMAIER_PUBDONE;
		WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
	}

	/* Call directly into the RNG once to prime the pool. */
	hifn_rng(sc);	/* Sets callout/timeout at end */

	return (0);
}

/*
 * Periodic RNG harvester (runs from a callout/timeout, and once
 * directly from hifn_init_pubrng to prime the pool).  The 7811 has a
 * FIFO with status bits; the 795x parts expose a bare LFSR data
 * register that needs oversampling (see the long comment below).
 */
static void
hifn_rng(void *vsc)
{
	struct hifn_softc *sc = vsc;
#ifdef __NetBSD__
	u_int32_t num[HIFN_RNG_BITSPER * RND_ENTROPY_THRESHOLD];
#else
	u_int32_t num[2];
#endif
	u_int32_t sts;
	int i;

	if (sc->sc_flags & HIFN_IS_7811) {
		for (i = 0; i < 5; i++) {	/* XXX why 5? */
			sts = READ_REG_1(sc, HIFN_1_7811_RNGSTS);
			if (sts & HIFN_7811_RNGSTS_UFL) {
				/*
				 * Underflow: give up for good -- we
				 * return without rescheduling.
				 */
				printf("%s: RNG underflow: disabling\n",
				    sc->sc_dv.dv_xname);
				return;
			}
			if ((sts & HIFN_7811_RNGSTS_RDY) == 0)
				break;

			/*
			 * There are at least two words in the RNG FIFO
			 * at this point.
			 */
			num[0] = READ_REG_1(sc, HIFN_1_7811_RNGDAT);
			num[1] = READ_REG_1(sc, HIFN_1_7811_RNGDAT);

			if (sc->sc_rngfirst)
				sc->sc_rngfirst = 0;
#ifdef __NetBSD__
			rnd_add_data(&sc->sc_rnd_source, num,
			    2 * sizeof(num[0]),
			    (2 * sizeof(num[0]) * NBBY) /
			    HIFN_RNG_BITSPER);
#else
			/*
			 * XXX This is a really bad idea.
			 * XXX Hifn estimate as little as 0.06
			 * XXX actual bits of entropy per output
			 * XXX register bit.  How can we tell the
			 * XXX kernel RNG subsystem we're handing
			 * XXX it 64 "true" random bits, for any
			 * XXX sane value of "true"?
			 * XXX
			 * XXX The right thing to do here, if we
			 * XXX cannot supply an estimate ourselves,
			 * XXX would be to hash the bits locally.
			 */
			add_true_randomness(num[0]);
			add_true_randomness(num[1]);
#endif

		}
	} else {
#ifdef __NetBSD__
		/* First time through, try to help fill the pool. */
		int nwords = sc->sc_rngfirst ?
		    sizeof(num) / sizeof(num[0]) : 4;
#else
		int nwords = 2;
#endif
		/*
		 * We must be *extremely* careful here.  The Hifn
		 * 795x differ from the published 6500 RNG design
		 * in more ways than the obvious lack of the output
		 * FIFO and LFSR control registers.  In fact, there
		 * is only one LFSR, instead of the 6500's two, and
		 * it's 32 bits, not 31.
		 *
		 * Further, a block diagram obtained from Hifn shows
		 * a very curious latching of this register: the LFSR
		 * rotates at a frequency of RNG_Clk / 8, but the
		 * RNG_Data register is latched at a frequency of
		 * RNG_Clk, which means that it is possible for
		 * consecutive reads of the RNG_Data register to read
		 * identical state from the LFSR.  The simplest
		 * workaround seems to be to read eight samples from
		 * the register for each one that we use.  Since each
		 * read must require at least one PCI cycle, and
		 * RNG_Clk is at least PCI_Clk, this is safe.
		 */


		if (sc->sc_rngfirst) {
			sc->sc_rngfirst = 0;
		}


		/* Keep only every eighth sample; see comment above. */
		for (i = 0 ; i < nwords * 8; i++)
		{
			volatile u_int32_t regtmp;
			regtmp = READ_REG_1(sc, HIFN_1_RNG_DATA);
			num[i / 8] = regtmp;
		}
#ifdef __NetBSD__
		rnd_add_data(&sc->sc_rnd_source, num,
		    nwords * sizeof(num[0]),
		    (nwords * sizeof(num[0]) * NBBY) /
		    HIFN_RNG_BITSPER);
#else
		/* XXX a bad idea; see 7811 block above */
		add_true_randomness(num[0]);
#endif
	}

	/* Reschedule ourselves. */
#ifdef	__OpenBSD__
	timeout_add(&sc->sc_rngto, sc->sc_rnghz);
#else
	callout_reset(&sc->sc_rngto, sc->sc_rnghz, hifn_rng, sc);
#endif
}

/*
 * Spin (up to 5 ms) until the processing unit comes out of reset;
 * complain if it never does.
 */
static void
hifn_puc_wait(struct hifn_softc *sc)
{
	int i;

	for (i = 5000; i > 0; i--) {
		DELAY(1);
		if (!(READ_REG_0(sc, HIFN_0_PUCTRL) & HIFN_PUCTRL_RESET))
			break;
	}
	if (!i)
		printf("%s: proc unit did not reset\n", sc->sc_dv.dv_xname);
}

/*
 * Reset the processing unit.
 */
static void
hifn_reset_puc(struct hifn_softc *sc)
{
	/* Reset processing unit */
	WRITE_REG_0(sc, HIFN_0_PUCTRL, HIFN_PUCTRL_DMAENA);
	hifn_puc_wait(sc);
}

/*
 * Clear the TRDY/retry timeout fields in PCI config space (keeping the
 * upper half of the register), i.e. disable the retry timeout.
 */
static void
hifn_set_retry(struct hifn_softc *sc)
{
	u_int32_t r;

	r = pci_conf_read(sc->sc_pci_pc, sc->sc_pci_tag, HIFN_TRDY_TIMEOUT);
	r &= 0xffff0000;
	pci_conf_write(sc->sc_pci_pc, sc->sc_pci_tag, HIFN_TRDY_TIMEOUT, r);
}

/*
 * Resets the board.  Values in the regesters are left as is
 * from the reset (i.e. initial values are assigned elsewhere).
 */
static void
hifn_reset_board(struct hifn_softc *sc, int full)
{
	u_int32_t reg;

	/*
	 * Set polling in the DMA configuration register to zero.  0x7 avoids
	 * resetting the board and zeros out the other fields.
	 */
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
	    HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);

	/*
	 * Now that polling has been disabled, we have to wait 1 ms
	 * before resetting the board.
	 */
	DELAY(1000);

	/* Reset the DMA unit */
	if (full) {
		WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MODE);
		DELAY(1000);
	} else {
		WRITE_REG_1(sc, HIFN_1_DMA_CNFG,
		    HIFN_DMACNFG_MODE | HIFN_DMACNFG_MSTRESET);
		hifn_reset_puc(sc);
	}

	/* The descriptor rings are now stale; wipe them. */
	bzero(sc->sc_dma, sizeof(*sc->sc_dma));

	/* Bring dma unit out of reset */
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
	    HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);

	hifn_puc_wait(sc);

	hifn_set_retry(sc);

	if (sc->sc_flags & HIFN_IS_7811) {
		/* Wait (up to ~1 s) for the 7811 context RAM init bit. */
		for (reg = 0; reg < 1000; reg++) {
			if (READ_REG_1(sc, HIFN_1_7811_MIPSRST) &
			    HIFN_MIPSRST_CRAMINIT)
				break;
			DELAY(1000);
		}
		if (reg == 1000)
			printf(": cram init timeout\n");
	}
}

/*
 * One step of the unlock-signature LFSR, advanced cnt times: the
 * parity of the bits selected by the polynomial mask 0x80080125 is
 * folded down to bit 0 and XORed into the left-shifted state.
 */
static u_int32_t
hifn_next_signature(u_int32_t a, u_int cnt)
{
	int i;
	u_int32_t v;

	for (i = 0; i < cnt; i++) {

		/* get the parity */
		v = a & 0x80080125;
		v ^= v >> 16;
		v ^= v >> 8;
		v ^= v >> 4;
		v ^= v >> 2;
		v ^= v >> 1;

		a = (v & 1) ^ (a << 1);
	}

	return a;
}

/*
 * Per-board 13-byte unlock keys, indexed by PCI ID; fed byte by byte
 * into hifn_next_signature() by hifn_enable_crypto().  All known
 * boards use the all-zeroes key.
 */
struct pci2id {
	u_short		pci_vendor;
	u_short		pci_prod;
	char		card_id[13];
} static const pci2id[] = {
	{
		PCI_VENDOR_HIFN,
		PCI_PRODUCT_HIFN_7951,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	}, {
		PCI_VENDOR_HIFN,
		PCI_PRODUCT_HIFN_7955,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	}, {
		PCI_VENDOR_HIFN,
		PCI_PRODUCT_HIFN_7956,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	}, {
		PCI_VENDOR_NETSEC,
		PCI_PRODUCT_NETSEC_7751,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	}, {
		PCI_VENDOR_INVERTEX,
		PCI_PRODUCT_INVERTEX_AEON,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	}, {
		PCI_VENDOR_HIFN,
		PCI_PRODUCT_HIFN_7811,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	}, {
		/*
		 * Other vendors share this PCI ID as well, such as
		 * http://www.powercrypt.com, and obviously they also
		 * use the same key.
		 */
		PCI_VENDOR_HIFN,
		PCI_PRODUCT_HIFN_7751,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	},
};

/*
 * Checks to see if crypto is already enabled.  If crypto isn't enable,
 * "hifn_enable_crypto" is called to enable it.  The check is important,
 * as enabling crypto twice will lock the board.
 */
static const char *
hifn_enable_crypto(struct hifn_softc *sc, pcireg_t pciid)
{
	u_int32_t dmacfg, ramcfg, encl, addr, i;
	const char *offtbl = NULL;

	for (i = 0; i < sizeof(pci2id)/sizeof(pci2id[0]); i++) {
		if (pci2id[i].pci_vendor == PCI_VENDOR(pciid) &&
		    pci2id[i].pci_prod == PCI_PRODUCT(pciid)) {
			offtbl = pci2id[i].card_id;
			break;
		}
	}

	if (offtbl == NULL) {
#ifdef HIFN_DEBUG
		aprint_debug("%s: Unknown card!\n", sc->sc_dv.dv_xname);
#endif
		return (NULL);
	}

	/* Saved and restored below once the unlock dance is done. */
	ramcfg = READ_REG_0(sc, HIFN_0_PUCNFG);
	dmacfg = READ_REG_1(sc, HIFN_1_DMA_CNFG);

	/*
	 * The RAM config register's encrypt level bit needs to be set before
	 * every read performed on the encryption level register.
	 */
	WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg | HIFN_PUCNFG_CHIPID);

	encl = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA;

	/*
	 * Make sure we don't re-unlock.  Two unlocks kills chip until the
	 * next reboot.
	 */
	if (encl == HIFN_PUSTAT_ENA_1 || encl == HIFN_PUSTAT_ENA_2) {
#ifdef HIFN_DEBUG
		aprint_debug("%s: Strong Crypto already enabled!\n",
		    sc->sc_dv.dv_xname);
#endif
		goto report;
	}

	if (encl != 0 && encl != HIFN_PUSTAT_ENA_0) {
#ifdef HIFN_DEBUG
		aprint_debug("%s: Unknown encryption level\n",
		    sc->sc_dv.dv_xname);
#endif
		return (NULL);
	}

	/* Feed the 13-byte key through the signature LFSR. */
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_UNLOCK |
	    HIFN_DMACNFG_MSTRESET | HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
	DELAY(1000);
	addr = READ_REG_1(sc, HIFN_1_UNLOCK_SECRET1);
	DELAY(1000);
	WRITE_REG_1(sc, HIFN_1_UNLOCK_SECRET2, 0);
	DELAY(1000);

	for (i = 0; i <= 12; i++) {
		addr = hifn_next_signature(addr, offtbl[i] + 0x101);
		WRITE_REG_1(sc, HIFN_1_UNLOCK_SECRET2, addr);

		DELAY(1000);
	}

	/* Re-read the enable level to see whether the unlock worked. */
	WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg | HIFN_PUCNFG_CHIPID);
	encl = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA;

#ifdef HIFN_DEBUG
	if (encl != HIFN_PUSTAT_ENA_1 && encl != HIFN_PUSTAT_ENA_2)
		aprint_debug("Encryption engine is permanently locked until next system reset.");
	else
		aprint_debug("Encryption engine enabled successfully!");
#endif

report:
	WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg);
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, dmacfg);

	/* Return a human-readable capability string for attach output. */
	switch (encl) {
	case HIFN_PUSTAT_ENA_0:
		return ("LZS-only (no encr/auth)");

	case HIFN_PUSTAT_ENA_1:
		return ("DES");

	case HIFN_PUSTAT_ENA_2:
		if (sc->sc_flags & HIFN_HAS_AES)
			return ("3DES/AES");
		else
			return ("3DES");

	default:
		return ("disabled");
	}
	/* NOTREACHED */
}

/*
 * Give initial values to the registers listed in the "Register Space"
 * section of the HIFN Software Development reference manual.
 */
static void
hifn_init_pci_registers(struct hifn_softc *sc)
{
	/* write fixed values needed by the Initialization registers */
	WRITE_REG_0(sc, HIFN_0_PUCTRL, HIFN_PUCTRL_DMAENA);
	WRITE_REG_0(sc, HIFN_0_FIFOCNFG, HIFN_FIFOCNFG_THRESHOLD);
	WRITE_REG_0(sc, HIFN_0_PUIER, HIFN_PUIER_DSTOVER);

	/* write all 4 ring address registers */
	WRITE_REG_1(sc, HIFN_1_DMA_CRAR, sc->sc_dmamap->dm_segs[0].ds_addr +
	    offsetof(struct hifn_dma, cmdr[0]));
	WRITE_REG_1(sc, HIFN_1_DMA_SRAR, sc->sc_dmamap->dm_segs[0].ds_addr +
	    offsetof(struct hifn_dma, srcr[0]));
	WRITE_REG_1(sc, HIFN_1_DMA_DRAR, sc->sc_dmamap->dm_segs[0].ds_addr +
	    offsetof(struct hifn_dma, dstr[0]));
	WRITE_REG_1(sc, HIFN_1_DMA_RRAR, sc->sc_dmamap->dm_segs[0].ds_addr +
	    offsetof(struct hifn_dma, resr[0]));

	DELAY(2000);

	/*
	 * write status register: disable all four ring engines and
	 * acknowledge (write-1-to-clear) every pending status bit.
	 */
	WRITE_REG_1(sc, HIFN_1_DMA_CSR,
	    HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS |
	    HIFN_DMACSR_S_CTRL_DIS | HIFN_DMACSR_C_CTRL_DIS |
	    HIFN_DMACSR_D_ABORT | HIFN_DMACSR_D_DONE | HIFN_DMACSR_D_LAST |
	    HIFN_DMACSR_D_WAIT | HIFN_DMACSR_D_OVER |
	    HIFN_DMACSR_R_ABORT | HIFN_DMACSR_R_DONE | HIFN_DMACSR_R_LAST |
	    HIFN_DMACSR_R_WAIT | HIFN_DMACSR_R_OVER |
	    HIFN_DMACSR_S_ABORT | HIFN_DMACSR_S_DONE | HIFN_DMACSR_S_LAST |
	    HIFN_DMACSR_S_WAIT |
	    HIFN_DMACSR_C_ABORT | HIFN_DMACSR_C_DONE | HIFN_DMACSR_C_LAST |
	    HIFN_DMACSR_C_WAIT |
	    HIFN_DMACSR_ENGINE |
	    ((sc->sc_flags & HIFN_HAS_PUBLIC) ?
		HIFN_DMACSR_PUBDONE : 0) |
	    ((sc->sc_flags & HIFN_IS_7811) ?
		HIFN_DMACSR_ILLW | HIFN_DMACSR_ILLR : 0));

	/* Select which DMA conditions may interrupt us. */
	sc->sc_d_busy = sc->sc_r_busy = sc->sc_s_busy = sc->sc_c_busy = 0;
	sc->sc_dmaier |= HIFN_DMAIER_R_DONE | HIFN_DMAIER_C_ABORT |
	    HIFN_DMAIER_D_OVER | HIFN_DMAIER_R_OVER |
	    HIFN_DMAIER_S_ABORT | HIFN_DMAIER_D_ABORT | HIFN_DMAIER_R_ABORT |
	    HIFN_DMAIER_ENGINE |
	    ((sc->sc_flags & HIFN_IS_7811) ?
		HIFN_DMAIER_ILLW | HIFN_DMAIER_ILLR : 0);
	sc->sc_dmaier &= ~HIFN_DMAIER_C_WAIT;
	WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
	CLR_LED(sc, HIFN_MIPSRST_LED0 | HIFN_MIPSRST_LED1 | HIFN_MIPSRST_LED2);

	/* 795x parts use the PLL register instead of DRAM refresh setup. */
	if (sc->sc_flags & HIFN_IS_7956) {
		WRITE_REG_0(sc, HIFN_0_PUCNFG, HIFN_PUCNFG_COMPSING |
		    HIFN_PUCNFG_TCALLPHASES |
		    HIFN_PUCNFG_TCDRVTOTEM | HIFN_PUCNFG_BUS32);
		WRITE_REG_1(sc, HIFN_1_PLL, HIFN_PLL_7956);
	} else {
		WRITE_REG_0(sc, HIFN_0_PUCNFG, HIFN_PUCNFG_COMPSING |
		    HIFN_PUCNFG_DRFR_128 | HIFN_PUCNFG_TCALLPHASES |
		    HIFN_PUCNFG_TCDRVTOTEM | HIFN_PUCNFG_BUS32 |
		    (sc->sc_drammodel ? HIFN_PUCNFG_DRAM : HIFN_PUCNFG_SRAM));
	}

	WRITE_REG_0(sc, HIFN_0_PUISR, HIFN_PUISR_DSTOVER);
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
	    HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE | HIFN_DMACNFG_LAST |
	    ((HIFN_POLL_FREQUENCY << 16 ) & HIFN_DMACNFG_POLLFREQ) |
	    ((HIFN_POLL_SCALAR << 8) & HIFN_DMACNFG_POLLINVAL));
}

/*
 * The maximum number of sessions supported by the card
 * is dependent on the amount of context ram, which
 * encryption algorithms are enabled, and how compression
 * is configured.  This should be configured before this
 * routine is called.
 */
static void
hifn_sessions(struct hifn_softc *sc)
{
	u_int32_t pucnfg;
	int ctxsize;

	pucnfg = READ_REG_0(sc, HIFN_0_PUCNFG);

	if (pucnfg & HIFN_PUCNFG_COMPSING) {
		if (pucnfg & HIFN_PUCNFG_ENCCNFG)
			ctxsize = 128;
		else
			ctxsize = 512;
		/*
		 * 7955/7956 has internal context memory of 32K
		 */
		if (sc->sc_flags & HIFN_IS_7956)
			sc->sc_maxses = 32768 / ctxsize;
		else
			/* first 32K of external RAM is reserved */
			sc->sc_maxses = 1 +
			    ((sc->sc_ramsize - 32768) / ctxsize);
	}
	else
		sc->sc_maxses = sc->sc_ramsize / 16384;

	/* Hard cap regardless of RAM size. */
	if (sc->sc_maxses > 2048)
		sc->sc_maxses = 2048;
}

/*
 * Determine ram type (sram or dram).
Board should be just out of a reset
 * state when this is called.
 */
static int
hifn_ramtype(struct hifn_softc *sc)
{
	u_int8_t data[8], dataexpect[8];
	int i;

	/*
	 * Write a pattern to address 0 and read it back; if the
	 * round-trip fails the RAM behaves like (unrefreshed) DRAM.
	 * Tried with both 0x55 and 0xaa patterns.
	 */
	for (i = 0; i < sizeof(data); i++)
		data[i] = dataexpect[i] = 0x55;
	if (hifn_writeramaddr(sc, 0, data))
		return (-1);
	if (hifn_readramaddr(sc, 0, data))
		return (-1);
	if (bcmp(data, dataexpect, sizeof(data)) != 0) {
		sc->sc_drammodel = 1;
		return (0);
	}

	for (i = 0; i < sizeof(data); i++)
		data[i] = dataexpect[i] = 0xaa;
	if (hifn_writeramaddr(sc, 0, data))
		return (-1);
	if (hifn_readramaddr(sc, 0, data))
		return (-1);
	if (bcmp(data, dataexpect, sizeof(data)) != 0) {
		sc->sc_drammodel = 1;
		return (0);
	}

	/* Patterns survived: SRAM (sc_drammodel stays 0). */
	return (0);
}

#define	HIFN_SRAM_MAX		(32 << 20)
#define	HIFN_SRAM_STEP_SIZE	16384
#define	HIFN_SRAM_GRANULARITY	(HIFN_SRAM_MAX / HIFN_SRAM_STEP_SIZE)

/*
 * Probe SRAM size by tagging every 16K step with its index (written
 * top-down so aliased addresses keep the lowest tag), then reading
 * back bottom-up until a tag mismatches.  Result lands in
 * sc->sc_ramsize; always returns 0.
 */
static int
hifn_sramsize(struct hifn_softc *sc)
{
	u_int32_t a;
	u_int8_t data[8];
	u_int8_t dataexpect[sizeof(data)];
	int32_t i;

	for (i = 0; i < sizeof(data); i++)
		data[i] = dataexpect[i] = i ^ 0x5a;

	for (i = HIFN_SRAM_GRANULARITY - 1; i >= 0; i--) {
		a = i * HIFN_SRAM_STEP_SIZE;
		/* overwrite the first 4 bytes with the step index */
		bcopy(&i, data, sizeof(i));
		hifn_writeramaddr(sc, a, data);
	}

	for (i = 0; i < HIFN_SRAM_GRANULARITY; i++) {
		a = i * HIFN_SRAM_STEP_SIZE;
		bcopy(&i, dataexpect, sizeof(i));
		if (hifn_readramaddr(sc, a, data) < 0)
			return (0);
		if (bcmp(data, dataexpect, sizeof(data)) != 0)
			return (0);
		/* this step verified; extend the known-good size */
		sc->sc_ramsize = a + HIFN_SRAM_STEP_SIZE;
	}

	return (0);
}

/*
 * XXX For dram boards, one should really try all of the
 * HIFN_PUCNFG_DSZ_*'s.  This just assumes that PUCNFG
 * is already set up correctly.
 */
static int
hifn_dramsize(struct hifn_softc *sc)
{
	u_int32_t cnfg;

	if (sc->sc_flags & HIFN_IS_7956) {
		/*
		 * 7955/7956 have a fixed internal ram of only 32K.
		 */
		sc->sc_ramsize = 32768;
	} else {
		/* Decode the DRAM-size field of PUCNFG: 256K << field. */
		cnfg = READ_REG_0(sc, HIFN_0_PUCNFG) &
		    HIFN_PUCNFG_DRAMMASK;
		sc->sc_ramsize = 1 << ((cnfg >> 13) + 18);
	}
	return (0);
}

/*
 * Claim the next slot in each of the four descriptor rings (command,
 * source, destination, result), returning the indices through the out
 * parameters.  When a ring's write index reaches the end, a JUMP
 * descriptor is planted there and the index wraps to 0.  No free-space
 * check is done here; callers are expected to ensure room -- TODO
 * confirm against the callers outside this view.
 */
static void
hifn_alloc_slot(struct hifn_softc *sc, int *cmdp, int *srcp, int *dstp,
    int *resp)
{
	struct hifn_dma *dma = sc->sc_dma;

	if (dma->cmdi == HIFN_D_CMD_RSIZE) {
		dma->cmdi = 0;
		/* wrap: chain the extra slot back to the ring head */
		dma->cmdr[HIFN_D_CMD_RSIZE].l = htole32(HIFN_D_VALID |
		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
		HIFN_CMDR_SYNC(sc, HIFN_D_CMD_RSIZE,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	}
	*cmdp = dma->cmdi++;
	dma->cmdk = dma->cmdi;

	if (dma->srci == HIFN_D_SRC_RSIZE) {
		dma->srci = 0;
		dma->srcr[HIFN_D_SRC_RSIZE].l = htole32(HIFN_D_VALID |
		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
		HIFN_SRCR_SYNC(sc, HIFN_D_SRC_RSIZE,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	}
	*srcp = dma->srci++;
	dma->srck = dma->srci;

	if (dma->dsti == HIFN_D_DST_RSIZE) {
		dma->dsti = 0;
		dma->dstr[HIFN_D_DST_RSIZE].l = htole32(HIFN_D_VALID |
		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
		HIFN_DSTR_SYNC(sc, HIFN_D_DST_RSIZE,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	}
	*dstp = dma->dsti++;
	dma->dstk = dma->dsti;

	if (dma->resi == HIFN_D_RES_RSIZE) {
		dma->resi = 0;
		dma->resr[HIFN_D_RES_RSIZE].l = htole32(HIFN_D_VALID |
		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
		HIFN_RESR_SYNC(sc, HIFN_D_RES_RSIZE,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	}
	*resp = dma->resi++;
	dma->resk = dma->resi;
}

static int
hifn_writeramaddr(struct hifn_softc *sc, int addr, u_int8_t *data)
{
	struct hifn_dma *dma = sc->sc_dma;
struct hifn_base_command wc; 1192 const u_int32_t masks = HIFN_D_VALID | HIFN_D_LAST | HIFN_D_MASKDONEIRQ; 1193 int r, cmdi, resi, srci, dsti; 1194 1195 wc.masks = htole16(3 << 13); 1196 wc.session_num = htole16(addr >> 14); 1197 wc.total_source_count = htole16(8); 1198 wc.total_dest_count = htole16(addr & 0x3fff); 1199 1200 hifn_alloc_slot(sc, &cmdi, &srci, &dsti, &resi); 1201 1202 WRITE_REG_1(sc, HIFN_1_DMA_CSR, 1203 HIFN_DMACSR_C_CTRL_ENA | HIFN_DMACSR_S_CTRL_ENA | 1204 HIFN_DMACSR_D_CTRL_ENA | HIFN_DMACSR_R_CTRL_ENA); 1205 1206 /* build write command */ 1207 bzero(dma->command_bufs[cmdi], HIFN_MAX_COMMAND); 1208 *(struct hifn_base_command *)dma->command_bufs[cmdi] = wc; 1209 bcopy(data, &dma->test_src, sizeof(dma->test_src)); 1210 1211 dma->srcr[srci].p = htole32(sc->sc_dmamap->dm_segs[0].ds_addr 1212 + offsetof(struct hifn_dma, test_src)); 1213 dma->dstr[dsti].p = htole32(sc->sc_dmamap->dm_segs[0].ds_addr 1214 + offsetof(struct hifn_dma, test_dst)); 1215 1216 dma->cmdr[cmdi].l = htole32(16 | masks); 1217 dma->srcr[srci].l = htole32(8 | masks); 1218 dma->dstr[dsti].l = htole32(4 | masks); 1219 dma->resr[resi].l = htole32(4 | masks); 1220 1221 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, 1222 0, sc->sc_dmamap->dm_mapsize, 1223 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1224 1225 for (r = 10000; r >= 0; r--) { 1226 DELAY(10); 1227 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, 1228 0, sc->sc_dmamap->dm_mapsize, 1229 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1230 if ((dma->resr[resi].l & htole32(HIFN_D_VALID)) == 0) 1231 break; 1232 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, 1233 0, sc->sc_dmamap->dm_mapsize, 1234 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1235 } 1236 if (r == 0) { 1237 printf("%s: writeramaddr -- " 1238 "result[%d](addr %d) still valid\n", 1239 sc->sc_dv.dv_xname, resi, addr); 1240 r = -1; 1241 return (-1); 1242 } else 1243 r = 0; 1244 1245 WRITE_REG_1(sc, HIFN_1_DMA_CSR, 1246 HIFN_DMACSR_C_CTRL_DIS | HIFN_DMACSR_S_CTRL_DIS | 1247 
HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS); 1248 1249 return (r); 1250 } 1251 1252 static int 1253 hifn_readramaddr(struct hifn_softc *sc, int addr, u_int8_t *data) 1254 { 1255 struct hifn_dma *dma = sc->sc_dma; 1256 struct hifn_base_command rc; 1257 const u_int32_t masks = HIFN_D_VALID | HIFN_D_LAST | HIFN_D_MASKDONEIRQ; 1258 int r, cmdi, srci, dsti, resi; 1259 1260 rc.masks = htole16(2 << 13); 1261 rc.session_num = htole16(addr >> 14); 1262 rc.total_source_count = htole16(addr & 0x3fff); 1263 rc.total_dest_count = htole16(8); 1264 1265 hifn_alloc_slot(sc, &cmdi, &srci, &dsti, &resi); 1266 1267 WRITE_REG_1(sc, HIFN_1_DMA_CSR, 1268 HIFN_DMACSR_C_CTRL_ENA | HIFN_DMACSR_S_CTRL_ENA | 1269 HIFN_DMACSR_D_CTRL_ENA | HIFN_DMACSR_R_CTRL_ENA); 1270 1271 bzero(dma->command_bufs[cmdi], HIFN_MAX_COMMAND); 1272 *(struct hifn_base_command *)dma->command_bufs[cmdi] = rc; 1273 1274 dma->srcr[srci].p = htole32(sc->sc_dmamap->dm_segs[0].ds_addr + 1275 offsetof(struct hifn_dma, test_src)); 1276 dma->test_src = 0; 1277 dma->dstr[dsti].p = htole32(sc->sc_dmamap->dm_segs[0].ds_addr + 1278 offsetof(struct hifn_dma, test_dst)); 1279 dma->test_dst = 0; 1280 dma->cmdr[cmdi].l = htole32(8 | masks); 1281 dma->srcr[srci].l = htole32(8 | masks); 1282 dma->dstr[dsti].l = htole32(8 | masks); 1283 dma->resr[resi].l = htole32(HIFN_MAX_RESULT | masks); 1284 1285 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, 1286 0, sc->sc_dmamap->dm_mapsize, 1287 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1288 1289 for (r = 10000; r >= 0; r--) { 1290 DELAY(10); 1291 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, 1292 0, sc->sc_dmamap->dm_mapsize, 1293 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1294 if ((dma->resr[resi].l & htole32(HIFN_D_VALID)) == 0) 1295 break; 1296 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, 1297 0, sc->sc_dmamap->dm_mapsize, 1298 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1299 } 1300 if (r == 0) { 1301 printf("%s: readramaddr -- " 1302 "result[%d](addr %d) still valid\n", 1303 
sc->sc_dv.dv_xname, resi, addr); 1304 r = -1; 1305 } else { 1306 r = 0; 1307 bcopy(&dma->test_dst, data, sizeof(dma->test_dst)); 1308 } 1309 1310 WRITE_REG_1(sc, HIFN_1_DMA_CSR, 1311 HIFN_DMACSR_C_CTRL_DIS | HIFN_DMACSR_S_CTRL_DIS | 1312 HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS); 1313 1314 return (r); 1315 } 1316 1317 /* 1318 * Initialize the descriptor rings. 1319 */ 1320 static void 1321 hifn_init_dma(struct hifn_softc *sc) 1322 { 1323 struct hifn_dma *dma = sc->sc_dma; 1324 int i; 1325 1326 hifn_set_retry(sc); 1327 1328 /* initialize static pointer values */ 1329 for (i = 0; i < HIFN_D_CMD_RSIZE; i++) 1330 dma->cmdr[i].p = htole32(sc->sc_dmamap->dm_segs[0].ds_addr + 1331 offsetof(struct hifn_dma, command_bufs[i][0])); 1332 for (i = 0; i < HIFN_D_RES_RSIZE; i++) 1333 dma->resr[i].p = htole32(sc->sc_dmamap->dm_segs[0].ds_addr + 1334 offsetof(struct hifn_dma, result_bufs[i][0])); 1335 1336 dma->cmdr[HIFN_D_CMD_RSIZE].p = 1337 htole32(sc->sc_dmamap->dm_segs[0].ds_addr + 1338 offsetof(struct hifn_dma, cmdr[0])); 1339 dma->srcr[HIFN_D_SRC_RSIZE].p = 1340 htole32(sc->sc_dmamap->dm_segs[0].ds_addr + 1341 offsetof(struct hifn_dma, srcr[0])); 1342 dma->dstr[HIFN_D_DST_RSIZE].p = 1343 htole32(sc->sc_dmamap->dm_segs[0].ds_addr + 1344 offsetof(struct hifn_dma, dstr[0])); 1345 dma->resr[HIFN_D_RES_RSIZE].p = 1346 htole32(sc->sc_dmamap->dm_segs[0].ds_addr + 1347 offsetof(struct hifn_dma, resr[0])); 1348 1349 dma->cmdu = dma->srcu = dma->dstu = dma->resu = 0; 1350 dma->cmdi = dma->srci = dma->dsti = dma->resi = 0; 1351 dma->cmdk = dma->srck = dma->dstk = dma->resk = 0; 1352 } 1353 1354 /* 1355 * Writes out the raw command buffer space. Returns the 1356 * command buffer size. 
1357 */ 1358 static u_int 1359 hifn_write_command(struct hifn_command *cmd, u_int8_t *buf) 1360 { 1361 u_int8_t *buf_pos; 1362 struct hifn_base_command *base_cmd; 1363 struct hifn_mac_command *mac_cmd; 1364 struct hifn_crypt_command *cry_cmd; 1365 struct hifn_comp_command *comp_cmd; 1366 int using_mac, using_crypt, using_comp, len, ivlen; 1367 u_int32_t dlen, slen; 1368 1369 buf_pos = buf; 1370 using_mac = cmd->base_masks & HIFN_BASE_CMD_MAC; 1371 using_crypt = cmd->base_masks & HIFN_BASE_CMD_CRYPT; 1372 using_comp = cmd->base_masks & HIFN_BASE_CMD_COMP; 1373 1374 base_cmd = (struct hifn_base_command *)buf_pos; 1375 base_cmd->masks = htole16(cmd->base_masks); 1376 slen = cmd->src_map->dm_mapsize; 1377 if (cmd->sloplen) 1378 dlen = cmd->dst_map->dm_mapsize - cmd->sloplen + 1379 sizeof(u_int32_t); 1380 else 1381 dlen = cmd->dst_map->dm_mapsize; 1382 base_cmd->total_source_count = htole16(slen & HIFN_BASE_CMD_LENMASK_LO); 1383 base_cmd->total_dest_count = htole16(dlen & HIFN_BASE_CMD_LENMASK_LO); 1384 dlen >>= 16; 1385 slen >>= 16; 1386 base_cmd->session_num = htole16(cmd->session_num | 1387 ((slen << HIFN_BASE_CMD_SRCLEN_S) & HIFN_BASE_CMD_SRCLEN_M) | 1388 ((dlen << HIFN_BASE_CMD_DSTLEN_S) & HIFN_BASE_CMD_DSTLEN_M)); 1389 buf_pos += sizeof(struct hifn_base_command); 1390 1391 if (using_comp) { 1392 comp_cmd = (struct hifn_comp_command *)buf_pos; 1393 dlen = cmd->compcrd->crd_len; 1394 comp_cmd->source_count = htole16(dlen & 0xffff); 1395 dlen >>= 16; 1396 comp_cmd->masks = htole16(cmd->comp_masks | 1397 ((dlen << HIFN_COMP_CMD_SRCLEN_S) & HIFN_COMP_CMD_SRCLEN_M)); 1398 comp_cmd->header_skip = htole16(cmd->compcrd->crd_skip); 1399 comp_cmd->reserved = 0; 1400 buf_pos += sizeof(struct hifn_comp_command); 1401 } 1402 1403 if (using_mac) { 1404 mac_cmd = (struct hifn_mac_command *)buf_pos; 1405 dlen = cmd->maccrd->crd_len; 1406 mac_cmd->source_count = htole16(dlen & 0xffff); 1407 dlen >>= 16; 1408 mac_cmd->masks = htole16(cmd->mac_masks | 1409 ((dlen << 
HIFN_MAC_CMD_SRCLEN_S) & HIFN_MAC_CMD_SRCLEN_M)); 1410 mac_cmd->header_skip = htole16(cmd->maccrd->crd_skip); 1411 mac_cmd->reserved = 0; 1412 buf_pos += sizeof(struct hifn_mac_command); 1413 } 1414 1415 if (using_crypt) { 1416 cry_cmd = (struct hifn_crypt_command *)buf_pos; 1417 dlen = cmd->enccrd->crd_len; 1418 cry_cmd->source_count = htole16(dlen & 0xffff); 1419 dlen >>= 16; 1420 cry_cmd->masks = htole16(cmd->cry_masks | 1421 ((dlen << HIFN_CRYPT_CMD_SRCLEN_S) & HIFN_CRYPT_CMD_SRCLEN_M)); 1422 cry_cmd->header_skip = htole16(cmd->enccrd->crd_skip); 1423 cry_cmd->reserved = 0; 1424 buf_pos += sizeof(struct hifn_crypt_command); 1425 } 1426 1427 if (using_mac && cmd->mac_masks & HIFN_MAC_CMD_NEW_KEY) { 1428 bcopy(cmd->mac, buf_pos, HIFN_MAC_KEY_LENGTH); 1429 buf_pos += HIFN_MAC_KEY_LENGTH; 1430 } 1431 1432 if (using_crypt && cmd->cry_masks & HIFN_CRYPT_CMD_NEW_KEY) { 1433 switch (cmd->cry_masks & HIFN_CRYPT_CMD_ALG_MASK) { 1434 case HIFN_CRYPT_CMD_ALG_3DES: 1435 bcopy(cmd->ck, buf_pos, HIFN_3DES_KEY_LENGTH); 1436 buf_pos += HIFN_3DES_KEY_LENGTH; 1437 break; 1438 case HIFN_CRYPT_CMD_ALG_DES: 1439 bcopy(cmd->ck, buf_pos, HIFN_DES_KEY_LENGTH); 1440 buf_pos += HIFN_DES_KEY_LENGTH; 1441 break; 1442 case HIFN_CRYPT_CMD_ALG_RC4: 1443 len = 256; 1444 do { 1445 int clen; 1446 1447 clen = MIN(cmd->cklen, len); 1448 bcopy(cmd->ck, buf_pos, clen); 1449 len -= clen; 1450 buf_pos += clen; 1451 } while (len > 0); 1452 bzero(buf_pos, 4); 1453 buf_pos += 4; 1454 break; 1455 case HIFN_CRYPT_CMD_ALG_AES: 1456 /* 1457 * AES keys are variable 128, 192 and 1458 * 256 bits (16, 24 and 32 bytes). 
1459 */ 1460 bcopy(cmd->ck, buf_pos, cmd->cklen); 1461 buf_pos += cmd->cklen; 1462 break; 1463 } 1464 } 1465 1466 if (using_crypt && cmd->cry_masks & HIFN_CRYPT_CMD_NEW_IV) { 1467 switch (cmd->cry_masks & HIFN_CRYPT_CMD_ALG_MASK) { 1468 case HIFN_CRYPT_CMD_ALG_AES: 1469 ivlen = HIFN_AES_IV_LENGTH; 1470 break; 1471 default: 1472 ivlen = HIFN_IV_LENGTH; 1473 break; 1474 } 1475 bcopy(cmd->iv, buf_pos, ivlen); 1476 buf_pos += ivlen; 1477 } 1478 1479 if ((cmd->base_masks & (HIFN_BASE_CMD_MAC | HIFN_BASE_CMD_CRYPT | 1480 HIFN_BASE_CMD_COMP)) == 0) { 1481 bzero(buf_pos, 8); 1482 buf_pos += 8; 1483 } 1484 1485 return (buf_pos - buf); 1486 } 1487 1488 static int 1489 hifn_dmamap_aligned(bus_dmamap_t map) 1490 { 1491 int i; 1492 1493 for (i = 0; i < map->dm_nsegs; i++) { 1494 if (map->dm_segs[i].ds_addr & 3) 1495 return (0); 1496 if ((i != (map->dm_nsegs - 1)) && 1497 (map->dm_segs[i].ds_len & 3)) 1498 return (0); 1499 } 1500 return (1); 1501 } 1502 1503 static int 1504 hifn_dmamap_load_dst(struct hifn_softc *sc, struct hifn_command *cmd) 1505 { 1506 struct hifn_dma *dma = sc->sc_dma; 1507 bus_dmamap_t map = cmd->dst_map; 1508 u_int32_t p, l; 1509 int idx, used = 0, i; 1510 1511 idx = dma->dsti; 1512 for (i = 0; i < map->dm_nsegs - 1; i++) { 1513 dma->dstr[idx].p = htole32(map->dm_segs[i].ds_addr); 1514 dma->dstr[idx].l = htole32(HIFN_D_VALID | 1515 HIFN_D_MASKDONEIRQ | map->dm_segs[i].ds_len); 1516 HIFN_DSTR_SYNC(sc, idx, 1517 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1518 used++; 1519 1520 if (++idx == HIFN_D_DST_RSIZE) { 1521 dma->dstr[idx].l = htole32(HIFN_D_VALID | 1522 HIFN_D_JUMP | HIFN_D_MASKDONEIRQ); 1523 HIFN_DSTR_SYNC(sc, idx, 1524 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1525 idx = 0; 1526 } 1527 } 1528 1529 if (cmd->sloplen == 0) { 1530 p = map->dm_segs[i].ds_addr; 1531 l = HIFN_D_VALID | HIFN_D_MASKDONEIRQ | HIFN_D_LAST | 1532 map->dm_segs[i].ds_len; 1533 } else { 1534 p = sc->sc_dmamap->dm_segs[0].ds_addr + 1535 offsetof(struct hifn_dma, 
slop[cmd->slopidx]); 1536 l = HIFN_D_VALID | HIFN_D_MASKDONEIRQ | HIFN_D_LAST | 1537 sizeof(u_int32_t); 1538 1539 if ((map->dm_segs[i].ds_len - cmd->sloplen) != 0) { 1540 dma->dstr[idx].p = htole32(map->dm_segs[i].ds_addr); 1541 dma->dstr[idx].l = htole32(HIFN_D_VALID | 1542 HIFN_D_MASKDONEIRQ | 1543 (map->dm_segs[i].ds_len - cmd->sloplen)); 1544 HIFN_DSTR_SYNC(sc, idx, 1545 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1546 used++; 1547 1548 if (++idx == HIFN_D_DST_RSIZE) { 1549 dma->dstr[idx].l = htole32(HIFN_D_VALID | 1550 HIFN_D_JUMP | HIFN_D_MASKDONEIRQ); 1551 HIFN_DSTR_SYNC(sc, idx, 1552 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1553 idx = 0; 1554 } 1555 } 1556 } 1557 dma->dstr[idx].p = htole32(p); 1558 dma->dstr[idx].l = htole32(l); 1559 HIFN_DSTR_SYNC(sc, idx, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1560 used++; 1561 1562 if (++idx == HIFN_D_DST_RSIZE) { 1563 dma->dstr[idx].l = htole32(HIFN_D_VALID | HIFN_D_JUMP | 1564 HIFN_D_MASKDONEIRQ); 1565 HIFN_DSTR_SYNC(sc, idx, 1566 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1567 idx = 0; 1568 } 1569 1570 dma->dsti = idx; 1571 dma->dstu += used; 1572 return (idx); 1573 } 1574 1575 static int 1576 hifn_dmamap_load_src(struct hifn_softc *sc, struct hifn_command *cmd) 1577 { 1578 struct hifn_dma *dma = sc->sc_dma; 1579 bus_dmamap_t map = cmd->src_map; 1580 int idx, i; 1581 u_int32_t last = 0; 1582 1583 idx = dma->srci; 1584 for (i = 0; i < map->dm_nsegs; i++) { 1585 if (i == map->dm_nsegs - 1) 1586 last = HIFN_D_LAST; 1587 1588 dma->srcr[idx].p = htole32(map->dm_segs[i].ds_addr); 1589 dma->srcr[idx].l = htole32(map->dm_segs[i].ds_len | 1590 HIFN_D_VALID | HIFN_D_MASKDONEIRQ | last); 1591 HIFN_SRCR_SYNC(sc, idx, 1592 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); 1593 1594 if (++idx == HIFN_D_SRC_RSIZE) { 1595 dma->srcr[idx].l = htole32(HIFN_D_VALID | 1596 HIFN_D_JUMP | HIFN_D_MASKDONEIRQ); 1597 HIFN_SRCR_SYNC(sc, HIFN_D_SRC_RSIZE, 1598 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); 1599 idx = 0; 1600 } 1601 } 1602 
dma->srci = idx; 1603 dma->srcu += map->dm_nsegs; 1604 return (idx); 1605 } 1606 1607 static int 1608 hifn_crypto(struct hifn_softc *sc, struct hifn_command *cmd, 1609 struct cryptop *crp, int hint) 1610 { 1611 struct hifn_dma *dma = sc->sc_dma; 1612 u_int32_t cmdlen; 1613 int cmdi, resi, s, err = 0; 1614 1615 if (bus_dmamap_create(sc->sc_dmat, HIFN_MAX_DMALEN, MAX_SCATTER, 1616 HIFN_MAX_SEGLEN, 0, BUS_DMA_NOWAIT, &cmd->src_map)) 1617 return (ENOMEM); 1618 1619 if (crp->crp_flags & CRYPTO_F_IMBUF) { 1620 if (bus_dmamap_load_mbuf(sc->sc_dmat, cmd->src_map, 1621 cmd->srcu.src_m, BUS_DMA_NOWAIT)) { 1622 err = ENOMEM; 1623 goto err_srcmap1; 1624 } 1625 } else if (crp->crp_flags & CRYPTO_F_IOV) { 1626 if (bus_dmamap_load_uio(sc->sc_dmat, cmd->src_map, 1627 cmd->srcu.src_io, BUS_DMA_NOWAIT)) { 1628 err = ENOMEM; 1629 goto err_srcmap1; 1630 } 1631 } else { 1632 err = EINVAL; 1633 goto err_srcmap1; 1634 } 1635 1636 if (hifn_dmamap_aligned(cmd->src_map)) { 1637 cmd->sloplen = cmd->src_map->dm_mapsize & 3; 1638 if (crp->crp_flags & CRYPTO_F_IOV) 1639 cmd->dstu.dst_io = cmd->srcu.src_io; 1640 else if (crp->crp_flags & CRYPTO_F_IMBUF) 1641 cmd->dstu.dst_m = cmd->srcu.src_m; 1642 cmd->dst_map = cmd->src_map; 1643 } else { 1644 if (crp->crp_flags & CRYPTO_F_IOV) { 1645 err = EINVAL; 1646 goto err_srcmap; 1647 } else if (crp->crp_flags & CRYPTO_F_IMBUF) { 1648 int totlen, len; 1649 struct mbuf *m, *m0, *mlast; 1650 1651 totlen = cmd->src_map->dm_mapsize; 1652 if (cmd->srcu.src_m->m_flags & M_PKTHDR) { 1653 len = MHLEN; 1654 MGETHDR(m0, M_DONTWAIT, MT_DATA); 1655 } else { 1656 len = MLEN; 1657 MGET(m0, M_DONTWAIT, MT_DATA); 1658 } 1659 if (m0 == NULL) { 1660 err = ENOMEM; 1661 goto err_srcmap; 1662 } 1663 if (len == MHLEN) 1664 M_DUP_PKTHDR(m0, cmd->srcu.src_m); 1665 if (totlen >= MINCLSIZE) { 1666 MCLGET(m0, M_DONTWAIT); 1667 if (m0->m_flags & M_EXT) 1668 len = MCLBYTES; 1669 } 1670 totlen -= len; 1671 m0->m_pkthdr.len = m0->m_len = len; 1672 mlast = m0; 1673 1674 while (totlen > 
0) { 1675 MGET(m, M_DONTWAIT, MT_DATA); 1676 if (m == NULL) { 1677 err = ENOMEM; 1678 m_freem(m0); 1679 goto err_srcmap; 1680 } 1681 len = MLEN; 1682 if (totlen >= MINCLSIZE) { 1683 MCLGET(m, M_DONTWAIT); 1684 if (m->m_flags & M_EXT) 1685 len = MCLBYTES; 1686 } 1687 1688 m->m_len = len; 1689 if (m0->m_flags & M_PKTHDR) 1690 m0->m_pkthdr.len += len; 1691 totlen -= len; 1692 1693 mlast->m_next = m; 1694 mlast = m; 1695 } 1696 cmd->dstu.dst_m = m0; 1697 } 1698 } 1699 1700 if (cmd->dst_map == NULL) { 1701 if (bus_dmamap_create(sc->sc_dmat, 1702 HIFN_MAX_SEGLEN * MAX_SCATTER, MAX_SCATTER, 1703 HIFN_MAX_SEGLEN, 0, BUS_DMA_NOWAIT, &cmd->dst_map)) { 1704 err = ENOMEM; 1705 goto err_srcmap; 1706 } 1707 if (crp->crp_flags & CRYPTO_F_IMBUF) { 1708 if (bus_dmamap_load_mbuf(sc->sc_dmat, cmd->dst_map, 1709 cmd->dstu.dst_m, BUS_DMA_NOWAIT)) { 1710 err = ENOMEM; 1711 goto err_dstmap1; 1712 } 1713 } else if (crp->crp_flags & CRYPTO_F_IOV) { 1714 if (bus_dmamap_load_uio(sc->sc_dmat, cmd->dst_map, 1715 cmd->dstu.dst_io, BUS_DMA_NOWAIT)) { 1716 err = ENOMEM; 1717 goto err_dstmap1; 1718 } 1719 } 1720 } 1721 1722 #ifdef HIFN_DEBUG 1723 if (hifn_debug) 1724 printf("%s: Entering cmd: stat %8x ien %8x u %d/%d/%d/%d n %d/%d\n", 1725 sc->sc_dv.dv_xname, 1726 READ_REG_1(sc, HIFN_1_DMA_CSR), 1727 READ_REG_1(sc, HIFN_1_DMA_IER), 1728 dma->cmdu, dma->srcu, dma->dstu, dma->resu, 1729 cmd->src_map->dm_nsegs, cmd->dst_map->dm_nsegs); 1730 #endif 1731 1732 if (cmd->src_map == cmd->dst_map) 1733 bus_dmamap_sync(sc->sc_dmat, cmd->src_map, 1734 0, cmd->src_map->dm_mapsize, 1735 BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD); 1736 else { 1737 bus_dmamap_sync(sc->sc_dmat, cmd->src_map, 1738 0, cmd->src_map->dm_mapsize, BUS_DMASYNC_PREWRITE); 1739 bus_dmamap_sync(sc->sc_dmat, cmd->dst_map, 1740 0, cmd->dst_map->dm_mapsize, BUS_DMASYNC_PREREAD); 1741 } 1742 1743 s = splnet(); 1744 1745 /* 1746 * need 1 cmd, and 1 res 1747 * need N src, and N dst 1748 */ 1749 if ((dma->cmdu + 1) > HIFN_D_CMD_RSIZE || 1750 
(dma->resu + 1) > HIFN_D_RES_RSIZE) { 1751 splx(s); 1752 err = ENOMEM; 1753 goto err_dstmap; 1754 } 1755 if ((dma->srcu + cmd->src_map->dm_nsegs) > HIFN_D_SRC_RSIZE || 1756 (dma->dstu + cmd->dst_map->dm_nsegs + 1) > HIFN_D_DST_RSIZE) { 1757 splx(s); 1758 err = ENOMEM; 1759 goto err_dstmap; 1760 } 1761 1762 if (dma->cmdi == HIFN_D_CMD_RSIZE) { 1763 dma->cmdi = 0; 1764 dma->cmdr[HIFN_D_CMD_RSIZE].l = htole32(HIFN_D_VALID | 1765 HIFN_D_JUMP | HIFN_D_MASKDONEIRQ); 1766 HIFN_CMDR_SYNC(sc, HIFN_D_CMD_RSIZE, 1767 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); 1768 } 1769 cmdi = dma->cmdi++; 1770 cmdlen = hifn_write_command(cmd, dma->command_bufs[cmdi]); 1771 HIFN_CMD_SYNC(sc, cmdi, BUS_DMASYNC_PREWRITE); 1772 1773 /* .p for command/result already set */ 1774 dma->cmdr[cmdi].l = htole32(cmdlen | HIFN_D_VALID | HIFN_D_LAST | 1775 HIFN_D_MASKDONEIRQ); 1776 HIFN_CMDR_SYNC(sc, cmdi, 1777 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); 1778 dma->cmdu++; 1779 if (sc->sc_c_busy == 0) { 1780 WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_C_CTRL_ENA); 1781 sc->sc_c_busy = 1; 1782 SET_LED(sc, HIFN_MIPSRST_LED0); 1783 } 1784 1785 /* 1786 * We don't worry about missing an interrupt (which a "command wait" 1787 * interrupt salvages us from), unless there is more than one command 1788 * in the queue. 1789 * 1790 * XXX We do seem to miss some interrupts. So we always enable 1791 * XXX command wait. From OpenBSD revision 1.149. 
1792 * 1793 */ 1794 #if 0 1795 if (dma->cmdu > 1) { 1796 #endif 1797 sc->sc_dmaier |= HIFN_DMAIER_C_WAIT; 1798 WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier); 1799 #if 0 1800 } 1801 #endif 1802 1803 hifnstats.hst_ipackets++; 1804 hifnstats.hst_ibytes += cmd->src_map->dm_mapsize; 1805 1806 hifn_dmamap_load_src(sc, cmd); 1807 if (sc->sc_s_busy == 0) { 1808 WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_S_CTRL_ENA); 1809 sc->sc_s_busy = 1; 1810 SET_LED(sc, HIFN_MIPSRST_LED1); 1811 } 1812 1813 /* 1814 * Unlike other descriptors, we don't mask done interrupt from 1815 * result descriptor. 1816 */ 1817 #ifdef HIFN_DEBUG 1818 if (hifn_debug) 1819 printf("load res\n"); 1820 #endif 1821 if (dma->resi == HIFN_D_RES_RSIZE) { 1822 dma->resi = 0; 1823 dma->resr[HIFN_D_RES_RSIZE].l = htole32(HIFN_D_VALID | 1824 HIFN_D_JUMP | HIFN_D_MASKDONEIRQ); 1825 HIFN_RESR_SYNC(sc, HIFN_D_RES_RSIZE, 1826 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1827 } 1828 resi = dma->resi++; 1829 dma->hifn_commands[resi] = cmd; 1830 HIFN_RES_SYNC(sc, resi, BUS_DMASYNC_PREREAD); 1831 dma->resr[resi].l = htole32(HIFN_MAX_RESULT | 1832 HIFN_D_VALID | HIFN_D_LAST); 1833 HIFN_RESR_SYNC(sc, resi, 1834 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1835 dma->resu++; 1836 if (sc->sc_r_busy == 0) { 1837 WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_R_CTRL_ENA); 1838 sc->sc_r_busy = 1; 1839 SET_LED(sc, HIFN_MIPSRST_LED2); 1840 } 1841 1842 if (cmd->sloplen) 1843 cmd->slopidx = resi; 1844 1845 hifn_dmamap_load_dst(sc, cmd); 1846 1847 if (sc->sc_d_busy == 0) { 1848 WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_D_CTRL_ENA); 1849 sc->sc_d_busy = 1; 1850 } 1851 1852 #ifdef HIFN_DEBUG 1853 if (hifn_debug) 1854 printf("%s: command: stat %8x ier %8x\n", 1855 sc->sc_dv.dv_xname, 1856 READ_REG_1(sc, HIFN_1_DMA_CSR), READ_REG_1(sc, HIFN_1_DMA_IER)); 1857 #endif 1858 1859 sc->sc_active = 5; 1860 splx(s); 1861 return (err); /* success */ 1862 1863 err_dstmap: 1864 if (cmd->src_map != cmd->dst_map) 1865 bus_dmamap_unload(sc->sc_dmat, 
cmd->dst_map); 1866 err_dstmap1: 1867 if (cmd->src_map != cmd->dst_map) 1868 bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map); 1869 err_srcmap: 1870 if (crp->crp_flags & CRYPTO_F_IMBUF && 1871 cmd->srcu.src_m != cmd->dstu.dst_m) 1872 m_freem(cmd->dstu.dst_m); 1873 bus_dmamap_unload(sc->sc_dmat, cmd->src_map); 1874 err_srcmap1: 1875 bus_dmamap_destroy(sc->sc_dmat, cmd->src_map); 1876 return (err); 1877 } 1878 1879 static void 1880 hifn_tick(void *vsc) 1881 { 1882 struct hifn_softc *sc = vsc; 1883 int s; 1884 1885 s = splnet(); 1886 if (sc->sc_active == 0) { 1887 struct hifn_dma *dma = sc->sc_dma; 1888 u_int32_t r = 0; 1889 1890 if (dma->cmdu == 0 && sc->sc_c_busy) { 1891 sc->sc_c_busy = 0; 1892 r |= HIFN_DMACSR_C_CTRL_DIS; 1893 CLR_LED(sc, HIFN_MIPSRST_LED0); 1894 } 1895 if (dma->srcu == 0 && sc->sc_s_busy) { 1896 sc->sc_s_busy = 0; 1897 r |= HIFN_DMACSR_S_CTRL_DIS; 1898 CLR_LED(sc, HIFN_MIPSRST_LED1); 1899 } 1900 if (dma->dstu == 0 && sc->sc_d_busy) { 1901 sc->sc_d_busy = 0; 1902 r |= HIFN_DMACSR_D_CTRL_DIS; 1903 } 1904 if (dma->resu == 0 && sc->sc_r_busy) { 1905 sc->sc_r_busy = 0; 1906 r |= HIFN_DMACSR_R_CTRL_DIS; 1907 CLR_LED(sc, HIFN_MIPSRST_LED2); 1908 } 1909 if (r) 1910 WRITE_REG_1(sc, HIFN_1_DMA_CSR, r); 1911 } 1912 else 1913 sc->sc_active--; 1914 splx(s); 1915 #ifdef __OpenBSD__ 1916 timeout_add(&sc->sc_tickto, hz); 1917 #else 1918 callout_reset(&sc->sc_tickto, hz, hifn_tick, sc); 1919 #endif 1920 } 1921 1922 static int 1923 hifn_intr(void *arg) 1924 { 1925 struct hifn_softc *sc = arg; 1926 struct hifn_dma *dma = sc->sc_dma; 1927 u_int32_t dmacsr, restart; 1928 int i, u; 1929 1930 dmacsr = READ_REG_1(sc, HIFN_1_DMA_CSR); 1931 1932 #ifdef HIFN_DEBUG 1933 if (hifn_debug) 1934 printf("%s: irq: stat %08x ien %08x u %d/%d/%d/%d\n", 1935 sc->sc_dv.dv_xname, 1936 dmacsr, READ_REG_1(sc, HIFN_1_DMA_IER), 1937 dma->cmdu, dma->srcu, dma->dstu, dma->resu); 1938 #endif 1939 1940 /* Nothing in the DMA unit interrupted */ 1941 if ((dmacsr & sc->sc_dmaier) == 0) 1942 return 
(0); 1943 1944 WRITE_REG_1(sc, HIFN_1_DMA_CSR, dmacsr & sc->sc_dmaier); 1945 1946 if (dmacsr & HIFN_DMACSR_ENGINE) 1947 WRITE_REG_0(sc, HIFN_0_PUISR, READ_REG_0(sc, HIFN_0_PUISR)); 1948 1949 if ((sc->sc_flags & HIFN_HAS_PUBLIC) && 1950 (dmacsr & HIFN_DMACSR_PUBDONE)) 1951 WRITE_REG_1(sc, HIFN_1_PUB_STATUS, 1952 READ_REG_1(sc, HIFN_1_PUB_STATUS) | HIFN_PUBSTS_DONE); 1953 1954 restart = dmacsr & (HIFN_DMACSR_R_OVER | HIFN_DMACSR_D_OVER); 1955 if (restart) 1956 printf("%s: overrun %x\n", sc->sc_dv.dv_xname, dmacsr); 1957 1958 if (sc->sc_flags & HIFN_IS_7811) { 1959 if (dmacsr & HIFN_DMACSR_ILLR) 1960 printf("%s: illegal read\n", sc->sc_dv.dv_xname); 1961 if (dmacsr & HIFN_DMACSR_ILLW) 1962 printf("%s: illegal write\n", sc->sc_dv.dv_xname); 1963 } 1964 1965 restart = dmacsr & (HIFN_DMACSR_C_ABORT | HIFN_DMACSR_S_ABORT | 1966 HIFN_DMACSR_D_ABORT | HIFN_DMACSR_R_ABORT); 1967 if (restart) { 1968 printf("%s: abort, resetting.\n", sc->sc_dv.dv_xname); 1969 hifnstats.hst_abort++; 1970 hifn_abort(sc); 1971 return (1); 1972 } 1973 1974 if ((dmacsr & HIFN_DMACSR_C_WAIT) && (dma->resu == 0)) { 1975 /* 1976 * If no slots to process and we receive a "waiting on 1977 * command" interrupt, we disable the "waiting on command" 1978 * (by clearing it). 
1979 */ 1980 sc->sc_dmaier &= ~HIFN_DMAIER_C_WAIT; 1981 WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier); 1982 } 1983 1984 /* clear the rings */ 1985 i = dma->resk; 1986 while (dma->resu != 0) { 1987 HIFN_RESR_SYNC(sc, i, 1988 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1989 if (dma->resr[i].l & htole32(HIFN_D_VALID)) { 1990 HIFN_RESR_SYNC(sc, i, 1991 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1992 break; 1993 } 1994 1995 if (i != HIFN_D_RES_RSIZE) { 1996 struct hifn_command *cmd; 1997 1998 HIFN_RES_SYNC(sc, i, BUS_DMASYNC_POSTREAD); 1999 cmd = dma->hifn_commands[i]; 2000 KASSERT(cmd != NULL 2001 /*("hifn_intr: null command slot %u", i)*/); 2002 dma->hifn_commands[i] = NULL; 2003 2004 hifn_callback(sc, cmd, dma->result_bufs[i]); 2005 hifnstats.hst_opackets++; 2006 } 2007 2008 if (++i == (HIFN_D_RES_RSIZE + 1)) 2009 i = 0; 2010 else 2011 dma->resu--; 2012 } 2013 dma->resk = i; 2014 2015 i = dma->srck; u = dma->srcu; 2016 while (u != 0) { 2017 HIFN_SRCR_SYNC(sc, i, 2018 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 2019 if (dma->srcr[i].l & htole32(HIFN_D_VALID)) { 2020 HIFN_SRCR_SYNC(sc, i, 2021 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 2022 break; 2023 } 2024 if (++i == (HIFN_D_SRC_RSIZE + 1)) 2025 i = 0; 2026 else 2027 u--; 2028 } 2029 dma->srck = i; dma->srcu = u; 2030 2031 i = dma->cmdk; u = dma->cmdu; 2032 while (u != 0) { 2033 HIFN_CMDR_SYNC(sc, i, 2034 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 2035 if (dma->cmdr[i].l & htole32(HIFN_D_VALID)) { 2036 HIFN_CMDR_SYNC(sc, i, 2037 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 2038 break; 2039 } 2040 if (i != HIFN_D_CMD_RSIZE) { 2041 u--; 2042 HIFN_CMD_SYNC(sc, i, BUS_DMASYNC_POSTWRITE); 2043 } 2044 if (++i == (HIFN_D_CMD_RSIZE + 1)) 2045 i = 0; 2046 } 2047 dma->cmdk = i; dma->cmdu = u; 2048 2049 return (1); 2050 } 2051 2052 /* 2053 * Allocate a new 'session' and return an encoded session id. 
'sidp'
 * contains our registration id, and should contain an encoded session
 * id on successful allocation.
 */
static int
hifn_newsession(void *arg, u_int32_t *sidp, struct cryptoini *cri)
{
	struct cryptoini *c;
	struct hifn_softc *sc = arg;
	int i, mac = 0, cry = 0, comp = 0;

	KASSERT(sc != NULL /*, ("hifn_newsession: null softc")*/);
	if (sidp == NULL || cri == NULL || sc == NULL)
		return (EINVAL);

	/* Find the first free session slot. */
	for (i = 0; i < sc->sc_maxses; i++)
		if (sc->sc_sessions[i].hs_state == HS_STATE_FREE)
			break;
	if (i == sc->sc_maxses)
		return (ENOMEM);

	/* Walk the requested algorithm chain; at most one MAC, one
	 * cipher, and one compressor may be requested. */
	for (c = cri; c != NULL; c = c->cri_next) {
		switch (c->cri_alg) {
		case CRYPTO_MD5:
		case CRYPTO_SHA1:
		case CRYPTO_MD5_HMAC:
		case CRYPTO_SHA1_HMAC:
			if (mac)
				return (EINVAL);
			mac = 1;
			break;
		case CRYPTO_DES_CBC:
		case CRYPTO_3DES_CBC:
		case CRYPTO_AES_CBC:
			/* Note that this is an initialization
			   vector, not a cipher key; any function
			   giving sufficient Hamming distance
			   between outputs is fine.  Use of RC4
			   to generate IVs has been FIPS140-2
			   certified by several labs. */
#ifdef __NetBSD__
			arc4randbytes(sc->sc_sessions[i].hs_iv,
			    c->cri_alg == CRYPTO_AES_CBC ?
				HIFN_AES_IV_LENGTH : HIFN_IV_LENGTH);
#else	/* FreeBSD and OpenBSD have get_random_bytes */
			/* XXX this may read fewer, does it matter? */
			get_random_bytes(sc->sc_sessions[i].hs_iv,
			    c->cri_alg == CRYPTO_AES_CBC ?
				HIFN_AES_IV_LENGTH : HIFN_IV_LENGTH);
#endif
			/*FALLTHROUGH*/
		case CRYPTO_ARC4:
			if (cry)
				return (EINVAL);
			cry = 1;
			break;
#ifdef HAVE_CRYPTO_LZS
		case CRYPTO_LZS_COMP:
			if (comp)
				return (EINVAL);
			comp = 1;
			break;
#endif
		default:
			return (EINVAL);
		}
	}
	if (mac == 0 && cry == 0 && comp == 0)
		return (EINVAL);

	/*
	 * XXX only want to support compression without chaining to
	 * MAC/crypt engine right now
	 */
	if ((comp && mac) || (comp && cry))
		return (EINVAL);

	*sidp = HIFN_SID(device_unit(&sc->sc_dv), i);
	sc->sc_sessions[i].hs_state = HS_STATE_USED;

	return (0);
}

/*
 * Deallocate a session.
 * XXX this routine should run a zero'd mac/encrypt key into context ram.
 * XXX to blow away any keys already stored there.
 */
static int
hifn_freesession(void *arg, u_int64_t tid)
{
	struct hifn_softc *sc = arg;
	int session;
	u_int32_t sid = ((u_int32_t) tid) & 0xffffffff;

	KASSERT(sc != NULL /*, ("hifn_freesession: null softc")*/);
	if (sc == NULL)
		return (EINVAL);

	session = HIFN_SESSION(sid);
	if (session >= sc->sc_maxses)
		return (EINVAL);

	/* Clearing the slot also resets hs_state to HS_STATE_FREE. */
	bzero(&sc->sc_sessions[session], sizeof(sc->sc_sessions[session]));
	return (0);
}

/*
 * Dispatch one cryptop: validate the session, classify the descriptor
 * chain (MAC only, cipher only, or a MAC/cipher pair in either order),
 * set up IV and key material, and hand the command to hifn_crypto().
 */
static int
hifn_process(void *arg, struct cryptop *crp, int hint)
{
	struct hifn_softc *sc = arg;
	struct hifn_command *cmd = NULL;
	int session, err, ivlen;
	struct cryptodesc *crd1, *crd2, *maccrd, *enccrd;

	if (crp == NULL || crp->crp_callback == NULL) {
		hifnstats.hst_invalid++;
		return (EINVAL);
	}
	session = HIFN_SESSION(crp->crp_sid);

	if (sc == NULL || session >= sc->sc_maxses) {
		err = EINVAL;
		goto errout;
	}

	cmd = (struct hifn_command *)malloc(sizeof(struct hifn_command),
	    M_DEVBUF,
M_NOWAIT|M_ZERO); 2181 if (cmd == NULL) { 2182 hifnstats.hst_nomem++; 2183 err = ENOMEM; 2184 goto errout; 2185 } 2186 2187 if (crp->crp_flags & CRYPTO_F_IMBUF) { 2188 cmd->srcu.src_m = (struct mbuf *)crp->crp_buf; 2189 cmd->dstu.dst_m = (struct mbuf *)crp->crp_buf; 2190 } else if (crp->crp_flags & CRYPTO_F_IOV) { 2191 cmd->srcu.src_io = (struct uio *)crp->crp_buf; 2192 cmd->dstu.dst_io = (struct uio *)crp->crp_buf; 2193 } else { 2194 err = EINVAL; 2195 goto errout; /* XXX we don't handle contiguous buffers! */ 2196 } 2197 2198 crd1 = crp->crp_desc; 2199 if (crd1 == NULL) { 2200 err = EINVAL; 2201 goto errout; 2202 } 2203 crd2 = crd1->crd_next; 2204 2205 if (crd2 == NULL) { 2206 if (crd1->crd_alg == CRYPTO_MD5_HMAC || 2207 crd1->crd_alg == CRYPTO_SHA1_HMAC || 2208 crd1->crd_alg == CRYPTO_SHA1 || 2209 crd1->crd_alg == CRYPTO_MD5) { 2210 maccrd = crd1; 2211 enccrd = NULL; 2212 } else if (crd1->crd_alg == CRYPTO_DES_CBC || 2213 crd1->crd_alg == CRYPTO_3DES_CBC || 2214 crd1->crd_alg == CRYPTO_AES_CBC || 2215 crd1->crd_alg == CRYPTO_ARC4) { 2216 if ((crd1->crd_flags & CRD_F_ENCRYPT) == 0) 2217 cmd->base_masks |= HIFN_BASE_CMD_DECODE; 2218 maccrd = NULL; 2219 enccrd = crd1; 2220 #ifdef HAVE_CRYPTO_LZS 2221 } else if (crd1->crd_alg == CRYPTO_LZS_COMP) { 2222 return (hifn_compression(sc, crp, cmd)); 2223 #endif 2224 } else { 2225 err = EINVAL; 2226 goto errout; 2227 } 2228 } else { 2229 if ((crd1->crd_alg == CRYPTO_MD5_HMAC || 2230 crd1->crd_alg == CRYPTO_SHA1_HMAC || 2231 crd1->crd_alg == CRYPTO_MD5 || 2232 crd1->crd_alg == CRYPTO_SHA1) && 2233 (crd2->crd_alg == CRYPTO_DES_CBC || 2234 crd2->crd_alg == CRYPTO_3DES_CBC || 2235 crd2->crd_alg == CRYPTO_AES_CBC || 2236 crd2->crd_alg == CRYPTO_ARC4) && 2237 ((crd2->crd_flags & CRD_F_ENCRYPT) == 0)) { 2238 cmd->base_masks = HIFN_BASE_CMD_DECODE; 2239 maccrd = crd1; 2240 enccrd = crd2; 2241 } else if ((crd1->crd_alg == CRYPTO_DES_CBC || 2242 crd1->crd_alg == CRYPTO_ARC4 || 2243 crd1->crd_alg == CRYPTO_3DES_CBC || 2244 
crd1->crd_alg == CRYPTO_AES_CBC) && 2245 (crd2->crd_alg == CRYPTO_MD5_HMAC || 2246 crd2->crd_alg == CRYPTO_SHA1_HMAC || 2247 crd2->crd_alg == CRYPTO_MD5 || 2248 crd2->crd_alg == CRYPTO_SHA1) && 2249 (crd1->crd_flags & CRD_F_ENCRYPT)) { 2250 enccrd = crd1; 2251 maccrd = crd2; 2252 } else { 2253 /* 2254 * We cannot order the 7751 as requested 2255 */ 2256 err = EINVAL; 2257 goto errout; 2258 } 2259 } 2260 2261 if (enccrd) { 2262 cmd->enccrd = enccrd; 2263 cmd->base_masks |= HIFN_BASE_CMD_CRYPT; 2264 switch (enccrd->crd_alg) { 2265 case CRYPTO_ARC4: 2266 cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_RC4; 2267 if ((enccrd->crd_flags & CRD_F_ENCRYPT) 2268 != sc->sc_sessions[session].hs_prev_op) 2269 sc->sc_sessions[session].hs_state = 2270 HS_STATE_USED; 2271 break; 2272 case CRYPTO_DES_CBC: 2273 cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_DES | 2274 HIFN_CRYPT_CMD_MODE_CBC | 2275 HIFN_CRYPT_CMD_NEW_IV; 2276 break; 2277 case CRYPTO_3DES_CBC: 2278 cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_3DES | 2279 HIFN_CRYPT_CMD_MODE_CBC | 2280 HIFN_CRYPT_CMD_NEW_IV; 2281 break; 2282 case CRYPTO_AES_CBC: 2283 cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_AES | 2284 HIFN_CRYPT_CMD_MODE_CBC | 2285 HIFN_CRYPT_CMD_NEW_IV; 2286 break; 2287 default: 2288 err = EINVAL; 2289 goto errout; 2290 } 2291 if (enccrd->crd_alg != CRYPTO_ARC4) { 2292 ivlen = ((enccrd->crd_alg == CRYPTO_AES_CBC) ? 
2293 HIFN_AES_IV_LENGTH : HIFN_IV_LENGTH); 2294 if (enccrd->crd_flags & CRD_F_ENCRYPT) { 2295 if (enccrd->crd_flags & CRD_F_IV_EXPLICIT) 2296 bcopy(enccrd->crd_iv, cmd->iv, ivlen); 2297 else 2298 bcopy(sc->sc_sessions[session].hs_iv, 2299 cmd->iv, ivlen); 2300 2301 if ((enccrd->crd_flags & CRD_F_IV_PRESENT) 2302 == 0) { 2303 if (crp->crp_flags & CRYPTO_F_IMBUF) 2304 m_copyback(cmd->srcu.src_m, 2305 enccrd->crd_inject, 2306 ivlen, cmd->iv); 2307 else if (crp->crp_flags & CRYPTO_F_IOV) 2308 cuio_copyback(cmd->srcu.src_io, 2309 enccrd->crd_inject, 2310 ivlen, cmd->iv); 2311 } 2312 } else { 2313 if (enccrd->crd_flags & CRD_F_IV_EXPLICIT) 2314 bcopy(enccrd->crd_iv, cmd->iv, ivlen); 2315 else if (crp->crp_flags & CRYPTO_F_IMBUF) 2316 m_copydata(cmd->srcu.src_m, 2317 enccrd->crd_inject, ivlen, cmd->iv); 2318 else if (crp->crp_flags & CRYPTO_F_IOV) 2319 cuio_copydata(cmd->srcu.src_io, 2320 enccrd->crd_inject, ivlen, cmd->iv); 2321 } 2322 } 2323 2324 cmd->ck = enccrd->crd_key; 2325 cmd->cklen = enccrd->crd_klen >> 3; 2326 2327 /* 2328 * Need to specify the size for the AES key in the masks. 
2329 */ 2330 if ((cmd->cry_masks & HIFN_CRYPT_CMD_ALG_MASK) == 2331 HIFN_CRYPT_CMD_ALG_AES) { 2332 switch (cmd->cklen) { 2333 case 16: 2334 cmd->cry_masks |= HIFN_CRYPT_CMD_KSZ_128; 2335 break; 2336 case 24: 2337 cmd->cry_masks |= HIFN_CRYPT_CMD_KSZ_192; 2338 break; 2339 case 32: 2340 cmd->cry_masks |= HIFN_CRYPT_CMD_KSZ_256; 2341 break; 2342 default: 2343 err = EINVAL; 2344 goto errout; 2345 } 2346 } 2347 2348 if (sc->sc_sessions[session].hs_state == HS_STATE_USED) 2349 cmd->cry_masks |= HIFN_CRYPT_CMD_NEW_KEY; 2350 } 2351 2352 if (maccrd) { 2353 cmd->maccrd = maccrd; 2354 cmd->base_masks |= HIFN_BASE_CMD_MAC; 2355 2356 switch (maccrd->crd_alg) { 2357 case CRYPTO_MD5: 2358 cmd->mac_masks |= HIFN_MAC_CMD_ALG_MD5 | 2359 HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HASH | 2360 HIFN_MAC_CMD_POS_IPSEC; 2361 break; 2362 case CRYPTO_MD5_HMAC: 2363 cmd->mac_masks |= HIFN_MAC_CMD_ALG_MD5 | 2364 HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HMAC | 2365 HIFN_MAC_CMD_POS_IPSEC | HIFN_MAC_CMD_TRUNC; 2366 break; 2367 case CRYPTO_SHA1: 2368 cmd->mac_masks |= HIFN_MAC_CMD_ALG_SHA1 | 2369 HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HASH | 2370 HIFN_MAC_CMD_POS_IPSEC; 2371 break; 2372 case CRYPTO_SHA1_HMAC: 2373 cmd->mac_masks |= HIFN_MAC_CMD_ALG_SHA1 | 2374 HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HMAC | 2375 HIFN_MAC_CMD_POS_IPSEC | HIFN_MAC_CMD_TRUNC; 2376 break; 2377 } 2378 2379 if ((maccrd->crd_alg == CRYPTO_SHA1_HMAC || 2380 maccrd->crd_alg == CRYPTO_MD5_HMAC) && 2381 sc->sc_sessions[session].hs_state == HS_STATE_USED) { 2382 cmd->mac_masks |= HIFN_MAC_CMD_NEW_KEY; 2383 bcopy(maccrd->crd_key, cmd->mac, maccrd->crd_klen >> 3); 2384 bzero(cmd->mac + (maccrd->crd_klen >> 3), 2385 HIFN_MAC_KEY_LENGTH - (maccrd->crd_klen >> 3)); 2386 } 2387 } 2388 2389 cmd->crp = crp; 2390 cmd->session_num = session; 2391 cmd->softc = sc; 2392 2393 err = hifn_crypto(sc, cmd, crp, hint); 2394 if (err == 0) { 2395 if (enccrd) 2396 sc->sc_sessions[session].hs_prev_op = 2397 enccrd->crd_flags & CRD_F_ENCRYPT; 2398 if 
(sc->sc_sessions[session].hs_state == HS_STATE_USED) 2399 sc->sc_sessions[session].hs_state = HS_STATE_KEY; 2400 return 0; 2401 } else if (err == ERESTART) { 2402 /* 2403 * There weren't enough resources to dispatch the request 2404 * to the part. Notify the caller so they'll requeue this 2405 * request and resubmit it again soon. 2406 */ 2407 #ifdef HIFN_DEBUG 2408 if (hifn_debug) 2409 printf(sc->sc_dv.dv_xname, "requeue request\n"); 2410 #endif 2411 free(cmd, M_DEVBUF); 2412 sc->sc_needwakeup |= CRYPTO_SYMQ; 2413 return (err); 2414 } 2415 2416 errout: 2417 if (cmd != NULL) 2418 free(cmd, M_DEVBUF); 2419 if (err == EINVAL) 2420 hifnstats.hst_invalid++; 2421 else 2422 hifnstats.hst_nomem++; 2423 crp->crp_etype = err; 2424 crypto_done(crp); 2425 return (0); 2426 } 2427 2428 static void 2429 hifn_abort(struct hifn_softc *sc) 2430 { 2431 struct hifn_dma *dma = sc->sc_dma; 2432 struct hifn_command *cmd; 2433 struct cryptop *crp; 2434 int i, u; 2435 2436 i = dma->resk; u = dma->resu; 2437 while (u != 0) { 2438 cmd = dma->hifn_commands[i]; 2439 KASSERT(cmd != NULL /*, ("hifn_abort: null cmd slot %u", i)*/); 2440 dma->hifn_commands[i] = NULL; 2441 crp = cmd->crp; 2442 2443 if ((dma->resr[i].l & htole32(HIFN_D_VALID)) == 0) { 2444 /* Salvage what we can. 
*/ 2445 hifnstats.hst_opackets++; 2446 hifn_callback(sc, cmd, dma->result_bufs[i]); 2447 } else { 2448 if (cmd->src_map == cmd->dst_map) { 2449 bus_dmamap_sync(sc->sc_dmat, cmd->src_map, 2450 0, cmd->src_map->dm_mapsize, 2451 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 2452 } else { 2453 bus_dmamap_sync(sc->sc_dmat, cmd->src_map, 2454 0, cmd->src_map->dm_mapsize, 2455 BUS_DMASYNC_POSTWRITE); 2456 bus_dmamap_sync(sc->sc_dmat, cmd->dst_map, 2457 0, cmd->dst_map->dm_mapsize, 2458 BUS_DMASYNC_POSTREAD); 2459 } 2460 2461 if (cmd->srcu.src_m != cmd->dstu.dst_m) { 2462 m_freem(cmd->srcu.src_m); 2463 crp->crp_buf = (caddr_t)cmd->dstu.dst_m; 2464 } 2465 2466 /* non-shared buffers cannot be restarted */ 2467 if (cmd->src_map != cmd->dst_map) { 2468 /* 2469 * XXX should be EAGAIN, delayed until 2470 * after the reset. 2471 */ 2472 crp->crp_etype = ENOMEM; 2473 bus_dmamap_unload(sc->sc_dmat, cmd->dst_map); 2474 bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map); 2475 } else 2476 crp->crp_etype = ENOMEM; 2477 2478 bus_dmamap_unload(sc->sc_dmat, cmd->src_map); 2479 bus_dmamap_destroy(sc->sc_dmat, cmd->src_map); 2480 2481 free(cmd, M_DEVBUF); 2482 if (crp->crp_etype != EAGAIN) 2483 crypto_done(crp); 2484 } 2485 2486 if (++i == HIFN_D_RES_RSIZE) 2487 i = 0; 2488 u--; 2489 } 2490 dma->resk = i; dma->resu = u; 2491 2492 /* Force upload of key next time */ 2493 for (i = 0; i < sc->sc_maxses; i++) 2494 if (sc->sc_sessions[i].hs_state == HS_STATE_KEY) 2495 sc->sc_sessions[i].hs_state = HS_STATE_USED; 2496 2497 hifn_reset_board(sc, 1); 2498 hifn_init_dma(sc); 2499 hifn_init_pci_registers(sc); 2500 } 2501 2502 static void 2503 hifn_callback(struct hifn_softc *sc, struct hifn_command *cmd, u_int8_t *resbuf) 2504 { 2505 struct hifn_dma *dma = sc->sc_dma; 2506 struct cryptop *crp = cmd->crp; 2507 struct cryptodesc *crd; 2508 struct mbuf *m; 2509 int totlen, i, u, ivlen; 2510 2511 if (cmd->src_map == cmd->dst_map) 2512 bus_dmamap_sync(sc->sc_dmat, cmd->src_map, 2513 0, cmd->src_map->dm_mapsize, 
2514 BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD); 2515 else { 2516 bus_dmamap_sync(sc->sc_dmat, cmd->src_map, 2517 0, cmd->src_map->dm_mapsize, BUS_DMASYNC_POSTWRITE); 2518 bus_dmamap_sync(sc->sc_dmat, cmd->dst_map, 2519 0, cmd->dst_map->dm_mapsize, BUS_DMASYNC_POSTREAD); 2520 } 2521 2522 if (crp->crp_flags & CRYPTO_F_IMBUF) { 2523 if (cmd->srcu.src_m != cmd->dstu.dst_m) { 2524 crp->crp_buf = (caddr_t)cmd->dstu.dst_m; 2525 totlen = cmd->src_map->dm_mapsize; 2526 for (m = cmd->dstu.dst_m; m != NULL; m = m->m_next) { 2527 if (totlen < m->m_len) { 2528 m->m_len = totlen; 2529 totlen = 0; 2530 } else 2531 totlen -= m->m_len; 2532 } 2533 cmd->dstu.dst_m->m_pkthdr.len = 2534 cmd->srcu.src_m->m_pkthdr.len; 2535 m_freem(cmd->srcu.src_m); 2536 } 2537 } 2538 2539 if (cmd->sloplen != 0) { 2540 if (crp->crp_flags & CRYPTO_F_IMBUF) 2541 m_copyback((struct mbuf *)crp->crp_buf, 2542 cmd->src_map->dm_mapsize - cmd->sloplen, 2543 cmd->sloplen, (caddr_t)&dma->slop[cmd->slopidx]); 2544 else if (crp->crp_flags & CRYPTO_F_IOV) 2545 cuio_copyback((struct uio *)crp->crp_buf, 2546 cmd->src_map->dm_mapsize - cmd->sloplen, 2547 cmd->sloplen, (caddr_t)&dma->slop[cmd->slopidx]); 2548 } 2549 2550 i = dma->dstk; u = dma->dstu; 2551 while (u != 0) { 2552 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, 2553 offsetof(struct hifn_dma, dstr[i]), sizeof(struct hifn_desc), 2554 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 2555 if (dma->dstr[i].l & htole32(HIFN_D_VALID)) { 2556 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, 2557 offsetof(struct hifn_dma, dstr[i]), 2558 sizeof(struct hifn_desc), 2559 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 2560 break; 2561 } 2562 if (++i == (HIFN_D_DST_RSIZE + 1)) 2563 i = 0; 2564 else 2565 u--; 2566 } 2567 dma->dstk = i; dma->dstu = u; 2568 2569 hifnstats.hst_obytes += cmd->dst_map->dm_mapsize; 2570 2571 if ((cmd->base_masks & (HIFN_BASE_CMD_CRYPT | HIFN_BASE_CMD_DECODE)) == 2572 HIFN_BASE_CMD_CRYPT) { 2573 for (crd = crp->crp_desc; crd; crd = crd->crd_next) { 2574 
if (crd->crd_alg != CRYPTO_DES_CBC && 2575 crd->crd_alg != CRYPTO_3DES_CBC && 2576 crd->crd_alg != CRYPTO_AES_CBC) 2577 continue; 2578 ivlen = ((crd->crd_alg == CRYPTO_AES_CBC) ? 2579 HIFN_AES_IV_LENGTH : HIFN_IV_LENGTH); 2580 if (crp->crp_flags & CRYPTO_F_IMBUF) 2581 m_copydata((struct mbuf *)crp->crp_buf, 2582 crd->crd_skip + crd->crd_len - ivlen, 2583 ivlen, 2584 cmd->softc->sc_sessions[cmd->session_num].hs_iv); 2585 else if (crp->crp_flags & CRYPTO_F_IOV) { 2586 cuio_copydata((struct uio *)crp->crp_buf, 2587 crd->crd_skip + crd->crd_len - ivlen, 2588 ivlen, 2589 cmd->softc->sc_sessions[cmd->session_num].hs_iv); 2590 } 2591 /* XXX We do not handle contig data */ 2592 break; 2593 } 2594 } 2595 2596 if (cmd->base_masks & HIFN_BASE_CMD_MAC) { 2597 u_int8_t *macbuf; 2598 2599 macbuf = resbuf + sizeof(struct hifn_base_result); 2600 if (cmd->base_masks & HIFN_BASE_CMD_COMP) 2601 macbuf += sizeof(struct hifn_comp_result); 2602 macbuf += sizeof(struct hifn_mac_result); 2603 2604 for (crd = crp->crp_desc; crd; crd = crd->crd_next) { 2605 int len; 2606 2607 if (crd->crd_alg == CRYPTO_MD5) 2608 len = 16; 2609 else if (crd->crd_alg == CRYPTO_SHA1) 2610 len = 20; 2611 else if (crd->crd_alg == CRYPTO_MD5_HMAC || 2612 crd->crd_alg == CRYPTO_SHA1_HMAC) 2613 len = 12; 2614 else 2615 continue; 2616 2617 if (crp->crp_flags & CRYPTO_F_IMBUF) 2618 m_copyback((struct mbuf *)crp->crp_buf, 2619 crd->crd_inject, len, macbuf); 2620 else if ((crp->crp_flags & CRYPTO_F_IOV) && crp->crp_mac) 2621 bcopy((caddr_t)macbuf, crp->crp_mac, len); 2622 break; 2623 } 2624 } 2625 2626 if (cmd->src_map != cmd->dst_map) { 2627 bus_dmamap_unload(sc->sc_dmat, cmd->dst_map); 2628 bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map); 2629 } 2630 bus_dmamap_unload(sc->sc_dmat, cmd->src_map); 2631 bus_dmamap_destroy(sc->sc_dmat, cmd->src_map); 2632 free(cmd, M_DEVBUF); 2633 crypto_done(crp); 2634 } 2635 2636 #ifdef HAVE_CRYPTO_LZS 2637 2638 static int 2639 hifn_compression(struct hifn_softc *sc, struct cryptop *crp, 
2640 struct hifn_command *cmd) 2641 { 2642 struct cryptodesc *crd = crp->crp_desc; 2643 int s, err = 0; 2644 2645 cmd->compcrd = crd; 2646 cmd->base_masks |= HIFN_BASE_CMD_COMP; 2647 2648 if ((crp->crp_flags & CRYPTO_F_IMBUF) == 0) { 2649 /* 2650 * XXX can only handle mbufs right now since we can 2651 * XXX dynamically resize them. 2652 */ 2653 err = EINVAL; 2654 return (ENOMEM); 2655 } 2656 2657 if ((crd->crd_flags & CRD_F_COMP) == 0) 2658 cmd->base_masks |= HIFN_BASE_CMD_DECODE; 2659 if (crd->crd_alg == CRYPTO_LZS_COMP) 2660 cmd->comp_masks |= HIFN_COMP_CMD_ALG_LZS | 2661 HIFN_COMP_CMD_CLEARHIST; 2662 2663 if (bus_dmamap_create(sc->sc_dmat, HIFN_MAX_DMALEN, MAX_SCATTER, 2664 HIFN_MAX_SEGLEN, 0, BUS_DMA_NOWAIT, &cmd->src_map)) { 2665 err = ENOMEM; 2666 goto fail; 2667 } 2668 2669 if (bus_dmamap_create(sc->sc_dmat, HIFN_MAX_DMALEN, MAX_SCATTER, 2670 HIFN_MAX_SEGLEN, 0, BUS_DMA_NOWAIT, &cmd->dst_map)) { 2671 err = ENOMEM; 2672 goto fail; 2673 } 2674 2675 if (crp->crp_flags & CRYPTO_F_IMBUF) { 2676 int len; 2677 2678 if (bus_dmamap_load_mbuf(sc->sc_dmat, cmd->src_map, 2679 cmd->srcu.src_m, BUS_DMA_NOWAIT)) { 2680 err = ENOMEM; 2681 goto fail; 2682 } 2683 2684 len = cmd->src_map->dm_mapsize / MCLBYTES; 2685 if ((cmd->src_map->dm_mapsize % MCLBYTES) != 0) 2686 len++; 2687 len *= MCLBYTES; 2688 2689 if ((crd->crd_flags & CRD_F_COMP) == 0) 2690 len *= 4; 2691 2692 if (len > HIFN_MAX_DMALEN) 2693 len = HIFN_MAX_DMALEN; 2694 2695 cmd->dstu.dst_m = hifn_mkmbuf_chain(len, cmd->srcu.src_m); 2696 if (cmd->dstu.dst_m == NULL) { 2697 err = ENOMEM; 2698 goto fail; 2699 } 2700 2701 if (bus_dmamap_load_mbuf(sc->sc_dmat, cmd->dst_map, 2702 cmd->dstu.dst_m, BUS_DMA_NOWAIT)) { 2703 err = ENOMEM; 2704 goto fail; 2705 } 2706 } else if (crp->crp_flags & CRYPTO_F_IOV) { 2707 if (bus_dmamap_load_uio(sc->sc_dmat, cmd->src_map, 2708 cmd->srcu.src_io, BUS_DMA_NOWAIT)) { 2709 err = ENOMEM; 2710 goto fail; 2711 } 2712 if (bus_dmamap_load_uio(sc->sc_dmat, cmd->dst_map, 2713 cmd->dstu.dst_io, 
BUS_DMA_NOWAIT)) { 2714 err = ENOMEM; 2715 goto fail; 2716 } 2717 } 2718 2719 if (cmd->src_map == cmd->dst_map) 2720 bus_dmamap_sync(sc->sc_dmat, cmd->src_map, 2721 0, cmd->src_map->dm_mapsize, 2722 BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD); 2723 else { 2724 bus_dmamap_sync(sc->sc_dmat, cmd->src_map, 2725 0, cmd->src_map->dm_mapsize, BUS_DMASYNC_PREWRITE); 2726 bus_dmamap_sync(sc->sc_dmat, cmd->dst_map, 2727 0, cmd->dst_map->dm_mapsize, BUS_DMASYNC_PREREAD); 2728 } 2729 2730 cmd->crp = crp; 2731 /* 2732 * Always use session 0. The modes of compression we use are 2733 * stateless and there is always at least one compression 2734 * context, zero. 2735 */ 2736 cmd->session_num = 0; 2737 cmd->softc = sc; 2738 2739 s = splnet(); 2740 err = hifn_compress_enter(sc, cmd); 2741 splx(s); 2742 2743 if (err != 0) 2744 goto fail; 2745 return (0); 2746 2747 fail: 2748 if (cmd->dst_map != NULL) { 2749 if (cmd->dst_map->dm_nsegs > 0) 2750 bus_dmamap_unload(sc->sc_dmat, cmd->dst_map); 2751 bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map); 2752 } 2753 if (cmd->src_map != NULL) { 2754 if (cmd->src_map->dm_nsegs > 0) 2755 bus_dmamap_unload(sc->sc_dmat, cmd->src_map); 2756 bus_dmamap_destroy(sc->sc_dmat, cmd->src_map); 2757 } 2758 free(cmd, M_DEVBUF); 2759 if (err == EINVAL) 2760 hifnstats.hst_invalid++; 2761 else 2762 hifnstats.hst_nomem++; 2763 crp->crp_etype = err; 2764 crypto_done(crp); 2765 return (0); 2766 } 2767 2768 /* 2769 * must be called at splnet() 2770 */ 2771 static int 2772 hifn_compress_enter(struct hifn_softc *sc, struct hifn_command *cmd) 2773 { 2774 struct hifn_dma *dma = sc->sc_dma; 2775 int cmdi, resi; 2776 u_int32_t cmdlen; 2777 2778 if ((dma->cmdu + 1) > HIFN_D_CMD_RSIZE || 2779 (dma->resu + 1) > HIFN_D_CMD_RSIZE) 2780 return (ENOMEM); 2781 2782 if ((dma->srcu + cmd->src_map->dm_nsegs) > HIFN_D_SRC_RSIZE || 2783 (dma->dstu + cmd->dst_map->dm_nsegs) > HIFN_D_DST_RSIZE) 2784 return (ENOMEM); 2785 2786 if (dma->cmdi == HIFN_D_CMD_RSIZE) { 2787 dma->cmdi = 0; 2788 
dma->cmdr[HIFN_D_CMD_RSIZE].l = htole32(HIFN_D_VALID | 2789 HIFN_D_JUMP | HIFN_D_MASKDONEIRQ); 2790 HIFN_CMDR_SYNC(sc, HIFN_D_CMD_RSIZE, 2791 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); 2792 } 2793 cmdi = dma->cmdi++; 2794 cmdlen = hifn_write_command(cmd, dma->command_bufs[cmdi]); 2795 HIFN_CMD_SYNC(sc, cmdi, BUS_DMASYNC_PREWRITE); 2796 2797 /* .p for command/result already set */ 2798 dma->cmdr[cmdi].l = htole32(cmdlen | HIFN_D_VALID | HIFN_D_LAST | 2799 HIFN_D_MASKDONEIRQ); 2800 HIFN_CMDR_SYNC(sc, cmdi, 2801 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); 2802 dma->cmdu++; 2803 if (sc->sc_c_busy == 0) { 2804 WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_C_CTRL_ENA); 2805 sc->sc_c_busy = 1; 2806 SET_LED(sc, HIFN_MIPSRST_LED0); 2807 } 2808 2809 /* 2810 * We don't worry about missing an interrupt (which a "command wait" 2811 * interrupt salvages us from), unless there is more than one command 2812 * in the queue. 2813 */ 2814 if (dma->cmdu > 1) { 2815 sc->sc_dmaier |= HIFN_DMAIER_C_WAIT; 2816 WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier); 2817 } 2818 2819 hifnstats.hst_ipackets++; 2820 hifnstats.hst_ibytes += cmd->src_map->dm_mapsize; 2821 2822 hifn_dmamap_load_src(sc, cmd); 2823 if (sc->sc_s_busy == 0) { 2824 WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_S_CTRL_ENA); 2825 sc->sc_s_busy = 1; 2826 SET_LED(sc, HIFN_MIPSRST_LED1); 2827 } 2828 2829 /* 2830 * Unlike other descriptors, we don't mask done interrupt from 2831 * result descriptor. 
2832 */ 2833 if (dma->resi == HIFN_D_RES_RSIZE) { 2834 dma->resi = 0; 2835 dma->resr[HIFN_D_RES_RSIZE].l = htole32(HIFN_D_VALID | 2836 HIFN_D_JUMP | HIFN_D_MASKDONEIRQ); 2837 HIFN_RESR_SYNC(sc, HIFN_D_RES_RSIZE, 2838 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 2839 } 2840 resi = dma->resi++; 2841 dma->hifn_commands[resi] = cmd; 2842 HIFN_RES_SYNC(sc, resi, BUS_DMASYNC_PREREAD); 2843 dma->resr[resi].l = htole32(HIFN_MAX_RESULT | 2844 HIFN_D_VALID | HIFN_D_LAST); 2845 HIFN_RESR_SYNC(sc, resi, 2846 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 2847 dma->resu++; 2848 if (sc->sc_r_busy == 0) { 2849 WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_R_CTRL_ENA); 2850 sc->sc_r_busy = 1; 2851 SET_LED(sc, HIFN_MIPSRST_LED2); 2852 } 2853 2854 if (cmd->sloplen) 2855 cmd->slopidx = resi; 2856 2857 hifn_dmamap_load_dst(sc, cmd); 2858 2859 if (sc->sc_d_busy == 0) { 2860 WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_D_CTRL_ENA); 2861 sc->sc_d_busy = 1; 2862 } 2863 sc->sc_active = 5; 2864 cmd->cmd_callback = hifn_callback_comp; 2865 return (0); 2866 } 2867 2868 static void 2869 hifn_callback_comp(struct hifn_softc *sc, struct hifn_command *cmd, 2870 u_int8_t *resbuf) 2871 { 2872 struct hifn_base_result baseres; 2873 struct cryptop *crp = cmd->crp; 2874 struct hifn_dma *dma = sc->sc_dma; 2875 struct mbuf *m; 2876 int err = 0, i, u; 2877 u_int32_t olen; 2878 bus_size_t dstsize; 2879 2880 bus_dmamap_sync(sc->sc_dmat, cmd->src_map, 2881 0, cmd->src_map->dm_mapsize, BUS_DMASYNC_POSTWRITE); 2882 bus_dmamap_sync(sc->sc_dmat, cmd->dst_map, 2883 0, cmd->dst_map->dm_mapsize, BUS_DMASYNC_POSTREAD); 2884 2885 dstsize = cmd->dst_map->dm_mapsize; 2886 bus_dmamap_unload(sc->sc_dmat, cmd->dst_map); 2887 2888 bcopy(resbuf, &baseres, sizeof(struct hifn_base_result)); 2889 2890 i = dma->dstk; u = dma->dstu; 2891 while (u != 0) { 2892 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, 2893 offsetof(struct hifn_dma, dstr[i]), sizeof(struct hifn_desc), 2894 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 2895 if 
(dma->dstr[i].l & htole32(HIFN_D_VALID)) { 2896 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, 2897 offsetof(struct hifn_dma, dstr[i]), 2898 sizeof(struct hifn_desc), 2899 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 2900 break; 2901 } 2902 if (++i == (HIFN_D_DST_RSIZE + 1)) 2903 i = 0; 2904 else 2905 u--; 2906 } 2907 dma->dstk = i; dma->dstu = u; 2908 2909 if (baseres.flags & htole16(HIFN_BASE_RES_DSTOVERRUN)) { 2910 bus_size_t xlen; 2911 2912 xlen = dstsize; 2913 2914 m_freem(cmd->dstu.dst_m); 2915 2916 if (xlen == HIFN_MAX_DMALEN) { 2917 /* We've done all we can. */ 2918 err = E2BIG; 2919 goto out; 2920 } 2921 2922 xlen += MCLBYTES; 2923 2924 if (xlen > HIFN_MAX_DMALEN) 2925 xlen = HIFN_MAX_DMALEN; 2926 2927 cmd->dstu.dst_m = hifn_mkmbuf_chain(xlen, 2928 cmd->srcu.src_m); 2929 if (cmd->dstu.dst_m == NULL) { 2930 err = ENOMEM; 2931 goto out; 2932 } 2933 if (bus_dmamap_load_mbuf(sc->sc_dmat, cmd->dst_map, 2934 cmd->dstu.dst_m, BUS_DMA_NOWAIT)) { 2935 err = ENOMEM; 2936 goto out; 2937 } 2938 2939 bus_dmamap_sync(sc->sc_dmat, cmd->src_map, 2940 0, cmd->src_map->dm_mapsize, BUS_DMASYNC_PREWRITE); 2941 bus_dmamap_sync(sc->sc_dmat, cmd->dst_map, 2942 0, cmd->dst_map->dm_mapsize, BUS_DMASYNC_PREREAD); 2943 2944 /* already at splnet... 
*/ 2945 err = hifn_compress_enter(sc, cmd); 2946 if (err != 0) 2947 goto out; 2948 return; 2949 } 2950 2951 olen = dstsize - (letoh16(baseres.dst_cnt) | 2952 (((letoh16(baseres.session) & HIFN_BASE_RES_DSTLEN_M) >> 2953 HIFN_BASE_RES_DSTLEN_S) << 16)); 2954 2955 crp->crp_olen = olen - cmd->compcrd->crd_skip; 2956 2957 bus_dmamap_unload(sc->sc_dmat, cmd->src_map); 2958 bus_dmamap_destroy(sc->sc_dmat, cmd->src_map); 2959 bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map); 2960 2961 m = cmd->dstu.dst_m; 2962 if (m->m_flags & M_PKTHDR) 2963 m->m_pkthdr.len = olen; 2964 crp->crp_buf = (caddr_t)m; 2965 for (; m != NULL; m = m->m_next) { 2966 if (olen >= m->m_len) 2967 olen -= m->m_len; 2968 else { 2969 m->m_len = olen; 2970 olen = 0; 2971 } 2972 } 2973 2974 m_freem(cmd->srcu.src_m); 2975 free(cmd, M_DEVBUF); 2976 crp->crp_etype = 0; 2977 crypto_done(crp); 2978 return; 2979 2980 out: 2981 if (cmd->dst_map != NULL) { 2982 if (cmd->src_map->dm_nsegs != 0) 2983 bus_dmamap_unload(sc->sc_dmat, cmd->src_map); 2984 bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map); 2985 } 2986 if (cmd->src_map != NULL) { 2987 if (cmd->src_map->dm_nsegs != 0) 2988 bus_dmamap_unload(sc->sc_dmat, cmd->src_map); 2989 bus_dmamap_destroy(sc->sc_dmat, cmd->src_map); 2990 } 2991 if (cmd->dstu.dst_m != NULL) 2992 m_freem(cmd->dstu.dst_m); 2993 free(cmd, M_DEVBUF); 2994 crp->crp_etype = err; 2995 crypto_done(crp); 2996 } 2997 2998 static struct mbuf * 2999 hifn_mkmbuf_chain(int totlen, struct mbuf *mtemplate) 3000 { 3001 int len; 3002 struct mbuf *m, *m0, *mlast; 3003 3004 if (mtemplate->m_flags & M_PKTHDR) { 3005 len = MHLEN; 3006 MGETHDR(m0, M_DONTWAIT, MT_DATA); 3007 } else { 3008 len = MLEN; 3009 MGET(m0, M_DONTWAIT, MT_DATA); 3010 } 3011 if (m0 == NULL) 3012 return (NULL); 3013 if (len == MHLEN) 3014 M_DUP_PKTHDR(m0, mtemplate); 3015 MCLGET(m0, M_DONTWAIT); 3016 if (!(m0->m_flags & M_EXT)) 3017 m_freem(m0); 3018 len = MCLBYTES; 3019 3020 totlen -= len; 3021 m0->m_pkthdr.len = m0->m_len = len; 3022 mlast = m0; 
3023 3024 while (totlen > 0) { 3025 MGET(m, M_DONTWAIT, MT_DATA); 3026 if (m == NULL) { 3027 m_freem(m0); 3028 return (NULL); 3029 } 3030 MCLGET(m, M_DONTWAIT); 3031 if (!(m->m_flags & M_EXT)) { 3032 m_freem(m0); 3033 return (NULL); 3034 } 3035 len = MCLBYTES; 3036 m->m_len = len; 3037 if (m0->m_flags & M_PKTHDR) 3038 m0->m_pkthdr.len += len; 3039 totlen -= len; 3040 3041 mlast->m_next = m; 3042 mlast = m; 3043 } 3044 3045 return (m0); 3046 } 3047 #endif /* HAVE_CRYPTO_LZS */ 3048 3049 static void 3050 hifn_write_4(struct hifn_softc *sc, int reggrp, bus_size_t reg, u_int32_t val) 3051 { 3052 /* 3053 * 7811 PB3 rev/2 parts lock-up on burst writes to Group 0 3054 * and Group 1 registers; avoid conditions that could create 3055 * burst writes by doing a read in between the writes. 3056 */ 3057 if (sc->sc_flags & HIFN_NO_BURSTWRITE) { 3058 if (sc->sc_waw_lastgroup == reggrp && 3059 sc->sc_waw_lastreg == reg - 4) { 3060 bus_space_read_4(sc->sc_st1, sc->sc_sh1, HIFN_1_REVID); 3061 } 3062 sc->sc_waw_lastgroup = reggrp; 3063 sc->sc_waw_lastreg = reg; 3064 } 3065 if (reggrp == 0) 3066 bus_space_write_4(sc->sc_st0, sc->sc_sh0, reg, val); 3067 else 3068 bus_space_write_4(sc->sc_st1, sc->sc_sh1, reg, val); 3069 3070 } 3071 3072 static u_int32_t 3073 hifn_read_4(struct hifn_softc *sc, int reggrp, bus_size_t reg) 3074 { 3075 if (sc->sc_flags & HIFN_NO_BURSTWRITE) { 3076 sc->sc_waw_lastgroup = -1; 3077 sc->sc_waw_lastreg = 1; 3078 } 3079 if (reggrp == 0) 3080 return (bus_space_read_4(sc->sc_st0, sc->sc_sh0, reg)); 3081 return (bus_space_read_4(sc->sc_st1, sc->sc_sh1, reg)); 3082 } 3083