1 /* $NetBSD: hifn7751.c,v 1.44 2010/02/01 22:34:29 hubertf Exp $ */ 2 /* $FreeBSD: hifn7751.c,v 1.5.2.7 2003/10/08 23:52:00 sam Exp $ */ 3 /* $OpenBSD: hifn7751.c,v 1.140 2003/08/01 17:55:54 deraadt Exp $ */ 4 5 /* 6 * Invertex AEON / Hifn 7751 driver 7 * Copyright (c) 1999 Invertex Inc. All rights reserved. 8 * Copyright (c) 1999 Theo de Raadt 9 * Copyright (c) 2000-2001 Network Security Technologies, Inc. 10 * http://www.netsec.net 11 * Copyright (c) 2003 Hifn Inc. 12 * 13 * This driver is based on a previous driver by Invertex, for which they 14 * requested: Please send any comments, feedback, bug-fixes, or feature 15 * requests to software@invertex.com. 16 * 17 * Redistribution and use in source and binary forms, with or without 18 * modification, are permitted provided that the following conditions 19 * are met: 20 * 21 * 1. Redistributions of source code must retain the above copyright 22 * notice, this list of conditions and the following disclaimer. 23 * 2. Redistributions in binary form must reproduce the above copyright 24 * notice, this list of conditions and the following disclaimer in the 25 * documentation and/or other materials provided with the distribution. 26 * 3. The name of the author may not be used to endorse or promote products 27 * derived from this software without specific prior written permission. 28 * 29 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 30 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 31 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
32 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 33 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 34 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 35 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 36 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 37 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 38 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 39 * 40 * Effort sponsored in part by the Defense Advanced Research Projects 41 * Agency (DARPA) and Air Force Research Laboratory, Air Force 42 * Materiel Command, USAF, under agreement number F30602-01-2-0537. 43 * 44 */ 45 46 /* 47 * Driver for various Hifn pre-HIPP encryption processors. 48 */ 49 50 #include <sys/cdefs.h> 51 __KERNEL_RCSID(0, "$NetBSD: hifn7751.c,v 1.44 2010/02/01 22:34:29 hubertf Exp $"); 52 53 #include "rnd.h" 54 55 #if NRND == 0 56 #error hifn7751 requires rnd pseudo-devices 57 #endif 58 59 60 #include <sys/param.h> 61 #include <sys/systm.h> 62 #include <sys/proc.h> 63 #include <sys/errno.h> 64 #include <sys/malloc.h> 65 #include <sys/kernel.h> 66 #include <sys/mbuf.h> 67 #include <sys/device.h> 68 69 #include <uvm/uvm_extern.h> 70 71 72 #ifdef __OpenBSD__ 73 #include <crypto/crypto.h> 74 #include <dev/rndvar.h> 75 #else 76 #include <opencrypto/cryptodev.h> 77 #include <sys/rnd.h> 78 #endif 79 80 #include <dev/pci/pcireg.h> 81 #include <dev/pci/pcivar.h> 82 #include <dev/pci/pcidevs.h> 83 84 #include <dev/pci/hifn7751reg.h> 85 #include <dev/pci/hifn7751var.h> 86 87 #undef HIFN_DEBUG 88 89 #ifdef __NetBSD__ 90 #define M_DUP_PKTHDR M_COPY_PKTHDR /* XXX */ 91 #endif 92 93 #ifdef HIFN_DEBUG 94 extern int hifn_debug; /* patchable */ 95 int hifn_debug = 1; 96 #endif 97 98 #ifdef __OpenBSD__ 99 #define HAVE_CRYPTO_LZS /* OpenBSD OCF supports CRYPTO_COMP_LZS */ 100 #endif 101 102 /* 103 * Prototypes and count for the pci_device 
structure 104 */ 105 #ifdef __OpenBSD__ 106 static int hifn_probe((struct device *, void *, void *); 107 #else 108 static int hifn_probe(device_t, cfdata_t, void *); 109 #endif 110 static void hifn_attach(device_t, device_t, void *); 111 112 CFATTACH_DECL(hifn, sizeof(struct hifn_softc), 113 hifn_probe, hifn_attach, NULL, NULL); 114 115 #ifdef __OpenBSD__ 116 struct cfdriver hifn_cd = { 117 0, "hifn", DV_DULL 118 }; 119 #endif 120 121 static void hifn_reset_board(struct hifn_softc *, int); 122 static void hifn_reset_puc(struct hifn_softc *); 123 static void hifn_puc_wait(struct hifn_softc *); 124 static const char *hifn_enable_crypto(struct hifn_softc *, pcireg_t); 125 static void hifn_set_retry(struct hifn_softc *); 126 static void hifn_init_dma(struct hifn_softc *); 127 static void hifn_init_pci_registers(struct hifn_softc *); 128 static int hifn_sramsize(struct hifn_softc *); 129 static int hifn_dramsize(struct hifn_softc *); 130 static int hifn_ramtype(struct hifn_softc *); 131 static void hifn_sessions(struct hifn_softc *); 132 static int hifn_intr(void *); 133 static u_int hifn_write_command(struct hifn_command *, u_int8_t *); 134 static u_int32_t hifn_next_signature(u_int32_t a, u_int cnt); 135 static int hifn_newsession(void*, u_int32_t *, struct cryptoini *); 136 static int hifn_freesession(void*, u_int64_t); 137 static int hifn_process(void*, struct cryptop *, int); 138 static void hifn_callback(struct hifn_softc *, struct hifn_command *, 139 u_int8_t *); 140 static int hifn_crypto(struct hifn_softc *, struct hifn_command *, 141 struct cryptop*, int); 142 static int hifn_readramaddr(struct hifn_softc *, int, u_int8_t *); 143 static int hifn_writeramaddr(struct hifn_softc *, int, u_int8_t *); 144 static int hifn_dmamap_aligned(bus_dmamap_t); 145 static int hifn_dmamap_load_src(struct hifn_softc *, 146 struct hifn_command *); 147 static int hifn_dmamap_load_dst(struct hifn_softc *, 148 struct hifn_command *); 149 static int hifn_init_pubrng(struct 
hifn_softc *); 150 static void hifn_rng(void *); 151 static void hifn_tick(void *); 152 static void hifn_abort(struct hifn_softc *); 153 static void hifn_alloc_slot(struct hifn_softc *, int *, int *, int *, 154 int *); 155 static void hifn_write_4(struct hifn_softc *, int, bus_size_t, u_int32_t); 156 static u_int32_t hifn_read_4(struct hifn_softc *, int, bus_size_t); 157 #ifdef HAVE_CRYPTO_LZS 158 static int hifn_compression(struct hifn_softc *, struct cryptop *, 159 struct hifn_command *); 160 static struct mbuf *hifn_mkmbuf_chain(int, struct mbuf *); 161 static int hifn_compress_enter(struct hifn_softc *, struct hifn_command *); 162 static void hifn_callback_comp(struct hifn_softc *, struct hifn_command *, 163 u_int8_t *); 164 #endif /* HAVE_CRYPTO_LZS */ 165 166 167 struct hifn_stats hifnstats; 168 169 static const struct hifn_product { 170 pci_vendor_id_t hifn_vendor; 171 pci_product_id_t hifn_product; 172 int hifn_flags; 173 const char *hifn_name; 174 } hifn_products[] = { 175 { PCI_VENDOR_INVERTEX, PCI_PRODUCT_INVERTEX_AEON, 176 0, 177 "Invertex AEON", 178 }, 179 180 { PCI_VENDOR_HIFN, PCI_PRODUCT_HIFN_7751, 181 0, 182 "Hifn 7751", 183 }, 184 { PCI_VENDOR_NETSEC, PCI_PRODUCT_NETSEC_7751, 185 0, 186 "Hifn 7751 (NetSec)" 187 }, 188 189 { PCI_VENDOR_HIFN, PCI_PRODUCT_HIFN_7811, 190 HIFN_IS_7811 | HIFN_HAS_RNG | HIFN_HAS_LEDS | HIFN_NO_BURSTWRITE, 191 "Hifn 7811", 192 }, 193 194 { PCI_VENDOR_HIFN, PCI_PRODUCT_HIFN_7951, 195 HIFN_HAS_RNG | HIFN_HAS_PUBLIC, 196 "Hifn 7951", 197 }, 198 199 { PCI_VENDOR_HIFN, PCI_PRODUCT_HIFN_7955, 200 HIFN_HAS_RNG | HIFN_HAS_PUBLIC | HIFN_IS_7956 | HIFN_HAS_AES, 201 "Hifn 7955", 202 }, 203 204 { PCI_VENDOR_HIFN, PCI_PRODUCT_HIFN_7956, 205 HIFN_HAS_RNG | HIFN_HAS_PUBLIC | HIFN_IS_7956 | HIFN_HAS_AES, 206 "Hifn 7956", 207 }, 208 209 210 { 0, 0, 211 0, 212 NULL 213 } 214 }; 215 216 static const struct hifn_product * 217 hifn_lookup(const struct pci_attach_args *pa) 218 { 219 const struct hifn_product *hp; 220 221 for (hp = 
hifn_products; hp->hifn_name != NULL; hp++) { 222 if (PCI_VENDOR(pa->pa_id) == hp->hifn_vendor && 223 PCI_PRODUCT(pa->pa_id) == hp->hifn_product) 224 return (hp); 225 } 226 return (NULL); 227 } 228 229 static int 230 hifn_probe(device_t parent, cfdata_t match, void *aux) 231 { 232 struct pci_attach_args *pa = aux; 233 234 if (hifn_lookup(pa) != NULL) 235 return 1; 236 237 return 0; 238 } 239 240 static void 241 hifn_attach(device_t parent, device_t self, void *aux) 242 { 243 struct hifn_softc *sc = device_private(self); 244 struct pci_attach_args *pa = aux; 245 const struct hifn_product *hp; 246 pci_chipset_tag_t pc = pa->pa_pc; 247 pci_intr_handle_t ih; 248 const char *intrstr = NULL; 249 const char *hifncap; 250 char rbase; 251 bus_size_t iosize0, iosize1; 252 u_int32_t cmd; 253 u_int16_t ena; 254 bus_dma_segment_t seg; 255 bus_dmamap_t dmamap; 256 int rseg; 257 void *kva; 258 259 hp = hifn_lookup(pa); 260 if (hp == NULL) { 261 printf("\n"); 262 panic("hifn_attach: impossible"); 263 } 264 265 aprint_naive(": Crypto processor\n"); 266 aprint_normal(": %s, rev. 
%d\n", hp->hifn_name, 267 PCI_REVISION(pa->pa_class)); 268 269 sc->sc_pci_pc = pa->pa_pc; 270 sc->sc_pci_tag = pa->pa_tag; 271 272 sc->sc_flags = hp->hifn_flags; 273 274 cmd = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG); 275 cmd |= PCI_COMMAND_MASTER_ENABLE; 276 pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, cmd); 277 278 if (pci_mapreg_map(pa, HIFN_BAR0, PCI_MAPREG_TYPE_MEM, 0, 279 &sc->sc_st0, &sc->sc_sh0, NULL, &iosize0)) { 280 aprint_error_dev(&sc->sc_dv, "can't map mem space %d\n", 0); 281 return; 282 } 283 284 if (pci_mapreg_map(pa, HIFN_BAR1, PCI_MAPREG_TYPE_MEM, 0, 285 &sc->sc_st1, &sc->sc_sh1, NULL, &iosize1)) { 286 aprint_error_dev(&sc->sc_dv, "can't find mem space %d\n", 1); 287 goto fail_io0; 288 } 289 290 hifn_set_retry(sc); 291 292 if (sc->sc_flags & HIFN_NO_BURSTWRITE) { 293 sc->sc_waw_lastgroup = -1; 294 sc->sc_waw_lastreg = 1; 295 } 296 297 sc->sc_dmat = pa->pa_dmat; 298 if (bus_dmamem_alloc(sc->sc_dmat, sizeof(*sc->sc_dma), PAGE_SIZE, 0, 299 &seg, 1, &rseg, BUS_DMA_NOWAIT)) { 300 aprint_error_dev(&sc->sc_dv, "can't alloc DMA buffer\n"); 301 goto fail_io1; 302 } 303 if (bus_dmamem_map(sc->sc_dmat, &seg, rseg, sizeof(*sc->sc_dma), &kva, 304 BUS_DMA_NOWAIT)) { 305 aprint_error_dev(&sc->sc_dv, "can't map DMA buffers (%lu bytes)\n", 306 (u_long)sizeof(*sc->sc_dma)); 307 bus_dmamem_free(sc->sc_dmat, &seg, rseg); 308 goto fail_io1; 309 } 310 if (bus_dmamap_create(sc->sc_dmat, sizeof(*sc->sc_dma), 1, 311 sizeof(*sc->sc_dma), 0, BUS_DMA_NOWAIT, &dmamap)) { 312 aprint_error_dev(&sc->sc_dv, "can't create DMA map\n"); 313 bus_dmamem_unmap(sc->sc_dmat, kva, sizeof(*sc->sc_dma)); 314 bus_dmamem_free(sc->sc_dmat, &seg, rseg); 315 goto fail_io1; 316 } 317 if (bus_dmamap_load(sc->sc_dmat, dmamap, kva, sizeof(*sc->sc_dma), 318 NULL, BUS_DMA_NOWAIT)) { 319 aprint_error_dev(&sc->sc_dv, "can't load DMA map\n"); 320 bus_dmamap_destroy(sc->sc_dmat, dmamap); 321 bus_dmamem_unmap(sc->sc_dmat, kva, sizeof(*sc->sc_dma)); 322 bus_dmamem_free(sc->sc_dmat, 
&seg, rseg); 323 goto fail_io1; 324 } 325 sc->sc_dmamap = dmamap; 326 sc->sc_dma = (struct hifn_dma *)kva; 327 memset(sc->sc_dma, 0, sizeof(*sc->sc_dma)); 328 329 hifn_reset_board(sc, 0); 330 331 if ((hifncap = hifn_enable_crypto(sc, pa->pa_id)) == NULL) { 332 aprint_error_dev(&sc->sc_dv, "crypto enabling failed\n"); 333 goto fail_mem; 334 } 335 hifn_reset_puc(sc); 336 337 hifn_init_dma(sc); 338 hifn_init_pci_registers(sc); 339 340 /* XXX can't dynamically determine ram type for 795x; force dram */ 341 if (sc->sc_flags & HIFN_IS_7956) 342 sc->sc_drammodel = 1; 343 else if (hifn_ramtype(sc)) 344 goto fail_mem; 345 346 if (sc->sc_drammodel == 0) 347 hifn_sramsize(sc); 348 else 349 hifn_dramsize(sc); 350 351 /* 352 * Workaround for NetSec 7751 rev A: half ram size because two 353 * of the address lines were left floating 354 */ 355 if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_NETSEC && 356 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_NETSEC_7751 && 357 PCI_REVISION(pa->pa_class) == 0x61) 358 sc->sc_ramsize >>= 1; 359 360 if (pci_intr_map(pa, &ih)) { 361 aprint_error_dev(&sc->sc_dv, "couldn't map interrupt\n"); 362 goto fail_mem; 363 } 364 intrstr = pci_intr_string(pc, ih); 365 #ifdef __OpenBSD__ 366 sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, hifn_intr, sc, 367 self->dv_xname); 368 #else 369 sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, hifn_intr, sc); 370 #endif 371 if (sc->sc_ih == NULL) { 372 aprint_error_dev(&sc->sc_dv, "couldn't establish interrupt\n"); 373 if (intrstr != NULL) 374 aprint_error(" at %s", intrstr); 375 aprint_error("\n"); 376 goto fail_mem; 377 } 378 379 hifn_sessions(sc); 380 381 rseg = sc->sc_ramsize / 1024; 382 rbase = 'K'; 383 if (sc->sc_ramsize >= (1024 * 1024)) { 384 rbase = 'M'; 385 rseg /= 1024; 386 } 387 aprint_normal_dev(&sc->sc_dv, "%s, %d%cB %cRAM, interrupting at %s\n", 388 hifncap, rseg, rbase, 389 sc->sc_drammodel ? 
'D' : 'S', intrstr); 390 391 sc->sc_cid = crypto_get_driverid(0); 392 if (sc->sc_cid < 0) { 393 aprint_error_dev(&sc->sc_dv, "couldn't get crypto driver id\n"); 394 goto fail_intr; 395 } 396 397 WRITE_REG_0(sc, HIFN_0_PUCNFG, 398 READ_REG_0(sc, HIFN_0_PUCNFG) | HIFN_PUCNFG_CHIPID); 399 ena = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA; 400 401 switch (ena) { 402 case HIFN_PUSTAT_ENA_2: 403 crypto_register(sc->sc_cid, CRYPTO_3DES_CBC, 0, 0, 404 hifn_newsession, hifn_freesession, hifn_process, sc); 405 crypto_register(sc->sc_cid, CRYPTO_ARC4, 0, 0, 406 hifn_newsession, hifn_freesession, hifn_process, sc); 407 if (sc->sc_flags & HIFN_HAS_AES) 408 crypto_register(sc->sc_cid, CRYPTO_AES_CBC, 0, 0, 409 hifn_newsession, hifn_freesession, 410 hifn_process, sc); 411 /*FALLTHROUGH*/ 412 case HIFN_PUSTAT_ENA_1: 413 crypto_register(sc->sc_cid, CRYPTO_MD5, 0, 0, 414 hifn_newsession, hifn_freesession, hifn_process, sc); 415 crypto_register(sc->sc_cid, CRYPTO_SHA1, 0, 0, 416 hifn_newsession, hifn_freesession, hifn_process, sc); 417 crypto_register(sc->sc_cid, CRYPTO_MD5_HMAC_96, 0, 0, 418 hifn_newsession, hifn_freesession, hifn_process, sc); 419 crypto_register(sc->sc_cid, CRYPTO_SHA1_HMAC_96, 0, 0, 420 hifn_newsession, hifn_freesession, hifn_process, sc); 421 crypto_register(sc->sc_cid, CRYPTO_DES_CBC, 0, 0, 422 hifn_newsession, hifn_freesession, hifn_process, sc); 423 break; 424 } 425 426 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, 0, 427 sc->sc_dmamap->dm_mapsize, 428 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 429 430 if (sc->sc_flags & (HIFN_HAS_PUBLIC | HIFN_HAS_RNG)) 431 hifn_init_pubrng(sc); 432 433 #ifdef __OpenBSD__ 434 timeout_set(&sc->sc_tickto, hifn_tick, sc); 435 timeout_add(&sc->sc_tickto, hz); 436 #else 437 callout_init(&sc->sc_tickto, 0); 438 callout_reset(&sc->sc_tickto, hz, hifn_tick, sc); 439 #endif 440 return; 441 442 fail_intr: 443 pci_intr_disestablish(pc, sc->sc_ih); 444 fail_mem: 445 bus_dmamap_unload(sc->sc_dmat, dmamap); 446 
bus_dmamap_destroy(sc->sc_dmat, dmamap); 447 bus_dmamem_unmap(sc->sc_dmat, kva, sizeof(*sc->sc_dma)); 448 bus_dmamem_free(sc->sc_dmat, &seg, rseg); 449 450 /* Turn off DMA polling */ 451 WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET | 452 HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE); 453 454 fail_io1: 455 bus_space_unmap(sc->sc_st1, sc->sc_sh1, iosize1); 456 fail_io0: 457 bus_space_unmap(sc->sc_st0, sc->sc_sh0, iosize0); 458 } 459 460 static int 461 hifn_init_pubrng(struct hifn_softc *sc) 462 { 463 u_int32_t r; 464 int i; 465 466 if ((sc->sc_flags & HIFN_IS_7811) == 0) { 467 /* Reset 7951 public key/rng engine */ 468 WRITE_REG_1(sc, HIFN_1_PUB_RESET, 469 READ_REG_1(sc, HIFN_1_PUB_RESET) | HIFN_PUBRST_RESET); 470 471 for (i = 0; i < 100; i++) { 472 DELAY(1000); 473 if ((READ_REG_1(sc, HIFN_1_PUB_RESET) & 474 HIFN_PUBRST_RESET) == 0) 475 break; 476 } 477 478 if (i == 100) { 479 printf("%s: public key init failed\n", 480 device_xname(&sc->sc_dv)); 481 return (1); 482 } 483 } 484 485 /* Enable the rng, if available */ 486 if (sc->sc_flags & HIFN_HAS_RNG) { 487 if (sc->sc_flags & HIFN_IS_7811) { 488 r = READ_REG_1(sc, HIFN_1_7811_RNGENA); 489 if (r & HIFN_7811_RNGENA_ENA) { 490 r &= ~HIFN_7811_RNGENA_ENA; 491 WRITE_REG_1(sc, HIFN_1_7811_RNGENA, r); 492 } 493 WRITE_REG_1(sc, HIFN_1_7811_RNGCFG, 494 HIFN_7811_RNGCFG_DEFL); 495 r |= HIFN_7811_RNGENA_ENA; 496 WRITE_REG_1(sc, HIFN_1_7811_RNGENA, r); 497 } else 498 WRITE_REG_1(sc, HIFN_1_RNG_CONFIG, 499 READ_REG_1(sc, HIFN_1_RNG_CONFIG) | 500 HIFN_RNGCFG_ENA); 501 502 /* 503 * The Hifn RNG documentation states that at their 504 * recommended "conservative" RNG config values, 505 * the RNG must warm up for 0.4s before providing 506 * data that meet their worst-case estimate of 0.06 507 * bits of random data per output register bit. 508 */ 509 DELAY(4000); 510 511 #ifdef __NetBSD__ 512 /* 513 * XXX Careful! 
The use of RND_FLAG_NO_ESTIMATE 514 * XXX here is unobvious: we later feed raw bits 515 * XXX into the "entropy pool" with rnd_add_data, 516 * XXX explicitly supplying an entropy estimate. 517 * XXX In this context, NO_ESTIMATE serves only 518 * XXX to prevent rnd_add_data from trying to 519 * XXX use the *time at which we added the data* 520 * XXX as entropy, which is not a good idea since 521 * XXX we add data periodically from a callout. 522 */ 523 rnd_attach_source(&sc->sc_rnd_source, device_xname(&sc->sc_dv), 524 RND_TYPE_RNG, RND_FLAG_NO_ESTIMATE); 525 #endif 526 527 sc->sc_rngfirst = 1; 528 if (hz >= 100) 529 sc->sc_rnghz = hz / 100; 530 else 531 sc->sc_rnghz = 1; 532 #ifdef __OpenBSD__ 533 timeout_set(&sc->sc_rngto, hifn_rng, sc); 534 #else /* !__OpenBSD__ */ 535 callout_init(&sc->sc_rngto, 0); 536 #endif /* !__OpenBSD__ */ 537 } 538 539 /* Enable public key engine, if available */ 540 if (sc->sc_flags & HIFN_HAS_PUBLIC) { 541 WRITE_REG_1(sc, HIFN_1_PUB_IEN, HIFN_PUBIEN_DONE); 542 sc->sc_dmaier |= HIFN_DMAIER_PUBDONE; 543 WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier); 544 } 545 546 /* Call directly into the RNG once to prime the pool. */ 547 hifn_rng(sc); /* Sets callout/timeout at end */ 548 549 return (0); 550 } 551 552 static void 553 hifn_rng(void *vsc) 554 { 555 struct hifn_softc *sc = vsc; 556 #ifdef __NetBSD__ 557 u_int32_t num[HIFN_RNG_BITSPER * RND_ENTROPY_THRESHOLD]; 558 #else 559 u_int32_t num[2]; 560 #endif 561 u_int32_t sts; 562 int i; 563 564 if (sc->sc_flags & HIFN_IS_7811) { 565 for (i = 0; i < 5; i++) { /* XXX why 5? */ 566 sts = READ_REG_1(sc, HIFN_1_7811_RNGSTS); 567 if (sts & HIFN_7811_RNGSTS_UFL) { 568 printf("%s: RNG underflow: disabling\n", 569 device_xname(&sc->sc_dv)); 570 return; 571 } 572 if ((sts & HIFN_7811_RNGSTS_RDY) == 0) 573 break; 574 575 /* 576 * There are at least two words in the RNG FIFO 577 * at this point. 
578 */ 579 num[0] = READ_REG_1(sc, HIFN_1_7811_RNGDAT); 580 num[1] = READ_REG_1(sc, HIFN_1_7811_RNGDAT); 581 582 if (sc->sc_rngfirst) 583 sc->sc_rngfirst = 0; 584 #ifdef __NetBSD__ 585 rnd_add_data(&sc->sc_rnd_source, num, 586 2 * sizeof(num[0]), 587 (2 * sizeof(num[0]) * NBBY) / 588 HIFN_RNG_BITSPER); 589 #else 590 /* 591 * XXX This is a really bad idea. 592 * XXX Hifn estimate as little as 0.06 593 * XXX actual bits of entropy per output 594 * XXX register bit. How can we tell the 595 * XXX kernel RNG subsystem we're handing 596 * XXX it 64 "true" random bits, for any 597 * XXX sane value of "true"? 598 * XXX 599 * XXX The right thing to do here, if we 600 * XXX cannot supply an estimate ourselves, 601 * XXX would be to hash the bits locally. 602 */ 603 add_true_randomness(num[0]); 604 add_true_randomness(num[1]); 605 #endif 606 607 } 608 } else { 609 #ifdef __NetBSD__ 610 /* First time through, try to help fill the pool. */ 611 int nwords = sc->sc_rngfirst ? 612 sizeof(num) / sizeof(num[0]) : 4; 613 #else 614 int nwords = 2; 615 #endif 616 /* 617 * We must be *extremely* careful here. The Hifn 618 * 795x differ from the published 6500 RNG design 619 * in more ways than the obvious lack of the output 620 * FIFO and LFSR control registers. In fact, there 621 * is only one LFSR, instead of the 6500's two, and 622 * it's 32 bits, not 31. 623 * 624 * Further, a block diagram obtained from Hifn shows 625 * a very curious latching of this register: the LFSR 626 * rotates at a frequency of RNG_Clk / 8, but the 627 * RNG_Data register is latched at a frequency of 628 * RNG_Clk, which means that it is possible for 629 * consecutive reads of the RNG_Data register to read 630 * identical state from the LFSR. The simplest 631 * workaround seems to be to read eight samples from 632 * the register for each one that we use. Since each 633 * read must require at least one PCI cycle, and 634 * RNG_Clk is at least PCI_Clk, this is safe. 
635 */ 636 637 638 if (sc->sc_rngfirst) { 639 sc->sc_rngfirst = 0; 640 } 641 642 643 for(i = 0 ; i < nwords * 8; i++) 644 { 645 volatile u_int32_t regtmp; 646 regtmp = READ_REG_1(sc, HIFN_1_RNG_DATA); 647 num[i / 8] = regtmp; 648 } 649 #ifdef __NetBSD__ 650 rnd_add_data(&sc->sc_rnd_source, num, 651 nwords * sizeof(num[0]), 652 (nwords * sizeof(num[0]) * NBBY) / 653 HIFN_RNG_BITSPER); 654 #else 655 /* XXX a bad idea; see 7811 block above */ 656 add_true_randomness(num[0]); 657 #endif 658 } 659 660 #ifdef __OpenBSD__ 661 timeout_add(&sc->sc_rngto, sc->sc_rnghz); 662 #else 663 callout_reset(&sc->sc_rngto, sc->sc_rnghz, hifn_rng, sc); 664 #endif 665 } 666 667 static void 668 hifn_puc_wait(struct hifn_softc *sc) 669 { 670 int i; 671 672 for (i = 5000; i > 0; i--) { 673 DELAY(1); 674 if (!(READ_REG_0(sc, HIFN_0_PUCTRL) & HIFN_PUCTRL_RESET)) 675 break; 676 } 677 if (!i) 678 printf("%s: proc unit did not reset\n", device_xname(&sc->sc_dv)); 679 } 680 681 /* 682 * Reset the processing unit. 683 */ 684 static void 685 hifn_reset_puc(struct hifn_softc *sc) 686 { 687 /* Reset processing unit */ 688 WRITE_REG_0(sc, HIFN_0_PUCTRL, HIFN_PUCTRL_DMAENA); 689 hifn_puc_wait(sc); 690 } 691 692 static void 693 hifn_set_retry(struct hifn_softc *sc) 694 { 695 u_int32_t r; 696 697 r = pci_conf_read(sc->sc_pci_pc, sc->sc_pci_tag, HIFN_TRDY_TIMEOUT); 698 r &= 0xffff0000; 699 pci_conf_write(sc->sc_pci_pc, sc->sc_pci_tag, HIFN_TRDY_TIMEOUT, r); 700 } 701 702 /* 703 * Resets the board. Values in the regesters are left as is 704 * from the reset (i.e. initial values are assigned elsewhere). 705 */ 706 static void 707 hifn_reset_board(struct hifn_softc *sc, int full) 708 { 709 u_int32_t reg; 710 711 /* 712 * Set polling in the DMA configuration register to zero. 0x7 avoids 713 * resetting the board and zeros out the other fields. 
714 */ 715 WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET | 716 HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE); 717 718 /* 719 * Now that polling has been disabled, we have to wait 1 ms 720 * before resetting the board. 721 */ 722 DELAY(1000); 723 724 /* Reset the DMA unit */ 725 if (full) { 726 WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MODE); 727 DELAY(1000); 728 } else { 729 WRITE_REG_1(sc, HIFN_1_DMA_CNFG, 730 HIFN_DMACNFG_MODE | HIFN_DMACNFG_MSTRESET); 731 hifn_reset_puc(sc); 732 } 733 734 memset(sc->sc_dma, 0, sizeof(*sc->sc_dma)); 735 736 /* Bring dma unit out of reset */ 737 WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET | 738 HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE); 739 740 hifn_puc_wait(sc); 741 742 hifn_set_retry(sc); 743 744 if (sc->sc_flags & HIFN_IS_7811) { 745 for (reg = 0; reg < 1000; reg++) { 746 if (READ_REG_1(sc, HIFN_1_7811_MIPSRST) & 747 HIFN_MIPSRST_CRAMINIT) 748 break; 749 DELAY(1000); 750 } 751 if (reg == 1000) 752 printf(": cram init timeout\n"); 753 } 754 } 755 756 static u_int32_t 757 hifn_next_signature(u_int32_t a, u_int cnt) 758 { 759 int i; 760 u_int32_t v; 761 762 for (i = 0; i < cnt; i++) { 763 764 /* get the parity */ 765 v = a & 0x80080125; 766 v ^= v >> 16; 767 v ^= v >> 8; 768 v ^= v >> 4; 769 v ^= v >> 2; 770 v ^= v >> 1; 771 772 a = (v & 1) ^ (a << 1); 773 } 774 775 return a; 776 } 777 778 static struct pci2id { 779 u_short pci_vendor; 780 u_short pci_prod; 781 char card_id[13]; 782 } const pci2id[] = { 783 { 784 PCI_VENDOR_HIFN, 785 PCI_PRODUCT_HIFN_7951, 786 { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 787 0x00, 0x00, 0x00, 0x00, 0x00 } 788 }, { 789 PCI_VENDOR_HIFN, 790 PCI_PRODUCT_HIFN_7955, 791 { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 792 0x00, 0x00, 0x00, 0x00, 0x00 } 793 }, { 794 PCI_VENDOR_HIFN, 795 PCI_PRODUCT_HIFN_7956, 796 { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 797 0x00, 0x00, 0x00, 0x00, 0x00 } 798 }, { 799 PCI_VENDOR_NETSEC, 800 PCI_PRODUCT_NETSEC_7751, 801 { 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 802 0x00, 0x00, 0x00, 0x00, 0x00 } 803 }, { 804 PCI_VENDOR_INVERTEX, 805 PCI_PRODUCT_INVERTEX_AEON, 806 { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 807 0x00, 0x00, 0x00, 0x00, 0x00 } 808 }, { 809 PCI_VENDOR_HIFN, 810 PCI_PRODUCT_HIFN_7811, 811 { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 812 0x00, 0x00, 0x00, 0x00, 0x00 } 813 }, { 814 /* 815 * Other vendors share this PCI ID as well, such as 816 * http://www.powercrypt.com, and obviously they also 817 * use the same key. 818 */ 819 PCI_VENDOR_HIFN, 820 PCI_PRODUCT_HIFN_7751, 821 { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 822 0x00, 0x00, 0x00, 0x00, 0x00 } 823 }, 824 }; 825 826 /* 827 * Checks to see if crypto is already enabled. If crypto isn't enable, 828 * "hifn_enable_crypto" is called to enable it. The check is important, 829 * as enabling crypto twice will lock the board. 830 */ 831 static const char * 832 hifn_enable_crypto(struct hifn_softc *sc, pcireg_t pciid) 833 { 834 u_int32_t dmacfg, ramcfg, encl, addr, i; 835 const char *offtbl = NULL; 836 837 for (i = 0; i < sizeof(pci2id)/sizeof(pci2id[0]); i++) { 838 if (pci2id[i].pci_vendor == PCI_VENDOR(pciid) && 839 pci2id[i].pci_prod == PCI_PRODUCT(pciid)) { 840 offtbl = pci2id[i].card_id; 841 break; 842 } 843 } 844 845 if (offtbl == NULL) { 846 #ifdef HIFN_DEBUG 847 aprint_debug_dev(&sc->sc_dv, "Unknown card!\n"); 848 #endif 849 return (NULL); 850 } 851 852 ramcfg = READ_REG_0(sc, HIFN_0_PUCNFG); 853 dmacfg = READ_REG_1(sc, HIFN_1_DMA_CNFG); 854 855 /* 856 * The RAM config register's encrypt level bit needs to be set before 857 * every read performed on the encryption level register. 858 */ 859 WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg | HIFN_PUCNFG_CHIPID); 860 861 encl = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA; 862 863 /* 864 * Make sure we don't re-unlock. Two unlocks kills chip until the 865 * next reboot. 
866 */ 867 if (encl == HIFN_PUSTAT_ENA_1 || encl == HIFN_PUSTAT_ENA_2) { 868 #ifdef HIFN_DEBUG 869 aprint_debug_dev(&sc->sc_dv, "Strong Crypto already enabled!\n"); 870 #endif 871 goto report; 872 } 873 874 if (encl != 0 && encl != HIFN_PUSTAT_ENA_0) { 875 #ifdef HIFN_DEBUG 876 aprint_debug_dev(&sc->sc_dv, "Unknown encryption level\n"); 877 #endif 878 return (NULL); 879 } 880 881 WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_UNLOCK | 882 HIFN_DMACNFG_MSTRESET | HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE); 883 DELAY(1000); 884 addr = READ_REG_1(sc, HIFN_1_UNLOCK_SECRET1); 885 DELAY(1000); 886 WRITE_REG_1(sc, HIFN_1_UNLOCK_SECRET2, 0); 887 DELAY(1000); 888 889 for (i = 0; i <= 12; i++) { 890 addr = hifn_next_signature(addr, offtbl[i] + 0x101); 891 WRITE_REG_1(sc, HIFN_1_UNLOCK_SECRET2, addr); 892 893 DELAY(1000); 894 } 895 896 WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg | HIFN_PUCNFG_CHIPID); 897 encl = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA; 898 899 #ifdef HIFN_DEBUG 900 if (encl != HIFN_PUSTAT_ENA_1 && encl != HIFN_PUSTAT_ENA_2) 901 aprint_debug("Encryption engine is permanently locked until next system reset."); 902 else 903 aprint_debug("Encryption engine enabled successfully!"); 904 #endif 905 906 report: 907 WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg); 908 WRITE_REG_1(sc, HIFN_1_DMA_CNFG, dmacfg); 909 910 switch (encl) { 911 case HIFN_PUSTAT_ENA_0: 912 return ("LZS-only (no encr/auth)"); 913 914 case HIFN_PUSTAT_ENA_1: 915 return ("DES"); 916 917 case HIFN_PUSTAT_ENA_2: 918 if (sc->sc_flags & HIFN_HAS_AES) 919 return ("3DES/AES"); 920 else 921 return ("3DES"); 922 923 default: 924 return ("disabled"); 925 } 926 /* NOTREACHED */ 927 } 928 929 /* 930 * Give initial values to the registers listed in the "Register Space" 931 * section of the HIFN Software Development reference manual. 
 */
static void
hifn_init_pci_registers(struct hifn_softc *sc)
{
	/* write fixed values needed by the Initialization registers */
	WRITE_REG_0(sc, HIFN_0_PUCTRL, HIFN_PUCTRL_DMAENA);
	WRITE_REG_0(sc, HIFN_0_FIFOCNFG, HIFN_FIFOCNFG_THRESHOLD);
	WRITE_REG_0(sc, HIFN_0_PUIER, HIFN_PUIER_DSTOVER);

	/*
	 * write all 4 ring address registers: each ring lives inside the
	 * single contiguous hifn_dma area, so program the bus address of
	 * the DMA map plus the ring's offset within the structure.
	 */
	WRITE_REG_1(sc, HIFN_1_DMA_CRAR, sc->sc_dmamap->dm_segs[0].ds_addr +
	    offsetof(struct hifn_dma, cmdr[0]));
	WRITE_REG_1(sc, HIFN_1_DMA_SRAR, sc->sc_dmamap->dm_segs[0].ds_addr +
	    offsetof(struct hifn_dma, srcr[0]));
	WRITE_REG_1(sc, HIFN_1_DMA_DRAR, sc->sc_dmamap->dm_segs[0].ds_addr +
	    offsetof(struct hifn_dma, dstr[0]));
	WRITE_REG_1(sc, HIFN_1_DMA_RRAR, sc->sc_dmamap->dm_segs[0].ds_addr +
	    offsetof(struct hifn_dma, resr[0]));

	DELAY(2000);

	/*
	 * write status register: disable all four ring engines and
	 * acknowledge (write-1-to-clear) every pending status bit,
	 * including the 7811-only illegal read/write indications.
	 */
	WRITE_REG_1(sc, HIFN_1_DMA_CSR,
	    HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS |
	    HIFN_DMACSR_S_CTRL_DIS | HIFN_DMACSR_C_CTRL_DIS |
	    HIFN_DMACSR_D_ABORT | HIFN_DMACSR_D_DONE | HIFN_DMACSR_D_LAST |
	    HIFN_DMACSR_D_WAIT | HIFN_DMACSR_D_OVER |
	    HIFN_DMACSR_R_ABORT | HIFN_DMACSR_R_DONE | HIFN_DMACSR_R_LAST |
	    HIFN_DMACSR_R_WAIT | HIFN_DMACSR_R_OVER |
	    HIFN_DMACSR_S_ABORT | HIFN_DMACSR_S_DONE | HIFN_DMACSR_S_LAST |
	    HIFN_DMACSR_S_WAIT |
	    HIFN_DMACSR_C_ABORT | HIFN_DMACSR_C_DONE | HIFN_DMACSR_C_LAST |
	    HIFN_DMACSR_C_WAIT |
	    HIFN_DMACSR_ENGINE |
	    ((sc->sc_flags & HIFN_HAS_PUBLIC) ?
		HIFN_DMACSR_PUBDONE : 0) |
	    ((sc->sc_flags & HIFN_IS_7811) ?
		HIFN_DMACSR_ILLW | HIFN_DMACSR_ILLR : 0));

	/* No ring is active yet; build the interrupt-enable mask. */
	sc->sc_d_busy = sc->sc_r_busy = sc->sc_s_busy = sc->sc_c_busy = 0;
	sc->sc_dmaier |= HIFN_DMAIER_R_DONE | HIFN_DMAIER_C_ABORT |
	    HIFN_DMAIER_D_OVER | HIFN_DMAIER_R_OVER |
	    HIFN_DMAIER_S_ABORT | HIFN_DMAIER_D_ABORT | HIFN_DMAIER_R_ABORT |
	    HIFN_DMAIER_ENGINE |
	    ((sc->sc_flags & HIFN_IS_7811) ?
		HIFN_DMAIER_ILLW | HIFN_DMAIER_ILLR : 0);
	sc->sc_dmaier &= ~HIFN_DMAIER_C_WAIT;
	WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
	CLR_LED(sc, HIFN_MIPSRST_LED0 | HIFN_MIPSRST_LED1 | HIFN_MIPSRST_LED2);

	/* 795x uses a PLL register instead of the DRAM refresh field. */
	if (sc->sc_flags & HIFN_IS_7956) {
		WRITE_REG_0(sc, HIFN_0_PUCNFG, HIFN_PUCNFG_COMPSING |
		    HIFN_PUCNFG_TCALLPHASES |
		    HIFN_PUCNFG_TCDRVTOTEM | HIFN_PUCNFG_BUS32);
		WRITE_REG_1(sc, HIFN_1_PLL, HIFN_PLL_7956);
	} else {
		WRITE_REG_0(sc, HIFN_0_PUCNFG, HIFN_PUCNFG_COMPSING |
		    HIFN_PUCNFG_DRFR_128 | HIFN_PUCNFG_TCALLPHASES |
		    HIFN_PUCNFG_TCDRVTOTEM | HIFN_PUCNFG_BUS32 |
		    (sc->sc_drammodel ? HIFN_PUCNFG_DRAM : HIFN_PUCNFG_SRAM));
	}

	WRITE_REG_0(sc, HIFN_0_PUISR, HIFN_PUISR_DSTOVER);
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
	    HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE | HIFN_DMACNFG_LAST |
	    ((HIFN_POLL_FREQUENCY << 16 ) & HIFN_DMACNFG_POLLFREQ) |
	    ((HIFN_POLL_SCALAR << 8) & HIFN_DMACNFG_POLLINVAL));
}

/*
 * The maximum number of sessions supported by the card
 * is dependent on the amount of context ram, which
 * encryption algorithms are enabled, and how compression
 * is configured.  This should be configured before this
 * routine is called.
 */
static void
hifn_sessions(struct hifn_softc *sc)
{
	u_int32_t pucnfg;
	int ctxsize;

	pucnfg = READ_REG_0(sc, HIFN_0_PUCNFG);

	if (pucnfg & HIFN_PUCNFG_COMPSING) {
		/* Context size depends on the encryption configuration bit. */
		if (pucnfg & HIFN_PUCNFG_ENCCNFG)
			ctxsize = 128;
		else
			ctxsize = 512;
		/*
		 * 7955/7956 has internal context memory of 32K
		 */
		if (sc->sc_flags & HIFN_IS_7956)
			sc->sc_maxses = 32768 / ctxsize;
		else
			sc->sc_maxses = 1 +
			    ((sc->sc_ramsize - 32768) / ctxsize);
	}
	else
		sc->sc_maxses = sc->sc_ramsize / 16384;

	/* Cap sessions at 2048 regardless of available context memory. */
	if (sc->sc_maxses > 2048)
		sc->sc_maxses = 2048;
}

/*
 * Determine ram type (sram or dram).
Board should be just out of a reset
 * state when this is called.
 */
static int
hifn_ramtype(struct hifn_softc *sc)
{
	u_int8_t data[8], dataexpect[8];
	int i;

	/*
	 * Write a 0x55 pattern to offset 0 and read it back; if the
	 * round-trip loses data, assume DRAM (sets sc_drammodel).
	 */
	for (i = 0; i < sizeof(data); i++)
		data[i] = dataexpect[i] = 0x55;
	if (hifn_writeramaddr(sc, 0, data))
		return (-1);
	if (hifn_readramaddr(sc, 0, data))
		return (-1);
	if (memcmp(data, dataexpect, sizeof(data)) != 0) {
		sc->sc_drammodel = 1;
		return (0);
	}

	/* Repeat with the complementary 0xaa pattern. */
	for (i = 0; i < sizeof(data); i++)
		data[i] = dataexpect[i] = 0xaa;
	if (hifn_writeramaddr(sc, 0, data))
		return (-1);
	if (hifn_readramaddr(sc, 0, data))
		return (-1);
	if (memcmp(data, dataexpect, sizeof(data)) != 0) {
		sc->sc_drammodel = 1;
		return (0);
	}

	/* Both patterns survived: leave sc_drammodel at its SRAM default. */
	return (0);
}

#define	HIFN_SRAM_MAX		(32 << 20)
#define	HIFN_SRAM_STEP_SIZE	16384
#define	HIFN_SRAM_GRANULARITY	(HIFN_SRAM_MAX / HIFN_SRAM_STEP_SIZE)

/*
 * Probe SRAM size by tagging each 16KB step with its index (written
 * top-down so aliased addresses are overwritten by lower ones), then
 * scanning bottom-up until a tag fails to read back.  The result is
 * left in sc->sc_ramsize; always returns 0.
 */
static int
hifn_sramsize(struct hifn_softc *sc)
{
	u_int32_t a;
	u_int8_t data[8];
	u_int8_t dataexpect[sizeof(data)];
	int32_t i;

	for (i = 0; i < sizeof(data); i++)
		data[i] = dataexpect[i] = i ^ 0x5a;

	/* Write tags from the top down (first 4 bytes carry the index). */
	for (i = HIFN_SRAM_GRANULARITY - 1; i >= 0; i--) {
		a = i * HIFN_SRAM_STEP_SIZE;
		memcpy(data, &i, sizeof(i));
		hifn_writeramaddr(sc, a, data);
	}

	/* Scan upward; the last step that verifies bounds the RAM size. */
	for (i = 0; i < HIFN_SRAM_GRANULARITY; i++) {
		a = i * HIFN_SRAM_STEP_SIZE;
		memcpy(dataexpect, &i, sizeof(i));
		if (hifn_readramaddr(sc, a, data) < 0)
			return (0);
		if (memcmp(data, dataexpect, sizeof(data)) != 0)
			return (0);
		sc->sc_ramsize = a + HIFN_SRAM_STEP_SIZE;
	}

	return (0);
}

/*
 * XXX For dram boards, one should really try all of the
 * HIFN_PUCNFG_DSZ_*'s.  This just assumes that PUCNFG
 * is already set up correctly.
 */
static int
hifn_dramsize(struct hifn_softc *sc)
{
	u_int32_t cnfg;

	if (sc->sc_flags & HIFN_IS_7956) {
		/*
		 * 7955/7956 have a fixed internal ram of only 32K.
		 */
		sc->sc_ramsize = 32768;
	} else {
		/* Decode the DRAM size field of PUCNFG: 2^(field+18) bytes. */
		cnfg = READ_REG_0(sc, HIFN_0_PUCNFG) &
		    HIFN_PUCNFG_DRAMMASK;
		sc->sc_ramsize = 1 << ((cnfg >> 13) + 18);
	}
	return (0);
}

/*
 * Claim the next free slot in each of the four descriptor rings
 * (command, source, destination, result), returning the indices via
 * the out parameters.  When a ring's write index reaches the end, a
 * JUMP descriptor is planted there and the index wraps to 0.
 */
static void
hifn_alloc_slot(struct hifn_softc *sc, int *cmdp, int *srcp, int *dstp,
    int *resp)
{
	struct hifn_dma *dma = sc->sc_dma;

	if (dma->cmdi == HIFN_D_CMD_RSIZE) {
		dma->cmdi = 0;
		dma->cmdr[HIFN_D_CMD_RSIZE].l = htole32(HIFN_D_VALID |
		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
		HIFN_CMDR_SYNC(sc, HIFN_D_CMD_RSIZE,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	}
	*cmdp = dma->cmdi++;
	dma->cmdk = dma->cmdi;

	if (dma->srci == HIFN_D_SRC_RSIZE) {
		dma->srci = 0;
		dma->srcr[HIFN_D_SRC_RSIZE].l = htole32(HIFN_D_VALID |
		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
		HIFN_SRCR_SYNC(sc, HIFN_D_SRC_RSIZE,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	}
	*srcp = dma->srci++;
	dma->srck = dma->srci;

	if (dma->dsti == HIFN_D_DST_RSIZE) {
		dma->dsti = 0;
		dma->dstr[HIFN_D_DST_RSIZE].l = htole32(HIFN_D_VALID |
		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
		HIFN_DSTR_SYNC(sc, HIFN_D_DST_RSIZE,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	}
	*dstp = dma->dsti++;
	dma->dstk = dma->dsti;

	if (dma->resi == HIFN_D_RES_RSIZE) {
		dma->resi = 0;
		dma->resr[HIFN_D_RES_RSIZE].l = htole32(HIFN_D_VALID |
		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
		HIFN_RESR_SYNC(sc, HIFN_D_RES_RSIZE,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	}
	*resp = dma->resi++;
	dma->resk = dma->resi;
}

/*
 * Write 8 bytes at the given on-board RAM address by issuing a
 * "write RAM" base command through the descriptor rings and polling
 * (up to 100 ms) for the result descriptor to complete.
 * Returns 0 on success, -1 on timeout.
 */
static int
hifn_writeramaddr(struct hifn_softc *sc, int addr, u_int8_t *data)
{
	struct hifn_dma *dma = sc->sc_dma;
	struct hifn_base_command wc;
	const u_int32_t masks = HIFN_D_VALID | HIFN_D_LAST | HIFN_D_MASKDONEIRQ;
	int r, cmdi, resi, srci, dsti;

	/* RAM address is encoded as session_num:dest_count (14-bit split). */
	wc.masks = htole16(3 << 13);
	wc.session_num = htole16(addr >> 14);
	wc.total_source_count = htole16(8);
	wc.total_dest_count = htole16(addr & 0x3fff);

	hifn_alloc_slot(sc, &cmdi, &srci, &dsti, &resi);

	WRITE_REG_1(sc, HIFN_1_DMA_CSR,
	    HIFN_DMACSR_C_CTRL_ENA | HIFN_DMACSR_S_CTRL_ENA |
	    HIFN_DMACSR_D_CTRL_ENA | HIFN_DMACSR_R_CTRL_ENA);

	/* build write command */
	memset(dma->command_bufs[cmdi], 0, HIFN_MAX_COMMAND);
	*(struct hifn_base_command *)dma->command_bufs[cmdi] = wc;
	memcpy(&dma->test_src, data, sizeof(dma->test_src));

	dma->srcr[srci].p = htole32(sc->sc_dmamap->dm_segs[0].ds_addr
	    + offsetof(struct hifn_dma, test_src));
	dma->dstr[dsti].p = htole32(sc->sc_dmamap->dm_segs[0].ds_addr
	    + offsetof(struct hifn_dma, test_dst));

	dma->cmdr[cmdi].l = htole32(16 | masks);
	dma->srcr[srci].l = htole32(8 | masks);
	dma->dstr[dsti].l = htole32(4 | masks);
	dma->resr[resi].l = htole32(4 | masks);

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
	    0, sc->sc_dmamap->dm_mapsize,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* Poll for the chip to clear HIFN_D_VALID in the result descriptor. */
	for (r = 10000; r >= 0; r--) {
		DELAY(10);
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
		    0, sc->sc_dmamap->dm_mapsize,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		if ((dma->resr[resi].l & htole32(HIFN_D_VALID)) == 0)
			break;
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
		    0, sc->sc_dmamap->dm_mapsize,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}
	if (r == 0) {
		printf("%s: writeramaddr -- "
		    "result[%d](addr %d) still valid\n",
		    device_xname(&sc->sc_dv), resi, addr);
		r = -1;	/* XXX dead store: the return below takes precedence */
		return (-1);
	} else
		r = 0;

	WRITE_REG_1(sc, HIFN_1_DMA_CSR,
	    HIFN_DMACSR_C_CTRL_DIS | HIFN_DMACSR_S_CTRL_DIS |
	    HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS);

	return (r);
}

/*
 * Read 8 bytes from the given on-board RAM address; mirror image of
 * hifn_writeramaddr() using a "read RAM" base command.
 */
static int
hifn_readramaddr(struct hifn_softc *sc, int addr, u_int8_t *data)
{
	struct hifn_dma *dma = sc->sc_dma;
	struct hifn_base_command rc;
	const u_int32_t masks = HIFN_D_VALID | HIFN_D_LAST | HIFN_D_MASKDONEIRQ;
	int r, cmdi, srci, dsti, resi;

	rc.masks = htole16(2 << 13);
	rc.session_num = htole16(addr >> 14);
	rc.total_source_count = htole16(addr & 0x3fff);
	rc.total_dest_count = htole16(8);

	hifn_alloc_slot(sc, &cmdi, &srci, &dsti, &resi);

	WRITE_REG_1(sc, HIFN_1_DMA_CSR,
	    HIFN_DMACSR_C_CTRL_ENA | HIFN_DMACSR_S_CTRL_ENA |
	    HIFN_DMACSR_D_CTRL_ENA | HIFN_DMACSR_R_CTRL_ENA);

	memset(dma->command_bufs[cmdi], 0, HIFN_MAX_COMMAND);
	*(struct hifn_base_command *)dma->command_bufs[cmdi] = rc;

	dma->srcr[srci].p = htole32(sc->sc_dmamap->dm_segs[0].ds_addr +
	    offsetof(struct hifn_dma, test_src));
	dma->test_src = 0;
	dma->dstr[dsti].p = htole32(sc->sc_dmamap->dm_segs[0].ds_addr +
	    offsetof(struct hifn_dma, test_dst));
	dma->test_dst = 0;
	dma->cmdr[cmdi].l = htole32(8 | masks);
	dma->srcr[srci].l = htole32(8 | masks);
	dma->dstr[dsti].l = htole32(8 | masks);
	dma->resr[resi].l = htole32(HIFN_MAX_RESULT | masks);

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
	    0, sc->sc_dmamap->dm_mapsize,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	for (r = 10000; r >= 0; r--) {
		DELAY(10);
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
		    0, sc->sc_dmamap->dm_mapsize,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		if ((dma->resr[resi].l & htole32(HIFN_D_VALID)) == 0)
			break;
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
		    0, sc->sc_dmamap->dm_mapsize,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}
	if (r == 0) {
		printf("%s: readramaddr -- "
		    "result[%d](addr %d) still valid\n",
device_xname(&sc->sc_dv), resi, addr); 1293 r = -1; 1294 } else { 1295 r = 0; 1296 memcpy(data, &dma->test_dst, sizeof(dma->test_dst)); 1297 } 1298 1299 WRITE_REG_1(sc, HIFN_1_DMA_CSR, 1300 HIFN_DMACSR_C_CTRL_DIS | HIFN_DMACSR_S_CTRL_DIS | 1301 HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS); 1302 1303 return (r); 1304 } 1305 1306 /* 1307 * Initialize the descriptor rings. 1308 */ 1309 static void 1310 hifn_init_dma(struct hifn_softc *sc) 1311 { 1312 struct hifn_dma *dma = sc->sc_dma; 1313 int i; 1314 1315 hifn_set_retry(sc); 1316 1317 /* initialize static pointer values */ 1318 for (i = 0; i < HIFN_D_CMD_RSIZE; i++) 1319 dma->cmdr[i].p = htole32(sc->sc_dmamap->dm_segs[0].ds_addr + 1320 offsetof(struct hifn_dma, command_bufs[i][0])); 1321 for (i = 0; i < HIFN_D_RES_RSIZE; i++) 1322 dma->resr[i].p = htole32(sc->sc_dmamap->dm_segs[0].ds_addr + 1323 offsetof(struct hifn_dma, result_bufs[i][0])); 1324 1325 dma->cmdr[HIFN_D_CMD_RSIZE].p = 1326 htole32(sc->sc_dmamap->dm_segs[0].ds_addr + 1327 offsetof(struct hifn_dma, cmdr[0])); 1328 dma->srcr[HIFN_D_SRC_RSIZE].p = 1329 htole32(sc->sc_dmamap->dm_segs[0].ds_addr + 1330 offsetof(struct hifn_dma, srcr[0])); 1331 dma->dstr[HIFN_D_DST_RSIZE].p = 1332 htole32(sc->sc_dmamap->dm_segs[0].ds_addr + 1333 offsetof(struct hifn_dma, dstr[0])); 1334 dma->resr[HIFN_D_RES_RSIZE].p = 1335 htole32(sc->sc_dmamap->dm_segs[0].ds_addr + 1336 offsetof(struct hifn_dma, resr[0])); 1337 1338 dma->cmdu = dma->srcu = dma->dstu = dma->resu = 0; 1339 dma->cmdi = dma->srci = dma->dsti = dma->resi = 0; 1340 dma->cmdk = dma->srck = dma->dstk = dma->resk = 0; 1341 } 1342 1343 /* 1344 * Writes out the raw command buffer space. Returns the 1345 * command buffer size. 
1346 */ 1347 static u_int 1348 hifn_write_command(struct hifn_command *cmd, u_int8_t *buf) 1349 { 1350 u_int8_t *buf_pos; 1351 struct hifn_base_command *base_cmd; 1352 struct hifn_mac_command *mac_cmd; 1353 struct hifn_crypt_command *cry_cmd; 1354 struct hifn_comp_command *comp_cmd; 1355 int using_mac, using_crypt, using_comp, len, ivlen; 1356 u_int32_t dlen, slen; 1357 1358 buf_pos = buf; 1359 using_mac = cmd->base_masks & HIFN_BASE_CMD_MAC; 1360 using_crypt = cmd->base_masks & HIFN_BASE_CMD_CRYPT; 1361 using_comp = cmd->base_masks & HIFN_BASE_CMD_COMP; 1362 1363 base_cmd = (struct hifn_base_command *)buf_pos; 1364 base_cmd->masks = htole16(cmd->base_masks); 1365 slen = cmd->src_map->dm_mapsize; 1366 if (cmd->sloplen) 1367 dlen = cmd->dst_map->dm_mapsize - cmd->sloplen + 1368 sizeof(u_int32_t); 1369 else 1370 dlen = cmd->dst_map->dm_mapsize; 1371 base_cmd->total_source_count = htole16(slen & HIFN_BASE_CMD_LENMASK_LO); 1372 base_cmd->total_dest_count = htole16(dlen & HIFN_BASE_CMD_LENMASK_LO); 1373 dlen >>= 16; 1374 slen >>= 16; 1375 base_cmd->session_num = htole16(cmd->session_num | 1376 ((slen << HIFN_BASE_CMD_SRCLEN_S) & HIFN_BASE_CMD_SRCLEN_M) | 1377 ((dlen << HIFN_BASE_CMD_DSTLEN_S) & HIFN_BASE_CMD_DSTLEN_M)); 1378 buf_pos += sizeof(struct hifn_base_command); 1379 1380 if (using_comp) { 1381 comp_cmd = (struct hifn_comp_command *)buf_pos; 1382 dlen = cmd->compcrd->crd_len; 1383 comp_cmd->source_count = htole16(dlen & 0xffff); 1384 dlen >>= 16; 1385 comp_cmd->masks = htole16(cmd->comp_masks | 1386 ((dlen << HIFN_COMP_CMD_SRCLEN_S) & HIFN_COMP_CMD_SRCLEN_M)); 1387 comp_cmd->header_skip = htole16(cmd->compcrd->crd_skip); 1388 comp_cmd->reserved = 0; 1389 buf_pos += sizeof(struct hifn_comp_command); 1390 } 1391 1392 if (using_mac) { 1393 mac_cmd = (struct hifn_mac_command *)buf_pos; 1394 dlen = cmd->maccrd->crd_len; 1395 mac_cmd->source_count = htole16(dlen & 0xffff); 1396 dlen >>= 16; 1397 mac_cmd->masks = htole16(cmd->mac_masks | 1398 ((dlen << 
HIFN_MAC_CMD_SRCLEN_S) & HIFN_MAC_CMD_SRCLEN_M)); 1399 mac_cmd->header_skip = htole16(cmd->maccrd->crd_skip); 1400 mac_cmd->reserved = 0; 1401 buf_pos += sizeof(struct hifn_mac_command); 1402 } 1403 1404 if (using_crypt) { 1405 cry_cmd = (struct hifn_crypt_command *)buf_pos; 1406 dlen = cmd->enccrd->crd_len; 1407 cry_cmd->source_count = htole16(dlen & 0xffff); 1408 dlen >>= 16; 1409 cry_cmd->masks = htole16(cmd->cry_masks | 1410 ((dlen << HIFN_CRYPT_CMD_SRCLEN_S) & HIFN_CRYPT_CMD_SRCLEN_M)); 1411 cry_cmd->header_skip = htole16(cmd->enccrd->crd_skip); 1412 cry_cmd->reserved = 0; 1413 buf_pos += sizeof(struct hifn_crypt_command); 1414 } 1415 1416 if (using_mac && cmd->mac_masks & HIFN_MAC_CMD_NEW_KEY) { 1417 memcpy(buf_pos, cmd->mac, HIFN_MAC_KEY_LENGTH); 1418 buf_pos += HIFN_MAC_KEY_LENGTH; 1419 } 1420 1421 if (using_crypt && cmd->cry_masks & HIFN_CRYPT_CMD_NEW_KEY) { 1422 switch (cmd->cry_masks & HIFN_CRYPT_CMD_ALG_MASK) { 1423 case HIFN_CRYPT_CMD_ALG_3DES: 1424 memcpy(buf_pos, cmd->ck, HIFN_3DES_KEY_LENGTH); 1425 buf_pos += HIFN_3DES_KEY_LENGTH; 1426 break; 1427 case HIFN_CRYPT_CMD_ALG_DES: 1428 memcpy(buf_pos, cmd->ck, HIFN_DES_KEY_LENGTH); 1429 buf_pos += HIFN_DES_KEY_LENGTH; 1430 break; 1431 case HIFN_CRYPT_CMD_ALG_RC4: 1432 len = 256; 1433 do { 1434 int clen; 1435 1436 clen = MIN(cmd->cklen, len); 1437 memcpy(buf_pos, cmd->ck, clen); 1438 len -= clen; 1439 buf_pos += clen; 1440 } while (len > 0); 1441 memset(buf_pos, 0, 4); 1442 buf_pos += 4; 1443 break; 1444 case HIFN_CRYPT_CMD_ALG_AES: 1445 /* 1446 * AES keys are variable 128, 192 and 1447 * 256 bits (16, 24 and 32 bytes). 
			 */
			memcpy(buf_pos, cmd->ck, cmd->cklen);
			buf_pos += cmd->cklen;
			break;
		}
	}

	/* The IV follows the key material when a new IV is supplied. */
	if (using_crypt && cmd->cry_masks & HIFN_CRYPT_CMD_NEW_IV) {
		switch (cmd->cry_masks & HIFN_CRYPT_CMD_ALG_MASK) {
		case HIFN_CRYPT_CMD_ALG_AES:
			ivlen = HIFN_AES_IV_LENGTH;
			break;
		default:
			ivlen = HIFN_IV_LENGTH;
			break;
		}
		memcpy(buf_pos, cmd->iv, ivlen);
		buf_pos += ivlen;
	}

	/* A command selecting no engine still gets 8 bytes of zeros. */
	if ((cmd->base_masks & (HIFN_BASE_CMD_MAC | HIFN_BASE_CMD_CRYPT |
	    HIFN_BASE_CMD_COMP)) == 0) {
		memset(buf_pos, 0, 8);
		buf_pos += 8;
	}

	return (buf_pos - buf);
}

/*
 * Check that 'map' is usable by the chip: every segment must start on
 * a 32-bit boundary, and every segment except the last must also end
 * on one.  Returns 1 if aligned, 0 otherwise.
 */
static int
hifn_dmamap_aligned(bus_dmamap_t map)
{
	int i;

	for (i = 0; i < map->dm_nsegs; i++) {
		if (map->dm_segs[i].ds_addr & 3)
			return (0);
		if ((i != (map->dm_nsegs - 1)) &&
		    (map->dm_segs[i].ds_len & 3))
			return (0);
	}
	return (1);
}

/*
 * Fill destination descriptors for cmd->dst_map.  When the transfer
 * has slop (mapsize not a multiple of 4), the final descriptor is
 * pointed at the per-command slop word in the shared DMA area instead
 * of the caller's buffer.  Updates dsti/dstu; returns the next free
 * index.
 */
static int
hifn_dmamap_load_dst(struct hifn_softc *sc, struct hifn_command *cmd)
{
	struct hifn_dma *dma = sc->sc_dma;
	bus_dmamap_t map = cmd->dst_map;
	u_int32_t p, l;
	int idx, used = 0, i;

	idx = dma->dsti;
	for (i = 0; i < map->dm_nsegs - 1; i++) {
		dma->dstr[idx].p = htole32(map->dm_segs[i].ds_addr);
		dma->dstr[idx].l = htole32(HIFN_D_VALID |
		    HIFN_D_MASKDONEIRQ | map->dm_segs[i].ds_len);
		HIFN_DSTR_SYNC(sc, idx,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		used++;

		if (++idx == HIFN_D_DST_RSIZE) {
			dma->dstr[idx].l = htole32(HIFN_D_VALID |
			    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
			HIFN_DSTR_SYNC(sc, idx,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			idx = 0;
		}
	}

	/* Last descriptor: either the final segment or the slop word. */
	if (cmd->sloplen == 0) {
		p = map->dm_segs[i].ds_addr;
		l = HIFN_D_VALID | HIFN_D_MASKDONEIRQ | HIFN_D_LAST |
		    map->dm_segs[i].ds_len;
	} else {
		p = sc->sc_dmamap->dm_segs[0].ds_addr +
		    offsetof(struct hifn_dma, slop[cmd->slopidx]);
		l = HIFN_D_VALID | HIFN_D_MASKDONEIRQ | HIFN_D_LAST |
		    sizeof(u_int32_t);

		/* Aligned prefix of the final segment, if any. */
		if ((map->dm_segs[i].ds_len - cmd->sloplen) != 0) {
			dma->dstr[idx].p = htole32(map->dm_segs[i].ds_addr);
			dma->dstr[idx].l = htole32(HIFN_D_VALID |
			    HIFN_D_MASKDONEIRQ |
			    (map->dm_segs[i].ds_len - cmd->sloplen));
			HIFN_DSTR_SYNC(sc, idx,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			used++;

			if (++idx == HIFN_D_DST_RSIZE) {
				dma->dstr[idx].l = htole32(HIFN_D_VALID |
				    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
				HIFN_DSTR_SYNC(sc, idx,
				    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
				idx = 0;
			}
		}
	}
	dma->dstr[idx].p = htole32(p);
	dma->dstr[idx].l = htole32(l);
	HIFN_DSTR_SYNC(sc, idx, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	used++;

	if (++idx == HIFN_D_DST_RSIZE) {
		dma->dstr[idx].l = htole32(HIFN_D_VALID | HIFN_D_JUMP |
		    HIFN_D_MASKDONEIRQ);
		HIFN_DSTR_SYNC(sc, idx,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		idx = 0;
	}

	dma->dsti = idx;
	dma->dstu += used;
	return (idx);
}

/*
 * Fill source descriptors for cmd->src_map; the final segment is
 * tagged HIFN_D_LAST.  Updates srci/srcu; returns the next free index.
 */
static int
hifn_dmamap_load_src(struct hifn_softc *sc, struct hifn_command *cmd)
{
	struct hifn_dma *dma = sc->sc_dma;
	bus_dmamap_t map = cmd->src_map;
	int idx, i;
	u_int32_t last = 0;

	idx = dma->srci;
	for (i = 0; i < map->dm_nsegs; i++) {
		if (i == map->dm_nsegs - 1)
			last = HIFN_D_LAST;

		dma->srcr[idx].p = htole32(map->dm_segs[i].ds_addr);
		dma->srcr[idx].l = htole32(map->dm_segs[i].ds_len |
		    HIFN_D_VALID | HIFN_D_MASKDONEIRQ | last);
		HIFN_SRCR_SYNC(sc, idx,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);

		if (++idx == HIFN_D_SRC_RSIZE) {
			dma->srcr[idx].l = htole32(HIFN_D_VALID |
			    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
			HIFN_SRCR_SYNC(sc, HIFN_D_SRC_RSIZE,
			    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
			idx = 0;
		}
	}

	dma->srci = idx;
	dma->srcu += map->dm_nsegs;
	return (idx);
}

/*
 * Queue a crypto request on the rings: load the source map, pick (or
 * build) an aligned destination, write the command descriptor, and
 * kick each DMA engine that is not already running.  Returns 0 on
 * success or an errno; on failure every map created here is torn down.
 */
static int
hifn_crypto(struct hifn_softc *sc, struct hifn_command *cmd,
    struct cryptop *crp, int hint)
{
	struct hifn_dma *dma = sc->sc_dma;
	u_int32_t cmdlen;
	int cmdi, resi, s, err = 0;

	if (bus_dmamap_create(sc->sc_dmat, HIFN_MAX_DMALEN, MAX_SCATTER,
	    HIFN_MAX_SEGLEN, 0, BUS_DMA_NOWAIT, &cmd->src_map))
		return (ENOMEM);

	/* Only mbuf chains and iovecs are supported as buffers. */
	if (crp->crp_flags & CRYPTO_F_IMBUF) {
		if (bus_dmamap_load_mbuf(sc->sc_dmat, cmd->src_map,
		    cmd->srcu.src_m, BUS_DMA_NOWAIT)) {
			err = ENOMEM;
			goto err_srcmap1;
		}
	} else if (crp->crp_flags & CRYPTO_F_IOV) {
		if (bus_dmamap_load_uio(sc->sc_dmat, cmd->src_map,
		    cmd->srcu.src_io, BUS_DMA_NOWAIT)) {
			err = ENOMEM;
			goto err_srcmap1;
		}
	} else {
		err = EINVAL;
		goto err_srcmap1;
	}

	/*
	 * If the source is 32-bit aligned, operate in place; otherwise
	 * build a fresh, aligned mbuf chain for the destination (not
	 * possible for iovecs, which are rejected).
	 */
	if (hifn_dmamap_aligned(cmd->src_map)) {
		cmd->sloplen = cmd->src_map->dm_mapsize & 3;
		if (crp->crp_flags & CRYPTO_F_IOV)
			cmd->dstu.dst_io = cmd->srcu.src_io;
		else if (crp->crp_flags & CRYPTO_F_IMBUF)
			cmd->dstu.dst_m = cmd->srcu.src_m;
		cmd->dst_map = cmd->src_map;
	} else {
		if (crp->crp_flags & CRYPTO_F_IOV) {
			err = EINVAL;
			goto err_srcmap;
		} else if (crp->crp_flags & CRYPTO_F_IMBUF) {
			int totlen, len;
			struct mbuf *m, *m0, *mlast;

			totlen = cmd->src_map->dm_mapsize;
			if (cmd->srcu.src_m->m_flags & M_PKTHDR) {
				len = MHLEN;
				MGETHDR(m0, M_DONTWAIT, MT_DATA);
			} else {
				len = MLEN;
				MGET(m0, M_DONTWAIT, MT_DATA);
			}
			if (m0 == NULL) {
				err = ENOMEM;
				goto err_srcmap;
			}
			if (len == MHLEN)
				M_DUP_PKTHDR(m0, cmd->srcu.src_m);
			if (totlen >= MINCLSIZE) {
				MCLGET(m0, M_DONTWAIT);
				if (m0->m_flags & M_EXT)
					len = MCLBYTES;
			}
			totlen -= len;
			m0->m_pkthdr.len = m0->m_len = len;
			mlast = m0;

			/* Grow the chain until it covers the source. */
			while (totlen > 0) {
				MGET(m, M_DONTWAIT, MT_DATA);
				if (m == NULL) {
					err = ENOMEM;
					m_freem(m0);
					goto err_srcmap;
				}
				len = MLEN;
				if (totlen >= MINCLSIZE) {
					MCLGET(m, M_DONTWAIT);
					if (m->m_flags & M_EXT)
						len = MCLBYTES;
				}

				m->m_len = len;
				if (m0->m_flags & M_PKTHDR)
					m0->m_pkthdr.len += len;
				totlen -= len;

				mlast->m_next = m;
				mlast = m;
			}
			cmd->dstu.dst_m = m0;
		}
	}

	/* Create/load a separate dst map unless we operate in place. */
	if (cmd->dst_map == NULL) {
		if (bus_dmamap_create(sc->sc_dmat,
		    HIFN_MAX_SEGLEN * MAX_SCATTER, MAX_SCATTER,
		    HIFN_MAX_SEGLEN, 0, BUS_DMA_NOWAIT, &cmd->dst_map)) {
			err = ENOMEM;
			goto err_srcmap;
		}
		if (crp->crp_flags & CRYPTO_F_IMBUF) {
			if (bus_dmamap_load_mbuf(sc->sc_dmat, cmd->dst_map,
			    cmd->dstu.dst_m, BUS_DMA_NOWAIT)) {
				err = ENOMEM;
				goto err_dstmap1;
			}
		} else if (crp->crp_flags & CRYPTO_F_IOV) {
			if (bus_dmamap_load_uio(sc->sc_dmat, cmd->dst_map,
			    cmd->dstu.dst_io, BUS_DMA_NOWAIT)) {
				err = ENOMEM;
				goto err_dstmap1;
			}
		}
	}

#ifdef HIFN_DEBUG
	if (hifn_debug)
		printf("%s: Entering cmd: stat %8x ien %8x u %d/%d/%d/%d n %d/%d\n",
		    device_xname(&sc->sc_dv),
		    READ_REG_1(sc, HIFN_1_DMA_CSR),
		    READ_REG_1(sc, HIFN_1_DMA_IER),
		    dma->cmdu, dma->srcu, dma->dstu, dma->resu,
		    cmd->src_map->dm_nsegs, cmd->dst_map->dm_nsegs);
#endif

	if (cmd->src_map == cmd->dst_map)
		bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
		    0, cmd->src_map->dm_mapsize,
		    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
	else {
		bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
		    0, cmd->src_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
		bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
		    0, cmd->dst_map->dm_mapsize, BUS_DMASYNC_PREREAD);
	}

	s = splnet();

	/*
	 * need 1 cmd, and 1 res
	 * need N src, and N dst
	 */
	if ((dma->cmdu + 1) > HIFN_D_CMD_RSIZE ||
	    (dma->resu + 1) > HIFN_D_RES_RSIZE) {
		splx(s);
		err = ENOMEM;
		goto err_dstmap;
	}
	if ((dma->srcu + cmd->src_map->dm_nsegs) > HIFN_D_SRC_RSIZE ||
	    (dma->dstu + cmd->dst_map->dm_nsegs + 1) > HIFN_D_DST_RSIZE) {
		splx(s);
		err = ENOMEM;
		goto err_dstmap;
	}

	if (dma->cmdi == HIFN_D_CMD_RSIZE) {
		dma->cmdi = 0;
		dma->cmdr[HIFN_D_CMD_RSIZE].l = htole32(HIFN_D_VALID |
		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
		HIFN_CMDR_SYNC(sc, HIFN_D_CMD_RSIZE,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	}
	cmdi = dma->cmdi++;
	cmdlen = hifn_write_command(cmd, dma->command_bufs[cmdi]);
	HIFN_CMD_SYNC(sc, cmdi, BUS_DMASYNC_PREWRITE);

	/* .p for command/result already set */
	dma->cmdr[cmdi].l = htole32(cmdlen | HIFN_D_VALID | HIFN_D_LAST |
	    HIFN_D_MASKDONEIRQ);
	HIFN_CMDR_SYNC(sc, cmdi,
	    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	dma->cmdu++;
	if (sc->sc_c_busy == 0) {
		WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_C_CTRL_ENA);
		sc->sc_c_busy = 1;
		SET_LED(sc, HIFN_MIPSRST_LED0);
	}

	/*
	 * We don't worry about missing an interrupt (which a "command wait"
	 * interrupt salvages us from), unless there is more than one command
	 * in the queue.
	 *
	 * XXX We do seem to miss some interrupts.  So we always enable
	 * XXX command wait.  From OpenBSD revision 1.149.
1781 * 1782 */ 1783 #if 0 1784 if (dma->cmdu > 1) { 1785 #endif 1786 sc->sc_dmaier |= HIFN_DMAIER_C_WAIT; 1787 WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier); 1788 #if 0 1789 } 1790 #endif 1791 1792 hifnstats.hst_ipackets++; 1793 hifnstats.hst_ibytes += cmd->src_map->dm_mapsize; 1794 1795 hifn_dmamap_load_src(sc, cmd); 1796 if (sc->sc_s_busy == 0) { 1797 WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_S_CTRL_ENA); 1798 sc->sc_s_busy = 1; 1799 SET_LED(sc, HIFN_MIPSRST_LED1); 1800 } 1801 1802 /* 1803 * Unlike other descriptors, we don't mask done interrupt from 1804 * result descriptor. 1805 */ 1806 #ifdef HIFN_DEBUG 1807 if (hifn_debug) 1808 printf("load res\n"); 1809 #endif 1810 if (dma->resi == HIFN_D_RES_RSIZE) { 1811 dma->resi = 0; 1812 dma->resr[HIFN_D_RES_RSIZE].l = htole32(HIFN_D_VALID | 1813 HIFN_D_JUMP | HIFN_D_MASKDONEIRQ); 1814 HIFN_RESR_SYNC(sc, HIFN_D_RES_RSIZE, 1815 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1816 } 1817 resi = dma->resi++; 1818 dma->hifn_commands[resi] = cmd; 1819 HIFN_RES_SYNC(sc, resi, BUS_DMASYNC_PREREAD); 1820 dma->resr[resi].l = htole32(HIFN_MAX_RESULT | 1821 HIFN_D_VALID | HIFN_D_LAST); 1822 HIFN_RESR_SYNC(sc, resi, 1823 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1824 dma->resu++; 1825 if (sc->sc_r_busy == 0) { 1826 WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_R_CTRL_ENA); 1827 sc->sc_r_busy = 1; 1828 SET_LED(sc, HIFN_MIPSRST_LED2); 1829 } 1830 1831 if (cmd->sloplen) 1832 cmd->slopidx = resi; 1833 1834 hifn_dmamap_load_dst(sc, cmd); 1835 1836 if (sc->sc_d_busy == 0) { 1837 WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_D_CTRL_ENA); 1838 sc->sc_d_busy = 1; 1839 } 1840 1841 #ifdef HIFN_DEBUG 1842 if (hifn_debug) 1843 printf("%s: command: stat %8x ier %8x\n", 1844 device_xname(&sc->sc_dv), 1845 READ_REG_1(sc, HIFN_1_DMA_CSR), READ_REG_1(sc, HIFN_1_DMA_IER)); 1846 #endif 1847 1848 sc->sc_active = 5; 1849 splx(s); 1850 return (err); /* success */ 1851 1852 err_dstmap: 1853 if (cmd->src_map != cmd->dst_map) 1854 
bus_dmamap_unload(sc->sc_dmat, cmd->dst_map); 1855 err_dstmap1: 1856 if (cmd->src_map != cmd->dst_map) 1857 bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map); 1858 err_srcmap: 1859 if (crp->crp_flags & CRYPTO_F_IMBUF && 1860 cmd->srcu.src_m != cmd->dstu.dst_m) 1861 m_freem(cmd->dstu.dst_m); 1862 bus_dmamap_unload(sc->sc_dmat, cmd->src_map); 1863 err_srcmap1: 1864 bus_dmamap_destroy(sc->sc_dmat, cmd->src_map); 1865 return (err); 1866 } 1867 1868 static void 1869 hifn_tick(void *vsc) 1870 { 1871 struct hifn_softc *sc = vsc; 1872 int s; 1873 1874 s = splnet(); 1875 if (sc->sc_active == 0) { 1876 struct hifn_dma *dma = sc->sc_dma; 1877 u_int32_t r = 0; 1878 1879 if (dma->cmdu == 0 && sc->sc_c_busy) { 1880 sc->sc_c_busy = 0; 1881 r |= HIFN_DMACSR_C_CTRL_DIS; 1882 CLR_LED(sc, HIFN_MIPSRST_LED0); 1883 } 1884 if (dma->srcu == 0 && sc->sc_s_busy) { 1885 sc->sc_s_busy = 0; 1886 r |= HIFN_DMACSR_S_CTRL_DIS; 1887 CLR_LED(sc, HIFN_MIPSRST_LED1); 1888 } 1889 if (dma->dstu == 0 && sc->sc_d_busy) { 1890 sc->sc_d_busy = 0; 1891 r |= HIFN_DMACSR_D_CTRL_DIS; 1892 } 1893 if (dma->resu == 0 && sc->sc_r_busy) { 1894 sc->sc_r_busy = 0; 1895 r |= HIFN_DMACSR_R_CTRL_DIS; 1896 CLR_LED(sc, HIFN_MIPSRST_LED2); 1897 } 1898 if (r) 1899 WRITE_REG_1(sc, HIFN_1_DMA_CSR, r); 1900 } 1901 else 1902 sc->sc_active--; 1903 splx(s); 1904 #ifdef __OpenBSD__ 1905 timeout_add(&sc->sc_tickto, hz); 1906 #else 1907 callout_reset(&sc->sc_tickto, hz, hifn_tick, sc); 1908 #endif 1909 } 1910 1911 static int 1912 hifn_intr(void *arg) 1913 { 1914 struct hifn_softc *sc = arg; 1915 struct hifn_dma *dma = sc->sc_dma; 1916 u_int32_t dmacsr, restart; 1917 int i, u; 1918 1919 dmacsr = READ_REG_1(sc, HIFN_1_DMA_CSR); 1920 1921 #ifdef HIFN_DEBUG 1922 if (hifn_debug) 1923 printf("%s: irq: stat %08x ien %08x u %d/%d/%d/%d\n", 1924 device_xname(&sc->sc_dv), 1925 dmacsr, READ_REG_1(sc, HIFN_1_DMA_IER), 1926 dma->cmdu, dma->srcu, dma->dstu, dma->resu); 1927 #endif 1928 1929 /* Nothing in the DMA unit interrupted */ 1930 if ((dmacsr 
& sc->sc_dmaier) == 0) 1931 return (0); 1932 1933 WRITE_REG_1(sc, HIFN_1_DMA_CSR, dmacsr & sc->sc_dmaier); 1934 1935 if (dmacsr & HIFN_DMACSR_ENGINE) 1936 WRITE_REG_0(sc, HIFN_0_PUISR, READ_REG_0(sc, HIFN_0_PUISR)); 1937 1938 if ((sc->sc_flags & HIFN_HAS_PUBLIC) && 1939 (dmacsr & HIFN_DMACSR_PUBDONE)) 1940 WRITE_REG_1(sc, HIFN_1_PUB_STATUS, 1941 READ_REG_1(sc, HIFN_1_PUB_STATUS) | HIFN_PUBSTS_DONE); 1942 1943 restart = dmacsr & (HIFN_DMACSR_R_OVER | HIFN_DMACSR_D_OVER); 1944 if (restart) 1945 printf("%s: overrun %x\n", device_xname(&sc->sc_dv), dmacsr); 1946 1947 if (sc->sc_flags & HIFN_IS_7811) { 1948 if (dmacsr & HIFN_DMACSR_ILLR) 1949 printf("%s: illegal read\n", device_xname(&sc->sc_dv)); 1950 if (dmacsr & HIFN_DMACSR_ILLW) 1951 printf("%s: illegal write\n", device_xname(&sc->sc_dv)); 1952 } 1953 1954 restart = dmacsr & (HIFN_DMACSR_C_ABORT | HIFN_DMACSR_S_ABORT | 1955 HIFN_DMACSR_D_ABORT | HIFN_DMACSR_R_ABORT); 1956 if (restart) { 1957 printf("%s: abort, resetting.\n", device_xname(&sc->sc_dv)); 1958 hifnstats.hst_abort++; 1959 hifn_abort(sc); 1960 return (1); 1961 } 1962 1963 if ((dmacsr & HIFN_DMACSR_C_WAIT) && (dma->resu == 0)) { 1964 /* 1965 * If no slots to process and we receive a "waiting on 1966 * command" interrupt, we disable the "waiting on command" 1967 * (by clearing it). 
1968 */ 1969 sc->sc_dmaier &= ~HIFN_DMAIER_C_WAIT; 1970 WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier); 1971 } 1972 1973 /* clear the rings */ 1974 i = dma->resk; 1975 while (dma->resu != 0) { 1976 HIFN_RESR_SYNC(sc, i, 1977 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1978 if (dma->resr[i].l & htole32(HIFN_D_VALID)) { 1979 HIFN_RESR_SYNC(sc, i, 1980 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1981 break; 1982 } 1983 1984 if (i != HIFN_D_RES_RSIZE) { 1985 struct hifn_command *cmd; 1986 1987 HIFN_RES_SYNC(sc, i, BUS_DMASYNC_POSTREAD); 1988 cmd = dma->hifn_commands[i]; 1989 KASSERT(cmd != NULL 1990 /*("hifn_intr: null command slot %u", i)*/); 1991 dma->hifn_commands[i] = NULL; 1992 1993 hifn_callback(sc, cmd, dma->result_bufs[i]); 1994 hifnstats.hst_opackets++; 1995 } 1996 1997 if (++i == (HIFN_D_RES_RSIZE + 1)) 1998 i = 0; 1999 else 2000 dma->resu--; 2001 } 2002 dma->resk = i; 2003 2004 i = dma->srck; u = dma->srcu; 2005 while (u != 0) { 2006 HIFN_SRCR_SYNC(sc, i, 2007 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 2008 if (dma->srcr[i].l & htole32(HIFN_D_VALID)) { 2009 HIFN_SRCR_SYNC(sc, i, 2010 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 2011 break; 2012 } 2013 if (++i == (HIFN_D_SRC_RSIZE + 1)) 2014 i = 0; 2015 else 2016 u--; 2017 } 2018 dma->srck = i; dma->srcu = u; 2019 2020 i = dma->cmdk; u = dma->cmdu; 2021 while (u != 0) { 2022 HIFN_CMDR_SYNC(sc, i, 2023 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 2024 if (dma->cmdr[i].l & htole32(HIFN_D_VALID)) { 2025 HIFN_CMDR_SYNC(sc, i, 2026 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 2027 break; 2028 } 2029 if (i != HIFN_D_CMD_RSIZE) { 2030 u--; 2031 HIFN_CMD_SYNC(sc, i, BUS_DMASYNC_POSTWRITE); 2032 } 2033 if (++i == (HIFN_D_CMD_RSIZE + 1)) 2034 i = 0; 2035 } 2036 dma->cmdk = i; dma->cmdu = u; 2037 2038 return (1); 2039 } 2040 2041 /* 2042 * Allocate a new 'session' and return an encoded session id. 
'sidp'
 * contains our registration id, and should contain an encoded session
 * id on successful allocation.
 */
static int
hifn_newsession(void *arg, u_int32_t *sidp, struct cryptoini *cri)
{
	struct cryptoini *c;
	struct hifn_softc *sc = arg;
	int i, mac = 0, cry = 0, comp = 0;

	KASSERT(sc != NULL /*, ("hifn_newsession: null softc")*/);
	if (sidp == NULL || cri == NULL || sc == NULL)
		return (EINVAL);

	/* Find a free session slot. */
	for (i = 0; i < sc->sc_maxses; i++)
		if (sc->sc_sessions[i].hs_state == HS_STATE_FREE)
			break;
	if (i == sc->sc_maxses)
		return (ENOMEM);

	/*
	 * Walk the requested operations; at most one MAC, one cipher
	 * and one compressor may be requested per session.
	 */
	for (c = cri; c != NULL; c = c->cri_next) {
		switch (c->cri_alg) {
		case CRYPTO_MD5:
		case CRYPTO_SHA1:
		case CRYPTO_MD5_HMAC_96:
		case CRYPTO_SHA1_HMAC_96:
			if (mac)
				return (EINVAL);
			mac = 1;
			break;
		case CRYPTO_DES_CBC:
		case CRYPTO_3DES_CBC:
		case CRYPTO_AES_CBC:
			/* Note that this is an initialization
			   vector, not a cipher key; any function
			   giving sufficient Hamming distance
			   between outputs is fine.  Use of RC4
			   to generate IVs has been FIPS140-2
			   certified by several labs. */
#ifdef __NetBSD__
			arc4randbytes(sc->sc_sessions[i].hs_iv,
			    c->cri_alg == CRYPTO_AES_CBC ?
				HIFN_AES_IV_LENGTH : HIFN_IV_LENGTH);
#else	/* FreeBSD and OpenBSD have get_random_bytes */
			/* XXX this may read fewer, does it matter? */
			get_random_bytes(sc->sc_sessions[i].hs_iv,
			    c->cri_alg == CRYPTO_AES_CBC ?
				HIFN_AES_IV_LENGTH : HIFN_IV_LENGTH);
#endif
			/*FALLTHROUGH*/
		case CRYPTO_ARC4:
			if (cry)
				return (EINVAL);
			cry = 1;
			break;
#ifdef HAVE_CRYPTO_LZS
		case CRYPTO_LZS_COMP:
			if (comp)
				return (EINVAL);
			comp = 1;
			break;
#endif
		default:
			return (EINVAL);
		}
	}
	if (mac == 0 && cry == 0 && comp == 0)
		return (EINVAL);

	/*
	 * XXX only want to support compression without chaining to
	 * MAC/crypt engine right now
	 */
	if ((comp && mac) || (comp && cry))
		return (EINVAL);

	*sidp = HIFN_SID(device_unit(&sc->sc_dv), i);
	sc->sc_sessions[i].hs_state = HS_STATE_USED;

	return (0);
}

/*
 * Deallocate a session.
 * XXX this routine should run a zero'd mac/encrypt key into context ram.
 * XXX to blow away any keys already stored there.
 */
static int
hifn_freesession(void *arg, u_int64_t tid)
{
	struct hifn_softc *sc = arg;
	int session;
	u_int32_t sid = ((u_int32_t) tid) & 0xffffffff;

	KASSERT(sc != NULL /*, ("hifn_freesession: null softc")*/);
	if (sc == NULL)
		return (EINVAL);

	session = HIFN_SESSION(sid);
	if (session >= sc->sc_maxses)
		return (EINVAL);

	/*
	 * Zeroing the slot also resets hs_state (presumably
	 * HS_STATE_FREE == 0, which the free-slot scan in
	 * hifn_newsession() relies on -- TODO confirm in hifn7751var.h).
	 */
	memset(&sc->sc_sessions[session], 0, sizeof(sc->sc_sessions[session]));
	return (0);
}

/*
 * Dispatch one cryptop: validate the session, classify the descriptor
 * chain into MAC/cipher (or hand off compression), then hand the
 * built command to hifn_crypto().
 */
static int
hifn_process(void *arg, struct cryptop *crp, int hint)
{
	struct hifn_softc *sc = arg;
	struct hifn_command *cmd = NULL;
	int session, err, ivlen;
	struct cryptodesc *crd1, *crd2, *maccrd, *enccrd;

	if (crp == NULL || crp->crp_callback == NULL) {
		hifnstats.hst_invalid++;
		return (EINVAL);
	}
	session = HIFN_SESSION(crp->crp_sid);

	if (sc == NULL || session >= sc->sc_maxses) {
		err = EINVAL;
		goto errout;
	}

	cmd = (struct hifn_command *)malloc(sizeof(struct hifn_command),
	    M_DEVBUF,
M_NOWAIT|M_ZERO); 2170 if (cmd == NULL) { 2171 hifnstats.hst_nomem++; 2172 err = ENOMEM; 2173 goto errout; 2174 } 2175 2176 if (crp->crp_flags & CRYPTO_F_IMBUF) { 2177 cmd->srcu.src_m = (struct mbuf *)crp->crp_buf; 2178 cmd->dstu.dst_m = (struct mbuf *)crp->crp_buf; 2179 } else if (crp->crp_flags & CRYPTO_F_IOV) { 2180 cmd->srcu.src_io = (struct uio *)crp->crp_buf; 2181 cmd->dstu.dst_io = (struct uio *)crp->crp_buf; 2182 } else { 2183 err = EINVAL; 2184 goto errout; /* XXX we don't handle contiguous buffers! */ 2185 } 2186 2187 crd1 = crp->crp_desc; 2188 if (crd1 == NULL) { 2189 err = EINVAL; 2190 goto errout; 2191 } 2192 crd2 = crd1->crd_next; 2193 2194 if (crd2 == NULL) { 2195 if (crd1->crd_alg == CRYPTO_MD5_HMAC_96 || 2196 crd1->crd_alg == CRYPTO_SHA1_HMAC_96 || 2197 crd1->crd_alg == CRYPTO_SHA1 || 2198 crd1->crd_alg == CRYPTO_MD5) { 2199 maccrd = crd1; 2200 enccrd = NULL; 2201 } else if (crd1->crd_alg == CRYPTO_DES_CBC || 2202 crd1->crd_alg == CRYPTO_3DES_CBC || 2203 crd1->crd_alg == CRYPTO_AES_CBC || 2204 crd1->crd_alg == CRYPTO_ARC4) { 2205 if ((crd1->crd_flags & CRD_F_ENCRYPT) == 0) 2206 cmd->base_masks |= HIFN_BASE_CMD_DECODE; 2207 maccrd = NULL; 2208 enccrd = crd1; 2209 #ifdef HAVE_CRYPTO_LZS 2210 } else if (crd1->crd_alg == CRYPTO_LZS_COMP) { 2211 return (hifn_compression(sc, crp, cmd)); 2212 #endif 2213 } else { 2214 err = EINVAL; 2215 goto errout; 2216 } 2217 } else { 2218 if ((crd1->crd_alg == CRYPTO_MD5_HMAC_96 || 2219 crd1->crd_alg == CRYPTO_SHA1_HMAC_96 || 2220 crd1->crd_alg == CRYPTO_MD5 || 2221 crd1->crd_alg == CRYPTO_SHA1) && 2222 (crd2->crd_alg == CRYPTO_DES_CBC || 2223 crd2->crd_alg == CRYPTO_3DES_CBC || 2224 crd2->crd_alg == CRYPTO_AES_CBC || 2225 crd2->crd_alg == CRYPTO_ARC4) && 2226 ((crd2->crd_flags & CRD_F_ENCRYPT) == 0)) { 2227 cmd->base_masks = HIFN_BASE_CMD_DECODE; 2228 maccrd = crd1; 2229 enccrd = crd2; 2230 } else if ((crd1->crd_alg == CRYPTO_DES_CBC || 2231 crd1->crd_alg == CRYPTO_ARC4 || 2232 crd1->crd_alg == CRYPTO_3DES_CBC || 2233 
crd1->crd_alg == CRYPTO_AES_CBC) && 2234 (crd2->crd_alg == CRYPTO_MD5_HMAC_96 || 2235 crd2->crd_alg == CRYPTO_SHA1_HMAC_96 || 2236 crd2->crd_alg == CRYPTO_MD5 || 2237 crd2->crd_alg == CRYPTO_SHA1) && 2238 (crd1->crd_flags & CRD_F_ENCRYPT)) { 2239 enccrd = crd1; 2240 maccrd = crd2; 2241 } else { 2242 /* 2243 * We cannot order the 7751 as requested 2244 */ 2245 err = EINVAL; 2246 goto errout; 2247 } 2248 } 2249 2250 if (enccrd) { 2251 cmd->enccrd = enccrd; 2252 cmd->base_masks |= HIFN_BASE_CMD_CRYPT; 2253 switch (enccrd->crd_alg) { 2254 case CRYPTO_ARC4: 2255 cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_RC4; 2256 if ((enccrd->crd_flags & CRD_F_ENCRYPT) 2257 != sc->sc_sessions[session].hs_prev_op) 2258 sc->sc_sessions[session].hs_state = 2259 HS_STATE_USED; 2260 break; 2261 case CRYPTO_DES_CBC: 2262 cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_DES | 2263 HIFN_CRYPT_CMD_MODE_CBC | 2264 HIFN_CRYPT_CMD_NEW_IV; 2265 break; 2266 case CRYPTO_3DES_CBC: 2267 cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_3DES | 2268 HIFN_CRYPT_CMD_MODE_CBC | 2269 HIFN_CRYPT_CMD_NEW_IV; 2270 break; 2271 case CRYPTO_AES_CBC: 2272 cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_AES | 2273 HIFN_CRYPT_CMD_MODE_CBC | 2274 HIFN_CRYPT_CMD_NEW_IV; 2275 break; 2276 default: 2277 err = EINVAL; 2278 goto errout; 2279 } 2280 if (enccrd->crd_alg != CRYPTO_ARC4) { 2281 ivlen = ((enccrd->crd_alg == CRYPTO_AES_CBC) ? 
2282 HIFN_AES_IV_LENGTH : HIFN_IV_LENGTH); 2283 if (enccrd->crd_flags & CRD_F_ENCRYPT) { 2284 if (enccrd->crd_flags & CRD_F_IV_EXPLICIT) 2285 memcpy(cmd->iv, enccrd->crd_iv, ivlen); 2286 else 2287 bcopy(sc->sc_sessions[session].hs_iv, 2288 cmd->iv, ivlen); 2289 2290 if ((enccrd->crd_flags & CRD_F_IV_PRESENT) 2291 == 0) { 2292 if (crp->crp_flags & CRYPTO_F_IMBUF) 2293 m_copyback(cmd->srcu.src_m, 2294 enccrd->crd_inject, 2295 ivlen, cmd->iv); 2296 else if (crp->crp_flags & CRYPTO_F_IOV) 2297 cuio_copyback(cmd->srcu.src_io, 2298 enccrd->crd_inject, 2299 ivlen, cmd->iv); 2300 } 2301 } else { 2302 if (enccrd->crd_flags & CRD_F_IV_EXPLICIT) 2303 memcpy(cmd->iv, enccrd->crd_iv, ivlen); 2304 else if (crp->crp_flags & CRYPTO_F_IMBUF) 2305 m_copydata(cmd->srcu.src_m, 2306 enccrd->crd_inject, ivlen, cmd->iv); 2307 else if (crp->crp_flags & CRYPTO_F_IOV) 2308 cuio_copydata(cmd->srcu.src_io, 2309 enccrd->crd_inject, ivlen, cmd->iv); 2310 } 2311 } 2312 2313 cmd->ck = enccrd->crd_key; 2314 cmd->cklen = enccrd->crd_klen >> 3; 2315 2316 /* 2317 * Need to specify the size for the AES key in the masks. 
2318 */ 2319 if ((cmd->cry_masks & HIFN_CRYPT_CMD_ALG_MASK) == 2320 HIFN_CRYPT_CMD_ALG_AES) { 2321 switch (cmd->cklen) { 2322 case 16: 2323 cmd->cry_masks |= HIFN_CRYPT_CMD_KSZ_128; 2324 break; 2325 case 24: 2326 cmd->cry_masks |= HIFN_CRYPT_CMD_KSZ_192; 2327 break; 2328 case 32: 2329 cmd->cry_masks |= HIFN_CRYPT_CMD_KSZ_256; 2330 break; 2331 default: 2332 err = EINVAL; 2333 goto errout; 2334 } 2335 } 2336 2337 if (sc->sc_sessions[session].hs_state == HS_STATE_USED) 2338 cmd->cry_masks |= HIFN_CRYPT_CMD_NEW_KEY; 2339 } 2340 2341 if (maccrd) { 2342 cmd->maccrd = maccrd; 2343 cmd->base_masks |= HIFN_BASE_CMD_MAC; 2344 2345 switch (maccrd->crd_alg) { 2346 case CRYPTO_MD5: 2347 cmd->mac_masks |= HIFN_MAC_CMD_ALG_MD5 | 2348 HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HASH | 2349 HIFN_MAC_CMD_POS_IPSEC; 2350 break; 2351 case CRYPTO_MD5_HMAC_96: 2352 cmd->mac_masks |= HIFN_MAC_CMD_ALG_MD5 | 2353 HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HMAC | 2354 HIFN_MAC_CMD_POS_IPSEC | HIFN_MAC_CMD_TRUNC; 2355 break; 2356 case CRYPTO_SHA1: 2357 cmd->mac_masks |= HIFN_MAC_CMD_ALG_SHA1 | 2358 HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HASH | 2359 HIFN_MAC_CMD_POS_IPSEC; 2360 break; 2361 case CRYPTO_SHA1_HMAC_96: 2362 cmd->mac_masks |= HIFN_MAC_CMD_ALG_SHA1 | 2363 HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HMAC | 2364 HIFN_MAC_CMD_POS_IPSEC | HIFN_MAC_CMD_TRUNC; 2365 break; 2366 } 2367 2368 if ((maccrd->crd_alg == CRYPTO_SHA1_HMAC_96 || 2369 maccrd->crd_alg == CRYPTO_MD5_HMAC_96) && 2370 sc->sc_sessions[session].hs_state == HS_STATE_USED) { 2371 cmd->mac_masks |= HIFN_MAC_CMD_NEW_KEY; 2372 memcpy(cmd->mac, maccrd->crd_key, maccrd->crd_klen >> 3); 2373 memset(cmd->mac + (maccrd->crd_klen >> 3), 0, 2374 HIFN_MAC_KEY_LENGTH - (maccrd->crd_klen >> 3)); 2375 } 2376 } 2377 2378 cmd->crp = crp; 2379 cmd->session_num = session; 2380 cmd->softc = sc; 2381 2382 err = hifn_crypto(sc, cmd, crp, hint); 2383 if (err == 0) { 2384 if (enccrd) 2385 sc->sc_sessions[session].hs_prev_op = 2386 enccrd->crd_flags & 
CRD_F_ENCRYPT; 2387 if (sc->sc_sessions[session].hs_state == HS_STATE_USED) 2388 sc->sc_sessions[session].hs_state = HS_STATE_KEY; 2389 return 0; 2390 } else if (err == ERESTART) { 2391 /* 2392 * There weren't enough resources to dispatch the request 2393 * to the part. Notify the caller so they'll requeue this 2394 * request and resubmit it again soon. 2395 */ 2396 #ifdef HIFN_DEBUG 2397 if (hifn_debug) 2398 printf(device_xname(&sc->sc_dv), "requeue request\n"); 2399 #endif 2400 free(cmd, M_DEVBUF); 2401 sc->sc_needwakeup |= CRYPTO_SYMQ; 2402 return (err); 2403 } 2404 2405 errout: 2406 if (cmd != NULL) 2407 free(cmd, M_DEVBUF); 2408 if (err == EINVAL) 2409 hifnstats.hst_invalid++; 2410 else 2411 hifnstats.hst_nomem++; 2412 crp->crp_etype = err; 2413 crypto_done(crp); 2414 return (0); 2415 } 2416 2417 static void 2418 hifn_abort(struct hifn_softc *sc) 2419 { 2420 struct hifn_dma *dma = sc->sc_dma; 2421 struct hifn_command *cmd; 2422 struct cryptop *crp; 2423 int i, u; 2424 2425 i = dma->resk; u = dma->resu; 2426 while (u != 0) { 2427 cmd = dma->hifn_commands[i]; 2428 KASSERT(cmd != NULL /*, ("hifn_abort: null cmd slot %u", i)*/); 2429 dma->hifn_commands[i] = NULL; 2430 crp = cmd->crp; 2431 2432 if ((dma->resr[i].l & htole32(HIFN_D_VALID)) == 0) { 2433 /* Salvage what we can. 
*/ 2434 hifnstats.hst_opackets++; 2435 hifn_callback(sc, cmd, dma->result_bufs[i]); 2436 } else { 2437 if (cmd->src_map == cmd->dst_map) { 2438 bus_dmamap_sync(sc->sc_dmat, cmd->src_map, 2439 0, cmd->src_map->dm_mapsize, 2440 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 2441 } else { 2442 bus_dmamap_sync(sc->sc_dmat, cmd->src_map, 2443 0, cmd->src_map->dm_mapsize, 2444 BUS_DMASYNC_POSTWRITE); 2445 bus_dmamap_sync(sc->sc_dmat, cmd->dst_map, 2446 0, cmd->dst_map->dm_mapsize, 2447 BUS_DMASYNC_POSTREAD); 2448 } 2449 2450 if (cmd->srcu.src_m != cmd->dstu.dst_m) { 2451 m_freem(cmd->srcu.src_m); 2452 crp->crp_buf = (void *)cmd->dstu.dst_m; 2453 } 2454 2455 /* non-shared buffers cannot be restarted */ 2456 if (cmd->src_map != cmd->dst_map) { 2457 /* 2458 * XXX should be EAGAIN, delayed until 2459 * after the reset. 2460 */ 2461 crp->crp_etype = ENOMEM; 2462 bus_dmamap_unload(sc->sc_dmat, cmd->dst_map); 2463 bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map); 2464 } else 2465 crp->crp_etype = ENOMEM; 2466 2467 bus_dmamap_unload(sc->sc_dmat, cmd->src_map); 2468 bus_dmamap_destroy(sc->sc_dmat, cmd->src_map); 2469 2470 free(cmd, M_DEVBUF); 2471 if (crp->crp_etype != EAGAIN) 2472 crypto_done(crp); 2473 } 2474 2475 if (++i == HIFN_D_RES_RSIZE) 2476 i = 0; 2477 u--; 2478 } 2479 dma->resk = i; dma->resu = u; 2480 2481 /* Force upload of key next time */ 2482 for (i = 0; i < sc->sc_maxses; i++) 2483 if (sc->sc_sessions[i].hs_state == HS_STATE_KEY) 2484 sc->sc_sessions[i].hs_state = HS_STATE_USED; 2485 2486 hifn_reset_board(sc, 1); 2487 hifn_init_dma(sc); 2488 hifn_init_pci_registers(sc); 2489 } 2490 2491 static void 2492 hifn_callback(struct hifn_softc *sc, struct hifn_command *cmd, u_int8_t *resbuf) 2493 { 2494 struct hifn_dma *dma = sc->sc_dma; 2495 struct cryptop *crp = cmd->crp; 2496 struct cryptodesc *crd; 2497 struct mbuf *m; 2498 int totlen, i, u, ivlen; 2499 2500 if (cmd->src_map == cmd->dst_map) 2501 bus_dmamap_sync(sc->sc_dmat, cmd->src_map, 2502 0, cmd->src_map->dm_mapsize, 
2503 BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD); 2504 else { 2505 bus_dmamap_sync(sc->sc_dmat, cmd->src_map, 2506 0, cmd->src_map->dm_mapsize, BUS_DMASYNC_POSTWRITE); 2507 bus_dmamap_sync(sc->sc_dmat, cmd->dst_map, 2508 0, cmd->dst_map->dm_mapsize, BUS_DMASYNC_POSTREAD); 2509 } 2510 2511 if (crp->crp_flags & CRYPTO_F_IMBUF) { 2512 if (cmd->srcu.src_m != cmd->dstu.dst_m) { 2513 crp->crp_buf = (void *)cmd->dstu.dst_m; 2514 totlen = cmd->src_map->dm_mapsize; 2515 for (m = cmd->dstu.dst_m; m != NULL; m = m->m_next) { 2516 if (totlen < m->m_len) { 2517 m->m_len = totlen; 2518 totlen = 0; 2519 } else 2520 totlen -= m->m_len; 2521 } 2522 cmd->dstu.dst_m->m_pkthdr.len = 2523 cmd->srcu.src_m->m_pkthdr.len; 2524 m_freem(cmd->srcu.src_m); 2525 } 2526 } 2527 2528 if (cmd->sloplen != 0) { 2529 if (crp->crp_flags & CRYPTO_F_IMBUF) 2530 m_copyback((struct mbuf *)crp->crp_buf, 2531 cmd->src_map->dm_mapsize - cmd->sloplen, 2532 cmd->sloplen, (void *)&dma->slop[cmd->slopidx]); 2533 else if (crp->crp_flags & CRYPTO_F_IOV) 2534 cuio_copyback((struct uio *)crp->crp_buf, 2535 cmd->src_map->dm_mapsize - cmd->sloplen, 2536 cmd->sloplen, (void *)&dma->slop[cmd->slopidx]); 2537 } 2538 2539 i = dma->dstk; u = dma->dstu; 2540 while (u != 0) { 2541 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, 2542 offsetof(struct hifn_dma, dstr[i]), sizeof(struct hifn_desc), 2543 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 2544 if (dma->dstr[i].l & htole32(HIFN_D_VALID)) { 2545 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, 2546 offsetof(struct hifn_dma, dstr[i]), 2547 sizeof(struct hifn_desc), 2548 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 2549 break; 2550 } 2551 if (++i == (HIFN_D_DST_RSIZE + 1)) 2552 i = 0; 2553 else 2554 u--; 2555 } 2556 dma->dstk = i; dma->dstu = u; 2557 2558 hifnstats.hst_obytes += cmd->dst_map->dm_mapsize; 2559 2560 if ((cmd->base_masks & (HIFN_BASE_CMD_CRYPT | HIFN_BASE_CMD_DECODE)) == 2561 HIFN_BASE_CMD_CRYPT) { 2562 for (crd = crp->crp_desc; crd; crd = crd->crd_next) { 2563 if 
(crd->crd_alg != CRYPTO_DES_CBC && 2564 crd->crd_alg != CRYPTO_3DES_CBC && 2565 crd->crd_alg != CRYPTO_AES_CBC) 2566 continue; 2567 ivlen = ((crd->crd_alg == CRYPTO_AES_CBC) ? 2568 HIFN_AES_IV_LENGTH : HIFN_IV_LENGTH); 2569 if (crp->crp_flags & CRYPTO_F_IMBUF) 2570 m_copydata((struct mbuf *)crp->crp_buf, 2571 crd->crd_skip + crd->crd_len - ivlen, 2572 ivlen, 2573 cmd->softc->sc_sessions[cmd->session_num].hs_iv); 2574 else if (crp->crp_flags & CRYPTO_F_IOV) { 2575 cuio_copydata((struct uio *)crp->crp_buf, 2576 crd->crd_skip + crd->crd_len - ivlen, 2577 ivlen, 2578 cmd->softc->sc_sessions[cmd->session_num].hs_iv); 2579 } 2580 /* XXX We do not handle contig data */ 2581 break; 2582 } 2583 } 2584 2585 if (cmd->base_masks & HIFN_BASE_CMD_MAC) { 2586 u_int8_t *macbuf; 2587 2588 macbuf = resbuf + sizeof(struct hifn_base_result); 2589 if (cmd->base_masks & HIFN_BASE_CMD_COMP) 2590 macbuf += sizeof(struct hifn_comp_result); 2591 macbuf += sizeof(struct hifn_mac_result); 2592 2593 for (crd = crp->crp_desc; crd; crd = crd->crd_next) { 2594 int len; 2595 2596 if (crd->crd_alg == CRYPTO_MD5) 2597 len = 16; 2598 else if (crd->crd_alg == CRYPTO_SHA1) 2599 len = 20; 2600 else if (crd->crd_alg == CRYPTO_MD5_HMAC_96 || 2601 crd->crd_alg == CRYPTO_SHA1_HMAC_96) 2602 len = 12; 2603 else 2604 continue; 2605 2606 if (crp->crp_flags & CRYPTO_F_IMBUF) 2607 m_copyback((struct mbuf *)crp->crp_buf, 2608 crd->crd_inject, len, macbuf); 2609 else if ((crp->crp_flags & CRYPTO_F_IOV) && crp->crp_mac) 2610 memcpy(crp->crp_mac, (void *)macbuf, len); 2611 break; 2612 } 2613 } 2614 2615 if (cmd->src_map != cmd->dst_map) { 2616 bus_dmamap_unload(sc->sc_dmat, cmd->dst_map); 2617 bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map); 2618 } 2619 bus_dmamap_unload(sc->sc_dmat, cmd->src_map); 2620 bus_dmamap_destroy(sc->sc_dmat, cmd->src_map); 2621 free(cmd, M_DEVBUF); 2622 crypto_done(crp); 2623 } 2624 2625 #ifdef HAVE_CRYPTO_LZS 2626 2627 static int 2628 hifn_compression(struct hifn_softc *sc, struct cryptop 
*crp, 2629 struct hifn_command *cmd) 2630 { 2631 struct cryptodesc *crd = crp->crp_desc; 2632 int s, err = 0; 2633 2634 cmd->compcrd = crd; 2635 cmd->base_masks |= HIFN_BASE_CMD_COMP; 2636 2637 if ((crp->crp_flags & CRYPTO_F_IMBUF) == 0) { 2638 /* 2639 * XXX can only handle mbufs right now since we can 2640 * XXX dynamically resize them. 2641 */ 2642 err = EINVAL; 2643 return (ENOMEM); 2644 } 2645 2646 if ((crd->crd_flags & CRD_F_COMP) == 0) 2647 cmd->base_masks |= HIFN_BASE_CMD_DECODE; 2648 if (crd->crd_alg == CRYPTO_LZS_COMP) 2649 cmd->comp_masks |= HIFN_COMP_CMD_ALG_LZS | 2650 HIFN_COMP_CMD_CLEARHIST; 2651 2652 if (bus_dmamap_create(sc->sc_dmat, HIFN_MAX_DMALEN, MAX_SCATTER, 2653 HIFN_MAX_SEGLEN, 0, BUS_DMA_NOWAIT, &cmd->src_map)) { 2654 err = ENOMEM; 2655 goto fail; 2656 } 2657 2658 if (bus_dmamap_create(sc->sc_dmat, HIFN_MAX_DMALEN, MAX_SCATTER, 2659 HIFN_MAX_SEGLEN, 0, BUS_DMA_NOWAIT, &cmd->dst_map)) { 2660 err = ENOMEM; 2661 goto fail; 2662 } 2663 2664 if (crp->crp_flags & CRYPTO_F_IMBUF) { 2665 int len; 2666 2667 if (bus_dmamap_load_mbuf(sc->sc_dmat, cmd->src_map, 2668 cmd->srcu.src_m, BUS_DMA_NOWAIT)) { 2669 err = ENOMEM; 2670 goto fail; 2671 } 2672 2673 len = cmd->src_map->dm_mapsize / MCLBYTES; 2674 if ((cmd->src_map->dm_mapsize % MCLBYTES) != 0) 2675 len++; 2676 len *= MCLBYTES; 2677 2678 if ((crd->crd_flags & CRD_F_COMP) == 0) 2679 len *= 4; 2680 2681 if (len > HIFN_MAX_DMALEN) 2682 len = HIFN_MAX_DMALEN; 2683 2684 cmd->dstu.dst_m = hifn_mkmbuf_chain(len, cmd->srcu.src_m); 2685 if (cmd->dstu.dst_m == NULL) { 2686 err = ENOMEM; 2687 goto fail; 2688 } 2689 2690 if (bus_dmamap_load_mbuf(sc->sc_dmat, cmd->dst_map, 2691 cmd->dstu.dst_m, BUS_DMA_NOWAIT)) { 2692 err = ENOMEM; 2693 goto fail; 2694 } 2695 } else if (crp->crp_flags & CRYPTO_F_IOV) { 2696 if (bus_dmamap_load_uio(sc->sc_dmat, cmd->src_map, 2697 cmd->srcu.src_io, BUS_DMA_NOWAIT)) { 2698 err = ENOMEM; 2699 goto fail; 2700 } 2701 if (bus_dmamap_load_uio(sc->sc_dmat, cmd->dst_map, 2702 
cmd->dstu.dst_io, BUS_DMA_NOWAIT)) { 2703 err = ENOMEM; 2704 goto fail; 2705 } 2706 } 2707 2708 if (cmd->src_map == cmd->dst_map) 2709 bus_dmamap_sync(sc->sc_dmat, cmd->src_map, 2710 0, cmd->src_map->dm_mapsize, 2711 BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD); 2712 else { 2713 bus_dmamap_sync(sc->sc_dmat, cmd->src_map, 2714 0, cmd->src_map->dm_mapsize, BUS_DMASYNC_PREWRITE); 2715 bus_dmamap_sync(sc->sc_dmat, cmd->dst_map, 2716 0, cmd->dst_map->dm_mapsize, BUS_DMASYNC_PREREAD); 2717 } 2718 2719 cmd->crp = crp; 2720 /* 2721 * Always use session 0. The modes of compression we use are 2722 * stateless and there is always at least one compression 2723 * context, zero. 2724 */ 2725 cmd->session_num = 0; 2726 cmd->softc = sc; 2727 2728 s = splnet(); 2729 err = hifn_compress_enter(sc, cmd); 2730 splx(s); 2731 2732 if (err != 0) 2733 goto fail; 2734 return (0); 2735 2736 fail: 2737 if (cmd->dst_map != NULL) { 2738 if (cmd->dst_map->dm_nsegs > 0) 2739 bus_dmamap_unload(sc->sc_dmat, cmd->dst_map); 2740 bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map); 2741 } 2742 if (cmd->src_map != NULL) { 2743 if (cmd->src_map->dm_nsegs > 0) 2744 bus_dmamap_unload(sc->sc_dmat, cmd->src_map); 2745 bus_dmamap_destroy(sc->sc_dmat, cmd->src_map); 2746 } 2747 free(cmd, M_DEVBUF); 2748 if (err == EINVAL) 2749 hifnstats.hst_invalid++; 2750 else 2751 hifnstats.hst_nomem++; 2752 crp->crp_etype = err; 2753 crypto_done(crp); 2754 return (0); 2755 } 2756 2757 /* 2758 * must be called at splnet() 2759 */ 2760 static int 2761 hifn_compress_enter(struct hifn_softc *sc, struct hifn_command *cmd) 2762 { 2763 struct hifn_dma *dma = sc->sc_dma; 2764 int cmdi, resi; 2765 u_int32_t cmdlen; 2766 2767 if ((dma->cmdu + 1) > HIFN_D_CMD_RSIZE || 2768 (dma->resu + 1) > HIFN_D_CMD_RSIZE) 2769 return (ENOMEM); 2770 2771 if ((dma->srcu + cmd->src_map->dm_nsegs) > HIFN_D_SRC_RSIZE || 2772 (dma->dstu + cmd->dst_map->dm_nsegs) > HIFN_D_DST_RSIZE) 2773 return (ENOMEM); 2774 2775 if (dma->cmdi == HIFN_D_CMD_RSIZE) { 2776 
dma->cmdi = 0; 2777 dma->cmdr[HIFN_D_CMD_RSIZE].l = htole32(HIFN_D_VALID | 2778 HIFN_D_JUMP | HIFN_D_MASKDONEIRQ); 2779 HIFN_CMDR_SYNC(sc, HIFN_D_CMD_RSIZE, 2780 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); 2781 } 2782 cmdi = dma->cmdi++; 2783 cmdlen = hifn_write_command(cmd, dma->command_bufs[cmdi]); 2784 HIFN_CMD_SYNC(sc, cmdi, BUS_DMASYNC_PREWRITE); 2785 2786 /* .p for command/result already set */ 2787 dma->cmdr[cmdi].l = htole32(cmdlen | HIFN_D_VALID | HIFN_D_LAST | 2788 HIFN_D_MASKDONEIRQ); 2789 HIFN_CMDR_SYNC(sc, cmdi, 2790 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); 2791 dma->cmdu++; 2792 if (sc->sc_c_busy == 0) { 2793 WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_C_CTRL_ENA); 2794 sc->sc_c_busy = 1; 2795 SET_LED(sc, HIFN_MIPSRST_LED0); 2796 } 2797 2798 /* 2799 * We don't worry about missing an interrupt (which a "command wait" 2800 * interrupt salvages us from), unless there is more than one command 2801 * in the queue. 2802 */ 2803 if (dma->cmdu > 1) { 2804 sc->sc_dmaier |= HIFN_DMAIER_C_WAIT; 2805 WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier); 2806 } 2807 2808 hifnstats.hst_ipackets++; 2809 hifnstats.hst_ibytes += cmd->src_map->dm_mapsize; 2810 2811 hifn_dmamap_load_src(sc, cmd); 2812 if (sc->sc_s_busy == 0) { 2813 WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_S_CTRL_ENA); 2814 sc->sc_s_busy = 1; 2815 SET_LED(sc, HIFN_MIPSRST_LED1); 2816 } 2817 2818 /* 2819 * Unlike other descriptors, we don't mask done interrupt from 2820 * result descriptor. 
2821 */ 2822 if (dma->resi == HIFN_D_RES_RSIZE) { 2823 dma->resi = 0; 2824 dma->resr[HIFN_D_RES_RSIZE].l = htole32(HIFN_D_VALID | 2825 HIFN_D_JUMP | HIFN_D_MASKDONEIRQ); 2826 HIFN_RESR_SYNC(sc, HIFN_D_RES_RSIZE, 2827 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 2828 } 2829 resi = dma->resi++; 2830 dma->hifn_commands[resi] = cmd; 2831 HIFN_RES_SYNC(sc, resi, BUS_DMASYNC_PREREAD); 2832 dma->resr[resi].l = htole32(HIFN_MAX_RESULT | 2833 HIFN_D_VALID | HIFN_D_LAST); 2834 HIFN_RESR_SYNC(sc, resi, 2835 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 2836 dma->resu++; 2837 if (sc->sc_r_busy == 0) { 2838 WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_R_CTRL_ENA); 2839 sc->sc_r_busy = 1; 2840 SET_LED(sc, HIFN_MIPSRST_LED2); 2841 } 2842 2843 if (cmd->sloplen) 2844 cmd->slopidx = resi; 2845 2846 hifn_dmamap_load_dst(sc, cmd); 2847 2848 if (sc->sc_d_busy == 0) { 2849 WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_D_CTRL_ENA); 2850 sc->sc_d_busy = 1; 2851 } 2852 sc->sc_active = 5; 2853 cmd->cmd_callback = hifn_callback_comp; 2854 return (0); 2855 } 2856 2857 static void 2858 hifn_callback_comp(struct hifn_softc *sc, struct hifn_command *cmd, 2859 u_int8_t *resbuf) 2860 { 2861 struct hifn_base_result baseres; 2862 struct cryptop *crp = cmd->crp; 2863 struct hifn_dma *dma = sc->sc_dma; 2864 struct mbuf *m; 2865 int err = 0, i, u; 2866 u_int32_t olen; 2867 bus_size_t dstsize; 2868 2869 bus_dmamap_sync(sc->sc_dmat, cmd->src_map, 2870 0, cmd->src_map->dm_mapsize, BUS_DMASYNC_POSTWRITE); 2871 bus_dmamap_sync(sc->sc_dmat, cmd->dst_map, 2872 0, cmd->dst_map->dm_mapsize, BUS_DMASYNC_POSTREAD); 2873 2874 dstsize = cmd->dst_map->dm_mapsize; 2875 bus_dmamap_unload(sc->sc_dmat, cmd->dst_map); 2876 2877 memcpy(&baseres, resbuf, sizeof(struct hifn_base_result)); 2878 2879 i = dma->dstk; u = dma->dstu; 2880 while (u != 0) { 2881 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, 2882 offsetof(struct hifn_dma, dstr[i]), sizeof(struct hifn_desc), 2883 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 2884 if 
(dma->dstr[i].l & htole32(HIFN_D_VALID)) { 2885 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, 2886 offsetof(struct hifn_dma, dstr[i]), 2887 sizeof(struct hifn_desc), 2888 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 2889 break; 2890 } 2891 if (++i == (HIFN_D_DST_RSIZE + 1)) 2892 i = 0; 2893 else 2894 u--; 2895 } 2896 dma->dstk = i; dma->dstu = u; 2897 2898 if (baseres.flags & htole16(HIFN_BASE_RES_DSTOVERRUN)) { 2899 bus_size_t xlen; 2900 2901 xlen = dstsize; 2902 2903 m_freem(cmd->dstu.dst_m); 2904 2905 if (xlen == HIFN_MAX_DMALEN) { 2906 /* We've done all we can. */ 2907 err = E2BIG; 2908 goto out; 2909 } 2910 2911 xlen += MCLBYTES; 2912 2913 if (xlen > HIFN_MAX_DMALEN) 2914 xlen = HIFN_MAX_DMALEN; 2915 2916 cmd->dstu.dst_m = hifn_mkmbuf_chain(xlen, 2917 cmd->srcu.src_m); 2918 if (cmd->dstu.dst_m == NULL) { 2919 err = ENOMEM; 2920 goto out; 2921 } 2922 if (bus_dmamap_load_mbuf(sc->sc_dmat, cmd->dst_map, 2923 cmd->dstu.dst_m, BUS_DMA_NOWAIT)) { 2924 err = ENOMEM; 2925 goto out; 2926 } 2927 2928 bus_dmamap_sync(sc->sc_dmat, cmd->src_map, 2929 0, cmd->src_map->dm_mapsize, BUS_DMASYNC_PREWRITE); 2930 bus_dmamap_sync(sc->sc_dmat, cmd->dst_map, 2931 0, cmd->dst_map->dm_mapsize, BUS_DMASYNC_PREREAD); 2932 2933 /* already at splnet... 
*/ 2934 err = hifn_compress_enter(sc, cmd); 2935 if (err != 0) 2936 goto out; 2937 return; 2938 } 2939 2940 olen = dstsize - (letoh16(baseres.dst_cnt) | 2941 (((letoh16(baseres.session) & HIFN_BASE_RES_DSTLEN_M) >> 2942 HIFN_BASE_RES_DSTLEN_S) << 16)); 2943 2944 crp->crp_olen = olen - cmd->compcrd->crd_skip; 2945 2946 bus_dmamap_unload(sc->sc_dmat, cmd->src_map); 2947 bus_dmamap_destroy(sc->sc_dmat, cmd->src_map); 2948 bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map); 2949 2950 m = cmd->dstu.dst_m; 2951 if (m->m_flags & M_PKTHDR) 2952 m->m_pkthdr.len = olen; 2953 crp->crp_buf = (void *)m; 2954 for (; m != NULL; m = m->m_next) { 2955 if (olen >= m->m_len) 2956 olen -= m->m_len; 2957 else { 2958 m->m_len = olen; 2959 olen = 0; 2960 } 2961 } 2962 2963 m_freem(cmd->srcu.src_m); 2964 free(cmd, M_DEVBUF); 2965 crp->crp_etype = 0; 2966 crypto_done(crp); 2967 return; 2968 2969 out: 2970 if (cmd->dst_map != NULL) { 2971 if (cmd->src_map->dm_nsegs != 0) 2972 bus_dmamap_unload(sc->sc_dmat, cmd->src_map); 2973 bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map); 2974 } 2975 if (cmd->src_map != NULL) { 2976 if (cmd->src_map->dm_nsegs != 0) 2977 bus_dmamap_unload(sc->sc_dmat, cmd->src_map); 2978 bus_dmamap_destroy(sc->sc_dmat, cmd->src_map); 2979 } 2980 if (cmd->dstu.dst_m != NULL) 2981 m_freem(cmd->dstu.dst_m); 2982 free(cmd, M_DEVBUF); 2983 crp->crp_etype = err; 2984 crypto_done(crp); 2985 } 2986 2987 static struct mbuf * 2988 hifn_mkmbuf_chain(int totlen, struct mbuf *mtemplate) 2989 { 2990 int len; 2991 struct mbuf *m, *m0, *mlast; 2992 2993 if (mtemplate->m_flags & M_PKTHDR) { 2994 len = MHLEN; 2995 MGETHDR(m0, M_DONTWAIT, MT_DATA); 2996 } else { 2997 len = MLEN; 2998 MGET(m0, M_DONTWAIT, MT_DATA); 2999 } 3000 if (m0 == NULL) 3001 return (NULL); 3002 if (len == MHLEN) 3003 M_DUP_PKTHDR(m0, mtemplate); 3004 MCLGET(m0, M_DONTWAIT); 3005 if (!(m0->m_flags & M_EXT)) 3006 m_freem(m0); 3007 len = MCLBYTES; 3008 3009 totlen -= len; 3010 m0->m_pkthdr.len = m0->m_len = len; 3011 mlast = m0; 
	/* Extend the chain with full clusters until totlen is covered. */
	while (totlen > 0) {
		MGET(m, M_DONTWAIT, MT_DATA);
		if (m == NULL) {
			m_freem(m0);
			return (NULL);
		}
		MCLGET(m, M_DONTWAIT);
		if (!(m->m_flags & M_EXT)) {
			m_freem(m0);
			return (NULL);
		}
		len = MCLBYTES;
		m->m_len = len;
		/* Keep the header mbuf's pkthdr.len in step with the
		 * total chain length. */
		if (m0->m_flags & M_PKTHDR)
			m0->m_pkthdr.len += len;
		totlen -= len;

		mlast->m_next = m;
		mlast = m;
	}

	return (m0);
}
#endif	/* HAVE_CRYPTO_LZS */

/*
 * Write a 32-bit register in register group 0 or 1.
 */
static void
hifn_write_4(struct hifn_softc *sc, int reggrp, bus_size_t reg, u_int32_t val)
{
	/*
	 * 7811 PB3 rev/2 parts lock-up on burst writes to Group 0
	 * and Group 1 registers; avoid conditions that could create
	 * burst writes by doing a read in between the writes.
	 */
	if (sc->sc_flags & HIFN_NO_BURSTWRITE) {
		/* Two consecutive writes to adjacent registers in the
		 * same group could be merged into a burst; break it up
		 * with a harmless read of the revision ID. */
		if (sc->sc_waw_lastgroup == reggrp &&
		    sc->sc_waw_lastreg == reg - 4) {
			bus_space_read_4(sc->sc_st1, sc->sc_sh1, HIFN_1_REVID);
		}
		sc->sc_waw_lastgroup = reggrp;
		sc->sc_waw_lastreg = reg;
	}
	if (reggrp == 0)
		bus_space_write_4(sc->sc_st0, sc->sc_sh0, reg, val);
	else
		bus_space_write_4(sc->sc_st1, sc->sc_sh1, reg, val);

}

/*
 * Read a 32-bit register in register group 0 or 1.  Any read breaks a
 * potential write burst, so reset the write-after-write tracking state.
 */
static u_int32_t
hifn_read_4(struct hifn_softc *sc, int reggrp, bus_size_t reg)
{
	if (sc->sc_flags & HIFN_NO_BURSTWRITE) {
		sc->sc_waw_lastgroup = -1;
		sc->sc_waw_lastreg = 1;
	}
	if (reggrp == 0)
		return (bus_space_read_4(sc->sc_st0, sc->sc_sh0, reg));
	return (bus_space_read_4(sc->sc_st1, sc->sc_sh1, reg));
}