xref: /netbsd-src/sys/dev/pci/hifn7751.c (revision 7788a0781fe6ff2cce37368b4578a7ade0850cb1)
1 /*	$NetBSD: hifn7751.c,v 1.52 2013/06/13 00:55:01 tls Exp $	*/
2 /*	$FreeBSD: hifn7751.c,v 1.5.2.7 2003/10/08 23:52:00 sam Exp $ */
3 /*	$OpenBSD: hifn7751.c,v 1.140 2003/08/01 17:55:54 deraadt Exp $	*/
4 
5 /*
6  * Invertex AEON / Hifn 7751 driver
7  * Copyright (c) 1999 Invertex Inc. All rights reserved.
8  * Copyright (c) 1999 Theo de Raadt
9  * Copyright (c) 2000-2001 Network Security Technologies, Inc.
10  *			http://www.netsec.net
11  * Copyright (c) 2003 Hifn Inc.
12  *
13  * This driver is based on a previous driver by Invertex, for which they
14  * requested:  Please send any comments, feedback, bug-fixes, or feature
15  * requests to software@invertex.com.
16  *
17  * Redistribution and use in source and binary forms, with or without
18  * modification, are permitted provided that the following conditions
19  * are met:
20  *
21  * 1. Redistributions of source code must retain the above copyright
22  *   notice, this list of conditions and the following disclaimer.
23  * 2. Redistributions in binary form must reproduce the above copyright
24  *   notice, this list of conditions and the following disclaimer in the
25  *   documentation and/or other materials provided with the distribution.
26  * 3. The name of the author may not be used to endorse or promote products
27  *   derived from this software without specific prior written permission.
28  *
29  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
30  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
31  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
32  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
33  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
34  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
35  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
36  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
37  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
38  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
39  *
40  * Effort sponsored in part by the Defense Advanced Research Projects
41  * Agency (DARPA) and Air Force Research Laboratory, Air Force
42  * Materiel Command, USAF, under agreement number F30602-01-2-0537.
43  *
44  */
45 
46 /*
 * Driver for various Hifn pre-HIPP encryption processors.
48  */
49 
50 #include <sys/cdefs.h>
51 __KERNEL_RCSID(0, "$NetBSD: hifn7751.c,v 1.52 2013/06/13 00:55:01 tls Exp $");
52 
53 #include <sys/param.h>
54 #include <sys/systm.h>
55 #include <sys/mutex.h>
56 #include <sys/proc.h>
57 #include <sys/errno.h>
58 #include <sys/malloc.h>
59 #include <sys/kernel.h>
60 #include <sys/mbuf.h>
61 #include <sys/device.h>
62 
63 #ifdef __OpenBSD__
64 #include <crypto/crypto.h>
65 #include <dev/rndvar.h>
66 #else
67 #include <opencrypto/cryptodev.h>
68 #include <sys/cprng.h>
69 #include <sys/rnd.h>
70 #include <sys/sha1.h>
71 #endif
72 
73 #include <dev/pci/pcireg.h>
74 #include <dev/pci/pcivar.h>
75 #include <dev/pci/pcidevs.h>
76 
77 #include <dev/pci/hifn7751reg.h>
78 #include <dev/pci/hifn7751var.h>
79 
80 #undef HIFN_DEBUG
81 
82 #ifdef __NetBSD__
83 #define M_DUP_PKTHDR M_COPY_PKTHDR	/* XXX */
84 #endif
85 
86 #ifdef HIFN_DEBUG
87 extern int hifn_debug;		/* patchable */
88 int hifn_debug = 1;
89 #endif
90 
91 #ifdef __OpenBSD__
92 #define HAVE_CRYPTO_LZS		/* OpenBSD OCF supports CRYPTO_COMP_LZS */
93 #endif
94 
95 /*
96  * Prototypes and count for the pci_device structure
97  */
98 #ifdef __OpenBSD__
99 static int hifn_probe((struct device *, void *, void *);
100 #else
101 static int hifn_probe(device_t, cfdata_t, void *);
102 #endif
103 static void hifn_attach(device_t, device_t, void *);
104 
105 CFATTACH_DECL_NEW(hifn, sizeof(struct hifn_softc),
106     hifn_probe, hifn_attach, NULL, NULL);
107 
108 #ifdef __OpenBSD__
109 struct cfdriver hifn_cd = {
110 	0, "hifn", DV_DULL
111 };
112 #endif
113 
114 static void	hifn_reset_board(struct hifn_softc *, int);
115 static void	hifn_reset_puc(struct hifn_softc *);
116 static void	hifn_puc_wait(struct hifn_softc *);
117 static const char *hifn_enable_crypto(struct hifn_softc *, pcireg_t);
118 static void	hifn_set_retry(struct hifn_softc *);
119 static void	hifn_init_dma(struct hifn_softc *);
120 static void	hifn_init_pci_registers(struct hifn_softc *);
121 static int	hifn_sramsize(struct hifn_softc *);
122 static int	hifn_dramsize(struct hifn_softc *);
123 static int	hifn_ramtype(struct hifn_softc *);
124 static void	hifn_sessions(struct hifn_softc *);
125 static int	hifn_intr(void *);
126 static u_int	hifn_write_command(struct hifn_command *, u_int8_t *);
127 static u_int32_t hifn_next_signature(u_int32_t a, u_int cnt);
128 static int	hifn_newsession(void*, u_int32_t *, struct cryptoini *);
129 static int	hifn_freesession(void*, u_int64_t);
130 static int	hifn_process(void*, struct cryptop *, int);
131 static void	hifn_callback(struct hifn_softc *, struct hifn_command *,
132 			      u_int8_t *);
133 static int	hifn_crypto(struct hifn_softc *, struct hifn_command *,
134 			    struct cryptop*, int);
135 static int	hifn_readramaddr(struct hifn_softc *, int, u_int8_t *);
136 static int	hifn_writeramaddr(struct hifn_softc *, int, u_int8_t *);
137 static int	hifn_dmamap_aligned(bus_dmamap_t);
138 static int	hifn_dmamap_load_src(struct hifn_softc *,
139 				     struct hifn_command *);
140 static int	hifn_dmamap_load_dst(struct hifn_softc *,
141 				     struct hifn_command *);
142 static int	hifn_init_pubrng(struct hifn_softc *);
143 static void	hifn_rng(void *);
144 static void	hifn_rng_locked(void *);
145 static void	hifn_tick(void *);
146 static void	hifn_abort(struct hifn_softc *);
147 static void	hifn_alloc_slot(struct hifn_softc *, int *, int *, int *,
148 				int *);
149 static void	hifn_write_4(struct hifn_softc *, int, bus_size_t, u_int32_t);
150 static u_int32_t hifn_read_4(struct hifn_softc *, int, bus_size_t);
151 #ifdef	HAVE_CRYPTO_LZS
152 static int	hifn_compression(struct hifn_softc *, struct cryptop *,
153 				 struct hifn_command *);
154 static struct mbuf *hifn_mkmbuf_chain(int, struct mbuf *);
155 static int	hifn_compress_enter(struct hifn_softc *, struct hifn_command *);
156 static void	hifn_callback_comp(struct hifn_softc *, struct hifn_command *,
157 				   u_int8_t *);
158 #endif	/* HAVE_CRYPTO_LZS */
159 
160 struct hifn_stats hifnstats;
161 
/*
 * Table of supported boards: PCI IDs, capability flags, and the
 * human-readable name printed at attach time.  Scanned linearly by
 * hifn_lookup(); terminated by the NULL-name sentinel entry.
 */
static const struct hifn_product {
	pci_vendor_id_t		hifn_vendor;	/* PCI vendor ID */
	pci_product_id_t	hifn_product;	/* PCI product ID */
	int			hifn_flags;	/* HIFN_* capability flags */
	const char		*hifn_name;	/* model name for dmesg */
} hifn_products[] = {
	{ PCI_VENDOR_INVERTEX,	PCI_PRODUCT_INVERTEX_AEON,
	  0,
	  "Invertex AEON",
	},

	{ PCI_VENDOR_HIFN,	PCI_PRODUCT_HIFN_7751,
	  0,
	  "Hifn 7751",
	},
	{ PCI_VENDOR_NETSEC,	PCI_PRODUCT_NETSEC_7751,
	  0,
	  "Hifn 7751 (NetSec)"
	},

	{ PCI_VENDOR_HIFN,	PCI_PRODUCT_HIFN_7811,
	  HIFN_IS_7811 | HIFN_HAS_RNG | HIFN_HAS_LEDS | HIFN_NO_BURSTWRITE,
	  "Hifn 7811",
	},

	{ PCI_VENDOR_HIFN,	PCI_PRODUCT_HIFN_7951,
	  HIFN_HAS_RNG | HIFN_HAS_PUBLIC,
	  "Hifn 7951",
	},

	{ PCI_VENDOR_HIFN,	PCI_PRODUCT_HIFN_7955,
	  HIFN_HAS_RNG | HIFN_HAS_PUBLIC | HIFN_IS_7956 | HIFN_HAS_AES,
	  "Hifn 7955",
	},

	{ PCI_VENDOR_HIFN,	PCI_PRODUCT_HIFN_7956,
	  HIFN_HAS_RNG | HIFN_HAS_PUBLIC | HIFN_IS_7956 | HIFN_HAS_AES,
	  "Hifn 7956",
	},


	/* sentinel */
	{ 0,			0,
	  0,
	  NULL
	}
};
208 
209 static const struct hifn_product *
210 hifn_lookup(const struct pci_attach_args *pa)
211 {
212 	const struct hifn_product *hp;
213 
214 	for (hp = hifn_products; hp->hifn_name != NULL; hp++) {
215 		if (PCI_VENDOR(pa->pa_id) == hp->hifn_vendor &&
216 		    PCI_PRODUCT(pa->pa_id) == hp->hifn_product)
217 			return (hp);
218 	}
219 	return (NULL);
220 }
221 
222 static int
223 hifn_probe(device_t parent, cfdata_t match, void *aux)
224 {
225 	struct pci_attach_args *pa = aux;
226 
227 	if (hifn_lookup(pa) != NULL)
228 		return 1;
229 
230 	return 0;
231 }
232 
/*
 * Attach a matched Hifn device: map its two register BARs, allocate
 * and load the shared DMA descriptor area, unlock the crypto engine,
 * size the on-board RAM, hook the interrupt, and register the
 * supported algorithms with opencrypto.  On failure, resources are
 * released via the fail_* unwind labels at the bottom.
 */
static void
hifn_attach(device_t parent, device_t self, void *aux)
{
	struct hifn_softc *sc = device_private(self);
	struct pci_attach_args *pa = aux;
	const struct hifn_product *hp;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	const char *hifncap;
	char rbase;
	bus_size_t iosize0, iosize1;
	u_int32_t cmd;
	u_int16_t ena;
	bus_dma_segment_t seg;
	bus_dmamap_t dmamap;
	int rseg;
	void *kva;

	hp = hifn_lookup(pa);
	if (hp == NULL) {
		/* probe matched, so a failed lookup here is a driver bug */
		printf("\n");
		panic("hifn_attach: impossible");
	}

	pci_aprint_devinfo_fancy(pa, "Crypto processor", hp->hifn_name, 1);

	sc->sc_dv = self;
	sc->sc_pci_pc = pa->pa_pc;
	sc->sc_pci_tag = pa->pa_tag;

	sc->sc_flags = hp->hifn_flags;

	/* Enable PCI bus mastering so the chip can DMA. */
	cmd = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	cmd |= PCI_COMMAND_MASTER_ENABLE;
	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, cmd);

	/* Map both memory BARs (register group 0 and group 1). */
	if (pci_mapreg_map(pa, HIFN_BAR0, PCI_MAPREG_TYPE_MEM, 0,
	    &sc->sc_st0, &sc->sc_sh0, NULL, &iosize0)) {
		aprint_error_dev(sc->sc_dv, "can't map mem space %d\n", 0);
		return;
	}

	if (pci_mapreg_map(pa, HIFN_BAR1, PCI_MAPREG_TYPE_MEM, 0,
	    &sc->sc_st1, &sc->sc_sh1, NULL, &iosize1)) {
		aprint_error_dev(sc->sc_dv, "can't find mem space %d\n", 1);
		goto fail_io0;
	}

	hifn_set_retry(sc);

	/* Write-aware-write state for parts that can't burst-write. */
	if (sc->sc_flags & HIFN_NO_BURSTWRITE) {
		sc->sc_waw_lastgroup = -1;
		sc->sc_waw_lastreg = 1;
	}

	/*
	 * Allocate, map and load one contiguous block for the shared
	 * struct hifn_dma (descriptor rings plus command/result buffers).
	 */
	sc->sc_dmat = pa->pa_dmat;
	if (bus_dmamem_alloc(sc->sc_dmat, sizeof(*sc->sc_dma), PAGE_SIZE, 0,
	    &seg, 1, &rseg, BUS_DMA_NOWAIT)) {
		aprint_error_dev(sc->sc_dv, "can't alloc DMA buffer\n");
		goto fail_io1;
        }
	if (bus_dmamem_map(sc->sc_dmat, &seg, rseg, sizeof(*sc->sc_dma), &kva,
	    BUS_DMA_NOWAIT)) {
		aprint_error_dev(sc->sc_dv, "can't map DMA buffers (%lu bytes)\n",
		    (u_long)sizeof(*sc->sc_dma));
		bus_dmamem_free(sc->sc_dmat, &seg, rseg);
		goto fail_io1;
	}
	if (bus_dmamap_create(sc->sc_dmat, sizeof(*sc->sc_dma), 1,
	    sizeof(*sc->sc_dma), 0, BUS_DMA_NOWAIT, &dmamap)) {
		aprint_error_dev(sc->sc_dv, "can't create DMA map\n");
		bus_dmamem_unmap(sc->sc_dmat, kva, sizeof(*sc->sc_dma));
		bus_dmamem_free(sc->sc_dmat, &seg, rseg);
		goto fail_io1;
	}
	if (bus_dmamap_load(sc->sc_dmat, dmamap, kva, sizeof(*sc->sc_dma),
	    NULL, BUS_DMA_NOWAIT)) {
		aprint_error_dev(sc->sc_dv, "can't load DMA map\n");
		bus_dmamap_destroy(sc->sc_dmat, dmamap);
		bus_dmamem_unmap(sc->sc_dmat, kva, sizeof(*sc->sc_dma));
		bus_dmamem_free(sc->sc_dmat, &seg, rseg);
		goto fail_io1;
	}
	sc->sc_dmamap = dmamap;
	sc->sc_dma = (struct hifn_dma *)kva;
	memset(sc->sc_dma, 0, sizeof(*sc->sc_dma));

	/* Reset, unlock the crypto engine, and initialize the chip. */
	hifn_reset_board(sc, 0);

	if ((hifncap = hifn_enable_crypto(sc, pa->pa_id)) == NULL) {
		aprint_error_dev(sc->sc_dv, "crypto enabling failed\n");
		goto fail_mem;
	}
	hifn_reset_puc(sc);

	hifn_init_dma(sc);
	hifn_init_pci_registers(sc);

	/* XXX can't dynamically determine ram type for 795x; force dram */
	if (sc->sc_flags & HIFN_IS_7956)
		sc->sc_drammodel = 1;
	else if (hifn_ramtype(sc))
		goto fail_mem;

	if (sc->sc_drammodel == 0)
		hifn_sramsize(sc);
	else
		hifn_dramsize(sc);

	/*
	 * Workaround for NetSec 7751 rev A: half ram size because two
	 * of the address lines were left floating
	 */
	if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_NETSEC &&
	    PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_NETSEC_7751 &&
	    PCI_REVISION(pa->pa_class) == 0x61)
		sc->sc_ramsize >>= 1;

	/* Hook the interrupt. */
	if (pci_intr_map(pa, &ih)) {
		aprint_error_dev(sc->sc_dv, "couldn't map interrupt\n");
		goto fail_mem;
	}
	intrstr = pci_intr_string(pc, ih);
#ifdef	__OpenBSD__
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, hifn_intr, sc,
	    device_xname(self));
#else
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, hifn_intr, sc);
#endif
	if (sc->sc_ih == NULL) {
		aprint_error_dev(sc->sc_dv, "couldn't establish interrupt\n");
		if (intrstr != NULL)
			aprint_error(" at %s", intrstr);
		aprint_error("\n");
		goto fail_mem;
	}

	hifn_sessions(sc);

	/* rseg is reused below only to pretty-print the RAM size. */
	rseg = sc->sc_ramsize / 1024;
	rbase = 'K';
	if (sc->sc_ramsize >= (1024 * 1024)) {
		rbase = 'M';
		rseg /= 1024;
	}
	aprint_normal_dev(sc->sc_dv, "%s, %d%cB %cRAM, interrupting at %s\n",
	    hifncap, rseg, rbase,
	    sc->sc_drammodel ? 'D' : 'S', intrstr);

	sc->sc_cid = crypto_get_driverid(0);
	if (sc->sc_cid < 0) {
		aprint_error_dev(sc->sc_dv, "couldn't get crypto driver id\n");
		goto fail_intr;
	}

	/* Re-read the enable level to decide which algorithms to offer. */
	WRITE_REG_0(sc, HIFN_0_PUCNFG,
	    READ_REG_0(sc, HIFN_0_PUCNFG) | HIFN_PUCNFG_CHIPID);
	ena = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA;

	switch (ena) {
	case HIFN_PUSTAT_ENA_2:
		/* strong crypto: 3DES/ARC4 (and AES on capable parts) */
		crypto_register(sc->sc_cid, CRYPTO_3DES_CBC, 0, 0,
		    hifn_newsession, hifn_freesession, hifn_process, sc);
		crypto_register(sc->sc_cid, CRYPTO_ARC4, 0, 0,
		    hifn_newsession, hifn_freesession, hifn_process, sc);
		if (sc->sc_flags & HIFN_HAS_AES)
			crypto_register(sc->sc_cid, CRYPTO_AES_CBC,  0, 0,
				hifn_newsession, hifn_freesession,
				hifn_process, sc);
		/*FALLTHROUGH*/
	case HIFN_PUSTAT_ENA_1:
		crypto_register(sc->sc_cid, CRYPTO_MD5, 0, 0,
		    hifn_newsession, hifn_freesession, hifn_process, sc);
		crypto_register(sc->sc_cid, CRYPTO_SHA1, 0, 0,
		    hifn_newsession, hifn_freesession, hifn_process, sc);
		crypto_register(sc->sc_cid, CRYPTO_MD5_HMAC_96, 0, 0,
		    hifn_newsession, hifn_freesession, hifn_process, sc);
		crypto_register(sc->sc_cid, CRYPTO_SHA1_HMAC_96, 0, 0,
		    hifn_newsession, hifn_freesession, hifn_process, sc);
		crypto_register(sc->sc_cid, CRYPTO_DES_CBC, 0, 0,
		    hifn_newsession, hifn_freesession, hifn_process, sc);
		break;
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, 0,
	    sc->sc_dmamap->dm_mapsize,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	if (sc->sc_flags & (HIFN_HAS_PUBLIC | HIFN_HAS_RNG)) {
		hifn_init_pubrng(sc);
		sc->sc_rng_need = RND_POOLBITS / NBBY;
	}

	mutex_init(&sc->sc_mtx, MUTEX_DEFAULT, IPL_VM);

#ifdef	__OpenBSD__
	timeout_set(&sc->sc_tickto, hifn_tick, sc);
	timeout_add(&sc->sc_tickto, hz);
#else
	callout_init(&sc->sc_tickto, CALLOUT_MPSAFE);
	callout_reset(&sc->sc_tickto, hz, hifn_tick, sc);
#endif
	return;

	/* Error unwind: release resources in reverse order of setup. */
fail_intr:
	pci_intr_disestablish(pc, sc->sc_ih);
fail_mem:
	bus_dmamap_unload(sc->sc_dmat, dmamap);
	bus_dmamap_destroy(sc->sc_dmat, dmamap);
	bus_dmamem_unmap(sc->sc_dmat, kva, sizeof(*sc->sc_dma));
	bus_dmamem_free(sc->sc_dmat, &seg, rseg);

	/* Turn off DMA polling */
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
	    HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);

fail_io1:
	bus_space_unmap(sc->sc_st1, sc->sc_sh1, iosize1);
fail_io0:
	bus_space_unmap(sc->sc_st0, sc->sc_sh0, iosize0);
}
455 
456 static void
457 hifn_rng_get(size_t bytes, void *priv)
458 {
459 	struct hifn_softc *sc = priv;
460 
461 	mutex_enter(&sc->sc_mtx);
462 	sc->sc_rng_need = bytes;
463 
464 	hifn_rng_locked(sc);
465 	mutex_exit(&sc->sc_mtx);
466 }
467 
/*
 * Initialize the public key engine and/or the RNG, according to
 * sc_flags.  Returns 0 on success, 1 if the public key unit fails to
 * come out of reset.
 */
static int
hifn_init_pubrng(struct hifn_softc *sc)
{
	u_int32_t r;
	int i;

	if ((sc->sc_flags & HIFN_IS_7811) == 0) {
		/* Reset 7951 public key/rng engine */
		WRITE_REG_1(sc, HIFN_1_PUB_RESET,
		    READ_REG_1(sc, HIFN_1_PUB_RESET) | HIFN_PUBRST_RESET);

		/* Wait up to 100ms for the reset bit to clear. */
		for (i = 0; i < 100; i++) {
			DELAY(1000);
			if ((READ_REG_1(sc, HIFN_1_PUB_RESET) &
			    HIFN_PUBRST_RESET) == 0)
				break;
		}

		if (i == 100) {
			printf("%s: public key init failed\n",
			    device_xname(sc->sc_dv));
			return (1);
		}
	}

	/* Enable the rng, if available */
	if (sc->sc_flags & HIFN_HAS_RNG) {
		if (sc->sc_flags & HIFN_IS_7811) {
			/* 7811: cycle the enable bit around a reconfigure */
			r = READ_REG_1(sc, HIFN_1_7811_RNGENA);
			if (r & HIFN_7811_RNGENA_ENA) {
				r &= ~HIFN_7811_RNGENA_ENA;
				WRITE_REG_1(sc, HIFN_1_7811_RNGENA, r);
			}
			WRITE_REG_1(sc, HIFN_1_7811_RNGCFG,
			    HIFN_7811_RNGCFG_DEFL);
			r |= HIFN_7811_RNGENA_ENA;
			WRITE_REG_1(sc, HIFN_1_7811_RNGENA, r);
		} else
			WRITE_REG_1(sc, HIFN_1_RNG_CONFIG,
			    READ_REG_1(sc, HIFN_1_RNG_CONFIG) |
			    HIFN_RNGCFG_ENA);

		/*
		 * The Hifn RNG documentation states that at their
		 * recommended "conservative" RNG config values,
		 * the RNG must warm up for 0.4s before providing
		 * data that meet their worst-case estimate of 0.06
		 * bits of random data per output register bit.
		 *
		 * NOTE(review): DELAY(4000) is 4 ms, not the 0.4 s
		 * described above — confirm the intended warm-up time.
		 */
		DELAY(4000);

#ifdef __NetBSD__
		rndsource_setcb(&sc->sc_rnd_source, hifn_rng_get, sc);
		/*
		 * XXX Careful!  The use of RND_FLAG_NO_ESTIMATE
		 * XXX here is unobvious: we later feed raw bits
		 * XXX into the "entropy pool" with rnd_add_data,
		 * XXX explicitly supplying an entropy estimate.
		 * XXX In this context, NO_ESTIMATE serves only
		 * XXX to prevent rnd_add_data from trying to
		 * XXX use the *time at which we added the data*
		 * XXX as entropy, which is not a good idea since
		 * XXX we add data periodically from a callout.
		 */
		rnd_attach_source(&sc->sc_rnd_source, device_xname(sc->sc_dv),
				  RND_TYPE_RNG,
				  RND_FLAG_NO_ESTIMATE|RND_FLAG_HASCB);
#endif

		/* Poll roughly 100 times a second, at least once per tick. */
		if (hz >= 100)
			sc->sc_rnghz = hz / 100;
		else
			sc->sc_rnghz = 1;
#ifdef	__OpenBSD__
		timeout_set(&sc->sc_rngto, hifn_rng, sc);
#else	/* !__OpenBSD__ */
		callout_init(&sc->sc_rngto, CALLOUT_MPSAFE);
#endif	/* !__OpenBSD__ */
	}

	/* Enable public key engine, if available */
	if (sc->sc_flags & HIFN_HAS_PUBLIC) {
		WRITE_REG_1(sc, HIFN_1_PUB_IEN, HIFN_PUBIEN_DONE);
		sc->sc_dmaier |= HIFN_DMAIER_PUBDONE;
		WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
	}

	/* Call directly into the RNG once to prime the pool. */
	hifn_rng(sc);   /* Sets callout/timeout at end */

	return (0);
}
560 
561 static void
562 hifn_rng_locked(void *vsc)
563 {
564 	struct hifn_softc *sc = vsc;
565 #ifdef __NetBSD__
566 	uint32_t num[64];
567 #else
568 	uint32_t num[2];
569 #endif
570 	uint32_t sts;
571 	int i;
572 	size_t got, gotent;
573 
574 	if (sc->sc_rng_need < 1) {
575 		callout_stop(&sc->sc_rngto);
576 		return;
577 	}
578 
579 	if (sc->sc_flags & HIFN_IS_7811) {
580 		for (i = 0; i < 5; i++) {	/* XXX why 5? */
581 			sts = READ_REG_1(sc, HIFN_1_7811_RNGSTS);
582 			if (sts & HIFN_7811_RNGSTS_UFL) {
583 				printf("%s: RNG underflow: disabling\n",
584 				    device_xname(sc->sc_dv));
585 				return;
586 			}
587 			if ((sts & HIFN_7811_RNGSTS_RDY) == 0)
588 				break;
589 
590 			/*
591 			 * There are at least two words in the RNG FIFO
592 			 * at this point.
593 			 */
594 			num[0] = READ_REG_1(sc, HIFN_1_7811_RNGDAT);
595 			num[1] = READ_REG_1(sc, HIFN_1_7811_RNGDAT);
596 			got = 2 * sizeof(num[0]);
597 			gotent = (got * NBBY) / HIFN_RNG_BITSPER;
598 
599 #ifdef __NetBSD__
600 			rnd_add_data(&sc->sc_rnd_source, num, got, gotent);
601 			sc->sc_rng_need -= gotent;
602 #else
603 			/*
604 			 * XXX This is a really bad idea.
605 			 * XXX Hifn estimate as little as 0.06
606 			 * XXX actual bits of entropy per output
607 			 * XXX register bit.  How can we tell the
608 			 * XXX kernel RNG subsystem we're handing
609 			 * XXX it 64 "true" random bits, for any
610 			 * XXX sane value of "true"?
611 			 * XXX
612 			 * XXX The right thing to do here, if we
613 			 * XXX cannot supply an estimate ourselves,
614 			 * XXX would be to hash the bits locally.
615 			 */
616 			add_true_randomness(num[0]);
617 			add_true_randomness(num[1]);
618 #endif
619 
620 		}
621 	} else {
622 		int nwords = 0;
623 
624 		if (sc->sc_rng_need) {
625 			nwords = (sc->sc_rng_need * NBBY) / HIFN_RNG_BITSPER;
626 		}
627 
628 		if (nwords < 2) {
629 			nwords = 2;
630 		}
631 
632 		/*
633 		 * We must be *extremely* careful here.  The Hifn
634 		 * 795x differ from the published 6500 RNG design
635 		 * in more ways than the obvious lack of the output
636 		 * FIFO and LFSR control registers.  In fact, there
637 		 * is only one LFSR, instead of the 6500's two, and
638 		 * it's 32 bits, not 31.
639 		 *
640 		 * Further, a block diagram obtained from Hifn shows
641 		 * a very curious latching of this register: the LFSR
642 		 * rotates at a frequency of RNG_Clk / 8, but the
643 		 * RNG_Data register is latched at a frequency of
644 		 * RNG_Clk, which means that it is possible for
645 		 * consecutive reads of the RNG_Data register to read
646 		 * identical state from the LFSR.  The simplest
647 		 * workaround seems to be to read eight samples from
648 		 * the register for each one that we use.  Since each
649 		 * read must require at least one PCI cycle, and
650 		 * RNG_Clk is at least PCI_Clk, this is safe.
651 		 */
652 		for(i = 0 ; i < nwords * 8; i++)
653 		{
654 			volatile u_int32_t regtmp;
655 			regtmp = READ_REG_1(sc, HIFN_1_RNG_DATA);
656 			num[i / 8] = regtmp;
657 		}
658 
659 		got = nwords * sizeof(num[0]);
660 		gotent = (got * NBBY) / HIFN_RNG_BITSPER;
661 #ifdef __NetBSD__
662 		rnd_add_data(&sc->sc_rnd_source, num, got, gotent);
663 		sc->sc_rng_need -= gotent;
664 #else
665 		/* XXX a bad idea; see 7811 block above */
666 		add_true_randomness(num[0]);
667 #endif
668 	}
669 
670 #ifdef	__OpenBSD__
671 	timeout_add(&sc->sc_rngto, sc->sc_rnghz);
672 #else
673 	if (sc->sc_rng_need > 0) {
674 		callout_reset(&sc->sc_rngto, sc->sc_rnghz, hifn_rng, sc);
675 	}
676 #endif
677 }
678 
679 static void
680 hifn_rng(void *vsc)
681 {
682 	struct hifn_softc *sc = vsc;
683 
684 	mutex_spin_enter(&sc->sc_mtx);
685 	hifn_rng_locked(vsc);
686 	mutex_spin_exit(&sc->sc_mtx);
687 }
688 
689 static void
690 hifn_puc_wait(struct hifn_softc *sc)
691 {
692 	int i;
693 
694 	for (i = 5000; i > 0; i--) {
695 		DELAY(1);
696 		if (!(READ_REG_0(sc, HIFN_0_PUCTRL) & HIFN_PUCTRL_RESET))
697 			break;
698 	}
699 	if (!i)
700 		printf("%s: proc unit did not reset\n", device_xname(sc->sc_dv));
701 }
702 
703 /*
704  * Reset the processing unit.
705  */
706 static void
707 hifn_reset_puc(struct hifn_softc *sc)
708 {
709 	/* Reset processing unit */
710 	WRITE_REG_0(sc, HIFN_0_PUCTRL, HIFN_PUCTRL_DMAENA);
711 	hifn_puc_wait(sc);
712 }
713 
714 static void
715 hifn_set_retry(struct hifn_softc *sc)
716 {
717 	u_int32_t r;
718 
719 	r = pci_conf_read(sc->sc_pci_pc, sc->sc_pci_tag, HIFN_TRDY_TIMEOUT);
720 	r &= 0xffff0000;
721 	pci_conf_write(sc->sc_pci_pc, sc->sc_pci_tag, HIFN_TRDY_TIMEOUT, r);
722 }
723 
724 /*
 * Resets the board.  Values in the registers are left as is
726  * from the reset (i.e. initial values are assigned elsewhere).
727  */
/*
 * "full" != 0 drops both MSTRESET and DMARESET during the reset phase;
 * otherwise MSTRESET is held and the processing unit is reset
 * separately via hifn_reset_puc().
 */
static void
hifn_reset_board(struct hifn_softc *sc, int full)
{
	u_int32_t reg;

	/*
	 * Set polling in the DMA configuration register to zero.  0x7 avoids
	 * resetting the board and zeros out the other fields.
	 */
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
	    HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);

	/*
	 * Now that polling has been disabled, we have to wait 1 ms
	 * before resetting the board.
	 */
	DELAY(1000);

	/* Reset the DMA unit */
	if (full) {
		WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MODE);
		DELAY(1000);
	} else {
		WRITE_REG_1(sc, HIFN_1_DMA_CNFG,
		    HIFN_DMACNFG_MODE | HIFN_DMACNFG_MSTRESET);
		hifn_reset_puc(sc);
	}

	/* The shared descriptor area is stale after a reset; clear it. */
	memset(sc->sc_dma, 0, sizeof(*sc->sc_dma));

	/* Bring dma unit out of reset */
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
	    HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);

	hifn_puc_wait(sc);

	hifn_set_retry(sc);

	/* 7811: wait (up to ~1s) for context RAM initialization. */
	if (sc->sc_flags & HIFN_IS_7811) {
		for (reg = 0; reg < 1000; reg++) {
			if (READ_REG_1(sc, HIFN_1_7811_MIPSRST) &
			    HIFN_MIPSRST_CRAMINIT)
				break;
			DELAY(1000);
		}
		if (reg == 1000)
			printf(": cram init timeout\n");
	}
}
777 
/*
 * Advance the unlock-signature LFSR by "cnt" steps.  Each step shifts
 * the accumulator left by one bit and shifts in the parity of the bits
 * selected by the 0x80080125 tap mask.
 */
static u_int32_t
hifn_next_signature(u_int32_t a, u_int cnt)
{
	u_int32_t par;
	u_int step;

	for (step = 0; step < cnt; step++) {
		/* fold the tapped bits down to their parity */
		par = a & 0x80080125;
		par ^= par >> 16;
		par ^= par >> 8;
		par ^= par >> 4;
		par ^= par >> 2;
		par ^= par >> 1;

		a = (par & 1) ^ (a << 1);
	}

	return a;
}
799 
/*
 * Per-board unlock keys used by hifn_enable_crypto(): a 13-byte
 * "card id" sequence indexed by PCI vendor/product.  All currently
 * known boards use the all-zero key.
 */
static struct pci2id {
	u_short		pci_vendor;
	u_short		pci_prod;
	char		card_id[13];
} const pci2id[] = {
	{
		PCI_VENDOR_HIFN,
		PCI_PRODUCT_HIFN_7951,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	}, {
		PCI_VENDOR_HIFN,
		PCI_PRODUCT_HIFN_7955,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	}, {
		PCI_VENDOR_HIFN,
		PCI_PRODUCT_HIFN_7956,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	}, {
		PCI_VENDOR_NETSEC,
		PCI_PRODUCT_NETSEC_7751,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	}, {
		PCI_VENDOR_INVERTEX,
		PCI_PRODUCT_INVERTEX_AEON,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	}, {
		PCI_VENDOR_HIFN,
		PCI_PRODUCT_HIFN_7811,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	}, {
		/*
		 * Other vendors share this PCI ID as well, such as
		 * http://www.powercrypt.com, and obviously they also
		 * use the same key.
		 */
		PCI_VENDOR_HIFN,
		PCI_PRODUCT_HIFN_7751,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	},
};
847 
848 /*
 * Checks to see if crypto is already enabled.  If crypto isn't enabled,
850  * "hifn_enable_crypto" is called to enable it.  The check is important,
851  * as enabling crypto twice will lock the board.
852  */
static const char *
hifn_enable_crypto(struct hifn_softc *sc, pcireg_t pciid)
{
	u_int32_t dmacfg, ramcfg, encl, addr, i;
	const char *offtbl = NULL;

	/* Look up the 13-byte unlock key for this board. */
	for (i = 0; i < sizeof(pci2id)/sizeof(pci2id[0]); i++) {
		if (pci2id[i].pci_vendor == PCI_VENDOR(pciid) &&
		    pci2id[i].pci_prod == PCI_PRODUCT(pciid)) {
			offtbl = pci2id[i].card_id;
			break;
		}
	}

	if (offtbl == NULL) {
#ifdef HIFN_DEBUG
		aprint_debug_dev(sc->sc_dv, "Unknown card!\n");
#endif
		return (NULL);
	}

	/* Save config registers; both are restored before returning. */
	ramcfg = READ_REG_0(sc, HIFN_0_PUCNFG);
	dmacfg = READ_REG_1(sc, HIFN_1_DMA_CNFG);

	/*
	 * The RAM config register's encrypt level bit needs to be set before
	 * every read performed on the encryption level register.
	 */
	WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg | HIFN_PUCNFG_CHIPID);

	encl = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA;

	/*
	 * Make sure we don't re-unlock.  Two unlocks kills chip until the
	 * next reboot.
	 */
	if (encl == HIFN_PUSTAT_ENA_1 || encl == HIFN_PUSTAT_ENA_2) {
#ifdef HIFN_DEBUG
		aprint_debug_dev(sc->sc_dv, "Strong Crypto already enabled!\n");
#endif
		goto report;
	}

	if (encl != 0 && encl != HIFN_PUSTAT_ENA_0) {
#ifdef HIFN_DEBUG
		aprint_debug_dev(sc->sc_dv, "Unknown encryption level\n");
#endif
		return (NULL);
	}

	/* Enter unlock mode and fetch the starting LFSR value. */
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_UNLOCK |
	    HIFN_DMACNFG_MSTRESET | HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
	DELAY(1000);
	addr = READ_REG_1(sc, HIFN_1_UNLOCK_SECRET1);
	DELAY(1000);
	WRITE_REG_1(sc, HIFN_1_UNLOCK_SECRET2, 0);
	DELAY(1000);

	/* Write the 13-step signature sequence derived from the key. */
	for (i = 0; i <= 12; i++) {
		addr = hifn_next_signature(addr, offtbl[i] + 0x101);
		WRITE_REG_1(sc, HIFN_1_UNLOCK_SECRET2, addr);

		DELAY(1000);
	}

	/* Re-read the enable level to see whether the unlock worked. */
	WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg | HIFN_PUCNFG_CHIPID);
	encl = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA;

#ifdef HIFN_DEBUG
	if (encl != HIFN_PUSTAT_ENA_1 && encl != HIFN_PUSTAT_ENA_2)
		aprint_debug("Encryption engine is permanently locked until next system reset.");
	else
		aprint_debug("Encryption engine enabled successfully!");
#endif

report:
	WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg);
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, dmacfg);

	/* Map the enable level to the capability string for dmesg. */
	switch (encl) {
	case HIFN_PUSTAT_ENA_0:
		return ("LZS-only (no encr/auth)");

	case HIFN_PUSTAT_ENA_1:
		return ("DES");

	case HIFN_PUSTAT_ENA_2:
		if (sc->sc_flags & HIFN_HAS_AES)
		    return ("3DES/AES");
		else
		    return ("3DES");

	default:
		return ("disabled");
	}
	/* NOTREACHED */
}
950 
951 /*
952  * Give initial values to the registers listed in the "Register Space"
953  * section of the HIFN Software Development reference manual.
954  */
static void
hifn_init_pci_registers(struct hifn_softc *sc)
{
	/* write fixed values needed by the Initialization registers */
	WRITE_REG_0(sc, HIFN_0_PUCTRL, HIFN_PUCTRL_DMAENA);
	WRITE_REG_0(sc, HIFN_0_FIFOCNFG, HIFN_FIFOCNFG_THRESHOLD);
	WRITE_REG_0(sc, HIFN_0_PUIER, HIFN_PUIER_DSTOVER);

	/* write all 4 ring address registers */
	WRITE_REG_1(sc, HIFN_1_DMA_CRAR, sc->sc_dmamap->dm_segs[0].ds_addr +
	    offsetof(struct hifn_dma, cmdr[0]));
	WRITE_REG_1(sc, HIFN_1_DMA_SRAR, sc->sc_dmamap->dm_segs[0].ds_addr +
	    offsetof(struct hifn_dma, srcr[0]));
	WRITE_REG_1(sc, HIFN_1_DMA_DRAR, sc->sc_dmamap->dm_segs[0].ds_addr +
	    offsetof(struct hifn_dma, dstr[0]));
	WRITE_REG_1(sc, HIFN_1_DMA_RRAR, sc->sc_dmamap->dm_segs[0].ds_addr +
	    offsetof(struct hifn_dma, resr[0]));

	DELAY(2000);

	/* write status register (clear pending status, disable all rings) */
	WRITE_REG_1(sc, HIFN_1_DMA_CSR,
	    HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS |
	    HIFN_DMACSR_S_CTRL_DIS | HIFN_DMACSR_C_CTRL_DIS |
	    HIFN_DMACSR_D_ABORT | HIFN_DMACSR_D_DONE | HIFN_DMACSR_D_LAST |
	    HIFN_DMACSR_D_WAIT | HIFN_DMACSR_D_OVER |
	    HIFN_DMACSR_R_ABORT | HIFN_DMACSR_R_DONE | HIFN_DMACSR_R_LAST |
	    HIFN_DMACSR_R_WAIT | HIFN_DMACSR_R_OVER |
	    HIFN_DMACSR_S_ABORT | HIFN_DMACSR_S_DONE | HIFN_DMACSR_S_LAST |
	    HIFN_DMACSR_S_WAIT |
	    HIFN_DMACSR_C_ABORT | HIFN_DMACSR_C_DONE | HIFN_DMACSR_C_LAST |
	    HIFN_DMACSR_C_WAIT |
	    HIFN_DMACSR_ENGINE |
	    ((sc->sc_flags & HIFN_HAS_PUBLIC) ?
		HIFN_DMACSR_PUBDONE : 0) |
	    ((sc->sc_flags & HIFN_IS_7811) ?
		HIFN_DMACSR_ILLW | HIFN_DMACSR_ILLR : 0));

	/* mark all four rings idle, then build the interrupt-enable mask */
	sc->sc_d_busy = sc->sc_r_busy = sc->sc_s_busy = sc->sc_c_busy = 0;
	sc->sc_dmaier |= HIFN_DMAIER_R_DONE | HIFN_DMAIER_C_ABORT |
	    HIFN_DMAIER_D_OVER | HIFN_DMAIER_R_OVER |
	    HIFN_DMAIER_S_ABORT | HIFN_DMAIER_D_ABORT | HIFN_DMAIER_R_ABORT |
	    HIFN_DMAIER_ENGINE |
	    ((sc->sc_flags & HIFN_IS_7811) ?
		HIFN_DMAIER_ILLW | HIFN_DMAIER_ILLR : 0);
	sc->sc_dmaier &= ~HIFN_DMAIER_C_WAIT;
	WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
	CLR_LED(sc, HIFN_MIPSRST_LED0 | HIFN_MIPSRST_LED1 | HIFN_MIPSRST_LED2);

	/* RAM config: 795x has no DRAM-refresh field and programs a PLL */
	if (sc->sc_flags & HIFN_IS_7956) {
		WRITE_REG_0(sc, HIFN_0_PUCNFG, HIFN_PUCNFG_COMPSING |
		    HIFN_PUCNFG_TCALLPHASES |
		    HIFN_PUCNFG_TCDRVTOTEM | HIFN_PUCNFG_BUS32);
		WRITE_REG_1(sc, HIFN_1_PLL, HIFN_PLL_7956);
	} else {
		WRITE_REG_0(sc, HIFN_0_PUCNFG, HIFN_PUCNFG_COMPSING |
		    HIFN_PUCNFG_DRFR_128 | HIFN_PUCNFG_TCALLPHASES |
		    HIFN_PUCNFG_TCDRVTOTEM | HIFN_PUCNFG_BUS32 |
		    (sc->sc_drammodel ? HIFN_PUCNFG_DRAM : HIFN_PUCNFG_SRAM));
	}

	WRITE_REG_0(sc, HIFN_0_PUISR, HIFN_PUISR_DSTOVER);
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
	    HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE | HIFN_DMACNFG_LAST |
	    ((HIFN_POLL_FREQUENCY << 16 ) & HIFN_DMACNFG_POLLFREQ) |
	    ((HIFN_POLL_SCALAR << 8) & HIFN_DMACNFG_POLLINVAL));
}
1022 
1023 /*
1024  * The maximum number of sessions supported by the card
1025  * is dependent on the amount of context ram, which
1026  * encryption algorithms are enabled, and how compression
1027  * is configured.  This should be configured before this
1028  * routine is called.
1029  */
1030 static void
1031 hifn_sessions(struct hifn_softc *sc)
1032 {
1033 	u_int32_t pucnfg;
1034 	int ctxsize;
1035 
1036 	pucnfg = READ_REG_0(sc, HIFN_0_PUCNFG);
1037 
1038 	if (pucnfg & HIFN_PUCNFG_COMPSING) {
1039 		if (pucnfg & HIFN_PUCNFG_ENCCNFG)
1040 			ctxsize = 128;
1041 		else
1042 			ctxsize = 512;
1043 		/*
1044 		 * 7955/7956 has internal context memory of 32K
1045 		 */
1046 		if (sc->sc_flags & HIFN_IS_7956)
1047 			sc->sc_maxses = 32768 / ctxsize;
1048 		else
1049 			sc->sc_maxses = 1 +
1050 			    ((sc->sc_ramsize - 32768) / ctxsize);
1051 	}
1052 	else
1053 		sc->sc_maxses = sc->sc_ramsize / 16384;
1054 
1055 	if (sc->sc_maxses > 2048)
1056 		sc->sc_maxses = 2048;
1057 }
1058 
1059 /*
1060  * Determine ram type (sram or dram).  Board should be just out of a reset
1061  * state when this is called.
1062  */
1063 static int
1064 hifn_ramtype(struct hifn_softc *sc)
1065 {
1066 	u_int8_t data[8], dataexpect[8];
1067 	int i;
1068 
1069 	for (i = 0; i < sizeof(data); i++)
1070 		data[i] = dataexpect[i] = 0x55;
1071 	if (hifn_writeramaddr(sc, 0, data))
1072 		return (-1);
1073 	if (hifn_readramaddr(sc, 0, data))
1074 		return (-1);
1075 	if (memcmp(data, dataexpect, sizeof(data)) != 0) {
1076 		sc->sc_drammodel = 1;
1077 		return (0);
1078 	}
1079 
1080 	for (i = 0; i < sizeof(data); i++)
1081 		data[i] = dataexpect[i] = 0xaa;
1082 	if (hifn_writeramaddr(sc, 0, data))
1083 		return (-1);
1084 	if (hifn_readramaddr(sc, 0, data))
1085 		return (-1);
1086 	if (memcmp(data, dataexpect, sizeof(data)) != 0) {
1087 		sc->sc_drammodel = 1;
1088 		return (0);
1089 	}
1090 
1091 	return (0);
1092 }
1093 
1094 #define	HIFN_SRAM_MAX		(32 << 20)
1095 #define	HIFN_SRAM_STEP_SIZE	16384
1096 #define	HIFN_SRAM_GRANULARITY	(HIFN_SRAM_MAX / HIFN_SRAM_STEP_SIZE)
1097 
/*
 * Probe the amount of attached SRAM by writing a distinct tag to the
 * start of every 16KB step (descending, so that if the address space
 * wraps, later/lower writes clobber the aliases of higher addresses),
 * then reading them back ascending.  sc_ramsize ends up just past the
 * highest step whose tag read back intact.  Always returns 0; a read
 * failure or mismatch simply stops the probe at the size found so far.
 */
static int
hifn_sramsize(struct hifn_softc *sc)
{
	u_int32_t a;
	u_int8_t data[8];
	u_int8_t dataexpect[sizeof(data)];
	int32_t i;

	/* Fill with a fixed background pattern first... */
	for (i = 0; i < sizeof(data); i++)
		data[i] = dataexpect[i] = i ^ 0x5a;

	for (i = HIFN_SRAM_GRANULARITY - 1; i >= 0; i--) {
		a = i * HIFN_SRAM_STEP_SIZE;
		/*
		 * ...then overwrite the first sizeof(i) == 4 bytes with the
		 * step index so each step carries a unique tag.  NOTE(review):
		 * the in-memory byte order of i is host-endian; this is
		 * consistent between write and compare below, so it works
		 * either way.  Return value deliberately unchecked here;
		 * failures show up as a mismatch in the read pass.
		 */
		memcpy(data, &i, sizeof(i));
		hifn_writeramaddr(sc, a, data);
	}

	/* Read back ascending; stop at the first lost or aliased tag. */
	for (i = 0; i < HIFN_SRAM_GRANULARITY; i++) {
		a = i * HIFN_SRAM_STEP_SIZE;
		memcpy(dataexpect, &i, sizeof(i));
		if (hifn_readramaddr(sc, a, data) < 0)
			return (0);
		if (memcmp(data, dataexpect, sizeof(data)) != 0)
			return (0);
		sc->sc_ramsize = a + HIFN_SRAM_STEP_SIZE;
	}

	return (0);
}
1127 
1128 /*
1129  * XXX For dram boards, one should really try all of the
1130  * HIFN_PUCNFG_DSZ_*'s.  This just assumes that PUCNFG
1131  * is already set up correctly.
1132  */
1133 static int
1134 hifn_dramsize(struct hifn_softc *sc)
1135 {
1136 	u_int32_t cnfg;
1137 
1138 	if (sc->sc_flags & HIFN_IS_7956) {
1139 		/*
1140 		 * 7955/7956 have a fixed internal ram of only 32K.
1141 		 */
1142 		sc->sc_ramsize = 32768;
1143 	} else {
1144 		cnfg = READ_REG_0(sc, HIFN_0_PUCNFG) &
1145 		    HIFN_PUCNFG_DRAMMASK;
1146 		sc->sc_ramsize = 1 << ((cnfg >> 13) + 18);
1147 	}
1148 	return (0);
1149 }
1150 
/*
 * Reserve one slot in each of the four descriptor rings (command,
 * source, destination, result), returning the chosen indices through
 * the pointer arguments.  If a ring index has reached the end, the
 * extra "jump" descriptor at the end of that ring is armed so the
 * chip wraps back to slot 0, and the software index is reset to match.
 * NOTE(review): the reap index (cmdk/srck/dstk/resk) is advanced with
 * the fill index — presumably because this path is used only for the
 * synchronous RAM probe commands, which are polled rather than reaped
 * by hifn_intr(); confirm against callers.
 */
static void
hifn_alloc_slot(struct hifn_softc *sc, int *cmdp, int *srcp, int *dstp,
    int *resp)
{
	struct hifn_dma *dma = sc->sc_dma;

	if (dma->cmdi == HIFN_D_CMD_RSIZE) {
		/* wrap: arm the jump descriptor, then restart at slot 0 */
		dma->cmdi = 0;
		dma->cmdr[HIFN_D_CMD_RSIZE].l = htole32(HIFN_D_VALID |
		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
		HIFN_CMDR_SYNC(sc, HIFN_D_CMD_RSIZE,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	}
	*cmdp = dma->cmdi++;
	dma->cmdk = dma->cmdi;

	if (dma->srci == HIFN_D_SRC_RSIZE) {
		dma->srci = 0;
		dma->srcr[HIFN_D_SRC_RSIZE].l = htole32(HIFN_D_VALID |
		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
		HIFN_SRCR_SYNC(sc, HIFN_D_SRC_RSIZE,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	}
	*srcp = dma->srci++;
	dma->srck = dma->srci;

	if (dma->dsti == HIFN_D_DST_RSIZE) {
		dma->dsti = 0;
		dma->dstr[HIFN_D_DST_RSIZE].l = htole32(HIFN_D_VALID |
		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
		HIFN_DSTR_SYNC(sc, HIFN_D_DST_RSIZE,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	}
	*dstp = dma->dsti++;
	dma->dstk = dma->dsti;

	if (dma->resi == HIFN_D_RES_RSIZE) {
		dma->resi = 0;
		dma->resr[HIFN_D_RES_RSIZE].l = htole32(HIFN_D_VALID |
		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
		HIFN_RESR_SYNC(sc, HIFN_D_RES_RSIZE,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	}
	*resp = dma->resi++;
	dma->resk = dma->resi;
}
1197 
1198 static int
1199 hifn_writeramaddr(struct hifn_softc *sc, int addr, u_int8_t *data)
1200 {
1201 	struct hifn_dma *dma = sc->sc_dma;
1202 	struct hifn_base_command wc;
1203 	const u_int32_t masks = HIFN_D_VALID | HIFN_D_LAST | HIFN_D_MASKDONEIRQ;
1204 	int r, cmdi, resi, srci, dsti;
1205 
1206 	wc.masks = htole16(3 << 13);
1207 	wc.session_num = htole16(addr >> 14);
1208 	wc.total_source_count = htole16(8);
1209 	wc.total_dest_count = htole16(addr & 0x3fff);
1210 
1211 	hifn_alloc_slot(sc, &cmdi, &srci, &dsti, &resi);
1212 
1213 	WRITE_REG_1(sc, HIFN_1_DMA_CSR,
1214 	    HIFN_DMACSR_C_CTRL_ENA | HIFN_DMACSR_S_CTRL_ENA |
1215 	    HIFN_DMACSR_D_CTRL_ENA | HIFN_DMACSR_R_CTRL_ENA);
1216 
1217 	/* build write command */
1218 	memset(dma->command_bufs[cmdi], 0, HIFN_MAX_COMMAND);
1219 	*(struct hifn_base_command *)dma->command_bufs[cmdi] = wc;
1220 	memcpy(&dma->test_src, data, sizeof(dma->test_src));
1221 
1222 	dma->srcr[srci].p = htole32(sc->sc_dmamap->dm_segs[0].ds_addr
1223 	    + offsetof(struct hifn_dma, test_src));
1224 	dma->dstr[dsti].p = htole32(sc->sc_dmamap->dm_segs[0].ds_addr
1225 	    + offsetof(struct hifn_dma, test_dst));
1226 
1227 	dma->cmdr[cmdi].l = htole32(16 | masks);
1228 	dma->srcr[srci].l = htole32(8 | masks);
1229 	dma->dstr[dsti].l = htole32(4 | masks);
1230 	dma->resr[resi].l = htole32(4 | masks);
1231 
1232 	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1233 	    0, sc->sc_dmamap->dm_mapsize,
1234 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1235 
1236 	for (r = 10000; r >= 0; r--) {
1237 		DELAY(10);
1238 		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1239 		    0, sc->sc_dmamap->dm_mapsize,
1240 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1241 		if ((dma->resr[resi].l & htole32(HIFN_D_VALID)) == 0)
1242 			break;
1243 		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1244 		    0, sc->sc_dmamap->dm_mapsize,
1245 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1246 	}
1247 	if (r == 0) {
1248 		printf("%s: writeramaddr -- "
1249 		    "result[%d](addr %d) still valid\n",
1250 		    device_xname(sc->sc_dv), resi, addr);
1251 		r = -1;
1252 		return (-1);
1253 	} else
1254 		r = 0;
1255 
1256 	WRITE_REG_1(sc, HIFN_1_DMA_CSR,
1257 	    HIFN_DMACSR_C_CTRL_DIS | HIFN_DMACSR_S_CTRL_DIS |
1258 	    HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS);
1259 
1260 	return (r);
1261 }
1262 
1263 static int
1264 hifn_readramaddr(struct hifn_softc *sc, int addr, u_int8_t *data)
1265 {
1266 	struct hifn_dma *dma = sc->sc_dma;
1267 	struct hifn_base_command rc;
1268 	const u_int32_t masks = HIFN_D_VALID | HIFN_D_LAST | HIFN_D_MASKDONEIRQ;
1269 	int r, cmdi, srci, dsti, resi;
1270 
1271 	rc.masks = htole16(2 << 13);
1272 	rc.session_num = htole16(addr >> 14);
1273 	rc.total_source_count = htole16(addr & 0x3fff);
1274 	rc.total_dest_count = htole16(8);
1275 
1276 	hifn_alloc_slot(sc, &cmdi, &srci, &dsti, &resi);
1277 
1278 	WRITE_REG_1(sc, HIFN_1_DMA_CSR,
1279 	    HIFN_DMACSR_C_CTRL_ENA | HIFN_DMACSR_S_CTRL_ENA |
1280 	    HIFN_DMACSR_D_CTRL_ENA | HIFN_DMACSR_R_CTRL_ENA);
1281 
1282 	memset(dma->command_bufs[cmdi], 0, HIFN_MAX_COMMAND);
1283 	*(struct hifn_base_command *)dma->command_bufs[cmdi] = rc;
1284 
1285 	dma->srcr[srci].p = htole32(sc->sc_dmamap->dm_segs[0].ds_addr +
1286 	    offsetof(struct hifn_dma, test_src));
1287 	dma->test_src = 0;
1288 	dma->dstr[dsti].p =  htole32(sc->sc_dmamap->dm_segs[0].ds_addr +
1289 	    offsetof(struct hifn_dma, test_dst));
1290 	dma->test_dst = 0;
1291 	dma->cmdr[cmdi].l = htole32(8 | masks);
1292 	dma->srcr[srci].l = htole32(8 | masks);
1293 	dma->dstr[dsti].l = htole32(8 | masks);
1294 	dma->resr[resi].l = htole32(HIFN_MAX_RESULT | masks);
1295 
1296 	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1297 	    0, sc->sc_dmamap->dm_mapsize,
1298 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1299 
1300 	for (r = 10000; r >= 0; r--) {
1301 		DELAY(10);
1302 		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1303 		    0, sc->sc_dmamap->dm_mapsize,
1304 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1305 		if ((dma->resr[resi].l & htole32(HIFN_D_VALID)) == 0)
1306 			break;
1307 		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1308 		    0, sc->sc_dmamap->dm_mapsize,
1309 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1310 	}
1311 	if (r == 0) {
1312 		printf("%s: readramaddr -- "
1313 		    "result[%d](addr %d) still valid\n",
1314 		    device_xname(sc->sc_dv), resi, addr);
1315 		r = -1;
1316 	} else {
1317 		r = 0;
1318 		memcpy(data, &dma->test_dst, sizeof(dma->test_dst));
1319 	}
1320 
1321 	WRITE_REG_1(sc, HIFN_1_DMA_CSR,
1322 	    HIFN_DMACSR_C_CTRL_DIS | HIFN_DMACSR_S_CTRL_DIS |
1323 	    HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS);
1324 
1325 	return (r);
1326 }
1327 
1328 /*
1329  * Initialize the descriptor rings.
1330  */
1331 static void
1332 hifn_init_dma(struct hifn_softc *sc)
1333 {
1334 	struct hifn_dma *dma = sc->sc_dma;
1335 	int i;
1336 
1337 	hifn_set_retry(sc);
1338 
1339 	/* initialize static pointer values */
1340 	for (i = 0; i < HIFN_D_CMD_RSIZE; i++)
1341 		dma->cmdr[i].p = htole32(sc->sc_dmamap->dm_segs[0].ds_addr +
1342 		    offsetof(struct hifn_dma, command_bufs[i][0]));
1343 	for (i = 0; i < HIFN_D_RES_RSIZE; i++)
1344 		dma->resr[i].p = htole32(sc->sc_dmamap->dm_segs[0].ds_addr +
1345 		    offsetof(struct hifn_dma, result_bufs[i][0]));
1346 
1347 	dma->cmdr[HIFN_D_CMD_RSIZE].p =
1348 	    htole32(sc->sc_dmamap->dm_segs[0].ds_addr +
1349 		offsetof(struct hifn_dma, cmdr[0]));
1350 	dma->srcr[HIFN_D_SRC_RSIZE].p =
1351 	    htole32(sc->sc_dmamap->dm_segs[0].ds_addr +
1352 		offsetof(struct hifn_dma, srcr[0]));
1353 	dma->dstr[HIFN_D_DST_RSIZE].p =
1354 	    htole32(sc->sc_dmamap->dm_segs[0].ds_addr +
1355 		offsetof(struct hifn_dma, dstr[0]));
1356 	dma->resr[HIFN_D_RES_RSIZE].p =
1357 	    htole32(sc->sc_dmamap->dm_segs[0].ds_addr +
1358 		offsetof(struct hifn_dma, resr[0]));
1359 
1360 	dma->cmdu = dma->srcu = dma->dstu = dma->resu = 0;
1361 	dma->cmdi = dma->srci = dma->dsti = dma->resi = 0;
1362 	dma->cmdk = dma->srck = dma->dstk = dma->resk = 0;
1363 }
1364 
1365 /*
1366  * Writes out the raw command buffer space.  Returns the
1367  * command buffer size.
1368  */
/*
 * Serialize "cmd" into the raw command buffer "buf" in the layout the
 * chip expects: base command, then the optional comp/mac/crypt
 * sub-commands, then any new MAC key, crypt key and IV.  Returns the
 * number of bytes written.  The caller must supply a buffer of at
 * least HIFN_MAX_COMMAND bytes.
 */
static u_int
hifn_write_command(struct hifn_command *cmd, u_int8_t *buf)
{
	u_int8_t *buf_pos;
	struct hifn_base_command *base_cmd;
	struct hifn_mac_command *mac_cmd;
	struct hifn_crypt_command *cry_cmd;
	struct hifn_comp_command *comp_cmd;
	int using_mac, using_crypt, using_comp, len, ivlen;
	u_int32_t dlen, slen;

	buf_pos = buf;
	using_mac = cmd->base_masks & HIFN_BASE_CMD_MAC;
	using_crypt = cmd->base_masks & HIFN_BASE_CMD_CRYPT;
	using_comp = cmd->base_masks & HIFN_BASE_CMD_COMP;

	/*
	 * Base command: total source/destination byte counts are split
	 * into a low 16-bit field and high bits packed into session_num.
	 */
	base_cmd = (struct hifn_base_command *)buf_pos;
	base_cmd->masks = htole16(cmd->base_masks);
	slen = cmd->src_map->dm_mapsize;
	if (cmd->sloplen)
		/* account for the slop word replacing the unaligned tail */
		dlen = cmd->dst_map->dm_mapsize - cmd->sloplen +
		    sizeof(u_int32_t);
	else
		dlen = cmd->dst_map->dm_mapsize;
	base_cmd->total_source_count = htole16(slen & HIFN_BASE_CMD_LENMASK_LO);
	base_cmd->total_dest_count = htole16(dlen & HIFN_BASE_CMD_LENMASK_LO);
	dlen >>= 16;
	slen >>= 16;
	base_cmd->session_num = htole16(cmd->session_num |
	    ((slen << HIFN_BASE_CMD_SRCLEN_S) & HIFN_BASE_CMD_SRCLEN_M) |
	    ((dlen << HIFN_BASE_CMD_DSTLEN_S) & HIFN_BASE_CMD_DSTLEN_M));
	buf_pos += sizeof(struct hifn_base_command);

	/* Optional compression sub-command. */
	if (using_comp) {
		comp_cmd = (struct hifn_comp_command *)buf_pos;
		dlen = cmd->compcrd->crd_len;
		comp_cmd->source_count = htole16(dlen & 0xffff);
		dlen >>= 16;
		comp_cmd->masks = htole16(cmd->comp_masks |
		    ((dlen << HIFN_COMP_CMD_SRCLEN_S) & HIFN_COMP_CMD_SRCLEN_M));
		comp_cmd->header_skip = htole16(cmd->compcrd->crd_skip);
		comp_cmd->reserved = 0;
		buf_pos += sizeof(struct hifn_comp_command);
	}

	/* Optional MAC sub-command. */
	if (using_mac) {
		mac_cmd = (struct hifn_mac_command *)buf_pos;
		dlen = cmd->maccrd->crd_len;
		mac_cmd->source_count = htole16(dlen & 0xffff);
		dlen >>= 16;
		mac_cmd->masks = htole16(cmd->mac_masks |
		    ((dlen << HIFN_MAC_CMD_SRCLEN_S) & HIFN_MAC_CMD_SRCLEN_M));
		mac_cmd->header_skip = htole16(cmd->maccrd->crd_skip);
		mac_cmd->reserved = 0;
		buf_pos += sizeof(struct hifn_mac_command);
	}

	/* Optional encryption sub-command. */
	if (using_crypt) {
		cry_cmd = (struct hifn_crypt_command *)buf_pos;
		dlen = cmd->enccrd->crd_len;
		cry_cmd->source_count = htole16(dlen & 0xffff);
		dlen >>= 16;
		cry_cmd->masks = htole16(cmd->cry_masks |
		    ((dlen << HIFN_CRYPT_CMD_SRCLEN_S) & HIFN_CRYPT_CMD_SRCLEN_M));
		cry_cmd->header_skip = htole16(cmd->enccrd->crd_skip);
		cry_cmd->reserved = 0;
		buf_pos += sizeof(struct hifn_crypt_command);
	}

	/* New MAC key material, if requested. */
	if (using_mac && cmd->mac_masks & HIFN_MAC_CMD_NEW_KEY) {
		memcpy(buf_pos, cmd->mac, HIFN_MAC_KEY_LENGTH);
		buf_pos += HIFN_MAC_KEY_LENGTH;
	}

	/* New crypt key material, if requested; length is per-algorithm. */
	if (using_crypt && cmd->cry_masks & HIFN_CRYPT_CMD_NEW_KEY) {
		switch (cmd->cry_masks & HIFN_CRYPT_CMD_ALG_MASK) {
		case HIFN_CRYPT_CMD_ALG_3DES:
			memcpy(buf_pos, cmd->ck, HIFN_3DES_KEY_LENGTH);
			buf_pos += HIFN_3DES_KEY_LENGTH;
			break;
		case HIFN_CRYPT_CMD_ALG_DES:
			memcpy(buf_pos, cmd->ck, HIFN_DES_KEY_LENGTH);
			buf_pos += HIFN_DES_KEY_LENGTH;
			break;
		case HIFN_CRYPT_CMD_ALG_RC4:
			/*
			 * RC4 wants a 256-byte key area: repeat the key
			 * from its start until 256 bytes are filled, then
			 * append 4 zero bytes.
			 */
			len = 256;
			do {
				int clen;

				clen = MIN(cmd->cklen, len);
				memcpy(buf_pos, cmd->ck, clen);
				len -= clen;
				buf_pos += clen;
			} while (len > 0);
			memset(buf_pos, 0, 4);
			buf_pos += 4;
			break;
		case HIFN_CRYPT_CMD_ALG_AES:
			/*
			 * AES keys are variable 128, 192 and
			 * 256 bits (16, 24 and 32 bytes).
			 */
			memcpy(buf_pos, cmd->ck, cmd->cklen);
			buf_pos += cmd->cklen;
			break;
		}
	}

	/* New IV, if requested; AES uses a larger IV than DES/3DES. */
	if (using_crypt && cmd->cry_masks & HIFN_CRYPT_CMD_NEW_IV) {
		switch (cmd->cry_masks & HIFN_CRYPT_CMD_ALG_MASK) {
		case HIFN_CRYPT_CMD_ALG_AES:
			ivlen = HIFN_AES_IV_LENGTH;
			break;
		default:
			ivlen = HIFN_IV_LENGTH;
			break;
		}
		memcpy(buf_pos, cmd->iv, ivlen);
		buf_pos += ivlen;
	}

	/*
	 * Pure pass-through command (no mac/crypt/comp): pad with 8
	 * zero bytes.
	 */
	if ((cmd->base_masks & (HIFN_BASE_CMD_MAC | HIFN_BASE_CMD_CRYPT |
	    HIFN_BASE_CMD_COMP)) == 0) {
		memset(buf_pos, 0, 8);
		buf_pos += 8;
	}

	return (buf_pos - buf);
}
1498 
1499 static int
1500 hifn_dmamap_aligned(bus_dmamap_t map)
1501 {
1502 	int i;
1503 
1504 	for (i = 0; i < map->dm_nsegs; i++) {
1505 		if (map->dm_segs[i].ds_addr & 3)
1506 			return (0);
1507 		if ((i != (map->dm_nsegs - 1)) &&
1508 		    (map->dm_segs[i].ds_len & 3))
1509 			return (0);
1510 	}
1511 	return (1);
1512 }
1513 
/*
 * Fill the destination descriptor ring from cmd->dst_map.  All but
 * the last segment are written verbatim; the final descriptor either
 * covers the last segment directly (no slop) or is redirected to a
 * per-command 32-bit "slop" buffer inside the shared DMA area when
 * the total length is not word-aligned.  Only the last descriptor
 * carries HIFN_D_LAST.  Updates dma->dsti/dstu and returns the new
 * fill index.
 */
static int
hifn_dmamap_load_dst(struct hifn_softc *sc, struct hifn_command *cmd)
{
	struct hifn_dma *dma = sc->sc_dma;
	bus_dmamap_t map = cmd->dst_map;
	u_int32_t p, l;
	int idx, used = 0, i;

	idx = dma->dsti;
	/* All segments except the last: no HIFN_D_LAST, IRQ masked. */
	for (i = 0; i < map->dm_nsegs - 1; i++) {
		dma->dstr[idx].p = htole32(map->dm_segs[i].ds_addr);
		dma->dstr[idx].l = htole32(HIFN_D_VALID |
		    HIFN_D_MASKDONEIRQ | map->dm_segs[i].ds_len);
		HIFN_DSTR_SYNC(sc, idx,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		used++;

		if (++idx == HIFN_D_DST_RSIZE) {
			/* wrap: arm the jump descriptor at ring end */
			dma->dstr[idx].l = htole32(HIFN_D_VALID |
			    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
			HIFN_DSTR_SYNC(sc, idx,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			idx = 0;
		}
	}

	if (cmd->sloplen == 0) {
		/* Aligned: final descriptor covers the last segment. */
		p = map->dm_segs[i].ds_addr;
		l = HIFN_D_VALID | HIFN_D_MASKDONEIRQ | HIFN_D_LAST |
		    map->dm_segs[i].ds_len;
	} else {
		/*
		 * Unaligned tail: the final descriptor points at the
		 * 32-bit slop word in the shared DMA area instead; the
		 * tail bytes are copied back in the completion path.
		 */
		p = sc->sc_dmamap->dm_segs[0].ds_addr +
		    offsetof(struct hifn_dma, slop[cmd->slopidx]);
		l = HIFN_D_VALID | HIFN_D_MASKDONEIRQ | HIFN_D_LAST |
		    sizeof(u_int32_t);

		/* Aligned prefix of the last segment still goes direct. */
		if ((map->dm_segs[i].ds_len - cmd->sloplen) != 0) {
			dma->dstr[idx].p = htole32(map->dm_segs[i].ds_addr);
			dma->dstr[idx].l = htole32(HIFN_D_VALID |
			    HIFN_D_MASKDONEIRQ |
			    (map->dm_segs[i].ds_len - cmd->sloplen));
			HIFN_DSTR_SYNC(sc, idx,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			used++;

			if (++idx == HIFN_D_DST_RSIZE) {
				dma->dstr[idx].l = htole32(HIFN_D_VALID |
				    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
				HIFN_DSTR_SYNC(sc, idx,
				    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
				idx = 0;
			}
		}
	}
	/* Emit the final (HIFN_D_LAST) descriptor chosen above. */
	dma->dstr[idx].p = htole32(p);
	dma->dstr[idx].l = htole32(l);
	HIFN_DSTR_SYNC(sc, idx, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	used++;

	if (++idx == HIFN_D_DST_RSIZE) {
		dma->dstr[idx].l = htole32(HIFN_D_VALID | HIFN_D_JUMP |
		    HIFN_D_MASKDONEIRQ);
		HIFN_DSTR_SYNC(sc, idx,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		idx = 0;
	}

	dma->dsti = idx;
	dma->dstu += used;
	return (idx);
}
1585 
/*
 * Fill the source descriptor ring from cmd->src_map, one descriptor
 * per DMA segment.  Only the final segment carries HIFN_D_LAST; done
 * interrupts are masked on all of them.  Updates dma->srci/srcu and
 * returns the new fill index.
 */
static int
hifn_dmamap_load_src(struct hifn_softc *sc, struct hifn_command *cmd)
{
	struct hifn_dma *dma = sc->sc_dma;
	bus_dmamap_t map = cmd->src_map;
	int idx, i;
	u_int32_t last = 0;

	idx = dma->srci;
	for (i = 0; i < map->dm_nsegs; i++) {
		if (i == map->dm_nsegs - 1)
			last = HIFN_D_LAST;

		dma->srcr[idx].p = htole32(map->dm_segs[i].ds_addr);
		dma->srcr[idx].l = htole32(map->dm_segs[i].ds_len |
		    HIFN_D_VALID | HIFN_D_MASKDONEIRQ | last);
		HIFN_SRCR_SYNC(sc, idx,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);

		if (++idx == HIFN_D_SRC_RSIZE) {
			/*
			 * Wrap: arm the jump descriptor at the end of the
			 * ring.  (idx == HIFN_D_SRC_RSIZE here, so syncing
			 * slot HIFN_D_SRC_RSIZE is the slot just written.)
			 */
			dma->srcr[idx].l = htole32(HIFN_D_VALID |
			    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
			HIFN_SRCR_SYNC(sc, HIFN_D_SRC_RSIZE,
			    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
			idx = 0;
		}
	}
	dma->srci = idx;
	dma->srcu += map->dm_nsegs;
	return (idx);
}
1617 
/*
 * Submit one crypto request to the chip: map the source data, arrange
 * a destination (reusing the source map when it is word-aligned,
 * otherwise building a fresh aligned mbuf chain), write the command
 * descriptor, and queue source/result/destination descriptors.
 * Returns 0 on success or ENOMEM/EINVAL, unwinding any maps and
 * mbufs allocated so far via the goto cleanup chain at the bottom.
 * NOTE(review): "hint" is accepted but unused in this path.
 */
static int
hifn_crypto(struct hifn_softc *sc, struct hifn_command *cmd,
    struct cryptop *crp, int hint)
{
	struct	hifn_dma *dma = sc->sc_dma;
	u_int32_t cmdlen;
	int	cmdi, resi, err = 0;

	if (bus_dmamap_create(sc->sc_dmat, HIFN_MAX_DMALEN, MAX_SCATTER,
	    HIFN_MAX_SEGLEN, 0, BUS_DMA_NOWAIT, &cmd->src_map))
		return (ENOMEM);

	/* Load the source: either an mbuf chain or a uio. */
	if (crp->crp_flags & CRYPTO_F_IMBUF) {
		if (bus_dmamap_load_mbuf(sc->sc_dmat, cmd->src_map,
		    cmd->srcu.src_m, BUS_DMA_NOWAIT)) {
			err = ENOMEM;
			goto err_srcmap1;
		}
	} else if (crp->crp_flags & CRYPTO_F_IOV) {
		if (bus_dmamap_load_uio(sc->sc_dmat, cmd->src_map,
		    cmd->srcu.src_io, BUS_DMA_NOWAIT)) {
			err = ENOMEM;
			goto err_srcmap1;
		}
	} else {
		err = EINVAL;
		goto err_srcmap1;
	}

	if (hifn_dmamap_aligned(cmd->src_map)) {
		/* Aligned source: operate in place, dst is the src map. */
		cmd->sloplen = cmd->src_map->dm_mapsize & 3;
		if (crp->crp_flags & CRYPTO_F_IOV)
			cmd->dstu.dst_io = cmd->srcu.src_io;
		else if (crp->crp_flags & CRYPTO_F_IMBUF)
			cmd->dstu.dst_m = cmd->srcu.src_m;
		cmd->dst_map = cmd->src_map;
	} else {
		if (crp->crp_flags & CRYPTO_F_IOV) {
			/* can't realign a caller-owned uio */
			err = EINVAL;
			goto err_srcmap;
		} else if (crp->crp_flags & CRYPTO_F_IMBUF) {
			/*
			 * Unaligned mbuf source: build a fresh, naturally
			 * aligned mbuf chain of the same total length to
			 * receive the output.
			 */
			int totlen, len;
			struct mbuf *m, *m0, *mlast;

			totlen = cmd->src_map->dm_mapsize;
			if (cmd->srcu.src_m->m_flags & M_PKTHDR) {
				len = MHLEN;
				MGETHDR(m0, M_DONTWAIT, MT_DATA);
			} else {
				len = MLEN;
				MGET(m0, M_DONTWAIT, MT_DATA);
			}
			if (m0 == NULL) {
				err = ENOMEM;
				goto err_srcmap;
			}
			if (len == MHLEN)
				M_DUP_PKTHDR(m0, cmd->srcu.src_m);
			if (totlen >= MINCLSIZE) {
				MCLGET(m0, M_DONTWAIT);
				if (m0->m_flags & M_EXT)
					len = MCLBYTES;
			}
			totlen -= len;
			m0->m_pkthdr.len = m0->m_len = len;
			mlast = m0;

			/* Keep appending mbufs until totlen is covered. */
			while (totlen > 0) {
				MGET(m, M_DONTWAIT, MT_DATA);
				if (m == NULL) {
					err = ENOMEM;
					m_freem(m0);
					goto err_srcmap;
				}
				len = MLEN;
				if (totlen >= MINCLSIZE) {
					MCLGET(m, M_DONTWAIT);
					if (m->m_flags & M_EXT)
						len = MCLBYTES;
				}

				m->m_len = len;
				if (m0->m_flags & M_PKTHDR)
					m0->m_pkthdr.len += len;
				totlen -= len;

				mlast->m_next = m;
				mlast = m;
			}
			cmd->dstu.dst_m = m0;
		}
	}

	/* Separate destination chain: create and load its own map. */
	if (cmd->dst_map == NULL) {
		if (bus_dmamap_create(sc->sc_dmat,
		    HIFN_MAX_SEGLEN * MAX_SCATTER, MAX_SCATTER,
		    HIFN_MAX_SEGLEN, 0, BUS_DMA_NOWAIT, &cmd->dst_map)) {
			err = ENOMEM;
			goto err_srcmap;
		}
		if (crp->crp_flags & CRYPTO_F_IMBUF) {
			if (bus_dmamap_load_mbuf(sc->sc_dmat, cmd->dst_map,
			    cmd->dstu.dst_m, BUS_DMA_NOWAIT)) {
				err = ENOMEM;
				goto err_dstmap1;
			}
		} else if (crp->crp_flags & CRYPTO_F_IOV) {
			if (bus_dmamap_load_uio(sc->sc_dmat, cmd->dst_map,
			    cmd->dstu.dst_io, BUS_DMA_NOWAIT)) {
				err = ENOMEM;
				goto err_dstmap1;
			}
		}
	}

#ifdef HIFN_DEBUG
	if (hifn_debug)
		printf("%s: Entering cmd: stat %8x ien %8x u %d/%d/%d/%d n %d/%d\n",
		    device_xname(sc->sc_dv),
		    READ_REG_1(sc, HIFN_1_DMA_CSR),
		    READ_REG_1(sc, HIFN_1_DMA_IER),
		    dma->cmdu, dma->srcu, dma->dstu, dma->resu,
		    cmd->src_map->dm_nsegs, cmd->dst_map->dm_nsegs);
#endif

	if (cmd->src_map == cmd->dst_map)
		bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
		    0, cmd->src_map->dm_mapsize,
		    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
	else {
		bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
		    0, cmd->src_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
		bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
		    0, cmd->dst_map->dm_mapsize, BUS_DMASYNC_PREREAD);
	}

	/*
	 * need 1 cmd, and 1 res
	 * need N src, and N dst
	 */
	if ((dma->cmdu + 1) > HIFN_D_CMD_RSIZE ||
	    (dma->resu + 1) > HIFN_D_RES_RSIZE) {
		err = ENOMEM;
		goto err_dstmap;
	}
	if ((dma->srcu + cmd->src_map->dm_nsegs) > HIFN_D_SRC_RSIZE ||
	    (dma->dstu + cmd->dst_map->dm_nsegs + 1) > HIFN_D_DST_RSIZE) {
		err = ENOMEM;
		goto err_dstmap;
	}

	/* Take a command slot, wrapping the ring if needed. */
	if (dma->cmdi == HIFN_D_CMD_RSIZE) {
		dma->cmdi = 0;
		dma->cmdr[HIFN_D_CMD_RSIZE].l = htole32(HIFN_D_VALID |
		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
		HIFN_CMDR_SYNC(sc, HIFN_D_CMD_RSIZE,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	}
	cmdi = dma->cmdi++;
	cmdlen = hifn_write_command(cmd, dma->command_bufs[cmdi]);
	HIFN_CMD_SYNC(sc, cmdi, BUS_DMASYNC_PREWRITE);

	/* .p for command/result already set */
	dma->cmdr[cmdi].l = htole32(cmdlen | HIFN_D_VALID | HIFN_D_LAST |
	    HIFN_D_MASKDONEIRQ);
	HIFN_CMDR_SYNC(sc, cmdi,
	    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	dma->cmdu++;
	if (sc->sc_c_busy == 0) {
		WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_C_CTRL_ENA);
		sc->sc_c_busy = 1;
		SET_LED(sc, HIFN_MIPSRST_LED0);
	}

	/*
	 * We don't worry about missing an interrupt (which a "command wait"
	 * interrupt salvages us from), unless there is more than one command
	 * in the queue.
	 *
	 * XXX We do seem to miss some interrupts.  So we always enable
	 * XXX command wait.  From OpenBSD revision 1.149.
	 *
	 */
#if 0
	if (dma->cmdu > 1) {
#endif
		sc->sc_dmaier |= HIFN_DMAIER_C_WAIT;
		WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
#if 0
	}
#endif

	hifnstats.hst_ipackets++;
	hifnstats.hst_ibytes += cmd->src_map->dm_mapsize;

	/* Queue source descriptors and kick the source channel. */
	hifn_dmamap_load_src(sc, cmd);
	if (sc->sc_s_busy == 0) {
		WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_S_CTRL_ENA);
		sc->sc_s_busy = 1;
		SET_LED(sc, HIFN_MIPSRST_LED1);
	}

	/*
	 * Unlike other descriptors, we don't mask done interrupt from
	 * result descriptor.
	 */
#ifdef HIFN_DEBUG
	if (hifn_debug)
		printf("load res\n");
#endif
	if (dma->resi == HIFN_D_RES_RSIZE) {
		dma->resi = 0;
		dma->resr[HIFN_D_RES_RSIZE].l = htole32(HIFN_D_VALID |
		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
		HIFN_RESR_SYNC(sc, HIFN_D_RES_RSIZE,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}
	resi = dma->resi++;
	/* Remember the command so hifn_intr() can complete it. */
	dma->hifn_commands[resi] = cmd;
	HIFN_RES_SYNC(sc, resi, BUS_DMASYNC_PREREAD);
	dma->resr[resi].l = htole32(HIFN_MAX_RESULT |
	    HIFN_D_VALID | HIFN_D_LAST);
	HIFN_RESR_SYNC(sc, resi,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	dma->resu++;
	if (sc->sc_r_busy == 0) {
		WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_R_CTRL_ENA);
		sc->sc_r_busy = 1;
		SET_LED(sc, HIFN_MIPSRST_LED2);
	}

	/* The slop word is indexed by the result slot. */
	if (cmd->sloplen)
		cmd->slopidx = resi;

	hifn_dmamap_load_dst(sc, cmd);

	if (sc->sc_d_busy == 0) {
		WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_D_CTRL_ENA);
		sc->sc_d_busy = 1;
	}

#ifdef HIFN_DEBUG
	if (hifn_debug)
		printf("%s: command: stat %8x ier %8x\n",
		    device_xname(sc->sc_dv),
		    READ_REG_1(sc, HIFN_1_DMA_CSR), READ_REG_1(sc, HIFN_1_DMA_IER));
#endif

	/* Re-arm the idle countdown used by hifn_tick(). */
	sc->sc_active = 5;
	return (err);		/* success */

err_dstmap:
	if (cmd->src_map != cmd->dst_map)
		bus_dmamap_unload(sc->sc_dmat, cmd->dst_map);
err_dstmap1:
	if (cmd->src_map != cmd->dst_map)
		bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map);
err_srcmap:
	if (crp->crp_flags & CRYPTO_F_IMBUF &&
	    cmd->srcu.src_m != cmd->dstu.dst_m)
		m_freem(cmd->dstu.dst_m);
	bus_dmamap_unload(sc->sc_dmat, cmd->src_map);
err_srcmap1:
	bus_dmamap_destroy(sc->sc_dmat, cmd->src_map);
	return (err);
}
1884 
1885 static void
1886 hifn_tick(void *vsc)
1887 {
1888 	struct hifn_softc *sc = vsc;
1889 
1890 	mutex_spin_enter(&sc->sc_mtx);
1891 	if (sc->sc_active == 0) {
1892 		struct hifn_dma *dma = sc->sc_dma;
1893 		u_int32_t r = 0;
1894 
1895 		if (dma->cmdu == 0 && sc->sc_c_busy) {
1896 			sc->sc_c_busy = 0;
1897 			r |= HIFN_DMACSR_C_CTRL_DIS;
1898 			CLR_LED(sc, HIFN_MIPSRST_LED0);
1899 		}
1900 		if (dma->srcu == 0 && sc->sc_s_busy) {
1901 			sc->sc_s_busy = 0;
1902 			r |= HIFN_DMACSR_S_CTRL_DIS;
1903 			CLR_LED(sc, HIFN_MIPSRST_LED1);
1904 		}
1905 		if (dma->dstu == 0 && sc->sc_d_busy) {
1906 			sc->sc_d_busy = 0;
1907 			r |= HIFN_DMACSR_D_CTRL_DIS;
1908 		}
1909 		if (dma->resu == 0 && sc->sc_r_busy) {
1910 			sc->sc_r_busy = 0;
1911 			r |= HIFN_DMACSR_R_CTRL_DIS;
1912 			CLR_LED(sc, HIFN_MIPSRST_LED2);
1913 		}
1914 		if (r)
1915 			WRITE_REG_1(sc, HIFN_1_DMA_CSR, r);
1916 	}
1917 	else
1918 		sc->sc_active--;
1919 #ifdef	__OpenBSD__
1920 	timeout_add(&sc->sc_tickto, hz);
1921 #else
1922 	callout_reset(&sc->sc_tickto, hz, hifn_tick, sc);
1923 #endif
1924 	mutex_spin_exit(&sc->sc_mtx);
1925 }
1926 
/*
 * Interrupt handler: acknowledge DMA-unit interrupts, report/handle
 * overruns, illegal accesses (7811) and aborts, then reap completed
 * descriptors from the result, source and command rings.  Returns 1
 * if the interrupt was ours, 0 otherwise.
 */
static int
hifn_intr(void *arg)
{
	struct hifn_softc *sc = arg;
	struct hifn_dma *dma = sc->sc_dma;
	u_int32_t dmacsr, restart;
	int i, u;

	dmacsr = READ_REG_1(sc, HIFN_1_DMA_CSR);

#ifdef HIFN_DEBUG
	if (hifn_debug)
		printf("%s: irq: stat %08x ien %08x u %d/%d/%d/%d\n",
		       device_xname(sc->sc_dv),
		       dmacsr, READ_REG_1(sc, HIFN_1_DMA_IER),
		       dma->cmdu, dma->srcu, dma->dstu, dma->resu);
#endif

	mutex_spin_enter(&sc->sc_mtx);

	/* Nothing in the DMA unit interrupted */
	if ((dmacsr & sc->sc_dmaier) == 0) {
		mutex_spin_exit(&sc->sc_mtx);
		return (0);
	}

	/* Ack only the bits we were actually watching. */
	WRITE_REG_1(sc, HIFN_1_DMA_CSR, dmacsr & sc->sc_dmaier);

	if (dmacsr & HIFN_DMACSR_ENGINE)
		/* clear the processing-unit interrupt status */
		WRITE_REG_0(sc, HIFN_0_PUISR, READ_REG_0(sc, HIFN_0_PUISR));

	if ((sc->sc_flags & HIFN_HAS_PUBLIC) &&
	    (dmacsr & HIFN_DMACSR_PUBDONE))
		WRITE_REG_1(sc, HIFN_1_PUB_STATUS,
		    READ_REG_1(sc, HIFN_1_PUB_STATUS) | HIFN_PUBSTS_DONE);

	restart = dmacsr & (HIFN_DMACSR_R_OVER | HIFN_DMACSR_D_OVER);
	if (restart)
		printf("%s: overrun %x\n", device_xname(sc->sc_dv), dmacsr);

	if (sc->sc_flags & HIFN_IS_7811) {
		if (dmacsr & HIFN_DMACSR_ILLR)
			printf("%s: illegal read\n", device_xname(sc->sc_dv));
		if (dmacsr & HIFN_DMACSR_ILLW)
			printf("%s: illegal write\n", device_xname(sc->sc_dv));
	}

	/* Any channel abort means a full reset of the chip. */
	restart = dmacsr & (HIFN_DMACSR_C_ABORT | HIFN_DMACSR_S_ABORT |
	    HIFN_DMACSR_D_ABORT | HIFN_DMACSR_R_ABORT);
	if (restart) {
		printf("%s: abort, resetting.\n", device_xname(sc->sc_dv));
		hifnstats.hst_abort++;
		hifn_abort(sc);
		goto out;
	}

	if ((dmacsr & HIFN_DMACSR_C_WAIT) && (dma->resu == 0)) {
		/*
		 * If no slots to process and we receive a "waiting on
		 * command" interrupt, we disable the "waiting on command"
		 * (by clearing it).
		 */
		sc->sc_dmaier &= ~HIFN_DMAIER_C_WAIT;
		WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
	}

	/* clear the rings */
	/*
	 * Reap result descriptors from resk forward; stop at the first
	 * one the chip still owns (HIFN_D_VALID set).  Slot
	 * HIFN_D_RES_RSIZE is the jump descriptor: skip its callback
	 * and don't count it against resu.
	 */
	i = dma->resk;
	while (dma->resu != 0) {
		HIFN_RESR_SYNC(sc, i,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		if (dma->resr[i].l & htole32(HIFN_D_VALID)) {
			HIFN_RESR_SYNC(sc, i,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			break;
		}

		if (i != HIFN_D_RES_RSIZE) {
			struct hifn_command *cmd;

			HIFN_RES_SYNC(sc, i, BUS_DMASYNC_POSTREAD);
			cmd = dma->hifn_commands[i];
			KASSERT(cmd != NULL
				/*("hifn_intr: null command slot %u", i)*/);
			dma->hifn_commands[i] = NULL;

			hifn_callback(sc, cmd, dma->result_bufs[i]);
			hifnstats.hst_opackets++;
		}

		if (++i == (HIFN_D_RES_RSIZE + 1))
			i = 0;
		else
			dma->resu--;
	}
	dma->resk = i;

	/* Same reaping pattern for the source ring. */
	i = dma->srck; u = dma->srcu;
	while (u != 0) {
		HIFN_SRCR_SYNC(sc, i,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		if (dma->srcr[i].l & htole32(HIFN_D_VALID)) {
			HIFN_SRCR_SYNC(sc, i,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			break;
		}
		if (++i == (HIFN_D_SRC_RSIZE + 1))
			i = 0;
		else
			u--;
	}
	dma->srck = i; dma->srcu = u;

	/* And for the command ring. */
	i = dma->cmdk; u = dma->cmdu;
	while (u != 0) {
		HIFN_CMDR_SYNC(sc, i,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		if (dma->cmdr[i].l & htole32(HIFN_D_VALID)) {
			HIFN_CMDR_SYNC(sc, i,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			break;
		}
		if (i != HIFN_D_CMD_RSIZE) {
			u--;
			HIFN_CMD_SYNC(sc, i, BUS_DMASYNC_POSTWRITE);
		}
		if (++i == (HIFN_D_CMD_RSIZE + 1))
			i = 0;
	}
	dma->cmdk = i; dma->cmdu = u;

out:
	mutex_spin_exit(&sc->sc_mtx);
	return (1);
}
2062 
2063 /*
2064  * Allocate a new 'session' and return an encoded session id.  'sidp'
2065  * contains our registration id, and should contain an encoded session
2066  * id on successful allocation.
2067  */
2068 static int
2069 hifn_newsession(void *arg, u_int32_t *sidp, struct cryptoini *cri)
2070 {
2071 	struct cryptoini *c;
2072 	struct hifn_softc *sc = arg;
2073 	int i, mac = 0, cry = 0, comp = 0, retval = EINVAL;
2074 
2075 	KASSERT(sc != NULL /*, ("hifn_newsession: null softc")*/);
2076 	if (sidp == NULL || cri == NULL || sc == NULL)
2077 		return retval;
2078 
2079 	mutex_spin_enter(&sc->sc_mtx);
2080 
2081 	for (i = 0; i < sc->sc_maxses; i++)
2082 		if (sc->sc_sessions[i].hs_state == HS_STATE_FREE)
2083 			break;
2084 	if (i == sc->sc_maxses) {
2085 		retval = ENOMEM;
2086 		goto out;
2087 	}
2088 
2089 	for (c = cri; c != NULL; c = c->cri_next) {
2090 		switch (c->cri_alg) {
2091 		case CRYPTO_MD5:
2092 		case CRYPTO_SHA1:
2093 		case CRYPTO_MD5_HMAC_96:
2094 		case CRYPTO_SHA1_HMAC_96:
2095 			if (mac) {
2096 				goto out;
2097 			}
2098 			mac = 1;
2099 			break;
2100 		case CRYPTO_DES_CBC:
2101 		case CRYPTO_3DES_CBC:
2102 		case CRYPTO_AES_CBC:
2103 			/* Note that this is an initialization
2104 			   vector, not a cipher key; any function
2105 			   giving sufficient Hamming distance
2106 			   between outputs is fine.  Use of RC4
2107 			   to generate IVs has been FIPS140-2
2108 			   certified by several labs. */
2109 #ifdef __NetBSD__
2110 			cprng_fast(sc->sc_sessions[i].hs_iv,
2111 			    c->cri_alg == CRYPTO_AES_CBC ?
2112 				HIFN_AES_IV_LENGTH : HIFN_IV_LENGTH);
2113 #else	/* FreeBSD and OpenBSD have get_random_bytes */
2114 			/* XXX this may read fewer, does it matter? */
2115  			get_random_bytes(sc->sc_sessions[i].hs_iv,
2116 				c->cri_alg == CRYPTO_AES_CBC ?
2117 					HIFN_AES_IV_LENGTH : HIFN_IV_LENGTH);
2118 #endif
2119 			/*FALLTHROUGH*/
2120 		case CRYPTO_ARC4:
2121 			if (cry) {
2122 				goto out;
2123 			}
2124 			cry = 1;
2125 			break;
2126 #ifdef HAVE_CRYPTO_LZS
2127 		case CRYPTO_LZS_COMP:
2128 			if (comp) {
2129 				goto out;
2130 			}
2131 			comp = 1;
2132 			break;
2133 #endif
2134 		default:
2135 			goto out;
2136 		}
2137 	}
2138 	if (mac == 0 && cry == 0 && comp == 0) {
2139 		goto out;
2140 	}
2141 
2142 	/*
2143 	 * XXX only want to support compression without chaining to
2144 	 * MAC/crypt engine right now
2145 	 */
2146 	if ((comp && mac) || (comp && cry)) {
2147 		goto out;
2148 	}
2149 
2150 	*sidp = HIFN_SID(device_unit(sc->sc_dv), i);
2151 	sc->sc_sessions[i].hs_state = HS_STATE_USED;
2152 
2153 	retval = 0;
2154 out:
2155 	mutex_spin_exit(&sc->sc_mtx);
2156 	return retval;
2157 }
2158 
2159 /*
2160  * Deallocate a session.
2161  * XXX this routine should run a zero'd mac/encrypt key into context ram.
2162  * XXX to blow away any keys already stored there.
2163  */
2164 static int
2165 hifn_freesession(void *arg, u_int64_t tid)
2166 {
2167 	struct hifn_softc *sc = arg;
2168 	int session;
2169 	u_int32_t sid = ((u_int32_t) tid) & 0xffffffff;
2170 
2171 	KASSERT(sc != NULL /*, ("hifn_freesession: null softc")*/);
2172 	if (sc == NULL)
2173 		return (EINVAL);
2174 
2175 	mutex_spin_enter(&sc->sc_mtx);
2176 	session = HIFN_SESSION(sid);
2177 	if (session >= sc->sc_maxses) {
2178 		mutex_spin_exit(&sc->sc_mtx);
2179 		return (EINVAL);
2180 	}
2181 
2182 	memset(&sc->sc_sessions[session], 0, sizeof(sc->sc_sessions[session]));
2183 	mutex_spin_exit(&sc->sc_mtx);
2184 	return (0);
2185 }
2186 
2187 static int
2188 hifn_process(void *arg, struct cryptop *crp, int hint)
2189 {
2190 	struct hifn_softc *sc = arg;
2191 	struct hifn_command *cmd = NULL;
2192 	int session, err, ivlen;
2193 	struct cryptodesc *crd1, *crd2, *maccrd, *enccrd;
2194 
2195 	if (crp == NULL || crp->crp_callback == NULL) {
2196 		hifnstats.hst_invalid++;
2197 		return (EINVAL);
2198 	}
2199 
2200 	mutex_spin_enter(&sc->sc_mtx);
2201 	session = HIFN_SESSION(crp->crp_sid);
2202 
2203 	if (sc == NULL || session >= sc->sc_maxses) {
2204 		err = EINVAL;
2205 		goto errout;
2206 	}
2207 
2208 	cmd = (struct hifn_command *)malloc(sizeof(struct hifn_command),
2209 	    M_DEVBUF, M_NOWAIT|M_ZERO);
2210 	if (cmd == NULL) {
2211 		hifnstats.hst_nomem++;
2212 		err = ENOMEM;
2213 		goto errout;
2214 	}
2215 
2216 	if (crp->crp_flags & CRYPTO_F_IMBUF) {
2217 		cmd->srcu.src_m = (struct mbuf *)crp->crp_buf;
2218 		cmd->dstu.dst_m = (struct mbuf *)crp->crp_buf;
2219 	} else if (crp->crp_flags & CRYPTO_F_IOV) {
2220 		cmd->srcu.src_io = (struct uio *)crp->crp_buf;
2221 		cmd->dstu.dst_io = (struct uio *)crp->crp_buf;
2222 	} else {
2223 		err = EINVAL;
2224 		goto errout;	/* XXX we don't handle contiguous buffers! */
2225 	}
2226 
2227 	crd1 = crp->crp_desc;
2228 	if (crd1 == NULL) {
2229 		err = EINVAL;
2230 		goto errout;
2231 	}
2232 	crd2 = crd1->crd_next;
2233 
2234 	if (crd2 == NULL) {
2235 		if (crd1->crd_alg == CRYPTO_MD5_HMAC_96 ||
2236 		    crd1->crd_alg == CRYPTO_SHA1_HMAC_96 ||
2237 		    crd1->crd_alg == CRYPTO_SHA1 ||
2238 		    crd1->crd_alg == CRYPTO_MD5) {
2239 			maccrd = crd1;
2240 			enccrd = NULL;
2241 		} else if (crd1->crd_alg == CRYPTO_DES_CBC ||
2242 			   crd1->crd_alg == CRYPTO_3DES_CBC ||
2243 			   crd1->crd_alg == CRYPTO_AES_CBC ||
2244 			   crd1->crd_alg == CRYPTO_ARC4) {
2245 			if ((crd1->crd_flags & CRD_F_ENCRYPT) == 0)
2246 				cmd->base_masks |= HIFN_BASE_CMD_DECODE;
2247 			maccrd = NULL;
2248 			enccrd = crd1;
2249 #ifdef	HAVE_CRYPTO_LZS
2250 		} else if (crd1->crd_alg == CRYPTO_LZS_COMP) {
2251 		  return (hifn_compression(sc, crp, cmd));
2252 #endif
2253 		} else {
2254 			err = EINVAL;
2255 			goto errout;
2256 		}
2257 	} else {
2258 		if ((crd1->crd_alg == CRYPTO_MD5_HMAC_96 ||
2259 		     crd1->crd_alg == CRYPTO_SHA1_HMAC_96 ||
2260 		     crd1->crd_alg == CRYPTO_MD5 ||
2261 		     crd1->crd_alg == CRYPTO_SHA1) &&
2262 		    (crd2->crd_alg == CRYPTO_DES_CBC ||
2263 		     crd2->crd_alg == CRYPTO_3DES_CBC ||
2264 		     crd2->crd_alg == CRYPTO_AES_CBC ||
2265 		     crd2->crd_alg == CRYPTO_ARC4) &&
2266 		    ((crd2->crd_flags & CRD_F_ENCRYPT) == 0)) {
2267 			cmd->base_masks = HIFN_BASE_CMD_DECODE;
2268 			maccrd = crd1;
2269 			enccrd = crd2;
2270 		} else if ((crd1->crd_alg == CRYPTO_DES_CBC ||
2271 			    crd1->crd_alg == CRYPTO_ARC4 ||
2272 			    crd1->crd_alg == CRYPTO_3DES_CBC ||
2273 			    crd1->crd_alg == CRYPTO_AES_CBC) &&
2274 			   (crd2->crd_alg == CRYPTO_MD5_HMAC_96 ||
2275 			    crd2->crd_alg == CRYPTO_SHA1_HMAC_96 ||
2276 			    crd2->crd_alg == CRYPTO_MD5 ||
2277 			    crd2->crd_alg == CRYPTO_SHA1) &&
2278 			   (crd1->crd_flags & CRD_F_ENCRYPT)) {
2279 			enccrd = crd1;
2280 			maccrd = crd2;
2281 		} else {
2282 			/*
2283 			 * We cannot order the 7751 as requested
2284 			 */
2285 			err = EINVAL;
2286 			goto errout;
2287 		}
2288 	}
2289 
2290 	if (enccrd) {
2291 		cmd->enccrd = enccrd;
2292 		cmd->base_masks |= HIFN_BASE_CMD_CRYPT;
2293 		switch (enccrd->crd_alg) {
2294 		case CRYPTO_ARC4:
2295 			cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_RC4;
2296 			if ((enccrd->crd_flags & CRD_F_ENCRYPT)
2297 			    != sc->sc_sessions[session].hs_prev_op)
2298 				sc->sc_sessions[session].hs_state =
2299 				    HS_STATE_USED;
2300 			break;
2301 		case CRYPTO_DES_CBC:
2302 			cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_DES |
2303 			    HIFN_CRYPT_CMD_MODE_CBC |
2304 			    HIFN_CRYPT_CMD_NEW_IV;
2305 			break;
2306 		case CRYPTO_3DES_CBC:
2307 			cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_3DES |
2308 			    HIFN_CRYPT_CMD_MODE_CBC |
2309 			    HIFN_CRYPT_CMD_NEW_IV;
2310 			break;
2311 		case CRYPTO_AES_CBC:
2312 			cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_AES |
2313 			    HIFN_CRYPT_CMD_MODE_CBC |
2314 			    HIFN_CRYPT_CMD_NEW_IV;
2315 			break;
2316 		default:
2317 			err = EINVAL;
2318 			goto errout;
2319 		}
2320 		if (enccrd->crd_alg != CRYPTO_ARC4) {
2321 			ivlen = ((enccrd->crd_alg == CRYPTO_AES_CBC) ?
2322 				HIFN_AES_IV_LENGTH : HIFN_IV_LENGTH);
2323 			if (enccrd->crd_flags & CRD_F_ENCRYPT) {
2324 				if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
2325 					memcpy(cmd->iv, enccrd->crd_iv, ivlen);
2326 				else
2327 					bcopy(sc->sc_sessions[session].hs_iv,
2328 					    cmd->iv, ivlen);
2329 
2330 				if ((enccrd->crd_flags & CRD_F_IV_PRESENT)
2331 				    == 0) {
2332 					if (crp->crp_flags & CRYPTO_F_IMBUF)
2333 						m_copyback(cmd->srcu.src_m,
2334 						    enccrd->crd_inject,
2335 						    ivlen, cmd->iv);
2336 					else if (crp->crp_flags & CRYPTO_F_IOV)
2337 						cuio_copyback(cmd->srcu.src_io,
2338 						    enccrd->crd_inject,
2339 						    ivlen, cmd->iv);
2340 				}
2341 			} else {
2342 				if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
2343 					memcpy(cmd->iv, enccrd->crd_iv, ivlen);
2344 				else if (crp->crp_flags & CRYPTO_F_IMBUF)
2345 					m_copydata(cmd->srcu.src_m,
2346 					    enccrd->crd_inject, ivlen, cmd->iv);
2347 				else if (crp->crp_flags & CRYPTO_F_IOV)
2348 					cuio_copydata(cmd->srcu.src_io,
2349 					    enccrd->crd_inject, ivlen, cmd->iv);
2350 			}
2351 		}
2352 
2353 		cmd->ck = enccrd->crd_key;
2354 		cmd->cklen = enccrd->crd_klen >> 3;
2355 
2356 		/*
2357 		 * Need to specify the size for the AES key in the masks.
2358 		 */
2359 		if ((cmd->cry_masks & HIFN_CRYPT_CMD_ALG_MASK) ==
2360 		    HIFN_CRYPT_CMD_ALG_AES) {
2361 			switch (cmd->cklen) {
2362 			case 16:
2363 				cmd->cry_masks |= HIFN_CRYPT_CMD_KSZ_128;
2364 				break;
2365 			case 24:
2366 				cmd->cry_masks |= HIFN_CRYPT_CMD_KSZ_192;
2367 				break;
2368 			case 32:
2369 				cmd->cry_masks |= HIFN_CRYPT_CMD_KSZ_256;
2370 				break;
2371 			default:
2372 				err = EINVAL;
2373 				goto errout;
2374 			}
2375 		}
2376 
2377 		if (sc->sc_sessions[session].hs_state == HS_STATE_USED)
2378 			cmd->cry_masks |= HIFN_CRYPT_CMD_NEW_KEY;
2379 	}
2380 
2381 	if (maccrd) {
2382 		cmd->maccrd = maccrd;
2383 		cmd->base_masks |= HIFN_BASE_CMD_MAC;
2384 
2385 		switch (maccrd->crd_alg) {
2386 		case CRYPTO_MD5:
2387 			cmd->mac_masks |= HIFN_MAC_CMD_ALG_MD5 |
2388 			    HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HASH |
2389 			    HIFN_MAC_CMD_POS_IPSEC;
2390 			break;
2391 		case CRYPTO_MD5_HMAC_96:
2392 			cmd->mac_masks |= HIFN_MAC_CMD_ALG_MD5 |
2393 			    HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HMAC |
2394 			    HIFN_MAC_CMD_POS_IPSEC | HIFN_MAC_CMD_TRUNC;
2395 			break;
2396 		case CRYPTO_SHA1:
2397 			cmd->mac_masks |= HIFN_MAC_CMD_ALG_SHA1 |
2398 			    HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HASH |
2399 			    HIFN_MAC_CMD_POS_IPSEC;
2400 			break;
2401 		case CRYPTO_SHA1_HMAC_96:
2402 			cmd->mac_masks |= HIFN_MAC_CMD_ALG_SHA1 |
2403 			    HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HMAC |
2404 			    HIFN_MAC_CMD_POS_IPSEC | HIFN_MAC_CMD_TRUNC;
2405 			break;
2406 		}
2407 
2408 		if ((maccrd->crd_alg == CRYPTO_SHA1_HMAC_96 ||
2409 		     maccrd->crd_alg == CRYPTO_MD5_HMAC_96) &&
2410 		    sc->sc_sessions[session].hs_state == HS_STATE_USED) {
2411 			cmd->mac_masks |= HIFN_MAC_CMD_NEW_KEY;
2412 			memcpy(cmd->mac, maccrd->crd_key, maccrd->crd_klen >> 3);
2413 			memset(cmd->mac + (maccrd->crd_klen >> 3), 0,
2414 			    HIFN_MAC_KEY_LENGTH - (maccrd->crd_klen >> 3));
2415 		}
2416 	}
2417 
2418 	cmd->crp = crp;
2419 	cmd->session_num = session;
2420 	cmd->softc = sc;
2421 
2422 	err = hifn_crypto(sc, cmd, crp, hint);
2423 	if (err == 0) {
2424 		if (enccrd)
2425 			sc->sc_sessions[session].hs_prev_op =
2426 				enccrd->crd_flags & CRD_F_ENCRYPT;
2427 		if (sc->sc_sessions[session].hs_state == HS_STATE_USED)
2428 			sc->sc_sessions[session].hs_state = HS_STATE_KEY;
2429 		mutex_spin_exit(&sc->sc_mtx);
2430 		return 0;
2431 	} else if (err == ERESTART) {
2432 		/*
2433 		 * There weren't enough resources to dispatch the request
2434 		 * to the part.  Notify the caller so they'll requeue this
2435 		 * request and resubmit it again soon.
2436 		 */
2437 #ifdef HIFN_DEBUG
2438 		if (hifn_debug)
2439 			printf("%s: requeue request\n", device_xname(sc->sc_dv));
2440 #endif
2441 		free(cmd, M_DEVBUF);
2442 		sc->sc_needwakeup |= CRYPTO_SYMQ;
2443 		mutex_spin_exit(&sc->sc_mtx);
2444 		return (err);
2445 	}
2446 
2447 errout:
2448 	if (cmd != NULL)
2449 		free(cmd, M_DEVBUF);
2450 	if (err == EINVAL)
2451 		hifnstats.hst_invalid++;
2452 	else
2453 		hifnstats.hst_nomem++;
2454 	crp->crp_etype = err;
2455 	mutex_spin_exit(&sc->sc_mtx);
2456 	crypto_done(crp);
2457 	return (0);
2458 }
2459 
/*
 * Fail or flush every command still outstanding on the result ring,
 * then reset and re-initialize the board.  Called from hifn_intr()
 * (with sc->sc_mtx held) after the DMA engine reported an abort.
 */
static void
hifn_abort(struct hifn_softc *sc)
{
	struct hifn_dma *dma = sc->sc_dma;
	struct hifn_command *cmd;
	struct cryptop *crp;
	int i, u;

	i = dma->resk; u = dma->resu;
	while (u != 0) {
		cmd = dma->hifn_commands[i];
		KASSERT(cmd != NULL /*, ("hifn_abort: null cmd slot %u", i)*/);
		dma->hifn_commands[i] = NULL;
		crp = cmd->crp;

		if ((dma->resr[i].l & htole32(HIFN_D_VALID)) == 0) {
			/* Salvage what we can. */
			hifnstats.hst_opackets++;
			hifn_callback(sc, cmd, dma->result_bufs[i]);
		} else {
			/*
			 * Descriptor is still owned by the chip; sync
			 * the maps and tear the command down by hand.
			 */
			if (cmd->src_map == cmd->dst_map) {
				bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
						0, cmd->src_map->dm_mapsize,
				    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
			} else {
				bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
				    0, cmd->src_map->dm_mapsize,
				    BUS_DMASYNC_POSTWRITE);
				bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
				    0, cmd->dst_map->dm_mapsize,
				    BUS_DMASYNC_POSTREAD);
			}

			/*
			 * A distinct dst chain means the driver built a
			 * new destination mbuf chain for this request;
			 * hand that back to the caller and drop the src.
			 */
			if (cmd->srcu.src_m != cmd->dstu.dst_m) {
				m_freem(cmd->srcu.src_m);
				crp->crp_buf = (void *)cmd->dstu.dst_m;
			}

			/* non-shared buffers cannot be restarted */
			if (cmd->src_map != cmd->dst_map) {
				/*
				 * XXX should be EAGAIN, delayed until
				 * after the reset.
				 */
				crp->crp_etype = ENOMEM;
				bus_dmamap_unload(sc->sc_dmat, cmd->dst_map);
				bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map);
			} else
				crp->crp_etype = ENOMEM;

			bus_dmamap_unload(sc->sc_dmat, cmd->src_map);
			bus_dmamap_destroy(sc->sc_dmat, cmd->src_map);

			free(cmd, M_DEVBUF);
			/* crp_etype is ENOMEM on both branches above,
			 * so this always completes the request. */
			if (crp->crp_etype != EAGAIN)
				crypto_done(crp);
		}

		/*
		 * NOTE(review): this wraps at HIFN_D_RES_RSIZE while
		 * hifn_intr() wraps at HIFN_D_RES_RSIZE + 1 (the jump
		 * descriptor slot) -- confirm the rings agree.
		 */
		if (++i == HIFN_D_RES_RSIZE)
			i = 0;
		u--;
	}
	dma->resk = i; dma->resu = u;

	/* Force upload of key next time */
	for (i = 0; i < sc->sc_maxses; i++)
		if (sc->sc_sessions[i].hs_state == HS_STATE_KEY)
			sc->sc_sessions[i].hs_state = HS_STATE_USED;

	hifn_reset_board(sc, 1);
	hifn_init_dma(sc);
	hifn_init_pci_registers(sc);
}
2533 
/*
 * Completion handler for a finished crypto command: sync and unload
 * the DMA maps, fix up the destination mbuf chain, copy back any slop
 * bytes, advance the destination descriptor ring, save the next CBC IV
 * into the session, copy out the MAC result, then free the command and
 * complete the request with crypto_done().
 */
static void
hifn_callback(struct hifn_softc *sc, struct hifn_command *cmd, u_int8_t *resbuf)
{
	struct hifn_dma *dma = sc->sc_dma;
	struct cryptop *crp = cmd->crp;
	struct cryptodesc *crd;
	struct mbuf *m;
	int totlen, i, u, ivlen;

	if (cmd->src_map == cmd->dst_map)
		bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
		    0, cmd->src_map->dm_mapsize,
		    BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
	else {
		bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
		    0, cmd->src_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
		    0, cmd->dst_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
	}

	if (crp->crp_flags & CRYPTO_F_IMBUF) {
		if (cmd->srcu.src_m != cmd->dstu.dst_m) {
			/*
			 * A separate dst chain was allocated: trim its
			 * mbuf lengths to the source mapping size, hand
			 * it back to the caller and free the source.
			 */
			crp->crp_buf = (void *)cmd->dstu.dst_m;
			totlen = cmd->src_map->dm_mapsize;
			for (m = cmd->dstu.dst_m; m != NULL; m = m->m_next) {
				if (totlen < m->m_len) {
					m->m_len = totlen;
					totlen = 0;
				} else
					totlen -= m->m_len;
			}
			cmd->dstu.dst_m->m_pkthdr.len =
			    cmd->srcu.src_m->m_pkthdr.len;
			m_freem(cmd->srcu.src_m);
		}
	}

	/* Copy the trailing partial-word ("slop") bytes back out. */
	if (cmd->sloplen != 0) {
		if (crp->crp_flags & CRYPTO_F_IMBUF)
			m_copyback((struct mbuf *)crp->crp_buf,
			    cmd->src_map->dm_mapsize - cmd->sloplen,
			    cmd->sloplen, (void *)&dma->slop[cmd->slopidx]);
		else if (crp->crp_flags & CRYPTO_F_IOV)
			cuio_copyback((struct uio *)crp->crp_buf,
			    cmd->src_map->dm_mapsize - cmd->sloplen,
			    cmd->sloplen, (void *)&dma->slop[cmd->slopidx]);
	}

	/* Reap completed destination descriptors (stop at first VALID). */
	i = dma->dstk; u = dma->dstu;
	while (u != 0) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
		    offsetof(struct hifn_dma, dstr[i]), sizeof(struct hifn_desc),
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		if (dma->dstr[i].l & htole32(HIFN_D_VALID)) {
			bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
			    offsetof(struct hifn_dma, dstr[i]),
			    sizeof(struct hifn_desc),
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			break;
		}
		/* Slot HIFN_D_DST_RSIZE is the jump descriptor: skip
		 * it without consuming a ring entry. */
		if (++i == (HIFN_D_DST_RSIZE + 1))
			i = 0;
		else
			u--;
	}
	dma->dstk = i; dma->dstu = u;

	hifnstats.hst_obytes += cmd->dst_map->dm_mapsize;

	/*
	 * For CBC encryption, save the last ivlen bytes of ciphertext
	 * as the IV for the next operation on this session.
	 */
	if ((cmd->base_masks & (HIFN_BASE_CMD_CRYPT | HIFN_BASE_CMD_DECODE)) ==
	    HIFN_BASE_CMD_CRYPT) {
		for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
			if (crd->crd_alg != CRYPTO_DES_CBC &&
			    crd->crd_alg != CRYPTO_3DES_CBC &&
			    crd->crd_alg != CRYPTO_AES_CBC)
				continue;
			ivlen = ((crd->crd_alg == CRYPTO_AES_CBC) ?
				HIFN_AES_IV_LENGTH : HIFN_IV_LENGTH);
			if (crp->crp_flags & CRYPTO_F_IMBUF)
				m_copydata((struct mbuf *)crp->crp_buf,
				    crd->crd_skip + crd->crd_len - ivlen,
				    ivlen,
				    cmd->softc->sc_sessions[cmd->session_num].hs_iv);
			else if (crp->crp_flags & CRYPTO_F_IOV) {
				cuio_copydata((struct uio *)crp->crp_buf,
				    crd->crd_skip + crd->crd_len - ivlen,
				    ivlen,
				    cmd->softc->sc_sessions[cmd->session_num].hs_iv);
			}
			/* XXX We do not handle contig data */
			break;
		}
	}

	if (cmd->base_masks & HIFN_BASE_CMD_MAC) {
		u_int8_t *macbuf;

		/*
		 * The MAC bytes follow the base (and optional
		 * compression) result headers and the mac result
		 * header in the result buffer.
		 */
		macbuf = resbuf + sizeof(struct hifn_base_result);
		if (cmd->base_masks & HIFN_BASE_CMD_COMP)
			macbuf += sizeof(struct hifn_comp_result);
		macbuf += sizeof(struct hifn_mac_result);

		for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
			int len;

			/* Digest length depends on the algorithm. */
			if (crd->crd_alg == CRYPTO_MD5)
				len = 16;
			else if (crd->crd_alg == CRYPTO_SHA1)
				len = 20;
			else if (crd->crd_alg == CRYPTO_MD5_HMAC_96 ||
			    crd->crd_alg == CRYPTO_SHA1_HMAC_96)
				len = 12;
			else
				continue;

			if (crp->crp_flags & CRYPTO_F_IMBUF)
				m_copyback((struct mbuf *)crp->crp_buf,
				    crd->crd_inject, len, macbuf);
			else if ((crp->crp_flags & CRYPTO_F_IOV) && crp->crp_mac)
				memcpy(crp->crp_mac, (void *)macbuf, len);
			break;
		}
	}

	if (cmd->src_map != cmd->dst_map) {
		bus_dmamap_unload(sc->sc_dmat, cmd->dst_map);
		bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map);
	}
	bus_dmamap_unload(sc->sc_dmat, cmd->src_map);
	bus_dmamap_destroy(sc->sc_dmat, cmd->src_map);
	free(cmd, M_DEVBUF);
	crypto_done(crp);
}
2667 
2668 #ifdef HAVE_CRYPTO_LZS
2669 
2670 static int
2671 hifn_compression(struct hifn_softc *sc, struct cryptop *crp,
2672     struct hifn_command *cmd)
2673 {
2674 	struct cryptodesc *crd = crp->crp_desc;
2675 	int s, err = 0;
2676 
2677 	cmd->compcrd = crd;
2678 	cmd->base_masks |= HIFN_BASE_CMD_COMP;
2679 
2680 	if ((crp->crp_flags & CRYPTO_F_IMBUF) == 0) {
2681 		/*
2682 		 * XXX can only handle mbufs right now since we can
2683 		 * XXX dynamically resize them.
2684 		 */
2685 		err = EINVAL;
2686 		return (ENOMEM);
2687 	}
2688 
2689 	if ((crd->crd_flags & CRD_F_COMP) == 0)
2690 		cmd->base_masks |= HIFN_BASE_CMD_DECODE;
2691 	if (crd->crd_alg == CRYPTO_LZS_COMP)
2692 		cmd->comp_masks |= HIFN_COMP_CMD_ALG_LZS |
2693 		    HIFN_COMP_CMD_CLEARHIST;
2694 
2695 	if (bus_dmamap_create(sc->sc_dmat, HIFN_MAX_DMALEN, MAX_SCATTER,
2696 	    HIFN_MAX_SEGLEN, 0, BUS_DMA_NOWAIT, &cmd->src_map)) {
2697 		err = ENOMEM;
2698 		goto fail;
2699 	}
2700 
2701 	if (bus_dmamap_create(sc->sc_dmat, HIFN_MAX_DMALEN, MAX_SCATTER,
2702 	    HIFN_MAX_SEGLEN, 0, BUS_DMA_NOWAIT, &cmd->dst_map)) {
2703 		err = ENOMEM;
2704 		goto fail;
2705 	}
2706 
2707 	if (crp->crp_flags & CRYPTO_F_IMBUF) {
2708 		int len;
2709 
2710 		if (bus_dmamap_load_mbuf(sc->sc_dmat, cmd->src_map,
2711 		    cmd->srcu.src_m, BUS_DMA_NOWAIT)) {
2712 			err = ENOMEM;
2713 			goto fail;
2714 		}
2715 
2716 		len = cmd->src_map->dm_mapsize / MCLBYTES;
2717 		if ((cmd->src_map->dm_mapsize % MCLBYTES) != 0)
2718 			len++;
2719 		len *= MCLBYTES;
2720 
2721 		if ((crd->crd_flags & CRD_F_COMP) == 0)
2722 			len *= 4;
2723 
2724 		if (len > HIFN_MAX_DMALEN)
2725 			len = HIFN_MAX_DMALEN;
2726 
2727 		cmd->dstu.dst_m = hifn_mkmbuf_chain(len, cmd->srcu.src_m);
2728 		if (cmd->dstu.dst_m == NULL) {
2729 			err = ENOMEM;
2730 			goto fail;
2731 		}
2732 
2733 		if (bus_dmamap_load_mbuf(sc->sc_dmat, cmd->dst_map,
2734 		    cmd->dstu.dst_m, BUS_DMA_NOWAIT)) {
2735 			err = ENOMEM;
2736 			goto fail;
2737 		}
2738 	} else if (crp->crp_flags & CRYPTO_F_IOV) {
2739 		if (bus_dmamap_load_uio(sc->sc_dmat, cmd->src_map,
2740 		    cmd->srcu.src_io, BUS_DMA_NOWAIT)) {
2741 			err = ENOMEM;
2742 			goto fail;
2743 		}
2744 		if (bus_dmamap_load_uio(sc->sc_dmat, cmd->dst_map,
2745 		    cmd->dstu.dst_io, BUS_DMA_NOWAIT)) {
2746 			err = ENOMEM;
2747 			goto fail;
2748 		}
2749 	}
2750 
2751 	if (cmd->src_map == cmd->dst_map)
2752 		bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
2753 		    0, cmd->src_map->dm_mapsize,
2754 		    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
2755 	else {
2756 		bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
2757 		    0, cmd->src_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
2758 		bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
2759 		    0, cmd->dst_map->dm_mapsize, BUS_DMASYNC_PREREAD);
2760 	}
2761 
2762 	cmd->crp = crp;
2763 	/*
2764 	 * Always use session 0.  The modes of compression we use are
2765 	 * stateless and there is always at least one compression
2766 	 * context, zero.
2767 	 */
2768 	cmd->session_num = 0;
2769 	cmd->softc = sc;
2770 
2771 	err = hifn_compress_enter(sc, cmd);
2772 
2773 	if (err != 0)
2774 		goto fail;
2775 	return (0);
2776 
2777 fail:
2778 	if (cmd->dst_map != NULL) {
2779 		if (cmd->dst_map->dm_nsegs > 0)
2780 			bus_dmamap_unload(sc->sc_dmat, cmd->dst_map);
2781 		bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map);
2782 	}
2783 	if (cmd->src_map != NULL) {
2784 		if (cmd->src_map->dm_nsegs > 0)
2785 			bus_dmamap_unload(sc->sc_dmat, cmd->src_map);
2786 		bus_dmamap_destroy(sc->sc_dmat, cmd->src_map);
2787 	}
2788 	free(cmd, M_DEVBUF);
2789 	if (err == EINVAL)
2790 		hifnstats.hst_invalid++;
2791 	else
2792 		hifnstats.hst_nomem++;
2793 	crp->crp_etype = err;
2794 	crypto_done(crp);
2795 	return (0);
2796 }
2797 
2798 static int
2799 hifn_compress_enter(struct hifn_softc *sc, struct hifn_command *cmd)
2800 {
2801 	struct hifn_dma *dma = sc->sc_dma;
2802 	int cmdi, resi;
2803 	u_int32_t cmdlen;
2804 
2805 	if ((dma->cmdu + 1) > HIFN_D_CMD_RSIZE ||
2806 	    (dma->resu + 1) > HIFN_D_CMD_RSIZE)
2807 		return (ENOMEM);
2808 
2809 	if ((dma->srcu + cmd->src_map->dm_nsegs) > HIFN_D_SRC_RSIZE ||
2810 	    (dma->dstu + cmd->dst_map->dm_nsegs) > HIFN_D_DST_RSIZE)
2811 		return (ENOMEM);
2812 
2813 	if (dma->cmdi == HIFN_D_CMD_RSIZE) {
2814 		dma->cmdi = 0;
2815 		dma->cmdr[HIFN_D_CMD_RSIZE].l = htole32(HIFN_D_VALID |
2816 		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
2817 		HIFN_CMDR_SYNC(sc, HIFN_D_CMD_RSIZE,
2818 		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
2819 	}
2820 	cmdi = dma->cmdi++;
2821 	cmdlen = hifn_write_command(cmd, dma->command_bufs[cmdi]);
2822 	HIFN_CMD_SYNC(sc, cmdi, BUS_DMASYNC_PREWRITE);
2823 
2824 	/* .p for command/result already set */
2825 	dma->cmdr[cmdi].l = htole32(cmdlen | HIFN_D_VALID | HIFN_D_LAST |
2826 	    HIFN_D_MASKDONEIRQ);
2827 	HIFN_CMDR_SYNC(sc, cmdi,
2828 	    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
2829 	dma->cmdu++;
2830 	if (sc->sc_c_busy == 0) {
2831 		WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_C_CTRL_ENA);
2832 		sc->sc_c_busy = 1;
2833 		SET_LED(sc, HIFN_MIPSRST_LED0);
2834 	}
2835 
2836 	/*
2837 	 * We don't worry about missing an interrupt (which a "command wait"
2838 	 * interrupt salvages us from), unless there is more than one command
2839 	 * in the queue.
2840 	 */
2841 	if (dma->cmdu > 1) {
2842 		sc->sc_dmaier |= HIFN_DMAIER_C_WAIT;
2843 		WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
2844 	}
2845 
2846 	hifnstats.hst_ipackets++;
2847 	hifnstats.hst_ibytes += cmd->src_map->dm_mapsize;
2848 
2849 	hifn_dmamap_load_src(sc, cmd);
2850 	if (sc->sc_s_busy == 0) {
2851 		WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_S_CTRL_ENA);
2852 		sc->sc_s_busy = 1;
2853 		SET_LED(sc, HIFN_MIPSRST_LED1);
2854 	}
2855 
2856 	/*
2857 	 * Unlike other descriptors, we don't mask done interrupt from
2858 	 * result descriptor.
2859 	 */
2860 	if (dma->resi == HIFN_D_RES_RSIZE) {
2861 		dma->resi = 0;
2862 		dma->resr[HIFN_D_RES_RSIZE].l = htole32(HIFN_D_VALID |
2863 		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
2864 		HIFN_RESR_SYNC(sc, HIFN_D_RES_RSIZE,
2865 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2866 	}
2867 	resi = dma->resi++;
2868 	dma->hifn_commands[resi] = cmd;
2869 	HIFN_RES_SYNC(sc, resi, BUS_DMASYNC_PREREAD);
2870 	dma->resr[resi].l = htole32(HIFN_MAX_RESULT |
2871 	    HIFN_D_VALID | HIFN_D_LAST);
2872 	HIFN_RESR_SYNC(sc, resi,
2873 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2874 	dma->resu++;
2875 	if (sc->sc_r_busy == 0) {
2876 		WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_R_CTRL_ENA);
2877 		sc->sc_r_busy = 1;
2878 		SET_LED(sc, HIFN_MIPSRST_LED2);
2879 	}
2880 
2881 	if (cmd->sloplen)
2882 		cmd->slopidx = resi;
2883 
2884 	hifn_dmamap_load_dst(sc, cmd);
2885 
2886 	if (sc->sc_d_busy == 0) {
2887 		WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_D_CTRL_ENA);
2888 		sc->sc_d_busy = 1;
2889 	}
2890 	sc->sc_active = 5;
2891 	cmd->cmd_callback = hifn_callback_comp;
2892 	return (0);
2893 }
2894 
2895 static void
2896 hifn_callback_comp(struct hifn_softc *sc, struct hifn_command *cmd,
2897     u_int8_t *resbuf)
2898 {
2899 	struct hifn_base_result baseres;
2900 	struct cryptop *crp = cmd->crp;
2901 	struct hifn_dma *dma = sc->sc_dma;
2902 	struct mbuf *m;
2903 	int err = 0, i, u;
2904 	u_int32_t olen;
2905 	bus_size_t dstsize;
2906 
2907 	bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
2908 	    0, cmd->src_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
2909 	bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
2910 	    0, cmd->dst_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
2911 
2912 	dstsize = cmd->dst_map->dm_mapsize;
2913 	bus_dmamap_unload(sc->sc_dmat, cmd->dst_map);
2914 
2915 	memcpy(&baseres, resbuf, sizeof(struct hifn_base_result));
2916 
2917 	i = dma->dstk; u = dma->dstu;
2918 	while (u != 0) {
2919 		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
2920 		    offsetof(struct hifn_dma, dstr[i]), sizeof(struct hifn_desc),
2921 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2922 		if (dma->dstr[i].l & htole32(HIFN_D_VALID)) {
2923 			bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
2924 			    offsetof(struct hifn_dma, dstr[i]),
2925 			    sizeof(struct hifn_desc),
2926 			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2927 			break;
2928 		}
2929 		if (++i == (HIFN_D_DST_RSIZE + 1))
2930 			i = 0;
2931 		else
2932 			u--;
2933 	}
2934 	dma->dstk = i; dma->dstu = u;
2935 
2936 	if (baseres.flags & htole16(HIFN_BASE_RES_DSTOVERRUN)) {
2937 		bus_size_t xlen;
2938 
2939 		xlen = dstsize;
2940 
2941 		m_freem(cmd->dstu.dst_m);
2942 
2943 		if (xlen == HIFN_MAX_DMALEN) {
2944 			/* We've done all we can. */
2945 			err = E2BIG;
2946 			goto out;
2947 		}
2948 
2949 		xlen += MCLBYTES;
2950 
2951 		if (xlen > HIFN_MAX_DMALEN)
2952 			xlen = HIFN_MAX_DMALEN;
2953 
2954 		cmd->dstu.dst_m = hifn_mkmbuf_chain(xlen,
2955 		    cmd->srcu.src_m);
2956 		if (cmd->dstu.dst_m == NULL) {
2957 			err = ENOMEM;
2958 			goto out;
2959 		}
2960 		if (bus_dmamap_load_mbuf(sc->sc_dmat, cmd->dst_map,
2961 		    cmd->dstu.dst_m, BUS_DMA_NOWAIT)) {
2962 			err = ENOMEM;
2963 			goto out;
2964 		}
2965 
2966 		bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
2967 		    0, cmd->src_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
2968 		bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
2969 		    0, cmd->dst_map->dm_mapsize, BUS_DMASYNC_PREREAD);
2970 
2971 		err = hifn_compress_enter(sc, cmd);
2972 		if (err != 0)
2973 			goto out;
2974 		return;
2975 	}
2976 
2977 	olen = dstsize - (letoh16(baseres.dst_cnt) |
2978 	    (((letoh16(baseres.session) & HIFN_BASE_RES_DSTLEN_M) >>
2979 	    HIFN_BASE_RES_DSTLEN_S) << 16));
2980 
2981 	crp->crp_olen = olen - cmd->compcrd->crd_skip;
2982 
2983 	bus_dmamap_unload(sc->sc_dmat, cmd->src_map);
2984 	bus_dmamap_destroy(sc->sc_dmat, cmd->src_map);
2985 	bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map);
2986 
2987 	m = cmd->dstu.dst_m;
2988 	if (m->m_flags & M_PKTHDR)
2989 		m->m_pkthdr.len = olen;
2990 	crp->crp_buf = (void *)m;
2991 	for (; m != NULL; m = m->m_next) {
2992 		if (olen >= m->m_len)
2993 			olen -= m->m_len;
2994 		else {
2995 			m->m_len = olen;
2996 			olen = 0;
2997 		}
2998 	}
2999 
3000 	m_freem(cmd->srcu.src_m);
3001 	free(cmd, M_DEVBUF);
3002 	crp->crp_etype = 0;
3003 	crypto_done(crp);
3004 	return;
3005 
3006 out:
3007 	if (cmd->dst_map != NULL) {
3008 		if (cmd->src_map->dm_nsegs != 0)
3009 			bus_dmamap_unload(sc->sc_dmat, cmd->src_map);
3010 		bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map);
3011 	}
3012 	if (cmd->src_map != NULL) {
3013 		if (cmd->src_map->dm_nsegs != 0)
3014 			bus_dmamap_unload(sc->sc_dmat, cmd->src_map);
3015 		bus_dmamap_destroy(sc->sc_dmat, cmd->src_map);
3016 	}
3017 	if (cmd->dstu.dst_m != NULL)
3018 		m_freem(cmd->dstu.dst_m);
3019 	free(cmd, M_DEVBUF);
3020 	crp->crp_etype = err;
3021 	crypto_done(crp);
3022 }
3023 
3024 static struct mbuf *
3025 hifn_mkmbuf_chain(int totlen, struct mbuf *mtemplate)
3026 {
3027 	int len;
3028 	struct mbuf *m, *m0, *mlast;
3029 
3030 	if (mtemplate->m_flags & M_PKTHDR) {
3031 		len = MHLEN;
3032 		MGETHDR(m0, M_DONTWAIT, MT_DATA);
3033 	} else {
3034 		len = MLEN;
3035 		MGET(m0, M_DONTWAIT, MT_DATA);
3036 	}
3037 	if (m0 == NULL)
3038 		return (NULL);
3039 	if (len == MHLEN)
3040 		M_DUP_PKTHDR(m0, mtemplate);
3041 	MCLGET(m0, M_DONTWAIT);
3042 	if (!(m0->m_flags & M_EXT))
3043 		m_freem(m0);
3044 	len = MCLBYTES;
3045 
3046 	totlen -= len;
3047 	m0->m_pkthdr.len = m0->m_len = len;
3048 	mlast = m0;
3049 
3050 	while (totlen > 0) {
3051 		MGET(m, M_DONTWAIT, MT_DATA);
3052 		if (m == NULL) {
3053 			m_freem(m0);
3054 			return (NULL);
3055 		}
3056 		MCLGET(m, M_DONTWAIT);
3057 		if (!(m->m_flags & M_EXT)) {
3058 			m_freem(m0);
3059 			return (NULL);
3060 		}
3061 		len = MCLBYTES;
3062 		m->m_len = len;
3063 		if (m0->m_flags & M_PKTHDR)
3064 			m0->m_pkthdr.len += len;
3065 		totlen -= len;
3066 
3067 		mlast->m_next = m;
3068 		mlast = m;
3069 	}
3070 
3071 	return (m0);
3072 }
3073 #endif	/* HAVE_CRYPTO_LZS */
3074 
3075 static void
3076 hifn_write_4(struct hifn_softc *sc, int reggrp, bus_size_t reg, u_int32_t val)
3077 {
3078 	/*
3079 	 * 7811 PB3 rev/2 parts lock-up on burst writes to Group 0
3080 	 * and Group 1 registers; avoid conditions that could create
3081 	 * burst writes by doing a read in between the writes.
3082 	 */
3083 	if (sc->sc_flags & HIFN_NO_BURSTWRITE) {
3084 		if (sc->sc_waw_lastgroup == reggrp &&
3085 		    sc->sc_waw_lastreg == reg - 4) {
3086 			bus_space_read_4(sc->sc_st1, sc->sc_sh1, HIFN_1_REVID);
3087 		}
3088 		sc->sc_waw_lastgroup = reggrp;
3089 		sc->sc_waw_lastreg = reg;
3090 	}
3091 	if (reggrp == 0)
3092 		bus_space_write_4(sc->sc_st0, sc->sc_sh0, reg, val);
3093 	else
3094 		bus_space_write_4(sc->sc_st1, sc->sc_sh1, reg, val);
3095 
3096 }
3097 
3098 static u_int32_t
3099 hifn_read_4(struct hifn_softc *sc, int reggrp, bus_size_t reg)
3100 {
3101 	if (sc->sc_flags & HIFN_NO_BURSTWRITE) {
3102 		sc->sc_waw_lastgroup = -1;
3103 		sc->sc_waw_lastreg = 1;
3104 	}
3105 	if (reggrp == 0)
3106 		return (bus_space_read_4(sc->sc_st0, sc->sc_sh0, reg));
3107 	return (bus_space_read_4(sc->sc_st1, sc->sc_sh1, reg));
3108 }
3109