xref: /netbsd-src/sys/dev/pci/hifn7751.c (revision a93ea220fcb3e34cdfdcd4d7a5d391e0b2b4f2ba)
1 /*	$NetBSD: hifn7751.c,v 1.15 2003/07/30 18:49:27 jonathan Exp $	*/
2 /* 	$FreeBSD: hifn7751.c,v 1.5.2.6 2003/07/02 17:04:50 sam Exp $ */
3 /*	$OpenBSD: hifn7751.c,v 1.139 2003/03/13 20:08:06 jason Exp $	*/
4 
5 /*
6  * Invertex AEON / Hifn 7751 driver
7  * Copyright (c) 1999 Invertex Inc. All rights reserved.
8  * Copyright (c) 1999 Theo de Raadt
9  * Copyright (c) 2000-2001 Network Security Technologies, Inc.
10  *			http://www.netsec.net
11  *
12  * This driver is based on a previous driver by Invertex, for which they
13  * requested:  Please send any comments, feedback, bug-fixes, or feature
14  * requests to software@invertex.com.
15  *
16  * Redistribution and use in source and binary forms, with or without
17  * modification, are permitted provided that the following conditions
18  * are met:
19  *
20  * 1. Redistributions of source code must retain the above copyright
21  *   notice, this list of conditions and the following disclaimer.
22  * 2. Redistributions in binary form must reproduce the above copyright
23  *   notice, this list of conditions and the following disclaimer in the
24  *   documentation and/or other materials provided with the distribution.
25  * 3. The name of the author may not be used to endorse or promote products
26  *   derived from this software without specific prior written permission.
27  *
28  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
29  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
30  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
31  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
32  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
33  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
34  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
35  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
36  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
37  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
38  *
39  * Effort sponsored in part by the Defense Advanced Research Projects
40  * Agency (DARPA) and Air Force Research Laboratory, Air Force
41  * Materiel Command, USAF, under agreement number F30602-01-2-0537.
42  *
43  */
44 
45 /*
46  * Driver for the Hifn 7751 encryption processor.
47  */
48 
49 #include <sys/cdefs.h>
50 __KERNEL_RCSID(0, "$NetBSD: hifn7751.c,v 1.15 2003/07/30 18:49:27 jonathan Exp $");
51 
52 #include "rnd.h"
53 #include "opencrypto.h"
54 
55 #if NRND == 0 || NOPENCRYPTO == 0
56 #error hifn7751 requires rnd and opencrypto pseudo-devices
57 #endif
58 
59 
60 #include <sys/param.h>
61 #include <sys/systm.h>
62 #include <sys/proc.h>
63 #include <sys/errno.h>
64 #include <sys/malloc.h>
65 #include <sys/kernel.h>
66 #include <sys/mbuf.h>
67 #include <sys/device.h>
68 
69 #include <uvm/uvm_extern.h>
70 
71 
72 #ifdef __OpenBSD__
73 #include <crypto/crypto.h>
74 #include <dev/rndvar.h>
75 #else
76 #include <opencrypto/cryptodev.h>
77 #include <sys/rnd.h>
78 #endif
79 
80 #include <dev/pci/pcireg.h>
81 #include <dev/pci/pcivar.h>
82 #include <dev/pci/pcidevs.h>
83 
84 #include <dev/pci/hifn7751reg.h>
85 #include <dev/pci/hifn7751var.h>
86 
87 #undef HIFN_DEBUG
88 
89 #ifdef __NetBSD__
90 #define	HIFN_NO_RNG			/* until statistically tested */
91 #define M_DUP_PKTHDR M_COPY_PKTHDR	/* XXX */
92 #endif
93 
94 #ifdef HIFN_DEBUG
95 extern int hifn_debug;		/* patchable */
96 int hifn_debug = 1;
97 #endif
98 
99 #ifdef __OpenBSD__
100 #define HAVE_CRYPTO_LZS		/* OpenBSD OCF supports CRYPTO_COMP_LZS */
101 #endif
102 
103 /*
104  * Prototypes and count for the pci_device structure
105  */
/* autoconf match entry point; OpenBSD and NetBSD differ in the match arg */
#ifdef __OpenBSD__
int hifn_probe(struct device *, void *, void *);
#else
int hifn_probe(struct device *, struct cfdata *, void *);
#endif
111 void hifn_attach(struct device *, struct device *, void *);
112 
113 CFATTACH_DECL(hifn, sizeof(struct hifn_softc),
114     hifn_probe, hifn_attach, NULL, NULL);
115 
116 #ifdef __OpenBSD__
117 struct cfdriver hifn_cd = {
118 	0, "hifn", DV_DULL
119 };
120 #endif
121 
122 void	hifn_reset_board(struct hifn_softc *, int);
123 void	hifn_reset_puc(struct hifn_softc *);
124 void	hifn_puc_wait(struct hifn_softc *);
125 int	hifn_enable_crypto(struct hifn_softc *, pcireg_t);
126 void	hifn_set_retry(struct hifn_softc *);
127 void	hifn_init_dma(struct hifn_softc *);
128 void	hifn_init_pci_registers(struct hifn_softc *);
129 int	hifn_sramsize(struct hifn_softc *);
130 int	hifn_dramsize(struct hifn_softc *);
131 int	hifn_ramtype(struct hifn_softc *);
132 void	hifn_sessions(struct hifn_softc *);
133 int	hifn_intr(void *);
134 u_int	hifn_write_command(struct hifn_command *, u_int8_t *);
135 u_int32_t hifn_next_signature(u_int32_t a, u_int cnt);
136 int	hifn_newsession(void*, u_int32_t *, struct cryptoini *);
137 int	hifn_freesession(void*, u_int64_t);
138 int	hifn_process(void*, struct cryptop *, int);
139 void	hifn_callback(struct hifn_softc *, struct hifn_command *, u_int8_t *);
140 int	hifn_crypto(struct hifn_softc *, struct hifn_command *,
141 		    struct cryptop*, int);
142 int	hifn_readramaddr(struct hifn_softc *, int, u_int8_t *);
143 int	hifn_writeramaddr(struct hifn_softc *, int, u_int8_t *);
144 int	hifn_dmamap_aligned(bus_dmamap_t);
145 int	hifn_dmamap_load_src(struct hifn_softc *, struct hifn_command *);
146 int	hifn_dmamap_load_dst(struct hifn_softc *, struct hifn_command *);
147 int	hifn_init_pubrng(struct hifn_softc *);
148 #ifndef HIFN_NO_RNG
149 static	void hifn_rng(void *);
150 #endif
151 void	hifn_tick(void *);
152 void	hifn_abort(struct hifn_softc *);
153 void	hifn_alloc_slot(struct hifn_softc *, int *, int *, int *, int *);
154 void	hifn_write_4(struct hifn_softc *, int, bus_size_t, u_int32_t);
155 u_int32_t hifn_read_4(struct hifn_softc *, int, bus_size_t);
156 #ifdef	HAVE_CRYPTO_LZS
157 int	hifn_compression(struct hifn_softc *, struct cryptop *,
158 			 struct hifn_command *);
159 struct mbuf *hifn_mkmbuf_chain(int, struct mbuf *);
160 int	hifn_compress_enter(struct hifn_softc *, struct hifn_command *);
161 void	hifn_callback_comp(struct hifn_softc *, struct hifn_command *,
162 			   u_int8_t *);
163 #endif	/* HAVE_CRYPTO_LZS */
164 
165 
166 #ifdef	notyet
167 int	hifn_compression(struct hifn_softc *, struct cryptop *,
168     struct hifn_command *);
169 struct mbuf *hifn_mkmbuf_chain(int, struct mbuf *);
170 int	hifn_compress_enter(struct hifn_softc *, struct hifn_command *);
171 void	hifn_callback_comp(struct hifn_softc *, struct hifn_command *,
172     u_int8_t *);
173 #endif
174 
175 struct hifn_stats hifnstats;
176 
177 int
178 hifn_probe(parent, match, aux)
179 	struct device *parent;
180 #ifdef	__OpenBSD__
181 	void *match;
182 #else
183 	struct cfdata *match;
184 #endif
185 	void *aux;
186 {
187 	struct pci_attach_args *pa = (struct pci_attach_args *) aux;
188 
189 	if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_INVERTEX &&
190 	    PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_INVERTEX_AEON)
191 		return (1);
192 	if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_HIFN &&
193 	    PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_HIFN_7751)
194 		return (1);
195 	if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_HIFN &&
196 	    PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_HIFN_7811)
197 		return (1);
198 	if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_HIFN &&
199 	    PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_HIFN_7951)
200 		return (1);
201 	if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_NETSEC &&
202 	    PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_NETSEC_7751)
203 		return (1);
204 	return (0);
205 }
206 
/*
 * autoconf attach: enable PCI memory/bus-master access, map BAR0/BAR1,
 * allocate and wire the shared hifn_dma descriptor area, unlock the
 * crypto engine, size the on-board RAM, hook the interrupt, register
 * the supported algorithms with opencrypto, and start the tick callout.
 * On any failure, everything acquired so far is torn down via the
 * fail_* goto chain (each label undoes one more acquisition stage).
 */
void
hifn_attach(parent, self, aux)
	struct device *parent, *self;
	void *aux;
{
	struct hifn_softc *sc = (struct hifn_softc *)self;
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	char rbase;
	bus_size_t iosize0, iosize1;
	u_int32_t cmd;
	u_int16_t ena;
	bus_dma_segment_t seg;
	bus_dmamap_t dmamap;
	int rseg;
	caddr_t kva;

	aprint_naive(": Crypto processor\n");

	sc->sc_pci_pc = pa->pa_pc;
	sc->sc_pci_tag = pa->pa_tag;

	/* Capability flags depend on the exact part attached. */
	if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_HIFN &&
	    PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_HIFN_7951)
		sc->sc_flags = HIFN_HAS_RNG | HIFN_HAS_PUBLIC;

	if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_HIFN &&
	    PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_HIFN_7811)
		sc->sc_flags |= HIFN_IS_7811 | HIFN_HAS_RNG | HIFN_HAS_LEDS |
		    HIFN_NO_BURSTWRITE;

	/* Enable memory space and bus mastering, then verify the write took. */
	cmd = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	cmd |= PCI_COMMAND_MEM_ENABLE | PCI_COMMAND_MASTER_ENABLE;
	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, cmd);
	cmd = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);

	if (!(cmd & PCI_COMMAND_MEM_ENABLE)) {
		aprint_error(": failed to enable memory mapping\n");
		return;
	}

	/* Map both register windows (BAR0 = group 0, BAR1 = group 1). */
	if (pci_mapreg_map(pa, HIFN_BAR0, PCI_MAPREG_TYPE_MEM, 0,
	    &sc->sc_st0, &sc->sc_sh0, NULL, &iosize0)) {
		aprint_error(": can't find mem space %d\n", 0);
		return;
	}

	if (pci_mapreg_map(pa, HIFN_BAR1, PCI_MAPREG_TYPE_MEM, 0,
	    &sc->sc_st1, &sc->sc_sh1, NULL, &iosize1)) {
		aprint_error(": can't find mem space %d\n", 1);
		goto fail_io0;
	}

	hifn_set_retry(sc);

	/* 7811 write-combining workaround state, used by hifn_write_4(). */
	if (sc->sc_flags & HIFN_NO_BURSTWRITE) {
		sc->sc_waw_lastgroup = -1;
		sc->sc_waw_lastreg = 1;
	}

	/*
	 * Allocate, map, and load one physically contiguous page-aligned
	 * chunk holding the whole hifn_dma descriptor/buffer area.
	 */
	sc->sc_dmat = pa->pa_dmat;
	if (bus_dmamem_alloc(sc->sc_dmat, sizeof(*sc->sc_dma), PAGE_SIZE, 0,
	    &seg, 1, &rseg, BUS_DMA_NOWAIT)) {
		aprint_error(": can't alloc DMA buffer\n");
		goto fail_io1;
        }
	if (bus_dmamem_map(sc->sc_dmat, &seg, rseg, sizeof(*sc->sc_dma), &kva,
	    BUS_DMA_NOWAIT)) {
		aprint_error(": can't map DMA buffers (%lu bytes)\n",
		    (u_long)sizeof(*sc->sc_dma));
		bus_dmamem_free(sc->sc_dmat, &seg, rseg);
		goto fail_io1;
	}
	if (bus_dmamap_create(sc->sc_dmat, sizeof(*sc->sc_dma), 1,
	    sizeof(*sc->sc_dma), 0, BUS_DMA_NOWAIT, &dmamap)) {
		aprint_error(": can't create DMA map\n");
		bus_dmamem_unmap(sc->sc_dmat, kva, sizeof(*sc->sc_dma));
		bus_dmamem_free(sc->sc_dmat, &seg, rseg);
		goto fail_io1;
	}
	if (bus_dmamap_load(sc->sc_dmat, dmamap, kva, sizeof(*sc->sc_dma),
	    NULL, BUS_DMA_NOWAIT)) {
		aprint_error(": can't load DMA map\n");
		bus_dmamap_destroy(sc->sc_dmat, dmamap);
		bus_dmamem_unmap(sc->sc_dmat, kva, sizeof(*sc->sc_dma));
		bus_dmamem_free(sc->sc_dmat, &seg, rseg);
		goto fail_io1;
	}
	sc->sc_dmamap = dmamap;
	sc->sc_dma = (struct hifn_dma *)kva;
	bzero(sc->sc_dma, sizeof(*sc->sc_dma));

	hifn_reset_board(sc, 0);

	if (hifn_enable_crypto(sc, pa->pa_id) != 0) {
		aprint_error("%s: crypto enabling failed\n",
		    sc->sc_dv.dv_xname);
		goto fail_mem;
	}
	hifn_reset_puc(sc);

	hifn_init_dma(sc);
	hifn_init_pci_registers(sc);

	/* Determine RAM type (sets sc_drammodel), then size it. */
	if (hifn_ramtype(sc))
		goto fail_mem;

	if (sc->sc_drammodel == 0)
		hifn_sramsize(sc);
	else
		hifn_dramsize(sc);

	/*
	 * Workaround for NetSec 7751 rev A: half ram size because two
	 * of the address lines were left floating
	 */
	if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_NETSEC &&
	    PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_NETSEC_7751 &&
	    PCI_REVISION(pa->pa_class) == 0x61)
		sc->sc_ramsize >>= 1;

	if (pci_intr_map(pa, &ih)) {
		aprint_error(": couldn't map interrupt\n");
		goto fail_mem;
	}
	intrstr = pci_intr_string(pc, ih);
#ifdef	__OpenBSD__
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, hifn_intr, sc,
	    self->dv_xname);
#else
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, hifn_intr, sc);
#endif
	if (sc->sc_ih == NULL) {
		aprint_error(": couldn't establish interrupt\n");
		if (intrstr != NULL)
			aprint_normal(" at %s", intrstr);
		aprint_normal("\n");
		goto fail_mem;
	}

	hifn_sessions(sc);

	/* Pretty-print the RAM size in KB or MB (rseg is reused here). */
	rseg = sc->sc_ramsize / 1024;
	rbase = 'K';
	if (sc->sc_ramsize >= (1024 * 1024)) {
		rbase = 'M';
		rseg /= 1024;
	}
	aprint_normal(", %d%cB %cram, %s\n", rseg, rbase,
	    sc->sc_drammodel ? 'd' : 's', intrstr);

	sc->sc_cid = crypto_get_driverid(0);
	if (sc->sc_cid < 0) {
		aprint_error(": couldn't get crypto driver id\n");
		goto fail_intr;
	}

	/*
	 * Read the enablement level; ENA_2 parts also register the
	 * strong algorithms (the switch falls through to ENA_1's set).
	 */
	WRITE_REG_0(sc, HIFN_0_PUCNFG,
	    READ_REG_0(sc, HIFN_0_PUCNFG) | HIFN_PUCNFG_CHIPID);
	ena = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA;

	switch (ena) {
	case HIFN_PUSTAT_ENA_2:
		crypto_register(sc->sc_cid, CRYPTO_3DES_CBC, 0, 0,
		    hifn_newsession, hifn_freesession, hifn_process, sc);
		crypto_register(sc->sc_cid, CRYPTO_ARC4, 0, 0,
		    hifn_newsession, hifn_freesession, hifn_process, sc);
		/*FALLTHROUGH*/
	case HIFN_PUSTAT_ENA_1:
		crypto_register(sc->sc_cid, CRYPTO_MD5, 0, 0,
		    hifn_newsession, hifn_freesession, hifn_process, sc);
		crypto_register(sc->sc_cid, CRYPTO_SHA1, 0, 0,
		    hifn_newsession, hifn_freesession, hifn_process, sc);
		crypto_register(sc->sc_cid, CRYPTO_MD5_HMAC, 0, 0,
		    hifn_newsession, hifn_freesession, hifn_process, sc);
		crypto_register(sc->sc_cid, CRYPTO_SHA1_HMAC, 0, 0,
		    hifn_newsession, hifn_freesession, hifn_process, sc);
		crypto_register(sc->sc_cid, CRYPTO_DES_CBC, 0, 0,
		    hifn_newsession, hifn_freesession, hifn_process, sc);
		break;
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, 0,
	    sc->sc_dmamap->dm_mapsize,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	if (sc->sc_flags & (HIFN_HAS_PUBLIC | HIFN_HAS_RNG))
		hifn_init_pubrng(sc);

#ifdef	__OpenBSD__
	timeout_set(&sc->sc_tickto, hifn_tick, sc);
	timeout_add(&sc->sc_tickto, hz);
#else
	callout_init(&sc->sc_tickto);
	callout_reset(&sc->sc_tickto, hz, hifn_tick, sc);
#endif
	return;

fail_intr:
	pci_intr_disestablish(pc, sc->sc_ih);
fail_mem:
	bus_dmamap_unload(sc->sc_dmat, dmamap);
	bus_dmamap_destroy(sc->sc_dmat, dmamap);
	bus_dmamem_unmap(sc->sc_dmat, kva, sizeof(*sc->sc_dma));
	bus_dmamem_free(sc->sc_dmat, &seg, rseg);

	/* Turn off DMA polling */
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
	    HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);

fail_io1:
	bus_space_unmap(sc->sc_st1, sc->sc_sh1, iosize1);
fail_io0:
	bus_space_unmap(sc->sc_st0, sc->sc_sh0, iosize0);
}
424 
/*
 * Bring up the public-key and RNG units according to sc_flags:
 * reset the public unit on non-7811 parts, enable the RNG and arm its
 * periodic harvest callout (unless HIFN_NO_RNG, which NetBSD defines),
 * and unmask the public-operation-done interrupt.
 * Returns 0 on success, 1 if the public unit never leaves reset.
 */
int
hifn_init_pubrng(sc)
	struct hifn_softc *sc;
{
	u_int32_t r;
	int i;

	if ((sc->sc_flags & HIFN_IS_7811) == 0) {
		/* Reset 7951 public key/rng engine */
		WRITE_REG_1(sc, HIFN_1_PUB_RESET,
		    READ_REG_1(sc, HIFN_1_PUB_RESET) | HIFN_PUBRST_RESET);

		/* Poll for the reset bit to clear, up to ~100ms. */
		for (i = 0; i < 100; i++) {
			DELAY(1000);
			if ((READ_REG_1(sc, HIFN_1_PUB_RESET) &
			    HIFN_PUBRST_RESET) == 0)
				break;
		}

		if (i == 100) {
			printf("%s: public key init failed\n",
			    sc->sc_dv.dv_xname);
			return (1);
		}
	}

	/* Enable the rng, if available */
	if (sc->sc_flags & HIFN_HAS_RNG) {
		if (sc->sc_flags & HIFN_IS_7811) {
			/* 7811: cycle the enable bit around a reconfigure. */
			r = READ_REG_1(sc, HIFN_1_7811_RNGENA);
			if (r & HIFN_7811_RNGENA_ENA) {
				r &= ~HIFN_7811_RNGENA_ENA;
				WRITE_REG_1(sc, HIFN_1_7811_RNGENA, r);
			}
			WRITE_REG_1(sc, HIFN_1_7811_RNGCFG,
			    HIFN_7811_RNGCFG_DEFL);
			r |= HIFN_7811_RNGENA_ENA;
			WRITE_REG_1(sc, HIFN_1_7811_RNGENA, r);
		} else
			WRITE_REG_1(sc, HIFN_1_RNG_CONFIG,
			    READ_REG_1(sc, HIFN_1_RNG_CONFIG) |
			    HIFN_RNGCFG_ENA);

		/* Harvest roughly 100 times a second (at least once/tick). */
		sc->sc_rngfirst = 1;
		if (hz >= 100)
			sc->sc_rnghz = hz / 100;
		else
			sc->sc_rnghz = 1;
#ifndef	HIFN_NO_RNG
#ifdef	__OpenBSD__
		timeout_set(&sc->sc_rngto, hifn_rng, sc);
		timeout_add(&sc->sc_rngto, sc->sc_rnghz);
#else	/* !__OpenBSD__ */
		callout_init(&sc->sc_rngto);
		callout_reset(&sc->sc_rngto, sc->sc_rnghz, hifn_rng, sc);
#endif	/* !__OpenBSD__ */
#endif	/* HIFN_NO_RNG */
	}

	/* Enable public key engine, if available */
	if (sc->sc_flags & HIFN_HAS_PUBLIC) {
		WRITE_REG_1(sc, HIFN_1_PUB_IEN, HIFN_PUBIEN_DONE);
		sc->sc_dmaier |= HIFN_DMAIER_PUBDONE;
		WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
	}

	return (0);
}
493 
#ifndef HIFN_NO_RNG
/*
 * Periodic RNG harvest callout.  The entire body is compiled out on
 * NetBSD (which also defines HIFN_NO_RNG above, so this function is
 * not built there at all — effectively OpenBSD-only code).  It drains
 * pairs of words from the 7811 RNG FIFO, or a single word from the
 * 7951-style RNG data register, feeds them to add_true_randomness()
 * (discarding the very first sample after enable), and re-arms itself
 * every sc_rnghz ticks.  A 7811 FIFO underflow disables harvesting by
 * returning without re-arming the callout.
 */
static void
hifn_rng(vsc)
	void *vsc;
{
#ifndef	__NetBSD__
	struct hifn_softc *sc = vsc;
	u_int32_t num1, sts, num2;
	int i;

	if (sc->sc_flags & HIFN_IS_7811) {
		/* Pull at most 5 pairs per invocation. */
		for (i = 0; i < 5; i++) {
			sts = READ_REG_1(sc, HIFN_1_7811_RNGSTS);
			if (sts & HIFN_7811_RNGSTS_UFL) {
				printf("%s: RNG underflow: disabling\n",
				    sc->sc_dv.dv_xname);
				return;
			}
			if ((sts & HIFN_7811_RNGSTS_RDY) == 0)
				break;

			/*
			 * There are at least two words in the RNG FIFO
			 * at this point.
			 */
			num1 = READ_REG_1(sc, HIFN_1_7811_RNGDAT);
			num2 = READ_REG_1(sc, HIFN_1_7811_RNGDAT);
			if (sc->sc_rngfirst)
				sc->sc_rngfirst = 0;
			else {
				add_true_randomness(num1);
				add_true_randomness(num2);
			}
		}
	} else {
		num1 = READ_REG_1(sc, HIFN_1_RNG_DATA);

		if (sc->sc_rngfirst)
			sc->sc_rngfirst = 0;
		else
			add_true_randomness(num1);
	}

#ifdef	__OpenBSD__
	timeout_add(&sc->sc_rngto, sc->sc_rnghz);
#else
	callout_reset(&sc->sc_rngto, sc->sc_rnghz, hifn_rng, sc);
#endif
#endif	/*!__NetBSD__*/
}
#endif
545 
546 void
547 hifn_puc_wait(sc)
548 	struct hifn_softc *sc;
549 {
550 	int i;
551 
552 	for (i = 5000; i > 0; i--) {
553 		DELAY(1);
554 		if (!(READ_REG_0(sc, HIFN_0_PUCTRL) & HIFN_PUCTRL_RESET))
555 			break;
556 	}
557 	if (!i)
558 		printf("%s: proc unit did not reset\n", sc->sc_dv.dv_xname);
559 }
560 
561 /*
562  * Reset the processing unit.
563  */
564 void
565 hifn_reset_puc(sc)
566 	struct hifn_softc *sc;
567 {
568 	/* Reset processing unit */
569 	WRITE_REG_0(sc, HIFN_0_PUCTRL, HIFN_PUCTRL_DMAENA);
570 	hifn_puc_wait(sc);
571 }
572 
573 void
574 hifn_set_retry(sc)
575 	struct hifn_softc *sc;
576 {
577 	u_int32_t r;
578 
579 	r = pci_conf_read(sc->sc_pci_pc, sc->sc_pci_tag, HIFN_TRDY_TIMEOUT);
580 	r &= 0xffff0000;
581 	pci_conf_write(sc->sc_pci_pc, sc->sc_pci_tag, HIFN_TRDY_TIMEOUT, r);
582 }
583 
/*
 * Resets the board.  Values in the registers are left as is
 * from the reset (i.e. initial values are assigned elsewhere).
 * A "full" reset also resets the master state; otherwise only the
 * DMA unit is cycled and the processing unit is reset via the PUC.
 */
void
hifn_reset_board(struct hifn_softc *sc, int full)
{
	u_int32_t reg;

	/*
	 * Set polling in the DMA configuration register to zero.  0x7 avoids
	 * resetting the board and zeros out the other fields.
	 */
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
	    HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);

	/*
	 * Now that polling has been disabled, we have to wait 1 ms
	 * before resetting the board.
	 */
	DELAY(1000);

	/* Reset the DMA unit */
	if (full) {
		WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MODE);
		DELAY(1000);
	} else {
		WRITE_REG_1(sc, HIFN_1_DMA_CNFG,
		    HIFN_DMACNFG_MODE | HIFN_DMACNFG_MSTRESET);
		hifn_reset_puc(sc);
	}

	/* The shared descriptor area is stale after a reset; clear it. */
	bzero(sc->sc_dma, sizeof(*sc->sc_dma));

	/* Bring dma unit out of reset */
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
	    HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);

	hifn_puc_wait(sc);

	hifn_set_retry(sc);

	/* 7811 parts: wait (up to ~1s) for context RAM initialization. */
	if (sc->sc_flags & HIFN_IS_7811) {
		for (reg = 0; reg < 1000; reg++) {
			if (READ_REG_1(sc, HIFN_1_7811_MIPSRST) &
			    HIFN_MIPSRST_CRAMINIT)
				break;
			DELAY(1000);
		}
		if (reg == 1000)
			printf(": cram init timeout\n");
	}
}
637 
638 u_int32_t
639 hifn_next_signature(a, cnt)
640 	u_int32_t a;
641 	u_int cnt;
642 {
643 	int i;
644 	u_int32_t v;
645 
646 	for (i = 0; i < cnt; i++) {
647 
648 		/* get the parity */
649 		v = a & 0x80080125;
650 		v ^= v >> 16;
651 		v ^= v >> 8;
652 		v ^= v >> 4;
653 		v ^= v >> 2;
654 		v ^= v >> 1;
655 
656 		a = (v & 1) ^ (a << 1);
657 	}
658 
659 	return a;
660 }
661 
/*
 * Table mapping PCI vendor/product pairs to the 13-byte card ID that
 * seeds the unlock signature sequence in hifn_enable_crypto().  All
 * known cards use an all-zeros ID.
 */
struct pci2id {
	u_short		pci_vendor;
	u_short		pci_prod;
	char		card_id[13];
} pci2id[] = {
	{
		PCI_VENDOR_HIFN,
		PCI_PRODUCT_HIFN_7951,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	}, {
		PCI_VENDOR_NETSEC,
		PCI_PRODUCT_NETSEC_7751,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	}, {
		PCI_VENDOR_INVERTEX,
		PCI_PRODUCT_INVERTEX_AEON,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	}, {
		PCI_VENDOR_HIFN,
		PCI_PRODUCT_HIFN_7811,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	}, {
		/*
		 * Other vendors share this PCI ID as well, such as
		 * http://www.powercrypt.com, and obviously they also
		 * use the same key.
		 */
		PCI_VENDOR_HIFN,
		PCI_PRODUCT_HIFN_7751,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	},
};
699 
/*
 * Checks to see if crypto is already enabled.  If crypto isn't enabled,
 * the unlock signature sequence is written to the chip.  The check is
 * important, as enabling crypto twice will lock the board until the
 * next reboot.  Returns 0 on success (and prints the enablement level
 * as part of the attach line), 1 on an unknown card or unknown level.
 */
int
hifn_enable_crypto(sc, pciid)
	struct hifn_softc *sc;
	pcireg_t pciid;
{
	u_int32_t dmacfg, ramcfg, encl, addr, i;
	char *offtbl = NULL;

	/* Look up this card's 13-byte ID in the pci2id table. */
	for (i = 0; i < sizeof(pci2id)/sizeof(pci2id[0]); i++) {
		if (pci2id[i].pci_vendor == PCI_VENDOR(pciid) &&
		    pci2id[i].pci_prod == PCI_PRODUCT(pciid)) {
			offtbl = pci2id[i].card_id;
			break;
		}
	}

	if (offtbl == NULL) {
#ifdef HIFN_DEBUG
		aprint_debug("%s: Unknown card!\n", sc->sc_dv.dv_xname);
#endif
		return (1);
	}

	/* Save both config registers; they are restored at "report:". */
	ramcfg = READ_REG_0(sc, HIFN_0_PUCNFG);
	dmacfg = READ_REG_1(sc, HIFN_1_DMA_CNFG);

	/*
	 * The RAM config register's encrypt level bit needs to be set before
	 * every read performed on the encryption level register.
	 */
	WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg | HIFN_PUCNFG_CHIPID);

	encl = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA;

	/*
	 * Make sure we don't re-unlock.  Two unlocks kills chip until the
	 * next reboot.
	 */
	if (encl == HIFN_PUSTAT_ENA_1 || encl == HIFN_PUSTAT_ENA_2) {
#ifdef HIFN_DEBUG
		aprint_debug("%s: Strong Crypto already enabled!\n",
		    sc->sc_dv.dv_xname);
#endif
		goto report;
	}

	if (encl != 0 && encl != HIFN_PUSTAT_ENA_0) {
#ifdef HIFN_DEBUG
		aprint_debug("%s: Unknown encryption level\n",
		    sc->sc_dv.dv_xname);
#endif
		return 1;
	}

	/*
	 * Unlock sequence: put the DMA unit in unlock mode, read the
	 * first secret, then feed the 13 signature words derived from
	 * the card ID through the LFSR (hifn_next_signature).
	 */
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_UNLOCK |
	    HIFN_DMACNFG_MSTRESET | HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
	DELAY(1000);
	addr = READ_REG_1(sc, HIFN_1_UNLOCK_SECRET1);
	DELAY(1000);
	WRITE_REG_1(sc, HIFN_1_UNLOCK_SECRET2, 0);
	DELAY(1000);

	for (i = 0; i <= 12; i++) {
		addr = hifn_next_signature(addr, offtbl[i] + 0x101);
		WRITE_REG_1(sc, HIFN_1_UNLOCK_SECRET2, addr);

		DELAY(1000);
	}

	/* Re-read the enablement level to see whether the unlock took. */
	WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg | HIFN_PUCNFG_CHIPID);
	encl = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA;

#ifdef HIFN_DEBUG
	if (encl != HIFN_PUSTAT_ENA_1 && encl != HIFN_PUSTAT_ENA_2)
		aprint_debug("Encryption engine is permanently locked until next system reset.");
	else
		aprint_debug("Encryption engine enabled successfully!");
#endif

report:
	WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg);
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, dmacfg);

	/* Note: offtbl is reused here to hold the report string. */
	switch (encl) {
	case HIFN_PUSTAT_ENA_0:
		offtbl = "LZS-only (no encr/auth)";
		break;
	case HIFN_PUSTAT_ENA_1:
		offtbl = "DES";
		break;
	case HIFN_PUSTAT_ENA_2:
		offtbl = "3DES";
		break;
	default:
		offtbl = "disabled";
		break;
	}
	aprint_normal(": %s", offtbl);

	return 0;
}
806 
/*
 * Give initial values to the registers listed in the "Register Space"
 * section of the HIFN Software Development reference manual: processing
 * unit control, the four DMA ring base addresses, the DMA status and
 * interrupt-enable masks, and the RAM/DMA configuration words.
 */
void
hifn_init_pci_registers(sc)
	struct hifn_softc *sc;
{
	/* write fixed values needed by the Initialization registers */
	WRITE_REG_0(sc, HIFN_0_PUCTRL, HIFN_PUCTRL_DMAENA);
	WRITE_REG_0(sc, HIFN_0_FIFOCNFG, HIFN_FIFOCNFG_THRESHOLD);
	WRITE_REG_0(sc, HIFN_0_PUIER, HIFN_PUIER_DSTOVER);

	/* write all 4 ring address registers (physical addresses of the
	 * command, source, destination, and result rings in sc_dma) */
	WRITE_REG_1(sc, HIFN_1_DMA_CRAR, sc->sc_dmamap->dm_segs[0].ds_addr +
	    offsetof(struct hifn_dma, cmdr[0]));
	WRITE_REG_1(sc, HIFN_1_DMA_SRAR, sc->sc_dmamap->dm_segs[0].ds_addr +
	    offsetof(struct hifn_dma, srcr[0]));
	WRITE_REG_1(sc, HIFN_1_DMA_DRAR, sc->sc_dmamap->dm_segs[0].ds_addr +
	    offsetof(struct hifn_dma, dstr[0]));
	WRITE_REG_1(sc, HIFN_1_DMA_RRAR, sc->sc_dmamap->dm_segs[0].ds_addr +
	    offsetof(struct hifn_dma, resr[0]));

	DELAY(2000);

	/* write status register: disable ring control and acknowledge
	 * (clear) all pending status bits */
	WRITE_REG_1(sc, HIFN_1_DMA_CSR,
	    HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS |
	    HIFN_DMACSR_S_CTRL_DIS | HIFN_DMACSR_C_CTRL_DIS |
	    HIFN_DMACSR_D_ABORT | HIFN_DMACSR_D_DONE | HIFN_DMACSR_D_LAST |
	    HIFN_DMACSR_D_WAIT | HIFN_DMACSR_D_OVER |
	    HIFN_DMACSR_R_ABORT | HIFN_DMACSR_R_DONE | HIFN_DMACSR_R_LAST |
	    HIFN_DMACSR_R_WAIT | HIFN_DMACSR_R_OVER |
	    HIFN_DMACSR_S_ABORT | HIFN_DMACSR_S_DONE | HIFN_DMACSR_S_LAST |
	    HIFN_DMACSR_S_WAIT |
	    HIFN_DMACSR_C_ABORT | HIFN_DMACSR_C_DONE | HIFN_DMACSR_C_LAST |
	    HIFN_DMACSR_C_WAIT |
	    HIFN_DMACSR_ENGINE |
	    ((sc->sc_flags & HIFN_HAS_PUBLIC) ?
		HIFN_DMACSR_PUBDONE : 0) |
	    ((sc->sc_flags & HIFN_IS_7811) ?
		HIFN_DMACSR_ILLW | HIFN_DMACSR_ILLR : 0));

	/* select the interrupt sources we care about */
	sc->sc_d_busy = sc->sc_r_busy = sc->sc_s_busy = sc->sc_c_busy = 0;
	sc->sc_dmaier |= HIFN_DMAIER_R_DONE | HIFN_DMAIER_C_ABORT |
	    HIFN_DMAIER_D_OVER | HIFN_DMAIER_R_OVER |
	    HIFN_DMAIER_S_ABORT | HIFN_DMAIER_D_ABORT | HIFN_DMAIER_R_ABORT |
	    HIFN_DMAIER_ENGINE |
	    ((sc->sc_flags & HIFN_IS_7811) ?
		HIFN_DMAIER_ILLW | HIFN_DMAIER_ILLR : 0);
	sc->sc_dmaier &= ~HIFN_DMAIER_C_WAIT;
	WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
	CLR_LED(sc, HIFN_MIPSRST_LED0 | HIFN_MIPSRST_LED1 | HIFN_MIPSRST_LED2);

	WRITE_REG_0(sc, HIFN_0_PUCNFG, HIFN_PUCNFG_COMPSING |
	    HIFN_PUCNFG_DRFR_128 | HIFN_PUCNFG_TCALLPHASES |
	    HIFN_PUCNFG_TCDRVTOTEM | HIFN_PUCNFG_BUS32 |
	    (sc->sc_drammodel ? HIFN_PUCNFG_DRAM : HIFN_PUCNFG_SRAM));

	WRITE_REG_0(sc, HIFN_0_PUISR, HIFN_PUISR_DSTOVER);
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
	    HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE | HIFN_DMACNFG_LAST |
	    ((HIFN_POLL_FREQUENCY << 16 ) & HIFN_DMACNFG_POLLFREQ) |
	    ((HIFN_POLL_SCALAR << 8) & HIFN_DMACNFG_POLLINVAL));
}
872 
873 /*
874  * The maximum number of sessions supported by the card
875  * is dependent on the amount of context ram, which
876  * encryption algorithms are enabled, and how compression
877  * is configured.  This should be configured before this
878  * routine is called.
879  */
880 void
881 hifn_sessions(sc)
882 	struct hifn_softc *sc;
883 {
884 	u_int32_t pucnfg;
885 	int ctxsize;
886 
887 	pucnfg = READ_REG_0(sc, HIFN_0_PUCNFG);
888 
889 	if (pucnfg & HIFN_PUCNFG_COMPSING) {
890 		if (pucnfg & HIFN_PUCNFG_ENCCNFG)
891 			ctxsize = 128;
892 		else
893 			ctxsize = 512;
894 		sc->sc_maxses = 1 +
895 		    ((sc->sc_ramsize - 32768) / ctxsize);
896 	}
897 	else
898 		sc->sc_maxses = sc->sc_ramsize / 16384;
899 
900 	if (sc->sc_maxses > 2048)
901 		sc->sc_maxses = 2048;
902 }
903 
904 /*
905  * Determine ram type (sram or dram).  Board should be just out of a reset
906  * state when this is called.
907  */
908 int
909 hifn_ramtype(sc)
910 	struct hifn_softc *sc;
911 {
912 	u_int8_t data[8], dataexpect[8];
913 	int i;
914 
915 	for (i = 0; i < sizeof(data); i++)
916 		data[i] = dataexpect[i] = 0x55;
917 	if (hifn_writeramaddr(sc, 0, data))
918 		return (-1);
919 	if (hifn_readramaddr(sc, 0, data))
920 		return (-1);
921 	if (bcmp(data, dataexpect, sizeof(data)) != 0) {
922 		sc->sc_drammodel = 1;
923 		return (0);
924 	}
925 
926 	for (i = 0; i < sizeof(data); i++)
927 		data[i] = dataexpect[i] = 0xaa;
928 	if (hifn_writeramaddr(sc, 0, data))
929 		return (-1);
930 	if (hifn_readramaddr(sc, 0, data))
931 		return (-1);
932 	if (bcmp(data, dataexpect, sizeof(data)) != 0) {
933 		sc->sc_drammodel = 1;
934 		return (0);
935 	}
936 
937 	return (0);
938 }
939 
940 #define	HIFN_SRAM_MAX		(32 << 20)
941 #define	HIFN_SRAM_STEP_SIZE	16384
942 #define	HIFN_SRAM_GRANULARITY	(HIFN_SRAM_MAX / HIFN_SRAM_STEP_SIZE)
943 
944 int
945 hifn_sramsize(sc)
946 	struct hifn_softc *sc;
947 {
948 	u_int32_t a;
949 	u_int8_t data[8];
950 	u_int8_t dataexpect[sizeof(data)];
951 	int32_t i;
952 
953 	for (i = 0; i < sizeof(data); i++)
954 		data[i] = dataexpect[i] = i ^ 0x5a;
955 
956 	for (i = HIFN_SRAM_GRANULARITY - 1; i >= 0; i--) {
957 		a = i * HIFN_SRAM_STEP_SIZE;
958 		bcopy(&i, data, sizeof(i));
959 		hifn_writeramaddr(sc, a, data);
960 	}
961 
962 	for (i = 0; i < HIFN_SRAM_GRANULARITY; i++) {
963 		a = i * HIFN_SRAM_STEP_SIZE;
964 		bcopy(&i, dataexpect, sizeof(i));
965 		if (hifn_readramaddr(sc, a, data) < 0)
966 			return (0);
967 		if (bcmp(data, dataexpect, sizeof(data)) != 0)
968 			return (0);
969 		sc->sc_ramsize = a + HIFN_SRAM_STEP_SIZE;
970 	}
971 
972 	return (0);
973 }
974 
975 /*
976  * XXX For dram boards, one should really try all of the
977  * HIFN_PUCNFG_DSZ_*'s.  This just assumes that PUCNFG
978  * is already set up correctly.
979  */
980 int
981 hifn_dramsize(sc)
982 	struct hifn_softc *sc;
983 {
984 	u_int32_t cnfg;
985 
986 	cnfg = READ_REG_0(sc, HIFN_0_PUCNFG) &
987 	    HIFN_PUCNFG_DRAMMASK;
988 	sc->sc_ramsize = 1 << ((cnfg >> 13) + 18);
989 	return (0);
990 }
991 
/*
 * Reserve the next descriptor slot in each of the four DMA rings
 * (command, source, destination, result), returning the allotted
 * indices through the out parameters.  When a ring's index has hit
 * the end, a VALID|JUMP descriptor is planted in the terminal slot so
 * the chip wraps back to entry 0, and the index restarts there.
 */
void
hifn_alloc_slot(sc, cmdp, srcp, dstp, resp)
	struct hifn_softc *sc;
	int *cmdp, *srcp, *dstp, *resp;
{
	struct hifn_dma *dma = sc->sc_dma;

	if (dma->cmdi == HIFN_D_CMD_RSIZE) {
		dma->cmdi = 0;
		dma->cmdr[HIFN_D_CMD_RSIZE].l = htole32(HIFN_D_VALID |
		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
		HIFN_CMDR_SYNC(sc, HIFN_D_CMD_RSIZE,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	}
	*cmdp = dma->cmdi++;
	dma->cmdk = dma->cmdi;

	if (dma->srci == HIFN_D_SRC_RSIZE) {
		dma->srci = 0;
		dma->srcr[HIFN_D_SRC_RSIZE].l = htole32(HIFN_D_VALID |
		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
		HIFN_SRCR_SYNC(sc, HIFN_D_SRC_RSIZE,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	}
	*srcp = dma->srci++;
	dma->srck = dma->srci;

	if (dma->dsti == HIFN_D_DST_RSIZE) {
		dma->dsti = 0;
		dma->dstr[HIFN_D_DST_RSIZE].l = htole32(HIFN_D_VALID |
		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
		HIFN_DSTR_SYNC(sc, HIFN_D_DST_RSIZE,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	}
	*dstp = dma->dsti++;
	dma->dstk = dma->dsti;

	if (dma->resi == HIFN_D_RES_RSIZE) {
		dma->resi = 0;
		dma->resr[HIFN_D_RES_RSIZE].l = htole32(HIFN_D_VALID |
		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
		HIFN_RESR_SYNC(sc, HIFN_D_RES_RSIZE,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	}
	*resp = dma->resi++;
	dma->resk = dma->resi;
}
1039 
1040 int
1041 hifn_writeramaddr(sc, addr, data)
1042 	struct hifn_softc *sc;
1043 	int addr;
1044 	u_int8_t *data;
1045 {
1046 	struct hifn_dma *dma = sc->sc_dma;
1047 	struct hifn_base_command wc;
1048 	const u_int32_t masks = HIFN_D_VALID | HIFN_D_LAST | HIFN_D_MASKDONEIRQ;
1049 	int r, cmdi, resi, srci, dsti;
1050 
1051 	wc.masks = htole16(3 << 13);
1052 	wc.session_num = htole16(addr >> 14);
1053 	wc.total_source_count = htole16(8);
1054 	wc.total_dest_count = htole16(addr & 0x3fff);
1055 
1056 	hifn_alloc_slot(sc, &cmdi, &srci, &dsti, &resi);
1057 
1058 	WRITE_REG_1(sc, HIFN_1_DMA_CSR,
1059 	    HIFN_DMACSR_C_CTRL_ENA | HIFN_DMACSR_S_CTRL_ENA |
1060 	    HIFN_DMACSR_D_CTRL_ENA | HIFN_DMACSR_R_CTRL_ENA);
1061 
1062 	/* build write command */
1063 	bzero(dma->command_bufs[cmdi], HIFN_MAX_COMMAND);
1064 	*(struct hifn_base_command *)dma->command_bufs[cmdi] = wc;
1065 	bcopy(data, &dma->test_src, sizeof(dma->test_src));
1066 
1067 	dma->srcr[srci].p = htole32(sc->sc_dmamap->dm_segs[0].ds_addr
1068 	    + offsetof(struct hifn_dma, test_src));
1069 	dma->dstr[dsti].p = htole32(sc->sc_dmamap->dm_segs[0].ds_addr
1070 	    + offsetof(struct hifn_dma, test_dst));
1071 
1072 	dma->cmdr[cmdi].l = htole32(16 | masks);
1073 	dma->srcr[srci].l = htole32(8 | masks);
1074 	dma->dstr[dsti].l = htole32(4 | masks);
1075 	dma->resr[resi].l = htole32(4 | masks);
1076 
1077 	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1078 	    0, sc->sc_dmamap->dm_mapsize,
1079 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1080 
1081 	for (r = 10000; r >= 0; r--) {
1082 		DELAY(10);
1083 		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1084 		    0, sc->sc_dmamap->dm_mapsize,
1085 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1086 		if ((dma->resr[resi].l & htole32(HIFN_D_VALID)) == 0)
1087 			break;
1088 		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1089 		    0, sc->sc_dmamap->dm_mapsize,
1090 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1091 	}
1092 	if (r == 0) {
1093 		printf("%s: writeramaddr -- "
1094 		    "result[%d](addr %d) still valid\n",
1095 		    sc->sc_dv.dv_xname, resi, addr);
1096 		r = -1;
1097 		return (-1);
1098 	} else
1099 		r = 0;
1100 
1101 	WRITE_REG_1(sc, HIFN_1_DMA_CSR,
1102 	    HIFN_DMACSR_C_CTRL_DIS | HIFN_DMACSR_S_CTRL_DIS |
1103 	    HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS);
1104 
1105 	return (r);
1106 }
1107 
1108 int
1109 hifn_readramaddr(sc, addr, data)
1110 	struct hifn_softc *sc;
1111 	int addr;
1112 	u_int8_t *data;
1113 {
1114 	struct hifn_dma *dma = sc->sc_dma;
1115 	struct hifn_base_command rc;
1116 	const u_int32_t masks = HIFN_D_VALID | HIFN_D_LAST | HIFN_D_MASKDONEIRQ;
1117 	int r, cmdi, srci, dsti, resi;
1118 
1119 	rc.masks = htole16(2 << 13);
1120 	rc.session_num = htole16(addr >> 14);
1121 	rc.total_source_count = htole16(addr & 0x3fff);
1122 	rc.total_dest_count = htole16(8);
1123 
1124 	hifn_alloc_slot(sc, &cmdi, &srci, &dsti, &resi);
1125 
1126 	WRITE_REG_1(sc, HIFN_1_DMA_CSR,
1127 	    HIFN_DMACSR_C_CTRL_ENA | HIFN_DMACSR_S_CTRL_ENA |
1128 	    HIFN_DMACSR_D_CTRL_ENA | HIFN_DMACSR_R_CTRL_ENA);
1129 
1130 	bzero(dma->command_bufs[cmdi], HIFN_MAX_COMMAND);
1131 	*(struct hifn_base_command *)dma->command_bufs[cmdi] = rc;
1132 
1133 	dma->srcr[srci].p = htole32(sc->sc_dmamap->dm_segs[0].ds_addr +
1134 	    offsetof(struct hifn_dma, test_src));
1135 	dma->test_src = 0;
1136 	dma->dstr[dsti].p =  htole32(sc->sc_dmamap->dm_segs[0].ds_addr +
1137 	    offsetof(struct hifn_dma, test_dst));
1138 	dma->test_dst = 0;
1139 	dma->cmdr[cmdi].l = htole32(8 | masks);
1140 	dma->srcr[srci].l = htole32(8 | masks);
1141 	dma->dstr[dsti].l = htole32(8 | masks);
1142 	dma->resr[resi].l = htole32(HIFN_MAX_RESULT | masks);
1143 
1144 	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1145 	    0, sc->sc_dmamap->dm_mapsize,
1146 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1147 
1148 	for (r = 10000; r >= 0; r--) {
1149 		DELAY(10);
1150 		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1151 		    0, sc->sc_dmamap->dm_mapsize,
1152 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1153 		if ((dma->resr[resi].l & htole32(HIFN_D_VALID)) == 0)
1154 			break;
1155 		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1156 		    0, sc->sc_dmamap->dm_mapsize,
1157 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1158 	}
1159 	if (r == 0) {
1160 		printf("%s: readramaddr -- "
1161 		    "result[%d](addr %d) still valid\n",
1162 		    sc->sc_dv.dv_xname, resi, addr);
1163 		r = -1;
1164 	} else {
1165 		r = 0;
1166 		bcopy(&dma->test_dst, data, sizeof(dma->test_dst));
1167 	}
1168 
1169 	WRITE_REG_1(sc, HIFN_1_DMA_CSR,
1170 	    HIFN_DMACSR_C_CTRL_DIS | HIFN_DMACSR_S_CTRL_DIS |
1171 	    HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS);
1172 
1173 	return (r);
1174 }
1175 
1176 /*
1177  * Initialize the descriptor rings.
1178  */
void
hifn_init_dma(sc)
	struct hifn_softc *sc;
{
	struct hifn_dma *dma = sc->sc_dma;
	int i;

	hifn_set_retry(sc);

	/* initialize static pointer values */
	/* Point each command/result descriptor at its fixed buffer
	 * inside the shared hifn_dma block (src/dst .p fields are
	 * filled per-operation instead). */
	for (i = 0; i < HIFN_D_CMD_RSIZE; i++)
		dma->cmdr[i].p = htole32(sc->sc_dmamap->dm_segs[0].ds_addr +
		    offsetof(struct hifn_dma, command_bufs[i][0]));
	for (i = 0; i < HIFN_D_RES_RSIZE; i++)
		dma->resr[i].p = htole32(sc->sc_dmamap->dm_segs[0].ds_addr +
		    offsetof(struct hifn_dma, result_bufs[i][0]));

	/* The extra descriptor at the end of each ring is a JUMP
	 * back to descriptor 0 of the same ring. */
	dma->cmdr[HIFN_D_CMD_RSIZE].p =
	    htole32(sc->sc_dmamap->dm_segs[0].ds_addr +
		offsetof(struct hifn_dma, cmdr[0]));
	dma->srcr[HIFN_D_SRC_RSIZE].p =
	    htole32(sc->sc_dmamap->dm_segs[0].ds_addr +
		offsetof(struct hifn_dma, srcr[0]));
	dma->dstr[HIFN_D_DST_RSIZE].p =
	    htole32(sc->sc_dmamap->dm_segs[0].ds_addr +
		offsetof(struct hifn_dma, dstr[0]));
	dma->resr[HIFN_D_RES_RSIZE].p =
	    htole32(sc->sc_dmamap->dm_segs[0].ds_addr +
		offsetof(struct hifn_dma, resr[0]));

	/* Reset in-use counts (u), allocation indices (i), and
	 * cleanup indices (k) for all four rings. */
	dma->cmdu = dma->srcu = dma->dstu = dma->resu = 0;
	dma->cmdi = dma->srci = dma->dsti = dma->resi = 0;
	dma->cmdk = dma->srck = dma->dstk = dma->resk = 0;
}
1213 
1214 /*
1215  * Writes out the raw command buffer space.  Returns the
1216  * command buffer size.
1217  */
1218 u_int
1219 hifn_write_command(cmd, buf)
1220 	struct hifn_command *cmd;
1221 	u_int8_t *buf;
1222 {
1223 	u_int8_t *buf_pos;
1224 	struct hifn_base_command *base_cmd;
1225 	struct hifn_mac_command *mac_cmd;
1226 	struct hifn_crypt_command *cry_cmd;
1227 	struct hifn_comp_command *comp_cmd;
1228 	int using_mac, using_crypt, using_comp, len;
1229 	u_int32_t dlen, slen;
1230 
1231 	buf_pos = buf;
1232 	using_mac = cmd->base_masks & HIFN_BASE_CMD_MAC;
1233 	using_crypt = cmd->base_masks & HIFN_BASE_CMD_CRYPT;
1234 	using_comp = cmd->base_masks & HIFN_BASE_CMD_COMP;
1235 
1236 	base_cmd = (struct hifn_base_command *)buf_pos;
1237 	base_cmd->masks = htole16(cmd->base_masks);
1238 	slen = cmd->src_map->dm_mapsize;
1239 	if (cmd->sloplen)
1240 		dlen = cmd->dst_map->dm_mapsize - cmd->sloplen +
1241 		    sizeof(u_int32_t);
1242 	else
1243 		dlen = cmd->dst_map->dm_mapsize;
1244 	base_cmd->total_source_count = htole16(slen & HIFN_BASE_CMD_LENMASK_LO);
1245 	base_cmd->total_dest_count = htole16(dlen & HIFN_BASE_CMD_LENMASK_LO);
1246 	dlen >>= 16;
1247 	slen >>= 16;
1248 	base_cmd->session_num = htole16(cmd->session_num |
1249 	    ((slen << HIFN_BASE_CMD_SRCLEN_S) & HIFN_BASE_CMD_SRCLEN_M) |
1250 	    ((dlen << HIFN_BASE_CMD_DSTLEN_S) & HIFN_BASE_CMD_DSTLEN_M));
1251 	buf_pos += sizeof(struct hifn_base_command);
1252 
1253 	if (using_comp) {
1254 		comp_cmd = (struct hifn_comp_command *)buf_pos;
1255 		dlen = cmd->compcrd->crd_len;
1256 		comp_cmd->source_count = htole16(dlen & 0xffff);
1257 		dlen >>= 16;
1258 		comp_cmd->masks = htole16(cmd->comp_masks |
1259 		    ((dlen << HIFN_COMP_CMD_SRCLEN_S) & HIFN_COMP_CMD_SRCLEN_M));
1260 		comp_cmd->header_skip = htole16(cmd->compcrd->crd_skip);
1261 		comp_cmd->reserved = 0;
1262 		buf_pos += sizeof(struct hifn_comp_command);
1263 	}
1264 
1265 	if (using_mac) {
1266 		mac_cmd = (struct hifn_mac_command *)buf_pos;
1267 		dlen = cmd->maccrd->crd_len;
1268 		mac_cmd->source_count = htole16(dlen & 0xffff);
1269 		dlen >>= 16;
1270 		mac_cmd->masks = htole16(cmd->mac_masks |
1271 		    ((dlen << HIFN_MAC_CMD_SRCLEN_S) & HIFN_MAC_CMD_SRCLEN_M));
1272 		mac_cmd->header_skip = htole16(cmd->maccrd->crd_skip);
1273 		mac_cmd->reserved = 0;
1274 		buf_pos += sizeof(struct hifn_mac_command);
1275 	}
1276 
1277 	if (using_crypt) {
1278 		cry_cmd = (struct hifn_crypt_command *)buf_pos;
1279 		dlen = cmd->enccrd->crd_len;
1280 		cry_cmd->source_count = htole16(dlen & 0xffff);
1281 		dlen >>= 16;
1282 		cry_cmd->masks = htole16(cmd->cry_masks |
1283 		    ((dlen << HIFN_CRYPT_CMD_SRCLEN_S) & HIFN_CRYPT_CMD_SRCLEN_M));
1284 		cry_cmd->header_skip = htole16(cmd->enccrd->crd_skip);
1285 		cry_cmd->reserved = 0;
1286 		buf_pos += sizeof(struct hifn_crypt_command);
1287 	}
1288 
1289 	if (using_mac && cmd->mac_masks & HIFN_MAC_CMD_NEW_KEY) {
1290 		bcopy(cmd->mac, buf_pos, HIFN_MAC_KEY_LENGTH);
1291 		buf_pos += HIFN_MAC_KEY_LENGTH;
1292 	}
1293 
1294 	if (using_crypt && cmd->cry_masks & HIFN_CRYPT_CMD_NEW_KEY) {
1295 		switch (cmd->cry_masks & HIFN_CRYPT_CMD_ALG_MASK) {
1296 		case HIFN_CRYPT_CMD_ALG_3DES:
1297 			bcopy(cmd->ck, buf_pos, HIFN_3DES_KEY_LENGTH);
1298 			buf_pos += HIFN_3DES_KEY_LENGTH;
1299 			break;
1300 		case HIFN_CRYPT_CMD_ALG_DES:
1301 			bcopy(cmd->ck, buf_pos, HIFN_DES_KEY_LENGTH);
1302 			buf_pos += cmd->cklen;
1303 			break;
1304 		case HIFN_CRYPT_CMD_ALG_RC4:
1305 			len = 256;
1306 			do {
1307 				int clen;
1308 
1309 				clen = MIN(cmd->cklen, len);
1310 				bcopy(cmd->ck, buf_pos, clen);
1311 				len -= clen;
1312 				buf_pos += clen;
1313 			} while (len > 0);
1314 			bzero(buf_pos, 4);
1315 			buf_pos += 4;
1316 			break;
1317 		}
1318 	}
1319 
1320 	if (using_crypt && cmd->cry_masks & HIFN_CRYPT_CMD_NEW_IV) {
1321 		bcopy(cmd->iv, buf_pos, HIFN_IV_LENGTH);
1322 		buf_pos += HIFN_IV_LENGTH;
1323 	}
1324 
1325 	if ((cmd->base_masks & (HIFN_BASE_CMD_MAC | HIFN_BASE_CMD_CRYPT |
1326 	    HIFN_BASE_CMD_COMP)) == 0) {
1327 		bzero(buf_pos, 8);
1328 		buf_pos += 8;
1329 	}
1330 
1331 	return (buf_pos - buf);
1332 }
1333 
1334 int
1335 hifn_dmamap_aligned(map)
1336 	bus_dmamap_t map;
1337 {
1338 	int i;
1339 
1340 	for (i = 0; i < map->dm_nsegs; i++) {
1341 		if (map->dm_segs[i].ds_addr & 3)
1342 			return (0);
1343 		if ((i != (map->dm_nsegs - 1)) &&
1344 		    (map->dm_segs[i].ds_len & 3))
1345 			return (0);
1346 	}
1347 	return (1);
1348 }
1349 
/*
 * Fill destination-ring descriptors for cmd->dst_map.  All segments
 * but the last are emitted verbatim; the final descriptor handling
 * depends on sloplen: when the total length is not a 4-byte multiple,
 * the trailing partial word is redirected to the per-command "slop"
 * staging area in the shared DMA block (copied back later).  Returns
 * the new ring index.
 */
int
hifn_dmamap_load_dst(sc, cmd)
	struct hifn_softc *sc;
	struct hifn_command *cmd;
{
	struct hifn_dma *dma = sc->sc_dma;
	bus_dmamap_t map = cmd->dst_map;
	u_int32_t p, l;
	int idx, used = 0, i;

	idx = dma->dsti;
	/* All but the last segment: VALID, IRQ masked, no LAST bit. */
	for (i = 0; i < map->dm_nsegs - 1; i++) {
		dma->dstr[idx].p = htole32(map->dm_segs[i].ds_addr);
		dma->dstr[idx].l = htole32(HIFN_D_VALID |
		    HIFN_D_MASKDONEIRQ | map->dm_segs[i].ds_len);
		HIFN_DSTR_SYNC(sc, idx,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		used++;

		/* End of ring: write the jump descriptor and wrap. */
		if (++idx == HIFN_D_DST_RSIZE) {
			dma->dstr[idx].l = htole32(HIFN_D_VALID |
			    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
			HIFN_DSTR_SYNC(sc, idx,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			idx = 0;
		}
	}

	if (cmd->sloplen == 0) {
		/* Aligned total: last segment is the LAST descriptor. */
		p = map->dm_segs[i].ds_addr;
		l = HIFN_D_VALID | HIFN_D_MASKDONEIRQ | HIFN_D_LAST |
		    map->dm_segs[i].ds_len;
	} else {
		/* Unaligned total: the final 4-byte write goes to the
		 * slop slot chosen for this command. */
		p = sc->sc_dmamap->dm_segs[0].ds_addr +
		    offsetof(struct hifn_dma, slop[cmd->slopidx]);
		l = HIFN_D_VALID | HIFN_D_MASKDONEIRQ | HIFN_D_LAST |
		    sizeof(u_int32_t);

		/* Emit the aligned prefix of the last segment, if any. */
		if ((map->dm_segs[i].ds_len - cmd->sloplen) != 0) {
			dma->dstr[idx].p = htole32(map->dm_segs[i].ds_addr);
			dma->dstr[idx].l = htole32(HIFN_D_VALID |
			    HIFN_D_MASKDONEIRQ |
			    (map->dm_segs[i].ds_len - cmd->sloplen));
			HIFN_DSTR_SYNC(sc, idx,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			used++;

			if (++idx == HIFN_D_DST_RSIZE) {
				dma->dstr[idx].l = htole32(HIFN_D_VALID |
				    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
				HIFN_DSTR_SYNC(sc, idx,
				    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
				idx = 0;
			}
		}
	}
	/* Final (LAST) descriptor computed above. */
	dma->dstr[idx].p = htole32(p);
	dma->dstr[idx].l = htole32(l);
	HIFN_DSTR_SYNC(sc, idx, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	used++;

	if (++idx == HIFN_D_DST_RSIZE) {
		dma->dstr[idx].l = htole32(HIFN_D_VALID | HIFN_D_JUMP |
		    HIFN_D_MASKDONEIRQ);
		HIFN_DSTR_SYNC(sc, idx,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		idx = 0;
	}

	dma->dsti = idx;
	dma->dstu += used;
	return (idx);
}
1423 
/*
 * Fill source-ring descriptors for cmd->src_map, one per DMA segment,
 * marking the final segment with HIFN_D_LAST and inserting the jump
 * descriptor whenever the ring index wraps.  Returns the new ring
 * index.
 */
int
hifn_dmamap_load_src(sc, cmd)
	struct hifn_softc *sc;
	struct hifn_command *cmd;
{
	struct hifn_dma *dma = sc->sc_dma;
	bus_dmamap_t map = cmd->src_map;
	int idx, i;
	u_int32_t last = 0;

	idx = dma->srci;
	for (i = 0; i < map->dm_nsegs; i++) {
		if (i == map->dm_nsegs - 1)
			last = HIFN_D_LAST;

		dma->srcr[idx].p = htole32(map->dm_segs[i].ds_addr);
		dma->srcr[idx].l = htole32(map->dm_segs[i].ds_len |
		    HIFN_D_VALID | HIFN_D_MASKDONEIRQ | last);
		HIFN_SRCR_SYNC(sc, idx,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);

		/* End of ring: write the jump descriptor and wrap. */
		if (++idx == HIFN_D_SRC_RSIZE) {
			dma->srcr[idx].l = htole32(HIFN_D_VALID |
			    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
			HIFN_SRCR_SYNC(sc, HIFN_D_SRC_RSIZE,
			    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
			idx = 0;
		}
	}
	dma->srci = idx;
	dma->srcu += map->dm_nsegs;
	return (idx);
}
1457 
/*
 * Queue one crypto operation: DMA-map the source (mbuf chain or uio),
 * arrange a destination mapping -- reusing the source map when it is
 * 4-byte aligned, otherwise allocating a fresh mbuf chain to receive
 * the output -- then write the command descriptor, load the src/dst
 * rings, and enable the DMA engines.  Runs the ring manipulation at
 * splnet.  Returns 0 on success or an errno; on failure every map and
 * mbuf acquired here is released via the goto-cleanup chain at the
 * bottom.
 */
int
hifn_crypto(
	struct hifn_softc *sc,
	struct hifn_command *cmd,
	struct cryptop *crp,
	int hint)

{
	struct	hifn_dma *dma = sc->sc_dma;
	u_int32_t cmdlen;
	int	cmdi, resi, s, err = 0;

	if (bus_dmamap_create(sc->sc_dmat, HIFN_MAX_DMALEN, MAX_SCATTER,
	    HIFN_MAX_SEGLEN, 0, BUS_DMA_NOWAIT, &cmd->src_map))
		return (ENOMEM);

	/* Load the source buffer according to its type. */
	if (crp->crp_flags & CRYPTO_F_IMBUF) {
		if (bus_dmamap_load_mbuf(sc->sc_dmat, cmd->src_map,
		    cmd->srcu.src_m, BUS_DMA_NOWAIT)) {
			err = ENOMEM;
			goto err_srcmap1;
		}
	} else if (crp->crp_flags & CRYPTO_F_IOV) {
		if (bus_dmamap_load_uio(sc->sc_dmat, cmd->src_map,
		    cmd->srcu.src_io, BUS_DMA_NOWAIT)) {
			err = ENOMEM;
			goto err_srcmap1;
		}
	} else {
		err = EINVAL;
		goto err_srcmap1;
	}

	if (hifn_dmamap_aligned(cmd->src_map)) {
		/* In-place operation: destination shares the source
		 * map; any trailing partial word goes through slop. */
		cmd->sloplen = cmd->src_map->dm_mapsize & 3;
		if (crp->crp_flags & CRYPTO_F_IOV)
			cmd->dstu.dst_io = cmd->srcu.src_io;
		else if (crp->crp_flags & CRYPTO_F_IMBUF)
			cmd->dstu.dst_m = cmd->srcu.src_m;
		cmd->dst_map = cmd->src_map;
	} else {
		if (crp->crp_flags & CRYPTO_F_IOV) {
			/* Can't realign a uio; give up. */
			err = EINVAL;
			goto err_srcmap;
		} else if (crp->crp_flags & CRYPTO_F_IMBUF) {
			/* Build a fresh mbuf chain big enough to hold
			 * the whole result; output lands here instead
			 * of the unaligned source chain. */
			int totlen, len;
			struct mbuf *m, *m0, *mlast;

			totlen = cmd->src_map->dm_mapsize;
			if (cmd->srcu.src_m->m_flags & M_PKTHDR) {
				len = MHLEN;
				MGETHDR(m0, M_DONTWAIT, MT_DATA);
			} else {
				len = MLEN;
				MGET(m0, M_DONTWAIT, MT_DATA);
			}
			if (m0 == NULL) {
				err = ENOMEM;
				goto err_srcmap;
			}
			if (len == MHLEN)
				M_DUP_PKTHDR(m0, cmd->srcu.src_m);
			if (totlen >= MINCLSIZE) {
				MCLGET(m0, M_DONTWAIT);
				if (m0->m_flags & M_EXT)
					len = MCLBYTES;
			}
			totlen -= len;
			m0->m_pkthdr.len = m0->m_len = len;
			mlast = m0;

			while (totlen > 0) {
				MGET(m, M_DONTWAIT, MT_DATA);
				if (m == NULL) {
					err = ENOMEM;
					m_freem(m0);
					goto err_srcmap;
				}
				len = MLEN;
				if (totlen >= MINCLSIZE) {
					MCLGET(m, M_DONTWAIT);
					if (m->m_flags & M_EXT)
						len = MCLBYTES;
				}

				m->m_len = len;
				if (m0->m_flags & M_PKTHDR)
					m0->m_pkthdr.len += len;
				totlen -= len;

				mlast->m_next = m;
				mlast = m;
			}
			cmd->dstu.dst_m = m0;
		}
	}

	/* Separate destination buffer: create and load its own map. */
	if (cmd->dst_map == NULL) {
		if (bus_dmamap_create(sc->sc_dmat,
		    HIFN_MAX_SEGLEN * MAX_SCATTER, MAX_SCATTER,
		    HIFN_MAX_SEGLEN, 0, BUS_DMA_NOWAIT, &cmd->dst_map)) {
			err = ENOMEM;
			goto err_srcmap;
		}
		if (crp->crp_flags & CRYPTO_F_IMBUF) {
			if (bus_dmamap_load_mbuf(sc->sc_dmat, cmd->dst_map,
			    cmd->dstu.dst_m, BUS_DMA_NOWAIT)) {
				err = ENOMEM;
				goto err_dstmap1;
			}
		} else if (crp->crp_flags & CRYPTO_F_IOV) {
			if (bus_dmamap_load_uio(sc->sc_dmat, cmd->dst_map,
			    cmd->dstu.dst_io, BUS_DMA_NOWAIT)) {
				err = ENOMEM;
				goto err_dstmap1;
			}
		}
	}

#ifdef HIFN_DEBUG
	if (hifn_debug)
		printf("%s: Entering cmd: stat %8x ien %8x u %d/%d/%d/%d n %d/%d\n",
		    sc->sc_dv.dv_xname,
		    READ_REG_1(sc, HIFN_1_DMA_CSR),
		    READ_REG_1(sc, HIFN_1_DMA_IER),
		    dma->cmdu, dma->srcu, dma->dstu, dma->resu,
		    cmd->src_map->dm_nsegs, cmd->dst_map->dm_nsegs);
#endif

	if (cmd->src_map == cmd->dst_map)
		bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
		    0, cmd->src_map->dm_mapsize,
		    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
	else {
		bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
		    0, cmd->src_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
		bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
		    0, cmd->dst_map->dm_mapsize, BUS_DMASYNC_PREREAD);
	}

	s = splnet();

	/*
	 * need 1 cmd, and 1 res
	 * need N src, and N dst
	 */
	if ((dma->cmdu + 1) > HIFN_D_CMD_RSIZE ||
	    (dma->resu + 1) > HIFN_D_RES_RSIZE) {
		splx(s);
		err = ENOMEM;
		goto err_dstmap;
	}
	if ((dma->srcu + cmd->src_map->dm_nsegs) > HIFN_D_SRC_RSIZE ||
	    (dma->dstu + cmd->dst_map->dm_nsegs + 1) > HIFN_D_DST_RSIZE) {
		splx(s);
		err = ENOMEM;
		goto err_dstmap;
	}

	/* Command ring wrap: write the jump descriptor first. */
	if (dma->cmdi == HIFN_D_CMD_RSIZE) {
		dma->cmdi = 0;
		dma->cmdr[HIFN_D_CMD_RSIZE].l = htole32(HIFN_D_VALID |
		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
		HIFN_CMDR_SYNC(sc, HIFN_D_CMD_RSIZE,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	}
	cmdi = dma->cmdi++;
	cmdlen = hifn_write_command(cmd, dma->command_bufs[cmdi]);
	HIFN_CMD_SYNC(sc, cmdi, BUS_DMASYNC_PREWRITE);

	/* .p for command/result already set */
	dma->cmdr[cmdi].l = htole32(cmdlen | HIFN_D_VALID | HIFN_D_LAST |
	    HIFN_D_MASKDONEIRQ);
	HIFN_CMDR_SYNC(sc, cmdi,
	    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	dma->cmdu++;
	if (sc->sc_c_busy == 0) {
		WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_C_CTRL_ENA);
		sc->sc_c_busy = 1;
		SET_LED(sc, HIFN_MIPSRST_LED0);
	}

	/*
	 * We don't worry about missing an interrupt (which a "command wait"
	 * interrupt salvages us from), unless there is more than one command
	 * in the queue.
	 */
	if (dma->cmdu > 1) {
		sc->sc_dmaier |= HIFN_DMAIER_C_WAIT;
		WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
	}

	hifnstats.hst_ipackets++;
	hifnstats.hst_ibytes += cmd->src_map->dm_mapsize;

	hifn_dmamap_load_src(sc, cmd);
	if (sc->sc_s_busy == 0) {
		WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_S_CTRL_ENA);
		sc->sc_s_busy = 1;
		SET_LED(sc, HIFN_MIPSRST_LED1);
	}

	/*
	 * Unlike other descriptors, we don't mask done interrupt from
	 * result descriptor.
	 */
#ifdef HIFN_DEBUG
	if (hifn_debug)
		printf("load res\n");
#endif
	if (dma->resi == HIFN_D_RES_RSIZE) {
		dma->resi = 0;
		dma->resr[HIFN_D_RES_RSIZE].l = htole32(HIFN_D_VALID |
		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
		HIFN_RESR_SYNC(sc, HIFN_D_RES_RSIZE,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}
	resi = dma->resi++;
	dma->hifn_commands[resi] = cmd;
	HIFN_RES_SYNC(sc, resi, BUS_DMASYNC_PREREAD);
	dma->resr[resi].l = htole32(HIFN_MAX_RESULT |
	    HIFN_D_VALID | HIFN_D_LAST);
	HIFN_RESR_SYNC(sc, resi,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	dma->resu++;
	if (sc->sc_r_busy == 0) {
		WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_R_CTRL_ENA);
		sc->sc_r_busy = 1;
		SET_LED(sc, HIFN_MIPSRST_LED2);
	}

	/* Slop copy-back in hifn_callback is keyed by result index. */
	if (cmd->sloplen)
		cmd->slopidx = resi;

	hifn_dmamap_load_dst(sc, cmd);

	if (sc->sc_d_busy == 0) {
		WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_D_CTRL_ENA);
		sc->sc_d_busy = 1;
	}

#ifdef HIFN_DEBUG
	if (hifn_debug)
		printf("%s: command: stat %8x ier %8x\n",
		    sc->sc_dv.dv_xname,
		    READ_REG_1(sc, HIFN_1_DMA_CSR), READ_REG_1(sc, HIFN_1_DMA_IER));
#endif

	/* Arm the watchdog countdown consumed by hifn_tick(). */
	sc->sc_active = 5;
	splx(s);
	return (err);		/* success */

err_dstmap:
	if (cmd->src_map != cmd->dst_map)
		bus_dmamap_unload(sc->sc_dmat, cmd->dst_map);
err_dstmap1:
	if (cmd->src_map != cmd->dst_map)
		bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map);
err_srcmap:
	if (crp->crp_flags & CRYPTO_F_IMBUF &&
	    cmd->srcu.src_m != cmd->dstu.dst_m)
		m_freem(cmd->dstu.dst_m);
	bus_dmamap_unload(sc->sc_dmat, cmd->src_map);
err_srcmap1:
	bus_dmamap_destroy(sc->sc_dmat, cmd->src_map);
	return (err);
}
1725 
1726 void
1727 hifn_tick(vsc)
1728 	void *vsc;
1729 {
1730 	struct hifn_softc *sc = vsc;
1731 	int s;
1732 
1733 	s = splnet();
1734 	if (sc->sc_active == 0) {
1735 		struct hifn_dma *dma = sc->sc_dma;
1736 		u_int32_t r = 0;
1737 
1738 		if (dma->cmdu == 0 && sc->sc_c_busy) {
1739 			sc->sc_c_busy = 0;
1740 			r |= HIFN_DMACSR_C_CTRL_DIS;
1741 			CLR_LED(sc, HIFN_MIPSRST_LED0);
1742 		}
1743 		if (dma->srcu == 0 && sc->sc_s_busy) {
1744 			sc->sc_s_busy = 0;
1745 			r |= HIFN_DMACSR_S_CTRL_DIS;
1746 			CLR_LED(sc, HIFN_MIPSRST_LED1);
1747 		}
1748 		if (dma->dstu == 0 && sc->sc_d_busy) {
1749 			sc->sc_d_busy = 0;
1750 			r |= HIFN_DMACSR_D_CTRL_DIS;
1751 		}
1752 		if (dma->resu == 0 && sc->sc_r_busy) {
1753 			sc->sc_r_busy = 0;
1754 			r |= HIFN_DMACSR_R_CTRL_DIS;
1755 			CLR_LED(sc, HIFN_MIPSRST_LED2);
1756 		}
1757 		if (r)
1758 			WRITE_REG_1(sc, HIFN_1_DMA_CSR, r);
1759 	}
1760 	else
1761 		sc->sc_active--;
1762 	splx(s);
1763 #ifdef	__OpenBSD__
1764 	timeout_add(&sc->sc_tickto, hz);
1765 #else
1766 	callout_reset(&sc->sc_tickto, hz, hifn_tick, sc);
1767 #endif
1768 }
1769 
/*
 * Interrupt handler: acknowledge DMA status bits, handle overruns and
 * aborts (the latter by a full reset via hifn_abort), then drain the
 * result ring -- invoking hifn_callback for each completed command --
 * and advance the cleanup indices of the source and command rings.
 * Returns 1 if the interrupt was ours, 0 otherwise.
 */
int
hifn_intr(void *arg)
{
	struct hifn_softc *sc = arg;
	struct hifn_dma *dma = sc->sc_dma;
	u_int32_t dmacsr, restart;
	int i, u;

	dmacsr = READ_REG_1(sc, HIFN_1_DMA_CSR);

#ifdef HIFN_DEBUG
	if (hifn_debug)
		printf("%s: irq: stat %08x ien %08x u %d/%d/%d/%d\n",
		       sc->sc_dv.dv_xname,
		       dmacsr, READ_REG_1(sc, HIFN_1_DMA_IER),
		       dma->cmdu, dma->srcu, dma->dstu, dma->resu);
#endif

	/* Nothing in the DMA unit interrupted */
	if ((dmacsr & sc->sc_dmaier) == 0)
		return (0);

	/* Ack only the bits we have enabled. */
	WRITE_REG_1(sc, HIFN_1_DMA_CSR, dmacsr & sc->sc_dmaier);

	if (dmacsr & HIFN_DMACSR_ENGINE)
		WRITE_REG_0(sc, HIFN_0_PUISR, READ_REG_0(sc, HIFN_0_PUISR));

	if ((sc->sc_flags & HIFN_HAS_PUBLIC) &&
	    (dmacsr & HIFN_DMACSR_PUBDONE))
		WRITE_REG_1(sc, HIFN_1_PUB_STATUS,
		    READ_REG_1(sc, HIFN_1_PUB_STATUS) | HIFN_PUBSTS_DONE);

	restart = dmacsr & (HIFN_DMACSR_R_OVER | HIFN_DMACSR_D_OVER);
	if (restart)
		printf("%s: overrun %x\n", sc->sc_dv.dv_xname, dmacsr);

	if (sc->sc_flags & HIFN_IS_7811) {
		if (dmacsr & HIFN_DMACSR_ILLR)
			printf("%s: illegal read\n", sc->sc_dv.dv_xname);
		if (dmacsr & HIFN_DMACSR_ILLW)
			printf("%s: illegal write\n", sc->sc_dv.dv_xname);
	}

	/* Any engine abort means the rings are in an unknown state:
	 * reset the whole device. */
	restart = dmacsr & (HIFN_DMACSR_C_ABORT | HIFN_DMACSR_S_ABORT |
	    HIFN_DMACSR_D_ABORT | HIFN_DMACSR_R_ABORT);
	if (restart) {
		printf("%s: abort, resetting.\n", sc->sc_dv.dv_xname);
		hifnstats.hst_abort++;
		hifn_abort(sc);
		return (1);
	}

	if ((dmacsr & HIFN_DMACSR_C_WAIT) && (dma->resu == 0)) {
		/*
		 * If no slots to process and we receive a "waiting on
		 * command" interrupt, we disable the "waiting on command"
		 * (by clearing it).
		 */
		sc->sc_dmaier &= ~HIFN_DMAIER_C_WAIT;
		WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
	}

	/* clear the rings */
	/* Result ring: complete each finished command.  Stop at the
	 * first descriptor still owned by the device (VALID set). */
	i = dma->resk;
	while (dma->resu != 0) {
		HIFN_RESR_SYNC(sc, i,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		if (dma->resr[i].l & htole32(HIFN_D_VALID)) {
			HIFN_RESR_SYNC(sc, i,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			break;
		}

		/* Index RSIZE is the jump descriptor; it carries no
		 * command and doesn't count against resu. */
		if (i != HIFN_D_RES_RSIZE) {
			struct hifn_command *cmd;
			u_int8_t *macbuf = NULL;

			HIFN_RES_SYNC(sc, i, BUS_DMASYNC_POSTREAD);
			cmd = dma->hifn_commands[i];
			KASSERT(cmd != NULL
				/*("hifn_intr: null command slot %u", i)*/);
			dma->hifn_commands[i] = NULL;

			if (cmd->base_masks & HIFN_BASE_CMD_MAC) {
				macbuf = dma->result_bufs[i];
				macbuf += 12;
			}

			hifn_callback(sc, cmd, macbuf);
			hifnstats.hst_opackets++;
		}

		if (++i == (HIFN_D_RES_RSIZE + 1))
			i = 0;
		else
			dma->resu--;
	}
	dma->resk = i;

	/* Source ring: just advance past consumed descriptors. */
	i = dma->srck; u = dma->srcu;
	while (u != 0) {
		HIFN_SRCR_SYNC(sc, i,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		if (dma->srcr[i].l & htole32(HIFN_D_VALID)) {
			HIFN_SRCR_SYNC(sc, i,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			break;
		}
		if (++i == (HIFN_D_SRC_RSIZE + 1))
			i = 0;
		else
			u--;
	}
	dma->srck = i; dma->srcu = u;

	/* Command ring: sync back each consumed command buffer. */
	i = dma->cmdk; u = dma->cmdu;
	while (u != 0) {
		HIFN_CMDR_SYNC(sc, i,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		if (dma->cmdr[i].l & htole32(HIFN_D_VALID)) {
			HIFN_CMDR_SYNC(sc, i,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			break;
		}
		if (i != HIFN_D_CMD_RSIZE) {
			u--;
			HIFN_CMD_SYNC(sc, i, BUS_DMASYNC_POSTWRITE);
		}
		if (++i == (HIFN_D_CMD_RSIZE + 1))
			i = 0;
	}
	dma->cmdk = i; dma->cmdu = u;

	return (1);
}
1905 
1906 /*
1907  * Allocate a new 'session' and return an encoded session id.  'sidp'
1908  * contains our registration id, and should contain an encoded session
1909  * id on successful allocation.
1910  */
1911 int
1912 hifn_newsession(void *arg, u_int32_t *sidp, struct cryptoini *cri)
1913 {
1914 	struct cryptoini *c;
1915 	struct hifn_softc *sc = arg;
1916 	int i, mac = 0, cry = 0, comp = 0;
1917 
1918 	KASSERT(sc != NULL /*, ("hifn_newsession: null softc")*/);
1919 	if (sidp == NULL || cri == NULL || sc == NULL)
1920 		return (EINVAL);
1921 
1922 	for (i = 0; i < sc->sc_maxses; i++)
1923 		if (sc->sc_sessions[i].hs_state == HS_STATE_FREE)
1924 			break;
1925 	if (i == sc->sc_maxses)
1926 		return (ENOMEM);
1927 
1928 	for (c = cri; c != NULL; c = c->cri_next) {
1929 		switch (c->cri_alg) {
1930 		case CRYPTO_MD5:
1931 		case CRYPTO_SHA1:
1932 		case CRYPTO_MD5_HMAC:
1933 		case CRYPTO_SHA1_HMAC:
1934 			if (mac)
1935 				return (EINVAL);
1936 			mac = 1;
1937 			break;
1938 		case CRYPTO_DES_CBC:
1939 		case CRYPTO_3DES_CBC:
1940 #ifdef __NetBSD__
1941 			rnd_extract_data(sc->sc_sessions[i].hs_iv,
1942 			    HIFN_IV_LENGTH, RND_EXTRACT_ANY);
1943 #else	/* FreeBSD and OpenBSD have get_random_bytes */
1944 			/* XXX this may read fewer, does it matter? */
1945  			get_random_bytes(sc->sc_sessions[i].hs_iv,
1946  			    HIFN_IV_LENGTH);
1947 #endif
1948 			/*FALLTHROUGH*/
1949 		case CRYPTO_ARC4:
1950 			if (cry)
1951 				return (EINVAL);
1952 			cry = 1;
1953 			break;
1954 #ifdef HAVE_CRYPTO_LSZ
1955 		case CRYPTO_LZS_COMP:
1956 			if (comp)
1957 				return (EINVAL);
1958 			comp = 1;
1959 			break;
1960 #endif
1961 		default:
1962 			return (EINVAL);
1963 		}
1964 	}
1965 	if (mac == 0 && cry == 0 && comp == 0)
1966 		return (EINVAL);
1967 
1968 	/*
1969 	 * XXX only want to support compression without chaining to
1970 	 * MAC/crypt engine right now
1971 	 */
1972 	if ((comp && mac) || (comp && cry))
1973 		return (EINVAL);
1974 
1975 	*sidp = HIFN_SID(sc->sc_dv.dv_unit, i);
1976 	sc->sc_sessions[i].hs_state = HS_STATE_USED;
1977 
1978 	return (0);
1979 }
1980 
1981 /*
1982  * Deallocate a session.
1983  * XXX this routine should run a zero'd mac/encrypt key into context ram.
1984  * XXX to blow away any keys already stored there.
1985  */
1986 int
1987 hifn_freesession(void *arg, u_int64_t tid)
1988 {
1989 	struct hifn_softc *sc = arg;
1990 	int session;
1991 	u_int32_t sid = ((u_int32_t) tid) & 0xffffffff;
1992 
1993 	KASSERT(sc != NULL /*, ("hifn_freesession: null softc")*/);
1994 	if (sc == NULL)
1995 		return (EINVAL);
1996 
1997 	session = HIFN_SESSION(sid);
1998 	if (session >= sc->sc_maxses)
1999 		return (EINVAL);
2000 
2001 	bzero(&sc->sc_sessions[session], sizeof(sc->sc_sessions[session]));
2002 	return (0);
2003 }
2004 
2005 int
2006 hifn_process(void *arg, struct cryptop *crp, int hint)
2007 {
2008 	struct hifn_softc *sc = arg;
2009 	struct hifn_command *cmd = NULL;
2010 	int session, err;
2011 	struct cryptodesc *crd1, *crd2, *maccrd, *enccrd;
2012 
2013 	if (crp == NULL || crp->crp_callback == NULL) {
2014 		hifnstats.hst_invalid++;
2015 		return (EINVAL);
2016 	}
2017 	session = HIFN_SESSION(crp->crp_sid);
2018 
2019 	if (sc == NULL || session >= sc->sc_maxses) {
2020 		err = EINVAL;
2021 		goto errout;
2022 	}
2023 
2024 	cmd = (struct hifn_command *)malloc(sizeof(struct hifn_command),
2025 	    M_DEVBUF, M_NOWAIT|M_ZERO);
2026 	if (cmd == NULL) {
2027 		hifnstats.hst_nomem++;
2028 		err = ENOMEM;
2029 		goto errout;
2030 	}
2031 
2032 	if (crp->crp_flags & CRYPTO_F_IMBUF) {
2033 		cmd->srcu.src_m = (struct mbuf *)crp->crp_buf;
2034 		cmd->dstu.dst_m = (struct mbuf *)crp->crp_buf;
2035 	} else if (crp->crp_flags & CRYPTO_F_IOV) {
2036 		cmd->srcu.src_io = (struct uio *)crp->crp_buf;
2037 		cmd->dstu.dst_io = (struct uio *)crp->crp_buf;
2038 	} else {
2039 		err = EINVAL;
2040 		goto errout;	/* XXX we don't handle contiguous buffers! */
2041 	}
2042 
2043 	crd1 = crp->crp_desc;
2044 	if (crd1 == NULL) {
2045 		err = EINVAL;
2046 		goto errout;
2047 	}
2048 	crd2 = crd1->crd_next;
2049 
2050 	if (crd2 == NULL) {
2051 		if (crd1->crd_alg == CRYPTO_MD5_HMAC ||
2052 		    crd1->crd_alg == CRYPTO_SHA1_HMAC ||
2053 		    crd1->crd_alg == CRYPTO_SHA1 ||
2054 		    crd1->crd_alg == CRYPTO_MD5) {
2055 			maccrd = crd1;
2056 			enccrd = NULL;
2057 		} else if (crd1->crd_alg == CRYPTO_DES_CBC ||
2058 			   crd1->crd_alg == CRYPTO_3DES_CBC ||
2059 			   crd1->crd_alg == CRYPTO_ARC4) {
2060 			if ((crd1->crd_flags & CRD_F_ENCRYPT) == 0)
2061 				cmd->base_masks |= HIFN_BASE_CMD_DECODE;
2062 			maccrd = NULL;
2063 			enccrd = crd1;
2064 #ifdef	HAVE_CRYPTO_LSZ
2065 		} else if (crd1->crd_alg == CRYPTO_LZS_COMP) {
2066 		  return (hifn_compression(sc, crp, cmd));
2067 #endif
2068 		} else {
2069 			err = EINVAL;
2070 			goto errout;
2071 		}
2072 	} else {
2073 		if ((crd1->crd_alg == CRYPTO_MD5_HMAC ||
2074 		     crd1->crd_alg == CRYPTO_SHA1_HMAC ||
2075 		     crd1->crd_alg == CRYPTO_MD5 ||
2076 		     crd1->crd_alg == CRYPTO_SHA1) &&
2077 		    (crd2->crd_alg == CRYPTO_DES_CBC ||
2078 		     crd2->crd_alg == CRYPTO_3DES_CBC ||
2079 		     crd2->crd_alg == CRYPTO_ARC4) &&
2080 		    ((crd2->crd_flags & CRD_F_ENCRYPT) == 0)) {
2081 			cmd->base_masks = HIFN_BASE_CMD_DECODE;
2082 			maccrd = crd1;
2083 			enccrd = crd2;
2084 		} else if ((crd1->crd_alg == CRYPTO_DES_CBC ||
2085 			    crd1->crd_alg == CRYPTO_ARC4 ||
2086 			    crd1->crd_alg == CRYPTO_3DES_CBC) &&
2087 			   (crd2->crd_alg == CRYPTO_MD5_HMAC ||
2088 			    crd2->crd_alg == CRYPTO_SHA1_HMAC ||
2089 			    crd2->crd_alg == CRYPTO_MD5 ||
2090 			    crd2->crd_alg == CRYPTO_SHA1) &&
2091 			   (crd1->crd_flags & CRD_F_ENCRYPT)) {
2092 			enccrd = crd1;
2093 			maccrd = crd2;
2094 		} else {
2095 			/*
2096 			 * We cannot order the 7751 as requested
2097 			 */
2098 			err = EINVAL;
2099 			goto errout;
2100 		}
2101 	}
2102 
2103 	if (enccrd) {
2104 		cmd->enccrd = enccrd;
2105 		cmd->base_masks |= HIFN_BASE_CMD_CRYPT;
2106 		switch (enccrd->crd_alg) {
2107 		case CRYPTO_ARC4:
2108 			cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_RC4;
2109 			if ((enccrd->crd_flags & CRD_F_ENCRYPT)
2110 			    != sc->sc_sessions[session].hs_prev_op)
2111 				sc->sc_sessions[session].hs_state =
2112 				    HS_STATE_USED;
2113 			break;
2114 		case CRYPTO_DES_CBC:
2115 			cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_DES |
2116 			    HIFN_CRYPT_CMD_MODE_CBC |
2117 			    HIFN_CRYPT_CMD_NEW_IV;
2118 			break;
2119 		case CRYPTO_3DES_CBC:
2120 			cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_3DES |
2121 			    HIFN_CRYPT_CMD_MODE_CBC |
2122 			    HIFN_CRYPT_CMD_NEW_IV;
2123 			break;
2124 		default:
2125 			err = EINVAL;
2126 			goto errout;
2127 		}
2128 		if (enccrd->crd_alg != CRYPTO_ARC4) {
2129 			if (enccrd->crd_flags & CRD_F_ENCRYPT) {
2130 				if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
2131 					bcopy(enccrd->crd_iv, cmd->iv,
2132 					    HIFN_IV_LENGTH);
2133 				else
2134 					bcopy(sc->sc_sessions[session].hs_iv,
2135 					    cmd->iv, HIFN_IV_LENGTH);
2136 
2137 				if ((enccrd->crd_flags & CRD_F_IV_PRESENT)
2138 				    == 0) {
2139 					if (crp->crp_flags & CRYPTO_F_IMBUF)
2140 						m_copyback(cmd->srcu.src_m,
2141 						    enccrd->crd_inject,
2142 						    HIFN_IV_LENGTH, cmd->iv);
2143 					else if (crp->crp_flags & CRYPTO_F_IOV)
2144 						cuio_copyback(cmd->srcu.src_io,
2145 						    enccrd->crd_inject,
2146 						    HIFN_IV_LENGTH, cmd->iv);
2147 				}
2148 			} else {
2149 				if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
2150 					bcopy(enccrd->crd_iv, cmd->iv,
2151 					    HIFN_IV_LENGTH);
2152 				else if (crp->crp_flags & CRYPTO_F_IMBUF)
2153 					m_copydata(cmd->srcu.src_m,
2154 					    enccrd->crd_inject,
2155 					    HIFN_IV_LENGTH, cmd->iv);
2156 				else if (crp->crp_flags & CRYPTO_F_IOV)
2157 					cuio_copydata(cmd->srcu.src_io,
2158 					    enccrd->crd_inject,
2159 					    HIFN_IV_LENGTH, cmd->iv);
2160 			}
2161 		}
2162 
2163 		cmd->ck = enccrd->crd_key;
2164 		cmd->cklen = enccrd->crd_klen >> 3;
2165 
2166 		if (sc->sc_sessions[session].hs_state == HS_STATE_USED)
2167 			cmd->cry_masks |= HIFN_CRYPT_CMD_NEW_KEY;
2168 	}
2169 
2170 	if (maccrd) {
2171 		cmd->maccrd = maccrd;
2172 		cmd->base_masks |= HIFN_BASE_CMD_MAC;
2173 
2174 		switch (maccrd->crd_alg) {
2175 		case CRYPTO_MD5:
2176 			cmd->mac_masks |= HIFN_MAC_CMD_ALG_MD5 |
2177 			    HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HASH |
2178 			    HIFN_MAC_CMD_POS_IPSEC;
2179 			break;
2180 		case CRYPTO_MD5_HMAC:
2181 			cmd->mac_masks |= HIFN_MAC_CMD_ALG_MD5 |
2182 			    HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HMAC |
2183 			    HIFN_MAC_CMD_POS_IPSEC | HIFN_MAC_CMD_TRUNC;
2184 			break;
2185 		case CRYPTO_SHA1:
2186 			cmd->mac_masks |= HIFN_MAC_CMD_ALG_SHA1 |
2187 			    HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HASH |
2188 			    HIFN_MAC_CMD_POS_IPSEC;
2189 			break;
2190 		case CRYPTO_SHA1_HMAC:
2191 			cmd->mac_masks |= HIFN_MAC_CMD_ALG_SHA1 |
2192 			    HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HMAC |
2193 			    HIFN_MAC_CMD_POS_IPSEC | HIFN_MAC_CMD_TRUNC;
2194 			break;
2195 		}
2196 
2197 		if ((maccrd->crd_alg == CRYPTO_SHA1_HMAC ||
2198 		     maccrd->crd_alg == CRYPTO_MD5_HMAC) &&
2199 		    sc->sc_sessions[session].hs_state == HS_STATE_USED) {
2200 			cmd->mac_masks |= HIFN_MAC_CMD_NEW_KEY;
2201 			bcopy(maccrd->crd_key, cmd->mac, maccrd->crd_klen >> 3);
2202 			bzero(cmd->mac + (maccrd->crd_klen >> 3),
2203 			    HIFN_MAC_KEY_LENGTH - (maccrd->crd_klen >> 3));
2204 		}
2205 	}
2206 
2207 	cmd->crp = crp;
2208 	cmd->session_num = session;
2209 	cmd->softc = sc;
2210 
2211 	err = hifn_crypto(sc, cmd, crp, hint);
2212 	if (err == 0) {
2213 		if (enccrd)
2214 			sc->sc_sessions[session].hs_prev_op =
2215 				enccrd->crd_flags & CRD_F_ENCRYPT;
2216 		if (sc->sc_sessions[session].hs_state == HS_STATE_USED)
2217 			sc->sc_sessions[session].hs_state = HS_STATE_KEY;
2218 		return 0;
2219 	} else if (err == ERESTART) {
2220 		/*
2221 		 * There weren't enough resources to dispatch the request
2222 		 * to the part.  Notify the caller so they'll requeue this
2223 		 * request and resubmit it again soon.
2224 		 */
2225 #ifdef HIFN_DEBUG
2226 		if (hifn_debug)
2227 			printf(sc->sc_dv.dv_xname, "requeue request\n");
2228 #endif
2229 		free(cmd, M_DEVBUF);
2230 		sc->sc_needwakeup |= CRYPTO_SYMQ;
2231 		return (err);
2232 	}
2233 
2234 errout:
2235 	if (cmd != NULL)
2236 		free(cmd, M_DEVBUF);
2237 	if (err == EINVAL)
2238 		hifnstats.hst_invalid++;
2239 	else
2240 		hifnstats.hst_nomem++;
2241 	crp->crp_etype = err;
2242 	crypto_done(crp);
2243 	return (0);
2244 }
2245 
/*
 * Abort all commands outstanding on the result ring after a hardware
 * fault.  Completed entries (VALID bit clear) are salvaged through the
 * normal callback path; incomplete entries are torn down and their
 * requests failed with ENOMEM.  Finally the board is reset and the DMA
 * rings and PCI registers are reinitialized.
 */
void
hifn_abort(struct hifn_softc *sc)
{
	struct hifn_dma *dma = sc->sc_dma;
	struct hifn_command *cmd;
	struct cryptop *crp;
	int i, u;

	/* Walk the in-use portion of the result ring. */
	i = dma->resk; u = dma->resu;
	while (u != 0) {
		cmd = dma->hifn_commands[i];
		KASSERT(cmd != NULL /*, ("hifn_abort: null cmd slot %u", i)*/);
		dma->hifn_commands[i] = NULL;
		crp = cmd->crp;

		if ((dma->resr[i].l & htole32(HIFN_D_VALID)) == 0) {
			/* Salvage what we can. */
			u_int8_t *macbuf;

			if (cmd->base_masks & HIFN_BASE_CMD_MAC) {
				/* MAC result sits 12 bytes into the
				 * result buffer for this layout. */
				macbuf = dma->result_bufs[i];
				macbuf += 12;
			} else
				macbuf = NULL;
			hifnstats.hst_opackets++;
			hifn_callback(sc, cmd, macbuf);
		} else {
			/* Device never finished this one: unmap and fail. */
			if (cmd->src_map == cmd->dst_map) {
				bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
						0, cmd->src_map->dm_mapsize,
				    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
			} else {
				bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
				    0, cmd->src_map->dm_mapsize,
				    BUS_DMASYNC_POSTWRITE);
				bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
				    0, cmd->dst_map->dm_mapsize,
				    BUS_DMASYNC_POSTREAD);
			}

			/* A separate dst chain replaces the caller's buf. */
			if (cmd->srcu.src_m != cmd->dstu.dst_m) {
				m_freem(cmd->srcu.src_m);
				crp->crp_buf = (caddr_t)cmd->dstu.dst_m;
			}

			/* non-shared buffers cannot be restarted */
			if (cmd->src_map != cmd->dst_map) {
				/*
				 * XXX should be EAGAIN, delayed until
				 * after the reset.
				 */
				crp->crp_etype = ENOMEM;
				bus_dmamap_unload(sc->sc_dmat, cmd->dst_map);
				bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map);
			} else
				crp->crp_etype = ENOMEM;

			bus_dmamap_unload(sc->sc_dmat, cmd->src_map);
			bus_dmamap_destroy(sc->sc_dmat, cmd->src_map);

			free(cmd, M_DEVBUF);
			if (crp->crp_etype != EAGAIN)
				crypto_done(crp);
		}

		if (++i == HIFN_D_RES_RSIZE)
			i = 0;
		u--;
	}
	dma->resk = i; dma->resu = u;

	/* Force upload of key next time */
	for (i = 0; i < sc->sc_maxses; i++)
		if (sc->sc_sessions[i].hs_state == HS_STATE_KEY)
			sc->sc_sessions[i].hs_state = HS_STATE_USED;

	hifn_reset_board(sc, 1);
	hifn_init_dma(sc);
	hifn_init_pci_registers(sc);
}
2326 
/*
 * Completion handler for a symmetric crypto command.  Syncs and tears
 * down the DMA maps, substitutes/trims a separately allocated
 * destination mbuf chain, copies back slop bytes and the computed MAC,
 * saves the last ciphertext block as the session's next CBC IV,
 * reclaims destination-ring slots and completes the request through
 * crypto_done().
 */
void
hifn_callback(sc, cmd, resbuf)
	struct hifn_softc *sc;
	struct hifn_command *cmd;
	u_int8_t *resbuf;
{
	struct hifn_dma *dma = sc->sc_dma;
	struct cryptop *crp = cmd->crp;
	struct cryptodesc *crd;
	struct mbuf *m;
	int totlen, i, u;

	if (cmd->src_map == cmd->dst_map)
		bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
		    0, cmd->src_map->dm_mapsize,
		    BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
	else {
		bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
		    0, cmd->src_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
		    0, cmd->dst_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
	}

	if (crp->crp_flags & CRYPTO_F_IMBUF) {
		if (cmd->srcu.src_m != cmd->dstu.dst_m) {
			/*
			 * A new destination chain was allocated: trim it
			 * to the source length, hand it to the caller and
			 * release the original source chain.
			 */
			crp->crp_buf = (caddr_t)cmd->dstu.dst_m;
			totlen = cmd->src_map->dm_mapsize;
			for (m = cmd->dstu.dst_m; m != NULL; m = m->m_next) {
				if (totlen < m->m_len) {
					m->m_len = totlen;
					totlen = 0;
				} else
					totlen -= m->m_len;
			}
			cmd->dstu.dst_m->m_pkthdr.len =
			    cmd->srcu.src_m->m_pkthdr.len;
			m_freem(cmd->srcu.src_m);
		}
	}

	if (cmd->sloplen != 0) {
		/* Copy back the tail bytes staged in the slop area. */
		if (crp->crp_flags & CRYPTO_F_IMBUF)
			m_copyback((struct mbuf *)crp->crp_buf,
			    cmd->src_map->dm_mapsize - cmd->sloplen,
			    cmd->sloplen, (caddr_t)&dma->slop[cmd->slopidx]);
		else if (crp->crp_flags & CRYPTO_F_IOV)
			cuio_copyback((struct uio *)crp->crp_buf,
			    cmd->src_map->dm_mapsize - cmd->sloplen,
			    cmd->sloplen, (caddr_t)&dma->slop[cmd->slopidx]);
	}

	/* Reclaim completed destination-ring descriptors. */
	i = dma->dstk; u = dma->dstu;
	while (u != 0) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
		    offsetof(struct hifn_dma, dstr[i]), sizeof(struct hifn_desc),
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		if (dma->dstr[i].l & htole32(HIFN_D_VALID)) {
			/* Still owned by the device: stop here. */
			bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
			    offsetof(struct hifn_dma, dstr[i]),
			    sizeof(struct hifn_desc),
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			break;
		}
		/* Slot HIFN_D_DST_RSIZE is the jump descriptor; it does
		 * not count against the in-use total. */
		if (++i == (HIFN_D_DST_RSIZE + 1))
			i = 0;
		else
			u--;
	}
	dma->dstk = i; dma->dstu = u;

	hifnstats.hst_obytes += cmd->dst_map->dm_mapsize;

	/*
	 * For CBC encryption, remember the last ciphertext block as
	 * the IV for the session's next request.
	 */
	if ((cmd->base_masks & (HIFN_BASE_CMD_CRYPT | HIFN_BASE_CMD_DECODE)) ==
	    HIFN_BASE_CMD_CRYPT) {
		for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
			if (crd->crd_alg != CRYPTO_DES_CBC &&
			    crd->crd_alg != CRYPTO_3DES_CBC)
				continue;
			if (crp->crp_flags & CRYPTO_F_IMBUF)
				m_copydata((struct mbuf *)crp->crp_buf,
				    crd->crd_skip + crd->crd_len - HIFN_IV_LENGTH,
				    HIFN_IV_LENGTH,
				    cmd->softc->sc_sessions[cmd->session_num].hs_iv);
			else if (crp->crp_flags & CRYPTO_F_IOV) {
				cuio_copydata((struct uio *)crp->crp_buf,
				    crd->crd_skip + crd->crd_len - HIFN_IV_LENGTH,
				    HIFN_IV_LENGTH,
				    cmd->softc->sc_sessions[cmd->session_num].hs_iv);
			}
			/* XXX We do not handle contig data */
			break;
		}
	}

	if (cmd->base_masks & HIFN_BASE_CMD_MAC) {
		u_int8_t *macbuf;

		/* The MAC follows the base (and optional comp) results. */
		macbuf = resbuf + sizeof(struct hifn_base_result);
		if (cmd->base_masks & HIFN_BASE_CMD_COMP)
			macbuf += sizeof(struct hifn_comp_result);
		macbuf += sizeof(struct hifn_mac_result);

		for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
			int len;

			/* Digest length: full hash, or 12 for truncated
			 * HMAC (HIFN_MAC_CMD_TRUNC). */
			if (crd->crd_alg == CRYPTO_MD5)
				len = 16;
			else if (crd->crd_alg == CRYPTO_SHA1)
				len = 20;
			else if (crd->crd_alg == CRYPTO_MD5_HMAC ||
			    crd->crd_alg == CRYPTO_SHA1_HMAC)
				len = 12;
			else
				continue;

			if (crp->crp_flags & CRYPTO_F_IMBUF)
				m_copyback((struct mbuf *)crp->crp_buf,
				    crd->crd_inject, len, macbuf);
			else if ((crp->crp_flags & CRYPTO_F_IOV) && crp->crp_mac)
				bcopy((caddr_t)macbuf, crp->crp_mac, len);
			break;
		}
	}

	if (cmd->src_map != cmd->dst_map) {
		bus_dmamap_unload(sc->sc_dmat, cmd->dst_map);
		bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map);
	}
	bus_dmamap_unload(sc->sc_dmat, cmd->src_map);
	bus_dmamap_destroy(sc->sc_dmat, cmd->src_map);
	free(cmd, M_DEVBUF);
	crypto_done(crp);
}
2460 
2461 #ifdef HAVE_CRYPTO_LSZ
2462 
2463 int
2464 hifn_compression(struct hifn_softc *sc, struct cryptop *crp,
2465     struct hifn_command *cmd)
2466 {
2467 	struct cryptodesc *crd = crp->crp_desc;
2468 	int s, err = 0;
2469 
2470 	cmd->compcrd = crd;
2471 	cmd->base_masks |= HIFN_BASE_CMD_COMP;
2472 
2473 	if ((crp->crp_flags & CRYPTO_F_IMBUF) == 0) {
2474 		/*
2475 		 * XXX can only handle mbufs right now since we can
2476 		 * XXX dynamically resize them.
2477 		 */
2478 		err = EINVAL;
2479 		return (ENOMEM);
2480 	}
2481 
2482 	if ((crd->crd_flags & CRD_F_COMP) == 0)
2483 		cmd->base_masks |= HIFN_BASE_CMD_DECODE;
2484 	if (crd->crd_alg == CRYPTO_LZS_COMP)
2485 		cmd->comp_masks |= HIFN_COMP_CMD_ALG_LZS |
2486 		    HIFN_COMP_CMD_CLEARHIST;
2487 
2488 	if (bus_dmamap_create(sc->sc_dmat, HIFN_MAX_DMALEN, MAX_SCATTER,
2489 	    HIFN_MAX_SEGLEN, 0, BUS_DMA_NOWAIT, &cmd->src_map)) {
2490 		err = ENOMEM;
2491 		goto fail;
2492 	}
2493 
2494 	if (bus_dmamap_create(sc->sc_dmat, HIFN_MAX_DMALEN, MAX_SCATTER,
2495 	    HIFN_MAX_SEGLEN, 0, BUS_DMA_NOWAIT, &cmd->dst_map)) {
2496 		err = ENOMEM;
2497 		goto fail;
2498 	}
2499 
2500 	if (crp->crp_flags & CRYPTO_F_IMBUF) {
2501 		int len;
2502 
2503 		if (bus_dmamap_load_mbuf(sc->sc_dmat, cmd->src_map,
2504 		    cmd->srcu.src_m, BUS_DMA_NOWAIT)) {
2505 			err = ENOMEM;
2506 			goto fail;
2507 		}
2508 
2509 		len = cmd->src_map->dm_mapsize / MCLBYTES;
2510 		if ((cmd->src_map->dm_mapsize % MCLBYTES) != 0)
2511 			len++;
2512 		len *= MCLBYTES;
2513 
2514 		if ((crd->crd_flags & CRD_F_COMP) == 0)
2515 			len *= 4;
2516 
2517 		if (len > HIFN_MAX_DMALEN)
2518 			len = HIFN_MAX_DMALEN;
2519 
2520 		cmd->dstu.dst_m = hifn_mkmbuf_chain(len, cmd->srcu.src_m);
2521 		if (cmd->dstu.dst_m == NULL) {
2522 			err = ENOMEM;
2523 			goto fail;
2524 		}
2525 
2526 		if (bus_dmamap_load_mbuf(sc->sc_dmat, cmd->dst_map,
2527 		    cmd->dstu.dst_m, BUS_DMA_NOWAIT)) {
2528 			err = ENOMEM;
2529 			goto fail;
2530 		}
2531 	} else if (crp->crp_flags & CRYPTO_F_IOV) {
2532 		if (bus_dmamap_load_uio(sc->sc_dmat, cmd->src_map,
2533 		    cmd->srcu.src_io, BUS_DMA_NOWAIT)) {
2534 			err = ENOMEM;
2535 			goto fail;
2536 		}
2537 		if (bus_dmamap_load_uio(sc->sc_dmat, cmd->dst_map,
2538 		    cmd->dstu.dst_io, BUS_DMA_NOWAIT)) {
2539 			err = ENOMEM;
2540 			goto fail;
2541 		}
2542 	}
2543 
2544 	if (cmd->src_map == cmd->dst_map)
2545 		bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
2546 		    0, cmd->src_map->dm_mapsize,
2547 		    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
2548 	else {
2549 		bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
2550 		    0, cmd->src_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
2551 		bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
2552 		    0, cmd->dst_map->dm_mapsize, BUS_DMASYNC_PREREAD);
2553 	}
2554 
2555 	cmd->crp = crp;
2556 	/*
2557 	 * Always use session 0.  The modes of compression we use are
2558 	 * stateless and there is always at least one compression
2559 	 * context, zero.
2560 	 */
2561 	cmd->session_num = 0;
2562 	cmd->softc = sc;
2563 
2564 	s = splnet();
2565 	err = hifn_compress_enter(sc, cmd);
2566 	splx(s);
2567 
2568 	if (err != 0)
2569 		goto fail;
2570 	return (0);
2571 
2572 fail:
2573 	if (cmd->dst_map != NULL) {
2574 		if (cmd->dst_map->dm_nsegs > 0)
2575 			bus_dmamap_unload(sc->sc_dmat, cmd->dst_map);
2576 		bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map);
2577 	}
2578 	if (cmd->src_map != NULL) {
2579 		if (cmd->src_map->dm_nsegs > 0)
2580 			bus_dmamap_unload(sc->sc_dmat, cmd->src_map);
2581 		bus_dmamap_destroy(sc->sc_dmat, cmd->src_map);
2582 	}
2583 	free(cmd, M_DEVBUF);
2584 	if (err == EINVAL)
2585 		hifnstats.hst_invalid++;
2586 	else
2587 		hifnstats.hst_nomem++;
2588 	crp->crp_etype = err;
2589 	crypto_done(crp);
2590 	return (0);
2591 }
2592 
2593 /*
2594  * must be called at splnet()
2595  */
2596 int
2597 hifn_compress_enter(struct hifn_softc *sc, struct hifn_command *cmd)
2598 {
2599 	struct hifn_dma *dma = sc->sc_dma;
2600 	int cmdi, resi;
2601 	u_int32_t cmdlen;
2602 
2603 	if ((dma->cmdu + 1) > HIFN_D_CMD_RSIZE ||
2604 	    (dma->resu + 1) > HIFN_D_CMD_RSIZE)
2605 		return (ENOMEM);
2606 
2607 	if ((dma->srcu + cmd->src_map->dm_nsegs) > HIFN_D_SRC_RSIZE ||
2608 	    (dma->dstu + cmd->dst_map->dm_nsegs) > HIFN_D_DST_RSIZE)
2609 		return (ENOMEM);
2610 
2611 	if (dma->cmdi == HIFN_D_CMD_RSIZE) {
2612 		dma->cmdi = 0;
2613 		dma->cmdr[HIFN_D_CMD_RSIZE].l = htole32(HIFN_D_VALID |
2614 		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
2615 		HIFN_CMDR_SYNC(sc, HIFN_D_CMD_RSIZE,
2616 		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
2617 	}
2618 	cmdi = dma->cmdi++;
2619 	cmdlen = hifn_write_command(cmd, dma->command_bufs[cmdi]);
2620 	HIFN_CMD_SYNC(sc, cmdi, BUS_DMASYNC_PREWRITE);
2621 
2622 	/* .p for command/result already set */
2623 	dma->cmdr[cmdi].l = htole32(cmdlen | HIFN_D_VALID | HIFN_D_LAST |
2624 	    HIFN_D_MASKDONEIRQ);
2625 	HIFN_CMDR_SYNC(sc, cmdi,
2626 	    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
2627 	dma->cmdu++;
2628 	if (sc->sc_c_busy == 0) {
2629 		WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_C_CTRL_ENA);
2630 		sc->sc_c_busy = 1;
2631 		SET_LED(sc, HIFN_MIPSRST_LED0);
2632 	}
2633 
2634 	/*
2635 	 * We don't worry about missing an interrupt (which a "command wait"
2636 	 * interrupt salvages us from), unless there is more than one command
2637 	 * in the queue.
2638 	 */
2639 	if (dma->cmdu > 1) {
2640 		sc->sc_dmaier |= HIFN_DMAIER_C_WAIT;
2641 		WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
2642 	}
2643 
2644 	hifnstats.hst_ipackets++;
2645 	hifnstats.hst_ibytes += cmd->src_map->dm_mapsize;
2646 
2647 	hifn_dmamap_load_src(sc, cmd);
2648 	if (sc->sc_s_busy == 0) {
2649 		WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_S_CTRL_ENA);
2650 		sc->sc_s_busy = 1;
2651 		SET_LED(sc, HIFN_MIPSRST_LED1);
2652 	}
2653 
2654 	/*
2655 	 * Unlike other descriptors, we don't mask done interrupt from
2656 	 * result descriptor.
2657 	 */
2658 	if (dma->resi == HIFN_D_RES_RSIZE) {
2659 		dma->resi = 0;
2660 		dma->resr[HIFN_D_RES_RSIZE].l = htole32(HIFN_D_VALID |
2661 		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
2662 		HIFN_RESR_SYNC(sc, HIFN_D_RES_RSIZE,
2663 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2664 	}
2665 	resi = dma->resi++;
2666 	dma->hifn_commands[resi] = cmd;
2667 	HIFN_RES_SYNC(sc, resi, BUS_DMASYNC_PREREAD);
2668 	dma->resr[resi].l = htole32(HIFN_MAX_RESULT |
2669 	    HIFN_D_VALID | HIFN_D_LAST);
2670 	HIFN_RESR_SYNC(sc, resi,
2671 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2672 	dma->resu++;
2673 	if (sc->sc_r_busy == 0) {
2674 		WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_R_CTRL_ENA);
2675 		sc->sc_r_busy = 1;
2676 		SET_LED(sc, HIFN_MIPSRST_LED2);
2677 	}
2678 
2679 	if (cmd->sloplen)
2680 		cmd->slopidx = resi;
2681 
2682 	hifn_dmamap_load_dst(sc, cmd);
2683 
2684 	if (sc->sc_d_busy == 0) {
2685 		WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_D_CTRL_ENA);
2686 		sc->sc_d_busy = 1;
2687 	}
2688 	sc->sc_active = 5;
2689 	cmd->cmd_callback = hifn_callback_comp;
2690 	return (0);
2691 }
2692 
2693 void
2694 hifn_callback_comp(struct hifn_softc *sc, struct hifn_command *cmd,
2695     u_int8_t *resbuf)
2696 {
2697 	struct hifn_base_result baseres;
2698 	struct cryptop *crp = cmd->crp;
2699 	struct hifn_dma *dma = sc->sc_dma;
2700 	struct mbuf *m;
2701 	int err = 0, i, u;
2702 	u_int32_t olen;
2703 	bus_size_t dstsize;
2704 
2705 	bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
2706 	    0, cmd->src_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
2707 	bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
2708 	    0, cmd->dst_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
2709 
2710 	dstsize = cmd->dst_map->dm_mapsize;
2711 	bus_dmamap_unload(sc->sc_dmat, cmd->dst_map);
2712 
2713 	bcopy(resbuf, &baseres, sizeof(struct hifn_base_result));
2714 
2715 	i = dma->dstk; u = dma->dstu;
2716 	while (u != 0) {
2717 		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
2718 		    offsetof(struct hifn_dma, dstr[i]), sizeof(struct hifn_desc),
2719 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2720 		if (dma->dstr[i].l & htole32(HIFN_D_VALID)) {
2721 			bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
2722 			    offsetof(struct hifn_dma, dstr[i]),
2723 			    sizeof(struct hifn_desc),
2724 			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2725 			break;
2726 		}
2727 		if (++i == (HIFN_D_DST_RSIZE + 1))
2728 			i = 0;
2729 		else
2730 			u--;
2731 	}
2732 	dma->dstk = i; dma->dstu = u;
2733 
2734 	if (baseres.flags & htole16(HIFN_BASE_RES_DSTOVERRUN)) {
2735 		bus_size_t xlen;
2736 
2737 		xlen = dstsize;
2738 
2739 		m_freem(cmd->dstu.dst_m);
2740 
2741 		if (xlen == HIFN_MAX_DMALEN) {
2742 			/* We've done all we can. */
2743 			err = E2BIG;
2744 			goto out;
2745 		}
2746 
2747 		xlen += MCLBYTES;
2748 
2749 		if (xlen > HIFN_MAX_DMALEN)
2750 			xlen = HIFN_MAX_DMALEN;
2751 
2752 		cmd->dstu.dst_m = hifn_mkmbuf_chain(xlen,
2753 		    cmd->srcu.src_m);
2754 		if (cmd->dstu.dst_m == NULL) {
2755 			err = ENOMEM;
2756 			goto out;
2757 		}
2758 		if (bus_dmamap_load_mbuf(sc->sc_dmat, cmd->dst_map,
2759 		    cmd->dstu.dst_m, BUS_DMA_NOWAIT)) {
2760 			err = ENOMEM;
2761 			goto out;
2762 		}
2763 
2764 		bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
2765 		    0, cmd->src_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
2766 		bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
2767 		    0, cmd->dst_map->dm_mapsize, BUS_DMASYNC_PREREAD);
2768 
2769 		/* already at splnet... */
2770 		err = hifn_compress_enter(sc, cmd);
2771 		if (err != 0)
2772 			goto out;
2773 		return;
2774 	}
2775 
2776 	olen = dstsize - (letoh16(baseres.dst_cnt) |
2777 	    (((letoh16(baseres.session) & HIFN_BASE_RES_DSTLEN_M) >>
2778 	    HIFN_BASE_RES_DSTLEN_S) << 16));
2779 
2780 	crp->crp_olen = olen - cmd->compcrd->crd_skip;
2781 
2782 	bus_dmamap_unload(sc->sc_dmat, cmd->src_map);
2783 	bus_dmamap_destroy(sc->sc_dmat, cmd->src_map);
2784 	bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map);
2785 
2786 	m = cmd->dstu.dst_m;
2787 	if (m->m_flags & M_PKTHDR)
2788 		m->m_pkthdr.len = olen;
2789 	crp->crp_buf = (caddr_t)m;
2790 	for (; m != NULL; m = m->m_next) {
2791 		if (olen >= m->m_len)
2792 			olen -= m->m_len;
2793 		else {
2794 			m->m_len = olen;
2795 			olen = 0;
2796 		}
2797 	}
2798 
2799 	m_freem(cmd->srcu.src_m);
2800 	free(cmd, M_DEVBUF);
2801 	crp->crp_etype = 0;
2802 	crypto_done(crp);
2803 	return;
2804 
2805 out:
2806 	if (cmd->dst_map != NULL) {
2807 		if (cmd->src_map->dm_nsegs != 0)
2808 			bus_dmamap_unload(sc->sc_dmat, cmd->src_map);
2809 		bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map);
2810 	}
2811 	if (cmd->src_map != NULL) {
2812 		if (cmd->src_map->dm_nsegs != 0)
2813 			bus_dmamap_unload(sc->sc_dmat, cmd->src_map);
2814 		bus_dmamap_destroy(sc->sc_dmat, cmd->src_map);
2815 	}
2816 	if (cmd->dstu.dst_m != NULL)
2817 		m_freem(cmd->dstu.dst_m);
2818 	free(cmd, M_DEVBUF);
2819 	crp->crp_etype = err;
2820 	crypto_done(crp);
2821 }
2822 
2823 struct mbuf *
2824 hifn_mkmbuf_chain(int totlen, struct mbuf *mtemplate)
2825 {
2826 	int len;
2827 	struct mbuf *m, *m0, *mlast;
2828 
2829 	if (mtemplate->m_flags & M_PKTHDR) {
2830 		len = MHLEN;
2831 		MGETHDR(m0, M_DONTWAIT, MT_DATA);
2832 	} else {
2833 		len = MLEN;
2834 		MGET(m0, M_DONTWAIT, MT_DATA);
2835 	}
2836 	if (m0 == NULL)
2837 		return (NULL);
2838 	if (len == MHLEN)
2839 		M_DUP_PKTHDR(m0, mtemplate);
2840 	MCLGET(m0, M_DONTWAIT);
2841 	if (!(m0->m_flags & M_EXT))
2842 		m_freem(m0);
2843 	len = MCLBYTES;
2844 
2845 	totlen -= len;
2846 	m0->m_pkthdr.len = m0->m_len = len;
2847 	mlast = m0;
2848 
2849 	while (totlen > 0) {
2850 		MGET(m, M_DONTWAIT, MT_DATA);
2851 		if (m == NULL) {
2852 			m_freem(m0);
2853 			return (NULL);
2854 		}
2855 		MCLGET(m, M_DONTWAIT);
2856 		if (!(m->m_flags & M_EXT)) {
2857 			m_freem(m0);
2858 			return (NULL);
2859 		}
2860 		len = MCLBYTES;
2861 		m->m_len = len;
2862 		if (m0->m_flags & M_PKTHDR)
2863 			m0->m_pkthdr.len += len;
2864 		totlen -= len;
2865 
2866 		mlast->m_next = m;
2867 		mlast = m;
2868 	}
2869 
2870 	return (m0);
2871 }
2872 #endif	/* HAVE_CRYPTO_LSZ */
2873 
2874 void
2875 hifn_write_4(sc, reggrp, reg, val)
2876 	struct hifn_softc *sc;
2877 	int reggrp;
2878 	bus_size_t reg;
2879 	u_int32_t val;
2880 {
2881 	/*
2882 	 * 7811 PB3 rev/2 parts lock-up on burst writes to Group 0
2883 	 * and Group 1 registers; avoid conditions that could create
2884 	 * burst writes by doing a read in between the writes.
2885 	 */
2886 	if (sc->sc_flags & HIFN_NO_BURSTWRITE) {
2887 		if (sc->sc_waw_lastgroup == reggrp &&
2888 		    sc->sc_waw_lastreg == reg - 4) {
2889 			bus_space_read_4(sc->sc_st1, sc->sc_sh1, HIFN_1_REVID);
2890 		}
2891 		sc->sc_waw_lastgroup = reggrp;
2892 		sc->sc_waw_lastreg = reg;
2893 	}
2894 	if (reggrp == 0)
2895 		bus_space_write_4(sc->sc_st0, sc->sc_sh0, reg, val);
2896 	else
2897 		bus_space_write_4(sc->sc_st1, sc->sc_sh1, reg, val);
2898 
2899 }
2900 
2901 u_int32_t
2902 hifn_read_4(sc, reggrp, reg)
2903 	struct hifn_softc *sc;
2904 	int reggrp;
2905 	bus_size_t reg;
2906 {
2907 	if (sc->sc_flags & HIFN_NO_BURSTWRITE) {
2908 		sc->sc_waw_lastgroup = -1;
2909 		sc->sc_waw_lastreg = 1;
2910 	}
2911 	if (reggrp == 0)
2912 		return (bus_space_read_4(sc->sc_st0, sc->sc_sh0, reg));
2913 	return (bus_space_read_4(sc->sc_st1, sc->sc_sh1, reg));
2914 }
2915