xref: /netbsd-src/sys/dev/pci/hifn7751.c (revision d710132b4b8ce7f7cccaaf660cb16aa16b4077a0)
1 /*	$NetBSD: hifn7751.c,v 1.14 2003/05/03 18:11:34 wiz Exp $	*/
2 /*	$OpenBSD: hifn7751.c,v 1.47 2000/10/11 13:15:41 itojun Exp $	*/
3 
4 /*
5  * Invertex AEON / Hi/fn 7751 driver
6  * Copyright (c) 1999 Invertex Inc. All rights reserved.
7  * Copyright (c) 1999 Theo de Raadt
8  * Copyright (c) 2000 Network Security Technologies, Inc.
9  *			http://www.netsec.net
10  *
11  * This driver is based on a previous driver by Invertex, for which they
12  * requested:  Please send any comments, feedback, bug-fixes, or feature
13  * requests to software@invertex.com.
14  *
15  * Redistribution and use in source and binary forms, with or without
16  * modification, are permitted provided that the following conditions
17  * are met:
18  *
19  * 1. Redistributions of source code must retain the above copyright
20  *   notice, this list of conditions and the following disclaimer.
21  * 2. Redistributions in binary form must reproduce the above copyright
22  *   notice, this list of conditions and the following disclaimer in the
23  *   documentation and/or other materials provided with the distribution.
24  * 3. The name of the author may not be used to endorse or promote products
25  *   derived from this software without specific prior written permission.
26  *
27  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
28  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
29  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
30  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
31  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
32  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
33  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
34  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
35  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
36  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
37  */
38 
39 /*
40  * Driver for the Hi/Fn 7751 encryption processor.
41  */
42 
43 #include <sys/cdefs.h>
44 __KERNEL_RCSID(0, "$NetBSD: hifn7751.c,v 1.14 2003/05/03 18:11:34 wiz Exp $");
45 
46 #include <sys/param.h>
47 #include <sys/systm.h>
48 #include <sys/proc.h>
49 #include <sys/errno.h>
50 #include <sys/malloc.h>
51 #include <sys/kernel.h>
52 #include <sys/mbuf.h>
53 #ifdef __OpenBSD__
54 #include <vm/vm.h>
55 #include <vm/vm_extern.h>
56 #include <vm/pmap.h>
57 #else
58 #include <uvm/uvm.h>
59 #include <uvm/uvm_extern.h>
60 #include <uvm/uvm_pmap.h>
61 #endif
62 #include <machine/pmap.h>
63 #include <sys/device.h>
64 
65 #ifdef __OpenBSD__
66 #include <crypto/crypto.h>
67 #include <dev/rndvar.h>
68 #endif
69 
70 #include <dev/pci/pcireg.h>
71 #include <dev/pci/pcivar.h>
72 #include <dev/pci/pcidevs.h>
73 
74 #include <dev/pci/hifn7751var.h>
75 #include <dev/pci/hifn7751reg.h>
76 
77 #undef HIFN_DEBUG
78 
79 /*
80  * Prototypes and count for the pci_device structure
81  */
82 #ifdef __OpenBSD__
83 int hifn_probe		__P((struct device *, void *, void *));
84 #else
85 int hifn_probe		__P((struct device *, struct cfdata *, void *));
86 #endif
87 void hifn_attach	__P((struct device *, struct device *, void *));
88 
89 CFATTACH_DECL(hifn, sizeof(struct hifn_softc),
90     hifn_probe, hifn_attach, NULL, NULL);
91 
92 #ifdef __OpenBSD__
93 struct cfdriver hifn_cd = {
94 	0, "hifn", DV_DULL
95 };
96 #endif
97 
98 void	hifn_reset_board __P((struct hifn_softc *));
99 int	hifn_enable_crypto __P((struct hifn_softc *, pcireg_t));
100 void	hifn_init_dma __P((struct hifn_softc *));
101 void	hifn_init_pci_registers __P((struct hifn_softc *));
102 int	hifn_sramsize __P((struct hifn_softc *));
103 int	hifn_dramsize __P((struct hifn_softc *));
104 void	hifn_ramtype __P((struct hifn_softc *));
105 void	hifn_sessions __P((struct hifn_softc *));
106 int	hifn_intr __P((void *));
107 u_int	hifn_write_command __P((struct hifn_command *, u_int8_t *));
108 u_int32_t hifn_next_signature __P((u_int32_t a, u_int cnt));
109 #ifdef __OpenBSD__
110 int	hifn_newsession __P((u_int32_t *, struct cryptoini *));
111 int	hifn_freesession __P((u_int64_t));
112 int	hifn_process __P((struct cryptop *));
113 void	hifn_callback __P((struct hifn_softc *, struct hifn_command *, u_int8_t *));
114 #endif
115 int	hifn_crypto __P((struct hifn_softc *, hifn_command_t *));
116 int	hifn_readramaddr __P((struct hifn_softc *, int, u_int8_t *, int));
117 int	hifn_writeramaddr __P((struct hifn_softc *, int, u_int8_t *, int));
118 
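/*
 * Driver-wide statistics: byte and packet counts in each direction,
 * plus counts of invalid and out-of-memory requests.
 */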
119 struct hifn_stats {
120 	u_int64_t hst_ibytes;
121 	u_int64_t hst_obytes;
122 	u_int32_t hst_ipackets;
123 	u_int32_t hst_opackets;
124 	u_int32_t hst_invalid;
125 	u_int32_t hst_nomem;
126 } hifnstats;
127 
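/*
 * Match any of the boards handled by this driver: the Invertex AEON,
 * the Hi/fn 7751 itself, and the NetSec 7751.
 */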
128 int
129 hifn_probe(parent, match, aux)
130 	struct device *parent;
131 #ifdef __OpenBSD__
132 	void *match;
133 #else
134 	struct cfdata *match;
135 #endif
136 	void *aux;
137 {
138 	struct pci_attach_args *pa = (struct pci_attach_args *) aux;
139 
140 	if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_INVERTEX &&
141 	    PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_INVERTEX_AEON)
142 		return (1);
143 	if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_HIFN &&
144 	    PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_HIFN_7751)
145 		return (1);
146 	if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_NETSEC &&
147 	    PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_NETSEC_7751)
148 		return (1);
149 	return (0);
150 }
151 
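/*
 * Attach the device: map both register windows, allocate and map the
 * shared descriptor-ring memory, unlock the crypto engine, size the
 * on-board RAM and establish the interrupt handler.
 */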
152 void
153 hifn_attach(parent, self, aux)
154 	struct device *parent, *self;
155 	void *aux;
156 {
157 	struct hifn_softc *sc = (struct hifn_softc *)self;
158 	struct pci_attach_args *pa = aux;
159 	pci_chipset_tag_t pc = pa->pa_pc;
160 	pci_intr_handle_t ih;
161 	const char *intrstr = NULL;
162 	char rbase;
163 	bus_size_t iosize0, iosize1;
164 	u_int32_t cmd;
165 	u_int16_t ena;
166 	bus_dma_segment_t seg;
167 	bus_dmamap_t dmamap;
168 	int rseg;
169 	caddr_t kva;
170 
171 	aprint_naive(": Crypto processor\n");
172 
173 	cmd = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
174 	cmd |= PCI_COMMAND_MEM_ENABLE | PCI_COMMAND_MASTER_ENABLE;
175 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, cmd);
176 	cmd = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
177 
178 	if (!(cmd & PCI_COMMAND_MEM_ENABLE)) {
179 		aprint_error(": failed to enable memory mapping\n");
180 		return;
181 	}
182 
183 	if (pci_mapreg_map(pa, HIFN_BAR0, PCI_MAPREG_TYPE_MEM, 0,
184 	    &sc->sc_st0, &sc->sc_sh0, NULL, &iosize0)) {
185 		aprint_error(": can't find mem space %d\n", 0);
186 		return;
187 	}
188 
189 	if (pci_mapreg_map(pa, HIFN_BAR1, PCI_MAPREG_TYPE_MEM, 0,
190 	    &sc->sc_st1, &sc->sc_sh1, NULL, &iosize1)) {
191 		aprint_error(": can't find mem space %d\n", 1);
192 		goto fail_io0;
193 	}
194 
195 	sc->sc_dmat = pa->pa_dmat;
196 	if (bus_dmamem_alloc(sc->sc_dmat, sizeof(*sc->sc_dma), PAGE_SIZE, 0,
197 	    &seg, 1, &rseg, BUS_DMA_NOWAIT)) {
198 		aprint_error(": can't alloc DMA buffer\n");
199 		goto fail_io1;
200 	}
201 	if (bus_dmamem_map(sc->sc_dmat, &seg, rseg, sizeof(*sc->sc_dma), &kva,
202 	    BUS_DMA_NOWAIT)) {
203 		aprint_error(": can't map DMA buffers (%lu bytes)\n",
204 		    (u_long)sizeof(*sc->sc_dma));
205 		bus_dmamem_free(sc->sc_dmat, &seg, rseg);
206 		goto fail_io1;
207 	}
208 	if (bus_dmamap_create(sc->sc_dmat, sizeof(*sc->sc_dma), 1,
209 	    sizeof(*sc->sc_dma), 0, BUS_DMA_NOWAIT, &dmamap)) {
210 		aprint_error(": can't create DMA map\n");
211 		bus_dmamem_unmap(sc->sc_dmat, kva, sizeof(*sc->sc_dma));
212 		bus_dmamem_free(sc->sc_dmat, &seg, rseg);
213 		goto fail_io1;
214 	}
215 	if (bus_dmamap_load(sc->sc_dmat, dmamap, kva, sizeof(*sc->sc_dma),
216 	    NULL, BUS_DMA_NOWAIT)) {
217 		aprint_error(": can't load DMA map\n");
218 		bus_dmamap_destroy(sc->sc_dmat, dmamap);
219 		bus_dmamem_unmap(sc->sc_dmat, kva, sizeof(*sc->sc_dma));
220 		bus_dmamem_free(sc->sc_dmat, &seg, rseg);
221 		goto fail_io1;
222 	}
223 	sc->sc_dma = (struct hifn_dma *)kva;
224 	memset(sc->sc_dma, 0, sizeof(*sc->sc_dma));
225 
226 	hifn_reset_board(sc);
227 
228 	if (hifn_enable_crypto(sc, pa->pa_id) != 0) {
229 		aprint_error("%s: crypto enabling failed\n",
230 		    sc->sc_dv.dv_xname);
231 		goto fail_mem;
232 	}
233 
234 	hifn_init_dma(sc);
235 	hifn_init_pci_registers(sc);
236 
237 	hifn_ramtype(sc);
238 
239 	if (sc->sc_drammodel == 0)
240 		hifn_sramsize(sc);
241 	else
242 		hifn_dramsize(sc);
243 
244 	if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_NETSEC &&
245 	    PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_NETSEC_7751 &&
246 	    PCI_REVISION(pa->pa_class) == 0x61)
247 		sc->sc_ramsize >>= 1;
248 
249 	/*
250 	 * Reinitialize again, since the DRAM/SRAM detection shifted our ring
251 	 * pointers and may have changed the value we send to the RAM Config
252 	 * Register.
253 	 */
254 	hifn_reset_board(sc);
255 	hifn_init_dma(sc);
256 	hifn_init_pci_registers(sc);
257 
258 	if (pci_intr_map(pa, &ih)) {
259 		aprint_error(": couldn't map interrupt\n");
260 		goto fail_mem;
261 	}
262 	intrstr = pci_intr_string(pc, ih);
263 #ifdef __OpenBSD__
264 	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, hifn_intr, sc,
265 	    self->dv_xname);
266 #else
267 	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, hifn_intr, sc);
268 #endif
269 	if (sc->sc_ih == NULL) {
270 		aprint_error(": couldn't establish interrupt\n");
271 		if (intrstr != NULL)
272 			aprint_normal(" at %s", intrstr);
273 		aprint_normal("\n");
274 		goto fail_mem;
275 	}
276 
277 	hifn_sessions(sc);
278 
279 	rseg = sc->sc_ramsize / 1024;
280 	rbase = 'K';
281 	if (sc->sc_ramsize >= (1024 * 1024)) {
282 		rbase = 'M';
283 		rseg /= 1024;
284 	}
285 	aprint_normal(", %d%cB %cram, %s\n", rseg, rbase,
286 	    sc->sc_drammodel ? 'd' : 's', intrstr);
287 
288 #ifdef __OpenBSD__
289 	sc->sc_cid = crypto_get_driverid();
290 	if (sc->sc_cid < 0)
291 		goto fail_intr;
292 #endif
293 
294 	WRITE_REG_0(sc, HIFN_0_PUCNFG,
295 	    READ_REG_0(sc, HIFN_0_PUCNFG) | HIFN_PUCNFG_CHIPID);
296 	ena = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA;
297 
298 #ifdef __OpenBSD__
299 	switch (ena) {
300 	case HIFN_PUSTAT_ENA_2:
301 		crypto_register(sc->sc_cid, CRYPTO_3DES_CBC,
302 		    hifn_newsession, hifn_freesession, hifn_process);
303 		/*FALLTHROUGH*/
304 	case HIFN_PUSTAT_ENA_1:
305 		crypto_register(sc->sc_cid, CRYPTO_MD5_HMAC96,
306 		    hifn_newsession, hifn_freesession, hifn_process);
307 		crypto_register(sc->sc_cid, CRYPTO_SHA1_HMAC96,
308 		    NULL, NULL, NULL);
309 		crypto_register(sc->sc_cid, CRYPTO_DES_CBC,
310 		    NULL, NULL, NULL);
311 	}
312 #endif
313 
314 	return;
315 
316 #ifdef __OpenBSD__
317 fail_intr:
318 #endif
319 	pci_intr_disestablish(pc, sc->sc_ih);
320 fail_mem:
321 	bus_dmamap_unload(sc->sc_dmat, dmamap);
322 	bus_dmamap_destroy(sc->sc_dmat, dmamap);
323 	bus_dmamem_unmap(sc->sc_dmat, kva, sizeof(*sc->sc_dma));
324 	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
325 fail_io1:
326 	bus_space_unmap(sc->sc_st1, sc->sc_sh1, iosize1);
327 fail_io0:
328 	bus_space_unmap(sc->sc_st0, sc->sc_sh0, iosize0);
329 }
330 
331 /*
332  * Resets the board.  Values in the registers are left as is
333  * from the reset (i.e. initial values are assigned elsewhere).
334  */
335 void
336 hifn_reset_board(sc)
337 	struct hifn_softc *sc;
338 {
339 	/*
340 	 * Set polling in the DMA configuration register to zero.  0x7 avoids
341 	 * resetting the board and zeros out the other fields.
342 	 */
343 	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
344 	    HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
345 
346 	/*
347 	 * Now that polling has been disabled, we have to wait 1 ms
348 	 * before resetting the board.
349 	 */
350 	DELAY(1000);
351 
352 	/* Reset the board.  We do this by writing zeros to the DMA reset
353 	 * field, the BRD reset field, and the mandatory 1 at position 2.
354 	 * Every other field is set to zero.
355 	 */
356 	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MODE);
357 
358 	/*
359 	 * Wait another millisecond for the board to reset.
360 	 */
361 	DELAY(1000);
362 
363 	/*
364 	 * Turn off the reset!  (No joke.)
365 	 */
366 	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
367 	    HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
368 }
369 
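/*
 * Advance the unlock signature "a" by "cnt" steps.  Each step shifts
 * the value left by one and mixes in the parity of the bits selected
 * by the 0x80080125 mask (effectively a linear feedback shift register).
 */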
370 u_int32_t
371 hifn_next_signature(a, cnt)
372 	u_int32_t a;
373 	u_int cnt;
374 {
375 	int i;
376 	u_int32_t v;
377 
378 	for (i = 0; i < cnt; i++) {
379 
380 		/* get the parity */
381 		v = a & 0x80080125;
382 		v ^= v >> 16;
383 		v ^= v >> 8;
384 		v ^= v >> 4;
385 		v ^= v >> 2;
386 		v ^= v >> 1;
387 
388 		a = (v & 1) ^ (a << 1);
389 	}
390 
391 	return a;
392 }
393 
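/*
 * Map PCI vendor/product IDs to the per-card key used for the unlock
 * sequence in hifn_enable_crypto(); the boards listed here all use the
 * all-zeros key.
 */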
394 struct pci2id {
395 	u_short		pci_vendor;
396 	u_short		pci_prod;
397 	char		card_id[13];
398 } pci2id[] = {
399 	{
400 		PCI_VENDOR_NETSEC,
401 		PCI_PRODUCT_NETSEC_7751,
402 		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
403 		  0x00, 0x00, 0x00, 0x00, 0x00 }
404 	}, {
405 		PCI_VENDOR_INVERTEX,
406 		PCI_PRODUCT_INVERTEX_AEON,
407 		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
408 		  0x00, 0x00, 0x00, 0x00, 0x00 }
409 	}, {
410 		/*
411 		 * Other vendors share this PCI ID as well, such as
412 		 * http://www.powercrypt.com, and obviously they also
413 		 * use the same key.
414 		 */
415 		PCI_VENDOR_HIFN,
416 		PCI_PRODUCT_HIFN_7751,
417 		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
418 		  0x00, 0x00, 0x00, 0x00, 0x00 }
419 	},
420 };
421 
422 /*
423  * Checks to see if crypto is already enabled.  If it isn't, the unlock
424  * sequence is written to the board to enable it.  The check is important,
425  * as enabling crypto twice will lock the board.
426  */
427 int
428 hifn_enable_crypto(sc, pciid)
429 	struct hifn_softc *sc;
430 	pcireg_t pciid;
431 {
432 	u_int32_t dmacfg, ramcfg, encl, addr, i;
433 	char *offtbl = NULL;
434 
435 	for (i = 0; i < sizeof(pci2id)/sizeof(pci2id[0]); i++) {
436 		if (pci2id[i].pci_vendor == PCI_VENDOR(pciid) &&
437 		    pci2id[i].pci_prod == PCI_PRODUCT(pciid)) {
438 			offtbl = pci2id[i].card_id;
439 			break;
440 		}
441 	}
442 
443 	if (offtbl == NULL) {
444 #ifdef HIFN_DEBUG
445 		aprint_debug("%s: Unknown card!\n", sc->sc_dv.dv_xname);
446 #endif
447 		return (1);
448 	}
449 
450 	ramcfg = READ_REG_0(sc, HIFN_0_PUCNFG);
451 	dmacfg = READ_REG_1(sc, HIFN_1_DMA_CNFG);
452 
453 	/*
454 	 * The RAM config register's encrypt level bit needs to be set before
455 	 * every read performed on the encryption level register.
456 	 */
457 	WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg | HIFN_PUCNFG_CHIPID);
458 
459 	encl = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA;
460 
461 	/*
462 	 * Make sure we don't re-unlock.  Two unlocks kill the chip until the
463 	 * next reboot.
464 	 */
465 	if (encl == HIFN_PUSTAT_ENA_1 || encl == HIFN_PUSTAT_ENA_2) {
466 #ifdef HIFN_DEBUG
467 		aprint_debug("%s: Strong Crypto already enabled!\n",
468 		    sc->sc_dv.dv_xname);
469 #endif
470 		WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg);
471 		WRITE_REG_1(sc, HIFN_1_DMA_CNFG, dmacfg);
472 		return 0;	/* success */
473 	}
474 
475 	if (encl != 0 && encl != HIFN_PUSTAT_ENA_0) {
476 #ifdef HIFN_DEBUG
477 		aprint_debug("%s: Unknown encryption level\n",
478 		    sc->sc_dv.dv_xname);
479 #endif
480 		return 1;
481 	}
482 
483 	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_UNLOCK |
484 	    HIFN_DMACNFG_MSTRESET | HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
485 	DELAY(1000);
486 	addr = READ_REG_1(sc, HIFN_UNLOCK_SECRET1);
487 	DELAY(1000);
488 	WRITE_REG_1(sc, HIFN_UNLOCK_SECRET2, 0);
489 	DELAY(1000);
490 
491 	for (i = 0; i <= 12; i++) {
492 		addr = hifn_next_signature(addr, offtbl[i] + 0x101);
493 		WRITE_REG_1(sc, HIFN_UNLOCK_SECRET2, addr);
494 
495 		DELAY(1000);
496 	}
497 
498 	WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg | HIFN_PUCNFG_CHIPID);
499 	encl = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA;
500 
501 #ifdef HIFN_DEBUG
502 	if (encl != HIFN_PUSTAT_ENA_1 && encl != HIFN_PUSTAT_ENA_2)
503 		aprint_debug("Encryption engine is permanently locked until next system reset.\n");
504 	else
505 		aprint_debug("Encryption engine enabled successfully!\n");
506 #endif
507 
508 	WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg);
509 	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, dmacfg);
510 
511 	switch (encl) {
512 	case HIFN_PUSTAT_ENA_0:
513 		aprint_normal(": no encr/auth");
514 		break;
515 	case HIFN_PUSTAT_ENA_1:
516 		aprint_normal(": DES enabled");
517 		break;
518 	case HIFN_PUSTAT_ENA_2:
519 		aprint_normal(": fully enabled");
520 		break;
521 	default:
522 		aprint_normal(": disabled");
523 		break;
524 	}
525 
526 	return 0;
527 }
528 
529 /*
530  * Give initial values to the registers listed in the "Register Space"
531  * section of the HIFN Software Development reference manual.
532  */
533 void
534 hifn_init_pci_registers(sc)
535 	struct hifn_softc *sc;
536 {
537 	/* write fixed values needed by the Initialization registers */
538 	WRITE_REG_0(sc, HIFN_0_PUCTRL, HIFN_PUCTRL_DMAENA);
539 	WRITE_REG_0(sc, HIFN_0_FIFOCNFG, HIFN_FIFOCNFG_THRESHOLD);
540 	WRITE_REG_0(sc, HIFN_0_PUIER, HIFN_PUIER_DSTOVER);
541 
542 	/* write all 4 ring address registers */
543 	WRITE_REG_1(sc, HIFN_1_DMA_CRAR, vtophys((vaddr_t)sc->sc_dma->cmdr));
544 	WRITE_REG_1(sc, HIFN_1_DMA_SRAR, vtophys((vaddr_t)sc->sc_dma->srcr));
545 	WRITE_REG_1(sc, HIFN_1_DMA_DRAR, vtophys((vaddr_t)sc->sc_dma->dstr));
546 	WRITE_REG_1(sc, HIFN_1_DMA_RRAR, vtophys((vaddr_t)sc->sc_dma->resr));
547 
548 	/* write status register */
549 	WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_D_CTRL_ENA |
550 	    HIFN_DMACSR_R_CTRL_ENA | HIFN_DMACSR_S_CTRL_ENA |
551 	    HIFN_DMACSR_C_CTRL_ENA);
552 	WRITE_REG_1(sc, HIFN_1_DMA_IER, HIFN_DMAIER_R_DONE);
553 
554 #if 0
555 #if BYTE_ORDER == BIG_ENDIAN
556 	    (0x1 << 7) |
557 #endif
558 #endif
559 	WRITE_REG_0(sc, HIFN_0_PUCNFG, HIFN_PUCNFG_COMPSING |
560 	    HIFN_PUCNFG_DRFR_128 | HIFN_PUCNFG_TCALLPHASES |
561 	    HIFN_PUCNFG_TCDRVTOTEM | HIFN_PUCNFG_BUS32 |
562 	    (sc->sc_drammodel ? HIFN_PUCNFG_DRAM : HIFN_PUCNFG_SRAM));
563 
564 	WRITE_REG_0(sc, HIFN_0_PUISR, HIFN_PUISR_DSTOVER);
565 	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
566 	    HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE | HIFN_DMACNFG_LAST |
567 	    ((HIFN_POLL_FREQUENCY << 16 ) & HIFN_DMACNFG_POLLFREQ) |
568 	    ((HIFN_POLL_SCALAR << 8) & HIFN_DMACNFG_POLLINVAL));
569 }
570 
571 /*
572  * The maximum number of sessions supported by the card
573  * depends on the amount of context RAM, on which
574  * encryption algorithms are enabled, and on how compression
575  * is configured.  All of this must be set up before this
576  * routine is called.
577  */
578 void
579 hifn_sessions(sc)
580 	struct hifn_softc *sc;
581 {
582 	u_int32_t pucnfg;
583 	int ctxsize;
584 
585 	pucnfg = READ_REG_0(sc, HIFN_0_PUCNFG);
586 
587 	if (pucnfg & HIFN_PUCNFG_COMPSING) {
588 		if (pucnfg & HIFN_PUCNFG_ENCCNFG)
589 			ctxsize = 128;
590 		else
591 			ctxsize = 512;
592 		sc->sc_maxses = 1 +
593 		    ((sc->sc_ramsize - 32768) / ctxsize);
594 	}
595 	else
596 		sc->sc_maxses = sc->sc_ramsize / 16384;
597 
598 	if (sc->sc_maxses > 2048)
599 		sc->sc_maxses = 2048;
600 }
601 
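/*
 * Determine whether the board is populated with DRAM or SRAM by
 * writing test patterns (0x55, then 0xaa) to RAM address 0 and reading
 * them back; if either pattern fails to read back, mark the board as a
 * DRAM model.
 */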
602 void
603 hifn_ramtype(sc)
604 	struct hifn_softc *sc;
605 {
606 	u_int8_t data[8], dataexpect[8];
607 	int i;
608 
609 	hifn_reset_board(sc);
610 	hifn_init_dma(sc);
611 	hifn_init_pci_registers(sc);
612 
613 	for (i = 0; i < sizeof(data); i++)
614 		data[i] = dataexpect[i] = 0x55;
615 	if (hifn_writeramaddr(sc, 0, data, 0) < 0)
616 		return;
617 	if (hifn_readramaddr(sc, 0, data, 1) < 0)
618 		return;
619 	if (memcmp(data, dataexpect, sizeof(data)) != 0) {
620 		sc->sc_drammodel = 1;
621 		return;
622 	}
623 
624 	hifn_reset_board(sc);
625 	hifn_init_dma(sc);
626 	hifn_init_pci_registers(sc);
627 
628 	for (i = 0; i < sizeof(data); i++)
629 		data[i] = dataexpect[i] = 0xaa;
630 	if (hifn_writeramaddr(sc, 0, data, 0) < 0)
631 		return;
632 	if (hifn_readramaddr(sc, 0, data, 1) < 0)
633 		return;
634 	if (memcmp(data, dataexpect, sizeof(data)) != 0)
635 		sc->sc_drammodel = 1;
636 }
637 
638 /*
639  * For SRAM boards, just write/read memory until it fails; also check for
640  * banking.
641  */
642 int
643 hifn_sramsize(sc)
644 	struct hifn_softc *sc;
645 {
646 	u_int32_t a = 0, end;
647 	u_int8_t data[8], dataexpect[8];
648 
649 	for (a = 0; a < sizeof(data); a++)
650 		data[a] = dataexpect[a] = 0x5a;
651 
652 	hifn_reset_board(sc);
653 	hifn_init_dma(sc);
654 	hifn_init_pci_registers(sc);
655 	end = 1 << 20;	/* 1MB */
656 	for (a = 0; a < end; a += 16384) {
657 		if (hifn_writeramaddr(sc, a, data, 0) < 0)
658 			return (0);
659 		if (hifn_readramaddr(sc, a, data, 1) < 0)
660 			return (0);
661 		if (memcmp(data, dataexpect, sizeof(data)) != 0)
662 			return (0);
663 		hifn_reset_board(sc);
664 		hifn_init_dma(sc);
665 		hifn_init_pci_registers(sc);
666 		sc->sc_ramsize = a + 16384;
667 	}
668 
669 	for (a = 0; a < sizeof(data); a++)
670 		data[a] = dataexpect[a] = 0xa5;
671 	if (hifn_writeramaddr(sc, 0, data, 0) < 0)
672 		return (0);
673 
674 	end = sc->sc_ramsize;
675 	for (a = 0; a < end; a += 16384) {
676 		hifn_reset_board(sc);
677 		hifn_init_dma(sc);
678 		hifn_init_pci_registers(sc);
679 		if (hifn_readramaddr(sc, a, data, 0) < 0)
680 			return (0);
681 		if (a != 0 && memcmp(data, dataexpect, sizeof(data)) == 0)
682 			return (0);
683 		sc->sc_ramsize = a + 16384;
684 	}
685 
686 	hifn_reset_board(sc);
687 	hifn_init_dma(sc);
688 	hifn_init_pci_registers(sc);
689 
690 	return (0);
691 }
692 
693 /*
694  * XXX For dram boards, one should really try all of the
695  * HIFN_PUCNFG_DSZ_*'s.  This just assumes that PUCNFG
696  * is already set up correctly.
697  */
698 int
699 hifn_dramsize(sc)
700 	struct hifn_softc *sc;
701 {
702 	u_int32_t cnfg;
703 
704 	cnfg = READ_REG_0(sc, HIFN_0_PUCNFG) &
705 	    HIFN_PUCNFG_DRAMMASK;
706 	sc->sc_ramsize = 1 << ((cnfg >> 13) + 18);
707 	return (0);
708 }
709 
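/*
 * Write the 8 bytes at "data" to on-board RAM at "addr", using command
 * ring slot "slot".  Returns -1 if the result descriptor is still
 * marked valid after the command has had time to complete.
 */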
710 int
711 hifn_writeramaddr(sc, addr, data, slot)
712 	struct hifn_softc *sc;
713 	int addr, slot;
714 	u_int8_t *data;
715 {
716 	struct hifn_dma *dma = sc->sc_dma;
717 	hifn_base_command_t wc;
718 	const u_int32_t masks = HIFN_D_VALID | HIFN_D_LAST | HIFN_D_MASKDONEIRQ;
719 	u_int64_t src, dst;
720 
721 	wc.masks = 3 << 13;
722 	wc.session_num = addr >> 14;
723 	wc.total_source_count = 8;
724 	wc.total_dest_count = addr & 0x3fff;
725 
726 	/* build write command */
727 	*(hifn_base_command_t *) sc->sc_dma->command_bufs[slot] = wc;
728 	memcpy(&src, data, sizeof(src));
729 
730 	dma->srcr[slot].p = vtophys((vaddr_t)&src);
731 	dma->dstr[slot].p = vtophys((vaddr_t)&dst);
732 
733 	dma->cmdr[slot].l = 16 | masks;
734 	dma->srcr[slot].l = 8 | masks;
735 	dma->dstr[slot].l = 8 | masks;
736 	dma->resr[slot].l = HIFN_MAX_RESULT | masks;
737 
738 	DELAY(1000);	/* let write command execute */
739 	if (dma->resr[slot].l & HIFN_D_VALID) {
740 		printf("%s: SRAM/DRAM detection error -- "
741 		    "result[%d] valid still set\n", sc->sc_dv.dv_xname, slot);
742 		return (-1);
743 	}
744 	return (0);
745 }
746 
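/*
 * Read 8 bytes from on-board RAM at "addr" into "data", using command
 * ring slot "slot"; the counterpart of hifn_writeramaddr() above.
 */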
747 int
748 hifn_readramaddr(sc, addr, data, slot)
749 	struct hifn_softc *sc;
750 	int addr, slot;
751 	u_int8_t *data;
752 {
753 	struct hifn_dma *dma = sc->sc_dma;
754 	hifn_base_command_t rc;
755 	const u_int32_t masks = HIFN_D_VALID | HIFN_D_LAST | HIFN_D_MASKDONEIRQ;
756 	u_int64_t src, dst;
757 
758 	rc.masks = 2 << 13;
759 	rc.session_num = addr >> 14;
760 	rc.total_source_count = addr & 0x3fff;
761 	rc.total_dest_count = 8;
762 
763 	*(hifn_base_command_t *) sc->sc_dma->command_bufs[slot] = rc;
764 
765 	dma->srcr[slot].p = vtophys((vaddr_t)&src);
766 	dma->dstr[slot].p = vtophys((vaddr_t)&dst);
767 	dma->cmdr[slot].l = 16 | masks;
768 	dma->srcr[slot].l = 8 | masks;
769 	dma->dstr[slot].l = 8 | masks;
770 	dma->resr[slot].l = HIFN_MAX_RESULT | masks;
771 
772 	DELAY(1000);	/* let read command execute */
773 	if (dma->resr[slot].l & HIFN_D_VALID) {
774 		printf("%s: SRAM/DRAM detection error -- "
775 		    "result[%d] valid still set\n", sc->sc_dv.dv_xname, slot);
776 		return (-1);
777 	}
778 	memcpy(data, &dst, sizeof(dst));
779 	return (0);
780 }
781 
782 /*
783  * Initialize the descriptor rings.
784  */
785 void
786 hifn_init_dma(sc)
787 	struct hifn_softc *sc;
788 {
789 	struct hifn_dma *dma = sc->sc_dma;
790 	int i;
791 
792 	/* initialize static pointer values */
793 	for (i = 0; i < HIFN_D_CMD_RSIZE; i++)
794 		dma->cmdr[i].p = vtophys((vaddr_t)dma->command_bufs[i]);
795 	for (i = 0; i < HIFN_D_RES_RSIZE; i++)
796 		dma->resr[i].p = vtophys((vaddr_t)dma->result_bufs[i]);
797 
798 	dma->cmdr[HIFN_D_CMD_RSIZE].p = vtophys((vaddr_t)dma->cmdr);
799 	dma->srcr[HIFN_D_SRC_RSIZE].p = vtophys((vaddr_t)dma->srcr);
800 	dma->dstr[HIFN_D_DST_RSIZE].p = vtophys((vaddr_t)dma->dstr);
801 	dma->resr[HIFN_D_RES_RSIZE].p = vtophys((vaddr_t)dma->resr);
802 	dma->cmdu = dma->srcu = dma->dstu = dma->resu = 0;
803 	dma->cmdi = dma->srci = dma->dsti = dma->resi = 0;
804 	dma->cmdk = dma->srck = dma->dstk = dma->resk = 0;
805 }
806 
807 /*
808  * Writes out the raw command buffer space.  Returns the
809  * command buffer size.
810  */
811 u_int
812 hifn_write_command(cmd, buf)
813 	struct hifn_command *cmd;
814 	u_int8_t *buf;
815 {
816 	u_int8_t *buf_pos;
817 	hifn_base_command_t *base_cmd;
818 	hifn_mac_command_t *mac_cmd;
819 	hifn_crypt_command_t *cry_cmd;
820 	int using_mac, using_crypt, len;
821 
822 	buf_pos = buf;
823 	using_mac = cmd->base_masks & HIFN_BASE_CMD_MAC;
824 	using_crypt = cmd->base_masks & HIFN_BASE_CMD_CRYPT;
825 
826 	base_cmd = (hifn_base_command_t *)buf_pos;
827 	base_cmd->masks = cmd->base_masks;
828 	base_cmd->total_source_count = cmd->src_l;
829 	base_cmd->total_dest_count = cmd->dst_l;
830 	base_cmd->session_num = cmd->session_num;
831 	buf_pos += sizeof(hifn_base_command_t);
832 
833 	if (using_mac) {
834 		mac_cmd = (hifn_mac_command_t *)buf_pos;
835 		mac_cmd->masks = cmd->mac_masks;
836 		mac_cmd->header_skip = cmd->mac_header_skip;
837 		mac_cmd->source_count = cmd->mac_process_len;
838 		buf_pos += sizeof(hifn_mac_command_t);
839 	}
840 
841 	if (using_crypt) {
842 		cry_cmd = (hifn_crypt_command_t *)buf_pos;
843 		cry_cmd->masks = cmd->cry_masks;
844 		cry_cmd->header_skip = cmd->crypt_header_skip;
845 		cry_cmd->source_count = cmd->crypt_process_len;
846 		buf_pos += sizeof(hifn_crypt_command_t);
847 	}
848 
849 	if (using_mac && mac_cmd->masks & HIFN_MAC_CMD_NEW_KEY) {
850 		memcpy(buf_pos, cmd->mac, HIFN_MAC_KEY_LENGTH);
851 		buf_pos += HIFN_MAC_KEY_LENGTH;
852 	}
853 
854 	if (using_crypt && cry_cmd->masks & HIFN_CRYPT_CMD_NEW_KEY) {
855 		len = (cry_cmd->masks & HIFN_CRYPT_CMD_ALG_3DES) ?
856 		    HIFN_3DES_KEY_LENGTH : HIFN_DES_KEY_LENGTH;
857 		memcpy(buf_pos, cmd->ck, len);
858 		buf_pos += len;
859 	}
860 
861 	if (using_crypt && cry_cmd->masks & HIFN_CRYPT_CMD_NEW_IV) {
862 		memcpy(buf_pos, cmd->iv, HIFN_IV_LENGTH);
863 		buf_pos += HIFN_IV_LENGTH;
864 	}
865 
866 	if ((base_cmd->masks & (HIFN_BASE_CMD_MAC | HIFN_BASE_CMD_CRYPT)) == 0) {
867 		memset(buf_pos, 0, 8);
868 		buf_pos += 8;
869 	}
870 
871 	return (buf_pos - buf);
872 }
873 
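/*
 * Queue a command on the descriptor rings: write out the command
 * buffer, then load the source, destination and result descriptors.
 * Returns HIFN_CRYPTO_RINGS_FULL if any ring is out of room.  (The
 * body is currently compiled only for OpenBSD.)
 */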
874 int
875 hifn_crypto(sc, cmd)
876 	struct hifn_softc *sc;
877 	struct hifn_command *cmd;
878 {
879 #ifndef __OpenBSD__
880 	return -1;
881 #else
882 	u_int32_t cmdlen;
883 	struct	hifn_dma *dma = sc->sc_dma;
884 	int	cmdi, srci, dsti, resi, nicealign = 0;
885 	int     s, i;
886 
887 	if (cmd->src_npa == 0 && cmd->src_m)
888 		cmd->src_l = mbuf2pages(cmd->src_m, &cmd->src_npa,
889 		    cmd->src_packp, cmd->src_packl, MAX_SCATTER, &nicealign);
890 	if (cmd->src_l == 0)
891 		return (-1);
892 
893 	if (nicealign == 0) {
894 		int totlen, len;
895 		struct mbuf *m, *top, **mp;
896 
897 		totlen = cmd->dst_l = cmd->src_l;
898 		if (cmd->src_m->m_flags & M_PKTHDR) {
899 			MGETHDR(m, M_DONTWAIT, MT_DATA);
900 			M_COPY_PKTHDR(m, cmd->src_m);
901 			len = MHLEN;
902 		} else {
903 			MGET(m, M_DONTWAIT, MT_DATA);
904 			len = MLEN;
905 		}
906 		if (m == NULL)
907 			return (-1);
908 		if (totlen >= MINCLSIZE) {
909 			MCLGET(m, M_DONTWAIT);
910 			if (m->m_flags & M_EXT)
911 				len = MCLBYTES;
912 		}
913 		m->m_len = len;
914 		top = NULL;
915 		mp = &top;
916 
917 		while (totlen > 0) {
918 			if (top) {
919 				MGET(m, M_DONTWAIT, MT_DATA);
920 				if (m == NULL) {
921 					m_freem(top);
922 					return (-1);
923 				}
924 				len = MLEN;
925 			}
926 			if (top && totlen >= MINCLSIZE) {
927 				MCLGET(m, M_DONTWAIT);
928 				if (m->m_flags & M_EXT)
929 					len = MCLBYTES;
930 			}
931 			m->m_len = len;
932 			totlen -= len;
933 			*mp = m;
934 			mp = &m->m_next;
935 		}
936 		cmd->dst_m = top;
937 	}
938 	else
939 		cmd->dst_m = cmd->src_m;
940 
941 	cmd->dst_l = mbuf2pages(cmd->dst_m, &cmd->dst_npa,
942 	    cmd->dst_packp, cmd->dst_packl, MAX_SCATTER, NULL);
943 	if (cmd->dst_l == 0)
944 		return (-1);
945 
946 #ifdef HIFN_DEBUG
947 	printf("%s: Entering cmd: stat %8x ien %8x u %d/%d/%d/%d n %d/%d\n",
948 	    sc->sc_dv.dv_xname,
949 	    READ_REG_1(sc, HIFN_1_DMA_CSR), READ_REG_1(sc, HIFN_1_DMA_IER),
950 	    dma->cmdu, dma->srcu, dma->dstu, dma->resu, cmd->src_npa,
951 	    cmd->dst_npa);
952 #endif
953 
954 	s = splnet();
955 
956 	/*
957 	 * need 1 cmd, and 1 res
958 	 * need N src, and N dst
959 	 */
960 	if (dma->cmdu+1 > HIFN_D_CMD_RSIZE ||
961 	    dma->srcu+cmd->src_npa > HIFN_D_SRC_RSIZE ||
962 	    dma->dstu+cmd->dst_npa > HIFN_D_DST_RSIZE ||
963 	    dma->resu+1 > HIFN_D_RES_RSIZE) {
964 		splx(s);
965 		return (HIFN_CRYPTO_RINGS_FULL);
966 	}
967 
968 	if (dma->cmdi == HIFN_D_CMD_RSIZE) {
969 		dma->cmdi = 0;
970 		dma->cmdr[HIFN_D_CMD_RSIZE].l = HIFN_D_VALID | HIFN_D_LAST |
971 		    HIFN_D_MASKDONEIRQ | HIFN_D_JUMP;
972 	}
973 	cmdi = dma->cmdi++;
974 
975 	if (dma->resi == HIFN_D_RES_RSIZE) {
976 		dma->resi = 0;
977 		dma->resr[HIFN_D_RES_RSIZE].l = HIFN_D_VALID | HIFN_D_LAST |
978 		    HIFN_D_MASKDONEIRQ | HIFN_D_JUMP;
979 	}
980 	resi = dma->resi++;
981 
982 	cmdlen = hifn_write_command(cmd, dma->command_bufs[cmdi]);
983 #ifdef HIFN_DEBUG
984 	printf("write_command %d (nice %d)\n", cmdlen, nicealign);
985 #endif
986 	/* .p for command/result already set */
987 	dma->cmdr[cmdi].l = cmdlen | HIFN_D_VALID | HIFN_D_LAST |
988 	    HIFN_D_MASKDONEIRQ;
989 	dma->cmdu++;
990 
991 	/*
992 	 * We don't worry about missing an interrupt (which a "command wait"
993 	 * interrupt salvages us from), unless there is more than one command
994 	 * in the queue.
995 	 */
996 	if (dma->cmdu > 1)
997 		WRITE_REG_1(sc, HIFN_1_DMA_IER,
998 		    HIFN_DMAIER_C_WAIT | HIFN_DMAIER_R_DONE);
999 
1000 	hifnstats.hst_ipackets++;
1001 
1002 	for (i = 0; i < cmd->src_npa; i++) {
1003 		int last = 0;
1004 
1005 		if (i == cmd->src_npa-1)
1006 			last = HIFN_D_LAST;
1007 
1008 		if (dma->srci == HIFN_D_SRC_RSIZE) {
1009 			srci = 0, dma->srci = 1;
1010 			dma->srcr[HIFN_D_SRC_RSIZE].l = HIFN_D_VALID |
1011 			    HIFN_D_MASKDONEIRQ | HIFN_D_JUMP | HIFN_D_LAST;
1012 		} else
1013 			srci = dma->srci++;
1014 		dma->srcr[srci].p = cmd->src_packp[i];
1015 		dma->srcr[srci].l = cmd->src_packl[i] | HIFN_D_VALID |
1016 		    HIFN_D_MASKDONEIRQ | last;
1017 		hifnstats.hst_ibytes += cmd->src_packl[i];
1018 	}
1019 	dma->srcu += cmd->src_npa;
1020 
1021 	for (i = 0; i < cmd->dst_npa; i++) {
1022 		int last = 0;
1023 
1024 		if (i == cmd->dst_npa-1)
1025 			last = HIFN_D_LAST;
1026 
1027 		if (dma->dsti == HIFN_D_DST_RSIZE) {
1028 			dsti = 0, dma->dsti = 1;
1029 			dma->dstr[HIFN_D_DST_RSIZE].l = HIFN_D_VALID |
1030 			    HIFN_D_MASKDONEIRQ | HIFN_D_JUMP | HIFN_D_LAST;
1031 		} else
1032 			dsti = dma->dsti++;
1033 		dma->dstr[dsti].p = cmd->dst_packp[i];
1034 		dma->dstr[dsti].l = cmd->dst_packl[i] | HIFN_D_VALID |
1035 		    HIFN_D_MASKDONEIRQ | last;
1036 	}
1037 	dma->dstu += cmd->dst_npa;
1038 
1039 	/*
1040 	 * Unlike the other descriptors, we don't mask the done interrupt
1041 	 * from the result descriptor.
1042 	 */
1043 #ifdef HIFN_DEBUG
1044 	printf("load res\n");
1045 #endif
1046 	dma->hifn_commands[resi] = cmd;
1047 	dma->resr[resi].l = HIFN_MAX_RESULT | HIFN_D_VALID | HIFN_D_LAST;
1048 	dma->resu++;
1049 
1050 #ifdef HIFN_DEBUG
1051 	printf("%s: command: stat %8x ier %8x\n",
1052 	    sc->sc_dv.dv_xname,
1053 	    READ_REG_1(sc, HIFN_1_DMA_CSR), READ_REG_1(sc, HIFN_1_DMA_IER));
1054 #endif
1055 
1056 	splx(s);
1057 	return 0;		/* success */
1058 #endif
1059 }
1060 
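/*
 * Interrupt handler: harvest completed result descriptors (handing
 * them to hifn_callback() on OpenBSD), retire finished source and
 * command descriptors, and acknowledge the "result done" and
 * "command wait" conditions.
 */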
1061 int
1062 hifn_intr(arg)
1063 	void *arg;
1064 {
1065 	struct hifn_softc *sc = arg;
1066 	struct hifn_dma *dma = sc->sc_dma;
1067 	u_int32_t dmacsr;
1068 	int i, u;
1069 
1070 	dmacsr = READ_REG_1(sc, HIFN_1_DMA_CSR);
1071 
1072 #ifdef HIFN_DEBUG
1073 	printf("%s: irq: stat %08x ien %08x u %d/%d/%d/%d\n",
1074 	    sc->sc_dv.dv_xname,
1075 	    dmacsr, READ_REG_1(sc, HIFN_1_DMA_IER),
1076 	    dma->cmdu, dma->srcu, dma->dstu, dma->resu);
1077 #endif
1078 
1079 	if ((dmacsr & (HIFN_DMACSR_R_DONE | HIFN_DMACSR_C_WAIT)) == 0)
1080 		return (0);
1081 
1082 	if (dma->resu > HIFN_D_RES_RSIZE)
1083 		printf("%s: Internal Error -- ring overflow\n",
1084 		    sc->sc_dv.dv_xname);
1085 
1086 	if ((dmacsr & HIFN_DMACSR_C_WAIT) && (dma->cmdu == 0)) {
1087 		/*
1088 		 * If there are no commands left to process and we receive
1089 		 * a "waiting on command" interrupt, disable that interrupt
1090 		 * (drop it from the interrupt enable register).
1091 		 */
1092 		WRITE_REG_1(sc, HIFN_1_DMA_IER, HIFN_DMAIER_R_DONE);
1093 	}
1094 
1095 	while (dma->resu > 0) {
1096 		struct hifn_command *cmd;
1097 		u_int8_t *macbuf = NULL;
1098 
1099 		cmd = dma->hifn_commands[dma->resk];
1100 
1101 		/* if still valid, stop processing */
1102 		if (dma->resr[dma->resk].l & HIFN_D_VALID)
1103 			break;
1104 
1105 		if (cmd->base_masks & HIFN_BASE_CMD_MAC) {
1106 			macbuf = dma->result_bufs[dma->resk];
1107 			macbuf += 12;
1108 		}
1109 
1110 #ifdef __OpenBSD__
1111 		hifn_callback(sc, cmd, macbuf);
1112 #endif
1113 
1114 		if (++dma->resk == HIFN_D_RES_RSIZE)
1115 			dma->resk = 0;
1116 		dma->resu--;
1117 		hifnstats.hst_opackets++;
1118 	}
1119 
1120 	/* clear the rings */
1121 
1122 	i = dma->srck; u = dma->srcu;
1123 	while (u != 0 && (dma->srcr[i].l & HIFN_D_VALID) == 0) {
1124 		if (++i == HIFN_D_SRC_RSIZE)
1125 			i = 0;
1126 		u--;
1127 	}
1128 	dma->srck = i; dma->srcu = u;
1129 
1130 	i = dma->cmdk; u = dma->cmdu;
1131 	while (u != 0 && (dma->cmdr[i].l & HIFN_D_VALID) == 0) {
1132 		if (++i == HIFN_D_CMD_RSIZE)
1133 			i = 0;
1134 		u--;
1135 	}
1136 	dma->cmdk = i; dma->cmdu = u;
1137 
1138 	/*
1139 	 * Clear "result done" and "command wait" flags in status register.
1140 	 * If we still have slots to process and we received a "command wait"
1141 	 * interrupt, this will interrupt us again.
1142 	 */
1143 	WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_R_DONE|HIFN_DMACSR_C_WAIT);
1144 	return (1);
1145 }
1146 
1147 #ifdef __OpenBSD__
1148 /*
1149  * Allocate a new 'session' and return an encoded session id.  'sidp'
1150  * contains our registration id, and should contain an encoded session
1151  * id on successful allocation.
1152  */
1153 int
1154 hifn_newsession(sidp, cri)
1155 	u_int32_t *sidp;
1156 	struct cryptoini *cri;
1157 {
1158 	struct cryptoini *c;
1159 	struct hifn_softc *sc = NULL;
1160 	int i, mac = 0, cry = 0;
1161 
1162 	if (sidp == NULL || cri == NULL)
1163 		return (EINVAL);
1164 
1165 	for (i = 0; i < hifn_cd.cd_ndevs; i++) {
1166 		sc = hifn_cd.cd_devs[i];
1167 		if (sc == NULL)
1168 			break;
1169 		if (sc->sc_cid == (*sidp))
1170 			break;
1171 	}
1172 	if (i == hifn_cd.cd_ndevs || sc == NULL)
1173 		return (EINVAL);
1174 
1175 	for (i = 0; i < sc->sc_maxses; i++)
1176 		if (sc->sc_sessions[i].hs_flags == 0)
1177 			break;
1178 	if (i == sc->sc_maxses)
1179 		return (ENOMEM);
1180 
1181 	for (c = cri; c != NULL; c = c->cri_next) {
1182 		if (c->cri_alg == CRYPTO_MD5_HMAC96 ||
1183 		    c->cri_alg == CRYPTO_SHA1_HMAC96) {
1184 			if (mac)
1185 				return (EINVAL);
1186 			mac = 1;
1187 		} else if (c->cri_alg == CRYPTO_DES_CBC ||
1188 		    c->cri_alg == CRYPTO_3DES_CBC) {
1189 			if (cry)
1190 				return (EINVAL);
1191 			cry = 1;
1192 		}
1193 		else
1194 			return (EINVAL);
1195 	}
1196 	if (mac == 0 && cry == 0)
1197 		return (EINVAL);
1198 
1199 	*sidp = HIFN_SID(sc->sc_dv.dv_unit, i);
1200 	sc->sc_sessions[i].hs_flags = 1;
1201 	get_random_bytes(sc->sc_sessions[i].hs_iv, HIFN_IV_LENGTH);
1202 
1203 	return (0);
1204 }
1205 
1206 /*
1207  * Deallocate a session.
1208  * XXX this routine should run a zero'd mac/encrypt key into context ram.
1209  * XXX to blow away any keys already stored there.
1210  */
1211 int
1212 hifn_freesession(tid)
1213 	u_int64_t tid;
1214 {
1215 	struct hifn_softc *sc;
1216 	int card, session;
1217 	u_int32_t sid = ((u_int32_t) tid) & 0xffffffff;
1218 
1219 	card = HIFN_CARD(sid);
1220 	if (card >= hifn_cd.cd_ndevs || hifn_cd.cd_devs[card] == NULL)
1221 		return (EINVAL);
1222 
1223 	sc = hifn_cd.cd_devs[card];
1224 	session = HIFN_SESSION(sid);
1225 	if (session >= sc->sc_maxses)
1226 		return (EINVAL);
1227 
1228 	memset(&sc->sc_sessions[session], 0, sizeof(sc->sc_sessions[session]));
1229 	return (0);
1230 }
1231 
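/*
 * Accept a request from the crypto framework: validate the descriptor
 * chain (at most one MAC and one cipher, in an order the 7751 can
 * handle), build a hifn_command from it and hand it to hifn_crypto().
 */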
1232 int
1233 hifn_process(crp)
1234 	struct cryptop *crp;
1235 {
1236 	struct hifn_command *cmd = NULL;
1237 	int card, session, err;
1238 	struct hifn_softc *sc;
1239 	struct cryptodesc *crd1, *crd2, *maccrd, *enccrd;
1240 
1241 	if (crp == NULL || crp->crp_callback == NULL) {
1242 		hifnstats.hst_invalid++;
1243 		return (EINVAL);
1244 	}
1245 
1246 	card = HIFN_CARD(crp->crp_sid);
1247 	if (card >= hifn_cd.cd_ndevs || hifn_cd.cd_devs[card] == NULL) {
1248 		err = EINVAL;
1249 		goto errout;
1250 	}
1251 
1252 	sc = hifn_cd.cd_devs[card];
1253 	session = HIFN_SESSION(crp->crp_sid);
1254 	if (session >= sc->sc_maxses) {
1255 		err = EINVAL;
1256 		goto errout;
1257 	}
1258 
1259 	cmd = (struct hifn_command *)malloc(sizeof(struct hifn_command),
1260 	    M_DEVBUF, M_NOWAIT|M_ZERO);
1261 	if (cmd == NULL) {
1262 		err = ENOMEM;
1263 		goto errout;
1264 	}
1265 
1266 	if (crp->crp_flags & CRYPTO_F_IMBUF) {
1267 		cmd->src_m = (struct mbuf *)crp->crp_buf;
1268 		cmd->dst_m = (struct mbuf *)crp->crp_buf;
1269 	} else {
1270 		err = EINVAL;
1271 		goto errout;	/* XXX only handle mbufs right now */
1272 	}
1273 
1274 	crd1 = crp->crp_desc;
1275 	if (crd1 == NULL) {
1276 		err = EINVAL;
1277 		goto errout;
1278 	}
1279 	crd2 = crd1->crd_next;
1280 
1281 	if (crd2 == NULL) {
1282 		if (crd1->crd_alg == CRYPTO_MD5_HMAC96 ||
1283 		    crd1->crd_alg == CRYPTO_SHA1_HMAC96) {
1284 			maccrd = crd1;
1285 			enccrd = NULL;
1286 		} else if (crd1->crd_alg == CRYPTO_DES_CBC ||
1287 			 crd1->crd_alg == CRYPTO_3DES_CBC) {
1288 			if ((crd1->crd_flags & CRD_F_ENCRYPT) == 0)
1289 				cmd->base_masks |= HIFN_BASE_CMD_DECODE;
1290 			maccrd = NULL;
1291 			enccrd = crd1;
1292 		} else {
1293 			err = EINVAL;
1294 			goto errout;
1295 		}
1296 	} else {
1297 		if ((crd1->crd_alg == CRYPTO_MD5_HMAC96 ||
1298 		    crd1->crd_alg == CRYPTO_SHA1_HMAC96) &&
1299 		    (crd2->crd_alg == CRYPTO_DES_CBC ||
1300 			crd2->crd_alg == CRYPTO_3DES_CBC) &&
1301 		    ((crd2->crd_flags & CRD_F_ENCRYPT) == 0)) {
1302 			cmd->base_masks = HIFN_BASE_CMD_DECODE;
1303 			maccrd = crd1;
1304 			enccrd = crd2;
1305 		} else if ((crd1->crd_alg == CRYPTO_DES_CBC ||
1306 		    crd1->crd_alg == CRYPTO_3DES_CBC) &&
1307 		    (crd2->crd_alg == CRYPTO_MD5_HMAC96 ||
1308 			crd2->crd_alg == CRYPTO_SHA1_HMAC96) &&
1309 		    (crd1->crd_flags & CRD_F_ENCRYPT)) {
1310 			enccrd = crd1;
1311 			maccrd = crd2;
1312 		} else {
1313 			/*
1314 			 * We cannot order the 7751 as requested
1315 			 */
1316 			err = EINVAL;
1317 			goto errout;
1318 		}
1319 	}
1320 
1321 	if (enccrd) {
1322 		cmd->base_masks |= HIFN_BASE_CMD_CRYPT;
1323 		cmd->cry_masks |= HIFN_CRYPT_CMD_MODE_CBC |
1324 		    HIFN_CRYPT_CMD_NEW_IV;
1325 		if (enccrd->crd_flags & CRD_F_ENCRYPT) {
1326 			if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
1327 				memcpy(cmd->iv, enccrd->crd_iv, HIFN_IV_LENGTH);
1328 			else
1329 				memcpy(cmd->iv, sc->sc_sessions[session].hs_iv,
1330 				    HIFN_IV_LENGTH);
1331 
1332 			if ((enccrd->crd_flags & CRD_F_IV_PRESENT) == 0)
1333 				m_copyback(cmd->src_m, enccrd->crd_inject,
1334 				    HIFN_IV_LENGTH, cmd->iv);
1335 		} else {
1336 			if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
1337 				memcpy(cmd->iv, enccrd->crd_iv, HIFN_IV_LENGTH);
1338 			else
1339 				m_copydata(cmd->src_m, enccrd->crd_inject,
1340 				    HIFN_IV_LENGTH, cmd->iv);
1341 		}
1342 
1343 		if (enccrd->crd_alg == CRYPTO_DES_CBC)
1344 			cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_DES;
1345 		else
1346 			cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_3DES;
1347 
1348 		cmd->crypt_header_skip = enccrd->crd_skip;
1349 		cmd->crypt_process_len = enccrd->crd_len;
1350 		cmd->ck = enccrd->crd_key;
1351 
1352 		if (sc->sc_sessions[session].hs_flags == 1)
1353 			cmd->cry_masks |= HIFN_CRYPT_CMD_NEW_KEY;
1354 	}
1355 
1356 	if (maccrd) {
1357 		cmd->base_masks |= HIFN_BASE_CMD_MAC;
1358 		cmd->mac_masks |= HIFN_MAC_CMD_RESULT |
1359 		    HIFN_MAC_CMD_MODE_HMAC |
1360 		    HIFN_MAC_CMD_POS_IPSEC | HIFN_MAC_CMD_TRUNC;
1361 
1362 		if (maccrd->crd_alg == CRYPTO_MD5_HMAC96)
1363 			cmd->mac_masks |= HIFN_MAC_CMD_ALG_MD5;
1364 		else
1365 			cmd->mac_masks |= HIFN_MAC_CMD_ALG_SHA1;
1366 
1367 		if (sc->sc_sessions[session].hs_flags == 1) {
1368 			cmd->mac_masks |= HIFN_MAC_CMD_NEW_KEY;
1369 			memcpy(cmd->mac, maccrd->crd_key,
1370 			    maccrd->crd_klen >> 3);
1371 			memset(cmd->mac + (maccrd->crd_klen >> 3), 0,
1372 			    HIFN_MAC_KEY_LENGTH - (maccrd->crd_klen >> 3));
1373 		}
1374 
1375 		cmd->mac_header_skip = maccrd->crd_skip;
1376 		cmd->mac_process_len = maccrd->crd_len;
1377 	}
1378 
1379 	if (sc->sc_sessions[session].hs_flags == 1)
1380 		sc->sc_sessions[session].hs_flags = 2;
1381 
1382 	cmd->private_data = (u_long)crp;
1383 	cmd->session_num = session;
1384 	cmd->softc = sc;
1385 
1386 	if (hifn_crypto(sc, cmd) == 0)
1387 		return (0);
1388 
1389 	err = ENOMEM;
1390 
1391 errout:
1392 	if (cmd != NULL)
1393 		free(cmd, M_DEVBUF);
1394 	if (err == EINVAL)
1395 		hifnstats.hst_invalid++;
1396 	else
1397 		hifnstats.hst_nomem++;
1398 	crp->crp_etype = err;
1399 	crp->crp_callback(crp);
1400 	return (0);
1401 }
1402 
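/*
 * Complete a finished command: trim the destination mbuf chain to the
 * processed length, retire the destination descriptors, save the last
 * ciphertext block as the session IV for encrypt operations, copy any
 * MAC result back into the mbuf, and call crypto_done().
 */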
1403 void
1404 hifn_callback(sc, cmd, macbuf)
1405 	struct hifn_softc *sc;
1406 	struct hifn_command *cmd;
1407 	u_int8_t *macbuf;
1408 {
1409 	struct hifn_dma *dma = sc->sc_dma;
1410 	struct cryptop *crp = (struct cryptop *)cmd->private_data;
1411 	struct cryptodesc *crd;
1412 	struct mbuf *m;
1413 	int totlen;
1414 
1415 	if ((crp->crp_flags & CRYPTO_F_IMBUF) && (cmd->src_m != cmd->dst_m)) {
1416 		m_freem(cmd->src_m);
1417 		crp->crp_buf = (caddr_t)cmd->dst_m;
1418 	}
1419 
1420 	if ((m = cmd->dst_m) != NULL) {
1421 		totlen = cmd->src_l;
1422 		hifnstats.hst_obytes += totlen;
1423 		while (m) {
1424 			if (totlen < m->m_len) {
1425 				m->m_len = totlen;
1426 				totlen = 0;
1427 			} else
1428 				totlen -= m->m_len;
1429 			m = m->m_next;
1430 			if (++dma->dstk == HIFN_D_DST_RSIZE)
1431 				dma->dstk = 0;
1432 			dma->dstu--;
1433 		}
1434 	} else {
1435 		hifnstats.hst_obytes += dma->dstr[dma->dstk].l & HIFN_D_LENGTH;
1436 		if (++dma->dstk == HIFN_D_DST_RSIZE)
1437 			dma->dstk = 0;
1438 		dma->dstu--;
1439 	}
1440 
1441 	if ((cmd->base_masks & (HIFN_BASE_CMD_CRYPT | HIFN_BASE_CMD_DECODE)) ==
1442 	    HIFN_BASE_CMD_CRYPT) {
1443 		for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
1444 			if (crd->crd_alg != CRYPTO_DES_CBC &&
1445 			    crd->crd_alg != CRYPTO_3DES_CBC)
1446 				continue;
1447 			m_copydata((struct mbuf *)crp->crp_buf,
1448 			    crd->crd_skip + crd->crd_len - HIFN_IV_LENGTH,
1449 			    HIFN_IV_LENGTH,
1450 			    cmd->softc->sc_sessions[cmd->session_num].hs_iv);
1451 			break;
1452 		}
1453 	}
1454 
1455 	if (macbuf != NULL) {
1456 		for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
1457 			if (crd->crd_alg != CRYPTO_MD5_HMAC96 &&
1458 			    crd->crd_alg != CRYPTO_SHA1_HMAC96)
1459 				continue;
1460 			m_copyback((struct mbuf *)crp->crp_buf,
1461 			    crd->crd_inject, 12, macbuf);
1462 			break;
1463 		}
1464 	}
1465 
1466 	free(cmd, M_DEVBUF);
1467 	crypto_done(crp);
1468 }
1469 #endif
1470