1 /*	$NetBSD: mvxpsec.c,v 1.14 2022/04/12 21:05:37 andvar Exp $	*/
2 /*
3  * Copyright (c) 2015 Internet Initiative Japan Inc.
4  * All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
17  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
18  * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
19  * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
20  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
21  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
23  * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
24  * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
25  * POSSIBILITY OF SUCH DAMAGE.
26  */
27 
28 #ifdef _KERNEL_OPT
29 #include "opt_ipsec.h"
30 #endif
31 
32 /*
33  * Cryptographic Engine and Security Accelerator(MVXPSEC)
34  */
35 #include <sys/cdefs.h>
36 #include <sys/param.h>
37 #include <sys/types.h>
38 #include <sys/kernel.h>
39 #include <sys/queue.h>
40 #include <sys/conf.h>
41 #include <sys/proc.h>
42 #include <sys/bus.h>
43 #include <sys/evcnt.h>
44 #include <sys/device.h>
45 #include <sys/endian.h>
46 #include <sys/errno.h>
47 #include <sys/kmem.h>
48 #include <sys/mbuf.h>
49 #include <sys/callout.h>
50 #include <sys/pool.h>
51 #include <sys/cprng.h>
52 #include <sys/syslog.h>
53 #include <sys/mutex.h>
54 #include <sys/kthread.h>
55 #include <sys/atomic.h>
56 #include <sys/sha1.h>
57 #include <sys/md5.h>
58 
59 #include <uvm/uvm_extern.h>
60 
61 #include <opencrypto/cryptodev.h>
62 #include <opencrypto/xform.h>
63 
64 #include <net/net_stats.h>
65 
66 #include <netinet/in_systm.h>
67 #include <netinet/in.h>
68 #include <netinet/ip.h>
69 #include <netinet/ip6.h>
70 
71 #if NIPSEC > 0
72 #include <netipsec/esp_var.h>
73 #endif
74 
75 #include <arm/cpufunc.h>
76 #include <arm/marvell/mvsocvar.h>
77 #include <arm/marvell/armadaxpreg.h>
78 #include <dev/marvell/marvellreg.h>
79 #include <dev/marvell/marvellvar.h>
80 #include <dev/marvell/mvxpsecreg.h>
81 #include <dev/marvell/mvxpsecvar.h>
82 
83 #ifdef DEBUG
84 #define STATIC __attribute__ ((noinline)) extern
85 #define _STATIC __attribute__ ((noinline)) extern
86 #define INLINE __attribute__ ((noinline)) extern
87 #define _INLINE __attribute__ ((noinline)) extern
88 #else
89 #define STATIC static
90 #define _STATIC __attribute__ ((unused)) static
91 #define INLINE static inline
92 #define _INLINE __attribute__ ((unused)) static inline
93 #endif
94 
95 /*
96  * IRQ and SRAM spaces for each unit
97  * XXX: move to attach_args
98  */
99 struct {
100 	int		err_int;
101 } mvxpsec_config[] = {
102 	{ .err_int = ARMADAXP_IRQ_CESA0_ERR, }, /* unit 0 */
103 	{ .err_int = ARMADAXP_IRQ_CESA1_ERR, }, /* unit 1 */
104 };
105 #define MVXPSEC_ERR_INT(sc) \
106     mvxpsec_config[device_unit((sc)->sc_dev)].err_int
107 
108 /*
109  * AES
110  */
111 #define MAXBC				(128/32)
112 #define MAXKC				(256/32)
113 #define MAXROUNDS			14
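/*
 * A worked reading of the constants above (illustration only): BC and
 * KC count 32-bit words, so MAXBC = 128/32 = 4 matches the 128-bit AES
 * block and MAXKC = 256/32 = 8 a 256-bit key; AES-256 uses the maximum
 * of 14 rounds (AES-128 uses 10, AES-192 uses 12).
 */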
114 STATIC int mv_aes_ksched(uint8_t[4][MAXKC], int,
115     uint8_t[MAXROUNDS+1][4][MAXBC]);
116 STATIC int mv_aes_deckey(uint8_t *, uint8_t *, int);
117 
118 /*
119  * device driver autoconf interface
120  */
121 STATIC int mvxpsec_match(device_t, cfdata_t, void *);
122 STATIC void mvxpsec_attach(device_t, device_t, void *);
123 STATIC void mvxpsec_evcnt_attach(struct mvxpsec_softc *);
124 
125 /*
126  * register setup
127  */
128 STATIC int mvxpsec_wininit(struct mvxpsec_softc *, enum marvell_tags *);
129 
130 /*
131  * timer(callout) interface
132  *
133  * XXX: callout is not MP safe...
134  */
135 STATIC void mvxpsec_timer(void *);
136 
137 /*
138  * interrupt interface
139  */
140 STATIC int mvxpsec_intr(void *);
141 INLINE void mvxpsec_intr_cleanup(struct mvxpsec_softc *);
142 STATIC int mvxpsec_eintr(void *);
143 STATIC uint32_t mvxpsec_intr_ack(struct mvxpsec_softc *);
144 STATIC uint32_t mvxpsec_eintr_ack(struct mvxpsec_softc *);
145 INLINE void mvxpsec_intr_cnt(struct mvxpsec_softc *, int);
146 
147 /*
148  * memory allocators and VM management
149  */
150 STATIC struct mvxpsec_devmem *mvxpsec_alloc_devmem(struct mvxpsec_softc *,
151     paddr_t, int);
152 STATIC int mvxpsec_init_sram(struct mvxpsec_softc *);
153 
154 /*
155  * Low-level DMA interface
156  */
157 STATIC int mvxpsec_init_dma(struct mvxpsec_softc *,
158     struct marvell_attach_args *);
159 INLINE int mvxpsec_dma_wait(struct mvxpsec_softc *);
160 INLINE int mvxpsec_acc_wait(struct mvxpsec_softc *);
161 INLINE struct mvxpsec_descriptor_handle *mvxpsec_dma_getdesc(struct mvxpsec_softc *);
162 _INLINE void mvxpsec_dma_putdesc(struct mvxpsec_softc *, struct mvxpsec_descriptor_handle *);
163 INLINE void mvxpsec_dma_setup(struct mvxpsec_descriptor_handle *,
164     uint32_t, uint32_t, uint32_t);
165 INLINE void mvxpsec_dma_cat(struct mvxpsec_softc *,
166     struct mvxpsec_descriptor_handle *, struct mvxpsec_descriptor_handle *);
167 
168 /*
169  * High-level DMA interface
170  */
171 INLINE int mvxpsec_dma_copy0(struct mvxpsec_softc *,
172     mvxpsec_dma_ring *, uint32_t, uint32_t, uint32_t);
173 INLINE int mvxpsec_dma_copy(struct mvxpsec_softc *,
174     mvxpsec_dma_ring *, uint32_t, uint32_t, uint32_t);
175 INLINE int mvxpsec_dma_acc_activate(struct mvxpsec_softc *,
176     mvxpsec_dma_ring *);
177 INLINE void mvxpsec_dma_finalize(struct mvxpsec_softc *,
178     mvxpsec_dma_ring *);
179 INLINE void mvxpsec_dma_free(struct mvxpsec_softc *,
180     mvxpsec_dma_ring *);
181 INLINE int mvxpsec_dma_copy_packet(struct mvxpsec_softc *, struct mvxpsec_packet *);
182 INLINE int mvxpsec_dma_sync_packet(struct mvxpsec_softc *, struct mvxpsec_packet *);
183 
184 /*
185  * Session management interface (OpenCrypto)
186  */
187 #define MVXPSEC_SESSION(sid)	((sid) & 0x0fffffff)
188 #define MVXPSEC_SID(crd, sesn)	(((crd) << 28) | ((sesn) & 0x0fffffff))
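/*
 * Example (illustrative): for driver unit 1 and session slot 5,
 * MVXPSEC_SID(1, 5) == 0x10000005, and MVXPSEC_SESSION(0x10000005)
 * recovers slot 5; the top 4 bits carry the driver unit.
 */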
189 /* pool management */
190 STATIC int mvxpsec_session_ctor(void *, void *, int);
191 STATIC void mvxpsec_session_dtor(void *, void *);
192 STATIC int mvxpsec_packet_ctor(void *, void *, int);
193 STATIC void mvxpsec_packet_dtor(void *, void *);
194 
195 /* session management */
196 STATIC struct mvxpsec_session *mvxpsec_session_alloc(struct mvxpsec_softc *);
197 STATIC void mvxpsec_session_dealloc(struct mvxpsec_session *);
198 INLINE struct mvxpsec_session *mvxpsec_session_lookup(struct mvxpsec_softc *, int);
199 INLINE int mvxpsec_session_ref(struct mvxpsec_session *);
200 INLINE void mvxpsec_session_unref(struct mvxpsec_session *);
201 
202 /* packet management */
203 STATIC struct mvxpsec_packet *mvxpsec_packet_alloc(struct mvxpsec_session *);
204 INLINE void mvxpsec_packet_enqueue(struct mvxpsec_packet *);
205 STATIC void mvxpsec_packet_dealloc(struct mvxpsec_packet *);
206 STATIC int mvxpsec_done_packet(struct mvxpsec_packet *);
207 
208 /* session header management */
209 STATIC int mvxpsec_header_finalize(struct mvxpsec_packet *);
210 
211 /* packet queue management */
212 INLINE void mvxpsec_drop(struct mvxpsec_softc *, struct cryptop *, struct mvxpsec_packet *, int);
213 STATIC int mvxpsec_dispatch_queue(struct mvxpsec_softc *);
214 
215 /* opencrypto operation */
216 INLINE int mvxpsec_parse_crd(struct mvxpsec_packet *, struct cryptodesc *);
217 INLINE int mvxpsec_parse_crp(struct mvxpsec_packet *);
218 
219 /* payload data management */
220 INLINE int mvxpsec_packet_setcrp(struct mvxpsec_packet *, struct cryptop *);
221 STATIC int mvxpsec_packet_setdata(struct mvxpsec_packet *, void *, uint32_t);
222 STATIC int mvxpsec_packet_setmbuf(struct mvxpsec_packet *, struct mbuf *);
223 STATIC int mvxpsec_packet_setuio(struct mvxpsec_packet *, struct uio *);
224 STATIC int mvxpsec_packet_rdata(struct mvxpsec_packet *, int, int, void *);
225 _STATIC int mvxpsec_packet_wdata(struct mvxpsec_packet *, int, int, void *);
226 STATIC int mvxpsec_packet_write_iv(struct mvxpsec_packet *, void *, int);
227 STATIC int mvxpsec_packet_copy_iv(struct mvxpsec_packet *, int, int);
228 
229 /* key pre-computation */
230 STATIC int mvxpsec_key_precomp(int, void *, int, void *, void *);
231 STATIC int mvxpsec_hmac_precomp(int, void *, int, void *, void *);
232 
233 /* crypto operation management */
234 INLINE void mvxpsec_packet_reset_op(struct mvxpsec_packet *);
235 INLINE void mvxpsec_packet_update_op_order(struct mvxpsec_packet *, int);
236 
237 /*
238  * parameter converters
239  */
240 INLINE uint32_t mvxpsec_alg2acc(uint32_t alg);
241 INLINE uint32_t mvxpsec_aesklen(int klen);
242 
243 /*
244  * string formatters
245  */
246 _STATIC const char *s_ctrlreg(uint32_t);
247 _STATIC const char *s_winreg(uint32_t);
248 _STATIC const char *s_errreg(uint32_t);
249 _STATIC const char *s_xpsecintr(uint32_t);
250 _STATIC const char *s_ctlalg(uint32_t);
251 _STATIC const char *s_xpsec_op(uint32_t);
252 _STATIC const char *s_xpsec_enc(uint32_t);
253 _STATIC const char *s_xpsec_mac(uint32_t);
254 _STATIC const char *s_xpsec_frag(uint32_t);
255 
256 /*
257  * debugging supports
258  */
259 #ifdef MVXPSEC_DEBUG
260 _STATIC void mvxpsec_dump_dmaq(struct mvxpsec_descriptor_handle *);
261 _STATIC void mvxpsec_dump_reg(struct mvxpsec_softc *);
262 _STATIC void mvxpsec_dump_sram(const char *, struct mvxpsec_softc *, size_t);
263 _STATIC void mvxpsec_dump_data(const char *, void *, size_t);
264 
265 _STATIC void mvxpsec_dump_packet(const char *, struct mvxpsec_packet *);
266 _STATIC void mvxpsec_dump_packet_data(const char *, struct mvxpsec_packet *);
267 _STATIC void mvxpsec_dump_packet_desc(const char *, struct mvxpsec_packet *);
268 
269 _STATIC void mvxpsec_dump_acc_config(const char *, uint32_t);
270 _STATIC void mvxpsec_dump_acc_encdata(const char *, uint32_t, uint32_t);
271 _STATIC void mvxpsec_dump_acc_enclen(const char *, uint32_t);
272 _STATIC void mvxpsec_dump_acc_enckey(const char *, uint32_t);
273 _STATIC void mvxpsec_dump_acc_enciv(const char *, uint32_t);
274 _STATIC void mvxpsec_dump_acc_macsrc(const char *, uint32_t);
275 _STATIC void mvxpsec_dump_acc_macdst(const char *, uint32_t);
276 _STATIC void mvxpsec_dump_acc_maciv(const char *, uint32_t);
277 #endif
278 
279 /*
280  * global configurations, params, work spaces, ...
281  *
282  * XXX: use sysctl for global configurations
283  */
284 /* waiting for device */
285 static int mvxpsec_wait_interval = 10;		/* usec */
286 static int mvxpsec_wait_retry = 100;		/* times = wait for 1 [msec] */
287 #ifdef MVXPSEC_DEBUG
288 static uint32_t mvxpsec_debug = MVXPSEC_DEBUG;	/* debug level */
289 #endif
290 
291 /*
292  * Register accessors
293  */
294 #define MVXPSEC_WRITE(sc, off, val) \
295 	bus_space_write_4((sc)->sc_iot, (sc)->sc_ioh, (off), (val))
296 #define MVXPSEC_READ(sc, off) \
297 	bus_space_read_4((sc)->sc_iot, (sc)->sc_ioh, (off))
298 
299 /*
300  * device driver autoconf interface
301  */
302 CFATTACH_DECL2_NEW(mvxpsec_mbus, sizeof(struct mvxpsec_softc),
303     mvxpsec_match, mvxpsec_attach, NULL, NULL, NULL, NULL);
304 
305 STATIC int
306 mvxpsec_match(device_t dev, cfdata_t match, void *aux)
307 {
308 	struct marvell_attach_args *mva = aux;
309 	uint32_t tag;
310 	int window;
311 
312 	if (strcmp(mva->mva_name, match->cf_name) != 0)
313 		return 0;
314 	if (mva->mva_offset == MVA_OFFSET_DEFAULT)
315 		return 0;
316 
317 	switch (mva->mva_unit) {
318 	case 0:
319 		tag = ARMADAXP_TAG_CRYPT0;
320 		break;
321 	case 1:
322 		tag = ARMADAXP_TAG_CRYPT1;
323 		break;
324 	default:
325 		aprint_error_dev(dev,
326 		    "unit %d is not supported\n", mva->mva_unit);
327 		return 0;
328 	}
329 
330 	window = mvsoc_target(tag, NULL, NULL, NULL, NULL);
331 	if (window >= nwindow) {
332 		aprint_error_dev(dev,
333 		    "Security Accelerator SRAM is not configured.\n");
334 		return 0;
335 	}
336 
337 	return 1;
338 }
339 
340 STATIC void
341 mvxpsec_attach(device_t parent, device_t self, void *aux)
342 {
343 	struct marvell_attach_args *mva = aux;
344 	struct mvxpsec_softc *sc = device_private(self);
345 	int v;
346 	int i;
347 
348 	sc->sc_dev = self;
349 
350 	aprint_normal(": Marvell Crypto Engines and Security Accelerator\n");
351 	aprint_naive("\n");
352 #ifdef MVXPSEC_MULTI_PACKET
353 	aprint_normal_dev(sc->sc_dev, "multi-packet chained mode enabled.\n");
354 #else
355 	aprint_normal_dev(sc->sc_dev, "multi-packet chained mode disabled.\n");
356 #endif
357 	aprint_normal_dev(sc->sc_dev,
358 	    "Max %d sessions.\n", MVXPSEC_MAX_SESSIONS);
359 
360 	/* mutex */
361 	mutex_init(&sc->sc_session_mtx, MUTEX_DEFAULT, IPL_NET);
362 	mutex_init(&sc->sc_dma_mtx, MUTEX_DEFAULT, IPL_NET);
363 	mutex_init(&sc->sc_queue_mtx, MUTEX_DEFAULT, IPL_NET);
364 
365 	/* Packet queue */
366 	SIMPLEQ_INIT(&sc->sc_wait_queue);
367 	SIMPLEQ_INIT(&sc->sc_run_queue);
368 	SLIST_INIT(&sc->sc_free_list);
369 	sc->sc_wait_qlen = 0;
370 #ifdef MVXPSEC_MULTI_PACKET
371 	sc->sc_wait_qlimit = 16;
372 #else
373 	sc->sc_wait_qlimit = 0;
374 #endif
375 	sc->sc_free_qlen = 0;
376 
377 	/* Timer */
378 	callout_init(&sc->sc_timeout, 0); /* XXX: use CALLOUT_MPSAFE */
379 	callout_setfunc(&sc->sc_timeout, mvxpsec_timer, sc);
380 
381 	/* I/O */
382 	sc->sc_iot = mva->mva_iot;
383 	if (bus_space_subregion(mva->mva_iot, mva->mva_ioh,
384 	    mva->mva_offset, mva->mva_size, &sc->sc_ioh)) {
385 		aprint_error_dev(self, "Cannot map registers\n");
386 		return;
387 	}
388 
389 	/* DMA */
390 	sc->sc_dmat = mva->mva_dmat;
391 	if (mvxpsec_init_dma(sc, mva) < 0)
392 		return;
393 
394 	/* SRAM */
395 	if (mvxpsec_init_sram(sc) < 0)
396 		return;
397 
398 	/* Registers */
399 	mvxpsec_wininit(sc, mva->mva_tags);
400 
401 	/* INTR */
402 	MVXPSEC_WRITE(sc, MVXPSEC_INT_MASK, MVXPSEC_DEFAULT_INT);
403 	MVXPSEC_WRITE(sc, MV_TDMA_ERR_MASK, MVXPSEC_DEFAULT_ERR);
404 	sc->sc_done_ih =
405 	    marvell_intr_establish(mva->mva_irq, IPL_NET, mvxpsec_intr, sc);
406 	/* XXX: should pass error IRQ using mva */
407 	sc->sc_error_ih = marvell_intr_establish(MVXPSEC_ERR_INT(sc),
408 	    IPL_NET, mvxpsec_eintr, sc);
409 	aprint_normal_dev(self,
410 	    "Error Reporting IRQ %d\n", MVXPSEC_ERR_INT(sc));
411 
412 	/* Initialize TDMA (It's enabled here, but waiting for SA) */
413 	if (mvxpsec_dma_wait(sc) < 0)
414 		panic("%s: DMA DEVICE not responding\n", __func__);
415 	MVXPSEC_WRITE(sc, MV_TDMA_CNT, 0);
416 	MVXPSEC_WRITE(sc, MV_TDMA_SRC, 0);
417 	MVXPSEC_WRITE(sc, MV_TDMA_DST, 0);
418 	MVXPSEC_WRITE(sc, MV_TDMA_NXT, 0);
419 	MVXPSEC_WRITE(sc, MV_TDMA_CUR, 0);
420 	v  = MVXPSEC_READ(sc, MV_TDMA_CONTROL);
421 	v |= MV_TDMA_CONTROL_ENABLE;
422 	MVXPSEC_WRITE(sc, MV_TDMA_CONTROL, v);
423 
424 	/* Initialize SA */
425 	if (mvxpsec_acc_wait(sc) < 0)
426 		panic("%s: MVXPSEC not responding\n", __func__);
427 	v  = MVXPSEC_READ(sc, MV_ACC_CONFIG);
428 	v &= ~MV_ACC_CONFIG_STOP_ON_ERR;
429 	v |= MV_ACC_CONFIG_MULT_PKT;
430 	v |= MV_ACC_CONFIG_WAIT_TDMA;
431 	v |= MV_ACC_CONFIG_ACT_TDMA;
432 	MVXPSEC_WRITE(sc, MV_ACC_CONFIG, v);
433 	MVXPSEC_WRITE(sc, MV_ACC_DESC, 0);
434 	MVXPSEC_WRITE(sc, MV_ACC_COMMAND, MV_ACC_COMMAND_STOP);
435 
436 	/* Session */
437 	sc->sc_session_pool =
438 	    pool_cache_init(sizeof(struct mvxpsec_session), 0, 0, 0,
439 	    "mvxpsecpl", NULL, IPL_NET,
440 	    mvxpsec_session_ctor, mvxpsec_session_dtor, sc);
441 	pool_cache_sethiwat(sc->sc_session_pool, MVXPSEC_MAX_SESSIONS);
442 	pool_cache_setlowat(sc->sc_session_pool, MVXPSEC_MAX_SESSIONS / 2);
443 	sc->sc_last_session = NULL;
444 
445 	/* Packet */
446 	sc->sc_packet_pool =
447 	    pool_cache_init(sizeof(struct mvxpsec_packet), 0, 0, 0,
448 	    "mvxpsec_pktpl", NULL, IPL_NET,
449 	    mvxpsec_packet_ctor, mvxpsec_packet_dtor, sc);
450 	pool_cache_sethiwat(sc->sc_packet_pool, MVXPSEC_MAX_SESSIONS);
451 	pool_cache_setlowat(sc->sc_packet_pool, MVXPSEC_MAX_SESSIONS / 2);
452 
453 	/* Register to EVCNT framework */
454 	mvxpsec_evcnt_attach(sc);
455 
456 	/* Register to Opencrypto */
457 	for (i = 0; i < MVXPSEC_MAX_SESSIONS; i++) {
458 		sc->sc_sessions[i] = NULL;
459 	}
460 	if (mvxpsec_register(sc))
461 		panic("cannot initialize OpenCrypto module.\n");
462 
463 	return;
464 }
465 
466 STATIC void
467 mvxpsec_evcnt_attach(struct mvxpsec_softc *sc)
468 {
469 	struct mvxpsec_evcnt *sc_ev = &sc->sc_ev;
470 
471 	evcnt_attach_dynamic(&sc_ev->intr_all, EVCNT_TYPE_INTR,
472 	    NULL, device_xname(sc->sc_dev), "Main Intr.");
473 	evcnt_attach_dynamic(&sc_ev->intr_auth, EVCNT_TYPE_INTR,
474 	    NULL, device_xname(sc->sc_dev), "Auth Intr.");
475 	evcnt_attach_dynamic(&sc_ev->intr_des, EVCNT_TYPE_INTR,
476 	    NULL, device_xname(sc->sc_dev), "DES Intr.");
477 	evcnt_attach_dynamic(&sc_ev->intr_aes_enc, EVCNT_TYPE_INTR,
478 	    NULL, device_xname(sc->sc_dev), "AES-Encrypt Intr.");
479 	evcnt_attach_dynamic(&sc_ev->intr_aes_dec, EVCNT_TYPE_INTR,
480 	    NULL, device_xname(sc->sc_dev), "AES-Decrypt Intr.");
481 	evcnt_attach_dynamic(&sc_ev->intr_enc, EVCNT_TYPE_INTR,
482 	    NULL, device_xname(sc->sc_dev), "Crypto Intr.");
483 	evcnt_attach_dynamic(&sc_ev->intr_sa, EVCNT_TYPE_INTR,
484 	    NULL, device_xname(sc->sc_dev), "SA Intr.");
485 	evcnt_attach_dynamic(&sc_ev->intr_acctdma, EVCNT_TYPE_INTR,
486 	    NULL, device_xname(sc->sc_dev), "AccTDMA Intr.");
487 	evcnt_attach_dynamic(&sc_ev->intr_comp, EVCNT_TYPE_INTR,
488 	    NULL, device_xname(sc->sc_dev), "TDMA-Complete Intr.");
489 	evcnt_attach_dynamic(&sc_ev->intr_own, EVCNT_TYPE_INTR,
490 	    NULL, device_xname(sc->sc_dev), "TDMA-Ownership Intr.");
491 	evcnt_attach_dynamic(&sc_ev->intr_acctdma_cont, EVCNT_TYPE_INTR,
492 	    NULL, device_xname(sc->sc_dev), "AccTDMA-Continue Intr.");
493 
494 	evcnt_attach_dynamic(&sc_ev->session_new, EVCNT_TYPE_MISC,
495 	    NULL, device_xname(sc->sc_dev), "New-Session");
496 	evcnt_attach_dynamic(&sc_ev->session_free, EVCNT_TYPE_MISC,
497 	    NULL, device_xname(sc->sc_dev), "Free-Session");
498 
499 	evcnt_attach_dynamic(&sc_ev->packet_ok, EVCNT_TYPE_MISC,
500 	    NULL, device_xname(sc->sc_dev), "Packet-OK");
501 	evcnt_attach_dynamic(&sc_ev->packet_err, EVCNT_TYPE_MISC,
502 	    NULL, device_xname(sc->sc_dev), "Packet-ERR");
503 
504 	evcnt_attach_dynamic(&sc_ev->dispatch_packets, EVCNT_TYPE_MISC,
505 	    NULL, device_xname(sc->sc_dev), "Packet-Dispatch");
506 	evcnt_attach_dynamic(&sc_ev->dispatch_queue, EVCNT_TYPE_MISC,
507 	    NULL, device_xname(sc->sc_dev), "Queue-Dispatch");
508 	evcnt_attach_dynamic(&sc_ev->queue_full, EVCNT_TYPE_MISC,
509 	    NULL, device_xname(sc->sc_dev), "Queue-Full");
510 	evcnt_attach_dynamic(&sc_ev->max_dispatch, EVCNT_TYPE_MISC,
511 	    NULL, device_xname(sc->sc_dev), "Max-Dispatch");
512 	evcnt_attach_dynamic(&sc_ev->max_done, EVCNT_TYPE_MISC,
513 	    NULL, device_xname(sc->sc_dev), "Max-Done");
514 }
515 
516 /*
517  * Register setup
518  */
519 STATIC int
mvxpsec_wininit(struct mvxpsec_softc *sc, enum marvell_tags *tags)
520 {
521 	device_t pdev = device_parent(sc->sc_dev);
522 	uint64_t base;
523 	uint32_t size, reg;
524 	int window, target, attr, rv, i;
525 
526 	/* disable all windows */
527 	for (window = 0; window < MV_TDMA_NWINDOW; window++)
528 	{
529 		MVXPSEC_WRITE(sc, MV_TDMA_BAR(window), 0);
530 		MVXPSEC_WRITE(sc, MV_TDMA_ATTR(window), 0);
531 	}
532 
533 	for (window = 0, i = 0;
534 	    tags[i] != MARVELL_TAG_UNDEFINED && window < MV_TDMA_NWINDOW; i++) {
535 		rv = marvell_winparams_by_tag(pdev, tags[i],
536 		    &target, &attr, &base, &size);
537 		if (rv != 0 || size == 0)
538 			continue;
539 
540 		if (base > 0xffffffffULL) {
541 			aprint_error_dev(sc->sc_dev,
542 			    "can't remap window %d\n", window);
543 			continue;
544 		}
545 
546 		reg  = MV_TDMA_BAR_BASE(base);
547 		MVXPSEC_WRITE(sc, MV_TDMA_BAR(window), reg);
548 
549 		reg  = MV_TDMA_ATTR_TARGET(target);
550 		reg |= MV_TDMA_ATTR_ATTR(attr);
551 		reg |= MV_TDMA_ATTR_SIZE(size);
552 		reg |= MV_TDMA_ATTR_ENABLE;
553 		MVXPSEC_WRITE(sc, MV_TDMA_ATTR(window), reg);
554 
555 		window++;
556 	}
557 
558 	return 0;
559 }
560 
561 /*
562  * Timer handling
563  */
564 STATIC void
565 mvxpsec_timer(void *aux)
566 {
567 	struct mvxpsec_softc *sc = aux;
568 	struct mvxpsec_packet *mv_p;
569 	uint32_t reg;
570 	int ndone;
571 	int refill;
572 	int s;
573 
574 	/* IPL_SOFTCLOCK */
575 
576 	log(LOG_ERR, "%s: device timeout.\n", __func__);
577 #ifdef MVXPSEC_DEBUG
578 	mvxpsec_dump_reg(sc);
579 #endif
580 
581 	s = splnet();
582 	/* stop security accelerator */
583 	MVXPSEC_WRITE(sc, MV_ACC_COMMAND, MV_ACC_COMMAND_STOP);
584 
585 	/* stop TDMA */
586 	MVXPSEC_WRITE(sc, MV_TDMA_CONTROL, 0);
587 
588 	/* cleanup packet queue */
589 	mutex_enter(&sc->sc_queue_mtx);
590 	ndone = 0;
591 	while ( (mv_p = SIMPLEQ_FIRST(&sc->sc_run_queue)) != NULL) {
592 		SIMPLEQ_REMOVE_HEAD(&sc->sc_run_queue, queue);
593 
594 		mv_p->crp->crp_etype = EINVAL;
595 		mvxpsec_done_packet(mv_p);
596 		ndone++;
597 	}
598 	MVXPSEC_EVCNT_MAX(sc, max_done, ndone);
599 	sc->sc_flags &= ~HW_RUNNING;
600 	refill = (sc->sc_wait_qlen > 0) ? 1 : 0;
601 	mutex_exit(&sc->sc_queue_mtx);
602 
603 	/* reenable TDMA */
604 	if (mvxpsec_dma_wait(sc) < 0)
605 		panic("%s: failed to reset DMA DEVICE. give up.", __func__);
606 	MVXPSEC_WRITE(sc, MV_TDMA_CNT, 0);
607 	MVXPSEC_WRITE(sc, MV_TDMA_SRC, 0);
608 	MVXPSEC_WRITE(sc, MV_TDMA_DST, 0);
609 	MVXPSEC_WRITE(sc, MV_TDMA_CUR, 0);
610 	MVXPSEC_WRITE(sc, MV_TDMA_NXT, 0);
611 	reg  = MV_TDMA_DEFAULT_CONTROL;
612 	reg |= MV_TDMA_CONTROL_ENABLE;
613 	MVXPSEC_WRITE(sc, MV_TDMA_CONTROL, reg);
614 
615 	if (mvxpsec_acc_wait(sc) < 0)
616 		panic("%s: failed to reset MVXPSEC. give up.", __func__);
617 	reg  = MV_ACC_CONFIG_MULT_PKT;
618 	reg |= MV_ACC_CONFIG_WAIT_TDMA;
619 	reg |= MV_ACC_CONFIG_ACT_TDMA;
620 	MVXPSEC_WRITE(sc, MV_ACC_CONFIG, reg);
621 	MVXPSEC_WRITE(sc, MV_ACC_DESC, 0);
622 
623 	if (refill) {
624 		mutex_enter(&sc->sc_queue_mtx);
625 		mvxpsec_dispatch_queue(sc);
626 		mutex_exit(&sc->sc_queue_mtx);
627 	}
628 
629 	crypto_unblock(sc->sc_cid, CRYPTO_SYMQ|CRYPTO_ASYMQ);
630 	splx(s);
631 }
632 
633 /*
634  * DMA handling
635  */
636 
637 /*
638  * Allocate kernel devmem and DMA safe memory with bus_dma API
639  * used for DMA descriptors.
640  *
641  * if phys != 0, assume phys points to DMA safe memory and bypass
642  * the allocator.
643  */
644 STATIC struct mvxpsec_devmem *
645 mvxpsec_alloc_devmem(struct mvxpsec_softc *sc, paddr_t phys, int size)
646 {
647 	struct mvxpsec_devmem *devmem;
648 	bus_dma_segment_t seg;
649 	int rseg;
650 	int err;
651 
652 	if (sc == NULL)
653 		return NULL;
654 
655 	devmem = kmem_alloc(sizeof(*devmem), KM_SLEEP);
656 	devmem->size = size;
657 
658 	if (phys) {
659 		seg.ds_addr = phys;
660 		seg.ds_len = devmem->size;
661 		rseg = 1;
662 		err = 0;
663 	}
664 	else {
665 		err = bus_dmamem_alloc(sc->sc_dmat,
666 		    devmem->size, PAGE_SIZE, 0,
667 		    &seg, MVXPSEC_DMA_MAX_SEGS, &rseg, BUS_DMA_NOWAIT);
668 	}
669 	if (err) {
670 		aprint_error_dev(sc->sc_dev, "can't alloc DMA buffer\n");
671 		goto fail_kmem_free;
672 	}
673 
674 	err = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
675 	     devmem->size, &devmem->kva, BUS_DMA_NOWAIT);
676 	if (err) {
677 		aprint_error_dev(sc->sc_dev, "can't map DMA buffer\n");
678 		goto fail_dmamem_free;
679 	}
680 
681 	err = bus_dmamap_create(sc->sc_dmat,
682 	    size, 1, size, 0, BUS_DMA_NOWAIT, &devmem->map);
683 	if (err) {
684 		aprint_error_dev(sc->sc_dev, "can't create DMA map\n");
685 		goto fail_unmap;
686 	}
687 
688 	err = bus_dmamap_load(sc->sc_dmat,
689 	    devmem->map, devmem->kva, devmem->size, NULL,
690 	    BUS_DMA_NOWAIT);
691 	if (err) {
692 		aprint_error_dev(sc->sc_dev,
693 		   "can't load DMA buffer VA:%p PA:0x%08x\n",
694 		    devmem->kva, (int)seg.ds_addr);
695 		goto fail_destroy;
696 	}
697 
698 	return devmem;
699 
700 fail_destroy:
701 	bus_dmamap_destroy(sc->sc_dmat, devmem->map);
702 fail_unmap:
703 	bus_dmamem_unmap(sc->sc_dmat, devmem->kva, devmem->size);
704 fail_dmamem_free:
705 	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
706 fail_kmem_free:
707 	kmem_free(devmem, sizeof(*devmem));
708 
709 	return NULL;
710 }
711 
712 /*
713  * Get DMA Descriptor from (DMA safe) descriptor pool.
714  */
715 INLINE struct mvxpsec_descriptor_handle *
716 mvxpsec_dma_getdesc(struct mvxpsec_softc *sc)
717 {
718 	struct mvxpsec_descriptor_handle *entry;
719 
720 	/* must be called with sc->sc_dma_mtx held */
721 	KASSERT(mutex_owned(&sc->sc_dma_mtx));
722 
723 	if (sc->sc_desc_ring_prod == sc->sc_desc_ring_cons)
724 		return NULL;
725 
726 	entry = &sc->sc_desc_ring[sc->sc_desc_ring_prod];
727 	sc->sc_desc_ring_prod++;
728 	if (sc->sc_desc_ring_prod >= sc->sc_desc_ring_size)
729 		sc->sc_desc_ring_prod -= sc->sc_desc_ring_size;
730 
731 	return entry;
732 }
733 
734 /*
735  * Put DMA Descriptor to descriptor pool.
736  */
737 _INLINE void
738 mvxpsec_dma_putdesc(struct mvxpsec_softc *sc,
739     struct mvxpsec_descriptor_handle *dh)
740 {
741 	/* must be called with sc->sc_dma_mtx held */
742 	KASSERT(mutex_owned(&sc->sc_dma_mtx));
743 
744 	sc->sc_desc_ring_cons++;
745 	if (sc->sc_desc_ring_cons >= sc->sc_desc_ring_size)
746 		sc->sc_desc_ring_cons -= sc->sc_desc_ring_size;
747 
748 	return;
749 }
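/*
 * Illustration only (this helper is not part of the driver): with the
 * convention above -- prod == cons means "no free descriptor", and the
 * ring starts with cons = size - 1 -- the number of descriptors still
 * available works out as below.
 */
_INLINE int
mvxpsec_dma_avail_example(struct mvxpsec_softc *sc)
{
	/* a freshly initialized ring reports sc_desc_ring_size - 1 */
	return (sc->sc_desc_ring_cons - sc->sc_desc_ring_prod
	    + sc->sc_desc_ring_size) % sc->sc_desc_ring_size;
}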
750 
751 /*
752  * Setup DMA Descriptor
753  * copy 'size' bytes from 'src' to 'dst'.
754  * 'src' or 'dst' must be an SRAM address.
755  */
756 INLINE void
757 mvxpsec_dma_setup(struct mvxpsec_descriptor_handle *dh,
758     uint32_t dst, uint32_t src, uint32_t size)
759 {
760 	struct mvxpsec_descriptor *desc;
761 
762 	desc = (struct mvxpsec_descriptor *)dh->_desc;
763 
764 	desc->tdma_dst = dst;
765 	desc->tdma_src = src;
766 	desc->tdma_word0 = size;
767 	if (size != 0)
768 		desc->tdma_word0 |= MV_TDMA_CNT_OWN;
769 	/* size == 0 is owned by ACC, not TDMA */
770 
771 #ifdef MVXPSEC_DEBUG
772 	mvxpsec_dump_dmaq(dh);
773 #endif
774 }
775 
776 /*
777  * Concatenate 2 DMA descriptors
778  */
779 INLINE void
780 mvxpsec_dma_cat(struct mvxpsec_softc *sc,
781     struct mvxpsec_descriptor_handle *dh1,
782     struct mvxpsec_descriptor_handle *dh2)
783 {
784 	((struct mvxpsec_descriptor*)dh1->_desc)->tdma_nxt = dh2->phys_addr;
785 	MVXPSEC_SYNC_DESC(sc, dh1, BUS_DMASYNC_PREWRITE);
786 }
787 
788 /*
789  * Schedule DMA Copy
790  */
791 INLINE int
792 mvxpsec_dma_copy0(struct mvxpsec_softc *sc, mvxpsec_dma_ring *r,
793     uint32_t dst, uint32_t src, uint32_t size)
794 {
795 	struct mvxpsec_descriptor_handle *dh;
796 
797 	dh = mvxpsec_dma_getdesc(sc);
798 	if (dh == NULL) {
799 		log(LOG_ERR, "%s: descriptor full\n", __func__);
800 		return -1;
801 	}
802 
803 	mvxpsec_dma_setup(dh, dst, src, size);
804 	if (r->dma_head == NULL) {
805 		r->dma_head = dh;
806 		r->dma_last = dh;
807 		r->dma_size = 1;
808 	}
809 	else {
810 		mvxpsec_dma_cat(sc, r->dma_last, dh);
811 		r->dma_last = dh;
812 		r->dma_size++;
813 	}
814 
815 	return 0;
816 }
817 
818 INLINE int
819 mvxpsec_dma_copy(struct mvxpsec_softc *sc, mvxpsec_dma_ring *r,
820     uint32_t dst, uint32_t src, uint32_t size)
821 {
822 	if (size == 0) /* size 0 marks a special (ACC activate) descriptor */
823 		return 0;
824 
825 	return mvxpsec_dma_copy0(sc, r, dst, src, size);
826 }
827 
828 /*
829  * Schedule ACC Activate
830  */
831 INLINE int
832 mvxpsec_dma_acc_activate(struct mvxpsec_softc *sc, mvxpsec_dma_ring *r)
833 {
834 	return mvxpsec_dma_copy0(sc, r, 0, 0, 0);
835 }
836 
837 /*
838  * Finalize DMA setup
839  */
840 INLINE void
841 mvxpsec_dma_finalize(struct mvxpsec_softc *sc, mvxpsec_dma_ring *r)
842 {
843 	struct mvxpsec_descriptor_handle *dh;
844 
845 	dh = r->dma_last;
846 	((struct mvxpsec_descriptor*)dh->_desc)->tdma_nxt = 0;
847 	MVXPSEC_SYNC_DESC(sc, dh, BUS_DMASYNC_PREWRITE);
848 }
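/*
 * Typical ring construction, as performed by mvxpsec_dma_copy_packet()
 * below (sketch only, names abbreviated):
 *
 *	mvxpsec_dma_copy(sc, r, sram_hdr, dram_hdr, hdrlen);	DRAM->SRAM
 *	mvxpsec_dma_copy(sc, r, sram_buf, dram_buf, buflen);	DRAM->SRAM
 *	mvxpsec_dma_acc_activate(sc, r);	size == 0: kick the ACC
 *	mvxpsec_dma_copy(sc, r, dram_buf, sram_buf, buflen);	SRAM->DRAM
 *	mvxpsec_dma_finalize(sc, r);		terminate the chain
 */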
849 
850 /*
851  * Free entire DMA ring
852  */
853 INLINE void
854 mvxpsec_dma_free(struct mvxpsec_softc *sc, mvxpsec_dma_ring *r)
855 {
856 	sc->sc_desc_ring_cons += r->dma_size;
857 	if (sc->sc_desc_ring_cons >= sc->sc_desc_ring_size)
858 		sc->sc_desc_ring_cons -= sc->sc_desc_ring_size;
859 	r->dma_head = NULL;
860 	r->dma_last = NULL;
861 	r->dma_size = 0;
862 }
863 
864 /*
865  * create DMA descriptor chain for the packet
866  */
867 INLINE int
868 mvxpsec_dma_copy_packet(struct mvxpsec_softc *sc, struct mvxpsec_packet *mv_p)
869 {
870 	struct mvxpsec_session *mv_s = mv_p->mv_s;
871 	uint32_t src, dst, len;
872 	uint32_t pkt_off, pkt_off_r;
873 	int err;
874 	int i;
875 
876 	/* must be called with sc->sc_dma_mtx held */
877 	KASSERT(mutex_owned(&sc->sc_dma_mtx));
878 
879 	/*
880 	 * set offset for mem->device copy
881 	 *
882 	 * typical packet image:
883 	 *
884 	 *   enc_ivoff
885 	 *   mac_off
886 	 *   |
887 	 *   |    enc_off
888 	 *   |    |
889 	 *   v    v
890 	 *   +----+--------...
891 	 *   |IV  |DATA
892 	 *   +----+--------...
893 	 */
894 	pkt_off = 0;
895 	if (mv_p->mac_off > 0)
896 		pkt_off = mv_p->mac_off;
897 	if ((mv_p->flags & CRP_EXT_IV) == 0 && pkt_off > mv_p->enc_ivoff)
898 		pkt_off = mv_p->enc_ivoff;
899 	if (mv_p->enc_off > 0 && pkt_off > mv_p->enc_off)
900 		pkt_off = mv_p->enc_off;
901 	pkt_off_r = pkt_off;
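	/*
	 * Worked example (illustrative): with mac_off = 0, enc_ivoff = 0
	 * and enc_off = 16 (IV stored in front of the payload), pkt_off
	 * ends up 0, so the copies below start from the head of the
	 * packet and include the IV.
	 */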
902 
903 	/* make DMA descriptors to copy packet header: DRAM -> SRAM */
904 	dst = (uint32_t)MVXPSEC_SRAM_PKT_HDR_PA(sc);
905 	src = (uint32_t)mv_p->pkt_header_map->dm_segs[0].ds_addr;
906 	len = sizeof(mv_p->pkt_header);
907 	err = mvxpsec_dma_copy(sc, &mv_p->dma_ring, dst, src, len);
908 	if (__predict_false(err))
909 		return err;
910 
911 	/*
912 	 * make DMA descriptors to copy session header: DRAM -> SRAM
913 	 * we can reuse the session header in SRAM if the session has not changed.
914 	 */
915 	if (sc->sc_last_session != mv_s) {
916 		dst = (uint32_t)MVXPSEC_SRAM_SESS_HDR_PA(sc);
917 		src = (uint32_t)mv_s->session_header_map->dm_segs[0].ds_addr;
918 		len = sizeof(mv_s->session_header);
919 		err = mvxpsec_dma_copy(sc, &mv_p->dma_ring, dst, src, len);
920 		if (__predict_false(err))
921 			return err;
922 		sc->sc_last_session = mv_s;
923 	}
924 
925 	/* make DMA descriptor to copy payload data: DRAM -> SRAM */
926 	dst = MVXPSEC_SRAM_PAYLOAD_PA(sc, 0);
927 	for (i = 0; i < mv_p->data_map->dm_nsegs; i++) {
928 		src = mv_p->data_map->dm_segs[i].ds_addr;
929 		len = mv_p->data_map->dm_segs[i].ds_len;
930 		if (pkt_off) {
931 			if (len <= pkt_off) {
932 				/* ignore the segment */
933 				dst += len;
934 				pkt_off -= len;
935 				continue;
936 			}
937 			/* copy from the middle of the segment */
938 			dst += pkt_off;
939 			src += pkt_off;
940 			len -= pkt_off;
941 			pkt_off = 0;
942 		}
943 		err = mvxpsec_dma_copy(sc, &mv_p->dma_ring, dst, src, len);
944 		if (__predict_false(err))
945 			return err;
946 		dst += len;
947 	}
948 
949 	/* make special descriptor to activate security accelerator */
950 	err = mvxpsec_dma_acc_activate(sc, &mv_p->dma_ring);
951 	if (__predict_false(err))
952 		return err;
953 
954 	/* make DMA descriptors to copy payload: SRAM -> DRAM */
955 	src = (uint32_t)MVXPSEC_SRAM_PAYLOAD_PA(sc, 0);
956 	for (i = 0; i < mv_p->data_map->dm_nsegs; i++) {
957 		dst = (uint32_t)mv_p->data_map->dm_segs[i].ds_addr;
958 		len = (uint32_t)mv_p->data_map->dm_segs[i].ds_len;
959 		if (pkt_off_r) {
960 			if (len <= pkt_off_r) {
961 				/* ignore the segment */
962 				src += len;
963 				pkt_off_r -= len;
964 				continue;
965 			}
966 			/* copy from the middle of the segment */
967 			src += pkt_off_r;
968 			dst += pkt_off_r;
969 			len -= pkt_off_r;
970 			pkt_off_r = 0;
971 		}
972 		err = mvxpsec_dma_copy(sc, &mv_p->dma_ring, dst, src, len);
973 		if (__predict_false(err))
974 			return err;
975 		src += len;
976 	}
977 	KASSERT(pkt_off == 0);
978 	KASSERT(pkt_off_r == 0);
979 
980 	/*
981 	 * make DMA descriptors to copy packet header: SRAM->DRAM
982 	 * if IV is present in the payload, no need to copy.
983 	 */
984 	if (mv_p->flags & CRP_EXT_IV) {
985 		dst = (uint32_t)mv_p->pkt_header_map->dm_segs[0].ds_addr;
986 		src = (uint32_t)MVXPSEC_SRAM_PKT_HDR_PA(sc);
987 		len = sizeof(mv_p->pkt_header);
988 		err = mvxpsec_dma_copy(sc, &mv_p->dma_ring, dst, src, len);
989 		if (__predict_false(err))
990 			return err;
991 	}
992 
993 	return 0;
994 }
995 
996 INLINE int
997 mvxpsec_dma_sync_packet(struct mvxpsec_softc *sc, struct mvxpsec_packet *mv_p)
998 {
999 	/* sync packet header */
1000 	bus_dmamap_sync(sc->sc_dmat,
1001 	    mv_p->pkt_header_map, 0, sizeof(mv_p->pkt_header),
1002 	    BUS_DMASYNC_PREWRITE);
1003 
1004 #ifdef MVXPSEC_DEBUG
1005 	/* sync session header */
1006 	if (mvxpsec_debug != 0) {
1007 		struct mvxpsec_session *mv_s = mv_p->mv_s;
1008 
1009 		/* only debug code touch the session header after newsession */
1010 		bus_dmamap_sync(sc->sc_dmat,
1011 		    mv_s->session_header_map,
1012 		    0, sizeof(mv_s->session_header),
1013 		    BUS_DMASYNC_PREWRITE);
1014 	}
1015 #endif
1016 
1017 	/* sync packet buffer */
1018 	bus_dmamap_sync(sc->sc_dmat,
1019 	    mv_p->data_map, 0, mv_p->data_len,
1020 	    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
1021 
1022 	return 0;
1023 }
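/*
 * Note on the sync directions above: the packet header is only read by
 * the device (CPU -> device, hence PREWRITE), while the payload is read
 * and then rewritten in place by the device, hence PREWRITE | PREREAD.
 * mvxpsec_done_packet() issues the matching POSTWRITE | POSTREAD before
 * the CPU reads the result back.
 */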
1024 
1025 /*
1026  * Initialize MVXPSEC Internal SRAM
1027  *
1028  * - must be called after DMA initialization.
1029  * - make VM mapping for SRAM area on MBus.
1030  */
1031 STATIC int
1032 mvxpsec_init_sram(struct mvxpsec_softc *sc)
1033 {
1034 	uint32_t tag, target, attr, base, size;
1035 	vaddr_t va;
1036 	int window;
1037 
1038 	switch (device_unit(sc->sc_dev)) {
1039 	case 0:
1040 		tag = ARMADAXP_TAG_CRYPT0;
1041 		break;
1042 	case 1:
1043 		tag = ARMADAXP_TAG_CRYPT1;
1044 		break;
1045 	default:
1046 		aprint_error_dev(sc->sc_dev, "no internal SRAM mapping\n");
1047 		return -1;
1048 	}
1049 
1050 	window = mvsoc_target(tag, &target, &attr, &base, &size);
1051 	if (window >= nwindow) {
1052 		aprint_error_dev(sc->sc_dev, "no internal SRAM mapping\n");
1053 		return -1;
1054 	}
1055 
1056 	if (sizeof(struct mvxpsec_crypt_sram) > size) {
1057 		aprint_error_dev(sc->sc_dev,
1058 		    "SRAM Data Structure Excceeds SRAM window size.\n");
1059 		return -1;
1060 	}
1061 
1062 	aprint_normal_dev(sc->sc_dev,
1063 	    "internal SRAM window at 0x%08x-0x%08x",
1064 	    base, base + size - 1);
1065 	sc->sc_sram_pa = base;
1066 
1067 	/* get vmspace to read/write device internal SRAM */
1068 	va = uvm_km_alloc(kernel_map, PAGE_SIZE, PAGE_SIZE,
1069 			UVM_KMF_VAONLY | UVM_KMF_NOWAIT);
1070 	if (va == 0) {
1071 		aprint_error_dev(sc->sc_dev, "cannot map SRAM window\n");
1072 		sc->sc_sram_va = NULL;
1073 		aprint_normal("\n");
1074 		return 0;
1075 	}
1076 	/* XXX: not working. PMAP_NOCACHE seems to have no effect? */
1077 	pmap_kenter_pa(va, base, VM_PROT_READ|VM_PROT_WRITE, PMAP_NOCACHE);
1078 	pmap_update(pmap_kernel());
1079 	sc->sc_sram_va = (void *)va;
1080 	aprint_normal(" va %p\n", sc->sc_sram_va);
1081 	memset(sc->sc_sram_va, 0xff, MV_ACC_SRAM_SIZE);
1082 
1083 	return 0;
1084 }
1085 
1086 /*
1087  * Initialize TDMA engine.
1088  */
1089 STATIC int
1090 mvxpsec_init_dma(struct mvxpsec_softc *sc, struct marvell_attach_args *mva)
1091 {
1092 	struct mvxpsec_descriptor_handle *dh;
1093 	uint8_t *va;
1094 	paddr_t pa;
1095 	off_t va_off, pa_off;
1096 	int i, n, seg, ndh;
1097 
1098 	/* Init device's control parameters (still disabled) */
1099 	MVXPSEC_WRITE(sc, MV_TDMA_CONTROL, MV_TDMA_DEFAULT_CONTROL);
1100 
1101 	/* Init Software DMA Handlers */
1102 	sc->sc_devmem_desc =
1103 	    mvxpsec_alloc_devmem(sc, 0, PAGE_SIZE * MVXPSEC_DMA_DESC_PAGES);
1104 	ndh = (PAGE_SIZE / sizeof(struct mvxpsec_descriptor))
1105 	    * MVXPSEC_DMA_DESC_PAGES;
1106 	sc->sc_desc_ring =
1107 	    kmem_alloc(sizeof(struct mvxpsec_descriptor_handle) * ndh,
1108 	        KM_SLEEP);
1109 	aprint_normal_dev(sc->sc_dev, "%d DMA handles in %zu bytes array\n",
1110 	    ndh, sizeof(struct mvxpsec_descriptor_handle) * ndh);
1111 
1112 	ndh = 0;
1113 	for (seg = 0; seg < devmem_nseg(sc->sc_devmem_desc); seg++) {
1114 		va = devmem_va(sc->sc_devmem_desc);
1115 		pa = devmem_pa(sc->sc_devmem_desc, seg);
1116 		n = devmem_palen(sc->sc_devmem_desc, seg) /
1117 		       	sizeof(struct mvxpsec_descriptor);
1118 		va_off = (PAGE_SIZE * seg);
1119 		pa_off = 0;
1120 		for (i = 0; i < n; i++) {
1121 			dh = &sc->sc_desc_ring[ndh];
1122 			dh->map = devmem_map(sc->sc_devmem_desc);
1123 			dh->off = va_off + pa_off;
1124 			dh->_desc = (void *)(va + va_off + pa_off);
1125 			dh->phys_addr = pa + pa_off;
1126 			pa_off += sizeof(struct mvxpsec_descriptor);
1127 			ndh++;
1128 		}
1129 	}
1130 	sc->sc_desc_ring_size = ndh;
1131 	sc->sc_desc_ring_prod = 0;
1132 	sc->sc_desc_ring_cons = sc->sc_desc_ring_size - 1;
1133 
1134 	return 0;
1135 }
1136 
1137 /*
1138  * Wait for the TDMA controller to become idle
1139  */
1140 INLINE int
1141 mvxpsec_dma_wait(struct mvxpsec_softc *sc)
1142 {
1143 	int retry = 0;
1144 
1145 	while (MVXPSEC_READ(sc, MV_TDMA_CONTROL) & MV_TDMA_CONTROL_ACT) {
1146 		delay(mvxpsec_wait_interval);
1147 		if (retry++ >= mvxpsec_wait_retry)
1148 			return -1;
1149 	}
1150 	return 0;
1151 }
1152 
1153 /*
1154  * Wait for the Security Accelerator to become idle
1155  */
1156 INLINE int
1157 mvxpsec_acc_wait(struct mvxpsec_softc *sc)
1158 {
1159 	int retry = 0;
1160 
1161 	while (MVXPSEC_READ(sc, MV_ACC_COMMAND) & MV_ACC_COMMAND_ACT) {
1162 		delay(mvxpsec_wait_interval);
1163 		if (++retry >= mvxpsec_wait_retry)
1164 			return -1;
1165 	}
1166 	return 0;
1167 }
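/*
 * With the defaults above (mvxpsec_wait_interval = 10 usec,
 * mvxpsec_wait_retry = 100), both wait loops give up after roughly
 * 10 * 100 = 1000 usec, i.e. the 1 msec noted where those knobs are
 * defined.
 */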
1168 
1169 /*
1170  * Entry of interrupt handler
1171  *
1172  * register this to kernel via marvell_intr_establish()
1173  */
1174 int
1175 mvxpsec_intr(void *arg)
1176 {
1177 	struct mvxpsec_softc *sc = arg;
1178 	uint32_t v;
1179 
1180 	/* IPL_NET */
1181 	while ((v = mvxpsec_intr_ack(sc)) != 0) {
1182 		mvxpsec_intr_cnt(sc, v);
1183 		MVXPSEC_PRINTF(MVXPSEC_DEBUG_INTR, "MVXPSEC Intr 0x%08x\n", v);
1184 		MVXPSEC_PRINTF(MVXPSEC_DEBUG_INTR, "%s\n", s_xpsecintr(v));
1185 #ifdef MVXPSEC_DEBUG
1186 		mvxpsec_dump_reg(sc);
1187 #endif
1188 
1189 		/* call high-level handlers */
1190 		if (v & MVXPSEC_INT_ACCTDMA)
1191 			mvxpsec_done(sc);
1192 	}
1193 
1194 	return 0;
1195 }
1196 
1197 INLINE void
1198 mvxpsec_intr_cleanup(struct mvxpsec_softc *sc)
1199 {
1200 	struct mvxpsec_packet *mv_p;
1201 
1202 	/* must be called with sc->sc_dma_mtx held */
1203 	KASSERT(mutex_owned(&sc->sc_dma_mtx));
1204 
1205 	/*
1206 	 * there is only one interrupt source for the run_queue;
1207 	 * no one else touches sc_run_queue.
1208 	 */
1209 	SIMPLEQ_FOREACH(mv_p, &sc->sc_run_queue, queue)
1210 		mvxpsec_dma_free(sc, &mv_p->dma_ring);
1211 }
1212 
1213 /*
1214  * Acknowledge the interrupt
1215  *
1216  * read the cause bits, clear them, and return them.
1217  * NOTE: multiple cause bits may be returned at once.
1218  */
1219 STATIC uint32_t
1220 mvxpsec_intr_ack(struct mvxpsec_softc *sc)
1221 {
1222 	uint32_t reg;
1223 
1224 	reg  = MVXPSEC_READ(sc, MVXPSEC_INT_CAUSE);
1225 	reg &= MVXPSEC_DEFAULT_INT;
1226 	MVXPSEC_WRITE(sc, MVXPSEC_INT_CAUSE, ~reg);
1227 	MVXPSEC_PRINTF(MVXPSEC_DEBUG_INTR, "Int: %s\n", s_xpsecintr(reg));
1228 
1229 	return reg;
1230 }
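/*
 * Example (illustrative): if the masked cause reads 0x00000005, the
 * write of ~0x00000005 = 0xfffffffa above clears exactly bits 0 and 2;
 * the cause register apparently uses write-0-to-clear semantics, so
 * bits written as 1 are left untouched.
 */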
1231 
1232 /*
1233  * Entry of TDMA error interrupt handler
1234  *
1235  * register this to kernel via marvell_intr_establish()
1236  */
1237 int
1238 mvxpsec_eintr(void *arg)
1239 {
1240 	struct mvxpsec_softc *sc = arg;
1241 	uint32_t err;
1242 
1243 	/* IPL_NET */
1244 again:
1245 	err = mvxpsec_eintr_ack(sc);
1246 	if (err == 0)
1247 		goto done;
1248 
1249 	log(LOG_ERR, "%s: DMA Error Interrupt: %s\n", __func__,
1250 	    s_errreg(err));
1251 #ifdef MVXPSEC_DEBUG
1252 	mvxpsec_dump_reg(sc);
1253 #endif
1254 
1255 	goto again;
1256 done:
1257 	return 0;
1258 }
1259 
1260 /*
1261  * Acknowledge the TDMA error interrupt
1262  *
1263  * read the cause bits, clear them, and return them.
1264  * NOTE: multiple cause bits may be returned at once.
1265  */
1266 STATIC uint32_t
1267 mvxpsec_eintr_ack(struct mvxpsec_softc *sc)
1268 {
1269 	uint32_t reg;
1270 
1271 	reg  = MVXPSEC_READ(sc, MV_TDMA_ERR_CAUSE);
1272 	reg &= MVXPSEC_DEFAULT_ERR;
1273 	MVXPSEC_WRITE(sc, MV_TDMA_ERR_CAUSE, ~reg);
1274 	MVXPSEC_PRINTF(MVXPSEC_DEBUG_INTR, "Int: %s\n", s_xpsecintr(reg));
1275 
1276 	return reg;
1277 }
1278 
1279 /*
1280  * Interrupt statistics
1281  *
1282  * these are NOT statistics of how many times the events 'occurred';
1283  * they ONLY count how many times the events were 'handled'.
1284  */
1285 INLINE void
1286 mvxpsec_intr_cnt(struct mvxpsec_softc *sc, int cause)
1287 {
1288 	MVXPSEC_EVCNT_INCR(sc, intr_all);
1289 	if (cause & MVXPSEC_INT_AUTH)
1290 		MVXPSEC_EVCNT_INCR(sc, intr_auth);
1291 	if (cause & MVXPSEC_INT_DES)
1292 		MVXPSEC_EVCNT_INCR(sc, intr_des);
1293 	if (cause & MVXPSEC_INT_AES_ENC)
1294 		MVXPSEC_EVCNT_INCR(sc, intr_aes_enc);
1295 	if (cause & MVXPSEC_INT_AES_DEC)
1296 		MVXPSEC_EVCNT_INCR(sc, intr_aes_dec);
1297 	if (cause & MVXPSEC_INT_ENC)
1298 		MVXPSEC_EVCNT_INCR(sc, intr_enc);
1299 	if (cause & MVXPSEC_INT_SA)
1300 		MVXPSEC_EVCNT_INCR(sc, intr_sa);
1301 	if (cause & MVXPSEC_INT_ACCTDMA)
1302 		MVXPSEC_EVCNT_INCR(sc, intr_acctdma);
1303 	if (cause & MVXPSEC_INT_TDMA_COMP)
1304 		MVXPSEC_EVCNT_INCR(sc, intr_comp);
1305 	if (cause & MVXPSEC_INT_TDMA_OWN)
1306 		MVXPSEC_EVCNT_INCR(sc, intr_own);
1307 	if (cause & MVXPSEC_INT_ACCTDMA_CONT)
1308 		MVXPSEC_EVCNT_INCR(sc, intr_acctdma_cont);
1309 }
1310 
1311 /*
1312  * Setup MVXPSEC header structure.
1313  *
1314  * the header contains the security accelerator descriptor,
1315  * the cipher key material, the cipher and MAC IVs, ...
1316  *
1317  * the header is transferred to MVXPSEC Internal SRAM by TDMA,
1318  * and parsed by MVXPSEC H/W.
1319  */
1320 STATIC int
1321 mvxpsec_header_finalize(struct mvxpsec_packet *mv_p)
1322 {
1323 	struct mvxpsec_acc_descriptor *desc = &mv_p->pkt_header.desc;
1324 	int enc_start, enc_len, iv_offset;
1325 	int mac_start, mac_len, mac_offset;
1326 
1327 	/* offset -> device address */
1328 	enc_start = MVXPSEC_SRAM_PAYLOAD_DA(mv_p->enc_off);
1329 	enc_len = mv_p->enc_len;
1330 	if (mv_p->flags & CRP_EXT_IV)
1331 		iv_offset = mv_p->enc_ivoff;
1332 	else
1333 		iv_offset = MVXPSEC_SRAM_PAYLOAD_DA(mv_p->enc_ivoff);
1334 	mac_start = MVXPSEC_SRAM_PAYLOAD_DA(mv_p->mac_off);
1335 	mac_len = mv_p->mac_len;
1336 	mac_offset = MVXPSEC_SRAM_PAYLOAD_DA(mv_p->mac_dst);
1337 
1338 	MVXPSEC_PRINTF(MVXPSEC_DEBUG_OPENCRYPTO,
1339 	    "PAYLOAD at 0x%08x\n", (int)MVXPSEC_SRAM_PAYLOAD_OFF);
1340 	MVXPSEC_PRINTF(MVXPSEC_DEBUG_OPENCRYPTO,
1341 	    "ENC from 0x%08x\n", enc_start);
1342 	MVXPSEC_PRINTF(MVXPSEC_DEBUG_OPENCRYPTO,
1343 	    "MAC from 0x%08x\n", mac_start);
1344 	MVXPSEC_PRINTF(MVXPSEC_DEBUG_OPENCRYPTO,
1345 	    "MAC to 0x%08x\n", mac_offset);
1346 	MVXPSEC_PRINTF(MVXPSEC_DEBUG_OPENCRYPTO,
1347 	    "ENC IV at 0x%08x\n", iv_offset);
1348 
1349 	/* setup device addresses in Security Accelerator Descriptors */
1350 	desc->acc_encdata = MV_ACC_DESC_ENC_DATA(enc_start, enc_start);
1351 	desc->acc_enclen = MV_ACC_DESC_ENC_LEN(enc_len);
1352 	if (desc->acc_config & MV_ACC_CRYPTO_DECRYPT)
1353 		desc->acc_enckey =
1354 		    MV_ACC_DESC_ENC_KEY(MVXPSEC_SRAM_KEY_D_DA);
1355 	else
1356 		desc->acc_enckey =
1357 		    MV_ACC_DESC_ENC_KEY(MVXPSEC_SRAM_KEY_DA);
1358 	desc->acc_enciv =
1359 	    MV_ACC_DESC_ENC_IV(MVXPSEC_SRAM_IV_WORK_DA, iv_offset);
1360 
1361 	desc->acc_macsrc = MV_ACC_DESC_MAC_SRC(mac_start, mac_len);
1362 	desc->acc_macdst = MV_ACC_DESC_MAC_DST(mac_offset, mac_len);
1363 	desc->acc_maciv =
1364 	    MV_ACC_DESC_MAC_IV(MVXPSEC_SRAM_MIV_IN_DA,
1365 	        MVXPSEC_SRAM_MIV_OUT_DA);
1366 
1367 	return 0;
1368 }
1369 
1370 /*
1371  * constructor of session structure.
1372  *
1373  * this constructor will be called by the pool_cache framework.
1374  */
1375 STATIC int
1376 mvxpsec_session_ctor(void *arg, void *obj, int flags)
1377 {
1378 	struct mvxpsec_softc *sc = arg;
1379 	struct mvxpsec_session *mv_s = obj;
1380 
1381 	/* pool is owned by softc */
1382 	mv_s->sc = sc;
1383 
1384 	/* Create and load DMA map for session header */
1385 	mv_s->session_header_map = 0;
1386 	if (bus_dmamap_create(sc->sc_dmat,
1387 	    sizeof(mv_s->session_header), 1,
1388 	    sizeof(mv_s->session_header), 0,
1389 	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1390 	    &mv_s->session_header_map)) {
1391 		log(LOG_ERR, "%s: cannot create DMA map\n", __func__);
1392 		goto fail;
1393 	}
1394 	if (bus_dmamap_load(sc->sc_dmat, mv_s->session_header_map,
1395 	    &mv_s->session_header, sizeof(mv_s->session_header),
1396 	    NULL, BUS_DMA_NOWAIT)) {
1397 		log(LOG_ERR, "%s: cannot load header\n", __func__);
1398 		goto fail;
1399 	}
1400 
1401 	return 0;
1402 fail:
1403 	if (mv_s->session_header_map)
1404 		bus_dmamap_destroy(sc->sc_dmat, mv_s->session_header_map);
1405 	return ENOMEM;
1406 }
1407 
1408 /*
1409  * destructor of session structure.
1410  *
1411  * this destructor will be called by the pool_cache framework.
1412  */
1413 STATIC void
1414 mvxpsec_session_dtor(void *arg, void *obj)
1415 {
1416 	struct mvxpsec_softc *sc = arg;
1417 	struct mvxpsec_session *mv_s = obj;
1418 
1419 	if (mv_s->sc != sc)
1420 		panic("inconsitent context\n");
1421 
1422 	bus_dmamap_destroy(sc->sc_dmat, mv_s->session_header_map);
1423 }
1424 
1425 /*
1426  * constructor of packet structure.
1427  */
1428 STATIC int
1429 mvxpsec_packet_ctor(void *arg, void *obj, int flags)
1430 {
1431 	struct mvxpsec_softc *sc = arg;
1432 	struct mvxpsec_packet *mv_p = obj;
1433 
1434 	mv_p->dma_ring.dma_head = NULL;
1435 	mv_p->dma_ring.dma_last = NULL;
1436 	mv_p->dma_ring.dma_size = 0;
1437 
1438 	/* Create and load DMA map for packet header */
1439 	mv_p->pkt_header_map = 0;
1440 	if (bus_dmamap_create(sc->sc_dmat,
1441 	    sizeof(mv_p->pkt_header), 1, sizeof(mv_p->pkt_header), 0,
1442 	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1443 	    &mv_p->pkt_header_map)) {
1444 		log(LOG_ERR, "%s: cannot create DMA map\n", __func__);
1445 		goto fail;
1446 	}
1447 	if (bus_dmamap_load(sc->sc_dmat, mv_p->pkt_header_map,
1448 	    &mv_p->pkt_header, sizeof(mv_p->pkt_header),
1449 	    NULL, BUS_DMA_NOWAIT)) {
1450 		log(LOG_ERR, "%s: cannot load header\n", __func__);
1451 		goto fail;
1452 	}
1453 
1454 	/* Create DMA map for session data. */
1455 	mv_p->data_map = 0;
1456 	if (bus_dmamap_create(sc->sc_dmat,
1457 	    MVXPSEC_DMA_MAX_SIZE, MVXPSEC_DMA_MAX_SEGS, MVXPSEC_DMA_MAX_SIZE,
1458 	    0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &mv_p->data_map)) {
1459 		log(LOG_ERR, "%s: cannot create DMA map\n", __func__);
1460 		goto fail;
1461 	}
1462 
1463 	return 0;
1464 fail:
1465 	if (mv_p->pkt_header_map)
1466 		bus_dmamap_destroy(sc->sc_dmat, mv_p->pkt_header_map);
1467 	if (mv_p->data_map)
1468 		bus_dmamap_destroy(sc->sc_dmat, mv_p->data_map);
1469 	return ENOMEM;
1470 }
1471 
1472 /*
1473  * destructor of packet structure.
1474  */
1475 STATIC void
1476 mvxpsec_packet_dtor(void *arg, void *obj)
1477 {
1478 	struct mvxpsec_softc *sc = arg;
1479 	struct mvxpsec_packet *mv_p = obj;
1480 
1481 	mutex_enter(&sc->sc_dma_mtx);
1482 	mvxpsec_dma_free(sc, &mv_p->dma_ring);
1483 	mutex_exit(&sc->sc_dma_mtx);
1484 	bus_dmamap_destroy(sc->sc_dmat, mv_p->pkt_header_map);
1485 	bus_dmamap_destroy(sc->sc_dmat, mv_p->data_map);
1486 }
1487 
1488 /*
1489  * allocate new session structure.
1490  */
1491 STATIC struct mvxpsec_session *
1492 mvxpsec_session_alloc(struct mvxpsec_softc *sc)
1493 {
1494 	struct mvxpsec_session *mv_s;
1495 
1496 	mv_s = pool_cache_get(sc->sc_session_pool, PR_NOWAIT);
1497 	if (mv_s == NULL) {
1498 		log(LOG_ERR, "%s: cannot allocate memory\n", __func__);
1499 		return NULL;
1500 	}
1501 	mv_s->refs = 1; /* 0 means session is already invalid */
1502 	mv_s->sflags = 0;
1503 
1504 	return mv_s;
1505 }
1506 
1507 /*
1508  * deallocate session structure.
1509  */
1510 STATIC void
1511 mvxpsec_session_dealloc(struct mvxpsec_session *mv_s)
1512 {
1513 	struct mvxpsec_softc *sc = mv_s->sc;
1514 
1515 	mv_s->sflags |= DELETED;
1516 	mvxpsec_session_unref(mv_s);
1517 	crypto_unblock(sc->sc_cid, CRYPTO_SYMQ|CRYPTO_ASYMQ);
1518 
1519 	return;
1520 }
1521 
1522 STATIC int
1523 mvxpsec_session_ref(struct mvxpsec_session *mv_s)
1524 {
1525 	uint32_t refs;
1526 
1527 	if (mv_s->sflags & DELETED) {
1528 		log(LOG_ERR,
1529 		    "%s: session is already deleted.\n", __func__);
1530 		return -1;
1531 	}
1532 
1533 	refs = atomic_inc_32_nv(&mv_s->refs);
1534 	if (refs == 1) {
1535 		/*
1536 		 * a session with refs == 0 is
1537 		 * already invalidated. revert it.
1538 		 * XXX: use CAS ?
1539 		 */
1540 		atomic_dec_32(&mv_s->refs);
1541 		log(LOG_ERR,
1542 		    "%s: session is already invalidated.\n", __func__);
1543 		return -1;
1544 	}
1545 
1546 	return 0;
1547 }
1548 
1549 STATIC void
1550 mvxpsec_session_unref(struct mvxpsec_session *mv_s)
1551 {
1552 	uint32_t refs;
1553 
1554 	membar_release();
1555 	refs = atomic_dec_32_nv(&mv_s->refs);
1556 	if (refs == 0) {
1557 		membar_acquire();
1558 		pool_cache_put(mv_s->sc->sc_session_pool, mv_s);
1559 	}
1560 }
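/*
 * Sketch of the reference protocol implemented above (illustration
 * only):
 *
 *	mv_s = mvxpsec_session_alloc(sc);	refs = 1
 *	mvxpsec_session_ref(mv_s);		refs = 2 (one per packet)
 *	...
 *	mvxpsec_session_unref(mv_s);		refs = 1 (packet done)
 *	mvxpsec_session_dealloc(mv_s);		DELETED; refs = 0, the
 *						session returns to the pool
 *
 * once refs has reached 0, mvxpsec_session_ref() refuses to revive the
 * session.
 */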
1561 
1562 /*
1563  * look up a session and check whether it still exists
1564  */
1565 INLINE struct mvxpsec_session *
1566 mvxpsec_session_lookup(struct mvxpsec_softc *sc, int sid)
1567 {
1568 	struct mvxpsec_session *mv_s;
1569 	int session;
1570 
1571 	/* must be called with sc->sc_session_mtx held */
1572 	KASSERT(mutex_owned(&sc->sc_session_mtx));
1573 
1574 	session = MVXPSEC_SESSION(sid);
1575 	if (__predict_false(session >= MVXPSEC_MAX_SESSIONS)) {
1576 		log(LOG_ERR, "%s: session number too large %d\n",
1577 		    __func__, session);
1578 		return NULL;
1579 	}
1580 	if (__predict_false( (mv_s = sc->sc_sessions[session]) == NULL)) {
1581 		log(LOG_ERR, "%s: invalid session %d\n",
1582 		    __func__, session);
1583 		return NULL;
1584 	}
1585 
1586 	KASSERT(mv_s->sid == session);
1587 
1588 	return mv_s;
1589 }
1590 
1591 /*
1592  * allocate a new packet structure.
1593  */
1594 STATIC struct mvxpsec_packet *
1595 mvxpsec_packet_alloc(struct mvxpsec_session *mv_s)
1596 {
1597 	struct mvxpsec_softc *sc = mv_s->sc;
1598 	struct mvxpsec_packet *mv_p;
1599 
1600 	/* must be called with sc->sc_queue_mtx held. */
1601 	KASSERT(mutex_owned(&sc->sc_queue_mtx));
1602 	/* must be called with sc->sc_session_mtx held. */
1603 	KASSERT(mutex_owned(&sc->sc_session_mtx));
1604 
1605 	if (mvxpsec_session_ref(mv_s) < 0) {
1606 		log(LOG_ERR, "%s: invalid session.\n", __func__);
1607 		return NULL;
1608 	}
1609 
1610 	if ( (mv_p = SLIST_FIRST(&sc->sc_free_list)) != NULL) {
1611 		SLIST_REMOVE_HEAD(&sc->sc_free_list, free_list);
1612 		sc->sc_free_qlen--;
1613 	}
1614 	else {
1615 		mv_p = pool_cache_get(sc->sc_packet_pool, PR_NOWAIT);
1616 		if (mv_p == NULL) {
1617 			log(LOG_ERR, "%s: cannot allocate memory\n",
1618 			    __func__);
1619 			mvxpsec_session_unref(mv_s);
1620 			return NULL;
1621 		}
1622 	}
1623 	mv_p->mv_s = mv_s;
1624 	mv_p->flags = 0;
1625 	mv_p->data_ptr = NULL;
1626 
1627 	return mv_p;
1628 }
1629 
1630 /*
1631  * free packet structure.
1632  */
1633 STATIC void
1634 mvxpsec_packet_dealloc(struct mvxpsec_packet *mv_p)
1635 {
1636 	struct mvxpsec_session *mv_s = mv_p->mv_s;
1637 	struct mvxpsec_softc *sc = mv_s->sc;
1638 
1639 	/* must be called with sc->sc_queue_mtx held */
1640 	KASSERT(mutex_owned(&sc->sc_queue_mtx));
1641 
1642 	if (mv_p->dma_ring.dma_size != 0) {
1643 		sc->sc_desc_ring_cons += mv_p->dma_ring.dma_size;
1644 	}
1645 	mv_p->dma_ring.dma_head = NULL;
1646 	mv_p->dma_ring.dma_last = NULL;
1647 	mv_p->dma_ring.dma_size = 0;
1648 
1649 	if (mv_p->data_map) {
1650 		if (mv_p->flags & RDY_DATA) {
1651 			bus_dmamap_unload(sc->sc_dmat, mv_p->data_map);
1652 			mv_p->flags &= ~RDY_DATA;
1653 		}
1654 	}
1655 
1656 	if (sc->sc_free_qlen > sc->sc_wait_qlimit)
1657 		pool_cache_put(sc->sc_packet_pool, mv_p);
1658 	else {
1659 		SLIST_INSERT_HEAD(&sc->sc_free_list, mv_p, free_list);
1660 		sc->sc_free_qlen++;
1661 	}
1662 	mvxpsec_session_unref(mv_s);
1663 }
1664 
1665 INLINE void
1666 mvxpsec_packet_enqueue(struct mvxpsec_packet *mv_p)
1667 {
1668 	struct mvxpsec_softc *sc = mv_p->mv_s->sc;
1669 	struct mvxpsec_packet *last_packet;
1670 	struct mvxpsec_descriptor_handle *cur_dma, *prev_dma;
1671 
1672 	/* must be called with sc->sc_queue_mtx held */
1673 	KASSERT(mutex_owned(&sc->sc_queue_mtx));
1674 
1675 	if (sc->sc_wait_qlen == 0) {
1676 		SIMPLEQ_INSERT_TAIL(&sc->sc_wait_queue, mv_p, queue);
1677 		sc->sc_wait_qlen++;
1678 		mv_p->flags |= SETUP_DONE;
1679 		return;
1680 	}
1681 
1682 	last_packet = SIMPLEQ_LAST(&sc->sc_wait_queue, mvxpsec_packet, queue);
1683 	SIMPLEQ_INSERT_TAIL(&sc->sc_wait_queue, mv_p, queue);
1684 	sc->sc_wait_qlen++;
1685 
1686 	/* chain the DMA */
1687 	cur_dma = mv_p->dma_ring.dma_head;
1688 	prev_dma = last_packet->dma_ring.dma_last;
1689 	mvxpsec_dma_cat(sc, prev_dma, cur_dma);
1690 	mv_p->flags |= SETUP_DONE;
1691 }
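/*
 * Illustration: after enqueueing packets A and B above,
 *
 *	sc_wait_queue:	A -> B
 *	TDMA chain:	A.dma_head ... A.dma_last -> B.dma_head ...
 *
 * so a single TDMA activation walks the descriptors of every waiting
 * packet in submission order.
 */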
1692 
1693 /*
1694  * called by interrupt handler
1695  */
1696 STATIC int
1697 mvxpsec_done_packet(struct mvxpsec_packet *mv_p)
1698 {
1699 	struct mvxpsec_session *mv_s = mv_p->mv_s;
1700 	struct mvxpsec_softc *sc = mv_s->sc;
1701 
1702 	KASSERT((mv_p->flags & RDY_DATA));
1703 	KASSERT((mv_p->flags & SETUP_DONE));
1704 
1705 	/* unload data */
1706 	bus_dmamap_sync(sc->sc_dmat, mv_p->data_map,
1707 	    0, mv_p->data_len,
1708 	    BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
1709 	bus_dmamap_unload(sc->sc_dmat, mv_p->data_map);
1710 	mv_p->flags &= ~RDY_DATA;
1711 
1712 #ifdef MVXPSEC_DEBUG
1713 	if (mvxpsec_debug != 0) {
1714 		int s;
1715 
1716 		bus_dmamap_sync(sc->sc_dmat, mv_p->pkt_header_map,
1717 		    0, sizeof(mv_p->pkt_header),
1718 		    BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
1719 		bus_dmamap_sync(sc->sc_dmat, mv_s->session_header_map,
1720 		    0, sizeof(mv_s->session_header),
1721 		    BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
1722 
1723 		if (mvxpsec_debug & MVXPSEC_DEBUG_OPENCRYPTO) {
1724 			char buf[1500];
1725 			struct mbuf *m;
1726 			struct uio *uio;
1727 			size_t len;
1728 
1729 			switch (mv_p->data_type) {
1730 			case MVXPSEC_DATA_MBUF:
1731 				m = mv_p->data_mbuf;
1732 				len = m->m_pkthdr.len;
1733 				if (len > sizeof(buf))
1734 					len = sizeof(buf);
1735 				m_copydata(m, 0, len, buf);
1736 				break;
1737 			case MVXPSEC_DATA_UIO:
1738 				uio = mv_p->data_uio;
1739 				len = uio->uio_resid;
1740 				if (len > sizeof(buf))
1741 					len = sizeof(buf);
1742 				cuio_copydata(uio, 0, len, buf);
1743 				break;
1744 			default:
1745 				len = 0;
1746 			}
1747 			if (len > 0)
1748 				mvxpsec_dump_data(__func__, buf, len);
1749 		}
1750 
1751 		if (mvxpsec_debug & MVXPSEC_DEBUG_PAYLOAD) {
1752 			MVXPSEC_PRINTF(MVXPSEC_DEBUG_PAYLOAD,
1753 			    "%s: session_descriptor:\n", __func__);
1754 			mvxpsec_dump_packet_desc(__func__, mv_p);
1755 			MVXPSEC_PRINTF(MVXPSEC_DEBUG_PAYLOAD,
1756 			    "%s: session_data:\n", __func__);
1757 			mvxpsec_dump_packet_data(__func__, mv_p);
1758 		}
1759 
1760 		if (mvxpsec_debug & MVXPSEC_DEBUG_SRAM) {
1761 			MVXPSEC_PRINTF(MVXPSEC_DEBUG_SRAM,
1762 			    "%s: SRAM\n", __func__);
1763 			mvxpsec_dump_sram(__func__, sc, 2000);
1764 		}
1765 
1766 		s = MVXPSEC_READ(sc, MV_ACC_STATUS);
1767 		if (s & MV_ACC_STATUS_MAC_ERR) {
1768 			MVXPSEC_PRINTF(MVXPSEC_DEBUG_INTR,
1769 			    "%s: Message Authentication Failed.\n", __func__);
1770 		}
1771 	}
1772 #endif
1773 
1774 	/* copy back IV */
1775 	if (mv_p->flags & CRP_EXT_IV) {
1776 		memcpy(mv_p->ext_iv,
1777 		    &mv_p->pkt_header.crp_iv_ext, mv_p->ext_ivlen);
1778 		mv_p->ext_iv = NULL;
1779 		mv_p->ext_ivlen = 0;
1780 	}
1781 
1782 	/* notify opencrypto */
1783 	mv_p->crp->crp_etype = 0;
1784 	crypto_done(mv_p->crp);
1785 	mv_p->crp = NULL;
1786 
1787 	/* unblock driver */
1788 	mvxpsec_packet_dealloc(mv_p);
1789 	crypto_unblock(sc->sc_cid, CRYPTO_SYMQ|CRYPTO_ASYMQ);
1790 
1791 	MVXPSEC_EVCNT_INCR(sc, packet_ok);
1792 
1793 	return 0;
1794 }
1795 
1796 
1797 /*
1798  * Opencrypto API registration
1799  */
1800 int
1801 mvxpsec_register(struct mvxpsec_softc *sc)
1802 {
1803 	int oplen = SRAM_PAYLOAD_SIZE;
1804 	int flags = 0;
1805 	int err;
1806 
1807 	sc->sc_nsessions = 0;
1808 	sc->sc_cid = crypto_get_driverid(0);
1809 	if (sc->sc_cid < 0) {
1810 		log(LOG_ERR,
1811 		    "%s: crypto_get_driverid() failed.\n", __func__);
1812 		err = EINVAL;
1813 		goto done;
1814 	}
1815 
1816 	/* Ciphers */
1817 	err = crypto_register(sc->sc_cid, CRYPTO_DES_CBC, oplen, flags,
1818 	    mvxpsec_newsession, mvxpsec_freesession, mvxpsec_dispatch, sc);
1819 	if (err)
1820 		goto done;
1821 
1822 	err = crypto_register(sc->sc_cid, CRYPTO_3DES_CBC, oplen, flags,
1823 	    mvxpsec_newsession, mvxpsec_freesession, mvxpsec_dispatch, sc);
1824 	if (err)
1825 		goto done;
1826 
1827 	err = crypto_register(sc->sc_cid, CRYPTO_AES_CBC, oplen, flags,
1828 	    mvxpsec_newsession, mvxpsec_freesession, mvxpsec_dispatch, sc);
1829 	if (err)
1830 		goto done;
1831 
1832 	/* MACs */
1833 	err = crypto_register(sc->sc_cid, CRYPTO_SHA1_HMAC_96,
1834 	    oplen, flags,
1835 	    mvxpsec_newsession, mvxpsec_freesession, mvxpsec_dispatch, sc);
1836 	if (err)
1837 		goto done;
1838 
1839 	err = crypto_register(sc->sc_cid, CRYPTO_MD5_HMAC_96,
1840 	    oplen, flags,
1841 	    mvxpsec_newsession, mvxpsec_freesession, mvxpsec_dispatch, sc);
1842 	if (err)
1843 		goto done;
1844 
1845 #ifdef DEBUG
1846 	log(LOG_DEBUG,
	    "%s: registered to opencrypto (max data = %d bytes)\n",
1848 	    device_xname(sc->sc_dev), oplen);
1849 #endif
1850 
1851 	err = 0;
1852 done:
1853 	return err;
1854 }
1855 
1856 /*
1857  * Create new opencrypto session
1858  *
1859  *   - register cipher key, mac key.
1860  *   - initialize mac internal state.
1861  */
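/*
 * Illustrative caller-side sketch (not driver code): an ESP-style
 * session pairing AES-CBC with HMAC-SHA1-96 is requested by chaining
 * two cryptoini entries.  Names follow opencrypto(9) as used in this
 * file; the exact crypto_newsession() signature is an assumption.
 *
 *	struct cryptoini cri_enc, cri_auth;
 *	uint64_t sid;
 *
 *	memset(&cri_enc, 0, sizeof(cri_enc));
 *	cri_enc.cri_alg   = CRYPTO_AES_CBC;
 *	cri_enc.cri_klen  = 128;			-- in bits
 *	cri_enc.cri_key   = enc_key;
 *	cri_enc.cri_next  = &cri_auth;
 *
 *	memset(&cri_auth, 0, sizeof(cri_auth));
 *	cri_auth.cri_alg  = CRYPTO_SHA1_HMAC_96;
 *	cri_auth.cri_klen = 160;
 *	cri_auth.cri_key  = auth_key;
 *
 *	error = crypto_newsession(&sid, &cri_enc, 0);
 *	-- arrives below with cri == &cri_enc
 */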
1862 int
1863 mvxpsec_newsession(void *arg, uint32_t *sidp, struct cryptoini *cri)
1864 {
1865 	struct mvxpsec_softc *sc = arg;
1866 	struct mvxpsec_session *mv_s = NULL;
1867 	struct cryptoini *c;
1868 	static int hint = 0;
1869 	int session = -1;
1870 	int sid;
1871 	int err;
1872 	int i;
1873 
1874 	/* allocate driver session context */
1875 	mv_s = mvxpsec_session_alloc(sc);
1876 	if (mv_s == NULL)
1877 		return ENOMEM;
1878 
1879 	/*
1880 	 * lookup opencrypto session table
1881 	 *
	 * sc_session_mtx is held from here on.
1883 	 */
1884 	mutex_enter(&sc->sc_session_mtx);
1885 	if (sc->sc_nsessions >= MVXPSEC_MAX_SESSIONS) {
1886 		mutex_exit(&sc->sc_session_mtx);
		log(LOG_ERR, "%s: too many IPsec SAs (max %d)\n",
1888 				__func__, MVXPSEC_MAX_SESSIONS);
1889 		mvxpsec_session_dealloc(mv_s);
1890 		return ENOMEM;
1891 	}
1892 	for (i = hint; i < MVXPSEC_MAX_SESSIONS; i++) {
1893 		if (sc->sc_sessions[i])
1894 			continue;
1895 		session = i;
1896 		hint = session + 1;
		break;
1898 	}
1899 	if (session < 0) {
1900 		for (i = 0; i < hint; i++) {
1901 			if (sc->sc_sessions[i])
1902 				continue;
1903 			session = i;
1904 			hint = session + 1;
1905 			break;
1906 		}
1907 		if (session < 0) {
1908 			mutex_exit(&sc->sc_session_mtx);
1909 			/* session full */
			log(LOG_ERR, "%s: too many IPsec SAs (max %d)\n",
1911 				__func__, MVXPSEC_MAX_SESSIONS);
1912 			mvxpsec_session_dealloc(mv_s);
1913 			hint = 0;
1914 			return ENOMEM;
1915 		}
1916 	}
1917 	if (hint >= MVXPSEC_MAX_SESSIONS)
1918 		hint = 0;
1919 	sc->sc_nsessions++;
1920 	sc->sc_sessions[session] = mv_s;
1921 #ifdef DEBUG
1922 	log(LOG_DEBUG, "%s: new session %d allocated\n", __func__, session);
1923 #endif
1924 
1925 	sid = MVXPSEC_SID(device_unit(sc->sc_dev), session);
1926 	mv_s->sid = sid;
1927 
1928 	/* setup the session key ... */
1929 	for (c = cri; c; c = c->cri_next) {
1930 		switch (c->cri_alg) {
1931 		case CRYPTO_DES_CBC:
1932 		case CRYPTO_3DES_CBC:
1933 		case CRYPTO_AES_CBC:
1934 			/* key */
1935 			if (mvxpsec_key_precomp(c->cri_alg,
1936 			    c->cri_key, c->cri_klen,
1937 			    &mv_s->session_header.crp_key,
1938 			    &mv_s->session_header.crp_key_d)) {
1939 				log(LOG_ERR,
				    "%s: Invalid cipher key for %s.\n",
1941 				    __func__, s_ctlalg(c->cri_alg));
1942 				err = EINVAL;
1943 				goto fail;
1944 			}
1945 			if (mv_s->sflags & RDY_CRP_KEY) {
1946 				log(LOG_WARNING,
1947 				    "%s: overwrite cipher: %s->%s.\n",
1948 				    __func__,
1949 				    s_ctlalg(mv_s->cipher_alg),
1950 				    s_ctlalg(c->cri_alg));
1951 			}
1952 			mv_s->sflags |= RDY_CRP_KEY;
1953 			mv_s->enc_klen = c->cri_klen;
1954 			mv_s->cipher_alg = c->cri_alg;
1955 			/* create per session IV (compatible with KAME IPsec) */
1956 			cprng_fast(&mv_s->session_iv, sizeof(mv_s->session_iv));
1957 			mv_s->sflags |= RDY_CRP_IV;
1958 			break;
1959 		case CRYPTO_SHA1_HMAC_96:
1960 		case CRYPTO_MD5_HMAC_96:
1961 			/* key */
1962 			if (mvxpsec_hmac_precomp(c->cri_alg,
1963 			    c->cri_key, c->cri_klen,
1964 			    (uint32_t *)&mv_s->session_header.miv_in,
1965 			    (uint32_t *)&mv_s->session_header.miv_out)) {
1966 				log(LOG_ERR,
1967 				    "%s: Invalid MAC key\n", __func__);
1968 				err = EINVAL;
1969 				goto fail;
1970 			}
1971 			if (mv_s->sflags & RDY_MAC_KEY ||
1972 			    mv_s->sflags & RDY_MAC_IV) {
1973 				log(LOG_ERR,
1974 				    "%s: overwrite HMAC: %s->%s.\n",
1975 				    __func__, s_ctlalg(mv_s->hmac_alg),
1976 				    s_ctlalg(c->cri_alg));
1977 			}
1978 			mv_s->sflags |= RDY_MAC_KEY;
1979 			mv_s->sflags |= RDY_MAC_IV;
1980 
1981 			mv_s->mac_klen = c->cri_klen;
1982 			mv_s->hmac_alg = c->cri_alg;
1983 			break;
1984 		default:
1985 			log(LOG_ERR, "%s: Unknown algorithm %d\n",
1986 			    __func__, c->cri_alg);
1987 			err = EINVAL;
1988 			goto fail;
1989 		}
1990 	}
1991 	MVXPSEC_PRINTF(MVXPSEC_DEBUG_OPENCRYPTO,
1992 	    "H/W Crypto session (id:%u) added.\n", session);
1993 
1994 	*sidp = sid;
1995 	MVXPSEC_EVCNT_INCR(sc, session_new);
1996 	mutex_exit(&sc->sc_session_mtx);
1997 
	/* sync the session header (it is never modified after this point) */
1999 	bus_dmamap_sync(sc->sc_dmat,
2000 	    mv_s->session_header_map,
2001 	    0, sizeof(mv_s->session_header),
2002 	    BUS_DMASYNC_PREWRITE);
2003 
2004 	return 0;
2005 
2006 fail:
2007 	sc->sc_nsessions--;
2008 	sc->sc_sessions[session] = NULL;
2009 	hint = session;
2010 	if (mv_s)
2011 		mvxpsec_session_dealloc(mv_s);
2012 	log(LOG_WARNING,
	    "%s: Failed to add H/W crypto session (id:%u): err=%d\n",
	    __func__, session, err);
2015 
2016 	mutex_exit(&sc->sc_session_mtx);
2017 	return err;
2018 }
2019 
2020 /*
2021  * remove opencrypto session
2022  */
2023 int
2024 mvxpsec_freesession(void *arg, uint64_t tid)
2025 {
2026 	struct mvxpsec_softc *sc = arg;
2027 	struct mvxpsec_session *mv_s;
2028 	int session;
2029 	uint32_t sid = ((uint32_t)tid) & 0xffffffff;
2030 
2031 	session = MVXPSEC_SESSION(sid);
2032 	if (session < 0 || session >= MVXPSEC_MAX_SESSIONS) {
		log(LOG_ERR, "%s: invalid session (id:%d)\n",
2034 		    __func__, session);
2035 		return EINVAL;
2036 	}
2037 
2038 	mutex_enter(&sc->sc_session_mtx);
	if ((mv_s = sc->sc_sessions[session]) == NULL) {
2040 		mutex_exit(&sc->sc_session_mtx);
2041 #ifdef DEBUG
2042 		log(LOG_DEBUG, "%s: session %d already inactivated\n",
2043 		    __func__, session);
2044 #endif
2045 		return ENOENT;
2046 	}
2047 	MVXPSEC_PRINTF(MVXPSEC_DEBUG_OPENCRYPTO,
2048 	    "%s: inactivate session %d\n", __func__, session);
2049 
2050 	/* inactivate mvxpsec session */
2051 	sc->sc_sessions[session] = NULL;
2052 	sc->sc_nsessions--;
2053 	sc->sc_last_session = NULL;
2054 	mutex_exit(&sc->sc_session_mtx);
2055 
2056 	KASSERT(sc->sc_nsessions >= 0);
2057 	KASSERT(mv_s->sid == sid);
2058 
2059 	mvxpsec_session_dealloc(mv_s);
2060 	MVXPSEC_PRINTF(MVXPSEC_DEBUG_OPENCRYPTO,
2061 	    "H/W Crypto session (id: %d) deleted.\n", session);
2062 
2063 	/* force unblock opencrypto */
2064 	crypto_unblock(sc->sc_cid, CRYPTO_SYMQ|CRYPTO_ASYMQ);
2065 
2066 	MVXPSEC_EVCNT_INCR(sc, session_free);
2067 
2068 	return 0;
2069 }
2070 
2071 /*
2072  * process data with existing session
2073  */
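/*
 * Illustrative caller-side sketch (not driver code) of how a request
 * reaches mvxpsec_dispatch(); field names follow opencrypto(9) as
 * used in this file, other details are assumptions:
 *
 *	struct cryptop *crp = crypto_getreq(2);	-- ENC + MAC descriptors
 *
 *	crp->crp_sid      = sid;		-- from crypto_newsession()
 *	crp->crp_flags    = CRYPTO_F_IMBUF;	-- crp_buf is an mbuf chain
 *	crp->crp_buf      = (void *)m;
 *	crp->crp_ilen     = m->m_pkthdr.len;
 *	crp->crp_callback = my_callback;	-- invoked via crypto_done()
 *	-- fill each crp_desc entry: crd_alg/crd_skip/crd_len/crd_inject
 *	crypto_dispatch(crp);
 *
 * A return of ERESTART below means "requeue in the opencrypto layer
 * and retry after crypto_unblock()".
 */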
2074 int
2075 mvxpsec_dispatch(void *arg, struct cryptop *crp, int hint)
2076 {
2077 	struct mvxpsec_softc *sc = arg;
2078 	struct mvxpsec_session *mv_s;
2079 	struct mvxpsec_packet *mv_p;
2080 	int q_full;
2081 	int running;
2082 	int err;
2083 
2084 	mutex_enter(&sc->sc_queue_mtx);
2085 
2086 	/*
2087 	 * lookup session
2088 	 */
2089 	mutex_enter(&sc->sc_session_mtx);
2090 	mv_s = mvxpsec_session_lookup(sc, crp->crp_sid);
2091 	if (__predict_false(mv_s == NULL)) {
2092 		err = EINVAL;
2093 		mv_p = NULL;
2094 		mutex_exit(&sc->sc_session_mtx);
2095 		goto fail;
2096 	}
2097 	mv_p = mvxpsec_packet_alloc(mv_s);
2098 	if (__predict_false(mv_p == NULL)) {
2099 		mutex_exit(&sc->sc_session_mtx);
2100 		mutex_exit(&sc->sc_queue_mtx);
2101 		return ERESTART; /* => queued in opencrypto layer */
2102 	}
2103 	mutex_exit(&sc->sc_session_mtx);
2104 
2105 	/*
2106 	 * check queue status
2107 	 */
2108 #ifdef MVXPSEC_MULTI_PACKET
2109 	q_full = (sc->sc_wait_qlen >= sc->sc_wait_qlimit) ? 1 : 0;
2110 #else
2111 	q_full = (sc->sc_wait_qlen != 0) ? 1 : 0;
2112 #endif
	running = (sc->sc_flags & HW_RUNNING) ? 1 : 0;
2114 	if (q_full) {
2115 		/* input queue is full. */
2116 		if (!running && sc->sc_wait_qlen > 0)
2117 			mvxpsec_dispatch_queue(sc);
2118 		MVXPSEC_EVCNT_INCR(sc, queue_full);
2119 		mvxpsec_packet_dealloc(mv_p);
2120 		mutex_exit(&sc->sc_queue_mtx);
2121 		return ERESTART; /* => queued in opencrypto layer */
2122 	}
2123 
2124 	/*
2125 	 * Load and setup packet data
2126 	 */
2127 	err = mvxpsec_packet_setcrp(mv_p, crp);
2128 	if (__predict_false(err))
2129 		goto fail;
2130 
2131 	/*
2132 	 * Setup DMA descriptor chains
2133 	 */
2134 	mutex_enter(&sc->sc_dma_mtx);
2135 	err = mvxpsec_dma_copy_packet(sc, mv_p);
2136 	mutex_exit(&sc->sc_dma_mtx);
2137 	if (__predict_false(err))
2138 		goto fail;
2139 
2140 #ifdef MVXPSEC_DEBUG
2141 	mvxpsec_dump_packet(__func__, mv_p);
2142 #endif
2143 
2144 	/*
2145 	 * Sync/inval the data cache
2146 	 */
2147 	err = mvxpsec_dma_sync_packet(sc, mv_p);
2148 	if (__predict_false(err))
2149 		goto fail;
2150 
2151 	/*
2152 	 * Enqueue the packet
2153 	 */
2154 	MVXPSEC_EVCNT_INCR(sc, dispatch_packets);
2155 #ifdef MVXPSEC_MULTI_PACKET
2156 	mvxpsec_packet_enqueue(mv_p);
2157 	if (!running)
2158 		mvxpsec_dispatch_queue(sc);
2159 #else
2160 	SIMPLEQ_INSERT_TAIL(&sc->sc_wait_queue, mv_p, queue);
2161 	sc->sc_wait_qlen++;
2162 	mv_p->flags |= SETUP_DONE;
2163 	if (!running)
2164 		mvxpsec_dispatch_queue(sc);
2165 #endif
2166 	mutex_exit(&sc->sc_queue_mtx);
2167 	return 0;
2168 
2169 fail:
2170 	/* Drop the incoming packet */
2171 	mvxpsec_drop(sc, crp, mv_p, err);
2172 	mutex_exit(&sc->sc_queue_mtx);
2173 	return 0;
2174 }
2175 
2176 /*
 * hand processed packets back to the IP stack
2178  */
2179 void
2180 mvxpsec_done(void *arg)
2181 {
2182 	struct mvxpsec_softc *sc = arg;
2183 	struct mvxpsec_packet *mv_p;
2184 	mvxpsec_queue_t ret_queue;
2185 	int ndone;
2186 
2187 	mutex_enter(&sc->sc_queue_mtx);
2188 
2189 	/* stop wdog timer */
2190 	callout_stop(&sc->sc_timeout);
2191 
2192 	/* refill MVXPSEC */
2193 	ret_queue = sc->sc_run_queue;
2194 	SIMPLEQ_INIT(&sc->sc_run_queue);
2195 	sc->sc_flags &= ~HW_RUNNING;
2196 	if (sc->sc_wait_qlen > 0)
2197 		mvxpsec_dispatch_queue(sc);
2198 
2199 	ndone = 0;
2200 	while ( (mv_p = SIMPLEQ_FIRST(&ret_queue)) != NULL) {
2201 		SIMPLEQ_REMOVE_HEAD(&ret_queue, queue);
2202 		mvxpsec_dma_free(sc, &mv_p->dma_ring);
2203 		mvxpsec_done_packet(mv_p);
2204 		ndone++;
2205 	}
2206 	MVXPSEC_EVCNT_MAX(sc, max_done, ndone);
2207 
2208 	mutex_exit(&sc->sc_queue_mtx);
2209 }
2210 
2211 /*
2212  * drop the packet
2213  */
2214 INLINE void
2215 mvxpsec_drop(struct mvxpsec_softc *sc, struct cryptop *crp,
2216     struct mvxpsec_packet *mv_p, int err)
2217 {
	/* must be called with sc->sc_queue_mtx held */
2219 	KASSERT(mutex_owned(&sc->sc_queue_mtx));
2220 
2221 	if (mv_p)
2222 		mvxpsec_packet_dealloc(mv_p);
2223 	if (err < 0)
2224 		err = EINVAL;
2225 	crp->crp_etype = err;
2226 	crypto_done(crp);
2227 	MVXPSEC_EVCNT_INCR(sc, packet_err);
2228 
2229 	/* dispatch other packets in queue */
2230 	if (sc->sc_wait_qlen > 0 &&
2231 	    !(sc->sc_flags & HW_RUNNING))
2232 		mvxpsec_dispatch_queue(sc);
2233 
2234 	/* unblock driver for dropped packet */
2235 	crypto_unblock(sc->sc_cid, CRYPTO_SYMQ|CRYPTO_ASYMQ);
2236 }
2237 
2238 /* move wait queue entry to run queue */
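/*
 * Note on the hand-off (derived from the code below and from
 * mvxpsec_done()): the driver runs at most one DMA descriptor chain
 * at a time.
 *
 *	wait_queue --dispatch_queue()--> run_queue --> hardware
 *	     ^                                           |
 *	     |  new requests accumulate                  v
 *	     +- while HW_RUNNING        done(): clear HW_RUNNING, refill
 *
 * The whole wait queue is handed over in one shot: TDMA starts at the
 * first packet's descriptor and the chain is terminated at the last
 * packet by mvxpsec_dma_finalize().
 */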
2239 STATIC int
2240 mvxpsec_dispatch_queue(struct mvxpsec_softc *sc)
2241 {
2242 	struct mvxpsec_packet *mv_p;
2243 	paddr_t head;
2244 	int ndispatch = 0;
2245 
	/* must be called with sc->sc_queue_mtx held */
2247 	KASSERT(mutex_owned(&sc->sc_queue_mtx));
2248 
2249 	/* check there is any task */
2250 	if (__predict_false(sc->sc_flags & HW_RUNNING)) {
2251 		log(LOG_WARNING,
		    "%s: another packet already exists.\n", __func__);
2253 		return 0;
2254 	}
2255 	if (__predict_false(SIMPLEQ_EMPTY(&sc->sc_wait_queue))) {
2256 		log(LOG_WARNING,
		    "%s: no waiting packet yet (qlen=%d).\n",
2258 		    __func__, sc->sc_wait_qlen);
2259 		return 0;
2260 	}
2261 
2262 	/* move queue */
2263 	sc->sc_run_queue = sc->sc_wait_queue;
2264 	sc->sc_flags |= HW_RUNNING; /* dropped by intr or timeout */
2265 	SIMPLEQ_INIT(&sc->sc_wait_queue);
2266 	ndispatch = sc->sc_wait_qlen;
2267 	sc->sc_wait_qlen = 0;
2268 
2269 	/* get 1st DMA descriptor */
2270 	mv_p = SIMPLEQ_FIRST(&sc->sc_run_queue);
2271 	head = mv_p->dma_ring.dma_head->phys_addr;
2272 
2273 	/* terminate last DMA descriptor */
2274 	mv_p = SIMPLEQ_LAST(&sc->sc_run_queue, mvxpsec_packet, queue);
2275 	mvxpsec_dma_finalize(sc, &mv_p->dma_ring);
2276 
2277 	/* configure TDMA */
2278 	if (mvxpsec_dma_wait(sc) < 0) {
		log(LOG_ERR, "%s: DMA DEVICE not responding\n", __func__);
2280 		callout_schedule(&sc->sc_timeout, hz);
2281 		return 0;
2282 	}
2283 	MVXPSEC_WRITE(sc, MV_TDMA_NXT, head);
2284 
2285 	/* trigger ACC */
2286 	if (mvxpsec_acc_wait(sc) < 0) {
		log(LOG_ERR, "%s: MVXPSEC not responding\n", __func__);
2288 		callout_schedule(&sc->sc_timeout, hz);
2289 		return 0;
2290 	}
2291 	MVXPSEC_WRITE(sc, MV_ACC_COMMAND, MV_ACC_COMMAND_ACT);
2292 
2293 	MVXPSEC_EVCNT_MAX(sc, max_dispatch, ndispatch);
2294 	MVXPSEC_EVCNT_INCR(sc, dispatch_queue);
2295 	callout_schedule(&sc->sc_timeout, hz);
2296 	return 0;
2297 }
2298 
2299 /*
 * process opencrypto operations (cryptop) for packets.
2301  */
2302 INLINE int
2303 mvxpsec_parse_crd(struct mvxpsec_packet *mv_p, struct cryptodesc *crd)
2304 {
2305 	int ivlen;
2306 
2307 	KASSERT(mv_p->flags & RDY_DATA);
2308 
2309 	/* MAC & Ciphers: set data location and operation */
2310 	switch (crd->crd_alg) {
2311 	case CRYPTO_SHA1_HMAC_96:
2312 		mv_p->pkt_header.desc.acc_config |= MV_ACC_CRYPTO_MAC_96;
2313 		/* fall through */
2314 	case CRYPTO_SHA1_HMAC:
2315 		mv_p->mac_dst = crd->crd_inject;
2316 		mv_p->mac_off = crd->crd_skip;
2317 		mv_p->mac_len = crd->crd_len;
2318 		MV_ACC_CRYPTO_MAC_SET(mv_p->pkt_header.desc.acc_config,
2319 		    MV_ACC_CRYPTO_MAC_HMAC_SHA1);
2320 		mvxpsec_packet_update_op_order(mv_p, MV_ACC_CRYPTO_OP_MAC);
2321 		/* No more setup for MAC */
2322 		return 0;
2323 	case CRYPTO_MD5_HMAC_96:
2324 		mv_p->pkt_header.desc.acc_config |= MV_ACC_CRYPTO_MAC_96;
2325 		/* fall through */
2326 	case CRYPTO_MD5_HMAC:
2327 		mv_p->mac_dst = crd->crd_inject;
2328 		mv_p->mac_off = crd->crd_skip;
2329 		mv_p->mac_len = crd->crd_len;
2330 		MV_ACC_CRYPTO_MAC_SET(mv_p->pkt_header.desc.acc_config,
2331 		    MV_ACC_CRYPTO_MAC_HMAC_MD5);
2332 		mvxpsec_packet_update_op_order(mv_p, MV_ACC_CRYPTO_OP_MAC);
2333 		/* No more setup for MAC */
2334 		return 0;
2335 	case CRYPTO_DES_CBC:
2336 		mv_p->enc_ivoff = crd->crd_inject;
2337 		mv_p->enc_off = crd->crd_skip;
2338 		mv_p->enc_len = crd->crd_len;
2339 		ivlen = 8;
2340 		MV_ACC_CRYPTO_ENC_SET(mv_p->pkt_header.desc.acc_config,
2341 		    MV_ACC_CRYPTO_ENC_DES);
2342 		mv_p->pkt_header.desc.acc_config |= MV_ACC_CRYPTO_CBC;
2343 		mvxpsec_packet_update_op_order(mv_p, MV_ACC_CRYPTO_OP_ENC);
2344 		break;
2345 	case CRYPTO_3DES_CBC:
2346 		mv_p->enc_ivoff = crd->crd_inject;
2347 		mv_p->enc_off = crd->crd_skip;
2348 		mv_p->enc_len = crd->crd_len;
2349 		ivlen = 8;
2350 		MV_ACC_CRYPTO_ENC_SET(mv_p->pkt_header.desc.acc_config,
2351 		    MV_ACC_CRYPTO_ENC_3DES);
2352 		mv_p->pkt_header.desc.acc_config |= MV_ACC_CRYPTO_CBC;
2353 		mv_p->pkt_header.desc.acc_config |= MV_ACC_CRYPTO_3DES_EDE;
2354 		mvxpsec_packet_update_op_order(mv_p, MV_ACC_CRYPTO_OP_ENC);
2355 		break;
2356 	case CRYPTO_AES_CBC:
2357 		mv_p->enc_ivoff = crd->crd_inject;
2358 		mv_p->enc_off = crd->crd_skip;
2359 		mv_p->enc_len = crd->crd_len;
2360 		ivlen = 16;
2361 		MV_ACC_CRYPTO_ENC_SET(mv_p->pkt_header.desc.acc_config,
2362 		    MV_ACC_CRYPTO_ENC_AES);
2363 		MV_ACC_CRYPTO_AES_KLEN_SET(
2364 		    mv_p->pkt_header.desc.acc_config,
2365 		   mvxpsec_aesklen(mv_p->mv_s->enc_klen));
2366 		mv_p->pkt_header.desc.acc_config |= MV_ACC_CRYPTO_CBC;
2367 		mvxpsec_packet_update_op_order(mv_p, MV_ACC_CRYPTO_OP_ENC);
2368 		break;
2369 	default:
2370 		log(LOG_ERR, "%s: Unknown algorithm %d\n",
2371 		    __func__, crd->crd_alg);
2372 		return EINVAL;
2373 	}
2374 
2375 	/* Operations only for Cipher, not MAC */
2376 	if (crd->crd_flags & CRD_F_ENCRYPT) {
2377 		/* Ciphers: Originate IV for Encryption.*/
2378 		mv_p->pkt_header.desc.acc_config &= ~MV_ACC_CRYPTO_DECRYPT;
2379 		mv_p->flags |= DIR_ENCRYPT;
2380 
2381 		if (crd->crd_flags & CRD_F_IV_EXPLICIT) {
2382 			MVXPSEC_PRINTF(MVXPSEC_DEBUG_ENC_IV, "EXPLICIT IV\n");
2383 			mv_p->flags |= CRP_EXT_IV;
2384 			mvxpsec_packet_write_iv(mv_p, crd->crd_iv, ivlen);
2385 			mv_p->enc_ivoff = MVXPSEC_SRAM_IV_EXT_OFF;
2386 		}
2387 		else if (crd->crd_flags & CRD_F_IV_PRESENT) {
2388 			MVXPSEC_PRINTF(MVXPSEC_DEBUG_ENC_IV, "IV is present\n");
2389 			mvxpsec_packet_copy_iv(mv_p, crd->crd_inject, ivlen);
2390 		}
2391 		else {
2392 			MVXPSEC_PRINTF(MVXPSEC_DEBUG_ENC_IV, "Create New IV\n");
2393 			mvxpsec_packet_write_iv(mv_p, NULL, ivlen);
2394 		}
2395 	}
2396 	else {
		/* Ciphers: IV is loaded from crd_inject when it's present */
2398 		mv_p->pkt_header.desc.acc_config |= MV_ACC_CRYPTO_DECRYPT;
2399 		mv_p->flags |= DIR_DECRYPT;
2400 
2401 		if (crd->crd_flags & CRD_F_IV_EXPLICIT) {
2402 #ifdef MVXPSEC_DEBUG
2403 			if (mvxpsec_debug & MVXPSEC_DEBUG_ENC_IV) {
2404 				MVXPSEC_PRINTF(MVXPSEC_DEBUG_ENC_IV,
2405 				    "EXPLICIT IV(Decrypt)\n");
2406 				mvxpsec_dump_data(__func__, crd->crd_iv, ivlen);
2407 			}
2408 #endif
2409 			mv_p->flags |= CRP_EXT_IV;
2410 			mvxpsec_packet_write_iv(mv_p, crd->crd_iv, ivlen);
2411 			mv_p->enc_ivoff = MVXPSEC_SRAM_IV_EXT_OFF;
2412 		}
2413 	}
2414 
2415 	KASSERT(!((mv_p->flags & DIR_ENCRYPT) && (mv_p->flags & DIR_DECRYPT)));
2416 
2417 	return 0;
2418 }
2419 
2420 INLINE int
2421 mvxpsec_parse_crp(struct mvxpsec_packet *mv_p)
2422 {
2423 	struct cryptop *crp = mv_p->crp;
2424 	struct cryptodesc *crd;
2425 	int err;
2426 
2427 	KASSERT(crp);
2428 
2429 	mvxpsec_packet_reset_op(mv_p);
2430 
2431 	for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
2432 		err = mvxpsec_parse_crd(mv_p, crd);
2433 		if (err)
2434 			return err;
2435 	}
2436 
2437 	return 0;
2438 }
2439 
2440 INLINE int
2441 mvxpsec_packet_setcrp(struct mvxpsec_packet *mv_p, struct cryptop *crp)
2442 {
2443 	int err = EINVAL;
2444 
	/* register the crp with the MVXPSEC packet */
2446 	if (crp->crp_flags & CRYPTO_F_IMBUF) {
2447 		err = mvxpsec_packet_setmbuf(mv_p,
2448 		    (struct mbuf *)crp->crp_buf);
2449 		mv_p->crp = crp;
2450 	}
2451 	else if (crp->crp_flags & CRYPTO_F_IOV) {
2452 		err = mvxpsec_packet_setuio(mv_p,
2453 		    (struct uio *)crp->crp_buf);
2454 		mv_p->crp = crp;
2455 	}
2456 	else {
2457 		err = mvxpsec_packet_setdata(mv_p,
		    crp->crp_buf, crp->crp_ilen);
2459 		mv_p->crp = crp;
2460 	}
2461 	if (__predict_false(err))
2462 		return err;
2463 
2464 	/* parse crp and setup MVXPSEC registers/descriptors */
2465 	err = mvxpsec_parse_crp(mv_p);
2466 	if (__predict_false(err))
2467 		return err;
2468 
2469 	/* fixup data offset to fit MVXPSEC internal SRAM */
2470 	err = mvxpsec_header_finalize(mv_p);
2471 	if (__predict_false(err))
2472 		return err;
2473 
2474 	return 0;
2475 }
2476 
2477 /*
2478  * load data for encrypt/decrypt/authentication
2479  *
2480  * data is raw kernel memory area.
2481  */
2482 STATIC int
2483 mvxpsec_packet_setdata(struct mvxpsec_packet *mv_p,
2484     void *data, uint32_t data_len)
2485 {
2486 	struct mvxpsec_session *mv_s = mv_p->mv_s;
2487 	struct mvxpsec_softc *sc = mv_s->sc;
2488 
2489 	if (bus_dmamap_load(sc->sc_dmat, mv_p->data_map, data, data_len,
2490 	    NULL, BUS_DMA_NOWAIT)) {
2491 		log(LOG_ERR, "%s: cannot load data\n", __func__);
2492 		return -1;
2493 	}
2494 	mv_p->data_type = MVXPSEC_DATA_RAW;
2495 	mv_p->data_raw = data;
2496 	mv_p->data_len = data_len;
2497 	mv_p->flags |= RDY_DATA;
2498 
2499 	return 0;
2500 }
2501 
2502 /*
2503  * load data for encrypt/decrypt/authentication
2504  *
2505  * data is mbuf based network data.
2506  */
2507 STATIC int
2508 mvxpsec_packet_setmbuf(struct mvxpsec_packet *mv_p, struct mbuf *m)
2509 {
2510 	struct mvxpsec_session *mv_s = mv_p->mv_s;
2511 	struct mvxpsec_softc *sc = mv_s->sc;
2512 	size_t pktlen = 0;
2513 
2514 	if (__predict_true(m->m_flags & M_PKTHDR))
2515 		pktlen = m->m_pkthdr.len;
2516 	else {
2517 		struct mbuf *mp = m;
2518 
2519 		while (mp != NULL) {
			pktlen += mp->m_len;
2521 			mp = mp->m_next;
2522 		}
2523 	}
2524 	if (pktlen > SRAM_PAYLOAD_SIZE) {
2525 #if NIPSEC > 0
2526 		extern   percpu_t *espstat_percpu;
		/* XXX:
		 * layer violation: opencrypto already knows our max packet
		 * size from the crypto_register(9) API.
		 */
2531 
2532 		_NET_STATINC(espstat_percpu, ESP_STAT_TOOBIG);
2533 #endif
2534 		log(LOG_ERR,
2535 		    "%s: ESP Packet too large: %zu [oct.] > %zu [oct.]\n",
2536 		    device_xname(sc->sc_dev),
2537 		    (size_t)pktlen, SRAM_PAYLOAD_SIZE);
2538 		mv_p->data_type = MVXPSEC_DATA_NONE;
2539 		mv_p->data_mbuf = NULL;
2540 		return -1;
2541 	}
2542 
2543 	if (bus_dmamap_load_mbuf(sc->sc_dmat, mv_p->data_map, m,
2544 	    BUS_DMA_NOWAIT)) {
2545 		mv_p->data_type = MVXPSEC_DATA_NONE;
2546 		mv_p->data_mbuf = NULL;
2547 		log(LOG_ERR, "%s: cannot load mbuf\n", __func__);
2548 		return -1;
2549 	}
2550 
2551 	/* set payload buffer */
2552 	mv_p->data_type = MVXPSEC_DATA_MBUF;
2553 	mv_p->data_mbuf = m;
2554 	if (m->m_flags & M_PKTHDR) {
2555 		mv_p->data_len = m->m_pkthdr.len;
2556 	}
2557 	else {
2558 		mv_p->data_len = 0;
2559 		while (m) {
2560 			mv_p->data_len += m->m_len;
2561 			m = m->m_next;
2562 		}
2563 	}
2564 	mv_p->flags |= RDY_DATA;
2565 
2566 	return 0;
2567 }
2568 
2569 STATIC int
2570 mvxpsec_packet_setuio(struct mvxpsec_packet *mv_p, struct uio *uio)
2571 {
2572 	struct mvxpsec_session *mv_s = mv_p->mv_s;
2573 	struct mvxpsec_softc *sc = mv_s->sc;
2574 
2575 	if (uio->uio_resid > SRAM_PAYLOAD_SIZE) {
2576 #if NIPSEC > 0
2577 		extern   percpu_t *espstat_percpu;
		/* XXX:
		 * layer violation: opencrypto already knows our max packet
		 * size from the crypto_register(9) API.
		 */
2582 
2583 		_NET_STATINC(espstat_percpu, ESP_STAT_TOOBIG);
2584 #endif
2585 		log(LOG_ERR,
2586 		    "%s: uio request too large: %zu [oct.] > %zu [oct.]\n",
2587 		    device_xname(sc->sc_dev),
2588 		    uio->uio_resid, SRAM_PAYLOAD_SIZE);
2589 		mv_p->data_type = MVXPSEC_DATA_NONE;
2590 		mv_p->data_mbuf = NULL;
2591 		return -1;
2592 	}
2593 
2594 	if (bus_dmamap_load_uio(sc->sc_dmat, mv_p->data_map, uio,
2595 	    BUS_DMA_NOWAIT)) {
2596 		mv_p->data_type = MVXPSEC_DATA_NONE;
2597 		mv_p->data_mbuf = NULL;
2598 		log(LOG_ERR, "%s: cannot load uio buf\n", __func__);
2599 		return -1;
2600 	}
2601 
2602 	/* set payload buffer */
2603 	mv_p->data_type = MVXPSEC_DATA_UIO;
2604 	mv_p->data_uio = uio;
2605 	mv_p->data_len = uio->uio_resid;
2606 	mv_p->flags |= RDY_DATA;
2607 
2608 	return 0;
2609 }
2610 
2611 STATIC int
2612 mvxpsec_packet_rdata(struct mvxpsec_packet *mv_p,
2613     int off, int len, void *cp)
2614 {
2615 	uint8_t *p;
2616 
2617 	if (mv_p->data_type == MVXPSEC_DATA_RAW) {
2618 		p = (uint8_t *)mv_p->data_raw + off;
2619 		memcpy(cp, p, len);
2620 	}
2621 	else if (mv_p->data_type == MVXPSEC_DATA_MBUF) {
2622 		m_copydata(mv_p->data_mbuf, off, len, cp);
2623 	}
2624 	else if (mv_p->data_type == MVXPSEC_DATA_UIO) {
2625 		cuio_copydata(mv_p->data_uio, off, len, cp);
2626 	}
2627 	else
2628 		return -1;
2629 
2630 	return 0;
2631 }
2632 
2633 STATIC int
2634 mvxpsec_packet_wdata(struct mvxpsec_packet *mv_p,
2635     int off, int len, void *cp)
2636 {
2637 	uint8_t *p;
2638 
2639 	if (mv_p->data_type == MVXPSEC_DATA_RAW) {
2640 		p = (uint8_t *)mv_p->data_raw + off;
2641 		memcpy(p, cp, len);
2642 	}
2643 	else if (mv_p->data_type == MVXPSEC_DATA_MBUF) {
2644 		m_copyback(mv_p->data_mbuf, off, len, cp);
2645 	}
2646 	else if (mv_p->data_type == MVXPSEC_DATA_UIO) {
2647 		cuio_copyback(mv_p->data_uio, off, len, cp);
2648 	}
2649 	else
2650 		return -1;
2651 
2652 	return 0;
2653 }
2654 
2655 /*
 * Set the cipher initialization vector (IV) for the packet.
2657  */
2658 STATIC int
2659 mvxpsec_packet_write_iv(struct mvxpsec_packet *mv_p, void *iv, int ivlen)
2660 {
2661 	uint8_t ivbuf[16];
2662 
2663 	KASSERT(ivlen == 8 || ivlen == 16);
2664 
2665 	if (iv == NULL) {
		if (mv_p->mv_s->sflags & RDY_CRP_IV) {
2667 			/* use per session IV (compatible with KAME IPsec) */
2668 			mv_p->pkt_header.crp_iv_work = mv_p->mv_s->session_iv;
2669 			mv_p->flags |= RDY_CRP_IV;
2670 			return 0;
2671 		}
2672 		cprng_fast(ivbuf, ivlen);
2673 		iv = ivbuf;
2674 	}
2675 	memcpy(&mv_p->pkt_header.crp_iv_work, iv, ivlen);
2676 	if (mv_p->flags & CRP_EXT_IV) {
2677 		memcpy(&mv_p->pkt_header.crp_iv_ext, iv, ivlen);
2678 		mv_p->ext_iv = iv;
2679 		mv_p->ext_ivlen = ivlen;
2680 	}
2681 	mv_p->flags |= RDY_CRP_IV;
2682 
2683 	return 0;
2684 }
2685 
2686 STATIC int
2687 mvxpsec_packet_copy_iv(struct mvxpsec_packet *mv_p, int off, int ivlen)
2688 {
2689 	mvxpsec_packet_rdata(mv_p, off, ivlen,
2690 	    &mv_p->pkt_header.crp_iv_work);
2691 	mv_p->flags |= RDY_CRP_IV;
2692 
2693 	return 0;
2694 }
2695 
2696 /*
 * set an encryption or decryption key for the session
2698  *
2699  * Input key material is big endian.
2700  */
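/*
 * Layout sketch (illustrative, based on the code below): for AES-128
 * the 16 key octets land in ekp[0..3] as 32-bit words stored as-is,
 * ekp[4..7] are zeroed, and the hardware decryption key is derived
 * from the expanded schedule by mv_aes_deckey():
 *
 *	uint32_t ek[8], dk[8];
 *
 *	mvxpsec_key_precomp(CRYPTO_AES_CBC, keymat, 128, ek, dk);
 *	-- ek[0..3] = key words, ek[4..7] = 0, dk = last round key
 */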
2701 STATIC int
2702 mvxpsec_key_precomp(int alg, void *keymat, int kbitlen,
2703     void *key_encrypt, void *key_decrypt)
2704 {
2705 	uint32_t *kp = keymat;
2706 	uint32_t *ekp = key_encrypt;
2707 	uint32_t *dkp = key_decrypt;
2708 	int i;
2709 
2710 	switch (alg) {
2711 	case CRYPTO_DES_CBC:
2712 		if (kbitlen < 64 || (kbitlen % 8) != 0) {
2713 			log(LOG_WARNING,
2714 			    "mvxpsec: invalid DES keylen %d\n", kbitlen);
2715 			return EINVAL;
2716 		}
2717 		for (i = 0; i < 2; i++)
2718 			dkp[i] = ekp[i] = kp[i];
2719 		for (; i < 8; i++)
2720 			dkp[i] = ekp[i] = 0;
2721 		break;
2722 	case CRYPTO_3DES_CBC:
2723 		if (kbitlen < 192 || (kbitlen % 8) != 0) {
2724 			log(LOG_WARNING,
2725 			    "mvxpsec: invalid 3DES keylen %d\n", kbitlen);
2726 			return EINVAL;
2727 		}
2728 		for (i = 0; i < 8; i++)
2729 			dkp[i] = ekp[i] = kp[i];
2730 		break;
2731 	case CRYPTO_AES_CBC:
2732 		if (kbitlen < 128) {
2733 			log(LOG_WARNING,
2734 			    "mvxpsec: invalid AES keylen %d\n", kbitlen);
2735 			return EINVAL;
2736 		}
2737 		else if (kbitlen < 192) {
2738 			/* AES-128 */
2739 			for (i = 0; i < 4; i++)
2740 				ekp[i] = kp[i];
2741 			for (; i < 8; i++)
2742 				ekp[i] = 0;
2743 		}
		else if (kbitlen < 256) {
2745 			/* AES-192 */
2746 			for (i = 0; i < 6; i++)
2747 				ekp[i] = kp[i];
2748 			for (; i < 8; i++)
2749 				ekp[i] = 0;
2750 		}
2751 		else  {
2752 			/* AES-256 */
2753 			for (i = 0; i < 8; i++)
2754 				ekp[i] = kp[i];
2755 		}
2756 		/* make decryption key */
2757 		mv_aes_deckey((uint8_t *)dkp, (uint8_t *)ekp, kbitlen);
2758 		break;
2759 	default:
2760 		for (i = 0; i < 8; i++)
			ekp[i] = dkp[i] = 0;
2762 		break;
2763 	}
2764 
2765 #ifdef MVXPSEC_DEBUG
2766 	if (mvxpsec_debug & MVXPSEC_DEBUG_OPENCRYPTO) {
2767 		MVXPSEC_PRINTF(MVXPSEC_DEBUG_OPENCRYPTO,
2768 		    "%s: keyregistered\n", __func__);
2769 		mvxpsec_dump_data(__func__, ekp, 32);
2770 	}
2771 #endif
2772 
2773 	return 0;
2774 }
2775 
2776 /*
2777  * set MAC key to the session
2778  *
 * The MAC engine has no register for the key itself, but it has
 * inner and outer IV registers.  Software must compute the IVs
 * before enabling the engine.
 *
 * Each IV is a hash of the key XORed with ipad/opad, as defined
 * by the FIPS-198a standard.
2785  */
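/*
 * FIPS-198a refresher (what the precomputation below produces):
 *
 *	iv_inner = H-state after one block of (key XOR ipad)
 *	iv_outer = H-state after one block of (key XOR opad)
 *	HMAC(m)  = H((key XOR opad) || H((key XOR ipad) || m))
 *
 * Only the two intermediate chaining states are loaded into the
 * engine; it never sees the key itself and merely continues the two
 * hashes over the message and the inner digest.
 */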
2786 STATIC int
2787 mvxpsec_hmac_precomp(int alg, void *key, int kbitlen,
2788     void *iv_inner, void *iv_outer)
2789 {
2790 	SHA1_CTX sha1;
2791 	MD5_CTX md5;
2792 	uint8_t *key8 = key;
2793 	uint8_t kbuf[64];
2794 	uint8_t ipad[64];
2795 	uint8_t opad[64];
2796 	uint32_t *iv_in = iv_inner;
2797 	uint32_t *iv_out = iv_outer;
2798 	int kbytelen;
2799 	int i;
2800 #define HMAC_IPAD 0x36
2801 #define HMAC_OPAD 0x5c
2802 
2803 	kbytelen = kbitlen / 8;
2804 	KASSERT(kbitlen == kbytelen * 8);
	if (kbytelen > 64) {
		/*
		 * Keys longer than one block are replaced by their
		 * digest (RFC 2104).  XXX: strictly, MD5-HMAC should use
		 * MD5 here; use the SHA1 digest length so that no
		 * uninitialized kbuf bytes end up in ipad/opad.
		 */
		SHA1Init(&sha1);
		SHA1Update(&sha1, key, kbytelen);
		SHA1Final(kbuf, &sha1);
		key8 = kbuf;
		kbytelen = SHA1_DIGEST_LENGTH;
	}
2812 
2813 	/* make initial 64 oct. string */
2814 	switch (alg) {
2815 	case CRYPTO_SHA1_HMAC_96:
2816 	case CRYPTO_SHA1_HMAC:
2817 	case CRYPTO_MD5_HMAC_96:
2818 	case CRYPTO_MD5_HMAC:
2819 		for (i = 0; i < kbytelen; i++) {
2820 			ipad[i] = (key8[i] ^ HMAC_IPAD);
2821 			opad[i] = (key8[i] ^ HMAC_OPAD);
2822 		}
2823 		for (; i < 64; i++) {
2824 			ipad[i] = HMAC_IPAD;
2825 			opad[i] = HMAC_OPAD;
2826 		}
2827 		break;
2828 	default:
2829 		break;
2830 	}
2831 #ifdef MVXPSEC_DEBUG
2832 	if (mvxpsec_debug & MVXPSEC_DEBUG_OPENCRYPTO) {
2833 		MVXPSEC_PRINTF(MVXPSEC_DEBUG_OPENCRYPTO,
2834 		    "%s: HMAC-KEY Pre-comp:\n", __func__);
		mvxpsec_dump_data(__func__, key8, kbytelen);
2836 		MVXPSEC_PRINTF(MVXPSEC_DEBUG_OPENCRYPTO,
2837 		    "%s: ipad:\n", __func__);
2838 		mvxpsec_dump_data(__func__, ipad, sizeof(ipad));
2839 		MVXPSEC_PRINTF(MVXPSEC_DEBUG_OPENCRYPTO,
2840 		    "%s: opad:\n", __func__);
2841 		mvxpsec_dump_data(__func__, opad, sizeof(opad));
2842 	}
2843 #endif
2844 
2845 	/* make iv from string */
2846 	switch (alg) {
2847 	case CRYPTO_SHA1_HMAC_96:
2848 	case CRYPTO_SHA1_HMAC:
2849 		MVXPSEC_PRINTF(MVXPSEC_DEBUG_OPENCRYPTO,
2850 		    "%s: Generate iv_in(SHA1)\n", __func__);
2851 		SHA1Init(&sha1);
2852 		SHA1Update(&sha1, ipad, 64);
2853 		/* XXX: private state... (LE) */
2854 		iv_in[0] = htobe32(sha1.state[0]);
2855 		iv_in[1] = htobe32(sha1.state[1]);
2856 		iv_in[2] = htobe32(sha1.state[2]);
2857 		iv_in[3] = htobe32(sha1.state[3]);
2858 		iv_in[4] = htobe32(sha1.state[4]);
2859 
2860 		MVXPSEC_PRINTF(MVXPSEC_DEBUG_OPENCRYPTO,
2861 		    "%s: Generate iv_out(SHA1)\n", __func__);
2862 		SHA1Init(&sha1);
2863 		SHA1Update(&sha1, opad, 64);
2864 		/* XXX: private state... (LE) */
2865 		iv_out[0] = htobe32(sha1.state[0]);
2866 		iv_out[1] = htobe32(sha1.state[1]);
2867 		iv_out[2] = htobe32(sha1.state[2]);
2868 		iv_out[3] = htobe32(sha1.state[3]);
2869 		iv_out[4] = htobe32(sha1.state[4]);
2870 		break;
2871 	case CRYPTO_MD5_HMAC_96:
2872 	case CRYPTO_MD5_HMAC:
2873 		MVXPSEC_PRINTF(MVXPSEC_DEBUG_OPENCRYPTO,
2874 		    "%s: Generate iv_in(MD5)\n", __func__);
2875 		MD5Init(&md5);
2876 		MD5Update(&md5, ipad, sizeof(ipad));
2877 		/* XXX: private state... (LE) */
2878 		iv_in[0] = htobe32(md5.state[0]);
2879 		iv_in[1] = htobe32(md5.state[1]);
2880 		iv_in[2] = htobe32(md5.state[2]);
2881 		iv_in[3] = htobe32(md5.state[3]);
2882 		iv_in[4] = 0;
2883 
2884 		MVXPSEC_PRINTF(MVXPSEC_DEBUG_OPENCRYPTO,
2885 		    "%s: Generate iv_out(MD5)\n", __func__);
2886 		MD5Init(&md5);
2887 		MD5Update(&md5, opad, sizeof(opad));
2888 		/* XXX: private state... (LE) */
2889 		iv_out[0] = htobe32(md5.state[0]);
2890 		iv_out[1] = htobe32(md5.state[1]);
2891 		iv_out[2] = htobe32(md5.state[2]);
2892 		iv_out[3] = htobe32(md5.state[3]);
2893 		iv_out[4] = 0;
2894 		break;
2895 	default:
2896 		break;
2897 	}
2898 
2899 #ifdef MVXPSEC_DEBUG
2900 	if (mvxpsec_debug & MVXPSEC_DEBUG_HASH_IV) {
2901 		MVXPSEC_PRINTF(MVXPSEC_DEBUG_HASH_IV,
2902 		    "%s: HMAC IV-IN\n", __func__);
2903 		mvxpsec_dump_data(__func__, (uint8_t *)iv_in, 20);
2904 		MVXPSEC_PRINTF(MVXPSEC_DEBUG_HASH_IV,
2905 		    "%s: HMAC IV-OUT\n", __func__);
2906 		mvxpsec_dump_data(__func__, (uint8_t *)iv_out, 20);
2907 	}
2908 #endif
2909 
2910 	return 0;
2911 #undef HMAC_IPAD
2912 #undef HMAC_OPAD
2913 }
2914 
2915 /*
2916  * AES Support routine
2917  */
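/*
 * The tables and helpers below implement just enough of the Rijndael
 * key schedule to derive the hardware decryption key: the engine
 * wants the tail (last round keys) of the expanded key.  Sketch of
 * use, as in mvxpsec_key_precomp() above (keyLen is in bits):
 *
 *	uint8_t ek[32], dk[32];
 *
 *	mv_aes_deckey(dk, ek, 256);
 *	-- dk = W[rounds], plus the tail of W[rounds-1] for KC > 4
 */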
static uint8_t AES_SBOX[256] = {
	 99, 124, 119, 123, 242, 107, 111, 197,  48,   1, 103,  43, 254, 215,
	171, 118, 202, 130, 201, 125, 250,  89,  71, 240, 173, 212, 162, 175,
	156, 164, 114, 192, 183, 253, 147,  38,  54,  63, 247, 204,  52, 165,
	229, 241, 113, 216,  49,  21,   4, 199,  35, 195,  24, 150,   5, 154,
	  7,  18, 128, 226, 235,  39, 178, 117,   9, 131,  44,  26,  27, 110,
	 90, 160,  82,  59, 214, 179,  41, 227,  47, 132,  83, 209,   0, 237,
	 32, 252, 177,  91, 106, 203, 190,  57,  74,  76,  88, 207, 208, 239,
	170, 251,  67,  77,  51, 133,  69, 249,   2, 127,  80,  60, 159, 168,
	 81, 163,  64, 143, 146, 157,  56, 245, 188, 182, 218,  33,  16, 255,
	243, 210, 205,  12,  19, 236,  95, 151,  68,  23, 196, 167, 126,  61,
	100,  93,  25, 115,  96, 129,  79, 220,  34,  42, 144, 136,  70, 238,
	184,  20, 222,  94,  11, 219, 224,  50,  58,  10,  73,   6,  36,  92,
	194, 211, 172,  98, 145, 149, 228, 121, 231, 200,  55, 109, 141, 213,
	 78, 169, 108,  86, 244, 234, 101, 122, 174,   8, 186, 120,  37,  46,
	 28, 166, 180, 198, 232, 221, 116,  31,  75, 189, 139, 138, 112,  62,
	181, 102,  72,   3, 246,  14,  97,  53,  87, 185, 134, 193,  29, 158,
	225, 248, 152,  17, 105, 217, 142, 148, 155,  30, 135, 233, 206,  85,
	 40, 223, 140, 161, 137,  13, 191, 230,  66, 104,  65, 153,  45,  15,
	176,  84, 187,  22
};
2939 
static uint32_t AES_RCON[30] = {
	0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1b, 0x36, 0x6c, 0xd8,
	0xab, 0x4d, 0x9a, 0x2f, 0x5e, 0xbc, 0x63, 0xc6, 0x97, 0x35, 0x6a, 0xd4,
	0xb3, 0x7d, 0xfa, 0xef, 0xc5, 0x91
};
2945 
2946 STATIC int
2947 mv_aes_ksched(uint8_t k[4][MAXKC], int keyBits,
2948     uint8_t W[MAXROUNDS+1][4][MAXBC])
2949 {
2950 	int KC, BC, ROUNDS;
2951 	int i, j, t, rconpointer = 0;
2952 	uint8_t tk[4][MAXKC];
2953 
	switch (keyBits) {
	case 128:
		ROUNDS = 10;
		KC = 4;
		break;
	case 192:
		ROUNDS = 12;
		KC = 6;
		break;
	case 256:
		ROUNDS = 14;
		KC = 8;
		break;
	default:
		return (-1);
	}
2970 	BC = 4; /* 128 bits */
2971 
	for (j = 0; j < KC; j++)
		for (i = 0; i < 4; i++)
			tk[i][j] = k[i][j];
	t = 0;

	/* copy values into round key array */
	for (j = 0; (j < KC) && (t < (ROUNDS+1)*BC); j++, t++)
		for (i = 0; i < 4; i++)
			W[t / BC][i][t % BC] = tk[i][j];

	while (t < (ROUNDS+1)*BC) {	/* not enough round key material */
		/* calculate new values */
		for (i = 0; i < 4; i++)
			tk[i][0] ^= AES_SBOX[tk[(i+1)%4][KC-1]];
		tk[0][0] ^= AES_RCON[rconpointer++];

		if (KC != 8)
			for (j = 1; j < KC; j++)
				for (i = 0; i < 4; i++)
					tk[i][j] ^= tk[i][j-1];
		else {
			for (j = 1; j < KC/2; j++)
				for (i = 0; i < 4; i++)
					tk[i][j] ^= tk[i][j-1];
			for (i = 0; i < 4; i++)
				tk[i][KC/2] ^= AES_SBOX[tk[i][KC/2 - 1]];
			for (j = KC/2 + 1; j < KC; j++)
				for (i = 0; i < 4; i++)
					tk[i][j] ^= tk[i][j-1];
		}
		/* copy values into round key array */
		for (j = 0; (j < KC) && (t < (ROUNDS+1)*BC); j++, t++)
			for (i = 0; i < 4; i++)
				W[t / BC][i][t % BC] = tk[i][j];
	}
3005 
3006 	return 0;
3007 }
3008 
3009 STATIC int
3010 mv_aes_deckey(uint8_t *expandedKey, uint8_t *keyMaterial, int keyLen)
3011 {
3012 	uint8_t   W[MAXROUNDS+1][4][MAXBC];
3013 	uint8_t   k[4][MAXKC];
3014 	uint8_t   j;
3015 	int     i, rounds, KC;
3016 
3017 	if (expandedKey == NULL)
3018 		return -1;
3019 
3020 	if (!((keyLen == 128) || (keyLen == 192) || (keyLen == 256)))
3021 		return -1;
3022 
3023 	if (keyMaterial == NULL)
3024 		return -1;
3025 
3026 	/* initialize key schedule: */
3027 	for (i=0; i<keyLen/8; i++) {
3028 		j = keyMaterial[i];
3029 		k[i % 4][i / 4] = j;
3030 	}
3031 
3032 	mv_aes_ksched(k, keyLen, W);
3033 	switch (keyLen) {
3034 	case 128:
3035 		rounds = 10;
3036 		KC = 4;
3037 		break;
3038 	case 192:
3039 		rounds = 12;
3040 		KC = 6;
3041 		break;
3042 	case 256:
3043 		rounds = 14;
3044 		KC = 8;
3045 		break;
3046 	default:
3047 		return -1;
3048 	}
3049 
	for (i = 0; i < MAXBC; i++)
		for (j = 0; j < 4; j++)
			expandedKey[i*4+j] = W[rounds][j][i];
	for (; i < KC; i++)
		for (j = 0; j < 4; j++)
			expandedKey[i*4+j] = W[rounds-1][j][i+MAXBC-KC];
3056 
3057 	return 0;
3058 }
3059 
3060 /*
3061  * Clear cipher/mac operation state
3062  */
3063 INLINE void
3064 mvxpsec_packet_reset_op(struct mvxpsec_packet *mv_p)
3065 {
3066 	mv_p->pkt_header.desc.acc_config = 0;
3067 	mv_p->enc_off = mv_p->enc_ivoff = mv_p->enc_len = 0;
3068 	mv_p->mac_off = mv_p->mac_dst = mv_p->mac_len = 0;
3069 }
3070 
3071 /*
3072  * update MVXPSEC operation order
3073  */
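/*
 * Resulting opcode, as driven by mvxpsec_parse_crd():
 *
 *	ENC only		-> MV_ACC_CRYPTO_OP_ENC
 *	MAC only		-> MV_ACC_CRYPTO_OP_MAC
 *	ENC first, then MAC	-> MV_ACC_CRYPTO_OP_ENCMAC (encrypt side)
 *	MAC first, then ENC	-> MV_ACC_CRYPTO_OP_MACENC (decrypt side)
 */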
3074 INLINE void
3075 mvxpsec_packet_update_op_order(struct mvxpsec_packet *mv_p, int op)
3076 {
3077 	struct mvxpsec_acc_descriptor *acc_desc = &mv_p->pkt_header.desc;
3078 	uint32_t cur_op = acc_desc->acc_config & MV_ACC_CRYPTO_OP_MASK;
3079 
3080 	KASSERT(op == MV_ACC_CRYPTO_OP_MAC || op == MV_ACC_CRYPTO_OP_ENC);
3081 	KASSERT((op & MV_ACC_CRYPTO_OP_MASK) == op);
3082 
3083 	if (cur_op == 0)
3084 		acc_desc->acc_config |= op;
3085 	else if (cur_op == MV_ACC_CRYPTO_OP_MAC && op == MV_ACC_CRYPTO_OP_ENC) {
3086 		acc_desc->acc_config &= ~MV_ACC_CRYPTO_OP_MASK;
3087 		acc_desc->acc_config |= MV_ACC_CRYPTO_OP_MACENC;
3088 		/* MAC then ENC (= decryption) */
3089 	}
3090 	else if (cur_op == MV_ACC_CRYPTO_OP_ENC && op == MV_ACC_CRYPTO_OP_MAC) {
3091 		acc_desc->acc_config &= ~MV_ACC_CRYPTO_OP_MASK;
3092 		acc_desc->acc_config |= MV_ACC_CRYPTO_OP_ENCMAC;
3093 		/* ENC then MAC (= encryption) */
3094 	}
3095 	else {
		log(LOG_ERR, "%s: multiple %s algorithms are not supported.\n",
		    __func__,
		    (op == MV_ACC_CRYPTO_OP_ENC) ?
		    "encryption" : "authentication");
3099 	}
3100 }
3101 
3102 /*
3103  * Parameter Conversions
3104  */
3105 INLINE uint32_t
3106 mvxpsec_alg2acc(uint32_t alg)
3107 {
3108 	uint32_t reg;
3109 
3110 	switch (alg) {
3111 	case CRYPTO_DES_CBC:
3112 		reg = MV_ACC_CRYPTO_ENC_DES;
3113 		reg |= MV_ACC_CRYPTO_CBC;
3114 		break;
3115 	case CRYPTO_3DES_CBC:
3116 		reg = MV_ACC_CRYPTO_ENC_3DES;
3117 		reg |= MV_ACC_CRYPTO_3DES_EDE;
3118 		reg |= MV_ACC_CRYPTO_CBC;
3119 		break;
3120 	case CRYPTO_AES_CBC:
3121 		reg = MV_ACC_CRYPTO_ENC_AES;
3122 		reg |= MV_ACC_CRYPTO_CBC;
3123 		break;
3124 	case CRYPTO_SHA1_HMAC_96:
3125 		reg = MV_ACC_CRYPTO_MAC_HMAC_SHA1;
3126 		reg |= MV_ACC_CRYPTO_MAC_96;
3127 		break;
3128 	case CRYPTO_MD5_HMAC_96:
3129 		reg = MV_ACC_CRYPTO_MAC_HMAC_MD5;
3130 		reg |= MV_ACC_CRYPTO_MAC_96;
3131 		break;
3132 	default:
3133 		reg = 0;
3134 		break;
3135 	}
3136 
3137 	return reg;
3138 }
3139 
3140 INLINE uint32_t
3141 mvxpsec_aesklen(int klen)
3142 {
3143 	if (klen < 128)
3144 		return 0;
3145 	else if (klen < 192)
3146 		return MV_ACC_CRYPTO_AES_KLEN_128;
3147 	else if (klen < 256)
3148 		return MV_ACC_CRYPTO_AES_KLEN_192;
3149 	else
3150 		return MV_ACC_CRYPTO_AES_KLEN_256;
		return MV_ACC_CRYPTO_AES_KLEN_256;
3154 
3155 /*
3156  * String Conversions
3157  */
3158 STATIC const char *
3159 s_errreg(uint32_t v)
3160 {
3161 	static char buf[80];
3162 
3163 	snprintf(buf, sizeof(buf),
3164 	    "%sMiss %sDoubleHit %sBothHit %sDataError",
3165 	    (v & MV_TDMA_ERRC_MISS) ? "+" : "-",
3166 	    (v & MV_TDMA_ERRC_DHIT) ? "+" : "-",
3167 	    (v & MV_TDMA_ERRC_BHIT) ? "+" : "-",
3168 	    (v & MV_TDMA_ERRC_DERR) ? "+" : "-");
3169 
3170 	return (const char *)buf;
3171 }
3172 
3173 STATIC const char *
3174 s_winreg(uint32_t v)
3175 {
3176 	static char buf[80];
3177 
3178 	snprintf(buf, sizeof(buf),
3179 	    "%s TGT 0x%x ATTR 0x%02x size %u(0x%04x)[64KB]",
3180 	    (v & MV_TDMA_ATTR_ENABLE) ? "EN" : "DIS",
3181 	    MV_TDMA_ATTR_GET_TARGET(v), MV_TDMA_ATTR_GET_ATTR(v),
3182 	    MV_TDMA_ATTR_GET_SIZE(v), MV_TDMA_ATTR_GET_SIZE(v));
3183 
3184 	return (const char *)buf;
3185 }
3186 
3187 STATIC const char *
3188 s_ctrlreg(uint32_t reg)
3189 {
3190 	static char buf[80];
3191 
3192 	snprintf(buf, sizeof(buf),
3193 	    "%s: %sFETCH DBURST-%u SBURST-%u %sOUTS %sCHAIN %sBSWAP %sACT",
3194 	    (reg & MV_TDMA_CONTROL_ENABLE) ? "ENABLE" : "DISABLE",
3195 	    (reg & MV_TDMA_CONTROL_FETCH) ? "+" : "-",
3196 	    MV_TDMA_CONTROL_GET_DST_BURST(reg),
3197 	    MV_TDMA_CONTROL_GET_SRC_BURST(reg),
3198 	    (reg & MV_TDMA_CONTROL_OUTS_EN) ? "+" : "-",
3199 	    (reg & MV_TDMA_CONTROL_CHAIN_DIS) ? "-" : "+",
3200 	    (reg & MV_TDMA_CONTROL_BSWAP_DIS) ? "-" : "+",
3201 	    (reg & MV_TDMA_CONTROL_ACT) ? "+" : "-");
3202 
3203 	return (const char *)buf;
3204 }
3205 
3206 _STATIC const char *
3207 s_xpsecintr(uint32_t v)
3208 {
3209 	static char buf[160];
3210 
3211 	snprintf(buf, sizeof(buf),
3212 	    "%sAuth %sDES %sAES-ENC %sAES-DEC %sENC %sSA %sAccAndTDMA "
3213 	    "%sTDMAComp %sTDMAOwn %sAccAndTDMA_Cont",
3214 	    (v & MVXPSEC_INT_AUTH) ? "+" : "-",
3215 	    (v & MVXPSEC_INT_DES) ? "+" : "-",
3216 	    (v & MVXPSEC_INT_AES_ENC) ? "+" : "-",
3217 	    (v & MVXPSEC_INT_AES_DEC) ? "+" : "-",
3218 	    (v & MVXPSEC_INT_ENC) ? "+" : "-",
3219 	    (v & MVXPSEC_INT_SA) ? "+" : "-",
3220 	    (v & MVXPSEC_INT_ACCTDMA) ? "+" : "-",
3221 	    (v & MVXPSEC_INT_TDMA_COMP) ? "+" : "-",
3222 	    (v & MVXPSEC_INT_TDMA_OWN) ? "+" : "-",
3223 	    (v & MVXPSEC_INT_ACCTDMA_CONT) ? "+" : "-");
3224 
3225 	return (const char *)buf;
3226 }
3227 
3228 STATIC const char *
3229 s_ctlalg(uint32_t alg)
3230 {
3231 	switch (alg) {
3232 	case CRYPTO_SHA1_HMAC_96:
3233 		return "HMAC-SHA1-96";
3234 	case CRYPTO_SHA1_HMAC:
3235 		return "HMAC-SHA1";
3236 	case CRYPTO_SHA1:
3237 		return "SHA1";
3238 	case CRYPTO_MD5_HMAC_96:
3239 		return "HMAC-MD5-96";
3240 	case CRYPTO_MD5_HMAC:
3241 		return "HMAC-MD5";
3242 	case CRYPTO_MD5:
3243 		return "MD5";
3244 	case CRYPTO_DES_CBC:
3245 		return "DES-CBC";
3246 	case CRYPTO_3DES_CBC:
3247 		return "3DES-CBC";
3248 	case CRYPTO_AES_CBC:
3249 		return "AES-CBC";
3250 	default:
3251 		break;
3252 	}
3253 
3254 	return "Unknown";
3255 }
3256 
3257 STATIC const char *
3258 s_xpsec_op(uint32_t reg)
3259 {
3260 	reg &= MV_ACC_CRYPTO_OP_MASK;
3261 	switch (reg) {
3262 	case MV_ACC_CRYPTO_OP_ENC:
3263 		return "ENC";
3264 	case MV_ACC_CRYPTO_OP_MAC:
3265 		return "MAC";
3266 	case MV_ACC_CRYPTO_OP_ENCMAC:
3267 		return "ENC-MAC";
3268 	case MV_ACC_CRYPTO_OP_MACENC:
3269 		return "MAC-ENC";
3270 	default:
3271 		break;
3272 	}
3273 
3274 	return "Unknown";
3275 }
3276 
3277 STATIC const char *
3278 s_xpsec_enc(uint32_t alg)
3279 {
3280 	alg <<= MV_ACC_CRYPTO_ENC_SHIFT;
3281 	switch (alg) {
3282 	case MV_ACC_CRYPTO_ENC_DES:
3283 		return "DES";
3284 	case MV_ACC_CRYPTO_ENC_3DES:
3285 		return "3DES";
3286 	case MV_ACC_CRYPTO_ENC_AES:
3287 		return "AES";
3288 	default:
3289 		break;
3290 	}
3291 
3292 	return "Unknown";
3293 }
3294 
3295 STATIC const char *
3296 s_xpsec_mac(uint32_t alg)
3297 {
3298 	alg <<= MV_ACC_CRYPTO_MAC_SHIFT;
3299 	switch (alg) {
3300 	case MV_ACC_CRYPTO_MAC_NONE:
3301 		return "Disabled";
3302 	case MV_ACC_CRYPTO_MAC_MD5:
3303 		return "MD5";
3304 	case MV_ACC_CRYPTO_MAC_SHA1:
3305 		return "SHA1";
3306 	case MV_ACC_CRYPTO_MAC_HMAC_MD5:
3307 		return "HMAC-MD5";
3308 	case MV_ACC_CRYPTO_MAC_HMAC_SHA1:
3309 		return "HMAC-SHA1";
3310 	default:
3311 		break;
3312 	}
3313 
3314 	return "Unknown";
3315 }
3316 
3317 STATIC const char *
3318 s_xpsec_frag(uint32_t frag)
3319 {
3320 	frag <<= MV_ACC_CRYPTO_FRAG_SHIFT;
3321 	switch (frag) {
3322 	case MV_ACC_CRYPTO_NOFRAG:
3323 		return "NoFragment";
3324 	case MV_ACC_CRYPTO_FRAG_FIRST:
3325 		return "FirstFragment";
3326 	case MV_ACC_CRYPTO_FRAG_MID:
3327 		return "MiddleFragment";
3328 	case MV_ACC_CRYPTO_FRAG_LAST:
3329 		return "LastFragment";
3330 	default:
3331 		break;
3332 	}
3333 
3334 	return "Unknown";
3335 }
3336 
3337 #ifdef MVXPSEC_DEBUG
3338 void
3339 mvxpsec_dump_reg(struct mvxpsec_softc *sc)
3340 {
3341 	uint32_t reg;
3342 	int i;
3343 
3344 	if ((mvxpsec_debug & MVXPSEC_DEBUG_DESC) == 0)
3345 		return;
3346 
3347 	printf("--- Interrupt Registers ---\n");
3348 	reg = MVXPSEC_READ(sc, MVXPSEC_INT_CAUSE);
3349 	printf("MVXPSEC INT CAUSE: 0x%08x\n", reg);
3350 	printf("MVXPSEC INT CAUSE: %s\n", s_xpsecintr(reg));
3351 	reg = MVXPSEC_READ(sc, MVXPSEC_INT_MASK);
3352 	printf("MVXPSEC INT MASK: 0x%08x\n", reg);
	printf("MVXPSEC INT MASK: %s\n", s_xpsecintr(reg));
3354 
3355 	printf("--- DMA Configuration Registers ---\n");
3356 	for (i = 0; i < MV_TDMA_NWINDOW; i++) {
3357 		reg = MVXPSEC_READ(sc, MV_TDMA_BAR(i));
3358 		printf("TDMA BAR%d: 0x%08x\n", i, reg);
3359 		reg = MVXPSEC_READ(sc, MV_TDMA_ATTR(i));
3360 		printf("TDMA ATTR%d: 0x%08x\n", i, reg);
3361 		printf("  -> %s\n", s_winreg(reg));
3362 	}
3363 
3364 	printf("--- DMA Control Registers ---\n");
3365 
3366 	reg = MVXPSEC_READ(sc, MV_TDMA_CONTROL);
3367 	printf("TDMA CONTROL: 0x%08x\n", reg);
3368 	printf("  -> %s\n", s_ctrlreg(reg));
3369 
3370 	printf("--- DMA Current Command Descriptors ---\n");
3371 
3372 	reg = MVXPSEC_READ(sc, MV_TDMA_ERR_CAUSE);
3373 	printf("TDMA ERR CAUSE: 0x%08x\n", reg);
3374 
3375 	reg = MVXPSEC_READ(sc, MV_TDMA_ERR_MASK);
3376 	printf("TDMA ERR MASK: 0x%08x\n", reg);
3377 
3378 	reg = MVXPSEC_READ(sc, MV_TDMA_CNT);
3379 	printf("TDMA DATA OWNER: %s\n",
3380 	    (reg & MV_TDMA_CNT_OWN) ? "DMAC" : "CPU");
3381 	printf("TDMA DATA COUNT: %d(0x%x)\n",
3382 	    (reg & ~MV_TDMA_CNT_OWN), (reg & ~MV_TDMA_CNT_OWN));
3383 
3384 	reg = MVXPSEC_READ(sc, MV_TDMA_SRC);
3385 	printf("TDMA DATA SRC: 0x%08x\n", reg);
3386 
3387 	reg = MVXPSEC_READ(sc, MV_TDMA_DST);
3388 	printf("TDMA DATA DST: 0x%08x\n", reg);
3389 
3390 	reg = MVXPSEC_READ(sc, MV_TDMA_NXT);
3391 	printf("TDMA DATA NXT: 0x%08x\n", reg);
3392 
3393 	reg = MVXPSEC_READ(sc, MV_TDMA_CUR);
3394 	printf("TDMA DATA CUR: 0x%08x\n", reg);
3395 
3396 	printf("--- ACC Command Register ---\n");
3397 	reg = MVXPSEC_READ(sc, MV_ACC_COMMAND);
3398 	printf("ACC COMMAND: 0x%08x\n", reg);
3399 	printf("ACC: %sACT %sSTOP\n",
3400 	    (reg & MV_ACC_COMMAND_ACT) ? "+" : "-",
3401 	    (reg & MV_ACC_COMMAND_STOP) ? "+" : "-");
3402 
3403 	reg = MVXPSEC_READ(sc, MV_ACC_CONFIG);
3404 	printf("ACC CONFIG: 0x%08x\n", reg);
3405 	reg = MVXPSEC_READ(sc, MV_ACC_DESC);
3406 	printf("ACC DESC: 0x%08x\n", reg);
3407 
3408 	printf("--- DES Key Register ---\n");
3409 	reg = MVXPSEC_READ(sc, MV_CE_DES_KEY0L);
3410 	printf("DES KEY0  Low: 0x%08x\n", reg);
3411 	reg = MVXPSEC_READ(sc, MV_CE_DES_KEY0H);
3412 	printf("DES KEY0 High: 0x%08x\n", reg);
3413 	reg = MVXPSEC_READ(sc, MV_CE_DES_KEY1L);
3414 	printf("DES KEY1  Low: 0x%08x\n", reg);
3415 	reg = MVXPSEC_READ(sc, MV_CE_DES_KEY1H);
3416 	printf("DES KEY1 High: 0x%08x\n", reg);
3417 	reg = MVXPSEC_READ(sc, MV_CE_DES_KEY2L);
3418 	printf("DES KEY2  Low: 0x%08x\n", reg);
3419 	reg = MVXPSEC_READ(sc, MV_CE_DES_KEY2H);
3420 	printf("DES KEY2 High: 0x%08x\n", reg);
3421 
3422 	printf("--- AES Key Register ---\n");
3423 	for (i = 0; i < 8; i++) {
3424 		reg = MVXPSEC_READ(sc, MV_CE_AES_EKEY(i));
3425 		printf("AES ENC KEY COL%d: %08x\n", i, reg);
3426 	}
3427 	for (i = 0; i < 8; i++) {
3428 		reg = MVXPSEC_READ(sc, MV_CE_AES_DKEY(i));
3429 		printf("AES DEC KEY COL%d: %08x\n", i, reg);
3430 	}
3431 
3432 	return;
3433 }
3434 
3435 STATIC void
3436 mvxpsec_dump_sram(const char *name, struct mvxpsec_softc *sc, size_t len)
3437 {
3438 	uint32_t reg;
3439 
3440 	if (sc->sc_sram_va == NULL)
3441 		return;
3442 
3443 	if (len == 0) {
		printf("\n%s NO DATA (len=0)\n", name);
3445 		return;
3446 	}
3447 	else if (len > MV_ACC_SRAM_SIZE)
3448 		len = MV_ACC_SRAM_SIZE;
3449 
3450 	mutex_enter(&sc->sc_dma_mtx);
3451 	reg = MVXPSEC_READ(sc, MV_TDMA_CONTROL);
3452 	if (reg & MV_TDMA_CONTROL_ACT) {
3453 		printf("TDMA is active, cannot access SRAM\n");
3454 		mutex_exit(&sc->sc_dma_mtx);
3455 		return;
3456 	}
3457 	reg = MVXPSEC_READ(sc, MV_ACC_COMMAND);
3458 	if (reg & MV_ACC_COMMAND_ACT) {
3459 		printf("SA is active, cannot access SRAM\n");
3460 		mutex_exit(&sc->sc_dma_mtx);
3461 		return;
3462 	}
3463 
3464 	printf("%s: dump SRAM, %zu bytes\n", name, len);
3465 	mvxpsec_dump_data(name, sc->sc_sram_va, len);
3466 	mutex_exit(&sc->sc_dma_mtx);
3467 	return;
3468 }
3469 
3470 
3471 _STATIC void
3472 mvxpsec_dump_dmaq(struct mvxpsec_descriptor_handle *dh)
3473 {
3474 	struct mvxpsec_descriptor *d =
3475            (struct mvxpsec_descriptor *)dh->_desc;
3476 
3477 	printf("--- DMA Command Descriptor ---\n");
3478 	printf("DESC: VA=%p PA=0x%08x\n",
3479 	    d, (uint32_t)dh->phys_addr);
3480 	printf("DESC: WORD0 = 0x%08x\n", d->tdma_word0);
3481 	printf("DESC: SRC = 0x%08x\n", d->tdma_src);
3482 	printf("DESC: DST = 0x%08x\n", d->tdma_dst);
3483 	printf("DESC: NXT = 0x%08x\n", d->tdma_nxt);
3484 
3485 	return;
3486 }
3487 
3488 STATIC void
3489 mvxpsec_dump_data(const char *name, void *p, size_t len)
3490 {
3491 	uint8_t *data = p;
3492 	off_t off;
3493 
3494 	printf("%s: dump %p, %zu bytes", name, p, len);
3495 	if (p == NULL || len == 0) {
3496 		printf("\n%s: NO DATA\n", name);
3497 		return;
3498 	}
3499 	for (off = 0; off < len; off++) {
3500 		if ((off % 16) == 0) {
3501 			printf("\n%s: 0x%08x:", name, (uint32_t)off);
3502 		}
3503 		if ((off % 4) == 0) {
3504 			printf(" ");
3505 		}
3506 		printf("%02x", data[off]);
3507 	}
3508 	printf("\n");
3509 
3510 	return;
3511 }
3512 
3513 _STATIC void
3514 mvxpsec_dump_packet(const char *name, struct mvxpsec_packet *mv_p)
3515 {
3516 	struct mvxpsec_softc *sc = mv_p->mv_s->sc;
3517 
3518 	printf("%s: packet_data:\n", name);
3519 	mvxpsec_dump_packet_data(name, mv_p);
3520 
3521 	printf("%s: SRAM:\n", name);
3522 	mvxpsec_dump_sram(name, sc, 2000);
3523 
3524 	printf("%s: packet_descriptor:\n", name);
3525 	mvxpsec_dump_packet_desc(name, mv_p);
3526 }
3527 
3528 _STATIC void
3529 mvxpsec_dump_packet_data(const char *name, struct mvxpsec_packet *mv_p)
3530 {
3531 	static char buf[1500];
3532 	int len;
3533 
3534 	if (mv_p->data_type == MVXPSEC_DATA_MBUF) {
3535 		struct mbuf *m;
3536 
3537 		m = mv_p->data.mbuf;
3538 		len = m->m_pkthdr.len;
3539 		if (len > sizeof(buf))
3540 			len = sizeof(buf);
3541 		m_copydata(m, 0, len, buf);
3542 	}
3543 	else if (mv_p->data_type == MVXPSEC_DATA_UIO) {
3544 		struct uio *uio;
3545 
3546 		uio = mv_p->data.uio;
3547 		len = uio->uio_resid;
3548 		if (len > sizeof(buf))
3549 			len = sizeof(buf);
3550 		cuio_copydata(uio, 0, len, buf);
3551 	}
3552 	else if (mv_p->data_type == MVXPSEC_DATA_RAW) {
3553 		len = mv_p->data_len;
3554 		if (len > sizeof(buf))
3555 			len = sizeof(buf);
3556 		memcpy(buf, mv_p->data.raw, len);
3557 	}
3558 	else
3559 		return;
3560 	mvxpsec_dump_data(name, buf, len);
3561 
3562 	return;
3563 }
3564 
3565 _STATIC void
3566 mvxpsec_dump_packet_desc(const char *name, struct mvxpsec_packet *mv_p)
3567 {
3568 	uint32_t *words;
3569 
3570 	if (mv_p == NULL)
3571 		return;
3572 
3573 	words = &mv_p->pkt_header.desc.acc_desc_dword0;
3574 	mvxpsec_dump_acc_config(name, words[0]);
3575 	mvxpsec_dump_acc_encdata(name, words[1], words[2]);
3576 	mvxpsec_dump_acc_enclen(name, words[2]);
3577 	mvxpsec_dump_acc_enckey(name, words[3]);
3578 	mvxpsec_dump_acc_enciv(name, words[4]);
3579 	mvxpsec_dump_acc_macsrc(name, words[5]);
3580 	mvxpsec_dump_acc_macdst(name, words[6]);
3581 	mvxpsec_dump_acc_maciv(name, words[7]);
3582 
3583 	return;
3584 }
3585 
3586 _STATIC void
3587 mvxpsec_dump_acc_config(const char *name, uint32_t w)
3588 {
3589 	/* SA: Dword 0 */
3590 	printf("%s: Dword0=0x%08x\n", name, w);
3591 	printf("%s:   OP = %s\n", name,
3592 	    s_xpsec_op(MV_ACC_CRYPTO_OP(w)));
3593 	printf("%s:   MAC = %s\n", name,
3594 	    s_xpsec_mac(MV_ACC_CRYPTO_MAC(w)));
3595 	printf("%s:   MAC_LEN = %s\n", name,
3596 	    w & MV_ACC_CRYPTO_MAC_96 ? "96-bit" : "full-bit");
3597 	printf("%s:   ENC = %s\n", name,
3598 	    s_xpsec_enc(MV_ACC_CRYPTO_ENC(w)));
3599 	printf("%s:   DIR = %s\n", name,
3600 	    w & MV_ACC_CRYPTO_DECRYPT ? "decryption" : "encryption");
3601 	printf("%s:   CHAIN = %s\n", name,
3602 	    w & MV_ACC_CRYPTO_CBC ? "CBC" : "ECB");
3603 	printf("%s:   3DES = %s\n", name,
3604 	    w & MV_ACC_CRYPTO_3DES_EDE ? "EDE" : "EEE");
3605 	printf("%s:   FRAGMENT = %s\n", name,
3606 	    s_xpsec_frag(MV_ACC_CRYPTO_FRAG(w)));
3607 	return;
3608 }
3609 
3610 STATIC void
3611 mvxpsec_dump_acc_encdata(const char *name, uint32_t w, uint32_t w2)
3612 {
3613 	/* SA: Dword 1 */
3614 	printf("%s: Dword1=0x%08x\n", name, w);
3615 	printf("%s:   ENC SRC = 0x%x\n", name, MV_ACC_DESC_GET_VAL_1(w));
3616 	printf("%s:   ENC DST = 0x%x\n", name, MV_ACC_DESC_GET_VAL_2(w));
3617 	printf("%s:   ENC RANGE = 0x%x - 0x%x\n", name,
3618 	    MV_ACC_DESC_GET_VAL_1(w),
3619 	    MV_ACC_DESC_GET_VAL_1(w) + MV_ACC_DESC_GET_VAL_1(w2) - 1);
3620 	return;
3621 }
3622 
3623 STATIC void
3624 mvxpsec_dump_acc_enclen(const char *name, uint32_t w)
3625 {
3626 	/* SA: Dword 2 */
3627 	printf("%s: Dword2=0x%08x\n", name, w);
3628 	printf("%s:   ENC LEN = %d\n", name,
3629 	    MV_ACC_DESC_GET_VAL_1(w));
3630 	return;
3631 }
3632 
3633 STATIC void
3634 mvxpsec_dump_acc_enckey(const char *name, uint32_t w)
3635 {
3636 	/* SA: Dword 3 */
3637 	printf("%s: Dword3=0x%08x\n", name, w);
3638 	printf("%s:   EKEY = 0x%x\n", name,
3639 	    MV_ACC_DESC_GET_VAL_1(w));
3640 	return;
3641 }
3642 
3643 STATIC void
3644 mvxpsec_dump_acc_enciv(const char *name, uint32_t w)
3645 {
3646 	/* SA: Dword 4 */
3647 	printf("%s: Dword4=0x%08x\n", name, w);
3648 	printf("%s:   EIV = 0x%x\n", name, MV_ACC_DESC_GET_VAL_1(w));
3649 	printf("%s:   EIV_BUF = 0x%x\n", name, MV_ACC_DESC_GET_VAL_2(w));
3650 	return;
3651 }
3652 
3653 STATIC void
3654 mvxpsec_dump_acc_macsrc(const char *name, uint32_t w)
3655 {
3656 	/* SA: Dword 5 */
3657 	printf("%s: Dword5=0x%08x\n", name, w);
3658 	printf("%s:   MAC_SRC = 0x%x\n", name,
3659 	    MV_ACC_DESC_GET_VAL_1(w));
3660 	printf("%s:   MAC_TOTAL_LEN = %d\n", name,
3661 	    MV_ACC_DESC_GET_VAL_3(w));
3662 	printf("%s:   MAC_RANGE = 0x%0x - 0x%0x\n", name,
3663 	    MV_ACC_DESC_GET_VAL_1(w),
3664 	    MV_ACC_DESC_GET_VAL_1(w) + MV_ACC_DESC_GET_VAL_3(w) - 1);
3665 	return;
3666 }
3667 
3668 STATIC void
3669 mvxpsec_dump_acc_macdst(const char *name, uint32_t w)
3670 {
3671 	/* SA: Dword 6 */
3672 	printf("%s: Dword6=0x%08x\n", name, w);
3673 	printf("%s:   MAC_DST = 0x%x\n", name, MV_ACC_DESC_GET_VAL_1(w));
3674 	printf("%s:   MAC_BLOCK_LEN = %d\n", name,
3675 	    MV_ACC_DESC_GET_VAL_2(w));
3676 	return;
3677 }
3678 
3679 STATIC void
3680 mvxpsec_dump_acc_maciv(const char *name, uint32_t w)
3681 {
3682 	/* SA: Dword 7 */
3683 	printf("%s: Dword7=0x%08x\n", name, w);
3684 	printf("%s:   MAC_INNER_IV = 0x%x\n", name,
3685 	    MV_ACC_DESC_GET_VAL_1(w));
3686 	printf("%s:   MAC_OUTER_IV = 0x%x\n", name,
3687 	    MV_ACC_DESC_GET_VAL_2(w));
3688 	return;
3689 }
3690 #endif
3691