1 /*	$NetBSD: mvxpsec.c,v 1.7 2020/07/25 22:37:48 riastradh Exp $	*/
2 /*
3  * Copyright (c) 2015 Internet Initiative Japan Inc.
4  * All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
17  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
18  * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
19  * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
20  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
21  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
23  * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
24  * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
25  * POSSIBILITY OF SUCH DAMAGE.
26  */
27 
28 #ifdef _KERNEL_OPT
29 #include "opt_ipsec.h"
30 #endif
31 
32 /*
33  * Cryptographic Engine and Security Accelerator (MVXPSEC)
34  */
35 #include <sys/cdefs.h>
36 #include <sys/param.h>
37 #include <sys/types.h>
38 #include <sys/kernel.h>
39 #include <sys/queue.h>
40 #include <sys/conf.h>
41 #include <sys/proc.h>
42 #include <sys/bus.h>
43 #include <sys/evcnt.h>
44 #include <sys/device.h>
45 #include <sys/endian.h>
46 #include <sys/errno.h>
47 #include <sys/kmem.h>
48 #include <sys/mbuf.h>
49 #include <sys/callout.h>
50 #include <sys/pool.h>
51 #include <sys/cprng.h>
52 #include <sys/syslog.h>
53 #include <sys/mutex.h>
54 #include <sys/kthread.h>
55 #include <sys/atomic.h>
56 #include <sys/sha1.h>
57 #include <sys/md5.h>
58 
59 #include <uvm/uvm_extern.h>
60 
61 #include <opencrypto/cryptodev.h>
62 #include <opencrypto/xform.h>
63 
64 #include <net/net_stats.h>
65 
66 #include <netinet/in_systm.h>
67 #include <netinet/in.h>
68 #include <netinet/ip.h>
69 #include <netinet/ip6.h>
70 
71 #if NIPSEC > 0
72 #include <netipsec/esp_var.h>
73 #endif
74 
75 #include <arm/cpufunc.h>
76 #include <arm/marvell/mvsocvar.h>
77 #include <arm/marvell/armadaxpreg.h>
78 #include <dev/marvell/marvellreg.h>
79 #include <dev/marvell/marvellvar.h>
80 #include <dev/marvell/mvxpsecreg.h>
81 #include <dev/marvell/mvxpsecvar.h>
82 
83 #ifdef DEBUG
84 #define STATIC __attribute__ ((noinline)) extern
85 #define _STATIC __attribute__ ((noinline)) extern
86 #define INLINE __attribute__ ((noinline)) extern
87 #define _INLINE __attribute__ ((noinline)) extern
88 #else
89 #define STATIC static
90 #define _STATIC __attribute__ ((unused)) static
91 #define INLINE static inline
92 #define _INLINE __attribute__ ((unused)) static inline
93 #endif
94 
95 /*
96  * IRQ and SRAM spaces for each unit
97  * XXX: move to attach_args
98  */
99 struct {
100 	int		err_int;
101 } mvxpsec_config[] = {
102 	{ .err_int = ARMADAXP_IRQ_CESA0_ERR, }, /* unit 0 */
103 	{ .err_int = ARMADAXP_IRQ_CESA1_ERR, }, /* unit 1 */
104 };
105 #define MVXPSEC_ERR_INT(sc) \
106     mvxpsec_config[device_unit((sc)->sc_dev)].err_int
107 
108 /*
109  * AES
110  */
111 #define MAXBC				(128/32)
112 #define MAXKC				(256/32)
113 #define MAXROUNDS			14
114 STATIC int mv_aes_ksched(uint8_t[4][MAXKC], int,
115     uint8_t[MAXROUNDS+1][4][MAXBC]);
116 STATIC int mv_aes_deckey(uint8_t *, uint8_t *, int);
117 
118 /*
119  * device driver autoconf interface
120  */
121 STATIC int mvxpsec_match(device_t, cfdata_t, void *);
122 STATIC void mvxpsec_attach(device_t, device_t, void *);
123 STATIC void mvxpsec_evcnt_attach(struct mvxpsec_softc *);
124 
125 /*
126  * register setup
127  */
128 STATIC int mvxpsec_wininit(struct mvxpsec_softc *, enum marvell_tags *);
129 
130 /*
131  * timer(callout) interface
132  *
133  * XXX: callout is not MP safe...
134  */
135 STATIC void mvxpsec_timer(void *);
136 
137 /*
138  * interrupt interface
139  */
140 STATIC int mvxpsec_intr(void *);
141 INLINE void mvxpsec_intr_cleanup(struct mvxpsec_softc *);
142 STATIC int mvxpsec_eintr(void *);
143 STATIC uint32_t mvxpsec_intr_ack(struct mvxpsec_softc *);
144 STATIC uint32_t mvxpsec_eintr_ack(struct mvxpsec_softc *);
145 INLINE void mvxpsec_intr_cnt(struct mvxpsec_softc *, int);
146 
147 /*
148  * memory allocators and VM management
149  */
150 STATIC struct mvxpsec_devmem *mvxpsec_alloc_devmem(struct mvxpsec_softc *,
151     paddr_t, int);
152 STATIC int mvxpsec_init_sram(struct mvxpsec_softc *);
153 
154 /*
155  * Low-level DMA interface
156  */
157 STATIC int mvxpsec_init_dma(struct mvxpsec_softc *,
158     struct marvell_attach_args *);
159 INLINE int mvxpsec_dma_wait(struct mvxpsec_softc *);
160 INLINE int mvxpsec_acc_wait(struct mvxpsec_softc *);
161 INLINE struct mvxpsec_descriptor_handle *mvxpsec_dma_getdesc(struct mvxpsec_softc *);
162 _INLINE void mvxpsec_dma_putdesc(struct mvxpsec_softc *, struct mvxpsec_descriptor_handle *);
163 INLINE void mvxpsec_dma_setup(struct mvxpsec_descriptor_handle *,
164     uint32_t, uint32_t, uint32_t);
165 INLINE void mvxpsec_dma_cat(struct mvxpsec_softc *,
166     struct mvxpsec_descriptor_handle *, struct mvxpsec_descriptor_handle *);
167 
168 /*
169  * High-level DMA interface
170  */
171 INLINE int mvxpsec_dma_copy0(struct mvxpsec_softc *,
172     mvxpsec_dma_ring *, uint32_t, uint32_t, uint32_t);
173 INLINE int mvxpsec_dma_copy(struct mvxpsec_softc *,
174     mvxpsec_dma_ring *, uint32_t, uint32_t, uint32_t);
175 INLINE int mvxpsec_dma_acc_activate(struct mvxpsec_softc *,
176     mvxpsec_dma_ring *);
177 INLINE void mvxpsec_dma_finalize(struct mvxpsec_softc *,
178     mvxpsec_dma_ring *);
179 INLINE void mvxpsec_dma_free(struct mvxpsec_softc *,
180     mvxpsec_dma_ring *);
181 INLINE int mvxpsec_dma_copy_packet(struct mvxpsec_softc *, struct mvxpsec_packet *);
182 INLINE int mvxpsec_dma_sync_packet(struct mvxpsec_softc *, struct mvxpsec_packet *);
183 
184 /*
185  * Session management interface (OpenCrypto)
186  */
187 #define MVXPSEC_SESSION(sid)	((sid) & 0x0fffffff)
188 #define MVXPSEC_SID(crd, sesn)	(((crd) << 28) | ((sesn) & 0x0fffffff))
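
/*
 * Example (editor's sketch): a sid packs the crypto driver id in the
 * top 4 bits and the session index in the low 28 bits, so the two
 * macros round-trip:
 *
 *	uint32_t sid = MVXPSEC_SID(2, 5);	-> 0x20000005
 *	int sesn = MVXPSEC_SESSION(sid);	-> 5
 */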
189 /* pool management */
190 STATIC int mvxpsec_session_ctor(void *, void *, int);
191 STATIC void mvxpsec_session_dtor(void *, void *);
192 STATIC int mvxpsec_packet_ctor(void *, void *, int);
193 STATIC void mvxpsec_packet_dtor(void *, void *);
194 
195 /* session management */
196 STATIC struct mvxpsec_session *mvxpsec_session_alloc(struct mvxpsec_softc *);
197 STATIC void mvxpsec_session_dealloc(struct mvxpsec_session *);
198 INLINE struct mvxpsec_session *mvxpsec_session_lookup(struct mvxpsec_softc *, int);
199 INLINE int mvxpsec_session_ref(struct mvxpsec_session *);
200 INLINE void mvxpsec_session_unref(struct mvxpsec_session *);
201 
202 /* packet management */
203 STATIC struct mvxpsec_packet *mvxpsec_packet_alloc(struct mvxpsec_session *);
204 INLINE void mvxpsec_packet_enqueue(struct mvxpsec_packet *);
205 STATIC void mvxpsec_packet_dealloc(struct mvxpsec_packet *);
206 STATIC int mvxpsec_done_packet(struct mvxpsec_packet *);
207 
208 /* session header management */
209 STATIC int mvxpsec_header_finalize(struct mvxpsec_packet *);
210 
211 /* packet queue management */
212 INLINE void mvxpsec_drop(struct mvxpsec_softc *, struct cryptop *, struct mvxpsec_packet *, int);
213 STATIC int mvxpsec_dispatch_queue(struct mvxpsec_softc *);
214 
215 /* opencrypto operation */
216 INLINE int mvxpsec_parse_crd(struct mvxpsec_packet *, struct cryptodesc *);
217 INLINE int mvxpsec_parse_crp(struct mvxpsec_packet *);
218 
219 /* payload data management */
220 INLINE int mvxpsec_packet_setcrp(struct mvxpsec_packet *, struct cryptop *);
221 STATIC int mvxpsec_packet_setdata(struct mvxpsec_packet *, void *, uint32_t);
222 STATIC int mvxpsec_packet_setmbuf(struct mvxpsec_packet *, struct mbuf *);
223 STATIC int mvxpsec_packet_setuio(struct mvxpsec_packet *, struct uio *);
224 STATIC int mvxpsec_packet_rdata(struct mvxpsec_packet *, int, int, void *);
225 _STATIC int mvxpsec_packet_wdata(struct mvxpsec_packet *, int, int, void *);
226 STATIC int mvxpsec_packet_write_iv(struct mvxpsec_packet *, void *, int);
227 STATIC int mvxpsec_packet_copy_iv(struct mvxpsec_packet *, int, int);
228 
229 /* key pre-computation */
230 STATIC int mvxpsec_key_precomp(int, void *, int, void *, void *);
231 STATIC int mvxpsec_hmac_precomp(int, void *, int, void *, void *);
232 
233 /* crypto operation management */
234 INLINE void mvxpsec_packet_reset_op(struct mvxpsec_packet *);
235 INLINE void mvxpsec_packet_update_op_order(struct mvxpsec_packet *, int);
236 
237 /*
238  * parameter converters
239  */
240 INLINE uint32_t mvxpsec_alg2acc(uint32_t alg);
241 INLINE uint32_t mvxpsec_aesklen(int klen);
242 
243 /*
244  * string formatters
245  */
246 _STATIC const char *s_ctrlreg(uint32_t);
247 _STATIC const char *s_winreg(uint32_t);
248 _STATIC const char *s_errreg(uint32_t);
249 _STATIC const char *s_xpsecintr(uint32_t);
250 _STATIC const char *s_ctlalg(uint32_t);
251 _STATIC const char *s_xpsec_op(uint32_t);
252 _STATIC const char *s_xpsec_enc(uint32_t);
253 _STATIC const char *s_xpsec_mac(uint32_t);
254 _STATIC const char *s_xpsec_frag(uint32_t);
255 
256 /*
257  * debugging supports
258  */
259 #ifdef MVXPSEC_DEBUG
260 _STATIC void mvxpsec_dump_dmaq(struct mvxpsec_descriptor_handle *);
261 _STATIC void mvxpsec_dump_reg(struct mvxpsec_softc *);
262 _STATIC void mvxpsec_dump_sram(const char *, struct mvxpsec_softc *, size_t);
263 _STATIC void mvxpsec_dump_data(const char *, void *, size_t);
264 
265 _STATIC void mvxpsec_dump_packet(const char *, struct mvxpsec_packet *);
266 _STATIC void mvxpsec_dump_packet_data(const char *, struct mvxpsec_packet *);
267 _STATIC void mvxpsec_dump_packet_desc(const char *, struct mvxpsec_packet *);
268 
269 _STATIC void mvxpsec_dump_acc_config(const char *, uint32_t);
270 _STATIC void mvxpsec_dump_acc_encdata(const char *, uint32_t, uint32_t);
271 _STATIC void mvxpsec_dump_acc_enclen(const char *, uint32_t);
272 _STATIC void mvxpsec_dump_acc_enckey(const char *, uint32_t);
273 _STATIC void mvxpsec_dump_acc_enciv(const char *, uint32_t);
274 _STATIC void mvxpsec_dump_acc_macsrc(const char *, uint32_t);
275 _STATIC void mvxpsec_dump_acc_macdst(const char *, uint32_t);
276 _STATIC void mvxpsec_dump_acc_maciv(const char *, uint32_t);
277 #endif
278 
279 /*
280  * global configurations, params, work spaces, ...
281  *
282  * XXX: use sysctl for global configurations
283  */
284 /* waiting for device */
285 static int mvxpsec_wait_interval = 10;		/* usec */
286 static int mvxpsec_wait_retry = 100;		/* retries; 100 * 10 usec = 1 [msec] total */
287 #ifdef MVXPSEC_DEBUG
288 static uint32_t mvxpsec_debug = MVXPSEC_DEBUG;	/* debug level */
289 #endif
290 
291 /*
292  * Register accessors
293  */
294 #define MVXPSEC_WRITE(sc, off, val) \
295 	bus_space_write_4((sc)->sc_iot, (sc)->sc_ioh, (off), (val))
296 #define MVXPSEC_READ(sc, off) \
297 	bus_space_read_4((sc)->sc_iot, (sc)->sc_ioh, (off))
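
/*
 * These are thin wrappers around bus_space(9); a typical
 * read-modify-write sequence (the same shape used in attach below)
 * looks like:
 *
 *	uint32_t v = MVXPSEC_READ(sc, MV_TDMA_CONTROL);
 *	v |= MV_TDMA_CONTROL_ENABLE;
 *	MVXPSEC_WRITE(sc, MV_TDMA_CONTROL, v);
 */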
298 
299 /*
300  * device driver autoconf interface
301  */
302 CFATTACH_DECL2_NEW(mvxpsec_mbus, sizeof(struct mvxpsec_softc),
303     mvxpsec_match, mvxpsec_attach, NULL, NULL, NULL, NULL);
304 
305 STATIC int
306 mvxpsec_match(device_t dev, cfdata_t match, void *aux)
307 {
308 	struct marvell_attach_args *mva = aux;
309 	uint32_t tag;
310 	int window;
311 
312 	if (strcmp(mva->mva_name, match->cf_name) != 0)
313 		return 0;
314 	if (mva->mva_offset == MVA_OFFSET_DEFAULT)
315 		return 0;
316 
317 	switch (mva->mva_unit) {
318 	case 0:
319 		tag = ARMADAXP_TAG_CRYPT0;
320 		break;
321 	case 1:
322 		tag = ARMADAXP_TAG_CRYPT1;
323 		break;
324 	default:
325 		aprint_error_dev(dev,
326 		    "unit %d is not supported\n", mva->mva_unit);
327 		return 0;
328 	}
329 
330 	window = mvsoc_target(tag, NULL, NULL, NULL, NULL);
331 	if (window >= nwindow) {
332 		aprint_error_dev(dev,
333 		    "Security Accelerator SRAM is not configured.\n");
334 		return 0;
335 	}
336 
337 	return 1;
338 }
339 
340 STATIC void
341 mvxpsec_attach(device_t parent, device_t self, void *aux)
342 {
343 	struct marvell_attach_args *mva = aux;
344 	struct mvxpsec_softc *sc = device_private(self);
345 	int v;
346 	int i;
347 
348 	sc->sc_dev = self;
349 
350 	aprint_normal(": Marvell Crypto Engines and Security Accelerator\n");
351 	aprint_naive("\n");
352 #ifdef MVXPSEC_MULTI_PACKET
353 	aprint_normal_dev(sc->sc_dev, "multi-packet chained mode enabled.\n");
354 #else
355 	aprint_normal_dev(sc->sc_dev, "multi-packet chained mode disabled.\n");
356 #endif
357 	aprint_normal_dev(sc->sc_dev,
358 	    "Max %d sessions.\n", MVXPSEC_MAX_SESSIONS);
359 
360 	/* mutex */
361 	mutex_init(&sc->sc_session_mtx, MUTEX_DEFAULT, IPL_NET);
362 	mutex_init(&sc->sc_dma_mtx, MUTEX_DEFAULT, IPL_NET);
363 	mutex_init(&sc->sc_queue_mtx, MUTEX_DEFAULT, IPL_NET);
364 
365 	/* Packet queue */
366 	SIMPLEQ_INIT(&sc->sc_wait_queue);
367 	SIMPLEQ_INIT(&sc->sc_run_queue);
368 	SLIST_INIT(&sc->sc_free_list);
369 	sc->sc_wait_qlen = 0;
370 #ifdef MVXPSEC_MULTI_PACKET
371 	sc->sc_wait_qlimit = 16;
372 #else
373 	sc->sc_wait_qlimit = 0;
374 #endif
375 	sc->sc_free_qlen = 0;
376 
377 	/* Timer */
378 	callout_init(&sc->sc_timeout, 0); /* XXX: use CALLOUT_MPSAFE */
379 	callout_setfunc(&sc->sc_timeout, mvxpsec_timer, sc);
380 
381 	/* I/O */
382 	sc->sc_iot = mva->mva_iot;
383 	if (bus_space_subregion(mva->mva_iot, mva->mva_ioh,
384 	    mva->mva_offset, mva->mva_size, &sc->sc_ioh)) {
385 		aprint_error_dev(self, "Cannot map registers\n");
386 		return;
387 	}
388 
389 	/* DMA */
390 	sc->sc_dmat = mva->mva_dmat;
391 	if (mvxpsec_init_dma(sc, mva) < 0)
392 		return;
393 
394 	/* SRAM */
395 	if (mvxpsec_init_sram(sc) < 0)
396 		return;
397 
398 	/* Registers */
399 	mvxpsec_wininit(sc, mva->mva_tags);
400 
401 	/* INTR */
402 	MVXPSEC_WRITE(sc, MVXPSEC_INT_MASK, MVXPSEC_DEFAULT_INT);
403 	MVXPSEC_WRITE(sc, MV_TDMA_ERR_MASK, MVXPSEC_DEFAULT_ERR);
404 	sc->sc_done_ih =
405 	    marvell_intr_establish(mva->mva_irq, IPL_NET, mvxpsec_intr, sc);
406 	/* XXX: should pass error IRQ using mva */
407 	sc->sc_error_ih = marvell_intr_establish(MVXPSEC_ERR_INT(sc),
408 	    IPL_NET, mvxpsec_eintr, sc);
409 	aprint_normal_dev(self,
410 	    "Error Reporting IRQ %d\n", MVXPSEC_ERR_INT(sc));
411 
412 	/* Initialize TDMA (It's enabled here, but waiting for SA) */
413 	if (mvxpsec_dma_wait(sc) < 0)
414 		panic("%s: DMA DEVICE not responding\n", __func__);
415 	MVXPSEC_WRITE(sc, MV_TDMA_CNT, 0);
416 	MVXPSEC_WRITE(sc, MV_TDMA_SRC, 0);
417 	MVXPSEC_WRITE(sc, MV_TDMA_DST, 0);
418 	MVXPSEC_WRITE(sc, MV_TDMA_NXT, 0);
419 	MVXPSEC_WRITE(sc, MV_TDMA_CUR, 0);
420 	v  = MVXPSEC_READ(sc, MV_TDMA_CONTROL);
421 	v |= MV_TDMA_CONTROL_ENABLE;
422 	MVXPSEC_WRITE(sc, MV_TDMA_CONTROL, v);
423 
424 	/* Initialize SA */
425 	if (mvxpsec_acc_wait(sc) < 0)
426 		panic("%s: MVXPSEC not responding\n", __func__);
427 	v  = MVXPSEC_READ(sc, MV_ACC_CONFIG);
428 	v &= ~MV_ACC_CONFIG_STOP_ON_ERR;
429 	v |= MV_ACC_CONFIG_MULT_PKT;
430 	v |= MV_ACC_CONFIG_WAIT_TDMA;
431 	v |= MV_ACC_CONFIG_ACT_TDMA;
432 	MVXPSEC_WRITE(sc, MV_ACC_CONFIG, v);
433 	MVXPSEC_WRITE(sc, MV_ACC_DESC, 0);
434 	MVXPSEC_WRITE(sc, MV_ACC_COMMAND, MV_ACC_COMMAND_STOP);
435 
436 	/* Session */
437 	sc->sc_session_pool =
438 	    pool_cache_init(sizeof(struct mvxpsec_session), 0, 0, 0,
439 	    "mvxpsecpl", NULL, IPL_NET,
440 	    mvxpsec_session_ctor, mvxpsec_session_dtor, sc);
441 	pool_cache_sethiwat(sc->sc_session_pool, MVXPSEC_MAX_SESSIONS);
442 	pool_cache_setlowat(sc->sc_session_pool, MVXPSEC_MAX_SESSIONS / 2);
443 	sc->sc_last_session = NULL;
444 
445 	/* Packet */
446 	sc->sc_packet_pool =
447 	    pool_cache_init(sizeof(struct mvxpsec_packet), 0, 0, 0,
448 	    "mvxpsec_pktpl", NULL, IPL_NET,
449 	    mvxpsec_packet_ctor, mvxpsec_packet_dtor, sc);
450 	pool_cache_sethiwat(sc->sc_packet_pool, MVXPSEC_MAX_SESSIONS);
451 	pool_cache_setlowat(sc->sc_packet_pool, MVXPSEC_MAX_SESSIONS / 2);
452 
453 	/* Register to EVCNT framework */
454 	mvxpsec_evcnt_attach(sc);
455 
456 	/* Register to Opencrypto */
457 	for (i = 0; i < MVXPSEC_MAX_SESSIONS; i++) {
458 		sc->sc_sessions[i] = NULL;
459 	}
460 	if (mvxpsec_register(sc))
461 		panic("cannot initialize OpenCrypto module.\n");
462 
463 	return;
464 }
465 
466 STATIC void
467 mvxpsec_evcnt_attach(struct mvxpsec_softc *sc)
468 {
469 	struct mvxpsec_evcnt *sc_ev = &sc->sc_ev;
470 
471 	evcnt_attach_dynamic(&sc_ev->intr_all, EVCNT_TYPE_INTR,
472 	    NULL, device_xname(sc->sc_dev), "Main Intr.");
473 	evcnt_attach_dynamic(&sc_ev->intr_auth, EVCNT_TYPE_INTR,
474 	    NULL, device_xname(sc->sc_dev), "Auth Intr.");
475 	evcnt_attach_dynamic(&sc_ev->intr_des, EVCNT_TYPE_INTR,
476 	    NULL, device_xname(sc->sc_dev), "DES Intr.");
477 	evcnt_attach_dynamic(&sc_ev->intr_aes_enc, EVCNT_TYPE_INTR,
478 	    NULL, device_xname(sc->sc_dev), "AES-Encrypt Intr.");
479 	evcnt_attach_dynamic(&sc_ev->intr_aes_dec, EVCNT_TYPE_INTR,
480 	    NULL, device_xname(sc->sc_dev), "AES-Decrypt Intr.");
481 	evcnt_attach_dynamic(&sc_ev->intr_enc, EVCNT_TYPE_INTR,
482 	    NULL, device_xname(sc->sc_dev), "Crypto Intr.");
483 	evcnt_attach_dynamic(&sc_ev->intr_sa, EVCNT_TYPE_INTR,
484 	    NULL, device_xname(sc->sc_dev), "SA Intr.");
485 	evcnt_attach_dynamic(&sc_ev->intr_acctdma, EVCNT_TYPE_INTR,
486 	    NULL, device_xname(sc->sc_dev), "AccTDMA Intr.");
487 	evcnt_attach_dynamic(&sc_ev->intr_comp, EVCNT_TYPE_INTR,
488 	    NULL, device_xname(sc->sc_dev), "TDMA-Complete Intr.");
489 	evcnt_attach_dynamic(&sc_ev->intr_own, EVCNT_TYPE_INTR,
490 	    NULL, device_xname(sc->sc_dev), "TDMA-Ownership Intr.");
491 	evcnt_attach_dynamic(&sc_ev->intr_acctdma_cont, EVCNT_TYPE_INTR,
492 	    NULL, device_xname(sc->sc_dev), "AccTDMA-Continue Intr.");
493 
494 	evcnt_attach_dynamic(&sc_ev->session_new, EVCNT_TYPE_MISC,
495 	    NULL, device_xname(sc->sc_dev), "New-Session");
496 	evcnt_attach_dynamic(&sc_ev->session_free, EVCNT_TYPE_MISC,
497 	    NULL, device_xname(sc->sc_dev), "Free-Session");
498 
499 	evcnt_attach_dynamic(&sc_ev->packet_ok, EVCNT_TYPE_MISC,
500 	    NULL, device_xname(sc->sc_dev), "Packet-OK");
501 	evcnt_attach_dynamic(&sc_ev->packet_err, EVCNT_TYPE_MISC,
502 	    NULL, device_xname(sc->sc_dev), "Packet-ERR");
503 
504 	evcnt_attach_dynamic(&sc_ev->dispatch_packets, EVCNT_TYPE_MISC,
505 	    NULL, device_xname(sc->sc_dev), "Packet-Dispatch");
506 	evcnt_attach_dynamic(&sc_ev->dispatch_queue, EVCNT_TYPE_MISC,
507 	    NULL, device_xname(sc->sc_dev), "Queue-Dispatch");
508 	evcnt_attach_dynamic(&sc_ev->queue_full, EVCNT_TYPE_MISC,
509 	    NULL, device_xname(sc->sc_dev), "Queue-Full");
510 	evcnt_attach_dynamic(&sc_ev->max_dispatch, EVCNT_TYPE_MISC,
511 	    NULL, device_xname(sc->sc_dev), "Max-Dispatch");
512 	evcnt_attach_dynamic(&sc_ev->max_done, EVCNT_TYPE_MISC,
513 	    NULL, device_xname(sc->sc_dev), "Max-Done");
514 }
515 
516 /*
517  * Register setup
518  */
519 STATIC int mvxpsec_wininit(struct mvxpsec_softc *sc, enum marvell_tags *tags)
520 {
521 	device_t pdev = device_parent(sc->sc_dev);
522 	uint64_t base;
523 	uint32_t size, reg;
524 	int window, target, attr, rv, i;
525 
526 	/* disable all windows */
527 	for (window = 0; window < MV_TDMA_NWINDOW; window++)
528 	{
529 		MVXPSEC_WRITE(sc, MV_TDMA_BAR(window), 0);
530 		MVXPSEC_WRITE(sc, MV_TDMA_ATTR(window), 0);
531 	}
532 
533 	for (window = 0, i = 0;
534 	    tags[i] != MARVELL_TAG_UNDEFINED && window < MV_TDMA_NWINDOW; i++) {
535 		rv = marvell_winparams_by_tag(pdev, tags[i],
536 		    &target, &attr, &base, &size);
537 		if (rv != 0 || size == 0)
538 			continue;
539 
540 		if (base > 0xffffffffULL) {
541 			aprint_error_dev(sc->sc_dev,
542 			    "can't remap window %d\n", window);
543 			continue;
544 		}
545 
546 		reg  = MV_TDMA_BAR_BASE(base);
547 		MVXPSEC_WRITE(sc, MV_TDMA_BAR(window), reg);
548 
549 		reg  = MV_TDMA_ATTR_TARGET(target);
550 		reg |= MV_TDMA_ATTR_ATTR(attr);
551 		reg |= MV_TDMA_ATTR_SIZE(size);
552 		reg |= MV_TDMA_ATTR_ENABLE;
553 		MVXPSEC_WRITE(sc, MV_TDMA_ATTR(window), reg);
554 
555 		window++;
556 	}
557 
558 	return 0;
559 }
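
/*
 * Each TDMA window is programmed through a register pair (editor's
 * sketch; the exact field encodings live in mvxpsecreg.h):
 *
 *	MV_TDMA_BAR(n)  <- MV_TDMA_BAR_BASE(base)
 *	MV_TDMA_ATTR(n) <- target | attr | size | MV_TDMA_ATTR_ENABLE
 *
 * Tags that fail to resolve, and windows based above 4GB (out of
 * reach of the 32bit TDMA address registers), are skipped above.
 */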
560 
561 /*
562  * Timer handling
563  */
564 STATIC void
565 mvxpsec_timer(void *aux)
566 {
567 	struct mvxpsec_softc *sc = aux;
568 	struct mvxpsec_packet *mv_p;
569 	uint32_t reg;
570 	int ndone;
571 	int refill;
572 	int s;
573 
574 	/* IPL_SOFTCLOCK */
575 
576 	log(LOG_ERR, "%s: device timeout.\n", __func__);
577 #ifdef MVXPSEC_DEBUG
578 	mvxpsec_dump_reg(sc);
579 #endif
580 
581 	s = splnet();
582 	/* stop security accelerator */
583 	MVXPSEC_WRITE(sc, MV_ACC_COMMAND, MV_ACC_COMMAND_STOP);
584 
585 	/* stop TDMA */
586 	MVXPSEC_WRITE(sc, MV_TDMA_CONTROL, 0);
587 
588 	/* cleanup packet queue */
589 	mutex_enter(&sc->sc_queue_mtx);
590 	ndone = 0;
591 	while ( (mv_p = SIMPLEQ_FIRST(&sc->sc_run_queue)) != NULL) {
592 		SIMPLEQ_REMOVE_HEAD(&sc->sc_run_queue, queue);
593 
594 		mv_p->crp->crp_etype = EINVAL;
595 		mvxpsec_done_packet(mv_p);
596 		ndone++;
597 	}
598 	MVXPSEC_EVCNT_MAX(sc, max_done, ndone);
599 	sc->sc_flags &= ~HW_RUNNING;
600 	refill = (sc->sc_wait_qlen > 0) ? 1 : 0;
601 	mutex_exit(&sc->sc_queue_mtx);
602 
603 	/* reenable TDMA */
604 	if (mvxpsec_dma_wait(sc) < 0)
605 		panic("%s: failed to reset DMA DEVICE. give up.", __func__);
606 	MVXPSEC_WRITE(sc, MV_TDMA_CNT, 0);
607 	MVXPSEC_WRITE(sc, MV_TDMA_SRC, 0);
608 	MVXPSEC_WRITE(sc, MV_TDMA_DST, 0);
609 	MVXPSEC_WRITE(sc, MV_TDMA_CUR, 0);
610 	MVXPSEC_WRITE(sc, MV_TDMA_NXT, 0);
611 	reg  = MV_TDMA_DEFAULT_CONTROL;
612 	reg |= MV_TDMA_CONTROL_ENABLE;
613 	MVXPSEC_WRITE(sc, MV_TDMA_CONTROL, reg);
614 
615 	if (mvxpsec_acc_wait(sc) < 0)
616 		panic("%s: failed to reset MVXPSEC. give up.", __func__);
617 	reg  = MV_ACC_CONFIG_MULT_PKT;
618 	reg |= MV_ACC_CONFIG_WAIT_TDMA;
619 	reg |= MV_ACC_CONFIG_ACT_TDMA;
620 	MVXPSEC_WRITE(sc, MV_ACC_CONFIG, reg);
621 	MVXPSEC_WRITE(sc, MV_ACC_DESC, 0);
622 
623 	if (refill) {
624 		mutex_enter(&sc->sc_queue_mtx);
625 		mvxpsec_dispatch_queue(sc);
626 		mutex_exit(&sc->sc_queue_mtx);
627 	}
628 
629 	crypto_unblock(sc->sc_cid, CRYPTO_SYMQ|CRYPTO_ASYMQ);
630 	splx(s);
631 }
632 
633 /*
634  * DMA handling
635  */
636 
637 /*
638  * Allocate kernel devmem and DMA safe memory with bus_dma API
639  * used for DMA descriptors.
640  *
641  * if phys != 0, assume phys points to DMA-safe memory and bypass
642  * the allocator.
643  */
644 STATIC struct mvxpsec_devmem *
645 mvxpsec_alloc_devmem(struct mvxpsec_softc *sc, paddr_t phys, int size)
646 {
647 	struct mvxpsec_devmem *devmem;
648 	bus_dma_segment_t seg;
649 	int rseg;
650 	int err;
651 
652 	if (sc == NULL)
653 		return NULL;
654 
655 	devmem = kmem_alloc(sizeof(*devmem), KM_SLEEP);
656 	devmem->size = size;
657 
658 	if (phys) {
659 		seg.ds_addr = phys;
660 		seg.ds_len = devmem->size;
661 		rseg = 1;
662 		err = 0;
663 	}
664 	else {
665 		err = bus_dmamem_alloc(sc->sc_dmat,
666 		    devmem->size, PAGE_SIZE, 0,
667 		    &seg, MVXPSEC_DMA_MAX_SEGS, &rseg, BUS_DMA_NOWAIT);
668 	}
669 	if (err) {
670 		aprint_error_dev(sc->sc_dev, "can't alloc DMA buffer\n");
671 		goto fail_kmem_free;
672 	}
673 
674 	err = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
675 	     devmem->size, &devmem->kva, BUS_DMA_NOWAIT);
676 	if (err) {
677 		aprint_error_dev(sc->sc_dev, "can't map DMA buffer\n");
678 		goto fail_dmamem_free;
679 	}
680 
681 	err = bus_dmamap_create(sc->sc_dmat,
682 	    size, 1, size, 0, BUS_DMA_NOWAIT, &devmem->map);
683 	if (err) {
684 		aprint_error_dev(sc->sc_dev, "can't create DMA map\n");
685 		goto fail_unmap;
686 	}
687 
688 	err = bus_dmamap_load(sc->sc_dmat,
689 	    devmem->map, devmem->kva, devmem->size, NULL,
690 	    BUS_DMA_NOWAIT);
691 	if (err) {
692 		aprint_error_dev(sc->sc_dev,
693 		   "can't load DMA buffer VA:%p PA:0x%08x\n",
694 		    devmem->kva, (int)seg.ds_addr);
695 		goto fail_destroy;
696 	}
697 
698 	return devmem;
699 
700 fail_destroy:
701 	bus_dmamap_destroy(sc->sc_dmat, devmem->map);
702 fail_unmap:
703 	bus_dmamem_unmap(sc->sc_dmat, devmem->kva, devmem->size);
704 fail_dmamem_free:
705 	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
706 fail_kmem_free:
707 	kmem_free(devmem, sizeof(*devmem));
708 
709 	return NULL;
710 }
711 
712 /*
713  * Get DMA Descriptor from (DMA safe) descriptor pool.
714  */
715 INLINE struct mvxpsec_descriptor_handle *
716 mvxpsec_dma_getdesc(struct mvxpsec_softc *sc)
717 {
718 	struct mvxpsec_descriptor_handle *entry;
719 
720 	/* must be called with sc->sc_dma_mtx held */
721 	KASSERT(mutex_owned(&sc->sc_dma_mtx));
722 
723 	if (sc->sc_desc_ring_prod == sc->sc_desc_ring_cons)
724 		return NULL;
725 
726 	entry = &sc->sc_desc_ring[sc->sc_desc_ring_prod];
727 	sc->sc_desc_ring_prod++;
728 	if (sc->sc_desc_ring_prod >= sc->sc_desc_ring_size)
729 		sc->sc_desc_ring_prod -= sc->sc_desc_ring_size;
730 
731 	return entry;
732 }
733 
734 /*
735  * Return a DMA descriptor to the descriptor pool.
736  */
737 _INLINE void
738 mvxpsec_dma_putdesc(struct mvxpsec_softc *sc,
739     struct mvxpsec_descriptor_handle *dh)
740 {
741 	/* must be called with sc->sc_dma_mtx held */
742 	KASSERT(mutex_owned(&sc->sc_dma_mtx));
743 
744 	sc->sc_desc_ring_cons++;
745 	if (sc->sc_desc_ring_cons >= sc->sc_desc_ring_size)
746 		sc->sc_desc_ring_cons -= sc->sc_desc_ring_size;
747 
748 	return;
749 }
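
/*
 * Ring index arithmetic (editor's note): the ring counts as full when
 * sc_desc_ring_prod == sc_desc_ring_cons and getdesc returns NULL, so
 * at most sc_desc_ring_size - 1 descriptors can be outstanding.
 * E.g. with size == 4, prod == 3 and cons == 0, one more getdesc
 * wraps prod to 0 == cons, and the next getdesc fails until a
 * descriptor is returned.
 */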
750 
751 /*
752  * Setup DMA Descriptor
753  * copy 'size' bytes from 'src' to 'dst'.
754  * 'src' or 'dst' must be an SRAM address.
755  */
756 INLINE void
757 mvxpsec_dma_setup(struct mvxpsec_descriptor_handle *dh,
758     uint32_t dst, uint32_t src, uint32_t size)
759 {
760 	struct mvxpsec_descriptor *desc;
761 
762 	desc = (struct mvxpsec_descriptor *)dh->_desc;
763 
764 	desc->tdma_dst = dst;
765 	desc->tdma_src = src;
766 	desc->tdma_word0 = size;
767 	if (size != 0)
768 		desc->tdma_word0 |= MV_TDMA_CNT_OWN;
769 	/* size == 0 is owned by ACC, not TDMA */
770 
771 #ifdef MVXPSEC_DEBUG
772 	mvxpsec_dump_dmaq(dh);
773 #endif
774 
775 }
776 
777 /*
778  * Concatenate two DMA descriptors
779  */
780 INLINE void
781 mvxpsec_dma_cat(struct mvxpsec_softc *sc,
782     struct mvxpsec_descriptor_handle *dh1,
783     struct mvxpsec_descriptor_handle *dh2)
784 {
785 	((struct mvxpsec_descriptor*)dh1->_desc)->tdma_nxt = dh2->phys_addr;
786 	MVXPSEC_SYNC_DESC(sc, dh1, BUS_DMASYNC_PREWRITE);
787 }
788 
789 /*
790  * Schedule DMA Copy
791  */
792 INLINE int
793 mvxpsec_dma_copy0(struct mvxpsec_softc *sc, mvxpsec_dma_ring *r,
794     uint32_t dst, uint32_t src, uint32_t size)
795 {
796 	struct mvxpsec_descriptor_handle *dh;
797 
798 	dh = mvxpsec_dma_getdesc(sc);
799 	if (dh == NULL) {
800 		log(LOG_ERR, "%s: descriptor full\n", __func__);
801 		return -1;
802 	}
803 
804 	mvxpsec_dma_setup(dh, dst, src, size);
805 	if (r->dma_head == NULL) {
806 		r->dma_head = dh;
807 		r->dma_last = dh;
808 		r->dma_size = 1;
809 	}
810 	else {
811 		mvxpsec_dma_cat(sc, r->dma_last, dh);
812 		r->dma_last = dh;
813 		r->dma_size++;
814 	}
815 
816 	return 0;
817 }
818 
819 INLINE int
820 mvxpsec_dma_copy(struct mvxpsec_softc *sc, mvxpsec_dma_ring *r,
821     uint32_t dst, uint32_t src, uint32_t size)
822 {
823 	if (size == 0) /* size 0 marks the special ACC-activate descriptor */
824 		return 0;
825 
826 	return mvxpsec_dma_copy0(sc, r, dst, src, size);
827 }
828 
829 /*
830  * Schedule ACC Activate
831  */
832 INLINE int
833 mvxpsec_dma_acc_activate(struct mvxpsec_softc *sc, mvxpsec_dma_ring *r)
834 {
835 	return mvxpsec_dma_copy0(sc, r, 0, 0, 0);
836 }
837 
838 /*
839  * Finalize DMA setup
840  */
841 INLINE void
842 mvxpsec_dma_finalize(struct mvxpsec_softc *sc, mvxpsec_dma_ring *r)
843 {
844 	struct mvxpsec_descriptor_handle *dh;
845 
846 	dh = r->dma_last;
847 	((struct mvxpsec_descriptor*)dh->_desc)->tdma_nxt = 0;
848 	MVXPSEC_SYNC_DESC(sc, dh, BUS_DMASYNC_PREWRITE);
849 }
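
/*
 * Putting the low-level pieces together (editor's sketch; 'ring' is a
 * zeroed mvxpsec_dma_ring, the addresses are illustrative, and
 * sc->sc_dma_mtx must be held):
 *
 *	mvxpsec_dma_copy(sc, &ring, sram_pa, dram_pa, len); -- DRAM->SRAM
 *	mvxpsec_dma_acc_activate(sc, &ring);                -- kick the ACC
 *	mvxpsec_dma_copy(sc, &ring, dram_pa, sram_pa, len); -- SRAM->DRAM
 *	mvxpsec_dma_finalize(sc, &ring);                    -- terminate
 *
 * mvxpsec_dma_copy_packet() below builds exactly this shape for each
 * packet.
 */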
850 
851 /*
852  * Free entire DMA ring
853  */
854 INLINE void
855 mvxpsec_dma_free(struct mvxpsec_softc *sc, mvxpsec_dma_ring *r)
856 {
857 	sc->sc_desc_ring_cons += r->dma_size;
858 	if (sc->sc_desc_ring_cons >= sc->sc_desc_ring_size)
859 		sc->sc_desc_ring_cons -= sc->sc_desc_ring_size;
860 	r->dma_head = NULL;
861 	r->dma_last = NULL;
862 	r->dma_size = 0;
863 }
864 
865 /*
866  * create DMA descriptor chain for the packet
867  */
868 INLINE int
869 mvxpsec_dma_copy_packet(struct mvxpsec_softc *sc, struct mvxpsec_packet *mv_p)
870 {
871 	struct mvxpsec_session *mv_s = mv_p->mv_s;
872 	uint32_t src, dst, len;
873 	uint32_t pkt_off, pkt_off_r;
874 	int err;
875 	int i;
876 
877 	/* must be called with sc->sc_dma_mtx held */
878 	KASSERT(mutex_owned(&sc->sc_dma_mtx));
879 
880 	/*
881 	 * set offset for mem->device copy
882 	 *
883 	 * typical packet image:
884 	 *
885 	 *   enc_ivoff
886 	 *   mac_off
887 	 *   |
888 	 *   |    enc_off
889 	 *   |    |
890 	 *   v    v
891 	 *   +----+--------...
892 	 *   |IV  |DATA
893 	 *   +----+--------...
894 	 */
895 	pkt_off = 0;
896 	if (mv_p->mac_off > 0)
897 		pkt_off = mv_p->mac_off;
898 	if ((mv_p->flags & CRP_EXT_IV) == 0 && pkt_off > mv_p->enc_ivoff)
899 		pkt_off = mv_p->enc_ivoff;
900 	if (mv_p->enc_off > 0 && pkt_off > mv_p->enc_off)
901 		pkt_off = mv_p->enc_off;
902 	pkt_off_r = pkt_off;
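
	/*
	 * Worked example (editor's sketch): with mac_off = 0,
	 * enc_ivoff = 0 and enc_off = 16 (IV ahead of the payload, as
	 * in the picture above), pkt_off stays 0 and the copy starts
	 * at the head of the buffer; pkt_off_r replays the same skip
	 * for the SRAM -> DRAM copy-back further down.
	 */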
903 
904 	/* make DMA descriptors to copy packet header: DRAM -> SRAM */
905 	dst = (uint32_t)MVXPSEC_SRAM_PKT_HDR_PA(sc);
906 	src = (uint32_t)mv_p->pkt_header_map->dm_segs[0].ds_addr;
907 	len = sizeof(mv_p->pkt_header);
908 	err = mvxpsec_dma_copy(sc, &mv_p->dma_ring, dst, src, len);
909 	if (__predict_false(err))
910 		return err;
911 
912 	/*
913 	 * we can reuse the session header on SRAM if the session has not changed.
914 	 * we can reuse session header on SRAM if session is not changed.
915 	 */
916 	if (sc->sc_last_session != mv_s) {
917 		dst = (uint32_t)MVXPSEC_SRAM_SESS_HDR_PA(sc);
918 		src = (uint32_t)mv_s->session_header_map->dm_segs[0].ds_addr;
919 		len = sizeof(mv_s->session_header);
920 		err = mvxpsec_dma_copy(sc, &mv_p->dma_ring, dst, src, len);
921 		if (__predict_false(err))
922 			return err;
923 		sc->sc_last_session = mv_s;
924 	}
925 
926 	/* make DMA descriptor to copy payload data: DRAM -> SRAM */
927 	dst = MVXPSEC_SRAM_PAYLOAD_PA(sc, 0);
928 	for (i = 0; i < mv_p->data_map->dm_nsegs; i++) {
929 		src = mv_p->data_map->dm_segs[i].ds_addr;
930 		len = mv_p->data_map->dm_segs[i].ds_len;
931 		if (pkt_off) {
932 			if (len <= pkt_off) {
933 				/* ignore the segment */
934 				dst += len;
935 				pkt_off -= len;
936 				continue;
937 			}
938 			/* copy from the middle of the segment */
939 			dst += pkt_off;
940 			src += pkt_off;
941 			len -= pkt_off;
942 			pkt_off = 0;
943 		}
944 		err = mvxpsec_dma_copy(sc, &mv_p->dma_ring, dst, src, len);
945 		if (__predict_false(err))
946 			return err;
947 		dst += len;
948 	}
949 
950 	/* make special descriptor to activate security accelerator */
951 	err = mvxpsec_dma_acc_activate(sc, &mv_p->dma_ring);
952 	if (__predict_false(err))
953 		return err;
954 
955 	/* make DMA descriptors to copy payload: SRAM -> DRAM */
956 	src = (uint32_t)MVXPSEC_SRAM_PAYLOAD_PA(sc, 0);
957 	for (i = 0; i < mv_p->data_map->dm_nsegs; i++) {
958 		dst = (uint32_t)mv_p->data_map->dm_segs[i].ds_addr;
959 		len = (uint32_t)mv_p->data_map->dm_segs[i].ds_len;
960 		if (pkt_off_r) {
961 			if (len <= pkt_off_r) {
962 				/* ignore the segment */
963 				src += len;
964 				pkt_off_r -= len;
965 				continue;
966 			}
967 			/* copy from the middle of the segment */
968 			src += pkt_off_r;
969 			dst += pkt_off_r;
970 			len -= pkt_off_r;
971 			pkt_off_r = 0;
972 		}
973 		err = mvxpsec_dma_copy(sc, &mv_p->dma_ring, dst, src, len);
974 		if (__predict_false(err))
975 			return err;
976 		src += len;
977 	}
978 	KASSERT(pkt_off == 0);
979 	KASSERT(pkt_off_r == 0);
980 
981 	/*
982 	 * make DMA descriptors to copy packet header: SRAM->DRAM
983 	 * if IV is present in the payload, no need to copy.
984 	 */
985 	if (mv_p->flags & CRP_EXT_IV) {
986 		dst = (uint32_t)mv_p->pkt_header_map->dm_segs[0].ds_addr;
987 		src = (uint32_t)MVXPSEC_SRAM_PKT_HDR_PA(sc);
988 		len = sizeof(mv_p->pkt_header);
989 		err = mvxpsec_dma_copy(sc, &mv_p->dma_ring, dst, src, len);
990 		if (__predict_false(err))
991 			return err;
992 	}
993 
994 	return 0;
995 }
996 
997 INLINE int
998 mvxpsec_dma_sync_packet(struct mvxpsec_softc *sc, struct mvxpsec_packet *mv_p)
999 {
1000 	/* sync packet header */
1001 	bus_dmamap_sync(sc->sc_dmat,
1002 	    mv_p->pkt_header_map, 0, sizeof(mv_p->pkt_header),
1003 	    BUS_DMASYNC_PREWRITE);
1004 
1005 #ifdef MVXPSEC_DEBUG
1006 	/* sync session header */
1007 	if (mvxpsec_debug != 0) {
1008 		struct mvxpsec_session *mv_s = mv_p->mv_s;
1009 
1010 		/* only debug code touch the session header after newsession */
1011 		bus_dmamap_sync(sc->sc_dmat,
1012 		    mv_s->session_header_map,
1013 		    0, sizeof(mv_s->session_header),
1014 		    BUS_DMASYNC_PREWRITE);
1015 	}
1016 #endif
1017 
1018 	/* sync packet buffer */
1019 	bus_dmamap_sync(sc->sc_dmat,
1020 	    mv_p->data_map, 0, mv_p->data_len,
1021 	    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
1022 
1023 	return 0;
1024 }
1025 
1026 /*
1027  * Initialize MVXPSEC Internal SRAM
1028  *
1029  * - must be called after DMA initialization.
1030  * - make VM mapping for SRAM area on MBus.
1031  */
1032 STATIC int
1033 mvxpsec_init_sram(struct mvxpsec_softc *sc)
1034 {
1035 	uint32_t tag, target, attr, base, size;
1036 	vaddr_t va;
1037 	int window;
1038 
1039 	switch (sc->sc_dev->dv_unit) {
1040 	case 0:
1041 		tag = ARMADAXP_TAG_CRYPT0;
1042 		break;
1043 	case 1:
1044 		tag = ARMADAXP_TAG_CRYPT1;
1045 		break;
1046 	default:
1047 		aprint_error_dev(sc->sc_dev, "no internal SRAM mapping\n");
1048 		return -1;
1049 	}
1050 
1051 	window = mvsoc_target(tag, &target, &attr, &base, &size);
1052 	if (window >= nwindow) {
1053 		aprint_error_dev(sc->sc_dev, "no internal SRAM mapping\n");
1054 		return -1;
1055 	}
1056 
1057 	if (sizeof(struct mvxpsec_crypt_sram) > size) {
1058 		aprint_error_dev(sc->sc_dev,
1059 		    "SRAM data structure exceeds SRAM window size.\n");
1060 		return -1;
1061 	}
1062 
1063 	aprint_normal_dev(sc->sc_dev,
1064 	    "internal SRAM window at 0x%08x-0x%08x",
1065 	    base, base + size - 1);
1066 	sc->sc_sram_pa = base;
1067 
1068 	/* get vmspace to read/write device internal SRAM */
1069 	va = uvm_km_alloc(kernel_map, PAGE_SIZE, PAGE_SIZE,
1070 			UVM_KMF_VAONLY | UVM_KMF_NOWAIT);
1071 	if (va == 0) {
1072 		aprint_error_dev(sc->sc_dev, "cannot map SRAM window\n");
1073 		sc->sc_sram_va = NULL;
1074 		aprint_normal("\n");
1075 		return 0;
1076 	}
1077 	/* XXX: not working. PMAP_NOCACHE seems to have no effect? */
1078 	pmap_kenter_pa(va, base, VM_PROT_READ|VM_PROT_WRITE, PMAP_NOCACHE);
1079 	pmap_update(pmap_kernel());
1080 	sc->sc_sram_va = (void *)va;
1081 	aprint_normal(" va %p\n", sc->sc_sram_va);
1082 	memset(sc->sc_sram_va, 0xff, MV_ACC_SRAM_SIZE);
1083 
1084 	return 0;
1085 }
1086 
1087 /*
1088  * Initialize TDMA engine.
1089  */
1090 STATIC int
1091 mvxpsec_init_dma(struct mvxpsec_softc *sc, struct marvell_attach_args *mva)
1092 {
1093 	struct mvxpsec_descriptor_handle *dh;
1094 	uint8_t *va;
1095 	paddr_t pa;
1096 	off_t va_off, pa_off;
1097 	int i, n, seg, ndh;
1098 
1099 	/* Init device's control parameters (still disabled) */
1100 	MVXPSEC_WRITE(sc, MV_TDMA_CONTROL, MV_TDMA_DEFAULT_CONTROL);
1101 
1102 	/* Init Software DMA Handlers */
1103 	sc->sc_devmem_desc =
1104 	    mvxpsec_alloc_devmem(sc, 0, PAGE_SIZE * MVXPSEC_DMA_DESC_PAGES);
1105 	ndh = (PAGE_SIZE / sizeof(struct mvxpsec_descriptor))
1106 	    * MVXPSEC_DMA_DESC_PAGES;
1107 	sc->sc_desc_ring =
1108 	    kmem_alloc(sizeof(struct mvxpsec_descriptor_handle) * ndh,
1109 	        KM_SLEEP);
1110 	aprint_normal_dev(sc->sc_dev, "%d DMA handles in %zu bytes array\n",
1111 	    ndh, sizeof(struct mvxpsec_descriptor_handle) * ndh);
1112 
1113 	ndh = 0;
1114 	for (seg = 0; seg < devmem_nseg(sc->sc_devmem_desc); seg++) {
1115 		va = devmem_va(sc->sc_devmem_desc);
1116 		pa = devmem_pa(sc->sc_devmem_desc, seg);
1117 		n = devmem_palen(sc->sc_devmem_desc, seg) /
1118 		       	sizeof(struct mvxpsec_descriptor);
1119 		va_off = (PAGE_SIZE * seg);
1120 		pa_off = 0;
1121 		for (i = 0; i < n; i++) {
1122 			dh = &sc->sc_desc_ring[ndh];
1123 			dh->map = devmem_map(sc->sc_devmem_desc);
1124 			dh->off = va_off + pa_off;
1125 			dh->_desc = (void *)(va + va_off + pa_off);
1126 			dh->phys_addr = pa + pa_off;
1127 			pa_off += sizeof(struct mvxpsec_descriptor);
1128 			ndh++;
1129 		}
1130 	}
1131 	sc->sc_desc_ring_size = ndh;
1132 	sc->sc_desc_ring_prod = 0;
1133 	sc->sc_desc_ring_cons = sc->sc_desc_ring_size - 1;
1134 
1135 	return 0;
1136 }
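
/*
 * Sizing sketch (editor's note; the real values of
 * MVXPSEC_DMA_DESC_PAGES and sizeof(struct mvxpsec_descriptor) come
 * from the headers): assuming 4KB pages, a 16-byte descriptor and 4
 * descriptor pages, ndh = (4096 / 16) * 4 = 1024 handles.
 */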
1137 
1138 /*
1139  * Wait for the TDMA controller to become idle
1140  */
1141 INLINE int
1142 mvxpsec_dma_wait(struct mvxpsec_softc *sc)
1143 {
1144 	int retry = 0;
1145 
1146 	while (MVXPSEC_READ(sc, MV_TDMA_CONTROL) & MV_TDMA_CONTROL_ACT) {
1147 		delay(mvxpsec_wait_interval);
1148 		if (retry++ >= mvxpsec_wait_retry)
1149 			return -1;
1150 	}
1151 	return 0;
1152 }
1153 
1154 /*
1155  * Wait for the Security Accelerator to become idle
1156  */
1157 INLINE int
1158 mvxpsec_acc_wait(struct mvxpsec_softc *sc)
1159 {
1160 	int retry = 0;
1161 
1162 	while (MVXPSEC_READ(sc, MV_ACC_COMMAND) & MV_ACC_COMMAND_ACT) {
1163 		delay(mvxpsec_wait_interval);
1164 		if (++retry >= mvxpsec_wait_retry)
1165 			return -1;
1166 	}
1167 	return 0;
1168 }
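
/*
 * Both wait loops above poll every mvxpsec_wait_interval (10 usec),
 * up to mvxpsec_wait_retry (100) times, i.e. they give up after
 * roughly 10 * 100 = 1000 usec = 1 msec, matching the tunables near
 * the top of this file.
 */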
1169 
1170 /*
1171  * Entry of interrupt handler
1172  *
1173  * registered with the kernel via marvell_intr_establish()
1174  */
1175 int
1176 mvxpsec_intr(void *arg)
1177 {
1178 	struct mvxpsec_softc *sc = arg;
1179 	uint32_t v;
1180 
1181 	/* IPL_NET */
1182 	while ((v = mvxpsec_intr_ack(sc)) != 0) {
1183 		mvxpsec_intr_cnt(sc, v);
1184 		MVXPSEC_PRINTF(MVXPSEC_DEBUG_INTR, "MVXPSEC Intr 0x%08x\n", v);
1185 		MVXPSEC_PRINTF(MVXPSEC_DEBUG_INTR, "%s\n", s_xpsecintr(v));
1186 #ifdef MVXPSEC_DEBUG
1187 		mvxpsec_dump_reg(sc);
1188 #endif
1189 
1190 		/* call high-level handlers */
1191 		if (v & MVXPSEC_INT_ACCTDMA)
1192 			mvxpsec_done(sc);
1193 	}
1194 
1195 	return 0;
1196 }
1197 
1198 INLINE void
1199 mvxpsec_intr_cleanup(struct mvxpsec_softc *sc)
1200 {
1201 	struct mvxpsec_packet *mv_p;
1202 
1203 	/* must be called with sc->sc_dma_mtx held */
1204 	KASSERT(mutex_owned(&sc->sc_dma_mtx));
1205 
1206 	/*
1207 	 * there is only one interrupt source for run_queue;
1208 	 * no one else touches sc_run_queue here.
1209 	 */
1210 	SIMPLEQ_FOREACH(mv_p, &sc->sc_run_queue, queue)
1211 		mvxpsec_dma_free(sc, &mv_p->dma_ring);
1212 }
1213 
1214 /*
1215  * Acknowledge the interrupt
1216  *
1217  * read the cause bits, clear them, and return them.
1218  * NOTE: multiple cause bits may be returned at once.
1219  */
1220 STATIC uint32_t
1221 mvxpsec_intr_ack(struct mvxpsec_softc *sc)
1222 {
1223 	uint32_t reg;
1224 
1225 	reg  = MVXPSEC_READ(sc, MVXPSEC_INT_CAUSE);
1226 	reg &= MVXPSEC_DEFAULT_INT;
1227 	MVXPSEC_WRITE(sc, MVXPSEC_INT_CAUSE, ~reg);
1228 	MVXPSEC_PRINTF(MVXPSEC_DEBUG_INTR, "Int: %s\n", s_xpsecintr(reg));
1229 
1230 	return reg;
1231 }
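
/*
 * Editor's note on the write-to-clear idiom above: writing ~reg back
 * to the cause register clears exactly the bits we observed while
 * leaving any bit raised after the read intact. E.g. reading
 * reg = 0x5 and writing ~reg = 0xfffffffa clears bits 0 and 2 only.
 * mvxpsec_eintr_ack() below uses the same idiom.
 */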
1232 
1233 /*
1234  * Entry of TDMA error interrupt handler
1235  *
1236  * registered with the kernel via marvell_intr_establish()
1237  */
1238 int
1239 mvxpsec_eintr(void *arg)
1240 {
1241 	struct mvxpsec_softc *sc = arg;
1242 	uint32_t err;
1243 
1244 	/* IPL_NET */
1245 again:
1246 	err = mvxpsec_eintr_ack(sc);
1247 	if (err == 0)
1248 		goto done;
1249 
1250 	log(LOG_ERR, "%s: DMA Error Interrupt: %s\n", __func__,
1251 	    s_errreg(err));
1252 #ifdef MVXPSEC_DEBUG
1253 	mvxpsec_dump_reg(sc);
1254 #endif
1255 
1256 	goto again;
1257 done:
1258 	return 0;
1259 }
1260 
1261 /*
1262  * Acknowledge the TDMA error interrupt
1263  *
1264  * read the cause bits, clear them, and return them.
1265  * NOTE: multiple cause bits may be returned at once.
1266  */
1267 STATIC uint32_t
1268 mvxpsec_eintr_ack(struct mvxpsec_softc *sc)
1269 {
1270 	uint32_t reg;
1271 
1272 	reg  = MVXPSEC_READ(sc, MV_TDMA_ERR_CAUSE);
1273 	reg &= MVXPSEC_DEFAULT_ERR;
1274 	MVXPSEC_WRITE(sc, MV_TDMA_ERR_CAUSE, ~reg);
1275 	MVXPSEC_PRINTF(MVXPSEC_DEBUG_INTR, "Int: %s\n", s_xpsecintr(reg));
1276 
1277 	return reg;
1278 }
1279 
1280 /*
1281  * Interrupt statistics
1282  *
1283  * these are NOT statistics of how many times the events 'occurred';
1284  * they ONLY count how many times the events were 'handled'.
1285  */
1286 INLINE void
1287 mvxpsec_intr_cnt(struct mvxpsec_softc *sc, int cause)
1288 {
1289 	MVXPSEC_EVCNT_INCR(sc, intr_all);
1290 	if (cause & MVXPSEC_INT_AUTH)
1291 		MVXPSEC_EVCNT_INCR(sc, intr_auth);
1292 	if (cause & MVXPSEC_INT_DES)
1293 		MVXPSEC_EVCNT_INCR(sc, intr_des);
1294 	if (cause & MVXPSEC_INT_AES_ENC)
1295 		MVXPSEC_EVCNT_INCR(sc, intr_aes_enc);
1296 	if (cause & MVXPSEC_INT_AES_DEC)
1297 		MVXPSEC_EVCNT_INCR(sc, intr_aes_dec);
1298 	if (cause & MVXPSEC_INT_ENC)
1299 		MVXPSEC_EVCNT_INCR(sc, intr_enc);
1300 	if (cause & MVXPSEC_INT_SA)
1301 		MVXPSEC_EVCNT_INCR(sc, intr_sa);
1302 	if (cause & MVXPSEC_INT_ACCTDMA)
1303 		MVXPSEC_EVCNT_INCR(sc, intr_acctdma);
1304 	if (cause & MVXPSEC_INT_TDMA_COMP)
1305 		MVXPSEC_EVCNT_INCR(sc, intr_comp);
1306 	if (cause & MVXPSEC_INT_TDMA_OWN)
1307 		MVXPSEC_EVCNT_INCR(sc, intr_own);
1308 	if (cause & MVXPSEC_INT_ACCTDMA_CONT)
1309 		MVXPSEC_EVCNT_INCR(sc, intr_acctdma_cont);
1310 }
1311 
1312 /*
1313  * Setup MVXPSEC header structure.
1314  *
1315  * the header contains the security accelerator descriptor,
1316  * the cipher key material, the IVs of ciphers and MACs, ...
1317  *
1318  * the header is transferred to MVXPSEC Internal SRAM by TDMA,
1319  * and parsed by MVXPSEC H/W.
1320  */
1321 STATIC int
1322 mvxpsec_header_finalize(struct mvxpsec_packet *mv_p)
1323 {
1324 	struct mvxpsec_acc_descriptor *desc = &mv_p->pkt_header.desc;
1325 	int enc_start, enc_len, iv_offset;
1326 	int mac_start, mac_len, mac_offset;
1327 
1328 	/* offset -> device address */
1329 	enc_start = MVXPSEC_SRAM_PAYLOAD_DA(mv_p->enc_off);
1330 	enc_len = mv_p->enc_len;
1331 	if (mv_p->flags & CRP_EXT_IV)
1332 		iv_offset = mv_p->enc_ivoff;
1333 	else
1334 		iv_offset = MVXPSEC_SRAM_PAYLOAD_DA(mv_p->enc_ivoff);
1335 	mac_start = MVXPSEC_SRAM_PAYLOAD_DA(mv_p->mac_off);
1336 	mac_len = mv_p->mac_len;
1337 	mac_offset = MVXPSEC_SRAM_PAYLOAD_DA(mv_p->mac_dst);
1338 
1339 	MVXPSEC_PRINTF(MVXPSEC_DEBUG_OPENCRYPTO,
1340 	    "PAYLOAD at 0x%08x\n", (int)MVXPSEC_SRAM_PAYLOAD_OFF);
1341 	MVXPSEC_PRINTF(MVXPSEC_DEBUG_OPENCRYPTO,
1342 	    "ENC from 0x%08x\n", enc_start);
1343 	MVXPSEC_PRINTF(MVXPSEC_DEBUG_OPENCRYPTO,
1344 	    "MAC from 0x%08x\n", mac_start);
1345 	MVXPSEC_PRINTF(MVXPSEC_DEBUG_OPENCRYPTO,
1346 	    "MAC to 0x%08x\n", mac_offset);
1347 	MVXPSEC_PRINTF(MVXPSEC_DEBUG_OPENCRYPTO,
1348 	    "ENC IV at 0x%08x\n", iv_offset);
1349 
1350 	/* setup device addresses in Security Accelerator Descriptors */
1351 	desc->acc_encdata = MV_ACC_DESC_ENC_DATA(enc_start, enc_start);
1352 	desc->acc_enclen = MV_ACC_DESC_ENC_LEN(enc_len);
1353 	if (desc->acc_config & MV_ACC_CRYPTO_DECRYPT)
1354 		desc->acc_enckey =
1355 		    MV_ACC_DESC_ENC_KEY(MVXPSEC_SRAM_KEY_D_DA);
1356 	else
1357 		desc->acc_enckey =
1358 		    MV_ACC_DESC_ENC_KEY(MVXPSEC_SRAM_KEY_DA);
1359 	desc->acc_enciv =
1360 	    MV_ACC_DESC_ENC_IV(MVXPSEC_SRAM_IV_WORK_DA, iv_offset);
1361 
1362 	desc->acc_macsrc = MV_ACC_DESC_MAC_SRC(mac_start, mac_len);
1363 	desc->acc_macdst = MV_ACC_DESC_MAC_DST(mac_offset, mac_len);
1364 	desc->acc_maciv =
1365 	    MV_ACC_DESC_MAC_IV(MVXPSEC_SRAM_MIV_IN_DA,
1366 	        MVXPSEC_SRAM_MIV_OUT_DA);
1367 
1368 	return 0;
1369 }
1370 
1371 /*
1372  * constructor of session structure.
1373  *
1374  * this constructor will be called by the pool_cache framework.
1375  */
1376 STATIC int
1377 mvxpsec_session_ctor(void *arg, void *obj, int flags)
1378 {
1379 	struct mvxpsec_softc *sc = arg;
1380 	struct mvxpsec_session *mv_s = obj;
1381 
1382 	/* pool is owned by softc */
1383 	mv_s->sc = sc;
1384 
1385 	/* Create and load DMA map for session header */
1386 	mv_s->session_header_map = 0;
1387 	if (bus_dmamap_create(sc->sc_dmat,
1388 	    sizeof(mv_s->session_header), 1,
1389 	    sizeof(mv_s->session_header), 0,
1390 	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1391 	    &mv_s->session_header_map)) {
1392 		log(LOG_ERR, "%s: cannot create DMA map\n", __func__);
1393 		goto fail;
1394 	}
1395 	if (bus_dmamap_load(sc->sc_dmat, mv_s->session_header_map,
1396 	    &mv_s->session_header, sizeof(mv_s->session_header),
1397 	    NULL, BUS_DMA_NOWAIT)) {
1398 		log(LOG_ERR, "%s: cannot load header\n", __func__);
1399 		goto fail;
1400 	}
1401 
1402 	return 0;
1403 fail:
1404 	if (mv_s->session_header_map)
1405 		bus_dmamap_destroy(sc->sc_dmat, mv_s->session_header_map);
1406 	return ENOMEM;
1407 }
1408 
1409 /*
1410  * destructor of session structure.
1411  *
1412  * this destructor will be called by the pool_cache framework.
1413  */
1414 STATIC void
1415 mvxpsec_session_dtor(void *arg, void *obj)
1416 {
1417 	struct mvxpsec_softc *sc = arg;
1418 	struct mvxpsec_session *mv_s = obj;
1419 
1420 	if (mv_s->sc != sc)
1421 		panic("inconsistent context\n");
1422 
1423 	bus_dmamap_destroy(sc->sc_dmat, mv_s->session_header_map);
1424 }
1425 
1426 /*
1427  * constructor of packet structure.
1428  */
1429 STATIC int
1430 mvxpsec_packet_ctor(void *arg, void *obj, int flags)
1431 {
1432 	struct mvxpsec_softc *sc = arg;
1433 	struct mvxpsec_packet *mv_p = obj;
1434 
1435 	mv_p->dma_ring.dma_head = NULL;
1436 	mv_p->dma_ring.dma_last = NULL;
1437 	mv_p->dma_ring.dma_size = 0;
1438 
1439 	/* Create and load DMA map for packet header */
1440 	mv_p->pkt_header_map = 0;
1441 	if (bus_dmamap_create(sc->sc_dmat,
1442 	    sizeof(mv_p->pkt_header), 1, sizeof(mv_p->pkt_header), 0,
1443 	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1444 	    &mv_p->pkt_header_map)) {
1445 		log(LOG_ERR, "%s: cannot create DMA map\n", __func__);
1446 		goto fail;
1447 	}
1448 	if (bus_dmamap_load(sc->sc_dmat, mv_p->pkt_header_map,
1449 	    &mv_p->pkt_header, sizeof(mv_p->pkt_header),
1450 	    NULL, BUS_DMA_NOWAIT)) {
1451 		log(LOG_ERR, "%s: cannot load header\n", __func__);
1452 		goto fail;
1453 	}
1454 
1455 	/* Create DMA map for session data. */
1456 	mv_p->data_map = 0;
1457 	if (bus_dmamap_create(sc->sc_dmat,
1458 	    MVXPSEC_DMA_MAX_SIZE, MVXPSEC_DMA_MAX_SEGS, MVXPSEC_DMA_MAX_SIZE,
1459 	    0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &mv_p->data_map)) {
1460 		log(LOG_ERR, "%s: cannot create DMA map\n", __func__);
1461 		goto fail;
1462 	}
1463 
1464 	return 0;
1465 fail:
1466 	if (mv_p->pkt_header_map)
1467 		bus_dmamap_destroy(sc->sc_dmat, mv_p->pkt_header_map);
1468 	if (mv_p->data_map)
1469 		bus_dmamap_destroy(sc->sc_dmat, mv_p->data_map);
1470 	return ENOMEM;
1471 }
1472 
1473 /*
1474  * destructor of packet structure.
1475  */
1476 STATIC void
1477 mvxpsec_packet_dtor(void *arg, void *obj)
1478 {
1479 	struct mvxpsec_softc *sc = arg;
1480 	struct mvxpsec_packet *mv_p = obj;
1481 
1482 	mutex_enter(&sc->sc_dma_mtx);
1483 	mvxpsec_dma_free(sc, &mv_p->dma_ring);
1484 	mutex_exit(&sc->sc_dma_mtx);
1485 	bus_dmamap_destroy(sc->sc_dmat, mv_p->pkt_header_map);
1486 	bus_dmamap_destroy(sc->sc_dmat, mv_p->data_map);
1487 }
1488 
1489 /*
1490  * allocate a new session structure.
1491  */
1492 STATIC struct mvxpsec_session *
1493 mvxpsec_session_alloc(struct mvxpsec_softc *sc)
1494 {
1495 	struct mvxpsec_session *mv_s;
1496 
1497 	mv_s = pool_cache_get(sc->sc_session_pool, PR_NOWAIT);
1498 	if (mv_s == NULL) {
1499 		log(LOG_ERR, "%s: cannot allocate memory\n", __func__);
1500 		return NULL;
1501 	}
1502 	mv_s->refs = 1; /* 0 means session is already invalid */
1503 	mv_s->sflags = 0;
1504 
1505 	return mv_s;
1506 }
1507 
1508 /*
1509  * deallocate session structure.
1510  */
1511 STATIC void
1512 mvxpsec_session_dealloc(struct mvxpsec_session *mv_s)
1513 {
1514 	struct mvxpsec_softc *sc = mv_s->sc;
1515 
1516 	mv_s->sflags |= DELETED;
1517 	mvxpsec_session_unref(mv_s);
1518 	crypto_unblock(sc->sc_cid, CRYPTO_SYMQ|CRYPTO_ASYMQ);
1519 
1520 	return;
1521 }
1522 
1523 STATIC int
1524 mvxpsec_session_ref(struct mvxpsec_session *mv_s)
1525 {
1526 	uint32_t refs;
1527 
1528 	if (mv_s->sflags & DELETED) {
1529 		log(LOG_ERR,
1530 		    "%s: session is already deleted.\n", __func__);
1531 		return -1;
1532 	}
1533 
1534 	refs = atomic_inc_32_nv(&mv_s->refs);
1535 	if (refs == 1) {
1536 		/*
1537 		 * a session with refs == 0 is
1538 		 * already invalidated. revert it.
1539 		 * XXX: use CAS ?
1540 		 */
1541 		atomic_dec_32(&mv_s->refs);
1542 		log(LOG_ERR,
1543 		    "%s: session is already invalidated.\n", __func__);
1544 		return -1;
1545 	}
1546 
1547 	return 0;
1548 }
1549 
1550 STATIC void
1551 mvxpsec_session_unref(struct mvxpsec_session *mv_s)
1552 {
1553 	uint32_t refs;
1554 
1555 	refs = atomic_dec_32_nv(&mv_s->refs);
1556 	if (refs == 0)
1557 		pool_cache_put(mv_s->sc->sc_session_pool, mv_s);
1558 }
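
/*
 * Reference lifecycle (editor's summary of the code above and below):
 *
 *	mvxpsec_session_alloc()    refs = 1
 *	mvxpsec_packet_alloc()     refs++ via mvxpsec_session_ref()
 *	mvxpsec_packet_dealloc()   refs-- via mvxpsec_session_unref()
 *	mvxpsec_session_dealloc()  sets DELETED and drops the last ref
 *
 * pool_cache_put() runs only once refs reaches 0, so a session stays
 * valid while packets referencing it are still in flight.
 */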
1559 
1560 /*
1561  * look up the session and check whether it exists
1562  */
1563 INLINE struct mvxpsec_session *
1564 mvxpsec_session_lookup(struct mvxpsec_softc *sc, int sid)
1565 {
1566 	struct mvxpsec_session *mv_s;
1567 	int session;
1568 
1569 	/* must be called with sc->sc_session_mtx held */
1570 	KASSERT(mutex_owned(&sc->sc_session_mtx));
1571 
1572 	session = MVXPSEC_SESSION(sid);
1573 	if (__predict_false(session >= MVXPSEC_MAX_SESSIONS)) {
1574 		log(LOG_ERR, "%s: session number too large %d\n",
1575 		    __func__, session);
1576 		return NULL;
1577 	}
1578 	if (__predict_false( (mv_s = sc->sc_sessions[session]) == NULL)) {
1579 		log(LOG_ERR, "%s: invalid session %d\n",
1580 		    __func__, session);
1581 		return NULL;
1582 	}
1583 
1584 	KASSERT(mv_s->sid == session);
1585 
1586 	return mv_s;
1587 }
1588 
1589 /*
1590  * allocate a new packet structure.
1591  */
1592 STATIC struct mvxpsec_packet *
1593 mvxpsec_packet_alloc(struct mvxpsec_session *mv_s)
1594 {
1595 	struct mvxpsec_softc *sc = mv_s->sc;
1596 	struct mvxpsec_packet *mv_p;
1597 
1598 	/* must be called with sc->sc_queue_mtx held. */
1599 	KASSERT(mutex_owned(&sc->sc_queue_mtx));
1600 	/* must be called with sc->sc_session_mtx held. */
1601 	KASSERT(mutex_owned(&sc->sc_session_mtx));
1602 
1603 	if (mvxpsec_session_ref(mv_s) < 0) {
1604 		log(LOG_ERR, "%s: invalid session.\n", __func__);
1605 		return NULL;
1606 	}
1607 
1608 	if ( (mv_p = SLIST_FIRST(&sc->sc_free_list)) != NULL) {
1609 		SLIST_REMOVE_HEAD(&sc->sc_free_list, free_list);
1610 		sc->sc_free_qlen--;
1611 	}
1612 	else {
1613 		mv_p = pool_cache_get(sc->sc_packet_pool, PR_NOWAIT);
1614 		if (mv_p == NULL) {
1615 			log(LOG_ERR, "%s: cannot allocate memory\n",
1616 			    __func__);
1617 			mvxpsec_session_unref(mv_s);
1618 			return NULL;
1619 		}
1620 	}
1621 	mv_p->mv_s = mv_s;
1622 	mv_p->flags = 0;
1623 	mv_p->data_ptr = NULL;
1624 
1625 	return mv_p;
1626 }
1627 
1628 /*
1629  * free packet structure.
1630  */
1631 STATIC void
1632 mvxpsec_packet_dealloc(struct mvxpsec_packet *mv_p)
1633 {
1634 	struct mvxpsec_session *mv_s = mv_p->mv_s;
1635 	struct mvxpsec_softc *sc = mv_s->sc;
1636 
1637 	/* must be called with sc->sc_queue_mtx held */
1638 	KASSERT(mutex_owned(&sc->sc_queue_mtx));
1639 
1640 	if (mv_p->dma_ring.dma_size != 0) {
1641 		sc->sc_desc_ring_cons += mv_p->dma_ring.dma_size;
1642 	}
1643 	mv_p->dma_ring.dma_head = NULL;
1644 	mv_p->dma_ring.dma_last = NULL;
1645 	mv_p->dma_ring.dma_size = 0;
1646 
1647 	if (mv_p->data_map) {
1648 		if (mv_p->flags & RDY_DATA) {
1649 			bus_dmamap_unload(sc->sc_dmat, mv_p->data_map);
1650 			mv_p->flags &= ~RDY_DATA;
1651 		}
1652 	}
1653 
1654 	if (sc->sc_free_qlen > sc->sc_wait_qlimit)
1655 		pool_cache_put(sc->sc_packet_pool, mv_p);
1656 	else {
1657 		SLIST_INSERT_HEAD(&sc->sc_free_list, mv_p, free_list);
1658 		sc->sc_free_qlen++;
1659 	}
1660 	mvxpsec_session_unref(mv_s);
1661 }
1662 
1663 INLINE void
1664 mvxpsec_packet_enqueue(struct mvxpsec_packet *mv_p)
1665 {
1666 	struct mvxpsec_softc *sc = mv_p->mv_s->sc;
1667 	struct mvxpsec_packet *last_packet;
1668 	struct mvxpsec_descriptor_handle *cur_dma, *prev_dma;
1669 
1670 	/* must be called with sc->sc_queue_mtx held */
1671 	KASSERT(mutex_owned(&sc->sc_queue_mtx));
1672 
1673 	if (sc->sc_wait_qlen == 0) {
1674 		SIMPLEQ_INSERT_TAIL(&sc->sc_wait_queue, mv_p, queue);
1675 		sc->sc_wait_qlen++;
1676 		mv_p->flags |= SETUP_DONE;
1677 		return;
1678 	}
1679 
1680 	last_packet = SIMPLEQ_LAST(&sc->sc_wait_queue, mvxpsec_packet, queue);
1681 	SIMPLEQ_INSERT_TAIL(&sc->sc_wait_queue, mv_p, queue);
1682 	sc->sc_wait_qlen++;
1683 
1684 	/* chain the DMA */
1685 	cur_dma = mv_p->dma_ring.dma_head;
1686 	prev_dma = last_packet->dma_ring.dma_last;
1687 	mvxpsec_dma_cat(sc, prev_dma, cur_dma);
1688 	mv_p->flags |= SETUP_DONE;
1689 }
1690 
1691 /*
1692  * called by interrupt handler
1693  */
1694 STATIC int
1695 mvxpsec_done_packet(struct mvxpsec_packet *mv_p)
1696 {
1697 	struct mvxpsec_session *mv_s = mv_p->mv_s;
1698 	struct mvxpsec_softc *sc = mv_s->sc;
1699 
1700 	KASSERT((mv_p->flags & RDY_DATA));
1701 	KASSERT((mv_p->flags & SETUP_DONE));
1702 
1703 	/* unload data */
1704 	bus_dmamap_sync(sc->sc_dmat, mv_p->data_map,
1705 	    0, mv_p->data_len,
1706 	    BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
1707 	bus_dmamap_unload(sc->sc_dmat, mv_p->data_map);
1708 	mv_p->flags &= ~RDY_DATA;
1709 
1710 #ifdef MVXPSEC_DEBUG
1711 	if (mvxpsec_debug != 0) {
1712 		int s;
1713 
1714 		bus_dmamap_sync(sc->sc_dmat, mv_p->pkt_header_map,
1715 		    0, sizeof(mv_p->pkt_header),
1716 		    BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
1717 		bus_dmamap_sync(sc->sc_dmat, mv_s->session_header_map,
1718 		    0, sizeof(mv_s->session_header),
1719 		    BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
1720 
1721 		if (mvxpsec_debug & MVXPSEC_DEBUG_OPENCRYPTO) {
1722 			char buf[1500];
1723 			struct mbuf *m;
1724 			struct uio *uio;
1725 			size_t len;
1726 
1727 			switch (mv_p->data_type) {
1728 			case MVXPSEC_DATA_MBUF:
1729 				m = mv_p->data_mbuf;
1730 				len = m->m_pkthdr.len;
1731 				if (len > sizeof(buf))
1732 					len = sizeof(buf);
1733 				m_copydata(m, 0, len, buf);
1734 				break;
1735 			case MVXPSEC_DATA_UIO:
1736 				uio = mv_p->data_uio;
1737 				len = uio->uio_resid;
1738 				if (len > sizeof(buf))
1739 					len = sizeof(buf);
1740 				cuio_copydata(uio, 0, len, buf);
1741 				break;
1742 			default:
1743 				len = 0;
1744 			}
1745 			if (len > 0)
1746 				mvxpsec_dump_data(__func__, buf, len);
1747 		}
1748 
1749 		if (mvxpsec_debug & MVXPSEC_DEBUG_PAYLOAD) {
1750 			MVXPSEC_PRINTF(MVXPSEC_DEBUG_PAYLOAD,
1751 			    "%s: session_descriptor:\n", __func__);
1752 			mvxpsec_dump_packet_desc(__func__, mv_p);
1753 			MVXPSEC_PRINTF(MVXPSEC_DEBUG_PAYLOAD,
1754 			    "%s: session_data:\n", __func__);
1755 			mvxpsec_dump_packet_data(__func__, mv_p);
1756 		}
1757 
1758 		if (mvxpsec_debug & MVXPSEC_DEBUG_SRAM) {
1759 			MVXPSEC_PRINTF(MVXPSEC_DEBUG_SRAM,
1760 			    "%s: SRAM\n", __func__);
1761 			mvxpsec_dump_sram(__func__, sc, 2000);
1762 		}
1763 
1764 		s = MVXPSEC_READ(sc, MV_ACC_STATUS);
1765 		if (s & MV_ACC_STATUS_MAC_ERR) {
1766 			MVXPSEC_PRINTF(MVXPSEC_DEBUG_INTR,
1767 			    "%s: Message Authentication Failed.\n", __func__);
1768 		}
1769 	}
1770 #endif
1771 
1772 	/* copy back IV */
1773 	if (mv_p->flags & CRP_EXT_IV) {
1774 		memcpy(mv_p->ext_iv,
1775 		    &mv_p->pkt_header.crp_iv_ext, mv_p->ext_ivlen);
1776 		mv_p->ext_iv = NULL;
1777 		mv_p->ext_ivlen = 0;
1778 	}
1779 
1780 	/* notify opencrypto */
1781 	mv_p->crp->crp_etype = 0;
1782 	crypto_done(mv_p->crp);
1783 	mv_p->crp = NULL;
1784 
1785 	/* unblock driver */
1786 	mvxpsec_packet_dealloc(mv_p);
1787 	crypto_unblock(sc->sc_cid, CRYPTO_SYMQ|CRYPTO_ASYMQ);
1788 
1789 	MVXPSEC_EVCNT_INCR(sc, packet_ok);
1790 
1791 	return 0;
1792 }
1793 
1794 
1795 /*
1796  * Opencrypto API registration
1797  */
1798 int
1799 mvxpsec_register(struct mvxpsec_softc *sc)
1800 {
1801 	int oplen = SRAM_PAYLOAD_SIZE;
1802 	int flags = 0;
1803 	int err;
1804 
1805 	sc->sc_nsessions = 0;
1806 	sc->sc_cid = crypto_get_driverid(0);
1807 	if (sc->sc_cid < 0) {
1808 		log(LOG_ERR,
1809 		    "%s: crypto_get_driverid() failed.\n", __func__);
1810 		err = EINVAL;
1811 		goto done;
1812 	}
1813 
1814 	/* Ciphers */
1815 	err = crypto_register(sc->sc_cid, CRYPTO_DES_CBC, oplen, flags,
1816 	    mvxpsec_newsession, mvxpsec_freesession, mvxpsec_dispatch, sc);
1817 	if (err)
1818 		goto done;
1819 
1820 	err = crypto_register(sc->sc_cid, CRYPTO_3DES_CBC, oplen, flags,
1821 	    mvxpsec_newsession, mvxpsec_freesession, mvxpsec_dispatch, sc);
1822 	if (err)
1823 		goto done;
1824 
1825 	err = crypto_register(sc->sc_cid, CRYPTO_AES_CBC, oplen, flags,
1826 	    mvxpsec_newsession, mvxpsec_freesession, mvxpsec_dispatch, sc);
1827 	if (err)
1828 		goto done;
1829 
1830 	/* MACs */
1831 	err = crypto_register(sc->sc_cid, CRYPTO_SHA1_HMAC_96,
1832 	    oplen, flags,
1833 	    mvxpsec_newsession, mvxpsec_freesession, mvxpsec_dispatch, sc);
1834 	if (err)
1835 		goto done;
1836 
1837 	err = crypto_register(sc->sc_cid, CRYPTO_MD5_HMAC_96,
1838 	    oplen, flags,
1839 	    mvxpsec_newsession, mvxpsec_freesession, mvxpsec_dispatch, sc);
1840 	if (err)
1841 		goto done;
1842 
1843 #ifdef DEBUG
1844 	log(LOG_DEBUG,
1845 	    "%s: registered to opencrypto(max data = %d bytes)\n",
1846 	    device_xname(sc->sc_dev), oplen);
1847 #endif
1848 
1849 	err = 0;
1850 done:
1851 	return err;
1852 }
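
/*
 * Note that 'oplen' above advertises SRAM_PAYLOAD_SIZE as the maximum
 * operation length, so the opencrypto layer already knows the largest
 * request this engine accepts; mvxpsec_packet_setmbuf()/_setuio()
 * nevertheless re-check the limit and count violations as
 * ESP_STAT_TOOBIG.
 */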
1853 
1854 /*
1855  * Create new opencrypto session
1856  *
1857  *   - register cipher key, mac key.
1858  *   - initialize mac internal state.
1859  */
1860 int
1861 mvxpsec_newsession(void *arg, uint32_t *sidp, struct cryptoini *cri)
1862 {
1863 	struct mvxpsec_softc *sc = arg;
1864 	struct mvxpsec_session *mv_s = NULL;
1865 	struct cryptoini *c;
1866 	static int hint = 0;
1867 	int session = -1;
1868 	int sid;
1869 	int err;
1870 	int i;
1871 
1872 	/* allocate driver session context */
1873 	mv_s = mvxpsec_session_alloc(sc);
1874 	if (mv_s == NULL)
1875 		return ENOMEM;
1876 
1877 	/*
1878 	 * lookup opencrypto session table
1879 	 *
1880 	 * we have sc_session_mtx after here.
1881 	 */
1882 	mutex_enter(&sc->sc_session_mtx);
1883 	if (sc->sc_nsessions >= MVXPSEC_MAX_SESSIONS) {
1884 		mutex_exit(&sc->sc_session_mtx);
		log(LOG_ERR, "%s: too many IPsec SAs (max %d)\n",
		    __func__, MVXPSEC_MAX_SESSIONS);
1887 		mvxpsec_session_dealloc(mv_s);
1888 		return ENOMEM;
1889 	}
1890 	for (i = hint; i < MVXPSEC_MAX_SESSIONS; i++) {
1891 		if (sc->sc_sessions[i])
1892 			continue;
1893 		session = i;
1894 		hint = session + 1;
1895 	       	break;
1896 	}
1897 	if (session < 0) {
1898 		for (i = 0; i < hint; i++) {
1899 			if (sc->sc_sessions[i])
1900 				continue;
1901 			session = i;
1902 			hint = session + 1;
1903 			break;
1904 		}
1905 		if (session < 0) {
1906 			mutex_exit(&sc->sc_session_mtx);
1907 			/* session full */
			log(LOG_ERR, "%s: too many IPsec SAs (max %d)\n",
			    __func__, MVXPSEC_MAX_SESSIONS);
1910 			mvxpsec_session_dealloc(mv_s);
1911 			hint = 0;
1912 			return ENOMEM;
1913 		}
1914 	}
1915 	if (hint >= MVXPSEC_MAX_SESSIONS)
1916 		hint = 0;
1917 	sc->sc_nsessions++;
1918 	sc->sc_sessions[session] = mv_s;
1919 #ifdef DEBUG
1920 	log(LOG_DEBUG, "%s: new session %d allocated\n", __func__, session);
1921 #endif
1922 
1923 	sid = MVXPSEC_SID(device_unit(sc->sc_dev), session);
1924 	mv_s->sid = sid;
1925 
1926 	/* setup the session key ... */
1927 	for (c = cri; c; c = c->cri_next) {
1928 		switch (c->cri_alg) {
1929 		case CRYPTO_DES_CBC:
1930 		case CRYPTO_3DES_CBC:
1931 		case CRYPTO_AES_CBC:
1932 			/* key */
1933 			if (mvxpsec_key_precomp(c->cri_alg,
1934 			    c->cri_key, c->cri_klen,
1935 			    &mv_s->session_header.crp_key,
1936 			    &mv_s->session_header.crp_key_d)) {
1937 				log(LOG_ERR,
1938 				    "%s: Invalid HMAC key for %s.\n",
1939 				    __func__, s_ctlalg(c->cri_alg));
1940 				err = EINVAL;
1941 				goto fail;
1942 			}
1943 			if (mv_s->sflags & RDY_CRP_KEY) {
1944 				log(LOG_WARNING,
1945 				    "%s: overwrite cipher: %s->%s.\n",
1946 				    __func__,
1947 				    s_ctlalg(mv_s->cipher_alg),
1948 				    s_ctlalg(c->cri_alg));
1949 			}
1950 			mv_s->sflags |= RDY_CRP_KEY;
1951 			mv_s->enc_klen = c->cri_klen;
1952 			mv_s->cipher_alg = c->cri_alg;
1953 			/* create per session IV (compatible with KAME IPsec) */
1954 			cprng_fast(&mv_s->session_iv, sizeof(mv_s->session_iv));
1955 			mv_s->sflags |= RDY_CRP_IV;
1956 			break;
1957 		case CRYPTO_SHA1_HMAC_96:
1958 		case CRYPTO_MD5_HMAC_96:
1959 			/* key */
1960 			if (mvxpsec_hmac_precomp(c->cri_alg,
1961 			    c->cri_key, c->cri_klen,
1962 			    (uint32_t *)&mv_s->session_header.miv_in,
1963 			    (uint32_t *)&mv_s->session_header.miv_out)) {
1964 				log(LOG_ERR,
1965 				    "%s: Invalid MAC key\n", __func__);
1966 				err = EINVAL;
1967 				goto fail;
1968 			}
1969 			if (mv_s->sflags & RDY_MAC_KEY ||
1970 			    mv_s->sflags & RDY_MAC_IV) {
1971 				log(LOG_ERR,
1972 				    "%s: overwrite HMAC: %s->%s.\n",
1973 				    __func__, s_ctlalg(mv_s->hmac_alg),
1974 				    s_ctlalg(c->cri_alg));
1975 			}
1976 			mv_s->sflags |= RDY_MAC_KEY;
1977 			mv_s->sflags |= RDY_MAC_IV;
1978 
1979 			mv_s->mac_klen = c->cri_klen;
1980 			mv_s->hmac_alg = c->cri_alg;
1981 			break;
1982 		default:
1983 			log(LOG_ERR, "%s: Unknown algorithm %d\n",
1984 			    __func__, c->cri_alg);
1985 			err = EINVAL;
1986 			goto fail;
1987 		}
1988 	}
1989 	MVXPSEC_PRINTF(MVXPSEC_DEBUG_OPENCRYPTO,
1990 	    "H/W Crypto session (id:%u) added.\n", session);
1991 
1992 	*sidp = sid;
1993 	MVXPSEC_EVCNT_INCR(sc, session_new);
1994 	mutex_exit(&sc->sc_session_mtx);
1995 
	/* sync the session header (it is never touched after this point) */
1997 	bus_dmamap_sync(sc->sc_dmat,
1998 	    mv_s->session_header_map,
1999 	    0, sizeof(mv_s->session_header),
2000 	    BUS_DMASYNC_PREWRITE);
2001 
2002 	return 0;
2003 
2004 fail:
2005 	sc->sc_nsessions--;
2006 	sc->sc_sessions[session] = NULL;
2007 	hint = session;
2008 	if (mv_s)
2009 		mvxpsec_session_dealloc(mv_s);
	log(LOG_WARNING,
	    "%s: Failed to add H/W crypto session (id:%d): err=%d\n",
	    __func__, session, err);
2013 
2014 	mutex_exit(&sc->sc_session_mtx);
2015 	return err;
2016 }
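
/*
 * The session id returned to opencrypto is composed with
 * MVXPSEC_SID(unit, session) and decomposed again in
 * mvxpsec_freesession() with MVXPSEC_SESSION(sid).  Illustration
 * (the exact bit layout is defined in mvxpsecvar.h):
 *
 *	sid = MVXPSEC_SID(device_unit(sc->sc_dev), session);
 *	...
 *	session = MVXPSEC_SESSION(sid);
 */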
2017 
2018 /*
2019  * remove opencrypto session
2020  */
2021 int
2022 mvxpsec_freesession(void *arg, uint64_t tid)
2023 {
2024 	struct mvxpsec_softc *sc = arg;
2025 	struct mvxpsec_session *mv_s;
2026 	int session;
2027 	uint32_t sid = ((uint32_t)tid) & 0xffffffff;
2028 
2029 	session = MVXPSEC_SESSION(sid);
2030 	if (session < 0 || session >= MVXPSEC_MAX_SESSIONS) {
		log(LOG_ERR, "%s: invalid session (id:%d)\n",
		    __func__, session);
2033 		return EINVAL;
2034 	}
2035 
2036 	mutex_enter(&sc->sc_session_mtx);
2037 	if ( (mv_s = sc->sc_sessions[session]) == NULL) {
2038 		mutex_exit(&sc->sc_session_mtx);
2039 #ifdef DEBUG
2040 		log(LOG_DEBUG, "%s: session %d already inactivated\n",
2041 		    __func__, session);
2042 #endif
2043 		return ENOENT;
2044 	}
2045 	MVXPSEC_PRINTF(MVXPSEC_DEBUG_OPENCRYPTO,
2046 	    "%s: inactivate session %d\n", __func__, session);
2047 
2048 	/* inactivate mvxpsec session */
2049 	sc->sc_sessions[session] = NULL;
2050 	sc->sc_nsessions--;
2051 	sc->sc_last_session = NULL;
2052 	mutex_exit(&sc->sc_session_mtx);
2053 
2054 	KASSERT(sc->sc_nsessions >= 0);
2055 	KASSERT(mv_s->sid == sid);
2056 
2057 	mvxpsec_session_dealloc(mv_s);
2058 	MVXPSEC_PRINTF(MVXPSEC_DEBUG_OPENCRYPTO,
2059 	    "H/W Crypto session (id: %d) deleted.\n", session);
2060 
2061 	/* force unblock opencrypto */
2062 	crypto_unblock(sc->sc_cid, CRYPTO_SYMQ|CRYPTO_ASYMQ);
2063 
2064 	MVXPSEC_EVCNT_INCR(sc, session_free);
2065 
2066 	return 0;
2067 }
2068 
2069 /*
2070  * process data with existing session
2071  */
2072 int
2073 mvxpsec_dispatch(void *arg, struct cryptop *crp, int hint)
2074 {
2075 	struct mvxpsec_softc *sc = arg;
2076 	struct mvxpsec_session *mv_s;
2077 	struct mvxpsec_packet *mv_p;
2078 	int q_full;
2079 	int running;
2080 	int err;
2081 
2082 	mutex_enter(&sc->sc_queue_mtx);
2083 
2084 	/*
2085 	 * lookup session
2086 	 */
2087 	mutex_enter(&sc->sc_session_mtx);
2088 	mv_s = mvxpsec_session_lookup(sc, crp->crp_sid);
2089 	if (__predict_false(mv_s == NULL)) {
2090 		err = EINVAL;
2091 		mv_p = NULL;
2092 		mutex_exit(&sc->sc_session_mtx);
2093 		goto fail;
2094 	}
2095 	mv_p = mvxpsec_packet_alloc(mv_s);
2096 	if (__predict_false(mv_p == NULL)) {
2097 		mutex_exit(&sc->sc_session_mtx);
2098 		mutex_exit(&sc->sc_queue_mtx);
2099 		return ERESTART; /* => queued in opencrypto layer */
2100 	}
2101 	mutex_exit(&sc->sc_session_mtx);
2102 
2103 	/*
2104 	 * check queue status
2105 	 */
2106 #ifdef MVXPSEC_MULTI_PACKET
2107 	q_full = (sc->sc_wait_qlen >= sc->sc_wait_qlimit) ? 1 : 0;
2108 #else
2109 	q_full = (sc->sc_wait_qlen != 0) ? 1 : 0;
2110 #endif
2111 	running = (sc->sc_flags & HW_RUNNING) ?  1: 0;
2112 	if (q_full) {
2113 		/* input queue is full. */
2114 		if (!running && sc->sc_wait_qlen > 0)
2115 			mvxpsec_dispatch_queue(sc);
2116 		MVXPSEC_EVCNT_INCR(sc, queue_full);
2117 		mvxpsec_packet_dealloc(mv_p);
2118 		mutex_exit(&sc->sc_queue_mtx);
2119 		return ERESTART; /* => queued in opencrypto layer */
2120 	}
2121 
2122 	/*
2123 	 * Load and setup packet data
2124 	 */
2125 	err = mvxpsec_packet_setcrp(mv_p, crp);
2126 	if (__predict_false(err))
2127 		goto fail;
2128 
2129 	/*
2130 	 * Setup DMA descriptor chains
2131 	 */
2132 	mutex_enter(&sc->sc_dma_mtx);
2133 	err = mvxpsec_dma_copy_packet(sc, mv_p);
2134 	mutex_exit(&sc->sc_dma_mtx);
2135 	if (__predict_false(err))
2136 		goto fail;
2137 
2138 #ifdef MVXPSEC_DEBUG
2139 	mvxpsec_dump_packet(__func__, mv_p);
2140 #endif
2141 
2142 	/*
2143 	 * Sync/inval the data cache
2144 	 */
2145 	err = mvxpsec_dma_sync_packet(sc, mv_p);
2146 	if (__predict_false(err))
2147 		goto fail;
2148 
2149 	/*
2150 	 * Enqueue the packet
2151 	 */
2152 	MVXPSEC_EVCNT_INCR(sc, dispatch_packets);
2153 #ifdef MVXPSEC_MULTI_PACKET
2154 	mvxpsec_packet_enqueue(mv_p);
2155 	if (!running)
2156 		mvxpsec_dispatch_queue(sc);
2157 #else
2158 	SIMPLEQ_INSERT_TAIL(&sc->sc_wait_queue, mv_p, queue);
2159 	sc->sc_wait_qlen++;
2160 	mv_p->flags |= SETUP_DONE;
2161 	if (!running)
2162 		mvxpsec_dispatch_queue(sc);
2163 #endif
2164 	mutex_exit(&sc->sc_queue_mtx);
2165 	return 0;
2166 
2167 fail:
2168 	/* Drop the incoming packet */
2169 	mvxpsec_drop(sc, crp, mv_p, err);
2170 	mutex_exit(&sc->sc_queue_mtx);
2171 	return 0;
2172 }
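
/*
 * Queueing model used above: new requests are appended to
 * sc_wait_queue under sc_queue_mtx; mvxpsec_dispatch_queue() moves
 * the whole wait queue to sc_run_queue and kicks the hardware, and
 * mvxpsec_done() drains the run queue on completion.  Without
 * MVXPSEC_MULTI_PACKET only one request may wait at a time, so the
 * queue-full test degenerates to (sc_wait_qlen != 0).
 */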
2173 
2174 /*
 * hand completed packets back to opencrypto and the IP stack
2176  */
2177 void
2178 mvxpsec_done(void *arg)
2179 {
2180 	struct mvxpsec_softc *sc = arg;
2181 	struct mvxpsec_packet *mv_p;
2182 	mvxpsec_queue_t ret_queue;
2183 	int ndone;
2184 
2185 	mutex_enter(&sc->sc_queue_mtx);
2186 
2187 	/* stop wdog timer */
2188 	callout_stop(&sc->sc_timeout);
2189 
2190 	/* refill MVXPSEC */
2191 	ret_queue = sc->sc_run_queue;
2192 	SIMPLEQ_INIT(&sc->sc_run_queue);
2193 	sc->sc_flags &= ~HW_RUNNING;
2194 	if (sc->sc_wait_qlen > 0)
2195 		mvxpsec_dispatch_queue(sc);
2196 
2197 	ndone = 0;
2198 	while ( (mv_p = SIMPLEQ_FIRST(&ret_queue)) != NULL) {
2199 		SIMPLEQ_REMOVE_HEAD(&ret_queue, queue);
2200 		mvxpsec_dma_free(sc, &mv_p->dma_ring);
2201 		mvxpsec_done_packet(mv_p);
2202 		ndone++;
2203 	}
2204 	MVXPSEC_EVCNT_MAX(sc, max_done, ndone);
2205 
2206 	mutex_exit(&sc->sc_queue_mtx);
2207 }
2208 
2209 /*
2210  * drop the packet
2211  */
2212 INLINE void
2213 mvxpsec_drop(struct mvxpsec_softc *sc, struct cryptop *crp,
2214     struct mvxpsec_packet *mv_p, int err)
2215 {
	/* must be called with sc->sc_queue_mtx held */
2217 	KASSERT(mutex_owned(&sc->sc_queue_mtx));
2218 
2219 	if (mv_p)
2220 		mvxpsec_packet_dealloc(mv_p);
2221 	if (err < 0)
2222 		err = EINVAL;
2223 	crp->crp_etype = err;
2224 	crypto_done(crp);
2225 	MVXPSEC_EVCNT_INCR(sc, packet_err);
2226 
2227 	/* dispatch other packets in queue */
2228 	if (sc->sc_wait_qlen > 0 &&
2229 	    !(sc->sc_flags & HW_RUNNING))
2230 		mvxpsec_dispatch_queue(sc);
2231 
2232 	/* unblock driver for dropped packet */
2233 	crypto_unblock(sc->sc_cid, CRYPTO_SYMQ|CRYPTO_ASYMQ);
2234 }
2235 
2236 /* move wait queue entry to run queue */
2237 STATIC int
2238 mvxpsec_dispatch_queue(struct mvxpsec_softc *sc)
2239 {
2240 	struct mvxpsec_packet *mv_p;
2241 	paddr_t head;
2242 	int ndispatch = 0;
2243 
	/* must be called with sc->sc_queue_mtx held */
2245 	KASSERT(mutex_owned(&sc->sc_queue_mtx));
2246 
2247 	/* check there is any task */
2248 	if (__predict_false(sc->sc_flags & HW_RUNNING)) {
		log(LOG_WARNING,
		    "%s: hardware is already running.\n", __func__);
2251 		return 0;
2252 	}
2253 	if (__predict_false(SIMPLEQ_EMPTY(&sc->sc_wait_queue))) {
		log(LOG_WARNING,
		    "%s: no waiting packets (qlen=%d).\n",
		    __func__, sc->sc_wait_qlen);
2257 		return 0;
2258 	}
2259 
2260 	/* move queue */
2261 	sc->sc_run_queue = sc->sc_wait_queue;
2262 	sc->sc_flags |= HW_RUNNING; /* dropped by intr or timeout */
2263 	SIMPLEQ_INIT(&sc->sc_wait_queue);
2264 	ndispatch = sc->sc_wait_qlen;
2265 	sc->sc_wait_qlen = 0;
2266 
2267 	/* get 1st DMA descriptor */
2268 	mv_p = SIMPLEQ_FIRST(&sc->sc_run_queue);
2269 	head = mv_p->dma_ring.dma_head->phys_addr;
2270 
2271 	/* terminate last DMA descriptor */
2272 	mv_p = SIMPLEQ_LAST(&sc->sc_run_queue, mvxpsec_packet, queue);
2273 	mvxpsec_dma_finalize(sc, &mv_p->dma_ring);
2274 
2275 	/* configure TDMA */
2276 	if (mvxpsec_dma_wait(sc) < 0) {
2277 		log(LOG_ERR, "%s: DMA DEVICE not responding", __func__);
2278 		callout_schedule(&sc->sc_timeout, hz);
2279 		return 0;
2280 	}
2281 	MVXPSEC_WRITE(sc, MV_TDMA_NXT, head);
2282 
2283 	/* trigger ACC */
2284 	if (mvxpsec_acc_wait(sc) < 0) {
2285 		log(LOG_ERR, "%s: MVXPSEC not responding", __func__);
2286 		callout_schedule(&sc->sc_timeout, hz);
2287 		return 0;
2288 	}
2289 	MVXPSEC_WRITE(sc, MV_ACC_COMMAND, MV_ACC_COMMAND_ACT);
2290 
2291 	MVXPSEC_EVCNT_MAX(sc, max_dispatch, ndispatch);
2292 	MVXPSEC_EVCNT_INCR(sc, dispatch_queue);
2293 	callout_schedule(&sc->sc_timeout, hz);
2294 	return 0;
2295 }
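
/*
 * The kick sequence above is: wait for TDMA/ACC to become idle, point
 * MV_TDMA_NXT at the physical address of the first DMA descriptor,
 * then set MV_ACC_COMMAND_ACT.  HW_RUNNING stays set until either the
 * completion interrupt or the watchdog callout (armed for one second
 * with callout_schedule(&sc->sc_timeout, hz)) clears it.
 */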
2296 
2297 /*
 * process opencrypto operations (cryptop) for packets.
2299  */
2300 INLINE int
2301 mvxpsec_parse_crd(struct mvxpsec_packet *mv_p, struct cryptodesc *crd)
2302 {
2303 	int ivlen;
2304 
2305 	KASSERT(mv_p->flags & RDY_DATA);
2306 
2307 	/* MAC & Ciphers: set data location and operation */
2308 	switch (crd->crd_alg) {
2309 	case CRYPTO_SHA1_HMAC_96:
2310 		mv_p->pkt_header.desc.acc_config |= MV_ACC_CRYPTO_MAC_96;
2311 		/* fall through */
2312 	case CRYPTO_SHA1_HMAC:
2313 		mv_p->mac_dst = crd->crd_inject;
2314 		mv_p->mac_off = crd->crd_skip;
2315 		mv_p->mac_len = crd->crd_len;
2316 		MV_ACC_CRYPTO_MAC_SET(mv_p->pkt_header.desc.acc_config,
2317 		    MV_ACC_CRYPTO_MAC_HMAC_SHA1);
2318 		mvxpsec_packet_update_op_order(mv_p, MV_ACC_CRYPTO_OP_MAC);
2319 		/* No more setup for MAC */
2320 		return 0;
2321 	case CRYPTO_MD5_HMAC_96:
2322 		mv_p->pkt_header.desc.acc_config |= MV_ACC_CRYPTO_MAC_96;
2323 		/* fall through */
2324 	case CRYPTO_MD5_HMAC:
2325 		mv_p->mac_dst = crd->crd_inject;
2326 		mv_p->mac_off = crd->crd_skip;
2327 		mv_p->mac_len = crd->crd_len;
2328 		MV_ACC_CRYPTO_MAC_SET(mv_p->pkt_header.desc.acc_config,
2329 		    MV_ACC_CRYPTO_MAC_HMAC_MD5);
2330 		mvxpsec_packet_update_op_order(mv_p, MV_ACC_CRYPTO_OP_MAC);
2331 		/* No more setup for MAC */
2332 		return 0;
2333 	case CRYPTO_DES_CBC:
2334 		mv_p->enc_ivoff = crd->crd_inject;
2335 		mv_p->enc_off = crd->crd_skip;
2336 		mv_p->enc_len = crd->crd_len;
2337 		ivlen = 8;
2338 		MV_ACC_CRYPTO_ENC_SET(mv_p->pkt_header.desc.acc_config,
2339 		    MV_ACC_CRYPTO_ENC_DES);
2340 		mv_p->pkt_header.desc.acc_config |= MV_ACC_CRYPTO_CBC;
2341 		mvxpsec_packet_update_op_order(mv_p, MV_ACC_CRYPTO_OP_ENC);
2342 		break;
2343 	case CRYPTO_3DES_CBC:
2344 		mv_p->enc_ivoff = crd->crd_inject;
2345 		mv_p->enc_off = crd->crd_skip;
2346 		mv_p->enc_len = crd->crd_len;
2347 		ivlen = 8;
2348 		MV_ACC_CRYPTO_ENC_SET(mv_p->pkt_header.desc.acc_config,
2349 		    MV_ACC_CRYPTO_ENC_3DES);
2350 		mv_p->pkt_header.desc.acc_config |= MV_ACC_CRYPTO_CBC;
2351 		mv_p->pkt_header.desc.acc_config |= MV_ACC_CRYPTO_3DES_EDE;
2352 		mvxpsec_packet_update_op_order(mv_p, MV_ACC_CRYPTO_OP_ENC);
2353 		break;
2354 	case CRYPTO_AES_CBC:
2355 		mv_p->enc_ivoff = crd->crd_inject;
2356 		mv_p->enc_off = crd->crd_skip;
2357 		mv_p->enc_len = crd->crd_len;
2358 		ivlen = 16;
2359 		MV_ACC_CRYPTO_ENC_SET(mv_p->pkt_header.desc.acc_config,
2360 		    MV_ACC_CRYPTO_ENC_AES);
2361 		MV_ACC_CRYPTO_AES_KLEN_SET(
2362 		    mv_p->pkt_header.desc.acc_config,
		    mvxpsec_aesklen(mv_p->mv_s->enc_klen));
2364 		mv_p->pkt_header.desc.acc_config |= MV_ACC_CRYPTO_CBC;
2365 		mvxpsec_packet_update_op_order(mv_p, MV_ACC_CRYPTO_OP_ENC);
2366 		break;
2367 	default:
2368 		log(LOG_ERR, "%s: Unknown algorithm %d\n",
2369 		    __func__, crd->crd_alg);
2370 		return EINVAL;
2371 	}
2372 
2373 	/* Operations only for Cipher, not MAC */
2374 	if (crd->crd_flags & CRD_F_ENCRYPT) {
2375 		/* Ciphers: Originate IV for Encryption.*/
2376 		mv_p->pkt_header.desc.acc_config &= ~MV_ACC_CRYPTO_DECRYPT;
2377 		mv_p->flags |= DIR_ENCRYPT;
2378 
2379 		if (crd->crd_flags & CRD_F_IV_EXPLICIT) {
2380 			MVXPSEC_PRINTF(MVXPSEC_DEBUG_ENC_IV, "EXPLICIT IV\n");
2381 			mv_p->flags |= CRP_EXT_IV;
2382 			mvxpsec_packet_write_iv(mv_p, crd->crd_iv, ivlen);
2383 			mv_p->enc_ivoff = MVXPSEC_SRAM_IV_EXT_OFF;
2384 		}
2385 		else if (crd->crd_flags & CRD_F_IV_PRESENT) {
2386 			MVXPSEC_PRINTF(MVXPSEC_DEBUG_ENC_IV, "IV is present\n");
2387 			mvxpsec_packet_copy_iv(mv_p, crd->crd_inject, ivlen);
2388 		}
2389 		else {
2390 			MVXPSEC_PRINTF(MVXPSEC_DEBUG_ENC_IV, "Create New IV\n");
2391 			mvxpsec_packet_write_iv(mv_p, NULL, ivlen);
2392 		}
2393 	}
2394 	else {
		/* Ciphers: the IV is loaded from crd_inject when present */
2396 		mv_p->pkt_header.desc.acc_config |= MV_ACC_CRYPTO_DECRYPT;
2397 		mv_p->flags |= DIR_DECRYPT;
2398 
2399 		if (crd->crd_flags & CRD_F_IV_EXPLICIT) {
2400 #ifdef MVXPSEC_DEBUG
2401 			if (mvxpsec_debug & MVXPSEC_DEBUG_ENC_IV) {
2402 				MVXPSEC_PRINTF(MVXPSEC_DEBUG_ENC_IV,
2403 				    "EXPLICIT IV(Decrypt)\n");
2404 				mvxpsec_dump_data(__func__, crd->crd_iv, ivlen);
2405 			}
2406 #endif
2407 			mv_p->flags |= CRP_EXT_IV;
2408 			mvxpsec_packet_write_iv(mv_p, crd->crd_iv, ivlen);
2409 			mv_p->enc_ivoff = MVXPSEC_SRAM_IV_EXT_OFF;
2410 		}
2411 	}
2412 
2413 	KASSERT(!((mv_p->flags & DIR_ENCRYPT) && (mv_p->flags & DIR_DECRYPT)));
2414 
2415 	return 0;
2416 }
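
/*
 * IV selection summary for the cipher cases above (a sketch of the
 * behaviour implemented in mvxpsec_parse_crd()):
 *
 *	encrypt + CRD_F_IV_EXPLICIT: use crd_iv, copy it back on
 *	                             completion
 *	encrypt + CRD_F_IV_PRESENT:  read the IV from the data at
 *	                             crd_inject
 *	encrypt, no flag:            generate one (per-session IV or
 *	                             cprng_fast())
 *	decrypt + CRD_F_IV_EXPLICIT: use crd_iv
 *	decrypt, no flag:            the engine reads the IV from the
 *	                             data at crd_inject
 */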
2417 
2418 INLINE int
2419 mvxpsec_parse_crp(struct mvxpsec_packet *mv_p)
2420 {
2421 	struct cryptop *crp = mv_p->crp;
2422 	struct cryptodesc *crd;
2423 	int err;
2424 
2425 	KASSERT(crp);
2426 
2427 	mvxpsec_packet_reset_op(mv_p);
2428 
2429 	for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
2430 		err = mvxpsec_parse_crd(mv_p, crd);
2431 		if (err)
2432 			return err;
2433 	}
2434 
2435 	return 0;
2436 }
2437 
2438 INLINE int
2439 mvxpsec_packet_setcrp(struct mvxpsec_packet *mv_p, struct cryptop *crp)
2440 {
2441 	int err = EINVAL;
2442 
	/* register crp with the MVXPSEC packet */
2444 	if (crp->crp_flags & CRYPTO_F_IMBUF) {
2445 		err = mvxpsec_packet_setmbuf(mv_p,
2446 		    (struct mbuf *)crp->crp_buf);
2447 		mv_p->crp = crp;
2448 	}
2449 	else if (crp->crp_flags & CRYPTO_F_IOV) {
2450 		err = mvxpsec_packet_setuio(mv_p,
2451 		    (struct uio *)crp->crp_buf);
2452 		mv_p->crp = crp;
2453 	}
2454 	else {
2455 		err = mvxpsec_packet_setdata(mv_p,
2456 		    (struct mbuf *)crp->crp_buf, crp->crp_ilen);
2457 		mv_p->crp = crp;
2458 	}
2459 	if (__predict_false(err))
2460 		return err;
2461 
2462 	/* parse crp and setup MVXPSEC registers/descriptors */
2463 	err = mvxpsec_parse_crp(mv_p);
2464 	if (__predict_false(err))
2465 		return err;
2466 
2467 	/* fixup data offset to fit MVXPSEC internal SRAM */
2468 	err = mvxpsec_header_finalize(mv_p);
2469 	if (__predict_false(err))
2470 		return err;
2471 
2472 	return 0;
2473 }
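
/*
 * crp_buf is an mbuf chain, a uio, or a plain kernel buffer depending
 * on crp_flags (CRYPTO_F_IMBUF/CRYPTO_F_IOV/neither); the three
 * mvxpsec_packet_set*() loaders below normalize all of them into the
 * same bus_dma mapping, so the rest of the driver only has to look at
 * mv_p->data_type.
 */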
2474 
2475 /*
2476  * load data for encrypt/decrypt/authentication
2477  *
2478  * data is raw kernel memory area.
2479  */
2480 STATIC int
2481 mvxpsec_packet_setdata(struct mvxpsec_packet *mv_p,
2482     void *data, uint32_t data_len)
2483 {
2484 	struct mvxpsec_session *mv_s = mv_p->mv_s;
2485 	struct mvxpsec_softc *sc = mv_s->sc;
2486 
2487 	if (bus_dmamap_load(sc->sc_dmat, mv_p->data_map, data, data_len,
2488 	    NULL, BUS_DMA_NOWAIT)) {
2489 		log(LOG_ERR, "%s: cannot load data\n", __func__);
2490 		return -1;
2491 	}
2492 	mv_p->data_type = MVXPSEC_DATA_RAW;
2493 	mv_p->data_raw = data;
2494 	mv_p->data_len = data_len;
2495 	mv_p->flags |= RDY_DATA;
2496 
2497 	return 0;
2498 }
2499 
2500 /*
2501  * load data for encrypt/decrypt/authentication
2502  *
2503  * data is mbuf based network data.
2504  */
2505 STATIC int
2506 mvxpsec_packet_setmbuf(struct mvxpsec_packet *mv_p, struct mbuf *m)
2507 {
2508 	struct mvxpsec_session *mv_s = mv_p->mv_s;
2509 	struct mvxpsec_softc *sc = mv_s->sc;
2510 	size_t pktlen = 0;
2511 
2512 	if (__predict_true(m->m_flags & M_PKTHDR))
2513 		pktlen = m->m_pkthdr.len;
2514 	else {
2515 		struct mbuf *mp = m;
2516 
2517 		while (mp != NULL) {
2518 			pktlen += m->m_len;
2519 			mp = mp->m_next;
2520 		}
2521 	}
2522 	if (pktlen > SRAM_PAYLOAD_SIZE) {
2523 #if NIPSEC > 0
2524 		extern   percpu_t *espstat_percpu;
		/* XXX:
		 * layer violation: opencrypto knows our max packet size
		 * from the crypto_register(9) API.
		 */
2529 
2530 		_NET_STATINC(espstat_percpu, ESP_STAT_TOOBIG);
2531 #endif
2532 		log(LOG_ERR,
2533 		    "%s: ESP Packet too large: %zu [oct.] > %zu [oct.]\n",
2534 		    device_xname(sc->sc_dev),
2535 		    (size_t)pktlen, SRAM_PAYLOAD_SIZE);
2536 		mv_p->data_type = MVXPSEC_DATA_NONE;
2537 		mv_p->data_mbuf = NULL;
2538 		return -1;
2539 	}
2540 
2541 	if (bus_dmamap_load_mbuf(sc->sc_dmat, mv_p->data_map, m,
2542 	    BUS_DMA_NOWAIT)) {
2543 		mv_p->data_type = MVXPSEC_DATA_NONE;
2544 		mv_p->data_mbuf = NULL;
2545 		log(LOG_ERR, "%s: cannot load mbuf\n", __func__);
2546 		return -1;
2547 	}
2548 
2549 	/* set payload buffer */
2550 	mv_p->data_type = MVXPSEC_DATA_MBUF;
2551 	mv_p->data_mbuf = m;
2552 	if (m->m_flags & M_PKTHDR) {
2553 		mv_p->data_len = m->m_pkthdr.len;
2554 	}
2555 	else {
2556 		mv_p->data_len = 0;
2557 		while (m) {
2558 			mv_p->data_len += m->m_len;
2559 			m = m->m_next;
2560 		}
2561 	}
2562 	mv_p->flags |= RDY_DATA;
2563 
2564 	return 0;
2565 }
2566 
2567 STATIC int
2568 mvxpsec_packet_setuio(struct mvxpsec_packet *mv_p, struct uio *uio)
2569 {
2570 	struct mvxpsec_session *mv_s = mv_p->mv_s;
2571 	struct mvxpsec_softc *sc = mv_s->sc;
2572 
2573 	if (uio->uio_resid > SRAM_PAYLOAD_SIZE) {
2574 #if NIPSEC > 0
2575 		extern   percpu_t *espstat_percpu;
		/* XXX:
		 * layer violation: opencrypto knows our max packet size
		 * from the crypto_register(9) API.
		 */
2580 
2581 		_NET_STATINC(espstat_percpu, ESP_STAT_TOOBIG);
2582 #endif
2583 		log(LOG_ERR,
2584 		    "%s: uio request too large: %zu [oct.] > %zu [oct.]\n",
2585 		    device_xname(sc->sc_dev),
2586 		    uio->uio_resid, SRAM_PAYLOAD_SIZE);
2587 		mv_p->data_type = MVXPSEC_DATA_NONE;
2588 		mv_p->data_mbuf = NULL;
2589 		return -1;
2590 	}
2591 
2592 	if (bus_dmamap_load_uio(sc->sc_dmat, mv_p->data_map, uio,
2593 	    BUS_DMA_NOWAIT)) {
2594 		mv_p->data_type = MVXPSEC_DATA_NONE;
2595 		mv_p->data_mbuf = NULL;
2596 		log(LOG_ERR, "%s: cannot load uio buf\n", __func__);
2597 		return -1;
2598 	}
2599 
2600 	/* set payload buffer */
2601 	mv_p->data_type = MVXPSEC_DATA_UIO;
2602 	mv_p->data_uio = uio;
2603 	mv_p->data_len = uio->uio_resid;
2604 	mv_p->flags |= RDY_DATA;
2605 
2606 	return 0;
2607 }
2608 
2609 STATIC int
2610 mvxpsec_packet_rdata(struct mvxpsec_packet *mv_p,
2611     int off, int len, void *cp)
2612 {
2613 	uint8_t *p;
2614 
2615 	if (mv_p->data_type == MVXPSEC_DATA_RAW) {
2616 		p = (uint8_t *)mv_p->data_raw + off;
2617 		memcpy(cp, p, len);
2618 	}
2619 	else if (mv_p->data_type == MVXPSEC_DATA_MBUF) {
2620 		m_copydata(mv_p->data_mbuf, off, len, cp);
2621 	}
2622 	else if (mv_p->data_type == MVXPSEC_DATA_UIO) {
2623 		cuio_copydata(mv_p->data_uio, off, len, cp);
2624 	}
2625 	else
2626 		return -1;
2627 
2628 	return 0;
2629 }
2630 
2631 STATIC int
2632 mvxpsec_packet_wdata(struct mvxpsec_packet *mv_p,
2633     int off, int len, void *cp)
2634 {
2635 	uint8_t *p;
2636 
2637 	if (mv_p->data_type == MVXPSEC_DATA_RAW) {
2638 		p = (uint8_t *)mv_p->data_raw + off;
2639 		memcpy(p, cp, len);
2640 	}
2641 	else if (mv_p->data_type == MVXPSEC_DATA_MBUF) {
2642 		m_copyback(mv_p->data_mbuf, off, len, cp);
2643 	}
2644 	else if (mv_p->data_type == MVXPSEC_DATA_UIO) {
2645 		cuio_copyback(mv_p->data_uio, off, len, cp);
2646 	}
2647 	else
2648 		return -1;
2649 
2650 	return 0;
2651 }
2652 
2653 /*
 * Set the cipher initialization vector for this packet.
2655  */
2656 STATIC int
2657 mvxpsec_packet_write_iv(struct mvxpsec_packet *mv_p, void *iv, int ivlen)
2658 {
2659 	uint8_t ivbuf[16];
2660 
2661 	KASSERT(ivlen == 8 || ivlen == 16);
2662 
2663 	if (iv == NULL) {
2664 	       	if (mv_p->mv_s->sflags & RDY_CRP_IV) {
2665 			/* use per session IV (compatible with KAME IPsec) */
2666 			mv_p->pkt_header.crp_iv_work = mv_p->mv_s->session_iv;
2667 			mv_p->flags |= RDY_CRP_IV;
2668 			return 0;
2669 		}
2670 		cprng_fast(ivbuf, ivlen);
2671 		iv = ivbuf;
2672 	}
2673 	memcpy(&mv_p->pkt_header.crp_iv_work, iv, ivlen);
2674 	if (mv_p->flags & CRP_EXT_IV) {
2675 		memcpy(&mv_p->pkt_header.crp_iv_ext, iv, ivlen);
2676 		mv_p->ext_iv = iv;
2677 		mv_p->ext_ivlen = ivlen;
2678 	}
2679 	mv_p->flags |= RDY_CRP_IV;
2680 
2681 	return 0;
2682 }
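
/*
 * Note: when the caller supplies no IV and the session has one, the
 * same per-session IV (generated once in mvxpsec_newsession() with
 * cprng_fast()) is reused for every packet of the session, matching
 * the KAME IPsec behaviour referenced above.
 */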
2683 
2684 STATIC int
2685 mvxpsec_packet_copy_iv(struct mvxpsec_packet *mv_p, int off, int ivlen)
2686 {
2687 	mvxpsec_packet_rdata(mv_p, off, ivlen,
2688 	    &mv_p->pkt_header.crp_iv_work);
2689 	mv_p->flags |= RDY_CRP_IV;
2690 
2691 	return 0;
2692 }
2693 
2694 /*
 * set an encryption or decryption key for the session
2696  *
2697  * Input key material is big endian.
2698  */
2699 STATIC int
2700 mvxpsec_key_precomp(int alg, void *keymat, int kbitlen,
2701     void *key_encrypt, void *key_decrypt)
2702 {
2703 	uint32_t *kp = keymat;
2704 	uint32_t *ekp = key_encrypt;
2705 	uint32_t *dkp = key_decrypt;
2706 	int i;
2707 
2708 	switch (alg) {
2709 	case CRYPTO_DES_CBC:
2710 		if (kbitlen < 64 || (kbitlen % 8) != 0) {
2711 			log(LOG_WARNING,
2712 			    "mvxpsec: invalid DES keylen %d\n", kbitlen);
2713 			return EINVAL;
2714 		}
2715 		for (i = 0; i < 2; i++)
2716 			dkp[i] = ekp[i] = kp[i];
2717 		for (; i < 8; i++)
2718 			dkp[i] = ekp[i] = 0;
2719 		break;
2720 	case CRYPTO_3DES_CBC:
2721 		if (kbitlen < 192 || (kbitlen % 8) != 0) {
2722 			log(LOG_WARNING,
2723 			    "mvxpsec: invalid 3DES keylen %d\n", kbitlen);
2724 			return EINVAL;
2725 		}
2726 		for (i = 0; i < 8; i++)
2727 			dkp[i] = ekp[i] = kp[i];
2728 		break;
2729 	case CRYPTO_AES_CBC:
2730 		if (kbitlen < 128) {
2731 			log(LOG_WARNING,
2732 			    "mvxpsec: invalid AES keylen %d\n", kbitlen);
2733 			return EINVAL;
2734 		}
2735 		else if (kbitlen < 192) {
2736 			/* AES-128 */
2737 			for (i = 0; i < 4; i++)
2738 				ekp[i] = kp[i];
2739 			for (; i < 8; i++)
2740 				ekp[i] = 0;
2741 		}
2742 	       	else if (kbitlen < 256) {
2743 			/* AES-192 */
2744 			for (i = 0; i < 6; i++)
2745 				ekp[i] = kp[i];
2746 			for (; i < 8; i++)
2747 				ekp[i] = 0;
2748 		}
2749 		else  {
2750 			/* AES-256 */
2751 			for (i = 0; i < 8; i++)
2752 				ekp[i] = kp[i];
2753 		}
2754 		/* make decryption key */
2755 		mv_aes_deckey((uint8_t *)dkp, (uint8_t *)ekp, kbitlen);
2756 		break;
2757 	default:
2758 		for (i = 0; i < 8; i++)
			ekp[i] = dkp[i] = 0;
2760 		break;
2761 	}
2762 
2763 #ifdef MVXPSEC_DEBUG
2764 	if (mvxpsec_debug & MVXPSEC_DEBUG_OPENCRYPTO) {
2765 		MVXPSEC_PRINTF(MVXPSEC_DEBUG_OPENCRYPTO,
2766 		    "%s: keyregistered\n", __func__);
2767 		mvxpsec_dump_data(__func__, ekp, 32);
2768 	}
2769 #endif
2770 
2771 	return 0;
2772 }
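
/*
 * Example of the key layout produced above for AES-128:
 *
 *	ekp[0..3] = key material (big endian, as supplied)
 *	ekp[4..7] = 0
 *	dkp[0..7] = mv_aes_deckey(ekp) -- the round key the engine
 *	            needs to run AES in the decrypt direction
 */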
2773 
/*
 * set MAC key to the session
 *
 * The MAC engine has no register for the key itself, but it does have
 * inner and outer IV registers. Software must compute these IVs and
 * load them before enabling the engine.
 *
 * Each IV is the hash of an ipad/opad block, as defined by the
 * FIPS-198a standard.
 */
2784 STATIC int
2785 mvxpsec_hmac_precomp(int alg, void *key, int kbitlen,
2786     void *iv_inner, void *iv_outer)
2787 {
2788 	SHA1_CTX sha1;
2789 	MD5_CTX md5;
2790 	uint8_t *key8 = key;
2791 	uint8_t kbuf[64];
2792 	uint8_t ipad[64];
2793 	uint8_t opad[64];
2794 	uint32_t *iv_in = iv_inner;
2795 	uint32_t *iv_out = iv_outer;
2796 	int kbytelen;
2797 	int i;
2798 #define HMAC_IPAD 0x36
2799 #define HMAC_OPAD 0x5c
2800 
	kbytelen = kbitlen / 8;
	KASSERT(kbitlen == kbytelen * 8);
	if (kbytelen > 64) {
		/* RFC 2104: keys longer than one block are hashed first */
		if (alg == CRYPTO_MD5_HMAC_96 || alg == CRYPTO_MD5_HMAC) {
			MD5Init(&md5);
			MD5Update(&md5, key, kbytelen);
			MD5Final(kbuf, &md5);
			kbytelen = MD5_DIGEST_LENGTH;
		} else {
			SHA1Init(&sha1);
			SHA1Update(&sha1, key, kbytelen);
			SHA1Final(kbuf, &sha1);
			kbytelen = SHA1_DIGEST_LENGTH;
		}
		key8 = kbuf;
	}
2810 
2811 	/* make initial 64 oct. string */
2812 	switch (alg) {
2813 	case CRYPTO_SHA1_HMAC_96:
2814 	case CRYPTO_SHA1_HMAC:
2815 	case CRYPTO_MD5_HMAC_96:
2816 	case CRYPTO_MD5_HMAC:
2817 		for (i = 0; i < kbytelen; i++) {
2818 			ipad[i] = (key8[i] ^ HMAC_IPAD);
2819 			opad[i] = (key8[i] ^ HMAC_OPAD);
2820 		}
2821 		for (; i < 64; i++) {
2822 			ipad[i] = HMAC_IPAD;
2823 			opad[i] = HMAC_OPAD;
2824 		}
2825 		break;
2826 	default:
2827 		break;
2828 	}
2829 #ifdef MVXPSEC_DEBUG
2830 	if (mvxpsec_debug & MVXPSEC_DEBUG_OPENCRYPTO) {
2831 		MVXPSEC_PRINTF(MVXPSEC_DEBUG_OPENCRYPTO,
2832 		    "%s: HMAC-KEY Pre-comp:\n", __func__);
		mvxpsec_dump_data(__func__, key8, kbytelen);
2834 		MVXPSEC_PRINTF(MVXPSEC_DEBUG_OPENCRYPTO,
2835 		    "%s: ipad:\n", __func__);
2836 		mvxpsec_dump_data(__func__, ipad, sizeof(ipad));
2837 		MVXPSEC_PRINTF(MVXPSEC_DEBUG_OPENCRYPTO,
2838 		    "%s: opad:\n", __func__);
2839 		mvxpsec_dump_data(__func__, opad, sizeof(opad));
2840 	}
2841 #endif
2842 
2843 	/* make iv from string */
2844 	switch (alg) {
2845 	case CRYPTO_SHA1_HMAC_96:
2846 	case CRYPTO_SHA1_HMAC:
2847 		MVXPSEC_PRINTF(MVXPSEC_DEBUG_OPENCRYPTO,
2848 		    "%s: Generate iv_in(SHA1)\n", __func__);
2849 		SHA1Init(&sha1);
2850 		SHA1Update(&sha1, ipad, 64);
2851 		/* XXX: private state... (LE) */
2852 		iv_in[0] = htobe32(sha1.state[0]);
2853 		iv_in[1] = htobe32(sha1.state[1]);
2854 		iv_in[2] = htobe32(sha1.state[2]);
2855 		iv_in[3] = htobe32(sha1.state[3]);
2856 		iv_in[4] = htobe32(sha1.state[4]);
2857 
2858 		MVXPSEC_PRINTF(MVXPSEC_DEBUG_OPENCRYPTO,
2859 		    "%s: Generate iv_out(SHA1)\n", __func__);
2860 		SHA1Init(&sha1);
2861 		SHA1Update(&sha1, opad, 64);
2862 		/* XXX: private state... (LE) */
2863 		iv_out[0] = htobe32(sha1.state[0]);
2864 		iv_out[1] = htobe32(sha1.state[1]);
2865 		iv_out[2] = htobe32(sha1.state[2]);
2866 		iv_out[3] = htobe32(sha1.state[3]);
2867 		iv_out[4] = htobe32(sha1.state[4]);
2868 		break;
2869 	case CRYPTO_MD5_HMAC_96:
2870 	case CRYPTO_MD5_HMAC:
2871 		MVXPSEC_PRINTF(MVXPSEC_DEBUG_OPENCRYPTO,
2872 		    "%s: Generate iv_in(MD5)\n", __func__);
2873 		MD5Init(&md5);
2874 		MD5Update(&md5, ipad, sizeof(ipad));
2875 		/* XXX: private state... (LE) */
2876 		iv_in[0] = htobe32(md5.state[0]);
2877 		iv_in[1] = htobe32(md5.state[1]);
2878 		iv_in[2] = htobe32(md5.state[2]);
2879 		iv_in[3] = htobe32(md5.state[3]);
2880 		iv_in[4] = 0;
2881 
2882 		MVXPSEC_PRINTF(MVXPSEC_DEBUG_OPENCRYPTO,
2883 		    "%s: Generate iv_out(MD5)\n", __func__);
2884 		MD5Init(&md5);
2885 		MD5Update(&md5, opad, sizeof(opad));
2886 		/* XXX: private state... (LE) */
2887 		iv_out[0] = htobe32(md5.state[0]);
2888 		iv_out[1] = htobe32(md5.state[1]);
2889 		iv_out[2] = htobe32(md5.state[2]);
2890 		iv_out[3] = htobe32(md5.state[3]);
2891 		iv_out[4] = 0;
2892 		break;
2893 	default:
2894 		break;
2895 	}
2896 
2897 #ifdef MVXPSEC_DEBUG
2898 	if (mvxpsec_debug & MVXPSEC_DEBUG_HASH_IV) {
2899 		MVXPSEC_PRINTF(MVXPSEC_DEBUG_HASH_IV,
2900 		    "%s: HMAC IV-IN\n", __func__);
2901 		mvxpsec_dump_data(__func__, (uint8_t *)iv_in, 20);
2902 		MVXPSEC_PRINTF(MVXPSEC_DEBUG_HASH_IV,
2903 		    "%s: HMAC IV-OUT\n", __func__);
2904 		mvxpsec_dump_data(__func__, (uint8_t *)iv_out, 20);
2905 	}
2906 #endif
2907 
2908 	return 0;
2909 #undef HMAC_IPAD
2910 #undef HMAC_OPAD
2911 }
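
/*
 * The values stored above are the standard HMAC precomputation of
 * RFC 2104 / FIPS-198a:
 *
 *	iv_in  = H-state after compressing (K' ^ ipad)
 *	iv_out = H-state after compressing (K' ^ opad)
 *
 * which lets the engine later produce
 *
 *	HMAC(K, m) = H((K' ^ opad) || H((K' ^ ipad) || m))
 *
 * without ever holding the key; only two single-block compressions
 * happen here at session setup time.
 */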
2912 
2913 /*
2914  * AES Support routine
2915  */
static const uint8_t AES_SBOX[256] = {
2917 	 99, 124, 119, 123, 242, 107, 111, 197,  48,   1, 103,  43, 254, 215,
2918        	171, 118, 202, 130, 201, 125, 250,  89,  71, 240, 173, 212, 162, 175,
2919        	156, 164, 114, 192, 183, 253, 147,  38,  54,  63, 247, 204,  52, 165,
2920        	229, 241, 113, 216,  49,  21,   4, 199,  35, 195,  24, 150,   5, 154,
2921        	  7,  18, 128, 226, 235,  39, 178, 117,   9, 131,  44,  26,  27, 110,
2922 	 90, 160,  82,  59, 214, 179,  41, 227,  47, 132,  83, 209,   0, 237,
2923        	 32, 252, 177,  91, 106, 203, 190,  57,  74,  76,  88, 207, 208, 239,
2924 	170, 251,  67,  77,  51, 133,  69, 249,   2, 127,  80,  60, 159, 168,
2925 	 81, 163,  64, 143, 146, 157,  56, 245, 188, 182, 218,  33,  16, 255,
2926 	243, 210, 205,  12,  19, 236,  95, 151,  68,  23, 196, 167, 126,  61,
2927        	100,  93,  25, 115,  96, 129,  79, 220,  34,  42, 144, 136,  70, 238,
2928        	184,  20, 222,  94,  11, 219, 224,  50,  58,  10,  73,   6,  36,  92,
2929        	194, 211, 172,  98, 145, 149, 228, 121, 231, 200,  55, 109, 141, 213,
2930       	 78, 169, 108,  86, 244, 234, 101, 122, 174,   8, 186, 120,  37,  46,
2931        	 28, 166, 180, 198, 232, 221, 116,  31,  75, 189, 139, 138, 112,  62,
2932 	181, 102,  72,   3, 246,  14,  97,  53,  87, 185, 134, 193,  29, 158,
2933        	225, 248, 152,  17, 105, 217, 142, 148, 155,  30, 135, 233, 206,  85,
2934       	 40, 223, 140, 161, 137,  13, 191, 230,  66, 104,  65, 153,  45,  15,
2935 	176,  84, 187,  22
2936 };
2937 
static const uint32_t AES_RCON[30] = {
2939 	0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1b, 0x36, 0x6c, 0xd8,
2940        	0xab, 0x4d, 0x9a, 0x2f, 0x5e, 0xbc, 0x63, 0xc6, 0x97, 0x35, 0x6a, 0xd4,
2941        	0xb3, 0x7d, 0xfa, 0xef, 0xc5, 0x91
2942 };
2943 
2944 STATIC int
2945 mv_aes_ksched(uint8_t k[4][MAXKC], int keyBits,
2946     uint8_t W[MAXROUNDS+1][4][MAXBC])
2947 {
2948 	int KC, BC, ROUNDS;
2949 	int i, j, t, rconpointer = 0;
2950 	uint8_t tk[4][MAXKC];
2951 
2952 	switch (keyBits) {
2953 	case 128:
2954 		ROUNDS = 10;
2955 		KC = 4;
2956 		break;
2957 	case 192:
2958 		ROUNDS = 12;
2959 		KC = 6;
2960 	       	break;
2961 	case 256:
2962 		ROUNDS = 14;
2963 	       	KC = 8;
2964 	       	break;
2965 	default:
2966 	       	return (-1);
2967 	}
2968 	BC = 4; /* 128 bits */
2969 
2970 	for(j = 0; j < KC; j++)
2971 		for(i = 0; i < 4; i++)
2972 			tk[i][j] = k[i][j];
2973 	t = 0;
2974 
2975 	/* copy values into round key array */
2976 	for(j = 0; (j < KC) && (t < (ROUNDS+1)*BC); j++, t++)
2977 		for(i = 0; i < 4; i++) W[t / BC][i][t % BC] = tk[i][j];
2978 
2979 	while (t < (ROUNDS+1)*BC) { /* while not enough round key material calculated */
2980 		/* calculate new values */
2981 		for(i = 0; i < 4; i++)
2982 			tk[i][0] ^= AES_SBOX[tk[(i+1)%4][KC-1]];
2983 		tk[0][0] ^= AES_RCON[rconpointer++];
2984 
2985 		if (KC != 8)
2986 			for(j = 1; j < KC; j++)
2987 				for(i = 0; i < 4; i++)
2988 				       	tk[i][j] ^= tk[i][j-1];
		else {
			for(j = 1; j < KC/2; j++)
				for(i = 0; i < 4; i++)
					tk[i][j] ^= tk[i][j-1];
			for(i = 0; i < 4; i++)
				tk[i][KC/2] ^= AES_SBOX[tk[i][KC/2 - 1]];
			for(j = KC/2 + 1; j < KC; j++)
				for(i = 0; i < 4; i++)
					tk[i][j] ^= tk[i][j-1];
		}
		/* copy values into round key array */
		for(j = 0; (j < KC) && (t < (ROUNDS+1)*BC); j++, t++)
			for(i = 0; i < 4; i++) W[t / BC][i][t % BC] = tk[i][j];
	}
3003 
3004 	return 0;
3005 }
3006 
3007 STATIC int
3008 mv_aes_deckey(uint8_t *expandedKey, uint8_t *keyMaterial, int keyLen)
3009 {
3010 	uint8_t   W[MAXROUNDS+1][4][MAXBC];
3011 	uint8_t   k[4][MAXKC];
3012 	uint8_t   j;
3013 	int     i, rounds, KC;
3014 
3015 	if (expandedKey == NULL)
3016 		return -1;
3017 
3018 	if (!((keyLen == 128) || (keyLen == 192) || (keyLen == 256)))
3019 		return -1;
3020 
3021 	if (keyMaterial == NULL)
3022 		return -1;
3023 
3024 	/* initialize key schedule: */
3025 	for (i=0; i<keyLen/8; i++) {
3026 		j = keyMaterial[i];
3027 		k[i % 4][i / 4] = j;
3028 	}
3029 
3030 	mv_aes_ksched(k, keyLen, W);
3031 	switch (keyLen) {
3032 	case 128:
3033 		rounds = 10;
3034 		KC = 4;
3035 		break;
3036 	case 192:
3037 		rounds = 12;
3038 		KC = 6;
3039 		break;
3040 	case 256:
3041 		rounds = 14;
3042 		KC = 8;
3043 		break;
3044 	default:
3045 		return -1;
3046 	}
3047 
3048 	for(i=0; i<MAXBC; i++)
3049 		for(j=0; j<4; j++)
3050 			expandedKey[i*4+j] = W[rounds][j][i];
3051 	for(; i<KC; i++)
3052 		for(j=0; j<4; j++)
3053 			expandedKey[i*4+j] = W[rounds-1][j][i+MAXBC-KC];
3054 
3055 	return 0;
3056 }
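
/*
 * mv_aes_deckey() returns the last round key (W[rounds]) of the
 * expanded schedule: for this engine the "decryption key" is the
 * final encryption round key, presumably so the hardware can run the
 * key schedule backwards from it.
 */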
3057 
3058 /*
3059  * Clear cipher/mac operation state
3060  */
3061 INLINE void
3062 mvxpsec_packet_reset_op(struct mvxpsec_packet *mv_p)
3063 {
3064 	mv_p->pkt_header.desc.acc_config = 0;
3065 	mv_p->enc_off = mv_p->enc_ivoff = mv_p->enc_len = 0;
3066 	mv_p->mac_off = mv_p->mac_dst = mv_p->mac_len = 0;
3067 }
3068 
3069 /*
3070  * update MVXPSEC operation order
3071  */
3072 INLINE void
3073 mvxpsec_packet_update_op_order(struct mvxpsec_packet *mv_p, int op)
3074 {
3075 	struct mvxpsec_acc_descriptor *acc_desc = &mv_p->pkt_header.desc;
3076 	uint32_t cur_op = acc_desc->acc_config & MV_ACC_CRYPTO_OP_MASK;
3077 
3078 	KASSERT(op == MV_ACC_CRYPTO_OP_MAC || op == MV_ACC_CRYPTO_OP_ENC);
3079 	KASSERT((op & MV_ACC_CRYPTO_OP_MASK) == op);
3080 
3081 	if (cur_op == 0)
3082 		acc_desc->acc_config |= op;
3083 	else if (cur_op == MV_ACC_CRYPTO_OP_MAC && op == MV_ACC_CRYPTO_OP_ENC) {
3084 		acc_desc->acc_config &= ~MV_ACC_CRYPTO_OP_MASK;
3085 		acc_desc->acc_config |= MV_ACC_CRYPTO_OP_MACENC;
3086 		/* MAC then ENC (= decryption) */
3087 	}
3088 	else if (cur_op == MV_ACC_CRYPTO_OP_ENC && op == MV_ACC_CRYPTO_OP_MAC) {
3089 		acc_desc->acc_config &= ~MV_ACC_CRYPTO_OP_MASK;
3090 		acc_desc->acc_config |= MV_ACC_CRYPTO_OP_ENCMAC;
3091 		/* ENC then MAC (= encryption) */
3092 	}
3093 	else {
		log(LOG_ERR, "%s: multiple %s algorithms are not supported.\n",
		    __func__,
		    (op == MV_ACC_CRYPTO_OP_ENC) ? "encryption" : "authentication");
3097 	}
3098 }
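
/*
 * The order of descriptors in the cryptodesc chain thus selects the
 * engine operation: ENC seen before MAC yields OP_ENCMAC
 * (encrypt-then-authenticate, the outbound ESP case), MAC before ENC
 * yields OP_MACENC (the inbound case).
 */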
3099 
3100 /*
3101  * Parameter Conversions
3102  */
3103 INLINE uint32_t
3104 mvxpsec_alg2acc(uint32_t alg)
3105 {
3106 	uint32_t reg;
3107 
3108 	switch (alg) {
3109 	case CRYPTO_DES_CBC:
3110 		reg = MV_ACC_CRYPTO_ENC_DES;
3111 		reg |= MV_ACC_CRYPTO_CBC;
3112 		break;
3113 	case CRYPTO_3DES_CBC:
3114 		reg = MV_ACC_CRYPTO_ENC_3DES;
3115 		reg |= MV_ACC_CRYPTO_3DES_EDE;
3116 		reg |= MV_ACC_CRYPTO_CBC;
3117 		break;
3118 	case CRYPTO_AES_CBC:
3119 		reg = MV_ACC_CRYPTO_ENC_AES;
3120 		reg |= MV_ACC_CRYPTO_CBC;
3121 		break;
3122 	case CRYPTO_SHA1_HMAC_96:
3123 		reg = MV_ACC_CRYPTO_MAC_HMAC_SHA1;
3124 		reg |= MV_ACC_CRYPTO_MAC_96;
3125 		break;
3126 	case CRYPTO_MD5_HMAC_96:
3127 		reg = MV_ACC_CRYPTO_MAC_HMAC_MD5;
3128 		reg |= MV_ACC_CRYPTO_MAC_96;
3129 		break;
3130 	default:
3131 		reg = 0;
3132 		break;
3133 	}
3134 
3135 	return reg;
3136 }
3137 
3138 INLINE uint32_t
3139 mvxpsec_aesklen(int klen)
3140 {
3141 	if (klen < 128)
3142 		return 0;
3143 	else if (klen < 192)
3144 		return MV_ACC_CRYPTO_AES_KLEN_128;
3145 	else if (klen < 256)
3146 		return MV_ACC_CRYPTO_AES_KLEN_192;
3147 	else
3148 		return MV_ACC_CRYPTO_AES_KLEN_256;
3151 }
3152 
3153 /*
3154  * String Conversions
3155  */
3156 STATIC const char *
3157 s_errreg(uint32_t v)
3158 {
3159 	static char buf[80];
3160 
3161 	snprintf(buf, sizeof(buf),
3162 	    "%sMiss %sDoubleHit %sBothHit %sDataError",
3163 	    (v & MV_TDMA_ERRC_MISS) ? "+" : "-",
3164 	    (v & MV_TDMA_ERRC_DHIT) ? "+" : "-",
3165 	    (v & MV_TDMA_ERRC_BHIT) ? "+" : "-",
3166 	    (v & MV_TDMA_ERRC_DERR) ? "+" : "-");
3167 
3168 	return (const char *)buf;
3169 }
3170 
3171 STATIC const char *
3172 s_winreg(uint32_t v)
3173 {
3174 	static char buf[80];
3175 
3176 	snprintf(buf, sizeof(buf),
3177 	    "%s TGT 0x%x ATTR 0x%02x size %u(0x%04x)[64KB]",
3178 	    (v & MV_TDMA_ATTR_ENABLE) ? "EN" : "DIS",
3179 	    MV_TDMA_ATTR_GET_TARGET(v), MV_TDMA_ATTR_GET_ATTR(v),
3180 	    MV_TDMA_ATTR_GET_SIZE(v), MV_TDMA_ATTR_GET_SIZE(v));
3181 
3182 	return (const char *)buf;
3183 }
3184 
3185 STATIC const char *
3186 s_ctrlreg(uint32_t reg)
3187 {
3188 	static char buf[80];
3189 
3190 	snprintf(buf, sizeof(buf),
3191 	    "%s: %sFETCH DBURST-%u SBURST-%u %sOUTS %sCHAIN %sBSWAP %sACT",
3192 	    (reg & MV_TDMA_CONTROL_ENABLE) ? "ENABLE" : "DISABLE",
3193 	    (reg & MV_TDMA_CONTROL_FETCH) ? "+" : "-",
3194 	    MV_TDMA_CONTROL_GET_DST_BURST(reg),
3195 	    MV_TDMA_CONTROL_GET_SRC_BURST(reg),
3196 	    (reg & MV_TDMA_CONTROL_OUTS_EN) ? "+" : "-",
3197 	    (reg & MV_TDMA_CONTROL_CHAIN_DIS) ? "-" : "+",
3198 	    (reg & MV_TDMA_CONTROL_BSWAP_DIS) ? "-" : "+",
3199 	    (reg & MV_TDMA_CONTROL_ACT) ? "+" : "-");
3200 
3201 	return (const char *)buf;
3202 }
3203 
3204 _STATIC const char *
3205 s_xpsecintr(uint32_t v)
3206 {
3207 	static char buf[160];
3208 
3209 	snprintf(buf, sizeof(buf),
3210 	    "%sAuth %sDES %sAES-ENC %sAES-DEC %sENC %sSA %sAccAndTDMA "
3211 	    "%sTDMAComp %sTDMAOwn %sAccAndTDMA_Cont",
3212 	    (v & MVXPSEC_INT_AUTH) ? "+" : "-",
3213 	    (v & MVXPSEC_INT_DES) ? "+" : "-",
3214 	    (v & MVXPSEC_INT_AES_ENC) ? "+" : "-",
3215 	    (v & MVXPSEC_INT_AES_DEC) ? "+" : "-",
3216 	    (v & MVXPSEC_INT_ENC) ? "+" : "-",
3217 	    (v & MVXPSEC_INT_SA) ? "+" : "-",
3218 	    (v & MVXPSEC_INT_ACCTDMA) ? "+" : "-",
3219 	    (v & MVXPSEC_INT_TDMA_COMP) ? "+" : "-",
3220 	    (v & MVXPSEC_INT_TDMA_OWN) ? "+" : "-",
3221 	    (v & MVXPSEC_INT_ACCTDMA_CONT) ? "+" : "-");
3222 
3223 	return (const char *)buf;
3224 }
3225 
3226 STATIC const char *
3227 s_ctlalg(uint32_t alg)
3228 {
3229 	switch (alg) {
3230 	case CRYPTO_SHA1_HMAC_96:
3231 		return "HMAC-SHA1-96";
3232 	case CRYPTO_SHA1_HMAC:
3233 		return "HMAC-SHA1";
3234 	case CRYPTO_SHA1:
3235 		return "SHA1";
3236 	case CRYPTO_MD5_HMAC_96:
3237 		return "HMAC-MD5-96";
3238 	case CRYPTO_MD5_HMAC:
3239 		return "HMAC-MD5";
3240 	case CRYPTO_MD5:
3241 		return "MD5";
3242 	case CRYPTO_DES_CBC:
3243 		return "DES-CBC";
3244 	case CRYPTO_3DES_CBC:
3245 		return "3DES-CBC";
3246 	case CRYPTO_AES_CBC:
3247 		return "AES-CBC";
3248 	default:
3249 		break;
3250 	}
3251 
3252 	return "Unknown";
3253 }
3254 
3255 STATIC const char *
3256 s_xpsec_op(uint32_t reg)
3257 {
3258 	reg &= MV_ACC_CRYPTO_OP_MASK;
3259 	switch (reg) {
3260 	case MV_ACC_CRYPTO_OP_ENC:
3261 		return "ENC";
3262 	case MV_ACC_CRYPTO_OP_MAC:
3263 		return "MAC";
3264 	case MV_ACC_CRYPTO_OP_ENCMAC:
3265 		return "ENC-MAC";
3266 	case MV_ACC_CRYPTO_OP_MACENC:
3267 		return "MAC-ENC";
3268 	default:
3269 		break;
3270 	}
3271 
3272 	return "Unknown";
3273 
3274 }
3275 
3276 STATIC const char *
3277 s_xpsec_enc(uint32_t alg)
3278 {
3279 	alg <<= MV_ACC_CRYPTO_ENC_SHIFT;
3280 	switch (alg) {
3281 	case MV_ACC_CRYPTO_ENC_DES:
3282 		return "DES";
3283 	case MV_ACC_CRYPTO_ENC_3DES:
3284 		return "3DES";
3285 	case MV_ACC_CRYPTO_ENC_AES:
3286 		return "AES";
3287 	default:
3288 		break;
3289 	}
3290 
3291 	return "Unknown";
3292 }
3293 
3294 STATIC const char *
3295 s_xpsec_mac(uint32_t alg)
3296 {
3297 	alg <<= MV_ACC_CRYPTO_MAC_SHIFT;
3298 	switch (alg) {
3299 	case MV_ACC_CRYPTO_MAC_NONE:
3300 		return "Disabled";
3301 	case MV_ACC_CRYPTO_MAC_MD5:
3302 		return "MD5";
3303 	case MV_ACC_CRYPTO_MAC_SHA1:
3304 		return "SHA1";
3305 	case MV_ACC_CRYPTO_MAC_HMAC_MD5:
3306 		return "HMAC-MD5";
3307 	case MV_ACC_CRYPTO_MAC_HMAC_SHA1:
3308 		return "HMAC-SHA1";
3309 	default:
3310 		break;
3311 	}
3312 
3313 	return "Unknown";
3314 }
3315 
3316 STATIC const char *
3317 s_xpsec_frag(uint32_t frag)
3318 {
3319 	frag <<= MV_ACC_CRYPTO_FRAG_SHIFT;
3320 	switch (frag) {
3321 	case MV_ACC_CRYPTO_NOFRAG:
3322 		return "NoFragment";
3323 	case MV_ACC_CRYPTO_FRAG_FIRST:
3324 		return "FirstFragment";
3325 	case MV_ACC_CRYPTO_FRAG_MID:
3326 		return "MiddleFragment";
3327 	case MV_ACC_CRYPTO_FRAG_LAST:
3328 		return "LastFragment";
3329 	default:
3330 		break;
3331 	}
3332 
3333 	return "Unknown";
3334 }
3335 
3336 #ifdef MVXPSEC_DEBUG
3337 void
3338 mvxpsec_dump_reg(struct mvxpsec_softc *sc)
3339 {
3340 	uint32_t reg;
3341 	int i;
3342 
3343 	if ((mvxpsec_debug & MVXPSEC_DEBUG_DESC) == 0)
3344 		return;
3345 
3346 	printf("--- Interrupt Registers ---\n");
3347 	reg = MVXPSEC_READ(sc, MVXPSEC_INT_CAUSE);
3348 	printf("MVXPSEC INT CAUSE: 0x%08x\n", reg);
3349 	printf("MVXPSEC INT CAUSE: %s\n", s_xpsecintr(reg));
3350 	reg = MVXPSEC_READ(sc, MVXPSEC_INT_MASK);
3351 	printf("MVXPSEC INT MASK: 0x%08x\n", reg);
3352 	printf("MVXPSEC INT MASKE: %s\n", s_xpsecintr(reg));
3353 
3354 	printf("--- DMA Configuration Registers ---\n");
3355 	for (i = 0; i < MV_TDMA_NWINDOW; i++) {
3356 		reg = MVXPSEC_READ(sc, MV_TDMA_BAR(i));
3357 		printf("TDMA BAR%d: 0x%08x\n", i, reg);
3358 		reg = MVXPSEC_READ(sc, MV_TDMA_ATTR(i));
3359 		printf("TDMA ATTR%d: 0x%08x\n", i, reg);
3360 		printf("  -> %s\n", s_winreg(reg));
3361 	}
3362 
3363 	printf("--- DMA Control Registers ---\n");
3364 
3365 	reg = MVXPSEC_READ(sc, MV_TDMA_CONTROL);
3366 	printf("TDMA CONTROL: 0x%08x\n", reg);
3367 	printf("  -> %s\n", s_ctrlreg(reg));
3368 
3369 	printf("--- DMA Current Command Descriptors ---\n");
3370 
3371 	reg = MVXPSEC_READ(sc, MV_TDMA_ERR_CAUSE);
3372 	printf("TDMA ERR CAUSE: 0x%08x\n", reg);
3373 
3374 	reg = MVXPSEC_READ(sc, MV_TDMA_ERR_MASK);
3375 	printf("TDMA ERR MASK: 0x%08x\n", reg);
3376 
3377 	reg = MVXPSEC_READ(sc, MV_TDMA_CNT);
3378 	printf("TDMA DATA OWNER: %s\n",
3379 	    (reg & MV_TDMA_CNT_OWN) ? "DMAC" : "CPU");
3380 	printf("TDMA DATA COUNT: %d(0x%x)\n",
3381 	    (reg & ~MV_TDMA_CNT_OWN), (reg & ~MV_TDMA_CNT_OWN));
3382 
3383 	reg = MVXPSEC_READ(sc, MV_TDMA_SRC);
3384 	printf("TDMA DATA SRC: 0x%08x\n", reg);
3385 
3386 	reg = MVXPSEC_READ(sc, MV_TDMA_DST);
3387 	printf("TDMA DATA DST: 0x%08x\n", reg);
3388 
3389 	reg = MVXPSEC_READ(sc, MV_TDMA_NXT);
3390 	printf("TDMA DATA NXT: 0x%08x\n", reg);
3391 
3392 	reg = MVXPSEC_READ(sc, MV_TDMA_CUR);
3393 	printf("TDMA DATA CUR: 0x%08x\n", reg);
3394 
3395 	printf("--- ACC Command Register ---\n");
3396 	reg = MVXPSEC_READ(sc, MV_ACC_COMMAND);
3397 	printf("ACC COMMAND: 0x%08x\n", reg);
3398 	printf("ACC: %sACT %sSTOP\n",
3399 	    (reg & MV_ACC_COMMAND_ACT) ? "+" : "-",
3400 	    (reg & MV_ACC_COMMAND_STOP) ? "+" : "-");
3401 
3402 	reg = MVXPSEC_READ(sc, MV_ACC_CONFIG);
3403 	printf("ACC CONFIG: 0x%08x\n", reg);
3404 	reg = MVXPSEC_READ(sc, MV_ACC_DESC);
3405 	printf("ACC DESC: 0x%08x\n", reg);
3406 
3407 	printf("--- DES Key Register ---\n");
3408 	reg = MVXPSEC_READ(sc, MV_CE_DES_KEY0L);
3409 	printf("DES KEY0  Low: 0x%08x\n", reg);
3410 	reg = MVXPSEC_READ(sc, MV_CE_DES_KEY0H);
3411 	printf("DES KEY0 High: 0x%08x\n", reg);
3412 	reg = MVXPSEC_READ(sc, MV_CE_DES_KEY1L);
3413 	printf("DES KEY1  Low: 0x%08x\n", reg);
3414 	reg = MVXPSEC_READ(sc, MV_CE_DES_KEY1H);
3415 	printf("DES KEY1 High: 0x%08x\n", reg);
3416 	reg = MVXPSEC_READ(sc, MV_CE_DES_KEY2L);
3417 	printf("DES KEY2  Low: 0x%08x\n", reg);
3418 	reg = MVXPSEC_READ(sc, MV_CE_DES_KEY2H);
3419 	printf("DES KEY2 High: 0x%08x\n", reg);
3420 
3421 	printf("--- AES Key Register ---\n");
3422 	for (i = 0; i < 8; i++) {
3423 		reg = MVXPSEC_READ(sc, MV_CE_AES_EKEY(i));
3424 		printf("AES ENC KEY COL%d: %08x\n", i, reg);
3425 	}
3426 	for (i = 0; i < 8; i++) {
3427 		reg = MVXPSEC_READ(sc, MV_CE_AES_DKEY(i));
3428 		printf("AES DEC KEY COL%d: %08x\n", i, reg);
3429 	}
3430 
3431 	return;
3432 }
3433 
3434 STATIC void
3435 mvxpsec_dump_sram(const char *name, struct mvxpsec_softc *sc, size_t len)
3436 {
3437 	uint32_t reg;
3438 
3439 	if (sc->sc_sram_va == NULL)
3440 		return;
3441 
3442 	if (len == 0) {
3443 		printf("\n%s NO DATA(len=0)\n", name);
3444 		return;
3445 	}
3446 	else if (len > MV_ACC_SRAM_SIZE)
3447 		len = MV_ACC_SRAM_SIZE;
3448 
3449 	mutex_enter(&sc->sc_dma_mtx);
3450 	reg = MVXPSEC_READ(sc, MV_TDMA_CONTROL);
3451 	if (reg & MV_TDMA_CONTROL_ACT) {
3452 		printf("TDMA is active, cannot access SRAM\n");
3453 		mutex_exit(&sc->sc_dma_mtx);
3454 		return;
3455 	}
3456 	reg = MVXPSEC_READ(sc, MV_ACC_COMMAND);
3457 	if (reg & MV_ACC_COMMAND_ACT) {
3458 		printf("SA is active, cannot access SRAM\n");
3459 		mutex_exit(&sc->sc_dma_mtx);
3460 		return;
3461 	}
3462 
3463 	printf("%s: dump SRAM, %zu bytes\n", name, len);
3464 	mvxpsec_dump_data(name, sc->sc_sram_va, len);
3465 	mutex_exit(&sc->sc_dma_mtx);
3466 	return;
3467 }
3468 
3469 
3470 _STATIC void
3471 mvxpsec_dump_dmaq(struct mvxpsec_descriptor_handle *dh)
3472 {
3473 	struct mvxpsec_descriptor *d =
3474            (struct mvxpsec_descriptor *)dh->_desc;
3475 
3476 	printf("--- DMA Command Descriptor ---\n");
3477 	printf("DESC: VA=%p PA=0x%08x\n",
3478 	    d, (uint32_t)dh->phys_addr);
3479 	printf("DESC: WORD0 = 0x%08x\n", d->tdma_word0);
3480 	printf("DESC: SRC = 0x%08x\n", d->tdma_src);
3481 	printf("DESC: DST = 0x%08x\n", d->tdma_dst);
3482 	printf("DESC: NXT = 0x%08x\n", d->tdma_nxt);
3483 
3484 	return;
3485 }
3486 
3487 STATIC void
3488 mvxpsec_dump_data(const char *name, void *p, size_t len)
3489 {
3490 	uint8_t *data = p;
3491 	off_t off;
3492 
3493 	printf("%s: dump %p, %zu bytes", name, p, len);
3494 	if (p == NULL || len == 0) {
3495 		printf("\n%s: NO DATA\n", name);
3496 		return;
3497 	}
3498 	for (off = 0; off < len; off++) {
3499 		if ((off % 16) == 0) {
3500 			printf("\n%s: 0x%08x:", name, (uint32_t)off);
3501 		}
3502 		if ((off % 4) == 0) {
3503 			printf(" ");
3504 		}
3505 		printf("%02x", data[off]);
3506 	}
3507 	printf("\n");
3508 
3509 	return;
3510 }
3511 
3512 _STATIC void
3513 mvxpsec_dump_packet(const char *name, struct mvxpsec_packet *mv_p)
3514 {
3515 	struct mvxpsec_softc *sc = mv_p->mv_s->sc;
3516 
3517 	printf("%s: packet_data:\n", name);
3518 	mvxpsec_dump_packet_data(name, mv_p);
3519 
3520 	printf("%s: SRAM:\n", name);
3521 	mvxpsec_dump_sram(name, sc, 2000);
3522 
3523 	printf("%s: packet_descriptor:\n", name);
3524 	mvxpsec_dump_packet_desc(name, mv_p);
3525 }
3526 
3527 _STATIC void
3528 mvxpsec_dump_packet_data(const char *name, struct mvxpsec_packet *mv_p)
3529 {
3530 	static char buf[1500];
3531 	int len;
3532 
3533 	if (mv_p->data_type == MVXPSEC_DATA_MBUF) {
3534 		struct mbuf *m;
3535 
3536 		m = mv_p->data.mbuf;
3537 		len = m->m_pkthdr.len;
3538 		if (len > sizeof(buf))
3539 			len = sizeof(buf);
3540 		m_copydata(m, 0, len, buf);
3541 	}
3542 	else if (mv_p->data_type == MVXPSEC_DATA_UIO) {
3543 		struct uio *uio;
3544 
3545 		uio = mv_p->data.uio;
3546 		len = uio->uio_resid;
3547 		if (len > sizeof(buf))
3548 			len = sizeof(buf);
3549 		cuio_copydata(uio, 0, len, buf);
3550 	}
3551 	else if (mv_p->data_type == MVXPSEC_DATA_RAW) {
3552 		len = mv_p->data_len;
3553 		if (len > sizeof(buf))
3554 			len = sizeof(buf);
3555 		memcpy(buf, mv_p->data.raw, len);
3556 	}
3557 	else
3558 		return;
3559 	mvxpsec_dump_data(name, buf, len);
3560 
3561 	return;
3562 }
3563 
3564 _STATIC void
3565 mvxpsec_dump_packet_desc(const char *name, struct mvxpsec_packet *mv_p)
3566 {
3567 	uint32_t *words;
3568 
3569 	if (mv_p == NULL)
3570 		return;
3571 
3572 	words = &mv_p->pkt_header.desc.acc_desc_dword0;
3573 	mvxpsec_dump_acc_config(name, words[0]);
3574 	mvxpsec_dump_acc_encdata(name, words[1], words[2]);
3575 	mvxpsec_dump_acc_enclen(name, words[2]);
3576 	mvxpsec_dump_acc_enckey(name, words[3]);
3577 	mvxpsec_dump_acc_enciv(name, words[4]);
3578 	mvxpsec_dump_acc_macsrc(name, words[5]);
3579 	mvxpsec_dump_acc_macdst(name, words[6]);
3580 	mvxpsec_dump_acc_maciv(name, words[7]);
3581 
3582 	return;
3583 }
3584 
3585 _STATIC void
3586 mvxpsec_dump_acc_config(const char *name, uint32_t w)
3587 {
3588 	/* SA: Dword 0 */
3589 	printf("%s: Dword0=0x%08x\n", name, w);
3590 	printf("%s:   OP = %s\n", name,
3591 	    s_xpsec_op(MV_ACC_CRYPTO_OP(w)));
3592 	printf("%s:   MAC = %s\n", name,
3593 	    s_xpsec_mac(MV_ACC_CRYPTO_MAC(w)));
3594 	printf("%s:   MAC_LEN = %s\n", name,
3595 	    w & MV_ACC_CRYPTO_MAC_96 ? "96-bit" : "full-bit");
3596 	printf("%s:   ENC = %s\n", name,
3597 	    s_xpsec_enc(MV_ACC_CRYPTO_ENC(w)));
3598 	printf("%s:   DIR = %s\n", name,
3599 	    w & MV_ACC_CRYPTO_DECRYPT ? "decryption" : "encryption");
3600 	printf("%s:   CHAIN = %s\n", name,
3601 	    w & MV_ACC_CRYPTO_CBC ? "CBC" : "ECB");
3602 	printf("%s:   3DES = %s\n", name,
3603 	    w & MV_ACC_CRYPTO_3DES_EDE ? "EDE" : "EEE");
3604 	printf("%s:   FRAGMENT = %s\n", name,
3605 	    s_xpsec_frag(MV_ACC_CRYPTO_FRAG(w)));
3606 	return;
3607 }
3608 
3609 STATIC void
3610 mvxpsec_dump_acc_encdata(const char *name, uint32_t w, uint32_t w2)
3611 {
3612 	/* SA: Dword 1 */
3613 	printf("%s: Dword1=0x%08x\n", name, w);
3614 	printf("%s:   ENC SRC = 0x%x\n", name, MV_ACC_DESC_GET_VAL_1(w));
3615 	printf("%s:   ENC DST = 0x%x\n", name, MV_ACC_DESC_GET_VAL_2(w));
3616 	printf("%s:   ENC RANGE = 0x%x - 0x%x\n", name,
3617 	    MV_ACC_DESC_GET_VAL_1(w),
3618 	    MV_ACC_DESC_GET_VAL_1(w) + MV_ACC_DESC_GET_VAL_1(w2) - 1);
3619 	return;
3620 }
3621 
3622 STATIC void
3623 mvxpsec_dump_acc_enclen(const char *name, uint32_t w)
3624 {
3625 	/* SA: Dword 2 */
3626 	printf("%s: Dword2=0x%08x\n", name, w);
3627 	printf("%s:   ENC LEN = %d\n", name,
3628 	    MV_ACC_DESC_GET_VAL_1(w));
3629 	return;
3630 }
3631 
3632 STATIC void
3633 mvxpsec_dump_acc_enckey(const char *name, uint32_t w)
3634 {
3635 	/* SA: Dword 3 */
3636 	printf("%s: Dword3=0x%08x\n", name, w);
3637 	printf("%s:   EKEY = 0x%x\n", name,
3638 	    MV_ACC_DESC_GET_VAL_1(w));
3639 	return;
3640 }
3641 
3642 STATIC void
3643 mvxpsec_dump_acc_enciv(const char *name, uint32_t w)
3644 {
3645 	/* SA: Dword 4 */
3646 	printf("%s: Dword4=0x%08x\n", name, w);
3647 	printf("%s:   EIV = 0x%x\n", name, MV_ACC_DESC_GET_VAL_1(w));
3648 	printf("%s:   EIV_BUF = 0x%x\n", name, MV_ACC_DESC_GET_VAL_2(w));
3649 	return;
3650 }
3651 
3652 STATIC void
3653 mvxpsec_dump_acc_macsrc(const char *name, uint32_t w)
3654 {
3655 	/* SA: Dword 5 */
3656 	printf("%s: Dword5=0x%08x\n", name, w);
3657 	printf("%s:   MAC_SRC = 0x%x\n", name,
3658 	    MV_ACC_DESC_GET_VAL_1(w));
3659 	printf("%s:   MAC_TOTAL_LEN = %d\n", name,
3660 	    MV_ACC_DESC_GET_VAL_3(w));
3661 	printf("%s:   MAC_RANGE = 0x%0x - 0x%0x\n", name,
3662 	    MV_ACC_DESC_GET_VAL_1(w),
3663 	    MV_ACC_DESC_GET_VAL_1(w) + MV_ACC_DESC_GET_VAL_3(w) - 1);
3664 	return;
3665 }
3666 
3667 STATIC void
3668 mvxpsec_dump_acc_macdst(const char *name, uint32_t w)
3669 {
3670 	/* SA: Dword 6 */
3671 	printf("%s: Dword6=0x%08x\n", name, w);
3672 	printf("%s:   MAC_DST = 0x%x\n", name, MV_ACC_DESC_GET_VAL_1(w));
3673 	printf("%s:   MAC_BLOCK_LEN = %d\n", name,
3674 	    MV_ACC_DESC_GET_VAL_2(w));
3675 	return;
3676 }
3677 
3678 STATIC void
3679 mvxpsec_dump_acc_maciv(const char *name, uint32_t w)
3680 {
3681 	/* SA: Dword 7 */
3682 	printf("%s: Dword7=0x%08x\n", name, w);
3683 	printf("%s:   MAC_INNER_IV = 0x%x\n", name,
3684 	    MV_ACC_DESC_GET_VAL_1(w));
3685 	printf("%s:   MAC_OUTER_IV = 0x%x\n", name,
3686 	    MV_ACC_DESC_GET_VAL_2(w));
3687 	return;
3688 }
3689 #endif
3690