xref: /netbsd-src/sys/dev/marvell/mvxpsec.c (revision e6c7e151de239c49d2e38720a061ed9d1fa99309)
1 /*	$NetBSD: mvxpsec.c,v 1.5 2019/12/27 09:41:51 msaitoh Exp $	*/
2 /*
3  * Copyright (c) 2015 Internet Initiative Japan Inc.
4  * All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
17  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
18  * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
19  * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
20  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
21  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
23  * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
24  * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
25  * POSSIBILITY OF SUCH DAMAGE.
26  */
27 /*
28  * Cryptographic Engine and Security Accelerator (MVXPSEC)
29  */
30 #include <sys/cdefs.h>
31 #include <sys/param.h>
32 #include <sys/types.h>
33 #include <sys/kernel.h>
34 #include <sys/queue.h>
35 #include <sys/conf.h>
36 #include <sys/proc.h>
37 #include <sys/bus.h>
38 #include <sys/evcnt.h>
39 #include <sys/device.h>
40 #include <sys/endian.h>
41 #include <sys/errno.h>
42 #include <sys/kmem.h>
43 #include <sys/mbuf.h>
44 #include <sys/callout.h>
45 #include <sys/pool.h>
46 #include <sys/cprng.h>
47 #include <sys/syslog.h>
48 #include <sys/mutex.h>
49 #include <sys/kthread.h>
50 #include <sys/atomic.h>
51 #include <sys/sha1.h>
52 #include <sys/md5.h>
53 
54 #include <uvm/uvm_extern.h>
55 
56 #include <crypto/rijndael/rijndael.h>
57 
58 #include <opencrypto/cryptodev.h>
59 #include <opencrypto/xform.h>
60 
61 #include <net/net_stats.h>
62 
63 #include <netinet/in_systm.h>
64 #include <netinet/in.h>
65 #include <netinet/ip.h>
66 #include <netinet/ip6.h>
67 
68 #include <netipsec/esp_var.h>
69 
70 #include <arm/cpufunc.h>
71 #include <arm/marvell/mvsocvar.h>
72 #include <arm/marvell/armadaxpreg.h>
73 #include <dev/marvell/marvellreg.h>
74 #include <dev/marvell/marvellvar.h>
75 #include <dev/marvell/mvxpsecreg.h>
76 #include <dev/marvell/mvxpsecvar.h>
77 
78 #ifdef DEBUG
79 #define STATIC __attribute__ ((noinline)) extern
80 #define _STATIC __attribute__ ((noinline)) extern
81 #define INLINE __attribute__ ((noinline)) extern
82 #define _INLINE __attribute__ ((noinline)) extern
83 #else
84 #define STATIC static
85 #define _STATIC __attribute__ ((unused)) static
86 #define INLINE static inline
87 #define _INLINE __attribute__ ((unused)) static inline
88 #endif
89 
90 /*
91  * IRQ and SRAM spaces for each unit
92  * XXX: move to attach_args
93  */
94 struct {
95 	int		err_int;
96 } mvxpsec_config[] = {
97 	{ .err_int = ARMADAXP_IRQ_CESA0_ERR, }, /* unit 0 */
98 	{ .err_int = ARMADAXP_IRQ_CESA1_ERR, }, /* unit 1 */
99 };
100 #define MVXPSEC_ERR_INT(sc) \
101     mvxpsec_config[device_unit((sc)->sc_dev)].err_int
102 
103 /*
104  * AES
105  */
106 #define MAXBC				(128/32)
107 #define MAXKC				(256/32)
108 #define MAXROUNDS			14
109 STATIC int mv_aes_ksched(uint8_t[4][MAXKC], int,
110     uint8_t[MAXROUNDS+1][4][MAXBC]);
111 STATIC int mv_aes_deckey(uint8_t *, uint8_t *, int);
112 
113 /*
114  * device driver autoconf interface
115  */
116 STATIC int mvxpsec_match(device_t, cfdata_t, void *);
117 STATIC void mvxpsec_attach(device_t, device_t, void *);
118 STATIC void mvxpsec_evcnt_attach(struct mvxpsec_softc *);
119 
120 /*
121  * register setup
122  */
123 STATIC int mvxpsec_wininit(struct mvxpsec_softc *, enum marvell_tags *);
124 
125 /*
126  * timer(callout) interface
127  *
128  * XXX: callout is not MP safe...
129  */
130 STATIC void mvxpsec_timer(void *);
131 
132 /*
133  * interrupt interface
134  */
135 STATIC int mvxpsec_intr(void *);
136 INLINE void mvxpsec_intr_cleanup(struct mvxpsec_softc *);
137 STATIC int mvxpsec_eintr(void *);
138 STATIC uint32_t mvxpsec_intr_ack(struct mvxpsec_softc *);
139 STATIC uint32_t mvxpsec_eintr_ack(struct mvxpsec_softc *);
140 INLINE void mvxpsec_intr_cnt(struct mvxpsec_softc *, int);
141 
142 /*
143  * memory allocators and VM management
144  */
145 STATIC struct mvxpsec_devmem *mvxpsec_alloc_devmem(struct mvxpsec_softc *,
146     paddr_t, int);
147 STATIC int mvxpsec_init_sram(struct mvxpsec_softc *);
148 
149 /*
150  * Low-level DMA interface
151  */
152 STATIC int mvxpsec_init_dma(struct mvxpsec_softc *,
153     struct marvell_attach_args *);
154 INLINE int mvxpsec_dma_wait(struct mvxpsec_softc *);
155 INLINE int mvxpsec_acc_wait(struct mvxpsec_softc *);
156 INLINE struct mvxpsec_descriptor_handle *mvxpsec_dma_getdesc(struct mvxpsec_softc *);
157 _INLINE void mvxpsec_dma_putdesc(struct mvxpsec_softc *, struct mvxpsec_descriptor_handle *);
158 INLINE void mvxpsec_dma_setup(struct mvxpsec_descriptor_handle *,
159     uint32_t, uint32_t, uint32_t);
160 INLINE void mvxpsec_dma_cat(struct mvxpsec_softc *,
161     struct mvxpsec_descriptor_handle *, struct mvxpsec_descriptor_handle *);
162 
163 /*
164  * High-level DMA interface
165  */
166 INLINE int mvxpsec_dma_copy0(struct mvxpsec_softc *,
167     mvxpsec_dma_ring *, uint32_t, uint32_t, uint32_t);
168 INLINE int mvxpsec_dma_copy(struct mvxpsec_softc *,
169     mvxpsec_dma_ring *, uint32_t, uint32_t, uint32_t);
170 INLINE int mvxpsec_dma_acc_activate(struct mvxpsec_softc *,
171     mvxpsec_dma_ring *);
172 INLINE void mvxpsec_dma_finalize(struct mvxpsec_softc *,
173     mvxpsec_dma_ring *);
174 INLINE void mvxpsec_dma_free(struct mvxpsec_softc *,
175     mvxpsec_dma_ring *);
176 INLINE int mvxpsec_dma_copy_packet(struct mvxpsec_softc *, struct mvxpsec_packet *);
177 INLINE int mvxpsec_dma_sync_packet(struct mvxpsec_softc *, struct mvxpsec_packet *);
178 
179 /*
180  * Session management interface (OpenCrypto)
181  */
182 #define MVXPSEC_SESSION(sid)	((sid) & 0x0fffffff)
183 #define MVXPSEC_SID(crd, sesn)	(((crd) << 28) | ((sesn) & 0x0fffffff))
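/*
 * For illustration: the driver unit occupies the top four bits of a
 * sid and the session index the low 28 bits, so the two macros
 * round-trip:
 *
 *	uint32_t sid = MVXPSEC_SID(1, 42);	(unit 1, session 42)
 *	KASSERT(MVXPSEC_SESSION(sid) == 42);
 */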
184 /* pool management */
185 STATIC int mvxpsec_session_ctor(void *, void *, int);
186 STATIC void mvxpsec_session_dtor(void *, void *);
187 STATIC int mvxpsec_packet_ctor(void *, void *, int);
188 STATIC void mvxpsec_packet_dtor(void *, void *);
189 
190 /* session management */
191 STATIC struct mvxpsec_session *mvxpsec_session_alloc(struct mvxpsec_softc *);
192 STATIC void mvxpsec_session_dealloc(struct mvxpsec_session *);
193 INLINE struct mvxpsec_session *mvxpsec_session_lookup(struct mvxpsec_softc *, int);
194 INLINE int mvxpsec_session_ref(struct mvxpsec_session *);
195 INLINE void mvxpsec_session_unref(struct mvxpsec_session *);
196 
197 /* packet management */
198 STATIC struct mvxpsec_packet *mvxpsec_packet_alloc(struct mvxpsec_session *);
199 INLINE void mvxpsec_packet_enqueue(struct mvxpsec_packet *);
200 STATIC void mvxpsec_packet_dealloc(struct mvxpsec_packet *);
201 STATIC int mvxpsec_done_packet(struct mvxpsec_packet *);
202 
203 /* session header management */
204 STATIC int mvxpsec_header_finalize(struct mvxpsec_packet *);
205 
206 /* packet queue management */
207 INLINE void mvxpsec_drop(struct mvxpsec_softc *, struct cryptop *, struct mvxpsec_packet *, int);
208 STATIC int mvxpsec_dispatch_queue(struct mvxpsec_softc *);
209 
210 /* opencrypto operation */
211 INLINE int mvxpsec_parse_crd(struct mvxpsec_packet *, struct cryptodesc *);
212 INLINE int mvxpsec_parse_crp(struct mvxpsec_packet *);
213 
214 /* payload data management */
215 INLINE int mvxpsec_packet_setcrp(struct mvxpsec_packet *, struct cryptop *);
216 STATIC int mvxpsec_packet_setdata(struct mvxpsec_packet *, void *, uint32_t);
217 STATIC int mvxpsec_packet_setmbuf(struct mvxpsec_packet *, struct mbuf *);
218 STATIC int mvxpsec_packet_setuio(struct mvxpsec_packet *, struct uio *);
219 STATIC int mvxpsec_packet_rdata(struct mvxpsec_packet *, int, int, void *);
220 _STATIC int mvxpsec_packet_wdata(struct mvxpsec_packet *, int, int, void *);
221 STATIC int mvxpsec_packet_write_iv(struct mvxpsec_packet *, void *, int);
222 STATIC int mvxpsec_packet_copy_iv(struct mvxpsec_packet *, int, int);
223 
224 /* key pre-computation */
225 STATIC int mvxpsec_key_precomp(int, void *, int, void *, void *);
226 STATIC int mvxpsec_hmac_precomp(int, void *, int, void *, void *);
227 
228 /* crypto operation management */
229 INLINE void mvxpsec_packet_reset_op(struct mvxpsec_packet *);
230 INLINE void mvxpsec_packet_update_op_order(struct mvxpsec_packet *, int);
231 
232 /*
233  * parameter converters
234  */
235 INLINE uint32_t mvxpsec_alg2acc(uint32_t alg);
236 INLINE uint32_t mvxpsec_aesklen(int klen);
237 
238 /*
239  * string formatters
240  */
241 _STATIC const char *s_ctrlreg(uint32_t);
242 _STATIC const char *s_winreg(uint32_t);
243 _STATIC const char *s_errreg(uint32_t);
244 _STATIC const char *s_xpsecintr(uint32_t);
245 _STATIC const char *s_ctlalg(uint32_t);
246 _STATIC const char *s_xpsec_op(uint32_t);
247 _STATIC const char *s_xpsec_enc(uint32_t);
248 _STATIC const char *s_xpsec_mac(uint32_t);
249 _STATIC const char *s_xpsec_frag(uint32_t);
250 
251 /*
252  * debugging supports
253  */
254 #ifdef MVXPSEC_DEBUG
255 _STATIC void mvxpsec_dump_dmaq(struct mvxpsec_descriptor_handle *);
256 _STATIC void mvxpsec_dump_reg(struct mvxpsec_softc *);
257 _STATIC void mvxpsec_dump_sram(const char *, struct mvxpsec_softc *, size_t);
258 _STATIC void mvxpsec_dump_data(const char *, void *, size_t);
259 
260 _STATIC void mvxpsec_dump_packet(const char *, struct mvxpsec_packet *);
261 _STATIC void mvxpsec_dump_packet_data(const char *, struct mvxpsec_packet *);
262 _STATIC void mvxpsec_dump_packet_desc(const char *, struct mvxpsec_packet *);
263 
264 _STATIC void mvxpsec_dump_acc_config(const char *, uint32_t);
265 _STATIC void mvxpsec_dump_acc_encdata(const char *, uint32_t, uint32_t);
266 _STATIC void mvxpsec_dump_acc_enclen(const char *, uint32_t);
267 _STATIC void mvxpsec_dump_acc_enckey(const char *, uint32_t);
268 _STATIC void mvxpsec_dump_acc_enciv(const char *, uint32_t);
269 _STATIC void mvxpsec_dump_acc_macsrc(const char *, uint32_t);
270 _STATIC void mvxpsec_dump_acc_macdst(const char *, uint32_t);
271 _STATIC void mvxpsec_dump_acc_maciv(const char *, uint32_t);
272 #endif
273 
274 /*
275  * global configurations, params, work spaces, ...
276  *
277  * XXX: use sysctl for global configurations
278  */
279 /* waiting for device */
280 static int mvxpsec_wait_interval = 10;		/* usec */
281 static int mvxpsec_wait_retry = 100;		/* times = wait for 1 [msec] */
282 #ifdef MVXPSEC_DEBUG
283 static uint32_t mvxpsec_debug = MVXPSEC_DEBUG;	/* debug level */
284 #endif
285 
286 /*
287  * Register accessors
288  */
289 #define MVXPSEC_WRITE(sc, off, val) \
290 	bus_space_write_4((sc)->sc_iot, (sc)->sc_ioh, (off), (val))
291 #define MVXPSEC_READ(sc, off) \
292 	bus_space_read_4((sc)->sc_iot, (sc)->sc_ioh, (off))
293 
294 /*
295  * device driver autoconf interface
296  */
297 CFATTACH_DECL2_NEW(mvxpsec_mbus, sizeof(struct mvxpsec_softc),
298     mvxpsec_match, mvxpsec_attach, NULL, NULL, NULL, NULL);
299 
300 STATIC int
301 mvxpsec_match(device_t dev, cfdata_t match, void *aux)
302 {
303 	struct marvell_attach_args *mva = aux;
304 	uint32_t tag;
305 	int window;
306 
307 	if (strcmp(mva->mva_name, match->cf_name) != 0)
308 		return 0;
309 	if (mva->mva_offset == MVA_OFFSET_DEFAULT)
310 		return 0;
311 
312 	switch (mva->mva_unit) {
313 	case 0:
314 		tag = ARMADAXP_TAG_CRYPT0;
315 		break;
316 	case 1:
317 		tag = ARMADAXP_TAG_CRYPT1;
318 		break;
319 	default:
320 		aprint_error_dev(dev,
321 		    "unit %d is not supported\n", mva->mva_unit);
322 		return 0;
323 	}
324 
325 	window = mvsoc_target(tag, NULL, NULL, NULL, NULL);
326 	if (window >= nwindow) {
327 		aprint_error_dev(dev,
328 		    "Security Accelerator SRAM is not configured.\n");
329 		return 0;
330 	}
331 
332 	return 1;
333 }
334 
335 STATIC void
336 mvxpsec_attach(device_t parent, device_t self, void *aux)
337 {
338 	struct marvell_attach_args *mva = aux;
339 	struct mvxpsec_softc *sc = device_private(self);
340 	int v;
341 	int i;
342 
343 	sc->sc_dev = self;
344 
345 	aprint_normal(": Marvell Crypto Engines and Security Accelerator\n");
346 	aprint_naive("\n");
347 #ifdef MVXPSEC_MULTI_PACKET
348 	aprint_normal_dev(sc->sc_dev, "multi-packet chained mode enabled.\n");
349 #else
350 	aprint_normal_dev(sc->sc_dev, "multi-packet chained mode disabled.\n");
351 #endif
352 	aprint_normal_dev(sc->sc_dev,
353 	    "Max %d sessions.\n", MVXPSEC_MAX_SESSIONS);
354 
355 	/* mutex */
356 	mutex_init(&sc->sc_session_mtx, MUTEX_DEFAULT, IPL_NET);
357 	mutex_init(&sc->sc_dma_mtx, MUTEX_DEFAULT, IPL_NET);
358 	mutex_init(&sc->sc_queue_mtx, MUTEX_DEFAULT, IPL_NET);
359 
360 	/* Packet queue */
361 	SIMPLEQ_INIT(&sc->sc_wait_queue);
362 	SIMPLEQ_INIT(&sc->sc_run_queue);
363 	SLIST_INIT(&sc->sc_free_list);
364 	sc->sc_wait_qlen = 0;
365 #ifdef MVXPSEC_MULTI_PACKET
366 	sc->sc_wait_qlimit = 16;
367 #else
368 	sc->sc_wait_qlimit = 0;
369 #endif
370 	sc->sc_free_qlen = 0;
371 
372 	/* Timer */
373 	callout_init(&sc->sc_timeout, 0); /* XXX: use CALLOUT_MPSAFE */
374 	callout_setfunc(&sc->sc_timeout, mvxpsec_timer, sc);
375 
376 	/* I/O */
377 	sc->sc_iot = mva->mva_iot;
378 	if (bus_space_subregion(mva->mva_iot, mva->mva_ioh,
379 	    mva->mva_offset, mva->mva_size, &sc->sc_ioh)) {
380 		aprint_error_dev(self, "Cannot map registers\n");
381 		return;
382 	}
383 
384 	/* DMA */
385 	sc->sc_dmat = mva->mva_dmat;
386 	if (mvxpsec_init_dma(sc, mva) < 0)
387 		return;
388 
389 	/* SRAM */
390 	if (mvxpsec_init_sram(sc) < 0)
391 		return;
392 
393 	/* Registers */
394 	mvxpsec_wininit(sc, mva->mva_tags);
395 
396 	/* INTR */
397 	MVXPSEC_WRITE(sc, MVXPSEC_INT_MASK, MVXPSEC_DEFAULT_INT);
398 	MVXPSEC_WRITE(sc, MV_TDMA_ERR_MASK, MVXPSEC_DEFAULT_ERR);
399 	sc->sc_done_ih =
400 	    marvell_intr_establish(mva->mva_irq, IPL_NET, mvxpsec_intr, sc);
401 	/* XXX: should pass error IRQ using mva */
402 	sc->sc_error_ih = marvell_intr_establish(MVXPSEC_ERR_INT(sc),
403 	    IPL_NET, mvxpsec_eintr, sc);
404 	aprint_normal_dev(self,
405 	    "Error Reporting IRQ %d\n", MVXPSEC_ERR_INT(sc));
406 
407 	/* Initialize TDMA (It's enabled here, but waiting for SA) */
408 	if (mvxpsec_dma_wait(sc) < 0)
409 		panic("%s: DMA DEVICE not responding\n", __func__);
410 	MVXPSEC_WRITE(sc, MV_TDMA_CNT, 0);
411 	MVXPSEC_WRITE(sc, MV_TDMA_SRC, 0);
412 	MVXPSEC_WRITE(sc, MV_TDMA_DST, 0);
413 	MVXPSEC_WRITE(sc, MV_TDMA_NXT, 0);
414 	MVXPSEC_WRITE(sc, MV_TDMA_CUR, 0);
415 	v  = MVXPSEC_READ(sc, MV_TDMA_CONTROL);
416 	v |= MV_TDMA_CONTROL_ENABLE;
417 	MVXPSEC_WRITE(sc, MV_TDMA_CONTROL, v);
418 
419 	/* Initialize SA */
420 	if (mvxpsec_acc_wait(sc) < 0)
421 		panic("%s: MVXPSEC not responding\n", __func__);
422 	v  = MVXPSEC_READ(sc, MV_ACC_CONFIG);
423 	v &= ~MV_ACC_CONFIG_STOP_ON_ERR;
424 	v |= MV_ACC_CONFIG_MULT_PKT;
425 	v |= MV_ACC_CONFIG_WAIT_TDMA;
426 	v |= MV_ACC_CONFIG_ACT_TDMA;
427 	MVXPSEC_WRITE(sc, MV_ACC_CONFIG, v);
428 	MVXPSEC_WRITE(sc, MV_ACC_DESC, 0);
429 	MVXPSEC_WRITE(sc, MV_ACC_COMMAND, MV_ACC_COMMAND_STOP);
430 
431 	/* Session */
432 	sc->sc_session_pool =
433 	    pool_cache_init(sizeof(struct mvxpsec_session), 0, 0, 0,
434 	    "mvxpsecpl", NULL, IPL_NET,
435 	    mvxpsec_session_ctor, mvxpsec_session_dtor, sc);
436 	pool_cache_sethiwat(sc->sc_session_pool, MVXPSEC_MAX_SESSIONS);
437 	pool_cache_setlowat(sc->sc_session_pool, MVXPSEC_MAX_SESSIONS / 2);
438 	sc->sc_last_session = NULL;
439 
440 	/* Packet */
441 	sc->sc_packet_pool =
442 	    pool_cache_init(sizeof(struct mvxpsec_packet), 0, 0, 0,
443 	    "mvxpsec_pktpl", NULL, IPL_NET,
444 	    mvxpsec_packet_ctor, mvxpsec_packet_dtor, sc);
445 	pool_cache_sethiwat(sc->sc_packet_pool, MVXPSEC_MAX_SESSIONS);
446 	pool_cache_setlowat(sc->sc_packet_pool, MVXPSEC_MAX_SESSIONS / 2);
447 
448 	/* Register to EVCNT framework */
449 	mvxpsec_evcnt_attach(sc);
450 
451 	/* Register to Opencrypto */
452 	for (i = 0; i < MVXPSEC_MAX_SESSIONS; i++) {
453 		sc->sc_sessions[i] = NULL;
454 	}
455 	if (mvxpsec_register(sc))
456 		panic("cannot initialize OpenCrypto module.\n");
457 
458 	return;
459 }
460 
461 STATIC void
462 mvxpsec_evcnt_attach(struct mvxpsec_softc *sc)
463 {
464 	struct mvxpsec_evcnt *sc_ev = &sc->sc_ev;
465 
466 	evcnt_attach_dynamic(&sc_ev->intr_all, EVCNT_TYPE_INTR,
467 	    NULL, device_xname(sc->sc_dev), "Main Intr.");
468 	evcnt_attach_dynamic(&sc_ev->intr_auth, EVCNT_TYPE_INTR,
469 	    NULL, device_xname(sc->sc_dev), "Auth Intr.");
470 	evcnt_attach_dynamic(&sc_ev->intr_des, EVCNT_TYPE_INTR,
471 	    NULL, device_xname(sc->sc_dev), "DES Intr.");
472 	evcnt_attach_dynamic(&sc_ev->intr_aes_enc, EVCNT_TYPE_INTR,
473 	    NULL, device_xname(sc->sc_dev), "AES-Encrypt Intr.");
474 	evcnt_attach_dynamic(&sc_ev->intr_aes_dec, EVCNT_TYPE_INTR,
475 	    NULL, device_xname(sc->sc_dev), "AES-Decrypt Intr.");
476 	evcnt_attach_dynamic(&sc_ev->intr_enc, EVCNT_TYPE_INTR,
477 	    NULL, device_xname(sc->sc_dev), "Crypto Intr.");
478 	evcnt_attach_dynamic(&sc_ev->intr_sa, EVCNT_TYPE_INTR,
479 	    NULL, device_xname(sc->sc_dev), "SA Intr.");
480 	evcnt_attach_dynamic(&sc_ev->intr_acctdma, EVCNT_TYPE_INTR,
481 	    NULL, device_xname(sc->sc_dev), "AccTDMA Intr.");
482 	evcnt_attach_dynamic(&sc_ev->intr_comp, EVCNT_TYPE_INTR,
483 	    NULL, device_xname(sc->sc_dev), "TDMA-Complete Intr.");
484 	evcnt_attach_dynamic(&sc_ev->intr_own, EVCNT_TYPE_INTR,
485 	    NULL, device_xname(sc->sc_dev), "TDMA-Ownership Intr.");
486 	evcnt_attach_dynamic(&sc_ev->intr_acctdma_cont, EVCNT_TYPE_INTR,
487 	    NULL, device_xname(sc->sc_dev), "AccTDMA-Continue Intr.");
488 
489 	evcnt_attach_dynamic(&sc_ev->session_new, EVCNT_TYPE_MISC,
490 	    NULL, device_xname(sc->sc_dev), "New-Session");
491 	evcnt_attach_dynamic(&sc_ev->session_free, EVCNT_TYPE_MISC,
492 	    NULL, device_xname(sc->sc_dev), "Free-Session");
493 
494 	evcnt_attach_dynamic(&sc_ev->packet_ok, EVCNT_TYPE_MISC,
495 	    NULL, device_xname(sc->sc_dev), "Packet-OK");
496 	evcnt_attach_dynamic(&sc_ev->packet_err, EVCNT_TYPE_MISC,
497 	    NULL, device_xname(sc->sc_dev), "Packet-ERR");
498 
499 	evcnt_attach_dynamic(&sc_ev->dispatch_packets, EVCNT_TYPE_MISC,
500 	    NULL, device_xname(sc->sc_dev), "Packet-Dispatch");
501 	evcnt_attach_dynamic(&sc_ev->dispatch_queue, EVCNT_TYPE_MISC,
502 	    NULL, device_xname(sc->sc_dev), "Queue-Dispatch");
503 	evcnt_attach_dynamic(&sc_ev->queue_full, EVCNT_TYPE_MISC,
504 	    NULL, device_xname(sc->sc_dev), "Queue-Full");
505 	evcnt_attach_dynamic(&sc_ev->max_dispatch, EVCNT_TYPE_MISC,
506 	    NULL, device_xname(sc->sc_dev), "Max-Dispatch");
507 	evcnt_attach_dynamic(&sc_ev->max_done, EVCNT_TYPE_MISC,
508 	    NULL, device_xname(sc->sc_dev), "Max-Done");
509 }
510 
511 /*
512  * Register setup
513  */
514 STATIC int mvxpsec_wininit(struct mvxpsec_softc *sc, enum marvell_tags *tags)
515 {
516 	device_t pdev = device_parent(sc->sc_dev);
517 	uint64_t base;
518 	uint32_t size, reg;
519 	int window, target, attr, rv, i;
520 
521 	/* disable all windows */
522 	for (window = 0; window < MV_TDMA_NWINDOW; window++)
523 	{
524 		MVXPSEC_WRITE(sc, MV_TDMA_BAR(window), 0);
525 		MVXPSEC_WRITE(sc, MV_TDMA_ATTR(window), 0);
526 	}
527 
528 	for (window = 0, i = 0;
529 	    tags[i] != MARVELL_TAG_UNDEFINED && window < MV_TDMA_NWINDOW; i++) {
530 		rv = marvell_winparams_by_tag(pdev, tags[i],
531 		    &target, &attr, &base, &size);
532 		if (rv != 0 || size == 0)
533 			continue;
534 
535 		if (base > 0xffffffffULL) {
536 			aprint_error_dev(sc->sc_dev,
537 			    "can't remap window %d\n", window);
538 			continue;
539 		}
540 
541 		reg  = MV_TDMA_BAR_BASE(base);
542 		MVXPSEC_WRITE(sc, MV_TDMA_BAR(window), reg);
543 
544 		reg  = MV_TDMA_ATTR_TARGET(target);
545 		reg |= MV_TDMA_ATTR_ATTR(attr);
546 		reg |= MV_TDMA_ATTR_SIZE(size);
547 		reg |= MV_TDMA_ATTR_ENABLE;
548 		MVXPSEC_WRITE(sc, MV_TDMA_ATTR(window), reg);
549 
550 		window++;
551 	}
552 
553 	return 0;
554 }
555 
556 /*
557  * Timer handling
558  */
559 STATIC void
560 mvxpsec_timer(void *aux)
561 {
562 	struct mvxpsec_softc *sc = aux;
563 	struct mvxpsec_packet *mv_p;
564 	uint32_t reg;
565 	int ndone;
566 	int refill;
567 	int s;
568 
569 	/* IPL_SOFTCLOCK */
570 
571 	log(LOG_ERR, "%s: device timeout.\n", __func__);
572 #ifdef MVXPSEC_DEBUG
573 	mvxpsec_dump_reg(sc);
574 #endif
575 
576 	s = splnet();
577 	/* stop security accelerator */
578 	MVXPSEC_WRITE(sc, MV_ACC_COMMAND, MV_ACC_COMMAND_STOP);
579 
580 	/* stop TDMA */
581 	MVXPSEC_WRITE(sc, MV_TDMA_CONTROL, 0);
582 
583 	/* cleanup packet queue */
584 	mutex_enter(&sc->sc_queue_mtx);
585 	ndone = 0;
586 	while ( (mv_p = SIMPLEQ_FIRST(&sc->sc_run_queue)) != NULL) {
587 		SIMPLEQ_REMOVE_HEAD(&sc->sc_run_queue, queue);
588 
589 		mv_p->crp->crp_etype = EINVAL;
590 		mvxpsec_done_packet(mv_p);
591 		ndone++;
592 	}
593 	MVXPSEC_EVCNT_MAX(sc, max_done, ndone);
594 	sc->sc_flags &= ~HW_RUNNING;
595 	refill = (sc->sc_wait_qlen > 0) ? 1 : 0;
596 	mutex_exit(&sc->sc_queue_mtx);
597 
598 	/* reenable TDMA */
599 	if (mvxpsec_dma_wait(sc) < 0)
600 		panic("%s: failed to reset DMA DEVICE. give up.", __func__);
601 	MVXPSEC_WRITE(sc, MV_TDMA_CNT, 0);
602 	MVXPSEC_WRITE(sc, MV_TDMA_SRC, 0);
603 	MVXPSEC_WRITE(sc, MV_TDMA_DST, 0);
604 	MVXPSEC_WRITE(sc, MV_TDMA_CUR, 0);
605 	MVXPSEC_WRITE(sc, MV_TDMA_NXT, 0);
606 	reg  = MV_TDMA_DEFAULT_CONTROL;
607 	reg |= MV_TDMA_CONTROL_ENABLE;
608 	MVXPSEC_WRITE(sc, MV_TDMA_CONTROL, reg);
609 
610 	if (mvxpsec_acc_wait(sc) < 0)
611 		panic("%s: failed to reset MVXPSEC. give up.", __func__);
612 	reg  = MV_ACC_CONFIG_MULT_PKT;
613 	reg |= MV_ACC_CONFIG_WAIT_TDMA;
614 	reg |= MV_ACC_CONFIG_ACT_TDMA;
615 	MVXPSEC_WRITE(sc, MV_ACC_CONFIG, reg);
616 	MVXPSEC_WRITE(sc, MV_ACC_DESC, 0);
617 
618 	if (refill) {
619 		mutex_enter(&sc->sc_queue_mtx);
620 		mvxpsec_dispatch_queue(sc);
621 		mutex_exit(&sc->sc_queue_mtx);
622 	}
623 
624 	crypto_unblock(sc->sc_cid, CRYPTO_SYMQ|CRYPTO_ASYMQ);
625 	splx(s);
626 }
627 
628 /*
629  * DMA handling
630  */
631 
632 /*
633  * Allocate kernel devmem and DMA safe memory with bus_dma API
634  * used for DMA descriptors.
635  *
636  * if phys != 0, assume phys is a DMA safe memory and bypass
637  * allocator.
638  */
639 STATIC struct mvxpsec_devmem *
640 mvxpsec_alloc_devmem(struct mvxpsec_softc *sc, paddr_t phys, int size)
641 {
642 	struct mvxpsec_devmem *devmem;
643 	bus_dma_segment_t seg;
644 	int rseg;
645 	int err;
646 
647 	if (sc == NULL)
648 		return NULL;
649 
650 	devmem = kmem_alloc(sizeof(*devmem), KM_SLEEP);
651 	devmem->size = size;
652 
653 	if (phys) {
654 		seg.ds_addr = phys;
655 		seg.ds_len = devmem->size;
656 		rseg = 1;
657 		err = 0;
658 	}
659 	else {
660 		err = bus_dmamem_alloc(sc->sc_dmat,
661 		    devmem->size, PAGE_SIZE, 0,
662 		    &seg, MVXPSEC_DMA_MAX_SEGS, &rseg, BUS_DMA_NOWAIT);
663 	}
664 	if (err) {
665 		aprint_error_dev(sc->sc_dev, "can't alloc DMA buffer\n");
666 		goto fail_kmem_free;
667 	}
668 
669 	err = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
670 	     devmem->size, &devmem->kva, BUS_DMA_NOWAIT);
671 	if (err) {
672 		aprint_error_dev(sc->sc_dev, "can't map DMA buffer\n");
673 		goto fail_dmamem_free;
674 	}
675 
676 	err = bus_dmamap_create(sc->sc_dmat,
677 	    size, 1, size, 0, BUS_DMA_NOWAIT, &devmem->map);
678 	if (err) {
679 		aprint_error_dev(sc->sc_dev, "can't create DMA map\n");
680 		goto fail_unmap;
681 	}
682 
683 	err = bus_dmamap_load(sc->sc_dmat,
684 	    devmem->map, devmem->kva, devmem->size, NULL,
685 	    BUS_DMA_NOWAIT);
686 	if (err) {
687 		aprint_error_dev(sc->sc_dev,
688 		   "can't load DMA buffer VA:%p PA:0x%08x\n",
689 		    devmem->kva, (int)seg.ds_addr);
690 		goto fail_destroy;
691 	}
692 
693 	return devmem;
694 
695 fail_destroy:
696 	bus_dmamap_destroy(sc->sc_dmat, devmem->map);
697 fail_unmap:
698 	bus_dmamem_unmap(sc->sc_dmat, devmem->kva, devmem->size);
699 fail_dmamem_free:
700 	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
701 fail_kmem_free:
702 	kmem_free(devmem, sizeof(*devmem));
703 
704 	return NULL;
705 }
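/*
 * For illustration, the two ways to call this (hypothetical calls;
 * error handling omitted):
 *
 *	allocator path, fresh DMA-safe memory via bus_dmamem_alloc():
 *		dm = mvxpsec_alloc_devmem(sc, 0, PAGE_SIZE * npages);
 *
 *	bypass path, 'phys' already points at DMA-safe memory:
 *		dm = mvxpsec_alloc_devmem(sc, phys, size);
 */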
706 
707 /*
708  * Get DMA Descriptor from (DMA safe) descriptor pool.
709  */
710 INLINE struct mvxpsec_descriptor_handle *
711 mvxpsec_dma_getdesc(struct mvxpsec_softc *sc)
712 {
713 	struct mvxpsec_descriptor_handle *entry;
714 
715 	/* must be called with sc->sc_dma_mtx held */
716 	KASSERT(mutex_owned(&sc->sc_dma_mtx));
717 
718 	if (sc->sc_desc_ring_prod == sc->sc_desc_ring_cons)
719 		return NULL;
720 
721 	entry = &sc->sc_desc_ring[sc->sc_desc_ring_prod];
722 	sc->sc_desc_ring_prod++;
723 	if (sc->sc_desc_ring_prod >= sc->sc_desc_ring_size)
724 		sc->sc_desc_ring_prod -= sc->sc_desc_ring_size;
725 
726 	return entry;
727 }
728 
729 /*
730  * Put DMA Descriptor to descriptor pool.
731  */
732 _INLINE void
733 mvxpsec_dma_putdesc(struct mvxpsec_softc *sc,
734     struct mvxpsec_descriptor_handle *dh)
735 {
736 	/* must be called with sc->sc_dma_mtx held */
737 	KASSERT(mutex_owned(&sc->sc_dma_mtx));
738 
739 	sc->sc_desc_ring_cons++;
740 	if (sc->sc_desc_ring_cons >= sc->sc_desc_ring_size)
741 		sc->sc_desc_ring_cons -= sc->sc_desc_ring_size;
742 
743 	return;
744 }
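/*
 * The ring indices above wrap by hand instead of using modulo
 * arithmetic.  Assuming the usual one-slot-kept-empty convention
 * (mvxpsec_dma_getdesc() fails when prod == cons), the number of
 * free descriptors at any time is:
 *
 *	n_free = (cons - prod + size) % size;
 */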
745 
746 /*
747  * Setup DMA Descriptor
748  * copy 'size' bytes from 'src' to 'dst'.
749  * 'src' or 'dst' must be SRAM address.
750  */
751 INLINE void
752 mvxpsec_dma_setup(struct mvxpsec_descriptor_handle *dh,
753     uint32_t dst, uint32_t src, uint32_t size)
754 {
755 	struct mvxpsec_descriptor *desc;
756 
757 	desc = (struct mvxpsec_descriptor *)dh->_desc;
758 
759 	desc->tdma_dst = dst;
760 	desc->tdma_src = src;
761 	desc->tdma_word0 = size;
762 	if (size != 0)
763 		desc->tdma_word0 |= MV_TDMA_CNT_OWN;
764 	/* size == 0 is owned by ACC, not TDMA */
765 
766 #ifdef MVXPSEC_DEBUG
767 	mvxpsec_dump_dmaq(dh);
768 #endif
769 
770 }
771 
772 /*
773  * Concatenate two DMA descriptors
774  */
775 INLINE void
776 mvxpsec_dma_cat(struct mvxpsec_softc *sc,
777     struct mvxpsec_descriptor_handle *dh1,
778     struct mvxpsec_descriptor_handle *dh2)
779 {
780 	((struct mvxpsec_descriptor*)dh1->_desc)->tdma_nxt = dh2->phys_addr;
781 	MVXPSEC_SYNC_DESC(sc, dh1, BUS_DMASYNC_PREWRITE);
782 }
783 
784 /*
785  * Schedule DMA Copy
786  */
787 INLINE int
788 mvxpsec_dma_copy0(struct mvxpsec_softc *sc, mvxpsec_dma_ring *r,
789     uint32_t dst, uint32_t src, uint32_t size)
790 {
791 	struct mvxpsec_descriptor_handle *dh;
792 
793 	dh = mvxpsec_dma_getdesc(sc);
794 	if (dh == NULL) {
795 		log(LOG_ERR, "%s: descriptor full\n", __func__);
796 		return -1;
797 	}
798 
799 	mvxpsec_dma_setup(dh, dst, src, size);
800 	if (r->dma_head == NULL) {
801 		r->dma_head = dh;
802 		r->dma_last = dh;
803 		r->dma_size = 1;
804 	}
805 	else {
806 		mvxpsec_dma_cat(sc, r->dma_last, dh);
807 		r->dma_last = dh;
808 		r->dma_size++;
809 	}
810 
811 	return 0;
812 }
813 
814 INLINE int
815 mvxpsec_dma_copy(struct mvxpsec_softc *sc, mvxpsec_dma_ring *r,
816     uint32_t dst, uint32_t src, uint32_t size)
817 {
818 	if (size == 0) /* size 0 is the special ACC-activate descriptor */
819 		return 0;
820 
821 	return mvxpsec_dma_copy0(sc, r, dst, src, size);
822 }
823 
824 /*
825  * Schedule ACC Activate
826  */
827 INLINE int
828 mvxpsec_dma_acc_activate(struct mvxpsec_softc *sc, mvxpsec_dma_ring *r)
829 {
830 	return mvxpsec_dma_copy0(sc, r, 0, 0, 0);
831 }
832 
833 /*
834  * Finalize DMA setup
835  */
836 INLINE void
837 mvxpsec_dma_finalize(struct mvxpsec_softc *sc, mvxpsec_dma_ring *r)
838 {
839 	struct mvxpsec_descriptor_handle *dh;
840 
841 	dh = r->dma_last;
842 	((struct mvxpsec_descriptor*)dh->_desc)->tdma_nxt = 0;
843 	MVXPSEC_SYNC_DESC(sc, dh, BUS_DMASYNC_PREWRITE);
844 }
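/*
 * For illustration, the typical sequence for building one request
 * with the helpers above (this is what mvxpsec_dma_copy_packet()
 * below does; error handling omitted):
 *
 *	mvxpsec_dma_copy(sc, r, dst, src, len);	    DRAM -> SRAM
 *	mvxpsec_dma_acc_activate(sc, r);	    size==0 kicks the ACC
 *	mvxpsec_dma_copy(sc, r, dst2, src2, len2);  SRAM -> DRAM
 *	mvxpsec_dma_finalize(sc, r);		    terminate the chain
 */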
845 
846 /*
847  * Free entire DMA ring
848  */
849 INLINE void
850 mvxpsec_dma_free(struct mvxpsec_softc *sc, mvxpsec_dma_ring *r)
851 {
852 	sc->sc_desc_ring_cons += r->dma_size;
853 	if (sc->sc_desc_ring_cons >= sc->sc_desc_ring_size)
854 		sc->sc_desc_ring_cons -= sc->sc_desc_ring_size;
855 	r->dma_head = NULL;
856 	r->dma_last = NULL;
857 	r->dma_size = 0;
858 }
859 
860 /*
861  * create DMA descriptor chain for the packet
862  */
863 INLINE int
864 mvxpsec_dma_copy_packet(struct mvxpsec_softc *sc, struct mvxpsec_packet *mv_p)
865 {
866 	struct mvxpsec_session *mv_s = mv_p->mv_s;
867 	uint32_t src, dst, len;
868 	uint32_t pkt_off, pkt_off_r;
869 	int err;
870 	int i;
871 
872 	/* must be called with sc->sc_dma_mtx held */
873 	KASSERT(mutex_owned(&sc->sc_dma_mtx));
874 
875 	/*
876 	 * set offset for mem->device copy
877 	 *
878 	 * typical packet image:
879 	 *
880 	 *   enc_ivoff
881 	 *   mac_off
882 	 *   |
883 	 *   |    enc_off
884 	 *   |    |
885 	 *   v    v
886 	 *   +----+--------...
887 	 *   |IV  |DATA
888 	 *   +----+--------...
889 	 */
890 	pkt_off = 0;
891 	if (mv_p->mac_off > 0)
892 		pkt_off = mv_p->mac_off;
893 	if ((mv_p->flags & CRP_EXT_IV) == 0 && pkt_off > mv_p->enc_ivoff)
894 		pkt_off = mv_p->enc_ivoff;
895 	if (mv_p->enc_off > 0 && pkt_off > mv_p->enc_off)
896 		pkt_off = mv_p->enc_off;
897 	pkt_off_r = pkt_off;
898 
899 	/* make DMA descriptors to copy packet header: DRAM -> SRAM */
900 	dst = (uint32_t)MVXPSEC_SRAM_PKT_HDR_PA(sc);
901 	src = (uint32_t)mv_p->pkt_header_map->dm_segs[0].ds_addr;
902 	len = sizeof(mv_p->pkt_header);
903 	err = mvxpsec_dma_copy(sc, &mv_p->dma_ring, dst, src, len);
904 	if (__predict_false(err))
905 		return err;
906 
907 	/*
908 	 * make DMA descriptors to copy session header: DRAM -> SRAM
909 	 * we can reuse the session header on SRAM if the session has not changed.
910 	 */
911 	if (sc->sc_last_session != mv_s) {
912 		dst = (uint32_t)MVXPSEC_SRAM_SESS_HDR_PA(sc);
913 		src = (uint32_t)mv_s->session_header_map->dm_segs[0].ds_addr;
914 		len = sizeof(mv_s->session_header);
915 		err = mvxpsec_dma_copy(sc, &mv_p->dma_ring, dst, src, len);
916 		if (__predict_false(err))
917 			return err;
918 		sc->sc_last_session = mv_s;
919 	}
920 
921 	/* make DMA descriptor to copy payload data: DRAM -> SRAM */
922 	dst = MVXPSEC_SRAM_PAYLOAD_PA(sc, 0);
923 	for (i = 0; i < mv_p->data_map->dm_nsegs; i++) {
924 		src = mv_p->data_map->dm_segs[i].ds_addr;
925 		len = mv_p->data_map->dm_segs[i].ds_len;
926 		if (pkt_off) {
927 			if (len <= pkt_off) {
928 				/* ignore the segment */
929 				dst += len;
930 				pkt_off -= len;
931 				continue;
932 			}
933 			/* copy from the middle of the segment */
934 			dst += pkt_off;
935 			src += pkt_off;
936 			len -= pkt_off;
937 			pkt_off = 0;
938 		}
939 		err = mvxpsec_dma_copy(sc, &mv_p->dma_ring, dst, src, len);
940 		if (__predict_false(err))
941 			return err;
942 		dst += len;
943 	}
944 
945 	/* make special descriptor to activate security accelerator */
946 	err = mvxpsec_dma_acc_activate(sc, &mv_p->dma_ring);
947 	if (__predict_false(err))
948 		return err;
949 
950 	/* make DMA descriptors to copy payload: SRAM -> DRAM */
951 	src = (uint32_t)MVXPSEC_SRAM_PAYLOAD_PA(sc, 0);
952 	for (i = 0; i < mv_p->data_map->dm_nsegs; i++) {
953 		dst = (uint32_t)mv_p->data_map->dm_segs[i].ds_addr;
954 		len = (uint32_t)mv_p->data_map->dm_segs[i].ds_len;
955 		if (pkt_off_r) {
956 			if (len <= pkt_off_r) {
957 				/* ignore the segment */
958 				src += len;
959 				pkt_off_r -= len;
960 				continue;
961 			}
962 			/* copy from the middle of the segment */
963 			src += pkt_off_r;
964 			dst += pkt_off_r;
965 			len -= pkt_off_r;
966 			pkt_off_r = 0;
967 		}
968 		err = mvxpsec_dma_copy(sc, &mv_p->dma_ring, dst, src, len);
969 		if (__predict_false(err))
970 			return err;
971 		src += len;
972 	}
973 	KASSERT(pkt_off == 0);
974 	KASSERT(pkt_off_r == 0);
975 
976 	/*
977 	 * make DMA descriptors to copy packet header: SRAM->DRAM
978 	 * if IV is present in the payload, no need to copy.
979 	 */
980 	if (mv_p->flags & CRP_EXT_IV) {
981 		dst = (uint32_t)mv_p->pkt_header_map->dm_segs[0].ds_addr;
982 		src = (uint32_t)MVXPSEC_SRAM_PKT_HDR_PA(sc);
983 		len = sizeof(mv_p->pkt_header);
984 		err = mvxpsec_dma_copy(sc, &mv_p->dma_ring, dst, src, len);
985 		if (__predict_false(err))
986 			return err;
987 	}
988 
989 	return 0;
990 }
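/*
 * For illustration, the per-segment offset skipping used twice above,
 * factored out as a standalone helper.  This helper does not exist in
 * the driver; it is a sketch only.  'sram' is the address that keeps
 * advancing even across skipped segments, 'dram' is the per-segment
 * address.  Returns nonzero if the whole segment must be skipped.
 */
_INLINE int
mvxpsec_dma_skip_off(uint32_t *sram, uint32_t *dram,
    uint32_t *len, uint32_t *off)
{
	if (*off == 0)
		return 0;		/* nothing left to skip */
	if (*len <= *off) {
		/* the segment lies entirely before the offset */
		*sram += *len;
		*off -= *len;
		return 1;		/* ignore the segment */
	}
	/* copy from the middle of the segment */
	*sram += *off;
	*dram += *off;
	*len -= *off;
	*off = 0;
	return 0;
}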
991 
992 INLINE int
993 mvxpsec_dma_sync_packet(struct mvxpsec_softc *sc, struct mvxpsec_packet *mv_p)
994 {
995 	/* sync packet header */
996 	bus_dmamap_sync(sc->sc_dmat,
997 	    mv_p->pkt_header_map, 0, sizeof(mv_p->pkt_header),
998 	    BUS_DMASYNC_PREWRITE);
999 
1000 #ifdef MVXPSEC_DEBUG
1001 	/* sync session header */
1002 	if (mvxpsec_debug != 0) {
1003 		struct mvxpsec_session *mv_s = mv_p->mv_s;
1004 
1005 		/* only debug code touch the session header after newsession */
1006 		bus_dmamap_sync(sc->sc_dmat,
1007 		    mv_s->session_header_map,
1008 		    0, sizeof(mv_s->session_header),
1009 		    BUS_DMASYNC_PREWRITE);
1010 	}
1011 #endif
1012 
1013 	/* sync packet buffer */
1014 	bus_dmamap_sync(sc->sc_dmat,
1015 	    mv_p->data_map, 0, mv_p->data_len,
1016 	    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
1017 
1018 	return 0;
1019 }
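/*
 * Note that the PREWRITE|PREREAD sync above pairs with the
 * POSTWRITE|POSTREAD sync in mvxpsec_done_packet(): the payload is
 * both read by TDMA (DRAM -> SRAM) and overwritten by it
 * (SRAM -> DRAM), hence both flags on data_map.
 */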
1020 
1021 /*
1022  * Initialize MVXPSEC Internal SRAM
1023  *
1024  * - must be called after DMA initialization.
1025  * - make VM mapping for SRAM area on MBus.
1026  */
1027 STATIC int
1028 mvxpsec_init_sram(struct mvxpsec_softc *sc)
1029 {
1030 	uint32_t tag, target, attr, base, size;
1031 	vaddr_t va;
1032 	int window;
1033 
1034 	switch (sc->sc_dev->dv_unit) {
1035 	case 0:
1036 		tag = ARMADAXP_TAG_CRYPT0;
1037 		break;
1038 	case 1:
1039 		tag = ARMADAXP_TAG_CRYPT1;
1040 		break;
1041 	default:
1042 		aprint_error_dev(sc->sc_dev, "no internal SRAM mapping\n");
1043 		return -1;
1044 	}
1045 
1046 	window = mvsoc_target(tag, &target, &attr, &base, &size);
1047 	if (window >= nwindow) {
1048 		aprint_error_dev(sc->sc_dev, "no internal SRAM mapping\n");
1049 		return -1;
1050 	}
1051 
1052 	if (sizeof(struct mvxpsec_crypt_sram) > size) {
1053 		aprint_error_dev(sc->sc_dev,
1054 	    "SRAM Data Structure Exceeds SRAM window size.\n");
1055 		return -1;
1056 	}
1057 
1058 	aprint_normal_dev(sc->sc_dev,
1059 	    "internal SRAM window at 0x%08x-0x%08x",
1060 	    base, base + size - 1);
1061 	sc->sc_sram_pa = base;
1062 
1063 	/* get vmspace to read/write device internal SRAM */
1064 	va = uvm_km_alloc(kernel_map, PAGE_SIZE, PAGE_SIZE,
1065 			UVM_KMF_VAONLY | UVM_KMF_NOWAIT);
1066 	if (va == 0) {
1067 		aprint_error_dev(sc->sc_dev, "cannot map SRAM window\n");
1068 		sc->sc_sram_va = NULL;
1069 		aprint_normal("\n");
1070 		return 0;
1071 	}
1072 	/* XXX: not working. PMAP_NOCACHE seems to have no effect? */
1073 	pmap_kenter_pa(va, base, VM_PROT_READ|VM_PROT_WRITE, PMAP_NOCACHE);
1074 	pmap_update(pmap_kernel());
1075 	sc->sc_sram_va = (void *)va;
1076 	aprint_normal(" va %p\n", sc->sc_sram_va);
1077 	memset(sc->sc_sram_va, 0xff, MV_ACC_SRAM_SIZE);
1078 
1079 	return 0;
1080 }
1081 
1082 /*
1083  * Initialize TDMA engine.
1084  */
1085 STATIC int
1086 mvxpsec_init_dma(struct mvxpsec_softc *sc, struct marvell_attach_args *mva)
1087 {
1088 	struct mvxpsec_descriptor_handle *dh;
1089 	uint8_t *va;
1090 	paddr_t pa;
1091 	off_t va_off, pa_off;
1092 	int i, n, seg, ndh;
1093 
1094 	/* Init device's control parameters (still disabled) */
1095 	MVXPSEC_WRITE(sc, MV_TDMA_CONTROL, MV_TDMA_DEFAULT_CONTROL);
1096 
1097 	/* Init Software DMA Handlers */
1098 	sc->sc_devmem_desc =
1099 	    mvxpsec_alloc_devmem(sc, 0, PAGE_SIZE * MVXPSEC_DMA_DESC_PAGES);
1100 	ndh = (PAGE_SIZE / sizeof(struct mvxpsec_descriptor))
1101 	    * MVXPSEC_DMA_DESC_PAGES;
1102 	sc->sc_desc_ring =
1103 	    kmem_alloc(sizeof(struct mvxpsec_descriptor_handle) * ndh,
1104 	        KM_SLEEP);
1105 	aprint_normal_dev(sc->sc_dev, "%d DMA handles in %zu bytes array\n",
1106 	    ndh, sizeof(struct mvxpsec_descriptor_handle) * ndh);
1107 
1108 	ndh = 0;
1109 	for (seg = 0; seg < devmem_nseg(sc->sc_devmem_desc); seg++) {
1110 		va = devmem_va(sc->sc_devmem_desc);
1111 		pa = devmem_pa(sc->sc_devmem_desc, seg);
1112 		n = devmem_palen(sc->sc_devmem_desc, seg) /
1113 		       	sizeof(struct mvxpsec_descriptor);
1114 		va_off = (PAGE_SIZE * seg);
1115 		pa_off = 0;
1116 		for (i = 0; i < n; i++) {
1117 			dh = &sc->sc_desc_ring[ndh];
1118 			dh->map = devmem_map(sc->sc_devmem_desc);
1119 			dh->off = va_off + pa_off;
1120 			dh->_desc = (void *)(va + va_off + pa_off);
1121 			dh->phys_addr = pa + pa_off;
1122 			pa_off += sizeof(struct mvxpsec_descriptor);
1123 			ndh++;
1124 		}
1125 	}
1126 	sc->sc_desc_ring_size = ndh;
1127 	sc->sc_desc_ring_prod = 0;
1128 	sc->sc_desc_ring_cons = sc->sc_desc_ring_size - 1;
1129 
1130 	return 0;
1131 }
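/*
 * For example, assuming the four-word (16-byte) TDMA descriptor used
 * above and 4 KiB pages, each descriptor page holds
 * PAGE_SIZE / sizeof(struct mvxpsec_descriptor) = 256 descriptors,
 * so the ring has 256 * MVXPSEC_DMA_DESC_PAGES entries.
 */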
1132 
1133 /*
1134  * Wait for the TDMA controller to become idle
1135  */
1136 INLINE int
1137 mvxpsec_dma_wait(struct mvxpsec_softc *sc)
1138 {
1139 	int retry = 0;
1140 
1141 	while (MVXPSEC_READ(sc, MV_TDMA_CONTROL) & MV_TDMA_CONTROL_ACT) {
1142 		delay(mvxpsec_wait_interval);
1143 		if (retry++ >= mvxpsec_wait_retry)
1144 			return -1;
1145 	}
1146 	return 0;
1147 }
1148 
1149 /*
1150  * Wait for the Security Accelerator to become idle
1151  */
1152 INLINE int
1153 mvxpsec_acc_wait(struct mvxpsec_softc *sc)
1154 {
1155 	int retry = 0;
1156 
1157 	while (MVXPSEC_READ(sc, MV_ACC_COMMAND) & MV_ACC_COMMAND_ACT) {
1158 		delay(mvxpsec_wait_interval);
1159 		if (++retry >= mvxpsec_wait_retry)
1160 			return -1;
1161 	}
1162 	return 0;
1163 }
1164 
1165 /*
1166  * Entry of interrupt handler
1167  *
1168  * register this to kernel via marvell_intr_establish()
1169  */
1170 int
1171 mvxpsec_intr(void *arg)
1172 {
1173 	struct mvxpsec_softc *sc = arg;
1174 	uint32_t v;
1175 
1176 	/* IPL_NET */
1177 	while ((v = mvxpsec_intr_ack(sc)) != 0) {
1178 		mvxpsec_intr_cnt(sc, v);
1179 		MVXPSEC_PRINTF(MVXPSEC_DEBUG_INTR, "MVXPSEC Intr 0x%08x\n", v);
1180 		MVXPSEC_PRINTF(MVXPSEC_DEBUG_INTR, "%s\n", s_xpsecintr(v));
1181 #ifdef MVXPSEC_DEBUG
1182 		mvxpsec_dump_reg(sc);
1183 #endif
1184 
1185 		/* call high-level handlers */
1186 		if (v & MVXPSEC_INT_ACCTDMA)
1187 			mvxpsec_done(sc);
1188 	}
1189 
1190 	return 0;
1191 }
1192 
1193 INLINE void
1194 mvxpsec_intr_cleanup(struct mvxpsec_softc *sc)
1195 {
1196 	struct mvxpsec_packet *mv_p;
1197 
1198 	/* must be called with sc->sc_dma_mtx held */
1199 	KASSERT(mutex_owned(&sc->sc_dma_mtx));
1200 
1201 	/*
1202 	 * there is only one interrupt handler for the run_queue,
1203 	 * so nothing else touches sc_run_queue.
1204 	 */
1205 	SIMPLEQ_FOREACH(mv_p, &sc->sc_run_queue, queue)
1206 		mvxpsec_dma_free(sc, &mv_p->dma_ring);
1207 }
1208 
1209 /*
1210  * Acknowledge the interrupt
1211  *
1212  * read the cause bits, clear them, and return them.
1213  * NOTE: multiple cause bits may be returned at once.
1214  */
1215 STATIC uint32_t
1216 mvxpsec_intr_ack(struct mvxpsec_softc *sc)
1217 {
1218 	uint32_t reg;
1219 
1220 	reg  = MVXPSEC_READ(sc, MVXPSEC_INT_CAUSE);
1221 	reg &= MVXPSEC_DEFAULT_INT;
1222 	MVXPSEC_WRITE(sc, MVXPSEC_INT_CAUSE, ~reg);
1223 	MVXPSEC_PRINTF(MVXPSEC_DEBUG_INTR, "Int: %s\n", s_xpsecintr(reg));
1224 
1225 	return reg;
1226 }
1227 
1228 /*
1229  * Entry of TDMA error interrupt handler
1230  *
1231  * register this to kernel via marvell_intr_establish()
1232  */
1233 int
1234 mvxpsec_eintr(void *arg)
1235 {
1236 	struct mvxpsec_softc *sc = arg;
1237 	uint32_t err;
1238 
1239 	/* IPL_NET */
1240 again:
1241 	err = mvxpsec_eintr_ack(sc);
1242 	if (err == 0)
1243 		goto done;
1244 
1245 	log(LOG_ERR, "%s: DMA Error Interrupt: %s\n", __func__,
1246 	    s_errreg(err));
1247 #ifdef MVXPSEC_DEBUG
1248 	mvxpsec_dump_reg(sc);
1249 #endif
1250 
1251 	goto again;
1252 done:
1253 	return 0;
1254 }
1255 
1256 /*
1257  * Acknowledge the TDMA error interrupt
1258  *
1259  * read the cause bits, clear them, and return them.
1260  * NOTE: multiple cause bits may be returned at once.
1261  */
1262 STATIC uint32_t
1263 mvxpsec_eintr_ack(struct mvxpsec_softc *sc)
1264 {
1265 	uint32_t reg;
1266 
1267 	reg  = MVXPSEC_READ(sc, MV_TDMA_ERR_CAUSE);
1268 	reg &= MVXPSEC_DEFAULT_ERR;
1269 	MVXPSEC_WRITE(sc, MV_TDMA_ERR_CAUSE, ~reg);
1270 	MVXPSEC_PRINTF(MVXPSEC_DEBUG_INTR, "Int: %s\n", s_xpsecintr(reg));
1271 
1272 	return reg;
1273 }
1274 
1275 /*
1276  * Interrupt statistics
1277  *
1278  * these are NOT statistics of how many times the events 'occurred';
1279  * they only count how many times the events were 'handled'.
1280  */
1281 INLINE void
1282 mvxpsec_intr_cnt(struct mvxpsec_softc *sc, int cause)
1283 {
1284 	MVXPSEC_EVCNT_INCR(sc, intr_all);
1285 	if (cause & MVXPSEC_INT_AUTH)
1286 		MVXPSEC_EVCNT_INCR(sc, intr_auth);
1287 	if (cause & MVXPSEC_INT_DES)
1288 		MVXPSEC_EVCNT_INCR(sc, intr_des);
1289 	if (cause & MVXPSEC_INT_AES_ENC)
1290 		MVXPSEC_EVCNT_INCR(sc, intr_aes_enc);
1291 	if (cause & MVXPSEC_INT_AES_DEC)
1292 		MVXPSEC_EVCNT_INCR(sc, intr_aes_dec);
1293 	if (cause & MVXPSEC_INT_ENC)
1294 		MVXPSEC_EVCNT_INCR(sc, intr_enc);
1295 	if (cause & MVXPSEC_INT_SA)
1296 		MVXPSEC_EVCNT_INCR(sc, intr_sa);
1297 	if (cause & MVXPSEC_INT_ACCTDMA)
1298 		MVXPSEC_EVCNT_INCR(sc, intr_acctdma);
1299 	if (cause & MVXPSEC_INT_TDMA_COMP)
1300 		MVXPSEC_EVCNT_INCR(sc, intr_comp);
1301 	if (cause & MVXPSEC_INT_TDMA_OWN)
1302 		MVXPSEC_EVCNT_INCR(sc, intr_own);
1303 	if (cause & MVXPSEC_INT_ACCTDMA_CONT)
1304 		MVXPSEC_EVCNT_INCR(sc, intr_acctdma_cont);
1305 }
1306 
1307 /*
1308  * Setup MVXPSEC header structure.
1309  *
1310  * the header contains descriptor of security accelerator,
1311  * key material of ciphers, IVs of ciphers and MACs, ...
1312  *
1313  * the header is transferred to MVXPSEC Internal SRAM by TDMA,
1314  * and parsed by MVXPSEC H/W.
1315  */
1316 STATIC int
1317 mvxpsec_header_finalize(struct mvxpsec_packet *mv_p)
1318 {
1319 	struct mvxpsec_acc_descriptor *desc = &mv_p->pkt_header.desc;
1320 	int enc_start, enc_len, iv_offset;
1321 	int mac_start, mac_len, mac_offset;
1322 
1323 	/* offset -> device address */
1324 	enc_start = MVXPSEC_SRAM_PAYLOAD_DA(mv_p->enc_off);
1325 	enc_len = mv_p->enc_len;
1326 	if (mv_p->flags & CRP_EXT_IV)
1327 		iv_offset = mv_p->enc_ivoff;
1328 	else
1329 		iv_offset = MVXPSEC_SRAM_PAYLOAD_DA(mv_p->enc_ivoff);
1330 	mac_start = MVXPSEC_SRAM_PAYLOAD_DA(mv_p->mac_off);
1331 	mac_len = mv_p->mac_len;
1332 	mac_offset = MVXPSEC_SRAM_PAYLOAD_DA(mv_p->mac_dst);
1333 
1334 	MVXPSEC_PRINTF(MVXPSEC_DEBUG_OPENCRYPTO,
1335 	    "PAYLOAD at 0x%08x\n", (int)MVXPSEC_SRAM_PAYLOAD_OFF);
1336 	MVXPSEC_PRINTF(MVXPSEC_DEBUG_OPENCRYPTO,
1337 	    "ENC from 0x%08x\n", enc_start);
1338 	MVXPSEC_PRINTF(MVXPSEC_DEBUG_OPENCRYPTO,
1339 	    "MAC from 0x%08x\n", mac_start);
1340 	MVXPSEC_PRINTF(MVXPSEC_DEBUG_OPENCRYPTO,
1341 	    "MAC to 0x%08x\n", mac_offset);
1342 	MVXPSEC_PRINTF(MVXPSEC_DEBUG_OPENCRYPTO,
1343 	    "ENC IV at 0x%08x\n", iv_offset);
1344 
1345 	/* setup device addresses in Security Accelerator Descriptors */
1346 	desc->acc_encdata = MV_ACC_DESC_ENC_DATA(enc_start, enc_start);
1347 	desc->acc_enclen = MV_ACC_DESC_ENC_LEN(enc_len);
1348 	if (desc->acc_config & MV_ACC_CRYPTO_DECRYPT)
1349 		desc->acc_enckey =
1350 		    MV_ACC_DESC_ENC_KEY(MVXPSEC_SRAM_KEY_D_DA);
1351 	else
1352 		desc->acc_enckey =
1353 		    MV_ACC_DESC_ENC_KEY(MVXPSEC_SRAM_KEY_DA);
1354 	desc->acc_enciv =
1355 	    MV_ACC_DESC_ENC_IV(MVXPSEC_SRAM_IV_WORK_DA, iv_offset);
1356 
1357 	desc->acc_macsrc = MV_ACC_DESC_MAC_SRC(mac_start, mac_len);
1358 	desc->acc_macdst = MV_ACC_DESC_MAC_DST(mac_offset, mac_len);
1359 	desc->acc_maciv =
1360 	    MV_ACC_DESC_MAC_IV(MVXPSEC_SRAM_MIV_IN_DA,
1361 	        MVXPSEC_SRAM_MIV_OUT_DA);
1362 
1363 	return 0;
1364 }
1365 
1366 /*
1367  * constructor of the session structure.
1368  *
1369  * this constructor will be called by the pool_cache framework.
1370  */
1371 STATIC int
1372 mvxpsec_session_ctor(void *arg, void *obj, int flags)
1373 {
1374 	struct mvxpsec_softc *sc = arg;
1375 	struct mvxpsec_session *mv_s = obj;
1376 
1377 	/* pool is owned by softc */
1378 	mv_s->sc = sc;
1379 
1380 	/* Create and load DMA map for session header */
1381 	mv_s->session_header_map = 0;
1382 	if (bus_dmamap_create(sc->sc_dmat,
1383 	    sizeof(mv_s->session_header), 1,
1384 	    sizeof(mv_s->session_header), 0,
1385 	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1386 	    &mv_s->session_header_map)) {
1387 		log(LOG_ERR, "%s: cannot create DMA map\n", __func__);
1388 		goto fail;
1389 	}
1390 	if (bus_dmamap_load(sc->sc_dmat, mv_s->session_header_map,
1391 	    &mv_s->session_header, sizeof(mv_s->session_header),
1392 	    NULL, BUS_DMA_NOWAIT)) {
1393 		log(LOG_ERR, "%s: cannot load header\n", __func__);
1394 		goto fail;
1395 	}
1396 
1397 	return 0;
1398 fail:
1399 	if (mv_s->session_header_map)
1400 		bus_dmamap_destroy(sc->sc_dmat, mv_s->session_header_map);
1401 	return ENOMEM;
1402 }
1403 
1404 /*
1405  * destructor of the session structure.
1406  *
1407  * this destructor will be called by the pool_cache framework.
1408  */
1409 STATIC void
1410 mvxpsec_session_dtor(void *arg, void *obj)
1411 {
1412 	struct mvxpsec_softc *sc = arg;
1413 	struct mvxpsec_session *mv_s = obj;
1414 
1415 	if (mv_s->sc != sc)
1416 		panic("inconsistent context\n");
1417 
1418 	bus_dmamap_destroy(sc->sc_dmat, mv_s->session_header_map);
1419 }
1420 
1421 /*
1422  * constructor of packet structure.
1423  */
1424 STATIC int
1425 mvxpsec_packet_ctor(void *arg, void *obj, int flags)
1426 {
1427 	struct mvxpsec_softc *sc = arg;
1428 	struct mvxpsec_packet *mv_p = obj;
1429 
1430 	mv_p->dma_ring.dma_head = NULL;
1431 	mv_p->dma_ring.dma_last = NULL;
1432 	mv_p->dma_ring.dma_size = 0;
1433 
1434 	/* Create and load DMA map for packet header */
1435 	mv_p->pkt_header_map = 0;
1436 	if (bus_dmamap_create(sc->sc_dmat,
1437 	    sizeof(mv_p->pkt_header), 1, sizeof(mv_p->pkt_header), 0,
1438 	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1439 	    &mv_p->pkt_header_map)) {
1440 		log(LOG_ERR, "%s: cannot create DMA map\n", __func__);
1441 		goto fail;
1442 	}
1443 	if (bus_dmamap_load(sc->sc_dmat, mv_p->pkt_header_map,
1444 	    &mv_p->pkt_header, sizeof(mv_p->pkt_header),
1445 	    NULL, BUS_DMA_NOWAIT)) {
1446 		log(LOG_ERR, "%s: cannot load header\n", __func__);
1447 		goto fail;
1448 	}
1449 
1450 	/* Create DMA map for session data. */
1451 	mv_p->data_map = 0;
1452 	if (bus_dmamap_create(sc->sc_dmat,
1453 	    MVXPSEC_DMA_MAX_SIZE, MVXPSEC_DMA_MAX_SEGS, MVXPSEC_DMA_MAX_SIZE,
1454 	    0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &mv_p->data_map)) {
1455 		log(LOG_ERR, "%s: cannot create DMA map\n", __func__);
1456 		goto fail;
1457 	}
1458 
1459 	return 0;
1460 fail:
1461 	if (mv_p->pkt_header_map)
1462 		bus_dmamap_destroy(sc->sc_dmat, mv_p->pkt_header_map);
1463 	if (mv_p->data_map)
1464 		bus_dmamap_destroy(sc->sc_dmat, mv_p->data_map);
1465 	return ENOMEM;
1466 }
1467 
1468 /*
1469  * destructor of the packet structure.
1470  */
1471 STATIC void
1472 mvxpsec_packet_dtor(void *arg, void *obj)
1473 {
1474 	struct mvxpsec_softc *sc = arg;
1475 	struct mvxpsec_packet *mv_p = obj;
1476 
1477 	mutex_enter(&sc->sc_dma_mtx);
1478 	mvxpsec_dma_free(sc, &mv_p->dma_ring);
1479 	mutex_exit(&sc->sc_dma_mtx);
1480 	bus_dmamap_destroy(sc->sc_dmat, mv_p->pkt_header_map);
1481 	bus_dmamap_destroy(sc->sc_dmat, mv_p->data_map);
1482 }
1483 
1484 /*
1485  * allocate a new session structure.
1486  */
1487 STATIC struct mvxpsec_session *
1488 mvxpsec_session_alloc(struct mvxpsec_softc *sc)
1489 {
1490 	struct mvxpsec_session *mv_s;
1491 
1492 	mv_s = pool_cache_get(sc->sc_session_pool, PR_NOWAIT);
1493 	if (mv_s == NULL) {
1494 		log(LOG_ERR, "%s: cannot allocate memory\n", __func__);
1495 		return NULL;
1496 	}
1497 	mv_s->refs = 1; /* 0 means the session is already invalid */
1498 	mv_s->sflags = 0;
1499 
1500 	return mv_s;
1501 }
1502 
1503 /*
1504  * deallocate session structure.
1505  */
1506 STATIC void
1507 mvxpsec_session_dealloc(struct mvxpsec_session *mv_s)
1508 {
1509 	struct mvxpsec_softc *sc = mv_s->sc;
1510 
1511 	mv_s->sflags |= DELETED;
1512 	mvxpsec_session_unref(mv_s);
1513 	crypto_unblock(sc->sc_cid, CRYPTO_SYMQ|CRYPTO_ASYMQ);
1514 
1515 	return;
1516 }
1517 
1518 STATIC int
1519 mvxpsec_session_ref(struct mvxpsec_session *mv_s)
1520 {
1521 	uint32_t refs;
1522 
1523 	if (mv_s->sflags & DELETED) {
1524 		log(LOG_ERR,
1525 		    "%s: session is already deleted.\n", __func__);
1526 		return -1;
1527 	}
1528 
1529 	refs = atomic_inc_32_nv(&mv_s->refs);
1530 	if (refs == 1) {
1531 		/*
1532 		 * a session with refs == 0 is
1533 		 * already invalidated. revert it.
1534 		 * XXX: use CAS ?
1535 		 */
1536 		atomic_dec_32(&mv_s->refs);
1537 		log(LOG_ERR,
1538 		    "%s: session is already invalidated.\n", __func__);
1539 		return -1;
1540 	}
1541 
1542 	return 0;
1543 }
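/*
 * A sketch of the CAS-based variant suggested by the XXX comment
 * above, assuming atomic_cas_32() from <sys/atomic.h> (illustration
 * only; the driver uses the inc-then-revert form above).  Retry until
 * the count is observed as 0 (invalid) or incremented atomically,
 * which closes the inc-then-revert window.
 */
_INLINE int
mvxpsec_session_ref_cas(struct mvxpsec_session *mv_s)
{
	uint32_t old;

	do {
		old = mv_s->refs;
		if (old == 0)
			return -1;	/* already invalidated */
	} while (atomic_cas_32(&mv_s->refs, old, old + 1) != old);

	return 0;
}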
1544 
1545 STATIC void
1546 mvxpsec_session_unref(struct mvxpsec_session *mv_s)
1547 {
1548 	uint32_t refs;
1549 
1550 	refs = atomic_dec_32_nv(&mv_s->refs);
1551 	if (refs == 0)
1552 		pool_cache_put(mv_s->sc->sc_session_pool, mv_s);
1553 }
1554 
1555 /*
1556  * look up whether the session exists
1557  */
1558 INLINE struct mvxpsec_session *
1559 mvxpsec_session_lookup(struct mvxpsec_softc *sc, int sid)
1560 {
1561 	struct mvxpsec_session *mv_s;
1562 	int session;
1563 
1564 	/* must be called with sc->sc_session_mtx held */
1565 	KASSERT(mutex_owned(&sc->sc_session_mtx));
1566 
1567 	session = MVXPSEC_SESSION(sid);
1568 	if (__predict_false(session >= MVXPSEC_MAX_SESSIONS)) {
1569 		log(LOG_ERR, "%s: session number too large %d\n",
1570 		    __func__, session);
1571 		return NULL;
1572 	}
1573 	if (__predict_false( (mv_s = sc->sc_sessions[session]) == NULL)) {
1574 		log(LOG_ERR, "%s: invalid session %d\n",
1575 		    __func__, session);
1576 		return NULL;
1577 	}
1578 
1579 	KASSERT(mv_s->sid == session);
1580 
1581 	return mv_s;
1582 }
1583 
1584 /*
1585  * allocate a new packet structure.
1586  */
1587 STATIC struct mvxpsec_packet *
1588 mvxpsec_packet_alloc(struct mvxpsec_session *mv_s)
1589 {
1590 	struct mvxpsec_softc *sc = mv_s->sc;
1591 	struct mvxpsec_packet *mv_p;
1592 
1593 	/* must be called with sc->sc_queue_mtx held. */
1594 	KASSERT(mutex_owned(&sc->sc_queue_mtx));
1595 	/* must be called with sc->sc_session_mtx held. */
1596 	KASSERT(mutex_owned(&sc->sc_session_mtx));
1597 
1598 	if (mvxpsec_session_ref(mv_s) < 0) {
1599 		log(LOG_ERR, "%s: invalid session.\n", __func__);
1600 		return NULL;
1601 	}
1602 
1603 	if ( (mv_p = SLIST_FIRST(&sc->sc_free_list)) != NULL) {
1604 		SLIST_REMOVE_HEAD(&sc->sc_free_list, free_list);
1605 		sc->sc_free_qlen--;
1606 	}
1607 	else {
1608 		mv_p = pool_cache_get(sc->sc_packet_pool, PR_NOWAIT);
1609 		if (mv_p == NULL) {
1610 			log(LOG_ERR, "%s: cannot allocate memory\n",
1611 			    __func__);
1612 			mvxpsec_session_unref(mv_s);
1613 			return NULL;
1614 		}
1615 	}
1616 	mv_p->mv_s = mv_s;
1617 	mv_p->flags = 0;
1618 	mv_p->data_ptr = NULL;
1619 
1620 	return mv_p;
1621 }
1622 
1623 /*
1624  * free packet structure.
1625  */
1626 STATIC void
1627 mvxpsec_packet_dealloc(struct mvxpsec_packet *mv_p)
1628 {
1629 	struct mvxpsec_session *mv_s = mv_p->mv_s;
1630 	struct mvxpsec_softc *sc = mv_s->sc;
1631 
1632 	/* must be called with sc->sc_queue_mtx held */
1633 	KASSERT(mutex_owned(&sc->sc_queue_mtx));
1634 
1635 	if (mv_p->dma_ring.dma_size != 0) {
1636 		sc->sc_desc_ring_cons += mv_p->dma_ring.dma_size;
1637 	}
1638 	mv_p->dma_ring.dma_head = NULL;
1639 	mv_p->dma_ring.dma_last = NULL;
1640 	mv_p->dma_ring.dma_size = 0;
1641 
1642 	if (mv_p->data_map) {
1643 		if (mv_p->flags & RDY_DATA) {
1644 			bus_dmamap_unload(sc->sc_dmat, mv_p->data_map);
1645 			mv_p->flags &= ~RDY_DATA;
1646 		}
1647 	}
1648 
1649 	if (sc->sc_free_qlen > sc->sc_wait_qlimit)
1650 		pool_cache_put(sc->sc_packet_pool, mv_p);
1651 	else {
1652 		SLIST_INSERT_HEAD(&sc->sc_free_list, mv_p, free_list);
1653 		sc->sc_free_qlen++;
1654 	}
1655 	mvxpsec_session_unref(mv_s);
1656 }
1657 
1658 INLINE void
1659 mvxpsec_packet_enqueue(struct mvxpsec_packet *mv_p)
1660 {
1661 	struct mvxpsec_softc *sc = mv_p->mv_s->sc;
1662 	struct mvxpsec_packet *last_packet;
1663 	struct mvxpsec_descriptor_handle *cur_dma, *prev_dma;
1664 
1665 	/* must be called with sc->sc_queue_mtx held */
1666 	KASSERT(mutex_owned(&sc->sc_queue_mtx));
1667 
1668 	if (sc->sc_wait_qlen == 0) {
1669 		SIMPLEQ_INSERT_TAIL(&sc->sc_wait_queue, mv_p, queue);
1670 		sc->sc_wait_qlen++;
1671 		mv_p->flags |= SETUP_DONE;
1672 		return;
1673 	}
1674 
1675 	last_packet = SIMPLEQ_LAST(&sc->sc_wait_queue, mvxpsec_packet, queue);
1676 	SIMPLEQ_INSERT_TAIL(&sc->sc_wait_queue, mv_p, queue);
1677 	sc->sc_wait_qlen++;
1678 
1679 	/* chain the DMA */
1680 	cur_dma = mv_p->dma_ring.dma_head;
1681 	prev_dma = last_packet->dma_ring.dma_last;
1682 	mvxpsec_dma_cat(sc, prev_dma, cur_dma);
1683 	mv_p->flags |= SETUP_DONE;
1684 }
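/*
 * For illustration, after two packets are enqueued the wait queue
 * holds one continuous TDMA descriptor list:
 *
 *	pkt0: d0 -> d1 -> ... -> dACC
 *	                           \   (mvxpsec_dma_cat() above)
 *	pkt1:                       d0 -> d1 -> ... -> dACC
 *
 * the terminating null link of the whole list comes from
 * mvxpsec_dma_finalize().
 */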
1685 
1686 /*
1687  * called by interrupt handler
1688  */
1689 STATIC int
1690 mvxpsec_done_packet(struct mvxpsec_packet *mv_p)
1691 {
1692 	struct mvxpsec_session *mv_s = mv_p->mv_s;
1693 	struct mvxpsec_softc *sc = mv_s->sc;
1694 
1695 	KASSERT((mv_p->flags & RDY_DATA));
1696 	KASSERT((mv_p->flags & SETUP_DONE));
1697 
1698 	/* unload data */
1699 	bus_dmamap_sync(sc->sc_dmat, mv_p->data_map,
1700 	    0, mv_p->data_len,
1701 	    BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
1702 	bus_dmamap_unload(sc->sc_dmat, mv_p->data_map);
1703 	mv_p->flags &= ~RDY_DATA;
1704 
1705 #ifdef MVXPSEC_DEBUG
1706 	if (mvxpsec_debug != 0) {
1707 		int s;
1708 
1709 		bus_dmamap_sync(sc->sc_dmat, mv_p->pkt_header_map,
1710 		    0, sizeof(mv_p->pkt_header),
1711 		    BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
1712 		bus_dmamap_sync(sc->sc_dmat, mv_s->session_header_map,
1713 		    0, sizeof(mv_s->session_header),
1714 		    BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
1715 
1716 		if (mvxpsec_debug & MVXPSEC_DEBUG_OPENCRYPTO) {
1717 			char buf[1500];
1718 			struct mbuf *m;
1719 			struct uio *uio;
1720 			size_t len;
1721 
1722 			switch (mv_p->data_type) {
1723 			case MVXPSEC_DATA_MBUF:
1724 				m = mv_p->data_mbuf;
1725 				len = m->m_pkthdr.len;
1726 				if (len > sizeof(buf))
1727 					len = sizeof(buf);
1728 				m_copydata(m, 0, len, buf);
1729 				break;
1730 			case MVXPSEC_DATA_UIO:
1731 				uio = mv_p->data_uio;
1732 				len = uio->uio_resid;
1733 				if (len > sizeof(buf))
1734 					len = sizeof(buf);
1735 				cuio_copydata(uio, 0, len, buf);
1736 				break;
1737 			default:
1738 				len = 0;
1739 			}
1740 			if (len > 0)
1741 				mvxpsec_dump_data(__func__, buf, len);
1742 		}
1743 
1744 		if (mvxpsec_debug & MVXPSEC_DEBUG_PAYLOAD) {
1745 			MVXPSEC_PRINTF(MVXPSEC_DEBUG_PAYLOAD,
1746 			    "%s: session_descriptor:\n", __func__);
1747 			mvxpsec_dump_packet_desc(__func__, mv_p);
1748 			MVXPSEC_PRINTF(MVXPSEC_DEBUG_PAYLOAD,
1749 			    "%s: session_data:\n", __func__);
1750 			mvxpsec_dump_packet_data(__func__, mv_p);
1751 		}
1752 
1753 		if (mvxpsec_debug & MVXPSEC_DEBUG_SRAM) {
1754 			MVXPSEC_PRINTF(MVXPSEC_DEBUG_SRAM,
1755 			    "%s: SRAM\n", __func__);
1756 			mvxpsec_dump_sram(__func__, sc, 2000);
1757 		}
1758 
1759 		s = MVXPSEC_READ(sc, MV_ACC_STATUS);
1760 		if (s & MV_ACC_STATUS_MAC_ERR) {
1761 			MVXPSEC_PRINTF(MVXPSEC_DEBUG_INTR,
1762 			    "%s: Message Authentication Failed.\n", __func__);
1763 		}
1764 	}
1765 #endif
1766 
1767 	/* copy back IV */
1768 	if (mv_p->flags & CRP_EXT_IV) {
1769 		memcpy(mv_p->ext_iv,
1770 		    &mv_p->pkt_header.crp_iv_ext, mv_p->ext_ivlen);
1771 		mv_p->ext_iv = NULL;
1772 		mv_p->ext_ivlen = 0;
1773 	}
1774 
1775 	/* notify opencrypto */
1776 	mv_p->crp->crp_etype = 0;
1777 	crypto_done(mv_p->crp);
1778 	mv_p->crp = NULL;
1779 
1780 	/* unblock driver */
1781 	mvxpsec_packet_dealloc(mv_p);
1782 	crypto_unblock(sc->sc_cid, CRYPTO_SYMQ|CRYPTO_ASYMQ);
1783 
1784 	MVXPSEC_EVCNT_INCR(sc, packet_ok);
1785 
1786 	return 0;
1787 }
1788 
1789 
1790 /*
1791  * Opencrypto API registration
1792  */
1793 int
1794 mvxpsec_register(struct mvxpsec_softc *sc)
1795 {
1796 	int oplen = SRAM_PAYLOAD_SIZE;
1797 	int flags = 0;
1798 	int err;
1799 
1800 	sc->sc_nsessions = 0;
1801 	sc->sc_cid = crypto_get_driverid(0);
1802 	if (sc->sc_cid < 0) {
1803 		log(LOG_ERR,
1804 		    "%s: crypto_get_driverid() failed.\n", __func__);
1805 		err = EINVAL;
1806 		goto done;
1807 	}
1808 
1809 	/* Ciphers */
1810 	err = crypto_register(sc->sc_cid, CRYPTO_DES_CBC, oplen, flags,
1811 	    mvxpsec_newsession, mvxpsec_freesession, mvxpsec_dispatch, sc);
1812 	if (err)
1813 		goto done;
1814 
1815 	err = crypto_register(sc->sc_cid, CRYPTO_3DES_CBC, oplen, flags,
1816 	    mvxpsec_newsession, mvxpsec_freesession, mvxpsec_dispatch, sc);
1817 	if (err)
1818 		goto done;
1819 
1820 	err = crypto_register(sc->sc_cid, CRYPTO_AES_CBC, oplen, flags,
1821 	    mvxpsec_newsession, mvxpsec_freesession, mvxpsec_dispatch, sc);
1822 	if (err)
1823 		goto done;
1824 
1825 	/* MACs */
1826 	err = crypto_register(sc->sc_cid, CRYPTO_SHA1_HMAC_96,
1827 	    oplen, flags,
1828 	    mvxpsec_newsession, mvxpsec_freesession, mvxpsec_dispatch, sc);
1829 	if (err)
1830 		goto done;
1831 
1832 	err = crypto_register(sc->sc_cid, CRYPTO_MD5_HMAC_96,
1833 	    oplen, flags,
1834 	    mvxpsec_newsession, mvxpsec_freesession, mvxpsec_dispatch, sc);
1835 	if (err)
1836 		goto done;
1837 
1838 #ifdef DEBUG
1839 	log(LOG_DEBUG,
1840 	    "%s: registered to opencrypto(max data = %d bytes)\n",
1841 	    device_xname(sc->sc_dev), oplen);
1842 #endif
1843 
1844 	err = 0;
1845 done:
1846 	return err;
1847 }
1848 
1849 /*
1850  * Create new opencrypto session
1851  *
1852  *   - register cipher key, mac key.
1853  *   - initialize mac internal state.
1854  */
1855 int
1856 mvxpsec_newsession(void *arg, uint32_t *sidp, struct cryptoini *cri)
1857 {
1858 	struct mvxpsec_softc *sc = arg;
1859 	struct mvxpsec_session *mv_s = NULL;
1860 	struct cryptoini *c;
1861 	static int hint = 0;
1862 	int session = -1;
1863 	int sid;
1864 	int err;
1865 	int i;
1866 
1867 	/* allocate driver session context */
1868 	mv_s = mvxpsec_session_alloc(sc);
1869 	if (mv_s == NULL)
1870 		return ENOMEM;
1871 
1872 	/*
1873 	 * lookup opencrypto session table
1874 	 *
1875 	 * we hold sc->sc_session_mtx from here on.
1876 	 */
1877 	mutex_enter(&sc->sc_session_mtx);
1878 	if (sc->sc_nsessions >= MVXPSEC_MAX_SESSIONS) {
1879 		mutex_exit(&sc->sc_session_mtx);
1880 		log(LOG_ERR, "%s: too many IPsec SAs (max %d)\n",
1881 		    __func__, MVXPSEC_MAX_SESSIONS);
1882 		mvxpsec_session_dealloc(mv_s);
1883 		return ENOMEM;
1884 	}
1885 	for (i = hint; i < MVXPSEC_MAX_SESSIONS; i++) {
1886 		if (sc->sc_sessions[i])
1887 			continue;
1888 		session = i;
1889 		hint = session + 1;
1890 		break;
1891 	}
1892 	if (session < 0) {
1893 		for (i = 0; i < hint; i++) {
1894 			if (sc->sc_sessions[i])
1895 				continue;
1896 			session = i;
1897 			hint = session + 1;
1898 			break;
1899 		}
1900 		if (session < 0) {
1901 			mutex_exit(&sc->sc_session_mtx);
1902 			/* session full */
1903 			log(LOG_ERR, "%s: too many IPsec SAs (max %d)\n",
1904 			    __func__, MVXPSEC_MAX_SESSIONS);
1905 			mvxpsec_session_dealloc(mv_s);
1906 			hint = 0;
1907 			return ENOMEM;
1908 		}
1909 	}
1910 	if (hint >= MVXPSEC_MAX_SESSIONS)
1911 		hint = 0;
1912 	sc->sc_nsessions++;
1913 	sc->sc_sessions[session] = mv_s;
1914 #ifdef DEBUG
1915 	log(LOG_DEBUG, "%s: new session %d allocated\n", __func__, session);
1916 #endif
1917 
1918 	sid = MVXPSEC_SID(device_unit(sc->sc_dev), session);
1919 	mv_s->sid = sid;
1920 
1921 	/* setup the session key ... */
1922 	for (c = cri; c; c = c->cri_next) {
1923 		switch (c->cri_alg) {
1924 		case CRYPTO_DES_CBC:
1925 		case CRYPTO_3DES_CBC:
1926 		case CRYPTO_AES_CBC:
1927 			/* key */
1928 			if (mvxpsec_key_precomp(c->cri_alg,
1929 			    c->cri_key, c->cri_klen,
1930 			    &mv_s->session_header.crp_key,
1931 			    &mv_s->session_header.crp_key_d)) {
1932 				log(LOG_ERR,
1933 				    "%s: Invalid HMAC key for %s.\n",
1934 				    __func__, s_ctlalg(c->cri_alg));
1935 				err = EINVAL;
1936 				goto fail;
1937 			}
1938 			if (mv_s->sflags & RDY_CRP_KEY) {
1939 				log(LOG_WARNING,
1940 				    "%s: overwrite cipher: %s->%s.\n",
1941 				    __func__,
1942 				    s_ctlalg(mv_s->cipher_alg),
1943 				    s_ctlalg(c->cri_alg));
1944 			}
1945 			mv_s->sflags |= RDY_CRP_KEY;
1946 			mv_s->enc_klen = c->cri_klen;
1947 			mv_s->cipher_alg = c->cri_alg;
1948 			/* create per session IV (compatible with KAME IPsec) */
1949 			cprng_fast(&mv_s->session_iv, sizeof(mv_s->session_iv));
1950 			mv_s->sflags |= RDY_CRP_IV;
1951 			break;
1952 		case CRYPTO_SHA1_HMAC_96:
1953 		case CRYPTO_MD5_HMAC_96:
1954 			/* key */
1955 			if (mvxpsec_hmac_precomp(c->cri_alg,
1956 			    c->cri_key, c->cri_klen,
1957 			    (uint32_t *)&mv_s->session_header.miv_in,
1958 			    (uint32_t *)&mv_s->session_header.miv_out)) {
1959 				log(LOG_ERR,
1960 				    "%s: Invalid MAC key\n", __func__);
1961 				err = EINVAL;
1962 				goto fail;
1963 			}
1964 			if (mv_s->sflags & RDY_MAC_KEY ||
1965 			    mv_s->sflags & RDY_MAC_IV) {
1966 				log(LOG_ERR,
1967 				    "%s: overwrite HMAC: %s->%s.\n",
1968 				    __func__, s_ctlalg(mv_s->hmac_alg),
1969 				    s_ctlalg(c->cri_alg));
1970 			}
1971 			mv_s->sflags |= RDY_MAC_KEY;
1972 			mv_s->sflags |= RDY_MAC_IV;
1973 
1974 			mv_s->mac_klen = c->cri_klen;
1975 			mv_s->hmac_alg = c->cri_alg;
1976 			break;
1977 		default:
1978 			log(LOG_ERR, "%s: Unknown algorithm %d\n",
1979 			    __func__, c->cri_alg);
1980 			err = EINVAL;
1981 			goto fail;
1982 		}
1983 	}
1984 	MVXPSEC_PRINTF(MVXPSEC_DEBUG_OPENCRYPTO,
1985 	    "H/W Crypto session (id:%u) added.\n", session);
1986 
1987 	*sidp = sid;
1988 	MVXPSEC_EVCNT_INCR(sc, session_new);
1989 	mutex_exit(&sc->sc_session_mtx);
1990 
1991 	/* sync session header (it's never touched after here) */
1992 	bus_dmamap_sync(sc->sc_dmat,
1993 	    mv_s->session_header_map,
1994 	    0, sizeof(mv_s->session_header),
1995 	    BUS_DMASYNC_PREWRITE);
1996 
1997 	return 0;
1998 
1999 fail:
2000 	sc->sc_nsessions--;
2001 	sc->sc_sessions[session] = NULL;
2002 	hint = session;
2003 	if (mv_s)
2004 		mvxpsec_session_dealloc(mv_s);
2005 	log(LOG_WARNING,
2006 	    "%s: Failed to add H/W crypto sessoin (id:%u): err=%d\n",
2007 	   __func__, session, err);
2008 
2009 	mutex_exit(&sc->sc_session_mtx);
2010 	return err;
2011 }
2012 
2013 /*
2014  * remove opencrypto session
2015  */
2016 int
2017 mvxpsec_freesession(void *arg, uint64_t tid)
2018 {
2019 	struct mvxpsec_softc *sc = arg;
2020 	struct mvxpsec_session *mv_s;
2021 	int session;
2022 	uint32_t sid = ((uint32_t)tid) & 0xffffffff;
2023 
2024 	session = MVXPSEC_SESSION(sid);
2025 	if (session < 0 || session >= MVXPSEC_MAX_SESSIONS) {
2026 		log(LOG_ERR, "%s: invalid session (id:%u)\n",
2027 		    __func__, session);
2028 		return EINVAL;
2029 	}
2030 
2031 	mutex_enter(&sc->sc_session_mtx);
2032 	if ((mv_s = sc->sc_sessions[session]) == NULL) {
2033 		mutex_exit(&sc->sc_session_mtx);
2034 #ifdef DEBUG
2035 		log(LOG_DEBUG, "%s: session %d already inactivated\n",
2036 		    __func__, session);
2037 #endif
2038 		return ENOENT;
2039 	}
2040 	MVXPSEC_PRINTF(MVXPSEC_DEBUG_OPENCRYPTO,
2041 	    "%s: inactivate session %d\n", __func__, session);
2042 
2043 	/* inactivate mvxpsec session */
2044 	sc->sc_sessions[session] = NULL;
2045 	sc->sc_nsessions--;
2046 	sc->sc_last_session = NULL;
2047 	mutex_exit(&sc->sc_session_mtx);
2048 
2049 	KASSERT(sc->sc_nsessions >= 0);
2050 	KASSERT(mv_s->sid == sid);
2051 
2052 	mvxpsec_session_dealloc(mv_s);
2053 	MVXPSEC_PRINTF(MVXPSEC_DEBUG_OPENCRYPTO,
2054 	    "H/W Crypto session (id: %d) deleted.\n", session);
2055 
2056 	/* force unblock opencrypto */
2057 	crypto_unblock(sc->sc_cid, CRYPTO_SYMQ|CRYPTO_ASYMQ);
2058 
2059 	MVXPSEC_EVCNT_INCR(sc, session_free);
2060 
2061 	return 0;
2062 }
2063 
2064 /*
2065  * process data with existing session
2066  */
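/*
 * Dispatch flow (sketch): look up the session for crp->crp_sid,
 * allocate a driver packet, and return ERESTART while the input
 * queue is full so the opencrypto layer requeues the request; then
 * load the payload (mbuf/uio/raw), build the TDMA descriptor chain,
 * sync the caches and enqueue.  The engine is only kicked when it
 * is idle.
 */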
2067 int
2068 mvxpsec_dispatch(void *arg, struct cryptop *crp, int hint)
2069 {
2070 	struct mvxpsec_softc *sc = arg;
2071 	struct mvxpsec_session *mv_s;
2072 	struct mvxpsec_packet *mv_p;
2073 	int q_full;
2074 	int running;
2075 	int err;
2076 
2077 	mutex_enter(&sc->sc_queue_mtx);
2078 
2079 	/*
2080 	 * lookup session
2081 	 */
2082 	mutex_enter(&sc->sc_session_mtx);
2083 	mv_s = mvxpsec_session_lookup(sc, crp->crp_sid);
2084 	if (__predict_false(mv_s == NULL)) {
2085 		err = EINVAL;
2086 		mv_p = NULL;
2087 		mutex_exit(&sc->sc_session_mtx);
2088 		goto fail;
2089 	}
2090 	mv_p = mvxpsec_packet_alloc(mv_s);
2091 	if (__predict_false(mv_p == NULL)) {
2092 		mutex_exit(&sc->sc_session_mtx);
2093 		mutex_exit(&sc->sc_queue_mtx);
2094 		return ERESTART; /* => queued in opencrypto layer */
2095 	}
2096 	mutex_exit(&sc->sc_session_mtx);
2097 
2098 	/*
2099 	 * check queue status
2100 	 */
2101 #ifdef MVXPSEC_MULTI_PACKET
2102 	q_full = (sc->sc_wait_qlen >= sc->sc_wait_qlimit) ? 1 : 0;
2103 #else
2104 	q_full = (sc->sc_wait_qlen != 0) ? 1 : 0;
2105 #endif
2106 	running = (sc->sc_flags & HW_RUNNING) ?  1: 0;
2107 	if (q_full) {
2108 		/* input queue is full. */
2109 		if (!running && sc->sc_wait_qlen > 0)
2110 			mvxpsec_dispatch_queue(sc);
2111 		MVXPSEC_EVCNT_INCR(sc, queue_full);
2112 		mvxpsec_packet_dealloc(mv_p);
2113 		mutex_exit(&sc->sc_queue_mtx);
2114 		return ERESTART; /* => queued in opencrypto layer */
2115 	}
2116 
2117 	/*
2118 	 * Load and setup packet data
2119 	 */
2120 	err = mvxpsec_packet_setcrp(mv_p, crp);
2121 	if (__predict_false(err))
2122 		goto fail;
2123 
2124 	/*
2125 	 * Setup DMA descriptor chains
2126 	 */
2127 	mutex_enter(&sc->sc_dma_mtx);
2128 	err = mvxpsec_dma_copy_packet(sc, mv_p);
2129 	mutex_exit(&sc->sc_dma_mtx);
2130 	if (__predict_false(err))
2131 		goto fail;
2132 
2133 #ifdef MVXPSEC_DEBUG
2134 	mvxpsec_dump_packet(__func__, mv_p);
2135 #endif
2136 
2137 	/*
2138 	 * Sync/inval the data cache
2139 	 */
2140 	err = mvxpsec_dma_sync_packet(sc, mv_p);
2141 	if (__predict_false(err))
2142 		goto fail;
2143 
2144 	/*
2145 	 * Enqueue the packet
2146 	 */
2147 	MVXPSEC_EVCNT_INCR(sc, dispatch_packets);
2148 #ifdef MVXPSEC_MULTI_PACKET
2149 	mvxpsec_packet_enqueue(mv_p);
2150 	if (!running)
2151 		mvxpsec_dispatch_queue(sc);
2152 #else
2153 	SIMPLEQ_INSERT_TAIL(&sc->sc_wait_queue, mv_p, queue);
2154 	sc->sc_wait_qlen++;
2155 	mv_p->flags |= SETUP_DONE;
2156 	if (!running)
2157 		mvxpsec_dispatch_queue(sc);
2158 #endif
2159 	mutex_exit(&sc->sc_queue_mtx);
2160 	return 0;
2161 
2162 fail:
2163 	/* Drop the incoming packet */
2164 	mvxpsec_drop(sc, crp, mv_p, err);
2165 	mutex_exit(&sc->sc_queue_mtx);
2166 	return 0;
2167 }
2168 
2169 /*
2170  * hand completed packets back to the IP stack
2171  */
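/*
 * Completion order matters here: the finished run queue is detached
 * and the hardware is refilled from the wait queue before any
 * crypto_done() callback runs, so the engine keeps working while
 * results are handed back.
 */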
2172 void
2173 mvxpsec_done(void *arg)
2174 {
2175 	struct mvxpsec_softc *sc = arg;
2176 	struct mvxpsec_packet *mv_p;
2177 	mvxpsec_queue_t ret_queue;
2178 	int ndone;
2179 
2180 	mutex_enter(&sc->sc_queue_mtx);
2181 
2182 	/* stop wdog timer */
2183 	callout_stop(&sc->sc_timeout);
2184 
2185 	/* refill MVXPSEC */
2186 	ret_queue = sc->sc_run_queue;
2187 	SIMPLEQ_INIT(&sc->sc_run_queue);
2188 	sc->sc_flags &= ~HW_RUNNING;
2189 	if (sc->sc_wait_qlen > 0)
2190 		mvxpsec_dispatch_queue(sc);
2191 
2192 	ndone = 0;
2193 	while ( (mv_p = SIMPLEQ_FIRST(&ret_queue)) != NULL) {
2194 		SIMPLEQ_REMOVE_HEAD(&ret_queue, queue);
2195 		mvxpsec_dma_free(sc, &mv_p->dma_ring);
2196 		mvxpsec_done_packet(mv_p);
2197 		ndone++;
2198 	}
2199 	MVXPSEC_EVCNT_MAX(sc, max_done, ndone);
2200 
2201 	mutex_exit(&sc->sc_queue_mtx);
2202 }
2203 
2204 /*
2205  * drop the packet
2206  */
2207 INLINE void
2208 mvxpsec_drop(struct mvxpsec_softc *sc, struct cryptop *crp,
2209     struct mvxpsec_packet *mv_p, int err)
2210 {
2211 	/* must be called with sc->sc_queue_mtx held */
2212 	KASSERT(mutex_owned(&sc->sc_queue_mtx));
2213 
2214 	if (mv_p)
2215 		mvxpsec_packet_dealloc(mv_p);
2216 	if (err < 0)
2217 		err = EINVAL;
2218 	crp->crp_etype = err;
2219 	crypto_done(crp);
2220 	MVXPSEC_EVCNT_INCR(sc, packet_err);
2221 
2222 	/* dispatch other packets in queue */
2223 	if (sc->sc_wait_qlen > 0 &&
2224 	    !(sc->sc_flags & HW_RUNNING))
2225 		mvxpsec_dispatch_queue(sc);
2226 
2227 	/* unblock driver for dropped packet */
2228 	crypto_unblock(sc->sc_cid, CRYPTO_SYMQ|CRYPTO_ASYMQ);
2229 }
2230 
2231 /* move wait queue entry to run queue */
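/*
 * Two-queue scheme (sketch): packets accumulate on sc_wait_queue
 * while the engine is busy; this routine moves the whole wait queue
 * onto sc_run_queue in one shot, terminates the last DMA chain,
 * points MV_TDMA_NXT at the first descriptor and fires the
 * accelerator, so a single completion can retire a burst of packets.
 */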
2232 STATIC int
2233 mvxpsec_dispatch_queue(struct mvxpsec_softc *sc)
2234 {
2235 	struct mvxpsec_packet *mv_p;
2236 	paddr_t head;
2237 	int ndispatch = 0;
2238 
2239 	/* must be called with sc->sc_queue_mtx held */
2240 	KASSERT(mutex_owned(&sc->sc_queue_mtx));
2241 
2242 	/* check there is any task */
2243 	if (__predict_false(sc->sc_flags & HW_RUNNING)) {
2244 		log(LOG_WARNING,
2245 		    "%s: another packet already exist.\n", __func__);
2246 		return 0;
2247 	}
2248 	if (__predict_false(SIMPLEQ_EMPTY(&sc->sc_wait_queue))) {
2249 		log(LOG_WARNING,
2250 		    "%s: no waiting packet yet(qlen=%d).\n",
2251 		    __func__, sc->sc_wait_qlen);
2252 		return 0;
2253 	}
2254 
2255 	/* move queue */
2256 	sc->sc_run_queue = sc->sc_wait_queue;
2257 	sc->sc_flags |= HW_RUNNING; /* cleared by intr or timeout */
2258 	SIMPLEQ_INIT(&sc->sc_wait_queue);
2259 	ndispatch = sc->sc_wait_qlen;
2260 	sc->sc_wait_qlen = 0;
2261 
2262 	/* get 1st DMA descriptor */
2263 	mv_p = SIMPLEQ_FIRST(&sc->sc_run_queue);
2264 	head = mv_p->dma_ring.dma_head->phys_addr;
2265 
2266 	/* terminate last DMA descriptor */
2267 	mv_p = SIMPLEQ_LAST(&sc->sc_run_queue, mvxpsec_packet, queue);
2268 	mvxpsec_dma_finalize(sc, &mv_p->dma_ring);
2269 
2270 	/* configure TDMA */
2271 	if (mvxpsec_dma_wait(sc) < 0) {
2272 		log(LOG_ERR, "%s: DMA DEVICE not responding", __func__);
2273 		callout_schedule(&sc->sc_timeout, hz);
2274 		return 0;
2275 	}
2276 	MVXPSEC_WRITE(sc, MV_TDMA_NXT, head);
2277 
2278 	/* trigger ACC */
2279 	if (mvxpsec_acc_wait(sc) < 0) {
2280 		log(LOG_ERR, "%s: MVXPSEC not responding", __func__);
2281 		callout_schedule(&sc->sc_timeout, hz);
2282 		return 0;
2283 	}
2284 	MVXPSEC_WRITE(sc, MV_ACC_COMMAND, MV_ACC_COMMAND_ACT);
2285 
2286 	MVXPSEC_EVCNT_MAX(sc, max_dispatch, ndispatch);
2287 	MVXPSEC_EVCNT_INCR(sc, dispatch_queue);
2288 	callout_schedule(&sc->sc_timeout, hz);
2289 	return 0;
2290 }
2291 
2292 /*
2293  * process opencrypto operations (cryptop) for packets.
2294  */
2295 INLINE int
2296 mvxpsec_parse_crd(struct mvxpsec_packet *mv_p, struct cryptodesc *crd)
2297 {
2298 	int ivlen;
2299 
2300 	KASSERT(mv_p->flags & RDY_DATA);
2301 
2302 	/* MAC & Ciphers: set data location and operation */
2303 	switch (crd->crd_alg) {
2304 	case CRYPTO_SHA1_HMAC_96:
2305 		mv_p->pkt_header.desc.acc_config |= MV_ACC_CRYPTO_MAC_96;
2306 		/* fall through */
2307 	case CRYPTO_SHA1_HMAC:
2308 		mv_p->mac_dst = crd->crd_inject;
2309 		mv_p->mac_off = crd->crd_skip;
2310 		mv_p->mac_len = crd->crd_len;
2311 		MV_ACC_CRYPTO_MAC_SET(mv_p->pkt_header.desc.acc_config,
2312 		    MV_ACC_CRYPTO_MAC_HMAC_SHA1);
2313 		mvxpsec_packet_update_op_order(mv_p, MV_ACC_CRYPTO_OP_MAC);
2314 		/* No more setup for MAC */
2315 		return 0;
2316 	case CRYPTO_MD5_HMAC_96:
2317 		mv_p->pkt_header.desc.acc_config |= MV_ACC_CRYPTO_MAC_96;
2318 		/* fall through */
2319 	case CRYPTO_MD5_HMAC:
2320 		mv_p->mac_dst = crd->crd_inject;
2321 		mv_p->mac_off = crd->crd_skip;
2322 		mv_p->mac_len = crd->crd_len;
2323 		MV_ACC_CRYPTO_MAC_SET(mv_p->pkt_header.desc.acc_config,
2324 		    MV_ACC_CRYPTO_MAC_HMAC_MD5);
2325 		mvxpsec_packet_update_op_order(mv_p, MV_ACC_CRYPTO_OP_MAC);
2326 		/* No more setup for MAC */
2327 		return 0;
2328 	case CRYPTO_DES_CBC:
2329 		mv_p->enc_ivoff = crd->crd_inject;
2330 		mv_p->enc_off = crd->crd_skip;
2331 		mv_p->enc_len = crd->crd_len;
2332 		ivlen = 8;
2333 		MV_ACC_CRYPTO_ENC_SET(mv_p->pkt_header.desc.acc_config,
2334 		    MV_ACC_CRYPTO_ENC_DES);
2335 		mv_p->pkt_header.desc.acc_config |= MV_ACC_CRYPTO_CBC;
2336 		mvxpsec_packet_update_op_order(mv_p, MV_ACC_CRYPTO_OP_ENC);
2337 		break;
2338 	case CRYPTO_3DES_CBC:
2339 		mv_p->enc_ivoff = crd->crd_inject;
2340 		mv_p->enc_off = crd->crd_skip;
2341 		mv_p->enc_len = crd->crd_len;
2342 		ivlen = 8;
2343 		MV_ACC_CRYPTO_ENC_SET(mv_p->pkt_header.desc.acc_config,
2344 		    MV_ACC_CRYPTO_ENC_3DES);
2345 		mv_p->pkt_header.desc.acc_config |= MV_ACC_CRYPTO_CBC;
2346 		mv_p->pkt_header.desc.acc_config |= MV_ACC_CRYPTO_3DES_EDE;
2347 		mvxpsec_packet_update_op_order(mv_p, MV_ACC_CRYPTO_OP_ENC);
2348 		break;
2349 	case CRYPTO_AES_CBC:
2350 		mv_p->enc_ivoff = crd->crd_inject;
2351 		mv_p->enc_off = crd->crd_skip;
2352 		mv_p->enc_len = crd->crd_len;
2353 		ivlen = 16;
2354 		MV_ACC_CRYPTO_ENC_SET(mv_p->pkt_header.desc.acc_config,
2355 		    MV_ACC_CRYPTO_ENC_AES);
2356 		MV_ACC_CRYPTO_AES_KLEN_SET(
2357 		    mv_p->pkt_header.desc.acc_config,
2358 		   mvxpsec_aesklen(mv_p->mv_s->enc_klen));
2359 		mv_p->pkt_header.desc.acc_config |= MV_ACC_CRYPTO_CBC;
2360 		mvxpsec_packet_update_op_order(mv_p, MV_ACC_CRYPTO_OP_ENC);
2361 		break;
2362 	default:
2363 		log(LOG_ERR, "%s: Unknown algorithm %d\n",
2364 		    __func__, crd->crd_alg);
2365 		return EINVAL;
2366 	}
2367 
2368 	/* Operations only for Cipher, not MAC */
2369 	if (crd->crd_flags & CRD_F_ENCRYPT) {
2370 		/* Ciphers: Originate IV for Encryption. */
2371 		mv_p->pkt_header.desc.acc_config &= ~MV_ACC_CRYPTO_DECRYPT;
2372 		mv_p->flags |= DIR_ENCRYPT;
2373 
2374 		if (crd->crd_flags & CRD_F_IV_EXPLICIT) {
2375 			MVXPSEC_PRINTF(MVXPSEC_DEBUG_ENC_IV, "EXPLICIT IV\n");
2376 			mv_p->flags |= CRP_EXT_IV;
2377 			mvxpsec_packet_write_iv(mv_p, crd->crd_iv, ivlen);
2378 			mv_p->enc_ivoff = MVXPSEC_SRAM_IV_EXT_OFF;
2379 		}
2380 		else if (crd->crd_flags & CRD_F_IV_PRESENT) {
2381 			MVXPSEC_PRINTF(MVXPSEC_DEBUG_ENC_IV, "IV is present\n");
2382 			mvxpsec_packet_copy_iv(mv_p, crd->crd_inject, ivlen);
2383 		}
2384 		else {
2385 			MVXPSEC_PRINTF(MVXPSEC_DEBUG_ENC_IV, "Create New IV\n");
2386 			mvxpsec_packet_write_iv(mv_p, NULL, ivlen);
2387 		}
2388 	}
2389 	else {
2390 		/* Ciphers: IV is loaded from crd_inject when present */
2391 		mv_p->pkt_header.desc.acc_config |= MV_ACC_CRYPTO_DECRYPT;
2392 		mv_p->flags |= DIR_DECRYPT;
2393 
2394 		if (crd->crd_flags & CRD_F_IV_EXPLICIT) {
2395 #ifdef MVXPSEC_DEBUG
2396 			if (mvxpsec_debug & MVXPSEC_DEBUG_ENC_IV) {
2397 				MVXPSEC_PRINTF(MVXPSEC_DEBUG_ENC_IV,
2398 				    "EXPLICIT IV(Decrypt)\n");
2399 				mvxpsec_dump_data(__func__, crd->crd_iv, ivlen);
2400 			}
2401 #endif
2402 			mv_p->flags |= CRP_EXT_IV;
2403 			mvxpsec_packet_write_iv(mv_p, crd->crd_iv, ivlen);
2404 			mv_p->enc_ivoff = MVXPSEC_SRAM_IV_EXT_OFF;
2405 		}
2406 	}
2407 
2408 	KASSERT(!((mv_p->flags & DIR_ENCRYPT) && (mv_p->flags & DIR_DECRYPT)));
2409 
2410 	return 0;
2411 }
2412 
2413 INLINE int
2414 mvxpsec_parse_crp(struct mvxpsec_packet *mv_p)
2415 {
2416 	struct cryptop *crp = mv_p->crp;
2417 	struct cryptodesc *crd;
2418 	int err;
2419 
2420 	KASSERT(crp);
2421 
2422 	mvxpsec_packet_reset_op(mv_p);
2423 
2424 	for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
2425 		err = mvxpsec_parse_crd(mv_p, crd);
2426 		if (err)
2427 			return err;
2428 	}
2429 
2430 	return 0;
2431 }
2432 
2433 INLINE int
2434 mvxpsec_packet_setcrp(struct mvxpsec_packet *mv_p, struct cryptop *crp)
2435 {
2436 	int err = EINVAL;
2437 
2438 	/* register crp to the MVXPSEC packet */
2439 	if (crp->crp_flags & CRYPTO_F_IMBUF) {
2440 		err = mvxpsec_packet_setmbuf(mv_p,
2441 		    (struct mbuf *)crp->crp_buf);
2442 		mv_p->crp = crp;
2443 	}
2444 	else if (crp->crp_flags & CRYPTO_F_IOV) {
2445 		err = mvxpsec_packet_setuio(mv_p,
2446 		    (struct uio *)crp->crp_buf);
2447 		mv_p->crp = crp;
2448 	}
2449 	else {
2450 		err = mvxpsec_packet_setdata(mv_p,
2451 		    crp->crp_buf, crp->crp_ilen);
2452 		mv_p->crp = crp;
2453 	}
2454 	if (__predict_false(err))
2455 		return err;
2456 
2457 	/* parse crp and setup MVXPSEC registers/descriptors */
2458 	err = mvxpsec_parse_crp(mv_p);
2459 	if (__predict_false(err))
2460 		return err;
2461 
2462 	/* fixup data offset to fit MVXPSEC internal SRAM */
2463 	err = mvxpsec_header_finalize(mv_p);
2464 	if (__predict_false(err))
2465 		return err;
2466 
2467 	return 0;
2468 }
2469 
2470 /*
2471  * load data for encrypt/decrypt/authentication
2472  *
2473  * data is raw kernel memory area.
2474  */
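/*
 * All three payload loaders (raw, mbuf, uio) end the same way: they
 * record data_type/data_len and set RDY_DATA, so the accessors
 * mvxpsec_packet_rdata()/mvxpsec_packet_wdata() below can reach the
 * payload without caring which backing store is in use.
 */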
2475 STATIC int
2476 mvxpsec_packet_setdata(struct mvxpsec_packet *mv_p,
2477     void *data, uint32_t data_len)
2478 {
2479 	struct mvxpsec_session *mv_s = mv_p->mv_s;
2480 	struct mvxpsec_softc *sc = mv_s->sc;
2481 
2482 	if (bus_dmamap_load(sc->sc_dmat, mv_p->data_map, data, data_len,
2483 	    NULL, BUS_DMA_NOWAIT)) {
2484 		log(LOG_ERR, "%s: cannot load data\n", __func__);
2485 		return -1;
2486 	}
2487 	mv_p->data_type = MVXPSEC_DATA_RAW;
2488 	mv_p->data_raw = data;
2489 	mv_p->data_len = data_len;
2490 	mv_p->flags |= RDY_DATA;
2491 
2492 	return 0;
2493 }
2494 
2495 /*
2496  * load data for encrypt/decrypt/authentication
2497  *
2498  * data is mbuf based network data.
2499  */
2500 STATIC int
2501 mvxpsec_packet_setmbuf(struct mvxpsec_packet *mv_p, struct mbuf *m)
2502 {
2503 	struct mvxpsec_session *mv_s = mv_p->mv_s;
2504 	struct mvxpsec_softc *sc = mv_s->sc;
2505 	size_t pktlen = 0;
2506 
2507 	if (__predict_true(m->m_flags & M_PKTHDR))
2508 		pktlen = m->m_pkthdr.len;
2509 	else {
2510 		struct mbuf *mp = m;
2511 
2512 		while (mp != NULL) {
2513 			pktlen += mp->m_len;
2514 			mp = mp->m_next;
2515 		}
2516 	}
2517 	if (pktlen > SRAM_PAYLOAD_SIZE) {
2518 		extern   percpu_t *espstat_percpu;
2519 		/* XXX:
2520 		 * layer violation. opencrypto knows our max packet size
2521 		 * from crypto_register(9) API.
2522 		 */
2523 
2524 		_NET_STATINC(espstat_percpu, ESP_STAT_TOOBIG);
2525 		log(LOG_ERR,
2526 		    "%s: ESP Packet too large: %zu [oct.] > %zu [oct.]\n",
2527 		    device_xname(sc->sc_dev),
2528 		    (size_t)pktlen, SRAM_PAYLOAD_SIZE);
2529 		mv_p->data_type = MVXPSEC_DATA_NONE;
2530 		mv_p->data_mbuf = NULL;
2531 		return -1;
2532 	}
2533 
2534 	if (bus_dmamap_load_mbuf(sc->sc_dmat, mv_p->data_map, m,
2535 	    BUS_DMA_NOWAIT)) {
2536 		mv_p->data_type = MVXPSEC_DATA_NONE;
2537 		mv_p->data_mbuf = NULL;
2538 		log(LOG_ERR, "%s: cannot load mbuf\n", __func__);
2539 		return -1;
2540 	}
2541 
2542 	/* set payload buffer */
2543 	mv_p->data_type = MVXPSEC_DATA_MBUF;
2544 	mv_p->data_mbuf = m;
2545 	if (m->m_flags & M_PKTHDR) {
2546 		mv_p->data_len = m->m_pkthdr.len;
2547 	}
2548 	else {
2549 		mv_p->data_len = 0;
2550 		while (m) {
2551 			mv_p->data_len += m->m_len;
2552 			m = m->m_next;
2553 		}
2554 	}
2555 	mv_p->flags |= RDY_DATA;
2556 
2557 	return 0;
2558 }
2559 
2560 STATIC int
2561 mvxpsec_packet_setuio(struct mvxpsec_packet *mv_p, struct uio *uio)
2562 {
2563 	struct mvxpsec_session *mv_s = mv_p->mv_s;
2564 	struct mvxpsec_softc *sc = mv_s->sc;
2565 
2566 	if (uio->uio_resid > SRAM_PAYLOAD_SIZE) {
2567 		extern   percpu_t *espstat_percpu;
2568 		/* XXX:
2569 		 * layer violation. opencrypto knows our max packet size
2570 		 * from crypto_register(9) API.
2571 		 */
2572 
2573 		_NET_STATINC(espstat_percpu, ESP_STAT_TOOBIG);
2574 		log(LOG_ERR,
2575 		    "%s: uio request too large: %zu [oct.] > %zu [oct.]\n",
2576 		    device_xname(sc->sc_dev),
2577 		    uio->uio_resid, SRAM_PAYLOAD_SIZE);
2578 		mv_p->data_type = MVXPSEC_DATA_NONE;
2579 		mv_p->data_mbuf = NULL;
2580 		return -1;
2581 	}
2582 
2583 	if (bus_dmamap_load_uio(sc->sc_dmat, mv_p->data_map, uio,
2584 	    BUS_DMA_NOWAIT)) {
2585 		mv_p->data_type = MVXPSEC_DATA_NONE;
2586 		mv_p->data_mbuf = NULL;
2587 		log(LOG_ERR, "%s: cannot load uio buf\n", __func__);
2588 		return -1;
2589 	}
2590 
2591 	/* set payload buffer */
2592 	mv_p->data_type = MVXPSEC_DATA_UIO;
2593 	mv_p->data_uio = uio;
2594 	mv_p->data_len = uio->uio_resid;
2595 	mv_p->flags |= RDY_DATA;
2596 
2597 	return 0;
2598 }
2599 
2600 STATIC int
2601 mvxpsec_packet_rdata(struct mvxpsec_packet *mv_p,
2602     int off, int len, void *cp)
2603 {
2604 	uint8_t *p;
2605 
2606 	if (mv_p->data_type == MVXPSEC_DATA_RAW) {
2607 		p = (uint8_t *)mv_p->data_raw + off;
2608 		memcpy(cp, p, len);
2609 	}
2610 	else if (mv_p->data_type == MVXPSEC_DATA_MBUF) {
2611 		m_copydata(mv_p->data_mbuf, off, len, cp);
2612 	}
2613 	else if (mv_p->data_type == MVXPSEC_DATA_UIO) {
2614 		cuio_copydata(mv_p->data_uio, off, len, cp);
2615 	}
2616 	else
2617 		return -1;
2618 
2619 	return 0;
2620 }
2621 
2622 STATIC int
2623 mvxpsec_packet_wdata(struct mvxpsec_packet *mv_p,
2624     int off, int len, void *cp)
2625 {
2626 	uint8_t *p;
2627 
2628 	if (mv_p->data_type == MVXPSEC_DATA_RAW) {
2629 		p = (uint8_t *)mv_p->data_raw + off;
2630 		memcpy(p, cp, len);
2631 	}
2632 	else if (mv_p->data_type == MVXPSEC_DATA_MBUF) {
2633 		m_copyback(mv_p->data_mbuf, off, len, cp);
2634 	}
2635 	else if (mv_p->data_type == MVXPSEC_DATA_UIO) {
2636 		cuio_copyback(mv_p->data_uio, off, len, cp);
2637 	}
2638 	else
2639 		return -1;
2640 
2641 	return 0;
2642 }
2643 
2644 /*
2645  * Set the cipher initialization vector in the packet header.
2646  */
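/*
 * IV selection in mvxpsec_parse_crd() works out to: an explicit IV
 * from the descriptor wins; otherwise an IV already present in the
 * packet buffer is copied in; failing both, this routine falls back
 * to the per-session IV (KAME-compatible) or a fresh cprng_fast(9)
 * value.
 */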
2647 STATIC int
2648 mvxpsec_packet_write_iv(struct mvxpsec_packet *mv_p, void *iv, int ivlen)
2649 {
2650 	uint8_t ivbuf[16];
2651 
2652 	KASSERT(ivlen == 8 || ivlen == 16);
2653 
2654 	if (iv == NULL) {
2655 		if (mv_p->mv_s->sflags & RDY_CRP_IV) {
2656 			/* use per session IV (compatible with KAME IPsec) */
2657 			mv_p->pkt_header.crp_iv_work = mv_p->mv_s->session_iv;
2658 			mv_p->flags |= RDY_CRP_IV;
2659 			return 0;
2660 		}
2661 		cprng_fast(ivbuf, ivlen);
2662 		iv = ivbuf;
2663 	}
2664 	memcpy(&mv_p->pkt_header.crp_iv_work, iv, ivlen);
2665 	if (mv_p->flags & CRP_EXT_IV) {
2666 		memcpy(&mv_p->pkt_header.crp_iv_ext, iv, ivlen);
2667 		mv_p->ext_iv = iv;
2668 		mv_p->ext_ivlen = ivlen;
2669 	}
2670 	mv_p->flags |= RDY_CRP_IV;
2671 
2672 	return 0;
2673 }
2674 
2675 STATIC int
2676 mvxpsec_packet_copy_iv(struct mvxpsec_packet *mv_p, int off, int ivlen)
2677 {
2678 	mvxpsec_packet_rdata(mv_p, off, ivlen,
2679 	    &mv_p->pkt_header.crp_iv_work);
2680 	mv_p->flags |= RDY_CRP_IV;
2681 
2682 	return 0;
2683 }
2684 
2685 /*
2686  * set an encryption or decryption key to the session
2687  *
2688  * Input key material is big endian.
2689  */
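/*
 * For AES the engine wants both directions pre-expanded: the encrypt
 * key is the raw key material, and the decrypt key is the equivalent
 * inverse key schedule that mv_aes_deckey() below extracts.  Worked
 * example for AES-128: the decrypt key is W[10], the final round key
 * of the FIPS-197 key expansion.
 */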
2690 STATIC int
2691 mvxpsec_key_precomp(int alg, void *keymat, int kbitlen,
2692     void *key_encrypt, void *key_decrypt)
2693 {
2694 	uint32_t *kp = keymat;
2695 	uint32_t *ekp = key_encrypt;
2696 	uint32_t *dkp = key_decrypt;
2697 	int i;
2698 
2699 	switch (alg) {
2700 	case CRYPTO_DES_CBC:
2701 		if (kbitlen < 64 || (kbitlen % 8) != 0) {
2702 			log(LOG_WARNING,
2703 			    "mvxpsec: invalid DES keylen %d\n", kbitlen);
2704 			return EINVAL;
2705 		}
2706 		for (i = 0; i < 2; i++)
2707 			dkp[i] = ekp[i] = kp[i];
2708 		for (; i < 8; i++)
2709 			dkp[i] = ekp[i] = 0;
2710 		break;
2711 	case CRYPTO_3DES_CBC:
2712 		if (kbitlen < 192 || (kbitlen % 8) != 0) {
2713 			log(LOG_WARNING,
2714 			    "mvxpsec: invalid 3DES keylen %d\n", kbitlen);
2715 			return EINVAL;
2716 		}
2717 		for (i = 0; i < 8; i++)
2718 			dkp[i] = ekp[i] = kp[i];
2719 		break;
2720 	case CRYPTO_AES_CBC:
2721 		if (kbitlen < 128) {
2722 			log(LOG_WARNING,
2723 			    "mvxpsec: invalid AES keylen %d\n", kbitlen);
2724 			return EINVAL;
2725 		}
2726 		else if (kbitlen < 192) {
2727 			/* AES-128 */
2728 			for (i = 0; i < 4; i++)
2729 				ekp[i] = kp[i];
2730 			for (; i < 8; i++)
2731 				ekp[i] = 0;
2732 		}
2733 		else if (kbitlen < 256) {
2734 			/* AES-192 */
2735 			for (i = 0; i < 6; i++)
2736 				ekp[i] = kp[i];
2737 			for (; i < 8; i++)
2738 				ekp[i] = 0;
2739 		}
2740 		else {
2741 			/* AES-256 */
2742 			for (i = 0; i < 8; i++)
2743 				ekp[i] = kp[i];
2744 		}
2745 		/* make decryption key */
2746 		mv_aes_deckey((uint8_t *)dkp, (uint8_t *)ekp, kbitlen);
2747 		break;
2748 	default:
2749 		for (i = 0; i < 8; i++)
2750 			ekp[0] = dkp[0] = 0;
2751 		break;
2752 	}
2753 
2754 #ifdef MVXPSEC_DEBUG
2755 	if (mvxpsec_debug & MVXPSEC_DEBUG_OPENCRYPTO) {
2756 		MVXPSEC_PRINTF(MVXPSEC_DEBUG_OPENCRYPTO,
2757 		    "%s: keyregistered\n", __func__);
2758 		mvxpsec_dump_data(__func__, ekp, 32);
2759 	}
2760 #endif
2761 
2762 	return 0;
2763 }
2764 
2765 /*
2766  * set MAC key to the session
2767  *
2768  * The MAC engine has no register for the key itself, but it has
2769  * inner and outer IV registers.  Software must compute these IVs
2770  * before enabling the engine.
2771  *
2772  * Each IV is the hash state of the key XORed with ipad/opad, as
2773  * defined by the FIPS-198a standard.
2774  */
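/*
 * In other words (FIPS-198a, 64-byte block size B):
 *
 *   HMAC(K, m) = H((K ^ opad) || H((K ^ ipad) || m))
 *
 * iv_inner is the hash state after absorbing (K ^ ipad) and iv_outer
 * the state after (K ^ opad); loaded into the engine, they let the
 * hardware continue both hashes without ever holding the key.
 */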
2775 STATIC int
2776 mvxpsec_hmac_precomp(int alg, void *key, int kbitlen,
2777     void *iv_inner, void *iv_outer)
2778 {
2779 	SHA1_CTX sha1;
2780 	MD5_CTX md5;
2781 	uint8_t *key8 = key;
2782 	uint8_t kbuf[64];
2783 	uint8_t ipad[64];
2784 	uint8_t opad[64];
2785 	uint32_t *iv_in = iv_inner;
2786 	uint32_t *iv_out = iv_outer;
2787 	int kbytelen;
2788 	int i;
2789 #define HMAC_IPAD 0x36
2790 #define HMAC_OPAD 0x5c
2791 
2792 	kbytelen = kbitlen / 8;
2793 	KASSERT(kbitlen == kbytelen * 8);
2794 	if (kbytelen > 64) {
2795 		SHA1Init(&sha1);
2796 		SHA1Update(&sha1, key, kbytelen);
2797 		SHA1Final(kbuf, &sha1);
2798 		key8 = kbuf;
2799 		kbytelen = SHA1_DIGEST_LENGTH; /* only 20 bytes of kbuf are valid */
2800 	}
2801 
2802 	/* make initial 64 oct. string */
2803 	switch (alg) {
2804 	case CRYPTO_SHA1_HMAC_96:
2805 	case CRYPTO_SHA1_HMAC:
2806 	case CRYPTO_MD5_HMAC_96:
2807 	case CRYPTO_MD5_HMAC:
2808 		for (i = 0; i < kbytelen; i++) {
2809 			ipad[i] = (key8[i] ^ HMAC_IPAD);
2810 			opad[i] = (key8[i] ^ HMAC_OPAD);
2811 		}
2812 		for (; i < 64; i++) {
2813 			ipad[i] = HMAC_IPAD;
2814 			opad[i] = HMAC_OPAD;
2815 		}
2816 		break;
2817 	default:
2818 		break;
2819 	}
2820 #ifdef MVXPSEC_DEBUG
2821 	if (mvxpsec_debug & MVXPSEC_DEBUG_OPENCRYPTO) {
2822 		MVXPSEC_PRINTF(MVXPSEC_DEBUG_OPENCRYPTO,
2823 		    "%s: HMAC-KEY Pre-comp:\n", __func__);
2824 		mvxpsec_dump_data(__func__, key8, kbytelen);
2825 		MVXPSEC_PRINTF(MVXPSEC_DEBUG_OPENCRYPTO,
2826 		    "%s: ipad:\n", __func__);
2827 		mvxpsec_dump_data(__func__, ipad, sizeof(ipad));
2828 		MVXPSEC_PRINTF(MVXPSEC_DEBUG_OPENCRYPTO,
2829 		    "%s: opad:\n", __func__);
2830 		mvxpsec_dump_data(__func__, opad, sizeof(opad));
2831 	}
2832 #endif
2833 
2834 	/* make iv from string */
2835 	switch (alg) {
2836 	case CRYPTO_SHA1_HMAC_96:
2837 	case CRYPTO_SHA1_HMAC:
2838 		MVXPSEC_PRINTF(MVXPSEC_DEBUG_OPENCRYPTO,
2839 		    "%s: Generate iv_in(SHA1)\n", __func__);
2840 		SHA1Init(&sha1);
2841 		SHA1Update(&sha1, ipad, 64);
2842 		/* XXX: private state... (LE) */
2843 		iv_in[0] = htobe32(sha1.state[0]);
2844 		iv_in[1] = htobe32(sha1.state[1]);
2845 		iv_in[2] = htobe32(sha1.state[2]);
2846 		iv_in[3] = htobe32(sha1.state[3]);
2847 		iv_in[4] = htobe32(sha1.state[4]);
2848 
2849 		MVXPSEC_PRINTF(MVXPSEC_DEBUG_OPENCRYPTO,
2850 		    "%s: Generate iv_out(SHA1)\n", __func__);
2851 		SHA1Init(&sha1);
2852 		SHA1Update(&sha1, opad, 64);
2853 		/* XXX: private state... (LE) */
2854 		iv_out[0] = htobe32(sha1.state[0]);
2855 		iv_out[1] = htobe32(sha1.state[1]);
2856 		iv_out[2] = htobe32(sha1.state[2]);
2857 		iv_out[3] = htobe32(sha1.state[3]);
2858 		iv_out[4] = htobe32(sha1.state[4]);
2859 		break;
2860 	case CRYPTO_MD5_HMAC_96:
2861 	case CRYPTO_MD5_HMAC:
2862 		MVXPSEC_PRINTF(MVXPSEC_DEBUG_OPENCRYPTO,
2863 		    "%s: Generate iv_in(MD5)\n", __func__);
2864 		MD5Init(&md5);
2865 		MD5Update(&md5, ipad, sizeof(ipad));
2866 		/* XXX: private state... (LE) */
2867 		iv_in[0] = htobe32(md5.state[0]);
2868 		iv_in[1] = htobe32(md5.state[1]);
2869 		iv_in[2] = htobe32(md5.state[2]);
2870 		iv_in[3] = htobe32(md5.state[3]);
2871 		iv_in[4] = 0;
2872 
2873 		MVXPSEC_PRINTF(MVXPSEC_DEBUG_OPENCRYPTO,
2874 		    "%s: Generate iv_out(MD5)\n", __func__);
2875 		MD5Init(&md5);
2876 		MD5Update(&md5, opad, sizeof(opad));
2877 		/* XXX: private state... (LE) */
2878 		iv_out[0] = htobe32(md5.state[0]);
2879 		iv_out[1] = htobe32(md5.state[1]);
2880 		iv_out[2] = htobe32(md5.state[2]);
2881 		iv_out[3] = htobe32(md5.state[3]);
2882 		iv_out[4] = 0;
2883 		break;
2884 	default:
2885 		break;
2886 	}
2887 
2888 #ifdef MVXPSEC_DEBUG
2889 	if (mvxpsec_debug & MVXPSEC_DEBUG_HASH_IV) {
2890 		MVXPSEC_PRINTF(MVXPSEC_DEBUG_HASH_IV,
2891 		    "%s: HMAC IV-IN\n", __func__);
2892 		mvxpsec_dump_data(__func__, (uint8_t *)iv_in, 20);
2893 		MVXPSEC_PRINTF(MVXPSEC_DEBUG_HASH_IV,
2894 		    "%s: HMAC IV-OUT\n", __func__);
2895 		mvxpsec_dump_data(__func__, (uint8_t *)iv_out, 20);
2896 	}
2897 #endif
2898 
2899 	return 0;
2900 #undef HMAC_IPAD
2901 #undef HMAC_OPAD
2902 }
2903 
2904 /*
2905  * AES support routines
2906  */
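/*
 * AES_SBOX is the FIPS-197 forward S-box and AES_RCON the round
 * constants; mv_aes_ksched() runs the standard key expansion and
 * mv_aes_deckey() extracts the final round keys for the engine's
 * decryption key registers.
 */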
2907 static uint8_t AES_SBOX[256] = {
2908 	 99, 124, 119, 123, 242, 107, 111, 197,  48,   1, 103,  43, 254, 215,
2909 	171, 118, 202, 130, 201, 125, 250,  89,  71, 240, 173, 212, 162, 175,
2910 	156, 164, 114, 192, 183, 253, 147,  38,  54,  63, 247, 204,  52, 165,
2911 	229, 241, 113, 216,  49,  21,   4, 199,  35, 195,  24, 150,   5, 154,
2912 	  7,  18, 128, 226, 235,  39, 178, 117,   9, 131,  44,  26,  27, 110,
2913 	 90, 160,  82,  59, 214, 179,  41, 227,  47, 132,  83, 209,   0, 237,
2914 	 32, 252, 177,  91, 106, 203, 190,  57,  74,  76,  88, 207, 208, 239,
2915 	170, 251,  67,  77,  51, 133,  69, 249,   2, 127,  80,  60, 159, 168,
2916 	 81, 163,  64, 143, 146, 157,  56, 245, 188, 182, 218,  33,  16, 255,
2917 	243, 210, 205,  12,  19, 236,  95, 151,  68,  23, 196, 167, 126,  61,
2918 	100,  93,  25, 115,  96, 129,  79, 220,  34,  42, 144, 136,  70, 238,
2919 	184,  20, 222,  94,  11, 219, 224,  50,  58,  10,  73,   6,  36,  92,
2920 	194, 211, 172,  98, 145, 149, 228, 121, 231, 200,  55, 109, 141, 213,
2921 	 78, 169, 108,  86, 244, 234, 101, 122, 174,   8, 186, 120,  37,  46,
2922 	 28, 166, 180, 198, 232, 221, 116,  31,  75, 189, 139, 138, 112,  62,
2923 	181, 102,  72,   3, 246,  14,  97,  53,  87, 185, 134, 193,  29, 158,
2924 	225, 248, 152,  17, 105, 217, 142, 148, 155,  30, 135, 233, 206,  85,
2925 	 40, 223, 140, 161, 137,  13, 191, 230,  66, 104,  65, 153,  45,  15,
2926 	176,  84, 187,  22
2927 };
2928 
2929 static uint32_t AES_RCON[30] = {
2930 	0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1b, 0x36, 0x6c, 0xd8,
2931        	0xab, 0x4d, 0x9a, 0x2f, 0x5e, 0xbc, 0x63, 0xc6, 0x97, 0x35, 0x6a, 0xd4,
2932        	0xb3, 0x7d, 0xfa, 0xef, 0xc5, 0x91
2933 };
2934 
2935 STATIC int
2936 mv_aes_ksched(uint8_t k[4][MAXKC], int keyBits,
2937     uint8_t W[MAXROUNDS+1][4][MAXBC])
2938 {
2939 	int KC, BC, ROUNDS;
2940 	int i, j, t, rconpointer = 0;
2941 	uint8_t tk[4][MAXKC];
2942 
2943 	switch (keyBits) {
2944 	case 128:
2945 		ROUNDS = 10;
2946 		KC = 4;
2947 		break;
2948 	case 192:
2949 		ROUNDS = 12;
2950 		KC = 6;
2951 		break;
2952 	case 256:
2953 		ROUNDS = 14;
2954 		KC = 8;
2955 		break;
2956 	default:
2957 		return (-1);
2958 	}
2959 	BC = 4; /* 128 bits */
2960 
2961 	for(j = 0; j < KC; j++)
2962 		for(i = 0; i < 4; i++)
2963 			tk[i][j] = k[i][j];
2964 	t = 0;
2965 
2966 	/* copy values into round key array */
2967 	for(j = 0; (j < KC) && (t < (ROUNDS+1)*BC); j++, t++)
2968 		for(i = 0; i < 4; i++) W[t / BC][i][t % BC] = tk[i][j];
2969 
2970 	while (t < (ROUNDS+1)*BC) { /* while not enough round key material calculated */
2971 		/* calculate new values */
2972 		for(i = 0; i < 4; i++)
2973 			tk[i][0] ^= AES_SBOX[tk[(i+1)%4][KC-1]];
2974 		tk[0][0] ^= AES_RCON[rconpointer++];
2975 
2976 		if (KC != 8) {
2977 			for(j = 1; j < KC; j++)
2978 				for(i = 0; i < 4; i++)
2979 					tk[i][j] ^= tk[i][j-1];
2980 		} else {
2981 			for(j = 1; j < KC/2; j++)
2982 				for(i = 0; i < 4; i++)
2983 					tk[i][j] ^= tk[i][j-1];
2984 			for(i = 0; i < 4; i++)
2985 				tk[i][KC/2] ^= AES_SBOX[tk[i][KC/2 - 1]];
2986 			for(j = KC/2 + 1; j < KC; j++)
2987 				for(i = 0; i < 4; i++)
2988 					tk[i][j] ^= tk[i][j-1];
2989 		}
2990 		/* copy values into round key array */
2991 		for(j = 0; (j < KC) && (t < (ROUNDS+1)*BC); j++, t++)
2992 			for(i = 0; i < 4; i++) W[t / BC][i][t % BC] = tk[i][j];
2993 	}
2994 
2995 	return 0;
2996 }
2997 
2998 STATIC int
2999 mv_aes_deckey(uint8_t *expandedKey, uint8_t *keyMaterial, int keyLen)
3000 {
3001 	uint8_t   W[MAXROUNDS+1][4][MAXBC];
3002 	uint8_t   k[4][MAXKC];
3003 	uint8_t   j;
3004 	int     i, rounds, KC;
3005 
3006 	if (expandedKey == NULL)
3007 		return -1;
3008 
3009 	if (!((keyLen == 128) || (keyLen == 192) || (keyLen == 256)))
3010 		return -1;
3011 
3012 	if (keyMaterial == NULL)
3013 		return -1;
3014 
3015 	/* initialize key schedule: */
3016 	for (i=0; i<keyLen/8; i++) {
3017 		j = keyMaterial[i];
3018 		k[i % 4][i / 4] = j;
3019 	}
3020 
3021 	mv_aes_ksched(k, keyLen, W);
3022 	switch (keyLen) {
3023 	case 128:
3024 		rounds = 10;
3025 		KC = 4;
3026 		break;
3027 	case 192:
3028 		rounds = 12;
3029 		KC = 6;
3030 		break;
3031 	case 256:
3032 		rounds = 14;
3033 		KC = 8;
3034 		break;
3035 	default:
3036 		return -1;
3037 	}
3038 
3039 	for(i=0; i<MAXBC; i++)
3040 		for(j=0; j<4; j++)
3041 			expandedKey[i*4+j] = W[rounds][j][i];
3042 	for(; i<KC; i++)
3043 		for(j=0; j<4; j++)
3044 			expandedKey[i*4+j] = W[rounds-1][j][i+MAXBC-KC];
3045 
3046 	return 0;
3047 }
3048 
3049 /*
3050  * Clear cipher/mac operation state
3051  */
3052 INLINE void
3053 mvxpsec_packet_reset_op(struct mvxpsec_packet *mv_p)
3054 {
3055 	mv_p->pkt_header.desc.acc_config = 0;
3056 	mv_p->enc_off = mv_p->enc_ivoff = mv_p->enc_len = 0;
3057 	mv_p->mac_off = mv_p->mac_dst = mv_p->mac_len = 0;
3058 }
3059 
3060 /*
3061  * update MVXPSEC operation order
3062  */
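/*
 * Example: an ESP encrypt request arrives as ENC followed by MAC and
 * ends up as MV_ACC_CRYPTO_OP_ENCMAC (encrypt-then-MAC); the matching
 * decrypt request arrives MAC-first and becomes
 * MV_ACC_CRYPTO_OP_MACENC.
 */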
3063 INLINE void
3064 mvxpsec_packet_update_op_order(struct mvxpsec_packet *mv_p, int op)
3065 {
3066 	struct mvxpsec_acc_descriptor *acc_desc = &mv_p->pkt_header.desc;
3067 	uint32_t cur_op = acc_desc->acc_config & MV_ACC_CRYPTO_OP_MASK;
3068 
3069 	KASSERT(op == MV_ACC_CRYPTO_OP_MAC || op == MV_ACC_CRYPTO_OP_ENC);
3070 	KASSERT((op & MV_ACC_CRYPTO_OP_MASK) == op);
3071 
3072 	if (cur_op == 0)
3073 		acc_desc->acc_config |= op;
3074 	else if (cur_op == MV_ACC_CRYPTO_OP_MAC && op == MV_ACC_CRYPTO_OP_ENC) {
3075 		acc_desc->acc_config &= ~MV_ACC_CRYPTO_OP_MASK;
3076 		acc_desc->acc_config |= MV_ACC_CRYPTO_OP_MACENC;
3077 		/* MAC then ENC (= decryption) */
3078 	}
3079 	else if (cur_op == MV_ACC_CRYPTO_OP_ENC && op == MV_ACC_CRYPTO_OP_MAC) {
3080 		acc_desc->acc_config &= ~MV_ACC_CRYPTO_OP_MASK;
3081 		acc_desc->acc_config |= MV_ACC_CRYPTO_OP_ENCMAC;
3082 		/* ENC then MAC (= encryption) */
3083 	}
3084 	else {
3085 		log(LOG_ERR, "%s: multiple %s algorithms are not supported.\n",
3086 		    __func__,
3087 		    (op == MV_ACC_CRYPTO_OP_ENC) ? "encryption" : "authentication");
3088 	}
3089 }
3090 
3091 /*
3092  * Parameter Conversions
3093  */
3094 INLINE uint32_t
3095 mvxpsec_alg2acc(uint32_t alg)
3096 {
3097 	uint32_t reg;
3098 
3099 	switch (alg) {
3100 	case CRYPTO_DES_CBC:
3101 		reg = MV_ACC_CRYPTO_ENC_DES;
3102 		reg |= MV_ACC_CRYPTO_CBC;
3103 		break;
3104 	case CRYPTO_3DES_CBC:
3105 		reg = MV_ACC_CRYPTO_ENC_3DES;
3106 		reg |= MV_ACC_CRYPTO_3DES_EDE;
3107 		reg |= MV_ACC_CRYPTO_CBC;
3108 		break;
3109 	case CRYPTO_AES_CBC:
3110 		reg = MV_ACC_CRYPTO_ENC_AES;
3111 		reg |= MV_ACC_CRYPTO_CBC;
3112 		break;
3113 	case CRYPTO_SHA1_HMAC_96:
3114 		reg = MV_ACC_CRYPTO_MAC_HMAC_SHA1;
3115 		reg |= MV_ACC_CRYPTO_MAC_96;
3116 		break;
3117 	case CRYPTO_MD5_HMAC_96:
3118 		reg = MV_ACC_CRYPTO_MAC_HMAC_MD5;
3119 		reg |= MV_ACC_CRYPTO_MAC_96;
3120 		break;
3121 	default:
3122 		reg = 0;
3123 		break;
3124 	}
3125 
3126 	return reg;
3127 }
3128 
3129 INLINE uint32_t
3130 mvxpsec_aesklen(int klen)
3131 {
3132 	if (klen < 128)
3133 		return 0;
3134 	else if (klen < 192)
3135 		return MV_ACC_CRYPTO_AES_KLEN_128;
3136 	else if (klen < 256)
3137 		return MV_ACC_CRYPTO_AES_KLEN_192;
3138 	else
3139 		return MV_ACC_CRYPTO_AES_KLEN_256;
3142 }
3143 
3144 /*
3145  * String Conversions
3146  */
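/*
 * Note: these helpers format into static buffers and are therefore
 * not re-entrant; they are intended only for the log and debug paths.
 */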
3147 STATIC const char *
3148 s_errreg(uint32_t v)
3149 {
3150 	static char buf[80];
3151 
3152 	snprintf(buf, sizeof(buf),
3153 	    "%sMiss %sDoubleHit %sBothHit %sDataError",
3154 	    (v & MV_TDMA_ERRC_MISS) ? "+" : "-",
3155 	    (v & MV_TDMA_ERRC_DHIT) ? "+" : "-",
3156 	    (v & MV_TDMA_ERRC_BHIT) ? "+" : "-",
3157 	    (v & MV_TDMA_ERRC_DERR) ? "+" : "-");
3158 
3159 	return (const char *)buf;
3160 }
3161 
3162 STATIC const char *
3163 s_winreg(uint32_t v)
3164 {
3165 	static char buf[80];
3166 
3167 	snprintf(buf, sizeof(buf),
3168 	    "%s TGT 0x%x ATTR 0x%02x size %u(0x%04x)[64KB]",
3169 	    (v & MV_TDMA_ATTR_ENABLE) ? "EN" : "DIS",
3170 	    MV_TDMA_ATTR_GET_TARGET(v), MV_TDMA_ATTR_GET_ATTR(v),
3171 	    MV_TDMA_ATTR_GET_SIZE(v), MV_TDMA_ATTR_GET_SIZE(v));
3172 
3173 	return (const char *)buf;
3174 }
3175 
3176 STATIC const char *
3177 s_ctrlreg(uint32_t reg)
3178 {
3179 	static char buf[80];
3180 
3181 	snprintf(buf, sizeof(buf),
3182 	    "%s: %sFETCH DBURST-%u SBURST-%u %sOUTS %sCHAIN %sBSWAP %sACT",
3183 	    (reg & MV_TDMA_CONTROL_ENABLE) ? "ENABLE" : "DISABLE",
3184 	    (reg & MV_TDMA_CONTROL_FETCH) ? "+" : "-",
3185 	    MV_TDMA_CONTROL_GET_DST_BURST(reg),
3186 	    MV_TDMA_CONTROL_GET_SRC_BURST(reg),
3187 	    (reg & MV_TDMA_CONTROL_OUTS_EN) ? "+" : "-",
3188 	    (reg & MV_TDMA_CONTROL_CHAIN_DIS) ? "-" : "+",
3189 	    (reg & MV_TDMA_CONTROL_BSWAP_DIS) ? "-" : "+",
3190 	    (reg & MV_TDMA_CONTROL_ACT) ? "+" : "-");
3191 
3192 	return (const char *)buf;
3193 }
3194 
3195 _STATIC const char *
3196 s_xpsecintr(uint32_t v)
3197 {
3198 	static char buf[160];
3199 
3200 	snprintf(buf, sizeof(buf),
3201 	    "%sAuth %sDES %sAES-ENC %sAES-DEC %sENC %sSA %sAccAndTDMA "
3202 	    "%sTDMAComp %sTDMAOwn %sAccAndTDMA_Cont",
3203 	    (v & MVXPSEC_INT_AUTH) ? "+" : "-",
3204 	    (v & MVXPSEC_INT_DES) ? "+" : "-",
3205 	    (v & MVXPSEC_INT_AES_ENC) ? "+" : "-",
3206 	    (v & MVXPSEC_INT_AES_DEC) ? "+" : "-",
3207 	    (v & MVXPSEC_INT_ENC) ? "+" : "-",
3208 	    (v & MVXPSEC_INT_SA) ? "+" : "-",
3209 	    (v & MVXPSEC_INT_ACCTDMA) ? "+" : "-",
3210 	    (v & MVXPSEC_INT_TDMA_COMP) ? "+" : "-",
3211 	    (v & MVXPSEC_INT_TDMA_OWN) ? "+" : "-",
3212 	    (v & MVXPSEC_INT_ACCTDMA_CONT) ? "+" : "-");
3213 
3214 	return (const char *)buf;
3215 }
3216 
3217 STATIC const char *
3218 s_ctlalg(uint32_t alg)
3219 {
3220 	switch (alg) {
3221 	case CRYPTO_SHA1_HMAC_96:
3222 		return "HMAC-SHA1-96";
3223 	case CRYPTO_SHA1_HMAC:
3224 		return "HMAC-SHA1";
3225 	case CRYPTO_SHA1:
3226 		return "SHA1";
3227 	case CRYPTO_MD5_HMAC_96:
3228 		return "HMAC-MD5-96";
3229 	case CRYPTO_MD5_HMAC:
3230 		return "HMAC-MD5";
3231 	case CRYPTO_MD5:
3232 		return "MD5";
3233 	case CRYPTO_DES_CBC:
3234 		return "DES-CBC";
3235 	case CRYPTO_3DES_CBC:
3236 		return "3DES-CBC";
3237 	case CRYPTO_AES_CBC:
3238 		return "AES-CBC";
3239 	default:
3240 		break;
3241 	}
3242 
3243 	return "Unknown";
3244 }
3245 
3246 STATIC const char *
3247 s_xpsec_op(uint32_t reg)
3248 {
3249 	reg &= MV_ACC_CRYPTO_OP_MASK;
3250 	switch (reg) {
3251 	case MV_ACC_CRYPTO_OP_ENC:
3252 		return "ENC";
3253 	case MV_ACC_CRYPTO_OP_MAC:
3254 		return "MAC";
3255 	case MV_ACC_CRYPTO_OP_ENCMAC:
3256 		return "ENC-MAC";
3257 	case MV_ACC_CRYPTO_OP_MACENC:
3258 		return "MAC-ENC";
3259 	default:
3260 		break;
3261 	}
3262 
3263 	return "Unknown";
3264 
3265 }
3266 
3267 STATIC const char *
3268 s_xpsec_enc(uint32_t alg)
3269 {
3270 	alg <<= MV_ACC_CRYPTO_ENC_SHIFT;
3271 	switch (alg) {
3272 	case MV_ACC_CRYPTO_ENC_DES:
3273 		return "DES";
3274 	case MV_ACC_CRYPTO_ENC_3DES:
3275 		return "3DES";
3276 	case MV_ACC_CRYPTO_ENC_AES:
3277 		return "AES";
3278 	default:
3279 		break;
3280 	}
3281 
3282 	return "Unknown";
3283 }
3284 
3285 STATIC const char *
3286 s_xpsec_mac(uint32_t alg)
3287 {
3288 	alg <<= MV_ACC_CRYPTO_MAC_SHIFT;
3289 	switch (alg) {
3290 	case MV_ACC_CRYPTO_MAC_NONE:
3291 		return "Disabled";
3292 	case MV_ACC_CRYPTO_MAC_MD5:
3293 		return "MD5";
3294 	case MV_ACC_CRYPTO_MAC_SHA1:
3295 		return "SHA1";
3296 	case MV_ACC_CRYPTO_MAC_HMAC_MD5:
3297 		return "HMAC-MD5";
3298 	case MV_ACC_CRYPTO_MAC_HMAC_SHA1:
3299 		return "HMAC-SHA1";
3300 	default:
3301 		break;
3302 	}
3303 
3304 	return "Unknown";
3305 }
3306 
3307 STATIC const char *
3308 s_xpsec_frag(uint32_t frag)
3309 {
3310 	frag <<= MV_ACC_CRYPTO_FRAG_SHIFT;
3311 	switch (frag) {
3312 	case MV_ACC_CRYPTO_NOFRAG:
3313 		return "NoFragment";
3314 	case MV_ACC_CRYPTO_FRAG_FIRST:
3315 		return "FirstFragment";
3316 	case MV_ACC_CRYPTO_FRAG_MID:
3317 		return "MiddleFragment";
3318 	case MV_ACC_CRYPTO_FRAG_LAST:
3319 		return "LastFragment";
3320 	default:
3321 		break;
3322 	}
3323 
3324 	return "Unknown";
3325 }
3326 
3327 #ifdef MVXPSEC_DEBUG
3328 void
3329 mvxpsec_dump_reg(struct mvxpsec_softc *sc)
3330 {
3331 	uint32_t reg;
3332 	int i;
3333 
3334 	if ((mvxpsec_debug & MVXPSEC_DEBUG_DESC) == 0)
3335 		return;
3336 
3337 	printf("--- Interrupt Registers ---\n");
3338 	reg = MVXPSEC_READ(sc, MVXPSEC_INT_CAUSE);
3339 	printf("MVXPSEC INT CAUSE: 0x%08x\n", reg);
3340 	printf("MVXPSEC INT CAUSE: %s\n", s_xpsecintr(reg));
3341 	reg = MVXPSEC_READ(sc, MVXPSEC_INT_MASK);
3342 	printf("MVXPSEC INT MASK: 0x%08x\n", reg);
3343 	printf("MVXPSEC INT MASKE: %s\n", s_xpsecintr(reg));
3344 
3345 	printf("--- DMA Configuration Registers ---\n");
3346 	for (i = 0; i < MV_TDMA_NWINDOW; i++) {
3347 		reg = MVXPSEC_READ(sc, MV_TDMA_BAR(i));
3348 		printf("TDMA BAR%d: 0x%08x\n", i, reg);
3349 		reg = MVXPSEC_READ(sc, MV_TDMA_ATTR(i));
3350 		printf("TDMA ATTR%d: 0x%08x\n", i, reg);
3351 		printf("  -> %s\n", s_winreg(reg));
3352 	}
3353 
3354 	printf("--- DMA Control Registers ---\n");
3355 
3356 	reg = MVXPSEC_READ(sc, MV_TDMA_CONTROL);
3357 	printf("TDMA CONTROL: 0x%08x\n", reg);
3358 	printf("  -> %s\n", s_ctrlreg(reg));
3359 
3360 	printf("--- DMA Current Command Descriptors ---\n");
3361 
3362 	reg = MVXPSEC_READ(sc, MV_TDMA_ERR_CAUSE);
3363 	printf("TDMA ERR CAUSE: 0x%08x\n", reg);
3364 
3365 	reg = MVXPSEC_READ(sc, MV_TDMA_ERR_MASK);
3366 	printf("TDMA ERR MASK: 0x%08x\n", reg);
3367 
3368 	reg = MVXPSEC_READ(sc, MV_TDMA_CNT);
3369 	printf("TDMA DATA OWNER: %s\n",
3370 	    (reg & MV_TDMA_CNT_OWN) ? "DMAC" : "CPU");
3371 	printf("TDMA DATA COUNT: %d(0x%x)\n",
3372 	    (reg & ~MV_TDMA_CNT_OWN), (reg & ~MV_TDMA_CNT_OWN));
3373 
3374 	reg = MVXPSEC_READ(sc, MV_TDMA_SRC);
3375 	printf("TDMA DATA SRC: 0x%08x\n", reg);
3376 
3377 	reg = MVXPSEC_READ(sc, MV_TDMA_DST);
3378 	printf("TDMA DATA DST: 0x%08x\n", reg);
3379 
3380 	reg = MVXPSEC_READ(sc, MV_TDMA_NXT);
3381 	printf("TDMA DATA NXT: 0x%08x\n", reg);
3382 
3383 	reg = MVXPSEC_READ(sc, MV_TDMA_CUR);
3384 	printf("TDMA DATA CUR: 0x%08x\n", reg);
3385 
3386 	printf("--- ACC Command Register ---\n");
3387 	reg = MVXPSEC_READ(sc, MV_ACC_COMMAND);
3388 	printf("ACC COMMAND: 0x%08x\n", reg);
3389 	printf("ACC: %sACT %sSTOP\n",
3390 	    (reg & MV_ACC_COMMAND_ACT) ? "+" : "-",
3391 	    (reg & MV_ACC_COMMAND_STOP) ? "+" : "-");
3392 
3393 	reg = MVXPSEC_READ(sc, MV_ACC_CONFIG);
3394 	printf("ACC CONFIG: 0x%08x\n", reg);
3395 	reg = MVXPSEC_READ(sc, MV_ACC_DESC);
3396 	printf("ACC DESC: 0x%08x\n", reg);
3397 
3398 	printf("--- DES Key Register ---\n");
3399 	reg = MVXPSEC_READ(sc, MV_CE_DES_KEY0L);
3400 	printf("DES KEY0  Low: 0x%08x\n", reg);
3401 	reg = MVXPSEC_READ(sc, MV_CE_DES_KEY0H);
3402 	printf("DES KEY0 High: 0x%08x\n", reg);
3403 	reg = MVXPSEC_READ(sc, MV_CE_DES_KEY1L);
3404 	printf("DES KEY1  Low: 0x%08x\n", reg);
3405 	reg = MVXPSEC_READ(sc, MV_CE_DES_KEY1H);
3406 	printf("DES KEY1 High: 0x%08x\n", reg);
3407 	reg = MVXPSEC_READ(sc, MV_CE_DES_KEY2L);
3408 	printf("DES KEY2  Low: 0x%08x\n", reg);
3409 	reg = MVXPSEC_READ(sc, MV_CE_DES_KEY2H);
3410 	printf("DES KEY2 High: 0x%08x\n", reg);
3411 
3412 	printf("--- AES Key Register ---\n");
3413 	for (i = 0; i < 8; i++) {
3414 		reg = MVXPSEC_READ(sc, MV_CE_AES_EKEY(i));
3415 		printf("AES ENC KEY COL%d: %08x\n", i, reg);
3416 	}
3417 	for (i = 0; i < 8; i++) {
3418 		reg = MVXPSEC_READ(sc, MV_CE_AES_DKEY(i));
3419 		printf("AES DEC KEY COL%d: %08x\n", i, reg);
3420 	}
3421 
3422 	return;
3423 }
3424 
3425 STATIC void
3426 mvxpsec_dump_sram(const char *name, struct mvxpsec_softc *sc, size_t len)
3427 {
3428 	uint32_t reg;
3429 
3430 	if (sc->sc_sram_va == NULL)
3431 		return;
3432 
3433 	if (len == 0) {
3434 		printf("\n%s NO DATA(len=0)\n", name);
3435 		return;
3436 	}
3437 	else if (len > MV_ACC_SRAM_SIZE)
3438 		len = MV_ACC_SRAM_SIZE;
3439 
3440 	mutex_enter(&sc->sc_dma_mtx);
3441 	reg = MVXPSEC_READ(sc, MV_TDMA_CONTROL);
3442 	if (reg & MV_TDMA_CONTROL_ACT) {
3443 		printf("TDMA is active, cannot access SRAM\n");
3444 		mutex_exit(&sc->sc_dma_mtx);
3445 		return;
3446 	}
3447 	reg = MVXPSEC_READ(sc, MV_ACC_COMMAND);
3448 	if (reg & MV_ACC_COMMAND_ACT) {
3449 		printf("SA is active, cannot access SRAM\n");
3450 		mutex_exit(&sc->sc_dma_mtx);
3451 		return;
3452 	}
3453 
3454 	printf("%s: dump SRAM, %zu bytes\n", name, len);
3455 	mvxpsec_dump_data(name, sc->sc_sram_va, len);
3456 	mutex_exit(&sc->sc_dma_mtx);
3457 	return;
3458 }
3459 
3460 
3461 _STATIC void
3462 mvxpsec_dump_dmaq(struct mvxpsec_descriptor_handle *dh)
3463 {
3464 	struct mvxpsec_descriptor *d =
3465            (struct mvxpsec_descriptor *)dh->_desc;
3466 
3467 	printf("--- DMA Command Descriptor ---\n");
3468 	printf("DESC: VA=%p PA=0x%08x\n",
3469 	    d, (uint32_t)dh->phys_addr);
3470 	printf("DESC: WORD0 = 0x%08x\n", d->tdma_word0);
3471 	printf("DESC: SRC = 0x%08x\n", d->tdma_src);
3472 	printf("DESC: DST = 0x%08x\n", d->tdma_dst);
3473 	printf("DESC: NXT = 0x%08x\n", d->tdma_nxt);
3474 
3475 	return;
3476 }
3477 
3478 STATIC void
3479 mvxpsec_dump_data(const char *name, void *p, size_t len)
3480 {
3481 	uint8_t *data = p;
3482 	off_t off;
3483 
3484 	printf("%s: dump %p, %zu bytes", name, p, len);
3485 	if (p == NULL || len == 0) {
3486 		printf("\n%s: NO DATA\n", name);
3487 		return;
3488 	}
3489 	for (off = 0; off < len; off++) {
3490 		if ((off % 16) == 0) {
3491 			printf("\n%s: 0x%08x:", name, (uint32_t)off);
3492 		}
3493 		if ((off % 4) == 0) {
3494 			printf(" ");
3495 		}
3496 		printf("%02x", data[off]);
3497 	}
3498 	printf("\n");
3499 
3500 	return;
3501 }
3502 
3503 _STATIC void
3504 mvxpsec_dump_packet(const char *name, struct mvxpsec_packet *mv_p)
3505 {
3506 	struct mvxpsec_softc *sc = mv_p->mv_s->sc;
3507 
3508 	printf("%s: packet_data:\n", name);
3509 	mvxpsec_dump_packet_data(name, mv_p);
3510 
3511 	printf("%s: SRAM:\n", name);
3512 	mvxpsec_dump_sram(name, sc, 2000);
3513 
3514 	printf("%s: packet_descriptor:\n", name);
3515 	mvxpsec_dump_packet_desc(name, mv_p);
3516 }
3517 
3518 _STATIC void
3519 mvxpsec_dump_packet_data(const char *name, struct mvxpsec_packet *mv_p)
3520 {
3521 	static char buf[1500];
3522 	int len;
3523 
3524 	if (mv_p->data_type == MVXPSEC_DATA_MBUF) {
3525 		struct mbuf *m;
3526 
3527 		m = mv_p->data.mbuf;
3528 		len = m->m_pkthdr.len;
3529 		if (len > sizeof(buf))
3530 			len = sizeof(buf);
3531 		m_copydata(m, 0, len, buf);
3532 	}
3533 	else if (mv_p->data_type == MVXPSEC_DATA_UIO) {
3534 		struct uio *uio;
3535 
3536 		uio = mv_p->data.uio;
3537 		len = uio->uio_resid;
3538 		if (len > sizeof(buf))
3539 			len = sizeof(buf);
3540 		cuio_copydata(uio, 0, len, buf);
3541 	}
3542 	else if (mv_p->data_type == MVXPSEC_DATA_RAW) {
3543 		len = mv_p->data_len;
3544 		if (len > sizeof(buf))
3545 			len = sizeof(buf);
3546 		memcpy(buf, mv_p->data.raw, len);
3547 	}
3548 	else
3549 		return;
3550 	mvxpsec_dump_data(name, buf, len);
3551 
3552 	return;
3553 }
3554 
3555 _STATIC void
3556 mvxpsec_dump_packet_desc(const char *name, struct mvxpsec_packet *mv_p)
3557 {
3558 	uint32_t *words;
3559 
3560 	if (mv_p == NULL)
3561 		return;
3562 
3563 	words = &mv_p->pkt_header.desc.acc_desc_dword0;
3564 	mvxpsec_dump_acc_config(name, words[0]);
3565 	mvxpsec_dump_acc_encdata(name, words[1], words[2]);
3566 	mvxpsec_dump_acc_enclen(name, words[2]);
3567 	mvxpsec_dump_acc_enckey(name, words[3]);
3568 	mvxpsec_dump_acc_enciv(name, words[4]);
3569 	mvxpsec_dump_acc_macsrc(name, words[5]);
3570 	mvxpsec_dump_acc_macdst(name, words[6]);
3571 	mvxpsec_dump_acc_maciv(name, words[7]);
3572 
3573 	return;
3574 }
3575 
3576 _STATIC void
3577 mvxpsec_dump_acc_config(const char *name, uint32_t w)
3578 {
3579 	/* SA: Dword 0 */
3580 	printf("%s: Dword0=0x%08x\n", name, w);
3581 	printf("%s:   OP = %s\n", name,
3582 	    s_xpsec_op(MV_ACC_CRYPTO_OP(w)));
3583 	printf("%s:   MAC = %s\n", name,
3584 	    s_xpsec_mac(MV_ACC_CRYPTO_MAC(w)));
3585 	printf("%s:   MAC_LEN = %s\n", name,
3586 	    w & MV_ACC_CRYPTO_MAC_96 ? "96-bit" : "full-bit");
3587 	printf("%s:   ENC = %s\n", name,
3588 	    s_xpsec_enc(MV_ACC_CRYPTO_ENC(w)));
3589 	printf("%s:   DIR = %s\n", name,
3590 	    w & MV_ACC_CRYPTO_DECRYPT ? "decryption" : "encryption");
3591 	printf("%s:   CHAIN = %s\n", name,
3592 	    w & MV_ACC_CRYPTO_CBC ? "CBC" : "ECB");
3593 	printf("%s:   3DES = %s\n", name,
3594 	    w & MV_ACC_CRYPTO_3DES_EDE ? "EDE" : "EEE");
3595 	printf("%s:   FRAGMENT = %s\n", name,
3596 	    s_xpsec_frag(MV_ACC_CRYPTO_FRAG(w)));
3597 	return;
3598 }
3599 
3600 STATIC void
3601 mvxpsec_dump_acc_encdata(const char *name, uint32_t w, uint32_t w2)
3602 {
3603 	/* SA: Dword 1 */
3604 	printf("%s: Dword1=0x%08x\n", name, w);
3605 	printf("%s:   ENC SRC = 0x%x\n", name, MV_ACC_DESC_GET_VAL_1(w));
3606 	printf("%s:   ENC DST = 0x%x\n", name, MV_ACC_DESC_GET_VAL_2(w));
3607 	printf("%s:   ENC RANGE = 0x%x - 0x%x\n", name,
3608 	    MV_ACC_DESC_GET_VAL_1(w),
3609 	    MV_ACC_DESC_GET_VAL_1(w) + MV_ACC_DESC_GET_VAL_1(w2) - 1);
3610 	return;
3611 }
3612 
3613 STATIC void
3614 mvxpsec_dump_acc_enclen(const char *name, uint32_t w)
3615 {
3616 	/* SA: Dword 2 */
3617 	printf("%s: Dword2=0x%08x\n", name, w);
3618 	printf("%s:   ENC LEN = %d\n", name,
3619 	    MV_ACC_DESC_GET_VAL_1(w));
3620 	return;
3621 }
3622 
3623 STATIC void
3624 mvxpsec_dump_acc_enckey(const char *name, uint32_t w)
3625 {
3626 	/* SA: Dword 3 */
3627 	printf("%s: Dword3=0x%08x\n", name, w);
3628 	printf("%s:   EKEY = 0x%x\n", name,
3629 	    MV_ACC_DESC_GET_VAL_1(w));
3630 	return;
3631 }
3632 
3633 STATIC void
3634 mvxpsec_dump_acc_enciv(const char *name, uint32_t w)
3635 {
3636 	/* SA: Dword 4 */
3637 	printf("%s: Dword4=0x%08x\n", name, w);
3638 	printf("%s:   EIV = 0x%x\n", name, MV_ACC_DESC_GET_VAL_1(w));
3639 	printf("%s:   EIV_BUF = 0x%x\n", name, MV_ACC_DESC_GET_VAL_2(w));
3640 	return;
3641 }
3642 
3643 STATIC void
3644 mvxpsec_dump_acc_macsrc(const char *name, uint32_t w)
3645 {
3646 	/* SA: Dword 5 */
3647 	printf("%s: Dword5=0x%08x\n", name, w);
3648 	printf("%s:   MAC_SRC = 0x%x\n", name,
3649 	    MV_ACC_DESC_GET_VAL_1(w));
3650 	printf("%s:   MAC_TOTAL_LEN = %d\n", name,
3651 	    MV_ACC_DESC_GET_VAL_3(w));
3652 	printf("%s:   MAC_RANGE = 0x%0x - 0x%0x\n", name,
3653 	    MV_ACC_DESC_GET_VAL_1(w),
3654 	    MV_ACC_DESC_GET_VAL_1(w) + MV_ACC_DESC_GET_VAL_3(w) - 1);
3655 	return;
3656 }
3657 
3658 STATIC void
3659 mvxpsec_dump_acc_macdst(const char *name, uint32_t w)
3660 {
3661 	/* SA: Dword 6 */
3662 	printf("%s: Dword6=0x%08x\n", name, w);
3663 	printf("%s:   MAC_DST = 0x%x\n", name, MV_ACC_DESC_GET_VAL_1(w));
3664 	printf("%s:   MAC_BLOCK_LEN = %d\n", name,
3665 	    MV_ACC_DESC_GET_VAL_2(w));
3666 	return;
3667 }
3668 
3669 STATIC void
3670 mvxpsec_dump_acc_maciv(const char *name, uint32_t w)
3671 {
3672 	/* SA: Dword 7 */
3673 	printf("%s: Dword7=0x%08x\n", name, w);
3674 	printf("%s:   MAC_INNER_IV = 0x%x\n", name,
3675 	    MV_ACC_DESC_GET_VAL_1(w));
3676 	printf("%s:   MAC_OUTER_IV = 0x%x\n", name,
3677 	    MV_ACC_DESC_GET_VAL_2(w));
3678 	return;
3679 }
3680 #endif
3681