1 /*	$NetBSD: mvxpsec.c,v 1.20 2024/02/09 22:08:35 andvar Exp $	*/
2 /*
3  * Copyright (c) 2015 Internet Initiative Japan Inc.
4  * All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
17  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
18  * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
19  * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
20  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
21  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
23  * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
24  * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
25  * POSSIBILITY OF SUCH DAMAGE.
26  */
27 
28 #ifdef _KERNEL_OPT
29 #include "opt_ipsec.h"
30 #endif
31 
32 /*
33  * Cryptographic Engine and Security Accelerator (MVXPSEC)
34  */
35 #include <sys/cdefs.h>
36 #include <sys/param.h>
37 #include <sys/types.h>
38 #include <sys/kernel.h>
39 #include <sys/queue.h>
40 #include <sys/conf.h>
41 #include <sys/proc.h>
42 #include <sys/bus.h>
43 #include <sys/evcnt.h>
44 #include <sys/device.h>
45 #include <sys/endian.h>
46 #include <sys/errno.h>
47 #include <sys/kmem.h>
48 #include <sys/mbuf.h>
49 #include <sys/callout.h>
50 #include <sys/pool.h>
51 #include <sys/cprng.h>
52 #include <sys/syslog.h>
53 #include <sys/mutex.h>
54 #include <sys/kthread.h>
55 #include <sys/atomic.h>
56 #include <sys/sha1.h>
57 #include <sys/md5.h>
58 
59 #include <uvm/uvm_extern.h>
60 
61 #include <opencrypto/cryptodev.h>
62 #include <opencrypto/xform.h>
63 
64 #include <net/net_stats.h>
65 
66 #include <netinet/in_systm.h>
67 #include <netinet/in.h>
68 #include <netinet/ip.h>
69 #include <netinet/ip6.h>
70 
71 #if NIPSEC > 0
72 #include <netipsec/esp_var.h>
73 #endif
74 
75 #include <arm/cpufunc.h>
76 #include <arm/marvell/mvsocvar.h>
77 #include <arm/marvell/armadaxpreg.h>
78 #include <dev/marvell/marvellreg.h>
79 #include <dev/marvell/marvellvar.h>
80 #include <dev/marvell/mvxpsecreg.h>
81 #include <dev/marvell/mvxpsecvar.h>
82 
83 #ifdef DEBUG
84 #define STATIC __attribute__ ((noinline)) extern
85 #define _STATIC __attribute__ ((noinline)) extern
86 #define INLINE __attribute__ ((noinline)) extern
87 #define _INLINE __attribute__ ((noinline)) extern
88 #else
89 #define STATIC static
90 #define _STATIC __attribute__ ((unused)) static
91 #define INLINE static inline
92 #define _INLINE __attribute__ ((unused)) static inline
93 #endif
94 
95 /*
96  * IRQ and SRAM spaces for each unit
97  * XXX: move to attach_args
98  */
99 struct {
100 	int		err_int;
101 } mvxpsec_config[] = {
102 	{ .err_int = ARMADAXP_IRQ_CESA0_ERR, }, /* unit 0 */
103 	{ .err_int = ARMADAXP_IRQ_CESA1_ERR, }, /* unit 1 */
104 };
105 #define MVXPSEC_ERR_INT(sc) \
106     mvxpsec_config[device_unit((sc)->sc_dev)].err_int
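/*
 * Example (illustrative): for the unit-1 softc, device_unit() returns 1,
 * so MVXPSEC_ERR_INT(sc) evaluates to ARMADAXP_IRQ_CESA1_ERR.
 */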
107 
108 /*
109  * AES
110  */
111 #define MAXBC				(128/32)
112 #define MAXKC				(256/32)
113 #define MAXROUNDS			14
114 STATIC int mv_aes_ksched(uint8_t[4][MAXKC], int,
115     uint8_t[MAXROUNDS+1][4][MAXBC]);
116 STATIC int mv_aes_deckey(uint8_t *, uint8_t *, int);
117 
118 /*
119  * device driver autoconf interface
120  */
121 STATIC int mvxpsec_match(device_t, cfdata_t, void *);
122 STATIC void mvxpsec_attach(device_t, device_t, void *);
123 STATIC void mvxpsec_evcnt_attach(struct mvxpsec_softc *);
124 
125 /*
126  * register setup
127  */
128 STATIC int mvxpsec_wininit(struct mvxpsec_softc *, enum marvell_tags *);
129 
130 /*
131  * timer(callout) interface
132  *
133  * XXX: callout is not MP safe...
134  */
135 STATIC void mvxpsec_timer(void *);
136 
137 /*
138  * interrupt interface
139  */
140 STATIC int mvxpsec_intr(void *);
141 INLINE void mvxpsec_intr_cleanup(struct mvxpsec_softc *);
142 STATIC int mvxpsec_eintr(void *);
143 STATIC uint32_t mvxpsec_intr_ack(struct mvxpsec_softc *);
144 STATIC uint32_t mvxpsec_eintr_ack(struct mvxpsec_softc *);
145 INLINE void mvxpsec_intr_cnt(struct mvxpsec_softc *, int);
146 
147 /*
148  * memory allocators and VM management
149  */
150 STATIC struct mvxpsec_devmem *mvxpsec_alloc_devmem(struct mvxpsec_softc *,
151     paddr_t, int);
152 STATIC int mvxpsec_init_sram(struct mvxpsec_softc *);
153 
154 /*
155  * Low-level DMA interface
156  */
157 STATIC int mvxpsec_init_dma(struct mvxpsec_softc *,
158     struct marvell_attach_args *);
159 INLINE int mvxpsec_dma_wait(struct mvxpsec_softc *);
160 INLINE int mvxpsec_acc_wait(struct mvxpsec_softc *);
161 INLINE struct mvxpsec_descriptor_handle *mvxpsec_dma_getdesc(struct mvxpsec_softc *);
162 _INLINE void mvxpsec_dma_putdesc(struct mvxpsec_softc *, struct mvxpsec_descriptor_handle *);
163 INLINE void mvxpsec_dma_setup(struct mvxpsec_descriptor_handle *,
164     uint32_t, uint32_t, uint32_t);
165 INLINE void mvxpsec_dma_cat(struct mvxpsec_softc *,
166     struct mvxpsec_descriptor_handle *, struct mvxpsec_descriptor_handle *);
167 
168 /*
169  * High-level DMA interface
170  */
171 INLINE int mvxpsec_dma_copy0(struct mvxpsec_softc *,
172     mvxpsec_dma_ring *, uint32_t, uint32_t, uint32_t);
173 INLINE int mvxpsec_dma_copy(struct mvxpsec_softc *,
174     mvxpsec_dma_ring *, uint32_t, uint32_t, uint32_t);
175 INLINE int mvxpsec_dma_acc_activate(struct mvxpsec_softc *,
176     mvxpsec_dma_ring *);
177 INLINE void mvxpsec_dma_finalize(struct mvxpsec_softc *,
178     mvxpsec_dma_ring *);
179 INLINE void mvxpsec_dma_free(struct mvxpsec_softc *,
180     mvxpsec_dma_ring *);
181 INLINE int mvxpsec_dma_copy_packet(struct mvxpsec_softc *, struct mvxpsec_packet *);
182 INLINE int mvxpsec_dma_sync_packet(struct mvxpsec_softc *, struct mvxpsec_packet *);
183 
184 /*
185  * Session management interface (OpenCrypto)
186  */
187 #define MVXPSEC_SESSION(sid)	((sid) & 0x0fffffff)
188 #define MVXPSEC_SID(crd, sesn)	(((crd) << 28) | ((sesn) & 0x0fffffff))
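/*
 * Illustrative example (not part of the driver): the opencrypto SID
 * carries the crypto driver number in the top 4 bits and the local
 * session number in the low 28 bits, so the two macros round-trip:
 *
 *	uint32_t sid = MVXPSEC_SID(3, 42);	// driver 3, session 42
 *	int sesn = MVXPSEC_SESSION(sid);	// -> 42
 */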
189 /* pool management */
190 STATIC int mvxpsec_session_ctor(void *, void *, int);
191 STATIC void mvxpsec_session_dtor(void *, void *);
192 STATIC int mvxpsec_packet_ctor(void *, void *, int);
193 STATIC void mvxpsec_packet_dtor(void *, void *);
194 
195 /* session management */
196 STATIC struct mvxpsec_session *mvxpsec_session_alloc(struct mvxpsec_softc *);
197 STATIC void mvxpsec_session_dealloc(struct mvxpsec_session *);
198 INLINE struct mvxpsec_session *mvxpsec_session_lookup(struct mvxpsec_softc *, int);
199 INLINE int mvxpsec_session_ref(struct mvxpsec_session *);
200 INLINE void mvxpsec_session_unref(struct mvxpsec_session *);
201 
202 /* packet management */
203 STATIC struct mvxpsec_packet *mvxpsec_packet_alloc(struct mvxpsec_session *);
204 INLINE void mvxpsec_packet_enqueue(struct mvxpsec_packet *);
205 STATIC void mvxpsec_packet_dealloc(struct mvxpsec_packet *);
206 STATIC int mvxpsec_done_packet(struct mvxpsec_packet *);
207 
208 /* session header management */
209 STATIC int mvxpsec_header_finalize(struct mvxpsec_packet *);
210 
211 /* packet queue management */
212 INLINE void mvxpsec_drop(struct mvxpsec_softc *, struct cryptop *, struct mvxpsec_packet *, int);
213 STATIC int mvxpsec_dispatch_queue(struct mvxpsec_softc *);
214 
215 /* opencrypto operation */
216 INLINE int mvxpsec_parse_crd(struct mvxpsec_packet *, struct cryptodesc *);
217 INLINE int mvxpsec_parse_crp(struct mvxpsec_packet *);
218 
219 /* payload data management */
220 INLINE int mvxpsec_packet_setcrp(struct mvxpsec_packet *, struct cryptop *);
221 STATIC int mvxpsec_packet_setdata(struct mvxpsec_packet *, void *, uint32_t);
222 STATIC int mvxpsec_packet_setmbuf(struct mvxpsec_packet *, struct mbuf *);
223 STATIC int mvxpsec_packet_setuio(struct mvxpsec_packet *, struct uio *);
224 STATIC int mvxpsec_packet_rdata(struct mvxpsec_packet *, int, int, void *);
225 _STATIC int mvxpsec_packet_wdata(struct mvxpsec_packet *, int, int, void *);
226 STATIC int mvxpsec_packet_write_iv(struct mvxpsec_packet *, void *, int);
227 STATIC int mvxpsec_packet_copy_iv(struct mvxpsec_packet *, int, int);
228 
229 /* key pre-computation */
230 STATIC int mvxpsec_key_precomp(int, void *, int, void *, void *);
231 STATIC int mvxpsec_hmac_precomp(int, void *, int, void *, void *);
232 
233 /* crypto operation management */
234 INLINE void mvxpsec_packet_reset_op(struct mvxpsec_packet *);
235 INLINE void mvxpsec_packet_update_op_order(struct mvxpsec_packet *, int);
236 
237 /*
238  * parameter converters
239  */
240 INLINE uint32_t mvxpsec_alg2acc(uint32_t alg);
241 INLINE uint32_t mvxpsec_aesklen(int klen);
242 
243 /*
244  * string formatters
245  */
246 _STATIC const char *s_ctrlreg(uint32_t);
247 _STATIC const char *s_winreg(uint32_t);
248 _STATIC const char *s_errreg(uint32_t);
249 _STATIC const char *s_xpsecintr(uint32_t);
250 _STATIC const char *s_ctlalg(uint32_t);
251 _STATIC const char *s_xpsec_op(uint32_t);
252 _STATIC const char *s_xpsec_enc(uint32_t);
253 _STATIC const char *s_xpsec_mac(uint32_t);
254 _STATIC const char *s_xpsec_frag(uint32_t);
255 
256 /*
257  * debugging supports
258  */
259 #ifdef MVXPSEC_DEBUG
260 _STATIC void mvxpsec_dump_dmaq(struct mvxpsec_descriptor_handle *);
261 _STATIC void mvxpsec_dump_reg(struct mvxpsec_softc *);
262 _STATIC void mvxpsec_dump_sram(const char *, struct mvxpsec_softc *, size_t);
263 _STATIC void mvxpsec_dump_data(const char *, void *, size_t);
264 
265 _STATIC void mvxpsec_dump_packet(const char *, struct mvxpsec_packet *);
266 _STATIC void mvxpsec_dump_packet_data(const char *, struct mvxpsec_packet *);
267 _STATIC void mvxpsec_dump_packet_desc(const char *, struct mvxpsec_packet *);
268 
269 _STATIC void mvxpsec_dump_acc_config(const char *, uint32_t);
270 _STATIC void mvxpsec_dump_acc_encdata(const char *, uint32_t, uint32_t);
271 _STATIC void mvxpsec_dump_acc_enclen(const char *, uint32_t);
272 _STATIC void mvxpsec_dump_acc_enckey(const char *, uint32_t);
273 _STATIC void mvxpsec_dump_acc_enciv(const char *, uint32_t);
274 _STATIC void mvxpsec_dump_acc_macsrc(const char *, uint32_t);
275 _STATIC void mvxpsec_dump_acc_macdst(const char *, uint32_t);
276 _STATIC void mvxpsec_dump_acc_maciv(const char *, uint32_t);
277 #endif
278 
279 /*
280  * global configurations, params, work spaces, ...
281  *
282  * XXX: use sysctl for global configurations
283  */
284 /* waiting for device */
285 static int mvxpsec_wait_interval = 10;		/* usec */
286 static int mvxpsec_wait_retry = 100;		/* times = wait for 1 [msec] */
287 #ifdef MVXPSEC_DEBUG
288 static uint32_t mvxpsec_debug = MVXPSEC_DEBUG;	/* debug level */
289 #endif
290 
291 /*
292  * Register accessors
293  */
294 #define MVXPSEC_WRITE(sc, off, val) \
295 	bus_space_write_4((sc)->sc_iot, (sc)->sc_ioh, (off), (val))
296 #define MVXPSEC_READ(sc, off) \
297 	bus_space_read_4((sc)->sc_iot, (sc)->sc_ioh, (off))
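/*
 * Usage sketch (illustrative): a read-modify-write of the TDMA control
 * register through the accessors, as done in mvxpsec_attach() below:
 *
 *	uint32_t v = MVXPSEC_READ(sc, MV_TDMA_CONTROL);
 *	MVXPSEC_WRITE(sc, MV_TDMA_CONTROL, v | MV_TDMA_CONTROL_ENABLE);
 */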
298 
299 /*
300  * device driver autoconf interface
301  */
302 CFATTACH_DECL2_NEW(mvxpsec_mbus, sizeof(struct mvxpsec_softc),
303     mvxpsec_match, mvxpsec_attach, NULL, NULL, NULL, NULL);
304 
305 STATIC int
306 mvxpsec_match(device_t dev, cfdata_t match, void *aux)
307 {
308 	struct marvell_attach_args *mva = aux;
309 	uint32_t tag;
310 	int window;
311 
312 	if (strcmp(mva->mva_name, match->cf_name) != 0)
313 		return 0;
314 	if (mva->mva_offset == MVA_OFFSET_DEFAULT)
315 		return 0;
316 
317 	switch (mva->mva_unit) {
318 	case 0:
319 		tag = ARMADAXP_TAG_CRYPT0;
320 		break;
321 	case 1:
322 		tag = ARMADAXP_TAG_CRYPT1;
323 		break;
324 	default:
325 		aprint_error_dev(dev,
326 		    "unit %d is not supported\n", mva->mva_unit);
327 		return 0;
328 	}
329 
330 	window = mvsoc_target(tag, NULL, NULL, NULL, NULL);
331 	if (window >= nwindow) {
332 		aprint_error_dev(dev,
333 		    "Security Accelerator SRAM is not configured.\n");
334 		return 0;
335 	}
336 
337 	return 1;
338 }
339 
340 STATIC void
341 mvxpsec_attach(device_t parent, device_t self, void *aux)
342 {
343 	struct marvell_attach_args *mva = aux;
344 	struct mvxpsec_softc *sc = device_private(self);
345 	int v;
346 	int i;
347 
348 	sc->sc_dev = self;
349 
350 	aprint_normal(": Marvell Crypto Engines and Security Accelerator\n");
351 	aprint_naive("\n");
352 #ifdef MVXPSEC_MULTI_PACKET
353 	aprint_normal_dev(sc->sc_dev, "multi-packet chained mode enabled.\n");
354 #else
355 	aprint_normal_dev(sc->sc_dev, "multi-packet chained mode disabled.\n");
356 #endif
357 	aprint_normal_dev(sc->sc_dev,
358 	    "Max %d sessions.\n", MVXPSEC_MAX_SESSIONS);
359 
360 	/* mutex */
361 	mutex_init(&sc->sc_session_mtx, MUTEX_DEFAULT, IPL_NET);
362 	mutex_init(&sc->sc_dma_mtx, MUTEX_DEFAULT, IPL_NET);
363 	mutex_init(&sc->sc_queue_mtx, MUTEX_DEFAULT, IPL_NET);
364 
365 	/* Packet queue */
366 	SIMPLEQ_INIT(&sc->sc_wait_queue);
367 	SIMPLEQ_INIT(&sc->sc_run_queue);
368 	SLIST_INIT(&sc->sc_free_list);
369 	sc->sc_wait_qlen = 0;
370 #ifdef MVXPSEC_MULTI_PACKET
371 	sc->sc_wait_qlimit = 16;
372 #else
373 	sc->sc_wait_qlimit = 0;
374 #endif
375 	sc->sc_free_qlen = 0;
376 
377 	/* Timer */
378 	callout_init(&sc->sc_timeout, 0); /* XXX: use CALLOUT_MPSAFE */
379 	callout_setfunc(&sc->sc_timeout, mvxpsec_timer, sc);
380 
381 	/* I/O */
382 	sc->sc_iot = mva->mva_iot;
383 	if (bus_space_subregion(mva->mva_iot, mva->mva_ioh,
384 	    mva->mva_offset, mva->mva_size, &sc->sc_ioh)) {
385 		aprint_error_dev(self, "Cannot map registers\n");
386 		return;
387 	}
388 
389 	/* DMA */
390 	sc->sc_dmat = mva->mva_dmat;
391 	if (mvxpsec_init_dma(sc, mva) < 0)
392 		return;
393 
394 	/* SRAM */
395 	if (mvxpsec_init_sram(sc) < 0)
396 		return;
397 
398 	/* Registers */
399 	mvxpsec_wininit(sc, mva->mva_tags);
400 
401 	/* INTR */
402 	MVXPSEC_WRITE(sc, MVXPSEC_INT_MASK, MVXPSEC_DEFAULT_INT);
403 	MVXPSEC_WRITE(sc, MV_TDMA_ERR_MASK, MVXPSEC_DEFAULT_ERR);
404 	sc->sc_done_ih =
405 	    marvell_intr_establish(mva->mva_irq, IPL_NET, mvxpsec_intr, sc);
406 	/* XXX: should pass error IRQ using mva */
407 	sc->sc_error_ih = marvell_intr_establish(MVXPSEC_ERR_INT(sc),
408 	    IPL_NET, mvxpsec_eintr, sc);
409 	aprint_normal_dev(self,
410 	    "Error Reporting IRQ %d\n", MVXPSEC_ERR_INT(sc));
411 
412 	/* Initialize TDMA (It's enabled here, but waiting for SA) */
413 	if (mvxpsec_dma_wait(sc) < 0)
414 		panic("%s: DMA DEVICE not responding\n", __func__);
415 	MVXPSEC_WRITE(sc, MV_TDMA_CNT, 0);
416 	MVXPSEC_WRITE(sc, MV_TDMA_SRC, 0);
417 	MVXPSEC_WRITE(sc, MV_TDMA_DST, 0);
418 	MVXPSEC_WRITE(sc, MV_TDMA_NXT, 0);
419 	MVXPSEC_WRITE(sc, MV_TDMA_CUR, 0);
420 	v  = MVXPSEC_READ(sc, MV_TDMA_CONTROL);
421 	v |= MV_TDMA_CONTROL_ENABLE;
422 	MVXPSEC_WRITE(sc, MV_TDMA_CONTROL, v);
423 
424 	/* Initialize SA */
425 	if (mvxpsec_acc_wait(sc) < 0)
426 		panic("%s: MVXPSEC not responding\n", __func__);
427 	v  = MVXPSEC_READ(sc, MV_ACC_CONFIG);
428 	v &= ~MV_ACC_CONFIG_STOP_ON_ERR;
429 	v |= MV_ACC_CONFIG_MULT_PKT;
430 	v |= MV_ACC_CONFIG_WAIT_TDMA;
431 	v |= MV_ACC_CONFIG_ACT_TDMA;
432 	MVXPSEC_WRITE(sc, MV_ACC_CONFIG, v);
433 	MVXPSEC_WRITE(sc, MV_ACC_DESC, 0);
434 	MVXPSEC_WRITE(sc, MV_ACC_COMMAND, MV_ACC_COMMAND_STOP);
435 
436 	/* Session */
437 	sc->sc_session_pool =
438 	    pool_cache_init(sizeof(struct mvxpsec_session), 0, 0, 0,
439 	    "mvxpsecpl", NULL, IPL_NET,
440 	    mvxpsec_session_ctor, mvxpsec_session_dtor, sc);
441 	pool_cache_sethiwat(sc->sc_session_pool, MVXPSEC_MAX_SESSIONS);
442 	pool_cache_setlowat(sc->sc_session_pool, MVXPSEC_MAX_SESSIONS / 2);
443 	sc->sc_last_session = NULL;
444 
445 	/* Packet */
446 	sc->sc_packet_pool =
447 	    pool_cache_init(sizeof(struct mvxpsec_packet), 0, 0, 0,
448 	    "mvxpsec_pktpl", NULL, IPL_NET,
449 	    mvxpsec_packet_ctor, mvxpsec_packet_dtor, sc);
450 	pool_cache_sethiwat(sc->sc_packet_pool, MVXPSEC_MAX_SESSIONS);
451 	pool_cache_setlowat(sc->sc_packet_pool, MVXPSEC_MAX_SESSIONS / 2);
452 
453 	/* Register to EVCNT framework */
454 	mvxpsec_evcnt_attach(sc);
455 
456 	/* Register to Opencrypto */
457 	for (i = 0; i < MVXPSEC_MAX_SESSIONS; i++) {
458 		sc->sc_sessions[i] = NULL;
459 	}
460 	if (mvxpsec_register(sc))
461 		panic("cannot initialize OpenCrypto module.\n");
462 
463 	return;
464 }
465 
466 STATIC void
467 mvxpsec_evcnt_attach(struct mvxpsec_softc *sc)
468 {
469 	struct mvxpsec_evcnt *sc_ev = &sc->sc_ev;
470 
471 	evcnt_attach_dynamic(&sc_ev->intr_all, EVCNT_TYPE_INTR,
472 	    NULL, device_xname(sc->sc_dev), "Main Intr.");
473 	evcnt_attach_dynamic(&sc_ev->intr_auth, EVCNT_TYPE_INTR,
474 	    NULL, device_xname(sc->sc_dev), "Auth Intr.");
475 	evcnt_attach_dynamic(&sc_ev->intr_des, EVCNT_TYPE_INTR,
476 	    NULL, device_xname(sc->sc_dev), "DES Intr.");
477 	evcnt_attach_dynamic(&sc_ev->intr_aes_enc, EVCNT_TYPE_INTR,
478 	    NULL, device_xname(sc->sc_dev), "AES-Encrypt Intr.");
479 	evcnt_attach_dynamic(&sc_ev->intr_aes_dec, EVCNT_TYPE_INTR,
480 	    NULL, device_xname(sc->sc_dev), "AES-Decrypt Intr.");
481 	evcnt_attach_dynamic(&sc_ev->intr_enc, EVCNT_TYPE_INTR,
482 	    NULL, device_xname(sc->sc_dev), "Crypto Intr.");
483 	evcnt_attach_dynamic(&sc_ev->intr_sa, EVCNT_TYPE_INTR,
484 	    NULL, device_xname(sc->sc_dev), "SA Intr.");
485 	evcnt_attach_dynamic(&sc_ev->intr_acctdma, EVCNT_TYPE_INTR,
486 	    NULL, device_xname(sc->sc_dev), "AccTDMA Intr.");
487 	evcnt_attach_dynamic(&sc_ev->intr_comp, EVCNT_TYPE_INTR,
488 	    NULL, device_xname(sc->sc_dev), "TDMA-Complete Intr.");
489 	evcnt_attach_dynamic(&sc_ev->intr_own, EVCNT_TYPE_INTR,
490 	    NULL, device_xname(sc->sc_dev), "TDMA-Ownership Intr.");
491 	evcnt_attach_dynamic(&sc_ev->intr_acctdma_cont, EVCNT_TYPE_INTR,
492 	    NULL, device_xname(sc->sc_dev), "AccTDMA-Continue Intr.");
493 
494 	evcnt_attach_dynamic(&sc_ev->session_new, EVCNT_TYPE_MISC,
495 	    NULL, device_xname(sc->sc_dev), "New-Session");
496 	evcnt_attach_dynamic(&sc_ev->session_free, EVCNT_TYPE_MISC,
497 	    NULL, device_xname(sc->sc_dev), "Free-Session");
498 
499 	evcnt_attach_dynamic(&sc_ev->packet_ok, EVCNT_TYPE_MISC,
500 	    NULL, device_xname(sc->sc_dev), "Packet-OK");
501 	evcnt_attach_dynamic(&sc_ev->packet_err, EVCNT_TYPE_MISC,
502 	    NULL, device_xname(sc->sc_dev), "Packet-ERR");
503 
504 	evcnt_attach_dynamic(&sc_ev->dispatch_packets, EVCNT_TYPE_MISC,
505 	    NULL, device_xname(sc->sc_dev), "Packet-Dispatch");
506 	evcnt_attach_dynamic(&sc_ev->dispatch_queue, EVCNT_TYPE_MISC,
507 	    NULL, device_xname(sc->sc_dev), "Queue-Dispatch");
508 	evcnt_attach_dynamic(&sc_ev->queue_full, EVCNT_TYPE_MISC,
509 	    NULL, device_xname(sc->sc_dev), "Queue-Full");
510 	evcnt_attach_dynamic(&sc_ev->max_dispatch, EVCNT_TYPE_MISC,
511 	    NULL, device_xname(sc->sc_dev), "Max-Dispatch");
512 	evcnt_attach_dynamic(&sc_ev->max_done, EVCNT_TYPE_MISC,
513 	    NULL, device_xname(sc->sc_dev), "Max-Done");
514 }
515 
516 /*
517  * Register setup
518  */
519 STATIC int mvxpsec_wininit(struct mvxpsec_softc *sc, enum marvell_tags *tags)
520 {
521 	device_t pdev = device_parent(sc->sc_dev);
522 	uint64_t base;
523 	uint32_t size, reg;
524 	int window, target, attr, rv, i;
525 
526 	/* disable all windows */
527 	for (window = 0; window < MV_TDMA_NWINDOW; window++)
528 	{
529 		MVXPSEC_WRITE(sc, MV_TDMA_BAR(window), 0);
530 		MVXPSEC_WRITE(sc, MV_TDMA_ATTR(window), 0);
531 	}
532 
533 	for (window = 0, i = 0;
534 	    tags[i] != MARVELL_TAG_UNDEFINED && window < MV_TDMA_NWINDOW; i++) {
535 		rv = marvell_winparams_by_tag(pdev, tags[i],
536 		    &target, &attr, &base, &size);
537 		if (rv != 0 || size == 0)
538 			continue;
539 
540 		if (base > 0xffffffffULL) {
541 			aprint_error_dev(sc->sc_dev,
542 			    "can't remap window %d\n", window);
543 			continue;
544 		}
545 
546 		reg  = MV_TDMA_BAR_BASE(base);
547 		MVXPSEC_WRITE(sc, MV_TDMA_BAR(window), reg);
548 
549 		reg  = MV_TDMA_ATTR_TARGET(target);
550 		reg |= MV_TDMA_ATTR_ATTR(attr);
551 		reg |= MV_TDMA_ATTR_SIZE(size);
552 		reg |= MV_TDMA_ATTR_ENABLE;
553 		MVXPSEC_WRITE(sc, MV_TDMA_ATTR(window), reg);
554 
555 		window++;
556 	}
557 
558 	return 0;
559 }
560 
561 /*
562  * Timer handling
563  */
564 STATIC void
565 mvxpsec_timer(void *aux)
566 {
567 	struct mvxpsec_softc *sc = aux;
568 	struct mvxpsec_packet *mv_p;
569 	uint32_t reg;
570 	int ndone;
571 	int refill;
572 	int s;
573 
574 	/* IPL_SOFTCLOCK */
575 
576 	log(LOG_ERR, "%s: device timeout.\n", __func__);
577 #ifdef MVXPSEC_DEBUG
578 	mvxpsec_dump_reg(sc);
579 #endif
580 
581 	s = splnet();
582 	/* stop security accelerator */
583 	MVXPSEC_WRITE(sc, MV_ACC_COMMAND, MV_ACC_COMMAND_STOP);
584 
585 	/* stop TDMA */
586 	MVXPSEC_WRITE(sc, MV_TDMA_CONTROL, 0);
587 
588 	/* cleanup packet queue */
589 	mutex_enter(&sc->sc_queue_mtx);
590 	ndone = 0;
591 	while ( (mv_p = SIMPLEQ_FIRST(&sc->sc_run_queue)) != NULL) {
592 		SIMPLEQ_REMOVE_HEAD(&sc->sc_run_queue, queue);
593 
594 		mv_p->crp->crp_etype = EINVAL;
595 		mvxpsec_done_packet(mv_p);
596 		ndone++;
597 	}
598 	MVXPSEC_EVCNT_MAX(sc, max_done, ndone);
599 	sc->sc_flags &= ~HW_RUNNING;
600 	refill = (sc->sc_wait_qlen > 0) ? 1 : 0;
601 	mutex_exit(&sc->sc_queue_mtx);
602 
603 	/* reenable TDMA */
604 	if (mvxpsec_dma_wait(sc) < 0)
605 		panic("%s: failed to reset DMA DEVICE. give up.", __func__);
606 	MVXPSEC_WRITE(sc, MV_TDMA_CNT, 0);
607 	MVXPSEC_WRITE(sc, MV_TDMA_SRC, 0);
608 	MVXPSEC_WRITE(sc, MV_TDMA_DST, 0);
609 	MVXPSEC_WRITE(sc, MV_TDMA_CUR, 0);
610 	MVXPSEC_WRITE(sc, MV_TDMA_NXT, 0);
611 	reg  = MV_TDMA_DEFAULT_CONTROL;
612 	reg |= MV_TDMA_CONTROL_ENABLE;
613 	MVXPSEC_WRITE(sc, MV_TDMA_CONTROL, reg);
614 
615 	if (mvxpsec_acc_wait(sc) < 0)
616 		panic("%s: failed to reset MVXPSEC. give up.", __func__);
617 	reg  = MV_ACC_CONFIG_MULT_PKT;
618 	reg |= MV_ACC_CONFIG_WAIT_TDMA;
619 	reg |= MV_ACC_CONFIG_ACT_TDMA;
620 	MVXPSEC_WRITE(sc, MV_ACC_CONFIG, reg);
621 	MVXPSEC_WRITE(sc, MV_ACC_DESC, 0);
622 
623 	if (refill) {
624 		mutex_enter(&sc->sc_queue_mtx);
625 		mvxpsec_dispatch_queue(sc);
626 		mutex_exit(&sc->sc_queue_mtx);
627 	}
628 
629 	crypto_unblock(sc->sc_cid, CRYPTO_SYMQ|CRYPTO_ASYMQ);
630 	splx(s);
631 }
632 
633 /*
634  * DMA handling
635  */
636 
637 /*
638  * Allocate kernel devmem and DMA-safe memory with the bus_dma API;
639  * used for DMA descriptors.
640  *
641  * if phys != 0, assume phys is DMA-safe memory and bypass the
642  * allocator.
643  */
644 STATIC struct mvxpsec_devmem *
645 mvxpsec_alloc_devmem(struct mvxpsec_softc *sc, paddr_t phys, int size)
646 {
647 	struct mvxpsec_devmem *devmem;
648 	bus_dma_segment_t seg;
649 	int rseg;
650 	int err;
651 
652 	if (sc == NULL)
653 		return NULL;
654 
655 	devmem = kmem_alloc(sizeof(*devmem), KM_SLEEP);
656 	devmem->size = size;
657 
658 	if (phys) {
659 		seg.ds_addr = phys;
660 		seg.ds_len = devmem->size;
661 		rseg = 1;
662 		err = 0;
663 	}
664 	else {
665 		err = bus_dmamem_alloc(sc->sc_dmat,
666 		    devmem->size, PAGE_SIZE, 0,
667 		    &seg, MVXPSEC_DMA_MAX_SEGS, &rseg, BUS_DMA_NOWAIT);
668 	}
669 	if (err) {
670 		aprint_error_dev(sc->sc_dev, "can't alloc DMA buffer\n");
671 		goto fail_kmem_free;
672 	}
673 
674 	err = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
675 	     devmem->size, &devmem->kva, BUS_DMA_NOWAIT);
676 	if (err) {
677 		aprint_error_dev(sc->sc_dev, "can't map DMA buffer\n");
678 		goto fail_dmamem_free;
679 	}
680 
681 	err = bus_dmamap_create(sc->sc_dmat,
682 	    size, 1, size, 0, BUS_DMA_NOWAIT, &devmem->map);
683 	if (err) {
684 		aprint_error_dev(sc->sc_dev, "can't create DMA map\n");
685 		goto fail_unmap;
686 	}
687 
688 	err = bus_dmamap_load(sc->sc_dmat,
689 	    devmem->map, devmem->kva, devmem->size, NULL,
690 	    BUS_DMA_NOWAIT);
691 	if (err) {
692 		aprint_error_dev(sc->sc_dev,
693 		   "can't load DMA buffer VA:%p PA:0x%08x\n",
694 		    devmem->kva, (int)seg.ds_addr);
695 		goto fail_destroy;
696 	}
697 
698 	return devmem;
699 
700 fail_destroy:
701 	bus_dmamap_destroy(sc->sc_dmat, devmem->map);
702 fail_unmap:
703 	bus_dmamem_unmap(sc->sc_dmat, devmem->kva, devmem->size);
704 fail_dmamem_free:
705 	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
706 fail_kmem_free:
707 	kmem_free(devmem, sizeof(*devmem));
708 
709 	return NULL;
710 }
711 
712 /*
713  * Get DMA Descriptor from (DMA safe) descriptor pool.
714  */
715 INLINE struct mvxpsec_descriptor_handle *
716 mvxpsec_dma_getdesc(struct mvxpsec_softc *sc)
717 {
718 	struct mvxpsec_descriptor_handle *entry;
719 
720 	/* must be called with sc->sc_dma_mtx held */
721 	KASSERT(mutex_owned(&sc->sc_dma_mtx));
722 
723 	if (sc->sc_desc_ring_prod == sc->sc_desc_ring_cons)
724 		return NULL;
725 
726 	entry = &sc->sc_desc_ring[sc->sc_desc_ring_prod];
727 	sc->sc_desc_ring_prod++;
728 	if (sc->sc_desc_ring_prod >= sc->sc_desc_ring_size)
729 		sc->sc_desc_ring_prod -= sc->sc_desc_ring_size;
730 
731 	return entry;
732 }
733 
734 /*
735  * Return a DMA descriptor to the descriptor pool.
736  */
737 _INLINE void
738 mvxpsec_dma_putdesc(struct mvxpsec_softc *sc,
739     struct mvxpsec_descriptor_handle *dh)
740 {
741 	/* must be called with sc->sc_dma_mtx held */
742 	KASSERT(mutex_owned(&sc->sc_dma_mtx));
743 
744 	sc->sc_desc_ring_cons++;
745 	if (sc->sc_desc_ring_cons >= sc->sc_desc_ring_size)
746 		sc->sc_desc_ring_cons -= sc->sc_desc_ring_size;
747 
748 	return;
749 }
750 
751 /*
752  * Set up a DMA descriptor:
753  * copy 'size' bytes from 'src' to 'dst'.
754  * either 'src' or 'dst' must be an SRAM address.
755  */
756 INLINE void
757 mvxpsec_dma_setup(struct mvxpsec_descriptor_handle *dh,
758     uint32_t dst, uint32_t src, uint32_t size)
759 {
760 	struct mvxpsec_descriptor *desc;
761 
762 	desc = (struct mvxpsec_descriptor *)dh->_desc;
763 
764 	desc->tdma_dst = dst;
765 	desc->tdma_src = src;
766 	desc->tdma_word0 = size;
767 	if (size != 0)
768 		desc->tdma_word0 |= MV_TDMA_CNT_OWN;
769 	/* size == 0 is owned by ACC, not TDMA */
770 
771 #ifdef MVXPSEC_DEBUG
772 	mvxpsec_dump_dmaq(dh);
773 #endif
774 }
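/*
 * Usage sketch (illustrative; 'dram_pa' is a hypothetical physical
 * address taken from a loaded DMA map segment, and 'dh' is assumed to
 * come from mvxpsec_dma_getdesc()): queue a copy of a packet header
 * from DRAM into the SRAM packet-header area.
 *
 *	mvxpsec_dma_setup(dh, MVXPSEC_SRAM_PKT_HDR_PA(sc), dram_pa,
 *	    sizeof(mv_p->pkt_header));
 */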
775 
776 /*
777  * Concatenate two DMA descriptor chains
778  */
779 INLINE void
780 mvxpsec_dma_cat(struct mvxpsec_softc *sc,
781     struct mvxpsec_descriptor_handle *dh1,
782     struct mvxpsec_descriptor_handle *dh2)
783 {
784 	((struct mvxpsec_descriptor*)dh1->_desc)->tdma_nxt = dh2->phys_addr;
785 	MVXPSEC_SYNC_DESC(sc, dh1, BUS_DMASYNC_PREWRITE);
786 }
787 
788 /*
789  * Schedule DMA Copy
790  */
791 INLINE int
792 mvxpsec_dma_copy0(struct mvxpsec_softc *sc, mvxpsec_dma_ring *r,
793     uint32_t dst, uint32_t src, uint32_t size)
794 {
795 	struct mvxpsec_descriptor_handle *dh;
796 
797 	dh = mvxpsec_dma_getdesc(sc);
798 	if (dh == NULL) {
799 		log(LOG_ERR, "%s: descriptor full\n", __func__);
800 		return -1;
801 	}
802 
803 	mvxpsec_dma_setup(dh, dst, src, size);
804 	if (r->dma_head == NULL) {
805 		r->dma_head = dh;
806 		r->dma_last = dh;
807 		r->dma_size = 1;
808 	}
809 	else {
810 		mvxpsec_dma_cat(sc, r->dma_last, dh);
811 		r->dma_last = dh;
812 		r->dma_size++;
813 	}
814 
815 	return 0;
816 }
817 
818 INLINE int
819 mvxpsec_dma_copy(struct mvxpsec_softc *sc, mvxpsec_dma_ring *r,
820     uint32_t dst, uint32_t src, uint32_t size)
821 {
822 	if (size == 0) /* 0 is very special descriptor */
823 		return 0;
824 
825 	return mvxpsec_dma_copy0(sc, r, dst, src, size);
826 }
827 
828 /*
829  * Schedule ACC Activate
830  */
831 INLINE int
832 mvxpsec_dma_acc_activate(struct mvxpsec_softc *sc, mvxpsec_dma_ring *r)
833 {
834 	return mvxpsec_dma_copy0(sc, r, 0, 0, 0);
835 }
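/*
 * Note: the zero-length descriptor queued by mvxpsec_dma_copy0(sc, r,
 * 0, 0, 0) carries no data; per mvxpsec_dma_setup() above, its
 * MV_TDMA_CNT_OWN bit is left clear, marking it as owned by the
 * security accelerator rather than TDMA, which is what hands control
 * to the accelerator when TDMA reaches it.
 */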
836 
837 /*
838  * Finalize DMA setup
839  */
840 INLINE void
841 mvxpsec_dma_finalize(struct mvxpsec_softc *sc, mvxpsec_dma_ring *r)
842 {
843 	struct mvxpsec_descriptor_handle *dh;
844 
845 	dh = r->dma_last;
846 	((struct mvxpsec_descriptor*)dh->_desc)->tdma_nxt = 0;
847 	MVXPSEC_SYNC_DESC(sc, dh, BUS_DMASYNC_PREWRITE);
848 }
849 
850 /*
851  * Free entire DMA ring
852  */
853 INLINE void
854 mvxpsec_dma_free(struct mvxpsec_softc *sc, mvxpsec_dma_ring *r)
855 {
856 	sc->sc_desc_ring_cons += r->dma_size;
857 	if (sc->sc_desc_ring_cons >= sc->sc_desc_ring_size)
858 		sc->sc_desc_ring_cons -= sc->sc_desc_ring_size;
859 	r->dma_head = NULL;
860 	r->dma_last = NULL;
861 	r->dma_size = 0;
862 }
863 
864 /*
865  * create DMA descriptor chain for the packet
866  */
867 INLINE int
868 mvxpsec_dma_copy_packet(struct mvxpsec_softc *sc, struct mvxpsec_packet *mv_p)
869 {
870 	struct mvxpsec_session *mv_s = mv_p->mv_s;
871 	uint32_t src, dst, len;
872 	uint32_t pkt_off, pkt_off_r;
873 	int err;
874 	int i;
875 
876 	/* must be called with sc->sc_dma_mtx held */
877 	KASSERT(mutex_owned(&sc->sc_dma_mtx));
878 
879 	/*
880 	 * set offset for mem->device copy
881 	 *
882 	 * typical packet image:
883 	 *
884 	 *   enc_ivoff
885 	 *   mac_off
886 	 *   |
887 	 *   |    enc_off
888 	 *   |    |
889 	 *   v    v
890 	 *   +----+--------...
891 	 *   |IV  |DATA
892 	 *   +----+--------...
893 	 */
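	/*
	 * Worked example (illustrative): with mac_off = 0, enc_ivoff = 0
	 * and enc_off = 16 (a 16-byte IV in front of the payload), the
	 * checks below select pkt_off = 0, so the DMA copy starts at the
	 * IV and the DATA region lands 16 bytes into the SRAM payload.
	 */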
894 	pkt_off = 0;
895 	if (mv_p->mac_off > 0)
896 		pkt_off = mv_p->mac_off;
897 	if ((mv_p->flags & CRP_EXT_IV) == 0 && pkt_off > mv_p->enc_ivoff)
898 		pkt_off = mv_p->enc_ivoff;
899 	if (mv_p->enc_off > 0 && pkt_off > mv_p->enc_off)
900 		pkt_off = mv_p->enc_off;
901 	pkt_off_r = pkt_off;
902 
903 	/* make DMA descriptors to copy packet header: DRAM -> SRAM */
904 	dst = (uint32_t)MVXPSEC_SRAM_PKT_HDR_PA(sc);
905 	src = (uint32_t)mv_p->pkt_header_map->dm_segs[0].ds_addr;
906 	len = sizeof(mv_p->pkt_header);
907 	err = mvxpsec_dma_copy(sc, &mv_p->dma_ring, dst, src, len);
908 	if (__predict_false(err))
909 		return err;
910 
911 	/*
912 	 * make DMA descriptors to copy session header: DRAM -> SRAM
913 	 * we can reuse the session header in SRAM if the session has not changed.
914 	 */
915 	if (sc->sc_last_session != mv_s) {
916 		dst = (uint32_t)MVXPSEC_SRAM_SESS_HDR_PA(sc);
917 		src = (uint32_t)mv_s->session_header_map->dm_segs[0].ds_addr;
918 		len = sizeof(mv_s->session_header);
919 		err = mvxpsec_dma_copy(sc, &mv_p->dma_ring, dst, src, len);
920 		if (__predict_false(err))
921 			return err;
922 		sc->sc_last_session = mv_s;
923 	}
924 
925 	/* make DMA descriptor to copy payload data: DRAM -> SRAM */
926 	dst = MVXPSEC_SRAM_PAYLOAD_PA(sc, 0);
927 	for (i = 0; i < mv_p->data_map->dm_nsegs; i++) {
928 		src = mv_p->data_map->dm_segs[i].ds_addr;
929 		len = mv_p->data_map->dm_segs[i].ds_len;
930 		if (pkt_off) {
931 			if (len <= pkt_off) {
932 				/* ignore the segment */
933 				dst += len;
934 				pkt_off -= len;
935 				continue;
936 			}
937 			/* copy from the middle of the segment */
938 			dst += pkt_off;
939 			src += pkt_off;
940 			len -= pkt_off;
941 			pkt_off = 0;
942 		}
943 		err = mvxpsec_dma_copy(sc, &mv_p->dma_ring, dst, src, len);
944 		if (__predict_false(err))
945 			return err;
946 		dst += len;
947 	}
948 
949 	/* make special descriptor to activate security accelerator */
950 	err = mvxpsec_dma_acc_activate(sc, &mv_p->dma_ring);
951 	if (__predict_false(err))
952 		return err;
953 
954 	/* make DMA descriptors to copy payload: SRAM -> DRAM */
955 	src = (uint32_t)MVXPSEC_SRAM_PAYLOAD_PA(sc, 0);
956 	for (i = 0; i < mv_p->data_map->dm_nsegs; i++) {
957 		dst = (uint32_t)mv_p->data_map->dm_segs[i].ds_addr;
958 		len = (uint32_t)mv_p->data_map->dm_segs[i].ds_len;
959 		if (pkt_off_r) {
960 			if (len <= pkt_off_r) {
961 				/* ignore the segment */
962 				src += len;
963 				pkt_off_r -= len;
964 				continue;
965 			}
966 			/* copy from the middle of the segment */
967 			src += pkt_off_r;
968 			dst += pkt_off_r;
969 			len -= pkt_off_r;
970 			pkt_off_r = 0;
971 		}
972 		err = mvxpsec_dma_copy(sc, &mv_p->dma_ring, dst, src, len);
973 		if (__predict_false(err))
974 			return err;
975 		src += len;
976 	}
977 	KASSERT(pkt_off == 0);
978 	KASSERT(pkt_off_r == 0);
979 
980 	/*
981 	 * make DMA descriptors to copy packet header: SRAM->DRAM
982 	 * if IV is present in the payload, no need to copy.
983 	 */
984 	if (mv_p->flags & CRP_EXT_IV) {
985 		dst = (uint32_t)mv_p->pkt_header_map->dm_segs[0].ds_addr;
986 		src = (uint32_t)MVXPSEC_SRAM_PKT_HDR_PA(sc);
987 		len = sizeof(mv_p->pkt_header);
988 		err = mvxpsec_dma_copy(sc, &mv_p->dma_ring, dst, src, len);
989 		if (__predict_false(err))
990 			return err;
991 	}
992 
993 	return 0;
994 }
995 
996 INLINE int
997 mvxpsec_dma_sync_packet(struct mvxpsec_softc *sc, struct mvxpsec_packet *mv_p)
998 {
999 	/* sync packet header */
1000 	bus_dmamap_sync(sc->sc_dmat,
1001 	    mv_p->pkt_header_map, 0, sizeof(mv_p->pkt_header),
1002 	    BUS_DMASYNC_PREWRITE);
1003 
1004 #ifdef MVXPSEC_DEBUG
1005 	/* sync session header */
1006 	if (mvxpsec_debug != 0) {
1007 		struct mvxpsec_session *mv_s = mv_p->mv_s;
1008 
1009 		/* only debug code touch the session header after newsession */
1010 		bus_dmamap_sync(sc->sc_dmat,
1011 		    mv_s->session_header_map,
1012 		    0, sizeof(mv_s->session_header),
1013 		    BUS_DMASYNC_PREWRITE);
1014 	}
1015 #endif
1016 
1017 	/* sync packet buffer */
1018 	bus_dmamap_sync(sc->sc_dmat,
1019 	    mv_p->data_map, 0, mv_p->data_len,
1020 	    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
1021 
1022 	return 0;
1023 }
1024 
1025 /*
1026  * Initialize MVXPSEC Internal SRAM
1027  *
1028  * - must be called after DMA initialization.
1029  * - make VM mapping for SRAM area on MBus.
1030  */
1031 STATIC int
1032 mvxpsec_init_sram(struct mvxpsec_softc *sc)
1033 {
1034 	uint32_t tag, target, attr, base, size;
1035 	vaddr_t va;
1036 	int window;
1037 
1038 	switch (device_unit(sc->sc_dev)) {
1039 	case 0:
1040 		tag = ARMADAXP_TAG_CRYPT0;
1041 		break;
1042 	case 1:
1043 		tag = ARMADAXP_TAG_CRYPT1;
1044 		break;
1045 	default:
1046 		aprint_error_dev(sc->sc_dev, "no internal SRAM mapping\n");
1047 		return -1;
1048 	}
1049 
1050 	window = mvsoc_target(tag, &target, &attr, &base, &size);
1051 	if (window >= nwindow) {
1052 		aprint_error_dev(sc->sc_dev, "no internal SRAM mapping\n");
1053 		return -1;
1054 	}
1055 
1056 	if (sizeof(struct mvxpsec_crypt_sram) > size) {
1057 		aprint_error_dev(sc->sc_dev,
1058 	    "SRAM data structure exceeds SRAM window size.\n");
1059 		return -1;
1060 	}
1061 
1062 	aprint_normal_dev(sc->sc_dev,
1063 	    "internal SRAM window at 0x%08x-0x%08x",
1064 	    base, base + size - 1);
1065 	sc->sc_sram_pa = base;
1066 
1067 	/* get vmspace to read/write device internal SRAM */
1068 	va = uvm_km_alloc(kernel_map, PAGE_SIZE, PAGE_SIZE,
1069 			UVM_KMF_VAONLY | UVM_KMF_NOWAIT);
1070 	if (va == 0) {
1071 		aprint_error_dev(sc->sc_dev, "cannot map SRAM window\n");
1072 		sc->sc_sram_va = NULL;
1073 		aprint_normal("\n");
1074 		return 0;
1075 	}
1076 	/* XXX: not working. PMAP_NOCACHE seems to have no effect? */
1077 	pmap_kenter_pa(va, base, VM_PROT_READ|VM_PROT_WRITE, PMAP_NOCACHE);
1078 	pmap_update(pmap_kernel());
1079 	sc->sc_sram_va = (void *)va;
1080 	aprint_normal(" va %p\n", sc->sc_sram_va);
1081 	memset(sc->sc_sram_va, 0xff, MV_ACC_SRAM_SIZE);
1082 
1083 	return 0;
1084 }
1085 
1086 /*
1087  * Initialize TDMA engine.
1088  */
1089 STATIC int
1090 mvxpsec_init_dma(struct mvxpsec_softc *sc, struct marvell_attach_args *mva)
1091 {
1092 	struct mvxpsec_descriptor_handle *dh;
1093 	uint8_t *va;
1094 	paddr_t pa;
1095 	off_t va_off, pa_off;
1096 	int i, n, seg, ndh;
1097 
1098 	/* Init device's control parameters (still disabled) */
1099 	MVXPSEC_WRITE(sc, MV_TDMA_CONTROL, MV_TDMA_DEFAULT_CONTROL);
1100 
1101 	/* Init Software DMA Handlers */
1102 	sc->sc_devmem_desc =
1103 	    mvxpsec_alloc_devmem(sc, 0, PAGE_SIZE * MVXPSEC_DMA_DESC_PAGES);
1104 	ndh = (PAGE_SIZE / sizeof(struct mvxpsec_descriptor))
1105 	    * MVXPSEC_DMA_DESC_PAGES;
1106 	sc->sc_desc_ring =
1107 	    kmem_alloc(sizeof(struct mvxpsec_descriptor_handle) * ndh,
1108 	        KM_SLEEP);
1109 	aprint_normal_dev(sc->sc_dev, "%d DMA handles in %zu bytes array\n",
1110 	    ndh, sizeof(struct mvxpsec_descriptor_handle) * ndh);
1111 
1112 	ndh = 0;
1113 	for (seg = 0; seg < devmem_nseg(sc->sc_devmem_desc); seg++) {
1114 		va = devmem_va(sc->sc_devmem_desc);
1115 		pa = devmem_pa(sc->sc_devmem_desc, seg);
1116 		n = devmem_palen(sc->sc_devmem_desc, seg) /
1117 		       	sizeof(struct mvxpsec_descriptor);
1118 		va_off = (PAGE_SIZE * seg);
1119 		pa_off = 0;
1120 		for (i = 0; i < n; i++) {
1121 			dh = &sc->sc_desc_ring[ndh];
1122 			dh->map = devmem_map(sc->sc_devmem_desc);
1123 			dh->off = va_off + pa_off;
1124 			dh->_desc = (void *)(va + va_off + pa_off);
1125 			dh->phys_addr = pa + pa_off;
1126 			pa_off += sizeof(struct mvxpsec_descriptor);
1127 			ndh++;
1128 		}
1129 	}
1130 	sc->sc_desc_ring_size = ndh;
1131 	sc->sc_desc_ring_prod = 0;
1132 	sc->sc_desc_ring_cons = sc->sc_desc_ring_size - 1;
1133 
1134 	return 0;
1135 }
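/*
 * Sizing note (illustrative, assuming struct mvxpsec_descriptor is the
 * four 32-bit words used above: word0, src, dst, nxt = 16 bytes): with
 * a 4 KiB PAGE_SIZE, each devmem page yields 256 descriptor handles.
 */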
1136 
1137 /*
1138  * Wait for the TDMA controller to become idle
1139  */
1140 INLINE int
1141 mvxpsec_dma_wait(struct mvxpsec_softc *sc)
1142 {
1143 	int retry = 0;
1144 
1145 	while (MVXPSEC_READ(sc, MV_TDMA_CONTROL) & MV_TDMA_CONTROL_ACT) {
1146 		delay(mvxpsec_wait_interval);
1147 		if (retry++ >= mvxpsec_wait_retry)
1148 			return -1;
1149 	}
1150 	return 0;
1151 }
1152 
1153 /*
1154  * Wait for the Security Accelerator to become idle
1155  */
1156 INLINE int
1157 mvxpsec_acc_wait(struct mvxpsec_softc *sc)
1158 {
1159 	int retry = 0;
1160 
1161 	while (MVXPSEC_READ(sc, MV_ACC_COMMAND) & MV_ACC_COMMAND_ACT) {
1162 		delay(mvxpsec_wait_interval);
1163 		if (++retry >= mvxpsec_wait_retry)
1164 			return -1;
1165 	}
1166 	return 0;
1167 }
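/*
 * Timing note: with the defaults above (mvxpsec_wait_interval = 10 usec,
 * mvxpsec_wait_retry = 100), both wait loops poll for roughly 1 msec
 * before giving up.
 */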
1168 
1169 /*
1170  * Entry of interrupt handler
1171  *
1172  * registered with the kernel via marvell_intr_establish()
1173  */
1174 int
1175 mvxpsec_intr(void *arg)
1176 {
1177 	struct mvxpsec_softc *sc = arg;
1178 	uint32_t v;
1179 
1180 	/* IPL_NET */
1181 	while ((v = mvxpsec_intr_ack(sc)) != 0) {
1182 		mvxpsec_intr_cnt(sc, v);
1183 		MVXPSEC_PRINTF(MVXPSEC_DEBUG_INTR, "MVXPSEC Intr 0x%08x\n", v);
1184 		MVXPSEC_PRINTF(MVXPSEC_DEBUG_INTR, "%s\n", s_xpsecintr(v));
1185 #ifdef MVXPSEC_DEBUG
1186 		mvxpsec_dump_reg(sc);
1187 #endif
1188 
1189 		/* call high-level handlers */
1190 		if (v & MVXPSEC_INT_ACCTDMA)
1191 			mvxpsec_done(sc);
1192 	}
1193 
1194 	return 0;
1195 }
1196 
1197 INLINE void
1198 mvxpsec_intr_cleanup(struct mvxpsec_softc *sc)
1199 {
1200 	struct mvxpsec_packet *mv_p;
1201 
1202 	/* must be called with sc->sc_dma_mtx held */
1203 	KASSERT(mutex_owned(&sc->sc_dma_mtx));
1204 
1205 	/*
1206 	 * there is only one interrupt handler for the run queue;
1207 	 * nothing else touches sc_run_queue.
1208 	 */
1209 	SIMPLEQ_FOREACH(mv_p, &sc->sc_run_queue, queue)
1210 		mvxpsec_dma_free(sc, &mv_p->dma_ring);
1211 }
1212 
1213 /*
1214  * Acknowledge the interrupt
1215  *
1216  * read the cause bits, clear them, and return them.
1217  * NOTE: multiple cause bits may be returned at once.
1218  */
1219 STATIC uint32_t
1220 mvxpsec_intr_ack(struct mvxpsec_softc *sc)
1221 {
1222 	uint32_t reg;
1223 
1224 	reg  = MVXPSEC_READ(sc, MVXPSEC_INT_CAUSE);
1225 	reg &= MVXPSEC_DEFAULT_INT;
1226 	MVXPSEC_WRITE(sc, MVXPSEC_INT_CAUSE, ~reg);
1227 	MVXPSEC_PRINTF(MVXPSEC_DEBUG_INTR, "Int: %s\n", s_xpsecintr(reg));
1228 
1229 	return reg;
1230 }
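/*
 * Example (illustrative, assuming the cause register is
 * write-zero-to-clear, as the write of ~reg suggests): if
 * MVXPSEC_INT_CAUSE reads 0x81 and both bits are set in
 * MVXPSEC_DEFAULT_INT, writing ~0x81 clears exactly those cause bits
 * and the function returns 0x81, reporting two events at once.
 */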
1231 
1232 /*
1233  * Entry of TDMA error interrupt handler
1234  *
1235  * registered with the kernel via marvell_intr_establish()
1236  */
1237 int
1238 mvxpsec_eintr(void *arg)
1239 {
1240 	struct mvxpsec_softc *sc = arg;
1241 	uint32_t err;
1242 
1243 	/* IPL_NET */
1244 again:
1245 	err = mvxpsec_eintr_ack(sc);
1246 	if (err == 0)
1247 		goto done;
1248 
1249 	log(LOG_ERR, "%s: DMA Error Interrupt: %s\n", __func__,
1250 	    s_errreg(err));
1251 #ifdef MVXPSEC_DEBUG
1252 	mvxpsec_dump_reg(sc);
1253 #endif
1254 
1255 	goto again;
1256 done:
1257 	return 0;
1258 }
1259 
1260 /*
1261  * Acknowledge the TDMA error interrupt
1262  *
1263  * read the cause bits, clear them, and return them.
1264  * NOTE: multiple cause bits may be returned at once.
1265  */
1266 STATIC uint32_t
1267 mvxpsec_eintr_ack(struct mvxpsec_softc *sc)
1268 {
1269 	uint32_t reg;
1270 
1271 	reg  = MVXPSEC_READ(sc, MV_TDMA_ERR_CAUSE);
1272 	reg &= MVXPSEC_DEFAULT_ERR;
1273 	MVXPSEC_WRITE(sc, MV_TDMA_ERR_CAUSE, ~reg);
1274 	MVXPSEC_PRINTF(MVXPSEC_DEBUG_INTR, "Int: %s\n", s_xpsecintr(reg));
1275 
1276 	return reg;
1277 }
1278 
1279 /*
1280  * Interrupt statistics
1281  *
1282  * these are NOT statistics of how many times the events 'occurred';
1283  * they ONLY count how many times the events were 'handled'.
1284  */
1285 INLINE void
1286 mvxpsec_intr_cnt(struct mvxpsec_softc *sc, int cause)
1287 {
1288 	MVXPSEC_EVCNT_INCR(sc, intr_all);
1289 	if (cause & MVXPSEC_INT_AUTH)
1290 		MVXPSEC_EVCNT_INCR(sc, intr_auth);
1291 	if (cause & MVXPSEC_INT_DES)
1292 		MVXPSEC_EVCNT_INCR(sc, intr_des);
1293 	if (cause & MVXPSEC_INT_AES_ENC)
1294 		MVXPSEC_EVCNT_INCR(sc, intr_aes_enc);
1295 	if (cause & MVXPSEC_INT_AES_DEC)
1296 		MVXPSEC_EVCNT_INCR(sc, intr_aes_dec);
1297 	if (cause & MVXPSEC_INT_ENC)
1298 		MVXPSEC_EVCNT_INCR(sc, intr_enc);
1299 	if (cause & MVXPSEC_INT_SA)
1300 		MVXPSEC_EVCNT_INCR(sc, intr_sa);
1301 	if (cause & MVXPSEC_INT_ACCTDMA)
1302 		MVXPSEC_EVCNT_INCR(sc, intr_acctdma);
1303 	if (cause & MVXPSEC_INT_TDMA_COMP)
1304 		MVXPSEC_EVCNT_INCR(sc, intr_comp);
1305 	if (cause & MVXPSEC_INT_TDMA_OWN)
1306 		MVXPSEC_EVCNT_INCR(sc, intr_own);
1307 	if (cause & MVXPSEC_INT_ACCTDMA_CONT)
1308 		MVXPSEC_EVCNT_INCR(sc, intr_acctdma_cont);
1309 }
1310 
1311 /*
1312  * Set up the MVXPSEC header structure.
1313  *
1314  * the header contains the security accelerator descriptor,
1315  * the cipher key material, the IVs of the ciphers and MACs, ...
1316  *
1317  * the header is transferred to MVXPSEC Internal SRAM by TDMA,
1318  * and parsed by MVXPSEC H/W.
1319  */
1320 STATIC int
1321 mvxpsec_header_finalize(struct mvxpsec_packet *mv_p)
1322 {
1323 	struct mvxpsec_acc_descriptor *desc = &mv_p->pkt_header.desc;
1324 	int enc_start, enc_len, iv_offset;
1325 	int mac_start, mac_len, mac_offset;
1326 
1327 	/* offset -> device address */
1328 	enc_start = MVXPSEC_SRAM_PAYLOAD_DA(mv_p->enc_off);
1329 	enc_len = mv_p->enc_len;
1330 	if (mv_p->flags & CRP_EXT_IV)
1331 		iv_offset = mv_p->enc_ivoff;
1332 	else
1333 		iv_offset = MVXPSEC_SRAM_PAYLOAD_DA(mv_p->enc_ivoff);
1334 	mac_start = MVXPSEC_SRAM_PAYLOAD_DA(mv_p->mac_off);
1335 	mac_len = mv_p->mac_len;
1336 	mac_offset = MVXPSEC_SRAM_PAYLOAD_DA(mv_p->mac_dst);
1337 
1338 	MVXPSEC_PRINTF(MVXPSEC_DEBUG_OPENCRYPTO,
1339 	    "PAYLOAD at 0x%08x\n", (int)MVXPSEC_SRAM_PAYLOAD_OFF);
1340 	MVXPSEC_PRINTF(MVXPSEC_DEBUG_OPENCRYPTO,
1341 	    "ENC from 0x%08x\n", enc_start);
1342 	MVXPSEC_PRINTF(MVXPSEC_DEBUG_OPENCRYPTO,
1343 	    "MAC from 0x%08x\n", mac_start);
1344 	MVXPSEC_PRINTF(MVXPSEC_DEBUG_OPENCRYPTO,
1345 	    "MAC to 0x%08x\n", mac_offset);
1346 	MVXPSEC_PRINTF(MVXPSEC_DEBUG_OPENCRYPTO,
1347 	    "ENC IV at 0x%08x\n", iv_offset);
1348 
1349 	/* setup device addresses in Security Accelerator Descriptors */
1350 	desc->acc_encdata = MV_ACC_DESC_ENC_DATA(enc_start, enc_start);
1351 	desc->acc_enclen = MV_ACC_DESC_ENC_LEN(enc_len);
1352 	if (desc->acc_config & MV_ACC_CRYPTO_DECRYPT)
1353 		desc->acc_enckey =
1354 		    MV_ACC_DESC_ENC_KEY(MVXPSEC_SRAM_KEY_D_DA);
1355 	else
1356 		desc->acc_enckey =
1357 		    MV_ACC_DESC_ENC_KEY(MVXPSEC_SRAM_KEY_DA);
1358 	desc->acc_enciv =
1359 	    MV_ACC_DESC_ENC_IV(MVXPSEC_SRAM_IV_WORK_DA, iv_offset);
1360 
1361 	desc->acc_macsrc = MV_ACC_DESC_MAC_SRC(mac_start, mac_len);
1362 	desc->acc_macdst = MV_ACC_DESC_MAC_DST(mac_offset, mac_len);
1363 	desc->acc_maciv =
1364 	    MV_ACC_DESC_MAC_IV(MVXPSEC_SRAM_MIV_IN_DA,
1365 	        MVXPSEC_SRAM_MIV_OUT_DA);
1366 
1367 	return 0;
1368 }
1369 
1370 /*
1371  * constructor of the session structure.
1372  *
1373  * this constructor will be called by the pool_cache framework.
1374  */
1375 STATIC int
1376 mvxpsec_session_ctor(void *arg, void *obj, int flags)
1377 {
1378 	struct mvxpsec_softc *sc = arg;
1379 	struct mvxpsec_session *mv_s = obj;
1380 
1381 	/* pool is owned by softc */
1382 	mv_s->sc = sc;
1383 
1384 	/* Create and load DMA map for session header */
1385 	mv_s->session_header_map = 0;
1386 	if (bus_dmamap_create(sc->sc_dmat,
1387 	    sizeof(mv_s->session_header), 1,
1388 	    sizeof(mv_s->session_header), 0,
1389 	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1390 	    &mv_s->session_header_map)) {
1391 		log(LOG_ERR, "%s: cannot create DMA map\n", __func__);
1392 		goto fail;
1393 	}
1394 	if (bus_dmamap_load(sc->sc_dmat, mv_s->session_header_map,
1395 	    &mv_s->session_header, sizeof(mv_s->session_header),
1396 	    NULL, BUS_DMA_NOWAIT)) {
1397 		log(LOG_ERR, "%s: cannot load header\n", __func__);
1398 		goto fail;
1399 	}
1400 
1401 	return 0;
1402 fail:
1403 	if (mv_s->session_header_map)
1404 		bus_dmamap_destroy(sc->sc_dmat, mv_s->session_header_map);
1405 	return ENOMEM;
1406 }
1407 
1408 /*
1409  * destructor of the session structure.
1410  *
1411  * this destructor will be called by the pool_cache framework.
1412  */
1413 STATIC void
1414 mvxpsec_session_dtor(void *arg, void *obj)
1415 {
1416 	struct mvxpsec_softc *sc = arg;
1417 	struct mvxpsec_session *mv_s = obj;
1418 
1419 	if (mv_s->sc != sc)
1420 		panic("inconsistent context\n");
1421 
1422 	bus_dmamap_destroy(sc->sc_dmat, mv_s->session_header_map);
1423 }
1424 
1425 /*
1426  * constructor of the packet structure.
1427  */
1428 STATIC int
1429 mvxpsec_packet_ctor(void *arg, void *obj, int flags)
1430 {
1431 	struct mvxpsec_softc *sc = arg;
1432 	struct mvxpsec_packet *mv_p = obj;
1433 
1434 	mv_p->dma_ring.dma_head = NULL;
1435 	mv_p->dma_ring.dma_last = NULL;
1436 	mv_p->dma_ring.dma_size = 0;
1437 
1438 	/* Create and load DMA map for packet header */
1439 	mv_p->pkt_header_map = 0;
1440 	if (bus_dmamap_create(sc->sc_dmat,
1441 	    sizeof(mv_p->pkt_header), 1, sizeof(mv_p->pkt_header), 0,
1442 	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1443 	    &mv_p->pkt_header_map)) {
1444 		log(LOG_ERR, "%s: cannot create DMA map\n", __func__);
1445 		goto fail;
1446 	}
1447 	if (bus_dmamap_load(sc->sc_dmat, mv_p->pkt_header_map,
1448 	    &mv_p->pkt_header, sizeof(mv_p->pkt_header),
1449 	    NULL, BUS_DMA_NOWAIT)) {
1450 		log(LOG_ERR, "%s: cannot load header\n", __func__);
1451 		goto fail;
1452 	}
1453 
1454 	/* Create DMA map for session data. */
1455 	mv_p->data_map = 0;
1456 	if (bus_dmamap_create(sc->sc_dmat,
1457 	    MVXPSEC_DMA_MAX_SIZE, MVXPSEC_DMA_MAX_SEGS, MVXPSEC_DMA_MAX_SIZE,
1458 	    0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &mv_p->data_map)) {
1459 		log(LOG_ERR, "%s: cannot create DMA map\n", __func__);
1460 		goto fail;
1461 	}
1462 
1463 	return 0;
1464 fail:
1465 	if (mv_p->pkt_header_map)
1466 		bus_dmamap_destroy(sc->sc_dmat, mv_p->pkt_header_map);
1467 	if (mv_p->data_map)
1468 		bus_dmamap_destroy(sc->sc_dmat, mv_p->data_map);
1469 	return ENOMEM;
1470 }
1471 
1472 /*
1473  * destructor of the packet structure.
1474  */
1475 STATIC void
1476 mvxpsec_packet_dtor(void *arg, void *obj)
1477 {
1478 	struct mvxpsec_softc *sc = arg;
1479 	struct mvxpsec_packet *mv_p = obj;
1480 
1481 	mutex_enter(&sc->sc_dma_mtx);
1482 	mvxpsec_dma_free(sc, &mv_p->dma_ring);
1483 	mutex_exit(&sc->sc_dma_mtx);
1484 	bus_dmamap_destroy(sc->sc_dmat, mv_p->pkt_header_map);
1485 	bus_dmamap_destroy(sc->sc_dmat, mv_p->data_map);
1486 }
1487 
1488 /*
1489  * allocate a new session structure.
1490  */
1491 STATIC struct mvxpsec_session *
1492 mvxpsec_session_alloc(struct mvxpsec_softc *sc)
1493 {
1494 	struct mvxpsec_session *mv_s;
1495 
1496 	mv_s = pool_cache_get(sc->sc_session_pool, PR_NOWAIT);
1497 	if (mv_s == NULL) {
1498 		log(LOG_ERR, "%s: cannot allocate memory\n", __func__);
1499 		return NULL;
1500 	}
1501 	mv_s->refs = 1; /* 0 means session is already invalid */
1502 	mv_s->sflags = 0;
1503 
1504 	return mv_s;
1505 }
1506 
1507 /*
1508  * deallocate session structure.
1509  */
1510 STATIC void
1511 mvxpsec_session_dealloc(struct mvxpsec_session *mv_s)
1512 {
1513 	struct mvxpsec_softc *sc = mv_s->sc;
1514 
1515 	mv_s->sflags |= DELETED;
1516 	mvxpsec_session_unref(mv_s);
1517 	crypto_unblock(sc->sc_cid, CRYPTO_SYMQ|CRYPTO_ASYMQ);
1518 
1519 	return;
1520 }
1521 
1522 STATIC int
1523 mvxpsec_session_ref(struct mvxpsec_session *mv_s)
1524 {
1525 	uint32_t refs;
1526 
1527 	if (mv_s->sflags & DELETED) {
1528 		log(LOG_ERR,
1529 		    "%s: session is already deleted.\n", __func__);
1530 		return -1;
1531 	}
1532 
1533 	refs = atomic_inc_32_nv(&mv_s->refs);
1534 	if (refs == 1) {
1535 		/*
1536 		 * a session with refs == 0 is
1537 		 * already invalidated. revert it.
1538 		 * XXX: use CAS ?
1539 		 */
1540 		atomic_dec_32(&mv_s->refs);
1541 		log(LOG_ERR,
1542 		    "%s: session is already invalidated.\n", __func__);
1543 		return -1;
1544 	}
1545 
1546 	return 0;
1547 }
1548 
1549 STATIC void
1550 mvxpsec_session_unref(struct mvxpsec_session *mv_s)
1551 {
1552 	uint32_t refs;
1553 
1554 	membar_release();
1555 	refs = atomic_dec_32_nv(&mv_s->refs);
1556 	if (refs == 0) {
1557 		membar_acquire();
1558 		pool_cache_put(mv_s->sc->sc_session_pool, mv_s);
1559 	}
1560 }
1561 
1562 /*
1563  * look up the session and check whether it exists
1564  */
1565 INLINE struct mvxpsec_session *
1566 mvxpsec_session_lookup(struct mvxpsec_softc *sc, int sid)
1567 {
1568 	struct mvxpsec_session *mv_s;
1569 	int session;
1570 
1571 	/* must be called with sc->sc_session_mtx held */
1572 	KASSERT(mutex_owned(&sc->sc_session_mtx));
1573 
1574 	session = MVXPSEC_SESSION(sid);
1575 	if (__predict_false(session >= MVXPSEC_MAX_SESSIONS)) {
1576 		log(LOG_ERR, "%s: session number too large %d\n",
1577 		    __func__, session);
1578 		return NULL;
1579 	}
1580 	if (__predict_false( (mv_s = sc->sc_sessions[session]) == NULL)) {
1581 		log(LOG_ERR, "%s: invalid session %d\n",
1582 		    __func__, session);
1583 		return NULL;
1584 	}
1585 
1586 	KASSERT(mv_s->sid == session);
1587 
1588 	return mv_s;
1589 }
1590 
1591 /*
1592  * allocate a new packet structure.
1593  */
1594 STATIC struct mvxpsec_packet *
1595 mvxpsec_packet_alloc(struct mvxpsec_session *mv_s)
1596 {
1597 	struct mvxpsec_softc *sc = mv_s->sc;
1598 	struct mvxpsec_packet *mv_p;
1599 
1600 	/* must be called with sc->sc_queue_mtx held. */
1601 	KASSERT(mutex_owned(&sc->sc_queue_mtx));
1602 	/* must be called with sc->sc_session_mtx held. */
1603 	KASSERT(mutex_owned(&sc->sc_session_mtx));
1604 
1605 	if (mvxpsec_session_ref(mv_s) < 0) {
1606 		log(LOG_ERR, "%s: invalid session.\n", __func__);
1607 		return NULL;
1608 	}
1609 
1610 	if ( (mv_p = SLIST_FIRST(&sc->sc_free_list)) != NULL) {
1611 		SLIST_REMOVE_HEAD(&sc->sc_free_list, free_list);
1612 		sc->sc_free_qlen--;
1613 	}
1614 	else {
1615 		mv_p = pool_cache_get(sc->sc_packet_pool, PR_NOWAIT);
1616 		if (mv_p == NULL) {
1617 			log(LOG_ERR, "%s: cannot allocate memory\n",
1618 			    __func__);
1619 			mvxpsec_session_unref(mv_s);
1620 			return NULL;
1621 		}
1622 	}
1623 	mv_p->mv_s = mv_s;
1624 	mv_p->flags = 0;
1625 	mv_p->data_ptr = NULL;
1626 
1627 	return mv_p;
1628 }
1629 
1630 /*
1631  * free the packet structure.
1632  */
1633 STATIC void
1634 mvxpsec_packet_dealloc(struct mvxpsec_packet *mv_p)
1635 {
1636 	struct mvxpsec_session *mv_s = mv_p->mv_s;
1637 	struct mvxpsec_softc *sc = mv_s->sc;
1638 
1639 	/* must be called with sc->sc_queue_mtx held */
1640 	KASSERT(mutex_owned(&sc->sc_queue_mtx));
1641 
1642 	if (mv_p->dma_ring.dma_size != 0) {
1643 		sc->sc_desc_ring_cons += mv_p->dma_ring.dma_size;
1644 	}
1645 	mv_p->dma_ring.dma_head = NULL;
1646 	mv_p->dma_ring.dma_last = NULL;
1647 	mv_p->dma_ring.dma_size = 0;
1648 
1649 	if (mv_p->data_map) {
1650 		if (mv_p->flags & RDY_DATA) {
1651 			bus_dmamap_unload(sc->sc_dmat, mv_p->data_map);
1652 			mv_p->flags &= ~RDY_DATA;
1653 		}
1654 	}
1655 
1656 	if (sc->sc_free_qlen > sc->sc_wait_qlimit)
1657 		pool_cache_put(sc->sc_packet_pool, mv_p);
1658 	else {
1659 		SLIST_INSERT_HEAD(&sc->sc_free_list, mv_p, free_list);
1660 		sc->sc_free_qlen++;
1661 	}
1662 	mvxpsec_session_unref(mv_s);
1663 }
1664 
1665 INLINE void
1666 mvxpsec_packet_enqueue(struct mvxpsec_packet *mv_p)
1667 {
1668 	struct mvxpsec_softc *sc = mv_p->mv_s->sc;
1669 	struct mvxpsec_packet *last_packet;
1670 	struct mvxpsec_descriptor_handle *cur_dma, *prev_dma;
1671 
1672 	/* must be called with sc->sc_queue_mtx held */
1673 	KASSERT(mutex_owned(&sc->sc_queue_mtx));
1674 
1675 	if (sc->sc_wait_qlen == 0) {
1676 		SIMPLEQ_INSERT_TAIL(&sc->sc_wait_queue, mv_p, queue);
1677 		sc->sc_wait_qlen++;
1678 		mv_p->flags |= SETUP_DONE;
1679 		return;
1680 	}
1681 
1682 	last_packet = SIMPLEQ_LAST(&sc->sc_wait_queue, mvxpsec_packet, queue);
1683 	SIMPLEQ_INSERT_TAIL(&sc->sc_wait_queue, mv_p, queue);
1684 	sc->sc_wait_qlen++;
1685 
1686 	/* chain the DMA */
1687 	cur_dma = mv_p->dma_ring.dma_head;
1688 	prev_dma = last_packet->dma_ring.dma_last;
1689 	mvxpsec_dma_cat(sc, prev_dma, cur_dma);
1690 	mv_p->flags |= SETUP_DONE;
1691 }
1692 
1693 /*
1694  * called by interrupt handler
1695  */
1696 STATIC int
1697 mvxpsec_done_packet(struct mvxpsec_packet *mv_p)
1698 {
1699 	struct mvxpsec_session *mv_s = mv_p->mv_s;
1700 	struct mvxpsec_softc *sc = mv_s->sc;
1701 
1702 	KASSERT((mv_p->flags & RDY_DATA));
1703 	KASSERT((mv_p->flags & SETUP_DONE));
1704 
1705 	/* unload data */
1706 	bus_dmamap_sync(sc->sc_dmat, mv_p->data_map,
1707 	    0, mv_p->data_len,
1708 	    BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
1709 	bus_dmamap_unload(sc->sc_dmat, mv_p->data_map);
1710 	mv_p->flags &= ~RDY_DATA;
1711 
1712 #ifdef MVXPSEC_DEBUG
1713 	if (mvxpsec_debug != 0) {
1714 		int s;
1715 
1716 		bus_dmamap_sync(sc->sc_dmat, mv_p->pkt_header_map,
1717 		    0, sizeof(mv_p->pkt_header),
1718 		    BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
1719 		bus_dmamap_sync(sc->sc_dmat, mv_s->session_header_map,
1720 		    0, sizeof(mv_s->session_header),
1721 		    BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
1722 
1723 		if (mvxpsec_debug & MVXPSEC_DEBUG_OPENCRYPTO) {
1724 			char buf[1500];
1725 			struct mbuf *m;
1726 			struct uio *uio;
1727 			size_t len;
1728 
1729 			switch (mv_p->data_type) {
1730 			case MVXPSEC_DATA_MBUF:
1731 				m = mv_p->data_mbuf;
1732 				len = m->m_pkthdr.len;
1733 				if (len > sizeof(buf))
1734 					len = sizeof(buf);
1735 				m_copydata(m, 0, len, buf);
1736 				break;
1737 			case MVXPSEC_DATA_UIO:
1738 				uio = mv_p->data_uio;
1739 				len = uio->uio_resid;
1740 				if (len > sizeof(buf))
1741 					len = sizeof(buf);
1742 				cuio_copydata(uio, 0, len, buf);
1743 				break;
1744 			default:
1745 				len = 0;
1746 			}
1747 			if (len > 0)
1748 				mvxpsec_dump_data(__func__, buf, len);
1749 		}
1750 
1751 		if (mvxpsec_debug & MVXPSEC_DEBUG_PAYLOAD) {
1752 			MVXPSEC_PRINTF(MVXPSEC_DEBUG_PAYLOAD,
1753 			    "%s: session_descriptor:\n", __func__);
1754 			mvxpsec_dump_packet_desc(__func__, mv_p);
1755 			MVXPSEC_PRINTF(MVXPSEC_DEBUG_PAYLOAD,
1756 			    "%s: session_data:\n", __func__);
1757 			mvxpsec_dump_packet_data(__func__, mv_p);
1758 		}
1759 
1760 		if (mvxpsec_debug & MVXPSEC_DEBUG_SRAM) {
1761 			MVXPSEC_PRINTF(MVXPSEC_DEBUG_SRAM,
1762 			    "%s: SRAM\n", __func__);
1763 			mvxpsec_dump_sram(__func__, sc, 2000);
1764 		}
1765 
1766 		s = MVXPSEC_READ(sc, MV_ACC_STATUS);
1767 		if (s & MV_ACC_STATUS_MAC_ERR) {
1768 			MVXPSEC_PRINTF(MVXPSEC_DEBUG_INTR,
1769 			    "%s: Message Authentication Failed.\n", __func__);
1770 		}
1771 	}
1772 #endif
1773 
1774 	/* copy back IV */
1775 	if (mv_p->flags & CRP_EXT_IV) {
1776 		memcpy(mv_p->ext_iv,
1777 		    &mv_p->pkt_header.crp_iv_ext, mv_p->ext_ivlen);
1778 		mv_p->ext_iv = NULL;
1779 		mv_p->ext_ivlen = 0;
1780 	}
1781 
1782 	/* notify opencrypto */
1783 	mv_p->crp->crp_etype = 0;
1784 	crypto_done(mv_p->crp);
1785 	mv_p->crp = NULL;
1786 
1787 	/* unblock driver */
1788 	mvxpsec_packet_dealloc(mv_p);
1789 	crypto_unblock(sc->sc_cid, CRYPTO_SYMQ|CRYPTO_ASYMQ);
1790 
1791 	MVXPSEC_EVCNT_INCR(sc, packet_ok);
1792 
1793 	return 0;
1794 }
1795 
1796 
1797 /*
1798  * Opencrypto API registration
1799  */
1800 int
1801 mvxpsec_register(struct mvxpsec_softc *sc)
1802 {
1803 	int oplen = SRAM_PAYLOAD_SIZE;
1804 	int flags = 0;
1805 	int err;
1806 
1807 	sc->sc_nsessions = 0;
1808 	sc->sc_cid = crypto_get_driverid(0);
1809 	if (sc->sc_cid < 0) {
1810 		log(LOG_ERR,
1811 		    "%s: crypto_get_driverid() failed.\n", __func__);
1812 		err = EINVAL;
1813 		goto done;
1814 	}
1815 
1816 	/* Ciphers */
1817 	err = crypto_register(sc->sc_cid, CRYPTO_DES_CBC, oplen, flags,
1818 	    mvxpsec_newsession, mvxpsec_freesession, mvxpsec_dispatch, sc);
1819 	if (err)
1820 		goto done;
1821 
1822 	err = crypto_register(sc->sc_cid, CRYPTO_3DES_CBC, oplen, flags,
1823 	    mvxpsec_newsession, mvxpsec_freesession, mvxpsec_dispatch, sc);
1824 	if (err)
1825 		goto done;
1826 
1827 	err = crypto_register(sc->sc_cid, CRYPTO_AES_CBC, oplen, flags,
1828 	    mvxpsec_newsession, mvxpsec_freesession, mvxpsec_dispatch, sc);
1829 	if (err)
1830 		goto done;
1831 
1832 	/* MACs */
1833 	err = crypto_register(sc->sc_cid, CRYPTO_SHA1_HMAC_96,
1834 	    oplen, flags,
1835 	    mvxpsec_newsession, mvxpsec_freesession, mvxpsec_dispatch, sc);
1836 	if (err)
1837 		goto done;
1838 
1839 	err = crypto_register(sc->sc_cid, CRYPTO_MD5_HMAC_96,
1840 	    oplen, flags,
1841 	    mvxpsec_newsession, mvxpsec_freesession, mvxpsec_dispatch, sc);
1842 	if (err)
1843 		goto done;
1844 
1845 #ifdef DEBUG
1846 	log(LOG_DEBUG,
1847 	    "%s: registered to opencrypto (max data = %d bytes)\n",
1848 	    device_xname(sc->sc_dev), oplen);
1849 #endif
1850 
1851 	err = 0;
1852 done:
1853 	return err;
1854 }
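
/*
 * Illustrative sketch (not driver code): how an opencrypto(9)
 * consumer reaches the entry points registered above. Error handling
 * is omitted; my_done_cb, key and m are hypothetical placeholders.
 *
 *	struct cryptoini cri;
 *	struct cryptop *crp;
 *	uint64_t sid;
 *
 *	memset(&cri, 0, sizeof(cri));
 *	cri.cri_alg = CRYPTO_AES_CBC;
 *	cri.cri_klen = 128;			// key length in bits
 *	cri.cri_key = key;
 *	crypto_newsession(&sid, &cri, 0);	// -> mvxpsec_newsession()
 *
 *	crp = crypto_getreq(1);			// one cryptodesc
 *	crp->crp_sid = sid;
 *	crp->crp_flags = CRYPTO_F_IMBUF;
 *	crp->crp_buf = (void *)m;		// mbuf chain to process
 *	crp->crp_ilen = m->m_pkthdr.len;
 *	crp->crp_callback = my_done_cb;		// fired from crypto_done()
 *	crypto_dispatch(crp);			// -> mvxpsec_dispatch()
 */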
1855 
1856 /*
1857  * Create new opencrypto session
1858  *
1859  *   - register cipher key, mac key.
1860  *   - initialize mac internal state.
1861  */
1862 int
1863 mvxpsec_newsession(void *arg, uint32_t *sidp, struct cryptoini *cri)
1864 {
1865 	struct mvxpsec_softc *sc = arg;
1866 	struct mvxpsec_session *mv_s = NULL;
1867 	struct cryptoini *c;
1868 	static int hint = 0;
1869 	int session = -1;
1870 	int sid;
1871 	int err;
1872 	int i;
1873 
1874 	/* allocate driver session context */
1875 	mv_s = mvxpsec_session_alloc(sc);
1876 	if (mv_s == NULL)
1877 		return ENOMEM;
1878 
1879 	/*
1880 	 * lookup opencrypto session table
1881 	 *
1882 	 * we have sc_session_mtx after here.
1883 	 */
1884 	mutex_enter(&sc->sc_session_mtx);
1885 	if (sc->sc_nsessions >= MVXPSEC_MAX_SESSIONS) {
1886 		mutex_exit(&sc->sc_session_mtx);
1887 		log(LOG_ERR, "%s: too many IPsec SAs (max %d)\n",
1888 				__func__, MVXPSEC_MAX_SESSIONS);
1889 		mvxpsec_session_dealloc(mv_s);
1890 		return ENOMEM;
1891 	}
1892 	for (i = hint; i < MVXPSEC_MAX_SESSIONS; i++) {
1893 		if (sc->sc_sessions[i])
1894 			continue;
1895 		session = i;
1896 		hint = session + 1;
1897 	       	break;
1898 	}
1899 	if (session < 0) {
1900 		for (i = 0; i < hint; i++) {
1901 			if (sc->sc_sessions[i])
1902 				continue;
1903 			session = i;
1904 			hint = session + 1;
1905 			break;
1906 		}
1907 		if (session < 0) {
1908 			mutex_exit(&sc->sc_session_mtx);
1909 			/* session full */
1910 			log(LOG_ERR, "%s: too many IPsec SAs (max %d)\n",
1911 				__func__, MVXPSEC_MAX_SESSIONS);
1912 			mvxpsec_session_dealloc(mv_s);
1913 			hint = 0;
1914 			return ENOMEM;
1915 		}
1916 	}
1917 	if (hint >= MVXPSEC_MAX_SESSIONS)
1918 		hint = 0;
1919 	sc->sc_nsessions++;
1920 	sc->sc_sessions[session] = mv_s;
1921 #ifdef DEBUG
1922 	log(LOG_DEBUG, "%s: new session %d allocated\n", __func__, session);
1923 #endif
1924 
1925 	sid = MVXPSEC_SID(device_unit(sc->sc_dev), session);
1926 	mv_s->sid = sid;
1927 
1928 	/* setup the session key ... */
1929 	for (c = cri; c; c = c->cri_next) {
1930 		switch (c->cri_alg) {
1931 		case CRYPTO_DES_CBC:
1932 		case CRYPTO_3DES_CBC:
1933 		case CRYPTO_AES_CBC:
1934 			/* key */
1935 			if (mvxpsec_key_precomp(c->cri_alg,
1936 			    c->cri_key, c->cri_klen,
1937 			    &mv_s->session_header.crp_key,
1938 			    &mv_s->session_header.crp_key_d)) {
1939 				log(LOG_ERR,
1940 				    "%s: Invalid cipher key for %s.\n",
1941 				    __func__, s_ctlalg(c->cri_alg));
1942 				err = EINVAL;
1943 				goto fail;
1944 			}
1945 			if (mv_s->sflags & RDY_CRP_KEY) {
1946 				log(LOG_WARNING,
1947 				    "%s: overwrite cipher: %s->%s.\n",
1948 				    __func__,
1949 				    s_ctlalg(mv_s->cipher_alg),
1950 				    s_ctlalg(c->cri_alg));
1951 			}
1952 			mv_s->sflags |= RDY_CRP_KEY;
1953 			mv_s->enc_klen = c->cri_klen;
1954 			mv_s->cipher_alg = c->cri_alg;
1955 			/* create per session IV (compatible with KAME IPsec) */
1956 			cprng_fast(&mv_s->session_iv, sizeof(mv_s->session_iv));
1957 			mv_s->sflags |= RDY_CRP_IV;
1958 			break;
1959 		case CRYPTO_SHA1_HMAC_96:
1960 		case CRYPTO_MD5_HMAC_96:
1961 			/* key */
1962 			if (mvxpsec_hmac_precomp(c->cri_alg,
1963 			    c->cri_key, c->cri_klen,
1964 			    (uint32_t *)&mv_s->session_header.miv_in,
1965 			    (uint32_t *)&mv_s->session_header.miv_out)) {
1966 				log(LOG_ERR,
1967 				    "%s: Invalid MAC key\n", __func__);
1968 				err = EINVAL;
1969 				goto fail;
1970 			}
1971 			if (mv_s->sflags & RDY_MAC_KEY ||
1972 			    mv_s->sflags & RDY_MAC_IV) {
1973 				log(LOG_ERR,
1974 				    "%s: overwrite HMAC: %s->%s.\n",
1975 				    __func__, s_ctlalg(mv_s->hmac_alg),
1976 				    s_ctlalg(c->cri_alg));
1977 			}
1978 			mv_s->sflags |= RDY_MAC_KEY;
1979 			mv_s->sflags |= RDY_MAC_IV;
1980 
1981 			mv_s->mac_klen = c->cri_klen;
1982 			mv_s->hmac_alg = c->cri_alg;
1983 			break;
1984 		default:
1985 			log(LOG_ERR, "%s: Unknown algorithm %d\n",
1986 			    __func__, c->cri_alg);
1987 			err = EINVAL;
1988 			goto fail;
1989 		}
1990 	}
1991 	MVXPSEC_PRINTF(MVXPSEC_DEBUG_OPENCRYPTO,
1992 	    "H/W Crypto session (id:%u) added.\n", session);
1993 
1994 	*sidp = sid;
1995 	MVXPSEC_EVCNT_INCR(sc, session_new);
1996 	mutex_exit(&sc->sc_session_mtx);
1997 
1998 	/* sync session header (it is never touched after this point) */
1999 	bus_dmamap_sync(sc->sc_dmat,
2000 	    mv_s->session_header_map,
2001 	    0, sizeof(mv_s->session_header),
2002 	    BUS_DMASYNC_PREWRITE);
2003 
2004 	return 0;
2005 
2006 fail:
2007 	sc->sc_nsessions--;
2008 	sc->sc_sessions[session] = NULL;
2009 	hint = session;
2010 	if (mv_s)
2011 		mvxpsec_session_dealloc(mv_s);
2012 	log(LOG_WARNING,
2013 	    "%s: Failed to add H/W crypto session (id:%u): err=%d\n",
2014 	   __func__, session, err);
2015 
2016 	mutex_exit(&sc->sc_session_mtx);
2017 	return err;
2018 }
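
/*
 * Illustrative example (not driver code): a typical ESP SA hands this
 * function a two-element cryptoini chain, one element per algorithm,
 * which the loop above processes in order; enc_key and auth_key are
 * hypothetical placeholders.
 *
 *	struct cryptoini crie, cria;
 *
 *	memset(&crie, 0, sizeof(crie));
 *	crie.cri_alg = CRYPTO_AES_CBC;
 *	crie.cri_klen = 128;		// bits; see mvxpsec_key_precomp()
 *	crie.cri_key = enc_key;
 *
 *	memset(&cria, 0, sizeof(cria));
 *	cria.cri_alg = CRYPTO_SHA1_HMAC_96;
 *	cria.cri_klen = 160;		// bits; see mvxpsec_hmac_precomp()
 *	cria.cri_key = auth_key;
 *
 *	crie.cri_next = &cria;		// cipher + MAC in one session
 *	crypto_newsession(&sid, &crie, 0);
 */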
2019 
2020 /*
2021  * remove opencrypto session
2022  */
2023 void
2024 mvxpsec_freesession(void *arg, uint64_t tid)
2025 {
2026 	struct mvxpsec_softc *sc = arg;
2027 	struct mvxpsec_session *mv_s;
2028 	int session;
2029 	uint32_t sid = ((uint32_t)tid) & 0xffffffff;
2030 
2031 	session = MVXPSEC_SESSION(sid);
2032 	KASSERTMSG(session >= 0, "session=%d", session);
2033 	KASSERTMSG(session < MVXPSEC_MAX_SESSIONS, "session=%d max=%d",
2034 	    session, MVXPSEC_MAX_SESSIONS);
2035 
2036 	mutex_enter(&sc->sc_session_mtx);
2037 	mv_s = sc->sc_sessions[session];
2038 	KASSERT(mv_s != NULL);
2039 	MVXPSEC_PRINTF(MVXPSEC_DEBUG_OPENCRYPTO,
2040 	    "%s: inactivate session %d\n", __func__, session);
2041 
2042 	/* inactivate mvxpsec session */
2043 	sc->sc_sessions[session] = NULL;
2044 	sc->sc_nsessions--;
2045 	sc->sc_last_session = NULL;
2046 	mutex_exit(&sc->sc_session_mtx);
2047 
2048 	KASSERT(sc->sc_nsessions >= 0);
2049 	KASSERT(mv_s->sid == sid);
2050 
2051 	mvxpsec_session_dealloc(mv_s);
2052 	MVXPSEC_PRINTF(MVXPSEC_DEBUG_OPENCRYPTO,
2053 	    "H/W Crypto session (id: %d) deleted.\n", session);
2054 
2055 	/* force unblock opencrypto */
2056 	crypto_unblock(sc->sc_cid, CRYPTO_SYMQ|CRYPTO_ASYMQ);
2057 
2058 	MVXPSEC_EVCNT_INCR(sc, session_free);
2059 }
2060 
2061 /*
2062  * process data with existing session
2063  */
2064 int
2065 mvxpsec_dispatch(void *arg, struct cryptop *crp, int hint)
2066 {
2067 	struct mvxpsec_softc *sc = arg;
2068 	struct mvxpsec_session *mv_s;
2069 	struct mvxpsec_packet *mv_p;
2070 	int q_full;
2071 	int running;
2072 	int err;
2073 
2074 	mutex_enter(&sc->sc_queue_mtx);
2075 
2076 	/*
2077 	 * lookup session
2078 	 */
2079 	mutex_enter(&sc->sc_session_mtx);
2080 	mv_s = mvxpsec_session_lookup(sc, crp->crp_sid);
2081 	if (__predict_false(mv_s == NULL)) {
2082 		err = EINVAL;
2083 		mv_p = NULL;
2084 		mutex_exit(&sc->sc_session_mtx);
2085 		goto fail;
2086 	}
2087 	mv_p = mvxpsec_packet_alloc(mv_s);
2088 	if (__predict_false(mv_p == NULL)) {
2089 		mutex_exit(&sc->sc_session_mtx);
2090 		mutex_exit(&sc->sc_queue_mtx);
2091 		return ERESTART; /* => queued in opencrypto layer */
2092 	}
2093 	mutex_exit(&sc->sc_session_mtx);
2094 
2095 	/*
2096 	 * check queue status
2097 	 */
2098 #ifdef MVXPSEC_MULTI_PACKET
2099 	q_full = (sc->sc_wait_qlen >= sc->sc_wait_qlimit) ? 1 : 0;
2100 #else
2101 	q_full = (sc->sc_wait_qlen != 0) ? 1 : 0;
2102 #endif
2103 	running = (sc->sc_flags & HW_RUNNING) ? 1 : 0;
2104 	if (q_full) {
2105 		/* input queue is full. */
2106 		if (!running && sc->sc_wait_qlen > 0)
2107 			mvxpsec_dispatch_queue(sc);
2108 		MVXPSEC_EVCNT_INCR(sc, queue_full);
2109 		mvxpsec_packet_dealloc(mv_p);
2110 		mutex_exit(&sc->sc_queue_mtx);
2111 		return ERESTART; /* => queued in opencrypto layer */
2112 	}
2113 
2114 	/*
2115 	 * Load and setup packet data
2116 	 */
2117 	err = mvxpsec_packet_setcrp(mv_p, crp);
2118 	if (__predict_false(err))
2119 		goto fail;
2120 
2121 	/*
2122 	 * Setup DMA descriptor chains
2123 	 */
2124 	mutex_enter(&sc->sc_dma_mtx);
2125 	err = mvxpsec_dma_copy_packet(sc, mv_p);
2126 	mutex_exit(&sc->sc_dma_mtx);
2127 	if (__predict_false(err))
2128 		goto fail;
2129 
2130 #ifdef MVXPSEC_DEBUG
2131 	mvxpsec_dump_packet(__func__, mv_p);
2132 #endif
2133 
2134 	/*
2135 	 * Sync/inval the data cache
2136 	 */
2137 	err = mvxpsec_dma_sync_packet(sc, mv_p);
2138 	if (__predict_false(err))
2139 		goto fail;
2140 
2141 	/*
2142 	 * Enqueue the packet
2143 	 */
2144 	MVXPSEC_EVCNT_INCR(sc, dispatch_packets);
2145 #ifdef MVXPSEC_MULTI_PACKET
2146 	mvxpsec_packet_enqueue(mv_p);
2147 	if (!running)
2148 		mvxpsec_dispatch_queue(sc);
2149 #else
2150 	SIMPLEQ_INSERT_TAIL(&sc->sc_wait_queue, mv_p, queue);
2151 	sc->sc_wait_qlen++;
2152 	mv_p->flags |= SETUP_DONE;
2153 	if (!running)
2154 		mvxpsec_dispatch_queue(sc);
2155 #endif
2156 	mutex_exit(&sc->sc_queue_mtx);
2157 	return 0;
2158 
2159 fail:
2160 	/* Drop the incoming packet */
2161 	mvxpsec_drop(sc, crp, mv_p, err);
2162 	mutex_exit(&sc->sc_queue_mtx);
2163 	return 0;
2164 }
2165 
2166 /*
2167  * hand the packets back to the IP stack
2168  */
2169 void
2170 mvxpsec_done(void *arg)
2171 {
2172 	struct mvxpsec_softc *sc = arg;
2173 	struct mvxpsec_packet *mv_p;
2174 	mvxpsec_queue_t ret_queue;
2175 	int ndone;
2176 
2177 	mutex_enter(&sc->sc_queue_mtx);
2178 
2179 	/* stop wdog timer */
2180 	callout_stop(&sc->sc_timeout);
2181 
2182 	/* refill MVXPSEC */
2183 	ret_queue = sc->sc_run_queue;
2184 	SIMPLEQ_INIT(&sc->sc_run_queue);
2185 	sc->sc_flags &= ~HW_RUNNING;
2186 	if (sc->sc_wait_qlen > 0)
2187 		mvxpsec_dispatch_queue(sc);
2188 
2189 	ndone = 0;
2190 	while ((mv_p = SIMPLEQ_FIRST(&ret_queue)) != NULL) {
2191 		SIMPLEQ_REMOVE_HEAD(&ret_queue, queue);
2192 		mvxpsec_dma_free(sc, &mv_p->dma_ring);
2193 		mvxpsec_done_packet(mv_p);
2194 		ndone++;
2195 	}
2196 	MVXPSEC_EVCNT_MAX(sc, max_done, ndone);
2197 
2198 	mutex_exit(&sc->sc_queue_mtx);
2199 }
2200 
2201 /*
2202  * drop the packet
2203  */
2204 INLINE void
2205 mvxpsec_drop(struct mvxpsec_softc *sc, struct cryptop *crp,
2206     struct mvxpsec_packet *mv_p, int err)
2207 {
2208 	/* must be called with sc->sc_queue_mtx held */
2209 	KASSERT(mutex_owned(&sc->sc_queue_mtx));
2210 
2211 	if (mv_p)
2212 		mvxpsec_packet_dealloc(mv_p);
2213 	if (err < 0)
2214 		err = EINVAL;
2215 	crp->crp_etype = err;
2216 	crypto_done(crp);
2217 	MVXPSEC_EVCNT_INCR(sc, packet_err);
2218 
2219 	/* dispatch other packets in queue */
2220 	if (sc->sc_wait_qlen > 0 &&
2221 	    !(sc->sc_flags & HW_RUNNING))
2222 		mvxpsec_dispatch_queue(sc);
2223 
2224 	/* unblock driver for dropped packet */
2225 	crypto_unblock(sc->sc_cid, CRYPTO_SYMQ|CRYPTO_ASYMQ);
2226 }
2227 
2228 /* move wait queue entry to run queue */
2229 STATIC int
2230 mvxpsec_dispatch_queue(struct mvxpsec_softc *sc)
2231 {
2232 	struct mvxpsec_packet *mv_p;
2233 	paddr_t head;
2234 	int ndispatch = 0;
2235 
2236 	/* must be called with sc->sc_queue_mtx held */
2237 	KASSERT(mutex_owned(&sc->sc_queue_mtx));
2238 
2239 	/* check whether there is any task */
2240 	if (__predict_false(sc->sc_flags & HW_RUNNING)) {
2241 		log(LOG_WARNING,
2242 		    "%s: another packet already exists.\n", __func__);
2243 		return 0;
2244 	}
2245 	if (__predict_false(SIMPLEQ_EMPTY(&sc->sc_wait_queue))) {
2246 		log(LOG_WARNING,
2247 		    "%s: no waiting packet yet (qlen=%d).\n",
2248 		    __func__, sc->sc_wait_qlen);
2249 		return 0;
2250 	}
2251 
2252 	/* move queue */
2253 	sc->sc_run_queue = sc->sc_wait_queue;
2254 	sc->sc_flags |= HW_RUNNING; /* dropped by intr or timeout */
2255 	SIMPLEQ_INIT(&sc->sc_wait_queue);
2256 	ndispatch = sc->sc_wait_qlen;
2257 	sc->sc_wait_qlen = 0;
2258 
2259 	/* get 1st DMA descriptor */
2260 	mv_p = SIMPLEQ_FIRST(&sc->sc_run_queue);
2261 	head = mv_p->dma_ring.dma_head->phys_addr;
2262 
2263 	/* terminate last DMA descriptor */
2264 	mv_p = SIMPLEQ_LAST(&sc->sc_run_queue, mvxpsec_packet, queue);
2265 	mvxpsec_dma_finalize(sc, &mv_p->dma_ring);
2266 
2267 	/* configure TDMA */
2268 	if (mvxpsec_dma_wait(sc) < 0) {
2269 		log(LOG_ERR, "%s: DMA DEVICE not responding", __func__);
2270 		callout_schedule(&sc->sc_timeout, hz);
2271 		return 0;
2272 	}
2273 	MVXPSEC_WRITE(sc, MV_TDMA_NXT, head);
2274 
2275 	/* trigger ACC */
2276 	if (mvxpsec_acc_wait(sc) < 0) {
2277 		log(LOG_ERR, "%s: MVXPSEC not responding", __func__);
2278 		callout_schedule(&sc->sc_timeout, hz);
2279 		return 0;
2280 	}
2281 	MVXPSEC_WRITE(sc, MV_ACC_COMMAND, MV_ACC_COMMAND_ACT);
2282 
2283 	MVXPSEC_EVCNT_MAX(sc, max_dispatch, ndispatch);
2284 	MVXPSEC_EVCNT_INCR(sc, dispatch_queue);
2285 	callout_schedule(&sc->sc_timeout, hz);
2286 	return 0;
2287 }
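
/*
 * Summary of the queue life cycle implemented above:
 *
 *	mvxpsec_dispatch():       packet -> sc_wait_queue (SETUP_DONE)
 *	mvxpsec_dispatch_queue(): sc_wait_queue -> sc_run_queue as one
 *	                          batch; HW_RUNNING set, TDMA/ACC
 *	                          started, watchdog armed (sc_timeout)
 *	mvxpsec_done():           HW_RUNNING cleared, next batch (if
 *	                          any) dispatched, then each finished
 *	                          packet completed via
 *	                          mvxpsec_done_packet()
 */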
2288 
2289 /*
2290  * process opencrypto operations (cryptop) for packets.
2291  */
2292 INLINE int
2293 mvxpsec_parse_crd(struct mvxpsec_packet *mv_p, struct cryptodesc *crd)
2294 {
2295 	int ivlen;
2296 
2297 	KASSERT(mv_p->flags & RDY_DATA);
2298 
2299 	/* MAC & Ciphers: set data location and operation */
2300 	switch (crd->crd_alg) {
2301 	case CRYPTO_SHA1_HMAC_96:
2302 		mv_p->pkt_header.desc.acc_config |= MV_ACC_CRYPTO_MAC_96;
2303 		/* fall through */
2304 	case CRYPTO_SHA1_HMAC:
2305 		mv_p->mac_dst = crd->crd_inject;
2306 		mv_p->mac_off = crd->crd_skip;
2307 		mv_p->mac_len = crd->crd_len;
2308 		MV_ACC_CRYPTO_MAC_SET(mv_p->pkt_header.desc.acc_config,
2309 		    MV_ACC_CRYPTO_MAC_HMAC_SHA1);
2310 		mvxpsec_packet_update_op_order(mv_p, MV_ACC_CRYPTO_OP_MAC);
2311 		/* No more setup for MAC */
2312 		return 0;
2313 	case CRYPTO_MD5_HMAC_96:
2314 		mv_p->pkt_header.desc.acc_config |= MV_ACC_CRYPTO_MAC_96;
2315 		/* fall through */
2316 	case CRYPTO_MD5_HMAC:
2317 		mv_p->mac_dst = crd->crd_inject;
2318 		mv_p->mac_off = crd->crd_skip;
2319 		mv_p->mac_len = crd->crd_len;
2320 		MV_ACC_CRYPTO_MAC_SET(mv_p->pkt_header.desc.acc_config,
2321 		    MV_ACC_CRYPTO_MAC_HMAC_MD5);
2322 		mvxpsec_packet_update_op_order(mv_p, MV_ACC_CRYPTO_OP_MAC);
2323 		/* No more setup for MAC */
2324 		return 0;
2325 	case CRYPTO_DES_CBC:
2326 		mv_p->enc_ivoff = crd->crd_inject;
2327 		mv_p->enc_off = crd->crd_skip;
2328 		mv_p->enc_len = crd->crd_len;
2329 		ivlen = 8;
2330 		MV_ACC_CRYPTO_ENC_SET(mv_p->pkt_header.desc.acc_config,
2331 		    MV_ACC_CRYPTO_ENC_DES);
2332 		mv_p->pkt_header.desc.acc_config |= MV_ACC_CRYPTO_CBC;
2333 		mvxpsec_packet_update_op_order(mv_p, MV_ACC_CRYPTO_OP_ENC);
2334 		break;
2335 	case CRYPTO_3DES_CBC:
2336 		mv_p->enc_ivoff = crd->crd_inject;
2337 		mv_p->enc_off = crd->crd_skip;
2338 		mv_p->enc_len = crd->crd_len;
2339 		ivlen = 8;
2340 		MV_ACC_CRYPTO_ENC_SET(mv_p->pkt_header.desc.acc_config,
2341 		    MV_ACC_CRYPTO_ENC_3DES);
2342 		mv_p->pkt_header.desc.acc_config |= MV_ACC_CRYPTO_CBC;
2343 		mv_p->pkt_header.desc.acc_config |= MV_ACC_CRYPTO_3DES_EDE;
2344 		mvxpsec_packet_update_op_order(mv_p, MV_ACC_CRYPTO_OP_ENC);
2345 		break;
2346 	case CRYPTO_AES_CBC:
2347 		mv_p->enc_ivoff = crd->crd_inject;
2348 		mv_p->enc_off = crd->crd_skip;
2349 		mv_p->enc_len = crd->crd_len;
2350 		ivlen = 16;
2351 		MV_ACC_CRYPTO_ENC_SET(mv_p->pkt_header.desc.acc_config,
2352 		    MV_ACC_CRYPTO_ENC_AES);
2353 		MV_ACC_CRYPTO_AES_KLEN_SET(
2354 		    mv_p->pkt_header.desc.acc_config,
2355 		   mvxpsec_aesklen(mv_p->mv_s->enc_klen));
2356 		mv_p->pkt_header.desc.acc_config |= MV_ACC_CRYPTO_CBC;
2357 		mvxpsec_packet_update_op_order(mv_p, MV_ACC_CRYPTO_OP_ENC);
2358 		break;
2359 	default:
2360 		log(LOG_ERR, "%s: Unknown algorithm %d\n",
2361 		    __func__, crd->crd_alg);
2362 		return EINVAL;
2363 	}
2364 
2365 	/* Operations only for Cipher, not MAC */
2366 	if (crd->crd_flags & CRD_F_ENCRYPT) {
2367 		/* Ciphers: originate IV for encryption. */
2368 		mv_p->pkt_header.desc.acc_config &= ~MV_ACC_CRYPTO_DECRYPT;
2369 		mv_p->flags |= DIR_ENCRYPT;
2370 
2371 		if (crd->crd_flags & CRD_F_IV_EXPLICIT) {
2372 			MVXPSEC_PRINTF(MVXPSEC_DEBUG_ENC_IV, "EXPLICIT IV\n");
2373 			mv_p->flags |= CRP_EXT_IV;
2374 			mvxpsec_packet_write_iv(mv_p, crd->crd_iv, ivlen);
2375 			mv_p->enc_ivoff = MVXPSEC_SRAM_IV_EXT_OFF;
2376 		}
2377 		else if (crd->crd_flags & CRD_F_IV_PRESENT) {
2378 			MVXPSEC_PRINTF(MVXPSEC_DEBUG_ENC_IV, "IV is present\n");
2379 			mvxpsec_packet_copy_iv(mv_p, crd->crd_inject, ivlen);
2380 		}
2381 		else {
2382 			MVXPSEC_PRINTF(MVXPSEC_DEBUG_ENC_IV, "Create New IV\n");
2383 			mvxpsec_packet_write_iv(mv_p, NULL, ivlen);
2384 		}
2385 	}
2386 	else {
2387 		/* Ciphers: IV is loaded from crd_inject when it's present */
2388 		mv_p->pkt_header.desc.acc_config |= MV_ACC_CRYPTO_DECRYPT;
2389 		mv_p->flags |= DIR_DECRYPT;
2390 
2391 		if (crd->crd_flags & CRD_F_IV_EXPLICIT) {
2392 #ifdef MVXPSEC_DEBUG
2393 			if (mvxpsec_debug & MVXPSEC_DEBUG_ENC_IV) {
2394 				MVXPSEC_PRINTF(MVXPSEC_DEBUG_ENC_IV,
2395 				    "EXPLICIT IV(Decrypt)\n");
2396 				mvxpsec_dump_data(__func__, crd->crd_iv, ivlen);
2397 			}
2398 #endif
2399 			mv_p->flags |= CRP_EXT_IV;
2400 			mvxpsec_packet_write_iv(mv_p, crd->crd_iv, ivlen);
2401 			mv_p->enc_ivoff = MVXPSEC_SRAM_IV_EXT_OFF;
2402 		}
2403 	}
2404 
2405 	KASSERT(!((mv_p->flags & DIR_ENCRYPT) && (mv_p->flags & DIR_DECRYPT)));
2406 
2407 	return 0;
2408 }
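
/*
 * Illustrative example (values are hypothetical): for an outbound ESP
 * packet using AES-CBC, the caller typically fills the cryptodesc as
 *
 *	crd->crd_skip   = payload_off;	// first byte to encrypt
 *	crd->crd_len    = payload_len;	// multiple of the block size
 *	crd->crd_inject = iv_off;	// where the IV sits in the packet
 *	crd->crd_flags  = CRD_F_ENCRYPT | CRD_F_IV_PRESENT;
 *
 * which the switch above translates into enc_off/enc_len/enc_ivoff
 * and the MV_ACC_CRYPTO_* bits of the packet header descriptor.
 */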
2409 
2410 INLINE int
2411 mvxpsec_parse_crp(struct mvxpsec_packet *mv_p)
2412 {
2413 	struct cryptop *crp = mv_p->crp;
2414 	struct cryptodesc *crd;
2415 	int err;
2416 
2417 	KASSERT(crp);
2418 
2419 	mvxpsec_packet_reset_op(mv_p);
2420 
2421 	for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
2422 		err = mvxpsec_parse_crd(mv_p, crd);
2423 		if (err)
2424 			return err;
2425 	}
2426 
2427 	return 0;
2428 }
2429 
2430 INLINE int
2431 mvxpsec_packet_setcrp(struct mvxpsec_packet *mv_p, struct cryptop *crp)
2432 {
2433 	int err = EINVAL;
2434 
2435 	/* register crp to the MVXPSEC packet */
2436 	if (crp->crp_flags & CRYPTO_F_IMBUF) {
2437 		err = mvxpsec_packet_setmbuf(mv_p,
2438 		    (struct mbuf *)crp->crp_buf);
2439 		mv_p->crp = crp;
2440 	}
2441 	else if (crp->crp_flags & CRYPTO_F_IOV) {
2442 		err = mvxpsec_packet_setuio(mv_p,
2443 		    (struct uio *)crp->crp_buf);
2444 		mv_p->crp = crp;
2445 	}
2446 	else {
2447 		err = mvxpsec_packet_setdata(mv_p,
2448 		    crp->crp_buf, crp->crp_ilen);
2449 		mv_p->crp = crp;
2450 	}
2451 	if (__predict_false(err))
2452 		return err;
2453 
2454 	/* parse crp and setup MVXPSEC registers/descriptors */
2455 	err = mvxpsec_parse_crp(mv_p);
2456 	if (__predict_false(err))
2457 		return err;
2458 
2459 	/* fixup data offset to fit MVXPSEC internal SRAM */
2460 	err = mvxpsec_header_finalize(mv_p);
2461 	if (__predict_false(err))
2462 		return err;
2463 
2464 	return 0;
2465 }
2466 
2467 /*
2468  * load data for encrypt/decrypt/authentication
2469  *
2470  * data is raw kernel memory area.
2471  */
2472 STATIC int
2473 mvxpsec_packet_setdata(struct mvxpsec_packet *mv_p,
2474     void *data, uint32_t data_len)
2475 {
2476 	struct mvxpsec_session *mv_s = mv_p->mv_s;
2477 	struct mvxpsec_softc *sc = mv_s->sc;
2478 
2479 	if (bus_dmamap_load(sc->sc_dmat, mv_p->data_map, data, data_len,
2480 	    NULL, BUS_DMA_NOWAIT)) {
2481 		log(LOG_ERR, "%s: cannot load data\n", __func__);
2482 		return -1;
2483 	}
2484 	mv_p->data_type = MVXPSEC_DATA_RAW;
2485 	mv_p->data_raw = data;
2486 	mv_p->data_len = data_len;
2487 	mv_p->flags |= RDY_DATA;
2488 
2489 	return 0;
2490 }
2491 
2492 /*
2493  * load data for encrypt/decrypt/authentication
2494  *
2495  * data is mbuf based network data.
2496  */
2497 STATIC int
2498 mvxpsec_packet_setmbuf(struct mvxpsec_packet *mv_p, struct mbuf *m)
2499 {
2500 	struct mvxpsec_session *mv_s = mv_p->mv_s;
2501 	struct mvxpsec_softc *sc = mv_s->sc;
2502 	size_t pktlen = 0;
2503 
2504 	if (__predict_true(m->m_flags & M_PKTHDR))
2505 		pktlen = m->m_pkthdr.len;
2506 	else {
2507 		struct mbuf *mp = m;
2508 
2509 		while (mp != NULL) {
2510 			pktlen += mp->m_len;
2511 			mp = mp->m_next;
2512 		}
2513 	}
2514 	if (pktlen > SRAM_PAYLOAD_SIZE) {
2515 #if NIPSEC > 0
2516 		extern   percpu_t *espstat_percpu;
2517 	       	/* XXX:
2518 		 * layer violation. opencrypto knows our max packet size
2519 		 * from crypto_register(9) API.
2520 		 */
2521 
2522 		_NET_STATINC(espstat_percpu, ESP_STAT_TOOBIG);
2523 #endif
2524 		log(LOG_ERR,
2525 		    "%s: ESP Packet too large: %zu [oct.] > %zu [oct.]\n",
2526 		    device_xname(sc->sc_dev),
2527 		    (size_t)pktlen, SRAM_PAYLOAD_SIZE);
2528 		mv_p->data_type = MVXPSEC_DATA_NONE;
2529 		mv_p->data_mbuf = NULL;
2530 		return -1;
2531 	}
2532 
2533 	if (bus_dmamap_load_mbuf(sc->sc_dmat, mv_p->data_map, m,
2534 	    BUS_DMA_NOWAIT)) {
2535 		mv_p->data_type = MVXPSEC_DATA_NONE;
2536 		mv_p->data_mbuf = NULL;
2537 		log(LOG_ERR, "%s: cannot load mbuf\n", __func__);
2538 		return -1;
2539 	}
2540 
2541 	/* set payload buffer */
2542 	mv_p->data_type = MVXPSEC_DATA_MBUF;
2543 	mv_p->data_mbuf = m;
2544 	if (m->m_flags & M_PKTHDR) {
2545 		mv_p->data_len = m->m_pkthdr.len;
2546 	}
2547 	else {
2548 		mv_p->data_len = 0;
2549 		while (m) {
2550 			mv_p->data_len += m->m_len;
2551 			m = m->m_next;
2552 		}
2553 	}
2554 	mv_p->flags |= RDY_DATA;
2555 
2556 	return 0;
2557 }
2558 
2559 STATIC int
2560 mvxpsec_packet_setuio(struct mvxpsec_packet *mv_p, struct uio *uio)
2561 {
2562 	struct mvxpsec_session *mv_s = mv_p->mv_s;
2563 	struct mvxpsec_softc *sc = mv_s->sc;
2564 
2565 	if (uio->uio_resid > SRAM_PAYLOAD_SIZE) {
2566 #if NIPSEC > 0
2567 		extern   percpu_t *espstat_percpu;
2568 	       	/* XXX:
2569 		 * layer violation. opencrypto knows our max packet size
2570 		 * from crypto_register(9) API.
2571 		 */
2572 
2573 		_NET_STATINC(espstat_percpu, ESP_STAT_TOOBIG);
2574 #endif
2575 		log(LOG_ERR,
2576 		    "%s: uio request too large: %zu [oct.] > %zu [oct.]\n",
2577 		    device_xname(sc->sc_dev),
2578 		    uio->uio_resid, SRAM_PAYLOAD_SIZE);
2579 		mv_p->data_type = MVXPSEC_DATA_NONE;
2580 		mv_p->data_mbuf = NULL;
2581 		return -1;
2582 	}
2583 
2584 	if (bus_dmamap_load_uio(sc->sc_dmat, mv_p->data_map, uio,
2585 	    BUS_DMA_NOWAIT)) {
2586 		mv_p->data_type = MVXPSEC_DATA_NONE;
2587 		mv_p->data_mbuf = NULL;
2588 		log(LOG_ERR, "%s: cannot load uio buf\n", __func__);
2589 		return -1;
2590 	}
2591 
2592 	/* set payload buffer */
2593 	mv_p->data_type = MVXPSEC_DATA_UIO;
2594 	mv_p->data_uio = uio;
2595 	mv_p->data_len = uio->uio_resid;
2596 	mv_p->flags |= RDY_DATA;
2597 
2598 	return 0;
2599 }
2600 
2601 STATIC int
2602 mvxpsec_packet_rdata(struct mvxpsec_packet *mv_p,
2603     int off, int len, void *cp)
2604 {
2605 	uint8_t *p;
2606 
2607 	if (mv_p->data_type == MVXPSEC_DATA_RAW) {
2608 		p = (uint8_t *)mv_p->data_raw + off;
2609 		memcpy(cp, p, len);
2610 	}
2611 	else if (mv_p->data_type == MVXPSEC_DATA_MBUF) {
2612 		m_copydata(mv_p->data_mbuf, off, len, cp);
2613 	}
2614 	else if (mv_p->data_type == MVXPSEC_DATA_UIO) {
2615 		cuio_copydata(mv_p->data_uio, off, len, cp);
2616 	}
2617 	else
2618 		return -1;
2619 
2620 	return 0;
2621 }
2622 
2623 STATIC int
2624 mvxpsec_packet_wdata(struct mvxpsec_packet *mv_p,
2625     int off, int len, void *cp)
2626 {
2627 	uint8_t *p;
2628 
2629 	if (mv_p->data_type == MVXPSEC_DATA_RAW) {
2630 		p = (uint8_t *)mv_p->data_raw + off;
2631 		memcpy(p, cp, len);
2632 	}
2633 	else if (mv_p->data_type == MVXPSEC_DATA_MBUF) {
2634 		m_copyback(mv_p->data_mbuf, off, len, cp);
2635 	}
2636 	else if (mv_p->data_type == MVXPSEC_DATA_UIO) {
2637 		cuio_copyback(mv_p->data_uio, off, len, cp);
2638 	}
2639 	else
2640 		return -1;
2641 
2642 	return 0;
2643 }
2644 
2645 /*
2646  * Set initial vector of cipher to the session.
2647  */
2648 STATIC int
2649 mvxpsec_packet_write_iv(struct mvxpsec_packet *mv_p, void *iv, int ivlen)
2650 {
2651 	uint8_t ivbuf[16];
2652 
2653 	KASSERT(ivlen == 8 || ivlen == 16);
2654 
2655 	if (iv == NULL) {
2656 	       	if (mv_p->mv_s->sflags & RDY_CRP_IV) {
2657 			/* use per session IV (compatible with KAME IPsec) */
2658 			mv_p->pkt_header.crp_iv_work = mv_p->mv_s->session_iv;
2659 			mv_p->flags |= RDY_CRP_IV;
2660 			return 0;
2661 		}
2662 		cprng_fast(ivbuf, ivlen);
2663 		iv = ivbuf;
2664 	}
2665 	memcpy(&mv_p->pkt_header.crp_iv_work, iv, ivlen);
2666 	if (mv_p->flags & CRP_EXT_IV) {
2667 		memcpy(&mv_p->pkt_header.crp_iv_ext, iv, ivlen);
2668 		mv_p->ext_iv = iv;
2669 		mv_p->ext_ivlen = ivlen;
2670 	}
2671 	mv_p->flags |= RDY_CRP_IV;
2672 
2673 	return 0;
2674 }
2675 
2676 STATIC int
2677 mvxpsec_packet_copy_iv(struct mvxpsec_packet *mv_p, int off, int ivlen)
2678 {
2679 	mvxpsec_packet_rdata(mv_p, off, ivlen,
2680 	    &mv_p->pkt_header.crp_iv_work);
2681 	mv_p->flags |= RDY_CRP_IV;
2682 
2683 	return 0;
2684 }
2685 
2686 /*
2687  * set an encryption or decryption key for the session
2688  *
2689  * Input key material is big endian.
2690  */
2691 STATIC int
2692 mvxpsec_key_precomp(int alg, void *keymat, int kbitlen,
2693     void *key_encrypt, void *key_decrypt)
2694 {
2695 	uint32_t *kp = keymat;
2696 	uint32_t *ekp = key_encrypt;
2697 	uint32_t *dkp = key_decrypt;
2698 	int i;
2699 
2700 	switch (alg) {
2701 	case CRYPTO_DES_CBC:
2702 		if (kbitlen < 64 || (kbitlen % 8) != 0) {
2703 			log(LOG_WARNING,
2704 			    "mvxpsec: invalid DES keylen %d\n", kbitlen);
2705 			return EINVAL;
2706 		}
2707 		for (i = 0; i < 2; i++)
2708 			dkp[i] = ekp[i] = kp[i];
2709 		for (; i < 8; i++)
2710 			dkp[i] = ekp[i] = 0;
2711 		break;
2712 	case CRYPTO_3DES_CBC:
2713 		if (kbitlen < 192 || (kbitlen % 8) != 0) {
2714 			log(LOG_WARNING,
2715 			    "mvxpsec: invalid 3DES keylen %d\n", kbitlen);
2716 			return EINVAL;
2717 		}
2718 		for (i = 0; i < 8; i++)
2719 			dkp[i] = ekp[i] = kp[i];
2720 		break;
2721 	case CRYPTO_AES_CBC:
2722 		if (kbitlen < 128) {
2723 			log(LOG_WARNING,
2724 			    "mvxpsec: invalid AES keylen %d\n", kbitlen);
2725 			return EINVAL;
2726 		}
2727 		else if (kbitlen < 192) {
2728 			/* AES-128 */
2729 			for (i = 0; i < 4; i++)
2730 				ekp[i] = kp[i];
2731 			for (; i < 8; i++)
2732 				ekp[i] = 0;
2733 		}
2734 	       	else if (kbitlen < 256) {
2735 			/* AES-192 */
2736 			for (i = 0; i < 6; i++)
2737 				ekp[i] = kp[i];
2738 			for (; i < 8; i++)
2739 				ekp[i] = 0;
2740 		}
2741 		else  {
2742 			/* AES-256 */
2743 			for (i = 0; i < 8; i++)
2744 				ekp[i] = kp[i];
2745 		}
2746 		/* make decryption key */
2747 		mv_aes_deckey((uint8_t *)dkp, (uint8_t *)ekp, kbitlen);
2748 		break;
2749 	default:
2750 		for (i = 0; i < 8; i++)
2751 			ekp[i] = dkp[i] = 0;
2752 		break;
2753 	}
2754 
2755 #ifdef MVXPSEC_DEBUG
2756 	if (mvxpsec_debug & MVXPSEC_DEBUG_OPENCRYPTO) {
2757 		MVXPSEC_PRINTF(MVXPSEC_DEBUG_OPENCRYPTO,
2758 		    "%s: key registered\n", __func__);
2759 		mvxpsec_dump_data(__func__, ekp, 32);
2760 	}
2761 #endif
2762 
2763 	return 0;
2764 }
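
/*
 * Illustrative example (not driver code): for AES-128 the 16-byte key
 * fills ekp[0..3], ekp[4..7] are zeroed, and mv_aes_deckey() derives
 * the hardware decryption key from the expanded schedule:
 *
 *	uint8_t key[16] = { 0 };	// all-zero demo key
 *	uint32_t ek[8], dk[8];
 *
 *	mvxpsec_key_precomp(CRYPTO_AES_CBC, key, 128, ek, dk);
 *	// now ek[0..3] hold the key words, ek[4..7] are zero,
 *	// and dk holds the last round key of the schedule
 */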
2765 
2766 /*
2767  * set the MAC key for the session
2768  *
2769  * The MAC engine has no register for the key itself, but it has
2770  * inner and outer IV registers. Software must compute the IVs
2771  * before enabling the engine.
2772  *
2773  * Each IV is a hash of the key XORed with ipad/opad, as defined by
2774  * the FIPS-198a standard.
2775  */
2776 STATIC int
2777 mvxpsec_hmac_precomp(int alg, void *key, int kbitlen,
2778     void *iv_inner, void *iv_outer)
2779 {
2780 	SHA1_CTX sha1;
2781 	MD5_CTX md5;
2782 	uint8_t *key8 = key;
2783 	uint8_t kbuf[64];
2784 	uint8_t ipad[64];
2785 	uint8_t opad[64];
2786 	uint32_t *iv_in = iv_inner;
2787 	uint32_t *iv_out = iv_outer;
2788 	int kbytelen;
2789 	int i;
2790 #define HMAC_IPAD 0x36
2791 #define HMAC_OPAD 0x5c
2792 
2793 	kbytelen = kbitlen / 8;
2794 	KASSERT(kbitlen == kbytelen * 8);
2795 	if (kbytelen > 64) {
2796 		SHA1Init(&sha1);
2797 		SHA1Update(&sha1, key, kbytelen);
2798 		SHA1Final(kbuf, &sha1);
2799 		key8 = kbuf;
2800 		kbytelen = SHA1_DIGEST_LENGTH;	/* kbuf only holds the digest */
2801 	}
2802 
2803 	/* make initial 64 oct. string */
2804 	switch (alg) {
2805 	case CRYPTO_SHA1_HMAC_96:
2806 	case CRYPTO_SHA1_HMAC:
2807 	case CRYPTO_MD5_HMAC_96:
2808 	case CRYPTO_MD5_HMAC:
2809 		for (i = 0; i < kbytelen; i++) {
2810 			ipad[i] = (key8[i] ^ HMAC_IPAD);
2811 			opad[i] = (key8[i] ^ HMAC_OPAD);
2812 		}
2813 		for (; i < 64; i++) {
2814 			ipad[i] = HMAC_IPAD;
2815 			opad[i] = HMAC_OPAD;
2816 		}
2817 		break;
2818 	default:
2819 		break;
2820 	}
2821 #ifdef MVXPSEC_DEBUG
2822 	if (mvxpsec_debug & MVXPSEC_DEBUG_OPENCRYPTO) {
2823 		MVXPSEC_PRINTF(MVXPSEC_DEBUG_OPENCRYPTO,
2824 		    "%s: HMAC-KEY Pre-comp:\n", __func__);
2825 		mvxpsec_dump_data(__func__, key8, kbytelen);
2826 		MVXPSEC_PRINTF(MVXPSEC_DEBUG_OPENCRYPTO,
2827 		    "%s: ipad:\n", __func__);
2828 		mvxpsec_dump_data(__func__, ipad, sizeof(ipad));
2829 		MVXPSEC_PRINTF(MVXPSEC_DEBUG_OPENCRYPTO,
2830 		    "%s: opad:\n", __func__);
2831 		mvxpsec_dump_data(__func__, opad, sizeof(opad));
2832 	}
2833 #endif
2834 
2835 	/* make iv from string */
2836 	switch (alg) {
2837 	case CRYPTO_SHA1_HMAC_96:
2838 	case CRYPTO_SHA1_HMAC:
2839 		MVXPSEC_PRINTF(MVXPSEC_DEBUG_OPENCRYPTO,
2840 		    "%s: Generate iv_in(SHA1)\n", __func__);
2841 		SHA1Init(&sha1);
2842 		SHA1Update(&sha1, ipad, 64);
2843 		/* XXX: private state... (LE) */
2844 		iv_in[0] = htobe32(sha1.state[0]);
2845 		iv_in[1] = htobe32(sha1.state[1]);
2846 		iv_in[2] = htobe32(sha1.state[2]);
2847 		iv_in[3] = htobe32(sha1.state[3]);
2848 		iv_in[4] = htobe32(sha1.state[4]);
2849 
2850 		MVXPSEC_PRINTF(MVXPSEC_DEBUG_OPENCRYPTO,
2851 		    "%s: Generate iv_out(SHA1)\n", __func__);
2852 		SHA1Init(&sha1);
2853 		SHA1Update(&sha1, opad, 64);
2854 		/* XXX: private state... (LE) */
2855 		iv_out[0] = htobe32(sha1.state[0]);
2856 		iv_out[1] = htobe32(sha1.state[1]);
2857 		iv_out[2] = htobe32(sha1.state[2]);
2858 		iv_out[3] = htobe32(sha1.state[3]);
2859 		iv_out[4] = htobe32(sha1.state[4]);
2860 		break;
2861 	case CRYPTO_MD5_HMAC_96:
2862 	case CRYPTO_MD5_HMAC:
2863 		MVXPSEC_PRINTF(MVXPSEC_DEBUG_OPENCRYPTO,
2864 		    "%s: Generate iv_in(MD5)\n", __func__);
2865 		MD5Init(&md5);
2866 		MD5Update(&md5, ipad, sizeof(ipad));
2867 		/* XXX: private state... (LE) */
2868 		iv_in[0] = htobe32(md5.state[0]);
2869 		iv_in[1] = htobe32(md5.state[1]);
2870 		iv_in[2] = htobe32(md5.state[2]);
2871 		iv_in[3] = htobe32(md5.state[3]);
2872 		iv_in[4] = 0;
2873 
2874 		MVXPSEC_PRINTF(MVXPSEC_DEBUG_OPENCRYPTO,
2875 		    "%s: Generate iv_out(MD5)\n", __func__);
2876 		MD5Init(&md5);
2877 		MD5Update(&md5, opad, sizeof(opad));
2878 		/* XXX: private state... (LE) */
2879 		iv_out[0] = htobe32(md5.state[0]);
2880 		iv_out[1] = htobe32(md5.state[1]);
2881 		iv_out[2] = htobe32(md5.state[2]);
2882 		iv_out[3] = htobe32(md5.state[3]);
2883 		iv_out[4] = 0;
2884 		break;
2885 	default:
2886 		break;
2887 	}
2888 
2889 #ifdef MVXPSEC_DEBUG
2890 	if (mvxpsec_debug & MVXPSEC_DEBUG_HASH_IV) {
2891 		MVXPSEC_PRINTF(MVXPSEC_DEBUG_HASH_IV,
2892 		    "%s: HMAC IV-IN\n", __func__);
2893 		mvxpsec_dump_data(__func__, (uint8_t *)iv_in, 20);
2894 		MVXPSEC_PRINTF(MVXPSEC_DEBUG_HASH_IV,
2895 		    "%s: HMAC IV-OUT\n", __func__);
2896 		mvxpsec_dump_data(__func__, (uint8_t *)iv_out, 20);
2897 	}
2898 #endif
2899 
2900 	return 0;
2901 #undef HMAC_IPAD
2902 #undef HMAC_OPAD
2903 }
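
/*
 * Note on the precomputation above: FIPS-198 defines
 *
 *	HMAC(K, m) = H((K ^ opad) || H((K ^ ipad) || m))
 *
 * The engine is handed only the two fixed-size midstates
 * H_state(K ^ ipad) and H_state(K ^ opad), i.e. one compression
 * function application each over a 64-byte block. At packet time it
 * continues the inner hash over the data and finishes with the outer
 * state, so the raw key never reaches the hardware. The htobe32()
 * calls account for the software contexts keeping their state as
 * host-endian words while the engine expects big endian.
 */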
2904 
2905 /*
2906  * AES Support routine
2907  */
2908 static uint8_t AES_SBOX[256] = {
2909 	 99, 124, 119, 123, 242, 107, 111, 197,  48,   1, 103,  43, 254, 215,
2910        	171, 118, 202, 130, 201, 125, 250,  89,  71, 240, 173, 212, 162, 175,
2911        	156, 164, 114, 192, 183, 253, 147,  38,  54,  63, 247, 204,  52, 165,
2912        	229, 241, 113, 216,  49,  21,   4, 199,  35, 195,  24, 150,   5, 154,
2913        	  7,  18, 128, 226, 235,  39, 178, 117,   9, 131,  44,  26,  27, 110,
2914 	 90, 160,  82,  59, 214, 179,  41, 227,  47, 132,  83, 209,   0, 237,
2915        	 32, 252, 177,  91, 106, 203, 190,  57,  74,  76,  88, 207, 208, 239,
2916 	170, 251,  67,  77,  51, 133,  69, 249,   2, 127,  80,  60, 159, 168,
2917 	 81, 163,  64, 143, 146, 157,  56, 245, 188, 182, 218,  33,  16, 255,
2918 	243, 210, 205,  12,  19, 236,  95, 151,  68,  23, 196, 167, 126,  61,
2919        	100,  93,  25, 115,  96, 129,  79, 220,  34,  42, 144, 136,  70, 238,
2920        	184,  20, 222,  94,  11, 219, 224,  50,  58,  10,  73,   6,  36,  92,
2921        	194, 211, 172,  98, 145, 149, 228, 121, 231, 200,  55, 109, 141, 213,
2922       	 78, 169, 108,  86, 244, 234, 101, 122, 174,   8, 186, 120,  37,  46,
2923        	 28, 166, 180, 198, 232, 221, 116,  31,  75, 189, 139, 138, 112,  62,
2924 	181, 102,  72,   3, 246,  14,  97,  53,  87, 185, 134, 193,  29, 158,
2925        	225, 248, 152,  17, 105, 217, 142, 148, 155,  30, 135, 233, 206,  85,
2926       	 40, 223, 140, 161, 137,  13, 191, 230,  66, 104,  65, 153,  45,  15,
2927 	176,  84, 187,  22
2928 };
2929 
2930 static uint32_t AES_RCON[30] = {
2931 	0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1b, 0x36, 0x6c, 0xd8,
2932        	0xab, 0x4d, 0x9a, 0x2f, 0x5e, 0xbc, 0x63, 0xc6, 0x97, 0x35, 0x6a, 0xd4,
2933        	0xb3, 0x7d, 0xfa, 0xef, 0xc5, 0x91
2934 };
2935 
2936 STATIC int
2937 mv_aes_ksched(uint8_t k[4][MAXKC], int keyBits,
2938     uint8_t W[MAXROUNDS+1][4][MAXBC])
2939 {
2940 	int KC, BC, ROUNDS;
2941 	int i, j, t, rconpointer = 0;
2942 	uint8_t tk[4][MAXKC];
2943 
2944 	switch (keyBits) {
2945 	case 128:
2946 		ROUNDS = 10;
2947 		KC = 4;
2948 		break;
2949 	case 192:
2950 		ROUNDS = 12;
2951 		KC = 6;
2952 	       	break;
2953 	case 256:
2954 		ROUNDS = 14;
2955 	       	KC = 8;
2956 	       	break;
2957 	default:
2958 	       	return (-1);
2959 	}
2960 	BC = 4; /* 128 bits */
2961 
2962 	for(j = 0; j < KC; j++)
2963 		for(i = 0; i < 4; i++)
2964 			tk[i][j] = k[i][j];
2965 	t = 0;
2966 
2967 	/* copy values into round key array */
2968 	for(j = 0; (j < KC) && (t < (ROUNDS+1)*BC); j++, t++)
2969 		for(i = 0; i < 4; i++) W[t / BC][i][t % BC] = tk[i][j];
2970 
2971 	while (t < (ROUNDS+1)*BC) { /* while not enough round key material calculated */
2972 		/* calculate new values */
2973 		for(i = 0; i < 4; i++)
2974 			tk[i][0] ^= AES_SBOX[tk[(i+1)%4][KC-1]];
2975 		tk[0][0] ^= AES_RCON[rconpointer++];
2976 
2977 		if (KC != 8)
2978 			for(j = 1; j < KC; j++)
2979 				for(i = 0; i < 4; i++)
2980 				       	tk[i][j] ^= tk[i][j-1];
2981 		else {
2982 			for(j = 1; j < KC/2; j++)
2983 				for(i = 0; i < 4; i++)
2984 				       	tk[i][j] ^= tk[i][j-1];
2985 			for(i = 0; i < 4; i++)
2986 			       	tk[i][KC/2] ^= AES_SBOX[tk[i][KC/2 - 1]];
2987 			for(j = KC/2 + 1; j < KC; j++)
2988 				for(i = 0; i < 4; i++)
2989 				       	tk[i][j] ^= tk[i][j-1];
2990 		}
2991 		/* copy values into round key array */
2992 		for(j = 0; (j < KC) && (t < (ROUNDS+1)*BC); j++, t++)
2993 			for(i = 0; i < 4; i++) W[t / BC][i][t % BC] = tk[i][j];
2994 	}
2995 
2996 	return 0;
2997 }
2998 
2999 STATIC int
3000 mv_aes_deckey(uint8_t *expandedKey, uint8_t *keyMaterial, int keyLen)
3001 {
3002 	uint8_t   W[MAXROUNDS+1][4][MAXBC];
3003 	uint8_t   k[4][MAXKC];
3004 	uint8_t   j;
3005 	int     i, rounds, KC;
3006 
3007 	if (expandedKey == NULL)
3008 		return -1;
3009 
3010 	if (!((keyLen == 128) || (keyLen == 192) || (keyLen == 256)))
3011 		return -1;
3012 
3013 	if (keyMaterial == NULL)
3014 		return -1;
3015 
3016 	/* initialize key schedule: */
3017 	for (i=0; i<keyLen/8; i++) {
3018 		j = keyMaterial[i];
3019 		k[i % 4][i / 4] = j;
3020 	}
3021 
3022 	mv_aes_ksched(k, keyLen, W);
3023 	switch (keyLen) {
3024 	case 128:
3025 		rounds = 10;
3026 		KC = 4;
3027 		break;
3028 	case 192:
3029 		rounds = 12;
3030 		KC = 6;
3031 		break;
3032 	case 256:
3033 		rounds = 14;
3034 		KC = 8;
3035 		break;
3036 	default:
3037 		return -1;
3038 	}
3039 
3040 	for(i=0; i<MAXBC; i++)
3041 		for(j=0; j<4; j++)
3042 			expandedKey[i*4+j] = W[rounds][j][i];
3043 	for(; i<KC; i++)
3044 		for(j=0; j<4; j++)
3045 			expandedKey[i*4+j] = W[rounds-1][j][i+MAXBC-KC];
3046 
3047 	return 0;
3048 }
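
/*
 * Note: the engine computes the AES key schedule on the fly, so for
 * decryption it needs the tail of the expanded schedule as its
 * starting point. mv_aes_deckey() therefore returns the last round
 * key (for AES-192/256, bytes spanning the last two round keys)
 * rather than any mathematical inverse of the cipher key.
 */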
3049 
3050 /*
3051  * Clear cipher/mac operation state
3052  */
3053 INLINE void
3054 mvxpsec_packet_reset_op(struct mvxpsec_packet *mv_p)
3055 {
3056 	mv_p->pkt_header.desc.acc_config = 0;
3057 	mv_p->enc_off = mv_p->enc_ivoff = mv_p->enc_len = 0;
3058 	mv_p->mac_off = mv_p->mac_dst = mv_p->mac_len = 0;
3059 }
3060 
3061 /*
3062  * update MVXPSEC operation order
3063  */
3064 INLINE void
3065 mvxpsec_packet_update_op_order(struct mvxpsec_packet *mv_p, int op)
3066 {
3067 	struct mvxpsec_acc_descriptor *acc_desc = &mv_p->pkt_header.desc;
3068 	uint32_t cur_op = acc_desc->acc_config & MV_ACC_CRYPTO_OP_MASK;
3069 
3070 	KASSERT(op == MV_ACC_CRYPTO_OP_MAC || op == MV_ACC_CRYPTO_OP_ENC);
3071 	KASSERT((op & MV_ACC_CRYPTO_OP_MASK) == op);
3072 
3073 	if (cur_op == 0)
3074 		acc_desc->acc_config |= op;
3075 	else if (cur_op == MV_ACC_CRYPTO_OP_MAC && op == MV_ACC_CRYPTO_OP_ENC) {
3076 		acc_desc->acc_config &= ~MV_ACC_CRYPTO_OP_MASK;
3077 		acc_desc->acc_config |= MV_ACC_CRYPTO_OP_MACENC;
3078 		/* MAC then ENC (= decryption) */
3079 	}
3080 	else if (cur_op == MV_ACC_CRYPTO_OP_ENC && op == MV_ACC_CRYPTO_OP_MAC) {
3081 		acc_desc->acc_config &= ~MV_ACC_CRYPTO_OP_MASK;
3082 		acc_desc->acc_config |= MV_ACC_CRYPTO_OP_ENCMAC;
3083 		/* ENC then MAC (= encryption) */
3084 	}
3085 	else {
3086 		log(LOG_ERR, "%s: multiple %s algorithms are not supported.\n",
3087 		    __func__,
3088 		    (op == MV_ACC_CRYPTO_OP_ENC) ? "encryption" : "authentication");
3089 	}
3090 }
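
/*
 * Example: mvxpsec_parse_crp() calls this once per cryptodesc, so the
 * order of the crd chain selects the engine mode:
 *
 *	ENC then MAC -> MV_ACC_CRYPTO_OP_ENCMAC (encrypt-then-MAC, outbound)
 *	MAC then ENC -> MV_ACC_CRYPTO_OP_MACENC (MAC-then-decrypt, inbound)
 */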
3091 
3092 /*
3093  * Parameter Conversions
3094  */
3095 INLINE uint32_t
3096 mvxpsec_alg2acc(uint32_t alg)
3097 {
3098 	uint32_t reg;
3099 
3100 	switch (alg) {
3101 	case CRYPTO_DES_CBC:
3102 		reg = MV_ACC_CRYPTO_ENC_DES;
3103 		reg |= MV_ACC_CRYPTO_CBC;
3104 		break;
3105 	case CRYPTO_3DES_CBC:
3106 		reg = MV_ACC_CRYPTO_ENC_3DES;
3107 		reg |= MV_ACC_CRYPTO_3DES_EDE;
3108 		reg |= MV_ACC_CRYPTO_CBC;
3109 		break;
3110 	case CRYPTO_AES_CBC:
3111 		reg = MV_ACC_CRYPTO_ENC_AES;
3112 		reg |= MV_ACC_CRYPTO_CBC;
3113 		break;
3114 	case CRYPTO_SHA1_HMAC_96:
3115 		reg = MV_ACC_CRYPTO_MAC_HMAC_SHA1;
3116 		reg |= MV_ACC_CRYPTO_MAC_96;
3117 		break;
3118 	case CRYPTO_MD5_HMAC_96:
3119 		reg = MV_ACC_CRYPTO_MAC_HMAC_MD5;
3120 		reg |= MV_ACC_CRYPTO_MAC_96;
3121 		break;
3122 	default:
3123 		reg = 0;
3124 		break;
3125 	}
3126 
3127 	return reg;
3128 }
3129 
3130 INLINE uint32_t
3131 mvxpsec_aesklen(int klen)
3132 {
3133 	if (klen < 128)
3134 		return 0;
3135 	else if (klen < 192)
3136 		return MV_ACC_CRYPTO_AES_KLEN_128;
3137 	else if (klen < 256)
3138 		return MV_ACC_CRYPTO_AES_KLEN_192;
3139 	else
3140 		return MV_ACC_CRYPTO_AES_KLEN_256;
3141 
3142 	return 0;
3143 }
3144 
3145 /*
3146  * String Conversions
3147  */
3148 STATIC const char *
3149 s_errreg(uint32_t v)
3150 {
3151 	static char buf[80];
3152 
3153 	snprintf(buf, sizeof(buf),
3154 	    "%sMiss %sDoubleHit %sBothHit %sDataError",
3155 	    (v & MV_TDMA_ERRC_MISS) ? "+" : "-",
3156 	    (v & MV_TDMA_ERRC_DHIT) ? "+" : "-",
3157 	    (v & MV_TDMA_ERRC_BHIT) ? "+" : "-",
3158 	    (v & MV_TDMA_ERRC_DERR) ? "+" : "-");
3159 
3160 	return (const char *)buf;
3161 }
3162 
3163 STATIC const char *
3164 s_winreg(uint32_t v)
3165 {
3166 	static char buf[80];
3167 
3168 	snprintf(buf, sizeof(buf),
3169 	    "%s TGT 0x%x ATTR 0x%02x size %u(0x%04x)[64KB]",
3170 	    (v & MV_TDMA_ATTR_ENABLE) ? "EN" : "DIS",
3171 	    MV_TDMA_ATTR_GET_TARGET(v), MV_TDMA_ATTR_GET_ATTR(v),
3172 	    MV_TDMA_ATTR_GET_SIZE(v), MV_TDMA_ATTR_GET_SIZE(v));
3173 
3174 	return (const char *)buf;
3175 }
3176 
3177 STATIC const char *
3178 s_ctrlreg(uint32_t reg)
3179 {
3180 	static char buf[80];
3181 
3182 	snprintf(buf, sizeof(buf),
3183 	    "%s: %sFETCH DBURST-%u SBURST-%u %sOUTS %sCHAIN %sBSWAP %sACT",
3184 	    (reg & MV_TDMA_CONTROL_ENABLE) ? "ENABLE" : "DISABLE",
3185 	    (reg & MV_TDMA_CONTROL_FETCH) ? "+" : "-",
3186 	    MV_TDMA_CONTROL_GET_DST_BURST(reg),
3187 	    MV_TDMA_CONTROL_GET_SRC_BURST(reg),
3188 	    (reg & MV_TDMA_CONTROL_OUTS_EN) ? "+" : "-",
3189 	    (reg & MV_TDMA_CONTROL_CHAIN_DIS) ? "-" : "+",
3190 	    (reg & MV_TDMA_CONTROL_BSWAP_DIS) ? "-" : "+",
3191 	    (reg & MV_TDMA_CONTROL_ACT) ? "+" : "-");
3192 
3193 	return (const char *)buf;
3194 }
3195 
3196 _STATIC const char *
3197 s_xpsecintr(uint32_t v)
3198 {
3199 	static char buf[160];
3200 
3201 	snprintf(buf, sizeof(buf),
3202 	    "%sAuth %sDES %sAES-ENC %sAES-DEC %sENC %sSA %sAccAndTDMA "
3203 	    "%sTDMAComp %sTDMAOwn %sAccAndTDMA_Cont",
3204 	    (v & MVXPSEC_INT_AUTH) ? "+" : "-",
3205 	    (v & MVXPSEC_INT_DES) ? "+" : "-",
3206 	    (v & MVXPSEC_INT_AES_ENC) ? "+" : "-",
3207 	    (v & MVXPSEC_INT_AES_DEC) ? "+" : "-",
3208 	    (v & MVXPSEC_INT_ENC) ? "+" : "-",
3209 	    (v & MVXPSEC_INT_SA) ? "+" : "-",
3210 	    (v & MVXPSEC_INT_ACCTDMA) ? "+" : "-",
3211 	    (v & MVXPSEC_INT_TDMA_COMP) ? "+" : "-",
3212 	    (v & MVXPSEC_INT_TDMA_OWN) ? "+" : "-",
3213 	    (v & MVXPSEC_INT_ACCTDMA_CONT) ? "+" : "-");
3214 
3215 	return (const char *)buf;
3216 }
3217 
3218 STATIC const char *
3219 s_ctlalg(uint32_t alg)
3220 {
3221 	switch (alg) {
3222 	case CRYPTO_SHA1_HMAC_96:
3223 		return "HMAC-SHA1-96";
3224 	case CRYPTO_SHA1_HMAC:
3225 		return "HMAC-SHA1";
3226 	case CRYPTO_SHA1:
3227 		return "SHA1";
3228 	case CRYPTO_MD5_HMAC_96:
3229 		return "HMAC-MD5-96";
3230 	case CRYPTO_MD5_HMAC:
3231 		return "HMAC-MD5";
3232 	case CRYPTO_MD5:
3233 		return "MD5";
3234 	case CRYPTO_DES_CBC:
3235 		return "DES-CBC";
3236 	case CRYPTO_3DES_CBC:
3237 		return "3DES-CBC";
3238 	case CRYPTO_AES_CBC:
3239 		return "AES-CBC";
3240 	default:
3241 		break;
3242 	}
3243 
3244 	return "Unknown";
3245 }
3246 
3247 STATIC const char *
3248 s_xpsec_op(uint32_t reg)
3249 {
3250 	reg &= MV_ACC_CRYPTO_OP_MASK;
3251 	switch (reg) {
3252 	case MV_ACC_CRYPTO_OP_ENC:
3253 		return "ENC";
3254 	case MV_ACC_CRYPTO_OP_MAC:
3255 		return "MAC";
3256 	case MV_ACC_CRYPTO_OP_ENCMAC:
3257 		return "ENC-MAC";
3258 	case MV_ACC_CRYPTO_OP_MACENC:
3259 		return "MAC-ENC";
3260 	default:
3261 		break;
3262 	}
3263 
3264 	return "Unknown";
3265 }
3266 
3267 STATIC const char *
3268 s_xpsec_enc(uint32_t alg)
3269 {
3270 	alg <<= MV_ACC_CRYPTO_ENC_SHIFT;
3271 	switch (alg) {
3272 	case MV_ACC_CRYPTO_ENC_DES:
3273 		return "DES";
3274 	case MV_ACC_CRYPTO_ENC_3DES:
3275 		return "3DES";
3276 	case MV_ACC_CRYPTO_ENC_AES:
3277 		return "AES";
3278 	default:
3279 		break;
3280 	}
3281 
3282 	return "Unknown";
3283 }
3284 
3285 STATIC const char *
3286 s_xpsec_mac(uint32_t alg)
3287 {
3288 	alg <<= MV_ACC_CRYPTO_MAC_SHIFT;
3289 	switch (alg) {
3290 	case MV_ACC_CRYPTO_MAC_NONE:
3291 		return "Disabled";
3292 	case MV_ACC_CRYPTO_MAC_MD5:
3293 		return "MD5";
3294 	case MV_ACC_CRYPTO_MAC_SHA1:
3295 		return "SHA1";
3296 	case MV_ACC_CRYPTO_MAC_HMAC_MD5:
3297 		return "HMAC-MD5";
3298 	case MV_ACC_CRYPTO_MAC_HMAC_SHA1:
3299 		return "HMAC-SHA1";
3300 	default:
3301 		break;
3302 	}
3303 
3304 	return "Unknown";
3305 }
3306 
3307 STATIC const char *
3308 s_xpsec_frag(uint32_t frag)
3309 {
3310 	frag <<= MV_ACC_CRYPTO_FRAG_SHIFT;
3311 	switch (frag) {
3312 	case MV_ACC_CRYPTO_NOFRAG:
3313 		return "NoFragment";
3314 	case MV_ACC_CRYPTO_FRAG_FIRST:
3315 		return "FirstFragment";
3316 	case MV_ACC_CRYPTO_FRAG_MID:
3317 		return "MiddleFragment";
3318 	case MV_ACC_CRYPTO_FRAG_LAST:
3319 		return "LastFragment";
3320 	default:
3321 		break;
3322 	}
3323 
3324 	return "Unknown";
3325 }
3326 
3327 #ifdef MVXPSEC_DEBUG
3328 void
3329 mvxpsec_dump_reg(struct mvxpsec_softc *sc)
3330 {
3331 	uint32_t reg;
3332 	int i;
3333 
3334 	if ((mvxpsec_debug & MVXPSEC_DEBUG_DESC) == 0)
3335 		return;
3336 
3337 	printf("--- Interrupt Registers ---\n");
3338 	reg = MVXPSEC_READ(sc, MVXPSEC_INT_CAUSE);
3339 	printf("MVXPSEC INT CAUSE: 0x%08x\n", reg);
3340 	printf("MVXPSEC INT CAUSE: %s\n", s_xpsecintr(reg));
3341 	reg = MVXPSEC_READ(sc, MVXPSEC_INT_MASK);
3342 	printf("MVXPSEC INT MASK: 0x%08x\n", reg);
3343 	printf("MVXPSEC INT MASK: %s\n", s_xpsecintr(reg));
3344 
3345 	printf("--- DMA Configuration Registers ---\n");
3346 	for (i = 0; i < MV_TDMA_NWINDOW; i++) {
3347 		reg = MVXPSEC_READ(sc, MV_TDMA_BAR(i));
3348 		printf("TDMA BAR%d: 0x%08x\n", i, reg);
3349 		reg = MVXPSEC_READ(sc, MV_TDMA_ATTR(i));
3350 		printf("TDMA ATTR%d: 0x%08x\n", i, reg);
3351 		printf("  -> %s\n", s_winreg(reg));
3352 	}
3353 
3354 	printf("--- DMA Control Registers ---\n");
3355 
3356 	reg = MVXPSEC_READ(sc, MV_TDMA_CONTROL);
3357 	printf("TDMA CONTROL: 0x%08x\n", reg);
3358 	printf("  -> %s\n", s_ctrlreg(reg));
3359 
3360 	printf("--- DMA Current Command Descriptors ---\n");
3361 
3362 	reg = MVXPSEC_READ(sc, MV_TDMA_ERR_CAUSE);
3363 	printf("TDMA ERR CAUSE: 0x%08x\n", reg);
3364 
3365 	reg = MVXPSEC_READ(sc, MV_TDMA_ERR_MASK);
3366 	printf("TDMA ERR MASK: 0x%08x\n", reg);
3367 
3368 	reg = MVXPSEC_READ(sc, MV_TDMA_CNT);
3369 	printf("TDMA DATA OWNER: %s\n",
3370 	    (reg & MV_TDMA_CNT_OWN) ? "DMAC" : "CPU");
3371 	printf("TDMA DATA COUNT: %d(0x%x)\n",
3372 	    (reg & ~MV_TDMA_CNT_OWN), (reg & ~MV_TDMA_CNT_OWN));
3373 
3374 	reg = MVXPSEC_READ(sc, MV_TDMA_SRC);
3375 	printf("TDMA DATA SRC: 0x%08x\n", reg);
3376 
3377 	reg = MVXPSEC_READ(sc, MV_TDMA_DST);
3378 	printf("TDMA DATA DST: 0x%08x\n", reg);
3379 
3380 	reg = MVXPSEC_READ(sc, MV_TDMA_NXT);
3381 	printf("TDMA DATA NXT: 0x%08x\n", reg);
3382 
3383 	reg = MVXPSEC_READ(sc, MV_TDMA_CUR);
3384 	printf("TDMA DATA CUR: 0x%08x\n", reg);
3385 
3386 	printf("--- ACC Command Register ---\n");
3387 	reg = MVXPSEC_READ(sc, MV_ACC_COMMAND);
3388 	printf("ACC COMMAND: 0x%08x\n", reg);
3389 	printf("ACC: %sACT %sSTOP\n",
3390 	    (reg & MV_ACC_COMMAND_ACT) ? "+" : "-",
3391 	    (reg & MV_ACC_COMMAND_STOP) ? "+" : "-");
3392 
3393 	reg = MVXPSEC_READ(sc, MV_ACC_CONFIG);
3394 	printf("ACC CONFIG: 0x%08x\n", reg);
3395 	reg = MVXPSEC_READ(sc, MV_ACC_DESC);
3396 	printf("ACC DESC: 0x%08x\n", reg);
3397 
3398 	printf("--- DES Key Register ---\n");
3399 	reg = MVXPSEC_READ(sc, MV_CE_DES_KEY0L);
3400 	printf("DES KEY0  Low: 0x%08x\n", reg);
3401 	reg = MVXPSEC_READ(sc, MV_CE_DES_KEY0H);
3402 	printf("DES KEY0 High: 0x%08x\n", reg);
3403 	reg = MVXPSEC_READ(sc, MV_CE_DES_KEY1L);
3404 	printf("DES KEY1  Low: 0x%08x\n", reg);
3405 	reg = MVXPSEC_READ(sc, MV_CE_DES_KEY1H);
3406 	printf("DES KEY1 High: 0x%08x\n", reg);
3407 	reg = MVXPSEC_READ(sc, MV_CE_DES_KEY2L);
3408 	printf("DES KEY2  Low: 0x%08x\n", reg);
3409 	reg = MVXPSEC_READ(sc, MV_CE_DES_KEY2H);
3410 	printf("DES KEY2 High: 0x%08x\n", reg);
3411 
3412 	printf("--- AES Key Register ---\n");
3413 	for (i = 0; i < 8; i++) {
3414 		reg = MVXPSEC_READ(sc, MV_CE_AES_EKEY(i));
3415 		printf("AES ENC KEY COL%d: %08x\n", i, reg);
3416 	}
3417 	for (i = 0; i < 8; i++) {
3418 		reg = MVXPSEC_READ(sc, MV_CE_AES_DKEY(i));
3419 		printf("AES DEC KEY COL%d: %08x\n", i, reg);
3420 	}
3421 
3422 	return;
3423 }
3424 
3425 STATIC void
3426 mvxpsec_dump_sram(const char *name, struct mvxpsec_softc *sc, size_t len)
3427 {
3428 	uint32_t reg;
3429 
3430 	if (sc->sc_sram_va == NULL)
3431 		return;
3432 
3433 	if (len == 0) {
3434 		printf("\n%s NO DATA(len=0)\n", name);
3435 		return;
3436 	}
3437 	else if (len > MV_ACC_SRAM_SIZE)
3438 		len = MV_ACC_SRAM_SIZE;
3439 
3440 	mutex_enter(&sc->sc_dma_mtx);
3441 	reg = MVXPSEC_READ(sc, MV_TDMA_CONTROL);
3442 	if (reg & MV_TDMA_CONTROL_ACT) {
3443 		printf("TDMA is active, cannot access SRAM\n");
3444 		mutex_exit(&sc->sc_dma_mtx);
3445 		return;
3446 	}
3447 	reg = MVXPSEC_READ(sc, MV_ACC_COMMAND);
3448 	if (reg & MV_ACC_COMMAND_ACT) {
3449 		printf("SA is active, cannot access SRAM\n");
3450 		mutex_exit(&sc->sc_dma_mtx);
3451 		return;
3452 	}
3453 
3454 	printf("%s: dump SRAM, %zu bytes\n", name, len);
3455 	mvxpsec_dump_data(name, sc->sc_sram_va, len);
3456 	mutex_exit(&sc->sc_dma_mtx);
3457 	return;
3458 }
3459 
3460 
3461 _STATIC void
3462 mvxpsec_dump_dmaq(struct mvxpsec_descriptor_handle *dh)
3463 {
3464 	struct mvxpsec_descriptor *d =
3465            (struct mvxpsec_descriptor *)dh->_desc;
3466 
3467 	printf("--- DMA Command Descriptor ---\n");
3468 	printf("DESC: VA=%p PA=0x%08x\n",
3469 	    d, (uint32_t)dh->phys_addr);
3470 	printf("DESC: WORD0 = 0x%08x\n", d->tdma_word0);
3471 	printf("DESC: SRC = 0x%08x\n", d->tdma_src);
3472 	printf("DESC: DST = 0x%08x\n", d->tdma_dst);
3473 	printf("DESC: NXT = 0x%08x\n", d->tdma_nxt);
3474 
3475 	return;
3476 }
3477 
3478 STATIC void
3479 mvxpsec_dump_data(const char *name, void *p, size_t len)
3480 {
3481 	uint8_t *data = p;
3482 	off_t off;
3483 
3484 	printf("%s: dump %p, %zu bytes", name, p, len);
3485 	if (p == NULL || len == 0) {
3486 		printf("\n%s: NO DATA\n", name);
3487 		return;
3488 	}
3489 	for (off = 0; off < len; off++) {
3490 		if ((off % 16) == 0) {
3491 			printf("\n%s: 0x%08x:", name, (uint32_t)off);
3492 		}
3493 		if ((off % 4) == 0) {
3494 			printf(" ");
3495 		}
3496 		printf("%02x", data[off]);
3497 	}
3498 	printf("\n");
3499 
3500 	return;
3501 }
3502 
3503 _STATIC void
3504 mvxpsec_dump_packet(const char *name, struct mvxpsec_packet *mv_p)
3505 {
3506 	struct mvxpsec_softc *sc = mv_p->mv_s->sc;
3507 
3508 	printf("%s: packet_data:\n", name);
3509 	mvxpsec_dump_packet_data(name, mv_p);
3510 
3511 	printf("%s: SRAM:\n", name);
3512 	mvxpsec_dump_sram(name, sc, 2000);
3513 
3514 	printf("%s: packet_descriptor:\n", name);
3515 	mvxpsec_dump_packet_desc(name, mv_p);
3516 }
3517 
3518 _STATIC void
3519 mvxpsec_dump_packet_data(const char *name, struct mvxpsec_packet *mv_p)
3520 {
3521 	static char buf[1500];
3522 	int len;
3523 
3524 	if (mv_p->data_type == MVXPSEC_DATA_MBUF) {
3525 		struct mbuf *m;
3526 
3527 		m = mv_p->data.mbuf;
3528 		len = m->m_pkthdr.len;
3529 		if (len > sizeof(buf))
3530 			len = sizeof(buf);
3531 		m_copydata(m, 0, len, buf);
3532 	}
3533 	else if (mv_p->data_type == MVXPSEC_DATA_UIO) {
3534 		struct uio *uio;
3535 
3536 		uio = mv_p->data.uio;
3537 		len = uio->uio_resid;
3538 		if (len > sizeof(buf))
3539 			len = sizeof(buf);
3540 		cuio_copydata(uio, 0, len, buf);
3541 	}
3542 	else if (mv_p->data_type == MVXPSEC_DATA_RAW) {
3543 		len = mv_p->data_len;
3544 		if (len > sizeof(buf))
3545 			len = sizeof(buf);
3546 		memcpy(buf, mv_p->data.raw, len);
3547 	}
3548 	else
3549 		return;
3550 	mvxpsec_dump_data(name, buf, len);
3551 
3552 	return;
3553 }
3554 
3555 _STATIC void
3556 mvxpsec_dump_packet_desc(const char *name, struct mvxpsec_packet *mv_p)
3557 {
3558 	uint32_t *words;
3559 
3560 	if (mv_p == NULL)
3561 		return;
3562 
3563 	words = &mv_p->pkt_header.desc.acc_desc_dword0;
3564 	mvxpsec_dump_acc_config(name, words[0]);
3565 	mvxpsec_dump_acc_encdata(name, words[1], words[2]);
3566 	mvxpsec_dump_acc_enclen(name, words[2]);
3567 	mvxpsec_dump_acc_enckey(name, words[3]);
3568 	mvxpsec_dump_acc_enciv(name, words[4]);
3569 	mvxpsec_dump_acc_macsrc(name, words[5]);
3570 	mvxpsec_dump_acc_macdst(name, words[6]);
3571 	mvxpsec_dump_acc_maciv(name, words[7]);
3572 
3573 	return;
3574 }
3575 
3576 _STATIC void
3577 mvxpsec_dump_acc_config(const char *name, uint32_t w)
3578 {
3579 	/* SA: Dword 0 */
3580 	printf("%s: Dword0=0x%08x\n", name, w);
3581 	printf("%s:   OP = %s\n", name,
3582 	    s_xpsec_op(MV_ACC_CRYPTO_OP(w)));
3583 	printf("%s:   MAC = %s\n", name,
3584 	    s_xpsec_mac(MV_ACC_CRYPTO_MAC(w)));
3585 	printf("%s:   MAC_LEN = %s\n", name,
3586 	    w & MV_ACC_CRYPTO_MAC_96 ? "96-bit" : "full-bit");
3587 	printf("%s:   ENC = %s\n", name,
3588 	    s_xpsec_enc(MV_ACC_CRYPTO_ENC(w)));
3589 	printf("%s:   DIR = %s\n", name,
3590 	    w & MV_ACC_CRYPTO_DECRYPT ? "decryption" : "encryption");
3591 	printf("%s:   CHAIN = %s\n", name,
3592 	    w & MV_ACC_CRYPTO_CBC ? "CBC" : "ECB");
3593 	printf("%s:   3DES = %s\n", name,
3594 	    w & MV_ACC_CRYPTO_3DES_EDE ? "EDE" : "EEE");
3595 	printf("%s:   FRAGMENT = %s\n", name,
3596 	    s_xpsec_frag(MV_ACC_CRYPTO_FRAG(w)));
3597 	return;
3598 }
3599 
3600 STATIC void
3601 mvxpsec_dump_acc_encdata(const char *name, uint32_t w, uint32_t w2)
3602 {
3603 	/* SA: Dword 1 */
3604 	printf("%s: Dword1=0x%08x\n", name, w);
3605 	printf("%s:   ENC SRC = 0x%x\n", name, MV_ACC_DESC_GET_VAL_1(w));
3606 	printf("%s:   ENC DST = 0x%x\n", name, MV_ACC_DESC_GET_VAL_2(w));
3607 	printf("%s:   ENC RANGE = 0x%x - 0x%x\n", name,
3608 	    MV_ACC_DESC_GET_VAL_1(w),
3609 	    MV_ACC_DESC_GET_VAL_1(w) + MV_ACC_DESC_GET_VAL_1(w2) - 1);
3610 	return;
3611 }
3612 
3613 STATIC void
3614 mvxpsec_dump_acc_enclen(const char *name, uint32_t w)
3615 {
3616 	/* SA: Dword 2 */
3617 	printf("%s: Dword2=0x%08x\n", name, w);
3618 	printf("%s:   ENC LEN = %d\n", name,
3619 	    MV_ACC_DESC_GET_VAL_1(w));
3620 	return;
3621 }
3622 
3623 STATIC void
3624 mvxpsec_dump_acc_enckey(const char *name, uint32_t w)
3625 {
3626 	/* SA: Dword 3 */
3627 	printf("%s: Dword3=0x%08x\n", name, w);
3628 	printf("%s:   EKEY = 0x%x\n", name,
3629 	    MV_ACC_DESC_GET_VAL_1(w));
3630 	return;
3631 }
3632 
3633 STATIC void
3634 mvxpsec_dump_acc_enciv(const char *name, uint32_t w)
3635 {
3636 	/* SA: Dword 4 */
3637 	printf("%s: Dword4=0x%08x\n", name, w);
3638 	printf("%s:   EIV = 0x%x\n", name, MV_ACC_DESC_GET_VAL_1(w));
3639 	printf("%s:   EIV_BUF = 0x%x\n", name, MV_ACC_DESC_GET_VAL_2(w));
3640 	return;
3641 }
3642 
3643 STATIC void
3644 mvxpsec_dump_acc_macsrc(const char *name, uint32_t w)
3645 {
3646 	/* SA: Dword 5 */
3647 	printf("%s: Dword5=0x%08x\n", name, w);
3648 	printf("%s:   MAC_SRC = 0x%x\n", name,
3649 	    MV_ACC_DESC_GET_VAL_1(w));
3650 	printf("%s:   MAC_TOTAL_LEN = %d\n", name,
3651 	    MV_ACC_DESC_GET_VAL_3(w));
3652 	printf("%s:   MAC_RANGE = 0x%0x - 0x%0x\n", name,
3653 	    MV_ACC_DESC_GET_VAL_1(w),
3654 	    MV_ACC_DESC_GET_VAL_1(w) + MV_ACC_DESC_GET_VAL_3(w) - 1);
3655 	return;
3656 }
3657 
3658 STATIC void
3659 mvxpsec_dump_acc_macdst(const char *name, uint32_t w)
3660 {
3661 	/* SA: Dword 6 */
3662 	printf("%s: Dword6=0x%08x\n", name, w);
3663 	printf("%s:   MAC_DST = 0x%x\n", name, MV_ACC_DESC_GET_VAL_1(w));
3664 	printf("%s:   MAC_BLOCK_LEN = %d\n", name,
3665 	    MV_ACC_DESC_GET_VAL_2(w));
3666 	return;
3667 }
3668 
3669 STATIC void
3670 mvxpsec_dump_acc_maciv(const char *name, uint32_t w)
3671 {
3672 	/* SA: Dword 7 */
3673 	printf("%s: Dword7=0x%08x\n", name, w);
3674 	printf("%s:   MAC_INNER_IV = 0x%x\n", name,
3675 	    MV_ACC_DESC_GET_VAL_1(w));
3676 	printf("%s:   MAC_OUTER_IV = 0x%x\n", name,
3677 	    MV_ACC_DESC_GET_VAL_2(w));
3678 	return;
3679 }
3680 #endif
3681