1 /*	$NetBSD: gtidmac.c,v 1.8 2012/07/23 06:09:47 kiyohara Exp $	*/
2 /*
3  * Copyright (c) 2008, 2012 KIYOHARA Takashi
4  * All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
17  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
18  * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
19  * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
20  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
21  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
23  * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
24  * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
25  * POSSIBILITY OF SUCH DAMAGE.
26  */
27 
28 #include <sys/cdefs.h>
29 __KERNEL_RCSID(0, "$NetBSD: gtidmac.c,v 1.8 2012/07/23 06:09:47 kiyohara Exp $");
30 
31 #include <sys/param.h>
32 #include <sys/bus.h>
33 #include <sys/device.h>
34 #include <sys/errno.h>
35 #include <sys/endian.h>
36 #include <sys/kmem.h>
37 
38 #include <uvm/uvm_param.h>	/* For PAGE_SIZE */
39 
40 #include <dev/dmover/dmovervar.h>
41 
42 #include <dev/marvell/gtidmacreg.h>
43 #include <dev/marvell/gtidmacvar.h>
44 #include <dev/marvell/marvellreg.h>
45 #include <dev/marvell/marvellvar.h>
46 
47 #include <prop/proplib.h>
48 
49 #include "locators.h"
50 
51 #ifdef GTIDMAC_DEBUG
52 #define DPRINTF(x)	if (gtidmac_debug) printf x
53 int gtidmac_debug = 0;
54 #else
55 #define DPRINTF(x)
56 #endif
57 
58 #define GTIDMAC_NDESC		64
59 #define GTIDMAC_MAXCHAN		8
60 #define MVXORE_NDESC		128
61 #define MVXORE_MAXCHAN		2
62 
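/*
 * Worst-case number of DMA segments per transfer: one segment for each
 * page of the largest transfer the engine accepts (GTIDMAC_MAXXFER and
 * MVXORE_MAXXFER come from gtidmacreg.h).
 */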
63 #define GTIDMAC_NSEGS		((GTIDMAC_MAXXFER + PAGE_SIZE - 1) / PAGE_SIZE)
64 #define MVXORE_NSEGS		((MVXORE_MAXXFER + PAGE_SIZE - 1) / PAGE_SIZE)
65 
66 
67 struct gtidmac_softc;
68 
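/*
 * Per-engine operation table.  The dmover(9) glue below drives both the
 * IDMA controller and the XOR engine through these five hooks (reached
 * via the dad_data pointer of each algdesc), so the scheduling code never
 * has to know which engine it is talking to.
 */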
69 struct gtidmac_function {
70 	int (*chan_alloc)(void *, bus_dmamap_t **, bus_dmamap_t **, void *);
71 	void (*chan_free)(void *, int);
72 	int (*dma_setup)(void *, int, int, bus_dmamap_t *, bus_dmamap_t *,
73 			 bus_size_t);
74 	void (*dma_start)(void *, int,
75 			  void (*dma_done_cb)(void *, int, bus_dmamap_t *,
76 						      bus_dmamap_t *, int));
77 	uint32_t (*dma_finish)(void *, int, int);
78 };
79 
80 struct gtidmac_dma_desc {
81 	int dd_index;
82 	union {
83 		struct gtidmac_desc *idmac_vaddr;
84 		struct mvxore_desc *xore_vaddr;
85 	} dd_vaddr;
86 #define dd_idmac_vaddr	dd_vaddr.idmac_vaddr
87 #define dd_xore_vaddr	dd_vaddr.xore_vaddr
88 	paddr_t dd_paddr;
89 	SLIST_ENTRY(gtidmac_dma_desc) dd_next;
90 };
91 
92 struct gtidmac_softc {
93 	device_t sc_dev;
94 
95 	bus_space_tag_t sc_iot;
96 	bus_space_handle_t sc_ioh;
97 
98 	bus_dma_tag_t sc_dmat;
99 	struct gtidmac_dma_desc *sc_dd_buffer;
100 	bus_dma_segment_t sc_pattern_segment;
101 	struct {
102 		u_char pbuf[16];	/* 16 bytes per pattern */
103 	} *sc_pbuf;			/* 256 patterns */
104 
105 	int sc_gtidmac_nchan;
106 	struct gtidmac_desc *sc_dbuf;
107 	bus_dmamap_t sc_dmap;
108 	SLIST_HEAD(, gtidmac_dma_desc) sc_dlist;
109 	struct {
110 		bus_dmamap_t chan_in;		/* In dmamap */
111 		bus_dmamap_t chan_out;		/* Out dmamap */
112 		uint64_t chan_totalcnt;		/* total transferred bytes */
113 		int chan_ddidx;
114 		void *chan_running;		/* opaque object data */
115 		void (*chan_dma_done)(void *, int, bus_dmamap_t *,
116 				      bus_dmamap_t *, int);
117 	} sc_cdesc[GTIDMAC_MAXCHAN];
118 	struct gtidmac_intr_arg {
119 		struct gtidmac_softc *ia_sc;
120 		uint32_t ia_cause;
121 		uint32_t ia_mask;
122 		uint32_t ia_eaddr;
123 		uint32_t ia_eselect;
124 	} sc_intrarg[GTIDMAC_NINTRRUPT];
125 
126 	int sc_mvxore_nchan;
127 	struct mvxore_desc *sc_dbuf_xore;
128 	bus_dmamap_t sc_dmap_xore;
129 	SLIST_HEAD(, gtidmac_dma_desc) sc_dlist_xore;
130 	struct {
131 		bus_dmamap_t chan_in[MVXORE_NSRC];	/* In dmamap */
132 		bus_dmamap_t chan_out;			/* Out dmamap */
133 		uint64_t chan_totalcnt;			/* total transferred */
134 		int chan_ddidx;
135 		void *chan_running;			/* opaque object data */
136 		void (*chan_dma_done)(void *, int, bus_dmamap_t *,
137 				      bus_dmamap_t *, int);
138 	} sc_cdesc_xore[MVXORE_MAXCHAN];
139 
140 	struct dmover_backend sc_dmb;
141 	struct dmover_backend sc_dmb_xore;
142 	int sc_dmb_busy;
143 };
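/*
 * The most recently attached instance is remembered here and handed out
 * by gtidmac_tag_get() as an opaque tag for other kernel code.
 */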
144 struct gtidmac_softc *gtidmac_softc = NULL;
145 
146 static int gtidmac_match(device_t, struct cfdata *, void *);
147 static void gtidmac_attach(device_t, device_t, void *);
148 
149 static int gtidmac_intr(void *);
150 static int mvxore_port0_intr(void *);
151 static int mvxore_port1_intr(void *);
152 static int mvxore_intr(struct gtidmac_softc *, int);
153 
154 static void gtidmac_process(struct dmover_backend *);
155 static void gtidmac_dmover_run(struct dmover_backend *);
156 static void gtidmac_dmover_done(void *, int, bus_dmamap_t *, bus_dmamap_t *,
157 				int);
158 __inline int gtidmac_dmmap_load(struct gtidmac_softc *, bus_dmamap_t,
159 				dmover_buffer_type, dmover_buffer *, int);
160 __inline void gtidmac_dmmap_unload(struct gtidmac_softc *, bus_dmamap_t, int);
161 
162 static uint32_t gtidmac_finish(void *, int, int);
163 static uint32_t mvxore_finish(void *, int, int);
164 
165 static void gtidmac_wininit(struct gtidmac_softc *);
166 static void mvxore_wininit(struct gtidmac_softc *);
167 
168 static int gtidmac_buffer_setup(struct gtidmac_softc *);
169 static int mvxore_buffer_setup(struct gtidmac_softc *);
170 
171 #ifdef GTIDMAC_DEBUG
172 static void gtidmac_dump_idmacreg(struct gtidmac_softc *, int);
173 static void gtidmac_dump_idmacdesc(struct gtidmac_softc *,
174 				   struct gtidmac_dma_desc *, uint32_t, int);
175 static void gtidmac_dump_xorereg(struct gtidmac_softc *, int);
176 static void gtidmac_dump_xoredesc(struct gtidmac_softc *,
177 				  struct gtidmac_dma_desc *, uint32_t, int);
178 #endif
179 
180 
181 static struct gtidmac_function gtidmac_functions = {
182 	.chan_alloc = gtidmac_chan_alloc,
183 	.chan_free = gtidmac_chan_free,
184 	.dma_setup = gtidmac_setup,
185 	.dma_start = gtidmac_start,
186 	.dma_finish = gtidmac_finish,
187 };
188 
189 static struct gtidmac_function mvxore_functions = {
190 	.chan_alloc = mvxore_chan_alloc,
191 	.chan_free = mvxore_chan_free,
192 	.dma_setup = mvxore_setup,
193 	.dma_start = mvxore_start,
194 	.dma_finish = mvxore_finish,
195 };
196 
197 static const struct dmover_algdesc gtidmac_algdescs[] = {
198 	{
199 		.dad_name = DMOVER_FUNC_ZERO,
200 		.dad_data = &gtidmac_functions,
201 		.dad_ninputs = 0
202 	},
203 	{
204 		.dad_name = DMOVER_FUNC_FILL8,
205 		.dad_data = &gtidmac_functions,
206 		.dad_ninputs = 0
207 	},
208 	{
209 		.dad_name = DMOVER_FUNC_COPY,
210 		.dad_data = &gtidmac_functions,
211 		.dad_ninputs = 1
212 	},
213 };
214 
215 static const struct dmover_algdesc mvxore_algdescs[] = {
216 #if 0
217 	/*
218 	 * These operations have too many restrictions on the XOR engine;
219 	 * the IDMAC must be used for them instead.
220 	 */
221 	{
222 		.dad_name = DMOVER_FUNC_ZERO,
223 		.dad_data = &mvxore_functions,
224 		.dad_ninputs = 0
225 	},
226 	{
227 		.dad_name = DMOVER_FUNC_FILL8,
228 		.dad_data = &mvxore_functions,
229 		.dad_ninputs = 0
230 	},
231 #endif
232 	{
233 		.dad_name = DMOVER_FUNC_COPY,
234 		.dad_data = &mvxore_functions,
235 		.dad_ninputs = 1
236 	},
237 	{
238 		.dad_name = DMOVER_FUNC_ISCSI_CRC32C,
239 		.dad_data = &mvxore_functions,
240 		.dad_ninputs = 1
241 	},
242 	{
243 		.dad_name = DMOVER_FUNC_XOR2,
244 		.dad_data = &mvxore_functions,
245 		.dad_ninputs = 2
246 	},
247 	{
248 		.dad_name = DMOVER_FUNC_XOR3,
249 		.dad_data = &mvxore_functions,
250 		.dad_ninputs = 3
251 	},
252 	{
253 		.dad_name = DMOVER_FUNC_XOR4,
254 		.dad_data = &mvxore_functions,
255 		.dad_ninputs = 4
256 	},
257 	{
258 		.dad_name = DMOVER_FUNC_XOR5,
259 		.dad_data = &mvxore_functions,
260 		.dad_ninputs = 5
261 	},
262 	{
263 		.dad_name = DMOVER_FUNC_XOR6,
264 		.dad_data = &mvxore_functions,
265 		.dad_ninputs = 6
266 	},
267 	{
268 		.dad_name = DMOVER_FUNC_XOR7,
269 		.dad_data = &mvxore_functions,
270 		.dad_ninputs = 7
271 	},
272 	{
273 		.dad_name = DMOVER_FUNC_XOR8,
274 		.dad_data = &mvxore_functions,
275 		.dad_ninputs = 8
276 	},
277 };
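/*
 * Usage sketch: how a kernel consumer might exercise the COPY function
 * registered by this backend through dmover(9).  Illustrative only;
 * "example_copy" and "example_copy_done" are hypothetical names, and the
 * session/request calls should be checked against dmover(9) and
 * <dev/dmover/dmovervar.h> before use.
 */
#if 0
static void
example_copy_done(struct dmover_request *dreq)
{

	if (dreq->dreq_flags & DMOVER_REQ_ERROR)
		printf("copy failed: %d\n", dreq->dreq_error);
	dmover_request_free(dreq);
}

static int
example_copy(struct dmover_session *dses, void *dst, void *src, size_t len)
{
	struct dmover_request *dreq;

	/*
	 * The session is assumed to have been created beforehand with
	 * dmover_session_create(DMOVER_FUNC_COPY, &dses).
	 */
	dreq = dmover_request_alloc(dses, NULL);
	if (dreq == NULL)
		return ENOMEM;

	dreq->dreq_outbuf_type = DMOVER_BUF_LINEAR;
	dreq->dreq_outbuf.dmbuf_linear.l_addr = dst;
	dreq->dreq_outbuf.dmbuf_linear.l_len = len;
	dreq->dreq_inbuf_type = DMOVER_BUF_LINEAR;
	dreq->dreq_inbuf[0].dmbuf_linear.l_addr = src;
	dreq->dreq_inbuf[0].dmbuf_linear.l_len = len;
	dreq->dreq_callback = example_copy_done;

	dmover_process(dreq);		/* completion via the callback */
	return 0;
}
#endif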
278 
279 static struct {
280 	int model;
281 	int idmac_nchan;
282 	int idmac_irq;
283 	int xore_nchan;
284 	int xore_irq;
285 } channels[] = {
286 	/*
287 	 * Marvell System Controllers:
288 	 * IRQ numbers must be supplied via attach_args.
289 	 */
290 	{ MARVELL_DISCOVERY,		8, -1, 0, -1 },
291 	{ MARVELL_DISCOVERY_II,		8, -1, 0, -1 },
292 	{ MARVELL_DISCOVERY_III,	8, -1, 0, -1 },
293 #if 0
294 	{ MARVELL_DISCOVERY_LT,		4, -1, 2, -1 },
295 	{ MARVELL_DISCOVERY_V,		4, -1, 2, -1 },
296 	{ MARVELL_DISCOVERY_VI,		4, -1, 2, -1 },		????
297 #endif
298 
299 	/*
300 	 * Marvell System on Chips:
301 	 * No IRQs are needed in attach_args; the engines are always wired
302 	 * to fixed interrupt pins.
303 	 */
304 	{ MARVELL_ORION_1_88F1181,	4, 24, 0, -1 },
305 	{ MARVELL_ORION_2_88F1281,	4, 24, 0, -1 },
306 	{ MARVELL_ORION_1_88F5082,	4, 24, 0, -1 },
307 	{ MARVELL_ORION_1_88F5180N,	4, 24, 0, -1 },
308 	{ MARVELL_ORION_1_88F5181,	4, 24, 0, -1 },
309 	{ MARVELL_ORION_1_88F5182,	4, 24, 2, 30 },
310 	{ MARVELL_ORION_2_88F5281,	4, 24, 0, -1 },
311 	{ MARVELL_ORION_1_88W8660,	4, 24, 0, -1 },
312 	{ MARVELL_KIRKWOOD_88F6180,	0, -1, 4, 5 },
313 	{ MARVELL_KIRKWOOD_88F6192,	0, -1, 4, 5 },
314 	{ MARVELL_KIRKWOOD_88F6281,	0, -1, 4, 5 },
315 	{ MARVELL_KIRKWOOD_88F6282,	0, -1, 4, 5 },
316 };
317 
318 CFATTACH_DECL_NEW(gtidmac_gt, sizeof(struct gtidmac_softc),
319     gtidmac_match, gtidmac_attach, NULL, NULL);
320 CFATTACH_DECL_NEW(gtidmac_mbus, sizeof(struct gtidmac_softc),
321     gtidmac_match, gtidmac_attach, NULL, NULL);
322 
323 
324 /* ARGSUSED */
325 static int
326 gtidmac_match(device_t parent, struct cfdata *match, void *aux)
327 {
328 	struct marvell_attach_args *mva = aux;
329 	int i;
330 
331 	if (strcmp(mva->mva_name, match->cf_name) != 0)
332 		return 0;
333 	if (mva->mva_offset == MVA_OFFSET_DEFAULT)
334 		return 0;
335 	for (i = 0; i < __arraycount(channels); i++)
336 		if (mva->mva_model == channels[i].model) {
337 			mva->mva_size = GTIDMAC_SIZE;
338 			return 1;
339 		}
340 	return 0;
341 }
342 
343 /* ARGSUSED */
344 static void
345 gtidmac_attach(device_t parent, device_t self, void *aux)
346 {
347 	struct gtidmac_softc *sc = device_private(self);
348 	struct marvell_attach_args *mva = aux;
349 	prop_dictionary_t dict = device_properties(self);
350 	uint32_t idmac_irq, xore_irq, dmb_speed;
351 	int idmac_nchan, xore_nchan, nsegs, i, j, n;
352 
353 	for (i = 0; i < __arraycount(channels); i++)
354 		if (mva->mva_model == channels[i].model)
355 			break;
356 	idmac_nchan = channels[i].idmac_nchan;
357 	idmac_irq = channels[i].idmac_irq;
358 	if (idmac_nchan != 0) {
359 		if (idmac_irq == -1)
360 			idmac_irq = mva->mva_irq;
361 		if (idmac_irq == -1)
362 			/* Discovery */
363 			if (!prop_dictionary_get_uint32(dict,
364 			    "idmac-irq", &idmac_irq)) {
365 				aprint_error(": no idmac-irq property\n");
366 				return;
367 			}
368 	}
369 	xore_nchan = channels[i].xore_nchan;
370 	xore_irq = channels[i].xore_irq;
371 	if (xore_nchan != 0) {
372 		if (xore_irq == -1)
373 			xore_irq = mva->mva_irq;
374 		if (xore_irq == -1)
375 			/* Discovery LT/V/VI */
376 			if (!prop_dictionary_get_uint32(dict,
377 			    "xore-irq", &xore_irq)) {
378 				aprint_error(": no xore-irq property\n");
379 				return;
380 			}
381 	}
382 
383 	aprint_naive("\n");
384 	aprint_normal(": Marvell IDMA Controller%s\n",
385 	    xore_nchan ? "/XOR Engine" : "");
386 	if (idmac_nchan > 0)
387 		aprint_normal_dev(self,
388 		    "IDMA Controller %d channels, intr %d...%d\n",
389 		    idmac_nchan, idmac_irq, idmac_irq + GTIDMAC_NINTRRUPT - 1);
390 	if (xore_nchan > 0)
391 		aprint_normal_dev(self,
392 		    "XOR Engine %d channels, intr %d...%d\n",
393 		    xore_nchan, xore_irq, xore_irq + xore_nchan - 1);
394 
395 	sc->sc_dev = self;
396 	sc->sc_iot = mva->mva_iot;
397 
398 	/* Map I/O registers */
399 	if (bus_space_subregion(mva->mva_iot, mva->mva_ioh, mva->mva_offset,
400 	    mva->mva_size, &sc->sc_ioh)) {
401 		aprint_error_dev(self, "can't map registers\n");
402 		return;
403 	}
404 
405 	/*
406 	 * Initialise DMA descriptors and associated metadata
407 	 */
408 	sc->sc_dmat = mva->mva_dmat;
409 	n = idmac_nchan * GTIDMAC_NDESC + xore_nchan * MVXORE_NDESC;
410 	sc->sc_dd_buffer =
411 	    kmem_alloc(sizeof(struct gtidmac_dma_desc) * n, KM_SLEEP);
412 	if (sc->sc_dd_buffer == NULL) {
413 		aprint_error_dev(self, "can't allocate memory\n");
414 		goto fail1;
415 	}
416 	/* Pattern buffer: 256 x 16-byte fill patterns for ZERO/FILL8 */
417 	if (bus_dmamem_alloc(sc->sc_dmat, PAGE_SIZE, PAGE_SIZE, 0,
418 	    &sc->sc_pattern_segment, 1, &nsegs, BUS_DMA_NOWAIT)) {
419 		aprint_error_dev(self,
420 		    "bus_dmamem_alloc failed: pattern buffer\n");
421 		goto fail2;
422 	}
423 	if (bus_dmamem_map(sc->sc_dmat, &sc->sc_pattern_segment, 1, PAGE_SIZE,
424 	    (void **)&sc->sc_pbuf, BUS_DMA_NOWAIT)) {
425 		aprint_error_dev(self,
426 		    "bus_dmamem_map failed: pattern buffer\n");
427 		goto fail3;
428 	}
429 	for (i = 0; i < 0x100; i++)
430 		for (j = 0; j < sizeof(sc->sc_pbuf[i].pbuf); j++)
431 			sc->sc_pbuf[i].pbuf[j] = i;
432 
433 	if (!prop_dictionary_get_uint32(dict, "dmb_speed", &dmb_speed)) {
434 		aprint_error_dev(self, "no dmb_speed property\n");
435 		dmb_speed = 10;	/* Probably still faster than swdmover. */
436 	}
437 
438 	/* IDMAC DMA descriptor buffer */
439 	sc->sc_gtidmac_nchan = idmac_nchan;
440 	if (sc->sc_gtidmac_nchan > 0) {
441 		if (gtidmac_buffer_setup(sc) != 0)
442 			goto fail4;
443 
444 		if (mva->mva_model != MARVELL_DISCOVERY)
445 			gtidmac_wininit(sc);
446 
447 		/* Setup interrupt */
448 		for (i = 0; i < GTIDMAC_NINTRRUPT; i++) {
449 			j = i * idmac_nchan / GTIDMAC_NINTRRUPT;
450 
451 			sc->sc_intrarg[i].ia_sc = sc;
452 			sc->sc_intrarg[i].ia_cause = GTIDMAC_ICR(j);
453 			sc->sc_intrarg[i].ia_eaddr = GTIDMAC_EAR(j);
454 			sc->sc_intrarg[i].ia_eselect = GTIDMAC_ESR(j);
455 			marvell_intr_establish(idmac_irq + i, IPL_BIO,
456 			    gtidmac_intr, &sc->sc_intrarg[i]);
457 		}
458 
459 		/* Register us with dmover. */
460 		sc->sc_dmb.dmb_name = device_xname(self);
461 		sc->sc_dmb.dmb_speed = dmb_speed;
462 		sc->sc_dmb.dmb_cookie = sc;
463 		sc->sc_dmb.dmb_algdescs = gtidmac_algdescs;
464 		sc->sc_dmb.dmb_nalgdescs = __arraycount(gtidmac_algdescs);
465 		sc->sc_dmb.dmb_process = gtidmac_process;
466 		dmover_backend_register(&sc->sc_dmb);
467 		sc->sc_dmb_busy = 0;
468 	}
469 
470 	/* XORE DMA descriptor buffer */
471 	sc->sc_mvxore_nchan = xore_nchan;
472 	if (sc->sc_mvxore_nchan > 0) {
473 		if (mvxore_buffer_setup(sc) != 0)
474 			goto fail5;
475 
476 		/* Setup interrupt */
477 		for (i = 0; i < sc->sc_mvxore_nchan; i++)
478 			marvell_intr_establish(xore_irq + i, IPL_BIO,
479 			    (i & 0x2) ? mvxore_port1_intr : mvxore_port0_intr,
480 			    sc);
481 
482 		mvxore_wininit(sc);
483 
484 		/* Register us with dmover. */
485 		sc->sc_dmb_xore.dmb_name = device_xname(sc->sc_dev);
486 		sc->sc_dmb_xore.dmb_speed = dmb_speed;
487 		sc->sc_dmb_xore.dmb_cookie = sc;
488 		sc->sc_dmb_xore.dmb_algdescs = mvxore_algdescs;
489 		sc->sc_dmb_xore.dmb_nalgdescs =
490 		    __arraycount(mvxore_algdescs);
491 		sc->sc_dmb_xore.dmb_process = gtidmac_process;
492 		dmover_backend_register(&sc->sc_dmb_xore);
493 	}
494 
495 	gtidmac_softc = sc;
496 
497 	return;
498 
499 fail5:
500 	for (i = sc->sc_gtidmac_nchan - 1; i >= 0; i--) {
501 		bus_dmamap_destroy(sc->sc_dmat, sc->sc_cdesc[i].chan_in);
502 		bus_dmamap_destroy(sc->sc_dmat, sc->sc_cdesc[i].chan_out);
503 	}
504 	bus_dmamap_unload(sc->sc_dmat, sc->sc_dmap);
505 	bus_dmamap_destroy(sc->sc_dmat, sc->sc_dmap);
506 	bus_dmamem_unmap(sc->sc_dmat, sc->sc_dbuf,
507 	    sizeof(struct gtidmac_desc) * GTIDMAC_NDESC);
508 	bus_dmamem_free(sc->sc_dmat,
509 	    sc->sc_dmap->dm_segs, sc->sc_dmap->dm_nsegs);
510 fail4:
511 	bus_dmamem_unmap(sc->sc_dmat, sc->sc_pbuf, PAGE_SIZE);
512 fail3:
513 	bus_dmamem_free(sc->sc_dmat, &sc->sc_pattern_segment, 1);
514 fail2:
515 	kmem_free(sc->sc_dd_buffer, sizeof(struct gtidmac_dma_desc) * n);
516 fail1:
517 	bus_space_unmap(sc->sc_iot, sc->sc_ioh, mva->mva_size);
518 	return;
519 }
520 
521 
522 static int
523 gtidmac_intr(void *arg)
524 {
525 	struct gtidmac_intr_arg *ia = arg;
526 	struct gtidmac_softc *sc = ia->ia_sc;
527 	uint32_t cause;
528 	int handled = 0, chan, error;
529 
530 	cause = bus_space_read_4(sc->sc_iot, sc->sc_ioh, ia->ia_cause);
531 	DPRINTF(("IDMAC intr: cause=0x%x\n", cause));
532 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, ia->ia_cause, ~cause);
533 
534 	chan = 0;
535 	while (cause) {
536 		error = 0;
537 		if (cause & GTIDMAC_I_ADDRMISS) {
538 			aprint_error_dev(sc->sc_dev, "Address Miss");
539 			error = EINVAL;
540 		}
541 		if (cause & GTIDMAC_I_ACCPROT) {
542 			aprint_error_dev(sc->sc_dev,
543 			    "Access Protect Violation");
544 			error = EACCES;
545 		}
546 		if (cause & GTIDMAC_I_WRPROT) {
547 			aprint_error_dev(sc->sc_dev, "Write Protect");
548 			error = EACCES;
549 		}
550 		if (cause & GTIDMAC_I_OWN) {
551 			aprint_error_dev(sc->sc_dev, "Ownership Violation");
552 			error = EINVAL;
553 		}
554 
555 #define GTIDMAC_I_ERROR		  \
556 	   (GTIDMAC_I_ADDRMISS	| \
557 	    GTIDMAC_I_ACCPROT	| \
558 	    GTIDMAC_I_WRPROT	| \
559 	    GTIDMAC_I_OWN)
560 		if (cause & GTIDMAC_I_ERROR) {
561 			uint32_t sel;
562 			int select;
563 
564 			sel = bus_space_read_4(sc->sc_iot, sc->sc_ioh,
565 			    ia->ia_eselect) & GTIDMAC_ESR_SEL;
566 			select = sel - chan * GTIDMAC_I_BITS;
567 			if (select >= 0 && select < GTIDMAC_I_BITS) {
568 				uint32_t ear;
569 
570 				ear = bus_space_read_4(sc->sc_iot, sc->sc_ioh,
571 				    ia->ia_eaddr);
572 				aprint_error(": Error Address 0x%x\n", ear);
573 			} else
574 				aprint_error(": lost Error Address\n");
575 		}
576 
577 		if (cause & (GTIDMAC_I_COMP | GTIDMAC_I_ERROR)) {
578 			sc->sc_cdesc[chan].chan_dma_done(
579 			    sc->sc_cdesc[chan].chan_running, chan,
580 			    &sc->sc_cdesc[chan].chan_in,
581 			    &sc->sc_cdesc[chan].chan_out, error);
582 			handled++;
583 		}
584 
585 		cause >>= GTIDMAC_I_BITS;
586 	}
587 	DPRINTF(("IDMAC intr: %shandled\n", handled ? "" : "not "));
588 
589 	return handled;
590 }
591 
592 static int
593 mvxore_port0_intr(void *arg)
594 {
595 	struct gtidmac_softc *sc = arg;
596 
597 	return mvxore_intr(sc, 0);
598 }
599 
600 static int
601 mvxore_port1_intr(void *arg)
602 {
603 	struct gtidmac_softc *sc = arg;
604 
605 	return mvxore_intr(sc, 1);
606 }
607 
608 static int
609 mvxore_intr(struct gtidmac_softc *sc, int port)
610 {
611 	uint32_t cause;
612 	int handled = 0, chan, error;
613 
614 	cause =
615 	    bus_space_read_4(sc->sc_iot, sc->sc_ioh, MVXORE_XEICR(sc, port));
616 	DPRINTF(("XORE port %d intr: cause=0x%x\n", port, cause));
617 printf("XORE port %d intr: cause=0x%x\n", port, cause);
618 	bus_space_write_4(sc->sc_iot, sc->sc_ioh,
619 	    MVXORE_XEICR(sc, port), ~cause);
620 
621 	chan = 0;
622 	while (cause) {
623 		error = 0;
624 		if (cause & MVXORE_I_ADDRDECODE) {
625 			aprint_error_dev(sc->sc_dev, "Failed address decoding");
626 			error = EINVAL;
627 		}
628 		if (cause & MVXORE_I_ACCPROT) {
629 			aprint_error_dev(sc->sc_dev,
630 			    "Access Protect Violation");
631 			error = EACCES;
632 		}
633 		if (cause & MVXORE_I_WRPROT) {
634 			aprint_error_dev(sc->sc_dev, "Write Protect");
635 			error = EACCES;
636 		}
637 		if (cause & MVXORE_I_OWN) {
638 			aprint_error_dev(sc->sc_dev, "Ownership Violation");
639 			error = EINVAL;
640 		}
641 		if (cause & MVXORE_I_INTPARITY) {
642 			aprint_error_dev(sc->sc_dev, "Parity Error");
643 			error = EIO;
644 		}
645 		if (cause & MVXORE_I_XBAR) {
646 			aprint_error_dev(sc->sc_dev, "Crossbar Parity Error");
647 			error = EINVAL;
648 		}
649 
650 #define MVXORE_I_ERROR		  \
651 	   (MVXORE_I_ADDRDECODE	| \
652 	    MVXORE_I_ACCPROT	| \
653 	    MVXORE_I_WRPROT	| \
654 	    MVXORE_I_OWN	| \
655 	    MVXORE_I_INTPARITY	| \
656 	    MVXORE_I_XBAR)
657 		if (cause & MVXORE_I_ERROR) {
658 			uint32_t type;
659 			int event;
660 
661 			type = bus_space_read_4(sc->sc_iot, sc->sc_ioh,
662 			    MVXORE_XEECR(sc, port));
663 			type &= MVXORE_XEECR_ERRORTYPE_MASK;
664 			event = type - chan * MVXORE_I_BITS;
665 			if (event >= 0 && event < MVXORE_I_BITS) {
666 				uint32_t xeear;
667 
668 				xeear = bus_space_read_4(sc->sc_iot, sc->sc_ioh,
669 				    MVXORE_XEEAR(sc, port));
670 				aprint_error(": Error Address 0x%x\n", xeear);
671 			} else
672 				aprint_error(": lost Error Address\n");
673 		}
674 
675 		if (cause & (MVXORE_I_EOC | MVXORE_I_ERROR)) {
676 			sc->sc_cdesc_xore[chan].chan_dma_done(
677 			    sc->sc_cdesc_xore[chan].chan_running, chan,
678 			    sc->sc_cdesc_xore[chan].chan_in,
679 			    &sc->sc_cdesc_xore[chan].chan_out, error);
680 			handled++;
681 		}
682 
683 		cause >>= MVXORE_I_BITS;
684 	}
685 printf("XORE port %d intr: %shandled\n", port, handled ? "" : "not ");
686 	DPRINTF(("XORE port %d intr: %shandled\n",
687 	    port, handled ? "" : "not "));
688 
689 	return handled;
690 }
691 
692 
693 /*
694  * dmover(9) backend function.
695  */
696 static void
697 gtidmac_process(struct dmover_backend *dmb)
698 {
699 	struct gtidmac_softc *sc = dmb->dmb_cookie;
700 	int s;
701 
702 	/* If the backend is currently idle, go process the queue. */
703 	s = splbio();
704 	if (!sc->sc_dmb_busy)
705 		gtidmac_dmover_run(dmb);
706 	splx(s);
707 }
708 
709 static void
710 gtidmac_dmover_run(struct dmover_backend *dmb)
711 {
712 	struct gtidmac_softc *sc = dmb->dmb_cookie;
713 	struct dmover_request *dreq;
714 	const struct dmover_algdesc *algdesc;
715 	struct gtidmac_function *df;
716 	bus_dmamap_t *dmamap_in, *dmamap_out;
717 	int chan, ninputs, error, i;
718 
719 	sc->sc_dmb_busy = 1;
720 
721 	for (;;) {
722 		dreq = TAILQ_FIRST(&dmb->dmb_pendreqs);
723 		if (dreq == NULL)
724 			break;
725 		algdesc = dreq->dreq_assignment->das_algdesc;
726 		df = algdesc->dad_data;
727 		chan = (*df->chan_alloc)(sc, &dmamap_in, &dmamap_out, dreq);
728 		if (chan == -1)
729 			return;
730 
731 		dmover_backend_remque(dmb, dreq);
732 		dreq->dreq_flags |= DMOVER_REQ_RUNNING;
733 
734 		/* XXXUNLOCK */
735 
736 		error = 0;
737 
738 		/* Load in/out buffers of dmover to bus_dmamap. */
739 		ninputs = dreq->dreq_assignment->das_algdesc->dad_ninputs;
740 		if (ninputs == 0) {
741 			int pno = 0;
742 
743 			if (algdesc->dad_name == DMOVER_FUNC_FILL8)
744 				pno = dreq->dreq_immediate[0];
745 
746 			i = 0;
747 			error = bus_dmamap_load(sc->sc_dmat, *dmamap_in,
748 			    &sc->sc_pbuf[pno], sizeof(sc->sc_pbuf[pno]), NULL,
749 			    BUS_DMA_NOWAIT | BUS_DMA_STREAMING | BUS_DMA_WRITE);
750 			if (error == 0) {
751 				bus_dmamap_sync(sc->sc_dmat, *dmamap_in, 0,
752 				    sizeof(uint32_t), BUS_DMASYNC_PREWRITE);
753 
754 				/*
755 				 * gtidmac_dmmap_unload() must be called for
756 				 * this map if an error occurs later.
757 				 */
758 				i = 1;
759 			}
760 		} else
761 			for (i = 0; i < ninputs; i++) {
762 				error = gtidmac_dmmap_load(sc,
763 				    *(dmamap_in + i), dreq->dreq_inbuf_type,
764 				    &dreq->dreq_inbuf[i], 0/*write*/);
765 				if (error != 0)
766 					break;
767 			}
768 		if (algdesc->dad_name != DMOVER_FUNC_ISCSI_CRC32C) {
769 			if (error == 0)
770 				error = gtidmac_dmmap_load(sc, *dmamap_out,
771 				    dreq->dreq_outbuf_type, &dreq->dreq_outbuf,
772 				    1/*read*/);
773 
774 			if (error == 0) {
775 				/*
776 				 * The outbuf size is taken to be the DMA
777 				 * transfer size of the dmover request.
778 				 */
779 				error = (*df->dma_setup)(sc, chan, ninputs,
780 				    dmamap_in, dmamap_out,
781 				    (*dmamap_out)->dm_mapsize);
782 				if (error != 0)
783 					gtidmac_dmmap_unload(sc, *dmamap_out,
784 					    1);
785 			}
786 		} else
787 			if (error == 0)
788 				error = (*df->dma_setup)(sc, chan, ninputs,
789 				    dmamap_in, dmamap_out,
790 				    (*dmamap_in)->dm_mapsize);
791 
792 		/* XXXLOCK */
793 
794 		if (error != 0) {
795 			for (; i-- > 0;)
796 				gtidmac_dmmap_unload(sc, *(dmamap_in + i), 0);
797 			(*df->chan_free)(sc, chan);
798 
799 			dreq->dreq_flags |= DMOVER_REQ_ERROR;
800 			dreq->dreq_error = error;
801 			/* XXXUNLOCK */
802 			dmover_done(dreq);
803 			/* XXXLOCK */
804 			continue;
805 		}
806 
807 		(*df->dma_start)(sc, chan, gtidmac_dmover_done);
808 		break;
809 	}
810 
811 	/* All done */
812 	sc->sc_dmb_busy = 0;
813 }
814 
815 static void
816 gtidmac_dmover_done(void *object, int chan, bus_dmamap_t *dmamap_in,
817 		    bus_dmamap_t *dmamap_out, int error)
818 {
819 	struct gtidmac_softc *sc;
820 	struct dmover_request *dreq = object;
821 	struct dmover_backend *dmb;
822 	struct gtidmac_function *df;
823 	uint32_t result;
824 	int ninputs, i;
825 
826 	KASSERT(dreq != NULL);
827 
828 	dmb = dreq->dreq_assignment->das_backend;
829 	df = dreq->dreq_assignment->das_algdesc->dad_data;
830 	ninputs = dreq->dreq_assignment->das_algdesc->dad_ninputs;
831 	sc = dmb->dmb_cookie;
832 
833 	result = (*df->dma_finish)(sc, chan, error);
834 	for (i = 0; i < ninputs; i++)
835 		gtidmac_dmmap_unload(sc, *(dmamap_in + i), 0);
836 	if (dreq->dreq_assignment->das_algdesc->dad_name ==
837 	    DMOVER_FUNC_ISCSI_CRC32C)
838 		memcpy(dreq->dreq_immediate, &result, sizeof(result));
839 	else
840 		gtidmac_dmmap_unload(sc, *dmamap_out, 1);
841 
842 	(*df->chan_free)(sc, chan);
843 
844 	if (error) {
845 		dreq->dreq_error = error;
846 		dreq->dreq_flags |= DMOVER_REQ_ERROR;
847 	}
848 
849 	dmover_done(dreq);
850 
851 	/*
852 	 * See if we can start some more dmover(9) requests.
853 	 *
854 	 * Note: We're already at splbio() here.
855 	 */
856 	if (!sc->sc_dmb_busy)
857 		gtidmac_dmover_run(dmb);
858 }
859 
860 __inline int
861 gtidmac_dmmap_load(struct gtidmac_softc *sc, bus_dmamap_t dmamap,
862 		   dmover_buffer_type dmbuf_type, dmover_buffer *dmbuf,
863 		   int read)
864 {
865 	int error, flags;
866 
867 	flags = BUS_DMA_NOWAIT | BUS_DMA_STREAMING |
868 	    (read ? BUS_DMA_READ : BUS_DMA_WRITE);
869 
870 	switch (dmbuf_type) {
871 	case DMOVER_BUF_LINEAR:
872 		error = bus_dmamap_load(sc->sc_dmat, dmamap,
873 		    dmbuf->dmbuf_linear.l_addr, dmbuf->dmbuf_linear.l_len,
874 		    NULL, flags);
875 		break;
876 
877 	case DMOVER_BUF_UIO:
878 		if ((read && dmbuf->dmbuf_uio->uio_rw != UIO_READ) ||
879 		    (!read && dmbuf->dmbuf_uio->uio_rw == UIO_READ))
880 			return (EINVAL);
881 
882 		error = bus_dmamap_load_uio(sc->sc_dmat, dmamap,
883 		    dmbuf->dmbuf_uio, flags);
884 		break;
885 
886 	default:
887 		error = EINVAL;
888 	}
889 
890 	if (error == 0)
891 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
892 		    read ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
893 
894 	return error;
895 }
896 
897 __inline void
898 gtidmac_dmmap_unload(struct gtidmac_softc *sc, bus_dmamap_t dmamap, int read)
899 {
900 
901 	bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
902 	    read ? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
903 
904 	bus_dmamap_unload(sc->sc_dmat, dmamap);
905 }
906 
907 
908 void *
909 gtidmac_tag_get(void)
910 {
911 
912 	return gtidmac_softc;
913 }
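/*
 * Call-sequence sketch for the exported entry points (the dmover glue
 * above follows the same order).  "done_cb", "src", "dst" and "cookie"
 * are hypothetical placeholders; error unwinding of the dmamap loads is
 * elided for brevity.
 */
#if 0
static void
example_idmac_copy(struct gtidmac_softc *sc, void *cookie, void *src,
		   void *dst, size_t len,
		   void (*done_cb)(void *, int, bus_dmamap_t *, bus_dmamap_t *,
				   int))
{
	void *tag = gtidmac_tag_get();
	bus_dmamap_t *in, *out;
	int chan;

	/* Reserve an idle channel and get its in/out dmamaps. */
	chan = gtidmac_chan_alloc(tag, &in, &out, cookie);
	if (chan < 0)
		return;			/* all channels busy */

	/* Load source and destination buffers for the engine. */
	bus_dmamap_load(sc->sc_dmat, *in, src, len, NULL,
	    BUS_DMA_NOWAIT | BUS_DMA_WRITE);
	bus_dmamap_load(sc->sc_dmat, *out, dst, len, NULL,
	    BUS_DMA_NOWAIT | BUS_DMA_READ);

	/* Build the chained descriptors, then kick the channel. */
	if (gtidmac_setup(tag, chan, 1, in, out, len) == 0)
		gtidmac_start(tag, chan, done_cb);

	/* done_cb() unloads the maps and calls gtidmac_chan_free(). */
}
#endif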
914 
915 /*
916  * IDMAC functions
917  */
918 int
919 gtidmac_chan_alloc(void *tag, bus_dmamap_t **dmamap_in,
920 		   bus_dmamap_t **dmamap_out, void *object)
921 {
922 	struct gtidmac_softc *sc = tag;
923 	int chan;
924 
925 /* maybe need lock */
926 
927 	for (chan = 0; chan < sc->sc_gtidmac_nchan; chan++)
928 		if (sc->sc_cdesc[chan].chan_running == NULL)
929 			break;
930 	if (chan >= sc->sc_gtidmac_nchan)
931 		return -1;
932 
933 
934 	sc->sc_cdesc[chan].chan_running = object;
935 
936 /* unlock */
937 
938 	*dmamap_in = &sc->sc_cdesc[chan].chan_in;
939 	*dmamap_out = &sc->sc_cdesc[chan].chan_out;
940 
941 	return chan;
942 }
943 
944 void
945 gtidmac_chan_free(void *tag, int chan)
946 {
947 	struct gtidmac_softc *sc = tag;
948 
949 /* maybe need lock */
950 
951 	sc->sc_cdesc[chan].chan_running = NULL;
952 
953 /* unlock */
954 }
955 
956 /* ARGSUSED */
957 int
958 gtidmac_setup(void *tag, int chan, int ninputs, bus_dmamap_t *dmamap_in,
959 	      bus_dmamap_t *dmamap_out, bus_size_t size)
960 {
961 	struct gtidmac_softc *sc = tag;
962 	struct gtidmac_dma_desc *dd, *fstdd, *nxtdd;
963 	struct gtidmac_desc *desc;
964 	uint32_t ccl, bcnt, ires, ores;
965 	int n = 0, iidx, oidx;
966 
967 	KASSERT(ninputs == 0 || ninputs == 1);
968 
969 	ccl = bus_space_read_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_CCLR(chan));
970 #ifdef DIAGNOSTIC
971 	if (ccl & GTIDMAC_CCLR_CHANACT)
972 		panic("gtidmac_setup: chan%d already active", chan);
973 #endif
974 
975 	/* Always chained mode, at most (16M - 1) bytes per descriptor */
976 	ccl = (GTIDMAC_CCLR_DESCMODE_16M				|
977 #ifdef GTIDMAC_DEBUG
978 	    GTIDMAC_CCLR_CDEN						|
979 #endif
980 	    GTIDMAC_CCLR_TRANSFERMODE_B /* Transfer Mode: Block */	|
981 	    GTIDMAC_CCLR_INTMODE_NULL   /* Intr Mode: Next Desc NULL */	|
982 	    GTIDMAC_CCLR_CHAINMODE_C    /* Chain Mode: Chained */);
983 	if (size != (*dmamap_in)->dm_mapsize) {
984 		ccl |= GTIDMAC_CCLR_SRCHOLD;
985 		if ((*dmamap_in)->dm_mapsize == 8)
986 			ccl |= GTIDMAC_CCLR_SBL_8B;
987 		else if ((*dmamap_in)->dm_mapsize == 16)
988 			ccl |= GTIDMAC_CCLR_SBL_16B;
989 		else if ((*dmamap_in)->dm_mapsize == 32)
990 			ccl |= GTIDMAC_CCLR_SBL_32B;
991 		else if ((*dmamap_in)->dm_mapsize == 64)
992 			ccl |= GTIDMAC_CCLR_SBL_64B;
993 		else if ((*dmamap_in)->dm_mapsize == 128)
994 			ccl |= GTIDMAC_CCLR_SBL_128B;
995 		else
996 			panic("gtidmac_setup: chan%d source:"
997 			    " unsupported hold size", chan);
998 	} else
999 		ccl |= GTIDMAC_CCLR_SBL_128B;
1000 	if (size != (*dmamap_out)->dm_mapsize) {
1001 		ccl |= GTIDMAC_CCLR_DESTHOLD;
1002 		if ((*dmamap_out)->dm_mapsize == 8)
1003 			ccl |= GTIDMAC_CCLR_DBL_8B;
1004 		else if ((*dmamap_out)->dm_mapsize == 16)
1005 			ccl |= GTIDMAC_CCLR_DBL_16B;
1006 		else if ((*dmamap_out)->dm_mapsize == 32)
1007 			ccl |= GTIDMAC_CCLR_DBL_32B;
1008 		else if ((*dmamap_out)->dm_mapsize == 64)
1009 			ccl |= GTIDMAC_CCLR_DBL_64B;
1010 		else if ((*dmamap_out)->dm_mapsize == 128)
1011 			ccl |= GTIDMAC_CCLR_DBL_128B;
1012 		else
1013 			panic("gtidmac_setup: chan%d destination:"
1014 			    " unsupported hold size", chan);
1015 	} else
1016 		ccl |= GTIDMAC_CCLR_DBL_128B;
1017 
1018 	fstdd = SLIST_FIRST(&sc->sc_dlist);
1019 	if (fstdd == NULL) {
1020 		aprint_error_dev(sc->sc_dev, "no descriptor\n");
1021 		return ENOMEM;
1022 	}
1023 	SLIST_REMOVE_HEAD(&sc->sc_dlist, dd_next);
1024 	sc->sc_cdesc[chan].chan_ddidx = fstdd->dd_index;
1025 
1026 	dd = fstdd;
1027 	ires = ores = 0;
1028 	iidx = oidx = 0;
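	/*
	 * Walk the source and destination DMA segments, emitting one
	 * chained descriptor per contiguous run.  Each descriptor covers
	 * min(remaining input segment, remaining output segment) bytes,
	 * unless SRCHOLD/DESTHOLD pins the corresponding address (fill
	 * and pattern operations).
	 */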
1029 	while (1 /*CONSTCOND*/) {
1030 		if (ccl & GTIDMAC_CCLR_SRCHOLD) {
1031 			if (ccl & GTIDMAC_CCLR_DESTHOLD)
1032 				bcnt = size;	/* src/dst hold */
1033 			else
1034 				bcnt = (*dmamap_out)->dm_segs[oidx].ds_len;
1035 		} else if (ccl & GTIDMAC_CCLR_DESTHOLD)
1036 			bcnt = (*dmamap_in)->dm_segs[iidx].ds_len;
1037 		else
1038 			bcnt = min((*dmamap_in)->dm_segs[iidx].ds_len - ires,
1039 			    (*dmamap_out)->dm_segs[oidx].ds_len - ores);
1040 
1041 		desc = dd->dd_idmac_vaddr;
1042 		desc->bc.mode16m.bcnt =
1043 		    bcnt | GTIDMAC_CIDMABCR_BCLEFT | GTIDMAC_CIDMABCR_OWN;
1044 		desc->srcaddr = (*dmamap_in)->dm_segs[iidx].ds_addr + ires;
1045 		desc->dstaddr = (*dmamap_out)->dm_segs[oidx].ds_addr + ores;
1046 
1047 		n += bcnt;
1048 		if (n >= size)
1049 			break;
1050 		if (!(ccl & GTIDMAC_CCLR_SRCHOLD)) {
1051 			ires += bcnt;
1052 			if (ires >= (*dmamap_in)->dm_segs[iidx].ds_len) {
1053 				ires = 0;
1054 				iidx++;
1055 				KASSERT(iidx < (*dmamap_in)->dm_nsegs);
1056 			}
1057 		}
1058 		if (!(ccl & GTIDMAC_CCLR_DESTHOLD)) {
1059 			ores += bcnt;
1060 			if (ores >= (*dmamap_out)->dm_segs[oidx].ds_len) {
1061 				ores = 0;
1062 				oidx++;
1063 				KASSERT(oidx < (*dmamap_out)->dm_nsegs);
1064 			}
1065 		}
1066 
1067 		nxtdd = SLIST_FIRST(&sc->sc_dlist);
1068 		if (nxtdd == NULL) {
1069 			aprint_error_dev(sc->sc_dev, "no descriptor\n");
1070 			return ENOMEM;
1071 		}
1072 		SLIST_REMOVE_HEAD(&sc->sc_dlist, dd_next);
1073 
1074 		desc->nextdp = (uint32_t)nxtdd->dd_paddr;
1075 		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap,
1076 		    dd->dd_index * sizeof(*desc), sizeof(*desc),
1077 #ifdef GTIDMAC_DEBUG
1078 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1079 #else
1080 		    BUS_DMASYNC_PREWRITE);
1081 #endif
1082 
1083 		SLIST_INSERT_AFTER(dd, nxtdd, dd_next);
1084 		dd = nxtdd;
1085 	}
1086 	desc->nextdp = (uint32_t)NULL;
1087 	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap, dd->dd_index * sizeof(*desc),
1088 #ifdef GTIDMAC_DEBUG
1089 	    sizeof(*desc), BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1090 #else
1091 	    sizeof(*desc), BUS_DMASYNC_PREWRITE);
1092 #endif
1093 
1094 	/* Set paddr of descriptor to Channel Next Descriptor Pointer */
1095 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_CNDPR(chan),
1096 	    fstdd->dd_paddr);
1097 
1098 #if BYTE_ORDER == LITTLE_ENDIAN
1099 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_CCHR(chan),
1100 	    GTIDMAC_CCHR_DESCBYTESWAP | GTIDMAC_CCHR_ENDIAN_LE);
1101 #else
1102 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_CCHR(chan),
1103 	    GTIDMAC_CCHR_DESCBYTESWAP | GTIDMAC_CCHR_ENDIAN_BE);
1104 #endif
1105 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_CCLR(chan), ccl);
1106 
1107 #ifdef GTIDMAC_DEBUG
1108 	gtidmac_dump_idmacdesc(sc, fstdd, ccl, 0/*pre*/);
1109 #endif
1110 
1111 	sc->sc_cdesc[chan].chan_totalcnt += size;
1112 
1113 	return 0;
1114 }
1115 
1116 void
1117 gtidmac_start(void *tag, int chan,
1118 	      void (*dma_done_cb)(void *, int, bus_dmamap_t *, bus_dmamap_t *,
1119 				  int))
1120 {
1121 	struct gtidmac_softc *sc = tag;
1122 	uint32_t ccl;
1123 
1124 	DPRINTF(("%s:%d: starting\n", device_xname(sc->sc_dev), chan));
1125 
1126 #ifdef GTIDMAC_DEBUG
1127 	gtidmac_dump_idmacreg(sc, chan);
1128 #endif
1129 
1130 	sc->sc_cdesc[chan].chan_dma_done = dma_done_cb;
1131 
1132 	ccl = bus_space_read_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_CCLR(chan));
1133 	/* Start and 'Fetch Next Descriptor' */
1134 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_CCLR(chan),
1135 	    ccl | GTIDMAC_CCLR_CHANEN | GTIDMAC_CCLR_FETCHND);
1136 }
1137 
1138 static uint32_t
1139 gtidmac_finish(void *tag, int chan, int error)
1140 {
1141 	struct gtidmac_softc *sc = tag;
1142 	struct gtidmac_dma_desc *dd, *fstdd, *nxtdd;
1143 	struct gtidmac_desc *desc;
1144 
1145 	fstdd = &sc->sc_dd_buffer[sc->sc_cdesc[chan].chan_ddidx];
1146 
1147 #ifdef GTIDMAC_DEBUG
1148 	if (error || gtidmac_debug > 1) {
1149 		uint32_t ccl;
1150 
1151 		gtidmac_dump_idmacreg(sc, chan);
1152 		ccl = bus_space_read_4(sc->sc_iot, sc->sc_ioh,
1153 		    GTIDMAC_CCLR(chan));
1154 		gtidmac_dump_idmacdesc(sc, fstdd, ccl, 1/*post*/);
1155 	}
1156 #endif
1157 
1158 	dd = fstdd;
1159 	do {
1160 		desc = dd->dd_idmac_vaddr;
1161 
1162 		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap,
1163 		    dd->dd_index * sizeof(*desc), sizeof(*desc),
1164 #ifdef GTIDMAC_DEBUG
1165 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1166 #else
1167 		    BUS_DMASYNC_POSTWRITE);
1168 #endif
1169 
1170 		nxtdd = SLIST_NEXT(dd, dd_next);
1171 		SLIST_INSERT_HEAD(&sc->sc_dlist, dd, dd_next);
1172 		dd = nxtdd;
1173 	} while (desc->nextdp);
1174 
1175 	return 0;
1176 }
1177 
1178 /*
1179  * XORE functions
1180  */
1181 int
1182 mvxore_chan_alloc(void *tag, bus_dmamap_t **dmamap_in,
1183 		  bus_dmamap_t **dmamap_out, void *object)
1184 {
1185 	struct gtidmac_softc *sc = tag;
1186 	int chan;
1187 
1188 /* maybe need lock */
1189 
1190 	for (chan = 0; chan < sc->sc_mvxore_nchan; chan++)
1191 		if (sc->sc_cdesc_xore[chan].chan_running == NULL)
1192 			break;
1193 	if (chan >= sc->sc_mvxore_nchan)
1194 		return -1;
1195 
1196 
1197 	sc->sc_cdesc_xore[chan].chan_running = object;
1198 
1199 /* unlock */
1200 
1201 	*dmamap_in = sc->sc_cdesc_xore[chan].chan_in;
1202 	*dmamap_out = &sc->sc_cdesc_xore[chan].chan_out;
1203 
1204 	return chan;
1205 }
1206 
1207 void
1208 mvxore_chan_free(void *tag, int chan)
1209 {
1210 	struct gtidmac_softc *sc = tag;
1211 
1212 /* maybe need lock */
1213 
1214 	sc->sc_cdesc_xore[chan].chan_running = NULL;
1215 
1216 /* unlock */
1217 }
1218 
1219 /* ARGSUSED */
1220 int
1221 mvxore_setup(void *tag, int chan, int ninputs, bus_dmamap_t *dmamap_in,
1222 	     bus_dmamap_t *dmamap_out, bus_size_t size)
1223 {
1224 	struct gtidmac_softc *sc = tag;
1225 	struct gtidmac_dma_desc *dd, *fstdd, *nxtdd;
1226 	struct mvxore_desc *desc;
1227 	uint32_t xexc, bcnt, cmd, lastcmd;
1228 	int n = 0, i;
1229 	uint32_t ires[MVXORE_NSRC] = { 0, 0, 0, 0, 0, 0, 0, 0 }, ores = 0;
1230 	int iidx[MVXORE_NSRC] = { 0, 0, 0, 0, 0, 0, 0, 0 }, oidx = 0;
1231 
1232 #ifdef DIAGNOSTIC
1233 	uint32_t xexact;
1234 
1235 	xexact =
1236 	    bus_space_read_4(sc->sc_iot, sc->sc_ioh, MVXORE_XEXACTR(sc, chan));
1237 	if ((xexact & MVXORE_XEXACTR_XESTATUS_MASK) ==
1238 	    MVXORE_XEXACTR_XESTATUS_ACT)
1239 		panic("mvxore_setup: chan%d already active."
1240 		    " mvxore does not support hot insertion", chan);
1241 #endif
1242 
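	/*
	 * Pick the XOR engine operating mode from the shape of the request:
	 * two or more inputs select XOR, one input selects plain DMA copy
	 * (or CRC32 when no output map is present), and zero inputs select
	 * ECC cleanup; the memory-init path is currently compiled out.
	 */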
1243 	xexc =
1244 	    (MVXORE_XEXCR_REGACCPROTECT	|
1245 	     MVXORE_XEXCR_DBL_128B	|
1246 	     MVXORE_XEXCR_SBL_128B);
1247 	cmd = lastcmd = 0;
1248 	if (ninputs > 1) {
1249 		xexc |= MVXORE_XEXCR_OM_XOR;
1250 		lastcmd = cmd = (1 << ninputs) - 1;
1251 	} else if (ninputs == 1) {
1252 		if ((*dmamap_out)->dm_nsegs == 0) {
1253 			xexc |= MVXORE_XEXCR_OM_CRC32;
1254 			lastcmd = MVXORE_DESC_CMD_CRCLAST;
1255 		} else
1256 			xexc |= MVXORE_XEXCR_OM_DMA;
1257 	} else if (ninputs == 0) {
1258 		if ((*dmamap_out)->dm_nsegs != 1) {
1259 			aprint_error_dev(sc->sc_dev,
1260 			    "XORE does not support %d DMA segments\n",
1261 			    (*dmamap_out)->dm_nsegs);
1262 			return EINVAL;
1263 		}
1264 
1265 		if ((*dmamap_in)->dm_mapsize == 0) {
1266 			xexc |= MVXORE_XEXCR_OM_ECC;
1267 
1268 			/* XXXXX: Maybe need to set Timer Mode registers? */
1269 
1270 #if 0
1271 		} else if ((*dmamap_in)->dm_mapsize == 8 ||
1272 		    (*dmamap_in)->dm_mapsize == 16) { /* in case dmover */
1273 			uint64_t pattern;
1274 
1275 			/* XXXX: Get pattern data */
1276 
1277 			KASSERT((*dmamap_in)->dm_mapsize == 8 ||
1278 			    (void *)((uint32_t)(*dmamap_in)->_dm_origbuf &
1279 						~PAGE_MASK) == sc->sc_pbuf);
1280 			pattern = *(uint64_t *)(*dmamap_in)->_dm_origbuf;
1281 
1282 			/* XXXXX: XORE has an IVR.  We should get this first. */
1283 			bus_space_write_4(sc->sc_iot, sc->sc_ioh, MVXORE_XEIVRL,
1284 			    pattern);
1285 			bus_space_write_4(sc->sc_iot, sc->sc_ioh, MVXORE_XEIVRH,
1286 			    pattern >> 32);
1287 
1288 			xexc |= MVXORE_XEXCR_OM_MEMINIT;
1289 #endif
1290 		} else {
1291 			aprint_error_dev(sc->sc_dev,
1292 			    "XORE does not support DMA mapsize %zd\n",
1293 			    (*dmamap_in)->dm_mapsize);
1294 			return EINVAL;
1295 		}
1296 		bus_space_write_4(sc->sc_iot, sc->sc_ioh,
1297 		    MVXORE_XEXDPR(sc, chan), (*dmamap_out)->dm_segs[0].ds_addr);
1298 		bus_space_write_4(sc->sc_iot, sc->sc_ioh,
1299 		    MVXORE_XEXBSR(sc, chan), (*dmamap_out)->dm_mapsize);
1300 
1301 		bus_space_write_4(sc->sc_iot, sc->sc_ioh,
1302 		    MVXORE_XEXCR(sc, chan), xexc);
1303 		sc->sc_cdesc_xore[chan].chan_totalcnt += size;
1304 
1305 		return 0;
1306 	}
1307 
1308 	/* Make descriptor for DMA/CRC32/XOR */
1309 
1310 	fstdd = SLIST_FIRST(&sc->sc_dlist_xore);
1311 	if (fstdd == NULL) {
1312 		aprint_error_dev(sc->sc_dev, "no xore descriptor\n");
1313 		return ENOMEM;
1314 	}
1315 	SLIST_REMOVE_HEAD(&sc->sc_dlist_xore, dd_next);
1316 	sc->sc_cdesc_xore[chan].chan_ddidx =
1317 	    fstdd->dd_index + GTIDMAC_NDESC * sc->sc_gtidmac_nchan;
1318 
1319 	dd = fstdd;
1320 	while (1 /*CONSTCOND*/) {
1321 		desc = dd->dd_xore_vaddr;
1322 		desc->stat = MVXORE_DESC_STAT_OWN;
1323 		desc->cmd = cmd;
1324 		if ((*dmamap_out)->dm_nsegs != 0) {
1325 			desc->dstaddr =
1326 			    (*dmamap_out)->dm_segs[oidx].ds_addr + ores;
1327 			bcnt = (*dmamap_out)->dm_segs[oidx].ds_len - ores;
1328 		} else {
1329 			desc->dstaddr = 0;
1330 			bcnt = MVXORE_MAXXFER;	/* XXXXX */
1331 		}
1332 		for (i = 0; i < ninputs; i++) {
1333 			desc->srcaddr[i] =
1334 			    (*dmamap_in[i]).dm_segs[iidx[i]].ds_addr + ires[i];
1335 			bcnt = min(bcnt,
1336 			    (*dmamap_in[i]).dm_segs[iidx[i]].ds_len - ires[i]);
1337 		}
1338 		desc->bcnt = bcnt;
1339 
1340 		n += bcnt;
1341 		if (n >= size)
1342 			break;
1343 		ores += bcnt;
1344 		if ((*dmamap_out)->dm_nsegs != 0 &&
1345 		    ores >= (*dmamap_out)->dm_segs[oidx].ds_len) {
1346 			ores = 0;
1347 			oidx++;
1348 			KASSERT(oidx < (*dmamap_out)->dm_nsegs);
1349 		}
1350 		for (i = 0; i < ninputs; i++) {
1351 			ires[i] += bcnt;
1352 			if (ires[i] >=
1353 			    (*dmamap_in[i]).dm_segs[iidx[i]].ds_len) {
1354 				ires[i] = 0;
1355 				iidx[i]++;
1356 				KASSERT(iidx[i] < (*dmamap_in[i]).dm_nsegs);
1357 			}
1358 		}
1359 
1360 		nxtdd = SLIST_FIRST(&sc->sc_dlist_xore);
1361 		if (nxtdd == NULL) {
1362 			aprint_error_dev(sc->sc_dev, "no xore descriptor\n");
1363 			return ENOMEM;
1364 		}
1365 		SLIST_REMOVE_HEAD(&sc->sc_dlist_xore, dd_next);
1366 
1367 		desc->nextda = (uint32_t)nxtdd->dd_paddr;
1368 		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap_xore,
1369 		    dd->dd_index * sizeof(*desc), sizeof(*desc),
1370 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1371 
1372 		SLIST_INSERT_AFTER(dd, nxtdd, dd_next);
1373 		dd = nxtdd;
1374 	}
1375 	desc->cmd = lastcmd;
1376 	desc->nextda = (uint32_t)NULL;
1377 	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap_xore,
1378 	    dd->dd_index * sizeof(*desc), sizeof(*desc),
1379 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1380 
1381 	/* Set paddr of descriptor to Channel Next Descriptor Pointer */
1382 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, MVXORE_XEXNDPR(sc, chan),
1383 	    fstdd->dd_paddr);
1384 
1385 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, MVXORE_XEXCR(sc, chan), xexc);
1386 
1387 #ifdef GTIDMAC_DEBUG
1388 	gtidmac_dump_xoredesc(sc, fstdd, xexc, 0/*pre*/);
1389 #endif
1390 
1391 	sc->sc_cdesc_xore[chan].chan_totalcnt += size;
1392 
1393 	return 0;
1394 }
1395 
1396 void
1397 mvxore_start(void *tag, int chan,
1398 	     void (*dma_done_cb)(void *, int, bus_dmamap_t *, bus_dmamap_t *,
1399 				 int))
1400 {
1401 	struct gtidmac_softc *sc = tag;
1402 	uint32_t xexact;
1403 
1404 	DPRINTF(("%s:%d: xore starting\n", device_xname(sc->sc_dev), chan));
1405 
1406 #ifdef GTIDMAC_DEBUG
1407 	gtidmac_dump_xorereg(sc, chan);
1408 #endif
1409 
1410 	sc->sc_cdesc_xore[chan].chan_dma_done = dma_done_cb;
1411 
1412 	xexact =
1413 	    bus_space_read_4(sc->sc_iot, sc->sc_ioh, MVXORE_XEXACTR(sc, chan));
1414 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, MVXORE_XEXACTR(sc, chan),
1415 	    xexact | MVXORE_XEXACTR_XESTART);
1416 }
1417 
1418 static uint32_t
1419 mvxore_finish(void *tag, int chan, int error)
1420 {
1421 	struct gtidmac_softc *sc = tag;
1422 	struct gtidmac_dma_desc *dd, *fstdd, *nxtdd;
1423 	struct mvxore_desc *desc;
1424 	uint32_t xexc;
1425 
1426 #ifdef GTIDMAC_DEBUG
1427 	if (error || gtidmac_debug > 1)
1428 		gtidmac_dump_xorereg(sc, chan);
1429 #endif
1430 
1431 	xexc = bus_space_read_4(sc->sc_iot, sc->sc_ioh, MVXORE_XEXCR(sc, chan));
1432 	if ((xexc & MVXORE_XEXCR_OM_MASK) == MVXORE_XEXCR_OM_ECC ||
1433 	    (xexc & MVXORE_XEXCR_OM_MASK) == MVXORE_XEXCR_OM_MEMINIT)
1434 		return 0;
1435 
1436 	fstdd = &sc->sc_dd_buffer[sc->sc_cdesc_xore[chan].chan_ddidx];
1437 
1438 #ifdef GTIDMAC_DEBUG
1439 	if (error || gtidmac_debug > 1)
1440 		gtidmac_dump_xoredesc(sc, fstdd, xexc, 1/*post*/);
1441 #endif
1442 
1443 	dd = fstdd;
1444 	do {
1445 		desc = dd->dd_xore_vaddr;
1446 
1447 		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap_xore,
1448 		    dd->dd_index * sizeof(*desc), sizeof(*desc),
1449 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1450 
1451 		nxtdd = SLIST_NEXT(dd, dd_next);
1452 		SLIST_INSERT_HEAD(&sc->sc_dlist_xore, dd, dd_next);
1453 		dd = nxtdd;
1454 	} while (desc->nextda);
1455 
1456 	if ((xexc & MVXORE_XEXCR_OM_MASK) == MVXORE_XEXCR_OM_CRC32)
1457 		return desc->result;
1458 	return 0;
1459 }
1460 
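/*
 * Program the IDMAC address decoding windows so the engine can reach the
 * SDRAM chip selects reported by the parent bus; unused windows are left
 * disabled and every channel is granted access to the configured ones.
 * mvxore_wininit() below does the same for the XOR engine.
 */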
1461 static void
1462 gtidmac_wininit(struct gtidmac_softc *sc)
1463 {
1464 	device_t pdev = device_parent(sc->sc_dev);
1465 	uint64_t base;
1466 	uint32_t size, cxap, en;
1467 	int window, target, attr, rv, i;
1468 	struct {
1469 		int tag;
1470 		int winacc;
1471 	} targets[] = {
1472 		{ MARVELL_TAG_SDRAM_CS0,	GTIDMAC_CXAPR_WINACC_FA },
1473 		{ MARVELL_TAG_SDRAM_CS1,	GTIDMAC_CXAPR_WINACC_FA },
1474 		{ MARVELL_TAG_SDRAM_CS2,	GTIDMAC_CXAPR_WINACC_FA },
1475 		{ MARVELL_TAG_SDRAM_CS3,	GTIDMAC_CXAPR_WINACC_FA },
1476 
1477 		/* The following targets could also be configured: */
1478 		/*   Devices       = 0x1(ORION_TARGETID_DEVICE_*) */
1479 		/*   PCI           = 0x3(ORION_TARGETID_PCI0_*) */
1480 		/*   PCI Express   = 0x4(ORION_TARGETID_PEX?_*) */
1481 		/*   Tunit SRAM(?) = 0x5(???) */
1482 
1483 		{ MARVELL_TAG_UNDEFINED,	GTIDMAC_CXAPR_WINACC_NOAA }
1484 	};
1485 
1486 	en = 0xff;
1487 	cxap = 0;
1488 	for (window = 0, i = 0;
1489 	    targets[i].tag != MARVELL_TAG_UNDEFINED && window < GTIDMAC_NWINDOW;
1490 	    i++) {
1491 		rv = marvell_winparams_by_tag(pdev, targets[i].tag,
1492 		    &target, &attr, &base, &size);
1493 		if (rv != 0 || size == 0)
1494 			continue;
1495 
1496 		if (base > 0xffffffffULL) {
1497 			if (window >= GTIDMAC_NREMAP) {
1498 				aprint_error_dev(sc->sc_dev,
1499 				    "can't remap window %d\n", window);
1500 				continue;
1501 			}
1502 			bus_space_write_4(sc->sc_iot, sc->sc_ioh,
1503 			    GTIDMAC_HARXR(window), (base >> 32) & 0xffffffff);
1504 		}
1505 		bus_space_write_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_BARX(window),
1506 		    GTIDMAC_BARX_TARGET(target)	|
1507 		    GTIDMAC_BARX_ATTR(attr)	|
1508 		    GTIDMAC_BARX_BASE(base));
1509 		bus_space_write_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_SRX(window),
1510 		    GTIDMAC_SRX_SIZE(size));
1511 		en &= ~GTIDMAC_BAER_EN(window);
1512 		cxap |= GTIDMAC_CXAPR_WINACC(window, targets[i].winacc);
1513 		window++;
1514 	}
1515 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_BAER, en);
1516 
1517 	for (i = 0; i < GTIDMAC_NACCPROT; i++)
1518 		bus_space_write_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_CXAPR(i),
1519 		    cxap);
1520 }
1521 
1522 static void
1523 mvxore_wininit(struct gtidmac_softc *sc)
1524 {
1525 	device_t pdev = device_parent(sc->sc_dev);
1526 	uint64_t base;
1527 	uint32_t target, attr, size, xexwc;
1528 	int window, rv, i, p;
1529 	struct {
1530 		int tag;
1531 		int winacc;
1532 	} targets[] = {
1533 		{ MARVELL_TAG_SDRAM_CS0,	MVXORE_XEXWCR_WINACC_FA },
1534 		{ MARVELL_TAG_SDRAM_CS1,	MVXORE_XEXWCR_WINACC_FA },
1535 		{ MARVELL_TAG_SDRAM_CS2,	MVXORE_XEXWCR_WINACC_FA },
1536 		{ MARVELL_TAG_SDRAM_CS3,	MVXORE_XEXWCR_WINACC_FA },
1537 
1538 		{ MARVELL_TAG_UNDEFINED,	MVXORE_XEXWCR_WINACC_NOAA }
1539 	};
1540 
1541 	xexwc = 0;
1542 	for (window = 0, i = 0;
1543 	    targets[i].tag != MARVELL_TAG_UNDEFINED && window < MVXORE_NWINDOW;
1544 	    i++) {
1545 		rv = marvell_winparams_by_tag(pdev, targets[i].tag,
1546 		    &target, &attr, &base, &size);
1547 		if (rv != 0 || size == 0)
1548 			continue;
1549 
1550 		if (base > 0xffffffffULL) {
1551 			if (window >= MVXORE_NREMAP) {
1552 				aprint_error_dev(sc->sc_dev,
1553 				    "can't remap window %d\n", window);
1554 				continue;
1555 			}
1556 			for (p = 0; p < sc->sc_mvxore_nchan >> 1; p++)
1557 				bus_space_write_4(sc->sc_iot, sc->sc_ioh,
1558 				    MVXORE_XEHARRX(sc, p, window),
1559 				    (base >> 32) & 0xffffffff);
1560 		}
1561 
1562 		for (p = 0; p < sc->sc_mvxore_nchan >> 1; p++) {
1563 			bus_space_write_4(sc->sc_iot, sc->sc_ioh,
1564 			    MVXORE_XEBARX(sc, p, window),
1565 			    MVXORE_XEBARX_TARGET(target) |
1566 			    MVXORE_XEBARX_ATTR(attr) |
1567 			    MVXORE_XEBARX_BASE(base));
1568 			bus_space_write_4(sc->sc_iot, sc->sc_ioh,
1569 			    MVXORE_XESMRX(sc, p, window),
1570 			    MVXORE_XESMRX_SIZE(size));
1571 		}
1572 		xexwc |= (MVXORE_XEXWCR_WINEN(window) |
1573 		    MVXORE_XEXWCR_WINACC(window, targets[i].winacc));
1574 		window++;
1575 	}
1576 
1577 	for (i = 0; i < sc->sc_mvxore_nchan; i++) {
1578 		bus_space_write_4(sc->sc_iot, sc->sc_ioh, MVXORE_XEXWCR(sc, i),
1579 		    xexwc);
1580 
1581 		/* XXXXX: reset... */
1582 		bus_space_write_4(sc->sc_iot, sc->sc_ioh, MVXORE_XEXAOCR(sc, 0),
1583 		    0);
1584 	}
1585 }
1586 
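/*
 * Allocate and map a contiguous pool of GTIDMAC_NDESC descriptors per
 * channel, thread them onto the free list, create the per-channel in/out
 * dmamaps and unmask the completion and error interrupts.
 */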
1587 static int
1588 gtidmac_buffer_setup(struct gtidmac_softc *sc)
1589 {
1590 	bus_dma_segment_t segs;
1591 	struct gtidmac_dma_desc *dd;
1592 	uint32_t mask;
1593 	int nchan, nsegs, i;
1594 
1595 	nchan = sc->sc_gtidmac_nchan;
1596 
1597 	if (bus_dmamem_alloc(sc->sc_dmat,
1598 	    sizeof(struct gtidmac_desc) * GTIDMAC_NDESC * nchan,
1599 	    PAGE_SIZE, 0, &segs, 1, &nsegs, BUS_DMA_NOWAIT)) {
1600 		aprint_error_dev(sc->sc_dev,
1601 		    "bus_dmamem_alloc failed: descriptor buffer\n");
1602 		goto fail0;
1603 	}
1604 	if (bus_dmamem_map(sc->sc_dmat, &segs, 1,
1605 	    sizeof(struct gtidmac_desc) * GTIDMAC_NDESC * nchan,
1606 	    (void **)&sc->sc_dbuf, BUS_DMA_NOWAIT)) {
1607 		aprint_error_dev(sc->sc_dev,
1608 		    "bus_dmamem_map failed: descriptor buffer\n");
1609 		goto fail1;
1610 	}
1611 	if (bus_dmamap_create(sc->sc_dmat,
1612 	    sizeof(struct gtidmac_desc) * GTIDMAC_NDESC * nchan, 1,
1613 	    sizeof(struct gtidmac_desc) * GTIDMAC_NDESC * nchan, 0,
1614 	    BUS_DMA_NOWAIT, &sc->sc_dmap)) {
1615 		aprint_error_dev(sc->sc_dev,
1616 		    "bus_dmamap_create failed: descriptor buffer\n");
1617 		goto fail2;
1618 	}
1619 	if (bus_dmamap_load(sc->sc_dmat, sc->sc_dmap, sc->sc_dbuf,
1620 	    sizeof(struct gtidmac_desc) * GTIDMAC_NDESC * nchan,
1621 	    NULL, BUS_DMA_NOWAIT)) {
1622 		aprint_error_dev(sc->sc_dev,
1623 		    "bus_dmamap_load failed: descriptor buffer\n");
1624 		goto fail3;
1625 	}
1626 	SLIST_INIT(&sc->sc_dlist);
1627 	for (i = 0; i < GTIDMAC_NDESC * nchan; i++) {
1628 		dd = &sc->sc_dd_buffer[i];
1629 		dd->dd_index = i;
1630 		dd->dd_idmac_vaddr = &sc->sc_dbuf[i];
1631 		dd->dd_paddr = sc->sc_dmap->dm_segs[0].ds_addr +
1632 		    (sizeof(struct gtidmac_desc) * i);
1633 		SLIST_INSERT_HEAD(&sc->sc_dlist, dd, dd_next);
1634 	}
1635 
1636 	/* Initialize IDMAC DMA channels */
1637 	mask = 0;
1638 	for (i = 0; i < nchan; i++) {
1639 		if (i > 0 && ((i * GTIDMAC_I_BITS) & 31 /*bit*/) == 0) {
1640 			bus_space_write_4(sc->sc_iot, sc->sc_ioh,
1641 			    GTIDMAC_IMR(i - 1), mask);
1642 			mask = 0;
1643 		}
1644 
1645 		if (bus_dmamap_create(sc->sc_dmat, GTIDMAC_MAXXFER,
1646 		    GTIDMAC_NSEGS, GTIDMAC_MAXXFER, 0, BUS_DMA_NOWAIT,
1647 		    &sc->sc_cdesc[i].chan_in)) {
1648 			aprint_error_dev(sc->sc_dev,
1649 			    "bus_dmamap_create failed: chan%d in\n", i);
1650 			goto fail4;
1651 		}
1652 		if (bus_dmamap_create(sc->sc_dmat, GTIDMAC_MAXXFER,
1653 		    GTIDMAC_NSEGS, GTIDMAC_MAXXFER, 0, BUS_DMA_NOWAIT,
1654 		    &sc->sc_cdesc[i].chan_out)) {
1655 			aprint_error_dev(sc->sc_dev,
1656 			    "bus_dmamap_create failed: chan%d out\n", i);
1657 			bus_dmamap_destroy(sc->sc_dmat,
1658 			    sc->sc_cdesc[i].chan_in);
1659 			goto fail4;
1660 		}
1661 		sc->sc_cdesc[i].chan_totalcnt = 0;
1662 		sc->sc_cdesc[i].chan_running = NULL;
1663 
1664 		/* Overflowing bits are ignored; the mask register is 32 bits. */
1665 		mask |= GTIDMAC_I(i,
1666 		    GTIDMAC_I_COMP	|
1667 		    GTIDMAC_I_ADDRMISS	|
1668 		    GTIDMAC_I_ACCPROT	|
1669 		    GTIDMAC_I_WRPROT	|
1670 		    GTIDMAC_I_OWN);
1671 
1672 		/* 8bits/channel * 4channels => 32bit */
1673 		if ((i & 0x3) == 0x3) {
1674 			bus_space_write_4(sc->sc_iot, sc->sc_ioh,
1675 			    GTIDMAC_IMR(i), mask);
1676 			mask = 0;
1677 		}
1678 	}
1679 
1680 	return 0;
1681 
1682 fail4:
1683 	for (; i-- > 0;) {
1684 		bus_dmamap_destroy(sc->sc_dmat, sc->sc_cdesc[i].chan_in);
1685 		bus_dmamap_destroy(sc->sc_dmat, sc->sc_cdesc[i].chan_out);
1686 	}
1687 	bus_dmamap_unload(sc->sc_dmat, sc->sc_dmap);
1688 fail3:
1689 	bus_dmamap_destroy(sc->sc_dmat, sc->sc_dmap);
1690 fail2:
1691 	bus_dmamem_unmap(sc->sc_dmat, sc->sc_dbuf,
1692 	    sizeof(struct gtidmac_desc) * GTIDMAC_NDESC);
1693 fail1:
1694 	bus_dmamem_free(sc->sc_dmat, &segs, 1);
1695 fail0:
1696 	return -1;
1697 }
1698 
1699 static int
1700 mvxore_buffer_setup(struct gtidmac_softc *sc)
1701 {
1702 	bus_dma_segment_t segs;
1703 	struct gtidmac_dma_desc *dd;
1704 	uint32_t mask;
1705 	int nchan, nsegs, i, j;
1706 
1707 	nchan = sc->sc_mvxore_nchan;
1708 
1709 	if (bus_dmamem_alloc(sc->sc_dmat,
1710 	    sizeof(struct mvxore_desc) * MVXORE_NDESC * nchan,
1711 	    PAGE_SIZE, 0, &segs, 1, &nsegs, BUS_DMA_NOWAIT)) {
1712 		aprint_error_dev(sc->sc_dev,
1713 		    "bus_dmamem_alloc failed: xore descriptor buffer\n");
1714 		goto fail0;
1715 	}
1716 	if (bus_dmamem_map(sc->sc_dmat, &segs, 1,
1717 	    sizeof(struct mvxore_desc) * MVXORE_NDESC * nchan,
1718 	    (void **)&sc->sc_dbuf_xore, BUS_DMA_NOWAIT)) {
1719 		aprint_error_dev(sc->sc_dev,
1720 		    "bus_dmamem_map failed: xore descriptor buffer\n");
1721 		goto fail1;
1722 	}
1723 	if (bus_dmamap_create(sc->sc_dmat,
1724 	    sizeof(struct mvxore_desc) * MVXORE_NDESC * nchan, 1,
1725 	    sizeof(struct mvxore_desc) * MVXORE_NDESC * nchan, 0,
1726 	    BUS_DMA_NOWAIT, &sc->sc_dmap_xore)) {
1727 		aprint_error_dev(sc->sc_dev,
1728 		    "bus_dmamap_create failed: xore descriptor buffer\n");
1729 		goto fail2;
1730 	}
1731 	if (bus_dmamap_load(sc->sc_dmat, sc->sc_dmap_xore, sc->sc_dbuf_xore,
1732 	    sizeof(struct mvxore_desc) * MVXORE_NDESC * nchan,
1733 	    NULL, BUS_DMA_NOWAIT)) {
1734 		aprint_error_dev(sc->sc_dev,
1735 		    "bus_dmamap_load failed: xore descriptor buffer\n");
1736 		goto fail3;
1737 	}
1738 	SLIST_INIT(&sc->sc_dlist_xore);
1739 	for (i = 0; i < MVXORE_NDESC * nchan; i++) {
1740 		dd =
1741 		    &sc->sc_dd_buffer[i + GTIDMAC_NDESC * sc->sc_gtidmac_nchan];
1742 		dd->dd_index = i;
1743 		dd->dd_xore_vaddr = &sc->sc_dbuf_xore[i];
1744 		dd->dd_paddr = sc->sc_dmap_xore->dm_segs[0].ds_addr +
1745 		    (sizeof(struct mvxore_desc) * i);
1746 		SLIST_INSERT_HEAD(&sc->sc_dlist_xore, dd, dd_next);
1747 	}
1748 
1749 	/* Initialize XORE DMA channels */
1750 	mask = 0;
1751 	for (i = 0; i < nchan; i++) {
1752 		for (j = 0; j < MVXORE_NSRC; j++) {
1753 			if (bus_dmamap_create(sc->sc_dmat,
1754 			    MVXORE_MAXXFER, MVXORE_NSEGS,
1755 			    MVXORE_MAXXFER, 0, BUS_DMA_NOWAIT,
1756 			    &sc->sc_cdesc_xore[i].chan_in[j])) {
1757 				aprint_error_dev(sc->sc_dev,
1758 				    "bus_dmamap_create failed:"
1759 				    " xore chan%d in[%d]\n", i, j);
1760 				goto fail4;
1761 			}
1762 		}
1763 		if (bus_dmamap_create(sc->sc_dmat, MVXORE_MAXXFER,
1764 		    MVXORE_NSEGS, MVXORE_MAXXFER, 0,
1765 		    BUS_DMA_NOWAIT, &sc->sc_cdesc_xore[i].chan_out)) {
1766 			aprint_error_dev(sc->sc_dev,
1767 			    "bus_dmamap_create failed: chan%d out\n", i);
1768 			goto fail5;
1769 		}
1770 		sc->sc_cdesc_xore[i].chan_totalcnt = 0;
1771 		sc->sc_cdesc_xore[i].chan_running = NULL;
1772 
1773 		mask |= MVXORE_I(i,
1774 		    MVXORE_I_EOC	|
1775 		    MVXORE_I_ADDRDECODE	|
1776 		    MVXORE_I_ACCPROT	|
1777 		    MVXORE_I_WRPROT	|
1778 		    MVXORE_I_OWN	|
1779 		    MVXORE_I_INTPARITY	|
1780 		    MVXORE_I_XBAR);
1781 
1782 		/* 16bits/channel * 2channels => 32bit */
1783 		if (i & 0x1) {
1784 			bus_space_write_4(sc->sc_iot, sc->sc_ioh,
1785 			    MVXORE_XEIMR(sc, i >> 1), mask);
1786 			mask = 0;
1787 		}
1788 	}
1789 
1790 	return 0;
1791 
1792 	for (; i-- > 0;) {
1793 		bus_dmamap_destroy(sc->sc_dmat, sc->sc_cdesc_xore[i].chan_out);
1794 
1795 fail5:
1796 		j = MVXORE_NSRC;
1797 fail4:
1798 		for (; j-- > 0;)
1799 			bus_dmamap_destroy(sc->sc_dmat,
1800 			    sc->sc_cdesc_xore[i].chan_in[j]);
1801 	}
1802 	bus_dmamap_unload(sc->sc_dmat, sc->sc_dmap_xore);
1803 fail3:
1804 	bus_dmamap_destroy(sc->sc_dmat, sc->sc_dmap_xore);
1805 fail2:
1806 	bus_dmamem_unmap(sc->sc_dmat, sc->sc_dbuf_xore,
1807 	    sizeof(struct mvxore_desc) * MVXORE_NDESC);
1808 fail1:
1809 	bus_dmamem_free(sc->sc_dmat, &segs, 1);
1810 fail0:
1811 	return -1;
1812 }
1813 
1814 #ifdef GTIDMAC_DEBUG
1815 static void
1816 gtidmac_dump_idmacreg(struct gtidmac_softc *sc, int chan)
1817 {
1818 	uint32_t val;
1819 	char buf[256];
1820 
1821 	printf("IDMAC Registers\n");
1822 
1823 	val = bus_space_read_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_CIDMABCR(chan));
1824 	snprintb(buf, sizeof(buf), "\177\020b\037Own\0b\036BCLeft\0", val);
1825 	printf("  Byte Count                 : %s\n", buf);
1826 	printf("    ByteCnt                  :   0x%06x\n",
1827 	    val & GTIDMAC_CIDMABCR_BYTECNT_MASK);
1828 	printf("  Source Address             : 0x%08x\n",
1829 	    bus_space_read_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_CIDMASAR(chan)));
1830 	printf("  Destination Address        : 0x%08x\n",
1831 	    bus_space_read_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_CIDMADAR(chan)));
1832 	printf("  Next Descriptor Pointer    : 0x%08x\n",
1833 	    bus_space_read_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_CNDPR(chan)));
1834 	printf("  Current Descriptor Pointer : 0x%08x\n",
1835 	    bus_space_read_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_CCDPR(chan)));
1836 
1837 	val = bus_space_read_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_CCLR(chan));
1838 	snprintb(buf, sizeof(buf),
1839 	    "\177\020b\024Abr\0b\021CDEn\0b\016ChanAct\0b\015FetchND\0"
1840 	    "b\014ChanEn\0b\012IntMode\0b\005DestHold\0b\003SrcHold\0",
1841 	    val);
1842 	printf("  Channel Control (Low)      : %s\n", buf);
1843 	printf("    SrcBurstLimit            : %s Bytes\n",
1844 	  (val & GTIDMAC_CCLR_SBL_MASK) == GTIDMAC_CCLR_SBL_128B ? "128" :
1845 	    (val & GTIDMAC_CCLR_SBL_MASK) == GTIDMAC_CCLR_SBL_64B ? "64" :
1846 	    (val & GTIDMAC_CCLR_SBL_MASK) == GTIDMAC_CCLR_SBL_32B ? "32" :
1847 	    (val & GTIDMAC_CCLR_SBL_MASK) == GTIDMAC_CCLR_SBL_16B ? "16" :
1848 	    (val & GTIDMAC_CCLR_SBL_MASK) == GTIDMAC_CCLR_SBL_8B ? "8" :
1849 	    "unknown");
1850 	printf("    DstBurstLimit            : %s Bytes\n",
1851 	  (val & GTIDMAC_CCLR_DBL_MASK) == GTIDMAC_CCLR_DBL_128B ? "128" :
1852 	    (val & GTIDMAC_CCLR_DBL_MASK) == GTIDMAC_CCLR_DBL_64B ? "64" :
1853 	    (val & GTIDMAC_CCLR_DBL_MASK) == GTIDMAC_CCLR_DBL_32B ? "32" :
1854 	    (val & GTIDMAC_CCLR_DBL_MASK) == GTIDMAC_CCLR_DBL_16B ? "16" :
1855 	    (val & GTIDMAC_CCLR_DBL_MASK) == GTIDMAC_CCLR_DBL_8B ? "8" :
1856 	    "unknown");
1857 	printf("    ChainMode                : %sChained\n",
1858 	    val & GTIDMAC_CCLR_CHAINMODE_NC ? "Non-" : "");
1859 	printf("    TransferMode             : %s\n",
1860 	    val & GTIDMAC_CCLR_TRANSFERMODE_B ? "Block" : "Demand");
1861 	printf("    DescMode                 : %s\n",
1862 	    val & GTIDMAC_CCLR_DESCMODE_16M ? "16M" : "64k");
1863 	val = bus_space_read_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_CCHR(chan));
1864 	snprintb(buf, sizeof(buf),
1865 	    "\177\020b\001DescByteSwap\0b\000Endianness\0", val);
1866 	printf("  Channel Control (High)     : %s\n", buf);
1867 }
1868 
1869 static void
1870 gtidmac_dump_idmacdesc(struct gtidmac_softc *sc, struct gtidmac_dma_desc *dd,
1871 		       uint32_t mode, int post)
1872 {
1873 	struct gtidmac_desc *desc;
1874 	int i;
1875 	char buf[256];
1876 
1877 	printf("IDMAC Descriptor\n");
1878 
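	/*
	 * Walk the chained descriptors until the next-descriptor pointer is
	 * NULL.  For a post-transfer dump, sync each descriptor for the CPU
	 * before printing it; otherwise re-sync it for the device afterwards.
	 */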
1879 	i = 0;
1880 	while (1 /*CONSTCOND*/) {
1881 		if (post)
1882 			bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap,
1883 			    dd->dd_index * sizeof(*desc), sizeof(*desc),
1884 			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1885 
1886 		desc = dd->dd_idmac_vaddr;
1887 
1888 		printf("%d (0x%lx)\n", i, dd->dd_paddr);
1889 		if (mode & GTIDMAC_CCLR_DESCMODE_16M) {
1890 			snprintb(buf, sizeof(buf),
1891 			    "\177\020b\037Own\0b\036BCLeft\0",
1892 			    desc->bc.mode16m.bcnt);
1893 			printf("  Byte Count              : %s\n", buf);
1894 			printf("    ByteCount             :   0x%06x\n",
1895 			    desc->bc.mode16m.bcnt &
1896 			    GTIDMAC_CIDMABCR_BYTECNT_MASK);
1897 		} else {
1898 			printf("  Byte Count              :     0x%04x\n",
1899 			    desc->bc.mode64k.bcnt);
1900 			printf("  Remaining Byte Count    :     0x%04x\n",
1901 			    desc->bc.mode64k.rbc);
1902 		}
1903 		printf("  Source Address          : 0x%08x\n", desc->srcaddr);
1904 		printf("  Destination Address     : 0x%08x\n", desc->dstaddr);
1905 		printf("  Next Descriptor Pointer : 0x%08x\n", desc->nextdp);
1906 
1907 		if (desc->nextdp == (uint32_t)NULL)
1908 			break;
1909 
1910 		if (!post)
1911 			bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap,
1912 			    dd->dd_index * sizeof(*desc), sizeof(*desc),
1913 			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1914 
1915 		i++;
1916 		dd = SLIST_NEXT(dd, dd_next);
1917 	}
1918 	if (!post)
1919 		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap,
1920 		    dd->dd_index * sizeof(*desc), sizeof(*desc),
1921 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1922 }
1923 
1924 static void
1925 gtidmac_dump_xorereg(struct gtidmac_softc *sc, int chan)
1926 {
1927 	uint32_t val, opmode;
1928 	char buf[64];
1929 
1930 	printf("XORE Registers\n");
1931 
1932 	val = bus_space_read_4(sc->sc_iot, sc->sc_ioh, MVXORE_XEXCR(sc, chan));
1933 	snprintb(buf, sizeof(buf),
1934 	    "\177\020"
1935 	    "b\017RegAccProtect\0b\016DesSwp\0b\015DwrReqSwp\0b\014DrdResSwp\0",
1936 	    val);
1937 	printf(" Configuration    : %s\n", buf);
1938 	opmode = val & MVXORE_XEXCR_OM_MASK;
1939 	printf("    OperationMode : %s operation\n",
1940 	  opmode == MVXORE_XEXCR_OM_XOR ? "XOR calculate" :
1941 	  opmode == MVXORE_XEXCR_OM_CRC32 ? "CRC-32 calculate" :
1942 	  opmode == MVXORE_XEXCR_OM_DMA ? "DMA" :
1943 	  opmode == MVXORE_XEXCR_OM_ECC ? "ECC cleanup" :
1944 	  opmode == MVXORE_XEXCR_OM_MEMINIT ? "Memory Initialization" :
1945 	  "unknown");
1946 	printf("    SrcBurstLimit : %s Bytes\n",
1947 	  (val & MVXORE_XEXCR_SBL_MASK) == MVXORE_XEXCR_SBL_128B ? "128" :
1948 	    (val & MVXORE_XEXCR_SBL_MASK) == MVXORE_XEXCR_SBL_64B ? "64" :
1949 	    (val & MVXORE_XEXCR_SBL_MASK) == MVXORE_XEXCR_SBL_32B ? "32" :
1950 	    "unknown");
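	/*
	 * XXX The destination burst limit below is decoded with the source
	 * burst limit (SBL) mask and values; this looks like a copy-and-paste
	 * from the SrcBurstLimit case above.
	 */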
1951 	printf("    DstBurstLimit : %s Bytes\n",
1952 	  (val & MVXORE_XEXCR_SBL_MASK) == MVXORE_XEXCR_SBL_128B ? "128" :
1953 	    (val & MVXORE_XEXCR_SBL_MASK) == MVXORE_XEXCR_SBL_64B ? "64" :
1954 	    (val & MVXORE_XEXCR_SBL_MASK) == MVXORE_XEXCR_SBL_32B ? "32" :
1955 	    "unknown");
1956 	val =
1957 	    bus_space_read_4(sc->sc_iot, sc->sc_ioh, MVXORE_XEXACTR(sc, chan));
1958 	printf("  Activation      : 0x%08x\n", val);
1959 	val &= MVXORE_XEXACTR_XESTATUS_MASK;
1960 	printf("    XEstatus      : %s\n",
1961 	    val == MVXORE_XEXACTR_XESTATUS_NA ? "Channel not active" :
1962 	    val == MVXORE_XEXACTR_XESTATUS_ACT ? "Channel active" :
1963 	    val == MVXORE_XEXACTR_XESTATUS_P ? "Channel paused" : "???");
1964 
1965 	if (opmode == MVXORE_XEXCR_OM_XOR ||
1966 	    opmode == MVXORE_XEXCR_OM_CRC32 ||
1967 	    opmode == MVXORE_XEXCR_OM_DMA) {
1968 		printf("  NextDescPtr     : 0x%08x\n",
1969 		    bus_space_read_4(sc->sc_iot, sc->sc_ioh,
1970 		    MVXORE_XEXNDPR(sc, chan)));
1971 		printf("  CurrentDescPtr  : 0x%08x\n",
1972 		    bus_space_read_4(sc->sc_iot, sc->sc_ioh,
1973 		    MVXORE_XEXCDPR(chan)));
1974 	}
1975 	printf("  ByteCnt         : 0x%08x\n",
1976 	    bus_space_read_4(sc->sc_iot, sc->sc_ioh, MVXORE_XEXBCR(chan)));
1977 
1978 	if (opmode == MVXORE_XEXCR_OM_ECC ||
1979 	    opmode == MVXORE_XEXCR_OM_MEMINIT) {
1980 		printf("  DstPtr          : 0x%08x\n",
1981 		    bus_space_read_4(sc->sc_iot, sc->sc_ioh,
1982 		    MVXORE_XEXDPR(sc, chan)));
1983 		printf("  BlockSize       : 0x%08x\n",
1984 		    bus_space_read_4(sc->sc_iot, sc->sc_ioh,
1985 		    MVXORE_XEXBSR(sc, chan)));
1986 
1987 		if (opmode == MVXORE_XEXCR_OM_ECC) {
1988 			val = bus_space_read_4(sc->sc_iot, sc->sc_ioh,
1989 			    MVXORE_XETMCR);
1990 			if (val & MVXORE_XETMCR_TIMEREN) {
1991 				val >>= MVXORE_XETMCR_SECTIONSIZECTRL_SHIFT;
1992 				val &= MVXORE_XETMCR_SECTIONSIZECTRL_MASK;
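				/* The field encodes the section size as a power of two. */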
1993 				printf("  SectionSizeCtrl : 0x%08x\n", 1 << val);
1994 				printf("  TimerInitVal    : 0x%08x\n",
1995 				    bus_space_read_4(sc->sc_iot, sc->sc_ioh,
1996 				    MVXORE_XETMIVR));
1997 				printf("  TimerCrntVal    : 0x%08x\n",
1998 				    bus_space_read_4(sc->sc_iot, sc->sc_ioh,
1999 				    MVXORE_XETMCVR));
2000 			}
2001 		} else	/* MVXORE_XEXCR_OM_MEMINIT */
2002 			printf("  InitVal         : 0x%08x%08x\n",
2003 			    bus_space_read_4(sc->sc_iot, sc->sc_ioh,
2004 			    MVXORE_XEIVRH),
2005 			    bus_space_read_4(sc->sc_iot, sc->sc_ioh,
2006 			    MVXORE_XEIVRL));
2007 	}
2008 }
2009 
2010 static void
2011 gtidmac_dump_xoredesc(struct gtidmac_softc *sc, struct gtidmac_dma_desc *dd,
2012 		      uint32_t mode, int post)
2013 {
2014 	struct mvxore_desc *desc;
2015 	int i, j;
2016 	char buf[256];
2017 
2018 	printf("XORE Descriptor\n");
2019 
2020 	mode &= MVXORE_XEXCR_OM_MASK;
2021 
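	/*
	 * Walk the chained XORE descriptors until the next-descriptor address
	 * is NULL, syncing each one for the CPU when dumping after a transfer.
	 */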
2022 	i = 0;
2023 	while (1 /*CONSTCOND*/) {
2024 		if (post)
2025 			bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap_xore,
2026 			    dd->dd_index * sizeof(*desc), sizeof(*desc),
2027 			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2028 
2029 		desc = dd->dd_xore_vaddr;
2030 
2031 		printf("%d (0x%lx)\n", i, dd->dd_paddr);
2032 
2033 		snprintb(buf, sizeof(buf), "\177\020b\037Own\0b\036Success\0",
2034 		    desc->stat);
2035 		printf("  Status                  : %s\n", buf);
2036 		if (desc->cmd & MVXORE_DESC_CMD_CRCLAST && post)
2037 			printf("  CRC-32 Result           : 0x%08x\n",
2038 			    desc->result);
2039 		snprintb(buf, sizeof(buf),
2040 		    "\177\020b\037EODIntEn\0b\036CRCLast\0"
2041 		    "b\007Src7Cmd\0b\006Src6Cmd\0b\005Src5Cmd\0b\004Src4Cmd\0"
2042 		    "b\003Src3Cmd\0b\002Src2Cmd\0b\001Src1Cmd\0b\000Src0Cmd\0",
2043 		    desc->cmd);
2044 		printf("  Command                 : %s\n", buf);
2045 		printf("  Next Descriptor Address : 0x%08x\n", desc->nextda);
2046 		printf("  Byte Count              :   0x%06x\n", desc->bcnt);
2047 		printf("  Destination Address     : 0x%08x\n", desc->dstaddr);
2048 		if (mode == MVXORE_XEXCR_OM_XOR) {
2049 			for (j = 0; j < MVXORE_NSRC; j++)
2050 				if (desc->cmd & MVXORE_DESC_CMD_SRCCMD(j))
2051 					printf("  Source Address#%d        :"
2052 					    " 0x%08x\n", j, desc->srcaddr[j]);
2053 		} else
2054 			printf("  Source Address          : 0x%08x\n",
2055 			    desc->srcaddr[0]);
2056 
2057 		if (desc->nextda == (uint32_t)NULL)
2058 			break;
2059 
2060 		if (!post)
2061 			bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap_xore,
2062 			    dd->dd_index * sizeof(*desc), sizeof(*desc),
2063 			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2064 
2065 		i++;
2066 		dd = SLIST_NEXT(dd, dd_next);
2067 	}
2068 	if (!post)
2069 		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap_xore,
2070 		    dd->dd_index * sizeof(*desc), sizeof(*desc),
2071 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2072 }
2073 #endif
2074