1 /*	$NetBSD: gtidmac.c,v 1.11 2014/03/15 13:33:48 kiyohara Exp $	*/
2 /*
3  * Copyright (c) 2008, 2012 KIYOHARA Takashi
4  * All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
17  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
18  * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
19  * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
20  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
21  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
23  * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
24  * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
25  * POSSIBILITY OF SUCH DAMAGE.
26  */
27 
28 #include <sys/cdefs.h>
29 __KERNEL_RCSID(0, "$NetBSD: gtidmac.c,v 1.11 2014/03/15 13:33:48 kiyohara Exp $");
30 
31 #include <sys/param.h>
32 #include <sys/bus.h>
33 #include <sys/device.h>
34 #include <sys/errno.h>
35 #include <sys/endian.h>
36 #include <sys/kmem.h>
37 
38 #include <uvm/uvm_param.h>	/* For PAGE_SIZE */
39 
40 #include <dev/dmover/dmovervar.h>
41 
42 #include <dev/marvell/gtidmacreg.h>
43 #include <dev/marvell/gtidmacvar.h>
44 #include <dev/marvell/marvellreg.h>
45 #include <dev/marvell/marvellvar.h>
46 
47 #include <prop/proplib.h>
48 
49 #include "locators.h"
50 
51 #ifdef GTIDMAC_DEBUG
52 #define DPRINTF(x)	if (gtidmac_debug) printf x
53 int gtidmac_debug = 0;
54 #else
55 #define DPRINTF(x)
56 #endif
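/*
 * Debug note (descriptive, based on the checks in this file): with
 * "options GTIDMAC_DEBUG" in the kernel config, raising gtidmac_debug
 * above 0 (e.g. from ddb) enables the DPRINTF traces, and a value above 1
 * additionally dumps registers and descriptors on every completion
 * (see gtidmac_finish()/mvxore_finish()).
 */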
57 
58 #define GTIDMAC_NDESC		64
59 #define GTIDMAC_MAXCHAN		8
60 #define MVXORE_NDESC		128
61 #define MVXORE_MAXCHAN		2
62 
63 #define GTIDMAC_NSEGS		((GTIDMAC_MAXXFER + PAGE_SIZE - 1) / PAGE_SIZE)
64 #define MVXORE_NSEGS		((MVXORE_MAXXFER + PAGE_SIZE - 1) / PAGE_SIZE)
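/*
 * The NSEGS values are the worst-case number of DMA segments for a
 * maximum-sized transfer: ceil(MAXXFER / PAGE_SIZE), i.e. one segment
 * per page of the largest transfer.
 */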
65 
66 
67 struct gtidmac_softc;
68 
69 struct gtidmac_function {
70 	int (*chan_alloc)(void *, bus_dmamap_t **, bus_dmamap_t **, void *);
71 	void (*chan_free)(void *, int);
72 	int (*dma_setup)(void *, int, int, bus_dmamap_t *, bus_dmamap_t *,
73 			 bus_size_t);
74 	void (*dma_start)(void *, int,
75 			  void (*dma_done_cb)(void *, int, bus_dmamap_t *,
76 						      bus_dmamap_t *, int));
77 	uint32_t (*dma_finish)(void *, int, int);
78 };
79 
80 struct gtidmac_dma_desc {
81 	int dd_index;
82 	union {
83 		struct gtidmac_desc *idmac_vaddr;
84 		struct mvxore_desc *xore_vaddr;
85 	} dd_vaddr;
86 #define dd_idmac_vaddr	dd_vaddr.idmac_vaddr
87 #define dd_xore_vaddr	dd_vaddr.xore_vaddr
88 	paddr_t dd_paddr;
89 	SLIST_ENTRY(gtidmac_dma_desc) dd_next;
90 };
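/*
 * Each gtidmac_dma_desc tracks one hardware descriptor (IDMAC or XORE
 * flavour) inside the shared descriptor buffer: dd_vaddr is its kernel
 * mapping, dd_paddr the bus address used for hardware chaining, and
 * dd_index its slot in sc_dd_buffer.  Free descriptors are kept on the
 * per-engine SLISTs in the softc (sc_dlist and sc_dlist_xore).
 */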
91 
92 struct gtidmac_softc {
93 	device_t sc_dev;
94 
95 	bus_space_tag_t sc_iot;
96 	bus_space_handle_t sc_ioh;
97 
98 	bus_dma_tag_t sc_dmat;
99 	struct gtidmac_dma_desc *sc_dd_buffer;
100 	bus_dma_segment_t sc_pattern_segment;
101 	struct {
102 		u_char pbuf[16];	/* 16 bytes per pattern */
103 	} *sc_pbuf;			/* 256 patterns */
104 
105 	int sc_gtidmac_nchan;
106 	struct gtidmac_desc *sc_dbuf;
107 	bus_dmamap_t sc_dmap;
108 	SLIST_HEAD(, gtidmac_dma_desc) sc_dlist;
109 	struct {
110 		bus_dmamap_t chan_in;		/* In dmamap */
111 		bus_dmamap_t chan_out;		/* Out dmamap */
112 		uint64_t chan_totalcnt;		/* total transferred bytes */
113 		int chan_ddidx;
114 		void *chan_running;		/* opaque object data */
115 		void (*chan_dma_done)(void *, int, bus_dmamap_t *,
116 				      bus_dmamap_t *, int);
117 	} sc_cdesc[GTIDMAC_MAXCHAN];
118 	struct gtidmac_intr_arg {
119 		struct gtidmac_softc *ia_sc;
120 		uint32_t ia_cause;
121 		uint32_t ia_mask;
122 		uint32_t ia_eaddr;
123 		uint32_t ia_eselect;
124 	} sc_intrarg[GTIDMAC_NINTRRUPT];
125 
126 	int sc_mvxore_nchan;
127 	struct mvxore_desc *sc_dbuf_xore;
128 	bus_dmamap_t sc_dmap_xore;
129 	SLIST_HEAD(, gtidmac_dma_desc) sc_dlist_xore;
130 	struct {
131 		bus_dmamap_t chan_in[MVXORE_NSRC];	/* In dmamap */
132 		bus_dmamap_t chan_out;			/* Out dmamap */
133 		uint64_t chan_totalcnt;			/* total transferred */
134 		int chan_ddidx;
135 		void *chan_running;			/* opaque object data */
136 		void (*chan_dma_done)(void *, int, bus_dmamap_t *,
137 				      bus_dmamap_t *, int);
138 	} sc_cdesc_xore[MVXORE_MAXCHAN];
139 
140 	struct dmover_backend sc_dmb;
141 	struct dmover_backend sc_dmb_xore;
142 	int sc_dmb_busy;
143 };
144 struct gtidmac_softc *gtidmac_softc = NULL;
145 
146 static int gtidmac_match(device_t, struct cfdata *, void *);
147 static void gtidmac_attach(device_t, device_t, void *);
148 
149 static int gtidmac_intr(void *);
150 static int mvxore_port0_intr(void *);
151 static int mvxore_port1_intr(void *);
152 static int mvxore_intr(struct gtidmac_softc *, int);
153 
154 static void gtidmac_process(struct dmover_backend *);
155 static void gtidmac_dmover_run(struct dmover_backend *);
156 static void gtidmac_dmover_done(void *, int, bus_dmamap_t *, bus_dmamap_t *,
157 				int);
158 static __inline int gtidmac_dmmap_load(struct gtidmac_softc *, bus_dmamap_t,
159 				dmover_buffer_type, dmover_buffer *, int);
160 static __inline void gtidmac_dmmap_unload(struct gtidmac_softc *, bus_dmamap_t, int);
161 
162 static uint32_t gtidmac_finish(void *, int, int);
163 static uint32_t mvxore_finish(void *, int, int);
164 
165 static void gtidmac_wininit(struct gtidmac_softc *, enum marvell_tags *);
166 static void mvxore_wininit(struct gtidmac_softc *, enum marvell_tags *);
167 
168 static int gtidmac_buffer_setup(struct gtidmac_softc *);
169 static int mvxore_buffer_setup(struct gtidmac_softc *);
170 
171 #ifdef GTIDMAC_DEBUG
172 static void gtidmac_dump_idmacreg(struct gtidmac_softc *, int);
173 static void gtidmac_dump_idmacdesc(struct gtidmac_softc *,
174 				   struct gtidmac_dma_desc *, uint32_t, int);
175 static void gtidmac_dump_xorereg(struct gtidmac_softc *, int);
176 static void gtidmac_dump_xoredesc(struct gtidmac_softc *,
177 				  struct gtidmac_dma_desc *, uint32_t, int);
178 #endif
179 
180 
181 static struct gtidmac_function gtidmac_functions = {
182 	.chan_alloc = gtidmac_chan_alloc,
183 	.chan_free = gtidmac_chan_free,
184 	.dma_setup = gtidmac_setup,
185 	.dma_start = gtidmac_start,
186 	.dma_finish = gtidmac_finish,
187 };
188 
189 static struct gtidmac_function mvxore_functions = {
190 	.chan_alloc = mvxore_chan_alloc,
191 	.chan_free = mvxore_chan_free,
192 	.dma_setup = mvxore_setup,
193 	.dma_start = mvxore_start,
194 	.dma_finish = mvxore_finish,
195 };
196 
197 static const struct dmover_algdesc gtidmac_algdescs[] = {
198 	{
199 		.dad_name = DMOVER_FUNC_ZERO,
200 		.dad_data = &gtidmac_functions,
201 		.dad_ninputs = 0
202 	},
203 	{
204 		.dad_name = DMOVER_FUNC_FILL8,
205 		.dad_data = &gtidmac_functions,
206 		.dad_ninputs = 0
207 	},
208 	{
209 		.dad_name = DMOVER_FUNC_COPY,
210 		.dad_data = &gtidmac_functions,
211 		.dad_ninputs = 1
212 	},
213 };
214 
215 static const struct dmover_algdesc mvxore_algdescs[] = {
216 #if 0
217 	/*
218 	 * These operations carry too many restrictions on the XOR engine;
219 	 * the IDMAC must be used for them instead.
220 	 */
221 	{
222 		.dad_name = DMOVER_FUNC_ZERO,
223 		.dad_data = &mvxore_functions,
224 		.dad_ninputs = 0
225 	},
226 	{
227 		.dad_name = DMOVER_FUNC_FILL8,
228 		.dad_data = &mvxore_functions,
229 		.dad_ninputs = 0
230 	},
231 #endif
232 	{
233 		.dad_name = DMOVER_FUNC_COPY,
234 		.dad_data = &mvxore_functions,
235 		.dad_ninputs = 1
236 	},
237 	{
238 		.dad_name = DMOVER_FUNC_ISCSI_CRC32C,
239 		.dad_data = &mvxore_functions,
240 		.dad_ninputs = 1
241 	},
242 	{
243 		.dad_name = DMOVER_FUNC_XOR2,
244 		.dad_data = &mvxore_functions,
245 		.dad_ninputs = 2
246 	},
247 	{
248 		.dad_name = DMOVER_FUNC_XOR3,
249 		.dad_data = &mvxore_functions,
250 		.dad_ninputs = 3
251 	},
252 	{
253 		.dad_name = DMOVER_FUNC_XOR4,
254 		.dad_data = &mvxore_functions,
255 		.dad_ninputs = 4
256 	},
257 	{
258 		.dad_name = DMOVER_FUNC_XOR5,
259 		.dad_data = &mvxore_functions,
260 		.dad_ninputs = 5
261 	},
262 	{
263 		.dad_name = DMOVER_FUNC_XOR6,
264 		.dad_data = &mvxore_functions,
265 		.dad_ninputs = 6
266 	},
267 	{
268 		.dad_name = DMOVER_FUNC_XOR7,
269 		.dad_data = &mvxore_functions,
270 		.dad_ninputs = 7
271 	},
272 	{
273 		.dad_name = DMOVER_FUNC_XOR8,
274 		.dad_data = &mvxore_functions,
275 		.dad_ninputs = 8
276 	},
277 };
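/*
 * The dad_data of each algdesc above points at a struct gtidmac_function,
 * and the dmover glue drives a request through those hooks roughly as
 * follows (an illustrative sketch of the flow in gtidmac_dmover_run() and
 * gtidmac_dmover_done(), not a separate API):
 *
 *	chan = (*df->chan_alloc)(sc, &dmamap_in, &dmamap_out, dreq);
 *	... bus_dmamap_load() the in/out buffers ...
 *	(*df->dma_setup)(sc, chan, ninputs, dmamap_in, dmamap_out, size);
 *	(*df->dma_start)(sc, chan, gtidmac_dmover_done);
 *	... interrupt -> chan_dma_done callback ...
 *	result = (*df->dma_finish)(sc, chan, error);
 *	(*df->chan_free)(sc, chan);
 */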
278 
279 static struct {
280 	int model;
281 	int idmac_nchan;
282 	int idmac_irq;
283 	int xore_nchan;
284 	int xore_irq;
285 } channels[] = {
286 	/*
287 	 * Marvell System Controllers:
288 	 * IRQs must be supplied in attach_args.
289 	 */
290 	{ MARVELL_DISCOVERY,		8, -1, 0, -1 },
291 	{ MARVELL_DISCOVERY_II,		8, -1, 0, -1 },
292 	{ MARVELL_DISCOVERY_III,	8, -1, 0, -1 },
293 #if 0
294 	{ MARVELL_DISCOVERY_LT,		4, -1, 2, -1 },
295 	{ MARVELL_DISCOVERY_V,		4, -1, 2, -1 },
296 	{ MARVELL_DISCOVERY_VI,		4, -1, 2, -1 },		????
297 #endif
298 
299 	/*
300 	 * Marvell Systems on Chip:
301 	 * No IRQs needed in attach_args; the channels are always wired
302 	 * statically to interrupt pins.
303 	 */
304 	{ MARVELL_ORION_1_88F1181,	4, 24, 0, -1 },
305 	{ MARVELL_ORION_2_88F1281,	4, 24, 0, -1 },
306 	{ MARVELL_ORION_1_88F5082,	4, 24, 0, -1 },
307 	{ MARVELL_ORION_1_88F5180N,	4, 24, 0, -1 },
308 	{ MARVELL_ORION_1_88F5181,	4, 24, 0, -1 },
309 	{ MARVELL_ORION_1_88F5182,	4, 24, 2, 30 },
310 	{ MARVELL_ORION_2_88F5281,	4, 24, 0, -1 },
311 	{ MARVELL_ORION_1_88W8660,	4, 24, 0, -1 },
312 	{ MARVELL_KIRKWOOD_88F6180,	0, -1, 4, 5 },
313 	{ MARVELL_KIRKWOOD_88F6192,	0, -1, 4, 5 },
314 	{ MARVELL_KIRKWOOD_88F6281,	0, -1, 4, 5 },
315 	{ MARVELL_KIRKWOOD_88F6282,	0, -1, 4, 5 },
316 	{ MARVELL_ARMADAXP_MV78130,	4, 33, 2, 51 },
317 	{ MARVELL_ARMADAXP_MV78130,	0, -1, 2, 94 },
318 	{ MARVELL_ARMADAXP_MV78160,	4, 33, 2, 51 },
319 	{ MARVELL_ARMADAXP_MV78160,	0, -1, 2, 94 },
320 	{ MARVELL_ARMADAXP_MV78230,	4, 33, 2, 51 },
321 	{ MARVELL_ARMADAXP_MV78230,	0, -1, 2, 94 },
322 	{ MARVELL_ARMADAXP_MV78260,	4, 33, 2, 51 },
323 	{ MARVELL_ARMADAXP_MV78260,	0, -1, 2, 94 },
324 	{ MARVELL_ARMADAXP_MV78460,	4, 33, 2, 51 },
325 	{ MARVELL_ARMADAXP_MV78460,	0, -1, 2, 94 },
326 };
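/*
 * In the channels[] table above a model may appear more than once: each
 * entry describes one gtidmac(4) instance (unit), and gtidmac_match()/
 * gtidmac_attach() pick the entry whose position among same-model rows
 * matches mva_unit.  The Armada XP SoCs, for example, attach a second,
 * XORE-only unit at a different interrupt.
 */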
327 
328 struct gtidmac_winacctbl *gtidmac_winacctbl;
329 struct gtidmac_winacctbl *mvxore_winacctbl;
330 
331 CFATTACH_DECL_NEW(gtidmac_gt, sizeof(struct gtidmac_softc),
332     gtidmac_match, gtidmac_attach, NULL, NULL);
333 CFATTACH_DECL_NEW(gtidmac_mbus, sizeof(struct gtidmac_softc),
334     gtidmac_match, gtidmac_attach, NULL, NULL);
335 
336 
337 /* ARGSUSED */
338 static int
339 gtidmac_match(device_t parent, struct cfdata *match, void *aux)
340 {
341 	struct marvell_attach_args *mva = aux;
342 	int unit, i;
343 
344 	if (strcmp(mva->mva_name, match->cf_name) != 0)
345 		return 0;
346 	if (mva->mva_offset == MVA_OFFSET_DEFAULT)
347 		return 0;
348 	unit = 0;
349 	for (i = 0; i < __arraycount(channels); i++)
350 		if (mva->mva_model == channels[i].model) {
351 			if (mva->mva_unit == unit) {
352 				mva->mva_size = GTIDMAC_SIZE;
353 				return 1;
354 			}
355 			unit++;
356 		}
357 	return 0;
358 }
359 
360 /* ARGSUSED */
361 static void
362 gtidmac_attach(device_t parent, device_t self, void *aux)
363 {
364 	struct gtidmac_softc *sc = device_private(self);
365 	struct marvell_attach_args *mva = aux;
366 	prop_dictionary_t dict = device_properties(self);
367 	uint32_t idmac_irq, xore_irq, dmb_speed;
368 	int unit, idmac_nchan, xore_nchan, nsegs, i, j, n;
369 
370 	unit = 0;
371 	for (i = 0; i < __arraycount(channels); i++)
372 		if (mva->mva_model == channels[i].model) {
373 			if (mva->mva_unit == unit)
374 				break;
375 			unit++;
376 		}
377 	idmac_nchan = channels[i].idmac_nchan;
378 	idmac_irq = channels[i].idmac_irq;
379 	if (idmac_nchan != 0) {
380 		if (idmac_irq == -1)
381 			idmac_irq = mva->mva_irq;
382 		if (idmac_irq == -1)
383 			/* Discovery */
384 			if (!prop_dictionary_get_uint32(dict,
385 			    "idmac-irq", &idmac_irq)) {
386 				aprint_error(": no idmac-irq property\n");
387 				return;
388 			}
389 	}
390 	xore_nchan = channels[i].xore_nchan;
391 	xore_irq = channels[i].xore_irq;
392 	if (xore_nchan != 0) {
393 		if (xore_irq == -1)
394 			xore_irq = mva->mva_irq;
395 		if (xore_irq == -1)
396 			/* Discovery LT/V/VI */
397 			if (!prop_dictionary_get_uint32(dict,
398 			    "xore-irq", &xore_irq)) {
399 				aprint_error(": no xore-irq property\n");
400 				return;
401 			}
402 	}
403 
404 	aprint_naive("\n");
405 	aprint_normal(": Marvell IDMA Controller%s\n",
406 	    xore_nchan ? "/XOR Engine" : "");
407 	if (idmac_nchan > 0)
408 		aprint_normal_dev(self,
409 		    "IDMA Controller %d channels, intr %d...%d\n",
410 		    idmac_nchan, idmac_irq, idmac_irq + GTIDMAC_NINTRRUPT - 1);
411 	if (xore_nchan > 0)
412 		aprint_normal_dev(self,
413 		    "XOR Engine %d channels, intr %d...%d\n",
414 		    xore_nchan, xore_irq, xore_irq + xore_nchan - 1);
415 
416 	sc->sc_dev = self;
417 	sc->sc_iot = mva->mva_iot;
418 
419 	/* Map I/O registers */
420 	if (bus_space_subregion(mva->mva_iot, mva->mva_ioh, mva->mva_offset,
421 	    mva->mva_size, &sc->sc_ioh)) {
422 		aprint_error_dev(self, "can't map registers\n");
423 		return;
424 	}
425 
426 	/*
427 	 * Initialise DMA descriptors and associated metadata
428 	 */
429 	sc->sc_dmat = mva->mva_dmat;
430 	n = idmac_nchan * GTIDMAC_NDESC + xore_nchan * MVXORE_NDESC;
431 	sc->sc_dd_buffer =
432 	    kmem_alloc(sizeof(struct gtidmac_dma_desc) * n, KM_SLEEP);
433 	if (sc->sc_dd_buffer == NULL) {
434 		aprint_error_dev(self, "can't allocate memory\n");
435 		goto fail1;
436 	}
437 	/* pattern buffer */
438 	if (bus_dmamem_alloc(sc->sc_dmat, PAGE_SIZE, PAGE_SIZE, 0,
439 	    &sc->sc_pattern_segment, 1, &nsegs, BUS_DMA_NOWAIT)) {
440 		aprint_error_dev(self,
441 		    "bus_dmamem_alloc failed: pattern buffer\n");
442 		goto fail2;
443 	}
444 	if (bus_dmamem_map(sc->sc_dmat, &sc->sc_pattern_segment, 1, PAGE_SIZE,
445 	    (void **)&sc->sc_pbuf, BUS_DMA_NOWAIT)) {
446 		aprint_error_dev(self,
447 		    "bus_dmamem_map failed: pattern buffer\n");
448 		goto fail3;
449 	}
450 	for (i = 0; i < 0x100; i++)
451 		for (j = 0; j < sizeof(sc->sc_pbuf[i].pbuf); j++)
452 			sc->sc_pbuf[i].pbuf[j] = i;
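	/*
	 * sc_pbuf[v] now holds the byte value v repeated 16 times.  For the
	 * ZERO/FILL8 dmover functions the source map is loaded over one of
	 * these patterns and GTIDMAC_CCLR_SRCHOLD is set in gtidmac_setup(),
	 * so the engine rereads the same 16-byte pattern for the whole
	 * transfer.
	 */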
453 
454 	if (!prop_dictionary_get_uint32(dict, "dmb_speed", &dmb_speed)) {
455 		aprint_error_dev(self, "no dmb_speed property\n");
456 		dmb_speed = 10;	/* Probably still faster than swdmover. */
457 	}
458 
459 	/* IDMAC DMA descriptor buffer */
460 	sc->sc_gtidmac_nchan = idmac_nchan;
461 	if (sc->sc_gtidmac_nchan > 0) {
462 		if (gtidmac_buffer_setup(sc) != 0)
463 			goto fail4;
464 
465 		if (mva->mva_model != MARVELL_DISCOVERY)
466 			gtidmac_wininit(sc, mva->mva_tags);
467 
468 		/* Setup interrupt */
469 		for (i = 0; i < GTIDMAC_NINTRRUPT; i++) {
470 			j = i * idmac_nchan / GTIDMAC_NINTRRUPT;
471 
472 			sc->sc_intrarg[i].ia_sc = sc;
473 			sc->sc_intrarg[i].ia_cause = GTIDMAC_ICR(j);
474 			sc->sc_intrarg[i].ia_eaddr = GTIDMAC_EAR(j);
475 			sc->sc_intrarg[i].ia_eselect = GTIDMAC_ESR(j);
476 			marvell_intr_establish(idmac_irq + i, IPL_BIO,
477 			    gtidmac_intr, &sc->sc_intrarg[i]);
478 		}
479 
480 		/* Register us with dmover. */
481 		sc->sc_dmb.dmb_name = device_xname(self);
482 		sc->sc_dmb.dmb_speed = dmb_speed;
483 		sc->sc_dmb.dmb_cookie = sc;
484 		sc->sc_dmb.dmb_algdescs = gtidmac_algdescs;
485 		sc->sc_dmb.dmb_nalgdescs = __arraycount(gtidmac_algdescs);
486 		sc->sc_dmb.dmb_process = gtidmac_process;
487 		dmover_backend_register(&sc->sc_dmb);
488 		sc->sc_dmb_busy = 0;
489 	}
490 
491 	/* XORE DMA descriptor buffer */
492 	sc->sc_mvxore_nchan = xore_nchan;
493 	if (sc->sc_mvxore_nchan > 0) {
494 		if (mvxore_buffer_setup(sc) != 0)
495 			goto fail5;
496 
497 		/* Setup interrupt */
498 		for (i = 0; i < sc->sc_mvxore_nchan; i++)
499 			marvell_intr_establish(xore_irq + i, IPL_BIO,
500 			    (i & 0x2) ? mvxore_port1_intr : mvxore_port0_intr,
501 			    sc);
502 
503 		mvxore_wininit(sc, mva->mva_tags);
504 
505 		/* Register us with dmover. */
506 		sc->sc_dmb_xore.dmb_name = device_xname(sc->sc_dev);
507 		sc->sc_dmb_xore.dmb_speed = dmb_speed;
508 		sc->sc_dmb_xore.dmb_cookie = sc;
509 		sc->sc_dmb_xore.dmb_algdescs = mvxore_algdescs;
510 		sc->sc_dmb_xore.dmb_nalgdescs =
511 		    __arraycount(mvxore_algdescs);
512 		sc->sc_dmb_xore.dmb_process = gtidmac_process;
513 		dmover_backend_register(&sc->sc_dmb_xore);
514 	}
515 
516 	gtidmac_softc = sc;
517 
518 	return;
519 
520 fail5:
521 	for (i = sc->sc_gtidmac_nchan - 1; i >= 0; i--) {
522 		bus_dmamap_destroy(sc->sc_dmat, sc->sc_cdesc[i].chan_in);
523 		bus_dmamap_destroy(sc->sc_dmat, sc->sc_cdesc[i].chan_out);
524 	}
525 	bus_dmamap_unload(sc->sc_dmat, sc->sc_dmap);
526 	bus_dmamap_destroy(sc->sc_dmat, sc->sc_dmap);
527 	bus_dmamem_unmap(sc->sc_dmat, sc->sc_dbuf,
528 	    sizeof(struct gtidmac_desc) * GTIDMAC_NDESC * idmac_nchan);
529 	bus_dmamem_free(sc->sc_dmat,
530 	    sc->sc_dmap->dm_segs, sc->sc_dmap->dm_nsegs);
531 fail4:
532 	bus_dmamem_unmap(sc->sc_dmat, sc->sc_pbuf, PAGE_SIZE);
533 fail3:
534 	bus_dmamem_free(sc->sc_dmat, &sc->sc_pattern_segment, 1);
535 fail2:
536 	kmem_free(sc->sc_dd_buffer, sizeof(struct gtidmac_dma_desc) * n);
537 fail1:
538 	bus_space_unmap(sc->sc_iot, sc->sc_ioh, mva->mva_size);
539 	return;
540 }
541 
542 
543 static int
544 gtidmac_intr(void *arg)
545 {
546 	struct gtidmac_intr_arg *ia = arg;
547 	struct gtidmac_softc *sc = ia->ia_sc;
548 	uint32_t cause;
549 	int handled = 0, chan, error;
550 
551 	cause = bus_space_read_4(sc->sc_iot, sc->sc_ioh, ia->ia_cause);
552 	DPRINTF(("IDMAC intr: cause=0x%x\n", cause));
553 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, ia->ia_cause, ~cause);
554 
555 	chan = 0;
556 	while (cause) {
557 		error = 0;
558 		if (cause & GTIDMAC_I_ADDRMISS) {
559 			aprint_error_dev(sc->sc_dev, "Address Miss");
560 			error = EINVAL;
561 		}
562 		if (cause & GTIDMAC_I_ACCPROT) {
563 			aprint_error_dev(sc->sc_dev,
564 			    "Access Protect Violation");
565 			error = EACCES;
566 		}
567 		if (cause & GTIDMAC_I_WRPROT) {
568 			aprint_error_dev(sc->sc_dev, "Write Protect");
569 			error = EACCES;
570 		}
571 		if (cause & GTIDMAC_I_OWN) {
572 			aprint_error_dev(sc->sc_dev, "Ownership Violation");
573 			error = EINVAL;
574 		}
575 
576 #define GTIDMAC_I_ERROR		  \
577 	   (GTIDMAC_I_ADDRMISS	| \
578 	    GTIDMAC_I_ACCPROT	| \
579 	    GTIDMAC_I_WRPROT	| \
580 	    GTIDMAC_I_OWN)
581 		if (cause & GTIDMAC_I_ERROR) {
582 			uint32_t sel;
583 			int select;
584 
585 			sel = bus_space_read_4(sc->sc_iot, sc->sc_ioh,
586 			    ia->ia_eselect) & GTIDMAC_ESR_SEL;
587 			select = sel - chan * GTIDMAC_I_BITS;
588 			if (select >= 0 && select < GTIDMAC_I_BITS) {
589 				uint32_t ear;
590 
591 				ear = bus_space_read_4(sc->sc_iot, sc->sc_ioh,
592 				    ia->ia_eaddr);
593 				aprint_error(": Error Address 0x%x\n", ear);
594 			} else
595 				aprint_error(": lost Error Address\n");
596 		}
597 
598 		if (cause & (GTIDMAC_I_COMP | GTIDMAC_I_ERROR)) {
599 			sc->sc_cdesc[chan].chan_dma_done(
600 			    sc->sc_cdesc[chan].chan_running, chan,
601 			    &sc->sc_cdesc[chan].chan_in,
602 			    &sc->sc_cdesc[chan].chan_out, error);
603 			handled++;
604 		}
605 
606 		cause >>= GTIDMAC_I_BITS;
607 	}
608 	DPRINTF(("IDMAC intr: %shandled\n", handled ? "" : "not "));
609 
610 	return handled;
611 }
612 
613 static int
614 mvxore_port0_intr(void *arg)
615 {
616 	struct gtidmac_softc *sc = arg;
617 
618 	return mvxore_intr(sc, 0);
619 }
620 
621 static int
622 mvxore_port1_intr(void *arg)
623 {
624 	struct gtidmac_softc *sc = arg;
625 
626 	return mvxore_intr(sc, 1);
627 }
628 
629 static int
630 mvxore_intr(struct gtidmac_softc *sc, int port)
631 {
632 	uint32_t cause;
633 	int handled = 0, chan, error;
634 
635 	cause =
636 	    bus_space_read_4(sc->sc_iot, sc->sc_ioh, MVXORE_XEICR(sc, port));
637 	DPRINTF(("XORE port %d intr: cause=0x%x\n", port, cause));
639 	bus_space_write_4(sc->sc_iot, sc->sc_ioh,
640 	    MVXORE_XEICR(sc, port), ~cause);
641 
642 	chan = 0;
643 	while (cause) {
644 		error = 0;
645 		if (cause & MVXORE_I_ADDRDECODE) {
646 			aprint_error_dev(sc->sc_dev, "Failed address decoding");
647 			error = EINVAL;
648 		}
649 		if (cause & MVXORE_I_ACCPROT) {
650 			aprint_error_dev(sc->sc_dev,
651 			    "Access Protect Violation");
652 			error = EACCES;
653 		}
654 		if (cause & MVXORE_I_WRPROT) {
655 			aprint_error_dev(sc->sc_dev, "Write Protect");
656 			error = EACCES;
657 		}
658 		if (cause & MVXORE_I_OWN) {
659 			aprint_error_dev(sc->sc_dev, "Ownership Violation");
660 			error = EINVAL;
661 		}
662 		if (cause & MVXORE_I_INTPARITY) {
663 			aprint_error_dev(sc->sc_dev, "Parity Error");
664 			error = EIO;
665 		}
666 		if (cause & MVXORE_I_XBAR) {
667 			aprint_error_dev(sc->sc_dev, "Crossbar Parity Error");
668 			error = EINVAL;
669 		}
670 
671 #define MVXORE_I_ERROR		  \
672 	   (MVXORE_I_ADDRDECODE	| \
673 	    MVXORE_I_ACCPROT	| \
674 	    MVXORE_I_WRPROT	| \
675 	    MVXORE_I_OWN	| \
676 	    MVXORE_I_INTPARITY	| \
677 	    MVXORE_I_XBAR)
678 		if (cause & MVXORE_I_ERROR) {
679 			uint32_t type;
680 			int event;
681 
682 			type = bus_space_read_4(sc->sc_iot, sc->sc_ioh,
683 			    MVXORE_XEECR(sc, port));
684 			type &= MVXORE_XEECR_ERRORTYPE_MASK;
685 			event = type - chan * MVXORE_I_BITS;
686 			if (event >= 0 && event < MVXORE_I_BITS) {
687 				uint32_t xeear;
688 
689 				xeear = bus_space_read_4(sc->sc_iot, sc->sc_ioh,
690 				    MVXORE_XEEAR(sc, port));
691 				aprint_error(": Error Address 0x%x\n", xeear);
692 			} else
693 				aprint_error(": lost Error Address\n");
694 		}
695 
696 		if (cause & (MVXORE_I_EOC | MVXORE_I_ERROR)) {
697 			sc->sc_cdesc_xore[chan].chan_dma_done(
698 			    sc->sc_cdesc_xore[chan].chan_running, chan,
699 			    sc->sc_cdesc_xore[chan].chan_in,
700 			    &sc->sc_cdesc_xore[chan].chan_out, error);
701 			handled++;
702 		}
703 
704 		cause >>= MVXORE_I_BITS;
705 	}
707 	DPRINTF(("XORE port %d intr: %shandled\n",
708 	    port, handled ? "" : "not "));
709 
710 	return handled;
711 }
712 
713 
714 /*
715  * dmover(9) backend function.
716  */
717 static void
718 gtidmac_process(struct dmover_backend *dmb)
719 {
720 	struct gtidmac_softc *sc = dmb->dmb_cookie;
721 	int s;
722 
723 	/* If the backend is currently idle, go process the queue. */
724 	s = splbio();
725 	if (!sc->sc_dmb_busy)
726 		gtidmac_dmover_run(dmb);
727 	splx(s);
728 }
729 
730 static void
731 gtidmac_dmover_run(struct dmover_backend *dmb)
732 {
733 	struct gtidmac_softc *sc = dmb->dmb_cookie;
734 	struct dmover_request *dreq;
735 	const struct dmover_algdesc *algdesc;
736 	struct gtidmac_function *df;
737 	bus_dmamap_t *dmamap_in, *dmamap_out;
738 	int chan, ninputs, error, i;
739 
740 	sc->sc_dmb_busy = 1;
741 
742 	for (;;) {
743 		dreq = TAILQ_FIRST(&dmb->dmb_pendreqs);
744 		if (dreq == NULL)
745 			break;
746 		algdesc = dreq->dreq_assignment->das_algdesc;
747 		df = algdesc->dad_data;
748 		chan = (*df->chan_alloc)(sc, &dmamap_in, &dmamap_out, dreq);
749 		if (chan == -1)
750 			return;
751 
752 		dmover_backend_remque(dmb, dreq);
753 		dreq->dreq_flags |= DMOVER_REQ_RUNNING;
754 
755 		/* XXXUNLOCK */
756 
757 		error = 0;
758 
759 		/* Load in/out buffers of dmover to bus_dmamap. */
760 		ninputs = dreq->dreq_assignment->das_algdesc->dad_ninputs;
761 		if (ninputs == 0) {
762 			int pno = 0;
763 
764 			if (algdesc->dad_name == DMOVER_FUNC_FILL8)
765 				pno = dreq->dreq_immediate[0];
766 
767 			i = 0;
768 			error = bus_dmamap_load(sc->sc_dmat, *dmamap_in,
769 			    &sc->sc_pbuf[pno], sizeof(sc->sc_pbuf[pno]), NULL,
770 			    BUS_DMA_NOWAIT | BUS_DMA_STREAMING | BUS_DMA_WRITE);
771 			if (error == 0) {
772 				bus_dmamap_sync(sc->sc_dmat, *dmamap_in, 0,
773 				    sizeof(uint32_t), BUS_DMASYNC_PREWRITE);
774 
775 				/*
776 				 * Ensure gtidmac_dmmap_unload() is called
777 				 * below if an error occurs later.
778 				 */
779 				i = 1;
780 			}
781 		} else
782 			for (i = 0; i < ninputs; i++) {
783 				error = gtidmac_dmmap_load(sc,
784 				    *(dmamap_in + i), dreq->dreq_inbuf_type,
785 				    &dreq->dreq_inbuf[i], 0/*write*/);
786 				if (error != 0)
787 					break;
788 			}
789 		if (algdesc->dad_name != DMOVER_FUNC_ISCSI_CRC32C) {
790 			if (error == 0)
791 				error = gtidmac_dmmap_load(sc, *dmamap_out,
792 				    dreq->dreq_outbuf_type, &dreq->dreq_outbuf,
793 				    1/*read*/);
794 
795 			if (error == 0) {
796 				/*
797 				 * For a dmover request, the output buffer
798 				 * size is taken as the DMA transfer size.
799 				 */
800 				error = (*df->dma_setup)(sc, chan, ninputs,
801 				    dmamap_in, dmamap_out,
802 				    (*dmamap_out)->dm_mapsize);
803 				if (error != 0)
804 					gtidmac_dmmap_unload(sc, *dmamap_out,
805 					    1);
806 			}
807 		} else
808 			if (error == 0)
809 				error = (*df->dma_setup)(sc, chan, ninputs,
810 				    dmamap_in, dmamap_out,
811 				    (*dmamap_in)->dm_mapsize);
812 
813 		/* XXXLOCK */
814 
815 		if (error != 0) {
816 			for (; i-- > 0;)
817 				gtidmac_dmmap_unload(sc, *(dmamap_in + i), 0);
818 			(*df->chan_free)(sc, chan);
819 
820 			dreq->dreq_flags |= DMOVER_REQ_ERROR;
821 			dreq->dreq_error = error;
822 			/* XXXUNLOCK */
823 			dmover_done(dreq);
824 			/* XXXLOCK */
825 			continue;
826 		}
827 
828 		(*df->dma_start)(sc, chan, gtidmac_dmover_done);
829 		break;
830 	}
831 
832 	/* All done */
833 	sc->sc_dmb_busy = 0;
834 }
835 
836 static void
837 gtidmac_dmover_done(void *object, int chan, bus_dmamap_t *dmamap_in,
838 		    bus_dmamap_t *dmamap_out, int error)
839 {
840 	struct gtidmac_softc *sc;
841 	struct dmover_request *dreq = object;
842 	struct dmover_backend *dmb;
843 	struct gtidmac_function *df;
844 	uint32_t result;
845 	int ninputs, i;
846 
847 	KASSERT(dreq != NULL);
848 
849 	dmb = dreq->dreq_assignment->das_backend;
850 	df = dreq->dreq_assignment->das_algdesc->dad_data;
851 	ninputs = dreq->dreq_assignment->das_algdesc->dad_ninputs;
852 	sc = dmb->dmb_cookie;
853 
854 	result = (*df->dma_finish)(sc, chan, error);
855 	for (i = 0; i < ninputs; i++)
856 		gtidmac_dmmap_unload(sc, *(dmamap_in + i), 0);
857 	if (dreq->dreq_assignment->das_algdesc->dad_name ==
858 	    DMOVER_FUNC_ISCSI_CRC32C)
859 		memcpy(dreq->dreq_immediate, &result, sizeof(result));
860 	else
861 		gtidmac_dmmap_unload(sc, *dmamap_out, 1);
862 
863 	(*df->chan_free)(sc, chan);
864 
865 	if (error) {
866 		dreq->dreq_error = error;
867 		dreq->dreq_flags |= DMOVER_REQ_ERROR;
868 	}
869 
870 	dmover_done(dreq);
871 
872 	/*
873 	 * See if we can start some more dmover(9) requests.
874 	 *
875 	 * Note: We're already at splbio() here.
876 	 */
877 	if (!sc->sc_dmb_busy)
878 		gtidmac_dmover_run(dmb);
879 }
880 
881 static __inline int
882 gtidmac_dmmap_load(struct gtidmac_softc *sc, bus_dmamap_t dmamap,
883 		   dmover_buffer_type dmbuf_type, dmover_buffer *dmbuf,
884 		   int read)
885 {
886 	int error, flags;
887 
888 	flags = BUS_DMA_NOWAIT | BUS_DMA_STREAMING |
889 	    (read ? BUS_DMA_READ : BUS_DMA_WRITE);
890 
891 	switch (dmbuf_type) {
892 	case DMOVER_BUF_LINEAR:
893 		error = bus_dmamap_load(sc->sc_dmat, dmamap,
894 		    dmbuf->dmbuf_linear.l_addr, dmbuf->dmbuf_linear.l_len,
895 		    NULL, flags);
896 		break;
897 
898 	case DMOVER_BUF_UIO:
899 		if ((read && dmbuf->dmbuf_uio->uio_rw != UIO_READ) ||
900 		    (!read && dmbuf->dmbuf_uio->uio_rw == UIO_READ))
901 			return (EINVAL);
902 
903 		error = bus_dmamap_load_uio(sc->sc_dmat, dmamap,
904 		    dmbuf->dmbuf_uio, flags);
905 		break;
906 
907 	default:
908 		error = EINVAL;
909 	}
910 
911 	if (error == 0)
912 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
913 		    read ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
914 
915 	return error;
916 }
917 
918 static __inline void
919 gtidmac_dmmap_unload(struct gtidmac_softc *sc, bus_dmamap_t dmamap, int read)
920 {
921 
922 	bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
923 	    read ? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
924 
925 	bus_dmamap_unload(sc->sc_dmat, dmamap);
926 }
927 
928 
929 void *
930 gtidmac_tag_get(void)
931 {
932 
933 	return gtidmac_softc;
934 }
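/*
 * Besides the dmover(9) backend below, the non-static channel functions
 * (prototypes presumably in gtidmacvar.h: gtidmac_chan_alloc/gtidmac_setup/
 * gtidmac_start/gtidmac_chan_free and the mvxore_* equivalents) can in
 * principle be driven directly by another driver.  A minimal, hypothetical
 * sketch (mydev_dma_done and mydev_sc are illustrative names only):
 *
 *	bus_dmamap_t *in, *out;
 *	void *tag = gtidmac_tag_get();
 *	int chan = gtidmac_chan_alloc(tag, &in, &out, mydev_sc);
 *	if (chan >= 0) {
 *		... bus_dmamap_load() *in and *out ...
 *		if (gtidmac_setup(tag, chan, 1, in, out,
 *		    (*out)->dm_mapsize) == 0)
 *			gtidmac_start(tag, chan, mydev_dma_done);
 *		else
 *			gtidmac_chan_free(tag, chan);
 *	}
 *
 * mydev_dma_done() is then called from gtidmac_intr(); note that the
 * descriptors are recycled by the file-local gtidmac_finish(), so only the
 * dmover glue in this file exercises the full cycle today.
 */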
935 
936 /*
937  * IDMAC functions
938  */
939 int
940 gtidmac_chan_alloc(void *tag, bus_dmamap_t **dmamap_in,
941 		   bus_dmamap_t **dmamap_out, void *object)
942 {
943 	struct gtidmac_softc *sc = tag;
944 	int chan;
945 
946 /* maybe need lock */
947 
948 	for (chan = 0; chan < sc->sc_gtidmac_nchan; chan++)
949 		if (sc->sc_cdesc[chan].chan_running == NULL)
950 			break;
951 	if (chan >= sc->sc_gtidmac_nchan)
952 		return -1;
953 
954 
955 	sc->sc_cdesc[chan].chan_running = object;
956 
957 /* unlock */
958 
959 	*dmamap_in = &sc->sc_cdesc[chan].chan_in;
960 	*dmamap_out = &sc->sc_cdesc[chan].chan_out;
961 
962 	return chan;
963 }
964 
965 void
966 gtidmac_chan_free(void *tag, int chan)
967 {
968 	struct gtidmac_softc *sc = tag;
969 
970 /* maybe need lock */
971 
972 	sc->sc_cdesc[chan].chan_running = NULL;
973 
974 /* unlock */
975 }
976 
977 /* ARGSUSED */
978 int
979 gtidmac_setup(void *tag, int chan, int ninputs, bus_dmamap_t *dmamap_in,
980 	      bus_dmamap_t *dmamap_out, bus_size_t size)
981 {
982 	struct gtidmac_softc *sc = tag;
983 	struct gtidmac_dma_desc *dd, *fstdd, *nxtdd;
984 	struct gtidmac_desc *desc;
985 	uint32_t ccl, bcnt, ires, ores;
986 	int n = 0, iidx, oidx;
987 
988 	KASSERT(ninputs == 0 || ninputs == 1);
989 
990 	ccl = bus_space_read_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_CCLR(chan));
991 #ifdef DIAGNOSTIC
992 	if (ccl & GTIDMAC_CCLR_CHANACT)
993 		panic("gtidmac_setup: chan%d already active", chan);
994 #endif
995 
996 	/* Always use chained mode, with at most (16M - 1) bytes per descriptor */
997 	ccl = (GTIDMAC_CCLR_DESCMODE_16M				|
998 #ifdef GTIDMAC_DEBUG
999 	    GTIDMAC_CCLR_CDEN						|
1000 #endif
1001 	    GTIDMAC_CCLR_TRANSFERMODE_B /* Transfer Mode: Block */	|
1002 	    GTIDMAC_CCLR_INTMODE_NULL   /* Intr Mode: Next Desc NULL */	|
1003 	    GTIDMAC_CCLR_CHAINMODE_C    /* Chain Mode: Chained */);
1004 	if (size != (*dmamap_in)->dm_mapsize) {
1005 		ccl |= GTIDMAC_CCLR_SRCHOLD;
1006 		if ((*dmamap_in)->dm_mapsize == 8)
1007 			ccl |= GTIDMAC_CCLR_SBL_8B;
1008 		else if ((*dmamap_in)->dm_mapsize == 16)
1009 			ccl |= GTIDMAC_CCLR_SBL_16B;
1010 		else if ((*dmamap_in)->dm_mapsize == 32)
1011 			ccl |= GTIDMAC_CCLR_SBL_32B;
1012 		else if ((*dmamap_in)->dm_mapsize == 64)
1013 			ccl |= GTIDMAC_CCLR_SBL_64B;
1014 		else if ((*dmamap_in)->dm_mapsize == 128)
1015 			ccl |= GTIDMAC_CCLR_SBL_128B;
1016 		else
1017 			panic("gtidmac_setup: chan%d source:"
1018 			    " unsupported hold size", chan);
1019 	} else
1020 		ccl |= GTIDMAC_CCLR_SBL_128B;
1021 	if (size != (*dmamap_out)->dm_mapsize) {
1022 		ccl |= GTIDMAC_CCLR_DESTHOLD;
1023 		if ((*dmamap_out)->dm_mapsize == 8)
1024 			ccl |= GTIDMAC_CCLR_DBL_8B;
1025 		else if ((*dmamap_out)->dm_mapsize == 16)
1026 			ccl |= GTIDMAC_CCLR_DBL_16B;
1027 		else if ((*dmamap_out)->dm_mapsize == 32)
1028 			ccl |= GTIDMAC_CCLR_DBL_32B;
1029 		else if ((*dmamap_out)->dm_mapsize == 64)
1030 			ccl |= GTIDMAC_CCLR_DBL_64B;
1031 		else if ((*dmamap_out)->dm_mapsize == 128)
1032 			ccl |= GTIDMAC_CCLR_DBL_128B;
1033 		else
1034 			panic("gtidmac_setup: chan%d destination:"
1035 			    " unsupported hold size", chan);
1036 	} else
1037 		ccl |= GTIDMAC_CCLR_DBL_128B;
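	/*
	 * Hold mode: when a map is smaller than the requested transfer size
	 * (e.g. the 16-byte fill pattern), the corresponding address is held
	 * and the burst limit is set to the map size, so the engine rereads
	 * or rewrites the same small buffer; otherwise the maximum 128-byte
	 * burst is used.
	 */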
1038 
1039 	fstdd = SLIST_FIRST(&sc->sc_dlist);
1040 	if (fstdd == NULL) {
1041 		aprint_error_dev(sc->sc_dev, "no descriptor\n");
1042 		return ENOMEM;
1043 	}
1044 	SLIST_REMOVE_HEAD(&sc->sc_dlist, dd_next);
1045 	sc->sc_cdesc[chan].chan_ddidx = fstdd->dd_index;
1046 
1047 	dd = fstdd;
1048 	ires = ores = 0;
1049 	iidx = oidx = 0;
1050 	while (1 /*CONSTCOND*/) {
1051 		if (ccl & GTIDMAC_CCLR_SRCHOLD) {
1052 			if (ccl & GTIDMAC_CCLR_DESTHOLD)
1053 				bcnt = size;	/* src/dst hold */
1054 			else
1055 				bcnt = (*dmamap_out)->dm_segs[oidx].ds_len;
1056 		} else if (ccl & GTIDMAC_CCLR_DESTHOLD)
1057 			bcnt = (*dmamap_in)->dm_segs[iidx].ds_len;
1058 		else
1059 			bcnt = min((*dmamap_in)->dm_segs[iidx].ds_len - ires,
1060 			    (*dmamap_out)->dm_segs[oidx].ds_len - ores);
1061 
1062 		desc = dd->dd_idmac_vaddr;
1063 		desc->bc.mode16m.bcnt =
1064 		    bcnt | GTIDMAC_CIDMABCR_BCLEFT | GTIDMAC_CIDMABCR_OWN;
1065 		desc->srcaddr = (*dmamap_in)->dm_segs[iidx].ds_addr + ires;
1066 		desc->dstaddr = (*dmamap_out)->dm_segs[oidx].ds_addr + ores;
1067 
1068 		n += bcnt;
1069 		if (n >= size)
1070 			break;
1071 		if (!(ccl & GTIDMAC_CCLR_SRCHOLD)) {
1072 			ires += bcnt;
1073 			if (ires >= (*dmamap_in)->dm_segs[iidx].ds_len) {
1074 				ires = 0;
1075 				iidx++;
1076 				KASSERT(iidx < (*dmamap_in)->dm_nsegs);
1077 			}
1078 		}
1079 		if (!(ccl & GTIDMAC_CCLR_DESTHOLD)) {
1080 			ores += bcnt;
1081 			if (ores >= (*dmamap_out)->dm_segs[oidx].ds_len) {
1082 				ores = 0;
1083 				oidx++;
1084 				KASSERT(oidx < (*dmamap_out)->dm_nsegs);
1085 			}
1086 		}
1087 
1088 		nxtdd = SLIST_FIRST(&sc->sc_dlist);
1089 		if (nxtdd == NULL) {
1090 			aprint_error_dev(sc->sc_dev, "no descriptor\n");
1091 			return ENOMEM;
1092 		}
1093 		SLIST_REMOVE_HEAD(&sc->sc_dlist, dd_next);
1094 
1095 		desc->nextdp = (uint32_t)nxtdd->dd_paddr;
1096 		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap,
1097 		    dd->dd_index * sizeof(*desc), sizeof(*desc),
1098 #ifdef GTIDMAC_DEBUG
1099 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1100 #else
1101 		    BUS_DMASYNC_PREWRITE);
1102 #endif
1103 
1104 		SLIST_INSERT_AFTER(dd, nxtdd, dd_next);
1105 		dd = nxtdd;
1106 	}
1107 	desc->nextdp = (uint32_t)NULL;
1108 	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap, dd->dd_index * sizeof(*desc),
1109 #ifdef GTIDMAC_DEBUG
1110 	    sizeof(*desc), BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1111 #else
1112 	    sizeof(*desc), BUS_DMASYNC_PREWRITE);
1113 #endif
1114 
1115 	/* Set paddr of descriptor to Channel Next Descriptor Pointer */
1116 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_CNDPR(chan),
1117 	    fstdd->dd_paddr);
1118 
1119 #if BYTE_ORDER == LITTLE_ENDIAN
1120 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_CCHR(chan),
1121 	    GTIDMAC_CCHR_DESCBYTESWAP | GTIDMAC_CCHR_ENDIAN_LE);
1122 #else
1123 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_CCHR(chan),
1124 	    GTIDMAC_CCHR_DESCBYTESWAP | GTIDMAC_CCHR_ENDIAN_BE);
1125 #endif
1126 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_CCLR(chan), ccl);
1127 
1128 #ifdef GTIDMAC_DEBUG
1129 	gtidmac_dump_idmacdesc(sc, fstdd, ccl, 0/*pre*/);
1130 #endif
1131 
1132 	sc->sc_cdesc[chan].chan_totalcnt += size;
1133 
1134 	return 0;
1135 }
1136 
1137 void
1138 gtidmac_start(void *tag, int chan,
1139 	      void (*dma_done_cb)(void *, int, bus_dmamap_t *, bus_dmamap_t *,
1140 				  int))
1141 {
1142 	struct gtidmac_softc *sc = tag;
1143 	uint32_t ccl;
1144 
1145 	DPRINTF(("%s:%d: starting\n", device_xname(sc->sc_dev), chan));
1146 
1147 #ifdef GTIDMAC_DEBUG
1148 	gtidmac_dump_idmacreg(sc, chan);
1149 #endif
1150 
1151 	sc->sc_cdesc[chan].chan_dma_done = dma_done_cb;
1152 
1153 	ccl = bus_space_read_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_CCLR(chan));
1154 	/* Start and 'Fetch Next Descriptor' */
1155 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_CCLR(chan),
1156 	    ccl | GTIDMAC_CCLR_CHANEN | GTIDMAC_CCLR_FETCHND);
1157 }
1158 
1159 static uint32_t
1160 gtidmac_finish(void *tag, int chan, int error)
1161 {
1162 	struct gtidmac_softc *sc = tag;
1163 	struct gtidmac_dma_desc *dd, *fstdd, *nxtdd;
1164 	struct gtidmac_desc *desc;
1165 
1166 	fstdd = &sc->sc_dd_buffer[sc->sc_cdesc[chan].chan_ddidx];
1167 
1168 #ifdef GTIDMAC_DEBUG
1169 	if (error || gtidmac_debug > 1) {
1170 		uint32_t ccl;
1171 
1172 		gtidmac_dump_idmacreg(sc, chan);
1173 		ccl = bus_space_read_4(sc->sc_iot, sc->sc_ioh,
1174 		    GTIDMAC_CCLR(chan));
1175 		gtidmac_dump_idmacdesc(sc, fstdd, ccl, 1/*post*/);
1176 	}
1177 #endif
1178 
1179 	dd = fstdd;
1180 	do {
1181 		desc = dd->dd_idmac_vaddr;
1182 
1183 		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap,
1184 		    dd->dd_index * sizeof(*desc), sizeof(*desc),
1185 #ifdef GTIDMAC_DEBUG
1186 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1187 #else
1188 		    BUS_DMASYNC_POSTWRITE);
1189 #endif
1190 
1191 		nxtdd = SLIST_NEXT(dd, dd_next);
1192 		SLIST_INSERT_HEAD(&sc->sc_dlist, dd, dd_next);
1193 		dd = nxtdd;
1194 	} while (desc->nextdp);
1195 
1196 	return 0;
1197 }
1198 
1199 /*
1200  * XORE functions
1201  */
1202 int
1203 mvxore_chan_alloc(void *tag, bus_dmamap_t **dmamap_in,
1204 		  bus_dmamap_t **dmamap_out, void *object)
1205 {
1206 	struct gtidmac_softc *sc = tag;
1207 	int chan;
1208 
1209 /* maybe need lock */
1210 
1211 	for (chan = 0; chan < sc->sc_mvxore_nchan; chan++)
1212 		if (sc->sc_cdesc_xore[chan].chan_running == NULL)
1213 			break;
1214 	if (chan >= sc->sc_mvxore_nchan)
1215 		return -1;
1216 
1217 
1218 	sc->sc_cdesc_xore[chan].chan_running = object;
1219 
1220 /* unlock */
1221 
1222 	*dmamap_in = sc->sc_cdesc_xore[chan].chan_in;
1223 	*dmamap_out = &sc->sc_cdesc_xore[chan].chan_out;
1224 
1225 	return chan;
1226 }
1227 
1228 void
1229 mvxore_chan_free(void *tag, int chan)
1230 {
1231 	struct gtidmac_softc *sc = tag;
1232 
1233 /* maybe need lock */
1234 
1235 	sc->sc_cdesc_xore[chan].chan_running = NULL;
1236 
1237 /* unlock */
1238 }
1239 
1240 /* ARGSUSED */
1241 int
1242 mvxore_setup(void *tag, int chan, int ninputs, bus_dmamap_t *dmamap_in,
1243 	     bus_dmamap_t *dmamap_out, bus_size_t size)
1244 {
1245 	struct gtidmac_softc *sc = tag;
1246 	struct gtidmac_dma_desc *dd, *fstdd, *nxtdd;
1247 	struct mvxore_desc *desc;
1248 	uint32_t xexc, bcnt, cmd, lastcmd;
1249 	int n = 0, i;
1250 	uint32_t ires[MVXORE_NSRC] = { 0, 0, 0, 0, 0, 0, 0, 0 }, ores = 0;
1251 	int iidx[MVXORE_NSRC] = { 0, 0, 0, 0, 0, 0, 0, 0 }, oidx = 0;
1252 
1253 #ifdef DIAGNOSTIC
1254 	uint32_t xexact;
1255 
1256 	xexact =
1257 	    bus_space_read_4(sc->sc_iot, sc->sc_ioh, MVXORE_XEXACTR(sc, chan));
1258 	if ((xexact & MVXORE_XEXACTR_XESTATUS_MASK) ==
1259 	    MVXORE_XEXACTR_XESTATUS_ACT)
1260 		panic("mvxore_setup: chan%d already active."
1261 		    " mvxore does not support hot insertion", chan);
1262 #endif
1263 
1264 	xexc =
1265 	    (MVXORE_XEXCR_REGACCPROTECT	|
1266 	     MVXORE_XEXCR_DBL_128B	|
1267 	     MVXORE_XEXCR_SBL_128B);
1268 	cmd = lastcmd = 0;
1269 	if (ninputs > 1) {
1270 		xexc |= MVXORE_XEXCR_OM_XOR;
1271 		lastcmd = cmd = (1 << ninputs) - 1;
1272 	} else if (ninputs == 1) {
1273 		if ((*dmamap_out)->dm_nsegs == 0) {
1274 			xexc |= MVXORE_XEXCR_OM_CRC32;
1275 			lastcmd = MVXORE_DESC_CMD_CRCLAST;
1276 		} else
1277 			xexc |= MVXORE_XEXCR_OM_DMA;
1278 	} else if (ninputs == 0) {
1279 		if ((*dmamap_out)->dm_nsegs != 1) {
1280 			aprint_error_dev(sc->sc_dev,
1281 			    "XORE does not support %d DMA segments\n",
1282 			    (*dmamap_out)->dm_nsegs);
1283 			return EINVAL;
1284 		}
1285 
1286 		if ((*dmamap_in)->dm_mapsize == 0) {
1287 			xexc |= MVXORE_XEXCR_OM_ECC;
1288 
1289 			/* XXXXX: Maybe need to set Timer Mode registers? */
1290 
1291 #if 0
1292 		} else if ((*dmamap_in)->dm_mapsize == 8 ||
1293 		    (*dmamap_in)->dm_mapsize == 16) { /* in case dmover */
1294 			uint64_t pattern;
1295 
1296 			/* XXXX: Get pattern data */
1297 
1298 			KASSERT((*dmamap_in)->dm_mapsize == 8 ||
1299 			    (void *)((uint32_t)(*dmamap_in)->_dm_origbuf &
1300 						~PAGE_MASK) == sc->sc_pbuf);
1301 			pattern = *(uint64_t *)(*dmamap_in)->_dm_origbuf;
1302 
1303 			/* XXXXX: XORE has a IVR.  We should get this first. */
1304 			bus_space_write_4(sc->sc_iot, sc->sc_ioh, MVXORE_XEIVRL,
1305 			    pattern);
1306 			bus_space_write_4(sc->sc_iot, sc->sc_ioh, MVXORE_XEIVRH,
1307 			    pattern >> 32);
1308 
1309 			xexc |= MVXORE_XEXCR_OM_MEMINIT;
1310 #endif
1311 		} else {
1312 			aprint_error_dev(sc->sc_dev,
1313 			    "XORE does not support DMA mapsize %zd\n",
1314 			    (*dmamap_in)->dm_mapsize);
1315 			return EINVAL;
1316 		}
1317 		bus_space_write_4(sc->sc_iot, sc->sc_ioh,
1318 		    MVXORE_XEXDPR(sc, chan), (*dmamap_out)->dm_segs[0].ds_addr);
1319 		bus_space_write_4(sc->sc_iot, sc->sc_ioh,
1320 		    MVXORE_XEXBSR(sc, chan), (*dmamap_out)->dm_mapsize);
1321 
1322 		bus_space_write_4(sc->sc_iot, sc->sc_ioh,
1323 		    MVXORE_XEXCR(sc, chan), xexc);
1324 		sc->sc_cdesc_xore[chan].chan_totalcnt += size;
1325 
1326 		return 0;
1327 	}
1328 
1329 	/* Make descriptor for DMA/CRC32/XOR */
1330 
1331 	fstdd = SLIST_FIRST(&sc->sc_dlist_xore);
1332 	if (fstdd == NULL) {
1333 		aprint_error_dev(sc->sc_dev, "no xore descriptor\n");
1334 		return ENOMEM;
1335 	}
1336 	SLIST_REMOVE_HEAD(&sc->sc_dlist_xore, dd_next);
1337 	sc->sc_cdesc_xore[chan].chan_ddidx =
1338 	    fstdd->dd_index + GTIDMAC_NDESC * sc->sc_gtidmac_nchan;
1339 
1340 	dd = fstdd;
1341 	while (1 /*CONSTCOND*/) {
1342 		desc = dd->dd_xore_vaddr;
1343 		desc->stat = MVXORE_DESC_STAT_OWN;
1344 		desc->cmd = cmd;
1345 		if ((*dmamap_out)->dm_nsegs != 0) {
1346 			desc->dstaddr =
1347 			    (*dmamap_out)->dm_segs[oidx].ds_addr + ores;
1348 			bcnt = (*dmamap_out)->dm_segs[oidx].ds_len - ores;
1349 		} else {
1350 			desc->dstaddr = 0;
1351 			bcnt = MVXORE_MAXXFER;	/* XXXXX */
1352 		}
1353 		for (i = 0; i < ninputs; i++) {
1354 			desc->srcaddr[i] =
1355 			    (*dmamap_in[i]).dm_segs[iidx[i]].ds_addr + ires[i];
1356 			bcnt = min(bcnt,
1357 			    (*dmamap_in[i]).dm_segs[iidx[i]].ds_len - ires[i]);
1358 		}
1359 		desc->bcnt = bcnt;
1360 
1361 		n += bcnt;
1362 		if (n >= size)
1363 			break;
1364 		ores += bcnt;
1365 		if ((*dmamap_out)->dm_nsegs != 0 &&
1366 		    ores >= (*dmamap_out)->dm_segs[oidx].ds_len) {
1367 			ores = 0;
1368 			oidx++;
1369 			KASSERT(oidx < (*dmamap_out)->dm_nsegs);
1370 		}
1371 		for (i = 0; i < ninputs; i++) {
1372 			ires[i] += bcnt;
1373 			if (ires[i] >=
1374 			    (*dmamap_in[i]).dm_segs[iidx[i]].ds_len) {
1375 				ires[i] = 0;
1376 				iidx[i]++;
1377 				KASSERT(iidx[i] < (*dmamap_in[i]).dm_nsegs);
1378 			}
1379 		}
1380 
1381 		nxtdd = SLIST_FIRST(&sc->sc_dlist_xore);
1382 		if (nxtdd == NULL) {
1383 			aprint_error_dev(sc->sc_dev, "no xore descriptor\n");
1384 			return ENOMEM;
1385 		}
1386 		SLIST_REMOVE_HEAD(&sc->sc_dlist_xore, dd_next);
1387 
1388 		desc->nextda = (uint32_t)nxtdd->dd_paddr;
1389 		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap_xore,
1390 		    dd->dd_index * sizeof(*desc), sizeof(*desc),
1391 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1392 
1393 		SLIST_INSERT_AFTER(dd, nxtdd, dd_next);
1394 		dd = nxtdd;
1395 	}
1396 	desc->cmd = lastcmd;
1397 	desc->nextda = (uint32_t)NULL;
1398 	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap_xore,
1399 	    dd->dd_index * sizeof(*desc), sizeof(*desc),
1400 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1401 
1402 	/* Set paddr of descriptor to Channel Next Descriptor Pointer */
1403 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, MVXORE_XEXNDPR(sc, chan),
1404 	    fstdd->dd_paddr);
1405 
1406 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, MVXORE_XEXCR(sc, chan), xexc);
1407 
1408 #ifdef GTIDMAC_DEBUG
1409 	gtidmac_dump_xoredesc(sc, fstdd, xexc, 0/*pre*/);
1410 #endif
1411 
1412 	sc->sc_cdesc_xore[chan].chan_totalcnt += size;
1413 
1414 	return 0;
1415 }
1416 
1417 void
1418 mvxore_start(void *tag, int chan,
1419 	     void (*dma_done_cb)(void *, int, bus_dmamap_t *, bus_dmamap_t *,
1420 				 int))
1421 {
1422 	struct gtidmac_softc *sc = tag;
1423 	uint32_t xexact;
1424 
1425 	DPRINTF(("%s:%d: xore starting\n", device_xname(sc->sc_dev), chan));
1426 
1427 #ifdef GTIDMAC_DEBUG
1428 	gtidmac_dump_xorereg(sc, chan);
1429 #endif
1430 
1431 	sc->sc_cdesc_xore[chan].chan_dma_done = dma_done_cb;
1432 
1433 	xexact =
1434 	    bus_space_read_4(sc->sc_iot, sc->sc_ioh, MVXORE_XEXACTR(sc, chan));
1435 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, MVXORE_XEXACTR(sc, chan),
1436 	    xexact | MVXORE_XEXACTR_XESTART);
1437 }
1438 
1439 static uint32_t
1440 mvxore_finish(void *tag, int chan, int error)
1441 {
1442 	struct gtidmac_softc *sc = tag;
1443 	struct gtidmac_dma_desc *dd, *fstdd, *nxtdd;
1444 	struct mvxore_desc *desc;
1445 	uint32_t xexc;
1446 
1447 #ifdef GTIDMAC_DEBUG
1448 	if (error || gtidmac_debug > 1)
1449 		gtidmac_dump_xorereg(sc, chan);
1450 #endif
1451 
1452 	xexc = bus_space_read_4(sc->sc_iot, sc->sc_ioh, MVXORE_XEXCR(sc, chan));
1453 	if ((xexc & MVXORE_XEXCR_OM_MASK) == MVXORE_XEXCR_OM_ECC ||
1454 	    (xexc & MVXORE_XEXCR_OM_MASK) == MVXORE_XEXCR_OM_MEMINIT)
1455 		return 0;
1456 
1457 	fstdd = &sc->sc_dd_buffer[sc->sc_cdesc_xore[chan].chan_ddidx];
1458 
1459 #ifdef GTIDMAC_DEBUG
1460 	if (error || gtidmac_debug > 1)
1461 		gtidmac_dump_xoredesc(sc, fstdd, xexc, 1/*post*/);
1462 #endif
1463 
1464 	dd = fstdd;
1465 	do {
1466 		desc = dd->dd_xore_vaddr;
1467 
1468 		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap_xore,
1469 		    dd->dd_index * sizeof(*desc), sizeof(*desc),
1470 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1471 
1472 		nxtdd = SLIST_NEXT(dd, dd_next);
1473 		SLIST_INSERT_HEAD(&sc->sc_dlist_xore, dd, dd_next);
1474 		dd = nxtdd;
1475 	} while (desc->nextda);
1476 
1477 	if ((xexc & MVXORE_XEXCR_OM_MASK) == MVXORE_XEXCR_OM_CRC32)
1478 		return desc->result;
1479 	return 0;
1480 }
1481 
1482 static void
1483 gtidmac_wininit(struct gtidmac_softc *sc, enum marvell_tags *tags)
1484 {
1485 	device_t pdev = device_parent(sc->sc_dev);
1486 	uint64_t base;
1487 	uint32_t size, cxap, en, winacc;
1488 	int window, target, attr, rv, i, j;
1489 
1490 	en = 0xff;
1491 	cxap = 0;
1492 	for (window = 0, i = 0;
1493 	    tags[i] != MARVELL_TAG_UNDEFINED && window < GTIDMAC_NWINDOW; i++) {
1494 		rv = marvell_winparams_by_tag(pdev, tags[i],
1495 		    &target, &attr, &base, &size);
1496 		if (rv != 0 || size == 0)
1497 			continue;
1498 
1499 		if (base > 0xffffffffULL) {
1500 			if (window >= GTIDMAC_NREMAP) {
1501 				aprint_error_dev(sc->sc_dev,
1502 				    "can't remap window %d\n", window);
1503 				continue;
1504 			}
1505 			bus_space_write_4(sc->sc_iot, sc->sc_ioh,
1506 			    GTIDMAC_HARXR(window), (base >> 32) & 0xffffffff);
1507 		}
1508 		bus_space_write_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_BARX(window),
1509 		    GTIDMAC_BARX_TARGET(target)	|
1510 		    GTIDMAC_BARX_ATTR(attr)	|
1511 		    GTIDMAC_BARX_BASE(base));
1512 		bus_space_write_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_SRX(window),
1513 		    GTIDMAC_SRX_SIZE(size));
1514 		en &= ~GTIDMAC_BAER_EN(window);
1515 
1516 		winacc = GTIDMAC_CXAPR_WINACC_FA;
1517 		if (gtidmac_winacctbl != NULL)
1518 			for (j = 0;
1519 			    gtidmac_winacctbl[j].tag != MARVELL_TAG_UNDEFINED;
1520 			    j++) {
1521 				if (gtidmac_winacctbl[j].tag != tags[i])
1522 					continue;
1523 
1524 				switch (gtidmac_winacctbl[j].winacc) {
1525 				case GTIDMAC_WINACC_NOACCESSALLOWED:
1526 					winacc = GTIDMAC_CXAPR_WINACC_NOAA;
1527 					break;
1528 				case GTIDMAC_WINACC_READONLY:
1529 					winacc = GTIDMAC_CXAPR_WINACC_RO;
1530 					break;
1531 				case GTIDMAC_WINACC_FULLACCESS:
1532 				default: /* XXXX: default is full access */
1533 					break;
1534 				}
1535 				break;
1536 			}
1537 		cxap |= GTIDMAC_CXAPR_WINACC(window, winacc);
1538 
1539 		window++;
1540 	}
1541 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_BAER, en);
1542 
1543 	for (i = 0; i < GTIDMAC_NACCPROT; i++)
1544 		bus_space_write_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_CXAPR(i),
1545 		    cxap);
1546 }
1547 
1548 static void
1549 mvxore_wininit(struct gtidmac_softc *sc, enum marvell_tags *tags)
1550 {
1551 	device_t pdev = device_parent(sc->sc_dev);
1552 	uint64_t base;
1553 	uint32_t target, attr, size, xexwc, winacc;
1554 	int window, rv, i, j, p;
1555 
1556 	xexwc = 0;
1557 	for (window = 0, i = 0;
1558 	    tags[i] != MARVELL_TAG_UNDEFINED && window < MVXORE_NWINDOW; i++) {
1559 		rv = marvell_winparams_by_tag(pdev, tags[i],
1560 		    &target, &attr, &base, &size);
1561 		if (rv != 0 || size == 0)
1562 			continue;
1563 
1564 		if (base > 0xffffffffULL) {
1565 			if (window >= MVXORE_NREMAP) {
1566 				aprint_error_dev(sc->sc_dev,
1567 				    "can't remap window %d\n", window);
1568 				continue;
1569 			}
1570 			for (p = 0; p < sc->sc_mvxore_nchan >> 1; p++)
1571 				bus_space_write_4(sc->sc_iot, sc->sc_ioh,
1572 				    MVXORE_XEHARRX(sc, p, window),
1573 				    (base >> 32) & 0xffffffff);
1574 		}
1575 
1576 		for (p = 0; p < sc->sc_mvxore_nchan >> 1; p++) {
1577 			bus_space_write_4(sc->sc_iot, sc->sc_ioh,
1578 			    MVXORE_XEBARX(sc, p, window),
1579 			    MVXORE_XEBARX_TARGET(target) |
1580 			    MVXORE_XEBARX_ATTR(attr) |
1581 			    MVXORE_XEBARX_BASE(base));
1582 			bus_space_write_4(sc->sc_iot, sc->sc_ioh,
1583 			    MVXORE_XESMRX(sc, p, window),
1584 			    MVXORE_XESMRX_SIZE(size));
1585 		}
1586 
1587 		winacc = MVXORE_XEXWCR_WINACC_FA;
1588 		if (mvxore_winacctbl != NULL)
1589 			for (j = 0;
1590 			    mvxore_winacctbl[j].tag != MARVELL_TAG_UNDEFINED;
1591 			    j++) {
1592 				if (mvxore_winacctbl[j].tag != tags[i])
1593 					continue;
1594 
1595 				switch (mvxore_winacctbl[j].winacc) {
1596 				case GTIDMAC_WINACC_NOACCESSALLOWED:
1597 					winacc = MVXORE_XEXWCR_WINACC_NOAA;
1598 					break;
1599 				case GTIDMAC_WINACC_READONLY:
1600 					winacc = MVXORE_XEXWCR_WINACC_RO;
1601 					break;
1602 				case GTIDMAC_WINACC_FULLACCESS:
1603 				default: /* XXXX: default is full access */
1604 					break;
1605 				}
1606 				break;
1607 			}
1608 		xexwc |= (MVXORE_XEXWCR_WINEN(window) |
1609 		    MVXORE_XEXWCR_WINACC(window, winacc));
1610 		window++;
1611 	}
1612 
1613 	for (i = 0; i < sc->sc_mvxore_nchan; i++) {
1614 		bus_space_write_4(sc->sc_iot, sc->sc_ioh, MVXORE_XEXWCR(sc, i),
1615 		    xexwc);
1616 
1617 		/* XXXXX: reset... */
1618 		bus_space_write_4(sc->sc_iot, sc->sc_ioh, MVXORE_XEXAOCR(sc, 0),
1619 		    0);
1620 	}
1621 }
1622 
1623 static int
1624 gtidmac_buffer_setup(struct gtidmac_softc *sc)
1625 {
1626 	bus_dma_segment_t segs;
1627 	struct gtidmac_dma_desc *dd;
1628 	uint32_t mask;
1629 	int nchan, nsegs, i;
1630 
1631 	nchan = sc->sc_gtidmac_nchan;
1632 
1633 	if (bus_dmamem_alloc(sc->sc_dmat,
1634 	    sizeof(struct gtidmac_desc) * GTIDMAC_NDESC * nchan,
1635 	    PAGE_SIZE, 0, &segs, 1, &nsegs, BUS_DMA_NOWAIT)) {
1636 		aprint_error_dev(sc->sc_dev,
1637 		    "bus_dmamem_alloc failed: descriptor buffer\n");
1638 		goto fail0;
1639 	}
1640 	if (bus_dmamem_map(sc->sc_dmat, &segs, 1,
1641 	    sizeof(struct gtidmac_desc) * GTIDMAC_NDESC * nchan,
1642 	    (void **)&sc->sc_dbuf, BUS_DMA_NOWAIT)) {
1643 		aprint_error_dev(sc->sc_dev,
1644 		    "bus_dmamem_map failed: descriptor buffer\n");
1645 		goto fail1;
1646 	}
1647 	if (bus_dmamap_create(sc->sc_dmat,
1648 	    sizeof(struct gtidmac_desc) * GTIDMAC_NDESC * nchan, 1,
1649 	    sizeof(struct gtidmac_desc) * GTIDMAC_NDESC * nchan, 0,
1650 	    BUS_DMA_NOWAIT, &sc->sc_dmap)) {
1651 		aprint_error_dev(sc->sc_dev,
1652 		    "bus_dmamap_create failed: descriptor buffer\n");
1653 		goto fail2;
1654 	}
1655 	if (bus_dmamap_load(sc->sc_dmat, sc->sc_dmap, sc->sc_dbuf,
1656 	    sizeof(struct gtidmac_desc) * GTIDMAC_NDESC * nchan,
1657 	    NULL, BUS_DMA_NOWAIT)) {
1658 		aprint_error_dev(sc->sc_dev,
1659 		    "bus_dmamap_load failed: descriptor buffer\n");
1660 		goto fail3;
1661 	}
1662 	SLIST_INIT(&sc->sc_dlist);
1663 	for (i = 0; i < GTIDMAC_NDESC * nchan; i++) {
1664 		dd = &sc->sc_dd_buffer[i];
1665 		dd->dd_index = i;
1666 		dd->dd_idmac_vaddr = &sc->sc_dbuf[i];
1667 		dd->dd_paddr = sc->sc_dmap->dm_segs[0].ds_addr +
1668 		    (sizeof(struct gtidmac_desc) * i);
1669 		SLIST_INSERT_HEAD(&sc->sc_dlist, dd, dd_next);
1670 	}
1671 
1672 	/* Initialize IDMAC DMA channels */
1673 	mask = 0;
1674 	for (i = 0; i < nchan; i++) {
1675 		if (i > 0 && ((i * GTIDMAC_I_BITS) & 31 /*bit*/) == 0) {
1676 			bus_space_write_4(sc->sc_iot, sc->sc_ioh,
1677 			    GTIDMAC_IMR(i - 1), mask);
1678 			mask = 0;
1679 		}
1680 
1681 		if (bus_dmamap_create(sc->sc_dmat, GTIDMAC_MAXXFER,
1682 		    GTIDMAC_NSEGS, GTIDMAC_MAXXFER, 0, BUS_DMA_NOWAIT,
1683 		    &sc->sc_cdesc[i].chan_in)) {
1684 			aprint_error_dev(sc->sc_dev,
1685 			    "bus_dmamap_create failed: chan%d in\n", i);
1686 			goto fail4;
1687 		}
1688 		if (bus_dmamap_create(sc->sc_dmat, GTIDMAC_MAXXFER,
1689 		    GTIDMAC_NSEGS, GTIDMAC_MAXXFER, 0, BUS_DMA_NOWAIT,
1690 		    &sc->sc_cdesc[i].chan_out)) {
1691 			aprint_error_dev(sc->sc_dev,
1692 			    "bus_dmamap_create failed: chan%d out\n", i);
1693 			bus_dmamap_destroy(sc->sc_dmat,
1694 			    sc->sc_cdesc[i].chan_in);
1695 			goto fail4;
1696 		}
1697 		sc->sc_cdesc[i].chan_totalcnt = 0;
1698 		sc->sc_cdesc[i].chan_running = NULL;
1699 
1700 		/* Ignore bit overflow; the mask register is 32 bits wide. */
1701 		mask |= GTIDMAC_I(i,
1702 		    GTIDMAC_I_COMP	|
1703 		    GTIDMAC_I_ADDRMISS	|
1704 		    GTIDMAC_I_ACCPROT	|
1705 		    GTIDMAC_I_WRPROT	|
1706 		    GTIDMAC_I_OWN);
1707 
1708 		/* 8bits/channel * 4channels => 32bit */
1709 		if ((i & 0x3) == 0x3) {
1710 			bus_space_write_4(sc->sc_iot, sc->sc_ioh,
1711 			    GTIDMAC_IMR(i), mask);
1712 			mask = 0;
1713 		}
1714 	}
1715 
1716 	return 0;
1717 
1718 fail4:
1719 	for (; i-- > 0;) {
1720 		bus_dmamap_destroy(sc->sc_dmat, sc->sc_cdesc[i].chan_in);
1721 		bus_dmamap_destroy(sc->sc_dmat, sc->sc_cdesc[i].chan_out);
1722 	}
1723 	bus_dmamap_unload(sc->sc_dmat, sc->sc_dmap);
1724 fail3:
1725 	bus_dmamap_destroy(sc->sc_dmat, sc->sc_dmap);
1726 fail2:
1727 	bus_dmamem_unmap(sc->sc_dmat, sc->sc_dbuf,
1728 	    sizeof(struct gtidmac_desc) * GTIDMAC_NDESC * nchan);
1729 fail1:
1730 	bus_dmamem_free(sc->sc_dmat, &segs, 1);
1731 fail0:
1732 	return -1;
1733 }
1734 
1735 static int
1736 mvxore_buffer_setup(struct gtidmac_softc *sc)
1737 {
1738 	bus_dma_segment_t segs;
1739 	struct gtidmac_dma_desc *dd;
1740 	uint32_t mask;
1741 	int nchan, nsegs, i, j;
1742 
1743 	nchan = sc->sc_mvxore_nchan;
1744 
1745 	if (bus_dmamem_alloc(sc->sc_dmat,
1746 	    sizeof(struct mvxore_desc) * MVXORE_NDESC * nchan,
1747 	    PAGE_SIZE, 0, &segs, 1, &nsegs, BUS_DMA_NOWAIT)) {
1748 		aprint_error_dev(sc->sc_dev,
1749 		    "bus_dmamem_alloc failed: xore descriptor buffer\n");
1750 		goto fail0;
1751 	}
1752 	if (bus_dmamem_map(sc->sc_dmat, &segs, 1,
1753 	    sizeof(struct mvxore_desc) * MVXORE_NDESC * nchan,
1754 	    (void **)&sc->sc_dbuf_xore, BUS_DMA_NOWAIT)) {
1755 		aprint_error_dev(sc->sc_dev,
1756 		    "bus_dmamem_map failed: xore descriptor buffer\n");
1757 		goto fail1;
1758 	}
1759 	if (bus_dmamap_create(sc->sc_dmat,
1760 	    sizeof(struct mvxore_desc) * MVXORE_NDESC * nchan, 1,
1761 	    sizeof(struct mvxore_desc) * MVXORE_NDESC * nchan, 0,
1762 	    BUS_DMA_NOWAIT, &sc->sc_dmap_xore)) {
1763 		aprint_error_dev(sc->sc_dev,
1764 		    "bus_dmamap_create failed: xore descriptor buffer\n");
1765 		goto fail2;
1766 	}
1767 	if (bus_dmamap_load(sc->sc_dmat, sc->sc_dmap_xore, sc->sc_dbuf_xore,
1768 	    sizeof(struct mvxore_desc) * MVXORE_NDESC * nchan,
1769 	    NULL, BUS_DMA_NOWAIT)) {
1770 		aprint_error_dev(sc->sc_dev,
1771 		    "bus_dmamap_load failed: xore descriptor buffer\n");
1772 		goto fail3;
1773 	}
1774 	SLIST_INIT(&sc->sc_dlist_xore);
1775 	for (i = 0; i < MVXORE_NDESC * nchan; i++) {
1776 		dd =
1777 		    &sc->sc_dd_buffer[i + GTIDMAC_NDESC * sc->sc_gtidmac_nchan];
1778 		dd->dd_index = i;
1779 		dd->dd_xore_vaddr = &sc->sc_dbuf_xore[i];
1780 		dd->dd_paddr = sc->sc_dmap_xore->dm_segs[0].ds_addr +
1781 		    (sizeof(struct mvxore_desc) * i);
1782 		SLIST_INSERT_HEAD(&sc->sc_dlist_xore, dd, dd_next);
1783 	}
1784 
1785 	/* Initialize XORE DMA channels */
1786 	mask = 0;
1787 	for (i = 0; i < nchan; i++) {
1788 		for (j = 0; j < MVXORE_NSRC; j++) {
1789 			if (bus_dmamap_create(sc->sc_dmat,
1790 			    MVXORE_MAXXFER, MVXORE_NSEGS,
1791 			    MVXORE_MAXXFER, 0, BUS_DMA_NOWAIT,
1792 			    &sc->sc_cdesc_xore[i].chan_in[j])) {
1793 				aprint_error_dev(sc->sc_dev,
1794 				    "bus_dmamap_create failed:"
1795 				    " xore chan%d in[%d]\n", i, j);
1796 				goto fail4;
1797 			}
1798 		}
1799 		if (bus_dmamap_create(sc->sc_dmat, MVXORE_MAXXFER,
1800 		    MVXORE_NSEGS, MVXORE_MAXXFER, 0,
1801 		    BUS_DMA_NOWAIT, &sc->sc_cdesc_xore[i].chan_out)) {
1802 			aprint_error_dev(sc->sc_dev,
1803 			    "bus_dmamap_create failed: chan%d out\n", i);
1804 			goto fail5;
1805 		}
1806 		sc->sc_cdesc_xore[i].chan_totalcnt = 0;
1807 		sc->sc_cdesc_xore[i].chan_running = NULL;
1808 
1809 		mask |= MVXORE_I(i,
1810 		    MVXORE_I_EOC	|
1811 		    MVXORE_I_ADDRDECODE	|
1812 		    MVXORE_I_ACCPROT	|
1813 		    MVXORE_I_WRPROT	|
1814 		    MVXORE_I_OWN	|
1815 		    MVXORE_I_INTPARITY	|
1816 		    MVXORE_I_XBAR);
1817 
1818 		/* Two channels (16 mask bits each) share one 32-bit XEIMR. */
1819 		if (i & 0x1) {
1820 			bus_space_write_4(sc->sc_iot, sc->sc_ioh,
1821 			    MVXORE_XEIMR(sc, i >> 1), mask);
1822 			mask = 0;
1823 		}
1824 	}
1825 
1826 	return 0;
1827 
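	/*
	 * Error unwinding.  The labels below sit inside the loop body: a
	 * goto fail4/fail5 first releases the maps already created for the
	 * failing channel and then falls into the loop condition, which
	 * tears down every previously initialized channel in reverse order.
	 */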
1828 	for (; i-- > 0;) {
1829 		bus_dmamap_destroy(sc->sc_dmat, sc->sc_cdesc_xore[i].chan_out);
1830 
1831 fail5:
1832 		j = MVXORE_NSRC;
1833 fail4:
1834 		for (; j-- > 0;)
1835 			bus_dmamap_destroy(sc->sc_dmat,
1836 			    sc->sc_cdesc_xore[i].chan_in[j]);
1837 	}
1838 	bus_dmamap_unload(sc->sc_dmat, sc->sc_dmap_xore);
1839 fail3:
1840 	bus_dmamap_destroy(sc->sc_dmat, sc->sc_dmap_xore);
1841 fail2:
1842 	bus_dmamem_unmap(sc->sc_dmat, sc->sc_dbuf_xore,
1843 	    sizeof(struct mvxore_desc) * MVXORE_NDESC * nchan);
1844 fail1:
1845 	bus_dmamem_free(sc->sc_dmat, &segs, 1);
1846 fail0:
1847 	return -1;
1848 }
1849 
1850 #ifdef GTIDMAC_DEBUG
1851 static void
1852 gtidmac_dump_idmacreg(struct gtidmac_softc *sc, int chan)
1853 {
1854 	uint32_t val;
1855 	char buf[256];
1856 
1857 	printf("IDMAC Registers\n");
1858 
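	/*
	 * The snprintb(9) "new style" formats below (leading \177) print the
	 * raw register value in hex followed by the names of the set bits.
	 */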
1859 	val = bus_space_read_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_CIDMABCR(chan));
1860 	snprintb(buf, sizeof(buf), "\177\020b\037Own\0b\036BCLeft\0", val);
1861 	printf("  Byte Count                 : %s\n", buf);
1862 	printf("    ByteCnt                  :   0x%06x\n",
1863 	    val & GTIDMAC_CIDMABCR_BYTECNT_MASK);
1864 	printf("  Source Address             : 0x%08x\n",
1865 	    bus_space_read_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_CIDMASAR(chan)));
1866 	printf("  Destination Address        : 0x%08x\n",
1867 	    bus_space_read_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_CIDMADAR(chan)));
1868 	printf("  Next Descriptor Pointer    : 0x%08x\n",
1869 	    bus_space_read_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_CNDPR(chan)));
1870 	printf("  Current Descriptor Pointer : 0x%08x\n",
1871 	    bus_space_read_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_CCDPR(chan)));
1872 
1873 	val = bus_space_read_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_CCLR(chan));
1874 	snprintb(buf, sizeof(buf),
1875 	    "\177\020b\024Abr\0b\021CDEn\0b\016ChanAct\0b\015FetchND\0"
1876 	    "b\014ChanEn\0b\012IntMode\0b\005DestHold\0b\003SrcHold\0",
1877 	    val);
1878 	printf("  Channel Control (Low)      : %s\n", buf);
1879 	printf("    SrcBurstLimit            : %s Bytes\n",
1880 	  (val & GTIDMAC_CCLR_SBL_MASK) == GTIDMAC_CCLR_SBL_128B ? "128" :
1881 	    (val & GTIDMAC_CCLR_SBL_MASK) == GTIDMAC_CCLR_SBL_64B ? "64" :
1882 	    (val & GTIDMAC_CCLR_SBL_MASK) == GTIDMAC_CCLR_SBL_32B ? "32" :
1883 	    (val & GTIDMAC_CCLR_SBL_MASK) == GTIDMAC_CCLR_SBL_16B ? "16" :
1884 	    (val & GTIDMAC_CCLR_SBL_MASK) == GTIDMAC_CCLR_SBL_8B ? "8" :
1885 	    "unknown");
1886 	printf("    DstBurstLimit            : %s Bytes\n",
1887 	  (val & GTIDMAC_CCLR_DBL_MASK) == GTIDMAC_CCLR_DBL_128B ? "128" :
1888 	    (val & GTIDMAC_CCLR_DBL_MASK) == GTIDMAC_CCLR_DBL_64B ? "64" :
1889 	    (val & GTIDMAC_CCLR_DBL_MASK) == GTIDMAC_CCLR_DBL_32B ? "32" :
1890 	    (val & GTIDMAC_CCLR_DBL_MASK) == GTIDMAC_CCLR_DBL_16B ? "16" :
1891 	    (val & GTIDMAC_CCLR_DBL_MASK) == GTIDMAC_CCLR_DBL_8B ? "8" :
1892 	    "unknown");
1893 	printf("    ChainMode                : %sChained\n",
1894 	    val & GTIDMAC_CCLR_CHAINMODE_NC ? "Non-" : "");
1895 	printf("    TransferMode             : %s\n",
1896 	    val & GTIDMAC_CCLR_TRANSFERMODE_B ? "Block" : "Demand");
1897 	printf("    DescMode                 : %s\n",
1898 	    val & GTIDMAC_CCLR_DESCMODE_16M ? "16M" : "64k");
1899 	val = bus_space_read_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_CCHR(chan));
1900 	snprintb(buf, sizeof(buf),
1901 	    "\177\020b\001DescByteSwap\0b\000Endianness\0", val);
1902 	printf("  Channel Control (High)     : %s\n", buf);
1903 }
1904 
1905 static void
1906 gtidmac_dump_idmacdesc(struct gtidmac_softc *sc, struct gtidmac_dma_desc *dd,
1907 		       uint32_t mode, int post)
1908 {
1909 	struct gtidmac_desc *desc;
1910 	int i;
1911 	char buf[256];
1912 
1913 	printf("IDMAC Descriptor\n");
1914 
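	/*
	 * Walk the chain until a descriptor with a NULL next pointer.  When
	 * dumping a completed transfer (post != 0) each descriptor is synced
	 * for the CPU before it is read; otherwise the sync is handed back
	 * to the device after reading.
	 */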
1915 	i = 0;
1916 	while (1 /*CONSTCOND*/) {
1917 		if (post)
1918 			bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap,
1919 			    dd->dd_index * sizeof(*desc), sizeof(*desc),
1920 			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1921 
1922 		desc = dd->dd_idmac_vaddr;
1923 
1924 		printf("%d (0x%lx)\n", i, (unsigned long)dd->dd_paddr);
1925 		if (mode & GTIDMAC_CCLR_DESCMODE_16M) {
1926 			snprintb(buf, sizeof(buf),
1927 			    "\177\020b\037Own\0b\036BCLeft\0",
1928 			    desc->bc.mode16m.bcnt);
1929 			printf("  Byte Count              : %s\n", buf);
1930 			printf("    ByteCount             :   0x%06x\n",
1931 			    desc->bc.mode16m.bcnt &
1932 			    GTIDMAC_CIDMABCR_BYTECNT_MASK);
1933 		} else {
1934 			printf("  Byte Count              :     0x%04x\n",
1935 			    desc->bc.mode64k.bcnt);
1936 			printf("  Remaining Byte Count    :     0x%04x\n",
1937 			    desc->bc.mode64k.rbc);
1938 		}
1939 		printf("  Source Address          : 0x%08x\n", desc->srcaddr);
1940 		printf("  Destination Address     : 0x%08x\n", desc->dstaddr);
1941 		printf("  Next Descriptor Pointer : 0x%08x\n", desc->nextdp);
1942 
1943 		if (desc->nextdp == 0)
1944 			break;
1945 
1946 		if (!post)
1947 			bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap,
1948 			    dd->dd_index * sizeof(*desc), sizeof(*desc),
1949 			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1950 
1951 		i++;
1952 		dd = SLIST_NEXT(dd, dd_next);
1953 	}
1954 	if (!post)
1955 		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap,
1956 		    dd->dd_index * sizeof(*desc), sizeof(*desc),
1957 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1958 }
1959 
1960 static void
1961 gtidmac_dump_xorereg(struct gtidmac_softc *sc, int chan)
1962 {
1963 	uint32_t val, opmode;
1964 	char buf[64];
1965 
1966 	printf("XORE Registers\n");
1967 
1968 	val = bus_space_read_4(sc->sc_iot, sc->sc_ioh, MVXORE_XEXCR(sc, chan));
1969 	snprintb(buf, sizeof(buf),
1970 	    "\177\020"
1971 	    "b\017RegAccProtect\0b\016DesSwp\0b\015DwrReqSwp\0b\014DrdResSwp\0",
1972 	    val);
1973 	printf(" Configuration    : %s\n", buf);
1974 	opmode = val & MVXORE_XEXCR_OM_MASK;
1975 	printf("    OperationMode : %s operation\n",
1976 	  opmode == MVXORE_XEXCR_OM_XOR ? "XOR calculate" :
1977 	  opmode == MVXORE_XEXCR_OM_CRC32 ? "CRC-32 calculate" :
1978 	  opmode == MVXORE_XEXCR_OM_DMA ? "DMA" :
1979 	  opmode == MVXORE_XEXCR_OM_ECC ? "ECC cleanup" :
1980 	  opmode == MVXORE_XEXCR_OM_MEMINIT ? "Memory Initialization" :
1981 	  "unknown");
1982 	printf("    SrcBurstLimit : %s Bytes\n",
1983 	  (val & MVXORE_XEXCR_SBL_MASK) == MVXORE_XEXCR_SBL_128B ? "128" :
1984 	    (val & MVXORE_XEXCR_SBL_MASK) == MVXORE_XEXCR_SBL_64B ? "64" :
1985 	    (val & MVXORE_XEXCR_SBL_MASK) == MVXORE_XEXCR_SBL_32B ? "32" :
1986 	    "unknown");
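	/*
	 * XXX The DstBurstLimit decode below tests the SrcBurstLimit field
	 * (MVXORE_XEXCR_SBL_*); it presumably should use the corresponding
	 * destination burst-limit field instead.
	 */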
1987 	printf("    DstBurstLimit : %s Bytes\n",
1988 	  (val & MVXORE_XEXCR_SBL_MASK) == MVXORE_XEXCR_SBL_128B ? "128" :
1989 	    (val & MVXORE_XEXCR_SBL_MASK) == MVXORE_XEXCR_SBL_64B ? "64" :
1990 	    (val & MVXORE_XEXCR_SBL_MASK) == MVXORE_XEXCR_SBL_32B ? "32" :
1991 	    "unknown");
1992 	val =
1993 	    bus_space_read_4(sc->sc_iot, sc->sc_ioh, MVXORE_XEXACTR(sc, chan));
1994 	printf("  Activation      : 0x%08x\n", val);
1995 	val &= MVXORE_XEXACTR_XESTATUS_MASK;
1996 	printf("    XEstatus      : %s\n",
1997 	    val == MVXORE_XEXACTR_XESTATUS_NA ? "Channel not active" :
1998 	    val == MVXORE_XEXACTR_XESTATUS_ACT ? "Channel active" :
1999 	    val == MVXORE_XEXACTR_XESTATUS_P ? "Channel paused" : "???");
2000 
2001 	if (opmode == MVXORE_XEXCR_OM_XOR ||
2002 	    opmode == MVXORE_XEXCR_OM_CRC32 ||
2003 	    opmode == MVXORE_XEXCR_OM_DMA) {
2004 		printf("  NextDescPtr     : 0x%08x\n",
2005 		    bus_space_read_4(sc->sc_iot, sc->sc_ioh,
2006 		    MVXORE_XEXNDPR(sc, chan)));
2007 		printf("  CurrentDescPtr  : 0x%08x\n",
2008 		    bus_space_read_4(sc->sc_iot, sc->sc_ioh,
2009 		    MVXORE_XEXCDPR(chan)));
2010 	}
2011 	printf("  ByteCnt         : 0x%08x\n",
2012 	    bus_space_read_4(sc->sc_iot, sc->sc_ioh, MVXORE_XEXBCR(chan)));
2013 
2014 	if (opmode == MVXORE_XEXCR_OM_ECC ||
2015 	    opmode == MVXORE_XEXCR_OM_MEMINIT) {
2016 		printf("  DstPtr          : 0x%08x\n",
2017 		    bus_space_read_4(sc->sc_iot, sc->sc_ioh,
2018 		    MVXORE_XEXDPR(sc, chan)));
2019 		printf("  BlockSize       : 0x%08x\n",
2020 		    bus_space_read_4(sc->sc_iot, sc->sc_ioh,
2021 		    MVXORE_XEXBSR(sc, chan)));
2022 
2023 		if (opmode == MVXORE_XEXCR_OM_ECC) {
2024 			val = bus_space_read_4(sc->sc_iot, sc->sc_ioh,
2025 			    MVXORE_XETMCR);
2026 			if (val & MVXORE_XETMCR_TIMEREN) {
2027 				val >>= MVXORE_XETMCR_SECTIONSIZECTRL_SHIFT;
2028 				val &= MVXORE_XETMCR_SECTIONSIZECTRL_MASK;
2029 				printf("  SectionSizeCtrl : 0x%08x\n", 1U << val);
2030 				printf("  TimerInitVal    : 0x%08x\n",
2031 				    bus_space_read_4(sc->sc_iot, sc->sc_ioh,
2032 				    MVXORE_XETMIVR));
2033 				printf("  TimerCrntVal    : 0x%08x\n",
2034 				    bus_space_read_4(sc->sc_iot, sc->sc_ioh,
2035 				    MVXORE_XETMCVR));
2036 			}
2037 		} else	/* MVXORE_XEXCR_OM_MEMINIT */
2038 			printf("  InitVal         : 0x%08x%08x\n",
2039 			    bus_space_read_4(sc->sc_iot, sc->sc_ioh,
2040 			    MVXORE_XEIVRH),
2041 			    bus_space_read_4(sc->sc_iot, sc->sc_ioh,
2042 			    MVXORE_XEIVRL));
2043 	}
2044 }
2045 
2046 static void
2047 gtidmac_dump_xoredesc(struct gtidmac_softc *sc, struct gtidmac_dma_desc *dd,
2048 		      uint32_t mode, int post)
2049 {
2050 	struct mvxore_desc *desc;
2051 	int i, j;
2052 	char buf[256];
2053 
2054 	printf("XORE Descriptor\n");
2055 
2056 	mode &= MVXORE_XEXCR_OM_MASK;
2057 
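	/* Same chain walk and sync discipline as gtidmac_dump_idmacdesc(). */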
2058 	i = 0;
2059 	while (1 /*CONSTCOND*/) {
2060 		if (post)
2061 			bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap_xore,
2062 			    dd->dd_index * sizeof(*desc), sizeof(*desc),
2063 			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2064 
2065 		desc = dd->dd_xore_vaddr;
2066 
2067 		printf("%d (0x%lx)\n", i, (unsigned long)dd->dd_paddr);
2068 
2069 		snprintb(buf, sizeof(buf), "\177\020b\037Own\0b\036Success\0",
2070 		    desc->stat);
2071 		printf("  Status                  : %s\n", buf);
2072 		if (desc->cmd & MVXORE_DESC_CMD_CRCLAST && post)
2073 			printf("  CRC-32 Result           : 0x%08x\n",
2074 			    desc->result);
2075 		snprintb(buf, sizeof(buf),
2076 		    "\177\020b\037EODIntEn\0b\036CRCLast\0"
2077 		    "b\007Src7Cmd\0b\006Src6Cmd\0b\005Src5Cmd\0b\004Src4Cmd\0"
2078 		    "b\003Src3Cmd\0b\002Src2Cmd\0b\001Src1Cmd\0b\000Src0Cmd\0",
2079 		    desc->cmd);
2080 		printf("  Command                 : %s\n", buf);
2081 		printf("  Next Descriptor Address : 0x%08x\n", desc->nextda);
2082 		printf("  Byte Count              :   0x%06x\n", desc->bcnt);
2083 		printf("  Destination Address     : 0x%08x\n", desc->dstaddr);
2084 		if (mode == MVXORE_XEXCR_OM_XOR) {
2085 			for (j = 0; j < MVXORE_NSRC; j++)
2086 				if (desc->cmd & MVXORE_DESC_CMD_SRCCMD(j))
2087 					printf("  Source Address#%d        :"
2088 					    " 0x%08x\n", j, desc->srcaddr[j]);
2089 		} else
2090 			printf("  Source Address          : 0x%08x\n",
2091 			    desc->srcaddr[0]);
2092 
2093 		if (desc->nextda == 0)
2094 			break;
2095 
2096 		if (!post)
2097 			bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap_xore,
2098 			    dd->dd_index * sizeof(*desc), sizeof(*desc),
2099 			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2100 
2101 		i++;
2102 		dd = SLIST_NEXT(dd, dd_next);
2103 	}
2104 	if (!post)
2105 		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap_xore,
2106 		    dd->dd_index * sizeof(*desc), sizeof(*desc),
2107 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2108 }
2109 #endif
2110