1 /*	$NetBSD: gtidmac.c,v 1.10 2013/09/28 05:39:06 kiyohara Exp $	*/
2 /*
3  * Copyright (c) 2008, 2012 KIYOHARA Takashi
4  * All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
17  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
18  * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
19  * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
20  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
21  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
23  * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
24  * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
25  * POSSIBILITY OF SUCH DAMAGE.
26  */
27 
28 #include <sys/cdefs.h>
29 __KERNEL_RCSID(0, "$NetBSD: gtidmac.c,v 1.10 2013/09/28 05:39:06 kiyohara Exp $");
30 
31 #include <sys/param.h>
32 #include <sys/bus.h>
33 #include <sys/device.h>
34 #include <sys/errno.h>
35 #include <sys/endian.h>
36 #include <sys/kmem.h>
37 
38 #include <uvm/uvm_param.h>	/* For PAGE_SIZE */
39 
40 #include <dev/dmover/dmovervar.h>
41 
42 #include <dev/marvell/gtidmacreg.h>
43 #include <dev/marvell/gtidmacvar.h>
44 #include <dev/marvell/marvellreg.h>
45 #include <dev/marvell/marvellvar.h>
46 
47 #include <prop/proplib.h>
48 
49 #include "locators.h"
50 
51 #ifdef GTIDMAC_DEBUG
52 #define DPRINTF(x)	if (gtidmac_debug) printf x
53 int gtidmac_debug = 0;
54 #else
55 #define DPRINTF(x)
56 #endif
57 
58 #define GTIDMAC_NDESC		64
59 #define GTIDMAC_MAXCHAN		8
60 #define MVXORE_NDESC		128
61 #define MVXORE_MAXCHAN		2
62 
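/*
 * Worst-case number of bus_dma segments for a maximum-sized transfer,
 * assuming each segment covers at most one page.
 */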
63 #define GTIDMAC_NSEGS		((GTIDMAC_MAXXFER + PAGE_SIZE - 1) / PAGE_SIZE)
64 #define MVXORE_NSEGS		((MVXORE_MAXXFER + PAGE_SIZE - 1) / PAGE_SIZE)
65 
66 
67 struct gtidmac_softc;
68 
69 struct gtidmac_function {
70 	int (*chan_alloc)(void *, bus_dmamap_t **, bus_dmamap_t **, void *);
71 	void (*chan_free)(void *, int);
72 	int (*dma_setup)(void *, int, int, bus_dmamap_t *, bus_dmamap_t *,
73 			 bus_size_t);
74 	void (*dma_start)(void *, int,
75 			  void (*dma_done_cb)(void *, int, bus_dmamap_t *,
76 						      bus_dmamap_t *, int));
77 	uint32_t (*dma_finish)(void *, int, int);
78 };
79 
80 struct gtidmac_dma_desc {
81 	int dd_index;
82 	union {
83 		struct gtidmac_desc *idmac_vaddr;
84 		struct mvxore_desc *xore_vaddr;
85 	} dd_vaddr;
86 #define dd_idmac_vaddr	dd_vaddr.idmac_vaddr
87 #define dd_xore_vaddr	dd_vaddr.xore_vaddr
88 	paddr_t dd_paddr;
89 	SLIST_ENTRY(gtidmac_dma_desc) dd_next;
90 };
91 
92 struct gtidmac_softc {
93 	device_t sc_dev;
94 
95 	bus_space_tag_t sc_iot;
96 	bus_space_handle_t sc_ioh;
97 
98 	bus_dma_tag_t sc_dmat;
99 	struct gtidmac_dma_desc *sc_dd_buffer;
100 	bus_dma_segment_t sc_pattern_segment;
101 	struct {
102 		u_char pbuf[16];	/* 16byte/pattern */
103 	} *sc_pbuf;			/*   x256 pattern */
104 
105 	int sc_gtidmac_nchan;
106 	struct gtidmac_desc *sc_dbuf;
107 	bus_dmamap_t sc_dmap;
108 	SLIST_HEAD(, gtidmac_dma_desc) sc_dlist;
109 	struct {
110 		bus_dmamap_t chan_in;		/* In dmamap */
111 		bus_dmamap_t chan_out;		/* Out dmamap */
112 		uint64_t chan_totalcnt;		/* total transferred bytes */
113 		int chan_ddidx;
114 		void *chan_running;		/* opaque object data */
115 		void (*chan_dma_done)(void *, int, bus_dmamap_t *,
116 				      bus_dmamap_t *, int);
117 	} sc_cdesc[GTIDMAC_MAXCHAN];
118 	struct gtidmac_intr_arg {
119 		struct gtidmac_softc *ia_sc;
120 		uint32_t ia_cause;
121 		uint32_t ia_mask;
122 		uint32_t ia_eaddr;
123 		uint32_t ia_eselect;
124 	} sc_intrarg[GTIDMAC_NINTRRUPT];
125 
126 	int sc_mvxore_nchan;
127 	struct mvxore_desc *sc_dbuf_xore;
128 	bus_dmamap_t sc_dmap_xore;
129 	SLIST_HEAD(, gtidmac_dma_desc) sc_dlist_xore;
130 	struct {
131 		bus_dmamap_t chan_in[MVXORE_NSRC];	/* In dmamap */
132 		bus_dmamap_t chan_out;			/* Out dmamap */
133 		uint64_t chan_totalcnt;			/* total transferred bytes */
134 		int chan_ddidx;
135 		void *chan_running;			/* opaque object data */
136 		void (*chan_dma_done)(void *, int, bus_dmamap_t *,
137 				      bus_dmamap_t *, int);
138 	} sc_cdesc_xore[MVXORE_MAXCHAN];
139 
140 	struct dmover_backend sc_dmb;
141 	struct dmover_backend sc_dmb_xore;
142 	int sc_dmb_busy;
143 };
144 struct gtidmac_softc *gtidmac_softc = NULL;
145 
146 static int gtidmac_match(device_t, struct cfdata *, void *);
147 static void gtidmac_attach(device_t, device_t, void *);
148 
149 static int gtidmac_intr(void *);
150 static int mvxore_port0_intr(void *);
151 static int mvxore_port1_intr(void *);
152 static int mvxore_intr(struct gtidmac_softc *, int);
153 
154 static void gtidmac_process(struct dmover_backend *);
155 static void gtidmac_dmover_run(struct dmover_backend *);
156 static void gtidmac_dmover_done(void *, int, bus_dmamap_t *, bus_dmamap_t *,
157 				int);
158 static __inline int gtidmac_dmmap_load(struct gtidmac_softc *, bus_dmamap_t,
159 				dmover_buffer_type, dmover_buffer *, int);
160 static __inline void gtidmac_dmmap_unload(struct gtidmac_softc *, bus_dmamap_t, int);
161 
162 static uint32_t gtidmac_finish(void *, int, int);
163 static uint32_t mvxore_finish(void *, int, int);
164 
165 static void gtidmac_wininit(struct gtidmac_softc *);
166 static void mvxore_wininit(struct gtidmac_softc *);
167 
168 static int gtidmac_buffer_setup(struct gtidmac_softc *);
169 static int mvxore_buffer_setup(struct gtidmac_softc *);
170 
171 #ifdef GTIDMAC_DEBUG
172 static void gtidmac_dump_idmacreg(struct gtidmac_softc *, int);
173 static void gtidmac_dump_idmacdesc(struct gtidmac_softc *,
174 				   struct gtidmac_dma_desc *, uint32_t, int);
175 static void gtidmac_dump_xorereg(struct gtidmac_softc *, int);
176 static void gtidmac_dump_xoredesc(struct gtidmac_softc *,
177 				  struct gtidmac_dma_desc *, uint32_t, int);
178 #endif
179 
180 
181 static struct gtidmac_function gtidmac_functions = {
182 	.chan_alloc = gtidmac_chan_alloc,
183 	.chan_free = gtidmac_chan_free,
184 	.dma_setup = gtidmac_setup,
185 	.dma_start = gtidmac_start,
186 	.dma_finish = gtidmac_finish,
187 };
188 
189 static struct gtidmac_function mvxore_functions = {
190 	.chan_alloc = mvxore_chan_alloc,
191 	.chan_free = mvxore_chan_free,
192 	.dma_setup = mvxore_setup,
193 	.dma_start = mvxore_start,
194 	.dma_finish = mvxore_finish,
195 };
196 
197 static const struct dmover_algdesc gtidmac_algdescs[] = {
198 	{
199 		.dad_name = DMOVER_FUNC_ZERO,
200 		.dad_data = &gtidmac_functions,
201 		.dad_ninputs = 0
202 	},
203 	{
204 		.dad_name = DMOVER_FUNC_FILL8,
205 		.dad_data = &gtidmac_functions,
206 		.dad_ninputs = 0
207 	},
208 	{
209 		.dad_name = DMOVER_FUNC_COPY,
210 		.dad_data = &gtidmac_functions,
211 		.dad_ninputs = 1
212 	},
213 };
214 
215 static const struct dmover_algdesc mvxore_algdescs[] = {
216 #if 0
217 	/*
218 	 * These operations have many restrictions on the XOR engine, so it is
219 	 * necessary to use IDMAC for them instead.
220 	 */
221 	{
222 		.dad_name = DMOVER_FUNC_ZERO,
223 		.dad_data = &mvxore_functions,
224 		.dad_ninputs = 0
225 	},
226 	{
227 		.dad_name = DMOVER_FUNC_FILL8,
228 		.dad_data = &mvxore_functions,
229 		.dad_ninputs = 0
230 	},
231 #endif
232 	{
233 		.dad_name = DMOVER_FUNC_COPY,
234 		.dad_data = &mvxore_functions,
235 		.dad_ninputs = 1
236 	},
237 	{
238 		.dad_name = DMOVER_FUNC_ISCSI_CRC32C,
239 		.dad_data = &mvxore_functions,
240 		.dad_ninputs = 1
241 	},
242 	{
243 		.dad_name = DMOVER_FUNC_XOR2,
244 		.dad_data = &mvxore_functions,
245 		.dad_ninputs = 2
246 	},
247 	{
248 		.dad_name = DMOVER_FUNC_XOR3,
249 		.dad_data = &mvxore_functions,
250 		.dad_ninputs = 3
251 	},
252 	{
253 		.dad_name = DMOVER_FUNC_XOR4,
254 		.dad_data = &mvxore_functions,
255 		.dad_ninputs = 4
256 	},
257 	{
258 		.dad_name = DMOVER_FUNC_XOR5,
259 		.dad_data = &mvxore_functions,
260 		.dad_ninputs = 5
261 	},
262 	{
263 		.dad_name = DMOVER_FUNC_XOR6,
264 		.dad_data = &mvxore_functions,
265 		.dad_ninputs = 6
266 	},
267 	{
268 		.dad_name = DMOVER_FUNC_XOR7,
269 		.dad_data = &mvxore_functions,
270 		.dad_ninputs = 7
271 	},
272 	{
273 		.dad_name = DMOVER_FUNC_XOR8,
274 		.dad_data = &mvxore_functions,
275 		.dad_ninputs = 8
276 	},
277 };
278 
279 static struct {
280 	int model;
281 	int idmac_nchan;
282 	int idmac_irq;
283 	int xore_nchan;
284 	int xore_irq;
285 } channels[] = {
286 	/*
287 	 * Marvell System Controllers:
288 	 * irqs must be supplied in attach_args.
289 	 */
290 	{ MARVELL_DISCOVERY,		8, -1, 0, -1 },
291 	{ MARVELL_DISCOVERY_II,		8, -1, 0, -1 },
292 	{ MARVELL_DISCOVERY_III,	8, -1, 0, -1 },
293 #if 0
294 	{ MARVELL_DISCOVERY_LT,		4, -1, 2, -1 },
295 	{ MARVELL_DISCOVERY_V,		4, -1, 2, -1 },
296 	{ MARVELL_DISCOVERY_VI,		4, -1, 2, -1 },		????
297 #endif
298 
299 	/*
300 	 * Marvell Systems on Chip:
301 	 * No irqs are needed in attach_args; the SoCs are always connected
302 	 * to interrupt pins statically.
303 	 */
304 	{ MARVELL_ORION_1_88F1181,	4, 24, 0, -1 },
305 	{ MARVELL_ORION_2_88F1281,	4, 24, 0, -1 },
306 	{ MARVELL_ORION_1_88F5082,	4, 24, 0, -1 },
307 	{ MARVELL_ORION_1_88F5180N,	4, 24, 0, -1 },
308 	{ MARVELL_ORION_1_88F5181,	4, 24, 0, -1 },
309 	{ MARVELL_ORION_1_88F5182,	4, 24, 2, 30 },
310 	{ MARVELL_ORION_2_88F5281,	4, 24, 0, -1 },
311 	{ MARVELL_ORION_1_88W8660,	4, 24, 0, -1 },
312 	{ MARVELL_KIRKWOOD_88F6180,	0, -1, 4, 5 },
313 	{ MARVELL_KIRKWOOD_88F6192,	0, -1, 4, 5 },
314 	{ MARVELL_KIRKWOOD_88F6281,	0, -1, 4, 5 },
315 	{ MARVELL_KIRKWOOD_88F6282,	0, -1, 4, 5 },
316 	{ MARVELL_ARMADAXP_MV78130,	4, 33, 2, 51 },
317 	{ MARVELL_ARMADAXP_MV78130,	0, -1, 2, 94 },
318 	{ MARVELL_ARMADAXP_MV78160,	4, 33, 2, 51 },
319 	{ MARVELL_ARMADAXP_MV78160,	0, -1, 2, 94 },
320 	{ MARVELL_ARMADAXP_MV78230,	4, 33, 2, 51 },
321 	{ MARVELL_ARMADAXP_MV78230,	0, -1, 2, 94 },
322 	{ MARVELL_ARMADAXP_MV78260,	4, 33, 2, 51 },
323 	{ MARVELL_ARMADAXP_MV78260,	0, -1, 2, 94 },
324 	{ MARVELL_ARMADAXP_MV78460,	4, 33, 2, 51 },
325 	{ MARVELL_ARMADAXP_MV78460,	0, -1, 2, 94 },
326 };
327 
328 CFATTACH_DECL_NEW(gtidmac_gt, sizeof(struct gtidmac_softc),
329     gtidmac_match, gtidmac_attach, NULL, NULL);
330 CFATTACH_DECL_NEW(gtidmac_mbus, sizeof(struct gtidmac_softc),
331     gtidmac_match, gtidmac_attach, NULL, NULL);
332 
333 
334 /* ARGSUSED */
335 static int
336 gtidmac_match(device_t parent, struct cfdata *match, void *aux)
337 {
338 	struct marvell_attach_args *mva = aux;
339 	int unit, i;
340 
341 	if (strcmp(mva->mva_name, match->cf_name) != 0)
342 		return 0;
343 	if (mva->mva_offset == MVA_OFFSET_DEFAULT)
344 		return 0;
345 	unit = 0;
346 	for (i = 0; i < __arraycount(channels); i++)
347 		if (mva->mva_model == channels[i].model) {
348 			if (mva->mva_unit == unit) {
349 				mva->mva_size = GTIDMAC_SIZE;
350 				return 1;
351 			}
352 			unit++;
353 		}
354 	return 0;
355 }
356 
357 /* ARGSUSED */
358 static void
359 gtidmac_attach(device_t parent, device_t self, void *aux)
360 {
361 	struct gtidmac_softc *sc = device_private(self);
362 	struct marvell_attach_args *mva = aux;
363 	prop_dictionary_t dict = device_properties(self);
364 	uint32_t idmac_irq, xore_irq, dmb_speed;
365 	int unit, idmac_nchan, xore_nchan, nsegs, i, j, n;
366 
367 	unit = 0;
368 	for (i = 0; i < __arraycount(channels); i++)
369 		if (mva->mva_model == channels[i].model) {
370 			if (mva->mva_unit == unit)
371 				break;
372 			unit++;
373 		}
374 	idmac_nchan = channels[i].idmac_nchan;
375 	idmac_irq = channels[i].idmac_irq;
376 	if (idmac_nchan != 0) {
377 		if (idmac_irq == -1)
378 			idmac_irq = mva->mva_irq;
379 		if (idmac_irq == -1)
380 			/* Discovery */
381 			if (!prop_dictionary_get_uint32(dict,
382 			    "idmac-irq", &idmac_irq)) {
383 				aprint_error(": no idmac-irq property\n");
384 				return;
385 			}
386 	}
387 	xore_nchan = channels[i].xore_nchan;
388 	xore_irq = channels[i].xore_irq;
389 	if (xore_nchan != 0) {
390 		if (xore_irq == -1)
391 			xore_irq = mva->mva_irq;
392 		if (xore_irq == -1)
393 			/* Discovery LT/V/VI */
394 			if (!prop_dictionary_get_uint32(dict,
395 			    "xore-irq", &xore_irq)) {
396 				aprint_error(": no xore-irq property\n");
397 				return;
398 			}
399 	}
400 
401 	aprint_naive("\n");
402 	aprint_normal(": Marvell IDMA Controller%s\n",
403 	    xore_nchan ? "/XOR Engine" : "");
404 	if (idmac_nchan > 0)
405 		aprint_normal_dev(self,
406 		    "IDMA Controller %d channels, intr %d...%d\n",
407 		    idmac_nchan, idmac_irq, idmac_irq + GTIDMAC_NINTRRUPT - 1);
408 	if (xore_nchan > 0)
409 		aprint_normal_dev(self,
410 		    "XOR Engine %d channels, intr %d...%d\n",
411 		    xore_nchan, xore_irq, xore_irq + xore_nchan - 1);
412 
413 	sc->sc_dev = self;
414 	sc->sc_iot = mva->mva_iot;
415 
416 	/* Map I/O registers */
417 	if (bus_space_subregion(mva->mva_iot, mva->mva_ioh, mva->mva_offset,
418 	    mva->mva_size, &sc->sc_ioh)) {
419 		aprint_error_dev(self, "can't map registers\n");
420 		return;
421 	}
422 
423 	/*
424 	 * Initialise DMA descriptors and associated metadata
425 	 */
426 	sc->sc_dmat = mva->mva_dmat;
427 	n = idmac_nchan * GTIDMAC_NDESC + xore_nchan * MVXORE_NDESC;
428 	sc->sc_dd_buffer =
429 	    kmem_alloc(sizeof(struct gtidmac_dma_desc) * n, KM_SLEEP);
430 	if (sc->sc_dd_buffer == NULL) {
431 		aprint_error_dev(self, "can't allocate memory\n");
432 		goto fail1;
433 	}
434 	/* pattern buffer */
435 	if (bus_dmamem_alloc(sc->sc_dmat, PAGE_SIZE, PAGE_SIZE, 0,
436 	    &sc->sc_pattern_segment, 1, &nsegs, BUS_DMA_NOWAIT)) {
437 		aprint_error_dev(self,
438 		    "bus_dmamem_alloc failed: pattern buffer\n");
439 		goto fail2;
440 	}
441 	if (bus_dmamem_map(sc->sc_dmat, &sc->sc_pattern_segment, 1, PAGE_SIZE,
442 	    (void **)&sc->sc_pbuf, BUS_DMA_NOWAIT)) {
443 		aprint_error_dev(self,
444 		    "bus_dmamem_map failed: pattern buffer\n");
445 		goto fail3;
446 	}
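	/*
	 * Fill the 256 fill patterns: pattern i is 16 bytes of the byte
	 * value i.  A pattern is used as a held source buffer for the
	 * dmover(9) ZERO and FILL8 operations (see gtidmac_dmover_run()).
	 */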
447 	for (i = 0; i < 0x100; i++)
448 		for (j = 0; j < sizeof(sc->sc_pbuf[i].pbuf); j++)
449 			sc->sc_pbuf[i].pbuf[j] = i;
450 
451 	if (!prop_dictionary_get_uint32(dict, "dmb_speed", &dmb_speed)) {
452 		aprint_error_dev(self, "no dmb_speed property\n");
453 		dmb_speed = 10;	/* Probably faster than swdmover. */
454 	}
455 
456 	/* IDMAC DMA descriptor buffer */
457 	sc->sc_gtidmac_nchan = idmac_nchan;
458 	if (sc->sc_gtidmac_nchan > 0) {
459 		if (gtidmac_buffer_setup(sc) != 0)
460 			goto fail4;
461 
462 		if (mva->mva_model != MARVELL_DISCOVERY)
463 			gtidmac_wininit(sc);
464 
465 		/* Setup interrupt */
466 		for (i = 0; i < GTIDMAC_NINTRRUPT; i++) {
467 			j = i * idmac_nchan / GTIDMAC_NINTRRUPT;
468 
469 			sc->sc_intrarg[i].ia_sc = sc;
470 			sc->sc_intrarg[i].ia_cause = GTIDMAC_ICR(j);
471 			sc->sc_intrarg[i].ia_eaddr = GTIDMAC_EAR(j);
472 			sc->sc_intrarg[i].ia_eselect = GTIDMAC_ESR(j);
473 			marvell_intr_establish(idmac_irq + i, IPL_BIO,
474 			    gtidmac_intr, &sc->sc_intrarg[i]);
475 		}
476 
477 		/* Register us with dmover. */
478 		sc->sc_dmb.dmb_name = device_xname(self);
479 		sc->sc_dmb.dmb_speed = dmb_speed;
480 		sc->sc_dmb.dmb_cookie = sc;
481 		sc->sc_dmb.dmb_algdescs = gtidmac_algdescs;
482 		sc->sc_dmb.dmb_nalgdescs = __arraycount(gtidmac_algdescs);
483 		sc->sc_dmb.dmb_process = gtidmac_process;
484 		dmover_backend_register(&sc->sc_dmb);
485 		sc->sc_dmb_busy = 0;
486 	}
487 
488 	/* XORE DMA descriptor buffer */
489 	sc->sc_mvxore_nchan = xore_nchan;
490 	if (sc->sc_mvxore_nchan > 0) {
491 		if (mvxore_buffer_setup(sc) != 0)
492 			goto fail5;
493 
494 		/* Setup interrupt */
495 		for (i = 0; i < sc->sc_mvxore_nchan; i++)
496 			marvell_intr_establish(xore_irq + i, IPL_BIO,
497 			    (i & 0x2) ? mvxore_port1_intr : mvxore_port0_intr,
498 			    sc);
499 
500 		mvxore_wininit(sc);
501 
502 		/* Register us with dmover. */
503 		sc->sc_dmb_xore.dmb_name = device_xname(sc->sc_dev);
504 		sc->sc_dmb_xore.dmb_speed = dmb_speed;
505 		sc->sc_dmb_xore.dmb_cookie = sc;
506 		sc->sc_dmb_xore.dmb_algdescs = mvxore_algdescs;
507 		sc->sc_dmb_xore.dmb_nalgdescs =
508 		    __arraycount(mvxore_algdescs);
509 		sc->sc_dmb_xore.dmb_process = gtidmac_process;
510 		dmover_backend_register(&sc->sc_dmb_xore);
511 	}
512 
513 	gtidmac_softc = sc;
514 
515 	return;
516 
517 fail5:
518 	for (i = sc->sc_gtidmac_nchan - 1; i >= 0; i--) {
519 		bus_dmamap_destroy(sc->sc_dmat, sc->sc_cdesc[i].chan_in);
520 		bus_dmamap_destroy(sc->sc_dmat, sc->sc_cdesc[i].chan_out);
521 	}
522 	bus_dmamap_unload(sc->sc_dmat, sc->sc_dmap);
523 	bus_dmamap_destroy(sc->sc_dmat, sc->sc_dmap);
524 	bus_dmamem_unmap(sc->sc_dmat, sc->sc_dbuf,
525 	    sizeof(struct gtidmac_desc) * GTIDMAC_NDESC);
526 	bus_dmamem_free(sc->sc_dmat,
527 	    sc->sc_dmap->dm_segs, sc->sc_dmap->dm_nsegs);
528 fail4:
529 	bus_dmamem_unmap(sc->sc_dmat, sc->sc_pbuf, PAGE_SIZE);
530 fail3:
531 	bus_dmamem_free(sc->sc_dmat, &sc->sc_pattern_segment, 1);
532 fail2:
533 	kmem_free(sc->sc_dd_buffer, sizeof(struct gtidmac_dma_desc) * n);
534 fail1:
535 	bus_space_unmap(sc->sc_iot, sc->sc_ioh, mva->mva_size);
536 	return;
537 }
538 
539 
540 static int
541 gtidmac_intr(void *arg)
542 {
543 	struct gtidmac_intr_arg *ia = arg;
544 	struct gtidmac_softc *sc = ia->ia_sc;
545 	uint32_t cause;
546 	int handled = 0, chan, error;
547 
548 	cause = bus_space_read_4(sc->sc_iot, sc->sc_ioh, ia->ia_cause);
549 	DPRINTF(("IDMAC intr: cause=0x%x\n", cause));
550 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, ia->ia_cause, ~cause);
551 
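	/*
	 * The cause register packs GTIDMAC_I_BITS interrupt bits per channel.
	 * Walk each channel's bit group, report error conditions and invoke
	 * the completion callback on completion or error.
	 */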
552 	chan = 0;
553 	while (cause) {
554 		error = 0;
555 		if (cause & GTIDMAC_I_ADDRMISS) {
556 			aprint_error_dev(sc->sc_dev, "Address Miss");
557 			error = EINVAL;
558 		}
559 		if (cause & GTIDMAC_I_ACCPROT) {
560 			aprint_error_dev(sc->sc_dev,
561 			    "Access Protect Violation");
562 			error = EACCES;
563 		}
564 		if (cause & GTIDMAC_I_WRPROT) {
565 			aprint_error_dev(sc->sc_dev, "Write Protect");
566 			error = EACCES;
567 		}
568 		if (cause & GTIDMAC_I_OWN) {
569 			aprint_error_dev(sc->sc_dev, "Ownership Violation");
570 			error = EINVAL;
571 		}
572 
573 #define GTIDMAC_I_ERROR		  \
574 	   (GTIDMAC_I_ADDRMISS	| \
575 	    GTIDMAC_I_ACCPROT	| \
576 	    GTIDMAC_I_WRPROT	| \
577 	    GTIDMAC_I_OWN)
578 		if (cause & GTIDMAC_I_ERROR) {
579 			uint32_t sel;
580 			int select;
581 
582 			sel = bus_space_read_4(sc->sc_iot, sc->sc_ioh,
583 			    ia->ia_eselect) & GTIDMAC_ESR_SEL;
584 			select = sel - chan * GTIDMAC_I_BITS;
585 			if (select >= 0 && select < GTIDMAC_I_BITS) {
586 				uint32_t ear;
587 
588 				ear = bus_space_read_4(sc->sc_iot, sc->sc_ioh,
589 				    ia->ia_eaddr);
590 				aprint_error(": Error Address 0x%x\n", ear);
591 			} else
592 				aprint_error(": lost Error Address\n");
593 		}
594 
595 		if (cause & (GTIDMAC_I_COMP | GTIDMAC_I_ERROR)) {
596 			sc->sc_cdesc[chan].chan_dma_done(
597 			    sc->sc_cdesc[chan].chan_running, chan,
598 			    &sc->sc_cdesc[chan].chan_in,
599 			    &sc->sc_cdesc[chan].chan_out, error);
600 			handled++;
601 		}
602 
603 		cause >>= GTIDMAC_I_BITS;
604 	}
605 	DPRINTF(("IDMAC intr: %shandled\n", handled ? "" : "not "));
606 
607 	return handled;
608 }
609 
610 static int
611 mvxore_port0_intr(void *arg)
612 {
613 	struct gtidmac_softc *sc = arg;
614 
615 	return mvxore_intr(sc, 0);
616 }
617 
618 static int
619 mvxore_port1_intr(void *arg)
620 {
621 	struct gtidmac_softc *sc = arg;
622 
623 	return mvxore_intr(sc, 1);
624 }
625 
626 static int
627 mvxore_intr(struct gtidmac_softc *sc, int port)
628 {
629 	uint32_t cause;
630 	int handled = 0, chan, error;
631 
632 	cause =
633 	    bus_space_read_4(sc->sc_iot, sc->sc_ioh, MVXORE_XEICR(sc, port));
634 	DPRINTF(("XORE port %d intr: cause=0x%x\n", port, cause));
635 printf("XORE port %d intr: cause=0x%x\n", port, cause);
636 	bus_space_write_4(sc->sc_iot, sc->sc_ioh,
637 	    MVXORE_XEICR(sc, port), ~cause);
638 
639 	chan = 0;
640 	while (cause) {
641 		error = 0;
642 		if (cause & MVXORE_I_ADDRDECODE) {
643 			aprint_error_dev(sc->sc_dev, "Failed address decoding");
644 			error = EINVAL;
645 		}
646 		if (cause & MVXORE_I_ACCPROT) {
647 			aprint_error_dev(sc->sc_dev,
648 			    "Access Protect Violation");
649 			error = EACCES;
650 		}
651 		if (cause & MVXORE_I_WRPROT) {
652 			aprint_error_dev(sc->sc_dev, "Write Protect");
653 			error = EACCES;
654 		}
655 		if (cause & MVXORE_I_OWN) {
656 			aprint_error_dev(sc->sc_dev, "Ownership Violation");
657 			error = EINVAL;
658 		}
659 		if (cause & MVXORE_I_INTPARITY) {
660 			aprint_error_dev(sc->sc_dev, "Parity Error");
661 			error = EIO;
662 		}
663 		if (cause & MVXORE_I_XBAR) {
664 			aprint_error_dev(sc->sc_dev, "Crossbar Parity Error");
665 			error = EINVAL;
666 		}
667 
668 #define MVXORE_I_ERROR		  \
669 	   (MVXORE_I_ADDRDECODE	| \
670 	    MVXORE_I_ACCPROT	| \
671 	    MVXORE_I_WRPROT	| \
672 	    MVXORE_I_OWN	| \
673 	    MVXORE_I_INTPARITY	| \
674 	    MVXORE_I_XBAR)
675 		if (cause & MVXORE_I_ERROR) {
676 			uint32_t type;
677 			int event;
678 
679 			type = bus_space_read_4(sc->sc_iot, sc->sc_ioh,
680 			    MVXORE_XEECR(sc, port));
681 			type &= MVXORE_XEECR_ERRORTYPE_MASK;
682 			event = type - chan * MVXORE_I_BITS;
683 			if (event >= 0 && event < MVXORE_I_BITS) {
684 				uint32_t xeear;
685 
686 				xeear = bus_space_read_4(sc->sc_iot, sc->sc_ioh,
687 				    MVXORE_XEEAR(sc, port));
688 				aprint_error(": Error Address 0x%x\n", xeear);
689 			} else
690 				aprint_error(": lost Error Address\n");
691 		}
692 
693 		if (cause & (MVXORE_I_EOC | MVXORE_I_ERROR)) {
694 			sc->sc_cdesc_xore[chan].chan_dma_done(
695 			    sc->sc_cdesc_xore[chan].chan_running, chan,
696 			    sc->sc_cdesc_xore[chan].chan_in,
697 			    &sc->sc_cdesc_xore[chan].chan_out, error);
698 			handled++;
699 		}
700 
701 		cause >>= MVXORE_I_BITS;
702 	}
703 printf("XORE port %d intr: %shandled\n", port, handled ? "" : "not ");
704 	DPRINTF(("XORE port %d intr: %shandled\n",
705 	    port, handled ? "" : "not "));
706 
707 	return handled;
708 }
709 
710 
711 /*
712  * dmover(9) backend function.
713  */
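/*
 * gtidmac_process() is called by dmover(9) whenever requests are queued on
 * this backend.  If the backend is idle, gtidmac_dmover_run() dequeues a
 * request, allocates a channel, loads the dmover buffers into dmamaps and
 * starts the transfer; gtidmac_dmover_done() later unloads the maps, frees
 * the channel, completes the request and restarts the queue.
 */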
714 static void
715 gtidmac_process(struct dmover_backend *dmb)
716 {
717 	struct gtidmac_softc *sc = dmb->dmb_cookie;
718 	int s;
719 
720 	/* If the backend is currently idle, go process the queue. */
721 	s = splbio();
722 	if (!sc->sc_dmb_busy)
723 		gtidmac_dmover_run(dmb);
724 	splx(s);
725 }
726 
727 static void
728 gtidmac_dmover_run(struct dmover_backend *dmb)
729 {
730 	struct gtidmac_softc *sc = dmb->dmb_cookie;
731 	struct dmover_request *dreq;
732 	const struct dmover_algdesc *algdesc;
733 	struct gtidmac_function *df;
734 	bus_dmamap_t *dmamap_in, *dmamap_out;
735 	int chan, ninputs, error, i;
736 
737 	sc->sc_dmb_busy = 1;
738 
739 	for (;;) {
740 		dreq = TAILQ_FIRST(&dmb->dmb_pendreqs);
741 		if (dreq == NULL)
742 			break;
743 		algdesc = dreq->dreq_assignment->das_algdesc;
744 		df = algdesc->dad_data;
745 		chan = (*df->chan_alloc)(sc, &dmamap_in, &dmamap_out, dreq);
746 		if (chan == -1)
747 			return;
748 
749 		dmover_backend_remque(dmb, dreq);
750 		dreq->dreq_flags |= DMOVER_REQ_RUNNING;
751 
752 		/* XXXUNLOCK */
753 
754 		error = 0;
755 
756 		/* Load in/out buffers of dmover to bus_dmamap. */
757 		ninputs = dreq->dreq_assignment->das_algdesc->dad_ninputs;
758 		if (ninputs == 0) {
759 			int pno = 0;
760 
761 			if (algdesc->dad_name == DMOVER_FUNC_FILL8)
762 				pno = dreq->dreq_immediate[0];
763 
764 			i = 0;
765 			error = bus_dmamap_load(sc->sc_dmat, *dmamap_in,
766 			    &sc->sc_pbuf[pno], sizeof(sc->sc_pbuf[pno]), NULL,
767 			    BUS_DMA_NOWAIT | BUS_DMA_STREAMING | BUS_DMA_WRITE);
768 			if (error == 0) {
769 				bus_dmamap_sync(sc->sc_dmat, *dmamap_in, 0,
770 				    sizeof(uint32_t), BUS_DMASYNC_PREWRITE);
771 
772 				/*
773 				 * gtidmac_dmmap_unload() will be called on
774 				 * this map if an error occurs later.
775 				 */
776 				i = 1;
777 			}
778 		} else
779 			for (i = 0; i < ninputs; i++) {
780 				error = gtidmac_dmmap_load(sc,
781 				    *(dmamap_in + i), dreq->dreq_inbuf_type,
782 				    &dreq->dreq_inbuf[i], 0/*write*/);
783 				if (error != 0)
784 					break;
785 			}
786 		if (algdesc->dad_name != DMOVER_FUNC_ISCSI_CRC32C) {
787 			if (error == 0)
788 				error = gtidmac_dmmap_load(sc, *dmamap_out,
789 				    dreq->dreq_outbuf_type, &dreq->dreq_outbuf,
790 				    1/*read*/);
791 
792 			if (error == 0) {
793 				/*
794 				 * The outbuf size is always assumed to be the
795 				 * DMA transfer size of the dmover request.
796 				 */
797 				error = (*df->dma_setup)(sc, chan, ninputs,
798 				    dmamap_in, dmamap_out,
799 				    (*dmamap_out)->dm_mapsize);
800 				if (error != 0)
801 					gtidmac_dmmap_unload(sc, *dmamap_out,
802 					    1);
803 			}
804 		} else
805 			if (error == 0)
806 				error = (*df->dma_setup)(sc, chan, ninputs,
807 				    dmamap_in, dmamap_out,
808 				    (*dmamap_in)->dm_mapsize);
809 
810 		/* XXXLOCK */
811 
812 		if (error != 0) {
813 			for (; i-- > 0;)
814 				gtidmac_dmmap_unload(sc, *(dmamap_in + i), 0);
815 			(*df->chan_free)(sc, chan);
816 
817 			dreq->dreq_flags |= DMOVER_REQ_ERROR;
818 			dreq->dreq_error = error;
819 			/* XXXUNLOCK */
820 			dmover_done(dreq);
821 			/* XXXLOCK */
822 			continue;
823 		}
824 
825 		(*df->dma_start)(sc, chan, gtidmac_dmover_done);
826 		break;
827 	}
828 
829 	/* All done */
830 	sc->sc_dmb_busy = 0;
831 }
832 
833 static void
834 gtidmac_dmover_done(void *object, int chan, bus_dmamap_t *dmamap_in,
835 		    bus_dmamap_t *dmamap_out, int error)
836 {
837 	struct gtidmac_softc *sc;
838 	struct dmover_request *dreq = object;
839 	struct dmover_backend *dmb;
840 	struct gtidmac_function *df;
841 	uint32_t result;
842 	int ninputs, i;
843 
844 	KASSERT(dreq != NULL);
845 
846 	dmb = dreq->dreq_assignment->das_backend;
847 	df = dreq->dreq_assignment->das_algdesc->dad_data;
848 	ninputs = dreq->dreq_assignment->das_algdesc->dad_ninputs;
849 	sc = dmb->dmb_cookie;
850 
851 	result = (*df->dma_finish)(sc, chan, error);
852 	for (i = 0; i < ninputs; i++)
853 		gtidmac_dmmap_unload(sc, *(dmamap_in + i), 0);
854 	if (dreq->dreq_assignment->das_algdesc->dad_name ==
855 	    DMOVER_FUNC_ISCSI_CRC32C)
856 		memcpy(dreq->dreq_immediate, &result, sizeof(result));
857 	else
858 		gtidmac_dmmap_unload(sc, *dmamap_out, 1);
859 
860 	(*df->chan_free)(sc, chan);
861 
862 	if (error) {
863 		dreq->dreq_error = error;
864 		dreq->dreq_flags |= DMOVER_REQ_ERROR;
865 	}
866 
867 	dmover_done(dreq);
868 
869 	/*
870 	 * See if we can start some more dmover(9) requests.
871 	 *
872 	 * Note: We're already at splbio() here.
873 	 */
874 	if (!sc->sc_dmb_busy)
875 		gtidmac_dmover_run(dmb);
876 }
877 
878 static __inline int
879 gtidmac_dmmap_load(struct gtidmac_softc *sc, bus_dmamap_t dmamap,
880 		   dmover_buffer_type dmbuf_type, dmover_buffer *dmbuf,
881 		   int read)
882 {
883 	int error, flags;
884 
885 	flags = BUS_DMA_NOWAIT | BUS_DMA_STREAMING |
886 	    (read ? BUS_DMA_READ : BUS_DMA_WRITE);
887 
888 	switch (dmbuf_type) {
889 	case DMOVER_BUF_LINEAR:
890 		error = bus_dmamap_load(sc->sc_dmat, dmamap,
891 		    dmbuf->dmbuf_linear.l_addr, dmbuf->dmbuf_linear.l_len,
892 		    NULL, flags);
893 		break;
894 
895 	case DMOVER_BUF_UIO:
896 		if ((read && dmbuf->dmbuf_uio->uio_rw != UIO_READ) ||
897 		    (!read && dmbuf->dmbuf_uio->uio_rw == UIO_READ))
898 			return (EINVAL);
899 
900 		error = bus_dmamap_load_uio(sc->sc_dmat, dmamap,
901 		    dmbuf->dmbuf_uio, flags);
902 		break;
903 
904 	default:
905 		error = EINVAL;
906 	}
907 
908 	if (error == 0)
909 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
910 		    read ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
911 
912 	return error;
913 }
914 
915 static __inline void
916 gtidmac_dmmap_unload(struct gtidmac_softc *sc, bus_dmamap_t dmamap, int read)
917 {
918 
919 	bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
920 	    read ? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
921 
922 	bus_dmamap_unload(sc->sc_dmat, dmamap);
923 }
924 
925 
926 void *
927 gtidmac_tag_get(void)
928 {
929 
930 	return gtidmac_softc;
931 }
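/*
 * A minimal usage sketch for a direct (non-dmover) client of the exported
 * IDMAC interface.  The cookie and callback below are hypothetical; a real
 * caller must load the returned dmamaps itself and free the channel from
 * its completion callback.
 *
 *	void *tag = gtidmac_tag_get();
 *	bus_dmamap_t *in, *out;
 *	int chan = gtidmac_chan_alloc(tag, &in, &out, mysc);
 *	if (chan >= 0) {
 *		... load *in and *out with bus_dmamap_load*() ...
 *		gtidmac_setup(tag, chan, 1, in, out, (*out)->dm_mapsize);
 *		gtidmac_start(tag, chan, my_done_cb);
 *		... my_done_cb(mysc, chan, in, out, error) is called from
 *		    gtidmac_intr() on completion or error ...
 *	}
 */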
932 
933 /*
934  * IDMAC functions
935  */
936 int
937 gtidmac_chan_alloc(void *tag, bus_dmamap_t **dmamap_in,
938 		   bus_dmamap_t **dmamap_out, void *object)
939 {
940 	struct gtidmac_softc *sc = tag;
941 	int chan;
942 
943 /* maybe need lock */
944 
945 	for (chan = 0; chan < sc->sc_gtidmac_nchan; chan++)
946 		if (sc->sc_cdesc[chan].chan_running == NULL)
947 			break;
948 	if (chan >= sc->sc_gtidmac_nchan)
949 		return -1;
950 
951 
952 	sc->sc_cdesc[chan].chan_running = object;
953 
954 /* unlock */
955 
956 	*dmamap_in = &sc->sc_cdesc[chan].chan_in;
957 	*dmamap_out = &sc->sc_cdesc[chan].chan_out;
958 
959 	return chan;
960 }
961 
962 void
963 gtidmac_chan_free(void *tag, int chan)
964 {
965 	struct gtidmac_softc *sc = tag;
966 
967 /* maybe need lock */
968 
969 	sc->sc_cdesc[chan].chan_running = NULL;
970 
971 /* unlock */
972 }
973 
974 /* ARGSUSED */
975 int
976 gtidmac_setup(void *tag, int chan, int ninputs, bus_dmamap_t *dmamap_in,
977 	      bus_dmamap_t *dmamap_out, bus_size_t size)
978 {
979 	struct gtidmac_softc *sc = tag;
980 	struct gtidmac_dma_desc *dd, *fstdd, *nxtdd;
981 	struct gtidmac_desc *desc;
982 	uint32_t ccl, bcnt, ires, ores;
983 	int n = 0, iidx, oidx;
984 
985 	KASSERT(ninputs == 0 || ninputs == 1);
986 
987 	ccl = bus_space_read_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_CCLR(chan));
988 #ifdef DIAGNOSTIC
989 	if (ccl & GTIDMAC_CCLR_CHANACT)
990 		panic("gtidmac_setup: chan%d already active", chan);
991 #endif
992 
993 	/* We always use Chain mode, with at most (16M - 1) bytes per descriptor */
994 	ccl = (GTIDMAC_CCLR_DESCMODE_16M				|
995 #ifdef GTIDMAC_DEBUG
996 	    GTIDMAC_CCLR_CDEN						|
997 #endif
998 	    GTIDMAC_CCLR_TRANSFERMODE_B /* Transfer Mode: Block */	|
999 	    GTIDMAC_CCLR_INTMODE_NULL   /* Intr Mode: Next Desc NULL */	|
1000 	    GTIDMAC_CCLR_CHAINMODE_C    /* Chain Mode: Chained */);
1001 	if (size != (*dmamap_in)->dm_mapsize) {
1002 		ccl |= GTIDMAC_CCLR_SRCHOLD;
1003 		if ((*dmamap_in)->dm_mapsize == 8)
1004 			ccl |= GTIDMAC_CCLR_SBL_8B;
1005 		else if ((*dmamap_in)->dm_mapsize == 16)
1006 			ccl |= GTIDMAC_CCLR_SBL_16B;
1007 		else if ((*dmamap_in)->dm_mapsize == 32)
1008 			ccl |= GTIDMAC_CCLR_SBL_32B;
1009 		else if ((*dmamap_in)->dm_mapsize == 64)
1010 			ccl |= GTIDMAC_CCLR_SBL_64B;
1011 		else if ((*dmamap_in)->dm_mapsize == 128)
1012 			ccl |= GTIDMAC_CCLR_SBL_128B;
1013 		else
1014 			panic("gtidmac_setup: chan%d source:"
1015 			    " unsupported hold size", chan);
1016 	} else
1017 		ccl |= GTIDMAC_CCLR_SBL_128B;
1018 	if (size != (*dmamap_out)->dm_mapsize) {
1019 		ccl |= GTIDMAC_CCLR_DESTHOLD;
1020 		if ((*dmamap_out)->dm_mapsize == 8)
1021 			ccl |= GTIDMAC_CCLR_DBL_8B;
1022 		else if ((*dmamap_out)->dm_mapsize == 16)
1023 			ccl |= GTIDMAC_CCLR_DBL_16B;
1024 		else if ((*dmamap_out)->dm_mapsize == 32)
1025 			ccl |= GTIDMAC_CCLR_DBL_32B;
1026 		else if ((*dmamap_out)->dm_mapsize == 64)
1027 			ccl |= GTIDMAC_CCLR_DBL_64B;
1028 		else if ((*dmamap_out)->dm_mapsize == 128)
1029 			ccl |= GTIDMAC_CCLR_DBL_128B;
1030 		else
1031 			panic("gtidmac_setup: chan%d destination:"
1032 			    " unsupported hold size", chan);
1033 	} else
1034 		ccl |= GTIDMAC_CCLR_DBL_128B;
1035 
1036 	fstdd = SLIST_FIRST(&sc->sc_dlist);
1037 	if (fstdd == NULL) {
1038 		aprint_error_dev(sc->sc_dev, "no descriptor\n");
1039 		return ENOMEM;
1040 	}
1041 	SLIST_REMOVE_HEAD(&sc->sc_dlist, dd_next);
1042 	sc->sc_cdesc[chan].chan_ddidx = fstdd->dd_index;
1043 
1044 	dd = fstdd;
1045 	ires = ores = 0;
1046 	iidx = oidx = 0;
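	/*
	 * Walk the source and destination dmamap segments in lockstep,
	 * emitting one 16M-mode descriptor per contiguous chunk.  A held
	 * (SRCHOLD/DESTHOLD) side does not advance.  The chain is linked
	 * through nextdp and terminated with a NULL pointer.
	 */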
1047 	while (1 /*CONSTCOND*/) {
1048 		if (ccl & GTIDMAC_CCLR_SRCHOLD) {
1049 			if (ccl & GTIDMAC_CCLR_DESTHOLD)
1050 				bcnt = size;	/* src/dst hold */
1051 			else
1052 				bcnt = (*dmamap_out)->dm_segs[oidx].ds_len;
1053 		} else if (ccl & GTIDMAC_CCLR_DESTHOLD)
1054 			bcnt = (*dmamap_in)->dm_segs[iidx].ds_len;
1055 		else
1056 			bcnt = min((*dmamap_in)->dm_segs[iidx].ds_len - ires,
1057 			    (*dmamap_out)->dm_segs[oidx].ds_len - ores);
1058 
1059 		desc = dd->dd_idmac_vaddr;
1060 		desc->bc.mode16m.bcnt =
1061 		    bcnt | GTIDMAC_CIDMABCR_BCLEFT | GTIDMAC_CIDMABCR_OWN;
1062 		desc->srcaddr = (*dmamap_in)->dm_segs[iidx].ds_addr + ires;
1063 		desc->dstaddr = (*dmamap_out)->dm_segs[oidx].ds_addr + ores;
1064 
1065 		n += bcnt;
1066 		if (n >= size)
1067 			break;
1068 		if (!(ccl & GTIDMAC_CCLR_SRCHOLD)) {
1069 			ires += bcnt;
1070 			if (ires >= (*dmamap_in)->dm_segs[iidx].ds_len) {
1071 				ires = 0;
1072 				iidx++;
1073 				KASSERT(iidx < (*dmamap_in)->dm_nsegs);
1074 			}
1075 		}
1076 		if (!(ccl & GTIDMAC_CCLR_DESTHOLD)) {
1077 			ores += bcnt;
1078 			if (ores >= (*dmamap_out)->dm_segs[oidx].ds_len) {
1079 				ores = 0;
1080 				oidx++;
1081 				KASSERT(oidx < (*dmamap_out)->dm_nsegs);
1082 			}
1083 		}
1084 
1085 		nxtdd = SLIST_FIRST(&sc->sc_dlist);
1086 		if (nxtdd == NULL) {
1087 			aprint_error_dev(sc->sc_dev, "no descriptor\n");
1088 			return ENOMEM;
1089 		}
1090 		SLIST_REMOVE_HEAD(&sc->sc_dlist, dd_next);
1091 
1092 		desc->nextdp = (uint32_t)nxtdd->dd_paddr;
1093 		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap,
1094 		    dd->dd_index * sizeof(*desc), sizeof(*desc),
1095 #ifdef GTIDMAC_DEBUG
1096 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1097 #else
1098 		    BUS_DMASYNC_PREWRITE);
1099 #endif
1100 
1101 		SLIST_INSERT_AFTER(dd, nxtdd, dd_next);
1102 		dd = nxtdd;
1103 	}
1104 	desc->nextdp = (uint32_t)NULL;
1105 	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap, dd->dd_index * sizeof(*desc),
1106 #ifdef GTIDMAC_DEBUG
1107 	    sizeof(*desc), BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1108 #else
1109 	    sizeof(*desc), BUS_DMASYNC_PREWRITE);
1110 #endif
1111 
1112 	/* Set paddr of descriptor to Channel Next Descriptor Pointer */
1113 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_CNDPR(chan),
1114 	    fstdd->dd_paddr);
1115 
1116 #if BYTE_ORDER == LITTLE_ENDIAN
1117 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_CCHR(chan),
1118 	    GTIDMAC_CCHR_DESCBYTESWAP | GTIDMAC_CCHR_ENDIAN_LE);
1119 #else
1120 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_CCHR(chan),
1121 	    GTIDMAC_CCHR_DESCBYTESWAP | GTIDMAC_CCHR_ENDIAN_BE);
1122 #endif
1123 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_CCLR(chan), ccl);
1124 
1125 #ifdef GTIDMAC_DEBUG
1126 	gtidmac_dump_idmacdesc(sc, fstdd, ccl, 0/*pre*/);
1127 #endif
1128 
1129 	sc->sc_cdesc[chan].chan_totalcnt += size;
1130 
1131 	return 0;
1132 }
1133 
1134 void
1135 gtidmac_start(void *tag, int chan,
1136 	      void (*dma_done_cb)(void *, int, bus_dmamap_t *, bus_dmamap_t *,
1137 				  int))
1138 {
1139 	struct gtidmac_softc *sc = tag;
1140 	uint32_t ccl;
1141 
1142 	DPRINTF(("%s:%d: starting\n", device_xname(sc->sc_dev), chan));
1143 
1144 #ifdef GTIDMAC_DEBUG
1145 	gtidmac_dump_idmacreg(sc, chan);
1146 #endif
1147 
1148 	sc->sc_cdesc[chan].chan_dma_done = dma_done_cb;
1149 
1150 	ccl = bus_space_read_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_CCLR(chan));
1151 	/* Start and 'Fetch Next Descriptor' */
1152 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_CCLR(chan),
1153 	    ccl | GTIDMAC_CCLR_CHANEN | GTIDMAC_CCLR_FETCHND);
1154 }
1155 
1156 static uint32_t
1157 gtidmac_finish(void *tag, int chan, int error)
1158 {
1159 	struct gtidmac_softc *sc = tag;
1160 	struct gtidmac_dma_desc *dd, *fstdd, *nxtdd;
1161 	struct gtidmac_desc *desc;
1162 
1163 	fstdd = &sc->sc_dd_buffer[sc->sc_cdesc[chan].chan_ddidx];
1164 
1165 #ifdef GTIDMAC_DEBUG
1166 	if (error || gtidmac_debug > 1) {
1167 		uint32_t ccl;
1168 
1169 		gtidmac_dump_idmacreg(sc, chan);
1170 		ccl = bus_space_read_4(sc->sc_iot, sc->sc_ioh,
1171 		    GTIDMAC_CCLR(chan));
1172 		gtidmac_dump_idmacdesc(sc, fstdd, ccl, 1/*post*/);
1173 	}
1174 #endif
1175 
1176 	dd = fstdd;
1177 	do {
1178 		desc = dd->dd_idmac_vaddr;
1179 
1180 		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap,
1181 		    dd->dd_index * sizeof(*desc), sizeof(*desc),
1182 #ifdef GTIDMAC_DEBUG
1183 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1184 #else
1185 		    BUS_DMASYNC_POSTWRITE);
1186 #endif
1187 
1188 		nxtdd = SLIST_NEXT(dd, dd_next);
1189 		SLIST_INSERT_HEAD(&sc->sc_dlist, dd, dd_next);
1190 		dd = nxtdd;
1191 	} while (desc->nextdp);
1192 
1193 	return 0;
1194 }
1195 
1196 /*
1197  * XORE functions
1198  */
1199 int
1200 mvxore_chan_alloc(void *tag, bus_dmamap_t **dmamap_in,
1201 		  bus_dmamap_t **dmamap_out, void *object)
1202 {
1203 	struct gtidmac_softc *sc = tag;
1204 	int chan;
1205 
1206 /* maybe need lock */
1207 
1208 	for (chan = 0; chan < sc->sc_mvxore_nchan; chan++)
1209 		if (sc->sc_cdesc_xore[chan].chan_running == NULL)
1210 			break;
1211 	if (chan >= sc->sc_mvxore_nchan)
1212 		return -1;
1213 
1214 
1215 	sc->sc_cdesc_xore[chan].chan_running = object;
1216 
1217 /* unlock */
1218 
1219 	*dmamap_in = sc->sc_cdesc_xore[chan].chan_in;
1220 	*dmamap_out = &sc->sc_cdesc_xore[chan].chan_out;
1221 
1222 	return chan;
1223 }
1224 
1225 void
1226 mvxore_chan_free(void *tag, int chan)
1227 {
1228 	struct gtidmac_softc *sc = tag;
1229 
1230 /* maybe need lock */
1231 
1232 	sc->sc_cdesc_xore[chan].chan_running = NULL;
1233 
1234 /* unlock */
1235 }
1236 
1237 /* ARGSUSED */
1238 int
1239 mvxore_setup(void *tag, int chan, int ninputs, bus_dmamap_t *dmamap_in,
1240 	     bus_dmamap_t *dmamap_out, bus_size_t size)
1241 {
1242 	struct gtidmac_softc *sc = tag;
1243 	struct gtidmac_dma_desc *dd, *fstdd, *nxtdd;
1244 	struct mvxore_desc *desc;
1245 	uint32_t xexc, bcnt, cmd, lastcmd;
1246 	int n = 0, i;
1247 	uint32_t ires[MVXORE_NSRC] = { 0, 0, 0, 0, 0, 0, 0, 0 }, ores = 0;
1248 	int iidx[MVXORE_NSRC] = { 0, 0, 0, 0, 0, 0, 0, 0 }, oidx = 0;
1249 
1250 #ifdef DIAGNOSTIC
1251 	uint32_t xexact;
1252 
1253 	xexact =
1254 	    bus_space_read_4(sc->sc_iot, sc->sc_ioh, MVXORE_XEXACTR(sc, chan));
1255 	if ((xexact & MVXORE_XEXACTR_XESTATUS_MASK) ==
1256 	    MVXORE_XEXACTR_XESTATUS_ACT)
1257 		panic("mvxore_setup: chan%d already active."
1258 		    " mvxore does not support hot insertion", chan);
1259 #endif
1260 
1261 	xexc =
1262 	    (MVXORE_XEXCR_REGACCPROTECT	|
1263 	     MVXORE_XEXCR_DBL_128B	|
1264 	     MVXORE_XEXCR_SBL_128B);
1265 	cmd = lastcmd = 0;
1266 	if (ninputs > 1) {
1267 		xexc |= MVXORE_XEXCR_OM_XOR;
1268 		lastcmd = cmd = (1 << ninputs) - 1;
1269 	} else if (ninputs == 1) {
1270 		if ((*dmamap_out)->dm_nsegs == 0) {
1271 			xexc |= MVXORE_XEXCR_OM_CRC32;
1272 			lastcmd = MVXORE_DESC_CMD_CRCLAST;
1273 		} else
1274 			xexc |= MVXORE_XEXCR_OM_DMA;
1275 	} else if (ninputs == 0) {
1276 		if ((*dmamap_out)->dm_nsegs != 1) {
1277 			aprint_error_dev(sc->sc_dev,
1278 			    "XORE does not support %d DMA segments\n",
1279 			    (*dmamap_out)->dm_nsegs);
1280 			return EINVAL;
1281 		}
1282 
1283 		if ((*dmamap_in)->dm_mapsize == 0) {
1284 			xexc |= MVXORE_XEXCR_OM_ECC;
1285 
1286 			/* XXXXX: Maybe need to set Timer Mode registers? */
1287 
1288 #if 0
1289 		} else if ((*dmamap_in)->dm_mapsize == 8 ||
1290 		    (*dmamap_in)->dm_mapsize == 16) { /* in case dmover */
1291 			uint64_t pattern;
1292 
1293 			/* XXXX: Get pattern data */
1294 
1295 			KASSERT((*dmamap_in)->dm_mapsize == 8 ||
1296 			    (void *)((uint32_t)(*dmamap_in)->_dm_origbuf &
1297 						~PAGE_MASK) == sc->sc_pbuf);
1298 			pattern = *(uint64_t *)(*dmamap_in)->_dm_origbuf;
1299 
1300 			/* XXXXX: XORE has an IVR.  We should get this first. */
1301 			bus_space_write_4(sc->sc_iot, sc->sc_ioh, MVXORE_XEIVRL,
1302 			    pattern);
1303 			bus_space_write_4(sc->sc_iot, sc->sc_ioh, MVXORE_XEIVRH,
1304 			    pattern >> 32);
1305 
1306 			xexc |= MVXORE_XEXCR_OM_MEMINIT;
1307 #endif
1308 		} else {
1309 			aprint_error_dev(sc->sc_dev,
1310 			    "XORE does not support DMA mapsize %zd\n",
1311 			    (*dmamap_in)->dm_mapsize);
1312 			return EINVAL;
1313 		}
1314 		bus_space_write_4(sc->sc_iot, sc->sc_ioh,
1315 		    MVXORE_XEXDPR(sc, chan), (*dmamap_out)->dm_segs[0].ds_addr);
1316 		bus_space_write_4(sc->sc_iot, sc->sc_ioh,
1317 		    MVXORE_XEXBSR(sc, chan), (*dmamap_out)->dm_mapsize);
1318 
1319 		bus_space_write_4(sc->sc_iot, sc->sc_ioh,
1320 		    MVXORE_XEXCR(sc, chan), xexc);
1321 		sc->sc_cdesc_xore[chan].chan_totalcnt += size;
1322 
1323 		return 0;
1324 	}
1325 
1326 	/* Make descriptor for DMA/CRC32/XOR */
1327 
1328 	fstdd = SLIST_FIRST(&sc->sc_dlist_xore);
1329 	if (fstdd == NULL) {
1330 		aprint_error_dev(sc->sc_dev, "no xore descriptor\n");
1331 		return ENOMEM;
1332 	}
1333 	SLIST_REMOVE_HEAD(&sc->sc_dlist_xore, dd_next);
1334 	sc->sc_cdesc_xore[chan].chan_ddidx =
1335 	    fstdd->dd_index + GTIDMAC_NDESC * sc->sc_gtidmac_nchan;
1336 
1337 	dd = fstdd;
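	/*
	 * Build the XOR engine descriptor chain: each descriptor covers the
	 * smallest remaining length across the output segment and all input
	 * segments, so no descriptor crosses a bus_dma segment boundary.
	 * The chain is linked through nextda; the last descriptor gets
	 * 'lastcmd' (e.g. CRCLAST for CRC32C).
	 */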
1338 	while (1 /*CONSTCOND*/) {
1339 		desc = dd->dd_xore_vaddr;
1340 		desc->stat = MVXORE_DESC_STAT_OWN;
1341 		desc->cmd = cmd;
1342 		if ((*dmamap_out)->dm_nsegs != 0) {
1343 			desc->dstaddr =
1344 			    (*dmamap_out)->dm_segs[oidx].ds_addr + ores;
1345 			bcnt = (*dmamap_out)->dm_segs[oidx].ds_len - ores;
1346 		} else {
1347 			desc->dstaddr = 0;
1348 			bcnt = MVXORE_MAXXFER;	/* XXXXX */
1349 		}
1350 		for (i = 0; i < ninputs; i++) {
1351 			desc->srcaddr[i] =
1352 			    (*dmamap_in[i]).dm_segs[iidx[i]].ds_addr + ires[i];
1353 			bcnt = min(bcnt,
1354 			    (*dmamap_in[i]).dm_segs[iidx[i]].ds_len - ires[i]);
1355 		}
1356 		desc->bcnt = bcnt;
1357 
1358 		n += bcnt;
1359 		if (n >= size)
1360 			break;
1361 		ores += bcnt;
1362 		if ((*dmamap_out)->dm_nsegs != 0 &&
1363 		    ores >= (*dmamap_out)->dm_segs[oidx].ds_len) {
1364 			ores = 0;
1365 			oidx++;
1366 			KASSERT(oidx < (*dmamap_out)->dm_nsegs);
1367 		}
1368 		for (i = 0; i < ninputs; i++) {
1369 			ires[i] += bcnt;
1370 			if (ires[i] >=
1371 			    (*dmamap_in[i]).dm_segs[iidx[i]].ds_len) {
1372 				ires[i] = 0;
1373 				iidx[i]++;
1374 				KASSERT(iidx[i] < (*dmamap_in[i]).dm_nsegs);
1375 			}
1376 		}
1377 
1378 		nxtdd = SLIST_FIRST(&sc->sc_dlist_xore);
1379 		if (nxtdd == NULL) {
1380 			aprint_error_dev(sc->sc_dev, "no xore descriptor\n");
1381 			return ENOMEM;
1382 		}
1383 		SLIST_REMOVE_HEAD(&sc->sc_dlist_xore, dd_next);
1384 
1385 		desc->nextda = (uint32_t)nxtdd->dd_paddr;
1386 		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap_xore,
1387 		    dd->dd_index * sizeof(*desc), sizeof(*desc),
1388 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1389 
1390 		SLIST_INSERT_AFTER(dd, nxtdd, dd_next);
1391 		dd = nxtdd;
1392 	}
1393 	desc->cmd = lastcmd;
1394 	desc->nextda = (uint32_t)NULL;
1395 	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap_xore,
1396 	    dd->dd_index * sizeof(*desc), sizeof(*desc),
1397 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1398 
1399 	/* Set paddr of descriptor to Channel Next Descriptor Pointer */
1400 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, MVXORE_XEXNDPR(sc, chan),
1401 	    fstdd->dd_paddr);
1402 
1403 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, MVXORE_XEXCR(sc, chan), xexc);
1404 
1405 #ifdef GTIDMAC_DEBUG
1406 	gtidmac_dump_xoredesc(sc, fstdd, xexc, 0/*pre*/);
1407 #endif
1408 
1409 	sc->sc_cdesc_xore[chan].chan_totalcnt += size;
1410 
1411 	return 0;
1412 }
1413 
1414 void
1415 mvxore_start(void *tag, int chan,
1416 	     void (*dma_done_cb)(void *, int, bus_dmamap_t *, bus_dmamap_t *,
1417 				 int))
1418 {
1419 	struct gtidmac_softc *sc = tag;
1420 	uint32_t xexact;
1421 
1422 	DPRINTF(("%s:%d: xore starting\n", device_xname(sc->sc_dev), chan));
1423 
1424 #ifdef GTIDMAC_DEBUG
1425 	gtidmac_dump_xorereg(sc, chan);
1426 #endif
1427 
1428 	sc->sc_cdesc_xore[chan].chan_dma_done = dma_done_cb;
1429 
1430 	xexact =
1431 	    bus_space_read_4(sc->sc_iot, sc->sc_ioh, MVXORE_XEXACTR(sc, chan));
1432 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, MVXORE_XEXACTR(sc, chan),
1433 	    xexact | MVXORE_XEXACTR_XESTART);
1434 }
1435 
1436 static uint32_t
1437 mvxore_finish(void *tag, int chan, int error)
1438 {
1439 	struct gtidmac_softc *sc = tag;
1440 	struct gtidmac_dma_desc *dd, *fstdd, *nxtdd;
1441 	struct mvxore_desc *desc;
1442 	uint32_t xexc;
1443 
1444 #ifdef GTIDMAC_DEBUG
1445 	if (error || gtidmac_debug > 1)
1446 		gtidmac_dump_xorereg(sc, chan);
1447 #endif
1448 
1449 	xexc = bus_space_read_4(sc->sc_iot, sc->sc_ioh, MVXORE_XEXCR(sc, chan));
1450 	if ((xexc & MVXORE_XEXCR_OM_MASK) == MVXORE_XEXCR_OM_ECC ||
1451 	    (xexc & MVXORE_XEXCR_OM_MASK) == MVXORE_XEXCR_OM_MEMINIT)
1452 		return 0;
1453 
1454 	fstdd = &sc->sc_dd_buffer[sc->sc_cdesc_xore[chan].chan_ddidx];
1455 
1456 #ifdef GTIDMAC_DEBUG
1457 	if (error || gtidmac_debug > 1)
1458 		gtidmac_dump_xoredesc(sc, fstdd, xexc, 1/*post*/);
1459 #endif
1460 
1461 	dd = fstdd;
1462 	do {
1463 		desc = dd->dd_xore_vaddr;
1464 
1465 		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap_xore,
1466 		    dd->dd_index * sizeof(*desc), sizeof(*desc),
1467 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1468 
1469 		nxtdd = SLIST_NEXT(dd, dd_next);
1470 		SLIST_INSERT_HEAD(&sc->sc_dlist_xore, dd, dd_next);
1471 		dd = nxtdd;
1472 	} while (desc->nextda);
1473 
1474 	if ((xexc & MVXORE_XEXCR_OM_MASK) == MVXORE_XEXCR_OM_CRC32)
1475 		return desc->result;
1476 	return 0;
1477 }
1478 
1479 static void
1480 gtidmac_wininit(struct gtidmac_softc *sc)
1481 {
1482 	device_t pdev = device_parent(sc->sc_dev);
1483 	uint64_t base;
1484 	uint32_t size, cxap, en;
1485 	int window, target, attr, rv, i;
1486 	struct {
1487 		int tag;
1488 		int winacc;
1489 	} targets[] = {
1490 		{ MARVELL_TAG_SDRAM_CS0,	GTIDMAC_CXAPR_WINACC_FA },
1491 		{ MARVELL_TAG_SDRAM_CS1,	GTIDMAC_CXAPR_WINACC_FA },
1492 		{ MARVELL_TAG_SDRAM_CS2,	GTIDMAC_CXAPR_WINACC_FA },
1493 		{ MARVELL_TAG_SDRAM_CS3,	GTIDMAC_CXAPR_WINACC_FA },
1494 
1495 		/* The following targets could also be set: */
1496 		/*   Devices       = 0x1(ORION_TARGETID_DEVICE_*) */
1497 		/*   PCI           = 0x3(ORION_TARGETID_PCI0_*) */
1498 		/*   PCI Express   = 0x4(ORION_TARGETID_PEX?_*) */
1499 		/*   Tunit SRAM(?) = 0x5(???) */
1500 
1501 		{ MARVELL_TAG_UNDEFINED,	GTIDMAC_CXAPR_WINACC_NOAA }
1502 	};
1503 
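	/*
	 * Program one IDMAC address decoding window per usable SDRAM chip
	 * select.  Target/attribute/base/size come from
	 * marvell_winparams_by_tag(); bases above 4GB additionally use the
	 * high-address remap registers, and every channel's access
	 * protection register grants full access to each window.
	 */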
1504 	en = 0xff;
1505 	cxap = 0;
1506 	for (window = 0, i = 0;
1507 	    targets[i].tag != MARVELL_TAG_UNDEFINED && window < GTIDMAC_NWINDOW;
1508 	    i++) {
1509 		rv = marvell_winparams_by_tag(pdev, targets[i].tag,
1510 		    &target, &attr, &base, &size);
1511 		if (rv != 0 || size == 0)
1512 			continue;
1513 
1514 		if (base > 0xffffffffULL) {
1515 			if (window >= GTIDMAC_NREMAP) {
1516 				aprint_error_dev(sc->sc_dev,
1517 				    "can't remap window %d\n", window);
1518 				continue;
1519 			}
1520 			bus_space_write_4(sc->sc_iot, sc->sc_ioh,
1521 			    GTIDMAC_HARXR(window), (base >> 32) & 0xffffffff);
1522 		}
1523 		bus_space_write_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_BARX(window),
1524 		    GTIDMAC_BARX_TARGET(target)	|
1525 		    GTIDMAC_BARX_ATTR(attr)	|
1526 		    GTIDMAC_BARX_BASE(base));
1527 		bus_space_write_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_SRX(window),
1528 		    GTIDMAC_SRX_SIZE(size));
1529 		en &= ~GTIDMAC_BAER_EN(window);
1530 		cxap |= GTIDMAC_CXAPR_WINACC(window, targets[i].winacc);
1531 		window++;
1532 	}
1533 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_BAER, en);
1534 
1535 	for (i = 0; i < GTIDMAC_NACCPROT; i++)
1536 		bus_space_write_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_CXAPR(i),
1537 		    cxap);
1538 }
1539 
1540 static void
1541 mvxore_wininit(struct gtidmac_softc *sc)
1542 {
1543 	device_t pdev = device_parent(sc->sc_dev);
1544 	uint64_t base;
1545 	uint32_t target, attr, size, xexwc;
1546 	int window, rv, i, p;
1547 	struct {
1548 		int tag;
1549 		int winacc;
1550 	} targets[] = {
1551 		{ MARVELL_TAG_SDRAM_CS0,	MVXORE_XEXWCR_WINACC_FA },
1552 		{ MARVELL_TAG_SDRAM_CS1,	MVXORE_XEXWCR_WINACC_FA },
1553 		{ MARVELL_TAG_SDRAM_CS2,	MVXORE_XEXWCR_WINACC_FA },
1554 		{ MARVELL_TAG_SDRAM_CS3,	MVXORE_XEXWCR_WINACC_FA },
1555 
1556 		{ MARVELL_TAG_UNDEFINED,	MVXORE_XEXWCR_WINACC_NOAA }
1557 	};
1558 
1559 	xexwc = 0;
1560 	for (window = 0, i = 0;
1561 	    targets[i].tag != MARVELL_TAG_UNDEFINED && window < MVXORE_NWINDOW;
1562 	    i++) {
1563 		rv = marvell_winparams_by_tag(pdev, targets[i].tag,
1564 		    &target, &attr, &base, &size);
1565 		if (rv != 0 || size == 0)
1566 			continue;
1567 
1568 		if (base > 0xffffffffULL) {
1569 			if (window >= MVXORE_NREMAP) {
1570 				aprint_error_dev(sc->sc_dev,
1571 				    "can't remap window %d\n", window);
1572 				continue;
1573 			}
1574 			for (p = 0; p < sc->sc_mvxore_nchan >> 1; p++)
1575 				bus_space_write_4(sc->sc_iot, sc->sc_ioh,
1576 				    MVXORE_XEHARRX(sc, p, window),
1577 				    (base >> 32) & 0xffffffff);
1578 		}
1579 
1580 		for (p = 0; p < sc->sc_mvxore_nchan >> 1; p++) {
1581 			bus_space_write_4(sc->sc_iot, sc->sc_ioh,
1582 			    MVXORE_XEBARX(sc, p, window),
1583 			    MVXORE_XEBARX_TARGET(target) |
1584 			    MVXORE_XEBARX_ATTR(attr) |
1585 			    MVXORE_XEBARX_BASE(base));
1586 			bus_space_write_4(sc->sc_iot, sc->sc_ioh,
1587 			    MVXORE_XESMRX(sc, p, window),
1588 			    MVXORE_XESMRX_SIZE(size));
1589 		}
1590 		xexwc |= (MVXORE_XEXWCR_WINEN(window) |
1591 		    MVXORE_XEXWCR_WINACC(window, targets[i].winacc));
1592 		window++;
1593 	}
1594 
1595 	for (i = 0; i < sc->sc_mvxore_nchan; i++) {
1596 		bus_space_write_4(sc->sc_iot, sc->sc_ioh, MVXORE_XEXWCR(sc, i),
1597 		    xexwc);
1598 
1599 		/* XXXXX: reset... */
1600 		bus_space_write_4(sc->sc_iot, sc->sc_ioh, MVXORE_XEXAOCR(sc, 0),
1601 		    0);
1602 	}
1603 }
1604 
1605 static int
1606 gtidmac_buffer_setup(struct gtidmac_softc *sc)
1607 {
1608 	bus_dma_segment_t segs;
1609 	struct gtidmac_dma_desc *dd;
1610 	uint32_t mask;
1611 	int nchan, nsegs, i;
1612 
1613 	nchan = sc->sc_gtidmac_nchan;
1614 
1615 	if (bus_dmamem_alloc(sc->sc_dmat,
1616 	    sizeof(struct gtidmac_desc) * GTIDMAC_NDESC * nchan,
1617 	    PAGE_SIZE, 0, &segs, 1, &nsegs, BUS_DMA_NOWAIT)) {
1618 		aprint_error_dev(sc->sc_dev,
1619 		    "bus_dmamem_alloc failed: descriptor buffer\n");
1620 		goto fail0;
1621 	}
1622 	if (bus_dmamem_map(sc->sc_dmat, &segs, 1,
1623 	    sizeof(struct gtidmac_desc) * GTIDMAC_NDESC * nchan,
1624 	    (void **)&sc->sc_dbuf, BUS_DMA_NOWAIT)) {
1625 		aprint_error_dev(sc->sc_dev,
1626 		    "bus_dmamem_map failed: descriptor buffer\n");
1627 		goto fail1;
1628 	}
1629 	if (bus_dmamap_create(sc->sc_dmat,
1630 	    sizeof(struct gtidmac_desc) * GTIDMAC_NDESC * nchan, 1,
1631 	    sizeof(struct gtidmac_desc) * GTIDMAC_NDESC * nchan, 0,
1632 	    BUS_DMA_NOWAIT, &sc->sc_dmap)) {
1633 		aprint_error_dev(sc->sc_dev,
1634 		    "bus_dmamap_create failed: descriptor buffer\n");
1635 		goto fail2;
1636 	}
1637 	if (bus_dmamap_load(sc->sc_dmat, sc->sc_dmap, sc->sc_dbuf,
1638 	    sizeof(struct gtidmac_desc) * GTIDMAC_NDESC * nchan,
1639 	    NULL, BUS_DMA_NOWAIT)) {
1640 		aprint_error_dev(sc->sc_dev,
1641 		    "bus_dmamap_load failed: descriptor buffer\n");
1642 		goto fail3;
1643 	}
1644 	SLIST_INIT(&sc->sc_dlist);
1645 	for (i = 0; i < GTIDMAC_NDESC * nchan; i++) {
1646 		dd = &sc->sc_dd_buffer[i];
1647 		dd->dd_index = i;
1648 		dd->dd_idmac_vaddr = &sc->sc_dbuf[i];
1649 		dd->dd_paddr = sc->sc_dmap->dm_segs[0].ds_addr +
1650 		    (sizeof(struct gtidmac_desc) * i);
1651 		SLIST_INSERT_HEAD(&sc->sc_dlist, dd, dd_next);
1652 	}
1653 
1654 	/* Initialize IDMAC DMA channels */
1655 	mask = 0;
1656 	for (i = 0; i < nchan; i++) {
1657 		if (i > 0 && ((i * GTIDMAC_I_BITS) & 31 /*bit*/) == 0) {
1658 			bus_space_write_4(sc->sc_iot, sc->sc_ioh,
1659 			    GTIDMAC_IMR(i - 1), mask);
1660 			mask = 0;
1661 		}
1662 
1663 		if (bus_dmamap_create(sc->sc_dmat, GTIDMAC_MAXXFER,
1664 		    GTIDMAC_NSEGS, GTIDMAC_MAXXFER, 0, BUS_DMA_NOWAIT,
1665 		    &sc->sc_cdesc[i].chan_in)) {
1666 			aprint_error_dev(sc->sc_dev,
1667 			    "bus_dmamap_create failed: chan%d in\n", i);
1668 			goto fail4;
1669 		}
1670 		if (bus_dmamap_create(sc->sc_dmat, GTIDMAC_MAXXFER,
1671 		    GTIDMAC_NSEGS, GTIDMAC_MAXXFER, 0, BUS_DMA_NOWAIT,
1672 		    &sc->sc_cdesc[i].chan_out)) {
1673 			aprint_error_dev(sc->sc_dev,
1674 			    "bus_dmamap_create failed: chan%d out\n", i);
1675 			bus_dmamap_destroy(sc->sc_dmat,
1676 			    sc->sc_cdesc[i].chan_in);
1677 			goto fail4;
1678 		}
1679 		sc->sc_cdesc[i].chan_totalcnt = 0;
1680 		sc->sc_cdesc[i].chan_running = NULL;
1681 
1682 		/* Ignore bit overflow; the mask is 32 bits wide. */
1683 		mask |= GTIDMAC_I(i,
1684 		    GTIDMAC_I_COMP	|
1685 		    GTIDMAC_I_ADDRMISS	|
1686 		    GTIDMAC_I_ACCPROT	|
1687 		    GTIDMAC_I_WRPROT	|
1688 		    GTIDMAC_I_OWN);
1689 
1690 		/* 8bits/channel * 4channels => 32bit */
1691 		if ((i & 0x3) == 0x3) {
1692 			bus_space_write_4(sc->sc_iot, sc->sc_ioh,
1693 			    GTIDMAC_IMR(i), mask);
1694 			mask = 0;
1695 		}
1696 	}
1697 
1698 	return 0;
1699 
1700 fail4:
1701 	for (; i-- > 0;) {
1702 		bus_dmamap_destroy(sc->sc_dmat, sc->sc_cdesc[i].chan_in);
1703 		bus_dmamap_destroy(sc->sc_dmat, sc->sc_cdesc[i].chan_out);
1704 	}
1705 	bus_dmamap_unload(sc->sc_dmat, sc->sc_dmap);
1706 fail3:
1707 	bus_dmamap_destroy(sc->sc_dmat, sc->sc_dmap);
1708 fail2:
1709 	bus_dmamem_unmap(sc->sc_dmat, sc->sc_dbuf,
1710 	    sizeof(struct gtidmac_desc) * GTIDMAC_NDESC);
1711 fail1:
1712 	bus_dmamem_free(sc->sc_dmat, &segs, 1);
1713 fail0:
1714 	return -1;
1715 }
1716 
1717 static int
1718 mvxore_buffer_setup(struct gtidmac_softc *sc)
1719 {
1720 	bus_dma_segment_t segs;
1721 	struct gtidmac_dma_desc *dd;
1722 	uint32_t mask;
1723 	int nchan, nsegs, i, j;
1724 
1725 	nchan = sc->sc_mvxore_nchan;
1726 
1727 	if (bus_dmamem_alloc(sc->sc_dmat,
1728 	    sizeof(struct mvxore_desc) * MVXORE_NDESC * nchan,
1729 	    PAGE_SIZE, 0, &segs, 1, &nsegs, BUS_DMA_NOWAIT)) {
1730 		aprint_error_dev(sc->sc_dev,
1731 		    "bus_dmamem_alloc failed: xore descriptor buffer\n");
1732 		goto fail0;
1733 	}
1734 	if (bus_dmamem_map(sc->sc_dmat, &segs, 1,
1735 	    sizeof(struct mvxore_desc) * MVXORE_NDESC * nchan,
1736 	    (void **)&sc->sc_dbuf_xore, BUS_DMA_NOWAIT)) {
1737 		aprint_error_dev(sc->sc_dev,
1738 		    "bus_dmamem_map failed: xore descriptor buffer\n");
1739 		goto fail1;
1740 	}
1741 	if (bus_dmamap_create(sc->sc_dmat,
1742 	    sizeof(struct mvxore_desc) * MVXORE_NDESC * nchan, 1,
1743 	    sizeof(struct mvxore_desc) * MVXORE_NDESC * nchan, 0,
1744 	    BUS_DMA_NOWAIT, &sc->sc_dmap_xore)) {
1745 		aprint_error_dev(sc->sc_dev,
1746 		    "bus_dmamap_create failed: xore descriptor buffer\n");
1747 		goto fail2;
1748 	}
1749 	if (bus_dmamap_load(sc->sc_dmat, sc->sc_dmap_xore, sc->sc_dbuf_xore,
1750 	    sizeof(struct mvxore_desc) * MVXORE_NDESC * nchan,
1751 	    NULL, BUS_DMA_NOWAIT)) {
1752 		aprint_error_dev(sc->sc_dev,
1753 		    "bus_dmamap_load failed: xore descriptor buffer\n");
1754 		goto fail3;
1755 	}
1756 	SLIST_INIT(&sc->sc_dlist_xore);
1757 	for (i = 0; i < MVXORE_NDESC * nchan; i++) {
1758 		dd =
1759 		    &sc->sc_dd_buffer[i + GTIDMAC_NDESC * sc->sc_gtidmac_nchan];
1760 		dd->dd_index = i;
1761 		dd->dd_xore_vaddr = &sc->sc_dbuf_xore[i];
1762 		dd->dd_paddr = sc->sc_dmap_xore->dm_segs[0].ds_addr +
1763 		    (sizeof(struct mvxore_desc) * i);
1764 		SLIST_INSERT_HEAD(&sc->sc_dlist_xore, dd, dd_next);
1765 	}
1766 
1767 	/* Initialize XORE DMA channels */
1768 	mask = 0;
1769 	for (i = 0; i < nchan; i++) {
1770 		for (j = 0; j < MVXORE_NSRC; j++) {
1771 			if (bus_dmamap_create(sc->sc_dmat,
1772 			    MVXORE_MAXXFER, MVXORE_NSEGS,
1773 			    MVXORE_MAXXFER, 0, BUS_DMA_NOWAIT,
1774 			    &sc->sc_cdesc_xore[i].chan_in[j])) {
1775 				aprint_error_dev(sc->sc_dev,
1776 				    "bus_dmamap_create failed:"
1777 				    " xore chan%d in[%d]\n", i, j);
1778 				goto fail4;
1779 			}
1780 		}
1781 		if (bus_dmamap_create(sc->sc_dmat, MVXORE_MAXXFER,
1782 		    MVXORE_NSEGS, MVXORE_MAXXFER, 0,
1783 		    BUS_DMA_NOWAIT, &sc->sc_cdesc_xore[i].chan_out)) {
1784 			aprint_error_dev(sc->sc_dev,
1785 			    "bus_dmamap_create failed: chan%d out\n", i);
1786 			goto fail5;
1787 		}
1788 		sc->sc_cdesc_xore[i].chan_totalcnt = 0;
1789 		sc->sc_cdesc_xore[i].chan_running = NULL;
1790 
1791 		mask |= MVXORE_I(i,
1792 		    MVXORE_I_EOC	|
1793 		    MVXORE_I_ADDRDECODE	|
1794 		    MVXORE_I_ACCPROT	|
1795 		    MVXORE_I_WRPROT	|
1796 		    MVXORE_I_OWN	|
1797 		    MVXORE_I_INTPARITY	|
1798 		    MVXORE_I_XBAR);
1799 
1800 		/* 16bits/channel * 2channels => 32bit */
1801 		if (i & 0x1) {
1802 			bus_space_write_4(sc->sc_iot, sc->sc_ioh,
1803 			    MVXORE_XEIMR(sc, i >> 1), mask);
1804 			mask = 0;
1805 		}
1806 	}
1807 
1808 	return 0;
1809 
1810 	for (; i-- > 0;) {
1811 		bus_dmamap_destroy(sc->sc_dmat, sc->sc_cdesc_xore[i].chan_out);
1812 
1813 fail5:
1814 		j = MVXORE_NSRC;
1815 fail4:
1816 		for (; j-- > 0;)
1817 			bus_dmamap_destroy(sc->sc_dmat,
1818 			    sc->sc_cdesc_xore[i].chan_in[j]);
1819 	}
1820 	bus_dmamap_unload(sc->sc_dmat, sc->sc_dmap_xore);
1821 fail3:
1822 	bus_dmamap_destroy(sc->sc_dmat, sc->sc_dmap_xore);
1823 fail2:
1824 	bus_dmamem_unmap(sc->sc_dmat, sc->sc_dbuf_xore,
1825 	    sizeof(struct mvxore_desc) * MVXORE_NDESC * nchan);
1826 fail1:
1827 	bus_dmamem_free(sc->sc_dmat, &segs, 1);
1828 fail0:
1829 	return -1;
1830 }
1831 
1832 #ifdef GTIDMAC_DEBUG
1833 static void
1834 gtidmac_dump_idmacreg(struct gtidmac_softc *sc, int chan)
1835 {
1836 	uint32_t val;
1837 	char buf[256];
1838 
1839 	printf("IDMAC Registers\n");
1840 
1841 	val = bus_space_read_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_CIDMABCR(chan));
1842 	snprintb(buf, sizeof(buf), "\177\020b\037Own\0b\036BCLeft\0", val);
1843 	printf("  Byte Count                 : %s\n", buf);
1844 	printf("    ByteCnt                  :   0x%06x\n",
1845 	    val & GTIDMAC_CIDMABCR_BYTECNT_MASK);
1846 	printf("  Source Address             : 0x%08x\n",
1847 	    bus_space_read_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_CIDMASAR(chan)));
1848 	printf("  Destination Address        : 0x%08x\n",
1849 	    bus_space_read_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_CIDMADAR(chan)));
1850 	printf("  Next Descriptor Pointer    : 0x%08x\n",
1851 	    bus_space_read_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_CNDPR(chan)));
1852 	printf("  Current Descriptor Pointer : 0x%08x\n",
1853 	    bus_space_read_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_CCDPR(chan)));
1854 
1855 	val = bus_space_read_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_CCLR(chan));
1856 	snprintb(buf, sizeof(buf),
1857 	    "\177\020b\024Abr\0b\021CDEn\0b\016ChanAct\0b\015FetchND\0"
1858 	    "b\014ChanEn\0b\012IntMode\0b\005DestHold\0b\003SrcHold\0",
1859 	    val);
1860 	printf("  Channel Control (Low)      : %s\n", buf);
1861 	printf("    SrcBurstLimit            : %s Bytes\n",
1862 	  (val & GTIDMAC_CCLR_SBL_MASK) == GTIDMAC_CCLR_SBL_128B ? "128" :
1863 	    (val & GTIDMAC_CCLR_SBL_MASK) == GTIDMAC_CCLR_SBL_64B ? "64" :
1864 	    (val & GTIDMAC_CCLR_SBL_MASK) == GTIDMAC_CCLR_SBL_32B ? "32" :
1865 	    (val & GTIDMAC_CCLR_SBL_MASK) == GTIDMAC_CCLR_SBL_16B ? "16" :
1866 	    (val & GTIDMAC_CCLR_SBL_MASK) == GTIDMAC_CCLR_SBL_8B ? "8" :
1867 	    "unknown");
1868 	printf("    DstBurstLimit            : %s Bytes\n",
1869 	  (val & GTIDMAC_CCLR_DBL_MASK) == GTIDMAC_CCLR_DBL_128B ? "128" :
1870 	    (val & GTIDMAC_CCLR_DBL_MASK) == GTIDMAC_CCLR_DBL_64B ? "64" :
1871 	    (val & GTIDMAC_CCLR_DBL_MASK) == GTIDMAC_CCLR_DBL_32B ? "32" :
1872 	    (val & GTIDMAC_CCLR_DBL_MASK) == GTIDMAC_CCLR_DBL_16B ? "16" :
1873 	    (val & GTIDMAC_CCLR_DBL_MASK) == GTIDMAC_CCLR_DBL_8B ? "8" :
1874 	    "unknown");
1875 	printf("    ChainMode                : %sChained\n",
1876 	    val & GTIDMAC_CCLR_CHAINMODE_NC ? "Non-" : "");
1877 	printf("    TransferMode             : %s\n",
1878 	    val & GTIDMAC_CCLR_TRANSFERMODE_B ? "Block" : "Demand");
1879 	printf("    DescMode                 : %s\n",
1880 	    val & GTIDMAC_CCLR_DESCMODE_16M ? "16M" : "64k");
1881 	val = bus_space_read_4(sc->sc_iot, sc->sc_ioh, GTIDMAC_CCHR(chan));
1882 	snprintb(buf, sizeof(buf),
1883 	    "\177\020b\001DescByteSwap\0b\000Endianness\0", val);
1884 	printf("  Channel Control (High)     : %s\n", buf);
1885 }
1886 
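/*
 * Dump an IDMAC descriptor chain, following the next-descriptor pointers
 * until a NULL link is reached.  With "post" set the descriptors are
 * synced for the CPU before being read; otherwise they are synced back
 * for the device once dumped.
 */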
1887 static void
1888 gtidmac_dump_idmacdesc(struct gtidmac_softc *sc, struct gtidmac_dma_desc *dd,
1889 		       uint32_t mode, int post)
1890 {
1891 	struct gtidmac_desc *desc;
1892 	int i;
1893 	char buf[256];
1894 
1895 	printf("IDMAC Descriptor\n");
1896 
1897 	i = 0;
1898 	while (1 /*CONSTCOND*/) {
1899 		if (post)
1900 			bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap,
1901 			    dd->dd_index * sizeof(*desc), sizeof(*desc),
1902 			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1903 
1904 		desc = dd->dd_idmac_vaddr;
1905 
1906 		printf("%d (0x%" PRIxPADDR ")\n", i, dd->dd_paddr);
1907 		if (mode & GTIDMAC_CCLR_DESCMODE_16M) {
1908 			snprintb(buf, sizeof(buf),
1909 			    "\177\020b\037Own\0b\036BCLeft\0",
1910 			    desc->bc.mode16m.bcnt);
1911 			printf("  Byte Count              : %s\n", buf);
1912 			printf("    ByteCount             :   0x%06x\n",
1913 			    desc->bc.mode16m.bcnt &
1914 			    GTIDMAC_CIDMABCR_BYTECNT_MASK);
1915 		} else {
1916 			printf("  Byte Count              :     0x%04x\n",
1917 			    desc->bc.mode64k.bcnt);
1918 			printf("  Remaining Byte Count    :     0x%04x\n",
1919 			    desc->bc.mode64k.rbc);
1920 		}
1921 		printf("  Source Address          : 0x%08x\n", desc->srcaddr);
1922 		printf("  Destination Address     : 0x%08x\n", desc->dstaddr);
1923 		printf("  Next Descriptor Pointer : 0x%08x\n", desc->nextdp);
1924 
1925 		if (desc->nextdp == 0)
1926 			break;
1927 
1928 		if (!post)
1929 			bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap,
1930 			    dd->dd_index * sizeof(*desc), sizeof(*desc),
1931 			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1932 
1933 		i++;
1934 		dd = SLIST_NEXT(dd, dd_next);
1935 	}
1936 	if (!post)
1937 		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap,
1938 		    dd->dd_index * sizeof(*desc), sizeof(*desc),
1939 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1940 }
1941 
1942 static void
1943 gtidmac_dump_xorereg(struct gtidmac_softc *sc, int chan)
1944 {
1945 	uint32_t val, opmode;
1946 	char buf[64];
1947 
1948 	printf("XORE Registers\n");
1949 
1950 	val = bus_space_read_4(sc->sc_iot, sc->sc_ioh, MVXORE_XEXCR(sc, chan));
1951 	snprintb(buf, sizeof(buf),
1952 	    "\177\020"
1953 	    "b\017RegAccProtect\0b\016DesSwp\0b\015DwrReqSwp\0b\014DrdResSwp\0",
1954 	    val);
1955 	printf(" Configuration    : %s\n", buf);
1956 	opmode = val & MVXORE_XEXCR_OM_MASK;
1957 	printf("    OperationMode : %s operation\n",
1958 	  opmode == MVXORE_XEXCR_OM_XOR ? "XOR calculate" :
1959 	  opmode == MVXORE_XEXCR_OM_CRC32 ? "CRC-32 calculate" :
1960 	  opmode == MVXORE_XEXCR_OM_DMA ? "DMA" :
1961 	  opmode == MVXORE_XEXCR_OM_ECC ? "ECC cleanup" :
1962 	  opmode == MVXORE_XEXCR_OM_MEMINIT ? "Memory Initialization" :
1963 	  "unknown");
1964 	printf("    SrcBurstLimit : %s Bytes\n",
1965 	  (val & MVXORE_XEXCR_SBL_MASK) == MVXORE_XEXCR_SBL_128B ? "128" :
1966 	    (val & MVXORE_XEXCR_SBL_MASK) == MVXORE_XEXCR_SBL_64B ? "64" :
1967 	    (val & MVXORE_XEXCR_SBL_MASK) == MVXORE_XEXCR_SBL_32B ? "32" :
1968 	    "unknown");
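	/* XXX the DstBurstLimit decode below reuses the SBL mask and constants. */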
1969 	printf("    DstBurstLimit : %s Bytes\n",
1970 	  (val & MVXORE_XEXCR_SBL_MASK) == MVXORE_XEXCR_SBL_128B ? "128" :
1971 	    (val & MVXORE_XEXCR_SBL_MASK) == MVXORE_XEXCR_SBL_64B ? "64" :
1972 	    (val & MVXORE_XEXCR_SBL_MASK) == MVXORE_XEXCR_SBL_32B ? "32" :
1973 	    "unknown");
1974 	val =
1975 	    bus_space_read_4(sc->sc_iot, sc->sc_ioh, MVXORE_XEXACTR(sc, chan));
1976 	printf("  Activation      : 0x%08x\n", val);
1977 	val &= MVXORE_XEXACTR_XESTATUS_MASK;
1978 	printf("    XEstatus      : %s\n",
1979 	    val == MVXORE_XEXACTR_XESTATUS_NA ? "Channel not active" :
1980 	    val == MVXORE_XEXACTR_XESTATUS_ACT ? "Channel active" :
1981 	    val == MVXORE_XEXACTR_XESTATUS_P ? "Channel paused" : "???");
1982 
1983 	if (opmode == MVXORE_XEXCR_OM_XOR ||
1984 	    opmode == MVXORE_XEXCR_OM_CRC32 ||
1985 	    opmode == MVXORE_XEXCR_OM_DMA) {
1986 		printf("  NextDescPtr     : 0x%08x\n",
1987 		    bus_space_read_4(sc->sc_iot, sc->sc_ioh,
1988 		    MVXORE_XEXNDPR(sc, chan)));
1989 		printf("  CurrentDescPtr  : 0x%08x\n",
1990 		    bus_space_read_4(sc->sc_iot, sc->sc_ioh,
1991 		    MVXORE_XEXCDPR(chan)));
1992 	}
1993 	printf("  ByteCnt         : 0x%08x\n",
1994 	    bus_space_read_4(sc->sc_iot, sc->sc_ioh, MVXORE_XEXBCR(chan)));
1995 
1996 	if (opmode == MVXORE_XEXCR_OM_ECC ||
1997 	    opmode == MVXORE_XEXCR_OM_MEMINIT) {
1998 		printf("  DstPtr          : 0x%08x\n",
1999 		    bus_space_read_4(sc->sc_iot, sc->sc_ioh,
2000 		    MVXORE_XEXDPR(sc, chan)));
2001 		printf("  BlockSize       : 0x%08x\n",
2002 		    bus_space_read_4(sc->sc_iot, sc->sc_ioh,
2003 		    MVXORE_XEXBSR(sc, chan)));
2004 
2005 		if (opmode == MVXORE_XEXCR_OM_ECC) {
2006 			val = bus_space_read_4(sc->sc_iot, sc->sc_ioh,
2007 			    MVXORE_XETMCR);
2008 			if (val & MVXORE_XETMCR_TIMEREN) {
2009 				val >>= MVXORE_XETMCR_SECTIONSIZECTRL_SHIFT;
2010 				val &= MVXORE_XETMCR_SECTIONSIZECTRL_MASK;
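				/* SectionSizeCtrl encodes the section size as a power of two. */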
2011 				printf("  SectionSizeCtrl : 0x%08x\n", 1 << val);
2012 				printf("  TimerInitVal    : 0x%08x\n",
2013 				    bus_space_read_4(sc->sc_iot, sc->sc_ioh,
2014 				    MVXORE_XETMIVR));
2015 				printf("  TimerCrntVal    : 0x%08x\n",
2016 				    bus_space_read_4(sc->sc_iot, sc->sc_ioh,
2017 				    MVXORE_XETMCVR));
2018 			}
2019 		} else	/* MVXORE_XEXCR_OM_MEMINIT */
2020 			printf("  InitVal         : 0x%08x%08x\n",
2021 			    bus_space_read_4(sc->sc_iot, sc->sc_ioh,
2022 			    MVXORE_XEIVRH),
2023 			    bus_space_read_4(sc->sc_iot, sc->sc_ioh,
2024 			    MVXORE_XEIVRL));
2025 	}
2026 }
2027 
2028 static void
2029 gtidmac_dump_xoredesc(struct gtidmac_softc *sc, struct gtidmac_dma_desc *dd,
2030 		      uint32_t mode, int post)
2031 {
2032 	struct mvxore_desc *desc;
2033 	int i, j;
2034 	char buf[256];
2035 
2036 	printf("XORE Descriptor\n");
2037 
2038 	mode &= MVXORE_XEXCR_OM_MASK;
2039 
2040 	i = 0;
2041 	while (1 /*CONSTCOND*/) {
2042 		if (post)
2043 			bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap_xore,
2044 			    dd->dd_index * sizeof(*desc), sizeof(*desc),
2045 			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2046 
2047 		desc = dd->dd_xore_vaddr;
2048 
2049 		printf("%d (0x%" PRIxPADDR ")\n", i, dd->dd_paddr);
2050 
2051 		snprintb(buf, sizeof(buf), "\177\020b\037Own\0b\036Success\0",
2052 		    desc->stat);
2053 		printf("  Status                  : %s\n", buf);
2054 		if (desc->cmd & MVXORE_DESC_CMD_CRCLAST && post)
2055 			printf("  CRC-32 Result           : 0x%08x\n",
2056 			    desc->result);
2057 		snprintb(buf, sizeof(buf),
2058 		    "\177\020b\037EODIntEn\0b\036CRCLast\0"
2059 		    "b\007Src7Cmd\0b\006Src6Cmd\0b\005Src5Cmd\0b\004Src4Cmd\0"
2060 		    "b\003Src3Cmd\0b\002Src2Cmd\0b\001Src1Cmd\0b\000Src0Cmd\0",
2061 		    desc->cmd);
2062 		printf("  Command                 : %s\n", buf);
2063 		printf("  Next Descriptor Address : 0x%08x\n", desc->nextda);
2064 		printf("  Byte Count              :   0x%06x\n", desc->bcnt);
2065 		printf("  Destination Address     : 0x%08x\n", desc->dstaddr);
2066 		if (mode == MVXORE_XEXCR_OM_XOR) {
2067 			for (j = 0; j < MVXORE_NSRC; j++)
2068 				if (desc->cmd & MVXORE_DESC_CMD_SRCCMD(j))
2069 					printf("  Source Address#%d        :"
2070 					    " 0x%08x\n", j, desc->srcaddr[j]);
2071 		} else
2072 			printf("  Source Address          : 0x%08x\n",
2073 			    desc->srcaddr[0]);
2074 
2075 		if (desc->nextda == 0)
2076 			break;
2077 
2078 		if (!post)
2079 			bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap_xore,
2080 			    dd->dd_index * sizeof(*desc), sizeof(*desc),
2081 			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2082 
2083 		i++;
2084 		dd = SLIST_NEXT(dd, dd_next);
2085 	}
2086 	if (!post)
2087 		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap_xore,
2088 		    dd->dd_index * sizeof(*desc), sizeof(*desc),
2089 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2090 }
2091 #endif
2092