/*	$NetBSD: nextdma.c,v 1.45 2009/11/05 18:11:09 dyoung Exp $	*/
/*
 * Copyright (c) 1998 Darrin B. Jewell
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Darrin B. Jewell
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: nextdma.c,v 1.45 2009/11/05 18:11:09 dyoung Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/syslog.h>
#include <sys/socket.h>
#include <sys/device.h>
#include <sys/malloc.h>
#include <sys/ioctl.h>
#include <sys/errno.h>

#define _M68K_BUS_DMA_PRIVATE
#include <machine/autoconf.h>
#include <machine/cpu.h>
#include <machine/intr.h>

#include <m68k/cacheops.h>

#include <next68k/next68k/isr.h>
#include <next68k/next68k/nextrom.h>

#include <next68k/dev/intiovar.h>

#include "nextdmareg.h"
#include "nextdmavar.h"

#include "esp.h"
#include "xe.h"

#if DEBUG
#define ND_DEBUG
#endif

extern int turbo;

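/*
 * Debugging aid (an assumption about intent, not documented here): panic()
 * is overridden so that it first executes a "trap #15" breakpoint, which
 * should drop into the kernel debugger/ROM monitor when one is present,
 * and then falls through to printf so the message is still emitted.
 */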
#define panic		__asm volatile("trap  #15"); printf

#define NEXTDMA_DEBUG nextdma_debug
/* (nsc->sc_chan->nd_intr == NEXT_I_SCSI_DMA) && nextdma_debug */
#if defined(ND_DEBUG)
int nextdma_debug = 0;
#define DPRINTF(x) if (NEXTDMA_DEBUG) printf x;
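/*
 * Simple event trace: NDTRACEIF() appends single characters (and segment
 * lengths) to the ndtrace[] buffer from interrupt context so the sequence
 * of DMA events can be inspected after the fact.
 */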
int ndtraceshow = 0;
char ndtrace[8192+100];
char *ndtracep = ndtrace;
#define NDTRACEIF(x) if (10 && /* (nsc->sc_chan->nd_intr == NEXT_I_SCSI_DMA) && */ ndtracep < (ndtrace + 8192)) do {x;} while (0)
#else
#define DPRINTF(x)
#define NDTRACEIF(x)
#endif
#define PRINTF(x) printf x

#if defined(ND_DEBUG)
int nextdma_debug_enetr_idx = 0;
unsigned int nextdma_debug_enetr_state[100] = { 0 };
int nextdma_debug_scsi_idx = 0;
unsigned int nextdma_debug_scsi_state[100] = { 0 };

void nextdma_debug_initstate(struct nextdma_softc *);
void nextdma_debug_savestate(struct nextdma_softc *, unsigned int);
void nextdma_debug_scsi_dumpstate(void);
void nextdma_debug_enetr_dumpstate(void);
#endif

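/*
 * Rough usage sketch (inferred from how the callbacks are invoked below;
 * the mydrv_* names are placeholders, not part of this driver).  A client
 * such as the esp or xe driver locates its channel, hooks its callbacks
 * into sc_conf, and then kicks off a transfer:
 *
 *	struct nextdma_softc *nsc = nextdma_findchannel("scsi");
 *
 *	nsc->sc_conf.nd_continue_cb  = mydrv_continue;   (returns next dmamap)
 *	nsc->sc_conf.nd_completed_cb = mydrv_completed;  (map fully transferred)
 *	nsc->sc_conf.nd_shutdown_cb  = mydrv_shutdown;   (channel went idle)
 *	nsc->sc_conf.nd_cb_arg       = mydrv_softc;
 *
 *	nextdma_start(nsc, DMACSR_SETREAD);
 */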
int	nextdma_match(struct device *, struct cfdata *, void *);
void	nextdma_attach(struct device *, struct device *, void *);

void nextdmamap_sync(bus_dma_tag_t, bus_dmamap_t, bus_addr_t, bus_size_t, int);
int nextdma_continue(struct nextdma_softc *);
void nextdma_rotate(struct nextdma_softc *);

void nextdma_setup_cont_regs(struct nextdma_softc *);
void nextdma_setup_curr_regs(struct nextdma_softc *);

#if NESP > 0
static int nextdma_esp_intr(void *);
#endif
#if NXE > 0
static int nextdma_enet_intr(void *);
#endif

#define nd_bsr4(reg) \
	bus_space_read_4(nsc->sc_bst, nsc->sc_bsh, (reg))
#define nd_bsw4(reg,val) \
	bus_space_write_4(nsc->sc_bst, nsc->sc_bsh, (reg), (val))

CFATTACH_DECL(nextdma, sizeof(struct nextdma_softc),
    nextdma_match, nextdma_attach, NULL, NULL);

static struct nextdma_channel nextdma_channel[] = {
#if NESP > 0
	{ "scsi", NEXT_P_SCSI_CSR, DD_SIZE, NEXT_I_SCSI_DMA, &nextdma_esp_intr },
#endif
#if NXE > 0
	{ "enetx", NEXT_P_ENETX_CSR, DD_SIZE, NEXT_I_ENETX_DMA, &nextdma_enet_intr },
	{ "enetr", NEXT_P_ENETR_CSR, DD_SIZE, NEXT_I_ENETR_DMA, &nextdma_enet_intr },
#endif
};
static int nnextdma_channels = (sizeof(nextdma_channel)/sizeof(nextdma_channel[0]));

static int attached = 0;

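/*
 * Channels attach in the order they appear in nextdma_channel[]; `attached'
 * counts how many have been claimed so far and selects the next entry in
 * match/attach.  Client drivers look their channel up by name with
 * nextdma_findchannel() and hook their callbacks into the softc it returns.
 */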
struct nextdma_softc *
nextdma_findchannel(const char *name)
{
	device_t dev;
	deviter_t di;

	for (dev = deviter_first(&di, DEVITER_F_ROOT_FIRST);
	     dev != NULL;
	     dev = deviter_next(&di)) {
		if (strncmp(dev->dv_xname, "nextdma", 7) == 0) {
			struct nextdma_softc *nsc = device_private(dev);
			if (strcmp(nsc->sc_chan->nd_name, name) == 0)
				break;
		}
	}
	deviter_release(&di);
	if (dev == NULL)
		return NULL;
	return device_private(dev);
}

int
nextdma_match(struct device *parent, struct cfdata *match, void *aux)
{
	struct intio_attach_args *ia = (struct intio_attach_args *)aux;

	if (attached >= nnextdma_channels)
		return (0);

	ia->ia_addr = (void *)nextdma_channel[attached].nd_base;

	return (1);
}

void
nextdma_attach(struct device *parent, struct device *self, void *aux)
{
	struct nextdma_softc *nsc = (struct nextdma_softc *)self;
	struct intio_attach_args *ia = (struct intio_attach_args *)aux;

	if (attached >= nnextdma_channels)
		return;

	nsc->sc_chan = &nextdma_channel[attached];

	nsc->sc_dmat = ia->ia_dmat;
	nsc->sc_bst = ia->ia_bst;

	if (bus_space_map(nsc->sc_bst, nsc->sc_chan->nd_base,
			  nsc->sc_chan->nd_size, 0, &nsc->sc_bsh)) {
		panic("%s: can't map DMA registers for channel %s",
		      nsc->sc_dev.dv_xname, nsc->sc_chan->nd_name);
	}

	nextdma_init (nsc);

	isrlink_autovec(nsc->sc_chan->nd_intrfunc, nsc,
			NEXT_I_IPL(nsc->sc_chan->nd_intr), 10, NULL);
	INTR_ENABLE(nsc->sc_chan->nd_intr);

	printf (": channel %d (%s)\n", attached,
		nsc->sc_chan->nd_name);
	attached++;

	return;
}

void
nextdma_init(struct nextdma_softc *nsc)
{
#ifdef ND_DEBUG
	if (NEXTDMA_DEBUG) {
		char sbuf[256];

		snprintb(sbuf, sizeof(sbuf), NEXT_INTR_BITS,
		    NEXT_I_BIT(nsc->sc_chan->nd_intr));
		printf("DMA init ipl (%ld) intr(0x%s)\n",
			NEXT_I_IPL(nsc->sc_chan->nd_intr), sbuf);
	}
#endif

	nsc->sc_stat.nd_map = NULL;
	nsc->sc_stat.nd_idx = 0;
	nsc->sc_stat.nd_map_cont = NULL;
	nsc->sc_stat.nd_idx_cont = 0;
	nsc->sc_stat.nd_exception = 0;

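	/*
	 * Pulse RESET (clearing any stale COMPLETE state) through the CSR,
	 * then write zero to leave the channel idle before the current and
	 * continue register sets are loaded.
	 */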
	nd_bsw4 (DD_CSR, DMACSR_RESET | DMACSR_CLRCOMPLETE);
	nd_bsw4 (DD_CSR, 0);

#if 01
	nextdma_setup_curr_regs(nsc);
	nextdma_setup_cont_regs(nsc);
#endif

#if defined(DIAGNOSTIC)
	{
		u_long state;
		state = nd_bsr4 (DD_CSR);

#if 1
		/* mourning (a 25 MHz 68040 mono slab) appears to set BUSEXC
		 * milo (a 25 MHz 68040 mono cube) didn't have this problem
		 * Darrin B. Jewell <jewell@mit.edu>  Mon May 25 07:53:05 1998
		 */
		state &= (DMACSR_COMPLETE | DMACSR_SUPDATE | DMACSR_ENABLE);
#else
		state &= (DMACSR_BUSEXC | DMACSR_COMPLETE |
			  DMACSR_SUPDATE | DMACSR_ENABLE);
#endif
		if (state) {
			nextdma_print(nsc);
			panic("DMA did not reset");
		}
	}
#endif
}

void
nextdma_reset(struct nextdma_softc *nsc)
{
	int s;
	struct nextdma_status *stat = &nsc->sc_stat;

	s = spldma();

	DPRINTF(("DMA reset\n"));

#if (defined(ND_DEBUG))
	if (NEXTDMA_DEBUG > 1) nextdma_print(nsc);
#endif

	nd_bsw4 (DD_CSR, DMACSR_CLRCOMPLETE | DMACSR_RESET);
	if ((stat->nd_map) || (stat->nd_map_cont)) {
		if (stat->nd_map_cont) {
			DPRINTF(("DMA: resetting with non null continue map\n"));
			if (nsc->sc_conf.nd_completed_cb)
				(*nsc->sc_conf.nd_completed_cb)
					(stat->nd_map_cont, nsc->sc_conf.nd_cb_arg);

			stat->nd_map_cont = 0;
			stat->nd_idx_cont = 0;
		}
		if (nsc->sc_conf.nd_shutdown_cb)
			(*nsc->sc_conf.nd_shutdown_cb)(nsc->sc_conf.nd_cb_arg);
		stat->nd_map = 0;
		stat->nd_idx = 0;
	}

	splx(s);
}

/****************************************************************/


/* Call the completed and continue callbacks to try to fill
 * in the dma continue buffers.
 */
void
nextdma_rotate(struct nextdma_softc *nsc)
{
	struct nextdma_status *stat = &nsc->sc_stat;

	NDTRACEIF (*ndtracep++ = 'r');
	DPRINTF(("DMA nextdma_rotate()\n"));

	/* Rotate the continue map into the current map */
	stat->nd_map = stat->nd_map_cont;
	stat->nd_idx = stat->nd_idx_cont;

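	/*
	 * Advance to the next segment of the old continue map.  If there was
	 * no continue map, or we have run off its last segment, ask the
	 * client for a fresh map via the continue callback and restart at
	 * segment 0.
	 */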
	if ((!stat->nd_map_cont) ||
	    ((++stat->nd_idx_cont >= stat->nd_map_cont->dm_nsegs))) {
		if (nsc->sc_conf.nd_continue_cb) {
			stat->nd_map_cont = (*nsc->sc_conf.nd_continue_cb)
				(nsc->sc_conf.nd_cb_arg);
			if (stat->nd_map_cont) {
				stat->nd_map_cont->dm_xfer_len = 0;
			}
		} else {
			stat->nd_map_cont = 0;
		}
		stat->nd_idx_cont = 0;
	}

#if defined(DIAGNOSTIC) && 0
	if (stat->nd_map_cont) {
		if (!DMA_BEGINALIGNED(stat->nd_map_cont->dm_segs[stat->nd_idx_cont].ds_addr)) {
			nextdma_print(nsc);
			panic("DMA request unaligned at start");
		}
		if (!DMA_ENDALIGNED(stat->nd_map_cont->dm_segs[stat->nd_idx_cont].ds_addr +
				stat->nd_map_cont->dm_segs[stat->nd_idx_cont].ds_len)) {
			nextdma_print(nsc);
			panic("DMA request unaligned at end");
		}
	}
#endif

}

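/*
 * The DMA engine has two sets of buffer registers: the "current" set
 * (next/limit) describes the segment being transferred now, and the
 * "continue" set (start/stop) describes the segment the hardware chains to
 * when SUPDATE is set.  The two helpers below load one set each, from
 * nd_map/nd_idx and nd_map_cont/nd_idx_cont respectively.
 */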
void
nextdma_setup_curr_regs(struct nextdma_softc *nsc)
{
	bus_addr_t dd_next;
	bus_addr_t dd_limit;
	bus_addr_t dd_saved_next;
	bus_addr_t dd_saved_limit;
	struct nextdma_status *stat = &nsc->sc_stat;

	NDTRACEIF (*ndtracep++ = 'C');
	DPRINTF(("DMA nextdma_setup_curr_regs()\n"));

	if (stat->nd_map) {
		dd_next = stat->nd_map->dm_segs[stat->nd_idx].ds_addr;
		dd_limit = (stat->nd_map->dm_segs[stat->nd_idx].ds_addr +
			    stat->nd_map->dm_segs[stat->nd_idx].ds_len);

		if (!turbo && nsc->sc_chan->nd_intr == NEXT_I_ENETX_DMA) {
			dd_limit |= 0x80000000; /* Ethernet transmit needs secret magic */
			dd_limit += 15;
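			/* this bias is stripped back off in nextdma_enet_intr() */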
		}
	} else {
		dd_next = turbo ? 0 : 0xdeadbeef;
		dd_limit = turbo ? 0 : 0xdeadbeef;
	}

	dd_saved_next = dd_next;
	dd_saved_limit = dd_limit;

	NDTRACEIF (if (stat->nd_map) {
		sprintf (ndtracep, "%ld", stat->nd_map->dm_segs[stat->nd_idx].ds_len);
		ndtracep += strlen (ndtracep);
	});

	if (!turbo && (nsc->sc_chan->nd_intr == NEXT_I_ENETX_DMA)) {
		nd_bsw4 (DD_NEXT_INITBUF, dd_next);
	} else {
		nd_bsw4 (DD_NEXT, dd_next);
	}
	nd_bsw4 (DD_LIMIT, dd_limit);
	if (!turbo) nd_bsw4 (DD_SAVED_NEXT, dd_saved_next);
	if (!turbo) nd_bsw4 (DD_SAVED_LIMIT, dd_saved_limit);

#ifdef DIAGNOSTIC
	if ((nd_bsr4 (DD_NEXT_INITBUF) != dd_next)
	    || (nd_bsr4 (DD_NEXT) != dd_next)
	    || (nd_bsr4 (DD_LIMIT) != dd_limit)
	    || (!turbo && (nd_bsr4 (DD_SAVED_NEXT) != dd_saved_next))
	    || (!turbo && (nd_bsr4 (DD_SAVED_LIMIT) != dd_saved_limit))
		) {
		nextdma_print(nsc);
		panic("DMA failure writing to current regs");
	}
#endif
}

void
nextdma_setup_cont_regs(struct nextdma_softc *nsc)
{
	bus_addr_t dd_start;
	bus_addr_t dd_stop;
	bus_addr_t dd_saved_start;
	bus_addr_t dd_saved_stop;
	struct nextdma_status *stat = &nsc->sc_stat;

	NDTRACEIF (*ndtracep++ = 'c');
	DPRINTF(("DMA nextdma_setup_cont_regs()\n"));

	if (stat->nd_map_cont) {
		dd_start = stat->nd_map_cont->dm_segs[stat->nd_idx_cont].ds_addr;
		dd_stop  = (stat->nd_map_cont->dm_segs[stat->nd_idx_cont].ds_addr +
			    stat->nd_map_cont->dm_segs[stat->nd_idx_cont].ds_len);

		if (!turbo && nsc->sc_chan->nd_intr == NEXT_I_ENETX_DMA) {
			dd_stop |= 0x80000000; /* Ethernet transmit needs secret magic */
			dd_stop += 15;
		}
	} else {
		dd_start = turbo ? nd_bsr4 (DD_NEXT) : 0xdeadbee0;
		dd_stop = turbo ? 0 : 0xdeadbee0;
	}

	dd_saved_start = dd_start;
	dd_saved_stop  = dd_stop;

	NDTRACEIF (if (stat->nd_map_cont) {
		sprintf (ndtracep, "%ld", stat->nd_map_cont->dm_segs[stat->nd_idx_cont].ds_len);
		ndtracep += strlen (ndtracep);
	});

	nd_bsw4 (DD_START, dd_start);
	nd_bsw4 (DD_STOP, dd_stop);
	if (!turbo) nd_bsw4 (DD_SAVED_START, dd_saved_start);
	if (!turbo) nd_bsw4 (DD_SAVED_STOP, dd_saved_stop);
	if (turbo && nsc->sc_chan->nd_intr == NEXT_I_ENETR_DMA)
		nd_bsw4 (DD_STOP - 0x40, dd_start);

#ifdef DIAGNOSTIC
	if ((nd_bsr4 (DD_START) != dd_start)
	    || (dd_stop && (nd_bsr4 (DD_STOP) != dd_stop))
	    || (!turbo && (nd_bsr4 (DD_SAVED_START) != dd_saved_start))
	    || (!turbo && (nd_bsr4 (DD_SAVED_STOP) != dd_saved_stop))
		) {
		nextdma_print(nsc);
		panic("DMA failure writing to continue regs");
	}
#endif
}

/****************************************************************/

#if NESP > 0
static int
nextdma_esp_intr(void *arg)
{
	/* @@@ This is bogus, we can't be certain of arg's type
	 * unless the interrupt is for us.  For now we successfully
	 * cheat because DMA interrupts are the only things invoked
	 * at this interrupt level.
	 */
	struct nextdma_softc *nsc = arg;
	int esp_dma_int(void *); /* XXX */

	if (!INTR_OCCURRED(nsc->sc_chan->nd_intr))
		return 0;
	/* Handle dma interrupts */

	return esp_dma_int (nsc->sc_conf.nd_cb_arg);

}
#endif

#if NXE > 0
static int
nextdma_enet_intr(void *arg)
{
	/* @@@ This is bogus, we can't be certain of arg's type
	 * unless the interrupt is for us.  For now we successfully
	 * cheat because DMA interrupts are the only things invoked
	 * at this interrupt level.
	 */
	struct nextdma_softc *nsc = arg;
	unsigned int state;
	bus_addr_t onext;
	bus_addr_t olimit;
	bus_addr_t slimit;
	int result;
	struct nextdma_status *stat = &nsc->sc_stat;

	if (!INTR_OCCURRED(nsc->sc_chan->nd_intr))
		return 0;
	/* Handle dma interrupts */

	NDTRACEIF (*ndtracep++ = 'D');
#ifdef ND_DEBUG
	if (NEXTDMA_DEBUG) {
		char sbuf[256];
		snprintb(sbuf, sizeof(sbuf), NEXT_INTR_BITS,
		    NEXT_I_BIT(nsc->sc_chan->nd_intr));
		printf("DMA interrupt ipl (%ld) intr(0x%s)\n",
		       NEXT_I_IPL(nsc->sc_chan->nd_intr), sbuf);
	}
#endif

#ifdef DIAGNOSTIC
	if (!stat->nd_map) {
		nextdma_print(nsc);
		panic("DMA missing current map in interrupt!");
	}
#endif

	state = nd_bsr4 (DD_CSR);

#if defined(ND_DEBUG)
	nextdma_debug_savestate(nsc, state);
#endif

#ifdef DIAGNOSTIC
	if (/* (state & DMACSR_READ) || */ !(state & DMACSR_COMPLETE)) {
		char sbuf[256];
		nextdma_print(nsc);
		snprintb(sbuf, sizeof(sbuf), DMACSR_BITS, state);
		printf("DMA: state 0x%s\n",sbuf);
		panic("DMA complete not set in interrupt");
	}
#endif

	DPRINTF(("DMA: finishing xfer\n"));

	onext = stat->nd_map->dm_segs[stat->nd_idx].ds_addr;
	olimit = onext + stat->nd_map->dm_segs[stat->nd_idx].ds_len;

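	/*
	 * Encode the channel state into `result' so the switch below can
	 * name each combination explicitly:
	 *   0x01  ENABLE still set (another buffer is in flight)
	 *   0x02  SUPDATE was set (the saved registers were updated)
	 *   0x04  no continue map was queued, so a shutdown is expected
	 *   0x08  BUSEXC (bus exception) was flagged
	 */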
	result = 0;
	if (state & DMACSR_ENABLE) {
		/* enable bit was set */
		result |= 0x01;
	}
	if (state & DMACSR_SUPDATE) {
		/* supdate bit was set */
		result |= 0x02;
	}
	if (stat->nd_map_cont == NULL) {
		KASSERT(stat->nd_idx+1 == stat->nd_map->dm_nsegs);
		/* Expecting a shutdown, didn't SETSUPDATE last turn */
		result |= 0x04;
	}
	if (state & DMACSR_BUSEXC) {
		/* bus exception bit was set */
		result |= 0x08;
	}
	switch (result) {
	case 0x00: /* !BUSEXC && !expecting && !SUPDATE && !ENABLE */
	case 0x08: /* BUSEXC && !expecting && !SUPDATE && !ENABLE */
		if (turbo) {
			volatile u_int *limit = (volatile u_int *)IIOV(0x2000050+0x4000);
			slimit = *limit;
		} else {
			slimit = nd_bsr4 (DD_SAVED_LIMIT);
		}
		break;
	case 0x01: /* !BUSEXC && !expecting && !SUPDATE && ENABLE */
	case 0x09: /* BUSEXC && !expecting && !SUPDATE && ENABLE */
		if (turbo) {
			volatile u_int *limit = (volatile u_int *)IIOV(0x2000050+0x4000);
			slimit = *limit;
		} else {
			slimit = nd_bsr4 (DD_SAVED_LIMIT);
		}
		break;
	case 0x02: /* !BUSEXC && !expecting && SUPDATE && !ENABLE */
	case 0x0a: /* BUSEXC && !expecting && SUPDATE && !ENABLE */
		slimit = nd_bsr4 (DD_NEXT);
		break;
	case 0x04:  /* !BUSEXC && expecting && !SUPDATE && !ENABLE */
	case 0x0c: /* BUSEXC && expecting && !SUPDATE && !ENABLE */
		slimit = nd_bsr4 (DD_LIMIT);
		break;
	default:
#ifdef DIAGNOSTIC
	{
		char sbuf[256];
		printf("DMA: please send this output to port-next68k-maintainer@NetBSD.org:\n");
		snprintb(sbuf, sizeof(sbuf), DMACSR_BITS, state);
		printf("DMA: state 0x%s\n",sbuf);
		nextdma_print(nsc);
		panic("DMA: condition 0x%02x not yet documented to occur",result);
	}
#endif
	slimit = olimit;
	break;
	}

	if (!turbo && nsc->sc_chan->nd_intr == NEXT_I_ENETX_DMA) {
		slimit &= ~0x80000000;
		slimit -= 15;
	}

#ifdef DIAGNOSTIC
	if ((state & DMACSR_READ))
		DPRINTF (("limits: 0x%08lx <= 0x%08lx <= 0x%08lx %s\n", onext, slimit, olimit,
			  (state & DMACSR_READ) ? "read" : "write"));
	if ((slimit < onext) || (slimit > olimit)) {
		char sbuf[256];
		snprintb(sbuf, sizeof(sbuf), DMACSR_BITS, state);
		printf("DMA: state 0x%s\n",sbuf);
		nextdma_print(nsc);
		panic("DMA: Unexpected limit register (0x%08lx) in finish_xfer",slimit);
	}
#endif

#ifdef DIAGNOSTIC
	if ((state & DMACSR_ENABLE) && ((stat->nd_idx+1) != stat->nd_map->dm_nsegs)) {
		if (slimit != olimit) {
			char sbuf[256];
			snprintb(sbuf, sizeof(sbuf), DMACSR_BITS, state);
			printf("DMA: state 0x%s\n",sbuf);
			nextdma_print(nsc);
			panic("DMA: short limit register (0x%08lx) w/o finishing map.",slimit);
		}
	}
#endif

#if (defined(ND_DEBUG))
	if (NEXTDMA_DEBUG > 2) nextdma_print(nsc);
#endif

	stat->nd_map->dm_xfer_len += slimit-onext;

	/* If we've reached the end of the current map, then inform
	 * the client that we've completed that map.
	 */
	if ((stat->nd_idx+1) == stat->nd_map->dm_nsegs) {
		if (nsc->sc_conf.nd_completed_cb)
			(*nsc->sc_conf.nd_completed_cb)
				(stat->nd_map, nsc->sc_conf.nd_cb_arg);
	} else {
		KASSERT(stat->nd_map == stat->nd_map_cont);
		KASSERT(stat->nd_idx+1 == stat->nd_idx_cont);
	}
	stat->nd_map = 0;
	stat->nd_idx = 0;

#if (defined(ND_DEBUG))
	if (NEXTDMA_DEBUG) {
		char sbuf[256];
		snprintb(sbuf, sizeof(sbuf), DMACSR_BITS, state);
		printf("CLNDMAP: dd->dd_csr          = 0x%s\n",   sbuf);
	}
#endif
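	/*
	 * If the channel is still enabled, rotate the next map in, reload the
	 * continue registers, and acknowledge the interrupt; SETSUPDATE is
	 * asserted only when another map is queued, so the final buffer ends
	 * in a shutdown instead of another chain.
	 */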
	if (state & DMACSR_ENABLE) {
		u_long dmadir;		/* DMACSR_SETREAD or DMACSR_SETWRITE */

		nextdma_rotate(nsc);
		nextdma_setup_cont_regs(nsc);

		if (state & DMACSR_READ) {
			dmadir = DMACSR_SETREAD;
		} else {
			dmadir = DMACSR_SETWRITE;
		}

		if (stat->nd_map_cont == NULL) {
			KASSERT(stat->nd_idx+1 == stat->nd_map->dm_nsegs);
			nd_bsw4 (DD_CSR, DMACSR_CLRCOMPLETE | dmadir);
			NDTRACEIF (*ndtracep++ = 'g');
		} else {
			nd_bsw4 (DD_CSR, DMACSR_CLRCOMPLETE | dmadir | DMACSR_SETSUPDATE);
			NDTRACEIF (*ndtracep++ = 'G');
		}
	} else {
		DPRINTF(("DMA: a shutdown occurred\n"));
		nd_bsw4 (DD_CSR, DMACSR_CLRCOMPLETE | DMACSR_RESET);

		/* Cleanup more incomplete transfers */
		/* cleanup continue map */
		if (stat->nd_map_cont) {
			DPRINTF(("DMA: shutting down with non null continue map\n"));
			if (nsc->sc_conf.nd_completed_cb)
				(*nsc->sc_conf.nd_completed_cb)
					(stat->nd_map_cont, nsc->sc_conf.nd_cb_arg);

			stat->nd_map_cont = 0;
			stat->nd_idx_cont = 0;
		}
		if (nsc->sc_conf.nd_shutdown_cb)
			(*nsc->sc_conf.nd_shutdown_cb)(nsc->sc_conf.nd_cb_arg);
	}

#ifdef ND_DEBUG
	if (NEXTDMA_DEBUG) {
		char sbuf[256];

		snprintb(sbuf, sizeof(sbuf),
		    NEXT_INTR_BITS, NEXT_I_BIT(nsc->sc_chan->nd_intr));
		printf("DMA exiting interrupt ipl (%ld) intr(0x%s)\n",
		       NEXT_I_IPL(nsc->sc_chan->nd_intr), sbuf);
	}
#endif

	return(1);
}
#endif

/*
 * Check to see if DMA has finished for a channel.
 */
int
nextdma_finished(struct nextdma_softc *nsc)
{
	int r;
	int s;
	struct nextdma_status *stat = &nsc->sc_stat;

	s = spldma();
	r = (stat->nd_map == NULL) && (stat->nd_map_cont == NULL);
	splx(s);

	return(r);
}

void
nextdma_start(struct nextdma_softc *nsc, u_long dmadir)
{
	struct nextdma_status *stat = &nsc->sc_stat;

	NDTRACEIF (*ndtracep++ = 'n');
#ifdef DIAGNOSTIC
	if (!nextdma_finished(nsc)) {
		char sbuf[256];

		snprintb(sbuf, sizeof(sbuf),
		    NEXT_INTR_BITS, NEXT_I_BIT(nsc->sc_chan->nd_intr));
		panic("DMA trying to start before previous finished on intr(0x%s)", sbuf);
	}
#endif

#ifdef ND_DEBUG
	if (NEXTDMA_DEBUG) {
		char sbuf[256];

		snprintb(sbuf, sizeof(sbuf),
		    NEXT_INTR_BITS, NEXT_I_BIT(nsc->sc_chan->nd_intr));
		printf("DMA start (%ld) intr(0x%s)\n",
		       NEXT_I_IPL(nsc->sc_chan->nd_intr), sbuf);
	}
#endif

#ifdef DIAGNOSTIC
	if (stat->nd_map) {
		nextdma_print(nsc);
		panic("DMA: nextdma_start() with non null map");
	}
	if (stat->nd_map_cont) {
		nextdma_print(nsc);
		panic("DMA: nextdma_start() with non null continue map");
	}
#endif

#ifdef DIAGNOSTIC
	if ((dmadir != DMACSR_SETREAD) && (dmadir != DMACSR_SETWRITE)) {
		panic("DMA: nextdma_start(), dmadir arg must be DMACSR_SETREAD or DMACSR_SETWRITE");
	}
#endif

#if defined(ND_DEBUG)
	nextdma_debug_initstate(nsc);
#endif

	/* preload both the current and the continue maps */
	nextdma_rotate(nsc);

#ifdef DIAGNOSTIC
	if (!stat->nd_map_cont) {
		panic("No map available in nextdma_start()");
	}
#endif

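	/*
	 * The rotate above primed the continue slot; this second rotate
	 * promotes that map to the current slot and asks the client for one
	 * more, so both register sets can be loaded below.
	 */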
	nextdma_rotate(nsc);

#ifdef ND_DEBUG
	if (NEXTDMA_DEBUG) {
		char sbuf[256];

		snprintb(sbuf, sizeof(sbuf),
		    NEXT_INTR_BITS, NEXT_I_BIT(nsc->sc_chan->nd_intr));
		printf("DMA initiating DMA %s of %d segments on intr(0x%s)\n",
		       (dmadir == DMACSR_SETREAD ? "read" : "write"), stat->nd_map->dm_nsegs, sbuf);
	}
#endif

	nd_bsw4 (DD_CSR, (turbo ? DMACSR_INITBUFTURBO : DMACSR_INITBUF) |
		 DMACSR_RESET | dmadir);
	nd_bsw4 (DD_CSR, 0);

	nextdma_setup_curr_regs(nsc);
	nextdma_setup_cont_regs(nsc);

#if (defined(ND_DEBUG))
	if (NEXTDMA_DEBUG > 2) nextdma_print(nsc);
#endif

	if (stat->nd_map_cont == NULL) {
		nd_bsw4 (DD_CSR, DMACSR_SETENABLE | dmadir);
	} else {
		nd_bsw4 (DD_CSR, DMACSR_SETSUPDATE | DMACSR_SETENABLE | dmadir);
	}
}

/* This routine is used for debugging */
void
nextdma_print(struct nextdma_softc *nsc)
{
	u_long dd_csr;
	u_long dd_next;
	u_long dd_next_initbuf;
	u_long dd_limit;
	u_long dd_start;
	u_long dd_stop;
	u_long dd_saved_next;
	u_long dd_saved_limit;
	u_long dd_saved_start;
	u_long dd_saved_stop;
	char sbuf[256];
	struct nextdma_status *stat = &nsc->sc_stat;

	/* Read all of the registers before we print anything out,
	 * in case something changes
	 */
	dd_csr          = nd_bsr4 (DD_CSR);
	dd_next         = nd_bsr4 (DD_NEXT);
	dd_next_initbuf = nd_bsr4 (DD_NEXT_INITBUF);
	dd_limit        = nd_bsr4 (DD_LIMIT);
	dd_start        = nd_bsr4 (DD_START);
	dd_stop         = nd_bsr4 (DD_STOP);
	dd_saved_next   = nd_bsr4 (DD_SAVED_NEXT);
	dd_saved_limit  = nd_bsr4 (DD_SAVED_LIMIT);
	dd_saved_start  = nd_bsr4 (DD_SAVED_START);
	dd_saved_stop   = nd_bsr4 (DD_SAVED_STOP);

	snprintb(sbuf, sizeof(sbuf), NEXT_INTR_BITS,
	    *(volatile u_long *)IIOV(NEXT_P_INTRSTAT));
	printf("NDMAP: *intrstat = 0x%s\n", sbuf);

	snprintb(sbuf, sizeof(sbuf), NEXT_INTR_BITS,
	    *(volatile u_long *)IIOV(NEXT_P_INTRMASK));
	printf("NDMAP: *intrmask = 0x%s\n", sbuf);

	/* NDMAP is Next DMA Print (really!) */

	if (stat->nd_map) {
		int i;

		printf("NDMAP: nd_map->dm_mapsize = %ld\n",
		       stat->nd_map->dm_mapsize);
		printf("NDMAP: nd_map->dm_nsegs = %d\n",
		       stat->nd_map->dm_nsegs);
		printf("NDMAP: nd_map->dm_xfer_len = %ld\n",
		       stat->nd_map->dm_xfer_len);
		printf("NDMAP: nd_map->dm_segs[%d].ds_addr = 0x%08lx\n",
		       stat->nd_idx, stat->nd_map->dm_segs[stat->nd_idx].ds_addr);
		printf("NDMAP: nd_map->dm_segs[%d].ds_len = %ld\n",
		       stat->nd_idx, stat->nd_map->dm_segs[stat->nd_idx].ds_len);

		printf("NDMAP: Entire map;\n");
		for(i=0;i<stat->nd_map->dm_nsegs;i++) {
			printf("NDMAP:   nd_map->dm_segs[%d].ds_addr = 0x%08lx\n",
			       i,stat->nd_map->dm_segs[i].ds_addr);
			printf("NDMAP:   nd_map->dm_segs[%d].ds_len = %ld\n",
			       i,stat->nd_map->dm_segs[i].ds_len);
		}
	} else {
		printf("NDMAP: nd_map = NULL\n");
	}
	if (stat->nd_map_cont) {
		printf("NDMAP: nd_map_cont->dm_mapsize = %ld\n",
		       stat->nd_map_cont->dm_mapsize);
		printf("NDMAP: nd_map_cont->dm_nsegs = %d\n",
		       stat->nd_map_cont->dm_nsegs);
		printf("NDMAP: nd_map_cont->dm_xfer_len = %ld\n",
		       stat->nd_map_cont->dm_xfer_len);
		printf("NDMAP: nd_map_cont->dm_segs[%d].ds_addr = 0x%08lx\n",
		       stat->nd_idx_cont,stat->nd_map_cont->dm_segs[stat->nd_idx_cont].ds_addr);
		printf("NDMAP: nd_map_cont->dm_segs[%d].ds_len = %ld\n",
		       stat->nd_idx_cont,stat->nd_map_cont->dm_segs[stat->nd_idx_cont].ds_len);
		if (stat->nd_map_cont != stat->nd_map) {
			int i;
			printf("NDMAP: Entire map;\n");
			for(i=0;i<stat->nd_map_cont->dm_nsegs;i++) {
				printf("NDMAP:   nd_map_cont->dm_segs[%d].ds_addr = 0x%08lx\n",
				       i,stat->nd_map_cont->dm_segs[i].ds_addr);
				printf("NDMAP:   nd_map_cont->dm_segs[%d].ds_len = %ld\n",
				       i,stat->nd_map_cont->dm_segs[i].ds_len);
			}
		}
	} else {
		printf("NDMAP: nd_map_cont = NULL\n");
	}

	snprintb(sbuf, sizeof(sbuf), DMACSR_BITS, dd_csr);
	printf("NDMAP: dd->dd_csr          = 0x%s\n",   sbuf);

	printf("NDMAP: dd->dd_saved_next   = 0x%08lx\n", dd_saved_next);
	printf("NDMAP: dd->dd_saved_limit  = 0x%08lx\n", dd_saved_limit);
	printf("NDMAP: dd->dd_saved_start  = 0x%08lx\n", dd_saved_start);
	printf("NDMAP: dd->dd_saved_stop   = 0x%08lx\n", dd_saved_stop);
	printf("NDMAP: dd->dd_next         = 0x%08lx\n", dd_next);
	printf("NDMAP: dd->dd_next_initbuf = 0x%08lx\n", dd_next_initbuf);
	printf("NDMAP: dd->dd_limit        = 0x%08lx\n", dd_limit);
	printf("NDMAP: dd->dd_start        = 0x%08lx\n", dd_start);
	printf("NDMAP: dd->dd_stop         = 0x%08lx\n", dd_stop);

	snprintb(sbuf, sizeof(sbuf), NEXT_INTR_BITS,
	    (NEXT_I_BIT(nsc->sc_chan->nd_intr)));
	printf("NDMAP: interrupt ipl (%ld) intr(0x%s)\n",
			NEXT_I_IPL(nsc->sc_chan->nd_intr), sbuf);
}

#if defined(ND_DEBUG)
void
nextdma_debug_initstate(struct nextdma_softc *nsc)
{
	switch(nsc->sc_chan->nd_intr) {
	case NEXT_I_ENETR_DMA:
		memset(nextdma_debug_enetr_state,0,sizeof(nextdma_debug_enetr_state));
		break;
	case NEXT_I_SCSI_DMA:
		memset(nextdma_debug_scsi_state,0,sizeof(nextdma_debug_scsi_state));
		break;
	}
}

void
nextdma_debug_savestate(struct nextdma_softc *nsc, unsigned int state)
{
	switch(nsc->sc_chan->nd_intr) {
	case NEXT_I_ENETR_DMA:
		nextdma_debug_enetr_state[nextdma_debug_enetr_idx++] = state;
		nextdma_debug_enetr_idx %= (sizeof(nextdma_debug_enetr_state)/sizeof(unsigned int));
		break;
	case NEXT_I_SCSI_DMA:
		nextdma_debug_scsi_state[nextdma_debug_scsi_idx++] = state;
		nextdma_debug_scsi_idx %= (sizeof(nextdma_debug_scsi_state)/sizeof(unsigned int));
		break;
	}
}

void
nextdma_debug_enetr_dumpstate(void)
{
	int i;
	int s;
	s = spldma();
	i = nextdma_debug_enetr_idx;
	do {
		char sbuf[256];
		if (nextdma_debug_enetr_state[i]) {
			snprintb(sbuf, sizeof(sbuf), DMACSR_BITS, nextdma_debug_enetr_state[i]);
			printf("DMA: 0x%02x state 0x%s\n",i,sbuf);
		}
		i++;
		i %= (sizeof(nextdma_debug_enetr_state)/sizeof(unsigned int));
	} while (i != nextdma_debug_enetr_idx);
	splx(s);
}

void
nextdma_debug_scsi_dumpstate(void)
{
	int i;
	int s;
	s = spldma();
	i = nextdma_debug_scsi_idx;
	do {
		char sbuf[256];
		if (nextdma_debug_scsi_state[i]) {
			snprintb(sbuf, sizeof(sbuf), DMACSR_BITS, nextdma_debug_scsi_state[i]);
			printf("DMA: 0x%02x state 0x%s\n",i,sbuf);
		}
		i++;
		i %= (sizeof(nextdma_debug_scsi_state)/sizeof(unsigned int));
	} while (i != nextdma_debug_scsi_idx);
	splx(s);
}
#endif
