xref: /netbsd-src/sys/arch/next68k/dev/nextdma.c (revision 23c8222edbfb0f0932d88a8351d3a0cf817dfb9e)
1 /*	$NetBSD: nextdma.c,v 1.36 2003/12/04 13:05:17 keihan Exp $	*/
2 /*
3  * Copyright (c) 1998 Darrin B. Jewell
4  * All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  * 3. All advertising materials mentioning features or use of this software
15  *    must display the following acknowledgement:
16  *      This product includes software developed by Darrin B. Jewell
17  * 4. The name of the author may not be used to endorse or promote products
18  *    derived from this software without specific prior written permission
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
21  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
22  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
23  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
24  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
25  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
26  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
27  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
28  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
29  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30  */
31 
32 #include <sys/cdefs.h>
33 __KERNEL_RCSID(0, "$NetBSD: nextdma.c,v 1.36 2003/12/04 13:05:17 keihan Exp $");
34 
35 #include <sys/param.h>
36 #include <sys/systm.h>
37 #include <sys/mbuf.h>
38 #include <sys/syslog.h>
39 #include <sys/socket.h>
40 #include <sys/device.h>
41 #include <sys/malloc.h>
42 #include <sys/ioctl.h>
43 #include <sys/errno.h>
44 
45 #define _M68K_BUS_DMA_PRIVATE
46 #include <machine/autoconf.h>
47 #include <machine/cpu.h>
48 #include <machine/intr.h>
49 
50 #include <m68k/cacheops.h>
51 
52 #include <next68k/next68k/isr.h>
53 #include <next68k/next68k/nextrom.h>
54 
55 #include <next68k/dev/intiovar.h>
56 
57 #include "nextdmareg.h"
58 #include "nextdmavar.h"
59 
60 #include "esp.h"
61 #include "xe.h"
62 
63 #if DEBUG
64 #define ND_DEBUG
65 #endif
66 
67 extern int turbo;
68 
69 #define panic		__asm __volatile("trap  #15"); printf
70 
71 #define NEXTDMA_DEBUG nextdma_debug
72 /* (nsc->sc_chan->nd_intr == NEXT_I_SCSI_DMA) && nextdma_debug */
73 #if defined(ND_DEBUG)
74 int nextdma_debug = 0;
75 #define DPRINTF(x) if (NEXTDMA_DEBUG) printf x;
76 int ndtraceshow = 0;
77 char ndtrace[8192+100];
78 char *ndtracep = ndtrace;
79 #define NDTRACEIF(x) if (10 && /* (nsc->sc_chan->nd_intr == NEXT_I_SCSI_DMA) && */ ndtracep < (ndtrace + 8192)) do {x;} while (0)
80 #else
81 #define DPRINTF(x)
82 #define NDTRACEIF(x)
83 #endif
84 #define PRINTF(x) printf x
85 
86 #if defined(ND_DEBUG)
87 int nextdma_debug_enetr_idx = 0;
88 unsigned int nextdma_debug_enetr_state[100] = { 0 };
89 int nextdma_debug_scsi_idx = 0;
90 unsigned int nextdma_debug_scsi_state[100] = { 0 };
91 
92 void nextdma_debug_initstate(struct nextdma_softc *);
93 void nextdma_debug_savestate(struct nextdma_softc *, unsigned int);
94 void nextdma_debug_scsi_dumpstate(void);
95 void nextdma_debug_enetr_dumpstate(void);
96 #endif
97 
98 
99 int	nextdma_match		__P((struct device *, struct cfdata *, void *));
100 void	nextdma_attach		__P((struct device *, struct device *, void *));
101 
102 void nextdmamap_sync		__P((bus_dma_tag_t, bus_dmamap_t, bus_addr_t,
103 				     bus_size_t, int));
104 int nextdma_continue		__P((struct nextdma_softc *));
105 void nextdma_rotate		__P((struct nextdma_softc *));
106 
107 void nextdma_setup_cont_regs	__P((struct nextdma_softc *));
108 void nextdma_setup_curr_regs	__P((struct nextdma_softc *));
109 
110 #if NESP > 0
111 static int nextdma_esp_intr	__P((void *));
112 #endif
113 #if NXE > 0
114 static int nextdma_enet_intr	__P((void *));
115 #endif
116 
117 #define nd_bsr4(reg) bus_space_read_4(nsc->sc_bst, nsc->sc_bsh, (reg))
118 #define nd_bsw4(reg,val) bus_space_write_4(nsc->sc_bst, nsc->sc_bsh, (reg), (val))
119 
120 CFATTACH_DECL(nextdma, sizeof(struct nextdma_softc),
121     nextdma_match, nextdma_attach, NULL, NULL);
122 
123 static struct nextdma_channel nextdma_channel[] = {
124 #if NESP > 0
125 	{ "scsi", NEXT_P_SCSI_CSR, DD_SIZE, NEXT_I_SCSI_DMA, &nextdma_esp_intr },
126 #endif
127 #if NXE > 0
128 	{ "enetx", NEXT_P_ENETX_CSR, DD_SIZE, NEXT_I_ENETX_DMA, &nextdma_enet_intr },
129 	{ "enetr", NEXT_P_ENETR_CSR, DD_SIZE, NEXT_I_ENETR_DMA, &nextdma_enet_intr },
130 #endif
131 };
132 static int nnextdma_channels = (sizeof(nextdma_channel)/sizeof(nextdma_channel[0]));
133 
134 static int attached = 0;
135 
136 struct nextdma_softc *
137 nextdma_findchannel(name)
138 	char *name;
139 {
140 	struct device *dev = alldevs.tqh_first;
141 
142 	while (dev != NULL) {
143 		if (!strncmp(dev->dv_xname, "nextdma", 7)) {
144 			struct nextdma_softc *nsc = (struct nextdma_softc *)dev;
145 			if (!strcmp (nsc->sc_chan->nd_name, name))
146 				return (nsc);
147 		}
148 		dev = dev->dv_list.tqe_next;
149 	}
150 	return (NULL);
151 }
152 
153 int
154 nextdma_match(parent, match, aux)
155 	struct device *parent;
156 	struct cfdata *match;
157 	void *aux;
158 {
159 	struct intio_attach_args *ia = (struct intio_attach_args *)aux;
160 
161 	if (attached >= nnextdma_channels)
162 		return (0);
163 
164 	ia->ia_addr = (void *)nextdma_channel[attached].nd_base;
165 
166 	return (1);
167 }
168 
void
nextdma_attach(parent, self, aux)
	struct device *parent, *self;
	void *aux;
{
	/*
	 * Autoconf attach: bind this softc to the next unattached entry
	 * of nextdma_channel[], map its register window, initialize the
	 * channel, and hook up its autovectored interrupt handler.
	 */
	struct nextdma_softc *nsc = (struct nextdma_softc *)self;
	struct intio_attach_args *ia = (struct intio_attach_args *)aux;

	/* Shouldn't happen if match() agreed, but don't run off the table. */
	if (attached >= nnextdma_channels)
		return;

	nsc->sc_chan = &nextdma_channel[attached];

	nsc->sc_dmat = ia->ia_dmat;
	nsc->sc_bst = ia->ia_bst;

	if (bus_space_map(nsc->sc_bst, nsc->sc_chan->nd_base,
			  nsc->sc_chan->nd_size, 0, &nsc->sc_bsh)) {
		panic("%s: can't map DMA registers for channel %s",
		      nsc->sc_dev.dv_xname, nsc->sc_chan->nd_name);
	}

	/* Put the channel hardware in a known idle state. */
	nextdma_init (nsc);

	/* Register the per-channel handler (nextdma_esp_intr or
	 * nextdma_enet_intr) and unmask the interrupt. */
	isrlink_autovec(nsc->sc_chan->nd_intrfunc, nsc,
			NEXT_I_IPL(nsc->sc_chan->nd_intr), 10, NULL);
	INTR_ENABLE(nsc->sc_chan->nd_intr);

	printf (": channel %d (%s)\n", attached,
		nsc->sc_chan->nd_name);
	attached++;

	return;
}
203 
void
nextdma_init(nsc)
	struct nextdma_softc *nsc;
{
	/*
	 * Bring a DMA channel to its idle state: forget any maps in
	 * flight (no callbacks are made), reset the channel CSR, and
	 * reload the current/continue register sets from the now-empty
	 * status (which writes the poison/idle values).
	 */
#ifdef ND_DEBUG
	if (NEXTDMA_DEBUG) {
		char sbuf[256];

		bitmask_snprintf(NEXT_I_BIT(nsc->sc_chan->nd_intr), NEXT_INTR_BITS,
				 sbuf, sizeof(sbuf));
		printf("DMA init ipl (%ld) intr(0x%s)\n",
			NEXT_I_IPL(nsc->sc_chan->nd_intr), sbuf);
	}
#endif

	/* Drop current and continue maps without notifying the client. */
	nsc->sc_stat.nd_map = NULL;
	nsc->sc_stat.nd_idx = 0;
	nsc->sc_stat.nd_map_cont = NULL;
	nsc->sc_stat.nd_idx_cont = 0;
	nsc->sc_stat.nd_exception = 0;

	/* Pulse RESET (clearing COMPLETE too), then clear the CSR. */
	nd_bsw4 (DD_CSR, DMACSR_RESET | DMACSR_CLRCOMPLETE);
	nd_bsw4 (DD_CSR, 0);

#if 01
	nextdma_setup_curr_regs(nsc);
	nextdma_setup_cont_regs(nsc);
#endif

#if defined(DIAGNOSTIC)
	{
		u_long state;
		state = nd_bsr4 (DD_CSR);

#if 1
		/* mourning (a 25Mhz 68040 mono slab) appears to set BUSEXC
		 * milo (a 25Mhz 68040 mono cube) didn't have this problem
		 * Darrin B. Jewell <jewell@mit.edu>  Mon May 25 07:53:05 1998
		 */
		state &= (DMACSR_COMPLETE | DMACSR_SUPDATE | DMACSR_ENABLE);
#else
		state &= (DMACSR_BUSEXC | DMACSR_COMPLETE |
			  DMACSR_SUPDATE | DMACSR_ENABLE);
#endif
		/* Any of these bits still set means the reset didn't take. */
		if (state) {
			nextdma_print(nsc);
			panic("DMA did not reset");
		}
	}
#endif
}
255 
void
nextdma_reset(nsc)
	struct nextdma_softc *nsc;
{
	/*
	 * Abort any transfer in progress.  Outstanding maps are handed
	 * back to the client driver through the completed and shutdown
	 * callbacks so it can reclaim them.  Runs at spldma() to keep
	 * the DMA interrupt handler out while state is torn down.
	 */
	int s;
	struct nextdma_status *stat = &nsc->sc_stat;

	s = spldma();

	DPRINTF(("DMA reset\n"));

#if (defined(ND_DEBUG))
	if (NEXTDMA_DEBUG > 1) nextdma_print(nsc);
#endif

	/* Stop the channel and clear any pending completion. */
	nd_bsw4 (DD_CSR, DMACSR_CLRCOMPLETE | DMACSR_RESET);
	if ((stat->nd_map) || (stat->nd_map_cont)) {
		if (stat->nd_map_cont) {
			DPRINTF(("DMA: resetting with non null continue map\n"));
			/* Return the never-started continue map to the client. */
			if (nsc->sc_conf.nd_completed_cb)
				(*nsc->sc_conf.nd_completed_cb)
					(stat->nd_map_cont, nsc->sc_conf.nd_cb_arg);

			stat->nd_map_cont = 0;
			stat->nd_idx_cont = 0;
		}
		if (nsc->sc_conf.nd_shutdown_cb)
			(*nsc->sc_conf.nd_shutdown_cb)(nsc->sc_conf.nd_cb_arg);
		stat->nd_map = 0;
		stat->nd_idx = 0;
	}

	splx(s);
}
290 
291 /****************************************************************/
292 
293 
294 /* Call the completed and continue callbacks to try to fill
295  * in the dma continue buffers.
296  */
297 void
298 nextdma_rotate(nsc)
299 	struct nextdma_softc *nsc;
300 {
301 	struct nextdma_status *stat = &nsc->sc_stat;
302 
303 	NDTRACEIF (*ndtracep++ = 'r');
304 	DPRINTF(("DMA nextdma_rotate()\n"));
305 
306 	/* Rotate the continue map into the current map */
307 	stat->nd_map = stat->nd_map_cont;
308 	stat->nd_idx = stat->nd_idx_cont;
309 
310 	if ((!stat->nd_map_cont) ||
311 	    ((++stat->nd_idx_cont >= stat->nd_map_cont->dm_nsegs))) {
312 		if (nsc->sc_conf.nd_continue_cb) {
313 			stat->nd_map_cont = (*nsc->sc_conf.nd_continue_cb)
314 				(nsc->sc_conf.nd_cb_arg);
315 			if (stat->nd_map_cont) {
316 				stat->nd_map_cont->dm_xfer_len = 0;
317 			}
318 		} else {
319 			stat->nd_map_cont = 0;
320 		}
321 		stat->nd_idx_cont = 0;
322 	}
323 
324 #if defined(DIAGNOSTIC) && 0
325 	if (stat->nd_map_cont) {
326 		if (!DMA_BEGINALIGNED(stat->nd_map_cont->dm_segs[stat->nd_idx_cont].ds_addr)) {
327 			nextdma_print(nsc);
328 			panic("DMA request unaligned at start");
329 		}
330 		if (!DMA_ENDALIGNED(stat->nd_map_cont->dm_segs[stat->nd_idx_cont].ds_addr +
331 				stat->nd_map_cont->dm_segs[stat->nd_idx_cont].ds_len)) {
332 			nextdma_print(nsc);
333 			panic("DMA request unaligned at end");
334 		}
335 	}
336 #endif
337 
338 }
339 
void
nextdma_setup_curr_regs(nsc)
	struct nextdma_softc *nsc;
{
	/*
	 * Load the channel's "current" register set (NEXT/LIMIT plus,
	 * on non-turbo hardware, their SAVED shadows) from the current
	 * map/segment in nsc->sc_stat.  With no current map, poison
	 * values are written (0 on turbo, 0xdeadbeef otherwise).
	 */
	bus_addr_t dd_next;
	bus_addr_t dd_limit;
	bus_addr_t dd_saved_next;
	bus_addr_t dd_saved_limit;
	struct nextdma_status *stat = &nsc->sc_stat;

	NDTRACEIF (*ndtracep++ = 'C');
	DPRINTF(("DMA nextdma_setup_curr_regs()\n"));

	if (stat->nd_map) {
		dd_next = stat->nd_map->dm_segs[stat->nd_idx].ds_addr;
		dd_limit = (stat->nd_map->dm_segs[stat->nd_idx].ds_addr +
			    stat->nd_map->dm_segs[stat->nd_idx].ds_len);

		if (!turbo && nsc->sc_chan->nd_intr == NEXT_I_ENETX_DMA) {
			dd_limit |= 0x80000000; /* Ethernet transmit needs secret magic */
			dd_limit += 15;
		}
	} else {
		dd_next = turbo ? 0 : 0xdeadbeef;
		dd_limit = turbo ? 0 : 0xdeadbeef;
	}

	dd_saved_next = dd_next;
	dd_saved_limit = dd_limit;

	/* Record the segment length in the debug trace buffer. */
	NDTRACEIF (if (stat->nd_map) {
		sprintf (ndtracep, "%ld", stat->nd_map->dm_segs[stat->nd_idx].ds_len);
		ndtracep += strlen (ndtracep);
	});

	/* Non-turbo ethernet transmit loads NEXT through the INITBUF alias. */
	if (!turbo && (nsc->sc_chan->nd_intr == NEXT_I_ENETX_DMA)) {
		nd_bsw4 (DD_NEXT_INITBUF, dd_next);
	} else {
		nd_bsw4 (DD_NEXT, dd_next);
	}
	nd_bsw4 (DD_LIMIT, dd_limit);
	if (!turbo) nd_bsw4 (DD_SAVED_NEXT, dd_saved_next);
	if (!turbo) nd_bsw4 (DD_SAVED_LIMIT, dd_saved_limit);

#ifdef DIAGNOSTIC
	/* Read everything back to catch wedged hardware early. */
	if ((nd_bsr4 (DD_NEXT_INITBUF) != dd_next)
	    || (nd_bsr4 (DD_NEXT) != dd_next)
	    || (nd_bsr4 (DD_LIMIT) != dd_limit)
	    || (!turbo && (nd_bsr4 (DD_SAVED_NEXT) != dd_saved_next))
	    || (!turbo && (nd_bsr4 (DD_SAVED_LIMIT) != dd_saved_limit))
		) {
		nextdma_print(nsc);
		panic("DMA failure writing to current regs");
	}
#endif
}
396 
397 void
398 nextdma_setup_cont_regs(nsc)
399 	struct nextdma_softc *nsc;
400 {
401 	bus_addr_t dd_start;
402 	bus_addr_t dd_stop;
403 	bus_addr_t dd_saved_start;
404 	bus_addr_t dd_saved_stop;
405 	struct nextdma_status *stat = &nsc->sc_stat;
406 
407 	NDTRACEIF (*ndtracep++ = 'c');
408 	DPRINTF(("DMA nextdma_setup_regs()\n"));
409 
410 	if (stat->nd_map_cont) {
411 		dd_start = stat->nd_map_cont->dm_segs[stat->nd_idx_cont].ds_addr;
412 		dd_stop  = (stat->nd_map_cont->dm_segs[stat->nd_idx_cont].ds_addr +
413 			    stat->nd_map_cont->dm_segs[stat->nd_idx_cont].ds_len);
414 
415 		if (!turbo && nsc->sc_chan->nd_intr == NEXT_I_ENETX_DMA) {
416 			dd_stop |= 0x80000000; /* Ethernet transmit needs secret magic */
417 			dd_stop += 15;
418 		}
419 	} else {
420 		dd_start = turbo ? nd_bsr4 (DD_NEXT) : 0xdeadbee0;
421 		dd_stop = turbo ? 0 : 0xdeadbee0;
422 	}
423 
424 	dd_saved_start = dd_start;
425 	dd_saved_stop  = dd_stop;
426 
427 	NDTRACEIF (if (stat->nd_map_cont) {
428 		sprintf (ndtracep, "%ld", stat->nd_map_cont->dm_segs[stat->nd_idx_cont].ds_len);
429 		ndtracep += strlen (ndtracep);
430 	});
431 
432 	nd_bsw4 (DD_START, dd_start);
433 	nd_bsw4 (DD_STOP, dd_stop);
434 	if (!turbo) nd_bsw4 (DD_SAVED_START, dd_saved_start);
435 	if (!turbo) nd_bsw4 (DD_SAVED_STOP, dd_saved_stop);
436 	if (turbo && nsc->sc_chan->nd_intr == NEXT_I_ENETR_DMA)
437 		nd_bsw4 (DD_STOP - 0x40, dd_start);
438 
439 #ifdef DIAGNOSTIC
440 	if ((nd_bsr4 (DD_START) != dd_start)
441 	    || (dd_stop && (nd_bsr4 (DD_STOP) != dd_stop))
442 	    || (!turbo && (nd_bsr4 (DD_SAVED_START) != dd_saved_start))
443 	    || (!turbo && (nd_bsr4 (DD_SAVED_STOP) != dd_saved_stop))
444 		) {
445 		nextdma_print(nsc);
446 		panic("DMA failure writing to continue regs");
447 	}
448 #endif
449 }
450 
451 /****************************************************************/
452 
#if NESP > 0
static int
nextdma_esp_intr(arg)
	void *arg;
{
	/* @@@ This is bogus, we can't be certain of arg's type
	 * unless the interrupt is for us.  For now we successfully
	 * cheat because DMA interrupts are the only things invoked
	 * at this interrupt level.
	 */
	struct nextdma_softc *nsc = arg;
	int esp_dma_int __P((void *)); /* XXX */

	if (INTR_OCCURRED(nsc->sc_chan->nd_intr)) {
		/* Our DMA interrupt is pending; let the ESP driver
		 * service it. */
		return esp_dma_int (nsc->sc_conf.nd_cb_arg);
	}

	/* Not ours. */
	return 0;
}
#endif
474 
#if NXE > 0
/*
 * Interrupt handler for the ethernet DMA channels.  Works out how far
 * the hardware actually got (slimit) from the CSR state bits, accounts
 * the transferred bytes into the current map, notifies the client via
 * the completed callback, and then either restarts the channel with the
 * next map segment or performs a shutdown.
 */
static int
nextdma_enet_intr(arg)
	void *arg;
{
	/* @@@ This is bogus, we can't be certain of arg's type
	 * unless the interrupt is for us.  For now we successfully
	 * cheat because DMA interrupts are the only things invoked
	 * at this interrupt level.
	 */
	struct nextdma_softc *nsc = arg;
	unsigned int state;
	bus_addr_t onext;	/* start of the segment just transferred */
	bus_addr_t olimit;	/* end of that segment */
	bus_addr_t slimit;	/* where the hardware actually stopped */
	int result;
	struct nextdma_status *stat = &nsc->sc_stat;

	if (!INTR_OCCURRED(nsc->sc_chan->nd_intr))
		return 0;
	/* Handle dma interrupts */

	NDTRACEIF (*ndtracep++ = 'D');
#ifdef ND_DEBUG
	if (NEXTDMA_DEBUG) {
		char sbuf[256];

		bitmask_snprintf(NEXT_I_BIT(nsc->sc_chan->nd_intr), NEXT_INTR_BITS,
				 sbuf, sizeof(sbuf));
		printf("DMA interrupt ipl (%ld) intr(0x%s)\n",
		       NEXT_I_IPL(nsc->sc_chan->nd_intr), sbuf);
	}
#endif

#ifdef DIAGNOSTIC
	if (!stat->nd_map) {
		nextdma_print(nsc);
		panic("DMA missing current map in interrupt!");
	}
#endif

	state = nd_bsr4 (DD_CSR);

#if defined(ND_DEBUG)
	nextdma_debug_savestate(nsc, state);
#endif

#ifdef DIAGNOSTIC
	if (/* (state & DMACSR_READ) || */ !(state & DMACSR_COMPLETE)) {
		char sbuf[256];
		nextdma_print(nsc);
		bitmask_snprintf(state, DMACSR_BITS, sbuf, sizeof(sbuf));
		printf("DMA: state 0x%s\n",sbuf);
		panic("DMA complete not set in interrupt");
	}
#endif

	DPRINTF(("DMA: finishing xfer\n"));

	onext = stat->nd_map->dm_segs[stat->nd_idx].ds_addr;
	olimit = onext + stat->nd_map->dm_segs[stat->nd_idx].ds_len;

	/*
	 * Encode the interesting CSR/software state into a small
	 * bitmask so the switch below can pick the register that holds
	 * the true stopping point for each known combination.
	 */
	result = 0;
	if (state & DMACSR_ENABLE) {
		/* enable bit was set */
		result |= 0x01;
	}
	if (state & DMACSR_SUPDATE) {
		/* supdate bit was set */
		result |= 0x02;
	}
	if (stat->nd_map_cont == NULL) {
		KASSERT(stat->nd_idx+1 == stat->nd_map->dm_nsegs);
		/* Expecting a shutdown, didn't SETSUPDATE last turn */
		result |= 0x04;
	}
	if (state & DMACSR_BUSEXC) {
		/* bus exception bit was set */
		result |= 0x08;
	}
	switch (result) {
	case 0x00: /* !BUSEXC && !expecting && !SUPDATE && !ENABLE */
	case 0x08: /* BUSEXC && !expecting && !SUPDATE && !ENABLE */
		if (turbo) {
			/* NOTE(review): fixed address apparently holds the
			 * turbo saved-limit; inherited magic, unverified. */
			volatile u_int *limit = (volatile u_int *)IIOV(0x2000050+0x4000);
			slimit = *limit;
		} else {
			slimit = nd_bsr4 (DD_SAVED_LIMIT);
		}
		break;
	case 0x01: /* !BUSEXC && !expecting && !SUPDATE && ENABLE */
	case 0x09: /* BUSEXC && !expecting && !SUPDATE && ENABLE */
		if (turbo) {
			volatile u_int *limit = (volatile u_int *)IIOV(0x2000050+0x4000);
			slimit = *limit;
		} else {
			slimit = nd_bsr4 (DD_SAVED_LIMIT);
		}
		break;
	case 0x02: /* !BUSEXC && !expecting && SUPDATE && !ENABLE */
	case 0x0a: /* BUSEXC && !expecting && SUPDATE && !ENABLE */
		slimit = nd_bsr4 (DD_NEXT);
		break;
	case 0x04:  /* !BUSEXC && expecting && !SUPDATE && !ENABLE */
	case 0x0c: /* BUSEXC && expecting && !SUPDATE && !ENABLE */
		slimit = nd_bsr4 (DD_LIMIT);
		break;
	default:
#ifdef DIAGNOSTIC
	{
		char sbuf[256];
		printf("DMA: please send this output to port-next68k-maintainer@NetBSD.org:\n");
		bitmask_snprintf(state, DMACSR_BITS, sbuf, sizeof(sbuf));
		printf("DMA: state 0x%s\n",sbuf);
		nextdma_print(nsc);
		panic("DMA: condition 0x%02x not yet documented to occur",result);
	}
#endif
	/* Non-DIAGNOSTIC fallback: assume the whole segment went out. */
	slimit = olimit;
	break;
	}

	/* Undo the "secret magic" applied when the limit was loaded in
	 * nextdma_setup_curr_regs() for non-turbo ethernet transmit. */
	if (!turbo && nsc->sc_chan->nd_intr == NEXT_I_ENETX_DMA) {
		slimit &= ~0x80000000;
		slimit -= 15;
	}

#ifdef DIAGNOSTIC
	/* slimit must land inside the segment we were transferring. */
	if ((state & DMACSR_READ))
		DPRINTF (("limits: 0x%08lx <= 0x%08lx <= 0x%08lx %s\n", onext, slimit, olimit,
			  (state & DMACSR_READ) ? "read" : "write"));
	if ((slimit < onext) || (slimit > olimit)) {
		char sbuf[256];
		bitmask_snprintf(state, DMACSR_BITS, sbuf, sizeof(sbuf));
		printf("DMA: state 0x%s\n",sbuf);
		nextdma_print(nsc);
		panic("DMA: Unexpected limit register (0x%08lx) in finish_xfer",slimit);
	}
#endif

#ifdef DIAGNOSTIC
	/* A short transfer is only acceptable on the map's last segment. */
	if ((state & DMACSR_ENABLE) && ((stat->nd_idx+1) != stat->nd_map->dm_nsegs)) {
		if (slimit != olimit) {
			char sbuf[256];
			bitmask_snprintf(state, DMACSR_BITS, sbuf, sizeof(sbuf));
			printf("DMA: state 0x%s\n",sbuf);
			nextdma_print(nsc);
			panic("DMA: short limit register (0x%08lx) w/o finishing map.",slimit);
		}
	}
#endif

#if (defined(ND_DEBUG))
	if (NEXTDMA_DEBUG > 2) nextdma_print(nsc);
#endif

	/* Account the bytes actually moved in this segment. */
	stat->nd_map->dm_xfer_len += slimit-onext;

	/* If we've reached the end of the current map, then inform
	 * that we've completed that map.
	 */
	if ((stat->nd_idx+1) == stat->nd_map->dm_nsegs) {
		if (nsc->sc_conf.nd_completed_cb)
			(*nsc->sc_conf.nd_completed_cb)
				(stat->nd_map, nsc->sc_conf.nd_cb_arg);
	} else {
		KASSERT(stat->nd_map == stat->nd_map_cont);
		KASSERT(stat->nd_idx+1 == stat->nd_idx_cont);
	}
	stat->nd_map = 0;
	stat->nd_idx = 0;

#if (defined(ND_DEBUG))
	if (NEXTDMA_DEBUG) {
		char sbuf[256];
		bitmask_snprintf(state, DMACSR_BITS, sbuf, sizeof(sbuf));
		printf("CLNDMAP: dd->dd_csr          = 0x%s\n",   sbuf);
	}
#endif
	if (state & DMACSR_ENABLE) {
		/* Channel still enabled: rotate in the next map segment
		 * and restart, requesting SUPDATE only if there will be
		 * more work after this one. */
		u_long dmadir;		/* DMACSR_SETREAD or DMACSR_SETWRITE */

		nextdma_rotate(nsc);
		nextdma_setup_cont_regs(nsc);

		if (state & DMACSR_READ) {
			dmadir = DMACSR_SETREAD;
		} else {
			dmadir = DMACSR_SETWRITE;
		}

		if (stat->nd_map_cont == NULL) {
			KASSERT(stat->nd_idx+1 == stat->nd_map->dm_nsegs);
			nd_bsw4 (DD_CSR, DMACSR_CLRCOMPLETE | dmadir);
			NDTRACEIF (*ndtracep++ = 'g');
		} else {
			nd_bsw4 (DD_CSR, DMACSR_CLRCOMPLETE | dmadir | DMACSR_SETSUPDATE);
			NDTRACEIF (*ndtracep++ = 'G');
		}
	} else {
		DPRINTF(("DMA: a shutdown occurred\n"));
		nd_bsw4 (DD_CSR, DMACSR_CLRCOMPLETE | DMACSR_RESET);

		/* Cleanup more incomplete transfers */
		/* cleanup continue map */
		if (stat->nd_map_cont) {
			DPRINTF(("DMA: shutting down with non null continue map\n"));
			if (nsc->sc_conf.nd_completed_cb)
				(*nsc->sc_conf.nd_completed_cb)
					(stat->nd_map_cont, nsc->sc_conf.nd_cb_arg);

			stat->nd_map_cont = 0;
			stat->nd_idx_cont = 0;
		}
		if (nsc->sc_conf.nd_shutdown_cb)
			(*nsc->sc_conf.nd_shutdown_cb)(nsc->sc_conf.nd_cb_arg);
	}

#ifdef ND_DEBUG
	if (NEXTDMA_DEBUG) {
		char sbuf[256];

		bitmask_snprintf(NEXT_I_BIT(nsc->sc_chan->nd_intr), NEXT_INTR_BITS,
				 sbuf, sizeof(sbuf));
		printf("DMA exiting interrupt ipl (%ld) intr(0x%s)\n",
		       NEXT_I_IPL(nsc->sc_chan->nd_intr), sbuf);
	}
#endif

	return(1);
}
#endif
707 
708 /*
709  * Check to see if dma has finished for a channel */
710 int
711 nextdma_finished(nsc)
712 	struct nextdma_softc *nsc;
713 {
714 	int r;
715 	int s;
716 	struct nextdma_status *stat = &nsc->sc_stat;
717 
718 	s = spldma();
719 	r = (stat->nd_map == NULL) && (stat->nd_map_cont == NULL);
720 	splx(s);
721 
722 	return(r);
723 }
724 
/*
 * Kick off a new DMA transfer in the given direction.  The channel
 * must be idle (nextdma_finished()).  Both the current and continue
 * maps are preloaded via nextdma_rotate(), which pulls maps from the
 * client's continue callback; the hardware is then reset, the
 * register sets loaded, and the channel enabled.
 */
void
nextdma_start(nsc, dmadir)
	struct nextdma_softc *nsc;
	u_long dmadir;		/* DMACSR_SETREAD or DMACSR_SETWRITE */
{
	struct nextdma_status *stat = &nsc->sc_stat;

	NDTRACEIF (*ndtracep++ = 'n');
#ifdef DIAGNOSTIC
	if (!nextdma_finished(nsc)) {
		char sbuf[256];

		bitmask_snprintf(NEXT_I_BIT(nsc->sc_chan->nd_intr), NEXT_INTR_BITS,
				 sbuf, sizeof(sbuf));
		panic("DMA trying to start before previous finished on intr(0x%s)", sbuf);
	}
#endif

#ifdef ND_DEBUG
	if (NEXTDMA_DEBUG) {
		char sbuf[256];

		bitmask_snprintf(NEXT_I_BIT(nsc->sc_chan->nd_intr), NEXT_INTR_BITS,
				 sbuf, sizeof(sbuf));
		printf("DMA start (%ld) intr(0x%s)\n",
		       NEXT_I_IPL(nsc->sc_chan->nd_intr), sbuf);
	}
#endif

#ifdef DIAGNOSTIC
	if (stat->nd_map) {
		nextdma_print(nsc);
		panic("DMA: nextdma_start() with non null map");
	}
	if (stat->nd_map_cont) {
		nextdma_print(nsc);
		panic("DMA: nextdma_start() with non null continue map");
	}
#endif

#ifdef DIAGNOSTIC
	if ((dmadir != DMACSR_SETREAD) && (dmadir != DMACSR_SETWRITE)) {
		panic("DMA: nextdma_start(), dmadir arg must be DMACSR_SETREAD or DMACSR_SETWRITE");
	}
#endif

#if defined(ND_DEBUG)
	nextdma_debug_initstate(nsc);
#endif

	/* preload both the current and the continue maps */
	nextdma_rotate(nsc);

#ifdef DIAGNOSTIC
	if (!stat->nd_map_cont) {
		panic("No map available in nextdma_start()");
	}
#endif

	/* Second rotate: first map becomes current, fetch another for
	 * the continue slot (may legitimately come back NULL). */
	nextdma_rotate(nsc);

#ifdef ND_DEBUG
	if (NEXTDMA_DEBUG) {
		char sbuf[256];

		bitmask_snprintf(NEXT_I_BIT(nsc->sc_chan->nd_intr), NEXT_INTR_BITS,
				 sbuf, sizeof(sbuf));
		printf("DMA initiating DMA %s of %d segments on intr(0x%s)\n",
		       (dmadir == DMACSR_SETREAD ? "read" : "write"), stat->nd_map->dm_nsegs, sbuf);
	}
#endif

	/* Reset and initialize the channel for the chosen direction. */
	nd_bsw4 (DD_CSR, (turbo ? DMACSR_INITBUFTURBO : DMACSR_INITBUF) |
		 DMACSR_RESET | dmadir);
	nd_bsw4 (DD_CSR, 0);

	nextdma_setup_curr_regs(nsc);
	nextdma_setup_cont_regs(nsc);

#if (defined(ND_DEBUG))
	if (NEXTDMA_DEBUG > 2) nextdma_print(nsc);
#endif

	/* Enable; request SUPDATE only if a continue map is queued. */
	if (stat->nd_map_cont == NULL) {
		nd_bsw4 (DD_CSR, DMACSR_SETENABLE | dmadir);
	} else {
		nd_bsw4 (DD_CSR, DMACSR_SETSUPDATE | DMACSR_SETENABLE | dmadir);
	}
}
814 
/* This routine is used for debugging */
/*
 * Dump the channel's full register set, the pending interrupt state,
 * and the current/continue DMA maps to the console.  Called from the
 * DIAGNOSTIC panic paths throughout this file.
 */
void
nextdma_print(nsc)
	struct nextdma_softc *nsc;
{
	u_long dd_csr;
	u_long dd_next;
	u_long dd_next_initbuf;
	u_long dd_limit;
	u_long dd_start;
	u_long dd_stop;
	u_long dd_saved_next;
	u_long dd_saved_limit;
	u_long dd_saved_start;
	u_long dd_saved_stop;
	char sbuf[256];
	struct nextdma_status *stat = &nsc->sc_stat;

	/* Read all of the registers before we print anything out,
	 * in case something changes
	 */
	dd_csr          = nd_bsr4 (DD_CSR);
	dd_next         = nd_bsr4 (DD_NEXT);
	dd_next_initbuf = nd_bsr4 (DD_NEXT_INITBUF);
	dd_limit        = nd_bsr4 (DD_LIMIT);
	dd_start        = nd_bsr4 (DD_START);
	dd_stop         = nd_bsr4 (DD_STOP);
	dd_saved_next   = nd_bsr4 (DD_SAVED_NEXT);
	dd_saved_limit  = nd_bsr4 (DD_SAVED_LIMIT);
	dd_saved_start  = nd_bsr4 (DD_SAVED_START);
	dd_saved_stop   = nd_bsr4 (DD_SAVED_STOP);

	/* Interrupt controller status and mask, decoded bit-by-bit. */
	bitmask_snprintf((*(volatile u_long *)IIOV(NEXT_P_INTRSTAT)),
			 NEXT_INTR_BITS, sbuf, sizeof(sbuf));
	printf("NDMAP: *intrstat = 0x%s\n", sbuf);

	bitmask_snprintf((*(volatile u_long *)IIOV(NEXT_P_INTRMASK)),
			 NEXT_INTR_BITS, sbuf, sizeof(sbuf));
	printf("NDMAP: *intrmask = 0x%s\n", sbuf);

	/* NDMAP is Next DMA Print (really!) */

	/* Current map: summary, active segment, then every segment. */
	if (stat->nd_map) {
		int i;

		printf("NDMAP: nd_map->dm_mapsize = %ld\n",
		       stat->nd_map->dm_mapsize);
		printf("NDMAP: nd_map->dm_nsegs = %d\n",
		       stat->nd_map->dm_nsegs);
		printf("NDMAP: nd_map->dm_xfer_len = %ld\n",
		       stat->nd_map->dm_xfer_len);
		printf("NDMAP: nd_map->dm_segs[%d].ds_addr = 0x%08lx\n",
		       stat->nd_idx, stat->nd_map->dm_segs[stat->nd_idx].ds_addr);
		printf("NDMAP: nd_map->dm_segs[%d].ds_len = %ld\n",
		       stat->nd_idx, stat->nd_map->dm_segs[stat->nd_idx].ds_len);

		printf("NDMAP: Entire map;\n");
		for(i=0;i<stat->nd_map->dm_nsegs;i++) {
			printf("NDMAP:   nd_map->dm_segs[%d].ds_addr = 0x%08lx\n",
			       i,stat->nd_map->dm_segs[i].ds_addr);
			printf("NDMAP:   nd_map->dm_segs[%d].ds_len = %ld\n",
			       i,stat->nd_map->dm_segs[i].ds_len);
		}
	} else {
		printf("NDMAP: nd_map = NULL\n");
	}
	/* Continue map, same layout (full dump skipped if it is the
	 * same map object as the current one). */
	if (stat->nd_map_cont) {
		printf("NDMAP: nd_map_cont->dm_mapsize = %ld\n",
		       stat->nd_map_cont->dm_mapsize);
		printf("NDMAP: nd_map_cont->dm_nsegs = %d\n",
		       stat->nd_map_cont->dm_nsegs);
		printf("NDMAP: nd_map_cont->dm_xfer_len = %ld\n",
		       stat->nd_map_cont->dm_xfer_len);
		printf("NDMAP: nd_map_cont->dm_segs[%d].ds_addr = 0x%08lx\n",
		       stat->nd_idx_cont,stat->nd_map_cont->dm_segs[stat->nd_idx_cont].ds_addr);
		printf("NDMAP: nd_map_cont->dm_segs[%d].ds_len = %ld\n",
		       stat->nd_idx_cont,stat->nd_map_cont->dm_segs[stat->nd_idx_cont].ds_len);
		if (stat->nd_map_cont != stat->nd_map) {
			int i;
			printf("NDMAP: Entire map;\n");
			for(i=0;i<stat->nd_map_cont->dm_nsegs;i++) {
				printf("NDMAP:   nd_map_cont->dm_segs[%d].ds_addr = 0x%08lx\n",
				       i,stat->nd_map_cont->dm_segs[i].ds_addr);
				printf("NDMAP:   nd_map_cont->dm_segs[%d].ds_len = %ld\n",
				       i,stat->nd_map_cont->dm_segs[i].ds_len);
			}
		}
	} else {
		printf("NDMAP: nd_map_cont = NULL\n");
	}

	bitmask_snprintf(dd_csr, DMACSR_BITS, sbuf, sizeof(sbuf));
	printf("NDMAP: dd->dd_csr          = 0x%s\n",   sbuf);

	printf("NDMAP: dd->dd_saved_next   = 0x%08lx\n", dd_saved_next);
	printf("NDMAP: dd->dd_saved_limit  = 0x%08lx\n", dd_saved_limit);
	printf("NDMAP: dd->dd_saved_start  = 0x%08lx\n", dd_saved_start);
	printf("NDMAP: dd->dd_saved_stop   = 0x%08lx\n", dd_saved_stop);
	printf("NDMAP: dd->dd_next         = 0x%08lx\n", dd_next);
	printf("NDMAP: dd->dd_next_initbuf = 0x%08lx\n", dd_next_initbuf);
	printf("NDMAP: dd->dd_limit        = 0x%08lx\n", dd_limit);
	printf("NDMAP: dd->dd_start        = 0x%08lx\n", dd_start);
	printf("NDMAP: dd->dd_stop         = 0x%08lx\n", dd_stop);

	bitmask_snprintf(NEXT_I_BIT(nsc->sc_chan->nd_intr), NEXT_INTR_BITS,
			 sbuf, sizeof(sbuf));
	printf("NDMAP: interrupt ipl (%ld) intr(0x%s)\n",
			NEXT_I_IPL(nsc->sc_chan->nd_intr), sbuf);
}
924 
925 #if defined(ND_DEBUG)
926 void
927 nextdma_debug_initstate(struct nextdma_softc *nsc)
928 {
929 	switch(nsc->sc_chan->nd_intr) {
930 	case NEXT_I_ENETR_DMA:
931 		memset(nextdma_debug_enetr_state,0,sizeof(nextdma_debug_enetr_state));
932 		break;
933 	case NEXT_I_SCSI_DMA:
934 		memset(nextdma_debug_scsi_state,0,sizeof(nextdma_debug_scsi_state));
935 		break;
936 	}
937 }
938 
939 void
940 nextdma_debug_savestate(struct nextdma_softc *nsc, unsigned int state)
941 {
942 	switch(nsc->sc_chan->nd_intr) {
943 	case NEXT_I_ENETR_DMA:
944 		nextdma_debug_enetr_state[nextdma_debug_enetr_idx++] = state;
945 		nextdma_debug_enetr_idx %= (sizeof(nextdma_debug_enetr_state)/sizeof(unsigned int));
946 		break;
947 	case NEXT_I_SCSI_DMA:
948 		nextdma_debug_scsi_state[nextdma_debug_scsi_idx++] = state;
949 		nextdma_debug_scsi_idx %= (sizeof(nextdma_debug_scsi_state)/sizeof(unsigned int));
950 		break;
951 	}
952 }
953 
954 void
955 nextdma_debug_enetr_dumpstate(void)
956 {
957 	int i;
958 	int s;
959 	s = spldma();
960 	i = nextdma_debug_enetr_idx;
961 	do {
962 		char sbuf[256];
963 		if (nextdma_debug_enetr_state[i]) {
964 			bitmask_snprintf(nextdma_debug_enetr_state[i], DMACSR_BITS, sbuf, sizeof(sbuf));
965 			printf("DMA: 0x%02x state 0x%s\n",i,sbuf);
966 		}
967 		i++;
968 		i %= (sizeof(nextdma_debug_enetr_state)/sizeof(unsigned int));
969 	} while (i != nextdma_debug_enetr_idx);
970 	splx(s);
971 }
972 
973 void
974 nextdma_debug_scsi_dumpstate(void)
975 {
976 	int i;
977 	int s;
978 	s = spldma();
979 	i = nextdma_debug_scsi_idx;
980 	do {
981 		char sbuf[256];
982 		if (nextdma_debug_scsi_state[i]) {
983 			bitmask_snprintf(nextdma_debug_scsi_state[i], DMACSR_BITS, sbuf, sizeof(sbuf));
984 			printf("DMA: 0x%02x state 0x%s\n",i,sbuf);
985 		}
986 		i++;
987 		i %= (sizeof(nextdma_debug_scsi_state)/sizeof(unsigned int));
988 	} while (i != nextdma_debug_scsi_idx);
989 	splx(s);
990 }
991 #endif
992 
993