/*	$NetBSD: nextdma.c,v 1.21 2000/01/12 19:18:00 dbj Exp $	*/
/*
 * Copyright (c) 1998 Darrin B. Jewell
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Darrin B. Jewell
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/syslog.h>
#include <sys/socket.h>
#include <sys/device.h>
#include <sys/malloc.h>
#include <sys/ioctl.h>
#include <sys/errno.h>

#include <machine/autoconf.h>
#include <machine/cpu.h>
#include <machine/intr.h>

#include <m68k/cacheops.h>

#include <next68k/next68k/isr.h>

#define _NEXT68K_BUS_DMA_PRIVATE
#include <machine/bus.h>

#include "nextdmareg.h"
#include "nextdmavar.h"

#if 1
#define ND_DEBUG
#endif

#if defined(ND_DEBUG)
int nextdma_debug = 0;
/* Wrapped in do/while so DPRINTF behaves as a single statement
 * (the bare "if" form can change the meaning of an enclosing if/else).
 */
#define DPRINTF(x) do { if (nextdma_debug) printf x; } while (0)
#else
#define DPRINTF(x)
#endif

void next_dmamap_sync __P((bus_dma_tag_t, bus_dmamap_t, bus_addr_t,
                       bus_size_t, int));
int next_dma_continue __P((struct nextdma_config *));
void next_dma_rotate __P((struct nextdma_config *));

void next_dma_setup_cont_regs __P((struct nextdma_config *));
void next_dma_setup_curr_regs __P((struct nextdma_config *));
void next_dma_finish_xfer __P((struct nextdma_config *));

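/* A rough sketch of how this engine appears to work, inferred from the
 * register accesses below rather than from NeXT documentation, so treat
 * the details as informed guesses:
 *
 *  - The "current" register set (DD_NEXT/DD_LIMIT) describes the buffer
 *    the channel is actively transferring.
 *  - The "continue" register set (DD_START/DD_STOP) describes the buffer
 *    the channel will chain to next.
 *  - DMACSR_SETSUPDATE asks the hardware to copy the continue set into
 *    the current set when the current buffer completes, so transfers can
 *    be chained without stopping the channel.
 *  - Each completed buffer raises DMACSR_COMPLETE, and nextdma_intr()
 *    refills the continue registers via next_dma_rotate().
 */
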
void
nextdma_config(nd)
	struct nextdma_config *nd;
{
	/* Initialize the dma_tag. As a hack, we currently
	 * put the dma tag in the structure itself.  It shouldn't be there.
	 */

	{
		bus_dma_tag_t t;
		t = &nd->_nd_dmat;
		t->_cookie = nd;
		t->_dmamap_create = _bus_dmamap_create;
		t->_dmamap_destroy = _bus_dmamap_destroy;
		t->_dmamap_load = _bus_dmamap_load_direct;
		t->_dmamap_load_mbuf = _bus_dmamap_load_mbuf_direct;
		t->_dmamap_load_uio = _bus_dmamap_load_uio_direct;
		t->_dmamap_load_raw = _bus_dmamap_load_raw_direct;
		t->_dmamap_unload = _bus_dmamap_unload;
		t->_dmamap_sync = _bus_dmamap_sync;

		t->_dmamem_alloc = _bus_dmamem_alloc;
		t->_dmamem_free = _bus_dmamem_free;
		t->_dmamem_map = _bus_dmamem_map;
		t->_dmamem_unmap = _bus_dmamem_unmap;
		t->_dmamem_mmap = _bus_dmamem_mmap;

		nd->nd_dmat = t;
	}

	nextdma_init(nd);

	isrlink_autovec(nextdma_intr, nd, NEXT_I_IPL(nd->nd_intr), 10);
	INTR_ENABLE(nd->nd_intr);
}
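
/* An illustrative sketch (not taken from any in-tree driver) of how a
 * client might wire up a channel before calling nextdma_config();
 * "sc" and its fields here are hypothetical:
 *
 *	nd->nd_bst = sc->sc_bst;	(bus space tag of the register bank)
 *	nd->nd_bsh = sc->sc_dma_bsh;	(bus space handle of the register bank)
 *	nd->nd_intr = NEXT_I_ENETX_DMA;	(which DMA interrupt this channel is)
 *	nd->nd_continue_cb = mydev_continue;	(returns the next dmamap, or NULL)
 *	nd->nd_completed_cb = mydev_done;	(a dmamap finished transferring)
 *	nd->nd_shutdown_cb = mydev_shutdown;	(the channel went idle)
 *	nd->nd_cb_arg = sc;
 *	nextdma_config(nd);
 */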

void
nextdma_init(nd)
	struct nextdma_config *nd;
{
	DPRINTF(("DMA init ipl (%ld) intr(0x%b)\n",
			NEXT_I_IPL(nd->nd_intr), NEXT_I_BIT(nd->nd_intr), NEXT_INTR_BITS));

	nd->_nd_map = NULL;
	nd->_nd_idx = 0;
	nd->_nd_map_cont = NULL;
	nd->_nd_idx_cont = 0;

	bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR, 0);
	bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR,
			DMACSR_RESET | DMACSR_INITBUF);

	next_dma_setup_curr_regs(nd);
	next_dma_setup_cont_regs(nd);

#if defined(DIAGNOSTIC)
	{
		u_long state;
		state = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_CSR);

#if 1
		/* mourning (a 25MHz 68040 mono slab) appears to set BUSEXC;
		 * milo (a 25MHz 68040 mono cube) didn't have this problem.
		 * Darrin B. Jewell <jewell@mit.edu>  Mon May 25 07:53:05 1998
		 */
		state &= (DMACSR_COMPLETE | DMACSR_SUPDATE | DMACSR_ENABLE);
#else
		state &= (DMACSR_BUSEXC | DMACSR_COMPLETE |
				DMACSR_SUPDATE | DMACSR_ENABLE);
#endif
		if (state) {
			next_dma_print(nd);
			panic("DMA did not reset");
		}
	}
#endif
}


void
nextdma_reset(nd)
	struct nextdma_config *nd;
{
	int s;
	s = spldma();

	DPRINTF(("DMA reset\n"));

#if (defined(ND_DEBUG))
	if (nextdma_debug) next_dma_print(nd);
#endif

	/* @@@ clean up dma maps */

	nextdma_init(nd);
	splx(s);
}

/****************************************************************/


/* Rotate the continue map into the current map and invoke the
 * continue callback to try to refill the dma continue buffers.
 */
void
next_dma_rotate(nd)
	struct nextdma_config *nd;
{

	DPRINTF(("DMA next_dma_rotate()\n"));

	/* Rotate the continue map into the current map */
	nd->_nd_map = nd->_nd_map_cont;
	nd->_nd_idx = nd->_nd_idx_cont;

	/* If the old continue map is exhausted, ask the client for another */
	if ((nd->_nd_map_cont == NULL) ||
			(++nd->_nd_idx_cont >= nd->_nd_map_cont->dm_nsegs)) {
		if (nd->nd_continue_cb) {
			nd->_nd_map_cont = (*nd->nd_continue_cb)(nd->nd_cb_arg);
		} else {
			nd->_nd_map_cont = NULL;
		}
		nd->_nd_idx_cont = 0;
	}

#ifdef DIAGNOSTIC
	if (nd->_nd_map) {
		nd->_nd_map->dm_segs[nd->_nd_idx].ds_xfer_len = 0x1234beef;
	}
#endif

#ifdef DIAGNOSTIC
	if (nd->_nd_map_cont) {
		if (!DMA_BEGINALIGNED(nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_addr)) {
			next_dma_print(nd);
			panic("DMA request unaligned at start");
		}
		if (!DMA_ENDALIGNED(nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_addr +
				nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_len)) {
			next_dma_print(nd);
			panic("DMA request unaligned at end");
		}
	}
#endif

}
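
/* After a rotate, the old continue buffer has become the current one:
 *
 *	before:	current = (map A, seg i)	continue = (map B, seg j)
 *	after:	current = (map B, seg j)	continue = (map B, seg j+1),
 *		or a fresh map supplied by the continue callback once
 *		map B runs out of segments.
 */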

void
next_dma_setup_cont_regs(nd)
	struct nextdma_config *nd;
{
	bus_addr_t dd_start;
	bus_addr_t dd_stop;
	bus_addr_t dd_saved_start;
	bus_addr_t dd_saved_stop;

	DPRINTF(("DMA next_dma_setup_cont_regs()\n"));

	if (nd->_nd_map_cont) {
		dd_start = nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_addr;
		dd_stop  = (nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_addr +
				nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_len);

		if (nd->nd_intr == NEXT_I_ENETX_DMA) {
			dd_stop |= 0x80000000;		/* Ethernet transmit needs secret magic */
		}
	} else {
		dd_start = 0xdeadbeef;
		dd_stop = 0xdeadbeef;
	}

	dd_saved_start = dd_start;
	dd_saved_stop  = dd_stop;

	bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_START, dd_start);
	bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_STOP, dd_stop);
	bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_START, dd_saved_start);
	bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_STOP, dd_saved_stop);

#ifdef DIAGNOSTIC
	if ((bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_START) != dd_start) ||
			(bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_STOP) != dd_stop) ||
			(bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_START) != dd_saved_start) ||
			(bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_STOP) != dd_saved_stop)) {
		next_dma_print(nd);
		panic("DMA failure writing to continue regs");
	}
#endif
}

void
next_dma_setup_curr_regs(nd)
	struct nextdma_config *nd;
{
	bus_addr_t dd_next;
	bus_addr_t dd_limit;
	bus_addr_t dd_saved_next;
	bus_addr_t dd_saved_limit;

	DPRINTF(("DMA next_dma_setup_curr_regs()\n"));

	if (nd->_nd_map) {
		dd_next = nd->_nd_map->dm_segs[nd->_nd_idx].ds_addr;
		dd_limit = (nd->_nd_map->dm_segs[nd->_nd_idx].ds_addr +
				nd->_nd_map->dm_segs[nd->_nd_idx].ds_len);
		if (nd->nd_intr == NEXT_I_ENETX_DMA) {
			dd_limit |= 0x80000000;	/* Ethernet transmit needs secret magic */
		}
	} else {
		dd_next = 0xdeadbeef;
		dd_limit = 0xdeadbeef;
	}

	dd_saved_next = dd_next;
	dd_saved_limit = dd_limit;

	if (nd->nd_intr == NEXT_I_ENETX_DMA) {
		bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_NEXT_INITBUF, dd_next);
	} else {
		bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_NEXT, dd_next);
	}
	bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_LIMIT, dd_limit);
	bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_NEXT, dd_saved_next);
	bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_LIMIT, dd_saved_limit);

#ifdef DIAGNOSTIC
	if ((bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_NEXT_INITBUF) != dd_next) ||
			(bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_NEXT) != dd_next) ||
			(bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_LIMIT) != dd_limit) ||
			(bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_NEXT) != dd_saved_next) ||
			(bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_LIMIT) != dd_saved_limit)) {
		next_dma_print(nd);
		panic("DMA failure writing to current regs");
	}
#endif
}


/* This routine is used for debugging */

void
next_dma_print(nd)
	struct nextdma_config *nd;
{
	u_long dd_csr;
	u_long dd_next;
	u_long dd_next_initbuf;
	u_long dd_limit;
	u_long dd_start;
	u_long dd_stop;
	u_long dd_saved_next;
	u_long dd_saved_limit;
	u_long dd_saved_start;
	u_long dd_saved_stop;

	/* Read all of the registers before we print anything out,
	 * in case something changes
	 */
	dd_csr          = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_CSR);
	dd_next         = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_NEXT);
	dd_next_initbuf = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_NEXT_INITBUF);
	dd_limit        = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_LIMIT);
	dd_start        = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_START);
	dd_stop         = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_STOP);
	dd_saved_next   = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_NEXT);
	dd_saved_limit  = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_LIMIT);
	dd_saved_start  = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_START);
	dd_saved_stop   = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_STOP);

	printf("NDMAP: *intrstat = 0x%b\n",
			(*(volatile u_long *)IIOV(NEXT_P_INTRSTAT)), NEXT_INTR_BITS);
	printf("NDMAP: *intrmask = 0x%b\n",
			(*(volatile u_long *)IIOV(NEXT_P_INTRMASK)), NEXT_INTR_BITS);

	/* NDMAP is Next DMA Print (really!) */

	if (nd->_nd_map) {
		printf("NDMAP: nd->_nd_map->dm_mapsize = %d\n",
				nd->_nd_map->dm_mapsize);
		printf("NDMAP: nd->_nd_map->dm_nsegs = %d\n",
				nd->_nd_map->dm_nsegs);
		printf("NDMAP: nd->_nd_map->dm_segs[%d].ds_addr = 0x%08lx\n",
				nd->_nd_idx, nd->_nd_map->dm_segs[nd->_nd_idx].ds_addr);
		printf("NDMAP: nd->_nd_map->dm_segs[%d].ds_len = %d\n",
				nd->_nd_idx, nd->_nd_map->dm_segs[nd->_nd_idx].ds_len);
		printf("NDMAP: nd->_nd_map->dm_segs[%d].ds_xfer_len = %d\n",
				nd->_nd_idx, nd->_nd_map->dm_segs[nd->_nd_idx].ds_xfer_len);
	} else {
		printf("NDMAP: nd->_nd_map = NULL\n");
	}
	if (nd->_nd_map_cont) {
		printf("NDMAP: nd->_nd_map_cont->dm_mapsize = %d\n",
				nd->_nd_map_cont->dm_mapsize);
		printf("NDMAP: nd->_nd_map_cont->dm_nsegs = %d\n",
				nd->_nd_map_cont->dm_nsegs);
		printf("NDMAP: nd->_nd_map_cont->dm_segs[%d].ds_addr = 0x%08lx\n",
				nd->_nd_idx_cont, nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_addr);
		printf("NDMAP: nd->_nd_map_cont->dm_segs[%d].ds_len = %d\n",
				nd->_nd_idx_cont, nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_len);
		printf("NDMAP: nd->_nd_map_cont->dm_segs[%d].ds_xfer_len = %d\n",
				nd->_nd_idx_cont, nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_xfer_len);
	} else {
		printf("NDMAP: nd->_nd_map_cont = NULL\n");
	}

	printf("NDMAP: dd->dd_csr          = 0x%b\n",   dd_csr,   DMACSR_BITS);
	printf("NDMAP: dd->dd_saved_next   = 0x%08x\n", dd_saved_next);
	printf("NDMAP: dd->dd_saved_limit  = 0x%08x\n", dd_saved_limit);
	printf("NDMAP: dd->dd_saved_start  = 0x%08x\n", dd_saved_start);
	printf("NDMAP: dd->dd_saved_stop   = 0x%08x\n", dd_saved_stop);
	printf("NDMAP: dd->dd_next         = 0x%08x\n", dd_next);
	printf("NDMAP: dd->dd_next_initbuf = 0x%08x\n", dd_next_initbuf);
	printf("NDMAP: dd->dd_limit        = 0x%08x\n", dd_limit);
	printf("NDMAP: dd->dd_start        = 0x%08x\n", dd_start);
	printf("NDMAP: dd->dd_stop         = 0x%08x\n", dd_stop);

	printf("NDMAP: interrupt ipl (%ld) intr(0x%b)\n",
			NEXT_I_IPL(nd->nd_intr), NEXT_I_BIT(nd->nd_intr), NEXT_INTR_BITS);
}

/****************************************************************/
void
next_dma_finish_xfer(nd)
	struct nextdma_config *nd;
{
	bus_addr_t onext;
	bus_addr_t olimit;
	bus_addr_t slimit;

	onext = nd->_nd_map->dm_segs[nd->_nd_idx].ds_addr;
	olimit = onext + nd->_nd_map->dm_segs[nd->_nd_idx].ds_len;

	if ((nd->_nd_map_cont == NULL) && (nd->_nd_idx+1 == nd->_nd_map->dm_nsegs)) {
		slimit = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_LIMIT);
	} else {
		slimit = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_LIMIT);
	}

	if (nd->nd_intr == NEXT_I_ENETX_DMA) {
		slimit &= ~0x80000000;	/* strip the ethernet transmit magic bit */
	}

#ifdef DIAGNOSTIC
	if ((slimit < onext) || (slimit > olimit)) {
		next_dma_print(nd);
		panic("DMA: unexpected registers in finish_xfer");
	}
#endif

	nd->_nd_map->dm_segs[nd->_nd_idx].ds_xfer_len = slimit - onext;

	/* If we've reached the end of the current map, then notify
	 * the client that we've completed that map.
	 */
	if (nd->_nd_map && ((nd->_nd_idx+1) == nd->_nd_map->dm_nsegs)) {
		if (nd->nd_completed_cb)
			(*nd->nd_completed_cb)(nd->_nd_map, nd->nd_cb_arg);
	}
	nd->_nd_map = NULL;
	nd->_nd_idx = 0;
}
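
/* A note on the accounting above: slimit is where the engine actually
 * stopped within [onext, olimit], so ds_xfer_len records how many bytes
 * of the segment were really transferred.  The final buffer of a chain
 * never triggers a save-update, which appears to be why DD_LIMIT rather
 * than DD_SAVED_LIMIT must be consulted in that case.
 */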
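/* The interrupt handler in outline: acknowledge the COMPLETE condition,
 * account for the buffer that just finished (next_dma_finish_xfer), and
 * then either shut the channel down (no more buffers) or rotate in the
 * next buffer, reload the continue registers, and re-arm the channel,
 * requesting a save-update unless this is the last buffer.
 */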
int
nextdma_intr(arg)
	void *arg;
{
	/* @@@ This is bogus; we can't be certain of arg's type
	 * unless the interrupt is for us.  For now we successfully
	 * cheat because DMA interrupts are the only things invoked
	 * at this interrupt level.
	 */
	struct nextdma_config *nd = arg;

	if (!INTR_OCCURRED(nd->nd_intr)) return 0;

	/* Handle dma interrupts */

	DPRINTF(("DMA interrupt ipl (%ld) intr(0x%b)\n",
			NEXT_I_IPL(nd->nd_intr), NEXT_I_BIT(nd->nd_intr), NEXT_INTR_BITS));

#ifdef DIAGNOSTIC
	if (!nd->_nd_map) {
		next_dma_print(nd);
		panic("DMA missing current map in interrupt!");
	}
#endif

	{
		int state = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_CSR);

#ifdef DIAGNOSTIC
		if ((!(state & DMACSR_COMPLETE)) || (state & DMACSR_SUPDATE)) {
			next_dma_print(nd);
			panic("DMA unexpected dma state in interrupt (0x%b)",
					state, DMACSR_BITS);
		}
#endif

		next_dma_finish_xfer(nd);

		/* Check to see if we are expecting dma to shut down */
		if ((nd->_nd_map == NULL) && (nd->_nd_map_cont == NULL)) {

#ifdef DIAGNOSTIC
			if (state & DMACSR_ENABLE) {
				next_dma_print(nd);
				panic("DMA: unexpected DMA state at shutdown (0x%b)",
						state, DMACSR_BITS);
			}
#endif
			bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR,
					DMACSR_CLRCOMPLETE | DMACSR_RESET);

			DPRINTF(("DMA: a normal and expected shutdown occurred\n"));
			if (nd->nd_shutdown_cb) (*nd->nd_shutdown_cb)(nd->nd_cb_arg);

			return(1);
		}

		next_dma_rotate(nd);
		next_dma_setup_cont_regs(nd);

		{
			u_long dmadir;		/* DMACSR_SETREAD or DMACSR_SETWRITE */

			if (state & DMACSR_READ) {
				dmadir = DMACSR_SETREAD;
			} else {
				dmadir = DMACSR_SETWRITE;
			}

			/* We used to SETENABLE here only conditionally, but we
			 * got burned because DMA would sometimes shut down
			 * between when we checked and when we acted upon it.
			 * CL19991211
			 */
			if ((nd->_nd_map_cont == NULL) && (nd->_nd_idx+1 == nd->_nd_map->dm_nsegs)) {
				bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR,
						DMACSR_CLRCOMPLETE | dmadir | DMACSR_SETENABLE);
			} else {
				bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR,
						DMACSR_CLRCOMPLETE | dmadir | DMACSR_SETSUPDATE | DMACSR_SETENABLE);
			}

		}

	}

	DPRINTF(("DMA exiting interrupt ipl (%ld) intr(0x%b)\n",
			NEXT_I_IPL(nd->nd_intr), NEXT_I_BIT(nd->nd_intr), NEXT_INTR_BITS));

	return(1);
}

/*
 * Check to see if dma has finished for a channel.
 */
int
nextdma_finished(nd)
	struct nextdma_config *nd;
{
	int r;
	int s;
	s = spldma();		/* @@@ should this be splimp()? */
	r = (nd->_nd_map == NULL) && (nd->_nd_map_cont == NULL);
	splx(s);
	return(r);
}

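/* Start a transfer in the given direction.  The flow, as reconstructed
 * from the code below: rotating twice preloads both the current and the
 * continue buffers from the continue callback, the engine is then reset
 * and both register sets are programmed, and finally the channel is
 * enabled, with DMACSR_SETSUPDATE requested unless this single buffer
 * is the whole transfer.
 */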
void
nextdma_start(nd, dmadir)
	struct nextdma_config *nd;
	u_long dmadir;		/* DMACSR_SETREAD or DMACSR_SETWRITE */
{

#ifdef DIAGNOSTIC
	if (!nextdma_finished(nd)) {
		panic("DMA trying to start before previous finished on intr(0x%b)",
				NEXT_I_BIT(nd->nd_intr), NEXT_INTR_BITS);
	}
#endif

	DPRINTF(("DMA start (%ld) intr(0x%b)\n",
			NEXT_I_IPL(nd->nd_intr), NEXT_I_BIT(nd->nd_intr), NEXT_INTR_BITS));

#ifdef DIAGNOSTIC
	if (nd->_nd_map) {
		next_dma_print(nd);
		panic("DMA: nextdma_start() with non-null map");
	}
	if (nd->_nd_map_cont) {
		next_dma_print(nd);
		panic("DMA: nextdma_start() with non-null continue map");
	}
#endif

#ifdef DIAGNOSTIC
	if ((dmadir != DMACSR_SETREAD) && (dmadir != DMACSR_SETWRITE)) {
		panic("DMA: nextdma_start(), dmadir arg must be DMACSR_SETREAD or DMACSR_SETWRITE");
	}
#endif

	/* preload both the current and the continue maps */
	next_dma_rotate(nd);

#ifdef DIAGNOSTIC
	if (!nd->_nd_map_cont) {
		panic("No map available in nextdma_start()");
	}
#endif

	next_dma_rotate(nd);

	DPRINTF(("DMA initiating DMA %s of %d segments on intr(0x%b)\n",
			(dmadir == DMACSR_SETREAD ? "read" : "write"), nd->_nd_map->dm_nsegs,
			NEXT_I_BIT(nd->nd_intr), NEXT_INTR_BITS));

	bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR, 0);
	bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR,
			DMACSR_INITBUF | DMACSR_RESET | dmadir);

	next_dma_setup_curr_regs(nd);
	next_dma_setup_cont_regs(nd);

#if (defined(ND_DEBUG))
	if (nextdma_debug) next_dma_print(nd);
#endif

	if ((nd->_nd_map_cont == NULL) && (nd->_nd_idx+1 == nd->_nd_map->dm_nsegs)) {
		bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR,
				DMACSR_SETENABLE | dmadir);
	} else {
		bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR,
				DMACSR_SETSUPDATE | DMACSR_SETENABLE | dmadir);
	}
}