1 /*	$NetBSD: nextdma.c,v 1.9 1998/12/30 03:05:29 dbj Exp $	*/
2 /*
3  * Copyright (c) 1998 Darrin B. Jewell
4  * All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  * 3. All advertising materials mentioning features or use of this software
15  *    must display the following acknowledgement:
16  *      This product includes software developed by Darrin B. Jewell
17  * 4. The name of the author may not be used to endorse or promote products
18  *    derived from this software without specific prior written permission
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
21  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
22  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
23  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
24  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
25  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
26  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
27  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
28  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
29  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30  */
31 
32 #include <sys/param.h>
33 #include <sys/systm.h>
34 #include <sys/mbuf.h>
35 #include <sys/syslog.h>
36 #include <sys/socket.h>
37 #include <sys/device.h>
38 #include <sys/malloc.h>
39 #include <sys/ioctl.h>
40 #include <sys/errno.h>
41 
42 #include <machine/autoconf.h>
43 #include <machine/cpu.h>
44 #include <machine/intr.h>
45 
46 #include <m68k/cacheops.h>
47 
48 #include <next68k/next68k/isr.h>
49 
50 #define _GENERIC_BUS_DMA_PRIVATE
51 #include <machine/bus.h>
52 
53 #include "nextdmareg.h"
54 #include "nextdmavar.h"
55 
56 #if 1
57 #define ND_DEBUG
58 #endif
59 
60 #if defined(ND_DEBUG)
61 int nextdma_debug = 0;
62 #define DPRINTF(x) do { if (nextdma_debug) printf x; } while (0)
63 #else
64 #define DPRINTF(x)
65 #endif
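
/* Note on DPRINTF usage in this file: it takes a doubly parenthesized
 * printf-style argument list, e.g. DPRINTF(("DMA foo (%d)\n", x));
 * output is suppressed until nextdma_debug is set nonzero.
 */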
66 
67   /* @@@ for debugging */
68 struct nextdma_config *debugernd;
69 struct nextdma_config *debugexnd;
70 
71 int nextdma_intr __P((void *));
72 void next_dmamap_sync __P((bus_dma_tag_t, bus_dmamap_t, bus_addr_t,
73                        bus_size_t, int));
74 int next_dma_continue __P((struct nextdma_config *));
75 void next_dma_rotate __P((struct nextdma_config *));
76 
77 void next_dma_setup_cont_regs __P((struct nextdma_config *));
78 void next_dma_setup_curr_regs __P((struct nextdma_config *));
79 
80 void next_dma_print __P((struct nextdma_config *));
81 
82 void
83 nextdma_config(nd)
84 	struct nextdma_config *nd;
85 {
86 	/* Initialize the dma_tag. As a hack, we currently
87 	 * put the dma tag in the structure itself.  It shouldn't be there.
88 	 */
89 
90 	{
91 		bus_dma_tag_t t;
92 		t = &nd->_nd_dmat;
93 		t->_cookie = nd;
94 		t->_get_tag = NULL;           /* lose */
95 		t->_dmamap_create = _bus_dmamap_create;
96 		t->_dmamap_destroy = _bus_dmamap_destroy;
97 		t->_dmamap_load = _bus_dmamap_load_direct;
98 		t->_dmamap_load_mbuf = _bus_dmamap_load_mbuf_direct;
99 		t->_dmamap_load_uio = _bus_dmamap_load_uio_direct;
100 		t->_dmamap_load_raw = _bus_dmamap_load_raw_direct;
101 		t->_dmamap_unload = _bus_dmamap_unload;
102 		t->_dmamap_sync = next_dmamap_sync;
103 
104 		t->_dmamem_alloc = _bus_dmamem_alloc;
105 		t->_dmamem_free = _bus_dmamem_free;
106 		t->_dmamem_map = _bus_dmamem_map;
107 		t->_dmamem_unmap = _bus_dmamem_unmap;
108 		t->_dmamem_mmap = _bus_dmamem_mmap;
109 
110 		nd->nd_dmat = t;
111 	}
112 
113   /* @@@ for debugging */
114 	if (nd->nd_intr == NEXT_I_ENETR_DMA) {
115 		debugernd = nd;
116 	}
117 	if (nd->nd_intr == NEXT_I_ENETX_DMA) {
118 		debugexnd = nd;
119 	}
120 
121 	nextdma_init(nd);
122 
123 	isrlink_autovec(nextdma_intr, nd, NEXT_I_IPL(nd->nd_intr), 10);
124 	INTR_ENABLE(nd->nd_intr);
125 }
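
/*
 * Illustrative usage sketch (not from a real driver: the sc_* members and
 * mydriver_* callbacks are hypothetical; only the nd_* fields and the
 * nextdma_* entry points are taken from this file):
 *
 *	struct nextdma_config *nd = &sc->sc_rxdma;
 *
 *	nd->nd_bst = sc->sc_bst;		bus space tag/handle covering
 *	nd->nd_bsh = sc->sc_rxdma_bsh;		the channel's DMA registers
 *	nd->nd_intr = NEXT_I_ENETR_DMA;		which DMA interrupt this is
 *	nd->nd_continue_cb = mydriver_rx_continue;	returns the next dmamap
 *	nd->nd_completed_cb = mydriver_rx_completed;	a map finished transferring
 *	nd->nd_shutdown_cb = mydriver_rx_shutdown;	the channel went idle
 *	nd->nd_cb_arg = sc;
 *
 *	nextdma_config(nd);			sets up nd->nd_dmat, hooks the ISR
 *	...
 *	nextdma_start(nd, DMACSR_READ);		start; nd_continue_cb feeds maps
 */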
126 
127 void
128 nextdma_init(nd)
129 	struct nextdma_config *nd;
130 {
131   DPRINTF(("DMA init ipl (%ld) intr(0x%b)\n",
132 			NEXT_I_IPL(nd->nd_intr), NEXT_I_BIT(nd->nd_intr),NEXT_INTR_BITS));
133 
134 	/* @@@ should probably check and free these maps */
135 	nd->_nd_map = NULL;
136 	nd->_nd_idx = 0;
137 	nd->_nd_map_cont = NULL;
138 	nd->_nd_idx_cont = 0;
139 
140 	bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR, 0);
141 	bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR,
142 			DMACSR_INITBUF | DMACSR_CLRCOMPLETE | DMACSR_RESET);
143 
144 	next_dma_setup_curr_regs(nd);
145 	next_dma_setup_cont_regs(nd);
146 
147 #if 0 && defined(DIAGNOSTIC)
148 	/* Today, my computer (mourning) appears to fail this test.
149 	 * Yesterday, another NeXT (milo) didn't have this problem.
150 	 * Darrin B. Jewell <jewell@mit.edu>  Mon May 25 07:53:05 1998
151 	 */
152 	{
153 		u_long state;
154 		state = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_CSR);
155 		state = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_CSR);
156     state &= (DMACSR_BUSEXC | DMACSR_COMPLETE |
157               DMACSR_SUPDATE | DMACSR_ENABLE);
158 
159 		if (state) {
160 			next_dma_print(nd);
161 			panic("DMA did not reset\n");
162 		}
163 	}
164 #endif
165 }
166 
167 
168 void
169 nextdma_reset(nd)
170 	struct nextdma_config *nd;
171 {
172 	int s;
173 	s = spldma();									/* @@@ should this be splimp()? */
174 
175 	DPRINTF(("DMA reset\n"));
176 
177 #if (defined(ND_DEBUG))
178 	if (nextdma_debug) next_dma_print(nd);
179 #endif
180 
181 	nextdma_init(nd);
182 	splx(s);
183 }
184 
185 /****************************************************************/
186 
187 /* If the NeXT had multiple busses, this should probably
188  * go elsewhere, but it is here anyway */
189 void
190 next_dmamap_sync(t, map, offset, len, ops)
191      bus_dma_tag_t t;
192      bus_dmamap_t map;
193      bus_addr_t offset;
194      bus_size_t len;
195      int ops;
196 {
197 	/* flush/purge the cache.
198 	 * assumes pointers are aligned
199 	 * @@@ should probably be fixed to use offset and len args.
200 	 * should also optimize this to work on pages for larger regions?
201 	 */
202 	if (ops & BUS_DMASYNC_PREWRITE) {
203 		int i;
204 		for(i=0;i<map->dm_nsegs;i++) {
205 			bus_addr_t p = map->dm_segs[i].ds_addr;
206 			bus_addr_t e = p+map->dm_segs[i].ds_len;
207 			while(p<e) {
208 				DCFL(p);								/* flush */
209 				p += 16;								/* cache line length */
210 			}
211 		}
212 	}
213 
214 	if (ops & BUS_DMASYNC_POSTREAD) {
215 		int i;
216 		for(i=0;i<map->dm_nsegs;i++) {
217 			bus_addr_t p = map->dm_segs[i].ds_addr;
218 			bus_addr_t e = p+map->dm_segs[i].ds_len;
219 			while(p<e) {
220 				DCPL(p);								/* purge */
221 				p += 16;								/* cache line length */
222 			}
223 		}
224 	}
225 }
226 
227 /****************************************************************/
228 
229 
230 /* Call the completed and continue callbacks to try to fill
231  * in the dma continue buffers.
232  */
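/*
 * (Added note: the channel is double-buffered.  While the "current"
 * map/segment is transferring, the "continue" map/segment is already
 * loaded in the chained start/stop registers.  next_dma_rotate() moves
 * continue -> current and then either advances to the next segment of
 * the same map or asks nd_continue_cb for a fresh map.)
 */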
233 void
234 next_dma_rotate(nd)
235 	struct nextdma_config *nd;
236 {
237 
238 	DPRINTF(("DMA next_dma_rotate()\n"));
239 
240 	/* If we've reached the end of the current map, then inform
241 	 * that we've completed that map.
242 	 */
243 	if (nd->_nd_map && ((nd->_nd_idx+1) == nd->_nd_map->dm_nsegs)) {
244 		if (nd->nd_completed_cb)
245 			(*nd->nd_completed_cb)(nd->_nd_map, nd->nd_cb_arg);
246 	}
247 
248 	/* Rotate the continue map into the current map */
249 	nd->_nd_map = nd->_nd_map_cont;
250 	nd->_nd_idx = nd->_nd_idx_cont;
251 
252 	if ((!nd->_nd_map_cont) ||
253 			((nd->_nd_map_cont) &&
254 					(++nd->_nd_idx_cont >= nd->_nd_map_cont->dm_nsegs))) {
255 		if (nd->nd_continue_cb) {
256 			nd->_nd_map_cont = (*nd->nd_continue_cb)(nd->nd_cb_arg);
257 		} else {
258 			nd->_nd_map_cont = 0;
259 		}
260 		nd->_nd_idx_cont = 0;
261 	}
262 
263 #ifdef DIAGNOSTIC
264 	if (nd->_nd_map_cont) {
265 		if (!DMA_BEGINALIGNED(nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_addr)) {
266 			panic("DMA request unaligned at start\n");
267 		}
268 		if (!DMA_ENDALIGNED(nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_addr +
269 				nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_len)) {
270 			panic("DMA request unaligned at end\n");
271 		}
272 	}
273 #endif
274 
275 }
276 
277 void
278 next_dma_setup_cont_regs(nd)
279 	struct nextdma_config *nd;
280 {
281 	DPRINTF(("DMA next_dma_setup_cont_regs()\n"));
282 
283 	if (nd->_nd_map_cont) {
284 
285 		if (nd->nd_intr == NEXT_I_ENETX_DMA) {
286 			/* Ethernet transmit needs secret magic */
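			/* (the "magic" is the 0x80000000 bit OR'd into the
			 * stop address below; why the hardware wants it set on
			 * the transmit channel is not explained here) */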
287 
288 			bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_START,
289 					nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_addr);
290 			bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_STOP,
291 					((nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_addr +
292 							nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_len)
293 							+ 0x0) | 0x80000000);
294 		} else {
295 			bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_START,
296 					nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_addr);
297 			bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_STOP,
298 					nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_addr +
299 					nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_len);
300 		}
301 
302 	} else {
303 
304 		bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_START, 0xdeadbeef);
305 		bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_STOP, 0xdeadbeef);
306 	}
307 
308 #if 0
309 	bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_START,
310 			bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_START));
311 	bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_STOP,
312 			bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_STOP));
313 #else
314 	bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_START, 0xfeedbeef);
315 	bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_STOP, 0xfeedbeef);
316 #endif
317 
318 }
319 
320 void
321 next_dma_setup_curr_regs(nd)
322 	struct nextdma_config *nd;
323 {
324 	DPRINTF(("DMA next_dma_setup_curr_regs()\n"));
325 
326 	if (nd->nd_intr == NEXT_I_ENETX_DMA) {
327 			/* Ethernet transmit needs secret magic */
328 
329 		if (nd->_nd_map) {
330 
331 			bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_NEXT_INITBUF,
332 					nd->_nd_map->dm_segs[nd->_nd_idx].ds_addr);
333 			bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_LIMIT,
334 					((nd->_nd_map->dm_segs[nd->_nd_idx].ds_addr +
335 							nd->_nd_map->dm_segs[nd->_nd_idx].ds_len)
336 							+ 0x0) | 0x80000000);
337 		} else {
338 			bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_NEXT_INITBUF,0xdeadbeef);
339 			bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_LIMIT, 0xdeadbeef);
340 
341 		}
342 
343 #if 0
344 		bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_NEXT,
345 				bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_NEXT_INITBUF));
346 		bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_LIMIT,
347 				bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_LIMIT));
348 #else
349 		bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_NEXT, 0xfeedbeef);
350 		bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_LIMIT, 0xfeedbeef);
351 #endif
352 
353 	} else {
354 
355 		if (nd->_nd_map) {
356 
357 			bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_NEXT_INITBUF,
358 					nd->_nd_map->dm_segs[nd->_nd_idx].ds_addr);
359 			bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_LIMIT,
360 					nd->_nd_map->dm_segs[nd->_nd_idx].ds_addr +
361 					nd->_nd_map->dm_segs[nd->_nd_idx].ds_len);
362 		} else {
363 			bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_NEXT_INITBUF, 0xdeadbeef);
364 			bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_LIMIT, 0xdeadbeef);
365 
366 		}
367 
368 #if 0
369 		bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_NEXT,
370 				bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_NEXT_INITBUF));
371 		bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_LIMIT,
372 				bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_LIMIT));
373 #else
374 		bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_NEXT, 0xfeedbeef);
375 		bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_LIMIT, 0xfeedbeef);
376 #endif
377 
378 	}
379 
380 }
381 
382 
383 /* This routine is used for debugging */
384 
385 void
386 next_dma_print(nd)
387 	struct nextdma_config *nd;
388 {
389 	u_long dd_csr;
390 	u_long dd_next;
391 	u_long dd_next_initbuf;
392 	u_long dd_limit;
393 	u_long dd_start;
394 	u_long dd_stop;
395 	u_long dd_saved_next;
396 	u_long dd_saved_limit;
397 	u_long dd_saved_start;
398 	u_long dd_saved_stop;
399 
400   /* Read all of the registers before we print anything out,
401 	 * in case something changes
402 	 */
403 	dd_csr          = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_CSR);
404 	dd_next         = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_NEXT);
405 	dd_next_initbuf = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_NEXT_INITBUF);
406 	dd_limit        = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_LIMIT);
407 	dd_start        = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_START);
408 	dd_stop         = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_STOP);
409 	dd_saved_next   = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_NEXT);
410 	dd_saved_limit  = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_LIMIT);
411 	dd_saved_start  = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_START);
412 	dd_saved_stop   = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_STOP);
413 
414 	if (nd->_nd_map) {
415 		printf("NDMAP: nd->_nd_map->dm_segs[%d].ds_addr = 0x%08lx\n",
416 				nd->_nd_idx,nd->_nd_map->dm_segs[nd->_nd_idx].ds_addr);
417 		printf("NDMAP: nd->_nd_map->dm_segs[%d].ds_len = %d\n",
418 				nd->_nd_idx,nd->_nd_map->dm_segs[nd->_nd_idx].ds_len);
419 	} else {
420 		printf("NDMAP: nd->_nd_map = NULL\n");
421 	}
422 	if (nd->_nd_map_cont) {
423 		printf("NDMAP: nd->_nd_map_cont->dm_segs[%d].ds_addr = 0x%08lx\n",
424 				nd->_nd_idx_cont,nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_addr);
425 		printf("NDMAP: nd->_nd_map_cont->dm_segs[%d].ds_len = %d\n",
426 				nd->_nd_idx_cont,nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_len);
427 	} else {
428 		printf("NDMAP: nd->_nd_map_cont = NULL\n");
429 	}
430 
431 	printf("NDMAP: dd->dd_csr          = 0x%b\n",   dd_csr,   DMACSR_BITS);
432 	printf("NDMAP: dd->dd_saved_next   = 0x%08x\n", dd_saved_next);
433 	printf("NDMAP: dd->dd_saved_limit  = 0x%08x\n", dd_saved_limit);
434 	printf("NDMAP: dd->dd_saved_start  = 0x%08x\n", dd_saved_start);
435 	printf("NDMAP: dd->dd_saved_stop   = 0x%08x\n", dd_saved_stop);
436 	printf("NDMAP: dd->dd_next         = 0x%08x\n", dd_next);
437 	printf("NDMAP: dd->dd_next_initbuf = 0x%08x\n", dd_next_initbuf);
438 	printf("NDMAP: dd->dd_limit        = 0x%08x\n", dd_limit);
439 	printf("NDMAP: dd->dd_start        = 0x%08x\n", dd_start);
440 	printf("NDMAP: dd->dd_stop         = 0x%08x\n", dd_stop);
441 
442 	printf("NDMAP: interrupt ipl (%ld) intr(0x%b)\n",
443 			NEXT_I_IPL(nd->nd_intr), NEXT_I_BIT(nd->nd_intr),NEXT_INTR_BITS);
444 }
445 
446 /****************************************************************/
447 
448 int
449 nextdma_intr(arg)
450      void *arg;
451 {
452   struct nextdma_config *nd = arg;
453 
454   /* @@@ This is bogus, we can't be certain of arg's type
455 	 * unless the interrupt is for us
456 	 */
457 
458   if (!INTR_OCCURRED(nd->nd_intr)) return 0;
459   /* Handle dma interrupts */
460 
461 #ifdef DIAGNOSTIC
462 	if (nd->nd_intr == NEXT_I_ENETR_DMA) {
463 		if (debugernd != nd) {
464 			panic("DMA incorrect handling of rx nd->nd_intr");
465 		}
466 	}
467 	if (nd->nd_intr == NEXT_I_ENETX_DMA) {
468 		if (debugexnd != nd) {
469 			panic("DMA incorrect handling of tx nd->nd_intr");
470 		}
471 	}
472 #endif
473 
474   DPRINTF(("DMA interrupt ipl (%ld) intr(0x%b)\n",
475           NEXT_I_IPL(nd->nd_intr), NEXT_I_BIT(nd->nd_intr),NEXT_INTR_BITS));
476 
477 #ifdef DIAGNOSTIC
478 	if (!nd->_nd_map) {
479 		next_dma_print(nd);
480 		panic("DMA missing current map in interrupt!\n");
481 	}
482 #endif
483 
484   {
485     int state = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_CSR);
486 
487 #ifdef DIAGNOSTIC
488 		if (!(state & DMACSR_COMPLETE)) {
489 			next_dma_print(nd);
490 			printf("DEBUG: state = 0x%b\n", state,DMACSR_BITS);
491 			panic("DMA  ipl (%ld) intr(0x%b), DMACSR_COMPLETE not set in intr\n",
492 					NEXT_I_IPL(nd->nd_intr), NEXT_I_BIT(nd->nd_intr),NEXT_INTR_BITS);
493 		}
494 #endif
495 
496 #if 0 /* This bit gets set sometimes & I don't know why. */
497 #ifdef DIAGNOSTIC
498 		if (state & DMACSR_BUSEXC) {
499 			next_dma_print(nd);
500 			printf("DEBUG: state = 0x%b\n", state,DMACSR_BITS);
501 			panic("DMA  ipl (%ld) intr(0x%b), DMACSR_BUSEXC set in intr\n",
502 					NEXT_I_IPL(nd->nd_intr), NEXT_I_BIT(nd->nd_intr),NEXT_INTR_BITS);
503 		}
504 #endif
505 #endif
506 
507 		/* Check to see if we are expecting dma to shut down */
508 		if (!nd->_nd_map_cont) {
509 
510 #ifdef DIAGNOSTIC
511 			if (state & (DMACSR_SUPDATE|DMACSR_ENABLE)) {
512 				next_dma_print(nd);
513 				panic("unexpected bits set in DMA state at shutdown (0x%b)\n", state,DMACSR_BITS);
514 			}
515 #endif
516 
517 #ifdef DIAGNOSTIC
518 #if 0 /* Sometimes the DMA registers have totally bogus values when read.
519 			 * Until that's understood, we skip this check
520 			 */
521 
522 			/* Verify that the registers are laid out as expected */
523 			{
524 				bus_addr_t next;
525 				bus_addr_t limit;
526 				bus_addr_t expected_limit;
527 				expected_limit =
528 						nd->_nd_map->dm_segs[nd->_nd_idx].ds_addr +
529 						nd->_nd_map->dm_segs[nd->_nd_idx].ds_len;
530 
531 				if (nd->nd_intr == NEXT_I_ENETX_DMA) {
532 					next  = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_NEXT_INITBUF);
533 					limit = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_LIMIT) & ~0x80000000;
534 				} else {
535 					next  = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_NEXT);
536 					limit = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_LIMIT);
537 				}
538 
539 				if ((next != limit) || (limit != expected_limit)) {
540 					next_dma_print(nd);
541 					printf("DEBUG: state = 0x%b\n", state,DMACSR_BITS);
542 					panic("unexpected DMA limit at shutdown 0x%08x, 0x%08x, 0x%08x",
543 							next,limit,expected_limit);
544 				}
545 			}
546 #endif
547 #endif
548 
549 			if ((nd->_nd_idx+1) == nd->_nd_map->dm_nsegs) {
550 				if (nd->nd_completed_cb)
551 					(*nd->nd_completed_cb)(nd->_nd_map, nd->nd_cb_arg);
552 			}
553 			nd->_nd_map = 0;
554 			nd->_nd_idx = 0;
555 
556 			bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR,
557 					DMACSR_CLRCOMPLETE | DMACSR_RESET);
558 
559 			DPRINTF(("DMA: a normal and expected shutdown occurred\n"));
560 			if (nd->nd_shutdown_cb) (*nd->nd_shutdown_cb)(nd->nd_cb_arg);
561 
562 			return(1);
563 		}
564 
565 #if 0
566 #ifdef DIAGNOSTIC
567 		if (!(state & DMACSR_SUPDATE)) {
568 			next_dma_print(nd);
569 			printf("DEBUG: state = 0x%b\n", state,DMACSR_BITS);
570 			panic("SUPDATE not set with continuing DMA");
571 		}
572 #endif
573 #endif
574 
575 		/* Check that the buffer we are interrupted for is the one we expect.
576 		 * Shorten the buffer if the dma completed with a short buffer
577 		 */
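		/* (if the transfer stopped short, DD_SAVED_LIMIT points inside the
		 * segment rather than at its end, and ds_len is trimmed to
		 * limit - next below) */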
578 		{
579 			bus_addr_t next;
580 			bus_addr_t limit;
581 			bus_addr_t expected_next;
582 			bus_addr_t expected_limit;
583 
584 			expected_next = nd->_nd_map->dm_segs[nd->_nd_idx].ds_addr;
585 			expected_limit = expected_next + nd->_nd_map->dm_segs[nd->_nd_idx].ds_len;
586 
587 #if 0 /* for some unknown reason, sometimes DD_SAVED_NEXT has the value from
588 			 * nd->_nd_map and sometimes it has the value from nd->_nd_map_cont.
589 			 * Sometimes, it has a completely different unknown value.
590 			 * Until that's understood, we won't sanity check the expected_next value.
591 			 */
592 			next  = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_NEXT);
593 #else
594 			next  = expected_next;
595 #endif
596 			limit = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_LIMIT);
597 
598 			if (nd->nd_intr == NEXT_I_ENETX_DMA) {
599 				limit &= ~0x80000000;
600 			}
601 
602 			if ((limit < next) ||
603 					(limit-next >= expected_limit-expected_next)) {
604 #ifdef DIAGNOSTIC
605 #if 0 /* Sometimes, (under load I think) even DD_SAVED_LIMIT has
606 			 * a bogus value.  Until that's understood, we don't panic
607 			 * here.
608 			 */
609 				next_dma_print(nd);
610 				printf("DEBUG: state = 0x%b\n", state,DMACSR_BITS);
611 				panic("Unexpected saved register values.");
612 #endif
613 #endif
614 			} else {
615 				/* Set the length of the segment to match actual length.
616 				 * @@@ is it okay to resize dma segments here?
617 				 * I should probably ask Jason about this.
618 				 */
619 				nd->_nd_map->dm_segs[nd->_nd_idx].ds_len = limit-next;
620 				expected_limit = expected_next + nd->_nd_map->dm_segs[nd->_nd_idx].ds_len;
621 			}
622 
623 #if 0 /* these checks are turned off until the above mentioned weirdness is fixed. */
624 #ifdef DIAGNOSTIC
625 			if (next != expected_next) {
626 				next_dma_print(nd);
627 				printf("DEBUG: state = 0x%b\n", state,DMACSR_BITS);
628 				panic("unexpected DMA next buffer in interrupt (found 0x%08x, expected 0x%08x)",
629 						next,expected_next);
630 			}
631 			if (limit != expected_limit) {
632 				next_dma_print(nd);
633 				printf("DEBUG: state = 0x%b\n", state,DMACSR_BITS);
634 				panic("unexpected DMA limit buffer in interrupt (found 0x%08x, expected 0x%08x)",
635 						limit,expected_limit);
636 			}
637 #endif
638 #endif
639 		}
640 
641 		next_dma_rotate(nd);
642 		next_dma_setup_cont_regs(nd);
643 
644 		if (!(state & DMACSR_ENABLE)) {
645 			DPRINTF(("Unexpected DMA shutdown, restarting.\n"));
646 
647 			if (nd->_nd_map_cont) {
648 				bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR,
649 						DMACSR_SETSUPDATE | DMACSR_SETENABLE | nd->_nd_dmadir);
650 			} else {
651 				bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR,
652 						DMACSR_SETENABLE | nd->_nd_dmadir);
653 			}
654 
655 		} else {
656 
657 			if (nd->_nd_map_cont) {
658 				bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR,
659 						DMACSR_SETSUPDATE | DMACSR_CLRCOMPLETE | nd->_nd_dmadir);
660 			} else {
661 				bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR,
662 						DMACSR_CLRCOMPLETE | nd->_nd_dmadir);
663 			}
664 		}
665 
666 	}
667 
668   DPRINTF(("DMA exiting interrupt ipl (%ld) intr(0x%b)\n",
669           NEXT_I_IPL(nd->nd_intr), NEXT_I_BIT(nd->nd_intr),NEXT_INTR_BITS));
670 
671   return(1);
672 }
673 
674 /*
675  * Check to see if dma has finished for a channel */
676 int
677 nextdma_finished(nd)
678 	struct nextdma_config *nd;
679 {
680 	int r;
681 	int s;
682 	s = spldma();									/* @@@ should this be splimp()? */
683 	r = (nd->_nd_map == NULL) && (nd->_nd_map_cont == NULL);
684 	splx(s);
685 	return(r);
686 }
687 
688 void
689 nextdma_start(nd, dmadir)
690 	struct nextdma_config *nd;
691 	u_long dmadir;								/* 	DMACSR_READ or DMACSR_WRITE */
692 {
693 
694 #ifdef DIAGNOSTIC
695 	if (!nextdma_finished(nd)) {
696 		panic("DMA trying to start before previous finished on intr(0x%b)\n",
697 				NEXT_I_BIT(nd->nd_intr),NEXT_INTR_BITS);
698 	}
699 #endif
700 
701   DPRINTF(("DMA start (%ld) intr(0x%b)\n",
702           NEXT_I_IPL(nd->nd_intr), NEXT_I_BIT(nd->nd_intr),NEXT_INTR_BITS));
703 
704 #ifdef DIAGNOSTIC
705 	if (nd->_nd_map) {
706 		next_dma_print(nd);
707 		panic("DMA: nextdma_start() with non null map\n");
708 	}
709 	if (nd->_nd_map_cont) {
710 		next_dma_print(nd);
711 		panic("DMA: nextdma_start() with non null continue map\n");
712 	}
713 #endif
714 
715 #ifdef DIAGNOSTIC
716 	if ((dmadir != DMACSR_READ) && (dmadir != DMACSR_WRITE)) {
717 		panic("DMA: nextdma_start(), dmadir arg must be DMACSR_READ or DMACSR_WRITE\n");
718 	}
719 #endif
720 
721 	nd->_nd_dmadir = dmadir;
722 
723 	/* preload both the current and the continue maps */
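	/* (next_dma_rotate() is called twice: the first call fetches a map
	 * into the continue slot, the second moves it into the current slot
	 * and refills the continue slot, either with that map's next segment
	 * or with another map from nd_continue_cb) */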
724 	next_dma_rotate(nd);
725 
726 #ifdef DIAGNOSTIC
727 	if (!nd->_nd_map_cont) {
728 		panic("No map available in nextdma_start()");
729 	}
730 #endif
731 
732 	next_dma_rotate(nd);
733 
734 	DPRINTF(("DMA initiating DMA %s of %d segments on intr(0x%b)\n",
735 			(nd->_nd_dmadir == DMACSR_READ ? "read" : "write"), nd->_nd_map->dm_nsegs,
736 			NEXT_I_BIT(nd->nd_intr),NEXT_INTR_BITS));
737 
738 	bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR, 0);
739 	bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR,
740 			DMACSR_INITBUF | DMACSR_RESET | nd->_nd_dmadir);
741 
742 	next_dma_setup_curr_regs(nd);
743 	next_dma_setup_cont_regs(nd);
744 
745 #if (defined(ND_DEBUG))
746 	if (nextdma_debug) next_dma_print(nd);
747 #endif
748 
749 	if (nd->_nd_map_cont) {
750 		bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR,
751 				DMACSR_SETSUPDATE | DMACSR_SETENABLE | nd->_nd_dmadir);
752 	} else {
753 		bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR,
754 				DMACSR_SETENABLE | nd->_nd_dmadir);
755 	}
756 
757 }
758