/*	$NetBSD: dma.c,v 1.29 2003/04/01 20:41:36 thorpej Exp $	*/

/*-
 * Copyright (c) 1996, 1997 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1982, 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)dma.c	8.1 (Berkeley) 6/10/93
 */

/*
 * DMA driver
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: dma.c,v 1.29 2003/04/01 20:41:36 thorpej Exp $");

#include <machine/hp300spu.h>	/* XXX param.h includes cpu.h */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/device.h>
#include <sys/kernel.h>
#include <sys/proc.h>

#include <uvm/uvm_extern.h>

#include <machine/bus.h>

#include <m68k/cacheops.h>

#include <hp300/dev/intiovar.h>
#include <hp300/dev/dmareg.h>
#include <hp300/dev/dmavar.h>

/*
 * The largest single request will be MAXPHYS bytes which will require
 * at most MAXPHYS/PAGE_SIZE+1 chain elements to describe, i.e. if none of
 * the buffer pages are physically contiguous (MAXPHYS/PAGE_SIZE) and the
 * buffer is not page aligned (+1).
 */
#define	DMAMAXIO	(MAXPHYS/PAGE_SIZE+1)
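
/*
 * For example, with the usual 64KB MAXPHYS and the hp300's 4KB
 * PAGE_SIZE this works out to 64KB/4KB + 1 = 17 chain elements.
 */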

struct dma_chain {
	int	dc_count;	/* transfer count (in transfer-width units) */
	char	*dc_addr;	/* physical address of segment */
};

struct dma_channel {
	struct	dmaqueue *dm_job;		/* current job */
	struct	dmadevice *dm_hwaddr;		/* registers if DMA_C */
	struct	dmaBdevice *dm_Bhwaddr;		/* registers if not DMA_C */
	char	dm_flags;			/* misc. flags */
	u_short	dm_cmd;				/* DMA controller command */
	int	dm_cur;				/* current segment */
	int	dm_last;			/* last segment */
	struct	dma_chain dm_chain[DMAMAXIO];	/* all segments */
};

struct dma_softc {
	struct  device sc_dev;
	bus_space_tag_t sc_bst;
	bus_space_handle_t sc_bsh;

	struct	dmareg *sc_dmareg;		/* pointer to our hardware */
	struct	dma_channel sc_chan[NDMACHAN];	/* 2 channels */
	TAILQ_HEAD(, dmaqueue) sc_queue;	/* job queue */
	struct	callout sc_debug_ch;
	char	sc_type;			/* A, B, or C */
	int	sc_ipl;				/* our interrupt level */
	void	*sc_ih;				/* interrupt cookie */
};

/* types */
#define	DMA_B	0
#define DMA_C	1

/* flags */
#define DMAF_PCFLUSH	0x01	/* flush physical cache on completion */
#define DMAF_VCFLUSH	0x02	/* flush virtual cache on completion */
#define DMAF_NOINTR	0x04	/* skip completion interrupt on last segment */

int	dmamatch(struct device *, struct cfdata *, void *);
void	dmaattach(struct device *, struct device *, void *);

CFATTACH_DECL(dma, sizeof(struct dma_softc),
    dmamatch, dmaattach, NULL, NULL);

int	dmaintr __P((void *));

#ifdef DEBUG
int	dmadebug = 0;
#define DDB_WORD	0x01	/* same as DMAGO_WORD */
#define DDB_LWORD	0x02	/* same as DMAGO_LWORD */
#define	DDB_FOLLOW	0x04
#define DDB_IO		0x08

void	dmatimeout __P((void *));
int	dmatimo[NDMACHAN];

long	dmahits[NDMACHAN];
long	dmamisses[NDMACHAN];
long	dmabyte[NDMACHAN];
long	dmaword[NDMACHAN];
long	dmalword[NDMACHAN];
#endif

static struct dma_softc *dma_softc;

int
dmamatch(parent, match, aux)
	struct device *parent;
	struct cfdata *match;
	void *aux;
{
	struct intio_attach_args *ia = aux;
	static int dmafound = 0;                /* can only have one */

	if (strcmp("dma", ia->ia_modname) != 0 || dmafound)
		return (0);

	dmafound = 1;
	return (1);
}

void
dmaattach(parent, self, aux)
	struct device *parent, *self;
	void *aux;
{
	struct dma_softc *sc = (struct dma_softc *)self;
	struct intio_attach_args *ia = aux;
	struct dma_channel *dc;
	struct dmareg *dma;
	int i;
	char rev;

	/* There's just one. */
	dma_softc = sc;

	sc->sc_bst = ia->ia_bst;
	if (bus_space_map(sc->sc_bst, ia->ia_iobase, INTIO_DEVSIZE, 0,
	     &sc->sc_bsh)) {
		printf("%s: can't map registers\n", sc->sc_dev.dv_xname);
		return;
	}

	dma = (struct dmareg *)bus_space_vaddr(sc->sc_bst, sc->sc_bsh);
	sc->sc_dmareg = dma;

	/*
	 * Determine the DMA type.  A DMA_A or DMA_B will fail the
	 * following probe.
	 *
	 * XXX Don't know how to easily differentiate the A and B cards,
	 * so we just hope nobody has an A card (A cards will work if
	 * splbio works out to ipl 3).
	 */
	if (badbaddr((char *)&dma->dma_id[2])) {
		rev = 'B';
#if !defined(HP320)
		panic("dmainit: DMA card requires hp320 support");
#endif
	} else
		rev = dma->dma_id[2];

	sc->sc_type = (rev == 'B') ? DMA_B : DMA_C;

	TAILQ_INIT(&sc->sc_queue);
	callout_init(&sc->sc_debug_ch);

	for (i = 0; i < NDMACHAN; i++) {
		dc = &sc->sc_chan[i];
		dc->dm_job = NULL;
		switch (i) {
		case 0:
			dc->dm_hwaddr = &dma->dma_chan0;
			dc->dm_Bhwaddr = &dma->dma_Bchan0;
			break;

		case 1:
			dc->dm_hwaddr = &dma->dma_chan1;
			dc->dm_Bhwaddr = &dma->dma_Bchan1;
			break;

		default:
			panic("dmainit: more than 2 channels?");
			/* NOTREACHED */
		}
	}

#ifdef DEBUG
	/* make sure timeout is really not needed */
	callout_reset(&sc->sc_debug_ch, 30 * hz, dmatimeout, sc);
#endif

	printf(": 98620%c, 2 channels, %d-bit DMA\n",
	    rev, (rev == 'B') ? 16 : 32);

	/*
	 * Defer hooking up our interrupt until the first
	 * DMA-using controller has hooked up theirs.
	 */
	sc->sc_ih = NULL;
}

/*
 * Compute the ipl and (re)establish the interrupt handler
 * for the DMA controller.
 */
void
dmacomputeipl()
{
	struct dma_softc *sc = dma_softc;

	if (sc->sc_ih != NULL)
		intr_disestablish(sc->sc_ih);

	/*
	 * Our interrupt level must be as high as the highest
	 * device using DMA (i.e. splbio).
	 */
	sc->sc_ipl = PSLTOIPL(hp300_ipls[HP300_IPL_BIO]);
	sc->sc_ih = intr_establish(dmaintr, sc, sc->sc_ipl, IPL_BIO);
}

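/*
 * Request a DMA channel for the job dq.  On entry, dq->dq_chan is a
 * bitmask of acceptable channels; if one of them is free it is claimed,
 * dq->dq_chan is overwritten with the channel number, and 1 is returned.
 * Otherwise the job is queued and 0 is returned; when a channel frees
 * up, dmafree() assigns it and calls the job's dq_start routine.
 */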
int
dmareq(dq)
	struct dmaqueue *dq;
{
	struct dma_softc *sc = dma_softc;
	int i, chan, s;

#if 1
	s = splhigh();	/* XXXthorpej */
#else
	s = splbio();
#endif

	chan = dq->dq_chan;
	for (i = NDMACHAN - 1; i >= 0; i--) {
		/*
		 * Can we use this channel?
		 */
		if ((chan & (1 << i)) == 0)
			continue;

		/*
		 * We can use it; is it busy?
		 */
		if (sc->sc_chan[i].dm_job != NULL)
			continue;

		/*
		 * Not busy; give the caller this channel.
		 */
		sc->sc_chan[i].dm_job = dq;
		dq->dq_chan = i;
		splx(s);
		return (1);
	}

	/*
	 * Couldn't get a channel now; put this in the queue.
	 */
	TAILQ_INSERT_TAIL(&sc->sc_queue, dq, dq_list);
	splx(s);
	return (0);
}

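/*
 * Release a DMA channel: reset the hardware, perform any cache flushes
 * still pending from dmastop(), then hand the channel to the first
 * queued job that can use it and kick off that job's dq_start routine.
 */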
void
dmafree(dq)
	struct dmaqueue *dq;
{
	int unit = dq->dq_chan;
	struct dma_softc *sc = dma_softc;
	struct dma_channel *dc = &sc->sc_chan[unit];
	struct dmaqueue *dn;
	int chan, s;

#if 1
	s = splhigh();	/* XXXthorpej */
#else
	s = splbio();
#endif

#ifdef DEBUG
	dmatimo[unit] = 0;
#endif

	DMA_CLEAR(dc);

#if defined(CACHE_HAVE_PAC) || defined(M68040)
	/*
	 * XXX we may not always go thru the flush code in dmastop()
	 */
	if (dc->dm_flags & DMAF_PCFLUSH) {
		PCIA();
		dc->dm_flags &= ~DMAF_PCFLUSH;
	}
#endif

#if defined(CACHE_HAVE_VAC)
	if (dc->dm_flags & DMAF_VCFLUSH) {
		/*
		 * 320/350s have VACs that may also need flushing.
		 * In our case we only flush the supervisor side
		 * because we know that if we are DMAing to user
		 * space, the physical pages will also be mapped
		 * in kernel space (via vmapbuf) and hence cache-
		 * inhibited by the pmap module due to the multiple
		 * mapping.
		 */
		DCIS();
		dc->dm_flags &= ~DMAF_VCFLUSH;
	}
#endif

	/*
	 * Channel is now free.  Look for another job to run on this
	 * channel.
	 */
	dc->dm_job = NULL;
	chan = 1 << unit;
	for (dn = sc->sc_queue.tqh_first; dn != NULL;
	    dn = dn->dq_list.tqe_next) {
		if (dn->dq_chan & chan) {
			/* Found one... */
			TAILQ_REMOVE(&sc->sc_queue, dn, dq_list);
			dc->dm_job = dn;
			dn->dq_chan = dq->dq_chan;
			splx(s);

			/* Start the initiator. */
			(*dn->dq_start)(dn->dq_softc);
			return;
		}
	}
	splx(s);
}

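/*
 * Start a transfer of count bytes at addr on the given channel:
 * translate the buffer into a chain of physically contiguous segments,
 * merging adjacent pages where possible, then set up the command word
 * and arm the hardware with the first segment.
 */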
void
dmago(unit, addr, count, flags)
	int unit;
	char *addr;
	int count;
	int flags;
{
	struct dma_softc *sc = dma_softc;
	struct dma_channel *dc = &sc->sc_chan[unit];
	char *dmaend = NULL;
	int seg, tcount;

	if (count > MAXPHYS)
		panic("dmago: count > MAXPHYS");

#if defined(HP320)
	if (sc->sc_type == DMA_B && (flags & DMAGO_LWORD))
		panic("dmago: no can do 32-bit DMA");
#endif

#ifdef DEBUG
	if (dmadebug & DDB_FOLLOW)
		printf("dmago(%d, %p, %x, %x)\n",
		       unit, addr, count, flags);
	if (flags & DMAGO_LWORD)
		dmalword[unit]++;
	else if (flags & DMAGO_WORD)
		dmaword[unit]++;
	else
		dmabyte[unit]++;
#endif
	/*
	 * Build the DMA chain
	 */
	for (seg = 0; count > 0; seg++) {
		dc->dm_chain[seg].dc_addr = (char *) kvtop(addr);
#if defined(M68040)
		/*
		 * Push back dirty cache lines
		 */
		if (mmutype == MMU_68040)
			DCFP((paddr_t)dc->dm_chain[seg].dc_addr);
#endif
		if (count < (tcount = PAGE_SIZE - ((int)addr & PGOFSET)))
			tcount = count;
		dc->dm_chain[seg].dc_count = tcount;
		addr += tcount;
		count -= tcount;
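		/*
		 * The chain counts are kept in transfer-width units:
		 * scale the byte count down for word and longword
		 * transfers before it is folded into the chain below.
		 */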
		if (flags & DMAGO_LWORD)
			tcount >>= 2;
		else if (flags & DMAGO_WORD)
			tcount >>= 1;

		/*
		 * Try to compact the DMA transfer if the pages are
		 * adjacent; e.g. two physically contiguous pages merge
		 * into a single chain element.
		 * Note: this will never happen on the first iteration.
		 */
		if (dc->dm_chain[seg].dc_addr == dmaend
#if defined(HP320)
		    /* only 16-bit count on 98620B */
		    && (sc->sc_type != DMA_B ||
			dc->dm_chain[seg - 1].dc_count + tcount <= 65536)
#endif
		) {
#ifdef DEBUG
			dmahits[unit]++;
#endif
			dmaend += dc->dm_chain[seg].dc_count;
			dc->dm_chain[--seg].dc_count += tcount;
		} else {
#ifdef DEBUG
			dmamisses[unit]++;
#endif
			dmaend = dc->dm_chain[seg].dc_addr +
			    dc->dm_chain[seg].dc_count;
			dc->dm_chain[seg].dc_count = tcount;
		}
	}
	dc->dm_cur = 0;
	dc->dm_last = --seg;
	dc->dm_flags = 0;
	/*
	 * Set up the command word based on flags
	 */
	dc->dm_cmd = DMA_ENAB | DMA_IPL(sc->sc_ipl) | DMA_START;
	if ((flags & DMAGO_READ) == 0)
		dc->dm_cmd |= DMA_WRT;
	if (flags & DMAGO_LWORD)
		dc->dm_cmd |= DMA_LWORD;
	else if (flags & DMAGO_WORD)
		dc->dm_cmd |= DMA_WORD;
	if (flags & DMAGO_PRI)
		dc->dm_cmd |= DMA_PRI;

#if defined(M68040)
	/*
	 * On the 68040 we need to flush (push) the data cache before a
	 * DMA (already done above) and flush again after DMA completes.
	 * In theory we should only need to flush prior to a write DMA
	 * and purge after a read DMA but if the entire page is not
	 * involved in the DMA we might purge some valid data.
	 */
	if (mmutype == MMU_68040 && (flags & DMAGO_READ))
		dc->dm_flags |= DMAF_PCFLUSH;
#endif

#if defined(CACHE_HAVE_PAC)
	/*
	 * Remember if we need to flush external physical cache when
	 * DMA is done.  We only do this if we are reading (writing memory).
	 */
	if (ectype == EC_PHYS && (flags & DMAGO_READ))
		dc->dm_flags |= DMAF_PCFLUSH;
#endif

#if defined(CACHE_HAVE_VAC)
	if (ectype == EC_VIRT && (flags & DMAGO_READ))
		dc->dm_flags |= DMAF_VCFLUSH;
#endif

	/*
	 * Remember if we can skip the dma completion interrupt on
	 * the last segment in the chain.
	 */
	if (flags & DMAGO_NOINT) {
		if (dc->dm_cur == dc->dm_last)
			dc->dm_cmd &= ~DMA_ENAB;
		else
			dc->dm_flags |= DMAF_NOINTR;
	}
#ifdef DEBUG
	if (dmadebug & DDB_IO) {
		if (((dmadebug&DDB_WORD) && (dc->dm_cmd&DMA_WORD)) ||
		    ((dmadebug&DDB_LWORD) && (dc->dm_cmd&DMA_LWORD))) {
			printf("dmago: cmd %x, flags %x\n",
			       dc->dm_cmd, dc->dm_flags);
			for (seg = 0; seg <= dc->dm_last; seg++)
				printf("  %d: %d@%p\n", seg,
				    dc->dm_chain[seg].dc_count,
				    dc->dm_chain[seg].dc_addr);
		}
	}
	dmatimo[unit] = 1;
#endif
	DMA_ARM(sc, dc);
}

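/*
 * Finish up the current job on a channel after its last segment has
 * completed: reset the hardware, perform any pending cache flushes,
 * and call the job's dq_done routine.
 */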
void
dmastop(unit)
	int unit;
{
	struct dma_softc *sc = dma_softc;
	struct dma_channel *dc = &sc->sc_chan[unit];

#ifdef DEBUG
	if (dmadebug & DDB_FOLLOW)
		printf("dmastop(%d)\n", unit);
	dmatimo[unit] = 0;
#endif
	DMA_CLEAR(dc);

#if defined(CACHE_HAVE_PAC) || defined(M68040)
	if (dc->dm_flags & DMAF_PCFLUSH) {
		PCIA();
		dc->dm_flags &= ~DMAF_PCFLUSH;
	}
#endif

#if defined(CACHE_HAVE_VAC)
	if (dc->dm_flags & DMAF_VCFLUSH) {
		/*
		 * 320/350s have VACs that may also need flushing.
		 * In our case we only flush the supervisor side
		 * because we know that if we are DMAing to user
		 * space, the physical pages will also be mapped
		 * in kernel space (via vmapbuf) and hence cache-
		 * inhibited by the pmap module due to the multiple
		 * mapping.
		 */
		DCIS();
		dc->dm_flags &= ~DMAF_VCFLUSH;
	}
#endif

	/*
	 * We may get this interrupt after a device service routine
	 * has freed the dma channel.  So, ignore the intr if the
	 * channel no longer has a job attached.
	 */
	if (dc->dm_job != NULL)
		(*dc->dm_job->dq_done)(dc->dm_job->dq_softc);
}

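/*
 * DMA interrupt handler: for each channel with an interrupt pending,
 * arm the next segment of its chain, or finish the job via dmastop()
 * once the last segment has completed.
 */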
int
dmaintr(arg)
	void *arg;
{
	struct dma_softc *sc = arg;
	struct dma_channel *dc;
	int i, stat;
	int found = 0;

#ifdef DEBUG
	if (dmadebug & DDB_FOLLOW)
		printf("dmaintr\n");
#endif
	for (i = 0; i < NDMACHAN; i++) {
		dc = &sc->sc_chan[i];
		stat = DMA_STAT(dc);
		if ((stat & DMA_INTR) == 0)
			continue;
		found++;
#ifdef DEBUG
		if (dmadebug & DDB_IO) {
			if (((dmadebug&DDB_WORD) && (dc->dm_cmd&DMA_WORD)) ||
			    ((dmadebug&DDB_LWORD) && (dc->dm_cmd&DMA_LWORD)))
			  printf("dmaintr: flags %x unit %d stat %x next %d\n",
			   dc->dm_flags, i, stat, dc->dm_cur + 1);
		}
		if (stat & DMA_ARMED)
			printf("dma channel %d: intr when armed\n", i);
#endif
		/*
		 * Load the next segment, or finish up if we're done.
		 */
		dc->dm_cur++;
		if (dc->dm_cur <= dc->dm_last) {
#ifdef DEBUG
			dmatimo[i] = 1;
#endif
			/*
			 * If we're the last segment, disable the
			 * completion interrupt, if necessary.
			 */
			if (dc->dm_cur == dc->dm_last &&
			    (dc->dm_flags & DMAF_NOINTR))
				dc->dm_cmd &= ~DMA_ENAB;
			DMA_CLEAR(dc);
			DMA_ARM(sc, dc);
		} else
			dmastop(i);
	}
	return (found);
}

#ifdef DEBUG
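/*
 * Debug watchdog: dmatimo[unit] is set when a segment is started and
 * cleared on completion; if it is still set the next time this
 * 30-second callout fires, complain about the stuck channel.
 */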
void
dmatimeout(arg)
	void *arg;
{
	int i, s;
	struct dma_softc *sc = arg;

	for (i = 0; i < NDMACHAN; i++) {
		s = splbio();
		if (dmatimo[i]) {
			if (dmatimo[i] > 1)
				printf("dma channel %d timeout #%d\n",
				    i, dmatimo[i]-1);
			dmatimo[i]++;
		}
		splx(s);
	}
	callout_reset(&sc->sc_debug_ch, 30 * hz, dmatimeout, sc);
}
#endif