/*	$NetBSD: dma.c,v 1.31 2004/08/28 17:37:00 thorpej Exp $	*/

/*-
 * Copyright (c) 1996, 1997 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1982, 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)dma.c	8.1 (Berkeley) 6/10/93
 */

/*
 * DMA driver
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: dma.c,v 1.31 2004/08/28 17:37:00 thorpej Exp $");

#include <machine/hp300spu.h>	/* XXX param.h includes cpu.h */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/device.h>
#include <sys/kernel.h>
#include <sys/proc.h>

#include <uvm/uvm_extern.h>

#include <machine/bus.h>

#include <m68k/cacheops.h>

#include <hp300/dev/intiovar.h>
#include <hp300/dev/dmareg.h>
#include <hp300/dev/dmavar.h>

/*
 * The largest single request will be MAXPHYS bytes which will require
 * at most MAXPHYS/PAGE_SIZE+1 chain elements to describe, i.e. if none of
 * the buffer pages are physically contiguous (MAXPHYS/PAGE_SIZE) and the
 * buffer is not page aligned (+1).
 */
#define	DMAMAXIO	(MAXPHYS/PAGE_SIZE+1)
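/*
 * For example, with 4KB pages and the default 64KB MAXPHYS (the usual
 * hp300 values), that works out to 64/4 + 1 = 17 chain elements.
 */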

struct dma_chain {
	int	dc_count;	/* transfer count (bytes/words/longwords) */
	char	*dc_addr;	/* physical address of segment */
};

struct dma_channel {
	struct	dmaqueue *dm_job;		/* current job */
	struct	dmadevice *dm_hwaddr;		/* registers if DMA_C */
	struct	dmaBdevice *dm_Bhwaddr;		/* registers if not DMA_C */
	char	dm_flags;			/* misc. flags */
	u_short	dm_cmd;				/* DMA controller command */
	int	dm_cur;				/* current segment */
	int	dm_last;			/* last segment */
	struct	dma_chain dm_chain[DMAMAXIO];	/* all segments */
};

struct dma_softc {
	struct  device sc_dev;
	bus_space_tag_t sc_bst;
	bus_space_handle_t sc_bsh;

	struct	dmareg *sc_dmareg;		/* pointer to our hardware */
	struct	dma_channel sc_chan[NDMACHAN];	/* 2 channels */
	TAILQ_HEAD(, dmaqueue) sc_queue;	/* job queue */
	struct	callout sc_debug_ch;		/* debug watchdog callout */
	char	sc_type;			/* A, B, or C */
	int	sc_ipl;				/* our interrupt level */
	void	*sc_ih;				/* interrupt cookie */
};

/* types */
#define	DMA_B	0
#define DMA_C	1

/* flags */
#define DMAF_PCFLUSH	0x01	/* flush physical cache when done */
#define DMAF_VCFLUSH	0x02	/* flush virtual cache when done */
#define DMAF_NOINTR	0x04	/* skip interrupt on the final segment */

static int	dmamatch(struct device *, struct cfdata *, void *);
static void	dmaattach(struct device *, struct device *, void *);

CFATTACH_DECL(dma, sizeof(struct dma_softc),
    dmamatch, dmaattach, NULL, NULL);

static int	dmaintr(void *);

#ifdef DEBUG
int	dmadebug = 0;
#define DDB_WORD	0x01	/* same as DMAGO_WORD */
#define DDB_LWORD	0x02	/* same as DMAGO_LWORD */
#define	DDB_FOLLOW	0x04
#define DDB_IO		0x08

static void	dmatimeout(void *);
int	dmatimo[NDMACHAN];	/* watchdog: nonzero while channel armed */

long	dmahits[NDMACHAN];	/* chain segments coalesced with previous */
long	dmamisses[NDMACHAN];	/* chain segments that stand alone */
long	dmabyte[NDMACHAN];	/* byte transfers started */
long	dmaword[NDMACHAN];	/* word transfers started */
long	dmalword[NDMACHAN];	/* longword transfers started */
#endif

static struct dma_softc *dma_softc;

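/*
 * Match the system's single 98620 DMA controller on the internal
 * i/o bus.
 */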
static int
dmamatch(struct device *parent, struct cfdata *match, void *aux)
{
	struct intio_attach_args *ia = aux;
	static int dmafound = 0;                /* can only have one */

	if (strcmp("dma", ia->ia_modname) != 0 || dmafound)
		return (0);

	dmafound = 1;
	return (1);
}

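/*
 * Attach: map the controller registers, probe whether this is a
 * 98620B or a 98620C, and initialize both channels.  Establishing
 * the interrupt handler is deferred to dmacomputeipl().
 */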
static void
dmaattach(struct device *parent, struct device *self, void *aux)
{
	struct dma_softc *sc = (struct dma_softc *)self;
	struct intio_attach_args *ia = aux;
	struct dma_channel *dc;
	struct dmareg *dma;
	int i;
	char rev;

	/* There's just one. */
	dma_softc = sc;

	sc->sc_bst = ia->ia_bst;
	if (bus_space_map(sc->sc_bst, ia->ia_iobase, INTIO_DEVSIZE, 0,
	     &sc->sc_bsh)) {
		printf("%s: can't map registers\n", sc->sc_dev.dv_xname);
		return;
	}

	dma = (struct dmareg *)bus_space_vaddr(sc->sc_bst, sc->sc_bsh);
	sc->sc_dmareg = dma;

	/*
	 * Determine the DMA type.  A DMA_A or DMA_B will fail the
	 * following probe.
	 *
	 * XXX Don't know how to easily differentiate the A and B cards,
	 * so we just hope nobody has an A card (A cards will work if
	 * splbio works out to ipl 3).
	 */
	if (badbaddr((char *)&dma->dma_id[2])) {
		rev = 'B';
#if !defined(HP320)
		panic("dmainit: DMA card requires hp320 support");
#endif
	} else
		rev = dma->dma_id[2];

	sc->sc_type = (rev == 'B') ? DMA_B : DMA_C;

	TAILQ_INIT(&sc->sc_queue);
	callout_init(&sc->sc_debug_ch);

	for (i = 0; i < NDMACHAN; i++) {
		dc = &sc->sc_chan[i];
		dc->dm_job = NULL;
		switch (i) {
		case 0:
			dc->dm_hwaddr = &dma->dma_chan0;
			dc->dm_Bhwaddr = &dma->dma_Bchan0;
			break;

		case 1:
			dc->dm_hwaddr = &dma->dma_chan1;
			dc->dm_Bhwaddr = &dma->dma_Bchan1;
			break;

		default:
			panic("dmainit: more than 2 channels?");
			/* NOTREACHED */
		}
	}

#ifdef DEBUG
	/* make sure timeout is really not needed */
	callout_reset(&sc->sc_debug_ch, 30 * hz, dmatimeout, sc);
#endif

	printf(": 98620%c, 2 channels, %d-bit DMA\n",
	    rev, (rev == 'B') ? 16 : 32);

	/*
	 * Defer hooking up our interrupt until the first
	 * DMA-using controller has hooked up theirs.
	 */
	sc->sc_ih = NULL;
}

/*
 * Compute the ipl and (re)establish the interrupt handler
 * for the DMA controller.
 */
void
dmacomputeipl(void)
{
	struct dma_softc *sc = dma_softc;

	if (sc->sc_ih != NULL)
		intr_disestablish(sc->sc_ih);

	/*
	 * Our interrupt level must be as high as the highest
	 * device using DMA (i.e. splbio).
	 */
	sc->sc_ipl = PSLTOIPL(hp300_ipls[HP300_IPL_BIO]);
	sc->sc_ih = intr_establish(dmaintr, sc, sc->sc_ipl, IPL_BIO);
}

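/*
 * Request a DMA channel.  On entry dq->dq_chan is a bitmask of the
 * channels the caller is willing to use; on success it is overwritten
 * with the number of the channel granted.  Returns 1 if a channel was
 * free, 0 if the request had to be queued, in which case dq_start
 * will be called from dmafree() once a channel becomes available.
 *
 * A typical caller looks something like this (a sketch only: mydev_*
 * are placeholder driver routines, and the DMA0/DMA1 channel-mask
 * macros are assumed to come from dmavar.h):
 *
 *	dq->dq_chan = DMA0 | DMA1;
 *	dq->dq_softc = sc;
 *	dq->dq_start = mydev_dmastart;
 *	dq->dq_done = mydev_dmadone;
 *	if (dmareq(dq))
 *		mydev_dmastart(sc);
 */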
int
dmareq(struct dmaqueue *dq)
{
	struct dma_softc *sc = dma_softc;
	int i, chan, s;

#if 1
	s = splhigh();	/* XXXthorpej */
#else
	s = splbio();
#endif

	chan = dq->dq_chan;
	for (i = NDMACHAN - 1; i >= 0; i--) {
		/*
		 * Can we use this channel?
		 */
		if ((chan & (1 << i)) == 0)
			continue;

		/*
		 * We can use it; is it busy?
		 */
		if (sc->sc_chan[i].dm_job != NULL)
			continue;

		/*
		 * Not busy; give the caller this channel.
		 */
		sc->sc_chan[i].dm_job = dq;
		dq->dq_chan = i;
		splx(s);
		return (1);
	}

	/*
	 * Couldn't get a channel now; put this in the queue.
	 */
	TAILQ_INSERT_TAIL(&sc->sc_queue, dq, dq_list);
	splx(s);
	return (0);
}

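/*
 * Release the channel held by dq, performing any cache flushes that
 * were deferred from dmastop(), and hand the channel to the first
 * queued job that can use it.
 */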
void
dmafree(struct dmaqueue *dq)
{
	int unit = dq->dq_chan;
	struct dma_softc *sc = dma_softc;
	struct dma_channel *dc = &sc->sc_chan[unit];
	struct dmaqueue *dn;
	int chan, s;

#if 1
	s = splhigh();	/* XXXthorpej */
#else
	s = splbio();
#endif

#ifdef DEBUG
	dmatimo[unit] = 0;
#endif

	DMA_CLEAR(dc);

#if defined(CACHE_HAVE_PAC) || defined(M68040)
	/*
	 * XXX we may not always go thru the flush code in dmastop()
	 */
	if (dc->dm_flags & DMAF_PCFLUSH) {
		PCIA();
		dc->dm_flags &= ~DMAF_PCFLUSH;
	}
#endif

#if defined(CACHE_HAVE_VAC)
	if (dc->dm_flags & DMAF_VCFLUSH) {
		/*
		 * 320/350s have VACs that may also need flushing.
		 * In our case we only flush the supervisor side
		 * because we know that if we are DMAing to user
		 * space, the physical pages will also be mapped
		 * in kernel space (via vmapbuf) and hence cache-
		 * inhibited by the pmap module due to the multiple
		 * mapping.
		 */
		DCIS();
		dc->dm_flags &= ~DMAF_VCFLUSH;
	}
#endif

	/*
	 * Channel is now free.  Look for another job to run on this
	 * channel.
	 */
	dc->dm_job = NULL;
	chan = 1 << unit;
	for (dn = sc->sc_queue.tqh_first; dn != NULL;
	    dn = dn->dq_list.tqe_next) {
		if (dn->dq_chan & chan) {
			/* Found one... */
			TAILQ_REMOVE(&sc->sc_queue, dn, dq_list);
			dc->dm_job = dn;
			dn->dq_chan = dq->dq_chan;
			splx(s);

			/* Start the initiator. */
			(*dn->dq_start)(dn->dq_softc);
			return;
		}
	}
	splx(s);
}

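/*
 * Start a transfer of `count' bytes at kernel VA `addr' on channel
 * `unit'.  The buffer is translated, page by page, into a chain of
 * physical segments (physically adjacent pages are coalesced), the
 * command word is built from the flags, and the channel is armed with
 * the first segment; dmaintr() feeds it the rest of the chain.
 *
 * For instance, a word-mode (16-bit) read from a device into buf
 * would look roughly like this (flag values from dmavar.h, as used
 * below):
 *
 *	dmago(unit, buf, len, DMAGO_READ | DMAGO_WORD);
 */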
void
dmago(int unit, char *addr, int count, int flags)
{
	struct dma_softc *sc = dma_softc;
	struct dma_channel *dc = &sc->sc_chan[unit];
	char *dmaend = NULL;
	int seg, tcount;

	if (count > MAXPHYS)
		panic("dmago: count > MAXPHYS");

#if defined(HP320)
	if (sc->sc_type == DMA_B && (flags & DMAGO_LWORD))
		panic("dmago: no can do 32-bit DMA");
#endif

#ifdef DEBUG
	if (dmadebug & DDB_FOLLOW)
		printf("dmago(%d, %p, %x, %x)\n",
		       unit, addr, count, flags);
	if (flags & DMAGO_LWORD)
		dmalword[unit]++;
	else if (flags & DMAGO_WORD)
		dmaword[unit]++;
	else
		dmabyte[unit]++;
#endif
	/*
	 * Build the DMA chain
	 */
	for (seg = 0; count > 0; seg++) {
		dc->dm_chain[seg].dc_addr = (char *) kvtop(addr);
#if defined(M68040)
		/*
		 * Push back dirty cache lines
		 */
		if (mmutype == MMU_68040)
			DCFP((paddr_t)dc->dm_chain[seg].dc_addr);
#endif
		if (count < (tcount = PAGE_SIZE - ((int)addr & PGOFSET)))
			tcount = count;
		dc->dm_chain[seg].dc_count = tcount;
		addr += tcount;
		count -= tcount;
		if (flags & DMAGO_LWORD)
			tcount >>= 2;
		else if (flags & DMAGO_WORD)
			tcount >>= 1;

		/*
		 * Try to compact the DMA transfer if the pages are adjacent.
		 * Note: this will never happen on the first iteration.
		 */
		if (dc->dm_chain[seg].dc_addr == dmaend
#if defined(HP320)
		    /* only 16-bit count on 98620B */
		    && (sc->sc_type != DMA_B ||
			dc->dm_chain[seg - 1].dc_count + tcount <= 65536)
#endif
		) {
#ifdef DEBUG
			dmahits[unit]++;
#endif
			dmaend += dc->dm_chain[seg].dc_count;
			dc->dm_chain[--seg].dc_count += tcount;
		} else {
#ifdef DEBUG
			dmamisses[unit]++;
#endif
			dmaend = dc->dm_chain[seg].dc_addr +
			    dc->dm_chain[seg].dc_count;
			dc->dm_chain[seg].dc_count = tcount;
		}
	}
	dc->dm_cur = 0;
	dc->dm_last = --seg;
	dc->dm_flags = 0;
	/*
	 * Set up the command word based on flags
	 */
	dc->dm_cmd = DMA_ENAB | DMA_IPL(sc->sc_ipl) | DMA_START;
	if ((flags & DMAGO_READ) == 0)
		dc->dm_cmd |= DMA_WRT;
	if (flags & DMAGO_LWORD)
		dc->dm_cmd |= DMA_LWORD;
	else if (flags & DMAGO_WORD)
		dc->dm_cmd |= DMA_WORD;
	if (flags & DMAGO_PRI)
		dc->dm_cmd |= DMA_PRI;

#if defined(M68040)
	/*
	 * On the 68040 we need to flush (push) the data cache before a
	 * DMA (already done above) and flush again after DMA completes.
	 * In theory we should only need to flush prior to a write DMA
	 * and purge after a read DMA but if the entire page is not
	 * involved in the DMA we might purge some valid data.
	 */
	if (mmutype == MMU_68040 && (flags & DMAGO_READ))
		dc->dm_flags |= DMAF_PCFLUSH;
#endif

#if defined(CACHE_HAVE_PAC)
	/*
	 * Remember if we need to flush external physical cache when
	 * DMA is done.  We only do this if we are reading (writing memory).
	 */
	if (ectype == EC_PHYS && (flags & DMAGO_READ))
		dc->dm_flags |= DMAF_PCFLUSH;
#endif

#if defined(CACHE_HAVE_VAC)
	if (ectype == EC_VIRT && (flags & DMAGO_READ))
		dc->dm_flags |= DMAF_VCFLUSH;
#endif

	/*
	 * Remember if we can skip the dma completion interrupt on
	 * the last segment in the chain.
	 */
	if (flags & DMAGO_NOINT) {
		if (dc->dm_cur == dc->dm_last)
			dc->dm_cmd &= ~DMA_ENAB;
		else
			dc->dm_flags |= DMAF_NOINTR;
	}
#ifdef DEBUG
	if (dmadebug & DDB_IO) {
		if (((dmadebug&DDB_WORD) && (dc->dm_cmd&DMA_WORD)) ||
		    ((dmadebug&DDB_LWORD) && (dc->dm_cmd&DMA_LWORD))) {
			printf("dmago: cmd %x, flags %x\n",
			       dc->dm_cmd, dc->dm_flags);
			for (seg = 0; seg <= dc->dm_last; seg++)
				printf("  %d: %d@%p\n", seg,
				    dc->dm_chain[seg].dc_count,
				    dc->dm_chain[seg].dc_addr);
		}
	}
	dmatimo[unit] = 1;
#endif
	DMA_ARM(sc, dc);
}

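/*
 * Transfer on `unit' is complete: clear the channel, do any cache
 * flushes recorded in dm_flags, and call the client's done routine.
 * Called from dmaintr() once the last chain segment finishes.
 */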
void
dmastop(int unit)
{
	struct dma_softc *sc = dma_softc;
	struct dma_channel *dc = &sc->sc_chan[unit];

#ifdef DEBUG
	if (dmadebug & DDB_FOLLOW)
		printf("dmastop(%d)\n", unit);
	dmatimo[unit] = 0;
#endif
	DMA_CLEAR(dc);

#if defined(CACHE_HAVE_PAC) || defined(M68040)
	if (dc->dm_flags & DMAF_PCFLUSH) {
		PCIA();
		dc->dm_flags &= ~DMAF_PCFLUSH;
	}
#endif

#if defined(CACHE_HAVE_VAC)
	if (dc->dm_flags & DMAF_VCFLUSH) {
		/*
		 * 320/350s have VACs that may also need flushing.
		 * In our case we only flush the supervisor side
		 * because we know that if we are DMAing to user
		 * space, the physical pages will also be mapped
		 * in kernel space (via vmapbuf) and hence cache-
		 * inhibited by the pmap module due to the multiple
		 * mapping.
		 */
		DCIS();
		dc->dm_flags &= ~DMAF_VCFLUSH;
	}
#endif

	/*
	 * We may get this interrupt after a device service routine
	 * has freed the dma channel.  So, ignore the intr if there's
	 * nothing on the queue.
	 */
	if (dc->dm_job != NULL)
		(*dc->dm_job->dq_done)(dc->dm_job->dq_softc);
}

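/*
 * DMA completion interrupt.  Scan both channels; on each interrupting
 * channel either arm the next segment of the chain or, once the chain
 * is exhausted, finish up via dmastop().
 */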
static int
dmaintr(void *arg)
{
	struct dma_softc *sc = arg;
	struct dma_channel *dc;
	int i, stat;
	int found = 0;

#ifdef DEBUG
	if (dmadebug & DDB_FOLLOW)
		printf("dmaintr\n");
#endif
	for (i = 0; i < NDMACHAN; i++) {
		dc = &sc->sc_chan[i];
		stat = DMA_STAT(dc);
		if ((stat & DMA_INTR) == 0)
			continue;
		found++;
#ifdef DEBUG
		if (dmadebug & DDB_IO) {
			if (((dmadebug&DDB_WORD) && (dc->dm_cmd&DMA_WORD)) ||
			    ((dmadebug&DDB_LWORD) && (dc->dm_cmd&DMA_LWORD)))
			  printf("dmaintr: flags %x unit %d stat %x next %d\n",
			   dc->dm_flags, i, stat, dc->dm_cur + 1);
		}
		if (stat & DMA_ARMED)
			printf("dma channel %d: intr when armed\n", i);
#endif
		/*
		 * Load the next segment, or finish up if we're done.
		 */
		dc->dm_cur++;
		if (dc->dm_cur <= dc->dm_last) {
#ifdef DEBUG
			dmatimo[i] = 1;
#endif
			/*
			 * If this is the last segment, disable the
			 * completion interrupt, if necessary.
			 */
			if (dc->dm_cur == dc->dm_last &&
			    (dc->dm_flags & DMAF_NOINTR))
				dc->dm_cmd &= ~DMA_ENAB;
			DMA_CLEAR(dc);
			DMA_ARM(sc, dc);
		} else
			dmastop(i);
	}
	return (found);
}

#ifdef DEBUG
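/*
 * Debug watchdog, rearmed every 30 seconds: complain about any
 * channel that has stayed armed for a whole interval (dmatimo[] is
 * set when a channel is armed and cleared by dmastop()/dmafree()).
 */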
static void
dmatimeout(void *arg)
{
	int i, s;
	struct dma_softc *sc = arg;

	for (i = 0; i < NDMACHAN; i++) {
		s = splbio();
		if (dmatimo[i]) {
			if (dmatimo[i] > 1)
				printf("dma channel %d timeout #%d\n",
				    i, dmatimo[i]-1);
			dmatimo[i]++;
		}
		splx(s);
	}
	callout_reset(&sc->sc_debug_ch, 30 * hz, dmatimeout, sc);
}
#endif