/*	$NetBSD: dma.c,v 1.48 2024/04/30 09:55:45 tsutsui Exp $	*/

/*-
 * Copyright (c) 1996, 1997 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1982, 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)dma.c	8.1 (Berkeley) 6/10/93
 */

/*
 * DMA driver
 */

#include "opt_m68k_arch.h"

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: dma.c,v 1.48 2024/04/30 09:55:45 tsutsui Exp $");

#include <machine/hp300spu.h>	/* XXX param.h includes cpu.h */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/device.h>
#include <sys/kernel.h>
#include <sys/proc.h>

#include <uvm/uvm_extern.h>

#include <machine/bus.h>

#include <m68k/cacheops.h>

#include <hp300/dev/intiovar.h>
#include <hp300/dev/dmareg.h>
#include <hp300/dev/dmavar.h>

/*
 * The largest single request will be MAXPHYS bytes, which will require
 * at most MAXPHYS/PAGE_SIZE+1 chain elements to describe, i.e., if none of
 * the buffer pages are physically contiguous (MAXPHYS/PAGE_SIZE) and the
 * buffer is not page aligned (+1).
 */
#define	DMAMAXIO	(MAXPHYS/PAGE_SIZE+1)
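
/*
 * For example (illustrative only; the actual values depend on the
 * kernel configuration): with PAGE_SIZE = 4096 and MAXPHYS = 64KB,
 * DMAMAXIO = 64KB/4KB + 1 = 17 chain elements.
 */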

struct dma_chain {
	int	dc_count;	/* transfer count (in transfer-width units) */
	char	*dc_addr;	/* physical address of segment */
};

struct dma_channel {
	struct	dmaqueue *dm_job;		/* current job */
	struct	dmadevice *dm_hwaddr;		/* registers if DMA_C */
	struct	dmaBdevice *dm_Bhwaddr;		/* registers if not DMA_C */
	char	dm_flags;			/* misc. flags */
	u_short	dm_cmd;				/* DMA controller command */
	int	dm_cur;				/* current segment */
	int	dm_last;			/* last segment */
	struct	dma_chain dm_chain[DMAMAXIO];	/* all segments */
};

struct dma_softc {
	device_t sc_dev;
	bus_space_tag_t sc_bst;
	bus_space_handle_t sc_bsh;

	struct	dmareg *sc_dmareg;		/* pointer to our hardware */
	struct	dma_channel sc_chan[NDMACHAN];	/* 2 channels */
	TAILQ_HEAD(, dmaqueue) sc_queue;	/* job queue */
	struct	callout sc_debug_ch;		/* DEBUG timeout callout */
	char	sc_type;			/* A, B, or C */
	int	sc_ipl;				/* our interrupt level */
	void	*sc_ih;				/* interrupt cookie */
};

/* types */
#define	DMA_B	0		/* 98620B, 16-bit DMA */
#define DMA_C	1		/* 98620C, 32-bit DMA */

/* flags */
#define DMAF_PCFLUSH	0x01	/* flush physical cache on completion */
#define DMAF_VCFLUSH	0x02	/* flush virtual cache on completion */
#define DMAF_NOINTR	0x04	/* suppress completion interrupt */

static int	dmamatch(device_t, cfdata_t, void *);
static void	dmaattach(device_t, device_t, void *);

CFATTACH_DECL_NEW(dma, sizeof(struct dma_softc),
    dmamatch, dmaattach, NULL, NULL);

static int	dmaintr(void *);

#ifdef DEBUG
int	dmadebug = 0;
#define DDB_WORD	0x01	/* same as DMAGO_WORD */
#define DDB_LWORD	0x02	/* same as DMAGO_LWORD */
#define	DDB_FOLLOW	0x04
#define DDB_IO		0x08

static void	dmatimeout(void *);
int	dmatimo[NDMACHAN];

long	dmahits[NDMACHAN];
long	dmamisses[NDMACHAN];
long	dmabyte[NDMACHAN];
long	dmaword[NDMACHAN];
long	dmalword[NDMACHAN];
#endif

static struct dma_softc *dma_softc;

static int
dmamatch(device_t parent, cfdata_t cf, void *aux)
{
	struct intio_attach_args *ia = aux;
	static int dmafound = 0;                /* can only have one */

	if (strcmp("dma", ia->ia_modname) != 0 || dmafound)
		return 0;

	dmafound = 1;
	return 1;
}

static void
dmaattach(device_t parent, device_t self, void *aux)
{
	struct dma_softc *sc = device_private(self);
	struct intio_attach_args *ia = aux;
	struct dma_channel *dc;
	struct dmareg *dma;
	int i;
	char rev;

	sc->sc_dev = self;

	/* There's just one. */
	dma_softc = sc;

	sc->sc_bst = ia->ia_bst;
	if (bus_space_map(sc->sc_bst, ia->ia_iobase, INTIO_DEVSIZE, 0,
	     &sc->sc_bsh)) {
		aprint_error(": can't map registers\n");
		return;
	}

	dma = bus_space_vaddr(sc->sc_bst, sc->sc_bsh);
	sc->sc_dmareg = dma;

	/*
	 * Determine the DMA type.  A DMA_A or DMA_B will fail the
	 * following probe.
	 *
	 * XXX Don't know how to easily differentiate the A and B cards,
	 * so we just hope nobody has an A card (A cards will work if
	 * splbio works out to ipl 3).
	 */
	if (hp300_bus_space_probe(sc->sc_bst, sc->sc_bsh, DMA_ID2, 1) == 0) {
		rev = 'B';
#if !defined(HP320)
		aprint_normal("\n");
		panic("%s: DMA card requires hp320 support", __func__);
#endif
	} else
		rev = dma->dma_id[2];

	sc->sc_type = (rev == 'B') ? DMA_B : DMA_C;

	TAILQ_INIT(&sc->sc_queue);
	callout_init(&sc->sc_debug_ch, 0);

	for (i = 0; i < NDMACHAN; i++) {
		dc = &sc->sc_chan[i];
		dc->dm_job = NULL;
		switch (i) {
		case 0:
			dc->dm_hwaddr = &dma->dma_chan0;
			dc->dm_Bhwaddr = &dma->dma_Bchan0;
			break;

		case 1:
			dc->dm_hwaddr = &dma->dma_chan1;
			dc->dm_Bhwaddr = &dma->dma_Bchan1;
			break;

		default:
			aprint_normal("\n");
			panic("%s: more than 2 channels?", __func__);
			/* NOTREACHED */
		}
	}

#ifdef DEBUG
	/* make sure timeout is really not needed */
	callout_reset(&sc->sc_debug_ch, 30 * hz, dmatimeout, sc);
#endif

	aprint_normal(": 98620%c, 2 channels, %d-bit DMA\n",
	    rev, (rev == 'B') ? 16 : 32);

	/*
	 * Defer hooking up our interrupt until the first
	 * DMA-using controller has hooked up theirs.
	 */
	sc->sc_ih = NULL;
}

/*
 * Compute the ipl and (re)establish the interrupt handler
 * for the DMA controller.
 */
void
dmaupdateipl(int ipl)
{
	struct dma_softc *sc = dma_softc;

	if (sc->sc_ih != NULL && sc->sc_ipl == ipl) {
		/* No change. */
		return;
	}

	if (sc->sc_ih != NULL) {
		intr_disestablish(sc->sc_ih);
	}

	if ((sc->sc_ipl = ipl) == 0) {
		/* Don't hook up a new handler. */
		return;
	}

	sc->sc_ih = intr_establish(dmaintr, sc, sc->sc_ipl, ISRPRI_BIO);
	if (sc->sc_type == DMA_B && sc->sc_ipl != 3) {
		aprint_error_dev(sc->sc_dev,
		    "WARNING: IPL set to %d on maybe-rev. A card!\n",
		    sc->sc_ipl);
	}
}
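
/*
 * Example (hypothetical caller): a controller driver that has just
 * established its own interrupt handler at IPL 3 would follow up with
 *
 *	dmaupdateipl(3);
 *
 * so that DMA completion interrupts arrive at the same priority.
 */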

int
dmareq(struct dmaqueue *dq)
{
	struct dma_softc *sc = dma_softc;
	int i, chan, s;

#if 1
	s = splhigh();	/* XXXthorpej */
#else
	s = splbio();
#endif

	chan = dq->dq_chan;
	for (i = NDMACHAN - 1; i >= 0; i--) {
		/*
		 * Can we use this channel?
		 */
		if ((chan & (1 << i)) == 0)
			continue;

		/*
		 * We can use it; is it busy?
		 */
		if (sc->sc_chan[i].dm_job != NULL)
			continue;

		/*
		 * Not busy; give the caller this channel.
		 */
		sc->sc_chan[i].dm_job = dq;
		dq->dq_chan = i;
		splx(s);
		return 1;
	}

	/*
	 * Couldn't get a channel now; put this in the queue.
	 */
	TAILQ_INSERT_TAIL(&sc->sc_queue, dq, dq_list);
	splx(s);
	return 0;
}
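
/*
 * Usage sketch (hypothetical caller, not part of this file): a driver
 * fills in a struct dmaqueue with a bitmask of the channels it can
 * use.  On an immediate grant dq_chan is rewritten to the channel
 * number; otherwise the request is queued and the dq_start callback
 * runs when a channel frees up.  mydev_dmastart and mydev_dmadone
 * are hypothetical callbacks.
 *
 *	sc->sc_dq.dq_softc = sc;
 *	sc->sc_dq.dq_start = mydev_dmastart;
 *	sc->sc_dq.dq_done = mydev_dmadone;
 *	sc->sc_dq.dq_chan = (1 << 0) | (1 << 1);
 *	if (dmareq(&sc->sc_dq))
 *		mydev_dmastart(sc);
 */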

void
dmafree(struct dmaqueue *dq)
{
	int unit = dq->dq_chan;
	struct dma_softc *sc = dma_softc;
	struct dma_channel *dc = &sc->sc_chan[unit];
	struct dmaqueue *dn;
	int chan, s;

#if 1
	s = splhigh();	/* XXXthorpej */
#else
	s = splbio();
#endif

#ifdef DEBUG
	dmatimo[unit] = 0;
#endif

	DMA_CLEAR(dc);

#if defined(CACHE_HAVE_PAC) || defined(M68040)
	/*
	 * XXX we may not always go thru the flush code in dmastop()
	 */
	if (dc->dm_flags & DMAF_PCFLUSH) {
		PCIA();
		dc->dm_flags &= ~DMAF_PCFLUSH;
	}
#endif

#if defined(CACHE_HAVE_VAC)
	if (dc->dm_flags & DMAF_VCFLUSH) {
		/*
		 * 320/350s have VACs that may also need flushing.
		 * In our case we only flush the supervisor side
		 * because we know that if we are DMAing to user
		 * space, the physical pages will also be mapped
		 * in kernel space (via vmapbuf) and hence cache-
		 * inhibited by the pmap module due to the multiple
		 * mapping.
		 */
		DCIS();
		dc->dm_flags &= ~DMAF_VCFLUSH;
	}
#endif

	/*
	 * Channel is now free.  Look for another job to run on this
	 * channel.
	 */
	dc->dm_job = NULL;
	chan = 1 << unit;
	for (dn = TAILQ_FIRST(&sc->sc_queue); dn != NULL;
	    dn = TAILQ_NEXT(dn, dq_list)) {
		if (dn->dq_chan & chan) {
			/* Found one... */
			TAILQ_REMOVE(&sc->sc_queue, dn, dq_list);
			dc->dm_job = dn;
			dn->dq_chan = dq->dq_chan;
			splx(s);

			/* Start the initiator. */
			(*dn->dq_start)(dn->dq_softc);
			return;
		}
	}
	splx(s);
}

void
dmago(int unit, char *addr, int count, int flags)
{
	struct dma_softc *sc = dma_softc;
	struct dma_channel *dc = &sc->sc_chan[unit];
	char *dmaend = NULL;
	int seg, tcount;

	if (count > MAXPHYS)
		panic("dmago: count > MAXPHYS");

#if defined(HP320)
	if (sc->sc_type == DMA_B && (flags & DMAGO_LWORD))
		panic("dmago: no can do 32-bit DMA");
#endif

#ifdef DEBUG
	if (dmadebug & DDB_FOLLOW)
		printf("dmago(%d, %p, %x, %x)\n",
		       unit, addr, count, flags);
	if (flags & DMAGO_LWORD)
		dmalword[unit]++;
	else if (flags & DMAGO_WORD)
		dmaword[unit]++;
	else
		dmabyte[unit]++;
#endif
	/*
	 * Build the DMA chain
	 */
	for (seg = 0; count > 0; seg++) {
		dc->dm_chain[seg].dc_addr = (char *) kvtop(addr);
#if defined(M68040)
		/*
		 * Push back dirty cache lines
		 */
		if (mmutype == MMU_68040)
			DCFP((paddr_t)dc->dm_chain[seg].dc_addr);
#endif
		if (count < (tcount = PAGE_SIZE - ((int)addr & PGOFSET)))
			tcount = count;
		dc->dm_chain[seg].dc_count = tcount;
		addr += tcount;
		count -= tcount;
		if (flags & DMAGO_LWORD)
			tcount >>= 2;
		else if (flags & DMAGO_WORD)
			tcount >>= 1;

		/*
		 * Try to compact the DMA transfer if the pages are adjacent.
		 * Note: this will never happen on the first iteration.
		 */
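		/*
		 * For example (hypothetical addresses): if the segment
		 * just described ends at PA 0x41000 and this page
		 * starts at PA 0x41000, the page is folded into the
		 * previous chain element instead of starting a new one.
		 */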
		if (dc->dm_chain[seg].dc_addr == dmaend
#if defined(HP320)
		    /* only 16-bit count on 98620B */
		    && (sc->sc_type != DMA_B ||
			dc->dm_chain[seg - 1].dc_count + tcount <= 65536)
#endif
		) {
#ifdef DEBUG
			dmahits[unit]++;
#endif
			dmaend += dc->dm_chain[seg].dc_count;
			dc->dm_chain[--seg].dc_count += tcount;
		} else {
#ifdef DEBUG
			dmamisses[unit]++;
#endif
			dmaend = dc->dm_chain[seg].dc_addr +
			    dc->dm_chain[seg].dc_count;
			dc->dm_chain[seg].dc_count = tcount;
		}
	}
	dc->dm_cur = 0;
	dc->dm_last = --seg;
	dc->dm_flags = 0;
	/*
	 * Set up the command word based on flags
	 */
	dc->dm_cmd = DMA_ENAB | DMA_IPL(sc->sc_ipl) | DMA_START;
	if ((flags & DMAGO_READ) == 0)
		dc->dm_cmd |= DMA_WRT;
	if (flags & DMAGO_LWORD)
		dc->dm_cmd |= DMA_LWORD;
	else if (flags & DMAGO_WORD)
		dc->dm_cmd |= DMA_WORD;
	if (flags & DMAGO_PRI)
		dc->dm_cmd |= DMA_PRI;

#if defined(M68040)
	/*
	 * On the 68040 we need to flush (push) the data cache before a
	 * DMA (already done above) and flush again after DMA completes.
	 * In theory we should only need to flush prior to a write DMA
	 * and purge after a read DMA but if the entire page is not
	 * involved in the DMA we might purge some valid data.
	 */
	if (mmutype == MMU_68040 && (flags & DMAGO_READ))
		dc->dm_flags |= DMAF_PCFLUSH;
#endif

#if defined(CACHE_HAVE_PAC)
	/*
	 * Remember if we need to flush external physical cache when
	 * DMA is done.  We only do this if we are reading (writing memory).
	 */
	if (ectype == EC_PHYS && (flags & DMAGO_READ))
		dc->dm_flags |= DMAF_PCFLUSH;
#endif

#if defined(CACHE_HAVE_VAC)
	if (ectype == EC_VIRT && (flags & DMAGO_READ))
		dc->dm_flags |= DMAF_VCFLUSH;
#endif

	/*
	 * Remember if we can skip the dma completion interrupt on
	 * the last segment in the chain.
	 */
	if (flags & DMAGO_NOINT) {
		if (dc->dm_cur == dc->dm_last)
			dc->dm_cmd &= ~DMA_ENAB;
		else
			dc->dm_flags |= DMAF_NOINTR;
	}
#ifdef DEBUG
	if (dmadebug & DDB_IO) {
		if (((dmadebug&DDB_WORD) && (dc->dm_cmd&DMA_WORD)) ||
		    ((dmadebug&DDB_LWORD) && (dc->dm_cmd&DMA_LWORD))) {
			printf("dmago: cmd %x, flags %x\n",
			    dc->dm_cmd, dc->dm_flags);
			for (seg = 0; seg <= dc->dm_last; seg++)
				printf("  %d: %d@%p\n", seg,
				    dc->dm_chain[seg].dc_count,
				    dc->dm_chain[seg].dc_addr);
		}
	}
	dmatimo[unit] = 1;
#endif
	DMA_ARM(sc, dc);
}
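
/*
 * Usage sketch (hypothetical values): once dmareq() has granted a
 * channel, a driver typically starts the transfer from its dq_start
 * callback, e.g.
 *
 *	dmago(sc->sc_dq.dq_chan, bp->b_data, bp->b_bcount,
 *	    DMAGO_WORD | DMAGO_READ);
 *
 * to read bp->b_bcount bytes from the device into bp->b_data using
 * 16-bit transfers.
 */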

void
dmastop(int unit)
{
	struct dma_softc *sc = dma_softc;
	struct dma_channel *dc = &sc->sc_chan[unit];

#ifdef DEBUG
	if (dmadebug & DDB_FOLLOW)
		printf("dmastop(%d)\n", unit);
	dmatimo[unit] = 0;
#endif
	DMA_CLEAR(dc);

#if defined(CACHE_HAVE_PAC) || defined(M68040)
	if (dc->dm_flags & DMAF_PCFLUSH) {
		PCIA();
		dc->dm_flags &= ~DMAF_PCFLUSH;
	}
#endif

#if defined(CACHE_HAVE_VAC)
	if (dc->dm_flags & DMAF_VCFLUSH) {
		/*
		 * 320/350s have VACs that may also need flushing.
		 * In our case we only flush the supervisor side
		 * because we know that if we are DMAing to user
		 * space, the physical pages will also be mapped
		 * in kernel space (via vmapbuf) and hence cache-
		 * inhibited by the pmap module due to the multiple
		 * mapping.
		 */
		DCIS();
		dc->dm_flags &= ~DMAF_VCFLUSH;
	}
#endif

	/*
	 * We may get this interrupt after a device service routine
	 * has freed the dma channel.  So, ignore the interrupt if
	 * the channel no longer has a job.
	 */
	if (dc->dm_job != NULL)
		(*dc->dm_job->dq_done)(dc->dm_job->dq_softc);
}

static int
dmaintr(void *arg)
{
	struct dma_softc *sc = arg;
	struct dma_channel *dc;
	int i, stat;
	int found = 0;

#ifdef DEBUG
	if (dmadebug & DDB_FOLLOW)
		printf("dmaintr\n");
#endif
	for (i = 0; i < NDMACHAN; i++) {
		dc = &sc->sc_chan[i];
		stat = DMA_STAT(dc);
		if ((stat & DMA_INTR) == 0)
			continue;
		found++;
#ifdef DEBUG
		if (dmadebug & DDB_IO) {
			if (((dmadebug&DDB_WORD) && (dc->dm_cmd&DMA_WORD)) ||
			    ((dmadebug&DDB_LWORD) && (dc->dm_cmd&DMA_LWORD)))
				printf("dmaintr: flags %x unit %d stat %x "
				    "next %d\n",
				    dc->dm_flags, i, stat, dc->dm_cur + 1);
		}
		if (stat & DMA_ARMED)
			printf("dma channel %d: intr when armed\n", i);
#endif
		/*
		 * Load the next segment, or finish up if we're done.
		 */
		dc->dm_cur++;
		if (dc->dm_cur <= dc->dm_last) {
#ifdef DEBUG
			dmatimo[i] = 1;
#endif
			/*
			 * If we're the last segment, disable the
			 * completion interrupt, if necessary.
			 */
			if (dc->dm_cur == dc->dm_last &&
			    (dc->dm_flags & DMAF_NOINTR))
				dc->dm_cmd &= ~DMA_ENAB;
			DMA_CLEAR(dc);
			DMA_ARM(sc, dc);
		} else
			dmastop(i);
	}
	return found;
}

#ifdef DEBUG
static void
dmatimeout(void *arg)
{
	int i, s;
	struct dma_softc *sc = arg;

	for (i = 0; i < NDMACHAN; i++) {
		s = splbio();
		if (dmatimo[i]) {
			if (dmatimo[i] > 1)
				printf("dma channel %d timeout #%d\n",
				    i, dmatimo[i]-1);
			dmatimo[i]++;
		}
		splx(s);
	}
	callout_reset(&sc->sc_debug_ch, 30 * hz, dmatimeout, sc);
}
#endif
665