/*	$NetBSD: dma.c,v 1.19 1997/05/05 21:02:39 thorpej Exp $	*/

/*
 * Copyright (c) 1995, 1996, 1997
 *	Jason R. Thorpe.  All rights reserved.
 * Copyright (c) 1982, 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)dma.c	8.1 (Berkeley) 6/10/93
 */

/*
 * DMA driver
 */

#include <machine/hp300spu.h>	/* XXX param.h includes cpu.h */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/time.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/device.h>

#include <machine/frame.h>
#include <machine/cpu.h>
#include <machine/intr.h>

#include <hp300/dev/dmareg.h>
#include <hp300/dev/dmavar.h>

/*
 * The largest single request will be MAXPHYS bytes which will require
 * at most MAXPHYS/NBPG+1 chain elements to describe, i.e. if none of
 * the buffer pages are physically contiguous (MAXPHYS/NBPG) and the
 * buffer is not page aligned (+1).
 */
#define	DMAMAXIO	(MAXPHYS/NBPG+1)
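/*
 * For example, assuming the typical values MAXPHYS = 64*1024 and
 * NBPG = 4096 (both are configuration-dependent), the worst case
 * works out to 64*1024/4096 + 1 = 17 chain elements.
 */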

struct dma_chain {
	int	dc_count;
	char	*dc_addr;
};

struct dma_channel {
	struct	dmaqueue *dm_job;		/* current job */
	struct	dmadevice *dm_hwaddr;		/* registers if DMA_C */
	struct	dmaBdevice *dm_Bhwaddr;		/* registers if not DMA_C */
	char	dm_flags;			/* misc. flags */
	u_short	dm_cmd;				/* DMA controller command */
	int	dm_cur;				/* current segment */
	int	dm_last;			/* last segment */
	struct	dma_chain dm_chain[DMAMAXIO];	/* all segments */
};

struct dma_softc {
	struct	dmareg *sc_dmareg;		/* pointer to our hardware */
	struct	dma_channel sc_chan[NDMACHAN];	/* 2 channels */
	TAILQ_HEAD(, dmaqueue) sc_queue;	/* job queue */
	char	sc_type;			/* A, B, or C */
	int	sc_ipl;				/* our interrupt level */
	void	*sc_ih;				/* interrupt cookie */
} dma_softc;

/* types */
#define	DMA_B	0
#define DMA_C	1

/* flags */
#define DMAF_PCFLUSH	0x01
#define DMAF_VCFLUSH	0x02
#define DMAF_NOINTR	0x04

int	dmaintr __P((void *));

#ifdef DEBUG
int	dmadebug = 0;
#define DDB_WORD	0x01	/* same as DMAGO_WORD */
#define DDB_LWORD	0x02	/* same as DMAGO_LWORD */
#define	DDB_FOLLOW	0x04
#define DDB_IO		0x08

void	dmatimeout __P((void *));
int	dmatimo[NDMACHAN];

long	dmahits[NDMACHAN];
long	dmamisses[NDMACHAN];
long	dmabyte[NDMACHAN];
long	dmaword[NDMACHAN];
long	dmalword[NDMACHAN];
#endif

/*
 * Initialize the DMA engine, called by dioattach()
 */
void
dmainit()
{
	struct dma_softc *sc = &dma_softc;
	struct dmareg *dma;
	struct dma_channel *dc;
	int i;
	char rev;

	/* There's just one. */
	sc->sc_dmareg = (struct dmareg *)DMA_BASE;
	dma = sc->sc_dmareg;

	/*
	 * Determine the DMA type.  A DMA_A or DMA_B will fail the
	 * following probe.
	 *
	 * XXX Don't know how to easily differentiate the A and B cards,
	 * so we just hope nobody has an A card (A cards will work if
	 * splbio works out to ipl 3).
	 */
	if (badbaddr((char *)&dma->dma_id[2])) {
		rev = 'B';
#if !defined(HP320)
		panic("dmainit: DMA card requires hp320 support");
#endif
	} else
		rev = dma->dma_id[2];

	sc->sc_type = (rev == 'B') ? DMA_B : DMA_C;

	TAILQ_INIT(&sc->sc_queue);

	for (i = 0; i < NDMACHAN; i++) {
		dc = &sc->sc_chan[i];
		dc->dm_job = NULL;
		switch (i) {
		case 0:
			dc->dm_hwaddr = &dma->dma_chan0;
			dc->dm_Bhwaddr = &dma->dma_Bchan0;
			break;

		case 1:
			dc->dm_hwaddr = &dma->dma_chan1;
			dc->dm_Bhwaddr = &dma->dma_Bchan1;
			break;

		default:
			panic("dmainit: more than 2 channels?");
			/* NOTREACHED */
		}
	}

#ifdef DEBUG
	/* make sure timeout is really not needed */
	timeout(dmatimeout, sc, 30 * hz);
#endif

	printf("98620%c, 2 channels, %d bit DMA\n",
	    rev, (rev == 'B') ? 16 : 32);

	/*
	 * Defer hooking up our interrupt until the first
	 * DMA-using controller has hooked up theirs.
	 */
	sc->sc_ih = NULL;
}

/*
 * Compute the ipl and (re)establish the interrupt handler
 * for the DMA controller.
 */
void
dmacomputeipl()
{
	struct dma_softc *sc = &dma_softc;

	if (sc->sc_ih != NULL)
		intr_disestablish(sc->sc_ih);

	/*
	 * Our interrupt level must be as high as the highest
	 * device using DMA (i.e. splbio).
	 */
	sc->sc_ipl = PSLTOIPL(hp300_bioipl);
	sc->sc_ih = intr_establish(dmaintr, sc, sc->sc_ipl, IPL_BIO);
}

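/*
 * dmareq() -- request a DMA channel from the mask in dq->dq_chan.
 * If one of the acceptable channels is free, it is claimed, the
 * channel number replaces the mask in dq->dq_chan, and 1 is returned.
 * Otherwise the request is queued and 0 is returned; the caller's
 * dq_start callback is invoked later, from dmafree(), when a channel
 * becomes available.
 *
 * A minimal caller sketch (hypothetical softc and callbacks, and
 * assuming the DMA0/DMA1 channel-mask definitions from dmavar.h):
 *
 *	sc->sc_dq.dq_softc = sc;
 *	sc->sc_dq.dq_start = mydev_dmastart;
 *	sc->sc_dq.dq_done = mydev_dmadone;
 *	sc->sc_dq.dq_chan = DMA0 | DMA1;
 *	if (dmareq(&sc->sc_dq))
 *		mydev_dmastart(sc);
 *
 * If dmareq() returns 1 the channel was granted immediately, so the
 * caller starts the transfer itself as above.
 */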
int
dmareq(dq)
	struct dmaqueue *dq;
{
	struct dma_softc *sc = &dma_softc;
	int i, chan, s;

#if 1
	s = splhigh();	/* XXXthorpej */
#else
	s = splbio();
#endif

	chan = dq->dq_chan;
	for (i = NDMACHAN - 1; i >= 0; i--) {
		/*
		 * Can we use this channel?
		 */
		if ((chan & (1 << i)) == 0)
			continue;

		/*
		 * We can use it; is it busy?
		 */
		if (sc->sc_chan[i].dm_job != NULL)
			continue;

		/*
		 * Not busy; give the caller this channel.
		 */
		sc->sc_chan[i].dm_job = dq;
		dq->dq_chan = i;
		splx(s);
		return (1);
	}

	/*
	 * Couldn't get a channel now; put this in the queue.
	 */
	TAILQ_INSERT_TAIL(&sc->sc_queue, dq, dq_list);
	splx(s);
	return (0);
}

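/*
 * dmafree() -- release the channel held by the given job once its
 * transfer is finished or aborted, then hand the channel to the first
 * queued request that can use it, kicking it off via its dq_start
 * callback.
 */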
void
dmafree(dq)
	struct dmaqueue *dq;
{
	int unit = dq->dq_chan;
	struct dma_softc *sc = &dma_softc;
	struct dma_channel *dc = &sc->sc_chan[unit];
	struct dmaqueue *dn;
	int chan, s;

#if 1
	s = splhigh();	/* XXXthorpej */
#else
	s = splbio();
#endif

#ifdef DEBUG
	dmatimo[unit] = 0;
#endif

	DMA_CLEAR(dc);

#if defined(CACHE_HAVE_PAC) || defined(M68040)
	/*
	 * XXX we may not always go thru the flush code in dmastop()
	 */
	if (dc->dm_flags & DMAF_PCFLUSH) {
		PCIA();
		dc->dm_flags &= ~DMAF_PCFLUSH;
	}
#endif

#if defined(CACHE_HAVE_VAC)
	if (dc->dm_flags & DMAF_VCFLUSH) {
		/*
		 * 320/350s have VACs that may also need flushing.
		 * In our case we only flush the supervisor side
		 * because we know that if we are DMAing to user
		 * space, the physical pages will also be mapped
		 * in kernel space (via vmapbuf) and hence cache-
		 * inhibited by the pmap module due to the multiple
		 * mapping.
		 */
		DCIS();
		dc->dm_flags &= ~DMAF_VCFLUSH;
	}
#endif

	/*
	 * Channel is now free.  Look for another job to run on this
	 * channel.
	 */
	dc->dm_job = NULL;
	chan = 1 << unit;
	for (dn = sc->sc_queue.tqh_first; dn != NULL;
	    dn = dn->dq_list.tqe_next) {
		if (dn->dq_chan & chan) {
			/* Found one... */
			TAILQ_REMOVE(&sc->sc_queue, dn, dq_list);
			dc->dm_job = dn;
			dn->dq_chan = dq->dq_chan;
			splx(s);

			/* Start the initiator. */
			(*dn->dq_start)(dn->dq_softc);
			return;
		}
	}
	splx(s);
}

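/*
 * dmago() -- start a transfer of `count' bytes at kernel virtual
 * address `addr' on the given channel.  Builds the physical segment
 * chain, derives the controller command word from the DMAGO_* flags
 * (direction, transfer width, priority, interrupt suppression), and
 * arms the hardware on the first segment.
 */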
void
dmago(unit, addr, count, flags)
	int unit;
	char *addr;
	int count;
	int flags;
{
	struct dma_softc *sc = &dma_softc;
	struct dma_channel *dc = &sc->sc_chan[unit];
	char *dmaend = NULL;
	int seg, tcount;

	if (count > MAXPHYS)
		panic("dmago: count > MAXPHYS");

#if defined(HP320)
	if (sc->sc_type == DMA_B && (flags & DMAGO_LWORD))
		panic("dmago: no can do 32-bit DMA");
#endif

#ifdef DEBUG
	if (dmadebug & DDB_FOLLOW)
		printf("dmago(%d, %p, %x, %x)\n",
		       unit, addr, count, flags);
	if (flags & DMAGO_LWORD)
		dmalword[unit]++;
	else if (flags & DMAGO_WORD)
		dmaword[unit]++;
	else
		dmabyte[unit]++;
#endif
	/*
	 * Build the DMA chain
	 */
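	/*
	 * For illustration (assuming NBPG = 4096 and a byte-wide
	 * transfer): a 6KB request starting 2KB into a page yields a
	 * 2KB segment followed by a 4KB segment; if kvtop() shows the
	 * two pages are physically adjacent, the compaction below folds
	 * them into a single 6KB chain element (counted as a "hit" in
	 * the DEBUG statistics).
	 */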
	for (seg = 0; count > 0; seg++) {
		dc->dm_chain[seg].dc_addr = (char *) kvtop(addr);
#if defined(M68040)
		/*
		 * Push back dirty cache lines
		 */
		if (mmutype == MMU_68040)
			DCFP((vm_offset_t)dc->dm_chain[seg].dc_addr);
#endif
		if (count < (tcount = NBPG - ((int)addr & PGOFSET)))
			tcount = count;
		dc->dm_chain[seg].dc_count = tcount;
		addr += tcount;
		count -= tcount;
		if (flags & DMAGO_LWORD)
			tcount >>= 2;
		else if (flags & DMAGO_WORD)
			tcount >>= 1;

		/*
		 * Try to compact the DMA transfer if the pages are adjacent.
		 * Note: this will never happen on the first iteration.
		 */
		if (dc->dm_chain[seg].dc_addr == dmaend
#if defined(HP320)
		    /* only 16-bit count on 98620B */
		    && (sc->sc_type != DMA_B ||
			dc->dm_chain[seg - 1].dc_count + tcount <= 65536)
#endif
		) {
#ifdef DEBUG
			dmahits[unit]++;
#endif
			dmaend += dc->dm_chain[seg].dc_count;
			dc->dm_chain[--seg].dc_count += tcount;
		} else {
#ifdef DEBUG
			dmamisses[unit]++;
#endif
			dmaend = dc->dm_chain[seg].dc_addr +
			    dc->dm_chain[seg].dc_count;
			dc->dm_chain[seg].dc_count = tcount;
		}
	}
	dc->dm_cur = 0;
	dc->dm_last = --seg;
	dc->dm_flags = 0;
	/*
	 * Set up the command word based on flags
	 */
	dc->dm_cmd = DMA_ENAB | DMA_IPL(sc->sc_ipl) | DMA_START;
	if ((flags & DMAGO_READ) == 0)
		dc->dm_cmd |= DMA_WRT;
	if (flags & DMAGO_LWORD)
		dc->dm_cmd |= DMA_LWORD;
	else if (flags & DMAGO_WORD)
		dc->dm_cmd |= DMA_WORD;
	if (flags & DMAGO_PRI)
		dc->dm_cmd |= DMA_PRI;

#if defined(M68040)
	/*
	 * On the 68040 we need to flush (push) the data cache before a
	 * DMA (already done above) and flush again after DMA completes.
	 * In theory we should only need to flush prior to a write DMA
	 * and purge after a read DMA but if the entire page is not
	 * involved in the DMA we might purge some valid data.
	 */
	if (mmutype == MMU_68040 && (flags & DMAGO_READ))
		dc->dm_flags |= DMAF_PCFLUSH;
#endif

#if defined(CACHE_HAVE_PAC)
	/*
	 * Remember if we need to flush external physical cache when
	 * DMA is done.  We only do this if we are reading (writing memory).
	 */
	if (ectype == EC_PHYS && (flags & DMAGO_READ))
		dc->dm_flags |= DMAF_PCFLUSH;
#endif

#if defined(CACHE_HAVE_VAC)
	if (ectype == EC_VIRT && (flags & DMAGO_READ))
		dc->dm_flags |= DMAF_VCFLUSH;
#endif

	/*
	 * Remember if we can skip the dma completion interrupt on
	 * the last segment in the chain.
	 */
	if (flags & DMAGO_NOINT) {
		if (dc->dm_cur == dc->dm_last)
			dc->dm_cmd &= ~DMA_ENAB;
		else
			dc->dm_flags |= DMAF_NOINTR;
	}
#ifdef DEBUG
	if (dmadebug & DDB_IO) {
		if (((dmadebug&DDB_WORD) && (dc->dm_cmd&DMA_WORD)) ||
		    ((dmadebug&DDB_LWORD) && (dc->dm_cmd&DMA_LWORD))) {
			printf("dmago: cmd %x, flags %x\n",
			       dc->dm_cmd, dc->dm_flags);
			for (seg = 0; seg <= dc->dm_last; seg++)
				printf("  %d: %d@%p\n", seg,
				    dc->dm_chain[seg].dc_count,
				    dc->dm_chain[seg].dc_addr);
		}
	}
	dmatimo[unit] = 1;
#endif
	DMA_ARM(sc, dc);
}

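/*
 * dmastop() -- called when the last segment of a transfer completes.
 * Clears the channel, performs any cache flushing deferred until the
 * end of the transfer, and notifies the client through its dq_done
 * callback.
 */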
void
dmastop(unit)
	int unit;
{
	struct dma_softc *sc = &dma_softc;
	struct dma_channel *dc = &sc->sc_chan[unit];

#ifdef DEBUG
	if (dmadebug & DDB_FOLLOW)
		printf("dmastop(%d)\n", unit);
	dmatimo[unit] = 0;
#endif
	DMA_CLEAR(dc);

#if defined(CACHE_HAVE_PAC) || defined(M68040)
	if (dc->dm_flags & DMAF_PCFLUSH) {
		PCIA();
		dc->dm_flags &= ~DMAF_PCFLUSH;
	}
#endif

#if defined(CACHE_HAVE_VAC)
	if (dc->dm_flags & DMAF_VCFLUSH) {
		/*
		 * 320/350s have VACs that may also need flushing.
		 * In our case we only flush the supervisor side
		 * because we know that if we are DMAing to user
		 * space, the physical pages will also be mapped
		 * in kernel space (via vmapbuf) and hence cache-
		 * inhibited by the pmap module due to the multiple
		 * mapping.
		 */
		DCIS();
		dc->dm_flags &= ~DMAF_VCFLUSH;
	}
#endif

	/*
	 * We may get this interrupt after a device service routine
	 * has freed the dma channel.  So, ignore the intr if there's
	 * nothing on the queue.
	 */
	if (dc->dm_job != NULL)
		(*dc->dm_job->dq_done)(dc->dm_job->dq_softc);
}

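/*
 * dmaintr() -- interrupt handler shared by both channels.  Arms the
 * next segment of each interrupting channel's chain, or calls
 * dmastop() when the chain is exhausted.  Returns the number of
 * channels that were interrupting, so the dispatcher can tell whether
 * the interrupt was ours.
 */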
int
dmaintr(arg)
	void *arg;
{
	struct dma_softc *sc = arg;
	struct dma_channel *dc;
	int i, stat;
	int found = 0;

#ifdef DEBUG
	if (dmadebug & DDB_FOLLOW)
		printf("dmaintr\n");
#endif
	for (i = 0; i < NDMACHAN; i++) {
		dc = &sc->sc_chan[i];
		stat = DMA_STAT(dc);
		if ((stat & DMA_INTR) == 0)
			continue;
		found++;
#ifdef DEBUG
		if (dmadebug & DDB_IO) {
			if (((dmadebug&DDB_WORD) && (dc->dm_cmd&DMA_WORD)) ||
			    ((dmadebug&DDB_LWORD) && (dc->dm_cmd&DMA_LWORD)))
			  printf("dmaintr: flags %x unit %d stat %x next %d\n",
			   dc->dm_flags, i, stat, dc->dm_cur + 1);
		}
		if (stat & DMA_ARMED)
			printf("dma channel %d: intr when armed\n", i);
#endif
		/*
		 * Load the next segment, or finish up if we're done.
		 */
		dc->dm_cur++;
		if (dc->dm_cur <= dc->dm_last) {
#ifdef DEBUG
			dmatimo[i] = 1;
#endif
			/*
			 * If we're the last segment, disable the
			 * completion interrupt, if necessary.
			 */
			if (dc->dm_cur == dc->dm_last &&
			    (dc->dm_flags & DMAF_NOINTR))
				dc->dm_cmd &= ~DMA_ENAB;
			DMA_CLEAR(dc);
			DMA_ARM(sc, dc);
		} else
			dmastop(i);
	}
	return(found);
}

#ifdef DEBUG
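/*
 * dmatimeout() -- DEBUG watchdog, rescheduled every 30 seconds.
 * Complains about any channel whose dmatimo flag has remained set
 * across consecutive intervals, which would indicate a transfer that
 * never completed.
 */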
void
dmatimeout(arg)
	void *arg;
{
	int i, s;
	struct dma_softc *sc = arg;

	for (i = 0; i < NDMACHAN; i++) {
		s = splbio();
		if (dmatimo[i]) {
			if (dmatimo[i] > 1)
				printf("dma channel %d timeout #%d\n",
				    i, dmatimo[i]-1);
			dmatimo[i]++;
		}
		splx(s);
	}
	timeout(dmatimeout, sc, 30 * hz);
}
#endif