/*
 * Copyright (c) 1982, 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)dma.c	7.5 (Berkeley) 5/4/91
 */

/*
 * DMA driver
 */

#include "param.h"
#include "systm.h"
#include "time.h"
#include "kernel.h"
#include "proc.h"

#include "dmareg.h"
#include "dmavar.h"
#include "device.h"

#include "../include/cpu.h"
#include "../hp300/isr.h"

extern void isrlink();
extern void _insque();
extern void _remque();
extern void timeout();
extern u_int kvtop();
extern void PCIA();

/*
 * The largest single request will be MAXPHYS bytes which will require
 * at most MAXPHYS/NBPG+1 chain elements to describe, i.e. if none of
 * the buffer pages are physically contiguous (MAXPHYS/NBPG) and the
 * buffer is not page aligned (+1).
 */
#define	DMAMAXIO	(MAXPHYS/NBPG+1)

struct	dma_chain {
	int	dc_count;
	char	*dc_addr;
};

struct	dma_softc {
	struct	dmadevice *sc_hwaddr;
	struct	dmaBdevice *sc_Bhwaddr;
	char	sc_type;
	char	sc_flags;
	u_short	sc_cmd;
	struct	dma_chain *sc_cur;
	struct	dma_chain *sc_last;
	struct	dma_chain sc_chain[DMAMAXIO];
} dma_softc[NDMA];

/* types */
#define	DMA_B	0
#define DMA_C	1

/* flags */
#define DMAF_PCFLUSH	0x01
#define DMAF_VCFLUSH	0x02
#define DMAF_NOINTR	0x04

struct	devqueue dmachan[NDMA + 1];
int	dmaintr();

#ifdef DEBUG
int	dmadebug = 0;
#define DDB_WORD	0x01	/* same as DMAGO_WORD */
#define DDB_LWORD	0x02	/* same as DMAGO_LWORD */
#define	DDB_FOLLOW	0x04
#define DDB_IO		0x08

void	dmatimeout();
int	dmatimo[NDMA];

long	dmahits[NDMA];
long	dmamisses[NDMA];
long	dmabyte[NDMA];
long	dmaword[NDMA];
long	dmalword[NDMA];
#endif

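/*
 * Probe the 98620 DMA controller, determine whether it is a 'B' or 'C'
 * model, and initialize the per-channel softc structures and request
 * queues.
 */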
void
dmainit()
{
	register struct dmareg *dma = (struct dmareg *)DMA_BASE;
	register struct dma_softc *dc;
	register int i;
	char rev;

	/*
	 * Determine the DMA type.
	 * Don't know how to easily differentiate the A and B cards,
	 * so we just hope nobody has an A card (A cards will work if
	 * DMAINTLVL is set to 3).
	 */
	if (!badbaddr((char *)&dma->dma_id[2]))
		rev = dma->dma_id[2];
	else {
		rev = 'B';
#if !defined(HP320)
		panic("dmainit: DMA card requires hp320 support");
#endif
	}

	dc = &dma_softc[0];
	for (i = 0; i < NDMA; i++) {
		dc->sc_hwaddr = (i & 1) ? &dma->dma_chan1 : &dma->dma_chan0;
		dc->sc_Bhwaddr = (i & 1) ? &dma->dma_Bchan1 : &dma->dma_Bchan0;
		dc->sc_type = rev == 'B' ? DMA_B : DMA_C;
		dc++;
		dmachan[i].dq_forw = dmachan[i].dq_back = &dmachan[i];
	}
	dmachan[i].dq_forw = dmachan[i].dq_back = &dmachan[i];
#ifdef DEBUG
	/* make sure timeout is really not needed */
	timeout(dmatimeout, 0, 30 * hz);
#endif

	printf("dma: 98620%c with 2 channels, %d bit DMA\n",
	       rev, rev == 'B' ? 16 : 32);
}

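/*
 * Request a DMA channel from the set indicated by the dq_ctlr bit mask.
 * If one is free, set dq_ctlr to the channel number and return 1;
 * otherwise queue the request and return 0.
 */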
int
dmareq(dq)
	register struct devqueue *dq;
{
	register int i;
	register int chan;
	register int s = splbio();

	chan = dq->dq_ctlr;
	i = NDMA;
	while (--i >= 0) {
		if ((chan & (1 << i)) == 0)
			continue;
		if (dmachan[i].dq_forw != &dmachan[i])
			continue;
		insque(dq, &dmachan[i]);
		dq->dq_ctlr = i;
		splx(s);
		return(1);
	}
	insque(dq, dmachan[NDMA].dq_back);
	splx(s);
	return(0);
}

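/*
 * Release a DMA channel: clear the hardware, flush any external or
 * virtual cache if necessary, and hand the channel to the next
 * waiting requester, if any.
 */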
void
dmafree(dq)
	register struct devqueue *dq;
{
	int unit = dq->dq_ctlr;
	register struct dma_softc *dc = &dma_softc[unit];
	register struct devqueue *dn;
	register int chan, s;

	s = splbio();
#ifdef DEBUG
	dmatimo[unit] = 0;
#endif
	DMA_CLEAR(dc);
	/*
	 * XXX we may not always go thru the flush code in dmastop()
	 */
#if defined(HP360) || defined(HP370)
	if (dc->sc_flags & DMAF_PCFLUSH) {
		PCIA();
		dc->sc_flags &= ~DMAF_PCFLUSH;
	}
#endif
#if defined(HP320) || defined(HP350)
	if (dc->sc_flags & DMAF_VCFLUSH) {
		/*
		 * 320/350s have VACs that may also need flushing.
		 * In our case we only flush the supervisor side
		 * because we know that if we are DMAing to user
		 * space, the physical pages will also be mapped
		 * in kernel space (via vmapbuf) and hence cache-
		 * inhibited by the pmap module due to the multiple
		 * mapping.
		 */
		DCIS();
		dc->sc_flags &= ~DMAF_VCFLUSH;
	}
#endif
	remque(dq);
	chan = 1 << unit;
	for (dn = dmachan[NDMA].dq_forw;
	     dn != &dmachan[NDMA]; dn = dn->dq_forw) {
		if (dn->dq_ctlr & chan) {
			remque((caddr_t)dn);
			insque((caddr_t)dn, (caddr_t)dq->dq_back);
			splx(s);
			dn->dq_ctlr = dq->dq_ctlr;
			(dn->dq_driver->d_start)(dn->dq_unit);
			return;
		}
	}
	splx(s);
}

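/*
 * Start a DMA transfer of `count' bytes at kernel virtual address `addr'
 * on the given channel.  The buffer is broken into a chain of physically
 * contiguous segments; the first segment is armed here and dmaintr()
 * advances through the rest.
 */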
void
dmago(unit, addr, count, flags)
	int unit;
	register char *addr;
	register int count;
	register int flags;
{
	register struct dma_softc *dc = &dma_softc[unit];
	register struct dma_chain *dcp;
	register char *dmaend = NULL;
	register int tcount;

	if (count > MAXPHYS)
		panic("dmago: count > MAXPHYS");
#if defined(HP320)
	if (dc->sc_type == DMA_B && (flags & DMAGO_LWORD))
		panic("dmago: no can do 32-bit DMA");
#endif
#ifdef DEBUG
	if (dmadebug & DDB_FOLLOW)
		printf("dmago(%d, %x, %x, %x)\n",
		       unit, addr, count, flags);
	if (flags & DMAGO_LWORD)
		dmalword[unit]++;
	else if (flags & DMAGO_WORD)
		dmaword[unit]++;
	else
		dmabyte[unit]++;
#endif
	/*
	 * Build the DMA chain
	 */
	for (dcp = dc->sc_chain; count > 0; dcp++) {
		dcp->dc_addr = (char *) kvtop(addr);
		if (count < (tcount = NBPG - ((int)addr & PGOFSET)))
			tcount = count;
		dcp->dc_count = tcount;
		addr += tcount;
		count -= tcount;
		if (flags & DMAGO_LWORD)
			tcount >>= 2;
		else if (flags & DMAGO_WORD)
			tcount >>= 1;
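		/*
		 * If this page is physically contiguous with the previous
		 * segment (and the controller's count register can hold
		 * the combined count), merge it into that segment.
		 */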
		if (dcp->dc_addr == dmaend
#if defined(HP320)
		    /* only 16-bit count on 98620B */
		    && (dc->sc_type != DMA_B ||
			(dcp-1)->dc_count + tcount <= 65536)
#endif
		) {
#ifdef DEBUG
			dmahits[unit]++;
#endif
			dmaend += dcp->dc_count;
			(--dcp)->dc_count += tcount;
		} else {
#ifdef DEBUG
			dmamisses[unit]++;
#endif
			dmaend = dcp->dc_addr + dcp->dc_count;
			dcp->dc_count = tcount;
		}
	}
	dc->sc_cur = dc->sc_chain;
	dc->sc_last = --dcp;
	dc->sc_flags = 0;
	/*
	 * Set up the command word based on flags
	 */
	dc->sc_cmd = DMA_ENAB | DMA_IPL(DMAINTLVL) | DMA_START;
	if ((flags & DMAGO_READ) == 0)
		dc->sc_cmd |= DMA_WRT;
	if (flags & DMAGO_LWORD)
		dc->sc_cmd |= DMA_LWORD;
	else if (flags & DMAGO_WORD)
		dc->sc_cmd |= DMA_WORD;
	if (flags & DMAGO_PRI)
		dc->sc_cmd |= DMA_PRI;
#if defined(HP360) || defined(HP370)
	/*
	 * Remember if we need to flush external physical cache when
	 * DMA is done.  We only do this if we are reading (writing memory).
	 */
	if (ectype == EC_PHYS && (flags & DMAGO_READ))
		dc->sc_flags |= DMAF_PCFLUSH;
#endif
#if defined(HP320) || defined(HP350)
	if (ectype == EC_VIRT && (flags & DMAGO_READ))
		dc->sc_flags |= DMAF_VCFLUSH;
#endif
	/*
	 * Remember if we can skip the dma completion interrupt on
	 * the last segment in the chain.
	 */
	if (flags & DMAGO_NOINT) {
		if (dc->sc_cur == dc->sc_last)
			dc->sc_cmd &= ~DMA_ENAB;
		else
			dc->sc_flags |= DMAF_NOINTR;
	}
#ifdef DEBUG
	if (dmadebug & DDB_IO)
		if ((dmadebug&DDB_WORD) && (dc->sc_cmd&DMA_WORD) ||
		    (dmadebug&DDB_LWORD) && (dc->sc_cmd&DMA_LWORD)) {
			printf("dmago: cmd %x, flags %x\n",
			       dc->sc_cmd, dc->sc_flags);
			for (dcp = dc->sc_chain; dcp <= dc->sc_last; dcp++)
				printf("  %d: %d@%x\n", dcp-dc->sc_chain,
				       dcp->dc_count, dcp->dc_addr);
		}
	dmatimo[unit] = 1;
#endif
	DMA_ARM(dc);
}

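/*
 * Stop DMA on the given channel: clear the hardware, flush caches if
 * necessary, and notify the device that owns the channel via its
 * d_done routine.
 */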
void
dmastop(unit)
	register int unit;
{
	register struct dma_softc *dc = &dma_softc[unit];
	register struct devqueue *dq;

#ifdef DEBUG
	if (dmadebug & DDB_FOLLOW)
		printf("dmastop(%d)\n", unit);
	dmatimo[unit] = 0;
#endif
	DMA_CLEAR(dc);
#if defined(HP360) || defined(HP370)
	if (dc->sc_flags & DMAF_PCFLUSH) {
		PCIA();
		dc->sc_flags &= ~DMAF_PCFLUSH;
	}
#endif
#if defined(HP320) || defined(HP350)
	if (dc->sc_flags & DMAF_VCFLUSH) {
		/*
		 * 320/350s have VACs that may also need flushing.
		 * In our case we only flush the supervisor side
		 * because we know that if we are DMAing to user
		 * space, the physical pages will also be mapped
		 * in kernel space (via vmapbuf) and hence cache-
		 * inhibited by the pmap module due to the multiple
		 * mapping.
		 */
		DCIS();
		dc->sc_flags &= ~DMAF_VCFLUSH;
	}
#endif
	/*
	 * We may get this interrupt after a device service routine
	 * has freed the dma channel.  So, ignore the intr if there's
	 * nothing on the queue.
	 */
	dq = dmachan[unit].dq_forw;
	if (dq != &dmachan[unit])
		(dq->dq_driver->d_done)(dq->dq_unit);
}

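/*
 * DMA completion interrupt handler.  For each channel with an interrupt
 * pending, either arm the next segment of the chain or, if the chain is
 * exhausted, finish up via dmastop().  Returns the number of channels
 * serviced.
 */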
int
dmaintr()
{
	register struct dma_softc *dc;
	register int i, stat;
	int found = 0;

#ifdef DEBUG
	if (dmadebug & DDB_FOLLOW)
		printf("dmaintr\n");
#endif
	for (i = 0, dc = dma_softc; i < NDMA; i++, dc++) {
		stat = DMA_STAT(dc);
		if ((stat & DMA_INTR) == 0)
			continue;
		found++;
#ifdef DEBUG
		if (dmadebug & DDB_IO) {
			if ((dmadebug&DDB_WORD) && (dc->sc_cmd&DMA_WORD) ||
			    (dmadebug&DDB_LWORD) && (dc->sc_cmd&DMA_LWORD))
				printf("dmaintr: unit %d stat %x next %d\n",
				       i, stat, (dc->sc_cur-dc->sc_chain)+1);
		}
		if (stat & DMA_ARMED)
			printf("dma%d: intr when armed\n", i);
#endif
		if (++dc->sc_cur <= dc->sc_last) {
#ifdef DEBUG
			dmatimo[i] = 1;
#endif
			/*
			 * If we are about to start the last chain segment
			 * and the caller asked to skip the completion
			 * interrupt, disable the DMA interrupt now.
			 */
			if (dc->sc_cur == dc->sc_last &&
			    (dc->sc_flags & DMAF_NOINTR))
				dc->sc_cmd &= ~DMA_ENAB;
			DMA_CLEAR(dc);
			DMA_ARM(dc);
		} else
			dmastop(i);
	}
	return(found);
}

#ifdef DEBUG
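/*
 * Debug watchdog: periodically check each channel and complain if a
 * transfer appears to be hung.
 */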
void
dmatimeout()
{
	register int i, s;

	for (i = 0; i < NDMA; i++) {
		s = splbio();
		if (dmatimo[i]) {
			if (dmatimo[i] > 1)
				printf("dma%d: timeout #%d\n",
				       i, dmatimo[i]-1);
			dmatimo[i]++;
		}
		splx(s);
	}
	timeout(dmatimeout, (caddr_t)0, 30 * hz);
}
#endif