/*
 * Copyright (c) 1982, 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)dma.c	8.1 (Berkeley) 06/10/93
 */

/*
 * DMA driver
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/time.h>
#include <sys/kernel.h>
#include <sys/proc.h>

#include <hp300/dev/dmareg.h>
#include <hp300/dev/dmavar.h>

#include <hp/dev/device.h>

#include <machine/cpu.h>
#include <hp300/hp300/isr.h>

extern void isrlink();
extern void _insque();
extern void _remque();
extern void timeout();
extern u_int kvtop();
extern void PCIA();

/*
 * The largest single request will be MAXPHYS bytes which will require
 * at most MAXPHYS/NBPG+1 chain elements to describe, i.e. if none of
 * the buffer pages are physically contiguous (MAXPHYS/NBPG) and the
 * buffer is not page aligned (+1).
 */
#define	DMAMAXIO	(MAXPHYS/NBPG+1)
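
/*
 * For example, assuming the typical hp300 values of 64K for MAXPHYS
 * and 4K for NBPG (either may differ in a given configuration), this
 * works out to 64K/4K+1 = 17 chain elements.
 */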

struct dma_chain {
	int	dc_count;			/* transfer count */
	char	*dc_addr;			/* physical address of segment */
};

struct dma_softc {
	struct	dmadevice *sc_hwaddr;		/* registers if DMA_C */
	struct	dmaBdevice *sc_Bhwaddr;		/* registers if DMA_B */
	char	sc_type;			/* DMA_B or DMA_C */
	char	sc_flags;			/* misc. flags (DMAF_*) */
	u_short	sc_cmd;				/* DMA controller command */
	struct	dma_chain *sc_cur;		/* current segment */
	struct	dma_chain *sc_last;		/* last segment */
	struct	dma_chain sc_chain[DMAMAXIO];	/* all segments */
} dma_softc[NDMA];

/* types */
#define	DMA_B	0
#define DMA_C	1

/* flags */
#define DMAF_PCFLUSH	0x01
#define DMAF_VCFLUSH	0x02
#define DMAF_NOINTR	0x04

struct	devqueue dmachan[NDMA + 1];
int	dmaintr();

#ifdef DEBUG
int	dmadebug = 0;
#define DDB_WORD	0x01	/* same as DMAGO_WORD */
#define DDB_LWORD	0x02	/* same as DMAGO_LWORD */
#define	DDB_FOLLOW	0x04
#define DDB_IO		0x08

void	dmatimeout();
int	dmatimo[NDMA];

long	dmahits[NDMA];
long	dmamisses[NDMA];
long	dmabyte[NDMA];
long	dmaword[NDMA];
long	dmalword[NDMA];
#endif

void
dmainit()
{
	register struct dmareg *dma = (struct dmareg *)DMA_BASE;
	register struct dma_softc *dc;
	register int i;
	char rev;

	/*
	 * Determine the DMA type.
	 * Don't know how to easily differentiate the A and B cards,
	 * so we just hope nobody has an A card (A cards will work if
	 * DMAINTLVL is set to 3).
	 */
	if (!badbaddr((char *)&dma->dma_id[2]))
		rev = dma->dma_id[2];
	else {
		rev = 'B';
#if !defined(HP320)
		panic("dmainit: DMA card requires hp320 support");
#endif
	}

	dc = &dma_softc[0];
	for (i = 0; i < NDMA; i++) {
		dc->sc_hwaddr = (i & 1) ? &dma->dma_chan1 : &dma->dma_chan0;
		dc->sc_Bhwaddr = (i & 1) ? &dma->dma_Bchan1 : &dma->dma_Bchan0;
		dc->sc_type = rev == 'B' ? DMA_B : DMA_C;
		dc++;
		dmachan[i].dq_forw = dmachan[i].dq_back = &dmachan[i];
	}
	dmachan[i].dq_forw = dmachan[i].dq_back = &dmachan[i];
#ifdef DEBUG
	/* make sure timeout is really not needed */
	timeout(dmatimeout, 0, 30 * hz);
#endif

	printf("dma: 98620%c with 2 channels, %d bit DMA\n",
	       rev, rev == 'B' ? 16 : 32);
}

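/*
 * Acquire a DMA channel for the device queue entry `dq'.  On entry
 * dq->dq_ctlr holds a bitmask of acceptable channels; if one is free
 * we claim it, rewrite dq_ctlr with the channel number and return 1.
 * Otherwise the request is queued and we return 0; the caller's
 * d_start routine will be invoked (from dmafree) when a channel
 * becomes available.
 *
 * A sketch of the expected calling sequence, inferred from the code
 * in this file (the surrounding driver details are hypothetical):
 *
 *	dq->dq_ctlr = 0x1 | 0x2;			any channel will do
 *	if (dmareq(dq) == 0)
 *		return;					d_start called later
 *	dmago(dq->dq_ctlr, addr, count, flags);		d_done called at end
 *	dmafree(dq);					release the channel
 */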
int
dmareq(dq)
	register struct devqueue *dq;
{
	register int i;
	register int chan;
	register int s = splbio();

	chan = dq->dq_ctlr;
	i = NDMA;
	while (--i >= 0) {
		if ((chan & (1 << i)) == 0)
			continue;
		if (dmachan[i].dq_forw != &dmachan[i])
			continue;
		insque(dq, &dmachan[i]);
		dq->dq_ctlr = i;
		splx(s);
		return(1);
	}
	insque(dq, dmachan[NDMA].dq_back);
	splx(s);
	return(0);
}

void
dmafree(dq)
	register struct devqueue *dq;
{
	int unit = dq->dq_ctlr;
	register struct dma_softc *dc = &dma_softc[unit];
	register struct devqueue *dn;
	register int chan, s;

	s = splbio();
#ifdef DEBUG
	dmatimo[unit] = 0;
#endif
	DMA_CLEAR(dc);
#if defined(HP360) || defined(HP370) || defined(HP380)
	/*
	 * XXX we may not always go thru the flush code in dmastop()
	 */
	if (dc->sc_flags & DMAF_PCFLUSH) {
		PCIA();
		dc->sc_flags &= ~DMAF_PCFLUSH;
	}
#endif
#if defined(HP320) || defined(HP350)
	if (dc->sc_flags & DMAF_VCFLUSH) {
		/*
		 * 320/350s have VACs that may also need flushing.
		 * In our case we only flush the supervisor side
		 * because we know that if we are DMAing to user
		 * space, the physical pages will also be mapped
		 * in kernel space (via vmapbuf) and hence cache-
		 * inhibited by the pmap module due to the multiple
		 * mapping.
		 */
		DCIS();
		dc->sc_flags &= ~DMAF_VCFLUSH;
	}
#endif
	remque(dq);
	chan = 1 << unit;
	for (dn = dmachan[NDMA].dq_forw;
	     dn != &dmachan[NDMA]; dn = dn->dq_forw) {
		if (dn->dq_ctlr & chan) {
			remque((caddr_t)dn);
			insque((caddr_t)dn, (caddr_t)dq->dq_back);
			splx(s);
			dn->dq_ctlr = dq->dq_ctlr;
			(dn->dq_driver->d_start)(dn->dq_unit);
			return;
		}
	}
	splx(s);
}

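/*
 * Start a DMA transfer on channel `unit': map the `count' byte buffer
 * at kernel virtual address `addr' into a chain of physical segments
 * and arm the hardware.  `flags' is a combination of DMAGO_* values
 * (direction, transfer width, priority, interrupt suppression).
 */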
void
dmago(unit, addr, count, flags)
	int unit;
	register char *addr;
	register int count;
	register int flags;
{
	register struct dma_softc *dc = &dma_softc[unit];
	register struct dma_chain *dcp;
	register char *dmaend = NULL;
	register int tcount;

	if (count > MAXPHYS)
		panic("dmago: count > MAXPHYS");
#if defined(HP320)
	if (dc->sc_type == DMA_B && (flags & DMAGO_LWORD))
		panic("dmago: no can do 32-bit DMA");
#endif
#ifdef DEBUG
	if (dmadebug & DDB_FOLLOW)
		printf("dmago(%d, %x, %x, %x)\n",
		       unit, addr, count, flags);
	if (flags & DMAGO_LWORD)
		dmalword[unit]++;
	else if (flags & DMAGO_WORD)
		dmaword[unit]++;
	else
		dmabyte[unit]++;
#endif
	/*
	 * Build the DMA chain
	 */
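	/*
	 * The loop below walks the buffer a page at a time, translating
	 * each piece to a physical address and merging it into the
	 * previous element whenever the pages turn out to be physically
	 * contiguous.  For instance (illustrative only), a page-aligned
	 * buffer spanning three physically contiguous pages collapses
	 * into a single chain element, while the same buffer with
	 * scattered pages needs three.
	 */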
	for (dcp = dc->sc_chain; count > 0; dcp++) {
		dcp->dc_addr = (char *) kvtop(addr);
#if defined(HP380)
		/*
		 * Push back dirty cache lines
		 */
		if (mmutype == MMU_68040)
			DCFP(dcp->dc_addr);
#endif
		if (count < (tcount = NBPG - ((int)addr & PGOFSET)))
			tcount = count;
		dcp->dc_count = tcount;
		addr += tcount;
		count -= tcount;
		if (flags & DMAGO_LWORD)
			tcount >>= 2;
		else if (flags & DMAGO_WORD)
			tcount >>= 1;
		if (dcp->dc_addr == dmaend
#if defined(HP320)
		    /* only 16-bit count on 98620B */
		    && (dc->sc_type != DMA_B ||
			(dcp-1)->dc_count + tcount <= 65536)
#endif
		    ) {
#ifdef DEBUG
			dmahits[unit]++;
#endif
			dmaend += dcp->dc_count;
			(--dcp)->dc_count += tcount;
		} else {
#ifdef DEBUG
			dmamisses[unit]++;
#endif
			dmaend = dcp->dc_addr + dcp->dc_count;
			dcp->dc_count = tcount;
		}
	}
	dc->sc_cur = dc->sc_chain;
	dc->sc_last = --dcp;
	dc->sc_flags = 0;
	/*
	 * Set up the command word based on flags
	 */
	dc->sc_cmd = DMA_ENAB | DMA_IPL(DMAINTLVL) | DMA_START;
	if ((flags & DMAGO_READ) == 0)
		dc->sc_cmd |= DMA_WRT;
	if (flags & DMAGO_LWORD)
		dc->sc_cmd |= DMA_LWORD;
	else if (flags & DMAGO_WORD)
		dc->sc_cmd |= DMA_WORD;
	if (flags & DMAGO_PRI)
		dc->sc_cmd |= DMA_PRI;
#if defined(HP380)
	/*
	 * On the 68040 we need to flush (push) the data cache before a
	 * DMA (already done above) and flush again after DMA completes.
	 * In theory we should only need to flush prior to a write DMA
	 * and purge after a read DMA but if the entire page is not
	 * involved in the DMA we might purge some valid data.
	 */
	if (mmutype == MMU_68040 && (flags & DMAGO_READ))
		dc->sc_flags |= DMAF_PCFLUSH;
#endif
#if defined(HP360) || defined(HP370)
	/*
	 * Remember if we need to flush external physical cache when
	 * DMA is done.  We only do this if we are reading (writing memory).
	 */
	if (ectype == EC_PHYS && (flags & DMAGO_READ))
		dc->sc_flags |= DMAF_PCFLUSH;
#endif
#if defined(HP320) || defined(HP350)
	if (ectype == EC_VIRT && (flags & DMAGO_READ))
		dc->sc_flags |= DMAF_VCFLUSH;
#endif
	/*
	 * Remember if we can skip the dma completion interrupt on
	 * the last segment in the chain.
	 */
	if (flags & DMAGO_NOINT) {
		if (dc->sc_cur == dc->sc_last)
			dc->sc_cmd &= ~DMA_ENAB;
		else
			dc->sc_flags |= DMAF_NOINTR;
	}
#ifdef DEBUG
	if (dmadebug & DDB_IO)
		if ((dmadebug&DDB_WORD) && (dc->sc_cmd&DMA_WORD) ||
		    (dmadebug&DDB_LWORD) && (dc->sc_cmd&DMA_LWORD)) {
			printf("dmago: cmd %x, flags %x\n",
			       dc->sc_cmd, dc->sc_flags);
			for (dcp = dc->sc_chain; dcp <= dc->sc_last; dcp++)
				printf(" %d: %d@%x\n", dcp-dc->sc_chain,
				       dcp->dc_count, dcp->dc_addr);
		}
	dmatimo[unit] = 1;
#endif
	DMA_ARM(dc);
}

void
dmastop(unit)
	register int unit;
{
	register struct dma_softc *dc = &dma_softc[unit];
	register struct devqueue *dq;

#ifdef DEBUG
	if (dmadebug & DDB_FOLLOW)
		printf("dmastop(%d)\n", unit);
	dmatimo[unit] = 0;
#endif
	DMA_CLEAR(dc);
#if defined(HP360) || defined(HP370) || defined(HP380)
	if (dc->sc_flags & DMAF_PCFLUSH) {
		PCIA();
		dc->sc_flags &= ~DMAF_PCFLUSH;
	}
#endif
#if defined(HP320) || defined(HP350)
	if (dc->sc_flags & DMAF_VCFLUSH) {
		/*
		 * 320/350s have VACs that may also need flushing.
		 * In our case we only flush the supervisor side
		 * because we know that if we are DMAing to user
		 * space, the physical pages will also be mapped
		 * in kernel space (via vmapbuf) and hence cache-
		 * inhibited by the pmap module due to the multiple
		 * mapping.
		 */
		DCIS();
		dc->sc_flags &= ~DMAF_VCFLUSH;
	}
#endif
	/*
	 * We may get this interrupt after a device service routine
	 * has freed the dma channel.  So, ignore the intr if there's
	 * nothing on the queue.
	 */
	dq = dmachan[unit].dq_forw;
	if (dq != &dmachan[unit])
		(dq->dq_driver->d_done)(dq->dq_unit);
}

int
dmaintr()
{
	register struct dma_softc *dc;
	register int i, stat;
	int found = 0;

#ifdef DEBUG
	if (dmadebug & DDB_FOLLOW)
		printf("dmaintr\n");
#endif
	for (i = 0, dc = dma_softc; i < NDMA; i++, dc++) {
		stat = DMA_STAT(dc);
		if ((stat & DMA_INTR) == 0)
			continue;
		found++;
#ifdef DEBUG
		if (dmadebug & DDB_IO) {
			if ((dmadebug&DDB_WORD) && (dc->sc_cmd&DMA_WORD) ||
			    (dmadebug&DDB_LWORD) && (dc->sc_cmd&DMA_LWORD))
				printf("dmaintr: unit %d stat %x next %d\n",
				       i, stat, (dc->sc_cur-dc->sc_chain)+1);
		}
		if (stat & DMA_ARMED)
			printf("dma%d: intr when armed\n", i);
#endif
		if (++dc->sc_cur <= dc->sc_last) {
#ifdef DEBUG
			dmatimo[i] = 1;
#endif
			/*
			 * If we have advanced to the last chain segment
			 * and the caller asked to skip the completion
			 * interrupt, disable the DMA interrupt now.
			 */
			if (dc->sc_cur == dc->sc_last &&
			    (dc->sc_flags & DMAF_NOINTR))
				dc->sc_cmd &= ~DMA_ENAB;
			DMA_CLEAR(dc);
			DMA_ARM(dc);
		} else
			dmastop(i);
	}
	return(found);
}

#ifdef DEBUG
void
dmatimeout()
{
	register int i, s;

	for (i = 0; i < NDMA; i++) {
		s = splbio();
		if (dmatimo[i]) {
			if (dmatimo[i] > 1)
				printf("dma%d: timeout #%d\n",
				       i, dmatimo[i]-1);
			dmatimo[i]++;
		}
		splx(s);
	}
	timeout(dmatimeout, (caddr_t)0, 30 * hz);
}
#endif