/*	$NetBSD: dma.c,v 1.7 1996/02/14 02:44:17 thorpej Exp $	*/

/*
 * Copyright (c) 1995 Jason R. Thorpe.
 * Copyright (c) 1982, 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)dma.c	8.1 (Berkeley) 6/10/93
 */

/*
 * DMA driver
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/time.h>
#include <sys/kernel.h>
#include <sys/proc.h>

#include <machine/cpu.h>

#include <hp300/dev/device.h>
#include <hp300/dev/dmareg.h>
#include <hp300/dev/dmavar.h>

#include <hp300/hp300/isr.h>

extern	void isrlink();
extern	void _insque();
extern	void _remque();
extern	u_int kvtop();
extern	void PCIA();

/*
 * The largest single request will be MAXPHYS bytes which will require
 * at most MAXPHYS/NBPG+1 chain elements to describe, i.e. if none of
 * the buffer pages are physically contiguous (MAXPHYS/NBPG) and the
 * buffer is not page aligned (+1).
 */
#define	DMAMAXIO	(MAXPHYS/NBPG+1)
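
/*
 * For example (illustrative values only; the real constants come from
 * the machine's param.h): a MAXPHYS of 64KB and an NBPG of 4KB give
 * DMAMAXIO = 64KB/4KB + 1 = 17 chain elements in the worst case.
 */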

struct dma_chain {
	int	dc_count;
	char	*dc_addr;
};

struct dma_channel {
	struct	dma_softc *dm_softc;	/* pointer back to softc */
	struct	dmadevice *dm_hwaddr;	/* registers if DMA_C */
	struct	dmaBdevice *dm_Bhwaddr;	/* registers if not DMA_C */
	char	dm_flags;		/* misc. flags */
	u_short	dm_cmd;			/* DMA controller command */
	struct	dma_chain *dm_cur;	/* current segment */
	struct	dma_chain *dm_last;	/* last segment */
	struct	dma_chain dm_chain[DMAMAXIO];	/* all segments */
};

struct dma_softc {
	char	*sc_xname;			/* XXX external name */
	struct	dmareg *sc_dmareg;		/* pointer to our hardware */
	struct	dma_channel sc_chan[NDMACHAN];	/* 2 channels */
	char	sc_type;			/* A, B, or C */
} Dma_softc;

/* types */
#define	DMA_B	0
#define	DMA_C	1

/* flags */
#define	DMAF_PCFLUSH	0x01
#define	DMAF_VCFLUSH	0x02
#define	DMAF_NOINTR	0x04

struct	devqueue dmachan[NDMACHAN + 1];
int	dmaintr __P((void *));

#ifdef DEBUG
int	dmadebug = 0;
#define	DDB_WORD	0x01	/* same as DMAGO_WORD */
#define	DDB_LWORD	0x02	/* same as DMAGO_LWORD */
#define	DDB_FOLLOW	0x04
#define	DDB_IO		0x08

void	dmatimeout __P((void *));
int	dmatimo[NDMACHAN];

long	dmahits[NDMACHAN];
long	dmamisses[NDMACHAN];
long	dmabyte[NDMACHAN];
long	dmaword[NDMACHAN];
long	dmalword[NDMACHAN];
#endif

void
dmainit()
{
	struct dma_softc *sc = &Dma_softc;
	struct dmareg *dma;
	struct dma_channel *dc;
	int i;
	char rev;

	/* There's just one. */
	sc->sc_dmareg = (struct dmareg *)DMA_BASE;
	dma = sc->sc_dmareg;
	sc->sc_xname = "dma0";

	/*
	 * Determine the DMA type.  A DMA_A or DMA_B will fail the
	 * following probe.
	 *
	 * XXX Don't know how to easily differentiate the A and B cards,
	 * so we just hope nobody has an A card (A cards will work if
	 * DMAINTLVL is set to 3).
	 */
	if (badbaddr((char *)&dma->dma_id[2])) {
		rev = 'B';
#if !defined(HP320)
		panic("dmainit: DMA card requires hp320 support");
#endif
	} else
		rev = dma->dma_id[2];

	sc->sc_type = (rev == 'B') ? DMA_B : DMA_C;

	for (i = 0; i < NDMACHAN; i++) {
		dc = &sc->sc_chan[i];
		dc->dm_softc = sc;
		switch (i) {
		case 0:
			dc->dm_hwaddr = &dma->dma_chan0;
			dc->dm_Bhwaddr = &dma->dma_Bchan0;
			break;

		case 1:
			dc->dm_hwaddr = &dma->dma_chan1;
			dc->dm_Bhwaddr = &dma->dma_Bchan1;
			break;

		default:
			panic("dmainit: more than 2 channels?");
			/* NOTREACHED */
		}
		dmachan[i].dq_forw = dmachan[i].dq_back = &dmachan[i];
	}
	dmachan[i].dq_forw = dmachan[i].dq_back = &dmachan[i];
#ifdef DEBUG
	/* make sure timeout is really not needed */
	timeout(dmatimeout, sc, 30 * hz);
#endif

	printf("%s: 98620%c, 2 channels, %d bit\n", sc->sc_xname,
	       rev, (rev == 'B') ? 16 : 32);

	/* Establish the interrupt handler */
	isrlink(dmaintr, sc, DMAINTLVL, ISRPRI_BIO);
}
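
/*
 * Acquire a DMA channel.  On entry dq->dq_ctlr holds a bit mask of
 * acceptable channels; on success it is replaced with the number of
 * the channel actually granted.  Returns 1 if a channel was free, or
 * 0 after queueing the request, in which case the requester's d_start
 * routine is called from dmafree() when a channel is handed over.
 */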
int
dmareq(dq)
	register struct devqueue *dq;
{
	register int i;
	register int chan;
	register int s = splbio();

	chan = dq->dq_ctlr;
	i = NDMACHAN;
	while (--i >= 0) {
		if ((chan & (1 << i)) == 0)
			continue;
		if (dmachan[i].dq_forw != &dmachan[i])
			continue;
		insque(dq, &dmachan[i]);
		dq->dq_ctlr = i;
		splx(s);
		return(1);
	}
	insque(dq, dmachan[NDMACHAN].dq_back);
	splx(s);
	return(0);
}

void
dmafree(dq)
	register struct devqueue *dq;
{
	int unit = dq->dq_ctlr;
	struct dma_softc *sc = &Dma_softc;
	register struct dma_channel *dc = &sc->sc_chan[unit];
	register struct devqueue *dn;
	register int chan, s;

	s = splbio();
#ifdef DEBUG
	dmatimo[unit] = 0;
#endif
	DMA_CLEAR(dc);
#if defined(HP360) || defined(HP370) || defined(HP380)
	/*
	 * XXX we may not always go thru the flush code in dmastop()
	 */
	if (dc->dm_flags & DMAF_PCFLUSH) {
		PCIA();
		dc->dm_flags &= ~DMAF_PCFLUSH;
	}
#endif
#if defined(HP320) || defined(HP350)
	if (dc->dm_flags & DMAF_VCFLUSH) {
		/*
		 * 320/350s have VACs that may also need flushing.
		 * In our case we only flush the supervisor side
		 * because we know that if we are DMAing to user
		 * space, the physical pages will also be mapped
		 * in kernel space (via vmapbuf) and hence cache-
		 * inhibited by the pmap module due to the multiple
		 * mapping.
		 */
		DCIS();
		dc->dm_flags &= ~DMAF_VCFLUSH;
	}
#endif
	remque(dq);
	chan = 1 << unit;
	for (dn = dmachan[NDMACHAN].dq_forw;
	     dn != &dmachan[NDMACHAN]; dn = dn->dq_forw) {
		if (dn->dq_ctlr & chan) {
			remque((caddr_t)dn);
			insque((caddr_t)dn, (caddr_t)dq->dq_back);
			splx(s);
			dn->dq_ctlr = dq->dq_ctlr;
			(dn->dq_driver->d_start)(dn->dq_unit);
			return;
		}
	}
	splx(s);
}

void
dmago(unit, addr, count, flags)
	int unit;
	register char *addr;
	register int count;
	register int flags;
{
	struct dma_softc *sc = &Dma_softc;
	register struct dma_channel *dc = &sc->sc_chan[unit];
	register struct dma_chain *dcp;
	register char *dmaend = NULL;
	register int tcount;

	if (count > MAXPHYS)
		panic("dmago: count > MAXPHYS");
#if defined(HP320)
	if (sc->sc_type == DMA_B && (flags & DMAGO_LWORD))
		panic("dmago: no can do 32-bit DMA");
#endif
#ifdef DEBUG
	if (dmadebug & DDB_FOLLOW)
		printf("dmago(%d, %x, %x, %x)\n",
		       unit, addr, count, flags);
	if (flags & DMAGO_LWORD)
		dmalword[unit]++;
	else if (flags & DMAGO_WORD)
		dmaword[unit]++;
	else
		dmabyte[unit]++;
#endif
	/*
	 * Build the DMA chain
	 */
	for (dcp = dc->dm_chain; count > 0; dcp++) {
		dcp->dc_addr = (char *) kvtop(addr);
#if defined(HP380)
		/*
		 * Push back dirty cache lines
		 */
		if (mmutype == MMU_68040)
			DCFP(dcp->dc_addr);
#endif
		if (count < (tcount = NBPG - ((int)addr & PGOFSET)))
			tcount = count;
		dcp->dc_count = tcount;
		addr += tcount;
		count -= tcount;
		if (flags & DMAGO_LWORD)
			tcount >>= 2;
		else if (flags & DMAGO_WORD)
			tcount >>= 1;
		if (dcp->dc_addr == dmaend
#if defined(HP320)
		    /* only 16-bit count on 98620B */
		    && (sc->sc_type != DMA_B ||
			(dcp-1)->dc_count + tcount <= 65536)
#endif
		    ) {
#ifdef DEBUG
			dmahits[unit]++;
#endif
			dmaend += dcp->dc_count;
			(--dcp)->dc_count += tcount;
		} else {
#ifdef DEBUG
			dmamisses[unit]++;
#endif
			dmaend = dcp->dc_addr + dcp->dc_count;
			dcp->dc_count = tcount;
		}
	}
	dc->dm_cur = dc->dm_chain;
	dc->dm_last = --dcp;
	dc->dm_flags = 0;
	/*
	 * Set up the command word based on flags
	 */
	dc->dm_cmd = DMA_ENAB | DMA_IPL(DMAINTLVL) | DMA_START;
	if ((flags & DMAGO_READ) == 0)
		dc->dm_cmd |= DMA_WRT;
	if (flags & DMAGO_LWORD)
		dc->dm_cmd |= DMA_LWORD;
	else if (flags & DMAGO_WORD)
		dc->dm_cmd |= DMA_WORD;
	if (flags & DMAGO_PRI)
		dc->dm_cmd |= DMA_PRI;
#if defined(HP380)
	/*
	 * On the 68040 we need to flush (push) the data cache before a
	 * DMA (already done above) and flush again after DMA completes.
	 * In theory we should only need to flush prior to a write DMA
	 * and purge after a read DMA but if the entire page is not
	 * involved in the DMA we might purge some valid data.
	 */
	if (mmutype == MMU_68040 && (flags & DMAGO_READ))
		dc->dm_flags |= DMAF_PCFLUSH;
#endif
#if defined(HP360) || defined(HP370)
	/*
	 * Remember if we need to flush external physical cache when
	 * DMA is done.  We only do this if we are reading (writing memory).
	 */
	if (ectype == EC_PHYS && (flags & DMAGO_READ))
		dc->dm_flags |= DMAF_PCFLUSH;
#endif
#if defined(HP320) || defined(HP350)
	if (ectype == EC_VIRT && (flags & DMAGO_READ))
		dc->dm_flags |= DMAF_VCFLUSH;
#endif
	/*
	 * Remember if we can skip the dma completion interrupt on
	 * the last segment in the chain.
	 */
	if (flags & DMAGO_NOINT) {
		if (dc->dm_cur == dc->dm_last)
			dc->dm_cmd &= ~DMA_ENAB;
		else
			dc->dm_flags |= DMAF_NOINTR;
	}
#ifdef DEBUG
	if (dmadebug & DDB_IO)
		if ((dmadebug&DDB_WORD) && (dc->dm_cmd&DMA_WORD) ||
		    (dmadebug&DDB_LWORD) && (dc->dm_cmd&DMA_LWORD)) {
			printf("dmago: cmd %x, flags %x\n",
			       dc->dm_cmd, dc->dm_flags);
			for (dcp = dc->dm_chain; dcp <= dc->dm_last; dcp++)
				printf(" %d: %d@%x\n", dcp-dc->dm_chain,
				       dcp->dc_count, dcp->dc_addr);
		}
	dmatimo[unit] = 1;
#endif
	DMA_ARM(dc);
}
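
/*
 * Illustration of the chain-building logic above (addresses made up
 * for the example, assuming the 4KB hp300 page size): a byte-mode
 * transfer of three pages whose physical pages are 0x40000, 0x41000
 * and 0x80000 produces two chain elements, {0x40000, 0x2000} and
 * {0x80000, 0x1000}, since the first two pages are physically
 * contiguous (a "hit") while the third is not (a "miss").  For word
 * or longword transfers the counts are stored in transfer units,
 * i.e. shifted right by 1 or 2.
 */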
void
dmastop(unit)
	register int unit;
{
	struct dma_softc *sc = &Dma_softc;
	register struct dma_channel *dc = &sc->sc_chan[unit];
	register struct devqueue *dq;

#ifdef DEBUG
	if (dmadebug & DDB_FOLLOW)
		printf("dmastop(%d)\n", unit);
	dmatimo[unit] = 0;
#endif
	DMA_CLEAR(dc);
#if defined(HP360) || defined(HP370) || defined(HP380)
	if (dc->dm_flags & DMAF_PCFLUSH) {
		PCIA();
		dc->dm_flags &= ~DMAF_PCFLUSH;
	}
#endif
#if defined(HP320) || defined(HP350)
	if (dc->dm_flags & DMAF_VCFLUSH) {
		/*
		 * 320/350s have VACs that may also need flushing.
		 * In our case we only flush the supervisor side
		 * because we know that if we are DMAing to user
		 * space, the physical pages will also be mapped
		 * in kernel space (via vmapbuf) and hence cache-
		 * inhibited by the pmap module due to the multiple
		 * mapping.
		 */
		DCIS();
		dc->dm_flags &= ~DMAF_VCFLUSH;
	}
#endif
	/*
	 * We may get this interrupt after a device service routine
	 * has freed the dma channel.  So, ignore the intr if there's
	 * nothing on the queue.
	 */
	dq = dmachan[unit].dq_forw;
	if (dq != &dmachan[unit])
		(dq->dq_driver->d_done)(dq->dq_unit);
}
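
/*
 * DMA completion interrupt.  Each segment of a chain normally
 * interrupts as it completes; we advance dm_cur and re-arm the
 * channel for the next segment, and call dmastop() once the whole
 * chain has finished.  Returns nonzero if any channel had an
 * interrupt pending.
 */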
int
dmaintr(arg)
	void *arg;
{
	struct dma_softc *sc = arg;
	register struct dma_channel *dc;
	register int i, stat;
	int found = 0;

#ifdef DEBUG
	if (dmadebug & DDB_FOLLOW)
		printf("dmaintr\n");
#endif
	for (i = 0; i < NDMACHAN; i++) {
		dc = &sc->sc_chan[i];
		stat = DMA_STAT(dc);
		if ((stat & DMA_INTR) == 0)
			continue;
		found++;
#ifdef DEBUG
		if (dmadebug & DDB_IO) {
			if ((dmadebug&DDB_WORD) && (dc->dm_cmd&DMA_WORD) ||
			    (dmadebug&DDB_LWORD) && (dc->dm_cmd&DMA_LWORD))
				printf("dmaintr: unit %d stat %x next %d\n",
				       i, stat, (dc->dm_cur-dc->dm_chain)+1);
		}
		if (stat & DMA_ARMED)
			printf("%s, chan %d: intr when armed\n",
			       sc->sc_xname, i);
#endif
		if (++dc->dm_cur <= dc->dm_last) {
#ifdef DEBUG
			dmatimo[i] = 1;
#endif
			/*
			 * If this is now the last segment of the chain
			 * and the caller asked to skip the completion
			 * interrupt, disable the DMA interrupt.
			 */
			if (dc->dm_cur == dc->dm_last &&
			    (dc->dm_flags & DMAF_NOINTR))
				dc->dm_cmd &= ~DMA_ENAB;
			DMA_CLEAR(dc);
			DMA_ARM(dc);
		} else
			dmastop(i);
	}
	return(found);
}

#ifdef DEBUG
void
dmatimeout(arg)
	void *arg;
{
	register int i, s;
	struct dma_softc *sc = arg;

	for (i = 0; i < NDMACHAN; i++) {
		s = splbio();
		if (dmatimo[i]) {
			if (dmatimo[i] > 1)
				printf("%s, chan %d: timeout #%d\n",
				       sc->sc_xname, i, dmatimo[i]-1);
			dmatimo[i]++;
		}
		splx(s);
	}
	timeout(dmatimeout, sc, 30 * hz);
}
#endif
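
/*
 * Usage sketch (not part of this driver; the surrounding driver code
 * is hypothetical): a device driver claims a channel with dmareq()
 * from its start routine, kicks off the transfer with dmago(), and
 * releases the channel with dmafree() from its d_done routine, e.g.:
 *
 *	dq->dq_ctlr = (1 << 0) | (1 << 1);	<- any channel will do
 *	if (dmareq(dq) == 0)
 *		return;		<- queued; d_start is called on handoff
 *	dmago(dq->dq_ctlr, buf, count, DMAGO_READ | DMAGO_LWORD);
 *	...
 *	dmafree(dq);		<- from the d_done routine
 */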