/*	$NetBSD: dma.c,v 1.17 1997/04/14 02:33:18 thorpej Exp $	*/

/*
 * Copyright (c) 1995, 1996, 1997
 *	Jason R. Thorpe.  All rights reserved.
 * Copyright (c) 1982, 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)dma.c	8.1 (Berkeley) 6/10/93
 */

/*
 * DMA driver
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/time.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/device.h>

#include <machine/frame.h>
#include <machine/cpu.h>
#include <machine/intr.h>

#include <hp300/dev/dmareg.h>
#include <hp300/dev/dmavar.h>

/*
 * The largest single request will be MAXPHYS bytes which will require
 * at most MAXPHYS/NBPG+1 chain elements to describe, i.e. if none of
 * the buffer pages are physically contiguous (MAXPHYS/NBPG) and the
 * buffer is not page aligned (+1).
 */
#define	DMAMAXIO	(MAXPHYS/NBPG+1)
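/*
 * (For example, assuming the usual 64KB MAXPHYS and 4KB pages, that
 * works out to 16 + 1 = 17 chain elements.)
 */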

struct dma_chain {
	int	dc_count;
	char	*dc_addr;
};

struct dma_channel {
	struct	dmaqueue *dm_job;		/* current job */
	struct	dma_softc *dm_softc;		/* pointer back to softc */
	struct	dmadevice *dm_hwaddr;		/* registers if DMA_C */
	struct	dmaBdevice *dm_Bhwaddr;		/* registers if not DMA_C */
	char	dm_flags;			/* misc. flags */
	u_short	dm_cmd;				/* DMA controller command */
	int	dm_cur;				/* current segment */
	int	dm_last;			/* last segment */
	struct	dma_chain dm_chain[DMAMAXIO];	/* all segments */
};

struct dma_softc {
	char	*sc_xname;			/* XXX external name */
	struct	dmareg *sc_dmareg;		/* pointer to our hardware */
	struct	dma_channel sc_chan[NDMACHAN];	/* 2 channels */
	TAILQ_HEAD(, dmaqueue) sc_queue;	/* job queue */
	char	sc_type;			/* A, B, or C */
	int	sc_ipl;				/* our interrupt level */
	void	*sc_ih;				/* interrupt cookie */
} Dma_softc;

/* types */
#define	DMA_B	0
#define	DMA_C	1

/* flags */
#define	DMAF_PCFLUSH	0x01
#define	DMAF_VCFLUSH	0x02
#define	DMAF_NOINTR	0x04

int	dmaintr __P((void *));

#ifdef DEBUG
int	dmadebug = 0;
#define	DDB_WORD	0x01	/* same as DMAGO_WORD */
#define	DDB_LWORD	0x02	/* same as DMAGO_LWORD */
#define	DDB_FOLLOW	0x04
#define	DDB_IO		0x08

void	dmatimeout __P((void *));
int	dmatimo[NDMACHAN];

long	dmahits[NDMACHAN];
long	dmamisses[NDMACHAN];
long	dmabyte[NDMACHAN];
long	dmaword[NDMACHAN];
long	dmalword[NDMACHAN];
#endif

void
dmainit()
{
	struct dma_softc *sc = &Dma_softc;
	struct dmareg *dma;
	struct dma_channel *dc;
	int i;
	char rev;

	/* There's just one. */
	sc->sc_dmareg = (struct dmareg *)DMA_BASE;
	dma = sc->sc_dmareg;
	sc->sc_xname = "dma0";

	/*
	 * Determine the DMA type.  A DMA_A or DMA_B will fail the
	 * following probe.
	 *
	 * XXX Don't know how to easily differentiate the A and B cards,
	 * so we just hope nobody has an A card (A cards will work if
	 * splbio works out to ipl 3).
	 */
	if (badbaddr((char *)&dma->dma_id[2])) {
		rev = 'B';
#if !defined(HP320)
		panic("dmainit: DMA card requires hp320 support");
#endif
	} else
		rev = dma->dma_id[2];

	sc->sc_type = (rev == 'B') ? DMA_B : DMA_C;

	TAILQ_INIT(&sc->sc_queue);

	for (i = 0; i < NDMACHAN; i++) {
		dc = &sc->sc_chan[i];
		dc->dm_softc = sc;
		dc->dm_job = NULL;
		switch (i) {
		case 0:
			dc->dm_hwaddr = &dma->dma_chan0;
			dc->dm_Bhwaddr = &dma->dma_Bchan0;
			break;

		case 1:
			dc->dm_hwaddr = &dma->dma_chan1;
			dc->dm_Bhwaddr = &dma->dma_Bchan1;
			break;

		default:
			panic("dmainit: more than 2 channels?");
			/* NOTREACHED */
		}
	}

#ifdef DEBUG
	/* make sure timeout is really not needed */
	timeout(dmatimeout, sc, 30 * hz);
#endif

	printf("%s: 98620%c, 2 channels, %d bit\n", sc->sc_xname,
	    rev, (rev == 'B') ? 16 : 32);

	/*
	 * Defer hooking up our interrupt until the first
	 * DMA-using controller has hooked up theirs.
	 */
	sc->sc_ih = NULL;
}

/*
 * Compute the ipl and (re)establish the interrupt handler
 * for the DMA controller.
 */
void
dmacomputeipl()
{
	struct dma_softc *sc = &Dma_softc;

	if (sc->sc_ih != NULL)
		intr_disestablish(sc->sc_ih);

	/*
	 * Our interrupt level must be as high as the highest
	 * device using DMA (i.e. splbio).
	 */
	sc->sc_ipl = PSLTOIPL(hp300_bioipl);
	sc->sc_ih = intr_establish(dmaintr, sc, sc->sc_ipl, IPL_BIO);
}
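
/*
 * Request a DMA channel for the job described by "dq".  On entry,
 * dq->dq_chan is a bitmask of the channels the caller is willing to
 * use.  If one of them is free, claim it, overwrite dq->dq_chan with
 * the channel number, and return 1.  Otherwise queue the job and
 * return 0; the job's dq_start routine will be called from dmafree()
 * when a suitable channel becomes available.
 */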
int
dmareq(dq)
	struct dmaqueue *dq;
{
	struct dma_softc *sc = &Dma_softc;
	int i, chan, s;

#if 1
	s = splhigh();	/* XXXthorpej */
#else
	s = splbio();
#endif

	chan = dq->dq_chan;
	for (i = NDMACHAN - 1; i >= 0; i--) {
		/*
		 * Can we use this channel?
		 */
		if ((chan & (1 << i)) == 0)
			continue;

		/*
		 * We can use it; is it busy?
		 */
		if (sc->sc_chan[i].dm_job != NULL)
			continue;

		/*
		 * Not busy; give the caller this channel.
		 */
		sc->sc_chan[i].dm_job = dq;
		dq->dq_chan = i;
		splx(s);
		return (1);
	}

	/*
	 * Couldn't get a channel now; put this in the queue.
	 */
	TAILQ_INSERT_TAIL(&sc->sc_queue, dq, dq_list);
	splx(s);
	return (0);
}

void
dmafree(dq)
	struct dmaqueue *dq;
{
	int unit = dq->dq_chan;
	struct dma_softc *sc = &Dma_softc;
	struct dma_channel *dc = &sc->sc_chan[unit];
	struct dmaqueue *dn;
	int chan, s;

#if 1
	s = splhigh();	/* XXXthorpej */
#else
	s = splbio();
#endif

#ifdef DEBUG
	dmatimo[unit] = 0;
#endif

	DMA_CLEAR(dc);
#if defined(HP340) || defined(HP360) || defined(HP370) || defined(HP375) || defined(HP380)
	/*
	 * XXX we may not always go thru the flush code in dmastop()
	 */
	if (dc->dm_flags & DMAF_PCFLUSH) {
		PCIA();
		dc->dm_flags &= ~DMAF_PCFLUSH;
	}
#endif
#if defined(HP320) || defined(HP350)
	if (dc->dm_flags & DMAF_VCFLUSH) {
		/*
		 * 320/350s have VACs that may also need flushing.
		 * In our case we only flush the supervisor side
		 * because we know that if we are DMAing to user
		 * space, the physical pages will also be mapped
		 * in kernel space (via vmapbuf) and hence cache-
		 * inhibited by the pmap module due to the multiple
		 * mapping.
		 */
		DCIS();
		dc->dm_flags &= ~DMAF_VCFLUSH;
	}
#endif
	/*
	 * Channel is now free.  Look for another job to run on this
	 * channel.
	 */
	dc->dm_job = NULL;
	chan = 1 << unit;
	for (dn = sc->sc_queue.tqh_first; dn != NULL;
	    dn = dn->dq_list.tqe_next) {
		if (dn->dq_chan & chan) {
			/* Found one... */
			TAILQ_REMOVE(&sc->sc_queue, dn, dq_list);
			dc->dm_job = dn;
			dn->dq_chan = dq->dq_chan;
			splx(s);

			/* Start the initiator. */
			(*dn->dq_start)(dn->dq_softc);
			return;
		}
	}
	splx(s);
}
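
/*
 * Start a transfer of "count" bytes at "addr" on the given channel.
 * Build the chain of physical segments (merging physically contiguous
 * pages as we go), set up the command word from "flags", and arm the
 * hardware for the first segment.
 */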
void
dmago(unit, addr, count, flags)
	int unit;
	char *addr;
	int count;
	int flags;
{
	struct dma_softc *sc = &Dma_softc;
	struct dma_channel *dc = &sc->sc_chan[unit];
	char *dmaend = NULL;
	int seg, tcount;

	if (count > MAXPHYS)
		panic("dmago: count > MAXPHYS");
#if defined(HP320)
	if (sc->sc_type == DMA_B && (flags & DMAGO_LWORD))
		panic("dmago: no can do 32-bit DMA");
#endif
#ifdef DEBUG
	if (dmadebug & DDB_FOLLOW)
		printf("dmago(%d, %p, %x, %x)\n",
		    unit, addr, count, flags);
	if (flags & DMAGO_LWORD)
		dmalword[unit]++;
	else if (flags & DMAGO_WORD)
		dmaword[unit]++;
	else
		dmabyte[unit]++;
#endif
	/*
	 * Build the DMA chain
	 */
	for (seg = 0; count > 0; seg++) {
		dc->dm_chain[seg].dc_addr = (char *) kvtop(addr);
#if defined(HP380)
		/*
		 * Push back dirty cache lines
		 */
		if (mmutype == MMU_68040)
			DCFP((vm_offset_t)dc->dm_chain[seg].dc_addr);
#endif
		if (count < (tcount = NBPG - ((int)addr & PGOFSET)))
			tcount = count;
		dc->dm_chain[seg].dc_count = tcount;
		addr += tcount;
		count -= tcount;
		if (flags & DMAGO_LWORD)
			tcount >>= 2;
		else if (flags & DMAGO_WORD)
			tcount >>= 1;

		/*
		 * Try to compact the DMA transfer if the pages are adjacent.
		 * Note: this will never happen on the first iteration.
		 */
		if (dc->dm_chain[seg].dc_addr == dmaend
#if defined(HP320)
		    /* only 16-bit count on 98620B */
		    && (sc->sc_type != DMA_B ||
			dc->dm_chain[seg - 1].dc_count + tcount <= 65536)
#endif
		    ) {
#ifdef DEBUG
			dmahits[unit]++;
#endif
			dmaend += dc->dm_chain[seg].dc_count;
			dc->dm_chain[--seg].dc_count += tcount;
		} else {
#ifdef DEBUG
			dmamisses[unit]++;
#endif
			dmaend = dc->dm_chain[seg].dc_addr +
			    dc->dm_chain[seg].dc_count;
			dc->dm_chain[seg].dc_count = tcount;
		}
	}
	dc->dm_cur = 0;
	dc->dm_last = --seg;
	dc->dm_flags = 0;
	/*
	 * Set up the command word based on flags
	 */
	dc->dm_cmd = DMA_ENAB | DMA_IPL(sc->sc_ipl) | DMA_START;
	if ((flags & DMAGO_READ) == 0)
		dc->dm_cmd |= DMA_WRT;
	if (flags & DMAGO_LWORD)
		dc->dm_cmd |= DMA_LWORD;
	else if (flags & DMAGO_WORD)
		dc->dm_cmd |= DMA_WORD;
	if (flags & DMAGO_PRI)
		dc->dm_cmd |= DMA_PRI;
#if defined(HP380)
	/*
	 * On the 68040 we need to flush (push) the data cache before a
	 * DMA (already done above) and flush again after DMA completes.
	 * In theory we should only need to flush prior to a write DMA
	 * and purge after a read DMA but if the entire page is not
	 * involved in the DMA we might purge some valid data.
	 */
	if (mmutype == MMU_68040 && (flags & DMAGO_READ))
		dc->dm_flags |= DMAF_PCFLUSH;
#endif
#if defined(HP340) || defined(HP360) || defined(HP370) || defined(HP375)
	/*
	 * Remember if we need to flush external physical cache when
	 * DMA is done.  We only do this if we are reading (writing memory).
	 */
	if (ectype == EC_PHYS && (flags & DMAGO_READ))
		dc->dm_flags |= DMAF_PCFLUSH;
#endif
#if defined(HP320) || defined(HP350)
	if (ectype == EC_VIRT && (flags & DMAGO_READ))
		dc->dm_flags |= DMAF_VCFLUSH;
#endif
	/*
	 * Remember if we can skip the dma completion interrupt on
	 * the last segment in the chain.
	 */
	if (flags & DMAGO_NOINT) {
		if (dc->dm_cur == dc->dm_last)
			dc->dm_cmd &= ~DMA_ENAB;
		else
			dc->dm_flags |= DMAF_NOINTR;
	}
#ifdef DEBUG
	if (dmadebug & DDB_IO) {
		if (((dmadebug&DDB_WORD) && (dc->dm_cmd&DMA_WORD)) ||
		    ((dmadebug&DDB_LWORD) && (dc->dm_cmd&DMA_LWORD))) {
			printf("dmago: cmd %x, flags %x\n",
			    dc->dm_cmd, dc->dm_flags);
			for (seg = 0; seg <= dc->dm_last; seg++)
				printf("  %d: %d@%p\n", seg,
				    dc->dm_chain[seg].dc_count,
				    dc->dm_chain[seg].dc_addr);
		}
	}
	dmatimo[unit] = 1;
#endif
	DMA_ARM(dc);
}
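
/*
 * Finish up the current job on the given channel: clear the channel,
 * flush caches if the transfer required it, and notify the job's
 * owner through its dq_done routine.
 */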
void
dmastop(unit)
	int unit;
{
	struct dma_softc *sc = &Dma_softc;
	struct dma_channel *dc = &sc->sc_chan[unit];

#ifdef DEBUG
	if (dmadebug & DDB_FOLLOW)
		printf("dmastop(%d)\n", unit);
	dmatimo[unit] = 0;
#endif
	DMA_CLEAR(dc);
#if defined(HP340) || defined(HP360) || defined(HP370) || defined(HP375) || defined(HP380)
	if (dc->dm_flags & DMAF_PCFLUSH) {
		PCIA();
		dc->dm_flags &= ~DMAF_PCFLUSH;
	}
#endif
#if defined(HP320) || defined(HP350)
	if (dc->dm_flags & DMAF_VCFLUSH) {
		/*
		 * 320/350s have VACs that may also need flushing.
		 * In our case we only flush the supervisor side
		 * because we know that if we are DMAing to user
		 * space, the physical pages will also be mapped
		 * in kernel space (via vmapbuf) and hence cache-
		 * inhibited by the pmap module due to the multiple
		 * mapping.
		 */
		DCIS();
		dc->dm_flags &= ~DMAF_VCFLUSH;
	}
#endif
	/*
	 * We may get this interrupt after a device service routine
	 * has freed the dma channel.  So, ignore the intr if there's
	 * nothing on the queue.
	 */
	if (dc->dm_job != NULL)
		(*dc->dm_job->dq_done)(dc->dm_job->dq_softc);
}

int
dmaintr(arg)
	void *arg;
{
	struct dma_softc *sc = arg;
	struct dma_channel *dc;
	int i, stat;
	int found = 0;

#ifdef DEBUG
	if (dmadebug & DDB_FOLLOW)
		printf("dmaintr\n");
#endif
	for (i = 0; i < NDMACHAN; i++) {
		dc = &sc->sc_chan[i];
		stat = DMA_STAT(dc);
		if ((stat & DMA_INTR) == 0)
			continue;
		found++;
#ifdef DEBUG
		if (dmadebug & DDB_IO) {
			if (((dmadebug&DDB_WORD) && (dc->dm_cmd&DMA_WORD)) ||
			    ((dmadebug&DDB_LWORD) && (dc->dm_cmd&DMA_LWORD)))
				printf("dmaintr: flags %x unit %d stat %x next %d\n",
				    dc->dm_flags, i, stat, dc->dm_cur + 1);
		}
		if (stat & DMA_ARMED)
			printf("%s, chan %d: intr when armed\n",
			    sc->sc_xname, i);
#endif
		/*
		 * Load the next segment, or finish up if we're done.
		 */
		dc->dm_cur++;
		if (dc->dm_cur <= dc->dm_last) {
#ifdef DEBUG
			dmatimo[i] = 1;
#endif
			/*
			 * If we're on the last segment, disable the
			 * completion interrupt, if necessary.
			 */
			if (dc->dm_cur == dc->dm_last &&
			    (dc->dm_flags & DMAF_NOINTR))
				dc->dm_cmd &= ~DMA_ENAB;
			DMA_CLEAR(dc);
			DMA_ARM(dc);
		} else
			dmastop(i);
	}
	return (found);
}

#ifdef DEBUG
void
dmatimeout(arg)
	void *arg;
{
	int i, s;
	struct dma_softc *sc = arg;

	for (i = 0; i < NDMACHAN; i++) {
		s = splbio();
		if (dmatimo[i]) {
			if (dmatimo[i] > 1)
				printf("%s: chan %d timeout #%d\n",
				    sc->sc_xname, i, dmatimo[i]-1);
			dmatimo[i]++;
		}
		splx(s);
	}
	timeout(dmatimeout, sc, 30 * hz);
}
#endif
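
/*
 * Illustrative sketch of the client protocol, for reference only:
 * never compiled, and the "xx" driver, its softc layout, and its use
 * of DMAGO_READ are hypothetical.  A client fills in a dmaqueue,
 * requests a channel with dmareq(), starts the transfer with dmago()
 * (immediately, or from its dq_start routine once a queued request
 * is granted), and releases the channel with dmafree() after its
 * dq_done routine is called.
 */
#ifdef notdef
struct xx_softc {
	struct	dmaqueue sc_dq;		/* our entry in the DMA queue */
	char	*sc_buf;		/* transfer buffer */
	int	sc_count;		/* transfer length */
};

void
xxstart(arg)
	void *arg;
{
	struct xx_softc *xs = arg;

	/* We own a channel; dq_chan now holds its number. */
	dmago(xs->sc_dq.dq_chan, xs->sc_buf, xs->sc_count, DMAGO_READ);
}

void
xxdone(arg)
	void *arg;
{
	struct xx_softc *xs = arg;

	/* Transfer complete; hand the channel to the next queued job. */
	dmafree(&xs->sc_dq);
}

void
xxgo(xs)
	struct xx_softc *xs;
{
	xs->sc_dq.dq_chan = (1 << 0) | (1 << 1);	/* either channel */
	xs->sc_dq.dq_softc = xs;
	xs->sc_dq.dq_start = xxstart;
	xs->sc_dq.dq_done = xxdone;
	if (dmareq(&xs->sc_dq))
		xxstart(xs);	/* got a channel right away */
}
#endif /* notdef */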