/*	$NetBSD: dma.c,v 1.28 2002/12/22 00:17:15 gmcgarry Exp $	*/

/*-
 * Copyright (c) 1996, 1997 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * Copyright (c) 1982, 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)dma.c	8.1 (Berkeley) 6/10/93
 */

/*
 * DMA driver
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: dma.c,v 1.28 2002/12/22 00:17:15 gmcgarry Exp $");

#include <machine/hp300spu.h>	/* XXX param.h includes cpu.h */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/device.h>
#include <sys/kernel.h>
#include <sys/proc.h>

#include <machine/bus.h>

#include <m68k/cacheops.h>

#include <hp300/dev/intiovar.h>
#include <hp300/dev/dmareg.h>
#include <hp300/dev/dmavar.h>

/*
 * The largest single request will be MAXPHYS bytes which will require
 * at most MAXPHYS/NBPG+1 chain elements to describe, i.e. if none of
 * the buffer pages are physically contiguous (MAXPHYS/NBPG) and the
 * buffer is not page aligned (+1).
 */
#define	DMAMAXIO	(MAXPHYS/NBPG+1)
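
/*
 * Worked example (assuming the usual NetBSD values, which are not
 * fixed by this file): with MAXPHYS = 64KB and NBPG = 4KB, a transfer
 * can touch 64K/4K = 16 entirely discontiguous pages, plus one more
 * page boundary if the buffer is not page aligned, so DMAMAXIO is
 * 16 + 1 = 17 chain elements.
 */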

struct dma_chain {
	int	dc_count;
	char	*dc_addr;
};

struct dma_channel {
	struct	dmaqueue *dm_job;		/* current job */
	struct	dmadevice *dm_hwaddr;		/* registers if DMA_C */
	struct	dmaBdevice *dm_Bhwaddr;		/* registers if not DMA_C */
	char	dm_flags;			/* misc. flags */
	u_short	dm_cmd;				/* DMA controller command */
	int	dm_cur;				/* current segment */
	int	dm_last;			/* last segment */
	struct	dma_chain dm_chain[DMAMAXIO];	/* all segments */
};

struct dma_softc {
	struct	device sc_dev;
	bus_space_tag_t sc_bst;
	bus_space_handle_t sc_bsh;

	struct	dmareg *sc_dmareg;		/* pointer to our hardware */
	struct	dma_channel sc_chan[NDMACHAN];	/* 2 channels */
	TAILQ_HEAD(, dmaqueue) sc_queue;	/* job queue */
	struct	callout sc_debug_ch;
	char	sc_type;			/* A, B, or C */
	int	sc_ipl;				/* our interrupt level */
	void	*sc_ih;				/* interrupt cookie */
};

/* types */
#define	DMA_B	0
#define	DMA_C	1

/* flags */
#define	DMAF_PCFLUSH	0x01
#define	DMAF_VCFLUSH	0x02
#define	DMAF_NOINTR	0x04

int	dmamatch(struct device *, struct cfdata *, void *);
void	dmaattach(struct device *, struct device *, void *);

CFATTACH_DECL(dma, sizeof(struct dma_softc),
    dmamatch, dmaattach, NULL, NULL);

int	dmaintr __P((void *));

#ifdef DEBUG
int	dmadebug = 0;
#define	DDB_WORD	0x01	/* same as DMAGO_WORD */
#define	DDB_LWORD	0x02	/* same as DMAGO_LWORD */
#define	DDB_FOLLOW	0x04
#define	DDB_IO		0x08

void	dmatimeout __P((void *));
int	dmatimo[NDMACHAN];

long	dmahits[NDMACHAN];
long	dmamisses[NDMACHAN];
long	dmabyte[NDMACHAN];
long	dmaword[NDMACHAN];
long	dmalword[NDMACHAN];
#endif

static struct dma_softc *dma_softc;

int
dmamatch(parent, match, aux)
	struct device *parent;
	struct cfdata *match;
	void *aux;
{
	struct intio_attach_args *ia = aux;
	static int dmafound = 0;	/* can only have one */

	if (strcmp("dma", ia->ia_modname) != 0 || dmafound)
		return (0);

	dmafound = 1;
	return (1);
}

void
dmaattach(parent, self, aux)
	struct device *parent, *self;
	void *aux;
{
	struct dma_softc *sc = (struct dma_softc *)self;
	struct intio_attach_args *ia = aux;
	struct dma_channel *dc;
	struct dmareg *dma;
	int i;
	char rev;

	/* There's just one. */
	dma_softc = sc;

	sc->sc_bst = ia->ia_bst;
	if (bus_space_map(sc->sc_bst, ia->ia_iobase, INTIO_DEVSIZE, 0,
	    &sc->sc_bsh)) {
		printf("%s: can't map registers\n", sc->sc_dev.dv_xname);
		return;
	}

	dma = (struct dmareg *)bus_space_vaddr(sc->sc_bst, sc->sc_bsh);
	sc->sc_dmareg = dma;

	/*
	 * Determine the DMA type.  A DMA_A or DMA_B will fail the
	 * following probe.
	 *
	 * XXX Don't know how to easily differentiate the A and B cards,
	 * so we just hope nobody has an A card (A cards will work if
	 * splbio works out to ipl 3).
	 */
	if (badbaddr((char *)&dma->dma_id[2])) {
		rev = 'B';
#if !defined(HP320)
		panic("dmainit: DMA card requires hp320 support");
#endif
	} else
		rev = dma->dma_id[2];

	sc->sc_type = (rev == 'B') ? DMA_B : DMA_C;

	TAILQ_INIT(&sc->sc_queue);
	callout_init(&sc->sc_debug_ch);

	for (i = 0; i < NDMACHAN; i++) {
		dc = &sc->sc_chan[i];
		dc->dm_job = NULL;
		switch (i) {
		case 0:
			dc->dm_hwaddr = &dma->dma_chan0;
			dc->dm_Bhwaddr = &dma->dma_Bchan0;
			break;

		case 1:
			dc->dm_hwaddr = &dma->dma_chan1;
			dc->dm_Bhwaddr = &dma->dma_Bchan1;
			break;

		default:
			panic("dmainit: more than 2 channels?");
			/* NOTREACHED */
		}
	}

#ifdef DEBUG
	/* make sure timeout is really not needed */
	callout_reset(&sc->sc_debug_ch, 30 * hz, dmatimeout, sc);
#endif

	printf(": 98620%c, 2 channels, %d-bit DMA\n",
	    rev, (rev == 'B') ? 16 : 32);

	/*
	 * Defer hooking up our interrupt until the first
	 * DMA-using controller has hooked up theirs.
	 */
	sc->sc_ih = NULL;
}

/*
 * Compute the ipl and (re)establish the interrupt handler
 * for the DMA controller.
 */
void
dmacomputeipl()
{
	struct dma_softc *sc = dma_softc;

	if (sc->sc_ih != NULL)
		intr_disestablish(sc->sc_ih);

	/*
	 * Our interrupt level must be as high as the highest
	 * device using DMA (i.e. splbio).
	 */
	sc->sc_ipl = PSLTOIPL(hp300_ipls[HP300_IPL_BIO]);
	sc->sc_ih = intr_establish(dmaintr, sc, sc->sc_ipl, IPL_BIO);
}

int
dmareq(dq)
	struct dmaqueue *dq;
{
	struct dma_softc *sc = dma_softc;
	int i, chan, s;

#if 1
	s = splhigh();	/* XXXthorpej */
#else
	s = splbio();
#endif

	chan = dq->dq_chan;
	for (i = NDMACHAN - 1; i >= 0; i--) {
		/*
		 * Can we use this channel?
		 */
		if ((chan & (1 << i)) == 0)
			continue;

		/*
		 * We can use it; is it busy?
		 */
		if (sc->sc_chan[i].dm_job != NULL)
			continue;

		/*
		 * Not busy; give the caller this channel.
		 */
		sc->sc_chan[i].dm_job = dq;
		dq->dq_chan = i;
		splx(s);
		return (1);
	}

	/*
	 * Couldn't get a channel now; put this in the queue.
	 */
	TAILQ_INSERT_TAIL(&sc->sc_queue, dq, dq_list);
	splx(s);
	return (0);
}
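
/*
 * Channel request protocol from a caller's point of view -- a sketch
 * inferred from this file (the dmaqueue fields are declared in
 * dmavar.h), with xxstart/xxdone standing in for the caller's own
 * routines:
 *
 *	dq->dq_chan = (1 << 0) | (1 << 1);	<- either channel is ok
 *	dq->dq_softc = sc;
 *	dq->dq_start = xxstart;
 *	dq->dq_done = xxdone;
 *	if (dmareq(dq))
 *		xxstart(sc);
 *
 * On entry dq_chan is a bitmask of acceptable channels; once a channel
 * is granted (immediately, or later via the dq_start callback when one
 * frees up), dq_chan is overwritten with the channel number.  xxstart()
 * then calls dmago() on that channel, and xxdone() calls dmafree()
 * when the whole chain has completed.
 */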

void
dmafree(dq)
	struct dmaqueue *dq;
{
	int unit = dq->dq_chan;
	struct dma_softc *sc = dma_softc;
	struct dma_channel *dc = &sc->sc_chan[unit];
	struct dmaqueue *dn;
	int chan, s;

#if 1
	s = splhigh();	/* XXXthorpej */
#else
	s = splbio();
#endif

#ifdef DEBUG
	dmatimo[unit] = 0;
#endif

	DMA_CLEAR(dc);

#if defined(CACHE_HAVE_PAC) || defined(M68040)
	/*
	 * XXX we may not always go thru the flush code in dmastop()
	 */
	if (dc->dm_flags & DMAF_PCFLUSH) {
		PCIA();
		dc->dm_flags &= ~DMAF_PCFLUSH;
	}
#endif

#if defined(CACHE_HAVE_VAC)
	if (dc->dm_flags & DMAF_VCFLUSH) {
		/*
		 * 320/350s have VACs that may also need flushing.
		 * In our case we only flush the supervisor side
		 * because we know that if we are DMAing to user
		 * space, the physical pages will also be mapped
		 * in kernel space (via vmapbuf) and hence cache-
		 * inhibited by the pmap module due to the multiple
		 * mapping.
		 */
		DCIS();
		dc->dm_flags &= ~DMAF_VCFLUSH;
	}
#endif

	/*
	 * Channel is now free.  Look for another job to run on this
	 * channel.
	 */
	dc->dm_job = NULL;
	chan = 1 << unit;
	for (dn = sc->sc_queue.tqh_first; dn != NULL;
	    dn = dn->dq_list.tqe_next) {
		if (dn->dq_chan & chan) {
			/* Found one... */
			TAILQ_REMOVE(&sc->sc_queue, dn, dq_list);
			dc->dm_job = dn;
			dn->dq_chan = dq->dq_chan;
			splx(s);

			/* Start the initiator. */
			(*dn->dq_start)(dn->dq_softc);
			return;
		}
	}
	splx(s);
}

void
dmago(unit, addr, count, flags)
	int unit;
	char *addr;
	int count;
	int flags;
{
	struct dma_softc *sc = dma_softc;
	struct dma_channel *dc = &sc->sc_chan[unit];
	char *dmaend = NULL;
	int seg, tcount;

	if (count > MAXPHYS)
		panic("dmago: count > MAXPHYS");

#if defined(HP320)
	if (sc->sc_type == DMA_B && (flags & DMAGO_LWORD))
		panic("dmago: no can do 32-bit DMA");
#endif

#ifdef DEBUG
	if (dmadebug & DDB_FOLLOW)
		printf("dmago(%d, %p, %x, %x)\n",
		    unit, addr, count, flags);
	if (flags & DMAGO_LWORD)
		dmalword[unit]++;
	else if (flags & DMAGO_WORD)
		dmaword[unit]++;
	else
		dmabyte[unit]++;
#endif
	/*
	 * Build the DMA chain
	 */
	for (seg = 0; count > 0; seg++) {
		dc->dm_chain[seg].dc_addr = (char *) kvtop(addr);
#if defined(M68040)
		/*
		 * Push back dirty cache lines
		 */
		if (mmutype == MMU_68040)
			DCFP((paddr_t)dc->dm_chain[seg].dc_addr);
#endif
		if (count < (tcount = NBPG - ((int)addr & PGOFSET)))
			tcount = count;
		dc->dm_chain[seg].dc_count = tcount;
		addr += tcount;
		count -= tcount;
		if (flags & DMAGO_LWORD)
			tcount >>= 2;
		else if (flags & DMAGO_WORD)
			tcount >>= 1;

		/*
		 * Try to compact the DMA transfer if the pages are adjacent.
		 * Note: this will never happen on the first iteration.
		 */
		if (dc->dm_chain[seg].dc_addr == dmaend
#if defined(HP320)
		    /* only 16-bit count on 98620B */
		    && (sc->sc_type != DMA_B ||
			dc->dm_chain[seg - 1].dc_count + tcount <= 65536)
#endif
		) {
#ifdef DEBUG
			dmahits[unit]++;
#endif
			dmaend += dc->dm_chain[seg].dc_count;
			dc->dm_chain[--seg].dc_count += tcount;
		} else {
#ifdef DEBUG
			dmamisses[unit]++;
#endif
			dmaend = dc->dm_chain[seg].dc_addr +
			    dc->dm_chain[seg].dc_count;
			dc->dm_chain[seg].dc_count = tcount;
		}
	}
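	/*
	 * Compaction example (hypothetical addresses, 4KB pages, byte-wide
	 * transfer): a three-page buffer whose first two pages map to
	 * physical 0x10000 and 0x11000 and whose third maps to 0x50000
	 * yields just two chain elements: 8192 bytes @ 0x10000 and
	 * 4096 bytes @ 0x50000.  For word/longword transfers dc_count
	 * ends up holding the scaled count, while dmaend always advances
	 * in bytes.
	 */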
	dc->dm_cur = 0;
	dc->dm_last = --seg;
	dc->dm_flags = 0;
	/*
	 * Set up the command word based on flags
	 */
	dc->dm_cmd = DMA_ENAB | DMA_IPL(sc->sc_ipl) | DMA_START;
	if ((flags & DMAGO_READ) == 0)
		dc->dm_cmd |= DMA_WRT;
	if (flags & DMAGO_LWORD)
		dc->dm_cmd |= DMA_LWORD;
	else if (flags & DMAGO_WORD)
		dc->dm_cmd |= DMA_WORD;
	if (flags & DMAGO_PRI)
		dc->dm_cmd |= DMA_PRI;

#if defined(M68040)
	/*
	 * On the 68040 we need to flush (push) the data cache before a
	 * DMA (already done above) and flush again after DMA completes.
	 * In theory we should only need to flush prior to a write DMA
	 * and purge after a read DMA, but if the entire page is not
	 * involved in the DMA we might purge some valid data.
	 */
	if (mmutype == MMU_68040 && (flags & DMAGO_READ))
		dc->dm_flags |= DMAF_PCFLUSH;
#endif

#if defined(CACHE_HAVE_PAC)
	/*
	 * Remember if we need to flush external physical cache when
	 * DMA is done.  We only do this if we are reading (writing memory).
	 */
	if (ectype == EC_PHYS && (flags & DMAGO_READ))
		dc->dm_flags |= DMAF_PCFLUSH;
#endif

#if defined(CACHE_HAVE_VAC)
	if (ectype == EC_VIRT && (flags & DMAGO_READ))
		dc->dm_flags |= DMAF_VCFLUSH;
#endif

	/*
	 * Remember if we can skip the dma completion interrupt on
	 * the last segment in the chain.
	 */
	if (flags & DMAGO_NOINT) {
		if (dc->dm_cur == dc->dm_last)
			dc->dm_cmd &= ~DMA_ENAB;
		else
			dc->dm_flags |= DMAF_NOINTR;
	}
#ifdef DEBUG
	if (dmadebug & DDB_IO) {
		if (((dmadebug&DDB_WORD) && (dc->dm_cmd&DMA_WORD)) ||
		    ((dmadebug&DDB_LWORD) && (dc->dm_cmd&DMA_LWORD))) {
			printf("dmago: cmd %x, flags %x\n",
			    dc->dm_cmd, dc->dm_flags);
			for (seg = 0; seg <= dc->dm_last; seg++)
				printf("  %d: %d@%p\n", seg,
				    dc->dm_chain[seg].dc_count,
				    dc->dm_chain[seg].dc_addr);
		}
	}
	dmatimo[unit] = 1;
#endif
	DMA_ARM(sc, dc);
}
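
/*
 * A note on the completion path, summarizing the code below rather
 * than documenting the hardware beyond what this file shows: the
 * controller is handed one chain element at a time, so dmaintr()
 * takes one interrupt per segment and re-arms the channel with the
 * next element until dm_cur passes dm_last; dmastop() then performs
 * whatever cache flushing dm_flags recorded and calls the job's
 * dq_done routine.
 */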

void
dmastop(unit)
	int unit;
{
	struct dma_softc *sc = dma_softc;
	struct dma_channel *dc = &sc->sc_chan[unit];

#ifdef DEBUG
	if (dmadebug & DDB_FOLLOW)
		printf("dmastop(%d)\n", unit);
	dmatimo[unit] = 0;
#endif
	DMA_CLEAR(dc);

#if defined(CACHE_HAVE_PAC) || defined(M68040)
	if (dc->dm_flags & DMAF_PCFLUSH) {
		PCIA();
		dc->dm_flags &= ~DMAF_PCFLUSH;
	}
#endif

#if defined(CACHE_HAVE_VAC)
	if (dc->dm_flags & DMAF_VCFLUSH) {
		/*
		 * 320/350s have VACs that may also need flushing.
		 * In our case we only flush the supervisor side
		 * because we know that if we are DMAing to user
		 * space, the physical pages will also be mapped
		 * in kernel space (via vmapbuf) and hence cache-
		 * inhibited by the pmap module due to the multiple
		 * mapping.
		 */
		DCIS();
		dc->dm_flags &= ~DMAF_VCFLUSH;
	}
#endif

	/*
	 * We may get this interrupt after a device service routine
	 * has freed the dma channel.  So, ignore the intr if there's
	 * nothing on the queue.
	 */
	if (dc->dm_job != NULL)
		(*dc->dm_job->dq_done)(dc->dm_job->dq_softc);
}

int
dmaintr(arg)
	void *arg;
{
	struct dma_softc *sc = arg;
	struct dma_channel *dc;
	int i, stat;
	int found = 0;

#ifdef DEBUG
	if (dmadebug & DDB_FOLLOW)
		printf("dmaintr\n");
#endif
	for (i = 0; i < NDMACHAN; i++) {
		dc = &sc->sc_chan[i];
		stat = DMA_STAT(dc);
		if ((stat & DMA_INTR) == 0)
			continue;
		found++;
#ifdef DEBUG
		if (dmadebug & DDB_IO) {
			if (((dmadebug&DDB_WORD) && (dc->dm_cmd&DMA_WORD)) ||
			    ((dmadebug&DDB_LWORD) && (dc->dm_cmd&DMA_LWORD)))
				printf("dmaintr: flags %x unit %d stat %x next %d\n",
				    dc->dm_flags, i, stat, dc->dm_cur + 1);
		}
		if (stat & DMA_ARMED)
			printf("dma channel %d: intr when armed\n", i);
#endif
		/*
		 * Load the next segment, or finish up if we're done.
		 */
		dc->dm_cur++;
		if (dc->dm_cur <= dc->dm_last) {
#ifdef DEBUG
			dmatimo[i] = 1;
#endif
			/*
			 * If we're the last segment, disable the
			 * completion interrupt, if necessary.
			 */
			if (dc->dm_cur == dc->dm_last &&
			    (dc->dm_flags & DMAF_NOINTR))
				dc->dm_cmd &= ~DMA_ENAB;
			DMA_CLEAR(dc);
			DMA_ARM(sc, dc);
		} else
			dmastop(i);
	}
	return (found);
}

#ifdef DEBUG
void
dmatimeout(arg)
	void *arg;
{
	int i, s;
	struct dma_softc *sc = arg;

	for (i = 0; i < NDMACHAN; i++) {
		s = splbio();
		if (dmatimo[i]) {
			if (dmatimo[i] > 1)
				printf("dma channel %d timeout #%d\n",
				    i, dmatimo[i]-1);
			dmatimo[i]++;
		}
		splx(s);
	}
	callout_reset(&sc->sc_debug_ch, 30 * hz, dmatimeout, sc);
}
#endif