1 /* 2 * Copyright (c) 1982, 1990, 1993 3 * The Regents of the University of California. All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 3. All advertising materials mentioning features or use of this software 14 * must display the following acknowledgement: 15 * This product includes software developed by the University of 16 * California, Berkeley and its contributors. 17 * 4. Neither the name of the University nor the names of its contributors 18 * may be used to endorse or promote products derived from this software 19 * without specific prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 24 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 25 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 26 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 27 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 28 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 30 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 31 * SUCH DAMAGE. 
 *
 * from: @(#)dma.c	8.1 (Berkeley) 6/10/93
 *	$Id: dma.c,v 1.4 1994/05/23 05:58:46 mycroft Exp $
 */

/*
 * DMA driver for the hp300 98620B/98620C two-channel DMA card.
 * Manages per-channel request queues, builds scatter/gather chains
 * from kernel-virtual buffers, and handles per-segment completion
 * interrupts.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/time.h>
#include <sys/kernel.h>
#include <sys/proc.h>

#include <machine/cpu.h>

#include <hp300/dev/device.h>
#include <hp300/dev/dmareg.h>
#include <hp300/dev/dmavar.h>

#include <hp300/hp300/isr.h>

extern void isrlink();
extern void _insque();
extern void _remque();
extern u_int kvtop();
extern void PCIA();

/*
 * The largest single request will be MAXPHYS bytes which will require
 * at most MAXPHYS/NBPG+1 chain elements to describe, i.e. if none of
 * the buffer pages are physically contiguous (MAXPHYS/NBPG) and the
 * buffer is not page aligned (+1).
 */
#define	DMAMAXIO	(MAXPHYS/NBPG+1)

/*
 * One scatter/gather segment: a physically contiguous piece of the
 * transfer.  dc_count is stored in bytes while the chain is being
 * built, then converted to hardware transfer units (bytes, 16-bit
 * words, or 32-bit longwords) before being handed to the card.
 */
struct	dma_chain {
	int	dc_count;	/* transfer count for this segment */
	char	*dc_addr;	/* physical start address of segment */
};

/*
 * Per-channel software state.
 */
struct	dma_softc {
	struct	dmadevice *sc_hwaddr;	/* 98620C-style channel registers */
	struct	dmaBdevice *sc_Bhwaddr;	/* 98620B-style channel registers */
	char	sc_type;		/* DMA_B or DMA_C (card revision) */
	char	sc_flags;		/* DMAF_* flags for current transfer */
	u_short	sc_cmd;			/* command word to (re)arm channel */
	struct	dma_chain *sc_cur;	/* segment currently in progress */
	struct	dma_chain *sc_last;	/* last segment of the transfer */
	struct	dma_chain sc_chain[DMAMAXIO];	/* the scatter/gather chain */
} dma_softc[NDMA];

/* types */
#define	DMA_B	0		/* 98620B: 16-bit counts, no 32-bit DMA */
#define DMA_C	1		/* 98620C: 32-bit capable */

/* flags */
#define DMAF_PCFLUSH	0x01	/* flush physical cache when done */
#define DMAF_VCFLUSH	0x02	/* flush virtual (VAC) cache when done */
#define DMAF_NOINTR	0x04	/* suppress interrupt on final segment */

/*
 * dmachan[0..NDMA-1] are the per-channel queues of active requesters;
 * dmachan[NDMA] queues requesters waiting for any acceptable channel.
 */
struct	devqueue dmachan[NDMA + 1];
int	dmaintr();

#ifdef DEBUG
int	dmadebug = 0;
#define DDB_WORD	0x01	/* same as DMAGO_WORD */
#define DDB_LWORD	0x02	/* same as DMAGO_LWORD */
#define	DDB_FOLLOW	0x04
#define DDB_IO		0x08

void	dmatimeout __P((void *));
int	dmatimo[NDMA];		/* per-channel watchdog: 0 idle, >0 ticking */

long	dmahits[NDMA];		/* chain segments coalesced */
long	dmamisses[NDMA];	/* chain segments not coalesced */
long	dmabyte[NDMA];		/* byte-mode transfers started */
long	dmaword[NDMA];		/* word-mode transfers started */
long	dmalword[NDMA];		/* longword-mode transfers started */
#endif

/*
 * Identify the DMA card revision and initialize the software state and
 * request queues for both channels.  Called once at autoconfiguration.
 */
void
dmainit()
{
	register struct dmareg *dma = (struct dmareg *)DMA_BASE;
	register struct dma_softc *dc;
	register int i;
	char rev;

	/*
	 * Determine the DMA type.
	 * Don't know how to easily differentiate the A and B cards,
	 * so we just hope nobody has an A card (A cards will work if
	 * DMAINTLVL is set to 3).
	 */
	if (!badbaddr((char *)&dma->dma_id[2]))
		rev = dma->dma_id[2];
	else {
		rev = 'B';
#if !defined(HP320)
		panic("dmainit: DMA card requires hp320 support");
#endif
	}

	dc = &dma_softc[0];
	for (i = 0; i < NDMA; i++) {
		/* even units get channel 0, odd units channel 1 */
		dc->sc_hwaddr = (i & 1) ? &dma->dma_chan1 : &dma->dma_chan0;
		dc->sc_Bhwaddr = (i & 1) ? &dma->dma_Bchan1 : &dma->dma_Bchan0;
		dc->sc_type = rev == 'B' ? DMA_B : DMA_C;
		dc++;
		dmachan[i].dq_forw = dmachan[i].dq_back = &dmachan[i];
	}
	/* initialize the "waiting for any channel" queue as well */
	dmachan[i].dq_forw = dmachan[i].dq_back = &dmachan[i];
#ifdef DEBUG
	/* make sure timeout is really not needed */
	timeout(dmatimeout, (void *)0, 30 * hz);
#endif

	printf("dma: 98620%c with 2 channels, %d bit DMA\n",
	       rev, rev == 'B' ? 16 : 32);
}

/*
 * Request a DMA channel.  dq->dq_ctlr is a bitmask of acceptable
 * channels on entry; if a free one is found, it is claimed, dq_ctlr
 * is rewritten to that channel number, and 1 is returned.  Otherwise
 * the requester is queued on dmachan[NDMA] and 0 is returned; its
 * d_start routine will be called from dmafree() when a channel opens.
 */
int
dmareq(dq)
	register struct devqueue *dq;
{
	register int i;
	register int chan;
	register int s = splbio();

	chan = dq->dq_ctlr;
	i = NDMA;
	while (--i >= 0) {
		if ((chan & (1 << i)) == 0)
			continue;
		/* channel busy if its queue is non-empty */
		if (dmachan[i].dq_forw != &dmachan[i])
			continue;
		insque(dq, &dmachan[i]);
		dq->dq_ctlr = i;
		splx(s);
		return(1);
	}
	insque(dq, dmachan[NDMA].dq_back);
	splx(s);
	return(0);
}

/*
 * Release the DMA channel held by dq: clear the hardware, perform any
 * deferred cache flushing, then hand the channel to the first queued
 * waiter whose acceptable-channel mask includes it.
 */
void
dmafree(dq)
	register struct devqueue *dq;
{
	int unit = dq->dq_ctlr;
	register struct dma_softc *dc = &dma_softc[unit];
	register struct devqueue *dn;
	register int chan, s;

	s = splbio();
#ifdef DEBUG
	dmatimo[unit] = 0;
#endif
	DMA_CLEAR(dc);
#if defined(HP360) || defined(HP370) || defined(HP380)
	/*
	 * XXX we may not always go thru the flush code in dmastop()
	 */
	if (dc->sc_flags & DMAF_PCFLUSH) {
		PCIA();
		dc->sc_flags &= ~DMAF_PCFLUSH;
	}
#endif
#if defined(HP320) || defined(HP350)
	if (dc->sc_flags & DMAF_VCFLUSH) {
		/*
		 * 320/350s have VACs that may also need flushing.
		 * In our case we only flush the supervisor side
		 * because we know that if we are DMAing to user
		 * space, the physical pages will also be mapped
		 * in kernel space (via vmapbuf) and hence cache-
		 * inhibited by the pmap module due to the multiple
		 * mapping.
		 */
		DCIS();
		dc->sc_flags &= ~DMAF_VCFLUSH;
	}
#endif
	remque(dq);
	chan = 1 << unit;
	/* pass the channel on to the first compatible waiter, if any */
	for (dn = dmachan[NDMA].dq_forw;
	     dn != &dmachan[NDMA]; dn = dn->dq_forw) {
		if (dn->dq_ctlr & chan) {
			remque((caddr_t)dn);
			insque((caddr_t)dn, (caddr_t)dq->dq_back);
			splx(s);
			dn->dq_ctlr = dq->dq_ctlr;
			(dn->dq_driver->d_start)(dn->dq_unit);
			return;
		}
	}
	splx(s);
}

/*
 * Start a DMA transfer of `count' bytes at kernel-virtual `addr' on
 * channel `unit'.  Builds the physical scatter/gather chain (merging
 * physically contiguous pages into single segments), sets up the
 * command word from the DMAGO_* flags, performs any cache maintenance
 * the transfer requires, and arms the hardware for the first segment.
 */
void
dmago(unit, addr, count, flags)
	int unit;
	register char *addr;
	register int count;
	register int flags;
{
	register struct dma_softc *dc = &dma_softc[unit];
	register struct dma_chain *dcp;
	register char *dmaend = NULL;
	register int tcount;

	if (count > MAXPHYS)
		panic("dmago: count > MAXPHYS");
#if defined(HP320)
	if (dc->sc_type == DMA_B && (flags & DMAGO_LWORD))
		panic("dmago: no can do 32-bit DMA");
#endif
#ifdef DEBUG
	if (dmadebug & DDB_FOLLOW)
		printf("dmago(%d, %x, %x, %x)\n",
		       unit, addr, count, flags);
	if (flags & DMAGO_LWORD)
		dmalword[unit]++;
	else if (flags & DMAGO_WORD)
		dmaword[unit]++;
	else
		dmabyte[unit]++;
#endif
	/*
	 * Build the DMA chain
	 */
	for (dcp = dc->sc_chain; count > 0; dcp++) {
		dcp->dc_addr = (char *) kvtop(addr);
#if defined(HP380)
		/*
		 * Push back dirty cache lines
		 */
		if (mmutype == MMU_68040)
			DCFP(dcp->dc_addr);
#endif
		/* tcount = bytes remaining in this page, capped by count */
		if (count < (tcount = NBPG - ((int)addr & PGOFSET)))
			tcount = count;
		dcp->dc_count = tcount;
		addr += tcount;
		count -= tcount;
		/* convert byte count to hardware transfer units */
		if (flags & DMAGO_LWORD)
			tcount >>= 2;
		else if (flags & DMAGO_WORD)
			tcount >>= 1;
		/*
		 * If this page is physically contiguous with the end of
		 * the previous segment (dmaend), coalesce: back up and
		 * fold the new count into the previous chain entry.
		 */
		if (dcp->dc_addr == dmaend
#if defined(HP320)
		    /* only 16-bit count on 98620B */
		    && (dc->sc_type != DMA_B ||
			(dcp-1)->dc_count + tcount <= 65536)
#endif
		    ) {
#ifdef DEBUG
			dmahits[unit]++;
#endif
			dmaend += dcp->dc_count;
			(--dcp)->dc_count += tcount;
		} else {
#ifdef DEBUG
			dmamisses[unit]++;
#endif
			dmaend = dcp->dc_addr + dcp->dc_count;
			dcp->dc_count = tcount;
		}
	}
	dc->sc_cur = dc->sc_chain;
	dc->sc_last = --dcp;
	dc->sc_flags = 0;
	/*
	 * Set up the command word based on flags
	 */
	dc->sc_cmd = DMA_ENAB | DMA_IPL(DMAINTLVL) | DMA_START;
	if ((flags & DMAGO_READ) == 0)
		dc->sc_cmd |= DMA_WRT;
	if (flags & DMAGO_LWORD)
		dc->sc_cmd |= DMA_LWORD;
	else if (flags & DMAGO_WORD)
		dc->sc_cmd |= DMA_WORD;
	if (flags & DMAGO_PRI)
		dc->sc_cmd |= DMA_PRI;
#if defined(HP380)
	/*
	 * On the 68040 we need to flush (push) the data cache before a
	 * DMA (already done above) and flush again after DMA completes.
	 * In theory we should only need to flush prior to a write DMA
	 * and purge after a read DMA but if the entire page is not
	 * involved in the DMA we might purge some valid data.
	 */
	if (mmutype == MMU_68040 && (flags & DMAGO_READ))
		dc->sc_flags |= DMAF_PCFLUSH;
#endif
#if defined(HP360) || defined(HP370)
	/*
	 * Remember if we need to flush external physical cache when
	 * DMA is done. We only do this if we are reading (writing memory).
	 */
	if (ectype == EC_PHYS && (flags & DMAGO_READ))
		dc->sc_flags |= DMAF_PCFLUSH;
#endif
#if defined(HP320) || defined(HP350)
	if (ectype == EC_VIRT && (flags & DMAGO_READ))
		dc->sc_flags |= DMAF_VCFLUSH;
#endif
	/*
	 * Remember if we can skip the dma completion interrupt on
	 * the last segment in the chain.
	 */
	if (flags & DMAGO_NOINT) {
		if (dc->sc_cur == dc->sc_last)
			dc->sc_cmd &= ~DMA_ENAB;
		else
			dc->sc_flags |= DMAF_NOINTR;
	}
#ifdef DEBUG
	if (dmadebug & DDB_IO)
		if ((dmadebug&DDB_WORD) && (dc->sc_cmd&DMA_WORD) ||
		    (dmadebug&DDB_LWORD) && (dc->sc_cmd&DMA_LWORD)) {
			printf("dmago: cmd %x, flags %x\n",
			       dc->sc_cmd, dc->sc_flags);
			for (dcp = dc->sc_chain; dcp <= dc->sc_last; dcp++)
				printf("  %d: %d@%x\n", dcp-dc->sc_chain,
				       dcp->dc_count, dcp->dc_addr);
		}
	dmatimo[unit] = 1;
#endif
	DMA_ARM(dc);
}

/*
 * Transfer-complete processing for channel `unit': clear the hardware,
 * do deferred cache flushing, and call the owning driver's d_done
 * routine.  Called from dmaintr() when the final segment finishes.
 */
void
dmastop(unit)
	register int unit;
{
	register struct dma_softc *dc = &dma_softc[unit];
	register struct devqueue *dq;

#ifdef DEBUG
	if (dmadebug & DDB_FOLLOW)
		printf("dmastop(%d)\n", unit);
	dmatimo[unit] = 0;
#endif
	DMA_CLEAR(dc);
#if defined(HP360) || defined(HP370) || defined(HP380)
	if (dc->sc_flags & DMAF_PCFLUSH) {
		PCIA();
		dc->sc_flags &= ~DMAF_PCFLUSH;
	}
#endif
#if defined(HP320) || defined(HP350)
	if (dc->sc_flags & DMAF_VCFLUSH) {
		/*
		 * 320/350s have VACs that may also need flushing.
		 * In our case we only flush the supervisor side
		 * because we know that if we are DMAing to user
		 * space, the physical pages will also be mapped
		 * in kernel space (via vmapbuf) and hence cache-
		 * inhibited by the pmap module due to the multiple
		 * mapping.
		 */
		DCIS();
		dc->sc_flags &= ~DMAF_VCFLUSH;
	}
#endif
	/*
	 * We may get this interrupt after a device service routine
	 * has freed the dma channel. So, ignore the intr if there's
	 * nothing on the queue.
	 */
	dq = dmachan[unit].dq_forw;
	if (dq != &dmachan[unit])
		(dq->dq_driver->d_done)(dq->dq_unit);
}

/*
 * DMA interrupt handler.  For each channel showing an interrupt,
 * either arm the next segment of its chain or, if the chain is
 * exhausted, finish the transfer via dmastop().  Returns non-zero
 * if any channel claimed the interrupt.
 */
int
dmaintr()
{
	register struct dma_softc *dc;
	register int i, stat;
	int found = 0;

#ifdef DEBUG
	if (dmadebug & DDB_FOLLOW)
		printf("dmaintr\n");
#endif
	for (i = 0, dc = dma_softc; i < NDMA; i++, dc++) {
		stat = DMA_STAT(dc);
		if ((stat & DMA_INTR) == 0)
			continue;
		found++;
#ifdef DEBUG
		if (dmadebug & DDB_IO) {
			if ((dmadebug&DDB_WORD) && (dc->sc_cmd&DMA_WORD) ||
			    (dmadebug&DDB_LWORD) && (dc->sc_cmd&DMA_LWORD))
				printf("dmaintr: unit %d stat %x next %d\n",
				       i, stat, (dc->sc_cur-dc->sc_chain)+1);
		}
		if (stat & DMA_ARMED)
			printf("dma%d: intr when armed\n", i);
#endif
		if (++dc->sc_cur <= dc->sc_last) {
#ifdef DEBUG
			dmatimo[i] = 1;
#endif
			/*
			 * Last chain segment, disable DMA interrupt.
			 */
			if (dc->sc_cur == dc->sc_last &&
			    (dc->sc_flags & DMAF_NOINTR))
				dc->sc_cmd &= ~DMA_ENAB;
			DMA_CLEAR(dc);
			DMA_ARM(dc);
		} else
			dmastop(i);
	}
	return(found);
}

#ifdef DEBUG
/*
 * Debug watchdog, rescheduled every 30 seconds: reports any channel
 * whose dmatimo counter shows a transfer outstanding across multiple
 * periods (i.e. a segment that never completed).
 */
void
dmatimeout(arg)
	void *arg;
{
	register int i, s;

	for (i = 0; i < NDMA; i++) {
		s = splbio();
		if (dmatimo[i]) {
			if (dmatimo[i] > 1)
				printf("dma%d: timeout #%d\n",
				       i, dmatimo[i]-1);
			dmatimo[i]++;
		}
		splx(s);
	}
	timeout(dmatimeout, (void *)0, 30 * hz);
}
#endif