/*	$NetBSD: nextdma.c,v 1.5 1998/11/10 22:45:44 dbj Exp $	*/
/*
 * Copyright (c) 1998 Darrin B. Jewell
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Darrin B. Jewell
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/syslog.h>
#include <sys/socket.h>
#include <sys/device.h>
#include <sys/malloc.h>
#include <sys/ioctl.h>
#include <sys/errno.h>

#include <machine/autoconf.h>
#include <machine/cpu.h>
#include <machine/intr.h>

#include <m68k/cacheops.h>

#include <next68k/next68k/isr.h>

#define _GENERIC_BUS_DMA_PRIVATE
#include <machine/bus.h>

#include "nextdmareg.h"
#include "nextdmavar.h"

#if 0
#define ND_DEBUG
#endif

#if defined(ND_DEBUG)
#define DPRINTF(x) printf x;
#else
#define DPRINTF(x)
#endif

/* @@@ for debugging */
struct nextdma_config *debugernd;
struct nextdma_config *debugexnd;

int nextdma_intr __P((void *));
void next_dmamap_sync __P((bus_dma_tag_t, bus_dmamap_t, bus_addr_t,
		bus_size_t, int));
int next_dma_continue __P((struct nextdma_config *));
void next_dma_rotate __P((struct nextdma_config *));

void next_dma_setup_cont_regs __P((struct nextdma_config *));
void next_dma_setup_curr_regs __P((struct nextdma_config *));

void next_dma_print __P((struct nextdma_config *));

void
nextdma_config(nd)
	struct nextdma_config *nd;
{
	/* Initialize the dma_tag. As a hack, we currently
	 * put the dma tag in the structure itself. It shouldn't be there.
	 */

	{
		bus_dma_tag_t t;
		t = &nd->_nd_dmat;
		t->_cookie = nd;
		t->_get_tag = NULL;		/* lose */
		t->_dmamap_create = _bus_dmamap_create;
		t->_dmamap_destroy = _bus_dmamap_destroy;
		t->_dmamap_load = _bus_dmamap_load_direct;
		t->_dmamap_load_mbuf = _bus_dmamap_load_mbuf_direct;
		t->_dmamap_load_uio = _bus_dmamap_load_uio_direct;
		t->_dmamap_load_raw = _bus_dmamap_load_raw_direct;
		t->_dmamap_unload = _bus_dmamap_unload;
		t->_dmamap_sync = next_dmamap_sync;

		t->_dmamem_alloc = _bus_dmamem_alloc;
		t->_dmamem_free = _bus_dmamem_free;
		t->_dmamem_map = _bus_dmamem_map;
		t->_dmamem_unmap = _bus_dmamem_unmap;
		t->_dmamem_mmap = _bus_dmamem_mmap;

		nd->nd_dmat = t;
	}

	/* @@@ for debugging */
	if (nd->nd_intr == NEXT_I_ENETR_DMA) {
		debugernd = nd;
	}
	if (nd->nd_intr == NEXT_I_ENETX_DMA) {
		debugexnd = nd;
	}

	nextdma_init(nd);

	isrlink_autovec(nextdma_intr, nd, NEXT_I_IPL(nd->nd_intr), 10);
	INTR_ENABLE(nd->nd_intr);
}

void
nextdma_init(nd)
	struct nextdma_config *nd;
{
	DPRINTF(("DMA init ipl (%ld) intr(0x%b)\n",
			NEXT_I_IPL(nd->nd_intr), NEXT_I_BIT(nd->nd_intr),NEXT_INTR_BITS));

	/* @@@ should probably check and free these maps */
	nd->_nd_map = NULL;
	nd->_nd_idx = 0;
	nd->_nd_map_cont = NULL;
	nd->_nd_idx_cont = 0;

	bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR, 0);
	bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR,
			DMACSR_INITBUF | DMACSR_CLRCOMPLETE | DMACSR_RESET);

	next_dma_setup_curr_regs(nd);
	next_dma_setup_cont_regs(nd);

#if 0 && defined(DIAGNOSTIC)
	/* Today, my computer (mourning) appears to fail this test.
	 * yesterday, another NeXT (milo) didn't have this problem
	 * Darrin B. Jewell <jewell@mit.edu>  Mon May 25 07:53:05 1998
	 */
	{
		u_long state;
		state = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_CSR);
		state = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_CSR);
		state &= (DMACSR_BUSEXC | DMACSR_COMPLETE |
				DMACSR_SUPDATE | DMACSR_ENABLE);

		if (state) {
			next_dma_print(nd);
			panic("DMA did not reset\n");
		}
	}
#endif
}


void
nextdma_reset(nd)
	struct nextdma_config *nd;
{
	int s;
	s = spldma();			/* @@@ should this be splimp()? */
	nextdma_init(nd);
	splx(s);
}

/****************************************************************/

/* If the next had multiple busses, this should probably
 * go elsewhere, but it is here anyway */
void
next_dmamap_sync(t, map, offset, len, ops)
	bus_dma_tag_t t;
	bus_dmamap_t map;
	bus_addr_t offset;
	bus_size_t len;
	int ops;
{
	/* flush/purge the cache.
	 * assumes pointers are aligned
	 * @@@ should probably be fixed to use offset and len args.
	 * should also optimize this to work on pages for larger regions?
	 */
	if (ops & BUS_DMASYNC_PREWRITE) {
		int i;
		for(i=0;i<map->dm_nsegs;i++) {
			bus_addr_t p = map->dm_segs[i].ds_addr;
			bus_addr_t e = p+map->dm_segs[i].ds_len;
			while(p<e) {
				DCFL(p);		/* flush */
				p += 16;		/* cache line length */
			}
		}
	}

	if (ops & BUS_DMASYNC_POSTREAD) {
		int i;
		for(i=0;i<map->dm_nsegs;i++) {
			bus_addr_t p = map->dm_segs[i].ds_addr;
			bus_addr_t e = p+map->dm_segs[i].ds_len;
			while(p<e) {
				DCPL(p);		/* purge */
				p += 16;		/* cache line length */
			}
		}
	}
}

/****************************************************************/


/* Call the completed and continue callbacks to try to fill
 * in the dma continue buffers.
 */
void
next_dma_rotate(nd)
	struct nextdma_config *nd;
{

	DPRINTF(("DMA next_dma_rotate()\n"));

	/* If we've reached the end of the current map, then inform
	 * that we've completed that map.
	 */
	if (nd->_nd_map && ((nd->_nd_idx+1) == nd->_nd_map->dm_nsegs)) {
		if (nd->nd_completed_cb)
			(*nd->nd_completed_cb)(nd->_nd_map, nd->nd_cb_arg);
	}

	/* Rotate the continue map into the current map */
	nd->_nd_map = nd->_nd_map_cont;
	nd->_nd_idx = nd->_nd_idx_cont;

	if ((!nd->_nd_map_cont) ||
			((nd->_nd_map_cont) &&
			(++nd->_nd_idx_cont >= nd->_nd_map_cont->dm_nsegs))) {
		if (nd->nd_continue_cb) {
			nd->_nd_map_cont = (*nd->nd_continue_cb)(nd->nd_cb_arg);
		} else {
			nd->_nd_map_cont = 0;
		}
		nd->_nd_idx_cont = 0;
	}
}

void
next_dma_setup_cont_regs(nd)
	struct nextdma_config *nd;
{
	DPRINTF(("DMA next_dma_setup_regs()\n"));

	if (nd->_nd_map_cont) {

		if (nd->nd_intr == NEXT_I_ENETX_DMA) {
			/* Ethernet transmit needs secret magic */

			bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_START,
					nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_addr);
			bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_STOP,
					((nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_addr +
					nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_len)
					+ 0x0) | 0x80000000);
		} else {
			bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_START,
					nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_addr);
			bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_STOP,
					nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_addr +
					nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_len);
		}

	} else {

		bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_START, 0);
		bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_STOP, 0);
	}

	bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_START,
			bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_START));
	bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_STOP,
			bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_STOP));

}

void
next_dma_setup_curr_regs(nd)
	struct nextdma_config *nd;
{
	DPRINTF(("DMA next_dma_setup_curr_regs()\n"));

	if (nd->nd_intr == NEXT_I_ENETX_DMA) {
		/* Ethernet transmit needs secret magic */

		if (nd->_nd_map) {

			bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_NEXT_INITBUF,
					nd->_nd_map->dm_segs[nd->_nd_idx].ds_addr);
			bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_LIMIT,
					((nd->_nd_map->dm_segs[nd->_nd_idx].ds_addr +
					nd->_nd_map->dm_segs[nd->_nd_idx].ds_len)
					+ 0x0) | 0x80000000);
		} else {
			bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_NEXT_INITBUF, 0);
			bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_LIMIT, 0);

		}

		bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_NEXT,
				bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_NEXT_INITBUF));
		bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_LIMIT,
				bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_LIMIT));

	} else {

		if (nd->_nd_map) {

			bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_NEXT,
					nd->_nd_map->dm_segs[nd->_nd_idx].ds_addr);
			bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_LIMIT,
					nd->_nd_map->dm_segs[nd->_nd_idx].ds_addr +
					nd->_nd_map->dm_segs[nd->_nd_idx].ds_len);
		} else {
			bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_NEXT, 0);
			bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_LIMIT, 0);

		}

		bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_NEXT,
				bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_NEXT));
		bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_LIMIT,
				bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_LIMIT));

	}

}


/* This routine is used for debugging */

void
next_dma_print(nd)
	struct nextdma_config *nd;
{
	u_long dd_csr;
	u_long dd_next;
	u_long dd_next_initbuf;
	u_long dd_limit;
	u_long dd_start;
	u_long dd_stop;
	u_long dd_saved_next;
	u_long dd_saved_limit;
	u_long dd_saved_start;
	u_long dd_saved_stop;

	/* Read all of the registers before we print anything out,
	 * in case something changes
	 */
	dd_csr          = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_CSR);
	dd_next         = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_NEXT);
	dd_next_initbuf = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_NEXT_INITBUF);
	dd_limit        = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_LIMIT);
	dd_start        = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_START);
	dd_stop         = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_STOP);
	dd_saved_next   = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_NEXT);
	dd_saved_limit  = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_LIMIT);
	dd_saved_start  = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_START);
	dd_saved_stop   = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_STOP);

	if (nd->_nd_map) {
		printf("NDMAP: nd->_nd_map->dm_segs[%d].ds_addr = 0x%08lx\n",
				nd->_nd_idx, nd->_nd_map->dm_segs[nd->_nd_idx].ds_addr);
		printf("NDMAP: nd->_nd_map->dm_segs[%d].ds_len = %d\n",
				nd->_nd_idx, nd->_nd_map->dm_segs[nd->_nd_idx].ds_len);
	} else {
		printf("NDMAP: nd->_nd_map = NULL\n");
	}
	if (nd->_nd_map_cont) {
		printf("NDMAP: nd->_nd_map_cont->dm_segs[%d].ds_addr = 0x%08lx\n",
				nd->_nd_idx_cont, nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_addr);
		printf("NDMAP: nd->_nd_map_cont->dm_segs[%d].ds_len = %d\n",
				nd->_nd_idx_cont, nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_len);
	} else {
		printf("NDMAP: nd->_nd_map_cont = NULL\n");
	}

	printf("NDMAP: dd->dd_csr          = 0x%b\n",   dd_csr, DMACSR_BITS);
	printf("NDMAP: dd->dd_saved_next   = 0x%08x\n", dd_saved_next);
	printf("NDMAP: dd->dd_saved_limit  = 0x%08x\n", dd_saved_limit);
	printf("NDMAP: dd->dd_saved_start  = 0x%08x\n", dd_saved_start);
	printf("NDMAP: dd->dd_saved_stop   = 0x%08x\n", dd_saved_stop);
	printf("NDMAP: dd->dd_next         = 0x%08x\n", dd_next);
	printf("NDMAP: dd->dd_next_initbuf = 0x%08x\n", dd_next_initbuf);
	printf("NDMAP: dd->dd_limit        = 0x%08x\n", dd_limit);
	printf("NDMAP: dd->dd_start        = 0x%08x\n", dd_start);
	printf("NDMAP: dd->dd_stop         = 0x%08x\n", dd_stop);

	printf("NDMAP: interrupt ipl (%ld) intr(0x%b)\n",
			NEXT_I_IPL(nd->nd_intr), NEXT_I_BIT(nd->nd_intr),NEXT_INTR_BITS);
}

/****************************************************************/
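
/* The interrupt handler below works roughly as follows (a summary of the
 * code, not of hardware documentation).  After trimming the finished
 * segment's ds_len to what DD_SAVED_LIMIT reports, it checks DMACSR_ENABLE.
 * If it is still set, the hardware has already moved on to the continue
 * buffer, so the maps are rotated, DD_START/DD_STOP are reloaded for the
 * next segment, and the interrupt is acknowledged with DMACSR_CLRCOMPLETE
 * (plus DMACSR_SETSUPDATE when another segment is ready to chain).  If
 * DMACSR_ENABLE has dropped, the chain has run dry: the final completed
 * callback is delivered and the channel is either restarted from the
 * pending continue map or reset, in which case the shutdown callback is
 * called.
 */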

int
nextdma_intr(arg)
	void *arg;
{
	struct nextdma_config *nd = arg;

	/* @@@ This is bogus, we can't be certain of arg's type
	 * unless the interrupt is for us
	 */

	if (!INTR_OCCURRED(nd->nd_intr)) return 0;
	/* Handle dma interrupts */

#ifdef DIAGNOSTIC
	if (nd->nd_intr == NEXT_I_ENETR_DMA) {
		if (debugernd != nd) {
			panic("DMA incorrect handling of rx nd->nd_intr");
		}
	}
	if (nd->nd_intr == NEXT_I_ENETX_DMA) {
		if (debugexnd != nd) {
			panic("DMA incorrect handling of tx nd->nd_intr");
		}
	}
#endif

	DPRINTF(("DMA interrupt ipl (%ld) intr(0x%b)\n",
			NEXT_I_IPL(nd->nd_intr), NEXT_I_BIT(nd->nd_intr),NEXT_INTR_BITS));

	{
		int state = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_CSR);

		state &= (DMACSR_BUSEXC | DMACSR_COMPLETE |
				DMACSR_SUPDATE | DMACSR_ENABLE);

		if (state & DMACSR_BUSEXC) {
#if 0 /* This bit seems to get set periodically and I don't know why */
			next_dma_print(nd);
			panic("Bus exception in DMA ipl (%ld) intr(0x%b)\n",
					NEXT_I_IPL(nd->nd_intr), NEXT_I_BIT(nd->nd_intr),NEXT_INTR_BITS);
#endif
		}

#ifdef DIAGNOSTIC
		if (!(state & DMACSR_COMPLETE)) {
			next_dma_print(nd);
#if 0 /* This bit doesn't seem to get set every once in a while,
       * and I don't know why.  Let's try treating it as a spurious
       * interrupt.  ie. report it and ignore the interrupt.
       */
			printf("DEBUG: state = 0x%b\n", state, DMACSR_BITS);
			panic("DMA ipl (%ld) intr(0x%b), DMACSR_COMPLETE not set in intr\n",
					NEXT_I_IPL(nd->nd_intr), NEXT_I_BIT(nd->nd_intr),NEXT_INTR_BITS);
#else
			printf("DMA ipl (%ld) intr(0x%b), DMACSR_COMPLETE not set in intr\n",
					NEXT_I_IPL(nd->nd_intr), NEXT_I_BIT(nd->nd_intr),NEXT_INTR_BITS);
			return(1);
#endif
		}
#endif

		/* Set the length of the segment to match actual length.
		 * @@@ is it okay to resize dma segments here?
		 * i should probably ask jason about this.
		 */
		if (nd->_nd_map) {

			bus_addr_t next;
			bus_addr_t limit;

#if 0
			if (state & DMACSR_ENABLE) {
				next = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_NEXT);
			} else {
				next = nd->_nd_map->dm_segs[nd->_nd_idx].ds_addr;
			}
#else
			next = nd->_nd_map->dm_segs[nd->_nd_idx].ds_addr;
#endif
			limit = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_LIMIT);

			if (nd->nd_intr == NEXT_I_ENETX_DMA) {
				limit &= ~0x80000000;
			}

#ifdef DIAGNOSTIC
			if (next != nd->_nd_map->dm_segs[nd->_nd_idx].ds_addr) {
				next_dma_print(nd);
				printf("DEBUG: state = 0x%b\n", state, DMACSR_BITS);

				panic("DMA ipl (%ld) intr(0x%b), unexpected completed address\n",
						NEXT_I_IPL(nd->nd_intr), NEXT_I_BIT(nd->nd_intr),NEXT_INTR_BITS);
			}
#endif

			/* @@@ I observed a case where DMACSR_ENABLE wasn't set and
			 * DD_SAVED_LIMIT didn't contain the expected limit value.  This
			 * should be tested, fixed, and removed.
			 */

			if (((limit-next) > nd->_nd_map->dm_segs[nd->_nd_idx].ds_len)
					|| (limit-next < 0)) {
#if 0
				next_dma_print(nd);
				printf("DEBUG: state = 0x%b\n", state, DMACSR_BITS);
				panic("DMA packlen: next = 0x%08x limit = 0x%08x\n", next, limit);
#else
				DPRINTF(("DMA packlen: next = 0x%08x limit = 0x%08x", next, limit));
#endif

			} else {
				nd->_nd_map->dm_segs[nd->_nd_idx].ds_len = limit - next;
			}
		}


		if ((state & DMACSR_ENABLE) == 0) {

			/* Non chaining interrupts shutdown immediately */
			if (!nd->nd_chaining_flag) {
				nd->_nd_map = nd->_nd_map_cont;
				nd->_nd_idx = nd->_nd_idx_cont;
				nd->_nd_map_cont = 0;
				nd->_nd_idx_cont = 0;
			}

			/* Call the completed callback for the last packet */
			if (nd->_nd_map && ((nd->_nd_idx+1) == nd->_nd_map->dm_nsegs)) {
				if (nd->nd_completed_cb)
					(*nd->nd_completed_cb)(nd->_nd_map, nd->nd_cb_arg);
			}
			nd->_nd_map = 0;
			nd->_nd_idx = 0;

			if (nd->_nd_map_cont) {
				DPRINTF(("DMA ipl (%ld) intr(0x%b), restarting\n",
						NEXT_I_IPL(nd->nd_intr), NEXT_I_BIT(nd->nd_intr),NEXT_INTR_BITS));

				bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR,
						DMACSR_SETSUPDATE | DMACSR_SETENABLE);

			} else {
				bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR,
						DMACSR_CLRCOMPLETE | DMACSR_RESET);
				DPRINTF(("DMA: enable not set w/o continue map, shutting down dma\n"));
				if (nd->nd_shutdown_cb) (*nd->nd_shutdown_cb)(nd->nd_cb_arg);
			}

		} else {
			next_dma_rotate(nd);
			next_dma_setup_cont_regs(nd);

			if (nd->_nd_map_cont) {
				bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR,
						DMACSR_SETSUPDATE | DMACSR_CLRCOMPLETE);
			} else {
				bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR,
						DMACSR_CLRCOMPLETE);
			}

		}

	}

	DPRINTF(("DMA exiting interrupt ipl (%ld) intr(0x%b)\n",
			NEXT_I_IPL(nd->nd_intr), NEXT_I_BIT(nd->nd_intr),NEXT_INTR_BITS));

	return(1);
}

/*
 * Check to see if dma has finished for a channel */
int
nextdma_finished(nd)
	struct nextdma_config *nd;
{
	int r;
	int s;
	s = spldma();			/* @@@ should this be splimp()? */
	r = (nd->_nd_map == NULL) && (nd->_nd_map_cont == NULL);
	splx(s);
	return(r);
}

void
nextdma_start(nd, dmadir)
	struct nextdma_config *nd;
	u_long dmadir;			/* DMACSR_READ or DMACSR_WRITE */
{

#ifdef DIAGNOSTIC
	if (!nextdma_finished(nd)) {
		panic("DMA trying to start before previous finished on intr(0x%b)\n",
				NEXT_I_BIT(nd->nd_intr),NEXT_INTR_BITS);
	}
#endif


	DPRINTF(("DMA start (%ld) intr(0x%b)\n",
			NEXT_I_IPL(nd->nd_intr), NEXT_I_BIT(nd->nd_intr),NEXT_INTR_BITS));

#ifdef DIAGNOSTIC
	if (nd->_nd_map) {
		next_dma_print(nd);
		panic("DMA: nextdma_start() with non null map\n");
	}
	if (nd->_nd_map_cont) {
		next_dma_print(nd);
		panic("DMA: nextdma_start() with non null continue map\n");
	}
#endif

	next_dma_rotate(nd);

#ifdef DIAGNOSTIC
	if (!nd->_nd_map_cont) {
		panic("No map available in nextdma_start()");
	}
	if (!DMA_BEGINALIGNED(nd->_nd_map_cont->dm_segs[nd->_nd_idx].ds_addr)) {
		panic("unaligned begin dma at start\n");
	}
	if (!DMA_ENDALIGNED(nd->_nd_map_cont->dm_segs[nd->_nd_idx].ds_addr +
			nd->_nd_map_cont->dm_segs[nd->_nd_idx].ds_len)) {
		panic("unaligned end dma at start\n");
	}
#endif

	DPRINTF(("DMA initiating DMA %s of %d segments on intr(0x%b)\n",
			(dmadir == DMACSR_READ ? "read" : "write"), nd->_nd_map_cont->dm_nsegs,
			NEXT_I_BIT(nd->nd_intr),NEXT_INTR_BITS));

	bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR, 0);
	bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR,
			DMACSR_INITBUF | DMACSR_RESET | dmadir);

	next_dma_setup_cont_regs(nd);

	/* When starting DMA, we must put the continue map
	 * into the current register.  We reset the nd->_nd_map
	 * pointer here to avoid duplicated completed callbacks
	 * for the first buffer.
	 */
	nd->_nd_map = nd->_nd_map_cont;
	nd->_nd_idx = nd->_nd_idx_cont;
	next_dma_setup_curr_regs(nd);
	nd->_nd_map = 0;
	nd->_nd_idx = 0;


#if (defined(ND_DEBUG))
	next_dma_print(nd);
#endif

	if (nd->nd_chaining_flag) {
		bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR,
				DMACSR_SETSUPDATE | DMACSR_SETENABLE);
	} else {
		bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR,
				DMACSR_SETENABLE);
	}

}
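
/*
 * Example of how a client driver is expected to use this interface.  This
 * is an illustrative sketch only: "xxsoftc", its fields and the xxdma_*
 * callbacks are made-up names (the ethernet driver is one real client);
 * the nextdma_config fields shown are the ones used above.
 *
 *	struct xxsoftc {
 *		struct nextdma_config	sc_dma;
 *		bus_dmamap_t		sc_map;
 *	};
 *
 *	In attach:
 *		sc->sc_dma.nd_bst = <bus space tag of the dma channel>;
 *		sc->sc_dma.nd_bsh = <bus space handle of the dma channel>;
 *		sc->sc_dma.nd_intr = NEXT_I_ENETX_DMA;
 *		sc->sc_dma.nd_chaining_flag = 1;
 *		sc->sc_dma.nd_continue_cb = xxdma_continue;	returns next map or NULL
 *		sc->sc_dma.nd_completed_cb = xxdma_completed;	map is done, unload it
 *		sc->sc_dma.nd_shutdown_cb = xxdma_shutdown;	channel went idle
 *		sc->sc_dma.nd_cb_arg = sc;
 *		nextdma_config(&sc->sc_dma);
 *		bus_dmamap_create(sc->sc_dma.nd_dmat, MAXPHYS, 1, MAXPHYS, 0,
 *				BUS_DMA_NOWAIT, &sc->sc_map);
 *
 *	To start a transfer (buffers must satisfy DMA_BEGINALIGNED and
 *	DMA_ENDALIGNED, see nextdma_start above):
 *		bus_dmamap_load(sc->sc_dma.nd_dmat, sc->sc_map, buf, len,
 *				NULL, BUS_DMA_NOWAIT);
 *		bus_dmamap_sync(sc->sc_dma.nd_dmat, sc->sc_map, 0, len,
 *				BUS_DMASYNC_PREWRITE);
 *		<arrange for xxdma_continue to hand back sc->sc_map>
 *		nextdma_start(&sc->sc_dma, DMACSR_WRITE);
 */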