/*	$NetBSD: nextdma.c,v 1.27 2001/04/17 03:42:25 dbj Exp $	*/
/*
 * Copyright (c) 1998 Darrin B. Jewell
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Darrin B. Jewell
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/syslog.h>
#include <sys/socket.h>
#include <sys/device.h>
#include <sys/malloc.h>
#include <sys/ioctl.h>
#include <sys/errno.h>

#include <machine/autoconf.h>
#include <machine/cpu.h>
#include <machine/intr.h>

#include <m68k/cacheops.h>

#include <next68k/next68k/isr.h>

#define _NEXT68K_BUS_DMA_PRIVATE
#include <machine/bus.h>

#include "nextdmareg.h"
#include "nextdmavar.h"

#if 1
#define ND_DEBUG
#endif

#if defined(ND_DEBUG)
int nextdma_debug = 0;
#define DPRINTF(x) if (nextdma_debug) printf x;
#else
#define DPRINTF(x)
#endif
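/*
 * ND_DEBUG state tracing: the arrays declared below are small ring
 * buffers.  Each DMA interrupt records the channel CSR value for the
 * ethernet receive or SCSI channel, and the dumpstate routines print
 * the saved (non-zero) entries in order for post-mortem debugging.
 */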
#if defined(ND_DEBUG)
int nextdma_debug_enetr_idx = 0;
unsigned int nextdma_debug_enetr_state[100] = { 0 };
int nextdma_debug_scsi_idx = 0;
unsigned int nextdma_debug_scsi_state[100] = { 0 };

void nextdma_debug_initstate(struct nextdma_config *nd);
void nextdma_debug_savestate(struct nextdma_config *nd, unsigned int state);
void nextdma_debug_scsi_dumpstate(void);
void nextdma_debug_enetr_dumpstate(void);

void
nextdma_debug_initstate(struct nextdma_config *nd)
{
	switch(nd->nd_intr) {
	case NEXT_I_ENETR_DMA:
		memset(nextdma_debug_enetr_state,0,sizeof(nextdma_debug_enetr_state));
		break;
	case NEXT_I_SCSI_DMA:
		memset(nextdma_debug_scsi_state,0,sizeof(nextdma_debug_scsi_state));
		break;
	}
}

void
nextdma_debug_savestate(struct nextdma_config *nd, unsigned int state)
{
	switch(nd->nd_intr) {
	case NEXT_I_ENETR_DMA:
		nextdma_debug_enetr_state[nextdma_debug_enetr_idx++] = state;
		nextdma_debug_enetr_idx %= (sizeof(nextdma_debug_enetr_state)/sizeof(unsigned int));
		break;
	case NEXT_I_SCSI_DMA:
		nextdma_debug_scsi_state[nextdma_debug_scsi_idx++] = state;
		nextdma_debug_scsi_idx %= (sizeof(nextdma_debug_scsi_state)/sizeof(unsigned int));
		break;
	}
}

void
nextdma_debug_enetr_dumpstate(void)
{
	int i;
	int s;
	s = spldma();
	i = nextdma_debug_enetr_idx;
	do {
		char sbuf[256];
		if (nextdma_debug_enetr_state[i]) {
			bitmask_snprintf(nextdma_debug_enetr_state[i], DMACSR_BITS, sbuf, sizeof(sbuf));
			printf("DMA: 0x%02x state 0x%s\n",i,sbuf);
		}
		i++;
		i %= (sizeof(nextdma_debug_enetr_state)/sizeof(unsigned int));
	} while (i != nextdma_debug_enetr_idx);
	splx(s);
}

void
nextdma_debug_scsi_dumpstate(void)
{
	int i;
	int s;
	s = spldma();
	i = nextdma_debug_scsi_idx;
	do {
		char sbuf[256];
		if (nextdma_debug_scsi_state[i]) {
			bitmask_snprintf(nextdma_debug_scsi_state[i], DMACSR_BITS, sbuf, sizeof(sbuf));
			printf("DMA: 0x%02x state 0x%s\n",i,sbuf);
		}
		i++;
		i %= (sizeof(nextdma_debug_scsi_state)/sizeof(unsigned int));
	} while (i != nextdma_debug_scsi_idx);
	splx(s);
}
#endif


void next_dmamap_sync __P((bus_dma_tag_t, bus_dmamap_t, bus_addr_t,
		bus_size_t, int));
int next_dma_continue __P((struct nextdma_config *));
void next_dma_rotate __P((struct nextdma_config *));

void next_dma_setup_cont_regs __P((struct nextdma_config *));
void next_dma_setup_curr_regs __P((struct nextdma_config *));

void
nextdma_config(nd)
	struct nextdma_config *nd;
{
	/* Initialize the dma_tag. As a hack, we currently
	 * put the dma tag in the structure itself.  It shouldn't be there.
	 */

	{
		bus_dma_tag_t t;
		t = &nd->_nd_dmat;
		t->_cookie = nd;
		t->_dmamap_create = _bus_dmamap_create;
		t->_dmamap_destroy = _bus_dmamap_destroy;
		t->_dmamap_load = _bus_dmamap_load_direct;
		t->_dmamap_load_mbuf = _bus_dmamap_load_mbuf_direct;
		t->_dmamap_load_uio = _bus_dmamap_load_uio_direct;
		t->_dmamap_load_raw = _bus_dmamap_load_raw_direct;
		t->_dmamap_unload = _bus_dmamap_unload;
		t->_dmamap_sync = _bus_dmamap_sync;

		t->_dmamem_alloc = _bus_dmamem_alloc;
		t->_dmamem_free = _bus_dmamem_free;
		t->_dmamem_map = _bus_dmamem_map;
		t->_dmamem_unmap = _bus_dmamem_unmap;
		t->_dmamem_mmap = _bus_dmamem_mmap;

		nd->nd_dmat = t;
	}

	nextdma_init(nd);

	isrlink_autovec(nextdma_intr, nd, NEXT_I_IPL(nd->nd_intr), 10);
	INTR_ENABLE(nd->nd_intr);
}
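/*
 * Usage sketch (illustrative only, not part of the original driver):
 * a client front end fills in the fields of its struct nextdma_config
 * before calling nextdma_config(), then kicks off a transfer with
 * nextdma_start() once its continue callback can hand out a loaded
 * dma map.  The softc members and callback names below are
 * hypothetical.
 *
 *	struct nextdma_config *nd = &sc->sc_dma;	(hypothetical softc member)
 *
 *	nd->nd_bst = sc->sc_bst;		bus space tag for the channel registers
 *	nd->nd_bsh = sc->sc_dma_bsh;		bus space handle for the channel registers
 *	nd->nd_intr = NEXT_I_SCSI_DMA;		which DMA interrupt this channel raises
 *	nd->nd_cb_arg = sc;			argument passed back to every callback
 *	nd->nd_continue_cb = mydrv_dma_continue;	returns the next loaded map, or NULL
 *	nd->nd_completed_cb = mydrv_dma_completed;	called as each map finishes
 *	nd->nd_shutdown_cb = mydrv_dma_shutdown;	called when the channel stops
 *
 *	nextdma_config(nd);			hooks nextdma_intr() up to nd_intr
 *	...
 *	nextdma_start(nd, DMACSR_SETREAD);	or DMACSR_SETWRITE
 */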
void
nextdma_init(nd)
	struct nextdma_config *nd;
{
#ifdef ND_DEBUG
	if (nextdma_debug) {
		char sbuf[256];

		bitmask_snprintf(NEXT_I_BIT(nd->nd_intr), NEXT_INTR_BITS,
		    sbuf, sizeof(sbuf));
		printf("DMA init ipl (%ld) intr(0x%s)\n",
			NEXT_I_IPL(nd->nd_intr), sbuf);
	}
#endif

	nd->_nd_map = NULL;
	nd->_nd_idx = 0;
	nd->_nd_map_cont = NULL;
	nd->_nd_idx_cont = 0;

	bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR, 0);
	bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR,
			DMACSR_RESET | DMACSR_INITBUF);

	next_dma_setup_curr_regs(nd);
	next_dma_setup_cont_regs(nd);

#if defined(DIAGNOSTIC)
	{
		u_long state;
		state = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_CSR);

#if 1
		/* mourning (a 25Mhz 68040 mono slab) appears to set BUSEXC
		 * milo (a 25Mhz 68040 mono cube) didn't have this problem
		 * Darrin B. Jewell <jewell@mit.edu>  Mon May 25 07:53:05 1998
		 */
		state &= (DMACSR_COMPLETE | DMACSR_SUPDATE | DMACSR_ENABLE);
#else
		state &= (DMACSR_BUSEXC | DMACSR_COMPLETE |
			  DMACSR_SUPDATE | DMACSR_ENABLE);
#endif
		if (state) {
			next_dma_print(nd);
			panic("DMA did not reset");
		}
	}
#endif
}


void
nextdma_reset(nd)
	struct nextdma_config *nd;
{
	int s;
	s = spldma();

	DPRINTF(("DMA reset\n"));

#if (defined(ND_DEBUG))
	if (nextdma_debug) next_dma_print(nd);
#endif

	if ((nd->_nd_map) || (nd->_nd_map_cont)) {
		/* @@@ clean up dma maps */
		panic("DMA abort not implemented\n");
	}

	nextdma_init(nd);
	splx(s);
}

/****************************************************************/
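/*
 * Transfer bookkeeping: the engine always tracks two bus_dmamap
 * segments at once.  _nd_map/_nd_idx name the segment the hardware is
 * currently transferring (programmed into the NEXT/LIMIT registers by
 * next_dma_setup_curr_regs()), while _nd_map_cont/_nd_idx_cont name
 * the segment queued to follow it (programmed into the START/STOP
 * registers by next_dma_setup_cont_regs()).  next_dma_rotate() below
 * advances the continue segment into the current slot and asks the
 * client's nd_continue_cb for more work once a map runs out of
 * segments.
 */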

/* Call the completed and continue callbacks to try to fill
 * in the dma continue buffers.
 */
void
next_dma_rotate(nd)
	struct nextdma_config *nd;
{

	DPRINTF(("DMA next_dma_rotate()\n"));

	/* Rotate the continue map into the current map */
	nd->_nd_map = nd->_nd_map_cont;
	nd->_nd_idx = nd->_nd_idx_cont;

	if ((!nd->_nd_map_cont) ||
	    ((nd->_nd_map_cont) &&
	     (++nd->_nd_idx_cont >= nd->_nd_map_cont->dm_nsegs))) {
		if (nd->nd_continue_cb) {
			nd->_nd_map_cont = (*nd->nd_continue_cb)(nd->nd_cb_arg);
			if (nd->_nd_map_cont) {
				nd->_nd_map_cont->dm_xfer_len = 0;
			}
		} else {
			nd->_nd_map_cont = 0;
		}
		nd->_nd_idx_cont = 0;
	}

#ifdef DIAGNOSTIC
	if (nd->_nd_map_cont) {
		if (!DMA_BEGINALIGNED(nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_addr)) {
			next_dma_print(nd);
			panic("DMA request unaligned at start\n");
		}
		if (!DMA_ENDALIGNED(nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_addr +
				nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_len)) {
			next_dma_print(nd);
			panic("DMA request unaligned at end\n");
		}
	}
#endif

}
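/*
 * The contract next_dma_rotate() implies for the client's
 * nd_continue_cb: return the next fully loaded bus_dmamap_t, or NULL
 * when nothing more is queued (the channel is then allowed to shut
 * down).  A minimal sketch, using hypothetical driver names:
 *
 *	bus_dmamap_t
 *	mydrv_dma_continue(arg)
 *		void *arg;
 *	{
 *		struct mydrv_softc *sc = arg;		(this is nd_cb_arg)
 *		struct mydrv_buf *b = mydrv_next_queued(sc);
 *
 *		The map must already be loaded and must satisfy the
 *		DMA_BEGINALIGNED/DMA_ENDALIGNED checks above;
 *		next_dma_rotate() resets its dm_xfer_len to 0.
 *
 *		return (b != NULL) ? b->b_dmamap : NULL;
 *	}
 */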
void
next_dma_setup_cont_regs(nd)
	struct nextdma_config *nd;
{
	bus_addr_t dd_start;
	bus_addr_t dd_stop;
	bus_addr_t dd_saved_start;
	bus_addr_t dd_saved_stop;

	DPRINTF(("DMA next_dma_setup_regs()\n"));

	if (nd->_nd_map_cont) {
		dd_start = nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_addr;
		dd_stop = (nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_addr +
				nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_len);

		if (nd->nd_intr == NEXT_I_ENETX_DMA) {
			dd_stop |= 0x80000000;	/* Ethernet transmit needs secret magic */
		}
	} else {
		dd_start = 0xdeadbeef;
		dd_stop = 0xdeadbeef;
	}

	dd_saved_start = dd_start;
	dd_saved_stop = dd_stop;

	bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_START, dd_start);
	bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_STOP, dd_stop);
	bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_START, dd_saved_start);
	bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_STOP, dd_saved_stop);

#ifdef DIAGNOSTIC
	if (   (bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_START) != dd_start)
	    || (bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_STOP) != dd_stop)
	    || (bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_START) != dd_saved_start)
	    || (bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_STOP) != dd_saved_stop)
	    ) {
		next_dma_print(nd);
		panic("DMA failure writing to continue regs");
	}
#endif
}

void
next_dma_setup_curr_regs(nd)
	struct nextdma_config *nd;
{
	bus_addr_t dd_next;
	bus_addr_t dd_limit;
	bus_addr_t dd_saved_next;
	bus_addr_t dd_saved_limit;

	DPRINTF(("DMA next_dma_setup_curr_regs()\n"));


	if (nd->_nd_map) {
		dd_next = nd->_nd_map->dm_segs[nd->_nd_idx].ds_addr;
		dd_limit = (nd->_nd_map->dm_segs[nd->_nd_idx].ds_addr +
				nd->_nd_map->dm_segs[nd->_nd_idx].ds_len);

		if (nd->nd_intr == NEXT_I_ENETX_DMA) {
			dd_limit |= 0x80000000;	/* Ethernet transmit needs secret magic */
		}
	} else {
		dd_next = 0xdeadbeef;
		dd_limit = 0xdeadbeef;
	}

	dd_saved_next = dd_next;
	dd_saved_limit = dd_limit;

	if (nd->nd_intr == NEXT_I_ENETX_DMA) {
		bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_NEXT_INITBUF, dd_next);
	} else {
		bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_NEXT, dd_next);
	}
	bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_LIMIT, dd_limit);
	bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_NEXT, dd_saved_next);
	bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_LIMIT, dd_saved_limit);

#ifdef DIAGNOSTIC
	if (   (bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_NEXT_INITBUF) != dd_next)
	    || (bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_NEXT) != dd_next)
	    || (bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_LIMIT) != dd_limit)
	    || (bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_NEXT) != dd_saved_next)
	    || (bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_LIMIT) != dd_saved_limit)
	    ) {
		next_dma_print(nd);
		panic("DMA failure writing to current regs");
	}
#endif
}


/* This routine is used for debugging */

void
next_dma_print(nd)
	struct nextdma_config *nd;
{
	u_long dd_csr;
	u_long dd_next;
	u_long dd_next_initbuf;
	u_long dd_limit;
	u_long dd_start;
	u_long dd_stop;
	u_long dd_saved_next;
	u_long dd_saved_limit;
	u_long dd_saved_start;
	u_long dd_saved_stop;
	char sbuf[256];

	/* Read all of the registers before we print anything out,
	 * in case something changes
	 */
	dd_csr = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_CSR);
	dd_next = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_NEXT);
	dd_next_initbuf = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_NEXT_INITBUF);
	dd_limit = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_LIMIT);
	dd_start = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_START);
	dd_stop = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_STOP);
	dd_saved_next = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_NEXT);
	dd_saved_limit = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_LIMIT);
	dd_saved_start = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_START);
	dd_saved_stop = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_STOP);

	bitmask_snprintf((*(volatile u_long *)IIOV(NEXT_P_INTRSTAT)),
			NEXT_INTR_BITS, sbuf, sizeof(sbuf));
	printf("NDMAP: *intrstat = 0x%s\n", sbuf);

	bitmask_snprintf((*(volatile u_long *)IIOV(NEXT_P_INTRMASK)),
			NEXT_INTR_BITS, sbuf, sizeof(sbuf));
	printf("NDMAP: *intrmask = 0x%s\n", sbuf);

	/* NDMAP is Next DMA Print (really!) */

	if (nd->_nd_map) {
		printf("NDMAP: nd->_nd_map->dm_mapsize = %d\n",
				nd->_nd_map->dm_mapsize);
		printf("NDMAP: nd->_nd_map->dm_nsegs = %d\n",
				nd->_nd_map->dm_nsegs);
		printf("NDMAP: nd->_nd_map->dm_xfer_len = %d\n",
				nd->_nd_map->dm_xfer_len);
		printf("NDMAP: nd->_nd_map->dm_segs[%d].ds_addr = 0x%08lx\n",
				nd->_nd_idx,nd->_nd_map->dm_segs[nd->_nd_idx].ds_addr);
		printf("NDMAP: nd->_nd_map->dm_segs[%d].ds_len = %d\n",
				nd->_nd_idx,nd->_nd_map->dm_segs[nd->_nd_idx].ds_len);
		{
			int i;
			printf("NDMAP: Entire map;\n");
			for(i=0;i<nd->_nd_map->dm_nsegs;i++) {
				printf("NDMAP: nd->_nd_map->dm_segs[%d].ds_addr = 0x%08lx\n",
						i,nd->_nd_map->dm_segs[i].ds_addr);
				printf("NDMAP: nd->_nd_map->dm_segs[%d].ds_len = %d\n",
						i,nd->_nd_map->dm_segs[i].ds_len);
			}
		}
	} else {
		printf("NDMAP: nd->_nd_map = NULL\n");
	}
	if (nd->_nd_map_cont) {
		printf("NDMAP: nd->_nd_map_cont->dm_mapsize = %d\n",
				nd->_nd_map_cont->dm_mapsize);
		printf("NDMAP: nd->_nd_map_cont->dm_nsegs = %d\n",
				nd->_nd_map_cont->dm_nsegs);
		printf("NDMAP: nd->_nd_map_cont->dm_xfer_len = %d\n",
				nd->_nd_map_cont->dm_xfer_len);
		printf("NDMAP: nd->_nd_map_cont->dm_segs[%d].ds_addr = 0x%08lx\n",
				nd->_nd_idx_cont,nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_addr);
		printf("NDMAP: nd->_nd_map_cont->dm_segs[%d].ds_len = %d\n",
				nd->_nd_idx_cont,nd->_nd_map_cont->dm_segs[nd->_nd_idx_cont].ds_len);
		if (nd->_nd_map_cont != nd->_nd_map) {
			int i;
			printf("NDMAP: Entire map;\n");
			for(i=0;i<nd->_nd_map_cont->dm_nsegs;i++) {
				printf("NDMAP: nd->_nd_map_cont->dm_segs[%d].ds_addr = 0x%08lx\n",
						i,nd->_nd_map_cont->dm_segs[i].ds_addr);
				printf("NDMAP: nd->_nd_map_cont->dm_segs[%d].ds_len = %d\n",
						i,nd->_nd_map_cont->dm_segs[i].ds_len);
			}
		}
	} else {
		printf("NDMAP: nd->_nd_map_cont = NULL\n");
	}

	bitmask_snprintf(dd_csr, DMACSR_BITS, sbuf, sizeof(sbuf));
	printf("NDMAP: dd->dd_csr = 0x%s\n", sbuf);

	printf("NDMAP: dd->dd_saved_next = 0x%08x\n", dd_saved_next);
	printf("NDMAP: dd->dd_saved_limit = 0x%08x\n", dd_saved_limit);
	printf("NDMAP: dd->dd_saved_start = 0x%08x\n", dd_saved_start);
	printf("NDMAP: dd->dd_saved_stop = 0x%08x\n", dd_saved_stop);
	printf("NDMAP: dd->dd_next = 0x%08x\n", dd_next);
	printf("NDMAP: dd->dd_next_initbuf = 0x%08x\n", dd_next_initbuf);
	printf("NDMAP: dd->dd_limit = 0x%08x\n", dd_limit);
	printf("NDMAP: dd->dd_start = 0x%08x\n", dd_start);
	printf("NDMAP: dd->dd_stop = 0x%08x\n", dd_stop);

	bitmask_snprintf(NEXT_I_BIT(nd->nd_intr), NEXT_INTR_BITS,
			sbuf, sizeof(sbuf));
	printf("NDMAP: interrupt ipl (%ld) intr(0x%s)\n",
			NEXT_I_IPL(nd->nd_intr), sbuf);
}

/****************************************************************/
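/*
 * Interrupt-time handshake, as implemented below: read DD_CSR, check
 * that DMACSR_COMPLETE is set, and decide how far the channel actually
 * got by choosing the right limit register for the observed
 * ENABLE/SUPDATE/BUSEXC combination.  The completed byte count is
 * added to dm_xfer_len and the finished map is handed to
 * nd_completed_cb.  If the channel is still enabled, the maps are
 * rotated, the continue registers are refilled, and DMACSR_CLRCOMPLETE
 * (plus DMACSR_SETSUPDATE when another segment is queued) is written
 * back; otherwise the channel is reset and nd_shutdown_cb is called.
 */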
int
nextdma_intr(arg)
	void *arg;
{
	/* @@@ This is bogus, we can't be certain of arg's type
	 * unless the interrupt is for us.  For now we successfully
	 * cheat because DMA interrupts are the only things invoked
	 * at this interrupt level.
	 */
	struct nextdma_config *nd = arg;

	if (!INTR_OCCURRED(nd->nd_intr)) return 0;
	/* Handle dma interrupts */

#ifdef ND_DEBUG
	if (nextdma_debug) {
		char sbuf[256];

		bitmask_snprintf(NEXT_I_BIT(nd->nd_intr), NEXT_INTR_BITS,
		    sbuf, sizeof(sbuf));
		printf("DMA interrupt ipl (%ld) intr(0x%s)\n",
			NEXT_I_IPL(nd->nd_intr), sbuf);
	}
#endif

#ifdef DIAGNOSTIC
	if (!nd->_nd_map) {
		next_dma_print(nd);
		panic("DMA missing current map in interrupt!\n");
	}
#endif

	{
		unsigned int state = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_CSR);

#if defined(ND_DEBUG)
		nextdma_debug_savestate(nd,state);
#endif

#ifdef DIAGNOSTIC
		if (!(state & DMACSR_COMPLETE)) {
			char sbuf[256];
			next_dma_print(nd);
			bitmask_snprintf(state, DMACSR_BITS, sbuf, sizeof(sbuf));
			printf("DMA: state 0x%s\n",sbuf);
			panic("DMA complete not set in interrupt\n");
		}
#endif

		{
			bus_addr_t onext;
			bus_addr_t olimit;
			bus_addr_t slimit;

			DPRINTF(("DMA: finishing xfer\n"));

			onext = nd->_nd_map->dm_segs[nd->_nd_idx].ds_addr;
			olimit = onext + nd->_nd_map->dm_segs[nd->_nd_idx].ds_len;

			{
				int result = 0;
				if (state & DMACSR_ENABLE) {
					/* enable bit was set */
					result |= 0x01;
				}
				if (state & DMACSR_SUPDATE) {
					/* supdate bit was set */
					result |= 0x02;
				}
				if (nd->_nd_map_cont == NULL) {
					KASSERT(nd->_nd_idx+1 == nd->_nd_map->dm_nsegs);
					/* Expecting a shutdown, didn't SETSUPDATE last turn */
					result |= 0x04;
				}
				if (state & DMACSR_BUSEXC) {
					/* bus exception bit was set */
					result |= 0x08;
				}
				switch (result) {
				case 0x00: /* !BUSEXC && !expecting && !SUPDATE && !ENABLE */
				case 0x08: /* BUSEXC && !expecting && !SUPDATE && !ENABLE */
					if (nd->nd_intr == NEXT_I_SCSI_DMA) {
						slimit = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_LIMIT);
					} else {
						slimit = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_LIMIT);
					}
					break;
				case 0x01: /* !BUSEXC && !expecting && !SUPDATE && ENABLE */
				case 0x09: /* BUSEXC && !expecting && !SUPDATE && ENABLE */
					if (nd->nd_intr == NEXT_I_SCSI_DMA) {
						bus_addr_t snext;
						snext = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_NEXT);
						if (snext != onext) {
							slimit = olimit;
						} else {
							slimit = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_LIMIT);
						}
					} else {
						slimit = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_SAVED_LIMIT);
					}
					break;
				case 0x02: /* !BUSEXC && !expecting && SUPDATE && !ENABLE */
				case 0x0a: /* BUSEXC && !expecting && SUPDATE && !ENABLE */
					slimit = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_NEXT);
					break;
				case 0x04: /* !BUSEXC && expecting && !SUPDATE && !ENABLE */
				case 0x0c: /* BUSEXC && expecting && !SUPDATE && !ENABLE */
					slimit = bus_space_read_4(nd->nd_bst, nd->nd_bsh, DD_LIMIT);
					break;
				default:
#ifdef DIAGNOSTIC
					{
						char sbuf[256];
						printf("DMA: please send this output to port-next68k-maintainer@netbsd.org:\n");
						bitmask_snprintf(state, DMACSR_BITS, sbuf, sizeof(sbuf));
						printf("DMA: state 0x%s\n",sbuf);
						next_dma_print(nd);
						panic("DMA: condition 0x%02x not yet documented to occur\n",result);
					}
#endif
					slimit = olimit;
					break;
				}
			}

			if (nd->nd_intr == NEXT_I_ENETX_DMA) {
				slimit &= ~0x80000000;
			}

#ifdef DIAGNOSTIC
			if ((slimit < onext) || (slimit > olimit)) {
				char sbuf[256];
				bitmask_snprintf(state, DMACSR_BITS, sbuf, sizeof(sbuf));
				printf("DMA: state 0x%s\n",sbuf);
				next_dma_print(nd);
				panic("DMA: Unexpected limit register (0x%08x) in finish_xfer\n",slimit);
			}
#endif

#ifdef DIAGNOSTIC
			if ((state & DMACSR_ENABLE) && ((nd->_nd_idx+1) != nd->_nd_map->dm_nsegs)) {
				if (slimit != olimit) {
					char sbuf[256];
					bitmask_snprintf(state, DMACSR_BITS, sbuf, sizeof(sbuf));
					printf("DMA: state 0x%s\n",sbuf);
					next_dma_print(nd);
					panic("DMA: short limit register (0x%08x) w/o finishing map.\n",slimit);
				}
			}
#endif

#if (defined(ND_DEBUG))
			if (nextdma_debug > 2) next_dma_print(nd);
#endif

			nd->_nd_map->dm_xfer_len += slimit-onext;

			/* If we've reached the end of the current map, then inform
			 * that we've completed that map.
			 */
			if ((nd->_nd_idx+1) == nd->_nd_map->dm_nsegs) {
				if (nd->nd_completed_cb)
					(*nd->nd_completed_cb)(nd->_nd_map, nd->nd_cb_arg);
			} else {
				KASSERT(nd->_nd_map == nd->_nd_map_cont);
				KASSERT(nd->_nd_idx+1 == nd->_nd_idx_cont);
			}
			nd->_nd_map = 0;
			nd->_nd_idx = 0;
		}
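
		/*
		 * The finished segment is now fully accounted for.  If the
		 * ENABLE bit is still set, the channel has moved on to the
		 * buffers previously programmed as the continue registers,
		 * so queue more work and acknowledge the interrupt; if not,
		 * the channel has stopped and is shut down below.
		 */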
		if (state & DMACSR_ENABLE) {

			next_dma_rotate(nd);
			next_dma_setup_cont_regs(nd);

			{
				u_long dmadir;	/* DMACSR_SETREAD or DMACSR_SETWRITE */

				if (state & DMACSR_READ) {
					dmadir = DMACSR_SETREAD;
				} else {
					dmadir = DMACSR_SETWRITE;
				}

				if (nd->_nd_map_cont == NULL) {
					KASSERT(nd->_nd_idx+1 == nd->_nd_map->dm_nsegs);
					bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR,
							DMACSR_CLRCOMPLETE | dmadir);
				} else {
					bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR,
							DMACSR_CLRCOMPLETE | dmadir | DMACSR_SETSUPDATE);
				}
			}

		} else {

			DPRINTF(("DMA: a shutdown occurred\n"));
			bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR, DMACSR_CLRCOMPLETE | DMACSR_RESET);

			/* Cleanup more incomplete transfers */
#if 1
			/* cleanup continue map */
			if (nd->_nd_map_cont) {
				DPRINTF(("DMA: shutting down with non null continue map\n"));
				if (nd->nd_completed_cb)
					(*nd->nd_completed_cb)(nd->_nd_map_cont, nd->nd_cb_arg);

				nd->_nd_map_cont = 0;
				nd->_nd_idx_cont = 0;
			}
#else
			/* Do an automatic dma restart */
			if (nd->_nd_map_cont) {
				u_long dmadir;	/* DMACSR_SETREAD or DMACSR_SETWRITE */

				next_dma_rotate(nd);

				if (state & DMACSR_READ) {
					dmadir = DMACSR_SETREAD;
				} else {
					dmadir = DMACSR_SETWRITE;
				}

				bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR, 0);
				bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR,
						DMACSR_INITBUF | DMACSR_RESET | dmadir);

				next_dma_setup_curr_regs(nd);
				next_dma_setup_cont_regs(nd);

				if (nd->_nd_map_cont == NULL) {
					KASSERT(nd->_nd_idx+1 == nd->_nd_map->dm_nsegs);
					bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR,
							DMACSR_SETENABLE | dmadir);
				} else {
					bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR,
							DMACSR_SETSUPDATE | DMACSR_SETENABLE | dmadir);
				}
				return 1;
			}
#endif
			if (nd->nd_shutdown_cb) (*nd->nd_shutdown_cb)(nd->nd_cb_arg);
		}
	}

#ifdef ND_DEBUG
	if (nextdma_debug) {
		char sbuf[256];

		bitmask_snprintf(NEXT_I_BIT(nd->nd_intr), NEXT_INTR_BITS,
		    sbuf, sizeof(sbuf));
		printf("DMA exiting interrupt ipl (%ld) intr(0x%s)\n",
			NEXT_I_IPL(nd->nd_intr), sbuf);
	}
#endif

	return(1);
}
/*
 * Check to see if dma has finished for a channel */
int
nextdma_finished(nd)
	struct nextdma_config *nd;
{
	int r;
	int s;
	s = spldma();	/* @@@ should this be splimp()? */
	r = (nd->_nd_map == NULL) && (nd->_nd_map_cont == NULL);
	splx(s);
	return(r);
}

void
nextdma_start(nd, dmadir)
	struct nextdma_config *nd;
	u_long dmadir;	/* DMACSR_SETREAD or DMACSR_SETWRITE */
{

#ifdef DIAGNOSTIC
	if (!nextdma_finished(nd)) {
		char sbuf[256];

		bitmask_snprintf(NEXT_I_BIT(nd->nd_intr), NEXT_INTR_BITS,
		    sbuf, sizeof(sbuf));
		panic("DMA trying to start before previous finished on intr(0x%s)\n", sbuf);
	}
#endif

#ifdef ND_DEBUG
	if (nextdma_debug) {
		char sbuf[256];

		bitmask_snprintf(NEXT_I_BIT(nd->nd_intr), NEXT_INTR_BITS,
		    sbuf, sizeof(sbuf));
		printf("DMA start (%ld) intr(0x%s)\n",
			NEXT_I_IPL(nd->nd_intr), sbuf);
	}
#endif

#ifdef DIAGNOSTIC
	if (nd->_nd_map) {
		next_dma_print(nd);
		panic("DMA: nextdma_start() with non null map\n");
	}
	if (nd->_nd_map_cont) {
		next_dma_print(nd);
		panic("DMA: nextdma_start() with non null continue map\n");
	}
#endif

#ifdef DIAGNOSTIC
	if ((dmadir != DMACSR_SETREAD) && (dmadir != DMACSR_SETWRITE)) {
		panic("DMA: nextdma_start(), dmadir arg must be DMACSR_SETREAD or DMACSR_SETWRITE\n");
	}
#endif

#if defined(ND_DEBUG)
	nextdma_debug_initstate(nd);
#endif

	/* preload both the current and the continue maps */
	next_dma_rotate(nd);

#ifdef DIAGNOSTIC
	if (!nd->_nd_map_cont) {
		panic("No map available in nextdma_start()");
	}
#endif

	next_dma_rotate(nd);

#ifdef ND_DEBUG
	if (nextdma_debug) {
		char sbuf[256];

		bitmask_snprintf(NEXT_I_BIT(nd->nd_intr), NEXT_INTR_BITS,
		    sbuf, sizeof(sbuf));
		printf("DMA initiating DMA %s of %d segments on intr(0x%s)\n",
			(dmadir == DMACSR_SETREAD ? "read" : "write"), nd->_nd_map->dm_nsegs, sbuf);
	}
#endif

	bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR, 0);
	bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR,
			DMACSR_INITBUF | DMACSR_RESET | dmadir);

	next_dma_setup_curr_regs(nd);
	next_dma_setup_cont_regs(nd);

#if (defined(ND_DEBUG))
	if (nextdma_debug > 2) next_dma_print(nd);
#endif

	if (nd->_nd_map_cont == NULL) {
		bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR,
				DMACSR_SETENABLE | dmadir);
	} else {
		bus_space_write_4(nd->nd_bst, nd->nd_bsh, DD_CSR,
				DMACSR_SETSUPDATE | DMACSR_SETENABLE | dmadir);
	}
}