/* $NetBSD: maple.c,v 1.50 2014/03/27 18:22:56 christos Exp $ */

/*-
 * Copyright (c) 2002 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by ITOH Yasufumi.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 2001 Marcus Comstedt
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Marcus Comstedt.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Driver for the SEGA Dreamcast "Maple" peripheral bus: probes/pings
 * units on each port, dispatches unit-driver commands over DMA, and
 * attaches/detaches child devices as controllers/cards come and go.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: maple.c,v 1.50 2014/03/27 18:22:56 christos Exp $");

#include <sys/param.h>
#include <sys/device.h>
#include <sys/fcntl.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/poll.h>
#include <sys/select.h>
#include <sys/proc.h>
#include <sys/signalvar.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/bus.h>

#include <uvm/uvm.h>

#include <machine/cpu.h>
#include <machine/sysasicvar.h>
#include <sh3/pmap.h>

#include <dreamcast/dev/maple/maple.h>
#include <dreamcast/dev/maple/mapleconf.h>
#include <dreamcast/dev/maple/maplevar.h>
#include <dreamcast/dev/maple/maplereg.h>
#include <dreamcast/dev/maple/mapleio.h>

#include "ioconf.h"
#include "locators.h"

/* Internal macros, functions, and variables. */

#define MAPLE_CALLOUT_TICKS 2

/* Decompose a character-device minor number: bus(1) | port(2) | subunit(3). */
#define MAPLEBUSUNIT(dev)  (minor(dev)>>5)
#define MAPLEPORT(dev)     ((minor(dev) & 0x18) >> 3)
#define MAPLESUBUNIT(dev)  (minor(dev) & 0x7)

/* interrupt priority level */
#define	IPL_MAPLE	IPL_BIO
#define splmaple()	splbio()
#define IRL_MAPLE	SYSASIC_IRL9

/*
 * Function declarations.
 */
static int	maplematch(device_t, cfdata_t, void *);
static void	mapleattach(device_t, device_t, void *);
static void	maple_scanbus(struct maple_softc *);
static char *	maple_unit_name(char *, size_t, int port, int subunit);
static void	maple_begin_txbuf(struct maple_softc *);
static int	maple_end_txbuf(struct maple_softc *);
static void	maple_queue_command(struct maple_softc *, struct maple_unit *,
		    int command, int datalen, const void *dataaddr);
static void	maple_write_command(struct maple_softc *, struct maple_unit *,
		    int, int, const void *);
static void	maple_start(struct maple_softc *sc);
static void	maple_start_poll(struct maple_softc *);
static void	maple_check_subunit_change(struct maple_softc *,
		    struct maple_unit *);
static void	maple_check_unit_change(struct maple_softc *,
		    struct maple_unit *);
static void	maple_print_unit(void *, const char *);
static int	maplesubmatch(device_t, cfdata_t, const int *, void *);
static int	mapleprint(void *, const char *);
static void	maple_attach_unit(struct maple_softc *, struct maple_unit *);
static void	maple_detach_unit_nofix(struct maple_softc *,
		    struct maple_unit *);
static void	maple_detach_unit(struct maple_softc *, struct maple_unit *);
static void	maple_queue_cmds(struct maple_softc *,
		    struct maple_cmdq_head *);
static void	maple_unit_probe(struct maple_softc *);
static void	maple_unit_ping(struct maple_softc *);
static int	maple_send_defered_periodic(struct maple_softc *);
static void	maple_send_periodic(struct maple_softc *);
static void	maple_remove_from_queues(struct maple_softc *,
		    struct maple_unit *);
static int	maple_retry(struct maple_softc *, struct maple_unit *,
		    enum maple_dma_stat);
static void	maple_queue_retry(struct maple_softc *);
static void	maple_check_responses(struct maple_softc *);
static void	maple_event_thread(void *);
static int	maple_intr(void *);
static void	maple_callout(void *);

int	maple_alloc_dma(size_t, vaddr_t *, paddr_t *);
#if 0
void	maple_free_dma(paddr_t, size_t);
#endif

/*
 * Global variables.
 */
int	maple_polling;		/* Are we polling?  (Debugger mode) */

CFATTACH_DECL_NEW(maple, sizeof(struct maple_softc),
    maplematch, mapleattach, NULL, NULL);

dev_type_open(mapleopen);
dev_type_close(mapleclose);
dev_type_ioctl(mapleioctl);

const struct cdevsw maple_cdevsw = {
	.d_open = mapleopen,
	.d_close = mapleclose,
	.d_read = noread,
	.d_write = nowrite,
	.d_ioctl = mapleioctl,
	.d_stop = nostop,
	.d_tty = notty,
	.d_poll = nopoll,
	.d_mmap = nommap,
	.d_kqfilter = nokqfilter,
	.d_flag = 0
};

/* Autoconf match: the maple bus always matches (unconditionally returns 1). */
static int
maplematch(device_t parent, cfdata_t cf, void *aux)
{

	return 1;
}

/*
 * Autoconf attach: allocate the DMA buffers, initialize per-unit state
 * and the work queues, reset and enable the maple hardware, perform the
 * initial (polled) bus scan, then hook the DMA-done interrupt and spawn
 * the event thread.
 */
static void
mapleattach(device_t parent, device_t self, void *aux)
{
	struct maple_softc *sc;
	struct maple_unit *u;
	vaddr_t dmabuffer;
	paddr_t dmabuffer_phys;
	uint32_t *p;
	int port, subunit, f;

	sc = device_private(self);
	sc->sc_dev = self;

	printf(": %s\n", sysasic_intr_string(IRL_MAPLE));

	if (maple_alloc_dma(MAPLE_DMABUF_SIZE, &dmabuffer, &dmabuffer_phys)) {
		printf("%s: unable to allocate DMA buffers.\n",
		    device_xname(self));
		return;
	}

	p = (uint32_t *)dmabuffer;

	for (port = 0; port < MAPLE_PORTS; port++) {
		for (subunit = 0; subunit < MAPLE_SUBUNITS; subunit++) {
			u = &sc->sc_unit[port][subunit];
			u->port = port;
			u->subunit = subunit;
			u->u_dma_stat = MAPLE_DMA_IDLE;
			u->u_rxbuf = p;
			u->u_rxbuf_phys = SH3_P2SEG_TO_PHYS(p);
			/* each unit gets a 256-word (1 KB) receive buffer */
			p += 256;

			for (f = 0; f < MAPLE_NFUNC; f++) {
				u->u_func[f].f_funcno = f;
				u->u_func[f].f_unit = u;
			}
		}
	}

	/* the transmit buffer follows all receive buffers */
	sc->sc_txbuf = p;
	sc->sc_txbuf_phys = SH3_P2SEG_TO_PHYS(p);

	SIMPLEQ_INIT(&sc->sc_retryq);
	TAILQ_INIT(&sc->sc_probeq);
	TAILQ_INIT(&sc->sc_pingq);
	TAILQ_INIT(&sc->sc_periodicq);
	TAILQ_INIT(&sc->sc_periodicdeferq);
	TAILQ_INIT(&sc->sc_acmdq);
	TAILQ_INIT(&sc->sc_pcmdq);

	/* reset and configure the maple bus controller */
	MAPLE_RESET = RESET_MAGIC;
	MAPLE_RESET2 = 0;

	MAPLE_SPEED = SPEED_2MBPS | TIMEOUT(50000);

	MAPLE_ENABLE = 1;

	maple_polling = 1;
	maple_scanbus(sc);

	callout_init(&sc->maple_callout_ch, 0);

	sc->sc_intrhand = sysasic_intr_establish(SYSASIC_EVENT_MAPLE_DMADONE,
	    IPL_MAPLE, IRL_MAPLE, maple_intr, sc);

	config_pending_incr(self);	/* create thread before mounting root */

	if (kthread_create(PRI_NONE, 0, NULL, maple_event_thread, sc,
	    &sc->event_thread, "%s", device_xname(self)) == 0)
		return;

	panic("%s: unable to create event thread", device_xname(self));
}

/*
 * initial device attach
 */
static void
maple_scanbus(struct maple_softc *sc)
{
	struct maple_unit *u;
	int port;
	int last_port, last_subunit;
	int i;

	/* only valid during cold boot, with polled (non-interrupt) I/O */
	KASSERT(cold && maple_polling);

	/* probe all ports */
	for (port = 0; port < MAPLE_PORTS; port++) {
		u = &sc->sc_unit[port][0];
#if defined(MAPLE_DEBUG) && MAPLE_DEBUG > 2
		{
			char buf[16];
			printf("%s: queued to probe 1\n",
			    maple_unit_name(buf, sizeof(buf), u->port,
			    u->subunit));
		}
#endif
		TAILQ_INSERT_TAIL(&sc->sc_probeq, u, u_q);
		u->u_queuestat = MAPLE_QUEUE_PROBE;
	}

	last_port = last_subunit = -1;
	maple_begin_txbuf(sc);
	while ((u = TAILQ_FIRST(&sc->sc_probeq)) != NULL) {
		/*
		 * Check wrap condition
		 */
		if (u->port < last_port || u->subunit <= last_subunit)
			break;
		last_port = u->port;
		if (u->port == MAPLE_PORTS - 1)
			last_subunit = u->subunit;

		maple_unit_probe(sc);
		for (i = 10 /* just not forever */; maple_end_txbuf(sc); i--) {
			maple_start_poll(sc);
			maple_check_responses(sc);
			if (i == 0)
				break;
			/* attach may issue cmds */
			maple_queue_cmds(sc, &sc->sc_acmdq);
		}
	}
}

/*
 * Polled operation for the in-kernel debugger: force all units idle and
 * run one round of periodic status checks without interrupts.
 */
void
maple_run_polling(device_t dev)
{
	struct maple_softc *sc;
	int port, subunit;
	int i;

	sc = device_private(dev);

	/*
	 * first, make sure polling works
	 */
	while (MAPLE_STATE != 0)	/* XXX may lost a DMA cycle */
		;

	/* XXX this will break internal state */
	for (port = 0; port < MAPLE_PORTS; port++)
		for (subunit = 0; subunit < MAPLE_SUBUNITS; subunit++)
			sc->sc_unit[port][subunit].u_dma_stat = MAPLE_DMA_IDLE;
	SIMPLEQ_INIT(&sc->sc_retryq);	/* XXX discard current retrys */

	/*
	 * do polling (periodic status check only)
	 */
	maple_begin_txbuf(sc);
	maple_send_defered_periodic(sc);
	maple_send_periodic(sc);
	for (i = 10 /* just not forever */; maple_end_txbuf(sc); i--) {
		maple_start_poll(sc);
		maple_check_responses(sc);
		if (i == 0)
			break;

		/* maple_check_responses() has executed maple_begin_txbuf() */
		maple_queue_retry(sc);
		maple_send_defered_periodic(sc);
	}
}

/*
 * Format a unit name like "mapleA" (subunit 0) or "mapleA1" into buf.
 * Returns buf for convenient use inside printf arguments.
 */
static char *
maple_unit_name(char *buf, size_t len, int port, int subunit)
{
	size_t l = snprintf(buf, len, "maple%c", port + 'A');
	if (l > len)
		l = len;
	if (subunit)
		snprintf(buf + l, len - l, "%d", subunit);

	return buf;
}

/*
 * Allocate physically contiguous pages for the maple DMA buffers and
 * return both the physical address and an uncached P2-segment mapping.
 */
int
maple_alloc_dma(size_t size, vaddr_t *vap, paddr_t *pap)
{
	extern paddr_t avail_start, avail_end;		/* from pmap.c */
	struct pglist mlist;
	struct vm_page *m;
	int error;

	size = round_page(size);

	error = uvm_pglistalloc(size, avail_start, avail_end - PAGE_SIZE,
	    0, 0, &mlist, 1, 0);
	if (error)
		return error;

	m = TAILQ_FIRST(&mlist);
	*pap = VM_PAGE_TO_PHYS(m);
	*vap = SH3_PHYS_TO_P2SEG(VM_PAGE_TO_PHYS(m));

	return 0;
}

#if 0	/* currently unused */
void
maple_free_dma(paddr_t paddr, size_t size)
{
	struct pglist mlist;
	struct vm_page *m;
	bus_addr_t addr;

	TAILQ_INIT(&mlist);
	for (addr = paddr; addr < paddr + size; addr += PAGE_SIZE) {
		m = PHYS_TO_VM_PAGE(addr);
		TAILQ_INSERT_TAIL(&mlist, m, pageq.queue);
	}
	uvm_pglistfree(&mlist);
}
#endif

/* Reset the transmit buffer and the per-DMA-cycle unit queue. */
static void
maple_begin_txbuf(struct maple_softc *sc)
{

	sc->sc_txlink = sc->sc_txpos = sc->sc_txbuf;
	SIMPLEQ_INIT(&sc->sc_dmaq);
}

/*
 * Terminate the transmit buffer.  Returns nonzero iff at least one
 * frame was queued (i.e. a DMA transfer should be started).
 */
static int
maple_end_txbuf(struct maple_softc *sc)
{

	/* if no frame have been written, we can't mark the
	   list end, and so the DMA must not be activated   */
	if (sc->sc_txpos == sc->sc_txbuf)
		return 0;

	/* mark the last frame descriptor as end-of-list */
	*sc->sc_txlink |= 0x80000000;

	return 1;
}

/* Maple bus address bit for each subunit (subunit 0 is the port itself). */
static const int8_t subunit_code[] = { 0x20, 0x01, 0x02, 0x04, 0x08, 0x10 };

/*
 * Append one command frame for unit u to the transmit buffer and put
 * the unit on the current DMA queue.  dataaddr points at datalen
 * 32-bit words of parameter data.
 */
static void
maple_queue_command(struct maple_softc *sc, struct maple_unit *u,
	int command, int datalen, const void *dataaddr)
{
	int to, from;
	uint32_t *p = sc->sc_txpos;

	/* Max data length = 255 longs = 1020 bytes */
	KASSERT(datalen >= 0 && datalen <= 255);

	/* Compute sender and recipient address */
	from = u->port << 6;
	to = from | subunit_code[u->subunit];

	sc->sc_txlink = p;

	/* Set length of packet and destination port (A-D) */
	*p++ = datalen | (u->port << 16);

	/* Write address to receive buffer where the response
	   frame should be put */
	*p++ = u->u_rxbuf_phys;

	/* Create the frame header.  The fields are assembled "backwards"
	   because of the Maple Bus big-endianness.  */
	*p++ = (command & 0xff) | (to << 8) | (from << 16) | (datalen << 24);

	/* Copy parameter data, if any */
	if (datalen > 0) {
		const uint32_t *param = dataaddr;
		int i;
		for (i = 0; i < datalen; i++)
			*p++ = *param++;
	}

	sc->sc_txpos = p;

	SIMPLEQ_INSERT_TAIL(&sc->sc_dmaq, u, u_dmaq);
}

/*
 * Like maple_queue_command(), but also record the command in the unit
 * so it can be retried later, and reset the retry counter.
 */
static void
maple_write_command(struct maple_softc *sc, struct maple_unit *u, int command,
	int datalen, const void *dataaddr)
{
#if defined(MAPLE_DEBUG) && MAPLE_DEBUG > 2
	char buf[16];

	if (u->u_retrycnt)
		printf("%s: retrycnt %d\n", maple_unit_name(buf, sizeof(buf),
		    u->port, u->subunit), u->u_retrycnt);
#endif
	u->u_retrycnt = 0;
	u->u_command = command;
	u->u_datalen = datalen;
	u->u_dataaddr = dataaddr;

	maple_queue_command(sc, u, command, datalen, dataaddr);
}

/* start DMA */
static void
maple_start(struct maple_softc *sc)
{

	MAPLE_DMAADDR = sc->sc_txbuf_phys;
	MAPLE_STATE = 1;
}

/* start DMA -- wait until DMA done */
static void
maple_start_poll(struct maple_softc *sc)
{

	MAPLE_DMAADDR = sc->sc_txbuf_phys;
	MAPLE_STATE = 1;
	while (MAPLE_STATE != 0)
		;
}

/*
 * Compare the subunit map reported by a port's main unit (byte 2 of
 * the DEVINFO response) with our cached copy, and queue probes for any
 * newly-appeared subunits.
 */
static void
maple_check_subunit_change(struct maple_softc *sc, struct maple_unit *u)
{
	struct maple_unit *u1;
	int port;
	int8_t unit_map;
	int units, un;
	int i;

	/* only the main unit (subunit 0) reports the subunit map */
	KASSERT(u->subunit == 0);

	port = u->port;
	unit_map = ((int8_t *) u->u_rxbuf)[2];
	if (sc->sc_port_unit_map[port] == unit_map)
		return;

	/* bit 0 = main unit; shift the 5 subunit-present bits up by one */
	units = ((unit_map & 0x1f) << 1) | 1;
#if defined(MAPLE_DEBUG) && MAPLE_DEBUG > 1
	{
		char buf[16];
		printf("%s: unit_map 0x%x -> 0x%x (units 0x%x)\n",
		    maple_unit_name(buf, sizeof(buf), u->port, u->subunit),
		    sc->sc_port_unit_map[port], unit_map, units);
	}
#endif
#if 0	/* this detects unit removal rapidly but is not reliable */
	/* check for unit change */
	un = sc->sc_port_units[port] & ~units;

	/* detach removed devices */
	for (i = MAPLE_SUBUNITS - 1; i > 0; i--)
		if (un & (1 << i))
			maple_detach_unit_nofix(sc, &sc->sc_unit[port][i]);
#endif

	sc->sc_port_unit_map[port] = unit_map;

	/* schedule scanning child devices */
	un = units & ~sc->sc_port_units[port];
	for (i = MAPLE_SUBUNITS - 1; i > 0; i--)
		if (un & (1 << i)) {
			u1 = &sc->sc_unit[port][i];
			maple_remove_from_queues(sc, u1);
#if defined(MAPLE_DEBUG) && MAPLE_DEBUG > 2
			{
				char buf[16];
				printf("%s: queued to probe 2\n",
				    maple_unit_name(buf, sizeof(buf),
				    u1->port, u1->subunit));
			}
#endif
			TAILQ_INSERT_HEAD(&sc->sc_probeq, u1, u_q);
			u1->u_queuestat = MAPLE_QUEUE_PROBE;
			u1->u_proberetry = 0;
		}
}

/*
 * Compare the DEVINFO response against the cached device info; attach
 * the unit if it changed, then move the unit to the ping queue.
 */
static void
maple_check_unit_change(struct maple_softc *sc, struct maple_unit *u)
{
	struct maple_devinfo *newinfo = (void *) (u->u_rxbuf + 1);

	if (memcmp(&u->devinfo, newinfo, sizeof(struct maple_devinfo)) == 0)
		goto out;	/* no change */

	/* unit inserted */

	/* attach this device */
	u->devinfo = *newinfo;
	maple_attach_unit(sc, u);

out:
	maple_remove_from_queues(sc, u);
#if defined(MAPLE_DEBUG) && MAPLE_DEBUG > 2
	{
		char buf[16];
		printf("%s: queued to ping\n",
		    maple_unit_name(buf, sizeof(buf), u->port, u->subunit));
	}
#endif
	TAILQ_INSERT_TAIL(&sc->sc_pingq, u, u_q);
	u->u_queuestat = MAPLE_QUEUE_PING;
}

/*
 * Print a unit's identity (port, subunit, trimmed product name) at
 * attach time; aux is a struct maple_attach_args.
 */
static void
maple_print_unit(void *aux, const char *pnp)
{
	struct maple_attach_args *ma = aux;
	int port, subunit;
	char buf[16];
	char *prod, *p, oc;

	port = ma->ma_unit->port;
	subunit = ma->ma_unit->subunit;

	if (pnp != NULL)
		printf("%s at %s", maple_unit_name(buf, sizeof(buf), port,
		    subunit), pnp);

	printf(" port %d", port);

	if (subunit != 0)
		printf(" subunit %d", subunit);

#ifdef MAPLE_DEBUG
	printf(": a %#x c %#x fn %#x d %#x,%#x,%#x",
	    ma->ma_devinfo->di_area_code,
	    ma->ma_devinfo->di_connector_direction,
	    be32toh(ma->ma_devinfo->di_func),
	    be32toh(ma->ma_devinfo->di_function_data[0]),
	    be32toh(ma->ma_devinfo->di_function_data[1]),
	    be32toh(ma->ma_devinfo->di_function_data[2]));
#endif

	/* nul termination */
	prod = ma->ma_devinfo->di_product_name;
	/* scan backwards past trailing NULs/spaces to find the name's end */
	for (p = prod + sizeof ma->ma_devinfo->di_product_name; p >= prod; p--)
		if (p[-1] != '\0' && p[-1] != ' ')
			break;
	oc = *p;
	*p = '\0';

	printf(": %s", prod);

	*p = oc;	/* restore */
}

/*
 * Autoconf submatch: honor "port" and "subunit" locators from the
 * kernel config before deferring to the child driver's match routine.
 */
static int
maplesubmatch(device_t parent, cfdata_t match, const int *ldesc, void *aux)
{
	struct maple_attach_args *ma = aux;

	if (match->cf_loc[MAPLECF_PORT] != MAPLECF_PORT_DEFAULT &&
	    match->cf_loc[MAPLECF_PORT] != ma->ma_unit->port)
		return 0;

	if (match->cf_loc[MAPLECF_SUBUNIT] != MAPLECF_SUBUNIT_DEFAULT &&
	    match->cf_loc[MAPLECF_SUBUNIT] != ma->ma_unit->subunit)
		return 0;

	return config_match(parent, match, aux);
}

/* Autoconf print routine for child devices (function number only). */
static int
mapleprint(void *aux, const char *str)
{
	struct maple_attach_args *ma = aux;

#ifdef MAPLE_DEBUG
	if (str)
		aprint_normal("%s", str);
	aprint_normal(" function %d", ma->ma_function);

	return UNCONF;
#else	/* quiet */
	if (!str)
		aprint_normal(" function %d", ma->ma_function);

	return QUIET;
#endif
}

/*
 * Attach child drivers for every function bit the unit advertises.
 * The softc's dv_xname is temporarily replaced with the unit name so
 * children print "fooN at mapleA1" style messages, then restored.
 */
static void
maple_attach_unit(struct maple_softc *sc, struct maple_unit *u)
{
	struct maple_attach_args ma;
	uint32_t func;
	int f;
	char oldxname[16];

	ma.ma_unit = u;
	ma.ma_devinfo = &u->devinfo;
	ma.ma_basedevinfo = &sc->sc_unit[u->port][0].devinfo;
	func = be32toh(ma.ma_devinfo->di_func);

	maple_print_unit(&ma, device_xname(sc->sc_dev));
	printf("\n");
	strcpy(oldxname, device_xname(sc->sc_dev));
	/* XXX: writes into dv_xname directly; restored below */
	maple_unit_name(sc->sc_dev->dv_xname, sizeof(sc->sc_dev->dv_xname),
	    u->port, u->subunit);

	for (f = 0; f < MAPLE_NFUNC; f++) {
		u->u_func[f].f_callback = NULL;
		u->u_func[f].f_arg = NULL;
		u->u_func[f].f_cmdstat = MAPLE_CMDSTAT_NONE;
		u->u_func[f].f_dev = NULL;
		if (func & MAPLE_FUNC(f)) {
			ma.ma_function = f;
			u->u_func[f].f_dev = config_found_sm_loc(sc->sc_dev,
			    "maple", NULL, &ma, mapleprint, maplesubmatch);
			u->u_ping_func = f;	/* XXX using largest func */
		}
	}
#ifdef MAPLE_MEMCARD_PING_HACK
	/*
	 * Some 3rd party memory card pretend to be Visual Memory,
	 * but need special handling for ping.
	 */
	if (func == (MAPLE_FUNC(MAPLE_FN_MEMCARD) | MAPLE_FUNC(MAPLE_FN_LCD) |
	    MAPLE_FUNC(MAPLE_FN_CLOCK))) {
		u->u_ping_func = MAPLE_FN_MEMCARD;
		u->u_ping_stat = MAPLE_PING_MEMCARD;
	} else {
		u->u_ping_stat = MAPLE_PING_NORMAL;
	}
#endif
	strcpy(sc->sc_dev->dv_xname, oldxname);

	sc->sc_port_units[u->port] |= 1 << u->subunit;
}

/*
 * Detach a unit (and, for subunit 0, all its subunits): remove pending
 * commands, detach child drivers, cancel retries, clear per-unit state,
 * and re-queue the main unit for probing.  Does not fix up the parent's
 * cached subunit map (see maple_detach_unit()).
 */
static void
maple_detach_unit_nofix(struct maple_softc *sc, struct maple_unit *u)
{
	struct maple_func *fn;
	device_t dev;
	struct maple_unit *u1;
	int port;
	int error;
	int i;
	char buf[16];

#if defined(MAPLE_DEBUG) && MAPLE_DEBUG > 1
	printf("%s: remove\n", maple_unit_name(buf, sizeof(buf), u->port,
	    u->subunit));
#endif
	maple_remove_from_queues(sc, u);
	port = u->port;
	sc->sc_port_units[port] &= ~(1 << u->subunit);

	if (u->subunit == 0) {
		/* losing the main unit takes all subunits with it */
		for (i = MAPLE_SUBUNITS - 1; i > 0; i--)
			maple_detach_unit_nofix(sc, &sc->sc_unit[port][i]);
	}

	for (fn = u->u_func; fn < &u->u_func[MAPLE_NFUNC]; fn++) {
		if ((dev = fn->f_dev) != NULL) {
#if defined(MAPLE_DEBUG) && MAPLE_DEBUG > 1
			printf("%s: detaching func %d\n",
			    maple_unit_name(buf, sizeof(buf), port,
			    u->subunit),
			    fn->f_funcno);
#endif

			/*
			 * Remove functions from command queue.
			 */
			switch (fn->f_cmdstat) {
			case MAPLE_CMDSTAT_ASYNC:
			case MAPLE_CMDSTAT_PERIODIC_DEFERED:
				TAILQ_REMOVE(&sc->sc_acmdq, fn, f_cmdq);
				break;
			case MAPLE_CMDSTAT_ASYNC_PERIODICQ:
			case MAPLE_CMDSTAT_PERIODIC:
				TAILQ_REMOVE(&sc->sc_pcmdq, fn, f_cmdq);
				break;
			default:
				break;
			}

			/*
			 * Detach devices.
			 */
			if ((error = config_detach(fn->f_dev,
			    DETACH_FORCE))) {
				printf("%s: failed to detach %s (func %d), errno %d\n",
				    maple_unit_name(buf, sizeof(buf), port,
				    u->subunit),
				    device_xname(fn->f_dev), fn->f_funcno,
				    error);
			}
		}

		maple_enable_periodic(sc->sc_dev, u, fn->f_funcno, 0);

		fn->f_dev = NULL;
		fn->f_callback = NULL;
		fn->f_arg = NULL;
		fn->f_cmdstat = MAPLE_CMDSTAT_NONE;
	}
	if (u->u_dma_stat == MAPLE_DMA_RETRY) {
		/* XXX expensive? */
		SIMPLEQ_FOREACH(u1, &sc->sc_retryq, u_dmaq) {
			if (u1 == u) {
#if defined(MAPLE_DEBUG) && MAPLE_DEBUG > 1
				printf("%s: abort retry\n",
				    maple_unit_name(buf, sizeof(buf), port,
				    u->subunit));
#endif
				SIMPLEQ_REMOVE(&sc->sc_retryq, u, maple_unit,
				    u_dmaq);
				break;
			}
		}
	}
	u->u_dma_stat = MAPLE_DMA_IDLE;
	u->u_noping = 0;
	/* u->u_dma_func = uninitialized; */
	KASSERT(u->getcond_func_set == 0);
	memset(&u->devinfo, 0, sizeof(struct maple_devinfo));

	if (u->subunit == 0) {
		sc->sc_port_unit_map[port] = 0;
#if defined(MAPLE_DEBUG) && MAPLE_DEBUG > 2
		{
			char buf2[16];
			printf("%s: queued to probe 3\n",
			    maple_unit_name(buf2, sizeof(buf2), port,
			    u->subunit));
		}
#endif
		TAILQ_INSERT_TAIL(&sc->sc_probeq, u, u_q);
		u->u_queuestat = MAPLE_QUEUE_PROBE;
	}
}

/*
 * Detach a unit and clear its bit in the parent port's cached subunit
 * map so a later DEVINFO triggers a fresh probe.
 */
static void
maple_detach_unit(struct maple_softc *sc, struct maple_unit *u)
{

	maple_detach_unit_nofix(sc, u);
	if (u->subunit != 0)
		sc->sc_port_unit_map[u->port] &= ~(1 << (u->subunit - 1));
}

/*
 * Send a command (called by drivers)
 *
 * The "cataaddr" must not point at temporary storage like stack.
 * Only one command (per function) is valid at a time.
 */
void
maple_command(device_t dev, struct maple_unit *u, int func,
	int command, int datalen, const void *dataaddr, int flags)
{
	struct maple_softc *sc = device_private(dev);
	struct maple_func *fn;
	int s;

	KASSERT(func >= 0 && func < 32);
	KASSERT(command);
	KASSERT((flags & ~MAPLE_FLAG_CMD_PERIODIC_TIMING) == 0);

	s = splsoftclock();

	fn = &u->u_func[func];
#if 1 /*def DIAGNOSTIC*/
	{char buf[16];
	if (fn->f_cmdstat != MAPLE_CMDSTAT_NONE)
		panic("maple_command: %s func %d: requesting more than one commands",
		    maple_unit_name(buf, sizeof(buf), u->port, u->subunit),
		    func);
	}
#endif
	fn->f_command = command;
	fn->f_datalen = datalen;
	fn->f_dataaddr = dataaddr;
	if (flags & MAPLE_FLAG_CMD_PERIODIC_TIMING) {
		fn->f_cmdstat = MAPLE_CMDSTAT_PERIODIC;
		TAILQ_INSERT_TAIL(&sc->sc_pcmdq, fn, f_cmdq);
	} else {
		fn->f_cmdstat = MAPLE_CMDSTAT_ASYNC;
		TAILQ_INSERT_TAIL(&sc->sc_acmdq, fn, f_cmdq);
		wakeup(&sc->sc_event);	/* wake for async event */
	}
	splx(s);
}

/*
 * Drain a command queue: write each queued command for idle units, and
 * re-queue commands whose unit is busy (to the periodic queue if the
 * unit is in RETRY state, otherwise back to the async queue).
 */
static void
maple_queue_cmds(struct maple_softc *sc,
	struct maple_cmdq_head *head)
{
	struct maple_func *fn, *nextfn;
	struct maple_unit *u;

	/*
	 * Note: since the queue element may be queued immediately,
	 * we can't use TAILQ_FOREACH.
	 */
	fn = TAILQ_FIRST(head);
	TAILQ_INIT(head);
	for ( ; fn; fn = nextfn) {
		nextfn = TAILQ_NEXT(fn, f_cmdq);

		KASSERT(fn->f_cmdstat != MAPLE_CMDSTAT_NONE);
		u = fn->f_unit;
		if (u->u_dma_stat == MAPLE_DMA_IDLE) {
			maple_write_command(sc, u,
			    fn->f_command, fn->f_datalen, fn->f_dataaddr);
			u->u_dma_stat = (fn->f_cmdstat == MAPLE_CMDSTAT_ASYNC ||
			    fn->f_cmdstat == MAPLE_CMDSTAT_ASYNC_PERIODICQ) ?
			    MAPLE_DMA_ACMD : MAPLE_DMA_PCMD;
			u->u_dma_func = fn->f_funcno;
			fn->f_cmdstat = MAPLE_CMDSTAT_NONE;
		} else if (u->u_dma_stat == MAPLE_DMA_RETRY) {
			/* unit is busy --- try again */
			/*
			 * always add to periodic command queue
			 * (wait until the next periodic timing),
			 * since the unit will never be freed until the
			 * next periodic timing.
			 */
			switch (fn->f_cmdstat) {
			case MAPLE_CMDSTAT_ASYNC:
				fn->f_cmdstat = MAPLE_CMDSTAT_ASYNC_PERIODICQ;
				break;
			case MAPLE_CMDSTAT_PERIODIC_DEFERED:
				fn->f_cmdstat = MAPLE_CMDSTAT_PERIODIC;
				break;
			default:
				break;
			}
			TAILQ_INSERT_TAIL(&sc->sc_pcmdq, fn, f_cmdq);
		} else {
			/* unit is busy --- try again */
			/*
			 * always add to async command queue
			 * (process immediately)
			 */
			switch (fn->f_cmdstat) {
			case MAPLE_CMDSTAT_ASYNC_PERIODICQ:
				fn->f_cmdstat = MAPLE_CMDSTAT_ASYNC;
				break;
			case MAPLE_CMDSTAT_PERIODIC:
				fn->f_cmdstat = MAPLE_CMDSTAT_PERIODIC_DEFERED;
				break;
			default:
				break;
			}
			TAILQ_INSERT_TAIL(&sc->sc_acmdq, fn, f_cmdq);
		}
	}
}

/* schedule probing a device */
static void
maple_unit_probe(struct maple_softc *sc)
{
	struct maple_unit *u;

	if ((u = TAILQ_FIRST(&sc->sc_probeq)) != NULL) {
		KASSERT(u->u_dma_stat == MAPLE_DMA_IDLE);
		KASSERT(u->u_queuestat == MAPLE_QUEUE_PROBE);
		maple_remove_from_queues(sc, u);
		maple_write_command(sc, u, MAPLE_COMMAND_DEVINFO, 0, NULL);
		u->u_dma_stat = MAPLE_DMA_PROBE;
		/* u->u_dma_func = ignored; */
	}
}

/*
 * Enable/disable unit pinging (called by drivers)
 */
/* ARGSUSED */
void
maple_enable_unit_ping(device_t dev, struct maple_unit *u, int func, int enable)
{
#if 0	/* currently unused */
	struct maple_softc *sc = device_private(dev);
#endif

	if (enable)
		u->u_noping &= ~MAPLE_FUNC(func);
	else
		u->u_noping |= MAPLE_FUNC(func);
}
/* schedule pinging a device */
static void
maple_unit_ping(struct maple_softc *sc)
{
	struct maple_unit *u;
	struct maple_func *fn;
#ifdef MAPLE_MEMCARD_PING_HACK
	static const uint32_t memcard_ping_arg[2] = {
		0x02000000,	/* htobe32(MAPLE_FUNC(MAPLE_FN_MEMCARD)) */
		0		/* pt (1 byte) and unused 3 bytes */
	};
#endif

	if ((u = TAILQ_FIRST(&sc->sc_pingq)) != NULL) {
		KASSERT(u->u_queuestat == MAPLE_QUEUE_PING);
		maple_remove_from_queues(sc, u);
		if (u->u_dma_stat == MAPLE_DMA_IDLE && u->u_noping == 0) {
#ifdef MAPLE_MEMCARD_PING_HACK
			if (u->u_ping_stat == MAPLE_PING_MINFO) {
				/* use MINFO for some memory cards */
				maple_write_command(sc, u,
				    MAPLE_COMMAND_GETMINFO,
				    2, memcard_ping_arg);
			} else
#endif
			{
				fn = &u->u_func[u->u_ping_func];
				fn->f_work = htobe32(MAPLE_FUNC(u->u_ping_func));
				maple_write_command(sc, u,
				    MAPLE_COMMAND_GETCOND,
				    1, &fn->f_work);
			}
			u->u_dma_stat = MAPLE_DMA_PING;
			/* u->u_dma_func = XXX; */
		} else {
			/* no need if periodic */
			TAILQ_INSERT_TAIL(&sc->sc_pingq, u, u_q);
			u->u_queuestat = MAPLE_QUEUE_PING;
		}
	}
}

/*
 * Enable/disable periodic GETCOND (called by drivers)
 */
void
maple_enable_periodic(device_t dev, struct maple_unit *u, int func, int on)
{
	struct maple_softc *sc = device_private(dev);
	struct maple_func *fn;

	KASSERT(func >= 0 && func < 32);

	fn = &u->u_func[func];

	if (on) {
		if (fn->f_periodic_stat == MAPLE_PERIODIC_NONE) {
			TAILQ_INSERT_TAIL(&sc->sc_periodicq, fn, f_periodicq);
			fn->f_periodic_stat = MAPLE_PERIODIC_INQ;
			u->getcond_func_set |= MAPLE_FUNC(func);
		}
	} else {
		/* remove from whichever periodic queue the function is on */
		if (fn->f_periodic_stat == MAPLE_PERIODIC_INQ)
			TAILQ_REMOVE(&sc->sc_periodicq, fn, f_periodicq);
		else if (fn->f_periodic_stat == MAPLE_PERIODIC_DEFERED)
			TAILQ_REMOVE(&sc->sc_periodicdeferq, fn, f_periodicq);
		fn->f_periodic_stat = MAPLE_PERIODIC_NONE;
		u->getcond_func_set &= ~MAPLE_FUNC(func);
	}
}

/*
 * queue periodic GETCOND
 */
static int
maple_send_defered_periodic(struct maple_softc *sc)
{
	struct maple_unit *u;
	struct maple_func *fn, *nextfn;
	int defer_remain = 0;

	for (fn = TAILQ_FIRST(&sc->sc_periodicdeferq); fn; fn = nextfn) {
		KASSERT(fn->f_periodic_stat == MAPLE_PERIODIC_DEFERED);

		nextfn = TAILQ_NEXT(fn, f_periodicq);

		u = fn->f_unit;
		if (u->u_dma_stat == MAPLE_DMA_IDLE ||
		    u->u_dma_stat == MAPLE_DMA_RETRY) {
			/*
			 * if IDLE  -> queue this request
			 * if RETRY -> the unit never be freed until the next
			 *             periodic timing, so just restore to
			 *             the normal periodic queue.
			 */
			TAILQ_REMOVE(&sc->sc_periodicdeferq, fn, f_periodicq);
			TAILQ_INSERT_TAIL(&sc->sc_periodicq, fn, f_periodicq);
			fn->f_periodic_stat = MAPLE_PERIODIC_INQ;

			if (u->u_dma_stat == MAPLE_DMA_IDLE) {
				/*
				 * queue periodic command
				 */
				fn->f_work = htobe32(MAPLE_FUNC(fn->f_funcno));
				maple_write_command(sc, u,
				    MAPLE_COMMAND_GETCOND, 1, &fn->f_work);
				u->u_dma_stat = MAPLE_DMA_PERIODIC;
				u->u_dma_func = fn->f_funcno;
			}
		} else {
			defer_remain = 1;
		}
	}

	return defer_remain;
}

/*
 * Issue a GETCOND for every function on the periodic queue whose unit
 * is idle; busy (non-RETRY) units are moved to the deferred queue.
 */
static void
maple_send_periodic(struct maple_softc *sc)
{
	struct maple_unit *u;
	struct maple_func *fn, *nextfn;

	for (fn = TAILQ_FIRST(&sc->sc_periodicq); fn; fn = nextfn) {
		KASSERT(fn->f_periodic_stat == MAPLE_PERIODIC_INQ);

		nextfn = TAILQ_NEXT(fn, f_periodicq);

		u = fn->f_unit;
		if (u->u_dma_stat != MAPLE_DMA_IDLE) {
			if (u->u_dma_stat != MAPLE_DMA_RETRY) {
				/*
				 * can't be queued --- move to defered queue
				 */
				TAILQ_REMOVE(&sc->sc_periodicq, fn,
				    f_periodicq);
				TAILQ_INSERT_TAIL(&sc->sc_periodicdeferq, fn,
				    f_periodicq);
				fn->f_periodic_stat = MAPLE_PERIODIC_DEFERED;
			}
		} else {
			/*
			 * queue periodic command
			 */
			fn->f_work = htobe32(MAPLE_FUNC(fn->f_funcno));
			maple_write_command(sc, u, MAPLE_COMMAND_GETCOND,
			    1, &fn->f_work);
			u->u_dma_stat = MAPLE_DMA_PERIODIC;
			u->u_dma_func = fn->f_funcno;
		}
	}
}

/* Remove a unit from the probe/ping queue it is on, if any. */
static void
maple_remove_from_queues(struct maple_softc *sc, struct maple_unit *u)
{

	/* remove from queues */
	if (u->u_queuestat == MAPLE_QUEUE_PROBE)
		TAILQ_REMOVE(&sc->sc_probeq, u, u_q);
	else if (u->u_queuestat == MAPLE_QUEUE_PING)
		TAILQ_REMOVE(&sc->sc_pingq, u, u_q);
#ifdef DIAGNOSTIC
	else if (u->u_queuestat != MAPLE_QUEUE_NONE)
		panic("maple_remove_from_queues: queuestat %d",
		    u->u_queuestat);
#endif
#if defined(MAPLE_DEBUG) && MAPLE_DEBUG > 2
	if (u->u_queuestat != MAPLE_QUEUE_NONE) {
		char buf[16];
		printf("%s: dequeued\n",
		    maple_unit_name(buf, sizeof(buf), u->port, u->subunit));
	}
#endif

	u->u_queuestat = MAPLE_QUEUE_NONE;
}

/*
 * retry current command at next periodic timing
 */
static int
maple_retry(struct maple_softc *sc, struct maple_unit *u,
	enum maple_dma_stat st)
{

	KASSERT(st != MAPLE_DMA_IDLE && st != MAPLE_DMA_RETRY);

#if defined(MAPLE_DEBUG) && MAPLE_DEBUG > 2
	if (u->u_retrycnt == 0) {
		char buf[16];
		printf("%s: retrying: %#x, %#x, %p\n",
		    maple_unit_name(buf, sizeof(buf), u->port, u->subunit),
		    u->u_command, u->u_datalen, u->u_dataaddr);
	}
#endif
	/* give up after MAPLE_RETRY_MAX attempts (returns 1 = failed) */
	if (u->u_retrycnt >= MAPLE_RETRY_MAX)
		return 1;

	u->u_retrycnt++;

	u->u_saved_dma_stat = st;
	u->u_dma_stat = MAPLE_DMA_RETRY;  /* no new command before retry done */
	SIMPLEQ_INSERT_TAIL(&sc->sc_retryq, u, u_dmaq);

	return 0;
}

/*
 * Re-queue every saved command on the retry queue and restore each
 * unit's pre-retry DMA state.
 */
static void
maple_queue_retry(struct maple_softc *sc)
{
	struct maple_unit *u, *nextu;

	/*
	 * Note: since the queue element is queued immediately
	 * in maple_queue_command, we can't use SIMPLEQ_FOREACH.
	 */
	for (u = SIMPLEQ_FIRST(&sc->sc_retryq); u; u = nextu) {
		nextu = SIMPLEQ_NEXT(u, u_dmaq);

		/*
		 * Retrying is in the highest priority, and the unit shall
		 * always be free.
		 */
		KASSERT(u->u_dma_stat == MAPLE_DMA_RETRY);
		maple_queue_command(sc, u, u->u_command, u->u_datalen,
		    u->u_dataaddr);
		u->u_dma_stat = u->u_saved_dma_stat;

#ifdef DIAGNOSTIC
		KASSERT(u->u_saved_dma_stat != MAPLE_DMA_IDLE);
		u->u_saved_dma_stat = MAPLE_DMA_IDLE;
#endif
	}
	SIMPLEQ_INIT(&sc->sc_retryq);
}

/*
 * Process DMA results.
 * Requires kernel context.
 */
static void
maple_check_responses(struct maple_softc *sc)
{
	struct maple_unit *u, *nextu;
	struct maple_func *fn;
	maple_response_t response;
	int func_code, len;
	int flags;
	char buf[16];

	/*
	 * Note: since the queue element may be queued immediately,
	 * we can't use SIMPLEQ_FOREACH.
1219 */ 1220 for (u = SIMPLEQ_FIRST(&sc->sc_dmaq), maple_begin_txbuf(sc); 1221 u; u = nextu) { 1222 nextu = SIMPLEQ_NEXT(u, u_dmaq); 1223 1224 if (u->u_dma_stat == MAPLE_DMA_IDLE) 1225 continue; /* just detached or DDB was active */ 1226 1227 /* 1228 * check for retransmission 1229 */ 1230 if ((response = u->u_rxbuf[0]) == MAPLE_RESPONSE_AGAIN) { 1231 if (maple_retry(sc, u, u->u_dma_stat) == 0) 1232 continue; 1233 /* else pass error to upper layer */ 1234 } 1235 1236 len = (u->u_rxbuf[0] >> 24); /* length in long */ 1237 len <<= 2; /* length in byte */ 1238 1239 /* 1240 * call handler 1241 */ 1242 if (u->u_dma_stat == MAPLE_DMA_PERIODIC) { 1243 /* 1244 * periodic GETCOND 1245 */ 1246 u->u_dma_stat = MAPLE_DMA_IDLE; 1247 func_code = u->u_dma_func; 1248 if (response == MAPLE_RESPONSE_DATATRF && len > 0 && 1249 be32toh(u->u_rxbuf[1]) == MAPLE_FUNC(func_code)) { 1250 fn = &u->u_func[func_code]; 1251 if (fn->f_dev) 1252 (*fn->f_callback)(fn->f_arg, 1253 (void *)u->u_rxbuf, len, 1254 MAPLE_FLAG_PERIODIC); 1255 } else if (response == MAPLE_RESPONSE_NONE) { 1256 /* XXX OK? */ 1257 /* detach */ 1258 #if defined(MAPLE_DEBUG) && MAPLE_DEBUG > 2 1259 printf("%s: func: %d: periodic response %d\n", 1260 maple_unit_name(buf, sizeof(buf), u->port, u->subunit), 1261 u->u_dma_func, 1262 response); 1263 #endif 1264 /* 1265 * Some 3rd party devices sometimes 1266 * do not respond. 1267 */ 1268 if (maple_retry(sc, u, MAPLE_DMA_PERIODIC)) 1269 maple_detach_unit(sc, u); 1270 } 1271 /* XXX check unexpected conditions? */ 1272 1273 } else if (u->u_dma_stat == MAPLE_DMA_PROBE) { 1274 KASSERT(u->u_queuestat == MAPLE_QUEUE_NONE); 1275 u->u_dma_stat = MAPLE_DMA_IDLE; 1276 switch (response) { 1277 default: 1278 case MAPLE_RESPONSE_NONE: 1279 /* 1280 * Do not use maple_retry(), which conflicts 1281 * with probe structure. 
1282 */ 1283 if (u->subunit != 0 && 1284 ++u->u_proberetry > MAPLE_PROBERETRY_MAX) { 1285 printf("%s: no response\n", 1286 maple_unit_name(buf, sizeof(buf), 1287 u->port, u->subunit)); 1288 } else { 1289 /* probe again */ 1290 #if defined(MAPLE_DEBUG) && MAPLE_DEBUG > 2 1291 printf("%s: queued to probe 4\n", 1292 maple_unit_name(buf, sizeof(buf), u->port, u->subunit)); 1293 #endif 1294 TAILQ_INSERT_TAIL(&sc->sc_probeq, u, 1295 u_q); 1296 u->u_queuestat = MAPLE_QUEUE_PROBE; 1297 } 1298 break; 1299 case MAPLE_RESPONSE_DEVINFO: 1300 /* check if the unit is changed */ 1301 maple_check_unit_change(sc, u); 1302 break; 1303 } 1304 1305 } else if (u->u_dma_stat == MAPLE_DMA_PING) { 1306 KASSERT(u->u_queuestat == MAPLE_QUEUE_NONE); 1307 u->u_dma_stat = MAPLE_DMA_IDLE; 1308 switch (response) { 1309 default: 1310 case MAPLE_RESPONSE_NONE: 1311 /* 1312 * Some 3rd party devices sometimes 1313 * do not respond. 1314 */ 1315 if (maple_retry(sc, u, MAPLE_DMA_PING)) { 1316 /* detach */ 1317 #if defined(MAPLE_DEBUG) && MAPLE_DEBUG > 1 1318 printf("%s: ping response %d\n", 1319 maple_unit_name(buf, sizeof(buf), u->port, 1320 u->subunit), 1321 response); 1322 #endif 1323 #ifdef MAPLE_MEMCARD_PING_HACK 1324 if (u->u_ping_stat 1325 == MAPLE_PING_MEMCARD) { 1326 /* 1327 * The unit claims itself to be 1328 * a Visual Memory, and has 1329 * never responded to GETCOND. 1330 * Try again using MINFO, in 1331 * case it is a poorly 1332 * implemented 3rd party card. 
1333 */ 1334 #ifdef MAPLE_DEBUG 1335 printf("%s: switching ping method\n", 1336 maple_unit_name(buf, sizeof(buf), 1337 u->port, u->subunit)); 1338 #endif 1339 u->u_ping_stat 1340 = MAPLE_PING_MINFO; 1341 TAILQ_INSERT_TAIL(&sc->sc_pingq, 1342 u, u_q); 1343 u->u_queuestat 1344 = MAPLE_QUEUE_PING; 1345 } else 1346 #endif /* MAPLE_MEMCARD_PING_HACK */ 1347 maple_detach_unit(sc, u); 1348 } 1349 break; 1350 case MAPLE_RESPONSE_BADCMD: 1351 case MAPLE_RESPONSE_BADFUNC: 1352 case MAPLE_RESPONSE_DATATRF: 1353 TAILQ_INSERT_TAIL(&sc->sc_pingq, u, u_q); 1354 u->u_queuestat = MAPLE_QUEUE_PING; 1355 #ifdef MAPLE_MEMCARD_PING_HACK 1356 /* 1357 * If the unit responds to GETCOND, it is a 1358 * normal implementation. 1359 */ 1360 if (u->u_ping_stat == MAPLE_PING_MEMCARD) 1361 u->u_ping_stat = MAPLE_PING_NORMAL; 1362 #endif 1363 break; 1364 } 1365 1366 } else { 1367 /* 1368 * Note: Do not rely on the consistency of responses. 1369 */ 1370 1371 if (response == MAPLE_RESPONSE_NONE) { 1372 if (maple_retry(sc, u, u->u_dma_stat)) { 1373 /* detach */ 1374 #if defined(MAPLE_DEBUG) && MAPLE_DEBUG > 1 1375 printf("%s: command response %d\n", 1376 maple_unit_name(buf, sizeof(buf), u->port, 1377 u->subunit), 1378 response); 1379 #endif 1380 maple_detach_unit(sc, u); 1381 } 1382 continue; 1383 } 1384 1385 flags = (u->u_dma_stat == MAPLE_DMA_PCMD) ? 
1386 MAPLE_FLAG_CMD_PERIODIC_TIMING : 0; 1387 u->u_dma_stat = MAPLE_DMA_IDLE; 1388 1389 func_code = u->u_dma_func; 1390 fn = &u->u_func[func_code]; 1391 if (fn->f_dev == NULL) { 1392 /* detached right now */ 1393 #ifdef MAPLE_DEBUG 1394 printf("%s: unknown function: function %d, response %d\n", 1395 maple_unit_name(buf, sizeof(buf), u->port, u->subunit), 1396 func_code, response); 1397 #endif 1398 continue; 1399 } 1400 if (fn->f_callback != NULL) { 1401 (*fn->f_callback)(fn->f_arg, 1402 (void *)u->u_rxbuf, len, flags); 1403 } 1404 } 1405 1406 /* 1407 * check for subunit change and schedule probing subunits 1408 */ 1409 if (u->subunit == 0 && response != MAPLE_RESPONSE_NONE && 1410 response != MAPLE_RESPONSE_AGAIN && 1411 ((int8_t *) u->u_rxbuf)[2] != sc->sc_port_unit_map[u->port]) 1412 maple_check_subunit_change(sc, u); 1413 } 1414 } 1415 1416 /* 1417 * Main Maple Bus thread 1418 */ 1419 static void 1420 maple_event_thread(void *arg) 1421 { 1422 struct maple_softc *sc = arg; 1423 unsigned cnt = 1; /* timing counter */ 1424 int s; 1425 #if defined(MAPLE_DEBUG) && MAPLE_DEBUG > 1 1426 int noreq = 0; 1427 #endif 1428 1429 #ifdef MAPLE_DEBUG 1430 printf("%s: forked event thread, pid %d\n", 1431 device_xname(sc->sc_dev), sc->event_thread->l_proc->p_pid); 1432 #endif 1433 1434 /* begin first DMA cycle */ 1435 maple_begin_txbuf(sc); 1436 1437 sc->sc_event = 1; 1438 1439 /* OK, continue booting system */ 1440 maple_polling = 0; 1441 config_pending_decr(sc->sc_dev); 1442 1443 for (;;) { 1444 /* 1445 * queue requests 1446 */ 1447 1448 /* queue async commands */ 1449 if (!TAILQ_EMPTY(&sc->sc_acmdq)) 1450 maple_queue_cmds(sc, &sc->sc_acmdq); 1451 1452 /* send defered periodic command */ 1453 if (!TAILQ_EMPTY(&sc->sc_periodicdeferq)) 1454 maple_send_defered_periodic(sc); 1455 1456 /* queue periodic commands */ 1457 if (sc->sc_event) { 1458 /* queue commands on periodic timing */ 1459 if (!TAILQ_EMPTY(&sc->sc_pcmdq)) 1460 maple_queue_cmds(sc, &sc->sc_pcmdq); 1461 1462 /* retry 
*/ 1463 if (!SIMPLEQ_EMPTY(&sc->sc_retryq)) 1464 maple_queue_retry(sc); 1465 1466 if ((cnt & 31) == 0) /* XXX */ 1467 maple_unit_probe(sc); 1468 cnt++; 1469 1470 maple_send_periodic(sc); 1471 if ((cnt & 7) == 0) /* XXX */ 1472 maple_unit_ping(sc); 1473 1474 /* 1475 * schedule periodic event 1476 */ 1477 sc->sc_event = 0; 1478 callout_reset(&sc->maple_callout_ch, 1479 MAPLE_CALLOUT_TICKS, maple_callout, sc); 1480 } 1481 1482 if (maple_end_txbuf(sc)) { 1483 1484 /* 1485 * start DMA 1486 */ 1487 s = splmaple(); 1488 maple_start(sc); 1489 1490 /* 1491 * wait until DMA done 1492 */ 1493 if (tsleep(&sc->sc_dmadone, PWAIT, "mdma", hz) 1494 == EWOULDBLOCK) { 1495 /* was DDB active? */ 1496 printf("%s: timed out\n", 1497 device_xname(sc->sc_dev)); 1498 } 1499 splx(s); 1500 1501 /* 1502 * call handlers 1503 */ 1504 maple_check_responses(sc); 1505 #if defined(MAPLE_DEBUG) && MAPLE_DEBUG > 1 1506 noreq = 0; 1507 #endif 1508 } 1509 #if defined(MAPLE_DEBUG) && MAPLE_DEBUG > 1 1510 else { 1511 /* weird if occurs in succession */ 1512 #if MAPLE_DEBUG <= 2 1513 if (noreq) /* ignore first time */ 1514 #endif 1515 printf("%s: no request %d\n", 1516 device_xname(sc->sc_dev), noreq); 1517 noreq++; 1518 } 1519 #endif 1520 1521 /* 1522 * wait for an event 1523 */ 1524 s = splsoftclock(); 1525 if (TAILQ_EMPTY(&sc->sc_acmdq) && sc->sc_event == 0 && 1526 TAILQ_EMPTY(&sc->sc_periodicdeferq)) { 1527 if (tsleep(&sc->sc_event, PWAIT, "mslp", hz) 1528 == EWOULDBLOCK) { 1529 printf("%s: event timed out\n", 1530 device_xname(sc->sc_dev)); 1531 } 1532 1533 } 1534 splx(s); 1535 1536 } 1537 1538 #if 0 /* maple root device can't be detached */ 1539 kthread_exit(0); 1540 /* NOTREACHED */ 1541 #endif 1542 } 1543 1544 static int 1545 maple_intr(void *arg) 1546 { 1547 struct maple_softc *sc = arg; 1548 1549 wakeup(&sc->sc_dmadone); 1550 1551 return 1; 1552 } 1553 1554 static void 1555 maple_callout(void *ctx) 1556 { 1557 struct maple_softc *sc = ctx; 1558 1559 sc->sc_event = 1; /* mark as periodic event 
*/ 1560 wakeup(&sc->sc_event); 1561 } 1562 1563 /* 1564 * Install callback handler (called by drivers) 1565 */ 1566 /* ARGSUSED */ 1567 void 1568 maple_set_callback(device_t dev, struct maple_unit *u, int func, 1569 void (*callback)(void *, struct maple_response *, int, int), void *arg) 1570 { 1571 #if 0 /* currently unused */ 1572 struct maple_softc *sc = device_private(dev); 1573 #endif 1574 struct maple_func *fn; 1575 1576 KASSERT(func >= 0 && func < MAPLE_NFUNC); 1577 1578 fn = &u->u_func[func]; 1579 1580 fn->f_callback = callback; 1581 fn->f_arg = arg; 1582 } 1583 1584 /* 1585 * Return function definition data (called by drivers) 1586 */ 1587 uint32_t 1588 maple_get_function_data(struct maple_devinfo *devinfo, int function_code) 1589 { 1590 int i, p = 0; 1591 uint32_t func; 1592 1593 func = be32toh(devinfo->di_func); 1594 for (i = 31; i >= 0; --i) 1595 if (func & MAPLE_FUNC(i)) { 1596 if (function_code == i) 1597 return be32toh(devinfo->di_function_data[p]); 1598 else 1599 if (++p >= 3) 1600 break; 1601 } 1602 1603 return 0; 1604 } 1605 1606 /* Generic maple device interface */ 1607 1608 int 1609 mapleopen(dev_t dev, int flag, int mode, struct lwp *l) 1610 { 1611 struct maple_softc *sc; 1612 1613 sc = device_lookup_private(&maple_cd, MAPLEBUSUNIT(dev)); 1614 if (sc == NULL) /* make sure it was attached */ 1615 return ENXIO; 1616 1617 if (MAPLEPORT(dev) >= MAPLE_PORTS) 1618 return ENXIO; 1619 1620 if (MAPLESUBUNIT(dev) >= MAPLE_SUBUNITS) 1621 return ENXIO; 1622 1623 if (!(sc->sc_port_units[MAPLEPORT(dev)] & (1 << MAPLESUBUNIT(dev)))) 1624 return ENXIO; 1625 1626 sc->sc_port_units_open[MAPLEPORT(dev)] |= 1 << MAPLESUBUNIT(dev); 1627 1628 return 0; 1629 } 1630 1631 int 1632 mapleclose(dev_t dev, int flag, int mode, struct lwp *l) 1633 { 1634 struct maple_softc *sc; 1635 1636 sc = device_lookup_private(&maple_cd, MAPLEBUSUNIT(dev)); 1637 1638 sc->sc_port_units_open[MAPLEPORT(dev)] &= ~(1 << MAPLESUBUNIT(dev)); 1639 1640 return 0; 1641 } 1642 1643 int 1644 
maple_unit_ioctl(device_t dev, struct maple_unit *u, u_long cmd, 1645 void *data, int flag, struct lwp *l) 1646 { 1647 struct maple_softc *sc = device_private(dev); 1648 1649 if (!(sc->sc_port_units[u->port] & (1 << u->subunit))) 1650 return ENXIO; 1651 1652 switch(cmd) { 1653 case MAPLEIO_GDEVINFO: 1654 memcpy(data, &u->devinfo, sizeof(struct maple_devinfo)); 1655 break; 1656 default: 1657 return EPASSTHROUGH; 1658 } 1659 1660 return 0; 1661 } 1662 1663 int 1664 mapleioctl(dev_t dev, u_long cmd, void *data, int flag, struct lwp *l) 1665 { 1666 struct maple_softc *sc; 1667 struct maple_unit *u; 1668 1669 sc = device_lookup_private(&maple_cd, MAPLEBUSUNIT(dev)); 1670 u = &sc->sc_unit[MAPLEPORT(dev)][MAPLESUBUNIT(dev)]; 1671 1672 return maple_unit_ioctl(sc->sc_dev, u, cmd, data, flag, l); 1673 } 1674