1 /* $NetBSD: smc83c170.c,v 1.32 2000/05/26 00:14:41 tsutsui Exp $ */ 2 3 /*- 4 * Copyright (c) 1998, 1999 The NetBSD Foundation, Inc. 5 * All rights reserved. 6 * 7 * This code is derived from software contributed to The NetBSD Foundation 8 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility, 9 * NASA Ames Research Center. 10 * 11 * Redistribution and use in source and binary forms, with or without 12 * modification, are permitted provided that the following conditions 13 * are met: 14 * 1. Redistributions of source code must retain the above copyright 15 * notice, this list of conditions and the following disclaimer. 16 * 2. Redistributions in binary form must reproduce the above copyright 17 * notice, this list of conditions and the following disclaimer in the 18 * documentation and/or other materials provided with the distribution. 19 * 3. All advertising materials mentioning features or use of this software 20 * must display the following acknowledgement: 21 * This product includes software developed by the NetBSD 22 * Foundation, Inc. and its contributors. 23 * 4. Neither the name of The NetBSD Foundation nor the names of its 24 * contributors may be used to endorse or promote products derived 25 * from this software without specific prior written permission. 26 * 27 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 28 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 29 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 30 * PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 31 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 32 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 33 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 34 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 35 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 36 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 37 * POSSIBILITY OF SUCH DAMAGE. 38 */ 39 40 /* 41 * Device driver for the Standard Microsystems Corp. 83C170 42 * Ethernet PCI Integrated Controller (EPIC/100). 43 */ 44 45 #include "opt_inet.h" 46 #include "opt_ns.h" 47 #include "bpfilter.h" 48 49 #include <sys/param.h> 50 #include <sys/systm.h> 51 #include <sys/callout.h> 52 #include <sys/mbuf.h> 53 #include <sys/malloc.h> 54 #include <sys/kernel.h> 55 #include <sys/socket.h> 56 #include <sys/ioctl.h> 57 #include <sys/errno.h> 58 #include <sys/device.h> 59 60 #include <net/if.h> 61 #include <net/if_dl.h> 62 #include <net/if_media.h> 63 #include <net/if_ether.h> 64 65 #if NBPFILTER > 0 66 #include <net/bpf.h> 67 #endif 68 69 #ifdef INET 70 #include <netinet/in.h> 71 #include <netinet/if_inarp.h> 72 #endif 73 74 #ifdef NS 75 #include <netns/ns.h> 76 #include <netns/ns_if.h> 77 #endif 78 79 #include <machine/bus.h> 80 #include <machine/intr.h> 81 82 #include <dev/mii/miivar.h> 83 84 #include <dev/ic/smc83c170reg.h> 85 #include <dev/ic/smc83c170var.h> 86 87 void epic_start __P((struct ifnet *)); 88 void epic_watchdog __P((struct ifnet *)); 89 int epic_ioctl __P((struct ifnet *, u_long, caddr_t)); 90 91 void epic_shutdown __P((void *)); 92 93 void epic_reset __P((struct epic_softc *)); 94 int epic_init __P((struct epic_softc *)); 95 void epic_rxdrain __P((struct epic_softc *)); 96 void epic_stop __P((struct epic_softc *, int)); 97 int epic_add_rxbuf __P((struct epic_softc *, int)); 98 void epic_read_eeprom 
__P((struct epic_softc *, int, int, u_int16_t *));
void	epic_set_mchash __P((struct epic_softc *));
void	epic_fixup_clock_source __P((struct epic_softc *));
int	epic_mii_read __P((struct device *, int, int));
void	epic_mii_write __P((struct device *, int, int, int));
int	epic_mii_wait __P((struct epic_softc *, u_int32_t));
void	epic_tick __P((void *));

void	epic_statchg __P((struct device *));
int	epic_mediachange __P((struct ifnet *));
void	epic_mediastatus __P((struct ifnet *, struct ifmediareq *));

/* Interrupt causes we handle: fatal errors, Tx underrun/complete, Rx. */
#define	INTMASK	(INTSTAT_FATAL_INT | INTSTAT_TXU | \
	    INTSTAT_TXC | INTSTAT_RXE | INTSTAT_RQE | INTSTAT_RCC)

/*
 * When non-zero, small received packets are copied into a fresh header
 * mbuf instead of handing up the cluster (see epic_intr()).  Tunable.
 */
int	epic_copy_small = 0;

/*
 * Attach an EPIC interface to the system: allocate and map the DMA
 * control data, read the station address and device name from the
 * EEPROM, probe the MII, and attach the network interface.
 */
void
epic_attach(sc)
	struct epic_softc *sc;
{
	bus_space_tag_t st = sc->sc_st;
	bus_space_handle_t sh = sc->sc_sh;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	int i, rseg, error;
	bus_dma_segment_t seg;
	u_int8_t enaddr[ETHER_ADDR_LEN], devname[12 + 1];
	u_int16_t myea[ETHER_ADDR_LEN / 2], mydevname[6];

	callout_init(&sc->sc_mii_callout);

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmat,
	    sizeof(struct epic_control_data), NBPG, 0, &seg, 1, &rseg,
	    BUS_DMA_NOWAIT)) != 0) {
		printf("%s: unable to allocate control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_0;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
	    sizeof(struct epic_control_data), (caddr_t *)&sc->sc_control_data,
	    BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
		printf("%s: unable to map control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_1;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat,
	    sizeof(struct epic_control_data), 1,
	    sizeof(struct epic_control_data), 0, BUS_DMA_NOWAIT,
	    &sc->sc_cddmamap)) != 0) {
		printf("%s: unable to create control data DMA map, "
		    "error = %d\n", sc->sc_dev.dv_xname, error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
	    sc->sc_control_data, sizeof(struct epic_control_data), NULL,
	    BUS_DMA_NOWAIT)) != 0) {
		printf("%s: unable to load control data DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_3;
	}

	/*
	 * Create the transmit buffer DMA maps.
	 */
	for (i = 0; i < EPIC_NTXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    EPIC_NFRAGS, MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &EPIC_DSTX(sc, i)->ds_dmamap)) != 0) {
			printf("%s: unable to create tx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_4;
		}
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < EPIC_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &EPIC_DSRX(sc, i)->ds_dmamap)) != 0) {
			printf("%s: unable to create rx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_5;
		}
		EPIC_DSRX(sc, i)->ds_mbuf = NULL;
	}


	/*
	 * Bring the chip out of low-power mode and reset it to a known state.
	 */
	bus_space_write_4(st, sh, EPIC_GENCTL, 0);
	epic_reset(sc);

	/*
	 * Read the Ethernet address from the EEPROM.  The EEPROM words
	 * are little-endian; unpack them a byte at a time.
	 */
	epic_read_eeprom(sc, 0, (sizeof(myea) / sizeof(myea[0])), myea);
	for (i = 0; i < sizeof(myea)/ sizeof(myea[0]); i++) {
		enaddr[i * 2] = myea[i] & 0xff;
		enaddr[i * 2 + 1] = myea[i] >> 8;
	}

	/*
	 * ...and the device name (12 characters at EEPROM offset 0x2c).
	 */
	epic_read_eeprom(sc, 0x2c, (sizeof(mydevname) / sizeof(mydevname[0])),
	    mydevname);
	for (i = 0; i < sizeof(mydevname) / sizeof(mydevname[0]); i++) {
		devname[i * 2] = mydevname[i] & 0xff;
		devname[i * 2 + 1] = mydevname[i] >> 8;
	}

	/* NUL-terminate and strip trailing spaces from the device name. */
	devname[sizeof(mydevname)] = '\0';
	for (i = sizeof(mydevname) - 1; i >= 0; i--) {
		if (devname[i] == ' ')
			devname[i] = '\0';
		else
			break;
	}

	printf("%s: %s, Ethernet address %s\n", sc->sc_dev.dv_xname,
	    devname, ether_sprintf(enaddr));

	/*
	 * Initialize our media structures and probe the MII.
	 */
	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = epic_mii_read;
	sc->sc_mii.mii_writereg = epic_mii_write;
	sc->sc_mii.mii_statchg = epic_statchg;
	ifmedia_init(&sc->sc_mii.mii_media, 0, epic_mediachange,
	    epic_mediastatus);
	mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		/* No PHY found; fall back to a fixed "none" media. */
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
	} else
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);

	strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = epic_ioctl;
	ifp->if_start = epic_start;
	ifp->if_watchdog = epic_watchdog;

	/*
	 * Attach the interface.
	 */
	if_attach(ifp);
	ether_ifattach(ifp, enaddr);
#if NBPFILTER > 0
	bpfattach(&sc->sc_ethercom.ec_if.if_bpf, ifp, DLT_EN10MB,
	    sizeof(struct ether_header));
#endif

	/*
	 * Make sure the interface is shutdown during reboot.
	 */
	sc->sc_sdhook = shutdownhook_establish(epic_shutdown, sc);
	if (sc->sc_sdhook == NULL)
		printf("%s: WARNING: unable to establish shutdown hook\n",
		    sc->sc_dev.dv_xname);
	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
 fail_5:
	for (i = 0; i < EPIC_NRXDESC; i++) {
		if (EPIC_DSRX(sc, i)->ds_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    EPIC_DSRX(sc, i)->ds_dmamap);
	}
 fail_4:
	for (i = 0; i < EPIC_NTXDESC; i++) {
		if (EPIC_DSTX(sc, i)->ds_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    EPIC_DSTX(sc, i)->ds_dmamap);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
 fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
 fail_2:
	bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_control_data,
	    sizeof(struct epic_control_data));
 fail_1:
	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
 fail_0:
	return;
}

/*
 * Shutdown hook.  Make sure the interface is stopped at reboot.
 */
void
epic_shutdown(arg)
	void *arg;
{
	struct epic_softc *sc = arg;

	epic_stop(sc, 1);
}

/*
 * Start packet transmission on the interface.
 * [ifnet interface function]
 */
void
epic_start(ifp)
	struct ifnet *ifp;
{
	struct epic_softc *sc = ifp->if_softc;
	struct mbuf *m0, *m;
	struct epic_txdesc *txd;
	struct epic_descsoft *ds;
	struct epic_fraglist *fr;
	bus_dmamap_t dmamap;
	int error, firsttx, nexttx, opending, seg;

	/*
	 * Remember the previous txpending and the first transmit
	 * descriptor we use.
	 */
	opending = sc->sc_txpending;
	firsttx = EPIC_NEXTTX(sc->sc_txlast);

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	while (sc->sc_txpending < EPIC_NTXDESC) {
		/*
		 * Grab a packet off the queue.
		 */
		IF_DEQUEUE(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		/*
		 * Get the last and next available transmit descriptor.
		 */
		nexttx = EPIC_NEXTTX(sc->sc_txlast);
		txd = EPIC_CDTX(sc, nexttx);
		fr = EPIC_CDFL(sc, nexttx);
		ds = EPIC_DSTX(sc, nexttx);
		dmamap = ds->ds_dmamap;

		/*
		 * Load the DMA map.  If this fails, the packet either
		 * didn't fit in the allotted number of frags, or we were
		 * short on resources.  In this case, we'll copy and try
		 * again.
		 */
		if (bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
		    BUS_DMA_NOWAIT) != 0) {
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				printf("%s: unable to allocate Tx mbuf\n",
				    sc->sc_dev.dv_xname);
				/* Put the packet back and retry later. */
				IF_PREPEND(&ifp->if_snd, m0);
				break;
			}
			if (m0->m_pkthdr.len > MHLEN) {
				MCLGET(m, M_DONTWAIT);
				if ((m->m_flags & M_EXT) == 0) {
					printf("%s: unable to allocate Tx "
					    "cluster\n", sc->sc_dev.dv_xname);
					m_freem(m);
					IF_PREPEND(&ifp->if_snd, m0);
					break;
				}
			}
			/* Linearize the chain into the single new mbuf. */
			m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, caddr_t));
			m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;
			m_freem(m0);
			m0 = m;
			error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap,
			    m0, BUS_DMA_NOWAIT);
			if (error) {
				printf("%s: unable to load Tx buffer, "
				    "error = %d\n", sc->sc_dev.dv_xname, error);
				IF_PREPEND(&ifp->if_snd, m0);
				break;
			}
		}

		/* Initialize the fraglist. */
		fr->ef_nfrags = dmamap->dm_nsegs;
		for (seg = 0; seg < dmamap->dm_nsegs; seg++) {
			fr->ef_frags[seg].ef_addr =
			    dmamap->dm_segs[seg].ds_addr;
			fr->ef_frags[seg].ef_length =
			    dmamap->dm_segs[seg].ds_len;
		}

		EPIC_CDFLSYNC(sc, nexttx, BUS_DMASYNC_PREWRITE);

		/* Sync the DMA map. */
		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		/*
		 * Store a pointer to the packet so we can free it later.
		 */
		ds->ds_mbuf = m0;

		/*
		 * Fill in the transmit descriptor.  The EPIC doesn't
		 * auto-pad, so we have to do this ourselves.
		 */
		txd->et_control = ET_TXCTL_LASTDESC | ET_TXCTL_FRAGLIST;
		txd->et_txlength = max(m0->m_pkthdr.len,
		    ETHER_MIN_LEN - ETHER_CRC_LEN);

		/*
		 * If this is the first descriptor we're enqueueing,
		 * don't give it to the EPIC yet.  That could cause
		 * a race condition.  We'll do it below.
		 */
		if (nexttx == firsttx)
			txd->et_txstatus = 0;
		else
			txd->et_txstatus = ET_TXSTAT_OWNER;

		EPIC_CDTXSYNC(sc, nexttx,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/* Advance the tx pointer. */
		sc->sc_txpending++;
		sc->sc_txlast = nexttx;

#if NBPFILTER > 0
		/*
		 * Pass the packet to any BPF listeners.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m0);
#endif
	}

	if (sc->sc_txpending == EPIC_NTXDESC) {
		/* No more slots left; notify upper layer. */
		ifp->if_flags |= IFF_OACTIVE;
	}

	if (sc->sc_txpending != opending) {
		/*
		 * We enqueued packets.  If the transmitter was idle,
		 * reset the txdirty pointer.
		 */
		if (opending == 0)
			sc->sc_txdirty = firsttx;

		/*
		 * Cause a transmit interrupt to happen on the
		 * last packet we enqueued.
		 */
		EPIC_CDTX(sc, sc->sc_txlast)->et_control |= ET_TXCTL_IAF;
		EPIC_CDTXSYNC(sc, sc->sc_txlast,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/*
		 * The entire packet chain is set up.  Give the
		 * first descriptor to the EPIC now.
		 */
		EPIC_CDTX(sc, firsttx)->et_txstatus = ET_TXSTAT_OWNER;
		EPIC_CDTXSYNC(sc, firsttx,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/* Start the transmitter. */
		bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_COMMAND,
		    COMMAND_TXQUEUED);

		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
	}
}

/*
 * Watchdog timer handler.
 * [ifnet interface function]
 */
void
epic_watchdog(ifp)
	struct ifnet *ifp;
{
	struct epic_softc *sc = ifp->if_softc;

	printf("%s: device timeout\n", sc->sc_dev.dv_xname);
	ifp->if_oerrors++;

	/* Reinitialize the chip to recover. */
	(void) epic_init(sc);
}

/*
 * Handle control requests from the operator.
 * [ifnet interface function]
 */
int
epic_ioctl(ifp, cmd, data)
	struct ifnet *ifp;
	u_long cmd;
	caddr_t data;
{
	struct epic_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	struct ifaddr *ifa = (struct ifaddr *)data;
	int s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;

		switch (ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
			if ((error = epic_init(sc)) != 0)
				break;
			arp_ifinit(ifp, ifa);
			break;
#endif /* INET */
#ifdef NS
		case AF_NS:
		    {
			struct ns_addr *ina = &IA_SNS(ifa)->sns_addr;

			if (ns_nullhost(*ina))
				ina->x_host = *(union ns_host *)
				    LLADDR(ifp->if_sadl);
			else
				bcopy(ina->x_host.c_host, LLADDR(ifp->if_sadl),
				    ifp->if_addrlen);
			/* Set new address. */
			error = epic_init(sc);
			break;
		    }
#endif /* NS */
		default:
			error = epic_init(sc);
			break;
		}
		break;

	case SIOCSIFMTU:
		if (ifr->ifr_mtu > ETHERMTU)
			error = EINVAL;
		else
			ifp->if_mtu = ifr->ifr_mtu;
		break;

	case SIOCSIFFLAGS:
		if ((ifp->if_flags & IFF_UP) == 0 &&
		    (ifp->if_flags & IFF_RUNNING) != 0) {
			/*
			 * If interface is marked down and it is running, then
			 * stop it.
			 */
			epic_stop(sc, 1);
		} else if ((ifp->if_flags & IFF_UP) != 0 &&
			   (ifp->if_flags & IFF_RUNNING) == 0) {
			/*
			 * If interface is marked up and it is stopped, then
			 * start it.
			 */
			error = epic_init(sc);
		} else if ((ifp->if_flags & IFF_UP) != 0) {
			/*
			 * Reset the interface to pick up changes in any other
			 * flags that affect the hardware state.
			 */
			error = epic_init(sc);
		}
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		error = (cmd == SIOCADDMULTI) ?
		    ether_addmulti(ifr, &sc->sc_ethercom) :
		    ether_delmulti(ifr, &sc->sc_ethercom);

		if (error == ENETRESET) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.  Update our idea of the current media;
			 * epic_set_mchash() needs to know what it is.
			 */
			mii_pollstat(&sc->sc_mii);
			epic_set_mchash(sc);
			error = 0;
		}
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
		break;

	default:
		error = EINVAL;
		break;
	}

	splx(s);
	return (error);
}

/*
 * Interrupt handler.  Services receive, transmit-complete, and fatal
 * error interrupts, looping until the chip reports no interrupt active.
 */
int
epic_intr(arg)
	void *arg;
{
	struct epic_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ether_header *eh;
	struct epic_rxdesc *rxd;
	struct epic_txdesc *txd;
	struct epic_descsoft *ds;
	struct mbuf *m;
	u_int32_t intstat;
	int i, len, claimed = 0;

 top:
	/*
	 * Get the interrupt status from the EPIC.
	 */
	intstat = bus_space_read_4(sc->sc_st, sc->sc_sh, EPIC_INTSTAT);
	if ((intstat & INTSTAT_INT_ACTV) == 0)
		return (claimed);

	claimed = 1;

	/*
	 * Acknowledge the interrupt.
	 */
	bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_INTSTAT,
	    intstat & INTMASK);

	/*
	 * Check for receive interrupts.
	 */
	if (intstat & (INTSTAT_RCC | INTSTAT_RXE | INTSTAT_RQE)) {
		for (i = sc->sc_rxptr;; i = EPIC_NEXTRX(i)) {
			rxd = EPIC_CDRX(sc, i);
			ds = EPIC_DSRX(sc, i);

			EPIC_CDRXSYNC(sc, i,
			    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

			if (rxd->er_rxstatus & ER_RXSTAT_OWNER) {
				/*
				 * We have processed all of the
				 * receive buffers.
				 */
				break;
			}

			/*
			 * Make sure the packet arrived intact.  If an error
			 * occurred, update stats and reset the descriptor.
			 * The buffer will be reused the next time the
			 * descriptor comes up in the ring.
			 */
			if ((rxd->er_rxstatus & ER_RXSTAT_PKTINTACT) == 0) {
				if (rxd->er_rxstatus & ER_RXSTAT_CRCERROR)
					printf("%s: CRC error\n",
					    sc->sc_dev.dv_xname);
				if (rxd->er_rxstatus & ER_RXSTAT_ALIGNERROR)
					printf("%s: alignment error\n",
					    sc->sc_dev.dv_xname);
				ifp->if_ierrors++;
				EPIC_INIT_RXDESC(sc, i);
				continue;
			}

			bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
			    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

			/*
			 * The EPIC includes the CRC with every packet;
			 * trim it.
			 */
			len = rxd->er_rxlength - ETHER_CRC_LEN;

			if (len < sizeof(struct ether_header)) {
				/*
				 * Runt packet; drop it now.
				 */
				ifp->if_ierrors++;
				EPIC_INIT_RXDESC(sc, i);
				bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
				    ds->ds_dmamap->dm_mapsize,
				    BUS_DMASYNC_PREREAD);
				continue;
			}

			/*
			 * If the packet is small enough to fit in a
			 * single header mbuf, allocate one and copy
			 * the data into it.  This greatly reduces
			 * memory consumption when we receive lots
			 * of small packets.
			 *
			 * Otherwise, we add a new buffer to the receive
			 * chain.  If this fails, we drop the packet and
			 * recycle the old buffer.
			 */
			if (epic_copy_small != 0 && len <= MHLEN) {
				MGETHDR(m, M_DONTWAIT, MT_DATA);
				if (m == NULL)
					goto dropit;
				memcpy(mtod(m, caddr_t),
				    mtod(ds->ds_mbuf, caddr_t), len);
				EPIC_INIT_RXDESC(sc, i);
				bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
				    ds->ds_dmamap->dm_mapsize,
				    BUS_DMASYNC_PREREAD);
			} else {
				m = ds->ds_mbuf;
				if (epic_add_rxbuf(sc, i) != 0) {
 dropit:
					ifp->if_ierrors++;
					EPIC_INIT_RXDESC(sc, i);
					bus_dmamap_sync(sc->sc_dmat,
					    ds->ds_dmamap, 0,
					    ds->ds_dmamap->dm_mapsize,
					    BUS_DMASYNC_PREREAD);
					continue;
				}
			}

			m->m_pkthdr.rcvif = ifp;
			m->m_pkthdr.len = m->m_len = len;
			eh = mtod(m, struct ether_header *);

#if NBPFILTER > 0
			/*
			 * Pass this up to any BPF listeners, but only
			 * pass it up the stack if it's for us.
			 */
			if (ifp->if_bpf) {
				bpf_mtap(ifp->if_bpf, m);
				if ((ifp->if_flags & IFF_PROMISC) != 0 &&
				    memcmp(LLADDR(ifp->if_sadl),
					   eh->ether_dhost,
					   ETHER_ADDR_LEN) != 0 &&
				    ETHER_IS_MULTICAST(eh->ether_dhost) == 0) {
					m_freem(m);
					continue;
				}
			}
#endif /* NBPFILTER > 0 */

			/* Pass it on. */
			(*ifp->if_input)(ifp, m);
			ifp->if_ipackets++;
		}

		/* Update the receive pointer. */
		sc->sc_rxptr = i;

		/*
		 * Check for receive queue underflow.
		 */
		if (intstat & INTSTAT_RQE) {
			printf("%s: receiver queue empty\n",
			    sc->sc_dev.dv_xname);
			/*
			 * Ring is already built; just restart the
			 * receiver.
			 */
			bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_PRCDAR,
			    EPIC_CDRXADDR(sc, sc->sc_rxptr));
			bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_COMMAND,
			    COMMAND_RXQUEUED | COMMAND_START_RX);
		}
	}

	/*
	 * Check for transmission complete interrupts.
	 */
	if (intstat & (INTSTAT_TXC | INTSTAT_TXU)) {
		ifp->if_flags &= ~IFF_OACTIVE;
		for (i = sc->sc_txdirty; sc->sc_txpending != 0;
		     i = EPIC_NEXTTX(i), sc->sc_txpending--) {
			txd = EPIC_CDTX(sc, i);
			ds = EPIC_DSTX(sc, i);

			EPIC_CDTXSYNC(sc, i,
			    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

			if (txd->et_txstatus & ET_TXSTAT_OWNER)
				break;

			EPIC_CDFLSYNC(sc, i, BUS_DMASYNC_POSTWRITE);

			bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap,
			    0, ds->ds_dmamap->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
			m_freem(ds->ds_mbuf);
			ds->ds_mbuf = NULL;

			/*
			 * Check for errors and collisions.
			 */
			if ((txd->et_txstatus & ET_TXSTAT_PACKETTX) == 0)
				ifp->if_oerrors++;
			else
				ifp->if_opackets++;
			ifp->if_collisions +=
			    TXSTAT_COLLISIONS(txd->et_txstatus);
			if (txd->et_txstatus & ET_TXSTAT_CARSENSELOST)
				printf("%s: lost carrier\n",
				    sc->sc_dev.dv_xname);
		}

		/* Update the dirty transmit buffer pointer. */
		sc->sc_txdirty = i;

		/*
		 * Cancel the watchdog timer if there are no pending
		 * transmissions.
		 */
		if (sc->sc_txpending == 0)
			ifp->if_timer = 0;

		/*
		 * Kick the transmitter after a DMA underrun.
		 */
		if (intstat & INTSTAT_TXU) {
			printf("%s: transmit underrun\n", sc->sc_dev.dv_xname);
			bus_space_write_4(sc->sc_st, sc->sc_sh,
			    EPIC_COMMAND, COMMAND_TXUGO);
			if (sc->sc_txpending)
				bus_space_write_4(sc->sc_st, sc->sc_sh,
				    EPIC_COMMAND, COMMAND_TXQUEUED);
		}

		/*
		 * Try to get more packets going.
		 */
		epic_start(ifp);
	}

	/*
	 * Check for fatal interrupts.
	 */
	if (intstat & INTSTAT_FATAL_INT) {
		if (intstat & INTSTAT_PTA)
			printf("%s: PCI target abort error\n",
			    sc->sc_dev.dv_xname);
		else if (intstat & INTSTAT_PMA)
			printf("%s: PCI master abort error\n",
			    sc->sc_dev.dv_xname);
		else if (intstat & INTSTAT_APE)
			printf("%s: PCI address parity error\n",
			    sc->sc_dev.dv_xname);
		else if (intstat & INTSTAT_DPE)
			printf("%s: PCI data parity error\n",
			    sc->sc_dev.dv_xname);
		else
			printf("%s: unknown fatal error\n",
			    sc->sc_dev.dv_xname);
		(void) epic_init(sc);
	}

	/*
	 * Check for more interrupts.
	 */
	goto top;
}

/*
 * One second timer, used to tick the MII.
 */
void
epic_tick(arg)
	void *arg;
{
	struct epic_softc *sc = arg;
	int s;

	s = splnet();
	mii_tick(&sc->sc_mii);
	splx(s);

	/* Reschedule ourselves in one second. */
	callout_reset(&sc->sc_mii_callout, hz, epic_tick, sc);
}

/*
 * Fixup the clock source on the EPIC.
 */
void
epic_fixup_clock_source(sc)
	struct epic_softc *sc;
{
	int i;

	/*
	 * According to SMC Application Note 7-15, the EPIC's clock
	 * source is incorrect following a reset.  This manifests itself
	 * as failure to recognize when host software has written to
	 * a register on the EPIC.  The appnote recommends issuing at
	 * least 16 consecutive writes to the CLOCK TEST bit to correctly
	 * configure the clock source.
	 */
	for (i = 0; i < 16; i++)
		bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_TEST,
		    TEST_CLOCKTEST);
}

/*
 * Perform a soft reset on the EPIC.
 */
void
epic_reset(sc)
	struct epic_softc *sc;
{

	epic_fixup_clock_source(sc);

	bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_GENCTL, 0);
	delay(100);
	bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_GENCTL, GENCTL_SOFTRESET);
	delay(100);

	epic_fixup_clock_source(sc);
}

/*
 * Initialize the interface.
 Must be called at splnet().
 */
int
epic_init(sc)
	struct epic_softc *sc;
{
	bus_space_tag_t st = sc->sc_st;
	bus_space_handle_t sh = sc->sc_sh;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	u_int8_t *enaddr = LLADDR(ifp->if_sadl);
	struct epic_txdesc *txd;
	struct epic_descsoft *ds;
	u_int32_t genctl, reg0;
	int i, error = 0;

	/*
	 * Cancel any pending I/O.
	 */
	epic_stop(sc, 0);

	/*
	 * Reset the EPIC to a known state.
	 */
	epic_reset(sc);

	/*
	 * Magical mystery initialization.
	 */
	bus_space_write_4(st, sh, EPIC_TXTEST, 0);

	/*
	 * Initialize the EPIC genctl register:
	 *
	 *	- 64 byte receive FIFO threshold
	 *	- automatic advance to next receive frame
	 */
	genctl = GENCTL_RX_FIFO_THRESH0 | GENCTL_ONECOPY;
#if BYTE_ORDER == BIG_ENDIAN
	genctl |= GENCTL_BIG_ENDIAN;
#endif
	bus_space_write_4(st, sh, EPIC_GENCTL, genctl);

	/*
	 * Reset the MII bus and PHY.
	 */
	reg0 = bus_space_read_4(st, sh, EPIC_NVCTL);
	bus_space_write_4(st, sh, EPIC_NVCTL, reg0 | NVCTL_GPIO1 | NVCTL_GPOE1);
	bus_space_write_4(st, sh, EPIC_MIICFG, MIICFG_ENASER);
	bus_space_write_4(st, sh, EPIC_GENCTL, genctl | GENCTL_RESET_PHY);
	delay(100);
	bus_space_write_4(st, sh, EPIC_GENCTL, genctl);
	delay(100);
	bus_space_write_4(st, sh, EPIC_NVCTL, reg0);

	/*
	 * Initialize Ethernet address.  The LAN registers take the
	 * address two bytes at a time, low byte first.
	 */
	reg0 = enaddr[1] << 8 | enaddr[0];
	bus_space_write_4(st, sh, EPIC_LAN0, reg0);
	reg0 = enaddr[3] << 8 | enaddr[2];
	bus_space_write_4(st, sh, EPIC_LAN1, reg0);
	reg0 = enaddr[5] << 8 | enaddr[4];
	bus_space_write_4(st, sh, EPIC_LAN2, reg0);

	/*
	 * Initialize receive control.  Remember the external buffer
	 * size setting.
	 */
	reg0 = bus_space_read_4(st, sh, EPIC_RXCON) &
	    (RXCON_EXTBUFSIZESEL1 | RXCON_EXTBUFSIZESEL0);
	reg0 |= (RXCON_RXMULTICAST | RXCON_RXBROADCAST);
	if (ifp->if_flags & IFF_PROMISC)
		reg0 |= RXCON_PROMISCMODE;
	bus_space_write_4(st, sh, EPIC_RXCON, reg0);

	/* Set the current media. */
	mii_mediachg(&sc->sc_mii);

	/* Set up the multicast hash table. */
	epic_set_mchash(sc);

	/*
	 * Initialize the transmit descriptor ring.  txlast is initialized
	 * to the end of the list so that it will wrap around to the first
	 * descriptor when the first packet is transmitted.
	 */
	for (i = 0; i < EPIC_NTXDESC; i++) {
		txd = EPIC_CDTX(sc, i);
		memset(txd, 0, sizeof(struct epic_txdesc));
		txd->et_bufaddr = EPIC_CDFLADDR(sc, i);
		txd->et_nextdesc = EPIC_CDTXADDR(sc, EPIC_NEXTTX(i));
		EPIC_CDTXSYNC(sc, i, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	}
	sc->sc_txpending = 0;
	sc->sc_txdirty = 0;
	sc->sc_txlast = EPIC_NTXDESC - 1;

	/*
	 * Initialize the receive descriptor ring.
	 */
	for (i = 0; i < EPIC_NRXDESC; i++) {
		ds = EPIC_DSRX(sc, i);
		if (ds->ds_mbuf == NULL) {
			if ((error = epic_add_rxbuf(sc, i)) != 0) {
				printf("%s: unable to allocate or map rx "
				    "buffer %d error = %d\n",
				    sc->sc_dev.dv_xname, i, error);
				/*
				 * XXX Should attempt to run with fewer receive
				 * XXX buffers instead of just failing.
				 */
				epic_rxdrain(sc);
				goto out;
			}
		}
	}
	sc->sc_rxptr = 0;

	/*
	 * Initialize the interrupt mask and enable interrupts.
	 */
	bus_space_write_4(st, sh, EPIC_INTMASK, INTMASK);
	bus_space_write_4(st, sh, EPIC_GENCTL, genctl | GENCTL_INTENA);

	/*
	 * Give the transmit and receive rings to the EPIC.
	 */
	bus_space_write_4(st, sh, EPIC_PTCDAR,
	    EPIC_CDTXADDR(sc, EPIC_NEXTTX(sc->sc_txlast)));
	bus_space_write_4(st, sh, EPIC_PRCDAR,
	    EPIC_CDRXADDR(sc, sc->sc_rxptr));

	/*
	 * Set the EPIC in motion.
	 */
	bus_space_write_4(st, sh, EPIC_COMMAND,
	    COMMAND_RXQUEUED | COMMAND_START_RX);

	/*
	 * ...all done!
	 */
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	/*
	 * Start the one second clock.
	 */
	callout_reset(&sc->sc_mii_callout, hz, epic_tick, sc);

	/*
	 * Attempt to start output on the interface.
	 */
	epic_start(ifp);

 out:
	if (error)
		printf("%s: interface not running\n", sc->sc_dev.dv_xname);
	return (error);
}

/*
 * Drain the receive queue: unload and free all posted receive buffers.
 */
void
epic_rxdrain(sc)
	struct epic_softc *sc;
{
	struct epic_descsoft *ds;
	int i;

	for (i = 0; i < EPIC_NRXDESC; i++) {
		ds = EPIC_DSRX(sc, i);
		if (ds->ds_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
			m_freem(ds->ds_mbuf);
			ds->ds_mbuf = NULL;
		}
	}
}

/*
 * Stop transmission on the interface.  If `drain' is set, the receive
 * buffers are released as well.
 */
void
epic_stop(sc, drain)
	struct epic_softc *sc;
	int drain;
{
	bus_space_tag_t st = sc->sc_st;
	bus_space_handle_t sh = sc->sc_sh;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct epic_descsoft *ds;
	u_int32_t reg;
	int i;

	/*
	 * Stop the one second clock.
	 */
	callout_stop(&sc->sc_mii_callout);

	/* Down the MII. */
	mii_down(&sc->sc_mii);

	/* Paranoia... */
	epic_fixup_clock_source(sc);

	/*
	 * Disable interrupts.
	 */
	reg = bus_space_read_4(st, sh, EPIC_GENCTL);
	bus_space_write_4(st, sh, EPIC_GENCTL, reg & ~GENCTL_INTENA);
	bus_space_write_4(st, sh, EPIC_INTMASK, 0);

	/*
	 * Stop the DMA engine and take the receiver off-line.
	 */
	bus_space_write_4(st, sh, EPIC_COMMAND, COMMAND_STOP_RDMA |
	    COMMAND_STOP_TDMA | COMMAND_STOP_RX);

	/*
	 * Release any queued transmit buffers.
	 */
	for (i = 0; i < EPIC_NTXDESC; i++) {
		ds = EPIC_DSTX(sc, i);
		if (ds->ds_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
			m_freem(ds->ds_mbuf);
			ds->ds_mbuf = NULL;
		}
	}

	if (drain) {
		/*
		 * Release the receive buffers.
		 */
		epic_rxdrain(sc);
	}

	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;
}

/*
 * Read the EPIC Serial EEPROM.  Bit-bangs a READ opcode and a 6-bit
 * word address for each of `wordcnt' words starting at `word', and
 * shifts each 16-bit result into `data'.
 */
void
epic_read_eeprom(sc, word, wordcnt, data)
	struct epic_softc *sc;
	int word, wordcnt;
	u_int16_t *data;
{
	bus_space_tag_t st = sc->sc_st;
	bus_space_handle_t sh = sc->sc_sh;
	u_int16_t reg;
	int i, x;

#define	EEPROM_WAIT_READY(st, sh) \
	while ((bus_space_read_4((st), (sh), EPIC_EECTL) & EECTL_EERDY) == 0) \
		/* nothing */

	/*
	 * Enable the EEPROM.
	 */
	bus_space_write_4(st, sh, EPIC_EECTL, EECTL_ENABLE);
	EEPROM_WAIT_READY(st, sh);

	for (i = 0; i < wordcnt; i++) {
		/* Send CHIP SELECT for one clock tick. */
		bus_space_write_4(st, sh, EPIC_EECTL, EECTL_ENABLE|EECTL_EECS);
		EEPROM_WAIT_READY(st, sh);

		/* Shift in the READ opcode. */
		for (x = 3; x > 0; x--) {
			reg = EECTL_ENABLE|EECTL_EECS;
			if (EPIC_EEPROM_OPC_READ & (1 << (x - 1)))
				reg |= EECTL_EEDI;
			bus_space_write_4(st, sh, EPIC_EECTL, reg);
			EEPROM_WAIT_READY(st, sh);
			bus_space_write_4(st, sh, EPIC_EECTL, reg|EECTL_EESK);
			EEPROM_WAIT_READY(st, sh);
			bus_space_write_4(st, sh, EPIC_EECTL, reg);
			EEPROM_WAIT_READY(st, sh);
		}

		/* Shift in address. */
		for (x = 6; x > 0; x--) {
			reg = EECTL_ENABLE|EECTL_EECS;
			if ((word + i) & (1 << (x - 1)))
				reg |= EECTL_EEDI;
			bus_space_write_4(st, sh, EPIC_EECTL, reg);
			EEPROM_WAIT_READY(st, sh);
			bus_space_write_4(st, sh, EPIC_EECTL, reg|EECTL_EESK);
			EEPROM_WAIT_READY(st, sh);
			bus_space_write_4(st, sh, EPIC_EECTL, reg);
			EEPROM_WAIT_READY(st, sh);
		}

		/* Shift out data. */
		reg = EECTL_ENABLE|EECTL_EECS;
		data[i] = 0;
		for (x = 16; x > 0; x--) {
			bus_space_write_4(st, sh, EPIC_EECTL, reg|EECTL_EESK);
			EEPROM_WAIT_READY(st, sh);
			if (bus_space_read_4(st, sh, EPIC_EECTL) & EECTL_EEDO)
				data[i] |= (1 << (x - 1));
			bus_space_write_4(st, sh, EPIC_EECTL, reg);
			EEPROM_WAIT_READY(st, sh);
		}

		/* Clear CHIP SELECT. */
		bus_space_write_4(st, sh, EPIC_EECTL, EECTL_ENABLE);
		EEPROM_WAIT_READY(st, sh);
	}

	/*
	 * Disable the EEPROM.
	 */
	bus_space_write_4(st, sh, EPIC_EECTL, 0);

#undef EEPROM_WAIT_READY
}

/*
 * Add a receive buffer to the indicated descriptor.  Allocates a new
 * cluster mbuf, loads it into the descriptor's DMA map (unloading any
 * previous mapping first), and reinitializes the hardware descriptor.
 */
int
epic_add_rxbuf(sc, idx)
	struct epic_softc *sc;
	int idx;
{
	struct epic_descsoft *ds = EPIC_DSRX(sc, idx);
	struct mbuf *m;
	int error;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);

	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return (ENOBUFS);
	}

	if (ds->ds_mbuf != NULL)
		bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);

	ds->ds_mbuf = m;

	error = bus_dmamap_load(sc->sc_dmat, ds->ds_dmamap,
	    m->m_ext.ext_buf, m->m_ext.ext_size, NULL, BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: can't load rx DMA map %d, error = %d\n",
		    sc->sc_dev.dv_xname, idx, error);
		panic("epic_add_rxbuf");	/* XXX */
	}

	bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
	    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);

	EPIC_INIT_RXDESC(sc, idx);

	return (0);
}

/*
 * Set the EPIC multicast hash table.
 *
 * NOTE: We rely on a recently-updated mii_media_active here!
 */
void
epic_set_mchash(sc)
	struct epic_softc *sc;
{
	struct ethercom *ec = &sc->sc_ethercom;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	u_int32_t hash, mchash[4];

	/*
	 * Set up the multicast address filter by passing all multicast
	 * addresses through a CRC generator, and then using the low-order
	 * 6 bits as an index into the 64 bit multicast hash table (only
	 * the lower 16 bits of each 32 bit multicast hash register are
	 * valid).  The high order bits select the register, while the
	 * rest of the bits select the bit within the register.
	 */

	if (ifp->if_flags & IFF_PROMISC)
		goto allmulti;

	if (IFM_SUBTYPE(sc->sc_mii.mii_media_active) == IFM_10_T) {
		/* XXX hardware bug in 10Mbps mode.
*/ 1345 goto allmulti; 1346 } 1347 1348 mchash[0] = mchash[1] = mchash[2] = mchash[3] = 0; 1349 1350 ETHER_FIRST_MULTI(step, ec, enm); 1351 while (enm != NULL) { 1352 if (bcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) { 1353 /* 1354 * We must listen to a range of multicast addresses. 1355 * For now, just accept all multicasts, rather than 1356 * trying to set only those filter bits needed to match 1357 * the range. (At this time, the only use of address 1358 * ranges is for IP multicast routing, for which the 1359 * range is big enough to require all bits set.) 1360 */ 1361 goto allmulti; 1362 } 1363 1364 hash = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN) & 0x3f; 1365 1366 /* Set the corresponding bit in the hash table. */ 1367 mchash[hash >> 4] |= 1 << (hash & 0xf); 1368 1369 ETHER_NEXT_MULTI(step, enm); 1370 } 1371 1372 ifp->if_flags &= ~IFF_ALLMULTI; 1373 goto sethash; 1374 1375 allmulti: 1376 ifp->if_flags |= IFF_ALLMULTI; 1377 mchash[0] = mchash[1] = mchash[2] = mchash[3] = 0xffff; 1378 1379 sethash: 1380 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MC0, mchash[0]); 1381 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MC1, mchash[1]); 1382 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MC2, mchash[2]); 1383 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MC3, mchash[3]); 1384 } 1385 1386 /* 1387 * Wait for the MII to become ready. 1388 */ 1389 int 1390 epic_mii_wait(sc, rw) 1391 struct epic_softc *sc; 1392 u_int32_t rw; 1393 { 1394 int i; 1395 1396 for (i = 0; i < 50; i++) { 1397 if ((bus_space_read_4(sc->sc_st, sc->sc_sh, EPIC_MMCTL) & rw) 1398 == 0) 1399 break; 1400 delay(2); 1401 } 1402 if (i == 50) { 1403 printf("%s: MII timed out\n", sc->sc_dev.dv_xname); 1404 return (1); 1405 } 1406 1407 return (0); 1408 } 1409 1410 /* 1411 * Read from the MII. 
1412 */ 1413 int 1414 epic_mii_read(self, phy, reg) 1415 struct device *self; 1416 int phy, reg; 1417 { 1418 struct epic_softc *sc = (struct epic_softc *)self; 1419 1420 if (epic_mii_wait(sc, MMCTL_WRITE)) 1421 return (0); 1422 1423 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MMCTL, 1424 MMCTL_ARG(phy, reg, MMCTL_READ)); 1425 1426 if (epic_mii_wait(sc, MMCTL_READ)) 1427 return (0); 1428 1429 return (bus_space_read_4(sc->sc_st, sc->sc_sh, EPIC_MMDATA) & 1430 MMDATA_MASK); 1431 } 1432 1433 /* 1434 * Write to the MII. 1435 */ 1436 void 1437 epic_mii_write(self, phy, reg, val) 1438 struct device *self; 1439 int phy, reg, val; 1440 { 1441 struct epic_softc *sc = (struct epic_softc *)self; 1442 1443 if (epic_mii_wait(sc, MMCTL_WRITE)) 1444 return; 1445 1446 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MMDATA, val); 1447 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MMCTL, 1448 MMCTL_ARG(phy, reg, MMCTL_WRITE)); 1449 } 1450 1451 /* 1452 * Callback from PHY when media changes. 1453 */ 1454 void 1455 epic_statchg(self) 1456 struct device *self; 1457 { 1458 struct epic_softc *sc = (struct epic_softc *)self; 1459 u_int32_t txcon; 1460 1461 /* 1462 * Update loopback bits in TXCON to reflect duplex mode. 1463 */ 1464 txcon = bus_space_read_4(sc->sc_st, sc->sc_sh, EPIC_TXCON); 1465 if (sc->sc_mii.mii_media_active & IFM_FDX) 1466 txcon |= (TXCON_LOOPBACK_D1|TXCON_LOOPBACK_D2); 1467 else 1468 txcon &= ~(TXCON_LOOPBACK_D1|TXCON_LOOPBACK_D2); 1469 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_TXCON, txcon); 1470 1471 /* 1472 * There is a multicast filter bug in 10Mbps mode. Kick the 1473 * multicast filter in case the speed changed. 1474 */ 1475 epic_set_mchash(sc); 1476 } 1477 1478 /* 1479 * Callback from ifmedia to request current media status. 
1480 */ 1481 void 1482 epic_mediastatus(ifp, ifmr) 1483 struct ifnet *ifp; 1484 struct ifmediareq *ifmr; 1485 { 1486 struct epic_softc *sc = ifp->if_softc; 1487 1488 mii_pollstat(&sc->sc_mii); 1489 ifmr->ifm_status = sc->sc_mii.mii_media_status; 1490 ifmr->ifm_active = sc->sc_mii.mii_media_active; 1491 } 1492 1493 /* 1494 * Callback from ifmedia to request new media setting. 1495 */ 1496 int 1497 epic_mediachange(ifp) 1498 struct ifnet *ifp; 1499 { 1500 struct epic_softc *sc = ifp->if_softc; 1501 1502 if (ifp->if_flags & IFF_UP) 1503 mii_mediachg(&sc->sc_mii); 1504 return (0); 1505 } 1506