/*	$OpenBSD: smc83c170.c,v 1.16 2013/11/26 09:50:33 mpi Exp $	*/
/*	$NetBSD: smc83c170.c,v 1.59 2005/02/27 00:27:02 perry Exp $	*/

/*-
 * Copyright (c) 1998, 1999 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Device driver for the Standard Microsystems Corp. 83C170
 * Ethernet PCI Integrated Controller (EPIC/100).
 */

#include "bpfilter.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/timeout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>

#include <net/if.h>
#include <net/if_dl.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/if_ether.h>
#endif

#include <net/if_media.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <machine/bus.h>
#include <machine/intr.h>

#include <dev/mii/miivar.h>
#include <dev/mii/lxtphyreg.h>

#include <dev/ic/smc83c170reg.h>
#include <dev/ic/smc83c170var.h>

void	epic_start(struct ifnet *);
void	epic_watchdog(struct ifnet *);
int	epic_ioctl(struct ifnet *, u_long, caddr_t);
int	epic_init(struct ifnet *);
void	epic_stop(struct ifnet *, int);

void	epic_reset(struct epic_softc *);
void	epic_rxdrain(struct epic_softc *);
int	epic_add_rxbuf(struct epic_softc *, int);
void	epic_read_eeprom(struct epic_softc *, int, int, u_int16_t *);
void	epic_set_mchash(struct epic_softc *);
void	epic_fixup_clock_source(struct epic_softc *);
int	epic_mii_read(struct device *, int, int);
void	epic_mii_write(struct device *, int, int, int);
int	epic_mii_wait(struct epic_softc *, u_int32_t);
void	epic_tick(void *);

void	epic_statchg(struct device *);
int	epic_mediachange(struct ifnet *);
void	epic_mediastatus(struct ifnet *, struct ifmediareq *);

struct cfdriver epic_cd = {
	0, "epic", DV_IFNET
};

#define	INTMASK	(INTSTAT_FATAL_INT | INTSTAT_TXU | \
	    INTSTAT_TXC | INTSTAT_RXE | INTSTAT_RQE | INTSTAT_RCC)

int	epic_copy_small = 0;

#define	ETHER_PAD_LEN (ETHER_MIN_LEN - ETHER_CRC_LEN)

/*
 * Attach an EPIC interface to the system.
 */
void
epic_attach(struct epic_softc *sc, const char *intrstr)
{
	bus_space_tag_t st = sc->sc_st;
	bus_space_handle_t sh = sc->sc_sh;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	int rseg, error, miiflags;
	u_int i;
	bus_dma_segment_t seg;
	u_int8_t enaddr[ETHER_ADDR_LEN], devname[12 + 1];
	u_int16_t myea[ETHER_ADDR_LEN / 2], mydevname[6];
	char *nullbuf;

	timeout_set(&sc->sc_mii_timeout, epic_tick, sc);

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 */
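	/*
	 * The extra ETHER_PAD_LEN bytes requested below become the all-zero
	 * pad buffer (nullbuf), which epic_start() chains onto short frames
	 * so the chip always transmits a minimum-length Ethernet frame.
	 */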
	if ((error = bus_dmamem_alloc(sc->sc_dmat,
	    sizeof(struct epic_control_data) + ETHER_PAD_LEN, PAGE_SIZE, 0,
	    &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
		printf(": unable to allocate control data, error = %d\n",
		    error);
		goto fail_0;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
	    sizeof(struct epic_control_data) + ETHER_PAD_LEN,
	    (caddr_t *)&sc->sc_control_data,
	    BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
		printf(": unable to map control data, error = %d\n", error);
		goto fail_1;
	}
	nullbuf =
	    (char *)sc->sc_control_data + sizeof(struct epic_control_data);
	memset(nullbuf, 0, ETHER_PAD_LEN);

	if ((error = bus_dmamap_create(sc->sc_dmat,
	    sizeof(struct epic_control_data), 1,
	    sizeof(struct epic_control_data), 0, BUS_DMA_NOWAIT,
	    &sc->sc_cddmamap)) != 0) {
		printf(": unable to create control data DMA map, error = %d\n",
		    error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
	    sc->sc_control_data, sizeof(struct epic_control_data), NULL,
	    BUS_DMA_NOWAIT)) != 0) {
		printf(": unable to load control data DMA map, error = %d\n",
		    error);
		goto fail_3;
	}

	/*
	 * Create the transmit buffer DMA maps.
	 */
	for (i = 0; i < EPIC_NTXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    EPIC_NFRAGS, MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &EPIC_DSTX(sc, i)->ds_dmamap)) != 0) {
			printf(": unable to create tx DMA map %d, error = %d\n",
			    i, error);
			goto fail_4;
		}
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < EPIC_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &EPIC_DSRX(sc, i)->ds_dmamap)) != 0) {
			printf(": unable to create rx DMA map %d, error = %d\n",
			    i, error);
			goto fail_5;
		}
		EPIC_DSRX(sc, i)->ds_mbuf = NULL;
	}

	/*
	 * create and map the pad buffer
	 */
	if ((error = bus_dmamap_create(sc->sc_dmat, ETHER_PAD_LEN, 1,
	    ETHER_PAD_LEN, 0, BUS_DMA_NOWAIT, &sc->sc_nulldmamap)) != 0) {
		printf(": unable to create pad buffer DMA map, error = %d\n",
		    error);
		goto fail_5;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_nulldmamap,
	    nullbuf, ETHER_PAD_LEN, NULL, BUS_DMA_NOWAIT)) != 0) {
		printf(": unable to load pad buffer DMA map, error = %d\n",
		    error);
		goto fail_6;
	}
	bus_dmamap_sync(sc->sc_dmat, sc->sc_nulldmamap, 0, ETHER_PAD_LEN,
	    BUS_DMASYNC_PREWRITE);

	/*
	 * Bring the chip out of low-power mode and reset it to a known state.
	 */
	bus_space_write_4(st, sh, EPIC_GENCTL, 0);
	epic_reset(sc);

	/*
	 * Read the Ethernet address from the EEPROM.
	 */
	epic_read_eeprom(sc, 0, (sizeof(myea) / sizeof(myea[0])), myea);
	for (i = 0; i < sizeof(myea) / sizeof(myea[0]); i++) {
		enaddr[i * 2] = myea[i] & 0xff;
		enaddr[i * 2 + 1] = myea[i] >> 8;
	}

	/*
	 * ...and the device name.
	 */
	epic_read_eeprom(sc, 0x2c, (sizeof(mydevname) / sizeof(mydevname[0])),
	    mydevname);
	for (i = 0; i < sizeof(mydevname) / sizeof(mydevname[0]); i++) {
		devname[i * 2] = mydevname[i] & 0xff;
		devname[i * 2 + 1] = mydevname[i] >> 8;
	}

	devname[sizeof(devname) - 1] = ' ';
	for (i = sizeof(devname) - 1; devname[i] == ' '; i--) {
		devname[i] = '\0';
		if (i == 0)
			break;
	}

	printf(", %s : %s, address %s\n", devname, intrstr,
	    ether_sprintf(enaddr));

	miiflags = 0;
	if (sc->sc_hwflags & EPIC_HAS_MII_FIBER)
		miiflags |= MIIF_HAVEFIBER;

	/*
	 * Initialize our media structures and probe the MII.
	 */
	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = epic_mii_read;
	sc->sc_mii.mii_writereg = epic_mii_write;
	sc->sc_mii.mii_statchg = epic_statchg;
	ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, epic_mediachange,
	    epic_mediastatus);
	mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, miiflags);
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
	} else
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);

	if (sc->sc_hwflags & EPIC_HAS_BNC) {
		/* use the next free media instance */
		sc->sc_serinst = sc->sc_mii.mii_instance++;
		ifmedia_add(&sc->sc_mii.mii_media,
		    IFM_MAKEWORD(IFM_ETHER, IFM_10_2, 0, sc->sc_serinst),
		    0, NULL);
	} else
		sc->sc_serinst = -1;

	bcopy(enaddr, sc->sc_arpcom.ac_enaddr, ETHER_ADDR_LEN);
	bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = epic_ioctl;
	ifp->if_start = epic_start;
	ifp->if_watchdog = epic_watchdog;
	IFQ_SET_MAXLEN(&ifp->if_snd, EPIC_NTXDESC - 1);
	IFQ_SET_READY(&ifp->if_snd);

	ifp->if_capabilities = IFCAP_VLAN_MTU;

	/*
	 * Attach the interface.
	 */
	if_attach(ifp);
	ether_ifattach(ifp);
	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
 fail_6:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_nulldmamap);
 fail_5:
	for (i = 0; i < EPIC_NRXDESC; i++) {
		if (EPIC_DSRX(sc, i)->ds_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    EPIC_DSRX(sc, i)->ds_dmamap);
	}
 fail_4:
	for (i = 0; i < EPIC_NTXDESC; i++) {
		if (EPIC_DSTX(sc, i)->ds_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    EPIC_DSTX(sc, i)->ds_dmamap);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
 fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
 fail_2:
	bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_control_data,
	    sizeof(struct epic_control_data));
 fail_1:
	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
 fail_0:
	return;
}

/*
 * Start packet transmission on the interface.
 * [ifnet interface function]
 */
void
epic_start(struct ifnet *ifp)
{
	struct epic_softc *sc = ifp->if_softc;
	struct mbuf *m0, *m;
	struct epic_txdesc *txd;
	struct epic_descsoft *ds;
	struct epic_fraglist *fr;
	bus_dmamap_t dmamap;
	int error, firsttx, nexttx, opending, seg;
	u_int len;

	/*
	 * Remember the previous txpending and the first transmit
	 * descriptor we use.
	 */
	opending = sc->sc_txpending;
	firsttx = EPIC_NEXTTX(sc->sc_txlast);

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	while (sc->sc_txpending < EPIC_NTXDESC) {
		/*
		 * Grab a packet off the queue.
		 */
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;
		m = NULL;

		/*
		 * Get the last and next available transmit descriptor.
		 */
		nexttx = EPIC_NEXTTX(sc->sc_txlast);
		txd = EPIC_CDTX(sc, nexttx);
		fr = EPIC_CDFL(sc, nexttx);
		ds = EPIC_DSTX(sc, nexttx);
		dmamap = ds->ds_dmamap;

		/*
		 * Load the DMA map.  If this fails, the packet either
		 * didn't fit in the allotted number of frags, or we were
		 * short on resources.  In this case, we'll copy and try
		 * again.
		 */
		if ((error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
		    BUS_DMA_WRITE|BUS_DMA_NOWAIT)) != 0 ||
		    (m0->m_pkthdr.len < ETHER_PAD_LEN &&
		    dmamap->dm_nsegs == EPIC_NFRAGS)) {
			if (error == 0)
				bus_dmamap_unload(sc->sc_dmat, dmamap);

			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL)
				break;
			if (m0->m_pkthdr.len > MHLEN) {
				MCLGET(m, M_DONTWAIT);
				if ((m->m_flags & M_EXT) == 0) {
					m_freem(m);
					break;
				}
			}
			m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, caddr_t));
			m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;
			error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap,
			    m, BUS_DMA_WRITE|BUS_DMA_NOWAIT);
			if (error)
				break;
		}
		IFQ_DEQUEUE(&ifp->if_snd, m0);
		if (m != NULL) {
			m_freem(m0);
			m0 = m;
		}

		/* Initialize the fraglist. */
		for (seg = 0; seg < dmamap->dm_nsegs; seg++) {
			fr->ef_frags[seg].ef_addr =
			    dmamap->dm_segs[seg].ds_addr;
			fr->ef_frags[seg].ef_length =
			    dmamap->dm_segs[seg].ds_len;
		}
		len = m0->m_pkthdr.len;
		if (len < ETHER_PAD_LEN) {
			/*
			 * Pad short frames to the minimum frame length by
			 * chaining on an extra fragment that points at the
			 * all-zero pad buffer.
			 */
			fr->ef_frags[seg].ef_addr = sc->sc_nulldma;
			fr->ef_frags[seg].ef_length = ETHER_PAD_LEN - len;
			len = ETHER_PAD_LEN;
			seg++;
		}
		fr->ef_nfrags = seg;

		EPIC_CDFLSYNC(sc, nexttx, BUS_DMASYNC_PREWRITE);

		/* Sync the DMA map. */
		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		/*
		 * Store a pointer to the packet so we can free it later.
		 */
		ds->ds_mbuf = m0;

		/*
		 * Fill in the transmit descriptor.
		 */
		txd->et_control = ET_TXCTL_LASTDESC | ET_TXCTL_FRAGLIST;

		/*
		 * If this is the first descriptor we're enqueueing,
		 * don't give it to the EPIC yet.  That could cause
		 * a race condition.  We'll do it below.
		 */
		if (nexttx == firsttx)
			txd->et_txstatus = TXSTAT_TXLENGTH(len);
		else
			txd->et_txstatus =
			    TXSTAT_TXLENGTH(len) | ET_TXSTAT_OWNER;

		EPIC_CDTXSYNC(sc, nexttx,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/* Advance the tx pointer. */
		sc->sc_txpending++;
		sc->sc_txlast = nexttx;

#if NBPFILTER > 0
		/*
		 * Pass the packet to any BPF listeners.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m0, BPF_DIRECTION_OUT);
#endif
	}

	if (sc->sc_txpending == EPIC_NTXDESC) {
		/* No more slots left; notify upper layer. */
		ifp->if_flags |= IFF_OACTIVE;
	}

	if (sc->sc_txpending != opending) {
		/*
		 * We enqueued packets.  If the transmitter was idle,
		 * reset the txdirty pointer.
		 */
		if (opending == 0)
			sc->sc_txdirty = firsttx;

		/*
		 * Cause a transmit interrupt to happen on the
		 * last packet we enqueued.
		 */
		EPIC_CDTX(sc, sc->sc_txlast)->et_control |= ET_TXCTL_IAF;
		EPIC_CDTXSYNC(sc, sc->sc_txlast,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/*
		 * The entire packet chain is set up.  Give the
		 * first descriptor to the EPIC now.
		 */
		EPIC_CDTX(sc, firsttx)->et_txstatus |= ET_TXSTAT_OWNER;
		EPIC_CDTXSYNC(sc, firsttx,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/* Start the transmitter. */
		bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_COMMAND,
		    COMMAND_TXQUEUED);

		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
	}
}

/*
 * Watchdog timer handler.
 * [ifnet interface function]
 */
void
epic_watchdog(struct ifnet *ifp)
{
	struct epic_softc *sc = ifp->if_softc;

	printf("%s: device timeout\n", sc->sc_dev.dv_xname);
	ifp->if_oerrors++;

	(void) epic_init(ifp);
}

/*
 * Handle control requests from the operator.
 * [ifnet interface function]
 */
int
epic_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct epic_softc *sc = ifp->if_softc;
	struct ifaddr *ifa = (struct ifaddr *)data;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;

		switch (ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
			epic_init(ifp);
			arp_ifinit(&sc->sc_arpcom, ifa);
			break;
#endif
		default:
			epic_init(ifp);
			break;
		}
		break;

	case SIOCSIFFLAGS:
		/*
		 * If interface is marked up and not running, then start it.
		 * If it is marked down and running, stop it.
		 * XXX If it's up then re-initialize it.  This is so flags
		 * such as IFF_PROMISC are handled.
		 */
		if (ifp->if_flags & IFF_UP)
			epic_init(ifp);
		else if (ifp->if_flags & IFF_RUNNING)
			epic_stop(ifp, 1);
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
		break;

	default:
		error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data);
	}

	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING) {
			mii_pollstat(&sc->sc_mii);
			epic_set_mchash(sc);
		}
		error = 0;
	}

	splx(s);
	return (error);
}

/*
 * Interrupt handler.
 */
int
epic_intr(void *arg)
{
	struct epic_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct epic_rxdesc *rxd;
	struct epic_txdesc *txd;
	struct epic_descsoft *ds;
	struct mbuf *m;
	u_int32_t intstat, rxstatus, txstatus;
	int i, claimed = 0;
	u_int len;

 top:
	/*
	 * Get the interrupt status from the EPIC.
	 */
	intstat = bus_space_read_4(sc->sc_st, sc->sc_sh, EPIC_INTSTAT);
	if ((intstat & INTSTAT_INT_ACTV) == 0)
		return (claimed);

	claimed = 1;

	/*
	 * Acknowledge the interrupt.
	 */
	bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_INTSTAT,
	    intstat & INTMASK);

	/*
	 * Check for receive interrupts.
	 */
	if (intstat & (INTSTAT_RCC | INTSTAT_RXE | INTSTAT_RQE)) {
		for (i = sc->sc_rxptr;; i = EPIC_NEXTRX(i)) {
			rxd = EPIC_CDRX(sc, i);
			ds = EPIC_DSRX(sc, i);

			EPIC_CDRXSYNC(sc, i,
			    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

			rxstatus = rxd->er_rxstatus;
			if (rxstatus & ER_RXSTAT_OWNER) {
				/*
				 * We have processed all of the
				 * receive buffers.
				 */
				break;
			}

			/*
			 * Make sure the packet arrived intact.  If an error
			 * occurred, update stats and reset the descriptor.
			 * The buffer will be reused the next time the
			 * descriptor comes up in the ring.
			 */
			if ((rxstatus & ER_RXSTAT_PKTINTACT) == 0) {
				if (rxstatus & ER_RXSTAT_CRCERROR)
					printf("%s: CRC error\n",
					    sc->sc_dev.dv_xname);
				if (rxstatus & ER_RXSTAT_ALIGNERROR)
					printf("%s: alignment error\n",
					    sc->sc_dev.dv_xname);
				ifp->if_ierrors++;
				EPIC_INIT_RXDESC(sc, i);
				continue;
			}

			bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
			    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

			/*
			 * The EPIC includes the CRC with every packet;
			 * trim it.
			 */
			len = RXSTAT_RXLENGTH(rxstatus) - ETHER_CRC_LEN;

			if (len < sizeof(struct ether_header)) {
				/*
				 * Runt packet; drop it now.
				 */
				ifp->if_ierrors++;
				EPIC_INIT_RXDESC(sc, i);
				bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
				    ds->ds_dmamap->dm_mapsize,
				    BUS_DMASYNC_PREREAD);
				continue;
			}

			/*
			 * If the packet is small enough to fit in a
			 * single header mbuf, allocate one and copy
			 * the data into it.  This greatly reduces
			 * memory consumption when we receive lots
			 * of small packets.
			 *
			 * Otherwise, we add a new buffer to the receive
			 * chain.  If this fails, we drop the packet and
			 * recycle the old buffer.
			 */
			if (epic_copy_small != 0 && len <= MHLEN) {
				MGETHDR(m, M_DONTWAIT, MT_DATA);
				if (m == NULL)
					goto dropit;
				memcpy(mtod(m, caddr_t),
				    mtod(ds->ds_mbuf, caddr_t), len);
				EPIC_INIT_RXDESC(sc, i);
				bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
				    ds->ds_dmamap->dm_mapsize,
				    BUS_DMASYNC_PREREAD);
			} else {
				m = ds->ds_mbuf;
				if (epic_add_rxbuf(sc, i) != 0) {
 dropit:
					ifp->if_ierrors++;
					EPIC_INIT_RXDESC(sc, i);
					bus_dmamap_sync(sc->sc_dmat,
					    ds->ds_dmamap, 0,
					    ds->ds_dmamap->dm_mapsize,
					    BUS_DMASYNC_PREREAD);
					continue;
				}
			}

			m->m_pkthdr.rcvif = ifp;
			m->m_pkthdr.len = m->m_len = len;

#if NBPFILTER > 0
			/*
			 * Pass this up to any BPF listeners, but only
			 * pass it up the stack if it's for us.
			 */
			if (ifp->if_bpf)
				bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_IN);
#endif

			/* Pass it on. */
			ether_input_mbuf(ifp, m);
			ifp->if_ipackets++;
		}

		/* Update the receive pointer. */
		sc->sc_rxptr = i;

		/*
		 * Check for receive queue underflow.
		 */
		if (intstat & INTSTAT_RQE) {
			printf("%s: receiver queue empty\n",
			    sc->sc_dev.dv_xname);
			/*
			 * Ring is already built; just restart the
			 * receiver.
			 */
			bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_PRCDAR,
			    EPIC_CDRXADDR(sc, sc->sc_rxptr));
			bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_COMMAND,
			    COMMAND_RXQUEUED | COMMAND_START_RX);
		}
	}

	/*
	 * Check for transmission complete interrupts.
	 */
	if (intstat & (INTSTAT_TXC | INTSTAT_TXU)) {
		ifp->if_flags &= ~IFF_OACTIVE;
		for (i = sc->sc_txdirty; sc->sc_txpending != 0;
		    i = EPIC_NEXTTX(i), sc->sc_txpending--) {
			txd = EPIC_CDTX(sc, i);
			ds = EPIC_DSTX(sc, i);

			EPIC_CDTXSYNC(sc, i,
			    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

			txstatus = txd->et_txstatus;
			if (txstatus & ET_TXSTAT_OWNER)
				break;

			EPIC_CDFLSYNC(sc, i, BUS_DMASYNC_POSTWRITE);

			bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap,
			    0, ds->ds_dmamap->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
			m_freem(ds->ds_mbuf);
			ds->ds_mbuf = NULL;

			/*
			 * Check for errors and collisions.
			 */
			if ((txstatus & ET_TXSTAT_PACKETTX) == 0)
				ifp->if_oerrors++;
			else
				ifp->if_opackets++;
			ifp->if_collisions +=
			    TXSTAT_COLLISIONS(txstatus);
			if (txstatus & ET_TXSTAT_CARSENSELOST)
				printf("%s: lost carrier\n",
				    sc->sc_dev.dv_xname);
		}

		/* Update the dirty transmit buffer pointer. */
		sc->sc_txdirty = i;

		/*
		 * Cancel the watchdog timer if there are no pending
		 * transmissions.
		 */
		if (sc->sc_txpending == 0)
			ifp->if_timer = 0;

		/*
		 * Kick the transmitter after a DMA underrun.
		 */
		if (intstat & INTSTAT_TXU) {
			printf("%s: transmit underrun\n", sc->sc_dev.dv_xname);
			bus_space_write_4(sc->sc_st, sc->sc_sh,
			    EPIC_COMMAND, COMMAND_TXUGO);
			if (sc->sc_txpending)
				bus_space_write_4(sc->sc_st, sc->sc_sh,
				    EPIC_COMMAND, COMMAND_TXQUEUED);
		}

		/*
		 * Try to get more packets going.
		 */
		epic_start(ifp);
	}

	/*
	 * Check for fatal interrupts.
	 */
	if (intstat & INTSTAT_FATAL_INT) {
		if (intstat & INTSTAT_PTA)
			printf("%s: PCI target abort error\n",
			    sc->sc_dev.dv_xname);
		else if (intstat & INTSTAT_PMA)
			printf("%s: PCI master abort error\n",
			    sc->sc_dev.dv_xname);
		else if (intstat & INTSTAT_APE)
			printf("%s: PCI address parity error\n",
			    sc->sc_dev.dv_xname);
		else if (intstat & INTSTAT_DPE)
			printf("%s: PCI data parity error\n",
			    sc->sc_dev.dv_xname);
		else
			printf("%s: unknown fatal error\n",
			    sc->sc_dev.dv_xname);
		(void) epic_init(ifp);
	}

	/*
	 * Check for more interrupts.
	 */
	goto top;
}

/*
 * One second timer, used to tick the MII.
 */
void
epic_tick(void *arg)
{
	struct epic_softc *sc = arg;
	int s;

	s = splnet();
	mii_tick(&sc->sc_mii);
	splx(s);

	timeout_add_sec(&sc->sc_mii_timeout, 1);
}

/*
 * Fixup the clock source on the EPIC.
 */
void
epic_fixup_clock_source(struct epic_softc *sc)
{
	int i;

	/*
	 * According to SMC Application Note 7-15, the EPIC's clock
	 * source is incorrect following a reset.  This manifests itself
	 * as failure to recognize when host software has written to
	 * a register on the EPIC.  The appnote recommends issuing at
	 * least 16 consecutive writes to the CLOCK TEST bit to correctly
	 * configure the clock source.
	 */
	for (i = 0; i < 16; i++)
		bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_TEST,
		    TEST_CLOCKTEST);
}

/*
 * Perform a soft reset on the EPIC.
 */
void
epic_reset(struct epic_softc *sc)
{

	epic_fixup_clock_source(sc);

	bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_GENCTL, 0);
	delay(100);
	bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_GENCTL, GENCTL_SOFTRESET);
	delay(100);

	epic_fixup_clock_source(sc);
}

/*
 * Initialize the interface.  Must be called at splnet().
 */
int
epic_init(struct ifnet *ifp)
{
	struct epic_softc *sc = ifp->if_softc;
	bus_space_tag_t st = sc->sc_st;
	bus_space_handle_t sh = sc->sc_sh;
	struct epic_txdesc *txd;
	struct epic_descsoft *ds;
	u_int32_t genctl, reg0;
	int i, error = 0;

	/*
	 * Cancel any pending I/O.
	 */
	epic_stop(ifp, 0);

	/*
	 * Reset the EPIC to a known state.
	 */
	epic_reset(sc);

	/*
	 * Magical mystery initialization.
	 */
	bus_space_write_4(st, sh, EPIC_TXTEST, 0);

	/*
	 * Initialize the EPIC genctl register:
	 *
	 *	- 64 byte receive FIFO threshold
	 *	- automatic advance to next receive frame
	 */
	genctl = GENCTL_RX_FIFO_THRESH0 | GENCTL_ONECOPY;
#if BYTE_ORDER == BIG_ENDIAN
	genctl |= GENCTL_BIG_ENDIAN;
#endif
	bus_space_write_4(st, sh, EPIC_GENCTL, genctl);

	/*
	 * Reset the MII bus and PHY.
	 */
	reg0 = bus_space_read_4(st, sh, EPIC_NVCTL);
	bus_space_write_4(st, sh, EPIC_NVCTL, reg0 | NVCTL_GPIO1 | NVCTL_GPOE1);
	bus_space_write_4(st, sh, EPIC_MIICFG, MIICFG_ENASER);
	bus_space_write_4(st, sh, EPIC_GENCTL, genctl | GENCTL_RESET_PHY);
	delay(100);
	bus_space_write_4(st, sh, EPIC_GENCTL, genctl);
	delay(1000);
	bus_space_write_4(st, sh, EPIC_NVCTL, reg0);

	/*
	 * Initialize Ethernet address.
	 */
	reg0 = sc->sc_arpcom.ac_enaddr[1] << 8 | sc->sc_arpcom.ac_enaddr[0];
	bus_space_write_4(st, sh, EPIC_LAN0, reg0);
	reg0 = sc->sc_arpcom.ac_enaddr[3] << 8 | sc->sc_arpcom.ac_enaddr[2];
	bus_space_write_4(st, sh, EPIC_LAN1, reg0);
	reg0 = sc->sc_arpcom.ac_enaddr[5] << 8 | sc->sc_arpcom.ac_enaddr[4];
	bus_space_write_4(st, sh, EPIC_LAN2, reg0);

	/*
	 * Initialize receive control.  Remember the external buffer
	 * size setting.
	 */
	reg0 = bus_space_read_4(st, sh, EPIC_RXCON) &
	    (RXCON_EXTBUFSIZESEL1 | RXCON_EXTBUFSIZESEL0);
	reg0 |= (RXCON_RXMULTICAST | RXCON_RXBROADCAST);
	if (ifp->if_flags & IFF_PROMISC)
		reg0 |= RXCON_PROMISCMODE;
	bus_space_write_4(st, sh, EPIC_RXCON, reg0);

	/* Set the current media. */
	epic_mediachange(ifp);

	/* Set up the multicast hash table. */
	epic_set_mchash(sc);

	/*
	 * Initialize the transmit descriptor ring.  txlast is initialized
	 * to the end of the list so that it will wrap around to the first
	 * descriptor when the first packet is transmitted.
	 */
	for (i = 0; i < EPIC_NTXDESC; i++) {
		txd = EPIC_CDTX(sc, i);
		memset(txd, 0, sizeof(struct epic_txdesc));
		txd->et_bufaddr = EPIC_CDFLADDR(sc, i);
		txd->et_nextdesc = EPIC_CDTXADDR(sc, EPIC_NEXTTX(i));
		EPIC_CDTXSYNC(sc, i, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	}
	sc->sc_txpending = 0;
	sc->sc_txdirty = 0;
	sc->sc_txlast = EPIC_NTXDESC - 1;

	/*
	 * Initialize the receive descriptor ring.
	 */
	for (i = 0; i < EPIC_NRXDESC; i++) {
		ds = EPIC_DSRX(sc, i);
		if (ds->ds_mbuf == NULL) {
			if ((error = epic_add_rxbuf(sc, i)) != 0) {
				printf("%s: unable to allocate or map rx "
				    "buffer %d error = %d\n",
				    sc->sc_dev.dv_xname, i, error);
				/*
				 * XXX Should attempt to run with fewer receive
				 * XXX buffers instead of just failing.
				 */
				epic_rxdrain(sc);
				goto out;
			}
		} else
			EPIC_INIT_RXDESC(sc, i);
	}
	sc->sc_rxptr = 0;

	/*
	 * Initialize the interrupt mask and enable interrupts.
	 */
	bus_space_write_4(st, sh, EPIC_INTMASK, INTMASK);
	bus_space_write_4(st, sh, EPIC_GENCTL, genctl | GENCTL_INTENA);

	/*
	 * Give the transmit and receive rings to the EPIC.
	 */
	bus_space_write_4(st, sh, EPIC_PTCDAR,
	    EPIC_CDTXADDR(sc, EPIC_NEXTTX(sc->sc_txlast)));
	bus_space_write_4(st, sh, EPIC_PRCDAR,
	    EPIC_CDRXADDR(sc, sc->sc_rxptr));

	/*
	 * Set the EPIC in motion.
	 */
	bus_space_write_4(st, sh, EPIC_COMMAND,
	    COMMAND_RXQUEUED | COMMAND_START_RX);

	/*
	 * ...all done!
	 */
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	/*
	 * Start the one second clock.
	 */
	timeout_add_sec(&sc->sc_mii_timeout, 1);

	/*
	 * Attempt to start output on the interface.
	 */
	epic_start(ifp);

 out:
	if (error)
		printf("%s: interface not running\n", sc->sc_dev.dv_xname);
	return (error);
}

/*
 * Drain the receive queue.
 */
void
epic_rxdrain(struct epic_softc *sc)
{
	struct epic_descsoft *ds;
	int i;

	for (i = 0; i < EPIC_NRXDESC; i++) {
		ds = EPIC_DSRX(sc, i);
		if (ds->ds_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
			m_freem(ds->ds_mbuf);
			ds->ds_mbuf = NULL;
		}
	}
}

/*
 * Stop transmission on the interface.
 */
void
epic_stop(struct ifnet *ifp, int disable)
{
	struct epic_softc *sc = ifp->if_softc;
	bus_space_tag_t st = sc->sc_st;
	bus_space_handle_t sh = sc->sc_sh;
	struct epic_descsoft *ds;
	u_int32_t reg;
	int i;

	/*
	 * Stop the one second clock.
	 */
	timeout_del(&sc->sc_mii_timeout);

	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;

	/* Down the MII. */
	mii_down(&sc->sc_mii);

	/* Paranoia... */
	epic_fixup_clock_source(sc);

	/*
	 * Disable interrupts.
	 */
	reg = bus_space_read_4(st, sh, EPIC_GENCTL);
	bus_space_write_4(st, sh, EPIC_GENCTL, reg & ~GENCTL_INTENA);
	bus_space_write_4(st, sh, EPIC_INTMASK, 0);

	/*
	 * Stop the DMA engine and take the receiver off-line.
	 */
	bus_space_write_4(st, sh, EPIC_COMMAND, COMMAND_STOP_RDMA |
	    COMMAND_STOP_TDMA | COMMAND_STOP_RX);

	/*
	 * Release any queued transmit buffers.
	 */
	for (i = 0; i < EPIC_NTXDESC; i++) {
		ds = EPIC_DSTX(sc, i);
		if (ds->ds_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
			m_freem(ds->ds_mbuf);
			ds->ds_mbuf = NULL;
		}
	}

	if (disable)
		epic_rxdrain(sc);
}

/*
 * Read the EPIC Serial EEPROM.
 */
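/*
 * The EEPROM is accessed by bit-banging EECTL: each word is read by
 * asserting chip select, clocking the 3-bit READ opcode and a 6-bit
 * word address in on EEDI, and then clocking 16 data bits out of EEDO,
 * MSB first.
 */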
void
epic_read_eeprom(struct epic_softc *sc, int word, int wordcnt, u_int16_t *data)
{
	bus_space_tag_t st = sc->sc_st;
	bus_space_handle_t sh = sc->sc_sh;
	u_int16_t reg;
	int i, x;

#define	EEPROM_WAIT_READY(st, sh) \
	while ((bus_space_read_4((st), (sh), EPIC_EECTL) & EECTL_EERDY) == 0) \
		/* nothing */

	/*
	 * Enable the EEPROM.
	 */
	bus_space_write_4(st, sh, EPIC_EECTL, EECTL_ENABLE);
	EEPROM_WAIT_READY(st, sh);

	for (i = 0; i < wordcnt; i++) {
		/* Send CHIP SELECT for one clock tick. */
		bus_space_write_4(st, sh, EPIC_EECTL, EECTL_ENABLE|EECTL_EECS);
		EEPROM_WAIT_READY(st, sh);

		/* Shift in the READ opcode. */
		for (x = 3; x > 0; x--) {
			reg = EECTL_ENABLE|EECTL_EECS;
			if (EPIC_EEPROM_OPC_READ & (1 << (x - 1)))
				reg |= EECTL_EEDI;
			bus_space_write_4(st, sh, EPIC_EECTL, reg);
			EEPROM_WAIT_READY(st, sh);
			bus_space_write_4(st, sh, EPIC_EECTL, reg|EECTL_EESK);
			EEPROM_WAIT_READY(st, sh);
			bus_space_write_4(st, sh, EPIC_EECTL, reg);
			EEPROM_WAIT_READY(st, sh);
		}

		/* Shift in address. */
		for (x = 6; x > 0; x--) {
			reg = EECTL_ENABLE|EECTL_EECS;
			if ((word + i) & (1 << (x - 1)))
				reg |= EECTL_EEDI;
			bus_space_write_4(st, sh, EPIC_EECTL, reg);
			EEPROM_WAIT_READY(st, sh);
			bus_space_write_4(st, sh, EPIC_EECTL, reg|EECTL_EESK);
			EEPROM_WAIT_READY(st, sh);
			bus_space_write_4(st, sh, EPIC_EECTL, reg);
			EEPROM_WAIT_READY(st, sh);
		}

		/* Shift out data. */
		reg = EECTL_ENABLE|EECTL_EECS;
		data[i] = 0;
		for (x = 16; x > 0; x--) {
			bus_space_write_4(st, sh, EPIC_EECTL, reg|EECTL_EESK);
			EEPROM_WAIT_READY(st, sh);
			if (bus_space_read_4(st, sh, EPIC_EECTL) & EECTL_EEDO)
				data[i] |= (1 << (x - 1));
			bus_space_write_4(st, sh, EPIC_EECTL, reg);
			EEPROM_WAIT_READY(st, sh);
		}

		/* Clear CHIP SELECT. */
		bus_space_write_4(st, sh, EPIC_EECTL, EECTL_ENABLE);
		EEPROM_WAIT_READY(st, sh);
	}

	/*
	 * Disable the EEPROM.
	 */
	bus_space_write_4(st, sh, EPIC_EECTL, 0);

#undef EEPROM_WAIT_READY
}

/*
 * Add a receive buffer to the indicated descriptor.
 */
int
epic_add_rxbuf(struct epic_softc *sc, int idx)
{
	struct epic_descsoft *ds = EPIC_DSRX(sc, idx);
	struct mbuf *m;
	int error;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);

	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return (ENOBUFS);
	}

	if (ds->ds_mbuf != NULL)
		bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);

	ds->ds_mbuf = m;

	error = bus_dmamap_load(sc->sc_dmat, ds->ds_dmamap,
	    m->m_ext.ext_buf, m->m_ext.ext_size, NULL,
	    BUS_DMA_READ|BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: can't load rx DMA map %d, error = %d\n",
		    sc->sc_dev.dv_xname, idx, error);
		panic("epic_add_rxbuf");	/* XXX */
	}

	bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
	    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);

	EPIC_INIT_RXDESC(sc, idx);

	return (0);
}

/*
 * Set the EPIC multicast hash table.
 *
 * NOTE: We rely on a recently-updated mii_media_active here!
 */
void
epic_set_mchash(struct epic_softc *sc)
{
	struct arpcom *ac = &sc->sc_arpcom;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	u_int32_t hash, mchash[4];

	/*
	 * Set up the multicast address filter by passing all multicast
	 * addresses through a CRC generator, and then using the low-order
	 * 6 bits as an index into the 64 bit multicast hash table (only
	 * the lower 16 bits of each 32 bit multicast hash register are
	 * valid).  The high order bits select the register, while the
	 * rest of the bits select the bit within the register.
	 */

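	/*
	 * For example, if the top six bits of the CRC came out as 0x2a
	 * (an illustrative value), then hash = 0x2a, hash >> 4 = 2 selects
	 * register MC2, and 1 << (hash & 0xf) sets bit 10 of that register.
	 */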
	if (ifp->if_flags & IFF_PROMISC)
		goto allmulti;

	if (IFM_SUBTYPE(sc->sc_mii.mii_media_active) == IFM_10_T) {
		/* XXX hardware bug in 10Mbps mode. */
		goto allmulti;
	}

	if (ac->ac_multirangecnt > 0)
		goto allmulti;

	mchash[0] = mchash[1] = mchash[2] = mchash[3] = 0;

	ETHER_FIRST_MULTI(step, ac, enm);
	while (enm != NULL) {
		hash = ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN);
		hash >>= 26;

		/* Set the corresponding bit in the hash table. */
		mchash[hash >> 4] |= 1 << (hash & 0xf);

		ETHER_NEXT_MULTI(step, enm);
	}

	ifp->if_flags &= ~IFF_ALLMULTI;
	goto sethash;

 allmulti:
	ifp->if_flags |= IFF_ALLMULTI;
	mchash[0] = mchash[1] = mchash[2] = mchash[3] = 0xffff;

 sethash:
	bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MC0, mchash[0]);
	bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MC1, mchash[1]);
	bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MC2, mchash[2]);
	bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MC3, mchash[3]);
}

/*
 * Wait for the MII to become ready.
 */
int
epic_mii_wait(struct epic_softc *sc, u_int32_t rw)
{
	int i;

	for (i = 0; i < 50; i++) {
		if ((bus_space_read_4(sc->sc_st, sc->sc_sh, EPIC_MMCTL) & rw)
		    == 0)
			break;
		delay(2);
	}
	if (i == 50) {
		printf("%s: MII timed out\n", sc->sc_dev.dv_xname);
		return (1);
	}

	return (0);
}

/*
 * Read from the MII.
 */
int
epic_mii_read(struct device *self, int phy, int reg)
{
	struct epic_softc *sc = (struct epic_softc *)self;

	if (epic_mii_wait(sc, MMCTL_WRITE))
		return (0);

	bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MMCTL,
	    MMCTL_ARG(phy, reg, MMCTL_READ));

	if (epic_mii_wait(sc, MMCTL_READ))
		return (0);

	return (bus_space_read_4(sc->sc_st, sc->sc_sh, EPIC_MMDATA) &
	    MMDATA_MASK);
}

/*
 * Write to the MII.
 */
void
epic_mii_write(struct device *self, int phy, int reg, int val)
{
	struct epic_softc *sc = (struct epic_softc *)self;

	if (epic_mii_wait(sc, MMCTL_WRITE))
		return;

	bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MMDATA, val);
	bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MMCTL,
	    MMCTL_ARG(phy, reg, MMCTL_WRITE));
}

/*
 * Callback from PHY when media changes.
 */
void
epic_statchg(struct device *self)
{
	struct epic_softc *sc = (struct epic_softc *)self;
	u_int32_t txcon, miicfg;

	/*
	 * Update loopback bits in TXCON to reflect duplex mode.
	 */
	txcon = bus_space_read_4(sc->sc_st, sc->sc_sh, EPIC_TXCON);
	if (sc->sc_mii.mii_media_active & IFM_FDX)
		txcon |= (TXCON_LOOPBACK_D1|TXCON_LOOPBACK_D2);
	else
		txcon &= ~(TXCON_LOOPBACK_D1|TXCON_LOOPBACK_D2);
	bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_TXCON, txcon);

	/* On some cards we need to manually set the full-duplex LED. */
	if (sc->sc_hwflags & EPIC_DUPLEXLED_ON_694) {
		miicfg = bus_space_read_4(sc->sc_st, sc->sc_sh, EPIC_MIICFG);
		if (IFM_OPTIONS(sc->sc_mii.mii_media_active) & IFM_FDX)
			miicfg |= MIICFG_ENABLE;
		else
			miicfg &= ~MIICFG_ENABLE;
		bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MIICFG, miicfg);
	}

	/*
	 * There is a multicast filter bug in 10Mbps mode.  Kick the
	 * multicast filter in case the speed changed.
	 */
	epic_set_mchash(sc);
}

/*
 * Callback from ifmedia to request current media status.
 */
void
epic_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct epic_softc *sc = ifp->if_softc;

	mii_pollstat(&sc->sc_mii);
	ifmr->ifm_status = sc->sc_mii.mii_media_status;
	ifmr->ifm_active = sc->sc_mii.mii_media_active;
}

/*
 * Callback from ifmedia to request new media setting.
 */
int
epic_mediachange(struct ifnet *ifp)
{
	struct epic_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_mii;
	struct ifmedia *ifm = &mii->mii_media;
	int media = ifm->ifm_cur->ifm_media;
	u_int32_t miicfg;
	struct mii_softc *miisc;
	int cfg;

	if (!(ifp->if_flags & IFF_UP))
		return (0);

	if (IFM_INST(media) != sc->sc_serinst) {
		/* If we're not selecting the serial interface, select MII mode. */
#ifdef EPICMEDIADEBUG
		printf("%s: parallel mode\n", ifp->if_xname);
#endif
		miicfg = bus_space_read_4(sc->sc_st, sc->sc_sh, EPIC_MIICFG);
		miicfg &= ~MIICFG_SERMODEENA;
		bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MIICFG, miicfg);
	}

	mii_mediachg(mii);

	if (IFM_INST(media) == sc->sc_serinst) {
		/* Select the serial interface. */
#ifdef EPICMEDIADEBUG
		printf("%s: serial mode\n", ifp->if_xname);
#endif
		miicfg = bus_space_read_4(sc->sc_st, sc->sc_sh, EPIC_MIICFG);
		miicfg |= (MIICFG_SERMODEENA | MIICFG_ENABLE);
		bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MIICFG, miicfg);

		/* There is no PHY driver to fill this in. */
		mii->mii_media_active = media;
		mii->mii_media_status = 0;

		epic_statchg(&sc->sc_dev);
		return (0);
	}

	/* Look up the selected PHY. */
	for (miisc = LIST_FIRST(&mii->mii_phys); miisc != NULL;
	    miisc = LIST_NEXT(miisc, mii_list)) {
		if (IFM_INST(media) == miisc->mii_inst)
			break;
	}
	if (!miisc) {
		printf("epic_mediachange: can't happen\n");	/* ??? panic */
		return (0);
	}
#ifdef EPICMEDIADEBUG
	printf("%s: using phy %s\n", ifp->if_xname,
	    miisc->mii_dev.dv_xname);
#endif

	if (miisc->mii_flags & MIIF_HAVEFIBER) {
		/* XXX XXX assume it's a Level1 - should check */

		/* We have to power up the fiber transceivers. */
		cfg = PHY_READ(miisc, MII_LXTPHY_CONFIG);
		if (IFM_SUBTYPE(media) == IFM_100_FX) {
#ifdef EPICMEDIADEBUG
			printf("%s: power up fiber\n", ifp->if_xname);
#endif
			cfg |= (CONFIG_LEDC1 | CONFIG_LEDC0);
		} else {
#ifdef EPICMEDIADEBUG
			printf("%s: power down fiber\n", ifp->if_xname);
#endif
			cfg &= ~(CONFIG_LEDC1 | CONFIG_LEDC0);
		}
		PHY_WRITE(miisc, MII_LXTPHY_CONFIG, cfg);
	}

	return (0);
}