1 /* $OpenBSD: smc83c170.c,v 1.14 2009/08/10 20:29:54 deraadt Exp $ */ 2 /* $NetBSD: smc83c170.c,v 1.59 2005/02/27 00:27:02 perry Exp $ */ 3 4 /*- 5 * Copyright (c) 1998, 1999 The NetBSD Foundation, Inc. 6 * All rights reserved. 7 * 8 * This code is derived from software contributed to The NetBSD Foundation 9 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility, 10 * NASA Ames Research Center. 11 * 12 * Redistribution and use in source and binary forms, with or without 13 * modification, are permitted provided that the following conditions 14 * are met: 15 * 1. Redistributions of source code must retain the above copyright 16 * notice, this list of conditions and the following disclaimer. 17 * 2. Redistributions in binary form must reproduce the above copyright 18 * notice, this list of conditions and the following disclaimer in the 19 * documentation and/or other materials provided with the distribution. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 23 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 24 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 25 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 31 * POSSIBILITY OF SUCH DAMAGE. 32 */ 33 34 /* 35 * Device driver for the Standard Microsystems Corp. 83C170 36 * Ethernet PCI Integrated Controller (EPIC/100). 
37 */ 38 39 #include "bpfilter.h" 40 41 #include <sys/param.h> 42 #include <sys/systm.h> 43 #include <sys/timeout.h> 44 #include <sys/mbuf.h> 45 #include <sys/malloc.h> 46 #include <sys/kernel.h> 47 #include <sys/socket.h> 48 #include <sys/ioctl.h> 49 #include <sys/errno.h> 50 #include <sys/device.h> 51 52 #include <net/if.h> 53 #include <net/if_dl.h> 54 55 #ifdef INET 56 #include <netinet/in.h> 57 #include <netinet/in_systm.h> 58 #include <netinet/in_var.h> 59 #include <netinet/ip.h> 60 #include <netinet/if_ether.h> 61 #endif 62 63 #include <net/if_media.h> 64 65 #if NBPFILTER > 0 66 #include <net/bpf.h> 67 #endif 68 69 #include <machine/bus.h> 70 #include <machine/intr.h> 71 72 #include <dev/mii/miivar.h> 73 #include <dev/mii/lxtphyreg.h> 74 75 #include <dev/ic/smc83c170reg.h> 76 #include <dev/ic/smc83c170var.h> 77 78 void epic_start(struct ifnet *); 79 void epic_watchdog(struct ifnet *); 80 int epic_ioctl(struct ifnet *, u_long, caddr_t); 81 int epic_init(struct ifnet *); 82 void epic_stop(struct ifnet *, int); 83 84 void epic_reset(struct epic_softc *); 85 void epic_rxdrain(struct epic_softc *); 86 int epic_add_rxbuf(struct epic_softc *, int); 87 void epic_read_eeprom(struct epic_softc *, int, int, u_int16_t *); 88 void epic_set_mchash(struct epic_softc *); 89 void epic_fixup_clock_source(struct epic_softc *); 90 int epic_mii_read(struct device *, int, int); 91 void epic_mii_write(struct device *, int, int, int); 92 int epic_mii_wait(struct epic_softc *, u_int32_t); 93 void epic_tick(void *); 94 95 void epic_statchg(struct device *); 96 int epic_mediachange(struct ifnet *); 97 void epic_mediastatus(struct ifnet *, struct ifmediareq *); 98 99 struct cfdriver epic_cd = { 100 0, "epic", DV_IFNET 101 }; 102 103 #define INTMASK (INTSTAT_FATAL_INT | INTSTAT_TXU | \ 104 INTSTAT_TXC | INTSTAT_RXE | INTSTAT_RQE | INTSTAT_RCC) 105 106 int epic_copy_small = 0; 107 108 #define ETHER_PAD_LEN (ETHER_MIN_LEN - ETHER_CRC_LEN) 109 110 /* 111 * Attach an EPIC interface to the 
system.
 *
 * Performs all one-time setup for a newly found EPIC: allocates and DMA-maps
 * the shared control-data block (descriptor rings + fraglists + a zeroed pad
 * buffer), creates per-descriptor TX/RX DMA maps, reads the station address
 * and device name from the serial EEPROM, probes the MII, and attaches the
 * ifnet.  On any failure the goto chain below unwinds exactly what has been
 * allocated so far, in reverse order.
 */
void
epic_attach(struct epic_softc *sc, const char *intrstr)
{
	bus_space_tag_t st = sc->sc_st;
	bus_space_handle_t sh = sc->sc_sh;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	int rseg, error, miiflags;
	u_int i;
	bus_dma_segment_t seg;
	u_int8_t enaddr[ETHER_ADDR_LEN], devname[12 + 1];
	u_int16_t myea[ETHER_ADDR_LEN / 2], mydevname[6];
	char *nullbuf;

	timeout_set(&sc->sc_mii_timeout, epic_tick, sc);

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.  The allocation is oversized by ETHER_PAD_LEN so
	 * the zero pad buffer (used by epic_start() to pad runt TX frames)
	 * lives in the same DMA-safe region.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmat,
	    sizeof(struct epic_control_data) + ETHER_PAD_LEN, PAGE_SIZE, 0,
	    &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
		printf(": unable to allocate control data, error = %d\n",
		    error);
		goto fail_0;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
	    sizeof(struct epic_control_data) + ETHER_PAD_LEN,
	    (caddr_t *)&sc->sc_control_data,
	    BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
		printf(": unable to map control data, error = %d\n", error);
		goto fail_1;
	}
	nullbuf =
	    (char *)sc->sc_control_data + sizeof(struct epic_control_data);
	memset(nullbuf, 0, ETHER_PAD_LEN);

	if ((error = bus_dmamap_create(sc->sc_dmat,
	    sizeof(struct epic_control_data), 1,
	    sizeof(struct epic_control_data), 0, BUS_DMA_NOWAIT,
	    &sc->sc_cddmamap)) != 0) {
		printf(": unable to create control data DMA map, error = %d\n",
		    error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
	    sc->sc_control_data, sizeof(struct epic_control_data), NULL,
	    BUS_DMA_NOWAIT)) != 0) {
		printf(": unable to load control data DMA map, error = %d\n",
		    error);
		goto fail_3;
	}

	/*
	 * Create the transmit buffer DMA maps (up to EPIC_NFRAGS segments
	 * per packet).
	 */
	for (i = 0; i < EPIC_NTXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    EPIC_NFRAGS, MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &EPIC_DSTX(sc, i)->ds_dmamap)) != 0) {
			printf(": unable to create tx DMA map %d, error = %d\n",
			    i, error);
			goto fail_4;
		}
	}

	/*
	 * Create the receive buffer DMA maps (one cluster per RX slot).
	 */
	for (i = 0; i < EPIC_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &EPIC_DSRX(sc, i)->ds_dmamap)) != 0) {
			printf(": unable to create rx DMA map %d, error = %d\n",
			    i, error);
			goto fail_5;
		}
		EPIC_DSRX(sc, i)->ds_mbuf = NULL;
	}

	/*
	 * create and map the pad buffer
	 */
	if ((error = bus_dmamap_create(sc->sc_dmat, ETHER_PAD_LEN, 1,
	    ETHER_PAD_LEN, 0, BUS_DMA_NOWAIT,&sc->sc_nulldmamap)) != 0) {
		printf(": unable to create pad buffer DMA map, error = %d\n",
		    error);
		goto fail_5;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_nulldmamap,
	    nullbuf, ETHER_PAD_LEN, NULL, BUS_DMA_NOWAIT)) != 0) {
		printf(": unable to load pad buffer DMA map, error = %d\n",
		    error);
		goto fail_6;
	}
	/* The pad buffer never changes; sync it for the device once, here. */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_nulldmamap, 0, ETHER_PAD_LEN,
	    BUS_DMASYNC_PREWRITE);

	/*
	 * Bring the chip out of low-power mode and reset it to a known state.
	 */
	bus_space_write_4(st, sh, EPIC_GENCTL, 0);
	epic_reset(sc);

	/*
	 * Read the Ethernet address from the EEPROM (stored as three
	 * little-endian 16-bit words).
	 */
	epic_read_eeprom(sc, 0, (sizeof(myea) / sizeof(myea[0])), myea);
	for (i = 0; i < sizeof(myea)/ sizeof(myea[0]); i++) {
		enaddr[i * 2] = myea[i] & 0xff;
		enaddr[i * 2 + 1] = myea[i] >> 8;
	}

	/*
	 * ...and the device name (6 words at EEPROM offset 0x2c).
	 */
	epic_read_eeprom(sc, 0x2c, (sizeof(mydevname) / sizeof(mydevname[0])),
	    mydevname);
	for (i = 0; i < sizeof(mydevname) / sizeof(mydevname[0]); i++) {
		devname[i * 2] = mydevname[i] & 0xff;
		devname[i * 2 + 1] = mydevname[i] >> 8;
	}

	/*
	 * Plant a sentinel blank in the last slot, then walk backwards
	 * converting trailing blanks to NULs; this both trims padding and
	 * guarantees the string is terminated.
	 */
	devname[sizeof(devname) - 1] = ' ';
	for (i = sizeof(devname) - 1; devname[i] == ' '; i--) {
		devname[i] = '\0';
		if (i == 0)
			break;
	}

	printf(", %s : %s, address %s\n", devname, intrstr,
	    ether_sprintf(enaddr));

	miiflags = 0;
	if (sc->sc_hwflags & EPIC_HAS_MII_FIBER)
		miiflags |= MIIF_HAVEFIBER;

	/*
	 * Initialize our media structures and probe the MII.
	 */
	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = epic_mii_read;
	sc->sc_mii.mii_writereg = epic_mii_write;
	sc->sc_mii.mii_statchg = epic_statchg;
	ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, epic_mediachange,
	    epic_mediastatus);
	mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, miiflags);
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		/* No PHY found; offer only a "none" medium. */
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
	} else
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);

	if (sc->sc_hwflags & EPIC_HAS_BNC) {
		/* use the next free media instance */
		sc->sc_serinst = sc->sc_mii.mii_instance++;
		ifmedia_add(&sc->sc_mii.mii_media,
		    IFM_MAKEWORD(IFM_ETHER, IFM_10_2, 0,
			sc->sc_serinst),
		    0, NULL);
	} else
		sc->sc_serinst = -1;

	bcopy(enaddr, sc->sc_arpcom.ac_enaddr, ETHER_ADDR_LEN);
	bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = epic_ioctl;
	ifp->if_start = epic_start;
	ifp->if_watchdog = epic_watchdog;
	IFQ_SET_MAXLEN(&ifp->if_snd, EPIC_NTXDESC - 1);
	IFQ_SET_READY(&ifp->if_snd);

	ifp->if_capabilities = IFCAP_VLAN_MTU;

	/*
	 * Attach the interface.
	 */
	if_attach(ifp);
	ether_ifattach(ifp);
	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
 fail_6:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_nulldmamap);
 fail_5:
	for (i = 0; i < EPIC_NRXDESC; i++) {
		if (EPIC_DSRX(sc, i)->ds_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    EPIC_DSRX(sc, i)->ds_dmamap);
	}
 fail_4:
	for (i = 0; i < EPIC_NTXDESC; i++) {
		if (EPIC_DSTX(sc, i)->ds_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    EPIC_DSTX(sc, i)->ds_dmamap);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
 fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
 fail_2:
	bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_control_data,
	    sizeof(struct epic_control_data));
 fail_1:
	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
 fail_0:
	return;
}

/*
 * Start packet transmission on the interface.
 * [ifnet interface function]
 *
 * Drains the send queue into the TX descriptor ring.  Each packet is
 * DMA-loaded (copied into a fresh mbuf if the mapping fails or a short
 * frame would not leave room for the pad fragment), described by a
 * fraglist, and handed to the chip by setting the OWNER bit.  The first
 * descriptor of the batch is given to the chip last, to avoid racing
 * with a transmitter that is already running.
 */
void
epic_start(struct ifnet *ifp)
{
	struct epic_softc *sc = ifp->if_softc;
	struct mbuf *m0, *m;
	struct epic_txdesc *txd;
	struct epic_descsoft *ds;
	struct epic_fraglist *fr;
	bus_dmamap_t dmamap;
	int error, firsttx, nexttx, opending, seg;
	u_int len;

	/*
	 * Remember the previous txpending and the first transmit
	 * descriptor we use.
	 */
	opending = sc->sc_txpending;
	firsttx = EPIC_NEXTTX(sc->sc_txlast);

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	while (sc->sc_txpending < EPIC_NTXDESC) {
		/*
		 * Grab a packet off the queue.
		 */
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;
		m = NULL;

		/*
		 * Get the last and next available transmit descriptor.
		 */
		nexttx = EPIC_NEXTTX(sc->sc_txlast);
		txd = EPIC_CDTX(sc, nexttx);
		fr = EPIC_CDFL(sc, nexttx);
		ds = EPIC_DSTX(sc, nexttx);
		dmamap = ds->ds_dmamap;

		/*
		 * Load the DMA map.  If this fails, the packet either
		 * didn't fit in the alloted number of frags, or we were
		 * short on resources.  In this case, we'll copy and try
		 * again.  Also copy if the frame is short but the map is
		 * already full, since padding needs one more fragment.
		 */
		if ((error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
		    BUS_DMA_WRITE|BUS_DMA_NOWAIT)) != 0 ||
		    (m0->m_pkthdr.len < ETHER_PAD_LEN &&
		    dmamap-> dm_nsegs == EPIC_NFRAGS)) {
			if (error == 0)
				bus_dmamap_unload(sc->sc_dmat, dmamap);

			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL)
				break;
			if (m0->m_pkthdr.len > MHLEN) {
				MCLGET(m, M_DONTWAIT);
				if ((m->m_flags & M_EXT) == 0) {
					m_freem(m);
					break;
				}
			}
			m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, caddr_t));
			m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;
			error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap,
			    m, BUS_DMA_WRITE|BUS_DMA_NOWAIT);
			if (error)
				break;
		}
		/* Commit: remove the packet from the queue. */
		IFQ_DEQUEUE(&ifp->if_snd, m0);
		if (m != NULL) {
			m_freem(m0);
			m0 = m;
		}

		/* Initialize the fraglist. */
		for (seg = 0; seg < dmamap->dm_nsegs; seg++) {
			fr->ef_frags[seg].ef_addr =
			    dmamap->dm_segs[seg].ds_addr;
			fr->ef_frags[seg].ef_length =
			    dmamap->dm_segs[seg].ds_len;
		}
		len = m0->m_pkthdr.len;
		if (len < ETHER_PAD_LEN) {
			/* Append the shared zero buffer to pad the runt. */
			fr->ef_frags[seg].ef_addr = sc->sc_nulldma;
			fr->ef_frags[seg].ef_length = ETHER_PAD_LEN - len;
			len = ETHER_PAD_LEN;
			seg++;
		}
		fr->ef_nfrags = seg;

		EPIC_CDFLSYNC(sc, nexttx, BUS_DMASYNC_PREWRITE);

		/* Sync the DMA map. */
		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		/*
		 * Store a pointer to the packet so we can free it later.
		 */
		ds->ds_mbuf = m0;

		/*
		 * Fill in the transmit descriptor.
		 */
		txd->et_control = ET_TXCTL_LASTDESC | ET_TXCTL_FRAGLIST;

		/*
		 * If this is the first descriptor we're enqueueing,
		 * don't give it to the EPIC yet.  That could cause
		 * a race condition.  We'll do it below.
		 */
		if (nexttx == firsttx)
			txd->et_txstatus = TXSTAT_TXLENGTH(len);
		else
			txd->et_txstatus =
			    TXSTAT_TXLENGTH(len) | ET_TXSTAT_OWNER;

		EPIC_CDTXSYNC(sc, nexttx,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/* Advance the tx pointer. */
		sc->sc_txpending++;
		sc->sc_txlast = nexttx;

#if NBPFILTER > 0
		/*
		 * Pass the packet to any BPF listeners.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m0, BPF_DIRECTION_OUT);
#endif
	}

	if (sc->sc_txpending == EPIC_NTXDESC) {
		/* No more slots left; notify upper layer. */
		ifp->if_flags |= IFF_OACTIVE;
	}

	if (sc->sc_txpending != opending) {
		/*
		 * We enqueued packets.  If the transmitter was idle,
		 * reset the txdirty pointer.
		 */
		if (opending == 0)
			sc->sc_txdirty = firsttx;

		/*
		 * Cause a transmit interrupt to happen on the
		 * last packet we enqueued.
		 */
		EPIC_CDTX(sc, sc->sc_txlast)->et_control |= ET_TXCTL_IAF;
		EPIC_CDTXSYNC(sc, sc->sc_txlast,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/*
		 * The entire packet chain is set up.  Give the
		 * first descriptor to the EPIC now.
		 */
		EPIC_CDTX(sc, firsttx)->et_txstatus |= ET_TXSTAT_OWNER;
		EPIC_CDTXSYNC(sc, firsttx,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/* Start the transmitter. */
		bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_COMMAND,
		    COMMAND_TXQUEUED);

		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
	}
}

/*
 * Watchdog timer handler.
 * [ifnet interface function]
 *
 * Fired when a transmit set up in epic_start() has not completed within
 * 5 seconds; count the error and re-initialize the chip.
 */
void
epic_watchdog(struct ifnet *ifp)
{
	struct epic_softc *sc = ifp->if_softc;

	printf("%s: device timeout\n", sc->sc_dev.dv_xname);
	ifp->if_oerrors++;

	(void) epic_init(ifp);
}

/*
 * Handle control requests from the operator.
 * [ifnet interface function]
 */
int
epic_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct epic_softc *sc = ifp->if_softc;
	struct ifaddr *ifa = (struct ifaddr *)data;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;

		switch (ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
			epic_init(ifp);
			arp_ifinit(&sc->sc_arpcom, ifa);
			break;
#endif
		default:
			epic_init(ifp);
			break;
		}
		break;

	case SIOCSIFFLAGS:
		/*
		 * If interface is marked up and not running, then start it.
		 * If it is marked down and running, stop it.
		 * XXX If it's up then re-initialize it.  This is so flags
		 * such as IFF_PROMISC are handled.
		 */
		if (ifp->if_flags & IFF_UP)
			epic_init(ifp);
		else if (ifp->if_flags & IFF_RUNNING)
			epic_stop(ifp, 1);
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
		break;

	default:
		error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data);
	}

	/*
	 * ENETRESET from ether_ioctl() means the multicast list changed;
	 * if the interface is running, just reprogram the hash filter
	 * (which consults the current media) rather than doing a full init.
	 */
	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING) {
			mii_pollstat(&sc->sc_mii);
			epic_set_mchash(sc);
		}
		error = 0;
	}

	splx(s);
	return (error);
}

/*
 * Interrupt handler.
 *
 * Loops until the chip no longer asserts INTSTAT_INT_ACTV, servicing
 * receive completions/errors, transmit completions/underruns, and fatal
 * PCI errors on each pass.  Returns non-zero if the interrupt was ours.
 */
int
epic_intr(void *arg)
{
	struct epic_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct epic_rxdesc *rxd;
	struct epic_txdesc *txd;
	struct epic_descsoft *ds;
	struct mbuf *m;
	u_int32_t intstat, rxstatus, txstatus;
	int i, claimed = 0;
	u_int len;

 top:
	/*
	 * Get the interrupt status from the EPIC.
	 */
	intstat = bus_space_read_4(sc->sc_st, sc->sc_sh, EPIC_INTSTAT);
	if ((intstat & INTSTAT_INT_ACTV) == 0)
		return (claimed);

	claimed = 1;

	/*
	 * Acknowledge the interrupt (write-1-to-clear the bits we handle).
	 */
	bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_INTSTAT,
	    intstat & INTMASK);

	/*
	 * Check for receive interrupts.
	 */
	if (intstat & (INTSTAT_RCC | INTSTAT_RXE | INTSTAT_RQE)) {
		/* Walk the RX ring until we hit a descriptor the chip owns. */
		for (i = sc->sc_rxptr;; i = EPIC_NEXTRX(i)) {
			rxd = EPIC_CDRX(sc, i);
			ds = EPIC_DSRX(sc, i);

			EPIC_CDRXSYNC(sc, i,
			    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

			rxstatus = rxd->er_rxstatus;
			if (rxstatus & ER_RXSTAT_OWNER) {
				/*
				 * We have processed all of the
				 * receive buffers.
				 */
				break;
			}

			/*
			 * Make sure the packet arrived intact.  If an error
			 * occurred, update stats and reset the descriptor.
			 * The buffer will be reused the next time the
			 * descriptor comes up in the ring.
			 */
			if ((rxstatus & ER_RXSTAT_PKTINTACT) == 0) {
				if (rxstatus & ER_RXSTAT_CRCERROR)
					printf("%s: CRC error\n",
					    sc->sc_dev.dv_xname);
				if (rxstatus & ER_RXSTAT_ALIGNERROR)
					printf("%s: alignment error\n",
					    sc->sc_dev.dv_xname);
				ifp->if_ierrors++;
				EPIC_INIT_RXDESC(sc, i);
				continue;
			}

			bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
			    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

			/*
			 * The EPIC includes the CRC with every packet;
			 * trim it.
			 */
			len = RXSTAT_RXLENGTH(rxstatus) - ETHER_CRC_LEN;

			if (len < sizeof(struct ether_header)) {
				/*
				 * Runt packet; drop it now.
				 */
				ifp->if_ierrors++;
				EPIC_INIT_RXDESC(sc, i);
				bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
				    ds->ds_dmamap->dm_mapsize,
				    BUS_DMASYNC_PREREAD);
				continue;
			}

			/*
			 * If the packet is small enough to fit in a
			 * single header mbuf, allocate one and copy
			 * the data into it.  This greatly reduces
			 * memory consumption when we receive lots
			 * of small packets.
			 *
			 * Otherwise, we add a new buffer to the receive
			 * chain.  If this fails, we drop the packet and
			 * recycle the old buffer.
			 */
			if (epic_copy_small != 0 && len <= MHLEN) {
				MGETHDR(m, M_DONTWAIT, MT_DATA);
				if (m == NULL)
					goto dropit;
				memcpy(mtod(m, caddr_t),
				    mtod(ds->ds_mbuf, caddr_t), len);
				EPIC_INIT_RXDESC(sc, i);
				bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
				    ds->ds_dmamap->dm_mapsize,
				    BUS_DMASYNC_PREREAD);
			} else {
				m = ds->ds_mbuf;
				if (epic_add_rxbuf(sc, i) != 0) {
 dropit:
					ifp->if_ierrors++;
					EPIC_INIT_RXDESC(sc, i);
					bus_dmamap_sync(sc->sc_dmat,
					    ds->ds_dmamap, 0,
					    ds->ds_dmamap->dm_mapsize,
					    BUS_DMASYNC_PREREAD);
					continue;
				}
			}

			m->m_pkthdr.rcvif = ifp;
			m->m_pkthdr.len = m->m_len = len;

#if NBPFILTER > 0
			/*
			 * Pass this up to any BPF listeners, but only
			 * pass it up the stack if its for us.
			 */
			if (ifp->if_bpf)
				bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_IN);
#endif

			/* Pass it on. */
			ether_input_mbuf(ifp, m);
			ifp->if_ipackets++;
		}

		/* Update the receive pointer. */
		sc->sc_rxptr = i;

		/*
		 * Check for receive queue underflow.
		 */
		if (intstat & INTSTAT_RQE) {
			printf("%s: receiver queue empty\n",
			    sc->sc_dev.dv_xname);
			/*
			 * Ring is already built; just restart the
			 * receiver.
			 */
			bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_PRCDAR,
			    EPIC_CDRXADDR(sc, sc->sc_rxptr));
			bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_COMMAND,
			    COMMAND_RXQUEUED | COMMAND_START_RX);
		}
	}

	/*
	 * Check for transmission complete interrupts.
	 */
	if (intstat & (INTSTAT_TXC | INTSTAT_TXU)) {
		ifp->if_flags &= ~IFF_OACTIVE;
		/* Reap completed TX descriptors, oldest first. */
		for (i = sc->sc_txdirty; sc->sc_txpending != 0;
		    i = EPIC_NEXTTX(i), sc->sc_txpending--) {
			txd = EPIC_CDTX(sc, i);
			ds = EPIC_DSTX(sc, i);

			EPIC_CDTXSYNC(sc, i,
			    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

			txstatus = txd->et_txstatus;
			if (txstatus & ET_TXSTAT_OWNER)
				break;

			EPIC_CDFLSYNC(sc, i, BUS_DMASYNC_POSTWRITE);

			bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap,
			    0, ds->ds_dmamap->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
			m_freem(ds->ds_mbuf);
			ds->ds_mbuf = NULL;

			/*
			 * Check for errors and collisions.
			 */
			if ((txstatus & ET_TXSTAT_PACKETTX) == 0)
				ifp->if_oerrors++;
			else
				ifp->if_opackets++;
			ifp->if_collisions +=
			    TXSTAT_COLLISIONS(txstatus);
			if (txstatus & ET_TXSTAT_CARSENSELOST)
				printf("%s: lost carrier\n",
				    sc->sc_dev.dv_xname);
		}

		/* Update the dirty transmit buffer pointer. */
		sc->sc_txdirty = i;

		/*
		 * Cancel the watchdog timer if there are no pending
		 * transmissions.
		 */
		if (sc->sc_txpending == 0)
			ifp->if_timer = 0;

		/*
		 * Kick the transmitter after a DMA underrun.
		 */
		if (intstat & INTSTAT_TXU) {
			printf("%s: transmit underrun\n", sc->sc_dev.dv_xname);
			bus_space_write_4(sc->sc_st, sc->sc_sh,
			    EPIC_COMMAND, COMMAND_TXUGO);
			if (sc->sc_txpending)
				bus_space_write_4(sc->sc_st, sc->sc_sh,
				    EPIC_COMMAND, COMMAND_TXQUEUED);
		}

		/*
		 * Try to get more packets going.
		 */
		epic_start(ifp);
	}

	/*
	 * Check for fatal interrupts.
	 */
	if (intstat & INTSTAT_FATAL_INT) {
		if (intstat & INTSTAT_PTA)
			printf("%s: PCI target abort error\n",
			    sc->sc_dev.dv_xname);
		else if (intstat & INTSTAT_PMA)
			printf("%s: PCI master abort error\n",
			    sc->sc_dev.dv_xname);
		else if (intstat & INTSTAT_APE)
			printf("%s: PCI address parity error\n",
			    sc->sc_dev.dv_xname);
		else if (intstat & INTSTAT_DPE)
			printf("%s: PCI data parity error\n",
			    sc->sc_dev.dv_xname);
		else
			printf("%s: unknown fatal error\n",
			    sc->sc_dev.dv_xname);
		(void) epic_init(ifp);
	}

	/*
	 * Check for more interrupts.
	 */
	goto top;
}

/*
 * One second timer, used to tick the MII.
 */
void
epic_tick(void *arg)
{
	struct epic_softc *sc = arg;
	int s;

	s = splnet();
	mii_tick(&sc->sc_mii);
	splx(s);

	/* Re-arm ourselves for the next second. */
	timeout_add_sec(&sc->sc_mii_timeout, 1);
}

/*
 * Fixup the clock source on the EPIC.
 */
void
epic_fixup_clock_source(struct epic_softc *sc)
{
	int i;

	/*
	 * According to SMC Application Note 7-15, the EPIC's clock
	 * source is incorrect following a reset.  This manifests itself
	 * as failure to recognize when host software has written to
	 * a register on the EPIC.  The appnote recommends issuing at
	 * least 16 consecutive writes to the CLOCK TEST bit to correctly
	 * configure the clock source.
	 */
	for (i = 0; i < 16; i++)
		bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_TEST,
		    TEST_CLOCKTEST);
}

/*
 * Perform a soft reset on the EPIC.
 *
 * The clock-source fixup is applied both before and after the reset,
 * since the reset itself re-introduces the erratum.
 */
void
epic_reset(struct epic_softc *sc)
{

	epic_fixup_clock_source(sc);

	bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_GENCTL, 0);
	delay(100);
	bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_GENCTL, GENCTL_SOFTRESET);
	delay(100);

	epic_fixup_clock_source(sc);
}

/*
 * Initialize the interface.  Must be called at splnet().
 *
 * Resets the chip, programs the station address and receive filter,
 * rebuilds both descriptor rings, enables interrupts, and starts the
 * receiver and the MII tick.  Returns 0 on success or an errno if RX
 * buffer allocation fails.
 */
int
epic_init(struct ifnet *ifp)
{
	struct epic_softc *sc = ifp->if_softc;
	bus_space_tag_t st = sc->sc_st;
	bus_space_handle_t sh = sc->sc_sh;
	struct epic_txdesc *txd;
	struct epic_descsoft *ds;
	u_int32_t genctl, reg0;
	int i, error = 0;

	/*
	 * Cancel any pending I/O.
	 */
	epic_stop(ifp, 0);

	/*
	 * Reset the EPIC to a known state.
	 */
	epic_reset(sc);

	/*
	 * Magical mystery initialization.
	 */
	bus_space_write_4(st, sh, EPIC_TXTEST, 0);

	/*
	 * Initialize the EPIC genctl register:
	 *
	 *	- 64 byte receive FIFO threshold
	 *	- automatic advance to next receive frame
	 */
	genctl = GENCTL_RX_FIFO_THRESH0 | GENCTL_ONECOPY;
#if BYTE_ORDER == BIG_ENDIAN
	genctl |= GENCTL_BIG_ENDIAN;
#endif
	bus_space_write_4(st, sh, EPIC_GENCTL, genctl);

	/*
	 * Reset the MII bus and PHY.  NVCTL is saved and restored around
	 * the sequence; the delays are required by the hardware.
	 */
	reg0 = bus_space_read_4(st, sh, EPIC_NVCTL);
	bus_space_write_4(st, sh, EPIC_NVCTL, reg0 | NVCTL_GPIO1 | NVCTL_GPOE1);
	bus_space_write_4(st, sh, EPIC_MIICFG, MIICFG_ENASER);
	bus_space_write_4(st, sh, EPIC_GENCTL, genctl | GENCTL_RESET_PHY);
	delay(100);
	bus_space_write_4(st, sh, EPIC_GENCTL, genctl);
	delay(1000);
	bus_space_write_4(st, sh, EPIC_NVCTL, reg0);

	/*
	 * Initialize Ethernet address (two bytes per LAN register,
	 * low byte first).
	 */
	reg0 = sc->sc_arpcom.ac_enaddr[1] << 8 | sc->sc_arpcom.ac_enaddr[0];
	bus_space_write_4(st, sh, EPIC_LAN0, reg0);
	reg0 = sc->sc_arpcom.ac_enaddr[3] << 8 | sc->sc_arpcom.ac_enaddr[2];
	bus_space_write_4(st, sh, EPIC_LAN1, reg0);
	reg0 = sc->sc_arpcom.ac_enaddr[5] << 8 | sc->sc_arpcom.ac_enaddr[4];
	bus_space_write_4(st, sh, EPIC_LAN2, reg0);

	/*
	 * Initialize receive control.  Remember the external buffer
	 * size setting.
	 */
	reg0 = bus_space_read_4(st, sh, EPIC_RXCON) &
	    (RXCON_EXTBUFSIZESEL1 | RXCON_EXTBUFSIZESEL0);
	reg0 |= (RXCON_RXMULTICAST | RXCON_RXBROADCAST);
	if (ifp->if_flags & IFF_PROMISC)
		reg0 |= RXCON_PROMISCMODE;
	bus_space_write_4(st, sh, EPIC_RXCON, reg0);

	/* Set the current media. */
	epic_mediachange(ifp);

	/* Set up the multicast hash table. */
	epic_set_mchash(sc);

	/*
	 * Initialize the transmit descriptor ring.  txlast is initialized
	 * to the end of the list so that it will wrap around to the first
	 * descriptor when the first packet is transmitted.
	 */
	for (i = 0; i < EPIC_NTXDESC; i++) {
		txd = EPIC_CDTX(sc, i);
		memset(txd, 0, sizeof(struct epic_txdesc));
		txd->et_bufaddr = EPIC_CDFLADDR(sc, i);
		txd->et_nextdesc = EPIC_CDTXADDR(sc, EPIC_NEXTTX(i));
		EPIC_CDTXSYNC(sc, i, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	}
	sc->sc_txpending = 0;
	sc->sc_txdirty = 0;
	sc->sc_txlast = EPIC_NTXDESC - 1;

	/*
	 * Initialize the receive descriptor ring, reusing any mbufs that
	 * survived from a previous run.
	 */
	for (i = 0; i < EPIC_NRXDESC; i++) {
		ds = EPIC_DSRX(sc, i);
		if (ds->ds_mbuf == NULL) {
			if ((error = epic_add_rxbuf(sc, i)) != 0) {
				printf("%s: unable to allocate or map rx "
				    "buffer %d error = %d\n",
				    sc->sc_dev.dv_xname, i, error);
				/*
				 * XXX Should attempt to run with fewer receive
				 * XXX buffers instead of just failing.
				 */
				epic_rxdrain(sc);
				goto out;
			}
		} else
			EPIC_INIT_RXDESC(sc, i);
	}
	sc->sc_rxptr = 0;

	/*
	 * Initialize the interrupt mask and enable interrupts.
	 */
	bus_space_write_4(st, sh, EPIC_INTMASK, INTMASK);
	bus_space_write_4(st, sh, EPIC_GENCTL, genctl | GENCTL_INTENA);

	/*
	 * Give the transmit and receive rings to the EPIC.
	 */
	bus_space_write_4(st, sh, EPIC_PTCDAR,
	    EPIC_CDTXADDR(sc, EPIC_NEXTTX(sc->sc_txlast)));
	bus_space_write_4(st, sh, EPIC_PRCDAR,
	    EPIC_CDRXADDR(sc, sc->sc_rxptr));

	/*
	 * Set the EPIC in motion.
	 */
	bus_space_write_4(st, sh, EPIC_COMMAND,
	    COMMAND_RXQUEUED | COMMAND_START_RX);

	/*
	 * ...all done!
	 */
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	/*
	 * Start the one second clock.
	 */
	timeout_add_sec(&sc->sc_mii_timeout, 1);

	/*
	 * Attempt to start output on the interface.
	 */
	epic_start(ifp);

 out:
	if (error)
		printf("%s: interface not running\n", sc->sc_dev.dv_xname);
	return (error);
}

/*
 * Drain the receive queue: unload and free every RX mbuf still held
 * by the driver.
 */
void
epic_rxdrain(struct epic_softc *sc)
{
	struct epic_descsoft *ds;
	int i;

	for (i = 0; i < EPIC_NRXDESC; i++) {
		ds = EPIC_DSRX(sc, i);
		if (ds->ds_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
			m_freem(ds->ds_mbuf);
			ds->ds_mbuf = NULL;
		}
	}
}

/*
 * Stop transmission on the interface.
 *
 * If "disable" is set the RX buffers are drained as well; otherwise
 * they are kept so epic_init() can reuse them.
 */
void
epic_stop(struct ifnet *ifp, int disable)
{
	struct epic_softc *sc = ifp->if_softc;
	bus_space_tag_t st = sc->sc_st;
	bus_space_handle_t sh = sc->sc_sh;
	struct epic_descsoft *ds;
	u_int32_t reg;
	int i;

	/*
	 * Stop the one second clock.
	 */
	timeout_del(&sc->sc_mii_timeout);

	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;

	/* Down the MII. */
	mii_down(&sc->sc_mii);

	/* Paranoia... */
	epic_fixup_clock_source(sc);

	/*
	 * Disable interrupts.
	 */
	reg = bus_space_read_4(st, sh, EPIC_GENCTL);
	bus_space_write_4(st, sh, EPIC_GENCTL, reg & ~GENCTL_INTENA);
	bus_space_write_4(st, sh, EPIC_INTMASK, 0);

	/*
	 * Stop the DMA engine and take the receiver off-line.
	 */
	bus_space_write_4(st, sh, EPIC_COMMAND, COMMAND_STOP_RDMA |
	    COMMAND_STOP_TDMA | COMMAND_STOP_RX);

	/*
	 * Release any queued transmit buffers.
	 */
	for (i = 0; i < EPIC_NTXDESC; i++) {
		ds = EPIC_DSTX(sc, i);
		if (ds->ds_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
			m_freem(ds->ds_mbuf);
			ds->ds_mbuf = NULL;
		}
	}

	if (disable)
		epic_rxdrain(sc);
}

/*
 * Read the EPIC Serial EEPROM.
 *
 * Bit-bangs the serial EEPROM protocol through EECTL: for each of
 * "wordcnt" words starting at "word", shift out the READ opcode and
 * 6-bit address MSB-first, then clock in 16 data bits.  Each register
 * write busy-waits on EECTL_EERDY.
 */
void
epic_read_eeprom(struct epic_softc *sc, int word, int wordcnt, u_int16_t *data)
{
	bus_space_tag_t st = sc->sc_st;
	bus_space_handle_t sh = sc->sc_sh;
	u_int16_t reg;
	int i, x;

#define	EEPROM_WAIT_READY(st, sh) \
	while ((bus_space_read_4((st), (sh), EPIC_EECTL) & EECTL_EERDY) == 0) \
		/* nothing */

	/*
	 * Enable the EEPROM.
	 */
	bus_space_write_4(st, sh, EPIC_EECTL, EECTL_ENABLE);
	EEPROM_WAIT_READY(st, sh);

	for (i = 0; i < wordcnt; i++) {
		/* Send CHIP SELECT for one clock tick. */
		bus_space_write_4(st, sh, EPIC_EECTL, EECTL_ENABLE|EECTL_EECS);
		EEPROM_WAIT_READY(st, sh);

		/* Shift in the READ opcode. */
		for (x = 3; x > 0; x--) {
			reg = EECTL_ENABLE|EECTL_EECS;
			if (EPIC_EEPROM_OPC_READ & (1 << (x - 1)))
				reg |= EECTL_EEDI;
			bus_space_write_4(st, sh, EPIC_EECTL, reg);
			EEPROM_WAIT_READY(st, sh);
			bus_space_write_4(st, sh, EPIC_EECTL, reg|EECTL_EESK);
			EEPROM_WAIT_READY(st, sh);
			bus_space_write_4(st, sh, EPIC_EECTL, reg);
			EEPROM_WAIT_READY(st, sh);
		}

		/* Shift in address. */
		for (x = 6; x > 0; x--) {
			reg = EECTL_ENABLE|EECTL_EECS;
			if ((word + i) & (1 << (x - 1)))
				reg |= EECTL_EEDI;
			bus_space_write_4(st, sh, EPIC_EECTL, reg);
			EEPROM_WAIT_READY(st, sh);
			bus_space_write_4(st, sh, EPIC_EECTL, reg|EECTL_EESK);
			EEPROM_WAIT_READY(st, sh);
			bus_space_write_4(st, sh, EPIC_EECTL, reg);
			EEPROM_WAIT_READY(st, sh);
		}

		/* Shift out data. */
		reg = EECTL_ENABLE|EECTL_EECS;
		data[i] = 0;
		for (x = 16; x > 0; x--) {
			bus_space_write_4(st, sh, EPIC_EECTL, reg|EECTL_EESK);
			EEPROM_WAIT_READY(st, sh);
			if (bus_space_read_4(st, sh, EPIC_EECTL) & EECTL_EEDO)
				data[i] |= (1 << (x - 1));
			bus_space_write_4(st, sh, EPIC_EECTL, reg);
			EEPROM_WAIT_READY(st, sh);
		}

		/* Clear CHIP SELECT. */
		bus_space_write_4(st, sh, EPIC_EECTL, EECTL_ENABLE);
		EEPROM_WAIT_READY(st, sh);
	}

	/*
	 * Disable the EEPROM.
	 */
	bus_space_write_4(st, sh, EPIC_EECTL, 0);

#undef EEPROM_WAIT_READY
}

/*
 * Add a receive buffer to the indicated descriptor.
1218 */ 1219 int 1220 epic_add_rxbuf(struct epic_softc *sc, int idx) 1221 { 1222 struct epic_descsoft *ds = EPIC_DSRX(sc, idx); 1223 struct mbuf *m; 1224 int error; 1225 1226 MGETHDR(m, M_DONTWAIT, MT_DATA); 1227 if (m == NULL) 1228 return (ENOBUFS); 1229 1230 MCLGET(m, M_DONTWAIT); 1231 if ((m->m_flags & M_EXT) == 0) { 1232 m_freem(m); 1233 return (ENOBUFS); 1234 } 1235 1236 if (ds->ds_mbuf != NULL) 1237 bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap); 1238 1239 ds->ds_mbuf = m; 1240 1241 error = bus_dmamap_load(sc->sc_dmat, ds->ds_dmamap, 1242 m->m_ext.ext_buf, m->m_ext.ext_size, NULL, 1243 BUS_DMA_READ|BUS_DMA_NOWAIT); 1244 if (error) { 1245 printf("%s: can't load rx DMA map %d, error = %d\n", 1246 sc->sc_dev.dv_xname, idx, error); 1247 panic("epic_add_rxbuf"); /* XXX */ 1248 } 1249 1250 bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0, 1251 ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD); 1252 1253 EPIC_INIT_RXDESC(sc, idx); 1254 1255 return (0); 1256 } 1257 1258 /* 1259 * Set the EPIC multicast hash table. 1260 * 1261 * NOTE: We rely on a recently-updated mii_media_active here! 1262 */ 1263 void 1264 epic_set_mchash(struct epic_softc *sc) 1265 { 1266 struct arpcom *ac = &sc->sc_arpcom; 1267 struct ifnet *ifp = &sc->sc_arpcom.ac_if; 1268 struct ether_multi *enm; 1269 struct ether_multistep step; 1270 u_int32_t hash, mchash[4]; 1271 1272 /* 1273 * Set up the multicast address filter by passing all multicast 1274 * addresses through a CRC generator, and then using the low-order 1275 * 6 bits as an index into the 64 bit multicast hash table (only 1276 * the lower 16 bits of each 32 bit multicast hash register are 1277 * valid). The high order bits select the register, while the 1278 * rest of the bits select the bit within the register. 1279 */ 1280 1281 if (ifp->if_flags & IFF_PROMISC) 1282 goto allmulti; 1283 1284 if (IFM_SUBTYPE(sc->sc_mii.mii_media_active) == IFM_10_T) { 1285 /* XXX hardware bug in 10Mbps mode. 
*/ 1286 goto allmulti; 1287 } 1288 1289 mchash[0] = mchash[1] = mchash[2] = mchash[3] = 0; 1290 1291 ETHER_FIRST_MULTI(step, ac, enm); 1292 while (enm != NULL) { 1293 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) 1294 goto allmulti; 1295 1296 hash = ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN); 1297 hash >>= 26; 1298 1299 /* Set the corresponding bit in the hash table. */ 1300 mchash[hash >> 4] |= 1 << (hash & 0xf); 1301 1302 ETHER_NEXT_MULTI(step, enm); 1303 } 1304 1305 ifp->if_flags &= ~IFF_ALLMULTI; 1306 goto sethash; 1307 1308 allmulti: 1309 ifp->if_flags |= IFF_ALLMULTI; 1310 mchash[0] = mchash[1] = mchash[2] = mchash[3] = 0xffff; 1311 1312 sethash: 1313 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MC0, mchash[0]); 1314 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MC1, mchash[1]); 1315 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MC2, mchash[2]); 1316 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MC3, mchash[3]); 1317 } 1318 1319 /* 1320 * Wait for the MII to become ready. 1321 */ 1322 int 1323 epic_mii_wait(struct epic_softc *sc, u_int32_t rw) 1324 { 1325 int i; 1326 1327 for (i = 0; i < 50; i++) { 1328 if ((bus_space_read_4(sc->sc_st, sc->sc_sh, EPIC_MMCTL) & rw) 1329 == 0) 1330 break; 1331 delay(2); 1332 } 1333 if (i == 50) { 1334 printf("%s: MII timed out\n", sc->sc_dev.dv_xname); 1335 return (1); 1336 } 1337 1338 return (0); 1339 } 1340 1341 /* 1342 * Read from the MII. 1343 */ 1344 int 1345 epic_mii_read(struct device *self, int phy, int reg) 1346 { 1347 struct epic_softc *sc = (struct epic_softc *)self; 1348 1349 if (epic_mii_wait(sc, MMCTL_WRITE)) 1350 return (0); 1351 1352 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MMCTL, 1353 MMCTL_ARG(phy, reg, MMCTL_READ)); 1354 1355 if (epic_mii_wait(sc, MMCTL_READ)) 1356 return (0); 1357 1358 return (bus_space_read_4(sc->sc_st, sc->sc_sh, EPIC_MMDATA) & 1359 MMDATA_MASK); 1360 } 1361 1362 /* 1363 * Write to the MII. 
 */
void
epic_mii_write(struct device *self, int phy, int reg, int val)
{
	struct epic_softc *sc = (struct epic_softc *)self;

	/* Wait for any previous access to complete. */
	if (epic_mii_wait(sc, MMCTL_WRITE))
		return;

	/* Load the data register, then kick off the write. */
	bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MMDATA, val);
	bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MMCTL,
	    MMCTL_ARG(phy, reg, MMCTL_WRITE));
}

/*
 * Callback from PHY when media changes.
 */
void
epic_statchg(struct device *self)
{
	struct epic_softc *sc = (struct epic_softc *)self;
	u_int32_t txcon, miicfg;

	/*
	 * Update loopback bits in TXCON to reflect duplex mode.
	 */
	txcon = bus_space_read_4(sc->sc_st, sc->sc_sh, EPIC_TXCON);
	if (sc->sc_mii.mii_media_active & IFM_FDX)
		txcon |= (TXCON_LOOPBACK_D1|TXCON_LOOPBACK_D2);
	else
		txcon &= ~(TXCON_LOOPBACK_D1|TXCON_LOOPBACK_D2);
	bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_TXCON, txcon);

	/* On some cards we need manually set fullduplex led */
	if (sc->sc_hwflags & EPIC_DUPLEXLED_ON_694) {
		miicfg = bus_space_read_4(sc->sc_st, sc->sc_sh, EPIC_MIICFG);
		if (IFM_OPTIONS(sc->sc_mii.mii_media_active) & IFM_FDX)
			miicfg |= MIICFG_ENABLE;
		else
			miicfg &= ~MIICFG_ENABLE;
		bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MIICFG, miicfg);
	}

	/*
	 * There is a multicast filter bug in 10Mbps mode.  Kick the
	 * multicast filter in case the speed changed.
	 */
	epic_set_mchash(sc);
}

/*
 * Callback from ifmedia to request current media status.
 */
void
epic_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct epic_softc *sc = ifp->if_softc;

	/* Refresh mii_media_status/mii_media_active before reporting. */
	mii_pollstat(&sc->sc_mii);
	ifmr->ifm_status = sc->sc_mii.mii_media_status;
	ifmr->ifm_active = sc->sc_mii.mii_media_active;
}

/*
 * Callback from ifmedia to request new media setting.
 */
int
epic_mediachange(struct ifnet *ifp)
{
	struct epic_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_mii;
	struct ifmedia *ifm = &mii->mii_media;
	int media = ifm->ifm_cur->ifm_media;
	u_int32_t miicfg;
	struct mii_softc *miisc;
	int cfg;

	/* Nothing to program while the interface is down. */
	if (!(ifp->if_flags & IFF_UP))
		return (0);

	if (IFM_INST(media) != sc->sc_serinst) {
		/* If we're not selecting serial interface, select MII mode */
#ifdef EPICMEDIADEBUG
		printf("%s: parallel mode\n", ifp->if_xname);
#endif
		miicfg = bus_space_read_4(sc->sc_st, sc->sc_sh, EPIC_MIICFG);
		miicfg &= ~MIICFG_SERMODEENA;
		bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MIICFG, miicfg);
	}

	mii_mediachg(mii);

	if (IFM_INST(media) == sc->sc_serinst) {
		/* select serial interface */
#ifdef EPICMEDIADEBUG
		printf("%s: serial mode\n", ifp->if_xname);
#endif
		miicfg = bus_space_read_4(sc->sc_st, sc->sc_sh, EPIC_MIICFG);
		miicfg |= (MIICFG_SERMODEENA | MIICFG_ENABLE);
		bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MIICFG, miicfg);

		/* There is no driver to fill this */
		mii->mii_media_active = media;
		mii->mii_media_status = 0;

		/* Propagate duplex/filter settings for the new media. */
		epic_statchg(&sc->sc_dev);
		return (0);
	}

	/* Lookup selected PHY */
	for (miisc = LIST_FIRST(&mii->mii_phys); miisc != NULL;
	    miisc = LIST_NEXT(miisc, mii_list)) {
		if (IFM_INST(media) == miisc->mii_inst)
			break;
	}
	if (!miisc) {
		/* The selected instance should always have a PHY. */
		printf("epic_mediachange: can't happen\n"); /* ??? panic */
		return (0);
	}
#ifdef EPICMEDIADEBUG
	printf("%s: using phy %s\n", ifp->if_xname,
	    miisc->mii_dev.dv_xname);
#endif

	if (miisc->mii_flags & MIIF_HAVEFIBER) {
		/* XXX XXX assume it's a Level1 - should check */

		/* We have to powerup fiber transceivers */
		cfg = PHY_READ(miisc, MII_LXTPHY_CONFIG);
		if (IFM_SUBTYPE(media) == IFM_100_FX) {
#ifdef EPICMEDIADEBUG
			printf("%s: power up fiber\n", ifp->if_xname);
#endif
			cfg |= (CONFIG_LEDC1 | CONFIG_LEDC0);
		} else {
#ifdef EPICMEDIADEBUG
			printf("%s: power down fiber\n", ifp->if_xname);
#endif
			cfg &= ~(CONFIG_LEDC1 | CONFIG_LEDC0);
		}
		PHY_WRITE(miisc, MII_LXTPHY_CONFIG, cfg);
	}

	return (0);
}