/*	$NetBSD: smc83c170.c,v 1.53 2003/01/31 00:26:31 thorpej Exp $	*/

/*-
 * Copyright (c) 1998, 1999 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Device driver for the Standard Microsystems Corp. 83C170
 * Ethernet PCI Integrated Controller (EPIC/100).
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: smc83c170.c,v 1.53 2003/01/31 00:26:31 thorpej Exp $");

#include "bpfilter.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>

#include <uvm/uvm_extern.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <machine/bus.h>
#include <machine/intr.h>

#include <dev/mii/miivar.h>
#include <dev/mii/lxtphyreg.h>

#include <dev/ic/smc83c170reg.h>
#include <dev/ic/smc83c170var.h>

void	epic_start __P((struct ifnet *));
void	epic_watchdog __P((struct ifnet *));
int	epic_ioctl __P((struct ifnet *, u_long, caddr_t));
int	epic_init __P((struct ifnet *));
void	epic_stop __P((struct ifnet *, int));

void	epic_shutdown __P((void *));

void	epic_reset __P((struct epic_softc *));
void	epic_rxdrain __P((struct epic_softc *));
int	epic_add_rxbuf __P((struct epic_softc *, int));
void	epic_read_eeprom __P((struct epic_softc *, int, int, u_int16_t *));
void	epic_set_mchash __P((struct epic_softc *));
void	epic_fixup_clock_source __P((struct epic_softc *));
int	epic_mii_read __P((struct device *, int, int));
void	epic_mii_write __P((struct device *, int, int, int));
int	epic_mii_wait __P((struct epic_softc *, u_int32_t));
void	epic_tick __P((void *));

void	epic_statchg __P((struct device *));
int	epic_mediachange __P((struct ifnet *));
void	epic_mediastatus __P((struct ifnet *, struct ifmediareq *));

#define	INTMASK	(INTSTAT_FATAL_INT | INTSTAT_TXU | \
	    INTSTAT_TXC | INTSTAT_RXE | INTSTAT_RQE | INTSTAT_RCC)

int	epic_copy_small = 0;

#define	ETHER_PAD_LEN	(ETHER_MIN_LEN - ETHER_CRC_LEN)

/*
 * Attach an EPIC interface to the system.
 */
void
epic_attach(sc)
	struct epic_softc *sc;
{
	bus_space_tag_t st = sc->sc_st;
	bus_space_handle_t sh = sc->sc_sh;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	int rseg, error, miiflags;
	u_int i;
	bus_dma_segment_t seg;
	u_int8_t enaddr[ETHER_ADDR_LEN], devname[12 + 1];
	u_int16_t myea[ETHER_ADDR_LEN / 2], mydevname[6];
	char *nullbuf;

	callout_init(&sc->sc_mii_callout);

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmat,
	    sizeof(struct epic_control_data) + ETHER_PAD_LEN, PAGE_SIZE, 0,
	    &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
		aprint_error(
		    "%s: unable to allocate control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_0;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
	    sizeof(struct epic_control_data) + ETHER_PAD_LEN,
	    (caddr_t *)&sc->sc_control_data,
	    BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
		aprint_error("%s: unable to map control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_1;
	}
	nullbuf =
	    (char *)sc->sc_control_data + sizeof(struct epic_control_data);
	memset(nullbuf, 0, ETHER_PAD_LEN);
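
	/*
	 * Layout note: the descriptor rings and fraglists live in a single
	 * DMA-coherent allocation (struct epic_control_data), with this
	 * ETHER_PAD_LEN zero buffer appended directly after it.  epic_start()
	 * chains the zero buffer as an extra fragment to pad short frames
	 * out to the minimum Ethernet length instead of copying each packet.
	 */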
	if ((error = bus_dmamap_create(sc->sc_dmat,
	    sizeof(struct epic_control_data), 1,
	    sizeof(struct epic_control_data), 0, BUS_DMA_NOWAIT,
	    &sc->sc_cddmamap)) != 0) {
		aprint_error("%s: unable to create control data DMA map, "
		    "error = %d\n", sc->sc_dev.dv_xname, error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
	    sc->sc_control_data, sizeof(struct epic_control_data), NULL,
	    BUS_DMA_NOWAIT)) != 0) {
		aprint_error(
		    "%s: unable to load control data DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_3;
	}

	/*
	 * Create the transmit buffer DMA maps.
	 */
	for (i = 0; i < EPIC_NTXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    EPIC_NFRAGS, MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &EPIC_DSTX(sc, i)->ds_dmamap)) != 0) {
			aprint_error("%s: unable to create tx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_4;
		}
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < EPIC_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &EPIC_DSRX(sc, i)->ds_dmamap)) != 0) {
			aprint_error("%s: unable to create rx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_5;
		}
		EPIC_DSRX(sc, i)->ds_mbuf = NULL;
	}

	/*
	 * Create and map the pad buffer.
	 */
	if ((error = bus_dmamap_create(sc->sc_dmat, ETHER_PAD_LEN, 1,
	    ETHER_PAD_LEN, 0, BUS_DMA_NOWAIT, &sc->sc_nulldmamap)) != 0) {
		aprint_error("%s: unable to create pad buffer DMA map, "
		    "error = %d\n", sc->sc_dev.dv_xname, error);
		goto fail_5;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_nulldmamap,
	    nullbuf, ETHER_PAD_LEN, NULL, BUS_DMA_NOWAIT)) != 0) {
		aprint_error("%s: unable to load pad buffer DMA map, "
		    "error = %d\n", sc->sc_dev.dv_xname, error);
		goto fail_6;
	}
	bus_dmamap_sync(sc->sc_dmat, sc->sc_nulldmamap, 0, ETHER_PAD_LEN,
	    BUS_DMASYNC_PREWRITE);

	/*
	 * Bring the chip out of low-power mode and reset it to a known state.
	 */
	bus_space_write_4(st, sh, EPIC_GENCTL, 0);
	epic_reset(sc);

	/*
	 * Read the Ethernet address from the EEPROM.
	 */
	epic_read_eeprom(sc, 0, (sizeof(myea) / sizeof(myea[0])), myea);
	for (i = 0; i < sizeof(myea) / sizeof(myea[0]); i++) {
		enaddr[i * 2] = myea[i] & 0xff;
		enaddr[i * 2 + 1] = myea[i] >> 8;
	}
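
	/*
	 * EEPROM layout, as far as this driver uses it: the station
	 * address occupies words 0-2 (little-endian, hence the byte
	 * swizzling above), and an ASCII device name, padded with
	 * spaces, apparently lives at word offset 0x2c.
	 */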
	/*
	 * ...and the device name.
	 */
	epic_read_eeprom(sc, 0x2c, (sizeof(mydevname) / sizeof(mydevname[0])),
	    mydevname);
	for (i = 0; i < sizeof(mydevname) / sizeof(mydevname[0]); i++) {
		devname[i * 2] = mydevname[i] & 0xff;
		devname[i * 2 + 1] = mydevname[i] >> 8;
	}

	/* NUL-terminate, then trim trailing spaces. */
	devname[sizeof(mydevname)] = '\0';
	for (i = sizeof(mydevname); i > 0; i--) {
		if (devname[i - 1] != ' ')
			break;
		devname[i - 1] = '\0';
	}

	aprint_normal("%s: %s, Ethernet address %s\n", sc->sc_dev.dv_xname,
	    devname, ether_sprintf(enaddr));

	miiflags = 0;
	if (sc->sc_hwflags & EPIC_HAS_MII_FIBER)
		miiflags |= MIIF_HAVEFIBER;

	/*
	 * Initialize our media structures and probe the MII.
	 */
	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = epic_mii_read;
	sc->sc_mii.mii_writereg = epic_mii_write;
	sc->sc_mii.mii_statchg = epic_statchg;
	ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, epic_mediachange,
	    epic_mediastatus);
	mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, miiflags);
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
	} else
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);

	if (sc->sc_hwflags & EPIC_HAS_BNC) {
		/* use the next free media instance */
		sc->sc_serinst = sc->sc_mii.mii_instance++;
		ifmedia_add(&sc->sc_mii.mii_media,
		    IFM_MAKEWORD(IFM_ETHER, IFM_10_2, 0, sc->sc_serinst),
		    0, NULL);
		aprint_normal("%s: 10base2/BNC\n", sc->sc_dev.dv_xname);
	} else
		sc->sc_serinst = -1;

	strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = epic_ioctl;
	ifp->if_start = epic_start;
	ifp->if_watchdog = epic_watchdog;
	ifp->if_init = epic_init;
	ifp->if_stop = epic_stop;
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * We can support 802.1Q VLAN-sized frames.
	 */
	sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU;

	/*
	 * Attach the interface.
	 */
	if_attach(ifp);
	ether_ifattach(ifp, enaddr);

	/*
	 * Make sure the interface is shutdown during reboot.
	 */
	sc->sc_sdhook = shutdownhook_establish(epic_shutdown, sc);
	if (sc->sc_sdhook == NULL)
		aprint_error("%s: WARNING: unable to establish shutdown hook\n",
		    sc->sc_dev.dv_xname);
	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
 fail_6:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_nulldmamap);
 fail_5:
	for (i = 0; i < EPIC_NRXDESC; i++) {
		if (EPIC_DSRX(sc, i)->ds_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    EPIC_DSRX(sc, i)->ds_dmamap);
	}
 fail_4:
	for (i = 0; i < EPIC_NTXDESC; i++) {
		if (EPIC_DSTX(sc, i)->ds_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    EPIC_DSTX(sc, i)->ds_dmamap);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
 fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
 fail_2:
	bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_control_data,
	    sizeof(struct epic_control_data) + ETHER_PAD_LEN);
 fail_1:
	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
 fail_0:
	return;
}
/*
 * Shutdown hook.  Make sure the interface is stopped at reboot.
 */
void
epic_shutdown(arg)
	void *arg;
{
	struct epic_softc *sc = arg;

	epic_stop(&sc->sc_ethercom.ec_if, 1);
}

/*
 * Start packet transmission on the interface.
 *	[ifnet interface function]
 */
void
epic_start(ifp)
	struct ifnet *ifp;
{
	struct epic_softc *sc = ifp->if_softc;
	struct mbuf *m0, *m;
	struct epic_txdesc *txd;
	struct epic_descsoft *ds;
	struct epic_fraglist *fr;
	bus_dmamap_t dmamap;
	int error, firsttx, nexttx, opending, seg;

	/*
	 * Remember the previous txpending and the first transmit
	 * descriptor we use.
	 */
	opending = sc->sc_txpending;
	firsttx = EPIC_NEXTTX(sc->sc_txlast);

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	while (sc->sc_txpending < EPIC_NTXDESC) {
		/*
		 * Grab a packet off the queue.
		 */
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;
		m = NULL;

		/*
		 * Get the last and next available transmit descriptor.
		 */
		nexttx = EPIC_NEXTTX(sc->sc_txlast);
		txd = EPIC_CDTX(sc, nexttx);
		fr = EPIC_CDFL(sc, nexttx);
		ds = EPIC_DSTX(sc, nexttx);
		dmamap = ds->ds_dmamap;

		/*
		 * Load the DMA map.  If this fails, the packet either
		 * didn't fit in the allotted number of frags, or we were
		 * short on resources.  In this case, we'll copy and try
		 * again.
		 */
		if ((error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
		    BUS_DMA_WRITE|BUS_DMA_NOWAIT)) != 0 ||
		    (m0->m_pkthdr.len < ETHER_PAD_LEN &&
		    dmamap->dm_nsegs == EPIC_NFRAGS)) {
			if (error == 0)
				bus_dmamap_unload(sc->sc_dmat, dmamap);

			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				printf("%s: unable to allocate Tx mbuf\n",
				    sc->sc_dev.dv_xname);
				break;
			}
			if (m0->m_pkthdr.len > MHLEN) {
				MCLGET(m, M_DONTWAIT);
				if ((m->m_flags & M_EXT) == 0) {
					printf("%s: unable to allocate Tx "
					    "cluster\n", sc->sc_dev.dv_xname);
					m_freem(m);
					break;
				}
			}
			m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, caddr_t));
			m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;
			error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap,
			    m, BUS_DMA_WRITE|BUS_DMA_NOWAIT);
			if (error) {
				printf("%s: unable to load Tx buffer, "
				    "error = %d\n", sc->sc_dev.dv_xname,
				    error);
				break;
			}
		}
		IFQ_DEQUEUE(&ifp->if_snd, m0);
		if (m != NULL) {
			m_freem(m0);
			m0 = m;
		}

		/* Initialize the fraglist. */
		for (seg = 0; seg < dmamap->dm_nsegs; seg++) {
			fr->ef_frags[seg].ef_addr =
			    dmamap->dm_segs[seg].ds_addr;
			fr->ef_frags[seg].ef_length =
			    dmamap->dm_segs[seg].ds_len;
		}
		if (m0->m_pkthdr.len < ETHER_PAD_LEN) {
			fr->ef_frags[seg].ef_addr = sc->sc_nulldma;
			fr->ef_frags[seg].ef_length =
			    ETHER_PAD_LEN - m0->m_pkthdr.len;
			seg++;
		}
		fr->ef_nfrags = seg;
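
		/*
		 * Worked example of the padding above: ETHER_PAD_LEN is
		 * ETHER_MIN_LEN (64) minus ETHER_CRC_LEN (4), i.e. 60
		 * bytes.  A 42-byte frame thus gets one extra 18-byte
		 * fragment pointing at the shared zero buffer, and the
		 * hardware is assumed to append the 4-byte CRC itself to
		 * reach the 64-byte minimum frame size on the wire.
		 */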
		EPIC_CDFLSYNC(sc, nexttx, BUS_DMASYNC_PREWRITE);

		/* Sync the DMA map. */
		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		/*
		 * Store a pointer to the packet so we can free it later.
		 */
		ds->ds_mbuf = m0;

		/*
		 * Fill in the transmit descriptor.
		 */
		txd->et_control = ET_TXCTL_LASTDESC | ET_TXCTL_FRAGLIST;
		txd->et_txlength = max(m0->m_pkthdr.len, ETHER_PAD_LEN);

		/*
		 * If this is the first descriptor we're enqueueing,
		 * don't give it to the EPIC yet.  That could cause
		 * a race condition.  We'll do it below.
		 */
		if (nexttx == firsttx)
			txd->et_txstatus = 0;
		else
			txd->et_txstatus = ET_TXSTAT_OWNER;

		EPIC_CDTXSYNC(sc, nexttx,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/* Advance the tx pointer. */
		sc->sc_txpending++;
		sc->sc_txlast = nexttx;

#if NBPFILTER > 0
		/*
		 * Pass the packet to any BPF listeners.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m0);
#endif
	}

	if (sc->sc_txpending == EPIC_NTXDESC) {
		/* No more slots left; notify upper layer. */
		ifp->if_flags |= IFF_OACTIVE;
	}

	if (sc->sc_txpending != opending) {
		/*
		 * We enqueued packets.  If the transmitter was idle,
		 * reset the txdirty pointer.
		 */
		if (opending == 0)
			sc->sc_txdirty = firsttx;

		/*
		 * Cause a transmit interrupt to happen on the
		 * last packet we enqueued.
		 */
		EPIC_CDTX(sc, sc->sc_txlast)->et_control |= ET_TXCTL_IAF;
		EPIC_CDTXSYNC(sc, sc->sc_txlast,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/*
		 * The entire packet chain is set up.  Give the
		 * first descriptor to the EPIC now.
		 */
		EPIC_CDTX(sc, firsttx)->et_txstatus = ET_TXSTAT_OWNER;
		EPIC_CDTXSYNC(sc, firsttx,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/* Start the transmitter. */
		bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_COMMAND,
		    COMMAND_TXQUEUED);

		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
	}
}

/*
 * Watchdog timer handler.
 *	[ifnet interface function]
 */
void
epic_watchdog(ifp)
	struct ifnet *ifp;
{
	struct epic_softc *sc = ifp->if_softc;

	printf("%s: device timeout\n", sc->sc_dev.dv_xname);
	ifp->if_oerrors++;

	(void) epic_init(ifp);
}

/*
 * Handle control requests from the operator.
 *	[ifnet interface function]
 */
int
epic_ioctl(ifp, cmd, data)
	struct ifnet *ifp;
	u_long cmd;
	caddr_t data;
{
	struct epic_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error;

	s = splnet();

	switch (cmd) {
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
		break;

	default:
		error = ether_ioctl(ifp, cmd, data);
		if (error == ENETRESET) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.  Update our idea of the current media;
			 * epic_set_mchash() needs to know what it is.
			 */
			mii_pollstat(&sc->sc_mii);
			epic_set_mchash(sc);
			error = 0;
		}
		break;
	}

	splx(s);
	return (error);
}

/*
 * Interrupt handler.
 */
int
epic_intr(arg)
	void *arg;
{
	struct epic_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct epic_rxdesc *rxd;
	struct epic_txdesc *txd;
	struct epic_descsoft *ds;
	struct mbuf *m;
	u_int32_t intstat;
	int i, claimed = 0;
	u_int len;

 top:
	/*
	 * Get the interrupt status from the EPIC.
	 */
	intstat = bus_space_read_4(sc->sc_st, sc->sc_sh, EPIC_INTSTAT);
	if ((intstat & INTSTAT_INT_ACTV) == 0)
		return (claimed);

	claimed = 1;

	/*
	 * Acknowledge the interrupt.
	 */
	bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_INTSTAT,
	    intstat & INTMASK);
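
	/*
	 * INTSTAT is assumed to be write-one-to-clear: writing the
	 * asserted bits (masked to those we handle) back acknowledges
	 * them.  We keep looping back to `top' above until
	 * INTSTAT_INT_ACTV deasserts.
	 */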
	/*
	 * Check for receive interrupts.
	 */
	if (intstat & (INTSTAT_RCC | INTSTAT_RXE | INTSTAT_RQE)) {
		for (i = sc->sc_rxptr;; i = EPIC_NEXTRX(i)) {
			rxd = EPIC_CDRX(sc, i);
			ds = EPIC_DSRX(sc, i);

			EPIC_CDRXSYNC(sc, i,
			    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

			if (rxd->er_rxstatus & ER_RXSTAT_OWNER) {
				/*
				 * We have processed all of the
				 * receive buffers.
				 */
				break;
			}

			/*
			 * Make sure the packet arrived intact.  If an error
			 * occurred, update stats and reset the descriptor.
			 * The buffer will be reused the next time the
			 * descriptor comes up in the ring.
			 */
			if ((rxd->er_rxstatus & ER_RXSTAT_PKTINTACT) == 0) {
				if (rxd->er_rxstatus & ER_RXSTAT_CRCERROR)
					printf("%s: CRC error\n",
					    sc->sc_dev.dv_xname);
				if (rxd->er_rxstatus & ER_RXSTAT_ALIGNERROR)
					printf("%s: alignment error\n",
					    sc->sc_dev.dv_xname);
				ifp->if_ierrors++;
				EPIC_INIT_RXDESC(sc, i);
				continue;
			}

			bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
			    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

			/*
			 * The EPIC includes the CRC with every packet.
			 */
			len = rxd->er_rxlength;

			if (len < sizeof(struct ether_header)) {
				/*
				 * Runt packet; drop it now.
				 */
				ifp->if_ierrors++;
				EPIC_INIT_RXDESC(sc, i);
				bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
				    ds->ds_dmamap->dm_mapsize,
				    BUS_DMASYNC_PREREAD);
				continue;
			}

			/*
			 * If the packet is small enough to fit in a
			 * single header mbuf, allocate one and copy
			 * the data into it.  This greatly reduces
			 * memory consumption when we receive lots
			 * of small packets.
			 *
			 * Otherwise, we add a new buffer to the receive
			 * chain.  If this fails, we drop the packet and
			 * recycle the old buffer.
			 */
			if (epic_copy_small != 0 && len <= MHLEN) {
				MGETHDR(m, M_DONTWAIT, MT_DATA);
				if (m == NULL)
					goto dropit;
				memcpy(mtod(m, caddr_t),
				    mtod(ds->ds_mbuf, caddr_t), len);
				EPIC_INIT_RXDESC(sc, i);
				bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
				    ds->ds_dmamap->dm_mapsize,
				    BUS_DMASYNC_PREREAD);
			} else {
				m = ds->ds_mbuf;
				if (epic_add_rxbuf(sc, i) != 0) {
 dropit:
					ifp->if_ierrors++;
					EPIC_INIT_RXDESC(sc, i);
					bus_dmamap_sync(sc->sc_dmat,
					    ds->ds_dmamap, 0,
					    ds->ds_dmamap->dm_mapsize,
					    BUS_DMASYNC_PREREAD);
					continue;
				}
			}

			m->m_flags |= M_HASFCS;
			m->m_pkthdr.rcvif = ifp;
			m->m_pkthdr.len = m->m_len = len;

#if NBPFILTER > 0
			/*
			 * Pass this up to any BPF listeners, but only
			 * pass it up the stack if it's for us.
			 */
			if (ifp->if_bpf)
				bpf_mtap(ifp->if_bpf, m);
#endif

			/* Pass it on. */
			(*ifp->if_input)(ifp, m);
			ifp->if_ipackets++;
		}

		/* Update the receive pointer. */
		sc->sc_rxptr = i;

		/*
		 * Check for receive queue underflow.
		 */
		if (intstat & INTSTAT_RQE) {
			printf("%s: receiver queue empty\n",
			    sc->sc_dev.dv_xname);
			/*
			 * Ring is already built; just restart the
			 * receiver.
			 */
			bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_PRCDAR,
			    EPIC_CDRXADDR(sc, sc->sc_rxptr));
			bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_COMMAND,
			    COMMAND_RXQUEUED | COMMAND_START_RX);
		}
	}
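
	/*
	 * Transmit completion mirrors receive above: the EPIC clears
	 * ET_TXSTAT_OWNER in each descriptor it has finished with, so
	 * we reclaim from sc_txdirty forward and stop at the first
	 * descriptor the chip still owns.
	 */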
	/*
	 * Check for transmission complete interrupts.
	 */
	if (intstat & (INTSTAT_TXC | INTSTAT_TXU)) {
		ifp->if_flags &= ~IFF_OACTIVE;
		for (i = sc->sc_txdirty; sc->sc_txpending != 0;
		    i = EPIC_NEXTTX(i), sc->sc_txpending--) {
			txd = EPIC_CDTX(sc, i);
			ds = EPIC_DSTX(sc, i);

			EPIC_CDTXSYNC(sc, i,
			    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

			if (txd->et_txstatus & ET_TXSTAT_OWNER)
				break;

			EPIC_CDFLSYNC(sc, i, BUS_DMASYNC_POSTWRITE);

			bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap,
			    0, ds->ds_dmamap->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
			m_freem(ds->ds_mbuf);
			ds->ds_mbuf = NULL;

			/*
			 * Check for errors and collisions.
			 */
			if ((txd->et_txstatus & ET_TXSTAT_PACKETTX) == 0)
				ifp->if_oerrors++;
			else
				ifp->if_opackets++;
			ifp->if_collisions +=
			    TXSTAT_COLLISIONS(txd->et_txstatus);
			if (txd->et_txstatus & ET_TXSTAT_CARSENSELOST)
				printf("%s: lost carrier\n",
				    sc->sc_dev.dv_xname);
		}

		/* Update the dirty transmit buffer pointer. */
		sc->sc_txdirty = i;

		/*
		 * Cancel the watchdog timer if there are no pending
		 * transmissions.
		 */
		if (sc->sc_txpending == 0)
			ifp->if_timer = 0;

		/*
		 * Kick the transmitter after a DMA underrun.
		 */
		if (intstat & INTSTAT_TXU) {
			printf("%s: transmit underrun\n", sc->sc_dev.dv_xname);
			bus_space_write_4(sc->sc_st, sc->sc_sh,
			    EPIC_COMMAND, COMMAND_TXUGO);
			if (sc->sc_txpending)
				bus_space_write_4(sc->sc_st, sc->sc_sh,
				    EPIC_COMMAND, COMMAND_TXQUEUED);
		}

		/*
		 * Try to get more packets going.
		 */
		epic_start(ifp);
	}

	/*
	 * Check for fatal interrupts.
	 */
	if (intstat & INTSTAT_FATAL_INT) {
		if (intstat & INTSTAT_PTA)
			printf("%s: PCI target abort error\n",
			    sc->sc_dev.dv_xname);
		else if (intstat & INTSTAT_PMA)
			printf("%s: PCI master abort error\n",
			    sc->sc_dev.dv_xname);
		else if (intstat & INTSTAT_APE)
			printf("%s: PCI address parity error\n",
			    sc->sc_dev.dv_xname);
		else if (intstat & INTSTAT_DPE)
			printf("%s: PCI data parity error\n",
			    sc->sc_dev.dv_xname);
		else
			printf("%s: unknown fatal error\n",
			    sc->sc_dev.dv_xname);
		(void) epic_init(ifp);
	}

	/*
	 * Check for more interrupts.
	 */
	goto top;
}

/*
 * One second timer, used to tick the MII.
 */
void
epic_tick(arg)
	void *arg;
{
	struct epic_softc *sc = arg;
	int s;

	s = splnet();
	mii_tick(&sc->sc_mii);
	splx(s);

	callout_reset(&sc->sc_mii_callout, hz, epic_tick, sc);
}

/*
 * Fixup the clock source on the EPIC.
 */
void
epic_fixup_clock_source(sc)
	struct epic_softc *sc;
{
	int i;

	/*
	 * According to SMC Application Note 7-15, the EPIC's clock
	 * source is incorrect following a reset.  This manifests itself
	 * as failure to recognize when host software has written to
	 * a register on the EPIC.  The appnote recommends issuing at
	 * least 16 consecutive writes to the CLOCK TEST bit to correctly
	 * configure the clock source.
	 */
	for (i = 0; i < 16; i++)
		bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_TEST,
		    TEST_CLOCKTEST);
}
/*
 * Perform a soft reset on the EPIC.
 */
void
epic_reset(sc)
	struct epic_softc *sc;
{

	epic_fixup_clock_source(sc);

	bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_GENCTL, 0);
	delay(100);
	bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_GENCTL, GENCTL_SOFTRESET);
	delay(100);

	epic_fixup_clock_source(sc);
}

/*
 * Initialize the interface.  Must be called at splnet().
 */
int
epic_init(ifp)
	struct ifnet *ifp;
{
	struct epic_softc *sc = ifp->if_softc;
	bus_space_tag_t st = sc->sc_st;
	bus_space_handle_t sh = sc->sc_sh;
	u_int8_t *enaddr = LLADDR(ifp->if_sadl);
	struct epic_txdesc *txd;
	struct epic_descsoft *ds;
	u_int32_t genctl, reg0;
	int i, error = 0;

	/*
	 * Cancel any pending I/O.
	 */
	epic_stop(ifp, 0);

	/*
	 * Reset the EPIC to a known state.
	 */
	epic_reset(sc);

	/*
	 * Magical mystery initialization.
	 */
	bus_space_write_4(st, sh, EPIC_TXTEST, 0);

	/*
	 * Initialize the EPIC genctl register:
	 *
	 *	- 64 byte receive FIFO threshold
	 *	- automatic advance to next receive frame
	 */
	genctl = GENCTL_RX_FIFO_THRESH0 | GENCTL_ONECOPY;
#if BYTE_ORDER == BIG_ENDIAN
	genctl |= GENCTL_BIG_ENDIAN;
#endif
	bus_space_write_4(st, sh, EPIC_GENCTL, genctl);

	/*
	 * Reset the MII bus and PHY.
	 */
	reg0 = bus_space_read_4(st, sh, EPIC_NVCTL);
	bus_space_write_4(st, sh, EPIC_NVCTL, reg0 | NVCTL_GPIO1 | NVCTL_GPOE1);
	bus_space_write_4(st, sh, EPIC_MIICFG, MIICFG_ENASER);
	bus_space_write_4(st, sh, EPIC_GENCTL, genctl | GENCTL_RESET_PHY);
	delay(100);
	bus_space_write_4(st, sh, EPIC_GENCTL, genctl);
	delay(1000);
	bus_space_write_4(st, sh, EPIC_NVCTL, reg0);

	/*
	 * Initialize Ethernet address.
	 */
	reg0 = enaddr[1] << 8 | enaddr[0];
	bus_space_write_4(st, sh, EPIC_LAN0, reg0);
	reg0 = enaddr[3] << 8 | enaddr[2];
	bus_space_write_4(st, sh, EPIC_LAN1, reg0);
	reg0 = enaddr[5] << 8 | enaddr[4];
	bus_space_write_4(st, sh, EPIC_LAN2, reg0);

	/*
	 * Initialize receive control.  Remember the external buffer
	 * size setting.
	 */
	reg0 = bus_space_read_4(st, sh, EPIC_RXCON) &
	    (RXCON_EXTBUFSIZESEL1 | RXCON_EXTBUFSIZESEL0);
	reg0 |= (RXCON_RXMULTICAST | RXCON_RXBROADCAST);
	if (ifp->if_flags & IFF_PROMISC)
		reg0 |= RXCON_PROMISCMODE;
	bus_space_write_4(st, sh, EPIC_RXCON, reg0);

	/* Set the current media. */
	epic_mediachange(ifp);

	/* Set up the multicast hash table. */
	epic_set_mchash(sc);

	/*
	 * Initialize the transmit descriptor ring.  txlast is initialized
	 * to the end of the list so that it will wrap around to the first
	 * descriptor when the first packet is transmitted.
	 */
	for (i = 0; i < EPIC_NTXDESC; i++) {
		txd = EPIC_CDTX(sc, i);
		memset(txd, 0, sizeof(struct epic_txdesc));
		txd->et_bufaddr = EPIC_CDFLADDR(sc, i);
		txd->et_nextdesc = EPIC_CDTXADDR(sc, EPIC_NEXTTX(i));
		EPIC_CDTXSYNC(sc, i, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	}
	sc->sc_txpending = 0;
	sc->sc_txdirty = 0;
	sc->sc_txlast = EPIC_NTXDESC - 1;
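
	/*
	 * Note that each et_nextdesc above holds the bus address of the
	 * following descriptor, so the ring is a circular singly-linked
	 * list that the chip can walk on its own.
	 */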
	/*
	 * Initialize the receive descriptor ring.
	 */
	for (i = 0; i < EPIC_NRXDESC; i++) {
		ds = EPIC_DSRX(sc, i);
		if (ds->ds_mbuf == NULL) {
			if ((error = epic_add_rxbuf(sc, i)) != 0) {
				printf("%s: unable to allocate or map rx "
				    "buffer %d, error = %d\n",
				    sc->sc_dev.dv_xname, i, error);
				/*
				 * XXX Should attempt to run with fewer receive
				 * XXX buffers instead of just failing.
				 */
				epic_rxdrain(sc);
				goto out;
			}
		} else
			EPIC_INIT_RXDESC(sc, i);
	}
	sc->sc_rxptr = 0;

	/*
	 * Initialize the interrupt mask and enable interrupts.
	 */
	bus_space_write_4(st, sh, EPIC_INTMASK, INTMASK);
	bus_space_write_4(st, sh, EPIC_GENCTL, genctl | GENCTL_INTENA);

	/*
	 * Give the transmit and receive rings to the EPIC.
	 */
	bus_space_write_4(st, sh, EPIC_PTCDAR,
	    EPIC_CDTXADDR(sc, EPIC_NEXTTX(sc->sc_txlast)));
	bus_space_write_4(st, sh, EPIC_PRCDAR,
	    EPIC_CDRXADDR(sc, sc->sc_rxptr));

	/*
	 * Set the EPIC in motion.
	 */
	bus_space_write_4(st, sh, EPIC_COMMAND,
	    COMMAND_RXQUEUED | COMMAND_START_RX);

	/*
	 * ...all done!
	 */
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	/*
	 * Start the one second clock.
	 */
	callout_reset(&sc->sc_mii_callout, hz, epic_tick, sc);

	/*
	 * Attempt to start output on the interface.
	 */
	epic_start(ifp);

 out:
	if (error)
		printf("%s: interface not running\n", sc->sc_dev.dv_xname);
	return (error);
}

/*
 * Drain the receive queue.
 */
void
epic_rxdrain(sc)
	struct epic_softc *sc;
{
	struct epic_descsoft *ds;
	int i;

	for (i = 0; i < EPIC_NRXDESC; i++) {
		ds = EPIC_DSRX(sc, i);
		if (ds->ds_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
			m_freem(ds->ds_mbuf);
			ds->ds_mbuf = NULL;
		}
	}
}

/*
 * Stop transmission on the interface.
 */
void
epic_stop(ifp, disable)
	struct ifnet *ifp;
	int disable;
{
	struct epic_softc *sc = ifp->if_softc;
	bus_space_tag_t st = sc->sc_st;
	bus_space_handle_t sh = sc->sc_sh;
	struct epic_descsoft *ds;
	u_int32_t reg;
	int i;

	/*
	 * Stop the one second clock.
	 */
	callout_stop(&sc->sc_mii_callout);

	/* Down the MII. */
	mii_down(&sc->sc_mii);

	/* Paranoia... */
	epic_fixup_clock_source(sc);

	/*
	 * Disable interrupts.
	 */
	reg = bus_space_read_4(st, sh, EPIC_GENCTL);
	bus_space_write_4(st, sh, EPIC_GENCTL, reg & ~GENCTL_INTENA);
	bus_space_write_4(st, sh, EPIC_INTMASK, 0);

	/*
	 * Stop the DMA engine and take the receiver off-line.
	 */
	bus_space_write_4(st, sh, EPIC_COMMAND, COMMAND_STOP_RDMA |
	    COMMAND_STOP_TDMA | COMMAND_STOP_RX);

	/*
	 * Release any queued transmit buffers.
	 */
	for (i = 0; i < EPIC_NTXDESC; i++) {
		ds = EPIC_DSTX(sc, i);
		if (ds->ds_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
			m_freem(ds->ds_mbuf);
			ds->ds_mbuf = NULL;
		}
	}

	if (disable)
		epic_rxdrain(sc);

	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;
}
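
/*
 * The routine below bit-bangs what appears to be a 93Cx6-style Microwire
 * serial EEPROM (an assumption based on the waveform, not a datasheet):
 * for each word, raise chip select, clock out a 3-bit READ opcode and a
 * 6-bit word address on EEDI, then clock 16 data bits back in on EEDO,
 * MSB first, waiting for EECTL_EERDY around every transition.
 */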
/*
 * Read the EPIC Serial EEPROM.
 */
void
epic_read_eeprom(sc, word, wordcnt, data)
	struct epic_softc *sc;
	int word, wordcnt;
	u_int16_t *data;
{
	bus_space_tag_t st = sc->sc_st;
	bus_space_handle_t sh = sc->sc_sh;
	u_int16_t reg;
	int i, x;

#define	EEPROM_WAIT_READY(st, sh) \
	while ((bus_space_read_4((st), (sh), EPIC_EECTL) & EECTL_EERDY) == 0) \
		/* nothing */

	/*
	 * Enable the EEPROM.
	 */
	bus_space_write_4(st, sh, EPIC_EECTL, EECTL_ENABLE);
	EEPROM_WAIT_READY(st, sh);

	for (i = 0; i < wordcnt; i++) {
		/* Send CHIP SELECT for one clock tick. */
		bus_space_write_4(st, sh, EPIC_EECTL, EECTL_ENABLE|EECTL_EECS);
		EEPROM_WAIT_READY(st, sh);

		/* Shift in the READ opcode. */
		for (x = 3; x > 0; x--) {
			reg = EECTL_ENABLE|EECTL_EECS;
			if (EPIC_EEPROM_OPC_READ & (1 << (x - 1)))
				reg |= EECTL_EEDI;
			bus_space_write_4(st, sh, EPIC_EECTL, reg);
			EEPROM_WAIT_READY(st, sh);
			bus_space_write_4(st, sh, EPIC_EECTL, reg|EECTL_EESK);
			EEPROM_WAIT_READY(st, sh);
			bus_space_write_4(st, sh, EPIC_EECTL, reg);
			EEPROM_WAIT_READY(st, sh);
		}

		/* Shift in address. */
		for (x = 6; x > 0; x--) {
			reg = EECTL_ENABLE|EECTL_EECS;
			if ((word + i) & (1 << (x - 1)))
				reg |= EECTL_EEDI;
			bus_space_write_4(st, sh, EPIC_EECTL, reg);
			EEPROM_WAIT_READY(st, sh);
			bus_space_write_4(st, sh, EPIC_EECTL, reg|EECTL_EESK);
			EEPROM_WAIT_READY(st, sh);
			bus_space_write_4(st, sh, EPIC_EECTL, reg);
			EEPROM_WAIT_READY(st, sh);
		}

		/* Shift out data. */
		reg = EECTL_ENABLE|EECTL_EECS;
		data[i] = 0;
		for (x = 16; x > 0; x--) {
			bus_space_write_4(st, sh, EPIC_EECTL, reg|EECTL_EESK);
			EEPROM_WAIT_READY(st, sh);
			if (bus_space_read_4(st, sh, EPIC_EECTL) & EECTL_EEDO)
				data[i] |= (1 << (x - 1));
			bus_space_write_4(st, sh, EPIC_EECTL, reg);
			EEPROM_WAIT_READY(st, sh);
		}

		/* Clear CHIP SELECT. */
		bus_space_write_4(st, sh, EPIC_EECTL, EECTL_ENABLE);
		EEPROM_WAIT_READY(st, sh);
	}

	/*
	 * Disable the EEPROM.
	 */
	bus_space_write_4(st, sh, EPIC_EECTL, 0);

#undef EEPROM_WAIT_READY
}

/*
 * Add a receive buffer to the indicated descriptor.
 */
int
epic_add_rxbuf(sc, idx)
	struct epic_softc *sc;
	int idx;
{
	struct epic_descsoft *ds = EPIC_DSRX(sc, idx);
	struct mbuf *m;
	int error;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);

	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return (ENOBUFS);
	}

	if (ds->ds_mbuf != NULL)
		bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);

	ds->ds_mbuf = m;

	error = bus_dmamap_load(sc->sc_dmat, ds->ds_dmamap,
	    m->m_ext.ext_buf, m->m_ext.ext_size, NULL,
	    BUS_DMA_READ|BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: can't load rx DMA map %d, error = %d\n",
		    sc->sc_dev.dv_xname, idx, error);
		panic("epic_add_rxbuf");	/* XXX */
	}

	bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
	    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);

	EPIC_INIT_RXDESC(sc, idx);

	return (0);
}
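
/*
 * Hash mapping used by epic_set_mchash() below: ether_crc32_be() is run
 * over the 6-byte address, the top 6 bits of the CRC are kept
 * (hash >> 26), and of those, bits [5:4] select one of the four 16-bit
 * MC registers while bits [3:0] select the bit within that register.
 */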
/*
 * Set the EPIC multicast hash table.
 *
 * NOTE: We rely on a recently-updated mii_media_active here!
 */
void
epic_set_mchash(sc)
	struct epic_softc *sc;
{
	struct ethercom *ec = &sc->sc_ethercom;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	u_int32_t hash, mchash[4];

	/*
	 * Set up the multicast address filter by passing all multicast
	 * addresses through a CRC generator, and then using the most
	 * significant 6 bits of the CRC as an index into the 64-bit
	 * multicast hash table (only the lower 16 bits of each 32-bit
	 * multicast hash register are valid).  The high-order bits
	 * select the register, while the rest of the bits select the
	 * bit within the register.
	 */

	if (ifp->if_flags & IFF_PROMISC)
		goto allmulti;

	if (IFM_SUBTYPE(sc->sc_mii.mii_media_active) == IFM_10_T) {
		/* XXX hardware bug in 10Mbps mode. */
		goto allmulti;
	}

	mchash[0] = mchash[1] = mchash[2] = mchash[3] = 0;

	ETHER_FIRST_MULTI(step, ec, enm);
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			/*
			 * We must listen to a range of multicast addresses.
			 * For now, just accept all multicasts, rather than
			 * trying to set only those filter bits needed to match
			 * the range.  (At this time, the only use of address
			 * ranges is for IP multicast routing, for which the
			 * range is big enough to require all bits set.)
			 */
			goto allmulti;
		}

		hash = ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN);
		hash >>= 26;

		/* Set the corresponding bit in the hash table. */
		mchash[hash >> 4] |= 1 << (hash & 0xf);

		ETHER_NEXT_MULTI(step, enm);
	}

	ifp->if_flags &= ~IFF_ALLMULTI;
	goto sethash;

 allmulti:
	ifp->if_flags |= IFF_ALLMULTI;
	mchash[0] = mchash[1] = mchash[2] = mchash[3] = 0xffff;

 sethash:
	bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MC0, mchash[0]);
	bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MC1, mchash[1]);
	bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MC2, mchash[2]);
	bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MC3, mchash[3]);
}

/*
 * Wait for the MII to become ready.
 */
int
epic_mii_wait(sc, rw)
	struct epic_softc *sc;
	u_int32_t rw;
{
	int i;

	for (i = 0; i < 50; i++) {
		if ((bus_space_read_4(sc->sc_st, sc->sc_sh, EPIC_MMCTL) & rw)
		    == 0)
			break;
		delay(2);
	}
	if (i == 50) {
		printf("%s: MII timed out\n", sc->sc_dev.dv_xname);
		return (1);
	}

	return (0);
}

/*
 * Read from the MII.
 */
int
epic_mii_read(self, phy, reg)
	struct device *self;
	int phy, reg;
{
	struct epic_softc *sc = (struct epic_softc *)self;

	if (epic_mii_wait(sc, MMCTL_WRITE))
		return (0);

	bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MMCTL,
	    MMCTL_ARG(phy, reg, MMCTL_READ));

	if (epic_mii_wait(sc, MMCTL_READ))
		return (0);

	return (bus_space_read_4(sc->sc_st, sc->sc_sh, EPIC_MMDATA) &
	    MMDATA_MASK);
}
/*
 * Write to the MII.
 */
void
epic_mii_write(self, phy, reg, val)
	struct device *self;
	int phy, reg, val;
{
	struct epic_softc *sc = (struct epic_softc *)self;

	if (epic_mii_wait(sc, MMCTL_WRITE))
		return;

	bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MMDATA, val);
	bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MMCTL,
	    MMCTL_ARG(phy, reg, MMCTL_WRITE));
}

/*
 * Callback from PHY when media changes.
 */
void
epic_statchg(self)
	struct device *self;
{
	struct epic_softc *sc = (struct epic_softc *)self;
	u_int32_t txcon, miicfg;

	/*
	 * Update loopback bits in TXCON to reflect duplex mode.
	 */
	txcon = bus_space_read_4(sc->sc_st, sc->sc_sh, EPIC_TXCON);
	if (sc->sc_mii.mii_media_active & IFM_FDX)
		txcon |= (TXCON_LOOPBACK_D1|TXCON_LOOPBACK_D2);
	else
		txcon &= ~(TXCON_LOOPBACK_D1|TXCON_LOOPBACK_D2);
	bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_TXCON, txcon);

	/* On some cards we need to manually set the full-duplex LED. */
	if (sc->sc_hwflags & EPIC_DUPLEXLED_ON_694) {
		miicfg = bus_space_read_4(sc->sc_st, sc->sc_sh, EPIC_MIICFG);
		if (IFM_OPTIONS(sc->sc_mii.mii_media_active) & IFM_FDX)
			miicfg |= MIICFG_ENABLE;
		else
			miicfg &= ~MIICFG_ENABLE;
		bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MIICFG, miicfg);
	}

	/*
	 * There is a multicast filter bug in 10Mbps mode.  Kick the
	 * multicast filter in case the speed changed.
	 */
	epic_set_mchash(sc);
}

/*
 * Callback from ifmedia to request current media status.
 */
void
epic_mediastatus(ifp, ifmr)
	struct ifnet *ifp;
	struct ifmediareq *ifmr;
{
	struct epic_softc *sc = ifp->if_softc;

	mii_pollstat(&sc->sc_mii);
	ifmr->ifm_status = sc->sc_mii.mii_media_status;
	ifmr->ifm_active = sc->sc_mii.mii_media_active;
}

/*
 * Callback from ifmedia to request new media setting.
 */
int
epic_mediachange(ifp)
	struct ifnet *ifp;
{
	struct epic_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_mii;
	struct ifmedia *ifm = &mii->mii_media;
	int media = ifm->ifm_cur->ifm_media;
	u_int32_t miicfg;
	struct mii_softc *miisc;
	int cfg;

	if (!(ifp->if_flags & IFF_UP))
		return (0);

	if (IFM_INST(media) != sc->sc_serinst) {
		/* If we're not selecting the serial interface, select MII mode. */
#ifdef EPICMEDIADEBUG
		printf("%s: parallel mode\n", ifp->if_xname);
#endif
		miicfg = bus_space_read_4(sc->sc_st, sc->sc_sh, EPIC_MIICFG);
		miicfg &= ~MIICFG_SERMODEENA;
		bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MIICFG, miicfg);
	}

	mii_mediachg(mii);

	if (IFM_INST(media) == sc->sc_serinst) {
		/* Select the serial interface. */
#ifdef EPICMEDIADEBUG
		printf("%s: serial mode\n", ifp->if_xname);
#endif
		miicfg = bus_space_read_4(sc->sc_st, sc->sc_sh, EPIC_MIICFG);
		miicfg |= (MIICFG_SERMODEENA | MIICFG_ENABLE);
		bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MIICFG, miicfg);

		/*
		 * No PHY driver handles the serial interface, so
		 * fill in the media status ourselves.
		 */
		mii->mii_media_active = media;
		mii->mii_media_status = 0;

		epic_statchg(&sc->sc_dev);
		return (0);
	}

	/* Look up the selected PHY. */
	for (miisc = LIST_FIRST(&mii->mii_phys); miisc != NULL;
	    miisc = LIST_NEXT(miisc, mii_list)) {
		if (IFM_INST(media) == miisc->mii_inst)
			break;
	}
	if (!miisc) {
		printf("epic_mediachange: can't happen\n");	/* ??? panic */
		return (0);
	}
#ifdef EPICMEDIADEBUG
	printf("%s: using phy %s\n", ifp->if_xname,
	    miisc->mii_dev.dv_xname);
#endif

	if (miisc->mii_flags & MIIF_HAVEFIBER) {
		/* XXX XXX assume it's a Level1 - should check */

		/* We have to power up the fiber transceivers. */
		cfg = PHY_READ(miisc, MII_LXTPHY_CONFIG);
		if (IFM_SUBTYPE(media) == IFM_100_FX) {
#ifdef EPICMEDIADEBUG
			printf("%s: power up fiber\n", ifp->if_xname);
#endif
			cfg |= (CONFIG_LEDC1 | CONFIG_LEDC0);
		} else {
#ifdef EPICMEDIADEBUG
			printf("%s: power down fiber\n", ifp->if_xname);
#endif
			cfg &= ~(CONFIG_LEDC1 | CONFIG_LEDC0);
		}
		PHY_WRITE(miisc, MII_LXTPHY_CONFIG, cfg);
	}

	return (0);
}