1 /* $OpenBSD: if_stge.c,v 1.45 2008/11/28 02:44:18 brad Exp $ */ 2 /* $NetBSD: if_stge.c,v 1.27 2005/05/16 21:35:32 bouyer Exp $ */ 3 4 /*- 5 * Copyright (c) 2001 The NetBSD Foundation, Inc. 6 * All rights reserved. 7 * 8 * This code is derived from software contributed to The NetBSD Foundation 9 * by Jason R. Thorpe. 10 * 11 * Redistribution and use in source and binary forms, with or without 12 * modification, are permitted provided that the following conditions 13 * are met: 14 * 1. Redistributions of source code must retain the above copyright 15 * notice, this list of conditions and the following disclaimer. 16 * 2. Redistributions in binary form must reproduce the above copyright 17 * notice, this list of conditions and the following disclaimer in the 18 * documentation and/or other materials provided with the distribution. 19 * 20 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 21 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 22 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 23 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 30 * POSSIBILITY OF SUCH DAMAGE. 31 */ 32 33 /* 34 * Device driver for the Sundance Tech. TC9021 10/100/1000 35 * Ethernet controller. 
36 */ 37 38 #include "bpfilter.h" 39 #include "vlan.h" 40 41 #include <sys/param.h> 42 #include <sys/systm.h> 43 #include <sys/timeout.h> 44 #include <sys/mbuf.h> 45 #include <sys/malloc.h> 46 #include <sys/kernel.h> 47 #include <sys/socket.h> 48 #include <sys/ioctl.h> 49 #include <sys/errno.h> 50 #include <sys/device.h> 51 #include <sys/queue.h> 52 53 #include <net/if.h> 54 #include <net/if_dl.h> 55 56 #ifdef INET 57 #include <netinet/in.h> 58 #include <netinet/in_systm.h> 59 #include <netinet/in_var.h> 60 #include <netinet/ip.h> 61 #include <netinet/if_ether.h> 62 #endif 63 64 #include <net/if_media.h> 65 66 #if NVLAN > 0 67 #include <net/if_types.h> 68 #include <net/if_vlan_var.h> 69 #endif 70 71 #if NBPFILTER > 0 72 #include <net/bpf.h> 73 #endif 74 75 #include <machine/bus.h> 76 #include <machine/intr.h> 77 78 #include <dev/mii/mii.h> 79 #include <dev/mii/miivar.h> 80 #include <dev/mii/mii_bitbang.h> 81 82 #include <dev/pci/pcireg.h> 83 #include <dev/pci/pcivar.h> 84 #include <dev/pci/pcidevs.h> 85 86 #include <dev/pci/if_stgereg.h> 87 88 void stge_start(struct ifnet *); 89 void stge_watchdog(struct ifnet *); 90 int stge_ioctl(struct ifnet *, u_long, caddr_t); 91 int stge_init(struct ifnet *); 92 void stge_stop(struct ifnet *, int); 93 94 void stge_shutdown(void *); 95 96 void stge_reset(struct stge_softc *); 97 void stge_rxdrain(struct stge_softc *); 98 int stge_add_rxbuf(struct stge_softc *, int); 99 void stge_read_eeprom(struct stge_softc *, int, uint16_t *); 100 void stge_tick(void *); 101 102 void stge_stats_update(struct stge_softc *); 103 104 void stge_set_filter(struct stge_softc *); 105 106 int stge_intr(void *); 107 void stge_txintr(struct stge_softc *); 108 void stge_rxintr(struct stge_softc *); 109 110 int stge_mii_readreg(struct device *, int, int); 111 void stge_mii_writereg(struct device *, int, int, int); 112 void stge_mii_statchg(struct device *); 113 114 int stge_mediachange(struct ifnet *); 115 void stge_mediastatus(struct ifnet *, struct 
ifmediareq *);

int	stge_match(struct device *, void *, void *);
void	stge_attach(struct device *, struct device *, void *);

/* Tunable: if non-zero, copy small Rx frames into a fresh header mbuf. */
int	stge_copy_small = 0;

struct cfattach stge_ca = {
	sizeof(struct stge_softc), stge_match, stge_attach,
};

struct cfdriver stge_cd = {
	0, "stge", DV_IFNET
};

uint32_t stge_mii_bitbang_read(struct device *);
void	stge_mii_bitbang_write(struct device *, uint32_t);

/*
 * Bit-banged MII access: map the generic mii_bitbang operations onto
 * the management bits of the TC9021 PhyCtrl register.
 */
const struct mii_bitbang_ops stge_mii_bitbang_ops = {
	stge_mii_bitbang_read,
	stge_mii_bitbang_write,
	{
		PC_MgmtData,		/* MII_BIT_MDO */
		PC_MgmtData,		/* MII_BIT_MDI */
		PC_MgmtClk,		/* MII_BIT_MDC */
		PC_MgmtDir,		/* MII_BIT_DIR_HOST_PHY */
		0,			/* MII_BIT_DIR_PHY_HOST */
	}
};

/*
 * Devices supported by this driver.
 */
const struct pci_matchid stge_devices[] = {
	{ PCI_VENDOR_ANTARES,	PCI_PRODUCT_ANTARES_TC9021 },
	{ PCI_VENDOR_DLINK,	PCI_PRODUCT_DLINK_DGE550T },
	{ PCI_VENDOR_SUNDANCE,	PCI_PRODUCT_SUNDANCE_ST1023 },
	{ PCI_VENDOR_SUNDANCE,	PCI_PRODUCT_SUNDANCE_ST2021 },
	{ PCI_VENDOR_SUNDANCE,	PCI_PRODUCT_SUNDANCE_TC9021 },
	{ PCI_VENDOR_SUNDANCE,	PCI_PRODUCT_SUNDANCE_TC9021_ALT },
	{ PCI_VENDOR_TAMARACK,	PCI_PRODUCT_TAMARACK_TC9021 },
	{ PCI_VENDOR_TAMARACK,	PCI_PRODUCT_TAMARACK_TC9021_ALT }
};

/*
 * stge_match:
 *
 *	Autoconf match routine; check the PCI vendor/product ID against
 *	the table of supported devices above.
 */
int
stge_match(struct device *parent, void *match, void *aux)
{
	return (pci_matchbyid((struct pci_attach_args *)aux, stge_devices,
	    sizeof(stge_devices) / sizeof(stge_devices[0])));
}

/*
 * stge_attach:
 *
 *	Autoconf attach routine.  Map the registers, wake the chip from
 *	power-save, hook the interrupt, allocate and load the DMA rings,
 *	read the station address, probe the MII, and attach the ifnet.
 *	On failure, tear down in reverse order via the fail_* labels.
 */
void
stge_attach(struct device *parent, struct device *self, void *aux)
{
	struct stge_softc *sc = (struct stge_softc *) self;
	struct pci_attach_args *pa = aux;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	bus_space_tag_t iot, memt;
	bus_space_handle_t ioh, memh;
	bus_dma_segment_t seg;
	bus_size_t iosize;
	int ioh_valid, memh_valid;
	int i, rseg, error;
	int state;

	timeout_set(&sc->sc_timeout, stge_tick, sc);

	sc->sc_rev = PCI_REVISION(pa->pa_class);

	/*
	 * Map the device.  Both the I/O and memory BARs are attempted;
	 * memory space is preferred below if both succeed.
	 *
	 * NOTE(review): both calls write the same `iosize`, and if both
	 * mappings succeed the unused one is never unmapped — confirm
	 * whether this leak/size mismatch matters on the fail_0 path.
	 */
	ioh_valid = (pci_mapreg_map(pa, STGE_PCI_IOBA,
	    PCI_MAPREG_TYPE_IO, 0,
	    &iot, &ioh, NULL, &iosize, 0) == 0);
	memh_valid = (pci_mapreg_map(pa, STGE_PCI_MMBA,
	    PCI_MAPREG_TYPE_MEM|PCI_MAPREG_MEM_TYPE_32BIT, 0,
	    &memt, &memh, NULL, &iosize, 0) == 0);

	if (memh_valid) {
		sc->sc_st = memt;
		sc->sc_sh = memh;
	} else if (ioh_valid) {
		sc->sc_st = iot;
		sc->sc_sh = ioh;
	} else {
		printf(": unable to map device registers\n");
		return;
	}

	sc->sc_dmat = pa->pa_dmat;

	/* Get it out of power save mode if needed. */
	state = pci_set_powerstate(pc, pa->pa_tag, PCI_PMCSR_STATE_D0);
	if (state == PCI_PMCSR_STATE_D3) {
		/*
		 * The card has lost all configuration data in
		 * this state, so punt.
		 */
		printf(": unable to wake up from power state D3, "
		    "reboot required.\n");
		return;
	}

	/*
	 * Map and establish our interrupt.
	 */
	if (pci_intr_map(pa, &ih)) {
		printf(": unable to map interrupt\n");
		goto fail_0;
	}
	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, stge_intr, sc,
	    sc->sc_dev.dv_xname);
	if (sc->sc_ih == NULL) {
		printf(": unable to establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		goto fail_0;
	}
	printf(": %s", intrstr);

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmat,
	    sizeof(struct stge_control_data), PAGE_SIZE, 0, &seg, 1, &rseg,
	    0)) != 0) {
		printf("%s: unable to allocate control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_0;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
	    sizeof(struct stge_control_data), (caddr_t *)&sc->sc_control_data,
	    BUS_DMA_COHERENT)) != 0) {
		printf("%s: unable to map control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_1;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat,
	    sizeof(struct stge_control_data), 1,
	    sizeof(struct stge_control_data), 0, 0, &sc->sc_cddmamap)) != 0) {
		printf("%s: unable to create control data DMA map, "
		    "error = %d\n", sc->sc_dev.dv_xname, error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
	    sc->sc_control_data, sizeof(struct stge_control_data), NULL,
	    0)) != 0) {
		printf("%s: unable to load control data DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_3;
	}

	/*
	 * Create the transmit buffer DMA maps.  Note that rev B.3
	 * and earlier seem to have a bug regarding multi-fragment
	 * packets.  We need to limit the number of Tx segments on
	 * such chips to 1.
	 */
	for (i = 0; i < STGE_NTXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat,
		    STGE_JUMBO_FRAMELEN, STGE_NTXFRAGS, MCLBYTES, 0, 0,
		    &sc->sc_txsoft[i].ds_dmamap)) != 0) {
			printf("%s: unable to create tx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_4;
		}
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < STGE_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, 0, &sc->sc_rxsoft[i].ds_dmamap)) != 0) {
			printf("%s: unable to create rx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_5;
		}
		sc->sc_rxsoft[i].ds_mbuf = NULL;
	}

	/*
	 * Determine if we're copper or fiber.  It affects how we
	 * reset the card.
	 */
	if (CSR_READ_4(sc, STGE_AsicCtrl) & AC_PhyMedia)
		sc->sc_usefiber = 1;
	else
		sc->sc_usefiber = 0;

	/*
	 * Reset the chip to a known state.
	 */
	stge_reset(sc);

	/*
	 * Reading the station address from the EEPROM doesn't seem
	 * to work, at least on my sample boards.  Instead, since
	 * the reset sequence does AutoInit, read it from the station
	 * address registers.  For Sundance 1023 you can only read it
	 * from EEPROM.
	 */
	if (PCI_PRODUCT(pa->pa_id) != PCI_PRODUCT_SUNDANCE_ST1023) {
		sc->sc_arpcom.ac_enaddr[0] = CSR_READ_2(sc,
		    STGE_StationAddress0) & 0xff;
		sc->sc_arpcom.ac_enaddr[1] = CSR_READ_2(sc,
		    STGE_StationAddress0) >> 8;
		sc->sc_arpcom.ac_enaddr[2] = CSR_READ_2(sc,
		    STGE_StationAddress1) & 0xff;
		sc->sc_arpcom.ac_enaddr[3] = CSR_READ_2(sc,
		    STGE_StationAddress1) >> 8;
		sc->sc_arpcom.ac_enaddr[4] = CSR_READ_2(sc,
		    STGE_StationAddress2) & 0xff;
		sc->sc_arpcom.ac_enaddr[5] = CSR_READ_2(sc,
		    STGE_StationAddress2) >> 8;
		sc->sc_stge1023 = 0;
	} else {
		uint16_t myaddr[ETHER_ADDR_LEN / 2];
		/* EEPROM words are little-endian; convert to host order. */
		for (i = 0; i < ETHER_ADDR_LEN / 2; i++) {
			stge_read_eeprom(sc, STGE_EEPROM_StationAddress0 + i,
			    &myaddr[i]);
			myaddr[i] = letoh16(myaddr[i]);
		}
		(void)memcpy(sc->sc_arpcom.ac_enaddr, myaddr,
		    sizeof(sc->sc_arpcom.ac_enaddr));
		sc->sc_stge1023 = 1;
	}

	printf(", address %s\n", ether_sprintf(sc->sc_arpcom.ac_enaddr));

	/*
	 * Read some important bits from the PhyCtrl register.
	 */
	sc->sc_PhyCtrl = CSR_READ_1(sc, STGE_PhyCtrl) &
	    (PC_PhyDuplexPolarity | PC_PhyLnkPolarity);

	/*
	 * Initialize our media structures and probe the MII.
	 */
	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = stge_mii_readreg;
	sc->sc_mii.mii_writereg = stge_mii_writereg;
	sc->sc_mii.mii_statchg = stge_mii_statchg;
	ifmedia_init(&sc->sc_mii.mii_media, 0, stge_mediachange,
	    stge_mediastatus);
	mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, MIIF_DOPAUSE);
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		/* No PHY found; offer only a "none" media option. */
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
	} else
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);

	ifp = &sc->sc_arpcom.ac_if;
	strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, sizeof ifp->if_xname);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = stge_ioctl;
	ifp->if_start = stge_start;
	ifp->if_watchdog = stge_watchdog;
#ifdef STGE_JUMBO
	ifp->if_hardmtu = STGE_JUMBO_MTU;
#endif
	IFQ_SET_MAXLEN(&ifp->if_snd, STGE_NTXDESC - 1);
	IFQ_SET_READY(&ifp->if_snd);

	ifp->if_capabilities = IFCAP_VLAN_MTU;

#if NVLAN > 0
	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
#endif

	/*
	 * The manual recommends disabling early transmit, so we
	 * do.  It's disabled anyway, if using IP checksumming,
	 * since the entire packet must be in the FIFO in order
	 * for the chip to perform the checksum.
	 */
	sc->sc_txthresh = 0x0fff;

	/*
	 * Disable MWI if the PCI layer tells us to.
	 */
	sc->sc_DMACtrl = 0;
#ifdef fake
	if ((pa->pa_flags & PCI_FLAGS_MWI_OKAY) == 0)
		sc->sc_DMACtrl |= DMAC_MWIDisable;
#endif

#ifdef STGE_CHECKSUM
	/*
	 * We can do IPv4/TCPv4/UDPv4 checksums in hardware.
	 */
	sc->sc_arpcom.ac_if.if_capabilities |= IFCAP_CSUM_IPv4 |
	    IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4;
#endif

	/*
	 * Attach the interface.
	 */
	if_attach(ifp);
	ether_ifattach(ifp);

	/*
	 * Make sure the interface is shutdown during reboot.
	 */
	sc->sc_sdhook = shutdownhook_establish(stge_shutdown, sc);
	if (sc->sc_sdhook == NULL)
		printf("%s: WARNING: unable to establish shutdown hook\n",
		    sc->sc_dev.dv_xname);
	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
 fail_5:
	for (i = 0; i < STGE_NRXDESC; i++) {
		if (sc->sc_rxsoft[i].ds_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_rxsoft[i].ds_dmamap);
	}
 fail_4:
	for (i = 0; i < STGE_NTXDESC; i++) {
		if (sc->sc_txsoft[i].ds_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_txsoft[i].ds_dmamap);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
 fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
 fail_2:
	bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_control_data,
	    sizeof(struct stge_control_data));
 fail_1:
	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
 fail_0:
	/*
	 * NOTE(review): the interrupt established above is not
	 * disestablished on these failure paths — confirm intentional.
	 */
	bus_space_unmap(sc->sc_st, sc->sc_sh, iosize);
	return;
}

/*
 * stge_shutdown:
 *
 *	Make sure the interface is stopped at reboot time.
 */
void
stge_shutdown(void *arg)
{
	struct stge_softc *sc = arg;

	stge_stop(&sc->sc_arpcom.ac_if, 1);
}

/*
 * stge_dma_wait:
 *
 *	Spin (up to STGE_TIMEOUT polls, 2us apart) until the Tx DMA
 *	engine reports idle; warn on timeout.
 */
static void
stge_dma_wait(struct stge_softc *sc)
{
	int i;

	for (i = 0; i < STGE_TIMEOUT; i++) {
		delay(2);
		if ((CSR_READ_4(sc, STGE_DMACtrl) & DMAC_TxDMAInProg) == 0)
			break;
	}

	if (i == STGE_TIMEOUT)
		printf("%s: DMA wait timed out\n", sc->sc_dev.dv_xname);
}

/*
 * stge_start:		[ifnet interface function]
 *
 *	Start packet transmission on the interface.
 */
void
stge_start(struct ifnet *ifp)
{
	struct stge_softc *sc = ifp->if_softc;
	struct mbuf *m0;
	struct stge_descsoft *ds;
	struct stge_tfd *tfd;
	bus_dmamap_t dmamap;
	int error, firsttx, nexttx, opending, seg, totlen;
	uint64_t csum_flags = 0, tfc;

	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
		return;

	/*
	 * Remember the previous number of pending transmissions
	 * and the first descriptor we will use.
	 */
	opending = sc->sc_txpending;
	firsttx = STGE_NEXTTX(sc->sc_txlast);

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	for (;;) {
		/*
		 * Grab a packet off the queue.
		 */
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		/*
		 * Leave one unused descriptor at the end of the
		 * list to prevent wrapping completely around.
		 */
		if (sc->sc_txpending == (STGE_NTXDESC - 1))
			break;

		/*
		 * Get the last and next available transmit descriptor.
		 */
		nexttx = STGE_NEXTTX(sc->sc_txlast);
		tfd = &sc->sc_txdescs[nexttx];
		ds = &sc->sc_txsoft[nexttx];

		dmamap = ds->ds_dmamap;

		/*
		 * Load the DMA map.  If this fails, the packet either
		 * didn't fit in the allotted number of segments, or we
		 * were short on resources.  For the too-many-segments
		 * case, we simply report an error and drop the packet,
		 * since we can't sanely copy a jumbo packet to a single
		 * buffer.
		 */
		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
		    BUS_DMA_NOWAIT);
		if (error) {
			if (error == EFBIG) {
				printf("%s: Tx packet consumes too many "
				    "DMA segments (%u), dropping...\n",
				    sc->sc_dev.dv_xname, dmamap->dm_nsegs);
				IFQ_DEQUEUE(&ifp->if_snd, m0);
				m_freem(m0);
				continue;
			}
			/*
			 * Short on resources, just stop for now.
			 */
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m0);

		/*
		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
		 */

		/* Sync the DMA map. */
		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		/* Initialize the fragment list. */
		for (totlen = 0, seg = 0; seg < dmamap->dm_nsegs; seg++) {
			tfd->tfd_frags[seg].frag_word0 =
			    htole64(FRAG_ADDR(dmamap->dm_segs[seg].ds_addr) |
			    FRAG_LEN(dmamap->dm_segs[seg].ds_len));
			totlen += dmamap->dm_segs[seg].ds_len;
		}

#ifdef STGE_CHECKSUM
		/*
		 * Initialize checksumming flags in the descriptor.
		 * Byte-swap constants so the compiler can optimize.
		 */
		if (m0->m_pkthdr.csum_flags & M_IPV4_CSUM_OUT)
			csum_flags |= TFD_IPChecksumEnable;

		if (m0->m_pkthdr.csum_flags & M_TCPV4_CSUM_OUT)
			csum_flags |= TFD_TCPChecksumEnable;
		else if (m0->m_pkthdr.csum_flags & M_UDPV4_CSUM_OUT)
			csum_flags |= TFD_UDPChecksumEnable;
#endif

		/*
		 * Initialize the descriptor and give it to the chip.
		 * Only ask for a Tx-complete interrupt every
		 * STGE_TXINTR_SPACING descriptors.
		 */
		tfc = TFD_FrameId(nexttx) | TFD_WordAlign(/*totlen & */3) |
		    TFD_FragCount(seg) | csum_flags;
		if ((nexttx & STGE_TXINTR_SPACING_MASK) == 0)
			tfc |= TFD_TxDMAIndicate;

#if NVLAN > 0
		/* Check if we have a VLAN tag to insert. */
		if (m0->m_flags & M_VLANTAG)
			tfc |= (TFD_VLANTagInsert |
			    TFD_VID(m0->m_pkthdr.ether_vtag));
#endif

		tfd->tfd_control = htole64(tfc);

		/* Sync the descriptor. */
		STGE_CDTXSYNC(sc, nexttx,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/*
		 * Kick the transmit DMA logic.
		 */
		CSR_WRITE_4(sc, STGE_DMACtrl,
		    sc->sc_DMACtrl | DMAC_TxDMAPollNow);

		/*
		 * Store a pointer to the packet so we can free it later.
		 */
		ds->ds_mbuf = m0;

		/* Advance the tx pointer. */
		sc->sc_txpending++;
		sc->sc_txlast = nexttx;

#if NBPFILTER > 0
		/*
		 * Pass the packet to any BPF listeners.
		 */
		if (ifp->if_bpf)
			bpf_mtap_ether(ifp->if_bpf, m0, BPF_DIRECTION_OUT);
#endif /* NBPFILTER > 0 */
	}

	if (sc->sc_txpending == (STGE_NTXDESC - 1)) {
		/* No more slots left; notify upper layer. */
		ifp->if_flags |= IFF_OACTIVE;
	}

	if (sc->sc_txpending != opending) {
		/*
		 * We enqueued packets.  If the transmitter was idle,
		 * reset the txdirty pointer.
		 */
		if (opending == 0)
			sc->sc_txdirty = firsttx;

		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
	}
}

/*
 * stge_watchdog:	[ifnet interface function]
 *
 *	Watchdog timer handler.  Reap completed transmissions first;
 *	only if frames are still pending is it a real device timeout,
 *	in which case reinitialize the chip.
 */
void
stge_watchdog(struct ifnet *ifp)
{
	struct stge_softc *sc = ifp->if_softc;

	/*
	 * Sweep up first, since we don't interrupt every frame.
	 */
	stge_txintr(sc);
	if (sc->sc_txpending != 0) {
		printf("%s: device timeout\n", sc->sc_dev.dv_xname);
		ifp->if_oerrors++;

		(void) stge_init(ifp);

		/* Try to get more packets going. */
		stge_start(ifp);
	}
}

/*
 * stge_ioctl:		[ifnet interface function]
 *
 *	Handle control requests from the operator.
 */
int
stge_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct stge_softc *sc = ifp->if_softc;
	struct ifaddr *ifa = (struct ifaddr *)data;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		if (!(ifp->if_flags & IFF_RUNNING))
			stge_init(ifp);

#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			arp_ifinit(&sc->sc_arpcom, ifa);
#endif
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			/*
			 * If only the PROMISC bit changed while running,
			 * just reprogram the Rx filter instead of a full
			 * reinitialization.
			 */
			if (ifp->if_flags & IFF_RUNNING &&
			    (ifp->if_flags ^ sc->stge_if_flags) &
			     IFF_PROMISC) {
				stge_set_filter(sc);
			} else {
				if (!(ifp->if_flags & IFF_RUNNING))
					stge_init(ifp);
			}
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				stge_stop(ifp, 1);
		}
		sc->stge_if_flags = ifp->if_flags;
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
		break;

	default:
		error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data);
	}

	/* ENETRESET from ether_ioctl means the multicast list changed. */
	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			stge_set_filter(sc);
		error = 0;
	}

	/* Try to get more packets going. */
	stge_start(ifp);

	splx(s);
	return (error);
}

/*
 * stge_intr:
 *
 *	Interrupt service routine.  Returns 0 if the interrupt was not
 *	for us, 1 otherwise.  Reading IntStatusAck also acknowledges the
 *	pending interrupt causes.
 */
int
stge_intr(void *arg)
{
	struct stge_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	uint32_t txstat;
	int wantinit;
	uint16_t isr;

	if ((CSR_READ_2(sc, STGE_IntStatus) & IS_InterruptStatus) == 0)
		return (0);

	/* Loop until no enabled cause remains, or a reinit is required. */
	for (wantinit = 0; wantinit == 0;) {
		isr = CSR_READ_2(sc, STGE_IntStatusAck);
		if ((isr & sc->sc_IntEnable) == 0)
			break;

		/* Host interface errors. */
		if (isr & IS_HostError) {
			printf("%s: Host interface error\n",
			    sc->sc_dev.dv_xname);
			wantinit = 1;
			continue;
		}

		/* Receive interrupts. */
		if (isr & (IS_RxDMAComplete|IS_RFDListEnd)) {
			stge_rxintr(sc);
			if (isr & IS_RFDListEnd) {
				printf("%s: receive ring overflow\n",
				    sc->sc_dev.dv_xname);
				/*
				 * XXX Should try to recover from this
				 * XXX more gracefully.
				 */
				wantinit = 1;
			}
		}

		/* Transmit interrupts. */
		if (isr & (IS_TxDMAComplete|IS_TxComplete))
			stge_txintr(sc);

		/* Statistics overflow. */
		if (isr & IS_UpdateStats)
			stge_stats_update(sc);

		/* Transmission errors. */
		if (isr & IS_TxComplete) {
			/* Drain all pending TxStatus entries. */
			for (;;) {
				txstat = CSR_READ_4(sc, STGE_TxStatus);
				if ((txstat & TS_TxComplete) == 0)
					break;
				if (txstat & TS_TxUnderrun) {
					/*
					 * Bump the Tx start threshold
					 * (clamped to the register max).
					 */
					sc->sc_txthresh++;
					if (sc->sc_txthresh > 0x0fff)
						sc->sc_txthresh = 0x0fff;
					printf("%s: transmit underrun, new "
					    "threshold: %d bytes\n",
					    sc->sc_dev.dv_xname,
					    sc->sc_txthresh << 5);
				}
				if (txstat & TS_MaxCollisions)
					printf("%s: excessive collisions\n",
					    sc->sc_dev.dv_xname);
			}
			wantinit = 1;
		}

	}

	if (wantinit)
		stge_init(ifp);

	CSR_WRITE_2(sc, STGE_IntEnable, sc->sc_IntEnable);

	/* Try to get more packets going. */
	stge_start(ifp);

	return (1);
}

/*
 * stge_txintr:
 *
 *	Helper; handle transmit interrupts.  Reap descriptors whose
 *	TFDDone bit the chip has set and free their mbufs.
 */
void
stge_txintr(struct stge_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct stge_descsoft *ds;
	uint64_t control;
	int i;

	ifp->if_flags &= ~IFF_OACTIVE;

	/*
	 * Go through our Tx list and free mbufs for those
	 * frames which have been transmitted.
	 */
	for (i = sc->sc_txdirty; sc->sc_txpending != 0;
	     i = STGE_NEXTTX(i), sc->sc_txpending--) {
		ds = &sc->sc_txsoft[i];

		STGE_CDTXSYNC(sc, i,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		control = letoh64(sc->sc_txdescs[i].tfd_control);
		if ((control & TFD_TFDDone) == 0)
			break;

		bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap,
		    0, ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
		m_freem(ds->ds_mbuf);
		ds->ds_mbuf = NULL;
	}

	/* Update the dirty transmit buffer pointer. */
	sc->sc_txdirty = i;

	/*
	 * If there are no more pending transmissions, cancel the watchdog
	 * timer.
	 */
	if (sc->sc_txpending == 0)
		ifp->if_timer = 0;
}

/*
 * stge_rxintr:
 *
 *	Helper; handle receive interrupts.  Frames may span multiple
 *	descriptors (FrameStart..FrameEnd); fragments are chained via
 *	the sc_rxhead/sc_rxtail state until the end of the frame.
 */
void
stge_rxintr(struct stge_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct stge_descsoft *ds;
	struct mbuf *m, *tailm;
	uint64_t status;
	int i, len;

	for (i = sc->sc_rxptr;; i = STGE_NEXTRX(i)) {
		ds = &sc->sc_rxsoft[i];

		STGE_CDRXSYNC(sc, i,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		status = letoh64(sc->sc_rxdescs[i].rfd_status);

		if ((status & RFD_RFDDone) == 0)
			break;

		/* Discard remaining fragments of a previously-failed frame. */
		if (__predict_false(sc->sc_rxdiscard)) {
			STGE_INIT_RXDESC(sc, i);
			if (status & RFD_FrameEnd) {
				/* Reset our state. */
				sc->sc_rxdiscard = 0;
			}
			continue;
		}

		bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
		    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

		m = ds->ds_mbuf;

		/*
		 * Add a new receive buffer to the ring.
		 */
		if (stge_add_rxbuf(sc, i) != 0) {
			/*
			 * Failed, throw away what we've done so
			 * far, and discard the rest of the packet.
			 */
			ifp->if_ierrors++;
			bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
			    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
			STGE_INIT_RXDESC(sc, i);
			if ((status & RFD_FrameEnd) == 0)
				sc->sc_rxdiscard = 1;
			if (sc->sc_rxhead != NULL)
				m_freem(sc->sc_rxhead);
			STGE_RXCHAIN_RESET(sc);
			continue;
		}

#ifdef DIAGNOSTIC
		if (status & RFD_FrameStart) {
			KASSERT(sc->sc_rxhead == NULL);
			KASSERT(sc->sc_rxtailp == &sc->sc_rxhead);
		}
#endif

		STGE_RXCHAIN_LINK(sc, m);

		/*
		 * If this is not the end of the packet, keep
		 * looking.
		 */
		if ((status & RFD_FrameEnd) == 0) {
			sc->sc_rxlen += m->m_len;
			continue;
		}

		/*
		 * Okay, we have the entire packet now...
		 */
		*sc->sc_rxtailp = NULL;
		m = sc->sc_rxhead;
		tailm = sc->sc_rxtail;

		STGE_RXCHAIN_RESET(sc);

		/*
		 * If the packet had an error, drop it.  Note we
		 * count the error later in the periodic stats update.
		 */
		if (status & (RFD_RxFIFOOverrun | RFD_RxRuntFrame |
		    RFD_RxAlignmentError | RFD_RxFCSError |
		    RFD_RxLengthError)) {
			m_freem(m);
			continue;
		}

		/*
		 * No errors.
		 *
		 * Note we have configured the chip to not include
		 * the CRC at the end of the packet.
		 */
		len = RFD_RxDMAFrameLen(status);
		tailm->m_len = len - sc->sc_rxlen;

		/*
		 * If the packet is small enough to fit in a
		 * single header mbuf, allocate one and copy
		 * the data into it.  This greatly reduces
		 * memory consumption when we receive lots
		 * of small packets.
		 */
		if (stge_copy_small != 0 && len <= (MHLEN - 2)) {
			struct mbuf *nm;
			MGETHDR(nm, M_DONTWAIT, MT_DATA);
			if (nm == NULL) {
				ifp->if_ierrors++;
				m_freem(m);
				continue;
			}
			/* +2 keeps the payload's IP header aligned. */
			nm->m_data += 2;
			nm->m_pkthdr.len = nm->m_len = len;
			m_copydata(m, 0, len, mtod(nm, caddr_t));
			m_freem(m);
			m = nm;
		}

		/*
		 * Set the incoming checksum information for the packet.
		 */
		if ((status & RFD_IPDetected) &&
		    (!(status & RFD_IPError)))
			m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;
		if ((status & RFD_TCPDetected) &&
		    (!(status & RFD_TCPError)))
			m->m_pkthdr.csum_flags |= M_TCP_CSUM_IN_OK;
		else if ((status & RFD_UDPDetected) &&
		    (!(status & RFD_UDPError)))
			m->m_pkthdr.csum_flags |= M_UDP_CSUM_IN_OK;

#if NVLAN > 0
		/* Check for VLAN tagged packets. */
		if (status & RFD_VLANDetected) {
			m->m_pkthdr.ether_vtag = RFD_TCI(status);
			m->m_flags |= M_VLANTAG;
		}
#endif

		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = len;

#if NBPFILTER > 0
		/*
		 * Pass this up to any BPF listeners, but only
		 * pass it up the stack if it's for us.
		 */
		if (ifp->if_bpf)
			bpf_mtap_ether(ifp->if_bpf, m, BPF_DIRECTION_IN);
#endif /* NBPFILTER > 0 */

		/* Pass it on. */
		ether_input_mbuf(ifp, m);
	}

	/* Update the receive pointer. */
	sc->sc_rxptr = i;
}

/*
 * stge_tick:
 *
 *	One second timer, used to tick the MII and pull statistics;
 *	reschedules itself.
 */
void
stge_tick(void *arg)
{
	struct stge_softc *sc = arg;
	int s;

	s = splnet();
	mii_tick(&sc->sc_mii);
	stge_stats_update(sc);
	splx(s);

	timeout_add_sec(&sc->sc_timeout, 1);
}

/*
 * stge_stats_update:
 *
 *	Read the TC9021 statistics counters.
 */
void
stge_stats_update(struct stge_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;

	/* Counters are clear-on-read; discarded reads just reset them. */
	(void) CSR_READ_4(sc, STGE_OctetRcvOk);

	ifp->if_ipackets +=
	    CSR_READ_4(sc, STGE_FramesRcvdOk);

	ifp->if_ierrors +=
	    (u_int) CSR_READ_2(sc, STGE_FramesLostRxErrors);

	(void) CSR_READ_4(sc, STGE_OctetXmtdOk);

	ifp->if_opackets +=
	    CSR_READ_4(sc, STGE_FramesXmtdOk);

	ifp->if_collisions +=
	    CSR_READ_4(sc, STGE_LateCollisions) +
	    CSR_READ_4(sc, STGE_MultiColFrames) +
	    CSR_READ_4(sc, STGE_SingleColFrames);

	ifp->if_oerrors +=
	    (u_int) CSR_READ_2(sc, STGE_FramesAbortXSColls) +
	    (u_int) CSR_READ_2(sc, STGE_FramesWEXDeferal);
}

/*
 * stge_reset:
 *
 *	Perform a soft reset on the TC9021.
 */
void
stge_reset(struct stge_softc *sc)
{
	uint32_t ac;
	int i;

	ac = CSR_READ_4(sc, STGE_AsicCtrl);

	/*
	 * Only assert RstOut if we're fiber.  We need GMII clocks
	 * to be present in order for the reset to complete on fiber
	 * cards.
	 */
	CSR_WRITE_4(sc, STGE_AsicCtrl,
	    ac | AC_GlobalReset | AC_RxReset | AC_TxReset |
	    AC_DMA | AC_FIFO | AC_Network | AC_Host | AC_AutoInit |
	    (sc->sc_usefiber ? AC_RstOut : 0));

	delay(50000);

	/* Poll until the chip clears its reset-busy indication. */
	for (i = 0; i < STGE_TIMEOUT; i++) {
		delay(5000);
		if ((CSR_READ_4(sc, STGE_AsicCtrl) & AC_ResetBusy) == 0)
			break;
	}

	if (i == STGE_TIMEOUT)
		printf("%s: reset failed to complete\n", sc->sc_dev.dv_xname);

	delay(1000);
}

/*
 * stge_init:		[ ifnet interface function ]
 *
 *	Initialize the interface.  Must be called at splnet().
 *	Returns 0 on success, or the bus_dma error that prevented the
 *	Rx ring from being populated.
 */
int
stge_init(struct ifnet *ifp)
{
	struct stge_softc *sc = ifp->if_softc;
	struct stge_descsoft *ds;
	int i, error = 0;

	/*
	 * Cancel any pending I/O.
	 */
	stge_stop(ifp, 0);

	/*
	 * Reset the chip to a known state.
	 */
	stge_reset(sc);

	/*
	 * Initialize the transmit descriptor ring.
	 */
	memset(sc->sc_txdescs, 0, sizeof(sc->sc_txdescs));
	for (i = 0; i < STGE_NTXDESC; i++) {
		sc->sc_txdescs[i].tfd_next = htole64(
		    STGE_CDTXADDR(sc, STGE_NEXTTX(i)));
		sc->sc_txdescs[i].tfd_control = htole64(TFD_TFDDone);
	}
	sc->sc_txpending = 0;
	sc->sc_txdirty = 0;
	sc->sc_txlast = STGE_NTXDESC - 1;

	/*
	 * Initialize the receive descriptor and receive job
	 * descriptor rings.
	 */
	for (i = 0; i < STGE_NRXDESC; i++) {
		ds = &sc->sc_rxsoft[i];
		if (ds->ds_mbuf == NULL) {
			if ((error = stge_add_rxbuf(sc, i)) != 0) {
				printf("%s: unable to allocate or map rx "
				    "buffer %d, error = %d\n",
				    sc->sc_dev.dv_xname, i, error);
				/*
				 * XXX Should attempt to run with fewer receive
				 * XXX buffers instead of just failing.
				 */
				stge_rxdrain(sc);
				goto out;
			}
		} else
			STGE_INIT_RXDESC(sc, i);
	}
	sc->sc_rxptr = 0;
	sc->sc_rxdiscard = 0;
	STGE_RXCHAIN_RESET(sc);

	/* Set the station address. */
	for (i = 0; i < 6; i++)
		CSR_WRITE_1(sc, STGE_StationAddress0 + i,
		    sc->sc_arpcom.ac_enaddr[i]);

	/*
	 * Set the statistics masks.  Disable all the RMON stats,
	 * and disable selected stats in the non-RMON stats registers.
	 */
	CSR_WRITE_4(sc, STGE_RMONStatisticsMask, 0xffffffff);
	CSR_WRITE_4(sc, STGE_StatisticsMask,
	    (1U << 1) | (1U << 2) | (1U << 3) | (1U << 4) | (1U << 5) |
	    (1U << 6) | (1U << 7) | (1U << 8) | (1U << 9) | (1U << 10) |
	    (1U << 13) | (1U << 14) | (1U << 15) | (1U << 19) | (1U << 20) |
	    (1U << 21));

	/* Set up the receive filter. */
	stge_set_filter(sc);

	/*
	 * Give the transmit and receive ring to the chip.
	 */
	CSR_WRITE_4(sc, STGE_TFDListPtrHi, 0); /* NOTE: 32-bit DMA */
	CSR_WRITE_4(sc, STGE_TFDListPtrLo,
	    STGE_CDTXADDR(sc, sc->sc_txdirty));

	CSR_WRITE_4(sc, STGE_RFDListPtrHi, 0); /* NOTE: 32-bit DMA */
	CSR_WRITE_4(sc, STGE_RFDListPtrLo,
	    STGE_CDRXADDR(sc, sc->sc_rxptr));

	/*
	 * Initialize the Tx auto-poll period.  It's OK to make this number
	 * large (255 is the max, but we use 127) -- we explicitly kick the
	 * transmit engine when there's actually a packet.
	 */
	CSR_WRITE_1(sc, STGE_TxDMAPollPeriod, 127);

	/* ..and the Rx auto-poll period. */
	CSR_WRITE_1(sc, STGE_RxDMAPollPeriod, 64);

	/* Initialize the Tx start threshold. */
	CSR_WRITE_2(sc, STGE_TxStartThresh, sc->sc_txthresh);

	/* RX DMA thresholds, from Linux */
	CSR_WRITE_1(sc, STGE_RxDMABurstThresh, 0x30);
	CSR_WRITE_1(sc, STGE_RxDMAUrgentThresh, 0x30);

	/* Rx early threshold, from Linux */
	CSR_WRITE_2(sc, STGE_RxEarlyThresh, 0x7ff);

	/* Tx DMA thresholds, from Linux */
	CSR_WRITE_1(sc, STGE_TxDMABurstThresh, 0x30);
	CSR_WRITE_1(sc, STGE_TxDMAUrgentThresh, 0x04);

	/*
	 * Initialize the Rx DMA interrupt control register.  We
	 * request an interrupt after every incoming packet, but
	 * defer it for 32us (64 * 512 ns).  When the number of
	 * interrupts pending reaches 8, we stop deferring the
	 * interrupt, and signal it immediately.
	 */
	CSR_WRITE_4(sc, STGE_RxDMAIntCtrl,
	    RDIC_RxFrameCount(8) | RDIC_RxDMAWaitTime(512));

	/*
	 * Initialize the interrupt mask.
	 */
	sc->sc_IntEnable = IS_HostError | IS_TxComplete | IS_UpdateStats |
	    IS_TxDMAComplete | IS_RxDMAComplete | IS_RFDListEnd;
	CSR_WRITE_2(sc, STGE_IntStatus, 0xffff);
	CSR_WRITE_2(sc, STGE_IntEnable, sc->sc_IntEnable);

	/*
	 * Configure the DMA engine.
	 * XXX Should auto-tune TxBurstLimit.
	 */
	CSR_WRITE_4(sc, STGE_DMACtrl, sc->sc_DMACtrl |
	    DMAC_TxBurstLimit(3));

	/*
	 * Send a PAUSE frame when we reach 29,696 bytes in the Rx
	 * FIFO, and send an un-PAUSE frame when the FIFO is totally
	 * empty again.
	 */
	CSR_WRITE_2(sc, STGE_FlowOnTresh, 29696 / 16);
	CSR_WRITE_2(sc, STGE_FlowOffThresh, 0);

	/*
	 * Set the maximum frame size.
	 */
#ifdef STGE_JUMBO
	CSR_WRITE_2(sc, STGE_MaxFrameSize, STGE_JUMBO_FRAMELEN);
#else
	CSR_WRITE_2(sc, STGE_MaxFrameSize, ETHER_MAX_LEN);
#endif

	/*
	 * Initialize MacCtrl -- do it before setting the media,
	 * as setting the media will actually program the register.
	 *
	 * Note: We have to poke the IFS value before poking
	 * anything else.
	 */
	sc->sc_MACCtrl = MC_IFSSelect(0);
	CSR_WRITE_4(sc, STGE_MACCtrl, sc->sc_MACCtrl);

	if (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING)
		sc->sc_MACCtrl |= MC_AutoVLANuntagging;

	sc->sc_MACCtrl |= MC_StatisticsEnable | MC_TxEnable | MC_RxEnable;

	if (sc->sc_rev >= 6) {		/* >= B.2 */
		/* Multi-frag frame bug work-around. */
		CSR_WRITE_2(sc, STGE_DebugCtrl,
		    CSR_READ_2(sc, STGE_DebugCtrl) | 0x0200);

		/* Tx Poll Now bug work-around. */
		CSR_WRITE_2(sc, STGE_DebugCtrl,
		    CSR_READ_2(sc, STGE_DebugCtrl) | 0x0010);
		/* XXX ? from linux */
		CSR_WRITE_2(sc, STGE_DebugCtrl,
		    CSR_READ_2(sc, STGE_DebugCtrl) | 0x0020);
	}

	/*
	 * Set the current media.
	 */
	mii_mediachg(&sc->sc_mii);

	/*
	 * Start the one second MII clock.
	 */
	timeout_add_sec(&sc->sc_timeout, 1);

	/*
	 * ...all done!
	 */
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

 out:
	if (error)
		printf("%s: interface not running\n", sc->sc_dev.dv_xname);
	return (error);
}

/*
 * stge_drain:
 *
 *	Drain the receive queue.
 */
void
stge_rxdrain(struct stge_softc *sc)
{
	struct stge_descsoft *ds;
	int i;

	/* Unload and free every posted receive buffer. */
	for (i = 0; i < STGE_NRXDESC; i++) {
		ds = &sc->sc_rxsoft[i];
		if (ds->ds_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
			/*
			 * Clear m_next before freeing so m_freem()
			 * releases only this mbuf, not a chained one
			 * still owned by another descriptor.
			 */
			ds->ds_mbuf->m_next = NULL;
			m_freem(ds->ds_mbuf);
			ds->ds_mbuf = NULL;
		}
	}
}

/*
 * stge_stop:		[ ifnet interface function ]
 *
 *	Stop transmission on the interface.  If `disable' is set,
 *	the receive buffers are drained and freed as well.
 */
void
stge_stop(struct ifnet *ifp, int disable)
{
	struct stge_softc *sc = ifp->if_softc;
	struct stge_descsoft *ds;
	int i;

	/*
	 * Stop the one second clock.
	 */
	timeout_del(&sc->sc_timeout);

	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;

	/* Down the MII. */
	mii_down(&sc->sc_mii);

	/*
	 * Disable interrupts.
	 */
	CSR_WRITE_2(sc, STGE_IntEnable, 0);

	/*
	 * Stop receiver, transmitter, and stats update.
	 */
	CSR_WRITE_4(sc, STGE_MACCtrl,
	    MC_StatisticsDisable | MC_TxDisable | MC_RxDisable);

	/*
	 * Stop the transmit and receive DMA, then clear the list
	 * pointers so the chip no longer references our rings.
	 */
	stge_dma_wait(sc);
	CSR_WRITE_4(sc, STGE_TFDListPtrHi, 0);
	CSR_WRITE_4(sc, STGE_TFDListPtrLo, 0);
	CSR_WRITE_4(sc, STGE_RFDListPtrHi, 0);
	CSR_WRITE_4(sc, STGE_RFDListPtrLo, 0);

	/*
	 * Release any queued transmit buffers.
	 */
	for (i = 0; i < STGE_NTXDESC; i++) {
		ds = &sc->sc_txsoft[i];
		if (ds->ds_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
			m_freem(ds->ds_mbuf);
			ds->ds_mbuf = NULL;
		}
	}

	if (disable)
		stge_rxdrain(sc);
}

/*
 * stge_eeprom_wait:
 *
 *	Wait for the serial EEPROM to become idle.  Returns 0 when
 *	ready, 1 if EC_EepromBusy never deasserted within the timeout.
 */
static int
stge_eeprom_wait(struct stge_softc *sc)
{
	int i;

	for (i = 0; i < STGE_TIMEOUT; i++) {
		delay(1000);
		if ((CSR_READ_2(sc, STGE_EepromCtrl) & EC_EepromBusy) == 0)
			return (0);
	}
	return (1);
}

/*
 * stge_read_eeprom:
 *
 *	Read one 16-bit word from the serial EEPROM at `offset'
 *	into `*data'.  On timeout a diagnostic is printed and the
 *	(possibly stale) data register is returned anyway.
 */
void
stge_read_eeprom(struct stge_softc *sc, int offset, uint16_t *data)
{

	if (stge_eeprom_wait(sc))
		printf("%s: EEPROM failed to come ready\n",
		    sc->sc_dev.dv_xname);

	/* Issue a read-register opcode, then wait for completion. */
	CSR_WRITE_2(sc, STGE_EepromCtrl,
	    EC_EepromAddress(offset) | EC_EepromOpcode(EC_OP_RR));
	if (stge_eeprom_wait(sc))
		printf("%s: EEPROM read timed out\n",
		    sc->sc_dev.dv_xname);
	*data = CSR_READ_2(sc, STGE_EepromData);
}

/*
 * stge_add_rxbuf:
 *
 *	Add a receive buffer to the indicated descriptor.
 */
int
stge_add_rxbuf(struct stge_softc *sc, int idx)
{
	struct stge_descsoft *ds = &sc->sc_rxsoft[idx];
	struct mbuf *m;
	int error;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);

	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return (ENOBUFS);
	}

	/*
	 * Start the payload 2 bytes into the cluster -- presumably so
	 * the IP header lands 4-byte aligned after the 14-byte
	 * Ethernet header (the usual ETHER_ALIGN trick); confirm
	 * against the rx-intr path.
	 */
	m->m_data = m->m_ext.ext_buf + 2;
	m->m_len = MCLBYTES - 2;

	/* Unload any previous mapping before reusing the DMA map. */
	if (ds->ds_mbuf != NULL)
		bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);

	ds->ds_mbuf = m;

	/* Note: the whole cluster is mapped, not just m_len bytes. */
	error = bus_dmamap_load(sc->sc_dmat, ds->ds_dmamap,
	    m->m_ext.ext_buf, m->m_ext.ext_size, NULL, BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: can't load rx DMA map %d, error = %d\n",
		    sc->sc_dev.dv_xname, idx, error);
		panic("stge_add_rxbuf");	/* XXX */
	}

	bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
	    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);

	STGE_INIT_RXDESC(sc, idx);

	return (0);
}

/*
 * stge_set_filter:
 *
 *	Set up the receive filter.
 */
void
stge_set_filter(struct stge_softc *sc)
{
	struct arpcom *ac = &sc->sc_arpcom;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint32_t crc;
	uint32_t mchash[2];

	sc->sc_ReceiveMode = RM_ReceiveUnicast;
	if (ifp->if_flags & IFF_BROADCAST)
		sc->sc_ReceiveMode |= RM_ReceiveBroadcast;

	/* XXX: ST1023 only works in promiscuous mode */
	if (sc->sc_stge1023)
		ifp->if_flags |= IFF_PROMISC;

	if (ifp->if_flags & IFF_PROMISC) {
		sc->sc_ReceiveMode |= RM_ReceiveAllFrames;
		goto allmulti;
	}

	/*
	 * Set up the multicast address filter by passing all multicast
	 * addresses through a CRC generator, and then using the low-order
	 * 6 bits as an index into the 64 bit multicast hash table.  The
	 * high order bits select the register, while the rest of the bits
	 * select the bit within the register.
	 */

	memset(mchash, 0, sizeof(mchash));

	ETHER_FIRST_MULTI(step, ac, enm);
	if (enm == NULL)
		goto done;

	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			/*
			 * We must listen to a range of multicast addresses.
			 * For now, just accept all multicasts, rather than
			 * trying to set only those filter bits needed to match
			 * the range.  (At this time, the only use of address
			 * ranges is for IP multicast routing, for which the
			 * range is big enough to require all bits set.)
			 */
			goto allmulti;
		}

		crc = ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN);

		/* Just want the 6 least significant bits. */
		crc &= 0x3f;

		/* Set the corresponding bit in the hash table. */
		mchash[crc >> 5] |= 1 << (crc & 0x1f);

		ETHER_NEXT_MULTI(step, enm);
	}

	sc->sc_ReceiveMode |= RM_ReceiveMulticastHash;

	ifp->if_flags &= ~IFF_ALLMULTI;
	goto done;

 allmulti:
	ifp->if_flags |= IFF_ALLMULTI;
	sc->sc_ReceiveMode |= RM_ReceiveMulticast;

 done:
	if ((ifp->if_flags & IFF_ALLMULTI) == 0) {
		/*
		 * Program the multicast hash table.
		 */
		CSR_WRITE_4(sc, STGE_HashTable0, mchash[0]);
		CSR_WRITE_4(sc, STGE_HashTable1, mchash[1]);
	}

	CSR_WRITE_2(sc, STGE_ReceiveMode, sc->sc_ReceiveMode);
}

/*
 * stge_mii_readreg:	[mii interface function]
 *
 *	Read a PHY register on the MII of the TC9021.
 */
int
stge_mii_readreg(struct device *self, int phy, int reg)
{

	return (mii_bitbang_readreg(self, &stge_mii_bitbang_ops, phy, reg));
}

/*
 * stge_mii_writereg:	[mii interface function]
 *
 *	Write a PHY register on the MII of the TC9021.
1626 */ 1627 void 1628 stge_mii_writereg(struct device *self, int phy, int reg, int val) 1629 { 1630 1631 mii_bitbang_writereg(self, &stge_mii_bitbang_ops, phy, reg, val); 1632 } 1633 1634 /* 1635 * stge_mii_statchg: [mii interface function] 1636 * 1637 * Callback from MII layer when media changes. 1638 */ 1639 void 1640 stge_mii_statchg(struct device *self) 1641 { 1642 struct stge_softc *sc = (struct stge_softc *) self; 1643 1644 if (sc->sc_mii.mii_media_active & IFM_FDX) 1645 sc->sc_MACCtrl |= MC_DuplexSelect; 1646 else 1647 sc->sc_MACCtrl &= ~MC_DuplexSelect; 1648 1649 /* XXX 802.1x flow-control? */ 1650 1651 CSR_WRITE_4(sc, STGE_MACCtrl, sc->sc_MACCtrl); 1652 } 1653 1654 /* 1655 * sste_mii_bitbang_read: [mii bit-bang interface function] 1656 * 1657 * Read the MII serial port for the MII bit-bang module. 1658 */ 1659 uint32_t 1660 stge_mii_bitbang_read(struct device *self) 1661 { 1662 struct stge_softc *sc = (void *) self; 1663 1664 return (CSR_READ_1(sc, STGE_PhyCtrl)); 1665 } 1666 1667 /* 1668 * stge_mii_bitbang_write: [mii big-bang interface function] 1669 * 1670 * Write the MII serial port for the MII bit-bang module. 1671 */ 1672 void 1673 stge_mii_bitbang_write(struct device *self, uint32_t val) 1674 { 1675 struct stge_softc *sc = (void *) self; 1676 1677 CSR_WRITE_1(sc, STGE_PhyCtrl, val | sc->sc_PhyCtrl); 1678 } 1679 1680 /* 1681 * stge_mediastatus: [ifmedia interface function] 1682 * 1683 * Get the current interface media status. 1684 */ 1685 void 1686 stge_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr) 1687 { 1688 struct stge_softc *sc = ifp->if_softc; 1689 1690 mii_pollstat(&sc->sc_mii); 1691 ifmr->ifm_status = sc->sc_mii.mii_media_status; 1692 ifmr->ifm_active = sc->sc_mii.mii_media_active; 1693 } 1694 1695 /* 1696 * stge_mediachange: [ifmedia interface function] 1697 * 1698 * Set hardware to newly-selected media. 
1699 */ 1700 int 1701 stge_mediachange(struct ifnet *ifp) 1702 { 1703 struct stge_softc *sc = ifp->if_softc; 1704 1705 if (ifp->if_flags & IFF_UP) 1706 mii_mediachg(&sc->sc_mii); 1707 return (0); 1708 } 1709