/*
 * Copyright (c) 2007 The DragonFly Project. All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Sepherosa Ziehau <sepherosa@gmail.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/dev/netif/et/if_et.c,v 1.4 2007/10/17 13:25:04 sephe Exp $
 */

#include <sys/param.h>
#include <sys/bitops.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/rman.h>
#include <sys/serialize.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/bpf.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/ifq_var.h>
#include <net/vlan/if_vlan_var.h>

#include <dev/netif/mii_layer/miivar.h>

#include <bus/pci/pcireg.h>
#include <bus/pci/pcivar.h>
#include <bus/pci/pcidevs.h>

#include <dev/netif/et/if_etreg.h>
#include <dev/netif/et/if_etvar.h>

#include "miibus_if.h"

static int	et_probe(device_t);
static int	et_attach(device_t);
static int	et_detach(device_t);
static int	et_shutdown(device_t);

static int	et_miibus_readreg(device_t, int, int);
static int	et_miibus_writereg(device_t, int, int, int);
static void	et_miibus_statchg(device_t);

static void	et_init(void *);
static int	et_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
static void	et_start(struct ifnet *);
static void	et_watchdog(struct ifnet *);
static int	et_ifmedia_upd(struct ifnet *);
static void	et_ifmedia_sts(struct ifnet *, struct ifmediareq *);

static int	et_sysctl_rx_intr_npkts(SYSCTL_HANDLER_ARGS);
static int	et_sysctl_rx_intr_delay(SYSCTL_HANDLER_ARGS);

static void	et_intr(void *);
static void	et_enable_intrs(struct et_softc *, uint32_t);
static void	et_disable_intrs(struct et_softc *);
static void
et_rxeof(struct et_softc *); 93 static void et_txeof(struct et_softc *); 94 95 static int et_dma_alloc(device_t); 96 static void et_dma_free(device_t); 97 static int et_dma_mem_create(device_t, bus_size_t, bus_dma_tag_t *, 98 void **, bus_addr_t *, bus_dmamap_t *); 99 static void et_dma_mem_destroy(bus_dma_tag_t, void *, bus_dmamap_t); 100 static int et_dma_mbuf_create(device_t); 101 static void et_dma_mbuf_destroy(device_t, int, const int[]); 102 static void et_dma_ring_addr(void *, bus_dma_segment_t *, int, int); 103 static void et_dma_buf_addr(void *, bus_dma_segment_t *, int, 104 bus_size_t, int); 105 static int et_init_tx_ring(struct et_softc *); 106 static int et_init_rx_ring(struct et_softc *); 107 static void et_free_tx_ring(struct et_softc *); 108 static void et_free_rx_ring(struct et_softc *); 109 static int et_encap(struct et_softc *, struct mbuf **); 110 static int et_newbuf(struct et_rxbuf_data *, int, int, int); 111 static int et_newbuf_cluster(struct et_rxbuf_data *, int, int); 112 static int et_newbuf_hdr(struct et_rxbuf_data *, int, int); 113 114 static void et_stop(struct et_softc *); 115 static int et_chip_init(struct et_softc *); 116 static void et_chip_attach(struct et_softc *); 117 static void et_init_mac(struct et_softc *); 118 static void et_init_rxmac(struct et_softc *); 119 static void et_init_txmac(struct et_softc *); 120 static int et_init_rxdma(struct et_softc *); 121 static int et_init_txdma(struct et_softc *); 122 static int et_start_rxdma(struct et_softc *); 123 static int et_start_txdma(struct et_softc *); 124 static int et_stop_rxdma(struct et_softc *); 125 static int et_stop_txdma(struct et_softc *); 126 static int et_enable_txrx(struct et_softc *, int); 127 static void et_reset(struct et_softc *); 128 static int et_bus_config(device_t); 129 static void et_get_eaddr(device_t, uint8_t[]); 130 static void et_setmulti(struct et_softc *); 131 static void et_tick(void *); 132 static void et_setmedia(struct et_softc *); 133 134 static const struct et_dev { 135 uint16_t vid; 136 uint16_t did; 137 const char *desc; 138 } et_devices[] = { 139 { PCI_VENDOR_LUCENT, PCI_PRODUCT_LUCENT_ET1310, 140 "Agere ET1310 Gigabit Ethernet" }, 141 { PCI_VENDOR_LUCENT, PCI_PRODUCT_LUCENT_ET1310_FAST, 142 "Agere ET1310 Fast Ethernet" }, 143 { 0, 0, NULL } 144 }; 145 146 static device_method_t et_methods[] = { 147 DEVMETHOD(device_probe, et_probe), 148 DEVMETHOD(device_attach, et_attach), 149 DEVMETHOD(device_detach, et_detach), 150 DEVMETHOD(device_shutdown, et_shutdown), 151 #if 0 152 DEVMETHOD(device_suspend, et_suspend), 153 DEVMETHOD(device_resume, et_resume), 154 #endif 155 156 DEVMETHOD(bus_print_child, bus_generic_print_child), 157 DEVMETHOD(bus_driver_added, bus_generic_driver_added), 158 159 DEVMETHOD(miibus_readreg, et_miibus_readreg), 160 DEVMETHOD(miibus_writereg, et_miibus_writereg), 161 DEVMETHOD(miibus_statchg, et_miibus_statchg), 162 163 { 0, 0 } 164 }; 165 166 static driver_t et_driver = { 167 "et", 168 et_methods, 169 sizeof(struct et_softc) 170 }; 171 172 static devclass_t et_devclass; 173 174 DECLARE_DUMMY_MODULE(if_et); 175 MODULE_DEPEND(if_et, miibus, 1, 1, 1); 176 DRIVER_MODULE(if_et, pci, et_driver, et_devclass, 0, 0); 177 DRIVER_MODULE(miibus, et, miibus_driver, miibus_devclass, 0, 0); 178 179 static int et_rx_intr_npkts = 32; 180 static int et_rx_intr_delay = 20; /* x10 usec */ 181 static int et_tx_intr_nsegs = 126; 182 static uint32_t et_timer = 1000 * 1000 * 1000; /* nanosec */ 183 184 TUNABLE_INT("hw.et.timer", &et_timer); 185 
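/*
 * NOTE: the et_* defaults above are only compiled-in defaults; each one is
 * also registered as a loader tunable through the TUNABLE_INT() hooks here,
 * so it can be overridden before the driver attaches, e.g. from
 * /boot/loader.conf (illustrative values only):
 *
 *	hw.et.rx_intr_npkts="64"
 *	hw.et.rx_intr_intvl="10"
 */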
TUNABLE_INT("hw.et.rx_intr_npkts", &et_rx_intr_npkts);
TUNABLE_INT("hw.et.rx_intr_intvl", &et_rx_intr_delay);
TUNABLE_INT("hw.et.tx_intr_nsegs", &et_tx_intr_nsegs);

struct et_bsize {
	int		bufsize;
	et_newbuf_t	newbuf;
};

static const struct et_bsize	et_bufsize[ET_RX_NRING] = {
	{ .bufsize = 0,	.newbuf = et_newbuf_hdr },
	{ .bufsize = 0,	.newbuf = et_newbuf_cluster },
};

static int
et_probe(device_t dev)
{
	const struct et_dev *d;
	uint16_t did, vid;

	vid = pci_get_vendor(dev);
	did = pci_get_device(dev);

	for (d = et_devices; d->desc != NULL; ++d) {
		if (vid == d->vid && did == d->did) {
			device_set_desc(dev, d->desc);
			return 0;
		}
	}
	return ENXIO;
}

static int
et_attach(device_t dev)
{
	struct et_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint8_t eaddr[ETHER_ADDR_LEN];
	int error;

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	callout_init(&sc->sc_tick);

	/*
	 * Initialize tunables
	 */
	sc->sc_rx_intr_npkts = et_rx_intr_npkts;
	sc->sc_rx_intr_delay = et_rx_intr_delay;
	sc->sc_tx_intr_nsegs = et_tx_intr_nsegs;
	sc->sc_timer = et_timer;

#ifndef BURN_BRIDGES
	if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
		uint32_t irq, mem;

		irq = pci_read_config(dev, PCIR_INTLINE, 4);
		mem = pci_read_config(dev, ET_PCIR_BAR, 4);

		device_printf(dev, "chip is in D%d power mode "
		    "-- setting to D0\n", pci_get_powerstate(dev));

		pci_set_powerstate(dev, PCI_POWERSTATE_D0);

		pci_write_config(dev, PCIR_INTLINE, irq, 4);
		pci_write_config(dev, ET_PCIR_BAR, mem, 4);
	}
#endif	/* !BURN_BRIDGES */

	/* Enable bus mastering */
	pci_enable_busmaster(dev);

	/*
	 * Allocate IO memory
	 */
	sc->sc_mem_rid = ET_PCIR_BAR;
	sc->sc_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
						&sc->sc_mem_rid, RF_ACTIVE);
	if (sc->sc_mem_res == NULL) {
		device_printf(dev, "can't allocate IO memory\n");
		return ENXIO;
	}
	sc->sc_mem_bt = rman_get_bustag(sc->sc_mem_res);
	sc->sc_mem_bh = rman_get_bushandle(sc->sc_mem_res);

	/*
	 * Allocate IRQ
	 */
	sc->sc_irq_rid = 0;
	sc->sc_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
						&sc->sc_irq_rid,
						RF_SHAREABLE | RF_ACTIVE);
	if (sc->sc_irq_res == NULL) {
		device_printf(dev, "can't allocate irq\n");
		error = ENXIO;
		goto fail;
	}

	/*
	 * Create sysctl tree
	 */
	sysctl_ctx_init(&sc->sc_sysctl_ctx);
	sc->sc_sysctl_tree = SYSCTL_ADD_NODE(&sc->sc_sysctl_ctx,
					     SYSCTL_STATIC_CHILDREN(_hw),
					     OID_AUTO,
					     device_get_nameunit(dev),
					     CTLFLAG_RD, 0, "");
	if (sc->sc_sysctl_tree == NULL) {
		device_printf(dev, "can't add sysctl node\n");
		error = ENXIO;
		goto fail;
	}

	SYSCTL_ADD_PROC(&sc->sc_sysctl_ctx,
			SYSCTL_CHILDREN(sc->sc_sysctl_tree),
			OID_AUTO, "rx_intr_npkts", CTLTYPE_INT | CTLFLAG_RW,
			sc, 0, et_sysctl_rx_intr_npkts, "I",
			"RX IM, # packets per RX interrupt");
	SYSCTL_ADD_PROC(&sc->sc_sysctl_ctx,
			SYSCTL_CHILDREN(sc->sc_sysctl_tree),
			OID_AUTO, "rx_intr_delay", CTLTYPE_INT | CTLFLAG_RW,
			sc, 0, et_sysctl_rx_intr_delay, "I",
			"RX IM, RX interrupt delay (x10 usec)");
	SYSCTL_ADD_INT(&sc->sc_sysctl_ctx,
		       SYSCTL_CHILDREN(sc->sc_sysctl_tree), OID_AUTO,
		       "tx_intr_nsegs", CTLFLAG_RW, &sc->sc_tx_intr_nsegs, 0,
		       "TX IM, # segments per TX interrupt");
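	/*
	 * Unlike the two RX moderation knobs above, "tx_intr_nsegs" and
	 * "timer" are exported as plain integer sysctls (no handler proc):
	 * a new value is not pushed to the chip immediately, it is simply
	 * picked up the next time et_encap()/et_init() consults it.
	 */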
311 SYSCTL_ADD_UINT(&sc->sc_sysctl_ctx, 312 SYSCTL_CHILDREN(sc->sc_sysctl_tree), OID_AUTO, 313 "timer", CTLFLAG_RW, &sc->sc_timer, 0, 314 "TX timer"); 315 316 error = et_bus_config(dev); 317 if (error) 318 goto fail; 319 320 et_get_eaddr(dev, eaddr); 321 322 CSR_WRITE_4(sc, ET_PM, 323 ET_PM_SYSCLK_GATE | ET_PM_TXCLK_GATE | ET_PM_RXCLK_GATE); 324 325 et_reset(sc); 326 327 et_disable_intrs(sc); 328 329 error = et_dma_alloc(dev); 330 if (error) 331 goto fail; 332 333 ifp->if_softc = sc; 334 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 335 ifp->if_init = et_init; 336 ifp->if_ioctl = et_ioctl; 337 ifp->if_start = et_start; 338 ifp->if_watchdog = et_watchdog; 339 ifp->if_mtu = ETHERMTU; 340 ifp->if_capabilities = IFCAP_VLAN_MTU; 341 ifp->if_capenable = ifp->if_capabilities; 342 ifq_set_maxlen(&ifp->if_snd, ET_TX_NDESC); 343 ifq_set_ready(&ifp->if_snd); 344 345 et_chip_attach(sc); 346 347 error = mii_phy_probe(dev, &sc->sc_miibus, 348 et_ifmedia_upd, et_ifmedia_sts); 349 if (error) { 350 device_printf(dev, "can't probe any PHY\n"); 351 goto fail; 352 } 353 354 ether_ifattach(ifp, eaddr, NULL); 355 356 error = bus_setup_intr(dev, sc->sc_irq_res, INTR_MPSAFE, et_intr, sc, 357 &sc->sc_irq_handle, ifp->if_serializer); 358 if (error) { 359 ether_ifdetach(ifp); 360 device_printf(dev, "can't setup intr\n"); 361 goto fail; 362 } 363 return 0; 364 fail: 365 et_detach(dev); 366 return error; 367 } 368 369 static int 370 et_detach(device_t dev) 371 { 372 struct et_softc *sc = device_get_softc(dev); 373 374 if (device_is_attached(dev)) { 375 struct ifnet *ifp = &sc->arpcom.ac_if; 376 377 lwkt_serialize_enter(ifp->if_serializer); 378 et_stop(sc); 379 bus_teardown_intr(dev, sc->sc_irq_res, sc->sc_irq_handle); 380 lwkt_serialize_exit(ifp->if_serializer); 381 382 ether_ifdetach(ifp); 383 } 384 385 if (sc->sc_sysctl_tree != NULL) 386 sysctl_ctx_free(&sc->sc_sysctl_ctx); 387 388 if (sc->sc_miibus != NULL) 389 device_delete_child(dev, sc->sc_miibus); 390 bus_generic_detach(dev); 391 392 if (sc->sc_irq_res != NULL) { 393 bus_release_resource(dev, SYS_RES_IRQ, sc->sc_irq_rid, 394 sc->sc_irq_res); 395 } 396 397 if (sc->sc_mem_res != NULL) { 398 bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_mem_rid, 399 sc->sc_mem_res); 400 } 401 402 et_dma_free(dev); 403 404 return 0; 405 } 406 407 static int 408 et_shutdown(device_t dev) 409 { 410 struct et_softc *sc = device_get_softc(dev); 411 struct ifnet *ifp = &sc->arpcom.ac_if; 412 413 lwkt_serialize_enter(ifp->if_serializer); 414 et_stop(sc); 415 lwkt_serialize_exit(ifp->if_serializer); 416 return 0; 417 } 418 419 static int 420 et_miibus_readreg(device_t dev, int phy, int reg) 421 { 422 struct et_softc *sc = device_get_softc(dev); 423 uint32_t val; 424 int i, ret; 425 426 /* Stop any pending operations */ 427 CSR_WRITE_4(sc, ET_MII_CMD, 0); 428 429 val = __SHIFTIN(phy, ET_MII_ADDR_PHY) | 430 __SHIFTIN(reg, ET_MII_ADDR_REG); 431 CSR_WRITE_4(sc, ET_MII_ADDR, val); 432 433 /* Start reading */ 434 CSR_WRITE_4(sc, ET_MII_CMD, ET_MII_CMD_READ); 435 436 #define NRETRY 50 437 438 for (i = 0; i < NRETRY; ++i) { 439 val = CSR_READ_4(sc, ET_MII_IND); 440 if ((val & (ET_MII_IND_BUSY | ET_MII_IND_INVALID)) == 0) 441 break; 442 DELAY(50); 443 } 444 if (i == NRETRY) { 445 if_printf(&sc->arpcom.ac_if, 446 "read phy %d, reg %d timed out\n", phy, reg); 447 ret = 0; 448 goto back; 449 } 450 451 #undef NRETRY 452 453 val = CSR_READ_4(sc, ET_MII_STAT); 454 ret = __SHIFTOUT(val, ET_MII_STAT_VALUE); 455 456 back: 457 /* Make sure that the current operation is stopped */ 458 
	CSR_WRITE_4(sc, ET_MII_CMD, 0);
	return ret;
}

static int
et_miibus_writereg(device_t dev, int phy, int reg, int val0)
{
	struct et_softc *sc = device_get_softc(dev);
	uint32_t val;
	int i;

	/* Stop any pending operations */
	CSR_WRITE_4(sc, ET_MII_CMD, 0);

	val = __SHIFTIN(phy, ET_MII_ADDR_PHY) |
	      __SHIFTIN(reg, ET_MII_ADDR_REG);
	CSR_WRITE_4(sc, ET_MII_ADDR, val);

	/* Start writing */
	CSR_WRITE_4(sc, ET_MII_CTRL, __SHIFTIN(val0, ET_MII_CTRL_VALUE));

#define NRETRY 100

	for (i = 0; i < NRETRY; ++i) {
		val = CSR_READ_4(sc, ET_MII_IND);
		if ((val & ET_MII_IND_BUSY) == 0)
			break;
		DELAY(50);
	}
	if (i == NRETRY) {
		if_printf(&sc->arpcom.ac_if,
			  "write phy %d, reg %d timed out\n", phy, reg);
		et_miibus_readreg(dev, phy, reg);
	}

#undef NRETRY

	/* Make sure that the current operation is stopped */
	CSR_WRITE_4(sc, ET_MII_CMD, 0);
	return 0;
}

static void
et_miibus_statchg(device_t dev)
{
	et_setmedia(device_get_softc(dev));
}

static int
et_ifmedia_upd(struct ifnet *ifp)
{
	struct et_softc *sc = ifp->if_softc;
	struct mii_data *mii = device_get_softc(sc->sc_miibus);

	if (mii->mii_instance != 0) {
		struct mii_softc *miisc;

		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
	mii_mediachg(mii);

	return 0;
}

static void
et_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct et_softc *sc = ifp->if_softc;
	struct mii_data *mii = device_get_softc(sc->sc_miibus);

	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
}

static void
et_stop(struct et_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;

	ASSERT_SERIALIZED(ifp->if_serializer);

	callout_stop(&sc->sc_tick);

	et_stop_rxdma(sc);
	et_stop_txdma(sc);

	et_disable_intrs(sc);

	et_free_tx_ring(sc);
	et_free_rx_ring(sc);

	et_reset(sc);

	sc->sc_tx = 0;
	sc->sc_tx_intr = 0;
	sc->sc_txrx_enabled = 0;

	ifp->if_timer = 0;
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
}

static int
et_bus_config(device_t dev)
{
	uint32_t val, max_plsz;
	uint16_t ack_latency, replay_timer;

	/*
	 * Test whether EEPROM is valid
	 * NOTE: Read twice to get the correct value
	 */
	pci_read_config(dev, ET_PCIR_EEPROM_STATUS, 1);
	val = pci_read_config(dev, ET_PCIR_EEPROM_STATUS, 1);
	if (val & ET_PCIM_EEPROM_STATUS_ERROR) {
		device_printf(dev, "EEPROM status error 0x%02x\n", val);
		return ENXIO;
	}

	/* TODO: LED */

	/*
	 * Configure ACK latency and replay timer according to
	 * max payload size
	 */
	val = pci_read_config(dev, ET_PCIR_DEVICE_CAPS, 4);
	max_plsz = val & ET_PCIM_DEVICE_CAPS_MAX_PLSZ;

	switch (max_plsz) {
	case ET_PCIV_DEVICE_CAPS_PLSZ_128:
		ack_latency = ET_PCIV_ACK_LATENCY_128;
		replay_timer = ET_PCIV_REPLAY_TIMER_128;
		break;

	case ET_PCIV_DEVICE_CAPS_PLSZ_256:
		ack_latency = ET_PCIV_ACK_LATENCY_256;
		replay_timer = ET_PCIV_REPLAY_TIMER_256;
		break;

	default:
		ack_latency = pci_read_config(dev, ET_PCIR_ACK_LATENCY, 2);
		replay_timer = pci_read_config(dev, ET_PCIR_REPLAY_TIMER, 2);
		device_printf(dev, "ack latency %u, replay timer %u\n",
			      ack_latency, replay_timer);
		break;
	}
	if
(ack_latency != 0) { 606 pci_write_config(dev, ET_PCIR_ACK_LATENCY, ack_latency, 2); 607 pci_write_config(dev, ET_PCIR_REPLAY_TIMER, replay_timer, 2); 608 } 609 610 /* 611 * Set L0s and L1 latency timer to 2us 612 */ 613 val = ET_PCIV_L0S_LATENCY(2) | ET_PCIV_L1_LATENCY(2); 614 pci_write_config(dev, ET_PCIR_L0S_L1_LATENCY, val, 1); 615 616 /* 617 * Set max read request size to 2048 bytes 618 */ 619 val = pci_read_config(dev, ET_PCIR_DEVICE_CTRL, 2); 620 val &= ~ET_PCIM_DEVICE_CTRL_MAX_RRSZ; 621 val |= ET_PCIV_DEVICE_CTRL_RRSZ_2K; 622 pci_write_config(dev, ET_PCIR_DEVICE_CTRL, val, 2); 623 624 return 0; 625 } 626 627 static void 628 et_get_eaddr(device_t dev, uint8_t eaddr[]) 629 { 630 uint32_t val; 631 int i; 632 633 val = pci_read_config(dev, ET_PCIR_MAC_ADDR0, 4); 634 for (i = 0; i < 4; ++i) 635 eaddr[i] = (val >> (8 * i)) & 0xff; 636 637 val = pci_read_config(dev, ET_PCIR_MAC_ADDR1, 2); 638 for (; i < ETHER_ADDR_LEN; ++i) 639 eaddr[i] = (val >> (8 * (i - 4))) & 0xff; 640 } 641 642 static void 643 et_reset(struct et_softc *sc) 644 { 645 CSR_WRITE_4(sc, ET_MAC_CFG1, 646 ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC | 647 ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC | 648 ET_MAC_CFG1_SIM_RST | ET_MAC_CFG1_SOFT_RST); 649 650 CSR_WRITE_4(sc, ET_SWRST, 651 ET_SWRST_TXDMA | ET_SWRST_RXDMA | 652 ET_SWRST_TXMAC | ET_SWRST_RXMAC | 653 ET_SWRST_MAC | ET_SWRST_MAC_STAT | ET_SWRST_MMC); 654 655 CSR_WRITE_4(sc, ET_MAC_CFG1, 656 ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC | 657 ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC); 658 CSR_WRITE_4(sc, ET_MAC_CFG1, 0); 659 } 660 661 static void 662 et_disable_intrs(struct et_softc *sc) 663 { 664 CSR_WRITE_4(sc, ET_INTR_MASK, 0xffffffff); 665 } 666 667 static void 668 et_enable_intrs(struct et_softc *sc, uint32_t intrs) 669 { 670 CSR_WRITE_4(sc, ET_INTR_MASK, ~intrs); 671 } 672 673 static int 674 et_dma_alloc(device_t dev) 675 { 676 struct et_softc *sc = device_get_softc(dev); 677 struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring; 678 struct et_txstatus_data *txsd = &sc->sc_tx_status; 679 struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring; 680 struct et_rxstatus_data *rxsd = &sc->sc_rx_status; 681 int i, error; 682 683 /* 684 * Create top level DMA tag 685 */ 686 error = bus_dma_tag_create(NULL, 1, 0, 687 BUS_SPACE_MAXADDR_32BIT, 688 BUS_SPACE_MAXADDR, 689 NULL, NULL, 690 MAXBSIZE, 691 BUS_SPACE_UNRESTRICTED, 692 BUS_SPACE_MAXSIZE_32BIT, 693 0, &sc->sc_dtag); 694 if (error) { 695 device_printf(dev, "can't create DMA tag\n"); 696 return error; 697 } 698 699 /* 700 * Create TX ring DMA stuffs 701 */ 702 error = et_dma_mem_create(dev, ET_TX_RING_SIZE, &tx_ring->tr_dtag, 703 (void **)&tx_ring->tr_desc, 704 &tx_ring->tr_paddr, &tx_ring->tr_dmap); 705 if (error) { 706 device_printf(dev, "can't create TX ring DMA stuffs\n"); 707 return error; 708 } 709 710 /* 711 * Create TX status DMA stuffs 712 */ 713 error = et_dma_mem_create(dev, sizeof(uint32_t), &txsd->txsd_dtag, 714 (void **)&txsd->txsd_status, 715 &txsd->txsd_paddr, &txsd->txsd_dmap); 716 if (error) { 717 device_printf(dev, "can't create TX status DMA stuffs\n"); 718 return error; 719 } 720 721 /* 722 * Create DMA stuffs for RX rings 723 */ 724 for (i = 0; i < ET_RX_NRING; ++i) { 725 static const uint32_t rx_ring_posreg[ET_RX_NRING] = 726 { ET_RX_RING0_POS, ET_RX_RING1_POS }; 727 728 struct et_rxdesc_ring *rx_ring = &sc->sc_rx_ring[i]; 729 730 error = et_dma_mem_create(dev, ET_RX_RING_SIZE, 731 &rx_ring->rr_dtag, 732 (void **)&rx_ring->rr_desc, 733 &rx_ring->rr_paddr, 734 &rx_ring->rr_dmap); 735 if 
(error) { 736 device_printf(dev, "can't create DMA stuffs for " 737 "the %d RX ring\n", i); 738 return error; 739 } 740 rx_ring->rr_posreg = rx_ring_posreg[i]; 741 } 742 743 /* 744 * Create RX stat ring DMA stuffs 745 */ 746 error = et_dma_mem_create(dev, ET_RXSTAT_RING_SIZE, 747 &rxst_ring->rsr_dtag, 748 (void **)&rxst_ring->rsr_stat, 749 &rxst_ring->rsr_paddr, &rxst_ring->rsr_dmap); 750 if (error) { 751 device_printf(dev, "can't create RX stat ring DMA stuffs\n"); 752 return error; 753 } 754 755 /* 756 * Create RX status DMA stuffs 757 */ 758 error = et_dma_mem_create(dev, sizeof(struct et_rxstatus), 759 &rxsd->rxsd_dtag, 760 (void **)&rxsd->rxsd_status, 761 &rxsd->rxsd_paddr, &rxsd->rxsd_dmap); 762 if (error) { 763 device_printf(dev, "can't create RX status DMA stuffs\n"); 764 return error; 765 } 766 767 /* 768 * Create mbuf DMA stuffs 769 */ 770 error = et_dma_mbuf_create(dev); 771 if (error) 772 return error; 773 774 return 0; 775 } 776 777 static void 778 et_dma_free(device_t dev) 779 { 780 struct et_softc *sc = device_get_softc(dev); 781 struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring; 782 struct et_txstatus_data *txsd = &sc->sc_tx_status; 783 struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring; 784 struct et_rxstatus_data *rxsd = &sc->sc_rx_status; 785 int i, rx_done[ET_RX_NRING]; 786 787 /* 788 * Destroy TX ring DMA stuffs 789 */ 790 et_dma_mem_destroy(tx_ring->tr_dtag, tx_ring->tr_desc, 791 tx_ring->tr_dmap); 792 793 /* 794 * Destroy TX status DMA stuffs 795 */ 796 et_dma_mem_destroy(txsd->txsd_dtag, txsd->txsd_status, 797 txsd->txsd_dmap); 798 799 /* 800 * Destroy DMA stuffs for RX rings 801 */ 802 for (i = 0; i < ET_RX_NRING; ++i) { 803 struct et_rxdesc_ring *rx_ring = &sc->sc_rx_ring[i]; 804 805 et_dma_mem_destroy(rx_ring->rr_dtag, rx_ring->rr_desc, 806 rx_ring->rr_dmap); 807 } 808 809 /* 810 * Destroy RX stat ring DMA stuffs 811 */ 812 et_dma_mem_destroy(rxst_ring->rsr_dtag, rxst_ring->rsr_stat, 813 rxst_ring->rsr_dmap); 814 815 /* 816 * Destroy RX status DMA stuffs 817 */ 818 et_dma_mem_destroy(rxsd->rxsd_dtag, rxsd->rxsd_status, 819 rxsd->rxsd_dmap); 820 821 /* 822 * Destroy mbuf DMA stuffs 823 */ 824 for (i = 0; i < ET_RX_NRING; ++i) 825 rx_done[i] = ET_RX_NDESC; 826 et_dma_mbuf_destroy(dev, ET_TX_NDESC, rx_done); 827 828 /* 829 * Destroy top level DMA tag 830 */ 831 if (sc->sc_dtag != NULL) 832 bus_dma_tag_destroy(sc->sc_dtag); 833 } 834 835 static int 836 et_dma_mbuf_create(device_t dev) 837 { 838 struct et_softc *sc = device_get_softc(dev); 839 struct et_txbuf_data *tbd = &sc->sc_tx_data; 840 int i, error, rx_done[ET_RX_NRING]; 841 842 /* 843 * Create mbuf DMA tag 844 */ 845 error = bus_dma_tag_create(sc->sc_dtag, 1, 0, 846 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, 847 NULL, NULL, 848 MCLBYTES, ET_NSEG_MAX, 849 BUS_SPACE_MAXSIZE_32BIT, 850 BUS_DMA_ALLOCNOW, &sc->sc_mbuf_dtag); 851 if (error) { 852 device_printf(dev, "can't create mbuf DMA tag\n"); 853 return error; 854 } 855 856 /* 857 * Create spare DMA map for RX mbufs 858 */ 859 error = bus_dmamap_create(sc->sc_mbuf_dtag, 0, &sc->sc_mbuf_tmp_dmap); 860 if (error) { 861 device_printf(dev, "can't create spare mbuf DMA map\n"); 862 bus_dma_tag_destroy(sc->sc_mbuf_dtag); 863 sc->sc_mbuf_dtag = NULL; 864 return error; 865 } 866 867 /* 868 * Create DMA maps for RX mbufs 869 */ 870 bzero(rx_done, sizeof(rx_done)); 871 for (i = 0; i < ET_RX_NRING; ++i) { 872 struct et_rxbuf_data *rbd = &sc->sc_rx_data[i]; 873 int j; 874 875 for (j = 0; j < ET_RX_NDESC; ++j) { 876 error = bus_dmamap_create(sc->sc_mbuf_dtag, 0, 877 
&rbd->rbd_buf[j].rb_dmap); 878 if (error) { 879 device_printf(dev, "can't create %d RX mbuf " 880 "for %d RX ring\n", j, i); 881 rx_done[i] = j; 882 et_dma_mbuf_destroy(dev, 0, rx_done); 883 return error; 884 } 885 } 886 rx_done[i] = ET_RX_NDESC; 887 888 rbd->rbd_softc = sc; 889 rbd->rbd_ring = &sc->sc_rx_ring[i]; 890 } 891 892 /* 893 * Create DMA maps for TX mbufs 894 */ 895 for (i = 0; i < ET_TX_NDESC; ++i) { 896 error = bus_dmamap_create(sc->sc_mbuf_dtag, 0, 897 &tbd->tbd_buf[i].tb_dmap); 898 if (error) { 899 device_printf(dev, "can't create %d TX mbuf " 900 "DMA map\n", i); 901 et_dma_mbuf_destroy(dev, i, rx_done); 902 return error; 903 } 904 } 905 906 return 0; 907 } 908 909 static void 910 et_dma_mbuf_destroy(device_t dev, int tx_done, const int rx_done[]) 911 { 912 struct et_softc *sc = device_get_softc(dev); 913 struct et_txbuf_data *tbd = &sc->sc_tx_data; 914 int i; 915 916 if (sc->sc_mbuf_dtag == NULL) 917 return; 918 919 /* 920 * Destroy DMA maps for RX mbufs 921 */ 922 for (i = 0; i < ET_RX_NRING; ++i) { 923 struct et_rxbuf_data *rbd = &sc->sc_rx_data[i]; 924 int j; 925 926 for (j = 0; j < rx_done[i]; ++j) { 927 struct et_rxbuf *rb = &rbd->rbd_buf[j]; 928 929 KASSERT(rb->rb_mbuf == NULL, 930 ("RX mbuf in %d RX ring is not freed yet\n", i)); 931 bus_dmamap_destroy(sc->sc_mbuf_dtag, rb->rb_dmap); 932 } 933 } 934 935 /* 936 * Destroy DMA maps for TX mbufs 937 */ 938 for (i = 0; i < tx_done; ++i) { 939 struct et_txbuf *tb = &tbd->tbd_buf[i]; 940 941 KASSERT(tb->tb_mbuf == NULL, ("TX mbuf is not freed yet\n")); 942 bus_dmamap_destroy(sc->sc_mbuf_dtag, tb->tb_dmap); 943 } 944 945 /* 946 * Destroy spare mbuf DMA map 947 */ 948 bus_dmamap_destroy(sc->sc_mbuf_dtag, sc->sc_mbuf_tmp_dmap); 949 950 /* 951 * Destroy mbuf DMA tag 952 */ 953 bus_dma_tag_destroy(sc->sc_mbuf_dtag); 954 sc->sc_mbuf_dtag = NULL; 955 } 956 957 static int 958 et_dma_mem_create(device_t dev, bus_size_t size, bus_dma_tag_t *dtag, 959 void **addr, bus_addr_t *paddr, bus_dmamap_t *dmap) 960 { 961 struct et_softc *sc = device_get_softc(dev); 962 int error; 963 964 error = bus_dma_tag_create(sc->sc_dtag, ET_ALIGN, 0, 965 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, 966 NULL, NULL, 967 size, 1, BUS_SPACE_MAXSIZE_32BIT, 968 0, dtag); 969 if (error) { 970 device_printf(dev, "can't create DMA tag\n"); 971 return error; 972 } 973 974 error = bus_dmamem_alloc(*dtag, addr, BUS_DMA_WAITOK | BUS_DMA_ZERO, 975 dmap); 976 if (error) { 977 device_printf(dev, "can't allocate DMA mem\n"); 978 bus_dma_tag_destroy(*dtag); 979 *dtag = NULL; 980 return error; 981 } 982 983 error = bus_dmamap_load(*dtag, *dmap, *addr, size, 984 et_dma_ring_addr, paddr, BUS_DMA_WAITOK); 985 if (error) { 986 device_printf(dev, "can't load DMA mem\n"); 987 bus_dmamem_free(*dtag, *addr, *dmap); 988 bus_dma_tag_destroy(*dtag); 989 *dtag = NULL; 990 return error; 991 } 992 return 0; 993 } 994 995 static void 996 et_dma_mem_destroy(bus_dma_tag_t dtag, void *addr, bus_dmamap_t dmap) 997 { 998 if (dtag != NULL) { 999 bus_dmamap_unload(dtag, dmap); 1000 bus_dmamem_free(dtag, addr, dmap); 1001 bus_dma_tag_destroy(dtag); 1002 } 1003 } 1004 1005 static void 1006 et_dma_ring_addr(void *arg, bus_dma_segment_t *seg, int nseg, int error) 1007 { 1008 KASSERT(nseg == 1, ("too many segments\n")); 1009 *((bus_addr_t *)arg) = seg->ds_addr; 1010 } 1011 1012 static void 1013 et_chip_attach(struct et_softc *sc) 1014 { 1015 uint32_t val; 1016 1017 /* 1018 * Perform minimal initialization 1019 */ 1020 1021 /* Disable loopback */ 1022 CSR_WRITE_4(sc, ET_LOOPBACK, 0); 1023 1024 /* Reset MAC 
*/ 1025 CSR_WRITE_4(sc, ET_MAC_CFG1, 1026 ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC | 1027 ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC | 1028 ET_MAC_CFG1_SIM_RST | ET_MAC_CFG1_SOFT_RST); 1029 1030 /* 1031 * Setup half duplex mode 1032 */ 1033 val = __SHIFTIN(10, ET_MAC_HDX_ALT_BEB_TRUNC) | 1034 __SHIFTIN(15, ET_MAC_HDX_REXMIT_MAX) | 1035 __SHIFTIN(55, ET_MAC_HDX_COLLWIN) | 1036 ET_MAC_HDX_EXC_DEFER; 1037 CSR_WRITE_4(sc, ET_MAC_HDX, val); 1038 1039 /* Clear MAC control */ 1040 CSR_WRITE_4(sc, ET_MAC_CTRL, 0); 1041 1042 /* Reset MII */ 1043 CSR_WRITE_4(sc, ET_MII_CFG, ET_MII_CFG_CLKRST); 1044 1045 /* Bring MAC out of reset state */ 1046 CSR_WRITE_4(sc, ET_MAC_CFG1, 0); 1047 1048 /* Enable memory controllers */ 1049 CSR_WRITE_4(sc, ET_MMC_CTRL, ET_MMC_CTRL_ENABLE); 1050 } 1051 1052 static void 1053 et_intr(void *xsc) 1054 { 1055 struct et_softc *sc = xsc; 1056 struct ifnet *ifp = &sc->arpcom.ac_if; 1057 uint32_t intrs; 1058 1059 ASSERT_SERIALIZED(ifp->if_serializer); 1060 1061 if ((ifp->if_flags & IFF_RUNNING) == 0) 1062 return; 1063 1064 et_disable_intrs(sc); 1065 1066 intrs = CSR_READ_4(sc, ET_INTR_STATUS); 1067 intrs &= ET_INTRS; 1068 if (intrs == 0) /* Not interested */ 1069 goto back; 1070 1071 if (intrs & ET_INTR_RXEOF) 1072 et_rxeof(sc); 1073 if (intrs & (ET_INTR_TXEOF | ET_INTR_TIMER)) 1074 et_txeof(sc); 1075 if (intrs & ET_INTR_TIMER) 1076 CSR_WRITE_4(sc, ET_TIMER, sc->sc_timer); 1077 back: 1078 et_enable_intrs(sc, ET_INTRS); 1079 } 1080 1081 static void 1082 et_init(void *xsc) 1083 { 1084 struct et_softc *sc = xsc; 1085 struct ifnet *ifp = &sc->arpcom.ac_if; 1086 const struct et_bsize *arr; 1087 int error, i; 1088 1089 ASSERT_SERIALIZED(ifp->if_serializer); 1090 1091 et_stop(sc); 1092 1093 arr = ifp->if_mtu <= ETHERMTU ? et_bufsize : NULL; 1094 for (i = 0; i < ET_RX_NRING; ++i) { 1095 sc->sc_rx_data[i].rbd_bufsize = arr[i].bufsize; 1096 sc->sc_rx_data[i].rbd_newbuf = arr[i].newbuf; 1097 } 1098 1099 error = et_init_tx_ring(sc); 1100 if (error) 1101 goto back; 1102 1103 error = et_init_rx_ring(sc); 1104 if (error) 1105 goto back; 1106 1107 error = et_chip_init(sc); 1108 if (error) 1109 goto back; 1110 1111 error = et_enable_txrx(sc, 1); 1112 if (error) 1113 goto back; 1114 1115 et_enable_intrs(sc, ET_INTRS); 1116 1117 callout_reset(&sc->sc_tick, hz, et_tick, sc); 1118 1119 CSR_WRITE_4(sc, ET_TIMER, sc->sc_timer); 1120 1121 ifp->if_flags |= IFF_RUNNING; 1122 ifp->if_flags &= ~IFF_OACTIVE; 1123 back: 1124 if (error) 1125 et_stop(sc); 1126 } 1127 1128 static int 1129 et_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data, struct ucred *cr) 1130 { 1131 struct et_softc *sc = ifp->if_softc; 1132 struct mii_data *mii = device_get_softc(sc->sc_miibus); 1133 struct ifreq *ifr = (struct ifreq *)data; 1134 int error = 0; 1135 1136 ASSERT_SERIALIZED(ifp->if_serializer); 1137 1138 switch (cmd) { 1139 case SIOCSIFFLAGS: 1140 if (ifp->if_flags & IFF_UP) { 1141 if (ifp->if_flags & IFF_RUNNING) { 1142 if ((ifp->if_flags ^ sc->sc_if_flags) & 1143 (IFF_ALLMULTI | IFF_PROMISC | IFF_BROADCAST)) 1144 et_setmulti(sc); 1145 } else { 1146 et_init(sc); 1147 } 1148 } else { 1149 if (ifp->if_flags & IFF_RUNNING) 1150 et_stop(sc); 1151 } 1152 sc->sc_if_flags = ifp->if_flags; 1153 break; 1154 1155 case SIOCSIFMEDIA: 1156 case SIOCGIFMEDIA: 1157 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd); 1158 break; 1159 1160 case SIOCADDMULTI: 1161 case SIOCDELMULTI: 1162 if (ifp->if_flags & IFF_RUNNING) 1163 et_setmulti(sc); 1164 break; 1165 1166 case SIOCSIFMTU: 1167 /* TODO */ 1168 error = EOPNOTSUPP; 1169 
break; 1170 1171 default: 1172 error = ether_ioctl(ifp, cmd, data); 1173 break; 1174 } 1175 return error; 1176 } 1177 1178 static void 1179 et_start(struct ifnet *ifp) 1180 { 1181 struct et_softc *sc = ifp->if_softc; 1182 struct et_txbuf_data *tbd = &sc->sc_tx_data; 1183 int trans; 1184 1185 ASSERT_SERIALIZED(ifp->if_serializer); 1186 1187 if (!sc->sc_txrx_enabled) 1188 return; 1189 1190 if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING) 1191 return; 1192 1193 trans = 0; 1194 for (;;) { 1195 struct mbuf *m; 1196 1197 if ((tbd->tbd_used + ET_NSEG_SPARE) > ET_TX_NDESC) { 1198 ifp->if_flags |= IFF_OACTIVE; 1199 break; 1200 } 1201 1202 m = ifq_dequeue(&ifp->if_snd, NULL); 1203 if (m == NULL) 1204 break; 1205 1206 if (et_encap(sc, &m)) { 1207 ifp->if_oerrors++; 1208 ifp->if_flags |= IFF_OACTIVE; 1209 break; 1210 } 1211 trans = 1; 1212 1213 BPF_MTAP(ifp, m); 1214 } 1215 1216 if (trans) 1217 ifp->if_timer = 5; 1218 } 1219 1220 static void 1221 et_watchdog(struct ifnet *ifp) 1222 { 1223 ASSERT_SERIALIZED(ifp->if_serializer); 1224 1225 if_printf(ifp, "watchdog timed out\n"); 1226 1227 ifp->if_init(ifp->if_softc); 1228 ifp->if_start(ifp); 1229 } 1230 1231 static int 1232 et_stop_rxdma(struct et_softc *sc) 1233 { 1234 CSR_WRITE_4(sc, ET_RXDMA_CTRL, 1235 ET_RXDMA_CTRL_HALT | ET_RXDMA_CTRL_RING1_ENABLE); 1236 1237 DELAY(5); 1238 if ((CSR_READ_4(sc, ET_RXDMA_CTRL) & ET_RXDMA_CTRL_HALTED) == 0) { 1239 if_printf(&sc->arpcom.ac_if, "can't stop RX DMA engine\n"); 1240 return ETIMEDOUT; 1241 } 1242 return 0; 1243 } 1244 1245 static int 1246 et_stop_txdma(struct et_softc *sc) 1247 { 1248 CSR_WRITE_4(sc, ET_TXDMA_CTRL, 1249 ET_TXDMA_CTRL_HALT | ET_TXDMA_CTRL_SINGLE_EPKT); 1250 return 0; 1251 } 1252 1253 static void 1254 et_free_tx_ring(struct et_softc *sc) 1255 { 1256 struct et_txbuf_data *tbd = &sc->sc_tx_data; 1257 struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring; 1258 int i; 1259 1260 for (i = 0; i < ET_TX_NDESC; ++i) { 1261 struct et_txbuf *tb = &tbd->tbd_buf[i]; 1262 1263 if (tb->tb_mbuf != NULL) { 1264 bus_dmamap_unload(sc->sc_mbuf_dtag, tb->tb_dmap); 1265 m_freem(tb->tb_mbuf); 1266 tb->tb_mbuf = NULL; 1267 } 1268 } 1269 1270 bzero(tx_ring->tr_desc, ET_TX_RING_SIZE); 1271 bus_dmamap_sync(tx_ring->tr_dtag, tx_ring->tr_dmap, 1272 BUS_DMASYNC_PREWRITE); 1273 } 1274 1275 static void 1276 et_free_rx_ring(struct et_softc *sc) 1277 { 1278 int n; 1279 1280 for (n = 0; n < ET_RX_NRING; ++n) { 1281 struct et_rxbuf_data *rbd = &sc->sc_rx_data[n]; 1282 struct et_rxdesc_ring *rx_ring = &sc->sc_rx_ring[n]; 1283 int i; 1284 1285 for (i = 0; i < ET_RX_NDESC; ++i) { 1286 struct et_rxbuf *rb = &rbd->rbd_buf[i]; 1287 1288 if (rb->rb_mbuf != NULL) { 1289 bus_dmamap_unload(sc->sc_mbuf_dtag, 1290 rb->rb_dmap); 1291 m_freem(rb->rb_mbuf); 1292 rb->rb_mbuf = NULL; 1293 } 1294 } 1295 1296 bzero(rx_ring->rr_desc, ET_RX_RING_SIZE); 1297 bus_dmamap_sync(rx_ring->rr_dtag, rx_ring->rr_dmap, 1298 BUS_DMASYNC_PREWRITE); 1299 } 1300 } 1301 1302 static void 1303 et_setmulti(struct et_softc *sc) 1304 { 1305 struct ifnet *ifp = &sc->arpcom.ac_if; 1306 uint32_t hash[4] = { 0, 0, 0, 0 }; 1307 uint32_t rxmac_ctrl, pktfilt; 1308 struct ifmultiaddr *ifma; 1309 int i, count; 1310 1311 pktfilt = CSR_READ_4(sc, ET_PKTFILT); 1312 rxmac_ctrl = CSR_READ_4(sc, ET_RXMAC_CTRL); 1313 1314 pktfilt &= ~(ET_PKTFILT_BCAST | ET_PKTFILT_MCAST | ET_PKTFILT_UCAST); 1315 if (ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) { 1316 rxmac_ctrl |= ET_RXMAC_CTRL_NO_PKTFILT; 1317 goto back; 1318 } 1319 1320 count = 0; 1321 LIST_FOREACH(ifma, 
&ifp->if_multiaddrs, ifma_link) { 1322 uint32_t *hp, h; 1323 1324 if (ifma->ifma_addr->sa_family != AF_LINK) 1325 continue; 1326 1327 h = ether_crc32_be(LLADDR((struct sockaddr_dl *) 1328 ifma->ifma_addr), ETHER_ADDR_LEN); 1329 h = (h & 0x3f800000) >> 23; 1330 1331 hp = &hash[0]; 1332 if (h >= 32 && h < 64) { 1333 h -= 32; 1334 hp = &hash[1]; 1335 } else if (h >= 64 && h < 96) { 1336 h -= 64; 1337 hp = &hash[2]; 1338 } else if (h >= 96) { 1339 h -= 96; 1340 hp = &hash[3]; 1341 } 1342 *hp |= (1 << h); 1343 1344 ++count; 1345 } 1346 1347 for (i = 0; i < 4; ++i) 1348 CSR_WRITE_4(sc, ET_MULTI_HASH + (i * 4), hash[i]); 1349 1350 if (count > 0) 1351 pktfilt |= ET_PKTFILT_MCAST; 1352 rxmac_ctrl &= ~ET_RXMAC_CTRL_NO_PKTFILT; 1353 back: 1354 CSR_WRITE_4(sc, ET_PKTFILT, pktfilt); 1355 CSR_WRITE_4(sc, ET_RXMAC_CTRL, rxmac_ctrl); 1356 } 1357 1358 static int 1359 et_chip_init(struct et_softc *sc) 1360 { 1361 struct ifnet *ifp = &sc->arpcom.ac_if; 1362 uint32_t rxq_end; 1363 int error; 1364 1365 /* 1366 * Split internal memory between TX and RX according to MTU 1367 */ 1368 if (ifp->if_mtu < 2048) 1369 rxq_end = 0x2bc; 1370 else if (ifp->if_mtu < 8192) 1371 rxq_end = 0x1ff; 1372 else 1373 rxq_end = 0x1b3; 1374 CSR_WRITE_4(sc, ET_RXQ_START, 0); 1375 CSR_WRITE_4(sc, ET_RXQ_END, rxq_end); 1376 CSR_WRITE_4(sc, ET_TXQ_START, rxq_end + 1); 1377 CSR_WRITE_4(sc, ET_TXQ_END, ET_INTERN_MEM_END); 1378 1379 /* No loopback */ 1380 CSR_WRITE_4(sc, ET_LOOPBACK, 0); 1381 1382 /* Clear MSI configure */ 1383 CSR_WRITE_4(sc, ET_MSI_CFG, 0); 1384 1385 /* Disable timer */ 1386 CSR_WRITE_4(sc, ET_TIMER, 0); 1387 1388 /* Initialize MAC */ 1389 et_init_mac(sc); 1390 1391 /* Enable memory controllers */ 1392 CSR_WRITE_4(sc, ET_MMC_CTRL, ET_MMC_CTRL_ENABLE); 1393 1394 /* Initialize RX MAC */ 1395 et_init_rxmac(sc); 1396 1397 /* Initialize TX MAC */ 1398 et_init_txmac(sc); 1399 1400 /* Initialize RX DMA engine */ 1401 error = et_init_rxdma(sc); 1402 if (error) 1403 return error; 1404 1405 /* Initialize TX DMA engine */ 1406 error = et_init_txdma(sc); 1407 if (error) 1408 return error; 1409 1410 return 0; 1411 } 1412 1413 static int 1414 et_init_tx_ring(struct et_softc *sc) 1415 { 1416 struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring; 1417 struct et_txstatus_data *txsd = &sc->sc_tx_status; 1418 struct et_txbuf_data *tbd = &sc->sc_tx_data; 1419 1420 bzero(tx_ring->tr_desc, ET_TX_RING_SIZE); 1421 bus_dmamap_sync(tx_ring->tr_dtag, tx_ring->tr_dmap, 1422 BUS_DMASYNC_PREWRITE); 1423 1424 tbd->tbd_start_index = 0; 1425 tbd->tbd_start_wrap = 0; 1426 tbd->tbd_used = 0; 1427 1428 bzero(txsd->txsd_status, sizeof(uint32_t)); 1429 bus_dmamap_sync(txsd->txsd_dtag, txsd->txsd_dmap, 1430 BUS_DMASYNC_PREWRITE); 1431 return 0; 1432 } 1433 1434 static int 1435 et_init_rx_ring(struct et_softc *sc) 1436 { 1437 struct et_rxstatus_data *rxsd = &sc->sc_rx_status; 1438 struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring; 1439 int n; 1440 1441 for (n = 0; n < ET_RX_NRING; ++n) { 1442 struct et_rxbuf_data *rbd = &sc->sc_rx_data[n]; 1443 int i, error; 1444 1445 for (i = 0; i < ET_RX_NDESC; ++i) { 1446 error = rbd->rbd_newbuf(rbd, i, 1); 1447 if (error) { 1448 if_printf(&sc->arpcom.ac_if, "%d ring %d buf, " 1449 "newbuf failed: %d\n", n, i, error); 1450 return error; 1451 } 1452 } 1453 } 1454 1455 bzero(rxsd->rxsd_status, sizeof(struct et_rxstatus)); 1456 bus_dmamap_sync(rxsd->rxsd_dtag, rxsd->rxsd_dmap, 1457 BUS_DMASYNC_PREWRITE); 1458 1459 bzero(rxst_ring->rsr_stat, ET_RXSTAT_RING_SIZE); 1460 bus_dmamap_sync(rxst_ring->rsr_dtag, rxst_ring->rsr_dmap, 
1461 BUS_DMASYNC_PREWRITE); 1462 1463 return 0; 1464 } 1465 1466 static void 1467 et_dma_buf_addr(void *xctx, bus_dma_segment_t *segs, int nsegs, 1468 bus_size_t mapsz __unused, int error) 1469 { 1470 struct et_dmamap_ctx *ctx = xctx; 1471 int i; 1472 1473 if (error) 1474 return; 1475 1476 if (nsegs > ctx->nsegs) { 1477 ctx->nsegs = 0; 1478 return; 1479 } 1480 1481 ctx->nsegs = nsegs; 1482 for (i = 0; i < nsegs; ++i) 1483 ctx->segs[i] = segs[i]; 1484 } 1485 1486 static int 1487 et_init_rxdma(struct et_softc *sc) 1488 { 1489 struct et_rxstatus_data *rxsd = &sc->sc_rx_status; 1490 struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring; 1491 struct et_rxdesc_ring *rx_ring; 1492 int error; 1493 1494 error = et_stop_rxdma(sc); 1495 if (error) { 1496 if_printf(&sc->arpcom.ac_if, "can't init RX DMA engine\n"); 1497 return error; 1498 } 1499 1500 /* 1501 * Install RX status 1502 */ 1503 CSR_WRITE_4(sc, ET_RX_STATUS_HI, ET_ADDR_HI(rxsd->rxsd_paddr)); 1504 CSR_WRITE_4(sc, ET_RX_STATUS_LO, ET_ADDR_LO(rxsd->rxsd_paddr)); 1505 1506 /* 1507 * Install RX stat ring 1508 */ 1509 CSR_WRITE_4(sc, ET_RXSTAT_HI, ET_ADDR_HI(rxst_ring->rsr_paddr)); 1510 CSR_WRITE_4(sc, ET_RXSTAT_LO, ET_ADDR_LO(rxst_ring->rsr_paddr)); 1511 CSR_WRITE_4(sc, ET_RXSTAT_CNT, ET_RX_NSTAT - 1); 1512 CSR_WRITE_4(sc, ET_RXSTAT_POS, 0); 1513 CSR_WRITE_4(sc, ET_RXSTAT_MINCNT, ((ET_RX_NSTAT * 15) / 100) - 1); 1514 1515 /* Match ET_RXSTAT_POS */ 1516 rxst_ring->rsr_index = 0; 1517 rxst_ring->rsr_wrap = 0; 1518 1519 /* 1520 * Install the 2nd RX descriptor ring 1521 */ 1522 rx_ring = &sc->sc_rx_ring[1]; 1523 CSR_WRITE_4(sc, ET_RX_RING1_HI, ET_ADDR_HI(rx_ring->rr_paddr)); 1524 CSR_WRITE_4(sc, ET_RX_RING1_LO, ET_ADDR_LO(rx_ring->rr_paddr)); 1525 CSR_WRITE_4(sc, ET_RX_RING1_CNT, ET_RX_NDESC - 1); 1526 CSR_WRITE_4(sc, ET_RX_RING1_POS, ET_RX_RING1_POS_WRAP); 1527 CSR_WRITE_4(sc, ET_RX_RING1_MINCNT, ((ET_RX_NDESC * 15) / 100) - 1); 1528 1529 /* Match ET_RX_RING1_POS */ 1530 rx_ring->rr_index = 0; 1531 rx_ring->rr_wrap = 1; 1532 1533 /* 1534 * Install the 1st RX descriptor ring 1535 */ 1536 rx_ring = &sc->sc_rx_ring[0]; 1537 CSR_WRITE_4(sc, ET_RX_RING0_HI, ET_ADDR_HI(rx_ring->rr_paddr)); 1538 CSR_WRITE_4(sc, ET_RX_RING0_LO, ET_ADDR_LO(rx_ring->rr_paddr)); 1539 CSR_WRITE_4(sc, ET_RX_RING0_CNT, ET_RX_NDESC - 1); 1540 CSR_WRITE_4(sc, ET_RX_RING0_POS, ET_RX_RING0_POS_WRAP); 1541 CSR_WRITE_4(sc, ET_RX_RING0_MINCNT, ((ET_RX_NDESC * 15) / 100) - 1); 1542 1543 /* Match ET_RX_RING0_POS */ 1544 rx_ring->rr_index = 0; 1545 rx_ring->rr_wrap = 1; 1546 1547 /* 1548 * RX intr moderation 1549 */ 1550 CSR_WRITE_4(sc, ET_RX_INTR_NPKTS, sc->sc_rx_intr_npkts); 1551 CSR_WRITE_4(sc, ET_RX_INTR_DELAY, sc->sc_rx_intr_delay); 1552 1553 return 0; 1554 } 1555 1556 static int 1557 et_init_txdma(struct et_softc *sc) 1558 { 1559 struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring; 1560 struct et_txstatus_data *txsd = &sc->sc_tx_status; 1561 int error; 1562 1563 error = et_stop_txdma(sc); 1564 if (error) { 1565 if_printf(&sc->arpcom.ac_if, "can't init TX DMA engine\n"); 1566 return error; 1567 } 1568 1569 /* 1570 * Install TX descriptor ring 1571 */ 1572 CSR_WRITE_4(sc, ET_TX_RING_HI, ET_ADDR_HI(tx_ring->tr_paddr)); 1573 CSR_WRITE_4(sc, ET_TX_RING_LO, ET_ADDR_LO(tx_ring->tr_paddr)); 1574 CSR_WRITE_4(sc, ET_TX_RING_CNT, ET_TX_NDESC - 1); 1575 1576 /* 1577 * Install TX status 1578 */ 1579 CSR_WRITE_4(sc, ET_TX_STATUS_HI, ET_ADDR_HI(txsd->txsd_paddr)); 1580 CSR_WRITE_4(sc, ET_TX_STATUS_LO, ET_ADDR_LO(txsd->txsd_paddr)); 1581 1582 CSR_WRITE_4(sc, ET_TX_READY_POS, 0); 1583 1584 /* Match 
ET_TX_READY_POS */ 1585 tx_ring->tr_ready_index = 0; 1586 tx_ring->tr_ready_wrap = 0; 1587 1588 return 0; 1589 } 1590 1591 static void 1592 et_init_mac(struct et_softc *sc) 1593 { 1594 struct ifnet *ifp = &sc->arpcom.ac_if; 1595 const uint8_t *eaddr = IF_LLADDR(ifp); 1596 uint32_t val; 1597 1598 /* Reset MAC */ 1599 CSR_WRITE_4(sc, ET_MAC_CFG1, 1600 ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC | 1601 ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC | 1602 ET_MAC_CFG1_SIM_RST | ET_MAC_CFG1_SOFT_RST); 1603 1604 /* 1605 * Setup inter packet gap 1606 */ 1607 val = __SHIFTIN(56, ET_IPG_NONB2B_1) | 1608 __SHIFTIN(88, ET_IPG_NONB2B_2) | 1609 __SHIFTIN(80, ET_IPG_MINIFG) | 1610 __SHIFTIN(96, ET_IPG_B2B); 1611 CSR_WRITE_4(sc, ET_IPG, val); 1612 1613 /* 1614 * Setup half duplex mode 1615 */ 1616 val = __SHIFTIN(10, ET_MAC_HDX_ALT_BEB_TRUNC) | 1617 __SHIFTIN(15, ET_MAC_HDX_REXMIT_MAX) | 1618 __SHIFTIN(55, ET_MAC_HDX_COLLWIN) | 1619 ET_MAC_HDX_EXC_DEFER; 1620 CSR_WRITE_4(sc, ET_MAC_HDX, val); 1621 1622 /* Clear MAC control */ 1623 CSR_WRITE_4(sc, ET_MAC_CTRL, 0); 1624 1625 /* Reset MII */ 1626 CSR_WRITE_4(sc, ET_MII_CFG, ET_MII_CFG_CLKRST); 1627 1628 /* 1629 * Set MAC address 1630 */ 1631 val = eaddr[2] | (eaddr[3] << 8) | (eaddr[4] << 16) | (eaddr[5] << 24); 1632 CSR_WRITE_4(sc, ET_MAC_ADDR1, val); 1633 val = (eaddr[0] << 16) | (eaddr[1] << 24); 1634 CSR_WRITE_4(sc, ET_MAC_ADDR2, val); 1635 1636 /* Set max frame length */ 1637 CSR_WRITE_4(sc, ET_MAX_FRMLEN, 1638 ETHER_HDR_LEN + EVL_ENCAPLEN + ifp->if_mtu + ETHER_CRC_LEN); 1639 1640 /* Bring MAC out of reset state */ 1641 CSR_WRITE_4(sc, ET_MAC_CFG1, 0); 1642 } 1643 1644 static void 1645 et_init_rxmac(struct et_softc *sc) 1646 { 1647 struct ifnet *ifp = &sc->arpcom.ac_if; 1648 const uint8_t *eaddr = IF_LLADDR(ifp); 1649 uint32_t val; 1650 int i; 1651 1652 /* Disable RX MAC and WOL */ 1653 CSR_WRITE_4(sc, ET_RXMAC_CTRL, ET_RXMAC_CTRL_WOL_DISABLE); 1654 1655 /* 1656 * Clear all WOL related registers 1657 */ 1658 for (i = 0; i < 3; ++i) 1659 CSR_WRITE_4(sc, ET_WOL_CRC + (i * 4), 0); 1660 for (i = 0; i < 20; ++i) 1661 CSR_WRITE_4(sc, ET_WOL_MASK + (i * 4), 0); 1662 1663 /* 1664 * Set WOL source address. XXX is this necessary? 1665 */ 1666 val = (eaddr[2] << 24) | (eaddr[3] << 16) | (eaddr[4] << 8) | eaddr[5]; 1667 CSR_WRITE_4(sc, ET_WOL_SA_LO, val); 1668 val = (eaddr[0] << 8) | eaddr[1]; 1669 CSR_WRITE_4(sc, ET_WOL_SA_HI, val); 1670 1671 /* Clear packet filters */ 1672 CSR_WRITE_4(sc, ET_PKTFILT, 0); 1673 1674 /* No ucast filtering */ 1675 CSR_WRITE_4(sc, ET_UCAST_FILTADDR1, 0); 1676 CSR_WRITE_4(sc, ET_UCAST_FILTADDR2, 0); 1677 CSR_WRITE_4(sc, ET_UCAST_FILTADDR3, 0); 1678 1679 if (ifp->if_mtu > 8192) { 1680 /* 1681 * In order to transmit jumbo packets greater than 8k, 1682 * the FIFO between RX MAC and RX DMA needs to be reduced 1683 * in size to (16k - MTU). In order to implement this, we 1684 * must use "cut through" mode in the RX MAC, which chops 1685 * packets down into segments which are (max_size * 16). 1686 * In this case we selected 256 bytes, since this is the 1687 * size of the PCI-Express TLP's that the 1310 uses. 
		 */
		val = __SHIFTIN(16, ET_RXMAC_MC_SEGSZ_MAX) |
		      ET_RXMAC_MC_SEGSZ_ENABLE;
	} else {
		val = 0;
	}
	CSR_WRITE_4(sc, ET_RXMAC_MC_SEGSZ, val);

	CSR_WRITE_4(sc, ET_RXMAC_MC_WATERMARK, 0);

	/* Initialize RX MAC management register */
	CSR_WRITE_4(sc, ET_RXMAC_MGT, 0);

	CSR_WRITE_4(sc, ET_RXMAC_SPACE_AVL, 0);

	CSR_WRITE_4(sc, ET_RXMAC_MGT,
		    ET_RXMAC_MGT_PASS_ECRC |
		    ET_RXMAC_MGT_PASS_ELEN |
		    ET_RXMAC_MGT_PASS_ETRUNC |
		    ET_RXMAC_MGT_CHECK_PKT);

	/*
	 * Configure runt filtering (may not work on certain chip generations)
	 */
	val = __SHIFTIN(ETHER_MIN_LEN, ET_PKTFILT_MINLEN) | ET_PKTFILT_FRAG;
	CSR_WRITE_4(sc, ET_PKTFILT, val);

	/* Enable RX MAC but leave WOL disabled */
	CSR_WRITE_4(sc, ET_RXMAC_CTRL,
		    ET_RXMAC_CTRL_WOL_DISABLE | ET_RXMAC_CTRL_ENABLE);

	/*
	 * Setup multicast hash and allmulti/promisc mode
	 */
	et_setmulti(sc);
}

static void
et_init_txmac(struct et_softc *sc)
{
	/* Disable TX MAC and FC(?) */
	CSR_WRITE_4(sc, ET_TXMAC_CTRL, ET_TXMAC_CTRL_FC_DISABLE);

	/* No flow control yet */
	CSR_WRITE_4(sc, ET_TXMAC_FLOWCTRL, 0);

	/* Enable TX MAC but leave FC(?) disabled */
	CSR_WRITE_4(sc, ET_TXMAC_CTRL,
		    ET_TXMAC_CTRL_ENABLE | ET_TXMAC_CTRL_FC_DISABLE);
}

static int
et_start_rxdma(struct et_softc *sc)
{
	uint32_t val = 0;

	val |= __SHIFTIN(sc->sc_rx_data[0].rbd_bufsize,
			 ET_RXDMA_CTRL_RING0_SIZE) |
	       ET_RXDMA_CTRL_RING0_ENABLE;
	val |= __SHIFTIN(sc->sc_rx_data[1].rbd_bufsize,
			 ET_RXDMA_CTRL_RING1_SIZE) |
	       ET_RXDMA_CTRL_RING1_ENABLE;

	CSR_WRITE_4(sc, ET_RXDMA_CTRL, val);

	DELAY(5);

	if (CSR_READ_4(sc, ET_RXDMA_CTRL) & ET_RXDMA_CTRL_HALTED) {
		if_printf(&sc->arpcom.ac_if, "can't start RX DMA engine\n");
		return ETIMEDOUT;
	}
	return 0;
}

static int
et_start_txdma(struct et_softc *sc)
{
	CSR_WRITE_4(sc, ET_TXDMA_CTRL, ET_TXDMA_CTRL_SINGLE_EPKT);
	return 0;
}

static int
et_enable_txrx(struct et_softc *sc, int media_upd)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t val;
	int i, error;

	val = CSR_READ_4(sc, ET_MAC_CFG1);
	val |= ET_MAC_CFG1_TXEN | ET_MAC_CFG1_RXEN;
	val &= ~(ET_MAC_CFG1_TXFLOW | ET_MAC_CFG1_RXFLOW |
		 ET_MAC_CFG1_LOOPBACK);
	CSR_WRITE_4(sc, ET_MAC_CFG1, val);

	if (media_upd)
		et_ifmedia_upd(ifp);
	else
		et_setmedia(sc);

#define NRETRY 100

	for (i = 0; i < NRETRY; ++i) {
		val = CSR_READ_4(sc, ET_MAC_CFG1);
		if ((val & (ET_MAC_CFG1_SYNC_TXEN | ET_MAC_CFG1_SYNC_RXEN)) ==
		    (ET_MAC_CFG1_SYNC_TXEN | ET_MAC_CFG1_SYNC_RXEN))
			break;

		DELAY(10);
	}
	if (i == NRETRY) {
		if_printf(ifp, "can't enable RX/TX\n");
		return 0;
	}
	sc->sc_txrx_enabled = 1;

#undef NRETRY

	/*
	 * Start TX/RX DMA engine
	 */
	error = et_start_rxdma(sc);
	if (error)
		return error;

	error = et_start_txdma(sc);
	if (error)
		return error;

	return 0;
}

static void
et_rxeof(struct et_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct et_rxstatus_data *rxsd = &sc->sc_rx_status;
	struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring;
	uint32_t rxs_stat_ring;
	int rxst_wrap, rxst_index;

	if (!sc->sc_txrx_enabled)
1829 return; 1830 1831 bus_dmamap_sync(rxsd->rxsd_dtag, rxsd->rxsd_dmap, 1832 BUS_DMASYNC_POSTREAD); 1833 bus_dmamap_sync(rxst_ring->rsr_dtag, rxst_ring->rsr_dmap, 1834 BUS_DMASYNC_POSTREAD); 1835 1836 rxs_stat_ring = rxsd->rxsd_status->rxs_stat_ring; 1837 rxst_wrap = (rxs_stat_ring & ET_RXS_STATRING_WRAP) ? 1 : 0; 1838 rxst_index = __SHIFTOUT(rxs_stat_ring, ET_RXS_STATRING_INDEX); 1839 1840 while (rxst_index != rxst_ring->rsr_index || 1841 rxst_wrap != rxst_ring->rsr_wrap) { 1842 struct et_rxbuf_data *rbd; 1843 struct et_rxdesc_ring *rx_ring; 1844 struct et_rxstat *st; 1845 struct et_rxbuf *rb; 1846 struct mbuf *m; 1847 int buflen, buf_idx, ring_idx; 1848 uint32_t rxstat_pos, rxring_pos; 1849 1850 KKASSERT(rxst_ring->rsr_index < ET_RX_NSTAT); 1851 st = &rxst_ring->rsr_stat[rxst_ring->rsr_index]; 1852 1853 buflen = __SHIFTOUT(st->rxst_info2, ET_RXST_INFO2_LEN); 1854 buf_idx = __SHIFTOUT(st->rxst_info2, ET_RXST_INFO2_BUFIDX); 1855 ring_idx = __SHIFTOUT(st->rxst_info2, ET_RXST_INFO2_RINGIDX); 1856 1857 if (++rxst_ring->rsr_index == ET_RX_NSTAT) { 1858 rxst_ring->rsr_index = 0; 1859 rxst_ring->rsr_wrap ^= 1; 1860 } 1861 rxstat_pos = __SHIFTIN(rxst_ring->rsr_index, 1862 ET_RXSTAT_POS_INDEX); 1863 if (rxst_ring->rsr_wrap) 1864 rxstat_pos |= ET_RXSTAT_POS_WRAP; 1865 CSR_WRITE_4(sc, ET_RXSTAT_POS, rxstat_pos); 1866 1867 if (ring_idx >= ET_RX_NRING) { 1868 ifp->if_ierrors++; 1869 if_printf(ifp, "invalid ring index %d\n", ring_idx); 1870 continue; 1871 } 1872 if (buf_idx >= ET_RX_NDESC) { 1873 ifp->if_ierrors++; 1874 if_printf(ifp, "invalid buf index %d\n", buf_idx); 1875 continue; 1876 } 1877 1878 rbd = &sc->sc_rx_data[ring_idx]; 1879 rb = &rbd->rbd_buf[buf_idx]; 1880 m = rb->rb_mbuf; 1881 bus_dmamap_sync(sc->sc_mbuf_dtag, rb->rb_dmap, 1882 BUS_DMASYNC_POSTREAD); 1883 1884 if (rbd->rbd_newbuf(rbd, buf_idx, 0) == 0) { 1885 m->m_pkthdr.len = m->m_len = buflen; 1886 m->m_pkthdr.rcvif = ifp; 1887 1888 ifp->if_ipackets++; 1889 ifp->if_input(ifp, m); 1890 } else { 1891 ifp->if_ierrors++; 1892 } 1893 1894 rx_ring = &sc->sc_rx_ring[ring_idx]; 1895 1896 if (buf_idx != rx_ring->rr_index) { 1897 if_printf(ifp, "WARNING!! 
ring %d, "
			    "buf_idx %d, rr_idx %d\n",
			    ring_idx, buf_idx, rx_ring->rr_index);
		}

		KKASSERT(rx_ring->rr_index < ET_RX_NDESC);
		if (++rx_ring->rr_index == ET_RX_NDESC) {
			rx_ring->rr_index = 0;
			rx_ring->rr_wrap ^= 1;
		}
		rxring_pos = __SHIFTIN(rx_ring->rr_index, ET_RX_RING_POS_INDEX);
		if (rx_ring->rr_wrap)
			rxring_pos |= ET_RX_RING_POS_WRAP;
		CSR_WRITE_4(sc, rx_ring->rr_posreg, rxring_pos);
	}
}

static int
et_encap(struct et_softc *sc, struct mbuf **m0)
{
	struct mbuf *m = *m0;
	bus_dma_segment_t segs[ET_NSEG_MAX];
	struct et_dmamap_ctx ctx;
	struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
	struct et_txbuf_data *tbd = &sc->sc_tx_data;
	struct et_txdesc *td;
	bus_dmamap_t map;
	int error, maxsegs, first_idx, last_idx, i;
	uint32_t tx_ready_pos, last_td_ctrl2;

	maxsegs = ET_TX_NDESC - tbd->tbd_used;
	if (maxsegs > ET_NSEG_MAX)
		maxsegs = ET_NSEG_MAX;
	KASSERT(maxsegs >= ET_NSEG_SPARE,
		("not enough spare TX desc (%d)\n", maxsegs));

	KKASSERT(tx_ring->tr_ready_index < ET_TX_NDESC);
	first_idx = tx_ring->tr_ready_index;
	map = tbd->tbd_buf[first_idx].tb_dmap;

	ctx.nsegs = maxsegs;
	ctx.segs = segs;
	error = bus_dmamap_load_mbuf(sc->sc_mbuf_dtag, map, m,
				     et_dma_buf_addr, &ctx, BUS_DMA_NOWAIT);
	if (!error && ctx.nsegs == 0) {
		bus_dmamap_unload(sc->sc_mbuf_dtag, map);
		error = EFBIG;
	}
	if (error && error != EFBIG) {
		if_printf(&sc->arpcom.ac_if, "can't load TX mbuf\n");
		goto back;
	}
	if (error) {	/* error == EFBIG */
		struct mbuf *m_new;

		m_new = m_defrag(m, MB_DONTWAIT);
		if (m_new == NULL) {
			if_printf(&sc->arpcom.ac_if, "can't defrag TX mbuf\n");
			error = ENOBUFS;
			goto back;
		} else {
			*m0 = m = m_new;
		}

		ctx.nsegs = maxsegs;
		ctx.segs = segs;
		error = bus_dmamap_load_mbuf(sc->sc_mbuf_dtag, map, m,
					     et_dma_buf_addr, &ctx,
					     BUS_DMA_NOWAIT);
		if (error || ctx.nsegs == 0) {
			if (ctx.nsegs == 0) {
				bus_dmamap_unload(sc->sc_mbuf_dtag, map);
				error = EFBIG;
			}
			if_printf(&sc->arpcom.ac_if,
				  "can't load defragmented TX mbuf\n");
			goto back;
		}
	}

	bus_dmamap_sync(sc->sc_mbuf_dtag, map, BUS_DMASYNC_PREWRITE);

	last_td_ctrl2 = ET_TDCTRL2_LAST_FRAG;
	sc->sc_tx += ctx.nsegs;
	if (sc->sc_tx / sc->sc_tx_intr_nsegs != sc->sc_tx_intr) {
		sc->sc_tx_intr = sc->sc_tx / sc->sc_tx_intr_nsegs;
		last_td_ctrl2 |= ET_TDCTRL2_INTR;
	}

	last_idx = -1;
	for (i = 0; i < ctx.nsegs; ++i) {
		int idx;

		idx = (first_idx + i) % ET_TX_NDESC;
		td = &tx_ring->tr_desc[idx];
		td->td_addr_hi = ET_ADDR_HI(segs[i].ds_addr);
		td->td_addr_lo = ET_ADDR_LO(segs[i].ds_addr);
		td->td_ctrl1 = __SHIFTIN(segs[i].ds_len, ET_TDCTRL1_LEN);

		if (i == ctx.nsegs - 1) {	/* Last frag */
			td->td_ctrl2 = last_td_ctrl2;
			last_idx = idx;
		}

		KKASSERT(tx_ring->tr_ready_index < ET_TX_NDESC);
		if (++tx_ring->tr_ready_index == ET_TX_NDESC) {
			tx_ring->tr_ready_index = 0;
			tx_ring->tr_ready_wrap ^= 1;
		}
	}
	td = &tx_ring->tr_desc[first_idx];
	td->td_ctrl2 |= ET_TDCTRL2_FIRST_FRAG;	/* First frag */

	KKASSERT(last_idx >= 0);
	/*
	 * The mbuf was loaded with first_idx's DMA map, but it is freed
	 * only when the last descriptor of the frame is reclaimed, so
	 * move the loaded map together with the mbuf to last_idx and
	 * hand first_idx the unused map that used to live at last_idx.
	 */
	tbd->tbd_buf[first_idx].tb_dmap = tbd->tbd_buf[last_idx].tb_dmap;
	tbd->tbd_buf[last_idx].tb_dmap = map;
	tbd->tbd_buf[last_idx].tb_mbuf = m;
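	/*
	 * Publish the frame to the chip: account for the descriptors just
	 * consumed, sync the TX ring, and advance ET_TX_READY_POS
	 * (including the wrap bit) so the DMA engine sees the new
	 * tr_ready_index.
	 */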
tbd->tbd_used += ctx.nsegs; 2016 KKASSERT(tbd->tbd_used <= ET_TX_NDESC); 2017 2018 bus_dmamap_sync(tx_ring->tr_dtag, tx_ring->tr_dmap, 2019 BUS_DMASYNC_PREWRITE); 2020 2021 tx_ready_pos = __SHIFTIN(tx_ring->tr_ready_index, 2022 ET_TX_READY_POS_INDEX); 2023 if (tx_ring->tr_ready_wrap) 2024 tx_ready_pos |= ET_TX_READY_POS_WRAP; 2025 CSR_WRITE_4(sc, ET_TX_READY_POS, tx_ready_pos); 2026 2027 error = 0; 2028 back: 2029 if (error) { 2030 m_freem(m); 2031 *m0 = NULL; 2032 } 2033 return error; 2034 } 2035 2036 static void 2037 et_txeof(struct et_softc *sc) 2038 { 2039 struct ifnet *ifp = &sc->arpcom.ac_if; 2040 struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring; 2041 struct et_txbuf_data *tbd = &sc->sc_tx_data; 2042 uint32_t tx_done; 2043 int end, wrap; 2044 2045 if (!sc->sc_txrx_enabled) 2046 return; 2047 2048 if (tbd->tbd_used == 0) 2049 return; 2050 2051 tx_done = CSR_READ_4(sc, ET_TX_DONE_POS); 2052 end = __SHIFTOUT(tx_done, ET_TX_DONE_POS_INDEX); 2053 wrap = (tx_done & ET_TX_DONE_POS_WRAP) ? 1 : 0; 2054 2055 while (tbd->tbd_start_index != end || tbd->tbd_start_wrap != wrap) { 2056 struct et_txbuf *tb; 2057 2058 KKASSERT(tbd->tbd_start_index < ET_TX_NDESC); 2059 tb = &tbd->tbd_buf[tbd->tbd_start_index]; 2060 2061 bzero(&tx_ring->tr_desc[tbd->tbd_start_index], 2062 sizeof(struct et_txdesc)); 2063 bus_dmamap_sync(tx_ring->tr_dtag, tx_ring->tr_dmap, 2064 BUS_DMASYNC_PREWRITE); 2065 2066 if (tb->tb_mbuf != NULL) { 2067 bus_dmamap_unload(sc->sc_mbuf_dtag, tb->tb_dmap); 2068 m_freem(tb->tb_mbuf); 2069 tb->tb_mbuf = NULL; 2070 } 2071 2072 if (++tbd->tbd_start_index == ET_TX_NDESC) { 2073 tbd->tbd_start_index = 0; 2074 tbd->tbd_start_wrap ^= 1; 2075 } 2076 2077 KKASSERT(tbd->tbd_used > 0); 2078 tbd->tbd_used--; 2079 } 2080 2081 if (tbd->tbd_used == 0) 2082 ifp->if_timer = 0; 2083 if (tbd->tbd_used + ET_NSEG_SPARE <= ET_TX_NDESC) 2084 ifp->if_flags &= ~IFF_OACTIVE; 2085 2086 ifp->if_start(ifp); 2087 } 2088 2089 static void 2090 et_tick(void *xsc) 2091 { 2092 struct et_softc *sc = xsc; 2093 struct ifnet *ifp = &sc->arpcom.ac_if; 2094 struct mii_data *mii = device_get_softc(sc->sc_miibus); 2095 2096 lwkt_serialize_enter(ifp->if_serializer); 2097 2098 mii_tick(mii); 2099 if (!sc->sc_txrx_enabled && (mii->mii_media_status & IFM_ACTIVE) && 2100 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) { 2101 if_printf(ifp, "Link up, enable TX/RX\n"); 2102 if (et_enable_txrx(sc, 0) == 0) 2103 ifp->if_start(ifp); 2104 } 2105 callout_reset(&sc->sc_tick, hz, et_tick, sc); 2106 2107 lwkt_serialize_exit(ifp->if_serializer); 2108 } 2109 2110 static int 2111 et_newbuf_cluster(struct et_rxbuf_data *rbd, int buf_idx, int init) 2112 { 2113 return et_newbuf(rbd, buf_idx, init, MCLBYTES); 2114 } 2115 2116 static int 2117 et_newbuf_hdr(struct et_rxbuf_data *rbd, int buf_idx, int init) 2118 { 2119 return et_newbuf(rbd, buf_idx, init, MHLEN); 2120 } 2121 2122 static int 2123 et_newbuf(struct et_rxbuf_data *rbd, int buf_idx, int init, int len0) 2124 { 2125 struct et_softc *sc = rbd->rbd_softc; 2126 struct et_rxdesc_ring *rx_ring; 2127 struct et_rxdesc *desc; 2128 struct et_rxbuf *rb; 2129 struct mbuf *m; 2130 struct et_dmamap_ctx ctx; 2131 bus_dma_segment_t seg; 2132 bus_dmamap_t dmap; 2133 int error, len; 2134 2135 KKASSERT(buf_idx < ET_RX_NDESC); 2136 rb = &rbd->rbd_buf[buf_idx]; 2137 2138 m = m_getl(len0, init ? 
MB_WAIT : MB_DONTWAIT, MT_DATA, M_PKTHDR, &len); 2139 if (m == NULL) { 2140 error = ENOBUFS; 2141 2142 /* XXX for debug */ 2143 if_printf(&sc->arpcom.ac_if, 2144 "m_getl failed, size %d\n", len0); 2145 if (init) { 2146 return error; 2147 } else { 2148 goto back; 2149 } 2150 } 2151 m->m_len = m->m_pkthdr.len = len; 2152 2153 /* 2154 * Try load RX mbuf into temporary DMA tag 2155 */ 2156 ctx.nsegs = 1; 2157 ctx.segs = &seg; 2158 error = bus_dmamap_load_mbuf(sc->sc_mbuf_dtag, sc->sc_mbuf_tmp_dmap, m, 2159 et_dma_buf_addr, &ctx, 2160 init ? BUS_DMA_WAITOK : BUS_DMA_NOWAIT); 2161 if (error || ctx.nsegs == 0) { 2162 if (!error) { 2163 bus_dmamap_unload(sc->sc_mbuf_dtag, 2164 sc->sc_mbuf_tmp_dmap); 2165 error = EFBIG; 2166 if_printf(&sc->arpcom.ac_if, "too many segments?!\n"); 2167 } 2168 m_freem(m); 2169 2170 /* XXX for debug */ 2171 if_printf(&sc->arpcom.ac_if, "can't load RX mbuf\n"); 2172 if (init) { 2173 return error; 2174 } else { 2175 goto back; 2176 } 2177 } 2178 2179 if (!init) 2180 bus_dmamap_unload(sc->sc_mbuf_dtag, rb->rb_dmap); 2181 rb->rb_mbuf = m; 2182 rb->rb_paddr = seg.ds_addr; 2183 2184 /* 2185 * Swap RX buf's DMA map with the loaded temporary one 2186 */ 2187 dmap = rb->rb_dmap; 2188 rb->rb_dmap = sc->sc_mbuf_tmp_dmap; 2189 sc->sc_mbuf_tmp_dmap = dmap; 2190 2191 error = 0; 2192 back: 2193 rx_ring = rbd->rbd_ring; 2194 desc = &rx_ring->rr_desc[buf_idx]; 2195 2196 desc->rd_addr_hi = ET_ADDR_HI(rb->rb_paddr); 2197 desc->rd_addr_lo = ET_ADDR_LO(rb->rb_paddr); 2198 desc->rd_ctrl = __SHIFTIN(buf_idx, ET_RDCTRL_BUFIDX); 2199 2200 bus_dmamap_sync(rx_ring->rr_dtag, rx_ring->rr_dmap, 2201 BUS_DMASYNC_PREWRITE); 2202 return error; 2203 } 2204 2205 static int 2206 et_sysctl_rx_intr_npkts(SYSCTL_HANDLER_ARGS) 2207 { 2208 struct et_softc *sc = arg1; 2209 struct ifnet *ifp = &sc->arpcom.ac_if; 2210 int error = 0, v; 2211 2212 lwkt_serialize_enter(ifp->if_serializer); 2213 2214 v = sc->sc_rx_intr_npkts; 2215 error = sysctl_handle_int(oidp, &v, 0, req); 2216 if (error || req->newptr == NULL) 2217 goto back; 2218 if (v <= 0) { 2219 error = EINVAL; 2220 goto back; 2221 } 2222 2223 if (sc->sc_rx_intr_npkts != v) { 2224 if (ifp->if_flags & IFF_RUNNING) 2225 CSR_WRITE_4(sc, ET_RX_INTR_NPKTS, v); 2226 sc->sc_rx_intr_npkts = v; 2227 } 2228 back: 2229 lwkt_serialize_exit(ifp->if_serializer); 2230 return error; 2231 } 2232 2233 static int 2234 et_sysctl_rx_intr_delay(SYSCTL_HANDLER_ARGS) 2235 { 2236 struct et_softc *sc = arg1; 2237 struct ifnet *ifp = &sc->arpcom.ac_if; 2238 int error = 0, v; 2239 2240 lwkt_serialize_enter(ifp->if_serializer); 2241 2242 v = sc->sc_rx_intr_delay; 2243 error = sysctl_handle_int(oidp, &v, 0, req); 2244 if (error || req->newptr == NULL) 2245 goto back; 2246 if (v <= 0) { 2247 error = EINVAL; 2248 goto back; 2249 } 2250 2251 if (sc->sc_rx_intr_delay != v) { 2252 if (ifp->if_flags & IFF_RUNNING) 2253 CSR_WRITE_4(sc, ET_RX_INTR_DELAY, v); 2254 sc->sc_rx_intr_delay = v; 2255 } 2256 back: 2257 lwkt_serialize_exit(ifp->if_serializer); 2258 return error; 2259 } 2260 2261 static void 2262 et_setmedia(struct et_softc *sc) 2263 { 2264 struct mii_data *mii = device_get_softc(sc->sc_miibus); 2265 uint32_t cfg2, ctrl; 2266 2267 cfg2 = CSR_READ_4(sc, ET_MAC_CFG2); 2268 cfg2 &= ~(ET_MAC_CFG2_MODE_MII | ET_MAC_CFG2_MODE_GMII | 2269 ET_MAC_CFG2_FDX | ET_MAC_CFG2_BIGFRM); 2270 cfg2 |= ET_MAC_CFG2_LENCHK | ET_MAC_CFG2_CRC | ET_MAC_CFG2_PADCRC | 2271 __SHIFTIN(7, ET_MAC_CFG2_PREAMBLE_LEN); 2272 2273 ctrl = CSR_READ_4(sc, ET_MAC_CTRL); 2274 ctrl &= ~(ET_MAC_CTRL_GHDX | ET_MAC_CTRL_MODE_MII); 
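	/*
	 * Select GMII or MII operation based on what mii(4) negotiated:
	 * GMII for 1000baseT, otherwise MII (plus the MAC-control MII
	 * mode bit), then set the duplex bits to match.
	 */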
	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T) {
		cfg2 |= ET_MAC_CFG2_MODE_GMII;
	} else {
		cfg2 |= ET_MAC_CFG2_MODE_MII;
		ctrl |= ET_MAC_CTRL_MODE_MII;
	}

	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX)
		cfg2 |= ET_MAC_CFG2_FDX;
	else
		ctrl |= ET_MAC_CTRL_GHDX;

	CSR_WRITE_4(sc, ET_MAC_CTRL, ctrl);
	CSR_WRITE_4(sc, ET_MAC_CFG2, cfg2);
}
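/*
 * Runtime tuning note: the per-device sysctl tree created in et_attach()
 * lives under hw.<nameunit>, so for the first device the interrupt
 * moderation knobs can be adjusted with, e.g. (illustrative values only):
 *
 *	sysctl hw.et0.rx_intr_npkts=64
 *	sysctl hw.et0.rx_intr_delay=10
 *
 * The RX handlers write new values to the chip right away when the
 * interface is running; tx_intr_nsegs and timer take effect the next
 * time they are consulted.
 */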