/*
 * Copyright (c) 2007 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Sepherosa Ziehau <sepherosa@gmail.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/dev/netif/et/if_et.c,v 1.11 2008/07/07 13:14:23 sephe Exp $
 */

#include <sys/param.h>
#include <sys/bitops.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/rman.h>
#include <sys/serialize.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/bpf.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/ifq_var.h>
#include <net/vlan/if_vlan_var.h>

#include <dev/netif/mii_layer/miivar.h>

#include <bus/pci/pcireg.h>
#include <bus/pci/pcivar.h>
#include <bus/pci/pcidevs.h>

#include <dev/netif/et/if_etreg.h>
#include <dev/netif/et/if_etvar.h>

#include "miibus_if.h"

static int	et_probe(device_t);
static int	et_attach(device_t);
static int	et_detach(device_t);
static int	et_shutdown(device_t);

static int	et_miibus_readreg(device_t, int, int);
static int	et_miibus_writereg(device_t, int, int, int);
static void	et_miibus_statchg(device_t);

static void	et_init(void *);
static int	et_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
static void	et_start(struct ifnet *);
static void	et_watchdog(struct ifnet *);
static int	et_ifmedia_upd(struct ifnet *);
static void	et_ifmedia_sts(struct ifnet *, struct ifmediareq *);

static int	et_sysctl_rx_intr_npkts(SYSCTL_HANDLER_ARGS);
static int	et_sysctl_rx_intr_delay(SYSCTL_HANDLER_ARGS);

static void	et_intr(void *);
static void	et_enable_intrs(struct et_softc *, uint32_t);
static void	et_disable_intrs(struct et_softc *);
static void	et_rxeof(struct et_softc *);
static void	et_txeof(struct et_softc *);

static int	et_dma_alloc(device_t);
static void	et_dma_free(device_t);
static int	et_dma_mem_create(device_t, bus_size_t, bus_dma_tag_t *,
		    void **, bus_addr_t *, bus_dmamap_t *);
static void	et_dma_mem_destroy(bus_dma_tag_t, void *, bus_dmamap_t);
static int	et_dma_mbuf_create(device_t);
static void	et_dma_mbuf_destroy(device_t, int, const int[]);
static int	et_jumbo_mem_alloc(device_t);
static void	et_jumbo_mem_free(device_t);
static void	et_dma_ring_addr(void *, bus_dma_segment_t *, int, int);
static void	et_dma_buf_addr(void *, bus_dma_segment_t *, int,
		    bus_size_t, int);
static int	et_init_tx_ring(struct et_softc *);
static int	et_init_rx_ring(struct et_softc *);
static void	et_free_tx_ring(struct et_softc *);
static void	et_free_rx_ring(struct et_softc *);
static int	et_encap(struct et_softc *, struct mbuf **);
static struct et_jslot *
		et_jalloc(struct et_jumbo_data *);
static void	et_jfree(void *);
static void	et_jref(void *);
static int	et_newbuf(struct et_rxbuf_data *, int, int, int);
static int	et_newbuf_cluster(struct et_rxbuf_data *, int, int);
static int	et_newbuf_hdr(struct et_rxbuf_data *, int, int);
static int	et_newbuf_jumbo(struct et_rxbuf_data *, int, int);

static void	et_stop(struct et_softc *);
static int	et_chip_init(struct et_softc *);
static void	et_chip_attach(struct et_softc *);
static void	et_init_mac(struct et_softc *);
static void	et_init_rxmac(struct et_softc *);
static void	et_init_txmac(struct et_softc *);
static int	et_init_rxdma(struct et_softc *);
static int	et_init_txdma(struct et_softc *);
static int	et_start_rxdma(struct et_softc *);
static int	et_start_txdma(struct et_softc *);
static int	et_stop_rxdma(struct et_softc *);
static int	et_stop_txdma(struct et_softc *);
static int	et_enable_txrx(struct et_softc *, int);
static void	et_reset(struct et_softc *);
static int	et_bus_config(device_t);
static void	et_get_eaddr(device_t, uint8_t[]);
static void	et_setmulti(struct et_softc *);
static void	et_tick(void *);
static void	et_setmedia(struct et_softc *);
static void	et_setup_rxdesc(struct et_rxbuf_data *, int, bus_addr_t);

static const struct et_dev {
	uint16_t	vid;
	uint16_t	did;
	const char	*desc;
} et_devices[] = {
	{ PCI_VENDOR_LUCENT, PCI_PRODUCT_LUCENT_ET1310,
	  "Agere ET1310 Gigabit Ethernet" },
	{ PCI_VENDOR_LUCENT, PCI_PRODUCT_LUCENT_ET1310_FAST,
	  "Agere ET1310 Fast Ethernet" },
	{ 0, 0, NULL }
};

static device_method_t et_methods[] = {
	DEVMETHOD(device_probe,		et_probe),
	DEVMETHOD(device_attach,	et_attach),
	DEVMETHOD(device_detach,	et_detach),
	DEVMETHOD(device_shutdown,	et_shutdown),
#if 0
	DEVMETHOD(device_suspend,	et_suspend),
	DEVMETHOD(device_resume,	et_resume),
#endif

	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	DEVMETHOD(miibus_readreg,	et_miibus_readreg),
	DEVMETHOD(miibus_writereg,	et_miibus_writereg),
	DEVMETHOD(miibus_statchg,	et_miibus_statchg),

	{ 0, 0 }
};

static driver_t et_driver = {
	"et",
	et_methods,
	sizeof(struct et_softc)
};

static devclass_t et_devclass;

DECLARE_DUMMY_MODULE(if_et);
MODULE_DEPEND(if_et, miibus, 1, 1, 1);
DRIVER_MODULE(if_et, pci, et_driver, et_devclass, 0, 0);
DRIVER_MODULE(miibus, et, miibus_driver, miibus_devclass, 0, 0);

static int	et_rx_intr_npkts = 32;
static int	et_rx_intr_delay = 20;		/* x10 usec */
static int	et_tx_intr_nsegs = 126;
static uint32_t	et_timer = 1000 * 1000 * 1000;	/* nanosec */

TUNABLE_INT("hw.et.timer", &et_timer);
TUNABLE_INT("hw.et.rx_intr_npkts", &et_rx_intr_npkts);
TUNABLE_INT("hw.et.rx_intr_delay", &et_rx_intr_delay);
TUNABLE_INT("hw.et.tx_intr_nsegs", &et_tx_intr_nsegs);

struct et_bsize {
	int		bufsize;
	int		jumbo;
	et_newbuf_t	newbuf;
};

static const struct et_bsize	et_bufsize_std[ET_RX_NRING] = {
	{ .bufsize = ET_RXDMA_CTRL_RING0_128, .jumbo = 0,
	  .newbuf = et_newbuf_hdr },
	{ .bufsize = ET_RXDMA_CTRL_RING1_2048, .jumbo = 0,
	  .newbuf = et_newbuf_cluster },
};

static const struct et_bsize	et_bufsize_jumbo[ET_RX_NRING] = {
	{ .bufsize = ET_RXDMA_CTRL_RING0_128, .jumbo = 0,
	  .newbuf = et_newbuf_hdr },
	{ .bufsize = ET_RXDMA_CTRL_RING1_16384, .jumbo = 1,
	  .newbuf = et_newbuf_jumbo },
};

static int
et_probe(device_t dev)
{
	const struct et_dev *d;
	uint16_t did, vid;

	vid = pci_get_vendor(dev);
	did = pci_get_device(dev);

	for (d = et_devices; d->desc != NULL; ++d) {
		if (vid == d->vid && did == d->did) {
			device_set_desc(dev, d->desc);
			return 0;
		}
	}
	return ENXIO;
}

static int
et_attach(device_t dev)
{
	struct et_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint8_t eaddr[ETHER_ADDR_LEN];
	int error;

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	callout_init(&sc->sc_tick);

	/*
	 * Initialize tunables
	 */
	sc->sc_rx_intr_npkts = et_rx_intr_npkts;
	sc->sc_rx_intr_delay = et_rx_intr_delay;
	sc->sc_tx_intr_nsegs = et_tx_intr_nsegs;
	sc->sc_timer = et_timer;

#ifndef BURN_BRIDGES
	if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
		uint32_t irq, mem;

		irq = pci_read_config(dev, PCIR_INTLINE, 4);
		mem = pci_read_config(dev, ET_PCIR_BAR, 4);

		device_printf(dev, "chip is in D%d power mode "
		    "-- setting to D0\n", pci_get_powerstate(dev));

		pci_set_powerstate(dev, PCI_POWERSTATE_D0);

		pci_write_config(dev, PCIR_INTLINE, irq, 4);
		pci_write_config(dev, ET_PCIR_BAR, mem, 4);
	}
#endif	/* !BURN_BRIDGES */

	/* Enable bus mastering */
	pci_enable_busmaster(dev);

	/*
	 * Allocate IO memory
	 */
	sc->sc_mem_rid = ET_PCIR_BAR;
	sc->sc_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &sc->sc_mem_rid, RF_ACTIVE);
	if (sc->sc_mem_res == NULL) {
		device_printf(dev, "can't allocate IO memory\n");
		return ENXIO;
	}
	sc->sc_mem_bt = rman_get_bustag(sc->sc_mem_res);
	sc->sc_mem_bh = rman_get_bushandle(sc->sc_mem_res);

	/*
	 * Allocate IRQ
	 */
	sc->sc_irq_rid = 0;
	sc->sc_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
	    &sc->sc_irq_rid,
	    RF_SHAREABLE | RF_ACTIVE);
	if (sc->sc_irq_res == NULL) {
		device_printf(dev, "can't allocate irq\n");
		error = ENXIO;
		goto fail;
	}

	/*
	 * Create sysctl tree
	 */
	sysctl_ctx_init(&sc->sc_sysctl_ctx);
	sc->sc_sysctl_tree = SYSCTL_ADD_NODE(&sc->sc_sysctl_ctx,
	    SYSCTL_STATIC_CHILDREN(_hw),
	    OID_AUTO,
	    device_get_nameunit(dev),
	    CTLFLAG_RD, 0, "");
	if (sc->sc_sysctl_tree == NULL) {
device_printf(dev, "can't add sysctl node\n"); 312 error = ENXIO; 313 goto fail; 314 } 315 316 SYSCTL_ADD_PROC(&sc->sc_sysctl_ctx, 317 SYSCTL_CHILDREN(sc->sc_sysctl_tree), 318 OID_AUTO, "rx_intr_npkts", CTLTYPE_INT | CTLFLAG_RW, 319 sc, 0, et_sysctl_rx_intr_npkts, "I", 320 "RX IM, # packets per RX interrupt"); 321 SYSCTL_ADD_PROC(&sc->sc_sysctl_ctx, 322 SYSCTL_CHILDREN(sc->sc_sysctl_tree), 323 OID_AUTO, "rx_intr_delay", CTLTYPE_INT | CTLFLAG_RW, 324 sc, 0, et_sysctl_rx_intr_delay, "I", 325 "RX IM, RX interrupt delay (x10 usec)"); 326 SYSCTL_ADD_INT(&sc->sc_sysctl_ctx, 327 SYSCTL_CHILDREN(sc->sc_sysctl_tree), OID_AUTO, 328 "tx_intr_nsegs", CTLFLAG_RW, &sc->sc_tx_intr_nsegs, 0, 329 "TX IM, # segments per TX interrupt"); 330 SYSCTL_ADD_UINT(&sc->sc_sysctl_ctx, 331 SYSCTL_CHILDREN(sc->sc_sysctl_tree), OID_AUTO, 332 "timer", CTLFLAG_RW, &sc->sc_timer, 0, 333 "TX timer"); 334 335 error = et_bus_config(dev); 336 if (error) 337 goto fail; 338 339 et_get_eaddr(dev, eaddr); 340 341 CSR_WRITE_4(sc, ET_PM, 342 ET_PM_SYSCLK_GATE | ET_PM_TXCLK_GATE | ET_PM_RXCLK_GATE); 343 344 et_reset(sc); 345 346 et_disable_intrs(sc); 347 348 error = et_dma_alloc(dev); 349 if (error) 350 goto fail; 351 352 ifp->if_softc = sc; 353 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 354 ifp->if_init = et_init; 355 ifp->if_ioctl = et_ioctl; 356 ifp->if_start = et_start; 357 ifp->if_watchdog = et_watchdog; 358 ifp->if_mtu = ETHERMTU; 359 ifp->if_capabilities = IFCAP_VLAN_MTU; 360 ifp->if_capenable = ifp->if_capabilities; 361 ifq_set_maxlen(&ifp->if_snd, ET_TX_NDESC); 362 ifq_set_ready(&ifp->if_snd); 363 364 et_chip_attach(sc); 365 366 error = mii_phy_probe(dev, &sc->sc_miibus, 367 et_ifmedia_upd, et_ifmedia_sts); 368 if (error) { 369 device_printf(dev, "can't probe any PHY\n"); 370 goto fail; 371 } 372 373 ether_ifattach(ifp, eaddr, NULL); 374 375 error = bus_setup_intr(dev, sc->sc_irq_res, INTR_MPSAFE, et_intr, sc, 376 &sc->sc_irq_handle, ifp->if_serializer); 377 if (error) { 378 ether_ifdetach(ifp); 379 device_printf(dev, "can't setup intr\n"); 380 goto fail; 381 } 382 383 ifp->if_cpuid = ithread_cpuid(rman_get_start(sc->sc_irq_res)); 384 KKASSERT(ifp->if_cpuid >= 0 && ifp->if_cpuid < ncpus); 385 386 return 0; 387 fail: 388 et_detach(dev); 389 return error; 390 } 391 392 static int 393 et_detach(device_t dev) 394 { 395 struct et_softc *sc = device_get_softc(dev); 396 397 if (device_is_attached(dev)) { 398 struct ifnet *ifp = &sc->arpcom.ac_if; 399 400 lwkt_serialize_enter(ifp->if_serializer); 401 et_stop(sc); 402 bus_teardown_intr(dev, sc->sc_irq_res, sc->sc_irq_handle); 403 lwkt_serialize_exit(ifp->if_serializer); 404 405 ether_ifdetach(ifp); 406 } 407 408 if (sc->sc_sysctl_tree != NULL) 409 sysctl_ctx_free(&sc->sc_sysctl_ctx); 410 411 if (sc->sc_miibus != NULL) 412 device_delete_child(dev, sc->sc_miibus); 413 bus_generic_detach(dev); 414 415 if (sc->sc_irq_res != NULL) { 416 bus_release_resource(dev, SYS_RES_IRQ, sc->sc_irq_rid, 417 sc->sc_irq_res); 418 } 419 420 if (sc->sc_mem_res != NULL) { 421 bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_mem_rid, 422 sc->sc_mem_res); 423 } 424 425 et_dma_free(dev); 426 427 return 0; 428 } 429 430 static int 431 et_shutdown(device_t dev) 432 { 433 struct et_softc *sc = device_get_softc(dev); 434 struct ifnet *ifp = &sc->arpcom.ac_if; 435 436 lwkt_serialize_enter(ifp->if_serializer); 437 et_stop(sc); 438 lwkt_serialize_exit(ifp->if_serializer); 439 return 0; 440 } 441 442 static int 443 et_miibus_readreg(device_t dev, int phy, int reg) 444 { 445 struct et_softc *sc = 
device_get_softc(dev); 446 uint32_t val; 447 int i, ret; 448 449 /* Stop any pending operations */ 450 CSR_WRITE_4(sc, ET_MII_CMD, 0); 451 452 val = __SHIFTIN(phy, ET_MII_ADDR_PHY) | 453 __SHIFTIN(reg, ET_MII_ADDR_REG); 454 CSR_WRITE_4(sc, ET_MII_ADDR, val); 455 456 /* Start reading */ 457 CSR_WRITE_4(sc, ET_MII_CMD, ET_MII_CMD_READ); 458 459 #define NRETRY 50 460 461 for (i = 0; i < NRETRY; ++i) { 462 val = CSR_READ_4(sc, ET_MII_IND); 463 if ((val & (ET_MII_IND_BUSY | ET_MII_IND_INVALID)) == 0) 464 break; 465 DELAY(50); 466 } 467 if (i == NRETRY) { 468 if_printf(&sc->arpcom.ac_if, 469 "read phy %d, reg %d timed out\n", phy, reg); 470 ret = 0; 471 goto back; 472 } 473 474 #undef NRETRY 475 476 val = CSR_READ_4(sc, ET_MII_STAT); 477 ret = __SHIFTOUT(val, ET_MII_STAT_VALUE); 478 479 back: 480 /* Make sure that the current operation is stopped */ 481 CSR_WRITE_4(sc, ET_MII_CMD, 0); 482 return ret; 483 } 484 485 static int 486 et_miibus_writereg(device_t dev, int phy, int reg, int val0) 487 { 488 struct et_softc *sc = device_get_softc(dev); 489 uint32_t val; 490 int i; 491 492 /* Stop any pending operations */ 493 CSR_WRITE_4(sc, ET_MII_CMD, 0); 494 495 val = __SHIFTIN(phy, ET_MII_ADDR_PHY) | 496 __SHIFTIN(reg, ET_MII_ADDR_REG); 497 CSR_WRITE_4(sc, ET_MII_ADDR, val); 498 499 /* Start writing */ 500 CSR_WRITE_4(sc, ET_MII_CTRL, __SHIFTIN(val0, ET_MII_CTRL_VALUE)); 501 502 #define NRETRY 100 503 504 for (i = 0; i < NRETRY; ++i) { 505 val = CSR_READ_4(sc, ET_MII_IND); 506 if ((val & ET_MII_IND_BUSY) == 0) 507 break; 508 DELAY(50); 509 } 510 if (i == NRETRY) { 511 if_printf(&sc->arpcom.ac_if, 512 "write phy %d, reg %d timed out\n", phy, reg); 513 et_miibus_readreg(dev, phy, reg); 514 } 515 516 #undef NRETRY 517 518 /* Make sure that the current operation is stopped */ 519 CSR_WRITE_4(sc, ET_MII_CMD, 0); 520 return 0; 521 } 522 523 static void 524 et_miibus_statchg(device_t dev) 525 { 526 et_setmedia(device_get_softc(dev)); 527 } 528 529 static int 530 et_ifmedia_upd(struct ifnet *ifp) 531 { 532 struct et_softc *sc = ifp->if_softc; 533 struct mii_data *mii = device_get_softc(sc->sc_miibus); 534 535 if (mii->mii_instance != 0) { 536 struct mii_softc *miisc; 537 538 LIST_FOREACH(miisc, &mii->mii_phys, mii_list) 539 mii_phy_reset(miisc); 540 } 541 mii_mediachg(mii); 542 543 return 0; 544 } 545 546 static void 547 et_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) 548 { 549 struct et_softc *sc = ifp->if_softc; 550 struct mii_data *mii = device_get_softc(sc->sc_miibus); 551 552 mii_pollstat(mii); 553 ifmr->ifm_active = mii->mii_media_active; 554 ifmr->ifm_status = mii->mii_media_status; 555 } 556 557 static void 558 et_stop(struct et_softc *sc) 559 { 560 struct ifnet *ifp = &sc->arpcom.ac_if; 561 562 ASSERT_SERIALIZED(ifp->if_serializer); 563 564 callout_stop(&sc->sc_tick); 565 566 et_stop_rxdma(sc); 567 et_stop_txdma(sc); 568 569 et_disable_intrs(sc); 570 571 et_free_tx_ring(sc); 572 et_free_rx_ring(sc); 573 574 et_reset(sc); 575 576 sc->sc_tx = 0; 577 sc->sc_tx_intr = 0; 578 sc->sc_flags &= ~ET_FLAG_TXRX_ENABLED; 579 580 ifp->if_timer = 0; 581 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 582 } 583 584 static int 585 et_bus_config(device_t dev) 586 { 587 uint32_t val, max_plsz; 588 uint16_t ack_latency, replay_timer; 589 590 /* 591 * Test whether EEPROM is valid 592 * NOTE: Read twice to get the correct value 593 */ 594 pci_read_config(dev, ET_PCIR_EEPROM_STATUS, 1); 595 val = pci_read_config(dev, ET_PCIR_EEPROM_STATUS, 1); 596 if (val & ET_PCIM_EEPROM_STATUS_ERROR) { 597 device_printf(dev, 
"EEPROM status error 0x%02x\n", val); 598 return ENXIO; 599 } 600 601 /* TODO: LED */ 602 603 /* 604 * Configure ACK latency and replay timer according to 605 * max playload size 606 */ 607 val = pci_read_config(dev, ET_PCIR_DEVICE_CAPS, 4); 608 max_plsz = val & ET_PCIM_DEVICE_CAPS_MAX_PLSZ; 609 610 switch (max_plsz) { 611 case ET_PCIV_DEVICE_CAPS_PLSZ_128: 612 ack_latency = ET_PCIV_ACK_LATENCY_128; 613 replay_timer = ET_PCIV_REPLAY_TIMER_128; 614 break; 615 616 case ET_PCIV_DEVICE_CAPS_PLSZ_256: 617 ack_latency = ET_PCIV_ACK_LATENCY_256; 618 replay_timer = ET_PCIV_REPLAY_TIMER_256; 619 break; 620 621 default: 622 ack_latency = pci_read_config(dev, ET_PCIR_ACK_LATENCY, 2); 623 replay_timer = pci_read_config(dev, ET_PCIR_REPLAY_TIMER, 2); 624 device_printf(dev, "ack latency %u, replay timer %u\n", 625 ack_latency, replay_timer); 626 break; 627 } 628 if (ack_latency != 0) { 629 pci_write_config(dev, ET_PCIR_ACK_LATENCY, ack_latency, 2); 630 pci_write_config(dev, ET_PCIR_REPLAY_TIMER, replay_timer, 2); 631 } 632 633 /* 634 * Set L0s and L1 latency timer to 2us 635 */ 636 val = ET_PCIV_L0S_LATENCY(2) | ET_PCIV_L1_LATENCY(2); 637 pci_write_config(dev, ET_PCIR_L0S_L1_LATENCY, val, 1); 638 639 /* 640 * Set max read request size to 2048 bytes 641 */ 642 val = pci_read_config(dev, ET_PCIR_DEVICE_CTRL, 2); 643 val &= ~ET_PCIM_DEVICE_CTRL_MAX_RRSZ; 644 val |= ET_PCIV_DEVICE_CTRL_RRSZ_2K; 645 pci_write_config(dev, ET_PCIR_DEVICE_CTRL, val, 2); 646 647 return 0; 648 } 649 650 static void 651 et_get_eaddr(device_t dev, uint8_t eaddr[]) 652 { 653 uint32_t val; 654 int i; 655 656 val = pci_read_config(dev, ET_PCIR_MAC_ADDR0, 4); 657 for (i = 0; i < 4; ++i) 658 eaddr[i] = (val >> (8 * i)) & 0xff; 659 660 val = pci_read_config(dev, ET_PCIR_MAC_ADDR1, 2); 661 for (; i < ETHER_ADDR_LEN; ++i) 662 eaddr[i] = (val >> (8 * (i - 4))) & 0xff; 663 } 664 665 static void 666 et_reset(struct et_softc *sc) 667 { 668 CSR_WRITE_4(sc, ET_MAC_CFG1, 669 ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC | 670 ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC | 671 ET_MAC_CFG1_SIM_RST | ET_MAC_CFG1_SOFT_RST); 672 673 CSR_WRITE_4(sc, ET_SWRST, 674 ET_SWRST_TXDMA | ET_SWRST_RXDMA | 675 ET_SWRST_TXMAC | ET_SWRST_RXMAC | 676 ET_SWRST_MAC | ET_SWRST_MAC_STAT | ET_SWRST_MMC); 677 678 CSR_WRITE_4(sc, ET_MAC_CFG1, 679 ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC | 680 ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC); 681 CSR_WRITE_4(sc, ET_MAC_CFG1, 0); 682 } 683 684 static void 685 et_disable_intrs(struct et_softc *sc) 686 { 687 CSR_WRITE_4(sc, ET_INTR_MASK, 0xffffffff); 688 } 689 690 static void 691 et_enable_intrs(struct et_softc *sc, uint32_t intrs) 692 { 693 CSR_WRITE_4(sc, ET_INTR_MASK, ~intrs); 694 } 695 696 static int 697 et_dma_alloc(device_t dev) 698 { 699 struct et_softc *sc = device_get_softc(dev); 700 struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring; 701 struct et_txstatus_data *txsd = &sc->sc_tx_status; 702 struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring; 703 struct et_rxstatus_data *rxsd = &sc->sc_rx_status; 704 int i, error; 705 706 /* 707 * Create top level DMA tag 708 */ 709 error = bus_dma_tag_create(NULL, 1, 0, 710 BUS_SPACE_MAXADDR_32BIT, 711 BUS_SPACE_MAXADDR, 712 NULL, NULL, 713 MAXBSIZE, 714 BUS_SPACE_UNRESTRICTED, 715 BUS_SPACE_MAXSIZE_32BIT, 716 0, &sc->sc_dtag); 717 if (error) { 718 device_printf(dev, "can't create DMA tag\n"); 719 return error; 720 } 721 722 /* 723 * Create TX ring DMA stuffs 724 */ 725 error = et_dma_mem_create(dev, ET_TX_RING_SIZE, &tx_ring->tr_dtag, 726 (void **)&tx_ring->tr_desc, 727 
&tx_ring->tr_paddr, &tx_ring->tr_dmap); 728 if (error) { 729 device_printf(dev, "can't create TX ring DMA stuffs\n"); 730 return error; 731 } 732 733 /* 734 * Create TX status DMA stuffs 735 */ 736 error = et_dma_mem_create(dev, sizeof(uint32_t), &txsd->txsd_dtag, 737 (void **)&txsd->txsd_status, 738 &txsd->txsd_paddr, &txsd->txsd_dmap); 739 if (error) { 740 device_printf(dev, "can't create TX status DMA stuffs\n"); 741 return error; 742 } 743 744 /* 745 * Create DMA stuffs for RX rings 746 */ 747 for (i = 0; i < ET_RX_NRING; ++i) { 748 static const uint32_t rx_ring_posreg[ET_RX_NRING] = 749 { ET_RX_RING0_POS, ET_RX_RING1_POS }; 750 751 struct et_rxdesc_ring *rx_ring = &sc->sc_rx_ring[i]; 752 753 error = et_dma_mem_create(dev, ET_RX_RING_SIZE, 754 &rx_ring->rr_dtag, 755 (void **)&rx_ring->rr_desc, 756 &rx_ring->rr_paddr, 757 &rx_ring->rr_dmap); 758 if (error) { 759 device_printf(dev, "can't create DMA stuffs for " 760 "the %d RX ring\n", i); 761 return error; 762 } 763 rx_ring->rr_posreg = rx_ring_posreg[i]; 764 } 765 766 /* 767 * Create RX stat ring DMA stuffs 768 */ 769 error = et_dma_mem_create(dev, ET_RXSTAT_RING_SIZE, 770 &rxst_ring->rsr_dtag, 771 (void **)&rxst_ring->rsr_stat, 772 &rxst_ring->rsr_paddr, &rxst_ring->rsr_dmap); 773 if (error) { 774 device_printf(dev, "can't create RX stat ring DMA stuffs\n"); 775 return error; 776 } 777 778 /* 779 * Create RX status DMA stuffs 780 */ 781 error = et_dma_mem_create(dev, sizeof(struct et_rxstatus), 782 &rxsd->rxsd_dtag, 783 (void **)&rxsd->rxsd_status, 784 &rxsd->rxsd_paddr, &rxsd->rxsd_dmap); 785 if (error) { 786 device_printf(dev, "can't create RX status DMA stuffs\n"); 787 return error; 788 } 789 790 /* 791 * Create mbuf DMA stuffs 792 */ 793 error = et_dma_mbuf_create(dev); 794 if (error) 795 return error; 796 797 /* 798 * Create jumbo buffer DMA stuffs 799 * NOTE: Allow it to fail 800 */ 801 if (et_jumbo_mem_alloc(dev) == 0) 802 sc->sc_flags |= ET_FLAG_JUMBO; 803 804 return 0; 805 } 806 807 static void 808 et_dma_free(device_t dev) 809 { 810 struct et_softc *sc = device_get_softc(dev); 811 struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring; 812 struct et_txstatus_data *txsd = &sc->sc_tx_status; 813 struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring; 814 struct et_rxstatus_data *rxsd = &sc->sc_rx_status; 815 int i, rx_done[ET_RX_NRING]; 816 817 /* 818 * Destroy TX ring DMA stuffs 819 */ 820 et_dma_mem_destroy(tx_ring->tr_dtag, tx_ring->tr_desc, 821 tx_ring->tr_dmap); 822 823 /* 824 * Destroy TX status DMA stuffs 825 */ 826 et_dma_mem_destroy(txsd->txsd_dtag, txsd->txsd_status, 827 txsd->txsd_dmap); 828 829 /* 830 * Destroy DMA stuffs for RX rings 831 */ 832 for (i = 0; i < ET_RX_NRING; ++i) { 833 struct et_rxdesc_ring *rx_ring = &sc->sc_rx_ring[i]; 834 835 et_dma_mem_destroy(rx_ring->rr_dtag, rx_ring->rr_desc, 836 rx_ring->rr_dmap); 837 } 838 839 /* 840 * Destroy RX stat ring DMA stuffs 841 */ 842 et_dma_mem_destroy(rxst_ring->rsr_dtag, rxst_ring->rsr_stat, 843 rxst_ring->rsr_dmap); 844 845 /* 846 * Destroy RX status DMA stuffs 847 */ 848 et_dma_mem_destroy(rxsd->rxsd_dtag, rxsd->rxsd_status, 849 rxsd->rxsd_dmap); 850 851 /* 852 * Destroy mbuf DMA stuffs 853 */ 854 for (i = 0; i < ET_RX_NRING; ++i) 855 rx_done[i] = ET_RX_NDESC; 856 et_dma_mbuf_destroy(dev, ET_TX_NDESC, rx_done); 857 858 /* 859 * Destroy jumbo buffer DMA stuffs 860 */ 861 if (sc->sc_flags & ET_FLAG_JUMBO) 862 et_jumbo_mem_free(dev); 863 864 /* 865 * Destroy top level DMA tag 866 */ 867 if (sc->sc_dtag != NULL) 868 bus_dma_tag_destroy(sc->sc_dtag); 869 } 870 871 
static int
et_dma_mbuf_create(device_t dev)
{
	struct et_softc *sc = device_get_softc(dev);
	struct et_txbuf_data *tbd = &sc->sc_tx_data;
	int i, error, rx_done[ET_RX_NRING];

	/*
	 * Create mbuf DMA tag
	 */
	error = bus_dma_tag_create(sc->sc_dtag, 1, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
	    NULL, NULL,
	    ET_JUMBO_FRAMELEN, ET_NSEG_MAX,
	    BUS_SPACE_MAXSIZE_32BIT,
	    BUS_DMA_ALLOCNOW, &sc->sc_mbuf_dtag);
	if (error) {
		device_printf(dev, "can't create mbuf DMA tag\n");
		return error;
	}

	/*
	 * Create spare DMA map for RX mbufs
	 */
	error = bus_dmamap_create(sc->sc_mbuf_dtag, 0, &sc->sc_mbuf_tmp_dmap);
	if (error) {
		device_printf(dev, "can't create spare mbuf DMA map\n");
		bus_dma_tag_destroy(sc->sc_mbuf_dtag);
		sc->sc_mbuf_dtag = NULL;
		return error;
	}

	/*
	 * Create DMA maps for RX mbufs
	 */
	bzero(rx_done, sizeof(rx_done));
	for (i = 0; i < ET_RX_NRING; ++i) {
		struct et_rxbuf_data *rbd = &sc->sc_rx_data[i];
		int j;

		for (j = 0; j < ET_RX_NDESC; ++j) {
			error = bus_dmamap_create(sc->sc_mbuf_dtag, 0,
			    &rbd->rbd_buf[j].rb_dmap);
			if (error) {
				device_printf(dev, "can't create %d RX mbuf "
				    "for %d RX ring\n", j, i);
				rx_done[i] = j;
				et_dma_mbuf_destroy(dev, 0, rx_done);
				return error;
			}
		}
		rx_done[i] = ET_RX_NDESC;

		rbd->rbd_softc = sc;
		rbd->rbd_ring = &sc->sc_rx_ring[i];
	}

	/*
	 * Create DMA maps for TX mbufs
	 */
	for (i = 0; i < ET_TX_NDESC; ++i) {
		error = bus_dmamap_create(sc->sc_mbuf_dtag, 0,
		    &tbd->tbd_buf[i].tb_dmap);
		if (error) {
			device_printf(dev, "can't create %d TX mbuf "
			    "DMA map\n", i);
			et_dma_mbuf_destroy(dev, i, rx_done);
			return error;
		}
	}

	return 0;
}

static void
et_dma_mbuf_destroy(device_t dev, int tx_done, const int rx_done[])
{
	struct et_softc *sc = device_get_softc(dev);
	struct et_txbuf_data *tbd = &sc->sc_tx_data;
	int i;

	if (sc->sc_mbuf_dtag == NULL)
		return;

	/*
	 * Destroy DMA maps for RX mbufs
	 */
	for (i = 0; i < ET_RX_NRING; ++i) {
		struct et_rxbuf_data *rbd = &sc->sc_rx_data[i];
		int j;

		for (j = 0; j < rx_done[i]; ++j) {
			struct et_rxbuf *rb = &rbd->rbd_buf[j];

			KASSERT(rb->rb_mbuf == NULL,
			    ("RX mbuf in %d RX ring is not freed yet\n", i));
			bus_dmamap_destroy(sc->sc_mbuf_dtag, rb->rb_dmap);
		}
	}

	/*
	 * Destroy DMA maps for TX mbufs
	 */
	for (i = 0; i < tx_done; ++i) {
		struct et_txbuf *tb = &tbd->tbd_buf[i];

		KASSERT(tb->tb_mbuf == NULL, ("TX mbuf is not freed yet\n"));
		bus_dmamap_destroy(sc->sc_mbuf_dtag, tb->tb_dmap);
	}

	/*
	 * Destroy spare mbuf DMA map
	 */
	bus_dmamap_destroy(sc->sc_mbuf_dtag, sc->sc_mbuf_tmp_dmap);

	/*
	 * Destroy mbuf DMA tag
	 */
	bus_dma_tag_destroy(sc->sc_mbuf_dtag);
	sc->sc_mbuf_dtag = NULL;
}

static int
et_dma_mem_create(device_t dev, bus_size_t size, bus_dma_tag_t *dtag,
    void **addr, bus_addr_t *paddr, bus_dmamap_t *dmap)
{
	struct et_softc *sc = device_get_softc(dev);
	int error;

	error = bus_dma_tag_create(sc->sc_dtag, ET_ALIGN, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
	    NULL, NULL,
	    size, 1, BUS_SPACE_MAXSIZE_32BIT,
	    0, dtag);
	if (error) {
		device_printf(dev, "can't create DMA tag\n");
		return error;
	}

	error = bus_dmamem_alloc(*dtag, addr, BUS_DMA_WAITOK | BUS_DMA_ZERO,
	    dmap);
	if (error) {
		device_printf(dev, "can't allocate DMA mem\n");
		bus_dma_tag_destroy(*dtag);
		*dtag = NULL;
		return error;
	}

	error = bus_dmamap_load(*dtag, *dmap, *addr, size,
	    et_dma_ring_addr, paddr, BUS_DMA_WAITOK);
	if (error) {
		device_printf(dev, "can't load DMA mem\n");
		bus_dmamem_free(*dtag, *addr, *dmap);
		bus_dma_tag_destroy(*dtag);
		*dtag = NULL;
		return error;
	}
	return 0;
}

static void
et_dma_mem_destroy(bus_dma_tag_t dtag, void *addr, bus_dmamap_t dmap)
{
	if (dtag != NULL) {
		bus_dmamap_unload(dtag, dmap);
		bus_dmamem_free(dtag, addr, dmap);
		bus_dma_tag_destroy(dtag);
	}
}

static void
et_dma_ring_addr(void *arg, bus_dma_segment_t *seg, int nseg, int error)
{
	KASSERT(nseg == 1, ("too many segments\n"));
	*((bus_addr_t *)arg) = seg->ds_addr;
}

static void
et_chip_attach(struct et_softc *sc)
{
	uint32_t val;

	/*
	 * Perform minimal initialization
	 */

	/* Disable loopback */
	CSR_WRITE_4(sc, ET_LOOPBACK, 0);

	/* Reset MAC */
	CSR_WRITE_4(sc, ET_MAC_CFG1,
	    ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC |
	    ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC |
	    ET_MAC_CFG1_SIM_RST | ET_MAC_CFG1_SOFT_RST);

	/*
	 * Setup half duplex mode
	 */
	val = __SHIFTIN(10, ET_MAC_HDX_ALT_BEB_TRUNC) |
	    __SHIFTIN(15, ET_MAC_HDX_REXMIT_MAX) |
	    __SHIFTIN(55, ET_MAC_HDX_COLLWIN) |
	    ET_MAC_HDX_EXC_DEFER;
	CSR_WRITE_4(sc, ET_MAC_HDX, val);

	/* Clear MAC control */
	CSR_WRITE_4(sc, ET_MAC_CTRL, 0);

	/* Reset MII */
	CSR_WRITE_4(sc, ET_MII_CFG, ET_MII_CFG_CLKRST);

	/* Bring MAC out of reset state */
	CSR_WRITE_4(sc, ET_MAC_CFG1, 0);

	/* Enable memory controllers */
	CSR_WRITE_4(sc, ET_MMC_CTRL, ET_MMC_CTRL_ENABLE);
}

static void
et_intr(void *xsc)
{
	struct et_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t intrs;

	ASSERT_SERIALIZED(ifp->if_serializer);

	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return;

	et_disable_intrs(sc);

	intrs = CSR_READ_4(sc, ET_INTR_STATUS);
	intrs &= ET_INTRS;
	if (intrs == 0)	/* Not interested */
		goto back;

	if (intrs & ET_INTR_RXEOF)
		et_rxeof(sc);
	if (intrs & (ET_INTR_TXEOF | ET_INTR_TIMER))
		et_txeof(sc);
	if (intrs & ET_INTR_TIMER)
		CSR_WRITE_4(sc, ET_TIMER, sc->sc_timer);
back:
	et_enable_intrs(sc, ET_INTRS);
}

static void
et_init(void *xsc)
{
	struct et_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	const struct et_bsize *arr;
	int error, i;

	ASSERT_SERIALIZED(ifp->if_serializer);

	et_stop(sc);

	arr = ET_FRAMELEN(ifp->if_mtu) < MCLBYTES ?
	    et_bufsize_std : et_bufsize_jumbo;
	for (i = 0; i < ET_RX_NRING; ++i) {
		sc->sc_rx_data[i].rbd_bufsize = arr[i].bufsize;
		sc->sc_rx_data[i].rbd_newbuf = arr[i].newbuf;
		sc->sc_rx_data[i].rbd_jumbo = arr[i].jumbo;
	}

	error = et_init_tx_ring(sc);
	if (error)
		goto back;

	error = et_init_rx_ring(sc);
	if (error)
		goto back;

	error = et_chip_init(sc);
	if (error)
		goto back;

	error = et_enable_txrx(sc, 1);
	if (error)
		goto back;

	et_enable_intrs(sc, ET_INTRS);

	callout_reset(&sc->sc_tick, hz, et_tick, sc);

	CSR_WRITE_4(sc, ET_TIMER, sc->sc_timer);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;
back:
	if (error)
		et_stop(sc);
}

static int
et_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data, struct ucred *cr)
{
	struct et_softc *sc = ifp->if_softc;
	struct mii_data *mii = device_get_softc(sc->sc_miibus);
	struct ifreq *ifr = (struct ifreq *)data;
	int error = 0, max_framelen;

	ASSERT_SERIALIZED(ifp->if_serializer);

	switch (cmd) {
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING) {
				if ((ifp->if_flags ^ sc->sc_if_flags) &
				    (IFF_ALLMULTI | IFF_PROMISC))
					et_setmulti(sc);
			} else {
				et_init(sc);
			}
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				et_stop(sc);
		}
		sc->sc_if_flags = ifp->if_flags;
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (ifp->if_flags & IFF_RUNNING)
			et_setmulti(sc);
		break;

	case SIOCSIFMTU:
		if (sc->sc_flags & ET_FLAG_JUMBO)
			max_framelen = ET_JUMBO_FRAMELEN;
		else
			max_framelen = MCLBYTES - 1;

		if (ET_FRAMELEN(ifr->ifr_mtu) > max_framelen) {
			error = EOPNOTSUPP;
			break;
		}

		ifp->if_mtu = ifr->ifr_mtu;
		if (ifp->if_flags & IFF_RUNNING)
			et_init(sc);
		break;

	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}
	return error;
}

static void
et_start(struct ifnet *ifp)
{
	struct et_softc *sc = ifp->if_softc;
	struct et_txbuf_data *tbd = &sc->sc_tx_data;
	int trans;

	ASSERT_SERIALIZED(ifp->if_serializer);

	if ((sc->sc_flags & ET_FLAG_TXRX_ENABLED) == 0) {
		ifq_purge(&ifp->if_snd);
		return;
	}

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	trans = 0;
	for (;;) {
		struct mbuf *m;

		if ((tbd->tbd_used + ET_NSEG_SPARE) > ET_TX_NDESC) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		m = ifq_dequeue(&ifp->if_snd, NULL);
		if (m == NULL)
			break;

		if (et_encap(sc, &m)) {
			ifp->if_oerrors++;
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}
		trans = 1;

		BPF_MTAP(ifp, m);
	}

	if (trans)
		ifp->if_timer = 5;
}

static void
et_watchdog(struct ifnet *ifp)
{
	ASSERT_SERIALIZED(ifp->if_serializer);

	if_printf(ifp, "watchdog timed out\n");

	ifp->if_init(ifp->if_softc);
	if_devstart(ifp);
}

static int
et_stop_rxdma(struct et_softc *sc)
{
	CSR_WRITE_4(sc, ET_RXDMA_CTRL,
	    ET_RXDMA_CTRL_HALT | ET_RXDMA_CTRL_RING1_ENABLE);

	DELAY(5);
	if ((CSR_READ_4(sc, ET_RXDMA_CTRL) & ET_RXDMA_CTRL_HALTED) == 0) {
		if_printf(&sc->arpcom.ac_if, "can't stop RX DMA engine\n");
		return ETIMEDOUT;
	}
	return 0;
}

static int
et_stop_txdma(struct et_softc *sc)
{
	CSR_WRITE_4(sc, ET_TXDMA_CTRL,
	    ET_TXDMA_CTRL_HALT | ET_TXDMA_CTRL_SINGLE_EPKT);
	return 0;
}

static void
et_free_tx_ring(struct et_softc *sc)
{
	struct et_txbuf_data *tbd = &sc->sc_tx_data;
	struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
	int i;

	for (i = 0; i < ET_TX_NDESC; ++i) {
		struct et_txbuf *tb = &tbd->tbd_buf[i];

		if (tb->tb_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_mbuf_dtag, tb->tb_dmap);
			m_freem(tb->tb_mbuf);
			tb->tb_mbuf = NULL;
		}
	}

	bzero(tx_ring->tr_desc, ET_TX_RING_SIZE);
	bus_dmamap_sync(tx_ring->tr_dtag, tx_ring->tr_dmap,
	    BUS_DMASYNC_PREWRITE);
}

static void
et_free_rx_ring(struct et_softc *sc)
{
	int n;

	for (n = 0; n < ET_RX_NRING; ++n) {
		struct et_rxbuf_data *rbd = &sc->sc_rx_data[n];
		struct et_rxdesc_ring *rx_ring = &sc->sc_rx_ring[n];
		int i;

		for (i = 0; i < ET_RX_NDESC; ++i) {
			struct et_rxbuf *rb = &rbd->rbd_buf[i];

			if (rb->rb_mbuf != NULL) {
				if (!rbd->rbd_jumbo) {
					bus_dmamap_unload(sc->sc_mbuf_dtag,
					    rb->rb_dmap);
				}
				m_freem(rb->rb_mbuf);
				rb->rb_mbuf = NULL;
			}
		}

		bzero(rx_ring->rr_desc, ET_RX_RING_SIZE);
		bus_dmamap_sync(rx_ring->rr_dtag, rx_ring->rr_dmap,
		    BUS_DMASYNC_PREWRITE);
	}
}

static void
et_setmulti(struct et_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t hash[4] = { 0, 0, 0, 0 };
	uint32_t rxmac_ctrl, pktfilt;
	struct ifmultiaddr *ifma;
	int i, count;

	pktfilt = CSR_READ_4(sc, ET_PKTFILT);
	rxmac_ctrl = CSR_READ_4(sc, ET_RXMAC_CTRL);

	pktfilt &= ~(ET_PKTFILT_BCAST | ET_PKTFILT_MCAST | ET_PKTFILT_UCAST);
	if (ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) {
		rxmac_ctrl |= ET_RXMAC_CTRL_NO_PKTFILT;
		goto back;
	}

	count = 0;
	LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		uint32_t *hp, h;

		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;

		h = ether_crc32_be(LLADDR((struct sockaddr_dl *)
		    ifma->ifma_addr), ETHER_ADDR_LEN);
		h = (h & 0x3f800000) >> 23;

		hp = &hash[0];
		if (h >= 32 && h < 64) {
			h -= 32;
			hp = &hash[1];
		} else if (h >= 64 && h < 96) {
			h -= 64;
			hp = &hash[2];
		} else if (h >= 96) {
			h -= 96;
			hp = &hash[3];
		}
		*hp |= (1 << h);

		++count;
	}

	for (i = 0; i < 4; ++i)
		CSR_WRITE_4(sc, ET_MULTI_HASH + (i * 4), hash[i]);

	if (count > 0)
		pktfilt |= ET_PKTFILT_MCAST;
	rxmac_ctrl &= ~ET_RXMAC_CTRL_NO_PKTFILT;
back:
	CSR_WRITE_4(sc, ET_PKTFILT, pktfilt);
	CSR_WRITE_4(sc, ET_RXMAC_CTRL, rxmac_ctrl);
}

static int
et_chip_init(struct et_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t rxq_end;
	int error, frame_len, rxmem_size;

	/*
	 * Split 16Kbytes internal memory between TX and RX
	 * according to frame length.
	 */
	frame_len = ET_FRAMELEN(ifp->if_mtu);
	if (frame_len < 2048) {
		rxmem_size = ET_MEM_RXSIZE_DEFAULT;
	} else if (frame_len <= ET_RXMAC_CUT_THRU_FRMLEN) {
		rxmem_size = ET_MEM_SIZE / 2;
	} else {
		rxmem_size = ET_MEM_SIZE -
		    roundup(frame_len + ET_MEM_TXSIZE_EX, ET_MEM_UNIT);
	}
	rxq_end = ET_QUEUE_ADDR(rxmem_size);

	CSR_WRITE_4(sc, ET_RXQUEUE_START, ET_QUEUE_ADDR_START);
	CSR_WRITE_4(sc, ET_RXQUEUE_END, rxq_end);
	CSR_WRITE_4(sc, ET_TXQUEUE_START, rxq_end + 1);
	CSR_WRITE_4(sc, ET_TXQUEUE_END, ET_QUEUE_ADDR_END);

	/* No loopback */
	CSR_WRITE_4(sc, ET_LOOPBACK, 0);

	/* Clear MSI configure */
	CSR_WRITE_4(sc, ET_MSI_CFG, 0);

	/* Disable timer */
	CSR_WRITE_4(sc, ET_TIMER, 0);

	/* Initialize MAC */
	et_init_mac(sc);

	/* Enable memory controllers */
	CSR_WRITE_4(sc, ET_MMC_CTRL, ET_MMC_CTRL_ENABLE);

	/* Initialize RX MAC */
	et_init_rxmac(sc);

	/* Initialize TX MAC */
	et_init_txmac(sc);

	/* Initialize RX DMA engine */
	error = et_init_rxdma(sc);
	if (error)
		return error;

	/* Initialize TX DMA engine */
	error = et_init_txdma(sc);
	if (error)
		return error;

	return 0;
}

static int
et_init_tx_ring(struct et_softc *sc)
{
	struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
	struct et_txstatus_data *txsd = &sc->sc_tx_status;
	struct et_txbuf_data *tbd = &sc->sc_tx_data;

	bzero(tx_ring->tr_desc, ET_TX_RING_SIZE);
	bus_dmamap_sync(tx_ring->tr_dtag, tx_ring->tr_dmap,
	    BUS_DMASYNC_PREWRITE);

	tbd->tbd_start_index = 0;
	tbd->tbd_start_wrap = 0;
	tbd->tbd_used = 0;

	bzero(txsd->txsd_status, sizeof(uint32_t));
	bus_dmamap_sync(txsd->txsd_dtag, txsd->txsd_dmap,
	    BUS_DMASYNC_PREWRITE);
	return 0;
}

static int
et_init_rx_ring(struct et_softc *sc)
{
	struct et_rxstatus_data *rxsd = &sc->sc_rx_status;
	struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring;
	int n;

	for (n = 0; n < ET_RX_NRING; ++n) {
		struct et_rxbuf_data *rbd = &sc->sc_rx_data[n];
		int i, error;

		for (i = 0; i < ET_RX_NDESC; ++i) {
			error = rbd->rbd_newbuf(rbd, i, 1);
			if (error) {
				if_printf(&sc->arpcom.ac_if, "%d ring %d buf, "
				    "newbuf failed: %d\n", n, i, error);
				return error;
			}
		}
	}

	bzero(rxsd->rxsd_status, sizeof(struct et_rxstatus));
	bus_dmamap_sync(rxsd->rxsd_dtag, rxsd->rxsd_dmap,
	    BUS_DMASYNC_PREWRITE);

	bzero(rxst_ring->rsr_stat, ET_RXSTAT_RING_SIZE);
	bus_dmamap_sync(rxst_ring->rsr_dtag, rxst_ring->rsr_dmap,
	    BUS_DMASYNC_PREWRITE);

	return 0;
}

static void
et_dma_buf_addr(void *xctx, bus_dma_segment_t *segs, int nsegs,
    bus_size_t mapsz __unused, int error)
{
	struct et_dmamap_ctx *ctx = xctx;
	int i;

	if (error)
		return;

	if (nsegs > ctx->nsegs) {
		ctx->nsegs = 0;
		return;
	}

	ctx->nsegs = nsegs;
	for (i = 0; i < nsegs; ++i)
		ctx->segs[i] = segs[i];
}

static int
et_init_rxdma(struct et_softc *sc)
{
	struct et_rxstatus_data *rxsd = &sc->sc_rx_status;
	struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring;
	struct et_rxdesc_ring *rx_ring;
	int error;

	error = et_stop_rxdma(sc);
	if (error) {
		if_printf(&sc->arpcom.ac_if, "can't init RX DMA engine\n");
		return error;
	}

	/*
	 * Install RX status
	 */
	CSR_WRITE_4(sc, ET_RX_STATUS_HI, ET_ADDR_HI(rxsd->rxsd_paddr));
	CSR_WRITE_4(sc, ET_RX_STATUS_LO, ET_ADDR_LO(rxsd->rxsd_paddr));

	/*
	 * Install RX stat ring
	 */
	CSR_WRITE_4(sc, ET_RXSTAT_HI, ET_ADDR_HI(rxst_ring->rsr_paddr));
	CSR_WRITE_4(sc, ET_RXSTAT_LO, ET_ADDR_LO(rxst_ring->rsr_paddr));
	CSR_WRITE_4(sc, ET_RXSTAT_CNT, ET_RX_NSTAT - 1);
	CSR_WRITE_4(sc, ET_RXSTAT_POS, 0);
	CSR_WRITE_4(sc, ET_RXSTAT_MINCNT, ((ET_RX_NSTAT * 15) / 100) - 1);

	/* Match ET_RXSTAT_POS */
	rxst_ring->rsr_index = 0;
	rxst_ring->rsr_wrap = 0;

	/*
	 * Install the 2nd RX descriptor ring
	 */
	rx_ring = &sc->sc_rx_ring[1];
	CSR_WRITE_4(sc, ET_RX_RING1_HI, ET_ADDR_HI(rx_ring->rr_paddr));
	CSR_WRITE_4(sc, ET_RX_RING1_LO, ET_ADDR_LO(rx_ring->rr_paddr));
	CSR_WRITE_4(sc, ET_RX_RING1_CNT, ET_RX_NDESC - 1);
	CSR_WRITE_4(sc, ET_RX_RING1_POS, ET_RX_RING1_POS_WRAP);
	CSR_WRITE_4(sc, ET_RX_RING1_MINCNT, ((ET_RX_NDESC * 15) / 100) - 1);

	/* Match ET_RX_RING1_POS */
	rx_ring->rr_index = 0;
	rx_ring->rr_wrap = 1;

	/*
	 * Install the 1st RX descriptor ring
	 */
	rx_ring = &sc->sc_rx_ring[0];
	CSR_WRITE_4(sc, ET_RX_RING0_HI, ET_ADDR_HI(rx_ring->rr_paddr));
	CSR_WRITE_4(sc, ET_RX_RING0_LO, ET_ADDR_LO(rx_ring->rr_paddr));
	CSR_WRITE_4(sc, ET_RX_RING0_CNT, ET_RX_NDESC - 1);
	CSR_WRITE_4(sc, ET_RX_RING0_POS, ET_RX_RING0_POS_WRAP);
	CSR_WRITE_4(sc, ET_RX_RING0_MINCNT, ((ET_RX_NDESC * 15) / 100) - 1);

	/* Match ET_RX_RING0_POS */
	rx_ring->rr_index = 0;
	rx_ring->rr_wrap = 1;

	/*
	 * RX intr moderation
	 */
	CSR_WRITE_4(sc, ET_RX_INTR_NPKTS, sc->sc_rx_intr_npkts);
	CSR_WRITE_4(sc, ET_RX_INTR_DELAY, sc->sc_rx_intr_delay);

	return 0;
}

static int
et_init_txdma(struct et_softc *sc)
{
	struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
	struct et_txstatus_data *txsd = &sc->sc_tx_status;
	int error;

	error = et_stop_txdma(sc);
	if (error) {
		if_printf(&sc->arpcom.ac_if, "can't init TX DMA engine\n");
		return error;
	}

	/*
	 * Install TX descriptor ring
	 */
	CSR_WRITE_4(sc, ET_TX_RING_HI, ET_ADDR_HI(tx_ring->tr_paddr));
	CSR_WRITE_4(sc, ET_TX_RING_LO, ET_ADDR_LO(tx_ring->tr_paddr));
	CSR_WRITE_4(sc, ET_TX_RING_CNT, ET_TX_NDESC - 1);

	/*
	 * Install TX status
	 */
	CSR_WRITE_4(sc, ET_TX_STATUS_HI, ET_ADDR_HI(txsd->txsd_paddr));
	CSR_WRITE_4(sc, ET_TX_STATUS_LO, ET_ADDR_LO(txsd->txsd_paddr));

	CSR_WRITE_4(sc, ET_TX_READY_POS, 0);

	/* Match ET_TX_READY_POS */
	tx_ring->tr_ready_index = 0;
	tx_ring->tr_ready_wrap = 0;

	return 0;
}

static void
et_init_mac(struct et_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	const uint8_t *eaddr = IF_LLADDR(ifp);
	uint32_t val;

	/* Reset MAC */
	CSR_WRITE_4(sc, ET_MAC_CFG1,
	    ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC |
	    ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC |
	    ET_MAC_CFG1_SIM_RST | ET_MAC_CFG1_SOFT_RST);

	/*
	 * Setup inter packet gap
	 */
	val = __SHIFTIN(56, ET_IPG_NONB2B_1) |
	    __SHIFTIN(88, ET_IPG_NONB2B_2) |
	    __SHIFTIN(80, ET_IPG_MINIFG) |
	    __SHIFTIN(96, ET_IPG_B2B);
	CSR_WRITE_4(sc, ET_IPG, val);

	/*
	 * Setup half duplex mode
	 */
	val = __SHIFTIN(10, ET_MAC_HDX_ALT_BEB_TRUNC) |
	    __SHIFTIN(15, ET_MAC_HDX_REXMIT_MAX) |
	    __SHIFTIN(55, ET_MAC_HDX_COLLWIN) |
	    ET_MAC_HDX_EXC_DEFER;
	CSR_WRITE_4(sc, ET_MAC_HDX, val);

	/* Clear MAC control */
	CSR_WRITE_4(sc, ET_MAC_CTRL, 0);

	/* Reset MII */
	CSR_WRITE_4(sc, ET_MII_CFG, ET_MII_CFG_CLKRST);

	/*
	 * Set MAC address
	 */
	val = eaddr[2] | (eaddr[3] << 8) | (eaddr[4] << 16) | (eaddr[5] << 24);
	CSR_WRITE_4(sc, ET_MAC_ADDR1, val);
	val = (eaddr[0] << 16) | (eaddr[1] << 24);
	CSR_WRITE_4(sc, ET_MAC_ADDR2, val);

	/* Set max frame length */
	CSR_WRITE_4(sc, ET_MAX_FRMLEN, ET_FRAMELEN(ifp->if_mtu));

	/* Bring MAC out of reset state */
	CSR_WRITE_4(sc, ET_MAC_CFG1, 0);
}

static void
et_init_rxmac(struct et_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	const uint8_t *eaddr = IF_LLADDR(ifp);
	uint32_t val;
	int i;

	/* Disable RX MAC and WOL */
	CSR_WRITE_4(sc, ET_RXMAC_CTRL, ET_RXMAC_CTRL_WOL_DISABLE);

	/*
	 * Clear all WOL related registers
	 */
	for (i = 0; i < 3; ++i)
		CSR_WRITE_4(sc, ET_WOL_CRC + (i * 4), 0);
	for (i = 0; i < 20; ++i)
		CSR_WRITE_4(sc, ET_WOL_MASK + (i * 4), 0);

	/*
	 * Set WOL source address.  XXX is this necessary?
	 */
	val = (eaddr[2] << 24) | (eaddr[3] << 16) | (eaddr[4] << 8) | eaddr[5];
	CSR_WRITE_4(sc, ET_WOL_SA_LO, val);
	val = (eaddr[0] << 8) | eaddr[1];
	CSR_WRITE_4(sc, ET_WOL_SA_HI, val);

	/* Clear packet filters */
	CSR_WRITE_4(sc, ET_PKTFILT, 0);

	/* No ucast filtering */
	CSR_WRITE_4(sc, ET_UCAST_FILTADDR1, 0);
	CSR_WRITE_4(sc, ET_UCAST_FILTADDR2, 0);
	CSR_WRITE_4(sc, ET_UCAST_FILTADDR3, 0);

	if (ET_FRAMELEN(ifp->if_mtu) > ET_RXMAC_CUT_THRU_FRMLEN) {
		/*
		 * In order to transmit jumbo packets greater than
		 * ET_RXMAC_CUT_THRU_FRMLEN bytes, the FIFO between
		 * RX MAC and RX DMA needs to be reduced in size to
		 * (ET_MEM_SIZE - ET_MEM_TXSIZE_EX - framelen).  In
		 * order to implement this, we must use "cut through"
		 * mode in the RX MAC, which chops packets down into
		 * segments.  In this case we selected 256 bytes,
		 * since this is the size of the PCI-Express TLP's
		 * that the ET1310 uses.
		 */
		val = __SHIFTIN(ET_RXMAC_SEGSZ(256), ET_RXMAC_MC_SEGSZ_MAX) |
		    ET_RXMAC_MC_SEGSZ_ENABLE;
	} else {
		val = 0;
	}
	CSR_WRITE_4(sc, ET_RXMAC_MC_SEGSZ, val);

	CSR_WRITE_4(sc, ET_RXMAC_MC_WATERMARK, 0);

	/* Initialize RX MAC management register */
	CSR_WRITE_4(sc, ET_RXMAC_MGT, 0);

	CSR_WRITE_4(sc, ET_RXMAC_SPACE_AVL, 0);

	CSR_WRITE_4(sc, ET_RXMAC_MGT,
	    ET_RXMAC_MGT_PASS_ECRC |
	    ET_RXMAC_MGT_PASS_ELEN |
	    ET_RXMAC_MGT_PASS_ETRUNC |
	    ET_RXMAC_MGT_CHECK_PKT);

	/*
	 * Configure runt filtering (may not work on certain chip generations)
	 */
	val = __SHIFTIN(ETHER_MIN_LEN, ET_PKTFILT_MINLEN) | ET_PKTFILT_FRAG;
	CSR_WRITE_4(sc, ET_PKTFILT, val);

	/* Enable RX MAC but leave WOL disabled */
	CSR_WRITE_4(sc, ET_RXMAC_CTRL,
	    ET_RXMAC_CTRL_WOL_DISABLE | ET_RXMAC_CTRL_ENABLE);

	/*
	 * Setup multicast hash and allmulti/promisc mode
	 */
	et_setmulti(sc);
}

static void
et_init_txmac(struct et_softc *sc)
{
	/* Disable TX MAC and FC(?) */
	CSR_WRITE_4(sc, ET_TXMAC_CTRL, ET_TXMAC_CTRL_FC_DISABLE);

	/* No flow control yet */
	CSR_WRITE_4(sc, ET_TXMAC_FLOWCTRL, 0);

	/* Enable TX MAC but leave FC(?) disabled */
	CSR_WRITE_4(sc, ET_TXMAC_CTRL,
	    ET_TXMAC_CTRL_ENABLE | ET_TXMAC_CTRL_FC_DISABLE);
}

static int
et_start_rxdma(struct et_softc *sc)
{
	uint32_t val = 0;

	val |= __SHIFTIN(sc->sc_rx_data[0].rbd_bufsize,
	    ET_RXDMA_CTRL_RING0_SIZE) |
	    ET_RXDMA_CTRL_RING0_ENABLE;
	val |= __SHIFTIN(sc->sc_rx_data[1].rbd_bufsize,
	    ET_RXDMA_CTRL_RING1_SIZE) |
	    ET_RXDMA_CTRL_RING1_ENABLE;

	CSR_WRITE_4(sc, ET_RXDMA_CTRL, val);

	DELAY(5);

	if (CSR_READ_4(sc, ET_RXDMA_CTRL) & ET_RXDMA_CTRL_HALTED) {
		if_printf(&sc->arpcom.ac_if, "can't start RX DMA engine\n");
		return ETIMEDOUT;
	}
	return 0;
}

static int
et_start_txdma(struct et_softc *sc)
{
	CSR_WRITE_4(sc, ET_TXDMA_CTRL, ET_TXDMA_CTRL_SINGLE_EPKT);
	return 0;
}

static int
et_enable_txrx(struct et_softc *sc, int media_upd)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t val;
	int i, error;

	val = CSR_READ_4(sc, ET_MAC_CFG1);
	val |= ET_MAC_CFG1_TXEN | ET_MAC_CFG1_RXEN;
	val &= ~(ET_MAC_CFG1_TXFLOW | ET_MAC_CFG1_RXFLOW |
	    ET_MAC_CFG1_LOOPBACK);
	CSR_WRITE_4(sc, ET_MAC_CFG1, val);

	if (media_upd)
		et_ifmedia_upd(ifp);
	else
		et_setmedia(sc);

#define NRETRY	100

	for (i = 0; i < NRETRY; ++i) {
		val = CSR_READ_4(sc, ET_MAC_CFG1);
		if ((val & (ET_MAC_CFG1_SYNC_TXEN | ET_MAC_CFG1_SYNC_RXEN)) ==
		    (ET_MAC_CFG1_SYNC_TXEN | ET_MAC_CFG1_SYNC_RXEN))
			break;

		DELAY(10);
	}
	if (i == NRETRY) {
		if_printf(ifp, "can't enable RX/TX\n");
		return 0;
	}
	sc->sc_flags |= ET_FLAG_TXRX_ENABLED;

#undef NRETRY

	/*
	 * Start TX/RX DMA engine
	 */
	error = et_start_rxdma(sc);
	if (error)
		return error;

	error = et_start_txdma(sc);
	if (error)
		return error;

	return 0;
}

static void
et_rxeof(struct et_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct et_rxstatus_data *rxsd = &sc->sc_rx_status;
	struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring;
	uint32_t rxs_stat_ring;
	int rxst_wrap, rxst_index;

	if ((sc->sc_flags & ET_FLAG_TXRX_ENABLED) == 0)
		return;

	bus_dmamap_sync(rxsd->rxsd_dtag, rxsd->rxsd_dmap,
	    BUS_DMASYNC_POSTREAD);
	bus_dmamap_sync(rxst_ring->rsr_dtag, rxst_ring->rsr_dmap,
	    BUS_DMASYNC_POSTREAD);

	rxs_stat_ring = rxsd->rxsd_status->rxs_stat_ring;
	rxst_wrap = (rxs_stat_ring & ET_RXS_STATRING_WRAP) ? 1 : 0;
	rxst_index = __SHIFTOUT(rxs_stat_ring, ET_RXS_STATRING_INDEX);

	while (rxst_index != rxst_ring->rsr_index ||
	    rxst_wrap != rxst_ring->rsr_wrap) {
		struct et_rxbuf_data *rbd;
		struct et_rxdesc_ring *rx_ring;
		struct et_rxstat *st;
		struct mbuf *m;
		int buflen, buf_idx, ring_idx;
		uint32_t rxstat_pos, rxring_pos;

		KKASSERT(rxst_ring->rsr_index < ET_RX_NSTAT);
		st = &rxst_ring->rsr_stat[rxst_ring->rsr_index];

		buflen = __SHIFTOUT(st->rxst_info2, ET_RXST_INFO2_LEN);
		buf_idx = __SHIFTOUT(st->rxst_info2, ET_RXST_INFO2_BUFIDX);
		ring_idx = __SHIFTOUT(st->rxst_info2, ET_RXST_INFO2_RINGIDX);

		if (++rxst_ring->rsr_index == ET_RX_NSTAT) {
			rxst_ring->rsr_index = 0;
			rxst_ring->rsr_wrap ^= 1;
		}
		rxstat_pos = __SHIFTIN(rxst_ring->rsr_index,
		    ET_RXSTAT_POS_INDEX);
		if (rxst_ring->rsr_wrap)
			rxstat_pos |= ET_RXSTAT_POS_WRAP;
		CSR_WRITE_4(sc, ET_RXSTAT_POS, rxstat_pos);

		if (ring_idx >= ET_RX_NRING) {
			ifp->if_ierrors++;
			if_printf(ifp, "invalid ring index %d\n", ring_idx);
			continue;
		}
		if (buf_idx >= ET_RX_NDESC) {
			ifp->if_ierrors++;
			if_printf(ifp, "invalid buf index %d\n", buf_idx);
			continue;
		}

		rbd = &sc->sc_rx_data[ring_idx];
		m = rbd->rbd_buf[buf_idx].rb_mbuf;

		if (rbd->rbd_newbuf(rbd, buf_idx, 0) == 0) {
			if (buflen < ETHER_CRC_LEN) {
				m_freem(m);
				ifp->if_ierrors++;
			} else {
				m->m_pkthdr.len = m->m_len = buflen;
				m->m_pkthdr.rcvif = ifp;

				m_adj(m, -ETHER_CRC_LEN);

				ifp->if_ipackets++;
				ifp->if_input(ifp, m);
			}
		} else {
			ifp->if_ierrors++;
		}
		m = NULL;	/* Catch invalid reference */

		rx_ring = &sc->sc_rx_ring[ring_idx];

		if (buf_idx != rx_ring->rr_index) {
			if_printf(ifp, "WARNING!! ring %d, "
			    "buf_idx %d, rr_idx %d\n",
			    ring_idx, buf_idx, rx_ring->rr_index);
		}

		KKASSERT(rx_ring->rr_index < ET_RX_NDESC);
		if (++rx_ring->rr_index == ET_RX_NDESC) {
			rx_ring->rr_index = 0;
			rx_ring->rr_wrap ^= 1;
		}
		rxring_pos = __SHIFTIN(rx_ring->rr_index, ET_RX_RING_POS_INDEX);
		if (rx_ring->rr_wrap)
			rxring_pos |= ET_RX_RING_POS_WRAP;
		CSR_WRITE_4(sc, rx_ring->rr_posreg, rxring_pos);
	}
}

static int
et_encap(struct et_softc *sc, struct mbuf **m0)
{
	struct mbuf *m = *m0;
	bus_dma_segment_t segs[ET_NSEG_MAX];
	struct et_dmamap_ctx ctx;
	struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
	struct et_txbuf_data *tbd = &sc->sc_tx_data;
	struct et_txdesc *td;
	bus_dmamap_t map;
	int error, maxsegs, first_idx, last_idx, i;
	uint32_t tx_ready_pos, last_td_ctrl2;

	maxsegs = ET_TX_NDESC - tbd->tbd_used;
	if (maxsegs > ET_NSEG_MAX)
		maxsegs = ET_NSEG_MAX;
	KASSERT(maxsegs >= ET_NSEG_SPARE,
	    ("not enough spare TX desc (%d)\n", maxsegs));

	KKASSERT(tx_ring->tr_ready_index < ET_TX_NDESC);
	first_idx = tx_ring->tr_ready_index;
	map = tbd->tbd_buf[first_idx].tb_dmap;

	ctx.nsegs = maxsegs;
	ctx.segs = segs;
	error = bus_dmamap_load_mbuf(sc->sc_mbuf_dtag, map, m,
	    et_dma_buf_addr, &ctx, BUS_DMA_NOWAIT);
	if (!error && ctx.nsegs == 0) {
		bus_dmamap_unload(sc->sc_mbuf_dtag, map);
		error = EFBIG;
	}
	if (error && error != EFBIG) {
		if_printf(&sc->arpcom.ac_if, "can't load TX mbuf, error %d\n",
		    error);
		goto back;
	}
	if (error) {	/* error == EFBIG */
		struct mbuf *m_new;

		m_new = m_defrag(m, MB_DONTWAIT);
		if (m_new == NULL) {
			if_printf(&sc->arpcom.ac_if, "can't defrag TX mbuf\n");
			error = ENOBUFS;
			goto back;
		} else {
			*m0 = m = m_new;
		}

		ctx.nsegs = maxsegs;
		ctx.segs = segs;
		error = bus_dmamap_load_mbuf(sc->sc_mbuf_dtag, map, m,
		    et_dma_buf_addr, &ctx,
		    BUS_DMA_NOWAIT);
		if (error || ctx.nsegs == 0) {
			if (ctx.nsegs == 0) {
				bus_dmamap_unload(sc->sc_mbuf_dtag, map);
				error = EFBIG;
			}
			if_printf(&sc->arpcom.ac_if,
			    "can't load defragged TX mbuf\n");
			goto back;
		}
	}

	bus_dmamap_sync(sc->sc_mbuf_dtag, map, BUS_DMASYNC_PREWRITE);

	last_td_ctrl2 = ET_TDCTRL2_LAST_FRAG;
	sc->sc_tx += ctx.nsegs;
	if (sc->sc_tx / sc->sc_tx_intr_nsegs != sc->sc_tx_intr) {
		sc->sc_tx_intr = sc->sc_tx / sc->sc_tx_intr_nsegs;
		last_td_ctrl2 |= ET_TDCTRL2_INTR;
	}

	last_idx = -1;
	for (i = 0; i < ctx.nsegs; ++i) {
		int idx;

		idx = (first_idx + i) % ET_TX_NDESC;
		td = &tx_ring->tr_desc[idx];
		td->td_addr_hi = ET_ADDR_HI(segs[i].ds_addr);
		td->td_addr_lo = ET_ADDR_LO(segs[i].ds_addr);
		td->td_ctrl1 = __SHIFTIN(segs[i].ds_len, ET_TDCTRL1_LEN);

		if (i == ctx.nsegs - 1) {	/* Last frag */
			td->td_ctrl2 = last_td_ctrl2;
			last_idx = idx;
		}

		KKASSERT(tx_ring->tr_ready_index < ET_TX_NDESC);
		if (++tx_ring->tr_ready_index == ET_TX_NDESC) {
			tx_ring->tr_ready_index = 0;
			tx_ring->tr_ready_wrap ^= 1;
		}
	}
	td = &tx_ring->tr_desc[first_idx];
	td->td_ctrl2 |= ET_TDCTRL2_FIRST_FRAG;	/* First frag */

	KKASSERT(last_idx >= 0);
	tbd->tbd_buf[first_idx].tb_dmap = tbd->tbd_buf[last_idx].tb_dmap;
	tbd->tbd_buf[last_idx].tb_dmap = map;
	tbd->tbd_buf[last_idx].tb_mbuf = m;

	tbd->tbd_used += ctx.nsegs;
	KKASSERT(tbd->tbd_used <= ET_TX_NDESC);

	bus_dmamap_sync(tx_ring->tr_dtag, tx_ring->tr_dmap,
			BUS_DMASYNC_PREWRITE);

	tx_ready_pos = __SHIFTIN(tx_ring->tr_ready_index,
				 ET_TX_READY_POS_INDEX);
	if (tx_ring->tr_ready_wrap)
		tx_ready_pos |= ET_TX_READY_POS_WRAP;
	CSR_WRITE_4(sc, ET_TX_READY_POS, tx_ready_pos);

	error = 0;
back:
	if (error) {
		m_freem(m);
		*m0 = NULL;
	}
	return error;
}

static void
et_txeof(struct et_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
	struct et_txbuf_data *tbd = &sc->sc_tx_data;
	uint32_t tx_done;
	int end, wrap;

	if ((sc->sc_flags & ET_FLAG_TXRX_ENABLED) == 0)
		return;

	if (tbd->tbd_used == 0)
		return;

	tx_done = CSR_READ_4(sc, ET_TX_DONE_POS);
	end = __SHIFTOUT(tx_done, ET_TX_DONE_POS_INDEX);
	wrap = (tx_done & ET_TX_DONE_POS_WRAP) ? 1 : 0;

	/* Reclaim finished TX descriptors up to the chip's done index. */
	while (tbd->tbd_start_index != end || tbd->tbd_start_wrap != wrap) {
		struct et_txbuf *tb;

		KKASSERT(tbd->tbd_start_index < ET_TX_NDESC);
		tb = &tbd->tbd_buf[tbd->tbd_start_index];

		bzero(&tx_ring->tr_desc[tbd->tbd_start_index],
		      sizeof(struct et_txdesc));
		bus_dmamap_sync(tx_ring->tr_dtag, tx_ring->tr_dmap,
				BUS_DMASYNC_PREWRITE);

		if (tb->tb_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_mbuf_dtag, tb->tb_dmap);
			m_freem(tb->tb_mbuf);
			tb->tb_mbuf = NULL;
			ifp->if_opackets++;
		}

		if (++tbd->tbd_start_index == ET_TX_NDESC) {
			tbd->tbd_start_index = 0;
			tbd->tbd_start_wrap ^= 1;
		}

		KKASSERT(tbd->tbd_used > 0);
		tbd->tbd_used--;
	}

	if (tbd->tbd_used == 0)
		ifp->if_timer = 0;
	if (tbd->tbd_used + ET_NSEG_SPARE <= ET_TX_NDESC)
		ifp->if_flags &= ~IFF_OACTIVE;

	if_devstart(ifp);
}

static void
et_tick(void *xsc)
{
	struct et_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct mii_data *mii = device_get_softc(sc->sc_miibus);

	lwkt_serialize_enter(ifp->if_serializer);

	mii_tick(mii);
	if ((sc->sc_flags & ET_FLAG_TXRX_ENABLED) == 0 &&
	    (mii->mii_media_status & IFM_ACTIVE) &&
	    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
		if_printf(ifp, "Link up, enable TX/RX\n");
		if (et_enable_txrx(sc, 0) == 0)
			if_devstart(ifp);
	}
	callout_reset(&sc->sc_tick, hz, et_tick, sc);

	lwkt_serialize_exit(ifp->if_serializer);
}

static int
et_newbuf_cluster(struct et_rxbuf_data *rbd, int buf_idx, int init)
{
	return et_newbuf(rbd, buf_idx, init, MCLBYTES);
}

static int
et_newbuf_hdr(struct et_rxbuf_data *rbd, int buf_idx, int init)
{
	return et_newbuf(rbd, buf_idx, init, MHLEN);
}

static int
et_newbuf(struct et_rxbuf_data *rbd, int buf_idx, int init, int len0)
{
	struct et_softc *sc = rbd->rbd_softc;
	struct et_rxbuf *rb;
	struct mbuf *m;
	struct et_dmamap_ctx ctx;
	bus_dma_segment_t seg;
	bus_dmamap_t dmap;
	int error, len;

	KASSERT(!rbd->rbd_jumbo, ("calling %s with jumbo ring\n", __func__));

	KKASSERT(buf_idx < ET_RX_NDESC);
	rb = &rbd->rbd_buf[buf_idx];

	m = m_getl(len0, init ? MB_WAIT : MB_DONTWAIT, MT_DATA, M_PKTHDR, &len);
	if (m == NULL) {
		error = ENOBUFS;

		if (init) {
			if_printf(&sc->arpcom.ac_if,
				  "m_getl failed, size %d\n", len0);
			return error;
		} else {
			goto back;
		}
	}
	m->m_len = m->m_pkthdr.len = len;

	/*
	 * Try to load the RX mbuf into the temporary DMA map
	 */
	ctx.nsegs = 1;
	ctx.segs = &seg;
	error = bus_dmamap_load_mbuf(sc->sc_mbuf_dtag, sc->sc_mbuf_tmp_dmap, m,
				     et_dma_buf_addr, &ctx,
				     init ? BUS_DMA_WAITOK : BUS_DMA_NOWAIT);
	if (error || ctx.nsegs == 0) {
		if (!error) {
			bus_dmamap_unload(sc->sc_mbuf_dtag,
					  sc->sc_mbuf_tmp_dmap);
			error = EFBIG;
			if_printf(&sc->arpcom.ac_if, "too many segments?!\n");
		}
		m_freem(m);

		if (init) {
			if_printf(&sc->arpcom.ac_if, "can't load RX mbuf\n");
			return error;
		} else {
			goto back;
		}
	}

	if (!init) {
		bus_dmamap_sync(sc->sc_mbuf_dtag, rb->rb_dmap,
				BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_mbuf_dtag, rb->rb_dmap);
	}
	rb->rb_mbuf = m;
	rb->rb_paddr = seg.ds_addr;

	/*
	 * Swap RX buf's DMA map with the loaded temporary one
	 */
	dmap = rb->rb_dmap;
	rb->rb_dmap = sc->sc_mbuf_tmp_dmap;
	sc->sc_mbuf_tmp_dmap = dmap;

	error = 0;
back:
	et_setup_rxdesc(rbd, buf_idx, rb->rb_paddr);
	return error;
}

static int
et_sysctl_rx_intr_npkts(SYSCTL_HANDLER_ARGS)
{
	struct et_softc *sc = arg1;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int error = 0, v;

	lwkt_serialize_enter(ifp->if_serializer);

	v = sc->sc_rx_intr_npkts;
	error = sysctl_handle_int(oidp, &v, 0, req);
	if (error || req->newptr == NULL)
		goto back;
	if (v <= 0) {
		error = EINVAL;
		goto back;
	}

	if (sc->sc_rx_intr_npkts != v) {
		if (ifp->if_flags & IFF_RUNNING)
			CSR_WRITE_4(sc, ET_RX_INTR_NPKTS, v);
		sc->sc_rx_intr_npkts = v;
	}
back:
	lwkt_serialize_exit(ifp->if_serializer);
	return error;
}

static int
et_sysctl_rx_intr_delay(SYSCTL_HANDLER_ARGS)
{
	struct et_softc *sc = arg1;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int error = 0, v;

	lwkt_serialize_enter(ifp->if_serializer);

	v = sc->sc_rx_intr_delay;
	error = sysctl_handle_int(oidp, &v, 0, req);
	if (error || req->newptr == NULL)
		goto back;
	if (v <= 0) {
		error = EINVAL;
		goto back;
	}

	if (sc->sc_rx_intr_delay != v) {
		if (ifp->if_flags & IFF_RUNNING)
			CSR_WRITE_4(sc, ET_RX_INTR_DELAY, v);
		sc->sc_rx_intr_delay = v;
	}
back:
	lwkt_serialize_exit(ifp->if_serializer);
	return error;
}

static void
et_setmedia(struct et_softc *sc)
{
	struct mii_data *mii = device_get_softc(sc->sc_miibus);
	uint32_t cfg2, ctrl;

	cfg2 = CSR_READ_4(sc, ET_MAC_CFG2);
	cfg2 &= ~(ET_MAC_CFG2_MODE_MII | ET_MAC_CFG2_MODE_GMII |
		  ET_MAC_CFG2_FDX | ET_MAC_CFG2_BIGFRM);
	cfg2 |= ET_MAC_CFG2_LENCHK | ET_MAC_CFG2_CRC | ET_MAC_CFG2_PADCRC |
		__SHIFTIN(7, ET_MAC_CFG2_PREAMBLE_LEN);

	ctrl = CSR_READ_4(sc, ET_MAC_CTRL);
	ctrl &= ~(ET_MAC_CTRL_GHDX | ET_MAC_CTRL_MODE_MII);

	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T) {
		cfg2 |= ET_MAC_CFG2_MODE_GMII;
	} else {
		cfg2 |= ET_MAC_CFG2_MODE_MII;
		ctrl |= ET_MAC_CTRL_MODE_MII;
	}
	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX)
		cfg2 |= ET_MAC_CFG2_FDX;
	else
		ctrl |= ET_MAC_CTRL_GHDX;

	CSR_WRITE_4(sc, ET_MAC_CTRL, ctrl);
	CSR_WRITE_4(sc, ET_MAC_CFG2, cfg2);
}

static int
et_jumbo_mem_alloc(device_t dev)
{
	struct et_softc *sc = device_get_softc(dev);
	struct et_jumbo_data *jd = &sc->sc_jumbo_data;
	bus_addr_t paddr;
	uint8_t *buf;
	int error, i;

	error = et_dma_mem_create(dev, ET_JUMBO_MEM_SIZE, &jd->jd_dtag,
				  &jd->jd_buf, &paddr, &jd->jd_dmap);
	if (error) {
		device_printf(dev, "can't create jumbo DMA stuffs\n");
		return error;
	}

	jd->jd_slots = kmalloc(sizeof(*jd->jd_slots) * ET_JSLOTS, M_DEVBUF,
			       M_WAITOK | M_ZERO);
	lwkt_serialize_init(&jd->jd_serializer);
	SLIST_INIT(&jd->jd_free_slots);

	buf = jd->jd_buf;
	for (i = 0; i < ET_JSLOTS; ++i) {
		struct et_jslot *jslot = &jd->jd_slots[i];

		jslot->jslot_data = jd;
		jslot->jslot_buf = buf;
		jslot->jslot_paddr = paddr;
		jslot->jslot_inuse = 0;
		jslot->jslot_index = i;
		SLIST_INSERT_HEAD(&jd->jd_free_slots, jslot, jslot_link);

		buf += ET_JLEN;
		paddr += ET_JLEN;
	}
	return 0;
}

static void
et_jumbo_mem_free(device_t dev)
{
	struct et_softc *sc = device_get_softc(dev);
	struct et_jumbo_data *jd = &sc->sc_jumbo_data;

	KKASSERT(sc->sc_flags & ET_FLAG_JUMBO);

	kfree(jd->jd_slots, M_DEVBUF);
	et_dma_mem_destroy(jd->jd_dtag, jd->jd_buf, jd->jd_dmap);
}

static struct et_jslot *
et_jalloc(struct et_jumbo_data *jd)
{
	struct et_jslot *jslot;

	lwkt_serialize_enter(&jd->jd_serializer);

	jslot = SLIST_FIRST(&jd->jd_free_slots);
	if (jslot) {
		SLIST_REMOVE_HEAD(&jd->jd_free_slots, jslot_link);
		jslot->jslot_inuse = 1;
	}

	lwkt_serialize_exit(&jd->jd_serializer);
	return jslot;
}

static void
et_jfree(void *xjslot)
{
	struct et_jslot *jslot = xjslot;
	struct et_jumbo_data *jd = jslot->jslot_data;

	if (&jd->jd_slots[jslot->jslot_index] != jslot) {
		panic("%s wrong jslot!?\n", __func__);
	} else if (jslot->jslot_inuse == 0) {
		panic("%s jslot already freed\n", __func__);
	} else {
		lwkt_serialize_enter(&jd->jd_serializer);

		atomic_subtract_int(&jslot->jslot_inuse, 1);
		if (jslot->jslot_inuse == 0) {
			SLIST_INSERT_HEAD(&jd->jd_free_slots, jslot,
					  jslot_link);
		}

		lwkt_serialize_exit(&jd->jd_serializer);
	}
}

static void
et_jref(void *xjslot)
{
	struct et_jslot *jslot = xjslot;
	struct et_jumbo_data *jd = jslot->jslot_data;

	if (&jd->jd_slots[jslot->jslot_index] != jslot)
		panic("%s wrong jslot!?\n", __func__);
	else if (jslot->jslot_inuse == 0)
		panic("%s jslot already freed\n", __func__);
	else
		atomic_add_int(&jslot->jslot_inuse, 1);
}

static int
et_newbuf_jumbo(struct et_rxbuf_data *rbd, int buf_idx, int init)
{
	struct et_softc *sc = rbd->rbd_softc;
	struct et_rxbuf *rb;
	struct mbuf *m;
	struct et_jslot *jslot;
	int error;

	KASSERT(rbd->rbd_jumbo, ("calling %s with non-jumbo ring\n", __func__));

	KKASSERT(buf_idx < ET_RX_NDESC);
	rb = &rbd->rbd_buf[buf_idx];

	error = ENOBUFS;

	MGETHDR(m, init ? MB_WAIT : MB_DONTWAIT, MT_DATA);
	if (m == NULL) {
		if (init) {
			if_printf(&sc->arpcom.ac_if, "MGETHDR failed\n");
			return error;
		} else {
			goto back;
		}
	}

	jslot = et_jalloc(&sc->sc_jumbo_data);
	if (jslot == NULL) {
		m_freem(m);

		if (init) {
			if_printf(&sc->arpcom.ac_if,
				  "jslot allocation failed\n");
			return error;
		} else {
			goto back;
		}
	}

	m->m_ext.ext_arg = jslot;
	m->m_ext.ext_buf = jslot->jslot_buf;
	m->m_ext.ext_free = et_jfree;
	m->m_ext.ext_ref = et_jref;
	m->m_ext.ext_size = ET_JUMBO_FRAMELEN;
	m->m_flags |= M_EXT;
	m->m_data = m->m_ext.ext_buf;
	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;

	rb->rb_mbuf = m;
	rb->rb_paddr = jslot->jslot_paddr;

	error = 0;
back:
	et_setup_rxdesc(rbd, buf_idx, rb->rb_paddr);
	return error;
}

static void
et_setup_rxdesc(struct et_rxbuf_data *rbd, int buf_idx, bus_addr_t paddr)
{
	struct et_rxdesc_ring *rx_ring = rbd->rbd_ring;
	struct et_rxdesc *desc;

	KKASSERT(buf_idx < ET_RX_NDESC);
	desc = &rx_ring->rr_desc[buf_idx];

	desc->rd_addr_hi = ET_ADDR_HI(paddr);
	desc->rd_addr_lo = ET_ADDR_LO(paddr);
	desc->rd_ctrl = __SHIFTIN(buf_idx, ET_RDCTRL_BUFIDX);

	bus_dmamap_sync(rx_ring->rr_dtag, rx_ring->rr_dmap,
			BUS_DMASYNC_PREWRITE);
}