/*-
 * Copyright (c) 2008, Pyun YongHyeon <yongari@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/dev/ale/if_ale.c,v 1.3 2008/12/03 09:01:12 yongari Exp $
 */

/* Driver for Atheros AR8121/AR8113/AR8114 PCIe Ethernet. */

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/rman.h>
#include <sys/serialize.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/bpf.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_llc.h>
#include <net/if_media.h>
#include <net/ifq_var.h>
#include <net/vlan/if_vlan_var.h>
#include <net/vlan/if_vlan_ether.h>

#include <netinet/ip.h>

#include <dev/netif/mii_layer/mii.h>
#include <dev/netif/mii_layer/miivar.h>

#include <bus/pci/pcireg.h>
#include <bus/pci/pcivar.h>
#include <bus/pci/pcidevs.h>

#include <dev/netif/ale/if_alereg.h>
#include <dev/netif/ale/if_alevar.h>

/* "device miibus" required.  See GENERIC if you get errors here. */
#include "miibus_if.h"

/* For more information about Tx checksum offload issues see ale_encap(). */
#define ALE_CSUM_FEATURES	(CSUM_TCP | CSUM_UDP)

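/*
 * ale_dmamap_ctx carries the segment array in and the segment count out
 * of ale_dmamap_buf_cb(); the bus_dmamap_load_mbuf() interface used here
 * reports segments through a callback rather than returning them.
 */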
struct ale_dmamap_ctx {
	int			nsegs;
	bus_dma_segment_t	*segs;
};

static int	ale_probe(device_t);
static int	ale_attach(device_t);
static int	ale_detach(device_t);
static int	ale_shutdown(device_t);
static int	ale_suspend(device_t);
static int	ale_resume(device_t);

static int	ale_miibus_readreg(device_t, int, int);
static int	ale_miibus_writereg(device_t, int, int, int);
static void	ale_miibus_statchg(device_t);

static void	ale_init(void *);
static void	ale_start(struct ifnet *, struct ifaltq_subque *);
static int	ale_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
static void	ale_watchdog(struct ifnet *);
static int	ale_mediachange(struct ifnet *);
static void	ale_mediastatus(struct ifnet *, struct ifmediareq *);

static void	ale_intr(void *);
static int	ale_rxeof(struct ale_softc *sc);
static void	ale_rx_update_page(struct ale_softc *, struct ale_rx_page **,
		    uint32_t, uint32_t *);
static void	ale_rxcsum(struct ale_softc *, struct mbuf *, uint32_t);
static void	ale_txeof(struct ale_softc *);

static int	ale_dma_alloc(struct ale_softc *);
static void	ale_dma_free(struct ale_softc *);
static int	ale_check_boundary(struct ale_softc *);
static void	ale_dmamap_cb(void *, bus_dma_segment_t *, int, int);
static void	ale_dmamap_buf_cb(void *, bus_dma_segment_t *, int,
		    bus_size_t, int);
static int	ale_encap(struct ale_softc *, struct mbuf **);
static void	ale_init_rx_pages(struct ale_softc *);
static void	ale_init_tx_ring(struct ale_softc *);

static void	ale_stop(struct ale_softc *);
static void	ale_tick(void *);
static void	ale_get_macaddr(struct ale_softc *);
static void	ale_mac_config(struct ale_softc *);
static void	ale_phy_reset(struct ale_softc *);
static void	ale_reset(struct ale_softc *);
static void	ale_rxfilter(struct ale_softc *);
static void	ale_rxvlan(struct ale_softc *);
static void	ale_stats_clear(struct ale_softc *);
static void	ale_stats_update(struct ale_softc *);
static void	ale_stop_mac(struct ale_softc *);
#ifdef notyet
static void	ale_setlinkspeed(struct ale_softc *);
static void	ale_setwol(struct ale_softc *);
#endif

static void	ale_sysctl_node(struct ale_softc *);
static int	sysctl_hw_ale_int_mod(SYSCTL_HANDLER_ARGS);

/*
 * Devices supported by this driver.
 */
static struct ale_dev {
	uint16_t	ale_vendorid;
	uint16_t	ale_deviceid;
	const char	*ale_name;
} ale_devs[] = {
	{ VENDORID_ATHEROS, DEVICEID_ATHEROS_AR81XX,
	  "Atheros AR8121/AR8113/AR8114 PCIe Ethernet" },
};

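/*
 * All three supported chips share a single PCI device ID; ale_attach()
 * tells them apart by PCI revision and the PHY status register when it
 * sets the FASTETHER/JUMBO capability flags.
 */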
static device_method_t ale_methods[] = {
	/* Device interface. */
	DEVMETHOD(device_probe,		ale_probe),
	DEVMETHOD(device_attach,	ale_attach),
	DEVMETHOD(device_detach,	ale_detach),
	DEVMETHOD(device_shutdown,	ale_shutdown),
	DEVMETHOD(device_suspend,	ale_suspend),
	DEVMETHOD(device_resume,	ale_resume),

	/* Bus interface. */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface. */
	DEVMETHOD(miibus_readreg,	ale_miibus_readreg),
	DEVMETHOD(miibus_writereg,	ale_miibus_writereg),
	DEVMETHOD(miibus_statchg,	ale_miibus_statchg),

	{ NULL, NULL }
};

static driver_t ale_driver = {
	"ale",
	ale_methods,
	sizeof(struct ale_softc)
};

static devclass_t ale_devclass;

DECLARE_DUMMY_MODULE(if_ale);
MODULE_VERSION(if_ale, 1);
MODULE_DEPEND(if_ale, miibus, 1, 1, 1);
DRIVER_MODULE(if_ale, pci, ale_driver, ale_devclass, NULL, NULL);
DRIVER_MODULE(miibus, ale, miibus_driver, miibus_devclass, NULL, NULL);

static int
ale_miibus_readreg(device_t dev, int phy, int reg)
{
	struct ale_softc *sc;
	uint32_t v;
	int i;

	sc = device_get_softc(dev);

	if (phy != sc->ale_phyaddr)
		return (0);

	if (sc->ale_flags & ALE_FLAG_FASTETHER) {
		if (reg == MII_100T2CR || reg == MII_100T2SR ||
		    reg == MII_EXTSR)
			return (0);
	}

	CSR_WRITE_4(sc, ALE_MDIO, MDIO_OP_EXECUTE | MDIO_OP_READ |
	    MDIO_SUP_PREAMBLE | MDIO_CLK_25_4 | MDIO_REG_ADDR(reg));
	for (i = ALE_PHY_TIMEOUT; i > 0; i--) {
		DELAY(5);
		v = CSR_READ_4(sc, ALE_MDIO);
		if ((v & (MDIO_OP_EXECUTE | MDIO_OP_BUSY)) == 0)
			break;
	}

	if (i == 0) {
		device_printf(sc->ale_dev, "phy read timeout: %d\n", reg);
		return (0);
	}

	return ((v & MDIO_DATA_MASK) >> MDIO_DATA_SHIFT);
}

static int
ale_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct ale_softc *sc;
	uint32_t v;
	int i;

	sc = device_get_softc(dev);

	if (phy != sc->ale_phyaddr)
		return (0);

	if (sc->ale_flags & ALE_FLAG_FASTETHER) {
		if (reg == MII_100T2CR || reg == MII_100T2SR ||
		    reg == MII_EXTSR)
			return (0);
	}

	CSR_WRITE_4(sc, ALE_MDIO, MDIO_OP_EXECUTE | MDIO_OP_WRITE |
	    (val & MDIO_DATA_MASK) << MDIO_DATA_SHIFT |
	    MDIO_SUP_PREAMBLE | MDIO_CLK_25_4 | MDIO_REG_ADDR(reg));
	for (i = ALE_PHY_TIMEOUT; i > 0; i--) {
		DELAY(5);
		v = CSR_READ_4(sc, ALE_MDIO);
		if ((v & (MDIO_OP_EXECUTE | MDIO_OP_BUSY)) == 0)
			break;
	}

	if (i == 0)
		device_printf(sc->ale_dev, "phy write timeout: %d\n", reg);

	return (0);
}

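/*
 * Both MDIO accessors above follow the same handshake: queue the
 * operation in the ALE_MDIO register, then poll until the controller
 * clears MDIO_OP_EXECUTE/MDIO_OP_BUSY, giving up after ALE_PHY_TIMEOUT
 * polls of 5us each.
 */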
static void
ale_miibus_statchg(device_t dev)
{
	struct ale_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct mii_data *mii;
	uint32_t reg;

	ASSERT_SERIALIZED(ifp->if_serializer);

	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return;

	mii = device_get_softc(sc->ale_miibus);

	sc->ale_flags &= ~ALE_FLAG_LINK;
	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
	    (IFM_ACTIVE | IFM_AVALID)) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			sc->ale_flags |= ALE_FLAG_LINK;
			break;

		case IFM_1000_T:
			if ((sc->ale_flags & ALE_FLAG_FASTETHER) == 0)
				sc->ale_flags |= ALE_FLAG_LINK;
			break;

		default:
			break;
		}
	}

	/* Stop Rx/Tx MACs. */
	ale_stop_mac(sc);

	/* Program MACs with resolved speed/duplex/flow-control. */
	if ((sc->ale_flags & ALE_FLAG_LINK) != 0) {
		ale_mac_config(sc);
		/* Reenable Tx/Rx MACs. */
		reg = CSR_READ_4(sc, ALE_MAC_CFG);
		reg |= MAC_CFG_TX_ENB | MAC_CFG_RX_ENB;
		CSR_WRITE_4(sc, ALE_MAC_CFG, reg);
	}
}

static void
ale_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct ale_softc *sc = ifp->if_softc;
	struct mii_data *mii = device_get_softc(sc->ale_miibus);

	ASSERT_SERIALIZED(ifp->if_serializer);

	mii_pollstat(mii);
	ifmr->ifm_status = mii->mii_media_status;
	ifmr->ifm_active = mii->mii_media_active;
}

static int
ale_mediachange(struct ifnet *ifp)
{
	struct ale_softc *sc = ifp->if_softc;
	struct mii_data *mii = device_get_softc(sc->ale_miibus);
	int error;

	ASSERT_SERIALIZED(ifp->if_serializer);

	if (mii->mii_instance != 0) {
		struct mii_softc *miisc;

		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
	error = mii_mediachg(mii);

	return (error);
}

static int
ale_probe(device_t dev)
{
	struct ale_dev *sp;
	int i;
	uint16_t vendor, devid;

	vendor = pci_get_vendor(dev);
	devid = pci_get_device(dev);
	sp = ale_devs;
	for (i = 0; i < NELEM(ale_devs); i++) {
		if (vendor == sp->ale_vendorid &&
		    devid == sp->ale_deviceid) {
			device_set_desc(dev, sp->ale_name);
			return (0);
		}
		sp++;
	}

	return (ENXIO);
}

static void
ale_get_macaddr(struct ale_softc *sc)
{
	uint32_t ea[2], reg;
	int i, vpdc;

	reg = CSR_READ_4(sc, ALE_SPI_CTRL);
	if ((reg & SPI_VPD_ENB) != 0) {
		reg &= ~SPI_VPD_ENB;
		CSR_WRITE_4(sc, ALE_SPI_CTRL, reg);
	}

	vpdc = pci_get_vpdcap_ptr(sc->ale_dev);
	if (vpdc) {
		/*
		 * PCI VPD capability found, let TWSI reload the EEPROM.
		 * This will set the ethernet address of the controller.
		 */
		CSR_WRITE_4(sc, ALE_TWSI_CTRL, CSR_READ_4(sc, ALE_TWSI_CTRL) |
		    TWSI_CTRL_SW_LD_START);
		for (i = 100; i > 0; i--) {
			DELAY(1000);
			reg = CSR_READ_4(sc, ALE_TWSI_CTRL);
			if ((reg & TWSI_CTRL_SW_LD_START) == 0)
				break;
		}
		if (i == 0)
			device_printf(sc->ale_dev,
			    "reloading EEPROM timed out!\n");
	} else {
		if (bootverbose)
			device_printf(sc->ale_dev,
			    "PCI VPD capability not found!\n");
	}

	ea[0] = CSR_READ_4(sc, ALE_PAR0);
	ea[1] = CSR_READ_4(sc, ALE_PAR1);
	sc->ale_eaddr[0] = (ea[1] >> 8) & 0xFF;
	sc->ale_eaddr[1] = (ea[1] >> 0) & 0xFF;
	sc->ale_eaddr[2] = (ea[0] >> 24) & 0xFF;
	sc->ale_eaddr[3] = (ea[0] >> 16) & 0xFF;
	sc->ale_eaddr[4] = (ea[0] >> 8) & 0xFF;
	sc->ale_eaddr[5] = (ea[0] >> 0) & 0xFF;
}

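/*
 * The PHY exposes vendor-specific registers through an indirect debug
 * port: the register index is written to ATPHY_DBG_ADDR (0x1D) and the
 * value to ATPHY_DBG_DATA (0x1E), as done throughout the reset sequence
 * below.
 */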
static void
ale_phy_reset(struct ale_softc *sc)
{
	/* Reset magic from Linux. */
	CSR_WRITE_2(sc, ALE_GPHY_CTRL,
	    GPHY_CTRL_HIB_EN | GPHY_CTRL_HIB_PULSE | GPHY_CTRL_SEL_ANA_RESET |
	    GPHY_CTRL_PHY_PLL_ON);
	DELAY(1000);
	CSR_WRITE_2(sc, ALE_GPHY_CTRL,
	    GPHY_CTRL_EXT_RESET | GPHY_CTRL_HIB_EN | GPHY_CTRL_HIB_PULSE |
	    GPHY_CTRL_SEL_ANA_RESET | GPHY_CTRL_PHY_PLL_ON);
	DELAY(1000);

#define ATPHY_DBG_ADDR		0x1D
#define ATPHY_DBG_DATA		0x1E

	/* Enable hibernation mode. */
	ale_miibus_writereg(sc->ale_dev, sc->ale_phyaddr,
	    ATPHY_DBG_ADDR, 0x0B);
	ale_miibus_writereg(sc->ale_dev, sc->ale_phyaddr,
	    ATPHY_DBG_DATA, 0xBC00);
	/* Set Class A/B for all modes. */
	ale_miibus_writereg(sc->ale_dev, sc->ale_phyaddr,
	    ATPHY_DBG_ADDR, 0x00);
	ale_miibus_writereg(sc->ale_dev, sc->ale_phyaddr,
	    ATPHY_DBG_DATA, 0x02EF);
	/* Enable 10BT power saving. */
	ale_miibus_writereg(sc->ale_dev, sc->ale_phyaddr,
	    ATPHY_DBG_ADDR, 0x12);
	ale_miibus_writereg(sc->ale_dev, sc->ale_phyaddr,
	    ATPHY_DBG_DATA, 0x4C04);
	/* Adjust 1000T power. */
	ale_miibus_writereg(sc->ale_dev, sc->ale_phyaddr,
	    ATPHY_DBG_ADDR, 0x04);
	ale_miibus_writereg(sc->ale_dev, sc->ale_phyaddr,
	    ATPHY_DBG_DATA, 0x8BBB);
	/* 10BT center tap voltage. */
	ale_miibus_writereg(sc->ale_dev, sc->ale_phyaddr,
	    ATPHY_DBG_ADDR, 0x05);
	ale_miibus_writereg(sc->ale_dev, sc->ale_phyaddr,
	    ATPHY_DBG_DATA, 0x2C46);

#undef ATPHY_DBG_ADDR
#undef ATPHY_DBG_DATA
	DELAY(1000);
}

static int
ale_attach(device_t dev)
{
	struct ale_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int error = 0;
	uint32_t rxf_len, txf_len;
	uint8_t pcie_ptr;

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	sc->ale_dev = dev;

	callout_init(&sc->ale_tick_ch);

#ifndef BURN_BRIDGES
	if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
		uint32_t irq, mem;

		irq = pci_read_config(dev, PCIR_INTLINE, 4);
		mem = pci_read_config(dev, ALE_PCIR_BAR, 4);

		device_printf(dev, "chip is in D%d power mode "
		    "-- setting to D0\n", pci_get_powerstate(dev));

		pci_set_powerstate(dev, PCI_POWERSTATE_D0);

		pci_write_config(dev, PCIR_INTLINE, irq, 4);
		pci_write_config(dev, ALE_PCIR_BAR, mem, 4);
	}
#endif	/* !BURN_BRIDGES */

	/* Enable bus mastering */
	pci_enable_busmaster(dev);

	/*
	 * Allocate memory mapped IO
	 */
	sc->ale_mem_rid = ALE_PCIR_BAR;
	sc->ale_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &sc->ale_mem_rid, RF_ACTIVE);
	if (sc->ale_mem_res == NULL) {
		device_printf(dev, "can't allocate IO memory\n");
		return ENXIO;
	}
	sc->ale_mem_bt = rman_get_bustag(sc->ale_mem_res);
	sc->ale_mem_bh = rman_get_bushandle(sc->ale_mem_res);

	/*
	 * Allocate IRQ
	 */
	sc->ale_irq_rid = 0;
	sc->ale_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
	    &sc->ale_irq_rid, RF_SHAREABLE | RF_ACTIVE);
	if (sc->ale_irq_res == NULL) {
		device_printf(dev, "can't allocate irq\n");
		error = ENXIO;
		goto fail;
	}

	/* Set PHY address. */
	sc->ale_phyaddr = ALE_PHY_ADDR;

	/* Reset PHY. */
	ale_phy_reset(sc);

	/* Reset the ethernet controller. */
	ale_reset(sc);

	/* Get PCI and chip id/revision. */
	sc->ale_rev = pci_get_revid(dev);
	if (sc->ale_rev >= 0xF0) {
		/* L2E Rev. B. AR8114 */
		sc->ale_flags |= ALE_FLAG_FASTETHER;
	} else {
		if ((CSR_READ_4(sc, ALE_PHY_STATUS) & PHY_STATUS_100M) != 0) {
			/* L1E AR8121 */
			sc->ale_flags |= ALE_FLAG_JUMBO;
		} else {
			/* L2E Rev. A. AR8113 */
			sc->ale_flags |= ALE_FLAG_FASTETHER;
		}
	}

	/*
	 * All known controllers seem to require 4 byte alignment
	 * of Tx buffers to make Tx checksum offload with the custom
	 * checksum generation method work.
	 */
	sc->ale_flags |= ALE_FLAG_TXCSUM_BUG;

	/*
	 * All known controllers seem to have issues with Rx checksum
	 * offload for fragmented IP datagrams.
	 */
	sc->ale_flags |= ALE_FLAG_RXCSUM_BUG;

	/*
	 * Don't use the Tx CMB.  It is known to cause RRS update
	 * failures under certain circumstances; a typical symptom of
	 * the issue is an unexpected sequence number encountered in
	 * the Rx handler.
	 */
	sc->ale_flags |= ALE_FLAG_TXCMB_BUG;

	sc->ale_chip_rev = CSR_READ_4(sc, ALE_MASTER_CFG) >>
	    MASTER_CHIP_REV_SHIFT;
	if (bootverbose) {
		device_printf(dev, "PCI device revision : 0x%04x\n",
		    sc->ale_rev);
		device_printf(dev, "Chip id/revision : 0x%04x\n",
		    sc->ale_chip_rev);
	}

	/*
	 * Uninitialized hardware returns an invalid chip id/revision
	 * as well as 0xFFFFFFFF for Tx/Rx fifo length.
	 */
	txf_len = CSR_READ_4(sc, ALE_SRAM_TX_FIFO_LEN);
	rxf_len = CSR_READ_4(sc, ALE_SRAM_RX_FIFO_LEN);
	if (sc->ale_chip_rev == 0xFFFF || txf_len == 0xFFFFFFFF ||
	    rxf_len == 0xFFFFFFFF) {
		device_printf(dev, "chip revision : 0x%04x, %u Tx FIFO "
		    "%u Rx FIFO -- not initialized?\n", sc->ale_chip_rev,
		    txf_len, rxf_len);
		error = ENXIO;
		goto fail;
	}
	device_printf(dev, "%u Tx FIFO, %u Rx FIFO\n", txf_len, rxf_len);

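	/*
	 * The PCIe Device Control register encodes the maximum read
	 * request size in bits 14:12 and the maximum payload size in
	 * bits 7:5, both as 128 << n bytes; those values map directly
	 * onto the DMA_CFG burst fields programmed below.
	 */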
	/* Get DMA parameters from PCIe device control register. */
	pcie_ptr = pci_get_pciecap_ptr(dev);
	if (pcie_ptr) {
		uint16_t devctl;

		sc->ale_flags |= ALE_FLAG_PCIE;
		devctl = pci_read_config(dev, pcie_ptr + PCIER_DEVCTRL, 2);
		/* Max read request size. */
		sc->ale_dma_rd_burst = ((devctl >> 12) & 0x07) <<
		    DMA_CFG_RD_BURST_SHIFT;
		/* Max payload size. */
		sc->ale_dma_wr_burst = ((devctl >> 5) & 0x07) <<
		    DMA_CFG_WR_BURST_SHIFT;
		if (bootverbose) {
			device_printf(dev, "Read request size : %d bytes.\n",
			    128 << ((devctl >> 12) & 0x07));
			device_printf(dev, "TLP payload size : %d bytes.\n",
			    128 << ((devctl >> 5) & 0x07));
		}
	} else {
		sc->ale_dma_rd_burst = DMA_CFG_RD_BURST_128;
		sc->ale_dma_wr_burst = DMA_CFG_WR_BURST_128;
	}

	/* Create device sysctl node. */
	ale_sysctl_node(sc);

	if ((error = ale_dma_alloc(sc)) != 0)
		goto fail;

	/* Load station address. */
	ale_get_macaddr(sc);

	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = ale_ioctl;
	ifp->if_start = ale_start;
	ifp->if_init = ale_init;
	ifp->if_watchdog = ale_watchdog;
	ifq_set_maxlen(&ifp->if_snd, ALE_TX_RING_CNT - 1);
	ifq_set_ready(&ifp->if_snd);

	ifp->if_capabilities = IFCAP_RXCSUM |
	    IFCAP_VLAN_MTU |
	    IFCAP_VLAN_HWTAGGING;
#ifdef notyet
	ifp->if_capabilities |= IFCAP_TXCSUM;
	ifp->if_hwassist = ALE_CSUM_FEATURES;
#endif
	ifp->if_capenable = ifp->if_capabilities;

	/* Set up MII bus. */
	if ((error = mii_phy_probe(dev, &sc->ale_miibus, ale_mediachange,
	    ale_mediastatus)) != 0) {
		device_printf(dev, "no PHY found!\n");
		goto fail;
	}

	ether_ifattach(ifp, sc->ale_eaddr, NULL);

	/* Tell the upper layer(s) we support long frames. */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);

	error = bus_setup_intr(dev, sc->ale_irq_res, INTR_MPSAFE, ale_intr, sc,
	    &sc->ale_irq_handle, ifp->if_serializer);
	if (error) {
		device_printf(dev, "could not set up interrupt handler.\n");
		ether_ifdetach(ifp);
		goto fail;
	}

	ifq_set_cpuid(&ifp->if_snd, rman_get_cpuid(sc->ale_irq_res));
	return 0;
fail:
	ale_detach(dev);
	return (error);
}

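/*
 * Note that ale_attach()'s fail path calls ale_detach() on a partially
 * initialized softc, which is why every resource is NULL-checked before
 * being released below.
 */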
static int
ale_detach(device_t dev)
{
	struct ale_softc *sc = device_get_softc(dev);

	if (device_is_attached(dev)) {
		struct ifnet *ifp = &sc->arpcom.ac_if;

		lwkt_serialize_enter(ifp->if_serializer);
		sc->ale_flags |= ALE_FLAG_DETACH;
		ale_stop(sc);
		bus_teardown_intr(dev, sc->ale_irq_res, sc->ale_irq_handle);
		lwkt_serialize_exit(ifp->if_serializer);

		ether_ifdetach(ifp);
	}

	if (sc->ale_sysctl_tree != NULL)
		sysctl_ctx_free(&sc->ale_sysctl_ctx);

	if (sc->ale_miibus != NULL)
		device_delete_child(dev, sc->ale_miibus);
	bus_generic_detach(dev);

	if (sc->ale_irq_res != NULL) {
		bus_release_resource(dev, SYS_RES_IRQ, sc->ale_irq_rid,
		    sc->ale_irq_res);
	}
	if (sc->ale_mem_res != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, sc->ale_mem_rid,
		    sc->ale_mem_res);
	}

	ale_dma_free(sc);

	return (0);
}

#define ALE_SYSCTL_STAT_ADD32(c, h, n, p, d)	\
	SYSCTL_ADD_UINT(c, h, OID_AUTO, n, CTLFLAG_RD, p, 0, d)
#define ALE_SYSCTL_STAT_ADD64(c, h, n, p, d)	\
	SYSCTL_ADD_QUAD(c, h, OID_AUTO, n, CTLFLAG_RD, p, 0, d)

static void
ale_sysctl_node(struct ale_softc *sc)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid_list *child, *parent;
	struct sysctl_oid *tree;
	struct ale_hw_stats *stats;
	int error;

	sysctl_ctx_init(&sc->ale_sysctl_ctx);
	sc->ale_sysctl_tree = SYSCTL_ADD_NODE(&sc->ale_sysctl_ctx,
	    SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO,
	    device_get_nameunit(sc->ale_dev),
	    CTLFLAG_RD, 0, "");
	if (sc->ale_sysctl_tree == NULL) {
		device_printf(sc->ale_dev, "can't add sysctl node\n");
		return;
	}

	stats = &sc->ale_stats;
	ctx = &sc->ale_sysctl_ctx;
	child = SYSCTL_CHILDREN(sc->ale_sysctl_tree);

	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "int_rx_mod",
	    CTLTYPE_INT | CTLFLAG_RW, &sc->ale_int_rx_mod, 0,
	    sysctl_hw_ale_int_mod, "I", "ale Rx interrupt moderation");
	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "int_tx_mod",
	    CTLTYPE_INT | CTLFLAG_RW, &sc->ale_int_tx_mod, 0,
	    sysctl_hw_ale_int_mod, "I", "ale Tx interrupt moderation");

	/*
	 * Pull in device tunables; these may also be set at boot via
	 * device hints (e.g. hint.ale.0.int_rx_mod).  Out-of-range
	 * values fall back to the defaults.
	 */
	sc->ale_int_rx_mod = ALE_IM_RX_TIMER_DEFAULT;
	error = resource_int_value(device_get_name(sc->ale_dev),
	    device_get_unit(sc->ale_dev), "int_rx_mod", &sc->ale_int_rx_mod);
	if (error == 0) {
		if (sc->ale_int_rx_mod < ALE_IM_TIMER_MIN ||
		    sc->ale_int_rx_mod > ALE_IM_TIMER_MAX) {
			device_printf(sc->ale_dev, "int_rx_mod value out of "
			    "range; using default: %d\n",
			    ALE_IM_RX_TIMER_DEFAULT);
			sc->ale_int_rx_mod = ALE_IM_RX_TIMER_DEFAULT;
		}
	}

	sc->ale_int_tx_mod = ALE_IM_TX_TIMER_DEFAULT;
	error = resource_int_value(device_get_name(sc->ale_dev),
	    device_get_unit(sc->ale_dev), "int_tx_mod", &sc->ale_int_tx_mod);
	if (error == 0) {
		if (sc->ale_int_tx_mod < ALE_IM_TIMER_MIN ||
		    sc->ale_int_tx_mod > ALE_IM_TIMER_MAX) {
			device_printf(sc->ale_dev, "int_tx_mod value out of "
			    "range; using default: %d\n",
			    ALE_IM_TX_TIMER_DEFAULT);
			sc->ale_int_tx_mod = ALE_IM_TX_TIMER_DEFAULT;
		}
	}

	/* Misc statistics. */
	ALE_SYSCTL_STAT_ADD32(ctx, child, "reset_brk_seq",
	    &stats->reset_brk_seq,
	    "Controller resets due to broken Rx sequence number");

	tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats", CTLFLAG_RD,
	    NULL, "ALE statistics");
	parent = SYSCTL_CHILDREN(tree);

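	/*
	 * The per-MAC counters are split into rx and tx subtrees below
	 * (hw.aleN.stats.{rx,tx}); each entry mirrors a field of
	 * struct ale_hw_stats accumulated by ale_stats_update().
	 */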
	/* Rx statistics. */
	tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "rx", CTLFLAG_RD,
	    NULL, "Rx MAC statistics");
	child = SYSCTL_CHILDREN(tree);
	ALE_SYSCTL_STAT_ADD32(ctx, child, "good_frames",
	    &stats->rx_frames, "Good frames");
	ALE_SYSCTL_STAT_ADD32(ctx, child, "good_bcast_frames",
	    &stats->rx_bcast_frames, "Good broadcast frames");
	ALE_SYSCTL_STAT_ADD32(ctx, child, "good_mcast_frames",
	    &stats->rx_mcast_frames, "Good multicast frames");
	ALE_SYSCTL_STAT_ADD32(ctx, child, "pause_frames",
	    &stats->rx_pause_frames, "Pause control frames");
	ALE_SYSCTL_STAT_ADD32(ctx, child, "control_frames",
	    &stats->rx_control_frames, "Control frames");
	ALE_SYSCTL_STAT_ADD32(ctx, child, "crc_errs",
	    &stats->rx_crcerrs, "CRC errors");
	ALE_SYSCTL_STAT_ADD32(ctx, child, "len_errs",
	    &stats->rx_lenerrs, "Frames with length mismatch");
	ALE_SYSCTL_STAT_ADD64(ctx, child, "good_octets",
	    &stats->rx_bytes, "Good octets");
	ALE_SYSCTL_STAT_ADD64(ctx, child, "good_bcast_octets",
	    &stats->rx_bcast_bytes, "Good broadcast octets");
	ALE_SYSCTL_STAT_ADD64(ctx, child, "good_mcast_octets",
	    &stats->rx_mcast_bytes, "Good multicast octets");
	ALE_SYSCTL_STAT_ADD32(ctx, child, "runts",
	    &stats->rx_runts, "Too short frames");
	ALE_SYSCTL_STAT_ADD32(ctx, child, "fragments",
	    &stats->rx_fragments, "Fragmented frames");
	ALE_SYSCTL_STAT_ADD32(ctx, child, "frames_64",
	    &stats->rx_pkts_64, "64 byte frames");
	ALE_SYSCTL_STAT_ADD32(ctx, child, "frames_65_127",
	    &stats->rx_pkts_65_127, "65 to 127 byte frames");
	ALE_SYSCTL_STAT_ADD32(ctx, child, "frames_128_255",
	    &stats->rx_pkts_128_255, "128 to 255 byte frames");
	ALE_SYSCTL_STAT_ADD32(ctx, child, "frames_256_511",
	    &stats->rx_pkts_256_511, "256 to 511 byte frames");
	ALE_SYSCTL_STAT_ADD32(ctx, child, "frames_512_1023",
	    &stats->rx_pkts_512_1023, "512 to 1023 byte frames");
	ALE_SYSCTL_STAT_ADD32(ctx, child, "frames_1024_1518",
	    &stats->rx_pkts_1024_1518, "1024 to 1518 byte frames");
	ALE_SYSCTL_STAT_ADD32(ctx, child, "frames_1519_max",
	    &stats->rx_pkts_1519_max, "1519 to max frames");
	ALE_SYSCTL_STAT_ADD32(ctx, child, "trunc_errs",
	    &stats->rx_pkts_truncated, "Truncated frames due to MTU size");
	ALE_SYSCTL_STAT_ADD32(ctx, child, "fifo_oflows",
	    &stats->rx_fifo_oflows, "FIFO overflows");
	ALE_SYSCTL_STAT_ADD32(ctx, child, "rrs_errs",
	    &stats->rx_rrs_errs, "Return status write-back errors");
	ALE_SYSCTL_STAT_ADD32(ctx, child, "align_errs",
	    &stats->rx_alignerrs, "Alignment errors");
	ALE_SYSCTL_STAT_ADD32(ctx, child, "filtered",
	    &stats->rx_pkts_filtered,
	    "Frames dropped due to address filtering");

	/* Tx statistics. */
	tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "tx", CTLFLAG_RD,
	    NULL, "Tx MAC statistics");
	child = SYSCTL_CHILDREN(tree);
	ALE_SYSCTL_STAT_ADD32(ctx, child, "good_frames",
	    &stats->tx_frames, "Good frames");
	ALE_SYSCTL_STAT_ADD32(ctx, child, "good_bcast_frames",
	    &stats->tx_bcast_frames, "Good broadcast frames");
	ALE_SYSCTL_STAT_ADD32(ctx, child, "good_mcast_frames",
	    &stats->tx_mcast_frames, "Good multicast frames");
	ALE_SYSCTL_STAT_ADD32(ctx, child, "pause_frames",
	    &stats->tx_pause_frames, "Pause control frames");
	ALE_SYSCTL_STAT_ADD32(ctx, child, "control_frames",
	    &stats->tx_control_frames, "Control frames");
	ALE_SYSCTL_STAT_ADD32(ctx, child, "excess_defers",
	    &stats->tx_excess_defer, "Frames with excessive deferrals");
	ALE_SYSCTL_STAT_ADD32(ctx, child, "defers",
	    &stats->tx_deferred, "Frames with deferrals");
	ALE_SYSCTL_STAT_ADD64(ctx, child, "good_octets",
	    &stats->tx_bytes, "Good octets");
	ALE_SYSCTL_STAT_ADD64(ctx, child, "good_bcast_octets",
	    &stats->tx_bcast_bytes, "Good broadcast octets");
	ALE_SYSCTL_STAT_ADD64(ctx, child, "good_mcast_octets",
	    &stats->tx_mcast_bytes, "Good multicast octets");
	ALE_SYSCTL_STAT_ADD32(ctx, child, "frames_64",
	    &stats->tx_pkts_64, "64 byte frames");
	ALE_SYSCTL_STAT_ADD32(ctx, child, "frames_65_127",
	    &stats->tx_pkts_65_127, "65 to 127 byte frames");
	ALE_SYSCTL_STAT_ADD32(ctx, child, "frames_128_255",
	    &stats->tx_pkts_128_255, "128 to 255 byte frames");
	ALE_SYSCTL_STAT_ADD32(ctx, child, "frames_256_511",
	    &stats->tx_pkts_256_511, "256 to 511 byte frames");
	ALE_SYSCTL_STAT_ADD32(ctx, child, "frames_512_1023",
	    &stats->tx_pkts_512_1023, "512 to 1023 byte frames");
	ALE_SYSCTL_STAT_ADD32(ctx, child, "frames_1024_1518",
	    &stats->tx_pkts_1024_1518, "1024 to 1518 byte frames");
	ALE_SYSCTL_STAT_ADD32(ctx, child, "frames_1519_max",
	    &stats->tx_pkts_1519_max, "1519 to max frames");
	ALE_SYSCTL_STAT_ADD32(ctx, child, "single_colls",
	    &stats->tx_single_colls, "Single collisions");
	ALE_SYSCTL_STAT_ADD32(ctx, child, "multi_colls",
	    &stats->tx_multi_colls, "Multiple collisions");
	ALE_SYSCTL_STAT_ADD32(ctx, child, "late_colls",
	    &stats->tx_late_colls, "Late collisions");
	ALE_SYSCTL_STAT_ADD32(ctx, child, "excess_colls",
	    &stats->tx_excess_colls, "Excessive collisions");
	ALE_SYSCTL_STAT_ADD32(ctx, child, "abort",
	    &stats->tx_abort, "Aborted frames due to excessive collisions");
	ALE_SYSCTL_STAT_ADD32(ctx, child, "underruns",
	    &stats->tx_underrun, "FIFO underruns");
	ALE_SYSCTL_STAT_ADD32(ctx, child, "desc_underruns",
	    &stats->tx_desc_underrun, "Descriptor write-back errors");
	ALE_SYSCTL_STAT_ADD32(ctx, child, "len_errs",
	    &stats->tx_lenerrs, "Frames with length mismatch");
	ALE_SYSCTL_STAT_ADD32(ctx, child, "trunc_errs",
	    &stats->tx_pkts_truncated, "Truncated frames due to MTU size");
}

#undef ALE_SYSCTL_STAT_ADD32
#undef ALE_SYSCTL_STAT_ADD64

struct ale_dmamap_arg {
	bus_addr_t	ale_busaddr;
};

static void
ale_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	struct ale_dmamap_arg *ctx;

	if (error != 0)
		return;

	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

	ctx = (struct ale_dmamap_arg *)arg;
	ctx->ale_busaddr = segs[0].ds_addr;
}

/*
 * The Tx descriptor/RXF0/CMB DMA blocks share the ALE_DESC_ADDR_HI
 * register, which specifies the high 32 bits of their DMA addresses.
 * Therefore all of these blocks must lie within the same 4GB region
 * of the address space (i.e. crossing a 4GB boundary is not allowed).
 */
static int
ale_check_boundary(struct ale_softc *sc)
{
	bus_addr_t rx_cmb_end[ALE_RX_PAGES], tx_cmb_end;
	bus_addr_t rx_page_end[ALE_RX_PAGES], tx_ring_end;

	rx_page_end[0] = sc->ale_cdata.ale_rx_page[0].page_paddr +
	    sc->ale_pagesize;
	rx_page_end[1] = sc->ale_cdata.ale_rx_page[1].page_paddr +
	    sc->ale_pagesize;
	tx_ring_end = sc->ale_cdata.ale_tx_ring_paddr + ALE_TX_RING_SZ;
	tx_cmb_end = sc->ale_cdata.ale_tx_cmb_paddr + ALE_TX_CMB_SZ;
	rx_cmb_end[0] = sc->ale_cdata.ale_rx_page[0].cmb_paddr + ALE_RX_CMB_SZ;
	rx_cmb_end[1] = sc->ale_cdata.ale_rx_page[1].cmb_paddr + ALE_RX_CMB_SZ;

	if ((ALE_ADDR_HI(tx_ring_end) !=
	     ALE_ADDR_HI(sc->ale_cdata.ale_tx_ring_paddr)) ||
	    (ALE_ADDR_HI(rx_page_end[0]) !=
	     ALE_ADDR_HI(sc->ale_cdata.ale_rx_page[0].page_paddr)) ||
	    (ALE_ADDR_HI(rx_page_end[1]) !=
	     ALE_ADDR_HI(sc->ale_cdata.ale_rx_page[1].page_paddr)) ||
	    (ALE_ADDR_HI(tx_cmb_end) !=
	     ALE_ADDR_HI(sc->ale_cdata.ale_tx_cmb_paddr)) ||
	    (ALE_ADDR_HI(rx_cmb_end[0]) !=
	     ALE_ADDR_HI(sc->ale_cdata.ale_rx_page[0].cmb_paddr)) ||
	    (ALE_ADDR_HI(rx_cmb_end[1]) !=
	     ALE_ADDR_HI(sc->ale_cdata.ale_rx_page[1].cmb_paddr)))
		return (EFBIG);

	if ((ALE_ADDR_HI(tx_ring_end) != ALE_ADDR_HI(rx_page_end[0])) ||
	    (ALE_ADDR_HI(tx_ring_end) != ALE_ADDR_HI(rx_page_end[1])) ||
	    (ALE_ADDR_HI(tx_ring_end) != ALE_ADDR_HI(rx_cmb_end[0])) ||
	    (ALE_ADDR_HI(tx_ring_end) != ALE_ADDR_HI(rx_cmb_end[1])) ||
	    (ALE_ADDR_HI(tx_ring_end) != ALE_ADDR_HI(tx_cmb_end)))
		return (EFBIG);

	return (0);
}

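/*
 * For example, a page allocated at 0xFFFFF000 with a 0x2000 byte size
 * ends at 0x100001000; the start and end differ in their upper 32 bits,
 * so ale_check_boundary() returns EFBIG and ale_dma_alloc() retries the
 * whole allocation with 32bit addressing.
 */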
static int
ale_dma_alloc(struct ale_softc *sc)
{
	struct ale_txdesc *txd;
	bus_addr_t lowaddr;
	struct ale_dmamap_arg ctx;
	int error, guard_size, i;

	if ((sc->ale_flags & ALE_FLAG_JUMBO) != 0)
		guard_size = ALE_JUMBO_FRAMELEN;
	else
		guard_size = ALE_MAX_FRAMELEN;
	sc->ale_pagesize = roundup(guard_size + ALE_RX_PAGE_SZ,
	    ALE_RX_PAGE_ALIGN);
	lowaddr = BUS_SPACE_MAXADDR;
again:
	/* Create parent DMA tag. */
	error = bus_dma_tag_create(
	    NULL,			/* parent */
	    1, 0,			/* alignment, boundary */
	    lowaddr,			/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
	    0,				/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    &sc->ale_cdata.ale_parent_tag);
	if (error != 0) {
		device_printf(sc->ale_dev,
		    "could not create parent DMA tag.\n");
		goto fail;
	}

	/* Create DMA tag for Tx descriptor ring. */
	error = bus_dma_tag_create(
	    sc->ale_cdata.ale_parent_tag, /* parent */
	    ALE_TX_RING_ALIGN, 0,	/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    ALE_TX_RING_SZ,		/* maxsize */
	    1,				/* nsegments */
	    ALE_TX_RING_SZ,		/* maxsegsize */
	    0,				/* flags */
	    &sc->ale_cdata.ale_tx_ring_tag);
	if (error != 0) {
		device_printf(sc->ale_dev,
		    "could not create Tx ring DMA tag.\n");
		goto fail;
	}

	/* Create DMA tag for Rx pages. */
	for (i = 0; i < ALE_RX_PAGES; i++) {
		error = bus_dma_tag_create(
		    sc->ale_cdata.ale_parent_tag, /* parent */
		    ALE_RX_PAGE_ALIGN, 0,	/* alignment, boundary */
		    BUS_SPACE_MAXADDR,		/* lowaddr */
		    BUS_SPACE_MAXADDR,		/* highaddr */
		    NULL, NULL,			/* filter, filterarg */
		    sc->ale_pagesize,		/* maxsize */
		    1,				/* nsegments */
		    sc->ale_pagesize,		/* maxsegsize */
		    0,				/* flags */
		    &sc->ale_cdata.ale_rx_page[i].page_tag);
		if (error != 0) {
			device_printf(sc->ale_dev,
			    "could not create Rx page %d DMA tag.\n", i);
			goto fail;
		}
	}

	/* Create DMA tag for Tx coalescing message block. */
	error = bus_dma_tag_create(
	    sc->ale_cdata.ale_parent_tag, /* parent */
	    ALE_CMB_ALIGN, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    ALE_TX_CMB_SZ,		/* maxsize */
	    1,				/* nsegments */
	    ALE_TX_CMB_SZ,		/* maxsegsize */
	    0,				/* flags */
	    &sc->ale_cdata.ale_tx_cmb_tag);
	if (error != 0) {
		device_printf(sc->ale_dev,
		    "could not create Tx CMB DMA tag.\n");
		goto fail;
	}

	/* Create DMA tag for Rx coalescing message block. */
	for (i = 0; i < ALE_RX_PAGES; i++) {
		error = bus_dma_tag_create(
		    sc->ale_cdata.ale_parent_tag, /* parent */
		    ALE_CMB_ALIGN, 0,		/* alignment, boundary */
		    BUS_SPACE_MAXADDR,		/* lowaddr */
		    BUS_SPACE_MAXADDR,		/* highaddr */
		    NULL, NULL,			/* filter, filterarg */
		    ALE_RX_CMB_SZ,		/* maxsize */
		    1,				/* nsegments */
		    ALE_RX_CMB_SZ,		/* maxsegsize */
		    0,				/* flags */
		    &sc->ale_cdata.ale_rx_page[i].cmb_tag);
		if (error != 0) {
			device_printf(sc->ale_dev,
			    "could not create Rx page %d CMB DMA tag.\n", i);
			goto fail;
		}
	}

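	/*
	 * Each DMA block below follows the same pattern:
	 * bus_dmamem_alloc() provides zeroed, suitably aligned KVA and
	 * bus_dmamap_load() invokes ale_dmamap_cb() synchronously,
	 * which records the single segment's bus address in
	 * ctx.ale_busaddr.
	 */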
	/* Allocate DMA'able memory and load the DMA map for Tx ring. */
	error = bus_dmamem_alloc(sc->ale_cdata.ale_tx_ring_tag,
	    (void **)&sc->ale_cdata.ale_tx_ring,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO,
	    &sc->ale_cdata.ale_tx_ring_map);
	if (error != 0) {
		device_printf(sc->ale_dev,
		    "could not allocate DMA'able memory for Tx ring.\n");
		goto fail;
	}
	ctx.ale_busaddr = 0;
	error = bus_dmamap_load(sc->ale_cdata.ale_tx_ring_tag,
	    sc->ale_cdata.ale_tx_ring_map, sc->ale_cdata.ale_tx_ring,
	    ALE_TX_RING_SZ, ale_dmamap_cb, &ctx, 0);
	if (error != 0 || ctx.ale_busaddr == 0) {
		device_printf(sc->ale_dev,
		    "could not load DMA'able memory for Tx ring.\n");
		goto fail;
	}
	sc->ale_cdata.ale_tx_ring_paddr = ctx.ale_busaddr;

	/* Rx pages. */
	for (i = 0; i < ALE_RX_PAGES; i++) {
		error = bus_dmamem_alloc(sc->ale_cdata.ale_rx_page[i].page_tag,
		    (void **)&sc->ale_cdata.ale_rx_page[i].page_addr,
		    BUS_DMA_WAITOK | BUS_DMA_ZERO,
		    &sc->ale_cdata.ale_rx_page[i].page_map);
		if (error != 0) {
			device_printf(sc->ale_dev,
			    "could not allocate DMA'able memory for "
			    "Rx page %d.\n", i);
			goto fail;
		}
		ctx.ale_busaddr = 0;
		error = bus_dmamap_load(sc->ale_cdata.ale_rx_page[i].page_tag,
		    sc->ale_cdata.ale_rx_page[i].page_map,
		    sc->ale_cdata.ale_rx_page[i].page_addr,
		    sc->ale_pagesize, ale_dmamap_cb, &ctx, 0);
		if (error != 0 || ctx.ale_busaddr == 0) {
			device_printf(sc->ale_dev,
			    "could not load DMA'able memory for "
			    "Rx page %d.\n", i);
			goto fail;
		}
		sc->ale_cdata.ale_rx_page[i].page_paddr = ctx.ale_busaddr;
	}

	/* Tx CMB. */
	error = bus_dmamem_alloc(sc->ale_cdata.ale_tx_cmb_tag,
	    (void **)&sc->ale_cdata.ale_tx_cmb,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO,
	    &sc->ale_cdata.ale_tx_cmb_map);
	if (error != 0) {
		device_printf(sc->ale_dev,
		    "could not allocate DMA'able memory for Tx CMB.\n");
		goto fail;
	}
	ctx.ale_busaddr = 0;
	error = bus_dmamap_load(sc->ale_cdata.ale_tx_cmb_tag,
	    sc->ale_cdata.ale_tx_cmb_map, sc->ale_cdata.ale_tx_cmb,
	    ALE_TX_CMB_SZ, ale_dmamap_cb, &ctx, 0);
	if (error != 0 || ctx.ale_busaddr == 0) {
		device_printf(sc->ale_dev,
		    "could not load DMA'able memory for Tx CMB.\n");
		goto fail;
	}
	sc->ale_cdata.ale_tx_cmb_paddr = ctx.ale_busaddr;

	/* Rx CMB. */
	for (i = 0; i < ALE_RX_PAGES; i++) {
		error = bus_dmamem_alloc(sc->ale_cdata.ale_rx_page[i].cmb_tag,
		    (void **)&sc->ale_cdata.ale_rx_page[i].cmb_addr,
		    BUS_DMA_WAITOK | BUS_DMA_ZERO,
		    &sc->ale_cdata.ale_rx_page[i].cmb_map);
		if (error != 0) {
			device_printf(sc->ale_dev, "could not allocate "
			    "DMA'able memory for Rx page %d CMB.\n", i);
			goto fail;
		}
		ctx.ale_busaddr = 0;
		error = bus_dmamap_load(sc->ale_cdata.ale_rx_page[i].cmb_tag,
		    sc->ale_cdata.ale_rx_page[i].cmb_map,
		    sc->ale_cdata.ale_rx_page[i].cmb_addr,
		    ALE_RX_CMB_SZ, ale_dmamap_cb, &ctx, 0);
		if (error != 0 || ctx.ale_busaddr == 0) {
			device_printf(sc->ale_dev, "could not load DMA'able "
			    "memory for Rx page %d CMB.\n", i);
			goto fail;
		}
		sc->ale_cdata.ale_rx_page[i].cmb_paddr = ctx.ale_busaddr;
	}

	/*
	 * Tx descriptors/RXF0/CMB DMA blocks share the same
	 * high address region of the 64bit DMA address space.
	 */
	if (lowaddr != BUS_SPACE_MAXADDR_32BIT &&
	    (error = ale_check_boundary(sc)) != 0) {
		device_printf(sc->ale_dev, "4GB boundary crossed, "
		    "switching to 32bit DMA addressing mode.\n");
		ale_dma_free(sc);
		/*
		 * Limit max allowable DMA address space to 32bit
		 * and try again.
		 */
		lowaddr = BUS_SPACE_MAXADDR_32BIT;
		goto again;
	}

	/*
	 * Create Tx buffer parent tag.
	 * AR81xx allows 64bit DMA addressing of Tx buffers, so a
	 * separate parent DMA tag is needed: the main parent tag's
	 * address space may have been restricted to 32bit addresses
	 * above by the 4GB boundary check.
	 */
	error = bus_dma_tag_create(
	    NULL,			/* parent */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
	    0,				/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    &sc->ale_cdata.ale_buffer_tag);
	if (error != 0) {
		device_printf(sc->ale_dev,
		    "could not create parent buffer DMA tag.\n");
		goto fail;
	}

	/* Create DMA tag for Tx buffers. */
	error = bus_dma_tag_create(
	    sc->ale_cdata.ale_buffer_tag, /* parent */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    ALE_TSO_MAXSIZE,		/* maxsize */
	    ALE_MAXTXSEGS,		/* nsegments */
	    ALE_TSO_MAXSEGSIZE,		/* maxsegsize */
	    0,				/* flags */
	    &sc->ale_cdata.ale_tx_tag);
	if (error != 0) {
		device_printf(sc->ale_dev, "could not create Tx DMA tag.\n");
		goto fail;
	}

	/* Create DMA maps for Tx buffers. */
	for (i = 0; i < ALE_TX_RING_CNT; i++) {
		txd = &sc->ale_cdata.ale_txdesc[i];
		txd->tx_m = NULL;
		txd->tx_dmamap = NULL;
		error = bus_dmamap_create(sc->ale_cdata.ale_tx_tag, 0,
		    &txd->tx_dmamap);
		if (error != 0) {
			device_printf(sc->ale_dev,
			    "could not create Tx dmamap.\n");
			goto fail;
		}
	}
fail:
	return (error);
}

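/*
 * Teardown mirrors allocation in reverse: maps are unloaded before the
 * memory backing them is freed, and each tag is destroyed only after
 * its dependents, with the buffer and parent tags going last.
 */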
static void
ale_dma_free(struct ale_softc *sc)
{
	struct ale_txdesc *txd;
	int i;

	/* Tx buffers. */
	if (sc->ale_cdata.ale_tx_tag != NULL) {
		for (i = 0; i < ALE_TX_RING_CNT; i++) {
			txd = &sc->ale_cdata.ale_txdesc[i];
			if (txd->tx_dmamap != NULL) {
				bus_dmamap_destroy(sc->ale_cdata.ale_tx_tag,
				    txd->tx_dmamap);
				txd->tx_dmamap = NULL;
			}
		}
		bus_dma_tag_destroy(sc->ale_cdata.ale_tx_tag);
		sc->ale_cdata.ale_tx_tag = NULL;
	}
	/* Tx descriptor ring. */
	if (sc->ale_cdata.ale_tx_ring_tag != NULL) {
		if (sc->ale_cdata.ale_tx_ring_map != NULL)
			bus_dmamap_unload(sc->ale_cdata.ale_tx_ring_tag,
			    sc->ale_cdata.ale_tx_ring_map);
		if (sc->ale_cdata.ale_tx_ring_map != NULL &&
		    sc->ale_cdata.ale_tx_ring != NULL)
			bus_dmamem_free(sc->ale_cdata.ale_tx_ring_tag,
			    sc->ale_cdata.ale_tx_ring,
			    sc->ale_cdata.ale_tx_ring_map);
		sc->ale_cdata.ale_tx_ring = NULL;
		sc->ale_cdata.ale_tx_ring_map = NULL;
		bus_dma_tag_destroy(sc->ale_cdata.ale_tx_ring_tag);
		sc->ale_cdata.ale_tx_ring_tag = NULL;
	}
	/* Rx page block. */
	for (i = 0; i < ALE_RX_PAGES; i++) {
		if (sc->ale_cdata.ale_rx_page[i].page_tag != NULL) {
			if (sc->ale_cdata.ale_rx_page[i].page_map != NULL)
				bus_dmamap_unload(
				    sc->ale_cdata.ale_rx_page[i].page_tag,
				    sc->ale_cdata.ale_rx_page[i].page_map);
			if (sc->ale_cdata.ale_rx_page[i].page_map != NULL &&
			    sc->ale_cdata.ale_rx_page[i].page_addr != NULL)
				bus_dmamem_free(
				    sc->ale_cdata.ale_rx_page[i].page_tag,
				    sc->ale_cdata.ale_rx_page[i].page_addr,
				    sc->ale_cdata.ale_rx_page[i].page_map);
			sc->ale_cdata.ale_rx_page[i].page_addr = NULL;
			sc->ale_cdata.ale_rx_page[i].page_map = NULL;
			bus_dma_tag_destroy(
			    sc->ale_cdata.ale_rx_page[i].page_tag);
			sc->ale_cdata.ale_rx_page[i].page_tag = NULL;
		}
	}
	/* Rx CMB. */
	for (i = 0; i < ALE_RX_PAGES; i++) {
		if (sc->ale_cdata.ale_rx_page[i].cmb_tag != NULL) {
			if (sc->ale_cdata.ale_rx_page[i].cmb_map != NULL)
				bus_dmamap_unload(
				    sc->ale_cdata.ale_rx_page[i].cmb_tag,
				    sc->ale_cdata.ale_rx_page[i].cmb_map);
			if (sc->ale_cdata.ale_rx_page[i].cmb_map != NULL &&
			    sc->ale_cdata.ale_rx_page[i].cmb_addr != NULL)
				bus_dmamem_free(
				    sc->ale_cdata.ale_rx_page[i].cmb_tag,
				    sc->ale_cdata.ale_rx_page[i].cmb_addr,
				    sc->ale_cdata.ale_rx_page[i].cmb_map);
			sc->ale_cdata.ale_rx_page[i].cmb_addr = NULL;
			sc->ale_cdata.ale_rx_page[i].cmb_map = NULL;
			bus_dma_tag_destroy(
			    sc->ale_cdata.ale_rx_page[i].cmb_tag);
			sc->ale_cdata.ale_rx_page[i].cmb_tag = NULL;
		}
	}
	/* Tx CMB. */
	if (sc->ale_cdata.ale_tx_cmb_tag != NULL) {
		if (sc->ale_cdata.ale_tx_cmb_map != NULL)
			bus_dmamap_unload(sc->ale_cdata.ale_tx_cmb_tag,
			    sc->ale_cdata.ale_tx_cmb_map);
		if (sc->ale_cdata.ale_tx_cmb_map != NULL &&
		    sc->ale_cdata.ale_tx_cmb != NULL)
			bus_dmamem_free(sc->ale_cdata.ale_tx_cmb_tag,
			    sc->ale_cdata.ale_tx_cmb,
			    sc->ale_cdata.ale_tx_cmb_map);
		sc->ale_cdata.ale_tx_cmb = NULL;
		sc->ale_cdata.ale_tx_cmb_map = NULL;
		bus_dma_tag_destroy(sc->ale_cdata.ale_tx_cmb_tag);
		sc->ale_cdata.ale_tx_cmb_tag = NULL;
	}
	if (sc->ale_cdata.ale_buffer_tag != NULL) {
		bus_dma_tag_destroy(sc->ale_cdata.ale_buffer_tag);
		sc->ale_cdata.ale_buffer_tag = NULL;
	}
	if (sc->ale_cdata.ale_parent_tag != NULL) {
		bus_dma_tag_destroy(sc->ale_cdata.ale_parent_tag);
		sc->ale_cdata.ale_parent_tag = NULL;
	}
}

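/*
 * Shutdown simply routes through the suspend path so that, once WOL
 * support is enabled, powering down and rebooting program the hardware
 * identically.
 */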
static int
ale_shutdown(device_t dev)
{
	return (ale_suspend(dev));
}

#ifdef notyet

/*
 * Note, this driver resets the link speed to 10/100Mbps by
 * restarting auto-negotiation in the suspend/shutdown phase, but we
 * don't know whether that auto-negotiation will succeed or not,
 * as the driver has no control after the power off/suspend operation.
 * If the renegotiation fails, WOL may not work.  Running at 1Gbps
 * would draw more power than the 375mA at 3.3V specified in the PCI
 * specification, and that could result in power to the ethernet
 * controller being shut off completely.
 *
 * TODO
 * Save the currently negotiated media speed/duplex/flow-control to
 * the softc and restore the same link again after resuming.  PHY
 * handling such as powering down/resetting to 100Mbps may be better
 * handled in a suspend method in the phy driver.
 */
static void
ale_setlinkspeed(struct ale_softc *sc)
{
	struct mii_data *mii;
	int aneg, i;

	mii = device_get_softc(sc->ale_miibus);
	mii_pollstat(mii);
	aneg = 0;
	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
	    (IFM_ACTIVE | IFM_AVALID)) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			return;
		case IFM_1000_T:
			aneg++;
			break;
		default:
			break;
		}
	}
	ale_miibus_writereg(sc->ale_dev, sc->ale_phyaddr, MII_100T2CR, 0);
	ale_miibus_writereg(sc->ale_dev, sc->ale_phyaddr,
	    MII_ANAR, ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10 | ANAR_CSMA);
	ale_miibus_writereg(sc->ale_dev, sc->ale_phyaddr,
	    MII_BMCR, BMCR_RESET | BMCR_AUTOEN | BMCR_STARTNEG);
	DELAY(1000);
	if (aneg != 0) {
		/*
		 * Poll the link state until ale(4) gets a 10/100Mbps link.
		 */
		for (i = 0; i < MII_ANEGTICKS_GIGE; i++) {
			mii_pollstat(mii);
			if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID))
			    == (IFM_ACTIVE | IFM_AVALID)) {
				switch (IFM_SUBTYPE(
				    mii->mii_media_active)) {
				case IFM_10_T:
				case IFM_100_TX:
					ale_mac_config(sc);
					return;
				default:
					break;
				}
			}
			ALE_UNLOCK(sc);
			pause("alelnk", hz);
			ALE_LOCK(sc);
		}
		if (i == MII_ANEGTICKS_GIGE)
			device_printf(sc->ale_dev,
			    "establishing a link failed, WOL may not work!");
	}
	/*
	 * No link, force the MAC to have a 100Mbps, full-duplex link.
	 * This is the last resort and may/may not work.
	 */
	mii->mii_media_status = IFM_AVALID | IFM_ACTIVE;
	mii->mii_media_active = IFM_ETHER | IFM_100_TX | IFM_FDX;
	ale_mac_config(sc);
}

static void
ale_setwol(struct ale_softc *sc)
{
	struct ifnet *ifp;
	uint32_t reg, pmcs;
	uint16_t pmstat;
	int pmc;

	ALE_LOCK_ASSERT(sc);

	if (pci_find_extcap(sc->ale_dev, PCIY_PMG, &pmc) != 0) {
		/* Disable WOL. */
		CSR_WRITE_4(sc, ALE_WOL_CFG, 0);
		reg = CSR_READ_4(sc, ALE_PCIE_PHYMISC);
		reg |= PCIE_PHYMISC_FORCE_RCV_DET;
		CSR_WRITE_4(sc, ALE_PCIE_PHYMISC, reg);
		/* Force PHY power down. */
		CSR_WRITE_2(sc, ALE_GPHY_CTRL,
		    GPHY_CTRL_EXT_RESET | GPHY_CTRL_HIB_EN |
		    GPHY_CTRL_HIB_PULSE | GPHY_CTRL_PHY_PLL_ON |
		    GPHY_CTRL_SEL_ANA_RESET | GPHY_CTRL_PHY_IDDQ |
		    GPHY_CTRL_PCLK_SEL_DIS | GPHY_CTRL_PWDOWN_HW);
		return;
	}

	ifp = sc->ale_ifp;
	if ((ifp->if_capenable & IFCAP_WOL) != 0) {
		if ((sc->ale_flags & ALE_FLAG_FASTETHER) == 0)
			ale_setlinkspeed(sc);
	}

	pmcs = 0;
	if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0)
		pmcs |= WOL_CFG_MAGIC | WOL_CFG_MAGIC_ENB;
	CSR_WRITE_4(sc, ALE_WOL_CFG, pmcs);
	reg = CSR_READ_4(sc, ALE_MAC_CFG);
	reg &= ~(MAC_CFG_DBG | MAC_CFG_PROMISC | MAC_CFG_ALLMULTI |
	    MAC_CFG_BCAST);
	if ((ifp->if_capenable & IFCAP_WOL_MCAST) != 0)
		reg |= MAC_CFG_ALLMULTI | MAC_CFG_BCAST;
	if ((ifp->if_capenable & IFCAP_WOL) != 0)
		reg |= MAC_CFG_RX_ENB;
	CSR_WRITE_4(sc, ALE_MAC_CFG, reg);

	if ((ifp->if_capenable & IFCAP_WOL) == 0) {
		/* WOL disabled, PHY power down. */
		reg = CSR_READ_4(sc, ALE_PCIE_PHYMISC);
		reg |= PCIE_PHYMISC_FORCE_RCV_DET;
		CSR_WRITE_4(sc, ALE_PCIE_PHYMISC, reg);
		CSR_WRITE_2(sc, ALE_GPHY_CTRL,
		    GPHY_CTRL_EXT_RESET | GPHY_CTRL_HIB_EN |
		    GPHY_CTRL_HIB_PULSE | GPHY_CTRL_SEL_ANA_RESET |
		    GPHY_CTRL_PHY_IDDQ | GPHY_CTRL_PCLK_SEL_DIS |
		    GPHY_CTRL_PWDOWN_HW);
	}
	/* Request PME. */
	pmstat = pci_read_config(sc->ale_dev, pmc + PCIR_POWER_STATUS, 2);
	pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
	if ((ifp->if_capenable & IFCAP_WOL) != 0)
		pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
	pci_write_config(sc->ale_dev, pmc + PCIR_POWER_STATUS, pmstat, 2);
}

#endif	/* notyet */

static int
ale_suspend(device_t dev)
{
	struct ale_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	lwkt_serialize_enter(ifp->if_serializer);
	ale_stop(sc);
#ifdef notyet
	ale_setwol(sc);
#endif
	lwkt_serialize_exit(ifp->if_serializer);
	return (0);
}

static int
ale_resume(device_t dev)
{
	struct ale_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint16_t cmd;

	lwkt_serialize_enter(ifp->if_serializer);

	/*
	 * Clear the INTx emulation disable bit that some hardware
	 * sets on resume.  From Linux.
	 */
	cmd = pci_read_config(sc->ale_dev, PCIR_COMMAND, 2);
	if ((cmd & 0x0400) != 0) {
		cmd &= ~0x0400;
		pci_write_config(sc->ale_dev, PCIR_COMMAND, cmd, 2);
	}

#ifdef notyet
	{
		uint16_t pmstat;
		int pmc;

		if (pci_find_extcap(sc->ale_dev, PCIY_PMG, &pmc) == 0) {
			/* Disable PME and clear PME status. */
			pmstat = pci_read_config(sc->ale_dev,
			    pmc + PCIR_POWER_STATUS, 2);
			if ((pmstat & PCIM_PSTAT_PMEENABLE) != 0) {
				pmstat &= ~PCIM_PSTAT_PMEENABLE;
				pci_write_config(sc->ale_dev,
				    pmc + PCIR_POWER_STATUS, pmstat, 2);
			}
		}
	}
#endif

	/* Reset PHY. */
	ale_phy_reset(sc);
	if ((ifp->if_flags & IFF_UP) != 0)
		ale_init(sc);

	lwkt_serialize_exit(ifp->if_serializer);
	return (0);
}

static int
ale_encap(struct ale_softc *sc, struct mbuf **m_head)
{
	struct ale_txdesc *txd, *txd_last;
	struct tx_desc *desc;
	struct mbuf *m;
	bus_dma_segment_t txsegs[ALE_MAXTXSEGS];
	struct ale_dmamap_ctx ctx;
	bus_dmamap_t map;
	uint32_t cflags, poff, vtag;
	int error, i, nsegs, prod;

	M_ASSERTPKTHDR((*m_head));

	m = *m_head;
	cflags = vtag = 0;
	poff = 0;

	prod = sc->ale_cdata.ale_tx_prod;
	txd = &sc->ale_cdata.ale_txdesc[prod];
	txd_last = txd;
	map = txd->tx_dmamap;

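	/*
	 * Load the mbuf chain.  bus_dmamap_load_mbuf() reports EFBIG
	 * when the chain needs more than ALE_MAXTXSEGS segments, in
	 * which case the chain is linearized with m_defrag() and the
	 * load is retried once.
	 */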
	ctx.nsegs = ALE_MAXTXSEGS;
	ctx.segs = txsegs;
	error = bus_dmamap_load_mbuf(sc->ale_cdata.ale_tx_tag, map,
	    *m_head, ale_dmamap_buf_cb, &ctx, BUS_DMA_NOWAIT);
	if (error == EFBIG) {
		m = m_defrag(*m_head, MB_DONTWAIT);
		if (m == NULL) {
			m_freem(*m_head);
			*m_head = NULL;
			return (ENOMEM);
		}
		*m_head = m;

		ctx.nsegs = ALE_MAXTXSEGS;
		ctx.segs = txsegs;
		error = bus_dmamap_load_mbuf(sc->ale_cdata.ale_tx_tag, map,
		    *m_head, ale_dmamap_buf_cb, &ctx, BUS_DMA_NOWAIT);
		if (error != 0) {
			m_freem(*m_head);
			*m_head = NULL;
			return (error);
		}
	} else if (error != 0) {
		return (error);
	}
	nsegs = ctx.nsegs;

	if (nsegs == 0) {
		m_freem(*m_head);
		*m_head = NULL;
		return (EIO);
	}

	/* Check descriptor overrun. */
	if (sc->ale_cdata.ale_tx_cnt + nsegs >= ALE_TX_RING_CNT - 2) {
		bus_dmamap_unload(sc->ale_cdata.ale_tx_tag, map);
		return (ENOBUFS);
	}
	bus_dmamap_sync(sc->ale_cdata.ale_tx_tag, map, BUS_DMASYNC_PREWRITE);

	m = *m_head;
	/* Configure Tx checksum offload. */
	if ((m->m_pkthdr.csum_flags & ALE_CSUM_FEATURES) != 0) {
		/*
		 * AR81xx supports a custom Tx checksum offload feature
		 * that offloads a single 16bit checksum computation, so
		 * one checksum can be chosen among IP, TCP and UDP.
		 * Normally the driver sets the checksum start/insertion
		 * position from the TCP/UDP frame information, since a
		 * TCP/UDP checksum takes more time to compute than an IP
		 * checksum.  However, custom checksum offload seems to
		 * require 4 byte aligned Tx buffers due to a hardware
		 * bug.
		 * AR81xx also supports explicit Tx checksum computation
		 * if it is told the sizes of the IP and TCP headers (for
		 * UDP the header size does not matter because it's fixed
		 * length).  With that scheme, however, TSO does not
		 * work, so one has to choose either TSO or explicit Tx
		 * checksum offload.  TSO plus custom checksum offload
		 * with a work-around was chosen here, which should cover
		 * the most common usage of this consumer ethernet
		 * controller.  The work-around costs a lot of CPU cycles
		 * if the Tx buffer is not aligned on a 4 byte boundary,
		 * though.
		 */
		cflags |= ALE_TD_CXSUM;
		/* Set checksum start offset. */
		cflags |= (poff << ALE_TD_CSUM_PLOADOFFSET_SHIFT);
		/* Set checksum insertion position of TCP/UDP. */
		cflags |= ((poff + m->m_pkthdr.csum_data) <<
		    ALE_TD_CSUM_XSUMOFFSET_SHIFT);
	}

	/* Configure VLAN hardware tag insertion. */
	if ((m->m_flags & M_VLANTAG) != 0) {
		vtag = ALE_TX_VLAN_TAG(m->m_pkthdr.ether_vlantag);
		vtag = ((vtag << ALE_TD_VLAN_SHIFT) & ALE_TD_VLAN_MASK);
		cflags |= ALE_TD_INSERT_VLAN_TAG;
	}

	desc = NULL;
	for (i = 0; i < nsegs; i++) {
		desc = &sc->ale_cdata.ale_tx_ring[prod];
		desc->addr = htole64(txsegs[i].ds_addr);
		desc->len = htole32(ALE_TX_BYTES(txsegs[i].ds_len) | vtag);
		desc->flags = htole32(cflags);
		sc->ale_cdata.ale_tx_cnt++;
		ALE_DESC_INC(prod, ALE_TX_RING_CNT);
	}
	/* Update producer index. */
	sc->ale_cdata.ale_tx_prod = prod;

	/* Finally set EOP on the last descriptor. */
	prod = (prod + ALE_TX_RING_CNT - 1) % ALE_TX_RING_CNT;
	desc = &sc->ale_cdata.ale_tx_ring[prod];
	desc->flags |= htole32(ALE_TD_EOP);

	/*
	 * Swap the dmamaps of the first and the last descriptor: the
	 * mbuf and the loaded map are parked on the EOP slot so that
	 * ale_txeof() can unload the map once the final segment of the
	 * frame completes.
	 */
	txd = &sc->ale_cdata.ale_txdesc[prod];
	map = txd_last->tx_dmamap;
	txd_last->tx_dmamap = txd->tx_dmamap;
	txd->tx_dmamap = map;
	txd->tx_m = m;

	/* Sync descriptors. */
	bus_dmamap_sync(sc->ale_cdata.ale_tx_ring_tag,
	    sc->ale_cdata.ale_tx_ring_map, BUS_DMASYNC_PREWRITE);

	return (0);
}

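/*
 * Drain the interface send queue into the Tx ring.  Completed
 * descriptors are reclaimed lazily (only past ALE_TX_DESC_HIWAT), and
 * the hardware is notified of new work with a single producer-index
 * write after the loop.
 */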
static void
ale_start(struct ifnet *ifp, struct ifaltq_subque *ifsq)
{
	struct ale_softc *sc = ifp->if_softc;
	struct mbuf *m_head;
	int enq;

	ASSERT_ALTQ_SQ_DEFAULT(ifp, ifsq);
	ASSERT_SERIALIZED(ifp->if_serializer);

	if ((sc->ale_flags & ALE_FLAG_LINK) == 0) {
		ifq_purge(&ifp->if_snd);
		return;
	}

	if ((ifp->if_flags & IFF_RUNNING) == 0 || ifq_is_oactive(&ifp->if_snd))
		return;

	/* Reclaim transmitted frames. */
	if (sc->ale_cdata.ale_tx_cnt >= ALE_TX_DESC_HIWAT)
		ale_txeof(sc);

	enq = 0;
	while (!ifq_is_empty(&ifp->if_snd)) {
		m_head = ifq_dequeue(&ifp->if_snd, NULL);
		if (m_head == NULL)
			break;

		/*
		 * Pack the data into the transmit ring. If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 */
		if (ale_encap(sc, &m_head)) {
			if (m_head == NULL)
				break;
			ifq_prepend(&ifp->if_snd, m_head);
			ifq_set_oactive(&ifp->if_snd);
			break;
		}
		enq = 1;

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		ETHER_BPF_MTAP(ifp, m_head);
	}

	if (enq) {
		/* Kick. */
		CSR_WRITE_4(sc, ALE_MBOX_TPD_PROD_IDX,
		    sc->ale_cdata.ale_tx_prod);

		/* Set a timeout in case the chip goes out to lunch. */
		ifp->if_timer = ALE_TX_TIMEOUT;
	}
}

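/*
 * The Tx watchdog fires when if_timer (armed in ale_start()) expires
 * before the pending frames complete; a lost link is reported
 * separately from a genuine Tx hang, but both recover via ale_init().
 */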

static int
ale_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data, struct ucred *cr)
{
	struct ale_softc *sc;
	struct ifreq *ifr;
	struct mii_data *mii;
	int error, mask;

	ASSERT_SERIALIZED(ifp->if_serializer);

	sc = ifp->if_softc;
	ifr = (struct ifreq *)data;
	error = 0;

	switch (cmd) {
	case SIOCSIFMTU:
		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > ALE_JUMBO_MTU ||
		    ((sc->ale_flags & ALE_FLAG_JUMBO) == 0 &&
		     ifr->ifr_mtu > ETHERMTU))
			error = EINVAL;
		else if (ifp->if_mtu != ifr->ifr_mtu) {
			ifp->if_mtu = ifr->ifr_mtu;
			if ((ifp->if_flags & IFF_RUNNING) != 0)
				ale_init(sc);
		}
		break;

	case SIOCSIFFLAGS:
		if ((ifp->if_flags & IFF_UP) != 0) {
			if ((ifp->if_flags & IFF_RUNNING) != 0) {
				if (((ifp->if_flags ^ sc->ale_if_flags)
				     & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
					ale_rxfilter(sc);
			} else {
				if ((sc->ale_flags & ALE_FLAG_DETACH) == 0)
					ale_init(sc);
			}
		} else {
			if ((ifp->if_flags & IFF_RUNNING) != 0)
				ale_stop(sc);
		}
		sc->ale_if_flags = ifp->if_flags;
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if ((ifp->if_flags & IFF_RUNNING) != 0)
			ale_rxfilter(sc);
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		mii = device_get_softc(sc->ale_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;

	case SIOCSIFCAP:
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		if ((mask & IFCAP_TXCSUM) != 0 &&
		    (ifp->if_capabilities & IFCAP_TXCSUM) != 0) {
			ifp->if_capenable ^= IFCAP_TXCSUM;
			if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
				ifp->if_hwassist |= ALE_CSUM_FEATURES;
			else
				ifp->if_hwassist &= ~ALE_CSUM_FEATURES;
		}
		if ((mask & IFCAP_RXCSUM) != 0 &&
		    (ifp->if_capabilities & IFCAP_RXCSUM) != 0)
			ifp->if_capenable ^= IFCAP_RXCSUM;

		if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
		    (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) != 0) {
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			ale_rxvlan(sc);
		}
		break;

	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}
	return (error);
}

static void
ale_mac_config(struct ale_softc *sc)
{
	struct mii_data *mii;
	uint32_t reg;

	mii = device_get_softc(sc->ale_miibus);
	reg = CSR_READ_4(sc, ALE_MAC_CFG);
	reg &= ~(MAC_CFG_FULL_DUPLEX | MAC_CFG_TX_FC | MAC_CFG_RX_FC |
	    MAC_CFG_SPEED_MASK);
	/* Reprogram MAC with resolved speed/duplex. */
	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_10_T:
	case IFM_100_TX:
		reg |= MAC_CFG_SPEED_10_100;
		break;
	case IFM_1000_T:
		reg |= MAC_CFG_SPEED_1000;
		break;
	}
	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
		reg |= MAC_CFG_FULL_DUPLEX;
#ifdef notyet
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
			reg |= MAC_CFG_TX_FC;
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
			reg |= MAC_CFG_RX_FC;
#endif
	}
	CSR_WRITE_4(sc, ALE_MAC_CFG, reg);
}

static void
ale_stats_clear(struct ale_softc *sc)
{
	struct smb sb;
	uint32_t *reg;
	int i;

	/* Clear Rx statistics; reading the MIB registers clears them. */
	for (reg = &sb.rx_frames, i = 0; reg <= &sb.rx_pkts_filtered; reg++) {
		CSR_READ_4(sc, ALE_RX_MIB_BASE + i);
		i += sizeof(uint32_t);
	}
	/* Clear Tx statistics. */
	for (reg = &sb.tx_frames, i = 0; reg <= &sb.tx_mcast_bytes; reg++) {
		CSR_READ_4(sc, ALE_TX_MIB_BASE + i);
		i += sizeof(uint32_t);
	}
}
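
/*
 * Sketch, not compiled in: the statistics loops above and below walk
 * a uint32_t pointer across struct smb and assume its members mirror
 * the hardware MIB register file, four bytes per counter with no
 * padding.  A compile-time guard along these lines (the exact form is
 * an assumption; nothing here depends on it) could catch accidental
 * reordering of the structure:
 */
#ifdef notyet
CTASSERT((offsetof(struct smb, rx_pkts_filtered) -
    offsetof(struct smb, rx_frames)) % sizeof(uint32_t) == 0);
CTASSERT((offsetof(struct smb, tx_mcast_bytes) -
    offsetof(struct smb, tx_frames)) % sizeof(uint32_t) == 0);
#endif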

static void
ale_stats_update(struct ale_softc *sc)
{
	struct ale_hw_stats *stat;
	struct smb sb, *smb;
	struct ifnet *ifp;
	uint32_t *reg;
	int i;

	ifp = &sc->arpcom.ac_if;
	stat = &sc->ale_stats;
	smb = &sb;

	/* Read Rx statistics. */
	for (reg = &sb.rx_frames, i = 0; reg <= &sb.rx_pkts_filtered; reg++) {
		*reg = CSR_READ_4(sc, ALE_RX_MIB_BASE + i);
		i += sizeof(uint32_t);
	}
	/* Read Tx statistics. */
	for (reg = &sb.tx_frames, i = 0; reg <= &sb.tx_mcast_bytes; reg++) {
		*reg = CSR_READ_4(sc, ALE_TX_MIB_BASE + i);
		i += sizeof(uint32_t);
	}

	/* Rx stats. */
	stat->rx_frames += smb->rx_frames;
	stat->rx_bcast_frames += smb->rx_bcast_frames;
	stat->rx_mcast_frames += smb->rx_mcast_frames;
	stat->rx_pause_frames += smb->rx_pause_frames;
	stat->rx_control_frames += smb->rx_control_frames;
	stat->rx_crcerrs += smb->rx_crcerrs;
	stat->rx_lenerrs += smb->rx_lenerrs;
	stat->rx_bytes += smb->rx_bytes;
	stat->rx_runts += smb->rx_runts;
	stat->rx_fragments += smb->rx_fragments;
	stat->rx_pkts_64 += smb->rx_pkts_64;
	stat->rx_pkts_65_127 += smb->rx_pkts_65_127;
	stat->rx_pkts_128_255 += smb->rx_pkts_128_255;
	stat->rx_pkts_256_511 += smb->rx_pkts_256_511;
	stat->rx_pkts_512_1023 += smb->rx_pkts_512_1023;
	stat->rx_pkts_1024_1518 += smb->rx_pkts_1024_1518;
	stat->rx_pkts_1519_max += smb->rx_pkts_1519_max;
	stat->rx_pkts_truncated += smb->rx_pkts_truncated;
	stat->rx_fifo_oflows += smb->rx_fifo_oflows;
	stat->rx_rrs_errs += smb->rx_rrs_errs;
	stat->rx_alignerrs += smb->rx_alignerrs;
	stat->rx_bcast_bytes += smb->rx_bcast_bytes;
	stat->rx_mcast_bytes += smb->rx_mcast_bytes;
	stat->rx_pkts_filtered += smb->rx_pkts_filtered;

	/* Tx stats. */
	stat->tx_frames += smb->tx_frames;
	stat->tx_bcast_frames += smb->tx_bcast_frames;
	stat->tx_mcast_frames += smb->tx_mcast_frames;
	stat->tx_pause_frames += smb->tx_pause_frames;
	stat->tx_excess_defer += smb->tx_excess_defer;
	stat->tx_control_frames += smb->tx_control_frames;
	stat->tx_deferred += smb->tx_deferred;
	stat->tx_bytes += smb->tx_bytes;
	stat->tx_pkts_64 += smb->tx_pkts_64;
	stat->tx_pkts_65_127 += smb->tx_pkts_65_127;
	stat->tx_pkts_128_255 += smb->tx_pkts_128_255;
	stat->tx_pkts_256_511 += smb->tx_pkts_256_511;
	stat->tx_pkts_512_1023 += smb->tx_pkts_512_1023;
	stat->tx_pkts_1024_1518 += smb->tx_pkts_1024_1518;
	stat->tx_pkts_1519_max += smb->tx_pkts_1519_max;
	stat->tx_single_colls += smb->tx_single_colls;
	stat->tx_multi_colls += smb->tx_multi_colls;
	stat->tx_late_colls += smb->tx_late_colls;
	stat->tx_excess_colls += smb->tx_excess_colls;
	stat->tx_abort += smb->tx_abort;
	stat->tx_underrun += smb->tx_underrun;
	stat->tx_desc_underrun += smb->tx_desc_underrun;
	stat->tx_lenerrs += smb->tx_lenerrs;
	stat->tx_pkts_truncated += smb->tx_pkts_truncated;
	stat->tx_bcast_bytes += smb->tx_bcast_bytes;
	stat->tx_mcast_bytes += smb->tx_mcast_bytes;

	/* Update counters in ifnet. */
	IFNET_STAT_INC(ifp, opackets, smb->tx_frames);

	IFNET_STAT_INC(ifp, collisions, smb->tx_single_colls +
	    smb->tx_multi_colls * 2 + smb->tx_late_colls +
	    smb->tx_abort * HDPX_CFG_RETRY_DEFAULT);

	/*
	 * XXX
	 * The tx_pkts_truncated counter looks suspicious.  It
	 * increments steadily with no other sign of Tx errors,
	 * which suggests the counter is misnamed, so it is not
	 * folded into the output error count.
	 */
	IFNET_STAT_INC(ifp, oerrors, smb->tx_abort + smb->tx_late_colls +
	    smb->tx_underrun);

	IFNET_STAT_INC(ifp, ipackets, smb->rx_frames);

	IFNET_STAT_INC(ifp, ierrors, smb->rx_crcerrs + smb->rx_lenerrs +
	    smb->rx_runts + smb->rx_pkts_truncated +
	    smb->rx_fifo_oflows + smb->rx_rrs_errs +
	    smb->rx_alignerrs);
}
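
/*
 * Sketch only: the collision estimate fed to ifnet above charges two
 * collisions per multi-collision frame and a full retry budget
 * (HDPX_CFG_RETRY_DEFAULT) per aborted frame.  Factored out as a
 * hypothetical helper purely for illustration:
 */
#ifdef notyet
static __inline uint32_t
ale_collision_estimate(const struct smb *smb)
{
	return (smb->tx_single_colls + smb->tx_multi_colls * 2 +
	    smb->tx_late_colls + smb->tx_abort * HDPX_CFG_RETRY_DEFAULT);
}
#endif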

static void
ale_intr(void *xsc)
{
	struct ale_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t status;

	ASSERT_SERIALIZED(ifp->if_serializer);

	status = CSR_READ_4(sc, ALE_INTR_STATUS);
	if ((status & ALE_INTRS) == 0)
		return;

	/* Acknowledge and disable interrupts. */
	CSR_WRITE_4(sc, ALE_INTR_STATUS, status | INTR_DIS_INT);

	if ((ifp->if_flags & IFF_RUNNING) != 0) {
		int error;

		error = ale_rxeof(sc);
		if (error) {
			sc->ale_stats.reset_brk_seq++;
			ale_init(sc);
			return;
		}

		if ((status & (INTR_DMA_RD_TO_RST | INTR_DMA_WR_TO_RST)) != 0) {
			if ((status & INTR_DMA_RD_TO_RST) != 0)
				device_printf(sc->ale_dev,
				    "DMA read error! -- resetting\n");
			if ((status & INTR_DMA_WR_TO_RST) != 0)
				device_printf(sc->ale_dev,
				    "DMA write error! -- resetting\n");
			ale_init(sc);
			return;
		}

		ale_txeof(sc);
		if (!ifq_is_empty(&ifp->if_snd))
			if_devstart(ifp);
	}

	/* Re-enable interrupts. */
	CSR_WRITE_4(sc, ALE_INTR_STATUS, 0x7FFFFFFF);
}

static void
ale_txeof(struct ale_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct ale_txdesc *txd;
	uint32_t cons, prod;
	int prog;

	if (sc->ale_cdata.ale_tx_cnt == 0)
		return;

	bus_dmamap_sync(sc->ale_cdata.ale_tx_ring_tag,
	    sc->ale_cdata.ale_tx_ring_map, BUS_DMASYNC_POSTREAD);
	if ((sc->ale_flags & ALE_FLAG_TXCMB_BUG) == 0) {
		bus_dmamap_sync(sc->ale_cdata.ale_tx_cmb_tag,
		    sc->ale_cdata.ale_tx_cmb_map, BUS_DMASYNC_POSTREAD);
		prod = *sc->ale_cdata.ale_tx_cmb & TPD_CNT_MASK;
	} else
		prod = CSR_READ_2(sc, ALE_TPD_CONS_IDX);
	cons = sc->ale_cdata.ale_tx_cons;
	/*
	 * Go through our Tx list and free mbufs for those
	 * frames which have been transmitted.
	 */
	for (prog = 0; cons != prod; prog++,
	     ALE_DESC_INC(cons, ALE_TX_RING_CNT)) {
		if (sc->ale_cdata.ale_tx_cnt <= 0)
			break;
		ifq_clr_oactive(&ifp->if_snd);
		sc->ale_cdata.ale_tx_cnt--;
		txd = &sc->ale_cdata.ale_txdesc[cons];
		if (txd->tx_m != NULL) {
			/* Reclaim transmitted mbufs. */
			bus_dmamap_unload(sc->ale_cdata.ale_tx_tag,
			    txd->tx_dmamap);
			m_freem(txd->tx_m);
			txd->tx_m = NULL;
		}
	}

	if (prog > 0) {
		sc->ale_cdata.ale_tx_cons = cons;
		/*
		 * Unarm the watchdog timer only when there are no
		 * pending Tx descriptors in the queue.
		 */
		if (sc->ale_cdata.ale_tx_cnt == 0)
			ifp->if_timer = 0;
	}
}

static void
ale_rx_update_page(struct ale_softc *sc, struct ale_rx_page **page,
    uint32_t length, uint32_t *prod)
{
	struct ale_rx_page *rx_page;

	rx_page = *page;
	/* Update consumer position. */
	rx_page->cons += roundup(length + sizeof(struct rx_rs),
	    ALE_RX_PAGE_ALIGN);
	if (rx_page->cons >= ALE_RX_PAGE_SZ) {
		/*
		 * End of Rx page reached, let hardware reuse
		 * this page.
		 */
		rx_page->cons = 0;
		*rx_page->cmb_addr = 0;
		bus_dmamap_sync(rx_page->cmb_tag, rx_page->cmb_map,
		    BUS_DMASYNC_PREWRITE);
		CSR_WRITE_1(sc, ALE_RXF0_PAGE0 + sc->ale_cdata.ale_rx_curp,
		    RXF_VALID);
		/* Switch to alternate Rx page. */
		sc->ale_cdata.ale_rx_curp ^= 1;
		rx_page = *page =
		    &sc->ale_cdata.ale_rx_page[sc->ale_cdata.ale_rx_curp];
		/* Page flipped, sync CMB and Rx page. */
		bus_dmamap_sync(rx_page->page_tag, rx_page->page_map,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_sync(rx_page->cmb_tag, rx_page->cmb_map,
		    BUS_DMASYNC_POSTREAD);
		/* Sync completed, cache updated producer index. */
		*prod = *rx_page->cmb_addr;
	}
}
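
/*
 * Sketch only: each received frame inside an Rx page is padded out to
 * an ALE_RX_PAGE_ALIGN boundary, so the consumer offset of the next
 * Rx return status follows from the current offset and frame length
 * as below.  The helper is hypothetical; ale_rx_update_page() above
 * performs this computation inline.
 */
#ifdef notyet
static __inline uint32_t
ale_rx_next_cons(uint32_t cons, uint32_t length)
{
	/* Skip the Rx return status header plus the padded payload. */
	return (cons + roundup(length + sizeof(struct rx_rs),
	    ALE_RX_PAGE_ALIGN));
}
#endif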

/*
 * The AR81xx controller appears to be able to compute partial
 * checksums.  A partial checksum value can be used to accelerate
 * checksum computation for fragmented TCP/UDP packets, and the upper
 * network stack already takes advantage of partial checksums during
 * IP reassembly.  However, with no data sheet available, the
 * correctness of the hardware's partial checksum assistance cannot be
 * verified.  In addition, the controller's Rx scheme requires copying
 * every received frame, which effectively nullifies one of its nicest
 * offload capabilities.
 */
static void
ale_rxcsum(struct ale_softc *sc, struct mbuf *m, uint32_t status)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct ip *ip;
	char *p;

	m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
	if ((status & ALE_RD_IPCSUM_NOK) == 0)
		m->m_pkthdr.csum_flags |= CSUM_IP_VALID;

	if ((sc->ale_flags & ALE_FLAG_RXCSUM_BUG) == 0) {
		if (((status & ALE_RD_IPV4_FRAG) == 0) &&
		    ((status & (ALE_RD_TCP | ALE_RD_UDP)) != 0) &&
		    ((status & ALE_RD_TCP_UDPCSUM_NOK) == 0)) {
			m->m_pkthdr.csum_flags |=
			    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
			m->m_pkthdr.csum_data = 0xffff;
		}
	} else {
		if ((status & (ALE_RD_TCP | ALE_RD_UDP)) != 0 &&
		    (status & ALE_RD_TCP_UDPCSUM_NOK) == 0) {
			p = mtod(m, char *);
			p += ETHER_HDR_LEN;
			if ((status & ALE_RD_802_3) != 0)
				p += LLC_SNAPFRAMELEN;
			if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) == 0 &&
			    (status & ALE_RD_VLAN) != 0)
				p += EVL_ENCAPLEN;
			ip = (struct ip *)p;
			if (ip->ip_off != 0 && (status & ALE_RD_IPV4_DF) == 0)
				return;
			m->m_pkthdr.csum_flags |= CSUM_DATA_VALID |
			    CSUM_PSEUDO_HDR;
			m->m_pkthdr.csum_data = 0xffff;
		}
	}
	/*
	 * Don't flag bad checksums for TCP/UDP frames, as fragmented
	 * frames may always have the bad-checksum bit set in their
	 * frame status.
	 */
}
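
/*
 * Sketch only, kept disabled: if the hardware's partial checksum
 * value were documented and trusted, the driver could export it to
 * the stack instead of claiming a fully verified checksum, by setting
 * CSUM_DATA_VALID without CSUM_PSEUDO_HDR and passing the raw 16-bit
 * sum.  The helper and its hwsum argument are hypothetical.
 */
#ifdef notyet
static __inline void
ale_rxcsum_partial(struct mbuf *m, uint32_t hwsum)
{
	m->m_pkthdr.csum_flags |= CSUM_DATA_VALID;
	m->m_pkthdr.csum_data = hwsum & 0xffff;
}
#endif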

/* Process received frames. */
static int
ale_rxeof(struct ale_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct ale_rx_page *rx_page;
	struct rx_rs *rs;
	struct mbuf *m;
	uint32_t length, prod, seqno, status, vtags;
	int prog;

	rx_page = &sc->ale_cdata.ale_rx_page[sc->ale_cdata.ale_rx_curp];
	bus_dmamap_sync(rx_page->cmb_tag, rx_page->cmb_map,
	    BUS_DMASYNC_POSTREAD);
	bus_dmamap_sync(rx_page->page_tag, rx_page->page_map,
	    BUS_DMASYNC_POSTREAD);
	/*
	 * Don't access the producer index directly, as hardware may
	 * update it while the Rx handler is in progress.  It would be
	 * even better if there were a way to let hardware know how far
	 * the driver has processed received frames.  Alternatively,
	 * hardware could provide a way to disable CMB updates until
	 * the driver acknowledges the end of CMB access.
	 */
	prod = *rx_page->cmb_addr;
	for (prog = 0; ; prog++) {
		if (rx_page->cons >= prod)
			break;
		rs = (struct rx_rs *)(rx_page->page_addr + rx_page->cons);
		seqno = ALE_RX_SEQNO(le32toh(rs->seqno));
		if (sc->ale_cdata.ale_rx_seqno != seqno) {
			/*
			 * Normally this should not happen short of a
			 * severe driver bug or corrupted memory.
			 * However, it has been seen under certain
			 * conditions triggered by abrupt Rx events,
			 * such as a remote host initiating a bulk
			 * transfer.  It is not easy to reproduce, and
			 * a hardware FIFO overflow or Tx CMB update
			 * activity seem unlikely causes.  Similar
			 * behaviour has been seen on the RealTek 8139,
			 * which uses a resembling Rx scheme.
			 */
			if (bootverbose)
				device_printf(sc->ale_dev,
				    "garbled seq: %u, expected: %u -- "
				    "resetting!\n", seqno,
				    sc->ale_cdata.ale_rx_seqno);
			return (EIO);
		}
		/* Frame received. */
		sc->ale_cdata.ale_rx_seqno++;
		length = ALE_RX_BYTES(le32toh(rs->length));
		status = le32toh(rs->flags);
		if ((status & ALE_RD_ERROR) != 0) {
			/*
			 * We want to pass the following frames to the
			 * upper layer regardless of the error status
			 * in the Rx return status:
			 *
			 *   o IP/TCP/UDP checksum is bad.
			 *   o frame length and protocol specific
			 *     length do not match.
			 */
			if ((status & (ALE_RD_CRC | ALE_RD_CODE |
			    ALE_RD_DRIBBLE | ALE_RD_RUNT | ALE_RD_OFLOW |
			    ALE_RD_TRUNC)) != 0) {
				ale_rx_update_page(sc, &rx_page, length, &prod);
				continue;
			}
		}
		/*
		 * m_devget(9) is the major bottleneck of ale(4); it
		 * stems from a hardware limitation.  For jumbo frames
		 * the driver could get slightly better performance by
		 * using m_getjcl(9) with a proper buffer size argument,
		 * but that would complicate the code, and users are
		 * unlikely to expect great Rx performance numbers from
		 * this low-end consumer ethernet controller anyway.
		 */
		m = m_devget((char *)(rs + 1), length - ETHER_CRC_LEN,
		    ETHER_ALIGN, ifp, NULL);
		if (m == NULL) {
			IFNET_STAT_INC(ifp, iqdrops, 1);
			ale_rx_update_page(sc, &rx_page, length, &prod);
			continue;
		}
		if ((ifp->if_capenable & IFCAP_RXCSUM) != 0 &&
		    (status & ALE_RD_IPV4) != 0)
			ale_rxcsum(sc, m, status);
		if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0 &&
		    (status & ALE_RD_VLAN) != 0) {
			vtags = ALE_RX_VLAN(le32toh(rs->vtags));
			m->m_pkthdr.ether_vlantag = ALE_RX_VLAN_TAG(vtags);
			m->m_flags |= M_VLANTAG;
		}

		/* Pass it to upper layer. */
		ifp->if_input(ifp, m);

		ale_rx_update_page(sc, &rx_page, length, &prod);
	}
	return (0);
}

static void
ale_tick(void *xsc)
{
	struct ale_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct mii_data *mii;

	lwkt_serialize_enter(ifp->if_serializer);

	mii = device_get_softc(sc->ale_miibus);
	mii_tick(mii);
	ale_stats_update(sc);

	callout_reset(&sc->ale_tick_ch, hz, ale_tick, sc);

	lwkt_serialize_exit(ifp->if_serializer);
}

static void
ale_reset(struct ale_softc *sc)
{
	uint32_t reg;
	int i;

	/* Initialize PCIe module. From Linux. */
	CSR_WRITE_4(sc, 0x1008, CSR_READ_4(sc, 0x1008) | 0x8000);

	CSR_WRITE_4(sc, ALE_MASTER_CFG, MASTER_RESET);
	for (i = ALE_RESET_TIMEOUT; i > 0; i--) {
		DELAY(10);
		if ((CSR_READ_4(sc, ALE_MASTER_CFG) & MASTER_RESET) == 0)
			break;
	}
	if (i == 0)
		device_printf(sc->ale_dev, "master reset timeout!\n");

	for (i = ALE_RESET_TIMEOUT; i > 0; i--) {
		if ((reg = CSR_READ_4(sc, ALE_IDLE_STATUS)) == 0)
			break;
		DELAY(10);
	}
	if (i == 0)
		device_printf(sc->ale_dev, "reset timeout(0x%08x)!\n", reg);
}
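
/*
 * Sketch only: the two loops in ale_reset() above share a "spin until
 * the register clears" shape.  A hypothetical helper (not used by
 * this driver) could make that pattern explicit:
 */
#ifdef notyet
static int
ale_wait_reg_clear(struct ale_softc *sc, int reg, uint32_t mask)
{
	int i;

	for (i = ALE_RESET_TIMEOUT; i > 0; i--) {
		if ((CSR_READ_4(sc, reg) & mask) == 0)
			return (0);
		DELAY(10);
	}
	return (ETIMEDOUT);
}
#endif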

static void
ale_init(void *xsc)
{
	struct ale_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct mii_data *mii;
	uint8_t eaddr[ETHER_ADDR_LEN];
	bus_addr_t paddr;
	uint32_t reg, rxf_hi, rxf_lo;

	ASSERT_SERIALIZED(ifp->if_serializer);

	mii = device_get_softc(sc->ale_miibus);

	/*
	 * Cancel any pending I/O.
	 */
	ale_stop(sc);

	/*
	 * Reset the chip to a known state.
	 */
	ale_reset(sc);

	/* Initialize Tx descriptors and DMA memory blocks. */
	ale_init_rx_pages(sc);
	ale_init_tx_ring(sc);

	/* Reprogram the station address. */
	bcopy(IF_LLADDR(ifp), eaddr, ETHER_ADDR_LEN);
	CSR_WRITE_4(sc, ALE_PAR0,
	    eaddr[2] << 24 | eaddr[3] << 16 | eaddr[4] << 8 | eaddr[5]);
	CSR_WRITE_4(sc, ALE_PAR1, eaddr[0] << 8 | eaddr[1]);

	/*
	 * Clear WOL status and disable all WOL features, as WOL
	 * would interfere with Rx operation under normal conditions.
	 */
	CSR_READ_4(sc, ALE_WOL_CFG);
	CSR_WRITE_4(sc, ALE_WOL_CFG, 0);

	/*
	 * Set Tx descriptor/RXF0/CMB base addresses.  They share
	 * the same high address part of the DMAable region.
	 */
	paddr = sc->ale_cdata.ale_tx_ring_paddr;
	CSR_WRITE_4(sc, ALE_TPD_ADDR_HI, ALE_ADDR_HI(paddr));
	CSR_WRITE_4(sc, ALE_TPD_ADDR_LO, ALE_ADDR_LO(paddr));
	CSR_WRITE_4(sc, ALE_TPD_CNT,
	    (ALE_TX_RING_CNT << TPD_CNT_SHIFT) & TPD_CNT_MASK);

	/* Set Rx page base addresses; note we use a single queue. */
	paddr = sc->ale_cdata.ale_rx_page[0].page_paddr;
	CSR_WRITE_4(sc, ALE_RXF0_PAGE0_ADDR_LO, ALE_ADDR_LO(paddr));
	paddr = sc->ale_cdata.ale_rx_page[1].page_paddr;
	CSR_WRITE_4(sc, ALE_RXF0_PAGE1_ADDR_LO, ALE_ADDR_LO(paddr));

	/* Set Tx/Rx CMB addresses. */
	paddr = sc->ale_cdata.ale_tx_cmb_paddr;
	CSR_WRITE_4(sc, ALE_TX_CMB_ADDR_LO, ALE_ADDR_LO(paddr));
	paddr = sc->ale_cdata.ale_rx_page[0].cmb_paddr;
	CSR_WRITE_4(sc, ALE_RXF0_CMB0_ADDR_LO, ALE_ADDR_LO(paddr));
	paddr = sc->ale_cdata.ale_rx_page[1].cmb_paddr;
	CSR_WRITE_4(sc, ALE_RXF0_CMB1_ADDR_LO, ALE_ADDR_LO(paddr));

	/* Mark both RXF0 pages as valid. */
	CSR_WRITE_1(sc, ALE_RXF0_PAGE0, RXF_VALID);
	CSR_WRITE_1(sc, ALE_RXF0_PAGE1, RXF_VALID);
	/*
	 * No need to initialize RXF1/RXF2/RXF3.  We don't use
	 * multi-queue yet.
	 */

	/* Set Rx page size, excluding guard frame size. */
	CSR_WRITE_4(sc, ALE_RXF_PAGE_SIZE, ALE_RX_PAGE_SZ);

	/* Tell hardware that we're ready to load DMA blocks. */
	CSR_WRITE_4(sc, ALE_DMA_BLOCK, DMA_BLOCK_LOAD);

	/* Set Rx/Tx interrupt trigger thresholds. */
	CSR_WRITE_4(sc, ALE_INT_TRIG_THRESH, (1 << INT_TRIG_RX_THRESH_SHIFT) |
	    (4 << INT_TRIG_TX_THRESH_SHIFT));
	/*
	 * XXX
	 * Set the interrupt trigger timer; its purpose and relation
	 * to the interrupt moderation mechanism is not clear yet.
	 */
	CSR_WRITE_4(sc, ALE_INT_TRIG_TIMER,
	    ((ALE_USECS(10) << INT_TRIG_RX_TIMER_SHIFT) |
	    (ALE_USECS(1000) << INT_TRIG_TX_TIMER_SHIFT)));

	/* Configure interrupt moderation timers. */
	reg = ALE_USECS(sc->ale_int_rx_mod) << IM_TIMER_RX_SHIFT;
	reg |= ALE_USECS(sc->ale_int_tx_mod) << IM_TIMER_TX_SHIFT;
	CSR_WRITE_4(sc, ALE_IM_TIMER, reg);
	reg = CSR_READ_4(sc, ALE_MASTER_CFG);
	reg &= ~(MASTER_CHIP_REV_MASK | MASTER_CHIP_ID_MASK);
	reg &= ~(MASTER_IM_RX_TIMER_ENB | MASTER_IM_TX_TIMER_ENB);
	if (ALE_USECS(sc->ale_int_rx_mod) != 0)
		reg |= MASTER_IM_RX_TIMER_ENB;
	if (ALE_USECS(sc->ale_int_tx_mod) != 0)
		reg |= MASTER_IM_TX_TIMER_ENB;
	CSR_WRITE_4(sc, ALE_MASTER_CFG, reg);
	CSR_WRITE_2(sc, ALE_INTR_CLR_TIMER, ALE_USECS(1000));

	/* Set the maximum frame size the controller will accept. */
	if (ifp->if_mtu < ETHERMTU)
		sc->ale_max_frame_size = ETHERMTU;
	else
		sc->ale_max_frame_size = ifp->if_mtu;
	sc->ale_max_frame_size += ETHER_HDR_LEN + EVL_ENCAPLEN + ETHER_CRC_LEN;
	CSR_WRITE_4(sc, ALE_FRAME_SIZE, sc->ale_max_frame_size);

	/* Configure IPG/IFG parameters. */
	CSR_WRITE_4(sc, ALE_IPG_IFG_CFG,
	    ((IPG_IFG_IPGT_DEFAULT << IPG_IFG_IPGT_SHIFT) & IPG_IFG_IPGT_MASK) |
	    ((IPG_IFG_MIFG_DEFAULT << IPG_IFG_MIFG_SHIFT) & IPG_IFG_MIFG_MASK) |
	    ((IPG_IFG_IPG1_DEFAULT << IPG_IFG_IPG1_SHIFT) & IPG_IFG_IPG1_MASK) |
	    ((IPG_IFG_IPG2_DEFAULT << IPG_IFG_IPG2_SHIFT) & IPG_IFG_IPG2_MASK));

	/* Set parameters for half-duplex media. */
	CSR_WRITE_4(sc, ALE_HDPX_CFG,
	    ((HDPX_CFG_LCOL_DEFAULT << HDPX_CFG_LCOL_SHIFT) &
	    HDPX_CFG_LCOL_MASK) |
	    ((HDPX_CFG_RETRY_DEFAULT << HDPX_CFG_RETRY_SHIFT) &
	    HDPX_CFG_RETRY_MASK) | HDPX_CFG_EXC_DEF_EN |
	    ((HDPX_CFG_ABEBT_DEFAULT << HDPX_CFG_ABEBT_SHIFT) &
	    HDPX_CFG_ABEBT_MASK) |
	    ((HDPX_CFG_JAMIPG_DEFAULT << HDPX_CFG_JAMIPG_SHIFT) &
	    HDPX_CFG_JAMIPG_MASK));

	/* Configure Tx jumbo frame parameters. */
	if ((sc->ale_flags & ALE_FLAG_JUMBO) != 0) {
		if (ifp->if_mtu < ETHERMTU)
			reg = sc->ale_max_frame_size;
		else if (ifp->if_mtu < 6 * 1024)
			reg = (sc->ale_max_frame_size * 2) / 3;
		else
			reg = sc->ale_max_frame_size / 2;
		CSR_WRITE_4(sc, ALE_TX_JUMBO_THRESH,
		    roundup(reg, TX_JUMBO_THRESH_UNIT) >>
		    TX_JUMBO_THRESH_UNIT_SHIFT);
	}

	/* Configure TxQ. */
	reg = (128 << (sc->ale_dma_rd_burst >> DMA_CFG_RD_BURST_SHIFT))
	    << TXQ_CFG_TX_FIFO_BURST_SHIFT;
	reg |= (TXQ_CFG_TPD_BURST_DEFAULT << TXQ_CFG_TPD_BURST_SHIFT) &
	    TXQ_CFG_TPD_BURST_MASK;
	CSR_WRITE_4(sc, ALE_TXQ_CFG, reg | TXQ_CFG_ENHANCED_MODE | TXQ_CFG_ENB);

	/* Configure Rx jumbo frame & flow control parameters. */
	if ((sc->ale_flags & ALE_FLAG_JUMBO) != 0) {
		reg = roundup(sc->ale_max_frame_size, RX_JUMBO_THRESH_UNIT);
		CSR_WRITE_4(sc, ALE_RX_JUMBO_THRESH,
		    (((reg >> RX_JUMBO_THRESH_UNIT_SHIFT) <<
		    RX_JUMBO_THRESH_MASK_SHIFT) & RX_JUMBO_THRESH_MASK) |
		    ((RX_JUMBO_LKAH_DEFAULT << RX_JUMBO_LKAH_SHIFT) &
		    RX_JUMBO_LKAH_MASK));
		/*
		 * Set the flow control watermarks at 70% (high) and
		 * 30% (low) of the Rx FIFO length; see the sketch
		 * after this function.
		 */
		reg = CSR_READ_4(sc, ALE_SRAM_RX_FIFO_LEN);
		rxf_hi = (reg * 7) / 10;
		rxf_lo = (reg * 3) / 10;
		CSR_WRITE_4(sc, ALE_RX_FIFO_PAUSE_THRESH,
		    ((rxf_lo << RX_FIFO_PAUSE_THRESH_LO_SHIFT) &
		    RX_FIFO_PAUSE_THRESH_LO_MASK) |
		    ((rxf_hi << RX_FIFO_PAUSE_THRESH_HI_SHIFT) &
		    RX_FIFO_PAUSE_THRESH_HI_MASK));
	}

	/* Disable RSS. */
	CSR_WRITE_4(sc, ALE_RSS_IDT_TABLE0, 0);
	CSR_WRITE_4(sc, ALE_RSS_CPU, 0);

	/* Configure RxQ. */
	CSR_WRITE_4(sc, ALE_RXQ_CFG,
	    RXQ_CFG_ALIGN_32 | RXQ_CFG_CUT_THROUGH_ENB | RXQ_CFG_ENB);

	/* Configure DMA parameters. */
	reg = 0;
	if ((sc->ale_flags & ALE_FLAG_TXCMB_BUG) == 0)
		reg |= DMA_CFG_TXCMB_ENB;
	CSR_WRITE_4(sc, ALE_DMA_CFG,
	    DMA_CFG_OUT_ORDER | DMA_CFG_RD_REQ_PRI | DMA_CFG_RCB_64 |
	    sc->ale_dma_rd_burst | reg |
	    sc->ale_dma_wr_burst | DMA_CFG_RXCMB_ENB |
	    ((DMA_CFG_RD_DELAY_CNT_DEFAULT << DMA_CFG_RD_DELAY_CNT_SHIFT) &
	    DMA_CFG_RD_DELAY_CNT_MASK) |
	    ((DMA_CFG_WR_DELAY_CNT_DEFAULT << DMA_CFG_WR_DELAY_CNT_SHIFT) &
	    DMA_CFG_WR_DELAY_CNT_MASK));

	/*
	 * The hardware can be configured to raise an SMB interrupt at
	 * a programmed interval.  Since the driver already runs a
	 * callout every hz, use that instead of relying on the
	 * periodic SMB interrupt.
	 */
	CSR_WRITE_4(sc, ALE_SMB_STAT_TIMER, ALE_USECS(0));

	/* Clear MAC statistics. */
	ale_stats_clear(sc);

	/*
	 * Configure Tx/Rx MACs.
	 *  - Auto-padding for short frames.
	 *  - Enable CRC generation.
	 * Actual reconfiguration of the MAC for the resolved
	 * speed/duplex follows once link establishment is detected.
	 * AR81xx always performs Rx checksum computation regardless
	 * of the MAC_CFG_RXCSUM_ENB bit; in fact, setting the bit
	 * will cause Rx handling issues for fragmented IP datagrams
	 * due to a silicon bug.
	 */
	reg = MAC_CFG_TX_CRC_ENB | MAC_CFG_TX_AUTO_PAD | MAC_CFG_FULL_DUPLEX |
	    ((MAC_CFG_PREAMBLE_DEFAULT << MAC_CFG_PREAMBLE_SHIFT) &
	    MAC_CFG_PREAMBLE_MASK);
	if ((sc->ale_flags & ALE_FLAG_FASTETHER) != 0)
		reg |= MAC_CFG_SPEED_10_100;
	else
		reg |= MAC_CFG_SPEED_1000;
	CSR_WRITE_4(sc, ALE_MAC_CFG, reg);

	/* Set up the receive filter. */
	ale_rxfilter(sc);
	ale_rxvlan(sc);

	/* Acknowledge all pending interrupts and clear them. */
	CSR_WRITE_4(sc, ALE_INTR_MASK, ALE_INTRS);
	CSR_WRITE_4(sc, ALE_INTR_STATUS, 0xFFFFFFFF);
	CSR_WRITE_4(sc, ALE_INTR_STATUS, 0);

	sc->ale_flags &= ~ALE_FLAG_LINK;

	/* Switch to the current media. */
	mii_mediachg(mii);

	callout_reset(&sc->ale_tick_ch, hz, ale_tick, sc);

	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);
}
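
/*
 * Sketch only: ale_init() above splits the Rx FIFO length register
 * value at 70%/30% for the flow-control high/low watermarks.  For
 * example, a FIFO length value of 8192 yields rxf_hi = 5734 and
 * rxf_lo = 2457 with integer division.  The helper is hypothetical
 * and exists purely to document the arithmetic.
 */
#ifdef notyet
static __inline void
ale_rxf_watermarks(uint32_t fifo_len, uint32_t *hi, uint32_t *lo)
{
	*hi = (fifo_len * 7) / 10;	/* e.g. 8192 -> 5734 */
	*lo = (fifo_len * 3) / 10;	/* e.g. 8192 -> 2457 */
}
#endif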

static void
ale_stop(struct ale_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct ale_txdesc *txd;
	uint32_t reg;
	int i;

	ASSERT_SERIALIZED(ifp->if_serializer);

	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp->if_flags &= ~IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);
	ifp->if_timer = 0;

	callout_stop(&sc->ale_tick_ch);
	sc->ale_flags &= ~ALE_FLAG_LINK;

	ale_stats_update(sc);

	/* Disable interrupts. */
	CSR_WRITE_4(sc, ALE_INTR_MASK, 0);
	CSR_WRITE_4(sc, ALE_INTR_STATUS, 0xFFFFFFFF);

	/* Disable queue processing and DMA. */
	reg = CSR_READ_4(sc, ALE_TXQ_CFG);
	reg &= ~TXQ_CFG_ENB;
	CSR_WRITE_4(sc, ALE_TXQ_CFG, reg);
	reg = CSR_READ_4(sc, ALE_RXQ_CFG);
	reg &= ~RXQ_CFG_ENB;
	CSR_WRITE_4(sc, ALE_RXQ_CFG, reg);
	reg = CSR_READ_4(sc, ALE_DMA_CFG);
	reg &= ~(DMA_CFG_TXCMB_ENB | DMA_CFG_RXCMB_ENB);
	CSR_WRITE_4(sc, ALE_DMA_CFG, reg);
	DELAY(1000);

	/* Stop Rx/Tx MACs. */
	ale_stop_mac(sc);

	/* Disable interrupts again? XXX */
	CSR_WRITE_4(sc, ALE_INTR_STATUS, 0xFFFFFFFF);

	/*
	 * Free any TX mbufs still in the queues.
	 */
	for (i = 0; i < ALE_TX_RING_CNT; i++) {
		txd = &sc->ale_cdata.ale_txdesc[i];
		if (txd->tx_m != NULL) {
			bus_dmamap_unload(sc->ale_cdata.ale_tx_tag,
			    txd->tx_dmamap);
			m_freem(txd->tx_m);
			txd->tx_m = NULL;
		}
	}
}

static void
ale_stop_mac(struct ale_softc *sc)
{
	uint32_t reg;
	int i;

	reg = CSR_READ_4(sc, ALE_MAC_CFG);
	if ((reg & (MAC_CFG_TX_ENB | MAC_CFG_RX_ENB)) != 0) {
		reg &= ~(MAC_CFG_TX_ENB | MAC_CFG_RX_ENB);
		CSR_WRITE_4(sc, ALE_MAC_CFG, reg);
	}

	for (i = ALE_TIMEOUT; i > 0; i--) {
		reg = CSR_READ_4(sc, ALE_IDLE_STATUS);
		if (reg == 0)
			break;
		DELAY(10);
	}
	if (i == 0)
		device_printf(sc->ale_dev,
		    "could not disable Tx/Rx MAC(0x%08x)!\n", reg);
}

static void
ale_init_tx_ring(struct ale_softc *sc)
{
	struct ale_txdesc *txd;
	int i;

	sc->ale_cdata.ale_tx_prod = 0;
	sc->ale_cdata.ale_tx_cons = 0;
	sc->ale_cdata.ale_tx_cnt = 0;

	bzero(sc->ale_cdata.ale_tx_ring, ALE_TX_RING_SZ);
	bzero(sc->ale_cdata.ale_tx_cmb, ALE_TX_CMB_SZ);
	for (i = 0; i < ALE_TX_RING_CNT; i++) {
		txd = &sc->ale_cdata.ale_txdesc[i];
		txd->tx_m = NULL;
	}
	*sc->ale_cdata.ale_tx_cmb = 0;
	bus_dmamap_sync(sc->ale_cdata.ale_tx_cmb_tag,
	    sc->ale_cdata.ale_tx_cmb_map,
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->ale_cdata.ale_tx_ring_tag,
	    sc->ale_cdata.ale_tx_ring_map,
	    BUS_DMASYNC_PREWRITE);
}

static void
ale_init_rx_pages(struct ale_softc *sc)
{
	struct ale_rx_page *rx_page;
	int i;

	sc->ale_cdata.ale_rx_seqno = 0;
	sc->ale_cdata.ale_rx_curp = 0;

	for (i = 0; i < ALE_RX_PAGES; i++) {
		rx_page = &sc->ale_cdata.ale_rx_page[i];
		bzero(rx_page->page_addr, sc->ale_pagesize);
		bzero(rx_page->cmb_addr, ALE_RX_CMB_SZ);
		rx_page->cons = 0;
		*rx_page->cmb_addr = 0;
		bus_dmamap_sync(rx_page->page_tag, rx_page->page_map,
		    BUS_DMASYNC_PREWRITE);
		bus_dmamap_sync(rx_page->cmb_tag, rx_page->cmb_map,
		    BUS_DMASYNC_PREWRITE);
	}
}

static void
ale_rxvlan(struct ale_softc *sc)
{
	struct ifnet *ifp;
	uint32_t reg;

	ifp = &sc->arpcom.ac_if;
	reg = CSR_READ_4(sc, ALE_MAC_CFG);
	reg &= ~MAC_CFG_VLAN_TAG_STRIP;
	if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
		reg |= MAC_CFG_VLAN_TAG_STRIP;
	CSR_WRITE_4(sc, ALE_MAC_CFG, reg);
}

static void
ale_rxfilter(struct ale_softc *sc)
{
	struct ifnet *ifp;
	struct ifmultiaddr *ifma;
	uint32_t crc;
	uint32_t mchash[2];
	uint32_t rxcfg;

	ifp = &sc->arpcom.ac_if;

	rxcfg = CSR_READ_4(sc, ALE_MAC_CFG);
	rxcfg &= ~(MAC_CFG_ALLMULTI | MAC_CFG_BCAST | MAC_CFG_PROMISC);
	if ((ifp->if_flags & IFF_BROADCAST) != 0)
		rxcfg |= MAC_CFG_BCAST;
	if ((ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
		if ((ifp->if_flags & IFF_PROMISC) != 0)
			rxcfg |= MAC_CFG_PROMISC;
		if ((ifp->if_flags & IFF_ALLMULTI) != 0)
			rxcfg |= MAC_CFG_ALLMULTI;
		CSR_WRITE_4(sc, ALE_MAR0, 0xFFFFFFFF);
		CSR_WRITE_4(sc, ALE_MAR1, 0xFFFFFFFF);
		CSR_WRITE_4(sc, ALE_MAC_CFG, rxcfg);
		return;
	}

	/* Program the new filter. */
	bzero(mchash, sizeof(mchash));

	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		crc = ether_crc32_le(LLADDR((struct sockaddr_dl *)
		    ifma->ifma_addr), ETHER_ADDR_LEN);
		mchash[crc >> 31] |= 1 << ((crc >> 26) & 0x1f);
	}

	CSR_WRITE_4(sc, ALE_MAR0, mchash[0]);
	CSR_WRITE_4(sc, ALE_MAR1, mchash[1]);
	CSR_WRITE_4(sc, ALE_MAC_CFG, rxcfg);
}
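
/*
 * Sketch only: how a single multicast address lands in the two MAR
 * words above.  The MSB of the little-endian CRC picks the word and
 * bits 30-26 pick the bit, matching the TAILQ loop in ale_rxfilter().
 * The helper is hypothetical; nothing in this file uses it.
 */
#ifdef notyet
static __inline void
ale_mchash_one(const uint8_t *enaddr, uint32_t mchash[2])
{
	uint32_t crc;

	crc = ether_crc32_le(enaddr, ETHER_ADDR_LEN);
	mchash[crc >> 31] |= 1 << ((crc >> 26) & 0x1f);
}
#endif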

static int
sysctl_hw_ale_int_mod(SYSCTL_HANDLER_ARGS)
{
	return (sysctl_int_range(oidp, arg1, arg2, req,
	    ALE_IM_TIMER_MIN, ALE_IM_TIMER_MAX));
}

static void
ale_dmamap_buf_cb(void *xctx, bus_dma_segment_t *segs, int nsegs,
    bus_size_t mapsz __unused, int error)
{
	struct ale_dmamap_ctx *ctx = xctx;
	int i;

	if (error)
		return;

	/* Signal "too many segments" to the caller by zeroing the count. */
	if (nsegs > ctx->nsegs) {
		ctx->nsegs = 0;
		return;
	}

	ctx->nsegs = nsegs;
	for (i = 0; i < nsegs; ++i)
		ctx->segs[i] = segs[i];
}
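
/*
 * Sketch of how ale_dmamap_buf_cb() above is intended to be driven,
 * in the style of ale_encap() earlier in this file: fill a context
 * with the segment array, load the mbuf, then check the count the
 * callback left behind.  The function is hypothetical and the
 * segment-array size is an arbitrary example value, not a driver
 * constant.
 */
#ifdef notyet
static int
ale_load_mbuf_example(struct ale_softc *sc, bus_dmamap_t map, struct mbuf *m)
{
	bus_dma_segment_t segs[8];	/* example size only */
	struct ale_dmamap_ctx ctx;
	int error;

	ctx.nsegs = NELEM(segs);
	ctx.segs = segs;
	error = bus_dmamap_load_mbuf(sc->ale_cdata.ale_tx_tag, map, m,
	    ale_dmamap_buf_cb, &ctx, BUS_DMA_NOWAIT);
	if (error)
		return (error);
	if (ctx.nsegs == 0) {
		/* Too many segments; the callback zeroed the count. */
		bus_dmamap_unload(sc->ale_cdata.ale_tx_tag, map);
		return (EFBIG);
	}
	return (0);
}
#endif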