/*-
 * Copyright (c) 2008, Pyun YongHyeon <yongari@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/dev/age/if_age.c,v 1.6 2008/11/07 07:02:28 yongari Exp $
 */

/* Driver for Attansic Technology Corp. L1 Gigabit Ethernet. */

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/rman.h>
#include <sys/serialize.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/bpf.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/ifq_var.h>
#include <net/vlan/if_vlan_var.h>
#include <net/vlan/if_vlan_ether.h>

#include <dev/netif/mii_layer/miivar.h>
#include <dev/netif/mii_layer/jmphyreg.h>

#include <bus/pci/pcireg.h>
#include <bus/pci/pcivar.h>
#include <bus/pci/pcidevs.h>

#include <dev/netif/age/if_agereg.h>
#include <dev/netif/age/if_agevar.h>

/* "device miibus" required.  See GENERIC if you get errors here. */
#include "miibus_if.h"

#define	AGE_CSUM_FEATURES	(CSUM_TCP | CSUM_UDP)

struct age_dmamap_ctx {
	int			nsegs;
	bus_dma_segment_t	*segs;
};

static int	age_probe(device_t);
static int	age_attach(device_t);
static int	age_detach(device_t);
static int	age_shutdown(device_t);
static int	age_suspend(device_t);
static int	age_resume(device_t);

static int	age_miibus_readreg(device_t, int, int);
static int	age_miibus_writereg(device_t, int, int, int);
static void	age_miibus_statchg(device_t);

static void	age_init(void *);
static int	age_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
static void	age_start(struct ifnet *, struct ifaltq_subque *);
static void	age_watchdog(struct ifnet *);
static void	age_mediastatus(struct ifnet *, struct ifmediareq *);
static int	age_mediachange(struct ifnet *);

static void	age_intr(void *);
static void	age_txintr(struct age_softc *, int);
static void	age_rxintr(struct age_softc *, int);
static void	age_rxeof(struct age_softc *sc, struct rx_rdesc *);

static int	age_dma_alloc(struct age_softc *);
static void	age_dma_free(struct age_softc *);
static void	age_dmamap_cb(void *, bus_dma_segment_t *, int, int);
static void	age_dmamap_buf_cb(void *, bus_dma_segment_t *, int,
		    bus_size_t, int);
static int	age_check_boundary(struct age_softc *);
static int	age_newbuf(struct age_softc *, struct age_rxdesc *, int);
static int	age_encap(struct age_softc *, struct mbuf **);
static void	age_init_tx_ring(struct age_softc *);
static int	age_init_rx_ring(struct age_softc *);
static void	age_init_rr_ring(struct age_softc *);
static void	age_init_cmb_block(struct age_softc *);
static void	age_init_smb_block(struct age_softc *);

static void	age_tick(void *);
static void	age_stop(struct age_softc *);
static void	age_reset(struct age_softc *);
static int	age_read_vpd_word(struct age_softc *, uint32_t, uint32_t,
		    uint32_t *);
static void	age_get_macaddr(struct age_softc *);
static void	age_phy_reset(struct age_softc *);
static void	age_mac_config(struct age_softc *);
static void	age_stats_update(struct age_softc *);
static void	age_stop_txmac(struct age_softc *);
static void	age_stop_rxmac(struct age_softc *);
static void	age_rxvlan(struct age_softc *);
static void	age_rxfilter(struct age_softc *);
#ifdef wol_notyet
static void	age_setwol(struct age_softc *);
#endif

static void	age_sysctl_node(struct age_softc *);
static int	sysctl_age_stats(SYSCTL_HANDLER_ARGS);
static int	sysctl_hw_age_int_mod(SYSCTL_HANDLER_ARGS);

/*
 * Devices supported by this driver.
 */
static struct age_dev {
	uint16_t	age_vendorid;
	uint16_t	age_deviceid;
	const char	*age_name;
} age_devs[] = {
	{ VENDORID_ATTANSIC, DEVICEID_ATTANSIC_L1,
	    "Attansic Technology Corp, L1 Gigabit Ethernet" },
};

static device_method_t age_methods[] = {
	/* Device interface. */
	DEVMETHOD(device_probe,		age_probe),
	DEVMETHOD(device_attach,	age_attach),
	DEVMETHOD(device_detach,	age_detach),
	DEVMETHOD(device_shutdown,	age_shutdown),
	DEVMETHOD(device_suspend,	age_suspend),
	DEVMETHOD(device_resume,	age_resume),

	/* Bus interface. */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface. */
	DEVMETHOD(miibus_readreg,	age_miibus_readreg),
	DEVMETHOD(miibus_writereg,	age_miibus_writereg),
	DEVMETHOD(miibus_statchg,	age_miibus_statchg),

	{ NULL, NULL }
};

static driver_t age_driver = {
	"age",
	age_methods,
	sizeof(struct age_softc)
};

static devclass_t age_devclass;

DECLARE_DUMMY_MODULE(if_age);
MODULE_DEPEND(if_age, miibus, 1, 1, 1);
DRIVER_MODULE(if_age, pci, age_driver, age_devclass, NULL, NULL);
DRIVER_MODULE(miibus, age, miibus_driver, miibus_devclass, NULL, NULL);

/*
 * Read a PHY register on the MII of the L1.
 */
static int
age_miibus_readreg(device_t dev, int phy, int reg)
{
	struct age_softc *sc;
	uint32_t v;
	int i;

	sc = device_get_softc(dev);
	if (phy != sc->age_phyaddr)
		return (0);

	CSR_WRITE_4(sc, AGE_MDIO, MDIO_OP_EXECUTE | MDIO_OP_READ |
	    MDIO_SUP_PREAMBLE | MDIO_CLK_25_4 | MDIO_REG_ADDR(reg));
	for (i = AGE_PHY_TIMEOUT; i > 0; i--) {
		DELAY(1);
		v = CSR_READ_4(sc, AGE_MDIO);
		if ((v & (MDIO_OP_EXECUTE | MDIO_OP_BUSY)) == 0)
			break;
	}

	if (i == 0) {
		device_printf(sc->age_dev, "phy read timeout : %d\n", reg);
		return (0);
	}

	return ((v & MDIO_DATA_MASK) >> MDIO_DATA_SHIFT);
}

/*
 * Write a PHY register on the MII of the L1.
 */
static int
age_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct age_softc *sc;
	uint32_t v;
	int i;

	sc = device_get_softc(dev);
	if (phy != sc->age_phyaddr)
		return (0);

	CSR_WRITE_4(sc, AGE_MDIO, MDIO_OP_EXECUTE | MDIO_OP_WRITE |
	    (val & MDIO_DATA_MASK) << MDIO_DATA_SHIFT |
	    MDIO_SUP_PREAMBLE | MDIO_CLK_25_4 | MDIO_REG_ADDR(reg));
	for (i = AGE_PHY_TIMEOUT; i > 0; i--) {
		DELAY(1);
		v = CSR_READ_4(sc, AGE_MDIO);
		if ((v & (MDIO_OP_EXECUTE | MDIO_OP_BUSY)) == 0)
			break;
	}

	if (i == 0)
		device_printf(sc->age_dev, "phy write timeout : %d\n", reg);

	return (0);
}

/*
 * Callback from MII layer when media changes.
 */
static void
age_miibus_statchg(device_t dev)
{
	struct age_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct mii_data *mii;

	ASSERT_SERIALIZED(ifp->if_serializer);

	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return;

	mii = device_get_softc(sc->age_miibus);

	sc->age_flags &= ~AGE_FLAG_LINK;
	if ((mii->mii_media_status & IFM_AVALID) != 0) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
		case IFM_1000_T:
			sc->age_flags |= AGE_FLAG_LINK;
			break;
		default:
			break;
		}
	}

	/* Stop Rx/Tx MACs. */
	age_stop_rxmac(sc);
	age_stop_txmac(sc);

	/* Program MACs with resolved speed/duplex/flow-control. */
	if ((sc->age_flags & AGE_FLAG_LINK) != 0) {
		uint32_t reg;

		age_mac_config(sc);

		reg = CSR_READ_4(sc, AGE_MAC_CFG);
		/* Restart DMA engine and Tx/Rx MAC. */
		CSR_WRITE_4(sc, AGE_DMA_CFG, CSR_READ_4(sc, AGE_DMA_CFG) |
		    DMA_CFG_RD_ENB | DMA_CFG_WR_ENB);
		reg |= MAC_CFG_TX_ENB | MAC_CFG_RX_ENB;
		CSR_WRITE_4(sc, AGE_MAC_CFG, reg);
	}
}

/*
 * Get the current interface media status.
 */
static void
age_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct age_softc *sc = ifp->if_softc;
	struct mii_data *mii = device_get_softc(sc->age_miibus);

	ASSERT_SERIALIZED(ifp->if_serializer);

	mii_pollstat(mii);
	ifmr->ifm_status = mii->mii_media_status;
	ifmr->ifm_active = mii->mii_media_active;
}

/*
 * Set hardware to newly-selected media.
 */
static int
age_mediachange(struct ifnet *ifp)
{
	struct age_softc *sc = ifp->if_softc;
	struct mii_data *mii = device_get_softc(sc->age_miibus);
	int error;

	ASSERT_SERIALIZED(ifp->if_serializer);

	if (mii->mii_instance != 0) {
		struct mii_softc *miisc;

		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
	error = mii_mediachg(mii);

	return (error);
}

static int
age_read_vpd_word(struct age_softc *sc, uint32_t vpdc, uint32_t offset,
    uint32_t *word)
{
	int i;

	pci_write_config(sc->age_dev, vpdc + PCIR_VPD_ADDR, offset, 2);
	for (i = AGE_TIMEOUT; i > 0; i--) {
		DELAY(10);
		if ((pci_read_config(sc->age_dev, vpdc + PCIR_VPD_ADDR, 2) &
		    0x8000) == 0x8000)
			break;
	}
	if (i == 0) {
		device_printf(sc->age_dev, "VPD read timeout!\n");
		*word = 0;
		return (ETIMEDOUT);
	}

	*word = pci_read_config(sc->age_dev, vpdc + PCIR_VPD_DATA, 4);
	return (0);
}

static int
age_probe(device_t dev)
{
	struct age_dev *sp;
	int i;
	uint16_t vendor, devid;

	vendor = pci_get_vendor(dev);
	devid = pci_get_device(dev);
	sp = age_devs;
	for (i = 0; i < NELEM(age_devs); i++, sp++) {
		if (vendor == sp->age_vendorid &&
		    devid == sp->age_deviceid) {
			device_set_desc(dev, sp->age_name);
			return (0);
		}
	}
	return (ENXIO);
}

static void
age_get_macaddr(struct age_softc *sc)
{
	uint32_t ea[2], off, reg, word;
	int vpd_error, match, vpdc;

	reg = CSR_READ_4(sc, AGE_SPI_CTRL);
	if ((reg & SPI_VPD_ENB) != 0) {
		/* Get VPD stored in TWSI EEPROM. */
		reg &= ~SPI_VPD_ENB;
		CSR_WRITE_4(sc, AGE_SPI_CTRL, reg);
	}

	ea[0] = ea[1] = 0;
	vpdc = pci_get_vpdcap_ptr(sc->age_dev);
	if (vpdc) {
		vpd_error = 0;

		/*
		 * The PCI VPD capability exists, but it seems that it is
		 * not in the standard form stated in the PCI VPD
		 * specification, so the driver cannot use
		 * pci_get_vpd_readonly(9) with keyword 'NA'.
		 * Search the VPD data starting at address 0x0100.  The data
		 * should be used as initializers to set the AGE_PAR0 and
		 * AGE_PAR1 registers, including other PCI configuration
		 * registers.
		 */
		word = 0;
		match = 0;
		reg = 0;
		for (off = AGE_VPD_REG_CONF_START; off < AGE_VPD_REG_CONF_END;
		    off += sizeof(uint32_t)) {
			vpd_error = age_read_vpd_word(sc, vpdc, off, &word);
			if (vpd_error != 0)
				break;
			if (match != 0) {
				switch (reg) {
				case AGE_PAR0:
					ea[0] = word;
					break;
				case AGE_PAR1:
					ea[1] = word;
					break;
				default:
					break;
				}
				match = 0;
			} else if ((word & 0xFF) == AGE_VPD_REG_CONF_SIG) {
				match = 1;
				reg = word >> 16;
			} else
				break;
		}
		if (off >= AGE_VPD_REG_CONF_END)
			vpd_error = ENOENT;
		if (vpd_error == 0) {
			/*
			 * Don't blindly trust the ethernet address obtained
			 * from VPD.  Check whether the ethernet address is
			 * a valid one.  Otherwise fall back to reading the
			 * PAR registers.
			 */
433 */ 434 ea[1] &= 0xFFFF; 435 if ((ea[0] == 0 && ea[1] == 0) || 436 (ea[0] == 0xFFFFFFFF && ea[1] == 0xFFFF)) { 437 if (bootverbose) 438 device_printf(sc->age_dev, 439 "invalid ethernet address " 440 "returned from VPD.\n"); 441 vpd_error = EINVAL; 442 } 443 } 444 if (vpd_error != 0 && (bootverbose)) 445 device_printf(sc->age_dev, "VPD access failure!\n"); 446 } else { 447 vpd_error = ENOENT; 448 if (bootverbose) 449 device_printf(sc->age_dev, 450 "PCI VPD capability not found!\n"); 451 } 452 453 /* 454 * It seems that L1 also provides a way to extract ethernet 455 * address via SPI flash interface. Because SPI flash memory 456 * device of different vendors vary in their instruction 457 * codes for read ID instruction, it's very hard to get 458 * instructions codes without detailed information for the 459 * flash memory device used on ethernet controller. To simplify 460 * code, just read AGE_PAR0/AGE_PAR1 register to get ethernet 461 * address which is supposed to be set by hardware during 462 * power on reset. 463 */ 464 if (vpd_error != 0) { 465 /* 466 * VPD is mapped to SPI flash memory or BIOS set it. 467 */ 468 ea[0] = CSR_READ_4(sc, AGE_PAR0); 469 ea[1] = CSR_READ_4(sc, AGE_PAR1); 470 } 471 472 ea[1] &= 0xFFFF; 473 if ((ea[0] == 0 && ea[1] == 0) || 474 (ea[0] == 0xFFFFFFFF && ea[1] == 0xFFFF)) { 475 device_printf(sc->age_dev, 476 "generating fake ethernet address.\n"); 477 ea[0] = karc4random(); 478 /* Set OUI to ASUSTek COMPUTER INC. */ 479 sc->age_eaddr[0] = 0x00; 480 sc->age_eaddr[1] = 0x1B; 481 sc->age_eaddr[2] = 0xFC; 482 sc->age_eaddr[3] = (ea[0] >> 16) & 0xFF; 483 sc->age_eaddr[4] = (ea[0] >> 8) & 0xFF; 484 sc->age_eaddr[5] = (ea[0] >> 0) & 0xFF; 485 } else { 486 sc->age_eaddr[0] = (ea[1] >> 8) & 0xFF; 487 sc->age_eaddr[1] = (ea[1] >> 0) & 0xFF; 488 sc->age_eaddr[2] = (ea[0] >> 24) & 0xFF; 489 sc->age_eaddr[3] = (ea[0] >> 16) & 0xFF; 490 sc->age_eaddr[4] = (ea[0] >> 8) & 0xFF; 491 sc->age_eaddr[5] = (ea[0] >> 0) & 0xFF; 492 } 493 } 494 495 static void 496 age_phy_reset(struct age_softc *sc) 497 { 498 /* Reset PHY. 
static void
age_phy_reset(struct age_softc *sc)
{
	/* Reset PHY. */
	CSR_WRITE_4(sc, AGE_GPHY_CTRL, GPHY_CTRL_RST);
	DELAY(1000);
	CSR_WRITE_4(sc, AGE_GPHY_CTRL, GPHY_CTRL_CLR);
	DELAY(1000);
}

static int
age_attach(device_t dev)
{
	struct age_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint8_t pcie_ptr;
	int error;

	error = 0;
	sc->age_dev = dev;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));

	callout_init(&sc->age_tick_ch);

#ifndef BURN_BRIDGES
	if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
		uint32_t irq, mem;

		irq = pci_read_config(dev, PCIR_INTLINE, 4);
		mem = pci_read_config(dev, AGE_PCIR_BAR, 4);

		device_printf(dev, "chip is in D%d power mode "
		    "-- setting to D0\n", pci_get_powerstate(dev));

		pci_set_powerstate(dev, PCI_POWERSTATE_D0);

		pci_write_config(dev, PCIR_INTLINE, irq, 4);
		pci_write_config(dev, AGE_PCIR_BAR, mem, 4);
	}
#endif	/* !BURN_BRIDGES */

	/* Enable bus mastering */
	pci_enable_busmaster(dev);

	/*
	 * Allocate memory mapped IO
	 */
	sc->age_mem_rid = AGE_PCIR_BAR;
	sc->age_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &sc->age_mem_rid, RF_ACTIVE);
	if (sc->age_mem_res == NULL) {
		device_printf(dev, "can't allocate IO memory\n");
		return ENXIO;
	}
	sc->age_mem_bt = rman_get_bustag(sc->age_mem_res);
	sc->age_mem_bh = rman_get_bushandle(sc->age_mem_res);

	/*
	 * Allocate IRQ
	 */
	sc->age_irq_rid = 0;
	sc->age_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
	    &sc->age_irq_rid, RF_SHAREABLE | RF_ACTIVE);
	if (sc->age_irq_res == NULL) {
		device_printf(dev, "can't allocate irq\n");
		error = ENXIO;
		goto fail;
	}

	/* Set PHY address. */
	sc->age_phyaddr = AGE_PHY_ADDR;

	/* Reset PHY. */
	age_phy_reset(sc);

	/* Reset the ethernet controller. */
	age_reset(sc);

	/* Get PCI and chip id/revision. */
	sc->age_rev = pci_get_revid(dev);
	sc->age_chip_rev = CSR_READ_4(sc, AGE_MASTER_CFG) >>
	    MASTER_CHIP_REV_SHIFT;
	if (bootverbose) {
		device_printf(dev, "PCI device revision : 0x%04x\n",
		    sc->age_rev);
		device_printf(dev, "Chip id/revision : 0x%04x\n",
		    sc->age_chip_rev);
	}

	/*
	 * XXX
	 * Uninitialized hardware returns an invalid chip id/revision
	 * as well as 0xFFFFFFFF for the Tx/Rx fifo length.  It seems that
	 * an unplugged cable results in putting the hardware into automatic
	 * power down mode, which in turn returns an invalid chip revision.
	 */
	if (sc->age_chip_rev == 0xFFFF) {
		device_printf(dev, "invalid chip revision : 0x%04x -- "
		    "not initialized?\n", sc->age_chip_rev);
		error = ENXIO;
		goto fail;
	}
	device_printf(dev, "%d Tx FIFO, %d Rx FIFO\n",
	    CSR_READ_4(sc, AGE_SRAM_TX_FIFO_LEN),
	    CSR_READ_4(sc, AGE_SRAM_RX_FIFO_LEN));

	/* Get DMA parameters from the PCIe device control register. */
	pcie_ptr = pci_get_pciecap_ptr(dev);
	if (pcie_ptr) {
		uint16_t devctl;

		sc->age_flags |= AGE_FLAG_PCIE;
		devctl = pci_read_config(dev, pcie_ptr + PCIER_DEVCTRL, 2);
		/* Max read request size. */
		sc->age_dma_rd_burst = ((devctl >> 12) & 0x07) <<
		    DMA_CFG_RD_BURST_SHIFT;
		/* Max payload size. */
		sc->age_dma_wr_burst = ((devctl >> 5) & 0x07) <<
		    DMA_CFG_WR_BURST_SHIFT;
		if (bootverbose) {
			device_printf(dev, "Read request size : %d bytes.\n",
			    128 << ((devctl >> 12) & 0x07));
			device_printf(dev, "TLP payload size : %d bytes.\n",
			    128 << ((devctl >> 5) & 0x07));
		}
	} else {
		sc->age_dma_rd_burst = DMA_CFG_RD_BURST_128;
		sc->age_dma_wr_burst = DMA_CFG_WR_BURST_128;
	}

	/* Create device sysctl node. */
	age_sysctl_node(sc);

	if ((error = age_dma_alloc(sc)) != 0)
		goto fail;

	/* Load station address. */
	age_get_macaddr(sc);

	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = age_ioctl;
	ifp->if_start = age_start;
	ifp->if_init = age_init;
	ifp->if_watchdog = age_watchdog;
	ifq_set_maxlen(&ifp->if_snd, AGE_TX_RING_CNT - 1);
	ifq_set_ready(&ifp->if_snd);

	ifp->if_capabilities = IFCAP_HWCSUM |
	    IFCAP_VLAN_MTU |
	    IFCAP_VLAN_HWTAGGING;
	ifp->if_hwassist = AGE_CSUM_FEATURES;
	ifp->if_capenable = ifp->if_capabilities;

	/* Set up MII bus. */
	if ((error = mii_phy_probe(dev, &sc->age_miibus, age_mediachange,
	    age_mediastatus)) != 0) {
		device_printf(dev, "no PHY found!\n");
		goto fail;
	}

	ether_ifattach(ifp, sc->age_eaddr, NULL);

	/* Tell the upper layer(s) we support long frames. */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);

	error = bus_setup_intr(dev, sc->age_irq_res, INTR_MPSAFE, age_intr, sc,
	    &sc->age_irq_handle, ifp->if_serializer);
	if (error) {
		device_printf(dev, "could not set up interrupt handler.\n");
		ether_ifdetach(ifp);
		goto fail;
	}

	ifq_set_cpuid(&ifp->if_snd, rman_get_cpuid(sc->age_irq_res));
	return 0;
fail:
	age_detach(dev);
	return (error);
}

static int
age_detach(device_t dev)
{
	struct age_softc *sc = device_get_softc(dev);

	if (device_is_attached(dev)) {
		struct ifnet *ifp = &sc->arpcom.ac_if;

		lwkt_serialize_enter(ifp->if_serializer);
		sc->age_flags |= AGE_FLAG_DETACH;
		age_stop(sc);
		bus_teardown_intr(dev, sc->age_irq_res, sc->age_irq_handle);
		lwkt_serialize_exit(ifp->if_serializer);

		ether_ifdetach(ifp);
	}

	if (sc->age_sysctl_tree != NULL)
		sysctl_ctx_free(&sc->age_sysctl_ctx);

	if (sc->age_miibus != NULL)
		device_delete_child(dev, sc->age_miibus);
	bus_generic_detach(dev);

	if (sc->age_irq_res != NULL) {
		bus_release_resource(dev, SYS_RES_IRQ, sc->age_irq_rid,
		    sc->age_irq_res);
	}
	if (sc->age_mem_res != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, sc->age_mem_rid,
		    sc->age_mem_res);
	}

	age_dma_free(sc);

	return (0);
}

static void
age_sysctl_node(struct age_softc *sc)
{
	int error;

	sysctl_ctx_init(&sc->age_sysctl_ctx);
	sc->age_sysctl_tree = SYSCTL_ADD_NODE(&sc->age_sysctl_ctx,
	    SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO,
	    device_get_nameunit(sc->age_dev),
	    CTLFLAG_RD, 0, "");
	if (sc->age_sysctl_tree == NULL) {
		device_printf(sc->age_dev, "can't add sysctl node\n");
		return;
	}

	SYSCTL_ADD_PROC(&sc->age_sysctl_ctx,
	    SYSCTL_CHILDREN(sc->age_sysctl_tree), OID_AUTO,
	    "stats", CTLTYPE_INT | CTLFLAG_RW, sc, 0, sysctl_age_stats,
	    "I", "Statistics");

	SYSCTL_ADD_PROC(&sc->age_sysctl_ctx,
	    SYSCTL_CHILDREN(sc->age_sysctl_tree), OID_AUTO,
	    "int_mod", CTLTYPE_INT | CTLFLAG_RW, &sc->age_int_mod, 0,
	    sysctl_hw_age_int_mod, "I", "age interrupt moderation");

	/* Pull in device tunables. */
	sc->age_int_mod = AGE_IM_TIMER_DEFAULT;
	error = resource_int_value(device_get_name(sc->age_dev),
	    device_get_unit(sc->age_dev), "int_mod", &sc->age_int_mod);
	if (error == 0) {
		if (sc->age_int_mod < AGE_IM_TIMER_MIN ||
		    sc->age_int_mod > AGE_IM_TIMER_MAX) {
			device_printf(sc->age_dev,
			    "int_mod value out of range; using default: %d\n",
			    AGE_IM_TIMER_DEFAULT);
			sc->age_int_mod = AGE_IM_TIMER_DEFAULT;
		}
	}
}

struct age_dmamap_arg {
	bus_addr_t	age_busaddr;
};

static void
age_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	struct age_dmamap_arg *ctx;

	if (error != 0)
		return;

	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

	ctx = (struct age_dmamap_arg *)arg;
	ctx->age_busaddr = segs[0].ds_addr;
}

/*
 * The Attansic L1 controller has a single register to specify the high
 * address part of DMA blocks.  So all descriptor structures and
 * DMA memory blocks should share the same high address within the given
 * 4GB address space (i.e. crossing a 4GB boundary is not allowed).
 */
static int
age_check_boundary(struct age_softc *sc)
{
	bus_addr_t rx_ring_end, rr_ring_end, tx_ring_end;
	bus_addr_t cmb_block_end, smb_block_end;

	/* Tx/Rx descriptor queues should reside within a 4GB boundary. */
	tx_ring_end = sc->age_rdata.age_tx_ring_paddr + AGE_TX_RING_SZ;
	rx_ring_end = sc->age_rdata.age_rx_ring_paddr + AGE_RX_RING_SZ;
	rr_ring_end = sc->age_rdata.age_rr_ring_paddr + AGE_RR_RING_SZ;
	cmb_block_end = sc->age_rdata.age_cmb_block_paddr + AGE_CMB_BLOCK_SZ;
	smb_block_end = sc->age_rdata.age_smb_block_paddr + AGE_SMB_BLOCK_SZ;

	if ((AGE_ADDR_HI(tx_ring_end) !=
	    AGE_ADDR_HI(sc->age_rdata.age_tx_ring_paddr)) ||
	    (AGE_ADDR_HI(rx_ring_end) !=
	    AGE_ADDR_HI(sc->age_rdata.age_rx_ring_paddr)) ||
	    (AGE_ADDR_HI(rr_ring_end) !=
	    AGE_ADDR_HI(sc->age_rdata.age_rr_ring_paddr)) ||
	    (AGE_ADDR_HI(cmb_block_end) !=
	    AGE_ADDR_HI(sc->age_rdata.age_cmb_block_paddr)) ||
	    (AGE_ADDR_HI(smb_block_end) !=
	    AGE_ADDR_HI(sc->age_rdata.age_smb_block_paddr)))
		return (EFBIG);

	if ((AGE_ADDR_HI(tx_ring_end) != AGE_ADDR_HI(rx_ring_end)) ||
	    (AGE_ADDR_HI(tx_ring_end) != AGE_ADDR_HI(rr_ring_end)) ||
	    (AGE_ADDR_HI(tx_ring_end) != AGE_ADDR_HI(cmb_block_end)) ||
	    (AGE_ADDR_HI(tx_ring_end) != AGE_ADDR_HI(smb_block_end)))
		return (EFBIG);

	return (0);
}

static int
age_dma_alloc(struct age_softc *sc)
{
	struct age_txdesc *txd;
	struct age_rxdesc *rxd;
	bus_addr_t lowaddr;
	struct age_dmamap_arg ctx;
	int error, i;

	lowaddr = BUS_SPACE_MAXADDR;
again:
	/* Create parent ring/DMA block tag. */
	error = bus_dma_tag_create(
	    NULL,			/* parent */
	    1, 0,			/* alignment, boundary */
	    lowaddr,			/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
	    0,				/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    &sc->age_cdata.age_parent_tag);
	if (error != 0) {
		device_printf(sc->age_dev,
		    "could not create parent DMA tag.\n");
		goto fail;
	}
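
	/*
	 * Note: the parent tag above is created with 'lowaddr' starting at
	 * BUS_SPACE_MAXADDR.  If age_check_boundary() later finds that the
	 * rings/blocks do not all live in one 4GB window, everything is
	 * freed, 'lowaddr' is clamped to BUS_SPACE_MAXADDR_32BIT and the
	 * allocation is retried from the 'again' label.
	 */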

	/* Create tag for Tx ring. */
	error = bus_dma_tag_create(
	    sc->age_cdata.age_parent_tag, /* parent */
	    AGE_TX_RING_ALIGN, 0,	/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    AGE_TX_RING_SZ,		/* maxsize */
	    1,				/* nsegments */
	    AGE_TX_RING_SZ,		/* maxsegsize */
	    0,				/* flags */
	    &sc->age_cdata.age_tx_ring_tag);
	if (error != 0) {
		device_printf(sc->age_dev,
		    "could not create Tx ring DMA tag.\n");
		goto fail;
	}

	/* Create tag for Rx ring. */
	error = bus_dma_tag_create(
	    sc->age_cdata.age_parent_tag, /* parent */
	    AGE_RX_RING_ALIGN, 0,	/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    AGE_RX_RING_SZ,		/* maxsize */
	    1,				/* nsegments */
	    AGE_RX_RING_SZ,		/* maxsegsize */
	    0,				/* flags */
	    &sc->age_cdata.age_rx_ring_tag);
	if (error != 0) {
		device_printf(sc->age_dev,
		    "could not create Rx ring DMA tag.\n");
		goto fail;
	}

	/* Create tag for Rx return ring. */
	error = bus_dma_tag_create(
	    sc->age_cdata.age_parent_tag, /* parent */
	    AGE_RR_RING_ALIGN, 0,	/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    AGE_RR_RING_SZ,		/* maxsize */
	    1,				/* nsegments */
	    AGE_RR_RING_SZ,		/* maxsegsize */
	    0,				/* flags */
	    &sc->age_cdata.age_rr_ring_tag);
	if (error != 0) {
		device_printf(sc->age_dev,
		    "could not create Rx return ring DMA tag.\n");
		goto fail;
	}

	/* Create tag for coalescing message block. */
	error = bus_dma_tag_create(
	    sc->age_cdata.age_parent_tag, /* parent */
	    AGE_CMB_ALIGN, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    AGE_CMB_BLOCK_SZ,		/* maxsize */
	    1,				/* nsegments */
	    AGE_CMB_BLOCK_SZ,		/* maxsegsize */
	    0,				/* flags */
	    &sc->age_cdata.age_cmb_block_tag);
	if (error != 0) {
		device_printf(sc->age_dev,
		    "could not create CMB DMA tag.\n");
		goto fail;
	}

	/* Create tag for statistics message block. */
	error = bus_dma_tag_create(
	    sc->age_cdata.age_parent_tag, /* parent */
	    AGE_SMB_ALIGN, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    AGE_SMB_BLOCK_SZ,		/* maxsize */
	    1,				/* nsegments */
	    AGE_SMB_BLOCK_SZ,		/* maxsegsize */
	    0,				/* flags */
	    &sc->age_cdata.age_smb_block_tag);
	if (error != 0) {
		device_printf(sc->age_dev,
		    "could not create SMB DMA tag.\n");
		goto fail;
	}

	/* Allocate DMA'able memory and load the DMA map. */
	error = bus_dmamem_alloc(sc->age_cdata.age_tx_ring_tag,
	    (void **)&sc->age_rdata.age_tx_ring,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO,
	    &sc->age_cdata.age_tx_ring_map);
	if (error != 0) {
		device_printf(sc->age_dev,
		    "could not allocate DMA'able memory for Tx ring.\n");
		goto fail;
	}
	ctx.age_busaddr = 0;
	error = bus_dmamap_load(sc->age_cdata.age_tx_ring_tag,
	    sc->age_cdata.age_tx_ring_map, sc->age_rdata.age_tx_ring,
	    AGE_TX_RING_SZ, age_dmamap_cb, &ctx, 0);
	if (error != 0 || ctx.age_busaddr == 0) {
		device_printf(sc->age_dev,
		    "could not load DMA'able memory for Tx ring.\n");
		goto fail;
	}
	sc->age_rdata.age_tx_ring_paddr = ctx.age_busaddr;

	/* Rx ring */
	error = bus_dmamem_alloc(sc->age_cdata.age_rx_ring_tag,
	    (void **)&sc->age_rdata.age_rx_ring,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO,
	    &sc->age_cdata.age_rx_ring_map);
	if (error != 0) {
		device_printf(sc->age_dev,
		    "could not allocate DMA'able memory for Rx ring.\n");
		goto fail;
	}
	ctx.age_busaddr = 0;
	error = bus_dmamap_load(sc->age_cdata.age_rx_ring_tag,
	    sc->age_cdata.age_rx_ring_map, sc->age_rdata.age_rx_ring,
	    AGE_RX_RING_SZ, age_dmamap_cb, &ctx, 0);
	if (error != 0 || ctx.age_busaddr == 0) {
		device_printf(sc->age_dev,
		    "could not load DMA'able memory for Rx ring.\n");
		goto fail;
	}
	sc->age_rdata.age_rx_ring_paddr = ctx.age_busaddr;

	/* Rx return ring */
	error = bus_dmamem_alloc(sc->age_cdata.age_rr_ring_tag,
	    (void **)&sc->age_rdata.age_rr_ring,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO,
	    &sc->age_cdata.age_rr_ring_map);
	if (error != 0) {
		device_printf(sc->age_dev,
		    "could not allocate DMA'able memory for Rx return ring.\n");
		goto fail;
	}
	ctx.age_busaddr = 0;
	error = bus_dmamap_load(sc->age_cdata.age_rr_ring_tag,
	    sc->age_cdata.age_rr_ring_map, sc->age_rdata.age_rr_ring,
	    AGE_RR_RING_SZ, age_dmamap_cb, &ctx, 0);
	if (error != 0 || ctx.age_busaddr == 0) {
		device_printf(sc->age_dev,
		    "could not load DMA'able memory for Rx return ring.\n");
		goto fail;
	}
	sc->age_rdata.age_rr_ring_paddr = ctx.age_busaddr;

	/* CMB block */
	error = bus_dmamem_alloc(sc->age_cdata.age_cmb_block_tag,
	    (void **)&sc->age_rdata.age_cmb_block,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO,
	    &sc->age_cdata.age_cmb_block_map);
	if (error != 0) {
		device_printf(sc->age_dev,
		    "could not allocate DMA'able memory for CMB block.\n");
		goto fail;
	}
	ctx.age_busaddr = 0;
	error = bus_dmamap_load(sc->age_cdata.age_cmb_block_tag,
	    sc->age_cdata.age_cmb_block_map, sc->age_rdata.age_cmb_block,
	    AGE_CMB_BLOCK_SZ, age_dmamap_cb, &ctx, 0);
	if (error != 0 || ctx.age_busaddr == 0) {
		device_printf(sc->age_dev,
		    "could not load DMA'able memory for CMB block.\n");
		goto fail;
	}
	sc->age_rdata.age_cmb_block_paddr = ctx.age_busaddr;

	/* SMB block */
	error = bus_dmamem_alloc(sc->age_cdata.age_smb_block_tag,
	    (void **)&sc->age_rdata.age_smb_block,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO,
	    &sc->age_cdata.age_smb_block_map);
	if (error != 0) {
		device_printf(sc->age_dev,
		    "could not allocate DMA'able memory for SMB block.\n");
		goto fail;
	}
	ctx.age_busaddr = 0;
	error = bus_dmamap_load(sc->age_cdata.age_smb_block_tag,
	    sc->age_cdata.age_smb_block_map, sc->age_rdata.age_smb_block,
	    AGE_SMB_BLOCK_SZ, age_dmamap_cb, &ctx, 0);
	if (error != 0 || ctx.age_busaddr == 0) {
		device_printf(sc->age_dev,
"could not load DMA'able memory for SMB block.\n"); 1028 goto fail; 1029 } 1030 sc->age_rdata.age_smb_block_paddr = ctx.age_busaddr; 1031 1032 /* 1033 * All ring buffer and DMA blocks should have the same 1034 * high address part of 64bit DMA address space. 1035 */ 1036 if (lowaddr != BUS_SPACE_MAXADDR_32BIT && 1037 (error = age_check_boundary(sc)) != 0) { 1038 device_printf(sc->age_dev, "4GB boundary crossed, " 1039 "switching to 32bit DMA addressing mode.\n"); 1040 age_dma_free(sc); 1041 /* Limit DMA address space to 32bit and try again. */ 1042 lowaddr = BUS_SPACE_MAXADDR_32BIT; 1043 goto again; 1044 } 1045 1046 /* 1047 * Create Tx/Rx buffer parent tag. 1048 * L1 supports full 64bit DMA addressing in Tx/Rx buffers 1049 * so it needs separate parent DMA tag. 1050 */ 1051 error = bus_dma_tag_create( 1052 NULL, /* parent */ 1053 1, 0, /* alignment, boundary */ 1054 BUS_SPACE_MAXADDR, /* lowaddr */ 1055 BUS_SPACE_MAXADDR, /* highaddr */ 1056 NULL, NULL, /* filter, filterarg */ 1057 BUS_SPACE_MAXSIZE_32BIT, /* maxsize */ 1058 0, /* nsegments */ 1059 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ 1060 0, /* flags */ 1061 &sc->age_cdata.age_buffer_tag); 1062 if (error != 0) { 1063 device_printf(sc->age_dev, 1064 "could not create parent buffer DMA tag.\n"); 1065 goto fail; 1066 } 1067 1068 /* Create tag for Tx buffers. */ 1069 error = bus_dma_tag_create( 1070 sc->age_cdata.age_buffer_tag, /* parent */ 1071 1, 0, /* alignment, boundary */ 1072 BUS_SPACE_MAXADDR, /* lowaddr */ 1073 BUS_SPACE_MAXADDR, /* highaddr */ 1074 NULL, NULL, /* filter, filterarg */ 1075 AGE_TSO_MAXSIZE, /* maxsize */ 1076 AGE_MAXTXSEGS, /* nsegments */ 1077 AGE_TSO_MAXSEGSIZE, /* maxsegsize */ 1078 0, /* flags */ 1079 &sc->age_cdata.age_tx_tag); 1080 if (error != 0) { 1081 device_printf(sc->age_dev, "could not create Tx DMA tag.\n"); 1082 goto fail; 1083 } 1084 1085 /* Create tag for Rx buffers. */ 1086 error = bus_dma_tag_create( 1087 sc->age_cdata.age_buffer_tag, /* parent */ 1088 1, 0, /* alignment, boundary */ 1089 BUS_SPACE_MAXADDR, /* lowaddr */ 1090 BUS_SPACE_MAXADDR, /* highaddr */ 1091 NULL, NULL, /* filter, filterarg */ 1092 MCLBYTES, /* maxsize */ 1093 1, /* nsegments */ 1094 MCLBYTES, /* maxsegsize */ 1095 0, /* flags */ 1096 &sc->age_cdata.age_rx_tag); 1097 if (error != 0) { 1098 device_printf(sc->age_dev, "could not create Rx DMA tag.\n"); 1099 goto fail; 1100 } 1101 1102 /* Create DMA maps for Tx buffers. */ 1103 for (i = 0; i < AGE_TX_RING_CNT; i++) { 1104 txd = &sc->age_cdata.age_txdesc[i]; 1105 txd->tx_m = NULL; 1106 txd->tx_dmamap = NULL; 1107 error = bus_dmamap_create(sc->age_cdata.age_tx_tag, 0, 1108 &txd->tx_dmamap); 1109 if (error != 0) { 1110 device_printf(sc->age_dev, 1111 "could not create Tx dmamap.\n"); 1112 goto fail; 1113 } 1114 } 1115 /* Create DMA maps for Rx buffers. 
	/* Create DMA maps for Rx buffers. */
	if ((error = bus_dmamap_create(sc->age_cdata.age_rx_tag, 0,
	    &sc->age_cdata.age_rx_sparemap)) != 0) {
		device_printf(sc->age_dev,
		    "could not create spare Rx dmamap.\n");
		goto fail;
	}
	for (i = 0; i < AGE_RX_RING_CNT; i++) {
		rxd = &sc->age_cdata.age_rxdesc[i];
		rxd->rx_m = NULL;
		rxd->rx_dmamap = NULL;
		error = bus_dmamap_create(sc->age_cdata.age_rx_tag, 0,
		    &rxd->rx_dmamap);
		if (error != 0) {
			device_printf(sc->age_dev,
			    "could not create Rx dmamap.\n");
			goto fail;
		}
	}
fail:
	return (error);
}

static void
age_dma_free(struct age_softc *sc)
{
	struct age_txdesc *txd;
	struct age_rxdesc *rxd;
	int i;

	/* Tx buffers */
	if (sc->age_cdata.age_tx_tag != NULL) {
		for (i = 0; i < AGE_TX_RING_CNT; i++) {
			txd = &sc->age_cdata.age_txdesc[i];
			if (txd->tx_dmamap != NULL) {
				bus_dmamap_destroy(sc->age_cdata.age_tx_tag,
				    txd->tx_dmamap);
				txd->tx_dmamap = NULL;
			}
		}
		bus_dma_tag_destroy(sc->age_cdata.age_tx_tag);
		sc->age_cdata.age_tx_tag = NULL;
	}
	/* Rx buffers */
	if (sc->age_cdata.age_rx_tag != NULL) {
		for (i = 0; i < AGE_RX_RING_CNT; i++) {
			rxd = &sc->age_cdata.age_rxdesc[i];
			if (rxd->rx_dmamap != NULL) {
				bus_dmamap_destroy(sc->age_cdata.age_rx_tag,
				    rxd->rx_dmamap);
				rxd->rx_dmamap = NULL;
			}
		}
		if (sc->age_cdata.age_rx_sparemap != NULL) {
			bus_dmamap_destroy(sc->age_cdata.age_rx_tag,
			    sc->age_cdata.age_rx_sparemap);
			sc->age_cdata.age_rx_sparemap = NULL;
		}
		bus_dma_tag_destroy(sc->age_cdata.age_rx_tag);
		sc->age_cdata.age_rx_tag = NULL;
	}
	/* Tx ring. */
	if (sc->age_cdata.age_tx_ring_tag != NULL) {
		if (sc->age_cdata.age_tx_ring_map != NULL)
			bus_dmamap_unload(sc->age_cdata.age_tx_ring_tag,
			    sc->age_cdata.age_tx_ring_map);
		if (sc->age_cdata.age_tx_ring_map != NULL &&
		    sc->age_rdata.age_tx_ring != NULL)
			bus_dmamem_free(sc->age_cdata.age_tx_ring_tag,
			    sc->age_rdata.age_tx_ring,
			    sc->age_cdata.age_tx_ring_map);
		sc->age_rdata.age_tx_ring = NULL;
		sc->age_cdata.age_tx_ring_map = NULL;
		bus_dma_tag_destroy(sc->age_cdata.age_tx_ring_tag);
		sc->age_cdata.age_tx_ring_tag = NULL;
	}
	/* Rx ring. */
	if (sc->age_cdata.age_rx_ring_tag != NULL) {
		if (sc->age_cdata.age_rx_ring_map != NULL)
			bus_dmamap_unload(sc->age_cdata.age_rx_ring_tag,
			    sc->age_cdata.age_rx_ring_map);
		if (sc->age_cdata.age_rx_ring_map != NULL &&
		    sc->age_rdata.age_rx_ring != NULL)
			bus_dmamem_free(sc->age_cdata.age_rx_ring_tag,
			    sc->age_rdata.age_rx_ring,
			    sc->age_cdata.age_rx_ring_map);
		sc->age_rdata.age_rx_ring = NULL;
		sc->age_cdata.age_rx_ring_map = NULL;
		bus_dma_tag_destroy(sc->age_cdata.age_rx_ring_tag);
		sc->age_cdata.age_rx_ring_tag = NULL;
	}
	/* Rx return ring. */
	if (sc->age_cdata.age_rr_ring_tag != NULL) {
		if (sc->age_cdata.age_rr_ring_map != NULL)
			bus_dmamap_unload(sc->age_cdata.age_rr_ring_tag,
			    sc->age_cdata.age_rr_ring_map);
		if (sc->age_cdata.age_rr_ring_map != NULL &&
		    sc->age_rdata.age_rr_ring != NULL)
			bus_dmamem_free(sc->age_cdata.age_rr_ring_tag,
			    sc->age_rdata.age_rr_ring,
			    sc->age_cdata.age_rr_ring_map);
		sc->age_rdata.age_rr_ring = NULL;
		sc->age_cdata.age_rr_ring_map = NULL;
		bus_dma_tag_destroy(sc->age_cdata.age_rr_ring_tag);
		sc->age_cdata.age_rr_ring_tag = NULL;
	}
	/* CMB block */
	if (sc->age_cdata.age_cmb_block_tag != NULL) {
		if (sc->age_cdata.age_cmb_block_map != NULL)
			bus_dmamap_unload(sc->age_cdata.age_cmb_block_tag,
			    sc->age_cdata.age_cmb_block_map);
		if (sc->age_cdata.age_cmb_block_map != NULL &&
		    sc->age_rdata.age_cmb_block != NULL)
			bus_dmamem_free(sc->age_cdata.age_cmb_block_tag,
			    sc->age_rdata.age_cmb_block,
			    sc->age_cdata.age_cmb_block_map);
		sc->age_rdata.age_cmb_block = NULL;
		sc->age_cdata.age_cmb_block_map = NULL;
		bus_dma_tag_destroy(sc->age_cdata.age_cmb_block_tag);
		sc->age_cdata.age_cmb_block_tag = NULL;
	}
	/* SMB block */
	if (sc->age_cdata.age_smb_block_tag != NULL) {
		if (sc->age_cdata.age_smb_block_map != NULL)
			bus_dmamap_unload(sc->age_cdata.age_smb_block_tag,
			    sc->age_cdata.age_smb_block_map);
		if (sc->age_cdata.age_smb_block_map != NULL &&
		    sc->age_rdata.age_smb_block != NULL)
			bus_dmamem_free(sc->age_cdata.age_smb_block_tag,
			    sc->age_rdata.age_smb_block,
			    sc->age_cdata.age_smb_block_map);
		sc->age_rdata.age_smb_block = NULL;
		sc->age_cdata.age_smb_block_map = NULL;
		bus_dma_tag_destroy(sc->age_cdata.age_smb_block_tag);
		sc->age_cdata.age_smb_block_tag = NULL;
	}

	if (sc->age_cdata.age_buffer_tag != NULL) {
		bus_dma_tag_destroy(sc->age_cdata.age_buffer_tag);
		sc->age_cdata.age_buffer_tag = NULL;
	}
	if (sc->age_cdata.age_parent_tag != NULL) {
		bus_dma_tag_destroy(sc->age_cdata.age_parent_tag);
		sc->age_cdata.age_parent_tag = NULL;
	}
}

/*
 * Make sure the interface is stopped at reboot time.
 */
static int
age_shutdown(device_t dev)
{
	return age_suspend(dev);
}

#ifdef wol_notyet

static void
age_setwol(struct age_softc *sc)
{
	struct ifnet *ifp;
	struct mii_data *mii;
	uint32_t reg, pmcs;
	uint16_t pmstat;
	int aneg, i, pmc;

	AGE_LOCK_ASSERT(sc);

	if (pci_find_extcap(sc->age_dev, PCIY_PMG, &pmc) == 0) {
		CSR_WRITE_4(sc, AGE_WOL_CFG, 0);
		/*
		 * No PME capability, PHY power down.
		 * XXX
		 * For an unknown reason, powering down the PHY resulted
		 * in unexpected results such as inaccessibility of the
		 * hardware on a freshly rebooted system.  Disable
		 * powering down the PHY until more information on
		 * Attansic/Atheros PHY hardware is available.
		 */
#ifdef notyet
		age_miibus_writereg(sc->age_dev, sc->age_phyaddr,
		    MII_BMCR, BMCR_PDOWN);
#endif
		return;
	}

	ifp = sc->age_ifp;
	if ((ifp->if_capenable & IFCAP_WOL) != 0) {
		/*
		 * Note, this driver resets the link speed to 10/100Mbps with
		 * auto-negotiation, but we don't know whether that operation
		 * will succeed or not, as we have no control after powering
		 * off.  If the renegotiation fails, WOL may not work.
		 * Running at 1Gbps would draw more power than the 375mA at
		 * 3.3V specified in the PCI specification, and that would
		 * result in power to the ethernet controller being shut
		 * down completely.
		 *
		 * TODO
		 * Save the current negotiated media speed/duplex/flow-control
		 * to the softc and restore the same link again after
		 * resuming.  PHY handling such as power down/resetting to
		 * 100Mbps may be better handled in the suspend method of the
		 * phy driver.
		 */
		mii = device_get_softc(sc->age_miibus);
		mii_pollstat(mii);
		aneg = 0;
		if ((mii->mii_media_status & IFM_AVALID) != 0) {
			switch (IFM_SUBTYPE(mii->mii_media_active)) {
			case IFM_10_T:
			case IFM_100_TX:
				goto got_link;
			case IFM_1000_T:
				aneg++;
			default:
				break;
			}
		}
		age_miibus_writereg(sc->age_dev, sc->age_phyaddr,
		    MII_100T2CR, 0);
		age_miibus_writereg(sc->age_dev, sc->age_phyaddr,
		    MII_ANAR, ANAR_TX_FD | ANAR_TX | ANAR_10_FD |
		    ANAR_10 | ANAR_CSMA);
		age_miibus_writereg(sc->age_dev, sc->age_phyaddr,
		    MII_BMCR, BMCR_RESET | BMCR_AUTOEN | BMCR_STARTNEG);
		DELAY(1000);
		if (aneg != 0) {
			/* Poll link state until age(4) gets a 10/100 link. */
			for (i = 0; i < MII_ANEGTICKS_GIGE; i++) {
				mii_pollstat(mii);
				if ((mii->mii_media_status & IFM_AVALID) != 0) {
					switch (IFM_SUBTYPE(
					    mii->mii_media_active)) {
					case IFM_10_T:
					case IFM_100_TX:
						age_mac_config(sc);
						goto got_link;
					default:
						break;
					}
				}
				AGE_UNLOCK(sc);
				pause("agelnk", hz);
				AGE_LOCK(sc);
			}
			if (i == MII_ANEGTICKS_GIGE)
				device_printf(sc->age_dev,
				    "establishing link failed, "
				    "WOL may not work!");
		}
		/*
		 * No link, force the MAC to have a 100Mbps, full-duplex
		 * link.  This is the last resort and may/may not work.
		 */
		mii->mii_media_status = IFM_AVALID | IFM_ACTIVE;
		mii->mii_media_active = IFM_ETHER | IFM_100_TX | IFM_FDX;
		age_mac_config(sc);
	}

got_link:
	pmcs = 0;
	if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0)
		pmcs |= WOL_CFG_MAGIC | WOL_CFG_MAGIC_ENB;
	CSR_WRITE_4(sc, AGE_WOL_CFG, pmcs);
	reg = CSR_READ_4(sc, AGE_MAC_CFG);
	reg &= ~(MAC_CFG_DBG | MAC_CFG_PROMISC);
	reg &= ~(MAC_CFG_ALLMULTI | MAC_CFG_BCAST);
	if ((ifp->if_capenable & IFCAP_WOL_MCAST) != 0)
		reg |= MAC_CFG_ALLMULTI | MAC_CFG_BCAST;
	if ((ifp->if_capenable & IFCAP_WOL) != 0) {
		reg |= MAC_CFG_RX_ENB;
		CSR_WRITE_4(sc, AGE_MAC_CFG, reg);
	}

	/* Request PME. */
	pmstat = pci_read_config(sc->age_dev, pmc + PCIR_POWER_STATUS, 2);
	pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
	if ((ifp->if_capenable & IFCAP_WOL) != 0)
		pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
	pci_write_config(sc->age_dev, pmc + PCIR_POWER_STATUS, pmstat, 2);
#ifdef notyet
	/* See above for powering down PHY issues. */
	if ((ifp->if_capenable & IFCAP_WOL) == 0) {
		/* No WOL, PHY power down. */
		age_miibus_writereg(sc->age_dev, sc->age_phyaddr,
		    MII_BMCR, BMCR_PDOWN);
	}
#endif
}

#endif	/* wol_notyet */

static int
age_suspend(device_t dev)
{
	struct age_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	lwkt_serialize_enter(ifp->if_serializer);
	age_stop(sc);
#ifdef wol_notyet
	age_setwol(sc);
#endif
	lwkt_serialize_exit(ifp->if_serializer);

	return (0);
}

static int
age_resume(device_t dev)
{
	struct age_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint16_t cmd;

	lwkt_serialize_enter(ifp->if_serializer);

	/*
	 * Clear INTx emulation disable for hardware that sets it
	 * during the resume event.  From Linux.
	 */
	cmd = pci_read_config(sc->age_dev, PCIR_COMMAND, 2);
	if ((cmd & 0x0400) != 0) {
		cmd &= ~0x0400;
		pci_write_config(sc->age_dev, PCIR_COMMAND, cmd, 2);
	}
	if ((ifp->if_flags & IFF_UP) != 0)
		age_init(sc);

	lwkt_serialize_exit(ifp->if_serializer);

	return (0);
}

static int
age_encap(struct age_softc *sc, struct mbuf **m_head)
{
	struct age_txdesc *txd, *txd_last;
	struct tx_desc *desc;
	struct mbuf *m;
	struct age_dmamap_ctx ctx;
	bus_dma_segment_t txsegs[AGE_MAXTXSEGS];
	bus_dmamap_t map;
	uint32_t cflags, poff, vtag;
	int error, i, nsegs, prod;

	M_ASSERTPKTHDR((*m_head));

	m = *m_head;
	cflags = vtag = 0;
	poff = 0;

	prod = sc->age_cdata.age_tx_prod;
	txd = &sc->age_cdata.age_txdesc[prod];
	txd_last = txd;
	map = txd->tx_dmamap;

	ctx.nsegs = AGE_MAXTXSEGS;
	ctx.segs = txsegs;
	error = bus_dmamap_load_mbuf(sc->age_cdata.age_tx_tag, map,
	    *m_head, age_dmamap_buf_cb, &ctx, BUS_DMA_NOWAIT);
	if (!error && ctx.nsegs == 0) {
		bus_dmamap_unload(sc->age_cdata.age_tx_tag, map);
		error = EFBIG;
	}
	if (error == EFBIG) {
		m = m_defrag(*m_head, MB_DONTWAIT);
		if (m == NULL) {
			m_freem(*m_head);
			*m_head = NULL;
			return (ENOBUFS);
		}
		*m_head = m;

		ctx.nsegs = AGE_MAXTXSEGS;
		ctx.segs = txsegs;
		error = bus_dmamap_load_mbuf(sc->age_cdata.age_tx_tag, map,
		    *m_head, age_dmamap_buf_cb, &ctx, BUS_DMA_NOWAIT);
		if (error || ctx.nsegs == 0) {
			if (!error) {
				bus_dmamap_unload(sc->age_cdata.age_tx_tag,
				    map);
				error = EFBIG;
			}
			m_freem(*m_head);
			*m_head = NULL;
			return (error);
		}
	} else if (error != 0) {
		return (error);
	}
	nsegs = ctx.nsegs;

	if (nsegs == 0) {
		m_freem(*m_head);
		*m_head = NULL;
		return (EIO);
	}

	/* Check descriptor overrun. */
	if (sc->age_cdata.age_tx_cnt + nsegs >= AGE_TX_RING_CNT - 2) {
		bus_dmamap_unload(sc->age_cdata.age_tx_tag, map);
		return (ENOBUFS);
	}

	m = *m_head;
	/* Configure Tx IP/TCP/UDP checksum offload. */
	if ((m->m_pkthdr.csum_flags & AGE_CSUM_FEATURES) != 0) {
		cflags |= AGE_TD_CSUM;
		if ((m->m_pkthdr.csum_flags & CSUM_TCP) != 0)
			cflags |= AGE_TD_TCPCSUM;
		if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0)
			cflags |= AGE_TD_UDPCSUM;
		/* Set checksum start offset. */
		cflags |= (poff << AGE_TD_CSUM_PLOADOFFSET_SHIFT);
		/* Set checksum insertion position of TCP/UDP. */
		cflags |= ((poff + m->m_pkthdr.csum_data) <<
		    AGE_TD_CSUM_XSUMOFFSET_SHIFT);
	}

	/* Configure VLAN hardware tag insertion. */
	if ((m->m_flags & M_VLANTAG) != 0) {
		vtag = AGE_TX_VLAN_TAG(m->m_pkthdr.ether_vlantag);
		vtag = ((vtag << AGE_TD_VLAN_SHIFT) & AGE_TD_VLAN_MASK);
		cflags |= AGE_TD_INSERT_VLAN_TAG;
	}

	desc = NULL;
	for (i = 0; i < nsegs; i++) {
		desc = &sc->age_rdata.age_tx_ring[prod];
		desc->addr = htole64(txsegs[i].ds_addr);
		desc->len = htole32(AGE_TX_BYTES(txsegs[i].ds_len) | vtag);
		desc->flags = htole32(cflags);
		sc->age_cdata.age_tx_cnt++;
		AGE_DESC_INC(prod, AGE_TX_RING_CNT);
	}
	/* Update producer index. */
	sc->age_cdata.age_tx_prod = prod;

	/* Set EOP on the last descriptor. */
	prod = (prod + AGE_TX_RING_CNT - 1) % AGE_TX_RING_CNT;
	desc = &sc->age_rdata.age_tx_ring[prod];
	desc->flags |= htole32(AGE_TD_EOP);

	/* Swap dmamap of the first and the last. */
	txd = &sc->age_cdata.age_txdesc[prod];
	map = txd_last->tx_dmamap;
	txd_last->tx_dmamap = txd->tx_dmamap;
	txd->tx_dmamap = map;
	txd->tx_m = m;

	/* Sync descriptors. */
	bus_dmamap_sync(sc->age_cdata.age_tx_tag, map, BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->age_cdata.age_tx_ring_tag,
	    sc->age_cdata.age_tx_ring_map, BUS_DMASYNC_PREWRITE);

	return (0);
}

static void
age_start(struct ifnet *ifp, struct ifaltq_subque *ifsq)
{
	struct age_softc *sc = ifp->if_softc;
	struct mbuf *m_head;
	int enq;

	ASSERT_ALTQ_SQ_DEFAULT(ifp, ifsq);
	ASSERT_SERIALIZED(ifp->if_serializer);

	if ((sc->age_flags & AGE_FLAG_LINK) == 0) {
		ifq_purge(&ifp->if_snd);
		return;
	}

	if ((ifp->if_flags & IFF_RUNNING) == 0 || ifq_is_oactive(&ifp->if_snd))
		return;

	enq = 0;
	while (!ifq_is_empty(&ifp->if_snd)) {
		m_head = ifq_dequeue(&ifp->if_snd, NULL);
		if (m_head == NULL)
			break;

		/*
		 * Pack the data into the transmit ring.  If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 */
		if (age_encap(sc, &m_head)) {
			if (m_head == NULL)
				break;
			ifq_prepend(&ifp->if_snd, m_head);
			ifq_set_oactive(&ifp->if_snd);
			break;
		}
		enq = 1;

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		ETHER_BPF_MTAP(ifp, m_head);
	}

	if (enq) {
		/* Update mbox. */
		AGE_COMMIT_MBOX(sc);
		/* Set a timeout in case the chip goes out to lunch. */
		ifp->if_timer = AGE_TX_TIMEOUT;
	}
}
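
/*
 * Watchdog: ifp->if_timer is armed in age_start() (AGE_TX_TIMEOUT) and
 * disarmed in age_txintr() once the Tx ring drains, so this routine only
 * runs when transmit completions stop arriving.
 */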
static void
age_watchdog(struct ifnet *ifp)
{
	struct age_softc *sc = ifp->if_softc;

	ASSERT_SERIALIZED(ifp->if_serializer);

	if ((sc->age_flags & AGE_FLAG_LINK) == 0) {
		if_printf(ifp, "watchdog timeout (missed link)\n");
		ifp->if_oerrors++;
		age_init(sc);
		return;
	}

	if (sc->age_cdata.age_tx_cnt == 0) {
		if_printf(ifp,
		    "watchdog timeout (missed Tx interrupts) -- recovering\n");
		if (!ifq_is_empty(&ifp->if_snd))
			if_devstart(ifp);
		return;
	}

	if_printf(ifp, "watchdog timeout\n");
	ifp->if_oerrors++;
	age_init(sc);
	if (!ifq_is_empty(&ifp->if_snd))
		if_devstart(ifp);
}

static int
age_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data, struct ucred *cr)
{
	struct age_softc *sc = ifp->if_softc;
	struct ifreq *ifr;
	struct mii_data *mii;
	uint32_t reg;
	int error, mask;

	ASSERT_SERIALIZED(ifp->if_serializer);

	ifr = (struct ifreq *)data;
	error = 0;
	switch (cmd) {
	case SIOCSIFMTU:
		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > AGE_JUMBO_MTU) {
			error = EINVAL;
		} else if (ifp->if_mtu != ifr->ifr_mtu) {
			ifp->if_mtu = ifr->ifr_mtu;
			if ((ifp->if_flags & IFF_RUNNING) != 0)
				age_init(sc);
		}
		break;

	case SIOCSIFFLAGS:
		if ((ifp->if_flags & IFF_UP) != 0) {
			if ((ifp->if_flags & IFF_RUNNING) != 0) {
				if (((ifp->if_flags ^ sc->age_if_flags)
				    & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
					age_rxfilter(sc);
			} else {
				if ((sc->age_flags & AGE_FLAG_DETACH) == 0)
					age_init(sc);
			}
		} else {
			if ((ifp->if_flags & IFF_RUNNING) != 0)
				age_stop(sc);
		}
		sc->age_if_flags = ifp->if_flags;
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if ((ifp->if_flags & IFF_RUNNING) != 0)
			age_rxfilter(sc);
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		mii = device_get_softc(sc->age_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;

	case SIOCSIFCAP:
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;

		if ((mask & IFCAP_TXCSUM) != 0 &&
		    (ifp->if_capabilities & IFCAP_TXCSUM) != 0) {
			ifp->if_capenable ^= IFCAP_TXCSUM;
			if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
				ifp->if_hwassist |= AGE_CSUM_FEATURES;
			else
				ifp->if_hwassist &= ~AGE_CSUM_FEATURES;
		}

		if ((mask & IFCAP_RXCSUM) != 0 &&
		    (ifp->if_capabilities & IFCAP_RXCSUM) != 0) {
			ifp->if_capenable ^= IFCAP_RXCSUM;
			reg = CSR_READ_4(sc, AGE_MAC_CFG);
			reg &= ~MAC_CFG_RXCSUM_ENB;
			if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
				reg |= MAC_CFG_RXCSUM_ENB;
			CSR_WRITE_4(sc, AGE_MAC_CFG, reg);
		}

		if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
		    (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) != 0) {
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			age_rxvlan(sc);
		}
		break;

	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}
	return (error);
}

static void
age_mac_config(struct age_softc *sc)
{
	struct mii_data *mii = device_get_softc(sc->age_miibus);
	uint32_t reg;

	reg = CSR_READ_4(sc, AGE_MAC_CFG);
	reg &= ~MAC_CFG_FULL_DUPLEX;
	reg &= ~(MAC_CFG_TX_FC | MAC_CFG_RX_FC);
	reg &= ~MAC_CFG_SPEED_MASK;

	/* Reprogram MAC with resolved speed/duplex. */
	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_10_T:
	case IFM_100_TX:
		reg |= MAC_CFG_SPEED_10_100;
		break;
	case IFM_1000_T:
		reg |= MAC_CFG_SPEED_1000;
		break;
	}
	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
		reg |= MAC_CFG_FULL_DUPLEX;
#ifdef notyet
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
			reg |= MAC_CFG_TX_FC;
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
			reg |= MAC_CFG_RX_FC;
#endif
	}
	CSR_WRITE_4(sc, AGE_MAC_CFG, reg);
}

static void
age_stats_update(struct age_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct age_stats *stat;
	struct smb *smb;

	stat = &sc->age_stat;

	bus_dmamap_sync(sc->age_cdata.age_smb_block_tag,
	    sc->age_cdata.age_smb_block_map, BUS_DMASYNC_POSTREAD);

	smb = sc->age_rdata.age_smb_block;
	if (smb->updated == 0)
		return;

	/* Rx stats. */
	stat->rx_frames += smb->rx_frames;
	stat->rx_bcast_frames += smb->rx_bcast_frames;
	stat->rx_mcast_frames += smb->rx_mcast_frames;
	stat->rx_pause_frames += smb->rx_pause_frames;
	stat->rx_control_frames += smb->rx_control_frames;
	stat->rx_crcerrs += smb->rx_crcerrs;
	stat->rx_lenerrs += smb->rx_lenerrs;
	stat->rx_bytes += smb->rx_bytes;
	stat->rx_runts += smb->rx_runts;
	stat->rx_fragments += smb->rx_fragments;
	stat->rx_pkts_64 += smb->rx_pkts_64;
	stat->rx_pkts_65_127 += smb->rx_pkts_65_127;
	stat->rx_pkts_128_255 += smb->rx_pkts_128_255;
	stat->rx_pkts_256_511 += smb->rx_pkts_256_511;
	stat->rx_pkts_512_1023 += smb->rx_pkts_512_1023;
	stat->rx_pkts_1024_1518 += smb->rx_pkts_1024_1518;
	stat->rx_pkts_1519_max += smb->rx_pkts_1519_max;
	stat->rx_pkts_truncated += smb->rx_pkts_truncated;
	stat->rx_fifo_oflows += smb->rx_fifo_oflows;
	stat->rx_desc_oflows += smb->rx_desc_oflows;
	stat->rx_alignerrs += smb->rx_alignerrs;
	stat->rx_bcast_bytes += smb->rx_bcast_bytes;
	stat->rx_mcast_bytes += smb->rx_mcast_bytes;
	stat->rx_pkts_filtered += smb->rx_pkts_filtered;

	/* Tx stats. */
	stat->tx_frames += smb->tx_frames;
	stat->tx_bcast_frames += smb->tx_bcast_frames;
	stat->tx_mcast_frames += smb->tx_mcast_frames;
	stat->tx_pause_frames += smb->tx_pause_frames;
	stat->tx_excess_defer += smb->tx_excess_defer;
	stat->tx_control_frames += smb->tx_control_frames;
	stat->tx_deferred += smb->tx_deferred;
	stat->tx_bytes += smb->tx_bytes;
	stat->tx_pkts_64 += smb->tx_pkts_64;
	stat->tx_pkts_65_127 += smb->tx_pkts_65_127;
	stat->tx_pkts_128_255 += smb->tx_pkts_128_255;
	stat->tx_pkts_256_511 += smb->tx_pkts_256_511;
	stat->tx_pkts_512_1023 += smb->tx_pkts_512_1023;
	stat->tx_pkts_1024_1518 += smb->tx_pkts_1024_1518;
	stat->tx_pkts_1519_max += smb->tx_pkts_1519_max;
	stat->tx_single_colls += smb->tx_single_colls;
	stat->tx_multi_colls += smb->tx_multi_colls;
	stat->tx_late_colls += smb->tx_late_colls;
	stat->tx_excess_colls += smb->tx_excess_colls;
	stat->tx_underrun += smb->tx_underrun;
	stat->tx_desc_underrun += smb->tx_desc_underrun;
	stat->tx_lenerrs += smb->tx_lenerrs;
	stat->tx_pkts_truncated += smb->tx_pkts_truncated;
	stat->tx_bcast_bytes += smb->tx_bcast_bytes;
	stat->tx_mcast_bytes += smb->tx_mcast_bytes;
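
	/*
	 * Excessive collisions are weighted by HDPX_CFG_RETRY_DEFAULT in the
	 * collision count below, presumably because each such event stands
	 * for that many failed transmit attempts.
	 */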
*/ 1850 ifp->if_opackets += smb->tx_frames; 1851 1852 ifp->if_collisions += smb->tx_single_colls + 1853 smb->tx_multi_colls + smb->tx_late_colls + 1854 smb->tx_excess_colls * HDPX_CFG_RETRY_DEFAULT; 1855 1856 ifp->if_oerrors += smb->tx_excess_colls + 1857 smb->tx_late_colls + smb->tx_underrun + 1858 smb->tx_pkts_truncated; 1859 1860 ifp->if_ipackets += smb->rx_frames; 1861 1862 ifp->if_ierrors += smb->rx_crcerrs + smb->rx_lenerrs + 1863 smb->rx_runts + smb->rx_pkts_truncated + 1864 smb->rx_fifo_oflows + smb->rx_desc_oflows + 1865 smb->rx_alignerrs; 1866 1867 /* Update done, clear. */ 1868 smb->updated = 0; 1869 1870 bus_dmamap_sync(sc->age_cdata.age_smb_block_tag, 1871 sc->age_cdata.age_smb_block_map, BUS_DMASYNC_PREWRITE); 1872 } 1873 1874 static void 1875 age_intr(void *xsc) 1876 { 1877 struct age_softc *sc = xsc; 1878 struct ifnet *ifp = &sc->arpcom.ac_if; 1879 struct cmb *cmb; 1880 uint32_t status; 1881 1882 ASSERT_SERIALIZED(ifp->if_serializer); 1883 1884 status = CSR_READ_4(sc, AGE_INTR_STATUS); 1885 if (status == 0 || (status & AGE_INTRS) == 0) 1886 return; 1887 1888 /* Disable and acknowledge interrupts. */ 1889 CSR_WRITE_4(sc, AGE_INTR_STATUS, status | INTR_DIS_INT); 1890 1891 cmb = sc->age_rdata.age_cmb_block; 1892 1893 bus_dmamap_sync(sc->age_cdata.age_cmb_block_tag, 1894 sc->age_cdata.age_cmb_block_map, BUS_DMASYNC_POSTREAD); 1895 status = le32toh(cmb->intr_status); 1896 if ((status & AGE_INTRS) == 0) 1897 goto done; 1898 again: 1899 sc->age_tpd_cons = (le32toh(cmb->tpd_cons) & TPD_CONS_MASK) >> 1900 TPD_CONS_SHIFT; 1901 sc->age_rr_prod = (le32toh(cmb->rprod_cons) & RRD_PROD_MASK) >> 1902 RRD_PROD_SHIFT; 1903 1904 /* Let hardware know CMB was served. */ 1905 cmb->intr_status = 0; 1906 bus_dmamap_sync(sc->age_cdata.age_cmb_block_tag, 1907 sc->age_cdata.age_cmb_block_map, BUS_DMASYNC_PREWRITE); 1908 1909 #if 0 1910 kprintf("INTR: 0x%08x\n", status); 1911 status &= ~INTR_DIS_DMA; 1912 CSR_WRITE_4(sc, AGE_INTR_STATUS, status | INTR_DIS_INT); 1913 #endif 1914 1915 if ((ifp->if_flags & IFF_RUNNING) != 0) { 1916 if ((status & INTR_CMB_RX) != 0) 1917 age_rxintr(sc, sc->age_rr_prod); 1918 1919 if ((status & INTR_CMB_TX) != 0) 1920 age_txintr(sc, sc->age_tpd_cons); 1921 1922 if ((status & (INTR_DMA_RD_TO_RST | INTR_DMA_WR_TO_RST)) != 0) { 1923 if ((status & INTR_DMA_RD_TO_RST) != 0) 1924 device_printf(sc->age_dev, 1925 "DMA read error! -- resetting\n"); 1926 if ((status & INTR_DMA_WR_TO_RST) != 0) 1927 device_printf(sc->age_dev, 1928 "DMA write error! -- resetting\n"); 1929 age_init(sc); 1930 /* XXX return? */ 1931 } 1932 1933 if (!ifq_is_empty(&ifp->if_snd)) 1934 if_devstart(ifp); 1935 1936 if ((status & INTR_SMB) != 0) 1937 age_stats_update(sc); 1938 } 1939 1940 /* Check whether CMB was updated while serving Tx/Rx/SMB handler. */ 1941 bus_dmamap_sync(sc->age_cdata.age_cmb_block_tag, 1942 sc->age_cdata.age_cmb_block_map, BUS_DMASYNC_POSTREAD); 1943 status = le32toh(cmb->intr_status); 1944 if ((status & AGE_INTRS) != 0) 1945 goto again; 1946 done: 1947 /* Re-enable interrupts. */ 1948 CSR_WRITE_4(sc, AGE_INTR_STATUS, 0); 1949 } 1950 1951 static void 1952 age_txintr(struct age_softc *sc, int tpd_cons) 1953 { 1954 struct ifnet *ifp = &sc->arpcom.ac_if; 1955 struct age_txdesc *txd; 1956 int cons, prog; 1957 1958 bus_dmamap_sync(sc->age_cdata.age_tx_ring_tag, 1959 sc->age_cdata.age_tx_ring_map, BUS_DMASYNC_POSTREAD); 1960 1961 /* 1962 * Go through our Tx list and free mbufs for those 1963 * frames which have been transmitted. 
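	 * The hardware's TPD consumer index (tpd_cons) was taken from the
	 * coalescing message block (CMB) in age_intr(); every descriptor
	 * between age_tx_cons and that index has been sent, so unload the
	 * DMA maps and free the mbufs.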
1964 */ 1965 cons = sc->age_cdata.age_tx_cons; 1966 for (prog = 0; cons != tpd_cons; AGE_DESC_INC(cons, AGE_TX_RING_CNT)) { 1967 if (sc->age_cdata.age_tx_cnt <= 0) 1968 break; 1969 prog++; 1970 ifq_clr_oactive(&ifp->if_snd); 1971 sc->age_cdata.age_tx_cnt--; 1972 txd = &sc->age_cdata.age_txdesc[cons]; 1973 /* 1974 * Clear Tx descriptors, it's not required but would 1975 * help debugging in case of Tx issues. 1976 */ 1977 txd->tx_desc->addr = 0; 1978 txd->tx_desc->len = 0; 1979 txd->tx_desc->flags = 0; 1980 1981 if (txd->tx_m == NULL) 1982 continue; 1983 /* Reclaim transmitted mbufs. */ 1984 bus_dmamap_unload(sc->age_cdata.age_tx_tag, txd->tx_dmamap); 1985 m_freem(txd->tx_m); 1986 txd->tx_m = NULL; 1987 } 1988 1989 if (prog > 0) { 1990 sc->age_cdata.age_tx_cons = cons; 1991 1992 /* 1993 * Unarm watchdog timer only when there are no pending 1994 * Tx descriptors in queue. 1995 */ 1996 if (sc->age_cdata.age_tx_cnt == 0) 1997 ifp->if_timer = 0; 1998 bus_dmamap_sync(sc->age_cdata.age_tx_ring_tag, 1999 sc->age_cdata.age_tx_ring_map, BUS_DMASYNC_PREWRITE); 2000 } 2001 } 2002 2003 /* Receive a frame. */ 2004 static void 2005 age_rxeof(struct age_softc *sc, struct rx_rdesc *rxrd) 2006 { 2007 struct ifnet *ifp = &sc->arpcom.ac_if; 2008 struct age_rxdesc *rxd; 2009 struct rx_desc *desc; 2010 struct mbuf *mp, *m; 2011 uint32_t status, index, vtag; 2012 int count, nsegs, pktlen; 2013 int rx_cons; 2014 2015 status = le32toh(rxrd->flags); 2016 index = le32toh(rxrd->index); 2017 rx_cons = AGE_RX_CONS(index); 2018 nsegs = AGE_RX_NSEGS(index); 2019 2020 sc->age_cdata.age_rxlen = AGE_RX_BYTES(le32toh(rxrd->len)); 2021 if ((status & AGE_RRD_ERROR) != 0 && 2022 (status & (AGE_RRD_CRC | AGE_RRD_CODE | AGE_RRD_DRIBBLE | 2023 AGE_RRD_RUNT | AGE_RRD_OFLOW | AGE_RRD_TRUNC)) != 0) { 2024 /* 2025 * We want to pass the following frames to upper 2026 * layer regardless of error status of Rx return 2027 * ring. 2028 * 2029 * o IP/TCP/UDP checksum is bad. 2030 * o frame length and protocol specific length 2031 * does not match. 2032 */ 2033 sc->age_cdata.age_rx_cons += nsegs; 2034 sc->age_cdata.age_rx_cons %= AGE_RX_RING_CNT; 2035 return; 2036 } 2037 2038 pktlen = 0; 2039 for (count = 0; count < nsegs; count++, 2040 AGE_DESC_INC(rx_cons, AGE_RX_RING_CNT)) { 2041 rxd = &sc->age_cdata.age_rxdesc[rx_cons]; 2042 mp = rxd->rx_m; 2043 desc = rxd->rx_desc; 2044 /* Add a new receive buffer to the ring. */ 2045 if (age_newbuf(sc, rxd, 0) != 0) { 2046 ifp->if_iqdrops++; 2047 /* Reuse Rx buffers. */ 2048 if (sc->age_cdata.age_rxhead != NULL) { 2049 m_freem(sc->age_cdata.age_rxhead); 2050 AGE_RXCHAIN_RESET(sc); 2051 } 2052 break; 2053 } 2054 2055 /* The length of the first mbuf is computed last. */ 2056 if (count != 0) { 2057 mp->m_len = AGE_RX_BYTES(le32toh(desc->len)); 2058 pktlen += mp->m_len; 2059 } 2060 2061 /* Chain received mbufs. */ 2062 if (sc->age_cdata.age_rxhead == NULL) { 2063 sc->age_cdata.age_rxhead = mp; 2064 sc->age_cdata.age_rxtail = mp; 2065 } else { 2066 mp->m_flags &= ~M_PKTHDR; 2067 sc->age_cdata.age_rxprev_tail = 2068 sc->age_cdata.age_rxtail; 2069 sc->age_cdata.age_rxtail->m_next = mp; 2070 sc->age_cdata.age_rxtail = mp; 2071 } 2072 2073 if (count == nsegs - 1) { 2074 /* 2075 * It seems that L1 controller has no way 2076 * to tell hardware to strip CRC bytes. 2077 */ 2078 sc->age_cdata.age_rxlen -= ETHER_CRC_LEN; 2079 if (nsegs > 1) { 2080 /* Remove the CRC bytes in chained mbufs. 
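	 * If the last mbuf holds no more than the CRC itself, drop it
	 * entirely and trim the remaining CRC bytes from the previous
	 * tail mbuf instead.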
*/ 2081 pktlen -= ETHER_CRC_LEN; 2082 if (mp->m_len <= ETHER_CRC_LEN) { 2083 sc->age_cdata.age_rxtail = 2084 sc->age_cdata.age_rxprev_tail; 2085 sc->age_cdata.age_rxtail->m_len -= 2086 (ETHER_CRC_LEN - mp->m_len); 2087 sc->age_cdata.age_rxtail->m_next = NULL; 2088 m_freem(mp); 2089 } else { 2090 mp->m_len -= ETHER_CRC_LEN; 2091 } 2092 } 2093 2094 m = sc->age_cdata.age_rxhead; 2095 m->m_flags |= M_PKTHDR; 2096 m->m_pkthdr.rcvif = ifp; 2097 m->m_pkthdr.len = sc->age_cdata.age_rxlen; 2098 /* Set the first mbuf length. */ 2099 m->m_len = sc->age_cdata.age_rxlen - pktlen; 2100 2101 /* 2102 * Set checksum information. 2103 * It seems that L1 controller can compute partial 2104 * checksum. The partial checksum value can be used 2105 * to accelerate checksum computation for fragmented 2106 * TCP/UDP packets. Upper network stack already 2107 * takes advantage of the partial checksum value in 2108 * IP reassembly stage. But I'm not sure the 2109 * correctness of the partial hardware checksum 2110 * assistance due to lack of data sheet. If it is 2111 * proven to work on L1 I'll enable it. 2112 */ 2113 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0 && 2114 (status & AGE_RRD_IPV4) != 0) { 2115 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED; 2116 if ((status & AGE_RRD_IPCSUM_NOK) == 0) 2117 m->m_pkthdr.csum_flags |= CSUM_IP_VALID; 2118 if ((status & (AGE_RRD_TCP | AGE_RRD_UDP)) && 2119 (status & AGE_RRD_TCP_UDPCSUM_NOK) == 0) { 2120 m->m_pkthdr.csum_flags |= 2121 CSUM_DATA_VALID | CSUM_PSEUDO_HDR; 2122 m->m_pkthdr.csum_data = 0xffff; 2123 } 2124 /* 2125 * Don't mark bad checksum for TCP/UDP frames 2126 * as fragmented frames may always have set 2127 * bad checksummed bit of descriptor status. 2128 */ 2129 } 2130 2131 /* Check for VLAN tagged frames. */ 2132 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0 && 2133 (status & AGE_RRD_VLAN) != 0) { 2134 vtag = AGE_RX_VLAN(le32toh(rxrd->vtags)); 2135 m->m_pkthdr.ether_vlantag = 2136 AGE_RX_VLAN_TAG(vtag); 2137 m->m_flags |= M_VLANTAG; 2138 } 2139 2140 /* Pass it on. */ 2141 ifp->if_input(ifp, m); 2142 2143 /* Reset mbuf chains. */ 2144 AGE_RXCHAIN_RESET(sc); 2145 } 2146 } 2147 2148 if (count != nsegs) { 2149 sc->age_cdata.age_rx_cons += nsegs; 2150 sc->age_cdata.age_rx_cons %= AGE_RX_RING_CNT; 2151 } else { 2152 sc->age_cdata.age_rx_cons = rx_cons; 2153 } 2154 } 2155 2156 static void 2157 age_rxintr(struct age_softc *sc, int rr_prod) 2158 { 2159 struct rx_rdesc *rxrd; 2160 int rr_cons, nsegs, pktlen, prog; 2161 2162 rr_cons = sc->age_cdata.age_rr_cons; 2163 if (rr_cons == rr_prod) 2164 return; 2165 2166 bus_dmamap_sync(sc->age_cdata.age_rr_ring_tag, 2167 sc->age_cdata.age_rr_ring_map, BUS_DMASYNC_POSTREAD); 2168 2169 for (prog = 0; rr_cons != rr_prod; prog++) { 2170 rxrd = &sc->age_rdata.age_rr_ring[rr_cons]; 2171 nsegs = AGE_RX_NSEGS(le32toh(rxrd->index)); 2172 if (nsegs == 0) 2173 break; 2174 2175 /* 2176 * Check number of segments against received bytes. 2177 * Non-matching value would indicate that hardware 2178 * is still trying to update Rx return descriptors. 2179 * I'm not sure whether this check is really needed. 2180 */ 2181 pktlen = AGE_RX_BYTES(le32toh(rxrd->len)); 2182 if (nsegs != ((pktlen + (MCLBYTES - ETHER_ALIGN - 1)) / 2183 (MCLBYTES - ETHER_ALIGN))) 2184 break; 2185 2186 /* Received a frame. */ 2187 age_rxeof(sc, rxrd); 2188 2189 /* Clear return ring. */ 2190 rxrd->index = 0; 2191 AGE_DESC_INC(rr_cons, AGE_RR_RING_CNT); 2192 } 2193 2194 if (prog > 0) { 2195 /* Update the consumer index. 
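	 * and kick the mailbox so the hardware sees both the recycled Rx
	 * buffers and the freed Rx return descriptors.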
*/ 2196 sc->age_cdata.age_rr_cons = rr_cons; 2197 2198 /* Sync descriptors. */ 2199 bus_dmamap_sync(sc->age_cdata.age_rr_ring_tag, 2200 sc->age_cdata.age_rr_ring_map, BUS_DMASYNC_PREWRITE); 2201 2202 /* Notify hardware availability of new Rx buffers. */ 2203 AGE_COMMIT_MBOX(sc); 2204 } 2205 } 2206 2207 static void 2208 age_tick(void *xsc) 2209 { 2210 struct age_softc *sc = xsc; 2211 struct ifnet *ifp = &sc->arpcom.ac_if; 2212 struct mii_data *mii = device_get_softc(sc->age_miibus); 2213 2214 lwkt_serialize_enter(ifp->if_serializer); 2215 2216 mii_tick(mii); 2217 callout_reset(&sc->age_tick_ch, hz, age_tick, sc); 2218 2219 lwkt_serialize_exit(ifp->if_serializer); 2220 } 2221 2222 static void 2223 age_reset(struct age_softc *sc) 2224 { 2225 uint32_t reg; 2226 int i; 2227 2228 CSR_WRITE_4(sc, AGE_MASTER_CFG, MASTER_RESET); 2229 for (i = AGE_RESET_TIMEOUT; i > 0; i--) { 2230 DELAY(1); 2231 if ((CSR_READ_4(sc, AGE_MASTER_CFG) & MASTER_RESET) == 0) 2232 break; 2233 } 2234 if (i == 0) 2235 device_printf(sc->age_dev, "master reset timeout!\n"); 2236 2237 for (i = AGE_RESET_TIMEOUT; i > 0; i--) { 2238 if ((reg = CSR_READ_4(sc, AGE_IDLE_STATUS)) == 0) 2239 break; 2240 DELAY(10); 2241 } 2242 if (i == 0) 2243 device_printf(sc->age_dev, "reset timeout(0x%08x)!\n", reg); 2244 2245 /* Initialize PCIe module. From Linux. */ 2246 CSR_WRITE_4(sc, 0x12FC, 0x6500); 2247 CSR_WRITE_4(sc, 0x1008, CSR_READ_4(sc, 0x1008) | 0x8000); 2248 } 2249 2250 static void 2251 age_init(void *xsc) 2252 { 2253 struct age_softc *sc = xsc; 2254 struct ifnet *ifp = &sc->arpcom.ac_if; 2255 struct mii_data *mii; 2256 uint8_t eaddr[ETHER_ADDR_LEN]; 2257 bus_addr_t paddr; 2258 uint32_t reg, fsize; 2259 uint32_t rxf_hi, rxf_lo, rrd_hi, rrd_lo; 2260 int error; 2261 2262 ASSERT_SERIALIZED(ifp->if_serializer); 2263 2264 mii = device_get_softc(sc->age_miibus); 2265 2266 /* 2267 * Cancel any pending I/O. 2268 */ 2269 age_stop(sc); 2270 2271 /* 2272 * Reset the chip to a known state. 2273 */ 2274 age_reset(sc); 2275 2276 /* Initialize descriptors. */ 2277 error = age_init_rx_ring(sc); 2278 if (error != 0) { 2279 device_printf(sc->age_dev, "no memory for Rx buffers.\n"); 2280 age_stop(sc); 2281 return; 2282 } 2283 age_init_rr_ring(sc); 2284 age_init_tx_ring(sc); 2285 age_init_cmb_block(sc); 2286 age_init_smb_block(sc); 2287 2288 /* Reprogram the station address. */ 2289 bcopy(IF_LLADDR(ifp), eaddr, ETHER_ADDR_LEN); 2290 CSR_WRITE_4(sc, AGE_PAR0, 2291 eaddr[2] << 24 | eaddr[3] << 16 | eaddr[4] << 8 | eaddr[5]); 2292 CSR_WRITE_4(sc, AGE_PAR1, eaddr[0] << 8 | eaddr[1]); 2293 2294 /* Set descriptor base addresses. */ 2295 paddr = sc->age_rdata.age_tx_ring_paddr; 2296 CSR_WRITE_4(sc, AGE_DESC_ADDR_HI, AGE_ADDR_HI(paddr)); 2297 paddr = sc->age_rdata.age_rx_ring_paddr; 2298 CSR_WRITE_4(sc, AGE_DESC_RD_ADDR_LO, AGE_ADDR_LO(paddr)); 2299 paddr = sc->age_rdata.age_rr_ring_paddr; 2300 CSR_WRITE_4(sc, AGE_DESC_RRD_ADDR_LO, AGE_ADDR_LO(paddr)); 2301 paddr = sc->age_rdata.age_tx_ring_paddr; 2302 CSR_WRITE_4(sc, AGE_DESC_TPD_ADDR_LO, AGE_ADDR_LO(paddr)); 2303 paddr = sc->age_rdata.age_cmb_block_paddr; 2304 CSR_WRITE_4(sc, AGE_DESC_CMB_ADDR_LO, AGE_ADDR_LO(paddr)); 2305 paddr = sc->age_rdata.age_smb_block_paddr; 2306 CSR_WRITE_4(sc, AGE_DESC_SMB_ADDR_LO, AGE_ADDR_LO(paddr)); 2307 2308 /* Set Rx/Rx return descriptor counter. */ 2309 CSR_WRITE_4(sc, AGE_DESC_RRD_RD_CNT, 2310 ((AGE_RR_RING_CNT << DESC_RRD_CNT_SHIFT) & 2311 DESC_RRD_CNT_MASK) | 2312 ((AGE_RX_RING_CNT << DESC_RD_CNT_SHIFT) & DESC_RD_CNT_MASK)); 2313 2314 /* Set Tx descriptor counter. 
 */
	CSR_WRITE_4(sc, AGE_DESC_TPD_CNT,
	    (AGE_TX_RING_CNT << DESC_TPD_CNT_SHIFT) & DESC_TPD_CNT_MASK);

	/* Tell hardware that we're ready to load descriptors. */
	CSR_WRITE_4(sc, AGE_DMA_BLOCK, DMA_BLOCK_LOAD);

	/*
	 * Initialize the mailbox register.
	 * Updated producer/consumer index information is exchanged
	 * through this mailbox register.  However, the Tx producer and
	 * the Rx return consumer/Rx producer are all shared, so it is
	 * hard to separate the Tx and Rx code paths without locking.
	 * If the L1 hardware had separate mailbox registers for Tx and
	 * Rx consumer/producer management, we could have independent
	 * Tx/Rx handlers and the Rx handler could run without any
	 * locking.
	 */
	AGE_COMMIT_MBOX(sc);

	/* Configure IPG/IFG parameters. */
	CSR_WRITE_4(sc, AGE_IPG_IFG_CFG,
	    ((IPG_IFG_IPG2_DEFAULT << IPG_IFG_IPG2_SHIFT) & IPG_IFG_IPG2_MASK) |
	    ((IPG_IFG_IPG1_DEFAULT << IPG_IFG_IPG1_SHIFT) & IPG_IFG_IPG1_MASK) |
	    ((IPG_IFG_MIFG_DEFAULT << IPG_IFG_MIFG_SHIFT) & IPG_IFG_MIFG_MASK) |
	    ((IPG_IFG_IPGT_DEFAULT << IPG_IFG_IPGT_SHIFT) & IPG_IFG_IPGT_MASK));

	/* Set parameters for half-duplex media. */
	CSR_WRITE_4(sc, AGE_HDPX_CFG,
	    ((HDPX_CFG_LCOL_DEFAULT << HDPX_CFG_LCOL_SHIFT) &
	    HDPX_CFG_LCOL_MASK) |
	    ((HDPX_CFG_RETRY_DEFAULT << HDPX_CFG_RETRY_SHIFT) &
	    HDPX_CFG_RETRY_MASK) | HDPX_CFG_EXC_DEF_EN |
	    ((HDPX_CFG_ABEBT_DEFAULT << HDPX_CFG_ABEBT_SHIFT) &
	    HDPX_CFG_ABEBT_MASK) |
	    ((HDPX_CFG_JAMIPG_DEFAULT << HDPX_CFG_JAMIPG_SHIFT) &
	    HDPX_CFG_JAMIPG_MASK));

	/* Configure the interrupt moderation timer. */
	CSR_WRITE_2(sc, AGE_IM_TIMER, AGE_USECS(sc->age_int_mod));
	reg = CSR_READ_4(sc, AGE_MASTER_CFG);
	reg &= ~MASTER_MTIMER_ENB;
	if (AGE_USECS(sc->age_int_mod) == 0)
		reg &= ~MASTER_ITIMER_ENB;
	else
		reg |= MASTER_ITIMER_ENB;
	CSR_WRITE_4(sc, AGE_MASTER_CFG, reg);
	if (bootverbose)
		device_printf(sc->age_dev, "interrupt moderation is %d us.\n",
		    sc->age_int_mod);
	CSR_WRITE_2(sc, AGE_INTR_CLR_TIMER, AGE_USECS(1000));

	/*
	 * Set the maximum frame size, but don't let the MTU be less
	 * than ETHERMTU.
	 */
	if (ifp->if_mtu < ETHERMTU)
		sc->age_max_frame_size = ETHERMTU;
	else
		sc->age_max_frame_size = ifp->if_mtu;
	sc->age_max_frame_size += ETHER_HDR_LEN +
	    sizeof(struct ether_vlan_header) + ETHER_CRC_LEN;
	CSR_WRITE_4(sc, AGE_FRAME_SIZE, sc->age_max_frame_size);

	/* Configure jumbo frames. */
	fsize = roundup(sc->age_max_frame_size, sizeof(uint64_t));
	CSR_WRITE_4(sc, AGE_RXQ_JUMBO_CFG,
	    (((fsize / sizeof(uint64_t)) <<
	    RXQ_JUMBO_CFG_SZ_THRESH_SHIFT) & RXQ_JUMBO_CFG_SZ_THRESH_MASK) |
	    ((RXQ_JUMBO_CFG_LKAH_DEFAULT <<
	    RXQ_JUMBO_CFG_LKAH_SHIFT) & RXQ_JUMBO_CFG_LKAH_MASK) |
	    ((AGE_USECS(8) << RXQ_JUMBO_CFG_RRD_TIMER_SHIFT) &
	    RXQ_JUMBO_CFG_RRD_TIMER_MASK));

	/* Configure flow-control parameters.  From Linux. */
	if ((sc->age_flags & AGE_FLAG_PCIE) != 0) {
		/*
		 * Magic workaround for old L1 chips.
		 * It is not known which hardware revision requires this.
		 */
		CSR_WRITE_4(sc, 0x12FC, 0x6500);
		/*
		 * Another magic workaround for the flow-control mode
		 * change.  From Linux.
		 */
		CSR_WRITE_4(sc, 0x1008, CSR_READ_4(sc, 0x1008) | 0x8000);
	}
	/*
	 * TODO
	 * Understand the pause parameter relationship between the FIFO
	 * size and the number of Rx descriptors and Rx return descriptors.
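	 * The values below act as low/high watermarks: fixed fractions of
	 * the ring sizes for the chip revisions listed in the switch
	 * below, otherwise values derived from the SRAM Rx FIFO and RRD
	 * lengths read back from the chip.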
	 *
	 * The magic parameters came from Linux.
	 */
	switch (sc->age_chip_rev) {
	case 0x8001:
	case 0x9001:
	case 0x9002:
	case 0x9003:
		rxf_hi = AGE_RX_RING_CNT / 16;
		rxf_lo = (AGE_RX_RING_CNT * 7) / 8;
		rrd_hi = (AGE_RR_RING_CNT * 7) / 8;
		rrd_lo = AGE_RR_RING_CNT / 16;
		break;
	default:
		reg = CSR_READ_4(sc, AGE_SRAM_RX_FIFO_LEN);
		rxf_lo = reg / 16;
		if (rxf_lo < 192)
			rxf_lo = 192;
		rxf_hi = (reg * 7) / 8;
		if (rxf_hi < rxf_lo)
			rxf_hi = rxf_lo + 16;
		reg = CSR_READ_4(sc, AGE_SRAM_RRD_LEN);
		rrd_lo = reg / 8;
		rrd_hi = (reg * 7) / 8;
		if (rrd_lo < 2)
			rrd_lo = 2;
		if (rrd_hi < rrd_lo)
			rrd_hi = rrd_lo + 3;
		break;
	}
	CSR_WRITE_4(sc, AGE_RXQ_FIFO_PAUSE_THRESH,
	    ((rxf_lo << RXQ_FIFO_PAUSE_THRESH_LO_SHIFT) &
	    RXQ_FIFO_PAUSE_THRESH_LO_MASK) |
	    ((rxf_hi << RXQ_FIFO_PAUSE_THRESH_HI_SHIFT) &
	    RXQ_FIFO_PAUSE_THRESH_HI_MASK));
	CSR_WRITE_4(sc, AGE_RXQ_RRD_PAUSE_THRESH,
	    ((rrd_lo << RXQ_RRD_PAUSE_THRESH_LO_SHIFT) &
	    RXQ_RRD_PAUSE_THRESH_LO_MASK) |
	    ((rrd_hi << RXQ_RRD_PAUSE_THRESH_HI_SHIFT) &
	    RXQ_RRD_PAUSE_THRESH_HI_MASK));

	/* Configure RxQ. */
	CSR_WRITE_4(sc, AGE_RXQ_CFG,
	    ((RXQ_CFG_RD_BURST_DEFAULT << RXQ_CFG_RD_BURST_SHIFT) &
	    RXQ_CFG_RD_BURST_MASK) |
	    ((RXQ_CFG_RRD_BURST_THRESH_DEFAULT <<
	    RXQ_CFG_RRD_BURST_THRESH_SHIFT) & RXQ_CFG_RRD_BURST_THRESH_MASK) |
	    ((RXQ_CFG_RD_PREF_MIN_IPG_DEFAULT <<
	    RXQ_CFG_RD_PREF_MIN_IPG_SHIFT) & RXQ_CFG_RD_PREF_MIN_IPG_MASK) |
	    RXQ_CFG_CUT_THROUGH_ENB | RXQ_CFG_ENB);

	/* Configure TxQ. */
	CSR_WRITE_4(sc, AGE_TXQ_CFG,
	    ((TXQ_CFG_TPD_BURST_DEFAULT << TXQ_CFG_TPD_BURST_SHIFT) &
	    TXQ_CFG_TPD_BURST_MASK) |
	    ((TXQ_CFG_TX_FIFO_BURST_DEFAULT << TXQ_CFG_TX_FIFO_BURST_SHIFT) &
	    TXQ_CFG_TX_FIFO_BURST_MASK) |
	    ((TXQ_CFG_TPD_FETCH_DEFAULT <<
	    TXQ_CFG_TPD_FETCH_THRESH_SHIFT) & TXQ_CFG_TPD_FETCH_THRESH_MASK) |
	    TXQ_CFG_ENB);

	CSR_WRITE_4(sc, AGE_TX_JUMBO_TPD_TH_IPG,
	    (((fsize / sizeof(uint64_t) << TX_JUMBO_TPD_TH_SHIFT)) &
	    TX_JUMBO_TPD_TH_MASK) |
	    ((TX_JUMBO_TPD_IPG_DEFAULT << TX_JUMBO_TPD_IPG_SHIFT) &
	    TX_JUMBO_TPD_IPG_MASK));

	/* Configure DMA parameters. */
	CSR_WRITE_4(sc, AGE_DMA_CFG,
	    DMA_CFG_ENH_ORDER | DMA_CFG_RCB_64 |
	    sc->age_dma_rd_burst | DMA_CFG_RD_ENB |
	    sc->age_dma_wr_burst | DMA_CFG_WR_ENB);

	/* Configure the CMB DMA write threshold. */
	CSR_WRITE_4(sc, AGE_CMB_WR_THRESH,
	    ((CMB_WR_THRESH_RRD_DEFAULT << CMB_WR_THRESH_RRD_SHIFT) &
	    CMB_WR_THRESH_RRD_MASK) |
	    ((CMB_WR_THRESH_TPD_DEFAULT << CMB_WR_THRESH_TPD_SHIFT) &
	    CMB_WR_THRESH_TPD_MASK));

	/* Set the CMB/SMB timers and enable them. */
	CSR_WRITE_4(sc, AGE_CMB_WR_TIMER,
	    ((AGE_USECS(2) << CMB_WR_TIMER_TX_SHIFT) & CMB_WR_TIMER_TX_MASK) |
	    ((AGE_USECS(2) << CMB_WR_TIMER_RX_SHIFT) & CMB_WR_TIMER_RX_MASK));

	/* Request an SMB update every second. */
	CSR_WRITE_4(sc, AGE_SMB_TIMER, AGE_USECS(1000 * 1000));
	CSR_WRITE_4(sc, AGE_CSMB_CTRL, CSMB_CTRL_SMB_ENB | CSMB_CTRL_CMB_ENB);

	/*
	 * Disable all WOL bits as WOL can interfere with normal Rx
	 * operation.
	 */
	CSR_WRITE_4(sc, AGE_WOL_CFG, 0);

	/*
	 * Configure the Tx/Rx MACs.
	 *  - Auto-padding for short frames.
	 *  - Enable CRC generation.
	 * Start with full-duplex/1000Mbps media; the MAC is reconfigured
	 * after the link has been established.
2503 */ 2504 CSR_WRITE_4(sc, AGE_MAC_CFG, 2505 MAC_CFG_TX_CRC_ENB | MAC_CFG_TX_AUTO_PAD | 2506 MAC_CFG_FULL_DUPLEX | MAC_CFG_SPEED_1000 | 2507 ((MAC_CFG_PREAMBLE_DEFAULT << MAC_CFG_PREAMBLE_SHIFT) & 2508 MAC_CFG_PREAMBLE_MASK)); 2509 2510 /* Set up the receive filter. */ 2511 age_rxfilter(sc); 2512 age_rxvlan(sc); 2513 2514 reg = CSR_READ_4(sc, AGE_MAC_CFG); 2515 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) 2516 reg |= MAC_CFG_RXCSUM_ENB; 2517 2518 /* Ack all pending interrupts and clear it. */ 2519 CSR_WRITE_4(sc, AGE_INTR_STATUS, 0); 2520 CSR_WRITE_4(sc, AGE_INTR_MASK, AGE_INTRS); 2521 2522 /* Finally enable Tx/Rx MAC. */ 2523 CSR_WRITE_4(sc, AGE_MAC_CFG, reg | MAC_CFG_TX_ENB | MAC_CFG_RX_ENB); 2524 2525 sc->age_flags &= ~AGE_FLAG_LINK; 2526 /* Switch to the current media. */ 2527 mii_mediachg(mii); 2528 2529 callout_reset(&sc->age_tick_ch, hz, age_tick, sc); 2530 2531 ifp->if_flags |= IFF_RUNNING; 2532 ifq_clr_oactive(&ifp->if_snd); 2533 } 2534 2535 static void 2536 age_stop(struct age_softc *sc) 2537 { 2538 struct ifnet *ifp = &sc->arpcom.ac_if; 2539 struct age_txdesc *txd; 2540 struct age_rxdesc *rxd; 2541 uint32_t reg; 2542 int i; 2543 2544 ASSERT_SERIALIZED(ifp->if_serializer); 2545 2546 /* 2547 * Mark the interface down and cancel the watchdog timer. 2548 */ 2549 ifp->if_flags &= ~IFF_RUNNING; 2550 ifq_clr_oactive(&ifp->if_snd); 2551 ifp->if_timer = 0; 2552 2553 sc->age_flags &= ~AGE_FLAG_LINK; 2554 callout_stop(&sc->age_tick_ch); 2555 2556 /* 2557 * Disable interrupts. 2558 */ 2559 CSR_WRITE_4(sc, AGE_INTR_MASK, 0); 2560 CSR_WRITE_4(sc, AGE_INTR_STATUS, 0xFFFFFFFF); 2561 2562 /* Stop CMB/SMB updates. */ 2563 CSR_WRITE_4(sc, AGE_CSMB_CTRL, 0); 2564 2565 /* Stop Rx/Tx MAC. */ 2566 age_stop_rxmac(sc); 2567 age_stop_txmac(sc); 2568 2569 /* Stop DMA. */ 2570 CSR_WRITE_4(sc, AGE_DMA_CFG, 2571 CSR_READ_4(sc, AGE_DMA_CFG) & ~(DMA_CFG_RD_ENB | DMA_CFG_WR_ENB)); 2572 2573 /* Stop TxQ/RxQ. */ 2574 CSR_WRITE_4(sc, AGE_TXQ_CFG, 2575 CSR_READ_4(sc, AGE_TXQ_CFG) & ~TXQ_CFG_ENB); 2576 CSR_WRITE_4(sc, AGE_RXQ_CFG, 2577 CSR_READ_4(sc, AGE_RXQ_CFG) & ~RXQ_CFG_ENB); 2578 for (i = AGE_RESET_TIMEOUT; i > 0; i--) { 2579 if ((reg = CSR_READ_4(sc, AGE_IDLE_STATUS)) == 0) 2580 break; 2581 DELAY(10); 2582 } 2583 if (i == 0) 2584 device_printf(sc->age_dev, 2585 "stopping Rx/Tx MACs timed out(0x%08x)!\n", reg); 2586 2587 /* Reclaim Rx buffers that have been processed. */ 2588 if (sc->age_cdata.age_rxhead != NULL) 2589 m_freem(sc->age_cdata.age_rxhead); 2590 AGE_RXCHAIN_RESET(sc); 2591 2592 /* 2593 * Free RX and TX mbufs still in the queues. 2594 */ 2595 for (i = 0; i < AGE_RX_RING_CNT; i++) { 2596 rxd = &sc->age_cdata.age_rxdesc[i]; 2597 if (rxd->rx_m != NULL) { 2598 bus_dmamap_unload(sc->age_cdata.age_rx_tag, 2599 rxd->rx_dmamap); 2600 m_freem(rxd->rx_m); 2601 rxd->rx_m = NULL; 2602 } 2603 } 2604 for (i = 0; i < AGE_TX_RING_CNT; i++) { 2605 txd = &sc->age_cdata.age_txdesc[i]; 2606 if (txd->tx_m != NULL) { 2607 bus_dmamap_unload(sc->age_cdata.age_tx_tag, 2608 txd->tx_dmamap); 2609 m_freem(txd->tx_m); 2610 txd->tx_m = NULL; 2611 } 2612 } 2613 } 2614 2615 static void 2616 age_stop_txmac(struct age_softc *sc) 2617 { 2618 uint32_t reg; 2619 int i; 2620 2621 reg = CSR_READ_4(sc, AGE_MAC_CFG); 2622 if ((reg & MAC_CFG_TX_ENB) != 0) { 2623 reg &= ~MAC_CFG_TX_ENB; 2624 CSR_WRITE_4(sc, AGE_MAC_CFG, reg); 2625 } 2626 /* Stop Tx DMA engine. 
*/ 2627 reg = CSR_READ_4(sc, AGE_DMA_CFG); 2628 if ((reg & DMA_CFG_RD_ENB) != 0) { 2629 reg &= ~DMA_CFG_RD_ENB; 2630 CSR_WRITE_4(sc, AGE_DMA_CFG, reg); 2631 } 2632 for (i = AGE_RESET_TIMEOUT; i > 0; i--) { 2633 if ((CSR_READ_4(sc, AGE_IDLE_STATUS) & 2634 (IDLE_STATUS_TXMAC | IDLE_STATUS_DMARD)) == 0) 2635 break; 2636 DELAY(10); 2637 } 2638 if (i == 0) 2639 device_printf(sc->age_dev, "stopping TxMAC timeout!\n"); 2640 } 2641 2642 static void 2643 age_stop_rxmac(struct age_softc *sc) 2644 { 2645 uint32_t reg; 2646 int i; 2647 2648 reg = CSR_READ_4(sc, AGE_MAC_CFG); 2649 if ((reg & MAC_CFG_RX_ENB) != 0) { 2650 reg &= ~MAC_CFG_RX_ENB; 2651 CSR_WRITE_4(sc, AGE_MAC_CFG, reg); 2652 } 2653 /* Stop Rx DMA engine. */ 2654 reg = CSR_READ_4(sc, AGE_DMA_CFG); 2655 if ((reg & DMA_CFG_WR_ENB) != 0) { 2656 reg &= ~DMA_CFG_WR_ENB; 2657 CSR_WRITE_4(sc, AGE_DMA_CFG, reg); 2658 } 2659 for (i = AGE_RESET_TIMEOUT; i > 0; i--) { 2660 if ((CSR_READ_4(sc, AGE_IDLE_STATUS) & 2661 (IDLE_STATUS_RXMAC | IDLE_STATUS_DMAWR)) == 0) 2662 break; 2663 DELAY(10); 2664 } 2665 if (i == 0) 2666 device_printf(sc->age_dev, "stopping RxMAC timeout!\n"); 2667 } 2668 2669 static void 2670 age_init_tx_ring(struct age_softc *sc) 2671 { 2672 struct age_ring_data *rd; 2673 struct age_txdesc *txd; 2674 int i; 2675 2676 sc->age_cdata.age_tx_prod = 0; 2677 sc->age_cdata.age_tx_cons = 0; 2678 sc->age_cdata.age_tx_cnt = 0; 2679 2680 rd = &sc->age_rdata; 2681 bzero(rd->age_tx_ring, AGE_TX_RING_SZ); 2682 for (i = 0; i < AGE_TX_RING_CNT; i++) { 2683 txd = &sc->age_cdata.age_txdesc[i]; 2684 txd->tx_desc = &rd->age_tx_ring[i]; 2685 txd->tx_m = NULL; 2686 } 2687 2688 bus_dmamap_sync(sc->age_cdata.age_tx_ring_tag, 2689 sc->age_cdata.age_tx_ring_map, BUS_DMASYNC_PREWRITE); 2690 } 2691 2692 static int 2693 age_init_rx_ring(struct age_softc *sc) 2694 { 2695 struct age_ring_data *rd; 2696 struct age_rxdesc *rxd; 2697 int i; 2698 2699 sc->age_cdata.age_rx_cons = AGE_RX_RING_CNT - 1; 2700 rd = &sc->age_rdata; 2701 bzero(rd->age_rx_ring, AGE_RX_RING_SZ); 2702 for (i = 0; i < AGE_RX_RING_CNT; i++) { 2703 rxd = &sc->age_cdata.age_rxdesc[i]; 2704 rxd->rx_m = NULL; 2705 rxd->rx_desc = &rd->age_rx_ring[i]; 2706 if (age_newbuf(sc, rxd, 1) != 0) 2707 return (ENOBUFS); 2708 } 2709 2710 bus_dmamap_sync(sc->age_cdata.age_rx_ring_tag, 2711 sc->age_cdata.age_rx_ring_map, BUS_DMASYNC_PREWRITE); 2712 2713 return (0); 2714 } 2715 2716 static void 2717 age_init_rr_ring(struct age_softc *sc) 2718 { 2719 struct age_ring_data *rd; 2720 2721 sc->age_cdata.age_rr_cons = 0; 2722 AGE_RXCHAIN_RESET(sc); 2723 2724 rd = &sc->age_rdata; 2725 bzero(rd->age_rr_ring, AGE_RR_RING_SZ); 2726 bus_dmamap_sync(sc->age_cdata.age_rr_ring_tag, 2727 sc->age_cdata.age_rr_ring_map, BUS_DMASYNC_PREWRITE); 2728 } 2729 2730 static void 2731 age_init_cmb_block(struct age_softc *sc) 2732 { 2733 struct age_ring_data *rd; 2734 2735 rd = &sc->age_rdata; 2736 bzero(rd->age_cmb_block, AGE_CMB_BLOCK_SZ); 2737 bus_dmamap_sync(sc->age_cdata.age_cmb_block_tag, 2738 sc->age_cdata.age_cmb_block_map, BUS_DMASYNC_PREWRITE); 2739 } 2740 2741 static void 2742 age_init_smb_block(struct age_softc *sc) 2743 { 2744 struct age_ring_data *rd; 2745 2746 rd = &sc->age_rdata; 2747 bzero(rd->age_smb_block, AGE_SMB_BLOCK_SZ); 2748 bus_dmamap_sync(sc->age_cdata.age_smb_block_tag, 2749 sc->age_cdata.age_smb_block_map, BUS_DMASYNC_PREWRITE); 2750 } 2751 2752 static int 2753 age_newbuf(struct age_softc *sc, struct age_rxdesc *rxd, int init) 2754 { 2755 struct rx_desc *desc; 2756 struct mbuf *m; 2757 struct age_dmamap_ctx ctx; 2758 
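	/*
	 * The new mbuf is loaded into the spare DMA map first; the
	 * descriptor's map is swapped in only after the load succeeds,
	 * so the currently mapped Rx buffer survives a failure.
	 */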
bus_dma_segment_t segs[1]; 2759 bus_dmamap_t map; 2760 int error; 2761 2762 m = m_getcl(init ? MB_WAIT : MB_DONTWAIT, MT_DATA, M_PKTHDR); 2763 if (m == NULL) 2764 return (ENOBUFS); 2765 2766 m->m_len = m->m_pkthdr.len = MCLBYTES; 2767 m_adj(m, ETHER_ALIGN); 2768 2769 ctx.nsegs = 1; 2770 ctx.segs = segs; 2771 error = bus_dmamap_load_mbuf(sc->age_cdata.age_rx_tag, 2772 sc->age_cdata.age_rx_sparemap, 2773 m, age_dmamap_buf_cb, &ctx, 2774 BUS_DMA_NOWAIT); 2775 if (error || ctx.nsegs == 0) { 2776 if (!error) { 2777 bus_dmamap_unload(sc->age_cdata.age_rx_tag, 2778 sc->age_cdata.age_rx_sparemap); 2779 error = EFBIG; 2780 if_printf(&sc->arpcom.ac_if, "too many segments?!\n"); 2781 } 2782 m_freem(m); 2783 2784 if (init) 2785 if_printf(&sc->arpcom.ac_if, "can't load RX mbuf\n"); 2786 return (error); 2787 } 2788 KASSERT(ctx.nsegs == 1, 2789 ("%s: %d segments returned!", __func__, ctx.nsegs)); 2790 2791 if (rxd->rx_m != NULL) { 2792 bus_dmamap_sync(sc->age_cdata.age_rx_tag, rxd->rx_dmamap, 2793 BUS_DMASYNC_POSTREAD); 2794 bus_dmamap_unload(sc->age_cdata.age_rx_tag, rxd->rx_dmamap); 2795 } 2796 map = rxd->rx_dmamap; 2797 rxd->rx_dmamap = sc->age_cdata.age_rx_sparemap; 2798 sc->age_cdata.age_rx_sparemap = map; 2799 rxd->rx_m = m; 2800 2801 desc = rxd->rx_desc; 2802 desc->addr = htole64(segs[0].ds_addr); 2803 desc->len = htole32((segs[0].ds_len & AGE_RD_LEN_MASK) << 2804 AGE_RD_LEN_SHIFT); 2805 return (0); 2806 } 2807 2808 static void 2809 age_rxvlan(struct age_softc *sc) 2810 { 2811 struct ifnet *ifp = &sc->arpcom.ac_if; 2812 uint32_t reg; 2813 2814 reg = CSR_READ_4(sc, AGE_MAC_CFG); 2815 reg &= ~MAC_CFG_VLAN_TAG_STRIP; 2816 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) 2817 reg |= MAC_CFG_VLAN_TAG_STRIP; 2818 CSR_WRITE_4(sc, AGE_MAC_CFG, reg); 2819 } 2820 2821 static void 2822 age_rxfilter(struct age_softc *sc) 2823 { 2824 struct ifnet *ifp = &sc->arpcom.ac_if; 2825 struct ifmultiaddr *ifma; 2826 uint32_t crc; 2827 uint32_t mchash[2]; 2828 uint32_t rxcfg; 2829 2830 rxcfg = CSR_READ_4(sc, AGE_MAC_CFG); 2831 rxcfg &= ~(MAC_CFG_ALLMULTI | MAC_CFG_BCAST | MAC_CFG_PROMISC); 2832 if ((ifp->if_flags & IFF_BROADCAST) != 0) 2833 rxcfg |= MAC_CFG_BCAST; 2834 if ((ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) != 0) { 2835 if ((ifp->if_flags & IFF_PROMISC) != 0) 2836 rxcfg |= MAC_CFG_PROMISC; 2837 if ((ifp->if_flags & IFF_ALLMULTI) != 0) 2838 rxcfg |= MAC_CFG_ALLMULTI; 2839 CSR_WRITE_4(sc, AGE_MAR0, 0xFFFFFFFF); 2840 CSR_WRITE_4(sc, AGE_MAR1, 0xFFFFFFFF); 2841 CSR_WRITE_4(sc, AGE_MAC_CFG, rxcfg); 2842 return; 2843 } 2844 2845 /* Program new filter. 
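	 * Each multicast address is hashed with the little-endian CRC32
	 * of the address; the top six bits of the CRC select one of the
	 * 64 bits in the AGE_MAR0/AGE_MAR1 hash table.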
 */
	bzero(mchash, sizeof(mchash));

	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		crc = ether_crc32_le(LLADDR((struct sockaddr_dl *)
		    ifma->ifma_addr), ETHER_ADDR_LEN);
		mchash[crc >> 31] |= 1 << ((crc >> 26) & 0x1f);
	}

	CSR_WRITE_4(sc, AGE_MAR0, mchash[0]);
	CSR_WRITE_4(sc, AGE_MAR1, mchash[1]);
	CSR_WRITE_4(sc, AGE_MAC_CFG, rxcfg);
}

static int
sysctl_age_stats(SYSCTL_HANDLER_ARGS)
{
	struct age_softc *sc;
	struct age_stats *stats;
	int error, result;

	result = -1;
	error = sysctl_handle_int(oidp, &result, 0, req);

	if (error != 0 || req->newptr == NULL)
		return (error);

	if (result != 1)
		return (error);

	sc = (struct age_softc *)arg1;
	stats = &sc->age_stat;
	kprintf("%s statistics:\n", device_get_nameunit(sc->age_dev));
	kprintf("Transmit good frames : %ju\n",
	    (uintmax_t)stats->tx_frames);
	kprintf("Transmit good broadcast frames : %ju\n",
	    (uintmax_t)stats->tx_bcast_frames);
	kprintf("Transmit good multicast frames : %ju\n",
	    (uintmax_t)stats->tx_mcast_frames);
	kprintf("Transmit pause control frames : %u\n",
	    stats->tx_pause_frames);
	kprintf("Transmit control frames : %u\n",
	    stats->tx_control_frames);
	kprintf("Transmit frames with excessive deferrals : %u\n",
	    stats->tx_excess_defer);
	kprintf("Transmit deferrals : %u\n",
	    stats->tx_deferred);
	kprintf("Transmit good octets : %ju\n",
	    (uintmax_t)stats->tx_bytes);
	kprintf("Transmit good broadcast octets : %ju\n",
	    (uintmax_t)stats->tx_bcast_bytes);
	kprintf("Transmit good multicast octets : %ju\n",
	    (uintmax_t)stats->tx_mcast_bytes);
	kprintf("Transmit frames 64 bytes : %ju\n",
	    (uintmax_t)stats->tx_pkts_64);
	kprintf("Transmit frames 65 to 127 bytes : %ju\n",
	    (uintmax_t)stats->tx_pkts_65_127);
	kprintf("Transmit frames 128 to 255 bytes : %ju\n",
	    (uintmax_t)stats->tx_pkts_128_255);
	kprintf("Transmit frames 256 to 511 bytes : %ju\n",
	    (uintmax_t)stats->tx_pkts_256_511);
	kprintf("Transmit frames 512 to 1023 bytes : %ju\n",
	    (uintmax_t)stats->tx_pkts_512_1023);
	kprintf("Transmit frames 1024 to 1518 bytes : %ju\n",
	    (uintmax_t)stats->tx_pkts_1024_1518);
	kprintf("Transmit frames 1519 to MTU bytes : %ju\n",
	    (uintmax_t)stats->tx_pkts_1519_max);
	kprintf("Transmit single collisions : %u\n",
	    stats->tx_single_colls);
	kprintf("Transmit multiple collisions : %u\n",
	    stats->tx_multi_colls);
	kprintf("Transmit late collisions : %u\n",
	    stats->tx_late_colls);
	kprintf("Transmit aborts due to excessive collisions : %u\n",
	    stats->tx_excess_colls);
	kprintf("Transmit underruns due to FIFO underruns : %u\n",
	    stats->tx_underrun);
	kprintf("Transmit descriptor write-back errors : %u\n",
	    stats->tx_desc_underrun);
	kprintf("Transmit frames with length mismatching the frame size : %u\n",
	    stats->tx_lenerrs);
	kprintf("Transmit frames truncated due to MTU size : %u\n",
	    stats->tx_pkts_truncated);

	kprintf("Receive good frames : %ju\n",
	    (uintmax_t)stats->rx_frames);
	kprintf("Receive good broadcast frames : %ju\n",
	    (uintmax_t)stats->rx_bcast_frames);
	kprintf("Receive good multicast frames : %ju\n",
	    (uintmax_t)stats->rx_mcast_frames);
	kprintf("Receive pause control frames : %u\n",
	    stats->rx_pause_frames);
	kprintf("Receive control frames : %u\n",
	    stats->rx_control_frames);
	kprintf("Receive CRC errors : %u\n",
	    stats->rx_crcerrs);
	kprintf("Receive frames with length errors : %u\n",
	    stats->rx_lenerrs);
	kprintf("Receive good octets : %ju\n",
	    (uintmax_t)stats->rx_bytes);
	kprintf("Receive good broadcast octets : %ju\n",
	    (uintmax_t)stats->rx_bcast_bytes);
	kprintf("Receive good multicast octets : %ju\n",
	    (uintmax_t)stats->rx_mcast_bytes);
	kprintf("Receive frames too short : %u\n",
	    stats->rx_runts);
	kprintf("Receive fragmented frames : %ju\n",
	    (uintmax_t)stats->rx_fragments);
	kprintf("Receive frames 64 bytes : %ju\n",
	    (uintmax_t)stats->rx_pkts_64);
	kprintf("Receive frames 65 to 127 bytes : %ju\n",
	    (uintmax_t)stats->rx_pkts_65_127);
	kprintf("Receive frames 128 to 255 bytes : %ju\n",
	    (uintmax_t)stats->rx_pkts_128_255);
	kprintf("Receive frames 256 to 511 bytes : %ju\n",
	    (uintmax_t)stats->rx_pkts_256_511);
	kprintf("Receive frames 512 to 1023 bytes : %ju\n",
	    (uintmax_t)stats->rx_pkts_512_1023);
	kprintf("Receive frames 1024 to 1518 bytes : %ju\n",
	    (uintmax_t)stats->rx_pkts_1024_1518);
	kprintf("Receive frames 1519 to MTU bytes : %ju\n",
	    (uintmax_t)stats->rx_pkts_1519_max);
	kprintf("Receive frames too long : %ju\n",
	    (uintmax_t)stats->rx_pkts_truncated);
	kprintf("Receive frames with FIFO overflow : %u\n",
	    stats->rx_fifo_oflows);
	kprintf("Receive frames with return descriptor overflow : %u\n",
	    stats->rx_desc_oflows);
	kprintf("Receive frames with alignment errors : %u\n",
	    stats->rx_alignerrs);
	kprintf("Receive frames dropped due to address filtering : %ju\n",
	    (uintmax_t)stats->rx_pkts_filtered);

	return (error);
}

static int
sysctl_hw_age_int_mod(SYSCTL_HANDLER_ARGS)
{

	return (sysctl_int_range(oidp, arg1, arg2, req, AGE_IM_TIMER_MIN,
	    AGE_IM_TIMER_MAX));
}

static void
age_dmamap_buf_cb(void *xctx, bus_dma_segment_t *segs, int nsegs,
    bus_size_t mapsz __unused, int error)
{
	struct age_dmamap_ctx *ctx = xctx;
	int i;

	if (error)
		return;

	if (nsegs > ctx->nsegs) {
		ctx->nsegs = 0;
		return;
	}

	ctx->nsegs = nsegs;
	for (i = 0; i < nsegs; ++i)
		ctx->segs[i] = segs[i];
}