1 /*- 2 * Copyright (c) 2009, Pyun YongHyeon <yongari@FreeBSD.org> 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice unmodified, this list of conditions, and the following 10 * disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 25 * SUCH DAMAGE. 26 * 27 * $FreeBSD: src/sys/dev/alc/if_alc.c,v 1.6 2009/09/29 23:03:16 yongari Exp $ 28 */ 29 30 /* Driver for Atheros AR8131/AR8132 PCIe Ethernet. */ 31 32 #include <sys/param.h> 33 #include <sys/systm.h> 34 #include <sys/bus.h> 35 #include <sys/endian.h> 36 #include <sys/kernel.h> 37 #include <sys/lock.h> 38 #include <sys/malloc.h> 39 #include <sys/mbuf.h> 40 #include <sys/module.h> 41 #include <sys/spinlock.h> 42 #include <sys/rman.h> 43 #include <sys/queue.h> 44 #include <sys/socket.h> 45 #include <sys/sockio.h> 46 #include <sys/sysctl.h> 47 #include <sys/taskqueue.h> 48 49 #include <net/bpf.h> 50 #include <net/if.h> 51 #include <net/if_arp.h> 52 #include <net/ethernet.h> 53 #include <net/if_dl.h> 54 #include <net/if_llc.h> 55 #include <net/if_media.h> 56 #include <net/if_types.h> 57 #include <net/ifq_var.h> 58 #include <net/vlan/if_vlan_var.h> 59 #include <net/vlan/if_vlan_ether.h> 60 61 #include <netinet/in.h> 62 #include <netinet/in_systm.h> 63 #include <netinet/ip.h> 64 #include <netinet/tcp.h> 65 66 #include <dev/netif/mii_layer/mii.h> 67 #include <dev/netif/mii_layer/miivar.h> 68 69 #include <bus/pci/pcireg.h> 70 #include <bus/pci/pcivar.h> 71 72 #include <machine/atomic.h> 73 /* 74 XXX 75 #include <machine/bus.h> 76 #include <machine/in_cksum.h> 77 */ 78 79 #include "if_alcreg.h" 80 #include "if_alcvar.h" 81 82 /* "device miibus" required. See GENERIC if you get errors here. */ 83 #include "miibus_if.h" 84 #undef ALC_USE_CUSTOM_CSUM 85 86 #ifdef ALC_USE_CUSTOM_CSUM 87 #define ALC_CSUM_FEATURES (CSUM_TCP | CSUM_UDP) 88 #else 89 #define ALC_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP) 90 #endif 91 #ifndef IFCAP_VLAN_HWTSO 92 #define IFCAP_VLAN_HWTSO 0 93 #endif 94 95 MODULE_DEPEND(alc, pci, 1, 1, 1); 96 MODULE_DEPEND(alc, ether, 1, 1, 1); 97 MODULE_DEPEND(alc, miibus, 1, 1, 1); 98 99 /* Tunables. */ 100 static int msi_disable = 0; 101 static int msix_disable = 0; 102 TUNABLE_INT("hw.alc.msi_disable", &msi_disable); 103 TUNABLE_INT("hw.alc.msix_disable", &msix_disable); 104 105 /* 106 * Devices supported by this driver. 107 */ 108 109 static struct alc_ident alc_ident_table[] = { 110 { VENDORID_ATHEROS, DEVICEID_ATHEROS_AR8131, 9 * 1024, 111 "Atheros AR8131 PCIe Gigabit Ethernet" }, 112 { VENDORID_ATHEROS, DEVICEID_ATHEROS_AR8132, 9 * 1024, 113 "Atheros AR8132 PCIe Fast Ethernet" }, 114 { VENDORID_ATHEROS, DEVICEID_ATHEROS_AR8151, 6 * 1024, 115 "Atheros AR8151 v1.0 PCIe Gigabit Ethernet" }, 116 { VENDORID_ATHEROS, DEVICEID_ATHEROS_AR8151_V2, 6 * 1024, 117 "Atheros AR8151 v2.0 PCIe Gigabit Ethernet" }, 118 { VENDORID_ATHEROS, DEVICEID_ATHEROS_AR8152_B, 6 * 1024, 119 "Atheros AR8152 v1.1 PCIe Fast Ethernet" }, 120 { VENDORID_ATHEROS, DEVICEID_ATHEROS_AR8152_B2, 6 * 1024, 121 "Atheros AR8152 v2.0 PCIe Fast Ethernet" }, 122 { 0, 0, 0, NULL} 123 }; 124 125 static void alc_aspm(struct alc_softc *, int); 126 static int alc_attach(device_t); 127 static int alc_check_boundary(struct alc_softc *); 128 static int alc_detach(device_t); 129 static void alc_disable_l0s_l1(struct alc_softc *); 130 static int alc_dma_alloc(struct alc_softc *); 131 static void alc_dma_free(struct alc_softc *); 132 static void alc_dmamap_cb(void *, bus_dma_segment_t *, int, int); 133 static int alc_encap(struct alc_softc *, struct mbuf **); 134 static struct alc_ident *alc_find_ident(device_t); 135 #ifndef __NO_STRICT_ALIGNMENT 136 static struct mbuf * 137 alc_fixup_rx(struct ifnet *, struct mbuf *); 138 #endif 139 static void alc_get_macaddr(struct alc_softc *); 140 static void alc_init(void *); 141 static void alc_init_cmb(struct alc_softc *); 142 static void alc_init_locked(struct alc_softc *); 143 static void alc_init_rr_ring(struct alc_softc *); 144 static int alc_init_rx_ring(struct alc_softc *); 145 static void alc_init_smb(struct alc_softc *); 146 static void alc_init_tx_ring(struct alc_softc *); 147 static void alc_int_task(void *, int); 148 static void alc_intr(void *); 149 static int alc_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *); 150 static void alc_mac_config(struct alc_softc *); 151 static int alc_miibus_readreg(device_t, int, int); 152 static void alc_miibus_statchg(device_t); 153 static int alc_miibus_writereg(device_t, int, int, int); 154 static int alc_mediachange(struct ifnet *); 155 static void alc_mediastatus(struct ifnet *, struct ifmediareq *); 156 static int alc_newbuf(struct alc_softc *, struct alc_rxdesc *); 157 static void alc_phy_down(struct alc_softc *); 158 static void alc_phy_reset(struct alc_softc *); 159 static int alc_probe(device_t); 160 static void alc_reset(struct alc_softc *); 161 static int alc_resume(device_t); 162 static void alc_rxeof(struct alc_softc *, struct rx_rdesc *); 163 static int alc_rxintr(struct alc_softc *, int); 164 static void alc_rxfilter(struct alc_softc *); 165 static void alc_rxvlan(struct alc_softc *); 166 #if 0 167 static void alc_setlinkspeed(struct alc_softc *); 168 /* XXX: WOL */ 169 static void alc_setwol(struct alc_softc *); 170 #endif 171 static int alc_shutdown(device_t); 172 static void alc_start(struct ifnet *); 173 static void alc_start_queue(struct alc_softc *); 174 static void alc_stats_clear(struct alc_softc *); 175 static void alc_stats_update(struct alc_softc *); 176 static void alc_stop(struct alc_softc *); 177 static void alc_stop_mac(struct alc_softc *); 178 static void alc_stop_queue(struct alc_softc *); 179 static int alc_suspend(device_t); 180 static void alc_sysctl_node(struct alc_softc *); 181 static void alc_tick(void *); 182 static void alc_tx_task(void *, int); 183 static void alc_txeof(struct alc_softc *); 184 static void alc_watchdog(struct alc_softc *); 185 static int sysctl_hw_alc_proc_limit(SYSCTL_HANDLER_ARGS); 186 static int sysctl_hw_alc_int_mod(SYSCTL_HANDLER_ARGS); 187 188 static device_method_t alc_methods[] = { 189 /* Device interface. */ 190 DEVMETHOD(device_probe, alc_probe), 191 DEVMETHOD(device_attach, alc_attach), 192 DEVMETHOD(device_detach, alc_detach), 193 DEVMETHOD(device_shutdown, alc_shutdown), 194 DEVMETHOD(device_suspend, alc_suspend), 195 DEVMETHOD(device_resume, alc_resume), 196 197 /* MII interface. */ 198 DEVMETHOD(miibus_readreg, alc_miibus_readreg), 199 DEVMETHOD(miibus_writereg, alc_miibus_writereg), 200 DEVMETHOD(miibus_statchg, alc_miibus_statchg), 201 202 { NULL, NULL } 203 }; 204 205 static driver_t alc_driver = { 206 "alc", 207 alc_methods, 208 sizeof(struct alc_softc) 209 }; 210 211 static devclass_t alc_devclass; 212 213 DRIVER_MODULE(alc, pci, alc_driver, alc_devclass, NULL, NULL); 214 DRIVER_MODULE(miibus, alc, miibus_driver, miibus_devclass, NULL, NULL); 215 216 static struct resource_spec alc_res_spec_mem[] = { 217 { SYS_RES_MEMORY, PCIR_BAR(0), RF_ACTIVE }, 218 { -1, 0, 0 } 219 }; 220 221 static struct resource_spec alc_irq_spec_legacy[] = { 222 { SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE }, 223 { -1, 0, 0 } 224 }; 225 226 #ifdef OLD_MSI 227 static struct resource_spec alc_irq_spec_msi[] = { 228 { SYS_RES_IRQ, 1, RF_ACTIVE }, 229 { -1, 0, 0 } 230 }; 231 232 static struct resource_spec alc_irq_spec_msix[] = { 233 { SYS_RES_IRQ, 1, RF_ACTIVE }, 234 { -1, 0, 0 } 235 }; 236 #endif 237 238 static uint32_t alc_dma_burst[] = { 128, 256, 512, 1024, 2048, 4096, 0 }; 239 240 static int 241 alc_miibus_readreg(device_t dev, int phy, int reg) 242 { 243 struct alc_softc *sc; 244 uint32_t v; 245 int i; 246 247 sc = device_get_softc(dev); 248 249 if (phy != sc->alc_phyaddr) 250 return (0); 251 252 /* 253 * For AR8132 fast ethernet controller, do not report 1000baseT 254 * capability to mii(4). Even though AR8132 uses the same 255 * model/revision number of F1 gigabit PHY, the PHY has no 256 * ability to establish 1000baseT link. 257 */ 258 if ((sc->alc_flags & ALC_FLAG_FASTETHER) != 0 && 259 reg == MII_EXTSR) 260 return (0); 261 262 CSR_WRITE_4(sc, ALC_MDIO, MDIO_OP_EXECUTE | MDIO_OP_READ | 263 MDIO_SUP_PREAMBLE | MDIO_CLK_25_4 | MDIO_REG_ADDR(reg)); 264 for (i = ALC_PHY_TIMEOUT; i > 0; i--) { 265 DELAY(5); 266 v = CSR_READ_4(sc, ALC_MDIO); 267 if ((v & (MDIO_OP_EXECUTE | MDIO_OP_BUSY)) == 0) 268 break; 269 } 270 271 if (i == 0) { 272 device_printf(sc->alc_dev, "phy read timeout : %d\n", reg); 273 return (0); 274 } 275 276 return ((v & MDIO_DATA_MASK) >> MDIO_DATA_SHIFT); 277 } 278 279 static int 280 alc_miibus_writereg(device_t dev, int phy, int reg, int val) 281 { 282 struct alc_softc *sc; 283 uint32_t v; 284 int i; 285 286 sc = device_get_softc(dev); 287 288 if (phy != sc->alc_phyaddr) 289 return (0); 290 291 CSR_WRITE_4(sc, ALC_MDIO, MDIO_OP_EXECUTE | MDIO_OP_WRITE | 292 (val & MDIO_DATA_MASK) << MDIO_DATA_SHIFT | 293 MDIO_SUP_PREAMBLE | MDIO_CLK_25_4 | MDIO_REG_ADDR(reg)); 294 for (i = ALC_PHY_TIMEOUT; i > 0; i--) { 295 DELAY(5); 296 v = CSR_READ_4(sc, ALC_MDIO); 297 if ((v & (MDIO_OP_EXECUTE | MDIO_OP_BUSY)) == 0) 298 break; 299 } 300 301 if (i == 0) 302 device_printf(sc->alc_dev, "phy write timeout : %d\n", reg); 303 304 return (0); 305 } 306 307 static void 308 alc_miibus_statchg(device_t dev) 309 { 310 struct alc_softc *sc; 311 struct mii_data *mii; 312 struct ifnet *ifp; 313 uint32_t reg; 314 315 sc = device_get_softc(dev); 316 317 mii = device_get_softc(sc->alc_miibus); 318 ifp = sc->alc_ifp; 319 if (mii == NULL || ifp == NULL || 320 (ifp->if_flags & IFF_RUNNING) == 0) 321 return; 322 323 sc->alc_flags &= ~ALC_FLAG_LINK; 324 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) == 325 (IFM_ACTIVE | IFM_AVALID)) { 326 switch (IFM_SUBTYPE(mii->mii_media_active)) { 327 case IFM_10_T: 328 case IFM_100_TX: 329 sc->alc_flags |= ALC_FLAG_LINK; 330 break; 331 case IFM_1000_T: 332 if ((sc->alc_flags & ALC_FLAG_FASTETHER) == 0) 333 sc->alc_flags |= ALC_FLAG_LINK; 334 break; 335 default: 336 break; 337 } 338 } 339 alc_stop_queue(sc); 340 /* Stop Rx/Tx MACs. */ 341 alc_stop_mac(sc); 342 343 /* Program MACs with resolved speed/duplex/flow-control. */ 344 if ((sc->alc_flags & ALC_FLAG_LINK) != 0) { 345 alc_start_queue(sc); 346 alc_mac_config(sc); 347 /* Re-enable Tx/Rx MACs. */ 348 reg = CSR_READ_4(sc, ALC_MAC_CFG); 349 reg |= MAC_CFG_TX_ENB | MAC_CFG_RX_ENB; 350 CSR_WRITE_4(sc, ALC_MAC_CFG, reg); 351 } 352 alc_aspm(sc, IFM_SUBTYPE(mii->mii_media_active)); 353 } 354 355 static void 356 alc_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr) 357 { 358 struct alc_softc *sc; 359 struct mii_data *mii; 360 361 sc = ifp->if_softc; 362 ALC_LOCK(sc); 363 if ((ifp->if_flags & IFF_UP) == 0) { 364 ALC_UNLOCK(sc); 365 return; 366 } 367 mii = device_get_softc(sc->alc_miibus); 368 369 mii_pollstat(mii); 370 ALC_UNLOCK(sc); 371 ifmr->ifm_status = mii->mii_media_status; 372 ifmr->ifm_active = mii->mii_media_active; 373 } 374 375 static int 376 alc_mediachange(struct ifnet *ifp) 377 { 378 struct alc_softc *sc; 379 struct mii_data *mii; 380 struct mii_softc *miisc; 381 int error; 382 383 sc = ifp->if_softc; 384 ALC_LOCK(sc); 385 mii = device_get_softc(sc->alc_miibus); 386 if (mii->mii_instance != 0) { 387 LIST_FOREACH(miisc, &mii->mii_phys, mii_list) 388 mii_phy_reset(miisc); 389 } 390 error = mii_mediachg(mii); 391 ALC_UNLOCK(sc); 392 393 return (error); 394 } 395 396 static struct alc_ident * 397 alc_find_ident(device_t dev) 398 { 399 struct alc_ident *ident; 400 uint16_t vendor, devid; 401 402 vendor = pci_get_vendor(dev); 403 devid = pci_get_device(dev); 404 for (ident = alc_ident_table; ident->name != NULL; ident++) { 405 if (vendor == ident->vendorid && devid == ident->deviceid) 406 return (ident); 407 } 408 return (NULL); 409 } 410 411 static int 412 alc_probe(device_t dev) 413 { 414 struct alc_ident *ident; 415 416 ident = alc_find_ident(dev); 417 if (ident != NULL) { 418 device_set_desc(dev, ident->name); 419 return (BUS_PROBE_DEFAULT); 420 } 421 return (ENXIO); 422 } 423 424 static void 425 alc_get_macaddr(struct alc_softc *sc) 426 { 427 uint32_t ea[2], opt; 428 uint16_t val; 429 int eeprom, i; 430 431 eeprom = 0; 432 opt = CSR_READ_4(sc, ALC_OPT_CFG); 433 if ((CSR_READ_4(sc, ALC_MASTER_CFG) & MASTER_OTP_SEL) != 0 && 434 (CSR_READ_4(sc, ALC_TWSI_DEBUG) & TWSI_DEBUG_DEV_EXIST) != 0) { 435 /* 436 * EEPROM found, let TWSI reload EEPROM configuration. 437 * This will set ethernet address of controller. 438 */ 439 eeprom++; 440 switch (sc->alc_ident->deviceid) { 441 case DEVICEID_ATHEROS_AR8131: 442 case DEVICEID_ATHEROS_AR8132: 443 if ((opt & OPT_CFG_CLK_ENB) == 0) { 444 opt |= OPT_CFG_CLK_ENB; 445 CSR_WRITE_4(sc, ALC_OPT_CFG, opt); 446 CSR_READ_4(sc, ALC_OPT_CFG); 447 DELAY(1000); 448 } 449 break; 450 case DEVICEID_ATHEROS_AR8151: 451 case DEVICEID_ATHEROS_AR8151_V2: 452 case DEVICEID_ATHEROS_AR8152_B: 453 case DEVICEID_ATHEROS_AR8152_B2: 454 alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr, 455 ALC_MII_DBG_ADDR, 0x00); 456 val = alc_miibus_readreg(sc->alc_dev, sc->alc_phyaddr, 457 ALC_MII_DBG_DATA); 458 alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr, 459 ALC_MII_DBG_DATA, val & 0xFF7F); 460 alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr, 461 ALC_MII_DBG_ADDR, 0x3B); 462 val = alc_miibus_readreg(sc->alc_dev, sc->alc_phyaddr, 463 ALC_MII_DBG_DATA); 464 alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr, 465 ALC_MII_DBG_DATA, val | 0x0008); 466 DELAY(20); 467 break; 468 } 469 470 CSR_WRITE_4(sc, ALC_LTSSM_ID_CFG, 471 CSR_READ_4(sc, ALC_LTSSM_ID_CFG) & ~LTSSM_ID_WRO_ENB); 472 CSR_WRITE_4(sc, ALC_WOL_CFG, 0); 473 CSR_READ_4(sc, ALC_WOL_CFG); 474 475 CSR_WRITE_4(sc, ALC_TWSI_CFG, CSR_READ_4(sc, ALC_TWSI_CFG) | 476 TWSI_CFG_SW_LD_START); 477 478 for (i = 100; i > 0; i--) { 479 DELAY(1000); 480 if ((CSR_READ_4(sc, ALC_TWSI_CFG) & 481 TWSI_CFG_SW_LD_START) == 0) 482 break; 483 } 484 if (i == 0) 485 device_printf(sc->alc_dev, 486 "reloading EEPROM timeout!\n"); 487 } else { 488 if (bootverbose) 489 device_printf(sc->alc_dev, "EEPROM not found!\n"); 490 } 491 492 if (eeprom != 0) { 493 switch (sc->alc_ident->deviceid) { 494 case DEVICEID_ATHEROS_AR8131: 495 case DEVICEID_ATHEROS_AR8132: 496 if ((opt & OPT_CFG_CLK_ENB) != 0) { 497 opt &= ~OPT_CFG_CLK_ENB; 498 CSR_WRITE_4(sc, ALC_OPT_CFG, opt); 499 CSR_READ_4(sc, ALC_OPT_CFG); 500 DELAY(1000); 501 } 502 break; 503 case DEVICEID_ATHEROS_AR8151: 504 case DEVICEID_ATHEROS_AR8151_V2: 505 case DEVICEID_ATHEROS_AR8152_B: 506 case DEVICEID_ATHEROS_AR8152_B2: 507 alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr, 508 ALC_MII_DBG_ADDR, 0x00); 509 val = alc_miibus_readreg(sc->alc_dev, sc->alc_phyaddr, 510 ALC_MII_DBG_DATA); 511 alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr, 512 ALC_MII_DBG_DATA, val | 0x0080); 513 alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr, 514 ALC_MII_DBG_ADDR, 0x3B); 515 val = alc_miibus_readreg(sc->alc_dev, sc->alc_phyaddr, 516 ALC_MII_DBG_DATA); 517 alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr, 518 ALC_MII_DBG_DATA, val & 0xFFF7); 519 DELAY(20); 520 break; 521 } 522 } 523 524 ea[0] = CSR_READ_4(sc, ALC_PAR0); 525 ea[1] = CSR_READ_4(sc, ALC_PAR1); 526 sc->alc_eaddr[0] = (ea[1] >> 8) & 0xFF; 527 sc->alc_eaddr[1] = (ea[1] >> 0) & 0xFF; 528 sc->alc_eaddr[2] = (ea[0] >> 24) & 0xFF; 529 sc->alc_eaddr[3] = (ea[0] >> 16) & 0xFF; 530 sc->alc_eaddr[4] = (ea[0] >> 8) & 0xFF; 531 sc->alc_eaddr[5] = (ea[0] >> 0) & 0xFF; 532 } 533 534 static void 535 alc_disable_l0s_l1(struct alc_softc *sc) 536 { 537 uint32_t pmcfg; 538 539 /* Another magic from vendor. */ 540 pmcfg = CSR_READ_4(sc, ALC_PM_CFG); 541 pmcfg &= ~(PM_CFG_L1_ENTRY_TIMER_MASK | PM_CFG_CLK_SWH_L1 | 542 PM_CFG_ASPM_L0S_ENB | PM_CFG_ASPM_L1_ENB | PM_CFG_MAC_ASPM_CHK | 543 PM_CFG_SERDES_PD_EX_L1); 544 pmcfg |= PM_CFG_SERDES_BUDS_RX_L1_ENB | PM_CFG_SERDES_PLL_L1_ENB | 545 PM_CFG_SERDES_L1_ENB; 546 CSR_WRITE_4(sc, ALC_PM_CFG, pmcfg); 547 } 548 549 static void 550 alc_phy_reset(struct alc_softc *sc) 551 { 552 uint16_t data; 553 554 /* Reset magic from Linux. */ 555 CSR_WRITE_2(sc, ALC_GPHY_CFG, 556 GPHY_CFG_HIB_EN | GPHY_CFG_HIB_PULSE | GPHY_CFG_SEL_ANA_RESET); 557 CSR_READ_2(sc, ALC_GPHY_CFG); 558 DELAY(10 * 1000); 559 560 CSR_WRITE_2(sc, ALC_GPHY_CFG, 561 GPHY_CFG_EXT_RESET | GPHY_CFG_HIB_EN | GPHY_CFG_HIB_PULSE | 562 GPHY_CFG_SEL_ANA_RESET); 563 CSR_READ_2(sc, ALC_GPHY_CFG); 564 DELAY(10 * 1000); 565 566 /* DSP fixup, Vendor magic. */ 567 if (sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8152_B) { 568 alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr, 569 ALC_MII_DBG_ADDR, 0x000A); 570 data = alc_miibus_readreg(sc->alc_dev, sc->alc_phyaddr, 571 ALC_MII_DBG_DATA); 572 alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr, 573 ALC_MII_DBG_DATA, data & 0xDFFF); 574 } 575 if (sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8151 || 576 sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8151_V2 || 577 sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8152_B || 578 sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8152_B2) { 579 alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr, 580 ALC_MII_DBG_ADDR, 0x003B); 581 data = alc_miibus_readreg(sc->alc_dev, sc->alc_phyaddr, 582 ALC_MII_DBG_DATA); 583 alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr, 584 ALC_MII_DBG_DATA, data & 0xFFF7); 585 DELAY(20 * 1000); 586 } 587 if (sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8151) { 588 alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr, 589 ALC_MII_DBG_ADDR, 0x0029); 590 alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr, 591 ALC_MII_DBG_DATA, 0x929D); 592 } 593 if (sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8131 || 594 sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8132 || 595 sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8151_V2 || 596 sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8152_B2) { 597 alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr, 598 ALC_MII_DBG_ADDR, 0x0029); 599 alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr, 600 ALC_MII_DBG_DATA, 0xB6DD); 601 } 602 603 /* Load DSP codes, vendor magic. */ 604 data = ANA_LOOP_SEL_10BT | ANA_EN_MASK_TB | ANA_EN_10BT_IDLE | 605 ((1 << ANA_INTERVAL_SEL_TIMER_SHIFT) & ANA_INTERVAL_SEL_TIMER_MASK); 606 alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr, 607 ALC_MII_DBG_ADDR, MII_ANA_CFG18); 608 alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr, 609 ALC_MII_DBG_DATA, data); 610 611 data = ((2 << ANA_SERDES_CDR_BW_SHIFT) & ANA_SERDES_CDR_BW_MASK) | 612 ANA_SERDES_EN_DEEM | ANA_SERDES_SEL_HSP | ANA_SERDES_EN_PLL | 613 ANA_SERDES_EN_LCKDT; 614 alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr, 615 ALC_MII_DBG_ADDR, MII_ANA_CFG5); 616 alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr, 617 ALC_MII_DBG_DATA, data); 618 619 data = ((44 << ANA_LONG_CABLE_TH_100_SHIFT) & 620 ANA_LONG_CABLE_TH_100_MASK) | 621 ((33 << ANA_SHORT_CABLE_TH_100_SHIFT) & 622 ANA_SHORT_CABLE_TH_100_SHIFT) | 623 ANA_BP_BAD_LINK_ACCUM | ANA_BP_SMALL_BW; 624 alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr, 625 ALC_MII_DBG_ADDR, MII_ANA_CFG54); 626 alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr, 627 ALC_MII_DBG_DATA, data); 628 629 data = ((11 << ANA_IECHO_ADJ_3_SHIFT) & ANA_IECHO_ADJ_3_MASK) | 630 ((11 << ANA_IECHO_ADJ_2_SHIFT) & ANA_IECHO_ADJ_2_MASK) | 631 ((8 << ANA_IECHO_ADJ_1_SHIFT) & ANA_IECHO_ADJ_1_MASK) | 632 ((8 << ANA_IECHO_ADJ_0_SHIFT) & ANA_IECHO_ADJ_0_MASK); 633 alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr, 634 ALC_MII_DBG_ADDR, MII_ANA_CFG4); 635 alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr, 636 ALC_MII_DBG_DATA, data); 637 638 data = ((7 & ANA_MANUL_SWICH_ON_SHIFT) & ANA_MANUL_SWICH_ON_MASK) | 639 ANA_RESTART_CAL | ANA_MAN_ENABLE | ANA_SEL_HSP | ANA_EN_HB | 640 ANA_OEN_125M; 641 alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr, 642 ALC_MII_DBG_ADDR, MII_ANA_CFG0); 643 alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr, 644 ALC_MII_DBG_DATA, data); 645 DELAY(1000); 646 } 647 648 static void 649 alc_phy_down(struct alc_softc *sc) 650 { 651 switch (sc->alc_ident->deviceid) { 652 case DEVICEID_ATHEROS_AR8151: 653 case DEVICEID_ATHEROS_AR8151_V2: 654 /* 655 * GPHY power down caused more problems on AR8151 v2.0. 656 * When driver is reloaded after GPHY power down, 657 * accesses to PHY/MAC registers hung the system. Only 658 * cold boot recovered from it. I'm not sure whether 659 * AR8151 v1.0 also requires this one though. I don't 660 * have AR8151 v1.0 controller in hand. 661 * The only option left is to isolate the PHY and 662 * initiates power down the PHY which in turn saves 663 * more power when driver is unloaded. 664 */ 665 alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr, 666 MII_BMCR, BMCR_ISO | BMCR_PDOWN); 667 break; 668 default: 669 /* Force PHY down. */ 670 CSR_WRITE_2(sc, ALC_GPHY_CFG, 671 GPHY_CFG_EXT_RESET | GPHY_CFG_HIB_EN | GPHY_CFG_HIB_PULSE | 672 GPHY_CFG_SEL_ANA_RESET | GPHY_CFG_PHY_IDDQ | 673 GPHY_CFG_PWDOWN_HW); 674 DELAY(1000); 675 break; 676 } 677 678 } 679 680 static void 681 alc_aspm(struct alc_softc *sc, int media) 682 { 683 uint32_t pmcfg; 684 uint16_t linkcfg; 685 686 ALC_LOCK_ASSERT(sc); 687 688 pmcfg = CSR_READ_4(sc, ALC_PM_CFG); 689 if ((sc->alc_flags & (ALC_FLAG_APS | ALC_FLAG_PCIE)) == 690 (ALC_FLAG_APS | ALC_FLAG_PCIE)) { 691 linkcfg = CSR_READ_2(sc, sc->alc_expcap + 692 PCIR_EXPRESS_LINK_CTL); 693 } else { 694 linkcfg = 0; 695 } 696 697 pmcfg &= ~PM_CFG_SERDES_PD_EX_L1; 698 pmcfg &= ~(PM_CFG_L1_ENTRY_TIMER_MASK | PM_CFG_LCKDET_TIMER_MASK); 699 pmcfg |= PM_CFG_MAC_ASPM_CHK; 700 pmcfg |= PM_CFG_SERDES_ENB | PM_CFG_RBER_ENB; 701 pmcfg &= ~(PM_CFG_ASPM_L1_ENB | PM_CFG_ASPM_L0S_ENB); 702 703 if ((sc->alc_flags & ALC_FLAG_APS) != 0) { 704 /* Disable extended sync except AR8152 B v1.0 */ 705 linkcfg &= ~0x80; 706 if (sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8152_B && 707 sc->alc_rev == ATHEROS_AR8152_B_V10) 708 linkcfg |= 0x80; 709 CSR_WRITE_2(sc, sc->alc_expcap + PCIR_EXPRESS_LINK_CTL, 710 linkcfg); 711 pmcfg &= ~(PM_CFG_EN_BUFS_RX_L0S | PM_CFG_SA_DLY_ENB | 712 PM_CFG_HOTRST); 713 pmcfg |= (PM_CFG_L1_ENTRY_TIMER_DEFAULT << 714 PM_CFG_L1_ENTRY_TIMER_SHIFT); 715 pmcfg &= ~PM_CFG_PM_REQ_TIMER_MASK; 716 pmcfg |= (PM_CFG_PM_REQ_TIMER_DEFAULT << 717 PM_CFG_PM_REQ_TIMER_SHIFT); 718 pmcfg |= PM_CFG_SERDES_PD_EX_L1 | PM_CFG_PCIE_RECV; 719 } 720 721 if ((sc->alc_flags & ALC_FLAG_LINK) != 0) { 722 if ((sc->alc_flags & ALC_FLAG_L0S) != 0) 723 pmcfg |= PM_CFG_ASPM_L0S_ENB; 724 if ((sc->alc_flags & ALC_FLAG_L1S) != 0) 725 pmcfg |= PM_CFG_ASPM_L1_ENB; 726 if ((sc->alc_flags & ALC_FLAG_APS) != 0) { 727 if (sc->alc_ident->deviceid == 728 DEVICEID_ATHEROS_AR8152_B) { 729 pmcfg &= ~PM_CFG_ASPM_L0S_ENB; 730 } 731 pmcfg &= ~(PM_CFG_SERDES_L1_ENB | 732 PM_CFG_SERDES_PLL_L1_ENB | 733 PM_CFG_SERDES_BUDS_RX_L1_ENB); 734 pmcfg |= PM_CFG_CLK_SWH_L1; 735 if (media == IFM_100_TX || media == IFM_1000_T) { 736 pmcfg &= ~PM_CFG_L1_ENTRY_TIMER_MASK; 737 switch (sc->alc_ident->deviceid) { 738 case DEVICEID_ATHEROS_AR8152_B: 739 pmcfg |= (7 << 740 PM_CFG_L1_ENTRY_TIMER_SHIFT); 741 break; 742 case DEVICEID_ATHEROS_AR8152_B2: 743 case DEVICEID_ATHEROS_AR8151_V2: 744 pmcfg |= (4 << 745 PM_CFG_L1_ENTRY_TIMER_SHIFT); 746 break; 747 default: 748 pmcfg |= (15 << 749 PM_CFG_L1_ENTRY_TIMER_SHIFT); 750 break; 751 } 752 } 753 } else { 754 pmcfg |= PM_CFG_SERDES_L1_ENB | 755 PM_CFG_SERDES_PLL_L1_ENB | 756 PM_CFG_SERDES_BUDS_RX_L1_ENB; 757 pmcfg &= ~(PM_CFG_CLK_SWH_L1 | 758 PM_CFG_ASPM_L1_ENB | PM_CFG_ASPM_L0S_ENB); 759 } 760 } else { 761 pmcfg &= ~(PM_CFG_SERDES_BUDS_RX_L1_ENB | PM_CFG_SERDES_L1_ENB | 762 PM_CFG_SERDES_PLL_L1_ENB); 763 pmcfg |= PM_CFG_CLK_SWH_L1; 764 if ((sc->alc_flags & ALC_FLAG_L1S) != 0) 765 pmcfg |= PM_CFG_ASPM_L1_ENB; 766 } 767 CSR_WRITE_4(sc, ALC_PM_CFG, pmcfg); 768 } 769 770 static int 771 alc_attach(device_t dev) 772 { 773 struct alc_softc *sc; 774 struct ifnet *ifp; 775 char *aspm_state[] = { "L0s/L1", "L0s", "L1", "L0s/L1" }; 776 uint16_t burst; 777 int base, error, i, msic, msixc, state; 778 uint32_t cap, ctl, val; 779 780 error = 0; 781 sc = device_get_softc(dev); 782 sc->alc_dev = dev; 783 784 lockinit(&sc->alc_lock, "alc_lock", 0, LK_CANRECURSE); 785 callout_init_mp(&sc->alc_tick_ch); 786 TASK_INIT(&sc->alc_int_task, 0, alc_int_task, sc); 787 sc->alc_ident = alc_find_ident(dev); 788 789 /* Map the device. */ 790 pci_enable_busmaster(dev); 791 sc->alc_res_spec = alc_res_spec_mem; 792 sc->alc_irq_spec = alc_irq_spec_legacy; 793 error = bus_alloc_resources(dev, sc->alc_res_spec, sc->alc_res); 794 if (error != 0) { 795 device_printf(dev, "cannot allocate memory resources.\n"); 796 goto fail; 797 } 798 799 /* Set PHY address. */ 800 sc->alc_phyaddr = ALC_PHY_ADDR; 801 802 /* Initialize DMA parameters. */ 803 sc->alc_dma_rd_burst = 0; 804 sc->alc_dma_wr_burst = 0; 805 sc->alc_rcb = DMA_CFG_RCB_64; 806 if (pci_find_extcap(dev, PCIY_EXPRESS, &base) == 0) { 807 sc->alc_flags |= ALC_FLAG_PCIE; 808 sc->alc_expcap = base; 809 burst = CSR_READ_2(sc, base + PCIR_EXPRESS_DEVICE_CTL); 810 sc->alc_dma_rd_burst = 811 (burst & PCIM_EXP_CTL_MAX_READ_REQUEST) >> 12; 812 sc->alc_dma_wr_burst = (burst & PCIM_EXP_CTL_MAX_PAYLOAD) >> 5; 813 if (bootverbose) { 814 device_printf(dev, "Read request size : %u bytes.\n", 815 alc_dma_burst[sc->alc_dma_rd_burst]); 816 device_printf(dev, "TLP payload size : %u bytes.\n", 817 alc_dma_burst[sc->alc_dma_wr_burst]); 818 } 819 if (alc_dma_burst[sc->alc_dma_rd_burst] > 1024) 820 sc->alc_dma_rd_burst = 3; 821 if (alc_dma_burst[sc->alc_dma_wr_burst] > 1024) 822 sc->alc_dma_wr_burst = 3; 823 /* Clear data link and flow-control protocol error. */ 824 val = CSR_READ_4(sc, ALC_PEX_UNC_ERR_SEV); 825 val &= ~(PEX_UNC_ERR_SEV_DLP | PEX_UNC_ERR_SEV_FCP); 826 CSR_WRITE_4(sc, ALC_PEX_UNC_ERR_SEV, val); 827 CSR_WRITE_4(sc, ALC_LTSSM_ID_CFG, 828 CSR_READ_4(sc, ALC_LTSSM_ID_CFG) & ~LTSSM_ID_WRO_ENB); 829 CSR_WRITE_4(sc, ALC_PCIE_PHYMISC, 830 CSR_READ_4(sc, ALC_PCIE_PHYMISC) | 831 PCIE_PHYMISC_FORCE_RCV_DET); 832 if (sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8152_B && 833 sc->alc_rev == ATHEROS_AR8152_B_V10) { 834 val = CSR_READ_4(sc, ALC_PCIE_PHYMISC2); 835 val &= ~(PCIE_PHYMISC2_SERDES_CDR_MASK | 836 PCIE_PHYMISC2_SERDES_TH_MASK); 837 val |= 3 << PCIE_PHYMISC2_SERDES_CDR_SHIFT; 838 val |= 3 << PCIE_PHYMISC2_SERDES_TH_SHIFT; 839 CSR_WRITE_4(sc, ALC_PCIE_PHYMISC2, val); 840 } 841 842 /* Disable ASPM L0S and L1. */ 843 cap = CSR_READ_2(sc, base + PCIR_EXPRESS_LINK_CAP); 844 if ((cap & PCIM_LINK_CAP_ASPM) != 0) { 845 ctl = CSR_READ_2(sc, base + PCIR_EXPRESS_LINK_CTL); 846 if ((ctl & 0x08) != 0) 847 sc->alc_rcb = DMA_CFG_RCB_128; 848 if (bootverbose) 849 device_printf(dev, "RCB %u bytes\n", 850 sc->alc_rcb == DMA_CFG_RCB_64 ? 64 : 128); 851 state = ctl & 0x03; 852 if (state & 0x01) 853 sc->alc_flags |= ALC_FLAG_L0S; 854 if (state & 0x02) 855 sc->alc_flags |= ALC_FLAG_L1S; 856 if (bootverbose) 857 device_printf(sc->alc_dev, "ASPM %s %s\n", 858 aspm_state[state], 859 state == 0 ? "disabled" : "enabled"); 860 alc_disable_l0s_l1(sc); 861 } else { 862 if (bootverbose) 863 device_printf(sc->alc_dev, "no ASPM support\n"); 864 } 865 } 866 867 /* Reset PHY. */ 868 alc_phy_reset(sc); 869 870 /* Reset the ethernet controller. */ 871 alc_reset(sc); 872 873 /* 874 * One odd thing is AR8132 uses the same PHY hardware(F1 875 * gigabit PHY) of AR8131. So atphy(4) of AR8132 reports 876 * the PHY supports 1000Mbps but that's not true. The PHY 877 * used in AR8132 can't establish gigabit link even if it 878 * shows the same PHY model/revision number of AR8131. 879 */ 880 switch (sc->alc_ident->deviceid) { 881 case DEVICEID_ATHEROS_AR8152_B: 882 case DEVICEID_ATHEROS_AR8152_B2: 883 sc->alc_flags |= ALC_FLAG_APS; 884 /* FALLTHROUGH */ 885 case DEVICEID_ATHEROS_AR8132: 886 sc->alc_flags |= ALC_FLAG_FASTETHER; 887 break; 888 case DEVICEID_ATHEROS_AR8151: 889 case DEVICEID_ATHEROS_AR8151_V2: 890 sc->alc_flags |= ALC_FLAG_APS; 891 /* FALLTHROUGH */ 892 default: 893 break; 894 } 895 sc->alc_flags |= ALC_FLAG_ASPM_MON | ALC_FLAG_JUMBO; 896 897 /* 898 * It seems that AR813x/AR815x has silicon bug for SMB. In 899 * addition, Atheros said that enabling SMB wouldn't improve 900 * performance. However I think it's bad to access lots of 901 * registers to extract MAC statistics. 902 */ 903 sc->alc_flags |= ALC_FLAG_SMB_BUG; 904 905 /* 906 * Don't use Tx CMB. It is known to have silicon bug. 907 */ 908 sc->alc_flags |= ALC_FLAG_CMB_BUG; 909 sc->alc_rev = pci_get_revid(dev); 910 sc->alc_chip_rev = CSR_READ_4(sc, ALC_MASTER_CFG) >> 911 MASTER_CHIP_REV_SHIFT; 912 if (bootverbose) { 913 device_printf(dev, "PCI device revision : 0x%04x\n", 914 sc->alc_rev); 915 device_printf(dev, "Chip id/revision : 0x%04x\n", 916 sc->alc_chip_rev); 917 } 918 device_printf(dev, "%u Tx FIFO, %u Rx FIFO\n", 919 CSR_READ_4(sc, ALC_SRAM_TX_FIFO_LEN) * 8, 920 CSR_READ_4(sc, ALC_SRAM_RX_FIFO_LEN) * 8); 921 922 /* Allocate IRQ resources. */ 923 msixc = pci_msix_count(dev); 924 msic = pci_msi_count(dev); 925 if (bootverbose) { 926 device_printf(dev, "MSIX count : %d\n", msixc); 927 device_printf(dev, "MSI count : %d\n", msic); 928 } 929 930 #ifdef OLD_MSI 931 /* Prefer MSIX over MSI. */ 932 if (msix_disable == 0 || msi_disable == 0) { 933 if (msix_disable == 0 && msixc == ALC_MSIX_MESSAGES && 934 pci_alloc_msix(dev, &msixc) == 0) { 935 if (msic == ALC_MSIX_MESSAGES) { 936 device_printf(dev, 937 "Using %d MSIX message(s).\n", msixc); 938 sc->alc_flags |= ALC_FLAG_MSIX; 939 sc->alc_irq_spec = alc_irq_spec_msix; 940 } else 941 pci_release_msi(dev); 942 } 943 if (msi_disable == 0 && (sc->alc_flags & ALC_FLAG_MSIX) == 0 && 944 msic == ALC_MSI_MESSAGES && 945 pci_alloc_msi(dev, &msic) == 0) { 946 if (msic == ALC_MSI_MESSAGES) { 947 device_printf(dev, 948 "Using %d MSI message(s).\n", msic); 949 sc->alc_flags |= ALC_FLAG_MSI; 950 sc->alc_irq_spec = alc_irq_spec_msi; 951 } else 952 pci_release_msi(dev); 953 } 954 } 955 #endif 956 957 error = bus_alloc_resources(dev, sc->alc_irq_spec, sc->alc_irq); 958 if (error != 0) { 959 device_printf(dev, "cannot allocate IRQ resources.\n"); 960 goto fail; 961 } 962 963 /* Create device sysctl node. */ 964 alc_sysctl_node(sc); 965 966 if ((error = alc_dma_alloc(sc) != 0)) 967 goto fail; 968 969 /* Load station address. */ 970 alc_get_macaddr(sc); 971 972 ifp = sc->alc_ifp = &sc->arpcom.ac_if; 973 ifp->if_softc = sc; 974 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 975 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 976 ifp->if_ioctl = alc_ioctl; 977 ifp->if_start = alc_start; 978 ifp->if_init = alc_init; 979 ifp->if_snd.ifq_maxlen = ALC_TX_RING_CNT - 1; 980 ifq_set_maxlen(&ifp->if_snd, ifp->if_snd.ifq_maxlen); 981 ifq_set_ready(&ifp->if_snd); 982 ifp->if_capabilities = IFCAP_TXCSUM | IFCAP_TSO4; 983 ifp->if_hwassist = ALC_CSUM_FEATURES | CSUM_TSO; 984 #if 0 985 /* XXX: WOL */ 986 if (pci_find_extcap(dev, PCIY_PMG, &pmc) == 0) { 987 ifp->if_capabilities |= IFCAP_WOL_MAGIC | IFCAP_WOL_MCAST; 988 sc->alc_flags |= ALC_FLAG_PM; 989 sc->alc_pmcap = base; 990 } 991 #endif 992 ifp->if_capenable = ifp->if_capabilities; 993 994 /* Set up MII bus. */ 995 if ((error = mii_phy_probe(dev, &sc->alc_miibus, alc_mediachange, 996 alc_mediastatus)) != 0) { 997 device_printf(dev, "no PHY found!\n"); 998 goto fail; 999 } 1000 1001 ether_ifattach(ifp, sc->alc_eaddr, NULL); 1002 1003 /* VLAN capability setup. */ 1004 ifp->if_capabilities |= IFCAP_VLAN_MTU; 1005 ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWCSUM; 1006 ifp->if_capenable = ifp->if_capabilities; 1007 /* 1008 * XXX 1009 * It seems enabling Tx checksum offloading makes more trouble. 1010 * Sometimes the controller does not receive any frames when 1011 * Tx checksum offloading is enabled. I'm not sure whether this 1012 * is a bug in Tx checksum offloading logic or I got broken 1013 * sample boards. To safety, don't enable Tx checksum offloading 1014 * by default but give chance to users to toggle it if they know 1015 * their controllers work without problems. 1016 */ 1017 ifp->if_capenable &= ~IFCAP_TXCSUM; 1018 ifp->if_hwassist &= ~ALC_CSUM_FEATURES; 1019 1020 /* Tell the upper layer(s) we support long frames. */ 1021 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header); 1022 1023 /* Create local taskq. */ 1024 TASK_INIT(&sc->alc_tx_task, 1, alc_tx_task, ifp); 1025 sc->alc_tq = taskqueue_create("alc_taskq", M_WAITOK, 1026 taskqueue_thread_enqueue, &sc->alc_tq); 1027 if (sc->alc_tq == NULL) { 1028 device_printf(dev, "could not create taskqueue.\n"); 1029 ether_ifdetach(ifp); 1030 error = ENXIO; 1031 goto fail; 1032 } 1033 taskqueue_start_threads(&sc->alc_tq, 1, TDPRI_KERN_DAEMON, -1, "%s taskq", 1034 device_get_nameunit(sc->alc_dev)); 1035 1036 if ((sc->alc_flags & ALC_FLAG_MSIX) != 0) 1037 msic = ALC_MSIX_MESSAGES; 1038 else if ((sc->alc_flags & ALC_FLAG_MSI) != 0) 1039 msic = ALC_MSI_MESSAGES; 1040 else 1041 msic = 1; 1042 for (i = 0; i < msic; i++) { 1043 error = bus_setup_intr(dev, sc->alc_irq[i], INTR_MPSAFE, 1044 alc_intr, sc, 1045 &sc->alc_intrhand[i], NULL); 1046 if (error != 0) 1047 break; 1048 } 1049 if (error != 0) { 1050 device_printf(dev, "could not set up interrupt handler.\n"); 1051 taskqueue_free(sc->alc_tq); 1052 sc->alc_tq = NULL; 1053 ether_ifdetach(ifp); 1054 goto fail; 1055 } 1056 1057 fail: 1058 if (error != 0) 1059 alc_detach(dev); 1060 1061 return (error); 1062 } 1063 1064 static int 1065 alc_detach(device_t dev) 1066 { 1067 struct alc_softc *sc; 1068 struct ifnet *ifp; 1069 int i, msic; 1070 1071 sc = device_get_softc(dev); 1072 1073 ifp = sc->alc_ifp; 1074 if (device_is_attached(dev)) { 1075 ALC_LOCK(sc); 1076 sc->alc_flags |= ALC_FLAG_DETACH; 1077 alc_stop(sc); 1078 ALC_UNLOCK(sc); 1079 #if 0 1080 /* XXX */ 1081 callout_drain(&sc->alc_tick_ch); 1082 #endif 1083 taskqueue_drain(sc->alc_tq, &sc->alc_int_task); 1084 taskqueue_drain(sc->alc_tq, &sc->alc_tx_task); 1085 ether_ifdetach(ifp); 1086 } 1087 1088 if (sc->alc_tq != NULL) { 1089 taskqueue_drain(sc->alc_tq, &sc->alc_int_task); 1090 taskqueue_free(sc->alc_tq); 1091 sc->alc_tq = NULL; 1092 } 1093 1094 if (sc->alc_miibus != NULL) { 1095 device_delete_child(dev, sc->alc_miibus); 1096 sc->alc_miibus = NULL; 1097 } 1098 bus_generic_detach(dev); 1099 alc_dma_free(sc); 1100 1101 if (ifp != NULL) { 1102 // XXX? if_free(ifp); 1103 sc->alc_ifp = NULL; 1104 } 1105 1106 if ((sc->alc_flags & ALC_FLAG_MSIX) != 0) 1107 msic = ALC_MSIX_MESSAGES; 1108 else if ((sc->alc_flags & ALC_FLAG_MSI) != 0) 1109 msic = ALC_MSI_MESSAGES; 1110 else 1111 msic = 1; 1112 for (i = 0; i < msic; i++) { 1113 if (sc->alc_intrhand[i] != NULL) { 1114 bus_teardown_intr(dev, sc->alc_irq[i], 1115 sc->alc_intrhand[i]); 1116 sc->alc_intrhand[i] = NULL; 1117 } 1118 } 1119 if (sc->alc_res[0] != NULL) 1120 alc_phy_down(sc); 1121 bus_release_resources(dev, sc->alc_irq_spec, sc->alc_irq); 1122 if ((sc->alc_flags & (ALC_FLAG_MSI | ALC_FLAG_MSIX)) != 0) 1123 pci_release_msi(dev); 1124 bus_release_resources(dev, sc->alc_res_spec, sc->alc_res); 1125 lockuninit(&sc->alc_lock); 1126 1127 return (0); 1128 } 1129 1130 #define ALC_SYSCTL_STAT_ADD32(c, h, n, p, d) \ 1131 SYSCTL_ADD_UINT(c, h, OID_AUTO, n, CTLFLAG_RD, p, 0, d) 1132 #define ALC_SYSCTL_STAT_ADD64(c, h, n, p, d) \ 1133 SYSCTL_ADD_QUAD(c, h, OID_AUTO, n, CTLFLAG_RD, p, 0, d) 1134 1135 static void 1136 alc_sysctl_node(struct alc_softc *sc) 1137 { 1138 struct sysctl_ctx_list *ctx; 1139 struct sysctl_oid *tree; 1140 struct sysctl_oid_list *child, *parent; 1141 struct alc_hw_stats *stats; 1142 int error; 1143 1144 stats = &sc->alc_stats; 1145 ctx = &sc->alc_sysctl_ctx; 1146 sysctl_ctx_init(ctx); 1147 1148 tree = SYSCTL_ADD_NODE(ctx, SYSCTL_STATIC_CHILDREN(_hw), 1149 OID_AUTO, 1150 device_get_nameunit(sc->alc_dev), 1151 CTLFLAG_RD, 0, ""); 1152 if (tree == NULL) { 1153 device_printf(sc->alc_dev, "can't add sysctl node\n"); 1154 return; 1155 } 1156 child = SYSCTL_CHILDREN(tree); 1157 1158 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "int_rx_mod", 1159 CTLTYPE_INT | CTLFLAG_RW, &sc->alc_int_rx_mod, 0, 1160 sysctl_hw_alc_int_mod, "I", "alc Rx interrupt moderation"); 1161 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "int_tx_mod", 1162 CTLTYPE_INT | CTLFLAG_RW, &sc->alc_int_tx_mod, 0, 1163 sysctl_hw_alc_int_mod, "I", "alc Tx interrupt moderation"); 1164 /* Pull in device tunables. */ 1165 sc->alc_int_rx_mod = ALC_IM_RX_TIMER_DEFAULT; 1166 error = resource_int_value(device_get_name(sc->alc_dev), 1167 device_get_unit(sc->alc_dev), "int_rx_mod", &sc->alc_int_rx_mod); 1168 if (error == 0) { 1169 if (sc->alc_int_rx_mod < ALC_IM_TIMER_MIN || 1170 sc->alc_int_rx_mod > ALC_IM_TIMER_MAX) { 1171 device_printf(sc->alc_dev, "int_rx_mod value out of " 1172 "range; using default: %d\n", 1173 ALC_IM_RX_TIMER_DEFAULT); 1174 sc->alc_int_rx_mod = ALC_IM_RX_TIMER_DEFAULT; 1175 } 1176 } 1177 sc->alc_int_tx_mod = ALC_IM_TX_TIMER_DEFAULT; 1178 error = resource_int_value(device_get_name(sc->alc_dev), 1179 device_get_unit(sc->alc_dev), "int_tx_mod", &sc->alc_int_tx_mod); 1180 if (error == 0) { 1181 if (sc->alc_int_tx_mod < ALC_IM_TIMER_MIN || 1182 sc->alc_int_tx_mod > ALC_IM_TIMER_MAX) { 1183 device_printf(sc->alc_dev, "int_tx_mod value out of " 1184 "range; using default: %d\n", 1185 ALC_IM_TX_TIMER_DEFAULT); 1186 sc->alc_int_tx_mod = ALC_IM_TX_TIMER_DEFAULT; 1187 } 1188 } 1189 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "process_limit", 1190 CTLTYPE_INT | CTLFLAG_RW, &sc->alc_process_limit, 0, 1191 sysctl_hw_alc_proc_limit, "I", 1192 "max number of Rx events to process"); 1193 /* Pull in device tunables. */ 1194 sc->alc_process_limit = ALC_PROC_DEFAULT; 1195 error = resource_int_value(device_get_name(sc->alc_dev), 1196 device_get_unit(sc->alc_dev), "process_limit", 1197 &sc->alc_process_limit); 1198 if (error == 0) { 1199 if (sc->alc_process_limit < ALC_PROC_MIN || 1200 sc->alc_process_limit > ALC_PROC_MAX) { 1201 device_printf(sc->alc_dev, 1202 "process_limit value out of range; " 1203 "using default: %d\n", ALC_PROC_DEFAULT); 1204 sc->alc_process_limit = ALC_PROC_DEFAULT; 1205 } 1206 } 1207 1208 tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats", CTLFLAG_RD, 1209 NULL, "ALC statistics"); 1210 parent = SYSCTL_CHILDREN(tree); 1211 1212 /* Rx statistics. */ 1213 tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "rx", CTLFLAG_RD, 1214 NULL, "Rx MAC statistics"); 1215 child = SYSCTL_CHILDREN(tree); 1216 ALC_SYSCTL_STAT_ADD32(ctx, child, "good_frames", 1217 &stats->rx_frames, "Good frames"); 1218 ALC_SYSCTL_STAT_ADD32(ctx, child, "good_bcast_frames", 1219 &stats->rx_bcast_frames, "Good broadcast frames"); 1220 ALC_SYSCTL_STAT_ADD32(ctx, child, "good_mcast_frames", 1221 &stats->rx_mcast_frames, "Good multicast frames"); 1222 ALC_SYSCTL_STAT_ADD32(ctx, child, "pause_frames", 1223 &stats->rx_pause_frames, "Pause control frames"); 1224 ALC_SYSCTL_STAT_ADD32(ctx, child, "control_frames", 1225 &stats->rx_control_frames, "Control frames"); 1226 ALC_SYSCTL_STAT_ADD32(ctx, child, "crc_errs", 1227 &stats->rx_crcerrs, "CRC errors"); 1228 ALC_SYSCTL_STAT_ADD32(ctx, child, "len_errs", 1229 &stats->rx_lenerrs, "Frames with length mismatched"); 1230 ALC_SYSCTL_STAT_ADD64(ctx, child, "good_octets", 1231 &stats->rx_bytes, "Good octets"); 1232 ALC_SYSCTL_STAT_ADD64(ctx, child, "good_bcast_octets", 1233 &stats->rx_bcast_bytes, "Good broadcast octets"); 1234 ALC_SYSCTL_STAT_ADD64(ctx, child, "good_mcast_octets", 1235 &stats->rx_mcast_bytes, "Good multicast octets"); 1236 ALC_SYSCTL_STAT_ADD32(ctx, child, "runts", 1237 &stats->rx_runts, "Too short frames"); 1238 ALC_SYSCTL_STAT_ADD32(ctx, child, "fragments", 1239 &stats->rx_fragments, "Fragmented frames"); 1240 ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_64", 1241 &stats->rx_pkts_64, "64 bytes frames"); 1242 ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_65_127", 1243 &stats->rx_pkts_65_127, "65 to 127 bytes frames"); 1244 ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_128_255", 1245 &stats->rx_pkts_128_255, "128 to 255 bytes frames"); 1246 ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_256_511", 1247 &stats->rx_pkts_256_511, "256 to 511 bytes frames"); 1248 ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_512_1023", 1249 &stats->rx_pkts_512_1023, "512 to 1023 bytes frames"); 1250 ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_1024_1518", 1251 &stats->rx_pkts_1024_1518, "1024 to 1518 bytes frames"); 1252 ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_1519_max", 1253 &stats->rx_pkts_1519_max, "1519 to max frames"); 1254 ALC_SYSCTL_STAT_ADD32(ctx, child, "trunc_errs", 1255 &stats->rx_pkts_truncated, "Truncated frames due to MTU size"); 1256 ALC_SYSCTL_STAT_ADD32(ctx, child, "fifo_oflows", 1257 &stats->rx_fifo_oflows, "FIFO overflows"); 1258 ALC_SYSCTL_STAT_ADD32(ctx, child, "rrs_errs", 1259 &stats->rx_rrs_errs, "Return status write-back errors"); 1260 ALC_SYSCTL_STAT_ADD32(ctx, child, "align_errs", 1261 &stats->rx_alignerrs, "Alignment errors"); 1262 ALC_SYSCTL_STAT_ADD32(ctx, child, "filtered", 1263 &stats->rx_pkts_filtered, 1264 "Frames dropped due to address filtering"); 1265 1266 /* Tx statistics. */ 1267 tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "tx", CTLFLAG_RD, 1268 NULL, "Tx MAC statistics"); 1269 child = SYSCTL_CHILDREN(tree); 1270 ALC_SYSCTL_STAT_ADD32(ctx, child, "good_frames", 1271 &stats->tx_frames, "Good frames"); 1272 ALC_SYSCTL_STAT_ADD32(ctx, child, "good_bcast_frames", 1273 &stats->tx_bcast_frames, "Good broadcast frames"); 1274 ALC_SYSCTL_STAT_ADD32(ctx, child, "good_mcast_frames", 1275 &stats->tx_mcast_frames, "Good multicast frames"); 1276 ALC_SYSCTL_STAT_ADD32(ctx, child, "pause_frames", 1277 &stats->tx_pause_frames, "Pause control frames"); 1278 ALC_SYSCTL_STAT_ADD32(ctx, child, "control_frames", 1279 &stats->tx_control_frames, "Control frames"); 1280 ALC_SYSCTL_STAT_ADD32(ctx, child, "excess_defers", 1281 &stats->tx_excess_defer, "Frames with excessive derferrals"); 1282 ALC_SYSCTL_STAT_ADD32(ctx, child, "defers", 1283 &stats->tx_excess_defer, "Frames with derferrals"); 1284 ALC_SYSCTL_STAT_ADD64(ctx, child, "good_octets", 1285 &stats->tx_bytes, "Good octets"); 1286 ALC_SYSCTL_STAT_ADD64(ctx, child, "good_bcast_octets", 1287 &stats->tx_bcast_bytes, "Good broadcast octets"); 1288 ALC_SYSCTL_STAT_ADD64(ctx, child, "good_mcast_octets", 1289 &stats->tx_mcast_bytes, "Good multicast octets"); 1290 ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_64", 1291 &stats->tx_pkts_64, "64 bytes frames"); 1292 ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_65_127", 1293 &stats->tx_pkts_65_127, "65 to 127 bytes frames"); 1294 ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_128_255", 1295 &stats->tx_pkts_128_255, "128 to 255 bytes frames"); 1296 ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_256_511", 1297 &stats->tx_pkts_256_511, "256 to 511 bytes frames"); 1298 ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_512_1023", 1299 &stats->tx_pkts_512_1023, "512 to 1023 bytes frames"); 1300 ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_1024_1518", 1301 &stats->tx_pkts_1024_1518, "1024 to 1518 bytes frames"); 1302 ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_1519_max", 1303 &stats->tx_pkts_1519_max, "1519 to max frames"); 1304 ALC_SYSCTL_STAT_ADD32(ctx, child, "single_colls", 1305 &stats->tx_single_colls, "Single collisions"); 1306 ALC_SYSCTL_STAT_ADD32(ctx, child, "multi_colls", 1307 &stats->tx_multi_colls, "Multiple collisions"); 1308 ALC_SYSCTL_STAT_ADD32(ctx, child, "late_colls", 1309 &stats->tx_late_colls, "Late collisions"); 1310 ALC_SYSCTL_STAT_ADD32(ctx, child, "excess_colls", 1311 &stats->tx_excess_colls, "Excessive collisions"); 1312 ALC_SYSCTL_STAT_ADD32(ctx, child, "abort", 1313 &stats->tx_abort, "Aborted frames due to Excessive collisions"); 1314 ALC_SYSCTL_STAT_ADD32(ctx, child, "underruns", 1315 &stats->tx_underrun, "FIFO underruns"); 1316 ALC_SYSCTL_STAT_ADD32(ctx, child, "desc_underruns", 1317 &stats->tx_desc_underrun, "Descriptor write-back errors"); 1318 ALC_SYSCTL_STAT_ADD32(ctx, child, "len_errs", 1319 &stats->tx_lenerrs, "Frames with length mismatched"); 1320 ALC_SYSCTL_STAT_ADD32(ctx, child, "trunc_errs", 1321 &stats->tx_pkts_truncated, "Truncated frames due to MTU size"); 1322 } 1323 1324 #undef ALC_SYSCTL_STAT_ADD32 1325 #undef ALC_SYSCTL_STAT_ADD64 1326 1327 struct alc_dmamap_arg { 1328 bus_addr_t alc_busaddr; 1329 }; 1330 1331 static void 1332 alc_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error) 1333 { 1334 struct alc_dmamap_arg *ctx; 1335 1336 if (error != 0) 1337 return; 1338 1339 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs)); 1340 1341 ctx = (struct alc_dmamap_arg *)arg; 1342 ctx->alc_busaddr = segs[0].ds_addr; 1343 } 1344 1345 /* 1346 * Normal and high Tx descriptors shares single Tx high address. 1347 * Four Rx descriptor/return rings and CMB shares the same Rx 1348 * high address. 1349 */ 1350 static int 1351 alc_check_boundary(struct alc_softc *sc) 1352 { 1353 bus_addr_t cmb_end, rx_ring_end, rr_ring_end, tx_ring_end; 1354 1355 rx_ring_end = sc->alc_rdata.alc_rx_ring_paddr + ALC_RX_RING_SZ; 1356 rr_ring_end = sc->alc_rdata.alc_rr_ring_paddr + ALC_RR_RING_SZ; 1357 cmb_end = sc->alc_rdata.alc_cmb_paddr + ALC_CMB_SZ; 1358 tx_ring_end = sc->alc_rdata.alc_tx_ring_paddr + ALC_TX_RING_SZ; 1359 1360 /* 4GB boundary crossing is not allowed. */ 1361 if ((ALC_ADDR_HI(rx_ring_end) != 1362 ALC_ADDR_HI(sc->alc_rdata.alc_rx_ring_paddr)) || 1363 (ALC_ADDR_HI(rr_ring_end) != 1364 ALC_ADDR_HI(sc->alc_rdata.alc_rr_ring_paddr)) || 1365 (ALC_ADDR_HI(cmb_end) != 1366 ALC_ADDR_HI(sc->alc_rdata.alc_cmb_paddr)) || 1367 (ALC_ADDR_HI(tx_ring_end) != 1368 ALC_ADDR_HI(sc->alc_rdata.alc_tx_ring_paddr))) 1369 return (EFBIG); 1370 /* 1371 * Make sure Rx return descriptor/Rx descriptor/CMB use 1372 * the same high address. 1373 */ 1374 if ((ALC_ADDR_HI(rx_ring_end) != ALC_ADDR_HI(rr_ring_end)) || 1375 (ALC_ADDR_HI(rx_ring_end) != ALC_ADDR_HI(cmb_end))) 1376 return (EFBIG); 1377 1378 return (0); 1379 } 1380 1381 static int 1382 alc_dma_alloc(struct alc_softc *sc) 1383 { 1384 struct alc_txdesc *txd; 1385 struct alc_rxdesc *rxd; 1386 bus_addr_t lowaddr; 1387 struct alc_dmamap_arg ctx; 1388 int error, i; 1389 1390 lowaddr = BUS_SPACE_MAXADDR; 1391 again: 1392 /* Create parent DMA tag. */ 1393 error = bus_dma_tag_create( 1394 sc->alc_cdata.alc_parent_tag, /* parent */ 1395 1, 0, /* alignment, boundary */ 1396 lowaddr, /* lowaddr */ 1397 BUS_SPACE_MAXADDR, /* highaddr */ 1398 NULL, NULL, /* filter, filterarg */ 1399 BUS_SPACE_MAXSIZE_32BIT, /* maxsize */ 1400 0, /* nsegments */ 1401 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ 1402 0, /* flags */ 1403 &sc->alc_cdata.alc_parent_tag); 1404 if (error != 0) { 1405 device_printf(sc->alc_dev, 1406 "could not create parent DMA tag.\n"); 1407 goto fail; 1408 } 1409 1410 /* Create DMA tag for Tx descriptor ring. */ 1411 error = bus_dma_tag_create( 1412 sc->alc_cdata.alc_parent_tag, /* parent */ 1413 ALC_TX_RING_ALIGN, 0, /* alignment, boundary */ 1414 BUS_SPACE_MAXADDR, /* lowaddr */ 1415 BUS_SPACE_MAXADDR, /* highaddr */ 1416 NULL, NULL, /* filter, filterarg */ 1417 ALC_TX_RING_SZ, /* maxsize */ 1418 1, /* nsegments */ 1419 ALC_TX_RING_SZ, /* maxsegsize */ 1420 0, /* flags */ 1421 &sc->alc_cdata.alc_tx_ring_tag); 1422 if (error != 0) { 1423 device_printf(sc->alc_dev, 1424 "could not create Tx ring DMA tag.\n"); 1425 goto fail; 1426 } 1427 1428 /* Create DMA tag for Rx free descriptor ring. */ 1429 error = bus_dma_tag_create( 1430 sc->alc_cdata.alc_parent_tag, /* parent */ 1431 ALC_RX_RING_ALIGN, 0, /* alignment, boundary */ 1432 BUS_SPACE_MAXADDR, /* lowaddr */ 1433 BUS_SPACE_MAXADDR, /* highaddr */ 1434 NULL, NULL, /* filter, filterarg */ 1435 ALC_RX_RING_SZ, /* maxsize */ 1436 1, /* nsegments */ 1437 ALC_RX_RING_SZ, /* maxsegsize */ 1438 0, /* flags */ 1439 &sc->alc_cdata.alc_rx_ring_tag); 1440 if (error != 0) { 1441 device_printf(sc->alc_dev, 1442 "could not create Rx ring DMA tag.\n"); 1443 goto fail; 1444 } 1445 /* Create DMA tag for Rx return descriptor ring. */ 1446 error = bus_dma_tag_create( 1447 sc->alc_cdata.alc_parent_tag, /* parent */ 1448 ALC_RR_RING_ALIGN, 0, /* alignment, boundary */ 1449 BUS_SPACE_MAXADDR, /* lowaddr */ 1450 BUS_SPACE_MAXADDR, /* highaddr */ 1451 NULL, NULL, /* filter, filterarg */ 1452 ALC_RR_RING_SZ, /* maxsize */ 1453 1, /* nsegments */ 1454 ALC_RR_RING_SZ, /* maxsegsize */ 1455 0, /* flags */ 1456 &sc->alc_cdata.alc_rr_ring_tag); 1457 if (error != 0) { 1458 device_printf(sc->alc_dev, 1459 "could not create Rx return ring DMA tag.\n"); 1460 goto fail; 1461 } 1462 1463 /* Create DMA tag for coalescing message block. */ 1464 error = bus_dma_tag_create( 1465 sc->alc_cdata.alc_parent_tag, /* parent */ 1466 ALC_CMB_ALIGN, 0, /* alignment, boundary */ 1467 BUS_SPACE_MAXADDR, /* lowaddr */ 1468 BUS_SPACE_MAXADDR, /* highaddr */ 1469 NULL, NULL, /* filter, filterarg */ 1470 ALC_CMB_SZ, /* maxsize */ 1471 1, /* nsegments */ 1472 ALC_CMB_SZ, /* maxsegsize */ 1473 0, /* flags */ 1474 &sc->alc_cdata.alc_cmb_tag); 1475 if (error != 0) { 1476 device_printf(sc->alc_dev, 1477 "could not create CMB DMA tag.\n"); 1478 goto fail; 1479 } 1480 /* Create DMA tag for status message block. */ 1481 error = bus_dma_tag_create( 1482 sc->alc_cdata.alc_parent_tag, /* parent */ 1483 ALC_SMB_ALIGN, 0, /* alignment, boundary */ 1484 BUS_SPACE_MAXADDR, /* lowaddr */ 1485 BUS_SPACE_MAXADDR, /* highaddr */ 1486 NULL, NULL, /* filter, filterarg */ 1487 ALC_SMB_SZ, /* maxsize */ 1488 1, /* nsegments */ 1489 ALC_SMB_SZ, /* maxsegsize */ 1490 0, /* flags */ 1491 &sc->alc_cdata.alc_smb_tag); 1492 if (error != 0) { 1493 device_printf(sc->alc_dev, 1494 "could not create SMB DMA tag.\n"); 1495 goto fail; 1496 } 1497 1498 /* Allocate DMA'able memory and load the DMA map for Tx ring. */ 1499 error = bus_dmamem_alloc(sc->alc_cdata.alc_tx_ring_tag, 1500 (void **)&sc->alc_rdata.alc_tx_ring, 1501 BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT, 1502 &sc->alc_cdata.alc_tx_ring_map); 1503 if (error != 0) { 1504 device_printf(sc->alc_dev, 1505 "could not allocate DMA'able memory for Tx ring.\n"); 1506 goto fail; 1507 } 1508 ctx.alc_busaddr = 0; 1509 error = bus_dmamap_load(sc->alc_cdata.alc_tx_ring_tag, 1510 sc->alc_cdata.alc_tx_ring_map, sc->alc_rdata.alc_tx_ring, 1511 ALC_TX_RING_SZ, alc_dmamap_cb, &ctx, 0); 1512 if (error != 0 || ctx.alc_busaddr == 0) { 1513 device_printf(sc->alc_dev, 1514 "could not load DMA'able memory for Tx ring.\n"); 1515 goto fail; 1516 } 1517 sc->alc_rdata.alc_tx_ring_paddr = ctx.alc_busaddr; 1518 1519 /* Allocate DMA'able memory and load the DMA map for Rx ring. */ 1520 error = bus_dmamem_alloc(sc->alc_cdata.alc_rx_ring_tag, 1521 (void **)&sc->alc_rdata.alc_rx_ring, 1522 BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT, 1523 &sc->alc_cdata.alc_rx_ring_map); 1524 if (error != 0) { 1525 device_printf(sc->alc_dev, 1526 "could not allocate DMA'able memory for Rx ring.\n"); 1527 goto fail; 1528 } 1529 ctx.alc_busaddr = 0; 1530 error = bus_dmamap_load(sc->alc_cdata.alc_rx_ring_tag, 1531 sc->alc_cdata.alc_rx_ring_map, sc->alc_rdata.alc_rx_ring, 1532 ALC_RX_RING_SZ, alc_dmamap_cb, &ctx, 0); 1533 if (error != 0 || ctx.alc_busaddr == 0) { 1534 device_printf(sc->alc_dev, 1535 "could not load DMA'able memory for Rx ring.\n"); 1536 goto fail; 1537 } 1538 sc->alc_rdata.alc_rx_ring_paddr = ctx.alc_busaddr; 1539 1540 /* Allocate DMA'able memory and load the DMA map for Rx return ring. */ 1541 error = bus_dmamem_alloc(sc->alc_cdata.alc_rr_ring_tag, 1542 (void **)&sc->alc_rdata.alc_rr_ring, 1543 BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT, 1544 &sc->alc_cdata.alc_rr_ring_map); 1545 if (error != 0) { 1546 device_printf(sc->alc_dev, 1547 "could not allocate DMA'able memory for Rx return ring.\n"); 1548 goto fail; 1549 } 1550 ctx.alc_busaddr = 0; 1551 error = bus_dmamap_load(sc->alc_cdata.alc_rr_ring_tag, 1552 sc->alc_cdata.alc_rr_ring_map, sc->alc_rdata.alc_rr_ring, 1553 ALC_RR_RING_SZ, alc_dmamap_cb, &ctx, 0); 1554 if (error != 0 || ctx.alc_busaddr == 0) { 1555 device_printf(sc->alc_dev, 1556 "could not load DMA'able memory for Tx ring.\n"); 1557 goto fail; 1558 } 1559 sc->alc_rdata.alc_rr_ring_paddr = ctx.alc_busaddr; 1560 1561 /* Allocate DMA'able memory and load the DMA map for CMB. */ 1562 error = bus_dmamem_alloc(sc->alc_cdata.alc_cmb_tag, 1563 (void **)&sc->alc_rdata.alc_cmb, 1564 BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT, 1565 &sc->alc_cdata.alc_cmb_map); 1566 if (error != 0) { 1567 device_printf(sc->alc_dev, 1568 "could not allocate DMA'able memory for CMB.\n"); 1569 goto fail; 1570 } 1571 ctx.alc_busaddr = 0; 1572 error = bus_dmamap_load(sc->alc_cdata.alc_cmb_tag, 1573 sc->alc_cdata.alc_cmb_map, sc->alc_rdata.alc_cmb, 1574 ALC_CMB_SZ, alc_dmamap_cb, &ctx, 0); 1575 if (error != 0 || ctx.alc_busaddr == 0) { 1576 device_printf(sc->alc_dev, 1577 "could not load DMA'able memory for CMB.\n"); 1578 goto fail; 1579 } 1580 sc->alc_rdata.alc_cmb_paddr = ctx.alc_busaddr; 1581 1582 /* Allocate DMA'able memory and load the DMA map for SMB. */ 1583 error = bus_dmamem_alloc(sc->alc_cdata.alc_smb_tag, 1584 (void **)&sc->alc_rdata.alc_smb, 1585 BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT, 1586 &sc->alc_cdata.alc_smb_map); 1587 if (error != 0) { 1588 device_printf(sc->alc_dev, 1589 "could not allocate DMA'able memory for SMB.\n"); 1590 goto fail; 1591 } 1592 ctx.alc_busaddr = 0; 1593 error = bus_dmamap_load(sc->alc_cdata.alc_smb_tag, 1594 sc->alc_cdata.alc_smb_map, sc->alc_rdata.alc_smb, 1595 ALC_SMB_SZ, alc_dmamap_cb, &ctx, 0); 1596 if (error != 0 || ctx.alc_busaddr == 0) { 1597 device_printf(sc->alc_dev, 1598 "could not load DMA'able memory for CMB.\n"); 1599 goto fail; 1600 } 1601 sc->alc_rdata.alc_smb_paddr = ctx.alc_busaddr; 1602 1603 /* Make sure we've not crossed 4GB boundary. */ 1604 if (lowaddr != BUS_SPACE_MAXADDR_32BIT && 1605 (error = alc_check_boundary(sc)) != 0) { 1606 device_printf(sc->alc_dev, "4GB boundary crossed, " 1607 "switching to 32bit DMA addressing mode.\n"); 1608 alc_dma_free(sc); 1609 /* 1610 * Limit max allowable DMA address space to 32bit 1611 * and try again. 1612 */ 1613 lowaddr = BUS_SPACE_MAXADDR_32BIT; 1614 goto again; 1615 } 1616 1617 /* 1618 * Create Tx buffer parent tag. 1619 * AR813x/AR815x allows 64bit DMA addressing of Tx/Rx buffers 1620 * so it needs separate parent DMA tag as parent DMA address 1621 * space could be restricted to be within 32bit address space 1622 * by 4GB boundary crossing. 1623 */ 1624 error = bus_dma_tag_create( 1625 sc->alc_cdata.alc_parent_tag, /* parent */ 1626 1, 0, /* alignment, boundary */ 1627 BUS_SPACE_MAXADDR, /* lowaddr */ 1628 BUS_SPACE_MAXADDR, /* highaddr */ 1629 NULL, NULL, /* filter, filterarg */ 1630 BUS_SPACE_MAXSIZE_32BIT, /* maxsize */ 1631 0, /* nsegments */ 1632 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ 1633 0, /* flags */ 1634 &sc->alc_cdata.alc_buffer_tag); 1635 if (error != 0) { 1636 device_printf(sc->alc_dev, 1637 "could not create parent buffer DMA tag.\n"); 1638 goto fail; 1639 } 1640 1641 /* Create DMA tag for Tx buffers. */ 1642 error = bus_dma_tag_create( 1643 sc->alc_cdata.alc_buffer_tag, /* parent */ 1644 1, 0, /* alignment, boundary */ 1645 BUS_SPACE_MAXADDR, /* lowaddr */ 1646 BUS_SPACE_MAXADDR, /* highaddr */ 1647 NULL, NULL, /* filter, filterarg */ 1648 ALC_TSO_MAXSIZE, /* maxsize */ 1649 ALC_MAXTXSEGS, /* nsegments */ 1650 ALC_TSO_MAXSEGSIZE, /* maxsegsize */ 1651 0, /* flags */ 1652 &sc->alc_cdata.alc_tx_tag); 1653 if (error != 0) { 1654 device_printf(sc->alc_dev, "could not create Tx DMA tag.\n"); 1655 goto fail; 1656 } 1657 1658 /* Create DMA tag for Rx buffers. */ 1659 error = bus_dma_tag_create( 1660 sc->alc_cdata.alc_buffer_tag, /* parent */ 1661 ALC_RX_BUF_ALIGN, 0, /* alignment, boundary */ 1662 BUS_SPACE_MAXADDR, /* lowaddr */ 1663 BUS_SPACE_MAXADDR, /* highaddr */ 1664 NULL, NULL, /* filter, filterarg */ 1665 MCLBYTES, /* maxsize */ 1666 1, /* nsegments */ 1667 MCLBYTES, /* maxsegsize */ 1668 0, /* flags */ 1669 &sc->alc_cdata.alc_rx_tag); 1670 if (error != 0) { 1671 device_printf(sc->alc_dev, "could not create Rx DMA tag.\n"); 1672 goto fail; 1673 } 1674 /* Create DMA maps for Tx buffers. */ 1675 for (i = 0; i < ALC_TX_RING_CNT; i++) { 1676 txd = &sc->alc_cdata.alc_txdesc[i]; 1677 txd->tx_m = NULL; 1678 txd->tx_dmamap = NULL; 1679 error = bus_dmamap_create(sc->alc_cdata.alc_tx_tag, 1680 BUS_DMA_WAITOK, &txd->tx_dmamap); 1681 if (error != 0) { 1682 device_printf(sc->alc_dev, 1683 "could not create Tx dmamap.\n"); 1684 goto fail; 1685 } 1686 } 1687 /* Create DMA maps for Rx buffers. */ 1688 error = bus_dmamap_create(sc->alc_cdata.alc_rx_tag, 1689 BUS_DMA_WAITOK, 1690 &sc->alc_cdata.alc_rx_sparemap); 1691 if (error) { 1692 device_printf(sc->alc_dev, 1693 "could not create spare Rx dmamap.\n"); 1694 goto fail; 1695 } 1696 for (i = 0; i < ALC_RX_RING_CNT; i++) { 1697 rxd = &sc->alc_cdata.alc_rxdesc[i]; 1698 rxd->rx_m = NULL; 1699 rxd->rx_dmamap = NULL; 1700 error = bus_dmamap_create(sc->alc_cdata.alc_rx_tag, 1701 BUS_DMA_WAITOK, 1702 &rxd->rx_dmamap); 1703 if (error != 0) { 1704 device_printf(sc->alc_dev, 1705 "could not create Rx dmamap.\n"); 1706 goto fail; 1707 } 1708 } 1709 1710 fail: 1711 return (error); 1712 } 1713 1714 static void 1715 alc_dma_free(struct alc_softc *sc) 1716 { 1717 struct alc_txdesc *txd; 1718 struct alc_rxdesc *rxd; 1719 int i; 1720 1721 /* Tx buffers. */ 1722 if (sc->alc_cdata.alc_tx_tag != NULL) { 1723 for (i = 0; i < ALC_TX_RING_CNT; i++) { 1724 txd = &sc->alc_cdata.alc_txdesc[i]; 1725 if (txd->tx_dmamap != NULL) { 1726 bus_dmamap_destroy(sc->alc_cdata.alc_tx_tag, 1727 txd->tx_dmamap); 1728 txd->tx_dmamap = NULL; 1729 } 1730 } 1731 bus_dma_tag_destroy(sc->alc_cdata.alc_tx_tag); 1732 sc->alc_cdata.alc_tx_tag = NULL; 1733 } 1734 /* Rx buffers */ 1735 if (sc->alc_cdata.alc_rx_tag != NULL) { 1736 for (i = 0; i < ALC_RX_RING_CNT; i++) { 1737 rxd = &sc->alc_cdata.alc_rxdesc[i]; 1738 if (rxd->rx_dmamap != NULL) { 1739 bus_dmamap_destroy(sc->alc_cdata.alc_rx_tag, 1740 rxd->rx_dmamap); 1741 rxd->rx_dmamap = NULL; 1742 } 1743 } 1744 if (sc->alc_cdata.alc_rx_sparemap != NULL) { 1745 bus_dmamap_destroy(sc->alc_cdata.alc_rx_tag, 1746 sc->alc_cdata.alc_rx_sparemap); 1747 sc->alc_cdata.alc_rx_sparemap = NULL; 1748 } 1749 bus_dma_tag_destroy(sc->alc_cdata.alc_rx_tag); 1750 sc->alc_cdata.alc_rx_tag = NULL; 1751 } 1752 /* Tx descriptor ring. */ 1753 if (sc->alc_cdata.alc_tx_ring_tag != NULL) { 1754 if (sc->alc_cdata.alc_tx_ring_map != NULL) 1755 bus_dmamap_unload(sc->alc_cdata.alc_tx_ring_tag, 1756 sc->alc_cdata.alc_tx_ring_map); 1757 if (sc->alc_cdata.alc_tx_ring_map != NULL && 1758 sc->alc_rdata.alc_tx_ring != NULL) 1759 bus_dmamem_free(sc->alc_cdata.alc_tx_ring_tag, 1760 sc->alc_rdata.alc_tx_ring, 1761 sc->alc_cdata.alc_tx_ring_map); 1762 sc->alc_rdata.alc_tx_ring = NULL; 1763 sc->alc_cdata.alc_tx_ring_map = NULL; 1764 bus_dma_tag_destroy(sc->alc_cdata.alc_tx_ring_tag); 1765 sc->alc_cdata.alc_tx_ring_tag = NULL; 1766 } 1767 /* Rx ring. */ 1768 if (sc->alc_cdata.alc_rx_ring_tag != NULL) { 1769 if (sc->alc_cdata.alc_rx_ring_map != NULL) 1770 bus_dmamap_unload(sc->alc_cdata.alc_rx_ring_tag, 1771 sc->alc_cdata.alc_rx_ring_map); 1772 if (sc->alc_cdata.alc_rx_ring_map != NULL && 1773 sc->alc_rdata.alc_rx_ring != NULL) 1774 bus_dmamem_free(sc->alc_cdata.alc_rx_ring_tag, 1775 sc->alc_rdata.alc_rx_ring, 1776 sc->alc_cdata.alc_rx_ring_map); 1777 sc->alc_rdata.alc_rx_ring = NULL; 1778 sc->alc_cdata.alc_rx_ring_map = NULL; 1779 bus_dma_tag_destroy(sc->alc_cdata.alc_rx_ring_tag); 1780 sc->alc_cdata.alc_rx_ring_tag = NULL; 1781 } 1782 /* Rx return ring. */ 1783 if (sc->alc_cdata.alc_rr_ring_tag != NULL) { 1784 if (sc->alc_cdata.alc_rr_ring_map != NULL) 1785 bus_dmamap_unload(sc->alc_cdata.alc_rr_ring_tag, 1786 sc->alc_cdata.alc_rr_ring_map); 1787 if (sc->alc_cdata.alc_rr_ring_map != NULL && 1788 sc->alc_rdata.alc_rr_ring != NULL) 1789 bus_dmamem_free(sc->alc_cdata.alc_rr_ring_tag, 1790 sc->alc_rdata.alc_rr_ring, 1791 sc->alc_cdata.alc_rr_ring_map); 1792 sc->alc_rdata.alc_rr_ring = NULL; 1793 sc->alc_cdata.alc_rr_ring_map = NULL; 1794 bus_dma_tag_destroy(sc->alc_cdata.alc_rr_ring_tag); 1795 sc->alc_cdata.alc_rr_ring_tag = NULL; 1796 } 1797 /* CMB block */ 1798 if (sc->alc_cdata.alc_cmb_tag != NULL) { 1799 if (sc->alc_cdata.alc_cmb_map != NULL) 1800 bus_dmamap_unload(sc->alc_cdata.alc_cmb_tag, 1801 sc->alc_cdata.alc_cmb_map); 1802 if (sc->alc_cdata.alc_cmb_map != NULL && 1803 sc->alc_rdata.alc_cmb != NULL) 1804 bus_dmamem_free(sc->alc_cdata.alc_cmb_tag, 1805 sc->alc_rdata.alc_cmb, 1806 sc->alc_cdata.alc_cmb_map); 1807 sc->alc_rdata.alc_cmb = NULL; 1808 sc->alc_cdata.alc_cmb_map = NULL; 1809 bus_dma_tag_destroy(sc->alc_cdata.alc_cmb_tag); 1810 sc->alc_cdata.alc_cmb_tag = NULL; 1811 } 1812 /* SMB block */ 1813 if (sc->alc_cdata.alc_smb_tag != NULL) { 1814 if (sc->alc_cdata.alc_smb_map != NULL) 1815 bus_dmamap_unload(sc->alc_cdata.alc_smb_tag, 1816 sc->alc_cdata.alc_smb_map); 1817 if (sc->alc_cdata.alc_smb_map != NULL && 1818 sc->alc_rdata.alc_smb != NULL) 1819 bus_dmamem_free(sc->alc_cdata.alc_smb_tag, 1820 sc->alc_rdata.alc_smb, 1821 sc->alc_cdata.alc_smb_map); 1822 sc->alc_rdata.alc_smb = NULL; 1823 sc->alc_cdata.alc_smb_map = NULL; 1824 bus_dma_tag_destroy(sc->alc_cdata.alc_smb_tag); 1825 sc->alc_cdata.alc_smb_tag = NULL; 1826 } 1827 if (sc->alc_cdata.alc_buffer_tag != NULL) { 1828 bus_dma_tag_destroy(sc->alc_cdata.alc_buffer_tag); 1829 sc->alc_cdata.alc_buffer_tag = NULL; 1830 } 1831 if (sc->alc_cdata.alc_parent_tag != NULL) { 1832 bus_dma_tag_destroy(sc->alc_cdata.alc_parent_tag); 1833 sc->alc_cdata.alc_parent_tag = NULL; 1834 } 1835 } 1836 1837 static int 1838 alc_shutdown(device_t dev) 1839 { 1840 1841 return (alc_suspend(dev)); 1842 } 1843 1844 #if 0 1845 /* XXX: LINK SPEED */ 1846 /* 1847 * Note, this driver resets the link speed to 10/100Mbps by 1848 * restarting auto-negotiation in suspend/shutdown phase but we 1849 * don't know whether that auto-negotiation would succeed or not 1850 * as driver has no control after powering off/suspend operation. 1851 * If the renegotiation fail WOL may not work. Running at 1Gbps 1852 * will draw more power than 375mA at 3.3V which is specified in 1853 * PCI specification and that would result in complete 1854 * shutdowning power to ethernet controller. 1855 * 1856 * TODO 1857 * Save current negotiated media speed/duplex/flow-control to 1858 * softc and restore the same link again after resuming. PHY 1859 * handling such as power down/resetting to 100Mbps may be better 1860 * handled in suspend method in phy driver. 1861 */ 1862 static void 1863 alc_setlinkspeed(struct alc_softc *sc) 1864 { 1865 struct mii_data *mii; 1866 int aneg, i; 1867 1868 mii = device_get_softc(sc->alc_miibus); 1869 mii_pollstat(mii); 1870 aneg = 0; 1871 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) == 1872 (IFM_ACTIVE | IFM_AVALID)) { 1873 switch IFM_SUBTYPE(mii->mii_media_active) { 1874 case IFM_10_T: 1875 case IFM_100_TX: 1876 return; 1877 case IFM_1000_T: 1878 aneg++; 1879 break; 1880 default: 1881 break; 1882 } 1883 } 1884 alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr, MII_100T2CR, 0); 1885 alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr, 1886 MII_ANAR, ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10 | ANAR_CSMA); 1887 alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr, 1888 MII_BMCR, BMCR_RESET | BMCR_AUTOEN | BMCR_STARTNEG); 1889 DELAY(1000); 1890 if (aneg != 0) { 1891 /* 1892 * Poll link state until alc(4) get a 10/100Mbps link. 1893 */ 1894 for (i = 0; i < MII_ANEGTICKS_GIGE; i++) { 1895 mii_pollstat(mii); 1896 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) 1897 == (IFM_ACTIVE | IFM_AVALID)) { 1898 switch (IFM_SUBTYPE( 1899 mii->mii_media_active)) { 1900 case IFM_10_T: 1901 case IFM_100_TX: 1902 alc_mac_config(sc); 1903 return; 1904 default: 1905 break; 1906 } 1907 } 1908 ALC_UNLOCK(sc); 1909 pause("alclnk", hz); 1910 ALC_LOCK(sc); 1911 } 1912 if (i == MII_ANEGTICKS_GIGE) 1913 device_printf(sc->alc_dev, 1914 "establishing a link failed, WOL may not work!"); 1915 } 1916 /* 1917 * No link, force MAC to have 100Mbps, full-duplex link. 1918 * This is the last resort and may/may not work. 1919 */ 1920 mii->mii_media_status = IFM_AVALID | IFM_ACTIVE; 1921 mii->mii_media_active = IFM_ETHER | IFM_100_TX | IFM_FDX; 1922 alc_mac_config(sc); 1923 } 1924 #endif 1925 1926 #if 0 1927 /* XXX: WOL */ 1928 static void 1929 alc_setwol(struct alc_softc *sc) 1930 { 1931 struct ifnet *ifp; 1932 uint32_t reg, pmcs; 1933 uint16_t pmstat; 1934 1935 ALC_LOCK_ASSERT(sc); 1936 1937 alc_disable_l0s_l1(sc); 1938 ifp = sc->alc_ifp; 1939 if ((sc->alc_flags & ALC_FLAG_PM) == 0) { 1940 /* Disable WOL. */ 1941 CSR_WRITE_4(sc, ALC_WOL_CFG, 0); 1942 reg = CSR_READ_4(sc, ALC_PCIE_PHYMISC); 1943 reg |= PCIE_PHYMISC_FORCE_RCV_DET; 1944 CSR_WRITE_4(sc, ALC_PCIE_PHYMISC, reg); 1945 /* Force PHY power down. */ 1946 alc_phy_down(sc); 1947 CSR_WRITE_4(sc, ALC_MASTER_CFG, 1948 CSR_READ_4(sc, ALC_MASTER_CFG) | MASTER_CLK_SEL_DIS); 1949 return; 1950 } 1951 1952 if ((ifp->if_capenable & IFCAP_WOL) != 0) { 1953 if ((sc->alc_flags & ALC_FLAG_FASTETHER) == 0) 1954 alc_setlinkspeed(sc); 1955 CSR_WRITE_4(sc, ALC_MASTER_CFG, 1956 CSR_READ_4(sc, ALC_MASTER_CFG) & ~MASTER_CLK_SEL_DIS); 1957 } 1958 1959 pmcs = 0; 1960 if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0) 1961 pmcs |= WOL_CFG_MAGIC | WOL_CFG_MAGIC_ENB; 1962 CSR_WRITE_4(sc, ALC_WOL_CFG, pmcs); 1963 reg = CSR_READ_4(sc, ALC_MAC_CFG); 1964 reg &= ~(MAC_CFG_DBG | MAC_CFG_PROMISC | MAC_CFG_ALLMULTI | 1965 MAC_CFG_BCAST); 1966 if ((ifp->if_capenable & IFCAP_WOL_MCAST) != 0) 1967 reg |= MAC_CFG_ALLMULTI | MAC_CFG_BCAST; 1968 if ((ifp->if_capenable & IFCAP_WOL) != 0) 1969 reg |= MAC_CFG_RX_ENB; 1970 CSR_WRITE_4(sc, ALC_MAC_CFG, reg); 1971 1972 reg = CSR_READ_4(sc, ALC_PCIE_PHYMISC); 1973 reg |= PCIE_PHYMISC_FORCE_RCV_DET; 1974 CSR_WRITE_4(sc, ALC_PCIE_PHYMISC, reg); 1975 if ((ifp->if_capenable & IFCAP_WOL) == 0) { 1976 /* WOL disabled, PHY power down. */ 1977 alc_phy_down(sc); 1978 CSR_WRITE_4(sc, ALC_MASTER_CFG, 1979 CSR_READ_4(sc, ALC_MASTER_CFG) | MASTER_CLK_SEL_DIS); 1980 1981 } 1982 /* Request PME. */ 1983 pmstat = pci_read_config(sc->alc_dev, 1984 sc->alc_pmcap + PCIR_POWER_STATUS, 2); 1985 pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE); 1986 if ((ifp->if_capenable & IFCAP_WOL) != 0) 1987 pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE; 1988 pci_write_config(sc->alc_dev, 1989 sc->alc_pmcap + PCIR_POWER_STATUS, pmstat, 2); 1990 } 1991 #endif 1992 1993 static int 1994 alc_suspend(device_t dev) 1995 { 1996 struct alc_softc *sc; 1997 1998 sc = device_get_softc(dev); 1999 2000 ALC_LOCK(sc); 2001 alc_stop(sc); 2002 #if 0 2003 /* XXX: WOL */ 2004 alc_setwol(sc); 2005 #endif 2006 ALC_UNLOCK(sc); 2007 2008 return (0); 2009 } 2010 2011 static int 2012 alc_resume(device_t dev) 2013 { 2014 struct alc_softc *sc; 2015 struct ifnet *ifp; 2016 uint16_t pmstat; 2017 2018 sc = device_get_softc(dev); 2019 2020 ALC_LOCK(sc); 2021 if ((sc->alc_flags & ALC_FLAG_PM) != 0) { 2022 /* Disable PME and clear PME status. */ 2023 pmstat = pci_read_config(sc->alc_dev, 2024 sc->alc_pmcap + PCIR_POWER_STATUS, 2); 2025 if ((pmstat & PCIM_PSTAT_PMEENABLE) != 0) { 2026 pmstat &= ~PCIM_PSTAT_PMEENABLE; 2027 pci_write_config(sc->alc_dev, 2028 sc->alc_pmcap + PCIR_POWER_STATUS, pmstat, 2); 2029 } 2030 } 2031 /* Reset PHY. */ 2032 alc_phy_reset(sc); 2033 ifp = sc->alc_ifp; 2034 if ((ifp->if_flags & IFF_UP) != 0) { 2035 ifp->if_flags &= ~IFF_RUNNING; 2036 alc_init_locked(sc); 2037 } 2038 ALC_UNLOCK(sc); 2039 2040 return (0); 2041 } 2042 2043 static int 2044 alc_encap(struct alc_softc *sc, struct mbuf **m_head) 2045 { 2046 struct alc_txdesc *txd, *txd_last; 2047 struct tx_desc *desc; 2048 struct mbuf *m; 2049 struct ip *ip; 2050 struct tcphdr *tcp; 2051 bus_dma_segment_t txsegs[ALC_MAXTXSEGS]; 2052 bus_dmamap_t map; 2053 uint32_t cflags, hdrlen, ip_off, poff, vtag; 2054 int error, idx, nsegs, prod; 2055 2056 ALC_LOCK_ASSERT(sc); 2057 2058 M_ASSERTPKTHDR((*m_head)); 2059 2060 m = *m_head; 2061 ip = NULL; 2062 tcp = NULL; 2063 ip_off = poff = 0; 2064 #if 0 2065 /* XXX: TSO */ 2066 if ((m->m_pkthdr.csum_flags & (ALC_CSUM_FEATURES | CSUM_TSO)) != 0) { 2067 /* 2068 * AR813x/AR815x requires offset of TCP/UDP header in its 2069 * Tx descriptor to perform Tx checksum offloading. TSO 2070 * also requires TCP header offset and modification of 2071 * IP/TCP header. This kind of operation takes many CPU 2072 * cycles on FreeBSD so fast host CPU is required to get 2073 * smooth TSO performance. 2074 */ 2075 struct ether_header *eh; 2076 2077 if (M_WRITABLE(m) == 0) { 2078 /* Get a writable copy. */ 2079 m = m_dup(*m_head, MB_DONTWAIT); 2080 /* Release original mbufs. */ 2081 m_freem(*m_head); 2082 if (m == NULL) { 2083 *m_head = NULL; 2084 return (ENOBUFS); 2085 } 2086 *m_head = m; 2087 } 2088 2089 ip_off = sizeof(struct ether_header); 2090 m = m_pullup(m, ip_off + sizeof(struct ip)); 2091 if (m == NULL) { 2092 *m_head = NULL; 2093 return (ENOBUFS); 2094 } 2095 eh = mtod(m, struct ether_header *); 2096 /* 2097 * Check if hardware VLAN insertion is off. 2098 * Additional check for LLC/SNAP frame? 2099 */ 2100 if (eh->ether_type == htons(ETHERTYPE_VLAN)) { 2101 ip_off = sizeof(struct ether_vlan_header); 2102 m = m_pullup(m, ip_off); 2103 if (m == NULL) { 2104 *m_head = NULL; 2105 return (ENOBUFS); 2106 } 2107 } 2108 m = m_pullup(m, ip_off + sizeof(struct ip)); 2109 if (m == NULL) { 2110 *m_head = NULL; 2111 return (ENOBUFS); 2112 } 2113 ip = (struct ip *)(mtod(m, char *) + ip_off); 2114 poff = ip_off + (ip->ip_hl << 2); 2115 2116 if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) { 2117 m = m_pullup(m, poff + sizeof(struct tcphdr)); 2118 if (m == NULL) { 2119 *m_head = NULL; 2120 return (ENOBUFS); 2121 } 2122 tcp = (struct tcphdr *)(mtod(m, char *) + poff); 2123 m = m_pullup(m, poff + (tcp->th_off << 2)); 2124 if (m == NULL) { 2125 *m_head = NULL; 2126 return (ENOBUFS); 2127 } 2128 /* 2129 * Due to strict adherence of Microsoft NDIS 2130 * Large Send specification, hardware expects 2131 * a pseudo TCP checksum inserted by upper 2132 * stack. Unfortunately the pseudo TCP 2133 * checksum that NDIS refers to does not include 2134 * TCP payload length so driver should recompute 2135 * the pseudo checksum here. Hopefully this 2136 * wouldn't be much burden on modern CPUs. 2137 * 2138 * Reset IP checksum and recompute TCP pseudo 2139 * checksum as NDIS specification said. 2140 */ 2141 ip->ip_sum = 0; 2142 tcp->th_sum = in_pseudo(ip->ip_src.s_addr, 2143 ip->ip_dst.s_addr, htons(IPPROTO_TCP)); 2144 } 2145 *m_head = m; 2146 } 2147 #endif /* TSO */ 2148 2149 prod = sc->alc_cdata.alc_tx_prod; 2150 txd = &sc->alc_cdata.alc_txdesc[prod]; 2151 txd_last = txd; 2152 map = txd->tx_dmamap; 2153 2154 error = bus_dmamap_load_mbuf_defrag( 2155 sc->alc_cdata.alc_tx_tag, map, m_head, 2156 txsegs, ALC_MAXTXSEGS, &nsegs, BUS_DMA_NOWAIT); 2157 if (error) { 2158 m_freem(*m_head); 2159 *m_head = NULL; 2160 return (error); 2161 } 2162 if (nsegs == 0) { 2163 m_freem(*m_head); 2164 *m_head = NULL; 2165 return (EIO); 2166 } 2167 2168 /* Check descriptor overrun. */ 2169 if (sc->alc_cdata.alc_tx_cnt + nsegs >= ALC_TX_RING_CNT - 3) { 2170 bus_dmamap_unload(sc->alc_cdata.alc_tx_tag, map); 2171 return (ENOBUFS); 2172 } 2173 bus_dmamap_sync(sc->alc_cdata.alc_tx_tag, map, BUS_DMASYNC_PREWRITE); 2174 2175 m = *m_head; 2176 cflags = TD_ETHERNET; 2177 vtag = 0; 2178 desc = NULL; 2179 idx = 0; 2180 /* Configure VLAN hardware tag insertion. */ 2181 if ((m->m_flags & M_VLANTAG) != 0) { 2182 vtag = htons(m->m_pkthdr.ether_vlantag); 2183 vtag = (vtag << TD_VLAN_SHIFT) & TD_VLAN_MASK; 2184 cflags |= TD_INS_VLAN_TAG; 2185 } 2186 /* Configure Tx checksum offload. */ 2187 if ((m->m_pkthdr.csum_flags & ALC_CSUM_FEATURES) != 0) { 2188 #ifdef ALC_USE_CUSTOM_CSUM 2189 cflags |= TD_CUSTOM_CSUM; 2190 /* Set checksum start offset. */ 2191 cflags |= ((poff >> 1) << TD_PLOAD_OFFSET_SHIFT) & 2192 TD_PLOAD_OFFSET_MASK; 2193 /* Set checksum insertion position of TCP/UDP. */ 2194 cflags |= (((poff + m->m_pkthdr.csum_data) >> 1) << 2195 TD_CUSTOM_CSUM_OFFSET_SHIFT) & TD_CUSTOM_CSUM_OFFSET_MASK; 2196 #else 2197 if ((m->m_pkthdr.csum_flags & CSUM_IP) != 0) 2198 cflags |= TD_IPCSUM; 2199 if ((m->m_pkthdr.csum_flags & CSUM_TCP) != 0) 2200 cflags |= TD_TCPCSUM; 2201 if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0) 2202 cflags |= TD_UDPCSUM; 2203 /* Set TCP/UDP header offset. */ 2204 cflags |= (poff << TD_L4HDR_OFFSET_SHIFT) & 2205 TD_L4HDR_OFFSET_MASK; 2206 #endif 2207 } else if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) { 2208 /* Request TSO and set MSS. */ 2209 cflags |= TD_TSO | TD_TSO_DESCV1; 2210 #if 0 2211 /* XXX: TSO */ 2212 cflags |= ((uint32_t)m->m_pkthdr.tso_segsz << TD_MSS_SHIFT) & 2213 TD_MSS_MASK; 2214 /* Set TCP header offset. */ 2215 #endif 2216 cflags |= (poff << TD_TCPHDR_OFFSET_SHIFT) & 2217 TD_TCPHDR_OFFSET_MASK; 2218 /* 2219 * AR813x/AR815x requires the first buffer should 2220 * only hold IP/TCP header data. Payload should 2221 * be handled in other descriptors. 2222 */ 2223 hdrlen = poff + (tcp->th_off << 2); 2224 desc = &sc->alc_rdata.alc_tx_ring[prod]; 2225 desc->len = htole32(TX_BYTES(hdrlen | vtag)); 2226 desc->flags = htole32(cflags); 2227 desc->addr = htole64(txsegs[0].ds_addr); 2228 sc->alc_cdata.alc_tx_cnt++; 2229 ALC_DESC_INC(prod, ALC_TX_RING_CNT); 2230 if (m->m_len - hdrlen > 0) { 2231 /* Handle remaining payload of the first fragment. */ 2232 desc = &sc->alc_rdata.alc_tx_ring[prod]; 2233 desc->len = htole32(TX_BYTES((m->m_len - hdrlen) | 2234 vtag)); 2235 desc->flags = htole32(cflags); 2236 desc->addr = htole64(txsegs[0].ds_addr + hdrlen); 2237 sc->alc_cdata.alc_tx_cnt++; 2238 ALC_DESC_INC(prod, ALC_TX_RING_CNT); 2239 } 2240 /* Handle remaining fragments. */ 2241 idx = 1; 2242 } 2243 for (; idx < nsegs; idx++) { 2244 desc = &sc->alc_rdata.alc_tx_ring[prod]; 2245 desc->len = htole32(TX_BYTES(txsegs[idx].ds_len) | vtag); 2246 desc->flags = htole32(cflags); 2247 desc->addr = htole64(txsegs[idx].ds_addr); 2248 sc->alc_cdata.alc_tx_cnt++; 2249 ALC_DESC_INC(prod, ALC_TX_RING_CNT); 2250 } 2251 /* Update producer index. */ 2252 sc->alc_cdata.alc_tx_prod = prod; 2253 2254 /* Finally set EOP on the last descriptor. */ 2255 prod = (prod + ALC_TX_RING_CNT - 1) % ALC_TX_RING_CNT; 2256 desc = &sc->alc_rdata.alc_tx_ring[prod]; 2257 desc->flags |= htole32(TD_EOP); 2258 2259 /* Swap dmamap of the first and the last. */ 2260 txd = &sc->alc_cdata.alc_txdesc[prod]; 2261 map = txd_last->tx_dmamap; 2262 txd_last->tx_dmamap = txd->tx_dmamap; 2263 txd->tx_dmamap = map; 2264 txd->tx_m = m; 2265 2266 return (0); 2267 } 2268 2269 static void 2270 alc_tx_task(void *arg, int pending) 2271 { 2272 struct ifnet *ifp; 2273 2274 ifp = (struct ifnet *)arg; 2275 alc_start(ifp); 2276 } 2277 2278 static void 2279 alc_start(struct ifnet *ifp) 2280 { 2281 struct alc_softc *sc; 2282 struct mbuf *m_head; 2283 int enq; 2284 2285 sc = ifp->if_softc; 2286 2287 ALC_LOCK(sc); 2288 2289 /* Reclaim transmitted frames. */ 2290 if (sc->alc_cdata.alc_tx_cnt >= ALC_TX_DESC_HIWAT) 2291 alc_txeof(sc); 2292 2293 if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING) { 2294 ALC_UNLOCK(sc); 2295 return; 2296 } 2297 if ((sc->alc_flags & ALC_FLAG_LINK) == 0) { 2298 ifq_purge(&ifp->if_snd); 2299 ALC_UNLOCK(sc); 2300 return; 2301 } 2302 2303 for (enq = 0; !ifq_is_empty(&ifp->if_snd); ) { 2304 m_head = ifq_dequeue(&ifp->if_snd, NULL); 2305 if (m_head == NULL) 2306 break; 2307 /* 2308 * Pack the data into the transmit ring. If we 2309 * don't have room, set the OACTIVE flag and wait 2310 * for the NIC to drain the ring. 2311 */ 2312 if (alc_encap(sc, &m_head)) { 2313 if (m_head == NULL) 2314 break; 2315 ifq_prepend(&ifp->if_snd, m_head); 2316 ifp->if_flags |= IFF_OACTIVE; 2317 break; 2318 } 2319 2320 enq++; 2321 /* 2322 * If there's a BPF listener, bounce a copy of this frame 2323 * to him. 2324 */ 2325 ETHER_BPF_MTAP(ifp, m_head); 2326 } 2327 2328 if (enq > 0) { 2329 /* Sync descriptors. */ 2330 bus_dmamap_sync(sc->alc_cdata.alc_tx_ring_tag, 2331 sc->alc_cdata.alc_tx_ring_map, BUS_DMASYNC_PREWRITE); 2332 /* Kick. Assume we're using normal Tx priority queue. */ 2333 CSR_WRITE_4(sc, ALC_MBOX_TD_PROD_IDX, 2334 (sc->alc_cdata.alc_tx_prod << 2335 MBOX_TD_PROD_LO_IDX_SHIFT) & 2336 MBOX_TD_PROD_LO_IDX_MASK); 2337 /* Set a timeout in case the chip goes out to lunch. */ 2338 sc->alc_watchdog_timer = ALC_TX_TIMEOUT; 2339 } 2340 2341 ALC_UNLOCK(sc); 2342 } 2343 2344 static void 2345 alc_watchdog(struct alc_softc *sc) 2346 { 2347 struct ifnet *ifp; 2348 2349 ALC_LOCK_ASSERT(sc); 2350 2351 if (sc->alc_watchdog_timer == 0 || --sc->alc_watchdog_timer) 2352 return; 2353 2354 ifp = sc->alc_ifp; 2355 if ((sc->alc_flags & ALC_FLAG_LINK) == 0) { 2356 if_printf(sc->alc_ifp, "watchdog timeout (lost link)\n"); 2357 ifp->if_oerrors++; 2358 ifp->if_flags &= ~IFF_RUNNING; 2359 alc_init_locked(sc); 2360 return; 2361 } 2362 if_printf(sc->alc_ifp, "watchdog timeout -- resetting\n"); 2363 ifp->if_oerrors++; 2364 ifp->if_flags &= ~IFF_RUNNING; 2365 alc_init_locked(sc); 2366 if (!ifq_is_empty(&ifp->if_snd)) 2367 taskqueue_enqueue(sc->alc_tq, &sc->alc_tx_task); 2368 } 2369 2370 static int 2371 alc_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data, struct ucred *cr) 2372 { 2373 struct alc_softc *sc; 2374 struct ifreq *ifr; 2375 struct mii_data *mii; 2376 int error, mask; 2377 2378 (void)cr; 2379 sc = ifp->if_softc; 2380 ifr = (struct ifreq *)data; 2381 error = 0; 2382 switch (cmd) { 2383 case SIOCSIFMTU: 2384 if (ifr->ifr_mtu < ETHERMIN || 2385 ifr->ifr_mtu > (sc->alc_ident->max_framelen - 2386 sizeof(struct ether_vlan_header) - ETHER_CRC_LEN) || 2387 ((sc->alc_flags & ALC_FLAG_JUMBO) == 0 && 2388 ifr->ifr_mtu > ETHERMTU)) { 2389 error = EINVAL; 2390 } else if (ifp->if_mtu != ifr->ifr_mtu) { 2391 ALC_LOCK(sc); 2392 ifp->if_mtu = ifr->ifr_mtu; 2393 /* AR813x/AR815x has 13 bits MSS field. */ 2394 if (ifp->if_mtu > ALC_TSO_MTU && 2395 (ifp->if_capenable & IFCAP_TSO4) != 0) { 2396 ifp->if_capenable &= ~IFCAP_TSO4; 2397 ifp->if_hwassist &= ~CSUM_TSO; 2398 } 2399 ALC_UNLOCK(sc); 2400 } 2401 break; 2402 case SIOCSIFFLAGS: 2403 ALC_LOCK(sc); 2404 if ((ifp->if_flags & IFF_UP) != 0) { 2405 if ((ifp->if_flags & IFF_RUNNING) != 0 && 2406 ((ifp->if_flags ^ sc->alc_if_flags) & 2407 (IFF_PROMISC | IFF_ALLMULTI)) != 0) 2408 alc_rxfilter(sc); 2409 else if ((sc->alc_flags & ALC_FLAG_DETACH) == 0) 2410 alc_init_locked(sc); 2411 } else if ((ifp->if_flags & IFF_RUNNING) != 0) 2412 alc_stop(sc); 2413 sc->alc_if_flags = ifp->if_flags; 2414 ALC_UNLOCK(sc); 2415 break; 2416 case SIOCADDMULTI: 2417 case SIOCDELMULTI: 2418 ALC_LOCK(sc); 2419 if ((ifp->if_flags & IFF_RUNNING) != 0) 2420 alc_rxfilter(sc); 2421 ALC_UNLOCK(sc); 2422 break; 2423 case SIOCSIFMEDIA: 2424 case SIOCGIFMEDIA: 2425 mii = device_get_softc(sc->alc_miibus); 2426 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd); 2427 break; 2428 case SIOCSIFCAP: 2429 ALC_LOCK(sc); 2430 mask = ifr->ifr_reqcap ^ ifp->if_capenable; 2431 if ((mask & IFCAP_TXCSUM) != 0 && 2432 (ifp->if_capabilities & IFCAP_TXCSUM) != 0) { 2433 ifp->if_capenable ^= IFCAP_TXCSUM; 2434 if ((ifp->if_capenable & IFCAP_TXCSUM) != 0) 2435 ifp->if_hwassist |= ALC_CSUM_FEATURES; 2436 else 2437 ifp->if_hwassist &= ~ALC_CSUM_FEATURES; 2438 } 2439 if ((mask & IFCAP_TSO4) != 0 && 2440 (ifp->if_capabilities & IFCAP_TSO4) != 0) { 2441 ifp->if_capenable ^= IFCAP_TSO4; 2442 if ((ifp->if_capenable & IFCAP_TSO4) != 0) { 2443 /* AR813x/AR815x has 13 bits MSS field. */ 2444 if (ifp->if_mtu > ALC_TSO_MTU) { 2445 ifp->if_capenable &= ~IFCAP_TSO4; 2446 ifp->if_hwassist &= ~CSUM_TSO; 2447 } else 2448 ifp->if_hwassist |= CSUM_TSO; 2449 } else 2450 ifp->if_hwassist &= ~CSUM_TSO; 2451 } 2452 #if 0 2453 /* XXX: WOL */ 2454 if ((mask & IFCAP_WOL_MCAST) != 0 && 2455 (ifp->if_capabilities & IFCAP_WOL_MCAST) != 0) 2456 ifp->if_capenable ^= IFCAP_WOL_MCAST; 2457 if ((mask & IFCAP_WOL_MAGIC) != 0 && 2458 (ifp->if_capabilities & IFCAP_WOL_MAGIC) != 0) 2459 ifp->if_capenable ^= IFCAP_WOL_MAGIC; 2460 #endif 2461 if ((mask & IFCAP_VLAN_HWTAGGING) != 0 && 2462 (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) != 0) { 2463 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING; 2464 alc_rxvlan(sc); 2465 } 2466 if ((mask & IFCAP_VLAN_HWCSUM) != 0 && 2467 (ifp->if_capabilities & IFCAP_VLAN_HWCSUM) != 0) 2468 ifp->if_capenable ^= IFCAP_VLAN_HWCSUM; 2469 if ((mask & IFCAP_VLAN_HWTSO) != 0 && 2470 (ifp->if_capabilities & IFCAP_VLAN_HWTSO) != 0) 2471 ifp->if_capenable ^= IFCAP_VLAN_HWTSO; 2472 /* 2473 * VLAN hardware tagging is required to do checksum 2474 * offload or TSO on VLAN interface. Checksum offload 2475 * on VLAN interface also requires hardware checksum 2476 * offload of parent interface. 2477 */ 2478 if ((ifp->if_capenable & IFCAP_TXCSUM) == 0) 2479 ifp->if_capenable &= ~IFCAP_VLAN_HWCSUM; 2480 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) == 0) 2481 ifp->if_capenable &= 2482 ~(IFCAP_VLAN_HWTSO | IFCAP_VLAN_HWCSUM); 2483 ALC_UNLOCK(sc); 2484 // XXX VLAN_CAPABILITIES(ifp); 2485 break; 2486 default: 2487 error = ether_ioctl(ifp, cmd, data); 2488 break; 2489 } 2490 2491 return (error); 2492 } 2493 2494 static void 2495 alc_mac_config(struct alc_softc *sc) 2496 { 2497 struct mii_data *mii; 2498 uint32_t reg; 2499 2500 ALC_LOCK_ASSERT(sc); 2501 2502 mii = device_get_softc(sc->alc_miibus); 2503 reg = CSR_READ_4(sc, ALC_MAC_CFG); 2504 reg &= ~(MAC_CFG_FULL_DUPLEX | MAC_CFG_TX_FC | MAC_CFG_RX_FC | 2505 MAC_CFG_SPEED_MASK); 2506 if (sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8151 || 2507 sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8151_V2 || 2508 sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8152_B2) { 2509 reg |= MAC_CFG_HASH_ALG_CRC32 | MAC_CFG_SPEED_MODE_SW; 2510 } 2511 /* Reprogram MAC with resolved speed/duplex. */ 2512 switch (IFM_SUBTYPE(mii->mii_media_active)) { 2513 case IFM_10_T: 2514 case IFM_100_TX: 2515 reg |= MAC_CFG_SPEED_10_100; 2516 break; 2517 case IFM_1000_T: 2518 reg |= MAC_CFG_SPEED_1000; 2519 break; 2520 } 2521 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) { 2522 reg |= MAC_CFG_FULL_DUPLEX; 2523 #ifdef notyet 2524 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0) 2525 reg |= MAC_CFG_TX_FC; 2526 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0) 2527 reg |= MAC_CFG_RX_FC; 2528 #endif 2529 } 2530 CSR_WRITE_4(sc, ALC_MAC_CFG, reg); 2531 } 2532 2533 static void 2534 alc_stats_clear(struct alc_softc *sc) 2535 { 2536 struct smb sb, *smb; 2537 uint32_t *reg; 2538 int i; 2539 2540 if ((sc->alc_flags & ALC_FLAG_SMB_BUG) == 0) { 2541 bus_dmamap_sync(sc->alc_cdata.alc_smb_tag, 2542 sc->alc_cdata.alc_smb_map, 2543 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 2544 smb = sc->alc_rdata.alc_smb; 2545 /* Update done, clear. */ 2546 smb->updated = 0; 2547 bus_dmamap_sync(sc->alc_cdata.alc_smb_tag, 2548 sc->alc_cdata.alc_smb_map, 2549 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 2550 } else { 2551 for (reg = &sb.rx_frames, i = 0; reg <= &sb.rx_pkts_filtered; 2552 reg++) { 2553 CSR_READ_4(sc, ALC_RX_MIB_BASE + i); 2554 i += sizeof(uint32_t); 2555 } 2556 /* Read Tx statistics. */ 2557 for (reg = &sb.tx_frames, i = 0; reg <= &sb.tx_mcast_bytes; 2558 reg++) { 2559 CSR_READ_4(sc, ALC_TX_MIB_BASE + i); 2560 i += sizeof(uint32_t); 2561 } 2562 } 2563 } 2564 2565 static void 2566 alc_stats_update(struct alc_softc *sc) 2567 { 2568 struct alc_hw_stats *stat; 2569 struct smb sb, *smb; 2570 struct ifnet *ifp; 2571 uint32_t *reg; 2572 int i; 2573 2574 ALC_LOCK_ASSERT(sc); 2575 2576 ifp = sc->alc_ifp; 2577 stat = &sc->alc_stats; 2578 if ((sc->alc_flags & ALC_FLAG_SMB_BUG) == 0) { 2579 bus_dmamap_sync(sc->alc_cdata.alc_smb_tag, 2580 sc->alc_cdata.alc_smb_map, 2581 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 2582 smb = sc->alc_rdata.alc_smb; 2583 if (smb->updated == 0) 2584 return; 2585 } else { 2586 smb = &sb; 2587 /* Read Rx statistics. */ 2588 for (reg = &sb.rx_frames, i = 0; reg <= &sb.rx_pkts_filtered; 2589 reg++) { 2590 *reg = CSR_READ_4(sc, ALC_RX_MIB_BASE + i); 2591 i += sizeof(uint32_t); 2592 } 2593 /* Read Tx statistics. */ 2594 for (reg = &sb.tx_frames, i = 0; reg <= &sb.tx_mcast_bytes; 2595 reg++) { 2596 *reg = CSR_READ_4(sc, ALC_TX_MIB_BASE + i); 2597 i += sizeof(uint32_t); 2598 } 2599 } 2600 2601 /* Rx stats. */ 2602 stat->rx_frames += smb->rx_frames; 2603 stat->rx_bcast_frames += smb->rx_bcast_frames; 2604 stat->rx_mcast_frames += smb->rx_mcast_frames; 2605 stat->rx_pause_frames += smb->rx_pause_frames; 2606 stat->rx_control_frames += smb->rx_control_frames; 2607 stat->rx_crcerrs += smb->rx_crcerrs; 2608 stat->rx_lenerrs += smb->rx_lenerrs; 2609 stat->rx_bytes += smb->rx_bytes; 2610 stat->rx_runts += smb->rx_runts; 2611 stat->rx_fragments += smb->rx_fragments; 2612 stat->rx_pkts_64 += smb->rx_pkts_64; 2613 stat->rx_pkts_65_127 += smb->rx_pkts_65_127; 2614 stat->rx_pkts_128_255 += smb->rx_pkts_128_255; 2615 stat->rx_pkts_256_511 += smb->rx_pkts_256_511; 2616 stat->rx_pkts_512_1023 += smb->rx_pkts_512_1023; 2617 stat->rx_pkts_1024_1518 += smb->rx_pkts_1024_1518; 2618 stat->rx_pkts_1519_max += smb->rx_pkts_1519_max; 2619 stat->rx_pkts_truncated += smb->rx_pkts_truncated; 2620 stat->rx_fifo_oflows += smb->rx_fifo_oflows; 2621 stat->rx_rrs_errs += smb->rx_rrs_errs; 2622 stat->rx_alignerrs += smb->rx_alignerrs; 2623 stat->rx_bcast_bytes += smb->rx_bcast_bytes; 2624 stat->rx_mcast_bytes += smb->rx_mcast_bytes; 2625 stat->rx_pkts_filtered += smb->rx_pkts_filtered; 2626 2627 /* Tx stats. */ 2628 stat->tx_frames += smb->tx_frames; 2629 stat->tx_bcast_frames += smb->tx_bcast_frames; 2630 stat->tx_mcast_frames += smb->tx_mcast_frames; 2631 stat->tx_pause_frames += smb->tx_pause_frames; 2632 stat->tx_excess_defer += smb->tx_excess_defer; 2633 stat->tx_control_frames += smb->tx_control_frames; 2634 stat->tx_deferred += smb->tx_deferred; 2635 stat->tx_bytes += smb->tx_bytes; 2636 stat->tx_pkts_64 += smb->tx_pkts_64; 2637 stat->tx_pkts_65_127 += smb->tx_pkts_65_127; 2638 stat->tx_pkts_128_255 += smb->tx_pkts_128_255; 2639 stat->tx_pkts_256_511 += smb->tx_pkts_256_511; 2640 stat->tx_pkts_512_1023 += smb->tx_pkts_512_1023; 2641 stat->tx_pkts_1024_1518 += smb->tx_pkts_1024_1518; 2642 stat->tx_pkts_1519_max += smb->tx_pkts_1519_max; 2643 stat->tx_single_colls += smb->tx_single_colls; 2644 stat->tx_multi_colls += smb->tx_multi_colls; 2645 stat->tx_late_colls += smb->tx_late_colls; 2646 stat->tx_excess_colls += smb->tx_excess_colls; 2647 stat->tx_abort += smb->tx_abort; 2648 stat->tx_underrun += smb->tx_underrun; 2649 stat->tx_desc_underrun += smb->tx_desc_underrun; 2650 stat->tx_lenerrs += smb->tx_lenerrs; 2651 stat->tx_pkts_truncated += smb->tx_pkts_truncated; 2652 stat->tx_bcast_bytes += smb->tx_bcast_bytes; 2653 stat->tx_mcast_bytes += smb->tx_mcast_bytes; 2654 2655 /* Update counters in ifnet. */ 2656 ifp->if_opackets += smb->tx_frames; 2657 2658 ifp->if_collisions += smb->tx_single_colls + 2659 smb->tx_multi_colls * 2 + smb->tx_late_colls + 2660 smb->tx_abort * HDPX_CFG_RETRY_DEFAULT; 2661 2662 /* 2663 * XXX 2664 * tx_pkts_truncated counter looks suspicious. It constantly 2665 * increments with no sign of Tx errors. This may indicate 2666 * the counter name is not correct one so I've removed the 2667 * counter in output errors. 2668 */ 2669 ifp->if_oerrors += smb->tx_abort + smb->tx_late_colls + 2670 smb->tx_underrun; 2671 2672 ifp->if_ipackets += smb->rx_frames; 2673 2674 ifp->if_ierrors += smb->rx_crcerrs + smb->rx_lenerrs + 2675 smb->rx_runts + smb->rx_pkts_truncated + 2676 smb->rx_fifo_oflows + smb->rx_rrs_errs + 2677 smb->rx_alignerrs; 2678 2679 if ((sc->alc_flags & ALC_FLAG_SMB_BUG) == 0) { 2680 /* Update done, clear. */ 2681 smb->updated = 0; 2682 bus_dmamap_sync(sc->alc_cdata.alc_smb_tag, 2683 sc->alc_cdata.alc_smb_map, 2684 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 2685 } 2686 } 2687 2688 static void 2689 alc_intr(void *arg) 2690 { 2691 struct alc_softc *sc; 2692 uint32_t status; 2693 2694 sc = (struct alc_softc *)arg; 2695 2696 status = CSR_READ_4(sc, ALC_INTR_STATUS); 2697 if ((status & ALC_INTRS) == 0) { 2698 return; 2699 } 2700 /* Disable interrupts. */ 2701 CSR_WRITE_4(sc, ALC_INTR_STATUS, INTR_DIS_INT); 2702 taskqueue_enqueue(sc->alc_tq, &sc->alc_int_task); 2703 2704 return; 2705 } 2706 2707 static void 2708 alc_int_task(void *arg, int pending) 2709 { 2710 struct alc_softc *sc; 2711 struct ifnet *ifp; 2712 uint32_t status; 2713 int more; 2714 2715 sc = (struct alc_softc *)arg; 2716 ifp = sc->alc_ifp; 2717 2718 status = CSR_READ_4(sc, ALC_INTR_STATUS); 2719 more = atomic_readandclear_32(&sc->alc_morework); 2720 if (more != 0) 2721 status |= INTR_RX_PKT; 2722 if ((status & ALC_INTRS) == 0) 2723 goto done; 2724 2725 /* Acknowledge interrupts but still disable interrupts. */ 2726 CSR_WRITE_4(sc, ALC_INTR_STATUS, status | INTR_DIS_INT); 2727 2728 more = 0; 2729 if ((ifp->if_flags & IFF_RUNNING) != 0) { 2730 if ((status & INTR_RX_PKT) != 0) { 2731 more = alc_rxintr(sc, sc->alc_process_limit); 2732 if (more == EAGAIN) 2733 atomic_set_int(&sc->alc_morework, 1); 2734 else if (more == EIO) { 2735 ALC_LOCK(sc); 2736 ifp->if_flags &= ~IFF_RUNNING; 2737 alc_init_locked(sc); 2738 ALC_UNLOCK(sc); 2739 return; 2740 } 2741 } 2742 if ((status & (INTR_DMA_RD_TO_RST | INTR_DMA_WR_TO_RST | 2743 INTR_TXQ_TO_RST)) != 0) { 2744 if ((status & INTR_DMA_RD_TO_RST) != 0) 2745 device_printf(sc->alc_dev, 2746 "DMA read error! -- resetting\n"); 2747 if ((status & INTR_DMA_WR_TO_RST) != 0) 2748 device_printf(sc->alc_dev, 2749 "DMA write error! -- resetting\n"); 2750 if ((status & INTR_TXQ_TO_RST) != 0) 2751 device_printf(sc->alc_dev, 2752 "TxQ reset! -- resetting\n"); 2753 ALC_LOCK(sc); 2754 ifp->if_flags &= ~IFF_RUNNING; 2755 alc_init_locked(sc); 2756 ALC_UNLOCK(sc); 2757 return; 2758 } 2759 if ((ifp->if_flags & IFF_RUNNING) != 0 && 2760 !ifq_is_empty(&ifp->if_snd)) 2761 taskqueue_enqueue(sc->alc_tq, &sc->alc_tx_task); 2762 } 2763 2764 if (more == EAGAIN || 2765 (CSR_READ_4(sc, ALC_INTR_STATUS) & ALC_INTRS) != 0) { 2766 taskqueue_enqueue(sc->alc_tq, &sc->alc_int_task); 2767 return; 2768 } 2769 2770 done: 2771 if ((ifp->if_flags & IFF_RUNNING) != 0) { 2772 /* Re-enable interrupts if we're running. */ 2773 CSR_WRITE_4(sc, ALC_INTR_STATUS, 0x7FFFFFFF); 2774 } 2775 } 2776 2777 static void 2778 alc_txeof(struct alc_softc *sc) 2779 { 2780 struct ifnet *ifp; 2781 struct alc_txdesc *txd; 2782 uint32_t cons, prod; 2783 int prog; 2784 2785 ALC_LOCK_ASSERT(sc); 2786 2787 ifp = sc->alc_ifp; 2788 2789 if (sc->alc_cdata.alc_tx_cnt == 0) 2790 return; 2791 bus_dmamap_sync(sc->alc_cdata.alc_tx_ring_tag, 2792 sc->alc_cdata.alc_tx_ring_map, BUS_DMASYNC_POSTWRITE); 2793 if ((sc->alc_flags & ALC_FLAG_CMB_BUG) == 0) { 2794 bus_dmamap_sync(sc->alc_cdata.alc_cmb_tag, 2795 sc->alc_cdata.alc_cmb_map, BUS_DMASYNC_POSTREAD); 2796 prod = sc->alc_rdata.alc_cmb->cons; 2797 } else 2798 prod = CSR_READ_4(sc, ALC_MBOX_TD_CONS_IDX); 2799 /* Assume we're using normal Tx priority queue. */ 2800 prod = (prod & MBOX_TD_CONS_LO_IDX_MASK) >> 2801 MBOX_TD_CONS_LO_IDX_SHIFT; 2802 cons = sc->alc_cdata.alc_tx_cons; 2803 /* 2804 * Go through our Tx list and free mbufs for those 2805 * frames which have been transmitted. 2806 */ 2807 for (prog = 0; cons != prod; prog++, 2808 ALC_DESC_INC(cons, ALC_TX_RING_CNT)) { 2809 if (sc->alc_cdata.alc_tx_cnt <= 0) 2810 break; 2811 prog++; 2812 ifp->if_flags &= ~IFF_OACTIVE; 2813 sc->alc_cdata.alc_tx_cnt--; 2814 txd = &sc->alc_cdata.alc_txdesc[cons]; 2815 if (txd->tx_m != NULL) { 2816 /* Reclaim transmitted mbufs. */ 2817 bus_dmamap_sync(sc->alc_cdata.alc_tx_tag, 2818 txd->tx_dmamap, BUS_DMASYNC_POSTWRITE); 2819 bus_dmamap_unload(sc->alc_cdata.alc_tx_tag, 2820 txd->tx_dmamap); 2821 m_freem(txd->tx_m); 2822 txd->tx_m = NULL; 2823 } 2824 } 2825 2826 if ((sc->alc_flags & ALC_FLAG_CMB_BUG) == 0) 2827 bus_dmamap_sync(sc->alc_cdata.alc_cmb_tag, 2828 sc->alc_cdata.alc_cmb_map, BUS_DMASYNC_PREREAD); 2829 sc->alc_cdata.alc_tx_cons = cons; 2830 /* 2831 * Unarm watchdog timer only when there is no pending 2832 * frames in Tx queue. 2833 */ 2834 if (sc->alc_cdata.alc_tx_cnt == 0) 2835 sc->alc_watchdog_timer = 0; 2836 } 2837 2838 static int 2839 alc_newbuf(struct alc_softc *sc, struct alc_rxdesc *rxd) 2840 { 2841 struct mbuf *m; 2842 bus_dma_segment_t segs[1]; 2843 bus_dmamap_t map; 2844 int nsegs; 2845 int error; 2846 2847 m = m_getcl(MB_DONTWAIT, MT_DATA, M_PKTHDR); 2848 if (m == NULL) 2849 return (ENOBUFS); 2850 m->m_len = m->m_pkthdr.len = RX_BUF_SIZE_MAX; 2851 #ifndef __NO_STRICT_ALIGNMENT 2852 m_adj(m, sizeof(uint64_t)); 2853 #endif 2854 2855 error = bus_dmamap_load_mbuf_segment( 2856 sc->alc_cdata.alc_rx_tag, 2857 sc->alc_cdata.alc_rx_sparemap, 2858 m, segs, 1, &nsegs, BUS_DMA_NOWAIT); 2859 if (error) { 2860 m_freem(m); 2861 return (ENOBUFS); 2862 } 2863 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs)); 2864 2865 if (rxd->rx_m != NULL) { 2866 bus_dmamap_sync(sc->alc_cdata.alc_rx_tag, rxd->rx_dmamap, 2867 BUS_DMASYNC_POSTREAD); 2868 bus_dmamap_unload(sc->alc_cdata.alc_rx_tag, rxd->rx_dmamap); 2869 } 2870 map = rxd->rx_dmamap; 2871 rxd->rx_dmamap = sc->alc_cdata.alc_rx_sparemap; 2872 sc->alc_cdata.alc_rx_sparemap = map; 2873 bus_dmamap_sync(sc->alc_cdata.alc_rx_tag, rxd->rx_dmamap, 2874 BUS_DMASYNC_PREREAD); 2875 rxd->rx_m = m; 2876 rxd->rx_desc->addr = htole64(segs[0].ds_addr); 2877 return (0); 2878 } 2879 2880 static int 2881 alc_rxintr(struct alc_softc *sc, int count) 2882 { 2883 struct ifnet *ifp; 2884 struct rx_rdesc *rrd; 2885 uint32_t nsegs, status; 2886 int rr_cons, prog; 2887 2888 bus_dmamap_sync(sc->alc_cdata.alc_rr_ring_tag, 2889 sc->alc_cdata.alc_rr_ring_map, 2890 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 2891 bus_dmamap_sync(sc->alc_cdata.alc_rx_ring_tag, 2892 sc->alc_cdata.alc_rx_ring_map, BUS_DMASYNC_POSTWRITE); 2893 rr_cons = sc->alc_cdata.alc_rr_cons; 2894 ifp = sc->alc_ifp; 2895 for (prog = 0; (ifp->if_flags & IFF_RUNNING) != 0;) { 2896 if (count-- <= 0) 2897 break; 2898 rrd = &sc->alc_rdata.alc_rr_ring[rr_cons]; 2899 status = le32toh(rrd->status); 2900 if ((status & RRD_VALID) == 0) 2901 break; 2902 nsegs = RRD_RD_CNT(le32toh(rrd->rdinfo)); 2903 if (nsegs == 0) { 2904 /* This should not happen! */ 2905 device_printf(sc->alc_dev, 2906 "unexpected segment count -- resetting\n"); 2907 return (EIO); 2908 } 2909 alc_rxeof(sc, rrd); 2910 /* Clear Rx return status. */ 2911 rrd->status = 0; 2912 ALC_DESC_INC(rr_cons, ALC_RR_RING_CNT); 2913 sc->alc_cdata.alc_rx_cons += nsegs; 2914 sc->alc_cdata.alc_rx_cons %= ALC_RR_RING_CNT; 2915 prog += nsegs; 2916 } 2917 2918 if (prog > 0) { 2919 /* Update the consumer index. */ 2920 sc->alc_cdata.alc_rr_cons = rr_cons; 2921 /* Sync Rx return descriptors. */ 2922 bus_dmamap_sync(sc->alc_cdata.alc_rr_ring_tag, 2923 sc->alc_cdata.alc_rr_ring_map, 2924 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 2925 /* 2926 * Sync updated Rx descriptors such that controller see 2927 * modified buffer addresses. 2928 */ 2929 bus_dmamap_sync(sc->alc_cdata.alc_rx_ring_tag, 2930 sc->alc_cdata.alc_rx_ring_map, BUS_DMASYNC_PREWRITE); 2931 /* 2932 * Let controller know availability of new Rx buffers. 2933 * Since alc(4) use RXQ_CFG_RD_BURST_DEFAULT descriptors 2934 * it may be possible to update ALC_MBOX_RD0_PROD_IDX 2935 * only when Rx buffer pre-fetching is required. In 2936 * addition we already set ALC_RX_RD_FREE_THRESH to 2937 * RX_RD_FREE_THRESH_LO_DEFAULT descriptors. However 2938 * it still seems that pre-fetching needs more 2939 * experimentation. 2940 */ 2941 CSR_WRITE_4(sc, ALC_MBOX_RD0_PROD_IDX, 2942 sc->alc_cdata.alc_rx_cons); 2943 } 2944 2945 return (count > 0 ? 0 : EAGAIN); 2946 } 2947 2948 #ifndef __NO_STRICT_ALIGNMENT 2949 static struct mbuf * 2950 alc_fixup_rx(struct ifnet *ifp, struct mbuf *m) 2951 { 2952 struct mbuf *n; 2953 int i; 2954 uint16_t *src, *dst; 2955 2956 src = mtod(m, uint16_t *); 2957 dst = src - 3; 2958 2959 if (m->m_next == NULL) { 2960 for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++) 2961 *dst++ = *src++; 2962 m->m_data -= 6; 2963 return (m); 2964 } 2965 /* 2966 * Append a new mbuf to received mbuf chain and copy ethernet 2967 * header from the mbuf chain. This can save lots of CPU 2968 * cycles for jumbo frame. 2969 */ 2970 MGETHDR(n, MB_DONTWAIT, MT_DATA); 2971 if (n == NULL) { 2972 ifp->if_iqdrops++; 2973 m_freem(m); 2974 return (NULL); 2975 } 2976 bcopy(m->m_data, n->m_data, ETHER_HDR_LEN); 2977 m->m_data += ETHER_HDR_LEN; 2978 m->m_len -= ETHER_HDR_LEN; 2979 n->m_len = ETHER_HDR_LEN; 2980 M_MOVE_PKTHDR(n, m); 2981 n->m_next = m; 2982 return (n); 2983 } 2984 #endif 2985 2986 /* Receive a frame. */ 2987 static void 2988 alc_rxeof(struct alc_softc *sc, struct rx_rdesc *rrd) 2989 { 2990 struct alc_rxdesc *rxd; 2991 struct ifnet *ifp; 2992 struct mbuf *mp, *m; 2993 uint32_t rdinfo, status, vtag; 2994 int count, nsegs, rx_cons; 2995 2996 ifp = sc->alc_ifp; 2997 status = le32toh(rrd->status); 2998 rdinfo = le32toh(rrd->rdinfo); 2999 rx_cons = RRD_RD_IDX(rdinfo); 3000 nsegs = RRD_RD_CNT(rdinfo); 3001 3002 sc->alc_cdata.alc_rxlen = RRD_BYTES(status); 3003 if ((status & (RRD_ERR_SUM | RRD_ERR_LENGTH)) != 0) { 3004 /* 3005 * We want to pass the following frames to upper 3006 * layer regardless of error status of Rx return 3007 * ring. 3008 * 3009 * o IP/TCP/UDP checksum is bad. 3010 * o frame length and protocol specific length 3011 * does not match. 3012 * 3013 * Force network stack compute checksum for 3014 * errored frames. 3015 */ 3016 status |= RRD_TCP_UDPCSUM_NOK | RRD_IPCSUM_NOK; 3017 if ((RRD_ERR_CRC | RRD_ERR_ALIGN | RRD_ERR_TRUNC | 3018 RRD_ERR_RUNT) != 0) 3019 return; 3020 } 3021 3022 for (count = 0; count < nsegs; count++, 3023 ALC_DESC_INC(rx_cons, ALC_RX_RING_CNT)) { 3024 rxd = &sc->alc_cdata.alc_rxdesc[rx_cons]; 3025 mp = rxd->rx_m; 3026 /* Add a new receive buffer to the ring. */ 3027 if (alc_newbuf(sc, rxd) != 0) { 3028 ifp->if_iqdrops++; 3029 /* Reuse Rx buffers. */ 3030 if (sc->alc_cdata.alc_rxhead != NULL) 3031 m_freem(sc->alc_cdata.alc_rxhead); 3032 break; 3033 } 3034 3035 /* 3036 * Assume we've received a full sized frame. 3037 * Actual size is fixed when we encounter the end of 3038 * multi-segmented frame. 3039 */ 3040 mp->m_len = sc->alc_buf_size; 3041 3042 /* Chain received mbufs. */ 3043 if (sc->alc_cdata.alc_rxhead == NULL) { 3044 sc->alc_cdata.alc_rxhead = mp; 3045 sc->alc_cdata.alc_rxtail = mp; 3046 } else { 3047 mp->m_flags &= ~M_PKTHDR; 3048 sc->alc_cdata.alc_rxprev_tail = 3049 sc->alc_cdata.alc_rxtail; 3050 sc->alc_cdata.alc_rxtail->m_next = mp; 3051 sc->alc_cdata.alc_rxtail = mp; 3052 } 3053 3054 if (count == nsegs - 1) { 3055 /* Last desc. for this frame. */ 3056 m = sc->alc_cdata.alc_rxhead; 3057 m->m_flags |= M_PKTHDR; 3058 /* 3059 * It seems that L1C/L2C controller has no way 3060 * to tell hardware to strip CRC bytes. 3061 */ 3062 m->m_pkthdr.len = 3063 sc->alc_cdata.alc_rxlen - ETHER_CRC_LEN; 3064 if (nsegs > 1) { 3065 /* Set last mbuf size. */ 3066 mp->m_len = sc->alc_cdata.alc_rxlen - 3067 (nsegs - 1) * sc->alc_buf_size; 3068 /* Remove the CRC bytes in chained mbufs. */ 3069 if (mp->m_len <= ETHER_CRC_LEN) { 3070 sc->alc_cdata.alc_rxtail = 3071 sc->alc_cdata.alc_rxprev_tail; 3072 sc->alc_cdata.alc_rxtail->m_len -= 3073 (ETHER_CRC_LEN - mp->m_len); 3074 sc->alc_cdata.alc_rxtail->m_next = NULL; 3075 m_freem(mp); 3076 } else { 3077 mp->m_len -= ETHER_CRC_LEN; 3078 } 3079 } else 3080 m->m_len = m->m_pkthdr.len; 3081 m->m_pkthdr.rcvif = ifp; 3082 /* 3083 * Due to hardware bugs, Rx checksum offloading 3084 * was intentionally disabled. 3085 */ 3086 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0 && 3087 (status & RRD_VLAN_TAG) != 0) { 3088 vtag = RRD_VLAN(le32toh(rrd->vtag)); 3089 m->m_pkthdr.ether_vlantag = ntohs(vtag); 3090 m->m_flags |= M_VLANTAG; 3091 } 3092 #ifndef __NO_STRICT_ALIGNMENT 3093 m = alc_fixup_rx(ifp, m); 3094 if (m != NULL) 3095 #endif 3096 { 3097 /* Pass it on. */ 3098 (*ifp->if_input)(ifp, m); 3099 } 3100 } 3101 } 3102 /* Reset mbuf chains. */ 3103 ALC_RXCHAIN_RESET(sc); 3104 } 3105 3106 static void 3107 alc_tick(void *arg) 3108 { 3109 struct alc_softc *sc; 3110 struct mii_data *mii; 3111 3112 sc = (struct alc_softc *)arg; 3113 3114 ALC_LOCK(sc); 3115 3116 mii = device_get_softc(sc->alc_miibus); 3117 mii_tick(mii); 3118 alc_stats_update(sc); 3119 /* 3120 * alc(4) does not rely on Tx completion interrupts to reclaim 3121 * transferred buffers. Instead Tx completion interrupts are 3122 * used to hint for scheduling Tx task. So it's necessary to 3123 * release transmitted buffers by kicking Tx completion 3124 * handler. This limits the maximum reclamation delay to a hz. 3125 */ 3126 alc_txeof(sc); 3127 alc_watchdog(sc); 3128 callout_reset(&sc->alc_tick_ch, hz, alc_tick, sc); 3129 ALC_UNLOCK(sc); 3130 } 3131 3132 static void 3133 alc_reset(struct alc_softc *sc) 3134 { 3135 uint32_t reg; 3136 int i; 3137 3138 reg = CSR_READ_4(sc, ALC_MASTER_CFG) & 0xFFFF; 3139 reg |= MASTER_OOB_DIS_OFF | MASTER_RESET; 3140 CSR_WRITE_4(sc, ALC_MASTER_CFG, reg); 3141 3142 for (i = ALC_RESET_TIMEOUT; i > 0; i--) { 3143 DELAY(10); 3144 if ((CSR_READ_4(sc, ALC_MASTER_CFG) & MASTER_RESET) == 0) 3145 break; 3146 } 3147 if (i == 0) 3148 device_printf(sc->alc_dev, "master reset timeout!\n"); 3149 3150 for (i = ALC_RESET_TIMEOUT; i > 0; i--) { 3151 if ((reg = CSR_READ_4(sc, ALC_IDLE_STATUS)) == 0) 3152 break; 3153 DELAY(10); 3154 } 3155 3156 if (i == 0) 3157 device_printf(sc->alc_dev, "reset timeout(0x%08x)!\n", reg); 3158 } 3159 3160 static void 3161 alc_init(void *xsc) 3162 { 3163 struct alc_softc *sc; 3164 3165 sc = (struct alc_softc *)xsc; 3166 ALC_LOCK(sc); 3167 alc_init_locked(sc); 3168 ALC_UNLOCK(sc); 3169 } 3170 3171 static void 3172 alc_init_locked(struct alc_softc *sc) 3173 { 3174 struct ifnet *ifp; 3175 struct mii_data *mii; 3176 uint8_t eaddr[ETHER_ADDR_LEN]; 3177 bus_addr_t paddr; 3178 uint32_t reg, rxf_hi, rxf_lo; 3179 3180 ALC_LOCK_ASSERT(sc); 3181 3182 ifp = sc->alc_ifp; 3183 mii = device_get_softc(sc->alc_miibus); 3184 3185 if ((ifp->if_flags & IFF_RUNNING) != 0) 3186 return; 3187 /* 3188 * Cancel any pending I/O. 3189 */ 3190 alc_stop(sc); 3191 /* 3192 * Reset the chip to a known state. 3193 */ 3194 alc_reset(sc); 3195 3196 /* Initialize Rx descriptors. */ 3197 if (alc_init_rx_ring(sc) != 0) { 3198 device_printf(sc->alc_dev, "no memory for Rx buffers.\n"); 3199 alc_stop(sc); 3200 return; 3201 } 3202 alc_init_rr_ring(sc); 3203 alc_init_tx_ring(sc); 3204 alc_init_cmb(sc); 3205 alc_init_smb(sc); 3206 3207 /* Reprogram the station address. */ 3208 bcopy(IF_LLADDR(ifp), eaddr, ETHER_ADDR_LEN); 3209 CSR_WRITE_4(sc, ALC_PAR0, 3210 eaddr[2] << 24 | eaddr[3] << 16 | eaddr[4] << 8 | eaddr[5]); 3211 CSR_WRITE_4(sc, ALC_PAR1, eaddr[0] << 8 | eaddr[1]); 3212 /* 3213 * Clear WOL status and disable all WOL feature as WOL 3214 * would interfere Rx operation under normal environments. 3215 */ 3216 CSR_READ_4(sc, ALC_WOL_CFG); 3217 CSR_WRITE_4(sc, ALC_WOL_CFG, 0); 3218 /* Set Tx descriptor base addresses. */ 3219 paddr = sc->alc_rdata.alc_tx_ring_paddr; 3220 CSR_WRITE_4(sc, ALC_TX_BASE_ADDR_HI, ALC_ADDR_HI(paddr)); 3221 CSR_WRITE_4(sc, ALC_TDL_HEAD_ADDR_LO, ALC_ADDR_LO(paddr)); 3222 /* We don't use high priority ring. */ 3223 CSR_WRITE_4(sc, ALC_TDH_HEAD_ADDR_LO, 0); 3224 /* Set Tx descriptor counter. */ 3225 CSR_WRITE_4(sc, ALC_TD_RING_CNT, 3226 (ALC_TX_RING_CNT << TD_RING_CNT_SHIFT) & TD_RING_CNT_MASK); 3227 /* Set Rx descriptor base addresses. */ 3228 paddr = sc->alc_rdata.alc_rx_ring_paddr; 3229 CSR_WRITE_4(sc, ALC_RX_BASE_ADDR_HI, ALC_ADDR_HI(paddr)); 3230 CSR_WRITE_4(sc, ALC_RD0_HEAD_ADDR_LO, ALC_ADDR_LO(paddr)); 3231 /* We use one Rx ring. */ 3232 CSR_WRITE_4(sc, ALC_RD1_HEAD_ADDR_LO, 0); 3233 CSR_WRITE_4(sc, ALC_RD2_HEAD_ADDR_LO, 0); 3234 CSR_WRITE_4(sc, ALC_RD3_HEAD_ADDR_LO, 0); 3235 /* Set Rx descriptor counter. */ 3236 CSR_WRITE_4(sc, ALC_RD_RING_CNT, 3237 (ALC_RX_RING_CNT << RD_RING_CNT_SHIFT) & RD_RING_CNT_MASK); 3238 3239 /* 3240 * Let hardware split jumbo frames into alc_max_buf_sized chunks. 3241 * if it do not fit the buffer size. Rx return descriptor holds 3242 * a counter that indicates how many fragments were made by the 3243 * hardware. The buffer size should be multiple of 8 bytes. 3244 * Since hardware has limit on the size of buffer size, always 3245 * use the maximum value. 3246 * For strict-alignment architectures make sure to reduce buffer 3247 * size by 8 bytes to make room for alignment fixup. 3248 */ 3249 #ifndef __NO_STRICT_ALIGNMENT 3250 sc->alc_buf_size = RX_BUF_SIZE_MAX - sizeof(uint64_t); 3251 #else 3252 sc->alc_buf_size = RX_BUF_SIZE_MAX; 3253 #endif 3254 CSR_WRITE_4(sc, ALC_RX_BUF_SIZE, sc->alc_buf_size); 3255 3256 paddr = sc->alc_rdata.alc_rr_ring_paddr; 3257 /* Set Rx return descriptor base addresses. */ 3258 CSR_WRITE_4(sc, ALC_RRD0_HEAD_ADDR_LO, ALC_ADDR_LO(paddr)); 3259 /* We use one Rx return ring. */ 3260 CSR_WRITE_4(sc, ALC_RRD1_HEAD_ADDR_LO, 0); 3261 CSR_WRITE_4(sc, ALC_RRD2_HEAD_ADDR_LO, 0); 3262 CSR_WRITE_4(sc, ALC_RRD3_HEAD_ADDR_LO, 0); 3263 /* Set Rx return descriptor counter. */ 3264 CSR_WRITE_4(sc, ALC_RRD_RING_CNT, 3265 (ALC_RR_RING_CNT << RRD_RING_CNT_SHIFT) & RRD_RING_CNT_MASK); 3266 paddr = sc->alc_rdata.alc_cmb_paddr; 3267 CSR_WRITE_4(sc, ALC_CMB_BASE_ADDR_LO, ALC_ADDR_LO(paddr)); 3268 paddr = sc->alc_rdata.alc_smb_paddr; 3269 CSR_WRITE_4(sc, ALC_SMB_BASE_ADDR_HI, ALC_ADDR_HI(paddr)); 3270 CSR_WRITE_4(sc, ALC_SMB_BASE_ADDR_LO, ALC_ADDR_LO(paddr)); 3271 3272 if (sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8152_B) { 3273 /* Reconfigure SRAM - Vendor magic. */ 3274 CSR_WRITE_4(sc, ALC_SRAM_RX_FIFO_LEN, 0x000002A0); 3275 CSR_WRITE_4(sc, ALC_SRAM_TX_FIFO_LEN, 0x00000100); 3276 CSR_WRITE_4(sc, ALC_SRAM_RX_FIFO_ADDR, 0x029F0000); 3277 CSR_WRITE_4(sc, ALC_SRAM_RD0_ADDR, 0x02BF02A0); 3278 CSR_WRITE_4(sc, ALC_SRAM_TX_FIFO_ADDR, 0x03BF02C0); 3279 CSR_WRITE_4(sc, ALC_SRAM_TD_ADDR, 0x03DF03C0); 3280 CSR_WRITE_4(sc, ALC_TXF_WATER_MARK, 0x00000000); 3281 CSR_WRITE_4(sc, ALC_RD_DMA_CFG, 0x00000000); 3282 } 3283 3284 /* Tell hardware that we're ready to load DMA blocks. */ 3285 CSR_WRITE_4(sc, ALC_DMA_BLOCK, DMA_BLOCK_LOAD); 3286 3287 /* Configure interrupt moderation timer. */ 3288 reg = ALC_USECS(sc->alc_int_rx_mod) << IM_TIMER_RX_SHIFT; 3289 reg |= ALC_USECS(sc->alc_int_tx_mod) << IM_TIMER_TX_SHIFT; 3290 CSR_WRITE_4(sc, ALC_IM_TIMER, reg); 3291 /* 3292 * We don't want to automatic interrupt clear as task queue 3293 * for the interrupt should know interrupt status. 3294 */ 3295 reg = MASTER_SA_TIMER_ENB; 3296 if (ALC_USECS(sc->alc_int_rx_mod) != 0) 3297 reg |= MASTER_IM_RX_TIMER_ENB; 3298 if (ALC_USECS(sc->alc_int_tx_mod) != 0) 3299 reg |= MASTER_IM_TX_TIMER_ENB; 3300 CSR_WRITE_4(sc, ALC_MASTER_CFG, reg); 3301 /* 3302 * Disable interrupt re-trigger timer. We don't want automatic 3303 * re-triggering of un-ACKed interrupts. 3304 */ 3305 CSR_WRITE_4(sc, ALC_INTR_RETRIG_TIMER, ALC_USECS(0)); 3306 /* Configure CMB. */ 3307 if ((sc->alc_flags & ALC_FLAG_CMB_BUG) == 0) { 3308 CSR_WRITE_4(sc, ALC_CMB_TD_THRESH, 4); 3309 CSR_WRITE_4(sc, ALC_CMB_TX_TIMER, ALC_USECS(5000)); 3310 } else { 3311 CSR_WRITE_4(sc, ALC_CMB_TX_TIMER, ALC_USECS(0)); 3312 } 3313 /* 3314 * Hardware can be configured to issue SMB interrupt based 3315 * on programmed interval. Since there is a callout that is 3316 * invoked for every hz in driver we use that instead of 3317 * relying on periodic SMB interrupt. 3318 */ 3319 CSR_WRITE_4(sc, ALC_SMB_STAT_TIMER, ALC_USECS(0)); 3320 /* Clear MAC statistics. */ 3321 alc_stats_clear(sc); 3322 3323 /* 3324 * Always use maximum frame size that controller can support. 3325 * Otherwise received frames that has larger frame length 3326 * than alc(4) MTU would be silently dropped in hardware. This 3327 * would make path-MTU discovery hard as sender wouldn't get 3328 * any responses from receiver. alc(4) supports 3329 * multi-fragmented frames on Rx path so it has no issue on 3330 * assembling fragmented frames. Using maximum frame size also 3331 * removes the need to reinitialize hardware when interface 3332 * MTU configuration was changed. 3333 * 3334 * Be conservative in what you do, be liberal in what you 3335 * accept from others - RFC 793. 3336 */ 3337 CSR_WRITE_4(sc, ALC_FRAME_SIZE, sc->alc_ident->max_framelen); 3338 3339 /* Disable header split(?) */ 3340 CSR_WRITE_4(sc, ALC_HDS_CFG, 0); 3341 3342 /* Configure IPG/IFG parameters. */ 3343 CSR_WRITE_4(sc, ALC_IPG_IFG_CFG, 3344 ((IPG_IFG_IPGT_DEFAULT << IPG_IFG_IPGT_SHIFT) & IPG_IFG_IPGT_MASK) | 3345 ((IPG_IFG_MIFG_DEFAULT << IPG_IFG_MIFG_SHIFT) & IPG_IFG_MIFG_MASK) | 3346 ((IPG_IFG_IPG1_DEFAULT << IPG_IFG_IPG1_SHIFT) & IPG_IFG_IPG1_MASK) | 3347 ((IPG_IFG_IPG2_DEFAULT << IPG_IFG_IPG2_SHIFT) & IPG_IFG_IPG2_MASK)); 3348 /* Set parameters for half-duplex media. */ 3349 CSR_WRITE_4(sc, ALC_HDPX_CFG, 3350 ((HDPX_CFG_LCOL_DEFAULT << HDPX_CFG_LCOL_SHIFT) & 3351 HDPX_CFG_LCOL_MASK) | 3352 ((HDPX_CFG_RETRY_DEFAULT << HDPX_CFG_RETRY_SHIFT) & 3353 HDPX_CFG_RETRY_MASK) | HDPX_CFG_EXC_DEF_EN | 3354 ((HDPX_CFG_ABEBT_DEFAULT << HDPX_CFG_ABEBT_SHIFT) & 3355 HDPX_CFG_ABEBT_MASK) | 3356 ((HDPX_CFG_JAMIPG_DEFAULT << HDPX_CFG_JAMIPG_SHIFT) & 3357 HDPX_CFG_JAMIPG_MASK)); 3358 /* 3359 * Set TSO/checksum offload threshold. For frames that is 3360 * larger than this threshold, hardware wouldn't do 3361 * TSO/checksum offloading. 3362 */ 3363 CSR_WRITE_4(sc, ALC_TSO_OFFLOAD_THRESH, 3364 (sc->alc_ident->max_framelen >> TSO_OFFLOAD_THRESH_UNIT_SHIFT) & 3365 TSO_OFFLOAD_THRESH_MASK); 3366 /* Configure TxQ. */ 3367 reg = (alc_dma_burst[sc->alc_dma_rd_burst] << 3368 TXQ_CFG_TX_FIFO_BURST_SHIFT) & TXQ_CFG_TX_FIFO_BURST_MASK; 3369 if (sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8152_B || 3370 sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8152_B2) { 3371 reg >>= 1; 3372 } 3373 reg |= (TXQ_CFG_TD_BURST_DEFAULT << TXQ_CFG_TD_BURST_SHIFT) & 3374 TXQ_CFG_TD_BURST_MASK; 3375 CSR_WRITE_4(sc, ALC_TXQ_CFG, reg | TXQ_CFG_ENHANCED_MODE); 3376 3377 /* Configure Rx free descriptor pre-fetching. */ 3378 CSR_WRITE_4(sc, ALC_RX_RD_FREE_THRESH, 3379 ((RX_RD_FREE_THRESH_HI_DEFAULT << RX_RD_FREE_THRESH_HI_SHIFT) & 3380 RX_RD_FREE_THRESH_HI_MASK) | 3381 ((RX_RD_FREE_THRESH_LO_DEFAULT << RX_RD_FREE_THRESH_LO_SHIFT) & 3382 RX_RD_FREE_THRESH_LO_MASK)); 3383 3384 /* 3385 * Configure flow control parameters. 3386 * XON : 80% of Rx FIFO 3387 * XOFF : 30% of Rx FIFO 3388 */ 3389 if (sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8131 || 3390 sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8132) { 3391 reg = CSR_READ_4(sc, ALC_SRAM_RX_FIFO_LEN); 3392 rxf_hi = (reg * 8) / 10; 3393 rxf_lo = (reg * 3) / 10; 3394 CSR_WRITE_4(sc, ALC_RX_FIFO_PAUSE_THRESH, 3395 ((rxf_lo << RX_FIFO_PAUSE_THRESH_LO_SHIFT) & 3396 RX_FIFO_PAUSE_THRESH_LO_MASK) | 3397 ((rxf_hi << RX_FIFO_PAUSE_THRESH_HI_SHIFT) & 3398 RX_FIFO_PAUSE_THRESH_HI_MASK)); 3399 } 3400 3401 if (sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8152_B || 3402 sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8151_V2) { 3403 CSR_WRITE_4(sc, ALC_SERDES_LOCK, 3404 CSR_READ_4(sc, ALC_SERDES_LOCK) | SERDES_MAC_CLK_SLOWDOWN | 3405 SERDES_PHY_CLK_SLOWDOWN); 3406 } 3407 3408 /* Disable RSS until I understand L1C/L2C's RSS logic. */ 3409 CSR_WRITE_4(sc, ALC_RSS_IDT_TABLE0, 0); 3410 CSR_WRITE_4(sc, ALC_RSS_CPU, 0); 3411 3412 /* Configure RxQ. */ 3413 reg = (RXQ_CFG_RD_BURST_DEFAULT << RXQ_CFG_RD_BURST_SHIFT) & 3414 RXQ_CFG_RD_BURST_MASK; 3415 reg |= RXQ_CFG_RSS_MODE_DIS; 3416 if ((sc->alc_flags & ALC_FLAG_ASPM_MON) != 0) 3417 reg |= RXQ_CFG_ASPM_THROUGHPUT_LIMIT_1M; 3418 CSR_WRITE_4(sc, ALC_RXQ_CFG, reg); 3419 3420 /* Configure DMA parameters. */ 3421 reg = DMA_CFG_OUT_ORDER | DMA_CFG_RD_REQ_PRI; 3422 reg |= sc->alc_rcb; 3423 if ((sc->alc_flags & ALC_FLAG_CMB_BUG) == 0) 3424 reg |= DMA_CFG_CMB_ENB; 3425 if ((sc->alc_flags & ALC_FLAG_SMB_BUG) == 0) 3426 reg |= DMA_CFG_SMB_ENB; 3427 else 3428 reg |= DMA_CFG_SMB_DIS; 3429 reg |= (sc->alc_dma_rd_burst & DMA_CFG_RD_BURST_MASK) << 3430 DMA_CFG_RD_BURST_SHIFT; 3431 reg |= (sc->alc_dma_wr_burst & DMA_CFG_WR_BURST_MASK) << 3432 DMA_CFG_WR_BURST_SHIFT; 3433 reg |= (DMA_CFG_RD_DELAY_CNT_DEFAULT << DMA_CFG_RD_DELAY_CNT_SHIFT) & 3434 DMA_CFG_RD_DELAY_CNT_MASK; 3435 reg |= (DMA_CFG_WR_DELAY_CNT_DEFAULT << DMA_CFG_WR_DELAY_CNT_SHIFT) & 3436 DMA_CFG_WR_DELAY_CNT_MASK; 3437 CSR_WRITE_4(sc, ALC_DMA_CFG, reg); 3438 3439 /* 3440 * Configure Tx/Rx MACs. 3441 * - Auto-padding for short frames. 3442 * - Enable CRC generation. 3443 * Actual reconfiguration of MAC for resolved speed/duplex 3444 * is followed after detection of link establishment. 3445 * AR813x/AR815x always does checksum computation regardless 3446 * of MAC_CFG_RXCSUM_ENB bit. Also the controller is known to 3447 * have bug in protocol field in Rx return structure so 3448 * these controllers can't handle fragmented frames. Disable 3449 * Rx checksum offloading until there is a newer controller 3450 * that has sane implementation. 3451 */ 3452 reg = MAC_CFG_TX_CRC_ENB | MAC_CFG_TX_AUTO_PAD | MAC_CFG_FULL_DUPLEX | 3453 ((MAC_CFG_PREAMBLE_DEFAULT << MAC_CFG_PREAMBLE_SHIFT) & 3454 MAC_CFG_PREAMBLE_MASK); 3455 if (sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8151 || 3456 sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8151_V2 || 3457 sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8152_B2) { 3458 reg |= MAC_CFG_HASH_ALG_CRC32 | MAC_CFG_SPEED_MODE_SW; 3459 } 3460 if ((sc->alc_flags & ALC_FLAG_FASTETHER) != 0) 3461 reg |= MAC_CFG_SPEED_10_100; 3462 else 3463 reg |= MAC_CFG_SPEED_1000; 3464 CSR_WRITE_4(sc, ALC_MAC_CFG, reg); 3465 3466 /* Set up the receive filter. */ 3467 alc_rxfilter(sc); 3468 alc_rxvlan(sc); 3469 3470 /* Acknowledge all pending interrupts and clear it. */ 3471 CSR_WRITE_4(sc, ALC_INTR_MASK, ALC_INTRS); 3472 CSR_WRITE_4(sc, ALC_INTR_STATUS, 0xFFFFFFFF); 3473 CSR_WRITE_4(sc, ALC_INTR_STATUS, 0); 3474 3475 sc->alc_flags &= ~ALC_FLAG_LINK; 3476 /* Switch to the current media. */ 3477 mii_mediachg(mii); 3478 3479 callout_reset(&sc->alc_tick_ch, hz, alc_tick, sc); 3480 3481 ifp->if_flags |= IFF_RUNNING; 3482 ifp->if_flags &= ~IFF_OACTIVE; 3483 } 3484 3485 static void 3486 alc_stop(struct alc_softc *sc) 3487 { 3488 struct ifnet *ifp; 3489 struct alc_txdesc *txd; 3490 struct alc_rxdesc *rxd; 3491 uint32_t reg; 3492 int i; 3493 3494 ALC_LOCK_ASSERT(sc); 3495 /* 3496 * Mark the interface down and cancel the watchdog timer. 3497 */ 3498 ifp = sc->alc_ifp; 3499 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 3500 sc->alc_flags &= ~ALC_FLAG_LINK; 3501 callout_stop(&sc->alc_tick_ch); 3502 sc->alc_watchdog_timer = 0; 3503 alc_stats_update(sc); 3504 /* Disable interrupts. */ 3505 CSR_WRITE_4(sc, ALC_INTR_MASK, 0); 3506 CSR_WRITE_4(sc, ALC_INTR_STATUS, 0xFFFFFFFF); 3507 alc_stop_queue(sc); 3508 /* Disable DMA. */ 3509 reg = CSR_READ_4(sc, ALC_DMA_CFG); 3510 reg &= ~(DMA_CFG_CMB_ENB | DMA_CFG_SMB_ENB); 3511 reg |= DMA_CFG_SMB_DIS; 3512 CSR_WRITE_4(sc, ALC_DMA_CFG, reg); 3513 DELAY(1000); 3514 /* Stop Rx/Tx MACs. */ 3515 alc_stop_mac(sc); 3516 /* Disable interrupts which might be touched in taskq handler. */ 3517 CSR_WRITE_4(sc, ALC_INTR_STATUS, 0xFFFFFFFF); 3518 3519 /* Reclaim Rx buffers that have been processed. */ 3520 if (sc->alc_cdata.alc_rxhead != NULL) 3521 m_freem(sc->alc_cdata.alc_rxhead); 3522 ALC_RXCHAIN_RESET(sc); 3523 /* 3524 * Free Tx/Rx mbufs still in the queues. 3525 */ 3526 for (i = 0; i < ALC_RX_RING_CNT; i++) { 3527 rxd = &sc->alc_cdata.alc_rxdesc[i]; 3528 if (rxd->rx_m != NULL) { 3529 bus_dmamap_sync(sc->alc_cdata.alc_rx_tag, 3530 rxd->rx_dmamap, BUS_DMASYNC_POSTREAD); 3531 bus_dmamap_unload(sc->alc_cdata.alc_rx_tag, 3532 rxd->rx_dmamap); 3533 m_freem(rxd->rx_m); 3534 rxd->rx_m = NULL; 3535 } 3536 } 3537 for (i = 0; i < ALC_TX_RING_CNT; i++) { 3538 txd = &sc->alc_cdata.alc_txdesc[i]; 3539 if (txd->tx_m != NULL) { 3540 bus_dmamap_sync(sc->alc_cdata.alc_tx_tag, 3541 txd->tx_dmamap, BUS_DMASYNC_POSTWRITE); 3542 bus_dmamap_unload(sc->alc_cdata.alc_tx_tag, 3543 txd->tx_dmamap); 3544 m_freem(txd->tx_m); 3545 txd->tx_m = NULL; 3546 } 3547 } 3548 } 3549 3550 static void 3551 alc_stop_mac(struct alc_softc *sc) 3552 { 3553 uint32_t reg; 3554 int i; 3555 3556 ALC_LOCK_ASSERT(sc); 3557 3558 /* Disable Rx/Tx MAC. */ 3559 reg = CSR_READ_4(sc, ALC_MAC_CFG); 3560 if ((reg & (MAC_CFG_TX_ENB | MAC_CFG_RX_ENB)) != 0) { 3561 reg &= ~MAC_CFG_TX_ENB | MAC_CFG_RX_ENB; 3562 CSR_WRITE_4(sc, ALC_MAC_CFG, reg); 3563 } 3564 for (i = ALC_TIMEOUT; i > 0; i--) { 3565 reg = CSR_READ_4(sc, ALC_IDLE_STATUS); 3566 if (reg == 0) 3567 break; 3568 DELAY(10); 3569 } 3570 if (i == 0) 3571 device_printf(sc->alc_dev, 3572 "could not disable Rx/Tx MAC(0x%08x)!\n", reg); 3573 } 3574 3575 static void 3576 alc_start_queue(struct alc_softc *sc) 3577 { 3578 uint32_t qcfg[] = { 3579 0, 3580 RXQ_CFG_QUEUE0_ENB, 3581 RXQ_CFG_QUEUE0_ENB | RXQ_CFG_QUEUE1_ENB, 3582 RXQ_CFG_QUEUE0_ENB | RXQ_CFG_QUEUE1_ENB | RXQ_CFG_QUEUE2_ENB, 3583 RXQ_CFG_ENB 3584 }; 3585 uint32_t cfg; 3586 3587 ALC_LOCK_ASSERT(sc); 3588 3589 /* Enable RxQ. */ 3590 cfg = CSR_READ_4(sc, ALC_RXQ_CFG); 3591 cfg &= ~RXQ_CFG_ENB; 3592 cfg |= qcfg[1]; 3593 CSR_WRITE_4(sc, ALC_RXQ_CFG, cfg); 3594 /* Enable TxQ. */ 3595 cfg = CSR_READ_4(sc, ALC_TXQ_CFG); 3596 cfg |= TXQ_CFG_ENB; 3597 CSR_WRITE_4(sc, ALC_TXQ_CFG, cfg); 3598 } 3599 3600 static void 3601 alc_stop_queue(struct alc_softc *sc) 3602 { 3603 uint32_t reg; 3604 int i; 3605 3606 ALC_LOCK_ASSERT(sc); 3607 3608 /* Disable RxQ. */ 3609 reg = CSR_READ_4(sc, ALC_RXQ_CFG); 3610 if ((reg & RXQ_CFG_ENB) != 0) { 3611 reg &= ~RXQ_CFG_ENB; 3612 CSR_WRITE_4(sc, ALC_RXQ_CFG, reg); 3613 } 3614 /* Disable TxQ. */ 3615 reg = CSR_READ_4(sc, ALC_TXQ_CFG); 3616 if ((reg & TXQ_CFG_ENB) == 0) { 3617 reg &= ~TXQ_CFG_ENB; 3618 CSR_WRITE_4(sc, ALC_TXQ_CFG, reg); 3619 } 3620 for (i = ALC_TIMEOUT; i > 0; i--) { 3621 reg = CSR_READ_4(sc, ALC_IDLE_STATUS); 3622 if ((reg & (IDLE_STATUS_RXQ | IDLE_STATUS_TXQ)) == 0) 3623 break; 3624 DELAY(10); 3625 } 3626 if (i == 0) 3627 device_printf(sc->alc_dev, 3628 "could not disable RxQ/TxQ (0x%08x)!\n", reg); 3629 } 3630 3631 static void 3632 alc_init_tx_ring(struct alc_softc *sc) 3633 { 3634 struct alc_ring_data *rd; 3635 struct alc_txdesc *txd; 3636 int i; 3637 3638 ALC_LOCK_ASSERT(sc); 3639 3640 sc->alc_cdata.alc_tx_prod = 0; 3641 sc->alc_cdata.alc_tx_cons = 0; 3642 sc->alc_cdata.alc_tx_cnt = 0; 3643 3644 rd = &sc->alc_rdata; 3645 bzero(rd->alc_tx_ring, ALC_TX_RING_SZ); 3646 for (i = 0; i < ALC_TX_RING_CNT; i++) { 3647 txd = &sc->alc_cdata.alc_txdesc[i]; 3648 txd->tx_m = NULL; 3649 } 3650 3651 bus_dmamap_sync(sc->alc_cdata.alc_tx_ring_tag, 3652 sc->alc_cdata.alc_tx_ring_map, BUS_DMASYNC_PREWRITE); 3653 } 3654 3655 static int 3656 alc_init_rx_ring(struct alc_softc *sc) 3657 { 3658 struct alc_ring_data *rd; 3659 struct alc_rxdesc *rxd; 3660 int i; 3661 3662 ALC_LOCK_ASSERT(sc); 3663 3664 sc->alc_cdata.alc_rx_cons = ALC_RX_RING_CNT - 1; 3665 sc->alc_morework = 0; 3666 rd = &sc->alc_rdata; 3667 bzero(rd->alc_rx_ring, ALC_RX_RING_SZ); 3668 for (i = 0; i < ALC_RX_RING_CNT; i++) { 3669 rxd = &sc->alc_cdata.alc_rxdesc[i]; 3670 rxd->rx_m = NULL; 3671 rxd->rx_desc = &rd->alc_rx_ring[i]; 3672 if (alc_newbuf(sc, rxd) != 0) 3673 return (ENOBUFS); 3674 } 3675 3676 /* 3677 * Since controller does not update Rx descriptors, driver 3678 * does have to read Rx descriptors back so BUS_DMASYNC_PREWRITE 3679 * is enough to ensure coherence. 3680 */ 3681 bus_dmamap_sync(sc->alc_cdata.alc_rx_ring_tag, 3682 sc->alc_cdata.alc_rx_ring_map, BUS_DMASYNC_PREWRITE); 3683 /* Let controller know availability of new Rx buffers. */ 3684 CSR_WRITE_4(sc, ALC_MBOX_RD0_PROD_IDX, sc->alc_cdata.alc_rx_cons); 3685 3686 return (0); 3687 } 3688 3689 static void 3690 alc_init_rr_ring(struct alc_softc *sc) 3691 { 3692 struct alc_ring_data *rd; 3693 3694 ALC_LOCK_ASSERT(sc); 3695 3696 sc->alc_cdata.alc_rr_cons = 0; 3697 ALC_RXCHAIN_RESET(sc); 3698 3699 rd = &sc->alc_rdata; 3700 bzero(rd->alc_rr_ring, ALC_RR_RING_SZ); 3701 bus_dmamap_sync(sc->alc_cdata.alc_rr_ring_tag, 3702 sc->alc_cdata.alc_rr_ring_map, 3703 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 3704 } 3705 3706 static void 3707 alc_init_cmb(struct alc_softc *sc) 3708 { 3709 struct alc_ring_data *rd; 3710 3711 ALC_LOCK_ASSERT(sc); 3712 3713 rd = &sc->alc_rdata; 3714 bzero(rd->alc_cmb, ALC_CMB_SZ); 3715 bus_dmamap_sync(sc->alc_cdata.alc_cmb_tag, sc->alc_cdata.alc_cmb_map, 3716 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 3717 } 3718 3719 static void 3720 alc_init_smb(struct alc_softc *sc) 3721 { 3722 struct alc_ring_data *rd; 3723 3724 ALC_LOCK_ASSERT(sc); 3725 3726 rd = &sc->alc_rdata; 3727 bzero(rd->alc_smb, ALC_SMB_SZ); 3728 bus_dmamap_sync(sc->alc_cdata.alc_smb_tag, sc->alc_cdata.alc_smb_map, 3729 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 3730 } 3731 3732 static void 3733 alc_rxvlan(struct alc_softc *sc) 3734 { 3735 struct ifnet *ifp; 3736 uint32_t reg; 3737 3738 ALC_LOCK_ASSERT(sc); 3739 3740 ifp = sc->alc_ifp; 3741 reg = CSR_READ_4(sc, ALC_MAC_CFG); 3742 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) 3743 reg |= MAC_CFG_VLAN_TAG_STRIP; 3744 else 3745 reg &= ~MAC_CFG_VLAN_TAG_STRIP; 3746 CSR_WRITE_4(sc, ALC_MAC_CFG, reg); 3747 } 3748 3749 static void 3750 alc_rxfilter(struct alc_softc *sc) 3751 { 3752 struct ifnet *ifp; 3753 struct ifmultiaddr *ifma; 3754 uint32_t crc; 3755 uint32_t mchash[2]; 3756 uint32_t rxcfg; 3757 3758 ALC_LOCK_ASSERT(sc); 3759 3760 ifp = sc->alc_ifp; 3761 3762 bzero(mchash, sizeof(mchash)); 3763 rxcfg = CSR_READ_4(sc, ALC_MAC_CFG); 3764 rxcfg &= ~(MAC_CFG_ALLMULTI | MAC_CFG_BCAST | MAC_CFG_PROMISC); 3765 if ((ifp->if_flags & IFF_BROADCAST) != 0) 3766 rxcfg |= MAC_CFG_BCAST; 3767 if ((ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) != 0) { 3768 if ((ifp->if_flags & IFF_PROMISC) != 0) 3769 rxcfg |= MAC_CFG_PROMISC; 3770 if ((ifp->if_flags & IFF_ALLMULTI) != 0) 3771 rxcfg |= MAC_CFG_ALLMULTI; 3772 mchash[0] = 0xFFFFFFFF; 3773 mchash[1] = 0xFFFFFFFF; 3774 goto chipit; 3775 } 3776 3777 #if 0 3778 /* XXX */ 3779 if_maddr_rlock(ifp); 3780 #endif 3781 TAILQ_FOREACH(ifma, &sc->alc_ifp->if_multiaddrs, ifma_link) { 3782 if (ifma->ifma_addr->sa_family != AF_LINK) 3783 continue; 3784 crc = ether_crc32_be(LLADDR((struct sockaddr_dl *) 3785 ifma->ifma_addr), ETHER_ADDR_LEN); 3786 mchash[crc >> 31] |= 1 << ((crc >> 26) & 0x1f); 3787 } 3788 #if 0 3789 /* XXX */ 3790 if_maddr_runlock(ifp); 3791 #endif 3792 3793 chipit: 3794 CSR_WRITE_4(sc, ALC_MAR0, mchash[0]); 3795 CSR_WRITE_4(sc, ALC_MAR1, mchash[1]); 3796 CSR_WRITE_4(sc, ALC_MAC_CFG, rxcfg); 3797 } 3798 3799 static int 3800 sysctl_hw_alc_proc_limit(SYSCTL_HANDLER_ARGS) 3801 { 3802 return (sysctl_int_range(oidp, arg1, arg2, req, 3803 ALC_PROC_MIN, ALC_PROC_MAX)); 3804 } 3805 3806 static int 3807 sysctl_hw_alc_int_mod(SYSCTL_HANDLER_ARGS) 3808 { 3809 3810 return (sysctl_int_range(oidp, arg1, arg2, req, 3811 ALC_IM_TIMER_MIN, ALC_IM_TIMER_MAX)); 3812 } 3813