/*-
 * Copyright (c) 2009, Pyun YongHyeon <yongari@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/dev/alc/if_alc.c,v 1.6 2009/09/29 23:03:16 yongari Exp $
 */

/* Driver for Atheros AR8131/AR8132 PCIe Ethernet. */

#include <sys/param.h>
#include <sys/bitops.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/rman.h>
#include <sys/serialize.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/bpf.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/ifq_var.h>
#include <net/vlan/if_vlan_var.h>
#include <net/vlan/if_vlan_ether.h>

#include <netinet/tcp.h>

#include <dev/netif/mii_layer/mii.h>
#include <dev/netif/mii_layer/miivar.h>

#include <bus/pci/pcireg.h>
#include <bus/pci/pcivar.h>
#include <bus/pci/pcidevs.h>

#include <dev/netif/alc/if_alcreg.h>
#include <dev/netif/alc/if_alcvar.h>

/* "device miibus" required.  See GENERIC if you get errors here. */
#include "miibus_if.h"

#undef ALC_USE_CUSTOM_CSUM
#ifdef ALC_USE_CUSTOM_CSUM
#define	ALC_CSUM_FEATURES	(CSUM_TCP | CSUM_UDP)
#else
#define	ALC_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)
#endif

/* Tunables. */
static int alc_msi_enable = 1;
TUNABLE_INT("hw.alc.msi.enable", &alc_msi_enable);

/*
 * Devices supported by this driver.
 */
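
/*
 * Each entry carries the PCI vendor ID, the PCI device ID, what appears
 * to be the controller's maximum frame length in bytes (9KB on the
 * AR8131/AR8132, 6KB on the AR8151/AR8152 parts), and the description
 * reported at probe time (see alc_find_ident() and alc_probe() below).
 */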
static struct alc_ident alc_ident_table[] = {
	{ VENDORID_ATHEROS, DEVICEID_ATHEROS_AR8131, 9 * 1024,
	    "Atheros AR8131 PCIe Gigabit Ethernet" },
	{ VENDORID_ATHEROS, DEVICEID_ATHEROS_AR8132, 9 * 1024,
	    "Atheros AR8132 PCIe Fast Ethernet" },
	{ VENDORID_ATHEROS, DEVICEID_ATHEROS_AR8151, 6 * 1024,
	    "Atheros AR8151 v1.0 PCIe Gigabit Ethernet" },
	{ VENDORID_ATHEROS, DEVICEID_ATHEROS_AR8151_V2, 6 * 1024,
	    "Atheros AR8151 v2.0 PCIe Gigabit Ethernet" },
	{ VENDORID_ATHEROS, DEVICEID_ATHEROS_AR8152_B, 6 * 1024,
	    "Atheros AR8152 v1.1 PCIe Fast Ethernet" },
	{ VENDORID_ATHEROS, DEVICEID_ATHEROS_AR8152_B2, 6 * 1024,
	    "Atheros AR8152 v2.0 PCIe Fast Ethernet" },
	{ 0, 0, 0, NULL }
};

static int	alc_attach(device_t);
static int	alc_probe(device_t);
static int	alc_detach(device_t);
static int	alc_shutdown(device_t);
static int	alc_suspend(device_t);
static int	alc_resume(device_t);
static int	alc_miibus_readreg(device_t, int, int);
static void	alc_miibus_statchg(device_t);
static int	alc_miibus_writereg(device_t, int, int, int);

static void	alc_init(void *);
static void	alc_start(struct ifnet *);
static void	alc_watchdog(struct alc_softc *);
static int	alc_mediachange(struct ifnet *);
static void	alc_mediastatus(struct ifnet *, struct ifmediareq *);
static int	alc_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);

static void	alc_aspm(struct alc_softc *, int);
#ifdef foo
static int	alc_check_boundary(struct alc_softc *);
#endif
static void	alc_disable_l0s_l1(struct alc_softc *);
static int	alc_dma_alloc(struct alc_softc *);
static void	alc_dma_free(struct alc_softc *);
static void	alc_dmamap_cb(void *, bus_dma_segment_t *, int, int);
static int	alc_encap(struct alc_softc *, struct mbuf **);
static struct alc_ident *alc_find_ident(device_t);
static void	alc_get_macaddr(struct alc_softc *);
static void	alc_init_cmb(struct alc_softc *);
static void	alc_init_rr_ring(struct alc_softc *);
static int	alc_init_rx_ring(struct alc_softc *);
static void	alc_init_smb(struct alc_softc *);
static void	alc_init_tx_ring(struct alc_softc *);
static void	alc_intr(void *);
static void	alc_mac_config(struct alc_softc *);
static int	alc_newbuf(struct alc_softc *, struct alc_rxdesc *, boolean_t);
static void	alc_phy_down(struct alc_softc *);
static void	alc_phy_reset(struct alc_softc *);
static void	alc_reset(struct alc_softc *);
static void	alc_rxeof(struct alc_softc *, struct rx_rdesc *);
static int	alc_rxintr(struct alc_softc *);
static void	alc_rxfilter(struct alc_softc *);
static void	alc_rxvlan(struct alc_softc *);
#if 0
static void	alc_setlinkspeed(struct alc_softc *);
/* XXX: WOL */
static void	alc_setwol(struct alc_softc *);
#endif
static void	alc_start_queue(struct alc_softc *);
static void	alc_stats_clear(struct alc_softc *);
static void	alc_stats_update(struct alc_softc *);
static void	alc_stop(struct alc_softc *);
static void	alc_stop_mac(struct alc_softc *);
static void	alc_stop_queue(struct alc_softc *);
static void	alc_sysctl_node(struct alc_softc *);
static void	alc_tick(void *);
static void	alc_txeof(struct alc_softc *);
static int	sysctl_hw_alc_proc_limit(SYSCTL_HANDLER_ARGS);
static int	sysctl_hw_alc_int_mod(SYSCTL_HANDLER_ARGS);

static device_method_t alc_methods[] = {
	/* Device interface. */
	DEVMETHOD(device_probe,		alc_probe),
	DEVMETHOD(device_attach,	alc_attach),
	DEVMETHOD(device_detach,	alc_detach),
	DEVMETHOD(device_shutdown,	alc_shutdown),
	DEVMETHOD(device_suspend,	alc_suspend),
	DEVMETHOD(device_resume,	alc_resume),

	/* MII interface. */
	DEVMETHOD(miibus_readreg,	alc_miibus_readreg),
	DEVMETHOD(miibus_writereg,	alc_miibus_writereg),
	DEVMETHOD(miibus_statchg,	alc_miibus_statchg),

	{ NULL, NULL }
};

static DEFINE_CLASS_0(alc, alc_driver, alc_methods, sizeof(struct alc_softc));
static devclass_t alc_devclass;

DECLARE_DUMMY_MODULE(if_alc);
DRIVER_MODULE(if_alc, pci, alc_driver, alc_devclass, NULL, NULL);
DRIVER_MODULE(miibus, alc, miibus_driver, miibus_devclass, NULL, NULL);

static const uint32_t alc_dma_burst[] = { 128, 256, 512, 1024, 2048, 4096, 0 };

static int
alc_miibus_readreg(device_t dev, int phy, int reg)
{
	struct alc_softc *sc;
	uint32_t v;
	int i;

	sc = device_get_softc(dev);

	if (phy != sc->alc_phyaddr)
		return (0);

	/*
	 * For the AR8132 fast ethernet controller, do not report 1000baseT
	 * capability to mii(4).  Even though the AR8132 uses the same
	 * model/revision number as the F1 gigabit PHY, its PHY has no
	 * ability to establish a 1000baseT link.
	 */
	if ((sc->alc_flags & ALC_FLAG_FASTETHER) != 0 &&
	    reg == MII_EXTSR)
		return (0);

	CSR_WRITE_4(sc, ALC_MDIO, MDIO_OP_EXECUTE | MDIO_OP_READ |
	    MDIO_SUP_PREAMBLE | MDIO_CLK_25_4 | MDIO_REG_ADDR(reg));
	for (i = ALC_PHY_TIMEOUT; i > 0; i--) {
		DELAY(5);
		v = CSR_READ_4(sc, ALC_MDIO);
		if ((v & (MDIO_OP_EXECUTE | MDIO_OP_BUSY)) == 0)
			break;
	}

	if (i == 0) {
		device_printf(sc->alc_dev, "phy read timeout : %d\n", reg);
		return (0);
	}

	return ((v & MDIO_DATA_MASK) >> MDIO_DATA_SHIFT);
}

static int
alc_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct alc_softc *sc;
	uint32_t v;
	int i;

	sc = device_get_softc(dev);

	if (phy != sc->alc_phyaddr)
		return (0);

	CSR_WRITE_4(sc, ALC_MDIO, MDIO_OP_EXECUTE | MDIO_OP_WRITE |
	    (val & MDIO_DATA_MASK) << MDIO_DATA_SHIFT |
	    MDIO_SUP_PREAMBLE | MDIO_CLK_25_4 | MDIO_REG_ADDR(reg));
	for (i = ALC_PHY_TIMEOUT; i > 0; i--) {
		DELAY(5);
		v = CSR_READ_4(sc, ALC_MDIO);
		if ((v & (MDIO_OP_EXECUTE | MDIO_OP_BUSY)) == 0)
			break;
	}

	if (i == 0)
		device_printf(sc->alc_dev, "phy write timeout : %d\n", reg);

	return (0);
}
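
/*
 * Invoked by the MII layer whenever the negotiated link state changes.
 * The queues and Rx/Tx MACs are stopped first, and restarted with the
 * resolved speed/duplex/flow-control parameters only when a usable link
 * (10/100, or 1000baseT on the gigabit parts) has been established.
 */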
static void
alc_miibus_statchg(device_t dev)
{
	struct alc_softc *sc;
	struct mii_data *mii;
	struct ifnet *ifp;
	uint32_t reg;

	sc = device_get_softc(dev);

	mii = device_get_softc(sc->alc_miibus);
	ifp = sc->alc_ifp;
	if (mii == NULL || ifp == NULL ||
	    (ifp->if_flags & IFF_RUNNING) == 0)
		return;

	sc->alc_flags &= ~ALC_FLAG_LINK;
	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
	    (IFM_ACTIVE | IFM_AVALID)) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			sc->alc_flags |= ALC_FLAG_LINK;
			break;
		case IFM_1000_T:
			if ((sc->alc_flags & ALC_FLAG_FASTETHER) == 0)
				sc->alc_flags |= ALC_FLAG_LINK;
			break;
		default:
			break;
		}
	}
	alc_stop_queue(sc);
	/* Stop Rx/Tx MACs. */
	alc_stop_mac(sc);

	/* Program MACs with resolved speed/duplex/flow-control. */
	if ((sc->alc_flags & ALC_FLAG_LINK) != 0) {
		alc_start_queue(sc);
		alc_mac_config(sc);
		/* Re-enable Tx/Rx MACs. */
		reg = CSR_READ_4(sc, ALC_MAC_CFG);
		reg |= MAC_CFG_TX_ENB | MAC_CFG_RX_ENB;
		CSR_WRITE_4(sc, ALC_MAC_CFG, reg);
	}
	alc_aspm(sc, IFM_SUBTYPE(mii->mii_media_active));
}

static void
alc_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct alc_softc *sc;
	struct mii_data *mii;

	sc = ifp->if_softc;
	if ((ifp->if_flags & IFF_UP) == 0)
		return;
	mii = device_get_softc(sc->alc_miibus);

	mii_pollstat(mii);
	ifmr->ifm_status = mii->mii_media_status;
	ifmr->ifm_active = mii->mii_media_active;
}

static int
alc_mediachange(struct ifnet *ifp)
{
	struct alc_softc *sc;
	struct mii_data *mii;
	struct mii_softc *miisc;
	int error;

	sc = ifp->if_softc;
	mii = device_get_softc(sc->alc_miibus);
	if (mii->mii_instance != 0) {
		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
	error = mii_mediachg(mii);

	return (error);
}

static struct alc_ident *
alc_find_ident(device_t dev)
{
	struct alc_ident *ident;
	uint16_t vendor, devid;

	vendor = pci_get_vendor(dev);
	devid = pci_get_device(dev);
	for (ident = alc_ident_table; ident->name != NULL; ident++) {
		if (vendor == ident->vendorid && devid == ident->deviceid)
			return (ident);
	}
	return (NULL);
}

static int
alc_probe(device_t dev)
{
	struct alc_ident *ident;

	ident = alc_find_ident(dev);
	if (ident != NULL) {
		device_set_desc(dev, ident->name);
		return (BUS_PROBE_DEFAULT);
	}
	return (ENXIO);
}
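
/*
 * Fetch the station address.  If an EEPROM is present, the TWSI engine
 * is asked to reload the configuration first; the address itself always
 * comes from the PAR0/PAR1 registers, with PAR1 holding the two most
 * significant octets and PAR0 the remaining four, so it is reassembled
 * most-significant byte first at the bottom of this function.
 */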
static void
alc_get_macaddr(struct alc_softc *sc)
{
	uint32_t ea[2], opt;
	uint16_t val;
	int eeprom, i;

	eeprom = 0;
	opt = CSR_READ_4(sc, ALC_OPT_CFG);
	if ((CSR_READ_4(sc, ALC_MASTER_CFG) & MASTER_OTP_SEL) != 0 &&
	    (CSR_READ_4(sc, ALC_TWSI_DEBUG) & TWSI_DEBUG_DEV_EXIST) != 0) {
		/*
		 * EEPROM found, let TWSI reload the EEPROM configuration.
		 * This will set the ethernet address of the controller.
		 */
		eeprom++;
		switch (sc->alc_ident->deviceid) {
		case DEVICEID_ATHEROS_AR8131:
		case DEVICEID_ATHEROS_AR8132:
			if ((opt & OPT_CFG_CLK_ENB) == 0) {
				opt |= OPT_CFG_CLK_ENB;
				CSR_WRITE_4(sc, ALC_OPT_CFG, opt);
				CSR_READ_4(sc, ALC_OPT_CFG);
				DELAY(1000);
			}
			break;
		case DEVICEID_ATHEROS_AR8151:
		case DEVICEID_ATHEROS_AR8151_V2:
		case DEVICEID_ATHEROS_AR8152_B:
		case DEVICEID_ATHEROS_AR8152_B2:
			alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
			    ALC_MII_DBG_ADDR, 0x00);
			val = alc_miibus_readreg(sc->alc_dev, sc->alc_phyaddr,
			    ALC_MII_DBG_DATA);
			alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
			    ALC_MII_DBG_DATA, val & 0xFF7F);
			alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
			    ALC_MII_DBG_ADDR, 0x3B);
			val = alc_miibus_readreg(sc->alc_dev, sc->alc_phyaddr,
			    ALC_MII_DBG_DATA);
			alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
			    ALC_MII_DBG_DATA, val | 0x0008);
			DELAY(20);
			break;
		}

		CSR_WRITE_4(sc, ALC_LTSSM_ID_CFG,
		    CSR_READ_4(sc, ALC_LTSSM_ID_CFG) & ~LTSSM_ID_WRO_ENB);
		CSR_WRITE_4(sc, ALC_WOL_CFG, 0);
		CSR_READ_4(sc, ALC_WOL_CFG);

		CSR_WRITE_4(sc, ALC_TWSI_CFG, CSR_READ_4(sc, ALC_TWSI_CFG) |
		    TWSI_CFG_SW_LD_START);

		for (i = 100; i > 0; i--) {
			DELAY(1000);
			if ((CSR_READ_4(sc, ALC_TWSI_CFG) &
			    TWSI_CFG_SW_LD_START) == 0)
				break;
		}
		if (i == 0)
			device_printf(sc->alc_dev,
			    "reloading EEPROM timeout!\n");
	} else {
		if (bootverbose)
			device_printf(sc->alc_dev, "EEPROM not found!\n");
	}

	if (eeprom != 0) {
		switch (sc->alc_ident->deviceid) {
		case DEVICEID_ATHEROS_AR8131:
		case DEVICEID_ATHEROS_AR8132:
			if ((opt & OPT_CFG_CLK_ENB) != 0) {
				opt &= ~OPT_CFG_CLK_ENB;
				CSR_WRITE_4(sc, ALC_OPT_CFG, opt);
				CSR_READ_4(sc, ALC_OPT_CFG);
				DELAY(1000);
			}
			break;
		case DEVICEID_ATHEROS_AR8151:
		case DEVICEID_ATHEROS_AR8151_V2:
		case DEVICEID_ATHEROS_AR8152_B:
		case DEVICEID_ATHEROS_AR8152_B2:
			alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
			    ALC_MII_DBG_ADDR, 0x00);
			val = alc_miibus_readreg(sc->alc_dev, sc->alc_phyaddr,
			    ALC_MII_DBG_DATA);
			alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
			    ALC_MII_DBG_DATA, val | 0x0080);
			alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
			    ALC_MII_DBG_ADDR, 0x3B);
			val = alc_miibus_readreg(sc->alc_dev, sc->alc_phyaddr,
			    ALC_MII_DBG_DATA);
			alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
			    ALC_MII_DBG_DATA, val & 0xFFF7);
			DELAY(20);
			break;
		}
	}

	ea[0] = CSR_READ_4(sc, ALC_PAR0);
	ea[1] = CSR_READ_4(sc, ALC_PAR1);
	sc->alc_eaddr[0] = (ea[1] >> 8) & 0xFF;
	sc->alc_eaddr[1] = (ea[1] >> 0) & 0xFF;
	sc->alc_eaddr[2] = (ea[0] >> 24) & 0xFF;
	sc->alc_eaddr[3] = (ea[0] >> 16) & 0xFF;
	sc->alc_eaddr[4] = (ea[0] >> 8) & 0xFF;
	sc->alc_eaddr[5] = (ea[0] >> 0) & 0xFF;
}

static void
alc_disable_l0s_l1(struct alc_softc *sc)
{
	uint32_t pmcfg;

	/* Another magic from vendor. */
	pmcfg = CSR_READ_4(sc, ALC_PM_CFG);
	pmcfg &= ~(PM_CFG_L1_ENTRY_TIMER_MASK | PM_CFG_CLK_SWH_L1 |
	    PM_CFG_ASPM_L0S_ENB | PM_CFG_ASPM_L1_ENB | PM_CFG_MAC_ASPM_CHK |
	    PM_CFG_SERDES_PD_EX_L1);
	pmcfg |= PM_CFG_SERDES_BUDS_RX_L1_ENB | PM_CFG_SERDES_PLL_L1_ENB |
	    PM_CFG_SERDES_L1_ENB;
	CSR_WRITE_4(sc, ALC_PM_CFG, pmcfg);
}
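
/*
 * The PHY fixups below go through the vendor's indirect debug interface:
 * a write to ALC_MII_DBG_ADDR selects an internal PHY register, and the
 * following ALC_MII_DBG_DATA access reads or writes its contents.
 */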
static void
alc_phy_reset(struct alc_softc *sc)
{
	uint16_t data;

	/* Reset magic from Linux. */
	CSR_WRITE_2(sc, ALC_GPHY_CFG,
	    GPHY_CFG_HIB_EN | GPHY_CFG_HIB_PULSE | GPHY_CFG_SEL_ANA_RESET);
	CSR_READ_2(sc, ALC_GPHY_CFG);
	DELAY(10 * 1000);

	CSR_WRITE_2(sc, ALC_GPHY_CFG,
	    GPHY_CFG_EXT_RESET | GPHY_CFG_HIB_EN | GPHY_CFG_HIB_PULSE |
	    GPHY_CFG_SEL_ANA_RESET);
	CSR_READ_2(sc, ALC_GPHY_CFG);
	DELAY(10 * 1000);

	/* DSP fixup, vendor magic. */
	if (sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8152_B) {
		alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
		    ALC_MII_DBG_ADDR, 0x000A);
		data = alc_miibus_readreg(sc->alc_dev, sc->alc_phyaddr,
		    ALC_MII_DBG_DATA);
		alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
		    ALC_MII_DBG_DATA, data & 0xDFFF);
	}
	if (sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8151 ||
	    sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8151_V2 ||
	    sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8152_B ||
	    sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8152_B2) {
		alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
		    ALC_MII_DBG_ADDR, 0x003B);
		data = alc_miibus_readreg(sc->alc_dev, sc->alc_phyaddr,
		    ALC_MII_DBG_DATA);
		alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
		    ALC_MII_DBG_DATA, data & 0xFFF7);
		DELAY(20 * 1000);
	}
	if (sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8151) {
		alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
		    ALC_MII_DBG_ADDR, 0x0029);
		alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
		    ALC_MII_DBG_DATA, 0x929D);
	}
	if (sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8131 ||
	    sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8132 ||
	    sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8151_V2 ||
	    sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8152_B2) {
		alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
		    ALC_MII_DBG_ADDR, 0x0029);
		alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
		    ALC_MII_DBG_DATA, 0xB6DD);
	}

	/* Load DSP codes, vendor magic. */
	data = ANA_LOOP_SEL_10BT | ANA_EN_MASK_TB | ANA_EN_10BT_IDLE |
	    ((1 << ANA_INTERVAL_SEL_TIMER_SHIFT) &
	    ANA_INTERVAL_SEL_TIMER_MASK);
	alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
	    ALC_MII_DBG_ADDR, MII_ANA_CFG18);
	alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
	    ALC_MII_DBG_DATA, data);

	data = ((2 << ANA_SERDES_CDR_BW_SHIFT) & ANA_SERDES_CDR_BW_MASK) |
	    ANA_SERDES_EN_DEEM | ANA_SERDES_SEL_HSP | ANA_SERDES_EN_PLL |
	    ANA_SERDES_EN_LCKDT;
	alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
	    ALC_MII_DBG_ADDR, MII_ANA_CFG5);
	alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
	    ALC_MII_DBG_DATA, data);

	data = ((44 << ANA_LONG_CABLE_TH_100_SHIFT) &
	    ANA_LONG_CABLE_TH_100_MASK) |
	    ((33 << ANA_SHORT_CABLE_TH_100_SHIFT) &
	    ANA_SHORT_CABLE_TH_100_MASK) |
	    ANA_BP_BAD_LINK_ACCUM | ANA_BP_SMALL_BW;
	alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
	    ALC_MII_DBG_ADDR, MII_ANA_CFG54);
	alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
	    ALC_MII_DBG_DATA, data);

	data = ((11 << ANA_IECHO_ADJ_3_SHIFT) & ANA_IECHO_ADJ_3_MASK) |
	    ((11 << ANA_IECHO_ADJ_2_SHIFT) & ANA_IECHO_ADJ_2_MASK) |
	    ((8 << ANA_IECHO_ADJ_1_SHIFT) & ANA_IECHO_ADJ_1_MASK) |
	    ((8 << ANA_IECHO_ADJ_0_SHIFT) & ANA_IECHO_ADJ_0_MASK);
	alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
	    ALC_MII_DBG_ADDR, MII_ANA_CFG4);
	alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
	    ALC_MII_DBG_DATA, data);

	data = ((7 << ANA_MANUL_SWICH_ON_SHIFT) & ANA_MANUL_SWICH_ON_MASK) |
	    ANA_RESTART_CAL | ANA_MAN_ENABLE | ANA_SEL_HSP | ANA_EN_HB |
	    ANA_OEN_125M;
	alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
	    ALC_MII_DBG_ADDR, MII_ANA_CFG0);
	alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
	    ALC_MII_DBG_DATA, data);
	DELAY(1000);
}

static void
alc_phy_down(struct alc_softc *sc)
{
	switch (sc->alc_ident->deviceid) {
	case DEVICEID_ATHEROS_AR8151:
	case DEVICEID_ATHEROS_AR8151_V2:
		/*
		 * GPHY power down caused more problems on AR8151 v2.0.
		 * When the driver is reloaded after GPHY power down,
		 * accesses to PHY/MAC registers hung the system.  Only a
		 * cold boot recovered from it.  I'm not sure whether
		 * AR8151 v1.0 also requires this; I don't have an AR8151
		 * v1.0 controller in hand.
		 * The only option left is to isolate the PHY and initiate
		 * its power down, which in turn saves more power when the
		 * driver is unloaded.
		 */
		alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
		    MII_BMCR, BMCR_ISO | BMCR_PDOWN);
		break;
	default:
		/* Force PHY down. */
		CSR_WRITE_2(sc, ALC_GPHY_CFG,
		    GPHY_CFG_EXT_RESET | GPHY_CFG_HIB_EN | GPHY_CFG_HIB_PULSE |
		    GPHY_CFG_SEL_ANA_RESET | GPHY_CFG_PHY_IDDQ |
		    GPHY_CFG_PWDOWN_HW);
		DELAY(1000);
		break;
	}
}
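
/*
 * Tune the PCIe ASPM (L0s/L1 link power state) configuration for the
 * current media: with a usable link the L1 entry timers are set per
 * chip and link speed, while without a link the SERDES receive paths
 * are powered down and the clock is switched for L1.  Called on every
 * link-state change (see alc_miibus_statchg() above).
 */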
static void
alc_aspm(struct alc_softc *sc, int media)
{
	uint32_t pmcfg;
	uint16_t linkcfg;

	pmcfg = CSR_READ_4(sc, ALC_PM_CFG);
	if ((sc->alc_flags & (ALC_FLAG_APS | ALC_FLAG_PCIE)) ==
	    (ALC_FLAG_APS | ALC_FLAG_PCIE)) {
		linkcfg = CSR_READ_2(sc, sc->alc_expcap +
		    PCIR_EXPRESS_LINK_CTL);
	} else {
		linkcfg = 0;
	}

	pmcfg &= ~PM_CFG_SERDES_PD_EX_L1;
	pmcfg &= ~(PM_CFG_L1_ENTRY_TIMER_MASK | PM_CFG_LCKDET_TIMER_MASK);
	pmcfg |= PM_CFG_MAC_ASPM_CHK;
	pmcfg |= PM_CFG_SERDES_ENB | PM_CFG_RBER_ENB;
	pmcfg &= ~(PM_CFG_ASPM_L1_ENB | PM_CFG_ASPM_L0S_ENB);

	if ((sc->alc_flags & ALC_FLAG_APS) != 0) {
		/* Disable extended sync except AR8152 B v1.0 */
		linkcfg &= ~0x80;
		if (sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8152_B &&
		    sc->alc_rev == ATHEROS_AR8152_B_V10)
			linkcfg |= 0x80;
		CSR_WRITE_2(sc, sc->alc_expcap + PCIR_EXPRESS_LINK_CTL,
		    linkcfg);
		pmcfg &= ~(PM_CFG_EN_BUFS_RX_L0S | PM_CFG_SA_DLY_ENB |
		    PM_CFG_HOTRST);
		pmcfg |= (PM_CFG_L1_ENTRY_TIMER_DEFAULT <<
		    PM_CFG_L1_ENTRY_TIMER_SHIFT);
		pmcfg &= ~PM_CFG_PM_REQ_TIMER_MASK;
		pmcfg |= (PM_CFG_PM_REQ_TIMER_DEFAULT <<
		    PM_CFG_PM_REQ_TIMER_SHIFT);
		pmcfg |= PM_CFG_SERDES_PD_EX_L1 | PM_CFG_PCIE_RECV;
	}

	if ((sc->alc_flags & ALC_FLAG_LINK) != 0) {
		if ((sc->alc_flags & ALC_FLAG_L0S) != 0)
			pmcfg |= PM_CFG_ASPM_L0S_ENB;
		if ((sc->alc_flags & ALC_FLAG_L1S) != 0)
			pmcfg |= PM_CFG_ASPM_L1_ENB;
		if ((sc->alc_flags & ALC_FLAG_APS) != 0) {
			if (sc->alc_ident->deviceid ==
			    DEVICEID_ATHEROS_AR8152_B) {
				pmcfg &= ~PM_CFG_ASPM_L0S_ENB;
			}
			pmcfg &= ~(PM_CFG_SERDES_L1_ENB |
			    PM_CFG_SERDES_PLL_L1_ENB |
			    PM_CFG_SERDES_BUDS_RX_L1_ENB);
			pmcfg |= PM_CFG_CLK_SWH_L1;
			if (media == IFM_100_TX || media == IFM_1000_T) {
				pmcfg &= ~PM_CFG_L1_ENTRY_TIMER_MASK;
				switch (sc->alc_ident->deviceid) {
				case DEVICEID_ATHEROS_AR8152_B:
					pmcfg |= (7 <<
					    PM_CFG_L1_ENTRY_TIMER_SHIFT);
					break;
				case DEVICEID_ATHEROS_AR8152_B2:
				case DEVICEID_ATHEROS_AR8151_V2:
					pmcfg |= (4 <<
					    PM_CFG_L1_ENTRY_TIMER_SHIFT);
					break;
				default:
					pmcfg |= (15 <<
					    PM_CFG_L1_ENTRY_TIMER_SHIFT);
					break;
				}
			}
		} else {
			pmcfg |= PM_CFG_SERDES_L1_ENB |
			    PM_CFG_SERDES_PLL_L1_ENB |
			    PM_CFG_SERDES_BUDS_RX_L1_ENB;
			pmcfg &= ~(PM_CFG_CLK_SWH_L1 |
			    PM_CFG_ASPM_L1_ENB | PM_CFG_ASPM_L0S_ENB);
		}
	} else {
		pmcfg &= ~(PM_CFG_SERDES_BUDS_RX_L1_ENB | PM_CFG_SERDES_L1_ENB |
		    PM_CFG_SERDES_PLL_L1_ENB);
		pmcfg |= PM_CFG_CLK_SWH_L1;
		if ((sc->alc_flags & ALC_FLAG_L1S) != 0)
			pmcfg |= PM_CFG_ASPM_L1_ENB;
	}
	CSR_WRITE_4(sc, ALC_PM_CFG, pmcfg);
}

static int
alc_attach(device_t dev)
{
	struct alc_softc *sc;
	struct ifnet *ifp;
	const char *aspm_state[] = { "L0s/L1", "L0s", "L1", "L0s/L1" };
	uint16_t burst;
	int base, error, state;
	uint32_t cap, ctl, val;
	u_int intr_flags;

	error = 0;
	sc = device_get_softc(dev);
	sc->alc_dev = dev;

	callout_init_mp(&sc->alc_tick_ch);
	sc->alc_ident = alc_find_ident(dev);

	/* Enable bus mastering. */
	pci_enable_busmaster(dev);

	/* Map the device. */
	sc->alc_res_rid = PCIR_BAR(0);
	sc->alc_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &sc->alc_res_rid, RF_ACTIVE);
	if (sc->alc_res == NULL) {
		device_printf(dev, "cannot allocate memory resources.\n");
		error = ENXIO;
		goto fail;
	}
	sc->alc_res_btag = rman_get_bustag(sc->alc_res);
	sc->alc_res_bhand = rman_get_bushandle(sc->alc_res);

	/* Set PHY address. */
	sc->alc_phyaddr = ALC_PHY_ADDR;

	/* Initialize DMA parameters. */
	sc->alc_dma_rd_burst = 0;
	sc->alc_dma_wr_burst = 0;
	sc->alc_rcb = DMA_CFG_RCB_64;
	if (pci_find_extcap(dev, PCIY_EXPRESS, &base) == 0) {
		sc->alc_flags |= ALC_FLAG_PCIE;
		sc->alc_expcap = base;
		burst = CSR_READ_2(sc, base + PCIR_EXPRESS_DEVICE_CTL);
		sc->alc_dma_rd_burst =
		    (burst & PCIM_EXP_CTL_MAX_READ_REQUEST) >> 12;
		sc->alc_dma_wr_burst = (burst & PCIM_EXP_CTL_MAX_PAYLOAD) >> 5;
		if (bootverbose) {
			device_printf(dev, "Read request size : %u bytes.\n",
			    alc_dma_burst[sc->alc_dma_rd_burst]);
			device_printf(dev, "TLP payload size : %u bytes.\n",
			    alc_dma_burst[sc->alc_dma_wr_burst]);
		}
		if (alc_dma_burst[sc->alc_dma_rd_burst] > 1024)
			sc->alc_dma_rd_burst = 3;
		if (alc_dma_burst[sc->alc_dma_wr_burst] > 1024)
			sc->alc_dma_wr_burst = 3;
		/* Clear data link and flow-control protocol error. */
		val = CSR_READ_4(sc, ALC_PEX_UNC_ERR_SEV);
		val &= ~(PEX_UNC_ERR_SEV_DLP | PEX_UNC_ERR_SEV_FCP);
		CSR_WRITE_4(sc, ALC_PEX_UNC_ERR_SEV, val);
		CSR_WRITE_4(sc, ALC_LTSSM_ID_CFG,
		    CSR_READ_4(sc, ALC_LTSSM_ID_CFG) & ~LTSSM_ID_WRO_ENB);
		CSR_WRITE_4(sc, ALC_PCIE_PHYMISC,
		    CSR_READ_4(sc, ALC_PCIE_PHYMISC) |
		    PCIE_PHYMISC_FORCE_RCV_DET);
		if (sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8152_B &&
		    sc->alc_rev == ATHEROS_AR8152_B_V10) {
			val = CSR_READ_4(sc, ALC_PCIE_PHYMISC2);
			val &= ~(PCIE_PHYMISC2_SERDES_CDR_MASK |
			    PCIE_PHYMISC2_SERDES_TH_MASK);
			val |= 3 << PCIE_PHYMISC2_SERDES_CDR_SHIFT;
			val |= 3 << PCIE_PHYMISC2_SERDES_TH_SHIFT;
			CSR_WRITE_4(sc, ALC_PCIE_PHYMISC2, val);
		}

		/* Disable ASPM L0S and L1. */
		cap = CSR_READ_2(sc, base + PCIR_EXPRESS_LINK_CAP);
		if ((cap & PCIM_LINK_CAP_ASPM) != 0) {
			ctl = CSR_READ_2(sc, base + PCIR_EXPRESS_LINK_CTL);
			if ((ctl & 0x08) != 0)
				sc->alc_rcb = DMA_CFG_RCB_128;
			if (bootverbose)
				device_printf(dev, "RCB %u bytes\n",
				    sc->alc_rcb == DMA_CFG_RCB_64 ? 64 : 128);
			state = ctl & 0x03;
			if (state & 0x01)
				sc->alc_flags |= ALC_FLAG_L0S;
			if (state & 0x02)
				sc->alc_flags |= ALC_FLAG_L1S;
			if (bootverbose)
				device_printf(sc->alc_dev, "ASPM %s %s\n",
				    aspm_state[state],
				    state == 0 ? "disabled" : "enabled");
			alc_disable_l0s_l1(sc);
		} else {
			if (bootverbose)
				device_printf(sc->alc_dev,
				    "no ASPM support\n");
		}
	}

	/* Reset PHY. */
	alc_phy_reset(sc);

	/* Reset the ethernet controller. */
	alc_reset(sc);

	/*
	 * One odd thing is that the AR8132 uses the same PHY hardware (F1
	 * gigabit PHY) as the AR8131, so atphy(4) on the AR8132 reports
	 * that the PHY supports 1000Mbps, but that's not true.  The PHY
	 * used in the AR8132 can't establish a gigabit link even though it
	 * shows the same PHY model/revision number as the AR8131.
	 */
	switch (sc->alc_ident->deviceid) {
	case DEVICEID_ATHEROS_AR8152_B:
	case DEVICEID_ATHEROS_AR8152_B2:
		sc->alc_flags |= ALC_FLAG_APS;
		/* FALLTHROUGH */
	case DEVICEID_ATHEROS_AR8132:
		sc->alc_flags |= ALC_FLAG_FASTETHER;
		break;
	case DEVICEID_ATHEROS_AR8151:
	case DEVICEID_ATHEROS_AR8151_V2:
		sc->alc_flags |= ALC_FLAG_APS;
		/* FALLTHROUGH */
	default:
		break;
	}
	sc->alc_flags |= ALC_FLAG_ASPM_MON | ALC_FLAG_JUMBO;

	/*
	 * It seems that AR813x/AR815x has a silicon bug for SMB.  In
	 * addition, Atheros said that enabling SMB wouldn't improve
	 * performance.  Still, it's bad to have to access lots of
	 * registers to extract the MAC statistics.
	 */
	sc->alc_flags |= ALC_FLAG_SMB_BUG;

	/*
	 * Don't use Tx CMB.  It is known to have a silicon bug.
	 */
	sc->alc_flags |= ALC_FLAG_CMB_BUG;
	sc->alc_rev = pci_get_revid(dev);
	sc->alc_chip_rev = CSR_READ_4(sc, ALC_MASTER_CFG) >>
	    MASTER_CHIP_REV_SHIFT;
	if (bootverbose) {
		device_printf(dev, "PCI device revision : 0x%04x\n",
		    sc->alc_rev);
		device_printf(dev, "Chip id/revision : 0x%04x\n",
		    sc->alc_chip_rev);
	}
	device_printf(dev, "%u Tx FIFO, %u Rx FIFO\n",
	    CSR_READ_4(sc, ALC_SRAM_TX_FIFO_LEN) * 8,
	    CSR_READ_4(sc, ALC_SRAM_RX_FIFO_LEN) * 8);

	sc->alc_irq_type = pci_alloc_1intr(dev, alc_msi_enable,
	    &sc->alc_irq_rid, &intr_flags);

	/* Allocate IRQ resources. */
	sc->alc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ,
	    &sc->alc_irq_rid, intr_flags);
	if (sc->alc_irq == NULL) {
		device_printf(dev, "cannot allocate IRQ resources.\n");
		error = ENXIO;
		goto fail;
	}

	/* Create device sysctl node. */
	alc_sysctl_node(sc);

	if ((error = alc_dma_alloc(sc)) != 0)
		goto fail;

	/* Load station address. */
	alc_get_macaddr(sc);

	ifp = sc->alc_ifp = &sc->arpcom.ac_if;
	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = alc_ioctl;
	ifp->if_start = alc_start;
	ifp->if_init = alc_init;
	ifq_set_maxlen(&ifp->if_snd, ALC_TX_RING_CNT - 1);
	ifq_set_ready(&ifp->if_snd);
	ifp->if_capabilities = IFCAP_TXCSUM;
	ifp->if_hwassist = ALC_CSUM_FEATURES;
#if 0
	/* XXX: WOL */
	if (pci_find_extcap(dev, PCIY_PMG, &pmc) == 0) {
		ifp->if_capabilities |= IFCAP_WOL_MAGIC | IFCAP_WOL_MCAST;
		sc->alc_flags |= ALC_FLAG_PM;
		sc->alc_pmcap = base;
	}
#endif
	ifp->if_capenable = ifp->if_capabilities;

	/* VLAN capability setup. */
	ifp->if_capabilities |= IFCAP_VLAN_MTU;
	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWCSUM;
	ifp->if_capenable = ifp->if_capabilities;

	/*
	 * XXX
	 * It seems that enabling Tx checksum offloading makes more trouble.
	 * Sometimes the controller does not receive any frames when Tx
	 * checksum offloading is enabled.  It's not clear whether this is
	 * a bug in the Tx checksum offloading logic or whether only broken
	 * sample boards were tested.  To be safe, don't enable Tx checksum
	 * offloading by default, but give users a chance to toggle it if
	 * they know their controllers work without problems.
	 */
	ifp->if_capenable &= ~IFCAP_TXCSUM;
	ifp->if_hwassist &= ~ALC_CSUM_FEATURES;
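
	/*
	 * Tx checksum offloading remains advertised in if_capabilities,
	 * so it can still be turned on at run time, e.g. with something
	 * like "ifconfig alc0 txcsum" (unit 0 used here only as an
	 * example).
	 */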

	/* Set up MII bus. */
	if ((error = mii_phy_probe(dev, &sc->alc_miibus, alc_mediachange,
	    alc_mediastatus)) != 0) {
		device_printf(dev, "no PHY found!\n");
		goto fail;
	}

	ether_ifattach(ifp, sc->alc_eaddr, NULL);

	/* Tell the upper layer(s) we support long frames. */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);

#if 0
	/* Create local taskq. */
	TASK_INIT(&sc->alc_tx_task, 1, alc_tx_task, ifp);
	sc->alc_tq = taskqueue_create("alc_taskq", M_WAITOK,
	    taskqueue_thread_enqueue, &sc->alc_tq);
	if (sc->alc_tq == NULL) {
		device_printf(dev, "could not create taskqueue.\n");
		ether_ifdetach(ifp);
		error = ENXIO;
		goto fail;
	}
	taskqueue_start_threads(&sc->alc_tq, 1, TDPRI_KERN_DAEMON, -1,
	    "%s taskq", device_get_nameunit(sc->alc_dev));

	if ((sc->alc_flags & ALC_FLAG_MSIX) != 0)
		msic = ALC_MSIX_MESSAGES;
	else if ((sc->alc_flags & ALC_FLAG_MSI) != 0)
		msic = ALC_MSI_MESSAGES;
	else
		msic = 1;
	for (i = 0; i < msic; i++) {
		error = bus_setup_intr(dev, sc->alc_irq[i], INTR_MPSAFE,
		    alc_intr, sc,
		    &sc->alc_intrhand[i], NULL);
		if (error != 0)
			break;
	}
	if (error != 0) {
		device_printf(dev, "could not set up interrupt handler.\n");
		taskqueue_free(sc->alc_tq);
		sc->alc_tq = NULL;
		ether_ifdetach(ifp);
		goto fail;
	}
#else
	error = bus_setup_intr(dev, sc->alc_irq, INTR_MPSAFE, alc_intr, sc,
	    &sc->alc_intrhand, ifp->if_serializer);
	if (error) {
		device_printf(dev, "could not set up interrupt handler.\n");
		ether_ifdetach(ifp);
		goto fail;
	}
#endif
	ifq_set_cpuid(&ifp->if_snd, rman_get_cpuid(sc->alc_irq));

fail:
	if (error != 0)
		alc_detach(dev);

	return (error);
}

static int
alc_detach(device_t dev)
{
	struct alc_softc *sc = device_get_softc(dev);

	if (device_is_attached(dev)) {
		struct ifnet *ifp = sc->alc_ifp;

		lwkt_serialize_enter(ifp->if_serializer);
		alc_stop(sc);
		bus_teardown_intr(dev, sc->alc_irq, sc->alc_intrhand);
		lwkt_serialize_exit(ifp->if_serializer);

		ether_ifdetach(ifp);
	}

	if (sc->alc_miibus != NULL)
		device_delete_child(dev, sc->alc_miibus);
	bus_generic_detach(dev);

	if (sc->alc_res != NULL)
		alc_phy_down(sc);

	if (sc->alc_irq != NULL) {
		bus_release_resource(dev, SYS_RES_IRQ, sc->alc_irq_rid,
		    sc->alc_irq);
	}
	if (sc->alc_irq_type == PCI_INTR_TYPE_MSI)
		pci_release_msi(dev);

	if (sc->alc_res != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, sc->alc_res_rid,
		    sc->alc_res);
	}

	alc_dma_free(sc);

	return (0);
}

#define	ALC_SYSCTL_STAT_ADD32(c, h, n, p, d)	\
	SYSCTL_ADD_UINT(c, h, OID_AUTO, n, CTLFLAG_RD, p, 0, d)
#define	ALC_SYSCTL_STAT_ADD64(c, h, n, p, d)	\
	SYSCTL_ADD_QUAD(c, h, OID_AUTO, n, CTLFLAG_RD, p, 0, d)

static void
alc_sysctl_node(struct alc_softc *sc)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid *tree;
	struct sysctl_oid_list *child, *parent;
	struct alc_hw_stats *stats;
	int error;

	stats = &sc->alc_stats;
	ctx = &sc->alc_sysctl_ctx;
	sysctl_ctx_init(ctx);

	tree = SYSCTL_ADD_NODE(ctx, SYSCTL_STATIC_CHILDREN(_hw),
	    OID_AUTO,
	    device_get_nameunit(sc->alc_dev),
	    CTLFLAG_RD, 0, "");
	if (tree == NULL) {
		device_printf(sc->alc_dev, "can't add sysctl node\n");
		return;
	}
	child = SYSCTL_CHILDREN(tree);

	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "int_rx_mod",
	    CTLTYPE_INT | CTLFLAG_RW, &sc->alc_int_rx_mod, 0,
	    sysctl_hw_alc_int_mod, "I", "alc Rx interrupt moderation");
	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "int_tx_mod",
	    CTLTYPE_INT | CTLFLAG_RW, &sc->alc_int_tx_mod, 0,
	    sysctl_hw_alc_int_mod, "I", "alc Tx interrupt moderation");
	/* Pull in device tunables. */
	sc->alc_int_rx_mod = ALC_IM_RX_TIMER_DEFAULT;
	error = resource_int_value(device_get_name(sc->alc_dev),
	    device_get_unit(sc->alc_dev), "int_rx_mod", &sc->alc_int_rx_mod);
	if (error == 0) {
		if (sc->alc_int_rx_mod < ALC_IM_TIMER_MIN ||
		    sc->alc_int_rx_mod > ALC_IM_TIMER_MAX) {
			device_printf(sc->alc_dev, "int_rx_mod value out of "
			    "range; using default: %d\n",
			    ALC_IM_RX_TIMER_DEFAULT);
			sc->alc_int_rx_mod = ALC_IM_RX_TIMER_DEFAULT;
		}
	}
	sc->alc_int_tx_mod = ALC_IM_TX_TIMER_DEFAULT;
	error = resource_int_value(device_get_name(sc->alc_dev),
	    device_get_unit(sc->alc_dev), "int_tx_mod", &sc->alc_int_tx_mod);
	if (error == 0) {
		if (sc->alc_int_tx_mod < ALC_IM_TIMER_MIN ||
		    sc->alc_int_tx_mod > ALC_IM_TIMER_MAX) {
			device_printf(sc->alc_dev, "int_tx_mod value out of "
			    "range; using default: %d\n",
			    ALC_IM_TX_TIMER_DEFAULT);
			sc->alc_int_tx_mod = ALC_IM_TX_TIMER_DEFAULT;
		}
	}
	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "process_limit",
	    CTLTYPE_INT | CTLFLAG_RW, &sc->alc_process_limit, 0,
	    sysctl_hw_alc_proc_limit, "I",
	    "max number of Rx events to process");
	/* Pull in device tunables. */
	sc->alc_process_limit = ALC_PROC_DEFAULT;
	error = resource_int_value(device_get_name(sc->alc_dev),
	    device_get_unit(sc->alc_dev), "process_limit",
	    &sc->alc_process_limit);
	if (error == 0) {
		if (sc->alc_process_limit < ALC_PROC_MIN ||
		    sc->alc_process_limit > ALC_PROC_MAX) {
			device_printf(sc->alc_dev,
			    "process_limit value out of range; "
			    "using default: %d\n", ALC_PROC_DEFAULT);
			sc->alc_process_limit = ALC_PROC_DEFAULT;
		}
	}
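
	/*
	 * The knobs above can be seeded as device hints (e.g. a line
	 * like hint.alc.0.int_rx_mod="100" in /boot/loader.conf for a
	 * hypothetical unit 0), which resource_int_value() picks up, or
	 * adjusted at run time through the hw.alc0.* nodes created here.
	 */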

	tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats", CTLFLAG_RD,
	    NULL, "ALC statistics");
	parent = SYSCTL_CHILDREN(tree);

	/* Rx statistics. */
	tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "rx", CTLFLAG_RD,
	    NULL, "Rx MAC statistics");
	child = SYSCTL_CHILDREN(tree);
	ALC_SYSCTL_STAT_ADD32(ctx, child, "good_frames",
	    &stats->rx_frames, "Good frames");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "good_bcast_frames",
	    &stats->rx_bcast_frames, "Good broadcast frames");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "good_mcast_frames",
	    &stats->rx_mcast_frames, "Good multicast frames");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "pause_frames",
	    &stats->rx_pause_frames, "Pause control frames");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "control_frames",
	    &stats->rx_control_frames, "Control frames");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "crc_errs",
	    &stats->rx_crcerrs, "CRC errors");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "len_errs",
	    &stats->rx_lenerrs, "Frames with length mismatched");
	ALC_SYSCTL_STAT_ADD64(ctx, child, "good_octets",
	    &stats->rx_bytes, "Good octets");
	ALC_SYSCTL_STAT_ADD64(ctx, child, "good_bcast_octets",
	    &stats->rx_bcast_bytes, "Good broadcast octets");
	ALC_SYSCTL_STAT_ADD64(ctx, child, "good_mcast_octets",
	    &stats->rx_mcast_bytes, "Good multicast octets");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "runts",
	    &stats->rx_runts, "Too short frames");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "fragments",
	    &stats->rx_fragments, "Fragmented frames");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_64",
	    &stats->rx_pkts_64, "64 bytes frames");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_65_127",
	    &stats->rx_pkts_65_127, "65 to 127 bytes frames");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_128_255",
	    &stats->rx_pkts_128_255, "128 to 255 bytes frames");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_256_511",
	    &stats->rx_pkts_256_511, "256 to 511 bytes frames");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_512_1023",
	    &stats->rx_pkts_512_1023, "512 to 1023 bytes frames");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_1024_1518",
	    &stats->rx_pkts_1024_1518, "1024 to 1518 bytes frames");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_1519_max",
	    &stats->rx_pkts_1519_max, "1519 to max frames");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "trunc_errs",
	    &stats->rx_pkts_truncated, "Truncated frames due to MTU size");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "fifo_oflows",
	    &stats->rx_fifo_oflows, "FIFO overflows");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "rrs_errs",
	    &stats->rx_rrs_errs, "Return status write-back errors");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "align_errs",
	    &stats->rx_alignerrs, "Alignment errors");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "filtered",
	    &stats->rx_pkts_filtered,
	    "Frames dropped due to address filtering");

	/* Tx statistics. */
	tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "tx", CTLFLAG_RD,
	    NULL, "Tx MAC statistics");
	child = SYSCTL_CHILDREN(tree);
	ALC_SYSCTL_STAT_ADD32(ctx, child, "good_frames",
	    &stats->tx_frames, "Good frames");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "good_bcast_frames",
	    &stats->tx_bcast_frames, "Good broadcast frames");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "good_mcast_frames",
	    &stats->tx_mcast_frames, "Good multicast frames");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "pause_frames",
	    &stats->tx_pause_frames, "Pause control frames");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "control_frames",
	    &stats->tx_control_frames, "Control frames");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "excess_defers",
	    &stats->tx_excess_defer, "Frames with excessive deferrals");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "defers",
	    &stats->tx_defers, "Frames with deferrals");
	ALC_SYSCTL_STAT_ADD64(ctx, child, "good_octets",
	    &stats->tx_bytes, "Good octets");
	ALC_SYSCTL_STAT_ADD64(ctx, child, "good_bcast_octets",
	    &stats->tx_bcast_bytes, "Good broadcast octets");
	ALC_SYSCTL_STAT_ADD64(ctx, child, "good_mcast_octets",
	    &stats->tx_mcast_bytes, "Good multicast octets");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_64",
	    &stats->tx_pkts_64, "64 bytes frames");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_65_127",
	    &stats->tx_pkts_65_127, "65 to 127 bytes frames");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_128_255",
	    &stats->tx_pkts_128_255, "128 to 255 bytes frames");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_256_511",
	    &stats->tx_pkts_256_511, "256 to 511 bytes frames");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_512_1023",
	    &stats->tx_pkts_512_1023, "512 to 1023 bytes frames");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_1024_1518",
	    &stats->tx_pkts_1024_1518, "1024 to 1518 bytes frames");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_1519_max",
	    &stats->tx_pkts_1519_max, "1519 to max frames");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "single_colls",
	    &stats->tx_single_colls, "Single collisions");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "multi_colls",
	    &stats->tx_multi_colls, "Multiple collisions");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "late_colls",
	    &stats->tx_late_colls, "Late collisions");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "excess_colls",
	    &stats->tx_excess_colls, "Excessive collisions");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "abort",
	    &stats->tx_abort, "Aborted frames due to excessive collisions");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "underruns",
	    &stats->tx_underrun, "FIFO underruns");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "desc_underruns",
	    &stats->tx_desc_underrun, "Descriptor write-back errors");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "len_errs",
	    &stats->tx_lenerrs, "Frames with length mismatched");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "trunc_errs",
	    &stats->tx_pkts_truncated, "Truncated frames due to MTU size");
}

#undef ALC_SYSCTL_STAT_ADD32
#undef ALC_SYSCTL_STAT_ADD64

struct alc_dmamap_arg {
	bus_addr_t	alc_busaddr;
};
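
/*
 * Callback for bus_dmamap_load().  Every load in this driver requests a
 * single segment (the KASSERT below enforces this), so the bus address
 * of that one segment is all that has to be handed back to the caller.
 */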
static void
alc_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	struct alc_dmamap_arg *ctx;

	if (error != 0)
		return;

	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

	ctx = (struct alc_dmamap_arg *)arg;
	ctx->alc_busaddr = segs[0].ds_addr;
}

#ifdef foo
/*
 * Normal and high Tx descriptors share a single Tx high address.
 * The four Rx descriptor/return rings and the CMB share the same Rx
 * high address.
 */
static int
alc_check_boundary(struct alc_softc *sc)
{
	bus_addr_t cmb_end, rx_ring_end, rr_ring_end, tx_ring_end;

	rx_ring_end = sc->alc_rdata.alc_rx_ring_paddr + ALC_RX_RING_SZ;
	rr_ring_end = sc->alc_rdata.alc_rr_ring_paddr + ALC_RR_RING_SZ;
	cmb_end = sc->alc_rdata.alc_cmb_paddr + ALC_CMB_SZ;
	tx_ring_end = sc->alc_rdata.alc_tx_ring_paddr + ALC_TX_RING_SZ;

	/* 4GB boundary crossing is not allowed. */
	if ((ALC_ADDR_HI(rx_ring_end) !=
	    ALC_ADDR_HI(sc->alc_rdata.alc_rx_ring_paddr)) ||
	    (ALC_ADDR_HI(rr_ring_end) !=
	    ALC_ADDR_HI(sc->alc_rdata.alc_rr_ring_paddr)) ||
	    (ALC_ADDR_HI(cmb_end) !=
	    ALC_ADDR_HI(sc->alc_rdata.alc_cmb_paddr)) ||
	    (ALC_ADDR_HI(tx_ring_end) !=
	    ALC_ADDR_HI(sc->alc_rdata.alc_tx_ring_paddr)))
		return (EFBIG);
	/*
	 * Make sure Rx return descriptor/Rx descriptor/CMB use
	 * the same high address.
	 */
	if ((ALC_ADDR_HI(rx_ring_end) != ALC_ADDR_HI(rr_ring_end)) ||
	    (ALC_ADDR_HI(rx_ring_end) != ALC_ADDR_HI(cmb_end)))
		return (EFBIG);

	return (0);
}
#endif

static int
alc_dma_alloc(struct alc_softc *sc)
{
	struct alc_txdesc *txd;
	struct alc_rxdesc *rxd;
	struct alc_dmamap_arg ctx;
	int error, i;

	/* Create parent DMA tag. */
	error = bus_dma_tag_create(
	    sc->alc_cdata.alc_parent_tag, /* parent */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
	    0,				/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    &sc->alc_cdata.alc_parent_tag);
	if (error != 0) {
		device_printf(sc->alc_dev,
		    "could not create parent DMA tag.\n");
		goto fail;
	}

	/* Create DMA tag for Tx descriptor ring. */
	error = bus_dma_tag_create(
	    sc->alc_cdata.alc_parent_tag, /* parent */
	    ALC_TX_RING_ALIGN, 0,	/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    ALC_TX_RING_SZ,		/* maxsize */
	    1,				/* nsegments */
	    ALC_TX_RING_SZ,		/* maxsegsize */
	    0,				/* flags */
	    &sc->alc_cdata.alc_tx_ring_tag);
	if (error != 0) {
		device_printf(sc->alc_dev,
		    "could not create Tx ring DMA tag.\n");
		goto fail;
	}

	/* Create DMA tag for Rx free descriptor ring. */
	error = bus_dma_tag_create(
	    sc->alc_cdata.alc_parent_tag, /* parent */
	    ALC_RX_RING_ALIGN, 0,	/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    ALC_RX_RING_SZ,		/* maxsize */
	    1,				/* nsegments */
	    ALC_RX_RING_SZ,		/* maxsegsize */
	    0,				/* flags */
	    &sc->alc_cdata.alc_rx_ring_tag);
	if (error != 0) {
		device_printf(sc->alc_dev,
		    "could not create Rx ring DMA tag.\n");
		goto fail;
	}
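
	/*
	 * All ring/block tags here follow the same pattern: one segment
	 * (nsegments 1, maxsize == maxsegsize) with hardware-mandated
	 * alignment, so each structure is loaded as a single physically
	 * contiguous, aligned chunk.
	 */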

	/* Create DMA tag for Rx return descriptor ring. */
	error = bus_dma_tag_create(
	    sc->alc_cdata.alc_parent_tag, /* parent */
	    ALC_RR_RING_ALIGN, 0,	/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    ALC_RR_RING_SZ,		/* maxsize */
	    1,				/* nsegments */
	    ALC_RR_RING_SZ,		/* maxsegsize */
	    0,				/* flags */
	    &sc->alc_cdata.alc_rr_ring_tag);
	if (error != 0) {
		device_printf(sc->alc_dev,
		    "could not create Rx return ring DMA tag.\n");
		goto fail;
	}

	/* Create DMA tag for coalescing message block. */
	error = bus_dma_tag_create(
	    sc->alc_cdata.alc_parent_tag, /* parent */
	    ALC_CMB_ALIGN, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    ALC_CMB_SZ,			/* maxsize */
	    1,				/* nsegments */
	    ALC_CMB_SZ,			/* maxsegsize */
	    0,				/* flags */
	    &sc->alc_cdata.alc_cmb_tag);
	if (error != 0) {
		device_printf(sc->alc_dev,
		    "could not create CMB DMA tag.\n");
		goto fail;
	}

	/* Create DMA tag for status message block. */
	error = bus_dma_tag_create(
	    sc->alc_cdata.alc_parent_tag, /* parent */
	    ALC_SMB_ALIGN, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    ALC_SMB_SZ,			/* maxsize */
	    1,				/* nsegments */
	    ALC_SMB_SZ,			/* maxsegsize */
	    0,				/* flags */
	    &sc->alc_cdata.alc_smb_tag);
	if (error != 0) {
		device_printf(sc->alc_dev,
		    "could not create SMB DMA tag.\n");
		goto fail;
	}

	/* Allocate DMA'able memory and load the DMA map for Tx ring. */
	error = bus_dmamem_alloc(sc->alc_cdata.alc_tx_ring_tag,
	    (void **)&sc->alc_rdata.alc_tx_ring,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
	    &sc->alc_cdata.alc_tx_ring_map);
	if (error != 0) {
		device_printf(sc->alc_dev,
		    "could not allocate DMA'able memory for Tx ring.\n");
		goto fail;
	}
	ctx.alc_busaddr = 0;
	error = bus_dmamap_load(sc->alc_cdata.alc_tx_ring_tag,
	    sc->alc_cdata.alc_tx_ring_map, sc->alc_rdata.alc_tx_ring,
	    ALC_TX_RING_SZ, alc_dmamap_cb, &ctx, 0);
	if (error != 0 || ctx.alc_busaddr == 0) {
		device_printf(sc->alc_dev,
		    "could not load DMA'able memory for Tx ring.\n");
		goto fail;
	}
	sc->alc_rdata.alc_tx_ring_paddr = ctx.alc_busaddr;

	/* Allocate DMA'able memory and load the DMA map for Rx ring. */
	error = bus_dmamem_alloc(sc->alc_cdata.alc_rx_ring_tag,
	    (void **)&sc->alc_rdata.alc_rx_ring,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
	    &sc->alc_cdata.alc_rx_ring_map);
	if (error != 0) {
		device_printf(sc->alc_dev,
		    "could not allocate DMA'able memory for Rx ring.\n");
		goto fail;
	}
	ctx.alc_busaddr = 0;
	error = bus_dmamap_load(sc->alc_cdata.alc_rx_ring_tag,
	    sc->alc_cdata.alc_rx_ring_map, sc->alc_rdata.alc_rx_ring,
	    ALC_RX_RING_SZ, alc_dmamap_cb, &ctx, 0);
	if (error != 0 || ctx.alc_busaddr == 0) {
		device_printf(sc->alc_dev,
		    "could not load DMA'able memory for Rx ring.\n");
		goto fail;
	}
	sc->alc_rdata.alc_rx_ring_paddr = ctx.alc_busaddr;

	/* Allocate DMA'able memory and load the DMA map for Rx return ring. */
	error = bus_dmamem_alloc(sc->alc_cdata.alc_rr_ring_tag,
	    (void **)&sc->alc_rdata.alc_rr_ring,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
	    &sc->alc_cdata.alc_rr_ring_map);
	if (error != 0) {
		device_printf(sc->alc_dev,
		    "could not allocate DMA'able memory for Rx return ring.\n");
		goto fail;
	}
	ctx.alc_busaddr = 0;
	error = bus_dmamap_load(sc->alc_cdata.alc_rr_ring_tag,
	    sc->alc_cdata.alc_rr_ring_map, sc->alc_rdata.alc_rr_ring,
	    ALC_RR_RING_SZ, alc_dmamap_cb, &ctx, 0);
	if (error != 0 || ctx.alc_busaddr == 0) {
		device_printf(sc->alc_dev,
		    "could not load DMA'able memory for Rx return ring.\n");
		goto fail;
	}
	sc->alc_rdata.alc_rr_ring_paddr = ctx.alc_busaddr;

	/* Allocate DMA'able memory and load the DMA map for CMB. */
	error = bus_dmamem_alloc(sc->alc_cdata.alc_cmb_tag,
	    (void **)&sc->alc_rdata.alc_cmb,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
	    &sc->alc_cdata.alc_cmb_map);
	if (error != 0) {
		device_printf(sc->alc_dev,
		    "could not allocate DMA'able memory for CMB.\n");
		goto fail;
	}
	ctx.alc_busaddr = 0;
	error = bus_dmamap_load(sc->alc_cdata.alc_cmb_tag,
	    sc->alc_cdata.alc_cmb_map, sc->alc_rdata.alc_cmb,
	    ALC_CMB_SZ, alc_dmamap_cb, &ctx, 0);
	if (error != 0 || ctx.alc_busaddr == 0) {
		device_printf(sc->alc_dev,
		    "could not load DMA'able memory for CMB.\n");
		goto fail;
	}
	sc->alc_rdata.alc_cmb_paddr = ctx.alc_busaddr;

	/* Allocate DMA'able memory and load the DMA map for SMB. */
	error = bus_dmamem_alloc(sc->alc_cdata.alc_smb_tag,
	    (void **)&sc->alc_rdata.alc_smb,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
	    &sc->alc_cdata.alc_smb_map);
	if (error != 0) {
		device_printf(sc->alc_dev,
		    "could not allocate DMA'able memory for SMB.\n");
		goto fail;
	}
	ctx.alc_busaddr = 0;
	error = bus_dmamap_load(sc->alc_cdata.alc_smb_tag,
	    sc->alc_cdata.alc_smb_map, sc->alc_rdata.alc_smb,
	    ALC_SMB_SZ, alc_dmamap_cb, &ctx, 0);
	if (error != 0 || ctx.alc_busaddr == 0) {
		device_printf(sc->alc_dev,
		    "could not load DMA'able memory for SMB.\n");
		goto fail;
	}
	sc->alc_rdata.alc_smb_paddr = ctx.alc_busaddr;

#ifdef foo
	/*
	 * All of the status blocks and descriptor rings are
	 * allocated below 4GB, so the high 32 bits of their
	 * addresses are all the same (zero).
	 */

	/* Make sure we've not crossed a 4GB boundary. */
	if ((error = alc_check_boundary(sc)) != 0) {
		device_printf(sc->alc_dev, "4GB boundary crossed, "
		    "switching to 32bit DMA addressing mode.\n");
		alc_dma_free(sc);
		/*
		 * Limit the max allowable DMA address space to 32bit
		 * and try again.
		 */
		lowaddr = BUS_SPACE_MAXADDR_32BIT;
		goto again;
	}
#endif

	/*
	 * Create the Tx buffer parent tag.
	 * AR813x/AR815x allows 64bit DMA addressing of Tx/Rx buffers,
	 * so it needs a separate parent DMA tag, as the parent DMA
	 * address space could be restricted to within the 32bit address
	 * space by 4GB boundary crossing.
	 */
	error = bus_dma_tag_create(
	    sc->alc_cdata.alc_parent_tag, /* parent */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
	    0,				/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    &sc->alc_cdata.alc_buffer_tag);
	if (error != 0) {
		device_printf(sc->alc_dev,
		    "could not create parent buffer DMA tag.\n");
		goto fail;
	}

	/* Create DMA tag for Tx buffers. */
	error = bus_dma_tag_create(
	    sc->alc_cdata.alc_buffer_tag, /* parent */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    ALC_TSO_MAXSIZE,		/* maxsize */
	    ALC_MAXTXSEGS,		/* nsegments */
	    ALC_TSO_MAXSEGSIZE,		/* maxsegsize */
	    BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE, /* flags */
	    &sc->alc_cdata.alc_tx_tag);
	if (error != 0) {
		device_printf(sc->alc_dev, "could not create Tx DMA tag.\n");
		goto fail;
	}

	/* Create DMA tag for Rx buffers. */
	error = bus_dma_tag_create(
	    sc->alc_cdata.alc_buffer_tag, /* parent */
	    ALC_RX_BUF_ALIGN, 0,	/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES,			/* maxsize */
	    1,				/* nsegments */
	    MCLBYTES,			/* maxsegsize */
	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW | BUS_DMA_ALIGNED, /* flags */
	    &sc->alc_cdata.alc_rx_tag);
	if (error != 0) {
		device_printf(sc->alc_dev, "could not create Rx DMA tag.\n");
		goto fail;
	}

	/* Create DMA maps for Tx buffers. */
	for (i = 0; i < ALC_TX_RING_CNT; i++) {
		txd = &sc->alc_cdata.alc_txdesc[i];
		txd->tx_m = NULL;
		txd->tx_dmamap = NULL;
		error = bus_dmamap_create(sc->alc_cdata.alc_tx_tag,
		    BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,
		    &txd->tx_dmamap);
		if (error != 0) {
			device_printf(sc->alc_dev,
			    "could not create Tx dmamap.\n");
			goto fail;
		}
	}

	/* Create DMA maps for Rx buffers. */
	error = bus_dmamap_create(sc->alc_cdata.alc_rx_tag,
	    BUS_DMA_WAITOK,
	    &sc->alc_cdata.alc_rx_sparemap);
	if (error) {
		device_printf(sc->alc_dev,
		    "could not create spare Rx dmamap.\n");
		goto fail;
	}
	for (i = 0; i < ALC_RX_RING_CNT; i++) {
		rxd = &sc->alc_cdata.alc_rxdesc[i];
		rxd->rx_m = NULL;
		rxd->rx_dmamap = NULL;
		error = bus_dmamap_create(sc->alc_cdata.alc_rx_tag,
		    BUS_DMA_WAITOK,
		    &rxd->rx_dmamap);
		if (error != 0) {
			device_printf(sc->alc_dev,
			    "could not create Rx dmamap.\n");
			goto fail;
		}
	}

fail:
	return (error);
}
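
/*
 * Undo alc_dma_alloc(): destroy the per-buffer maps first, then for
 * each ring/block unload the map and free the memory before destroying
 * its tag, and release the buffer and parent tags last.
 */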
static void
alc_dma_free(struct alc_softc *sc)
{
	struct alc_txdesc *txd;
	struct alc_rxdesc *rxd;
	int i;

	/* Tx buffers. */
	if (sc->alc_cdata.alc_tx_tag != NULL) {
		for (i = 0; i < ALC_TX_RING_CNT; i++) {
			txd = &sc->alc_cdata.alc_txdesc[i];
			if (txd->tx_dmamap != NULL) {
				bus_dmamap_destroy(sc->alc_cdata.alc_tx_tag,
				    txd->tx_dmamap);
				txd->tx_dmamap = NULL;
			}
		}
		bus_dma_tag_destroy(sc->alc_cdata.alc_tx_tag);
		sc->alc_cdata.alc_tx_tag = NULL;
	}
	/* Rx buffers. */
	if (sc->alc_cdata.alc_rx_tag != NULL) {
		for (i = 0; i < ALC_RX_RING_CNT; i++) {
			rxd = &sc->alc_cdata.alc_rxdesc[i];
			if (rxd->rx_dmamap != NULL) {
				bus_dmamap_destroy(sc->alc_cdata.alc_rx_tag,
				    rxd->rx_dmamap);
				rxd->rx_dmamap = NULL;
			}
		}
		if (sc->alc_cdata.alc_rx_sparemap != NULL) {
			bus_dmamap_destroy(sc->alc_cdata.alc_rx_tag,
			    sc->alc_cdata.alc_rx_sparemap);
			sc->alc_cdata.alc_rx_sparemap = NULL;
		}
		bus_dma_tag_destroy(sc->alc_cdata.alc_rx_tag);
		sc->alc_cdata.alc_rx_tag = NULL;
	}
	/* Tx descriptor ring. */
	if (sc->alc_cdata.alc_tx_ring_tag != NULL) {
		if (sc->alc_cdata.alc_tx_ring_map != NULL)
			bus_dmamap_unload(sc->alc_cdata.alc_tx_ring_tag,
			    sc->alc_cdata.alc_tx_ring_map);
		if (sc->alc_cdata.alc_tx_ring_map != NULL &&
		    sc->alc_rdata.alc_tx_ring != NULL)
			bus_dmamem_free(sc->alc_cdata.alc_tx_ring_tag,
			    sc->alc_rdata.alc_tx_ring,
			    sc->alc_cdata.alc_tx_ring_map);
		sc->alc_rdata.alc_tx_ring = NULL;
		sc->alc_cdata.alc_tx_ring_map = NULL;
		bus_dma_tag_destroy(sc->alc_cdata.alc_tx_ring_tag);
		sc->alc_cdata.alc_tx_ring_tag = NULL;
	}
	/* Rx ring. */
	if (sc->alc_cdata.alc_rx_ring_tag != NULL) {
		if (sc->alc_cdata.alc_rx_ring_map != NULL)
			bus_dmamap_unload(sc->alc_cdata.alc_rx_ring_tag,
			    sc->alc_cdata.alc_rx_ring_map);
		if (sc->alc_cdata.alc_rx_ring_map != NULL &&
		    sc->alc_rdata.alc_rx_ring != NULL)
			bus_dmamem_free(sc->alc_cdata.alc_rx_ring_tag,
			    sc->alc_rdata.alc_rx_ring,
			    sc->alc_cdata.alc_rx_ring_map);
		sc->alc_rdata.alc_rx_ring = NULL;
		sc->alc_cdata.alc_rx_ring_map = NULL;
		bus_dma_tag_destroy(sc->alc_cdata.alc_rx_ring_tag);
		sc->alc_cdata.alc_rx_ring_tag = NULL;
	}
	/* Rx return ring. */
 */
	if (sc->alc_cdata.alc_rr_ring_tag != NULL) {
		if (sc->alc_cdata.alc_rr_ring_map != NULL)
			bus_dmamap_unload(sc->alc_cdata.alc_rr_ring_tag,
			    sc->alc_cdata.alc_rr_ring_map);
		if (sc->alc_cdata.alc_rr_ring_map != NULL &&
		    sc->alc_rdata.alc_rr_ring != NULL)
			bus_dmamem_free(sc->alc_cdata.alc_rr_ring_tag,
			    sc->alc_rdata.alc_rr_ring,
			    sc->alc_cdata.alc_rr_ring_map);
		sc->alc_rdata.alc_rr_ring = NULL;
		sc->alc_cdata.alc_rr_ring_map = NULL;
		bus_dma_tag_destroy(sc->alc_cdata.alc_rr_ring_tag);
		sc->alc_cdata.alc_rr_ring_tag = NULL;
	}
	/* CMB block */
	if (sc->alc_cdata.alc_cmb_tag != NULL) {
		if (sc->alc_cdata.alc_cmb_map != NULL)
			bus_dmamap_unload(sc->alc_cdata.alc_cmb_tag,
			    sc->alc_cdata.alc_cmb_map);
		if (sc->alc_cdata.alc_cmb_map != NULL &&
		    sc->alc_rdata.alc_cmb != NULL)
			bus_dmamem_free(sc->alc_cdata.alc_cmb_tag,
			    sc->alc_rdata.alc_cmb,
			    sc->alc_cdata.alc_cmb_map);
		sc->alc_rdata.alc_cmb = NULL;
		sc->alc_cdata.alc_cmb_map = NULL;
		bus_dma_tag_destroy(sc->alc_cdata.alc_cmb_tag);
		sc->alc_cdata.alc_cmb_tag = NULL;
	}
	/* SMB block */
	if (sc->alc_cdata.alc_smb_tag != NULL) {
		if (sc->alc_cdata.alc_smb_map != NULL)
			bus_dmamap_unload(sc->alc_cdata.alc_smb_tag,
			    sc->alc_cdata.alc_smb_map);
		if (sc->alc_cdata.alc_smb_map != NULL &&
		    sc->alc_rdata.alc_smb != NULL)
			bus_dmamem_free(sc->alc_cdata.alc_smb_tag,
			    sc->alc_rdata.alc_smb,
			    sc->alc_cdata.alc_smb_map);
		sc->alc_rdata.alc_smb = NULL;
		sc->alc_cdata.alc_smb_map = NULL;
		bus_dma_tag_destroy(sc->alc_cdata.alc_smb_tag);
		sc->alc_cdata.alc_smb_tag = NULL;
	}
	if (sc->alc_cdata.alc_buffer_tag != NULL) {
		bus_dma_tag_destroy(sc->alc_cdata.alc_buffer_tag);
		sc->alc_cdata.alc_buffer_tag = NULL;
	}
	if (sc->alc_cdata.alc_parent_tag != NULL) {
		bus_dma_tag_destroy(sc->alc_cdata.alc_parent_tag);
		sc->alc_cdata.alc_parent_tag = NULL;
	}
}

static int
alc_shutdown(device_t dev)
{

	return (alc_suspend(dev));
}

#if 0
/* XXX: LINK SPEED */
/*
 * Note: this driver resets the link speed to 10/100Mbps by
 * restarting auto-negotiation in the suspend/shutdown phase, but we
 * don't know whether that auto-negotiation will succeed, as the
 * driver has no control after the power-off/suspend operation.
 * If the renegotiation fails, WOL may not work. Running at 1Gbps
 * draws more power than the 375mA at 3.3V allowed by the PCI
 * specification, which could result in power to the ethernet
 * controller being shut down completely.
 *
 * TODO
 * Save the currently negotiated media speed/duplex/flow-control in
 * the softc and restore the same link again after resuming. PHY
 * handling such as powering down/resetting to 100Mbps may be better
 * handled in the suspend method of the phy driver.
 */
static void
alc_setlinkspeed(struct alc_softc *sc)
{
	struct mii_data *mii;
	int aneg, i;

	mii = device_get_softc(sc->alc_miibus);
	mii_pollstat(mii);
	aneg = 0;
	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
	    (IFM_ACTIVE | IFM_AVALID)) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			return;
		case IFM_1000_T:
			aneg++;
			break;
		default:
			break;
		}
	}
	alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr, MII_100T2CR, 0);
	alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
	    MII_ANAR, ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10 | ANAR_CSMA);
	alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
	    MII_BMCR, BMCR_RESET | BMCR_AUTOEN | BMCR_STARTNEG);
	DELAY(1000);
	if (aneg != 0) {
		/*
		 * Poll link state until alc(4) gets a 10/100Mbps link.
		 */
		for (i = 0; i < MII_ANEGTICKS_GIGE; i++) {
			mii_pollstat(mii);
			if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID))
			    == (IFM_ACTIVE | IFM_AVALID)) {
				switch (IFM_SUBTYPE(
				    mii->mii_media_active)) {
				case IFM_10_T:
				case IFM_100_TX:
					alc_mac_config(sc);
					return;
				default:
					break;
				}
			}
			ALC_UNLOCK(sc);
			pause("alclnk", hz);
			ALC_LOCK(sc);
		}
		if (i == MII_ANEGTICKS_GIGE)
			device_printf(sc->alc_dev,
			    "establishing a link failed, WOL may not work!\n");
	}
	/*
	 * No link; force the MAC to a 100Mbps, full-duplex link.
	 * This is a last resort and may or may not work.
	 */
	mii->mii_media_status = IFM_AVALID | IFM_ACTIVE;
	mii->mii_media_active = IFM_ETHER | IFM_100_TX | IFM_FDX;
	alc_mac_config(sc);
}
#endif

#if 0
/* XXX: WOL */
static void
alc_setwol(struct alc_softc *sc)
{
	struct ifnet *ifp;
	uint32_t reg, pmcs;
	uint16_t pmstat;

	ALC_LOCK_ASSERT(sc);

	alc_disable_l0s_l1(sc);
	ifp = sc->alc_ifp;
	if ((sc->alc_flags & ALC_FLAG_PM) == 0) {
		/* Disable WOL. */
		CSR_WRITE_4(sc, ALC_WOL_CFG, 0);
		reg = CSR_READ_4(sc, ALC_PCIE_PHYMISC);
		reg |= PCIE_PHYMISC_FORCE_RCV_DET;
		CSR_WRITE_4(sc, ALC_PCIE_PHYMISC, reg);
		/* Force PHY power down. */
		alc_phy_down(sc);
		CSR_WRITE_4(sc, ALC_MASTER_CFG,
		    CSR_READ_4(sc, ALC_MASTER_CFG) | MASTER_CLK_SEL_DIS);
		return;
	}

	if ((ifp->if_capenable & IFCAP_WOL) != 0) {
		if ((sc->alc_flags & ALC_FLAG_FASTETHER) == 0)
			alc_setlinkspeed(sc);
		CSR_WRITE_4(sc, ALC_MASTER_CFG,
		    CSR_READ_4(sc, ALC_MASTER_CFG) & ~MASTER_CLK_SEL_DIS);
	}

	pmcs = 0;
	if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0)
		pmcs |= WOL_CFG_MAGIC | WOL_CFG_MAGIC_ENB;
	CSR_WRITE_4(sc, ALC_WOL_CFG, pmcs);
	reg = CSR_READ_4(sc, ALC_MAC_CFG);
	reg &= ~(MAC_CFG_DBG | MAC_CFG_PROMISC | MAC_CFG_ALLMULTI |
	    MAC_CFG_BCAST);
	if ((ifp->if_capenable & IFCAP_WOL_MCAST) != 0)
		reg |= MAC_CFG_ALLMULTI | MAC_CFG_BCAST;
	if ((ifp->if_capenable & IFCAP_WOL) != 0)
		reg |= MAC_CFG_RX_ENB;
	CSR_WRITE_4(sc, ALC_MAC_CFG, reg);

	reg = CSR_READ_4(sc, ALC_PCIE_PHYMISC);
	reg |= PCIE_PHYMISC_FORCE_RCV_DET;
	CSR_WRITE_4(sc, ALC_PCIE_PHYMISC, reg);
	if ((ifp->if_capenable & IFCAP_WOL) == 0) {
		/* WOL disabled, PHY power down. */
		alc_phy_down(sc);
		CSR_WRITE_4(sc, ALC_MASTER_CFG,
		    CSR_READ_4(sc, ALC_MASTER_CFG) | MASTER_CLK_SEL_DIS);
	}
	/* Request PME.
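	 * Per PCI power management, the PME status bit is
	 * write-1-to-clear and PCIM_PSTAT_PMEENABLE arms PME#
	 * assertion, which is what lets a WOL event wake the host.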
*/ 1888 pmstat = pci_read_config(sc->alc_dev, 1889 sc->alc_pmcap + PCIR_POWER_STATUS, 2); 1890 pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE); 1891 if ((ifp->if_capenable & IFCAP_WOL) != 0) 1892 pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE; 1893 pci_write_config(sc->alc_dev, 1894 sc->alc_pmcap + PCIR_POWER_STATUS, pmstat, 2); 1895 } 1896 #endif 1897 1898 static int 1899 alc_suspend(device_t dev) 1900 { 1901 struct alc_softc *sc = device_get_softc(dev); 1902 struct ifnet *ifp = &sc->arpcom.ac_if; 1903 1904 lwkt_serialize_enter(ifp->if_serializer); 1905 alc_stop(sc); 1906 #if 0 1907 /* XXX: WOL */ 1908 alc_setwol(sc); 1909 #endif 1910 lwkt_serialize_exit(ifp->if_serializer); 1911 1912 return (0); 1913 } 1914 1915 static int 1916 alc_resume(device_t dev) 1917 { 1918 struct alc_softc *sc = device_get_softc(dev); 1919 struct ifnet *ifp = &sc->arpcom.ac_if; 1920 uint16_t pmstat; 1921 1922 lwkt_serialize_enter(ifp->if_serializer); 1923 1924 if ((sc->alc_flags & ALC_FLAG_PM) != 0) { 1925 /* Disable PME and clear PME status. */ 1926 pmstat = pci_read_config(sc->alc_dev, 1927 sc->alc_pmcap + PCIR_POWER_STATUS, 2); 1928 if ((pmstat & PCIM_PSTAT_PMEENABLE) != 0) { 1929 pmstat &= ~PCIM_PSTAT_PMEENABLE; 1930 pci_write_config(sc->alc_dev, 1931 sc->alc_pmcap + PCIR_POWER_STATUS, pmstat, 2); 1932 } 1933 } 1934 1935 /* Reset PHY. */ 1936 alc_phy_reset(sc); 1937 if (ifp->if_flags & IFF_UP) 1938 alc_init(sc); 1939 1940 lwkt_serialize_exit(ifp->if_serializer); 1941 1942 return (0); 1943 } 1944 1945 static int 1946 alc_encap(struct alc_softc *sc, struct mbuf **m_head) 1947 { 1948 struct alc_txdesc *txd, *txd_last; 1949 struct tx_desc *desc; 1950 struct mbuf *m; 1951 #if 0 /* XXX: TSO */ 1952 struct ip *ip; 1953 #endif 1954 struct tcphdr *tcp; 1955 bus_dma_segment_t txsegs[ALC_MAXTXSEGS]; 1956 bus_dmamap_t map; 1957 uint32_t cflags, hdrlen, ip_off, poff, vtag; 1958 int error, idx, nsegs, prod; 1959 1960 M_ASSERTPKTHDR((*m_head)); 1961 1962 m = *m_head; 1963 tcp = NULL; 1964 ip_off = poff = 0; 1965 #if 0 /* XXX: TSO */ 1966 ip = NULL; 1967 1968 if ((m->m_pkthdr.csum_flags & (ALC_CSUM_FEATURES | CSUM_TSO)) != 0) { 1969 /* 1970 * AR813x/AR815x requires offset of TCP/UDP header in its 1971 * Tx descriptor to perform Tx checksum offloading. TSO 1972 * also requires TCP header offset and modification of 1973 * IP/TCP header. This kind of operation takes many CPU 1974 * cycles on FreeBSD so fast host CPU is required to get 1975 * smooth TSO performance. 1976 */ 1977 struct ether_header *eh; 1978 1979 if (M_WRITABLE(m) == 0) { 1980 /* Get a writable copy. */ 1981 m = m_dup(*m_head, MB_DONTWAIT); 1982 /* Release original mbufs. */ 1983 m_freem(*m_head); 1984 if (m == NULL) { 1985 *m_head = NULL; 1986 return (ENOBUFS); 1987 } 1988 *m_head = m; 1989 } 1990 1991 ip_off = sizeof(struct ether_header); 1992 m = m_pullup(m, ip_off + sizeof(struct ip)); 1993 if (m == NULL) { 1994 *m_head = NULL; 1995 return (ENOBUFS); 1996 } 1997 eh = mtod(m, struct ether_header *); 1998 /* 1999 * Check if hardware VLAN insertion is off. 2000 * Additional check for LLC/SNAP frame? 
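		 * If the frame does carry an 802.1Q tag, the IP header
		 * starts after the larger ether_vlan_header, so ip_off
		 * is bumped accordingly below.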
2001 */ 2002 if (eh->ether_type == htons(ETHERTYPE_VLAN)) { 2003 ip_off = sizeof(struct ether_vlan_header); 2004 m = m_pullup(m, ip_off); 2005 if (m == NULL) { 2006 *m_head = NULL; 2007 return (ENOBUFS); 2008 } 2009 } 2010 m = m_pullup(m, ip_off + sizeof(struct ip)); 2011 if (m == NULL) { 2012 *m_head = NULL; 2013 return (ENOBUFS); 2014 } 2015 ip = (struct ip *)(mtod(m, char *) + ip_off); 2016 poff = ip_off + (ip->ip_hl << 2); 2017 2018 if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) { 2019 m = m_pullup(m, poff + sizeof(struct tcphdr)); 2020 if (m == NULL) { 2021 *m_head = NULL; 2022 return (ENOBUFS); 2023 } 2024 tcp = (struct tcphdr *)(mtod(m, char *) + poff); 2025 m = m_pullup(m, poff + (tcp->th_off << 2)); 2026 if (m == NULL) { 2027 *m_head = NULL; 2028 return (ENOBUFS); 2029 } 2030 /* 2031 * Due to strict adherence of Microsoft NDIS 2032 * Large Send specification, hardware expects 2033 * a pseudo TCP checksum inserted by upper 2034 * stack. Unfortunately the pseudo TCP 2035 * checksum that NDIS refers to does not include 2036 * TCP payload length so driver should recompute 2037 * the pseudo checksum here. Hopefully this 2038 * wouldn't be much burden on modern CPUs. 2039 * 2040 * Reset IP checksum and recompute TCP pseudo 2041 * checksum as NDIS specification said. 2042 */ 2043 ip->ip_sum = 0; 2044 tcp->th_sum = in_pseudo(ip->ip_src.s_addr, 2045 ip->ip_dst.s_addr, htons(IPPROTO_TCP)); 2046 } 2047 *m_head = m; 2048 } 2049 #endif /* TSO */ 2050 2051 prod = sc->alc_cdata.alc_tx_prod; 2052 txd = &sc->alc_cdata.alc_txdesc[prod]; 2053 txd_last = txd; 2054 map = txd->tx_dmamap; 2055 2056 error = bus_dmamap_load_mbuf_defrag( 2057 sc->alc_cdata.alc_tx_tag, map, m_head, 2058 txsegs, ALC_MAXTXSEGS, &nsegs, BUS_DMA_NOWAIT); 2059 if (error) { 2060 m_freem(*m_head); 2061 *m_head = NULL; 2062 return (error); 2063 } 2064 if (nsegs == 0) { 2065 m_freem(*m_head); 2066 *m_head = NULL; 2067 return (EIO); 2068 } 2069 2070 /* Check descriptor overrun. */ 2071 if (sc->alc_cdata.alc_tx_cnt + nsegs >= ALC_TX_RING_CNT - 3) { 2072 bus_dmamap_unload(sc->alc_cdata.alc_tx_tag, map); 2073 return (ENOBUFS); 2074 } 2075 bus_dmamap_sync(sc->alc_cdata.alc_tx_tag, map, BUS_DMASYNC_PREWRITE); 2076 2077 m = *m_head; 2078 cflags = TD_ETHERNET; 2079 vtag = 0; 2080 desc = NULL; 2081 idx = 0; 2082 /* Configure VLAN hardware tag insertion. */ 2083 if ((m->m_flags & M_VLANTAG) != 0) { 2084 vtag = htons(m->m_pkthdr.ether_vlantag); 2085 vtag = (vtag << TD_VLAN_SHIFT) & TD_VLAN_MASK; 2086 cflags |= TD_INS_VLAN_TAG; 2087 } 2088 /* Configure Tx checksum offload. */ 2089 if ((m->m_pkthdr.csum_flags & ALC_CSUM_FEATURES) != 0) { 2090 #ifdef ALC_USE_CUSTOM_CSUM 2091 cflags |= TD_CUSTOM_CSUM; 2092 /* Set checksum start offset. */ 2093 cflags |= ((poff >> 1) << TD_PLOAD_OFFSET_SHIFT) & 2094 TD_PLOAD_OFFSET_MASK; 2095 /* Set checksum insertion position of TCP/UDP. */ 2096 cflags |= (((poff + m->m_pkthdr.csum_data) >> 1) << 2097 TD_CUSTOM_CSUM_OFFSET_SHIFT) & TD_CUSTOM_CSUM_OFFSET_MASK; 2098 #else 2099 if ((m->m_pkthdr.csum_flags & CSUM_IP) != 0) 2100 cflags |= TD_IPCSUM; 2101 if ((m->m_pkthdr.csum_flags & CSUM_TCP) != 0) 2102 cflags |= TD_TCPCSUM; 2103 if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0) 2104 cflags |= TD_UDPCSUM; 2105 /* Set TCP/UDP header offset. */ 2106 cflags |= (poff << TD_L4HDR_OFFSET_SHIFT) & 2107 TD_L4HDR_OFFSET_MASK; 2108 #endif 2109 } else if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) { 2110 /* Request TSO and set MSS. 
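		 * (The MSS itself would be encoded into the descriptor
		 * flags via TD_MSS_SHIFT/TD_MSS_MASK, but that part is
		 * disabled below along with the rest of TSO support.)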
*/ 2111 cflags |= TD_TSO | TD_TSO_DESCV1; 2112 #if 0 2113 /* XXX: TSO */ 2114 cflags |= ((uint32_t)m->m_pkthdr.tso_segsz << TD_MSS_SHIFT) & 2115 TD_MSS_MASK; 2116 /* Set TCP header offset. */ 2117 #endif 2118 cflags |= (poff << TD_TCPHDR_OFFSET_SHIFT) & 2119 TD_TCPHDR_OFFSET_MASK; 2120 /* 2121 * AR813x/AR815x requires the first buffer should 2122 * only hold IP/TCP header data. Payload should 2123 * be handled in other descriptors. 2124 */ 2125 hdrlen = poff + (tcp->th_off << 2); 2126 desc = &sc->alc_rdata.alc_tx_ring[prod]; 2127 desc->len = htole32(TX_BYTES(hdrlen | vtag)); 2128 desc->flags = htole32(cflags); 2129 desc->addr = htole64(txsegs[0].ds_addr); 2130 sc->alc_cdata.alc_tx_cnt++; 2131 ALC_DESC_INC(prod, ALC_TX_RING_CNT); 2132 if (m->m_len - hdrlen > 0) { 2133 /* Handle remaining payload of the first fragment. */ 2134 desc = &sc->alc_rdata.alc_tx_ring[prod]; 2135 desc->len = htole32(TX_BYTES((m->m_len - hdrlen) | 2136 vtag)); 2137 desc->flags = htole32(cflags); 2138 desc->addr = htole64(txsegs[0].ds_addr + hdrlen); 2139 sc->alc_cdata.alc_tx_cnt++; 2140 ALC_DESC_INC(prod, ALC_TX_RING_CNT); 2141 } 2142 /* Handle remaining fragments. */ 2143 idx = 1; 2144 } 2145 for (; idx < nsegs; idx++) { 2146 desc = &sc->alc_rdata.alc_tx_ring[prod]; 2147 desc->len = htole32(TX_BYTES(txsegs[idx].ds_len) | vtag); 2148 desc->flags = htole32(cflags); 2149 desc->addr = htole64(txsegs[idx].ds_addr); 2150 sc->alc_cdata.alc_tx_cnt++; 2151 ALC_DESC_INC(prod, ALC_TX_RING_CNT); 2152 } 2153 /* Update producer index. */ 2154 sc->alc_cdata.alc_tx_prod = prod; 2155 2156 /* Finally set EOP on the last descriptor. */ 2157 prod = (prod + ALC_TX_RING_CNT - 1) % ALC_TX_RING_CNT; 2158 desc = &sc->alc_rdata.alc_tx_ring[prod]; 2159 desc->flags |= htole32(TD_EOP); 2160 2161 /* Swap dmamap of the first and the last. */ 2162 txd = &sc->alc_cdata.alc_txdesc[prod]; 2163 map = txd_last->tx_dmamap; 2164 txd_last->tx_dmamap = txd->tx_dmamap; 2165 txd->tx_dmamap = map; 2166 txd->tx_m = m; 2167 2168 return (0); 2169 } 2170 2171 static void 2172 alc_start(struct ifnet *ifp) 2173 { 2174 struct alc_softc *sc = ifp->if_softc; 2175 struct mbuf *m_head; 2176 int enq; 2177 2178 ASSERT_SERIALIZED(ifp->if_serializer); 2179 2180 /* Reclaim transmitted frames. */ 2181 if (sc->alc_cdata.alc_tx_cnt >= ALC_TX_DESC_HIWAT) 2182 alc_txeof(sc); 2183 2184 if ((ifp->if_flags & IFF_RUNNING) == 0 || ifq_is_oactive(&ifp->if_snd)) 2185 return; 2186 if ((sc->alc_flags & ALC_FLAG_LINK) == 0) { 2187 ifq_purge(&ifp->if_snd); 2188 return; 2189 } 2190 2191 for (enq = 0; !ifq_is_empty(&ifp->if_snd); ) { 2192 m_head = ifq_dequeue(&ifp->if_snd, NULL); 2193 if (m_head == NULL) 2194 break; 2195 /* 2196 * Pack the data into the transmit ring. If we 2197 * don't have room, set the OACTIVE flag and wait 2198 * for the NIC to drain the ring. 2199 */ 2200 if (alc_encap(sc, &m_head)) { 2201 if (m_head == NULL) 2202 break; 2203 ifq_prepend(&ifp->if_snd, m_head); 2204 ifq_set_oactive(&ifp->if_snd); 2205 break; 2206 } 2207 2208 enq++; 2209 /* 2210 * If there's a BPF listener, bounce a copy of this frame 2211 * to him. 2212 */ 2213 ETHER_BPF_MTAP(ifp, m_head); 2214 } 2215 2216 if (enq > 0) { 2217 /* Sync descriptors. */ 2218 bus_dmamap_sync(sc->alc_cdata.alc_tx_ring_tag, 2219 sc->alc_cdata.alc_tx_ring_map, BUS_DMASYNC_PREWRITE); 2220 /* Kick. Assume we're using normal Tx priority queue. 
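		 * Writing the producer index to the mailbox register
		 * below hands the new descriptors to the MAC, which
		 * starts fetching them via DMA.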
*/ 2221 CSR_WRITE_4(sc, ALC_MBOX_TD_PROD_IDX, 2222 (sc->alc_cdata.alc_tx_prod << 2223 MBOX_TD_PROD_LO_IDX_SHIFT) & 2224 MBOX_TD_PROD_LO_IDX_MASK); 2225 /* Set a timeout in case the chip goes out to lunch. */ 2226 sc->alc_watchdog_timer = ALC_TX_TIMEOUT; 2227 } 2228 } 2229 2230 static void 2231 alc_watchdog(struct alc_softc *sc) 2232 { 2233 struct ifnet *ifp = &sc->arpcom.ac_if; 2234 2235 ASSERT_SERIALIZED(ifp->if_serializer); 2236 2237 if (sc->alc_watchdog_timer == 0 || --sc->alc_watchdog_timer) 2238 return; 2239 2240 if ((sc->alc_flags & ALC_FLAG_LINK) == 0) { 2241 if_printf(sc->alc_ifp, "watchdog timeout (lost link)\n"); 2242 ifp->if_oerrors++; 2243 alc_init(sc); 2244 return; 2245 } 2246 if_printf(sc->alc_ifp, "watchdog timeout -- resetting\n"); 2247 ifp->if_oerrors++; 2248 alc_init(sc); 2249 if (!ifq_is_empty(&ifp->if_snd)) 2250 if_devstart(ifp); 2251 } 2252 2253 static int 2254 alc_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data, struct ucred *cr) 2255 { 2256 struct alc_softc *sc; 2257 struct ifreq *ifr; 2258 struct mii_data *mii; 2259 int error, mask; 2260 2261 ASSERT_SERIALIZED(ifp->if_serializer); 2262 2263 sc = ifp->if_softc; 2264 ifr = (struct ifreq *)data; 2265 error = 0; 2266 switch (cmd) { 2267 case SIOCSIFMTU: 2268 if (ifr->ifr_mtu < ETHERMIN || 2269 ifr->ifr_mtu > (sc->alc_ident->max_framelen - 2270 sizeof(struct ether_vlan_header) - ETHER_CRC_LEN) || 2271 ((sc->alc_flags & ALC_FLAG_JUMBO) == 0 && 2272 ifr->ifr_mtu > ETHERMTU)) { 2273 error = EINVAL; 2274 } else if (ifp->if_mtu != ifr->ifr_mtu) { 2275 ifp->if_mtu = ifr->ifr_mtu; 2276 #if 0 2277 /* AR813x/AR815x has 13 bits MSS field. */ 2278 if (ifp->if_mtu > ALC_TSO_MTU && 2279 (ifp->if_capenable & IFCAP_TSO4) != 0) { 2280 ifp->if_capenable &= ~IFCAP_TSO4; 2281 ifp->if_hwassist &= ~CSUM_TSO; 2282 } 2283 #endif 2284 } 2285 break; 2286 case SIOCSIFFLAGS: 2287 if ((ifp->if_flags & IFF_UP) != 0) { 2288 if ((ifp->if_flags & IFF_RUNNING) != 0 && 2289 ((ifp->if_flags ^ sc->alc_if_flags) & 2290 (IFF_PROMISC | IFF_ALLMULTI)) != 0) 2291 alc_rxfilter(sc); 2292 else if ((ifp->if_flags & IFF_RUNNING) == 0) 2293 alc_init(sc); 2294 } else if ((ifp->if_flags & IFF_RUNNING) != 0) 2295 alc_stop(sc); 2296 sc->alc_if_flags = ifp->if_flags; 2297 break; 2298 case SIOCADDMULTI: 2299 case SIOCDELMULTI: 2300 if ((ifp->if_flags & IFF_RUNNING) != 0) 2301 alc_rxfilter(sc); 2302 break; 2303 case SIOCSIFMEDIA: 2304 case SIOCGIFMEDIA: 2305 mii = device_get_softc(sc->alc_miibus); 2306 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd); 2307 break; 2308 case SIOCSIFCAP: 2309 mask = ifr->ifr_reqcap ^ ifp->if_capenable; 2310 if ((mask & IFCAP_TXCSUM) != 0 && 2311 (ifp->if_capabilities & IFCAP_TXCSUM) != 0) { 2312 ifp->if_capenable ^= IFCAP_TXCSUM; 2313 if ((ifp->if_capenable & IFCAP_TXCSUM) != 0) 2314 ifp->if_hwassist |= ALC_CSUM_FEATURES; 2315 else 2316 ifp->if_hwassist &= ~ALC_CSUM_FEATURES; 2317 } 2318 #if 0 2319 /* XXX: WOL */ 2320 if ((mask & IFCAP_WOL_MCAST) != 0 && 2321 (ifp->if_capabilities & IFCAP_WOL_MCAST) != 0) 2322 ifp->if_capenable ^= IFCAP_WOL_MCAST; 2323 if ((mask & IFCAP_WOL_MAGIC) != 0 && 2324 (ifp->if_capabilities & IFCAP_WOL_MAGIC) != 0) 2325 ifp->if_capenable ^= IFCAP_WOL_MAGIC; 2326 #endif 2327 if ((mask & IFCAP_VLAN_HWTAGGING) != 0 && 2328 (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) != 0) { 2329 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING; 2330 alc_rxvlan(sc); 2331 } 2332 if ((mask & IFCAP_VLAN_HWCSUM) != 0 && 2333 (ifp->if_capabilities & IFCAP_VLAN_HWCSUM) != 0) 2334 ifp->if_capenable ^= IFCAP_VLAN_HWCSUM; 2335 2336 /* 2337 * VLAN 
hardware tagging is required to do checksum 2338 * offload or TSO on VLAN interface. Checksum offload 2339 * on VLAN interface also requires hardware checksum 2340 * offload of parent interface. 2341 */ 2342 if ((ifp->if_capenable & IFCAP_TXCSUM) == 0) 2343 ifp->if_capenable &= ~IFCAP_VLAN_HWCSUM; 2344 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) == 0) 2345 ifp->if_capenable &= ~IFCAP_VLAN_HWCSUM; 2346 // XXX VLAN_CAPABILITIES(ifp); 2347 break; 2348 default: 2349 error = ether_ioctl(ifp, cmd, data); 2350 break; 2351 } 2352 2353 return (error); 2354 } 2355 2356 static void 2357 alc_mac_config(struct alc_softc *sc) 2358 { 2359 struct mii_data *mii; 2360 uint32_t reg; 2361 2362 mii = device_get_softc(sc->alc_miibus); 2363 reg = CSR_READ_4(sc, ALC_MAC_CFG); 2364 reg &= ~(MAC_CFG_FULL_DUPLEX | MAC_CFG_TX_FC | MAC_CFG_RX_FC | 2365 MAC_CFG_SPEED_MASK); 2366 if (sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8151 || 2367 sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8151_V2 || 2368 sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8152_B2) { 2369 reg |= MAC_CFG_HASH_ALG_CRC32 | MAC_CFG_SPEED_MODE_SW; 2370 } 2371 /* Reprogram MAC with resolved speed/duplex. */ 2372 switch (IFM_SUBTYPE(mii->mii_media_active)) { 2373 case IFM_10_T: 2374 case IFM_100_TX: 2375 reg |= MAC_CFG_SPEED_10_100; 2376 break; 2377 case IFM_1000_T: 2378 reg |= MAC_CFG_SPEED_1000; 2379 break; 2380 } 2381 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) { 2382 reg |= MAC_CFG_FULL_DUPLEX; 2383 #ifdef notyet 2384 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0) 2385 reg |= MAC_CFG_TX_FC; 2386 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0) 2387 reg |= MAC_CFG_RX_FC; 2388 #endif 2389 } 2390 CSR_WRITE_4(sc, ALC_MAC_CFG, reg); 2391 } 2392 2393 static void 2394 alc_stats_clear(struct alc_softc *sc) 2395 { 2396 struct smb sb, *smb; 2397 uint32_t *reg; 2398 int i; 2399 2400 if ((sc->alc_flags & ALC_FLAG_SMB_BUG) == 0) { 2401 bus_dmamap_sync(sc->alc_cdata.alc_smb_tag, 2402 sc->alc_cdata.alc_smb_map, 2403 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 2404 smb = sc->alc_rdata.alc_smb; 2405 /* Update done, clear. */ 2406 smb->updated = 0; 2407 bus_dmamap_sync(sc->alc_cdata.alc_smb_tag, 2408 sc->alc_cdata.alc_smb_map, 2409 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 2410 } else { 2411 for (reg = &sb.rx_frames, i = 0; reg <= &sb.rx_pkts_filtered; 2412 reg++) { 2413 CSR_READ_4(sc, ALC_RX_MIB_BASE + i); 2414 i += sizeof(uint32_t); 2415 } 2416 /* Read Tx statistics. */ 2417 for (reg = &sb.tx_frames, i = 0; reg <= &sb.tx_mcast_bytes; 2418 reg++) { 2419 CSR_READ_4(sc, ALC_TX_MIB_BASE + i); 2420 i += sizeof(uint32_t); 2421 } 2422 } 2423 } 2424 2425 static void 2426 alc_stats_update(struct alc_softc *sc) 2427 { 2428 struct alc_hw_stats *stat; 2429 struct smb sb, *smb; 2430 struct ifnet *ifp; 2431 uint32_t *reg; 2432 int i; 2433 2434 ifp = sc->alc_ifp; 2435 stat = &sc->alc_stats; 2436 if ((sc->alc_flags & ALC_FLAG_SMB_BUG) == 0) { 2437 bus_dmamap_sync(sc->alc_cdata.alc_smb_tag, 2438 sc->alc_cdata.alc_smb_map, 2439 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 2440 smb = sc->alc_rdata.alc_smb; 2441 if (smb->updated == 0) 2442 return; 2443 } else { 2444 smb = &sb; 2445 /* Read Rx statistics. */ 2446 for (reg = &sb.rx_frames, i = 0; reg <= &sb.rx_pkts_filtered; 2447 reg++) { 2448 *reg = CSR_READ_4(sc, ALC_RX_MIB_BASE + i); 2449 i += sizeof(uint32_t); 2450 } 2451 /* Read Tx statistics. 
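		 * The loop below walks struct smb member by member; it
		 * relies on the Tx MIB counters being laid out
		 * sequentially from ALC_TX_MIB_BASE in the same order
		 * as the structure fields.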
*/ 2452 for (reg = &sb.tx_frames, i = 0; reg <= &sb.tx_mcast_bytes; 2453 reg++) { 2454 *reg = CSR_READ_4(sc, ALC_TX_MIB_BASE + i); 2455 i += sizeof(uint32_t); 2456 } 2457 } 2458 2459 /* Rx stats. */ 2460 stat->rx_frames += smb->rx_frames; 2461 stat->rx_bcast_frames += smb->rx_bcast_frames; 2462 stat->rx_mcast_frames += smb->rx_mcast_frames; 2463 stat->rx_pause_frames += smb->rx_pause_frames; 2464 stat->rx_control_frames += smb->rx_control_frames; 2465 stat->rx_crcerrs += smb->rx_crcerrs; 2466 stat->rx_lenerrs += smb->rx_lenerrs; 2467 stat->rx_bytes += smb->rx_bytes; 2468 stat->rx_runts += smb->rx_runts; 2469 stat->rx_fragments += smb->rx_fragments; 2470 stat->rx_pkts_64 += smb->rx_pkts_64; 2471 stat->rx_pkts_65_127 += smb->rx_pkts_65_127; 2472 stat->rx_pkts_128_255 += smb->rx_pkts_128_255; 2473 stat->rx_pkts_256_511 += smb->rx_pkts_256_511; 2474 stat->rx_pkts_512_1023 += smb->rx_pkts_512_1023; 2475 stat->rx_pkts_1024_1518 += smb->rx_pkts_1024_1518; 2476 stat->rx_pkts_1519_max += smb->rx_pkts_1519_max; 2477 stat->rx_pkts_truncated += smb->rx_pkts_truncated; 2478 stat->rx_fifo_oflows += smb->rx_fifo_oflows; 2479 stat->rx_rrs_errs += smb->rx_rrs_errs; 2480 stat->rx_alignerrs += smb->rx_alignerrs; 2481 stat->rx_bcast_bytes += smb->rx_bcast_bytes; 2482 stat->rx_mcast_bytes += smb->rx_mcast_bytes; 2483 stat->rx_pkts_filtered += smb->rx_pkts_filtered; 2484 2485 /* Tx stats. */ 2486 stat->tx_frames += smb->tx_frames; 2487 stat->tx_bcast_frames += smb->tx_bcast_frames; 2488 stat->tx_mcast_frames += smb->tx_mcast_frames; 2489 stat->tx_pause_frames += smb->tx_pause_frames; 2490 stat->tx_excess_defer += smb->tx_excess_defer; 2491 stat->tx_control_frames += smb->tx_control_frames; 2492 stat->tx_deferred += smb->tx_deferred; 2493 stat->tx_bytes += smb->tx_bytes; 2494 stat->tx_pkts_64 += smb->tx_pkts_64; 2495 stat->tx_pkts_65_127 += smb->tx_pkts_65_127; 2496 stat->tx_pkts_128_255 += smb->tx_pkts_128_255; 2497 stat->tx_pkts_256_511 += smb->tx_pkts_256_511; 2498 stat->tx_pkts_512_1023 += smb->tx_pkts_512_1023; 2499 stat->tx_pkts_1024_1518 += smb->tx_pkts_1024_1518; 2500 stat->tx_pkts_1519_max += smb->tx_pkts_1519_max; 2501 stat->tx_single_colls += smb->tx_single_colls; 2502 stat->tx_multi_colls += smb->tx_multi_colls; 2503 stat->tx_late_colls += smb->tx_late_colls; 2504 stat->tx_excess_colls += smb->tx_excess_colls; 2505 stat->tx_abort += smb->tx_abort; 2506 stat->tx_underrun += smb->tx_underrun; 2507 stat->tx_desc_underrun += smb->tx_desc_underrun; 2508 stat->tx_lenerrs += smb->tx_lenerrs; 2509 stat->tx_pkts_truncated += smb->tx_pkts_truncated; 2510 stat->tx_bcast_bytes += smb->tx_bcast_bytes; 2511 stat->tx_mcast_bytes += smb->tx_mcast_bytes; 2512 2513 /* Update counters in ifnet. */ 2514 ifp->if_opackets += smb->tx_frames; 2515 2516 ifp->if_collisions += smb->tx_single_colls + 2517 smb->tx_multi_colls * 2 + smb->tx_late_colls + 2518 smb->tx_abort * HDPX_CFG_RETRY_DEFAULT; 2519 2520 /* 2521 * XXX 2522 * tx_pkts_truncated counter looks suspicious. It constantly 2523 * increments with no sign of Tx errors. This may indicate 2524 * the counter name is not correct one so I've removed the 2525 * counter in output errors. 
2526 */ 2527 ifp->if_oerrors += smb->tx_abort + smb->tx_late_colls + 2528 smb->tx_underrun; 2529 2530 ifp->if_ipackets += smb->rx_frames; 2531 2532 ifp->if_ierrors += smb->rx_crcerrs + smb->rx_lenerrs + 2533 smb->rx_runts + smb->rx_pkts_truncated + 2534 smb->rx_fifo_oflows + smb->rx_rrs_errs + 2535 smb->rx_alignerrs; 2536 2537 if ((sc->alc_flags & ALC_FLAG_SMB_BUG) == 0) { 2538 /* Update done, clear. */ 2539 smb->updated = 0; 2540 bus_dmamap_sync(sc->alc_cdata.alc_smb_tag, 2541 sc->alc_cdata.alc_smb_map, 2542 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 2543 } 2544 } 2545 2546 static void 2547 alc_intr(void *arg) 2548 { 2549 struct alc_softc *sc = arg; 2550 struct ifnet *ifp = &sc->arpcom.ac_if; 2551 uint32_t status; 2552 2553 ASSERT_SERIALIZED(ifp->if_serializer); 2554 2555 status = CSR_READ_4(sc, ALC_INTR_STATUS); 2556 if ((status & ALC_INTRS) == 0) 2557 return; 2558 2559 /* Acknowledge interrupts and disable interrupts. */ 2560 CSR_WRITE_4(sc, ALC_INTR_STATUS, status | INTR_DIS_INT); 2561 2562 if (ifp->if_flags & IFF_RUNNING) { 2563 if (status & INTR_RX_PKT) { 2564 if (alc_rxintr(sc)) { 2565 alc_init(sc); 2566 return; 2567 } 2568 } 2569 if (status & (INTR_DMA_RD_TO_RST | INTR_DMA_WR_TO_RST | 2570 INTR_TXQ_TO_RST)) { 2571 if (status & INTR_DMA_RD_TO_RST) { 2572 if_printf(ifp, 2573 "DMA read error! -- resetting\n"); 2574 } 2575 if (status & INTR_DMA_WR_TO_RST) { 2576 if_printf(ifp, 2577 "DMA write error! -- resetting\n"); 2578 } 2579 if (status & INTR_TXQ_TO_RST) 2580 if_printf(ifp, "TxQ reset! -- resetting\n"); 2581 alc_init(sc); 2582 return; 2583 } 2584 if (!ifq_is_empty(&ifp->if_snd)) 2585 if_devstart(ifp); 2586 2587 /* Re-enable interrupts */ 2588 CSR_WRITE_4(sc, ALC_INTR_STATUS, 0x7FFFFFFF); 2589 } 2590 } 2591 2592 static void 2593 alc_txeof(struct alc_softc *sc) 2594 { 2595 struct ifnet *ifp; 2596 struct alc_txdesc *txd; 2597 uint32_t cons, prod; 2598 int prog; 2599 2600 ifp = sc->alc_ifp; 2601 2602 if (sc->alc_cdata.alc_tx_cnt == 0) 2603 return; 2604 bus_dmamap_sync(sc->alc_cdata.alc_tx_ring_tag, 2605 sc->alc_cdata.alc_tx_ring_map, BUS_DMASYNC_POSTWRITE); 2606 if ((sc->alc_flags & ALC_FLAG_CMB_BUG) == 0) { 2607 bus_dmamap_sync(sc->alc_cdata.alc_cmb_tag, 2608 sc->alc_cdata.alc_cmb_map, BUS_DMASYNC_POSTREAD); 2609 prod = sc->alc_rdata.alc_cmb->cons; 2610 } else 2611 prod = CSR_READ_4(sc, ALC_MBOX_TD_CONS_IDX); 2612 /* Assume we're using normal Tx priority queue. */ 2613 prod = (prod & MBOX_TD_CONS_LO_IDX_MASK) >> 2614 MBOX_TD_CONS_LO_IDX_SHIFT; 2615 cons = sc->alc_cdata.alc_tx_cons; 2616 /* 2617 * Go through our Tx list and free mbufs for those 2618 * frames which have been transmitted. 2619 */ 2620 for (prog = 0; cons != prod; prog++, 2621 ALC_DESC_INC(cons, ALC_TX_RING_CNT)) { 2622 if (sc->alc_cdata.alc_tx_cnt <= 0) 2623 break; 2624 prog++; 2625 ifq_clr_oactive(&ifp->if_snd); 2626 sc->alc_cdata.alc_tx_cnt--; 2627 txd = &sc->alc_cdata.alc_txdesc[cons]; 2628 if (txd->tx_m != NULL) { 2629 /* Reclaim transmitted mbufs. */ 2630 bus_dmamap_sync(sc->alc_cdata.alc_tx_tag, 2631 txd->tx_dmamap, BUS_DMASYNC_POSTWRITE); 2632 bus_dmamap_unload(sc->alc_cdata.alc_tx_tag, 2633 txd->tx_dmamap); 2634 m_freem(txd->tx_m); 2635 txd->tx_m = NULL; 2636 } 2637 } 2638 2639 if ((sc->alc_flags & ALC_FLAG_CMB_BUG) == 0) 2640 bus_dmamap_sync(sc->alc_cdata.alc_cmb_tag, 2641 sc->alc_cdata.alc_cmb_map, BUS_DMASYNC_PREREAD); 2642 sc->alc_cdata.alc_tx_cons = cons; 2643 /* 2644 * Unarm watchdog timer only when there is no pending 2645 * frames in Tx queue. 
2646 */ 2647 if (sc->alc_cdata.alc_tx_cnt == 0) 2648 sc->alc_watchdog_timer = 0; 2649 } 2650 2651 static int 2652 alc_newbuf(struct alc_softc *sc, struct alc_rxdesc *rxd, boolean_t wait) 2653 { 2654 struct mbuf *m; 2655 bus_dma_segment_t segs[1]; 2656 bus_dmamap_t map; 2657 int nsegs; 2658 int error; 2659 2660 m = m_getcl(wait ? MB_WAIT : MB_DONTWAIT, MT_DATA, M_PKTHDR); 2661 if (m == NULL) 2662 return (ENOBUFS); 2663 m->m_len = m->m_pkthdr.len = MCLBYTES; 2664 #ifdef foo 2665 /* Hardware require 4 bytes align */ 2666 m_adj(m, ETHER_ALIGN); 2667 #endif 2668 2669 error = bus_dmamap_load_mbuf_segment( 2670 sc->alc_cdata.alc_rx_tag, 2671 sc->alc_cdata.alc_rx_sparemap, 2672 m, segs, 1, &nsegs, BUS_DMA_NOWAIT); 2673 if (error) { 2674 m_freem(m); 2675 return (ENOBUFS); 2676 } 2677 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs)); 2678 2679 if (rxd->rx_m != NULL) { 2680 bus_dmamap_sync(sc->alc_cdata.alc_rx_tag, rxd->rx_dmamap, 2681 BUS_DMASYNC_POSTREAD); 2682 bus_dmamap_unload(sc->alc_cdata.alc_rx_tag, rxd->rx_dmamap); 2683 } 2684 map = rxd->rx_dmamap; 2685 rxd->rx_dmamap = sc->alc_cdata.alc_rx_sparemap; 2686 sc->alc_cdata.alc_rx_sparemap = map; 2687 bus_dmamap_sync(sc->alc_cdata.alc_rx_tag, rxd->rx_dmamap, 2688 BUS_DMASYNC_PREREAD); 2689 rxd->rx_m = m; 2690 rxd->rx_desc->addr = htole64(segs[0].ds_addr); 2691 return (0); 2692 } 2693 2694 static int 2695 alc_rxintr(struct alc_softc *sc) 2696 { 2697 struct ifnet *ifp; 2698 struct rx_rdesc *rrd; 2699 uint32_t nsegs, status; 2700 int rr_cons, prog; 2701 2702 bus_dmamap_sync(sc->alc_cdata.alc_rr_ring_tag, 2703 sc->alc_cdata.alc_rr_ring_map, 2704 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 2705 bus_dmamap_sync(sc->alc_cdata.alc_rx_ring_tag, 2706 sc->alc_cdata.alc_rx_ring_map, BUS_DMASYNC_POSTWRITE); 2707 rr_cons = sc->alc_cdata.alc_rr_cons; 2708 ifp = sc->alc_ifp; 2709 for (prog = 0; (ifp->if_flags & IFF_RUNNING) != 0;) { 2710 rrd = &sc->alc_rdata.alc_rr_ring[rr_cons]; 2711 status = le32toh(rrd->status); 2712 if ((status & RRD_VALID) == 0) 2713 break; 2714 nsegs = RRD_RD_CNT(le32toh(rrd->rdinfo)); 2715 if (nsegs == 0) { 2716 /* This should not happen! */ 2717 device_printf(sc->alc_dev, 2718 "unexpected segment count -- resetting\n"); 2719 return (EIO); 2720 } 2721 alc_rxeof(sc, rrd); 2722 /* Clear Rx return status. */ 2723 rrd->status = 0; 2724 ALC_DESC_INC(rr_cons, ALC_RR_RING_CNT); 2725 sc->alc_cdata.alc_rx_cons += nsegs; 2726 sc->alc_cdata.alc_rx_cons %= ALC_RR_RING_CNT; 2727 prog += nsegs; 2728 } 2729 2730 if (prog > 0) { 2731 /* Update the consumer index. */ 2732 sc->alc_cdata.alc_rr_cons = rr_cons; 2733 /* Sync Rx return descriptors. */ 2734 bus_dmamap_sync(sc->alc_cdata.alc_rr_ring_tag, 2735 sc->alc_cdata.alc_rr_ring_map, 2736 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 2737 /* 2738 * Sync updated Rx descriptors such that controller see 2739 * modified buffer addresses. 2740 */ 2741 bus_dmamap_sync(sc->alc_cdata.alc_rx_ring_tag, 2742 sc->alc_cdata.alc_rx_ring_map, BUS_DMASYNC_PREWRITE); 2743 /* 2744 * Let controller know availability of new Rx buffers. 2745 * Since alc(4) use RXQ_CFG_RD_BURST_DEFAULT descriptors 2746 * it may be possible to update ALC_MBOX_RD0_PROD_IDX 2747 * only when Rx buffer pre-fetching is required. In 2748 * addition we already set ALC_RX_RD_FREE_THRESH to 2749 * RX_RD_FREE_THRESH_LO_DEFAULT descriptors. However 2750 * it still seems that pre-fetching needs more 2751 * experimentation. 
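		 * For now the driver simply mirrors its software Rx
		 * consumer index into the RD0 producer mailbox after
		 * each reclaimed batch.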
		 */
		CSR_WRITE_4(sc, ALC_MBOX_RD0_PROD_IDX,
		    sc->alc_cdata.alc_rx_cons);
	}

	return (0);
}

/* Receive a frame. */
static void
alc_rxeof(struct alc_softc *sc, struct rx_rdesc *rrd)
{
	struct alc_rxdesc *rxd;
	struct ifnet *ifp;
	struct mbuf *mp, *m;
	uint32_t rdinfo, status, vtag;
	int count, nsegs, rx_cons;

	ifp = sc->alc_ifp;
	status = le32toh(rrd->status);
	rdinfo = le32toh(rrd->rdinfo);
	rx_cons = RRD_RD_IDX(rdinfo);
	nsegs = RRD_RD_CNT(rdinfo);

	sc->alc_cdata.alc_rxlen = RRD_BYTES(status);
	if ((status & (RRD_ERR_SUM | RRD_ERR_LENGTH)) != 0) {
		/*
		 * We want to pass the following frames to the upper
		 * layer regardless of the error status of the Rx
		 * return ring:
		 *
		 *  o IP/TCP/UDP checksum is bad.
		 *  o frame length does not match the protocol
		 *    specific length.
		 *
		 * Force the network stack to compute the checksum
		 * for errored frames.
		 */
		status |= RRD_TCP_UDPCSUM_NOK | RRD_IPCSUM_NOK;
		if ((status & (RRD_ERR_CRC | RRD_ERR_ALIGN | RRD_ERR_TRUNC |
		    RRD_ERR_RUNT)) != 0)
			return;
	}

	for (count = 0; count < nsegs; count++,
	    ALC_DESC_INC(rx_cons, ALC_RX_RING_CNT)) {
		rxd = &sc->alc_cdata.alc_rxdesc[rx_cons];
		mp = rxd->rx_m;
		/* Add a new receive buffer to the ring. */
		if (alc_newbuf(sc, rxd, FALSE) != 0) {
			ifp->if_iqdrops++;
			/* Reuse Rx buffers. */
			if (sc->alc_cdata.alc_rxhead != NULL)
				m_freem(sc->alc_cdata.alc_rxhead);
			break;
		}

		/*
		 * Assume we've received a full sized frame.
		 * The actual size is fixed up when we encounter the
		 * end of a multi-segmented frame.
		 */
		mp->m_len = sc->alc_buf_size;

		/* Chain received mbufs. */
		if (sc->alc_cdata.alc_rxhead == NULL) {
			sc->alc_cdata.alc_rxhead = mp;
			sc->alc_cdata.alc_rxtail = mp;
		} else {
			sc->alc_cdata.alc_rxprev_tail =
			    sc->alc_cdata.alc_rxtail;
			sc->alc_cdata.alc_rxtail->m_next = mp;
			sc->alc_cdata.alc_rxtail = mp;
		}

		if (count == nsegs - 1) {
			/* Last desc. for this frame. */
			m = sc->alc_cdata.alc_rxhead;
			/*
			 * It seems that the L1C/L2C controller has no
			 * way to be told to strip CRC bytes.
			 */
			m->m_pkthdr.len =
			    sc->alc_cdata.alc_rxlen - ETHER_CRC_LEN;
			if (nsegs > 1) {
				/* Set last mbuf size. */
				mp->m_len = sc->alc_cdata.alc_rxlen -
				    (nsegs - 1) * sc->alc_buf_size;
				/* Remove the CRC bytes in chained mbufs. */
				if (mp->m_len <= ETHER_CRC_LEN) {
					sc->alc_cdata.alc_rxtail =
					    sc->alc_cdata.alc_rxprev_tail;
					sc->alc_cdata.alc_rxtail->m_len -=
					    (ETHER_CRC_LEN - mp->m_len);
					sc->alc_cdata.alc_rxtail->m_next = NULL;
					m_freem(mp);
				} else {
					mp->m_len -= ETHER_CRC_LEN;
				}
			} else
				m->m_len = m->m_pkthdr.len;
			m->m_pkthdr.rcvif = ifp;
			/*
			 * Due to hardware bugs, Rx checksum offloading
			 * was intentionally disabled.
			 */
			if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0 &&
			    (status & RRD_VLAN_TAG) != 0) {
				vtag = RRD_VLAN(le32toh(rrd->vtag));
				m->m_pkthdr.ether_vlantag = ntohs(vtag);
				m->m_flags |= M_VLANTAG;
			}

			/* Pass it on. */
			ifp->if_input(ifp, m);
		}
	}
	/* Reset mbuf chains.
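	 * ALC_RXCHAIN_RESET() (in if_alcvar.h) clears the head/tail
	 * pointers and the accumulated length used to assemble
	 * multi-fragment frames, so the next frame starts from an
	 * empty chain.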
	 */
	ALC_RXCHAIN_RESET(sc);
}

static void
alc_tick(void *arg)
{
	struct alc_softc *sc = arg;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct mii_data *mii;

	lwkt_serialize_enter(ifp->if_serializer);

	mii = device_get_softc(sc->alc_miibus);
	mii_tick(mii);
	alc_stats_update(sc);
	/*
	 * alc(4) does not rely on Tx completion interrupts to reclaim
	 * transferred buffers. Instead, Tx completion interrupts are
	 * used as a hint to schedule the Tx task. So it is necessary
	 * to release transmitted buffers by kicking the Tx completion
	 * handler here. This limits the maximum reclamation delay to
	 * one hz tick.
	 */
	alc_txeof(sc);
	alc_watchdog(sc);
	callout_reset(&sc->alc_tick_ch, hz, alc_tick, sc);

	lwkt_serialize_exit(ifp->if_serializer);
}

static void
alc_reset(struct alc_softc *sc)
{
	uint32_t reg;
	int i;

	reg = CSR_READ_4(sc, ALC_MASTER_CFG) & 0xFFFF;
	reg |= MASTER_OOB_DIS_OFF | MASTER_RESET;
	CSR_WRITE_4(sc, ALC_MASTER_CFG, reg);

	for (i = ALC_RESET_TIMEOUT; i > 0; i--) {
		DELAY(10);
		if ((CSR_READ_4(sc, ALC_MASTER_CFG) & MASTER_RESET) == 0)
			break;
	}
	if (i == 0)
		device_printf(sc->alc_dev, "master reset timeout!\n");

	for (i = ALC_RESET_TIMEOUT; i > 0; i--) {
		if ((reg = CSR_READ_4(sc, ALC_IDLE_STATUS)) == 0)
			break;
		DELAY(10);
	}

	if (i == 0)
		device_printf(sc->alc_dev, "reset timeout(0x%08x)!\n", reg);
}

static void
alc_init(void *xsc)
{
	struct alc_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct mii_data *mii;
	uint8_t eaddr[ETHER_ADDR_LEN];
	bus_addr_t paddr;
	uint32_t reg, rxf_hi, rxf_lo;

	ASSERT_SERIALIZED(ifp->if_serializer);

	mii = device_get_softc(sc->alc_miibus);

	/*
	 * Cancel any pending I/O.
	 */
	alc_stop(sc);
	/*
	 * Reset the chip to a known state.
	 */
	alc_reset(sc);

	/* Initialize Rx descriptors. */
	if (alc_init_rx_ring(sc) != 0) {
		device_printf(sc->alc_dev, "no memory for Rx buffers.\n");
		alc_stop(sc);
		return;
	}
	alc_init_rr_ring(sc);
	alc_init_tx_ring(sc);
	alc_init_cmb(sc);
	alc_init_smb(sc);

	/* Reprogram the station address. */
	bcopy(IF_LLADDR(ifp), eaddr, ETHER_ADDR_LEN);
	CSR_WRITE_4(sc, ALC_PAR0,
	    eaddr[2] << 24 | eaddr[3] << 16 | eaddr[4] << 8 | eaddr[5]);
	CSR_WRITE_4(sc, ALC_PAR1, eaddr[0] << 8 | eaddr[1]);
	/*
	 * Clear the WOL status and disable all WOL features, as WOL
	 * would interfere with Rx operation under normal conditions.
	 */
	CSR_READ_4(sc, ALC_WOL_CFG);
	CSR_WRITE_4(sc, ALC_WOL_CFG, 0);
	/* Set Tx descriptor base addresses. */
	paddr = sc->alc_rdata.alc_tx_ring_paddr;
	CSR_WRITE_4(sc, ALC_TX_BASE_ADDR_HI, ALC_ADDR_HI(paddr));
	CSR_WRITE_4(sc, ALC_TDL_HEAD_ADDR_LO, ALC_ADDR_LO(paddr));
	/* We don't use the high priority ring. */
	CSR_WRITE_4(sc, ALC_TDH_HEAD_ADDR_LO, 0);
	/* Set Tx descriptor counter. */
	CSR_WRITE_4(sc, ALC_TD_RING_CNT,
	    (ALC_TX_RING_CNT << TD_RING_CNT_SHIFT) & TD_RING_CNT_MASK);
	/* Set Rx descriptor base addresses. */
	paddr = sc->alc_rdata.alc_rx_ring_paddr;
	CSR_WRITE_4(sc, ALC_RX_BASE_ADDR_HI, ALC_ADDR_HI(paddr));
	CSR_WRITE_4(sc, ALC_RD0_HEAD_ADDR_LO, ALC_ADDR_LO(paddr));
	/* We use one Rx ring.
 */
	CSR_WRITE_4(sc, ALC_RD1_HEAD_ADDR_LO, 0);
	CSR_WRITE_4(sc, ALC_RD2_HEAD_ADDR_LO, 0);
	CSR_WRITE_4(sc, ALC_RD3_HEAD_ADDR_LO, 0);
	/* Set Rx descriptor counter. */
	CSR_WRITE_4(sc, ALC_RD_RING_CNT,
	    (ALC_RX_RING_CNT << RD_RING_CNT_SHIFT) & RD_RING_CNT_MASK);

	/*
	 * Let hardware split jumbo frames into alc_max_buf_sized
	 * chunks if they do not fit the buffer size. The Rx return
	 * descriptor holds a counter that indicates how many fragments
	 * were made by the hardware. The buffer size should be a
	 * multiple of 8 bytes. Since the hardware limits the buffer
	 * size, always use the maximum value.
	 * For strict-alignment architectures make sure to reduce the
	 * buffer size by 8 bytes to make room for the alignment fixup.
	 */
	sc->alc_buf_size = RX_BUF_SIZE_MAX;
	CSR_WRITE_4(sc, ALC_RX_BUF_SIZE, sc->alc_buf_size);

	paddr = sc->alc_rdata.alc_rr_ring_paddr;
	/* Set Rx return descriptor base addresses. */
	CSR_WRITE_4(sc, ALC_RRD0_HEAD_ADDR_LO, ALC_ADDR_LO(paddr));
	/* We use one Rx return ring. */
	CSR_WRITE_4(sc, ALC_RRD1_HEAD_ADDR_LO, 0);
	CSR_WRITE_4(sc, ALC_RRD2_HEAD_ADDR_LO, 0);
	CSR_WRITE_4(sc, ALC_RRD3_HEAD_ADDR_LO, 0);
	/* Set Rx return descriptor counter. */
	CSR_WRITE_4(sc, ALC_RRD_RING_CNT,
	    (ALC_RR_RING_CNT << RRD_RING_CNT_SHIFT) & RRD_RING_CNT_MASK);
	paddr = sc->alc_rdata.alc_cmb_paddr;
	CSR_WRITE_4(sc, ALC_CMB_BASE_ADDR_LO, ALC_ADDR_LO(paddr));
	paddr = sc->alc_rdata.alc_smb_paddr;
	CSR_WRITE_4(sc, ALC_SMB_BASE_ADDR_HI, ALC_ADDR_HI(paddr));
	CSR_WRITE_4(sc, ALC_SMB_BASE_ADDR_LO, ALC_ADDR_LO(paddr));

	if (sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8152_B) {
		/* Reconfigure SRAM - Vendor magic. */
		CSR_WRITE_4(sc, ALC_SRAM_RX_FIFO_LEN, 0x000002A0);
		CSR_WRITE_4(sc, ALC_SRAM_TX_FIFO_LEN, 0x00000100);
		CSR_WRITE_4(sc, ALC_SRAM_RX_FIFO_ADDR, 0x029F0000);
		CSR_WRITE_4(sc, ALC_SRAM_RD0_ADDR, 0x02BF02A0);
		CSR_WRITE_4(sc, ALC_SRAM_TX_FIFO_ADDR, 0x03BF02C0);
		CSR_WRITE_4(sc, ALC_SRAM_TD_ADDR, 0x03DF03C0);
		CSR_WRITE_4(sc, ALC_TXF_WATER_MARK, 0x00000000);
		CSR_WRITE_4(sc, ALC_RD_DMA_CFG, 0x00000000);
	}

	/* Tell hardware that we're ready to load DMA blocks. */
	CSR_WRITE_4(sc, ALC_DMA_BLOCK, DMA_BLOCK_LOAD);

	/* Configure interrupt moderation timer. */
	reg = ALC_USECS(sc->alc_int_rx_mod) << IM_TIMER_RX_SHIFT;
	reg |= ALC_USECS(sc->alc_int_tx_mod) << IM_TIMER_TX_SHIFT;
	CSR_WRITE_4(sc, ALC_IM_TIMER, reg);
	/*
	 * We don't want automatic interrupt clearing, since the task
	 * queue for the interrupt should see the interrupt status.
	 */
	reg = MASTER_SA_TIMER_ENB;
	if (ALC_USECS(sc->alc_int_rx_mod) != 0)
		reg |= MASTER_IM_RX_TIMER_ENB;
	if (ALC_USECS(sc->alc_int_tx_mod) != 0)
		reg |= MASTER_IM_TX_TIMER_ENB;
	CSR_WRITE_4(sc, ALC_MASTER_CFG, reg);
	/*
	 * Disable the interrupt re-trigger timer. We don't want
	 * automatic re-triggering of un-ACKed interrupts.
	 */
	CSR_WRITE_4(sc, ALC_INTR_RETRIG_TIMER, ALC_USECS(0));
	/* Configure CMB. */
	if ((sc->alc_flags & ALC_FLAG_CMB_BUG) == 0) {
		CSR_WRITE_4(sc, ALC_CMB_TD_THRESH, 4);
		CSR_WRITE_4(sc, ALC_CMB_TX_TIMER, ALC_USECS(5000));
	} else {
		CSR_WRITE_4(sc, ALC_CMB_TX_TIMER, ALC_USECS(0));
	}
	/*
	 * Hardware can be configured to issue an SMB interrupt at a
	 * programmed interval.
	 * Since the driver already runs a callout every hz, we use
	 * that instead of relying on the periodic SMB interrupt.
	 */
	CSR_WRITE_4(sc, ALC_SMB_STAT_TIMER, ALC_USECS(0));
	/* Clear MAC statistics. */
	alc_stats_clear(sc);

	/*
	 * Always use the maximum frame size the controller can
	 * support. Otherwise received frames with a larger frame
	 * length than the alc(4) MTU would be silently dropped in
	 * hardware, which makes path-MTU discovery hard because the
	 * sender would never get any responses from the receiver.
	 * alc(4) supports multi-fragmented frames on the Rx path, so
	 * it has no trouble assembling fragmented frames. Using the
	 * maximum frame size also removes the need to reinitialize
	 * the hardware when the interface MTU configuration changes.
	 *
	 * Be conservative in what you do, be liberal in what you
	 * accept from others - RFC 793.
	 */
	CSR_WRITE_4(sc, ALC_FRAME_SIZE, sc->alc_ident->max_framelen);

	/* Disable header split(?) */
	CSR_WRITE_4(sc, ALC_HDS_CFG, 0);

	/* Configure IPG/IFG parameters. */
	CSR_WRITE_4(sc, ALC_IPG_IFG_CFG,
	    ((IPG_IFG_IPGT_DEFAULT << IPG_IFG_IPGT_SHIFT) & IPG_IFG_IPGT_MASK) |
	    ((IPG_IFG_MIFG_DEFAULT << IPG_IFG_MIFG_SHIFT) & IPG_IFG_MIFG_MASK) |
	    ((IPG_IFG_IPG1_DEFAULT << IPG_IFG_IPG1_SHIFT) & IPG_IFG_IPG1_MASK) |
	    ((IPG_IFG_IPG2_DEFAULT << IPG_IFG_IPG2_SHIFT) & IPG_IFG_IPG2_MASK));
	/* Set parameters for half-duplex media. */
	CSR_WRITE_4(sc, ALC_HDPX_CFG,
	    ((HDPX_CFG_LCOL_DEFAULT << HDPX_CFG_LCOL_SHIFT) &
	    HDPX_CFG_LCOL_MASK) |
	    ((HDPX_CFG_RETRY_DEFAULT << HDPX_CFG_RETRY_SHIFT) &
	    HDPX_CFG_RETRY_MASK) | HDPX_CFG_EXC_DEF_EN |
	    ((HDPX_CFG_ABEBT_DEFAULT << HDPX_CFG_ABEBT_SHIFT) &
	    HDPX_CFG_ABEBT_MASK) |
	    ((HDPX_CFG_JAMIPG_DEFAULT << HDPX_CFG_JAMIPG_SHIFT) &
	    HDPX_CFG_JAMIPG_MASK));
	/*
	 * Set the TSO/checksum offload threshold. For frames that are
	 * larger than this threshold, the hardware does no
	 * TSO/checksum offloading.
	 */
	CSR_WRITE_4(sc, ALC_TSO_OFFLOAD_THRESH,
	    (sc->alc_ident->max_framelen >> TSO_OFFLOAD_THRESH_UNIT_SHIFT) &
	    TSO_OFFLOAD_THRESH_MASK);
	/* Configure TxQ. */
	reg = (alc_dma_burst[sc->alc_dma_rd_burst] <<
	    TXQ_CFG_TX_FIFO_BURST_SHIFT) & TXQ_CFG_TX_FIFO_BURST_MASK;
	if (sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8152_B ||
	    sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8152_B2) {
		reg >>= 1;
	}
	reg |= (TXQ_CFG_TD_BURST_DEFAULT << TXQ_CFG_TD_BURST_SHIFT) &
	    TXQ_CFG_TD_BURST_MASK;
	CSR_WRITE_4(sc, ALC_TXQ_CFG, reg | TXQ_CFG_ENHANCED_MODE);

	/* Configure Rx free descriptor pre-fetching. */
	CSR_WRITE_4(sc, ALC_RX_RD_FREE_THRESH,
	    ((RX_RD_FREE_THRESH_HI_DEFAULT << RX_RD_FREE_THRESH_HI_SHIFT) &
	    RX_RD_FREE_THRESH_HI_MASK) |
	    ((RX_RD_FREE_THRESH_LO_DEFAULT << RX_RD_FREE_THRESH_LO_SHIFT) &
	    RX_RD_FREE_THRESH_LO_MASK));

	/*
	 * Configure flow control parameters.
3136 * XON : 80% of Rx FIFO 3137 * XOFF : 30% of Rx FIFO 3138 */ 3139 if (sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8131 || 3140 sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8132) { 3141 reg = CSR_READ_4(sc, ALC_SRAM_RX_FIFO_LEN); 3142 rxf_hi = (reg * 8) / 10; 3143 rxf_lo = (reg * 3) / 10; 3144 CSR_WRITE_4(sc, ALC_RX_FIFO_PAUSE_THRESH, 3145 ((rxf_lo << RX_FIFO_PAUSE_THRESH_LO_SHIFT) & 3146 RX_FIFO_PAUSE_THRESH_LO_MASK) | 3147 ((rxf_hi << RX_FIFO_PAUSE_THRESH_HI_SHIFT) & 3148 RX_FIFO_PAUSE_THRESH_HI_MASK)); 3149 } 3150 3151 if (sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8152_B || 3152 sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8151_V2) { 3153 CSR_WRITE_4(sc, ALC_SERDES_LOCK, 3154 CSR_READ_4(sc, ALC_SERDES_LOCK) | SERDES_MAC_CLK_SLOWDOWN | 3155 SERDES_PHY_CLK_SLOWDOWN); 3156 } 3157 3158 /* Disable RSS until I understand L1C/L2C's RSS logic. */ 3159 CSR_WRITE_4(sc, ALC_RSS_IDT_TABLE0, 0); 3160 CSR_WRITE_4(sc, ALC_RSS_CPU, 0); 3161 3162 /* Configure RxQ. */ 3163 reg = (RXQ_CFG_RD_BURST_DEFAULT << RXQ_CFG_RD_BURST_SHIFT) & 3164 RXQ_CFG_RD_BURST_MASK; 3165 reg |= RXQ_CFG_RSS_MODE_DIS; 3166 if ((sc->alc_flags & ALC_FLAG_ASPM_MON) != 0) 3167 reg |= RXQ_CFG_ASPM_THROUGHPUT_LIMIT_1M; 3168 CSR_WRITE_4(sc, ALC_RXQ_CFG, reg); 3169 3170 /* Configure DMA parameters. */ 3171 reg = DMA_CFG_OUT_ORDER | DMA_CFG_RD_REQ_PRI; 3172 reg |= sc->alc_rcb; 3173 if ((sc->alc_flags & ALC_FLAG_CMB_BUG) == 0) 3174 reg |= DMA_CFG_CMB_ENB; 3175 if ((sc->alc_flags & ALC_FLAG_SMB_BUG) == 0) 3176 reg |= DMA_CFG_SMB_ENB; 3177 else 3178 reg |= DMA_CFG_SMB_DIS; 3179 reg |= (sc->alc_dma_rd_burst & DMA_CFG_RD_BURST_MASK) << 3180 DMA_CFG_RD_BURST_SHIFT; 3181 reg |= (sc->alc_dma_wr_burst & DMA_CFG_WR_BURST_MASK) << 3182 DMA_CFG_WR_BURST_SHIFT; 3183 reg |= (DMA_CFG_RD_DELAY_CNT_DEFAULT << DMA_CFG_RD_DELAY_CNT_SHIFT) & 3184 DMA_CFG_RD_DELAY_CNT_MASK; 3185 reg |= (DMA_CFG_WR_DELAY_CNT_DEFAULT << DMA_CFG_WR_DELAY_CNT_SHIFT) & 3186 DMA_CFG_WR_DELAY_CNT_MASK; 3187 CSR_WRITE_4(sc, ALC_DMA_CFG, reg); 3188 3189 /* 3190 * Configure Tx/Rx MACs. 3191 * - Auto-padding for short frames. 3192 * - Enable CRC generation. 3193 * Actual reconfiguration of MAC for resolved speed/duplex 3194 * is followed after detection of link establishment. 3195 * AR813x/AR815x always does checksum computation regardless 3196 * of MAC_CFG_RXCSUM_ENB bit. Also the controller is known to 3197 * have bug in protocol field in Rx return structure so 3198 * these controllers can't handle fragmented frames. Disable 3199 * Rx checksum offloading until there is a newer controller 3200 * that has sane implementation. 3201 */ 3202 reg = MAC_CFG_TX_CRC_ENB | MAC_CFG_TX_AUTO_PAD | MAC_CFG_FULL_DUPLEX | 3203 ((MAC_CFG_PREAMBLE_DEFAULT << MAC_CFG_PREAMBLE_SHIFT) & 3204 MAC_CFG_PREAMBLE_MASK); 3205 if (sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8151 || 3206 sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8151_V2 || 3207 sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8152_B2) { 3208 reg |= MAC_CFG_HASH_ALG_CRC32 | MAC_CFG_SPEED_MODE_SW; 3209 } 3210 if ((sc->alc_flags & ALC_FLAG_FASTETHER) != 0) 3211 reg |= MAC_CFG_SPEED_10_100; 3212 else 3213 reg |= MAC_CFG_SPEED_1000; 3214 CSR_WRITE_4(sc, ALC_MAC_CFG, reg); 3215 3216 /* Set up the receive filter. */ 3217 alc_rxfilter(sc); 3218 alc_rxvlan(sc); 3219 3220 /* Acknowledge all pending interrupts and clear it. 
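	 * The mask write unblocks the interrupts we are interested in;
	 * writing all-ones to the status register acks anything
	 * already latched, and the final write of zero leaves the
	 * status clean.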
	 */
	CSR_WRITE_4(sc, ALC_INTR_MASK, ALC_INTRS);
	CSR_WRITE_4(sc, ALC_INTR_STATUS, 0xFFFFFFFF);
	CSR_WRITE_4(sc, ALC_INTR_STATUS, 0);

	sc->alc_flags &= ~ALC_FLAG_LINK;
	/* Switch to the current media. */
	mii_mediachg(mii);

	callout_reset(&sc->alc_tick_ch, hz, alc_tick, sc);

	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);
}

static void
alc_stop(struct alc_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct alc_txdesc *txd;
	struct alc_rxdesc *rxd;
	uint32_t reg;
	int i;

	ASSERT_SERIALIZED(ifp->if_serializer);

	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp->if_flags &= ~IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);
	sc->alc_flags &= ~ALC_FLAG_LINK;
	callout_stop(&sc->alc_tick_ch);
	sc->alc_watchdog_timer = 0;
	alc_stats_update(sc);
	/* Disable interrupts. */
	CSR_WRITE_4(sc, ALC_INTR_MASK, 0);
	CSR_WRITE_4(sc, ALC_INTR_STATUS, 0xFFFFFFFF);
	alc_stop_queue(sc);
	/* Disable DMA. */
	reg = CSR_READ_4(sc, ALC_DMA_CFG);
	reg &= ~(DMA_CFG_CMB_ENB | DMA_CFG_SMB_ENB);
	reg |= DMA_CFG_SMB_DIS;
	CSR_WRITE_4(sc, ALC_DMA_CFG, reg);
	DELAY(1000);
	/* Stop Rx/Tx MACs. */
	alc_stop_mac(sc);
	/* Disable interrupts which might be touched in taskq handler. */
	CSR_WRITE_4(sc, ALC_INTR_STATUS, 0xFFFFFFFF);

	/* Reclaim Rx buffers that have been processed. */
	if (sc->alc_cdata.alc_rxhead != NULL)
		m_freem(sc->alc_cdata.alc_rxhead);
	ALC_RXCHAIN_RESET(sc);
	/*
	 * Free Tx/Rx mbufs still in the queues.
	 */
	for (i = 0; i < ALC_RX_RING_CNT; i++) {
		rxd = &sc->alc_cdata.alc_rxdesc[i];
		if (rxd->rx_m != NULL) {
			bus_dmamap_sync(sc->alc_cdata.alc_rx_tag,
			    rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->alc_cdata.alc_rx_tag,
			    rxd->rx_dmamap);
			m_freem(rxd->rx_m);
			rxd->rx_m = NULL;
		}
	}
	for (i = 0; i < ALC_TX_RING_CNT; i++) {
		txd = &sc->alc_cdata.alc_txdesc[i];
		if (txd->tx_m != NULL) {
			bus_dmamap_sync(sc->alc_cdata.alc_tx_tag,
			    txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->alc_cdata.alc_tx_tag,
			    txd->tx_dmamap);
			m_freem(txd->tx_m);
			txd->tx_m = NULL;
		}
	}
}

static void
alc_stop_mac(struct alc_softc *sc)
{
	uint32_t reg;
	int i;

	/* Disable Rx/Tx MAC. */
	reg = CSR_READ_4(sc, ALC_MAC_CFG);
	if ((reg & (MAC_CFG_TX_ENB | MAC_CFG_RX_ENB)) != 0) {
		reg &= ~(MAC_CFG_TX_ENB | MAC_CFG_RX_ENB);
		CSR_WRITE_4(sc, ALC_MAC_CFG, reg);
	}
	for (i = ALC_TIMEOUT; i > 0; i--) {
		reg = CSR_READ_4(sc, ALC_IDLE_STATUS);
		if (reg == 0)
			break;
		DELAY(10);
	}
	if (i == 0)
		device_printf(sc->alc_dev,
		    "could not disable Rx/Tx MAC(0x%08x)!\n", reg);
}

static void
alc_start_queue(struct alc_softc *sc)
{
	uint32_t qcfg[] = {
		0,
		RXQ_CFG_QUEUE0_ENB,
		RXQ_CFG_QUEUE0_ENB | RXQ_CFG_QUEUE1_ENB,
		RXQ_CFG_QUEUE0_ENB | RXQ_CFG_QUEUE1_ENB | RXQ_CFG_QUEUE2_ENB,
		RXQ_CFG_ENB
	};
	uint32_t cfg;

	/* Enable RxQ. */
	cfg = CSR_READ_4(sc, ALC_RXQ_CFG);
	cfg &= ~RXQ_CFG_ENB;
	cfg |= qcfg[1];
	CSR_WRITE_4(sc, ALC_RXQ_CFG, cfg);
	/* Enable TxQ.
 */
	cfg = CSR_READ_4(sc, ALC_TXQ_CFG);
	cfg |= TXQ_CFG_ENB;
	CSR_WRITE_4(sc, ALC_TXQ_CFG, cfg);
}

static void
alc_stop_queue(struct alc_softc *sc)
{
	uint32_t reg;
	int i;

	/* Disable RxQ. */
	reg = CSR_READ_4(sc, ALC_RXQ_CFG);
	if ((reg & RXQ_CFG_ENB) != 0) {
		reg &= ~RXQ_CFG_ENB;
		CSR_WRITE_4(sc, ALC_RXQ_CFG, reg);
	}
	/* Disable TxQ. */
	reg = CSR_READ_4(sc, ALC_TXQ_CFG);
	if ((reg & TXQ_CFG_ENB) != 0) {
		reg &= ~TXQ_CFG_ENB;
		CSR_WRITE_4(sc, ALC_TXQ_CFG, reg);
	}
	for (i = ALC_TIMEOUT; i > 0; i--) {
		reg = CSR_READ_4(sc, ALC_IDLE_STATUS);
		if ((reg & (IDLE_STATUS_RXQ | IDLE_STATUS_TXQ)) == 0)
			break;
		DELAY(10);
	}
	if (i == 0)
		device_printf(sc->alc_dev,
		    "could not disable RxQ/TxQ (0x%08x)!\n", reg);
}

static void
alc_init_tx_ring(struct alc_softc *sc)
{
	struct alc_ring_data *rd;
	struct alc_txdesc *txd;
	int i;

	sc->alc_cdata.alc_tx_prod = 0;
	sc->alc_cdata.alc_tx_cons = 0;
	sc->alc_cdata.alc_tx_cnt = 0;

	rd = &sc->alc_rdata;
	bzero(rd->alc_tx_ring, ALC_TX_RING_SZ);
	for (i = 0; i < ALC_TX_RING_CNT; i++) {
		txd = &sc->alc_cdata.alc_txdesc[i];
		txd->tx_m = NULL;
	}

	bus_dmamap_sync(sc->alc_cdata.alc_tx_ring_tag,
	    sc->alc_cdata.alc_tx_ring_map, BUS_DMASYNC_PREWRITE);
}

static int
alc_init_rx_ring(struct alc_softc *sc)
{
	struct alc_ring_data *rd;
	struct alc_rxdesc *rxd;
	int i;

	sc->alc_cdata.alc_rx_cons = ALC_RX_RING_CNT - 1;
	rd = &sc->alc_rdata;
	bzero(rd->alc_rx_ring, ALC_RX_RING_SZ);
	for (i = 0; i < ALC_RX_RING_CNT; i++) {
		rxd = &sc->alc_cdata.alc_rxdesc[i];
		rxd->rx_m = NULL;
		rxd->rx_desc = &rd->alc_rx_ring[i];
		if (alc_newbuf(sc, rxd, TRUE) != 0)
			return (ENOBUFS);
	}

	/*
	 * Since the controller does not update Rx descriptors, the
	 * driver does not have to read them back, so
	 * BUS_DMASYNC_PREWRITE is enough to ensure coherence.
	 */
	bus_dmamap_sync(sc->alc_cdata.alc_rx_ring_tag,
	    sc->alc_cdata.alc_rx_ring_map, BUS_DMASYNC_PREWRITE);
	/* Let controller know availability of new Rx buffers.
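	 * alc_rx_cons was initialized to the last ring entry
	 * (ALC_RX_RING_CNT - 1) above, so this single producer update
	 * exposes the whole ring to the hardware.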
*/ 3424 CSR_WRITE_4(sc, ALC_MBOX_RD0_PROD_IDX, sc->alc_cdata.alc_rx_cons); 3425 3426 return (0); 3427 } 3428 3429 static void 3430 alc_init_rr_ring(struct alc_softc *sc) 3431 { 3432 struct alc_ring_data *rd; 3433 3434 sc->alc_cdata.alc_rr_cons = 0; 3435 ALC_RXCHAIN_RESET(sc); 3436 3437 rd = &sc->alc_rdata; 3438 bzero(rd->alc_rr_ring, ALC_RR_RING_SZ); 3439 bus_dmamap_sync(sc->alc_cdata.alc_rr_ring_tag, 3440 sc->alc_cdata.alc_rr_ring_map, 3441 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 3442 } 3443 3444 static void 3445 alc_init_cmb(struct alc_softc *sc) 3446 { 3447 struct alc_ring_data *rd; 3448 3449 rd = &sc->alc_rdata; 3450 bzero(rd->alc_cmb, ALC_CMB_SZ); 3451 bus_dmamap_sync(sc->alc_cdata.alc_cmb_tag, sc->alc_cdata.alc_cmb_map, 3452 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 3453 } 3454 3455 static void 3456 alc_init_smb(struct alc_softc *sc) 3457 { 3458 struct alc_ring_data *rd; 3459 3460 rd = &sc->alc_rdata; 3461 bzero(rd->alc_smb, ALC_SMB_SZ); 3462 bus_dmamap_sync(sc->alc_cdata.alc_smb_tag, sc->alc_cdata.alc_smb_map, 3463 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 3464 } 3465 3466 static void 3467 alc_rxvlan(struct alc_softc *sc) 3468 { 3469 struct ifnet *ifp; 3470 uint32_t reg; 3471 3472 ifp = sc->alc_ifp; 3473 reg = CSR_READ_4(sc, ALC_MAC_CFG); 3474 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) 3475 reg |= MAC_CFG_VLAN_TAG_STRIP; 3476 else 3477 reg &= ~MAC_CFG_VLAN_TAG_STRIP; 3478 CSR_WRITE_4(sc, ALC_MAC_CFG, reg); 3479 } 3480 3481 static void 3482 alc_rxfilter(struct alc_softc *sc) 3483 { 3484 struct ifnet *ifp; 3485 struct ifmultiaddr *ifma; 3486 uint32_t crc; 3487 uint32_t mchash[2]; 3488 uint32_t rxcfg; 3489 3490 ifp = sc->alc_ifp; 3491 3492 bzero(mchash, sizeof(mchash)); 3493 rxcfg = CSR_READ_4(sc, ALC_MAC_CFG); 3494 rxcfg &= ~(MAC_CFG_ALLMULTI | MAC_CFG_BCAST | MAC_CFG_PROMISC); 3495 if ((ifp->if_flags & IFF_BROADCAST) != 0) 3496 rxcfg |= MAC_CFG_BCAST; 3497 if ((ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) != 0) { 3498 if ((ifp->if_flags & IFF_PROMISC) != 0) 3499 rxcfg |= MAC_CFG_PROMISC; 3500 if ((ifp->if_flags & IFF_ALLMULTI) != 0) 3501 rxcfg |= MAC_CFG_ALLMULTI; 3502 mchash[0] = 0xFFFFFFFF; 3503 mchash[1] = 0xFFFFFFFF; 3504 goto chipit; 3505 } 3506 3507 #if 0 3508 /* XXX */ 3509 if_maddr_rlock(ifp); 3510 #endif 3511 TAILQ_FOREACH(ifma, &sc->alc_ifp->if_multiaddrs, ifma_link) { 3512 if (ifma->ifma_addr->sa_family != AF_LINK) 3513 continue; 3514 crc = ether_crc32_be(LLADDR((struct sockaddr_dl *) 3515 ifma->ifma_addr), ETHER_ADDR_LEN); 3516 mchash[crc >> 31] |= 1 << ((crc >> 26) & 0x1f); 3517 } 3518 #if 0 3519 /* XXX */ 3520 if_maddr_runlock(ifp); 3521 #endif 3522 3523 chipit: 3524 CSR_WRITE_4(sc, ALC_MAR0, mchash[0]); 3525 CSR_WRITE_4(sc, ALC_MAR1, mchash[1]); 3526 CSR_WRITE_4(sc, ALC_MAC_CFG, rxcfg); 3527 } 3528 3529 static int 3530 sysctl_hw_alc_proc_limit(SYSCTL_HANDLER_ARGS) 3531 { 3532 return (sysctl_int_range(oidp, arg1, arg2, req, 3533 ALC_PROC_MIN, ALC_PROC_MAX)); 3534 } 3535 3536 static int 3537 sysctl_hw_alc_int_mod(SYSCTL_HANDLER_ARGS) 3538 { 3539 3540 return (sysctl_int_range(oidp, arg1, arg2, req, 3541 ALC_IM_TIMER_MIN, ALC_IM_TIMER_MAX)); 3542 } 3543