/*	$OpenBSD: if_alc.c,v 1.42 2017/09/08 05:36:52 deraadt Exp $	*/
/*-
 * Copyright (c) 2009, Pyun YongHyeon <yongari@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/* Driver for Atheros AR8131/AR8132 PCIe Ethernet. */

#include "bpfilter.h"
#include "vlan.h"

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/queue.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/timeout.h>
#include <sys/socket.h>

#include <machine/bus.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#include <netinet/in.h>
#include <netinet/if_ether.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_alcreg.h>

int	alc_match(struct device *, void *, void *);
void	alc_attach(struct device *, struct device *, void *);
int	alc_detach(struct device *, int);
int	alc_activate(struct device *, int);

int	alc_init(struct ifnet *);
void	alc_start(struct ifnet *);
int	alc_ioctl(struct ifnet *, u_long, caddr_t);
void	alc_watchdog(struct ifnet *);
int	alc_mediachange(struct ifnet *);
void	alc_mediastatus(struct ifnet *, struct ifmediareq *);

void	alc_aspm(struct alc_softc *, uint64_t);
void	alc_disable_l0s_l1(struct alc_softc *);
int	alc_dma_alloc(struct alc_softc *);
void	alc_dma_free(struct alc_softc *);
int	alc_encap(struct alc_softc *, struct mbuf *);
void	alc_get_macaddr(struct alc_softc *);
void	alc_init_cmb(struct alc_softc *);
void	alc_init_rr_ring(struct alc_softc *);
int	alc_init_rx_ring(struct alc_softc *);
void	alc_init_smb(struct alc_softc *);
void	alc_init_tx_ring(struct alc_softc *);
int	alc_intr(void *);
void	alc_mac_config(struct alc_softc *);
int	alc_miibus_readreg(struct device *, int, int);
void	alc_miibus_statchg(struct device *);
void	alc_miibus_writereg(struct device *, int, int, int);
int	alc_newbuf(struct alc_softc *, struct alc_rxdesc *);
void	alc_phy_down(struct alc_softc *);
void	alc_phy_reset(struct alc_softc *);
void	alc_reset(struct alc_softc *);
void	alc_rxeof(struct alc_softc *, struct rx_rdesc *);
void	alc_rxintr(struct alc_softc *);
void	alc_iff(struct alc_softc *);
void	alc_rxvlan(struct alc_softc *);
void	alc_start_queue(struct alc_softc *);
void	alc_stats_clear(struct alc_softc *);
void	alc_stats_update(struct alc_softc *);
void	alc_stop(struct alc_softc *);
void	alc_stop_mac(struct alc_softc *);
void	alc_stop_queue(struct alc_softc *);
void	alc_tick(void *);
void	alc_txeof(struct alc_softc *);

uint32_t alc_dma_burst[] = { 128, 256, 512, 1024, 2048, 4096, 0 };

const struct pci_matchid alc_devices[] = {
	{ PCI_VENDOR_ATTANSIC, PCI_PRODUCT_ATTANSIC_L1C },
	{ PCI_VENDOR_ATTANSIC, PCI_PRODUCT_ATTANSIC_L2C },
	{ PCI_VENDOR_ATTANSIC, PCI_PRODUCT_ATTANSIC_L1D },
	{ PCI_VENDOR_ATTANSIC, PCI_PRODUCT_ATTANSIC_L1D_1 },
	{ PCI_VENDOR_ATTANSIC, PCI_PRODUCT_ATTANSIC_L2C_1 },
	{ PCI_VENDOR_ATTANSIC, PCI_PRODUCT_ATTANSIC_L2C_2 }
};

struct cfattach alc_ca = {
	sizeof (struct alc_softc), alc_match, alc_attach, NULL,
	alc_activate
};

struct cfdriver alc_cd = {
	NULL, "alc", DV_IFNET
};

int alcdebug = 0;
#define	DPRINTF(x)	do { if (alcdebug) printf x; } while (0)

#define ALC_CSUM_FEATURES	(M_TCP_CSUM_OUT | M_UDP_CSUM_OUT)

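/*
 * PHY access goes through the MAC's MDIO mailbox: a command word is
 * written to ALC_MDIO and the driver polls until the controller clears
 * the execute/busy bits.  A minimal sketch of the handshake used by the
 * accessors below:
 *
 *	CSR_WRITE_4(sc, ALC_MDIO, MDIO_OP_EXECUTE | MDIO_OP_READ |
 *	    MDIO_SUP_PREAMBLE | MDIO_CLK_25_4 | MDIO_REG_ADDR(reg));
 *	do {
 *		DELAY(5);
 *		v = CSR_READ_4(sc, ALC_MDIO);
 *	} while ((v & (MDIO_OP_EXECUTE | MDIO_OP_BUSY)) != 0);
 *
 * On timeout the accessors fail soft: reads return 0 and writes are
 * dropped, with a diagnostic printed on the console.
 */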
int
alc_miibus_readreg(struct device *dev, int phy, int reg)
{
	struct alc_softc *sc = (struct alc_softc *)dev;
	uint32_t v;
	int i;

	if (phy != sc->alc_phyaddr)
		return (0);

	/*
	 * For AR8132 fast ethernet controller, do not report 1000baseT
	 * capability to mii(4). Even though AR8132 uses the same
	 * model/revision number as the F1 gigabit PHY, the PHY has no
	 * ability to establish a 1000baseT link.
	 */
	if ((sc->alc_flags & ALC_FLAG_FASTETHER) != 0 &&
	    reg == MII_EXTSR)
		return (0);

	CSR_WRITE_4(sc, ALC_MDIO, MDIO_OP_EXECUTE | MDIO_OP_READ |
	    MDIO_SUP_PREAMBLE | MDIO_CLK_25_4 | MDIO_REG_ADDR(reg));
	for (i = ALC_PHY_TIMEOUT; i > 0; i--) {
		DELAY(5);
		v = CSR_READ_4(sc, ALC_MDIO);
		if ((v & (MDIO_OP_EXECUTE | MDIO_OP_BUSY)) == 0)
			break;
	}

	if (i == 0) {
		printf("%s: phy read timeout: phy %d, reg %d\n",
		    sc->sc_dev.dv_xname, phy, reg);
		return (0);
	}

	return ((v & MDIO_DATA_MASK) >> MDIO_DATA_SHIFT);
}

void
alc_miibus_writereg(struct device *dev, int phy, int reg, int val)
{
	struct alc_softc *sc = (struct alc_softc *)dev;
	uint32_t v;
	int i;

	if (phy != sc->alc_phyaddr)
		return;

	CSR_WRITE_4(sc, ALC_MDIO, MDIO_OP_EXECUTE | MDIO_OP_WRITE |
	    (val & MDIO_DATA_MASK) << MDIO_DATA_SHIFT |
	    MDIO_SUP_PREAMBLE | MDIO_CLK_25_4 | MDIO_REG_ADDR(reg));
	for (i = ALC_PHY_TIMEOUT; i > 0; i--) {
		DELAY(5);
		v = CSR_READ_4(sc, ALC_MDIO);
		if ((v & (MDIO_OP_EXECUTE | MDIO_OP_BUSY)) == 0)
			break;
	}

	if (i == 0)
		printf("%s: phy write timeout: phy %d, reg %d\n",
		    sc->sc_dev.dv_xname, phy, reg);
}

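/*
 * Link state changes are handled in three steps: stop the Rx/Tx queues
 * and MACs, reprogram the MAC for the newly resolved speed/duplex/
 * flow-control parameters, and re-enable everything only once a usable
 * link has been established.  The MACs are stopped first since the
 * resolved parameters may not be applied reliably while Rx/Tx are
 * still enabled.
 */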
void
alc_miibus_statchg(struct device *dev)
{
	struct alc_softc *sc = (struct alc_softc *)dev;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct mii_data *mii = &sc->sc_miibus;
	uint32_t reg;

	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return;

	sc->alc_flags &= ~ALC_FLAG_LINK;
	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
	    (IFM_ACTIVE | IFM_AVALID)) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			sc->alc_flags |= ALC_FLAG_LINK;
			break;
		case IFM_1000_T:
			if ((sc->alc_flags & ALC_FLAG_FASTETHER) == 0)
				sc->alc_flags |= ALC_FLAG_LINK;
			break;
		default:
			break;
		}
	}
	alc_stop_queue(sc);
	/* Stop Rx/Tx MACs. */
	alc_stop_mac(sc);

	/* Program MACs with resolved speed/duplex/flow-control. */
	if ((sc->alc_flags & ALC_FLAG_LINK) != 0) {
		alc_start_queue(sc);
		alc_mac_config(sc);
		/* Re-enable Tx/Rx MACs. */
		reg = CSR_READ_4(sc, ALC_MAC_CFG);
		reg |= MAC_CFG_TX_ENB | MAC_CFG_RX_ENB;
		CSR_WRITE_4(sc, ALC_MAC_CFG, reg);
		alc_aspm(sc, IFM_SUBTYPE(mii->mii_media_active));
	}
}

void
alc_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct alc_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_miibus;

	if ((ifp->if_flags & IFF_UP) == 0)
		return;

	mii_pollstat(mii);
	ifmr->ifm_status = mii->mii_media_status;
	ifmr->ifm_active = mii->mii_media_active;
}

int
alc_mediachange(struct ifnet *ifp)
{
	struct alc_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_miibus;
	int error;

	if (mii->mii_instance != 0) {
		struct mii_softc *miisc;

		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
	error = mii_mediachg(mii);

	return (error);
}

int
alc_match(struct device *dev, void *match, void *aux)
{
	return pci_matchbyid((struct pci_attach_args *)aux, alc_devices,
	    nitems(alc_devices));
}

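/*
 * The station address lives in the PAR0/PAR1 registers with the two
 * most significant bytes in PAR1 and the remaining four in PAR0.
 * For example (illustrative values only), PAR1 = 0x0000001B and
 * PAR0 = 0x2FC01D33 decode to the address 00:1b:2f:c0:1d:33.
 */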
void
alc_get_macaddr(struct alc_softc *sc)
{
	uint32_t ea[2], opt;
	uint16_t val;
	int eeprom, i;

	eeprom = 0;
	opt = CSR_READ_4(sc, ALC_OPT_CFG);
	if ((CSR_READ_4(sc, ALC_MASTER_CFG) & MASTER_OTP_SEL) != 0 &&
	    (CSR_READ_4(sc, ALC_TWSI_DEBUG) & TWSI_DEBUG_DEV_EXIST) != 0) {
		/*
		 * EEPROM found, let TWSI reload EEPROM configuration.
		 * This will set ethernet address of controller.
		 */
		eeprom++;
		switch (sc->sc_product) {
		case PCI_PRODUCT_ATTANSIC_L1C:
		case PCI_PRODUCT_ATTANSIC_L2C:
			if ((opt & OPT_CFG_CLK_ENB) == 0) {
				opt |= OPT_CFG_CLK_ENB;
				CSR_WRITE_4(sc, ALC_OPT_CFG, opt);
				CSR_READ_4(sc, ALC_OPT_CFG);
				DELAY(1000);
			}
			break;
		case PCI_PRODUCT_ATTANSIC_L1D:
		case PCI_PRODUCT_ATTANSIC_L1D_1:
		case PCI_PRODUCT_ATTANSIC_L2C_1:
		case PCI_PRODUCT_ATTANSIC_L2C_2:
			alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr,
			    ALC_MII_DBG_ADDR, 0x00);
			val = alc_miibus_readreg(&sc->sc_dev, sc->alc_phyaddr,
			    ALC_MII_DBG_DATA);
			alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr,
			    ALC_MII_DBG_DATA, val & 0xFF7F);
			alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr,
			    ALC_MII_DBG_ADDR, 0x3B);
			val = alc_miibus_readreg(&sc->sc_dev, sc->alc_phyaddr,
			    ALC_MII_DBG_DATA);
			alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr,
			    ALC_MII_DBG_DATA, val | 0x0008);
			DELAY(20);
			break;
		}

		CSR_WRITE_4(sc, ALC_LTSSM_ID_CFG,
		    CSR_READ_4(sc, ALC_LTSSM_ID_CFG) & ~LTSSM_ID_WRO_ENB);
		CSR_WRITE_4(sc, ALC_WOL_CFG, 0);
		CSR_READ_4(sc, ALC_WOL_CFG);

		CSR_WRITE_4(sc, ALC_TWSI_CFG, CSR_READ_4(sc, ALC_TWSI_CFG) |
		    TWSI_CFG_SW_LD_START);
		for (i = 100; i > 0; i--) {
			DELAY(1000);
			if ((CSR_READ_4(sc, ALC_TWSI_CFG) &
			    TWSI_CFG_SW_LD_START) == 0)
				break;
		}
		if (i == 0)
			printf("%s: reloading EEPROM timeout!\n",
			    sc->sc_dev.dv_xname);
	} else {
		if (alcdebug)
			printf("%s: EEPROM not found!\n", sc->sc_dev.dv_xname);
	}
	if (eeprom != 0) {
		switch (sc->sc_product) {
		case PCI_PRODUCT_ATTANSIC_L1C:
		case PCI_PRODUCT_ATTANSIC_L2C:
			if ((opt & OPT_CFG_CLK_ENB) != 0) {
				opt &= ~OPT_CFG_CLK_ENB;
				CSR_WRITE_4(sc, ALC_OPT_CFG, opt);
				CSR_READ_4(sc, ALC_OPT_CFG);
				DELAY(1000);
			}
			break;
		case PCI_PRODUCT_ATTANSIC_L1D:
		case PCI_PRODUCT_ATTANSIC_L1D_1:
		case PCI_PRODUCT_ATTANSIC_L2C_1:
		case PCI_PRODUCT_ATTANSIC_L2C_2:
			alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr,
			    ALC_MII_DBG_ADDR, 0x00);
			val = alc_miibus_readreg(&sc->sc_dev, sc->alc_phyaddr,
			    ALC_MII_DBG_DATA);
			alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr,
			    ALC_MII_DBG_DATA, val | 0x0080);
			alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr,
			    ALC_MII_DBG_ADDR, 0x3B);
			val = alc_miibus_readreg(&sc->sc_dev, sc->alc_phyaddr,
			    ALC_MII_DBG_DATA);
			alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr,
			    ALC_MII_DBG_DATA, val & 0xFFF7);
			DELAY(20);
			break;
		}
	}

	ea[0] = CSR_READ_4(sc, ALC_PAR0);
	ea[1] = CSR_READ_4(sc, ALC_PAR1);
	sc->alc_eaddr[0] = (ea[1] >> 8) & 0xFF;
	sc->alc_eaddr[1] = (ea[1] >> 0) & 0xFF;
	sc->alc_eaddr[2] = (ea[0] >> 24) & 0xFF;
	sc->alc_eaddr[3] = (ea[0] >> 16) & 0xFF;
	sc->alc_eaddr[4] = (ea[0] >> 8) & 0xFF;
	sc->alc_eaddr[5] = (ea[0] >> 0) & 0xFF;
}

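/*
 * L0s and L1 are PCIe ASPM link power states: L0s is a short-exit
 * standby state, L1 a deeper state with a longer exit latency.  Both
 * are disabled here during attach; alc_aspm() later re-enables them
 * selectively once the link state is known, since overly aggressive
 * ASPM has been associated with hangs on these controllers.
 */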
void
alc_disable_l0s_l1(struct alc_softc *sc)
{
	uint32_t pmcfg;

	/* Another magic from vendor. */
	pmcfg = CSR_READ_4(sc, ALC_PM_CFG);
	pmcfg &= ~(PM_CFG_L1_ENTRY_TIMER_MASK | PM_CFG_CLK_SWH_L1 |
	    PM_CFG_ASPM_L0S_ENB | PM_CFG_ASPM_L1_ENB | PM_CFG_MAC_ASPM_CHK |
	    PM_CFG_SERDES_PD_EX_L1);
	pmcfg |= PM_CFG_SERDES_BUDS_RX_L1_ENB | PM_CFG_SERDES_PLL_L1_ENB |
	    PM_CFG_SERDES_L1_ENB;
	CSR_WRITE_4(sc, ALC_PM_CFG, pmcfg);
}

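/*
 * Most of the PHY fixups below use the vendor's debug-port
 * indirection: the target debug register index is written to
 * ALC_MII_DBG_ADDR and its contents are then accessed through
 * ALC_MII_DBG_DATA, e.g.:
 *
 *	alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr,
 *	    ALC_MII_DBG_ADDR, 0x29);
 *	val = alc_miibus_readreg(&sc->sc_dev, sc->alc_phyaddr,
 *	    ALC_MII_DBG_DATA);
 */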
void
alc_phy_reset(struct alc_softc *sc)
{
	uint16_t data;

	/* Reset magic from Linux. */
	CSR_WRITE_2(sc, ALC_GPHY_CFG, GPHY_CFG_SEL_ANA_RESET);
	CSR_READ_2(sc, ALC_GPHY_CFG);
	DELAY(10 * 1000);

	CSR_WRITE_2(sc, ALC_GPHY_CFG, GPHY_CFG_EXT_RESET |
	    GPHY_CFG_SEL_ANA_RESET);
	CSR_READ_2(sc, ALC_GPHY_CFG);
	DELAY(10 * 1000);

	/* DSP fixup, Vendor magic. */
	if (sc->sc_product == PCI_PRODUCT_ATTANSIC_L2C_1) {
		alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr,
		    ALC_MII_DBG_ADDR, 0x000A);
		data = alc_miibus_readreg(&sc->sc_dev, sc->alc_phyaddr,
		    ALC_MII_DBG_DATA);
		alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr,
		    ALC_MII_DBG_DATA, data & 0xDFFF);
	}
	if (sc->sc_product == PCI_PRODUCT_ATTANSIC_L1D ||
	    sc->sc_product == PCI_PRODUCT_ATTANSIC_L1D_1 ||
	    sc->sc_product == PCI_PRODUCT_ATTANSIC_L2C_1 ||
	    sc->sc_product == PCI_PRODUCT_ATTANSIC_L2C_2) {
		alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr,
		    ALC_MII_DBG_ADDR, 0x003B);
		data = alc_miibus_readreg(&sc->sc_dev, sc->alc_phyaddr,
		    ALC_MII_DBG_DATA);
		alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr,
		    ALC_MII_DBG_DATA, data & 0xFFF7);
		DELAY(20 * 1000);
	}
	if (sc->sc_product == PCI_PRODUCT_ATTANSIC_L1D) {
		alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr,
		    ALC_MII_DBG_ADDR, 0x0029);
		alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr,
		    ALC_MII_DBG_DATA, 0x929D);
	}
	if (sc->sc_product == PCI_PRODUCT_ATTANSIC_L1C ||
	    sc->sc_product == PCI_PRODUCT_ATTANSIC_L2C ||
	    sc->sc_product == PCI_PRODUCT_ATTANSIC_L1D_1 ||
	    sc->sc_product == PCI_PRODUCT_ATTANSIC_L2C_2) {
		alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr,
		    ALC_MII_DBG_ADDR, 0x0029);
		alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr,
		    ALC_MII_DBG_DATA, 0xB6DD);
	}

	/* Load DSP codes, vendor magic. */
	data = ANA_LOOP_SEL_10BT | ANA_EN_MASK_TB | ANA_EN_10BT_IDLE |
	    ((1 << ANA_INTERVAL_SEL_TIMER_SHIFT) & ANA_INTERVAL_SEL_TIMER_MASK);
	alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr,
	    ALC_MII_DBG_ADDR, MII_ANA_CFG18);
	alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr,
	    ALC_MII_DBG_DATA, data);

	data = ((2 << ANA_SERDES_CDR_BW_SHIFT) & ANA_SERDES_CDR_BW_MASK) |
	    ANA_SERDES_EN_DEEM | ANA_SERDES_SEL_HSP | ANA_SERDES_EN_PLL |
	    ANA_SERDES_EN_LCKDT;
	alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr,
	    ALC_MII_DBG_ADDR, MII_ANA_CFG5);
	alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr,
	    ALC_MII_DBG_DATA, data);

	data = ((44 << ANA_LONG_CABLE_TH_100_SHIFT) &
	    ANA_LONG_CABLE_TH_100_MASK) |
	    ((33 << ANA_SHORT_CABLE_TH_100_SHIFT) &
	    ANA_SHORT_CABLE_TH_100_MASK) |
	    ANA_BP_BAD_LINK_ACCUM | ANA_BP_SMALL_BW;
	alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr,
	    ALC_MII_DBG_ADDR, MII_ANA_CFG54);
	alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr,
	    ALC_MII_DBG_DATA, data);

	data = ((11 << ANA_IECHO_ADJ_3_SHIFT) & ANA_IECHO_ADJ_3_MASK) |
	    ((11 << ANA_IECHO_ADJ_2_SHIFT) & ANA_IECHO_ADJ_2_MASK) |
	    ((8 << ANA_IECHO_ADJ_1_SHIFT) & ANA_IECHO_ADJ_1_MASK) |
	    ((8 << ANA_IECHO_ADJ_0_SHIFT) & ANA_IECHO_ADJ_0_MASK);
	alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr,
	    ALC_MII_DBG_ADDR, MII_ANA_CFG4);
	alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr,
	    ALC_MII_DBG_DATA, data);

	data = ((7 << ANA_MANUL_SWICH_ON_SHIFT) & ANA_MANUL_SWICH_ON_MASK) |
	    ANA_RESTART_CAL | ANA_MAN_ENABLE | ANA_SEL_HSP | ANA_EN_HB |
	    ANA_OEN_125M;
	alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr,
	    ALC_MII_DBG_ADDR, MII_ANA_CFG0);
	alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr,
	    ALC_MII_DBG_DATA, data);
	DELAY(1000);

	/* Disable hibernation. */
	alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr, ALC_MII_DBG_ADDR,
	    0x0029);
	data = alc_miibus_readreg(&sc->sc_dev, sc->alc_phyaddr,
	    ALC_MII_DBG_DATA);
	data &= ~0x8000;
	alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr, ALC_MII_DBG_DATA,
	    data);

	alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr, ALC_MII_DBG_ADDR,
	    0x000B);
	data = alc_miibus_readreg(&sc->sc_dev, sc->alc_phyaddr,
	    ALC_MII_DBG_DATA);
	data &= ~0x8000;
	alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr, ALC_MII_DBG_DATA,
	    data);
}

void
alc_phy_down(struct alc_softc *sc)
{
	switch (sc->sc_product) {
	case PCI_PRODUCT_ATTANSIC_L1D:
	case PCI_PRODUCT_ATTANSIC_L1D_1:
		/*
		 * GPHY power down caused more problems on AR8151 v2.0.
		 * When driver is reloaded after GPHY power down,
		 * accesses to PHY/MAC registers hung the system. Only
		 * cold boot recovered from it. I'm not sure whether
		 * AR8151 v1.0 also requires this one though. I don't
		 * have AR8151 v1.0 controller in hand.
		 * The only option left is to isolate the PHY and
		 * initiate a power down of the PHY, which in turn saves
		 * more power when the driver is unloaded.
		 */
		alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr,
		    MII_BMCR, BMCR_ISO | BMCR_PDOWN);
		break;
	default:
		/* Force PHY down. */
		CSR_WRITE_2(sc, ALC_GPHY_CFG, GPHY_CFG_EXT_RESET |
		    GPHY_CFG_SEL_ANA_RESET | GPHY_CFG_PHY_IDDQ |
		    GPHY_CFG_PWDOWN_HW);
		DELAY(1000);
		break;
	}
}

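/*
 * alc_aspm() recomputes PM_CFG from scratch on every link change.
 * The policy, reconstructed from vendor code, is roughly: with link
 * up, enable whichever of L0s/L1 the link partner and chip advertise
 * (with per-chip L1 entry timers); with link down, park the SerDes
 * and fall back to L1 only.  "media" is the resolved IFM_SUBTYPE of
 * the active media.
 */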
void
alc_aspm(struct alc_softc *sc, uint64_t media)
{
	uint32_t pmcfg;
	uint16_t linkcfg;

	pmcfg = CSR_READ_4(sc, ALC_PM_CFG);
	if ((sc->alc_flags & (ALC_FLAG_APS | ALC_FLAG_PCIE)) ==
	    (ALC_FLAG_APS | ALC_FLAG_PCIE))
		linkcfg = CSR_READ_2(sc, sc->alc_expcap +
		    PCI_PCIE_LCSR);
	else
		linkcfg = 0;
	pmcfg &= ~PM_CFG_SERDES_PD_EX_L1;
	pmcfg &= ~(PM_CFG_L1_ENTRY_TIMER_MASK | PM_CFG_LCKDET_TIMER_MASK);
	pmcfg |= PM_CFG_MAC_ASPM_CHK;
	pmcfg |= (PM_CFG_LCKDET_TIMER_DEFAULT << PM_CFG_LCKDET_TIMER_SHIFT);
	pmcfg &= ~(PM_CFG_ASPM_L1_ENB | PM_CFG_ASPM_L0S_ENB);

	if ((sc->alc_flags & ALC_FLAG_APS) != 0) {
		/* Disable extended sync except AR8152 B v1.0 */
		linkcfg &= ~0x80;
		if (sc->sc_product == PCI_PRODUCT_ATTANSIC_L2C_1 &&
		    sc->alc_rev == ATHEROS_AR8152_B_V10)
			linkcfg |= 0x80;
		CSR_WRITE_2(sc, sc->alc_expcap + PCI_PCIE_LCSR,
		    linkcfg);
		pmcfg &= ~(PM_CFG_EN_BUFS_RX_L0S | PM_CFG_SA_DLY_ENB |
		    PM_CFG_HOTRST);
		pmcfg |= (PM_CFG_L1_ENTRY_TIMER_DEFAULT <<
		    PM_CFG_L1_ENTRY_TIMER_SHIFT);
		pmcfg &= ~PM_CFG_PM_REQ_TIMER_MASK;
		pmcfg |= (PM_CFG_PM_REQ_TIMER_DEFAULT <<
		    PM_CFG_PM_REQ_TIMER_SHIFT);
		pmcfg |= PM_CFG_SERDES_PD_EX_L1 | PM_CFG_PCIE_RECV;
	}

	if ((sc->alc_flags & ALC_FLAG_LINK) != 0) {
		if ((sc->alc_flags & ALC_FLAG_L0S) != 0)
			pmcfg |= PM_CFG_ASPM_L0S_ENB;
		if ((sc->alc_flags & ALC_FLAG_L1S) != 0)
			pmcfg |= PM_CFG_ASPM_L1_ENB;
		if ((sc->alc_flags & ALC_FLAG_APS) != 0) {
			if (sc->sc_product == PCI_PRODUCT_ATTANSIC_L2C_1)
				pmcfg &= ~PM_CFG_ASPM_L0S_ENB;
			pmcfg &= ~(PM_CFG_SERDES_L1_ENB |
			    PM_CFG_SERDES_PLL_L1_ENB |
			    PM_CFG_SERDES_BUDS_RX_L1_ENB);
			pmcfg |= PM_CFG_CLK_SWH_L1;
			if (media == IFM_100_TX || media == IFM_1000_T) {
				pmcfg &= ~PM_CFG_L1_ENTRY_TIMER_MASK;
				switch (sc->sc_product) {
				case PCI_PRODUCT_ATTANSIC_L2C_1:
					pmcfg |= (7 <<
					    PM_CFG_L1_ENTRY_TIMER_SHIFT);
					break;
				case PCI_PRODUCT_ATTANSIC_L1D_1:
				case PCI_PRODUCT_ATTANSIC_L2C_2:
					pmcfg |= (4 <<
					    PM_CFG_L1_ENTRY_TIMER_SHIFT);
					break;
				default:
					pmcfg |= (15 <<
					    PM_CFG_L1_ENTRY_TIMER_SHIFT);
					break;
				}
			}
		} else {
			pmcfg |= PM_CFG_SERDES_L1_ENB |
			    PM_CFG_SERDES_PLL_L1_ENB |
			    PM_CFG_SERDES_BUDS_RX_L1_ENB;
			pmcfg &= ~(PM_CFG_CLK_SWH_L1 |
			    PM_CFG_ASPM_L1_ENB | PM_CFG_ASPM_L0S_ENB);
		}
	} else {
		pmcfg &= ~(PM_CFG_SERDES_BUDS_RX_L1_ENB | PM_CFG_SERDES_L1_ENB |
		    PM_CFG_SERDES_PLL_L1_ENB);
		pmcfg |= PM_CFG_CLK_SWH_L1;
		if ((sc->alc_flags & ALC_FLAG_L1S) != 0)
			pmcfg |= PM_CFG_ASPM_L1_ENB;
	}
	CSR_WRITE_4(sc, ALC_PM_CFG, pmcfg);
}

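/*
 * Attach: map the memory BAR, hook the (preferably MSI) interrupt,
 * size the PCIe DMA burst parameters, quiet ASPM, reset PHY and MAC,
 * allocate descriptor memory, and finally register the ifnet and MII
 * bus.  Any failure unwinds through the fail: label below.
 */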
printf(": can't map mem space\n"); 656 return; 657 } 658 659 if (pci_intr_map_msi(pa, &ih) != 0 && pci_intr_map(pa, &ih) != 0) { 660 printf(": can't map interrupt\n"); 661 goto fail; 662 } 663 664 /* 665 * Allocate IRQ 666 */ 667 intrstr = pci_intr_string(pc, ih); 668 sc->sc_irq_handle = pci_intr_establish(pc, ih, IPL_NET, alc_intr, sc, 669 sc->sc_dev.dv_xname); 670 if (sc->sc_irq_handle == NULL) { 671 printf(": could not establish interrupt"); 672 if (intrstr != NULL) 673 printf(" at %s", intrstr); 674 printf("\n"); 675 goto fail; 676 } 677 printf(": %s", intrstr); 678 679 sc->sc_dmat = pa->pa_dmat; 680 sc->sc_pct = pa->pa_pc; 681 sc->sc_pcitag = pa->pa_tag; 682 683 /* Set PHY address. */ 684 sc->alc_phyaddr = ALC_PHY_ADDR; 685 686 /* Get PCI and chip id/revision. */ 687 sc->sc_product = PCI_PRODUCT(pa->pa_id); 688 sc->alc_rev = PCI_REVISION(pa->pa_class); 689 690 /* Initialize DMA parameters. */ 691 sc->alc_dma_rd_burst = 0; 692 sc->alc_dma_wr_burst = 0; 693 sc->alc_rcb = DMA_CFG_RCB_64; 694 if (pci_get_capability(pc, pa->pa_tag, PCI_CAP_PCIEXPRESS, 695 &base, NULL)) { 696 sc->alc_flags |= ALC_FLAG_PCIE; 697 sc->alc_expcap = base; 698 burst = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 699 base + PCI_PCIE_DCSR) >> 16; 700 sc->alc_dma_rd_burst = (burst & 0x7000) >> 12; 701 sc->alc_dma_wr_burst = (burst & 0x00e0) >> 5; 702 if (alcdebug) { 703 printf("%s: Read request size : %u bytes.\n", 704 sc->sc_dev.dv_xname, 705 alc_dma_burst[sc->alc_dma_rd_burst]); 706 printf("%s: TLP payload size : %u bytes.\n", 707 sc->sc_dev.dv_xname, 708 alc_dma_burst[sc->alc_dma_wr_burst]); 709 } 710 if (alc_dma_burst[sc->alc_dma_rd_burst] > 1024) 711 sc->alc_dma_rd_burst = 3; 712 if (alc_dma_burst[sc->alc_dma_wr_burst] > 1024) 713 sc->alc_dma_wr_burst = 3; 714 /* Clear data link and flow-control protocol error. */ 715 val = CSR_READ_4(sc, ALC_PEX_UNC_ERR_SEV); 716 val &= ~(PEX_UNC_ERR_SEV_DLP | PEX_UNC_ERR_SEV_FCP); 717 CSR_WRITE_4(sc, ALC_PEX_UNC_ERR_SEV, val); 718 CSR_WRITE_4(sc, ALC_LTSSM_ID_CFG, 719 CSR_READ_4(sc, ALC_LTSSM_ID_CFG) & ~LTSSM_ID_WRO_ENB); 720 CSR_WRITE_4(sc, ALC_PCIE_PHYMISC, 721 CSR_READ_4(sc, ALC_PCIE_PHYMISC) | 722 PCIE_PHYMISC_FORCE_RCV_DET); 723 if (sc->sc_product == PCI_PRODUCT_ATTANSIC_L2C_1 && 724 sc->alc_rev == ATHEROS_AR8152_B_V10) { 725 val = CSR_READ_4(sc, ALC_PCIE_PHYMISC2); 726 val &= ~(PCIE_PHYMISC2_SERDES_CDR_MASK | 727 PCIE_PHYMISC2_SERDES_TH_MASK); 728 val |= 3 << PCIE_PHYMISC2_SERDES_CDR_SHIFT; 729 val |= 3 << PCIE_PHYMISC2_SERDES_TH_SHIFT; 730 CSR_WRITE_4(sc, ALC_PCIE_PHYMISC2, val); 731 } 732 /* Disable ASPM L0S and L1. */ 733 cap = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 734 base + PCI_PCIE_LCAP) >> 16; 735 if ((cap & 0x00000c00) != 0) { 736 ctl = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 737 base + PCI_PCIE_LCSR) >> 16; 738 if ((ctl & 0x08) != 0) 739 sc->alc_rcb = DMA_CFG_RCB_128; 740 if (alcdebug) 741 printf("%s: RCB %u bytes\n", 742 sc->sc_dev.dv_xname, 743 sc->alc_rcb == DMA_CFG_RCB_64 ? 64 : 128); 744 state = ctl & 0x03; 745 if (state & 0x01) 746 sc->alc_flags |= ALC_FLAG_L0S; 747 if (state & 0x02) 748 sc->alc_flags |= ALC_FLAG_L1S; 749 if (alcdebug) 750 printf("%s: ASPM %s %s\n", 751 sc->sc_dev.dv_xname, 752 aspm_state[state], 753 state == 0 ? "disabled" : "enabled"); 754 alc_disable_l0s_l1(sc); 755 } 756 } 757 758 /* Reset PHY. */ 759 alc_phy_reset(sc); 760 761 /* Reset the ethernet controller. */ 762 alc_reset(sc); 763 764 /* 765 * One odd thing is AR8132 uses the same PHY hardware(F1 766 * gigabit PHY) of AR8131. 
	switch (sc->sc_product) {
	case PCI_PRODUCT_ATTANSIC_L2C_1:
	case PCI_PRODUCT_ATTANSIC_L2C_2:
		sc->alc_flags |= ALC_FLAG_APS;
		/* FALLTHROUGH */
	case PCI_PRODUCT_ATTANSIC_L2C:
		sc->alc_flags |= ALC_FLAG_FASTETHER;
		break;
	case PCI_PRODUCT_ATTANSIC_L1D:
	case PCI_PRODUCT_ATTANSIC_L1D_1:
		sc->alc_flags |= ALC_FLAG_APS;
		/* FALLTHROUGH */
	default:
		break;
	}
	sc->alc_flags |= ALC_FLAG_ASPM_MON | ALC_FLAG_JUMBO;

	switch (sc->sc_product) {
	case PCI_PRODUCT_ATTANSIC_L1C:
	case PCI_PRODUCT_ATTANSIC_L2C:
		sc->alc_max_framelen = 9 * 1024;
		break;
	case PCI_PRODUCT_ATTANSIC_L1D:
	case PCI_PRODUCT_ATTANSIC_L1D_1:
	case PCI_PRODUCT_ATTANSIC_L2C_1:
	case PCI_PRODUCT_ATTANSIC_L2C_2:
		sc->alc_max_framelen = 6 * 1024;
		break;
	}

	/*
	 * It seems that AR813x/AR815x has a silicon bug in the SMB. In
	 * addition, Atheros said that enabling SMB wouldn't improve
	 * performance. However I think it's bad to access lots of
	 * registers to extract MAC statistics.
	 */
	sc->alc_flags |= ALC_FLAG_SMB_BUG;
	/*
	 * Don't use Tx CMB. It is known to have a silicon bug.
	 */
	sc->alc_flags |= ALC_FLAG_CMB_BUG;

	sc->alc_chip_rev = CSR_READ_4(sc, ALC_MASTER_CFG) >>
	    MASTER_CHIP_REV_SHIFT;
	if (alcdebug) {
		printf("%s: PCI device revision : 0x%04x\n",
		    sc->sc_dev.dv_xname, sc->alc_rev);
		printf("%s: Chip id/revision : 0x%04x\n",
		    sc->sc_dev.dv_xname, sc->alc_chip_rev);
		printf("%s: %u Tx FIFO, %u Rx FIFO\n", sc->sc_dev.dv_xname,
		    CSR_READ_4(sc, ALC_SRAM_TX_FIFO_LEN) * 8,
		    CSR_READ_4(sc, ALC_SRAM_RX_FIFO_LEN) * 8);
	}

	error = alc_dma_alloc(sc);
	if (error)
		goto fail;

	/* Load station address. */
	alc_get_macaddr(sc);

	ifp = &sc->sc_arpcom.ac_if;
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = alc_ioctl;
	ifp->if_start = alc_start;
	ifp->if_watchdog = alc_watchdog;
	IFQ_SET_MAXLEN(&ifp->if_snd, ALC_TX_RING_CNT - 1);
	bcopy(sc->alc_eaddr, sc->sc_arpcom.ac_enaddr, ETHER_ADDR_LEN);
	bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);

	ifp->if_capabilities = IFCAP_VLAN_MTU;

#ifdef ALC_CHECKSUM
	ifp->if_capabilities |= IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 |
	    IFCAP_CSUM_UDPv4;
#endif

#if NVLAN > 0
	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
#endif

	printf(", address %s\n", ether_sprintf(sc->sc_arpcom.ac_enaddr));

	/* Set up MII bus. */
	sc->sc_miibus.mii_ifp = ifp;
	sc->sc_miibus.mii_readreg = alc_miibus_readreg;
	sc->sc_miibus.mii_writereg = alc_miibus_writereg;
	sc->sc_miibus.mii_statchg = alc_miibus_statchg;

	ifmedia_init(&sc->sc_miibus.mii_media, 0, alc_mediachange,
	    alc_mediastatus);
	mii_attach(self, &sc->sc_miibus, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, MIIF_DOPAUSE);

	if (LIST_FIRST(&sc->sc_miibus.mii_phys) == NULL) {
		printf("%s: no PHY found!\n", sc->sc_dev.dv_xname);
		ifmedia_add(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_MANUAL,
		    0, NULL);
		ifmedia_set(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_MANUAL);
	} else
		ifmedia_set(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_AUTO);

	if_attach(ifp);
	ether_ifattach(ifp);

	timeout_set(&sc->alc_tick_ch, alc_tick, sc);

	return;
fail:
	alc_dma_free(sc);
	if (sc->sc_irq_handle != NULL)
		pci_intr_disestablish(pc, sc->sc_irq_handle);
	if (sc->sc_mem_size)
		bus_space_unmap(sc->sc_mem_bt, sc->sc_mem_bh, sc->sc_mem_size);
}

int
alc_detach(struct device *self, int flags)
{
	struct alc_softc *sc = (struct alc_softc *)self;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	int s;

	s = splnet();
	alc_stop(sc);
	splx(s);

	mii_detach(&sc->sc_miibus, MII_PHY_ANY, MII_OFFSET_ANY);

	/* Delete all remaining media. */
	ifmedia_delete_instance(&sc->sc_miibus.mii_media, IFM_INST_ANY);

	ether_ifdetach(ifp);
	if_detach(ifp);
	alc_dma_free(sc);

	alc_phy_down(sc);
	if (sc->sc_irq_handle != NULL) {
		pci_intr_disestablish(sc->sc_pct, sc->sc_irq_handle);
		sc->sc_irq_handle = NULL;
	}

	return (0);
}

int
alc_activate(struct device *self, int act)
{
	struct alc_softc *sc = (struct alc_softc *)self;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	int rv = 0;

	switch (act) {
	case DVACT_SUSPEND:
		if (ifp->if_flags & IFF_RUNNING)
			alc_stop(sc);
		rv = config_activate_children(self, act);
		break;
	case DVACT_RESUME:
		if (ifp->if_flags & IFF_UP)
			alc_init(ifp);
		break;
	default:
		rv = config_activate_children(self, act);
		break;
	}
	return (rv);
}

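/*
 * All descriptor memory follows the same four-step bus_dma(9) recipe:
 * bus_dmamap_create() a map, bus_dmamem_alloc() DMA-safe pages,
 * bus_dmamem_map() them into kernel VA, and bus_dmamap_load() to
 * obtain the physical address that is later programmed into the chip.
 * A sketch for one ring:
 *
 *	bus_dmamap_create(tag, sz, 1, sz, 0, BUS_DMA_NOWAIT, &map);
 *	bus_dmamem_alloc(tag, sz, ETHER_ALIGN, 0, &seg, 1, &nsegs,
 *	    BUS_DMA_NOWAIT | BUS_DMA_ZERO);
 *	bus_dmamem_map(tag, &seg, nsegs, sz, &kva, BUS_DMA_NOWAIT);
 *	bus_dmamap_load(tag, map, kva, sz, NULL, BUS_DMA_WAITOK);
 *	paddr = map->dm_segs[0].ds_addr;
 */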
int
alc_dma_alloc(struct alc_softc *sc)
{
	struct alc_txdesc *txd;
	struct alc_rxdesc *rxd;
	int nsegs, error, i;

	/*
	 * Create DMA stuffs for TX ring
	 */
	error = bus_dmamap_create(sc->sc_dmat, ALC_TX_RING_SZ, 1,
	    ALC_TX_RING_SZ, 0, BUS_DMA_NOWAIT, &sc->alc_cdata.alc_tx_ring_map);
	if (error)
		return (ENOBUFS);

	/* Allocate DMA'able memory for TX ring */
	error = bus_dmamem_alloc(sc->sc_dmat, ALC_TX_RING_SZ,
	    ETHER_ALIGN, 0, &sc->alc_rdata.alc_tx_ring_seg, 1,
	    &nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO);
	if (error) {
		printf("%s: could not allocate DMA'able memory for Tx ring.\n",
		    sc->sc_dev.dv_xname);
		return error;
	}

	error = bus_dmamem_map(sc->sc_dmat, &sc->alc_rdata.alc_tx_ring_seg,
	    nsegs, ALC_TX_RING_SZ, (caddr_t *)&sc->alc_rdata.alc_tx_ring,
	    BUS_DMA_NOWAIT);
	if (error)
		return (ENOBUFS);

	/* Load the DMA map for Tx ring. */
	error = bus_dmamap_load(sc->sc_dmat, sc->alc_cdata.alc_tx_ring_map,
	    sc->alc_rdata.alc_tx_ring, ALC_TX_RING_SZ, NULL, BUS_DMA_WAITOK);
	if (error) {
		printf("%s: could not load DMA'able memory for Tx ring.\n",
		    sc->sc_dev.dv_xname);
		bus_dmamem_free(sc->sc_dmat,
		    (bus_dma_segment_t *)&sc->alc_rdata.alc_tx_ring, 1);
		return error;
	}

	sc->alc_rdata.alc_tx_ring_paddr =
	    sc->alc_cdata.alc_tx_ring_map->dm_segs[0].ds_addr;

	/*
	 * Create DMA stuffs for RX ring
	 */
	error = bus_dmamap_create(sc->sc_dmat, ALC_RX_RING_SZ, 1,
	    ALC_RX_RING_SZ, 0, BUS_DMA_NOWAIT, &sc->alc_cdata.alc_rx_ring_map);
	if (error)
		return (ENOBUFS);

	/* Allocate DMA'able memory for RX ring */
	error = bus_dmamem_alloc(sc->sc_dmat, ALC_RX_RING_SZ,
	    ETHER_ALIGN, 0, &sc->alc_rdata.alc_rx_ring_seg, 1,
	    &nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO);
	if (error) {
		printf("%s: could not allocate DMA'able memory for Rx ring.\n",
		    sc->sc_dev.dv_xname);
		return error;
	}

	error = bus_dmamem_map(sc->sc_dmat, &sc->alc_rdata.alc_rx_ring_seg,
	    nsegs, ALC_RX_RING_SZ, (caddr_t *)&sc->alc_rdata.alc_rx_ring,
	    BUS_DMA_NOWAIT);
	if (error)
		return (ENOBUFS);

	/* Load the DMA map for Rx ring. */
	error = bus_dmamap_load(sc->sc_dmat, sc->alc_cdata.alc_rx_ring_map,
	    sc->alc_rdata.alc_rx_ring, ALC_RX_RING_SZ, NULL, BUS_DMA_WAITOK);
	if (error) {
		printf("%s: could not load DMA'able memory for Rx ring.\n",
		    sc->sc_dev.dv_xname);
		bus_dmamem_free(sc->sc_dmat,
		    (bus_dma_segment_t *)sc->alc_rdata.alc_rx_ring, 1);
		return error;
	}

	sc->alc_rdata.alc_rx_ring_paddr =
	    sc->alc_cdata.alc_rx_ring_map->dm_segs[0].ds_addr;

	/*
	 * Create DMA stuffs for RX return ring
	 */
	error = bus_dmamap_create(sc->sc_dmat, ALC_RR_RING_SZ, 1,
	    ALC_RR_RING_SZ, 0, BUS_DMA_NOWAIT, &sc->alc_cdata.alc_rr_ring_map);
	if (error)
		return (ENOBUFS);

	/* Allocate DMA'able memory for RX return ring */
	error = bus_dmamem_alloc(sc->sc_dmat, ALC_RR_RING_SZ,
	    ETHER_ALIGN, 0, &sc->alc_rdata.alc_rr_ring_seg, 1,
	    &nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO);
	if (error) {
		printf("%s: could not allocate DMA'able memory for Rx "
		    "return ring.\n", sc->sc_dev.dv_xname);
		return error;
	}

	error = bus_dmamem_map(sc->sc_dmat, &sc->alc_rdata.alc_rr_ring_seg,
	    nsegs, ALC_RR_RING_SZ, (caddr_t *)&sc->alc_rdata.alc_rr_ring,
	    BUS_DMA_NOWAIT);
	if (error)
		return (ENOBUFS);

	/* Load the DMA map for Rx return ring. */
	error = bus_dmamap_load(sc->sc_dmat, sc->alc_cdata.alc_rr_ring_map,
	    sc->alc_rdata.alc_rr_ring, ALC_RR_RING_SZ, NULL, BUS_DMA_WAITOK);
	if (error) {
		printf("%s: could not load DMA'able memory for Rx return ring."
		    "\n", sc->sc_dev.dv_xname);
		bus_dmamem_free(sc->sc_dmat,
		    (bus_dma_segment_t *)&sc->alc_rdata.alc_rr_ring, 1);
		return error;
	}

	sc->alc_rdata.alc_rr_ring_paddr =
	    sc->alc_cdata.alc_rr_ring_map->dm_segs[0].ds_addr;

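	/*
	 * The CMB (coalescing message block) and SMB (statistics
	 * message block) are small chunks of host memory the
	 * controller writes asynchronously: the CMB mirrors the Tx
	 * consumer index, the SMB the MAC statistics counters.  Both
	 * are allocated here even though the CMB_BUG/SMB_BUG flags
	 * set at attach time keep them unused on the supported chips.
	 */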
	/*
	 * Create DMA stuffs for CMB block
	 */
	error = bus_dmamap_create(sc->sc_dmat, ALC_CMB_SZ, 1,
	    ALC_CMB_SZ, 0, BUS_DMA_NOWAIT,
	    &sc->alc_cdata.alc_cmb_map);
	if (error)
		return (ENOBUFS);

	/* Allocate DMA'able memory for CMB block */
	error = bus_dmamem_alloc(sc->sc_dmat, ALC_CMB_SZ,
	    ETHER_ALIGN, 0, &sc->alc_rdata.alc_cmb_seg, 1,
	    &nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO);
	if (error) {
		printf("%s: could not allocate DMA'able memory for "
		    "CMB block\n", sc->sc_dev.dv_xname);
		return error;
	}

	error = bus_dmamem_map(sc->sc_dmat, &sc->alc_rdata.alc_cmb_seg,
	    nsegs, ALC_CMB_SZ, (caddr_t *)&sc->alc_rdata.alc_cmb,
	    BUS_DMA_NOWAIT);
	if (error)
		return (ENOBUFS);

	/* Load the DMA map for CMB block. */
	error = bus_dmamap_load(sc->sc_dmat, sc->alc_cdata.alc_cmb_map,
	    sc->alc_rdata.alc_cmb, ALC_CMB_SZ, NULL,
	    BUS_DMA_WAITOK);
	if (error) {
		printf("%s: could not load DMA'able memory for CMB block\n",
		    sc->sc_dev.dv_xname);
		bus_dmamem_free(sc->sc_dmat,
		    (bus_dma_segment_t *)&sc->alc_rdata.alc_cmb, 1);
		return error;
	}

	sc->alc_rdata.alc_cmb_paddr =
	    sc->alc_cdata.alc_cmb_map->dm_segs[0].ds_addr;

	/*
	 * Create DMA stuffs for SMB block
	 */
	error = bus_dmamap_create(sc->sc_dmat, ALC_SMB_SZ, 1,
	    ALC_SMB_SZ, 0, BUS_DMA_NOWAIT,
	    &sc->alc_cdata.alc_smb_map);
	if (error)
		return (ENOBUFS);

	/* Allocate DMA'able memory for SMB block */
	error = bus_dmamem_alloc(sc->sc_dmat, ALC_SMB_SZ,
	    ETHER_ALIGN, 0, &sc->alc_rdata.alc_smb_seg, 1,
	    &nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO);
	if (error) {
		printf("%s: could not allocate DMA'able memory for "
		    "SMB block\n", sc->sc_dev.dv_xname);
		return error;
	}

	error = bus_dmamem_map(sc->sc_dmat, &sc->alc_rdata.alc_smb_seg,
	    nsegs, ALC_SMB_SZ, (caddr_t *)&sc->alc_rdata.alc_smb,
	    BUS_DMA_NOWAIT);
	if (error)
		return (ENOBUFS);

	/* Load the DMA map for SMB block */
	error = bus_dmamap_load(sc->sc_dmat, sc->alc_cdata.alc_smb_map,
	    sc->alc_rdata.alc_smb, ALC_SMB_SZ, NULL,
	    BUS_DMA_WAITOK);
	if (error) {
		printf("%s: could not load DMA'able memory for SMB block\n",
		    sc->sc_dev.dv_xname);
		bus_dmamem_free(sc->sc_dmat,
		    (bus_dma_segment_t *)&sc->alc_rdata.alc_smb, 1);
		return error;
	}

	sc->alc_rdata.alc_smb_paddr =
	    sc->alc_cdata.alc_smb_map->dm_segs[0].ds_addr;

	/* Create DMA maps for Tx buffers. */
	for (i = 0; i < ALC_TX_RING_CNT; i++) {
		txd = &sc->alc_cdata.alc_txdesc[i];
		txd->tx_m = NULL;
		txd->tx_dmamap = NULL;
		error = bus_dmamap_create(sc->sc_dmat, ALC_TSO_MAXSIZE,
		    ALC_MAXTXSEGS, ALC_TSO_MAXSEGSIZE, 0, BUS_DMA_NOWAIT,
		    &txd->tx_dmamap);
		if (error) {
			printf("%s: could not create Tx dmamap.\n",
			    sc->sc_dev.dv_xname);
			return error;
		}
	}

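	/*
	 * A single spare Rx dmamap is kept around so that a
	 * replacement mbuf can be loaded before the old one is torn
	 * down; alc_newbuf() swaps the spare with a slot's map on
	 * success, so the ring never ends up with an unmapped
	 * descriptor.
	 */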
	/* Create DMA maps for Rx buffers. */
	error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 0,
	    BUS_DMA_NOWAIT, &sc->alc_cdata.alc_rx_sparemap);
	if (error) {
		printf("%s: could not create spare Rx dmamap.\n",
		    sc->sc_dev.dv_xname);
		return error;
	}

	for (i = 0; i < ALC_RX_RING_CNT; i++) {
		rxd = &sc->alc_cdata.alc_rxdesc[i];
		rxd->rx_m = NULL;
		rxd->rx_dmamap = NULL;
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT, &rxd->rx_dmamap);
		if (error) {
			printf("%s: could not create Rx dmamap.\n",
			    sc->sc_dev.dv_xname);
			return error;
		}
	}

	return (0);
}

void
alc_dma_free(struct alc_softc *sc)
{
	struct alc_txdesc *txd;
	struct alc_rxdesc *rxd;
	int i;

	/* Tx buffers */
	for (i = 0; i < ALC_TX_RING_CNT; i++) {
		txd = &sc->alc_cdata.alc_txdesc[i];
		if (txd->tx_dmamap != NULL) {
			bus_dmamap_destroy(sc->sc_dmat, txd->tx_dmamap);
			txd->tx_dmamap = NULL;
		}
	}
	/* Rx buffers */
	for (i = 0; i < ALC_RX_RING_CNT; i++) {
		rxd = &sc->alc_cdata.alc_rxdesc[i];
		if (rxd->rx_dmamap != NULL) {
			bus_dmamap_destroy(sc->sc_dmat, rxd->rx_dmamap);
			rxd->rx_dmamap = NULL;
		}
	}
	if (sc->alc_cdata.alc_rx_sparemap != NULL) {
		bus_dmamap_destroy(sc->sc_dmat, sc->alc_cdata.alc_rx_sparemap);
		sc->alc_cdata.alc_rx_sparemap = NULL;
	}

	/* Tx ring. */
	if (sc->alc_cdata.alc_tx_ring_map != NULL)
		bus_dmamap_unload(sc->sc_dmat, sc->alc_cdata.alc_tx_ring_map);
	if (sc->alc_cdata.alc_tx_ring_map != NULL &&
	    sc->alc_rdata.alc_tx_ring != NULL)
		bus_dmamem_free(sc->sc_dmat,
		    (bus_dma_segment_t *)sc->alc_rdata.alc_tx_ring, 1);
	sc->alc_rdata.alc_tx_ring = NULL;
	sc->alc_cdata.alc_tx_ring_map = NULL;

	/* Rx ring. */
	if (sc->alc_cdata.alc_rx_ring_map != NULL)
		bus_dmamap_unload(sc->sc_dmat, sc->alc_cdata.alc_rx_ring_map);
	if (sc->alc_cdata.alc_rx_ring_map != NULL &&
	    sc->alc_rdata.alc_rx_ring != NULL)
		bus_dmamem_free(sc->sc_dmat,
		    (bus_dma_segment_t *)sc->alc_rdata.alc_rx_ring, 1);
	sc->alc_rdata.alc_rx_ring = NULL;
	sc->alc_cdata.alc_rx_ring_map = NULL;

	/* Rx return ring. */
	if (sc->alc_cdata.alc_rr_ring_map != NULL)
		bus_dmamap_unload(sc->sc_dmat, sc->alc_cdata.alc_rr_ring_map);
	if (sc->alc_cdata.alc_rr_ring_map != NULL &&
	    sc->alc_rdata.alc_rr_ring != NULL)
		bus_dmamem_free(sc->sc_dmat,
		    (bus_dma_segment_t *)sc->alc_rdata.alc_rr_ring, 1);
	sc->alc_rdata.alc_rr_ring = NULL;
	sc->alc_cdata.alc_rr_ring_map = NULL;

	/* CMB block */
	if (sc->alc_cdata.alc_cmb_map != NULL)
		bus_dmamap_unload(sc->sc_dmat, sc->alc_cdata.alc_cmb_map);
	if (sc->alc_cdata.alc_cmb_map != NULL &&
	    sc->alc_rdata.alc_cmb != NULL)
		bus_dmamem_free(sc->sc_dmat,
		    (bus_dma_segment_t *)sc->alc_rdata.alc_cmb, 1);
	sc->alc_rdata.alc_cmb = NULL;
	sc->alc_cdata.alc_cmb_map = NULL;

	/* SMB block */
	if (sc->alc_cdata.alc_smb_map != NULL)
		bus_dmamap_unload(sc->sc_dmat, sc->alc_cdata.alc_smb_map);
	if (sc->alc_cdata.alc_smb_map != NULL &&
	    sc->alc_rdata.alc_smb != NULL)
		bus_dmamem_free(sc->sc_dmat,
		    (bus_dma_segment_t *)sc->alc_rdata.alc_smb, 1);
	sc->alc_rdata.alc_smb = NULL;
	sc->alc_cdata.alc_smb_map = NULL;
}

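/*
 * alc_encap() turns one outgoing mbuf chain into a run of Tx
 * descriptors: load the chain (defragmenting once on EFBIG), emit one
 * descriptor per DMA segment carrying the same checksum/VLAN flags,
 * then set TD_EOP on the last descriptor.  The dmamaps of the first
 * and last slot are swapped so the loaded map is torn down together
 * with the mbuf when the last descriptor completes.
 */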
int
alc_encap(struct alc_softc *sc, struct mbuf *m)
{
	struct alc_txdesc *txd, *txd_last;
	struct tx_desc *desc;
	bus_dmamap_t map;
	uint32_t cflags, poff, vtag;
	int error, idx, prod;

	cflags = vtag = 0;
	poff = 0;

	prod = sc->alc_cdata.alc_tx_prod;
	txd = &sc->alc_cdata.alc_txdesc[prod];
	txd_last = txd;
	map = txd->tx_dmamap;

	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m, BUS_DMA_NOWAIT);
	if (error != 0 && error != EFBIG)
		goto drop;
	if (error != 0) {
		if (m_defrag(m, M_DONTWAIT)) {
			error = ENOBUFS;
			goto drop;
		}
		error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
		    BUS_DMA_NOWAIT);
		if (error != 0)
			goto drop;
	}

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	desc = NULL;
	idx = 0;
#if NVLAN > 0
	/* Configure VLAN hardware tag insertion. */
	if (m->m_flags & M_VLANTAG) {
		vtag = htons(m->m_pkthdr.ether_vtag);
		vtag = (vtag << TD_VLAN_SHIFT) & TD_VLAN_MASK;
		cflags |= TD_INS_VLAN_TAG;
	}
#endif
	/* Configure Tx checksum offload. */
	if ((m->m_pkthdr.csum_flags & ALC_CSUM_FEATURES) != 0) {
		cflags |= TD_CUSTOM_CSUM;
		/* Set checksum start offset. */
		cflags |= ((poff >> 1) << TD_PLOAD_OFFSET_SHIFT) &
		    TD_PLOAD_OFFSET_MASK;
	}

	for (; idx < map->dm_nsegs; idx++) {
		desc = &sc->alc_rdata.alc_tx_ring[prod];
		desc->len =
		    htole32(TX_BYTES(map->dm_segs[idx].ds_len) | vtag);
		desc->flags = htole32(cflags);
		desc->addr = htole64(map->dm_segs[idx].ds_addr);
		sc->alc_cdata.alc_tx_cnt++;
		ALC_DESC_INC(prod, ALC_TX_RING_CNT);
	}

	/* Update producer index. */
	sc->alc_cdata.alc_tx_prod = prod;

	/* Finally set EOP on the last descriptor. */
	prod = (prod + ALC_TX_RING_CNT - 1) % ALC_TX_RING_CNT;
	desc = &sc->alc_rdata.alc_tx_ring[prod];
	desc->flags |= htole32(TD_EOP);

	/* Swap dmamap of the first and the last. */
	txd = &sc->alc_cdata.alc_txdesc[prod];
	map = txd_last->tx_dmamap;
	txd_last->tx_dmamap = txd->tx_dmamap;
	txd->tx_dmamap = map;
	txd->tx_m = m;

	return (0);

drop:
	m_freem(m);
	return (error);
}

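/*
 * Frames are dequeued only while at least ALC_MAXTXSEGS + 3 free
 * descriptors remain, so a maximally fragmented mbuf chain can never
 * overrun the ring; otherwise the queue is marked oactive until
 * alc_txeof() reclaims space.
 */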
void
alc_start(struct ifnet *ifp)
{
	struct alc_softc *sc = ifp->if_softc;
	struct mbuf *m;
	int enq = 0;

	/* Reclaim transmitted frames. */
	if (sc->alc_cdata.alc_tx_cnt >= ALC_TX_DESC_HIWAT)
		alc_txeof(sc);

	if (!(ifp->if_flags & IFF_RUNNING) || ifq_is_oactive(&ifp->if_snd))
		return;
	if ((sc->alc_flags & ALC_FLAG_LINK) == 0)
		return;
	if (IFQ_IS_EMPTY(&ifp->if_snd))
		return;

	for (;;) {
		if (sc->alc_cdata.alc_tx_cnt + ALC_MAXTXSEGS >=
		    ALC_TX_RING_CNT - 3) {
			ifq_set_oactive(&ifp->if_snd);
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL)
			break;

		if (alc_encap(sc, m) != 0) {
			ifp->if_oerrors++;
			continue;
		}
		enq++;

#if NBPFILTER > 0
		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		if (ifp->if_bpf != NULL)
			bpf_mtap_ether(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif
	}

	if (enq > 0) {
		/* Sync descriptors. */
		bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_tx_ring_map, 0,
		    sc->alc_cdata.alc_tx_ring_map->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);
		/* Kick. Assume we're using normal Tx priority queue. */
		CSR_WRITE_4(sc, ALC_MBOX_TD_PROD_IDX,
		    (sc->alc_cdata.alc_tx_prod <<
		    MBOX_TD_PROD_LO_IDX_SHIFT) &
		    MBOX_TD_PROD_LO_IDX_MASK);
		/* Set a timeout in case the chip goes out to lunch. */
		ifp->if_timer = ALC_TX_TIMEOUT;
	}
}

void
alc_watchdog(struct ifnet *ifp)
{
	struct alc_softc *sc = ifp->if_softc;

	if ((sc->alc_flags & ALC_FLAG_LINK) == 0) {
		printf("%s: watchdog timeout (missed link)\n",
		    sc->sc_dev.dv_xname);
		ifp->if_oerrors++;
		alc_init(ifp);
		return;
	}

	printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);
	ifp->if_oerrors++;
	alc_init(ifp);
	alc_start(ifp);
}

int
alc_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct alc_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_miibus;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		if (!(ifp->if_flags & IFF_RUNNING))
			alc_init(ifp);
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING)
				error = ENETRESET;
			else
				alc_init(ifp);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				alc_stop(sc);
		}
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;

	default:
		error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data);
		break;
	}

	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			alc_iff(sc);
		error = 0;
	}

	splx(s);
	return (error);
}

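/*
 * The MAC must mirror the speed/duplex/pause parameters the PHY
 * negotiated; alc_mac_config() translates the resolved mii(4) media
 * word into MAC_CFG bits and is only called while the MACs are
 * stopped (see alc_miibus_statchg()).
 */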
void
alc_mac_config(struct alc_softc *sc)
{
	struct mii_data *mii;
	uint32_t reg;

	mii = &sc->sc_miibus;
	reg = CSR_READ_4(sc, ALC_MAC_CFG);
	reg &= ~(MAC_CFG_FULL_DUPLEX | MAC_CFG_TX_FC | MAC_CFG_RX_FC |
	    MAC_CFG_SPEED_MASK);
	if (sc->sc_product == PCI_PRODUCT_ATTANSIC_L1D ||
	    sc->sc_product == PCI_PRODUCT_ATTANSIC_L1D_1 ||
	    sc->sc_product == PCI_PRODUCT_ATTANSIC_L2C_2)
		reg |= MAC_CFG_HASH_ALG_CRC32 | MAC_CFG_SPEED_MODE_SW;
	/* Reprogram MAC with resolved speed/duplex. */
	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_10_T:
	case IFM_100_TX:
		reg |= MAC_CFG_SPEED_10_100;
		break;
	case IFM_1000_T:
		reg |= MAC_CFG_SPEED_1000;
		break;
	}
	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
		reg |= MAC_CFG_FULL_DUPLEX;
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
			reg |= MAC_CFG_TX_FC;
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
			reg |= MAC_CFG_RX_FC;
	}
	CSR_WRITE_4(sc, ALC_MAC_CFG, reg);
}

void
alc_stats_clear(struct alc_softc *sc)
{
	struct smb sb, *smb;
	uint32_t *reg;
	int i;

	if ((sc->alc_flags & ALC_FLAG_SMB_BUG) == 0) {
		bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_smb_map, 0,
		    sc->alc_cdata.alc_smb_map->dm_mapsize,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		smb = sc->alc_rdata.alc_smb;
		/* Update done, clear. */
		smb->updated = 0;
		bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_smb_map, 0,
		    sc->alc_cdata.alc_smb_map->dm_mapsize,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	} else {
		for (reg = &sb.rx_frames, i = 0; reg <= &sb.rx_pkts_filtered;
		    reg++) {
			CSR_READ_4(sc, ALC_RX_MIB_BASE + i);
			i += sizeof(uint32_t);
		}
		/* Read Tx statistics. */
		for (reg = &sb.tx_frames, i = 0; reg <= &sb.tx_mcast_bytes;
		    reg++) {
			CSR_READ_4(sc, ALC_TX_MIB_BASE + i);
			i += sizeof(uint32_t);
		}
	}
}

void
alc_stats_update(struct alc_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct alc_hw_stats *stat;
	struct smb sb, *smb;
	uint32_t *reg;
	int i;

	stat = &sc->alc_stats;
	if ((sc->alc_flags & ALC_FLAG_SMB_BUG) == 0) {
		bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_smb_map, 0,
		    sc->alc_cdata.alc_smb_map->dm_mapsize,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		smb = sc->alc_rdata.alc_smb;
		if (smb->updated == 0)
			return;
	} else {
		smb = &sb;
		/* Read Rx statistics. */
		for (reg = &sb.rx_frames, i = 0; reg <= &sb.rx_pkts_filtered;
		    reg++) {
			*reg = CSR_READ_4(sc, ALC_RX_MIB_BASE + i);
			i += sizeof(uint32_t);
		}
		/* Read Tx statistics. */
		for (reg = &sb.tx_frames, i = 0; reg <= &sb.tx_mcast_bytes;
		    reg++) {
			*reg = CSR_READ_4(sc, ALC_TX_MIB_BASE + i);
			i += sizeof(uint32_t);
		}
	}

	/* Rx stats. */
	stat->rx_frames += smb->rx_frames;
	stat->rx_bcast_frames += smb->rx_bcast_frames;
	stat->rx_mcast_frames += smb->rx_mcast_frames;
	stat->rx_pause_frames += smb->rx_pause_frames;
	stat->rx_control_frames += smb->rx_control_frames;
	stat->rx_crcerrs += smb->rx_crcerrs;
	stat->rx_lenerrs += smb->rx_lenerrs;
	stat->rx_bytes += smb->rx_bytes;
	stat->rx_runts += smb->rx_runts;
	stat->rx_fragments += smb->rx_fragments;
	stat->rx_pkts_64 += smb->rx_pkts_64;
	stat->rx_pkts_65_127 += smb->rx_pkts_65_127;
	stat->rx_pkts_128_255 += smb->rx_pkts_128_255;
	stat->rx_pkts_256_511 += smb->rx_pkts_256_511;
	stat->rx_pkts_512_1023 += smb->rx_pkts_512_1023;
	stat->rx_pkts_1024_1518 += smb->rx_pkts_1024_1518;
	stat->rx_pkts_1519_max += smb->rx_pkts_1519_max;
	stat->rx_pkts_truncated += smb->rx_pkts_truncated;
	stat->rx_fifo_oflows += smb->rx_fifo_oflows;
	stat->rx_rrs_errs += smb->rx_rrs_errs;
	stat->rx_alignerrs += smb->rx_alignerrs;
	stat->rx_bcast_bytes += smb->rx_bcast_bytes;
	stat->rx_mcast_bytes += smb->rx_mcast_bytes;
	stat->rx_pkts_filtered += smb->rx_pkts_filtered;

	/* Tx stats. */
	stat->tx_frames += smb->tx_frames;
	stat->tx_bcast_frames += smb->tx_bcast_frames;
	stat->tx_mcast_frames += smb->tx_mcast_frames;
	stat->tx_pause_frames += smb->tx_pause_frames;
	stat->tx_excess_defer += smb->tx_excess_defer;
	stat->tx_control_frames += smb->tx_control_frames;
	stat->tx_deferred += smb->tx_deferred;
	stat->tx_bytes += smb->tx_bytes;
	stat->tx_pkts_64 += smb->tx_pkts_64;
	stat->tx_pkts_65_127 += smb->tx_pkts_65_127;
	stat->tx_pkts_128_255 += smb->tx_pkts_128_255;
	stat->tx_pkts_256_511 += smb->tx_pkts_256_511;
	stat->tx_pkts_512_1023 += smb->tx_pkts_512_1023;
	stat->tx_pkts_1024_1518 += smb->tx_pkts_1024_1518;
	stat->tx_pkts_1519_max += smb->tx_pkts_1519_max;
	stat->tx_single_colls += smb->tx_single_colls;
	stat->tx_multi_colls += smb->tx_multi_colls;
	stat->tx_late_colls += smb->tx_late_colls;
	stat->tx_excess_colls += smb->tx_excess_colls;
	stat->tx_underrun += smb->tx_underrun;
	stat->tx_desc_underrun += smb->tx_desc_underrun;
	stat->tx_lenerrs += smb->tx_lenerrs;
	stat->tx_pkts_truncated += smb->tx_pkts_truncated;
	stat->tx_bcast_bytes += smb->tx_bcast_bytes;
	stat->tx_mcast_bytes += smb->tx_mcast_bytes;

	ifp->if_collisions += smb->tx_single_colls +
	    smb->tx_multi_colls * 2 + smb->tx_late_colls +
	    smb->tx_excess_colls * HDPX_CFG_RETRY_DEFAULT;

	ifp->if_oerrors += smb->tx_late_colls + smb->tx_excess_colls +
	    smb->tx_underrun + smb->tx_pkts_truncated;

	ifp->if_ierrors += smb->rx_crcerrs + smb->rx_lenerrs +
	    smb->rx_runts + smb->rx_pkts_truncated +
	    smb->rx_fifo_oflows + smb->rx_rrs_errs +
	    smb->rx_alignerrs;

	if ((sc->alc_flags & ALC_FLAG_SMB_BUG) == 0) {
		/* Update done, clear. */
		smb->updated = 0;
		bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_smb_map, 0,
		    sc->alc_cdata.alc_smb_map->dm_mapsize,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}
}

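/*
 * Interrupt handling appears to follow the vendor's ack dance:
 * writing INTR_DIS_INT masks further interrupts, the latched status
 * is re-read and acknowledged, and a final write of 0x7FFFFFFF on
 * the way out clears the latched sources while dropping the disable
 * bit, re-enabling interrupts.  The DMA/TxQ timeout bits are treated
 * as fatal and trigger a full reinit.
 */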
int
alc_intr(void *arg)
{
	struct alc_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	uint32_t status;
	int claimed = 0;

	status = CSR_READ_4(sc, ALC_INTR_STATUS);
	if ((status & ALC_INTRS) == 0)
		return (0);

	/* Disable interrupts. */
	CSR_WRITE_4(sc, ALC_INTR_STATUS, INTR_DIS_INT);

	status = CSR_READ_4(sc, ALC_INTR_STATUS);
	if ((status & ALC_INTRS) == 0)
		goto back;

	/* Acknowledge and disable interrupts. */
	CSR_WRITE_4(sc, ALC_INTR_STATUS, status | INTR_DIS_INT);

	if (ifp->if_flags & IFF_RUNNING) {
		if (status & INTR_RX_PKT)
			alc_rxintr(sc);

		if (status & (INTR_DMA_RD_TO_RST | INTR_DMA_WR_TO_RST |
		    INTR_TXQ_TO_RST)) {
			if (status & INTR_DMA_RD_TO_RST)
				printf("%s: DMA read error! -- resetting\n",
				    sc->sc_dev.dv_xname);
			if (status & INTR_DMA_WR_TO_RST)
				printf("%s: DMA write error! -- resetting\n",
				    sc->sc_dev.dv_xname);
			if (status & INTR_TXQ_TO_RST)
				printf("%s: TxQ reset! -- resetting\n",
				    sc->sc_dev.dv_xname);
			alc_init(ifp);
			return (0);
		}

		if (status & INTR_TX_PKT)
			alc_txeof(sc);

		alc_start(ifp);
	}

	claimed = 1;
back:
	/* Re-enable interrupts. */
	CSR_WRITE_4(sc, ALC_INTR_STATUS, 0x7FFFFFFF);
	return (claimed);
}

void
alc_txeof(struct alc_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct alc_txdesc *txd;
	uint32_t cons, prod;
	int prog;

	if (sc->alc_cdata.alc_tx_cnt == 0)
		return;
	bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_tx_ring_map, 0,
	    sc->alc_cdata.alc_tx_ring_map->dm_mapsize,
	    BUS_DMASYNC_POSTWRITE);
	if ((sc->alc_flags & ALC_FLAG_CMB_BUG) == 0) {
		bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_cmb_map, 0,
		    sc->alc_cdata.alc_cmb_map->dm_mapsize,
		    BUS_DMASYNC_POSTREAD);
		prod = sc->alc_rdata.alc_cmb->cons;
	} else
		prod = CSR_READ_4(sc, ALC_MBOX_TD_CONS_IDX);
	/* Assume we're using normal Tx priority queue. */
	prod = (prod & MBOX_TD_CONS_LO_IDX_MASK) >>
	    MBOX_TD_CONS_LO_IDX_SHIFT;
	cons = sc->alc_cdata.alc_tx_cons;
	/*
	 * Go through our Tx list and free mbufs for those
	 * frames which have been transmitted.
	 */
	for (prog = 0; cons != prod; prog++,
	    ALC_DESC_INC(cons, ALC_TX_RING_CNT)) {
		if (sc->alc_cdata.alc_tx_cnt <= 0)
			break;
		prog++;
		ifq_clr_oactive(&ifp->if_snd);
		sc->alc_cdata.alc_tx_cnt--;
		txd = &sc->alc_cdata.alc_txdesc[cons];
		if (txd->tx_m != NULL) {
			/* Reclaim transmitted mbufs. */
			bus_dmamap_sync(sc->sc_dmat, txd->tx_dmamap, 0,
			    txd->tx_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, txd->tx_dmamap);
			m_freem(txd->tx_m);
			txd->tx_m = NULL;
		}
	}

	if ((sc->alc_flags & ALC_FLAG_CMB_BUG) == 0)
		bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_cmb_map, 0,
		    sc->alc_cdata.alc_cmb_map->dm_mapsize, BUS_DMASYNC_PREREAD);
	sc->alc_cdata.alc_tx_cons = cons;
	/*
	 * Unarm watchdog timer only when there are no pending
	 * frames in the Tx queue.
	 */
	if (sc->alc_cdata.alc_tx_cnt == 0)
		ifp->if_timer = 0;
}

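/*
 * The replacement mbuf is loaded into the spare dmamap first; only
 * when that succeeds is the old buffer unloaded and the two maps
 * swapped.  On any failure the ring slot keeps its old, still-mapped
 * mbuf, so Rx can continue after a transient allocation shortage.
 */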
int
alc_newbuf(struct alc_softc *sc, struct alc_rxdesc *rxd)
{
	struct mbuf *m;
	bus_dmamap_t map;
	int error;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);
	MCLGET(m, M_DONTWAIT);
	if (!(m->m_flags & M_EXT)) {
		m_freem(m);
		return (ENOBUFS);
	}

	m->m_len = m->m_pkthdr.len = RX_BUF_SIZE_MAX;

	error = bus_dmamap_load_mbuf(sc->sc_dmat,
	    sc->alc_cdata.alc_rx_sparemap, m, BUS_DMA_NOWAIT);

	if (error != 0) {
		m_freem(m);
		printf("%s: can't load RX mbuf\n", sc->sc_dev.dv_xname);
		return (error);
	}

	if (rxd->rx_m != NULL) {
		bus_dmamap_sync(sc->sc_dmat, rxd->rx_dmamap, 0,
		    rxd->rx_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, rxd->rx_dmamap);
	}
	map = rxd->rx_dmamap;
	rxd->rx_dmamap = sc->alc_cdata.alc_rx_sparemap;
	sc->alc_cdata.alc_rx_sparemap = map;
	bus_dmamap_sync(sc->sc_dmat, rxd->rx_dmamap, 0,
	    rxd->rx_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
	rxd->rx_m = m;
	rxd->rx_desc->addr = htole64(rxd->rx_dmamap->dm_segs[0].ds_addr);
	return (0);
}

void
alc_rxintr(struct alc_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct rx_rdesc *rrd;
	uint32_t nsegs, status;
	int rr_cons, prog;

	bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_rr_ring_map, 0,
	    sc->alc_cdata.alc_rr_ring_map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_rx_ring_map, 0,
	    sc->alc_cdata.alc_rx_ring_map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	rr_cons = sc->alc_cdata.alc_rr_cons;
	for (prog = 0; (ifp->if_flags & IFF_RUNNING) != 0;) {
		rrd = &sc->alc_rdata.alc_rr_ring[rr_cons];
		status = letoh32(rrd->status);
		if ((status & RRD_VALID) == 0)
			break;
		nsegs = RRD_RD_CNT(letoh32(rrd->rdinfo));
		if (nsegs == 0) {
			/* This should not happen! */
			if (alcdebug)
				printf("%s: unexpected segment count -- "
				    "resetting\n", sc->sc_dev.dv_xname);
			break;
		}
		alc_rxeof(sc, rrd);
		/* Clear Rx return status. */
		rrd->status = 0;
		ALC_DESC_INC(rr_cons, ALC_RR_RING_CNT);
		sc->alc_cdata.alc_rx_cons += nsegs;
		sc->alc_cdata.alc_rx_cons %= ALC_RR_RING_CNT;
		prog += nsegs;
	}

	if (prog > 0) {
		/* Update the consumer index. */
		sc->alc_cdata.alc_rr_cons = rr_cons;
		/* Sync Rx return descriptors. */
		bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_rr_ring_map, 0,
		    sc->alc_cdata.alc_rr_ring_map->dm_mapsize,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		/*
		 * Sync updated Rx descriptors such that the controller
		 * sees modified buffer addresses.
		 */
		bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_rx_ring_map, 0,
		    sc->alc_cdata.alc_rx_ring_map->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);
		/*
		 * Let controller know availability of new Rx buffers.
		 * Since alc(4) uses RXQ_CFG_RD_BURST_DEFAULT descriptors
		 * it may be possible to update ALC_MBOX_RD0_PROD_IDX
		 * only when Rx buffer pre-fetching is required. In
		 * addition we already set ALC_RX_RD_FREE_THRESH to
		 * RX_RD_FREE_THRESH_LO_DEFAULT descriptors. However
		 * it still seems that pre-fetching needs more
		 * experimentation.
		 */
		CSR_WRITE_4(sc, ALC_MBOX_RD0_PROD_IDX,
		    sc->alc_cdata.alc_rx_cons);
	}
}

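/*
 * Frames that do not fit one Rx buffer arrive as nsegs consecutive
 * ring entries which are chained back into a single mbuf here.  The
 * hardware cannot strip the Ethernet CRC, so the trailing 4 bytes
 * are trimmed in software.  With an illustrative alc_buf_size of
 * 1536, a 2000-byte frame (CRC included) would arrive as segments of
 * 1536 + 464 bytes and leave as a 1996-byte packet, the last mbuf
 * trimmed to 460 bytes.
 */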
/* Receive a frame. */
void
alc_rxeof(struct alc_softc *sc, struct rx_rdesc *rrd)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct alc_rxdesc *rxd;
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	struct mbuf *mp, *m;
	uint32_t rdinfo, status;
	int count, nsegs, rx_cons;

	status = letoh32(rrd->status);
	rdinfo = letoh32(rrd->rdinfo);
	rx_cons = RRD_RD_IDX(rdinfo);
	nsegs = RRD_RD_CNT(rdinfo);

	sc->alc_cdata.alc_rxlen = RRD_BYTES(status);
	if (status & (RRD_ERR_SUM | RRD_ERR_LENGTH)) {
		/*
		 * We want to pass the following frames to the upper
		 * layer regardless of the error status in the Rx
		 * return ring:
		 *
		 *  o the IP/TCP/UDP checksum is bad.
		 *  o the frame length does not match the protocol
		 *    specific length.
		 *
		 * Force the network stack to compute a checksum for
		 * these errored frames.
		 */
		if ((status & (RRD_ERR_CRC | RRD_ERR_ALIGN |
		    RRD_ERR_TRUNC | RRD_ERR_RUNT)) != 0)
			return;
	}

	for (count = 0; count < nsegs; count++,
	    ALC_DESC_INC(rx_cons, ALC_RX_RING_CNT)) {
		rxd = &sc->alc_cdata.alc_rxdesc[rx_cons];
		mp = rxd->rx_m;
		/* Add a new receive buffer to the ring. */
		if (alc_newbuf(sc, rxd) != 0) {
			ifp->if_iqdrops++;
			/* Reuse Rx buffers. */
			m_freem(sc->alc_cdata.alc_rxhead);
			break;
		}

		/*
		 * Assume we've received a full sized frame.
		 * The actual size is fixed up when we encounter the
		 * end of a multi-segmented frame.
		 */
		mp->m_len = sc->alc_buf_size;

		/* Chain received mbufs. */
		if (sc->alc_cdata.alc_rxhead == NULL) {
			sc->alc_cdata.alc_rxhead = mp;
			sc->alc_cdata.alc_rxtail = mp;
		} else {
			mp->m_flags &= ~M_PKTHDR;
			sc->alc_cdata.alc_rxprev_tail =
			    sc->alc_cdata.alc_rxtail;
			sc->alc_cdata.alc_rxtail->m_next = mp;
			sc->alc_cdata.alc_rxtail = mp;
		}

		if (count == nsegs - 1) {
			/* Last desc. for this frame. */
			m = sc->alc_cdata.alc_rxhead;
			m->m_flags |= M_PKTHDR;
			/*
			 * It seems that the L1C/L2C controller has no
			 * way to tell the hardware to strip the CRC
			 * bytes.
			 */
			m->m_pkthdr.len =
			    sc->alc_cdata.alc_rxlen - ETHER_CRC_LEN;
			if (nsegs > 1) {
				/* Set last mbuf size. */
				mp->m_len = sc->alc_cdata.alc_rxlen -
				    (nsegs - 1) * sc->alc_buf_size;
				/* Remove the CRC bytes in chained mbufs. */
				if (mp->m_len <= ETHER_CRC_LEN) {
					sc->alc_cdata.alc_rxtail =
					    sc->alc_cdata.alc_rxprev_tail;
					sc->alc_cdata.alc_rxtail->m_len -=
					    (ETHER_CRC_LEN - mp->m_len);
					sc->alc_cdata.alc_rxtail->m_next = NULL;
					m_freem(mp);
				} else {
					mp->m_len -= ETHER_CRC_LEN;
				}
			} else
				m->m_len = m->m_pkthdr.len;
			/*
			 * Due to hardware bugs, Rx checksum offloading
			 * was intentionally disabled.
			 */
#if NVLAN > 0
			if (status & RRD_VLAN_TAG) {
				u_int32_t vtag = RRD_VLAN(letoh32(rrd->vtag));
				m->m_pkthdr.ether_vtag = ntohs(vtag);
				m->m_flags |= M_VLANTAG;
			}
#endif
			ml_enqueue(&ml, m);
		}
	}
	if_input(ifp, &ml);

	/* Reset mbuf chains. */
	ALC_RXCHAIN_RESET(sc);
}
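
/*
 * Once-a-second housekeeping: drive the MII state machine, refresh
 * the MAC statistics and rearm the callout for the next tick.
 */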
void
alc_tick(void *xsc)
{
	struct alc_softc *sc = xsc;
	struct mii_data *mii = &sc->sc_miibus;
	int s;

	s = splnet();
	mii_tick(mii);
	alc_stats_update(sc);

	timeout_add_sec(&sc->alc_tick_ch, 1);
	splx(s);
}

void
alc_reset(struct alc_softc *sc)
{
	uint32_t reg;
	int i;

	reg = CSR_READ_4(sc, ALC_MASTER_CFG) & 0xFFFF;
	reg |= MASTER_OOB_DIS_OFF | MASTER_RESET;
	CSR_WRITE_4(sc, ALC_MASTER_CFG, reg);
	for (i = ALC_RESET_TIMEOUT; i > 0; i--) {
		DELAY(10);
		if ((CSR_READ_4(sc, ALC_MASTER_CFG) & MASTER_RESET) == 0)
			break;
	}
	if (i == 0)
		printf("%s: master reset timeout!\n", sc->sc_dev.dv_xname);

	for (i = ALC_RESET_TIMEOUT; i > 0; i--) {
		if ((reg = CSR_READ_4(sc, ALC_IDLE_STATUS)) == 0)
			break;
		DELAY(10);
	}

	if (i == 0)
		printf("%s: reset timeout(0x%08x)!\n", sc->sc_dev.dv_xname,
		    reg);
}
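
/*
 * Bring the interface up from a clean state: cancel pending I/O,
 * reset the chip, rebuild all descriptor rings, then program the
 * station address, descriptor base addresses and MAC/queue/DMA
 * parameters before unmasking interrupts and renegotiating media.
 */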
int
alc_init(struct ifnet *ifp)
{
	struct alc_softc *sc = ifp->if_softc;
	struct mii_data *mii;
	uint8_t eaddr[ETHER_ADDR_LEN];
	bus_addr_t paddr;
	uint32_t reg, rxf_hi, rxf_lo;
	int error;

	/*
	 * Cancel any pending I/O.
	 */
	alc_stop(sc);
	/*
	 * Reset the chip to a known state.
	 */
	alc_reset(sc);

	/* Initialize Rx descriptors. */
	error = alc_init_rx_ring(sc);
	if (error != 0) {
		printf("%s: no memory for Rx buffers.\n", sc->sc_dev.dv_xname);
		alc_stop(sc);
		return (error);
	}
	alc_init_rr_ring(sc);
	alc_init_tx_ring(sc);
	alc_init_cmb(sc);
	alc_init_smb(sc);

	/* Enable all clocks. */
	CSR_WRITE_4(sc, ALC_CLK_GATING_CFG, 0);
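
	/*
	 * The station address is split across two registers: ALC_PAR0
	 * takes the low four bytes and ALC_PAR1 the high two. For an
	 * illustrative address of 00:11:22:33:44:55, the writes below
	 * would be PAR0 = 0x22334455 and PAR1 = 0x0011.
	 */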
	/* Reprogram the station address. */
	bcopy(LLADDR(ifp->if_sadl), eaddr, ETHER_ADDR_LEN);
	CSR_WRITE_4(sc, ALC_PAR0,
	    eaddr[2] << 24 | eaddr[3] << 16 | eaddr[4] << 8 | eaddr[5]);
	CSR_WRITE_4(sc, ALC_PAR1, eaddr[0] << 8 | eaddr[1]);
	/*
	 * Clear the WOL status and disable all WOL features, as WOL
	 * would interfere with Rx operation under normal conditions.
	 */
	CSR_READ_4(sc, ALC_WOL_CFG);
	CSR_WRITE_4(sc, ALC_WOL_CFG, 0);
	/* Set Tx descriptor base addresses. */
	paddr = sc->alc_rdata.alc_tx_ring_paddr;
	CSR_WRITE_4(sc, ALC_TX_BASE_ADDR_HI, ALC_ADDR_HI(paddr));
	CSR_WRITE_4(sc, ALC_TDL_HEAD_ADDR_LO, ALC_ADDR_LO(paddr));
	/* We don't use high priority ring. */
	CSR_WRITE_4(sc, ALC_TDH_HEAD_ADDR_LO, 0);
	/* Set Tx descriptor counter. */
	CSR_WRITE_4(sc, ALC_TD_RING_CNT,
	    (ALC_TX_RING_CNT << TD_RING_CNT_SHIFT) & TD_RING_CNT_MASK);
	/* Set Rx descriptor base addresses. */
	paddr = sc->alc_rdata.alc_rx_ring_paddr;
	CSR_WRITE_4(sc, ALC_RX_BASE_ADDR_HI, ALC_ADDR_HI(paddr));
	CSR_WRITE_4(sc, ALC_RD0_HEAD_ADDR_LO, ALC_ADDR_LO(paddr));
	/* We use one Rx ring. */
	CSR_WRITE_4(sc, ALC_RD1_HEAD_ADDR_LO, 0);
	CSR_WRITE_4(sc, ALC_RD2_HEAD_ADDR_LO, 0);
	CSR_WRITE_4(sc, ALC_RD3_HEAD_ADDR_LO, 0);
	/* Set Rx descriptor counter. */
	CSR_WRITE_4(sc, ALC_RD_RING_CNT,
	    (ALC_RX_RING_CNT << RD_RING_CNT_SHIFT) & RD_RING_CNT_MASK);

	/*
	 * Let the hardware split a jumbo frame that does not fit the
	 * buffer size into alc_buf_size chunks. The Rx return
	 * descriptor holds a counter that indicates how many fragments
	 * the hardware made. The buffer size should be a multiple of
	 * 8 bytes. Since the hardware limits the buffer size, always
	 * use the maximum value.
	 * On strict-alignment architectures, make sure to reduce the
	 * buffer size by 8 bytes to make room for the alignment fixup.
	 */
	sc->alc_buf_size = RX_BUF_SIZE_MAX;
	CSR_WRITE_4(sc, ALC_RX_BUF_SIZE, sc->alc_buf_size);

	paddr = sc->alc_rdata.alc_rr_ring_paddr;
	/* Set Rx return descriptor base addresses. */
	CSR_WRITE_4(sc, ALC_RRD0_HEAD_ADDR_LO, ALC_ADDR_LO(paddr));
	/* We use one Rx return ring. */
	CSR_WRITE_4(sc, ALC_RRD1_HEAD_ADDR_LO, 0);
	CSR_WRITE_4(sc, ALC_RRD2_HEAD_ADDR_LO, 0);
	CSR_WRITE_4(sc, ALC_RRD3_HEAD_ADDR_LO, 0);
	/* Set Rx return descriptor counter. */
	CSR_WRITE_4(sc, ALC_RRD_RING_CNT,
	    (ALC_RR_RING_CNT << RRD_RING_CNT_SHIFT) & RRD_RING_CNT_MASK);
	paddr = sc->alc_rdata.alc_cmb_paddr;
	CSR_WRITE_4(sc, ALC_CMB_BASE_ADDR_LO, ALC_ADDR_LO(paddr));
	paddr = sc->alc_rdata.alc_smb_paddr;
	CSR_WRITE_4(sc, ALC_SMB_BASE_ADDR_HI, ALC_ADDR_HI(paddr));
	CSR_WRITE_4(sc, ALC_SMB_BASE_ADDR_LO, ALC_ADDR_LO(paddr));

	if (sc->sc_product == PCI_PRODUCT_ATTANSIC_L2C_1) {
		/* Reconfigure SRAM - Vendor magic. */
		CSR_WRITE_4(sc, ALC_SRAM_RX_FIFO_LEN, 0x000002A0);
		CSR_WRITE_4(sc, ALC_SRAM_TX_FIFO_LEN, 0x00000100);
		CSR_WRITE_4(sc, ALC_SRAM_RX_FIFO_ADDR, 0x029F0000);
		CSR_WRITE_4(sc, ALC_SRAM_RD0_ADDR, 0x02BF02A0);
		CSR_WRITE_4(sc, ALC_SRAM_TX_FIFO_ADDR, 0x03BF02C0);
		CSR_WRITE_4(sc, ALC_SRAM_TD_ADDR, 0x03DF03C0);
		CSR_WRITE_4(sc, ALC_TXF_WATER_MARK, 0x00000000);
		CSR_WRITE_4(sc, ALC_RD_DMA_CFG, 0x00000000);
	}

	/* Tell hardware that we're ready to load DMA blocks. */
	CSR_WRITE_4(sc, ALC_DMA_BLOCK, DMA_BLOCK_LOAD);

	/* Configure interrupt moderation timer. */
	sc->alc_int_rx_mod = ALC_IM_RX_TIMER_DEFAULT;
	sc->alc_int_tx_mod = ALC_IM_TX_TIMER_DEFAULT;
	reg = ALC_USECS(sc->alc_int_rx_mod) << IM_TIMER_RX_SHIFT;
	reg |= ALC_USECS(sc->alc_int_tx_mod) << IM_TIMER_TX_SHIFT;
	CSR_WRITE_4(sc, ALC_IM_TIMER, reg);
	/*
	 * We don't want automatic interrupt clearing, as the task
	 * queue for the interrupt needs to see the interrupt status.
	 */
	reg = MASTER_SA_TIMER_ENB;
	if (ALC_USECS(sc->alc_int_rx_mod) != 0)
		reg |= MASTER_IM_RX_TIMER_ENB;
	if (ALC_USECS(sc->alc_int_tx_mod) != 0)
		reg |= MASTER_IM_TX_TIMER_ENB;
	CSR_WRITE_4(sc, ALC_MASTER_CFG, reg);
	/*
	 * Disable the interrupt re-trigger timer. We don't want
	 * automatic re-triggering of un-ACKed interrupts.
	 */
	CSR_WRITE_4(sc, ALC_INTR_RETRIG_TIMER, ALC_USECS(0));
	/* Configure CMB. */
	if ((sc->alc_flags & ALC_FLAG_CMB_BUG) == 0) {
		CSR_WRITE_4(sc, ALC_CMB_TD_THRESH, 4);
		CSR_WRITE_4(sc, ALC_CMB_TX_TIMER, ALC_USECS(5000));
	} else
		CSR_WRITE_4(sc, ALC_CMB_TX_TIMER, ALC_USECS(0));
	/*
	 * The hardware can be configured to issue an SMB interrupt at
	 * a programmed interval. Since the driver already has a
	 * callout that runs once a second, use that instead of relying
	 * on the periodic SMB interrupt.
	 */
	CSR_WRITE_4(sc, ALC_SMB_STAT_TIMER, ALC_USECS(0));
	/* Clear MAC statistics. */
	alc_stats_clear(sc);

	/*
	 * Always use the maximum frame size the controller can
	 * support. Otherwise received frames larger than the alc(4)
	 * MTU would be silently dropped by the hardware. This would
	 * make path-MTU discovery hard, as the sender wouldn't get any
	 * responses from the receiver. alc(4) supports
	 * multi-fragmented frames on the Rx path, so it has no issue
	 * assembling fragmented frames. Using the maximum frame size
	 * also removes the need to reinitialize the hardware when the
	 * interface MTU configuration is changed.
	 *
	 * Be conservative in what you do, be liberal in what you
	 * accept from others - RFC 793.
	 */
	CSR_WRITE_4(sc, ALC_FRAME_SIZE, sc->alc_max_framelen);

	/* Disable header split(?) */
	CSR_WRITE_4(sc, ALC_HDS_CFG, 0);

	/* Configure IPG/IFG parameters. */
	CSR_WRITE_4(sc, ALC_IPG_IFG_CFG,
	    ((IPG_IFG_IPGT_DEFAULT << IPG_IFG_IPGT_SHIFT) & IPG_IFG_IPGT_MASK) |
	    ((IPG_IFG_MIFG_DEFAULT << IPG_IFG_MIFG_SHIFT) & IPG_IFG_MIFG_MASK) |
	    ((IPG_IFG_IPG1_DEFAULT << IPG_IFG_IPG1_SHIFT) & IPG_IFG_IPG1_MASK) |
	    ((IPG_IFG_IPG2_DEFAULT << IPG_IFG_IPG2_SHIFT) & IPG_IFG_IPG2_MASK));
	/* Set parameters for half-duplex media. */
	CSR_WRITE_4(sc, ALC_HDPX_CFG,
	    ((HDPX_CFG_LCOL_DEFAULT << HDPX_CFG_LCOL_SHIFT) &
	    HDPX_CFG_LCOL_MASK) |
	    ((HDPX_CFG_RETRY_DEFAULT << HDPX_CFG_RETRY_SHIFT) &
	    HDPX_CFG_RETRY_MASK) | HDPX_CFG_EXC_DEF_EN |
	    ((HDPX_CFG_ABEBT_DEFAULT << HDPX_CFG_ABEBT_SHIFT) &
	    HDPX_CFG_ABEBT_MASK) |
	    ((HDPX_CFG_JAMIPG_DEFAULT << HDPX_CFG_JAMIPG_SHIFT) &
	    HDPX_CFG_JAMIPG_MASK));
	/*
	 * Set the TSO/checksum offload threshold. For frames larger
	 * than this threshold, the hardware won't do TSO/checksum
	 * offloading.
	 */
	CSR_WRITE_4(sc, ALC_TSO_OFFLOAD_THRESH,
	    (sc->alc_max_framelen >> TSO_OFFLOAD_THRESH_UNIT_SHIFT) &
	    TSO_OFFLOAD_THRESH_MASK);
	/* Configure TxQ. */
	reg = (alc_dma_burst[sc->alc_dma_rd_burst] <<
	    TXQ_CFG_TX_FIFO_BURST_SHIFT) & TXQ_CFG_TX_FIFO_BURST_MASK;
	if (sc->sc_product == PCI_PRODUCT_ATTANSIC_L2C_1 ||
	    sc->sc_product == PCI_PRODUCT_ATTANSIC_L2C_2)
		reg >>= 1;
	reg |= (TXQ_CFG_TD_BURST_DEFAULT << TXQ_CFG_TD_BURST_SHIFT) &
	    TXQ_CFG_TD_BURST_MASK;
	CSR_WRITE_4(sc, ALC_TXQ_CFG, reg | TXQ_CFG_ENHANCED_MODE);

	/* Configure Rx free descriptor pre-fetching. */
	CSR_WRITE_4(sc, ALC_RX_RD_FREE_THRESH,
	    ((RX_RD_FREE_THRESH_HI_DEFAULT << RX_RD_FREE_THRESH_HI_SHIFT) &
	    RX_RD_FREE_THRESH_HI_MASK) |
	    ((RX_RD_FREE_THRESH_LO_DEFAULT << RX_RD_FREE_THRESH_LO_SHIFT) &
	    RX_RD_FREE_THRESH_LO_MASK));

	/*
	 * Configure flow control parameters.
	 * XON  : 80% of Rx FIFO
	 * XOFF : 30% of Rx FIFO
	 */
	if (sc->sc_product == PCI_PRODUCT_ATTANSIC_L1C ||
	    sc->sc_product == PCI_PRODUCT_ATTANSIC_L2C) {
		reg = CSR_READ_4(sc, ALC_SRAM_RX_FIFO_LEN);
		rxf_hi = (reg * 8) / 10;
		rxf_lo = (reg * 3) / 10;
		CSR_WRITE_4(sc, ALC_RX_FIFO_PAUSE_THRESH,
		    ((rxf_lo << RX_FIFO_PAUSE_THRESH_LO_SHIFT) &
		    RX_FIFO_PAUSE_THRESH_LO_MASK) |
		    ((rxf_hi << RX_FIFO_PAUSE_THRESH_HI_SHIFT) &
		    RX_FIFO_PAUSE_THRESH_HI_MASK));
	}
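	/*
	 * Illustrative arithmetic for the thresholds above (the FIFO
	 * length value is hardware-dependent): if ALC_SRAM_RX_FIFO_LEN
	 * read back as 512 units, the integer math would yield
	 * rxf_hi = (512 * 8) / 10 = 409 and rxf_lo = (512 * 3) / 10 = 153.
	 */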
	if (sc->sc_product == PCI_PRODUCT_ATTANSIC_L1D_1 ||
	    sc->sc_product == PCI_PRODUCT_ATTANSIC_L2C_1)
		CSR_WRITE_4(sc, ALC_SERDES_LOCK,
		    CSR_READ_4(sc, ALC_SERDES_LOCK) | SERDES_MAC_CLK_SLOWDOWN |
		    SERDES_PHY_CLK_SLOWDOWN);

	/* Disable RSS until I understand L1C/L2C's RSS logic. */
	CSR_WRITE_4(sc, ALC_RSS_IDT_TABLE0, 0);
	CSR_WRITE_4(sc, ALC_RSS_CPU, 0);

	/* Configure RxQ. */
	reg = (RXQ_CFG_RD_BURST_DEFAULT << RXQ_CFG_RD_BURST_SHIFT) &
	    RXQ_CFG_RD_BURST_MASK;
	reg |= RXQ_CFG_RSS_MODE_DIS;
	if ((sc->alc_flags & ALC_FLAG_ASPM_MON) != 0)
		reg |= RXQ_CFG_ASPM_THROUGHPUT_LIMIT_1M;
	CSR_WRITE_4(sc, ALC_RXQ_CFG, reg);

	/* Configure DMA parameters. */
	reg = DMA_CFG_OUT_ORDER | DMA_CFG_RD_REQ_PRI;
	reg |= sc->alc_rcb;
	if ((sc->alc_flags & ALC_FLAG_CMB_BUG) == 0)
		reg |= DMA_CFG_CMB_ENB;
	if ((sc->alc_flags & ALC_FLAG_SMB_BUG) == 0)
		reg |= DMA_CFG_SMB_ENB;
	else
		reg |= DMA_CFG_SMB_DIS;
	reg |= (sc->alc_dma_rd_burst & DMA_CFG_RD_BURST_MASK) <<
	    DMA_CFG_RD_BURST_SHIFT;
	reg |= (sc->alc_dma_wr_burst & DMA_CFG_WR_BURST_MASK) <<
	    DMA_CFG_WR_BURST_SHIFT;
	reg |= (DMA_CFG_RD_DELAY_CNT_DEFAULT << DMA_CFG_RD_DELAY_CNT_SHIFT) &
	    DMA_CFG_RD_DELAY_CNT_MASK;
	reg |= (DMA_CFG_WR_DELAY_CNT_DEFAULT << DMA_CFG_WR_DELAY_CNT_SHIFT) &
	    DMA_CFG_WR_DELAY_CNT_MASK;
	CSR_WRITE_4(sc, ALC_DMA_CFG, reg);

	/*
	 * Configure Tx/Rx MACs.
	 *  - Auto-padding for short frames.
	 *  - Enable CRC generation.
	 * The MAC is reconfigured for the resolved speed/duplex once
	 * link establishment is detected. AR813x/AR815x always
	 * computes checksums regardless of the MAC_CFG_RXCSUM_ENB bit.
	 * The controller is also known to have a bug in the protocol
	 * field of the Rx return structure, so it can't handle
	 * fragmented frames. Disable Rx checksum offloading until
	 * there is a newer controller with a sane implementation.
	 */
	reg = MAC_CFG_TX_CRC_ENB | MAC_CFG_TX_AUTO_PAD | MAC_CFG_FULL_DUPLEX |
	    ((MAC_CFG_PREAMBLE_DEFAULT << MAC_CFG_PREAMBLE_SHIFT) &
	    MAC_CFG_PREAMBLE_MASK);
	if (sc->sc_product == PCI_PRODUCT_ATTANSIC_L1D ||
	    sc->sc_product == PCI_PRODUCT_ATTANSIC_L1D_1 ||
	    sc->sc_product == PCI_PRODUCT_ATTANSIC_L2C_2)
		reg |= MAC_CFG_HASH_ALG_CRC32 | MAC_CFG_SPEED_MODE_SW;
	if ((sc->alc_flags & ALC_FLAG_FASTETHER) != 0)
		reg |= MAC_CFG_SPEED_10_100;
	else
		reg |= MAC_CFG_SPEED_1000;
	CSR_WRITE_4(sc, ALC_MAC_CFG, reg);

	/* Set up the receive filter. */
	alc_iff(sc);

	alc_rxvlan(sc);

	/* Acknowledge all pending interrupts and clear them. */
	CSR_WRITE_4(sc, ALC_INTR_MASK, ALC_INTRS);
	CSR_WRITE_4(sc, ALC_INTR_STATUS, 0xFFFFFFFF);
	CSR_WRITE_4(sc, ALC_INTR_STATUS, 0);

	sc->alc_flags &= ~ALC_FLAG_LINK;
	/* Switch to the current media. */
	mii = &sc->sc_miibus;
	mii_mediachg(mii);

	timeout_add_sec(&sc->alc_tick_ch, 1);

	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	return (0);
}
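
/*
 * Tear down a running interface: mark it down, silence and
 * acknowledge interrupts, stop the queues, DMA engines and MACs,
 * then release every mbuf still held by the Tx and Rx rings so a
 * later alc_init() can rebuild them from scratch.
 */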
void
alc_stop(struct alc_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct alc_txdesc *txd;
	struct alc_rxdesc *rxd;
	uint32_t reg;
	int i;

	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp->if_flags &= ~IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);
	ifp->if_timer = 0;

	timeout_del(&sc->alc_tick_ch);
	sc->alc_flags &= ~ALC_FLAG_LINK;

	alc_stats_update(sc);

	/* Disable interrupts. */
	CSR_WRITE_4(sc, ALC_INTR_MASK, 0);
	CSR_WRITE_4(sc, ALC_INTR_STATUS, 0xFFFFFFFF);
	alc_stop_queue(sc);

	/* Disable DMA. */
	reg = CSR_READ_4(sc, ALC_DMA_CFG);
	reg &= ~(DMA_CFG_CMB_ENB | DMA_CFG_SMB_ENB);
	reg |= DMA_CFG_SMB_DIS;
	CSR_WRITE_4(sc, ALC_DMA_CFG, reg);
	DELAY(1000);

	/* Stop Rx/Tx MACs. */
	alc_stop_mac(sc);

	/* Disable interrupts that might be touched by the taskq handler. */
	CSR_WRITE_4(sc, ALC_INTR_STATUS, 0xFFFFFFFF);

	/* Reclaim Rx buffers that have been processed. */
	m_freem(sc->alc_cdata.alc_rxhead);
	ALC_RXCHAIN_RESET(sc);
	/*
	 * Free Tx/Rx mbufs still in the queues.
	 */
	for (i = 0; i < ALC_RX_RING_CNT; i++) {
		rxd = &sc->alc_cdata.alc_rxdesc[i];
		if (rxd->rx_m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, rxd->rx_dmamap, 0,
			    rxd->rx_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, rxd->rx_dmamap);
			m_freem(rxd->rx_m);
			rxd->rx_m = NULL;
		}
	}
	for (i = 0; i < ALC_TX_RING_CNT; i++) {
		txd = &sc->alc_cdata.alc_txdesc[i];
		if (txd->tx_m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, txd->tx_dmamap, 0,
			    txd->tx_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, txd->tx_dmamap);
			m_freem(txd->tx_m);
			txd->tx_m = NULL;
		}
	}
}

void
alc_stop_mac(struct alc_softc *sc)
{
	uint32_t reg;
	int i;

	/* Disable Rx/Tx MAC. */
	reg = CSR_READ_4(sc, ALC_MAC_CFG);
	if ((reg & (MAC_CFG_TX_ENB | MAC_CFG_RX_ENB)) != 0) {
		reg &= ~(MAC_CFG_TX_ENB | MAC_CFG_RX_ENB);
		CSR_WRITE_4(sc, ALC_MAC_CFG, reg);
	}
	for (i = ALC_TIMEOUT; i > 0; i--) {
		reg = CSR_READ_4(sc, ALC_IDLE_STATUS);
		if (reg == 0)
			break;
		DELAY(10);
	}
	if (i == 0)
		printf("%s: could not disable Rx/Tx MAC(0x%08x)!\n",
		    sc->sc_dev.dv_xname, reg);
}

void
alc_start_queue(struct alc_softc *sc)
{
	uint32_t qcfg[] = {
		0,
		RXQ_CFG_QUEUE0_ENB,
		RXQ_CFG_QUEUE0_ENB | RXQ_CFG_QUEUE1_ENB,
		RXQ_CFG_QUEUE0_ENB | RXQ_CFG_QUEUE1_ENB | RXQ_CFG_QUEUE2_ENB,
		RXQ_CFG_ENB
	};
	uint32_t cfg;

	/* Enable RxQ. */
	cfg = CSR_READ_4(sc, ALC_RXQ_CFG);
	cfg &= ~RXQ_CFG_ENB;
	cfg |= qcfg[1];
	CSR_WRITE_4(sc, ALC_RXQ_CFG, cfg);
	/* Enable TxQ. */
	cfg = CSR_READ_4(sc, ALC_TXQ_CFG);
	cfg |= TXQ_CFG_ENB;
	CSR_WRITE_4(sc, ALC_TXQ_CFG, cfg);
}
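
/*
 * Disable the Rx/Tx queues and wait for them to go idle. With
 * DELAY(10) per iteration, the poll loop below gives the hardware at
 * most ALC_TIMEOUT * 10 microseconds to report idle via
 * ALC_IDLE_STATUS before a diagnostic is printed.
 */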
void
alc_stop_queue(struct alc_softc *sc)
{
	uint32_t reg;
	int i;

	/* Disable RxQ. */
	reg = CSR_READ_4(sc, ALC_RXQ_CFG);
	if ((reg & RXQ_CFG_ENB) != 0) {
		reg &= ~RXQ_CFG_ENB;
		CSR_WRITE_4(sc, ALC_RXQ_CFG, reg);
	}
	/* Disable TxQ. */
	reg = CSR_READ_4(sc, ALC_TXQ_CFG);
	if ((reg & TXQ_CFG_ENB) != 0) {
		reg &= ~TXQ_CFG_ENB;
		CSR_WRITE_4(sc, ALC_TXQ_CFG, reg);
	}
	for (i = ALC_TIMEOUT; i > 0; i--) {
		reg = CSR_READ_4(sc, ALC_IDLE_STATUS);
		if ((reg & (IDLE_STATUS_RXQ | IDLE_STATUS_TXQ)) == 0)
			break;
		DELAY(10);
	}
	if (i == 0)
		printf("%s: could not disable RxQ/TxQ (0x%08x)!\n",
		    sc->sc_dev.dv_xname, reg);
}

void
alc_init_tx_ring(struct alc_softc *sc)
{
	struct alc_ring_data *rd;
	struct alc_txdesc *txd;
	int i;

	sc->alc_cdata.alc_tx_prod = 0;
	sc->alc_cdata.alc_tx_cons = 0;
	sc->alc_cdata.alc_tx_cnt = 0;

	rd = &sc->alc_rdata;
	bzero(rd->alc_tx_ring, ALC_TX_RING_SZ);
	for (i = 0; i < ALC_TX_RING_CNT; i++) {
		txd = &sc->alc_cdata.alc_txdesc[i];
		txd->tx_m = NULL;
	}

	bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_tx_ring_map, 0,
	    sc->alc_cdata.alc_tx_ring_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
}

int
alc_init_rx_ring(struct alc_softc *sc)
{
	struct alc_ring_data *rd;
	struct alc_rxdesc *rxd;
	int i;

	sc->alc_cdata.alc_rx_cons = ALC_RX_RING_CNT - 1;
	rd = &sc->alc_rdata;
	bzero(rd->alc_rx_ring, ALC_RX_RING_SZ);
	for (i = 0; i < ALC_RX_RING_CNT; i++) {
		rxd = &sc->alc_cdata.alc_rxdesc[i];
		rxd->rx_m = NULL;
		rxd->rx_desc = &rd->alc_rx_ring[i];
		if (alc_newbuf(sc, rxd) != 0)
			return (ENOBUFS);
	}

	/*
	 * Since the controller does not update Rx descriptors, the
	 * driver does not have to read them back, so
	 * BUS_DMASYNC_PREWRITE is enough to ensure coherence.
	 */
	bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_rx_ring_map, 0,
	    sc->alc_cdata.alc_rx_ring_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
	/* Let controller know availability of new Rx buffers. */
	CSR_WRITE_4(sc, ALC_MBOX_RD0_PROD_IDX, sc->alc_cdata.alc_rx_cons);

	return (0);
}
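
/*
 * Unlike the plain Rx ring above, the Rx return ring and the CMB/SMB
 * blocks below are written by the controller and read by the driver,
 * so they are synced PREREAD | PREWRITE after being cleared.
 */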
void
alc_init_rr_ring(struct alc_softc *sc)
{
	struct alc_ring_data *rd;

	sc->alc_cdata.alc_rr_cons = 0;
	ALC_RXCHAIN_RESET(sc);

	rd = &sc->alc_rdata;
	bzero(rd->alc_rr_ring, ALC_RR_RING_SZ);
	bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_rr_ring_map, 0,
	    sc->alc_cdata.alc_rr_ring_map->dm_mapsize,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}

void
alc_init_cmb(struct alc_softc *sc)
{
	struct alc_ring_data *rd;

	rd = &sc->alc_rdata;
	bzero(rd->alc_cmb, ALC_CMB_SZ);
	bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_cmb_map, 0,
	    sc->alc_cdata.alc_cmb_map->dm_mapsize,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}

void
alc_init_smb(struct alc_softc *sc)
{
	struct alc_ring_data *rd;

	rd = &sc->alc_rdata;
	bzero(rd->alc_smb, ALC_SMB_SZ);
	bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_smb_map, 0,
	    sc->alc_cdata.alc_smb_map->dm_mapsize,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}

void
alc_rxvlan(struct alc_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	uint32_t reg;

	reg = CSR_READ_4(sc, ALC_MAC_CFG);
	if (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING)
		reg |= MAC_CFG_VLAN_TAG_STRIP;
	else
		reg &= ~MAC_CFG_VLAN_TAG_STRIP;
	CSR_WRITE_4(sc, ALC_MAC_CFG, reg);
}

void
alc_iff(struct alc_softc *sc)
{
	struct arpcom *ac = &sc->sc_arpcom;
	struct ifnet *ifp = &ac->ac_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint32_t crc;
	uint32_t mchash[2];
	uint32_t rxcfg;

	rxcfg = CSR_READ_4(sc, ALC_MAC_CFG);
	rxcfg &= ~(MAC_CFG_ALLMULTI | MAC_CFG_BCAST | MAC_CFG_PROMISC);
	ifp->if_flags &= ~IFF_ALLMULTI;

	/*
	 * Always accept broadcast frames.
	 */
	rxcfg |= MAC_CFG_BCAST;

	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
		ifp->if_flags |= IFF_ALLMULTI;
		if (ifp->if_flags & IFF_PROMISC)
			rxcfg |= MAC_CFG_PROMISC;
		else
			rxcfg |= MAC_CFG_ALLMULTI;
		mchash[0] = mchash[1] = 0xFFFFFFFF;
	} else {
		/* Program new filter. */
		bzero(mchash, sizeof(mchash));

		ETHER_FIRST_MULTI(step, ac, enm);
		while (enm != NULL) {
			crc = ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN);

			/*
			 * The upper six bits of the big-endian CRC pick
			 * one of the 64 hash bits: bit 31 selects the
			 * MAR0/MAR1 word and bits 30-26 the bit within it.
			 */
			mchash[crc >> 31] |= 1 << ((crc >> 26) & 0x1f);

			ETHER_NEXT_MULTI(step, enm);
		}
	}

	CSR_WRITE_4(sc, ALC_MAR0, mchash[0]);
	CSR_WRITE_4(sc, ALC_MAR1, mchash[1]);
	CSR_WRITE_4(sc, ALC_MAC_CFG, rxcfg);
}