/*	$NetBSD: if_alc.c,v 1.37 2019/05/28 07:41:49 msaitoh Exp $	*/
/*	$OpenBSD: if_alc.c,v 1.1 2009/08/08 09:31:13 kevlo Exp $	*/
/*-
 * Copyright (c) 2009, Pyun YongHyeon <yongari@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/* Driver for Atheros AR813x/AR815x PCIe Ethernet. */

#ifdef _KERNEL_OPT
#include "vlan.h"
#endif

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/queue.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/callout.h>
#include <sys/socket.h>
#include <sys/module.h>

#include <sys/bus.h>

#include <net/bpf.h>
#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_llc.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#endif

#include <net/if_types.h>
#include <net/if_vlanvar.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_alcreg.h>
/*
 * Devices supported by this driver.
 */
static struct alc_ident alc_ident_table[] = {
	{ PCI_VENDOR_ATTANSIC, PCI_PRODUCT_ATTANSIC_AR8131, 9 * 1024,
		"Atheros AR8131 PCIe Gigabit Ethernet" },
	{ PCI_VENDOR_ATTANSIC, PCI_PRODUCT_ATTANSIC_AR8132, 9 * 1024,
		"Atheros AR8132 PCIe Fast Ethernet" },
	{ PCI_VENDOR_ATTANSIC, PCI_PRODUCT_ATTANSIC_AR8151, 6 * 1024,
		"Atheros AR8151 v1.0 PCIe Gigabit Ethernet" },
	{ PCI_VENDOR_ATTANSIC, PCI_PRODUCT_ATTANSIC_AR8151_V2, 6 * 1024,
		"Atheros AR8151 v2.0 PCIe Gigabit Ethernet" },
	{ PCI_VENDOR_ATTANSIC, PCI_PRODUCT_ATTANSIC_AR8152_B, 6 * 1024,
		"Atheros AR8152 v1.1 PCIe Fast Ethernet" },
	{ PCI_VENDOR_ATTANSIC, PCI_PRODUCT_ATTANSIC_AR8152_B2, 6 * 1024,
		"Atheros AR8152 v2.0 PCIe Fast Ethernet" },
	{ PCI_VENDOR_ATTANSIC, PCI_PRODUCT_ATTANSIC_AR8161, 9 * 1024,
		"Atheros AR8161 PCIe Gigabit Ethernet" },
	{ PCI_VENDOR_ATTANSIC, PCI_PRODUCT_ATTANSIC_AR8162, 9 * 1024,
		"Atheros AR8162 PCIe Fast Ethernet" },
	{ PCI_VENDOR_ATTANSIC, PCI_PRODUCT_ATTANSIC_AR8171, 9 * 1024,
		"Atheros AR8171 PCIe Gigabit Ethernet" },
	{ PCI_VENDOR_ATTANSIC, PCI_PRODUCT_ATTANSIC_AR8172, 9 * 1024,
		"Atheros AR8172 PCIe Fast Ethernet" },
	{ PCI_VENDOR_ATTANSIC, PCI_PRODUCT_ATTANSIC_E2200, 9 * 1024,
		"Killer E2200 Gigabit Ethernet" },
	{ 0, 0, 0, NULL },
};

static int	alc_match(device_t, cfdata_t, void *);
static void	alc_attach(device_t, device_t, void *);
static int	alc_detach(device_t, int);

static int	alc_init(struct ifnet *);
static int	alc_init_backend(struct ifnet *, bool);
static void	alc_start(struct ifnet *);
static int	alc_ioctl(struct ifnet *, u_long, void *);
static void	alc_watchdog(struct ifnet *);
static int	alc_mediachange(struct ifnet *);
static void	alc_mediastatus(struct ifnet *, struct ifmediareq *);

static void	alc_aspm(struct alc_softc *, int, int);
static void	alc_aspm_813x(struct alc_softc *, int);
static void	alc_aspm_816x(struct alc_softc *, int);
static void	alc_disable_l0s_l1(struct alc_softc *);
static int	alc_dma_alloc(struct alc_softc *);
static void	alc_dma_free(struct alc_softc *);
static void	alc_dsp_fixup(struct alc_softc *, int);
static int	alc_encap(struct alc_softc *, struct mbuf **);
static struct alc_ident *
		alc_find_ident(struct pci_attach_args *);
static void	alc_get_macaddr(struct alc_softc *);
static void	alc_get_macaddr_813x(struct alc_softc *);
static void	alc_get_macaddr_816x(struct alc_softc *);
static void	alc_get_macaddr_par(struct alc_softc *);
static void	alc_init_cmb(struct alc_softc *);
static void	alc_init_rr_ring(struct alc_softc *);
static int	alc_init_rx_ring(struct alc_softc *, bool);
static void	alc_init_smb(struct alc_softc *);
static void	alc_init_tx_ring(struct alc_softc *);
static int	alc_intr(void *);
static void	alc_mac_config(struct alc_softc *);
static int	alc_mii_readreg_813x(struct alc_softc *, int, int, uint16_t *);
static int	alc_mii_readreg_816x(struct alc_softc *, int, int, uint16_t *);
static int	alc_mii_writereg_813x(struct alc_softc *, int, int, uint16_t);
static int	alc_mii_writereg_816x(struct alc_softc *, int, int, uint16_t);
static int	alc_miibus_readreg(device_t, int, int, uint16_t *);
static void	alc_miibus_statchg(struct ifnet *);
static int	alc_miibus_writereg(device_t, int, int, uint16_t);
static int	alc_miidbg_readreg(struct alc_softc *, int, uint16_t *);
static int	alc_miidbg_writereg(struct alc_softc *, int, uint16_t);
static int	alc_miiext_readreg(struct alc_softc *, int, int, uint16_t *);
static int	alc_miiext_writereg(struct alc_softc *, int, int, uint16_t);
static int	alc_newbuf(struct alc_softc *, struct alc_rxdesc *, bool);
static void	alc_phy_down(struct alc_softc *);
static void	alc_phy_reset(struct alc_softc *);
static void	alc_phy_reset_813x(struct alc_softc *);
static void	alc_phy_reset_816x(struct alc_softc *);
static void	alc_reset(struct alc_softc *);
static void	alc_rxeof(struct alc_softc *, struct rx_rdesc *);
static int	alc_rxintr(struct alc_softc *);
static void	alc_iff(struct alc_softc *);
static void	alc_rxvlan(struct alc_softc *);
static void	alc_start_queue(struct alc_softc *);
static void	alc_stats_clear(struct alc_softc *);
static void	alc_stats_update(struct alc_softc *);
static void	alc_stop(struct ifnet *, int);
static void	alc_stop_mac(struct alc_softc *);
static void	alc_stop_queue(struct alc_softc *);
static void	alc_tick(void *);
static void	alc_txeof(struct alc_softc *);

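/*
 * A note on the table below (my reading, not from the original source):
 * alc_dma_burst[] maps the 3-bit PCIe maximum-read-request/payload-size
 * encodings (0..5) to byte counts, i.e. index i corresponds to
 * 128 << i bytes; the trailing 0 presumably covers reserved encodings.
 */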
uint32_t alc_dma_burst[] = { 128, 256, 512, 1024, 2048, 4096, 0 };

CFATTACH_DECL_NEW(alc, sizeof(struct alc_softc),
    alc_match, alc_attach, alc_detach, NULL);

int alcdebug = 0;
#define	DPRINTF(x)	do { if (alcdebug) printf x; } while (0)

#define ALC_CSUM_FEATURES	(M_CSUM_TCPv4 | M_CSUM_UDPv4)

static int
alc_miibus_readreg(device_t dev, int phy, int reg, uint16_t *val)
{
	struct alc_softc *sc = device_private(dev);
	int v;

	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0)
		v = alc_mii_readreg_816x(sc, phy, reg, val);
	else
		v = alc_mii_readreg_813x(sc, phy, reg, val);
	return (v);
}

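/*
 * MDIO access pattern used throughout this file: write an EXECUTE
 * command to the ALC_MDIO register, then poll in 5us steps (bounded by
 * ALC_PHY_TIMEOUT iterations) until the controller clears the busy
 * bits; read data, when applicable, is extracted from the same register.
 */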
static int
alc_mii_readreg_813x(struct alc_softc *sc, int phy, int reg, uint16_t *val)
{
	uint32_t v;
	int i;

	if (phy != sc->alc_phyaddr)
		return -1;

	/*
	 * For the AR8132 fast ethernet controller, do not report 1000baseT
	 * capability to mii(4). Even though AR8132 uses the same
	 * model/revision number as the F1 gigabit PHY, the PHY has no
	 * ability to establish a 1000baseT link.
	 */
	if ((sc->alc_flags & ALC_FLAG_FASTETHER) != 0 && reg == MII_EXTSR) {
		*val = 0;
		return 0;
	}

	CSR_WRITE_4(sc, ALC_MDIO, MDIO_OP_EXECUTE | MDIO_OP_READ |
	    MDIO_SUP_PREAMBLE | MDIO_CLK_25_4 | MDIO_REG_ADDR(reg));
	for (i = ALC_PHY_TIMEOUT; i > 0; i--) {
		DELAY(5);
		v = CSR_READ_4(sc, ALC_MDIO);
		if ((v & (MDIO_OP_EXECUTE | MDIO_OP_BUSY)) == 0)
			break;
	}

	if (i == 0) {
		printf("%s: phy read timeout: phy %d, reg %d\n",
		    device_xname(sc->sc_dev), phy, reg);
		return ETIMEDOUT;
	}

	*val = (v & MDIO_DATA_MASK) >> MDIO_DATA_SHIFT;
	return 0;
}

static int
alc_mii_readreg_816x(struct alc_softc *sc, int phy, int reg, uint16_t *val)
{
	uint32_t clk, v;
	int i;

	if (phy != sc->alc_phyaddr)
		return -1;

	if ((sc->alc_flags & ALC_FLAG_LINK) != 0)
		clk = MDIO_CLK_25_128;
	else
		clk = MDIO_CLK_25_4;
	CSR_WRITE_4(sc, ALC_MDIO, MDIO_OP_EXECUTE | MDIO_OP_READ |
	    MDIO_SUP_PREAMBLE | clk | MDIO_REG_ADDR(reg));
	for (i = ALC_PHY_TIMEOUT; i > 0; i--) {
		DELAY(5);
		v = CSR_READ_4(sc, ALC_MDIO);
		if ((v & MDIO_OP_BUSY) == 0)
			break;
	}

	if (i == 0) {
		printf("%s: phy read timeout: phy %d, reg %d\n",
		    device_xname(sc->sc_dev), phy, reg);
		return ETIMEDOUT;
	}

	*val = (v & MDIO_DATA_MASK) >> MDIO_DATA_SHIFT;
	return 0;
}

static int
alc_miibus_writereg(device_t dev, int phy, int reg, uint16_t val)
{
	struct alc_softc *sc = device_private(dev);
	int rv;

	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0)
		rv = alc_mii_writereg_816x(sc, phy, reg, val);
	else
		rv = alc_mii_writereg_813x(sc, phy, reg, val);

	return rv;
}

static int
alc_mii_writereg_813x(struct alc_softc *sc, int phy, int reg, uint16_t val)
{
	uint32_t v;
	int i;

	CSR_WRITE_4(sc, ALC_MDIO, MDIO_OP_EXECUTE | MDIO_OP_WRITE |
	    ((val & MDIO_DATA_MASK) << MDIO_DATA_SHIFT) |
	    MDIO_SUP_PREAMBLE | MDIO_CLK_25_4 | MDIO_REG_ADDR(reg));
	for (i = ALC_PHY_TIMEOUT; i > 0; i--) {
		DELAY(5);
		v = CSR_READ_4(sc, ALC_MDIO);
		if ((v & (MDIO_OP_EXECUTE | MDIO_OP_BUSY)) == 0)
			break;
	}

	if (i == 0) {
		printf("%s: phy write timeout: phy %d, reg %d\n",
		    device_xname(sc->sc_dev), phy, reg);
		return ETIMEDOUT;
	}

	return 0;
}

static int
alc_mii_writereg_816x(struct alc_softc *sc, int phy, int reg, uint16_t val)
{
	uint32_t clk, v;
	int i;

	if ((sc->alc_flags & ALC_FLAG_LINK) != 0)
		clk = MDIO_CLK_25_128;
	else
		clk = MDIO_CLK_25_4;
	CSR_WRITE_4(sc, ALC_MDIO, MDIO_OP_EXECUTE | MDIO_OP_WRITE |
	    ((val & MDIO_DATA_MASK) << MDIO_DATA_SHIFT) | MDIO_REG_ADDR(reg) |
	    MDIO_SUP_PREAMBLE | clk);
	for (i = ALC_PHY_TIMEOUT; i > 0; i--) {
		DELAY(5);
		v = CSR_READ_4(sc, ALC_MDIO);
		if ((v & MDIO_OP_BUSY) == 0)
			break;
	}

	if (i == 0) {
		printf("%s: phy write timeout: phy %d, reg %d\n",
		    device_xname(sc->sc_dev), phy, reg);
		return ETIMEDOUT;
	}

	return 0;
}

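/*
 * Link state change handling: the Rx/Tx MACs are stopped before being
 * reprogrammed with the resolved speed/duplex and are only re-enabled
 * once a link is actually up, presumably because the MAC must not be
 * reconfigured while it is running.
 */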
static void
alc_miibus_statchg(struct ifnet *ifp)
{
	struct alc_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_miibus;
	uint32_t reg;

	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return;

	sc->alc_flags &= ~ALC_FLAG_LINK;
	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
	    (IFM_ACTIVE | IFM_AVALID)) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			sc->alc_flags |= ALC_FLAG_LINK;
			break;
		case IFM_1000_T:
			if ((sc->alc_flags & ALC_FLAG_FASTETHER) == 0)
				sc->alc_flags |= ALC_FLAG_LINK;
			break;
		default:
			break;
		}
	}
	/* Stop Rx/Tx MACs. */
	alc_stop_mac(sc);

	/* Program MACs with resolved speed/duplex/flow-control. */
	if ((sc->alc_flags & ALC_FLAG_LINK) != 0) {
		alc_start_queue(sc);
		alc_mac_config(sc);
		/* Re-enable Tx/Rx MACs. */
		reg = CSR_READ_4(sc, ALC_MAC_CFG);
		reg |= MAC_CFG_TX_ENB | MAC_CFG_RX_ENB;
		CSR_WRITE_4(sc, ALC_MAC_CFG, reg);
	}
	alc_aspm(sc, 0, IFM_SUBTYPE(mii->mii_media_active));
	alc_dsp_fixup(sc, IFM_SUBTYPE(mii->mii_media_active));
}

static int
alc_miidbg_readreg(struct alc_softc *sc, int reg, uint16_t *val)
{
	int rv;

	rv = alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr, ALC_MII_DBG_ADDR,
	    reg);
	if (rv != 0)
		return rv;

	return (alc_miibus_readreg(sc->sc_dev, sc->alc_phyaddr,
	    ALC_MII_DBG_DATA, val));
}

static int
alc_miidbg_writereg(struct alc_softc *sc, int reg, uint16_t val)
{
	int rv;

	rv = alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr, ALC_MII_DBG_ADDR,
	    reg);
	if (rv != 0)
		return rv;

	rv = alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr, ALC_MII_DBG_DATA,
	    val);

	return rv;
}

static int
alc_miiext_readreg(struct alc_softc *sc, int devaddr, int reg, uint16_t *val)
{
	uint32_t clk, v;
	int i;

	CSR_WRITE_4(sc, ALC_EXT_MDIO, EXT_MDIO_REG(reg) |
	    EXT_MDIO_DEVADDR(devaddr));
	if ((sc->alc_flags & ALC_FLAG_LINK) != 0)
		clk = MDIO_CLK_25_128;
	else
		clk = MDIO_CLK_25_4;
	CSR_WRITE_4(sc, ALC_MDIO, MDIO_OP_EXECUTE | MDIO_OP_READ |
	    MDIO_SUP_PREAMBLE | clk | MDIO_MODE_EXT);
	for (i = ALC_PHY_TIMEOUT; i > 0; i--) {
		DELAY(5);
		v = CSR_READ_4(sc, ALC_MDIO);
		if ((v & MDIO_OP_BUSY) == 0)
			break;
	}

	if (i == 0) {
		printf("%s: phy ext read timeout: reg %d\n",
		    device_xname(sc->sc_dev), reg);
		return ETIMEDOUT;
	}

	*val = (v & MDIO_DATA_MASK) >> MDIO_DATA_SHIFT;
	return 0;
}

static int
alc_miiext_writereg(struct alc_softc *sc, int devaddr, int reg, uint16_t val)
{
	uint32_t clk, v;
	int i;

	CSR_WRITE_4(sc, ALC_EXT_MDIO, EXT_MDIO_REG(reg) |
	    EXT_MDIO_DEVADDR(devaddr));
	if ((sc->alc_flags & ALC_FLAG_LINK) != 0)
		clk = MDIO_CLK_25_128;
	else
		clk = MDIO_CLK_25_4;
	CSR_WRITE_4(sc, ALC_MDIO, MDIO_OP_EXECUTE | MDIO_OP_WRITE |
	    ((val & MDIO_DATA_MASK) << MDIO_DATA_SHIFT) |
	    MDIO_SUP_PREAMBLE | clk | MDIO_MODE_EXT);
	for (i = ALC_PHY_TIMEOUT; i > 0; i--) {
		DELAY(5);
		v = CSR_READ_4(sc, ALC_MDIO);
		if ((v & MDIO_OP_BUSY) == 0)
			break;
	}

	if (i == 0) {
		printf("%s: phy ext write timeout: reg %d\n",
		    device_xname(sc->sc_dev), reg);
		return ETIMEDOUT;
	}

	return 0;
}

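/*
 * alc_dsp_fixup() applies vendor PHY DSP workarounds on pre-C0 AR816x
 * parts: depending on the link speed and on the cable length/AGC
 * readings it adjusts analog thresholds, and on B0 parts with the link
 * workaround flag it additionally raises the gigabit noise tolerance.
 */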
static void
alc_dsp_fixup(struct alc_softc *sc, int media)
{
	uint16_t agc, len, val;

	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) == 0)
		return;
	if (AR816X_REV(sc->alc_rev) >= AR816X_REV_C0)
		return;

	/*
	 * Vendor PHY magic.
	 * 1000BT/AZ, wrong cable length.
	 */
	if ((sc->alc_flags & ALC_FLAG_LINK) != 0) {
		alc_miiext_readreg(sc, MII_EXT_PCS, MII_EXT_CLDCTL6, &len);
		len = (len >> EXT_CLDCTL6_CAB_LEN_SHIFT) &
		    EXT_CLDCTL6_CAB_LEN_MASK;
		/* XXX: used to be (alc >> shift) & mask which is 0 */
		alc_miidbg_readreg(sc, MII_DBG_AGC, &agc);
		agc &= DBG_AGC_2_VGA_MASK;
		agc >>= DBG_AGC_2_VGA_SHIFT;
		if ((media == IFM_1000_T && len > EXT_CLDCTL6_CAB_LEN_SHORT1G &&
		    agc > DBG_AGC_LONG1G_LIMT) ||
		    (media == IFM_100_TX && len > DBG_AGC_LONG100M_LIMT &&
		    agc > DBG_AGC_LONG1G_LIMT)) {
			alc_miidbg_writereg(sc, MII_DBG_AZ_ANADECT,
			    DBG_AZ_ANADECT_LONG);
			alc_miiext_readreg(sc, MII_EXT_ANEG,
			    MII_EXT_ANEG_AFE, &val);
			val |= ANEG_AFEE_10BT_100M_TH;
			alc_miiext_writereg(sc, MII_EXT_ANEG, MII_EXT_ANEG_AFE,
			    val);
		} else {
			alc_miidbg_writereg(sc, MII_DBG_AZ_ANADECT,
			    DBG_AZ_ANADECT_DEFAULT);
			alc_miiext_readreg(sc, MII_EXT_ANEG,
			    MII_EXT_ANEG_AFE, &val);
			val &= ~ANEG_AFEE_10BT_100M_TH;
			alc_miiext_writereg(sc, MII_EXT_ANEG, MII_EXT_ANEG_AFE,
			    val);
		}
		if ((sc->alc_flags & ALC_FLAG_LINK_WAR) != 0 &&
		    AR816X_REV(sc->alc_rev) == AR816X_REV_B0) {
			if (media == IFM_1000_T) {
				/*
				 * Giga link threshold; raise the noise
				 * tolerance by 50%.
				 */
				alc_miidbg_readreg(sc, MII_DBG_MSE20DB, &val);
				val &= ~DBG_MSE20DB_TH_MASK;
				val |= (DBG_MSE20DB_TH_HI <<
				    DBG_MSE20DB_TH_SHIFT);
				alc_miidbg_writereg(sc, MII_DBG_MSE20DB, val);
			} else if (media == IFM_100_TX)
				alc_miidbg_writereg(sc, MII_DBG_MSE16DB,
				    DBG_MSE16DB_UP);
		}
	} else {
		alc_miiext_readreg(sc, MII_EXT_ANEG, MII_EXT_ANEG_AFE, &val);
		val &= ~ANEG_AFEE_10BT_100M_TH;
		alc_miiext_writereg(sc, MII_EXT_ANEG, MII_EXT_ANEG_AFE, val);
		if ((sc->alc_flags & ALC_FLAG_LINK_WAR) != 0 &&
		    AR816X_REV(sc->alc_rev) == AR816X_REV_B0) {
			alc_miidbg_writereg(sc, MII_DBG_MSE16DB,
			    DBG_MSE16DB_DOWN);
			alc_miidbg_readreg(sc, MII_DBG_MSE20DB, &val);
			val &= ~DBG_MSE20DB_TH_MASK;
			val |= (DBG_MSE20DB_TH_DEFAULT << DBG_MSE20DB_TH_SHIFT);
			alc_miidbg_writereg(sc, MII_DBG_MSE20DB, val);
		}
	}
}

static void
alc_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct alc_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_miibus;

	if ((ifp->if_flags & IFF_UP) == 0)
		return;

	mii_pollstat(mii);
	ifmr->ifm_status = mii->mii_media_status;
	ifmr->ifm_active = mii->mii_media_active;
}

static int
alc_mediachange(struct ifnet *ifp)
{
	struct alc_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_miibus;
	int error;

	if (mii->mii_instance != 0) {
		struct mii_softc *miisc;

		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
	error = mii_mediachg(mii);

	return (error);
}

static struct alc_ident *
alc_find_ident(struct pci_attach_args *pa)
{
	struct alc_ident *ident;
	uint16_t vendor, devid;

	vendor = PCI_VENDOR(pa->pa_id);
	devid = PCI_PRODUCT(pa->pa_id);
	for (ident = alc_ident_table; ident->name != NULL; ident++) {
		if (vendor == ident->vendorid && devid == ident->deviceid)
			return (ident);
	}

	return (NULL);
}

static int
alc_match(device_t dev, cfdata_t match, void *aux)
{
	struct pci_attach_args *pa = aux;

	return alc_find_ident(pa) != NULL;
}

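/*
 * Station address handling: depending on the family, the address is
 * first reloaded into the chip from TWSI EEPROM or flash (if present)
 * and then read back from the PAR0/PAR1 registers.
 */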
static void
alc_get_macaddr(struct alc_softc *sc)
{

	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0)
		alc_get_macaddr_816x(sc);
	else
		alc_get_macaddr_813x(sc);
}

static void
alc_get_macaddr_813x(struct alc_softc *sc)
{
	uint32_t opt;
	uint16_t val;
	int eeprom, i;

	eeprom = 0;
	opt = CSR_READ_4(sc, ALC_OPT_CFG);
	if ((CSR_READ_4(sc, ALC_MASTER_CFG) & MASTER_OTP_SEL) != 0 &&
	    (CSR_READ_4(sc, ALC_TWSI_DEBUG) & TWSI_DEBUG_DEV_EXIST) != 0) {
		/*
		 * EEPROM found, let TWSI reload the EEPROM configuration.
		 * This will set the ethernet address of the controller.
		 */
		eeprom++;
		switch (sc->alc_ident->deviceid) {
		case PCI_PRODUCT_ATTANSIC_AR8131:
		case PCI_PRODUCT_ATTANSIC_AR8132:
			if ((opt & OPT_CFG_CLK_ENB) == 0) {
				opt |= OPT_CFG_CLK_ENB;
				CSR_WRITE_4(sc, ALC_OPT_CFG, opt);
				CSR_READ_4(sc, ALC_OPT_CFG);
				DELAY(1000);
			}
			break;
		case PCI_PRODUCT_ATTANSIC_AR8151:
		case PCI_PRODUCT_ATTANSIC_AR8151_V2:
		case PCI_PRODUCT_ATTANSIC_AR8152_B:
		case PCI_PRODUCT_ATTANSIC_AR8152_B2:
			alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
			    ALC_MII_DBG_ADDR, 0x00);
			alc_miibus_readreg(sc->sc_dev, sc->alc_phyaddr,
			    ALC_MII_DBG_DATA, &val);
			alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
			    ALC_MII_DBG_DATA, val & 0xFF7F);
			alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
			    ALC_MII_DBG_ADDR, 0x3B);
			alc_miibus_readreg(sc->sc_dev, sc->alc_phyaddr,
			    ALC_MII_DBG_DATA, &val);
			alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
			    ALC_MII_DBG_DATA, val | 0x0008);
			DELAY(20);
			break;
		}

		CSR_WRITE_4(sc, ALC_LTSSM_ID_CFG,
		    CSR_READ_4(sc, ALC_LTSSM_ID_CFG) & ~LTSSM_ID_WRO_ENB);
		CSR_WRITE_4(sc, ALC_WOL_CFG, 0);
		CSR_READ_4(sc, ALC_WOL_CFG);

		CSR_WRITE_4(sc, ALC_TWSI_CFG, CSR_READ_4(sc, ALC_TWSI_CFG) |
		    TWSI_CFG_SW_LD_START);
		for (i = 100; i > 0; i--) {
			DELAY(1000);
			if ((CSR_READ_4(sc, ALC_TWSI_CFG) &
			    TWSI_CFG_SW_LD_START) == 0)
				break;
		}
		if (i == 0)
			printf("%s: reloading EEPROM timed out!\n",
			    device_xname(sc->sc_dev));
	} else {
		if (alcdebug)
			printf("%s: EEPROM not found!\n",
			    device_xname(sc->sc_dev));
	}
	if (eeprom != 0) {
		switch (sc->alc_ident->deviceid) {
		case PCI_PRODUCT_ATTANSIC_AR8131:
		case PCI_PRODUCT_ATTANSIC_AR8132:
			if ((opt & OPT_CFG_CLK_ENB) != 0) {
				opt &= ~OPT_CFG_CLK_ENB;
				CSR_WRITE_4(sc, ALC_OPT_CFG, opt);
				CSR_READ_4(sc, ALC_OPT_CFG);
				DELAY(1000);
			}
			break;
		case PCI_PRODUCT_ATTANSIC_AR8151:
		case PCI_PRODUCT_ATTANSIC_AR8151_V2:
		case PCI_PRODUCT_ATTANSIC_AR8152_B:
		case PCI_PRODUCT_ATTANSIC_AR8152_B2:
			alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
			    ALC_MII_DBG_ADDR, 0x00);
			alc_miibus_readreg(sc->sc_dev, sc->alc_phyaddr,
			    ALC_MII_DBG_DATA, &val);
			alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
			    ALC_MII_DBG_DATA, val | 0x0080);
			alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
			    ALC_MII_DBG_ADDR, 0x3B);
			alc_miibus_readreg(sc->sc_dev, sc->alc_phyaddr,
			    ALC_MII_DBG_DATA, &val);
			alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
			    ALC_MII_DBG_DATA, val & 0xFFF7);
			DELAY(20);
			break;
		}
	}

	alc_get_macaddr_par(sc);
}

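/*
 * On AR816x the station address reload can come either through the
 * TWSI serial interface (SLD register) or from an attached EEPROM/flash
 * part; each path is polled with a bounded wait, and the PAR registers
 * are read afterwards regardless of the outcome.
 */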
static void
alc_get_macaddr_816x(struct alc_softc *sc)
{
	uint32_t reg;
	int i, reloaded;

	reloaded = 0;
	/* Try to reload the station address via TWSI. */
	for (i = 100; i > 0; i--) {
		reg = CSR_READ_4(sc, ALC_SLD);
		if ((reg & (SLD_PROGRESS | SLD_START)) == 0)
			break;
		DELAY(1000);
	}
	if (i != 0) {
		CSR_WRITE_4(sc, ALC_SLD, reg | SLD_START);
		for (i = 100; i > 0; i--) {
			DELAY(1000);
			reg = CSR_READ_4(sc, ALC_SLD);
			if ((reg & SLD_START) == 0)
				break;
		}
		if (i != 0)
			reloaded++;
		else if (alcdebug)
			printf("%s: reloading station address via TWSI timed out!\n",
			    device_xname(sc->sc_dev));
	}

	/* Try to reload the station address from EEPROM or FLASH. */
	if (reloaded == 0) {
		reg = CSR_READ_4(sc, ALC_EEPROM_LD);
		if ((reg & (EEPROM_LD_EEPROM_EXIST |
		    EEPROM_LD_FLASH_EXIST)) != 0) {
			for (i = 100; i > 0; i--) {
				reg = CSR_READ_4(sc, ALC_EEPROM_LD);
				if ((reg & (EEPROM_LD_PROGRESS |
				    EEPROM_LD_START)) == 0)
					break;
				DELAY(1000);
			}
			if (i != 0) {
				CSR_WRITE_4(sc, ALC_EEPROM_LD, reg |
				    EEPROM_LD_START);
				for (i = 100; i > 0; i--) {
					DELAY(1000);
					reg = CSR_READ_4(sc, ALC_EEPROM_LD);
					if ((reg & EEPROM_LD_START) == 0)
						break;
				}
			} else if (alcdebug)
				printf("%s: reloading EEPROM/FLASH timed out!\n",
				    device_xname(sc->sc_dev));
		}
	}

	alc_get_macaddr_par(sc);
}

static void
alc_get_macaddr_par(struct alc_softc *sc)
{
	uint32_t ea[2];

	/*
	 * The station address is kept byte-swapped in the PAR registers:
	 * PAR1 holds the two most significant bytes, PAR0 the remaining
	 * four.
	 */
	ea[0] = CSR_READ_4(sc, ALC_PAR0);
	ea[1] = CSR_READ_4(sc, ALC_PAR1);
	sc->alc_eaddr[0] = (ea[1] >> 8) & 0xFF;
	sc->alc_eaddr[1] = (ea[1] >> 0) & 0xFF;
	sc->alc_eaddr[2] = (ea[0] >> 24) & 0xFF;
	sc->alc_eaddr[3] = (ea[0] >> 16) & 0xFF;
	sc->alc_eaddr[4] = (ea[0] >> 8) & 0xFF;
	sc->alc_eaddr[5] = (ea[0] >> 0) & 0xFF;
}

static void
alc_disable_l0s_l1(struct alc_softc *sc)
{
	uint32_t pmcfg;

	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) == 0) {
		/* Another magic from vendor. */
		pmcfg = CSR_READ_4(sc, ALC_PM_CFG);
		pmcfg &= ~(PM_CFG_L1_ENTRY_TIMER_MASK | PM_CFG_CLK_SWH_L1 |
		    PM_CFG_ASPM_L0S_ENB | PM_CFG_ASPM_L1_ENB |
		    PM_CFG_MAC_ASPM_CHK | PM_CFG_SERDES_PD_EX_L1);
		pmcfg |= PM_CFG_SERDES_BUDS_RX_L1_ENB |
		    PM_CFG_SERDES_PLL_L1_ENB | PM_CFG_SERDES_L1_ENB;
		CSR_WRITE_4(sc, ALC_PM_CFG, pmcfg);
	}
}

static void
alc_phy_reset(struct alc_softc *sc)
{

	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0)
		alc_phy_reset_816x(sc);
	else
		alc_phy_reset_813x(sc);
}

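/*
 * AR813x/AR815x PHY reset: pulse the analog and external reset lines,
 * apply per-chip DSP fixups, load DSP coefficients through the debug
 * registers, and finally clear the hibernation bits.  The constants are
 * vendor magic and not otherwise documented.
 */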
static void
alc_phy_reset_813x(struct alc_softc *sc)
{
	uint16_t data;

	/* Reset magic from Linux. */
	CSR_WRITE_2(sc, ALC_GPHY_CFG, GPHY_CFG_SEL_ANA_RESET);
	CSR_READ_2(sc, ALC_GPHY_CFG);
	DELAY(10 * 1000);

	CSR_WRITE_2(sc, ALC_GPHY_CFG, GPHY_CFG_EXT_RESET |
	    GPHY_CFG_SEL_ANA_RESET);
	CSR_READ_2(sc, ALC_GPHY_CFG);
	DELAY(10 * 1000);

	/* DSP fixup, vendor magic. */
	if (sc->alc_ident->deviceid == PCI_PRODUCT_ATTANSIC_AR8152_B) {
		alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
		    ALC_MII_DBG_ADDR, 0x000A);
		alc_miibus_readreg(sc->sc_dev, sc->alc_phyaddr,
		    ALC_MII_DBG_DATA, &data);
		alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
		    ALC_MII_DBG_DATA, data & 0xDFFF);
	}
	if (sc->alc_ident->deviceid == PCI_PRODUCT_ATTANSIC_AR8151 ||
	    sc->alc_ident->deviceid == PCI_PRODUCT_ATTANSIC_AR8151_V2 ||
	    sc->alc_ident->deviceid == PCI_PRODUCT_ATTANSIC_AR8152_B ||
	    sc->alc_ident->deviceid == PCI_PRODUCT_ATTANSIC_AR8152_B2) {
		alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
		    ALC_MII_DBG_ADDR, 0x003B);
		alc_miibus_readreg(sc->sc_dev, sc->alc_phyaddr,
		    ALC_MII_DBG_DATA, &data);
		alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
		    ALC_MII_DBG_DATA, data & 0xFFF7);
		DELAY(20 * 1000);
	}
	if (sc->alc_ident->deviceid == PCI_PRODUCT_ATTANSIC_AR8151) {
		alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
		    ALC_MII_DBG_ADDR, 0x0029);
		alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
		    ALC_MII_DBG_DATA, 0x929D);
	}
	if (sc->alc_ident->deviceid == PCI_PRODUCT_ATTANSIC_AR8131 ||
	    sc->alc_ident->deviceid == PCI_PRODUCT_ATTANSIC_AR8132 ||
	    sc->alc_ident->deviceid == PCI_PRODUCT_ATTANSIC_AR8151_V2 ||
	    sc->alc_ident->deviceid == PCI_PRODUCT_ATTANSIC_AR8152_B2) {
		alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
		    ALC_MII_DBG_ADDR, 0x0029);
		alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
		    ALC_MII_DBG_DATA, 0xB6DD);
	}

	/* Load DSP codes, vendor magic. */
	data = ANA_LOOP_SEL_10BT | ANA_EN_MASK_TB | ANA_EN_10BT_IDLE |
	    ((1 << ANA_INTERVAL_SEL_TIMER_SHIFT) & ANA_INTERVAL_SEL_TIMER_MASK);
	alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
	    ALC_MII_DBG_ADDR, MII_ANA_CFG18);
	alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
	    ALC_MII_DBG_DATA, data);

	data = ((2 << ANA_SERDES_CDR_BW_SHIFT) & ANA_SERDES_CDR_BW_MASK) |
	    ANA_SERDES_EN_DEEM | ANA_SERDES_SEL_HSP | ANA_SERDES_EN_PLL |
	    ANA_SERDES_EN_LCKDT;
	alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
	    ALC_MII_DBG_ADDR, MII_ANA_CFG5);
	alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
	    ALC_MII_DBG_DATA, data);

	/*
	 * XXX: masking with ..._SHIFT below looks wrong, but it matches
	 * the vendor-derived code this was taken from.
	 */
	data = ((44 << ANA_LONG_CABLE_TH_100_SHIFT) &
	    ANA_LONG_CABLE_TH_100_MASK) |
	    ((33 << ANA_SHORT_CABLE_TH_100_SHIFT) &
	    ANA_SHORT_CABLE_TH_100_SHIFT) |
	    ANA_BP_BAD_LINK_ACCUM | ANA_BP_SMALL_BW;
	alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
	    ALC_MII_DBG_ADDR, MII_ANA_CFG54);
	alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
	    ALC_MII_DBG_DATA, data);

	data = ((11 << ANA_IECHO_ADJ_3_SHIFT) & ANA_IECHO_ADJ_3_MASK) |
	    ((11 << ANA_IECHO_ADJ_2_SHIFT) & ANA_IECHO_ADJ_2_MASK) |
	    ((8 << ANA_IECHO_ADJ_1_SHIFT) & ANA_IECHO_ADJ_1_MASK) |
	    ((8 << ANA_IECHO_ADJ_0_SHIFT) & ANA_IECHO_ADJ_0_MASK);
	alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
	    ALC_MII_DBG_ADDR, MII_ANA_CFG4);
	alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
	    ALC_MII_DBG_DATA, data);

	/* XXX: `7 & ...SHIFT' (not `<<') is how the vendor-derived code reads. */
	data = ((7 & ANA_MANUL_SWICH_ON_SHIFT) & ANA_MANUL_SWICH_ON_MASK) |
	    ANA_RESTART_CAL | ANA_MAN_ENABLE | ANA_SEL_HSP | ANA_EN_HB |
	    ANA_OEN_125M;
	alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
	    ALC_MII_DBG_ADDR, MII_ANA_CFG0);
	alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
	    ALC_MII_DBG_DATA, data);
	DELAY(1000);
	/* Disable hibernation. */
	alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr, ALC_MII_DBG_ADDR,
	    0x0029);
	alc_miibus_readreg(sc->sc_dev, sc->alc_phyaddr,
	    ALC_MII_DBG_DATA, &data);
	data &= ~0x8000;
	alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr, ALC_MII_DBG_DATA,
	    data);

	alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr, ALC_MII_DBG_ADDR,
	    0x000B);
	alc_miibus_readreg(sc->sc_dev, sc->alc_phyaddr,
	    ALC_MII_DBG_DATA, &data);
	data &= ~0x8000;
	alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr, ALC_MII_DBG_DATA,
	    data);
}

static void
alc_phy_reset_816x(struct alc_softc *sc)
{
	uint32_t val;
	uint16_t phyval;

	val = CSR_READ_4(sc, ALC_GPHY_CFG);
	val &= ~(GPHY_CFG_EXT_RESET | GPHY_CFG_LED_MODE |
	    GPHY_CFG_GATE_25M_ENB | GPHY_CFG_PHY_IDDQ | GPHY_CFG_PHY_PLL_ON |
	    GPHY_CFG_PWDOWN_HW | GPHY_CFG_100AB_ENB);
	val |= GPHY_CFG_SEL_ANA_RESET;
#ifdef notyet
	val |= GPHY_CFG_HIB_PULSE | GPHY_CFG_HIB_EN | GPHY_CFG_SEL_ANA_RESET;
#else
	/* Disable PHY hibernation. */
	val &= ~(GPHY_CFG_HIB_PULSE | GPHY_CFG_HIB_EN);
#endif
	CSR_WRITE_4(sc, ALC_GPHY_CFG, val);
	DELAY(10);
	CSR_WRITE_4(sc, ALC_GPHY_CFG, val | GPHY_CFG_EXT_RESET);
	DELAY(800);

	/* Vendor PHY magic. */
#ifdef notyet
	alc_miidbg_writereg(sc, MII_DBG_LEGCYPS, DBG_LEGCYPS_DEFAULT);
	alc_miidbg_writereg(sc, MII_DBG_SYSMODCTL, DBG_SYSMODCTL_DEFAULT);
	alc_miiext_writereg(sc, MII_EXT_PCS, MII_EXT_VDRVBIAS,
	    EXT_VDRVBIAS_DEFAULT);
#else
	/* Disable PHY hibernation. */
	alc_miidbg_writereg(sc, MII_DBG_LEGCYPS,
	    DBG_LEGCYPS_DEFAULT & ~DBG_LEGCYPS_ENB);
	alc_miidbg_writereg(sc, MII_DBG_HIBNEG,
	    DBG_HIBNEG_DEFAULT & ~(DBG_HIBNEG_PSHIB_EN | DBG_HIBNEG_HIB_PULSE));
	alc_miidbg_writereg(sc, MII_DBG_GREENCFG, DBG_GREENCFG_DEFAULT);
#endif

	/* XXX Disable EEE. */
	val = CSR_READ_4(sc, ALC_LPI_CTL);
	val &= ~LPI_CTL_ENB;
	CSR_WRITE_4(sc, ALC_LPI_CTL, val);
	alc_miiext_writereg(sc, MII_EXT_ANEG, MII_EXT_ANEG_LOCAL_EEEADV, 0);

	/* PHY power saving. */
	alc_miidbg_writereg(sc, MII_DBG_TST10BTCFG, DBG_TST10BTCFG_DEFAULT);
	alc_miidbg_writereg(sc, MII_DBG_SRDSYSMOD, DBG_SRDSYSMOD_DEFAULT);
	alc_miidbg_writereg(sc, MII_DBG_TST100BTCFG, DBG_TST100BTCFG_DEFAULT);
	alc_miidbg_writereg(sc, MII_DBG_ANACTL, DBG_ANACTL_DEFAULT);
	alc_miidbg_readreg(sc, MII_DBG_GREENCFG2, &phyval);
	phyval &= ~DBG_GREENCFG2_GATE_DFSE_EN;
	alc_miidbg_writereg(sc, MII_DBG_GREENCFG2, phyval);

	/* RTL8139C, 120m issue. */
	alc_miiext_writereg(sc, MII_EXT_ANEG, MII_EXT_ANEG_NLP78,
	    ANEG_NLP78_120M_DEFAULT);
	alc_miiext_writereg(sc, MII_EXT_ANEG, MII_EXT_ANEG_S3DIG10,
	    ANEG_S3DIG10_DEFAULT);

	if ((sc->alc_flags & ALC_FLAG_LINK_WAR) != 0) {
		/* Turn off half amplitude. */
		alc_miiext_readreg(sc, MII_EXT_PCS, MII_EXT_CLDCTL3, &phyval);
		phyval |= EXT_CLDCTL3_BP_CABLE1TH_DET_GT;
		alc_miiext_writereg(sc, MII_EXT_PCS, MII_EXT_CLDCTL3, phyval);
		/* Turn off Green feature. */
		alc_miidbg_readreg(sc, MII_DBG_GREENCFG2, &phyval);
		phyval |= DBG_GREENCFG2_BP_GREEN;
		alc_miidbg_writereg(sc, MII_DBG_GREENCFG2, phyval);
		/* Turn off half bias. */
		alc_miiext_readreg(sc, MII_EXT_PCS, MII_EXT_CLDCTL5, &phyval);
		phyval |= EXT_CLDCTL5_BP_VD_HLFBIAS;
		alc_miiext_writereg(sc, MII_EXT_PCS, MII_EXT_CLDCTL5, phyval);
	}
}

static void
alc_phy_down(struct alc_softc *sc)
{
	uint32_t gphy;

	switch (sc->alc_ident->deviceid) {
	case PCI_PRODUCT_ATTANSIC_AR8161:
	case PCI_PRODUCT_ATTANSIC_E2200:
	case PCI_PRODUCT_ATTANSIC_AR8162:
	case PCI_PRODUCT_ATTANSIC_AR8171:
	case PCI_PRODUCT_ATTANSIC_AR8172:
		gphy = CSR_READ_4(sc, ALC_GPHY_CFG);
		gphy &= ~(GPHY_CFG_EXT_RESET | GPHY_CFG_LED_MODE |
		    GPHY_CFG_100AB_ENB | GPHY_CFG_PHY_PLL_ON);
		gphy |= GPHY_CFG_HIB_EN | GPHY_CFG_HIB_PULSE |
		    GPHY_CFG_SEL_ANA_RESET;
		gphy |= GPHY_CFG_PHY_IDDQ | GPHY_CFG_PWDOWN_HW;
		CSR_WRITE_4(sc, ALC_GPHY_CFG, gphy);
		break;
	case PCI_PRODUCT_ATTANSIC_AR8151:
	case PCI_PRODUCT_ATTANSIC_AR8151_V2:
	case PCI_PRODUCT_ATTANSIC_AR8152_B:
	case PCI_PRODUCT_ATTANSIC_AR8152_B2:
		/*
		 * GPHY power down caused more problems on AR8151 v2.0.
		 * When the driver is reloaded after GPHY power down,
		 * accesses to PHY/MAC registers hung the system. Only
		 * a cold boot recovered from it. I'm not sure whether
		 * AR8151 v1.0 also requires this workaround; I don't
		 * have an AR8151 v1.0 controller in hand.
		 * The only option left is to isolate the PHY and power
		 * it down, which in turn saves more power when the
		 * driver is unloaded.
		 */
		alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
		    MII_BMCR, BMCR_ISO | BMCR_PDOWN);
		break;
	default:
		/* Force PHY down. */
		CSR_WRITE_2(sc, ALC_GPHY_CFG, GPHY_CFG_EXT_RESET |
		    GPHY_CFG_SEL_ANA_RESET | GPHY_CFG_PHY_IDDQ |
		    GPHY_CFG_PWDOWN_HW);
		DELAY(1000);
		break;
	}
}

static void
alc_aspm(struct alc_softc *sc, int init, int media)
{

	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0)
		alc_aspm_816x(sc, init);
	else
		alc_aspm_813x(sc, media);
}

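/*
 * ASPM (PCIe Active State Power Management) configuration: the PM_CFG
 * register controls when the link may enter the L0s/L1 low-power states
 * and how the SerDes behaves around L1 entry.  The 813x and 816x
 * families need different recipes, both derived from vendor code.
 */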
static void
alc_aspm_813x(struct alc_softc *sc, int media)
{
	uint32_t pmcfg;
	uint16_t linkcfg;

	pmcfg = CSR_READ_4(sc, ALC_PM_CFG);
	if ((sc->alc_flags & (ALC_FLAG_APS | ALC_FLAG_PCIE)) ==
	    (ALC_FLAG_APS | ALC_FLAG_PCIE))
		linkcfg = CSR_READ_2(sc, sc->alc_expcap + PCIE_LCSR);
	else
		linkcfg = 0;
	pmcfg &= ~PM_CFG_SERDES_PD_EX_L1;
	pmcfg &= ~(PM_CFG_L1_ENTRY_TIMER_MASK | PM_CFG_LCKDET_TIMER_MASK);
	pmcfg |= PM_CFG_MAC_ASPM_CHK;
	pmcfg |= (PM_CFG_LCKDET_TIMER_DEFAULT << PM_CFG_LCKDET_TIMER_SHIFT);
	pmcfg &= ~(PM_CFG_ASPM_L1_ENB | PM_CFG_ASPM_L0S_ENB);

	if ((sc->alc_flags & ALC_FLAG_APS) != 0) {
		/* Disable extended sync except AR8152 B v1.0 */
		linkcfg &= ~0x80;
		if (sc->alc_ident->deviceid == PCI_PRODUCT_ATTANSIC_AR8152_B &&
		    sc->alc_rev == ATHEROS_AR8152_B_V10)
			linkcfg |= 0x80;
		CSR_WRITE_2(sc, sc->alc_expcap + PCIE_LCSR, linkcfg);
		pmcfg &= ~(PM_CFG_EN_BUFS_RX_L0S | PM_CFG_SA_DLY_ENB |
		    PM_CFG_HOTRST);
		pmcfg |= (PM_CFG_L1_ENTRY_TIMER_DEFAULT <<
		    PM_CFG_L1_ENTRY_TIMER_SHIFT);
		pmcfg &= ~PM_CFG_PM_REQ_TIMER_MASK;
		pmcfg |= (PM_CFG_PM_REQ_TIMER_DEFAULT <<
		    PM_CFG_PM_REQ_TIMER_SHIFT);
		pmcfg |= PM_CFG_SERDES_PD_EX_L1 | PM_CFG_PCIE_RECV;
	}

	if ((sc->alc_flags & ALC_FLAG_LINK) != 0) {
		if ((sc->alc_flags & ALC_FLAG_L0S) != 0)
			pmcfg |= PM_CFG_ASPM_L0S_ENB;
		if ((sc->alc_flags & ALC_FLAG_L1S) != 0)
			pmcfg |= PM_CFG_ASPM_L1_ENB;
		if ((sc->alc_flags & ALC_FLAG_APS) != 0) {
			if (sc->alc_ident->deviceid ==
			    PCI_PRODUCT_ATTANSIC_AR8152_B)
				pmcfg &= ~PM_CFG_ASPM_L0S_ENB;
			pmcfg &= ~(PM_CFG_SERDES_L1_ENB |
			    PM_CFG_SERDES_PLL_L1_ENB |
			    PM_CFG_SERDES_BUDS_RX_L1_ENB);
			pmcfg |= PM_CFG_CLK_SWH_L1;
			if (media == IFM_100_TX || media == IFM_1000_T) {
				pmcfg &= ~PM_CFG_L1_ENTRY_TIMER_MASK;
				switch (sc->alc_ident->deviceid) {
				case PCI_PRODUCT_ATTANSIC_AR8152_B:
					pmcfg |= (7 <<
					    PM_CFG_L1_ENTRY_TIMER_SHIFT);
					break;
				case PCI_PRODUCT_ATTANSIC_AR8152_B2:
				case PCI_PRODUCT_ATTANSIC_AR8151_V2:
					pmcfg |= (4 <<
					    PM_CFG_L1_ENTRY_TIMER_SHIFT);
					break;
				default:
					pmcfg |= (15 <<
					    PM_CFG_L1_ENTRY_TIMER_SHIFT);
					break;
				}
			}
		} else {
			pmcfg |= PM_CFG_SERDES_L1_ENB |
			    PM_CFG_SERDES_PLL_L1_ENB |
			    PM_CFG_SERDES_BUDS_RX_L1_ENB;
			pmcfg &= ~(PM_CFG_CLK_SWH_L1 |
			    PM_CFG_ASPM_L1_ENB | PM_CFG_ASPM_L0S_ENB);
		}
	} else {
		pmcfg &= ~(PM_CFG_SERDES_BUDS_RX_L1_ENB | PM_CFG_SERDES_L1_ENB |
		    PM_CFG_SERDES_PLL_L1_ENB);
		pmcfg |= PM_CFG_CLK_SWH_L1;
		if ((sc->alc_flags & ALC_FLAG_L1S) != 0)
			pmcfg |= PM_CFG_ASPM_L1_ENB;
	}
	CSR_WRITE_4(sc, ALC_PM_CFG, pmcfg);
}

static void
alc_aspm_816x(struct alc_softc *sc, int init)
{
	uint32_t pmcfg;

	pmcfg = CSR_READ_4(sc, ALC_PM_CFG);
	pmcfg &= ~PM_CFG_L1_ENTRY_TIMER_816X_MASK;
	pmcfg |= PM_CFG_L1_ENTRY_TIMER_816X_DEFAULT;
	pmcfg &= ~PM_CFG_PM_REQ_TIMER_MASK;
	pmcfg |= PM_CFG_PM_REQ_TIMER_816X_DEFAULT;
	pmcfg &= ~PM_CFG_LCKDET_TIMER_MASK;
	pmcfg |= PM_CFG_LCKDET_TIMER_DEFAULT;
	pmcfg |= PM_CFG_SERDES_PD_EX_L1 | PM_CFG_CLK_SWH_L1 | PM_CFG_PCIE_RECV;
	pmcfg &= ~(PM_CFG_RX_L1_AFTER_L0S | PM_CFG_TX_L1_AFTER_L0S |
	    PM_CFG_ASPM_L1_ENB | PM_CFG_ASPM_L0S_ENB |
	    PM_CFG_SERDES_L1_ENB | PM_CFG_SERDES_PLL_L1_ENB |
	    PM_CFG_SERDES_BUDS_RX_L1_ENB | PM_CFG_SA_DLY_ENB |
	    PM_CFG_MAC_ASPM_CHK | PM_CFG_HOTRST);
	if (AR816X_REV(sc->alc_rev) <= AR816X_REV_A1 &&
	    (sc->alc_rev & 0x01) != 0)
		pmcfg |= PM_CFG_SERDES_L1_ENB | PM_CFG_SERDES_PLL_L1_ENB;
	if ((sc->alc_flags & ALC_FLAG_LINK) != 0) {
		/* Link up, enable both L0s, L1s. */
		pmcfg |= PM_CFG_ASPM_L0S_ENB | PM_CFG_ASPM_L1_ENB |
		    PM_CFG_MAC_ASPM_CHK;
	} else {
		if (init != 0)
			pmcfg |= PM_CFG_ASPM_L0S_ENB | PM_CFG_ASPM_L1_ENB |
			    PM_CFG_MAC_ASPM_CHK;
		else if ((sc->sc_ec.ec_if.if_flags & IFF_RUNNING) != 0)
			pmcfg |= PM_CFG_ASPM_L1_ENB | PM_CFG_MAC_ASPM_CHK;
	}
	CSR_WRITE_4(sc, ALC_PM_CFG, pmcfg);
}

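/*
 * Attachment: identify the chip, map its registers, establish the
 * interrupt, probe the PCIe capability for DMA burst sizes and ASPM
 * support, reset the MAC and PHY, allocate descriptor DMA memory, and
 * finally attach the interface and the MII bus.
 */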
static void
alc_attach(device_t parent, device_t self, void *aux)
{
	struct alc_softc *sc = device_private(self);
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr;
	struct ifnet *ifp;
	struct mii_data * const mii = &sc->sc_miibus;
	pcireg_t memtype;
	const char *aspm_state[] = { "L0s/L1", "L0s", "L1", "L0s/L1" };
	uint16_t burst;
	int base, mii_flags, state, error = 0;
	uint32_t cap, ctl, val;
	char intrbuf[PCI_INTRSTR_LEN];

	sc->alc_ident = alc_find_ident(pa);

	aprint_naive("\n");
	aprint_normal(": %s\n", sc->alc_ident->name);

	sc->sc_dev = self;
	sc->sc_dmat = pa->pa_dmat;
	sc->sc_pct = pa->pa_pc;
	sc->sc_pcitag = pa->pa_tag;

	/*
	 * Map I/O memory.
	 */
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, ALC_PCIR_BAR);
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT_1M:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		break;
	default:
		aprint_error_dev(self, "invalid base address register\n");
		break;
	}

	if (pci_mapreg_map(pa, ALC_PCIR_BAR, memtype, 0, &sc->sc_mem_bt,
	    &sc->sc_mem_bh, NULL, &sc->sc_mem_size)) {
		aprint_error_dev(self, "could not map mem space\n");
		return;
	}

	if (pci_intr_map(pa, &ih) != 0) {
		printf(": can't map interrupt\n");
		goto fail;
	}

	/*
	 * Establish the interrupt handler.
	 */
	intrstr = pci_intr_string(sc->sc_pct, ih, intrbuf, sizeof(intrbuf));
	sc->sc_irq_handle = pci_intr_establish_xname(pc, ih, IPL_NET, alc_intr,
	    sc, device_xname(self));
	if (sc->sc_irq_handle == NULL) {
		printf(": could not establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		goto fail;
	}
	aprint_normal_dev(self, "interrupting at %s\n", intrstr);

	/* Set PHY address. */
	sc->alc_phyaddr = ALC_PHY_ADDR;

	/* Initialize DMA parameters. */
	sc->alc_dma_rd_burst = 0;
	sc->alc_dma_wr_burst = 0;
	sc->alc_rcb = DMA_CFG_RCB_64;
	if (pci_get_capability(pc, pa->pa_tag, PCI_CAP_PCIEXPRESS,
	    &base, NULL)) {
		sc->alc_flags |= ALC_FLAG_PCIE;
		sc->alc_expcap = base;
		burst = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
		    base + PCIE_DCSR) >> 16;
		sc->alc_dma_rd_burst = (burst & 0x7000) >> 12;
		sc->alc_dma_wr_burst = (burst & 0x00e0) >> 5;
		if (alcdebug) {
			printf("%s: Read request size : %u bytes.\n",
			    device_xname(sc->sc_dev),
			    alc_dma_burst[sc->alc_dma_rd_burst]);
			printf("%s: TLP payload size : %u bytes.\n",
			    device_xname(sc->sc_dev),
			    alc_dma_burst[sc->alc_dma_wr_burst]);
		}
		if (alc_dma_burst[sc->alc_dma_rd_burst] > 1024)
			sc->alc_dma_rd_burst = 3;
		if (alc_dma_burst[sc->alc_dma_wr_burst] > 1024)
			sc->alc_dma_wr_burst = 3;

		/* Clear data link and flow-control protocol errors. */
		val = CSR_READ_4(sc, ALC_PEX_UNC_ERR_SEV);
		val &= ~(PEX_UNC_ERR_SEV_DLP | PEX_UNC_ERR_SEV_FCP);
		CSR_WRITE_4(sc, ALC_PEX_UNC_ERR_SEV, val);

		if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) == 0) {
			CSR_WRITE_4(sc, ALC_LTSSM_ID_CFG,
			    CSR_READ_4(sc, ALC_LTSSM_ID_CFG) & ~LTSSM_ID_WRO_ENB);
			CSR_WRITE_4(sc, ALC_PCIE_PHYMISC,
			    CSR_READ_4(sc, ALC_PCIE_PHYMISC) |
			    PCIE_PHYMISC_FORCE_RCV_DET);
			if (sc->alc_ident->deviceid ==
			    PCI_PRODUCT_ATTANSIC_AR8152_B &&
			    sc->alc_rev == ATHEROS_AR8152_B_V10) {
				val = CSR_READ_4(sc, ALC_PCIE_PHYMISC2);
				val &= ~(PCIE_PHYMISC2_SERDES_CDR_MASK |
				    PCIE_PHYMISC2_SERDES_TH_MASK);
				val |= 3 << PCIE_PHYMISC2_SERDES_CDR_SHIFT;
				val |= 3 << PCIE_PHYMISC2_SERDES_TH_SHIFT;
				CSR_WRITE_4(sc, ALC_PCIE_PHYMISC2, val);
			}
			/* Disable ASPM L0S and L1. */
			cap = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
			    base + PCIE_LCAP) >> 16;
			if ((cap & PCIE_LCAP_ASPM) != 0) {
				ctl = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
				    base + PCIE_LCSR) >> 16;
				if ((ctl & 0x08) != 0)
					sc->alc_rcb = DMA_CFG_RCB_128;
				if (alcdebug)
					printf("%s: RCB %u bytes\n",
					    device_xname(sc->sc_dev),
					    sc->alc_rcb == DMA_CFG_RCB_64 ?
					    64 : 128);
				state = ctl & 0x03;
				if (state & 0x01)
					sc->alc_flags |= ALC_FLAG_L0S;
				if (state & 0x02)
					sc->alc_flags |= ALC_FLAG_L1S;
				if (alcdebug)
					printf("%s: ASPM %s %s\n",
					    device_xname(sc->sc_dev),
					    aspm_state[state],
					    state == 0 ? "disabled" : "enabled");
				alc_disable_l0s_l1(sc);
			} else {
				aprint_debug_dev(sc->sc_dev,
				    "no ASPM support\n");
			}
		} else {
			val = CSR_READ_4(sc, ALC_PDLL_TRNS1);
			val &= ~PDLL_TRNS1_D3PLLOFF_ENB;
			CSR_WRITE_4(sc, ALC_PDLL_TRNS1, val);
			val = CSR_READ_4(sc, ALC_MASTER_CFG);
			if (AR816X_REV(sc->alc_rev) <= AR816X_REV_A1 &&
			    (sc->alc_rev & 0x01) != 0) {
				if ((val & MASTER_WAKEN_25M) == 0 ||
				    (val & MASTER_CLK_SEL_DIS) == 0) {
					val |= MASTER_WAKEN_25M |
					    MASTER_CLK_SEL_DIS;
					CSR_WRITE_4(sc, ALC_MASTER_CFG, val);
				}
			} else {
				if ((val & MASTER_WAKEN_25M) == 0 ||
				    (val & MASTER_CLK_SEL_DIS) != 0) {
					val |= MASTER_WAKEN_25M;
					val &= ~MASTER_CLK_SEL_DIS;
					CSR_WRITE_4(sc, ALC_MASTER_CFG, val);
				}
			}
		}
		alc_aspm(sc, 1, IFM_UNKNOWN);
	}

	/* Reset PHY. */
	alc_phy_reset(sc);

	/* Reset the ethernet controller. */
	alc_stop_mac(sc);
	alc_reset(sc);
	/*
	 * One odd thing is that AR8132 uses the same PHY hardware (F1
	 * gigabit PHY) as AR8131. So atphy(4) on AR8132 reports that the
	 * PHY supports 1000Mbps, but that's not true. The PHY used in
	 * AR8132 can't establish a gigabit link even though it shows the
	 * same PHY model/revision number as AR8131.
	 */
	switch (sc->alc_ident->deviceid) {
	case PCI_PRODUCT_ATTANSIC_AR8161:
		if (PCI_SUBSYS_ID(pci_conf_read(
		    sc->sc_pct, sc->sc_pcitag, PCI_SUBSYS_ID_REG)) == 0x0091 &&
		    sc->alc_rev == 0)
			sc->alc_flags |= ALC_FLAG_LINK_WAR;
		/* FALLTHROUGH */
	case PCI_PRODUCT_ATTANSIC_E2200:
	case PCI_PRODUCT_ATTANSIC_AR8171:
		sc->alc_flags |= ALC_FLAG_AR816X_FAMILY;
		break;
	case PCI_PRODUCT_ATTANSIC_AR8162:
	case PCI_PRODUCT_ATTANSIC_AR8172:
		sc->alc_flags |= ALC_FLAG_FASTETHER | ALC_FLAG_AR816X_FAMILY;
		break;
	case PCI_PRODUCT_ATTANSIC_AR8152_B:
	case PCI_PRODUCT_ATTANSIC_AR8152_B2:
		sc->alc_flags |= ALC_FLAG_APS;
		/* FALLTHROUGH */
	case PCI_PRODUCT_ATTANSIC_AR8132:
		sc->alc_flags |= ALC_FLAG_FASTETHER;
		break;
	case PCI_PRODUCT_ATTANSIC_AR8151:
	case PCI_PRODUCT_ATTANSIC_AR8151_V2:
		sc->alc_flags |= ALC_FLAG_APS;
		/* FALLTHROUGH */
	default:
		break;
	}
	sc->alc_flags |= ALC_FLAG_JUMBO;

	/*
	 * It seems that AR813x/AR815x has a silicon bug in the SMB. In
	 * addition, Atheros said that enabling the SMB wouldn't improve
	 * performance. However, I think it's bad to access lots of
	 * registers to extract the MAC statistics.
	 */
	sc->alc_flags |= ALC_FLAG_SMB_BUG;
	/*
	 * Don't use the Tx CMB. It is known to have a silicon bug.
	 */
	sc->alc_flags |= ALC_FLAG_CMB_BUG;
	sc->alc_rev = PCI_REVISION(pa->pa_class);
	sc->alc_chip_rev = CSR_READ_4(sc, ALC_MASTER_CFG) >>
	    MASTER_CHIP_REV_SHIFT;
	if (alcdebug) {
		printf("%s: PCI device revision : 0x%04x\n",
		    device_xname(sc->sc_dev), sc->alc_rev);
		printf("%s: Chip id/revision : 0x%04x\n",
		    device_xname(sc->sc_dev), sc->alc_chip_rev);
		printf("%s: %u Tx FIFO, %u Rx FIFO\n", device_xname(sc->sc_dev),
		    CSR_READ_4(sc, ALC_SRAM_TX_FIFO_LEN) * 8,
		    CSR_READ_4(sc, ALC_SRAM_RX_FIFO_LEN) * 8);
	}

	error = alc_dma_alloc(sc);
	if (error)
		goto fail;

	callout_init(&sc->sc_tick_ch, 0);
	callout_setfunc(&sc->sc_tick_ch, alc_tick, sc);

	/* Load station address. */
	alc_get_macaddr(sc);

	aprint_normal_dev(self, "Ethernet address %s\n",
	    ether_sprintf(sc->alc_eaddr));

	ifp = &sc->sc_ec.ec_if;
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_init = alc_init;
	ifp->if_ioctl = alc_ioctl;
	ifp->if_start = alc_start;
	ifp->if_stop = alc_stop;
	ifp->if_watchdog = alc_watchdog;
	IFQ_SET_MAXLEN(&ifp->if_snd, ALC_TX_RING_CNT - 1);
	IFQ_SET_READY(&ifp->if_snd);
	strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);

	sc->sc_ec.ec_capabilities = ETHERCAP_VLAN_MTU;

#ifdef ALC_CHECKSUM
	ifp->if_capabilities |= IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
	    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
	    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx;
#endif

#if NVLAN > 0
	sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_HWTAGGING;
#endif
	/*
	 * XXX
	 * It seems that enabling Tx checksum offloading makes more trouble.
	 * Sometimes the controller does not receive any frames when Tx
	 * checksum offloading is enabled. I'm not sure whether this is a
	 * bug in the Tx checksum offloading logic or I got broken sample
	 * boards. To be safe, don't enable Tx checksum offloading by
	 * default, but give users the chance to toggle it if they know
	 * their controllers work without problems.
	 * Fortunately, Tx checksum offloading for the AR816x family seems
	 * to work.
	 */
	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) == 0) {
		ifp->if_capenable &= ~IFCAP_CSUM_IPv4_Tx;
		ifp->if_capabilities &= ~ALC_CSUM_FEATURES;
	}

	/* Set up MII bus. */
	mii->mii_ifp = ifp;
	mii->mii_readreg = alc_miibus_readreg;
	mii->mii_writereg = alc_miibus_writereg;
	mii->mii_statchg = alc_miibus_statchg;

	sc->sc_ec.ec_mii = mii;
	ifmedia_init(&mii->mii_media, 0, alc_mediachange, alc_mediastatus);
	mii_flags = 0;
	if ((sc->alc_flags & ALC_FLAG_JUMBO) != 0)
		mii_flags |= MIIF_DOPAUSE;
	mii_attach(self, mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, mii_flags);

	if (LIST_FIRST(&mii->mii_phys) == NULL) {
		printf("%s: no PHY found!\n", device_xname(sc->sc_dev));
		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_MANUAL,
		    0, NULL);
		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_MANUAL);
	} else
		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);

	if_attach(ifp);
	if_deferred_start_init(ifp, NULL);
	ether_ifattach(ifp, sc->alc_eaddr);

	if (!pmf_device_register(self, NULL, NULL))
		aprint_error_dev(self, "couldn't establish power handler\n");
	else
		pmf_class_network_register(self, ifp);

	return;
fail:
	alc_dma_free(sc);
	if (sc->sc_irq_handle != NULL) {
		pci_intr_disestablish(sc->sc_pct, sc->sc_irq_handle);
		sc->sc_irq_handle = NULL;
	}
	if (sc->sc_mem_size) {
		bus_space_unmap(sc->sc_mem_bt, sc->sc_mem_bh, sc->sc_mem_size);
		sc->sc_mem_size = 0;
	}
}

static int
alc_detach(device_t self, int flags)
{
	struct alc_softc *sc = device_private(self);
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	int s;

	s = splnet();
	alc_stop(ifp, 0);
	splx(s);

	mii_detach(&sc->sc_miibus, MII_PHY_ANY, MII_OFFSET_ANY);

	/* Delete all remaining media. */
	ifmedia_delete_instance(&sc->sc_miibus.mii_media, IFM_INST_ANY);

	ether_ifdetach(ifp);
	if_detach(ifp);
	alc_dma_free(sc);

	alc_phy_down(sc);
	if (sc->sc_irq_handle != NULL) {
		pci_intr_disestablish(sc->sc_pct, sc->sc_irq_handle);
		sc->sc_irq_handle = NULL;
	}
	if (sc->sc_mem_size) {
		bus_space_unmap(sc->sc_mem_bt, sc->sc_mem_bh, sc->sc_mem_size);
		sc->sc_mem_size = 0;
	}

	return (0);
}

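/*
 * Descriptor memory below follows the usual bus_dma(9) four-step
 * pattern for each ring/block: bus_dmamap_create(), bus_dmamem_alloc(),
 * bus_dmamem_map() and bus_dmamap_load(); the loaded physical address
 * is recorded so it can later be programmed into the chip.
 */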
static int
alc_dma_alloc(struct alc_softc *sc)
{
	struct alc_txdesc *txd;
	struct alc_rxdesc *rxd;
	int nsegs, error, i;

	/*
	 * Create DMA stuff for the Tx ring.
	 */
	error = bus_dmamap_create(sc->sc_dmat, ALC_TX_RING_SZ, 1,
	    ALC_TX_RING_SZ, 0, BUS_DMA_NOWAIT, &sc->alc_cdata.alc_tx_ring_map);
	if (error) {
		sc->alc_cdata.alc_tx_ring_map = NULL;
		return (ENOBUFS);
	}

	/* Allocate DMA'able memory for the Tx ring. */
	error = bus_dmamem_alloc(sc->sc_dmat, ALC_TX_RING_SZ,
	    ETHER_ALIGN, 0, &sc->alc_rdata.alc_tx_ring_seg, 1,
	    &nsegs, BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: could not allocate DMA'able memory for Tx ring.\n",
		    device_xname(sc->sc_dev));
		return error;
	}

	error = bus_dmamem_map(sc->sc_dmat, &sc->alc_rdata.alc_tx_ring_seg,
	    nsegs, ALC_TX_RING_SZ, (void **)&sc->alc_rdata.alc_tx_ring,
	    BUS_DMA_NOWAIT);
	if (error)
		return (ENOBUFS);

	/* Load the DMA map for the Tx ring. */
	error = bus_dmamap_load(sc->sc_dmat, sc->alc_cdata.alc_tx_ring_map,
	    sc->alc_rdata.alc_tx_ring, ALC_TX_RING_SZ, NULL, BUS_DMA_WAITOK);
	if (error) {
		printf("%s: could not load DMA'able memory for Tx ring.\n",
		    device_xname(sc->sc_dev));
		bus_dmamem_free(sc->sc_dmat,
		    &sc->alc_rdata.alc_tx_ring_seg, 1);
		return error;
	}

	sc->alc_rdata.alc_tx_ring_paddr =
	    sc->alc_cdata.alc_tx_ring_map->dm_segs[0].ds_addr;

	/*
	 * Create DMA stuff for the Rx ring.
	 */
	error = bus_dmamap_create(sc->sc_dmat, ALC_RX_RING_SZ, 1,
	    ALC_RX_RING_SZ, 0, BUS_DMA_NOWAIT, &sc->alc_cdata.alc_rx_ring_map);
	if (error)
		return (ENOBUFS);

	/* Allocate DMA'able memory for the Rx ring. */
	error = bus_dmamem_alloc(sc->sc_dmat, ALC_RX_RING_SZ,
	    ETHER_ALIGN, 0, &sc->alc_rdata.alc_rx_ring_seg, 1,
	    &nsegs, BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: could not allocate DMA'able memory for Rx ring.\n",
		    device_xname(sc->sc_dev));
		return error;
	}

	error = bus_dmamem_map(sc->sc_dmat, &sc->alc_rdata.alc_rx_ring_seg,
	    nsegs, ALC_RX_RING_SZ, (void **)&sc->alc_rdata.alc_rx_ring,
	    BUS_DMA_NOWAIT);
	if (error)
		return (ENOBUFS);

	/* Load the DMA map for the Rx ring. */
	error = bus_dmamap_load(sc->sc_dmat, sc->alc_cdata.alc_rx_ring_map,
	    sc->alc_rdata.alc_rx_ring, ALC_RX_RING_SZ, NULL, BUS_DMA_WAITOK);
	if (error) {
		printf("%s: could not load DMA'able memory for Rx ring.\n",
		    device_xname(sc->sc_dev));
		bus_dmamem_free(sc->sc_dmat,
		    &sc->alc_rdata.alc_rx_ring_seg, 1);
		return error;
	}

	sc->alc_rdata.alc_rx_ring_paddr =
	    sc->alc_cdata.alc_rx_ring_map->dm_segs[0].ds_addr;

	/*
	 * Create DMA stuff for the Rx return ring.
	 */
	error = bus_dmamap_create(sc->sc_dmat, ALC_RR_RING_SZ, 1,
	    ALC_RR_RING_SZ, 0, BUS_DMA_NOWAIT, &sc->alc_cdata.alc_rr_ring_map);
	if (error)
		return (ENOBUFS);

	/* Allocate DMA'able memory for the Rx return ring. */
	error = bus_dmamem_alloc(sc->sc_dmat, ALC_RR_RING_SZ,
	    ETHER_ALIGN, 0, &sc->alc_rdata.alc_rr_ring_seg, 1,
	    &nsegs, BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: could not allocate DMA'able memory for Rx "
		    "return ring.\n", device_xname(sc->sc_dev));
		return error;
	}

	error = bus_dmamem_map(sc->sc_dmat, &sc->alc_rdata.alc_rr_ring_seg,
	    nsegs, ALC_RR_RING_SZ, (void **)&sc->alc_rdata.alc_rr_ring,
	    BUS_DMA_NOWAIT);
	if (error)
		return (ENOBUFS);

	/* Load the DMA map for the Rx return ring. */
	error = bus_dmamap_load(sc->sc_dmat, sc->alc_cdata.alc_rr_ring_map,
	    sc->alc_rdata.alc_rr_ring, ALC_RR_RING_SZ, NULL, BUS_DMA_WAITOK);
	if (error) {
		printf("%s: could not load DMA'able memory for Rx return "
		    "ring.\n", device_xname(sc->sc_dev));
		bus_dmamem_free(sc->sc_dmat,
		    &sc->alc_rdata.alc_rr_ring_seg, 1);
		return error;
	}
1654 "\n", device_xname(sc->sc_dev)); 1655 bus_dmamem_free(sc->sc_dmat, 1656 &sc->alc_rdata.alc_rr_ring_seg, 1); 1657 return error; 1658 } 1659 1660 sc->alc_rdata.alc_rr_ring_paddr = 1661 sc->alc_cdata.alc_rr_ring_map->dm_segs[0].ds_addr; 1662 1663 /* 1664 * Create DMA stuffs for CMB block 1665 */ 1666 error = bus_dmamap_create(sc->sc_dmat, ALC_CMB_SZ, 1, 1667 ALC_CMB_SZ, 0, BUS_DMA_NOWAIT, 1668 &sc->alc_cdata.alc_cmb_map); 1669 if (error) 1670 return (ENOBUFS); 1671 1672 /* Allocate DMA'able memory for CMB block */ 1673 error = bus_dmamem_alloc(sc->sc_dmat, ALC_CMB_SZ, 1674 ETHER_ALIGN, 0, &sc->alc_rdata.alc_cmb_seg, 1, 1675 &nsegs, BUS_DMA_NOWAIT); 1676 if (error) { 1677 printf("%s: could not allocate DMA'able memory for " 1678 "CMB block\n", device_xname(sc->sc_dev)); 1679 return error; 1680 } 1681 1682 error = bus_dmamem_map(sc->sc_dmat, &sc->alc_rdata.alc_cmb_seg, 1683 nsegs, ALC_CMB_SZ, (void **)&sc->alc_rdata.alc_cmb, 1684 BUS_DMA_NOWAIT); 1685 if (error) 1686 return (ENOBUFS); 1687 1688 /* Load the DMA map for CMB block. */ 1689 error = bus_dmamap_load(sc->sc_dmat, sc->alc_cdata.alc_cmb_map, 1690 sc->alc_rdata.alc_cmb, ALC_CMB_SZ, NULL, 1691 BUS_DMA_WAITOK); 1692 if (error) { 1693 printf("%s: could not load DMA'able memory for CMB block\n", 1694 device_xname(sc->sc_dev)); 1695 bus_dmamem_free(sc->sc_dmat, 1696 &sc->alc_rdata.alc_cmb_seg, 1); 1697 return error; 1698 } 1699 1700 sc->alc_rdata.alc_cmb_paddr = 1701 sc->alc_cdata.alc_cmb_map->dm_segs[0].ds_addr; 1702 1703 /* 1704 * Create DMA stuffs for SMB block 1705 */ 1706 error = bus_dmamap_create(sc->sc_dmat, ALC_SMB_SZ, 1, 1707 ALC_SMB_SZ, 0, BUS_DMA_NOWAIT, 1708 &sc->alc_cdata.alc_smb_map); 1709 if (error) 1710 return (ENOBUFS); 1711 1712 /* Allocate DMA'able memory for SMB block */ 1713 error = bus_dmamem_alloc(sc->sc_dmat, ALC_SMB_SZ, 1714 ETHER_ALIGN, 0, &sc->alc_rdata.alc_smb_seg, 1, 1715 &nsegs, BUS_DMA_NOWAIT); 1716 if (error) { 1717 printf("%s: could not allocate DMA'able memory for " 1718 "SMB block\n", device_xname(sc->sc_dev)); 1719 return error; 1720 } 1721 1722 error = bus_dmamem_map(sc->sc_dmat, &sc->alc_rdata.alc_smb_seg, 1723 nsegs, ALC_SMB_SZ, (void **)&sc->alc_rdata.alc_smb, 1724 BUS_DMA_NOWAIT); 1725 if (error) 1726 return (ENOBUFS); 1727 1728 /* Load the DMA map for SMB block */ 1729 error = bus_dmamap_load(sc->sc_dmat, sc->alc_cdata.alc_smb_map, 1730 sc->alc_rdata.alc_smb, ALC_SMB_SZ, NULL, 1731 BUS_DMA_WAITOK); 1732 if (error) { 1733 printf("%s: could not load DMA'able memory for SMB block\n", 1734 device_xname(sc->sc_dev)); 1735 bus_dmamem_free(sc->sc_dmat, 1736 &sc->alc_rdata.alc_smb_seg, 1); 1737 return error; 1738 } 1739 1740 sc->alc_rdata.alc_smb_paddr = 1741 sc->alc_cdata.alc_smb_map->dm_segs[0].ds_addr; 1742 1743 1744 /* Create DMA maps for Tx buffers. */ 1745 for (i = 0; i < ALC_TX_RING_CNT; i++) { 1746 txd = &sc->alc_cdata.alc_txdesc[i]; 1747 txd->tx_m = NULL; 1748 txd->tx_dmamap = NULL; 1749 error = bus_dmamap_create(sc->sc_dmat, ALC_TSO_MAXSIZE, 1750 ALC_MAXTXSEGS, ALC_TSO_MAXSEGSIZE, 0, BUS_DMA_NOWAIT, 1751 &txd->tx_dmamap); 1752 if (error) { 1753 printf("%s: could not create Tx dmamap.\n", 1754 device_xname(sc->sc_dev)); 1755 return error; 1756 } 1757 } 1758 1759 /* Create DMA maps for Rx buffers. 
*/ 1760 error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 0, 1761 BUS_DMA_NOWAIT, &sc->alc_cdata.alc_rx_sparemap); 1762 if (error) { 1763 printf("%s: could not create spare Rx dmamap.\n", 1764 device_xname(sc->sc_dev)); 1765 return error; 1766 } 1767 1768 for (i = 0; i < ALC_RX_RING_CNT; i++) { 1769 rxd = &sc->alc_cdata.alc_rxdesc[i]; 1770 rxd->rx_m = NULL; 1771 rxd->rx_dmamap = NULL; 1772 error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, 1773 MCLBYTES, 0, BUS_DMA_NOWAIT, &rxd->rx_dmamap); 1774 if (error) { 1775 printf("%s: could not create Rx dmamap.\n", 1776 device_xname(sc->sc_dev)); 1777 return error; 1778 } 1779 } 1780 1781 return (0); 1782 } 1783 1784 1785 static void 1786 alc_dma_free(struct alc_softc *sc) 1787 { 1788 struct alc_txdesc *txd; 1789 struct alc_rxdesc *rxd; 1790 int i; 1791 1792 /* Tx buffers */ 1793 for (i = 0; i < ALC_TX_RING_CNT; i++) { 1794 txd = &sc->alc_cdata.alc_txdesc[i]; 1795 if (txd->tx_dmamap != NULL) { 1796 bus_dmamap_destroy(sc->sc_dmat, txd->tx_dmamap); 1797 txd->tx_dmamap = NULL; 1798 } 1799 } 1800 /* Rx buffers */ 1801 for (i = 0; i < ALC_RX_RING_CNT; i++) { 1802 rxd = &sc->alc_cdata.alc_rxdesc[i]; 1803 if (rxd->rx_dmamap != NULL) { 1804 bus_dmamap_destroy(sc->sc_dmat, rxd->rx_dmamap); 1805 rxd->rx_dmamap = NULL; 1806 } 1807 } 1808 if (sc->alc_cdata.alc_rx_sparemap != NULL) { 1809 bus_dmamap_destroy(sc->sc_dmat, sc->alc_cdata.alc_rx_sparemap); 1810 sc->alc_cdata.alc_rx_sparemap = NULL; 1811 } 1812 1813 /* Tx ring. */ 1814 if (sc->alc_cdata.alc_tx_ring_map != NULL) 1815 bus_dmamap_unload(sc->sc_dmat, sc->alc_cdata.alc_tx_ring_map); 1816 if (sc->alc_cdata.alc_tx_ring_map != NULL && 1817 sc->alc_rdata.alc_tx_ring != NULL) 1818 bus_dmamem_free(sc->sc_dmat, 1819 &sc->alc_rdata.alc_tx_ring_seg, 1); 1820 sc->alc_rdata.alc_tx_ring = NULL; 1821 sc->alc_cdata.alc_tx_ring_map = NULL; 1822 1823 /* Rx ring. */ 1824 if (sc->alc_cdata.alc_rx_ring_map != NULL) 1825 bus_dmamap_unload(sc->sc_dmat, sc->alc_cdata.alc_rx_ring_map); 1826 if (sc->alc_cdata.alc_rx_ring_map != NULL && 1827 sc->alc_rdata.alc_rx_ring != NULL) 1828 bus_dmamem_free(sc->sc_dmat, 1829 &sc->alc_rdata.alc_rx_ring_seg, 1); 1830 sc->alc_rdata.alc_rx_ring = NULL; 1831 sc->alc_cdata.alc_rx_ring_map = NULL; 1832 1833 /* Rx return ring. 
*/ 1834 if (sc->alc_cdata.alc_rr_ring_map != NULL) 1835 bus_dmamap_unload(sc->sc_dmat, sc->alc_cdata.alc_rr_ring_map); 1836 if (sc->alc_cdata.alc_rr_ring_map != NULL && 1837 sc->alc_rdata.alc_rr_ring != NULL) 1838 bus_dmamem_free(sc->sc_dmat, 1839 &sc->alc_rdata.alc_rr_ring_seg, 1); 1840 sc->alc_rdata.alc_rr_ring = NULL; 1841 sc->alc_cdata.alc_rr_ring_map = NULL; 1842 1843 /* CMB block */ 1844 if (sc->alc_cdata.alc_cmb_map != NULL) 1845 bus_dmamap_unload(sc->sc_dmat, sc->alc_cdata.alc_cmb_map); 1846 if (sc->alc_cdata.alc_cmb_map != NULL && 1847 sc->alc_rdata.alc_cmb != NULL) 1848 bus_dmamem_free(sc->sc_dmat, 1849 &sc->alc_rdata.alc_cmb_seg, 1); 1850 sc->alc_rdata.alc_cmb = NULL; 1851 sc->alc_cdata.alc_cmb_map = NULL; 1852 1853 /* SMB block */ 1854 if (sc->alc_cdata.alc_smb_map != NULL) 1855 bus_dmamap_unload(sc->sc_dmat, sc->alc_cdata.alc_smb_map); 1856 if (sc->alc_cdata.alc_smb_map != NULL && 1857 sc->alc_rdata.alc_smb != NULL) 1858 bus_dmamem_free(sc->sc_dmat, 1859 &sc->alc_rdata.alc_smb_seg, 1); 1860 sc->alc_rdata.alc_smb = NULL; 1861 sc->alc_cdata.alc_smb_map = NULL; 1862 } 1863 1864 static int 1865 alc_encap(struct alc_softc *sc, struct mbuf **m_head) 1866 { 1867 struct alc_txdesc *txd, *txd_last; 1868 struct tx_desc *desc; 1869 struct mbuf *m; 1870 bus_dmamap_t map; 1871 uint32_t cflags, poff, vtag; 1872 int error, idx, nsegs, prod; 1873 1874 m = *m_head; 1875 cflags = vtag = 0; 1876 poff = 0; 1877 1878 prod = sc->alc_cdata.alc_tx_prod; 1879 txd = &sc->alc_cdata.alc_txdesc[prod]; 1880 txd_last = txd; 1881 map = txd->tx_dmamap; 1882 1883 error = bus_dmamap_load_mbuf(sc->sc_dmat, map, *m_head, BUS_DMA_NOWAIT); 1884 1885 if (error == EFBIG) { 1886 error = 0; 1887 1888 *m_head = m_pullup(*m_head, MHLEN); 1889 if (*m_head == NULL) { 1890 printf("%s: can't defrag TX mbuf\n", 1891 device_xname(sc->sc_dev)); 1892 return ENOBUFS; 1893 } 1894 1895 error = bus_dmamap_load_mbuf(sc->sc_dmat, map, *m_head, 1896 BUS_DMA_NOWAIT); 1897 1898 if (error != 0) { 1899 printf("%s: could not load defragged TX mbuf\n", 1900 device_xname(sc->sc_dev)); 1901 m_freem(*m_head); 1902 *m_head = NULL; 1903 return error; 1904 } 1905 } else if (error) { 1906 printf("%s: could not load TX mbuf\n", device_xname(sc->sc_dev)); 1907 return (error); 1908 } 1909 1910 nsegs = map->dm_nsegs; 1911 1912 if (nsegs == 0) { 1913 m_freem(*m_head); 1914 *m_head = NULL; 1915 return (EIO); 1916 } 1917 1918 /* Check descriptor overrun. */ 1919 if (sc->alc_cdata.alc_tx_cnt + nsegs >= ALC_TX_RING_CNT - 3) { 1920 bus_dmamap_unload(sc->sc_dmat, map); 1921 return (ENOBUFS); 1922 } 1923 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize, 1924 BUS_DMASYNC_PREWRITE); 1925 1926 m = *m_head; 1927 desc = NULL; 1928 idx = 0; 1929 #if NVLAN > 0 1930 /* Configure VLAN hardware tag insertion. */ 1931 if (vlan_has_tag(m)) { 1932 vtag = htons(vlan_get_tag(m)); 1933 vtag = (vtag << TD_VLAN_SHIFT) & TD_VLAN_MASK; 1934 cflags |= TD_INS_VLAN_TAG; 1935 } 1936 #endif 1937 /* Configure Tx checksum offload. */ 1938 if ((m->m_pkthdr.csum_flags & ALC_CSUM_FEATURES) != 0) { 1939 cflags |= TD_CUSTOM_CSUM; 1940 /* Set checksum start offset. 
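* The offset is apparently programmed in units of 16-bit words,
* hence the shift by one below; a byte offset of 14 (a plain
* Ethernet header) would be written as 7. Note that poff is
* always 0 in this driver, as no header parsing is done here.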
*/ 1941 cflags |= ((poff >> 1) << TD_PLOAD_OFFSET_SHIFT) & 1942 TD_PLOAD_OFFSET_MASK; 1943 } 1944 for (; idx < nsegs; idx++) { 1945 desc = &sc->alc_rdata.alc_tx_ring[prod]; 1946 desc->len = 1947 htole32(TX_BYTES(map->dm_segs[idx].ds_len) | vtag); 1948 desc->flags = htole32(cflags); 1949 desc->addr = htole64(map->dm_segs[idx].ds_addr); 1950 sc->alc_cdata.alc_tx_cnt++; 1951 ALC_DESC_INC(prod, ALC_TX_RING_CNT); 1952 } 1953 /* Update producer index. */ 1954 sc->alc_cdata.alc_tx_prod = prod; 1955 1956 /* Finally set EOP on the last descriptor. */ 1957 prod = (prod + ALC_TX_RING_CNT - 1) % ALC_TX_RING_CNT; 1958 desc = &sc->alc_rdata.alc_tx_ring[prod]; 1959 desc->flags |= htole32(TD_EOP); 1960 1961 /* Swap dmamap of the first and the last. */ 1962 txd = &sc->alc_cdata.alc_txdesc[prod]; 1963 map = txd_last->tx_dmamap; 1964 txd_last->tx_dmamap = txd->tx_dmamap; 1965 txd->tx_dmamap = map; 1966 txd->tx_m = m; 1967 1968 return (0); 1969 } 1970 1971 static void 1972 alc_start(struct ifnet *ifp) 1973 { 1974 struct alc_softc *sc = ifp->if_softc; 1975 struct mbuf *m_head; 1976 int enq; 1977 1978 if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING) 1979 return; 1980 if ((sc->alc_flags & ALC_FLAG_LINK) == 0) 1981 return; 1982 if (IFQ_IS_EMPTY(&ifp->if_snd)) 1983 return; 1984 1985 /* Reclaim transmitted frames. */ 1986 if (sc->alc_cdata.alc_tx_cnt >= ALC_TX_DESC_HIWAT) 1987 alc_txeof(sc); 1988 1989 enq = 0; 1990 for (;;) { 1991 IFQ_DEQUEUE(&ifp->if_snd, m_head); 1992 if (m_head == NULL) 1993 break; 1994 1995 /* 1996 * Pack the data into the transmit ring. If we 1997 * don't have room, set the OACTIVE flag and wait 1998 * for the NIC to drain the ring. 1999 */ 2000 if (alc_encap(sc, &m_head)) { 2001 if (m_head == NULL) 2002 break; 2003 ifp->if_flags |= IFF_OACTIVE; 2004 break; 2005 } 2006 enq = 1; 2007 2008 /* 2009 * If there's a BPF listener, bounce a copy of this frame 2010 * to him. 2011 */ 2012 bpf_mtap(ifp, m_head, BPF_D_OUT); 2013 } 2014 2015 if (enq) { 2016 /* Sync descriptors. */ 2017 bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_tx_ring_map, 0, 2018 sc->alc_cdata.alc_tx_ring_map->dm_mapsize, 2019 BUS_DMASYNC_PREWRITE); 2020 /* Kick. Assume we're using normal Tx priority queue. */ 2021 CSR_WRITE_4(sc, ALC_MBOX_TD_PROD_IDX, 2022 (sc->alc_cdata.alc_tx_prod << 2023 MBOX_TD_PROD_LO_IDX_SHIFT) & 2024 MBOX_TD_PROD_LO_IDX_MASK); 2025 /* Set a timeout in case the chip goes out to lunch. 
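* The stack decrements if_timer once per second and invokes
* alc_watchdog() when it reaches zero, so ALC_TX_TIMEOUT is in
* units of seconds.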
*/ 2026 ifp->if_timer = ALC_TX_TIMEOUT; 2027 } 2028 } 2029 2030 static void 2031 alc_watchdog(struct ifnet *ifp) 2032 { 2033 struct alc_softc *sc = ifp->if_softc; 2034 2035 if ((sc->alc_flags & ALC_FLAG_LINK) == 0) { 2036 printf("%s: watchdog timeout (missed link)\n", 2037 device_xname(sc->sc_dev)); 2038 ifp->if_oerrors++; 2039 alc_init_backend(ifp, false); 2040 return; 2041 } 2042 2043 printf("%s: watchdog timeout\n", device_xname(sc->sc_dev)); 2044 ifp->if_oerrors++; 2045 alc_init_backend(ifp, false); 2046 alc_start(ifp); 2047 } 2048 2049 static int 2050 alc_ioctl(struct ifnet *ifp, u_long cmd, void *data) 2051 { 2052 struct alc_softc *sc = ifp->if_softc; 2053 int s, error = 0; 2054 2055 s = splnet(); 2056 2057 switch (cmd) { 2058 case SIOCSIFADDR: 2059 error = ether_ioctl(ifp, cmd, data); 2060 ifp->if_flags |= IFF_UP; 2061 if (!(ifp->if_flags & IFF_RUNNING)) 2062 alc_init(ifp); 2063 break; 2064 2065 case SIOCSIFFLAGS: 2066 error = ether_ioctl(ifp, cmd, data); 2067 if (ifp->if_flags & IFF_UP) { 2068 if (ifp->if_flags & IFF_RUNNING) 2069 error = ENETRESET; 2070 else 2071 alc_init(ifp); 2072 } else { 2073 if (ifp->if_flags & IFF_RUNNING) 2074 alc_stop(ifp, 0); 2075 } 2076 break; 2077 2078 default: 2079 error = ether_ioctl(ifp, cmd, data); 2080 break; 2081 } 2082 2083 if (error == ENETRESET) { 2084 if (ifp->if_flags & IFF_RUNNING) 2085 alc_iff(sc); 2086 error = 0; 2087 } 2088 2089 splx(s); 2090 return (error); 2091 } 2092 2093 static void 2094 alc_mac_config(struct alc_softc *sc) 2095 { 2096 struct mii_data *mii; 2097 uint32_t reg; 2098 2099 mii = &sc->sc_miibus; 2100 reg = CSR_READ_4(sc, ALC_MAC_CFG); 2101 reg &= ~(MAC_CFG_FULL_DUPLEX | MAC_CFG_TX_FC | MAC_CFG_RX_FC | 2102 MAC_CFG_SPEED_MASK); 2103 if (sc->alc_ident->deviceid == PCI_PRODUCT_ATTANSIC_AR8151 || 2104 sc->alc_ident->deviceid == PCI_PRODUCT_ATTANSIC_AR8151_V2 || 2105 sc->alc_ident->deviceid == PCI_PRODUCT_ATTANSIC_AR8152_B2) 2106 reg |= MAC_CFG_HASH_ALG_CRC32 | MAC_CFG_SPEED_MODE_SW; 2107 /* Reprogram MAC with resolved speed/duplex. */ 2108 switch (IFM_SUBTYPE(mii->mii_media_active)) { 2109 case IFM_10_T: 2110 case IFM_100_TX: 2111 reg |= MAC_CFG_SPEED_10_100; 2112 break; 2113 case IFM_1000_T: 2114 reg |= MAC_CFG_SPEED_1000; 2115 break; 2116 } 2117 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) { 2118 reg |= MAC_CFG_FULL_DUPLEX; 2119 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0) 2120 reg |= MAC_CFG_TX_FC; 2121 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0) 2122 reg |= MAC_CFG_RX_FC; 2123 } 2124 CSR_WRITE_4(sc, ALC_MAC_CFG, reg); 2125 } 2126 2127 static void 2128 alc_stats_clear(struct alc_softc *sc) 2129 { 2130 struct smb sb, *smb; 2131 uint32_t *reg; 2132 int i; 2133 2134 if ((sc->alc_flags & ALC_FLAG_SMB_BUG) == 0) { 2135 bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_smb_map, 0, 2136 sc->alc_cdata.alc_smb_map->dm_mapsize, 2137 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 2138 smb = sc->alc_rdata.alc_smb; 2139 /* Update done, clear. */ 2140 smb->updated = 0; 2141 bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_smb_map, 0, 2142 sc->alc_cdata.alc_smb_map->dm_mapsize, 2143 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 2144 } else { 2145 for (reg = &sb.rx_frames, i = 0; reg <= &sb.rx_pkts_filtered; 2146 reg++) { 2147 CSR_READ_4(sc, ALC_RX_MIB_BASE + i); 2148 i += sizeof(uint32_t); 2149 } 2150 /* Read Tx statistics. 
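* As in the Rx loop above, the values read are deliberately
* discarded; the MIB counters appear to be clear-on-read, so
* reading them is what clears the statistics.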
*/ 2151 for (reg = &sb.tx_frames, i = 0; reg <= &sb.tx_mcast_bytes; 2152 reg++) { 2153 CSR_READ_4(sc, ALC_TX_MIB_BASE + i); 2154 i += sizeof(uint32_t); 2155 } 2156 } 2157 } 2158 2159 static void 2160 alc_stats_update(struct alc_softc *sc) 2161 { 2162 struct ifnet *ifp = &sc->sc_ec.ec_if; 2163 struct alc_hw_stats *stat; 2164 struct smb sb, *smb; 2165 uint32_t *reg; 2166 int i; 2167 2168 stat = &sc->alc_stats; 2169 if ((sc->alc_flags & ALC_FLAG_SMB_BUG) == 0) { 2170 bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_smb_map, 0, 2171 sc->alc_cdata.alc_smb_map->dm_mapsize, 2172 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 2173 smb = sc->alc_rdata.alc_smb; 2174 if (smb->updated == 0) 2175 return; 2176 } else { 2177 smb = &sb; 2178 /* Read Rx statistics. */ 2179 for (reg = &sb.rx_frames, i = 0; reg <= &sb.rx_pkts_filtered; 2180 reg++) { 2181 *reg = CSR_READ_4(sc, ALC_RX_MIB_BASE + i); 2182 i += sizeof(uint32_t); 2183 } 2184 /* Read Tx statistics. */ 2185 for (reg = &sb.tx_frames, i = 0; reg <= &sb.tx_mcast_bytes; 2186 reg++) { 2187 *reg = CSR_READ_4(sc, ALC_TX_MIB_BASE + i); 2188 i += sizeof(uint32_t); 2189 } 2190 } 2191 2192 /* Rx stats. */ 2193 stat->rx_frames += smb->rx_frames; 2194 stat->rx_bcast_frames += smb->rx_bcast_frames; 2195 stat->rx_mcast_frames += smb->rx_mcast_frames; 2196 stat->rx_pause_frames += smb->rx_pause_frames; 2197 stat->rx_control_frames += smb->rx_control_frames; 2198 stat->rx_crcerrs += smb->rx_crcerrs; 2199 stat->rx_lenerrs += smb->rx_lenerrs; 2200 stat->rx_bytes += smb->rx_bytes; 2201 stat->rx_runts += smb->rx_runts; 2202 stat->rx_fragments += smb->rx_fragments; 2203 stat->rx_pkts_64 += smb->rx_pkts_64; 2204 stat->rx_pkts_65_127 += smb->rx_pkts_65_127; 2205 stat->rx_pkts_128_255 += smb->rx_pkts_128_255; 2206 stat->rx_pkts_256_511 += smb->rx_pkts_256_511; 2207 stat->rx_pkts_512_1023 += smb->rx_pkts_512_1023; 2208 stat->rx_pkts_1024_1518 += smb->rx_pkts_1024_1518; 2209 stat->rx_pkts_1519_max += smb->rx_pkts_1519_max; 2210 stat->rx_pkts_truncated += smb->rx_pkts_truncated; 2211 stat->rx_fifo_oflows += smb->rx_fifo_oflows; 2212 stat->rx_rrs_errs += smb->rx_rrs_errs; 2213 stat->rx_alignerrs += smb->rx_alignerrs; 2214 stat->rx_bcast_bytes += smb->rx_bcast_bytes; 2215 stat->rx_mcast_bytes += smb->rx_mcast_bytes; 2216 stat->rx_pkts_filtered += smb->rx_pkts_filtered; 2217 2218 /* Tx stats. 
*/ 2219 stat->tx_frames += smb->tx_frames; 2220 stat->tx_bcast_frames += smb->tx_bcast_frames; 2221 stat->tx_mcast_frames += smb->tx_mcast_frames; 2222 stat->tx_pause_frames += smb->tx_pause_frames; 2223 stat->tx_excess_defer += smb->tx_excess_defer; 2224 stat->tx_control_frames += smb->tx_control_frames; 2225 stat->tx_deferred += smb->tx_deferred; 2226 stat->tx_bytes += smb->tx_bytes; 2227 stat->tx_pkts_64 += smb->tx_pkts_64; 2228 stat->tx_pkts_65_127 += smb->tx_pkts_65_127; 2229 stat->tx_pkts_128_255 += smb->tx_pkts_128_255; 2230 stat->tx_pkts_256_511 += smb->tx_pkts_256_511; 2231 stat->tx_pkts_512_1023 += smb->tx_pkts_512_1023; 2232 stat->tx_pkts_1024_1518 += smb->tx_pkts_1024_1518; 2233 stat->tx_pkts_1519_max += smb->tx_pkts_1519_max; 2234 stat->tx_single_colls += smb->tx_single_colls; 2235 stat->tx_multi_colls += smb->tx_multi_colls; 2236 stat->tx_late_colls += smb->tx_late_colls; 2237 stat->tx_excess_colls += smb->tx_excess_colls; 2238 stat->tx_underrun += smb->tx_underrun; 2239 stat->tx_desc_underrun += smb->tx_desc_underrun; 2240 stat->tx_lenerrs += smb->tx_lenerrs; 2241 stat->tx_pkts_truncated += smb->tx_pkts_truncated; 2242 stat->tx_bcast_bytes += smb->tx_bcast_bytes; 2243 stat->tx_mcast_bytes += smb->tx_mcast_bytes; 2244 2245 /* Update counters in ifnet. */ 2246 ifp->if_opackets += smb->tx_frames; 2247 2248 ifp->if_collisions += smb->tx_single_colls + 2249 smb->tx_multi_colls * 2 + smb->tx_late_colls + 2250 smb->tx_excess_colls * HDPX_CFG_RETRY_DEFAULT; 2251 2252 ifp->if_oerrors += smb->tx_late_colls + smb->tx_excess_colls + 2253 smb->tx_underrun + smb->tx_pkts_truncated; 2254 2255 ifp->if_ipackets += smb->rx_frames; 2256 2257 ifp->if_ierrors += smb->rx_crcerrs + smb->rx_lenerrs + 2258 smb->rx_runts + smb->rx_pkts_truncated + 2259 smb->rx_fifo_oflows + smb->rx_rrs_errs + 2260 smb->rx_alignerrs; 2261 2262 if ((sc->alc_flags & ALC_FLAG_SMB_BUG) == 0) { 2263 /* Update done, clear. */ 2264 smb->updated = 0; 2265 bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_smb_map, 0, 2266 sc->alc_cdata.alc_smb_map->dm_mapsize, 2267 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 2268 } 2269 } 2270 2271 static int 2272 alc_intr(void *arg) 2273 { 2274 struct alc_softc *sc = arg; 2275 struct ifnet *ifp = &sc->sc_ec.ec_if; 2276 uint32_t status; 2277 2278 status = CSR_READ_4(sc, ALC_INTR_STATUS); 2279 if ((status & ALC_INTRS) == 0) 2280 return (0); 2281 2282 /* Acknowledge and disable interrupts. */ 2283 CSR_WRITE_4(sc, ALC_INTR_STATUS, status | INTR_DIS_INT); 2284 2285 if (ifp->if_flags & IFF_RUNNING) { 2286 if (status & INTR_RX_PKT) { 2287 int error; 2288 2289 error = alc_rxintr(sc); 2290 if (error) { 2291 alc_init_backend(ifp, false); 2292 return (0); 2293 } 2294 } 2295 2296 if (status & (INTR_DMA_RD_TO_RST | INTR_DMA_WR_TO_RST | 2297 INTR_TXQ_TO_RST)) { 2298 if (status & INTR_DMA_RD_TO_RST) 2299 printf("%s: DMA read error! -- resetting\n", 2300 device_xname(sc->sc_dev)); 2301 if (status & INTR_DMA_WR_TO_RST) 2302 printf("%s: DMA write error! -- resetting\n", 2303 device_xname(sc->sc_dev)); 2304 if (status & INTR_TXQ_TO_RST) 2305 printf("%s: TxQ reset! -- resetting\n", 2306 device_xname(sc->sc_dev)); 2307 alc_init_backend(ifp, false); 2308 return (0); 2309 } 2310 2311 alc_txeof(sc); 2312 if_schedule_deferred_start(ifp); 2313 } 2314 2315 /* Re-enable interrupts. 
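* Writing ones acknowledges any status bits that latched while
* the handler ran; leaving the INTR_DIS_INT bit clear (hence
* the 0x7FFFFFFF mask) undoes the disable performed on entry.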
*/
2316 CSR_WRITE_4(sc, ALC_INTR_STATUS, 0x7FFFFFFF);
2317 return (1);
2318 }
2319
2320 static void
2321 alc_txeof(struct alc_softc *sc)
2322 {
2323 struct ifnet *ifp = &sc->sc_ec.ec_if;
2324 struct alc_txdesc *txd;
2325 uint32_t cons, prod;
2326 int prog;
2327
2328 if (sc->alc_cdata.alc_tx_cnt == 0)
2329 return;
2330 bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_tx_ring_map, 0,
2331 sc->alc_cdata.alc_tx_ring_map->dm_mapsize,
2332 BUS_DMASYNC_POSTREAD);
2333 if ((sc->alc_flags & ALC_FLAG_CMB_BUG) == 0) {
2334 bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_cmb_map, 0,
2335 sc->alc_cdata.alc_cmb_map->dm_mapsize,
2336 BUS_DMASYNC_POSTREAD);
2337 prod = sc->alc_rdata.alc_cmb->cons;
2338 } else
2339 prod = CSR_READ_4(sc, ALC_MBOX_TD_CONS_IDX);
2340 /* Assume we're using normal Tx priority queue. */
2341 prod = (prod & MBOX_TD_CONS_LO_IDX_MASK) >>
2342 MBOX_TD_CONS_LO_IDX_SHIFT;
2343 cons = sc->alc_cdata.alc_tx_cons;
2344 /*
2345 * Go through our Tx list and free mbufs for those
2346 * frames which have been transmitted.
2347 */
2348 for (prog = 0; cons != prod; prog++,
2349 ALC_DESC_INC(cons, ALC_TX_RING_CNT)) {
2350 if (sc->alc_cdata.alc_tx_cnt <= 0)
2351 break;
2352 prog++;
2353 ifp->if_flags &= ~IFF_OACTIVE;
2354 sc->alc_cdata.alc_tx_cnt--;
2355 txd = &sc->alc_cdata.alc_txdesc[cons];
2356 if (txd->tx_m != NULL) {
2357 /* Reclaim transmitted mbufs. */
2358 bus_dmamap_sync(sc->sc_dmat, txd->tx_dmamap, 0,
2359 txd->tx_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
2360 bus_dmamap_unload(sc->sc_dmat, txd->tx_dmamap);
2361 m_freem(txd->tx_m);
2362 txd->tx_m = NULL;
2363 }
2364 }
2365
2366 if ((sc->alc_flags & ALC_FLAG_CMB_BUG) == 0)
2367 bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_cmb_map, 0,
2368 sc->alc_cdata.alc_cmb_map->dm_mapsize, BUS_DMASYNC_PREREAD);
2369 sc->alc_cdata.alc_tx_cons = cons;
2370 /*
2371 * Unarm the watchdog timer only when there are no pending
2372 * frames in the Tx queue.
2373 */
2374 if (sc->alc_cdata.alc_tx_cnt == 0)
2375 ifp->if_timer = 0;
2376 }
2377
2378 static int
2379 alc_newbuf(struct alc_softc *sc, struct alc_rxdesc *rxd, bool init)
2380 {
2381 struct mbuf *m;
2382 bus_dmamap_t map;
2383 int error;
2384
2385 MGETHDR(m, init ? M_WAITOK : M_DONTWAIT, MT_DATA);
2386 if (m == NULL)
2387 return (ENOBUFS);
2388 MCLGET(m, init ?
M_WAITOK : M_DONTWAIT);
2389 if (!(m->m_flags & M_EXT)) {
2390 m_freem(m);
2391 return (ENOBUFS);
2392 }
2393
2394 m->m_len = m->m_pkthdr.len = RX_BUF_SIZE_MAX;
2395
2396 error = bus_dmamap_load_mbuf(sc->sc_dmat,
2397 sc->alc_cdata.alc_rx_sparemap, m, BUS_DMA_NOWAIT);
2398
2399 if (error != 0) {
2400 m_freem(m);
2401
2402 if (init)
2403 printf("%s: can't load RX mbuf\n", device_xname(sc->sc_dev));
2404
2405 return (error);
2406 }
2407
2408 if (rxd->rx_m != NULL) {
2409 bus_dmamap_sync(sc->sc_dmat, rxd->rx_dmamap, 0,
2410 rxd->rx_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
2411 bus_dmamap_unload(sc->sc_dmat, rxd->rx_dmamap);
2412 }
2413 map = rxd->rx_dmamap;
2414 rxd->rx_dmamap = sc->alc_cdata.alc_rx_sparemap;
2415 sc->alc_cdata.alc_rx_sparemap = map;
2416 bus_dmamap_sync(sc->sc_dmat, rxd->rx_dmamap, 0, rxd->rx_dmamap->dm_mapsize,
2417 BUS_DMASYNC_PREREAD);
2418 rxd->rx_m = m;
2419 rxd->rx_desc->addr = htole64(rxd->rx_dmamap->dm_segs[0].ds_addr);
2420 return (0);
2421 }
2422
2423 static int
2424 alc_rxintr(struct alc_softc *sc)
2425 {
2426 struct ifnet *ifp = &sc->sc_ec.ec_if;
2427 struct rx_rdesc *rrd;
2428 uint32_t nsegs, status;
2429 int rr_cons, prog;
2430
2431 bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_rr_ring_map, 0,
2432 sc->alc_cdata.alc_rr_ring_map->dm_mapsize,
2433 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2434 bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_rx_ring_map, 0,
2435 sc->alc_cdata.alc_rx_ring_map->dm_mapsize,
2436 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2437 rr_cons = sc->alc_cdata.alc_rr_cons;
2438 for (prog = 0; (ifp->if_flags & IFF_RUNNING) != 0;) {
2439 rrd = &sc->alc_rdata.alc_rr_ring[rr_cons];
2440 status = le32toh(rrd->status);
2441 if ((status & RRD_VALID) == 0)
2442 break;
2443 nsegs = RRD_RD_CNT(le32toh(rrd->rdinfo));
2444 if (nsegs == 0) {
2445 /* This should not happen! */
2446 if (alcdebug)
2447 printf("%s: unexpected segment count -- "
2448 "resetting\n", device_xname(sc->sc_dev));
2449 return (EIO);
2450 }
2451 alc_rxeof(sc, rrd);
2452 /* Clear Rx return status. */
2453 rrd->status = 0;
2454 ALC_DESC_INC(rr_cons, ALC_RR_RING_CNT);
2455 sc->alc_cdata.alc_rx_cons += nsegs;
2456 sc->alc_cdata.alc_rx_cons %= ALC_RR_RING_CNT;
2457 prog += nsegs;
2458 }
2459
2460 if (prog > 0) {
2461 /* Update the consumer index. */
2462 sc->alc_cdata.alc_rr_cons = rr_cons;
2463 /* Sync Rx return descriptors. */
2464 bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_rr_ring_map, 0,
2465 sc->alc_cdata.alc_rr_ring_map->dm_mapsize,
2466 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2467 /*
2468 * Sync updated Rx descriptors such that the controller
2469 * sees the modified buffer addresses.
2470 */
2471 bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_rx_ring_map, 0,
2472 sc->alc_cdata.alc_rx_ring_map->dm_mapsize,
2473 BUS_DMASYNC_PREWRITE);
2474 /*
2475 * Let the controller know about the availability of new
2476 * Rx buffers. Since alc(4) uses RXQ_CFG_RD_BURST_DEFAULT
2477 * descriptors, it may be possible to update
2478 * ALC_MBOX_RD0_PROD_IDX only when Rx buffer pre-fetching
2479 * is required. In addition, we already set
2480 * ALC_RX_RD_FREE_THRESH to RX_RD_FREE_THRESH_LO_DEFAULT
2481 * descriptors. However, it still seems that pre-fetching
2482 * needs more experimentation.
2483 */
2484 CSR_WRITE_4(sc, ALC_MBOX_RD0_PROD_IDX,
2485 sc->alc_cdata.alc_rx_cons);
2486 }
2487
2488 return (0);
2489 }
2490
2491 /* Receive a frame.
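* A frame larger than one Rx buffer arrives in several
* consecutive descriptors; the return descriptor carries the
* segment count and the total length, and the pieces are
* chained back into a single mbuf chain below.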
*/
2492 static void
2493 alc_rxeof(struct alc_softc *sc, struct rx_rdesc *rrd)
2494 {
2495 struct ifnet *ifp = &sc->sc_ec.ec_if;
2496 struct alc_rxdesc *rxd;
2497 struct mbuf *mp, *m;
2498 uint32_t rdinfo, status;
2499 int count, nsegs, rx_cons;
2500
2501 status = le32toh(rrd->status);
2502 rdinfo = le32toh(rrd->rdinfo);
2503 rx_cons = RRD_RD_IDX(rdinfo);
2504 nsegs = RRD_RD_CNT(rdinfo);
2505
2506 sc->alc_cdata.alc_rxlen = RRD_BYTES(status);
2507 if (status & (RRD_ERR_SUM | RRD_ERR_LENGTH)) {
2508 /*
2509 * We want to pass the following frames to the upper
2510 * layer regardless of the error status of the Rx
2511 * return ring:
2512 *
2513 * o IP/TCP/UDP checksum is bad.
2514 * o frame length and protocol-specific length do
2515 * not match.
2516 *
2517 * Force the network stack to compute the checksum for
2518 * these errored frames.
2519 */
2520 status |= RRD_TCP_UDPCSUM_NOK | RRD_IPCSUM_NOK;
2521 if ((status & (RRD_ERR_CRC | RRD_ERR_ALIGN |
2522 RRD_ERR_TRUNC | RRD_ERR_RUNT)) != 0)
2523 return;
2524 }
2525
2526 for (count = 0; count < nsegs; count++,
2527 ALC_DESC_INC(rx_cons, ALC_RX_RING_CNT)) {
2528 rxd = &sc->alc_cdata.alc_rxdesc[rx_cons];
2529 mp = rxd->rx_m;
2530 /* Add a new receive buffer to the ring. */
2531 if (alc_newbuf(sc, rxd, false) != 0) {
2532 ifp->if_iqdrops++;
2533 /* Reuse Rx buffers. */
2534 if (sc->alc_cdata.alc_rxhead != NULL)
2535 m_freem(sc->alc_cdata.alc_rxhead);
2536 break;
2537 }
2538
2539 /*
2540 * Assume we've received a full-sized frame.
2541 * The actual size is fixed up when we encounter the
2542 * end of a multi-segment frame.
2543 */
2544 mp->m_len = sc->alc_buf_size;
2545
2546 /* Chain received mbufs. */
2547 if (sc->alc_cdata.alc_rxhead == NULL) {
2548 sc->alc_cdata.alc_rxhead = mp;
2549 sc->alc_cdata.alc_rxtail = mp;
2550 } else {
2551 m_remove_pkthdr(mp);
2552 sc->alc_cdata.alc_rxprev_tail =
2553 sc->alc_cdata.alc_rxtail;
2554 sc->alc_cdata.alc_rxtail->m_next = mp;
2555 sc->alc_cdata.alc_rxtail = mp;
2556 }
2557
2558 if (count == nsegs - 1) {
2559 /* Last desc. for this frame. */
2560 m = sc->alc_cdata.alc_rxhead;
2561 KASSERT(m->m_flags & M_PKTHDR);
2562 /*
2563 * It seems that the L1C/L2C controller has no way
2564 * to tell the hardware to strip CRC bytes.
2565 */
2566 m->m_pkthdr.len =
2567 sc->alc_cdata.alc_rxlen - ETHER_CRC_LEN;
2568 if (nsegs > 1) {
2569 /* Set last mbuf size. */
2570 mp->m_len = sc->alc_cdata.alc_rxlen -
2571 (nsegs - 1) * sc->alc_buf_size;
2572 /* Remove the CRC bytes in chained mbufs. */
2573 if (mp->m_len <= ETHER_CRC_LEN) {
2574 sc->alc_cdata.alc_rxtail =
2575 sc->alc_cdata.alc_rxprev_tail;
2576 sc->alc_cdata.alc_rxtail->m_len -=
2577 (ETHER_CRC_LEN - mp->m_len);
2578 sc->alc_cdata.alc_rxtail->m_next = NULL;
2579 m_freem(mp);
2580 } else {
2581 mp->m_len -= ETHER_CRC_LEN;
2582 }
2583 } else
2584 m->m_len = m->m_pkthdr.len;
2585 m_set_rcvif(m, ifp);
2586 #if NVLAN > 0
2587 /*
2588 * Due to hardware bugs, Rx checksum offloading
2589 * was intentionally disabled.
2590 */
2591 if (status & RRD_VLAN_TAG) {
2592 uint32_t vtag = RRD_VLAN(le32toh(rrd->vtag));
2593 vlan_set_tag(m, ntohs(vtag));
2594 }
2595 #endif
2596
2597 /* Pass it on. */
2598 if_percpuq_enqueue(ifp->if_percpuq, m);
2599 }
2600 }
2601 /* Reset mbuf chains.
*/ 2602 ALC_RXCHAIN_RESET(sc); 2603 } 2604 2605 static void 2606 alc_tick(void *xsc) 2607 { 2608 struct alc_softc *sc = xsc; 2609 struct mii_data *mii = &sc->sc_miibus; 2610 int s; 2611 2612 s = splnet(); 2613 mii_tick(mii); 2614 alc_stats_update(sc); 2615 splx(s); 2616 2617 callout_schedule(&sc->sc_tick_ch, hz); 2618 } 2619 2620 static void 2621 alc_osc_reset(struct alc_softc *sc) 2622 { 2623 uint32_t reg; 2624 2625 reg = CSR_READ_4(sc, ALC_MISC3); 2626 reg &= ~MISC3_25M_BY_SW; 2627 reg |= MISC3_25M_NOTO_INTNL; 2628 CSR_WRITE_4(sc, ALC_MISC3, reg); 2629 2630 reg = CSR_READ_4(sc, ALC_MISC); 2631 if (AR816X_REV(sc->alc_rev) >= AR816X_REV_B0) { 2632 /* 2633 * Restore over-current protection default value. 2634 * This value could be reset by MAC reset. 2635 */ 2636 reg &= ~MISC_PSW_OCP_MASK; 2637 reg |= (MISC_PSW_OCP_DEFAULT << MISC_PSW_OCP_SHIFT); 2638 reg &= ~MISC_INTNLOSC_OPEN; 2639 CSR_WRITE_4(sc, ALC_MISC, reg); 2640 CSR_WRITE_4(sc, ALC_MISC, reg | MISC_INTNLOSC_OPEN); 2641 reg = CSR_READ_4(sc, ALC_MISC2); 2642 reg &= ~MISC2_CALB_START; 2643 CSR_WRITE_4(sc, ALC_MISC2, reg); 2644 CSR_WRITE_4(sc, ALC_MISC2, reg | MISC2_CALB_START); 2645 2646 } else { 2647 reg &= ~MISC_INTNLOSC_OPEN; 2648 /* Disable isolate for revision A devices. */ 2649 if (AR816X_REV(sc->alc_rev) <= AR816X_REV_A1) 2650 reg &= ~MISC_ISO_ENB; 2651 CSR_WRITE_4(sc, ALC_MISC, reg | MISC_INTNLOSC_OPEN); 2652 CSR_WRITE_4(sc, ALC_MISC, reg); 2653 } 2654 2655 DELAY(20); 2656 } 2657 2658 static void 2659 alc_reset(struct alc_softc *sc) 2660 { 2661 uint32_t pmcfg, reg; 2662 int i; 2663 2664 pmcfg = 0; 2665 if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0) { 2666 /* Reset workaround. */ 2667 CSR_WRITE_4(sc, ALC_MBOX_RD0_PROD_IDX, 1); 2668 if (AR816X_REV(sc->alc_rev) <= AR816X_REV_A1 && 2669 (sc->alc_rev & 0x01) != 0) { 2670 /* Disable L0s/L1s before reset. */ 2671 pmcfg = CSR_READ_4(sc, ALC_PM_CFG); 2672 if ((pmcfg & (PM_CFG_ASPM_L0S_ENB | PM_CFG_ASPM_L1_ENB)) 2673 != 0) { 2674 pmcfg &= ~(PM_CFG_ASPM_L0S_ENB | 2675 PM_CFG_ASPM_L1_ENB); 2676 CSR_WRITE_4(sc, ALC_PM_CFG, pmcfg); 2677 } 2678 } 2679 } 2680 reg = CSR_READ_4(sc, ALC_MASTER_CFG); 2681 reg |= MASTER_OOB_DIS_OFF | MASTER_RESET; 2682 CSR_WRITE_4(sc, ALC_MASTER_CFG, reg); 2683 2684 if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0) { 2685 for (i = ALC_RESET_TIMEOUT; i > 0; i--) { 2686 DELAY(10); 2687 if (CSR_READ_4(sc, ALC_MBOX_RD0_PROD_IDX) == 0) 2688 break; 2689 } 2690 if (i == 0) 2691 printf("%s: MAC reset timeout!\n", device_xname(sc->sc_dev)); 2692 } 2693 for (i = ALC_RESET_TIMEOUT; i > 0; i--) { 2694 DELAY(10); 2695 if ((CSR_READ_4(sc, ALC_MASTER_CFG) & MASTER_RESET) == 0) 2696 break; 2697 } 2698 if (i == 0) 2699 printf("%s: master reset timeout!\n", device_xname(sc->sc_dev)); 2700 2701 for (i = ALC_RESET_TIMEOUT; i > 0; i--) { 2702 reg = CSR_READ_4(sc, ALC_IDLE_STATUS); 2703 if ((reg & (IDLE_STATUS_RXMAC | IDLE_STATUS_TXMAC | 2704 IDLE_STATUS_RXQ | IDLE_STATUS_TXQ)) == 0) 2705 break; 2706 DELAY(10); 2707 } 2708 if (i == 0) 2709 printf("%s: reset timeout(0x%08x)!\n", 2710 device_xname(sc->sc_dev), reg); 2711 2712 if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0) { 2713 if (AR816X_REV(sc->alc_rev) <= AR816X_REV_A1 && 2714 (sc->alc_rev & 0x01) != 0) { 2715 reg = CSR_READ_4(sc, ALC_MASTER_CFG); 2716 reg |= MASTER_CLK_SEL_DIS; 2717 CSR_WRITE_4(sc, ALC_MASTER_CFG, reg); 2718 /* Restore L0s/L1s config. 
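* (only if ASPM was enabled before the reset; pmcfg was saved
* near the top of this function before being cleared)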
*/
2719 if ((pmcfg & (PM_CFG_ASPM_L0S_ENB | PM_CFG_ASPM_L1_ENB))
2720 != 0)
2721 CSR_WRITE_4(sc, ALC_PM_CFG, pmcfg);
2722 }
2723
2724 alc_osc_reset(sc);
2725 reg = CSR_READ_4(sc, ALC_MISC3);
2726 reg &= ~MISC3_25M_BY_SW;
2727 reg |= MISC3_25M_NOTO_INTNL;
2728 CSR_WRITE_4(sc, ALC_MISC3, reg);
2729 reg = CSR_READ_4(sc, ALC_MISC);
2730 reg &= ~MISC_INTNLOSC_OPEN;
2731 if (AR816X_REV(sc->alc_rev) <= AR816X_REV_A1)
2732 reg &= ~MISC_ISO_ENB;
2733 CSR_WRITE_4(sc, ALC_MISC, reg);
2734 DELAY(20);
2735 }
2736 if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0 ||
2737 sc->alc_ident->deviceid == PCI_PRODUCT_ATTANSIC_AR8152_B ||
2738 sc->alc_ident->deviceid == PCI_PRODUCT_ATTANSIC_AR8151_V2)
2739 CSR_WRITE_4(sc, ALC_SERDES_LOCK,
2740 CSR_READ_4(sc, ALC_SERDES_LOCK) | SERDES_MAC_CLK_SLOWDOWN |
2741 SERDES_PHY_CLK_SLOWDOWN);
2742 }
2743
2744 static int
2745 alc_init(struct ifnet *ifp)
2746 {
2747
2748 return alc_init_backend(ifp, true);
2749 }
2750
2751 static int
2752 alc_init_backend(struct ifnet *ifp, bool init)
2753 {
2754 struct alc_softc *sc = ifp->if_softc;
2755 struct mii_data *mii;
2756 uint8_t eaddr[ETHER_ADDR_LEN];
2757 bus_addr_t paddr;
2758 uint32_t reg, rxf_hi, rxf_lo;
2759 int error;
2760
2761 /*
2762 * Cancel any pending I/O.
2763 */
2764 alc_stop(ifp, 0);
2765 /*
2766 * Reset the chip to a known state.
2767 */
2768 alc_reset(sc);
2769
2770 /* Initialize Rx descriptors. */
2771 error = alc_init_rx_ring(sc, init);
2772 if (error != 0) {
2773 printf("%s: no memory for Rx buffers.\n", device_xname(sc->sc_dev));
2774 alc_stop(ifp, 0);
2775 return (error);
2776 }
2777 alc_init_rr_ring(sc);
2778 alc_init_tx_ring(sc);
2779 alc_init_cmb(sc);
2780 alc_init_smb(sc);
2781
2782 /* Enable all clocks. */
2783 if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0) {
2784 CSR_WRITE_4(sc, ALC_CLK_GATING_CFG, CLK_GATING_DMAW_ENB |
2785 CLK_GATING_DMAR_ENB | CLK_GATING_TXQ_ENB |
2786 CLK_GATING_RXQ_ENB | CLK_GATING_TXMAC_ENB |
2787 CLK_GATING_RXMAC_ENB);
2788 if (AR816X_REV(sc->alc_rev) >= AR816X_REV_B0)
2789 CSR_WRITE_4(sc, ALC_IDLE_DECISN_TIMER,
2790 IDLE_DECISN_TIMER_DEFAULT_1MS);
2791 } else
2792 CSR_WRITE_4(sc, ALC_CLK_GATING_CFG, 0);
2793
2794 /* Reprogram the station address. */
2795 memcpy(eaddr, CLLADDR(ifp->if_sadl), sizeof(eaddr));
2796 CSR_WRITE_4(sc, ALC_PAR0,
2797 eaddr[2] << 24 | eaddr[3] << 16 | eaddr[4] << 8 | eaddr[5]);
2798 CSR_WRITE_4(sc, ALC_PAR1, eaddr[0] << 8 | eaddr[1]);
2799 /*
2800 * Clear WOL status and disable all WOL features, as WOL
2801 * would interfere with Rx operation under normal conditions.
2802 */
2803 CSR_READ_4(sc, ALC_WOL_CFG);
2804 CSR_WRITE_4(sc, ALC_WOL_CFG, 0);
2805 /* Set Tx descriptor base addresses. */
2806 paddr = sc->alc_rdata.alc_tx_ring_paddr;
2807 CSR_WRITE_4(sc, ALC_TX_BASE_ADDR_HI, ALC_ADDR_HI(paddr));
2808 CSR_WRITE_4(sc, ALC_TDL_HEAD_ADDR_LO, ALC_ADDR_LO(paddr));
2809 /* We don't use the high priority ring. */
2810 CSR_WRITE_4(sc, ALC_TDH_HEAD_ADDR_LO, 0);
2811 /* Set Tx descriptor counter. */
2812 CSR_WRITE_4(sc, ALC_TD_RING_CNT,
2813 (ALC_TX_RING_CNT << TD_RING_CNT_SHIFT) & TD_RING_CNT_MASK);
2814 /* Set Rx descriptor base addresses. */
2815 paddr = sc->alc_rdata.alc_rx_ring_paddr;
2816 CSR_WRITE_4(sc, ALC_RX_BASE_ADDR_HI, ALC_ADDR_HI(paddr));
2817 CSR_WRITE_4(sc, ALC_RD0_HEAD_ADDR_LO, ALC_ADDR_LO(paddr));
2818 if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) == 0) {
2819 /* We use one Rx ring.
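* The AR813x/AR815x parts expose four Rx rings, but only ring 0
* is used, so the remaining head addresses are cleared.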
*/
2820 CSR_WRITE_4(sc, ALC_RD1_HEAD_ADDR_LO, 0);
2821 CSR_WRITE_4(sc, ALC_RD2_HEAD_ADDR_LO, 0);
2822 CSR_WRITE_4(sc, ALC_RD3_HEAD_ADDR_LO, 0);
2823 }
2824 /* Set Rx descriptor counter. */
2825 CSR_WRITE_4(sc, ALC_RD_RING_CNT,
2826 (ALC_RX_RING_CNT << RD_RING_CNT_SHIFT) & RD_RING_CNT_MASK);
2827
2828 /*
2829 * Let the hardware split jumbo frames into alc_buf_size-sized
2830 * chunks if the frame does not fit in a single buffer. The Rx
2831 * return descriptor holds a counter that indicates how many
2832 * fragments were made by the hardware. The buffer size should
2833 * be a multiple of 8 bytes. Since the hardware limits the
2834 * buffer size, always use the maximum value.
2835 * For strict-alignment architectures make sure to reduce the
2836 * buffer size by 8 bytes to make room for the alignment fixup.
2837 */
2838 sc->alc_buf_size = RX_BUF_SIZE_MAX;
2839 CSR_WRITE_4(sc, ALC_RX_BUF_SIZE, sc->alc_buf_size);
2840
2841 paddr = sc->alc_rdata.alc_rr_ring_paddr;
2842 /* Set Rx return descriptor base addresses. */
2843 CSR_WRITE_4(sc, ALC_RRD0_HEAD_ADDR_LO, ALC_ADDR_LO(paddr));
2844 if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) == 0) {
2845 /* We use one Rx return ring. */
2846 CSR_WRITE_4(sc, ALC_RRD1_HEAD_ADDR_LO, 0);
2847 CSR_WRITE_4(sc, ALC_RRD2_HEAD_ADDR_LO, 0);
2848 CSR_WRITE_4(sc, ALC_RRD3_HEAD_ADDR_LO, 0);
2849 }
2850 /* Set Rx return descriptor counter. */
2851 CSR_WRITE_4(sc, ALC_RRD_RING_CNT,
2852 (ALC_RR_RING_CNT << RRD_RING_CNT_SHIFT) & RRD_RING_CNT_MASK);
2853 paddr = sc->alc_rdata.alc_cmb_paddr;
2854 CSR_WRITE_4(sc, ALC_CMB_BASE_ADDR_LO, ALC_ADDR_LO(paddr));
2855 paddr = sc->alc_rdata.alc_smb_paddr;
2856 CSR_WRITE_4(sc, ALC_SMB_BASE_ADDR_HI, ALC_ADDR_HI(paddr));
2857 CSR_WRITE_4(sc, ALC_SMB_BASE_ADDR_LO, ALC_ADDR_LO(paddr));
2858
2859 if (sc->alc_ident->deviceid == PCI_PRODUCT_ATTANSIC_AR8152_B) {
2860 /* Reconfigure SRAM - Vendor magic. */
2861 CSR_WRITE_4(sc, ALC_SRAM_RX_FIFO_LEN, 0x000002A0);
2862 CSR_WRITE_4(sc, ALC_SRAM_TX_FIFO_LEN, 0x00000100);
2863 CSR_WRITE_4(sc, ALC_SRAM_RX_FIFO_ADDR, 0x029F0000);
2864 CSR_WRITE_4(sc, ALC_SRAM_RD0_ADDR, 0x02BF02A0);
2865 CSR_WRITE_4(sc, ALC_SRAM_TX_FIFO_ADDR, 0x03BF02C0);
2866 CSR_WRITE_4(sc, ALC_SRAM_TD_ADDR, 0x03DF03C0);
2867 CSR_WRITE_4(sc, ALC_TXF_WATER_MARK, 0x00000000);
2868 CSR_WRITE_4(sc, ALC_RD_DMA_CFG, 0x00000000);
2869 }
2870
2871 /* Tell hardware that we're ready to load DMA blocks. */
2872 CSR_WRITE_4(sc, ALC_DMA_BLOCK, DMA_BLOCK_LOAD);
2873
2874 /* Configure interrupt moderation timer. */
2875 sc->alc_int_rx_mod = ALC_IM_RX_TIMER_DEFAULT;
2876 sc->alc_int_tx_mod = ALC_IM_TX_TIMER_DEFAULT;
2877 reg = ALC_USECS(sc->alc_int_rx_mod) << IM_TIMER_RX_SHIFT;
2878 if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) == 0)
2879 reg |= ALC_USECS(sc->alc_int_tx_mod) << IM_TIMER_TX_SHIFT;
2880 CSR_WRITE_4(sc, ALC_IM_TIMER, reg);
2881 /*
2882 * We don't want automatic interrupt clearing, as the
2883 * interrupt handler needs to see the interrupt status.
2884 */
2885 reg = CSR_READ_4(sc, ALC_MASTER_CFG);
2886 reg &= ~(MASTER_IM_RX_TIMER_ENB | MASTER_IM_TX_TIMER_ENB);
2887 reg |= MASTER_SA_TIMER_ENB;
2888 if (ALC_USECS(sc->alc_int_rx_mod) != 0)
2889 reg |= MASTER_IM_RX_TIMER_ENB;
2890 if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) == 0 &&
2891 ALC_USECS(sc->alc_int_tx_mod) != 0)
2892 reg |= MASTER_IM_TX_TIMER_ENB;
2893 CSR_WRITE_4(sc, ALC_MASTER_CFG, reg);
2894 /*
2895 * Disable the interrupt re-trigger timer. We don't want
2896 * automatic re-triggering of un-ACKed interrupts.
2897 */
2898 CSR_WRITE_4(sc, ALC_INTR_RETRIG_TIMER, ALC_USECS(0));
2899 /* Configure CMB.
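* The coalescing message block lets the controller post the Tx
* consumer index to host memory (see alc_txeof()); the
* threshold below sets how many Tx descriptors are retired
* before such an update is written.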
*/
2900 if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0) {
2901 CSR_WRITE_4(sc, ALC_CMB_TD_THRESH, ALC_TX_RING_CNT / 3);
2902 CSR_WRITE_4(sc, ALC_CMB_TX_TIMER,
2903 ALC_USECS(sc->alc_int_tx_mod));
2904 } else {
2905 if ((sc->alc_flags & ALC_FLAG_CMB_BUG) == 0) {
2906 CSR_WRITE_4(sc, ALC_CMB_TD_THRESH, 4);
2907 CSR_WRITE_4(sc, ALC_CMB_TX_TIMER, ALC_USECS(5000));
2908 } else
2909 CSR_WRITE_4(sc, ALC_CMB_TX_TIMER, ALC_USECS(0));
2910 }
2911 /*
2912 * The hardware can be configured to issue an SMB interrupt at
2913 * a programmed interval. Since the driver already runs a
2914 * callout every second, use that instead of relying on the
2915 * periodic SMB interrupt.
2916 */
2917 CSR_WRITE_4(sc, ALC_SMB_STAT_TIMER, ALC_USECS(0));
2918 /* Clear MAC statistics. */
2919 alc_stats_clear(sc);
2920
2921 /*
2922 * Always use the maximum frame size the controller can
2923 * support. Otherwise received frames longer than the alc(4)
2924 * MTU would be silently dropped in hardware, which would make
2925 * path-MTU discovery hard as the sender would never get any
2926 * response from the receiver. alc(4) supports multi-segment
2927 * frames on the Rx path, so it has no problem reassembling
2928 * fragmented frames. Using the maximum frame size also
2929 * removes the need to reinitialize the hardware when the
2930 * interface MTU configuration is changed.
2931 *
2932 * Be conservative in what you do, be liberal in what you
2933 * accept from others - RFC 793.
2934 */
2935 CSR_WRITE_4(sc, ALC_FRAME_SIZE, sc->alc_ident->max_framelen);
2936
2937 if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) == 0) {
2938 /* Disable header split(?) */
2939 CSR_WRITE_4(sc, ALC_HDS_CFG, 0);
2940
2941 /* Configure IPG/IFG parameters. */
2942 CSR_WRITE_4(sc, ALC_IPG_IFG_CFG,
2943 ((IPG_IFG_IPGT_DEFAULT << IPG_IFG_IPGT_SHIFT) &
2944 IPG_IFG_IPGT_MASK) |
2945 ((IPG_IFG_MIFG_DEFAULT << IPG_IFG_MIFG_SHIFT) &
2946 IPG_IFG_MIFG_MASK) |
2947 ((IPG_IFG_IPG1_DEFAULT << IPG_IFG_IPG1_SHIFT) &
2948 IPG_IFG_IPG1_MASK) |
2949 ((IPG_IFG_IPG2_DEFAULT << IPG_IFG_IPG2_SHIFT) &
2950 IPG_IFG_IPG2_MASK));
2951 /* Set parameters for half-duplex media. */
2952 CSR_WRITE_4(sc, ALC_HDPX_CFG,
2953 ((HDPX_CFG_LCOL_DEFAULT << HDPX_CFG_LCOL_SHIFT) &
2954 HDPX_CFG_LCOL_MASK) |
2955 ((HDPX_CFG_RETRY_DEFAULT << HDPX_CFG_RETRY_SHIFT) &
2956 HDPX_CFG_RETRY_MASK) | HDPX_CFG_EXC_DEF_EN |
2957 ((HDPX_CFG_ABEBT_DEFAULT << HDPX_CFG_ABEBT_SHIFT) &
2958 HDPX_CFG_ABEBT_MASK) |
2959 ((HDPX_CFG_JAMIPG_DEFAULT << HDPX_CFG_JAMIPG_SHIFT) &
2960 HDPX_CFG_JAMIPG_MASK));
2961 }
2962
2963 /*
2964 * Set the TSO/checksum offload threshold. For frames larger
2965 * than this threshold, the hardware won't do TSO/checksum
2966 * offloading.
2967 */
2968 reg = (sc->alc_ident->max_framelen >> TSO_OFFLOAD_THRESH_UNIT_SHIFT) &
2969 TSO_OFFLOAD_THRESH_MASK;
2970 if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0)
2971 reg |= TSO_OFFLOAD_ERRLGPKT_DROP_ENB;
2972 CSR_WRITE_4(sc, ALC_TSO_OFFLOAD_THRESH, reg);
2973 /* Configure TxQ.
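* The Tx FIFO burst value comes from alc_dma_burst[] and is
* halved below for AR8152 B/B2, presumably as a workaround for
* those revisions.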
*/ 2974 reg = (alc_dma_burst[sc->alc_dma_rd_burst] << 2975 TXQ_CFG_TX_FIFO_BURST_SHIFT) & TXQ_CFG_TX_FIFO_BURST_MASK; 2976 if (sc->alc_ident->deviceid == PCI_PRODUCT_ATTANSIC_AR8152_B || 2977 sc->alc_ident->deviceid == PCI_PRODUCT_ATTANSIC_AR8152_B2) 2978 reg >>= 1; 2979 reg |= (TXQ_CFG_TD_BURST_DEFAULT << TXQ_CFG_TD_BURST_SHIFT) & 2980 TXQ_CFG_TD_BURST_MASK; 2981 reg |= TXQ_CFG_IP_OPTION_ENB | TXQ_CFG_8023_ENB; 2982 CSR_WRITE_4(sc, ALC_TXQ_CFG, reg | TXQ_CFG_ENHANCED_MODE); 2983 if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0) { 2984 reg = (TXQ_CFG_TD_BURST_DEFAULT << HQTD_CFG_Q1_BURST_SHIFT | 2985 TXQ_CFG_TD_BURST_DEFAULT << HQTD_CFG_Q2_BURST_SHIFT | 2986 TXQ_CFG_TD_BURST_DEFAULT << HQTD_CFG_Q3_BURST_SHIFT | 2987 HQTD_CFG_BURST_ENB); 2988 CSR_WRITE_4(sc, ALC_HQTD_CFG, reg); 2989 reg = WRR_PRI_RESTRICT_NONE; 2990 reg |= (WRR_PRI_DEFAULT << WRR_PRI0_SHIFT | 2991 WRR_PRI_DEFAULT << WRR_PRI1_SHIFT | 2992 WRR_PRI_DEFAULT << WRR_PRI2_SHIFT | 2993 WRR_PRI_DEFAULT << WRR_PRI3_SHIFT); 2994 CSR_WRITE_4(sc, ALC_WRR, reg); 2995 } else { 2996 /* Configure Rx free descriptor pre-fetching. */ 2997 CSR_WRITE_4(sc, ALC_RX_RD_FREE_THRESH, 2998 ((RX_RD_FREE_THRESH_HI_DEFAULT << 2999 RX_RD_FREE_THRESH_HI_SHIFT) & RX_RD_FREE_THRESH_HI_MASK) | 3000 ((RX_RD_FREE_THRESH_LO_DEFAULT << 3001 RX_RD_FREE_THRESH_LO_SHIFT) & RX_RD_FREE_THRESH_LO_MASK)); 3002 } 3003 3004 /* 3005 * Configure flow control parameters. 3006 * XON : 80% of Rx FIFO 3007 * XOFF : 30% of Rx FIFO 3008 */ 3009 if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0) { 3010 reg = CSR_READ_4(sc, ALC_SRAM_RX_FIFO_LEN); 3011 reg &= SRAM_RX_FIFO_LEN_MASK; 3012 reg *= 8; 3013 if (reg > 8 * 1024) 3014 reg -= RX_FIFO_PAUSE_816X_RSVD; 3015 else 3016 reg -= RX_BUF_SIZE_MAX; 3017 reg /= 8; 3018 CSR_WRITE_4(sc, ALC_RX_FIFO_PAUSE_THRESH, 3019 ((reg << RX_FIFO_PAUSE_THRESH_LO_SHIFT) & 3020 RX_FIFO_PAUSE_THRESH_LO_MASK) | 3021 (((RX_FIFO_PAUSE_816X_RSVD / 8) << 3022 RX_FIFO_PAUSE_THRESH_HI_SHIFT) & 3023 RX_FIFO_PAUSE_THRESH_HI_MASK)); 3024 } else if (sc->alc_ident->deviceid == PCI_PRODUCT_ATTANSIC_AR8131 || 3025 sc->alc_ident->deviceid == PCI_PRODUCT_ATTANSIC_AR8132) { 3026 reg = CSR_READ_4(sc, ALC_SRAM_RX_FIFO_LEN); 3027 rxf_hi = (reg * 8) / 10; 3028 rxf_lo = (reg * 3) / 10; 3029 CSR_WRITE_4(sc, ALC_RX_FIFO_PAUSE_THRESH, 3030 ((rxf_lo << RX_FIFO_PAUSE_THRESH_LO_SHIFT) & 3031 RX_FIFO_PAUSE_THRESH_LO_MASK) | 3032 ((rxf_hi << RX_FIFO_PAUSE_THRESH_HI_SHIFT) & 3033 RX_FIFO_PAUSE_THRESH_HI_MASK)); 3034 } 3035 3036 if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) == 0) { 3037 /* Disable RSS until I understand L1C/L2C's RSS logic. */ 3038 CSR_WRITE_4(sc, ALC_RSS_IDT_TABLE0, 0); 3039 CSR_WRITE_4(sc, ALC_RSS_CPU, 0); 3040 } 3041 3042 /* Configure RxQ. */ 3043 reg = (RXQ_CFG_RD_BURST_DEFAULT << RXQ_CFG_RD_BURST_SHIFT) & 3044 RXQ_CFG_RD_BURST_MASK; 3045 reg |= RXQ_CFG_RSS_MODE_DIS; 3046 if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0) 3047 reg |= (RXQ_CFG_816X_IDT_TBL_SIZE_DEFAULT << 3048 RXQ_CFG_816X_IDT_TBL_SIZE_SHIFT) & 3049 RXQ_CFG_816X_IDT_TBL_SIZE_MASK; 3050 if ((sc->alc_flags & ALC_FLAG_FASTETHER) == 0 && 3051 sc->alc_ident->deviceid != PCI_PRODUCT_ATTANSIC_AR8151_V2) 3052 reg |= RXQ_CFG_ASPM_THROUGHPUT_LIMIT_1M; 3053 CSR_WRITE_4(sc, ALC_RXQ_CFG, reg); 3054 3055 /* Configure DMA parameters. 
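* CMB/SMB DMA is enabled only when the corresponding quirk
* flags are clear; chips with the CMB bug fall back to reading
* the consumer index register in alc_txeof(), and chips with
* the SMB bug read the MIB counters directly instead.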
*/
3056 reg = DMA_CFG_OUT_ORDER | DMA_CFG_RD_REQ_PRI;
3057 reg |= sc->alc_rcb;
3058 if ((sc->alc_flags & ALC_FLAG_CMB_BUG) == 0)
3059 reg |= DMA_CFG_CMB_ENB;
3060 if ((sc->alc_flags & ALC_FLAG_SMB_BUG) == 0)
3061 reg |= DMA_CFG_SMB_ENB;
3062 else
3063 reg |= DMA_CFG_SMB_DIS;
3064 reg |= (sc->alc_dma_rd_burst & DMA_CFG_RD_BURST_MASK) <<
3065 DMA_CFG_RD_BURST_SHIFT;
3066 reg |= (sc->alc_dma_wr_burst & DMA_CFG_WR_BURST_MASK) <<
3067 DMA_CFG_WR_BURST_SHIFT;
3068 reg |= (DMA_CFG_RD_DELAY_CNT_DEFAULT << DMA_CFG_RD_DELAY_CNT_SHIFT) &
3069 DMA_CFG_RD_DELAY_CNT_MASK;
3070 reg |= (DMA_CFG_WR_DELAY_CNT_DEFAULT << DMA_CFG_WR_DELAY_CNT_SHIFT) &
3071 DMA_CFG_WR_DELAY_CNT_MASK;
3072 if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0) {
3073 switch (AR816X_REV(sc->alc_rev)) {
3074 case AR816X_REV_A0:
3075 case AR816X_REV_A1:
3076 reg |= DMA_CFG_RD_CHNL_SEL_1;
3077 break;
3078 case AR816X_REV_B0:
3079 /* FALLTHROUGH */
3080 default:
3081 reg |= DMA_CFG_RD_CHNL_SEL_3;
3082 break;
3083 }
3084 }
3085 CSR_WRITE_4(sc, ALC_DMA_CFG, reg);
3086
3087 /*
3088 * Configure Tx/Rx MACs.
3089 * - Auto-padding for short frames.
3090 * - Enable CRC generation.
3091 * Actual reconfiguration of the MAC for the resolved
3092 * speed/duplex follows after link establishment is detected.
3093 * AR813x/AR815x always does checksum computation regardless
3094 * of the MAC_CFG_RXCSUM_ENB bit. The controller is also known
3095 * to have a bug in the protocol field of the Rx return
3096 * structure, so these controllers can't handle fragmented
3097 * frames. Disable Rx checksum offloading until there is a
3098 * newer controller with a sane implementation.
3099 */
3100 reg = MAC_CFG_TX_CRC_ENB | MAC_CFG_TX_AUTO_PAD | MAC_CFG_FULL_DUPLEX |
3101 ((MAC_CFG_PREAMBLE_DEFAULT << MAC_CFG_PREAMBLE_SHIFT) &
3102 MAC_CFG_PREAMBLE_MASK);
3103 if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0 ||
3104 sc->alc_ident->deviceid == PCI_PRODUCT_ATTANSIC_AR8151 ||
3105 sc->alc_ident->deviceid == PCI_PRODUCT_ATTANSIC_AR8151_V2 ||
3106 sc->alc_ident->deviceid == PCI_PRODUCT_ATTANSIC_AR8152_B2)
3107 reg |= MAC_CFG_HASH_ALG_CRC32 | MAC_CFG_SPEED_MODE_SW;
3108 if ((sc->alc_flags & ALC_FLAG_FASTETHER) != 0)
3109 reg |= MAC_CFG_SPEED_10_100;
3110 else
3111 reg |= MAC_CFG_SPEED_1000;
3112 CSR_WRITE_4(sc, ALC_MAC_CFG, reg);
3113
3114 /* Set up the receive filter. */
3115 alc_iff(sc);
3116 alc_rxvlan(sc);
3117
3118 /* Acknowledge all pending interrupts and clear them. */
3119 CSR_WRITE_4(sc, ALC_INTR_MASK, ALC_INTRS);
3120 CSR_WRITE_4(sc, ALC_INTR_STATUS, 0xFFFFFFFF);
3121 CSR_WRITE_4(sc, ALC_INTR_STATUS, 0);
3122
3123 sc->alc_flags &= ~ALC_FLAG_LINK;
3124 /* Switch to the current media. */
3125 mii = &sc->sc_miibus;
3126 mii_mediachg(mii);
3127
3128 callout_schedule(&sc->sc_tick_ch, hz);
3129
3130 ifp->if_flags |= IFF_RUNNING;
3131 ifp->if_flags &= ~IFF_OACTIVE;
3132
3133 return (0);
3134 }
3135
3136 static void
3137 alc_stop(struct ifnet *ifp, int disable)
3138 {
3139 struct alc_softc *sc = ifp->if_softc;
3140 struct alc_txdesc *txd;
3141 struct alc_rxdesc *rxd;
3142 uint32_t reg;
3143 int i;
3144
3145 callout_stop(&sc->sc_tick_ch);
3146
3147 /*
3148 * Mark the interface down and cancel the watchdog timer.
3149 */
3150 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
3151 ifp->if_timer = 0;
3152
3153 sc->alc_flags &= ~ALC_FLAG_LINK;
3154
3155 alc_stats_update(sc);
3156
3157 mii_down(&sc->sc_miibus);
3158
3159 /* Disable interrupts. */
3160 CSR_WRITE_4(sc, ALC_INTR_MASK, 0);
3161 CSR_WRITE_4(sc, ALC_INTR_STATUS, 0xFFFFFFFF);
3162
3163 /* Disable DMA.
*/ 3164 reg = CSR_READ_4(sc, ALC_DMA_CFG); 3165 reg &= ~(DMA_CFG_CMB_ENB | DMA_CFG_SMB_ENB); 3166 reg |= DMA_CFG_SMB_DIS; 3167 CSR_WRITE_4(sc, ALC_DMA_CFG, reg); 3168 DELAY(1000); 3169 3170 /* Stop Rx/Tx MACs. */ 3171 alc_stop_mac(sc); 3172 3173 /* Disable interrupts which might be touched in taskq handler. */ 3174 CSR_WRITE_4(sc, ALC_INTR_STATUS, 0xFFFFFFFF); 3175 3176 /* Disable L0s/L1s */ 3177 alc_aspm(sc, 0, IFM_UNKNOWN); 3178 3179 /* Reclaim Rx buffers that have been processed. */ 3180 if (sc->alc_cdata.alc_rxhead != NULL) 3181 m_freem(sc->alc_cdata.alc_rxhead); 3182 ALC_RXCHAIN_RESET(sc); 3183 /* 3184 * Free Tx/Rx mbufs still in the queues. 3185 */ 3186 for (i = 0; i < ALC_RX_RING_CNT; i++) { 3187 rxd = &sc->alc_cdata.alc_rxdesc[i]; 3188 if (rxd->rx_m != NULL) { 3189 bus_dmamap_sync(sc->sc_dmat, rxd->rx_dmamap, 0, 3190 rxd->rx_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD); 3191 bus_dmamap_unload(sc->sc_dmat, rxd->rx_dmamap); 3192 m_freem(rxd->rx_m); 3193 rxd->rx_m = NULL; 3194 } 3195 } 3196 for (i = 0; i < ALC_TX_RING_CNT; i++) { 3197 txd = &sc->alc_cdata.alc_txdesc[i]; 3198 if (txd->tx_m != NULL) { 3199 bus_dmamap_sync(sc->sc_dmat, txd->tx_dmamap, 0, 3200 txd->tx_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE); 3201 bus_dmamap_unload(sc->sc_dmat, txd->tx_dmamap); 3202 m_freem(txd->tx_m); 3203 txd->tx_m = NULL; 3204 } 3205 } 3206 } 3207 3208 static void 3209 alc_stop_mac(struct alc_softc *sc) 3210 { 3211 uint32_t reg; 3212 int i; 3213 3214 alc_stop_queue(sc); 3215 /* Disable Rx/Tx MAC. */ 3216 reg = CSR_READ_4(sc, ALC_MAC_CFG); 3217 if ((reg & (MAC_CFG_TX_ENB | MAC_CFG_RX_ENB)) != 0) { 3218 reg &= ~(MAC_CFG_TX_ENB | MAC_CFG_RX_ENB); 3219 CSR_WRITE_4(sc, ALC_MAC_CFG, reg); 3220 } 3221 for (i = ALC_TIMEOUT; i > 0; i--) { 3222 reg = CSR_READ_4(sc, ALC_IDLE_STATUS); 3223 if ((reg & (IDLE_STATUS_RXMAC | IDLE_STATUS_TXMAC)) == 0) 3224 break; 3225 DELAY(10); 3226 } 3227 if (i == 0) 3228 printf("%s: could not disable Rx/Tx MAC(0x%08x)!\n", 3229 device_xname(sc->sc_dev), reg); 3230 } 3231 3232 static void 3233 alc_start_queue(struct alc_softc *sc) 3234 { 3235 uint32_t qcfg[] = { 3236 0, 3237 RXQ_CFG_QUEUE0_ENB, 3238 RXQ_CFG_QUEUE0_ENB | RXQ_CFG_QUEUE1_ENB, 3239 RXQ_CFG_QUEUE0_ENB | RXQ_CFG_QUEUE1_ENB | RXQ_CFG_QUEUE2_ENB, 3240 RXQ_CFG_ENB 3241 }; 3242 uint32_t cfg; 3243 3244 /* Enable RxQ. */ 3245 cfg = CSR_READ_4(sc, ALC_RXQ_CFG); 3246 if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) == 0) { 3247 cfg &= ~RXQ_CFG_ENB; 3248 cfg |= qcfg[1]; 3249 } else 3250 cfg |= RXQ_CFG_QUEUE0_ENB; 3251 CSR_WRITE_4(sc, ALC_RXQ_CFG, cfg); 3252 /* Enable TxQ. */ 3253 cfg = CSR_READ_4(sc, ALC_TXQ_CFG); 3254 cfg |= TXQ_CFG_ENB; 3255 CSR_WRITE_4(sc, ALC_TXQ_CFG, cfg); 3256 } 3257 3258 static void 3259 alc_stop_queue(struct alc_softc *sc) 3260 { 3261 uint32_t reg; 3262 int i; 3263 3264 /* Disable RxQ. */ 3265 reg = CSR_READ_4(sc, ALC_RXQ_CFG); 3266 if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) == 0) { 3267 if ((reg & RXQ_CFG_ENB) != 0) { 3268 reg &= ~RXQ_CFG_ENB; 3269 CSR_WRITE_4(sc, ALC_RXQ_CFG, reg); 3270 } 3271 } else { 3272 if ((reg & RXQ_CFG_QUEUE0_ENB) != 0) { 3273 reg &= ~RXQ_CFG_QUEUE0_ENB; 3274 CSR_WRITE_4(sc, ALC_RXQ_CFG, reg); 3275 } 3276 } 3277 /* Disable TxQ. 
*/
3278 reg = CSR_READ_4(sc, ALC_TXQ_CFG);
3279 if ((reg & TXQ_CFG_ENB) != 0) {
3280 reg &= ~TXQ_CFG_ENB;
3281 CSR_WRITE_4(sc, ALC_TXQ_CFG, reg);
3282 }
3283 DELAY(40);
3284 for (i = ALC_TIMEOUT; i > 0; i--) {
3285 reg = CSR_READ_4(sc, ALC_IDLE_STATUS);
3286 if ((reg & (IDLE_STATUS_RXQ | IDLE_STATUS_TXQ)) == 0)
3287 break;
3288 DELAY(10);
3289 }
3290 if (i == 0)
3291 printf("%s: could not disable RxQ/TxQ (0x%08x)!\n",
3292 device_xname(sc->sc_dev), reg);
3293 }
3294
3295 static void
3296 alc_init_tx_ring(struct alc_softc *sc)
3297 {
3298 struct alc_ring_data *rd;
3299 struct alc_txdesc *txd;
3300 int i;
3301
3302 sc->alc_cdata.alc_tx_prod = 0;
3303 sc->alc_cdata.alc_tx_cons = 0;
3304 sc->alc_cdata.alc_tx_cnt = 0;
3305
3306 rd = &sc->alc_rdata;
3307 memset(rd->alc_tx_ring, 0, ALC_TX_RING_SZ);
3308 for (i = 0; i < ALC_TX_RING_CNT; i++) {
3309 txd = &sc->alc_cdata.alc_txdesc[i];
3310 txd->tx_m = NULL;
3311 }
3312
3313 bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_tx_ring_map, 0,
3314 sc->alc_cdata.alc_tx_ring_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
3315 }
3316
3317 static int
3318 alc_init_rx_ring(struct alc_softc *sc, bool init)
3319 {
3320 struct alc_ring_data *rd;
3321 struct alc_rxdesc *rxd;
3322 int i;
3323
3324 sc->alc_cdata.alc_rx_cons = ALC_RX_RING_CNT - 1;
3325 rd = &sc->alc_rdata;
3326 memset(rd->alc_rx_ring, 0, ALC_RX_RING_SZ);
3327 for (i = 0; i < ALC_RX_RING_CNT; i++) {
3328 rxd = &sc->alc_cdata.alc_rxdesc[i];
3329 rxd->rx_m = NULL;
3330 rxd->rx_desc = &rd->alc_rx_ring[i];
3331 if (alc_newbuf(sc, rxd, init) != 0)
3332 return (ENOBUFS);
3333 }
3334
3335 /*
3336 * Since the controller does not update Rx descriptors, the
3337 * driver does not have to read them back, so
3338 * BUS_DMASYNC_PREWRITE is enough to ensure coherence.
3339 */
3340 bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_rx_ring_map, 0,
3341 sc->alc_cdata.alc_rx_ring_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
3342 /* Let the controller know about the availability of new Rx buffers.
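* alc_rx_cons was initialized to ALC_RX_RING_CNT - 1 above, so
* writing it as the producer index effectively hands the whole
* ring to the controller.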
*/ 3343 CSR_WRITE_4(sc, ALC_MBOX_RD0_PROD_IDX, sc->alc_cdata.alc_rx_cons); 3344 3345 return (0); 3346 } 3347 3348 static void 3349 alc_init_rr_ring(struct alc_softc *sc) 3350 { 3351 struct alc_ring_data *rd; 3352 3353 sc->alc_cdata.alc_rr_cons = 0; 3354 ALC_RXCHAIN_RESET(sc); 3355 3356 rd = &sc->alc_rdata; 3357 memset(rd->alc_rr_ring, 0, ALC_RR_RING_SZ); 3358 bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_rr_ring_map, 0, 3359 sc->alc_cdata.alc_rr_ring_map->dm_mapsize, 3360 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 3361 } 3362 3363 static void 3364 alc_init_cmb(struct alc_softc *sc) 3365 { 3366 struct alc_ring_data *rd; 3367 3368 rd = &sc->alc_rdata; 3369 memset(rd->alc_cmb, 0, ALC_CMB_SZ); 3370 bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_cmb_map, 0, 3371 sc->alc_cdata.alc_cmb_map->dm_mapsize, 3372 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 3373 } 3374 3375 static void 3376 alc_init_smb(struct alc_softc *sc) 3377 { 3378 struct alc_ring_data *rd; 3379 3380 rd = &sc->alc_rdata; 3381 memset(rd->alc_smb, 0, ALC_SMB_SZ); 3382 bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_smb_map, 0, 3383 sc->alc_cdata.alc_smb_map->dm_mapsize, 3384 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 3385 } 3386 3387 static void 3388 alc_rxvlan(struct alc_softc *sc) 3389 { 3390 uint32_t reg; 3391 3392 reg = CSR_READ_4(sc, ALC_MAC_CFG); 3393 if (sc->sc_ec.ec_capenable & ETHERCAP_VLAN_HWTAGGING) 3394 reg |= MAC_CFG_VLAN_TAG_STRIP; 3395 else 3396 reg &= ~MAC_CFG_VLAN_TAG_STRIP; 3397 CSR_WRITE_4(sc, ALC_MAC_CFG, reg); 3398 } 3399 3400 static void 3401 alc_iff(struct alc_softc *sc) 3402 { 3403 struct ethercom *ec = &sc->sc_ec; 3404 struct ifnet *ifp = &ec->ec_if; 3405 struct ether_multi *enm; 3406 struct ether_multistep step; 3407 uint32_t crc; 3408 uint32_t mchash[2]; 3409 uint32_t rxcfg; 3410 3411 rxcfg = CSR_READ_4(sc, ALC_MAC_CFG); 3412 rxcfg &= ~(MAC_CFG_ALLMULTI | MAC_CFG_BCAST | MAC_CFG_PROMISC); 3413 ifp->if_flags &= ~IFF_ALLMULTI; 3414 3415 /* 3416 * Always accept broadcast frames. 3417 */ 3418 rxcfg |= MAC_CFG_BCAST; 3419 3420 if (ifp->if_flags & IFF_PROMISC || ec->ec_multicnt > 0) { 3421 ifp->if_flags |= IFF_ALLMULTI; 3422 if (ifp->if_flags & IFF_PROMISC) 3423 rxcfg |= MAC_CFG_PROMISC; 3424 else 3425 rxcfg |= MAC_CFG_ALLMULTI; 3426 mchash[0] = mchash[1] = 0xFFFFFFFF; 3427 } else { 3428 /* Program new filter. */ 3429 memset(mchash, 0, sizeof(mchash)); 3430 3431 ETHER_LOCK(ec); 3432 ETHER_FIRST_MULTI(step, ec, enm); 3433 while (enm != NULL) { 3434 crc = ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN); 3435 mchash[crc >> 31] |= 1 << ((crc >> 26) & 0x1f); 3436 ETHER_NEXT_MULTI(step, enm); 3437 } 3438 ETHER_UNLOCK(ec); 3439 } 3440 3441 CSR_WRITE_4(sc, ALC_MAR0, mchash[0]); 3442 CSR_WRITE_4(sc, ALC_MAR1, mchash[1]); 3443 CSR_WRITE_4(sc, ALC_MAC_CFG, rxcfg); 3444 } 3445 3446 MODULE(MODULE_CLASS_DRIVER, if_alc, "pci"); 3447 3448 #ifdef _MODULE 3449 #include "ioconf.c" 3450 #endif 3451 3452 static int 3453 if_alc_modcmd(modcmd_t cmd, void *opaque) 3454 { 3455 int error = 0; 3456 3457 switch (cmd) { 3458 case MODULE_CMD_INIT: 3459 #ifdef _MODULE 3460 error = config_init_component(cfdriver_ioconf_if_alc, 3461 cfattach_ioconf_if_alc, cfdata_ioconf_if_alc); 3462 #endif 3463 return error; 3464 case MODULE_CMD_FINI: 3465 #ifdef _MODULE 3466 error = config_fini_component(cfdriver_ioconf_if_alc, 3467 cfattach_ioconf_if_alc, cfdata_ioconf_if_alc); 3468 #endif 3469 return error; 3470 default: 3471 return ENOTTY; 3472 } 3473 } 3474