/*	$NetBSD: if_alc.c,v 1.43 2019/10/30 07:26:28 msaitoh Exp $	*/
/*	$OpenBSD: if_alc.c,v 1.1 2009/08/08 09:31:13 kevlo Exp $	*/
/*-
 * Copyright (c) 2009, Pyun YongHyeon <yongari@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/* Driver for Atheros AR813x/AR815x PCIe Ethernet. */

#ifdef _KERNEL_OPT
#include "vlan.h"
#endif

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/queue.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/callout.h>
#include <sys/socket.h>
#include <sys/module.h>

#include <sys/bus.h>

#include <net/bpf.h>
#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_llc.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#endif

#include <net/if_types.h>
#include <net/if_vlanvar.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_alcreg.h>

/*
 * Devices supported by this driver.
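 * Each entry carries the PCI vendor/product ID, what appears to be the
 * chip's maximum frame length in bytes (9KB or 6KB), and a
 * human-readable name.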
 */
static struct alc_ident alc_ident_table[] = {
	{ PCI_VENDOR_ATTANSIC, PCI_PRODUCT_ATTANSIC_AR8131, 9 * 1024,
	    "Atheros AR8131 PCIe Gigabit Ethernet" },
	{ PCI_VENDOR_ATTANSIC, PCI_PRODUCT_ATTANSIC_AR8132, 9 * 1024,
	    "Atheros AR8132 PCIe Fast Ethernet" },
	{ PCI_VENDOR_ATTANSIC, PCI_PRODUCT_ATTANSIC_AR8151, 6 * 1024,
	    "Atheros AR8151 v1.0 PCIe Gigabit Ethernet" },
	{ PCI_VENDOR_ATTANSIC, PCI_PRODUCT_ATTANSIC_AR8151_V2, 6 * 1024,
	    "Atheros AR8151 v2.0 PCIe Gigabit Ethernet" },
	{ PCI_VENDOR_ATTANSIC, PCI_PRODUCT_ATTANSIC_AR8152_B, 6 * 1024,
	    "Atheros AR8152 v1.1 PCIe Fast Ethernet" },
	{ PCI_VENDOR_ATTANSIC, PCI_PRODUCT_ATTANSIC_AR8152_B2, 6 * 1024,
	    "Atheros AR8152 v2.0 PCIe Fast Ethernet" },
	{ PCI_VENDOR_ATTANSIC, PCI_PRODUCT_ATTANSIC_AR8161, 9 * 1024,
	    "Atheros AR8161 PCIe Gigabit Ethernet" },
	{ PCI_VENDOR_ATTANSIC, PCI_PRODUCT_ATTANSIC_AR8162, 9 * 1024,
	    "Atheros AR8162 PCIe Fast Ethernet" },
	{ PCI_VENDOR_ATTANSIC, PCI_PRODUCT_ATTANSIC_AR8171, 9 * 1024,
	    "Atheros AR8171 PCIe Gigabit Ethernet" },
	{ PCI_VENDOR_ATTANSIC, PCI_PRODUCT_ATTANSIC_AR8172, 9 * 1024,
	    "Atheros AR8172 PCIe Fast Ethernet" },
	{ PCI_VENDOR_ATTANSIC, PCI_PRODUCT_ATTANSIC_E2200, 9 * 1024,
	    "Killer E2200 Gigabit Ethernet" },
	{ PCI_VENDOR_ATTANSIC, PCI_PRODUCT_ATTANSIC_E2400, 9 * 1024,
	    "Killer E2400 Gigabit Ethernet" },
	{ PCI_VENDOR_ATTANSIC, PCI_PRODUCT_ATTANSIC_E2500, 9 * 1024,
	    "Killer E2500 Gigabit Ethernet" },
	{ 0, 0, 0, NULL },
};

static int	alc_match(device_t, cfdata_t, void *);
static void	alc_attach(device_t, device_t, void *);
static int	alc_detach(device_t, int);

static int	alc_init(struct ifnet *);
static int	alc_init_backend(struct ifnet *, bool);
static void	alc_start(struct ifnet *);
static int	alc_ioctl(struct ifnet *, u_long, void *);
static void	alc_watchdog(struct ifnet *);
static int	alc_mediachange(struct ifnet *);
static void	alc_mediastatus(struct ifnet *, struct ifmediareq *);

static void	alc_aspm(struct alc_softc *, int, int);
static void	alc_aspm_813x(struct alc_softc *, int);
static void	alc_aspm_816x(struct alc_softc *, int);
static void	alc_disable_l0s_l1(struct alc_softc *);
static int	alc_dma_alloc(struct alc_softc *);
static void	alc_dma_free(struct alc_softc *);
static void	alc_dsp_fixup(struct alc_softc *, int);
static int	alc_encap(struct alc_softc *, struct mbuf **);
static struct alc_ident *
		alc_find_ident(struct pci_attach_args *);
static void	alc_get_macaddr(struct alc_softc *);
static void	alc_get_macaddr_813x(struct alc_softc *);
static void	alc_get_macaddr_816x(struct alc_softc *);
static void	alc_get_macaddr_par(struct alc_softc *);
static void	alc_init_cmb(struct alc_softc *);
static void	alc_init_rr_ring(struct alc_softc *);
static int	alc_init_rx_ring(struct alc_softc *, bool);
static void	alc_init_smb(struct alc_softc *);
static void	alc_init_tx_ring(struct alc_softc *);
static int	alc_intr(void *);
static void	alc_mac_config(struct alc_softc *);
static int	alc_mii_readreg_813x(struct alc_softc *, int, int, uint16_t *);
static int	alc_mii_readreg_816x(struct alc_softc *, int, int, uint16_t *);
static int	alc_mii_writereg_813x(struct alc_softc *, int, int, uint16_t);
static int	alc_mii_writereg_816x(struct alc_softc *, int, int, uint16_t);
static int	alc_miibus_readreg(device_t, int, int, uint16_t *);
static void	alc_miibus_statchg(struct ifnet *);
static int	alc_miibus_writereg(device_t, int, int, uint16_t);
static int	alc_miidbg_readreg(struct alc_softc *, int, uint16_t *);
static int	alc_miidbg_writereg(struct alc_softc *, int, uint16_t);
static int	alc_miiext_readreg(struct alc_softc *, int, int, uint16_t *);
static int	alc_miiext_writereg(struct alc_softc *, int, int, uint16_t);
static int	alc_newbuf(struct alc_softc *, struct alc_rxdesc *, bool);
static void	alc_phy_down(struct alc_softc *);
static void	alc_phy_reset(struct alc_softc *);
static void	alc_phy_reset_813x(struct alc_softc *);
static void	alc_phy_reset_816x(struct alc_softc *);
static void	alc_reset(struct alc_softc *);
static void	alc_rxeof(struct alc_softc *, struct rx_rdesc *);
static int	alc_rxintr(struct alc_softc *);
static void	alc_iff(struct alc_softc *);
static void	alc_rxvlan(struct alc_softc *);
static void	alc_start_queue(struct alc_softc *);
static void	alc_stats_clear(struct alc_softc *);
static void	alc_stats_update(struct alc_softc *);
static void	alc_stop(struct ifnet *, int);
static void	alc_stop_mac(struct alc_softc *);
static void	alc_stop_queue(struct alc_softc *);
static void	alc_tick(void *);
static void	alc_txeof(struct alc_softc *);
static void	alc_init_pcie(struct alc_softc *);

static uint32_t alc_dma_burst[] = { 128, 256, 512, 1024, 2048, 4096, 0, 0 };

CFATTACH_DECL_NEW(alc, sizeof(struct alc_softc),
    alc_match, alc_attach, alc_detach, NULL);

int alcdebug = 0;
#define	DPRINTF(x)	do { if (alcdebug) printf x; } while (0)

#define ALC_CSUM_FEATURES	(M_CSUM_TCPv4 | M_CSUM_UDPv4)

static int
alc_miibus_readreg(device_t dev, int phy, int reg, uint16_t *val)
{
	struct alc_softc *sc = device_private(dev);
	int v;

	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0)
		v = alc_mii_readreg_816x(sc, phy, reg, val);
	else
		v = alc_mii_readreg_813x(sc, phy, reg, val);
	return (v);
}

static int
alc_mii_readreg_813x(struct alc_softc *sc, int phy, int reg, uint16_t *val)
{
	uint32_t v;
	int i;

	if (phy != sc->alc_phyaddr)
		return -1;

	/*
	 * For the AR8132 fast ethernet controller, do not report 1000baseT
	 * capability to mii(4). Even though the AR8132 uses the same
	 * model/revision number as the F1 gigabit PHY, the PHY has no
	 * ability to establish a 1000baseT link.
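	 * Returning an all-zero extended status register here keeps
	 * mii(4) from ever configuring 1000baseT on these chips.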
	 */
	if ((sc->alc_flags & ALC_FLAG_FASTETHER) != 0 && reg == MII_EXTSR) {
		*val = 0;
		return 0;
	}

	CSR_WRITE_4(sc, ALC_MDIO, MDIO_OP_EXECUTE | MDIO_OP_READ |
	    MDIO_SUP_PREAMBLE | MDIO_CLK_25_4 | MDIO_REG_ADDR(reg));
	for (i = ALC_PHY_TIMEOUT; i > 0; i--) {
		DELAY(5);
		v = CSR_READ_4(sc, ALC_MDIO);
		if ((v & (MDIO_OP_EXECUTE | MDIO_OP_BUSY)) == 0)
			break;
	}

	if (i == 0) {
		printf("%s: phy read timeout: phy %d, reg %d\n",
		    device_xname(sc->sc_dev), phy, reg);
		return ETIMEDOUT;
	}

	*val = (v & MDIO_DATA_MASK) >> MDIO_DATA_SHIFT;
	return 0;
}

static int
alc_mii_readreg_816x(struct alc_softc *sc, int phy, int reg, uint16_t *val)
{
	uint32_t clk, v;
	int i;

	if (phy != sc->alc_phyaddr)
		return -1;

	if ((sc->alc_flags & ALC_FLAG_LINK) != 0)
		clk = MDIO_CLK_25_128;
	else
		clk = MDIO_CLK_25_4;
	CSR_WRITE_4(sc, ALC_MDIO, MDIO_OP_EXECUTE | MDIO_OP_READ |
	    MDIO_SUP_PREAMBLE | clk | MDIO_REG_ADDR(reg));
	for (i = ALC_PHY_TIMEOUT; i > 0; i--) {
		DELAY(5);
		v = CSR_READ_4(sc, ALC_MDIO);
		if ((v & MDIO_OP_BUSY) == 0)
			break;
	}

	if (i == 0) {
		printf("%s: phy read timeout: phy %d, reg %d\n",
		    device_xname(sc->sc_dev), phy, reg);
		return ETIMEDOUT;
	}

	*val = (v & MDIO_DATA_MASK) >> MDIO_DATA_SHIFT;
	return 0;
}

static int
alc_miibus_writereg(device_t dev, int phy, int reg, uint16_t val)
{
	struct alc_softc *sc = device_private(dev);
	int rv;

	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0)
		rv = alc_mii_writereg_816x(sc, phy, reg, val);
	else
		rv = alc_mii_writereg_813x(sc, phy, reg, val);

	return rv;
}

static int
alc_mii_writereg_813x(struct alc_softc *sc, int phy, int reg, uint16_t val)
{
	uint32_t v;
	int i;

	CSR_WRITE_4(sc, ALC_MDIO, MDIO_OP_EXECUTE | MDIO_OP_WRITE |
	    (val & MDIO_DATA_MASK) << MDIO_DATA_SHIFT |
	    MDIO_SUP_PREAMBLE | MDIO_CLK_25_4 | MDIO_REG_ADDR(reg));
	for (i = ALC_PHY_TIMEOUT; i > 0; i--) {
		DELAY(5);
		v = CSR_READ_4(sc, ALC_MDIO);
		if ((v & (MDIO_OP_EXECUTE | MDIO_OP_BUSY)) == 0)
			break;
	}

	if (i == 0) {
		printf("%s: phy write timeout: phy %d, reg %d\n",
		    device_xname(sc->sc_dev), phy, reg);
		return ETIMEDOUT;
	}

	return 0;
}

static int
alc_mii_writereg_816x(struct alc_softc *sc, int phy, int reg, uint16_t val)
{
	uint32_t clk, v;
	int i;

	if ((sc->alc_flags & ALC_FLAG_LINK) != 0)
		clk = MDIO_CLK_25_128;
	else
		clk = MDIO_CLK_25_4;
	CSR_WRITE_4(sc, ALC_MDIO, MDIO_OP_EXECUTE | MDIO_OP_WRITE |
	    ((val & MDIO_DATA_MASK) << MDIO_DATA_SHIFT) | MDIO_REG_ADDR(reg) |
	    MDIO_SUP_PREAMBLE | clk);
	for (i = ALC_PHY_TIMEOUT; i > 0; i--) {
		DELAY(5);
		v = CSR_READ_4(sc, ALC_MDIO);
		if ((v & MDIO_OP_BUSY) == 0)
			break;
	}

	if (i == 0) {
		printf("%s: phy write timeout: phy %d, reg %d\n",
		    device_xname(sc->sc_dev), phy, reg);
		return ETIMEDOUT;
	}

	return 0;
}

static void
alc_miibus_statchg(struct ifnet *ifp)
{
	struct alc_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_miibus;
	uint32_t reg;

	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return;

	sc->alc_flags &= ~ALC_FLAG_LINK;
	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
	    (IFM_ACTIVE | IFM_AVALID)) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			sc->alc_flags |= ALC_FLAG_LINK;
			break;
		case IFM_1000_T:
			if ((sc->alc_flags & ALC_FLAG_FASTETHER) == 0)
				sc->alc_flags |= ALC_FLAG_LINK;
			break;
		default:
			break;
		}
	}
	/* Stop Rx/Tx MACs. */
	alc_stop_mac(sc);

	/* Program MACs with resolved speed/duplex/flow-control. */
	if ((sc->alc_flags & ALC_FLAG_LINK) != 0) {
		alc_start_queue(sc);
		alc_mac_config(sc);
		/* Re-enable Tx/Rx MACs. */
		reg = CSR_READ_4(sc, ALC_MAC_CFG);
		reg |= MAC_CFG_TX_ENB | MAC_CFG_RX_ENB;
		CSR_WRITE_4(sc, ALC_MAC_CFG, reg);
	}
	alc_aspm(sc, 0, IFM_SUBTYPE(mii->mii_media_active));
	alc_dsp_fixup(sc, IFM_SUBTYPE(mii->mii_media_active));
}

static int
alc_miidbg_readreg(struct alc_softc *sc, int reg, uint16_t *val)
{
	int rv;

	rv = alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr, ALC_MII_DBG_ADDR,
	    reg);
	if (rv != 0)
		return rv;

	return (alc_miibus_readreg(sc->sc_dev, sc->alc_phyaddr,
	    ALC_MII_DBG_DATA, val));
}

static int
alc_miidbg_writereg(struct alc_softc *sc, int reg, uint16_t val)
{
	int rv;

	rv = alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr, ALC_MII_DBG_ADDR,
	    reg);
	if (rv != 0)
		return rv;

	rv = alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr, ALC_MII_DBG_DATA,
	    val);

	return rv;
}

static int
alc_miiext_readreg(struct alc_softc *sc, int devaddr, int reg, uint16_t *val)
{
	uint32_t clk, v;
	int i;

	CSR_WRITE_4(sc, ALC_EXT_MDIO, EXT_MDIO_REG(reg) |
	    EXT_MDIO_DEVADDR(devaddr));
	if ((sc->alc_flags & ALC_FLAG_LINK) != 0)
		clk = MDIO_CLK_25_128;
	else
		clk = MDIO_CLK_25_4;
	CSR_WRITE_4(sc, ALC_MDIO, MDIO_OP_EXECUTE | MDIO_OP_READ |
	    MDIO_SUP_PREAMBLE | clk | MDIO_MODE_EXT);
	for (i = ALC_PHY_TIMEOUT; i > 0; i--) {
		DELAY(5);
		v = CSR_READ_4(sc, ALC_MDIO);
		if ((v & MDIO_OP_BUSY) == 0)
			break;
	}

	if (i == 0) {
		printf("%s: phy ext read timeout: %d\n",
		    device_xname(sc->sc_dev), reg);
		return ETIMEDOUT;
	}

	*val = (v & MDIO_DATA_MASK) >> MDIO_DATA_SHIFT;
	return 0;
}

static int
alc_miiext_writereg(struct alc_softc *sc, int devaddr, int reg, uint16_t val)
{
	uint32_t clk, v;
	int i;

	CSR_WRITE_4(sc, ALC_EXT_MDIO, EXT_MDIO_REG(reg) |
	    EXT_MDIO_DEVADDR(devaddr));
	if ((sc->alc_flags & ALC_FLAG_LINK) != 0)
		clk = MDIO_CLK_25_128;
	else
		clk = MDIO_CLK_25_4;
	CSR_WRITE_4(sc, ALC_MDIO, MDIO_OP_EXECUTE | MDIO_OP_WRITE |
	    ((val & MDIO_DATA_MASK) << MDIO_DATA_SHIFT) |
	    MDIO_SUP_PREAMBLE | clk | MDIO_MODE_EXT);
	for (i = ALC_PHY_TIMEOUT; i > 0; i--) {
		DELAY(5);
		v = CSR_READ_4(sc, ALC_MDIO);
		if ((v & MDIO_OP_BUSY) == 0)
			break;
	}

	if (i == 0) {
		printf("%s: phy ext write timeout: reg %d\n",
		    device_xname(sc->sc_dev), reg);
		return ETIMEDOUT;
	}

	return 0;
}

static void
alc_dsp_fixup(struct alc_softc *sc, int media)
{
	uint16_t agc, len, val;

	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) == 0)
		return;
	if (AR816X_REV(sc->alc_rev) >= AR816X_REV_C0)
		return;

	/*
	 * Vendor PHY magic.
	 * 1000BT/AZ, wrong cable length.
	 */
	if ((sc->alc_flags & ALC_FLAG_LINK) != 0) {
		alc_miiext_readreg(sc, MII_EXT_PCS, MII_EXT_CLDCTL6, &len);
		len = (len >> EXT_CLDCTL6_CAB_LEN_SHIFT) &
		    EXT_CLDCTL6_CAB_LEN_MASK;
		/* XXX: used to be (alc >> shift) & mask which is 0 */
		alc_miidbg_readreg(sc, MII_DBG_AGC, &agc);
		agc &= DBG_AGC_2_VGA_MASK;
		agc >>= DBG_AGC_2_VGA_SHIFT;
		if ((media == IFM_1000_T &&
		    len > EXT_CLDCTL6_CAB_LEN_SHORT1G &&
		    agc > DBG_AGC_LONG1G_LIMT) ||
		    (media == IFM_100_TX &&
		    len > EXT_CLDCTL6_CAB_LEN_SHORT100M &&
		    agc > DBG_AGC_LONG100M_LIMT)) {
			alc_miidbg_writereg(sc, MII_DBG_AZ_ANADECT,
			    DBG_AZ_ANADECT_LONG);
			alc_miiext_readreg(sc, MII_EXT_ANEG,
			    MII_EXT_ANEG_AFE, &val);
			val |= ANEG_AFEE_10BT_100M_TH;
			alc_miiext_writereg(sc, MII_EXT_ANEG, MII_EXT_ANEG_AFE,
			    val);
		} else {
			alc_miidbg_writereg(sc, MII_DBG_AZ_ANADECT,
			    DBG_AZ_ANADECT_DEFAULT);
			alc_miiext_readreg(sc, MII_EXT_ANEG,
			    MII_EXT_ANEG_AFE, &val);
			val &= ~ANEG_AFEE_10BT_100M_TH;
			alc_miiext_writereg(sc, MII_EXT_ANEG, MII_EXT_ANEG_AFE,
			    val);
		}
		if ((sc->alc_flags & ALC_FLAG_LINK_WAR) != 0 &&
		    AR816X_REV(sc->alc_rev) == AR816X_REV_B0) {
			if (media == IFM_1000_T) {
				/*
				 * Giga link threshold, raise the tolerance of
				 * noise 50%.
				 */
				alc_miidbg_readreg(sc, MII_DBG_MSE20DB, &val);
				val &= ~DBG_MSE20DB_TH_MASK;
				val |= (DBG_MSE20DB_TH_HI <<
				    DBG_MSE20DB_TH_SHIFT);
				alc_miidbg_writereg(sc, MII_DBG_MSE20DB, val);
			} else if (media == IFM_100_TX)
				alc_miidbg_writereg(sc, MII_DBG_MSE16DB,
				    DBG_MSE16DB_UP);
		}
	} else {
		alc_miiext_readreg(sc, MII_EXT_ANEG, MII_EXT_ANEG_AFE, &val);
		val &= ~ANEG_AFEE_10BT_100M_TH;
		alc_miiext_writereg(sc, MII_EXT_ANEG, MII_EXT_ANEG_AFE, val);
		if ((sc->alc_flags & ALC_FLAG_LINK_WAR) != 0 &&
		    AR816X_REV(sc->alc_rev) == AR816X_REV_B0) {
			alc_miidbg_writereg(sc, MII_DBG_MSE16DB,
			    DBG_MSE16DB_DOWN);
			alc_miidbg_readreg(sc, MII_DBG_MSE20DB, &val);
			val &= ~DBG_MSE20DB_TH_MASK;
			val |= (DBG_MSE20DB_TH_DEFAULT << DBG_MSE20DB_TH_SHIFT);
			alc_miidbg_writereg(sc, MII_DBG_MSE20DB, val);
		}
	}
}

static void
alc_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct alc_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_miibus;

	if ((ifp->if_flags & IFF_UP) == 0)
		return;

	mii_pollstat(mii);
	ifmr->ifm_status = mii->mii_media_status;
	ifmr->ifm_active = mii->mii_media_active;
}

static int
alc_mediachange(struct ifnet *ifp)
{
	struct alc_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_miibus;
	int error;

	if (mii->mii_instance != 0) {
		struct mii_softc *miisc;

		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
	error = mii_mediachg(mii);

	return (error);
}

static struct alc_ident *
alc_find_ident(struct pci_attach_args *pa)
{
	struct alc_ident *ident;
	uint16_t vendor, devid;

	vendor = PCI_VENDOR(pa->pa_id);
	devid = PCI_PRODUCT(pa->pa_id);
	for (ident = alc_ident_table; ident->name != NULL; ident++) {
		if (vendor == ident->vendorid && devid == ident->deviceid)
			return (ident);
	}

	return (NULL);
}

static int
alc_match(device_t dev, cfdata_t match, void *aux)
{
	struct pci_attach_args *pa = aux;

	return alc_find_ident(pa) != NULL;
}
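
/*
 * The station address may be loaded from an external EEPROM/flash via
 * TWSI, or simply taken from whatever firmware left behind in the
 * PAR0/PAR1 registers; the family-specific helpers below try the
 * external sources first and then read the PARx registers back.
 */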
static void
alc_get_macaddr(struct alc_softc *sc)
{

	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0)
		alc_get_macaddr_816x(sc);
	else
		alc_get_macaddr_813x(sc);
}

static void
alc_get_macaddr_813x(struct alc_softc *sc)
{
	uint32_t opt;
	uint16_t val;
	int eeprom, i;

	eeprom = 0;
	opt = CSR_READ_4(sc, ALC_OPT_CFG);
	if ((CSR_READ_4(sc, ALC_MASTER_CFG) & MASTER_OTP_SEL) != 0 &&
	    (CSR_READ_4(sc, ALC_TWSI_DEBUG) & TWSI_DEBUG_DEV_EXIST) != 0) {
		/*
		 * An EEPROM is present, so let TWSI reload the EEPROM
		 * configuration. This sets the controller's ethernet
		 * address.
		 */
		eeprom++;
		switch (sc->alc_ident->deviceid) {
		case PCI_PRODUCT_ATTANSIC_AR8131:
		case PCI_PRODUCT_ATTANSIC_AR8132:
			if ((opt & OPT_CFG_CLK_ENB) == 0) {
				opt |= OPT_CFG_CLK_ENB;
				CSR_WRITE_4(sc, ALC_OPT_CFG, opt);
				CSR_READ_4(sc, ALC_OPT_CFG);
				DELAY(1000);
			}
			break;
		case PCI_PRODUCT_ATTANSIC_AR8151:
		case PCI_PRODUCT_ATTANSIC_AR8151_V2:
		case PCI_PRODUCT_ATTANSIC_AR8152_B:
		case PCI_PRODUCT_ATTANSIC_AR8152_B2:
			alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
			    ALC_MII_DBG_ADDR, 0x00);
			alc_miibus_readreg(sc->sc_dev, sc->alc_phyaddr,
			    ALC_MII_DBG_DATA, &val);
			alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
			    ALC_MII_DBG_DATA, val & 0xFF7F);
			alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
			    ALC_MII_DBG_ADDR, 0x3B);
			alc_miibus_readreg(sc->sc_dev, sc->alc_phyaddr,
			    ALC_MII_DBG_DATA, &val);
			alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
			    ALC_MII_DBG_DATA, val | 0x0008);
			DELAY(20);
			break;
		}

		CSR_WRITE_4(sc, ALC_LTSSM_ID_CFG,
		    CSR_READ_4(sc, ALC_LTSSM_ID_CFG) & ~LTSSM_ID_WRO_ENB);
		CSR_WRITE_4(sc, ALC_WOL_CFG, 0);
		CSR_READ_4(sc, ALC_WOL_CFG);

		CSR_WRITE_4(sc, ALC_TWSI_CFG, CSR_READ_4(sc, ALC_TWSI_CFG) |
		    TWSI_CFG_SW_LD_START);
		for (i = 100; i > 0; i--) {
			DELAY(1000);
			if ((CSR_READ_4(sc, ALC_TWSI_CFG) &
			    TWSI_CFG_SW_LD_START) == 0)
				break;
		}
		if (i == 0)
			printf("%s: reloading EEPROM timeout!\n",
			    device_xname(sc->sc_dev));
	} else {
		if (alcdebug)
			printf("%s: EEPROM not found!\n",
			    device_xname(sc->sc_dev));
	}
	if (eeprom != 0) {
		switch (sc->alc_ident->deviceid) {
		case PCI_PRODUCT_ATTANSIC_AR8131:
		case PCI_PRODUCT_ATTANSIC_AR8132:
			if ((opt & OPT_CFG_CLK_ENB) != 0) {
				opt &= ~OPT_CFG_CLK_ENB;
				CSR_WRITE_4(sc, ALC_OPT_CFG, opt);
				CSR_READ_4(sc, ALC_OPT_CFG);
				DELAY(1000);
			}
			break;
		case PCI_PRODUCT_ATTANSIC_AR8151:
		case PCI_PRODUCT_ATTANSIC_AR8151_V2:
		case PCI_PRODUCT_ATTANSIC_AR8152_B:
		case PCI_PRODUCT_ATTANSIC_AR8152_B2:
			alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
			    ALC_MII_DBG_ADDR, 0x00);
			alc_miibus_readreg(sc->sc_dev, sc->alc_phyaddr,
			    ALC_MII_DBG_DATA, &val);
			alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
			    ALC_MII_DBG_DATA, val | 0x0080);
			alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
			    ALC_MII_DBG_ADDR, 0x3B);
			alc_miibus_readreg(sc->sc_dev, sc->alc_phyaddr,
			    ALC_MII_DBG_DATA, &val);
			alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
			    ALC_MII_DBG_DATA, val & 0xFFF7);
			DELAY(20);
			break;
		}
	}

	alc_get_macaddr_par(sc);
}

static void
alc_get_macaddr_816x(struct alc_softc *sc)
{
	uint32_t reg;
	int i, reloaded;

	reloaded = 0;
	/*
	 * Try to reload the station address via TWSI.
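	 * Wait for any load already in progress to finish, kick
	 * SLD_START, then wait for the hardware to clear it again.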
	 */
	for (i = 100; i > 0; i--) {
		reg = CSR_READ_4(sc, ALC_SLD);
		if ((reg & (SLD_PROGRESS | SLD_START)) == 0)
			break;
		DELAY(1000);
	}
	if (i != 0) {
		CSR_WRITE_4(sc, ALC_SLD, reg | SLD_START);
		for (i = 100; i > 0; i--) {
			DELAY(1000);
			reg = CSR_READ_4(sc, ALC_SLD);
			if ((reg & SLD_START) == 0)
				break;
		}
		if (i != 0)
			reloaded++;
		else if (alcdebug)
			printf("%s: reloading station address via TWSI "
			    "timed out!\n", device_xname(sc->sc_dev));
	}

	/* Try to reload station address from EEPROM or FLASH. */
	if (reloaded == 0) {
		reg = CSR_READ_4(sc, ALC_EEPROM_LD);
		if ((reg & (EEPROM_LD_EEPROM_EXIST |
		    EEPROM_LD_FLASH_EXIST)) != 0) {
			for (i = 100; i > 0; i--) {
				reg = CSR_READ_4(sc, ALC_EEPROM_LD);
				if ((reg & (EEPROM_LD_PROGRESS |
				    EEPROM_LD_START)) == 0)
					break;
				DELAY(1000);
			}
			if (i != 0) {
				CSR_WRITE_4(sc, ALC_EEPROM_LD, reg |
				    EEPROM_LD_START);
				for (i = 100; i > 0; i--) {
					DELAY(1000);
					reg = CSR_READ_4(sc, ALC_EEPROM_LD);
					if ((reg & EEPROM_LD_START) == 0)
						break;
				}
			} else if (alcdebug)
				printf("%s: reloading EEPROM/FLASH timed "
				    "out!\n", device_xname(sc->sc_dev));
		}
	}

	alc_get_macaddr_par(sc);
}

static void
alc_get_macaddr_par(struct alc_softc *sc)
{
	uint32_t ea[2];

	ea[0] = CSR_READ_4(sc, ALC_PAR0);
	ea[1] = CSR_READ_4(sc, ALC_PAR1);
	sc->alc_eaddr[0] = (ea[1] >> 8) & 0xFF;
	sc->alc_eaddr[1] = (ea[1] >> 0) & 0xFF;
	sc->alc_eaddr[2] = (ea[0] >> 24) & 0xFF;
	sc->alc_eaddr[3] = (ea[0] >> 16) & 0xFF;
	sc->alc_eaddr[4] = (ea[0] >> 8) & 0xFF;
	sc->alc_eaddr[5] = (ea[0] >> 0) & 0xFF;
}

static void
alc_disable_l0s_l1(struct alc_softc *sc)
{
	uint32_t pmcfg;

	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) == 0) {
		/* Another magic from vendor. */
		pmcfg = CSR_READ_4(sc, ALC_PM_CFG);
		pmcfg &= ~(PM_CFG_L1_ENTRY_TIMER_MASK | PM_CFG_CLK_SWH_L1 |
		    PM_CFG_ASPM_L0S_ENB | PM_CFG_ASPM_L1_ENB |
		    PM_CFG_MAC_ASPM_CHK | PM_CFG_SERDES_PD_EX_L1);
		pmcfg |= PM_CFG_SERDES_BUDS_RX_L1_ENB |
		    PM_CFG_SERDES_PLL_L1_ENB | PM_CFG_SERDES_L1_ENB;
		CSR_WRITE_4(sc, ALC_PM_CFG, pmcfg);
	}
}

static void
alc_phy_reset(struct alc_softc *sc)
{

	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0)
		alc_phy_reset_816x(sc);
	else
		alc_phy_reset_813x(sc);
}

static void
alc_phy_reset_813x(struct alc_softc *sc)
{
	uint16_t data;

	/* Reset magic from Linux. */
	CSR_WRITE_2(sc, ALC_GPHY_CFG, GPHY_CFG_SEL_ANA_RESET);
	CSR_READ_2(sc, ALC_GPHY_CFG);
	DELAY(10 * 1000);

	CSR_WRITE_2(sc, ALC_GPHY_CFG, GPHY_CFG_EXT_RESET |
	    GPHY_CFG_SEL_ANA_RESET);
	CSR_READ_2(sc, ALC_GPHY_CFG);
	DELAY(10 * 1000);

	/*
	 * DSP fixup, vendor magic.
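	 * The debug-register pokes below are undocumented values taken
	 * from the vendor driver.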
	 */
	if (sc->alc_ident->deviceid == PCI_PRODUCT_ATTANSIC_AR8152_B) {
		alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
		    ALC_MII_DBG_ADDR, 0x000A);
		alc_miibus_readreg(sc->sc_dev, sc->alc_phyaddr,
		    ALC_MII_DBG_DATA, &data);
		alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
		    ALC_MII_DBG_DATA, data & 0xDFFF);
	}
	if (sc->alc_ident->deviceid == PCI_PRODUCT_ATTANSIC_AR8151 ||
	    sc->alc_ident->deviceid == PCI_PRODUCT_ATTANSIC_AR8151_V2 ||
	    sc->alc_ident->deviceid == PCI_PRODUCT_ATTANSIC_AR8152_B ||
	    sc->alc_ident->deviceid == PCI_PRODUCT_ATTANSIC_AR8152_B2) {
		alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
		    ALC_MII_DBG_ADDR, 0x003B);
		alc_miibus_readreg(sc->sc_dev, sc->alc_phyaddr,
		    ALC_MII_DBG_DATA, &data);
		alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
		    ALC_MII_DBG_DATA, data & 0xFFF7);
		DELAY(20 * 1000);
	}
	if (sc->alc_ident->deviceid == PCI_PRODUCT_ATTANSIC_AR8151) {
		alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
		    ALC_MII_DBG_ADDR, 0x0029);
		alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
		    ALC_MII_DBG_DATA, 0x929D);
	}
	if (sc->alc_ident->deviceid == PCI_PRODUCT_ATTANSIC_AR8131 ||
	    sc->alc_ident->deviceid == PCI_PRODUCT_ATTANSIC_AR8132 ||
	    sc->alc_ident->deviceid == PCI_PRODUCT_ATTANSIC_AR8151_V2 ||
	    sc->alc_ident->deviceid == PCI_PRODUCT_ATTANSIC_AR8152_B2) {
		alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
		    ALC_MII_DBG_ADDR, 0x0029);
		alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
		    ALC_MII_DBG_DATA, 0xB6DD);
	}

	/* Load DSP codes, vendor magic. */
	data = ANA_LOOP_SEL_10BT | ANA_EN_MASK_TB | ANA_EN_10BT_IDLE |
	    ((1 << ANA_INTERVAL_SEL_TIMER_SHIFT) &
	    ANA_INTERVAL_SEL_TIMER_MASK);
	alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
	    ALC_MII_DBG_ADDR, MII_ANA_CFG18);
	alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
	    ALC_MII_DBG_DATA, data);

	data = ((2 << ANA_SERDES_CDR_BW_SHIFT) & ANA_SERDES_CDR_BW_MASK) |
	    ANA_SERDES_EN_DEEM | ANA_SERDES_SEL_HSP | ANA_SERDES_EN_PLL |
	    ANA_SERDES_EN_LCKDT;
	alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
	    ALC_MII_DBG_ADDR, MII_ANA_CFG5);
	alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
	    ALC_MII_DBG_DATA, data);

	data = ((44 << ANA_LONG_CABLE_TH_100_SHIFT) &
	    ANA_LONG_CABLE_TH_100_MASK) |
	    ((33 << ANA_SHORT_CABLE_TH_100_SHIFT) &
	    ANA_SHORT_CABLE_TH_100_MASK) |
	    ANA_BP_BAD_LINK_ACCUM | ANA_BP_SMALL_BW;
	alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
	    ALC_MII_DBG_ADDR, MII_ANA_CFG54);
	alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
	    ALC_MII_DBG_DATA, data);

	data = ((11 << ANA_IECHO_ADJ_3_SHIFT) & ANA_IECHO_ADJ_3_MASK) |
	    ((11 << ANA_IECHO_ADJ_2_SHIFT) & ANA_IECHO_ADJ_2_MASK) |
	    ((8 << ANA_IECHO_ADJ_1_SHIFT) & ANA_IECHO_ADJ_1_MASK) |
	    ((8 << ANA_IECHO_ADJ_0_SHIFT) & ANA_IECHO_ADJ_0_MASK);
	alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
	    ALC_MII_DBG_ADDR, MII_ANA_CFG4);
	alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
	    ALC_MII_DBG_DATA, data);

	data = ((7 & ANA_MANUL_SWICH_ON_SHIFT) & ANA_MANUL_SWICH_ON_MASK) |
	    ANA_RESTART_CAL | ANA_MAN_ENABLE | ANA_SEL_HSP | ANA_EN_HB |
	    ANA_OEN_125M;
	alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
	    ALC_MII_DBG_ADDR, MII_ANA_CFG0);
	alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
	    ALC_MII_DBG_DATA, data);
	DELAY(1000);

	/*
	 * Disable hibernation.
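	 * Both writes below clear bit 15 (0x8000) in vendor PHY debug
	 * registers 0x29 and 0x0B.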
	 */
	alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr, ALC_MII_DBG_ADDR,
	    0x0029);
	alc_miibus_readreg(sc->sc_dev, sc->alc_phyaddr,
	    ALC_MII_DBG_DATA, &data);
	data &= ~0x8000;
	alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr, ALC_MII_DBG_DATA,
	    data);

	alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr, ALC_MII_DBG_ADDR,
	    0x000B);
	alc_miibus_readreg(sc->sc_dev, sc->alc_phyaddr,
	    ALC_MII_DBG_DATA, &data);
	data &= ~0x8000;
	alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr, ALC_MII_DBG_DATA,
	    data);
}

static void
alc_phy_reset_816x(struct alc_softc *sc)
{
	uint32_t val;
	uint16_t phyval;

	val = CSR_READ_4(sc, ALC_GPHY_CFG);
	val &= ~(GPHY_CFG_EXT_RESET | GPHY_CFG_LED_MODE |
	    GPHY_CFG_GATE_25M_ENB | GPHY_CFG_PHY_IDDQ | GPHY_CFG_PHY_PLL_ON |
	    GPHY_CFG_PWDOWN_HW | GPHY_CFG_100AB_ENB);
	val |= GPHY_CFG_SEL_ANA_RESET;
#ifdef notyet
	val |= GPHY_CFG_HIB_PULSE | GPHY_CFG_HIB_EN | GPHY_CFG_SEL_ANA_RESET;
#else
	/* Disable PHY hibernation. */
	val &= ~(GPHY_CFG_HIB_PULSE | GPHY_CFG_HIB_EN);
#endif
	CSR_WRITE_4(sc, ALC_GPHY_CFG, val);
	DELAY(10);
	CSR_WRITE_4(sc, ALC_GPHY_CFG, val | GPHY_CFG_EXT_RESET);
	DELAY(800);

	/* Vendor PHY magic. */
#ifdef notyet
	alc_miidbg_writereg(sc, MII_DBG_LEGCYPS, DBG_LEGCYPS_DEFAULT);
	alc_miidbg_writereg(sc, MII_DBG_SYSMODCTL, DBG_SYSMODCTL_DEFAULT);
	alc_miiext_writereg(sc, MII_EXT_PCS, MII_EXT_VDRVBIAS,
	    EXT_VDRVBIAS_DEFAULT);
#else
	/* Disable PHY hibernation. */
	alc_miidbg_writereg(sc, MII_DBG_LEGCYPS,
	    DBG_LEGCYPS_DEFAULT & ~DBG_LEGCYPS_ENB);
	alc_miidbg_writereg(sc, MII_DBG_HIBNEG,
	    DBG_HIBNEG_DEFAULT & ~(DBG_HIBNEG_PSHIB_EN | DBG_HIBNEG_HIB_PULSE));
	alc_miidbg_writereg(sc, MII_DBG_GREENCFG, DBG_GREENCFG_DEFAULT);
#endif

	/* XXX Disable EEE. */
	val = CSR_READ_4(sc, ALC_LPI_CTL);
	val &= ~LPI_CTL_ENB;
	CSR_WRITE_4(sc, ALC_LPI_CTL, val);
	alc_miiext_writereg(sc, MII_EXT_ANEG, MII_EXT_ANEG_LOCAL_EEEADV, 0);

	/* PHY power saving. */
	alc_miidbg_writereg(sc, MII_DBG_TST10BTCFG, DBG_TST10BTCFG_DEFAULT);
	alc_miidbg_writereg(sc, MII_DBG_SRDSYSMOD, DBG_SRDSYSMOD_DEFAULT);
	alc_miidbg_writereg(sc, MII_DBG_TST100BTCFG, DBG_TST100BTCFG_DEFAULT);
	alc_miidbg_writereg(sc, MII_DBG_ANACTL, DBG_ANACTL_DEFAULT);
	alc_miidbg_readreg(sc, MII_DBG_GREENCFG2, &phyval);
	phyval &= ~DBG_GREENCFG2_GATE_DFSE_EN;
	alc_miidbg_writereg(sc, MII_DBG_GREENCFG2, phyval);

	/* RTL8139C, 120m issue. */
	alc_miiext_writereg(sc, MII_EXT_ANEG, MII_EXT_ANEG_NLP78,
	    ANEG_NLP78_120M_DEFAULT);
	alc_miiext_writereg(sc, MII_EXT_ANEG, MII_EXT_ANEG_S3DIG10,
	    ANEG_S3DIG10_DEFAULT);

	if ((sc->alc_flags & ALC_FLAG_LINK_WAR) != 0) {
		/* Turn off half amplitude. */
		alc_miiext_readreg(sc, MII_EXT_PCS, MII_EXT_CLDCTL3, &phyval);
		phyval |= EXT_CLDCTL3_BP_CABLE1TH_DET_GT;
		alc_miiext_writereg(sc, MII_EXT_PCS, MII_EXT_CLDCTL3, phyval);
		/* Turn off Green feature. */
		alc_miidbg_readreg(sc, MII_DBG_GREENCFG2, &phyval);
		phyval |= DBG_GREENCFG2_BP_GREEN;
		alc_miidbg_writereg(sc, MII_DBG_GREENCFG2, phyval);
		/* Turn off half bias. */
		alc_miiext_readreg(sc, MII_EXT_PCS, MII_EXT_CLDCTL5, &phyval);
		phyval |= EXT_CLDCTL5_BP_VD_HLFBIAS;
		alc_miiext_writereg(sc, MII_EXT_PCS, MII_EXT_CLDCTL5, phyval);
	}
}

static void
alc_phy_down(struct alc_softc *sc)
{
	uint32_t gphy;

	switch (sc->alc_ident->deviceid) {
	case PCI_PRODUCT_ATTANSIC_AR8161:
	case PCI_PRODUCT_ATTANSIC_E2200:
	case PCI_PRODUCT_ATTANSIC_E2400:
	case PCI_PRODUCT_ATTANSIC_E2500:
	case PCI_PRODUCT_ATTANSIC_AR8162:
	case PCI_PRODUCT_ATTANSIC_AR8171:
	case PCI_PRODUCT_ATTANSIC_AR8172:
		gphy = CSR_READ_4(sc, ALC_GPHY_CFG);
		gphy &= ~(GPHY_CFG_EXT_RESET | GPHY_CFG_LED_MODE |
		    GPHY_CFG_100AB_ENB | GPHY_CFG_PHY_PLL_ON);
		gphy |= GPHY_CFG_HIB_EN | GPHY_CFG_HIB_PULSE |
		    GPHY_CFG_SEL_ANA_RESET;
		gphy |= GPHY_CFG_PHY_IDDQ | GPHY_CFG_PWDOWN_HW;
		CSR_WRITE_4(sc, ALC_GPHY_CFG, gphy);
		break;
	case PCI_PRODUCT_ATTANSIC_AR8151:
	case PCI_PRODUCT_ATTANSIC_AR8151_V2:
	case PCI_PRODUCT_ATTANSIC_AR8152_B:
	case PCI_PRODUCT_ATTANSIC_AR8152_B2:
		/*
		 * GPHY power down caused more problems on the AR8151 v2.0.
		 * When the driver was reloaded after GPHY power down,
		 * accesses to PHY/MAC registers hung the system, and only
		 * a cold boot recovered from it. I'm not sure whether the
		 * AR8151 v1.0 also requires this; I don't have an AR8151
		 * v1.0 controller at hand. The only option left is to
		 * isolate the PHY and initiate a power down, which in turn
		 * saves more power when the driver is unloaded.
		 */
		alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
		    MII_BMCR, BMCR_ISO | BMCR_PDOWN);
		break;
	default:
		/* Force PHY down. */
		CSR_WRITE_2(sc, ALC_GPHY_CFG, GPHY_CFG_EXT_RESET |
		    GPHY_CFG_SEL_ANA_RESET | GPHY_CFG_PHY_IDDQ |
		    GPHY_CFG_PWDOWN_HW);
		DELAY(1000);
		break;
	}
}

static void
alc_aspm(struct alc_softc *sc, int init, int media)
{

	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0)
		alc_aspm_816x(sc, init);
	else
		alc_aspm_813x(sc, media);
}

static void
alc_aspm_813x(struct alc_softc *sc, int media)
{
	uint32_t pmcfg;
	uint16_t linkcfg;

	pmcfg = CSR_READ_4(sc, ALC_PM_CFG);
	if ((sc->alc_flags & (ALC_FLAG_APS | ALC_FLAG_PCIE)) ==
	    (ALC_FLAG_APS | ALC_FLAG_PCIE))
		linkcfg = CSR_READ_2(sc, sc->alc_expcap + PCIE_LCSR);
	else
		linkcfg = 0;
	pmcfg &= ~PM_CFG_SERDES_PD_EX_L1;
	pmcfg &= ~(PM_CFG_L1_ENTRY_TIMER_MASK | PM_CFG_LCKDET_TIMER_MASK);
	pmcfg |= PM_CFG_MAC_ASPM_CHK;
	pmcfg |= (PM_CFG_LCKDET_TIMER_DEFAULT << PM_CFG_LCKDET_TIMER_SHIFT);
	pmcfg &= ~(PM_CFG_ASPM_L1_ENB | PM_CFG_ASPM_L0S_ENB);

	if ((sc->alc_flags & ALC_FLAG_APS) != 0) {
		/* Disable extended sync except for AR8152 B v1.0. */
		linkcfg &= ~0x80;
		if (sc->alc_ident->deviceid == PCI_PRODUCT_ATTANSIC_AR8152_B &&
		    sc->alc_rev == ATHEROS_AR8152_B_V10)
			linkcfg |= 0x80;
		CSR_WRITE_2(sc, sc->alc_expcap + PCIE_LCSR, linkcfg);
		pmcfg &= ~(PM_CFG_EN_BUFS_RX_L0S | PM_CFG_SA_DLY_ENB |
		    PM_CFG_HOTRST);
		pmcfg |= (PM_CFG_L1_ENTRY_TIMER_DEFAULT <<
		    PM_CFG_L1_ENTRY_TIMER_SHIFT);
		pmcfg &= ~PM_CFG_PM_REQ_TIMER_MASK;
		pmcfg |= (PM_CFG_PM_REQ_TIMER_DEFAULT <<
		    PM_CFG_PM_REQ_TIMER_SHIFT);
		pmcfg |= PM_CFG_SERDES_PD_EX_L1 | PM_CFG_PCIE_RECV;
	}

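	/* The remaining ASPM bits depend on the link state. */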
	if ((sc->alc_flags & ALC_FLAG_LINK) != 0) {
		if ((sc->alc_flags & ALC_FLAG_L0S) != 0)
			pmcfg |= PM_CFG_ASPM_L0S_ENB;
		if ((sc->alc_flags & ALC_FLAG_L1S) != 0)
			pmcfg |= PM_CFG_ASPM_L1_ENB;
		if ((sc->alc_flags & ALC_FLAG_APS) != 0) {
			if (sc->alc_ident->deviceid ==
			    PCI_PRODUCT_ATTANSIC_AR8152_B)
				pmcfg &= ~PM_CFG_ASPM_L0S_ENB;
			pmcfg &= ~(PM_CFG_SERDES_L1_ENB |
			    PM_CFG_SERDES_PLL_L1_ENB |
			    PM_CFG_SERDES_BUDS_RX_L1_ENB);
			pmcfg |= PM_CFG_CLK_SWH_L1;
			if (media == IFM_100_TX || media == IFM_1000_T) {
				pmcfg &= ~PM_CFG_L1_ENTRY_TIMER_MASK;
				switch (sc->alc_ident->deviceid) {
				case PCI_PRODUCT_ATTANSIC_AR8152_B:
					pmcfg |= (7 <<
					    PM_CFG_L1_ENTRY_TIMER_SHIFT);
					break;
				case PCI_PRODUCT_ATTANSIC_AR8152_B2:
				case PCI_PRODUCT_ATTANSIC_AR8151_V2:
					pmcfg |= (4 <<
					    PM_CFG_L1_ENTRY_TIMER_SHIFT);
					break;
				default:
					pmcfg |= (15 <<
					    PM_CFG_L1_ENTRY_TIMER_SHIFT);
					break;
				}
			}
		} else {
			pmcfg |= PM_CFG_SERDES_L1_ENB |
			    PM_CFG_SERDES_PLL_L1_ENB |
			    PM_CFG_SERDES_BUDS_RX_L1_ENB;
			pmcfg &= ~(PM_CFG_CLK_SWH_L1 |
			    PM_CFG_ASPM_L1_ENB | PM_CFG_ASPM_L0S_ENB);
		}
	} else {
		pmcfg &= ~(PM_CFG_SERDES_BUDS_RX_L1_ENB |
		    PM_CFG_SERDES_L1_ENB | PM_CFG_SERDES_PLL_L1_ENB);
		pmcfg |= PM_CFG_CLK_SWH_L1;
		if ((sc->alc_flags & ALC_FLAG_L1S) != 0)
			pmcfg |= PM_CFG_ASPM_L1_ENB;
	}
	CSR_WRITE_4(sc, ALC_PM_CFG, pmcfg);
}

static void
alc_aspm_816x(struct alc_softc *sc, int init)
{
	uint32_t pmcfg;

	pmcfg = CSR_READ_4(sc, ALC_PM_CFG);
	pmcfg &= ~PM_CFG_L1_ENTRY_TIMER_816X_MASK;
	pmcfg |= PM_CFG_L1_ENTRY_TIMER_816X_DEFAULT;
	pmcfg &= ~PM_CFG_PM_REQ_TIMER_MASK;
	pmcfg |= PM_CFG_PM_REQ_TIMER_816X_DEFAULT;
	pmcfg &= ~PM_CFG_LCKDET_TIMER_MASK;
	pmcfg |= PM_CFG_LCKDET_TIMER_DEFAULT;
	pmcfg |= PM_CFG_SERDES_PD_EX_L1 | PM_CFG_CLK_SWH_L1 | PM_CFG_PCIE_RECV;
	pmcfg &= ~(PM_CFG_RX_L1_AFTER_L0S | PM_CFG_TX_L1_AFTER_L0S |
	    PM_CFG_ASPM_L1_ENB | PM_CFG_ASPM_L0S_ENB |
	    PM_CFG_SERDES_L1_ENB | PM_CFG_SERDES_PLL_L1_ENB |
	    PM_CFG_SERDES_BUDS_RX_L1_ENB | PM_CFG_SA_DLY_ENB |
	    PM_CFG_MAC_ASPM_CHK | PM_CFG_HOTRST);
	if (AR816X_REV(sc->alc_rev) <= AR816X_REV_A1 &&
	    (sc->alc_rev & 0x01) != 0)
		pmcfg |= PM_CFG_SERDES_L1_ENB | PM_CFG_SERDES_PLL_L1_ENB;
	if ((sc->alc_flags & ALC_FLAG_LINK) != 0) {
		/* Link up, enable both L0s and L1. */
		pmcfg |= PM_CFG_ASPM_L0S_ENB | PM_CFG_ASPM_L1_ENB |
		    PM_CFG_MAC_ASPM_CHK;
	} else {
		if (init != 0)
			pmcfg |= PM_CFG_ASPM_L0S_ENB | PM_CFG_ASPM_L1_ENB |
			    PM_CFG_MAC_ASPM_CHK;
		else if ((sc->sc_ec.ec_if.if_flags & IFF_RUNNING) != 0)
			pmcfg |= PM_CFG_ASPM_L1_ENB | PM_CFG_MAC_ASPM_CHK;
	}
	CSR_WRITE_4(sc, ALC_PM_CFG, pmcfg);
}

static void
alc_init_pcie(struct alc_softc *sc)
{
	const char *aspm_state[] = { "L0s/L1", "L0s", "L1", "L0s/L1" };
	uint32_t cap, ctl, val;
	int state;

	/*
	 * Clear data link and flow-control protocol error.
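	 * These look like AER-style severity bits: clearing them should
	 * demote those errors from fatal to non-fatal.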
	 */
	val = CSR_READ_4(sc, ALC_PEX_UNC_ERR_SEV);
	val &= ~(PEX_UNC_ERR_SEV_DLP | PEX_UNC_ERR_SEV_FCP);
	CSR_WRITE_4(sc, ALC_PEX_UNC_ERR_SEV, val);

	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) == 0) {
		CSR_WRITE_4(sc, ALC_LTSSM_ID_CFG,
		    CSR_READ_4(sc, ALC_LTSSM_ID_CFG) & ~LTSSM_ID_WRO_ENB);
		CSR_WRITE_4(sc, ALC_PCIE_PHYMISC,
		    CSR_READ_4(sc, ALC_PCIE_PHYMISC) |
		    PCIE_PHYMISC_FORCE_RCV_DET);
		if (sc->alc_ident->deviceid == PCI_PRODUCT_ATTANSIC_AR8152_B &&
		    sc->alc_rev == ATHEROS_AR8152_B_V10) {
			val = CSR_READ_4(sc, ALC_PCIE_PHYMISC2);
			val &= ~(PCIE_PHYMISC2_SERDES_CDR_MASK |
			    PCIE_PHYMISC2_SERDES_TH_MASK);
			val |= 3 << PCIE_PHYMISC2_SERDES_CDR_SHIFT;
			val |= 3 << PCIE_PHYMISC2_SERDES_TH_SHIFT;
			CSR_WRITE_4(sc, ALC_PCIE_PHYMISC2, val);
		}
		/* Disable ASPM L0S and L1. */
		cap = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
		    sc->alc_expcap + PCIE_LCAP) >> 16;
		if ((cap & PCIE_LCAP_ASPM) != 0) {
			ctl = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
			    sc->alc_expcap + PCIE_LCSR) >> 16;
			if ((ctl & 0x08) != 0)
				sc->alc_rcb = DMA_CFG_RCB_128;
			if (alcdebug)
				printf("%s: RCB %u bytes\n",
				    device_xname(sc->sc_dev),
				    sc->alc_rcb == DMA_CFG_RCB_64 ? 64 : 128);
			state = ctl & 0x03;
			if (state & 0x01)
				sc->alc_flags |= ALC_FLAG_L0S;
			if (state & 0x02)
				sc->alc_flags |= ALC_FLAG_L1S;
			if (alcdebug)
				printf("%s: ASPM %s %s\n",
				    device_xname(sc->sc_dev),
				    aspm_state[state],
				    state == 0 ? "disabled" : "enabled");
			alc_disable_l0s_l1(sc);
		} else {
			aprint_debug_dev(sc->sc_dev, "no ASPM support\n");
		}
	} else {
		val = CSR_READ_4(sc, ALC_PDLL_TRNS1);
		val &= ~PDLL_TRNS1_D3PLLOFF_ENB;
		CSR_WRITE_4(sc, ALC_PDLL_TRNS1, val);
		val = CSR_READ_4(sc, ALC_MASTER_CFG);
		if (AR816X_REV(sc->alc_rev) <= AR816X_REV_A1 &&
		    (sc->alc_rev & 0x01) != 0) {
			if ((val & MASTER_WAKEN_25M) == 0 ||
			    (val & MASTER_CLK_SEL_DIS) == 0) {
				val |= MASTER_WAKEN_25M | MASTER_CLK_SEL_DIS;
				CSR_WRITE_4(sc, ALC_MASTER_CFG, val);
			}
		} else {
			if ((val & MASTER_WAKEN_25M) == 0 ||
			    (val & MASTER_CLK_SEL_DIS) != 0) {
				val |= MASTER_WAKEN_25M;
				val &= ~MASTER_CLK_SEL_DIS;
				CSR_WRITE_4(sc, ALC_MASTER_CFG, val);
			}
		}
	}
	alc_aspm(sc, 1, IFM_UNKNOWN);
}

static void
alc_attach(device_t parent, device_t self, void *aux)
{

	struct alc_softc *sc = device_private(self);
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr;
	struct ifnet *ifp;
	struct mii_data * const mii = &sc->sc_miibus;
	pcireg_t memtype;
	uint16_t burst;
	int base, mii_flags, error = 0;
	char intrbuf[PCI_INTRSTR_LEN];

	sc->alc_ident = alc_find_ident(pa);
	sc->alc_rev = PCI_REVISION(pa->pa_class);

	aprint_naive("\n");
	aprint_normal(": %s\n", sc->alc_ident->name);

	sc->sc_dev = self;
	sc->sc_dmat = pa->pa_dmat;
	sc->sc_pct = pa->pa_pc;
	sc->sc_pcitag = pa->pa_tag;

	/*
	 * Allocate IO memory.
	 */
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, ALC_PCIR_BAR);
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT_1M:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		break;
	default:
		aprint_error_dev(self, "invalid base address register\n");
		break;
	}
register\n"); 1295 break; 1296 } 1297 1298 if (pci_mapreg_map(pa, ALC_PCIR_BAR, memtype, 0, &sc->sc_mem_bt, 1299 &sc->sc_mem_bh, NULL, &sc->sc_mem_size)) { 1300 aprint_error_dev(self, "could not map mem space\n"); 1301 return; 1302 } 1303 1304 if (pci_intr_map(pa, &ih) != 0) { 1305 printf(": can't map interrupt\n"); 1306 goto fail; 1307 } 1308 1309 /* 1310 * Allocate IRQ 1311 */ 1312 intrstr = pci_intr_string(sc->sc_pct, ih, intrbuf, sizeof(intrbuf)); 1313 sc->sc_irq_handle = pci_intr_establish_xname(pc, ih, IPL_NET, alc_intr, 1314 sc, device_xname(self)); 1315 if (sc->sc_irq_handle == NULL) { 1316 printf(": could not establish interrupt"); 1317 if (intrstr != NULL) 1318 printf(" at %s", intrstr); 1319 printf("\n"); 1320 goto fail; 1321 } 1322 aprint_normal_dev(self, "interrupting at %s\n", intrstr); 1323 1324 /* Set PHY address. */ 1325 sc->alc_phyaddr = ALC_PHY_ADDR; 1326 1327 /* Initialize DMA parameters. */ 1328 sc->alc_dma_rd_burst = 0; 1329 sc->alc_dma_wr_burst = 0; 1330 sc->alc_rcb = DMA_CFG_RCB_64; 1331 if (pci_get_capability(pc, pa->pa_tag, PCI_CAP_PCIEXPRESS, 1332 &base, NULL)) { 1333 sc->alc_flags |= ALC_FLAG_PCIE; 1334 sc->alc_expcap = base; 1335 burst = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 1336 base + PCIE_DCSR) >> 16; 1337 sc->alc_dma_rd_burst = (burst & 0x7000) >> 12; 1338 sc->alc_dma_wr_burst = (burst & 0x00e0) >> 5; 1339 if (alcdebug) { 1340 printf("%s: Read request size : %u bytes.\n", 1341 device_xname(sc->sc_dev), 1342 alc_dma_burst[sc->alc_dma_rd_burst]); 1343 printf("%s: TLP payload size : %u bytes.\n", 1344 device_xname(sc->sc_dev), 1345 alc_dma_burst[sc->alc_dma_wr_burst]); 1346 } 1347 if (alc_dma_burst[sc->alc_dma_rd_burst] > 1024) 1348 sc->alc_dma_rd_burst = 3; 1349 if (alc_dma_burst[sc->alc_dma_wr_burst] > 1024) 1350 sc->alc_dma_wr_burst = 3; 1351 /* 1352 * Force maximum payload size to 128 bytes for 1353 * E2200/E2400/E2500. 1354 * Otherwise it triggers DMA write error. 1355 */ 1356 if ((sc->alc_flags & ALC_FLAG_E2X00) != 0) 1357 sc->alc_dma_wr_burst = 0; 1358 alc_init_pcie(sc); 1359 } 1360 1361 /* Reset PHY. */ 1362 alc_phy_reset(sc); 1363 1364 /* Reset the ethernet controller. */ 1365 alc_stop_mac(sc); 1366 alc_reset(sc); 1367 1368 /* 1369 * One odd thing is AR8132 uses the same PHY hardware(F1 1370 * gigabit PHY) of AR8131. So atphy(4) of AR8132 reports 1371 * the PHY supports 1000Mbps but that's not true. The PHY 1372 * used in AR8132 can't establish gigabit link even if it 1373 * shows the same PHY model/revision number of AR8131. 
	 */
	switch (sc->alc_ident->deviceid) {
	case PCI_PRODUCT_ATTANSIC_E2200:
	case PCI_PRODUCT_ATTANSIC_E2400:
	case PCI_PRODUCT_ATTANSIC_E2500:
		sc->alc_flags |= ALC_FLAG_E2X00;
		/* FALLTHROUGH */
	case PCI_PRODUCT_ATTANSIC_AR8161:
		if (PCI_SUBSYS_ID(pci_conf_read(
		    sc->sc_pct, sc->sc_pcitag, PCI_SUBSYS_ID_REG)) == 0x0091 &&
		    sc->alc_rev == 0)
			sc->alc_flags |= ALC_FLAG_LINK_WAR;
		/* FALLTHROUGH */
	case PCI_PRODUCT_ATTANSIC_AR8171:
		sc->alc_flags |= ALC_FLAG_AR816X_FAMILY;
		break;
	case PCI_PRODUCT_ATTANSIC_AR8162:
	case PCI_PRODUCT_ATTANSIC_AR8172:
		sc->alc_flags |= ALC_FLAG_FASTETHER | ALC_FLAG_AR816X_FAMILY;
		break;
	case PCI_PRODUCT_ATTANSIC_AR8152_B:
	case PCI_PRODUCT_ATTANSIC_AR8152_B2:
		sc->alc_flags |= ALC_FLAG_APS;
		/* FALLTHROUGH */
	case PCI_PRODUCT_ATTANSIC_AR8132:
		sc->alc_flags |= ALC_FLAG_FASTETHER;
		break;
	case PCI_PRODUCT_ATTANSIC_AR8151:
	case PCI_PRODUCT_ATTANSIC_AR8151_V2:
		sc->alc_flags |= ALC_FLAG_APS;
		/* FALLTHROUGH */
	default:
		break;
	}
	sc->alc_flags |= ALC_FLAG_JUMBO;

	/*
	 * The AR813x/AR815x seem to have a silicon bug in the SMB. In
	 * addition, Atheros said that enabling the SMB wouldn't improve
	 * performance. Still, it's unfortunate to have to access lots of
	 * registers to extract the MAC statistics.
	 */
	sc->alc_flags |= ALC_FLAG_SMB_BUG;
	/*
	 * Don't use the Tx CMB. It is known to have a silicon bug.
	 */
	sc->alc_flags |= ALC_FLAG_CMB_BUG;
	sc->alc_chip_rev = CSR_READ_4(sc, ALC_MASTER_CFG) >>
	    MASTER_CHIP_REV_SHIFT;
	if (alcdebug) {
		printf("%s: PCI device revision : 0x%04x\n",
		    device_xname(sc->sc_dev), sc->alc_rev);
		printf("%s: Chip id/revision : 0x%04x\n",
		    device_xname(sc->sc_dev), sc->alc_chip_rev);
		printf("%s: %u Tx FIFO, %u Rx FIFO\n",
		    device_xname(sc->sc_dev),
		    CSR_READ_4(sc, ALC_SRAM_TX_FIFO_LEN) * 8,
		    CSR_READ_4(sc, ALC_SRAM_RX_FIFO_LEN) * 8);
	}

	error = alc_dma_alloc(sc);
	if (error)
		goto fail;

	callout_init(&sc->sc_tick_ch, 0);
	callout_setfunc(&sc->sc_tick_ch, alc_tick, sc);

	/* Load station address. */
	alc_get_macaddr(sc);

	aprint_normal_dev(self, "Ethernet address %s\n",
	    ether_sprintf(sc->alc_eaddr));

	ifp = &sc->sc_ec.ec_if;
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_init = alc_init;
	ifp->if_ioctl = alc_ioctl;
	ifp->if_start = alc_start;
	ifp->if_stop = alc_stop;
	ifp->if_watchdog = alc_watchdog;
	IFQ_SET_MAXLEN(&ifp->if_snd, ALC_TX_RING_CNT - 1);
	IFQ_SET_READY(&ifp->if_snd);
	strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);

	sc->sc_ec.ec_capabilities = ETHERCAP_VLAN_MTU;

#ifdef ALC_CHECKSUM
	ifp->if_capabilities |= IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
	    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
	    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx;
#endif

#if NVLAN > 0
	sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_HWTAGGING;
	sc->sc_ec.ec_capenable |= ETHERCAP_VLAN_HWTAGGING;
#endif

	/*
	 * XXX
	 * It seems enabling Tx checksum offloading makes more trouble.
	 * Sometimes the controller does not receive any frames when Tx
	 * checksum offloading is enabled. I'm not sure whether this is a
	 * bug in the Tx checksum offloading logic or whether I got broken
	 * sample boards.
	 * To be safe, don't enable Tx checksum offloading by default, but
	 * give users a chance to toggle it if they know their controllers
	 * work without problems. Fortunately, Tx checksum offloading for
	 * the AR816x family seems to work.
	 */
	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) == 0) {
		ifp->if_capenable &= ~IFCAP_CSUM_IPv4_Tx;
		ifp->if_capabilities &= ~ALC_CSUM_FEATURES;
	}

	/* Set up MII bus. */
	mii->mii_ifp = ifp;
	mii->mii_readreg = alc_miibus_readreg;
	mii->mii_writereg = alc_miibus_writereg;
	mii->mii_statchg = alc_miibus_statchg;

	sc->sc_ec.ec_mii = mii;
	ifmedia_init(&mii->mii_media, 0, alc_mediachange, alc_mediastatus);
	mii_flags = 0;
	if ((sc->alc_flags & ALC_FLAG_JUMBO) != 0)
		mii_flags |= MIIF_DOPAUSE;
	mii_attach(self, mii, 0xffffffff, MII_PHY_ANY, MII_OFFSET_ANY,
	    mii_flags);

	if (LIST_FIRST(&mii->mii_phys) == NULL) {
		printf("%s: no PHY found!\n", device_xname(sc->sc_dev));
		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_MANUAL,
		    0, NULL);
		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_MANUAL);
	} else
		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);

	if_attach(ifp);
	if_deferred_start_init(ifp, NULL);
	ether_ifattach(ifp, sc->alc_eaddr);

	if (!pmf_device_register(self, NULL, NULL))
		aprint_error_dev(self, "couldn't establish power handler\n");
	else
		pmf_class_network_register(self, ifp);

	return;
fail:
	alc_dma_free(sc);
	if (sc->sc_irq_handle != NULL) {
		pci_intr_disestablish(sc->sc_pct, sc->sc_irq_handle);
		sc->sc_irq_handle = NULL;
	}
	if (sc->sc_mem_size) {
		bus_space_unmap(sc->sc_mem_bt, sc->sc_mem_bh, sc->sc_mem_size);
		sc->sc_mem_size = 0;
	}
}

static int
alc_detach(device_t self, int flags)
{
	struct alc_softc *sc = device_private(self);
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	int s;

	s = splnet();
	alc_stop(ifp, 0);
	splx(s);

	mii_detach(&sc->sc_miibus, MII_PHY_ANY, MII_OFFSET_ANY);

	/* Delete all remaining media. */
	ifmedia_delete_instance(&sc->sc_miibus.mii_media, IFM_INST_ANY);

	ether_ifdetach(ifp);
	if_detach(ifp);
	alc_dma_free(sc);

	alc_phy_down(sc);
	if (sc->sc_irq_handle != NULL) {
		pci_intr_disestablish(sc->sc_pct, sc->sc_irq_handle);
		sc->sc_irq_handle = NULL;
	}
	if (sc->sc_mem_size) {
		bus_space_unmap(sc->sc_mem_bt, sc->sc_mem_bh, sc->sc_mem_size);
		sc->sc_mem_size = 0;
	}

	return (0);
}

static int
alc_dma_alloc(struct alc_softc *sc)
{
	struct alc_txdesc *txd;
	struct alc_rxdesc *rxd;
	int nsegs, error, i;

	/*
	 * Create DMA structures for the TX ring.
	 */
	error = bus_dmamap_create(sc->sc_dmat, ALC_TX_RING_SZ, 1,
	    ALC_TX_RING_SZ, 0, BUS_DMA_NOWAIT, &sc->alc_cdata.alc_tx_ring_map);
	if (error) {
		sc->alc_cdata.alc_tx_ring_map = NULL;
		return (ENOBUFS);
	}

	/* Allocate DMA'able memory for TX ring. */
	error = bus_dmamem_alloc(sc->sc_dmat, ALC_TX_RING_SZ,
	    ETHER_ALIGN, 0, &sc->alc_rdata.alc_tx_ring_seg, 1,
	    &nsegs, BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: could not allocate DMA'able memory for Tx ring.\n",
		    device_xname(sc->sc_dev));
		return error;
	}

	error = bus_dmamem_map(sc->sc_dmat, &sc->alc_rdata.alc_tx_ring_seg,
	    nsegs, ALC_TX_RING_SZ, (void **)&sc->alc_rdata.alc_tx_ring,
	    BUS_DMA_NOWAIT);
	if (error)
		return (ENOBUFS);

	/* Load the DMA map for Tx ring. */
	error = bus_dmamap_load(sc->sc_dmat, sc->alc_cdata.alc_tx_ring_map,
	    sc->alc_rdata.alc_tx_ring, ALC_TX_RING_SZ, NULL, BUS_DMA_WAITOK);
	if (error) {
		printf("%s: could not load DMA'able memory for Tx ring.\n",
		    device_xname(sc->sc_dev));
		bus_dmamem_free(sc->sc_dmat,
		    &sc->alc_rdata.alc_tx_ring_seg, 1);
		return error;
	}

	sc->alc_rdata.alc_tx_ring_paddr =
	    sc->alc_cdata.alc_tx_ring_map->dm_segs[0].ds_addr;

	/*
	 * Create DMA structures for the RX ring.
	 */
	error = bus_dmamap_create(sc->sc_dmat, ALC_RX_RING_SZ, 1,
	    ALC_RX_RING_SZ, 0, BUS_DMA_NOWAIT, &sc->alc_cdata.alc_rx_ring_map);
	if (error)
		return (ENOBUFS);

	/* Allocate DMA'able memory for RX ring. */
	error = bus_dmamem_alloc(sc->sc_dmat, ALC_RX_RING_SZ,
	    ETHER_ALIGN, 0, &sc->alc_rdata.alc_rx_ring_seg, 1,
	    &nsegs, BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: could not allocate DMA'able memory for Rx ring.\n",
		    device_xname(sc->sc_dev));
		return error;
	}

	error = bus_dmamem_map(sc->sc_dmat, &sc->alc_rdata.alc_rx_ring_seg,
	    nsegs, ALC_RX_RING_SZ, (void **)&sc->alc_rdata.alc_rx_ring,
	    BUS_DMA_NOWAIT);
	if (error)
		return (ENOBUFS);

	/*
	 * Load the DMA map for Rx ring.
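	 * Same pattern as the Tx ring: load the wired memory into the
	 * map and record dm_segs[0].ds_addr as the ring's bus address.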
	 */
	error = bus_dmamap_load(sc->sc_dmat, sc->alc_cdata.alc_rx_ring_map,
	    sc->alc_rdata.alc_rx_ring, ALC_RX_RING_SZ, NULL, BUS_DMA_WAITOK);
	if (error) {
		printf("%s: could not load DMA'able memory for Rx ring.\n",
		    device_xname(sc->sc_dev));
		bus_dmamem_free(sc->sc_dmat,
		    &sc->alc_rdata.alc_rx_ring_seg, 1);
		return error;
	}

	sc->alc_rdata.alc_rx_ring_paddr =
	    sc->alc_cdata.alc_rx_ring_map->dm_segs[0].ds_addr;

	/*
	 * Create DMA structures for the RX return ring.
	 */
	error = bus_dmamap_create(sc->sc_dmat, ALC_RR_RING_SZ, 1,
	    ALC_RR_RING_SZ, 0, BUS_DMA_NOWAIT, &sc->alc_cdata.alc_rr_ring_map);
	if (error)
		return (ENOBUFS);

	/* Allocate DMA'able memory for RX return ring. */
	error = bus_dmamem_alloc(sc->sc_dmat, ALC_RR_RING_SZ,
	    ETHER_ALIGN, 0, &sc->alc_rdata.alc_rr_ring_seg, 1,
	    &nsegs, BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: could not allocate DMA'able memory for Rx "
		    "return ring.\n", device_xname(sc->sc_dev));
		return error;
	}

	error = bus_dmamem_map(sc->sc_dmat, &sc->alc_rdata.alc_rr_ring_seg,
	    nsegs, ALC_RR_RING_SZ, (void **)&sc->alc_rdata.alc_rr_ring,
	    BUS_DMA_NOWAIT);
	if (error)
		return (ENOBUFS);

	/* Load the DMA map for Rx return ring. */
	error = bus_dmamap_load(sc->sc_dmat, sc->alc_cdata.alc_rr_ring_map,
	    sc->alc_rdata.alc_rr_ring, ALC_RR_RING_SZ, NULL, BUS_DMA_WAITOK);
	if (error) {
		printf("%s: could not load DMA'able memory for Rx return "
		    "ring.\n", device_xname(sc->sc_dev));
		bus_dmamem_free(sc->sc_dmat,
		    &sc->alc_rdata.alc_rr_ring_seg, 1);
		return error;
	}

	sc->alc_rdata.alc_rr_ring_paddr =
	    sc->alc_cdata.alc_rr_ring_map->dm_segs[0].ds_addr;

	/*
	 * Create DMA structures for the CMB block.
	 */
	error = bus_dmamap_create(sc->sc_dmat, ALC_CMB_SZ, 1,
	    ALC_CMB_SZ, 0, BUS_DMA_NOWAIT,
	    &sc->alc_cdata.alc_cmb_map);
	if (error)
		return (ENOBUFS);

	/* Allocate DMA'able memory for CMB block. */
	error = bus_dmamem_alloc(sc->sc_dmat, ALC_CMB_SZ,
	    ETHER_ALIGN, 0, &sc->alc_rdata.alc_cmb_seg, 1,
	    &nsegs, BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: could not allocate DMA'able memory for "
		    "CMB block\n", device_xname(sc->sc_dev));
		return error;
	}

	error = bus_dmamem_map(sc->sc_dmat, &sc->alc_rdata.alc_cmb_seg,
	    nsegs, ALC_CMB_SZ, (void **)&sc->alc_rdata.alc_cmb,
	    BUS_DMA_NOWAIT);
	if (error)
		return (ENOBUFS);

	/*
	 * Load the DMA map for the CMB block.
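	 * (The CMB is a small status block the MAC can write back via
	 * DMA; it is allocated unconditionally even though the Tx CMB is
	 * left disabled, see the ALC_FLAG_CMB_BUG note in alc_attach().)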
	 */
	error = bus_dmamap_load(sc->sc_dmat, sc->alc_cdata.alc_cmb_map,
	    sc->alc_rdata.alc_cmb, ALC_CMB_SZ, NULL,
	    BUS_DMA_WAITOK);
	if (error) {
		printf("%s: could not load DMA'able memory for CMB block\n",
		    device_xname(sc->sc_dev));
		bus_dmamem_free(sc->sc_dmat,
		    &sc->alc_rdata.alc_cmb_seg, 1);
		return error;
	}

	sc->alc_rdata.alc_cmb_paddr =
	    sc->alc_cdata.alc_cmb_map->dm_segs[0].ds_addr;

	/*
	 * Create DMA structures for the SMB block.
	 */
	error = bus_dmamap_create(sc->sc_dmat, ALC_SMB_SZ, 1,
	    ALC_SMB_SZ, 0, BUS_DMA_NOWAIT,
	    &sc->alc_cdata.alc_smb_map);
	if (error)
		return (ENOBUFS);

	/* Allocate DMA'able memory for SMB block. */
	error = bus_dmamem_alloc(sc->sc_dmat, ALC_SMB_SZ,
	    ETHER_ALIGN, 0, &sc->alc_rdata.alc_smb_seg, 1,
	    &nsegs, BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: could not allocate DMA'able memory for "
		    "SMB block\n", device_xname(sc->sc_dev));
		return error;
	}

	error = bus_dmamem_map(sc->sc_dmat, &sc->alc_rdata.alc_smb_seg,
	    nsegs, ALC_SMB_SZ, (void **)&sc->alc_rdata.alc_smb,
	    BUS_DMA_NOWAIT);
	if (error)
		return (ENOBUFS);

	/* Load the DMA map for SMB block. */
	error = bus_dmamap_load(sc->sc_dmat, sc->alc_cdata.alc_smb_map,
	    sc->alc_rdata.alc_smb, ALC_SMB_SZ, NULL,
	    BUS_DMA_WAITOK);
	if (error) {
		printf("%s: could not load DMA'able memory for SMB block\n",
		    device_xname(sc->sc_dev));
		bus_dmamem_free(sc->sc_dmat,
		    &sc->alc_rdata.alc_smb_seg, 1);
		return error;
	}

	sc->alc_rdata.alc_smb_paddr =
	    sc->alc_cdata.alc_smb_map->dm_segs[0].ds_addr;

	/* Create DMA maps for Tx buffers. */
	for (i = 0; i < ALC_TX_RING_CNT; i++) {
		txd = &sc->alc_cdata.alc_txdesc[i];
		txd->tx_m = NULL;
		txd->tx_dmamap = NULL;
		error = bus_dmamap_create(sc->sc_dmat, ALC_TSO_MAXSIZE,
		    ALC_MAXTXSEGS, ALC_TSO_MAXSEGSIZE, 0, BUS_DMA_NOWAIT,
		    &txd->tx_dmamap);
		if (error) {
			printf("%s: could not create Tx dmamap.\n",
			    device_xname(sc->sc_dev));
			return error;
		}
	}

	/*
	 * Create DMA maps for Rx buffers.
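	 * A spare map is kept so that alc_newbuf() can load a new mbuf
	 * first and swap maps, leaving the old buffer in place if the
	 * load fails.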
*/ 1785 error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 0, 1786 BUS_DMA_NOWAIT, &sc->alc_cdata.alc_rx_sparemap); 1787 if (error) { 1788 printf("%s: could not create spare Rx dmamap.\n", 1789 device_xname(sc->sc_dev)); 1790 return error; 1791 } 1792 1793 for (i = 0; i < ALC_RX_RING_CNT; i++) { 1794 rxd = &sc->alc_cdata.alc_rxdesc[i]; 1795 rxd->rx_m = NULL; 1796 rxd->rx_dmamap = NULL; 1797 error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, 1798 MCLBYTES, 0, BUS_DMA_NOWAIT, &rxd->rx_dmamap); 1799 if (error) { 1800 printf("%s: could not create Rx dmamap.\n", 1801 device_xname(sc->sc_dev)); 1802 return error; 1803 } 1804 } 1805 1806 return (0); 1807 } 1808 1809 static void 1810 alc_dma_free(struct alc_softc *sc) 1811 { 1812 struct alc_txdesc *txd; 1813 struct alc_rxdesc *rxd; 1814 int i; 1815 1816 /* Tx buffers */ 1817 for (i = 0; i < ALC_TX_RING_CNT; i++) { 1818 txd = &sc->alc_cdata.alc_txdesc[i]; 1819 if (txd->tx_dmamap != NULL) { 1820 bus_dmamap_destroy(sc->sc_dmat, txd->tx_dmamap); 1821 txd->tx_dmamap = NULL; 1822 } 1823 } 1824 /* Rx buffers */ 1825 for (i = 0; i < ALC_RX_RING_CNT; i++) { 1826 rxd = &sc->alc_cdata.alc_rxdesc[i]; 1827 if (rxd->rx_dmamap != NULL) { 1828 bus_dmamap_destroy(sc->sc_dmat, rxd->rx_dmamap); 1829 rxd->rx_dmamap = NULL; 1830 } 1831 } 1832 if (sc->alc_cdata.alc_rx_sparemap != NULL) { 1833 bus_dmamap_destroy(sc->sc_dmat, sc->alc_cdata.alc_rx_sparemap); 1834 sc->alc_cdata.alc_rx_sparemap = NULL; 1835 } 1836 1837 /* Tx ring. */ 1838 if (sc->alc_cdata.alc_tx_ring_map != NULL) 1839 bus_dmamap_unload(sc->sc_dmat, sc->alc_cdata.alc_tx_ring_map); 1840 if (sc->alc_cdata.alc_tx_ring_map != NULL && 1841 sc->alc_rdata.alc_tx_ring != NULL) 1842 bus_dmamem_free(sc->sc_dmat, 1843 &sc->alc_rdata.alc_tx_ring_seg, 1); 1844 sc->alc_rdata.alc_tx_ring = NULL; 1845 sc->alc_cdata.alc_tx_ring_map = NULL; 1846 1847 /* Rx ring. */ 1848 if (sc->alc_cdata.alc_rx_ring_map != NULL) 1849 bus_dmamap_unload(sc->sc_dmat, sc->alc_cdata.alc_rx_ring_map); 1850 if (sc->alc_cdata.alc_rx_ring_map != NULL && 1851 sc->alc_rdata.alc_rx_ring != NULL) 1852 bus_dmamem_free(sc->sc_dmat, 1853 &sc->alc_rdata.alc_rx_ring_seg, 1); 1854 sc->alc_rdata.alc_rx_ring = NULL; 1855 sc->alc_cdata.alc_rx_ring_map = NULL; 1856 1857 /* Rx return ring. 
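 * Teardown mirrors setup in reverse for this and each block below:
 * unload the DMA map first, then free the backing DMA segment.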
*/ 1858 if (sc->alc_cdata.alc_rr_ring_map != NULL) 1859 bus_dmamap_unload(sc->sc_dmat, sc->alc_cdata.alc_rr_ring_map); 1860 if (sc->alc_cdata.alc_rr_ring_map != NULL && 1861 sc->alc_rdata.alc_rr_ring != NULL) 1862 bus_dmamem_free(sc->sc_dmat, 1863 &sc->alc_rdata.alc_rr_ring_seg, 1); 1864 sc->alc_rdata.alc_rr_ring = NULL; 1865 sc->alc_cdata.alc_rr_ring_map = NULL; 1866 1867 /* CMB block */ 1868 if (sc->alc_cdata.alc_cmb_map != NULL) 1869 bus_dmamap_unload(sc->sc_dmat, sc->alc_cdata.alc_cmb_map); 1870 if (sc->alc_cdata.alc_cmb_map != NULL && 1871 sc->alc_rdata.alc_cmb != NULL) 1872 bus_dmamem_free(sc->sc_dmat, 1873 &sc->alc_rdata.alc_cmb_seg, 1); 1874 sc->alc_rdata.alc_cmb = NULL; 1875 sc->alc_cdata.alc_cmb_map = NULL; 1876 1877 /* SMB block */ 1878 if (sc->alc_cdata.alc_smb_map != NULL) 1879 bus_dmamap_unload(sc->sc_dmat, sc->alc_cdata.alc_smb_map); 1880 if (sc->alc_cdata.alc_smb_map != NULL && 1881 sc->alc_rdata.alc_smb != NULL) 1882 bus_dmamem_free(sc->sc_dmat, 1883 &sc->alc_rdata.alc_smb_seg, 1); 1884 sc->alc_rdata.alc_smb = NULL; 1885 sc->alc_cdata.alc_smb_map = NULL; 1886 } 1887 1888 static int 1889 alc_encap(struct alc_softc *sc, struct mbuf **m_head) 1890 { 1891 struct alc_txdesc *txd, *txd_last; 1892 struct tx_desc *desc; 1893 struct mbuf *m; 1894 bus_dmamap_t map; 1895 uint32_t cflags, poff, vtag; 1896 int error, idx, nsegs, prod; 1897 1898 m = *m_head; 1899 cflags = vtag = 0; 1900 poff = 0; 1901 1902 prod = sc->alc_cdata.alc_tx_prod; 1903 txd = &sc->alc_cdata.alc_txdesc[prod]; 1904 txd_last = txd; 1905 map = txd->tx_dmamap; 1906 1907 error = bus_dmamap_load_mbuf(sc->sc_dmat, map, *m_head, BUS_DMA_NOWAIT); 1908 1909 if (error == EFBIG) { 1910 error = 0; 1911 1912 *m_head = m_pullup(*m_head, MHLEN); 1913 if (*m_head == NULL) { 1914 printf("%s: can't defrag TX mbuf\n", 1915 device_xname(sc->sc_dev)); 1916 return ENOBUFS; 1917 } 1918 1919 error = bus_dmamap_load_mbuf(sc->sc_dmat, map, *m_head, 1920 BUS_DMA_NOWAIT); 1921 1922 if (error != 0) { 1923 printf("%s: could not load defragged TX mbuf\n", 1924 device_xname(sc->sc_dev)); 1925 m_freem(*m_head); 1926 *m_head = NULL; 1927 return error; 1928 } 1929 } else if (error) { 1930 printf("%s: could not load TX mbuf\n", device_xname(sc->sc_dev)); 1931 return (error); 1932 } 1933 1934 nsegs = map->dm_nsegs; 1935 1936 if (nsegs == 0) { 1937 m_freem(*m_head); 1938 *m_head = NULL; 1939 return (EIO); 1940 } 1941 1942 /* Check descriptor overrun. */ 1943 if (sc->alc_cdata.alc_tx_cnt + nsegs >= ALC_TX_RING_CNT - 3) { 1944 bus_dmamap_unload(sc->sc_dmat, map); 1945 return (ENOBUFS); 1946 } 1947 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize, 1948 BUS_DMASYNC_PREWRITE); 1949 1950 m = *m_head; 1951 desc = NULL; 1952 idx = 0; 1953 #if NVLAN > 0 1954 /* Configure VLAN hardware tag insertion. */ 1955 if (vlan_has_tag(m)) { 1956 vtag = htons(vlan_get_tag(m)); 1957 vtag = (vtag << TD_VLAN_SHIFT) & TD_VLAN_MASK; 1958 cflags |= TD_INS_VLAN_TAG; 1959 } 1960 #endif 1961 /* Configure Tx checksum offload. */ 1962 if ((m->m_pkthdr.csum_flags & ALC_CSUM_FEATURES) != 0) { 1963 cflags |= TD_CUSTOM_CSUM; 1964 /* Set checksum start offset. 
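 * poff is a byte offset; the descriptor field stores it in 2-byte
 * units, hence the shift below. Nothing in this path ever sets poff,
 * so the offset programmed here is effectively zero.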
*/ 1965 cflags |= ((poff >> 1) << TD_PLOAD_OFFSET_SHIFT) & 1966 TD_PLOAD_OFFSET_MASK; 1967 } 1968 for (; idx < nsegs; idx++) { 1969 desc = &sc->alc_rdata.alc_tx_ring[prod]; 1970 desc->len = 1971 htole32(TX_BYTES(map->dm_segs[idx].ds_len) | vtag); 1972 desc->flags = htole32(cflags); 1973 desc->addr = htole64(map->dm_segs[idx].ds_addr); 1974 sc->alc_cdata.alc_tx_cnt++; 1975 ALC_DESC_INC(prod, ALC_TX_RING_CNT); 1976 } 1977 /* Update producer index. */ 1978 sc->alc_cdata.alc_tx_prod = prod; 1979 1980 /* Finally set EOP on the last descriptor. */ 1981 prod = (prod + ALC_TX_RING_CNT - 1) % ALC_TX_RING_CNT; 1982 desc = &sc->alc_rdata.alc_tx_ring[prod]; 1983 desc->flags |= htole32(TD_EOP); 1984 1985 /* Swap dmamap of the first and the last. */ 1986 txd = &sc->alc_cdata.alc_txdesc[prod]; 1987 map = txd_last->tx_dmamap; 1988 txd_last->tx_dmamap = txd->tx_dmamap; 1989 txd->tx_dmamap = map; 1990 txd->tx_m = m; 1991 1992 return (0); 1993 } 1994 1995 static void 1996 alc_start(struct ifnet *ifp) 1997 { 1998 struct alc_softc *sc = ifp->if_softc; 1999 struct mbuf *m_head; 2000 int enq; 2001 2002 if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING) 2003 return; 2004 if ((sc->alc_flags & ALC_FLAG_LINK) == 0) 2005 return; 2006 if (IFQ_IS_EMPTY(&ifp->if_snd)) 2007 return; 2008 2009 /* Reclaim transmitted frames. */ 2010 if (sc->alc_cdata.alc_tx_cnt >= ALC_TX_DESC_HIWAT) 2011 alc_txeof(sc); 2012 2013 enq = 0; 2014 for (;;) { 2015 IFQ_DEQUEUE(&ifp->if_snd, m_head); 2016 if (m_head == NULL) 2017 break; 2018 2019 /* 2020 * Pack the data into the transmit ring. If we 2021 * don't have room, set the OACTIVE flag and wait 2022 * for the NIC to drain the ring. 2023 */ 2024 if (alc_encap(sc, &m_head)) { 2025 if (m_head == NULL) 2026 break; 2027 ifp->if_flags |= IFF_OACTIVE; 2028 break; 2029 } 2030 enq = 1; 2031 2032 /* 2033 * If there's a BPF listener, bounce a copy of this frame 2034 * to him. 2035 */ 2036 bpf_mtap(ifp, m_head, BPF_D_OUT); 2037 } 2038 2039 if (enq) { 2040 /* Sync descriptors. */ 2041 bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_tx_ring_map, 0, 2042 sc->alc_cdata.alc_tx_ring_map->dm_mapsize, 2043 BUS_DMASYNC_PREWRITE); 2044 /* Kick. Assume we're using normal Tx priority queue. */ 2045 CSR_WRITE_4(sc, ALC_MBOX_TD_PROD_IDX, 2046 (sc->alc_cdata.alc_tx_prod << 2047 MBOX_TD_PROD_LO_IDX_SHIFT) & 2048 MBOX_TD_PROD_LO_IDX_MASK); 2049 /* Set a timeout in case the chip goes out to lunch. 
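 * The stack decrements if_timer once per second and calls
 * alc_watchdog() when it reaches zero; alc_txeof() disarms it again
 * once the Tx ring drains.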
*/ 2050 ifp->if_timer = ALC_TX_TIMEOUT; 2051 } 2052 } 2053 2054 static void 2055 alc_watchdog(struct ifnet *ifp) 2056 { 2057 struct alc_softc *sc = ifp->if_softc; 2058 2059 if ((sc->alc_flags & ALC_FLAG_LINK) == 0) { 2060 printf("%s: watchdog timeout (missed link)\n", 2061 device_xname(sc->sc_dev)); 2062 ifp->if_oerrors++; 2063 alc_init_backend(ifp, false); 2064 return; 2065 } 2066 2067 printf("%s: watchdog timeout\n", device_xname(sc->sc_dev)); 2068 ifp->if_oerrors++; 2069 alc_init_backend(ifp, false); 2070 alc_start(ifp); 2071 } 2072 2073 static int 2074 alc_ioctl(struct ifnet *ifp, u_long cmd, void *data) 2075 { 2076 struct alc_softc *sc = ifp->if_softc; 2077 int s, error = 0; 2078 2079 s = splnet(); 2080 2081 switch (cmd) { 2082 case SIOCSIFADDR: 2083 error = ether_ioctl(ifp, cmd, data); 2084 ifp->if_flags |= IFF_UP; 2085 if (!(ifp->if_flags & IFF_RUNNING)) 2086 alc_init(ifp); 2087 break; 2088 2089 case SIOCSIFFLAGS: 2090 error = ether_ioctl(ifp, cmd, data); 2091 if (ifp->if_flags & IFF_UP) { 2092 if (ifp->if_flags & IFF_RUNNING) 2093 error = ENETRESET; 2094 else 2095 alc_init(ifp); 2096 } else { 2097 if (ifp->if_flags & IFF_RUNNING) 2098 alc_stop(ifp, 0); 2099 } 2100 break; 2101 2102 default: 2103 error = ether_ioctl(ifp, cmd, data); 2104 break; 2105 } 2106 2107 if (error == ENETRESET) { 2108 if (ifp->if_flags & IFF_RUNNING) 2109 alc_iff(sc); 2110 error = 0; 2111 } 2112 2113 splx(s); 2114 return (error); 2115 } 2116 2117 static void 2118 alc_mac_config(struct alc_softc *sc) 2119 { 2120 struct mii_data *mii; 2121 uint32_t reg; 2122 2123 mii = &sc->sc_miibus; 2124 reg = CSR_READ_4(sc, ALC_MAC_CFG); 2125 reg &= ~(MAC_CFG_FULL_DUPLEX | MAC_CFG_TX_FC | MAC_CFG_RX_FC | 2126 MAC_CFG_SPEED_MASK); 2127 if (sc->alc_ident->deviceid == PCI_PRODUCT_ATTANSIC_AR8151 || 2128 sc->alc_ident->deviceid == PCI_PRODUCT_ATTANSIC_AR8151_V2 || 2129 sc->alc_ident->deviceid == PCI_PRODUCT_ATTANSIC_AR8152_B2) 2130 reg |= MAC_CFG_HASH_ALG_CRC32 | MAC_CFG_SPEED_MODE_SW; 2131 /* Reprogram MAC with resolved speed/duplex. */ 2132 switch (IFM_SUBTYPE(mii->mii_media_active)) { 2133 case IFM_10_T: 2134 case IFM_100_TX: 2135 reg |= MAC_CFG_SPEED_10_100; 2136 break; 2137 case IFM_1000_T: 2138 reg |= MAC_CFG_SPEED_1000; 2139 break; 2140 } 2141 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) { 2142 reg |= MAC_CFG_FULL_DUPLEX; 2143 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0) 2144 reg |= MAC_CFG_TX_FC; 2145 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0) 2146 reg |= MAC_CFG_RX_FC; 2147 } 2148 CSR_WRITE_4(sc, ALC_MAC_CFG, reg); 2149 } 2150 2151 static void 2152 alc_stats_clear(struct alc_softc *sc) 2153 { 2154 struct smb sb, *smb; 2155 uint32_t *reg; 2156 int i; 2157 2158 if ((sc->alc_flags & ALC_FLAG_SMB_BUG) == 0) { 2159 bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_smb_map, 0, 2160 sc->alc_cdata.alc_smb_map->dm_mapsize, 2161 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 2162 smb = sc->alc_rdata.alc_smb; 2163 /* Update done, clear. */ 2164 smb->updated = 0; 2165 bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_smb_map, 0, 2166 sc->alc_cdata.alc_smb_map->dm_mapsize, 2167 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 2168 } else { 2169 for (reg = &sb.rx_frames, i = 0; reg <= &sb.rx_pkts_filtered; 2170 reg++) { 2171 CSR_READ_4(sc, ALC_RX_MIB_BASE + i); 2172 i += sizeof(uint32_t); 2173 } 2174 /* Read Tx statistics. 
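 * As with the Rx loop above, the MIB counters are assumed to be
 * clear-on-read, so the values returned by CSR_READ_4() are simply
 * discarded.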
*/ 2175 for (reg = &sb.tx_frames, i = 0; reg <= &sb.tx_mcast_bytes; 2176 reg++) { 2177 CSR_READ_4(sc, ALC_TX_MIB_BASE + i); 2178 i += sizeof(uint32_t); 2179 } 2180 } 2181 } 2182 2183 static void 2184 alc_stats_update(struct alc_softc *sc) 2185 { 2186 struct ifnet *ifp = &sc->sc_ec.ec_if; 2187 struct alc_hw_stats *stat; 2188 struct smb sb, *smb; 2189 uint32_t *reg; 2190 int i; 2191 2192 stat = &sc->alc_stats; 2193 if ((sc->alc_flags & ALC_FLAG_SMB_BUG) == 0) { 2194 bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_smb_map, 0, 2195 sc->alc_cdata.alc_smb_map->dm_mapsize, 2196 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 2197 smb = sc->alc_rdata.alc_smb; 2198 if (smb->updated == 0) 2199 return; 2200 } else { 2201 smb = &sb; 2202 /* Read Rx statistics. */ 2203 for (reg = &sb.rx_frames, i = 0; reg <= &sb.rx_pkts_filtered; 2204 reg++) { 2205 *reg = CSR_READ_4(sc, ALC_RX_MIB_BASE + i); 2206 i += sizeof(uint32_t); 2207 } 2208 /* Read Tx statistics. */ 2209 for (reg = &sb.tx_frames, i = 0; reg <= &sb.tx_mcast_bytes; 2210 reg++) { 2211 *reg = CSR_READ_4(sc, ALC_TX_MIB_BASE + i); 2212 i += sizeof(uint32_t); 2213 } 2214 } 2215 2216 /* Rx stats. */ 2217 stat->rx_frames += smb->rx_frames; 2218 stat->rx_bcast_frames += smb->rx_bcast_frames; 2219 stat->rx_mcast_frames += smb->rx_mcast_frames; 2220 stat->rx_pause_frames += smb->rx_pause_frames; 2221 stat->rx_control_frames += smb->rx_control_frames; 2222 stat->rx_crcerrs += smb->rx_crcerrs; 2223 stat->rx_lenerrs += smb->rx_lenerrs; 2224 stat->rx_bytes += smb->rx_bytes; 2225 stat->rx_runts += smb->rx_runts; 2226 stat->rx_fragments += smb->rx_fragments; 2227 stat->rx_pkts_64 += smb->rx_pkts_64; 2228 stat->rx_pkts_65_127 += smb->rx_pkts_65_127; 2229 stat->rx_pkts_128_255 += smb->rx_pkts_128_255; 2230 stat->rx_pkts_256_511 += smb->rx_pkts_256_511; 2231 stat->rx_pkts_512_1023 += smb->rx_pkts_512_1023; 2232 stat->rx_pkts_1024_1518 += smb->rx_pkts_1024_1518; 2233 stat->rx_pkts_1519_max += smb->rx_pkts_1519_max; 2234 stat->rx_pkts_truncated += smb->rx_pkts_truncated; 2235 stat->rx_fifo_oflows += smb->rx_fifo_oflows; 2236 stat->rx_rrs_errs += smb->rx_rrs_errs; 2237 stat->rx_alignerrs += smb->rx_alignerrs; 2238 stat->rx_bcast_bytes += smb->rx_bcast_bytes; 2239 stat->rx_mcast_bytes += smb->rx_mcast_bytes; 2240 stat->rx_pkts_filtered += smb->rx_pkts_filtered; 2241 2242 /* Tx stats. 
*/ 2243 stat->tx_frames += smb->tx_frames; 2244 stat->tx_bcast_frames += smb->tx_bcast_frames; 2245 stat->tx_mcast_frames += smb->tx_mcast_frames; 2246 stat->tx_pause_frames += smb->tx_pause_frames; 2247 stat->tx_excess_defer += smb->tx_excess_defer; 2248 stat->tx_control_frames += smb->tx_control_frames; 2249 stat->tx_deferred += smb->tx_deferred; 2250 stat->tx_bytes += smb->tx_bytes; 2251 stat->tx_pkts_64 += smb->tx_pkts_64; 2252 stat->tx_pkts_65_127 += smb->tx_pkts_65_127; 2253 stat->tx_pkts_128_255 += smb->tx_pkts_128_255; 2254 stat->tx_pkts_256_511 += smb->tx_pkts_256_511; 2255 stat->tx_pkts_512_1023 += smb->tx_pkts_512_1023; 2256 stat->tx_pkts_1024_1518 += smb->tx_pkts_1024_1518; 2257 stat->tx_pkts_1519_max += smb->tx_pkts_1519_max; 2258 stat->tx_single_colls += smb->tx_single_colls; 2259 stat->tx_multi_colls += smb->tx_multi_colls; 2260 stat->tx_late_colls += smb->tx_late_colls; 2261 stat->tx_excess_colls += smb->tx_excess_colls; 2262 stat->tx_underrun += smb->tx_underrun; 2263 stat->tx_desc_underrun += smb->tx_desc_underrun; 2264 stat->tx_lenerrs += smb->tx_lenerrs; 2265 stat->tx_pkts_truncated += smb->tx_pkts_truncated; 2266 stat->tx_bcast_bytes += smb->tx_bcast_bytes; 2267 stat->tx_mcast_bytes += smb->tx_mcast_bytes; 2268 2269 /* Update counters in ifnet. */ 2270 ifp->if_opackets += smb->tx_frames; 2271 2272 ifp->if_collisions += smb->tx_single_colls + 2273 smb->tx_multi_colls * 2 + smb->tx_late_colls + 2274 smb->tx_excess_colls * HDPX_CFG_RETRY_DEFAULT; 2275 2276 ifp->if_oerrors += smb->tx_late_colls + smb->tx_excess_colls + 2277 smb->tx_underrun + smb->tx_pkts_truncated; 2278 2279 ifp->if_ierrors += smb->rx_crcerrs + smb->rx_lenerrs + 2280 smb->rx_runts + smb->rx_pkts_truncated + 2281 smb->rx_fifo_oflows + smb->rx_rrs_errs + 2282 smb->rx_alignerrs; 2283 2284 if ((sc->alc_flags & ALC_FLAG_SMB_BUG) == 0) { 2285 /* Update done, clear. */ 2286 smb->updated = 0; 2287 bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_smb_map, 0, 2288 sc->alc_cdata.alc_smb_map->dm_mapsize, 2289 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 2290 } 2291 } 2292 2293 static int 2294 alc_intr(void *arg) 2295 { 2296 struct alc_softc *sc = arg; 2297 struct ifnet *ifp = &sc->sc_ec.ec_if; 2298 uint32_t status; 2299 2300 status = CSR_READ_4(sc, ALC_INTR_STATUS); 2301 if ((status & ALC_INTRS) == 0) 2302 return (0); 2303 2304 /* Acknowledge and disable interrupts. */ 2305 CSR_WRITE_4(sc, ALC_INTR_STATUS, status | INTR_DIS_INT); 2306 2307 if (ifp->if_flags & IFF_RUNNING) { 2308 if (status & INTR_RX_PKT) { 2309 int error; 2310 2311 error = alc_rxintr(sc); 2312 if (error) { 2313 alc_init_backend(ifp, false); 2314 return (0); 2315 } 2316 } 2317 2318 if (status & (INTR_DMA_RD_TO_RST | INTR_DMA_WR_TO_RST | 2319 INTR_TXQ_TO_RST)) { 2320 if (status & INTR_DMA_RD_TO_RST) 2321 printf("%s: DMA read error! -- resetting\n", 2322 device_xname(sc->sc_dev)); 2323 if (status & INTR_DMA_WR_TO_RST) 2324 printf("%s: DMA write error! -- resetting\n", 2325 device_xname(sc->sc_dev)); 2326 if (status & INTR_TXQ_TO_RST) 2327 printf("%s: TxQ reset! -- resetting\n", 2328 device_xname(sc->sc_dev)); 2329 alc_init_backend(ifp, false); 2330 return (0); 2331 } 2332 2333 alc_txeof(sc); 2334 if_schedule_deferred_start(ifp); 2335 } 2336 2337 /* Re-enable interrupts. 
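 * Writing 1s acknowledges any sources that latched while we were
 * running; leaving INTR_DIS_INT clear undoes the ack-and-disable
 * write at the top of the handler.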
 */
2338	CSR_WRITE_4(sc, ALC_INTR_STATUS, 0x7FFFFFFF);
2339	return (1);
2340 }
2341
2342 static void
2343 alc_txeof(struct alc_softc *sc)
2344 {
2345	struct ifnet *ifp = &sc->sc_ec.ec_if;
2346	struct alc_txdesc *txd;
2347	uint32_t cons, prod;
2348	int prog;
2349
2350	if (sc->alc_cdata.alc_tx_cnt == 0)
2351		return;
2352	bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_tx_ring_map, 0,
2353	    sc->alc_cdata.alc_tx_ring_map->dm_mapsize,
2354	    BUS_DMASYNC_POSTREAD);
2355	if ((sc->alc_flags & ALC_FLAG_CMB_BUG) == 0) {
2356		bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_cmb_map, 0,
2357		    sc->alc_cdata.alc_cmb_map->dm_mapsize,
2358		    BUS_DMASYNC_POSTREAD);
2359		prod = sc->alc_rdata.alc_cmb->cons;
2360	} else
2361		prod = CSR_READ_4(sc, ALC_MBOX_TD_CONS_IDX);
2362	/* Assume we're using normal Tx priority queue. */
2363	prod = (prod & MBOX_TD_CONS_LO_IDX_MASK) >>
2364	    MBOX_TD_CONS_LO_IDX_SHIFT;
2365	cons = sc->alc_cdata.alc_tx_cons;
2366	/*
2367	 * Go through our Tx list and free mbufs for those
2368	 * frames which have been transmitted.
2369	 */
2370	for (prog = 0; cons != prod; prog++,
2371	    ALC_DESC_INC(cons, ALC_TX_RING_CNT)) {
2372		if (sc->alc_cdata.alc_tx_cnt <= 0)
2373			break;
2374		prog++;
2375		ifp->if_flags &= ~IFF_OACTIVE;
2376		sc->alc_cdata.alc_tx_cnt--;
2377		txd = &sc->alc_cdata.alc_txdesc[cons];
2378		if (txd->tx_m != NULL) {
2379			/* Reclaim transmitted mbufs. */
2380			bus_dmamap_sync(sc->sc_dmat, txd->tx_dmamap, 0,
2381			    txd->tx_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
2382			bus_dmamap_unload(sc->sc_dmat, txd->tx_dmamap);
2383			m_freem(txd->tx_m);
2384			txd->tx_m = NULL;
2385		}
2386	}
2387
2388	if ((sc->alc_flags & ALC_FLAG_CMB_BUG) == 0)
2389		bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_cmb_map, 0,
2390		    sc->alc_cdata.alc_cmb_map->dm_mapsize, BUS_DMASYNC_PREREAD);
2391	sc->alc_cdata.alc_tx_cons = cons;
2392	/*
2393	 * Unarm the watchdog timer only when there are no pending
2394	 * frames in the Tx queue.
2395	 */
2396	if (sc->alc_cdata.alc_tx_cnt == 0)
2397		ifp->if_timer = 0;
2398 }
2399
2400 static int
2401 alc_newbuf(struct alc_softc *sc, struct alc_rxdesc *rxd, bool init)
2402 {
2403	struct mbuf *m;
2404	bus_dmamap_t map;
2405	int error;
2406
2407	MGETHDR(m, init ? M_WAITOK : M_DONTWAIT, MT_DATA);
2408	if (m == NULL)
2409		return (ENOBUFS);
2410	MCLGET(m, init ? M_WAITOK : M_DONTWAIT);
2411	if (!(m->m_flags & M_EXT)) {
2412		m_freem(m);
2413		return (ENOBUFS);
2414	}
2415
2416	m->m_len = m->m_pkthdr.len = RX_BUF_SIZE_MAX;
2417
2418	error = bus_dmamap_load_mbuf(sc->sc_dmat,
2419	    sc->alc_cdata.alc_rx_sparemap, m, BUS_DMA_NOWAIT);
2420
2421	if (error != 0) {
2422		m_freem(m);
2423
2424		if (init)
2425			printf("%s: can't load RX mbuf\n", device_xname(sc->sc_dev));
2426
2427		return (error);
2428	}
2429
2430	if (rxd->rx_m != NULL) {
2431		bus_dmamap_sync(sc->sc_dmat, rxd->rx_dmamap, 0,
2432		    rxd->rx_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
2433		bus_dmamap_unload(sc->sc_dmat, rxd->rx_dmamap);
2434	}
2435	map = rxd->rx_dmamap;
2436	rxd->rx_dmamap = sc->alc_cdata.alc_rx_sparemap;
2437	sc->alc_cdata.alc_rx_sparemap = map;
2438	bus_dmamap_sync(sc->sc_dmat, rxd->rx_dmamap, 0, rxd->rx_dmamap->dm_mapsize,
2439	    BUS_DMASYNC_PREREAD);
2440	rxd->rx_m = m;
2441	rxd->rx_desc->addr = htole64(rxd->rx_dmamap->dm_segs[0].ds_addr);
2442	return (0);
2443 }
2444
2445 static int
2446 alc_rxintr(struct alc_softc *sc)
2447 {
2448	struct ifnet *ifp = &sc->sc_ec.ec_if;
2449	struct rx_rdesc *rrd;
2450	uint32_t nsegs, status;
2451	int rr_cons, prog;
2452
2453	bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_rr_ring_map, 0,
2454	    sc->alc_cdata.alc_rr_ring_map->dm_mapsize,
2455	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2456	bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_rx_ring_map, 0,
2457	    sc->alc_cdata.alc_rx_ring_map->dm_mapsize,
2458	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2459	rr_cons = sc->alc_cdata.alc_rr_cons;
2460	for (prog = 0; (ifp->if_flags & IFF_RUNNING) != 0;) {
2461		rrd = &sc->alc_rdata.alc_rr_ring[rr_cons];
2462		status = le32toh(rrd->status);
2463		if ((status & RRD_VALID) == 0)
2464			break;
2465		nsegs = RRD_RD_CNT(le32toh(rrd->rdinfo));
2466		if (nsegs == 0) {
2467			/* This should not happen! */
2468			if (alcdebug)
2469				printf("%s: unexpected segment count -- "
2470				    "resetting\n", device_xname(sc->sc_dev));
2471			return (EIO);
2472		}
2473		alc_rxeof(sc, rrd);
2474		/* Clear Rx return status. */
2475		rrd->status = 0;
2476		ALC_DESC_INC(rr_cons, ALC_RR_RING_CNT);
2477		sc->alc_cdata.alc_rx_cons += nsegs;
2478		sc->alc_cdata.alc_rx_cons %= ALC_RR_RING_CNT;
2479		prog += nsegs;
2480	}
2481
2482	if (prog > 0) {
2483		/* Update the consumer index. */
2484		sc->alc_cdata.alc_rr_cons = rr_cons;
2485		/* Sync Rx return descriptors. */
2486		bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_rr_ring_map, 0,
2487		    sc->alc_cdata.alc_rr_ring_map->dm_mapsize,
2488		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2489		/*
2490		 * Sync the updated Rx descriptors so that the controller
2491		 * sees the modified buffer addresses.
2492		 */
2493		bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_rx_ring_map, 0,
2494		    sc->alc_cdata.alc_rx_ring_map->dm_mapsize,
2495		    BUS_DMASYNC_PREWRITE);
2496		/*
2497		 * Let the controller know about the availability of new
2498		 * Rx buffers. Since alc(4) uses RXQ_CFG_RD_BURST_DEFAULT
2499		 * descriptors, it may be possible to update
2500		 * ALC_MBOX_RD0_PROD_IDX only when Rx buffer pre-fetching
2501		 * is required. In addition, we already set
2502		 * ALC_RX_RD_FREE_THRESH to RX_RD_FREE_THRESH_LO_DEFAULT
2503		 * descriptors. However, it still seems that pre-fetching
2504		 * needs more experimentation.
2505		 */
2506		CSR_WRITE_4(sc, ALC_MBOX_RD0_PROD_IDX,
2507		    sc->alc_cdata.alc_rx_cons);
2508	}
2509
2510	return (0);
2511 }
2512
2513 /* Receive a frame.
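 * A frame may span several Rx buffers. rrd->rdinfo carries the index
 * of the first Rx descriptor and the segment count; the segments are
 * chained into one mbuf chain before the frame is passed up the stack.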
 */
2514 static void
2515 alc_rxeof(struct alc_softc *sc, struct rx_rdesc *rrd)
2516 {
2517	struct ifnet *ifp = &sc->sc_ec.ec_if;
2518	struct alc_rxdesc *rxd;
2519	struct mbuf *mp, *m;
2520	uint32_t rdinfo, status;
2521	int count, nsegs, rx_cons;
2522
2523	status = le32toh(rrd->status);
2524	rdinfo = le32toh(rrd->rdinfo);
2525	rx_cons = RRD_RD_IDX(rdinfo);
2526	nsegs = RRD_RD_CNT(rdinfo);
2527
2528	sc->alc_cdata.alc_rxlen = RRD_BYTES(status);
2529	if (status & (RRD_ERR_SUM | RRD_ERR_LENGTH)) {
2530		/*
2531		 * We want to pass the following frames to the upper
2532		 * layer regardless of the error status of the Rx
2533		 * return ring:
2534		 *
2535		 *  o IP/TCP/UDP checksum is bad.
2536		 *  o frame length and protocol-specific length do
2537		 *    not match.
2538		 *
2539		 * Force the network stack to compute checksums for
2540		 * errored frames.
2541		 */
2542		status |= RRD_TCP_UDPCSUM_NOK | RRD_IPCSUM_NOK;
2543		if ((status & (RRD_ERR_CRC | RRD_ERR_ALIGN |
2544		    RRD_ERR_TRUNC | RRD_ERR_RUNT)) != 0)
2545			return;
2546	}
2547
2548	for (count = 0; count < nsegs; count++,
2549	    ALC_DESC_INC(rx_cons, ALC_RX_RING_CNT)) {
2550		rxd = &sc->alc_cdata.alc_rxdesc[rx_cons];
2551		mp = rxd->rx_m;
2552		/* Add a new receive buffer to the ring. */
2553		if (alc_newbuf(sc, rxd, false) != 0) {
2554			ifp->if_iqdrops++;
2555			/* Reuse Rx buffers. */
2556			if (sc->alc_cdata.alc_rxhead != NULL)
2557				m_freem(sc->alc_cdata.alc_rxhead);
2558			break;
2559		}
2560
2561		/*
2562		 * Assume we've received a full-sized frame. The
2563		 * actual size is fixed up when we encounter the end
2564		 * of a multi-segmented frame.
2565		 */
2566		mp->m_len = sc->alc_buf_size;
2567
2568		/* Chain received mbufs. */
2569		if (sc->alc_cdata.alc_rxhead == NULL) {
2570			sc->alc_cdata.alc_rxhead = mp;
2571			sc->alc_cdata.alc_rxtail = mp;
2572		} else {
2573			m_remove_pkthdr(mp);
2574			sc->alc_cdata.alc_rxprev_tail =
2575			    sc->alc_cdata.alc_rxtail;
2576			sc->alc_cdata.alc_rxtail->m_next = mp;
2577			sc->alc_cdata.alc_rxtail = mp;
2578		}
2579
2580		if (count == nsegs - 1) {
2581			/* Last desc. for this frame. */
2582			m = sc->alc_cdata.alc_rxhead;
2583			KASSERT(m->m_flags & M_PKTHDR);
2584			/*
2585			 * It seems that the L1C/L2C controller has no way
2586			 * to tell the hardware to strip CRC bytes.
2587			 */
2588			m->m_pkthdr.len =
2589			    sc->alc_cdata.alc_rxlen - ETHER_CRC_LEN;
2590			if (nsegs > 1) {
2591				/* Set last mbuf size. */
2592				mp->m_len = sc->alc_cdata.alc_rxlen -
2593				    (nsegs - 1) * sc->alc_buf_size;
2594				/* Remove the CRC bytes in chained mbufs. */
2595				if (mp->m_len <= ETHER_CRC_LEN) {
2596					sc->alc_cdata.alc_rxtail =
2597					    sc->alc_cdata.alc_rxprev_tail;
2598					sc->alc_cdata.alc_rxtail->m_len -=
2599					    (ETHER_CRC_LEN - mp->m_len);
2600					sc->alc_cdata.alc_rxtail->m_next = NULL;
2601					m_freem(mp);
2602				} else {
2603					mp->m_len -= ETHER_CRC_LEN;
2604				}
2605			} else
2606				m->m_len = m->m_pkthdr.len;
2607			m_set_rcvif(m, ifp);
2608 #if NVLAN > 0
2609			/*
2610			 * Due to hardware bugs, Rx checksum offloading
2611			 * was intentionally disabled.
2612			 */
2613			if (status & RRD_VLAN_TAG) {
2614				uint32_t vtag = RRD_VLAN(le32toh(rrd->vtag));
2615				vlan_set_tag(m, ntohs(vtag));
2616			}
2617 #endif
2618
2619			/* Pass it on. */
2620			if_percpuq_enqueue(ifp->if_percpuq, m);
2621		}
2622	}
2623	/* Reset mbuf chains.
*/ 2624 ALC_RXCHAIN_RESET(sc); 2625 } 2626 2627 static void 2628 alc_tick(void *xsc) 2629 { 2630 struct alc_softc *sc = xsc; 2631 struct mii_data *mii = &sc->sc_miibus; 2632 int s; 2633 2634 s = splnet(); 2635 mii_tick(mii); 2636 alc_stats_update(sc); 2637 splx(s); 2638 2639 callout_schedule(&sc->sc_tick_ch, hz); 2640 } 2641 2642 static void 2643 alc_osc_reset(struct alc_softc *sc) 2644 { 2645 uint32_t reg; 2646 2647 reg = CSR_READ_4(sc, ALC_MISC3); 2648 reg &= ~MISC3_25M_BY_SW; 2649 reg |= MISC3_25M_NOTO_INTNL; 2650 CSR_WRITE_4(sc, ALC_MISC3, reg); 2651 2652 reg = CSR_READ_4(sc, ALC_MISC); 2653 if (AR816X_REV(sc->alc_rev) >= AR816X_REV_B0) { 2654 /* 2655 * Restore over-current protection default value. 2656 * This value could be reset by MAC reset. 2657 */ 2658 reg &= ~MISC_PSW_OCP_MASK; 2659 reg |= (MISC_PSW_OCP_DEFAULT << MISC_PSW_OCP_SHIFT); 2660 reg &= ~MISC_INTNLOSC_OPEN; 2661 CSR_WRITE_4(sc, ALC_MISC, reg); 2662 CSR_WRITE_4(sc, ALC_MISC, reg | MISC_INTNLOSC_OPEN); 2663 reg = CSR_READ_4(sc, ALC_MISC2); 2664 reg &= ~MISC2_CALB_START; 2665 CSR_WRITE_4(sc, ALC_MISC2, reg); 2666 CSR_WRITE_4(sc, ALC_MISC2, reg | MISC2_CALB_START); 2667 2668 } else { 2669 reg &= ~MISC_INTNLOSC_OPEN; 2670 /* Disable isolate for revision A devices. */ 2671 if (AR816X_REV(sc->alc_rev) <= AR816X_REV_A1) 2672 reg &= ~MISC_ISO_ENB; 2673 CSR_WRITE_4(sc, ALC_MISC, reg | MISC_INTNLOSC_OPEN); 2674 CSR_WRITE_4(sc, ALC_MISC, reg); 2675 } 2676 2677 DELAY(20); 2678 } 2679 2680 static void 2681 alc_reset(struct alc_softc *sc) 2682 { 2683 uint32_t pmcfg, reg; 2684 int i; 2685 2686 pmcfg = 0; 2687 if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0) { 2688 /* Reset workaround. */ 2689 CSR_WRITE_4(sc, ALC_MBOX_RD0_PROD_IDX, 1); 2690 if (AR816X_REV(sc->alc_rev) <= AR816X_REV_A1 && 2691 (sc->alc_rev & 0x01) != 0) { 2692 /* Disable L0s/L1s before reset. */ 2693 pmcfg = CSR_READ_4(sc, ALC_PM_CFG); 2694 if ((pmcfg & (PM_CFG_ASPM_L0S_ENB | PM_CFG_ASPM_L1_ENB)) 2695 != 0) { 2696 pmcfg &= ~(PM_CFG_ASPM_L0S_ENB | 2697 PM_CFG_ASPM_L1_ENB); 2698 CSR_WRITE_4(sc, ALC_PM_CFG, pmcfg); 2699 } 2700 } 2701 } 2702 reg = CSR_READ_4(sc, ALC_MASTER_CFG); 2703 reg |= MASTER_OOB_DIS_OFF | MASTER_RESET; 2704 CSR_WRITE_4(sc, ALC_MASTER_CFG, reg); 2705 2706 if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0) { 2707 for (i = ALC_RESET_TIMEOUT; i > 0; i--) { 2708 DELAY(10); 2709 if (CSR_READ_4(sc, ALC_MBOX_RD0_PROD_IDX) == 0) 2710 break; 2711 } 2712 if (i == 0) 2713 printf("%s: MAC reset timeout!\n", device_xname(sc->sc_dev)); 2714 } 2715 for (i = ALC_RESET_TIMEOUT; i > 0; i--) { 2716 DELAY(10); 2717 if ((CSR_READ_4(sc, ALC_MASTER_CFG) & MASTER_RESET) == 0) 2718 break; 2719 } 2720 if (i == 0) 2721 printf("%s: master reset timeout!\n", device_xname(sc->sc_dev)); 2722 2723 for (i = ALC_RESET_TIMEOUT; i > 0; i--) { 2724 reg = CSR_READ_4(sc, ALC_IDLE_STATUS); 2725 if ((reg & (IDLE_STATUS_RXMAC | IDLE_STATUS_TXMAC | 2726 IDLE_STATUS_RXQ | IDLE_STATUS_TXQ)) == 0) 2727 break; 2728 DELAY(10); 2729 } 2730 if (i == 0) 2731 printf("%s: reset timeout(0x%08x)!\n", 2732 device_xname(sc->sc_dev), reg); 2733 2734 if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0) { 2735 if (AR816X_REV(sc->alc_rev) <= AR816X_REV_A1 && 2736 (sc->alc_rev & 0x01) != 0) { 2737 reg = CSR_READ_4(sc, ALC_MASTER_CFG); 2738 reg |= MASTER_CLK_SEL_DIS; 2739 CSR_WRITE_4(sc, ALC_MASTER_CFG, reg); 2740 /* Restore L0s/L1s config. 
*/ 2741 if ((pmcfg & (PM_CFG_ASPM_L0S_ENB | PM_CFG_ASPM_L1_ENB)) 2742 != 0) 2743 CSR_WRITE_4(sc, ALC_PM_CFG, pmcfg); 2744 } 2745 2746 alc_osc_reset(sc); 2747 reg = CSR_READ_4(sc, ALC_MISC3); 2748 reg &= ~MISC3_25M_BY_SW; 2749 reg |= MISC3_25M_NOTO_INTNL; 2750 CSR_WRITE_4(sc, ALC_MISC3, reg); 2751 reg = CSR_READ_4(sc, ALC_MISC); 2752 reg &= ~MISC_INTNLOSC_OPEN; 2753 if (AR816X_REV(sc->alc_rev) <= AR816X_REV_A1) 2754 reg &= ~MISC_ISO_ENB; 2755 CSR_WRITE_4(sc, ALC_MISC, reg); 2756 DELAY(20); 2757 } 2758 if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0 || 2759 sc->alc_ident->deviceid == PCI_PRODUCT_ATTANSIC_AR8152_B || 2760 sc->alc_ident->deviceid == PCI_PRODUCT_ATTANSIC_AR8151_V2) 2761 CSR_WRITE_4(sc, ALC_SERDES_LOCK, 2762 CSR_READ_4(sc, ALC_SERDES_LOCK) | SERDES_MAC_CLK_SLOWDOWN | 2763 SERDES_PHY_CLK_SLOWDOWN); 2764 } 2765 2766 static int 2767 alc_init(struct ifnet *ifp) 2768 { 2769 2770 return alc_init_backend(ifp, true); 2771 } 2772 2773 static int 2774 alc_init_backend(struct ifnet *ifp, bool init) 2775 { 2776 struct alc_softc *sc = ifp->if_softc; 2777 struct mii_data *mii; 2778 uint8_t eaddr[ETHER_ADDR_LEN]; 2779 bus_addr_t paddr; 2780 uint32_t reg, rxf_hi, rxf_lo; 2781 int error; 2782 2783 /* 2784 * Cancel any pending I/O. 2785 */ 2786 alc_stop(ifp, 0); 2787 /* 2788 * Reset the chip to a known state. 2789 */ 2790 alc_reset(sc); 2791 2792 /* Initialize Rx descriptors. */ 2793 error = alc_init_rx_ring(sc, init); 2794 if (error != 0) { 2795 printf("%s: no memory for Rx buffers.\n", device_xname(sc->sc_dev)); 2796 alc_stop(ifp, 0); 2797 return (error); 2798 } 2799 alc_init_rr_ring(sc); 2800 alc_init_tx_ring(sc); 2801 alc_init_cmb(sc); 2802 alc_init_smb(sc); 2803 2804 /* Enable all clocks. */ 2805 if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0) { 2806 CSR_WRITE_4(sc, ALC_CLK_GATING_CFG, CLK_GATING_DMAW_ENB | 2807 CLK_GATING_DMAR_ENB | CLK_GATING_TXQ_ENB | 2808 CLK_GATING_RXQ_ENB | CLK_GATING_TXMAC_ENB | 2809 CLK_GATING_RXMAC_ENB); 2810 if (AR816X_REV(sc->alc_rev) >= AR816X_REV_B0) 2811 CSR_WRITE_4(sc, ALC_IDLE_DECISN_TIMER, 2812 IDLE_DECISN_TIMER_DEFAULT_1MS); 2813 } else 2814 CSR_WRITE_4(sc, ALC_CLK_GATING_CFG, 0); 2815 2816 /* Reprogram the station address. */ 2817 memcpy(eaddr, CLLADDR(ifp->if_sadl), sizeof(eaddr)); 2818 CSR_WRITE_4(sc, ALC_PAR0, 2819 eaddr[2] << 24 | eaddr[3] << 16 | eaddr[4] << 8 | eaddr[5]); 2820 CSR_WRITE_4(sc, ALC_PAR1, eaddr[0] << 8 | eaddr[1]); 2821 /* 2822 * Clear WOL status and disable all WOL feature as WOL 2823 * would interfere Rx operation under normal environments. 2824 */ 2825 CSR_READ_4(sc, ALC_WOL_CFG); 2826 CSR_WRITE_4(sc, ALC_WOL_CFG, 0); 2827 /* Set Tx descriptor base addresses. */ 2828 paddr = sc->alc_rdata.alc_tx_ring_paddr; 2829 CSR_WRITE_4(sc, ALC_TX_BASE_ADDR_HI, ALC_ADDR_HI(paddr)); 2830 CSR_WRITE_4(sc, ALC_TDL_HEAD_ADDR_LO, ALC_ADDR_LO(paddr)); 2831 /* We don't use high priority ring. */ 2832 CSR_WRITE_4(sc, ALC_TDH_HEAD_ADDR_LO, 0); 2833 /* Set Tx descriptor counter. */ 2834 CSR_WRITE_4(sc, ALC_TD_RING_CNT, 2835 (ALC_TX_RING_CNT << TD_RING_CNT_SHIFT) & TD_RING_CNT_MASK); 2836 /* Set Rx descriptor base addresses. */ 2837 paddr = sc->alc_rdata.alc_rx_ring_paddr; 2838 CSR_WRITE_4(sc, ALC_RX_BASE_ADDR_HI, ALC_ADDR_HI(paddr)); 2839 CSR_WRITE_4(sc, ALC_RD0_HEAD_ADDR_LO, ALC_ADDR_LO(paddr)); 2840 if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) == 0) { 2841 /* We use one Rx ring. 
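 * The remaining producer rings are unused, so their head addresses
 * are simply cleared.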
	 */
2842		CSR_WRITE_4(sc, ALC_RD1_HEAD_ADDR_LO, 0);
2843		CSR_WRITE_4(sc, ALC_RD2_HEAD_ADDR_LO, 0);
2844		CSR_WRITE_4(sc, ALC_RD3_HEAD_ADDR_LO, 0);
2845	}
2846	/* Set Rx descriptor counter. */
2847	CSR_WRITE_4(sc, ALC_RD_RING_CNT,
2848	    (ALC_RX_RING_CNT << RD_RING_CNT_SHIFT) & RD_RING_CNT_MASK);
2849
2850	/*
2851	 * Let the hardware split jumbo frames into alc_max_buf_sized
2852	 * chunks if they do not fit the buffer size. The Rx return
2853	 * descriptor holds a counter that indicates how many fragments
2854	 * were made by the hardware. The buffer size should be a
2855	 * multiple of 8 bytes. Since the hardware has a limit on the
2856	 * buffer size, always use the maximum value.
2857	 * For strict-alignment architectures make sure to reduce the
2858	 * buffer size by 8 bytes to make room for the alignment fixup.
2859	 */
2860	sc->alc_buf_size = RX_BUF_SIZE_MAX;
2861	CSR_WRITE_4(sc, ALC_RX_BUF_SIZE, sc->alc_buf_size);
2862
2863	paddr = sc->alc_rdata.alc_rr_ring_paddr;
2864	/* Set Rx return descriptor base addresses. */
2865	CSR_WRITE_4(sc, ALC_RRD0_HEAD_ADDR_LO, ALC_ADDR_LO(paddr));
2866	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) == 0) {
2867		/* We use one Rx return ring. */
2868		CSR_WRITE_4(sc, ALC_RRD1_HEAD_ADDR_LO, 0);
2869		CSR_WRITE_4(sc, ALC_RRD2_HEAD_ADDR_LO, 0);
2870		CSR_WRITE_4(sc, ALC_RRD3_HEAD_ADDR_LO, 0);
2871	}
2872	/* Set Rx return descriptor counter. */
2873	CSR_WRITE_4(sc, ALC_RRD_RING_CNT,
2874	    (ALC_RR_RING_CNT << RRD_RING_CNT_SHIFT) & RRD_RING_CNT_MASK);
2875	paddr = sc->alc_rdata.alc_cmb_paddr;
2876	CSR_WRITE_4(sc, ALC_CMB_BASE_ADDR_LO, ALC_ADDR_LO(paddr));
2877	paddr = sc->alc_rdata.alc_smb_paddr;
2878	CSR_WRITE_4(sc, ALC_SMB_BASE_ADDR_HI, ALC_ADDR_HI(paddr));
2879	CSR_WRITE_4(sc, ALC_SMB_BASE_ADDR_LO, ALC_ADDR_LO(paddr));
2880
2881	if (sc->alc_ident->deviceid == PCI_PRODUCT_ATTANSIC_AR8152_B) {
2882		/* Reconfigure SRAM - Vendor magic. */
2883		CSR_WRITE_4(sc, ALC_SRAM_RX_FIFO_LEN, 0x000002A0);
2884		CSR_WRITE_4(sc, ALC_SRAM_TX_FIFO_LEN, 0x00000100);
2885		CSR_WRITE_4(sc, ALC_SRAM_RX_FIFO_ADDR, 0x029F0000);
2886		CSR_WRITE_4(sc, ALC_SRAM_RD0_ADDR, 0x02BF02A0);
2887		CSR_WRITE_4(sc, ALC_SRAM_TX_FIFO_ADDR, 0x03BF02C0);
2888		CSR_WRITE_4(sc, ALC_SRAM_TD_ADDR, 0x03DF03C0);
2889		CSR_WRITE_4(sc, ALC_TXF_WATER_MARK, 0x00000000);
2890		CSR_WRITE_4(sc, ALC_RD_DMA_CFG, 0x00000000);
2891	}
2892
2893	/* Tell hardware that we're ready to load DMA blocks. */
2894	CSR_WRITE_4(sc, ALC_DMA_BLOCK, DMA_BLOCK_LOAD);
2895
2896	/* Configure interrupt moderation timer. */
2897	sc->alc_int_rx_mod = ALC_IM_RX_TIMER_DEFAULT;
2898	sc->alc_int_tx_mod = ALC_IM_TX_TIMER_DEFAULT;
2899	reg = ALC_USECS(sc->alc_int_rx_mod) << IM_TIMER_RX_SHIFT;
2900	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) == 0)
2901		reg |= ALC_USECS(sc->alc_int_tx_mod) << IM_TIMER_TX_SHIFT;
2902	CSR_WRITE_4(sc, ALC_IM_TIMER, reg);
2903	/*
2904	 * We don't want automatic interrupt clearing, as the task
2905	 * queue for the interrupt should see the interrupt status.
2906	 */
2907	reg = CSR_READ_4(sc, ALC_MASTER_CFG);
2908	reg &= ~(MASTER_IM_RX_TIMER_ENB | MASTER_IM_TX_TIMER_ENB);
2909	reg |= MASTER_SA_TIMER_ENB;
2910	if (ALC_USECS(sc->alc_int_rx_mod) != 0)
2911		reg |= MASTER_IM_RX_TIMER_ENB;
2912	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) == 0 &&
2913	    ALC_USECS(sc->alc_int_tx_mod) != 0)
2914		reg |= MASTER_IM_TX_TIMER_ENB;
2915	CSR_WRITE_4(sc, ALC_MASTER_CFG, reg);
2916	/*
2917	 * Disable the interrupt re-trigger timer. We don't want
2918	 * automatic re-triggering of un-ACKed interrupts.
2919	 */
2920	CSR_WRITE_4(sc, ALC_INTR_RETRIG_TIMER, ALC_USECS(0));
2921	/* Configure CMB.
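	 * The coalescing message block mirrors the Tx consumer index in
	 * host memory, which lets alc_txeof() skip a register read
	 * whenever the CMB is usable (see ALC_FLAG_CMB_BUG).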
	 */
2922	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0) {
2923		CSR_WRITE_4(sc, ALC_CMB_TD_THRESH, ALC_TX_RING_CNT / 3);
2924		CSR_WRITE_4(sc, ALC_CMB_TX_TIMER,
2925		    ALC_USECS(sc->alc_int_tx_mod));
2926	} else {
2927		if ((sc->alc_flags & ALC_FLAG_CMB_BUG) == 0) {
2928			CSR_WRITE_4(sc, ALC_CMB_TD_THRESH, 4);
2929			CSR_WRITE_4(sc, ALC_CMB_TX_TIMER, ALC_USECS(5000));
2930		} else
2931			CSR_WRITE_4(sc, ALC_CMB_TX_TIMER, ALC_USECS(0));
2932	}
2933	/*
2934	 * The hardware can be configured to issue an SMB interrupt
2935	 * based on a programmed interval. Since there is a callout
2936	 * that is invoked every hz in the driver, we use that instead
2937	 * of relying on the periodic SMB interrupt.
2938	 */
2939	CSR_WRITE_4(sc, ALC_SMB_STAT_TIMER, ALC_USECS(0));
2940	/* Clear MAC statistics. */
2941	alc_stats_clear(sc);
2942
2943	/*
2944	 * Always use the maximum frame size the controller can support.
2945	 * Otherwise received frames that have a larger frame length
2946	 * than the alc(4) MTU would be silently dropped in hardware.
2947	 * This would make path-MTU discovery hard, as the sender
2948	 * wouldn't get any responses from the receiver. alc(4)
2949	 * supports multi-fragmented frames on the Rx path, so it has
2950	 * no issue assembling fragmented frames. Using the maximum
2951	 * frame size also removes the need to reinitialize the
2952	 * hardware when the interface MTU configuration is changed.
2953	 *
2954	 * Be conservative in what you do, be liberal in what you
2955	 * accept from others - RFC 793.
2956	 */
2957	CSR_WRITE_4(sc, ALC_FRAME_SIZE, sc->alc_ident->max_framelen);
2958
2959	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) == 0) {
2960		/* Disable header split(?) */
2961		CSR_WRITE_4(sc, ALC_HDS_CFG, 0);
2962
2963		/* Configure IPG/IFG parameters. */
2964		CSR_WRITE_4(sc, ALC_IPG_IFG_CFG,
2965		    ((IPG_IFG_IPGT_DEFAULT << IPG_IFG_IPGT_SHIFT) &
2966		    IPG_IFG_IPGT_MASK) |
2967		    ((IPG_IFG_MIFG_DEFAULT << IPG_IFG_MIFG_SHIFT) &
2968		    IPG_IFG_MIFG_MASK) |
2969		    ((IPG_IFG_IPG1_DEFAULT << IPG_IFG_IPG1_SHIFT) &
2970		    IPG_IFG_IPG1_MASK) |
2971		    ((IPG_IFG_IPG2_DEFAULT << IPG_IFG_IPG2_SHIFT) &
2972		    IPG_IFG_IPG2_MASK));
2973		/* Set parameters for half-duplex media. */
2974		CSR_WRITE_4(sc, ALC_HDPX_CFG,
2975		    ((HDPX_CFG_LCOL_DEFAULT << HDPX_CFG_LCOL_SHIFT) &
2976		    HDPX_CFG_LCOL_MASK) |
2977		    ((HDPX_CFG_RETRY_DEFAULT << HDPX_CFG_RETRY_SHIFT) &
2978		    HDPX_CFG_RETRY_MASK) | HDPX_CFG_EXC_DEF_EN |
2979		    ((HDPX_CFG_ABEBT_DEFAULT << HDPX_CFG_ABEBT_SHIFT) &
2980		    HDPX_CFG_ABEBT_MASK) |
2981		    ((HDPX_CFG_JAMIPG_DEFAULT << HDPX_CFG_JAMIPG_SHIFT) &
2982		    HDPX_CFG_JAMIPG_MASK));
2983	}
2984
2985	/*
2986	 * Set the TSO/checksum offload threshold. For frames that are
2987	 * larger than this threshold, the hardware won't do
2988	 * TSO/checksum offloading.
2989	 */
2990	reg = (sc->alc_ident->max_framelen >> TSO_OFFLOAD_THRESH_UNIT_SHIFT) &
2991	    TSO_OFFLOAD_THRESH_MASK;
2992	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0)
2993		reg |= TSO_OFFLOAD_ERRLGPKT_DROP_ENB;
2994	CSR_WRITE_4(sc, ALC_TSO_OFFLOAD_THRESH, reg);
2995	/* Configure TxQ.
*/ 2996 reg = (alc_dma_burst[sc->alc_dma_rd_burst] << 2997 TXQ_CFG_TX_FIFO_BURST_SHIFT) & TXQ_CFG_TX_FIFO_BURST_MASK; 2998 if (sc->alc_ident->deviceid == PCI_PRODUCT_ATTANSIC_AR8152_B || 2999 sc->alc_ident->deviceid == PCI_PRODUCT_ATTANSIC_AR8152_B2) 3000 reg >>= 1; 3001 reg |= (TXQ_CFG_TD_BURST_DEFAULT << TXQ_CFG_TD_BURST_SHIFT) & 3002 TXQ_CFG_TD_BURST_MASK; 3003 reg |= TXQ_CFG_IP_OPTION_ENB | TXQ_CFG_8023_ENB; 3004 CSR_WRITE_4(sc, ALC_TXQ_CFG, reg | TXQ_CFG_ENHANCED_MODE); 3005 if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0) { 3006 reg = (TXQ_CFG_TD_BURST_DEFAULT << HQTD_CFG_Q1_BURST_SHIFT | 3007 TXQ_CFG_TD_BURST_DEFAULT << HQTD_CFG_Q2_BURST_SHIFT | 3008 TXQ_CFG_TD_BURST_DEFAULT << HQTD_CFG_Q3_BURST_SHIFT | 3009 HQTD_CFG_BURST_ENB); 3010 CSR_WRITE_4(sc, ALC_HQTD_CFG, reg); 3011 reg = WRR_PRI_RESTRICT_NONE; 3012 reg |= (WRR_PRI_DEFAULT << WRR_PRI0_SHIFT | 3013 WRR_PRI_DEFAULT << WRR_PRI1_SHIFT | 3014 WRR_PRI_DEFAULT << WRR_PRI2_SHIFT | 3015 WRR_PRI_DEFAULT << WRR_PRI3_SHIFT); 3016 CSR_WRITE_4(sc, ALC_WRR, reg); 3017 } else { 3018 /* Configure Rx free descriptor pre-fetching. */ 3019 CSR_WRITE_4(sc, ALC_RX_RD_FREE_THRESH, 3020 ((RX_RD_FREE_THRESH_HI_DEFAULT << 3021 RX_RD_FREE_THRESH_HI_SHIFT) & RX_RD_FREE_THRESH_HI_MASK) | 3022 ((RX_RD_FREE_THRESH_LO_DEFAULT << 3023 RX_RD_FREE_THRESH_LO_SHIFT) & RX_RD_FREE_THRESH_LO_MASK)); 3024 } 3025 3026 /* 3027 * Configure flow control parameters. 3028 * XON : 80% of Rx FIFO 3029 * XOFF : 30% of Rx FIFO 3030 */ 3031 if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0) { 3032 reg = CSR_READ_4(sc, ALC_SRAM_RX_FIFO_LEN); 3033 reg &= SRAM_RX_FIFO_LEN_MASK; 3034 reg *= 8; 3035 if (reg > 8 * 1024) 3036 reg -= RX_FIFO_PAUSE_816X_RSVD; 3037 else 3038 reg -= RX_BUF_SIZE_MAX; 3039 reg /= 8; 3040 CSR_WRITE_4(sc, ALC_RX_FIFO_PAUSE_THRESH, 3041 ((reg << RX_FIFO_PAUSE_THRESH_LO_SHIFT) & 3042 RX_FIFO_PAUSE_THRESH_LO_MASK) | 3043 (((RX_FIFO_PAUSE_816X_RSVD / 8) << 3044 RX_FIFO_PAUSE_THRESH_HI_SHIFT) & 3045 RX_FIFO_PAUSE_THRESH_HI_MASK)); 3046 } else if (sc->alc_ident->deviceid == PCI_PRODUCT_ATTANSIC_AR8131 || 3047 sc->alc_ident->deviceid == PCI_PRODUCT_ATTANSIC_AR8132) { 3048 reg = CSR_READ_4(sc, ALC_SRAM_RX_FIFO_LEN); 3049 rxf_hi = (reg * 8) / 10; 3050 rxf_lo = (reg * 3) / 10; 3051 CSR_WRITE_4(sc, ALC_RX_FIFO_PAUSE_THRESH, 3052 ((rxf_lo << RX_FIFO_PAUSE_THRESH_LO_SHIFT) & 3053 RX_FIFO_PAUSE_THRESH_LO_MASK) | 3054 ((rxf_hi << RX_FIFO_PAUSE_THRESH_HI_SHIFT) & 3055 RX_FIFO_PAUSE_THRESH_HI_MASK)); 3056 } 3057 3058 if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) == 0) { 3059 /* Disable RSS until I understand L1C/L2C's RSS logic. */ 3060 CSR_WRITE_4(sc, ALC_RSS_IDT_TABLE0, 0); 3061 CSR_WRITE_4(sc, ALC_RSS_CPU, 0); 3062 } 3063 3064 /* Configure RxQ. */ 3065 reg = (RXQ_CFG_RD_BURST_DEFAULT << RXQ_CFG_RD_BURST_SHIFT) & 3066 RXQ_CFG_RD_BURST_MASK; 3067 reg |= RXQ_CFG_RSS_MODE_DIS; 3068 if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0) { 3069 reg |= (RXQ_CFG_816X_IDT_TBL_SIZE_DEFAULT << 3070 RXQ_CFG_816X_IDT_TBL_SIZE_SHIFT) & 3071 RXQ_CFG_816X_IDT_TBL_SIZE_MASK; 3072 if ((sc->alc_flags & ALC_FLAG_FASTETHER) == 0) 3073 reg |= RXQ_CFG_ASPM_THROUGHPUT_LIMIT_100M; 3074 } else { 3075 if ((sc->alc_flags & ALC_FLAG_FASTETHER) == 0 && 3076 sc->alc_ident->deviceid != PCI_PRODUCT_ATTANSIC_AR8151_V2) 3077 reg |= RXQ_CFG_ASPM_THROUGHPUT_LIMIT_100M; 3078 } 3079 CSR_WRITE_4(sc, ALC_RXQ_CFG, reg); 3080 3081 /* Configure DMA parameters. 
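 * alc_dma_rd_burst and alc_dma_wr_burst are presumed to have been
 * derived from the PCIe maximum read request and maximum payload
 * sizes at attach time.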
*/ 3082 reg = DMA_CFG_OUT_ORDER | DMA_CFG_RD_REQ_PRI; 3083 reg |= sc->alc_rcb; 3084 if ((sc->alc_flags & ALC_FLAG_CMB_BUG) == 0) 3085 reg |= DMA_CFG_CMB_ENB; 3086 if ((sc->alc_flags & ALC_FLAG_SMB_BUG) == 0) 3087 reg |= DMA_CFG_SMB_ENB; 3088 else 3089 reg |= DMA_CFG_SMB_DIS; 3090 reg |= (sc->alc_dma_rd_burst & DMA_CFG_RD_BURST_MASK) << 3091 DMA_CFG_RD_BURST_SHIFT; 3092 reg |= (sc->alc_dma_wr_burst & DMA_CFG_WR_BURST_MASK) << 3093 DMA_CFG_WR_BURST_SHIFT; 3094 reg |= (DMA_CFG_RD_DELAY_CNT_DEFAULT << DMA_CFG_RD_DELAY_CNT_SHIFT) & 3095 DMA_CFG_RD_DELAY_CNT_MASK; 3096 reg |= (DMA_CFG_WR_DELAY_CNT_DEFAULT << DMA_CFG_WR_DELAY_CNT_SHIFT) & 3097 DMA_CFG_WR_DELAY_CNT_MASK; 3098 if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0) { 3099 switch (AR816X_REV(sc->alc_rev)) { 3100 case AR816X_REV_A0: 3101 case AR816X_REV_A1: 3102 reg |= DMA_CFG_RD_CHNL_SEL_2; 3103 break; 3104 case AR816X_REV_B0: 3105 /* FALLTHROUGH */ 3106 default: 3107 reg |= DMA_CFG_RD_CHNL_SEL_4; 3108 break; 3109 } 3110 } 3111 CSR_WRITE_4(sc, ALC_DMA_CFG, reg); 3112 3113 /* 3114 * Configure Tx/Rx MACs. 3115 * - Auto-padding for short frames. 3116 * - Enable CRC generation. 3117 * Actual reconfiguration of MAC for resolved speed/duplex 3118 * is followed after detection of link establishment. 3119 * AR813x/AR815x always does checksum computation regardless 3120 * of MAC_CFG_RXCSUM_ENB bit. Also the controller is known to 3121 * have bug in protocol field in Rx return structure so 3122 * these controllers can't handle fragmented frames. Disable 3123 * Rx checksum offloading until there is a newer controller 3124 * that has sane implementation. 3125 */ 3126 reg = MAC_CFG_TX_CRC_ENB | MAC_CFG_TX_AUTO_PAD | MAC_CFG_FULL_DUPLEX | 3127 ((MAC_CFG_PREAMBLE_DEFAULT << MAC_CFG_PREAMBLE_SHIFT) & 3128 MAC_CFG_PREAMBLE_MASK); 3129 if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0 || 3130 sc->alc_ident->deviceid == PCI_PRODUCT_ATTANSIC_AR8151 || 3131 sc->alc_ident->deviceid == PCI_PRODUCT_ATTANSIC_AR8151_V2 || 3132 sc->alc_ident->deviceid == PCI_PRODUCT_ATTANSIC_AR8152_B2) 3133 reg |= MAC_CFG_HASH_ALG_CRC32 | MAC_CFG_SPEED_MODE_SW; 3134 if ((sc->alc_flags & ALC_FLAG_FASTETHER) != 0) 3135 reg |= MAC_CFG_SPEED_10_100; 3136 else 3137 reg |= MAC_CFG_SPEED_1000; 3138 CSR_WRITE_4(sc, ALC_MAC_CFG, reg); 3139 3140 /* Set up the receive filter. */ 3141 alc_iff(sc); 3142 alc_rxvlan(sc); 3143 3144 /* Acknowledge all pending interrupts and clear it. */ 3145 CSR_WRITE_4(sc, ALC_INTR_MASK, ALC_INTRS); 3146 CSR_WRITE_4(sc, ALC_INTR_STATUS, 0xFFFFFFFF); 3147 CSR_WRITE_4(sc, ALC_INTR_STATUS, 0); 3148 3149 sc->alc_flags &= ~ALC_FLAG_LINK; 3150 /* Switch to the current media. */ 3151 mii = &sc->sc_miibus; 3152 mii_mediachg(mii); 3153 3154 callout_schedule(&sc->sc_tick_ch, hz); 3155 3156 ifp->if_flags |= IFF_RUNNING; 3157 ifp->if_flags &= ~IFF_OACTIVE; 3158 3159 return (0); 3160 } 3161 3162 static void 3163 alc_stop(struct ifnet *ifp, int disable) 3164 { 3165 struct alc_softc *sc = ifp->if_softc; 3166 struct alc_txdesc *txd; 3167 struct alc_rxdesc *rxd; 3168 uint32_t reg; 3169 int i; 3170 3171 callout_stop(&sc->sc_tick_ch); 3172 3173 /* 3174 * Mark the interface down and cancel the watchdog timer. 3175 */ 3176 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 3177 ifp->if_timer = 0; 3178 3179 sc->alc_flags &= ~ALC_FLAG_LINK; 3180 3181 alc_stats_update(sc); 3182 3183 mii_down(&sc->sc_miibus); 3184 3185 /* Disable interrupts. */ 3186 CSR_WRITE_4(sc, ALC_INTR_MASK, 0); 3187 CSR_WRITE_4(sc, ALC_INTR_STATUS, 0xFFFFFFFF); 3188 3189 /* Disable DMA. 
*/ 3190 reg = CSR_READ_4(sc, ALC_DMA_CFG); 3191 reg &= ~(DMA_CFG_CMB_ENB | DMA_CFG_SMB_ENB); 3192 reg |= DMA_CFG_SMB_DIS; 3193 CSR_WRITE_4(sc, ALC_DMA_CFG, reg); 3194 DELAY(1000); 3195 3196 /* Stop Rx/Tx MACs. */ 3197 alc_stop_mac(sc); 3198 3199 /* Disable interrupts which might be touched in taskq handler. */ 3200 CSR_WRITE_4(sc, ALC_INTR_STATUS, 0xFFFFFFFF); 3201 3202 /* Disable L0s/L1s */ 3203 alc_aspm(sc, 0, IFM_UNKNOWN); 3204 3205 /* Reclaim Rx buffers that have been processed. */ 3206 if (sc->alc_cdata.alc_rxhead != NULL) 3207 m_freem(sc->alc_cdata.alc_rxhead); 3208 ALC_RXCHAIN_RESET(sc); 3209 /* 3210 * Free Tx/Rx mbufs still in the queues. 3211 */ 3212 for (i = 0; i < ALC_RX_RING_CNT; i++) { 3213 rxd = &sc->alc_cdata.alc_rxdesc[i]; 3214 if (rxd->rx_m != NULL) { 3215 bus_dmamap_sync(sc->sc_dmat, rxd->rx_dmamap, 0, 3216 rxd->rx_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD); 3217 bus_dmamap_unload(sc->sc_dmat, rxd->rx_dmamap); 3218 m_freem(rxd->rx_m); 3219 rxd->rx_m = NULL; 3220 } 3221 } 3222 for (i = 0; i < ALC_TX_RING_CNT; i++) { 3223 txd = &sc->alc_cdata.alc_txdesc[i]; 3224 if (txd->tx_m != NULL) { 3225 bus_dmamap_sync(sc->sc_dmat, txd->tx_dmamap, 0, 3226 txd->tx_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE); 3227 bus_dmamap_unload(sc->sc_dmat, txd->tx_dmamap); 3228 m_freem(txd->tx_m); 3229 txd->tx_m = NULL; 3230 } 3231 } 3232 } 3233 3234 static void 3235 alc_stop_mac(struct alc_softc *sc) 3236 { 3237 uint32_t reg; 3238 int i; 3239 3240 alc_stop_queue(sc); 3241 /* Disable Rx/Tx MAC. */ 3242 reg = CSR_READ_4(sc, ALC_MAC_CFG); 3243 if ((reg & (MAC_CFG_TX_ENB | MAC_CFG_RX_ENB)) != 0) { 3244 reg &= ~(MAC_CFG_TX_ENB | MAC_CFG_RX_ENB); 3245 CSR_WRITE_4(sc, ALC_MAC_CFG, reg); 3246 } 3247 for (i = ALC_TIMEOUT; i > 0; i--) { 3248 reg = CSR_READ_4(sc, ALC_IDLE_STATUS); 3249 if ((reg & (IDLE_STATUS_RXMAC | IDLE_STATUS_TXMAC)) == 0) 3250 break; 3251 DELAY(10); 3252 } 3253 if (i == 0) 3254 printf("%s: could not disable Rx/Tx MAC(0x%08x)!\n", 3255 device_xname(sc->sc_dev), reg); 3256 } 3257 3258 static void 3259 alc_start_queue(struct alc_softc *sc) 3260 { 3261 uint32_t qcfg[] = { 3262 0, 3263 RXQ_CFG_QUEUE0_ENB, 3264 RXQ_CFG_QUEUE0_ENB | RXQ_CFG_QUEUE1_ENB, 3265 RXQ_CFG_QUEUE0_ENB | RXQ_CFG_QUEUE1_ENB | RXQ_CFG_QUEUE2_ENB, 3266 RXQ_CFG_ENB 3267 }; 3268 uint32_t cfg; 3269 3270 /* Enable RxQ. */ 3271 cfg = CSR_READ_4(sc, ALC_RXQ_CFG); 3272 if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) == 0) { 3273 cfg &= ~RXQ_CFG_ENB; 3274 cfg |= qcfg[1]; 3275 } else 3276 cfg |= RXQ_CFG_QUEUE0_ENB; 3277 CSR_WRITE_4(sc, ALC_RXQ_CFG, cfg); 3278 /* Enable TxQ. */ 3279 cfg = CSR_READ_4(sc, ALC_TXQ_CFG); 3280 cfg |= TXQ_CFG_ENB; 3281 CSR_WRITE_4(sc, ALC_TXQ_CFG, cfg); 3282 } 3283 3284 static void 3285 alc_stop_queue(struct alc_softc *sc) 3286 { 3287 uint32_t reg; 3288 int i; 3289 3290 /* Disable RxQ. */ 3291 reg = CSR_READ_4(sc, ALC_RXQ_CFG); 3292 if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) == 0) { 3293 if ((reg & RXQ_CFG_ENB) != 0) { 3294 reg &= ~RXQ_CFG_ENB; 3295 CSR_WRITE_4(sc, ALC_RXQ_CFG, reg); 3296 } 3297 } else { 3298 if ((reg & RXQ_CFG_QUEUE0_ENB) != 0) { 3299 reg &= ~RXQ_CFG_QUEUE0_ENB; 3300 CSR_WRITE_4(sc, ALC_RXQ_CFG, reg); 3301 } 3302 } 3303 /* Disable TxQ. 
 */
3304	reg = CSR_READ_4(sc, ALC_TXQ_CFG);
3305	if ((reg & TXQ_CFG_ENB) != 0) {
3306		reg &= ~TXQ_CFG_ENB;
3307		CSR_WRITE_4(sc, ALC_TXQ_CFG, reg);
3308	}
3309	DELAY(40);
3310	for (i = ALC_TIMEOUT; i > 0; i--) {
3311		reg = CSR_READ_4(sc, ALC_IDLE_STATUS);
3312		if ((reg & (IDLE_STATUS_RXQ | IDLE_STATUS_TXQ)) == 0)
3313			break;
3314		DELAY(10);
3315	}
3316	if (i == 0)
3317		printf("%s: could not disable RxQ/TxQ (0x%08x)!\n",
3318		    device_xname(sc->sc_dev), reg);
3319 }
3320
3321 static void
3322 alc_init_tx_ring(struct alc_softc *sc)
3323 {
3324	struct alc_ring_data *rd;
3325	struct alc_txdesc *txd;
3326	int i;
3327
3328	sc->alc_cdata.alc_tx_prod = 0;
3329	sc->alc_cdata.alc_tx_cons = 0;
3330	sc->alc_cdata.alc_tx_cnt = 0;
3331
3332	rd = &sc->alc_rdata;
3333	memset(rd->alc_tx_ring, 0, ALC_TX_RING_SZ);
3334	for (i = 0; i < ALC_TX_RING_CNT; i++) {
3335		txd = &sc->alc_cdata.alc_txdesc[i];
3336		txd->tx_m = NULL;
3337	}
3338
3339	bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_tx_ring_map, 0,
3340	    sc->alc_cdata.alc_tx_ring_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
3341 }
3342
3343 static int
3344 alc_init_rx_ring(struct alc_softc *sc, bool init)
3345 {
3346	struct alc_ring_data *rd;
3347	struct alc_rxdesc *rxd;
3348	int i;
3349
3350	sc->alc_cdata.alc_rx_cons = ALC_RX_RING_CNT - 1;
3351	rd = &sc->alc_rdata;
3352	memset(rd->alc_rx_ring, 0, ALC_RX_RING_SZ);
3353	for (i = 0; i < ALC_RX_RING_CNT; i++) {
3354		rxd = &sc->alc_cdata.alc_rxdesc[i];
3355		rxd->rx_m = NULL;
3356		rxd->rx_desc = &rd->alc_rx_ring[i];
3357		if (alc_newbuf(sc, rxd, init) != 0)
3358			return (ENOBUFS);
3359	}
3360
3361	/*
3362	 * Since the controller does not update Rx descriptors, the
3363	 * driver does not have to read Rx descriptors back, so
3364	 * BUS_DMASYNC_PREWRITE is enough to ensure coherence.
3365	 */
3366	bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_rx_ring_map, 0,
3367	    sc->alc_cdata.alc_rx_ring_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
3368	/* Let controller know availability of new Rx buffers.
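	 * The producer index is written once here for the freshly
	 * filled ring; at runtime alc_rxintr() pushes it forward after
	 * each batch of replenished buffers.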
*/ 3369 CSR_WRITE_4(sc, ALC_MBOX_RD0_PROD_IDX, sc->alc_cdata.alc_rx_cons); 3370 3371 return (0); 3372 } 3373 3374 static void 3375 alc_init_rr_ring(struct alc_softc *sc) 3376 { 3377 struct alc_ring_data *rd; 3378 3379 sc->alc_cdata.alc_rr_cons = 0; 3380 ALC_RXCHAIN_RESET(sc); 3381 3382 rd = &sc->alc_rdata; 3383 memset(rd->alc_rr_ring, 0, ALC_RR_RING_SZ); 3384 bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_rr_ring_map, 0, 3385 sc->alc_cdata.alc_rr_ring_map->dm_mapsize, 3386 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 3387 } 3388 3389 static void 3390 alc_init_cmb(struct alc_softc *sc) 3391 { 3392 struct alc_ring_data *rd; 3393 3394 rd = &sc->alc_rdata; 3395 memset(rd->alc_cmb, 0, ALC_CMB_SZ); 3396 bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_cmb_map, 0, 3397 sc->alc_cdata.alc_cmb_map->dm_mapsize, 3398 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 3399 } 3400 3401 static void 3402 alc_init_smb(struct alc_softc *sc) 3403 { 3404 struct alc_ring_data *rd; 3405 3406 rd = &sc->alc_rdata; 3407 memset(rd->alc_smb, 0, ALC_SMB_SZ); 3408 bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_smb_map, 0, 3409 sc->alc_cdata.alc_smb_map->dm_mapsize, 3410 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 3411 } 3412 3413 static void 3414 alc_rxvlan(struct alc_softc *sc) 3415 { 3416 uint32_t reg; 3417 3418 reg = CSR_READ_4(sc, ALC_MAC_CFG); 3419 if (sc->sc_ec.ec_capenable & ETHERCAP_VLAN_HWTAGGING) 3420 reg |= MAC_CFG_VLAN_TAG_STRIP; 3421 else 3422 reg &= ~MAC_CFG_VLAN_TAG_STRIP; 3423 CSR_WRITE_4(sc, ALC_MAC_CFG, reg); 3424 } 3425 3426 static void 3427 alc_iff(struct alc_softc *sc) 3428 { 3429 struct ethercom *ec = &sc->sc_ec; 3430 struct ifnet *ifp = &ec->ec_if; 3431 struct ether_multi *enm; 3432 struct ether_multistep step; 3433 uint32_t crc; 3434 uint32_t mchash[2]; 3435 uint32_t rxcfg; 3436 3437 rxcfg = CSR_READ_4(sc, ALC_MAC_CFG); 3438 rxcfg &= ~(MAC_CFG_ALLMULTI | MAC_CFG_BCAST | MAC_CFG_PROMISC); 3439 ifp->if_flags &= ~IFF_ALLMULTI; 3440 3441 /* 3442 * Always accept broadcast frames. 3443 */ 3444 rxcfg |= MAC_CFG_BCAST; 3445 3446 if (ifp->if_flags & IFF_PROMISC || ec->ec_multicnt > 0) { 3447 ifp->if_flags |= IFF_ALLMULTI; 3448 if (ifp->if_flags & IFF_PROMISC) 3449 rxcfg |= MAC_CFG_PROMISC; 3450 else 3451 rxcfg |= MAC_CFG_ALLMULTI; 3452 mchash[0] = mchash[1] = 0xFFFFFFFF; 3453 } else { 3454 /* Program new filter. */ 3455 memset(mchash, 0, sizeof(mchash)); 3456 3457 ETHER_LOCK(ec); 3458 ETHER_FIRST_MULTI(step, ec, enm); 3459 while (enm != NULL) { 3460 crc = ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN); 3461 mchash[crc >> 31] |= 1 << ((crc >> 26) & 0x1f); 3462 ETHER_NEXT_MULTI(step, enm); 3463 } 3464 ETHER_UNLOCK(ec); 3465 } 3466 3467 CSR_WRITE_4(sc, ALC_MAR0, mchash[0]); 3468 CSR_WRITE_4(sc, ALC_MAR1, mchash[1]); 3469 CSR_WRITE_4(sc, ALC_MAC_CFG, rxcfg); 3470 } 3471 3472 MODULE(MODULE_CLASS_DRIVER, if_alc, "pci"); 3473 3474 #ifdef _MODULE 3475 #include "ioconf.c" 3476 #endif 3477 3478 static int 3479 if_alc_modcmd(modcmd_t cmd, void *opaque) 3480 { 3481 int error = 0; 3482 3483 switch (cmd) { 3484 case MODULE_CMD_INIT: 3485 #ifdef _MODULE 3486 error = config_init_component(cfdriver_ioconf_if_alc, 3487 cfattach_ioconf_if_alc, cfdata_ioconf_if_alc); 3488 #endif 3489 return error; 3490 case MODULE_CMD_FINI: 3491 #ifdef _MODULE 3492 error = config_fini_component(cfdriver_ioconf_if_alc, 3493 cfattach_ioconf_if_alc, cfdata_ioconf_if_alc); 3494 #endif 3495 return error; 3496 default: 3497 return ENOTTY; 3498 } 3499 } 3500