/*	$OpenBSD: if_alc.c,v 1.47 2019/03/27 07:55:24 kevlo Exp $	*/
/*-
 * Copyright (c) 2009, Pyun YongHyeon <yongari@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/* Driver for Atheros AR813x/AR815x/AR816x/AR817x PCIe Ethernet. */

#include "bpfilter.h"
#include "vlan.h"

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/queue.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/timeout.h>
#include <sys/socket.h>

#include <machine/bus.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#include <netinet/in.h>
#include <netinet/if_ether.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_alcreg.h>

int	alc_match(struct device *, void *, void *);
void	alc_attach(struct device *, struct device *, void *);
int	alc_detach(struct device *, int);
int	alc_activate(struct device *, int);

int	alc_init(struct ifnet *);
void	alc_start(struct ifnet *);
int	alc_ioctl(struct ifnet *, u_long, caddr_t);
void	alc_watchdog(struct ifnet *);
int	alc_mediachange(struct ifnet *);
void	alc_mediastatus(struct ifnet *, struct ifmediareq *);

void	alc_aspm(struct alc_softc *, int, uint64_t);
void	alc_aspm_813x(struct alc_softc *, uint64_t);
void	alc_aspm_816x(struct alc_softc *, int);
void	alc_disable_l0s_l1(struct alc_softc *);
int	alc_dma_alloc(struct alc_softc *);
void	alc_dma_free(struct alc_softc *);
int	alc_encap(struct alc_softc *, struct mbuf *);
void	alc_get_macaddr(struct alc_softc *);
void	alc_get_macaddr_813x(struct alc_softc *);
void	alc_get_macaddr_816x(struct alc_softc *);
void	alc_get_macaddr_par(struct alc_softc *);
void	alc_init_cmb(struct alc_softc *);
void	alc_init_rr_ring(struct alc_softc *);
int	alc_init_rx_ring(struct alc_softc *);
void	alc_init_smb(struct alc_softc *);
void	alc_init_tx_ring(struct alc_softc *);
int	alc_intr(void *);
void	alc_mac_config(struct alc_softc *);
int	alc_mii_readreg_813x(struct device *, int, int);
int	alc_mii_readreg_816x(struct device *, int, int);
void	alc_mii_writereg_813x(struct device *, int, int, int);
void	alc_mii_writereg_816x(struct device *, int, int, int);
void	alc_dsp_fixup(struct alc_softc *, int);
int	alc_miibus_readreg(struct device *, int, int);
void	alc_miibus_statchg(struct device *);
void	alc_miibus_writereg(struct device *, int, int, int);
int	alc_miidbg_readreg(struct alc_softc *, int);
void	alc_miidbg_writereg(struct alc_softc *, int, int);
int	alc_miiext_readreg(struct alc_softc *, int, int);
void	alc_miiext_writereg(struct alc_softc *, int, int, int);
void	alc_phy_reset_813x(struct alc_softc *);
void	alc_phy_reset_816x(struct alc_softc *);
int	alc_newbuf(struct alc_softc *, struct alc_rxdesc *);
void	alc_phy_down(struct alc_softc *);
void	alc_phy_reset(struct alc_softc *);
void	alc_reset(struct alc_softc *);
void	alc_rxeof(struct alc_softc *, struct rx_rdesc *);
int	alc_rxintr(struct alc_softc *);
void	alc_iff(struct alc_softc *);
void	alc_rxvlan(struct alc_softc *);
void	alc_start_queue(struct alc_softc *);
void	alc_stats_clear(struct alc_softc *);
void	alc_stats_update(struct alc_softc *);
void	alc_stop(struct alc_softc *);
void	alc_stop_mac(struct alc_softc *);
void	alc_stop_queue(struct alc_softc *);
void	alc_tick(void *);
void	alc_txeof(struct alc_softc *);
void	alc_init_pcie(struct alc_softc *, int);
void	alc_config_msi(struct alc_softc *);
void	alc_osc_reset(struct alc_softc *);

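/*
 * Lookup table for the PCIe maximum read request/payload size fields,
 * which are a 3-bit encoding, i.e. 128 << n bytes; encodings 6 and 7
 * are reserved, hence the trailing zero entries.
 */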
uint32_t alc_dma_burst[] = { 128, 256, 512, 1024, 2048, 4096, 0, 0 };

const struct pci_matchid alc_devices[] = {
	{ PCI_VENDOR_ATTANSIC, PCI_PRODUCT_ATTANSIC_L1C },
	{ PCI_VENDOR_ATTANSIC, PCI_PRODUCT_ATTANSIC_L2C },
	{ PCI_VENDOR_ATTANSIC, PCI_PRODUCT_ATTANSIC_L1D },
	{ PCI_VENDOR_ATTANSIC, PCI_PRODUCT_ATTANSIC_L1D_1 },
	{ PCI_VENDOR_ATTANSIC, PCI_PRODUCT_ATTANSIC_L2C_1 },
	{ PCI_VENDOR_ATTANSIC, PCI_PRODUCT_ATTANSIC_L2C_2 },
	{ PCI_VENDOR_ATTANSIC, PCI_PRODUCT_ATTANSIC_AR8161 },
	{ PCI_VENDOR_ATTANSIC, PCI_PRODUCT_ATTANSIC_AR8162 },
	{ PCI_VENDOR_ATTANSIC, PCI_PRODUCT_ATTANSIC_AR8171 },
	{ PCI_VENDOR_ATTANSIC, PCI_PRODUCT_ATTANSIC_AR8172 },
	{ PCI_VENDOR_ATTANSIC, PCI_PRODUCT_ATTANSIC_E2200 },
	{ PCI_VENDOR_ATTANSIC, PCI_PRODUCT_ATTANSIC_E2400 },
	{ PCI_VENDOR_ATTANSIC, PCI_PRODUCT_ATTANSIC_E2500 }
};

struct cfattach alc_ca = {
	sizeof (struct alc_softc), alc_match, alc_attach, alc_detach,
	alc_activate
};

struct cfdriver alc_cd = {
	NULL, "alc", DV_IFNET
};

int alcdebug = 0;
#define	DPRINTF(x)	do { if (alcdebug) printf x; } while (0)

#define ALC_CSUM_FEATURES	(M_TCP_CSUM_OUT | M_UDP_CSUM_OUT)

int
alc_miibus_readreg(struct device *dev, int phy, int reg)
{
	struct alc_softc *sc = (struct alc_softc *)dev;
	uint32_t v;

	if (phy != sc->alc_phyaddr)
		return (0);

	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0)
		v = alc_mii_readreg_816x(dev, phy, reg);
	else
		v = alc_mii_readreg_813x(dev, phy, reg);

	return (v);
}

int
alc_mii_readreg_813x(struct device *dev, int phy, int reg)
{
	struct alc_softc *sc = (struct alc_softc *)dev;
	uint32_t v;
	int i;

	if (phy != sc->alc_phyaddr)
		return (0);

	/*
	 * For AR8132 fast ethernet controller, do not report 1000baseT
	 * capability to mii(4). Even though AR8132 uses the same
	 * model/revision number of F1 gigabit PHY, the PHY has no
	 * ability to establish 1000baseT link.
	 */
	if ((sc->alc_flags & ALC_FLAG_FASTETHER) != 0 &&
	    reg == MII_EXTSR)
		return (0);

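	/*
	 * Post the read op to the MDIO control register, then poll
	 * until the controller clears the EXECUTE/BUSY bits; the wait
	 * is bounded by ALC_PHY_TIMEOUT iterations of 5us each.
	 */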
	CSR_WRITE_4(sc, ALC_MDIO, MDIO_OP_EXECUTE | MDIO_OP_READ |
	    MDIO_SUP_PREAMBLE | MDIO_CLK_25_4 | MDIO_REG_ADDR(reg));
	for (i = ALC_PHY_TIMEOUT; i > 0; i--) {
		DELAY(5);
		v = CSR_READ_4(sc, ALC_MDIO);
		if ((v & (MDIO_OP_EXECUTE | MDIO_OP_BUSY)) == 0)
			break;
	}

	if (i == 0) {
		printf("%s: phy read timeout: phy %d, reg %d\n",
		    sc->sc_dev.dv_xname, phy, reg);
		return (0);
	}

	return ((v & MDIO_DATA_MASK) >> MDIO_DATA_SHIFT);
}

int
alc_mii_readreg_816x(struct device *dev, int phy, int reg)
{
	struct alc_softc *sc = (struct alc_softc *)dev;
	uint32_t clk, v;
	int i;

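	/*
	 * Once a link is up the slower MDIO clock divider is selected;
	 * judging by the macro names this is 25MHz/128 vs. 25MHz/4.
	 */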
	if ((sc->alc_flags & ALC_FLAG_LINK) != 0)
		clk = MDIO_CLK_25_128;
	else
		clk = MDIO_CLK_25_4;
	CSR_WRITE_4(sc, ALC_MDIO, MDIO_OP_EXECUTE | MDIO_OP_READ |
	    MDIO_SUP_PREAMBLE | clk | MDIO_REG_ADDR(reg));
	for (i = ALC_PHY_TIMEOUT; i > 0; i--) {
		DELAY(5);
		v = CSR_READ_4(sc, ALC_MDIO);
		if ((v & MDIO_OP_BUSY) == 0)
			break;
	}

	return ((v & MDIO_DATA_MASK) >> MDIO_DATA_SHIFT);
}

void
alc_miibus_writereg(struct device *dev, int phy, int reg, int val)
{
	struct alc_softc *sc = (struct alc_softc *)dev;

	if (phy != sc->alc_phyaddr)
		return;

	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0)
		alc_mii_writereg_816x(dev, phy, reg, val);
	else
		alc_mii_writereg_813x(dev, phy, reg, val);
}

void
alc_mii_writereg_813x(struct device *dev, int phy, int reg, int val)
{
	struct alc_softc *sc = (struct alc_softc *)dev;
	uint32_t v;
	int i;

	CSR_WRITE_4(sc, ALC_MDIO, MDIO_OP_EXECUTE | MDIO_OP_WRITE |
	    (val & MDIO_DATA_MASK) << MDIO_DATA_SHIFT |
	    MDIO_SUP_PREAMBLE | MDIO_CLK_25_4 | MDIO_REG_ADDR(reg));
	for (i = ALC_PHY_TIMEOUT; i > 0; i--) {
		DELAY(5);
		v = CSR_READ_4(sc, ALC_MDIO);
		if ((v & (MDIO_OP_EXECUTE | MDIO_OP_BUSY)) == 0)
			break;
	}

	if (i == 0)
		printf("%s: phy write timeout: phy %d, reg %d\n",
		    sc->sc_dev.dv_xname, phy, reg);
}

void
alc_mii_writereg_816x(struct device *dev, int phy, int reg, int val)
{
	struct alc_softc *sc = (struct alc_softc *)dev;
	uint32_t clk, v;
	int i;

	if ((sc->alc_flags & ALC_FLAG_LINK) != 0)
		clk = MDIO_CLK_25_128;
	else
		clk = MDIO_CLK_25_4;
	CSR_WRITE_4(sc, ALC_MDIO, MDIO_OP_EXECUTE | MDIO_OP_WRITE |
	    ((val & MDIO_DATA_MASK) << MDIO_DATA_SHIFT) | MDIO_REG_ADDR(reg) |
	    MDIO_SUP_PREAMBLE | clk);
	for (i = ALC_PHY_TIMEOUT; i > 0; i--) {
		DELAY(5);
		v = CSR_READ_4(sc, ALC_MDIO);
		if ((v & MDIO_OP_BUSY) == 0)
			break;
	}
	if (i == 0)
		printf("%s: phy write timeout: phy %d, reg %d\n",
		    sc->sc_dev.dv_xname, phy, reg);
}

void
alc_miibus_statchg(struct device *dev)
{
	struct alc_softc *sc = (struct alc_softc *)dev;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct mii_data *mii = &sc->sc_miibus;
	uint32_t reg;

	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return;

	sc->alc_flags &= ~ALC_FLAG_LINK;
	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
	    (IFM_ACTIVE | IFM_AVALID)) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			sc->alc_flags |= ALC_FLAG_LINK;
			break;
		case IFM_1000_T:
			if ((sc->alc_flags & ALC_FLAG_FASTETHER) == 0)
				sc->alc_flags |= ALC_FLAG_LINK;
			break;
		default:
			break;
		}
	}
	alc_stop_queue(sc);
	/* Stop Rx/Tx MACs. */
	alc_stop_mac(sc);

	/* Program MACs with resolved speed/duplex/flow-control. */
	if ((sc->alc_flags & ALC_FLAG_LINK) != 0) {
		alc_start_queue(sc);
		alc_mac_config(sc);
		/* Re-enable Tx/Rx MACs. */
		reg = CSR_READ_4(sc, ALC_MAC_CFG);
		reg |= MAC_CFG_TX_ENB | MAC_CFG_RX_ENB;
		CSR_WRITE_4(sc, ALC_MAC_CFG, reg);
	}

	alc_aspm(sc, 0, IFM_SUBTYPE(mii->mii_media_active));
	alc_dsp_fixup(sc, IFM_SUBTYPE(mii->mii_media_active));
}

int
alc_miidbg_readreg(struct alc_softc *sc, int reg)
{
	alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr, ALC_MII_DBG_ADDR,
	    reg);
	return (alc_miibus_readreg(&sc->sc_dev, sc->alc_phyaddr,
	    ALC_MII_DBG_DATA));
}

void
alc_miidbg_writereg(struct alc_softc *sc, int reg, int val)
{
	alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr, ALC_MII_DBG_ADDR,
	    reg);
	alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr, ALC_MII_DBG_DATA,
	    val);
}

int
alc_miiext_readreg(struct alc_softc *sc, int devaddr, int reg)
{
	uint32_t clk, v;
	int i;

	CSR_WRITE_4(sc, ALC_EXT_MDIO, EXT_MDIO_REG(reg) |
	    EXT_MDIO_DEVADDR(devaddr));
	if ((sc->alc_flags & ALC_FLAG_LINK) != 0)
		clk = MDIO_CLK_25_128;
	else
		clk = MDIO_CLK_25_4;
	CSR_WRITE_4(sc, ALC_MDIO, MDIO_OP_EXECUTE | MDIO_OP_READ |
	    MDIO_SUP_PREAMBLE | clk | MDIO_MODE_EXT);
	for (i = ALC_PHY_TIMEOUT; i > 0; i--) {
		DELAY(5);
		v = CSR_READ_4(sc, ALC_MDIO);
		if ((v & MDIO_OP_BUSY) == 0)
			break;
	}

	return ((v & MDIO_DATA_MASK) >> MDIO_DATA_SHIFT);
}

void
alc_miiext_writereg(struct alc_softc *sc, int devaddr, int reg, int val)
{
	uint32_t clk, v;
	int i;

	CSR_WRITE_4(sc, ALC_EXT_MDIO, EXT_MDIO_REG(reg) |
	    EXT_MDIO_DEVADDR(devaddr));
	if ((sc->alc_flags & ALC_FLAG_LINK) != 0)
		clk = MDIO_CLK_25_128;
	else
		clk = MDIO_CLK_25_4;
	CSR_WRITE_4(sc, ALC_MDIO, MDIO_OP_EXECUTE | MDIO_OP_WRITE |
	    ((val & MDIO_DATA_MASK) << MDIO_DATA_SHIFT) |
	    MDIO_SUP_PREAMBLE | clk | MDIO_MODE_EXT);
	for (i = ALC_PHY_TIMEOUT; i > 0; i--) {
		DELAY(5);
		v = CSR_READ_4(sc, ALC_MDIO);
		if ((v & MDIO_OP_BUSY) == 0)
			break;
	}
}

void
alc_dsp_fixup(struct alc_softc *sc, int media)
{
	uint16_t agc, len, val;

	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) == 0)
		return;
	if (AR816X_REV(sc->alc_rev) >= AR816X_REV_C0)
		return;

	/*
	 * Vendor PHY magic.
	 * 1000BT/AZ, wrong cable length
	 */
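	/*
	 * Compare the PHY's cable length estimate and AGC gain against
	 * per-speed limits: a long cable with high gain gets the
	 * long-cable analog thresholds, everything else the defaults.
	 */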
	if ((sc->alc_flags & ALC_FLAG_LINK) != 0) {
		len = alc_miiext_readreg(sc, MII_EXT_PCS, MII_EXT_CLDCTL6);
		len = (len >> EXT_CLDCTL6_CAB_LEN_SHIFT) &
		    EXT_CLDCTL6_CAB_LEN_MASK;
		agc = alc_miidbg_readreg(sc, MII_DBG_AGC);
		agc = (agc >> DBG_AGC_2_VGA_SHIFT) & DBG_AGC_2_VGA_MASK;
		if ((media == IFM_1000_T && len > EXT_CLDCTL6_CAB_LEN_SHORT1G &&
		    agc > DBG_AGC_LONG1G_LIMT) ||
		    (media == IFM_100_TX && len > EXT_CLDCTL6_CAB_LEN_SHORT100M &&
		    agc > DBG_AGC_LONG100M_LIMT)) {
			alc_miidbg_writereg(sc, MII_DBG_AZ_ANADECT,
			    DBG_AZ_ANADECT_LONG);
			val = alc_miiext_readreg(sc, MII_EXT_ANEG,
			    MII_EXT_ANEG_AFE);
			val |= ANEG_AFEE_10BT_100M_TH;
			alc_miiext_writereg(sc, MII_EXT_ANEG,
			    MII_EXT_ANEG_AFE, val);
		} else {
			alc_miidbg_writereg(sc, MII_DBG_AZ_ANADECT,
			    DBG_AZ_ANADECT_DEFAULT);
			val = alc_miiext_readreg(sc, MII_EXT_ANEG,
			    MII_EXT_ANEG_AFE);
			val &= ~ANEG_AFEE_10BT_100M_TH;
			alc_miiext_writereg(sc, MII_EXT_ANEG, MII_EXT_ANEG_AFE,
			    val);
		}
		if ((sc->alc_flags & ALC_FLAG_LINK_WAR) != 0 &&
		    AR816X_REV(sc->alc_rev) == AR816X_REV_B0) {
			if (media == IFM_1000_T) {
				/*
				 * Giga link threshold, raise the tolerance of
				 * noise 50%.
				 */
				val = alc_miidbg_readreg(sc, MII_DBG_MSE20DB);
				val &= ~DBG_MSE20DB_TH_MASK;
				val |= (DBG_MSE20DB_TH_HI <<
				    DBG_MSE20DB_TH_SHIFT);
				alc_miidbg_writereg(sc, MII_DBG_MSE20DB, val);
			} else if (media == IFM_100_TX)
				alc_miidbg_writereg(sc, MII_DBG_MSE16DB,
				    DBG_MSE16DB_UP);
		}
	} else {
		val = alc_miiext_readreg(sc, MII_EXT_ANEG, MII_EXT_ANEG_AFE);
		val &= ~ANEG_AFEE_10BT_100M_TH;
		alc_miiext_writereg(sc, MII_EXT_ANEG, MII_EXT_ANEG_AFE, val);
		if ((sc->alc_flags & ALC_FLAG_LINK_WAR) != 0 &&
		    AR816X_REV(sc->alc_rev) == AR816X_REV_B0) {
			alc_miidbg_writereg(sc, MII_DBG_MSE16DB,
			    DBG_MSE16DB_DOWN);
			val = alc_miidbg_readreg(sc, MII_DBG_MSE20DB);
			val &= ~DBG_MSE20DB_TH_MASK;
			val |= (DBG_MSE20DB_TH_DEFAULT << DBG_MSE20DB_TH_SHIFT);
			alc_miidbg_writereg(sc, MII_DBG_MSE20DB, val);
		}
	}
}

void
alc_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct alc_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_miibus;

	if ((ifp->if_flags & IFF_UP) == 0)
		return;

	mii_pollstat(mii);
	ifmr->ifm_status = mii->mii_media_status;
	ifmr->ifm_active = mii->mii_media_active;
}

int
alc_mediachange(struct ifnet *ifp)
{
	struct alc_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_miibus;
	int error;

	if (mii->mii_instance != 0) {
		struct mii_softc *miisc;

		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
	error = mii_mediachg(mii);

	return (error);
}

int
alc_match(struct device *dev, void *match, void *aux)
{
	return pci_matchbyid((struct pci_attach_args *)aux, alc_devices,
	    nitems(alc_devices));
}

void
alc_get_macaddr(struct alc_softc *sc)
{
	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0)
		alc_get_macaddr_816x(sc);
	else
		alc_get_macaddr_813x(sc);
}

void
alc_get_macaddr_813x(struct alc_softc *sc)
{
	uint32_t opt;
	uint16_t val;
	int eeprom, i;

	eeprom = 0;
	opt = CSR_READ_4(sc, ALC_OPT_CFG);
	if ((CSR_READ_4(sc, ALC_MASTER_CFG) & MASTER_OTP_SEL) != 0 &&
	    (CSR_READ_4(sc, ALC_TWSI_DEBUG) & TWSI_DEBUG_DEV_EXIST) != 0) {
		/*
		 * An EEPROM is present, so let TWSI reload the EEPROM
		 * configuration.  This sets the ethernet address of
		 * the controller.
		 */
		eeprom++;
		switch (sc->sc_product) {
		case PCI_PRODUCT_ATTANSIC_L1C:
		case PCI_PRODUCT_ATTANSIC_L2C:
			if ((opt & OPT_CFG_CLK_ENB) == 0) {
				opt |= OPT_CFG_CLK_ENB;
				CSR_WRITE_4(sc, ALC_OPT_CFG, opt);
				CSR_READ_4(sc, ALC_OPT_CFG);
				DELAY(1000);
			}
			break;
		case PCI_PRODUCT_ATTANSIC_L1D:
		case PCI_PRODUCT_ATTANSIC_L1D_1:
		case PCI_PRODUCT_ATTANSIC_L2C_1:
		case PCI_PRODUCT_ATTANSIC_L2C_2:
			alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr,
			    ALC_MII_DBG_ADDR, 0x00);
			val = alc_miibus_readreg(&sc->sc_dev, sc->alc_phyaddr,
			    ALC_MII_DBG_DATA);
			alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr,
			    ALC_MII_DBG_DATA, val & 0xFF7F);
			alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr,
			    ALC_MII_DBG_ADDR, 0x3B);
			val = alc_miibus_readreg(&sc->sc_dev, sc->alc_phyaddr,
			    ALC_MII_DBG_DATA);
			alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr,
			    ALC_MII_DBG_DATA, val | 0x0008);
			DELAY(20);
			break;
		}

		CSR_WRITE_4(sc, ALC_LTSSM_ID_CFG,
		    CSR_READ_4(sc, ALC_LTSSM_ID_CFG) & ~LTSSM_ID_WRO_ENB);
		CSR_WRITE_4(sc, ALC_WOL_CFG, 0);
		CSR_READ_4(sc, ALC_WOL_CFG);

		CSR_WRITE_4(sc, ALC_TWSI_CFG, CSR_READ_4(sc, ALC_TWSI_CFG) |
		    TWSI_CFG_SW_LD_START);
		for (i = 100; i > 0; i--) {
			DELAY(1000);
			if ((CSR_READ_4(sc, ALC_TWSI_CFG) &
			    TWSI_CFG_SW_LD_START) == 0)
				break;
		}
		if (i == 0)
			printf("%s: reloading EEPROM timeout!\n",
			    sc->sc_dev.dv_xname);
	} else {
		if (alcdebug)
			printf("%s: EEPROM not found!\n", sc->sc_dev.dv_xname);
	}
	if (eeprom != 0) {
		switch (sc->sc_product) {
		case PCI_PRODUCT_ATTANSIC_L1C:
		case PCI_PRODUCT_ATTANSIC_L2C:
			if ((opt & OPT_CFG_CLK_ENB) != 0) {
				opt &= ~OPT_CFG_CLK_ENB;
				CSR_WRITE_4(sc, ALC_OPT_CFG, opt);
				CSR_READ_4(sc, ALC_OPT_CFG);
				DELAY(1000);
			}
			break;
		case PCI_PRODUCT_ATTANSIC_L1D:
		case PCI_PRODUCT_ATTANSIC_L1D_1:
		case PCI_PRODUCT_ATTANSIC_L2C_1:
		case PCI_PRODUCT_ATTANSIC_L2C_2:
			alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr,
			    ALC_MII_DBG_ADDR, 0x00);
			val = alc_miibus_readreg(&sc->sc_dev, sc->alc_phyaddr,
			    ALC_MII_DBG_DATA);
			alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr,
			    ALC_MII_DBG_DATA, val | 0x0080);
			alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr,
			    ALC_MII_DBG_ADDR, 0x3B);
			val = alc_miibus_readreg(&sc->sc_dev, sc->alc_phyaddr,
			    ALC_MII_DBG_DATA);
			alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr,
			    ALC_MII_DBG_DATA, val & 0xFFF7);
			DELAY(20);
			break;
		}
	}

	alc_get_macaddr_par(sc);
}

void
alc_get_macaddr_816x(struct alc_softc *sc)
{
	uint32_t reg;
	int i, reloaded;

	reloaded = 0;
	/* Try to reload station address via TWSI. */
	for (i = 100; i > 0; i--) {
		reg = CSR_READ_4(sc, ALC_SLD);
		if ((reg & (SLD_PROGRESS | SLD_START)) == 0)
			break;
		DELAY(1000);
	}
	if (i != 0) {
		CSR_WRITE_4(sc, ALC_SLD, reg | SLD_START);
		for (i = 100; i > 0; i--) {
			DELAY(1000);
			reg = CSR_READ_4(sc, ALC_SLD);
			if ((reg & SLD_START) == 0)
				break;
		}
		if (i != 0)
			reloaded++;
	}

	/* Try to reload station address from EEPROM or FLASH. */
	if (reloaded == 0) {
		reg = CSR_READ_4(sc, ALC_EEPROM_LD);
		if ((reg & (EEPROM_LD_EEPROM_EXIST |
		    EEPROM_LD_FLASH_EXIST)) != 0) {
			for (i = 100; i > 0; i--) {
				reg = CSR_READ_4(sc, ALC_EEPROM_LD);
				if ((reg & (EEPROM_LD_PROGRESS |
				    EEPROM_LD_START)) == 0)
					break;
				DELAY(1000);
			}
			if (i != 0) {
				CSR_WRITE_4(sc, ALC_EEPROM_LD, reg |
				    EEPROM_LD_START);
				for (i = 100; i > 0; i--) {
					DELAY(1000);
					reg = CSR_READ_4(sc, ALC_EEPROM_LD);
					if ((reg & EEPROM_LD_START) == 0)
						break;
				}
			}
		}
	}

	alc_get_macaddr_par(sc);
}

void
alc_get_macaddr_par(struct alc_softc *sc)
{
	uint32_t ea[2];

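	/*
	 * PAR1 holds the two most significant bytes of the station
	 * address and PAR0 the remaining four; e.g. 00:11:22:33:44:55
	 * reads back as PAR1 = 0x0011 and PAR0 = 0x22334455.
	 */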
	ea[0] = CSR_READ_4(sc, ALC_PAR0);
	ea[1] = CSR_READ_4(sc, ALC_PAR1);
	sc->alc_eaddr[0] = (ea[1] >> 8) & 0xFF;
	sc->alc_eaddr[1] = (ea[1] >> 0) & 0xFF;
	sc->alc_eaddr[2] = (ea[0] >> 24) & 0xFF;
	sc->alc_eaddr[3] = (ea[0] >> 16) & 0xFF;
	sc->alc_eaddr[4] = (ea[0] >> 8) & 0xFF;
	sc->alc_eaddr[5] = (ea[0] >> 0) & 0xFF;
}

void
alc_disable_l0s_l1(struct alc_softc *sc)
{
	uint32_t pmcfg;

	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) == 0) {
		/* Another magic from vendor. */
		pmcfg = CSR_READ_4(sc, ALC_PM_CFG);
		pmcfg &= ~(PM_CFG_L1_ENTRY_TIMER_MASK | PM_CFG_CLK_SWH_L1 |
		    PM_CFG_ASPM_L0S_ENB | PM_CFG_ASPM_L1_ENB |
		    PM_CFG_MAC_ASPM_CHK | PM_CFG_SERDES_PD_EX_L1);
		pmcfg |= PM_CFG_SERDES_BUDS_RX_L1_ENB |
		    PM_CFG_SERDES_PLL_L1_ENB | PM_CFG_SERDES_L1_ENB;
		CSR_WRITE_4(sc, ALC_PM_CFG, pmcfg);
	}
}

void
alc_phy_reset(struct alc_softc *sc)
{
	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0)
		alc_phy_reset_816x(sc);
	else
		alc_phy_reset_813x(sc);
}

void
alc_phy_reset_813x(struct alc_softc *sc)
{
	uint16_t data;

	/* Reset magic from Linux. */
	CSR_WRITE_2(sc, ALC_GPHY_CFG, GPHY_CFG_SEL_ANA_RESET);
	CSR_READ_2(sc, ALC_GPHY_CFG);
	DELAY(10 * 1000);

	CSR_WRITE_2(sc, ALC_GPHY_CFG, GPHY_CFG_EXT_RESET |
	    GPHY_CFG_SEL_ANA_RESET);
	CSR_READ_2(sc, ALC_GPHY_CFG);
	DELAY(10 * 1000);

	/* DSP fixup, Vendor magic. */
	if (sc->sc_product == PCI_PRODUCT_ATTANSIC_L2C_1) {
		alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr,
		    ALC_MII_DBG_ADDR, 0x000A);
		data = alc_miibus_readreg(&sc->sc_dev, sc->alc_phyaddr,
		    ALC_MII_DBG_DATA);
		alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr,
		    ALC_MII_DBG_DATA, data & 0xDFFF);
	}
	if (sc->sc_product == PCI_PRODUCT_ATTANSIC_L1D ||
	    sc->sc_product == PCI_PRODUCT_ATTANSIC_L1D_1 ||
	    sc->sc_product == PCI_PRODUCT_ATTANSIC_L2C_1 ||
	    sc->sc_product == PCI_PRODUCT_ATTANSIC_L2C_2) {
		alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr,
		    ALC_MII_DBG_ADDR, 0x003B);
		data = alc_miibus_readreg(&sc->sc_dev, sc->alc_phyaddr,
		    ALC_MII_DBG_DATA);
		alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr,
		    ALC_MII_DBG_DATA, data & 0xFFF7);
		DELAY(20 * 1000);
	}
	if (sc->sc_product == PCI_PRODUCT_ATTANSIC_L1D) {
		alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr,
		    ALC_MII_DBG_ADDR, 0x0029);
		alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr,
		    ALC_MII_DBG_DATA, 0x929D);
	}
	if (sc->sc_product == PCI_PRODUCT_ATTANSIC_L1C ||
	    sc->sc_product == PCI_PRODUCT_ATTANSIC_L2C ||
	    sc->sc_product == PCI_PRODUCT_ATTANSIC_L1D_1 ||
	    sc->sc_product == PCI_PRODUCT_ATTANSIC_L2C_2) {
		alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr,
		    ALC_MII_DBG_ADDR, 0x0029);
		alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr,
		    ALC_MII_DBG_DATA, 0xB6DD);
	}

	/* Load DSP codes, vendor magic. */
	data = ANA_LOOP_SEL_10BT | ANA_EN_MASK_TB | ANA_EN_10BT_IDLE |
	    ((1 << ANA_INTERVAL_SEL_TIMER_SHIFT) & ANA_INTERVAL_SEL_TIMER_MASK);
	alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr,
	    ALC_MII_DBG_ADDR, MII_ANA_CFG18);
	alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr,
	    ALC_MII_DBG_DATA, data);

	data = ((2 << ANA_SERDES_CDR_BW_SHIFT) & ANA_SERDES_CDR_BW_MASK) |
	    ANA_SERDES_EN_DEEM | ANA_SERDES_SEL_HSP | ANA_SERDES_EN_PLL |
	    ANA_SERDES_EN_LCKDT;
	alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr,
	    ALC_MII_DBG_ADDR, MII_ANA_CFG5);
	alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr,
	    ALC_MII_DBG_DATA, data);

	data = ((44 << ANA_LONG_CABLE_TH_100_SHIFT) &
	    ANA_LONG_CABLE_TH_100_MASK) |
	    ((33 << ANA_SHORT_CABLE_TH_100_SHIFT) &
	    ANA_SHORT_CABLE_TH_100_MASK) |
	    ANA_BP_BAD_LINK_ACCUM | ANA_BP_SMALL_BW;
	alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr,
	    ALC_MII_DBG_ADDR, MII_ANA_CFG54);
	alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr,
	    ALC_MII_DBG_DATA, data);

	data = ((11 << ANA_IECHO_ADJ_3_SHIFT) & ANA_IECHO_ADJ_3_MASK) |
	    ((11 << ANA_IECHO_ADJ_2_SHIFT) & ANA_IECHO_ADJ_2_MASK) |
	    ((8 << ANA_IECHO_ADJ_1_SHIFT) & ANA_IECHO_ADJ_1_MASK) |
	    ((8 << ANA_IECHO_ADJ_0_SHIFT) & ANA_IECHO_ADJ_0_MASK);
	alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr,
	    ALC_MII_DBG_ADDR, MII_ANA_CFG4);
	alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr,
	    ALC_MII_DBG_DATA, data);

	data = ((7 << ANA_MANUL_SWICH_ON_SHIFT) & ANA_MANUL_SWICH_ON_MASK) |
	    ANA_RESTART_CAL | ANA_MAN_ENABLE | ANA_SEL_HSP | ANA_EN_HB |
	    ANA_OEN_125M;
	alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr,
	    ALC_MII_DBG_ADDR, MII_ANA_CFG0);
	alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr,
	    ALC_MII_DBG_DATA, data);
	DELAY(1000);

	/* Disable hibernation. */
	alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr, ALC_MII_DBG_ADDR,
	    0x0029);
	data = alc_miibus_readreg(&sc->sc_dev, sc->alc_phyaddr,
	    ALC_MII_DBG_DATA);
	data &= ~0x8000;
	alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr, ALC_MII_DBG_DATA,
	    data);

	alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr, ALC_MII_DBG_ADDR,
	    0x000B);
	data = alc_miibus_readreg(&sc->sc_dev, sc->alc_phyaddr,
	    ALC_MII_DBG_DATA);
	data &= ~0x8000;
	alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr, ALC_MII_DBG_DATA,
	    data);
}

void
alc_phy_reset_816x(struct alc_softc *sc)
{
	uint32_t val;

	val = CSR_READ_4(sc, ALC_GPHY_CFG);
	val &= ~(GPHY_CFG_EXT_RESET | GPHY_CFG_LED_MODE |
	    GPHY_CFG_GATE_25M_ENB | GPHY_CFG_PHY_IDDQ | GPHY_CFG_PHY_PLL_ON |
	    GPHY_CFG_PWDOWN_HW | GPHY_CFG_100AB_ENB);
	val |= GPHY_CFG_SEL_ANA_RESET;
	/* Disable PHY hibernation. */
	val &= ~(GPHY_CFG_HIB_PULSE | GPHY_CFG_HIB_EN);
	CSR_WRITE_4(sc, ALC_GPHY_CFG, val);
	DELAY(10);
	CSR_WRITE_4(sc, ALC_GPHY_CFG, val | GPHY_CFG_EXT_RESET);
	DELAY(800);
	/* Vendor PHY magic. */
	/* Disable PHY hibernation. */
	alc_miidbg_writereg(sc, MII_DBG_LEGCYPS,
	    DBG_LEGCYPS_DEFAULT & ~DBG_LEGCYPS_ENB);
	alc_miidbg_writereg(sc, MII_DBG_HIBNEG, DBG_HIBNEG_DEFAULT &
	    ~(DBG_HIBNEG_PSHIB_EN | DBG_HIBNEG_HIB_PULSE));
	alc_miidbg_writereg(sc, MII_DBG_GREENCFG, DBG_GREENCFG_DEFAULT);
	/* XXX Disable EEE. */
	val = CSR_READ_4(sc, ALC_LPI_CTL);
	val &= ~LPI_CTL_ENB;
	CSR_WRITE_4(sc, ALC_LPI_CTL, val);
	alc_miiext_writereg(sc, MII_EXT_ANEG, MII_EXT_ANEG_LOCAL_EEEADV, 0);
	/* PHY power saving. */
	alc_miidbg_writereg(sc, MII_DBG_TST10BTCFG, DBG_TST10BTCFG_DEFAULT);
	alc_miidbg_writereg(sc, MII_DBG_SRDSYSMOD, DBG_SRDSYSMOD_DEFAULT);
	alc_miidbg_writereg(sc, MII_DBG_TST100BTCFG, DBG_TST100BTCFG_DEFAULT);
	alc_miidbg_writereg(sc, MII_DBG_ANACTL, DBG_ANACTL_DEFAULT);
	val = alc_miidbg_readreg(sc, MII_DBG_GREENCFG2);
	val &= ~DBG_GREENCFG2_GATE_DFSE_EN;
	alc_miidbg_writereg(sc, MII_DBG_GREENCFG2, val);
	/* RTL8139C, 120m issue. */
	alc_miiext_writereg(sc, MII_EXT_ANEG, MII_EXT_ANEG_NLP78,
	    ANEG_NLP78_120M_DEFAULT);
	alc_miiext_writereg(sc, MII_EXT_ANEG, MII_EXT_ANEG_S3DIG10,
	    ANEG_S3DIG10_DEFAULT);
	if ((sc->alc_flags & ALC_FLAG_LINK_WAR) != 0) {
		/* Turn off half amplitude. */
		val = alc_miiext_readreg(sc, MII_EXT_PCS, MII_EXT_CLDCTL3);
		val |= EXT_CLDCTL3_BP_CABLE1TH_DET_GT;
		alc_miiext_writereg(sc, MII_EXT_PCS, MII_EXT_CLDCTL3, val);
		/* Turn off Green feature. */
		val = alc_miidbg_readreg(sc, MII_DBG_GREENCFG2);
		val |= DBG_GREENCFG2_BP_GREEN;
		alc_miidbg_writereg(sc, MII_DBG_GREENCFG2, val);
		/* Turn off half bias. */
		val = alc_miiext_readreg(sc, MII_EXT_PCS, MII_EXT_CLDCTL5);
		val |= EXT_CLDCTL5_BP_VD_HLFBIAS;
		alc_miiext_writereg(sc, MII_EXT_PCS, MII_EXT_CLDCTL5, val);
	}
}

void
alc_phy_down(struct alc_softc *sc)
{
	uint32_t gphy;

	switch (sc->sc_product) {
	case PCI_PRODUCT_ATTANSIC_AR8161:
	case PCI_PRODUCT_ATTANSIC_E2200:
	case PCI_PRODUCT_ATTANSIC_E2400:
	case PCI_PRODUCT_ATTANSIC_E2500:
	case PCI_PRODUCT_ATTANSIC_AR8162:
	case PCI_PRODUCT_ATTANSIC_AR8171:
	case PCI_PRODUCT_ATTANSIC_AR8172:
		gphy = CSR_READ_4(sc, ALC_GPHY_CFG);
		gphy &= ~(GPHY_CFG_EXT_RESET | GPHY_CFG_LED_MODE |
		    GPHY_CFG_100AB_ENB | GPHY_CFG_PHY_PLL_ON);
		gphy |= GPHY_CFG_HIB_EN | GPHY_CFG_HIB_PULSE |
		    GPHY_CFG_SEL_ANA_RESET;
		gphy |= GPHY_CFG_PHY_IDDQ | GPHY_CFG_PWDOWN_HW;
		CSR_WRITE_4(sc, ALC_GPHY_CFG, gphy);
		break;
	case PCI_PRODUCT_ATTANSIC_L1D:
	case PCI_PRODUCT_ATTANSIC_L1D_1:
	case PCI_PRODUCT_ATTANSIC_L2C_1:
	case PCI_PRODUCT_ATTANSIC_L2C_2:
		/*
		 * GPHY power down caused more problems on AR8151 v2.0.
		 * When the driver is reloaded after GPHY power down,
		 * accesses to PHY/MAC registers hung the system. Only
		 * a cold boot recovered from it.  I'm not sure whether
		 * AR8151 v1.0 also requires this, as I don't have an
		 * AR8151 v1.0 controller in hand.
		 * The only option left is to isolate the PHY and
		 * initiate a power down, which in turn saves more
		 * power when the driver is unloaded.
		 */
		alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr,
		    MII_BMCR, BMCR_ISO | BMCR_PDOWN);
		break;
	default:
		/* Force PHY down. */
		CSR_WRITE_2(sc, ALC_GPHY_CFG, GPHY_CFG_EXT_RESET |
		    GPHY_CFG_SEL_ANA_RESET | GPHY_CFG_PHY_IDDQ |
		    GPHY_CFG_PWDOWN_HW);
		DELAY(1000);
		break;
	}
}

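/*
 * ASPM (Active State Power Management) tuning.  L0s and L1 are PCIe
 * link power states that trade exit latency for power savings; the
 * AR813x/AR815x and AR816x families need different register recipes,
 * hence the split below.
 */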
void
alc_aspm(struct alc_softc *sc, int init, uint64_t media)
{
	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0)
		alc_aspm_816x(sc, init);
	else
		alc_aspm_813x(sc, media);
}

void
alc_aspm_813x(struct alc_softc *sc, uint64_t media)
{
	uint32_t pmcfg;
	uint16_t linkcfg;

	pmcfg = CSR_READ_4(sc, ALC_PM_CFG);
	if ((sc->alc_flags & (ALC_FLAG_APS | ALC_FLAG_PCIE)) ==
	    (ALC_FLAG_APS | ALC_FLAG_PCIE))
		linkcfg = CSR_READ_2(sc, sc->alc_expcap + PCI_PCIE_LCSR);
	else
		linkcfg = 0;
	pmcfg &= ~PM_CFG_SERDES_PD_EX_L1;
	pmcfg &= ~(PM_CFG_L1_ENTRY_TIMER_MASK | PM_CFG_LCKDET_TIMER_MASK);
	pmcfg |= PM_CFG_MAC_ASPM_CHK;
	pmcfg |= (PM_CFG_LCKDET_TIMER_DEFAULT << PM_CFG_LCKDET_TIMER_SHIFT);
	pmcfg &= ~(PM_CFG_ASPM_L1_ENB | PM_CFG_ASPM_L0S_ENB);

	if ((sc->alc_flags & ALC_FLAG_APS) != 0) {
		/* Disable extended sync except AR8152 B v1.0 */
		linkcfg &= ~0x80;
		if (sc->sc_product == PCI_PRODUCT_ATTANSIC_L2C_1 &&
		    sc->alc_rev == ATHEROS_AR8152_B_V10)
			linkcfg |= 0x80;
		CSR_WRITE_2(sc, sc->alc_expcap + PCI_PCIE_LCSR, linkcfg);
		pmcfg &= ~(PM_CFG_EN_BUFS_RX_L0S | PM_CFG_SA_DLY_ENB |
		    PM_CFG_HOTRST);
		pmcfg |= (PM_CFG_L1_ENTRY_TIMER_DEFAULT <<
		    PM_CFG_L1_ENTRY_TIMER_SHIFT);
		pmcfg &= ~PM_CFG_PM_REQ_TIMER_MASK;
		pmcfg |= (PM_CFG_PM_REQ_TIMER_DEFAULT <<
		    PM_CFG_PM_REQ_TIMER_SHIFT);
		pmcfg |= PM_CFG_SERDES_PD_EX_L1 | PM_CFG_PCIE_RECV;
	}

	if ((sc->alc_flags & ALC_FLAG_LINK) != 0) {
		if ((sc->alc_flags & ALC_FLAG_L0S) != 0)
			pmcfg |= PM_CFG_ASPM_L0S_ENB;
		if ((sc->alc_flags & ALC_FLAG_L1S) != 0)
			pmcfg |= PM_CFG_ASPM_L1_ENB;
		if ((sc->alc_flags & ALC_FLAG_APS) != 0) {
			if (sc->sc_product == PCI_PRODUCT_ATTANSIC_L2C_1)
				pmcfg &= ~PM_CFG_ASPM_L0S_ENB;
			pmcfg &= ~(PM_CFG_SERDES_L1_ENB |
			    PM_CFG_SERDES_PLL_L1_ENB |
			    PM_CFG_SERDES_BUDS_RX_L1_ENB);
			pmcfg |= PM_CFG_CLK_SWH_L1;
			if (media == IFM_100_TX || media == IFM_1000_T) {
				pmcfg &= ~PM_CFG_L1_ENTRY_TIMER_MASK;
				switch (sc->sc_product) {
				case PCI_PRODUCT_ATTANSIC_L2C_1:
					pmcfg |= (7 <<
					    PM_CFG_L1_ENTRY_TIMER_SHIFT);
					break;
				case PCI_PRODUCT_ATTANSIC_L1D_1:
				case PCI_PRODUCT_ATTANSIC_L2C_2:
					pmcfg |= (4 <<
					    PM_CFG_L1_ENTRY_TIMER_SHIFT);
					break;
				default:
					pmcfg |= (15 <<
					    PM_CFG_L1_ENTRY_TIMER_SHIFT);
					break;
				}
			}
		} else {
			pmcfg |= PM_CFG_SERDES_L1_ENB |
			    PM_CFG_SERDES_PLL_L1_ENB |
			    PM_CFG_SERDES_BUDS_RX_L1_ENB;
			pmcfg &= ~(PM_CFG_CLK_SWH_L1 |
			    PM_CFG_ASPM_L1_ENB | PM_CFG_ASPM_L0S_ENB);
		}
	} else {
		pmcfg &= ~(PM_CFG_SERDES_BUDS_RX_L1_ENB | PM_CFG_SERDES_L1_ENB |
		    PM_CFG_SERDES_PLL_L1_ENB);
		pmcfg |= PM_CFG_CLK_SWH_L1;
		if ((sc->alc_flags & ALC_FLAG_L1S) != 0)
			pmcfg |= PM_CFG_ASPM_L1_ENB;
	}
	CSR_WRITE_4(sc, ALC_PM_CFG, pmcfg);
}

void
alc_aspm_816x(struct alc_softc *sc, int init)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	uint32_t pmcfg;

	pmcfg = CSR_READ_4(sc, ALC_PM_CFG);
	pmcfg &= ~PM_CFG_L1_ENTRY_TIMER_816X_MASK;
	pmcfg |= PM_CFG_L1_ENTRY_TIMER_816X_DEFAULT;
	pmcfg &= ~PM_CFG_PM_REQ_TIMER_MASK;
	pmcfg |= PM_CFG_PM_REQ_TIMER_816X_DEFAULT;
	pmcfg &= ~PM_CFG_LCKDET_TIMER_MASK;
	pmcfg |= PM_CFG_LCKDET_TIMER_DEFAULT;
	pmcfg |= PM_CFG_SERDES_PD_EX_L1 | PM_CFG_CLK_SWH_L1 | PM_CFG_PCIE_RECV;
	pmcfg &= ~(PM_CFG_RX_L1_AFTER_L0S | PM_CFG_TX_L1_AFTER_L0S |
	    PM_CFG_ASPM_L1_ENB | PM_CFG_ASPM_L0S_ENB |
	    PM_CFG_SERDES_L1_ENB | PM_CFG_SERDES_PLL_L1_ENB |
	    PM_CFG_SERDES_BUDS_RX_L1_ENB | PM_CFG_SA_DLY_ENB |
	    PM_CFG_MAC_ASPM_CHK | PM_CFG_HOTRST);
	if (AR816X_REV(sc->alc_rev) <= AR816X_REV_A1 &&
	    (sc->alc_rev & 0x01) != 0)
		pmcfg |= PM_CFG_SERDES_L1_ENB | PM_CFG_SERDES_PLL_L1_ENB;
	if ((sc->alc_flags & ALC_FLAG_LINK) != 0) {
		/* Link up, enable both L0s, L1s. */
		pmcfg |= PM_CFG_ASPM_L0S_ENB | PM_CFG_ASPM_L1_ENB |
		    PM_CFG_MAC_ASPM_CHK;
	} else {
		if (init != 0)
			pmcfg |= PM_CFG_ASPM_L0S_ENB | PM_CFG_ASPM_L1_ENB |
			    PM_CFG_MAC_ASPM_CHK;
		else if ((ifp->if_flags & IFF_RUNNING) != 0)
			pmcfg |= PM_CFG_ASPM_L1_ENB | PM_CFG_MAC_ASPM_CHK;
	}
	CSR_WRITE_4(sc, ALC_PM_CFG, pmcfg);
}

void
alc_init_pcie(struct alc_softc *sc, int base)
{
	const char *aspm_state[] = { "L0s/L1", "L0s", "L1", "L0s/L1" };
	uint32_t cap, ctl, val;
	int state;

	/* Clear data link and flow-control protocol error. */
	val = CSR_READ_4(sc, ALC_PEX_UNC_ERR_SEV);
	val &= ~(PEX_UNC_ERR_SEV_DLP | PEX_UNC_ERR_SEV_FCP);
	CSR_WRITE_4(sc, ALC_PEX_UNC_ERR_SEV, val);
	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) == 0) {
		CSR_WRITE_4(sc, ALC_LTSSM_ID_CFG,
		    CSR_READ_4(sc, ALC_LTSSM_ID_CFG) & ~LTSSM_ID_WRO_ENB);
		CSR_WRITE_4(sc, ALC_PCIE_PHYMISC,
		    CSR_READ_4(sc, ALC_PCIE_PHYMISC) |
		    PCIE_PHYMISC_FORCE_RCV_DET);
		if (sc->sc_product == PCI_PRODUCT_ATTANSIC_L2C_1 &&
		    sc->alc_rev == ATHEROS_AR8152_B_V10) {
			val = CSR_READ_4(sc, ALC_PCIE_PHYMISC2);
			val &= ~(PCIE_PHYMISC2_SERDES_CDR_MASK |
			    PCIE_PHYMISC2_SERDES_TH_MASK);
			val |= 3 << PCIE_PHYMISC2_SERDES_CDR_SHIFT;
			val |= 3 << PCIE_PHYMISC2_SERDES_TH_SHIFT;
			CSR_WRITE_4(sc, ALC_PCIE_PHYMISC2, val);
		}
		/* Disable ASPM L0S and L1. */
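		/*
		 * Link Capabilities bits 11:10 advertise ASPM support,
		 * Link Control bits 1:0 show what was enabled and bit
		 * 3 selects a 128-byte read completion boundary.
		 */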
		cap = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
		    base + PCI_PCIE_LCAP);
		if ((cap & 0x00000c00) != 0) {
			ctl = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
			    base + PCI_PCIE_LCSR);
			if ((ctl & 0x08) != 0)
				sc->alc_rcb = DMA_CFG_RCB_128;
			if (alcdebug)
				printf("%s: RCB %u bytes\n",
				    sc->sc_dev.dv_xname,
				    sc->alc_rcb == DMA_CFG_RCB_64 ? 64 : 128);
			state = ctl & 0x03;
			if (state & 0x01)
				sc->alc_flags |= ALC_FLAG_L0S;
			if (state & 0x02)
				sc->alc_flags |= ALC_FLAG_L1S;
			if (alcdebug)
				printf("%s: ASPM %s %s\n",
				    sc->sc_dev.dv_xname,
				    aspm_state[state],
				    state == 0 ? "disabled" : "enabled");
			alc_disable_l0s_l1(sc);
		}
	} else {
		val = CSR_READ_4(sc, ALC_PDLL_TRNS1);
		val &= ~PDLL_TRNS1_D3PLLOFF_ENB;
		CSR_WRITE_4(sc, ALC_PDLL_TRNS1, val);
		val = CSR_READ_4(sc, ALC_MASTER_CFG);
		if (AR816X_REV(sc->alc_rev) <= AR816X_REV_A1 &&
		    (sc->alc_rev & 0x01) != 0) {
			if ((val & MASTER_WAKEN_25M) == 0 ||
			    (val & MASTER_CLK_SEL_DIS) == 0) {
				val |= MASTER_WAKEN_25M | MASTER_CLK_SEL_DIS;
				CSR_WRITE_4(sc, ALC_MASTER_CFG, val);
			}
		} else {
			if ((val & MASTER_WAKEN_25M) == 0 ||
			    (val & MASTER_CLK_SEL_DIS) != 0) {
				val |= MASTER_WAKEN_25M;
				val &= ~MASTER_CLK_SEL_DIS;
				CSR_WRITE_4(sc, ALC_MASTER_CFG, val);
			}
		}
	}
}

void
alc_config_msi(struct alc_softc *sc)
{
	uint32_t ctl, mod;

	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) == 0) {
		/*
		 * Interrupt moderation seems to be controlled by the
		 * ALC_MSI_RETRANS_TIMER register when MSI/MSI-X is
		 * active.  The driver programs that register from the
		 * Rx interrupt moderation parameter.
		 */
		ctl = CSR_READ_4(sc, ALC_MSI_RETRANS_TIMER);
		ctl &= ~MSI_RETRANS_TIMER_MASK;
		ctl &= ~MSI_RETRANS_MASK_SEL_LINE;
		mod = ALC_USECS(sc->alc_int_rx_mod);
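		/* Never program a zero interval; keep at least one tick. */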
		if (mod == 0)
			mod = 1;
		ctl |= mod;
		if ((sc->alc_flags & ALC_FLAG_MSI) != 0)
			CSR_WRITE_4(sc, ALC_MSI_RETRANS_TIMER, ctl |
			    MSI_RETRANS_MASK_SEL_LINE);
		else
			CSR_WRITE_4(sc, ALC_MSI_RETRANS_TIMER, 0);
	}
}

void
alc_attach(struct device *parent, struct device *self, void *aux)
{
	struct alc_softc *sc = (struct alc_softc *)self;
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr;
	struct ifnet *ifp;
	pcireg_t memtype;
	uint16_t burst;
	int base, error = 0;

	/* Set PHY address. */
	sc->alc_phyaddr = ALC_PHY_ADDR;

	/* Get PCI and chip id/revision. */
	sc->sc_product = PCI_PRODUCT(pa->pa_id);
	sc->alc_rev = PCI_REVISION(pa->pa_class);

	/*
	 * One odd thing is that AR8132 uses the same PHY hardware (F1
	 * gigabit PHY) as AR8131. So atphy(4) reports that the PHY of
	 * AR8132 supports 1000Mbps, but that's not true. The PHY used
	 * in AR8132 can't establish a gigabit link even though it
	 * shows the same PHY model/revision number as AR8131.
	 */
	switch (sc->sc_product) {
	case PCI_PRODUCT_ATTANSIC_E2200:
	case PCI_PRODUCT_ATTANSIC_E2400:
	case PCI_PRODUCT_ATTANSIC_E2500:
		sc->alc_flags |= ALC_FLAG_E2X00;
		/* FALLTHROUGH */
	case PCI_PRODUCT_ATTANSIC_AR8161:
		if (AR816X_REV(sc->alc_rev) == 0)
			sc->alc_flags |= ALC_FLAG_LINK_WAR;
		/* FALLTHROUGH */
	case PCI_PRODUCT_ATTANSIC_AR8171:
		sc->alc_flags |= ALC_FLAG_AR816X_FAMILY;
		break;
	case PCI_PRODUCT_ATTANSIC_AR8162:
	case PCI_PRODUCT_ATTANSIC_AR8172:
		sc->alc_flags |= ALC_FLAG_FASTETHER | ALC_FLAG_AR816X_FAMILY;
		break;
	case PCI_PRODUCT_ATTANSIC_L2C_1:
	case PCI_PRODUCT_ATTANSIC_L2C_2:
		sc->alc_flags |= ALC_FLAG_APS;
		/* FALLTHROUGH */
	case PCI_PRODUCT_ATTANSIC_L2C:
		sc->alc_flags |= ALC_FLAG_FASTETHER;
		break;
	case PCI_PRODUCT_ATTANSIC_L1D:
	case PCI_PRODUCT_ATTANSIC_L1D_1:
		sc->alc_flags |= ALC_FLAG_APS;
		/* FALLTHROUGH */
	default:
		break;
	}
	sc->alc_flags |= ALC_FLAG_JUMBO;

	/*
	 * Allocate IO memory
	 */
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, ALC_PCIR_BAR);
	if (pci_mapreg_map(pa, ALC_PCIR_BAR, memtype, 0, &sc->sc_mem_bt,
	    &sc->sc_mem_bh, NULL, &sc->sc_mem_size, 0)) {
		printf(": can't map mem space\n");
		return;
	}

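	/* Prefer MSI; fall back to a legacy INTx mapping if that fails. */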
	sc->alc_flags |= ALC_FLAG_MSI;
	if (pci_intr_map_msi(pa, &ih) != 0) {
		if (pci_intr_map(pa, &ih) != 0) {
			printf(": can't map interrupt\n");
			goto fail;
		}
		sc->alc_flags &= ~ALC_FLAG_MSI;
	}

	/*
	 * Allocate IRQ
	 */
	intrstr = pci_intr_string(pc, ih);
	sc->sc_irq_handle = pci_intr_establish(pc, ih, IPL_NET, alc_intr, sc,
	    sc->sc_dev.dv_xname);
	if (sc->sc_irq_handle == NULL) {
		printf(": could not establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		goto fail;
	}
	printf(": %s", intrstr);

	alc_config_msi(sc);

	sc->sc_dmat = pa->pa_dmat;
	sc->sc_pct = pa->pa_pc;
	sc->sc_pcitag = pa->pa_tag;

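	/*
	 * The jumbo frame ceiling differs per chip: the L1D/L2C
	 * variants below top out at 6KB frames, the rest at 9KB.
	 */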
	switch (sc->sc_product) {
	case PCI_PRODUCT_ATTANSIC_L1D:
	case PCI_PRODUCT_ATTANSIC_L1D_1:
	case PCI_PRODUCT_ATTANSIC_L2C_1:
	case PCI_PRODUCT_ATTANSIC_L2C_2:
		sc->alc_max_framelen = 6 * 1024;
		break;
	default:
		sc->alc_max_framelen = 9 * 1024;
		break;
	}

	/*
	 * It seems that AR813x/AR815x has a silicon bug in the SMB
	 * block. In addition, Atheros said that enabling SMB wouldn't
	 * improve performance. However, I think it's bad to access
	 * lots of registers to extract MAC statistics.
	 */
	sc->alc_flags |= ALC_FLAG_SMB_BUG;
	/*
	 * Don't use Tx CMB. It is known to have silicon bug.
	 */
	sc->alc_flags |= ALC_FLAG_CMB_BUG;
	sc->alc_chip_rev = CSR_READ_4(sc, ALC_MASTER_CFG) >>
	    MASTER_CHIP_REV_SHIFT;
	if (alcdebug) {
		printf("%s: PCI device revision : 0x%04x\n",
		    sc->sc_dev.dv_xname, sc->alc_rev);
		printf("%s: Chip id/revision : 0x%04x\n",
		    sc->sc_dev.dv_xname, sc->alc_chip_rev);
		printf("%s: %u Tx FIFO, %u Rx FIFO\n", sc->sc_dev.dv_xname,
		    CSR_READ_4(sc, ALC_SRAM_TX_FIFO_LEN) * 8,
		    CSR_READ_4(sc, ALC_SRAM_RX_FIFO_LEN) * 8);
	}

	/* Initialize DMA parameters. */
	sc->alc_dma_rd_burst = 0;
	sc->alc_dma_wr_burst = 0;
	sc->alc_rcb = DMA_CFG_RCB_64;
	if (pci_get_capability(pc, pa->pa_tag, PCI_CAP_PCIEXPRESS,
	    &base, NULL)) {
		sc->alc_flags |= ALC_FLAG_PCIE;
		sc->alc_expcap = base;
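		/*
		 * Device Control bits 14:12 encode the maximum read
		 * request size and bits 7:5 the maximum payload size;
		 * both index alc_dma_burst[] above.
		 */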
		burst = CSR_READ_2(sc, base + PCI_PCIE_DCSR);
		sc->alc_dma_rd_burst = (burst & 0x7000) >> 12;
		sc->alc_dma_wr_burst = (burst & 0x00e0) >> 5;
		if (alcdebug) {
			printf("%s: Read request size : %u bytes.\n",
			    sc->sc_dev.dv_xname,
			    alc_dma_burst[sc->alc_dma_rd_burst]);
			printf("%s: TLP payload size : %u bytes.\n",
			    sc->sc_dev.dv_xname,
			    alc_dma_burst[sc->alc_dma_wr_burst]);
		}
		if (alc_dma_burst[sc->alc_dma_rd_burst] > 1024)
			sc->alc_dma_rd_burst = 3;
		if (alc_dma_burst[sc->alc_dma_wr_burst] > 1024)
			sc->alc_dma_wr_burst = 3;
		/*
		 * Force maximum payload size to 128 bytes for
		 * E2200/E2400/E2500.
		 * Otherwise it triggers DMA write error.
		 */
		if ((sc->alc_flags & ALC_FLAG_E2X00) != 0)
			sc->alc_dma_wr_burst = 0;
		alc_init_pcie(sc, base);
	}

	/* Reset PHY. */
	alc_phy_reset(sc);

	/* Reset the ethernet controller. */
	alc_stop_mac(sc);
	alc_reset(sc);

	error = alc_dma_alloc(sc);
	if (error)
		goto fail;

	/* Load station address. */
	alc_get_macaddr(sc);

	ifp = &sc->sc_arpcom.ac_if;
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = alc_ioctl;
	ifp->if_start = alc_start;
	ifp->if_watchdog = alc_watchdog;
	IFQ_SET_MAXLEN(&ifp->if_snd, ALC_TX_RING_CNT - 1);
	bcopy(sc->alc_eaddr, sc->sc_arpcom.ac_enaddr, ETHER_ADDR_LEN);
	bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);

	ifp->if_capabilities = IFCAP_VLAN_MTU;

#ifdef ALC_CHECKSUM
	ifp->if_capabilities |= IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 |
	    IFCAP_CSUM_UDPv4;
#endif

#if NVLAN > 0
	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
#endif

	printf(", address %s\n", ether_sprintf(sc->sc_arpcom.ac_enaddr));

	/* Set up MII bus. */
	sc->sc_miibus.mii_ifp = ifp;
	sc->sc_miibus.mii_readreg = alc_miibus_readreg;
	sc->sc_miibus.mii_writereg = alc_miibus_writereg;
	sc->sc_miibus.mii_statchg = alc_miibus_statchg;

	ifmedia_init(&sc->sc_miibus.mii_media, 0, alc_mediachange,
	    alc_mediastatus);
	mii_attach(self, &sc->sc_miibus, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, MIIF_DOPAUSE);

	if (LIST_FIRST(&sc->sc_miibus.mii_phys) == NULL) {
		printf("%s: no PHY found!\n", sc->sc_dev.dv_xname);
		ifmedia_add(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_MANUAL,
		    0, NULL);
		ifmedia_set(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_MANUAL);
	} else
		ifmedia_set(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_AUTO);

	if_attach(ifp);
	ether_ifattach(ifp);

	timeout_set(&sc->alc_tick_ch, alc_tick, sc);

	return;
fail:
	alc_dma_free(sc);
	if (sc->sc_irq_handle != NULL)
		pci_intr_disestablish(pc, sc->sc_irq_handle);
	if (sc->sc_mem_size)
		bus_space_unmap(sc->sc_mem_bt, sc->sc_mem_bh, sc->sc_mem_size);
}

int
alc_detach(struct device *self, int flags)
{
	struct alc_softc *sc = (struct alc_softc *)self;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	int s;

	s = splnet();
	alc_stop(sc);
	splx(s);

	mii_detach(&sc->sc_miibus, MII_PHY_ANY, MII_OFFSET_ANY);

	/* Delete all remaining media. */
	ifmedia_delete_instance(&sc->sc_miibus.mii_media, IFM_INST_ANY);

	ether_ifdetach(ifp);
	if_detach(ifp);
	alc_dma_free(sc);

	alc_phy_down(sc);
	if (sc->sc_irq_handle != NULL) {
		pci_intr_disestablish(sc->sc_pct, sc->sc_irq_handle);
		sc->sc_irq_handle = NULL;
	}

	return (0);
}

int
alc_activate(struct device *self, int act)
{
	struct alc_softc *sc = (struct alc_softc *)self;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	int rv = 0;

	switch (act) {
	case DVACT_SUSPEND:
		if (ifp->if_flags & IFF_RUNNING)
			alc_stop(sc);
		rv = config_activate_children(self, act);
		break;
	case DVACT_RESUME:
		if (ifp->if_flags & IFF_UP)
			alc_init(ifp);
		break;
	default:
		rv = config_activate_children(self, act);
		break;
	}
	return (rv);
}

int
alc_dma_alloc(struct alc_softc *sc)
{
	struct alc_txdesc *txd;
	struct alc_rxdesc *rxd;
	int nsegs, error, i;

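	/*
	 * Every DMA object below follows the usual bus_dma(9) sequence:
	 * create a map, allocate wired memory, map it into the kernel,
	 * then load the map to obtain the physical address for the chip.
	 */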
1475 	/*
1476 	 * Create DMA stuffs for TX ring
1477 	 */
1478 	error = bus_dmamap_create(sc->sc_dmat, ALC_TX_RING_SZ, 1,
1479 	    ALC_TX_RING_SZ, 0, BUS_DMA_NOWAIT, &sc->alc_cdata.alc_tx_ring_map);
1480 	if (error)
1481 		return (ENOBUFS);
1482 
1483 	/* Allocate DMA'able memory for TX ring */
1484 	error = bus_dmamem_alloc(sc->sc_dmat, ALC_TX_RING_SZ,
1485 	    ETHER_ALIGN, 0, &sc->alc_rdata.alc_tx_ring_seg, 1,
1486 	    &nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO);
1487 	if (error) {
1488 		printf("%s: could not allocate DMA'able memory for Tx ring.\n",
1489 		    sc->sc_dev.dv_xname);
1490 		return (error);
1491 	}
1492 
1493 	error = bus_dmamem_map(sc->sc_dmat, &sc->alc_rdata.alc_tx_ring_seg,
1494 	    nsegs, ALC_TX_RING_SZ, (caddr_t *)&sc->alc_rdata.alc_tx_ring,
1495 	    BUS_DMA_NOWAIT);
1496 	if (error)
1497 		return (ENOBUFS);
1498 
1499 	/* Load the DMA map for Tx ring. */
1500 	error = bus_dmamap_load(sc->sc_dmat, sc->alc_cdata.alc_tx_ring_map,
1501 	    sc->alc_rdata.alc_tx_ring, ALC_TX_RING_SZ, NULL, BUS_DMA_WAITOK);
1502 	if (error) {
1503 		printf("%s: could not load DMA'able memory for Tx ring.\n",
1504 		    sc->sc_dev.dv_xname);
1505 		bus_dmamem_free(sc->sc_dmat,
1506 		    (bus_dma_segment_t *)&sc->alc_rdata.alc_tx_ring, 1);
1507 		return (error);
1508 	}
1509 
1510 	sc->alc_rdata.alc_tx_ring_paddr =
1511 	    sc->alc_cdata.alc_tx_ring_map->dm_segs[0].ds_addr;
1512 
1513 	/*
1514 	 * Create DMA stuffs for RX ring
1515 	 */
1516 	error = bus_dmamap_create(sc->sc_dmat, ALC_RX_RING_SZ, 1,
1517 	    ALC_RX_RING_SZ, 0, BUS_DMA_NOWAIT, &sc->alc_cdata.alc_rx_ring_map);
1518 	if (error)
1519 		return (ENOBUFS);
1520 
1521 	/* Allocate DMA'able memory for RX ring */
1522 	error = bus_dmamem_alloc(sc->sc_dmat, ALC_RX_RING_SZ,
1523 	    ETHER_ALIGN, 0, &sc->alc_rdata.alc_rx_ring_seg, 1,
1524 	    &nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO);
1525 	if (error) {
1526 		printf("%s: could not allocate DMA'able memory for Rx ring.\n",
1527 		    sc->sc_dev.dv_xname);
1528 		return (error);
1529 	}
1530 
1531 	error = bus_dmamem_map(sc->sc_dmat, &sc->alc_rdata.alc_rx_ring_seg,
1532 	    nsegs, ALC_RX_RING_SZ, (caddr_t *)&sc->alc_rdata.alc_rx_ring,
1533 	    BUS_DMA_NOWAIT);
1534 	if (error)
1535 		return (ENOBUFS);
1536 
1537 	/* Load the DMA map for Rx ring. */
1538 	error = bus_dmamap_load(sc->sc_dmat, sc->alc_cdata.alc_rx_ring_map,
1539 	    sc->alc_rdata.alc_rx_ring, ALC_RX_RING_SZ, NULL, BUS_DMA_WAITOK);
1540 	if (error) {
1541 		printf("%s: could not load DMA'able memory for Rx ring.\n",
1542 		    sc->sc_dev.dv_xname);
1543 		bus_dmamem_free(sc->sc_dmat,
1544 		    (bus_dma_segment_t *)sc->alc_rdata.alc_rx_ring, 1);
1545 		return (error);
1546 	}
1547 
1548 	sc->alc_rdata.alc_rx_ring_paddr =
1549 	    sc->alc_cdata.alc_rx_ring_map->dm_segs[0].ds_addr;
1550 
1551 	/*
1552 	 * Create DMA stuffs for RX return ring
1553 	 */
1554 	error = bus_dmamap_create(sc->sc_dmat, ALC_RR_RING_SZ, 1,
1555 	    ALC_RR_RING_SZ, 0, BUS_DMA_NOWAIT, &sc->alc_cdata.alc_rr_ring_map);
1556 	if (error)
1557 		return (ENOBUFS);
1558 
1559 	/* Allocate DMA'able memory for RX return ring */
1560 	error = bus_dmamem_alloc(sc->sc_dmat, ALC_RR_RING_SZ,
1561 	    ETHER_ALIGN, 0, &sc->alc_rdata.alc_rr_ring_seg, 1,
1562 	    &nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO);
1563 	if (error) {
1564 		printf("%s: could not allocate DMA'able memory for Rx "
1565 		    "return ring.\n", sc->sc_dev.dv_xname);
1566 		return (error);
1567 	}
1568 
1569 	error = bus_dmamem_map(sc->sc_dmat, &sc->alc_rdata.alc_rr_ring_seg,
1570 	    nsegs, ALC_RR_RING_SZ, (caddr_t *)&sc->alc_rdata.alc_rr_ring,
1571 	    BUS_DMA_NOWAIT);
1572 	if (error)
1573 		return (ENOBUFS);
1574 
1575 	/*  Load the DMA map for Rx return ring. */
1576 	error = bus_dmamap_load(sc->sc_dmat, sc->alc_cdata.alc_rr_ring_map,
1577 	    sc->alc_rdata.alc_rr_ring, ALC_RR_RING_SZ, NULL, BUS_DMA_WAITOK);
1578 	if (error) {
1579 		printf("%s: could not load DMA'able memory for Rx return ring."
1580 		    "\n", sc->sc_dev.dv_xname);
1581 		bus_dmamem_free(sc->sc_dmat,
1582 		    (bus_dma_segment_t *)&sc->alc_rdata.alc_rr_ring, 1);
1583 		return (error);
1584 	}
1585 
1586 	sc->alc_rdata.alc_rr_ring_paddr =
1587 	    sc->alc_cdata.alc_rr_ring_map->dm_segs[0].ds_addr;
1588 
1589 	/*
1590 	 * Create DMA stuffs for CMB block
1591 	 */
1592 	error = bus_dmamap_create(sc->sc_dmat, ALC_CMB_SZ, 1,
1593 	    ALC_CMB_SZ, 0, BUS_DMA_NOWAIT,
1594 	    &sc->alc_cdata.alc_cmb_map);
1595 	if (error)
1596 		return (ENOBUFS);
1597 
1598 	/* Allocate DMA'able memory for CMB block */
1599 	error = bus_dmamem_alloc(sc->sc_dmat, ALC_CMB_SZ,
1600 	    ETHER_ALIGN, 0, &sc->alc_rdata.alc_cmb_seg, 1,
1601 	    &nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO);
1602 	if (error) {
1603 		printf("%s: could not allocate DMA'able memory for "
1604 		    "CMB block\n", sc->sc_dev.dv_xname);
1605 		return (error);
1606 	}
1607 
1608 	error = bus_dmamem_map(sc->sc_dmat, &sc->alc_rdata.alc_cmb_seg,
1609 	    nsegs, ALC_CMB_SZ, (caddr_t *)&sc->alc_rdata.alc_cmb,
1610 	    BUS_DMA_NOWAIT);
1611 	if (error)
1612 		return (ENOBUFS);
1613 
1614 	/*  Load the DMA map for CMB block. */
1615 	error = bus_dmamap_load(sc->sc_dmat, sc->alc_cdata.alc_cmb_map,
1616 	    sc->alc_rdata.alc_cmb, ALC_CMB_SZ, NULL,
1617 	    BUS_DMA_WAITOK);
1618 	if (error) {
1619 		printf("%s: could not load DMA'able memory for CMB block\n",
1620 		    sc->sc_dev.dv_xname);
1621 		bus_dmamem_free(sc->sc_dmat,
1622 		    (bus_dma_segment_t *)&sc->alc_rdata.alc_cmb, 1);
1623 		return (error);
1624 	}
1625 
1626 	sc->alc_rdata.alc_cmb_paddr =
1627 	    sc->alc_cdata.alc_cmb_map->dm_segs[0].ds_addr;
1628 
1629 	/*
1630 	 * Create DMA stuffs for SMB block
1631 	 */
1632 	error = bus_dmamap_create(sc->sc_dmat, ALC_SMB_SZ, 1,
1633 	    ALC_SMB_SZ, 0, BUS_DMA_NOWAIT,
1634 	    &sc->alc_cdata.alc_smb_map);
1635 	if (error)
1636 		return (ENOBUFS);
1637 
1638 	/* Allocate DMA'able memory for SMB block */
1639 	error = bus_dmamem_alloc(sc->sc_dmat, ALC_SMB_SZ,
1640 	    ETHER_ALIGN, 0, &sc->alc_rdata.alc_smb_seg, 1,
1641 	    &nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO);
1642 	if (error) {
1643 		printf("%s: could not allocate DMA'able memory for "
1644 		    "SMB block\n", sc->sc_dev.dv_xname);
1645 		return (error);
1646 	}
1647 
1648 	error = bus_dmamem_map(sc->sc_dmat, &sc->alc_rdata.alc_smb_seg,
1649 	    nsegs, ALC_SMB_SZ, (caddr_t *)&sc->alc_rdata.alc_smb,
1650 	    BUS_DMA_NOWAIT);
1651 	if (error)
1652 		return (ENOBUFS);
1653 
1654 	/*  Load the DMA map for SMB block */
1655 	error = bus_dmamap_load(sc->sc_dmat, sc->alc_cdata.alc_smb_map,
1656 	    sc->alc_rdata.alc_smb, ALC_SMB_SZ, NULL,
1657 	    BUS_DMA_WAITOK);
1658 	if (error) {
1659 		printf("%s: could not load DMA'able memory for SMB block\n",
1660 		    sc->sc_dev.dv_xname);
1661 		bus_dmamem_free(sc->sc_dmat,
1662 		    (bus_dma_segment_t *)&sc->alc_rdata.alc_smb, 1);
1663 		return (error);
1664 	}
1665 
1666 	sc->alc_rdata.alc_smb_paddr =
1667 	    sc->alc_cdata.alc_smb_map->dm_segs[0].ds_addr;
1668 
1669 
1670 	/* Create DMA maps for Tx buffers. */
1671 	for (i = 0; i < ALC_TX_RING_CNT; i++) {
1672 		txd = &sc->alc_cdata.alc_txdesc[i];
1673 		txd->tx_m = NULL;
1674 		txd->tx_dmamap = NULL;
1675 		error = bus_dmamap_create(sc->sc_dmat, ALC_TSO_MAXSIZE,
1676 		    ALC_MAXTXSEGS, ALC_TSO_MAXSEGSIZE, 0, BUS_DMA_NOWAIT,
1677 		    &txd->tx_dmamap);
1678 		if (error) {
1679 			printf("%s: could not create Tx dmamap.\n",
1680 			    sc->sc_dev.dv_xname);
1681 			return (error);
1682 		}
1683 	}
1684 
1685 	/* Create DMA maps for Rx buffers. */
1686 	error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 0,
1687 	    BUS_DMA_NOWAIT, &sc->alc_cdata.alc_rx_sparemap);
1688 	if (error) {
1689 		printf("%s: could not create spare Rx dmamap.\n",
1690 		    sc->sc_dev.dv_xname);
1691 		return (error);
1692 	}
1693 
1694 	for (i = 0; i < ALC_RX_RING_CNT; i++) {
1695 		rxd = &sc->alc_cdata.alc_rxdesc[i];
1696 		rxd->rx_m = NULL;
1697 		rxd->rx_dmamap = NULL;
1698 		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
1699 		    MCLBYTES, 0, BUS_DMA_NOWAIT, &rxd->rx_dmamap);
1700 		if (error) {
1701 			printf("%s: could not create Rx dmamap.\n",
1702 			    sc->sc_dev.dv_xname);
1703 			return (error);
1704 		}
1705 	}
1706 
1707 	return (0);
1708 }
1709 
1710 void
1711 alc_dma_free(struct alc_softc *sc)
1712 {
1713 	struct alc_txdesc *txd;
1714 	struct alc_rxdesc *rxd;
1715 	int i;
1716 
1717 	/* Tx buffers */
1718 	for (i = 0; i < ALC_TX_RING_CNT; i++) {
1719 		txd = &sc->alc_cdata.alc_txdesc[i];
1720 		if (txd->tx_dmamap != NULL) {
1721 			bus_dmamap_destroy(sc->sc_dmat, txd->tx_dmamap);
1722 			txd->tx_dmamap = NULL;
1723 		}
1724 	}
1725 	/* Rx buffers */
1726 	for (i = 0; i < ALC_RX_RING_CNT; i++) {
1727 		rxd = &sc->alc_cdata.alc_rxdesc[i];
1728 		if (rxd->rx_dmamap != NULL) {
1729 			bus_dmamap_destroy(sc->sc_dmat, rxd->rx_dmamap);
1730 			rxd->rx_dmamap = NULL;
1731 		}
1732 	}
1733 	if (sc->alc_cdata.alc_rx_sparemap != NULL) {
1734 		bus_dmamap_destroy(sc->sc_dmat, sc->alc_cdata.alc_rx_sparemap);
1735 		sc->alc_cdata.alc_rx_sparemap = NULL;
1736 	}
1737 
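	/*
	 * bus_dmamem_free(9) takes the bus_dma_segment_t array that
	 * bus_dmamem_alloc(9) filled in, so each block below frees
	 * through its saved segment (the Tx/Rx ring seg fields are
	 * assumed to follow the same naming pattern as the Rx return
	 * ring's alc_rr_ring_seg).
	 */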
1738 	/* Tx ring. */
1739 	if (sc->alc_cdata.alc_tx_ring_map != NULL)
1740 		bus_dmamap_unload(sc->sc_dmat, sc->alc_cdata.alc_tx_ring_map);
1741 	if (sc->alc_cdata.alc_tx_ring_map != NULL &&
1742 	    sc->alc_rdata.alc_tx_ring != NULL)
1743 		bus_dmamem_free(sc->sc_dmat,
1744 		    &sc->alc_rdata.alc_tx_ring_seg, 1);
1745 	sc->alc_rdata.alc_tx_ring = NULL;
1746 	sc->alc_cdata.alc_tx_ring_map = NULL;
1747 
1748 	/* Rx ring. */
1749 	if (sc->alc_cdata.alc_rx_ring_map != NULL)
1750 		bus_dmamap_unload(sc->sc_dmat, sc->alc_cdata.alc_rx_ring_map);
1751 	if (sc->alc_cdata.alc_rx_ring_map != NULL &&
1752 	    sc->alc_rdata.alc_rx_ring != NULL)
1753 		bus_dmamem_free(sc->sc_dmat,
1754 		    &sc->alc_rdata.alc_rx_ring_seg, 1);
1755 	sc->alc_rdata.alc_rx_ring = NULL;
1756 	sc->alc_cdata.alc_rx_ring_map = NULL;
1757 
1758 	/* Rx return ring. */
1759 	if (sc->alc_cdata.alc_rr_ring_map != NULL)
1760 		bus_dmamap_unload(sc->sc_dmat, sc->alc_cdata.alc_rr_ring_map);
1761 	if (sc->alc_cdata.alc_rr_ring_map != NULL &&
1762 	    sc->alc_rdata.alc_rr_ring != NULL)
1763 		bus_dmamem_free(sc->sc_dmat,
1764 		    &sc->alc_rdata.alc_rr_ring_seg, 1);
1765 	sc->alc_rdata.alc_rr_ring = NULL;
1766 	sc->alc_cdata.alc_rr_ring_map = NULL;
1767 
1768 	/* CMB block */
1769 	if (sc->alc_cdata.alc_cmb_map != NULL)
1770 		bus_dmamap_unload(sc->sc_dmat, sc->alc_cdata.alc_cmb_map);
1771 	if (sc->alc_cdata.alc_cmb_map != NULL &&
1772 	    sc->alc_rdata.alc_cmb != NULL)
1773 		bus_dmamem_free(sc->sc_dmat,
1774 		    &sc->alc_rdata.alc_cmb_seg, 1);
1775 	sc->alc_rdata.alc_cmb = NULL;
1776 	sc->alc_cdata.alc_cmb_map = NULL;
1777 
1778 	/* SMB block */
1779 	if (sc->alc_cdata.alc_smb_map != NULL)
1780 		bus_dmamap_unload(sc->sc_dmat, sc->alc_cdata.alc_smb_map);
1781 	if (sc->alc_cdata.alc_smb_map != NULL &&
1782 	    sc->alc_rdata.alc_smb != NULL)
1783 		bus_dmamem_free(sc->sc_dmat,
1784 		    &sc->alc_rdata.alc_smb_seg, 1);
1785 	sc->alc_rdata.alc_smb = NULL;
1786 	sc->alc_cdata.alc_smb_map = NULL;
1787 }
1788 
1789 int
1790 alc_encap(struct alc_softc *sc, struct mbuf *m)
1791 {
1792 	struct alc_txdesc *txd, *txd_last;
1793 	struct tx_desc *desc;
1794 	bus_dmamap_t map;
1795 	uint32_t cflags, poff, vtag;
1796 	int error, idx, prod;
1797 
1798 	cflags = vtag = 0;
1799 	poff = 0;
1800 
1801 	prod = sc->alc_cdata.alc_tx_prod;
1802 	txd = &sc->alc_cdata.alc_txdesc[prod];
1803 	txd_last = txd;
1804 	map = txd->tx_dmamap;
1805 
1806 	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m, BUS_DMA_NOWAIT);
1807 	if (error != 0 && error != EFBIG)
1808 		goto drop;
1809 	if (error != 0) {
1810 		if (m_defrag(m, M_DONTWAIT)) {
1811 			error = ENOBUFS;
1812 			goto drop;
1813 		}
1814 		error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
1815 		    BUS_DMA_NOWAIT);
1816 		if (error != 0)
1817 			goto drop;
1818 	}
1819 
1820 	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
1821 	    BUS_DMASYNC_PREWRITE);
1822 
1823 	desc = NULL;
1824 	idx = 0;
1825 #if NVLAN > 0
1826 	/* Configure VLAN hardware tag insertion. */
1827 	if (m->m_flags & M_VLANTAG) {
1828 		vtag = htons(m->m_pkthdr.ether_vtag);
1829 		vtag = (vtag << TD_VLAN_SHIFT) & TD_VLAN_MASK;
1830 		cflags |= TD_INS_VLAN_TAG;
1831 	}
1832 #endif
1833 	/* Configure Tx checksum offload. */
1834 	if ((m->m_pkthdr.csum_flags & ALC_CSUM_FEATURES) != 0) {
1835 		cflags |= TD_CUSTOM_CSUM;
1836 		/* Set checksum start offset. */
1837 		cflags |= ((poff >> 1) << TD_PLOAD_OFFSET_SHIFT) &
1838 		    TD_PLOAD_OFFSET_MASK;
1839 	}
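	/*
	 * Note that poff is never advanced beyond zero here; the
	 * driver does not parse packet headers, so a payload offset
	 * of 0 is programmed for the custom checksum.
	 */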
1840 
1841 	for (; idx < map->dm_nsegs; idx++) {
1842 		desc = &sc->alc_rdata.alc_tx_ring[prod];
1843 		desc->len =
1844 		    htole32(TX_BYTES(map->dm_segs[idx].ds_len) | vtag);
1845 		desc->flags = htole32(cflags);
1846 		desc->addr = htole64(map->dm_segs[idx].ds_addr);
1847 		sc->alc_cdata.alc_tx_cnt++;
1848 		ALC_DESC_INC(prod, ALC_TX_RING_CNT);
1849 	}
1850 
1851 	/* Update producer index. */
1852 	sc->alc_cdata.alc_tx_prod = prod;
1853 
1854 	/* Finally set EOP on the last descriptor. */
1855 	prod = (prod + ALC_TX_RING_CNT - 1) % ALC_TX_RING_CNT;
1856 	desc = &sc->alc_rdata.alc_tx_ring[prod];
1857 	desc->flags |= htole32(TD_EOP);
1858 
1859 	/* Swap dmamap of the first and the last. */
1860 	txd = &sc->alc_cdata.alc_txdesc[prod];
1861 	map = txd_last->tx_dmamap;
1862 	txd_last->tx_dmamap = txd->tx_dmamap;
1863 	txd->tx_dmamap = map;
1864 	txd->tx_m = m;
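	/*
	 * The mbuf was loaded into the first slot's map, but tx_m is
	 * stored at the EOP slot; swapping the two maps ensures that
	 * alc_txeof() unloads the map that actually holds the chain
	 * when the final descriptor of this frame is reclaimed.
	 */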
1865 
1866 	return (0);
1867 
1868 drop:
1869 	m_freem(m);
1870 	return (error);
1871 }
1872 
1873 void
1874 alc_start(struct ifnet *ifp)
1875 {
1876 	struct alc_softc *sc = ifp->if_softc;
1877 	struct mbuf *m;
1878 	int enq = 0;
1879 
1880 	/* Reclaim transmitted frames. */
1881 	if (sc->alc_cdata.alc_tx_cnt >= ALC_TX_DESC_HIWAT)
1882 		alc_txeof(sc);
1883 
1884 	if (!(ifp->if_flags & IFF_RUNNING) || ifq_is_oactive(&ifp->if_snd))
1885 		return;
1886 	if ((sc->alc_flags & ALC_FLAG_LINK) == 0)
1887 		return;
1888 	if (IFQ_IS_EMPTY(&ifp->if_snd))
1889 		return;
1890 
1891 	for (;;) {
1892 		if (sc->alc_cdata.alc_tx_cnt + ALC_MAXTXSEGS >=
1893 		    ALC_TX_RING_CNT - 3) {
1894 			ifq_set_oactive(&ifp->if_snd);
1895 			break;
1896 		}
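		/*
		 * The check above keeps a three-descriptor reserve on
		 * top of the worst-case ALC_MAXTXSEGS segments, so the
		 * producer index never catches up with the consumer.
		 */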
1897 
1898 		IFQ_DEQUEUE(&ifp->if_snd, m);
1899 		if (m == NULL)
1900 			break;
1901 
1902 		if (alc_encap(sc, m) != 0) {
1903 			ifp->if_oerrors++;
1904 			continue;
1905 		}
1906 		enq++;
1907 
1908 #if NBPFILTER > 0
1909 		/*
1910 		 * If there's a BPF listener, bounce a copy of this frame
1911 		 * to him.
1912 		 */
1913 		if (ifp->if_bpf != NULL)
1914 			bpf_mtap_ether(ifp->if_bpf, m, BPF_DIRECTION_OUT);
1915 #endif
1916 	}
1917 
1918 	if (enq > 0) {
1919 		/* Sync descriptors. */
1920 		bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_tx_ring_map, 0,
1921 		    sc->alc_cdata.alc_tx_ring_map->dm_mapsize,
1922 		    BUS_DMASYNC_PREWRITE);
1923 		/* Kick. Assume we're using normal Tx priority queue. */
1924 		if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0)
1925 			CSR_WRITE_2(sc, ALC_MBOX_TD_PRI0_PROD_IDX,
1926 			    (uint16_t)sc->alc_cdata.alc_tx_prod);
1927 		else
1928 			CSR_WRITE_4(sc, ALC_MBOX_TD_PROD_IDX,
1929 			    (sc->alc_cdata.alc_tx_prod <<
1930 			    MBOX_TD_PROD_LO_IDX_SHIFT) &
1931 			    MBOX_TD_PROD_LO_IDX_MASK);
1932 		/* Set a timeout in case the chip goes out to lunch. */
1933 		ifp->if_timer = ALC_TX_TIMEOUT;
1934 	}
1935 }
1936 
1937 void
1938 alc_watchdog(struct ifnet *ifp)
1939 {
1940 	struct alc_softc *sc = ifp->if_softc;
1941 
1942 	if ((sc->alc_flags & ALC_FLAG_LINK) == 0) {
1943 		printf("%s: watchdog timeout (missed link)\n",
1944 		    sc->sc_dev.dv_xname);
1945 		ifp->if_oerrors++;
1946 		alc_init(ifp);
1947 		return;
1948 	}
1949 
1950 	printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);
1951 	ifp->if_oerrors++;
1952 	alc_init(ifp);
1953 	alc_start(ifp);
1954 }
1955 
1956 int
1957 alc_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
1958 {
1959 	struct alc_softc *sc = ifp->if_softc;
1960 	struct mii_data *mii = &sc->sc_miibus;
1961 	struct ifreq *ifr = (struct ifreq *)data;
1962 	int s, error = 0;
1963 
1964 	s = splnet();
1965 
1966 	switch (cmd) {
1967 	case SIOCSIFADDR:
1968 		ifp->if_flags |= IFF_UP;
1969 		if (!(ifp->if_flags & IFF_RUNNING))
1970 			alc_init(ifp);
1971 		break;
1972 
1973 	case SIOCSIFFLAGS:
1974 		if (ifp->if_flags & IFF_UP) {
1975 			if (ifp->if_flags & IFF_RUNNING)
1976 				error = ENETRESET;
1977 			else
1978 				alc_init(ifp);
1979 		} else {
1980 			if (ifp->if_flags & IFF_RUNNING)
1981 				alc_stop(sc);
1982 		}
1983 		break;
1984 
1985 	case SIOCSIFMEDIA:
1986 	case SIOCGIFMEDIA:
1987 		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
1988 		break;
1989 
1990 	default:
1991 		error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data);
1992 		break;
1993 	}
1994 
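	/*
	 * ENETRESET from the cases above means only the Rx filter
	 * needs reprogramming; alc_iff() does that without a full
	 * reinitialization of the interface.
	 */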
1995 	if (error == ENETRESET) {
1996 		if (ifp->if_flags & IFF_RUNNING)
1997 			alc_iff(sc);
1998 		error = 0;
1999 	}
2000 
2001 	splx(s);
2002 	return (error);
2003 }
2004 
2005 void
2006 alc_mac_config(struct alc_softc *sc)
2007 {
2008 	struct mii_data *mii;
2009 	uint32_t reg;
2010 
2011 	mii = &sc->sc_miibus;
2012 	reg = CSR_READ_4(sc, ALC_MAC_CFG);
2013 	reg &= ~(MAC_CFG_FULL_DUPLEX | MAC_CFG_TX_FC | MAC_CFG_RX_FC |
2014 	    MAC_CFG_SPEED_MASK);
2015 	if (sc->sc_product == PCI_PRODUCT_ATTANSIC_L1D ||
2016 	    sc->sc_product == PCI_PRODUCT_ATTANSIC_L1D_1 ||
2017 	    sc->sc_product == PCI_PRODUCT_ATTANSIC_L2C_2 ||
2018 	    (sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0)
2019 		reg |= MAC_CFG_HASH_ALG_CRC32 | MAC_CFG_SPEED_MODE_SW;
2020 	/* Reprogram MAC with resolved speed/duplex. */
2021 	switch (IFM_SUBTYPE(mii->mii_media_active)) {
2022 	case IFM_10_T:
2023 	case IFM_100_TX:
2024 		reg |= MAC_CFG_SPEED_10_100;
2025 		break;
2026 	case IFM_1000_T:
2027 		reg |= MAC_CFG_SPEED_1000;
2028 		break;
2029 	}
2030 	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
2031 		reg |= MAC_CFG_FULL_DUPLEX;
2032 		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
2033 			reg |= MAC_CFG_TX_FC;
2034 		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
2035 			reg |= MAC_CFG_RX_FC;
2036 	}
2037 	CSR_WRITE_4(sc, ALC_MAC_CFG, reg);
2038 }
2039 
2040 void
2041 alc_stats_clear(struct alc_softc *sc)
2042 {
2043 	struct smb sb, *smb;
2044 	uint32_t *reg;
2045 	int i;
2046 
2047 	if ((sc->alc_flags & ALC_FLAG_SMB_BUG) == 0) {
2048 		bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_smb_map, 0,
2049 		    sc->alc_cdata.alc_smb_map->dm_mapsize,
2050 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2051 		smb = sc->alc_rdata.alc_smb;
2052 		/* Update done, clear. */
2053 		smb->updated = 0;
2054 		bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_smb_map, 0,
2055 		    sc->alc_cdata.alc_smb_map->dm_mapsize,
2056 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2057 	} else {
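		/*
		 * With the SMB bug the statistics block can't be
		 * DMA'ed; the MIB counters are read directly and the
		 * values discarded, which relies on the counters
		 * clearing on read.
		 */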
2058 		for (reg = &sb.rx_frames, i = 0; reg <= &sb.rx_pkts_filtered;
2059 		    reg++) {
2060 			CSR_READ_4(sc, ALC_RX_MIB_BASE + i);
2061 			i += sizeof(uint32_t);
2062 		}
2063 		/* Read Tx statistics. */
2064 		for (reg = &sb.tx_frames, i = 0; reg <= &sb.tx_mcast_bytes;
2065 		    reg++) {
2066 			CSR_READ_4(sc, ALC_TX_MIB_BASE + i);
2067 			i += sizeof(uint32_t);
2068 		}
2069 	}
2070 }
2071 
2072 void
2073 alc_stats_update(struct alc_softc *sc)
2074 {
2075 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
2076 	struct alc_hw_stats *stat;
2077 	struct smb sb, *smb;
2078 	uint32_t *reg;
2079 	int i;
2080 
2081 	stat = &sc->alc_stats;
2082 	if ((sc->alc_flags & ALC_FLAG_SMB_BUG) == 0) {
2083 		bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_smb_map, 0,
2084 		    sc->alc_cdata.alc_smb_map->dm_mapsize,
2085 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2086 		smb = sc->alc_rdata.alc_smb;
2087 		if (smb->updated == 0)
2088 			return;
2089 	} else {
2090 		smb = &sb;
2091 		/* Read Rx statistics. */
2092 		for (reg = &sb.rx_frames, i = 0; reg <= &sb.rx_pkts_filtered;
2093 		    reg++) {
2094 			*reg = CSR_READ_4(sc, ALC_RX_MIB_BASE + i);
2095 			i += sizeof(uint32_t);
2096 		}
2097 		/* Read Tx statistics. */
2098 		for (reg = &sb.tx_frames, i = 0; reg <= &sb.tx_mcast_bytes;
2099 		    reg++) {
2100 			*reg = CSR_READ_4(sc, ALC_TX_MIB_BASE + i);
2101 			i += sizeof(uint32_t);
2102 		}
2103 	}
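	/*
	 * The loops above walk struct smb as a flat array of uint32_t
	 * counters whose order mirrors the Rx/Tx MIB register layout,
	 * so the register offset i and the member pointer reg advance
	 * in lockstep.
	 */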
2104 
2105 	/* Rx stats. */
2106 	stat->rx_frames += smb->rx_frames;
2107 	stat->rx_bcast_frames += smb->rx_bcast_frames;
2108 	stat->rx_mcast_frames += smb->rx_mcast_frames;
2109 	stat->rx_pause_frames += smb->rx_pause_frames;
2110 	stat->rx_control_frames += smb->rx_control_frames;
2111 	stat->rx_crcerrs += smb->rx_crcerrs;
2112 	stat->rx_lenerrs += smb->rx_lenerrs;
2113 	stat->rx_bytes += smb->rx_bytes;
2114 	stat->rx_runts += smb->rx_runts;
2115 	stat->rx_fragments += smb->rx_fragments;
2116 	stat->rx_pkts_64 += smb->rx_pkts_64;
2117 	stat->rx_pkts_65_127 += smb->rx_pkts_65_127;
2118 	stat->rx_pkts_128_255 += smb->rx_pkts_128_255;
2119 	stat->rx_pkts_256_511 += smb->rx_pkts_256_511;
2120 	stat->rx_pkts_512_1023 += smb->rx_pkts_512_1023;
2121 	stat->rx_pkts_1024_1518 += smb->rx_pkts_1024_1518;
2122 	stat->rx_pkts_1519_max += smb->rx_pkts_1519_max;
2123 	stat->rx_pkts_truncated += smb->rx_pkts_truncated;
2124 	stat->rx_fifo_oflows += smb->rx_fifo_oflows;
2125 	stat->rx_rrs_errs += smb->rx_rrs_errs;
2126 	stat->rx_alignerrs += smb->rx_alignerrs;
2127 	stat->rx_bcast_bytes += smb->rx_bcast_bytes;
2128 	stat->rx_mcast_bytes += smb->rx_mcast_bytes;
2129 	stat->rx_pkts_filtered += smb->rx_pkts_filtered;
2130 
2131 	/* Tx stats. */
2132 	stat->tx_frames += smb->tx_frames;
2133 	stat->tx_bcast_frames += smb->tx_bcast_frames;
2134 	stat->tx_mcast_frames += smb->tx_mcast_frames;
2135 	stat->tx_pause_frames += smb->tx_pause_frames;
2136 	stat->tx_excess_defer += smb->tx_excess_defer;
2137 	stat->tx_control_frames += smb->tx_control_frames;
2138 	stat->tx_deferred += smb->tx_deferred;
2139 	stat->tx_bytes += smb->tx_bytes;
2140 	stat->tx_pkts_64 += smb->tx_pkts_64;
2141 	stat->tx_pkts_65_127 += smb->tx_pkts_65_127;
2142 	stat->tx_pkts_128_255 += smb->tx_pkts_128_255;
2143 	stat->tx_pkts_256_511 += smb->tx_pkts_256_511;
2144 	stat->tx_pkts_512_1023 += smb->tx_pkts_512_1023;
2145 	stat->tx_pkts_1024_1518 += smb->tx_pkts_1024_1518;
2146 	stat->tx_pkts_1519_max += smb->tx_pkts_1519_max;
2147 	stat->tx_single_colls += smb->tx_single_colls;
2148 	stat->tx_multi_colls += smb->tx_multi_colls;
2149 	stat->tx_late_colls += smb->tx_late_colls;
2150 	stat->tx_excess_colls += smb->tx_excess_colls;
2151 	stat->tx_underrun += smb->tx_underrun;
2152 	stat->tx_desc_underrun += smb->tx_desc_underrun;
2153 	stat->tx_lenerrs += smb->tx_lenerrs;
2154 	stat->tx_pkts_truncated += smb->tx_pkts_truncated;
2155 	stat->tx_bcast_bytes += smb->tx_bcast_bytes;
2156 	stat->tx_mcast_bytes += smb->tx_mcast_bytes;
2157 
2158 	ifp->if_collisions += smb->tx_single_colls +
2159 	    smb->tx_multi_colls * 2 + smb->tx_late_colls +
2160 	    smb->tx_excess_colls * HDPX_CFG_RETRY_DEFAULT;
2161 
2162 	ifp->if_oerrors += smb->tx_late_colls + smb->tx_excess_colls +
2163 	    smb->tx_underrun + smb->tx_pkts_truncated;
2164 
2165 	ifp->if_ierrors += smb->rx_crcerrs + smb->rx_lenerrs +
2166 	    smb->rx_runts + smb->rx_pkts_truncated +
2167 	    smb->rx_fifo_oflows + smb->rx_rrs_errs +
2168 	    smb->rx_alignerrs;
2169 
2170 	if ((sc->alc_flags & ALC_FLAG_SMB_BUG) == 0) {
2171 		/* Update done, clear. */
2172 		smb->updated = 0;
2173 		bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_smb_map, 0,
2174 		    sc->alc_cdata.alc_smb_map->dm_mapsize,
2175 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2176 	}
2177 }
2178 
2179 int
2180 alc_intr(void *arg)
2181 {
2182 	struct alc_softc *sc = arg;
2183 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
2184 	uint32_t status;
2185 	int claimed = 0;
2186 
2187 	status = CSR_READ_4(sc, ALC_INTR_STATUS);
2188 	if ((status & ALC_INTRS) == 0)
2189 		return (0);
2190 
2191 	/* Disable interrupts. */
2192 	CSR_WRITE_4(sc, ALC_INTR_STATUS, INTR_DIS_INT);
2193 
2194 	status = CSR_READ_4(sc, ALC_INTR_STATUS);
2195 	if ((status & ALC_INTRS) == 0)
2196 		goto back;
2197 
2198 	/* Acknowledge and disable interrupts. */
2199 	CSR_WRITE_4(sc, ALC_INTR_STATUS, status | INTR_DIS_INT);
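	/*
	 * Status is re-read after masking so that the acknowledgment
	 * above covers any bits raised in between; INTR_DIS_INT stays
	 * set until the final write re-enables interrupts below.
	 */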
2200 
2201 	if (ifp->if_flags & IFF_RUNNING) {
2202 		int error = 0;
2203 
2204 		if (status & INTR_RX_PKT) {
2205 			error = alc_rxintr(sc);
2206 			if (error) {
2207 				alc_init(ifp);
2208 				return (0);
2209 			}
2210 		}
2211 		if (status & (INTR_DMA_RD_TO_RST | INTR_DMA_WR_TO_RST |
2212 		    INTR_TXQ_TO_RST)) {
2213 			if (status & INTR_DMA_RD_TO_RST)
2214 				printf("%s: DMA read error! -- resetting\n",
2215 				    sc->sc_dev.dv_xname);
2216 			if (status & INTR_DMA_WR_TO_RST)
2217 				printf("%s: DMA write error! -- resetting\n",
2218 				    sc->sc_dev.dv_xname);
2219 			if (status & INTR_TXQ_TO_RST)
2220 				printf("%s: TxQ reset! -- resetting\n",
2221 				    sc->sc_dev.dv_xname);
2222 			alc_init(ifp);
2223 			return (0);
2224 		}
2225 
2226 		alc_txeof(sc);
2227 		alc_start(ifp);
2228 	}
2229 
2230 	claimed = 1;
2231 back:
2232 	/* Re-enable interrupts. */
2233 	CSR_WRITE_4(sc, ALC_INTR_STATUS, 0x7FFFFFFF);
2234 	return (claimed);
2235 }
2236 
2237 void
2238 alc_txeof(struct alc_softc *sc)
2239 {
2240 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
2241 	struct alc_txdesc *txd;
2242 	uint32_t cons, prod;
2243 	int prog;
2244 
2245 	if (sc->alc_cdata.alc_tx_cnt == 0)
2246 		return;
2247 	bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_tx_ring_map, 0,
2248 	    sc->alc_cdata.alc_tx_ring_map->dm_mapsize,
2249 	    BUS_DMASYNC_POSTWRITE);
2250 	if ((sc->alc_flags & ALC_FLAG_CMB_BUG) == 0) {
2251 		bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_cmb_map, 0,
2252 		    sc->alc_cdata.alc_cmb_map->dm_mapsize,
2253 		    BUS_DMASYNC_POSTREAD);
2254 		prod = sc->alc_rdata.alc_cmb->cons;
2255 	} else {
2256 		if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0)
2257 			prod = CSR_READ_2(sc, ALC_MBOX_TD_PRI0_CONS_IDX);
2258 		else {
2259 			prod = CSR_READ_4(sc, ALC_MBOX_TD_CONS_IDX);
2260 			/* Assume we're using normal Tx priority queue. */
2261 			prod = (prod & MBOX_TD_CONS_LO_IDX_MASK) >>
2262 			    MBOX_TD_CONS_LO_IDX_SHIFT;
2263 		}
2264 	}
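	/*
	 * When the CMB works, hardware DMAs its Tx consumer index
	 * into the coalescing message block, saving a register read
	 * per reclaim pass; the mailbox register is the fallback.
	 */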
2265 	cons = sc->alc_cdata.alc_tx_cons;
2266 	/*
2267 	 * Go through our Tx list and free mbufs for those
2268 	 * frames which have been transmitted.
2269 	 */
2270 	for (prog = 0; cons != prod; prog++,
2271 	    ALC_DESC_INC(cons, ALC_TX_RING_CNT)) {
2272 		if (sc->alc_cdata.alc_tx_cnt <= 0)
2273 			break;
2275 		ifq_clr_oactive(&ifp->if_snd);
2276 		sc->alc_cdata.alc_tx_cnt--;
2277 		txd = &sc->alc_cdata.alc_txdesc[cons];
2278 		if (txd->tx_m != NULL) {
2279 			/* Reclaim transmitted mbufs. */
2280 			bus_dmamap_sync(sc->sc_dmat, txd->tx_dmamap, 0,
2281 			    txd->tx_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
2282 			bus_dmamap_unload(sc->sc_dmat, txd->tx_dmamap);
2283 			m_freem(txd->tx_m);
2284 			txd->tx_m = NULL;
2285 		}
2286 	}
2287 
2288 	if ((sc->alc_flags & ALC_FLAG_CMB_BUG) == 0)
2289 		bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_cmb_map, 0,
2290 		    sc->alc_cdata.alc_cmb_map->dm_mapsize, BUS_DMASYNC_PREREAD);
2291 	sc->alc_cdata.alc_tx_cons = cons;
2292 	/*
2293 	 * Unarm the watchdog timer only when there are no pending
2294 	 * frames in the Tx queue.
2295 	 */
2296 	if (sc->alc_cdata.alc_tx_cnt == 0)
2297 		ifp->if_timer = 0;
2298 }
2299 
2300 int
2301 alc_newbuf(struct alc_softc *sc, struct alc_rxdesc *rxd)
2302 {
2303 	struct mbuf *m;
2304 	bus_dmamap_t map;
2305 	int error;
2306 
2307 	MGETHDR(m, M_DONTWAIT, MT_DATA);
2308 	if (m == NULL)
2309 		return (ENOBUFS);
2310 	MCLGET(m, M_DONTWAIT);
2311 	if (!(m->m_flags & M_EXT)) {
2312 		m_freem(m);
2313 		return (ENOBUFS);
2314 	}
2315 
2316 	m->m_len = m->m_pkthdr.len = RX_BUF_SIZE_MAX;
2317 
2318 	error = bus_dmamap_load_mbuf(sc->sc_dmat,
2319 	    sc->alc_cdata.alc_rx_sparemap, m, BUS_DMA_NOWAIT);
2320 
2321 	if (error != 0) {
2322 		m_freem(m);
2323 		printf("%s: can't load RX mbuf\n", sc->sc_dev.dv_xname);
2324 		return (error);
2325 	}
2326 
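	/*
	 * The new mbuf was loaded into the spare map above, so the
	 * old buffer stays mapped until the replacement is known to
	 * be good; the maps are swapped below to keep one spare
	 * available for the next replenish.
	 */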
2327 	if (rxd->rx_m != NULL) {
2328 		bus_dmamap_sync(sc->sc_dmat, rxd->rx_dmamap, 0,
2329 		    rxd->rx_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
2330 		bus_dmamap_unload(sc->sc_dmat, rxd->rx_dmamap);
2331 	}
2332 	map = rxd->rx_dmamap;
2333 	rxd->rx_dmamap = sc->alc_cdata.alc_rx_sparemap;
2334 	sc->alc_cdata.alc_rx_sparemap = map;
2335 	bus_dmamap_sync(sc->sc_dmat, rxd->rx_dmamap, 0, rxd->rx_dmamap->dm_mapsize,
2336 	    BUS_DMASYNC_PREREAD);
2337 	rxd->rx_m = m;
2338 	rxd->rx_desc->addr = htole64(rxd->rx_dmamap->dm_segs[0].ds_addr);
2339 	return (0);
2340 }
2341 
2342 int
2343 alc_rxintr(struct alc_softc *sc)
2344 {
2345 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
2346 	struct rx_rdesc *rrd;
2347 	uint32_t nsegs, status;
2348 	int rr_cons, prog;
2349 
2350 	bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_rr_ring_map, 0,
2351 	    sc->alc_cdata.alc_rr_ring_map->dm_mapsize,
2352 	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2353 	bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_rx_ring_map, 0,
2354 	    sc->alc_cdata.alc_rx_ring_map->dm_mapsize,
2355 	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2356 	rr_cons = sc->alc_cdata.alc_rr_cons;
2357 	for (prog = 0; (ifp->if_flags & IFF_RUNNING) != 0;) {
2358 		rrd = &sc->alc_rdata.alc_rr_ring[rr_cons];
2359 		status = letoh32(rrd->status);
2360 		if ((status & RRD_VALID) == 0)
2361 			break;
2362 		nsegs = RRD_RD_CNT(letoh32(rrd->rdinfo));
2363 		if (nsegs == 0) {
2364 			/* This should not happen! */
2365 			if (alcdebug)
2366 				printf("%s: unexpected segment count -- "
2367 				    "resetting\n", sc->sc_dev.dv_xname);
2368 			return (EIO);
2369 		}
2370 		alc_rxeof(sc, rrd);
2371 		/* Clear Rx return status. */
2372 		rrd->status = 0;
2373 		ALC_DESC_INC(rr_cons, ALC_RR_RING_CNT);
2374 		sc->alc_cdata.alc_rx_cons += nsegs;
2375 		sc->alc_cdata.alc_rx_cons %= ALC_RR_RING_CNT;
2376 		prog += nsegs;
2377 	}
2378 
2379 	if (prog > 0) {
2380 		/* Update the consumer index. */
2381 		sc->alc_cdata.alc_rr_cons = rr_cons;
2382 		/* Sync Rx return descriptors. */
2383 		bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_rr_ring_map, 0,
2384 		    sc->alc_cdata.alc_rr_ring_map->dm_mapsize,
2385 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2386 		/*
2387 		 * Sync updated Rx descriptors such that the controller
2388 		 * sees the modified buffer addresses.
2389 		 */
2390 		bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_rx_ring_map, 0,
2391 		    sc->alc_cdata.alc_rx_ring_map->dm_mapsize,
2392 		    BUS_DMASYNC_PREWRITE);
2393 		/*
2394 		 * Let controller know availability of new Rx buffers.
2395 		 * Since alc(4) uses RXQ_CFG_RD_BURST_DEFAULT descriptors,
2396 		 * it may be possible to update ALC_MBOX_RD0_PROD_IDX
2397 		 * only when Rx buffer pre-fetching is required. In
2398 		 * addition, we already set ALC_RX_RD_FREE_THRESH to
2399 		 * RX_RD_FREE_THRESH_LO_DEFAULT descriptors. However,
2400 		 * it still seems that pre-fetching needs more
2401 		 * experimentation.
2402 		 */
2403 		if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0)
2404 			CSR_WRITE_2(sc, ALC_MBOX_RD0_PROD_IDX,
2405 			    (uint16_t)sc->alc_cdata.alc_rx_cons);
2406 		else
2407 			CSR_WRITE_4(sc, ALC_MBOX_RD0_PROD_IDX,
2408 			    sc->alc_cdata.alc_rx_cons);
2409 	}
2410 
2411 	return (0);
2412 }
2413 
2414 /* Receive a frame. */
2415 void
2416 alc_rxeof(struct alc_softc *sc, struct rx_rdesc *rrd)
2417 {
2418 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
2419 	struct alc_rxdesc *rxd;
2420 	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
2421 	struct mbuf *mp, *m;
2422 	uint32_t rdinfo, status;
2423 	int count, nsegs, rx_cons;
2424 
2425 	status = letoh32(rrd->status);
2426 	rdinfo = letoh32(rrd->rdinfo);
2427 	rx_cons = RRD_RD_IDX(rdinfo);
2428 	nsegs = RRD_RD_CNT(rdinfo);
2429 
2430 	sc->alc_cdata.alc_rxlen = RRD_BYTES(status);
2431 	if (status & (RRD_ERR_SUM | RRD_ERR_LENGTH)) {
2432 		/*
2433 		 * We want to pass the following frames to the upper
2434 		 * layer regardless of the error status of the Rx
2435 		 * return ring:
2436 		 *
2437 		 *  o IP/TCP/UDP checksum is bad.
2438 		 *  o frame length and protocol specific length
2439 		 *    do not match.
2440 		 *
2441 		 * Force the network stack to compute checksums for
2442 		 * these errored frames.
2443 		 */
2444 		if ((status & (RRD_ERR_CRC | RRD_ERR_ALIGN |
2445 		    RRD_ERR_TRUNC | RRD_ERR_RUNT)) != 0)
2446 			return;
2447 	}
2448 
2449 	for (count = 0; count < nsegs; count++,
2450 	    ALC_DESC_INC(rx_cons, ALC_RX_RING_CNT)) {
2451 		rxd = &sc->alc_cdata.alc_rxdesc[rx_cons];
2452 		mp = rxd->rx_m;
2453 		/* Add a new receive buffer to the ring. */
2454 		if (alc_newbuf(sc, rxd) != 0) {
2455 			ifp->if_iqdrops++;
2456 			/* Reuse Rx buffers. */
2457 			m_freem(sc->alc_cdata.alc_rxhead);
2458 			break;
2459 		}
2460 
2461 		/*
2462 		 * Assume we've received a full-sized frame. The actual
2463 		 * size is fixed up when we encounter the end of a
2464 		 * multi-segmented frame.
2465 		 */
2466 		mp->m_len = sc->alc_buf_size;
2467 
2468 		/* Chain received mbufs. */
2469 		if (sc->alc_cdata.alc_rxhead == NULL) {
2470 			sc->alc_cdata.alc_rxhead = mp;
2471 			sc->alc_cdata.alc_rxtail = mp;
2472 		} else {
2473 			mp->m_flags &= ~M_PKTHDR;
2474 			sc->alc_cdata.alc_rxprev_tail =
2475 			    sc->alc_cdata.alc_rxtail;
2476 			sc->alc_cdata.alc_rxtail->m_next = mp;
2477 			sc->alc_cdata.alc_rxtail = mp;
2478 		}
2479 
2480 		if (count == nsegs - 1) {
2481 			/* Last desc. for this frame. */
2482 			m = sc->alc_cdata.alc_rxhead;
2483 			m->m_flags |= M_PKTHDR;
2484 			/*
2485 			 * It seems that the L1C/L2C controller has no
2486 			 * way to tell hardware to strip CRC bytes.
2487 			 */
2488 			m->m_pkthdr.len =
2489 			    sc->alc_cdata.alc_rxlen - ETHER_CRC_LEN;
2490 			if (nsegs > 1) {
2491 				/* Set last mbuf size. */
2492 				mp->m_len = sc->alc_cdata.alc_rxlen -
2493 				    (nsegs - 1) * sc->alc_buf_size;
2494 				/* Remove the CRC bytes in chained mbufs. */
2495 				if (mp->m_len <= ETHER_CRC_LEN) {
2496 					sc->alc_cdata.alc_rxtail =
2497 					    sc->alc_cdata.alc_rxprev_tail;
2498 					sc->alc_cdata.alc_rxtail->m_len -=
2499 					    (ETHER_CRC_LEN - mp->m_len);
2500 					sc->alc_cdata.alc_rxtail->m_next = NULL;
2501 					m_freem(mp);
2502 				} else {
2503 					mp->m_len -= ETHER_CRC_LEN;
2504 				}
2505 			} else
2506 				m->m_len = m->m_pkthdr.len;
2507 			/*
2508 			 * Due to hardware bugs, Rx checksum offloading
2509 			 * was intentionally disabled.
2510 			 */
2511 #if NVLAN > 0
2512 			if (status & RRD_VLAN_TAG) {
2513 				u_int32_t vtag = RRD_VLAN(letoh32(rrd->vtag));
2514 				m->m_pkthdr.ether_vtag = ntohs(vtag);
2515 				m->m_flags |= M_VLANTAG;
2516 			}
2517 #endif
2518 
2520 			ml_enqueue(&ml, m);
2521 		}
2522 	}
2523 	if_input(ifp, &ml);
2524 
2525 	/* Reset mbuf chains. */
2526 	ALC_RXCHAIN_RESET(sc);
2527 }
2528 
2529 void
2530 alc_tick(void *xsc)
2531 {
2532 	struct alc_softc *sc = xsc;
2533 	struct mii_data *mii = &sc->sc_miibus;
2534 	int s;
2535 
2536 	s = splnet();
2537 	mii_tick(mii);
2538 	alc_stats_update(sc);
2539 
2540 	timeout_add_sec(&sc->alc_tick_ch, 1);
2541 	splx(s);
2542 }
2543 
2544 void
2545 alc_osc_reset(struct alc_softc *sc)
2546 {
2547 	uint32_t reg;
2548 
2549 	reg = CSR_READ_4(sc, ALC_MISC3);
2550 	reg &= ~MISC3_25M_BY_SW;
2551 	reg |= MISC3_25M_NOTO_INTNL;
2552 	CSR_WRITE_4(sc, ALC_MISC3, reg);
2553 	reg = CSR_READ_4(sc, ALC_MISC);
2554 	if (AR816X_REV(sc->alc_rev) >= AR816X_REV_B0) {
2555 		/*
2556 		 * Restore over-current protection default value.
2557 		 * This value could be reset by MAC reset.
2558 		 */
2559 		reg &= ~MISC_PSW_OCP_MASK;
2560 		reg |= (MISC_PSW_OCP_DEFAULT << MISC_PSW_OCP_SHIFT);
2561 		reg &= ~MISC_INTNLOSC_OPEN;
2562 		CSR_WRITE_4(sc, ALC_MISC, reg);
2563 		CSR_WRITE_4(sc, ALC_MISC, reg | MISC_INTNLOSC_OPEN);
2564 		reg = CSR_READ_4(sc, ALC_MISC2);
2565 		reg &= ~MISC2_CALB_START;
2566 		CSR_WRITE_4(sc, ALC_MISC2, reg);
2567 		CSR_WRITE_4(sc, ALC_MISC2, reg | MISC2_CALB_START);
2568 	} else {
2569 		reg &= ~MISC_INTNLOSC_OPEN;
2570 		/* Disable isolate for revision A devices. */
2571 		if (AR816X_REV(sc->alc_rev) <= AR816X_REV_A1)
2572 			reg &= ~MISC_ISO_ENB;
2573 		CSR_WRITE_4(sc, ALC_MISC, reg | MISC_INTNLOSC_OPEN);
2574 		CSR_WRITE_4(sc, ALC_MISC, reg);
2575 	}
2576 	DELAY(20);
2577 }
2578 
2579 void
2580 alc_reset(struct alc_softc *sc)
2581 {
2582 	uint32_t reg, pmcfg = 0;
2583 	int i;
2584 
2585 	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0) {
2586 		/* Reset workaround. */
2587 		CSR_WRITE_4(sc, ALC_MBOX_RD0_PROD_IDX, 1);
2588 		if (AR816X_REV(sc->alc_rev) <= AR816X_REV_A1 &&
2589 		    (sc->alc_rev & 0x01) != 0) {
2590 			/* Disable L0s/L1s before reset. */
2591 			pmcfg = CSR_READ_4(sc, ALC_PM_CFG);
2592 			if ((pmcfg & (PM_CFG_ASPM_L0S_ENB |
2593 			    PM_CFG_ASPM_L1_ENB))!= 0) {
2594 			    PM_CFG_ASPM_L1_ENB)) != 0) {
2595 				    PM_CFG_ASPM_L1_ENB);
2596 				CSR_WRITE_4(sc, ALC_PM_CFG, pmcfg);
2597 			}
2598 		}
2599 	}
2600 	reg = CSR_READ_4(sc, ALC_MASTER_CFG);
2601 	reg |= MASTER_OOB_DIS_OFF | MASTER_RESET;
2602 	CSR_WRITE_4(sc, ALC_MASTER_CFG, reg);
2603 
2604 	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0) {
2605 		for (i = ALC_RESET_TIMEOUT; i > 0; i--) {
2606 			DELAY(10);
2607 			if (CSR_READ_4(sc, ALC_MBOX_RD0_PROD_IDX) == 0)
2608 				break;
2609 		}
2610 		if (i == 0)
2611 			printf("%s: MAC reset timeout!\n", sc->sc_dev.dv_xname);
2612 	}
2613 	for (i = ALC_RESET_TIMEOUT; i > 0; i--) {
2614 		DELAY(10);
2615 		if ((CSR_READ_4(sc, ALC_MASTER_CFG) & MASTER_RESET) == 0)
2616 			break;
2617 	}
2618 	if (i == 0)
2619 		printf("%s: master reset timeout!\n", sc->sc_dev.dv_xname);
2620 
2621 	for (i = ALC_RESET_TIMEOUT; i > 0; i--) {
2622 		reg = CSR_READ_4(sc, ALC_IDLE_STATUS);
2623 		if ((reg & (IDLE_STATUS_RXMAC | IDLE_STATUS_TXMAC |
2624 		    IDLE_STATUS_RXQ | IDLE_STATUS_TXQ)) == 0)
2625 			break;
2626 		DELAY(10);
2627 	}
2628 
2629 	if (i == 0)
2630 		printf("%s: reset timeout (0x%08x)!\n", sc->sc_dev.dv_xname,
2631 		    reg);
2632 
2633 	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0) {
2634 		if (AR816X_REV(sc->alc_rev) <= AR816X_REV_A1 &&
2635 		    (sc->alc_rev & 0x01) != 0) {
2636 			reg = CSR_READ_4(sc, ALC_MASTER_CFG);
2637 			reg |= MASTER_CLK_SEL_DIS;
2638 			CSR_WRITE_4(sc, ALC_MASTER_CFG, reg);
2639 			/* Restore L0s/L1s config. */
2640 			if ((pmcfg & (PM_CFG_ASPM_L0S_ENB |
2641 			    PM_CFG_ASPM_L1_ENB)) != 0)
2642 				CSR_WRITE_4(sc, ALC_PM_CFG, pmcfg);
2643 		}
2644 		alc_osc_reset(sc);
2645 		reg = CSR_READ_4(sc, ALC_MISC3);
2646 		reg &= ~MISC3_25M_BY_SW;
2647 		reg |= MISC3_25M_NOTO_INTNL;
2648 		CSR_WRITE_4(sc, ALC_MISC3, reg);
2649 		reg = CSR_READ_4(sc, ALC_MISC);
2650 		reg &= ~MISC_INTNLOSC_OPEN;
2651 		if (AR816X_REV(sc->alc_rev) <= AR816X_REV_A1)
2652 			reg &= ~MISC_ISO_ENB;
2653 		CSR_WRITE_4(sc, ALC_MISC, reg);
2654 		DELAY(20);
2655 	}
2656 	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0 ||
2657 	    sc->sc_product == PCI_PRODUCT_ATTANSIC_L2C_1 ||
2658 	    sc->sc_product == PCI_PRODUCT_ATTANSIC_L2C_2)
2659 		CSR_WRITE_4(sc, ALC_SERDES_LOCK,
2660 		    CSR_READ_4(sc, ALC_SERDES_LOCK) |
2661 		    SERDES_MAC_CLK_SLOWDOWN | SERDES_PHY_CLK_SLOWDOWN);
2662 }
2663 
2664 int
2665 alc_init(struct ifnet *ifp)
2666 {
2667 	struct alc_softc *sc = ifp->if_softc;
2668 	uint8_t eaddr[ETHER_ADDR_LEN];
2669 	bus_addr_t paddr;
2670 	uint32_t reg, rxf_hi, rxf_lo;
2671 	int error;
2672 
2673 	/*
2674 	 * Cancel any pending I/O.
2675 	 */
2676 	alc_stop(sc);
2677 	/*
2678 	 * Reset the chip to a known state.
2679 	 */
2680 	alc_reset(sc);
2681 
2682 	/* Initialize Rx descriptors. */
2683 	error = alc_init_rx_ring(sc);
2684 	if (error != 0) {
2685 		printf("%s: no memory for Rx buffers.\n", sc->sc_dev.dv_xname);
2686 		alc_stop(sc);
2687 		return (error);
2688 	}
2689 	alc_init_rr_ring(sc);
2690 	alc_init_tx_ring(sc);
2691 	alc_init_cmb(sc);
2692 	alc_init_smb(sc);
2693 
2694 	/* Enable all clocks. */
2695 	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0) {
2696 		CSR_WRITE_4(sc, ALC_CLK_GATING_CFG, CLK_GATING_DMAW_ENB |
2697 		    CLK_GATING_DMAR_ENB | CLK_GATING_TXQ_ENB |
2698 		    CLK_GATING_RXQ_ENB | CLK_GATING_TXMAC_ENB |
2699 		    CLK_GATING_RXMAC_ENB);
2700 		if (AR816X_REV(sc->alc_rev) >= AR816X_REV_B0)
2701 			CSR_WRITE_4(sc, ALC_IDLE_DECISN_TIMER,
2702 			    IDLE_DECISN_TIMER_DEFAULT_1MS);
2703 	} else
2704 		CSR_WRITE_4(sc, ALC_CLK_GATING_CFG, 0);
2705 
2706 	/* Reprogram the station address. */
2707 	bcopy(LLADDR(ifp->if_sadl), eaddr, ETHER_ADDR_LEN);
2708 	CSR_WRITE_4(sc, ALC_PAR0,
2709 	    eaddr[2] << 24 | eaddr[3] << 16 | eaddr[4] << 8 | eaddr[5]);
2710 	CSR_WRITE_4(sc, ALC_PAR1, eaddr[0] << 8 | eaddr[1]);
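	/*
	 * For example, the address 00:11:22:33:44:55 is programmed
	 * as PAR0 = 0x22334455 and PAR1 = 0x0011.
	 */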
2711 	/*
2712 	 * Clear WOL status and disable all WOL features, as WOL
2713 	 * would interfere with Rx operation under normal environments.
2714 	 */
2715 	CSR_READ_4(sc, ALC_WOL_CFG);
2716 	CSR_WRITE_4(sc, ALC_WOL_CFG, 0);
2717 	/* Set Tx descriptor base addresses. */
2718 	paddr = sc->alc_rdata.alc_tx_ring_paddr;
2719 	CSR_WRITE_4(sc, ALC_TX_BASE_ADDR_HI, ALC_ADDR_HI(paddr));
2720 	CSR_WRITE_4(sc, ALC_TDL_HEAD_ADDR_LO, ALC_ADDR_LO(paddr));
2721 	/* We don't use high priority ring. */
2722 	CSR_WRITE_4(sc, ALC_TDH_HEAD_ADDR_LO, 0);
2723 	/* Set Tx descriptor counter. */
2724 	CSR_WRITE_4(sc, ALC_TD_RING_CNT,
2725 	    (ALC_TX_RING_CNT << TD_RING_CNT_SHIFT) & TD_RING_CNT_MASK);
2726 	/* Set Rx descriptor base addresses. */
2727 	paddr = sc->alc_rdata.alc_rx_ring_paddr;
2728 	CSR_WRITE_4(sc, ALC_RX_BASE_ADDR_HI, ALC_ADDR_HI(paddr));
2729 	CSR_WRITE_4(sc, ALC_RD0_HEAD_ADDR_LO, ALC_ADDR_LO(paddr));
2730 	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) == 0) {
2731 		/* We use one Rx ring. */
2732 		CSR_WRITE_4(sc, ALC_RD1_HEAD_ADDR_LO, 0);
2733 		CSR_WRITE_4(sc, ALC_RD2_HEAD_ADDR_LO, 0);
2734 		CSR_WRITE_4(sc, ALC_RD3_HEAD_ADDR_LO, 0);
2735 	}
2736 	/* Set Rx descriptor counter. */
2737 	CSR_WRITE_4(sc, ALC_RD_RING_CNT,
2738 	    (ALC_RX_RING_CNT << RD_RING_CNT_SHIFT) & RD_RING_CNT_MASK);
2739 
2740 	/*
2741 	 * Let hardware split jumbo frames into alc_max_buf_sized
2742 	 * chunks if they do not fit the buffer size. The Rx return
2743 	 * descriptor holds a counter that indicates how many fragments
2744 	 * were made by the hardware. The buffer size should be a
2745 	 * multiple of 8 bytes. Since hardware limits the buffer size,
2746 	 * always use the maximum value.
2747 	 * For strict-alignment architectures make sure to reduce the
2748 	 * buffer size by 8 bytes to make room for the alignment fixup.
2749 	 */
2750 	sc->alc_buf_size = RX_BUF_SIZE_MAX;
2751 	CSR_WRITE_4(sc, ALC_RX_BUF_SIZE, sc->alc_buf_size);
2752 
2753 	paddr = sc->alc_rdata.alc_rr_ring_paddr;
2754 	/* Set Rx return descriptor base addresses. */
2755 	CSR_WRITE_4(sc, ALC_RRD0_HEAD_ADDR_LO, ALC_ADDR_LO(paddr));
2756 	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) == 0) {
2757 		/* We use one Rx return ring. */
2758 		CSR_WRITE_4(sc, ALC_RRD1_HEAD_ADDR_LO, 0);
2759 		CSR_WRITE_4(sc, ALC_RRD2_HEAD_ADDR_LO, 0);
2760 		CSR_WRITE_4(sc, ALC_RRD3_HEAD_ADDR_LO, 0);
2761 	}
2762 	/* Set Rx return descriptor counter. */
2763 	CSR_WRITE_4(sc, ALC_RRD_RING_CNT,
2764 	    (ALC_RR_RING_CNT << RRD_RING_CNT_SHIFT) & RRD_RING_CNT_MASK);
2765 	paddr = sc->alc_rdata.alc_cmb_paddr;
2766 	CSR_WRITE_4(sc, ALC_CMB_BASE_ADDR_LO, ALC_ADDR_LO(paddr));
2767 	paddr = sc->alc_rdata.alc_smb_paddr;
2768 	CSR_WRITE_4(sc, ALC_SMB_BASE_ADDR_HI, ALC_ADDR_HI(paddr));
2769 	CSR_WRITE_4(sc, ALC_SMB_BASE_ADDR_LO, ALC_ADDR_LO(paddr));
2770 
2771 	if (sc->sc_product == PCI_PRODUCT_ATTANSIC_L2C_1) {
2772 		/* Reconfigure SRAM - Vendor magic. */
2773 		CSR_WRITE_4(sc, ALC_SRAM_RX_FIFO_LEN, 0x000002A0);
2774 		CSR_WRITE_4(sc, ALC_SRAM_TX_FIFO_LEN, 0x00000100);
2775 		CSR_WRITE_4(sc, ALC_SRAM_RX_FIFO_ADDR, 0x029F0000);
2776 		CSR_WRITE_4(sc, ALC_SRAM_RD0_ADDR, 0x02BF02A0);
2777 		CSR_WRITE_4(sc, ALC_SRAM_TX_FIFO_ADDR, 0x03BF02C0);
2778 		CSR_WRITE_4(sc, ALC_SRAM_TD_ADDR, 0x03DF03C0);
2779 		CSR_WRITE_4(sc, ALC_TXF_WATER_MARK, 0x00000000);
2780 		CSR_WRITE_4(sc, ALC_RD_DMA_CFG, 0x00000000);
2781 	}
2782 
2783 	/* Tell hardware that we're ready to load DMA blocks. */
2784 	CSR_WRITE_4(sc, ALC_DMA_BLOCK, DMA_BLOCK_LOAD);
2785 
2786 	/* Configure interrupt moderation timer. */
2787 	sc->alc_int_rx_mod = ALC_IM_RX_TIMER_DEFAULT;
2788 	sc->alc_int_tx_mod = ALC_IM_TX_TIMER_DEFAULT;
2789 	reg = ALC_USECS(sc->alc_int_rx_mod) << IM_TIMER_RX_SHIFT;
2790 	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) == 0)
2791 		reg |= ALC_USECS(sc->alc_int_tx_mod) << IM_TIMER_TX_SHIFT;
2792 	CSR_WRITE_4(sc, ALC_IM_TIMER, reg);
2793 	/*
2794 	 * We don't want automatic interrupt clearing, as the taskq
2795 	 * handler for the interrupt should know the interrupt status.
2796 	 */
2797 	reg = CSR_READ_4(sc, ALC_MASTER_CFG);
2798 	reg &= ~(MASTER_IM_RX_TIMER_ENB | MASTER_IM_TX_TIMER_ENB);
2799 	reg |= MASTER_SA_TIMER_ENB;
2800 	if (ALC_USECS(sc->alc_int_rx_mod) != 0)
2801 		reg |= MASTER_IM_RX_TIMER_ENB;
2802 	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) == 0 &&
2803 	    ALC_USECS(sc->alc_int_tx_mod) != 0)
2804 		reg |= MASTER_IM_TX_TIMER_ENB;
2805 	CSR_WRITE_4(sc, ALC_MASTER_CFG, reg);
2806 	/*
2807 	 * Disable interrupt re-trigger timer. We don't want automatic
2808 	 * re-triggering of un-ACKed interrupts.
2809 	 */
2810 	CSR_WRITE_4(sc, ALC_INTR_RETRIG_TIMER, ALC_USECS(0));
2811 	/* Configure CMB. */
2812 	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0) {
2813 		CSR_WRITE_4(sc, ALC_CMB_TD_THRESH, ALC_TX_RING_CNT / 3);
2814 		CSR_WRITE_4(sc, ALC_CMB_TX_TIMER,
2815 		    ALC_USECS(sc->alc_int_tx_mod));
2816 	} else {
2817 		if ((sc->alc_flags & ALC_FLAG_CMB_BUG) == 0) {
2818 			CSR_WRITE_4(sc, ALC_CMB_TD_THRESH, 4);
2819 			CSR_WRITE_4(sc, ALC_CMB_TX_TIMER, ALC_USECS(5000));
2820 		} else
2821 			CSR_WRITE_4(sc, ALC_CMB_TX_TIMER, ALC_USECS(0));
2822 	}
2823 	/*
2824 	 * Hardware can be configured to issue an SMB interrupt based
2825 	 * on a programmed interval. Since there is a callout that is
2826 	 * invoked every second in the driver, we use that instead of
2827 	 * relying on the periodic SMB interrupt.
2828 	 */
2829 	CSR_WRITE_4(sc, ALC_SMB_STAT_TIMER, ALC_USECS(0));
2830 	/* Clear MAC statistics. */
2831 	alc_stats_clear(sc);
2832 
2833 	/*
2834 	 * Always use the maximum frame size the controller can support.
2835 	 * Otherwise received frames with a larger frame length than
2836 	 * the alc(4) MTU would be silently dropped in hardware, which
2837 	 * would make path-MTU discovery hard as the sender wouldn't
2838 	 * get any responses from the receiver. alc(4) supports
2839 	 * multi-fragmented frames on the Rx path, so it has no issue
2840 	 * assembling fragmented frames. Using the maximum frame size
2841 	 * also removes the need to reinitialize hardware when the
2842 	 * interface MTU configuration is changed.
2843 	 *
2844 	 * Be conservative in what you do, be liberal in what you
2845 	 * accept from others - RFC 793.
2846 	 */
2847 	CSR_WRITE_4(sc, ALC_FRAME_SIZE, sc->alc_max_framelen);
2848 
2849 	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) == 0) {
2850 		/* Disable header split(?) */
2851 		CSR_WRITE_4(sc, ALC_HDS_CFG, 0);
2852 		/* Configure IPG/IFG parameters. */
2853 		CSR_WRITE_4(sc, ALC_IPG_IFG_CFG,
2854 		    ((IPG_IFG_IPGT_DEFAULT << IPG_IFG_IPGT_SHIFT) &
2855 		    IPG_IFG_IPGT_MASK) |
2856 		    ((IPG_IFG_MIFG_DEFAULT << IPG_IFG_MIFG_SHIFT) &
2857 		    IPG_IFG_MIFG_MASK) |
2858 		    ((IPG_IFG_IPG1_DEFAULT << IPG_IFG_IPG1_SHIFT) &
2859 		    IPG_IFG_IPG1_MASK) |
2860 		    ((IPG_IFG_IPG2_DEFAULT << IPG_IFG_IPG2_SHIFT) &
2861 		    IPG_IFG_IPG2_MASK));
2862 		/* Set parameters for half-duplex media. */
2863 		CSR_WRITE_4(sc, ALC_HDPX_CFG,
2864 		    ((HDPX_CFG_LCOL_DEFAULT << HDPX_CFG_LCOL_SHIFT) &
2865 		    HDPX_CFG_LCOL_MASK) |
2866 		    ((HDPX_CFG_RETRY_DEFAULT << HDPX_CFG_RETRY_SHIFT) &
2867 		    HDPX_CFG_RETRY_MASK) | HDPX_CFG_EXC_DEF_EN |
2868 		    ((HDPX_CFG_ABEBT_DEFAULT << HDPX_CFG_ABEBT_SHIFT) &
2869 		    HDPX_CFG_ABEBT_MASK) |
2870 		    ((HDPX_CFG_JAMIPG_DEFAULT << HDPX_CFG_JAMIPG_SHIFT) &
2871 		    HDPX_CFG_JAMIPG_MASK));
2872 	}
2873 
2874 	/*
2875 	 * Set the TSO/checksum offload threshold. For frames larger
2876 	 * than this threshold, hardware doesn't do TSO/checksum
2877 	 * offloading.
2878 	 */
2879 	reg = (sc->alc_max_framelen >> TSO_OFFLOAD_THRESH_UNIT_SHIFT) &
2880 	    TSO_OFFLOAD_THRESH_MASK;
2881 	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0)
2882 		reg |= TSO_OFFLOAD_ERRLGPKT_DROP_ENB;
2883 	CSR_WRITE_4(sc, ALC_TSO_OFFLOAD_THRESH, reg);
2884 	/* Configure TxQ. */
2885 	reg = (alc_dma_burst[sc->alc_dma_rd_burst] <<
2886 	    TXQ_CFG_TX_FIFO_BURST_SHIFT) & TXQ_CFG_TX_FIFO_BURST_MASK;
2887 	if (sc->sc_product == PCI_PRODUCT_ATTANSIC_L2C_1 ||
2888 	    sc->sc_product == PCI_PRODUCT_ATTANSIC_L2C_2)
2889 		reg >>= 1;
2890 	reg |= (TXQ_CFG_TD_BURST_DEFAULT << TXQ_CFG_TD_BURST_SHIFT) &
2891 	    TXQ_CFG_TD_BURST_MASK;
2892 	reg |= TXQ_CFG_IP_OPTION_ENB | TXQ_CFG_8023_ENB;
2893 	CSR_WRITE_4(sc, ALC_TXQ_CFG, reg | TXQ_CFG_ENHANCED_MODE);
2894 	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0) {
2895 		reg = (TXQ_CFG_TD_BURST_DEFAULT << HQTD_CFG_Q1_BURST_SHIFT |
2896 		    TXQ_CFG_TD_BURST_DEFAULT << HQTD_CFG_Q2_BURST_SHIFT |
2897 		    TXQ_CFG_TD_BURST_DEFAULT << HQTD_CFG_Q3_BURST_SHIFT |
2898 		    HQTD_CFG_BURST_ENB);
2899 		CSR_WRITE_4(sc, ALC_HQTD_CFG, reg);
2900 		reg = WRR_PRI_RESTRICT_NONE;
2901 		reg |= (WRR_PRI_DEFAULT << WRR_PRI0_SHIFT |
2902 		    WRR_PRI_DEFAULT << WRR_PRI1_SHIFT |
2903 		    WRR_PRI_DEFAULT << WRR_PRI2_SHIFT |
2904 		    WRR_PRI_DEFAULT << WRR_PRI3_SHIFT);
2905 		CSR_WRITE_4(sc, ALC_WRR, reg);
2906 	} else {
2907 		/* Configure Rx free descriptor pre-fetching. */
2908 		CSR_WRITE_4(sc, ALC_RX_RD_FREE_THRESH,
2909 		    ((RX_RD_FREE_THRESH_HI_DEFAULT <<
2910 		    RX_RD_FREE_THRESH_HI_SHIFT) & RX_RD_FREE_THRESH_HI_MASK) |
2911 		    ((RX_RD_FREE_THRESH_LO_DEFAULT <<
2912 		    RX_RD_FREE_THRESH_LO_SHIFT) & RX_RD_FREE_THRESH_LO_MASK));
2913 	}
2914 
2915 	/*
2916 	 * Configure flow control parameters.
2917 	 * XON  : 80% of Rx FIFO
2918 	 * XOFF : 30% of Rx FIFO
2919 	 */
2920 	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0) {
2921 		reg = CSR_READ_4(sc, ALC_SRAM_RX_FIFO_LEN);
2922 		reg &= SRAM_RX_FIFO_LEN_MASK;
2923 		reg *= 8;
2924 		if (reg > 8 * 1024)
2925 			reg -= RX_FIFO_PAUSE_816X_RSVD;
2926 		else
2927 			reg -= RX_BUF_SIZE_MAX;
2928 		reg /= 8;
2929 		CSR_WRITE_4(sc, ALC_RX_FIFO_PAUSE_THRESH,
2930 		    ((reg << RX_FIFO_PAUSE_THRESH_LO_SHIFT) &
2931 		    RX_FIFO_PAUSE_THRESH_LO_MASK) |
2932 		    (((RX_FIFO_PAUSE_816X_RSVD / 8) <<
2933 		    RX_FIFO_PAUSE_THRESH_HI_SHIFT) &
2934 		    RX_FIFO_PAUSE_THRESH_HI_MASK));
2935 	} else if (sc->sc_product == PCI_PRODUCT_ATTANSIC_L1C ||
2936 	    sc->sc_product == PCI_PRODUCT_ATTANSIC_L2C) {
2937 		reg = CSR_READ_4(sc, ALC_SRAM_RX_FIFO_LEN);
2938 		rxf_hi = (reg * 8) / 10;
2939 		rxf_lo = (reg * 3) / 10;
2940 		CSR_WRITE_4(sc, ALC_RX_FIFO_PAUSE_THRESH,
2941 		    ((rxf_lo << RX_FIFO_PAUSE_THRESH_LO_SHIFT) &
2942 		    RX_FIFO_PAUSE_THRESH_LO_MASK) |
2943 		    ((rxf_hi << RX_FIFO_PAUSE_THRESH_HI_SHIFT) &
2944 		    RX_FIFO_PAUSE_THRESH_HI_MASK));
2945 	}
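	/*
	 * For instance, a FIFO length value of 1000 from the SRAM
	 * register would program a high watermark of 800 and a low
	 * watermark of 300, the 80%/30% points noted above.
	 */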
2946 
2947 	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) == 0) {
2948 		/* Disable RSS until I understand L1C/L2C's RSS logic. */
2949 		CSR_WRITE_4(sc, ALC_RSS_IDT_TABLE0, 0);
2950 		CSR_WRITE_4(sc, ALC_RSS_CPU, 0);
2951 	}
2952 
2953 	/* Configure RxQ. */
2954 	reg = (RXQ_CFG_RD_BURST_DEFAULT << RXQ_CFG_RD_BURST_SHIFT) &
2955 	    RXQ_CFG_RD_BURST_MASK;
2956 	reg |= RXQ_CFG_RSS_MODE_DIS;
2957 	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0) {
2958 		reg |= (RXQ_CFG_816X_IDT_TBL_SIZE_DEFAULT <<
2959 		    RXQ_CFG_816X_IDT_TBL_SIZE_SHIFT) &
2960 		    RXQ_CFG_816X_IDT_TBL_SIZE_MASK;
2961 		if ((sc->alc_flags & ALC_FLAG_FASTETHER) == 0)
2962 			reg |= RXQ_CFG_ASPM_THROUGHPUT_LIMIT_100M;
2963 	} else {
2964 		if ((sc->alc_flags & ALC_FLAG_FASTETHER) == 0 &&
2965 		    sc->sc_product != PCI_PRODUCT_ATTANSIC_L1D_1)
2966 			reg |= RXQ_CFG_ASPM_THROUGHPUT_LIMIT_100M;
2967 	}
2968 	CSR_WRITE_4(sc, ALC_RXQ_CFG, reg);
2969 
2970 	/* Configure DMA parameters. */
2971 	reg = DMA_CFG_OUT_ORDER | DMA_CFG_RD_REQ_PRI;
2972 	reg |= sc->alc_rcb;
2973 	if ((sc->alc_flags & ALC_FLAG_CMB_BUG) == 0)
2974 		reg |= DMA_CFG_CMB_ENB;
2975 	if ((sc->alc_flags & ALC_FLAG_SMB_BUG) == 0)
2976 		reg |= DMA_CFG_SMB_ENB;
2977 	else
2978 		reg |= DMA_CFG_SMB_DIS;
2979 	reg |= (sc->alc_dma_rd_burst & DMA_CFG_RD_BURST_MASK) <<
2980 	    DMA_CFG_RD_BURST_SHIFT;
2981 	reg |= (sc->alc_dma_wr_burst & DMA_CFG_WR_BURST_MASK) <<
2982 	    DMA_CFG_WR_BURST_SHIFT;
2983 	reg |= (DMA_CFG_RD_DELAY_CNT_DEFAULT << DMA_CFG_RD_DELAY_CNT_SHIFT) &
2984 	    DMA_CFG_RD_DELAY_CNT_MASK;
2985 	reg |= (DMA_CFG_WR_DELAY_CNT_DEFAULT << DMA_CFG_WR_DELAY_CNT_SHIFT) &
2986 	    DMA_CFG_WR_DELAY_CNT_MASK;
2987 	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0) {
2988 		switch (AR816X_REV(sc->alc_rev)) {
2989 		case AR816X_REV_A0:
2990 		case AR816X_REV_A1:
2991 			reg |= DMA_CFG_RD_CHNL_SEL_2;
2992 			break;
2993 		case AR816X_REV_B0:
2994 			/* FALLTHROUGH */
2995 		default:
2996 			reg |= DMA_CFG_RD_CHNL_SEL_4;
2997 			break;
2998 		}
2999 	}
3000 	CSR_WRITE_4(sc, ALC_DMA_CFG, reg);
3001 
3002 	/*
3003 	 * Configure Tx/Rx MACs.
3004 	 *  - Auto-padding for short frames.
3005 	 *  - Enable CRC generation.
3006 	 *  Actual reconfiguration of the MAC for the resolved
3007 	 *  speed/duplex follows once link establishment is detected.
3008 	 *  AR813x/AR815x always does checksum computation regardless
3009 	 *  of the MAC_CFG_RXCSUM_ENB bit. Also the controller is known
3010 	 *  to have a bug in the protocol field of the Rx return
3011 	 *  structure, so these controllers can't handle fragmented
3012 	 *  frames. Disable Rx checksum offloading until there is a
3013 	 *  newer controller with a sane implementation.
3014 	 */
3015 	reg = MAC_CFG_TX_CRC_ENB | MAC_CFG_TX_AUTO_PAD | MAC_CFG_FULL_DUPLEX |
3016 	    ((MAC_CFG_PREAMBLE_DEFAULT << MAC_CFG_PREAMBLE_SHIFT) &
3017 	    MAC_CFG_PREAMBLE_MASK);
3018 	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0 ||
3019 	    sc->sc_product == PCI_PRODUCT_ATTANSIC_L1D ||
3020 	    sc->sc_product == PCI_PRODUCT_ATTANSIC_L1D_1 ||
3021 	    sc->sc_product == PCI_PRODUCT_ATTANSIC_L2C_2)
3022 		reg |= MAC_CFG_HASH_ALG_CRC32 | MAC_CFG_SPEED_MODE_SW;
3023 	if ((sc->alc_flags & ALC_FLAG_FASTETHER) != 0)
3024 		reg |= MAC_CFG_SPEED_10_100;
3025 	else
3026 		reg |= MAC_CFG_SPEED_1000;
3027 	CSR_WRITE_4(sc, ALC_MAC_CFG, reg);
3028 
3029 	/* Set up the receive filter. */
3030 	alc_iff(sc);
3031 
3032 	alc_rxvlan(sc);
3033 
3034 	/* Acknowledge all pending interrupts and clear them. */
3035 	CSR_WRITE_4(sc, ALC_INTR_MASK, ALC_INTRS);
3036 	CSR_WRITE_4(sc, ALC_INTR_STATUS, 0xFFFFFFFF);
3037 	CSR_WRITE_4(sc, ALC_INTR_STATUS, 0);
3038 
3039 	ifp->if_flags |= IFF_RUNNING;
3040 	ifq_clr_oactive(&ifp->if_snd);
3041 
3042 	sc->alc_flags &= ~ALC_FLAG_LINK;
3043 	/* Switch to the current media. */
3044 	alc_mediachange(ifp);
3045 
3046 	timeout_add_sec(&sc->alc_tick_ch, 1);
3047 
3048 	return (0);
3049 }
3050 
3051 void
3052 alc_stop(struct alc_softc *sc)
3053 {
3054 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
3055 	struct alc_txdesc *txd;
3056 	struct alc_rxdesc *rxd;
3057 	uint32_t reg;
3058 	int i;
3059 
3060 	/*
3061 	 * Mark the interface down and cancel the watchdog timer.
3062 	 */
3063 	ifp->if_flags &= ~IFF_RUNNING;
3064 	ifq_clr_oactive(&ifp->if_snd);
3065 	ifp->if_timer = 0;
3066 
3067 	timeout_del(&sc->alc_tick_ch);
3068 	sc->alc_flags &= ~ALC_FLAG_LINK;
3069 
3070 	alc_stats_update(sc);
3071 
3072 	/* Disable interrupts. */
3073 	CSR_WRITE_4(sc, ALC_INTR_MASK, 0);
3074 	CSR_WRITE_4(sc, ALC_INTR_STATUS, 0xFFFFFFFF);
3075 
3076 	/* Disable DMA. */
3077 	reg = CSR_READ_4(sc, ALC_DMA_CFG);
3078 	reg &= ~(DMA_CFG_CMB_ENB | DMA_CFG_SMB_ENB);
3079 	reg |= DMA_CFG_SMB_DIS;
3080 	CSR_WRITE_4(sc, ALC_DMA_CFG, reg);
3081 	DELAY(1000);
3082 
3083 	/* Stop Rx/Tx MACs. */
3084 	alc_stop_mac(sc);
3085 
3086 	/* Disable interrupts which might be touched in taskq handler. */
3087 	CSR_WRITE_4(sc, ALC_INTR_STATUS, 0xFFFFFFFF);
3088 
3089 	/* Disable L0s/L1s */
3090 	reg = CSR_READ_4(sc, ALC_PM_CFG);
3091 	if ((reg & (PM_CFG_ASPM_L0S_ENB | PM_CFG_ASPM_L1_ENB)) != 0) {
3092 		reg &= ~(PM_CFG_ASPM_L0S_ENB | PM_CFG_ASPM_L1_ENB);
3093 		CSR_WRITE_4(sc, ALC_PM_CFG, reg);
3094 	}
3095 
3096 	/* Reclaim Rx buffers that have been processed. */
3097 	m_freem(sc->alc_cdata.alc_rxhead);
3098 	ALC_RXCHAIN_RESET(sc);
3099 	/*
3100 	 * Free Tx/Rx mbufs still in the queues.
3101 	 */
3102 	for (i = 0; i < ALC_RX_RING_CNT; i++) {
3103 		rxd = &sc->alc_cdata.alc_rxdesc[i];
3104 		if (rxd->rx_m != NULL) {
3105 			bus_dmamap_sync(sc->sc_dmat, rxd->rx_dmamap, 0,
3106 			    rxd->rx_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
3107 			bus_dmamap_unload(sc->sc_dmat, rxd->rx_dmamap);
3108 			m_freem(rxd->rx_m);
3109 			rxd->rx_m = NULL;
3110 		}
3111 	}
3112 	for (i = 0; i < ALC_TX_RING_CNT; i++) {
3113 		txd = &sc->alc_cdata.alc_txdesc[i];
3114 		if (txd->tx_m != NULL) {
3115 			bus_dmamap_sync(sc->sc_dmat, txd->tx_dmamap, 0,
3116 			    txd->tx_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
3117 			bus_dmamap_unload(sc->sc_dmat, txd->tx_dmamap);
3118 			m_freem(txd->tx_m);
3119 			txd->tx_m = NULL;
3120 		}
3121 	}
3122 }
3123 
3124 void
3125 alc_stop_mac(struct alc_softc *sc)
3126 {
3127 	uint32_t reg;
3128 	int i;
3129 
3130 	alc_stop_queue(sc);
3131 	/* Disable Rx/Tx MAC. */
3132 	reg = CSR_READ_4(sc, ALC_MAC_CFG);
3133 	if ((reg & (MAC_CFG_TX_ENB | MAC_CFG_RX_ENB)) != 0) {
3134 		reg &= ~(MAC_CFG_TX_ENB | MAC_CFG_RX_ENB);
3135 		CSR_WRITE_4(sc, ALC_MAC_CFG, reg);
3136 	}
3137 	for (i = ALC_TIMEOUT; i > 0; i--) {
3138 		reg = CSR_READ_4(sc, ALC_IDLE_STATUS);
3139 		if ((reg & (IDLE_STATUS_RXMAC | IDLE_STATUS_TXMAC)) == 0)
3140 			break;
3141 		DELAY(10);
3142 	}
3143 	if (i == 0)
3144 		printf("%s: could not disable Rx/Tx MAC (0x%08x)!\n",
3145 		    sc->sc_dev.dv_xname, reg);
3146 }
3147 
3148 void
3149 alc_start_queue(struct alc_softc *sc)
3150 {
3151 	uint32_t qcfg[] = {
3152 		0,
3153 		RXQ_CFG_QUEUE0_ENB,
3154 		RXQ_CFG_QUEUE0_ENB | RXQ_CFG_QUEUE1_ENB,
3155 		RXQ_CFG_QUEUE0_ENB | RXQ_CFG_QUEUE1_ENB | RXQ_CFG_QUEUE2_ENB,
3156 		RXQ_CFG_ENB
3157 	};
3158 	uint32_t cfg;
3159 
3160 	/* Enable RxQ. */
3161 	cfg = CSR_READ_4(sc, ALC_RXQ_CFG);
3162 	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) == 0) {
3163 		cfg &= ~RXQ_CFG_ENB;
3164 		cfg |= qcfg[1];
3165 	} else
3166 		cfg |= RXQ_CFG_QUEUE0_ENB;
3167 
3168 	CSR_WRITE_4(sc, ALC_RXQ_CFG, cfg);
3169 	/* Enable TxQ. */
3170 	cfg = CSR_READ_4(sc, ALC_TXQ_CFG);
3171 	cfg |= TXQ_CFG_ENB;
3172 	CSR_WRITE_4(sc, ALC_TXQ_CFG, cfg);
3173 }
3174 
3175 void
3176 alc_stop_queue(struct alc_softc *sc)
3177 {
3178 	uint32_t reg;
3179 	int i;
3180 
3181 	/* Disable RxQ. */
3182 	reg = CSR_READ_4(sc, ALC_RXQ_CFG);
3183 	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) == 0) {
3184 		if ((reg & RXQ_CFG_ENB) != 0) {
3185 			reg &= ~RXQ_CFG_ENB;
3186 			CSR_WRITE_4(sc, ALC_RXQ_CFG, reg);
3187 		}
3188 	} else {
3189 		if ((reg & RXQ_CFG_QUEUE0_ENB) != 0) {
3190 			reg &= ~RXQ_CFG_QUEUE0_ENB;
3191 			CSR_WRITE_4(sc, ALC_RXQ_CFG, reg);
3192 		}
3193 	}
3194 	/* Disable TxQ. */
3195 	reg = CSR_READ_4(sc, ALC_TXQ_CFG);
3196 	if ((reg & TXQ_CFG_ENB) != 0) {
3197 		reg &= ~TXQ_CFG_ENB;
3198 		CSR_WRITE_4(sc, ALC_TXQ_CFG, reg);
3199 	}
3200 	DELAY(40);
3201 	for (i = ALC_TIMEOUT; i > 0; i--) {
3202 		reg = CSR_READ_4(sc, ALC_IDLE_STATUS);
3203 		if ((reg & (IDLE_STATUS_RXQ | IDLE_STATUS_TXQ)) == 0)
3204 			break;
3205 		DELAY(10);
3206 	}
3207 	if (i == 0)
3208 		printf("%s: could not disable RxQ/TxQ (0x%08x)!\n",
3209 		    sc->sc_dev.dv_xname, reg);
3210 }
3211 
3212 void
3213 alc_init_tx_ring(struct alc_softc *sc)
3214 {
3215 	struct alc_ring_data *rd;
3216 	struct alc_txdesc *txd;
3217 	int i;
3218 
3219 	sc->alc_cdata.alc_tx_prod = 0;
3220 	sc->alc_cdata.alc_tx_cons = 0;
3221 	sc->alc_cdata.alc_tx_cnt = 0;
3222 
3223 	rd = &sc->alc_rdata;
3224 	bzero(rd->alc_tx_ring, ALC_TX_RING_SZ);
3225 	for (i = 0; i < ALC_TX_RING_CNT; i++) {
3226 		txd = &sc->alc_cdata.alc_txdesc[i];
3227 		txd->tx_m = NULL;
3228 	}
3229 
3230 	bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_tx_ring_map, 0,
3231 	    sc->alc_cdata.alc_tx_ring_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
3232 }
3233 
3234 int
3235 alc_init_rx_ring(struct alc_softc *sc)
3236 {
3237 	struct alc_ring_data *rd;
3238 	struct alc_rxdesc *rxd;
3239 	int i;
3240 
3241 	sc->alc_cdata.alc_rx_cons = ALC_RX_RING_CNT - 1;
3242 	rd = &sc->alc_rdata;
3243 	bzero(rd->alc_rx_ring, ALC_RX_RING_SZ);
3244 	for (i = 0; i < ALC_RX_RING_CNT; i++) {
3245 		rxd = &sc->alc_cdata.alc_rxdesc[i];
3246 		rxd->rx_m = NULL;
3247 		rxd->rx_desc = &rd->alc_rx_ring[i];
3248 		if (alc_newbuf(sc, rxd) != 0)
3249 			return (ENOBUFS);
3250 	}
3251 
3252 	/*
3253 	 * Since the controller does not update Rx descriptors, the
3254 	 * driver does not have to read them back, so
3255 	 * BUS_DMASYNC_PREWRITE is enough to ensure coherency.
3256 	 */
3257 	bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_rx_ring_map, 0,
3258 	    sc->alc_cdata.alc_rx_ring_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
3259 	/* Let controller know availability of new Rx buffers. */
3260 	CSR_WRITE_4(sc, ALC_MBOX_RD0_PROD_IDX, sc->alc_cdata.alc_rx_cons);
3261 
3262 	return (0);
3263 }
3264 
3265 void
3266 alc_init_rr_ring(struct alc_softc *sc)
3267 {
3268 	struct alc_ring_data *rd;
3269 
3270 	sc->alc_cdata.alc_rr_cons = 0;
3271 	ALC_RXCHAIN_RESET(sc);
3272 
3273 	rd = &sc->alc_rdata;
3274 	bzero(rd->alc_rr_ring, ALC_RR_RING_SZ);
3275 	bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_rr_ring_map, 0,
3276 	    sc->alc_cdata.alc_rr_ring_map->dm_mapsize,
3277 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3278 }
3279 
3280 void
3281 alc_init_cmb(struct alc_softc *sc)
3282 {
3283 	struct alc_ring_data *rd;
3284 
3285 	rd = &sc->alc_rdata;
3286 	bzero(rd->alc_cmb, ALC_CMB_SZ);
3287 	bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_cmb_map, 0,
3288 	    sc->alc_cdata.alc_cmb_map->dm_mapsize,
3289 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3290 }
3291 
3292 void
3293 alc_init_smb(struct alc_softc *sc)
3294 {
3295 	struct alc_ring_data *rd;
3296 
3297 	rd = &sc->alc_rdata;
3298 	bzero(rd->alc_smb, ALC_SMB_SZ);
3299 	bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_smb_map, 0,
3300 	    sc->alc_cdata.alc_smb_map->dm_mapsize,
3301 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3302 }
3303 
3304 void
3305 alc_rxvlan(struct alc_softc *sc)
3306 {
3307 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
3308 	uint32_t reg;
3309 
3310 	reg = CSR_READ_4(sc, ALC_MAC_CFG);
3311 	if (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING)
3312 		reg |= MAC_CFG_VLAN_TAG_STRIP;
3313 	else
3314 		reg &= ~MAC_CFG_VLAN_TAG_STRIP;
3315 	CSR_WRITE_4(sc, ALC_MAC_CFG, reg);
3316 }
3317 
3318 void
3319 alc_iff(struct alc_softc *sc)
3320 {
3321 	struct arpcom *ac = &sc->sc_arpcom;
3322 	struct ifnet *ifp = &ac->ac_if;
3323 	struct ether_multi *enm;
3324 	struct ether_multistep step;
3325 	uint32_t crc;
3326 	uint32_t mchash[2];
3327 	uint32_t rxcfg;
3328 
3329 	rxcfg = CSR_READ_4(sc, ALC_MAC_CFG);
3330 	rxcfg &= ~(MAC_CFG_ALLMULTI | MAC_CFG_BCAST | MAC_CFG_PROMISC);
3331 	ifp->if_flags &= ~IFF_ALLMULTI;
3332 
3333 	/*
3334 	 * Always accept broadcast frames.
3335 	 */
3336 	rxcfg |= MAC_CFG_BCAST;
3337 
3338 	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
3339 		ifp->if_flags |= IFF_ALLMULTI;
3340 		if (ifp->if_flags & IFF_PROMISC)
3341 			rxcfg |= MAC_CFG_PROMISC;
3342 		else
3343 			rxcfg |= MAC_CFG_ALLMULTI;
3344 		mchash[0] = mchash[1] = 0xFFFFFFFF;
3345 	} else {
3346 		/* Program new filter. */
3347 		bzero(mchash, sizeof(mchash));
3348 
3349 		ETHER_FIRST_MULTI(step, ac, enm);
3350 		while (enm != NULL) {
3351 			crc = ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN);
3352 
3353 			mchash[crc >> 31] |= 1 << ((crc >> 26) & 0x1f);
3354 
3355 			ETHER_NEXT_MULTI(step, enm);
3356 		}
3357 	}
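	/*
	 * The top bit of the big-endian CRC selects MAR0 or MAR1 and
	 * the next five bits select a bit within it, hashing each
	 * multicast address into one of 64 filter bins.
	 */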
3358 
3359 	CSR_WRITE_4(sc, ALC_MAR0, mchash[0]);
3360 	CSR_WRITE_4(sc, ALC_MAR1, mchash[1]);
3361 	CSR_WRITE_4(sc, ALC_MAC_CFG, rxcfg);
3362 }
3363