xref: /openbsd-src/sys/dev/pci/if_alc.c (revision b0f539e9923c93d213bbde92bfd6b7a67cb6927c)
1 /*	$OpenBSD: if_alc.c,v 1.49 2019/05/23 01:57:19 kevlo Exp $	*/
2 /*-
3  * Copyright (c) 2009, Pyun YongHyeon <yongari@FreeBSD.org>
4  * All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice unmodified, this list of conditions, and the following
11  *    disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  */
28 
29 /* Driver for Atheros AR813x/AR815x/AR816x/AR817x PCIe Ethernet. */
30 
31 #include "bpfilter.h"
32 #include "vlan.h"
33 
34 #include <sys/param.h>
35 #include <sys/endian.h>
36 #include <sys/systm.h>
37 #include <sys/sockio.h>
38 #include <sys/mbuf.h>
39 #include <sys/queue.h>
40 #include <sys/kernel.h>
41 #include <sys/device.h>
42 #include <sys/timeout.h>
43 #include <sys/socket.h>
44 
45 #include <machine/bus.h>
46 
47 #include <net/if.h>
48 #include <net/if_dl.h>
49 #include <net/if_media.h>
50 
51 #include <netinet/in.h>
52 #include <netinet/if_ether.h>
53 
54 #if NBPFILTER > 0
55 #include <net/bpf.h>
56 #endif
57 
58 #include <dev/mii/mii.h>
59 #include <dev/mii/miivar.h>
60 
61 #include <dev/pci/pcireg.h>
62 #include <dev/pci/pcivar.h>
63 #include <dev/pci/pcidevs.h>
64 
65 #include <dev/pci/if_alcreg.h>
66 
67 int	alc_match(struct device *, void *, void *);
68 void	alc_attach(struct device *, struct device *, void *);
69 int	alc_detach(struct device *, int);
70 int	alc_activate(struct device *, int);
71 
72 int	alc_init(struct ifnet *);
73 void	alc_start(struct ifnet *);
74 int	alc_ioctl(struct ifnet *, u_long, caddr_t);
75 void	alc_watchdog(struct ifnet *);
76 int	alc_mediachange(struct ifnet *);
77 void	alc_mediastatus(struct ifnet *, struct ifmediareq *);
78 
79 void	alc_aspm(struct alc_softc *, int, uint64_t);
80 void	alc_aspm_813x(struct alc_softc *, uint64_t);
81 void	alc_aspm_816x(struct alc_softc *, int);
82 void	alc_disable_l0s_l1(struct alc_softc *);
83 int	alc_dma_alloc(struct alc_softc *);
84 void	alc_dma_free(struct alc_softc *);
85 int	alc_encap(struct alc_softc *, struct mbuf *);
86 void	alc_get_macaddr(struct alc_softc *);
87 void	alc_get_macaddr_813x(struct alc_softc *);
88 void	alc_get_macaddr_816x(struct alc_softc *);
89 void	alc_get_macaddr_par(struct alc_softc *);
90 void	alc_init_cmb(struct alc_softc *);
91 void	alc_init_rr_ring(struct alc_softc *);
92 int	alc_init_rx_ring(struct alc_softc *);
93 void	alc_init_smb(struct alc_softc *);
94 void	alc_init_tx_ring(struct alc_softc *);
95 int	alc_intr(void *);
96 void	alc_mac_config(struct alc_softc *);
97 int	alc_mii_readreg_813x(struct device *, int, int);
98 int	alc_mii_readreg_816x(struct device *, int, int);
99 void	alc_mii_writereg_813x(struct device *, int, int, int);
100 void	alc_mii_writereg_816x(struct device *, int, int, int);
101 void	alc_dsp_fixup(struct alc_softc *, int);
102 int	alc_miibus_readreg(struct device *, int, int);
103 void	alc_miibus_statchg(struct device *);
104 void	alc_miibus_writereg(struct device *, int, int, int);
105 int	alc_miidbg_readreg(struct alc_softc *, int);
106 void	alc_miidbg_writereg(struct alc_softc *, int, int);
107 int	alc_miiext_readreg(struct alc_softc *, int, int);
108 void	alc_miiext_writereg(struct alc_softc *, int, int, int);
109 void	alc_phy_reset_813x(struct alc_softc *);
110 void	alc_phy_reset_816x(struct alc_softc *);
111 int	alc_newbuf(struct alc_softc *, struct alc_rxdesc *);
112 void	alc_phy_down(struct alc_softc *);
113 void	alc_phy_reset(struct alc_softc *);
114 void	alc_reset(struct alc_softc *);
115 void	alc_rxeof(struct alc_softc *, struct rx_rdesc *);
116 int	alc_rxintr(struct alc_softc *);
117 void	alc_iff(struct alc_softc *);
118 void	alc_rxvlan(struct alc_softc *);
119 void	alc_start_queue(struct alc_softc *);
120 void	alc_stats_clear(struct alc_softc *);
121 void	alc_stats_update(struct alc_softc *);
122 void	alc_stop(struct alc_softc *);
123 void	alc_stop_mac(struct alc_softc *);
124 void	alc_stop_queue(struct alc_softc *);
125 void	alc_tick(void *);
126 void	alc_txeof(struct alc_softc *);
127 void	alc_init_pcie(struct alc_softc *, int);
128 void	alc_config_msi(struct alc_softc *);
132 void	alc_osc_reset(struct alc_softc *);
133 
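/*
 * DMA burst sizes in bytes, indexed by the 3-bit PCIe max read
 * request / max payload encoding (128 << n); the two trailing zero
 * entries correspond to the reserved encodings.
 */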
134 uint32_t alc_dma_burst[] = { 128, 256, 512, 1024, 2048, 4096, 0, 0 };
135 
136 const struct pci_matchid alc_devices[] = {
137 	{ PCI_VENDOR_ATTANSIC, PCI_PRODUCT_ATTANSIC_L1C },
138 	{ PCI_VENDOR_ATTANSIC, PCI_PRODUCT_ATTANSIC_L2C },
139 	{ PCI_VENDOR_ATTANSIC, PCI_PRODUCT_ATTANSIC_L1D },
140 	{ PCI_VENDOR_ATTANSIC, PCI_PRODUCT_ATTANSIC_L1D_1 },
141 	{ PCI_VENDOR_ATTANSIC, PCI_PRODUCT_ATTANSIC_L2C_1 },
142 	{ PCI_VENDOR_ATTANSIC, PCI_PRODUCT_ATTANSIC_L2C_2 },
143 	{ PCI_VENDOR_ATTANSIC, PCI_PRODUCT_ATTANSIC_AR8161 },
144 	{ PCI_VENDOR_ATTANSIC, PCI_PRODUCT_ATTANSIC_AR8162 },
145 	{ PCI_VENDOR_ATTANSIC, PCI_PRODUCT_ATTANSIC_AR8171 },
146 	{ PCI_VENDOR_ATTANSIC, PCI_PRODUCT_ATTANSIC_AR8172 },
147 	{ PCI_VENDOR_ATTANSIC, PCI_PRODUCT_ATTANSIC_E2200 },
148 	{ PCI_VENDOR_ATTANSIC, PCI_PRODUCT_ATTANSIC_E2400 },
149 	{ PCI_VENDOR_ATTANSIC, PCI_PRODUCT_ATTANSIC_E2500 }
150 };
151 
152 struct cfattach alc_ca = {
153 	sizeof (struct alc_softc), alc_match, alc_attach, alc_detach,
154 	alc_activate
155 };
156 
157 struct cfdriver alc_cd = {
158 	NULL, "alc", DV_IFNET
159 };
160 
161 int alcdebug = 0;
162 #define	DPRINTF(x)	do { if (alcdebug) printf x; } while (0)
163 
164 #define ALC_CSUM_FEATURES	(M_TCP_CSUM_OUT | M_UDP_CSUM_OUT)
165 
166 int
167 alc_miibus_readreg(struct device *dev, int phy, int reg)
168 {
169 	struct alc_softc *sc = (struct alc_softc *)dev;
170 	uint32_t v;
171 
172 	if (phy != sc->alc_phyaddr)
173 		return (0);
174 
175 	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0)
176 		v = alc_mii_readreg_816x(dev, phy, reg);
177 	else
178 		v = alc_mii_readreg_813x(dev, phy, reg);
179 
180 	return (v);
181 }
182 
183 int
184 alc_mii_readreg_813x(struct device *dev, int phy, int reg)
185 {
186 	struct alc_softc *sc = (struct alc_softc *)dev;
187 	uint32_t v;
188 	int i;
189 
190 	if (phy != sc->alc_phyaddr)
191 		return (0);
192 
193 	/*
194 	 * For the AR8132 fast ethernet controller, do not report
195 	 * 1000baseT capability to mii(4). Even though AR8132 uses the
196 	 * same model/revision number as the F1 gigabit PHY, the PHY
197 	 * has no ability to establish a 1000baseT link.
198 	 */
199 	if ((sc->alc_flags & ALC_FLAG_FASTETHER) != 0 &&
200 	    reg == MII_EXTSR)
201 		return (0);
202 
203 	CSR_WRITE_4(sc, ALC_MDIO, MDIO_OP_EXECUTE | MDIO_OP_READ |
204 	    MDIO_SUP_PREAMBLE | MDIO_CLK_25_4 | MDIO_REG_ADDR(reg));
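	/*
	 * Poll for completion: the MAC clears MDIO_OP_EXECUTE/MDIO_OP_BUSY
	 * once the shifted-in data is valid, so the total timeout is
	 * roughly ALC_PHY_TIMEOUT * 5 microseconds.
	 */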
205 	for (i = ALC_PHY_TIMEOUT; i > 0; i--) {
206 		DELAY(5);
207 		v = CSR_READ_4(sc, ALC_MDIO);
208 		if ((v & (MDIO_OP_EXECUTE | MDIO_OP_BUSY)) == 0)
209 			break;
210 	}
211 
212 	if (i == 0) {
213 		printf("%s: phy read timeout: phy %d, reg %d\n",
214 		    sc->sc_dev.dv_xname, phy, reg);
215 		return (0);
216 	}
217 
218 	return ((v & MDIO_DATA_MASK) >> MDIO_DATA_SHIFT);
219 }
220 
221 int
222 alc_mii_readreg_816x(struct device *dev, int phy, int reg)
223 {
224 	struct alc_softc *sc = (struct alc_softc *)dev;
225 	uint32_t clk, v;
226 	int i;
227 
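	/*
	 * On AR816x the MDIO clock divider depends on link state
	 * (25MHz/128 with an established link, 25MHz/4 otherwise);
	 * the choice follows the vendor driver.
	 */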
228 	if ((sc->alc_flags & ALC_FLAG_LINK) != 0)
229 		clk = MDIO_CLK_25_128;
230 	else
231 		clk = MDIO_CLK_25_4;
232 	CSR_WRITE_4(sc, ALC_MDIO, MDIO_OP_EXECUTE | MDIO_OP_READ |
233 	    MDIO_SUP_PREAMBLE | clk | MDIO_REG_ADDR(reg));
234 	for (i = ALC_PHY_TIMEOUT; i > 0; i--) {
235 		DELAY(5);
236 		v = CSR_READ_4(sc, ALC_MDIO);
237 		if ((v & MDIO_OP_BUSY) == 0)
238 			break;
239 	}
240 
	if (i == 0) {
		printf("%s: phy read timeout: phy %d, reg %d\n",
		    sc->sc_dev.dv_xname, phy, reg);
		return (0);
	}

241 	return ((v & MDIO_DATA_MASK) >> MDIO_DATA_SHIFT);
242 }
243 
244 void
245 alc_miibus_writereg(struct device *dev, int phy, int reg, int val)
246 {
247 	struct alc_softc *sc = (struct alc_softc *)dev;
248 
249 	if (phy != sc->alc_phyaddr)
250 		return;
251 
252 	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0)
253 		alc_mii_writereg_816x(dev, phy, reg, val);
254 	else
255 		alc_mii_writereg_813x(dev, phy, reg, val);
256 }
257 
258 void
259 alc_mii_writereg_813x(struct device *dev, int phy, int reg, int val)
260 {
261 	struct alc_softc *sc = (struct alc_softc *)dev;
262 	uint32_t v;
263 	int i;
264 
265 	CSR_WRITE_4(sc, ALC_MDIO, MDIO_OP_EXECUTE | MDIO_OP_WRITE |
266 	    (val & MDIO_DATA_MASK) << MDIO_DATA_SHIFT |
267 	    MDIO_SUP_PREAMBLE | MDIO_CLK_25_4 | MDIO_REG_ADDR(reg));
268 	for (i = ALC_PHY_TIMEOUT; i > 0; i--) {
269 		DELAY(5);
270 		v = CSR_READ_4(sc, ALC_MDIO);
271 		if ((v & (MDIO_OP_EXECUTE | MDIO_OP_BUSY)) == 0)
272 			break;
273 	}
274 
275 	if (i == 0)
276 		printf("%s: phy write timeout: phy %d, reg %d\n",
277 		    sc->sc_dev.dv_xname, phy, reg);
278 }
279 
280 void
281 alc_mii_writereg_816x(struct device *dev, int phy, int reg, int val)
282 {
283 	struct alc_softc *sc = (struct alc_softc *)dev;
284 	uint32_t clk, v;
285 	int i;
286 
287 	if ((sc->alc_flags & ALC_FLAG_LINK) != 0)
288 		clk = MDIO_CLK_25_128;
289 	else
290 		clk = MDIO_CLK_25_4;
291 	CSR_WRITE_4(sc, ALC_MDIO, MDIO_OP_EXECUTE | MDIO_OP_WRITE |
292 	    ((val & MDIO_DATA_MASK) << MDIO_DATA_SHIFT) | MDIO_REG_ADDR(reg) |
293 	    MDIO_SUP_PREAMBLE | clk);
294 	for (i = ALC_PHY_TIMEOUT; i > 0; i--) {
295 		DELAY(5);
296 		v = CSR_READ_4(sc, ALC_MDIO);
297 		if ((v & MDIO_OP_BUSY) == 0)
298 			break;
299 	}
300 
301 	if (i == 0)
302 		printf("%s: phy write timeout: phy %d, reg %d\n",
303 		    sc->sc_dev.dv_xname, phy, reg);
304 }
305 
306 void
307 alc_miibus_statchg(struct device *dev)
308 {
309 	struct alc_softc *sc = (struct alc_softc *)dev;
310 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
311 	struct mii_data *mii = &sc->sc_miibus;
312 	uint32_t reg;
313 
314 	if ((ifp->if_flags & IFF_RUNNING) == 0)
315 		return;
316 
317 	sc->alc_flags &= ~ALC_FLAG_LINK;
318 	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
319 	    (IFM_ACTIVE | IFM_AVALID)) {
320 		switch (IFM_SUBTYPE(mii->mii_media_active)) {
321 		case IFM_10_T:
322 		case IFM_100_TX:
323 			sc->alc_flags |= ALC_FLAG_LINK;
324 			break;
325 		case IFM_1000_T:
326 			if ((sc->alc_flags & ALC_FLAG_FASTETHER) == 0)
327 				sc->alc_flags |= ALC_FLAG_LINK;
328 			break;
329 		default:
330 			break;
331 		}
332 	}
333 	alc_stop_queue(sc);
334 	/* Stop Rx/Tx MACs. */
335 	alc_stop_mac(sc);
336 
337 	/* Program MACs with resolved speed/duplex/flow-control. */
338 	if ((sc->alc_flags & ALC_FLAG_LINK) != 0) {
339 		alc_start_queue(sc);
340 		alc_mac_config(sc);
341 		/* Re-enable Tx/Rx MACs. */
342 		reg = CSR_READ_4(sc, ALC_MAC_CFG);
343 		reg |= MAC_CFG_TX_ENB | MAC_CFG_RX_ENB;
344 		CSR_WRITE_4(sc, ALC_MAC_CFG, reg);
345 	}
346 	alc_aspm(sc, 0, IFM_SUBTYPE(mii->mii_media_active));
347 	alc_dsp_fixup(sc, IFM_SUBTYPE(mii->mii_media_active));
348 }
349 
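/*
 * The F1 PHY's vendor debug registers are reached indirectly: the
 * register index is written to ALC_MII_DBG_ADDR and the contents are
 * then read or written through ALC_MII_DBG_DATA.
 */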
350 int
351 alc_miidbg_readreg(struct alc_softc *sc, int reg)
352 {
353 	alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr, ALC_MII_DBG_ADDR,
354 	    reg);
355 	return (alc_miibus_readreg(&sc->sc_dev, sc->alc_phyaddr,
356 	    ALC_MII_DBG_DATA));
357 }
358 
359 
360 void
361 alc_miidbg_writereg(struct alc_softc *sc, int reg, int val)
362 {
363 	alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr, ALC_MII_DBG_ADDR,
364 	    reg);
365 	alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr, ALC_MII_DBG_DATA,
366 	    val);
367 }
368 
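/*
 * Extended (MMD-style) PHY registers are addressed by a device
 * address/register pair latched in ALC_EXT_MDIO; the transfer itself
 * then runs through ALC_MDIO with MDIO_MODE_EXT set.
 */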
369 int
370 alc_miiext_readreg(struct alc_softc *sc, int devaddr, int reg)
371 {
372 	uint32_t clk, v;
373 	int i;
374 
375 	CSR_WRITE_4(sc, ALC_EXT_MDIO, EXT_MDIO_REG(reg) |
376 	    EXT_MDIO_DEVADDR(devaddr));
377 	if ((sc->alc_flags & ALC_FLAG_LINK) != 0)
378 		clk = MDIO_CLK_25_128;
379 	else
380 		clk = MDIO_CLK_25_4;
381 	CSR_WRITE_4(sc, ALC_MDIO, MDIO_OP_EXECUTE | MDIO_OP_READ |
382 	    MDIO_SUP_PREAMBLE | clk | MDIO_MODE_EXT);
383 	for (i = ALC_PHY_TIMEOUT; i > 0; i--) {
384 		DELAY(5);
385 		v = CSR_READ_4(sc, ALC_MDIO);
386 		if ((v & MDIO_OP_BUSY) == 0)
387 			break;
388 	}
389 
	if (i == 0) {
		printf("%s: phy ext read timeout: dev %d, reg %d\n",
		    sc->sc_dev.dv_xname, devaddr, reg);
		return (0);
	}

390 	return ((v & MDIO_DATA_MASK) >> MDIO_DATA_SHIFT);
391 }
392 
393 void
394 alc_miiext_writereg(struct alc_softc *sc, int devaddr, int reg, int val)
395 {
396 	uint32_t clk, v;
397 	int i;
398 
399 	CSR_WRITE_4(sc, ALC_EXT_MDIO, EXT_MDIO_REG(reg) |
400 	    EXT_MDIO_DEVADDR(devaddr));
401 	if ((sc->alc_flags & ALC_FLAG_LINK) != 0)
402 		clk = MDIO_CLK_25_128;
403 	else
404 		clk = MDIO_CLK_25_4;
405 	CSR_WRITE_4(sc, ALC_MDIO, MDIO_OP_EXECUTE | MDIO_OP_WRITE |
406 	    ((val & MDIO_DATA_MASK) << MDIO_DATA_SHIFT) |
407 	    MDIO_SUP_PREAMBLE | clk | MDIO_MODE_EXT);
408 	for (i = ALC_PHY_TIMEOUT; i > 0; i--) {
409 		DELAY(5);
410 		v = CSR_READ_4(sc, ALC_MDIO);
411 		if ((v & MDIO_OP_BUSY) == 0)
412 			break;
413 	}

	if (i == 0)
		printf("%s: phy ext write timeout: dev %d, reg %d\n",
		    sc->sc_dev.dv_xname, devaddr, reg);
414 }
415 
416 void
417 alc_dsp_fixup(struct alc_softc *sc, int media)
418 {
419 	uint16_t agc, len, val;
420 
421 	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) == 0)
422 		return;
423 	if (AR816X_REV(sc->alc_rev) >= AR816X_REV_C0)
424 		return;
425 
426 	/*
427 	 * Vendor PHY magic.
428 	 * 1000BT/AZ, wrong cable length
429 	 */
430 	if ((sc->alc_flags & ALC_FLAG_LINK) != 0) {
431 		len = alc_miiext_readreg(sc, MII_EXT_PCS, MII_EXT_CLDCTL6);
432 		len = (len >> EXT_CLDCTL6_CAB_LEN_SHIFT) &
433 		    EXT_CLDCTL6_CAB_LEN_MASK;
434 		agc = alc_miidbg_readreg(sc, MII_DBG_AGC);
435 		agc = (agc >> DBG_AGC_2_VGA_SHIFT) & DBG_AGC_2_VGA_MASK;
436 		if ((media == IFM_1000_T && len > EXT_CLDCTL6_CAB_LEN_SHORT1G &&
437 		    agc > DBG_AGC_LONG1G_LIMT) ||
438 		    (media == IFM_100_TX && len > EXT_CLDCTL6_CAB_LEN_SHORT100M &&
439 		    agc > DBG_AGC_LONG100M_LIMT)) {
440 				alc_miidbg_writereg(sc, MII_DBG_AZ_ANADECT,
441 				    DBG_AZ_ANADECT_LONG);
442 				val = alc_miiext_readreg(sc, MII_EXT_ANEG,
443 				    MII_EXT_ANEG_AFE);
444 				val |= ANEG_AFEE_10BT_100M_TH;
445 				alc_miiext_writereg(sc, MII_EXT_ANEG,
446 				    MII_EXT_ANEG_AFE, val);
447 		} else {
448 			alc_miidbg_writereg(sc, MII_DBG_AZ_ANADECT,
449 			    DBG_AZ_ANADECT_DEFAULT);
450 			val = alc_miiext_readreg(sc, MII_EXT_ANEG,
451 			    MII_EXT_ANEG_AFE);
452 			val &= ~ANEG_AFEE_10BT_100M_TH;
453 			alc_miiext_writereg(sc, MII_EXT_ANEG, MII_EXT_ANEG_AFE,
454 			    val);
455 		}
456 		if ((sc->alc_flags & ALC_FLAG_LINK_WAR) != 0 &&
457 		    AR816X_REV(sc->alc_rev) == AR816X_REV_B0) {
458 			if (media == IFM_1000_T) {
459 				/*
460 				 * Gigabit link threshold; raise the noise
461 				 * tolerance by 50%.
462 				 */
463 				val = alc_miidbg_readreg(sc, MII_DBG_MSE20DB);
464 				val &= ~DBG_MSE20DB_TH_MASK;
465 				val |= (DBG_MSE20DB_TH_HI <<
466 				    DBG_MSE20DB_TH_SHIFT);
467 				alc_miidbg_writereg(sc, MII_DBG_MSE20DB, val);
468 			} else if (media == IFM_100_TX)
469 				alc_miidbg_writereg(sc, MII_DBG_MSE16DB,
470 				    DBG_MSE16DB_UP);
471 		}
472 	} else {
473 		val = alc_miiext_readreg(sc, MII_EXT_ANEG, MII_EXT_ANEG_AFE);
474 		val &= ~ANEG_AFEE_10BT_100M_TH;
475 		alc_miiext_writereg(sc, MII_EXT_ANEG, MII_EXT_ANEG_AFE, val);
476 		if ((sc->alc_flags & ALC_FLAG_LINK_WAR) != 0 &&
477 		    AR816X_REV(sc->alc_rev) == AR816X_REV_B0) {
478 			alc_miidbg_writereg(sc, MII_DBG_MSE16DB,
479 			    DBG_MSE16DB_DOWN);
480 			val = alc_miidbg_readreg(sc, MII_DBG_MSE20DB);
481 			val &= ~DBG_MSE20DB_TH_MASK;
482 			val |= (DBG_MSE20DB_TH_DEFAULT << DBG_MSE20DB_TH_SHIFT);
483 			alc_miidbg_writereg(sc, MII_DBG_MSE20DB, val);
484 		}
485 	}
486 }
487 
488 void
489 alc_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
490 {
491 	struct alc_softc *sc = ifp->if_softc;
492 	struct mii_data *mii = &sc->sc_miibus;
493 
494 	if ((ifp->if_flags & IFF_UP) == 0)
495 		return;
496 
497 	mii_pollstat(mii);
498 	ifmr->ifm_status = mii->mii_media_status;
499 	ifmr->ifm_active = mii->mii_media_active;
500 }
501 
502 int
503 alc_mediachange(struct ifnet *ifp)
504 {
505 	struct alc_softc *sc = ifp->if_softc;
506 	struct mii_data *mii = &sc->sc_miibus;
507 	int error;
508 
509 	if (mii->mii_instance != 0) {
510 		struct mii_softc *miisc;
511 
512 		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
513 			mii_phy_reset(miisc);
514 	}
515 	error = mii_mediachg(mii);
516 
517 	return (error);
518 }
519 
520 int
521 alc_match(struct device *dev, void *match, void *aux)
522 {
523 	return pci_matchbyid((struct pci_attach_args *)aux, alc_devices,
524 	    nitems(alc_devices));
525 }
526 
527 void
528 alc_get_macaddr(struct alc_softc *sc)
529 {
530 	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0)
531 		alc_get_macaddr_816x(sc);
532 	else
533 		alc_get_macaddr_813x(sc);
534 }
535 
536 void
537 alc_get_macaddr_813x(struct alc_softc *sc)
538 {
539 	uint32_t opt;
540 	uint16_t val;
541 	int eeprom, i;
542 
543 	eeprom = 0;
544 	opt = CSR_READ_4(sc, ALC_OPT_CFG);
545 	if ((CSR_READ_4(sc, ALC_MASTER_CFG) & MASTER_OTP_SEL) != 0 &&
546 	    (CSR_READ_4(sc, ALC_TWSI_DEBUG) & TWSI_DEBUG_DEV_EXIST) != 0) {
547 		/*
548 		 * EEPROM found; let TWSI reload the EEPROM configuration.
549 		 * This sets the controller's ethernet address.
550 		 */
551 		eeprom++;
552 		switch (sc->sc_product) {
553 		case PCI_PRODUCT_ATTANSIC_L1C:
554 		case PCI_PRODUCT_ATTANSIC_L2C:
555 			if ((opt & OPT_CFG_CLK_ENB) == 0) {
556 				opt |= OPT_CFG_CLK_ENB;
557 				CSR_WRITE_4(sc, ALC_OPT_CFG, opt);
558 				CSR_READ_4(sc, ALC_OPT_CFG);
559 				DELAY(1000);
560 			}
561 			break;
562 		case PCI_PRODUCT_ATTANSIC_L1D:
563 		case PCI_PRODUCT_ATTANSIC_L1D_1:
564 		case PCI_PRODUCT_ATTANSIC_L2C_1:
565 		case PCI_PRODUCT_ATTANSIC_L2C_2:
566 			alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr,
567 			    ALC_MII_DBG_ADDR, 0x00);
568 			val = alc_miibus_readreg(&sc->sc_dev, sc->alc_phyaddr,
569 			    ALC_MII_DBG_DATA);
570 			alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr,
571 			    ALC_MII_DBG_DATA, val & 0xFF7F);
572 			alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr,
573 			    ALC_MII_DBG_ADDR, 0x3B);
574 			val = alc_miibus_readreg(&sc->sc_dev, sc->alc_phyaddr,
575 			    ALC_MII_DBG_DATA);
576 			alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr,
577 			    ALC_MII_DBG_DATA, val | 0x0008);
578 			DELAY(20);
579 			break;
580 		}
581 
582 		CSR_WRITE_4(sc, ALC_LTSSM_ID_CFG,
583 		    CSR_READ_4(sc, ALC_LTSSM_ID_CFG) & ~LTSSM_ID_WRO_ENB);
584 		CSR_WRITE_4(sc, ALC_WOL_CFG, 0);
585 		CSR_READ_4(sc, ALC_WOL_CFG);
586 
587 		CSR_WRITE_4(sc, ALC_TWSI_CFG, CSR_READ_4(sc, ALC_TWSI_CFG) |
588 		    TWSI_CFG_SW_LD_START);
589 		for (i = 100; i > 0; i--) {
590 			DELAY(1000);
591 			if ((CSR_READ_4(sc, ALC_TWSI_CFG) &
592 			    TWSI_CFG_SW_LD_START) == 0)
593 				break;
594 		}
595 		if (i == 0)
596 			printf("%s: reloading EEPROM timed out!\n",
597 			    sc->sc_dev.dv_xname);
598 	} else {
599 		if (alcdebug)
600 			printf("%s: EEPROM not found!\n", sc->sc_dev.dv_xname);
601 	}
602 	if (eeprom != 0) {
603 		switch (sc->sc_product) {
604 		case PCI_PRODUCT_ATTANSIC_L1C:
605 		case PCI_PRODUCT_ATTANSIC_L2C:
606 			if ((opt & OPT_CFG_CLK_ENB) != 0) {
607 				opt &= ~OPT_CFG_CLK_ENB;
608 				CSR_WRITE_4(sc, ALC_OPT_CFG, opt);
609 				CSR_READ_4(sc, ALC_OPT_CFG);
610 				DELAY(1000);
611 			}
612 			break;
613 		case PCI_PRODUCT_ATTANSIC_L1D:
614 		case PCI_PRODUCT_ATTANSIC_L1D_1:
615 		case PCI_PRODUCT_ATTANSIC_L2C_1:
616 		case PCI_PRODUCT_ATTANSIC_L2C_2:
617 			alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr,
618 			    ALC_MII_DBG_ADDR, 0x00);
619 			val = alc_miibus_readreg(&sc->sc_dev, sc->alc_phyaddr,
620 			    ALC_MII_DBG_DATA);
621 			alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr,
622 			    ALC_MII_DBG_DATA, val | 0x0080);
623 			alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr,
624 			    ALC_MII_DBG_ADDR, 0x3B);
625 			val = alc_miibus_readreg(&sc->sc_dev, sc->alc_phyaddr,
626 			    ALC_MII_DBG_DATA);
627 			alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr,
628 			    ALC_MII_DBG_DATA, val & 0xFFF7);
629 			DELAY(20);
630 			break;
631 		}
632 	}
633 
634 	alc_get_macaddr_par(sc);
635 }
636 
637 void
638 alc_get_macaddr_816x(struct alc_softc *sc)
639 {
640 	uint32_t reg;
641 	int i, reloaded;
642 
643 	reloaded = 0;
644 	/* Try to reload station address via TWSI. */
645 	for (i = 100; i > 0; i--) {
646 		reg = CSR_READ_4(sc, ALC_SLD);
647 		if ((reg & (SLD_PROGRESS | SLD_START)) == 0)
648 			break;
649 		DELAY(1000);
650 	}
651 	if (i != 0) {
652 		CSR_WRITE_4(sc, ALC_SLD, reg | SLD_START);
653 		for (i = 100; i > 0; i--) {
654 			DELAY(1000);
655 			reg = CSR_READ_4(sc, ALC_SLD);
656 			if ((reg & SLD_START) == 0)
657 				break;
658 		}
		if (i != 0)
			reloaded++;
659 	}
660 
661 	/* Try to reload station address from EEPROM or FLASH. */
662 	if (reloaded == 0) {
663 		reg = CSR_READ_4(sc, ALC_EEPROM_LD);
664 		if ((reg & (EEPROM_LD_EEPROM_EXIST |
665 		    EEPROM_LD_FLASH_EXIST)) != 0) {
666 			for (i = 100; i > 0; i--) {
667 				reg = CSR_READ_4(sc, ALC_EEPROM_LD);
668 				if ((reg & (EEPROM_LD_PROGRESS |
669 				    EEPROM_LD_START)) == 0)
670 					break;
671 				DELAY(1000);
672 			}
673 			if (i != 0) {
674 				CSR_WRITE_4(sc, ALC_EEPROM_LD, reg |
675 				    EEPROM_LD_START);
676 				for (i = 100; i > 0; i--) {
677 					DELAY(1000);
678 					reg = CSR_READ_4(sc, ALC_EEPROM_LD);
679 					if ((reg & EEPROM_LD_START) == 0)
680 						break;
681 				}
682 			}
683 		}
684 	}
685 
686 	alc_get_macaddr_par(sc);
687 }
688 
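/*
 * The station address is read back from the PAR0/PAR1 registers:
 * PAR1 holds the two most significant bytes and PAR0 the remaining
 * four, which is why the bytes are peeled off in descending order.
 */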
689 void
690 alc_get_macaddr_par(struct alc_softc *sc)
691 {
692 	uint32_t ea[2];
693 
694 	ea[0] = CSR_READ_4(sc, ALC_PAR0);
695 	ea[1] = CSR_READ_4(sc, ALC_PAR1);
696 	sc->alc_eaddr[0] = (ea[1] >> 8) & 0xFF;
697 	sc->alc_eaddr[1] = (ea[1] >> 0) & 0xFF;
698 	sc->alc_eaddr[2] = (ea[0] >> 24) & 0xFF;
699 	sc->alc_eaddr[3] = (ea[0] >> 16) & 0xFF;
700 	sc->alc_eaddr[4] = (ea[0] >> 8) & 0xFF;
701 	sc->alc_eaddr[5] = (ea[0] >> 0) & 0xFF;
702 }
703 
704 void
705 alc_disable_l0s_l1(struct alc_softc *sc)
706 {
707 	uint32_t pmcfg;
708 
709 	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) == 0) {
710 		/* Another magic from vendor. */
711 		pmcfg = CSR_READ_4(sc, ALC_PM_CFG);
712 		pmcfg &= ~(PM_CFG_L1_ENTRY_TIMER_MASK | PM_CFG_CLK_SWH_L1 |
713 		    PM_CFG_ASPM_L0S_ENB | PM_CFG_ASPM_L1_ENB |
714 		    PM_CFG_MAC_ASPM_CHK | PM_CFG_SERDES_PD_EX_L1);
715 		pmcfg |= PM_CFG_SERDES_BUDS_RX_L1_ENB |
716 		    PM_CFG_SERDES_PLL_L1_ENB | PM_CFG_SERDES_L1_ENB;
717 		CSR_WRITE_4(sc, ALC_PM_CFG, pmcfg);
718 	}
719 }
720 
721 void
722 alc_phy_reset(struct alc_softc *sc)
723 {
724 	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0)
725 		alc_phy_reset_816x(sc);
726 	else
727 		alc_phy_reset_813x(sc);
728 }
729 
730 void
731 alc_phy_reset_813x(struct alc_softc *sc)
732 {
733 	uint16_t data;
734 
735 	/* Reset magic from Linux. */
736 	CSR_WRITE_2(sc, ALC_GPHY_CFG, GPHY_CFG_SEL_ANA_RESET);
737 	CSR_READ_2(sc, ALC_GPHY_CFG);
738 	DELAY(10 * 1000);
739 
740 	CSR_WRITE_2(sc, ALC_GPHY_CFG, GPHY_CFG_EXT_RESET |
741 	    GPHY_CFG_SEL_ANA_RESET);
742 	CSR_READ_2(sc, ALC_GPHY_CFG);
743 	DELAY(10 * 1000);
744 
745 	/* DSP fixup, Vendor magic. */
746 	if (sc->sc_product == PCI_PRODUCT_ATTANSIC_L2C_1) {
747 		alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr,
748 		    ALC_MII_DBG_ADDR, 0x000A);
749 		data = alc_miibus_readreg(&sc->sc_dev, sc->alc_phyaddr,
750 		    ALC_MII_DBG_DATA);
751 		alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr,
752 		    ALC_MII_DBG_DATA, data & 0xDFFF);
753 	}
754 	if (sc->sc_product == PCI_PRODUCT_ATTANSIC_L1D ||
755 	    sc->sc_product == PCI_PRODUCT_ATTANSIC_L1D_1 ||
756 	    sc->sc_product == PCI_PRODUCT_ATTANSIC_L2C_1 ||
757 	    sc->sc_product == PCI_PRODUCT_ATTANSIC_L2C_2) {
758 		alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr,
759 		    ALC_MII_DBG_ADDR, 0x003B);
760 		data = alc_miibus_readreg(&sc->sc_dev, sc->alc_phyaddr,
761 		    ALC_MII_DBG_DATA);
762 		alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr,
763 		    ALC_MII_DBG_DATA, data & 0xFFF7);
764 		DELAY(20 * 1000);
765 	}
766 	if (sc->sc_product == PCI_PRODUCT_ATTANSIC_L1D) {
767 		alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr,
768 		    ALC_MII_DBG_ADDR, 0x0029);
769 		alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr,
770 		    ALC_MII_DBG_DATA, 0x929D);
771 	}
772 	if (sc->sc_product == PCI_PRODUCT_ATTANSIC_L1C ||
773 	    sc->sc_product == PCI_PRODUCT_ATTANSIC_L2C ||
774 	    sc->sc_product == PCI_PRODUCT_ATTANSIC_L1D_1 ||
775 	    sc->sc_product == PCI_PRODUCT_ATTANSIC_L2C_2) {
776 		alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr,
777 		    ALC_MII_DBG_ADDR, 0x0029);
778 		alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr,
779 		    ALC_MII_DBG_DATA, 0xB6DD);
780 	}
781 
782 	/* Load DSP codes, vendor magic. */
783 	data = ANA_LOOP_SEL_10BT | ANA_EN_MASK_TB | ANA_EN_10BT_IDLE |
784 	    ((1 << ANA_INTERVAL_SEL_TIMER_SHIFT) & ANA_INTERVAL_SEL_TIMER_MASK);
785 	alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr,
786 	    ALC_MII_DBG_ADDR, MII_ANA_CFG18);
787 	alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr,
788 	    ALC_MII_DBG_DATA, data);
789 
790 	data = ((2 << ANA_SERDES_CDR_BW_SHIFT) & ANA_SERDES_CDR_BW_MASK) |
791 	    ANA_SERDES_EN_DEEM | ANA_SERDES_SEL_HSP | ANA_SERDES_EN_PLL |
792 	    ANA_SERDES_EN_LCKDT;
793 	alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr,
794 	    ALC_MII_DBG_ADDR, MII_ANA_CFG5);
795 	alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr,
796 	    ALC_MII_DBG_DATA, data);
797 
798 	data = ((44 << ANA_LONG_CABLE_TH_100_SHIFT) &
799 	    ANA_LONG_CABLE_TH_100_MASK) |
800 	    ((33 << ANA_SHORT_CABLE_TH_100_SHIFT) &
801 	    ANA_SHORT_CABLE_TH_100_MASK) |
802 	    ANA_BP_BAD_LINK_ACCUM | ANA_BP_SMALL_BW;
803 	alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr,
804 	    ALC_MII_DBG_ADDR, MII_ANA_CFG54);
805 	alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr,
806 	    ALC_MII_DBG_DATA, data);
807 
808 	data = ((11 << ANA_IECHO_ADJ_3_SHIFT) & ANA_IECHO_ADJ_3_MASK) |
809 	    ((11 << ANA_IECHO_ADJ_2_SHIFT) & ANA_IECHO_ADJ_2_MASK) |
810 	    ((8 << ANA_IECHO_ADJ_1_SHIFT) & ANA_IECHO_ADJ_1_MASK) |
811 	    ((8 << ANA_IECHO_ADJ_0_SHIFT) & ANA_IECHO_ADJ_0_MASK);
812 	alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr,
813 	    ALC_MII_DBG_ADDR, MII_ANA_CFG4);
814 	alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr,
815 	    ALC_MII_DBG_DATA, data);
816 
817 	data = ((7 << ANA_MANUL_SWICH_ON_SHIFT) & ANA_MANUL_SWICH_ON_MASK) |
818 	    ANA_RESTART_CAL | ANA_MAN_ENABLE | ANA_SEL_HSP | ANA_EN_HB |
819 	    ANA_OEN_125M;
820 	alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr,
821 	    ALC_MII_DBG_ADDR, MII_ANA_CFG0);
822 	alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr,
823 	    ALC_MII_DBG_DATA, data);
824 	DELAY(1000);
825 
826 	/* Disable hibernation. */
827 	alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr, ALC_MII_DBG_ADDR,
828 	    0x0029);
829 	data = alc_miibus_readreg(&sc->sc_dev, sc->alc_phyaddr,
830 	    ALC_MII_DBG_DATA);
831 	data &= ~0x8000;
832 	alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr, ALC_MII_DBG_DATA,
833 	    data);
834 
835 	alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr, ALC_MII_DBG_ADDR,
836 	    0x000B);
837 	data = alc_miibus_readreg(&sc->sc_dev, sc->alc_phyaddr,
838 	    ALC_MII_DBG_DATA);
839 	data &= ~0x8000;
840 	alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr, ALC_MII_DBG_DATA,
841 	    data);
842 }
843 
844 void
845 alc_phy_reset_816x(struct alc_softc *sc)
846 {
847 	uint32_t val;
848 
849 	val = CSR_READ_4(sc, ALC_GPHY_CFG);
850 	val &= ~(GPHY_CFG_EXT_RESET | GPHY_CFG_LED_MODE |
851 	    GPHY_CFG_GATE_25M_ENB | GPHY_CFG_PHY_IDDQ | GPHY_CFG_PHY_PLL_ON |
852 	    GPHY_CFG_PWDOWN_HW | GPHY_CFG_100AB_ENB);
853 	val |= GPHY_CFG_SEL_ANA_RESET;
854 	/* Disable PHY hibernation. */
855 	val &= ~(GPHY_CFG_HIB_PULSE | GPHY_CFG_HIB_EN);
856 	CSR_WRITE_4(sc, ALC_GPHY_CFG, val);
857 	DELAY(10);
858 	CSR_WRITE_4(sc, ALC_GPHY_CFG, val | GPHY_CFG_EXT_RESET);
859 	DELAY(800);
860 	/* Vendor PHY magic. */
861 	/* Disable PHY hibernation. */
862 	alc_miidbg_writereg(sc, MII_DBG_LEGCYPS,
863 	    DBG_LEGCYPS_DEFAULT & ~DBG_LEGCYPS_ENB);
864 	alc_miidbg_writereg(sc, MII_DBG_HIBNEG, DBG_HIBNEG_DEFAULT &
865 	    ~(DBG_HIBNEG_PSHIB_EN | DBG_HIBNEG_HIB_PULSE));
866 	alc_miidbg_writereg(sc, MII_DBG_GREENCFG, DBG_GREENCFG_DEFAULT);
867 	/* XXX Disable EEE. */
868 	val = CSR_READ_4(sc, ALC_LPI_CTL);
869 	val &= ~LPI_CTL_ENB;
870 	CSR_WRITE_4(sc, ALC_LPI_CTL, val);
871 	alc_miiext_writereg(sc, MII_EXT_ANEG, MII_EXT_ANEG_LOCAL_EEEADV, 0);
872 	/* PHY power saving. */
873 	alc_miidbg_writereg(sc, MII_DBG_TST10BTCFG, DBG_TST10BTCFG_DEFAULT);
874 	alc_miidbg_writereg(sc, MII_DBG_SRDSYSMOD, DBG_SRDSYSMOD_DEFAULT);
875 	alc_miidbg_writereg(sc, MII_DBG_TST100BTCFG, DBG_TST100BTCFG_DEFAULT);
876 	alc_miidbg_writereg(sc, MII_DBG_ANACTL, DBG_ANACTL_DEFAULT);
877 	val = alc_miidbg_readreg(sc, MII_DBG_GREENCFG2);
878 	val &= ~DBG_GREENCFG2_GATE_DFSE_EN;
879 	alc_miidbg_writereg(sc, MII_DBG_GREENCFG2, val);
880 	/* RTL8139C, 120m issue. */
881 	alc_miiext_writereg(sc, MII_EXT_ANEG, MII_EXT_ANEG_NLP78,
882 	    ANEG_NLP78_120M_DEFAULT);
883 	alc_miiext_writereg(sc, MII_EXT_ANEG, MII_EXT_ANEG_S3DIG10,
884 	    ANEG_S3DIG10_DEFAULT);
885 	if ((sc->alc_flags & ALC_FLAG_LINK_WAR) != 0) {
886 		/* Turn off half amplitude. */
887 		val = alc_miiext_readreg(sc, MII_EXT_PCS, MII_EXT_CLDCTL3);
888 		val |= EXT_CLDCTL3_BP_CABLE1TH_DET_GT;
889 		alc_miiext_writereg(sc, MII_EXT_PCS, MII_EXT_CLDCTL3, val);
890 		/* Turn off Green feature. */
891 		val = alc_miidbg_readreg(sc, MII_DBG_GREENCFG2);
892 		val |= DBG_GREENCFG2_BP_GREEN;
893 		alc_miidbg_writereg(sc, MII_DBG_GREENCFG2, val);
894 		/* Turn off half bias. */
895 		val = alc_miiext_readreg(sc, MII_EXT_PCS, MII_EXT_CLDCTL5);
896 		val |= EXT_CLDCTL5_BP_VD_HLFBIAS;
897 		alc_miiext_writereg(sc, MII_EXT_PCS, MII_EXT_CLDCTL5, val);
898 	}
899 }
900 
901 void
902 alc_phy_down(struct alc_softc *sc)
903 {
904 	uint32_t gphy;
905 
906 	switch (sc->sc_product) {
907 	case PCI_PRODUCT_ATTANSIC_AR8161:
908 	case PCI_PRODUCT_ATTANSIC_E2200:
909 	case PCI_PRODUCT_ATTANSIC_E2400:
910 	case PCI_PRODUCT_ATTANSIC_E2500:
911 	case PCI_PRODUCT_ATTANSIC_AR8162:
912 	case PCI_PRODUCT_ATTANSIC_AR8171:
913 	case PCI_PRODUCT_ATTANSIC_AR8172:
914 		gphy = CSR_READ_4(sc, ALC_GPHY_CFG);
915 		gphy &= ~(GPHY_CFG_EXT_RESET | GPHY_CFG_LED_MODE |
916 		    GPHY_CFG_100AB_ENB | GPHY_CFG_PHY_PLL_ON);
917 		gphy |= GPHY_CFG_HIB_EN | GPHY_CFG_HIB_PULSE |
918 		    GPHY_CFG_SEL_ANA_RESET;
919 		gphy |= GPHY_CFG_PHY_IDDQ | GPHY_CFG_PWDOWN_HW;
920 		CSR_WRITE_4(sc, ALC_GPHY_CFG, gphy);
921 		break;
922 	case PCI_PRODUCT_ATTANSIC_L1D:
923 	case PCI_PRODUCT_ATTANSIC_L1D_1:
924 	case PCI_PRODUCT_ATTANSIC_L2C_1:
925 	case PCI_PRODUCT_ATTANSIC_L2C_2:
926 		/*
927 		 * GPHY power down caused more problems on AR8151 v2.0.
928 		 * When the driver is reloaded after GPHY power down,
929 		 * accesses to PHY/MAC registers hung the system. Only a
930 		 * cold boot recovered from it.  I'm not sure whether
931 		 * AR8151 v1.0 also requires this workaround; I don't
932 		 * have an AR8151 v1.0 controller in hand.
933 		 * The only option left is to isolate the PHY and
934 		 * initiate a PHY power down, which in turn saves more
935 		 * power when the driver is unloaded.
936 		 */
937 		alc_miibus_writereg(&sc->sc_dev, sc->alc_phyaddr,
938 		    MII_BMCR, BMCR_ISO | BMCR_PDOWN);
939 		break;
940 	default:
941 		/* Force PHY down. */
942 		CSR_WRITE_2(sc, ALC_GPHY_CFG, GPHY_CFG_EXT_RESET |
943 		    GPHY_CFG_SEL_ANA_RESET | GPHY_CFG_PHY_IDDQ |
944 		    GPHY_CFG_PWDOWN_HW);
945 		DELAY(1000);
946 		break;
947 	}
948 }
949 
950 void
951 alc_aspm(struct alc_softc *sc, int init, uint64_t media)
952 {
953 	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0)
954 		alc_aspm_816x(sc, init);
955 	else
956 		alc_aspm_813x(sc, media);
957 }
958 
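/*
 * L0s and L1 are the PCIe link power states; the timer fields
 * programmed below control how aggressively the link may enter them.
 * The values are chip-specific magic taken from the vendor driver.
 */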
959 void
960 alc_aspm_813x(struct alc_softc *sc, uint64_t media)
961 {
962 	uint32_t pmcfg;
963 	uint16_t linkcfg;
964 
965 	pmcfg = CSR_READ_4(sc, ALC_PM_CFG);
966 	if ((sc->alc_flags & (ALC_FLAG_APS | ALC_FLAG_PCIE)) ==
967 	    (ALC_FLAG_APS | ALC_FLAG_PCIE))
968 		linkcfg = CSR_READ_2(sc, sc->alc_expcap + PCI_PCIE_LCSR);
969 	else
970 		linkcfg = 0;
971 	pmcfg &= ~PM_CFG_SERDES_PD_EX_L1;
972 	pmcfg &= ~(PM_CFG_L1_ENTRY_TIMER_MASK | PM_CFG_LCKDET_TIMER_MASK);
973 	pmcfg |= PM_CFG_MAC_ASPM_CHK;
974 	pmcfg |= (PM_CFG_LCKDET_TIMER_DEFAULT << PM_CFG_LCKDET_TIMER_SHIFT);
975 	pmcfg &= ~(PM_CFG_ASPM_L1_ENB | PM_CFG_ASPM_L0S_ENB);
976 
977 	if ((sc->alc_flags & ALC_FLAG_APS) != 0) {
978 		/* Disable extended sync except AR8152 B v1.0 */
979 		linkcfg &= ~0x80;
980 		if (sc->sc_product == PCI_PRODUCT_ATTANSIC_L2C_1 &&
981 		    sc->alc_rev == ATHEROS_AR8152_B_V10)
982 			linkcfg |= 0x80;
983 		CSR_WRITE_2(sc, sc->alc_expcap + PCI_PCIE_LCSR, linkcfg);
984 		pmcfg &= ~(PM_CFG_EN_BUFS_RX_L0S | PM_CFG_SA_DLY_ENB |
985 		    PM_CFG_HOTRST);
986 		pmcfg |= (PM_CFG_L1_ENTRY_TIMER_DEFAULT <<
987 		    PM_CFG_L1_ENTRY_TIMER_SHIFT);
988 		pmcfg &= ~PM_CFG_PM_REQ_TIMER_MASK;
989 		pmcfg |= (PM_CFG_PM_REQ_TIMER_DEFAULT <<
990 		    PM_CFG_PM_REQ_TIMER_SHIFT);
991 		pmcfg |= PM_CFG_SERDES_PD_EX_L1 | PM_CFG_PCIE_RECV;
992 	}
993 
994 	if ((sc->alc_flags & ALC_FLAG_LINK) != 0) {
995 		if ((sc->alc_flags & ALC_FLAG_L0S) != 0)
996 			pmcfg |= PM_CFG_ASPM_L0S_ENB;
997 		if ((sc->alc_flags & ALC_FLAG_L1S) != 0)
998 			pmcfg |= PM_CFG_ASPM_L1_ENB;
999 		if ((sc->alc_flags & ALC_FLAG_APS) != 0) {
1000 			if (sc->sc_product == PCI_PRODUCT_ATTANSIC_L2C_1)
1001 				pmcfg &= ~PM_CFG_ASPM_L0S_ENB;
1002 			pmcfg &= ~(PM_CFG_SERDES_L1_ENB |
1003 			    PM_CFG_SERDES_PLL_L1_ENB |
1004 			    PM_CFG_SERDES_BUDS_RX_L1_ENB);
1005 			pmcfg |= PM_CFG_CLK_SWH_L1;
1006 			if (media == IFM_100_TX || media == IFM_1000_T) {
1007 				pmcfg &= ~PM_CFG_L1_ENTRY_TIMER_MASK;
1008 				switch (sc->sc_product) {
1009 				case PCI_PRODUCT_ATTANSIC_L2C_1:
1010 					pmcfg |= (7 <<
1011 					    PM_CFG_L1_ENTRY_TIMER_SHIFT);
1012 					break;
1013 				case PCI_PRODUCT_ATTANSIC_L1D_1:
1014 				case PCI_PRODUCT_ATTANSIC_L2C_2:
1015 					pmcfg |= (4 <<
1016 					    PM_CFG_L1_ENTRY_TIMER_SHIFT);
1017 					break;
1018 				default:
1019 					pmcfg |= (15 <<
1020 					    PM_CFG_L1_ENTRY_TIMER_SHIFT);
1021 					break;
1022 				}
1023 			}
1024 		} else {
1025 			pmcfg |= PM_CFG_SERDES_L1_ENB |
1026 			    PM_CFG_SERDES_PLL_L1_ENB |
1027 			    PM_CFG_SERDES_BUDS_RX_L1_ENB;
1028 			pmcfg &= ~(PM_CFG_CLK_SWH_L1 |
1029 			    PM_CFG_ASPM_L1_ENB | PM_CFG_ASPM_L0S_ENB);
1030 		}
1031 	} else {
1032 		pmcfg &= ~(PM_CFG_SERDES_BUDS_RX_L1_ENB | PM_CFG_SERDES_L1_ENB |
1033 		    PM_CFG_SERDES_PLL_L1_ENB);
1034 		pmcfg |= PM_CFG_CLK_SWH_L1;
1035 		if ((sc->alc_flags & ALC_FLAG_L1S) != 0)
1036 			pmcfg |= PM_CFG_ASPM_L1_ENB;
1037 	}
1038 	CSR_WRITE_4(sc, ALC_PM_CFG, pmcfg);
1039 }
1040 
1041 void
1042 alc_aspm_816x(struct alc_softc *sc, int init)
1043 {
1044 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
1045 	uint32_t pmcfg;
1046 
1047 	pmcfg = CSR_READ_4(sc, ALC_PM_CFG);
1048 	pmcfg &= ~PM_CFG_L1_ENTRY_TIMER_816X_MASK;
1049 	pmcfg |= PM_CFG_L1_ENTRY_TIMER_816X_DEFAULT;
1050 	pmcfg &= ~PM_CFG_PM_REQ_TIMER_MASK;
1051 	pmcfg |= PM_CFG_PM_REQ_TIMER_816X_DEFAULT;
1052 	pmcfg &= ~PM_CFG_LCKDET_TIMER_MASK;
1053 	pmcfg |= PM_CFG_LCKDET_TIMER_DEFAULT;
1054 	pmcfg |= PM_CFG_SERDES_PD_EX_L1 | PM_CFG_CLK_SWH_L1 | PM_CFG_PCIE_RECV;
1055 	pmcfg &= ~(PM_CFG_RX_L1_AFTER_L0S | PM_CFG_TX_L1_AFTER_L0S |
1056 	    PM_CFG_ASPM_L1_ENB | PM_CFG_ASPM_L0S_ENB |
1057 	    PM_CFG_SERDES_L1_ENB | PM_CFG_SERDES_PLL_L1_ENB |
1058 	    PM_CFG_SERDES_BUDS_RX_L1_ENB | PM_CFG_SA_DLY_ENB |
1059 	    PM_CFG_MAC_ASPM_CHK | PM_CFG_HOTRST);
1060 	if (AR816X_REV(sc->alc_rev) <= AR816X_REV_A1 &&
1061 	    (sc->alc_rev & 0x01) != 0)
1062 		pmcfg |= PM_CFG_SERDES_L1_ENB | PM_CFG_SERDES_PLL_L1_ENB;
1063 	if ((sc->alc_flags & ALC_FLAG_LINK) != 0) {
1064 		/* Link up, enable both L0s, L1s. */
1065 		pmcfg |= PM_CFG_ASPM_L0S_ENB | PM_CFG_ASPM_L1_ENB |
1066 		    PM_CFG_MAC_ASPM_CHK;
1067 	} else {
1068 		if (init != 0)
1069 			pmcfg |= PM_CFG_ASPM_L0S_ENB | PM_CFG_ASPM_L1_ENB |
1070 			    PM_CFG_MAC_ASPM_CHK;
1071 		else if ((ifp->if_flags & IFF_RUNNING) != 0)
1072 			pmcfg |= PM_CFG_ASPM_L1_ENB | PM_CFG_MAC_ASPM_CHK;
1073 	}
1074 	CSR_WRITE_4(sc, ALC_PM_CFG, pmcfg);
1075 }
1076 
1077 void
1078 alc_init_pcie(struct alc_softc *sc, int base)
1079 {
1080 	const char *aspm_state[] = { "L0s/L1", "L0s", "L1", "L0s/L1" };
1081 	uint32_t cap, ctl, val;
1082 	int state;
1083 
1084 	/* Clear data link and flow-control protocol error. */
1085 	val = CSR_READ_4(sc, ALC_PEX_UNC_ERR_SEV);
1086 	val &= ~(PEX_UNC_ERR_SEV_DLP | PEX_UNC_ERR_SEV_FCP);
1087 	CSR_WRITE_4(sc, ALC_PEX_UNC_ERR_SEV, val);
1088 
1089 	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) == 0) {
1090 		CSR_WRITE_4(sc, ALC_LTSSM_ID_CFG,
1091 		    CSR_READ_4(sc, ALC_LTSSM_ID_CFG) & ~LTSSM_ID_WRO_ENB);
1092 		CSR_WRITE_4(sc, ALC_PCIE_PHYMISC,
1093 		    CSR_READ_4(sc, ALC_PCIE_PHYMISC) |
1094 		    PCIE_PHYMISC_FORCE_RCV_DET);
1095 		if (sc->sc_product == PCI_PRODUCT_ATTANSIC_L2C_1 &&
1096 		    sc->alc_rev == ATHEROS_AR8152_B_V10) {
1097 			val = CSR_READ_4(sc, ALC_PCIE_PHYMISC2);
1098 			val &= ~(PCIE_PHYMISC2_SERDES_CDR_MASK |
1099 			    PCIE_PHYMISC2_SERDES_TH_MASK);
1100 			val |= 3 << PCIE_PHYMISC2_SERDES_CDR_SHIFT;
1101 			val |= 3 << PCIE_PHYMISC2_SERDES_TH_SHIFT;
1102 			CSR_WRITE_4(sc, ALC_PCIE_PHYMISC2, val);
1103 		}
1104 		/* Disable ASPM L0S and L1. */
1105 		cap = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
1106 		    base + PCI_PCIE_LCAP) >> 16;
1107 		if ((cap & 0x00000c00) != 0) {
1108 			ctl = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
1109 			    base + PCI_PCIE_LCSR) >> 16;
1110 			if ((ctl & 0x08) != 0)
1111 				sc->alc_rcb = DMA_CFG_RCB_128;
1112 			if (alcdebug)
1113 				printf("%s: RCB %u bytes\n",
1114 				    sc->sc_dev.dv_xname,
1115 				    sc->alc_rcb == DMA_CFG_RCB_64 ? 64 : 128);
1116 			state = ctl & 0x03;
1117 			if (state & 0x01)
1118 				sc->alc_flags |= ALC_FLAG_L0S;
1119 			if (state & 0x02)
1120 				sc->alc_flags |= ALC_FLAG_L1S;
1121 			if (alcdebug)
1122 				printf("%s: ASPM %s %s\n",
1123 				    sc->sc_dev.dv_xname,
1124 				    aspm_state[state],
1125 				    state == 0 ? "disabled" : "enabled");
1126 			alc_disable_l0s_l1(sc);
1127 		}
1128 	} else {
1129 		val = CSR_READ_4(sc, ALC_PDLL_TRNS1);
1130 		val &= ~PDLL_TRNS1_D3PLLOFF_ENB;
1131 		CSR_WRITE_4(sc, ALC_PDLL_TRNS1, val);
1132 		val = CSR_READ_4(sc, ALC_MASTER_CFG);
1133 		if (AR816X_REV(sc->alc_rev) <= AR816X_REV_A1 &&
1134 		    (sc->alc_rev & 0x01) != 0) {
1135 			if ((val & MASTER_WAKEN_25M) == 0 ||
1136 			    (val & MASTER_CLK_SEL_DIS) == 0) {
1137 				val |= MASTER_WAKEN_25M | MASTER_CLK_SEL_DIS;
1138 				CSR_WRITE_4(sc, ALC_MASTER_CFG, val);
1139 			}
1140 		} else {
1141 			if ((val & MASTER_WAKEN_25M) == 0 ||
1142 			    (val & MASTER_CLK_SEL_DIS) != 0) {
1143 				val |= MASTER_WAKEN_25M;
1144 				val &= ~MASTER_CLK_SEL_DIS;
1145 				CSR_WRITE_4(sc, ALC_MASTER_CFG, val);
1146 			}
1147 		}
1148 	}
1149 }
1150 
1151 void
1152 alc_config_msi(struct alc_softc *sc)
1153 {
1154 	uint32_t ctl, mod;
1155 
1156 	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0) {
1157 		/*
1158 		 * It seems interrupt moderation is controlled by the
1159 		 * ALC_MSI_RETRANS_TIMER register when MSI/MSI-X is
1160 		 * active. The driver uses the Rx interrupt moderation
1161 		 * parameter to program ALC_MSI_RETRANS_TIMER.
1162 		 */
1163 		ctl = CSR_READ_4(sc, ALC_MSI_RETRANS_TIMER);
1164 		ctl &= ~MSI_RETRANS_TIMER_MASK;
1165 		ctl &= ~MSI_RETRANS_MASK_SEL_LINE;
1166 		mod = ALC_USECS(sc->alc_int_rx_mod);
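		/* Keep the timer nonzero; ALC_USECS() of a small value may yield 0. */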
1167 		if (mod == 0)
1168 			mod = 1;
1169 		ctl |= mod;
1170 		if ((sc->alc_flags & ALC_FLAG_MSI) != 0)
1171 			CSR_WRITE_4(sc, ALC_MSI_RETRANS_TIMER, ctl |
1172 			    MSI_RETRANS_MASK_SEL_LINE);
1173 		else
1174 			CSR_WRITE_4(sc, ALC_MSI_RETRANS_TIMER, 0);
1175 	}
1176 }
1177 
1178 void
1179 alc_attach(struct device *parent, struct device *self, void *aux)
1180 {
1181 	struct alc_softc *sc = (struct alc_softc *)self;
1182 	struct pci_attach_args *pa = aux;
1183 	pci_chipset_tag_t pc = pa->pa_pc;
1184 	pci_intr_handle_t ih;
1185 	const char *intrstr;
1186 	struct ifnet *ifp;
1187 	pcireg_t memtype;
1188 	uint16_t burst;
1189 	int base, error = 0;
1190 
1191 	/* Set PHY address. */
1192 	sc->alc_phyaddr = ALC_PHY_ADDR;
1193 
1194 	/* Get PCI and chip id/revision. */
1195 	sc->sc_product = PCI_PRODUCT(pa->pa_id);
1196 	sc->alc_rev = PCI_REVISION(pa->pa_class);
1197 
1198 	/*
1199 	 * One odd thing is that AR8132 uses the same PHY hardware (F1
1200 	 * gigabit PHY) as AR8131, so atphy(4) on AR8132 reports that
1201 	 * the PHY supports 1000Mbps, but that's not true. The PHY used
1202 	 * in AR8132 can't establish a gigabit link even though it
1203 	 * shows the same PHY model/revision number as AR8131.
1204 	 */
1205 	switch (sc->sc_product) {
1206 	case PCI_PRODUCT_ATTANSIC_E2200:
1207 	case PCI_PRODUCT_ATTANSIC_E2400:
1208 	case PCI_PRODUCT_ATTANSIC_E2500:
1209 		sc->alc_flags |= ALC_FLAG_E2X00;
1210 		/* FALLTHROUGH */
1211 	case PCI_PRODUCT_ATTANSIC_AR8161:
1212 		if (AR816X_REV(sc->alc_rev) == 0)
1213 			sc->alc_flags |= ALC_FLAG_LINK_WAR;
1214 		/* FALLTHROUGH */
1215 	case PCI_PRODUCT_ATTANSIC_AR8171:
1216 		sc->alc_flags |= ALC_FLAG_AR816X_FAMILY;
1217 		break;
1218 	case PCI_PRODUCT_ATTANSIC_AR8162:
1219 	case PCI_PRODUCT_ATTANSIC_AR8172:
1220 		sc->alc_flags |= ALC_FLAG_FASTETHER | ALC_FLAG_AR816X_FAMILY;
1221 		break;
1222 	case PCI_PRODUCT_ATTANSIC_L2C_1:
1223 	case PCI_PRODUCT_ATTANSIC_L2C_2:
1224 		sc->alc_flags |= ALC_FLAG_APS;
1225 		/* FALLTHROUGH */
1226 	case PCI_PRODUCT_ATTANSIC_L2C:
1227 		sc->alc_flags |= ALC_FLAG_FASTETHER;
1228 		break;
1229 	case PCI_PRODUCT_ATTANSIC_L1D:
1230 	case PCI_PRODUCT_ATTANSIC_L1D_1:
1231 		sc->alc_flags |= ALC_FLAG_APS;
1232 		/* FALLTHROUGH */
1233 	default:
1234 		break;
1235 	}
1236 	sc->alc_flags |= ALC_FLAG_JUMBO;
1237 
1238 	/*
1239 	 * Allocate IO memory
1240 	 */
1241 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, ALC_PCIR_BAR);
1242 	if (pci_mapreg_map(pa, ALC_PCIR_BAR, memtype, 0, &sc->sc_mem_bt,
1243 	    &sc->sc_mem_bh, NULL, &sc->sc_mem_size, 0)) {
1244 		printf(": can't map mem space\n");
1245 		return;
1246 	}
1247 
1248 	sc->alc_flags |= ALC_FLAG_MSI;
1249 	if (pci_intr_map_msi(pa, &ih) != 0) {
1250 		if (pci_intr_map(pa, &ih) != 0) {
1251 			printf(": can't map interrupt\n");
1252 			goto fail;
1253 		}
1254 		sc->alc_flags &= ~ALC_FLAG_MSI;
1255 	}
1256 
1257 	/*
1258 	 * Allocate IRQ
1259 	 */
1260 	intrstr = pci_intr_string(pc, ih);
1261 	sc->sc_irq_handle = pci_intr_establish(pc, ih, IPL_NET, alc_intr, sc,
1262 	    sc->sc_dev.dv_xname);
1263 	if (sc->sc_irq_handle == NULL) {
1264 		printf(": could not establish interrupt");
1265 		if (intrstr != NULL)
1266 			printf(" at %s", intrstr);
1267 		printf("\n");
1268 		goto fail;
1269 	}
1270 	printf(": %s", intrstr);
1271 
1272 	alc_config_msi(sc);
1273 
1274 	sc->sc_dmat = pa->pa_dmat;
1275 	sc->sc_pct = pa->pa_pc;
1276 	sc->sc_pcitag = pa->pa_tag;
1277 
1278 	switch (sc->sc_product) {
1279 	case PCI_PRODUCT_ATTANSIC_L1D:
1280 	case PCI_PRODUCT_ATTANSIC_L1D_1:
1281 	case PCI_PRODUCT_ATTANSIC_L2C_1:
1282 	case PCI_PRODUCT_ATTANSIC_L2C_2:
1283 		sc->alc_max_framelen = 6 * 1024;
1284 		break;
1285 	default:
1286 		sc->alc_max_framelen = 9 * 1024;
1287 		break;
1288 	}
1289 
1290 	/*
1291 	 * It seems that AR813x/AR815x has a silicon bug in the SMB. In
1292 	 * addition, Atheros said that enabling SMB wouldn't improve
1293 	 * performance. However, I think it's bad to access lots of
1294 	 * registers to extract MAC statistics.
1295 	 */
1296 	sc->alc_flags |= ALC_FLAG_SMB_BUG;
1297 	/*
1298 	 * Don't use the Tx CMB. It is known to have a silicon bug.
1299 	 */
1300 	sc->alc_flags |= ALC_FLAG_CMB_BUG;
1301 	sc->alc_chip_rev = CSR_READ_4(sc, ALC_MASTER_CFG) >>
1302 	    MASTER_CHIP_REV_SHIFT;
1303 	if (alcdebug) {
1304 		printf("%s: PCI device revision : 0x%04x\n",
1305 		    sc->sc_dev.dv_xname, sc->alc_rev);
1306 		printf("%s: Chip id/revision : 0x%04x\n",
1307 		    sc->sc_dev.dv_xname, sc->alc_chip_rev);
1308 		printf("%s: %u Tx FIFO, %u Rx FIFO\n", sc->sc_dev.dv_xname,
1309 		    CSR_READ_4(sc, ALC_SRAM_TX_FIFO_LEN) * 8,
1310 		    CSR_READ_4(sc, ALC_SRAM_RX_FIFO_LEN) * 8);
1311 	}
1312 
1313 	/* Initialize DMA parameters. */
1314 	sc->alc_dma_rd_burst = 0;
1315 	sc->alc_dma_wr_burst = 0;
1316 	sc->alc_rcb = DMA_CFG_RCB_64;
1317 	if (pci_get_capability(pc, pa->pa_tag, PCI_CAP_PCIEXPRESS,
1318 	    &base, NULL)) {
1319 		sc->alc_flags |= ALC_FLAG_PCIE;
1320 		sc->alc_expcap = base;
1321 		burst = CSR_READ_2(sc, base + PCI_PCIE_DCSR);
1322 		sc->alc_dma_rd_burst = (burst & 0x7000) >> 12;
1323 		sc->alc_dma_wr_burst = (burst & 0x00e0) >> 5;
1324 		if (alcdebug) {
1325 			printf("%s: Read request size : %u bytes.\n",
1326 			    sc->sc_dev.dv_xname,
1327 			    alc_dma_burst[sc->alc_dma_rd_burst]);
1328 			printf("%s: TLP payload size : %u bytes.\n",
1329 			    sc->sc_dev.dv_xname,
1330 			    alc_dma_burst[sc->alc_dma_wr_burst]);
1331 		}
1332 		if (alc_dma_burst[sc->alc_dma_rd_burst] > 1024)
1333 			sc->alc_dma_rd_burst = 3;
1334 		if (alc_dma_burst[sc->alc_dma_wr_burst] > 1024)
1335 			sc->alc_dma_wr_burst = 3;
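		/*
		 * Index 3 corresponds to 1024 bytes in alc_dma_burst[];
		 * larger advertised bursts are clamped, presumably
		 * because the controller can't sustain them.
		 */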
1336 		/*
1337 		 * Force maximum payload size to 128 bytes for
1338 		 * E2200/E2400/E2500.
1339 		 * Otherwise it triggers DMA write error.
1340 		 */
1341 		if ((sc->alc_flags & ALC_FLAG_E2X00) != 0)
1342 			sc->alc_dma_wr_burst = 0;
1343 		alc_init_pcie(sc, base);
1344 	}
1345 
1346 	/* Reset PHY. */
1347 	alc_phy_reset(sc);
1348 
1349 	/* Reset the ethernet controller. */
1350 	alc_stop_mac(sc);
1351 	alc_reset(sc);
1352 
1353 	error = alc_dma_alloc(sc);
1354 	if (error)
1355 		goto fail;
1356 
1357 	/* Load station address. */
1358 	alc_get_macaddr(sc);
1359 
1360 	ifp = &sc->sc_arpcom.ac_if;
1361 	ifp->if_softc = sc;
1362 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1363 	ifp->if_ioctl = alc_ioctl;
1364 	ifp->if_start = alc_start;
1365 	ifp->if_watchdog = alc_watchdog;
1366 	IFQ_SET_MAXLEN(&ifp->if_snd, ALC_TX_RING_CNT - 1);
1367 	bcopy(sc->alc_eaddr, sc->sc_arpcom.ac_enaddr, ETHER_ADDR_LEN);
1368 	bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);
1369 
1370 	ifp->if_capabilities = IFCAP_VLAN_MTU;
1371 
1372 #ifdef ALC_CHECKSUM
1373 	ifp->if_capabilities |= IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 |
1374 	    IFCAP_CSUM_UDPv4;
1375 #endif
1376 
1377 #if NVLAN > 0
1378 	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
1379 #endif
1380 
1381 	printf(", address %s\n", ether_sprintf(sc->sc_arpcom.ac_enaddr));
1382 
1383 	/* Set up MII bus. */
1384 	sc->sc_miibus.mii_ifp = ifp;
1385 	sc->sc_miibus.mii_readreg = alc_miibus_readreg;
1386 	sc->sc_miibus.mii_writereg = alc_miibus_writereg;
1387 	sc->sc_miibus.mii_statchg = alc_miibus_statchg;
1388 
1389 	ifmedia_init(&sc->sc_miibus.mii_media, 0, alc_mediachange,
1390 	    alc_mediastatus);
1391 	mii_attach(self, &sc->sc_miibus, 0xffffffff, MII_PHY_ANY,
1392 	    MII_OFFSET_ANY, MIIF_DOPAUSE);
1393 
1394 	if (LIST_FIRST(&sc->sc_miibus.mii_phys) == NULL) {
1395 		printf("%s: no PHY found!\n", sc->sc_dev.dv_xname);
1396 		ifmedia_add(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_MANUAL,
1397 		    0, NULL);
1398 		ifmedia_set(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_MANUAL);
1399 	} else
1400 		ifmedia_set(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_AUTO);
1401 
1402 	if_attach(ifp);
1403 	ether_ifattach(ifp);
1404 
1405 	timeout_set(&sc->alc_tick_ch, alc_tick, sc);
1406 
1407 	return;
1408 fail:
1409 	alc_dma_free(sc);
1410 	if (sc->sc_irq_handle != NULL)
1411 		pci_intr_disestablish(pc, sc->sc_irq_handle);
1412 	if (sc->sc_mem_size)
1413 		bus_space_unmap(sc->sc_mem_bt, sc->sc_mem_bh, sc->sc_mem_size);
1414 }
1415 
1416 int
1417 alc_detach(struct device *self, int flags)
1418 {
1419 	struct alc_softc *sc = (struct alc_softc *)self;
1420 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
1421 	int s;
1422 
1423 	s = splnet();
1424 	alc_stop(sc);
1425 	splx(s);
1426 
1427 	mii_detach(&sc->sc_miibus, MII_PHY_ANY, MII_OFFSET_ANY);
1428 
1429 	/* Delete all remaining media. */
1430 	ifmedia_delete_instance(&sc->sc_miibus.mii_media, IFM_INST_ANY);
1431 
1432 	ether_ifdetach(ifp);
1433 	if_detach(ifp);
1434 	alc_dma_free(sc);
1435 
1436 	alc_phy_down(sc);
1437 	if (sc->sc_irq_handle != NULL) {
1438 		pci_intr_disestablish(sc->sc_pct, sc->sc_irq_handle);
1439 		sc->sc_irq_handle = NULL;
1440 	}
1441 
1442 	return (0);
1443 }
1444 
1445 int
1446 alc_activate(struct device *self, int act)
1447 {
1448 	struct alc_softc *sc = (struct alc_softc *)self;
1449 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
1450 	int rv = 0;
1451 
1452 	switch (act) {
1453 	case DVACT_SUSPEND:
1454 		if (ifp->if_flags & IFF_RUNNING)
1455 			alc_stop(sc);
1456 		rv = config_activate_children(self, act);
1457 		break;
1458 	case DVACT_RESUME:
1459 		if (ifp->if_flags & IFF_UP)
1460 			alc_init(ifp);
1461 		break;
1462 	default:
1463 		rv = config_activate_children(self, act);
1464 		break;
1465 	}
1466 	return (rv);
1467 }
1468 
1469 int
1470 alc_dma_alloc(struct alc_softc *sc)
1471 {
1472 	struct alc_txdesc *txd;
1473 	struct alc_rxdesc *rxd;
1474 	int nsegs, error, i;
1475 
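	/*
	 * Each ring below follows the canonical bus_dma(9) sequence:
	 * bus_dmamap_create() for the map, bus_dmamem_alloc() for the
	 * memory, bus_dmamem_map() for a kernel VA, and
	 * bus_dmamap_load() to obtain the bus address that is later
	 * programmed into the chip.
	 */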
1476 	/*
1477 	 * Create DMA resources for the Tx ring
1478 	 */
1479 	error = bus_dmamap_create(sc->sc_dmat, ALC_TX_RING_SZ, 1,
1480 	    ALC_TX_RING_SZ, 0, BUS_DMA_NOWAIT, &sc->alc_cdata.alc_tx_ring_map);
1481 	if (error)
1482 		return (ENOBUFS);
1483 
1484 	/* Allocate DMA'able memory for TX ring */
1485 	error = bus_dmamem_alloc(sc->sc_dmat, ALC_TX_RING_SZ,
1486 	    ETHER_ALIGN, 0, &sc->alc_rdata.alc_tx_ring_seg, 1,
1487 	    &nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO);
1488 	if (error) {
1489 		printf("%s: could not allocate DMA'able memory for Tx ring.\n",
1490 		    sc->sc_dev.dv_xname);
1491 		return (error);
1492 	}
1493 
1494 	error = bus_dmamem_map(sc->sc_dmat, &sc->alc_rdata.alc_tx_ring_seg,
1495 	    nsegs, ALC_TX_RING_SZ, (caddr_t *)&sc->alc_rdata.alc_tx_ring,
1496 	    BUS_DMA_NOWAIT);
1497 	if (error)
1498 		return (ENOBUFS);
1499 
1500 	/* Load the DMA map for Tx ring. */
1501 	error = bus_dmamap_load(sc->sc_dmat, sc->alc_cdata.alc_tx_ring_map,
1502 	    sc->alc_rdata.alc_tx_ring, ALC_TX_RING_SZ, NULL, BUS_DMA_WAITOK);
1503 	if (error) {
1504 		printf("%s: could not load DMA'able memory for Tx ring.\n",
1505 		    sc->sc_dev.dv_xname);
1506 		bus_dmamem_free(sc->sc_dmat,
1507 		    (bus_dma_segment_t *)&sc->alc_rdata.alc_tx_ring, 1);
1508 		return (error);
1509 	}
1510 
1511 	sc->alc_rdata.alc_tx_ring_paddr =
1512 	    sc->alc_cdata.alc_tx_ring_map->dm_segs[0].ds_addr;
1513 
1514 	/*
1515 	 * Create DMA resources for the Rx ring
1516 	 */
1517 	error = bus_dmamap_create(sc->sc_dmat, ALC_RX_RING_SZ, 1,
1518 	    ALC_RX_RING_SZ, 0, BUS_DMA_NOWAIT, &sc->alc_cdata.alc_rx_ring_map);
1519 	if (error)
1520 		return (ENOBUFS);
1521 
1522 	/* Allocate DMA'able memory for RX ring */
1523 	error = bus_dmamem_alloc(sc->sc_dmat, ALC_RX_RING_SZ,
1524 	    ETHER_ALIGN, 0, &sc->alc_rdata.alc_rx_ring_seg, 1,
1525 	    &nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO);
1526 	if (error) {
1527 		printf("%s: could not allocate DMA'able memory for Rx ring.\n",
1528 		    sc->sc_dev.dv_xname);
1529 		return (error);
1530 	}
1531 
1532 	error = bus_dmamem_map(sc->sc_dmat, &sc->alc_rdata.alc_rx_ring_seg,
1533 	    nsegs, ALC_RX_RING_SZ, (caddr_t *)&sc->alc_rdata.alc_rx_ring,
1534 	    BUS_DMA_NOWAIT);
1535 	if (error)
1536 		return (ENOBUFS);
1537 
1538 	/* Load the DMA map for Rx ring. */
1539 	error = bus_dmamap_load(sc->sc_dmat, sc->alc_cdata.alc_rx_ring_map,
1540 	    sc->alc_rdata.alc_rx_ring, ALC_RX_RING_SZ, NULL, BUS_DMA_WAITOK);
1541 	if (error) {
1542 		printf("%s: could not load DMA'able memory for Rx ring.\n",
1543 		    sc->sc_dev.dv_xname);
1544 		bus_dmamem_free(sc->sc_dmat,
1545 		    (bus_dma_segment_t *)sc->alc_rdata.alc_rx_ring, 1);
1546 		return (error);
1547 	}
1548 
1549 	sc->alc_rdata.alc_rx_ring_paddr =
1550 	    sc->alc_cdata.alc_rx_ring_map->dm_segs[0].ds_addr;
1551 
1552 	/*
1553 	 * Create DMA resources for the Rx return ring
1554 	 */
1555 	error = bus_dmamap_create(sc->sc_dmat, ALC_RR_RING_SZ, 1,
1556 	    ALC_RR_RING_SZ, 0, BUS_DMA_NOWAIT, &sc->alc_cdata.alc_rr_ring_map);
1557 	if (error)
1558 		return (ENOBUFS);
1559 
1560 	/* Allocate DMA'able memory for RX return ring */
1561 	error = bus_dmamem_alloc(sc->sc_dmat, ALC_RR_RING_SZ,
1562 	    ETHER_ALIGN, 0, &sc->alc_rdata.alc_rr_ring_seg, 1,
1563 	    &nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO);
1564 	if (error) {
1565 		printf("%s: could not allocate DMA'able memory for Rx "
1566 		    "return ring.\n", sc->sc_dev.dv_xname);
1567 		return (error);
1568 	}
1569 
1570 	error = bus_dmamem_map(sc->sc_dmat, &sc->alc_rdata.alc_rr_ring_seg,
1571 	    nsegs, ALC_RR_RING_SZ, (caddr_t *)&sc->alc_rdata.alc_rr_ring,
1572 	    BUS_DMA_NOWAIT);
1573 	if (error)
1574 		return (ENOBUFS);
1575 
1576 	/*  Load the DMA map for Rx return ring. */
1577 	error = bus_dmamap_load(sc->sc_dmat, sc->alc_cdata.alc_rr_ring_map,
1578 	    sc->alc_rdata.alc_rr_ring, ALC_RR_RING_SZ, NULL, BUS_DMA_WAITOK);
1579 	if (error) {
1580 		printf("%s: could not load DMA'able memory for Rx return ring."
1581 		    "\n", sc->sc_dev.dv_xname);
1582 		bus_dmamem_free(sc->sc_dmat,
1583 		    (bus_dma_segment_t *)&sc->alc_rdata.alc_rr_ring, 1);
1584 		return (error);
1585 	}
1586 
1587 	sc->alc_rdata.alc_rr_ring_paddr =
1588 	    sc->alc_cdata.alc_rr_ring_map->dm_segs[0].ds_addr;
1589 
1590 	/*
1591 	 * Create DMA resources for the CMB block
1592 	 */
1593 	error = bus_dmamap_create(sc->sc_dmat, ALC_CMB_SZ, 1,
1594 	    ALC_CMB_SZ, 0, BUS_DMA_NOWAIT,
1595 	    &sc->alc_cdata.alc_cmb_map);
1596 	if (error)
1597 		return (ENOBUFS);
1598 
1599 	/* Allocate DMA'able memory for CMB block */
1600 	error = bus_dmamem_alloc(sc->sc_dmat, ALC_CMB_SZ,
1601 	    ETHER_ALIGN, 0, &sc->alc_rdata.alc_cmb_seg, 1,
1602 	    &nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO);
1603 	if (error) {
1604 		printf("%s: could not allocate DMA'able memory for "
1605 		    "CMB block\n", sc->sc_dev.dv_xname);
1606 		return (error);
1607 	}
1608 
1609 	error = bus_dmamem_map(sc->sc_dmat, &sc->alc_rdata.alc_cmb_seg,
1610 	    nsegs, ALC_CMB_SZ, (caddr_t *)&sc->alc_rdata.alc_cmb,
1611 	    BUS_DMA_NOWAIT);
1612 	if (error)
1613 		return (ENOBUFS);
1614 
1615 	/*  Load the DMA map for CMB block. */
1616 	error = bus_dmamap_load(sc->sc_dmat, sc->alc_cdata.alc_cmb_map,
1617 	    sc->alc_rdata.alc_cmb, ALC_CMB_SZ, NULL,
1618 	    BUS_DMA_WAITOK);
1619 	if (error) {
1620 		printf("%s: could not load DMA'able memory for CMB block\n",
1621 		    sc->sc_dev.dv_xname);
1622 		bus_dmamem_free(sc->sc_dmat,
1623 		    (bus_dma_segment_t *)&sc->alc_rdata.alc_cmb, 1);
1624 		return (error);
1625 	}
1626 
1627 	sc->alc_rdata.alc_cmb_paddr =
1628 	    sc->alc_cdata.alc_cmb_map->dm_segs[0].ds_addr;
1629 
1630 	/*
1631 	 * Create DMA resources for the SMB block
1632 	 */
1633 	error = bus_dmamap_create(sc->sc_dmat, ALC_SMB_SZ, 1,
1634 	    ALC_SMB_SZ, 0, BUS_DMA_NOWAIT,
1635 	    &sc->alc_cdata.alc_smb_map);
1636 	if (error)
1637 		return (ENOBUFS);
1638 
1639 	/* Allocate DMA'able memory for SMB block */
1640 	error = bus_dmamem_alloc(sc->sc_dmat, ALC_SMB_SZ,
1641 	    ETHER_ALIGN, 0, &sc->alc_rdata.alc_smb_seg, 1,
1642 	    &nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO);
1643 	if (error) {
1644 		printf("%s: could not allocate DMA'able memory for "
1645 		    "SMB block\n", sc->sc_dev.dv_xname);
1646 		return (error);
1647 	}
1648 
1649 	error = bus_dmamem_map(sc->sc_dmat, &sc->alc_rdata.alc_smb_seg,
1650 	    nsegs, ALC_SMB_SZ, (caddr_t *)&sc->alc_rdata.alc_smb,
1651 	    BUS_DMA_NOWAIT);
1652 	if (error)
1653 		return (ENOBUFS);
1654 
1655 	/*  Load the DMA map for SMB block */
1656 	error = bus_dmamap_load(sc->sc_dmat, sc->alc_cdata.alc_smb_map,
1657 	    sc->alc_rdata.alc_smb, ALC_SMB_SZ, NULL,
1658 	    BUS_DMA_WAITOK);
1659 	if (error) {
1660 		printf("%s: could not load DMA'able memory for SMB block\n",
1661 		    sc->sc_dev.dv_xname);
1662 		bus_dmamem_free(sc->sc_dmat,
1663 		    (bus_dma_segment_t *)&sc->alc_rdata.alc_smb, 1);
1664 		return (error);
1665 	}
1666 
1667 	sc->alc_rdata.alc_smb_paddr =
1668 	    sc->alc_cdata.alc_smb_map->dm_segs[0].ds_addr;
1669 
1671 	/* Create DMA maps for Tx buffers. */
1672 	for (i = 0; i < ALC_TX_RING_CNT; i++) {
1673 		txd = &sc->alc_cdata.alc_txdesc[i];
1674 		txd->tx_m = NULL;
1675 		txd->tx_dmamap = NULL;
1676 		error = bus_dmamap_create(sc->sc_dmat, ALC_TSO_MAXSIZE,
1677 		    ALC_MAXTXSEGS, ALC_TSO_MAXSEGSIZE, 0, BUS_DMA_NOWAIT,
1678 		    &txd->tx_dmamap);
1679 		if (error) {
1680 			printf("%s: could not create Tx dmamap.\n",
1681 			    sc->sc_dev.dv_xname);
1682 			return (error);
1683 		}
1684 	}
1685 
1686 	/* Create DMA maps for Rx buffers. */
1687 	error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 0,
1688 	    BUS_DMA_NOWAIT, &sc->alc_cdata.alc_rx_sparemap);
1689 	if (error) {
1690 		printf("%s: could not create spare Rx dmamap.\n",
1691 		    sc->sc_dev.dv_xname);
1692 		return (error);
1693 	}
1694 
1695 	for (i = 0; i < ALC_RX_RING_CNT; i++) {
1696 		rxd = &sc->alc_cdata.alc_rxdesc[i];
1697 		rxd->rx_m = NULL;
1698 		rxd->rx_dmamap = NULL;
1699 		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
1700 		    MCLBYTES, 0, BUS_DMA_NOWAIT, &rxd->rx_dmamap);
1701 		if (error) {
1702 			printf("%s: could not create Rx dmamap.\n",
1703 			    sc->sc_dev.dv_xname);
1704 			return (error);
1705 		}
1706 	}
1707 
1708 	return (0);
1709 }
1710 
1711 void
1712 alc_dma_free(struct alc_softc *sc)
1713 {
1714 	struct alc_txdesc *txd;
1715 	struct alc_rxdesc *rxd;
1716 	int i;
1717 
1718 	/* Tx buffers */
1719 	for (i = 0; i < ALC_TX_RING_CNT; i++) {
1720 		txd = &sc->alc_cdata.alc_txdesc[i];
1721 		if (txd->tx_dmamap != NULL) {
1722 			bus_dmamap_destroy(sc->sc_dmat, txd->tx_dmamap);
1723 			txd->tx_dmamap = NULL;
1724 		}
1725 	}
1726 	/* Rx buffers */
1727 	for (i = 0; i < ALC_RX_RING_CNT; i++) {
1728 		rxd = &sc->alc_cdata.alc_rxdesc[i];
1729 		if (rxd->rx_dmamap != NULL) {
1730 			bus_dmamap_destroy(sc->sc_dmat, rxd->rx_dmamap);
1731 			rxd->rx_dmamap = NULL;
1732 		}
1733 	}
1734 	if (sc->alc_cdata.alc_rx_sparemap != NULL) {
1735 		bus_dmamap_destroy(sc->sc_dmat, sc->alc_cdata.alc_rx_sparemap);
1736 		sc->alc_cdata.alc_rx_sparemap = NULL;
1737 	}
1738 
1739 	/* Tx ring. */
1740 	if (sc->alc_cdata.alc_tx_ring_map != NULL)
1741 		bus_dmamap_unload(sc->sc_dmat, sc->alc_cdata.alc_tx_ring_map);
1742 	if (sc->alc_cdata.alc_tx_ring_map != NULL &&
1743 	    sc->alc_rdata.alc_tx_ring != NULL)
1744 		bus_dmamem_free(sc->sc_dmat,
1745 		    (bus_dma_segment_t *)sc->alc_rdata.alc_tx_ring, 1);
1746 	sc->alc_rdata.alc_tx_ring = NULL;
1747 	sc->alc_cdata.alc_tx_ring_map = NULL;
1748 
1749 	/* Rx ring. */
1750 	if (sc->alc_cdata.alc_rx_ring_map != NULL)
1751 		bus_dmamap_unload(sc->sc_dmat, sc->alc_cdata.alc_rx_ring_map);
1752 	if (sc->alc_cdata.alc_rx_ring_map != NULL &&
1753 	    sc->alc_rdata.alc_rx_ring != NULL)
1754 		bus_dmamem_free(sc->sc_dmat,
1755 		    (bus_dma_segment_t *)sc->alc_rdata.alc_rx_ring, 1);
1756 	sc->alc_rdata.alc_rx_ring = NULL;
1757 	sc->alc_cdata.alc_rx_ring_map = NULL;
1758 
1759 	/* Rx return ring. */
1760 	if (sc->alc_cdata.alc_rr_ring_map != NULL)
1761 		bus_dmamap_unload(sc->sc_dmat, sc->alc_cdata.alc_rr_ring_map);
1762 	if (sc->alc_cdata.alc_rr_ring_map != NULL &&
1763 	    sc->alc_rdata.alc_rr_ring != NULL)
1764 		bus_dmamem_free(sc->sc_dmat,
1765 		    (bus_dma_segment_t *)sc->alc_rdata.alc_rr_ring, 1);
1766 	sc->alc_rdata.alc_rr_ring = NULL;
1767 	sc->alc_cdata.alc_rr_ring_map = NULL;
1768 
1769 	/* CMB block */
1770 	if (sc->alc_cdata.alc_cmb_map != NULL)
1771 		bus_dmamap_unload(sc->sc_dmat, sc->alc_cdata.alc_cmb_map);
1772 	if (sc->alc_cdata.alc_cmb_map != NULL &&
1773 	    sc->alc_rdata.alc_cmb != NULL)
1774 		bus_dmamem_free(sc->sc_dmat,
1775 		    (bus_dma_segment_t *)sc->alc_rdata.alc_cmb, 1);
1776 	sc->alc_rdata.alc_cmb = NULL;
1777 	sc->alc_cdata.alc_cmb_map = NULL;
1778 
1779 	/* SMB block */
1780 	if (sc->alc_cdata.alc_smb_map != NULL)
1781 		bus_dmamap_unload(sc->sc_dmat, sc->alc_cdata.alc_smb_map);
1782 	if (sc->alc_cdata.alc_smb_map != NULL &&
1783 	    sc->alc_rdata.alc_smb != NULL)
1784 		bus_dmamem_free(sc->sc_dmat,
1785 		    (bus_dma_segment_t *)sc->alc_rdata.alc_smb, 1);
1786 	sc->alc_rdata.alc_smb = NULL;
1787 	sc->alc_cdata.alc_smb_map = NULL;
1788 }
1789 
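/*
 * Queue a single frame for transmission.  The mbuf chain is loaded
 * into the slot's Tx DMA map (with one m_defrag() retry if it takes
 * too many segments), one Tx descriptor is filled per DMA segment,
 * the last descriptor is marked with TD_EOP, and the dmamaps of the
 * first and last slots are swapped so the map can be unloaded when
 * the EOP slot is reclaimed in alc_txeof().
 */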
1790 int
1791 alc_encap(struct alc_softc *sc, struct mbuf *m)
1792 {
1793 	struct alc_txdesc *txd, *txd_last;
1794 	struct tx_desc *desc;
1795 	bus_dmamap_t map;
1796 	uint32_t cflags, poff, vtag;
1797 	int error, idx, prod;
1798 
1799 	cflags = vtag = 0;
1800 	poff = 0;
1801 
1802 	prod = sc->alc_cdata.alc_tx_prod;
1803 	txd = &sc->alc_cdata.alc_txdesc[prod];
1804 	txd_last = txd;
1805 	map = txd->tx_dmamap;
1806 
1807 	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m, BUS_DMA_NOWAIT);
1808 	if (error != 0 && error != EFBIG)
1809 		goto drop;
1810 	if (error != 0) {
1811 		if (m_defrag(m, M_DONTWAIT)) {
1812 			error = ENOBUFS;
1813 			goto drop;
1814 		}
1815 		error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
1816 		    BUS_DMA_NOWAIT);
1817 		if (error != 0)
1818 			goto drop;
1819 	}
1820 
1821 	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
1822 	    BUS_DMASYNC_PREWRITE);
1823 
1824 	desc = NULL;
1825 	idx = 0;
1826 #if NVLAN > 0
1827 	/* Configure VLAN hardware tag insertion. */
1828 	if (m->m_flags & M_VLANTAG) {
1829 		vtag = htons(m->m_pkthdr.ether_vtag);
1830 		vtag = (vtag << TD_VLAN_SHIFT) & TD_VLAN_MASK;
1831 		cflags |= TD_INS_VLAN_TAG;
1832 	}
1833 #endif
1834 	/* Configure Tx checksum offload. */
1835 	if ((m->m_pkthdr.csum_flags & ALC_CSUM_FEATURES) != 0) {
1836 		cflags |= TD_CUSTOM_CSUM;
1837 		/* Set checksum start offset. */
1838 		cflags |= ((poff >> 1) << TD_PLOAD_OFFSET_SHIFT) &
1839 		    TD_PLOAD_OFFSET_MASK;
1840 	}
1841 
1842 	for (; idx < map->dm_nsegs; idx++) {
1843 		desc = &sc->alc_rdata.alc_tx_ring[prod];
1844 		desc->len =
1845 		    htole32(TX_BYTES(map->dm_segs[idx].ds_len) | vtag);
1846 		desc->flags = htole32(cflags);
1847 		desc->addr = htole64(map->dm_segs[idx].ds_addr);
1848 		sc->alc_cdata.alc_tx_cnt++;
1849 		ALC_DESC_INC(prod, ALC_TX_RING_CNT);
1850 	}
1851 
1852 	/* Update producer index. */
1853 	sc->alc_cdata.alc_tx_prod = prod;
1854 
1855 	/* Finally set EOP on the last descriptor. */
1856 	prod = (prod + ALC_TX_RING_CNT - 1) % ALC_TX_RING_CNT;
1857 	desc = &sc->alc_rdata.alc_tx_ring[prod];
1858 	desc->flags |= htole32(TD_EOP);
1859 
1860 	/* Swap dmamap of the first and the last. */
1861 	txd = &sc->alc_cdata.alc_txdesc[prod];
1862 	map = txd_last->tx_dmamap;
1863 	txd_last->tx_dmamap = txd->tx_dmamap;
1864 	txd->tx_dmamap = map;
1865 	txd->tx_m = m;
1866 
1867 	return (0);
1868 
1869 drop:
1870 	m_freem(m);
1871 	return (error);
1872 }
1873 
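/*
 * Transmit start routine.  Frames are dequeued and handed to
 * alc_encap() until the ring no longer has room for a worst-case
 * ALC_MAXTXSEGS-segment frame; the producer index is written to the
 * mailbox register once, after the loop, rather than per packet.
 */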
1874 void
1875 alc_start(struct ifnet *ifp)
1876 {
1877 	struct alc_softc *sc = ifp->if_softc;
1878 	struct mbuf *m;
1879 	int enq = 0;
1880 
1881 	/* Reclaim transmitted frames. */
1882 	if (sc->alc_cdata.alc_tx_cnt >= ALC_TX_DESC_HIWAT)
1883 		alc_txeof(sc);
1884 
1885 	if (!(ifp->if_flags & IFF_RUNNING) || ifq_is_oactive(&ifp->if_snd))
1886 		return;
1887 	if ((sc->alc_flags & ALC_FLAG_LINK) == 0)
1888 		return;
1889 	if (IFQ_IS_EMPTY(&ifp->if_snd))
1890 		return;
1891 
1892 	for (;;) {
1893 		if (sc->alc_cdata.alc_tx_cnt + ALC_MAXTXSEGS >=
1894 		    ALC_TX_RING_CNT - 3) {
1895 			ifq_set_oactive(&ifp->if_snd);
1896 			break;
1897 		}
1898 
1899 		IFQ_DEQUEUE(&ifp->if_snd, m);
1900 		if (m == NULL)
1901 			break;
1902 
1903 		if (alc_encap(sc, m) != 0) {
1904 			ifp->if_oerrors++;
1905 			continue;
1906 		}
1907 		enq++;
1908 
1909 #if NBPFILTER > 0
1910 		/*
1911 		 * If there's a BPF listener, bounce a copy of this frame
		 * to it.
1913 		 */
1914 		if (ifp->if_bpf != NULL)
1915 			bpf_mtap_ether(ifp->if_bpf, m, BPF_DIRECTION_OUT);
1916 #endif
1917 	}
1918 
1919 	if (enq > 0) {
1920 		/* Sync descriptors. */
1921 		bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_tx_ring_map, 0,
1922 		    sc->alc_cdata.alc_tx_ring_map->dm_mapsize,
1923 		    BUS_DMASYNC_PREWRITE);
1924 		/* Kick. Assume we're using normal Tx priority queue. */
1925 		if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0)
1926 			CSR_WRITE_2(sc, ALC_MBOX_TD_PRI0_PROD_IDX,
1927 			    (uint16_t)sc->alc_cdata.alc_tx_prod);
1928 		else
1929 			CSR_WRITE_4(sc, ALC_MBOX_TD_PROD_IDX,
1930 			    (sc->alc_cdata.alc_tx_prod <<
1931 			    MBOX_TD_PROD_LO_IDX_SHIFT) &
1932 			    MBOX_TD_PROD_LO_IDX_MASK);
1933 		/* Set a timeout in case the chip goes out to lunch. */
1934 		ifp->if_timer = ALC_TX_TIMEOUT;
1935 	}
1936 }
1937 
1938 void
1939 alc_watchdog(struct ifnet *ifp)
1940 {
1941 	struct alc_softc *sc = ifp->if_softc;
1942 
1943 	if ((sc->alc_flags & ALC_FLAG_LINK) == 0) {
1944 		printf("%s: watchdog timeout (missed link)\n",
1945 		    sc->sc_dev.dv_xname);
1946 		ifp->if_oerrors++;
1947 		alc_init(ifp);
1948 		return;
1949 	}
1950 
1951 	printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);
1952 	ifp->if_oerrors++;
1953 	alc_init(ifp);
1954 	alc_start(ifp);
1955 }
1956 
1957 int
1958 alc_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
1959 {
1960 	struct alc_softc *sc = ifp->if_softc;
1961 	struct mii_data *mii = &sc->sc_miibus;
1962 	struct ifreq *ifr = (struct ifreq *)data;
1963 	int s, error = 0;
1964 
1965 	s = splnet();
1966 
1967 	switch (cmd) {
1968 	case SIOCSIFADDR:
1969 		ifp->if_flags |= IFF_UP;
1970 		if (!(ifp->if_flags & IFF_RUNNING))
1971 			alc_init(ifp);
1972 		break;
1973 
1974 	case SIOCSIFFLAGS:
1975 		if (ifp->if_flags & IFF_UP) {
1976 			if (ifp->if_flags & IFF_RUNNING)
1977 				error = ENETRESET;
1978 			else
1979 				alc_init(ifp);
1980 		} else {
1981 			if (ifp->if_flags & IFF_RUNNING)
1982 				alc_stop(sc);
1983 		}
1984 		break;
1985 
1986 	case SIOCSIFMEDIA:
1987 	case SIOCGIFMEDIA:
1988 		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
1989 		break;
1990 
1991 	default:
1992 		error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data);
1993 		break;
1994 	}
1995 
1996 	if (error == ENETRESET) {
1997 		if (ifp->if_flags & IFF_RUNNING)
1998 			alc_iff(sc);
1999 		error = 0;
2000 	}
2001 
2002 	splx(s);
2003 	return (error);
2004 }
2005 
2006 void
2007 alc_mac_config(struct alc_softc *sc)
2008 {
2009 	struct mii_data *mii;
2010 	uint32_t reg;
2011 
2012 	mii = &sc->sc_miibus;
2013 	reg = CSR_READ_4(sc, ALC_MAC_CFG);
2014 	reg &= ~(MAC_CFG_FULL_DUPLEX | MAC_CFG_TX_FC | MAC_CFG_RX_FC |
2015 	    MAC_CFG_SPEED_MASK);
	if (sc->sc_product == PCI_PRODUCT_ATTANSIC_L1D ||
	    sc->sc_product == PCI_PRODUCT_ATTANSIC_L1D_1 ||
	    sc->sc_product == PCI_PRODUCT_ATTANSIC_L2C_2 ||
	    (sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0)
2020 		reg |= MAC_CFG_HASH_ALG_CRC32 | MAC_CFG_SPEED_MODE_SW;
2021 	/* Reprogram MAC with resolved speed/duplex. */
2022 	switch (IFM_SUBTYPE(mii->mii_media_active)) {
2023 	case IFM_10_T:
2024 	case IFM_100_TX:
2025 		reg |= MAC_CFG_SPEED_10_100;
2026 		break;
2027 	case IFM_1000_T:
2028 		reg |= MAC_CFG_SPEED_1000;
2029 		break;
2030 	}
2031 	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
2032 		reg |= MAC_CFG_FULL_DUPLEX;
2033 		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
2034 			reg |= MAC_CFG_TX_FC;
2035 		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
2036 			reg |= MAC_CFG_RX_FC;
2037 	}
2038 	CSR_WRITE_4(sc, ALC_MAC_CFG, reg);
2039 }
2040 
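/*
 * Clear the MAC statistics.  When the statistics message block (SMB)
 * is usable, clearing its "updated" flag is enough; on parts with
 * the SMB bug the MIB counters are read from the chip instead.
 */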
2041 void
2042 alc_stats_clear(struct alc_softc *sc)
2043 {
2044 	struct smb sb, *smb;
2045 	uint32_t *reg;
2046 	int i;
2047 
2048 	if ((sc->alc_flags & ALC_FLAG_SMB_BUG) == 0) {
2049 		bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_smb_map, 0,
2050 		    sc->alc_cdata.alc_smb_map->dm_mapsize,
2051 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2052 		smb = sc->alc_rdata.alc_smb;
2053 		/* Update done, clear. */
2054 		smb->updated = 0;
2055 		bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_smb_map, 0,
2056 		    sc->alc_cdata.alc_smb_map->dm_mapsize,
2057 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2058 	} else {
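		/* Read and discard the Rx MIB counters to clear them. */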
2059 		for (reg = &sb.rx_frames, i = 0; reg <= &sb.rx_pkts_filtered;
2060 		    reg++) {
2061 			CSR_READ_4(sc, ALC_RX_MIB_BASE + i);
2062 			i += sizeof(uint32_t);
2063 		}
2064 		/* Read Tx statistics. */
2065 		for (reg = &sb.tx_frames, i = 0; reg <= &sb.tx_mcast_bytes;
2066 		    reg++) {
2067 			CSR_READ_4(sc, ALC_TX_MIB_BASE + i);
2068 			i += sizeof(uint32_t);
2069 		}
2070 	}
2071 }
2072 
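/*
 * Fold the per-interval hardware counters into the driver's
 * accumulated statistics and the ifnet collision/error counters.
 * Counters come from the DMA'd statistics message block, or from
 * the MIB registers on parts with the SMB bug.
 */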
2073 void
2074 alc_stats_update(struct alc_softc *sc)
2075 {
2076 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
2077 	struct alc_hw_stats *stat;
2078 	struct smb sb, *smb;
2079 	uint32_t *reg;
2080 	int i;
2081 
2082 	stat = &sc->alc_stats;
2083 	if ((sc->alc_flags & ALC_FLAG_SMB_BUG) == 0) {
2084 		bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_smb_map, 0,
2085 		    sc->alc_cdata.alc_smb_map->dm_mapsize,
2086 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2087 		smb = sc->alc_rdata.alc_smb;
2088 		if (smb->updated == 0)
2089 			return;
2090 	} else {
2091 		smb = &sb;
2092 		/* Read Rx statistics. */
2093 		for (reg = &sb.rx_frames, i = 0; reg <= &sb.rx_pkts_filtered;
2094 		    reg++) {
2095 			*reg = CSR_READ_4(sc, ALC_RX_MIB_BASE + i);
2096 			i += sizeof(uint32_t);
2097 		}
2098 		/* Read Tx statistics. */
2099 		for (reg = &sb.tx_frames, i = 0; reg <= &sb.tx_mcast_bytes;
2100 		    reg++) {
2101 			*reg = CSR_READ_4(sc, ALC_TX_MIB_BASE + i);
2102 			i += sizeof(uint32_t);
2103 		}
2104 	}
2105 
2106 	/* Rx stats. */
2107 	stat->rx_frames += smb->rx_frames;
2108 	stat->rx_bcast_frames += smb->rx_bcast_frames;
2109 	stat->rx_mcast_frames += smb->rx_mcast_frames;
2110 	stat->rx_pause_frames += smb->rx_pause_frames;
2111 	stat->rx_control_frames += smb->rx_control_frames;
2112 	stat->rx_crcerrs += smb->rx_crcerrs;
2113 	stat->rx_lenerrs += smb->rx_lenerrs;
2114 	stat->rx_bytes += smb->rx_bytes;
2115 	stat->rx_runts += smb->rx_runts;
2116 	stat->rx_fragments += smb->rx_fragments;
2117 	stat->rx_pkts_64 += smb->rx_pkts_64;
2118 	stat->rx_pkts_65_127 += smb->rx_pkts_65_127;
2119 	stat->rx_pkts_128_255 += smb->rx_pkts_128_255;
2120 	stat->rx_pkts_256_511 += smb->rx_pkts_256_511;
2121 	stat->rx_pkts_512_1023 += smb->rx_pkts_512_1023;
2122 	stat->rx_pkts_1024_1518 += smb->rx_pkts_1024_1518;
2123 	stat->rx_pkts_1519_max += smb->rx_pkts_1519_max;
2124 	stat->rx_pkts_truncated += smb->rx_pkts_truncated;
2125 	stat->rx_fifo_oflows += smb->rx_fifo_oflows;
2126 	stat->rx_rrs_errs += smb->rx_rrs_errs;
2127 	stat->rx_alignerrs += smb->rx_alignerrs;
2128 	stat->rx_bcast_bytes += smb->rx_bcast_bytes;
2129 	stat->rx_mcast_bytes += smb->rx_mcast_bytes;
2130 	stat->rx_pkts_filtered += smb->rx_pkts_filtered;
2131 
2132 	/* Tx stats. */
2133 	stat->tx_frames += smb->tx_frames;
2134 	stat->tx_bcast_frames += smb->tx_bcast_frames;
2135 	stat->tx_mcast_frames += smb->tx_mcast_frames;
2136 	stat->tx_pause_frames += smb->tx_pause_frames;
2137 	stat->tx_excess_defer += smb->tx_excess_defer;
2138 	stat->tx_control_frames += smb->tx_control_frames;
2139 	stat->tx_deferred += smb->tx_deferred;
2140 	stat->tx_bytes += smb->tx_bytes;
2141 	stat->tx_pkts_64 += smb->tx_pkts_64;
2142 	stat->tx_pkts_65_127 += smb->tx_pkts_65_127;
2143 	stat->tx_pkts_128_255 += smb->tx_pkts_128_255;
2144 	stat->tx_pkts_256_511 += smb->tx_pkts_256_511;
2145 	stat->tx_pkts_512_1023 += smb->tx_pkts_512_1023;
2146 	stat->tx_pkts_1024_1518 += smb->tx_pkts_1024_1518;
2147 	stat->tx_pkts_1519_max += smb->tx_pkts_1519_max;
2148 	stat->tx_single_colls += smb->tx_single_colls;
2149 	stat->tx_multi_colls += smb->tx_multi_colls;
2150 	stat->tx_late_colls += smb->tx_late_colls;
2151 	stat->tx_excess_colls += smb->tx_excess_colls;
2152 	stat->tx_underrun += smb->tx_underrun;
2153 	stat->tx_desc_underrun += smb->tx_desc_underrun;
2154 	stat->tx_lenerrs += smb->tx_lenerrs;
2155 	stat->tx_pkts_truncated += smb->tx_pkts_truncated;
2156 	stat->tx_bcast_bytes += smb->tx_bcast_bytes;
2157 	stat->tx_mcast_bytes += smb->tx_mcast_bytes;
2158 
2159 	ifp->if_collisions += smb->tx_single_colls +
2160 	    smb->tx_multi_colls * 2 + smb->tx_late_colls +
2161 	    smb->tx_excess_colls * HDPX_CFG_RETRY_DEFAULT;
2162 
2163 	ifp->if_oerrors += smb->tx_late_colls + smb->tx_excess_colls +
2164 	    smb->tx_underrun + smb->tx_pkts_truncated;
2165 
2166 	ifp->if_ierrors += smb->rx_crcerrs + smb->rx_lenerrs +
2167 	    smb->rx_runts + smb->rx_pkts_truncated +
2168 	    smb->rx_fifo_oflows + smb->rx_rrs_errs +
2169 	    smb->rx_alignerrs;
2170 
2171 	if ((sc->alc_flags & ALC_FLAG_SMB_BUG) == 0) {
2172 		/* Update done, clear. */
2173 		smb->updated = 0;
2174 		bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_smb_map, 0,
2175 		    sc->alc_cdata.alc_smb_map->dm_mapsize,
2176 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2177 	}
2178 }
2179 
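/*
 * Interrupt handler.  Interrupts are masked with INTR_DIS_INT while
 * the handler runs and re-enabled on the way out.  Fatal DMA/TxQ
 * errors and Rx failures are handled by reinitializing the chip.
 */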
2180 int
2181 alc_intr(void *arg)
2182 {
2183 	struct alc_softc *sc = arg;
2184 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
2185 	uint32_t status;
2186 	int claimed = 0;
2187 
2188 	status = CSR_READ_4(sc, ALC_INTR_STATUS);
2189 	if ((status & ALC_INTRS) == 0)
2190 		return (0);
2191 
2192 	/* Disable interrupts. */
2193 	CSR_WRITE_4(sc, ALC_INTR_STATUS, INTR_DIS_INT);
2194 
2195 	status = CSR_READ_4(sc, ALC_INTR_STATUS);
2196 	if ((status & ALC_INTRS) == 0)
2197 		goto back;
2198 
2199 	/* Acknowledge and disable interrupts. */
2200 	CSR_WRITE_4(sc, ALC_INTR_STATUS, status | INTR_DIS_INT);
2201 
2202 	if (ifp->if_flags & IFF_RUNNING) {
2203 		int error = 0;
2204 
2205 		if (status & INTR_RX_PKT) {
2206 			error = alc_rxintr(sc);
2207 			if (error) {
2208 				alc_init(ifp);
2209 				return (0);
2210 			}
2211 		}
2212 		if (status & (INTR_DMA_RD_TO_RST | INTR_DMA_WR_TO_RST |
2213 		    INTR_TXQ_TO_RST)) {
2214 			if (status & INTR_DMA_RD_TO_RST)
2215 				printf("%s: DMA read error! -- resetting\n",
2216 				    sc->sc_dev.dv_xname);
2217 			if (status & INTR_DMA_WR_TO_RST)
2218 				printf("%s: DMA write error! -- resetting\n",
2219 				    sc->sc_dev.dv_xname);
2220 			if (status & INTR_TXQ_TO_RST)
2221 				printf("%s: TxQ reset! -- resetting\n",
2222 				    sc->sc_dev.dv_xname);
2223 			alc_init(ifp);
2224 			return (0);
2225 		}
2226 
2227 		alc_txeof(sc);
2228 		alc_start(ifp);
2229 	}
2230 
2231 	claimed = 1;
2232 back:
2233 	/* Re-enable interrupts. */
2234 	CSR_WRITE_4(sc, ALC_INTR_STATUS, 0x7FFFFFFF);
2235 	return (claimed);
2236 }
2237 
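/*
 * Reclaim transmitted frames.  The hardware consumer index comes
 * from the coalescing message block (CMB) when it is usable, or
 * from the mailbox register on parts with the CMB bug; every
 * descriptor between the driver's consumer index and the hardware's
 * is unloaded and freed.
 */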
2238 void
2239 alc_txeof(struct alc_softc *sc)
2240 {
2241 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
2242 	struct alc_txdesc *txd;
2243 	uint32_t cons, prod;
2244 	int prog;
2245 
2246 	if (sc->alc_cdata.alc_tx_cnt == 0)
2247 		return;
2248 	bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_tx_ring_map, 0,
2249 	    sc->alc_cdata.alc_tx_ring_map->dm_mapsize,
2250 	    BUS_DMASYNC_POSTWRITE);
2251 	if ((sc->alc_flags & ALC_FLAG_CMB_BUG) == 0) {
2252 		bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_cmb_map, 0,
2253 		    sc->alc_cdata.alc_cmb_map->dm_mapsize,
2254 		    BUS_DMASYNC_POSTREAD);
2255 		prod = sc->alc_rdata.alc_cmb->cons;
2256 	} else {
2257 		if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0)
2258 			prod = CSR_READ_2(sc, ALC_MBOX_TD_PRI0_CONS_IDX);
2259 		else {
2260 			prod = CSR_READ_4(sc, ALC_MBOX_TD_CONS_IDX);
2261 			/* Assume we're using normal Tx priority queue. */
2262 			prod = (prod & MBOX_TD_CONS_LO_IDX_MASK) >>
2263 			    MBOX_TD_CONS_LO_IDX_SHIFT;
2264 		}
2265 	}
2266 	cons = sc->alc_cdata.alc_tx_cons;
2267 	/*
2268 	 * Go through our Tx list and free mbufs for those
2269 	 * frames which have been transmitted.
2270 	 */
2271 	for (prog = 0; cons != prod; prog++,
2272 	    ALC_DESC_INC(cons, ALC_TX_RING_CNT)) {
2273 		if (sc->alc_cdata.alc_tx_cnt <= 0)
2274 			break;
2276 		ifq_clr_oactive(&ifp->if_snd);
2277 		sc->alc_cdata.alc_tx_cnt--;
2278 		txd = &sc->alc_cdata.alc_txdesc[cons];
2279 		if (txd->tx_m != NULL) {
2280 			/* Reclaim transmitted mbufs. */
2281 			bus_dmamap_sync(sc->sc_dmat, txd->tx_dmamap, 0,
2282 			    txd->tx_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
2283 			bus_dmamap_unload(sc->sc_dmat, txd->tx_dmamap);
2284 			m_freem(txd->tx_m);
2285 			txd->tx_m = NULL;
2286 		}
2287 	}
2288 
	if ((sc->alc_flags & ALC_FLAG_CMB_BUG) == 0)
		bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_cmb_map, 0,
		    sc->alc_cdata.alc_cmb_map->dm_mapsize, BUS_DMASYNC_PREREAD);
2292 	sc->alc_cdata.alc_tx_cons = cons;
2293 	/*
	 * Unarm the watchdog timer only when there are no pending
	 * frames in the Tx queue.
2296 	 */
2297 	if (sc->alc_cdata.alc_tx_cnt == 0)
2298 		ifp->if_timer = 0;
2299 }
2300 
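/*
 * Attach a fresh mbuf cluster to an Rx slot.  The cluster is loaded
 * into the preallocated spare map first so that a load failure
 * leaves the old mbuf and mapping intact; on success the slot's map
 * and the spare map are swapped.
 */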
2301 int
2302 alc_newbuf(struct alc_softc *sc, struct alc_rxdesc *rxd)
2303 {
2304 	struct mbuf *m;
2305 	bus_dmamap_t map;
2306 	int error;
2307 
2308 	MGETHDR(m, M_DONTWAIT, MT_DATA);
2309 	if (m == NULL)
2310 		return (ENOBUFS);
2311 	MCLGET(m, M_DONTWAIT);
2312 	if (!(m->m_flags & M_EXT)) {
2313 		m_freem(m);
2314 		return (ENOBUFS);
2315 	}
2316 
2317 	m->m_len = m->m_pkthdr.len = RX_BUF_SIZE_MAX;
2318 
2319 	error = bus_dmamap_load_mbuf(sc->sc_dmat,
2320 	    sc->alc_cdata.alc_rx_sparemap, m, BUS_DMA_NOWAIT);
2321 
2322 	if (error != 0) {
2323 		m_freem(m);
2324 		printf("%s: can't load RX mbuf\n", sc->sc_dev.dv_xname);
2325 		return (error);
2326 	}
2327 
2328 	if (rxd->rx_m != NULL) {
2329 		bus_dmamap_sync(sc->sc_dmat, rxd->rx_dmamap, 0,
2330 		    rxd->rx_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
2331 		bus_dmamap_unload(sc->sc_dmat, rxd->rx_dmamap);
2332 	}
2333 	map = rxd->rx_dmamap;
2334 	rxd->rx_dmamap = sc->alc_cdata.alc_rx_sparemap;
2335 	sc->alc_cdata.alc_rx_sparemap = map;
2336 	bus_dmamap_sync(sc->sc_dmat, rxd->rx_dmamap, 0, rxd->rx_dmamap->dm_mapsize,
2337 	    BUS_DMASYNC_PREREAD);
2338 	rxd->rx_m = m;
2339 	rxd->rx_desc->addr = htole64(rxd->rx_dmamap->dm_segs[0].ds_addr);
2340 	return (0);
2341 }
2342 
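/*
 * Drain the Rx return ring.  Each valid return descriptor describes
 * one frame spread across one or more Rx buffers; alc_rxeof() does
 * the per-frame work, after which the consumer indices are synced
 * and written back to the hardware.
 */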
2343 int
2344 alc_rxintr(struct alc_softc *sc)
2345 {
2346 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
2347 	struct rx_rdesc *rrd;
2348 	uint32_t nsegs, status;
2349 	int rr_cons, prog;
2350 
2351 	bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_rr_ring_map, 0,
2352 	    sc->alc_cdata.alc_rr_ring_map->dm_mapsize,
2353 	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2354 	bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_rx_ring_map, 0,
2355 	    sc->alc_cdata.alc_rx_ring_map->dm_mapsize,
2356 	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2357 	rr_cons = sc->alc_cdata.alc_rr_cons;
2358 	for (prog = 0; (ifp->if_flags & IFF_RUNNING) != 0;) {
2359 		rrd = &sc->alc_rdata.alc_rr_ring[rr_cons];
2360 		status = letoh32(rrd->status);
2361 		if ((status & RRD_VALID) == 0)
2362 			break;
2363 		nsegs = RRD_RD_CNT(letoh32(rrd->rdinfo));
2364 		if (nsegs == 0) {
2365 			/* This should not happen! */
2366 			if (alcdebug)
2367 				printf("%s: unexpected segment count -- "
2368 				    "resetting\n", sc->sc_dev.dv_xname);
2369 			return (EIO);
2370 		}
2371 		alc_rxeof(sc, rrd);
2372 		/* Clear Rx return status. */
2373 		rrd->status = 0;
2374 		ALC_DESC_INC(rr_cons, ALC_RR_RING_CNT);
2375 		sc->alc_cdata.alc_rx_cons += nsegs;
2376 		sc->alc_cdata.alc_rx_cons %= ALC_RR_RING_CNT;
2377 		prog += nsegs;
2378 	}
2379 
2380 	if (prog > 0) {
2381 		/* Update the consumer index. */
2382 		sc->alc_cdata.alc_rr_cons = rr_cons;
2383 		/* Sync Rx return descriptors. */
2384 		bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_rr_ring_map, 0,
2385 		    sc->alc_cdata.alc_rr_ring_map->dm_mapsize,
2386 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2387 		/*
		 * Sync the updated Rx descriptors so that the controller
		 * sees the modified buffer addresses.
2390 		 */
2391 		bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_rx_ring_map, 0,
2392 		    sc->alc_cdata.alc_rx_ring_map->dm_mapsize,
2393 		    BUS_DMASYNC_PREWRITE);
2394 		/*
		 * Let the controller know about the availability of new
		 * Rx buffers. Since alc(4) uses RXQ_CFG_RD_BURST_DEFAULT
		 * descriptors, it may be possible to update
		 * ALC_MBOX_RD0_PROD_IDX only when Rx buffer pre-fetching
		 * is required. In addition, we already set
		 * ALC_RX_RD_FREE_THRESH to RX_RD_FREE_THRESH_LO_DEFAULT
		 * descriptors. However, it still seems that pre-fetching
		 * needs more experimentation.
2403 		 */
2404 		if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0)
2405 			CSR_WRITE_2(sc, ALC_MBOX_RD0_PROD_IDX,
2406 			    (uint16_t)sc->alc_cdata.alc_rx_cons);
2407 		else
2408 			CSR_WRITE_4(sc, ALC_MBOX_RD0_PROD_IDX,
2409 			    sc->alc_cdata.alc_rx_cons);
2410 	}
2411 
2412 	return (0);
2413 }
2414 
2415 /* Receive a frame. */
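/*
 * A frame may span several Rx buffers.  Each buffer is replaced via
 * alc_newbuf() and chained onto the frame under construction; when
 * the last segment is reached the chain becomes a packet, the
 * trailing CRC is trimmed (the controller cannot strip it), and the
 * mbuf is handed to if_input().
 */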
2416 void
2417 alc_rxeof(struct alc_softc *sc, struct rx_rdesc *rrd)
2418 {
2419 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
2420 	struct alc_rxdesc *rxd;
2421 	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
2422 	struct mbuf *mp, *m;
2423 	uint32_t rdinfo, status;
2424 	int count, nsegs, rx_cons;
2425 
2426 	status = letoh32(rrd->status);
2427 	rdinfo = letoh32(rrd->rdinfo);
2428 	rx_cons = RRD_RD_IDX(rdinfo);
2429 	nsegs = RRD_RD_CNT(rdinfo);
2430 
2431 	sc->alc_cdata.alc_rxlen = RRD_BYTES(status);
2432 	if (status & (RRD_ERR_SUM | RRD_ERR_LENGTH)) {
2433 		/*
		 * We want to pass the following frames to the upper
		 * layer regardless of the error status of the Rx
		 * return ring:
		 *
		 *  o IP/TCP/UDP checksum is bad.
		 *  o frame length and protocol-specific length
		 *    do not match.
		 *
		 * Force the network stack to compute checksums for
		 * errored frames.
2444 		 */
2445 		if ((status & (RRD_ERR_CRC | RRD_ERR_ALIGN |
2446 		    RRD_ERR_TRUNC | RRD_ERR_RUNT)) != 0)
2447 			return;
2448 	}
2449 
2450 	for (count = 0; count < nsegs; count++,
2451 	    ALC_DESC_INC(rx_cons, ALC_RX_RING_CNT)) {
2452 		rxd = &sc->alc_cdata.alc_rxdesc[rx_cons];
2453 		mp = rxd->rx_m;
2454 		/* Add a new receive buffer to the ring. */
2455 		if (alc_newbuf(sc, rxd) != 0) {
2456 			ifp->if_iqdrops++;
2457 			/* Reuse Rx buffers. */
2458 			m_freem(sc->alc_cdata.alc_rxhead);
2459 			break;
2460 		}
2461 
2462 		/*
		 * Assume we've received a full-sized frame.
		 * The actual size is fixed up when we encounter the end
		 * of a multi-segment frame.
2466 		 */
2467 		mp->m_len = sc->alc_buf_size;
2468 
2469 		/* Chain received mbufs. */
2470 		if (sc->alc_cdata.alc_rxhead == NULL) {
2471 			sc->alc_cdata.alc_rxhead = mp;
2472 			sc->alc_cdata.alc_rxtail = mp;
2473 		} else {
2474 			mp->m_flags &= ~M_PKTHDR;
2475 			sc->alc_cdata.alc_rxprev_tail =
2476 			    sc->alc_cdata.alc_rxtail;
2477 			sc->alc_cdata.alc_rxtail->m_next = mp;
2478 			sc->alc_cdata.alc_rxtail = mp;
2479 		}
2480 
2481 		if (count == nsegs - 1) {
2482 			/* Last desc. for this frame. */
2483 			m = sc->alc_cdata.alc_rxhead;
2484 			m->m_flags |= M_PKTHDR;
2485 			/*
			 * It seems that the L1C/L2C controller has no way
			 * to tell the hardware to strip the CRC bytes.
2488 			 */
2489 			m->m_pkthdr.len =
2490 			    sc->alc_cdata.alc_rxlen - ETHER_CRC_LEN;
2491 			if (nsegs > 1) {
2492 				/* Set last mbuf size. */
2493 				mp->m_len = sc->alc_cdata.alc_rxlen -
2494 				    (nsegs - 1) * sc->alc_buf_size;
2495 				/* Remove the CRC bytes in chained mbufs. */
2496 				if (mp->m_len <= ETHER_CRC_LEN) {
2497 					sc->alc_cdata.alc_rxtail =
2498 					    sc->alc_cdata.alc_rxprev_tail;
2499 					sc->alc_cdata.alc_rxtail->m_len -=
2500 					    (ETHER_CRC_LEN - mp->m_len);
2501 					sc->alc_cdata.alc_rxtail->m_next = NULL;
2502 					m_freem(mp);
2503 				} else {
2504 					mp->m_len -= ETHER_CRC_LEN;
2505 				}
2506 			} else
2507 				m->m_len = m->m_pkthdr.len;
2508 			/*
2509 			 * Due to hardware bugs, Rx checksum offloading
2510 			 * was intentionally disabled.
2511 			 */
2512 #if NVLAN > 0
2513 			if (status & RRD_VLAN_TAG) {
2514 				u_int32_t vtag = RRD_VLAN(letoh32(rrd->vtag));
2515 				m->m_pkthdr.ether_vtag = ntohs(vtag);
2516 				m->m_flags |= M_VLANTAG;
2517 			}
2518 #endif
2519 
2521 			ml_enqueue(&ml, m);
2522 		}
2523 	}
2524 	if_input(ifp, &ml);
2525 
2526 	/* Reset mbuf chains. */
2527 	ALC_RXCHAIN_RESET(sc);
2528 }
2529 
2530 void
2531 alc_tick(void *xsc)
2532 {
2533 	struct alc_softc *sc = xsc;
2534 	struct mii_data *mii = &sc->sc_miibus;
2535 	int s;
2536 
2537 	s = splnet();
2538 	mii_tick(mii);
2539 	alc_stats_update(sc);
2540 
2541 	timeout_add_sec(&sc->alc_tick_ch, 1);
2542 	splx(s);
2543 }
2544 
2545 void
2546 alc_osc_reset(struct alc_softc *sc)
2547 {
2548 	uint32_t reg;
2549 
2550 	reg = CSR_READ_4(sc, ALC_MISC3);
2551 	reg &= ~MISC3_25M_BY_SW;
2552 	reg |= MISC3_25M_NOTO_INTNL;
2553 	CSR_WRITE_4(sc, ALC_MISC3, reg);
2554 	reg = CSR_READ_4(sc, ALC_MISC);
2555 	if (AR816X_REV(sc->alc_rev) >= AR816X_REV_B0) {
2556 		/*
2557 		 * Restore over-current protection default value.
2558 		 * This value could be reset by MAC reset.
2559 		 */
2560 		reg &= ~MISC_PSW_OCP_MASK;
2561 		reg |= (MISC_PSW_OCP_DEFAULT << MISC_PSW_OCP_SHIFT);
2562 		reg &= ~MISC_INTNLOSC_OPEN;
2563 		CSR_WRITE_4(sc, ALC_MISC, reg);
2564 		CSR_WRITE_4(sc, ALC_MISC, reg | MISC_INTNLOSC_OPEN);
2565 		reg = CSR_READ_4(sc, ALC_MISC2);
2566 		reg &= ~MISC2_CALB_START;
2567 		CSR_WRITE_4(sc, ALC_MISC2, reg);
2568 		CSR_WRITE_4(sc, ALC_MISC2, reg | MISC2_CALB_START);
2569 	} else {
2570 		reg &= ~MISC_INTNLOSC_OPEN;
2571 		/* Disable isolate for revision A devices. */
2572 		if (AR816X_REV(sc->alc_rev) <= AR816X_REV_A1)
2573 			reg &= ~MISC_ISO_ENB;
2574 		CSR_WRITE_4(sc, ALC_MISC, reg | MISC_INTNLOSC_OPEN);
2575 		CSR_WRITE_4(sc, ALC_MISC, reg);
2576 	}
2577 	DELAY(20);
2578 }
2579 
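/*
 * Issue a full master reset.  AR816x parts need a workaround write
 * to the Rx mailbox before the reset and an oscillator reset
 * afterwards; the loops below poll until the reset bit clears and
 * the MAC/queue state machines report idle.
 */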
2580 void
2581 alc_reset(struct alc_softc *sc)
2582 {
2583 	uint32_t reg, pmcfg = 0;
2584 	int i;
2585 
2586 	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0) {
2587 		/* Reset workaround. */
2588 		CSR_WRITE_4(sc, ALC_MBOX_RD0_PROD_IDX, 1);
2589 		if (AR816X_REV(sc->alc_rev) <= AR816X_REV_A1 &&
2590 		    (sc->alc_rev & 0x01) != 0) {
2591 			/* Disable L0s/L1s before reset. */
2592 			pmcfg = CSR_READ_4(sc, ALC_PM_CFG);
2593 			if ((pmcfg & (PM_CFG_ASPM_L0S_ENB |
			    PM_CFG_ASPM_L1_ENB)) != 0) {
2595 				pmcfg &= ~(PM_CFG_ASPM_L0S_ENB |
2596 				    PM_CFG_ASPM_L1_ENB);
2597 				CSR_WRITE_4(sc, ALC_PM_CFG, pmcfg);
2598 			}
2599 		}
2600 	}
2601 	reg = CSR_READ_4(sc, ALC_MASTER_CFG);
2602 	reg |= MASTER_OOB_DIS_OFF | MASTER_RESET;
2603 	CSR_WRITE_4(sc, ALC_MASTER_CFG, reg);
2604 
2605 	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0) {
2606 		for (i = ALC_RESET_TIMEOUT; i > 0; i--) {
2607 			DELAY(10);
2608 			if (CSR_READ_4(sc, ALC_MBOX_RD0_PROD_IDX) == 0)
2609 				break;
2610 		}
2611 		if (i == 0)
			printf("%s: MAC reset timeout!\n", sc->sc_dev.dv_xname);
2613 	}
2614 	for (i = ALC_RESET_TIMEOUT; i > 0; i--) {
2615 		DELAY(10);
2616 		if ((CSR_READ_4(sc, ALC_MASTER_CFG) & MASTER_RESET) == 0)
2617 			break;
2618 	}
2619 	if (i == 0)
2620 		printf("%s: master reset timeout!\n", sc->sc_dev.dv_xname);
2621 
2622 	for (i = ALC_RESET_TIMEOUT; i > 0; i--) {
2623 		reg = CSR_READ_4(sc, ALC_IDLE_STATUS);
2624 		if ((reg & (IDLE_STATUS_RXMAC | IDLE_STATUS_TXMAC |
2625 		    IDLE_STATUS_RXQ | IDLE_STATUS_TXQ)) == 0)
2626 			break;
2627 		DELAY(10);
2628 	}
2629 
2630 	if (i == 0)
2631 		printf("%s: reset timeout(0x%08x)!\n", sc->sc_dev.dv_xname,
2632 		    reg);
2633 
2634 	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0) {
2635 		if (AR816X_REV(sc->alc_rev) <= AR816X_REV_A1 &&
2636 		    (sc->alc_rev & 0x01) != 0) {
2637 			reg = CSR_READ_4(sc, ALC_MASTER_CFG);
2638 			reg |= MASTER_CLK_SEL_DIS;
2639 			CSR_WRITE_4(sc, ALC_MASTER_CFG, reg);
2640 			/* Restore L0s/L1s config. */
2641 			if ((pmcfg & (PM_CFG_ASPM_L0S_ENB |
2642 			    PM_CFG_ASPM_L1_ENB)) != 0)
2643 				CSR_WRITE_4(sc, ALC_PM_CFG, pmcfg);
2644 		}
2645 		alc_osc_reset(sc);
2646 		reg = CSR_READ_4(sc, ALC_MISC3);
2647 		reg &= ~MISC3_25M_BY_SW;
2648 		reg |= MISC3_25M_NOTO_INTNL;
2649 		CSR_WRITE_4(sc, ALC_MISC3, reg);
2650 		reg = CSR_READ_4(sc, ALC_MISC);
2651 		reg &= ~MISC_INTNLOSC_OPEN;
2652 		if (AR816X_REV(sc->alc_rev) <= AR816X_REV_A1)
2653 			reg &= ~MISC_ISO_ENB;
2654 		CSR_WRITE_4(sc, ALC_MISC, reg);
2655 		DELAY(20);
2656 	}
2657 	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0 ||
	    sc->sc_product == PCI_PRODUCT_ATTANSIC_L2C_1 ||
2659 	    sc->sc_product == PCI_PRODUCT_ATTANSIC_L2C_2)
2660 		CSR_WRITE_4(sc, ALC_SERDES_LOCK,
2661 		    CSR_READ_4(sc, ALC_SERDES_LOCK) |
2662 		    SERDES_MAC_CLK_SLOWDOWN | SERDES_PHY_CLK_SLOWDOWN);
2663 }
2664 
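/*
 * Bring the interface up: stop and reset the chip, rebuild all
 * descriptor rings, program ring base addresses and counters,
 * interrupt moderation, queue/DMA parameters and the MAC, then
 * unmask interrupts and kick off media selection.
 */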
2665 int
2666 alc_init(struct ifnet *ifp)
2667 {
2668 	struct alc_softc *sc = ifp->if_softc;
2669 	uint8_t eaddr[ETHER_ADDR_LEN];
2670 	bus_addr_t paddr;
2671 	uint32_t reg, rxf_hi, rxf_lo;
2672 	int error;
2673 
2674 	/*
2675 	 * Cancel any pending I/O.
2676 	 */
2677 	alc_stop(sc);
2678 	/*
2679 	 * Reset the chip to a known state.
2680 	 */
2681 	alc_reset(sc);
2682 
2683 	/* Initialize Rx descriptors. */
2684 	error = alc_init_rx_ring(sc);
2685 	if (error != 0) {
2686 		printf("%s: no memory for Rx buffers.\n", sc->sc_dev.dv_xname);
2687 		alc_stop(sc);
2688 		return (error);
2689 	}
2690 	alc_init_rr_ring(sc);
2691 	alc_init_tx_ring(sc);
2692 	alc_init_cmb(sc);
2693 	alc_init_smb(sc);
2694 
2695 	/* Enable all clocks. */
2696 	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0) {
2697 		CSR_WRITE_4(sc, ALC_CLK_GATING_CFG, CLK_GATING_DMAW_ENB |
2698 		    CLK_GATING_DMAR_ENB | CLK_GATING_TXQ_ENB |
2699 		    CLK_GATING_RXQ_ENB | CLK_GATING_TXMAC_ENB |
2700 		    CLK_GATING_RXMAC_ENB);
2701 		if (AR816X_REV(sc->alc_rev) >= AR816X_REV_B0)
2702 			CSR_WRITE_4(sc, ALC_IDLE_DECISN_TIMER,
2703 			    IDLE_DECISN_TIMER_DEFAULT_1MS);
2704 	} else
2705 		CSR_WRITE_4(sc, ALC_CLK_GATING_CFG, 0);
2706 
2707 	/* Reprogram the station address. */
2708 	bcopy(LLADDR(ifp->if_sadl), eaddr, ETHER_ADDR_LEN);
2709 	CSR_WRITE_4(sc, ALC_PAR0,
2710 	    eaddr[2] << 24 | eaddr[3] << 16 | eaddr[4] << 8 | eaddr[5]);
2711 	CSR_WRITE_4(sc, ALC_PAR1, eaddr[0] << 8 | eaddr[1]);
2712 	/*
	 * Clear WOL status and disable all WOL features, as WOL
	 * would interfere with Rx operation under normal environments.
2715 	 */
2716 	CSR_READ_4(sc, ALC_WOL_CFG);
2717 	CSR_WRITE_4(sc, ALC_WOL_CFG, 0);
2718 	/* Set Tx descriptor base addresses. */
2719 	paddr = sc->alc_rdata.alc_tx_ring_paddr;
2720 	CSR_WRITE_4(sc, ALC_TX_BASE_ADDR_HI, ALC_ADDR_HI(paddr));
2721 	CSR_WRITE_4(sc, ALC_TDL_HEAD_ADDR_LO, ALC_ADDR_LO(paddr));
2722 	/* We don't use high priority ring. */
2723 	CSR_WRITE_4(sc, ALC_TDH_HEAD_ADDR_LO, 0);
2724 	/* Set Tx descriptor counter. */
2725 	CSR_WRITE_4(sc, ALC_TD_RING_CNT,
2726 	    (ALC_TX_RING_CNT << TD_RING_CNT_SHIFT) & TD_RING_CNT_MASK);
2727 	/* Set Rx descriptor base addresses. */
2728 	paddr = sc->alc_rdata.alc_rx_ring_paddr;
2729 	CSR_WRITE_4(sc, ALC_RX_BASE_ADDR_HI, ALC_ADDR_HI(paddr));
2730 	CSR_WRITE_4(sc, ALC_RD0_HEAD_ADDR_LO, ALC_ADDR_LO(paddr));
2731 	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) == 0) {
2732 		/* We use one Rx ring. */
2733 		CSR_WRITE_4(sc, ALC_RD1_HEAD_ADDR_LO, 0);
2734 		CSR_WRITE_4(sc, ALC_RD2_HEAD_ADDR_LO, 0);
2735 		CSR_WRITE_4(sc, ALC_RD3_HEAD_ADDR_LO, 0);
2736 	}
2737 	/* Set Rx descriptor counter. */
2738 	CSR_WRITE_4(sc, ALC_RD_RING_CNT,
2739 	    (ALC_RX_RING_CNT << RD_RING_CNT_SHIFT) & RD_RING_CNT_MASK);
2740 
2741 	/*
	 * Let the hardware split jumbo frames into alc_buf_size chunks
	 * if they do not fit the buffer size. The Rx return descriptor
	 * holds a counter that indicates how many fragments were made
	 * by the hardware. The buffer size should be a multiple of 8
	 * bytes. Since the hardware has a limit on the buffer size,
	 * always use the maximum value.
	 * For strict-alignment architectures, make sure to reduce the
	 * buffer size by 8 bytes to make room for the alignment fixup.
2750 	 */
2751 	sc->alc_buf_size = RX_BUF_SIZE_MAX;
2752 	CSR_WRITE_4(sc, ALC_RX_BUF_SIZE, sc->alc_buf_size);
2753 
2754 	paddr = sc->alc_rdata.alc_rr_ring_paddr;
2755 	/* Set Rx return descriptor base addresses. */
2756 	CSR_WRITE_4(sc, ALC_RRD0_HEAD_ADDR_LO, ALC_ADDR_LO(paddr));
2757 	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) == 0) {
2758 		/* We use one Rx return ring. */
2759 		CSR_WRITE_4(sc, ALC_RRD1_HEAD_ADDR_LO, 0);
2760 		CSR_WRITE_4(sc, ALC_RRD2_HEAD_ADDR_LO, 0);
2761 		CSR_WRITE_4(sc, ALC_RRD3_HEAD_ADDR_LO, 0);
2762 	}
2763 	/* Set Rx return descriptor counter. */
2764 	CSR_WRITE_4(sc, ALC_RRD_RING_CNT,
2765 	    (ALC_RR_RING_CNT << RRD_RING_CNT_SHIFT) & RRD_RING_CNT_MASK);
2766 	paddr = sc->alc_rdata.alc_cmb_paddr;
2767 	CSR_WRITE_4(sc, ALC_CMB_BASE_ADDR_LO, ALC_ADDR_LO(paddr));
2768 	paddr = sc->alc_rdata.alc_smb_paddr;
2769 	CSR_WRITE_4(sc, ALC_SMB_BASE_ADDR_HI, ALC_ADDR_HI(paddr));
2770 	CSR_WRITE_4(sc, ALC_SMB_BASE_ADDR_LO, ALC_ADDR_LO(paddr));
2771 
2772 	if (sc->sc_product == PCI_PRODUCT_ATTANSIC_L2C_1) {
2773 		/* Reconfigure SRAM - Vendor magic. */
2774 		CSR_WRITE_4(sc, ALC_SRAM_RX_FIFO_LEN, 0x000002A0);
2775 		CSR_WRITE_4(sc, ALC_SRAM_TX_FIFO_LEN, 0x00000100);
2776 		CSR_WRITE_4(sc, ALC_SRAM_RX_FIFO_ADDR, 0x029F0000);
2777 		CSR_WRITE_4(sc, ALC_SRAM_RD0_ADDR, 0x02BF02A0);
2778 		CSR_WRITE_4(sc, ALC_SRAM_TX_FIFO_ADDR, 0x03BF02C0);
2779 		CSR_WRITE_4(sc, ALC_SRAM_TD_ADDR, 0x03DF03C0);
2780 		CSR_WRITE_4(sc, ALC_TXF_WATER_MARK, 0x00000000);
2781 		CSR_WRITE_4(sc, ALC_RD_DMA_CFG, 0x00000000);
2782 	}
2783 
2784 	/* Tell hardware that we're ready to load DMA blocks. */
2785 	CSR_WRITE_4(sc, ALC_DMA_BLOCK, DMA_BLOCK_LOAD);
2786 
2787 	/* Configure interrupt moderation timer. */
2788 	sc->alc_int_rx_mod = ALC_IM_RX_TIMER_DEFAULT;
2789 	sc->alc_int_tx_mod = ALC_IM_TX_TIMER_DEFAULT;
2790 	reg = ALC_USECS(sc->alc_int_rx_mod) << IM_TIMER_RX_SHIFT;
2791 	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) == 0)
		reg |= ALC_USECS(sc->alc_int_tx_mod) << IM_TIMER_TX_SHIFT;
2793 	CSR_WRITE_4(sc, ALC_IM_TIMER, reg);
2794 	/*
	 * We don't want automatic interrupt clearing, as the task queue
	 * for the interrupt should know the interrupt status.
2797 	 */
2798 	reg = CSR_READ_4(sc, ALC_MASTER_CFG);
2799 	reg &= ~(MASTER_IM_RX_TIMER_ENB | MASTER_IM_TX_TIMER_ENB);
2800 	reg |= MASTER_SA_TIMER_ENB;
2801 	if (ALC_USECS(sc->alc_int_rx_mod) != 0)
2802 		reg |= MASTER_IM_RX_TIMER_ENB;
2803 	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) == 0 &&
2804 	    ALC_USECS(sc->alc_int_tx_mod) != 0)
2805 		reg |= MASTER_IM_TX_TIMER_ENB;
2806 	CSR_WRITE_4(sc, ALC_MASTER_CFG, reg);
2807 	/*
2808 	 * Disable interrupt re-trigger timer. We don't want automatic
2809 	 * re-triggering of un-ACKed interrupts.
2810 	 */
2811 	CSR_WRITE_4(sc, ALC_INTR_RETRIG_TIMER, ALC_USECS(0));
2812 	/* Configure CMB. */
2813 	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0) {
2814 		CSR_WRITE_4(sc, ALC_CMB_TD_THRESH, ALC_TX_RING_CNT / 3);
2815 		CSR_WRITE_4(sc, ALC_CMB_TX_TIMER,
2816 		    ALC_USECS(sc->alc_int_tx_mod));
2817 	} else {
2818 		if ((sc->alc_flags & ALC_FLAG_CMB_BUG) == 0) {
2819 			CSR_WRITE_4(sc, ALC_CMB_TD_THRESH, 4);
2820 			CSR_WRITE_4(sc, ALC_CMB_TX_TIMER, ALC_USECS(5000));
2821 		} else
2822 			CSR_WRITE_4(sc, ALC_CMB_TX_TIMER, ALC_USECS(0));
2823 	}
2824 	/*
	 * The hardware can be configured to issue an SMB interrupt at
	 * a programmed interval. Since there is a callout that is
	 * invoked once per second in the driver, we use that instead
	 * of relying on the periodic SMB interrupt.
2829 	 */
2830 	CSR_WRITE_4(sc, ALC_SMB_STAT_TIMER, ALC_USECS(0));
2831 	/* Clear MAC statistics. */
2832 	alc_stats_clear(sc);
2833 
2834 	/*
	 * Always use the maximum frame size the controller can support.
	 * Otherwise received frames that have a larger frame length
	 * than the alc(4) MTU would be silently dropped in hardware.
	 * This would make path-MTU discovery hard, as the sender
	 * wouldn't get any responses from the receiver. alc(4) supports
	 * multi-fragment frames on the Rx path, so it has no issue
	 * assembling fragmented frames. Using the maximum frame size
	 * also removes the need to reinitialize the hardware when the
	 * interface MTU configuration is changed.
2844 	 *
2845 	 * Be conservative in what you do, be liberal in what you
2846 	 * accept from others - RFC 793.
2847 	 */
2848 	CSR_WRITE_4(sc, ALC_FRAME_SIZE, sc->alc_max_framelen);
2849 
2850 	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) == 0) {
2851 		/* Disable header split(?) */
2852 		CSR_WRITE_4(sc, ALC_HDS_CFG, 0);
2853 		/* Configure IPG/IFG parameters. */
2854 		CSR_WRITE_4(sc, ALC_IPG_IFG_CFG,
2855 		    ((IPG_IFG_IPGT_DEFAULT << IPG_IFG_IPGT_SHIFT) &
2856 		    IPG_IFG_IPGT_MASK) |
2857 		    ((IPG_IFG_MIFG_DEFAULT << IPG_IFG_MIFG_SHIFT) &
2858 		    IPG_IFG_MIFG_MASK) |
2859 		    ((IPG_IFG_IPG1_DEFAULT << IPG_IFG_IPG1_SHIFT) &
2860 		    IPG_IFG_IPG1_MASK) |
2861 		    ((IPG_IFG_IPG2_DEFAULT << IPG_IFG_IPG2_SHIFT) &
2862 		    IPG_IFG_IPG2_MASK));
2863 		/* Set parameters for half-duplex media. */
2864 		CSR_WRITE_4(sc, ALC_HDPX_CFG,
2865 		    ((HDPX_CFG_LCOL_DEFAULT << HDPX_CFG_LCOL_SHIFT) &
2866 		    HDPX_CFG_LCOL_MASK) |
2867 		    ((HDPX_CFG_RETRY_DEFAULT << HDPX_CFG_RETRY_SHIFT) &
2868 		    HDPX_CFG_RETRY_MASK) | HDPX_CFG_EXC_DEF_EN |
2869 		    ((HDPX_CFG_ABEBT_DEFAULT << HDPX_CFG_ABEBT_SHIFT) &
2870 		    HDPX_CFG_ABEBT_MASK) |
2871 		    ((HDPX_CFG_JAMIPG_DEFAULT << HDPX_CFG_JAMIPG_SHIFT) &
2872 		    HDPX_CFG_JAMIPG_MASK));
2873 	}
2874 
2875 	/*
	 * Set the TSO/checksum offload threshold. For frames that are
	 * larger than this threshold, the hardware won't do
	 * TSO/checksum offloading.
2879 	 */
2880 	reg = (sc->alc_max_framelen >> TSO_OFFLOAD_THRESH_UNIT_SHIFT) &
2881 	    TSO_OFFLOAD_THRESH_MASK;
2882 	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0)
2883 		reg |= TSO_OFFLOAD_ERRLGPKT_DROP_ENB;
2884 	CSR_WRITE_4(sc, ALC_TSO_OFFLOAD_THRESH, reg);
2885 	/* Configure TxQ. */
2886 	reg = (alc_dma_burst[sc->alc_dma_rd_burst] <<
2887 	    TXQ_CFG_TX_FIFO_BURST_SHIFT) & TXQ_CFG_TX_FIFO_BURST_MASK;
2888 	if (sc->sc_product == PCI_PRODUCT_ATTANSIC_L2C_1 ||
2889 	    sc->sc_product == PCI_PRODUCT_ATTANSIC_L2C_2)
2890 		reg >>= 1;
2891 	reg |= (TXQ_CFG_TD_BURST_DEFAULT << TXQ_CFG_TD_BURST_SHIFT) &
2892 	    TXQ_CFG_TD_BURST_MASK;
2893 	reg |= TXQ_CFG_IP_OPTION_ENB | TXQ_CFG_8023_ENB;
2894 	CSR_WRITE_4(sc, ALC_TXQ_CFG, reg | TXQ_CFG_ENHANCED_MODE);
2895 	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0) {
2896 		reg = (TXQ_CFG_TD_BURST_DEFAULT << HQTD_CFG_Q1_BURST_SHIFT |
2897 		    TXQ_CFG_TD_BURST_DEFAULT << HQTD_CFG_Q2_BURST_SHIFT |
2898 		    TXQ_CFG_TD_BURST_DEFAULT << HQTD_CFG_Q3_BURST_SHIFT |
2899 		    HQTD_CFG_BURST_ENB);
2900 		CSR_WRITE_4(sc, ALC_HQTD_CFG, reg);
2901 		reg = WRR_PRI_RESTRICT_NONE;
2902 		reg |= (WRR_PRI_DEFAULT << WRR_PRI0_SHIFT |
2903 		    WRR_PRI_DEFAULT << WRR_PRI1_SHIFT |
2904 		    WRR_PRI_DEFAULT << WRR_PRI2_SHIFT |
2905 		    WRR_PRI_DEFAULT << WRR_PRI3_SHIFT);
2906 		CSR_WRITE_4(sc, ALC_WRR, reg);
2907 	} else {
2908 		/* Configure Rx free descriptor pre-fetching. */
2909 		CSR_WRITE_4(sc, ALC_RX_RD_FREE_THRESH,
2910 		    ((RX_RD_FREE_THRESH_HI_DEFAULT <<
2911 		    RX_RD_FREE_THRESH_HI_SHIFT) & RX_RD_FREE_THRESH_HI_MASK) |
2912 		    ((RX_RD_FREE_THRESH_LO_DEFAULT <<
2913 		    RX_RD_FREE_THRESH_LO_SHIFT) & RX_RD_FREE_THRESH_LO_MASK));
2914 	}
2915 
2916 	/*
2917 	 * Configure flow control parameters.
2918 	 * XON  : 80% of Rx FIFO
2919 	 * XOFF : 30% of Rx FIFO
2920 	 */
2921 	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0) {
2922 		reg = CSR_READ_4(sc, ALC_SRAM_RX_FIFO_LEN);
2923 		reg &= SRAM_RX_FIFO_LEN_MASK;
2924 		reg *= 8;
2925 		if (reg > 8 * 1024)
2926 			reg -= RX_FIFO_PAUSE_816X_RSVD;
2927 		else
			reg -= RX_BUF_SIZE_MAX;
2929 		reg /= 8;
2930 		CSR_WRITE_4(sc, ALC_RX_FIFO_PAUSE_THRESH,
2931 		    ((reg << RX_FIFO_PAUSE_THRESH_LO_SHIFT) &
2932 		    RX_FIFO_PAUSE_THRESH_LO_MASK) |
2933 		    (((RX_FIFO_PAUSE_816X_RSVD / 8) <<
2934 		    RX_FIFO_PAUSE_THRESH_HI_SHIFT) &
2935 		    RX_FIFO_PAUSE_THRESH_HI_MASK));
	} else if (sc->sc_product == PCI_PRODUCT_ATTANSIC_L1C ||
2937 	    sc->sc_product == PCI_PRODUCT_ATTANSIC_L2C) {
2938 		reg = CSR_READ_4(sc, ALC_SRAM_RX_FIFO_LEN);
2939 		rxf_hi = (reg * 8) / 10;
2940 		rxf_lo = (reg * 3) / 10;
2941 		CSR_WRITE_4(sc, ALC_RX_FIFO_PAUSE_THRESH,
2942 		    ((rxf_lo << RX_FIFO_PAUSE_THRESH_LO_SHIFT) &
2943 		    RX_FIFO_PAUSE_THRESH_LO_MASK) |
2944 		    ((rxf_hi << RX_FIFO_PAUSE_THRESH_HI_SHIFT) &
2945 		    RX_FIFO_PAUSE_THRESH_HI_MASK));
2946 	}
2947 
2948 	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) == 0) {
2949 		/* Disable RSS until I understand L1C/L2C's RSS logic. */
2950 		CSR_WRITE_4(sc, ALC_RSS_IDT_TABLE0, 0);
2951 		CSR_WRITE_4(sc, ALC_RSS_CPU, 0);
2952 	}
2953 
2954 	/* Configure RxQ. */
2955 	reg = (RXQ_CFG_RD_BURST_DEFAULT << RXQ_CFG_RD_BURST_SHIFT) &
2956 	    RXQ_CFG_RD_BURST_MASK;
2957 	reg |= RXQ_CFG_RSS_MODE_DIS;
2958 	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0) {
2959 		reg |= (RXQ_CFG_816X_IDT_TBL_SIZE_DEFAULT <<
2960 		    RXQ_CFG_816X_IDT_TBL_SIZE_SHIFT) &
2961 		    RXQ_CFG_816X_IDT_TBL_SIZE_MASK;
2962 		if ((sc->alc_flags & ALC_FLAG_FASTETHER) == 0)
2963 			reg |= RXQ_CFG_ASPM_THROUGHPUT_LIMIT_100M;
2964 	} else {
2965 		if ((sc->alc_flags & ALC_FLAG_FASTETHER) == 0 &&
2966 		    sc->sc_product != PCI_PRODUCT_ATTANSIC_L1D_1)
2967 			reg |= RXQ_CFG_ASPM_THROUGHPUT_LIMIT_100M;
2968 	}
2969 	CSR_WRITE_4(sc, ALC_RXQ_CFG, reg);
2970 
2971 	/* Configure DMA parameters. */
2972 	reg = DMA_CFG_OUT_ORDER | DMA_CFG_RD_REQ_PRI;
2973 	reg |= sc->alc_rcb;
2974 	if ((sc->alc_flags & ALC_FLAG_CMB_BUG) == 0)
2975 		reg |= DMA_CFG_CMB_ENB;
2976 	if ((sc->alc_flags & ALC_FLAG_SMB_BUG) == 0)
2977 		reg |= DMA_CFG_SMB_ENB;
2978 	else
2979 		reg |= DMA_CFG_SMB_DIS;
2980 	reg |= (sc->alc_dma_rd_burst & DMA_CFG_RD_BURST_MASK) <<
2981 	    DMA_CFG_RD_BURST_SHIFT;
2982 	reg |= (sc->alc_dma_wr_burst & DMA_CFG_WR_BURST_MASK) <<
2983 	    DMA_CFG_WR_BURST_SHIFT;
2984 	reg |= (DMA_CFG_RD_DELAY_CNT_DEFAULT << DMA_CFG_RD_DELAY_CNT_SHIFT) &
2985 	    DMA_CFG_RD_DELAY_CNT_MASK;
2986 	reg |= (DMA_CFG_WR_DELAY_CNT_DEFAULT << DMA_CFG_WR_DELAY_CNT_SHIFT) &
2987 	    DMA_CFG_WR_DELAY_CNT_MASK;
2988 	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0) {
2989 		switch (AR816X_REV(sc->alc_rev)) {
2990 		case AR816X_REV_A0:
2991 		case AR816X_REV_A1:
2992 			reg |= DMA_CFG_RD_CHNL_SEL_2;
2993 			break;
2994 		case AR816X_REV_B0:
2995 			/* FALLTHROUGH */
2996 		default:
2997 			reg |= DMA_CFG_RD_CHNL_SEL_4;
2998 			break;
2999 		}
3000 	}
3001 	CSR_WRITE_4(sc, ALC_DMA_CFG, reg);
3002 
3003 	/*
3004 	 * Configure Tx/Rx MACs.
3005 	 *  - Auto-padding for short frames.
3006 	 *  - Enable CRC generation.
	 *  Actual reconfiguration of the MAC for the resolved
	 *  speed/duplex follows after detection of link establishment.
	 *  AR813x/AR815x always do checksum computation regardless of
	 *  the MAC_CFG_RXCSUM_ENB bit. Also, the controller is known to
	 *  have a bug in the protocol field of the Rx return structure,
	 *  so these controllers can't handle fragmented frames. Disable
	 *  Rx checksum offloading until there is a newer controller
	 *  that has a sane implementation.
3015 	 */
3016 	reg = MAC_CFG_TX_CRC_ENB | MAC_CFG_TX_AUTO_PAD | MAC_CFG_FULL_DUPLEX |
3017 	    ((MAC_CFG_PREAMBLE_DEFAULT << MAC_CFG_PREAMBLE_SHIFT) &
3018 	    MAC_CFG_PREAMBLE_MASK);
3019 	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0 ||
3020 	    sc->sc_product == PCI_PRODUCT_ATTANSIC_L1D ||
3021 	    sc->sc_product == PCI_PRODUCT_ATTANSIC_L1D_1 ||
3022 	    sc->sc_product == PCI_PRODUCT_ATTANSIC_L2C_2)
3023 		reg |= MAC_CFG_HASH_ALG_CRC32 | MAC_CFG_SPEED_MODE_SW;
3024 	if ((sc->alc_flags & ALC_FLAG_FASTETHER) != 0)
3025 		reg |= MAC_CFG_SPEED_10_100;
3026 	else
3027 		reg |= MAC_CFG_SPEED_1000;
3028 	CSR_WRITE_4(sc, ALC_MAC_CFG, reg);
3029 
3030 	/* Set up the receive filter. */
3031 	alc_iff(sc);
3032 
3033 	alc_rxvlan(sc);
3034 
	/* Acknowledge all pending interrupts and clear them. */
3036 	CSR_WRITE_4(sc, ALC_INTR_MASK, ALC_INTRS);
3037 	CSR_WRITE_4(sc, ALC_INTR_STATUS, 0xFFFFFFFF);
3038 	CSR_WRITE_4(sc, ALC_INTR_STATUS, 0);
3039 
3040 	ifp->if_flags |= IFF_RUNNING;
3041 	ifq_clr_oactive(&ifp->if_snd);
3042 
3043 	sc->alc_flags &= ~ALC_FLAG_LINK;
3044 	/* Switch to the current media. */
3045 	alc_mediachange(ifp);
3046 
3047 	timeout_add_sec(&sc->alc_tick_ch, 1);
3048 
3049 	return (0);
3050 }
3051 
3052 void
3053 alc_stop(struct alc_softc *sc)
3054 {
3055 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
3056 	struct alc_txdesc *txd;
3057 	struct alc_rxdesc *rxd;
3058 	uint32_t reg;
3059 	int i;
3060 
3061 	/*
3062 	 * Mark the interface down and cancel the watchdog timer.
3063 	 */
3064 	ifp->if_flags &= ~IFF_RUNNING;
3065 	ifq_clr_oactive(&ifp->if_snd);
3066 	ifp->if_timer = 0;
3067 
3068 	timeout_del(&sc->alc_tick_ch);
3069 	sc->alc_flags &= ~ALC_FLAG_LINK;
3070 
3071 	alc_stats_update(sc);
3072 
3073 	/* Disable interrupts. */
3074 	CSR_WRITE_4(sc, ALC_INTR_MASK, 0);
3075 	CSR_WRITE_4(sc, ALC_INTR_STATUS, 0xFFFFFFFF);
3076 
3077 	/* Disable DMA. */
3078 	reg = CSR_READ_4(sc, ALC_DMA_CFG);
3079 	reg &= ~(DMA_CFG_CMB_ENB | DMA_CFG_SMB_ENB);
3080 	reg |= DMA_CFG_SMB_DIS;
3081 	CSR_WRITE_4(sc, ALC_DMA_CFG, reg);
3082 	DELAY(1000);
3083 
3084 	/* Stop Rx/Tx MACs. */
3085 	alc_stop_mac(sc);
3086 
3087 	/* Disable interrupts which might be touched in taskq handler. */
	/* Disable interrupts that might be touched by the taskq handler. */
3089 
3090 	/* Disable L0s/L1s */
3091 	reg = CSR_READ_4(sc, ALC_PM_CFG);
	if ((reg & (PM_CFG_ASPM_L0S_ENB | PM_CFG_ASPM_L1_ENB)) != 0) {
3093 		reg &= ~(PM_CFG_ASPM_L0S_ENB | PM_CFG_ASPM_L1_ENB);
3094 		CSR_WRITE_4(sc, ALC_PM_CFG, reg);
3095 	}
3096 
3097 	/* Reclaim Rx buffers that have been processed. */
3098 	m_freem(sc->alc_cdata.alc_rxhead);
3099 	ALC_RXCHAIN_RESET(sc);
3100 	/*
3101 	 * Free Tx/Rx mbufs still in the queues.
3102 	 */
3103 	for (i = 0; i < ALC_RX_RING_CNT; i++) {
3104 		rxd = &sc->alc_cdata.alc_rxdesc[i];
3105 		if (rxd->rx_m != NULL) {
3106 			bus_dmamap_sync(sc->sc_dmat, rxd->rx_dmamap, 0,
3107 			    rxd->rx_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
3108 			bus_dmamap_unload(sc->sc_dmat, rxd->rx_dmamap);
3109 			m_freem(rxd->rx_m);
3110 			rxd->rx_m = NULL;
3111 		}
3112 	}
3113 	for (i = 0; i < ALC_TX_RING_CNT; i++) {
3114 		txd = &sc->alc_cdata.alc_txdesc[i];
3115 		if (txd->tx_m != NULL) {
3116 			bus_dmamap_sync(sc->sc_dmat, txd->tx_dmamap, 0,
3117 			    txd->tx_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
3118 			bus_dmamap_unload(sc->sc_dmat, txd->tx_dmamap);
3119 			m_freem(txd->tx_m);
3120 			txd->tx_m = NULL;
3121 		}
3122 	}
3123 }
3124 
3125 void
3126 alc_stop_mac(struct alc_softc *sc)
3127 {
3128 	uint32_t reg;
3129 	int i;
3130 
3131 	alc_stop_queue(sc);
3132 	/* Disable Rx/Tx MAC. */
3133 	reg = CSR_READ_4(sc, ALC_MAC_CFG);
3134 	if ((reg & (MAC_CFG_TX_ENB | MAC_CFG_RX_ENB)) != 0) {
3135 		reg &= ~(MAC_CFG_TX_ENB | MAC_CFG_RX_ENB);
3136 		CSR_WRITE_4(sc, ALC_MAC_CFG, reg);
3137 	}
3138 	for (i = ALC_TIMEOUT; i > 0; i--) {
3139 		reg = CSR_READ_4(sc, ALC_IDLE_STATUS);
3140 		if ((reg & (IDLE_STATUS_RXMAC | IDLE_STATUS_TXMAC)) == 0)
3141 			break;
3142 		DELAY(10);
3143 	}
3144 	if (i == 0)
3145 		printf("%s: could not disable Rx/Tx MAC(0x%08x)!\n",
3146 		    sc->sc_dev.dv_xname, reg);
3147 }
3148 
3149 void
3150 alc_start_queue(struct alc_softc *sc)
3151 {
3152 	uint32_t qcfg[] = {
3153 		0,
3154 		RXQ_CFG_QUEUE0_ENB,
3155 		RXQ_CFG_QUEUE0_ENB | RXQ_CFG_QUEUE1_ENB,
3156 		RXQ_CFG_QUEUE0_ENB | RXQ_CFG_QUEUE1_ENB | RXQ_CFG_QUEUE2_ENB,
3157 		RXQ_CFG_ENB
3158 	};
3159 	uint32_t cfg;
3160 
3161 	/* Enable RxQ. */
3162 	cfg = CSR_READ_4(sc, ALC_RXQ_CFG);
3163 	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) == 0) {
3164 		cfg &= ~RXQ_CFG_ENB;
3165 		cfg |= qcfg[1];
3166 	} else
3167 		cfg |= RXQ_CFG_QUEUE0_ENB;
3168 
3169 	CSR_WRITE_4(sc, ALC_RXQ_CFG, cfg);
3170 	/* Enable TxQ. */
3171 	cfg = CSR_READ_4(sc, ALC_TXQ_CFG);
3172 	cfg |= TXQ_CFG_ENB;
3173 	CSR_WRITE_4(sc, ALC_TXQ_CFG, cfg);
3174 }
3175 
3176 void
3177 alc_stop_queue(struct alc_softc *sc)
3178 {
3179 	uint32_t reg;
3180 	int i;
3181 
3182 	/* Disable RxQ. */
3183 	reg = CSR_READ_4(sc, ALC_RXQ_CFG);
3184 	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) == 0) {
3185 		if ((reg & RXQ_CFG_ENB) != 0) {
3186 			reg &= ~RXQ_CFG_ENB;
3187 			CSR_WRITE_4(sc, ALC_RXQ_CFG, reg);
3188 		}
3189 	} else {
3190 		if ((reg & RXQ_CFG_QUEUE0_ENB) != 0) {
3191 			reg &= ~RXQ_CFG_QUEUE0_ENB;
3192 			CSR_WRITE_4(sc, ALC_RXQ_CFG, reg);
3193 		}
3194 	}
3195 	/* Disable TxQ. */
3196 	reg = CSR_READ_4(sc, ALC_TXQ_CFG);
3197 	if ((reg & TXQ_CFG_ENB) != 0) {
3198 		reg &= ~TXQ_CFG_ENB;
3199 		CSR_WRITE_4(sc, ALC_TXQ_CFG, reg);
3200 	}
3201 	DELAY(40);
3202 	for (i = ALC_TIMEOUT; i > 0; i--) {
3203 		reg = CSR_READ_4(sc, ALC_IDLE_STATUS);
3204 		if ((reg & (IDLE_STATUS_RXQ | IDLE_STATUS_TXQ)) == 0)
3205 			break;
3206 		DELAY(10);
3207 	}
3208 	if (i == 0)
3209 		printf("%s: could not disable RxQ/TxQ (0x%08x)!\n",
3210 		    sc->sc_dev.dv_xname, reg);
3211 }
3212 
3213 void
3214 alc_init_tx_ring(struct alc_softc *sc)
3215 {
3216 	struct alc_ring_data *rd;
3217 	struct alc_txdesc *txd;
3218 	int i;
3219 
3220 	sc->alc_cdata.alc_tx_prod = 0;
3221 	sc->alc_cdata.alc_tx_cons = 0;
3222 	sc->alc_cdata.alc_tx_cnt = 0;
3223 
3224 	rd = &sc->alc_rdata;
3225 	bzero(rd->alc_tx_ring, ALC_TX_RING_SZ);
3226 	for (i = 0; i < ALC_TX_RING_CNT; i++) {
3227 		txd = &sc->alc_cdata.alc_txdesc[i];
3228 		txd->tx_m = NULL;
3229 	}
3230 
3231 	bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_tx_ring_map, 0,
3232 	    sc->alc_cdata.alc_tx_ring_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
3233 }
3234 
3235 int
3236 alc_init_rx_ring(struct alc_softc *sc)
3237 {
3238 	struct alc_ring_data *rd;
3239 	struct alc_rxdesc *rxd;
3240 	int i;
3241 
3242 	sc->alc_cdata.alc_rx_cons = ALC_RX_RING_CNT - 1;
3243 	rd = &sc->alc_rdata;
3244 	bzero(rd->alc_rx_ring, ALC_RX_RING_SZ);
3245 	for (i = 0; i < ALC_RX_RING_CNT; i++) {
3246 		rxd = &sc->alc_cdata.alc_rxdesc[i];
3247 		rxd->rx_m = NULL;
3248 		rxd->rx_desc = &rd->alc_rx_ring[i];
3249 		if (alc_newbuf(sc, rxd) != 0)
3250 			return (ENOBUFS);
3251 	}
3252 
3253 	/*
	 * Since the controller does not update Rx descriptors, the
	 * driver does not have to read them back, so
	 * BUS_DMASYNC_PREWRITE is enough to ensure coherence.
3257 	 */
3258 	bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_rx_ring_map, 0,
3259 	    sc->alc_cdata.alc_rx_ring_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
3260 	/* Let controller know availability of new Rx buffers. */
3261 	CSR_WRITE_4(sc, ALC_MBOX_RD0_PROD_IDX, sc->alc_cdata.alc_rx_cons);
3262 
3263 	return (0);
3264 }
3265 
3266 void
3267 alc_init_rr_ring(struct alc_softc *sc)
3268 {
3269 	struct alc_ring_data *rd;
3270 
3271 	sc->alc_cdata.alc_rr_cons = 0;
3272 	ALC_RXCHAIN_RESET(sc);
3273 
3274 	rd = &sc->alc_rdata;
3275 	bzero(rd->alc_rr_ring, ALC_RR_RING_SZ);
3276 	bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_rr_ring_map, 0,
3277 	    sc->alc_cdata.alc_rr_ring_map->dm_mapsize,
3278 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3279 }
3280 
3281 void
3282 alc_init_cmb(struct alc_softc *sc)
3283 {
3284 	struct alc_ring_data *rd;
3285 
3286 	rd = &sc->alc_rdata;
3287 	bzero(rd->alc_cmb, ALC_CMB_SZ);
3288 	bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_cmb_map, 0,
3289 	    sc->alc_cdata.alc_cmb_map->dm_mapsize,
3290 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3291 }
3292 
3293 void
3294 alc_init_smb(struct alc_softc *sc)
3295 {
3296 	struct alc_ring_data *rd;
3297 
3298 	rd = &sc->alc_rdata;
3299 	bzero(rd->alc_smb, ALC_SMB_SZ);
3300 	bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_smb_map, 0,
3301 	    sc->alc_cdata.alc_smb_map->dm_mapsize,
3302 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3303 }
3304 
3305 void
3306 alc_rxvlan(struct alc_softc *sc)
3307 {
3308 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
3309 	uint32_t reg;
3310 
3311 	reg = CSR_READ_4(sc, ALC_MAC_CFG);
3312 	if (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING)
3313 		reg |= MAC_CFG_VLAN_TAG_STRIP;
3314 	else
3315 		reg &= ~MAC_CFG_VLAN_TAG_STRIP;
3316 	CSR_WRITE_4(sc, ALC_MAC_CFG, reg);
3317 }
3318 
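/*
 * Program the receive filter.  Multicast filtering uses a 64-bit
 * hash: each group address is hashed with the big-endian CRC32 of
 * the Ethernet address, the top CRC bit selects MAR0 or MAR1, and
 * the next five bits ((crc >> 26) & 0x1f) select the bit within
 * that register.
 */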
3319 void
3320 alc_iff(struct alc_softc *sc)
3321 {
3322 	struct arpcom *ac = &sc->sc_arpcom;
3323 	struct ifnet *ifp = &ac->ac_if;
3324 	struct ether_multi *enm;
3325 	struct ether_multistep step;
3326 	uint32_t crc;
3327 	uint32_t mchash[2];
3328 	uint32_t rxcfg;
3329 
3330 	rxcfg = CSR_READ_4(sc, ALC_MAC_CFG);
3331 	rxcfg &= ~(MAC_CFG_ALLMULTI | MAC_CFG_BCAST | MAC_CFG_PROMISC);
3332 	ifp->if_flags &= ~IFF_ALLMULTI;
3333 
3334 	/*
3335 	 * Always accept broadcast frames.
3336 	 */
3337 	rxcfg |= MAC_CFG_BCAST;
3338 
3339 	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
3340 		ifp->if_flags |= IFF_ALLMULTI;
3341 		if (ifp->if_flags & IFF_PROMISC)
3342 			rxcfg |= MAC_CFG_PROMISC;
3343 		else
3344 			rxcfg |= MAC_CFG_ALLMULTI;
3345 		mchash[0] = mchash[1] = 0xFFFFFFFF;
3346 	} else {
3347 		/* Program new filter. */
3348 		bzero(mchash, sizeof(mchash));
3349 
3350 		ETHER_FIRST_MULTI(step, ac, enm);
3351 		while (enm != NULL) {
3352 			crc = ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN);
3353 
3354 			mchash[crc >> 31] |= 1 << ((crc >> 26) & 0x1f);
3355 
3356 			ETHER_NEXT_MULTI(step, enm);
3357 		}
3358 	}
3359 
3360 	CSR_WRITE_4(sc, ALC_MAR0, mchash[0]);
3361 	CSR_WRITE_4(sc, ALC_MAR1, mchash[1]);
3362 	CSR_WRITE_4(sc, ALC_MAC_CFG, rxcfg);
3363 }
3364