xref: /dflybsd-src/sys/dev/netif/alc/if_alc.c (revision 98dffbe0d47d06146981b227e06bb90e81519860)
1 /*-
2  * Copyright (c) 2009, Pyun YongHyeon <yongari@FreeBSD.org>
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice unmodified, this list of conditions, and the following
10  *    disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25  * SUCH DAMAGE.
26  *
27  * $FreeBSD: src/sys/dev/alc/if_alc.c,v 1.6 2009/09/29 23:03:16 yongari Exp $
28  */
29 
30 /* Driver for Atheros AR8131/AR8132 PCIe Ethernet. */
31 
32 #include <sys/param.h>
33 #include <sys/bitops.h>
34 #include <sys/endian.h>
35 #include <sys/kernel.h>
36 #include <sys/bus.h>
37 #include <sys/interrupt.h>
38 #include <sys/malloc.h>
39 #include <sys/proc.h>
40 #include <sys/rman.h>
41 #include <sys/serialize.h>
42 #include <sys/socket.h>
43 #include <sys/sockio.h>
44 #include <sys/sysctl.h>
45 #include <sys/in_cksum.h>
46 
47 #include <net/ethernet.h>
48 #include <net/if.h>
49 #include <net/bpf.h>
50 #include <net/if_arp.h>
51 #include <net/if_dl.h>
52 #include <net/if_media.h>
53 #include <net/ifq_var.h>
54 #include <net/vlan/if_vlan_var.h>
55 #include <net/vlan/if_vlan_ether.h>
56 
57 #include <netinet/ip.h>
58 #include <netinet/tcp.h>
59 
60 #include <dev/netif/mii_layer/mii.h>
61 #include <dev/netif/mii_layer/miivar.h>
62 
63 #include <bus/pci/pcireg.h>
64 #include <bus/pci/pcivar.h>
65 #include "pcidevs.h"
66 
67 #include <dev/netif/alc/if_alcreg.h>
68 #include <dev/netif/alc/if_alcvar.h>
69 
70 /* "device miibus" required.  See GENERIC if you get errors here. */
71 #include "miibus_if.h"
72 
/*
 * Checksum offload features advertised to the network stack.  The
 * "custom" variant omits IP header checksum offload; it is disabled
 * by default via the #undef below.
 */
#undef ALC_USE_CUSTOM_CSUM
#ifdef ALC_USE_CUSTOM_CSUM
#define	ALC_CSUM_FEATURES	(CSUM_TCP | CSUM_UDP)
#else
#define	ALC_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)
#endif

/*
 * Locking no-ops: on DragonFly the interface serializer protects the
 * softc, so the FreeBSD-style per-softc mutex macros compile away.
 */
#define ALC_LOCK(sc)
#define ALC_UNLOCK(sc)
#define	ALC_LOCK_ASSERT(sc)

/*
 * Compatibility shims: map the FreeBSD PCIe capability register and
 * bit-field names used by the upstream driver onto their DragonFly
 * equivalents, to keep diffs against FreeBSD small.
 */
#define PCIER_LINK_CAP			PCIER_LINKCAP
#define PCIEM_LINK_CAP_ASPM		PCIEM_LNKCAP_ASPM_MASK
#define PCIER_LINK_CTL			PCIER_LINKCTRL
#define PCIEM_LINK_CTL_RCB		PCIEM_LNKCTL_RCB
#define PCIEM_LINK_CTL_ASPMC		PCIEM_LNKCTL_ASPM_MASK
#define PCIEM_LINK_CTL_ASPMC_L0S	PCIEM_LNKCTL_ASPM_L0S
#define PCIEM_LINK_CTL_ASPMC_L1		PCIEM_LNKCTL_ASPM_L1
#define PCIEM_LINK_CTL_EXTENDED_SYNC	PCIEM_LNKCTL_EXTENDED_SYNC
#define PCIER_DEVICE_CTL		PCIER_DEVCTRL
#define PCIEM_CTL_MAX_READ_REQUEST	PCIEM_DEVCTL_MAX_READRQ_MASK
#define PCIEM_CTL_MAX_PAYLOAD		PCIEM_DEVCTL_MAX_PAYLOAD_MASK

/* Tunables. */
static int alc_msi_enable = 1;	/* hw.alc.msi.enable: set 0 to force legacy INTx. */
TUNABLE_INT("hw.alc.msi.enable", &alc_msi_enable);
99 
100 /*
101  * Devices supported by this driver.
102  */
103 
104 static struct alc_ident alc_ident_table[] = {
105 	{ VENDORID_ATHEROS, DEVICEID_ATHEROS_AR8131, 9 * 1024,
106 		"Atheros AR8131 PCIe Gigabit Ethernet" },
107 	{ VENDORID_ATHEROS, DEVICEID_ATHEROS_AR8132, 9 * 1024,
108 		"Atheros AR8132 PCIe Fast Ethernet" },
109 	{ VENDORID_ATHEROS, DEVICEID_ATHEROS_AR8151, 6 * 1024,
110 		"Atheros AR8151 v1.0 PCIe Gigabit Ethernet" },
111 	{ VENDORID_ATHEROS, DEVICEID_ATHEROS_AR8151_V2, 6 * 1024,
112 		"Atheros AR8151 v2.0 PCIe Gigabit Ethernet" },
113 	{ VENDORID_ATHEROS, DEVICEID_ATHEROS_AR8152_B, 6 * 1024,
114 		"Atheros AR8152 v1.1 PCIe Fast Ethernet" },
115 	{ VENDORID_ATHEROS, DEVICEID_ATHEROS_AR8152_B2, 6 * 1024,
116 		"Atheros AR8152 v2.0 PCIe Fast Ethernet" },
117 	{ VENDORID_ATHEROS, DEVICEID_ATHEROS_AR8161, 9 * 1024,
118 		"Atheros AR8161 PCIe Gigabit Ethernet" },
119 	{ VENDORID_ATHEROS, DEVICEID_ATHEROS_AR8162, 9 * 1024,
120 		"Atheros AR8162 PCIe Fast Ethernet" },
121 	{ VENDORID_ATHEROS, DEVICEID_ATHEROS_AR8171, 9 * 1024,
122 		"Atheros AR8171 PCIe Gigabit Ethernet" },
123 	{ VENDORID_ATHEROS, DEVICEID_ATHEROS_AR8172, 9 * 1024,
124 		"Atheros AR8172 PCIe Fast Ethernet" },
125 	{ VENDORID_ATHEROS, DEVICEID_ATHEROS_E2200, 9 * 1024,
126 		"Killer E2200 Gigabit Ethernet" },
127 	{ VENDORID_ATHEROS, DEVICEID_ATHEROS_E2400, 9 * 1024,
128 		"Killer E2400 Gigabit Ethernet" },
129 	{ 0, 0, 0, NULL}
130 };
131 
132 static int	alc_attach(device_t);
133 static int	alc_probe(device_t);
134 static int	alc_detach(device_t);
135 static int	alc_shutdown(device_t);
136 static int	alc_suspend(device_t);
137 static int	alc_resume(device_t);
138 static int	alc_miibus_readreg(device_t, int, int);
139 static void	alc_miibus_statchg(device_t);
140 static int	alc_miibus_writereg(device_t, int, int, int);
141 static uint32_t	alc_miidbg_readreg(struct alc_softc *, int);
142 static uint32_t	alc_miidbg_writereg(struct alc_softc *, int, int);
143 static uint32_t	alc_miiext_readreg(struct alc_softc *, int, int);
144 static uint32_t	alc_miiext_writereg(struct alc_softc *, int, int, int);
145 static void	alc_init(void *);
146 static void	alc_start(struct ifnet *, struct ifaltq_subque *);
147 static void	alc_watchdog(struct alc_softc *);
148 static int	alc_mediachange(struct ifnet *);
149 static int	alc_mediachange_locked(struct alc_softc *);
150 static void	alc_mediastatus(struct ifnet *, struct ifmediareq *);
151 static int	alc_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
152 
153 static void	alc_aspm(struct alc_softc *, int, int);
154 static void	alc_aspm_813x(struct alc_softc *, int);
155 static void	alc_aspm_816x(struct alc_softc *, int);
156 #ifdef foo
157 static int	alc_check_boundary(struct alc_softc *);
158 #endif
159 static void	alc_config_msi(struct alc_softc *);
160 static void	alc_disable_l0s_l1(struct alc_softc *);
161 static int	alc_dma_alloc(struct alc_softc *);
162 static void	alc_dma_free(struct alc_softc *);
163 static void	alc_dmamap_cb(void *, bus_dma_segment_t *, int, int);
164 static void	alc_dsp_fixup(struct alc_softc *, int);
165 static int	alc_encap(struct alc_softc *, struct mbuf **);
166 static struct alc_ident *alc_find_ident(device_t);
167 static void	alc_get_macaddr(struct alc_softc *);
168 static void	alc_get_macaddr_813x(struct alc_softc *);
169 static void	alc_get_macaddr_816x(struct alc_softc *);
170 static void	alc_get_macaddr_par(struct alc_softc *);
171 static void	alc_init_cmb(struct alc_softc *);
172 static void	alc_init_rr_ring(struct alc_softc *);
173 static int	alc_init_rx_ring(struct alc_softc *);
174 static void	alc_init_smb(struct alc_softc *);
175 static void	alc_init_tx_ring(struct alc_softc *);
176 static void	alc_intr(void *);
177 static void	alc_mac_config(struct alc_softc *);
178 static uint32_t	alc_mii_readreg_813x(struct alc_softc *, int, int);
179 static uint32_t	alc_mii_readreg_816x(struct alc_softc *, int, int);
180 static uint32_t	alc_mii_writereg_813x(struct alc_softc *, int, int, int);
181 static uint32_t alc_mii_writereg_816x(struct alc_softc *, int, int, int);
182 static int	alc_newbuf(struct alc_softc *, struct alc_rxdesc *, boolean_t);
183 static void	alc_osc_reset(struct alc_softc *);
184 static void	alc_phy_down(struct alc_softc *);
185 static void	alc_phy_reset(struct alc_softc *);
186 static void	alc_phy_reset_813x(struct alc_softc *);
187 static void	alc_phy_reset_816x(struct alc_softc *);
188 static void	alc_reset(struct alc_softc *);
189 static void	alc_rxeof(struct alc_softc *, struct rx_rdesc *);
190 static int	alc_rxintr(struct alc_softc *);
191 static void	alc_rxfilter(struct alc_softc *);
192 static void	alc_rxvlan(struct alc_softc *);
193 #if 0
194 static void	alc_setlinkspeed(struct alc_softc *);
195 /* XXX: WOL */
196 static void	alc_setwol(struct alc_softc *);
197 static void	alc_setwol_813x(struct alc_softc *);
198 static void	alc_setwol_816x(struct alc_softc *);
199 #endif
200 static void	alc_start_queue(struct alc_softc *);
201 static void	alc_stats_clear(struct alc_softc *);
202 static void	alc_stats_update(struct alc_softc *);
203 static void	alc_stop(struct alc_softc *);
204 static void	alc_stop_mac(struct alc_softc *);
205 static void	alc_stop_queue(struct alc_softc *);
206 static void	alc_sysctl_node(struct alc_softc *);
207 static void	alc_tick(void *);
208 static void	alc_txeof(struct alc_softc *);
209 static int	sysctl_hw_alc_proc_limit(SYSCTL_HANDLER_ARGS);
210 static int	sysctl_hw_alc_int_mod(SYSCTL_HANDLER_ARGS);
211 
/* Bus method dispatch table: device_if and miibus_if entry points. */
static device_method_t alc_methods[] = {
	/* Device interface. */
	DEVMETHOD(device_probe,		alc_probe),
	DEVMETHOD(device_attach,	alc_attach),
	DEVMETHOD(device_detach,	alc_detach),
	DEVMETHOD(device_shutdown,	alc_shutdown),
	DEVMETHOD(device_suspend,	alc_suspend),
	DEVMETHOD(device_resume,	alc_resume),

	/* MII interface. */
	DEVMETHOD(miibus_readreg,	alc_miibus_readreg),
	DEVMETHOD(miibus_writereg,	alc_miibus_writereg),
	DEVMETHOD(miibus_statchg,	alc_miibus_statchg),

	{ NULL, NULL }
};

static DEFINE_CLASS_0(alc, alc_driver, alc_methods, sizeof(struct alc_softc));
static devclass_t alc_devclass;

DECLARE_DUMMY_MODULE(if_alc);
DRIVER_MODULE(if_alc, pci, alc_driver, alc_devclass, NULL, NULL);
/* Attach miibus as a child of alc so PHY drivers can bind to the MAC. */
DRIVER_MODULE(miibus, alc, miibus_driver, miibus_devclass, NULL, NULL);

/*
 * DMA burst sizes in bytes, indexed by the 3-bit PCIe max-payload /
 * max-read-request encoding; encodings 6 and 7 are reserved (0).
 */
static const uint32_t alc_dma_burst[] =
    { 128, 256, 512, 1024, 2048, 4096, 0, 0 };
238 
239 static int
240 alc_miibus_readreg(device_t dev, int phy, int reg)
241 {
242 	struct alc_softc *sc;
243 	int v;
244 
245 	sc = device_get_softc(dev);
246 	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0)
247 		v = alc_mii_readreg_816x(sc, phy, reg);
248 	else
249 		v = alc_mii_readreg_813x(sc, phy, reg);
250 	return (v);
251 }
252 
253 static uint32_t
254 alc_mii_readreg_813x(struct alc_softc *sc, int phy, int reg)
255 {
256 	uint32_t v;
257 	int i;
258 
259 	/*
260 	 * For AR8132 fast ethernet controller, do not report 1000baseT
261 	 * capability to mii(4). Even though AR8132 uses the same
262 	 * model/revision number of F1 gigabit PHY, the PHY has no
263 	 * ability to establish 1000baseT link.
264 	 */
265 	if ((sc->alc_flags & ALC_FLAG_FASTETHER) != 0 &&
266 	    reg == MII_EXTSR)
267 		return (0);
268 
269 	CSR_WRITE_4(sc, ALC_MDIO, MDIO_OP_EXECUTE | MDIO_OP_READ |
270 	    MDIO_SUP_PREAMBLE | MDIO_CLK_25_4 | MDIO_REG_ADDR(reg));
271 	for (i = ALC_PHY_TIMEOUT; i > 0; i--) {
272 		DELAY(5);
273 		v = CSR_READ_4(sc, ALC_MDIO);
274 		if ((v & (MDIO_OP_EXECUTE | MDIO_OP_BUSY)) == 0)
275 			break;
276 	}
277 
278 	if (i == 0) {
279 		device_printf(sc->alc_dev, "phy read timeout : %d\n", reg);
280 		return (0);
281 	}
282 
283 	return ((v & MDIO_DATA_MASK) >> MDIO_DATA_SHIFT);
284 }
285 
286 static uint32_t
287 alc_mii_readreg_816x(struct alc_softc *sc, int phy, int reg)
288 {
289 	uint32_t clk, v;
290 	int i;
291 
292 	if ((sc->alc_flags & ALC_FLAG_LINK) != 0)
293 		clk = MDIO_CLK_25_128;
294 	else
295 		clk = MDIO_CLK_25_4;
296 	CSR_WRITE_4(sc, ALC_MDIO, MDIO_OP_EXECUTE | MDIO_OP_READ |
297 	    MDIO_SUP_PREAMBLE | clk | MDIO_REG_ADDR(reg));
298 	for (i = ALC_PHY_TIMEOUT; i > 0; i--) {
299 		DELAY(5);
300 		v = CSR_READ_4(sc, ALC_MDIO);
301 		if ((v & MDIO_OP_BUSY) == 0)
302 			break;
303 	}
304 
305 	if (i == 0) {
306 		device_printf(sc->alc_dev, "phy read timeout : %d\n", reg);
307 		return (0);
308 	}
309 
310 	return ((v & MDIO_DATA_MASK) >> MDIO_DATA_SHIFT);
311 }
312 
313 static int
314 alc_miibus_writereg(device_t dev, int phy, int reg, int val)
315 {
316 	struct alc_softc *sc;
317 	int v;
318 
319 	sc = device_get_softc(dev);
320 	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0)
321 		v = alc_mii_writereg_816x(sc, phy, reg, val);
322 	else
323 		v = alc_mii_writereg_813x(sc, phy, reg, val);
324 	return (v);
325 }
326 
327 static uint32_t
328 alc_mii_writereg_813x(struct alc_softc *sc, int phy, int reg, int val)
329 {
330 	uint32_t v;
331 	int i;
332 
333 	CSR_WRITE_4(sc, ALC_MDIO, MDIO_OP_EXECUTE | MDIO_OP_WRITE |
334 	    (val & MDIO_DATA_MASK) << MDIO_DATA_SHIFT |
335 	    MDIO_SUP_PREAMBLE | MDIO_CLK_25_4 | MDIO_REG_ADDR(reg));
336 	for (i = ALC_PHY_TIMEOUT; i > 0; i--) {
337 		DELAY(5);
338 		v = CSR_READ_4(sc, ALC_MDIO);
339 		if ((v & (MDIO_OP_EXECUTE | MDIO_OP_BUSY)) == 0)
340 			break;
341 	}
342 
343 	if (i == 0)
344 		device_printf(sc->alc_dev, "phy write timeout : %d\n", reg);
345 
346 	return (0);
347 }
348 
349 static uint32_t
350 alc_mii_writereg_816x(struct alc_softc *sc, int phy, int reg, int val)
351 {
352 	uint32_t clk, v;
353 	int i;
354 
355 	if ((sc->alc_flags & ALC_FLAG_LINK) != 0)
356 		clk = MDIO_CLK_25_128;
357 	else
358 		clk = MDIO_CLK_25_4;
359 	CSR_WRITE_4(sc, ALC_MDIO, MDIO_OP_EXECUTE | MDIO_OP_WRITE |
360 	    ((val & MDIO_DATA_MASK) << MDIO_DATA_SHIFT) | MDIO_REG_ADDR(reg) |
361 	    MDIO_SUP_PREAMBLE | clk);
362 	for (i = ALC_PHY_TIMEOUT; i > 0; i--) {
363 		DELAY(5);
364 		v = CSR_READ_4(sc, ALC_MDIO);
365 		if ((v & MDIO_OP_BUSY) == 0)
366 			break;
367 	}
368 
369 	if (i == 0)
370 		device_printf(sc->alc_dev, "phy write timeout : %d\n", reg);
371 
372 	return (0);
373 }
374 
/*
 * miibus_if link-state-change callback.  Re-evaluates link state from
 * mii(4), then stops the MACs and — if the link is up — reprograms
 * and re-enables them with the newly resolved speed/duplex/flow
 * control settings.  Finally re-tunes ASPM and applies DSP fixups
 * for the active media.  The stop/config/start ordering is mandated
 * by the hardware; do not reorder.
 */
static void
alc_miibus_statchg(device_t dev)
{
	struct alc_softc *sc;
	struct mii_data *mii;
	struct ifnet *ifp;
	uint32_t reg;

	sc = device_get_softc(dev);

	mii = device_get_softc(sc->alc_miibus);
	ifp = sc->alc_ifp;
	/* Nothing to do until the interface is up and running. */
	if (mii == NULL || ifp == NULL ||
	    (ifp->if_flags & IFF_RUNNING) == 0)
		return;

	/* Recompute ALC_FLAG_LINK from the resolved media. */
	sc->alc_flags &= ~ALC_FLAG_LINK;
	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
	    (IFM_ACTIVE | IFM_AVALID)) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			sc->alc_flags |= ALC_FLAG_LINK;
			break;
		case IFM_1000_T:
			/* Gigabit link only counts on gigabit-capable parts. */
			if ((sc->alc_flags & ALC_FLAG_FASTETHER) == 0)
				sc->alc_flags |= ALC_FLAG_LINK;
			break;
		default:
			break;
		}
	}
	/* Stop Rx/Tx MACs. */
	alc_stop_mac(sc);

	/* Program MACs with resolved speed/duplex/flow-control. */
	if ((sc->alc_flags & ALC_FLAG_LINK) != 0) {
		alc_start_queue(sc);
		alc_mac_config(sc);
		/* Re-enable Tx/Rx MACs. */
		reg = CSR_READ_4(sc, ALC_MAC_CFG);
		reg |= MAC_CFG_TX_ENB | MAC_CFG_RX_ENB;
		CSR_WRITE_4(sc, ALC_MAC_CFG, reg);
	}
	alc_aspm(sc, 0, IFM_SUBTYPE(mii->mii_media_active));
	alc_dsp_fixup(sc, IFM_SUBTYPE(mii->mii_media_active));
}
422 
423 static uint32_t
424 alc_miidbg_readreg(struct alc_softc *sc, int reg)
425 {
426 
427 	alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr, ALC_MII_DBG_ADDR,
428 	    reg);
429 	return (alc_miibus_readreg(sc->alc_dev, sc->alc_phyaddr,
430 	    ALC_MII_DBG_DATA));
431 }
432 
433 static uint32_t
434 alc_miidbg_writereg(struct alc_softc *sc, int reg, int val)
435 {
436 
437 	alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr, ALC_MII_DBG_ADDR,
438 	    reg);
439 	return (alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
440 	    ALC_MII_DBG_DATA, val));
441 }
442 
443 static uint32_t
444 alc_miiext_readreg(struct alc_softc *sc, int devaddr, int reg)
445 {
446 	uint32_t clk, v;
447 	int i;
448 
449 	CSR_WRITE_4(sc, ALC_EXT_MDIO, EXT_MDIO_REG(reg) |
450 	    EXT_MDIO_DEVADDR(devaddr));
451 	if ((sc->alc_flags & ALC_FLAG_LINK) != 0)
452 		clk = MDIO_CLK_25_128;
453 	else
454 		clk = MDIO_CLK_25_4;
455 	CSR_WRITE_4(sc, ALC_MDIO, MDIO_OP_EXECUTE | MDIO_OP_READ |
456 	    MDIO_SUP_PREAMBLE | clk | MDIO_MODE_EXT);
457 	for (i = ALC_PHY_TIMEOUT; i > 0; i--) {
458 		DELAY(5);
459 		v = CSR_READ_4(sc, ALC_MDIO);
460 		if ((v & MDIO_OP_BUSY) == 0)
461 			break;
462 	}
463 
464 	if (i == 0) {
465 		device_printf(sc->alc_dev, "phy ext read timeout : %d, %d\n",
466 		    devaddr, reg);
467 		return (0);
468 	}
469 
470 	return ((v & MDIO_DATA_MASK) >> MDIO_DATA_SHIFT);
471 }
472 
473 static uint32_t
474 alc_miiext_writereg(struct alc_softc *sc, int devaddr, int reg, int val)
475 {
476 	uint32_t clk, v;
477 	int i;
478 
479 	CSR_WRITE_4(sc, ALC_EXT_MDIO, EXT_MDIO_REG(reg) |
480 	    EXT_MDIO_DEVADDR(devaddr));
481 	if ((sc->alc_flags & ALC_FLAG_LINK) != 0)
482 		clk = MDIO_CLK_25_128;
483 	else
484 		clk = MDIO_CLK_25_4;
485 	CSR_WRITE_4(sc, ALC_MDIO, MDIO_OP_EXECUTE | MDIO_OP_WRITE |
486 	    ((val & MDIO_DATA_MASK) << MDIO_DATA_SHIFT) |
487 	    MDIO_SUP_PREAMBLE | clk | MDIO_MODE_EXT);
488 	for (i = ALC_PHY_TIMEOUT; i > 0; i--) {
489 		DELAY(5);
490 		v = CSR_READ_4(sc, ALC_MDIO);
491 		if ((v & MDIO_OP_BUSY) == 0)
492 			break;
493 	}
494 
495 	if (i == 0)
496 		device_printf(sc->alc_dev, "phy ext write timeout : %d, %d\n",
497 		    devaddr, reg);
498 
499 	return (0);
500 }
501 
502 static void
503 alc_dsp_fixup(struct alc_softc *sc, int media)
504 {
505 	uint16_t agc, len, val;
506 
507 	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0)
508 		return;
509 	if (AR816X_REV(sc->alc_rev) >= AR816X_REV_C0)
510 		return;
511 
512 	/*
513 	 * Vendor PHY magic.
514 	 * 1000BT/AZ, wrong cable length
515 	 */
516 	if ((sc->alc_flags & ALC_FLAG_LINK) != 0) {
517 		len = alc_miiext_readreg(sc, MII_EXT_PCS, MII_EXT_CLDCTL6);
518 		len = (len >> EXT_CLDCTL6_CAB_LEN_SHIFT) &
519 		    EXT_CLDCTL6_CAB_LEN_MASK;
520 		agc = alc_miidbg_readreg(sc, MII_DBG_AGC);
521 		agc = (agc >> DBG_AGC_2_VGA_SHIFT) & DBG_AGC_2_VGA_MASK;
522 		if ((media == IFM_1000_T && len > EXT_CLDCTL6_CAB_LEN_SHORT1G &&
523 		    agc > DBG_AGC_LONG1G_LIMT) ||
524 		    (media == IFM_100_TX && len > DBG_AGC_LONG100M_LIMT &&
525 		    agc > DBG_AGC_LONG1G_LIMT)) {
526 			alc_miidbg_writereg(sc, MII_DBG_AZ_ANADECT,
527 			    DBG_AZ_ANADECT_LONG);
528 			val = alc_miiext_readreg(sc, MII_EXT_ANEG,
529 			    MII_EXT_ANEG_AFE);
530 			val |= ANEG_AFEE_10BT_100M_TH;
531 			alc_miiext_writereg(sc, MII_EXT_ANEG, MII_EXT_ANEG_AFE,
532 			    val);
533 		} else {
534 			alc_miidbg_writereg(sc, MII_DBG_AZ_ANADECT,
535 			    DBG_AZ_ANADECT_DEFAULT);
536 			val = alc_miiext_readreg(sc, MII_EXT_ANEG,
537 			    MII_EXT_ANEG_AFE);
538 			val &= ~ANEG_AFEE_10BT_100M_TH;
539 			alc_miiext_writereg(sc, MII_EXT_ANEG, MII_EXT_ANEG_AFE,
540 			    val);
541 		}
542 		if ((sc->alc_flags & ALC_FLAG_LINK_WAR) != 0 &&
543 		    AR816X_REV(sc->alc_rev) == AR816X_REV_B0) {
544 			if (media == IFM_1000_T) {
545 				/*
546 				 * Giga link threshold, raise the tolerance of
547 				 * noise 50%.
548 				 */
549 				val = alc_miidbg_readreg(sc, MII_DBG_MSE20DB);
550 				val &= ~DBG_MSE20DB_TH_MASK;
551 				val |= (DBG_MSE20DB_TH_HI <<
552 				    DBG_MSE20DB_TH_SHIFT);
553 				alc_miidbg_writereg(sc, MII_DBG_MSE20DB, val);
554 			} else if (media == IFM_100_TX)
555 				alc_miidbg_writereg(sc, MII_DBG_MSE16DB,
556 				    DBG_MSE16DB_UP);
557 		}
558 	} else {
559 		val = alc_miiext_readreg(sc, MII_EXT_ANEG, MII_EXT_ANEG_AFE);
560 		val &= ~ANEG_AFEE_10BT_100M_TH;
561 		alc_miiext_writereg(sc, MII_EXT_ANEG, MII_EXT_ANEG_AFE, val);
562 		if ((sc->alc_flags & ALC_FLAG_LINK_WAR) != 0 &&
563 		    AR816X_REV(sc->alc_rev) == AR816X_REV_B0) {
564 			alc_miidbg_writereg(sc, MII_DBG_MSE16DB,
565 			    DBG_MSE16DB_DOWN);
566 			val = alc_miidbg_readreg(sc, MII_DBG_MSE20DB);
567 			val &= ~DBG_MSE20DB_TH_MASK;
568 			val |= (DBG_MSE20DB_TH_DEFAULT << DBG_MSE20DB_TH_SHIFT);
569 			alc_miidbg_writereg(sc, MII_DBG_MSE20DB, val);
570 		}
571 	}
572 }
573 
574 static void
575 alc_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
576 {
577 	struct alc_softc *sc;
578 	struct mii_data *mii;
579 
580 	sc = ifp->if_softc;
581 	if ((ifp->if_flags & IFF_UP) == 0) {
582 		return;
583 	}
584 	mii = device_get_softc(sc->alc_miibus);
585 
586 	mii_pollstat(mii);
587 	ifmr->ifm_status = mii->mii_media_status;
588 	ifmr->ifm_active = mii->mii_media_active;
589 }
590 
591 static int
592 alc_mediachange(struct ifnet *ifp)
593 {
594 	struct alc_softc *sc;
595 	int error;
596 
597 	sc = ifp->if_softc;
598 	ALC_LOCK(sc);
599 	error = alc_mediachange_locked(sc);
600 	ALC_UNLOCK(sc);
601 
602 	return (error);
603 }
604 
605 static int
606 alc_mediachange_locked(struct alc_softc *sc)
607 {
608 	struct mii_data *mii;
609 	struct mii_softc *miisc;
610 	int error;
611 
612 	ALC_LOCK_ASSERT(sc);
613 
614 	mii = device_get_softc(sc->alc_miibus);
615 	if (mii->mii_instance != 0) {
616 		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
617 			mii_phy_reset(miisc);
618 	}
619 	error = mii_mediachg(mii);
620 
621 	return (error);
622 }
623 
624 static struct alc_ident *
625 alc_find_ident(device_t dev)
626 {
627 	struct alc_ident *ident;
628 	uint16_t vendor, devid;
629 
630 	vendor = pci_get_vendor(dev);
631 	devid = pci_get_device(dev);
632 	for (ident = alc_ident_table; ident->name != NULL; ident++) {
633 		if (vendor == ident->vendorid && devid == ident->deviceid)
634 			return (ident);
635 	}
636 
637 	return (NULL);
638 }
639 
640 static int
641 alc_probe(device_t dev)
642 {
643 	struct alc_ident *ident;
644 
645 	ident = alc_find_ident(dev);
646 	if (ident != NULL) {
647 		device_set_desc(dev, ident->name);
648 		return (BUS_PROBE_DEFAULT);
649 	}
650 
651 	return (ENXIO);
652 }
653 
654 static void
655 alc_get_macaddr(struct alc_softc *sc)
656 {
657 
658 	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0)
659 		alc_get_macaddr_816x(sc);
660 	else
661 		alc_get_macaddr_813x(sc);
662 }
663 
/*
 * Retrieve the station address on AR813x-class controllers.  If an
 * external EEPROM is present (OTP select set and a TWSI device
 * detected), apply per-chip vendor PHY tweaks, trigger a TWSI reload
 * of the EEPROM configuration (which programs the MAC address into
 * the PAR registers), undo the tweaks, and finally read the address
 * back via alc_get_macaddr_par().
 */
static void
alc_get_macaddr_813x(struct alc_softc *sc)
{
	uint32_t opt;
	uint16_t val;
	int eeprom, i;

	eeprom = 0;
	opt = CSR_READ_4(sc, ALC_OPT_CFG);
	if ((CSR_READ_4(sc, ALC_MASTER_CFG) & MASTER_OTP_SEL) != 0 &&
	    (CSR_READ_4(sc, ALC_TWSI_DEBUG) & TWSI_DEBUG_DEV_EXIST) != 0) {
		/*
		 * EEPROM found, let TWSI reload EEPROM configuration.
		 * This will set ethernet address of controller.
		 */
		eeprom++;
		switch (sc->alc_ident->deviceid) {
		case DEVICEID_ATHEROS_AR8131:
		case DEVICEID_ATHEROS_AR8132:
			/* Make sure the OTP clock runs during the reload. */
			if ((opt & OPT_CFG_CLK_ENB) == 0) {
				opt |= OPT_CFG_CLK_ENB;
				CSR_WRITE_4(sc, ALC_OPT_CFG, opt);
				CSR_READ_4(sc, ALC_OPT_CFG);
				DELAY(1000);
			}
			break;
		case DEVICEID_ATHEROS_AR8151:
		case DEVICEID_ATHEROS_AR8151_V2:
		case DEVICEID_ATHEROS_AR8152_B:
		case DEVICEID_ATHEROS_AR8152_B2:
			/*
			 * Vendor PHY debug-register magic required before
			 * the reload on these parts (undone further below).
			 */
			alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
					    ALC_MII_DBG_ADDR, 0x00);
			val = alc_miibus_readreg(sc->alc_dev, sc->alc_phyaddr,
						 ALC_MII_DBG_DATA);
			alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
					    ALC_MII_DBG_DATA, val & 0xFF7F);
			alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
					    ALC_MII_DBG_ADDR, 0x3B);
			val = alc_miibus_readreg(sc->alc_dev, sc->alc_phyaddr,
						 ALC_MII_DBG_DATA);
			alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
					    ALC_MII_DBG_DATA, val | 0x0008);
			DELAY(20);
			break;
		}

		/* Allow the reload to overwrite the PAR registers. */
		CSR_WRITE_4(sc, ALC_LTSSM_ID_CFG,
			CSR_READ_4(sc, ALC_LTSSM_ID_CFG) & ~LTSSM_ID_WRO_ENB);
		CSR_WRITE_4(sc, ALC_WOL_CFG, 0);
		CSR_READ_4(sc, ALC_WOL_CFG);

		/* Kick off the TWSI software load and poll for completion. */
		CSR_WRITE_4(sc, ALC_TWSI_CFG, CSR_READ_4(sc, ALC_TWSI_CFG) |
			    TWSI_CFG_SW_LD_START);
		for (i = 100; i > 0; i--) {
			DELAY(1000);
			if ((CSR_READ_4(sc, ALC_TWSI_CFG) &
			    TWSI_CFG_SW_LD_START) == 0)
				break;
		}
		if (i == 0)
			device_printf(sc->alc_dev,
			    "reloading EEPROM timeout!\n");
	} else {
		if (bootverbose)
			device_printf(sc->alc_dev, "EEPROM not found!\n");
	}
	if (eeprom != 0) {
		/* Undo the pre-reload tweaks applied above. */
		switch (sc->alc_ident->deviceid) {
		case DEVICEID_ATHEROS_AR8131:
		case DEVICEID_ATHEROS_AR8132:
			if ((opt & OPT_CFG_CLK_ENB) != 0) {
				opt &= ~OPT_CFG_CLK_ENB;
				CSR_WRITE_4(sc, ALC_OPT_CFG, opt);
				CSR_READ_4(sc, ALC_OPT_CFG);
				DELAY(1000);
			}
			break;
		case DEVICEID_ATHEROS_AR8151:
		case DEVICEID_ATHEROS_AR8151_V2:
		case DEVICEID_ATHEROS_AR8152_B:
		case DEVICEID_ATHEROS_AR8152_B2:
			alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
					    ALC_MII_DBG_ADDR, 0x00);
			val = alc_miibus_readreg(sc->alc_dev, sc->alc_phyaddr,
						 ALC_MII_DBG_DATA);
			alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
					    ALC_MII_DBG_DATA, val | 0x0080);
			alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
					    ALC_MII_DBG_ADDR, 0x3B);
			val = alc_miibus_readreg(sc->alc_dev, sc->alc_phyaddr,
						 ALC_MII_DBG_DATA);
			alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
					    ALC_MII_DBG_DATA, val & 0xFFF7);
			DELAY(20);
			break;
		}
	}

	alc_get_macaddr_par(sc);
}
764 
/*
 * Retrieve the station address on AR816x-class controllers.  First
 * try reloading it via TWSI (wait for any in-progress load, start a
 * new one, wait again); if that fails, fall back to a reload from
 * EEPROM/FLASH when one is present.  Either way the address ends up
 * in the PAR registers and is read back by alc_get_macaddr_par().
 */
static void
alc_get_macaddr_816x(struct alc_softc *sc)
{
	uint32_t reg;
	int i, reloaded;

	reloaded = 0;
	/* Try to reload station address via TWSI. */
	for (i = 100; i > 0; i--) {
		reg = CSR_READ_4(sc, ALC_SLD);
		if ((reg & (SLD_PROGRESS | SLD_START)) == 0)
			break;
		DELAY(1000);
	}
	if (i != 0) {
		CSR_WRITE_4(sc, ALC_SLD, reg | SLD_START);
		for (i = 100; i > 0; i--) {
			DELAY(1000);
			reg = CSR_READ_4(sc, ALC_SLD);
			if ((reg & SLD_START) == 0)
				break;
		}
		if (i != 0)
			reloaded++;
		else if (bootverbose)
			device_printf(sc->alc_dev,
			    "reloading station address via TWSI timed out!\n");
	}

	/* Try to reload station address from EEPROM or FLASH. */
	if (reloaded == 0) {
		reg = CSR_READ_4(sc, ALC_EEPROM_LD);
		if ((reg & (EEPROM_LD_EEPROM_EXIST |
		    EEPROM_LD_FLASH_EXIST)) != 0) {
			/* Wait for any in-progress load to finish. */
			for (i = 100; i > 0; i--) {
				reg = CSR_READ_4(sc, ALC_EEPROM_LD);
				if ((reg & (EEPROM_LD_PROGRESS |
				    EEPROM_LD_START)) == 0)
					break;
				DELAY(1000);
			}
			if (i != 0) {
				/* Start a fresh load and poll for completion. */
				CSR_WRITE_4(sc, ALC_EEPROM_LD, reg |
				    EEPROM_LD_START);
				for (i = 100; i > 0; i--) {
					DELAY(1000);
					reg = CSR_READ_4(sc, ALC_EEPROM_LD);
					if ((reg & EEPROM_LD_START) == 0)
						break;
				}
			} else if (bootverbose)
				device_printf(sc->alc_dev,
				    "reloading EEPROM/FLASH timed out!\n");
		}
	}

	alc_get_macaddr_par(sc);
}
823 
824 static void
825 alc_get_macaddr_par(struct alc_softc *sc)
826 {
827 	uint32_t ea[2];
828 
829 	ea[0] = CSR_READ_4(sc, ALC_PAR0);
830 	ea[1] = CSR_READ_4(sc, ALC_PAR1);
831 	sc->alc_eaddr[0] = (ea[1] >> 8) & 0xFF;
832 	sc->alc_eaddr[1] = (ea[1] >> 0) & 0xFF;
833 	sc->alc_eaddr[2] = (ea[0] >> 24) & 0xFF;
834 	sc->alc_eaddr[3] = (ea[0] >> 16) & 0xFF;
835 	sc->alc_eaddr[4] = (ea[0] >> 8) & 0xFF;
836 	sc->alc_eaddr[5] = (ea[0] >> 0) & 0xFF;
837 }
838 
/*
 * Disable PCIe ASPM L0s/L1 entry on AR813x-class parts by rewriting
 * the power-management config register with vendor-recommended SerDes
 * settings.  AR816x parts are handled elsewhere and skipped here.
 */
static void
alc_disable_l0s_l1(struct alc_softc *sc)
{
	uint32_t pmcfg;

	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) == 0) {
		/* Another magic from vendor. */
		pmcfg = CSR_READ_4(sc, ALC_PM_CFG);
		pmcfg &= ~(PM_CFG_L1_ENTRY_TIMER_MASK | PM_CFG_CLK_SWH_L1 |
		    PM_CFG_ASPM_L0S_ENB | PM_CFG_ASPM_L1_ENB |
		    PM_CFG_MAC_ASPM_CHK | PM_CFG_SERDES_PD_EX_L1);
		pmcfg |= PM_CFG_SERDES_BUDS_RX_L1_ENB |
		    PM_CFG_SERDES_PLL_L1_ENB | PM_CFG_SERDES_L1_ENB;
		CSR_WRITE_4(sc, ALC_PM_CFG, pmcfg);
	}
}
855 
856 static void
857 alc_phy_reset(struct alc_softc *sc)
858 {
859 
860 	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0)
861 		alc_phy_reset_816x(sc);
862 	else
863 		alc_phy_reset_813x(sc);
864 }
865 
/*
 * Reset and recalibrate the PHY on AR813x-class controllers: pulse
 * the analog/external reset lines, apply per-chip DSP fixups through
 * the PHY debug registers, load vendor analog calibration ("DSP
 * code") values, and finally disable PHY hibernation.  All constants
 * are vendor magic inherited from the Linux/FreeBSD drivers.
 */
static void
alc_phy_reset_813x(struct alc_softc *sc)
{
	uint16_t data;

	/* Reset magic from Linux. */
	CSR_WRITE_2(sc, ALC_GPHY_CFG, GPHY_CFG_SEL_ANA_RESET);
	CSR_READ_2(sc, ALC_GPHY_CFG);
	DELAY(10 * 1000);

	CSR_WRITE_2(sc, ALC_GPHY_CFG, GPHY_CFG_EXT_RESET |
	    GPHY_CFG_SEL_ANA_RESET);
	CSR_READ_2(sc, ALC_GPHY_CFG);
	DELAY(10 * 1000);

	/* DSP fixup, Vendor magic. */
	if (sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8152_B) {
		alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
				    ALC_MII_DBG_ADDR, 0x000A);
		data = alc_miibus_readreg(sc->alc_dev, sc->alc_phyaddr,
					 ALC_MII_DBG_DATA);
		alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
				    ALC_MII_DBG_DATA, data & 0xDFFF);
	}
	if (sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8151 ||
	    sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8151_V2 ||
	    sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8152_B ||
	    sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8152_B2) {
		alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
				    ALC_MII_DBG_ADDR, 0x003B);
		data = alc_miibus_readreg(sc->alc_dev, sc->alc_phyaddr,
					  ALC_MII_DBG_DATA);
		alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
				    ALC_MII_DBG_DATA, data & 0xFFF7);
		DELAY(20 * 1000);
	}
	if (sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8151) {
		alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
				    ALC_MII_DBG_ADDR, 0x0029);
		alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
				    ALC_MII_DBG_DATA, 0x929D);
	}
	if (sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8131 ||
	    sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8132 ||
	    sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8151_V2 ||
	    sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8152_B2) {
		alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
				    ALC_MII_DBG_ADDR, 0x0029),
		alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
				    ALC_MII_DBG_DATA, 0xB6DD);
	}

	/* Load DSP codes, vendor magic. */
	data = ANA_LOOP_SEL_10BT | ANA_EN_MASK_TB | ANA_EN_10BT_IDLE |
	    ((1 << ANA_INTERVAL_SEL_TIMER_SHIFT) & ANA_INTERVAL_SEL_TIMER_MASK);
	alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
	    ALC_MII_DBG_ADDR, MII_ANA_CFG18);
	alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
	    ALC_MII_DBG_DATA, data);

	data = ((2 << ANA_SERDES_CDR_BW_SHIFT) & ANA_SERDES_CDR_BW_MASK) |
	    ANA_SERDES_EN_DEEM | ANA_SERDES_SEL_HSP | ANA_SERDES_EN_PLL |
	    ANA_SERDES_EN_LCKDT;
	alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
	    ALC_MII_DBG_ADDR, MII_ANA_CFG5);
	alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
	    ALC_MII_DBG_DATA, data);

	/*
	 * NOTE(review): the second term below masks with
	 * ANA_SHORT_CABLE_TH_100_SHIFT where the surrounding code uses a
	 * matching ..._MASK; looks like a SHIFT/MASK typo inherited from
	 * the vendor code — confirm against if_alcreg.h before changing.
	 */
	data = ((44 << ANA_LONG_CABLE_TH_100_SHIFT) &
	    ANA_LONG_CABLE_TH_100_MASK) |
	    ((33 << ANA_SHORT_CABLE_TH_100_SHIFT) &
	    ANA_SHORT_CABLE_TH_100_SHIFT) |
	    ANA_BP_BAD_LINK_ACCUM | ANA_BP_SMALL_BW;
	alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
	    ALC_MII_DBG_ADDR, MII_ANA_CFG54);
	alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
	    ALC_MII_DBG_DATA, data);

	data = ((11 << ANA_IECHO_ADJ_3_SHIFT) & ANA_IECHO_ADJ_3_MASK) |
	    ((11 << ANA_IECHO_ADJ_2_SHIFT) & ANA_IECHO_ADJ_2_MASK) |
	    ((8 << ANA_IECHO_ADJ_1_SHIFT) & ANA_IECHO_ADJ_1_MASK) |
	    ((8 << ANA_IECHO_ADJ_0_SHIFT) & ANA_IECHO_ADJ_0_MASK);
	alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
	    ALC_MII_DBG_ADDR, MII_ANA_CFG4);
	alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
	    ALC_MII_DBG_DATA, data);

	/*
	 * NOTE(review): "7 & ANA_MANUL_SWICH_ON_SHIFT" uses bitwise AND
	 * where the pattern elsewhere is "value << SHIFT"; presumably a
	 * typo for "7 << ANA_MANUL_SWICH_ON_SHIFT" — verify before fixing.
	 */
	data = ((7 & ANA_MANUL_SWICH_ON_SHIFT) & ANA_MANUL_SWICH_ON_MASK) |
	    ANA_RESTART_CAL | ANA_MAN_ENABLE | ANA_SEL_HSP | ANA_EN_HB |
	    ANA_OEN_125M;
	alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
	    ALC_MII_DBG_ADDR, MII_ANA_CFG0);
	alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
	    ALC_MII_DBG_DATA, data);
	DELAY(1000);

	/* Disable hibernation. */
	alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr, ALC_MII_DBG_ADDR,
	    0x0029);
	data = alc_miibus_readreg(sc->alc_dev, sc->alc_phyaddr,
	    ALC_MII_DBG_DATA);
	data &= ~0x8000;
	alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr, ALC_MII_DBG_DATA,
	    data);

	alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr, ALC_MII_DBG_ADDR,
	    0x000B);
	data = alc_miibus_readreg(sc->alc_dev, sc->alc_phyaddr,
	    ALC_MII_DBG_DATA);
	data &= ~0x8000;
	alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr, ALC_MII_DBG_DATA,
	    data);
}
979 
/*
 * Reset and work around the internal PHY on AR816x-family controllers.
 *
 * The sequence is: pulse the analog reset line via ALC_GPHY_CFG, then
 * apply a series of vendor-documented debug/extended register writes
 * (hibernation off, EEE off, power-saving defaults, long-cable fixups).
 * The ordering of the register accesses below follows the vendor
 * reference code and must not be rearranged.
 */
static void
alc_phy_reset_816x(struct alc_softc *sc)
{
	uint32_t val;

	/* Take the PHY out of reset with analog reset selected. */
	val = CSR_READ_4(sc, ALC_GPHY_CFG);
	val &= ~(GPHY_CFG_EXT_RESET | GPHY_CFG_LED_MODE |
	    GPHY_CFG_GATE_25M_ENB | GPHY_CFG_PHY_IDDQ | GPHY_CFG_PHY_PLL_ON |
	    GPHY_CFG_PWDOWN_HW | GPHY_CFG_100AB_ENB);
	val |= GPHY_CFG_SEL_ANA_RESET;
#ifdef notyet
	val |= GPHY_CFG_HIB_PULSE | GPHY_CFG_HIB_EN | GPHY_CFG_SEL_ANA_RESET;
#else
	/* Disable PHY hibernation. */
	val &= ~(GPHY_CFG_HIB_PULSE | GPHY_CFG_HIB_EN);
#endif
	CSR_WRITE_4(sc, ALC_GPHY_CFG, val);
	DELAY(10);
	/* Assert external reset and give the PHY time to come back. */
	CSR_WRITE_4(sc, ALC_GPHY_CFG, val | GPHY_CFG_EXT_RESET);
	DELAY(800);

	/* Vendor PHY magic. */
#ifdef notyet
	alc_miidbg_writereg(sc, MII_DBG_LEGCYPS, DBG_LEGCYPS_DEFAULT);
	alc_miidbg_writereg(sc, MII_DBG_SYSMODCTL, DBG_SYSMODCTL_DEFAULT);
	alc_miiext_writereg(sc, MII_EXT_PCS, MII_EXT_VDRVBIAS,
	    EXT_VDRVBIAS_DEFAULT);
#else
	/* Disable PHY hibernation. */
	alc_miidbg_writereg(sc, MII_DBG_LEGCYPS,
	    DBG_LEGCYPS_DEFAULT & ~DBG_LEGCYPS_ENB);
	alc_miidbg_writereg(sc, MII_DBG_HIBNEG,
	    DBG_HIBNEG_DEFAULT & ~(DBG_HIBNEG_PSHIB_EN | DBG_HIBNEG_HIB_PULSE));
	alc_miidbg_writereg(sc, MII_DBG_GREENCFG, DBG_GREENCFG_DEFAULT);
#endif

	/* XXX Disable EEE. */
	val = CSR_READ_4(sc, ALC_LPI_CTL);
	val &= ~LPI_CTL_ENB;
	CSR_WRITE_4(sc, ALC_LPI_CTL, val);
	/* Advertise no EEE capability to the link partner. */
	alc_miiext_writereg(sc, MII_EXT_ANEG, MII_EXT_ANEG_LOCAL_EEEADV, 0);

	/* PHY power saving. */
	alc_miidbg_writereg(sc, MII_DBG_TST10BTCFG, DBG_TST10BTCFG_DEFAULT);
	alc_miidbg_writereg(sc, MII_DBG_SRDSYSMOD, DBG_SRDSYSMOD_DEFAULT);
	alc_miidbg_writereg(sc, MII_DBG_TST100BTCFG, DBG_TST100BTCFG_DEFAULT);
	alc_miidbg_writereg(sc, MII_DBG_ANACTL, DBG_ANACTL_DEFAULT);
	val = alc_miidbg_readreg(sc, MII_DBG_GREENCFG2);
	val &= ~DBG_GREENCFG2_GATE_DFSE_EN;
	alc_miidbg_writereg(sc, MII_DBG_GREENCFG2, val);

	/* RTL8139C, 120m issue. */
	alc_miiext_writereg(sc, MII_EXT_ANEG, MII_EXT_ANEG_NLP78,
	    ANEG_NLP78_120M_DEFAULT);
	alc_miiext_writereg(sc, MII_EXT_ANEG, MII_EXT_ANEG_S3DIG10,
	    ANEG_S3DIG10_DEFAULT);

	if ((sc->alc_flags & ALC_FLAG_LINK_WAR) != 0) {
		/* Turn off half amplitude. */
		val = alc_miiext_readreg(sc, MII_EXT_PCS, MII_EXT_CLDCTL3);
		val |= EXT_CLDCTL3_BP_CABLE1TH_DET_GT;
		alc_miiext_writereg(sc, MII_EXT_PCS, MII_EXT_CLDCTL3, val);
		/* Turn off Green feature. */
		val = alc_miidbg_readreg(sc, MII_DBG_GREENCFG2);
		val |= DBG_GREENCFG2_BP_GREEN;
		alc_miidbg_writereg(sc, MII_DBG_GREENCFG2, val);
		/* Turn off half bias. */
		val = alc_miiext_readreg(sc, MII_EXT_PCS, MII_EXT_CLDCTL5);
		val |= EXT_CLDCTL5_BP_VD_HLFBIAS;
		alc_miiext_writereg(sc, MII_EXT_PCS, MII_EXT_CLDCTL5, val);
	}
}
1052 
/*
 * Put the PHY into its lowest safe power state prior to detach or
 * suspend.  The exact mechanism differs by chip generation; see the
 * per-case comments below.
 */
static void
alc_phy_down(struct alc_softc *sc)
{
	uint32_t gphy;

	switch (sc->alc_ident->deviceid) {
	case DEVICEID_ATHEROS_AR8161:
	case DEVICEID_ATHEROS_E2200:
	case DEVICEID_ATHEROS_E2400:
	case DEVICEID_ATHEROS_AR8162:
	case DEVICEID_ATHEROS_AR8171:
	case DEVICEID_ATHEROS_AR8172:
		/*
		 * AR816x family: enable hibernation and hardware power
		 * down via the GPHY configuration register.
		 */
		gphy = CSR_READ_4(sc, ALC_GPHY_CFG);
		gphy &= ~(GPHY_CFG_EXT_RESET | GPHY_CFG_LED_MODE |
		GPHY_CFG_100AB_ENB | GPHY_CFG_PHY_PLL_ON);
		gphy |= GPHY_CFG_HIB_EN | GPHY_CFG_HIB_PULSE |
		GPHY_CFG_SEL_ANA_RESET;
		gphy |= GPHY_CFG_PHY_IDDQ | GPHY_CFG_PWDOWN_HW;
		CSR_WRITE_4(sc, ALC_GPHY_CFG, gphy);
		break;
	case DEVICEID_ATHEROS_AR8151:
	case DEVICEID_ATHEROS_AR8151_V2:
	case DEVICEID_ATHEROS_AR8152_B:
	case DEVICEID_ATHEROS_AR8152_B2:
		/*
		 * GPHY power down caused more problems on AR8151 v2.0.
		 * When the driver is reloaded after GPHY power down,
		 * accesses to PHY/MAC registers hung the system.  Only
		 * a cold boot recovered from it.  I'm not sure whether
		 * AR8151 v1.0 also requires this one though.  I don't
		 * have an AR8151 v1.0 controller in hand.
		 * The only option left is to isolate the PHY and
		 * initiate a PHY power down, which in turn saves
		 * more power when the driver is unloaded.
		 */
		alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
				    MII_BMCR, BMCR_ISO | BMCR_PDOWN);
		break;
	default:
		/* Force PHY down. */
		CSR_WRITE_2(sc, ALC_GPHY_CFG, GPHY_CFG_EXT_RESET |
		    GPHY_CFG_SEL_ANA_RESET | GPHY_CFG_PHY_IDDQ |
		    GPHY_CFG_PWDOWN_HW);
		DELAY(1000);
		break;
	}
}
1100 
1101 static void
1102 alc_aspm(struct alc_softc *sc, int init, int media)
1103 {
1104 
1105 	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0)
1106 		alc_aspm_816x(sc, init);
1107 	else
1108 		alc_aspm_813x(sc, media);
1109 }
1110 
1111 static void
1112 alc_aspm_813x(struct alc_softc *sc, int media)
1113 {
1114 	uint32_t pmcfg;
1115 	uint16_t linkcfg;
1116 
1117 	if ((sc->alc_flags & ALC_FLAG_LINK) == 0)
1118 		return;
1119 
1120 	pmcfg = CSR_READ_4(sc, ALC_PM_CFG);
1121 	if ((sc->alc_flags & (ALC_FLAG_APS | ALC_FLAG_PCIE)) ==
1122 	    (ALC_FLAG_APS | ALC_FLAG_PCIE))
1123 		linkcfg = CSR_READ_2(sc, sc->alc_expcap +
1124 					 PCIR_EXPRESS_LINK_CTL);
1125 	else
1126 		linkcfg = 0;
1127 	pmcfg &= ~PM_CFG_SERDES_PD_EX_L1;
1128 	pmcfg &= ~(PM_CFG_L1_ENTRY_TIMER_MASK | PM_CFG_LCKDET_TIMER_MASK);
1129 	pmcfg |= PM_CFG_MAC_ASPM_CHK;
1130 	pmcfg |= (PM_CFG_LCKDET_TIMER_DEFAULT << PM_CFG_LCKDET_TIMER_SHIFT);
1131 	pmcfg &= ~(PM_CFG_ASPM_L1_ENB | PM_CFG_ASPM_L0S_ENB);
1132 
1133 	if ((sc->alc_flags & ALC_FLAG_APS) != 0) {
1134 		/* Disable extended sync except AR8152 B v1.0 */
1135 		linkcfg &= ~PCIEM_LINK_CTL_EXTENDED_SYNC;
1136 		if (sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8152_B &&
1137 		    sc->alc_rev == ATHEROS_AR8152_B_V10)
1138 			linkcfg |= PCIEM_LINK_CTL_EXTENDED_SYNC;
1139 		CSR_WRITE_2(sc, sc->alc_expcap + PCIER_LINK_CTL,
1140 			    linkcfg);
1141 		pmcfg &= ~(PM_CFG_EN_BUFS_RX_L0S | PM_CFG_SA_DLY_ENB |
1142 			   PM_CFG_HOTRST);
1143 		pmcfg |= (PM_CFG_L1_ENTRY_TIMER_DEFAULT <<
1144 			  PM_CFG_L1_ENTRY_TIMER_SHIFT);
1145 		pmcfg &= ~PM_CFG_PM_REQ_TIMER_MASK;
1146 		pmcfg |= (PM_CFG_PM_REQ_TIMER_DEFAULT <<
1147 			  PM_CFG_PM_REQ_TIMER_SHIFT);
1148 		pmcfg |= PM_CFG_SERDES_PD_EX_L1 | PM_CFG_PCIE_RECV;
1149 	}
1150 
1151 	if ((sc->alc_flags & ALC_FLAG_LINK) != 0) {
1152 		if ((sc->alc_flags & ALC_FLAG_L0S) != 0)
1153 			pmcfg |= PM_CFG_ASPM_L0S_ENB;
1154 		if ((sc->alc_flags & ALC_FLAG_L1S) != 0)
1155 			pmcfg |= PM_CFG_ASPM_L1_ENB;
1156 		if ((sc->alc_flags & ALC_FLAG_APS) != 0) {
1157 			if (sc->alc_ident->deviceid ==
1158 			    DEVICEID_ATHEROS_AR8152_B)
1159 				pmcfg &= ~PM_CFG_ASPM_L0S_ENB;
1160 			pmcfg &= ~(PM_CFG_SERDES_L1_ENB |
1161 				   PM_CFG_SERDES_PLL_L1_ENB |
1162 				   PM_CFG_SERDES_BUDS_RX_L1_ENB);
1163 			pmcfg |= PM_CFG_CLK_SWH_L1;
1164 			if (media == IFM_100_TX || media == IFM_1000_T) {
1165 				pmcfg &= ~PM_CFG_L1_ENTRY_TIMER_MASK;
1166 				switch (sc->alc_ident->deviceid) {
1167 				case DEVICEID_ATHEROS_AR8152_B:
1168 					pmcfg |= (7 <<
1169 						PM_CFG_L1_ENTRY_TIMER_SHIFT);
1170 					break;
1171 				case DEVICEID_ATHEROS_AR8152_B2:
1172 				case DEVICEID_ATHEROS_AR8151_V2:
1173 					pmcfg |= (4 <<
1174 						PM_CFG_L1_ENTRY_TIMER_SHIFT);
1175 					break;
1176 				default:
1177 					pmcfg |= (15 <<
1178 						PM_CFG_L1_ENTRY_TIMER_SHIFT);
1179 					break;
1180 				}
1181 			}
1182 		} else {
1183 			pmcfg |= PM_CFG_SERDES_L1_ENB |
1184 				PM_CFG_SERDES_PLL_L1_ENB |
1185 				PM_CFG_SERDES_BUDS_RX_L1_ENB;
1186 			pmcfg &= ~(PM_CFG_CLK_SWH_L1 |
1187 				PM_CFG_ASPM_L1_ENB | PM_CFG_ASPM_L0S_ENB);
1188 		}
1189 	} else {
1190 		pmcfg &= ~(PM_CFG_SERDES_BUDS_RX_L1_ENB | PM_CFG_SERDES_L1_ENB |
1191 			   PM_CFG_SERDES_PLL_L1_ENB);
1192 		pmcfg |= PM_CFG_CLK_SWH_L1;
1193 		if ((sc->alc_flags & ALC_FLAG_L1S) != 0)
1194 			pmcfg |= PM_CFG_ASPM_L1_ENB;
1195 	}
1196 	CSR_WRITE_4(sc, ALC_PM_CFG, pmcfg);
1197 }
1198 
/*
 * Program ASPM power management for AR816x-family controllers.
 * `init' is non-zero when called from initial chip bring-up, in
 * which case ASPM is enabled even before a link is established.
 * The clear-then-set ordering on pmcfg below is significant.
 */
static void
alc_aspm_816x(struct alc_softc *sc, int init)
{
	uint32_t pmcfg;

	pmcfg = CSR_READ_4(sc, ALC_PM_CFG);
	/* Load default timer values. */
	pmcfg &= ~PM_CFG_L1_ENTRY_TIMER_816X_MASK;
	pmcfg |= PM_CFG_L1_ENTRY_TIMER_816X_DEFAULT;
	pmcfg &= ~PM_CFG_PM_REQ_TIMER_MASK;
	pmcfg |= PM_CFG_PM_REQ_TIMER_816X_DEFAULT;
	pmcfg &= ~PM_CFG_LCKDET_TIMER_MASK;
	pmcfg |= PM_CFG_LCKDET_TIMER_DEFAULT;
	pmcfg |= PM_CFG_SERDES_PD_EX_L1 | PM_CFG_CLK_SWH_L1 | PM_CFG_PCIE_RECV;
	/* Start from everything disabled; re-enable selectively below. */
	pmcfg &= ~(PM_CFG_RX_L1_AFTER_L0S | PM_CFG_TX_L1_AFTER_L0S |
	    PM_CFG_ASPM_L1_ENB | PM_CFG_ASPM_L0S_ENB |
	    PM_CFG_SERDES_L1_ENB | PM_CFG_SERDES_PLL_L1_ENB |
	    PM_CFG_SERDES_BUDS_RX_L1_ENB | PM_CFG_SA_DLY_ENB |
	    PM_CFG_MAC_ASPM_CHK | PM_CFG_HOTRST);
	/* Early A0/A1 odd revisions need the SerDes kept up in L1. */
	if (AR816X_REV(sc->alc_rev) <= AR816X_REV_A1 &&
	    (sc->alc_rev & 0x01) != 0)
		pmcfg |= PM_CFG_SERDES_L1_ENB | PM_CFG_SERDES_PLL_L1_ENB;
	if ((sc->alc_flags & ALC_FLAG_LINK) != 0) {
		/* Link up, enable both L0s, L1s. */
		pmcfg |= PM_CFG_ASPM_L0S_ENB | PM_CFG_ASPM_L1_ENB |
		    PM_CFG_MAC_ASPM_CHK;
	} else {
		if (init != 0)
			pmcfg |= PM_CFG_ASPM_L0S_ENB | PM_CFG_ASPM_L1_ENB |
			    PM_CFG_MAC_ASPM_CHK;
		else if ((sc->alc_ifp->if_flags & IFF_RUNNING) != 0)
			pmcfg |= PM_CFG_ASPM_L1_ENB | PM_CFG_MAC_ASPM_CHK;
	}
	CSR_WRITE_4(sc, ALC_PM_CFG, pmcfg);
}
1233 
/*
 * One-time PCIe bring-up: clear latched link errors, apply per-chip
 * SerDes/clock workarounds, probe the device's advertised ASPM
 * capability (recording L0s/L1 support and read-completion boundary
 * in sc->alc_flags / sc->alc_rcb), and finally hand off to
 * alc_aspm() for the initial ASPM programming.
 */
static void
alc_init_pcie(struct alc_softc *sc)
{
	/* Indexed by the 2-bit ASPM Control field of Link Control. */
	const char *aspm_state[] = { "L0s/L1", "L0s", "L1", "L0s/L1" };
	uint32_t cap, ctl, val;
	int state;

	/* Clear data link and flow-control protocol error. */
	val = CSR_READ_4(sc, ALC_PEX_UNC_ERR_SEV);
	val &= ~(PEX_UNC_ERR_SEV_DLP | PEX_UNC_ERR_SEV_FCP);
	CSR_WRITE_4(sc, ALC_PEX_UNC_ERR_SEV, val);

	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) == 0) {
		/* AR813x/AR815x path. */
		CSR_WRITE_4(sc, ALC_LTSSM_ID_CFG,
		    CSR_READ_4(sc, ALC_LTSSM_ID_CFG) & ~LTSSM_ID_WRO_ENB);
		CSR_WRITE_4(sc, ALC_PCIE_PHYMISC,
		    CSR_READ_4(sc, ALC_PCIE_PHYMISC) |
		    PCIE_PHYMISC_FORCE_RCV_DET);
		/* SerDes CDR/threshold tuning for AR8152 B v1.0 only. */
		if (sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8152_B &&
		    sc->alc_rev == ATHEROS_AR8152_B_V10) {
			val = CSR_READ_4(sc, ALC_PCIE_PHYMISC2);
			val &= ~(PCIE_PHYMISC2_SERDES_CDR_MASK |
			    PCIE_PHYMISC2_SERDES_TH_MASK);
			val |= 3 << PCIE_PHYMISC2_SERDES_CDR_SHIFT;
			val |= 3 << PCIE_PHYMISC2_SERDES_TH_SHIFT;
			CSR_WRITE_4(sc, ALC_PCIE_PHYMISC2, val);
		}
		/* Disable ASPM L0S and L1. */
		cap = CSR_READ_2(sc, sc->alc_expcap + PCIER_LINK_CAP);
		if ((cap & PCIEM_LINK_CAP_ASPM) != 0) {
			ctl = CSR_READ_2(sc, sc->alc_expcap + PCIER_LINK_CTL);
			/* Record the read-completion boundary. */
			if ((ctl & PCIEM_LINK_CTL_RCB) != 0)
				sc->alc_rcb = DMA_CFG_RCB_128;
			if (bootverbose)
				device_printf(sc->alc_dev, "RCB %u bytes\n",
				    sc->alc_rcb == DMA_CFG_RCB_64 ? 64 : 128);
			/* Record which ASPM states the bus negotiated. */
			state = ctl & PCIEM_LINK_CTL_ASPMC;
			if (state & PCIEM_LINK_CTL_ASPMC_L0S)
				sc->alc_flags |= ALC_FLAG_L0S;
			if (state & PCIEM_LINK_CTL_ASPMC_L1)
				sc->alc_flags |= ALC_FLAG_L1S;
			if (bootverbose)
				device_printf(sc->alc_dev, "ASPM %s %s\n",
				    aspm_state[state],
				    state == 0 ? "disabled" : "enabled");
			alc_disable_l0s_l1(sc);
		} else {
			if (bootverbose)
				device_printf(sc->alc_dev,
				    "no ASPM support\n");
		}
	} else {
		/* AR816x path: keep the PLL running through D3. */
		val = CSR_READ_4(sc, ALC_PDLL_TRNS1);
		val &= ~PDLL_TRNS1_D3PLLOFF_ENB;
		CSR_WRITE_4(sc, ALC_PDLL_TRNS1, val);
		/* 25MHz wake clock selection differs by revision. */
		val = CSR_READ_4(sc, ALC_MASTER_CFG);
		if (AR816X_REV(sc->alc_rev) <= AR816X_REV_A1 &&
		    (sc->alc_rev & 0x01) != 0) {
			if ((val & MASTER_WAKEN_25M) == 0 ||
			    (val & MASTER_CLK_SEL_DIS) == 0) {
				val |= MASTER_WAKEN_25M | MASTER_CLK_SEL_DIS;
				CSR_WRITE_4(sc, ALC_MASTER_CFG, val);
			}
		} else {
			if ((val & MASTER_WAKEN_25M) == 0 ||
			    (val & MASTER_CLK_SEL_DIS) != 0) {
				val |= MASTER_WAKEN_25M;
				val &= ~MASTER_CLK_SEL_DIS;
				CSR_WRITE_4(sc, ALC_MASTER_CFG, val);
			}
		}
	}
	alc_aspm(sc, 1, IFM_UNKNOWN);
}
1308 
1309 static void
1310 alc_config_msi(struct alc_softc *sc)
1311 {
1312 	uint32_t ctl, mod;
1313 
1314 	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0) {
1315 		/*
1316 		 * It seems interrupt moderation is controlled by
1317 		 * ALC_MSI_RETRANS_TIMER register if MSI/MSIX is active.
1318 		 * Driver uses RX interrupt moderation parameter to
1319 		 * program ALC_MSI_RETRANS_TIMER register.
1320 		 */
1321 		ctl = CSR_READ_4(sc, ALC_MSI_RETRANS_TIMER);
1322 		ctl &= ~MSI_RETRANS_TIMER_MASK;
1323 		ctl &= ~MSI_RETRANS_MASK_SEL_LINE;
1324 		mod = ALC_USECS(sc->alc_int_rx_mod);
1325 		if (mod == 0)
1326 			mod = 1;
1327 		ctl |= mod;
1328 		if (sc->alc_irq_type == PCI_INTR_TYPE_MSI)
1329 			CSR_WRITE_4(sc, ALC_MSI_RETRANS_TIMER, ctl |
1330 			    MSI_RETRANS_MASK_SEL_LINE);
1331 		else
1332 			CSR_WRITE_4(sc, ALC_MSI_RETRANS_TIMER, 0);
1333 	}
1334 }
1335 
1336 static int
1337 alc_attach(device_t dev)
1338 {
1339 	struct alc_softc *sc;
1340 	struct ifnet *ifp;
1341 	uint16_t burst;
1342 	int base, error;
1343 	u_int intr_flags;
1344 
1345 	error = 0;
1346 	sc = device_get_softc(dev);
1347 	sc->alc_dev = dev;
1348 	sc->alc_rev = pci_get_revid(dev);
1349 
1350 	callout_init_mp(&sc->alc_tick_ch);
1351 	sc->alc_ident = alc_find_ident(dev);
1352 
1353 	/* Enable bus mastering */
1354 	pci_enable_busmaster(dev);
1355 
1356 	/* Map the device. */
1357 	sc->alc_res_rid = PCIR_BAR(0);
1358 	sc->alc_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
1359 	    &sc->alc_res_rid, RF_ACTIVE);
1360 	if (error != 0) {
1361 		device_printf(dev, "cannot allocate memory resources.\n");
1362 		goto fail;
1363 	}
1364 	sc->alc_res_btag = rman_get_bustag(sc->alc_res);
1365 	sc->alc_res_bhand = rman_get_bushandle(sc->alc_res);
1366 
1367 	/* Set PHY address. */
1368 	sc->alc_phyaddr = ALC_PHY_ADDR;
1369 
1370 	/*
1371 	 * One odd thing is AR8132 uses the same PHY hardware(F1
1372 	 * gigabit PHY) of AR8131. So atphy(4) of AR8132 reports
1373 	 * the PHY supports 1000Mbps but that's not true. The PHY
1374 	 * used in AR8132 can't establish gigabit link even if it
1375 	 * shows the same PHY model/revision number of AR8131.
1376 	 */
1377 	switch (sc->alc_ident->deviceid) {
1378 	case DEVICEID_ATHEROS_E2200:
1379 	case DEVICEID_ATHEROS_E2400:
1380 		sc->alc_flags |= ALC_FLAG_E2X00;
1381 		/* FALLTHROUGH */
1382 	case DEVICEID_ATHEROS_AR8161:
1383 		if (pci_get_subvendor(dev) == VENDORID_ATHEROS &&
1384 		    pci_get_subdevice(dev) == 0x0091 && sc->alc_rev == 0)
1385 			sc->alc_flags |= ALC_FLAG_LINK_WAR;
1386 		/* FALLTHROUGH */
1387 	case DEVICEID_ATHEROS_AR8171:
1388 		sc->alc_flags |= ALC_FLAG_AR816X_FAMILY;
1389 		break;
1390 	case DEVICEID_ATHEROS_AR8162:
1391 	case DEVICEID_ATHEROS_AR8172:
1392 		sc->alc_flags |= ALC_FLAG_FASTETHER | ALC_FLAG_AR816X_FAMILY;
1393 		break;
1394 	case DEVICEID_ATHEROS_AR8152_B:
1395 	case DEVICEID_ATHEROS_AR8152_B2:
1396 		sc->alc_flags |= ALC_FLAG_APS;
1397 		/* FALLTHROUGH */
1398 	case DEVICEID_ATHEROS_AR8132:
1399 		sc->alc_flags |= ALC_FLAG_FASTETHER;
1400 		break;
1401 	case DEVICEID_ATHEROS_AR8151:
1402 	case DEVICEID_ATHEROS_AR8151_V2:
1403 		sc->alc_flags |= ALC_FLAG_APS;
1404 		/* FALLTHROUGH */
1405 	default:
1406 		break;
1407 	}
1408 	sc->alc_flags |= ALC_FLAG_JUMBO;
1409 
1410 	/*
1411 	 * It seems that AR813x/AR815x has silicon bug for SMB. In
1412 	 * addition, Atheros said that enabling SMB wouldn't improve
1413 	 * performance. However I think it's bad to access lots of
1414 	 * registers to extract MAC statistics.
1415 	 */
1416 	sc->alc_flags |= ALC_FLAG_SMB_BUG;
1417 
1418 	/*
1419 	 * Don't use Tx CMB. It is known to have silicon bug.
1420 	 */
1421 	sc->alc_flags |= ALC_FLAG_CMB_BUG;
1422 	sc->alc_chip_rev = CSR_READ_4(sc, ALC_MASTER_CFG) >>
1423 	    MASTER_CHIP_REV_SHIFT;
1424 	if (bootverbose) {
1425 		device_printf(dev, "PCI device revision : 0x%04x\n",
1426 		    sc->alc_rev);
1427 		device_printf(dev, "Chip id/revision : 0x%04x\n",
1428 		    sc->alc_chip_rev);
1429 		if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0)
1430 			device_printf(dev, "AR816x revision : 0x%x\n",
1431 				AR816X_REV(sc->alc_rev));
1432 	}
1433 	device_printf(dev, "%u Tx FIFO, %u Rx FIFO\n",
1434 	    CSR_READ_4(sc, ALC_SRAM_TX_FIFO_LEN) * 8,
1435 	    CSR_READ_4(sc, ALC_SRAM_RX_FIFO_LEN) * 8);
1436 
1437 	/* Initialize DMA parameters. */
1438 	sc->alc_dma_rd_burst = 0;
1439 	sc->alc_dma_wr_burst = 0;
1440 	sc->alc_rcb = DMA_CFG_RCB_64;
1441 	if (pci_find_extcap(dev, PCIY_EXPRESS, &base) == 0) {
1442 		sc->alc_flags |= ALC_FLAG_PCIE;
1443 		sc->alc_expcap = base;
1444 		burst = CSR_READ_2(sc, base + PCIER_DEVICE_CTL);
1445 		sc->alc_dma_rd_burst =
1446 		    (burst & PCIEM_CTL_MAX_READ_REQUEST) >> 12;
1447 		sc->alc_dma_wr_burst = (burst & PCIEM_CTL_MAX_PAYLOAD) >> 5;
1448 		if (bootverbose) {
1449 			device_printf(dev, "Read request size : %u bytes.\n",
1450 			    alc_dma_burst[sc->alc_dma_rd_burst]);
1451 			device_printf(dev, "TLP payload size : %u bytes.\n",
1452 			    alc_dma_burst[sc->alc_dma_wr_burst]);
1453 		}
1454 		if (alc_dma_burst[sc->alc_dma_rd_burst] > 1024)
1455 			sc->alc_dma_rd_burst = 3;
1456 		if (alc_dma_burst[sc->alc_dma_wr_burst] > 1024)
1457 			sc->alc_dma_wr_burst = 3;
1458 		/*
1459 		 * Force maximum payload size to 128 bytes for E2200/E2400.
1460 		 * Otherwise it triggers DMA write error.
1461 		 */
1462 		if ((sc->alc_flags & ALC_FLAG_E2X00) != 0)
1463 			sc->alc_dma_wr_burst = 0;
1464 		alc_init_pcie(sc);
1465 	}
1466 
1467 	/* Reset PHY. */
1468 	alc_phy_reset(sc);
1469 
1470 	/* Reset the ethernet controller. */
1471 	alc_stop_mac(sc);
1472 	alc_reset(sc);
1473 
1474 	sc->alc_irq_type = pci_alloc_1intr(dev, alc_msi_enable,
1475 	    &sc->alc_irq_rid, &intr_flags);
1476 
1477 	/* Allocate IRQ resources. */
1478 	sc->alc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ,
1479 	    &sc->alc_irq_rid, intr_flags);
1480 	if (error != 0) {
1481 		device_printf(dev, "cannot allocate IRQ resources.\n");
1482 		goto fail;
1483 	}
1484 
1485 	/* Create device sysctl node. */
1486 	alc_sysctl_node(sc);
1487 
1488 	if ((error = alc_dma_alloc(sc)) != 0)
1489 		goto fail;
1490 
1491 	/* Load station address. */
1492 	alc_get_macaddr(sc);
1493 
1494 	ifp = sc->alc_ifp = &sc->arpcom.ac_if;
1495 	ifp->if_softc = sc;
1496 	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1497 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1498 	ifp->if_ioctl = alc_ioctl;
1499 	ifp->if_start = alc_start;
1500 	ifp->if_init = alc_init;
1501 	ifq_set_maxlen(&ifp->if_snd, ALC_TX_RING_CNT - 1);
1502 	ifq_set_ready(&ifp->if_snd);
1503 	ifp->if_capabilities = IFCAP_TXCSUM;
1504 	ifp->if_hwassist = ALC_CSUM_FEATURES;
1505 #if 0
1506 /* XXX: WOL */
1507 	if (pci_find_extcap(dev, PCIY_PMG, &pmc) == 0) {
1508 		ifp->if_capabilities |= IFCAP_WOL_MAGIC | IFCAP_WOL_MCAST;
1509 		sc->alc_flags |= ALC_FLAG_PM;
1510 		sc->alc_pmcap = base;
1511 	}
1512 #endif
1513 	ifp->if_capenable = ifp->if_capabilities;
1514 
1515 	/* VLAN capability setup. */
1516 	ifp->if_capabilities |= IFCAP_VLAN_MTU;
1517 	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWCSUM;
1518 	ifp->if_capenable = ifp->if_capabilities;
1519 
1520 	/*
1521 	 * XXX
1522 	 * It seems enabling Tx checksum offloading makes more trouble.
1523 	 * Sometimes the controller does not receive any frames when
1524 	 * Tx checksum offloading is enabled. I'm not sure whether this
1525 	 * is a bug in Tx checksum offloading logic or I got broken
1526 	 * sample boards. To safety, don't enable Tx checksum offloading
1527 	 * by default but give chance to users to toggle it if they know
1528 	 * their controllers work without problems.
1529 	 * Fortunately, Tx checksum offloading for AR816x family
1530 	 * seems to work.
1531 	 */
1532 	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) == 0) {
1533 		ifp->if_capenable &= ~IFCAP_TXCSUM;
1534 		ifp->if_hwassist &= ~ALC_CSUM_FEATURES;
1535 	}
1536 
1537 	/* Set up MII bus. */
1538 	if ((error = mii_phy_probe(dev, &sc->alc_miibus, alc_mediachange,
1539 	    alc_mediastatus)) != 0) {
1540 		device_printf(dev, "no PHY found!\n");
1541 		goto fail;
1542 	}
1543 
1544 	ether_ifattach(ifp, sc->alc_eaddr, NULL);
1545 
1546 	/* Tell the upper layer(s) we support long frames. */
1547 	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
1548 
1549 	ifq_set_cpuid(&ifp->if_snd, rman_get_cpuid(sc->alc_irq));
1550 #if 0
1551 	/* Create local taskq. */
1552 	TASK_INIT(&sc->alc_tx_task, 1, alc_tx_task, ifp);
1553 	sc->alc_tq = taskqueue_create("alc_taskq", M_WAITOK,
1554 				      taskqueue_thread_enqueue, &sc->alc_tq);
1555 	if (sc->alc_tq == NULL) {
1556 		device_printf(dev, "could not create taskqueue.\n");
1557 		ether_ifdetach(ifp);
1558 		error = ENXIO;
1559 		goto fail;
1560 	}
1561 	taskqueue_start_threads(&sc->alc_tq, 1, TDPRI_KERN_DAEMON, -1, "%s taskq",
1562 	    device_get_nameunit(sc->alc_dev));
1563 
1564 	alc_config_msi(sc);
1565 	if ((sc->alc_flags & ALC_FLAG_MSIX) != 0)
1566 		msic = ALC_MSIX_MESSAGES;
1567 	else if ((sc->alc_flags & ALC_FLAG_MSI) != 0)
1568 		msic = ALC_MSI_MESSAGES;
1569 	else
1570 		msic = 1;
1571 	for (i = 0; i < msic; i++) {
1572 		error = bus_setup_intr(dev, sc->alc_irq[i], INTR_MPSAFE,
1573 				       alc_intr, sc,
1574 				       &sc->alc_intrhand[i], NULL);
1575 		if (error != 0)
1576 			break;
1577 	}
1578 	if (error != 0) {
1579 		device_printf(dev, "could not set up interrupt handler.\n");
1580 		taskqueue_free(sc->alc_tq);
1581 		sc->alc_tq = NULL;
1582 		ether_ifdetach(ifp);
1583 		goto fail;
1584 	}
1585 #else
1586 	alc_config_msi(sc);
1587 	error = bus_setup_intr(dev, sc->alc_irq, INTR_MPSAFE, alc_intr, sc,
1588 	    &sc->alc_intrhand, ifp->if_serializer);
1589 	if (error) {
1590 		device_printf(dev, "could not set up interrupt handler.\n");
1591 		ether_ifdetach(ifp);
1592 		goto fail;
1593 	}
1594 #endif
1595 
1596 fail:
1597 	if (error != 0)
1598 		alc_detach(dev);
1599 
1600 	return (error);
1601 }
1602 
/*
 * Device detach.  Also used by alc_attach() as its error-unwind
 * path, so every teardown step must tolerate state that was never
 * set up (hence the NULL checks).
 */
static int
alc_detach(device_t dev)
{
	struct alc_softc *sc = device_get_softc(dev);

	if (device_is_attached(dev)) {
		struct ifnet *ifp = sc->alc_ifp;

		/* Stop the chip and tear down the interrupt first. */
		lwkt_serialize_enter(ifp->if_serializer);
		alc_stop(sc);
		bus_teardown_intr(dev, sc->alc_irq, sc->alc_intrhand);
		lwkt_serialize_exit(ifp->if_serializer);

		ether_ifdetach(ifp);
	}

	if (sc->alc_miibus != NULL)
		device_delete_child(dev, sc->alc_miibus);
	bus_generic_detach(dev);

	/* Only touch the PHY if the register window was mapped. */
	if (sc->alc_res != NULL)
		alc_phy_down(sc);

	if (sc->alc_irq != NULL) {
		bus_release_resource(dev, SYS_RES_IRQ, sc->alc_irq_rid,
		    sc->alc_irq);
	}
	if (sc->alc_irq_type == PCI_INTR_TYPE_MSI)
		pci_release_msi(dev);

	if (sc->alc_res != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, sc->alc_res_rid,
		    sc->alc_res);
	}

	alc_dma_free(sc);

	return (0);
}
1642 
/* Shorthand for registering read-only 32/64-bit statistic sysctl nodes. */
#define	ALC_SYSCTL_STAT_ADD32(c, h, n, p, d)	\
	    SYSCTL_ADD_UINT(c, h, OID_AUTO, n, CTLFLAG_RD, p, 0, d)
#define	ALC_SYSCTL_STAT_ADD64(c, h, n, p, d)	\
	    SYSCTL_ADD_QUAD(c, h, OID_AUTO, n, CTLFLAG_RD, p, 0, d)
1647 
1648 static void
1649 alc_sysctl_node(struct alc_softc *sc)
1650 {
1651 	struct sysctl_ctx_list *ctx;
1652 	struct sysctl_oid *tree;
1653 	struct sysctl_oid_list *child, *parent;
1654 	struct alc_hw_stats *stats;
1655 	int error;
1656 
1657 	stats = &sc->alc_stats;
1658 	ctx = device_get_sysctl_ctx(sc->alc_dev);
1659 	child = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->alc_dev));
1660 
1661 	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "int_rx_mod",
1662 	    CTLTYPE_INT | CTLFLAG_RW, &sc->alc_int_rx_mod, 0,
1663 	    sysctl_hw_alc_int_mod, "I", "alc Rx interrupt moderation");
1664 	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "int_tx_mod",
1665 	    CTLTYPE_INT | CTLFLAG_RW, &sc->alc_int_tx_mod, 0,
1666 	    sysctl_hw_alc_int_mod, "I", "alc Tx interrupt moderation");
1667 	/* Pull in device tunables. */
1668 	sc->alc_int_rx_mod = ALC_IM_RX_TIMER_DEFAULT;
1669 	error = resource_int_value(device_get_name(sc->alc_dev),
1670 	    device_get_unit(sc->alc_dev), "int_rx_mod", &sc->alc_int_rx_mod);
1671 	if (error == 0) {
1672 		if (sc->alc_int_rx_mod < ALC_IM_TIMER_MIN ||
1673 		    sc->alc_int_rx_mod > ALC_IM_TIMER_MAX) {
1674 			device_printf(sc->alc_dev, "int_rx_mod value out of "
1675 			    "range; using default: %d\n",
1676 			    ALC_IM_RX_TIMER_DEFAULT);
1677 			sc->alc_int_rx_mod = ALC_IM_RX_TIMER_DEFAULT;
1678 		}
1679 	}
1680 	sc->alc_int_tx_mod = ALC_IM_TX_TIMER_DEFAULT;
1681 	error = resource_int_value(device_get_name(sc->alc_dev),
1682 	    device_get_unit(sc->alc_dev), "int_tx_mod", &sc->alc_int_tx_mod);
1683 	if (error == 0) {
1684 		if (sc->alc_int_tx_mod < ALC_IM_TIMER_MIN ||
1685 		    sc->alc_int_tx_mod > ALC_IM_TIMER_MAX) {
1686 			device_printf(sc->alc_dev, "int_tx_mod value out of "
1687 			    "range; using default: %d\n",
1688 			    ALC_IM_TX_TIMER_DEFAULT);
1689 			sc->alc_int_tx_mod = ALC_IM_TX_TIMER_DEFAULT;
1690 		}
1691 	}
1692 	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "process_limit",
1693 	    CTLTYPE_INT | CTLFLAG_RW, &sc->alc_process_limit, 0,
1694 	    sysctl_hw_alc_proc_limit, "I",
1695 	    "max number of Rx events to process");
1696 	/* Pull in device tunables. */
1697 	sc->alc_process_limit = ALC_PROC_DEFAULT;
1698 	error = resource_int_value(device_get_name(sc->alc_dev),
1699 	    device_get_unit(sc->alc_dev), "process_limit",
1700 	    &sc->alc_process_limit);
1701 	if (error == 0) {
1702 		if (sc->alc_process_limit < ALC_PROC_MIN ||
1703 		    sc->alc_process_limit > ALC_PROC_MAX) {
1704 			device_printf(sc->alc_dev,
1705 			    "process_limit value out of range; "
1706 			    "using default: %d\n", ALC_PROC_DEFAULT);
1707 			sc->alc_process_limit = ALC_PROC_DEFAULT;
1708 		}
1709 	}
1710 
1711 	tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats", CTLFLAG_RD,
1712 	    NULL, "ALC statistics");
1713 	parent = SYSCTL_CHILDREN(tree);
1714 
1715 	/* Rx statistics. */
1716 	tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "rx", CTLFLAG_RD,
1717 	    NULL, "Rx MAC statistics");
1718 	child = SYSCTL_CHILDREN(tree);
1719 	ALC_SYSCTL_STAT_ADD32(ctx, child, "good_frames",
1720 	    &stats->rx_frames, "Good frames");
1721 	ALC_SYSCTL_STAT_ADD32(ctx, child, "good_bcast_frames",
1722 	    &stats->rx_bcast_frames, "Good broadcast frames");
1723 	ALC_SYSCTL_STAT_ADD32(ctx, child, "good_mcast_frames",
1724 	    &stats->rx_mcast_frames, "Good multicast frames");
1725 	ALC_SYSCTL_STAT_ADD32(ctx, child, "pause_frames",
1726 	    &stats->rx_pause_frames, "Pause control frames");
1727 	ALC_SYSCTL_STAT_ADD32(ctx, child, "control_frames",
1728 	    &stats->rx_control_frames, "Control frames");
1729 	ALC_SYSCTL_STAT_ADD32(ctx, child, "crc_errs",
1730 	    &stats->rx_crcerrs, "CRC errors");
1731 	ALC_SYSCTL_STAT_ADD32(ctx, child, "len_errs",
1732 	    &stats->rx_lenerrs, "Frames with length mismatched");
1733 	ALC_SYSCTL_STAT_ADD64(ctx, child, "good_octets",
1734 	    &stats->rx_bytes, "Good octets");
1735 	ALC_SYSCTL_STAT_ADD64(ctx, child, "good_bcast_octets",
1736 	    &stats->rx_bcast_bytes, "Good broadcast octets");
1737 	ALC_SYSCTL_STAT_ADD64(ctx, child, "good_mcast_octets",
1738 	    &stats->rx_mcast_bytes, "Good multicast octets");
1739 	ALC_SYSCTL_STAT_ADD32(ctx, child, "runts",
1740 	    &stats->rx_runts, "Too short frames");
1741 	ALC_SYSCTL_STAT_ADD32(ctx, child, "fragments",
1742 	    &stats->rx_fragments, "Fragmented frames");
1743 	ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_64",
1744 	    &stats->rx_pkts_64, "64 bytes frames");
1745 	ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_65_127",
1746 	    &stats->rx_pkts_65_127, "65 to 127 bytes frames");
1747 	ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_128_255",
1748 	    &stats->rx_pkts_128_255, "128 to 255 bytes frames");
1749 	ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_256_511",
1750 	    &stats->rx_pkts_256_511, "256 to 511 bytes frames");
1751 	ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_512_1023",
1752 	    &stats->rx_pkts_512_1023, "512 to 1023 bytes frames");
1753 	ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_1024_1518",
1754 	    &stats->rx_pkts_1024_1518, "1024 to 1518 bytes frames");
1755 	ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_1519_max",
1756 	    &stats->rx_pkts_1519_max, "1519 to max frames");
1757 	ALC_SYSCTL_STAT_ADD32(ctx, child, "trunc_errs",
1758 	    &stats->rx_pkts_truncated, "Truncated frames due to MTU size");
1759 	ALC_SYSCTL_STAT_ADD32(ctx, child, "fifo_oflows",
1760 	    &stats->rx_fifo_oflows, "FIFO overflows");
1761 	ALC_SYSCTL_STAT_ADD32(ctx, child, "rrs_errs",
1762 	    &stats->rx_rrs_errs, "Return status write-back errors");
1763 	ALC_SYSCTL_STAT_ADD32(ctx, child, "align_errs",
1764 	    &stats->rx_alignerrs, "Alignment errors");
1765 	ALC_SYSCTL_STAT_ADD32(ctx, child, "filtered",
1766 	    &stats->rx_pkts_filtered,
1767 	    "Frames dropped due to address filtering");
1768 
1769 	/* Tx statistics. */
1770 	tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "tx", CTLFLAG_RD,
1771 	    NULL, "Tx MAC statistics");
1772 	child = SYSCTL_CHILDREN(tree);
1773 	ALC_SYSCTL_STAT_ADD32(ctx, child, "good_frames",
1774 	    &stats->tx_frames, "Good frames");
1775 	ALC_SYSCTL_STAT_ADD32(ctx, child, "good_bcast_frames",
1776 	    &stats->tx_bcast_frames, "Good broadcast frames");
1777 	ALC_SYSCTL_STAT_ADD32(ctx, child, "good_mcast_frames",
1778 	    &stats->tx_mcast_frames, "Good multicast frames");
1779 	ALC_SYSCTL_STAT_ADD32(ctx, child, "pause_frames",
1780 	    &stats->tx_pause_frames, "Pause control frames");
1781 	ALC_SYSCTL_STAT_ADD32(ctx, child, "control_frames",
1782 	    &stats->tx_control_frames, "Control frames");
1783 	ALC_SYSCTL_STAT_ADD32(ctx, child, "excess_defers",
1784 	    &stats->tx_excess_defer, "Frames with excessive derferrals");
1785 	ALC_SYSCTL_STAT_ADD32(ctx, child, "defers",
1786 	    &stats->tx_excess_defer, "Frames with derferrals");
1787 	ALC_SYSCTL_STAT_ADD64(ctx, child, "good_octets",
1788 	    &stats->tx_bytes, "Good octets");
1789 	ALC_SYSCTL_STAT_ADD64(ctx, child, "good_bcast_octets",
1790 	    &stats->tx_bcast_bytes, "Good broadcast octets");
1791 	ALC_SYSCTL_STAT_ADD64(ctx, child, "good_mcast_octets",
1792 	    &stats->tx_mcast_bytes, "Good multicast octets");
1793 	ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_64",
1794 	    &stats->tx_pkts_64, "64 bytes frames");
1795 	ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_65_127",
1796 	    &stats->tx_pkts_65_127, "65 to 127 bytes frames");
1797 	ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_128_255",
1798 	    &stats->tx_pkts_128_255, "128 to 255 bytes frames");
1799 	ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_256_511",
1800 	    &stats->tx_pkts_256_511, "256 to 511 bytes frames");
1801 	ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_512_1023",
1802 	    &stats->tx_pkts_512_1023, "512 to 1023 bytes frames");
1803 	ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_1024_1518",
1804 	    &stats->tx_pkts_1024_1518, "1024 to 1518 bytes frames");
1805 	ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_1519_max",
1806 	    &stats->tx_pkts_1519_max, "1519 to max frames");
1807 	ALC_SYSCTL_STAT_ADD32(ctx, child, "single_colls",
1808 	    &stats->tx_single_colls, "Single collisions");
1809 	ALC_SYSCTL_STAT_ADD32(ctx, child, "multi_colls",
1810 	    &stats->tx_multi_colls, "Multiple collisions");
1811 	ALC_SYSCTL_STAT_ADD32(ctx, child, "late_colls",
1812 	    &stats->tx_late_colls, "Late collisions");
1813 	ALC_SYSCTL_STAT_ADD32(ctx, child, "excess_colls",
1814 	    &stats->tx_excess_colls, "Excessive collisions");
1815 	ALC_SYSCTL_STAT_ADD32(ctx, child, "underruns",
1816 	    &stats->tx_underrun, "FIFO underruns");
1817 	ALC_SYSCTL_STAT_ADD32(ctx, child, "desc_underruns",
1818 	    &stats->tx_desc_underrun, "Descriptor write-back errors");
1819 	ALC_SYSCTL_STAT_ADD32(ctx, child, "len_errs",
1820 	    &stats->tx_lenerrs, "Frames with length mismatched");
1821 	ALC_SYSCTL_STAT_ADD32(ctx, child, "trunc_errs",
1822 	    &stats->tx_pkts_truncated, "Truncated frames due to MTU size");
1823 }
1824 
1825 #undef ALC_SYSCTL_STAT_ADD32
1826 #undef ALC_SYSCTL_STAT_ADD64
1827 
/*
 * Callback argument for alc_dmamap_cb(): receives the bus address of
 * the single DMA segment produced by bus_dmamap_load().
 */
struct alc_dmamap_arg {
	bus_addr_t	alc_busaddr;	/* physical address of segment 0 */
};
1831 
1832 static void
1833 alc_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
1834 {
1835 	struct alc_dmamap_arg *ctx;
1836 
1837 	if (error != 0)
1838 		return;
1839 
1840 	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
1841 
1842 	ctx = (struct alc_dmamap_arg *)arg;
1843 	ctx->alc_busaddr = segs[0].ds_addr;
1844 }
1845 
#ifdef foo
/*
 * Normal and high Tx descriptors shares single Tx high address.
 * Four Rx descriptor/return rings and CMB shares the same Rx
 * high address.
 *
 * Returns EFBIG when any ring or message block would violate the
 * hardware addressing constraints, 0 otherwise.
 */
static int
alc_check_boundary(struct alc_softc *sc)
{
	bus_addr_t cmb_end, rx_ring_end, rr_ring_end, tx_ring_end;

	rx_ring_end = sc->alc_rdata.alc_rx_ring_paddr + ALC_RX_RING_SZ;
	rr_ring_end = sc->alc_rdata.alc_rr_ring_paddr + ALC_RR_RING_SZ;
	cmb_end = sc->alc_rdata.alc_cmb_paddr + ALC_CMB_SZ;
	tx_ring_end = sc->alc_rdata.alc_tx_ring_paddr + ALC_TX_RING_SZ;

	/* 4GB boundary crossing is not allowed for any region. */
	if (ALC_ADDR_HI(rx_ring_end) !=
	    ALC_ADDR_HI(sc->alc_rdata.alc_rx_ring_paddr))
		return (EFBIG);
	if (ALC_ADDR_HI(rr_ring_end) !=
	    ALC_ADDR_HI(sc->alc_rdata.alc_rr_ring_paddr))
		return (EFBIG);
	if (ALC_ADDR_HI(cmb_end) !=
	    ALC_ADDR_HI(sc->alc_rdata.alc_cmb_paddr))
		return (EFBIG);
	if (ALC_ADDR_HI(tx_ring_end) !=
	    ALC_ADDR_HI(sc->alc_rdata.alc_tx_ring_paddr))
		return (EFBIG);

	/*
	 * Make sure Rx return descriptor/Rx descriptor/CMB use
	 * the same high address.
	 */
	if (ALC_ADDR_HI(rx_ring_end) != ALC_ADDR_HI(rr_ring_end))
		return (EFBIG);
	if (ALC_ADDR_HI(rx_ring_end) != ALC_ADDR_HI(cmb_end))
		return (EFBIG);

	return (0);
}
#endif
1883 
1884 static int
1885 alc_dma_alloc(struct alc_softc *sc)
1886 {
1887 	struct alc_txdesc *txd;
1888 	struct alc_rxdesc *rxd;
1889 	struct alc_dmamap_arg ctx;
1890 	int error, i;
1891 
1892 	/* Create parent DMA tag. */
1893 	error = bus_dma_tag_create(
1894 	    sc->alc_cdata.alc_parent_tag, /* parent */
1895 	    1, 0,			/* alignment, boundary */
1896 	    BUS_SPACE_MAXADDR,		/* lowaddr */
1897 	    BUS_SPACE_MAXADDR,		/* highaddr */
1898 	    NULL, NULL,			/* filter, filterarg */
1899 	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
1900 	    0,				/* nsegments */
1901 	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
1902 	    0,				/* flags */
1903 	    &sc->alc_cdata.alc_parent_tag);
1904 	if (error != 0) {
1905 		device_printf(sc->alc_dev,
1906 		    "could not create parent DMA tag.\n");
1907 		goto fail;
1908 	}
1909 
1910 	/* Create DMA tag for Tx descriptor ring. */
1911 	error = bus_dma_tag_create(
1912 	    sc->alc_cdata.alc_parent_tag, /* parent */
1913 	    ALC_TX_RING_ALIGN, 0,	/* alignment, boundary */
1914 	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
1915 	    BUS_SPACE_MAXADDR,		/* highaddr */
1916 	    NULL, NULL,			/* filter, filterarg */
1917 	    ALC_TX_RING_SZ,		/* maxsize */
1918 	    1,				/* nsegments */
1919 	    ALC_TX_RING_SZ,		/* maxsegsize */
1920 	    0,				/* flags */
1921 	    &sc->alc_cdata.alc_tx_ring_tag);
1922 	if (error != 0) {
1923 		device_printf(sc->alc_dev,
1924 		    "could not create Tx ring DMA tag.\n");
1925 		goto fail;
1926 	}
1927 
1928 	/* Create DMA tag for Rx free descriptor ring. */
1929 	error = bus_dma_tag_create(
1930 	    sc->alc_cdata.alc_parent_tag, /* parent */
1931 	    ALC_RX_RING_ALIGN, 0,	/* alignment, boundary */
1932 	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
1933 	    BUS_SPACE_MAXADDR,		/* highaddr */
1934 	    NULL, NULL,			/* filter, filterarg */
1935 	    ALC_RX_RING_SZ,		/* maxsize */
1936 	    1,				/* nsegments */
1937 	    ALC_RX_RING_SZ,		/* maxsegsize */
1938 	    0,				/* flags */
1939 	    &sc->alc_cdata.alc_rx_ring_tag);
1940 	if (error != 0) {
1941 		device_printf(sc->alc_dev,
1942 		    "could not create Rx ring DMA tag.\n");
1943 		goto fail;
1944 	}
1945 	/* Create DMA tag for Rx return descriptor ring. */
1946 	error = bus_dma_tag_create(
1947 	    sc->alc_cdata.alc_parent_tag, /* parent */
1948 	    ALC_RR_RING_ALIGN, 0,	/* alignment, boundary */
1949 	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
1950 	    BUS_SPACE_MAXADDR,		/* highaddr */
1951 	    NULL, NULL,			/* filter, filterarg */
1952 	    ALC_RR_RING_SZ,		/* maxsize */
1953 	    1,				/* nsegments */
1954 	    ALC_RR_RING_SZ,		/* maxsegsize */
1955 	    0,				/* flags */
1956 	    &sc->alc_cdata.alc_rr_ring_tag);
1957 	if (error != 0) {
1958 		device_printf(sc->alc_dev,
1959 		    "could not create Rx return ring DMA tag.\n");
1960 		goto fail;
1961 	}
1962 
1963 	/* Create DMA tag for coalescing message block. */
1964 	error = bus_dma_tag_create(
1965 	    sc->alc_cdata.alc_parent_tag, /* parent */
1966 	    ALC_CMB_ALIGN, 0,		/* alignment, boundary */
1967 	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
1968 	    BUS_SPACE_MAXADDR,		/* highaddr */
1969 	    NULL, NULL,			/* filter, filterarg */
1970 	    ALC_CMB_SZ,			/* maxsize */
1971 	    1,				/* nsegments */
1972 	    ALC_CMB_SZ,			/* maxsegsize */
1973 	    0,				/* flags */
1974 	    &sc->alc_cdata.alc_cmb_tag);
1975 	if (error != 0) {
1976 		device_printf(sc->alc_dev,
1977 		    "could not create CMB DMA tag.\n");
1978 		goto fail;
1979 	}
1980 	/* Create DMA tag for status message block. */
1981 	error = bus_dma_tag_create(
1982 	    sc->alc_cdata.alc_parent_tag, /* parent */
1983 	    ALC_SMB_ALIGN, 0,		/* alignment, boundary */
1984 	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
1985 	    BUS_SPACE_MAXADDR,		/* highaddr */
1986 	    NULL, NULL,			/* filter, filterarg */
1987 	    ALC_SMB_SZ,			/* maxsize */
1988 	    1,				/* nsegments */
1989 	    ALC_SMB_SZ,			/* maxsegsize */
1990 	    0,				/* flags */
1991 	    &sc->alc_cdata.alc_smb_tag);
1992 	if (error != 0) {
1993 		device_printf(sc->alc_dev,
1994 		    "could not create SMB DMA tag.\n");
1995 		goto fail;
1996 	}
1997 
1998 	/* Allocate DMA'able memory and load the DMA map for Tx ring. */
1999 	error = bus_dmamem_alloc(sc->alc_cdata.alc_tx_ring_tag,
2000 	    (void **)&sc->alc_rdata.alc_tx_ring,
2001 	    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
2002 	    &sc->alc_cdata.alc_tx_ring_map);
2003 	if (error != 0) {
2004 		device_printf(sc->alc_dev,
2005 		    "could not allocate DMA'able memory for Tx ring.\n");
2006 		goto fail;
2007 	}
2008 	ctx.alc_busaddr = 0;
2009 	error = bus_dmamap_load(sc->alc_cdata.alc_tx_ring_tag,
2010 	    sc->alc_cdata.alc_tx_ring_map, sc->alc_rdata.alc_tx_ring,
2011 	    ALC_TX_RING_SZ, alc_dmamap_cb, &ctx, 0);
2012 	if (error != 0 || ctx.alc_busaddr == 0) {
2013 		device_printf(sc->alc_dev,
2014 		    "could not load DMA'able memory for Tx ring.\n");
2015 		goto fail;
2016 	}
2017 	sc->alc_rdata.alc_tx_ring_paddr = ctx.alc_busaddr;
2018 
2019 	/* Allocate DMA'able memory and load the DMA map for Rx ring. */
2020 	error = bus_dmamem_alloc(sc->alc_cdata.alc_rx_ring_tag,
2021 	    (void **)&sc->alc_rdata.alc_rx_ring,
2022 	    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
2023 	    &sc->alc_cdata.alc_rx_ring_map);
2024 	if (error != 0) {
2025 		device_printf(sc->alc_dev,
2026 		    "could not allocate DMA'able memory for Rx ring.\n");
2027 		goto fail;
2028 	}
2029 	ctx.alc_busaddr = 0;
2030 	error = bus_dmamap_load(sc->alc_cdata.alc_rx_ring_tag,
2031 	    sc->alc_cdata.alc_rx_ring_map, sc->alc_rdata.alc_rx_ring,
2032 	    ALC_RX_RING_SZ, alc_dmamap_cb, &ctx, 0);
2033 	if (error != 0 || ctx.alc_busaddr == 0) {
2034 		device_printf(sc->alc_dev,
2035 		    "could not load DMA'able memory for Rx ring.\n");
2036 		goto fail;
2037 	}
2038 	sc->alc_rdata.alc_rx_ring_paddr = ctx.alc_busaddr;
2039 
2040 	/* Allocate DMA'able memory and load the DMA map for Rx return ring. */
2041 	error = bus_dmamem_alloc(sc->alc_cdata.alc_rr_ring_tag,
2042 	    (void **)&sc->alc_rdata.alc_rr_ring,
2043 	    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
2044 	    &sc->alc_cdata.alc_rr_ring_map);
2045 	if (error != 0) {
2046 		device_printf(sc->alc_dev,
2047 		    "could not allocate DMA'able memory for Rx return ring.\n");
2048 		goto fail;
2049 	}
2050 	ctx.alc_busaddr = 0;
2051 	error = bus_dmamap_load(sc->alc_cdata.alc_rr_ring_tag,
2052 	    sc->alc_cdata.alc_rr_ring_map, sc->alc_rdata.alc_rr_ring,
2053 	    ALC_RR_RING_SZ, alc_dmamap_cb, &ctx, 0);
2054 	if (error != 0 || ctx.alc_busaddr == 0) {
2055 		device_printf(sc->alc_dev,
2056 		    "could not load DMA'able memory for Tx ring.\n");
2057 		goto fail;
2058 	}
2059 	sc->alc_rdata.alc_rr_ring_paddr = ctx.alc_busaddr;
2060 
2061 	/* Allocate DMA'able memory and load the DMA map for CMB. */
2062 	error = bus_dmamem_alloc(sc->alc_cdata.alc_cmb_tag,
2063 	    (void **)&sc->alc_rdata.alc_cmb,
2064 	    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
2065 	    &sc->alc_cdata.alc_cmb_map);
2066 	if (error != 0) {
2067 		device_printf(sc->alc_dev,
2068 		    "could not allocate DMA'able memory for CMB.\n");
2069 		goto fail;
2070 	}
2071 	ctx.alc_busaddr = 0;
2072 	error = bus_dmamap_load(sc->alc_cdata.alc_cmb_tag,
2073 	    sc->alc_cdata.alc_cmb_map, sc->alc_rdata.alc_cmb,
2074 	    ALC_CMB_SZ, alc_dmamap_cb, &ctx, 0);
2075 	if (error != 0 || ctx.alc_busaddr == 0) {
2076 		device_printf(sc->alc_dev,
2077 		    "could not load DMA'able memory for CMB.\n");
2078 		goto fail;
2079 	}
2080 	sc->alc_rdata.alc_cmb_paddr = ctx.alc_busaddr;
2081 
2082 	/* Allocate DMA'able memory and load the DMA map for SMB. */
2083 	error = bus_dmamem_alloc(sc->alc_cdata.alc_smb_tag,
2084 	    (void **)&sc->alc_rdata.alc_smb,
2085 	    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
2086 	    &sc->alc_cdata.alc_smb_map);
2087 	if (error != 0) {
2088 		device_printf(sc->alc_dev,
2089 		    "could not allocate DMA'able memory for SMB.\n");
2090 		goto fail;
2091 	}
2092 	ctx.alc_busaddr = 0;
2093 	error = bus_dmamap_load(sc->alc_cdata.alc_smb_tag,
2094 	    sc->alc_cdata.alc_smb_map, sc->alc_rdata.alc_smb,
2095 	    ALC_SMB_SZ, alc_dmamap_cb, &ctx, 0);
2096 	if (error != 0 || ctx.alc_busaddr == 0) {
2097 		device_printf(sc->alc_dev,
2098 		    "could not load DMA'able memory for CMB.\n");
2099 		goto fail;
2100 	}
2101 	sc->alc_rdata.alc_smb_paddr = ctx.alc_busaddr;
2102 
2103 #ifdef foo
2104 	/*
2105 	 * All of the status blocks and descriptor rings are
2106 	 * allocated at lower 4GB, their addresses high 32bits
2107 	 * part are same (all 0).
2108 	 */
2109 
2110 	/* Make sure we've not crossed 4GB boundary. */
2111 	if ((error = alc_check_boundary(sc)) != 0) {
2112 		device_printf(sc->alc_dev, "4GB boundary crossed, "
2113 		    "switching to 32bit DMA addressing mode.\n");
2114 		alc_dma_free(sc);
2115 		/*
2116 		 * Limit max allowable DMA address space to 32bit
2117 		 * and try again.
2118 		 */
2119 		lowaddr = BUS_SPACE_MAXADDR_32BIT;
2120 		goto again;
2121 	}
2122 #endif
2123 
2124 	/*
2125 	 * Create Tx buffer parent tag.
2126 	 * AR81[3567]x allows 64bit DMA addressing of Tx/Rx buffers
2127 	 * so it needs separate parent DMA tag as parent DMA address
2128 	 * space could be restricted to be within 32bit address space
2129 	 * by 4GB boundary crossing.
2130 	 */
2131 	error = bus_dma_tag_create(
2132 	    sc->alc_cdata.alc_parent_tag, /* parent */
2133 	    1, 0,			/* alignment, boundary */
2134 	    BUS_SPACE_MAXADDR,		/* lowaddr */
2135 	    BUS_SPACE_MAXADDR,		/* highaddr */
2136 	    NULL, NULL,			/* filter, filterarg */
2137 	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
2138 	    0,				/* nsegments */
2139 	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
2140 	    0,				/* flags */
2141 	    &sc->alc_cdata.alc_buffer_tag);
2142 	if (error != 0) {
2143 		device_printf(sc->alc_dev,
2144 		    "could not create parent buffer DMA tag.\n");
2145 		goto fail;
2146 	}
2147 
2148 	/* Create DMA tag for Tx buffers. */
2149 	error = bus_dma_tag_create(
2150 	    sc->alc_cdata.alc_buffer_tag, /* parent */
2151 	    1, 0,			/* alignment, boundary */
2152 	    BUS_SPACE_MAXADDR,		/* lowaddr */
2153 	    BUS_SPACE_MAXADDR,		/* highaddr */
2154 	    NULL, NULL,			/* filter, filterarg */
2155 	    ALC_TSO_MAXSIZE,		/* maxsize */
2156 	    ALC_MAXTXSEGS,		/* nsegments */
2157 	    ALC_TSO_MAXSEGSIZE,		/* maxsegsize */
2158 	    BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE, /* flags */
2159 	    &sc->alc_cdata.alc_tx_tag);
2160 	if (error != 0) {
2161 		device_printf(sc->alc_dev, "could not create Tx DMA tag.\n");
2162 		goto fail;
2163 	}
2164 
2165 	/* Create DMA tag for Rx buffers. */
2166 	error = bus_dma_tag_create(
2167 	    sc->alc_cdata.alc_buffer_tag, /* parent */
2168 	    ALC_RX_BUF_ALIGN, 0,	/* alignment, boundary */
2169 	    BUS_SPACE_MAXADDR,		/* lowaddr */
2170 	    BUS_SPACE_MAXADDR,		/* highaddr */
2171 	    NULL, NULL,			/* filter, filterarg */
2172 	    MCLBYTES,			/* maxsize */
2173 	    1,				/* nsegments */
2174 	    MCLBYTES,			/* maxsegsize */
2175 	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW | BUS_DMA_ALIGNED, /* flags */
2176 	    &sc->alc_cdata.alc_rx_tag);
2177 	if (error != 0) {
2178 		device_printf(sc->alc_dev, "could not create Rx DMA tag.\n");
2179 		goto fail;
2180 	}
2181 	/* Create DMA maps for Tx buffers. */
2182 	for (i = 0; i < ALC_TX_RING_CNT; i++) {
2183 		txd = &sc->alc_cdata.alc_txdesc[i];
2184 		txd->tx_m = NULL;
2185 		txd->tx_dmamap = NULL;
2186 		error = bus_dmamap_create(sc->alc_cdata.alc_tx_tag,
2187 					  BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,
2188 					  &txd->tx_dmamap);
2189 		if (error != 0) {
2190 			device_printf(sc->alc_dev,
2191 			    "could not create Tx dmamap.\n");
2192 			goto fail;
2193 		}
2194 	}
2195 	/* Create DMA maps for Rx buffers. */
2196 	error = bus_dmamap_create(sc->alc_cdata.alc_rx_tag,
2197 				  BUS_DMA_WAITOK,
2198 				  &sc->alc_cdata.alc_rx_sparemap);
2199 	if (error) {
2200 		device_printf(sc->alc_dev,
2201 		    "could not create spare Rx dmamap.\n");
2202 		goto fail;
2203 	}
2204 	for (i = 0; i < ALC_RX_RING_CNT; i++) {
2205 		rxd = &sc->alc_cdata.alc_rxdesc[i];
2206 		rxd->rx_m = NULL;
2207 		rxd->rx_dmamap = NULL;
2208 		error = bus_dmamap_create(sc->alc_cdata.alc_rx_tag,
2209 					  BUS_DMA_WAITOK,
2210 					  &rxd->rx_dmamap);
2211 		if (error != 0) {
2212 			device_printf(sc->alc_dev,
2213 			    "could not create Rx dmamap.\n");
2214 			goto fail;
2215 		}
2216 	}
2217 
2218 fail:
2219 	return (error);
2220 }
2221 
/*
 * Release every DMA resource created by alc_dma_alloc().  Safe to call
 * on a partially-constructed state: each step is guarded by a NULL
 * check and the fields are reset so a second call is a no-op.
 * For each ring/block the order is: unload map, free memory, then
 * destroy the tag; per-buffer maps are destroyed before their tag.
 */
static void
alc_dma_free(struct alc_softc *sc)
{
	struct alc_txdesc *txd;
	struct alc_rxdesc *rxd;
	int i;

	/* Tx buffers. */
	if (sc->alc_cdata.alc_tx_tag != NULL) {
		for (i = 0; i < ALC_TX_RING_CNT; i++) {
			txd = &sc->alc_cdata.alc_txdesc[i];
			if (txd->tx_dmamap != NULL) {
				bus_dmamap_destroy(sc->alc_cdata.alc_tx_tag,
				    txd->tx_dmamap);
				txd->tx_dmamap = NULL;
			}
		}
		bus_dma_tag_destroy(sc->alc_cdata.alc_tx_tag);
		sc->alc_cdata.alc_tx_tag = NULL;
	}
	/* Rx buffers */
	if (sc->alc_cdata.alc_rx_tag != NULL) {
		for (i = 0; i < ALC_RX_RING_CNT; i++) {
			rxd = &sc->alc_cdata.alc_rxdesc[i];
			if (rxd->rx_dmamap != NULL) {
				bus_dmamap_destroy(sc->alc_cdata.alc_rx_tag,
				    rxd->rx_dmamap);
				rxd->rx_dmamap = NULL;
			}
		}
		/* The spare map is also tied to the Rx buffer tag. */
		if (sc->alc_cdata.alc_rx_sparemap != NULL) {
			bus_dmamap_destroy(sc->alc_cdata.alc_rx_tag,
			    sc->alc_cdata.alc_rx_sparemap);
			sc->alc_cdata.alc_rx_sparemap = NULL;
		}
		bus_dma_tag_destroy(sc->alc_cdata.alc_rx_tag);
		sc->alc_cdata.alc_rx_tag = NULL;
	}
	/* Tx descriptor ring. */
	if (sc->alc_cdata.alc_tx_ring_tag != NULL) {
		/* Non-zero paddr means the map was successfully loaded. */
		if (sc->alc_rdata.alc_tx_ring_paddr != 0)
			bus_dmamap_unload(sc->alc_cdata.alc_tx_ring_tag,
			    sc->alc_cdata.alc_tx_ring_map);
		if (sc->alc_rdata.alc_tx_ring != NULL)
			bus_dmamem_free(sc->alc_cdata.alc_tx_ring_tag,
			    sc->alc_rdata.alc_tx_ring,
			    sc->alc_cdata.alc_tx_ring_map);
		sc->alc_rdata.alc_tx_ring_paddr = 0;
		sc->alc_rdata.alc_tx_ring = NULL;
		sc->alc_cdata.alc_tx_ring_map = NULL;
		bus_dma_tag_destroy(sc->alc_cdata.alc_tx_ring_tag);
		sc->alc_cdata.alc_tx_ring_tag = NULL;
	}
	/* Rx ring. */
	if (sc->alc_cdata.alc_rx_ring_tag != NULL) {
		if (sc->alc_rdata.alc_rx_ring_paddr != 0)
			bus_dmamap_unload(sc->alc_cdata.alc_rx_ring_tag,
			    sc->alc_cdata.alc_rx_ring_map);
		if (sc->alc_rdata.alc_rx_ring != NULL)
			bus_dmamem_free(sc->alc_cdata.alc_rx_ring_tag,
			    sc->alc_rdata.alc_rx_ring,
			    sc->alc_cdata.alc_rx_ring_map);
		sc->alc_rdata.alc_rx_ring_paddr = 0;
		sc->alc_rdata.alc_rx_ring = NULL;
		sc->alc_cdata.alc_rx_ring_map = NULL;
		bus_dma_tag_destroy(sc->alc_cdata.alc_rx_ring_tag);
		sc->alc_cdata.alc_rx_ring_tag = NULL;
	}
	/* Rx return ring. */
	if (sc->alc_cdata.alc_rr_ring_tag != NULL) {
		if (sc->alc_rdata.alc_rr_ring_paddr != 0)
			bus_dmamap_unload(sc->alc_cdata.alc_rr_ring_tag,
			    sc->alc_cdata.alc_rr_ring_map);
		if (sc->alc_rdata.alc_rr_ring != NULL)
			bus_dmamem_free(sc->alc_cdata.alc_rr_ring_tag,
			    sc->alc_rdata.alc_rr_ring,
			    sc->alc_cdata.alc_rr_ring_map);
		sc->alc_rdata.alc_rr_ring_paddr = 0;
		sc->alc_rdata.alc_rr_ring = NULL;
		sc->alc_cdata.alc_rr_ring_map = NULL;
		bus_dma_tag_destroy(sc->alc_cdata.alc_rr_ring_tag);
		sc->alc_cdata.alc_rr_ring_tag = NULL;
	}
	/* CMB block */
	if (sc->alc_cdata.alc_cmb_tag != NULL) {
		if (sc->alc_rdata.alc_cmb_paddr != 0)
			bus_dmamap_unload(sc->alc_cdata.alc_cmb_tag,
			    sc->alc_cdata.alc_cmb_map);
		if (sc->alc_rdata.alc_cmb != NULL)
			bus_dmamem_free(sc->alc_cdata.alc_cmb_tag,
			    sc->alc_rdata.alc_cmb,
			    sc->alc_cdata.alc_cmb_map);
		sc->alc_rdata.alc_cmb_paddr = 0;
		sc->alc_rdata.alc_cmb = NULL;
		sc->alc_cdata.alc_cmb_map = NULL;
		bus_dma_tag_destroy(sc->alc_cdata.alc_cmb_tag);
		sc->alc_cdata.alc_cmb_tag = NULL;
	}
	/* SMB block */
	if (sc->alc_cdata.alc_smb_tag != NULL) {
		if (sc->alc_rdata.alc_smb_paddr != 0)
			bus_dmamap_unload(sc->alc_cdata.alc_smb_tag,
			    sc->alc_cdata.alc_smb_map);
		if (sc->alc_rdata.alc_smb != NULL)
			bus_dmamem_free(sc->alc_cdata.alc_smb_tag,
			    sc->alc_rdata.alc_smb,
			    sc->alc_cdata.alc_smb_map);
		sc->alc_rdata.alc_smb_paddr = 0;
		sc->alc_rdata.alc_smb = NULL;
		sc->alc_cdata.alc_smb_map = NULL;
		bus_dma_tag_destroy(sc->alc_cdata.alc_smb_tag);
		sc->alc_cdata.alc_smb_tag = NULL;
	}
	/* Buffer parent tag last: child buffer tags are gone by now. */
	if (sc->alc_cdata.alc_buffer_tag != NULL) {
		bus_dma_tag_destroy(sc->alc_cdata.alc_buffer_tag);
		sc->alc_cdata.alc_buffer_tag = NULL;
	}
	if (sc->alc_cdata.alc_parent_tag != NULL) {
		bus_dma_tag_destroy(sc->alc_cdata.alc_parent_tag);
		sc->alc_cdata.alc_parent_tag = NULL;
	}
}
2344 
2345 static int
2346 alc_shutdown(device_t dev)
2347 {
2348 
2349 	return (alc_suspend(dev));
2350 }
2351 
2352 #if 0
2353 /* XXX: LINK SPEED */
2354 /*
2355  * Note, this driver resets the link speed to 10/100Mbps by
2356  * restarting auto-negotiation in suspend/shutdown phase but we
2357  * don't know whether that auto-negotiation would succeed or not
2358  * as driver has no control after powering off/suspend operation.
 * If the renegotiation fails, WOL may not work.  Running at 1Gbps
 * would draw more power than the 375mA at 3.3V specified by the
 * PCI specification, and that could result in power to the
 * ethernet controller being shut down completely.
2363  *
2364  * TODO
2365  * Save current negotiated media speed/duplex/flow-control to
2366  * softc and restore the same link again after resuming. PHY
2367  * handling such as power down/resetting to 100Mbps may be better
2368  * handled in suspend method in phy driver.
2369  */
2370 static void
2371 alc_setlinkspeed(struct alc_softc *sc)
2372 {
2373 	struct mii_data *mii;
2374 	int aneg, i;
2375 
2376 	mii = device_get_softc(sc->alc_miibus);
2377 	mii_pollstat(mii);
2378 	aneg = 0;
2379 	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
2380 	    (IFM_ACTIVE | IFM_AVALID)) {
2381 		switch IFM_SUBTYPE(mii->mii_media_active) {
2382 		case IFM_10_T:
2383 		case IFM_100_TX:
2384 			return;
2385 		case IFM_1000_T:
2386 			aneg++;
2387 			break;
2388 		default:
2389 			break;
2390 		}
2391 	}
2392 	alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr, MII_100T2CR, 0);
2393 	alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
2394 	    MII_ANAR, ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10 | ANAR_CSMA);
2395 	alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
2396 	    MII_BMCR, BMCR_RESET | BMCR_AUTOEN | BMCR_STARTNEG);
2397 	DELAY(1000);
2398 	if (aneg != 0) {
2399 		/*
2400 		 * Poll link state until alc(4) get a 10/100Mbps link.
2401 		 */
2402 		for (i = 0; i < MII_ANEGTICKS_GIGE; i++) {
2403 			mii_pollstat(mii);
2404 			if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID))
2405 			    == (IFM_ACTIVE | IFM_AVALID)) {
2406 				switch (IFM_SUBTYPE(
2407 				    mii->mii_media_active)) {
2408 				case IFM_10_T:
2409 				case IFM_100_TX:
2410 					alc_mac_config(sc);
2411 					return;
2412 				default:
2413 					break;
2414 				}
2415 			}
2416 			ALC_UNLOCK(sc);
2417 			pause("alclnk", hz);
2418 			ALC_LOCK(sc);
2419 		}
2420 		if (i == MII_ANEGTICKS_GIGE)
2421 			device_printf(sc->alc_dev,
2422 			    "establishing a link failed, WOL may not work!");
2423 	}
2424 	/*
2425 	 * No link, force MAC to have 100Mbps, full-duplex link.
2426 	 * This is the last resort and may/may not work.
2427 	 */
2428 	mii->mii_media_status = IFM_AVALID | IFM_ACTIVE;
2429 	mii->mii_media_active = IFM_ETHER | IFM_100_TX | IFM_FDX;
2430 	alc_mac_config(sc);
2431 }
2432 #endif
2433 
2434 #if 0
2435 /* XXX: WOL */
2436 static void
2437 alc_setwol(struct alc_softc *sc)
2438 {
2439 	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0)
2440 		alc_setwol_816x(sc);
2441 	else
2442 		alc_setwol_813x(sc);
2443 }
2444 
/*
 * Program WOL state on AR813x-class parts before suspend.  If the
 * device lacks PM capability, WOL is disabled and the PHY is powered
 * down; otherwise the MAC is left able to receive (magic) packets and
 * PME is requested via PCI power-management config space.
 */
static void
alc_setwol_813x(struct alc_softc *sc)
{
	struct ifnet *ifp;
	uint32_t reg, pmcs;
	uint16_t pmstat;

	ALC_LOCK_ASSERT(sc);

	alc_disable_l0s_l1(sc);
	ifp = sc->alc_ifp;
	if ((sc->alc_flags & ALC_FLAG_PM) == 0) {
		/* No PM capability: disable WOL entirely. */
		/* Disable WOL. */
		CSR_WRITE_4(sc, ALC_WOL_CFG, 0);
		reg = CSR_READ_4(sc, ALC_PCIE_PHYMISC);
		reg |= PCIE_PHYMISC_FORCE_RCV_DET;
		CSR_WRITE_4(sc, ALC_PCIE_PHYMISC, reg);
		/* Force PHY power down. */
		alc_phy_down(sc);
		CSR_WRITE_4(sc, ALC_MASTER_CFG,
			CSR_READ_4(sc, ALC_MASTER_CFG) | MASTER_CLK_SEL_DIS);
		return;
	}

	if ((ifp->if_capenable & IFCAP_WOL) != 0) {
		/* Drop to 10/100 to stay within the suspend power budget. */
		if ((sc->alc_flags & ALC_FLAG_FASTETHER) == 0)
			alc_setlinkspeed(sc);
		CSR_WRITE_4(sc, ALC_MASTER_CFG,
			CSR_READ_4(sc, ALC_MASTER_CFG) & ~MASTER_CLK_SEL_DIS);
	}

	/* Arm magic-packet wakeup if requested. */
	pmcs = 0;
	if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0)
		pmcs |= WOL_CFG_MAGIC | WOL_CFG_MAGIC_ENB;
	CSR_WRITE_4(sc, ALC_WOL_CFG, pmcs);
	/* Keep Rx alive only as far as WOL requires. */
	reg = CSR_READ_4(sc, ALC_MAC_CFG);
	reg &= ~(MAC_CFG_DBG | MAC_CFG_PROMISC | MAC_CFG_ALLMULTI |
	    MAC_CFG_BCAST);
	if ((ifp->if_capenable & IFCAP_WOL_MCAST) != 0)
		reg |= MAC_CFG_ALLMULTI | MAC_CFG_BCAST;
	if ((ifp->if_capenable & IFCAP_WOL) != 0)
		reg |= MAC_CFG_RX_ENB;
	CSR_WRITE_4(sc, ALC_MAC_CFG, reg);

	reg = CSR_READ_4(sc, ALC_PCIE_PHYMISC);
	reg |= PCIE_PHYMISC_FORCE_RCV_DET;
	CSR_WRITE_4(sc, ALC_PCIE_PHYMISC, reg);
	if ((ifp->if_capenable & IFCAP_WOL) == 0) {
		/* WOL disabled, PHY power down. */
		alc_phy_down(sc);
		CSR_WRITE_4(sc, ALC_MASTER_CFG,
			CSR_READ_4(sc, ALC_MASTER_CFG) | MASTER_CLK_SEL_DIS);

	}
	/* Request PME. */
	pmstat = pci_read_config(sc->alc_dev,
				 sc->alc_pmcap + PCIR_POWER_STATUS, 2);
	pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
	if ((ifp->if_capenable & IFCAP_WOL) != 0)
		pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
	pci_write_config(sc->alc_dev,
			 sc->alc_pmcap + PCIR_POWER_STATUS, pmstat, 2);
}
2508 
/*
 * Program WOL state on AR816x-class parts before suspend.  Computes
 * the MASTER/MAC/GPHY register images first, then commits them after
 * toggling the internal oscillator; finally requests PME through PCI
 * power-management config space when PM is available.
 */
static void
alc_setwol_816x(struct alc_softc *sc)
{
	struct ifnet *ifp;
	uint32_t gphy, mac, master, pmcs, reg;
	uint16_t pmstat;

	ALC_LOCK_ASSERT(sc);

	ifp = sc->alc_ifp;
	master = CSR_READ_4(sc, ALC_MASTER_CFG);
	master &= ~MASTER_CLK_SEL_DIS;
	gphy = CSR_READ_4(sc, ALC_GPHY_CFG);
	gphy &= ~(GPHY_CFG_EXT_RESET | GPHY_CFG_LED_MODE | GPHY_CFG_100AB_ENB |
	    GPHY_CFG_PHY_PLL_ON);
	gphy |= GPHY_CFG_HIB_EN | GPHY_CFG_HIB_PULSE | GPHY_CFG_SEL_ANA_RESET;
	if ((sc->alc_flags & ALC_FLAG_PM) == 0) {
		/* No PM capability: disable WOL, power the PHY down. */
		CSR_WRITE_4(sc, ALC_WOL_CFG, 0);
		gphy |= GPHY_CFG_PHY_IDDQ | GPHY_CFG_PWDOWN_HW;
		mac = CSR_READ_4(sc, ALC_MAC_CFG);
	} else {
		if ((ifp->if_capenable & IFCAP_WOL) != 0) {
			gphy |= GPHY_CFG_EXT_RESET;
			/* Drop to 10/100 to stay within power budget. */
			if ((sc->alc_flags & ALC_FLAG_FASTETHER) == 0)
				alc_setlinkspeed(sc);
		}
		/* Arm magic-packet wakeup if requested. */
		pmcs = 0;
		if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0)
			pmcs |= WOL_CFG_MAGIC | WOL_CFG_MAGIC_ENB;
		CSR_WRITE_4(sc, ALC_WOL_CFG, pmcs);
		/* Keep Rx alive only as far as WOL requires. */
		mac = CSR_READ_4(sc, ALC_MAC_CFG);
		mac &= ~(MAC_CFG_DBG | MAC_CFG_PROMISC | MAC_CFG_ALLMULTI |
		    MAC_CFG_BCAST);
		if ((ifp->if_capenable & IFCAP_WOL_MCAST) != 0)
			mac |= MAC_CFG_ALLMULTI | MAC_CFG_BCAST;
		if ((ifp->if_capenable & IFCAP_WOL) != 0)
			mac |= MAC_CFG_RX_ENB;
		alc_miiext_writereg(sc, MII_EXT_ANEG, MII_EXT_ANEG_S3DIG10,
		    ANEG_S3DIG10_SL);
	}

	/* Enable OSC. */
	reg = CSR_READ_4(sc, ALC_MISC);
	reg &= ~MISC_INTNLOSC_OPEN;
	CSR_WRITE_4(sc, ALC_MISC, reg);
	reg |= MISC_INTNLOSC_OPEN;
	CSR_WRITE_4(sc, ALC_MISC, reg);
	/* Commit the register images computed above. */
	CSR_WRITE_4(sc, ALC_MASTER_CFG, master);
	CSR_WRITE_4(sc, ALC_MAC_CFG, mac);
	CSR_WRITE_4(sc, ALC_GPHY_CFG, gphy);
	reg = CSR_READ_4(sc, ALC_PDLL_TRNS1);
	reg |= PDLL_TRNS1_D3PLLOFF_ENB;
	CSR_WRITE_4(sc, ALC_PDLL_TRNS1, reg);

	if ((sc->alc_flags & ALC_FLAG_PM) != 0) {
		/* Request PME. */
		pmstat = pci_read_config(sc->alc_dev,
		    sc->alc_pmcap + PCIR_POWER_STATUS, 2);
		pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
		if ((ifp->if_capenable & IFCAP_WOL) != 0)
			pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
		pci_write_config(sc->alc_dev,
		    sc->alc_pmcap + PCIR_POWER_STATUS, pmstat, 2);
	}
}
2574 
2575 #endif
2576 
2577 static int
2578 alc_suspend(device_t dev)
2579 {
2580 	struct alc_softc *sc = device_get_softc(dev);
2581 	struct ifnet *ifp = &sc->arpcom.ac_if;
2582 
2583 	lwkt_serialize_enter(ifp->if_serializer);
2584 	alc_stop(sc);
2585 #if 0
2586 /* XXX: WOL */
2587 	alc_setwol(sc);
2588 #endif
2589 	lwkt_serialize_exit(ifp->if_serializer);
2590 
2591 	return (0);
2592 }
2593 
2594 static int
2595 alc_resume(device_t dev)
2596 {
2597 	struct alc_softc *sc = device_get_softc(dev);
2598 	struct ifnet *ifp = &sc->arpcom.ac_if;
2599 	uint16_t pmstat;
2600 
2601 	lwkt_serialize_enter(ifp->if_serializer);
2602 
2603 	if ((sc->alc_flags & ALC_FLAG_PM) != 0) {
2604 		/* Disable PME and clear PME status. */
2605 		pmstat = pci_read_config(sc->alc_dev,
2606 		    sc->alc_pmcap + PCIR_POWER_STATUS, 2);
2607 		if ((pmstat & PCIM_PSTAT_PMEENABLE) != 0) {
2608 			pmstat &= ~PCIM_PSTAT_PMEENABLE;
2609 			pci_write_config(sc->alc_dev,
2610 			    sc->alc_pmcap + PCIR_POWER_STATUS, pmstat, 2);
2611 		}
2612 	}
2613 
2614 	/* Reset PHY. */
2615 	alc_phy_reset(sc);
2616 	if (ifp->if_flags & IFF_UP)
2617 		alc_init(sc);
2618 
2619 	lwkt_serialize_exit(ifp->if_serializer);
2620 
2621 	return (0);
2622 }
2623 
2624 static int
2625 alc_encap(struct alc_softc *sc, struct mbuf **m_head)
2626 {
2627 	struct alc_txdesc *txd, *txd_last;
2628 	struct tx_desc *desc;
2629 	struct mbuf *m;
2630 	struct ip *ip;
2631 	struct tcphdr *tcp;
2632 	bus_dma_segment_t txsegs[ALC_MAXTXSEGS];
2633 	bus_dmamap_t map;
2634 	uint32_t cflags, hdrlen, ip_off, poff, vtag;
2635 	int error, idx, nsegs, prod;
2636 
2637 	M_ASSERTPKTHDR((*m_head));
2638 
2639 	m = *m_head;
2640 	ip = NULL;
2641 	tcp = NULL;
2642 	ip_off = poff = 0;
2643 	if ((m->m_pkthdr.csum_flags & (ALC_CSUM_FEATURES | CSUM_TSO)) != 0) {
2644 		/*
2645 		 * AR81[3567]x requires offset of TCP/UDP header in its
2646 		 * Tx descriptor to perform Tx checksum offloading. TSO
2647 		 * also requires TCP header offset and modification of
2648 		 * IP/TCP header. This kind of operation takes many CPU
2649 		 * cycles on FreeBSD so fast host CPU is required to get
2650 		 * smooth TSO performance.
2651 		 */
2652 		struct ether_header *eh;
2653 
2654 		if (M_WRITABLE(m) == 0) {
2655 			/* Get a writable copy. */
2656 			m = m_dup(*m_head, M_NOWAIT);
2657 			/* Release original mbufs. */
2658 			m_freem(*m_head);
2659 			if (m == NULL) {
2660 				*m_head = NULL;
2661 				return (ENOBUFS);
2662 			}
2663 			*m_head = m;
2664 		}
2665 
2666 		ip_off = sizeof(struct ether_header);
2667 		m = m_pullup(m, ip_off + sizeof(struct ip));
2668 		if (m == NULL) {
2669 			*m_head = NULL;
2670 			return (ENOBUFS);
2671 		}
2672 		eh = mtod(m, struct ether_header *);
2673 		/*
2674 		 * Check if hardware VLAN insertion is off.
2675 		 * Additional check for LLC/SNAP frame?
2676 		 */
2677 		if (eh->ether_type == htons(ETHERTYPE_VLAN)) {
2678 			ip_off = sizeof(struct ether_vlan_header);
2679 			m = m_pullup(m, ip_off);
2680 			if (m == NULL) {
2681 				*m_head = NULL;
2682 				return (ENOBUFS);
2683 			}
2684 		}
2685 		m = m_pullup(m, ip_off + sizeof(struct ip));
2686 		if (m == NULL) {
2687 			*m_head = NULL;
2688 			return (ENOBUFS);
2689 		}
2690 		ip = (struct ip *)(mtod(m, char *) + ip_off);
2691 		poff = ip_off + (ip->ip_hl << 2);
2692 		if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
2693 			m = m_pullup(m, poff + sizeof(struct tcphdr));
2694 			if (m == NULL) {
2695 				*m_head = NULL;
2696 				return (ENOBUFS);
2697 			}
2698 			tcp = (struct tcphdr *)(mtod(m, char *) + poff);
2699 			m = m_pullup(m, poff + (tcp->th_off << 2));
2700 			if (m == NULL) {
2701 				*m_head = NULL;
2702 				return (ENOBUFS);
2703 			}
2704 			/*
2705 			 * Due to strict adherence of Microsoft NDIS
2706 			 * Large Send specification, hardware expects
2707 			 * a pseudo TCP checksum inserted by upper
2708 			 * stack. Unfortunately the pseudo TCP
2709 			 * checksum that NDIS refers to does not include
2710 			 * TCP payload length so driver should recompute
2711 			 * the pseudo checksum here. Hopefully this
2712 			 * wouldn't be much burden on modern CPUs.
2713 			 *
2714 			 * Reset IP checksum and recompute TCP pseudo
2715 			 * checksum as NDIS specification said.
2716 			 */
2717 			ip = (struct ip *)(mtod(m, char *) + ip_off);
2718 			tcp = (struct tcphdr *)(mtod(m, char *) + poff);
2719 			ip->ip_sum = 0;
2720 			tcp->th_sum = in_pseudo(ip->ip_src.s_addr,
2721 			    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
2722 		}
2723 		*m_head = m;
2724 	}
2725 
2726 	prod = sc->alc_cdata.alc_tx_prod;
2727 	txd = &sc->alc_cdata.alc_txdesc[prod];
2728 	txd_last = txd;
2729 	map = txd->tx_dmamap;
2730 
2731 	error = bus_dmamap_load_mbuf_defrag(
2732 			sc->alc_cdata.alc_tx_tag, map, m_head,
2733 			txsegs, ALC_MAXTXSEGS, &nsegs, BUS_DMA_NOWAIT);
2734 	if (error) {
2735 		m_freem(*m_head);
2736 		*m_head = NULL;
2737 		return (error);
2738 	}
2739 	if (nsegs == 0) {
2740 		m_freem(*m_head);
2741 		*m_head = NULL;
2742 		return (EIO);
2743 	}
2744 
2745 	/* Check descriptor overrun. */
2746 	if (sc->alc_cdata.alc_tx_cnt + nsegs >= ALC_TX_RING_CNT - 3) {
2747 		bus_dmamap_unload(sc->alc_cdata.alc_tx_tag, map);
2748 		return (ENOBUFS);
2749 	}
2750 	bus_dmamap_sync(sc->alc_cdata.alc_tx_tag, map, BUS_DMASYNC_PREWRITE);
2751 
2752 	m = *m_head;
2753 	cflags = TD_ETHERNET;
2754 	vtag = 0;
2755 	desc = NULL;
2756 	idx = 0;
2757 	/* Configure VLAN hardware tag insertion. */
2758 	if ((m->m_flags & M_VLANTAG) != 0) {
2759 		vtag = htons(m->m_pkthdr.ether_vlantag);
2760 		vtag = (vtag << TD_VLAN_SHIFT) & TD_VLAN_MASK;
2761 		cflags |= TD_INS_VLAN_TAG;
2762 	}
2763 	if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
2764 		/* Request TSO and set MSS. */
2765 		cflags |= TD_TSO | TD_TSO_DESCV1;
2766 		cflags |= ((uint32_t)m->m_pkthdr.tso_segsz << TD_MSS_SHIFT) &
2767 		    TD_MSS_MASK;
2768 		/* Set TCP header offset. */
2769 		cflags |= (poff << TD_TCPHDR_OFFSET_SHIFT) &
2770 		    TD_TCPHDR_OFFSET_MASK;
2771 		/*
2772 		 * AR81[3567]x requires the first buffer should
2773 		 * only hold IP/TCP header data. Payload should
2774 		 * be handled in other descriptors.
2775 		 */
2776 		hdrlen = poff + (tcp->th_off << 2);
2777 		desc = &sc->alc_rdata.alc_tx_ring[prod];
2778 		desc->len = htole32(TX_BYTES(hdrlen | vtag));
2779 		desc->flags = htole32(cflags);
2780 		desc->addr = htole64(txsegs[0].ds_addr);
2781 		sc->alc_cdata.alc_tx_cnt++;
2782 		ALC_DESC_INC(prod, ALC_TX_RING_CNT);
2783 		if (m->m_len - hdrlen > 0) {
2784 			/* Handle remaining payload of the first fragment. */
2785 			desc = &sc->alc_rdata.alc_tx_ring[prod];
2786 			desc->len = htole32(TX_BYTES((m->m_len - hdrlen) |
2787 			    vtag));
2788 			desc->flags = htole32(cflags);
2789 			desc->addr = htole64(txsegs[0].ds_addr + hdrlen);
2790 			sc->alc_cdata.alc_tx_cnt++;
2791 			ALC_DESC_INC(prod, ALC_TX_RING_CNT);
2792 		}
2793 		/* Handle remaining fragments. */
2794 		idx = 1;
2795 	} else if ((m->m_pkthdr.csum_flags & ALC_CSUM_FEATURES) != 0) {
2796 		/* Configure Tx checksum offload. */
2797 #ifdef ALC_USE_CUSTOM_CSUM
2798 		cflags |= TD_CUSTOM_CSUM;
2799 		/* Set checksum start offset. */
2800 		cflags |= ((poff >> 1) << TD_PLOAD_OFFSET_SHIFT) &
2801 		    TD_PLOAD_OFFSET_MASK;
2802 		/* Set checksum insertion position of TCP/UDP. */
2803 		cflags |= (((poff + m->m_pkthdr.csum_data) >> 1) <<
2804 		    TD_CUSTOM_CSUM_OFFSET_SHIFT) & TD_CUSTOM_CSUM_OFFSET_MASK;
2805 #else
2806 		if ((m->m_pkthdr.csum_flags & CSUM_IP) != 0)
2807 			cflags |= TD_IPCSUM;
2808 		if ((m->m_pkthdr.csum_flags & CSUM_TCP) != 0)
2809 			cflags |= TD_TCPCSUM;
2810 		if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0)
2811 			cflags |= TD_UDPCSUM;
2812 		/* Set TCP/UDP header offset. */
2813 		cflags |= (poff << TD_L4HDR_OFFSET_SHIFT) &
2814 		    TD_L4HDR_OFFSET_MASK;
2815 #endif
2816 	}
2817 
2818 	for (; idx < nsegs; idx++) {
2819 		desc = &sc->alc_rdata.alc_tx_ring[prod];
2820 		desc->len = htole32(TX_BYTES(txsegs[idx].ds_len) | vtag);
2821 		desc->flags = htole32(cflags);
2822 		desc->addr = htole64(txsegs[idx].ds_addr);
2823 		sc->alc_cdata.alc_tx_cnt++;
2824 		ALC_DESC_INC(prod, ALC_TX_RING_CNT);
2825 	}
2826 	/* Update producer index. */
2827 	sc->alc_cdata.alc_tx_prod = prod;
2828 
2829 	/* Finally set EOP on the last descriptor. */
2830 	prod = (prod + ALC_TX_RING_CNT - 1) % ALC_TX_RING_CNT;
2831 	desc = &sc->alc_rdata.alc_tx_ring[prod];
2832 	desc->flags |= htole32(TD_EOP);
2833 
2834 	/* Swap dmamap of the first and the last. */
2835 	txd = &sc->alc_cdata.alc_txdesc[prod];
2836 	map = txd_last->tx_dmamap;
2837 	txd_last->tx_dmamap = txd->tx_dmamap;
2838 	txd->tx_dmamap = map;
2839 	txd->tx_m = m;
2840 
2841 	return (0);
2842 }
2843 
/*
 * if_start handler: drain the interface send queue into the Tx ring.
 *
 * Runs under the interface serializer on the default ALTQ subqueue.
 * Frames that do not fit in the ring are prepended back onto the
 * queue and OACTIVE is set until alc_txeof() frees descriptors.
 */
static void
alc_start(struct ifnet *ifp, struct ifaltq_subque *ifsq)
{
	struct alc_softc *sc = ifp->if_softc;
	struct mbuf *m_head;
	int enq;

	ASSERT_ALTQ_SQ_DEFAULT(ifp, ifsq);
	ASSERT_SERIALIZED(ifp->if_serializer);

	/* Reclaim transmitted frames. */
	if (sc->alc_cdata.alc_tx_cnt >= ALC_TX_DESC_HIWAT)
		alc_txeof(sc);

	/* Bail if the interface is down or the ring is already saturated. */
	if ((ifp->if_flags & IFF_RUNNING) == 0 || ifq_is_oactive(&ifp->if_snd))
		return;
	/* No link: drop everything rather than let the queue build up. */
	if ((sc->alc_flags & ALC_FLAG_LINK) == 0) {
		ifq_purge(&ifp->if_snd);
		return;
	}

	for (enq = 0; !ifq_is_empty(&ifp->if_snd); ) {
		m_head = ifq_dequeue(&ifp->if_snd);
		if (m_head == NULL)
			break;
		/*
		 * Pack the data into the transmit ring. If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 * Note: alc_encap() may free and NULL the mbuf on
		 * hard failure, in which case there is nothing to
		 * prepend back.
		 */
		if (alc_encap(sc, &m_head)) {
			if (m_head == NULL)
				break;
			ifq_prepend(&ifp->if_snd, m_head);
			ifq_set_oactive(&ifp->if_snd);
			break;
		}

		enq++;
		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		ETHER_BPF_MTAP(ifp, m_head);
	}

	if (enq > 0) {
		/* Sync descriptors. */
		bus_dmamap_sync(sc->alc_cdata.alc_tx_ring_tag,
		    sc->alc_cdata.alc_tx_ring_map, BUS_DMASYNC_PREWRITE);
		/* Kick. Assume we're using normal Tx priority queue. */
		if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0)
			CSR_WRITE_2(sc, ALC_MBOX_TD_PRI0_PROD_IDX,
			    (uint16_t)sc->alc_cdata.alc_tx_prod);
		else
			CSR_WRITE_4(sc, ALC_MBOX_TD_PROD_IDX,
			    (sc->alc_cdata.alc_tx_prod <<
			    MBOX_TD_PROD_LO_IDX_SHIFT) &
			    MBOX_TD_PROD_LO_IDX_MASK);
		/* Set a timeout in case the chip goes out to lunch. */
		sc->alc_watchdog_timer = ALC_TX_TIMEOUT;
	}
}
2907 
2908 static void
2909 alc_watchdog(struct alc_softc *sc)
2910 {
2911 	struct ifnet *ifp = &sc->arpcom.ac_if;
2912 
2913 	ASSERT_SERIALIZED(ifp->if_serializer);
2914 
2915 	if (sc->alc_watchdog_timer == 0 || --sc->alc_watchdog_timer)
2916 		return;
2917 
2918 	if ((sc->alc_flags & ALC_FLAG_LINK) == 0) {
2919 		if_printf(sc->alc_ifp, "watchdog timeout (lost link)\n");
2920 		IFNET_STAT_INC(ifp, oerrors, 1);
2921 		alc_init(sc);
2922 		return;
2923 	}
2924 	if_printf(sc->alc_ifp, "watchdog timeout -- resetting\n");
2925 	IFNET_STAT_INC(ifp, oerrors, 1);
2926 	alc_init(sc);
2927 	if (!ifq_is_empty(&ifp->if_snd))
2928 		if_devstart(ifp);
2929 }
2930 
/*
 * Interface ioctl handler.
 *
 * Handles MTU changes, interface flag changes, multicast filter
 * updates, media selection and capability toggles; everything else
 * is passed to ether_ioctl(). Called with the serializer held.
 * Returns 0 on success or an errno value.
 */
static int
alc_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data, struct ucred *cr)
{
	struct alc_softc *sc;
	struct ifreq *ifr;
	struct mii_data *mii;
	int error, mask;

	ASSERT_SERIALIZED(ifp->if_serializer);

	sc = ifp->if_softc;
	ifr = (struct ifreq *)data;
	error = 0;
	switch (cmd) {
	case SIOCSIFMTU:
		/*
		 * Reject MTUs below the Ethernet minimum, above the
		 * chip's maximum frame length (less VLAN header and
		 * CRC), or above the standard MTU on parts without
		 * jumbo-frame support.
		 */
		if (ifr->ifr_mtu < ETHERMIN ||
		    ifr->ifr_mtu > (sc->alc_ident->max_framelen -
			    sizeof(struct ether_vlan_header) - ETHER_CRC_LEN) ||
		    ((sc->alc_flags & ALC_FLAG_JUMBO) == 0 &&
		    ifr->ifr_mtu > ETHERMTU)) {
			error = EINVAL;
		} else if (ifp->if_mtu != ifr->ifr_mtu) {
			ifp->if_mtu = ifr->ifr_mtu;
#if 0
			/* AR81[3567]x has 13 bits MSS field. */
			if (ifp->if_mtu > ALC_TSO_MTU &&
			    (ifp->if_capenable & IFCAP_TSO4) != 0) {
				ifp->if_capenable &= ~IFCAP_TSO4;
				ifp->if_hwassist &= ~CSUM_TSO;
			}
#endif
		}
		break;
	case SIOCSIFFLAGS:
		/*
		 * Reprogram only the Rx filter when just the
		 * promiscuous/allmulti bits changed; otherwise bring
		 * the interface up or down as requested.
		 */
		if ((ifp->if_flags & IFF_UP) != 0) {
			if ((ifp->if_flags & IFF_RUNNING) != 0 &&
			    ((ifp->if_flags ^ sc->alc_if_flags) &
			    (IFF_PROMISC | IFF_ALLMULTI)) != 0)
				alc_rxfilter(sc);
			else if ((ifp->if_flags & IFF_RUNNING) == 0)
				alc_init(sc);
		} else if ((ifp->if_flags & IFF_RUNNING) != 0)
			alc_stop(sc);
		/* Remember flags to detect the next change. */
		sc->alc_if_flags = ifp->if_flags;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if ((ifp->if_flags & IFF_RUNNING) != 0)
			alc_rxfilter(sc);
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		mii = device_get_softc(sc->alc_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;
	case SIOCSIFCAP:
		/* mask holds the capability bits being toggled. */
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		if ((mask & IFCAP_TXCSUM) != 0 &&
		    (ifp->if_capabilities & IFCAP_TXCSUM) != 0) {
			ifp->if_capenable ^= IFCAP_TXCSUM;
			if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
				ifp->if_hwassist |= ALC_CSUM_FEATURES;
			else
				ifp->if_hwassist &= ~ALC_CSUM_FEATURES;
		}
#if 0
/* XXX: WOL */
		if ((mask & IFCAP_WOL_MCAST) != 0 &&
		    (ifp->if_capabilities & IFCAP_WOL_MCAST) != 0)
			ifp->if_capenable ^= IFCAP_WOL_MCAST;
		if ((mask & IFCAP_WOL_MAGIC) != 0 &&
		    (ifp->if_capabilities & IFCAP_WOL_MAGIC) != 0)
			ifp->if_capenable ^= IFCAP_WOL_MAGIC;
#endif
		if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
		    (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) != 0) {
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			alc_rxvlan(sc);
		}
		if ((mask & IFCAP_VLAN_HWCSUM) != 0 &&
		    (ifp->if_capabilities & IFCAP_VLAN_HWCSUM) != 0)
			ifp->if_capenable ^= IFCAP_VLAN_HWCSUM;

		/*
		 * VLAN hardware tagging is required to do checksum
		 * offload or TSO on VLAN interface. Checksum offload
		 * on VLAN interface also requires hardware checksum
		 * offload of parent interface.
		 */
		if ((ifp->if_capenable & IFCAP_TXCSUM) == 0)
			ifp->if_capenable &= ~IFCAP_VLAN_HWCSUM;
		if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) == 0)
			ifp->if_capenable &= ~IFCAP_VLAN_HWCSUM;
// XXX		VLAN_CAPABILITIES(ifp);
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}

	return (error);
}
3033 
3034 static void
3035 alc_mac_config(struct alc_softc *sc)
3036 {
3037 	struct mii_data *mii;
3038 	uint32_t reg;
3039 
3040 	mii = device_get_softc(sc->alc_miibus);
3041 	reg = CSR_READ_4(sc, ALC_MAC_CFG);
3042 	reg &= ~(MAC_CFG_FULL_DUPLEX | MAC_CFG_TX_FC | MAC_CFG_RX_FC |
3043 	    MAC_CFG_SPEED_MASK);
3044 	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0 ||
3045 	    sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8151 ||
3046 	    sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8151_V2 ||
3047 	    sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8152_B2) {
3048 		reg |= MAC_CFG_HASH_ALG_CRC32 | MAC_CFG_SPEED_MODE_SW;
3049 	}
3050 	/* Reprogram MAC with resolved speed/duplex. */
3051 	switch (IFM_SUBTYPE(mii->mii_media_active)) {
3052 	case IFM_10_T:
3053 	case IFM_100_TX:
3054 		reg |= MAC_CFG_SPEED_10_100;
3055 		break;
3056 	case IFM_1000_T:
3057 		reg |= MAC_CFG_SPEED_1000;
3058 		break;
3059 	}
3060 	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
3061 		reg |= MAC_CFG_FULL_DUPLEX;
3062 		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
3063 			reg |= MAC_CFG_TX_FC;
3064 		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
3065 			reg |= MAC_CFG_RX_FC;
3066 	}
3067 	CSR_WRITE_4(sc, ALC_MAC_CFG, reg);
3068 }
3069 
/*
 * Clear the hardware statistics.
 *
 * Without the SMB bug, zero the "updated" flag in the DMA'd statistics
 * block.  With the SMB bug the MIB counters are read directly from
 * registers and the values discarded (presumably the counters are
 * clear-on-read — NOTE(review): confirm against the datasheet).  The
 * struct smb pointer walk only exists to count the number of 32-bit
 * counters; `sb` itself is never read.
 */
static void
alc_stats_clear(struct alc_softc *sc)
{
	struct smb sb, *smb;
	uint32_t *reg;
	int i;

	if ((sc->alc_flags & ALC_FLAG_SMB_BUG) == 0) {
		bus_dmamap_sync(sc->alc_cdata.alc_smb_tag,
		    sc->alc_cdata.alc_smb_map,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		smb = sc->alc_rdata.alc_smb;
		/* Update done, clear. */
		smb->updated = 0;
		bus_dmamap_sync(sc->alc_cdata.alc_smb_tag,
		    sc->alc_cdata.alc_smb_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	} else {
		/* Read (and discard) Rx statistics registers. */
		for (reg = &sb.rx_frames, i = 0; reg <= &sb.rx_pkts_filtered;
		    reg++) {
			CSR_READ_4(sc, ALC_RX_MIB_BASE + i);
			i += sizeof(uint32_t);
		}
		/* Read Tx statistics. */
		for (reg = &sb.tx_frames, i = 0; reg <= &sb.tx_mcast_bytes;
		    reg++) {
			CSR_READ_4(sc, ALC_TX_MIB_BASE + i);
			i += sizeof(uint32_t);
		}
	}
}
3101 
/*
 * Harvest hardware MIB counters into the softc statistics and the
 * generic ifnet counters.
 *
 * Without the SMB bug, read the DMA'd statistics block (skipping the
 * update entirely if the hardware has not refreshed it); with the bug,
 * read each 32-bit counter straight from the MIB registers into a
 * stack copy laid out as struct smb.
 */
static void
alc_stats_update(struct alc_softc *sc)
{
	struct alc_hw_stats *stat;
	struct smb sb, *smb;
	struct ifnet *ifp;
	uint32_t *reg;
	int i;

	ifp = sc->alc_ifp;
	stat = &sc->alc_stats;
	if ((sc->alc_flags & ALC_FLAG_SMB_BUG) == 0) {
		bus_dmamap_sync(sc->alc_cdata.alc_smb_tag,
		    sc->alc_cdata.alc_smb_map,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		smb = sc->alc_rdata.alc_smb;
		/* Nothing new from the hardware yet. */
		if (smb->updated == 0)
			return;
	} else {
		smb = &sb;
		/* Read Rx statistics. */
		for (reg = &sb.rx_frames, i = 0; reg <= &sb.rx_pkts_filtered;
		    reg++) {
			*reg = CSR_READ_4(sc, ALC_RX_MIB_BASE + i);
			i += sizeof(uint32_t);
		}
		/* Read Tx statistics. */
		for (reg = &sb.tx_frames, i = 0; reg <= &sb.tx_mcast_bytes;
		    reg++) {
			*reg = CSR_READ_4(sc, ALC_TX_MIB_BASE + i);
			i += sizeof(uint32_t);
		}
	}

	/* Rx stats. */
	stat->rx_frames += smb->rx_frames;
	stat->rx_bcast_frames += smb->rx_bcast_frames;
	stat->rx_mcast_frames += smb->rx_mcast_frames;
	stat->rx_pause_frames += smb->rx_pause_frames;
	stat->rx_control_frames += smb->rx_control_frames;
	stat->rx_crcerrs += smb->rx_crcerrs;
	stat->rx_lenerrs += smb->rx_lenerrs;
	stat->rx_bytes += smb->rx_bytes;
	stat->rx_runts += smb->rx_runts;
	stat->rx_fragments += smb->rx_fragments;
	stat->rx_pkts_64 += smb->rx_pkts_64;
	stat->rx_pkts_65_127 += smb->rx_pkts_65_127;
	stat->rx_pkts_128_255 += smb->rx_pkts_128_255;
	stat->rx_pkts_256_511 += smb->rx_pkts_256_511;
	stat->rx_pkts_512_1023 += smb->rx_pkts_512_1023;
	stat->rx_pkts_1024_1518 += smb->rx_pkts_1024_1518;
	stat->rx_pkts_1519_max += smb->rx_pkts_1519_max;
	stat->rx_pkts_truncated += smb->rx_pkts_truncated;
	stat->rx_fifo_oflows += smb->rx_fifo_oflows;
	stat->rx_rrs_errs += smb->rx_rrs_errs;
	stat->rx_alignerrs += smb->rx_alignerrs;
	stat->rx_bcast_bytes += smb->rx_bcast_bytes;
	stat->rx_mcast_bytes += smb->rx_mcast_bytes;
	stat->rx_pkts_filtered += smb->rx_pkts_filtered;

	/* Tx stats. */
	stat->tx_frames += smb->tx_frames;
	stat->tx_bcast_frames += smb->tx_bcast_frames;
	stat->tx_mcast_frames += smb->tx_mcast_frames;
	stat->tx_pause_frames += smb->tx_pause_frames;
	stat->tx_excess_defer += smb->tx_excess_defer;
	stat->tx_control_frames += smb->tx_control_frames;
	stat->tx_deferred += smb->tx_deferred;
	stat->tx_bytes += smb->tx_bytes;
	stat->tx_pkts_64 += smb->tx_pkts_64;
	stat->tx_pkts_65_127 += smb->tx_pkts_65_127;
	stat->tx_pkts_128_255 += smb->tx_pkts_128_255;
	stat->tx_pkts_256_511 += smb->tx_pkts_256_511;
	stat->tx_pkts_512_1023 += smb->tx_pkts_512_1023;
	stat->tx_pkts_1024_1518 += smb->tx_pkts_1024_1518;
	stat->tx_pkts_1519_max += smb->tx_pkts_1519_max;
	stat->tx_single_colls += smb->tx_single_colls;
	stat->tx_multi_colls += smb->tx_multi_colls;
	stat->tx_late_colls += smb->tx_late_colls;
	stat->tx_excess_colls += smb->tx_excess_colls;
	stat->tx_underrun += smb->tx_underrun;
	stat->tx_desc_underrun += smb->tx_desc_underrun;
	stat->tx_lenerrs += smb->tx_lenerrs;
	stat->tx_pkts_truncated += smb->tx_pkts_truncated;
	stat->tx_bcast_bytes += smb->tx_bcast_bytes;
	stat->tx_mcast_bytes += smb->tx_mcast_bytes;

	/* Update counters in ifnet. */
	IFNET_STAT_INC(ifp, opackets, smb->tx_frames);

	/*
	 * Approximate collisions: multi-collision frames are weighted
	 * by 2 and excess-collision frames by the default retry limit.
	 */
	IFNET_STAT_INC(ifp, collisions, smb->tx_single_colls +
	    smb->tx_multi_colls * 2 + smb->tx_late_colls +
	    smb->tx_excess_colls * HDPX_CFG_RETRY_DEFAULT);

	IFNET_STAT_INC(ifp, oerrors,
	    smb->tx_excess_colls + smb->tx_late_colls + smb->tx_underrun);

	IFNET_STAT_INC(ifp, ipackets, smb->rx_frames);

	IFNET_STAT_INC(ifp, ierrors, smb->rx_crcerrs + smb->rx_lenerrs +
	    smb->rx_runts + smb->rx_pkts_truncated +
	    smb->rx_fifo_oflows + smb->rx_rrs_errs +
	    smb->rx_alignerrs);

	if ((sc->alc_flags & ALC_FLAG_SMB_BUG) == 0) {
		/* Update done, clear. */
		smb->updated = 0;
		bus_dmamap_sync(sc->alc_cdata.alc_smb_tag,
		    sc->alc_cdata.alc_smb_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}
}
3214 
/*
 * Interrupt handler.
 *
 * Acknowledges and masks interrupts, dispatches Rx processing, handles
 * fatal DMA/TxQ error conditions by reinitializing the chip, and kicks
 * the transmit path before unmasking interrupts again.  Runs with the
 * interface serializer held.
 */
static void
alc_intr(void *arg)
{
	struct alc_softc *sc = arg;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t status;

	ASSERT_SERIALIZED(ifp->if_serializer);

	status = CSR_READ_4(sc, ALC_INTR_STATUS);
	/* Not our interrupt (shared line), or spurious. */
	if ((status & ALC_INTRS) == 0)
		return;

	/* Acknowledge interrupts and disable interrupts. */
	CSR_WRITE_4(sc, ALC_INTR_STATUS, status | INTR_DIS_INT);

	if (ifp->if_flags & IFF_RUNNING) {
		if (status & INTR_RX_PKT) {
			/* Nonzero return means the Rx ring is wedged. */
			if (alc_rxintr(sc)) {
				alc_init(sc);
				/* NOTE(review): returns with interrupts
				 * still masked; presumably alc_init()
				 * re-enables them — confirm. */
				return;
			}
		}
		if (status & (INTR_DMA_RD_TO_RST | INTR_DMA_WR_TO_RST |
		    INTR_TXQ_TO_RST)) {
			if (status & INTR_DMA_RD_TO_RST) {
				if_printf(ifp,
				    "DMA read error! -- resetting\n");
			}
			if (status & INTR_DMA_WR_TO_RST) {
				if_printf(ifp,
				    "DMA write error! -- resetting\n");
			}
			if (status & INTR_TXQ_TO_RST)
				if_printf(ifp, "TxQ reset! -- resetting\n");
			alc_init(sc);
			return;
		}
		/* Restart transmission if frames are pending. */
		if (!ifq_is_empty(&ifp->if_snd))
			if_devstart(ifp);

		/* Re-enable interrupts */
		CSR_WRITE_4(sc, ALC_INTR_STATUS, 0x7FFFFFFF);
	}
}
3260 
3261 static void
3262 alc_txeof(struct alc_softc *sc)
3263 {
3264 	struct ifnet *ifp;
3265 	struct alc_txdesc *txd;
3266 	uint32_t cons, prod;
3267 	int prog;
3268 
3269 	ifp = sc->alc_ifp;
3270 
3271 	if (sc->alc_cdata.alc_tx_cnt == 0)
3272 		return;
3273 	bus_dmamap_sync(sc->alc_cdata.alc_tx_ring_tag,
3274 	    sc->alc_cdata.alc_tx_ring_map, BUS_DMASYNC_POSTWRITE);
3275 	if ((sc->alc_flags & ALC_FLAG_CMB_BUG) == 0) {
3276 		bus_dmamap_sync(sc->alc_cdata.alc_cmb_tag,
3277 		    sc->alc_cdata.alc_cmb_map, BUS_DMASYNC_POSTREAD);
3278 		prod = sc->alc_rdata.alc_cmb->cons;
3279 	} else {
3280 		if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0)
3281 			prod = CSR_READ_2(sc, ALC_MBOX_TD_PRI0_CONS_IDX);
3282 		else {
3283 			prod = CSR_READ_4(sc, ALC_MBOX_TD_CONS_IDX);
3284 			/* Assume we're using normal Tx priority queue. */
3285 			prod = (prod & MBOX_TD_CONS_LO_IDX_MASK) >>
3286 			    MBOX_TD_CONS_LO_IDX_SHIFT;
3287 		}
3288 	}
3289 	cons = sc->alc_cdata.alc_tx_cons;
3290 	/*
3291 	 * Go through our Tx list and free mbufs for those
3292 	 * frames which have been transmitted.
3293 	 */
3294 	for (prog = 0; cons != prod; prog++,
3295 	    ALC_DESC_INC(cons, ALC_TX_RING_CNT)) {
3296 		if (sc->alc_cdata.alc_tx_cnt <= 0)
3297 			break;
3298 		prog++;
3299 		ifq_clr_oactive(&ifp->if_snd);
3300 		sc->alc_cdata.alc_tx_cnt--;
3301 		txd = &sc->alc_cdata.alc_txdesc[cons];
3302 		if (txd->tx_m != NULL) {
3303 			/* Reclaim transmitted mbufs. */
3304 			bus_dmamap_sync(sc->alc_cdata.alc_tx_tag,
3305 			    txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
3306 			bus_dmamap_unload(sc->alc_cdata.alc_tx_tag,
3307 			    txd->tx_dmamap);
3308 			m_freem(txd->tx_m);
3309 			txd->tx_m = NULL;
3310 		}
3311 	}
3312 
3313 	if ((sc->alc_flags & ALC_FLAG_CMB_BUG) == 0)
3314 		bus_dmamap_sync(sc->alc_cdata.alc_cmb_tag,
3315 		    sc->alc_cdata.alc_cmb_map, BUS_DMASYNC_PREREAD);
3316 	sc->alc_cdata.alc_tx_cons = cons;
3317 	/*
3318 	 * Unarm watchdog timer only when there is no pending
3319 	 * frames in Tx queue.
3320 	 */
3321 	if (sc->alc_cdata.alc_tx_cnt == 0)
3322 		sc->alc_watchdog_timer = 0;
3323 }
3324 
3325 static int
3326 alc_newbuf(struct alc_softc *sc, struct alc_rxdesc *rxd, boolean_t wait)
3327 {
3328 	struct mbuf *m;
3329 	bus_dma_segment_t segs[1];
3330 	bus_dmamap_t map;
3331 	int nsegs;
3332 	int error;
3333 
3334 	m = m_getcl(wait ? M_WAITOK : M_NOWAIT, MT_DATA, M_PKTHDR);
3335 	if (m == NULL)
3336 		return (ENOBUFS);
3337 	m->m_len = m->m_pkthdr.len = MCLBYTES;
3338 #ifdef foo
3339 	/* Hardware require 4 bytes align */
3340 	m_adj(m, ETHER_ALIGN);
3341 #endif
3342 
3343 	error = bus_dmamap_load_mbuf_segment(
3344 			sc->alc_cdata.alc_rx_tag,
3345 			sc->alc_cdata.alc_rx_sparemap,
3346 			m, segs, 1, &nsegs, BUS_DMA_NOWAIT);
3347 	if (error) {
3348 		m_freem(m);
3349 		return (ENOBUFS);
3350 	}
3351 	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
3352 
3353 	if (rxd->rx_m != NULL) {
3354 		bus_dmamap_sync(sc->alc_cdata.alc_rx_tag, rxd->rx_dmamap,
3355 		    BUS_DMASYNC_POSTREAD);
3356 		bus_dmamap_unload(sc->alc_cdata.alc_rx_tag, rxd->rx_dmamap);
3357 	}
3358 	map = rxd->rx_dmamap;
3359 	rxd->rx_dmamap = sc->alc_cdata.alc_rx_sparemap;
3360 	sc->alc_cdata.alc_rx_sparemap = map;
3361 	bus_dmamap_sync(sc->alc_cdata.alc_rx_tag, rxd->rx_dmamap,
3362 	    BUS_DMASYNC_PREREAD);
3363 	rxd->rx_m = m;
3364 	rxd->rx_desc->addr = htole64(segs[0].ds_addr);
3365 	return (0);
3366 }
3367 
/*
 * Rx interrupt service: walk the Rx return ring and hand each
 * completed (possibly multi-segment) frame to alc_rxeof().
 *
 * Returns 0 on success or EIO when a return descriptor reports a zero
 * segment count, signalling the caller (alc_intr) to reset the chip.
 */
static int
alc_rxintr(struct alc_softc *sc)
{
	struct ifnet *ifp;
	struct rx_rdesc *rrd;
	uint32_t nsegs, status;
	int rr_cons, prog;

	/* Pick up return descriptors the hardware DMA'd to us. */
	bus_dmamap_sync(sc->alc_cdata.alc_rr_ring_tag,
	    sc->alc_cdata.alc_rr_ring_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	bus_dmamap_sync(sc->alc_cdata.alc_rx_ring_tag,
	    sc->alc_cdata.alc_rx_ring_map, BUS_DMASYNC_POSTWRITE);
	rr_cons = sc->alc_cdata.alc_rr_cons;
	ifp = sc->alc_ifp;
	for (prog = 0; (ifp->if_flags & IFF_RUNNING) != 0;) {
		rrd = &sc->alc_rdata.alc_rr_ring[rr_cons];
		status = le32toh(rrd->status);
		/* Stop at the first descriptor the hardware still owns. */
		if ((status & RRD_VALID) == 0)
			break;
		nsegs = RRD_RD_CNT(le32toh(rrd->rdinfo));
		if (nsegs == 0) {
			/* This should not happen! */
			device_printf(sc->alc_dev,
			    "unexpected segment count -- resetting\n");
			return (EIO);
		}
		alc_rxeof(sc, rrd);
		/* Clear Rx return status. */
		rrd->status = 0;
		ALC_DESC_INC(rr_cons, ALC_RR_RING_CNT);
		/* Advance the Rx buffer consumer by the segments used. */
		sc->alc_cdata.alc_rx_cons += nsegs;
		sc->alc_cdata.alc_rx_cons %= ALC_RR_RING_CNT;
		prog += nsegs;
	}

	if (prog > 0) {
		/* Update the consumer index. */
		sc->alc_cdata.alc_rr_cons = rr_cons;
		/* Sync Rx return descriptors. */
		bus_dmamap_sync(sc->alc_cdata.alc_rr_ring_tag,
		    sc->alc_cdata.alc_rr_ring_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		/*
		 * Sync updated Rx descriptors such that controller see
		 * modified buffer addresses.
		 */
		bus_dmamap_sync(sc->alc_cdata.alc_rx_ring_tag,
		    sc->alc_cdata.alc_rx_ring_map, BUS_DMASYNC_PREWRITE);
		/*
		 * Let controller know availability of new Rx buffers.
		 * Since alc(4) use RXQ_CFG_RD_BURST_DEFAULT descriptors
		 * it may be possible to update ALC_MBOX_RD0_PROD_IDX
		 * only when Rx buffer pre-fetching is required. In
		 * addition we already set ALC_RX_RD_FREE_THRESH to
		 * RX_RD_FREE_THRESH_LO_DEFAULT descriptors. However
		 * it still seems that pre-fetching needs more
		 * experimentation.
		 */
		if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0)
			CSR_WRITE_2(sc, ALC_MBOX_RD0_PROD_IDX,
				(uint16_t)sc->alc_cdata.alc_rx_cons);
		else
			CSR_WRITE_4(sc, ALC_MBOX_RD0_PROD_IDX,
				sc->alc_cdata.alc_rx_cons);
	}

	return 0;
}
3437 
/*
 * Receive a frame.
 *
 * Assembles one (possibly multi-segment) frame from the Rx buffer
 * ring into an mbuf chain, replenishes each consumed descriptor,
 * trims the trailing CRC (the controller cannot strip it), attaches
 * a VLAN tag when present, and passes the packet up the stack.
 */
static void
alc_rxeof(struct alc_softc *sc, struct rx_rdesc *rrd)
{
	struct alc_rxdesc *rxd;
	struct ifnet *ifp;
	struct mbuf *mp, *m;
	uint32_t rdinfo, status, vtag;
	int count, nsegs, rx_cons;

	ifp = sc->alc_ifp;
	status = le32toh(rrd->status);
	rdinfo = le32toh(rrd->rdinfo);
	/* First Rx buffer index and segment count for this frame. */
	rx_cons = RRD_RD_IDX(rdinfo);
	nsegs = RRD_RD_CNT(rdinfo);

	sc->alc_cdata.alc_rxlen = RRD_BYTES(status);
	if ((status & (RRD_ERR_SUM | RRD_ERR_LENGTH)) != 0) {
		/*
		 * We want to pass the following frames to upper
		 * layer regardless of error status of Rx return
		 * ring.
		 *
		 *  o IP/TCP/UDP checksum is bad.
		 *  o frame length and protocol specific length
		 *     does not match.
		 *
		 *  Force network stack compute checksum for
		 *  errored frames.
		 */
		status |= RRD_TCP_UDPCSUM_NOK | RRD_IPCSUM_NOK;
		/* Hard errors (CRC/align/trunc/runt): drop the frame. */
		if ((status & (RRD_ERR_CRC | RRD_ERR_ALIGN |
		     RRD_ERR_TRUNC | RRD_ERR_RUNT)) != 0)
			return;
	}

	for (count = 0; count < nsegs; count++,
	    ALC_DESC_INC(rx_cons, ALC_RX_RING_CNT)) {
		rxd = &sc->alc_cdata.alc_rxdesc[rx_cons];
		mp = rxd->rx_m;
		/* Add a new receive buffer to the ring. */
		if (alc_newbuf(sc, rxd, FALSE) != 0) {
			IFNET_STAT_INC(ifp, iqdrops, 1);
			/* Reuse Rx buffers. */
			if (sc->alc_cdata.alc_rxhead != NULL)
				m_freem(sc->alc_cdata.alc_rxhead);
			break;
		}

		/*
		 * Assume we've received a full sized frame.
		 * Actual size is fixed when we encounter the end of
		 * multi-segmented frame.
		 */
		mp->m_len = sc->alc_buf_size;

		/* Chain received mbufs. */
		if (sc->alc_cdata.alc_rxhead == NULL) {
			sc->alc_cdata.alc_rxhead = mp;
			sc->alc_cdata.alc_rxtail = mp;
		} else {
			/*mp->m_flags &= ~M_PKTHDR;*/
			/* Remember the previous tail for CRC trimming. */
			sc->alc_cdata.alc_rxprev_tail =
			    sc->alc_cdata.alc_rxtail;
			sc->alc_cdata.alc_rxtail->m_next = mp;
			sc->alc_cdata.alc_rxtail = mp;
		}

		if (count == nsegs - 1) {
			/* Last desc. for this frame. */
			m = sc->alc_cdata.alc_rxhead;
			/*m->m_flags |= M_PKTHDR;*/

			/*
			 * It seems that L1C/L2C controller has no way
			 * to tell hardware to strip CRC bytes.
			 */
			m->m_pkthdr.len =
			    sc->alc_cdata.alc_rxlen - ETHER_CRC_LEN;
			if (nsegs > 1) {
				/* Set last mbuf size. */
				mp->m_len = sc->alc_cdata.alc_rxlen -
				    (nsegs - 1) * sc->alc_buf_size;
				/* Remove the CRC bytes in chained mbufs. */
				if (mp->m_len <= ETHER_CRC_LEN) {
					/*
					 * CRC spans into the previous
					 * mbuf: drop the last one and
					 * trim the remainder off the
					 * new tail.
					 */
					sc->alc_cdata.alc_rxtail =
					    sc->alc_cdata.alc_rxprev_tail;
					sc->alc_cdata.alc_rxtail->m_len -=
					    (ETHER_CRC_LEN - mp->m_len);
					sc->alc_cdata.alc_rxtail->m_next = NULL;
					m_freem(mp);
				} else {
					mp->m_len -= ETHER_CRC_LEN;
				}
			} else
				m->m_len = m->m_pkthdr.len;
			m->m_pkthdr.rcvif = ifp;
			/*
			 * Due to hardware bugs, Rx checksum offloading
			 * was intentionally disabled.
			 */
			if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0 &&
			    (status & RRD_VLAN_TAG) != 0) {
				vtag = RRD_VLAN(le32toh(rrd->vtag));
				m->m_pkthdr.ether_vlantag = ntohs(vtag);
				m->m_flags |= M_VLANTAG;
			}

			/* Pass it on. */
			ifp->if_input(ifp, m, NULL, -1);
		}
	}
	/* Reset mbuf chains. */
	ALC_RXCHAIN_RESET(sc);
}
3553 
3554 static void
3555 alc_tick(void *arg)
3556 {
3557 	struct alc_softc *sc = arg;
3558 	struct ifnet *ifp = &sc->arpcom.ac_if;
3559 	struct mii_data *mii;
3560 
3561 	lwkt_serialize_enter(ifp->if_serializer);
3562 
3563 	mii = device_get_softc(sc->alc_miibus);
3564 	mii_tick(mii);
3565 	alc_stats_update(sc);
3566 	/*
3567 	 * alc(4) does not rely on Tx completion interrupts to reclaim
3568 	 * transferred buffers. Instead Tx completion interrupts are
3569 	 * used to hint for scheduling Tx task. So it's necessary to
3570 	 * release transmitted buffers by kicking Tx completion
3571 	 * handler. This limits the maximum reclamation delay to a hz.
3572 	 */
3573 	alc_txeof(sc);
3574 	alc_watchdog(sc);
3575 	callout_reset(&sc->alc_tick_ch, hz, alc_tick, sc);
3576 
3577 #if 0
3578 	/* poll for debugging */
3579 	alc_intr(sc);
3580 #endif
3581 
3582 	lwkt_serialize_exit(ifp->if_serializer);
3583 }
3584 
/*
 * Reset/recalibrate the internal oscillator on AR816x parts.
 *
 * The exact register sequence differs by silicon revision: B0 and
 * later also restore the over-current protection default and pulse
 * the calibration-start bit; pre-B0 parts toggle the internal
 * oscillator enable (and drop isolation on rev A devices).
 */
static void
alc_osc_reset(struct alc_softc *sc)
{
	uint32_t reg;

	reg = CSR_READ_4(sc, ALC_MISC3);
	reg &= ~MISC3_25M_BY_SW;
	reg |= MISC3_25M_NOTO_INTNL;
	CSR_WRITE_4(sc, ALC_MISC3, reg);

	reg = CSR_READ_4(sc, ALC_MISC);
	if (AR816X_REV(sc->alc_rev) >= AR816X_REV_B0) {
		/*
		 * Restore over-current protection default value.
		 * This value could be reset by MAC reset.
		 */
		reg &= ~MISC_PSW_OCP_MASK;
		reg |= (MISC_PSW_OCP_DEFAULT << MISC_PSW_OCP_SHIFT);
		reg &= ~MISC_INTNLOSC_OPEN;
		/* Toggle the internal oscillator enable. */
		CSR_WRITE_4(sc, ALC_MISC, reg);
		CSR_WRITE_4(sc, ALC_MISC, reg | MISC_INTNLOSC_OPEN);
		/* Pulse calibration start. */
		reg = CSR_READ_4(sc, ALC_MISC2);
		reg &= ~MISC2_CALB_START;
		CSR_WRITE_4(sc, ALC_MISC2, reg);
		CSR_WRITE_4(sc, ALC_MISC2, reg | MISC2_CALB_START);

	} else {
		reg &= ~MISC_INTNLOSC_OPEN;
		/* Disable isolate for revision A devices. */
		if (AR816X_REV(sc->alc_rev) <= AR816X_REV_A1)
			reg &= ~MISC_ISO_ENB;
		CSR_WRITE_4(sc, ALC_MISC, reg | MISC_INTNLOSC_OPEN);
		CSR_WRITE_4(sc, ALC_MISC, reg);
	}

	/* Let the oscillator settle. */
	DELAY(20);
}
3622 
/*
 * Perform a full MAC reset.
 *
 * Applies the AR816x reset workaround (temporarily disabling ASPM
 * L0s/L1 on early revisions), asserts MASTER_RESET, polls for the
 * reset and idle conditions to clear, then restores ASPM config,
 * recalibrates the oscillator on AR816x, and applies the SERDES
 * clock-slowdown workaround where required.
 */
static void
alc_reset(struct alc_softc *sc)
{
	uint32_t pmcfg, reg;
	int i;

        pmcfg = 0;
	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0) {
		/* Reset workaround. */
		CSR_WRITE_4(sc, ALC_MBOX_RD0_PROD_IDX, 1);
		if (AR816X_REV(sc->alc_rev) <= AR816X_REV_A1 &&
		    (sc->alc_rev & 0x01) != 0) {
			/* Disable L0s/L1s before reset. */
			pmcfg = CSR_READ_4(sc, ALC_PM_CFG);
			if ((pmcfg & (PM_CFG_ASPM_L0S_ENB | PM_CFG_ASPM_L1_ENB))
			    != 0) {
				pmcfg &= ~(PM_CFG_ASPM_L0S_ENB |
				    PM_CFG_ASPM_L1_ENB);
				CSR_WRITE_4(sc, ALC_PM_CFG, pmcfg);
			}
		}
	}
	/* Assert the master reset. */
	reg = CSR_READ_4(sc, ALC_MASTER_CFG);
	reg |= MASTER_OOB_DIS_OFF | MASTER_RESET;
	CSR_WRITE_4(sc, ALC_MASTER_CFG, reg);

	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0) {
		/* Wait for the mailbox write above to be cleared. */
		for (i = ALC_RESET_TIMEOUT; i > 0; i--) {
			DELAY(10);
			if (CSR_READ_4(sc, ALC_MBOX_RD0_PROD_IDX) == 0)
				break;
		}
		if (i == 0)
			device_printf(sc->alc_dev, "MAC reset timeout!\n");
	}

	/* Wait for the MASTER_RESET bit to self-clear. */
	for (i = ALC_RESET_TIMEOUT; i > 0; i--) {
		DELAY(10);
		if ((CSR_READ_4(sc, ALC_MASTER_CFG) & MASTER_RESET) == 0)
			break;
	}
	if (i == 0)
		device_printf(sc->alc_dev, "master reset timeout!\n");

	/* Wait for all MAC/queue engines to go idle. */
	for (i = ALC_RESET_TIMEOUT; i > 0; i--) {
		reg = CSR_READ_4(sc, ALC_IDLE_STATUS);
		if ((reg & (IDLE_STATUS_RXMAC | IDLE_STATUS_TXMAC |
		    IDLE_STATUS_RXQ | IDLE_STATUS_TXQ)) == 0)
			break;
		DELAY(10);
	}

	if (i == 0)
		device_printf(sc->alc_dev, "reset timeout(0x%08x)!\n", reg);

	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0) {
		if (AR816X_REV(sc->alc_rev) <= AR816X_REV_A1 &&
		    (sc->alc_rev & 0x01) != 0) {
			reg = CSR_READ_4(sc, ALC_MASTER_CFG);
			reg |= MASTER_CLK_SEL_DIS;
			CSR_WRITE_4(sc, ALC_MASTER_CFG, reg);
			/* Restore L0s/L1s config. */
			if ((pmcfg & (PM_CFG_ASPM_L0S_ENB | PM_CFG_ASPM_L1_ENB))
			    != 0)
				CSR_WRITE_4(sc, ALC_PM_CFG, pmcfg);
		}

		alc_osc_reset(sc);
		/*
		 * Re-apply the oscillator/isolation settings after the
		 * reset (partially repeats alc_osc_reset()'s sequence).
		 */
		reg = CSR_READ_4(sc, ALC_MISC3);
		reg &= ~MISC3_25M_BY_SW;
		reg |= MISC3_25M_NOTO_INTNL;
		CSR_WRITE_4(sc, ALC_MISC3, reg);
		reg = CSR_READ_4(sc, ALC_MISC);
		reg &= ~MISC_INTNLOSC_OPEN;
		if (AR816X_REV(sc->alc_rev) <= AR816X_REV_A1)
			reg &= ~MISC_ISO_ENB;
		CSR_WRITE_4(sc, ALC_MISC, reg);
		DELAY(20);
	}
	/* SERDES clock-slowdown workaround for affected parts. */
	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0 ||
	    sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8152_B ||
	    sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8151_V2)
		CSR_WRITE_4(sc, ALC_SERDES_LOCK,
		    CSR_READ_4(sc, ALC_SERDES_LOCK) | SERDES_MAC_CLK_SLOWDOWN |
		    SERDES_PHY_CLK_SLOWDOWN);
}
3709 
/*
 * alc_init: [ifnet if_init] bring the interface up.
 *
 * Stops any pending I/O, resets the chip, initializes all descriptor
 * rings (Tx, Rx, Rx-return, CMB, SMB), then programs the hardware:
 * station address, DMA base addresses/counters, interrupt moderation,
 * TxQ/RxQ and flow-control parameters, DMA burst configuration and the
 * Tx/Rx MACs.  Finally installs the Rx filter, marks the interface
 * RUNNING, kicks the media and starts the one-second tick callout.
 *
 * Must be called with the interface serializer held.
 */
static void
alc_init(void *xsc)
{
	struct alc_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct mii_data *mii;
	uint8_t eaddr[ETHER_ADDR_LEN];
	bus_addr_t paddr;
	uint32_t reg, rxf_hi, rxf_lo;

	ASSERT_SERIALIZED(ifp->if_serializer);

	/*
	 * NOTE(review): mii is fetched but only referenced by the
	 * commented-out mii_mediachg() call near the end of this
	 * function; alc_mediachange_locked() is used instead.
	 */
	mii = device_get_softc(sc->alc_miibus);

	/*
	 * Cancel any pending I/O.
	 */
	alc_stop(sc);
	/*
	 * Reset the chip to a known state.
	 */
	alc_reset(sc);

	/* Initialize Rx descriptors. */
	if (alc_init_rx_ring(sc) != 0) {
		device_printf(sc->alc_dev, "no memory for Rx buffers.\n");
		alc_stop(sc);
		return;
	}
	alc_init_rr_ring(sc);
	alc_init_tx_ring(sc);
	alc_init_cmb(sc);
	alc_init_smb(sc);

	/* Enable all clocks. */
	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0) {
		CSR_WRITE_4(sc, ALC_CLK_GATING_CFG, CLK_GATING_DMAW_ENB |
		    CLK_GATING_DMAR_ENB | CLK_GATING_TXQ_ENB |
		    CLK_GATING_RXQ_ENB | CLK_GATING_TXMAC_ENB |
		    CLK_GATING_RXMAC_ENB);
		if (AR816X_REV(sc->alc_rev) >= AR816X_REV_B0)
			CSR_WRITE_4(sc, ALC_IDLE_DECISN_TIMER,
			    IDLE_DECISN_TIMER_DEFAULT_1MS);
	} else
		CSR_WRITE_4(sc, ALC_CLK_GATING_CFG, 0);

	/* Reprogram the station address. */
	bcopy(IF_LLADDR(ifp), eaddr, ETHER_ADDR_LEN);
	CSR_WRITE_4(sc, ALC_PAR0,
	    eaddr[2] << 24 | eaddr[3] << 16 | eaddr[4] << 8 | eaddr[5]);
	CSR_WRITE_4(sc, ALC_PAR1, eaddr[0] << 8 | eaddr[1]);
	/*
	 * Clear WOL status and disable all WOL feature as WOL
	 * would interfere Rx operation under normal environments.
	 */
	CSR_READ_4(sc, ALC_WOL_CFG);
	CSR_WRITE_4(sc, ALC_WOL_CFG, 0);
	/* Set Tx descriptor base addresses. */
	paddr = sc->alc_rdata.alc_tx_ring_paddr;
	CSR_WRITE_4(sc, ALC_TX_BASE_ADDR_HI, ALC_ADDR_HI(paddr));
	CSR_WRITE_4(sc, ALC_TDL_HEAD_ADDR_LO, ALC_ADDR_LO(paddr));
	/* We don't use high priority ring. */
	CSR_WRITE_4(sc, ALC_TDH_HEAD_ADDR_LO, 0);
	/* Set Tx descriptor counter. */
	CSR_WRITE_4(sc, ALC_TD_RING_CNT,
	    (ALC_TX_RING_CNT << TD_RING_CNT_SHIFT) & TD_RING_CNT_MASK);
	/* Set Rx descriptor base addresses. */
	paddr = sc->alc_rdata.alc_rx_ring_paddr;
	CSR_WRITE_4(sc, ALC_RX_BASE_ADDR_HI, ALC_ADDR_HI(paddr));
	CSR_WRITE_4(sc, ALC_RD0_HEAD_ADDR_LO, ALC_ADDR_LO(paddr));
	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) == 0) {
		/* We use one Rx ring. */
		CSR_WRITE_4(sc, ALC_RD1_HEAD_ADDR_LO, 0);
		CSR_WRITE_4(sc, ALC_RD2_HEAD_ADDR_LO, 0);
		CSR_WRITE_4(sc, ALC_RD3_HEAD_ADDR_LO, 0);
	}
	/* Set Rx descriptor counter. */
	CSR_WRITE_4(sc, ALC_RD_RING_CNT,
	    (ALC_RX_RING_CNT << RD_RING_CNT_SHIFT) & RD_RING_CNT_MASK);

	/*
	 * Let hardware split jumbo frames into alc_max_buf_sized chunks.
	 * if it do not fit the buffer size. Rx return descriptor holds
	 * a counter that indicates how many fragments were made by the
	 * hardware. The buffer size should be multiple of 8 bytes.
	 * Since hardware has limit on the size of buffer size, always
	 * use the maximum value.
	 * For strict-alignment architectures make sure to reduce buffer
	 * size by 8 bytes to make room for alignment fixup.
	 */
	sc->alc_buf_size = RX_BUF_SIZE_MAX;
	CSR_WRITE_4(sc, ALC_RX_BUF_SIZE, sc->alc_buf_size);

	paddr = sc->alc_rdata.alc_rr_ring_paddr;
	/* Set Rx return descriptor base addresses. */
	CSR_WRITE_4(sc, ALC_RRD0_HEAD_ADDR_LO, ALC_ADDR_LO(paddr));
	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) == 0) {
		/* We use one Rx return ring. */
		CSR_WRITE_4(sc, ALC_RRD1_HEAD_ADDR_LO, 0);
		CSR_WRITE_4(sc, ALC_RRD2_HEAD_ADDR_LO, 0);
		CSR_WRITE_4(sc, ALC_RRD3_HEAD_ADDR_LO, 0);
	}
	/* Set Rx return descriptor counter. */
	CSR_WRITE_4(sc, ALC_RRD_RING_CNT,
	    (ALC_RR_RING_CNT << RRD_RING_CNT_SHIFT) & RRD_RING_CNT_MASK);
	paddr = sc->alc_rdata.alc_cmb_paddr;
	CSR_WRITE_4(sc, ALC_CMB_BASE_ADDR_LO, ALC_ADDR_LO(paddr));
	paddr = sc->alc_rdata.alc_smb_paddr;
	CSR_WRITE_4(sc, ALC_SMB_BASE_ADDR_HI, ALC_ADDR_HI(paddr));
	CSR_WRITE_4(sc, ALC_SMB_BASE_ADDR_LO, ALC_ADDR_LO(paddr));

	if (sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8152_B) {
		/* Reconfigure SRAM - Vendor magic. */
		CSR_WRITE_4(sc, ALC_SRAM_RX_FIFO_LEN, 0x000002A0);
		CSR_WRITE_4(sc, ALC_SRAM_TX_FIFO_LEN, 0x00000100);
		CSR_WRITE_4(sc, ALC_SRAM_RX_FIFO_ADDR, 0x029F0000);
		CSR_WRITE_4(sc, ALC_SRAM_RD0_ADDR, 0x02BF02A0);
		CSR_WRITE_4(sc, ALC_SRAM_TX_FIFO_ADDR, 0x03BF02C0);
		CSR_WRITE_4(sc, ALC_SRAM_TD_ADDR, 0x03DF03C0);
		CSR_WRITE_4(sc, ALC_TXF_WATER_MARK, 0x00000000);
		CSR_WRITE_4(sc, ALC_RD_DMA_CFG, 0x00000000);
	}

	/* Tell hardware that we're ready to load DMA blocks. */
	CSR_WRITE_4(sc, ALC_DMA_BLOCK, DMA_BLOCK_LOAD);

	/* Configure interrupt moderation timer. */
	reg = ALC_USECS(sc->alc_int_rx_mod) << IM_TIMER_RX_SHIFT;
	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) == 0)
		reg |= ALC_USECS(sc->alc_int_tx_mod) << IM_TIMER_TX_SHIFT;
	CSR_WRITE_4(sc, ALC_IM_TIMER, reg);
	/*
	 * We don't want to automatic interrupt clear as task queue
	 * for the interrupt should know interrupt status.
	 */
	reg = CSR_READ_4(sc, ALC_MASTER_CFG);
	reg &= ~(MASTER_IM_RX_TIMER_ENB | MASTER_IM_TX_TIMER_ENB);
	reg |= MASTER_SA_TIMER_ENB;
	if (ALC_USECS(sc->alc_int_rx_mod) != 0)
		reg |= MASTER_IM_RX_TIMER_ENB;
	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) == 0 &&
	    ALC_USECS(sc->alc_int_tx_mod) != 0)
		reg |= MASTER_IM_TX_TIMER_ENB;
	CSR_WRITE_4(sc, ALC_MASTER_CFG, reg);
	/*
	 * Disable interrupt re-trigger timer. We don't want automatic
	 * re-triggering of un-ACKed interrupts.
	 */
	CSR_WRITE_4(sc, ALC_INTR_RETRIG_TIMER, ALC_USECS(0));
	/* Configure CMB. */
	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0) {
		CSR_WRITE_4(sc, ALC_CMB_TD_THRESH, ALC_TX_RING_CNT / 3);
		CSR_WRITE_4(sc, ALC_CMB_TX_TIMER,
		    ALC_USECS(sc->alc_int_tx_mod));
	} else {
		if ((sc->alc_flags & ALC_FLAG_CMB_BUG) == 0) {
			CSR_WRITE_4(sc, ALC_CMB_TD_THRESH, 4);
			CSR_WRITE_4(sc, ALC_CMB_TX_TIMER, ALC_USECS(5000));
		} else
			CSR_WRITE_4(sc, ALC_CMB_TX_TIMER, ALC_USECS(0));
	}

	/*
	 * Hardware can be configured to issue SMB interrupt based
	 * on programmed interval. Since there is a callout that is
	 * invoked for every hz in driver we use that instead of
	 * relying on periodic SMB interrupt.
	 */
	CSR_WRITE_4(sc, ALC_SMB_STAT_TIMER, ALC_USECS(0));
	/* Clear MAC statistics. */
	alc_stats_clear(sc);

	/*
	 * Always use maximum frame size that controller can support.
	 * Otherwise received frames that has larger frame length
	 * than alc(4) MTU would be silently dropped in hardware. This
	 * would make path-MTU discovery hard as sender wouldn't get
	 * any responses from receiver. alc(4) supports
	 * multi-fragmented frames on Rx path so it has no issue on
	 * assembling fragmented frames. Using maximum frame size also
	 * removes the need to reinitialize hardware when interface
	 * MTU configuration was changed.
	 *
	 * Be conservative in what you do, be liberal in what you
	 * accept from others - RFC 793.
	 */
	CSR_WRITE_4(sc, ALC_FRAME_SIZE, sc->alc_ident->max_framelen);

	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) == 0) {
		/* Disable header split(?) */
		CSR_WRITE_4(sc, ALC_HDS_CFG, 0);

		/* Configure IPG/IFG parameters. */
		CSR_WRITE_4(sc, ALC_IPG_IFG_CFG,
		    ((IPG_IFG_IPGT_DEFAULT << IPG_IFG_IPGT_SHIFT) &
			IPG_IFG_IPGT_MASK) |
		    ((IPG_IFG_MIFG_DEFAULT << IPG_IFG_MIFG_SHIFT) &
			IPG_IFG_MIFG_MASK) |
		    ((IPG_IFG_IPG1_DEFAULT << IPG_IFG_IPG1_SHIFT) &
			IPG_IFG_IPG1_MASK) |
		    ((IPG_IFG_IPG2_DEFAULT << IPG_IFG_IPG2_SHIFT) &
			IPG_IFG_IPG2_MASK));
		/* Set parameters for half-duplex media. */
		CSR_WRITE_4(sc, ALC_HDPX_CFG,
		    ((HDPX_CFG_LCOL_DEFAULT << HDPX_CFG_LCOL_SHIFT) &
		    HDPX_CFG_LCOL_MASK) |
		    ((HDPX_CFG_RETRY_DEFAULT << HDPX_CFG_RETRY_SHIFT) &
		    HDPX_CFG_RETRY_MASK) | HDPX_CFG_EXC_DEF_EN |
		    ((HDPX_CFG_ABEBT_DEFAULT << HDPX_CFG_ABEBT_SHIFT) &
		    HDPX_CFG_ABEBT_MASK) |
		    ((HDPX_CFG_JAMIPG_DEFAULT << HDPX_CFG_JAMIPG_SHIFT) &
		    HDPX_CFG_JAMIPG_MASK));
	}

	/*
	 * Set TSO/checksum offload threshold. For frames that is
	 * larger than this threshold, hardware wouldn't do
	 * TSO/checksum offloading.
	 */
	reg = (sc->alc_ident->max_framelen >> TSO_OFFLOAD_THRESH_UNIT_SHIFT) &
		TSO_OFFLOAD_THRESH_MASK;
	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0)
		reg |= TSO_OFFLOAD_ERRLGPKT_DROP_ENB;
	CSR_WRITE_4(sc, ALC_TSO_OFFLOAD_THRESH, reg);
	/* Configure TxQ. */
	reg = (alc_dma_burst[sc->alc_dma_rd_burst] <<
	    TXQ_CFG_TX_FIFO_BURST_SHIFT) & TXQ_CFG_TX_FIFO_BURST_MASK;
	if (sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8152_B ||
	    sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8152_B2) {
		reg >>= 1;
	}
	reg |= (TXQ_CFG_TD_BURST_DEFAULT << TXQ_CFG_TD_BURST_SHIFT) &
	    TXQ_CFG_TD_BURST_MASK;
	reg |= TXQ_CFG_IP_OPTION_ENB | TXQ_CFG_8023_ENB;
	CSR_WRITE_4(sc, ALC_TXQ_CFG, reg | TXQ_CFG_ENHANCED_MODE);

	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0) {
		/* Burst/weighted-round-robin setup for the extra Tx queues. */
		reg = (TXQ_CFG_TD_BURST_DEFAULT << HQTD_CFG_Q1_BURST_SHIFT |
		    TXQ_CFG_TD_BURST_DEFAULT << HQTD_CFG_Q2_BURST_SHIFT |
		    TXQ_CFG_TD_BURST_DEFAULT << HQTD_CFG_Q3_BURST_SHIFT |
		    HQTD_CFG_BURST_ENB);
		CSR_WRITE_4(sc, ALC_HQTD_CFG, reg);
		reg = WRR_PRI_RESTRICT_NONE;
		reg |= (WRR_PRI_DEFAULT << WRR_PRI0_SHIFT |
		    WRR_PRI_DEFAULT << WRR_PRI1_SHIFT |
		    WRR_PRI_DEFAULT << WRR_PRI2_SHIFT |
		    WRR_PRI_DEFAULT << WRR_PRI3_SHIFT);
		CSR_WRITE_4(sc, ALC_WRR, reg);
	} else {
		/* Configure Rx free descriptor pre-fetching. */
		CSR_WRITE_4(sc, ALC_RX_RD_FREE_THRESH,
		    ((RX_RD_FREE_THRESH_HI_DEFAULT <<
		    RX_RD_FREE_THRESH_HI_SHIFT) & RX_RD_FREE_THRESH_HI_MASK) |
		    ((RX_RD_FREE_THRESH_LO_DEFAULT <<
		    RX_RD_FREE_THRESH_LO_SHIFT) & RX_RD_FREE_THRESH_LO_MASK));
	}

	/*
	 * Configure flow control parameters.
	 * XON  : 80% of Rx FIFO
	 * XOFF : 30% of Rx FIFO
	 */
	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0) {
		reg = CSR_READ_4(sc, ALC_SRAM_RX_FIFO_LEN);
		reg &= SRAM_RX_FIFO_LEN_MASK;
		reg *= 8;
		if (reg > 8 * 1024)
			reg -= RX_FIFO_PAUSE_816X_RSVD;
		else
			reg -= RX_BUF_SIZE_MAX;
		reg /= 8;
		CSR_WRITE_4(sc, ALC_RX_FIFO_PAUSE_THRESH,
		    ((reg << RX_FIFO_PAUSE_THRESH_LO_SHIFT) &
		    RX_FIFO_PAUSE_THRESH_LO_MASK) |
		    (((RX_FIFO_PAUSE_816X_RSVD / 8) <<
		    RX_FIFO_PAUSE_THRESH_HI_SHIFT) &
		    RX_FIFO_PAUSE_THRESH_HI_MASK));
	} else if (sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8131 ||
	    sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8132) {
		reg = CSR_READ_4(sc, ALC_SRAM_RX_FIFO_LEN);
		rxf_hi = (reg * 8) / 10;
		rxf_lo = (reg * 3) / 10;
		CSR_WRITE_4(sc, ALC_RX_FIFO_PAUSE_THRESH,
			((rxf_lo << RX_FIFO_PAUSE_THRESH_LO_SHIFT) &
			 RX_FIFO_PAUSE_THRESH_LO_MASK) |
			((rxf_hi << RX_FIFO_PAUSE_THRESH_HI_SHIFT) &
			 RX_FIFO_PAUSE_THRESH_HI_MASK));
	}

	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) == 0) {
		/* Disable RSS until I understand L1C/L2C's RSS logic. */
		CSR_WRITE_4(sc, ALC_RSS_IDT_TABLE0, 0);
		CSR_WRITE_4(sc, ALC_RSS_CPU, 0);
	}

	/* Configure RxQ. */
	reg = (RXQ_CFG_RD_BURST_DEFAULT << RXQ_CFG_RD_BURST_SHIFT) &
	    RXQ_CFG_RD_BURST_MASK;
	reg |= RXQ_CFG_RSS_MODE_DIS;
	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0) {
		reg |= (RXQ_CFG_816X_IDT_TBL_SIZE_DEFAULT <<
		    RXQ_CFG_816X_IDT_TBL_SIZE_SHIFT) &
		    RXQ_CFG_816X_IDT_TBL_SIZE_MASK;
		if ((sc->alc_flags & ALC_FLAG_FASTETHER) == 0)
			reg |= RXQ_CFG_ASPM_THROUGHPUT_LIMIT_100M;
	} else {
		if ((sc->alc_flags & ALC_FLAG_FASTETHER) == 0 &&
		    sc->alc_ident->deviceid != DEVICEID_ATHEROS_AR8151_V2)
			reg |= RXQ_CFG_ASPM_THROUGHPUT_LIMIT_100M;
	}
	CSR_WRITE_4(sc, ALC_RXQ_CFG, reg);

	/* Configure DMA parameters. */
	reg = DMA_CFG_OUT_ORDER | DMA_CFG_RD_REQ_PRI;
	reg |= sc->alc_rcb;
	if ((sc->alc_flags & ALC_FLAG_CMB_BUG) == 0)
		reg |= DMA_CFG_CMB_ENB;
	if ((sc->alc_flags & ALC_FLAG_SMB_BUG) == 0)
		reg |= DMA_CFG_SMB_ENB;
	else
		reg |= DMA_CFG_SMB_DIS;
	reg |= (sc->alc_dma_rd_burst & DMA_CFG_RD_BURST_MASK) <<
	    DMA_CFG_RD_BURST_SHIFT;
	reg |= (sc->alc_dma_wr_burst & DMA_CFG_WR_BURST_MASK) <<
	    DMA_CFG_WR_BURST_SHIFT;
	reg |= (DMA_CFG_RD_DELAY_CNT_DEFAULT << DMA_CFG_RD_DELAY_CNT_SHIFT) &
	    DMA_CFG_RD_DELAY_CNT_MASK;
	reg |= (DMA_CFG_WR_DELAY_CNT_DEFAULT << DMA_CFG_WR_DELAY_CNT_SHIFT) &
	    DMA_CFG_WR_DELAY_CNT_MASK;
	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0) {
		switch (AR816X_REV(sc->alc_rev)) {
		case AR816X_REV_A0:
		case AR816X_REV_A1:
			reg |= DMA_CFG_RD_CHNL_SEL_2;
			break;
		case AR816X_REV_B0:
			/* FALLTHROUGH */
		default:
			reg |= DMA_CFG_RD_CHNL_SEL_4;
			break;
		}
	}
	CSR_WRITE_4(sc, ALC_DMA_CFG, reg);

	/*
	 * Configure Tx/Rx MACs.
	 *  - Auto-padding for short frames.
	 *  - Enable CRC generation.
	 *  Actual reconfiguration of MAC for resolved speed/duplex
	 *  is followed after detection of link establishment.
	 *  AR813x/AR815x always does checksum computation regardless
	 *  of MAC_CFG_RXCSUM_ENB bit. Also the controller is known to
	 *  have bug in protocol field in Rx return structure so
	 *  these controllers can't handle fragmented frames. Disable
	 *  Rx checksum offloading until there is a newer controller
	 *  that has sane implementation.
	 */
	reg = MAC_CFG_TX_CRC_ENB | MAC_CFG_TX_AUTO_PAD | MAC_CFG_FULL_DUPLEX |
	    ((MAC_CFG_PREAMBLE_DEFAULT << MAC_CFG_PREAMBLE_SHIFT) &
	    MAC_CFG_PREAMBLE_MASK);
	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0 ||
	    sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8151 ||
	    sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8151_V2 ||
	    sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8152_B2) {
		reg |= MAC_CFG_HASH_ALG_CRC32 | MAC_CFG_SPEED_MODE_SW;
	}
	if ((sc->alc_flags & ALC_FLAG_FASTETHER) != 0)
		reg |= MAC_CFG_SPEED_10_100;
	else
		reg |= MAC_CFG_SPEED_1000;
	CSR_WRITE_4(sc, ALC_MAC_CFG, reg);

	/* Set up the receive filter. */
	alc_rxfilter(sc);
	alc_rxvlan(sc);

	/* Acknowledge all pending interrupts and clear it. */
	CSR_WRITE_4(sc, ALC_INTR_MASK, ALC_INTRS);
	CSR_WRITE_4(sc, ALC_INTR_STATUS, 0xFFFFFFFF);
	CSR_WRITE_4(sc, ALC_INTR_STATUS, 0);

	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	sc->alc_flags &= ~ALC_FLAG_LINK;
	/* Switch to the current media. */
	/*mii_mediachg(mii);*/
	alc_mediachange_locked(sc);

	callout_reset(&sc->alc_tick_ch, hz, alc_tick, sc);

}
4102 
/*
 * alc_stop: take the interface down and quiesce the hardware.
 *
 * Marks the interface !RUNNING, stops the tick callout and watchdog,
 * snapshots MAC statistics, masks/acks interrupts, disables DMA and
 * the Rx/Tx MACs, then releases every mbuf still held by the Rx chain
 * and the Tx/Rx descriptor rings.  The teardown order matters: DMA
 * and the MACs must be stopped before buffers are unloaded.
 *
 * Must be called with the interface serializer held.
 */
static void
alc_stop(struct alc_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct alc_txdesc *txd;
	struct alc_rxdesc *rxd;
	uint32_t reg;
	int i;

	ASSERT_SERIALIZED(ifp->if_serializer);

	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp->if_flags &= ~IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);
	sc->alc_flags &= ~ALC_FLAG_LINK;
	callout_stop(&sc->alc_tick_ch);
	sc->alc_watchdog_timer = 0;
	/* Snapshot hardware statistics before the MAC is stopped. */
	alc_stats_update(sc);
	/* Disable interrupts. */
	CSR_WRITE_4(sc, ALC_INTR_MASK, 0);
	CSR_WRITE_4(sc, ALC_INTR_STATUS, 0xFFFFFFFF);
	/* Disable DMA. */
	reg = CSR_READ_4(sc, ALC_DMA_CFG);
	reg &= ~(DMA_CFG_CMB_ENB | DMA_CFG_SMB_ENB);
	reg |= DMA_CFG_SMB_DIS;
	CSR_WRITE_4(sc, ALC_DMA_CFG, reg);
	DELAY(1000);
	/* Stop Rx/Tx MACs. */
	alc_stop_mac(sc);
	/* Disable interrupts which might be touched in taskq handler. */
	CSR_WRITE_4(sc, ALC_INTR_STATUS, 0xFFFFFFFF);

	/* Disable L0s/L1s */
	alc_aspm(sc, 0, IFM_UNKNOWN);
	/* Reclaim Rx buffers that have been processed. */
	if (sc->alc_cdata.alc_rxhead != NULL)
		m_freem(sc->alc_cdata.alc_rxhead);
	ALC_RXCHAIN_RESET(sc);
	/*
	 * Free Tx/Rx mbufs still in the queues.
	 */
	for (i = 0; i < ALC_RX_RING_CNT; i++) {
		rxd = &sc->alc_cdata.alc_rxdesc[i];
		if (rxd->rx_m != NULL) {
			bus_dmamap_sync(sc->alc_cdata.alc_rx_tag,
			    rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->alc_cdata.alc_rx_tag,
			    rxd->rx_dmamap);
			m_freem(rxd->rx_m);
			rxd->rx_m = NULL;
		}
	}
	for (i = 0; i < ALC_TX_RING_CNT; i++) {
		txd = &sc->alc_cdata.alc_txdesc[i];
		if (txd->tx_m != NULL) {
			bus_dmamap_sync(sc->alc_cdata.alc_tx_tag,
			    txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->alc_cdata.alc_tx_tag,
			    txd->tx_dmamap);
			m_freem(txd->tx_m);
			txd->tx_m = NULL;
		}
	}
}
4169 
4170 static void
4171 alc_stop_mac(struct alc_softc *sc)
4172 {
4173 	uint32_t reg;
4174 	int i;
4175 
4176 	alc_stop_queue(sc);
4177 	/* Disable Rx/Tx MAC. */
4178 	reg = CSR_READ_4(sc, ALC_MAC_CFG);
4179 	if ((reg & (MAC_CFG_TX_ENB | MAC_CFG_RX_ENB)) != 0) {
4180 		reg &= ~(MAC_CFG_TX_ENB | MAC_CFG_RX_ENB);
4181 		CSR_WRITE_4(sc, ALC_MAC_CFG, reg);
4182 	}
4183 	for (i = ALC_TIMEOUT; i > 0; i--) {
4184 		reg = CSR_READ_4(sc, ALC_IDLE_STATUS);
4185 		if ((reg & (IDLE_STATUS_RXMAC | IDLE_STATUS_TXMAC)) == 0)
4186 			break;
4187 		DELAY(10);
4188 	}
4189 	if (i == 0)
4190 		device_printf(sc->alc_dev,
4191 		    "could not disable Rx/Tx MAC(0x%08x)!\n", reg);
4192 }
4193 
4194 static void
4195 alc_start_queue(struct alc_softc *sc)
4196 {
4197 	uint32_t qcfg[] = {
4198 		0,
4199 		RXQ_CFG_QUEUE0_ENB,
4200 		RXQ_CFG_QUEUE0_ENB | RXQ_CFG_QUEUE1_ENB,
4201 		RXQ_CFG_QUEUE0_ENB | RXQ_CFG_QUEUE1_ENB | RXQ_CFG_QUEUE2_ENB,
4202 		RXQ_CFG_ENB
4203 	};
4204 	uint32_t cfg;
4205 
4206 	/* Enable RxQ. */
4207 	cfg = CSR_READ_4(sc, ALC_RXQ_CFG);
4208 	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) == 0) {
4209 		cfg &= ~RXQ_CFG_ENB;
4210 		cfg |= qcfg[1];
4211 	} else
4212 		cfg |= RXQ_CFG_QUEUE0_ENB;
4213 	CSR_WRITE_4(sc, ALC_RXQ_CFG, cfg);
4214 	/* Enable TxQ. */
4215 	cfg = CSR_READ_4(sc, ALC_TXQ_CFG);
4216 	cfg |= TXQ_CFG_ENB;
4217 	CSR_WRITE_4(sc, ALC_TXQ_CFG, cfg);
4218 }
4219 
4220 static void
4221 alc_stop_queue(struct alc_softc *sc)
4222 {
4223 	uint32_t reg;
4224 	int i;
4225 
4226 	/* Disable RxQ. */
4227 	reg = CSR_READ_4(sc, ALC_RXQ_CFG);
4228 	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) == 0) {
4229 		if ((reg & RXQ_CFG_ENB) != 0) {
4230 			reg &= ~RXQ_CFG_ENB;
4231 			CSR_WRITE_4(sc, ALC_RXQ_CFG, reg);
4232 		}
4233 	} else {
4234 		if ((reg & RXQ_CFG_QUEUE0_ENB) != 0) {
4235 			reg &= ~RXQ_CFG_QUEUE0_ENB;
4236 			CSR_WRITE_4(sc, ALC_RXQ_CFG, reg);
4237 		}
4238 	}
4239 	/* Disable TxQ. */
4240 	reg = CSR_READ_4(sc, ALC_TXQ_CFG);
4241 	if ((reg & TXQ_CFG_ENB) != 0) {
4242 		reg &= ~TXQ_CFG_ENB;
4243 		CSR_WRITE_4(sc, ALC_TXQ_CFG, reg);
4244 	}
4245 	DELAY(40);
4246 	for (i = ALC_TIMEOUT; i > 0; i--) {
4247 		reg = CSR_READ_4(sc, ALC_IDLE_STATUS);
4248 		if ((reg & (IDLE_STATUS_RXQ | IDLE_STATUS_TXQ)) == 0)
4249 			break;
4250 		DELAY(10);
4251 	}
4252 	if (i == 0)
4253 		device_printf(sc->alc_dev,
4254 		    "could not disable RxQ/TxQ (0x%08x)!\n", reg);
4255 }
4256 
4257 static void
4258 alc_init_tx_ring(struct alc_softc *sc)
4259 {
4260 	struct alc_ring_data *rd;
4261 	struct alc_txdesc *txd;
4262 	int i;
4263 
4264 	sc->alc_cdata.alc_tx_prod = 0;
4265 	sc->alc_cdata.alc_tx_cons = 0;
4266 	sc->alc_cdata.alc_tx_cnt = 0;
4267 
4268 	rd = &sc->alc_rdata;
4269 	bzero(rd->alc_tx_ring, ALC_TX_RING_SZ);
4270 	for (i = 0; i < ALC_TX_RING_CNT; i++) {
4271 		txd = &sc->alc_cdata.alc_txdesc[i];
4272 		txd->tx_m = NULL;
4273 	}
4274 
4275 	bus_dmamap_sync(sc->alc_cdata.alc_tx_ring_tag,
4276 	    sc->alc_cdata.alc_tx_ring_map, BUS_DMASYNC_PREWRITE);
4277 }
4278 
4279 static int
4280 alc_init_rx_ring(struct alc_softc *sc)
4281 {
4282 	struct alc_ring_data *rd;
4283 	struct alc_rxdesc *rxd;
4284 	int i;
4285 
4286 	sc->alc_cdata.alc_rx_cons = ALC_RX_RING_CNT - 1;
4287 	rd = &sc->alc_rdata;
4288 	bzero(rd->alc_rx_ring, ALC_RX_RING_SZ);
4289 	for (i = 0; i < ALC_RX_RING_CNT; i++) {
4290 		rxd = &sc->alc_cdata.alc_rxdesc[i];
4291 		rxd->rx_m = NULL;
4292 		rxd->rx_desc = &rd->alc_rx_ring[i];
4293 		if (alc_newbuf(sc, rxd, TRUE) != 0)
4294 			return (ENOBUFS);
4295 	}
4296 
4297 	/*
4298 	 * Since controller does not update Rx descriptors, driver
4299 	 * does have to read Rx descriptors back so BUS_DMASYNC_PREWRITE
4300 	 * is enough to ensure coherence.
4301 	 */
4302 	bus_dmamap_sync(sc->alc_cdata.alc_rx_ring_tag,
4303 	    sc->alc_cdata.alc_rx_ring_map, BUS_DMASYNC_PREWRITE);
4304 	/* Let controller know availability of new Rx buffers. */
4305 	CSR_WRITE_4(sc, ALC_MBOX_RD0_PROD_IDX, sc->alc_cdata.alc_rx_cons);
4306 
4307 	return (0);
4308 }
4309 
4310 static void
4311 alc_init_rr_ring(struct alc_softc *sc)
4312 {
4313 	struct alc_ring_data *rd;
4314 
4315 	sc->alc_cdata.alc_rr_cons = 0;
4316 	ALC_RXCHAIN_RESET(sc);
4317 
4318 	rd = &sc->alc_rdata;
4319 	bzero(rd->alc_rr_ring, ALC_RR_RING_SZ);
4320 	bus_dmamap_sync(sc->alc_cdata.alc_rr_ring_tag,
4321 	    sc->alc_cdata.alc_rr_ring_map,
4322 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
4323 }
4324 
4325 static void
4326 alc_init_cmb(struct alc_softc *sc)
4327 {
4328 	struct alc_ring_data *rd;
4329 
4330 	rd = &sc->alc_rdata;
4331 	bzero(rd->alc_cmb, ALC_CMB_SZ);
4332 	bus_dmamap_sync(sc->alc_cdata.alc_cmb_tag, sc->alc_cdata.alc_cmb_map,
4333 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
4334 }
4335 
4336 static void
4337 alc_init_smb(struct alc_softc *sc)
4338 {
4339 	struct alc_ring_data *rd;
4340 
4341 	rd = &sc->alc_rdata;
4342 	bzero(rd->alc_smb, ALC_SMB_SZ);
4343 	bus_dmamap_sync(sc->alc_cdata.alc_smb_tag, sc->alc_cdata.alc_smb_map,
4344 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
4345 }
4346 
4347 static void
4348 alc_rxvlan(struct alc_softc *sc)
4349 {
4350 	struct ifnet *ifp;
4351 	uint32_t reg;
4352 
4353 	ifp = sc->alc_ifp;
4354 	reg = CSR_READ_4(sc, ALC_MAC_CFG);
4355 	if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
4356 		reg |= MAC_CFG_VLAN_TAG_STRIP;
4357 	else
4358 		reg &= ~MAC_CFG_VLAN_TAG_STRIP;
4359 	CSR_WRITE_4(sc, ALC_MAC_CFG, reg);
4360 }
4361 
4362 static void
4363 alc_rxfilter(struct alc_softc *sc)
4364 {
4365 	struct ifnet *ifp;
4366 	struct ifmultiaddr *ifma;
4367 	uint32_t crc;
4368 	uint32_t mchash[2];
4369 	uint32_t rxcfg;
4370 
4371 	ifp = sc->alc_ifp;
4372 
4373 	bzero(mchash, sizeof(mchash));
4374 	rxcfg = CSR_READ_4(sc, ALC_MAC_CFG);
4375 	rxcfg &= ~(MAC_CFG_ALLMULTI | MAC_CFG_BCAST | MAC_CFG_PROMISC);
4376 	if ((ifp->if_flags & IFF_BROADCAST) != 0)
4377 		rxcfg |= MAC_CFG_BCAST;
4378 	if ((ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
4379 		if ((ifp->if_flags & IFF_PROMISC) != 0)
4380 			rxcfg |= MAC_CFG_PROMISC;
4381 		if ((ifp->if_flags & IFF_ALLMULTI) != 0)
4382 			rxcfg |= MAC_CFG_ALLMULTI;
4383 		mchash[0] = 0xFFFFFFFF;
4384 		mchash[1] = 0xFFFFFFFF;
4385 		goto chipit;
4386 	}
4387 
4388 #if 0
4389 	/* XXX */
4390 	if_maddr_rlock(ifp);
4391 #endif
4392 	TAILQ_FOREACH(ifma, &sc->alc_ifp->if_multiaddrs, ifma_link) {
4393 		if (ifma->ifma_addr->sa_family != AF_LINK)
4394 			continue;
4395 		crc = ether_crc32_be(LLADDR((struct sockaddr_dl *)
4396 		    ifma->ifma_addr), ETHER_ADDR_LEN);
4397 		mchash[crc >> 31] |= 1 << ((crc >> 26) & 0x1f);
4398 	}
4399 #if 0
4400 	/* XXX */
4401 	if_maddr_runlock(ifp);
4402 #endif
4403 
4404 chipit:
4405 	CSR_WRITE_4(sc, ALC_MAR0, mchash[0]);
4406 	CSR_WRITE_4(sc, ALC_MAR1, mchash[1]);
4407 	CSR_WRITE_4(sc, ALC_MAC_CFG, rxcfg);
4408 }
4409 
4410 static int
4411 sysctl_hw_alc_proc_limit(SYSCTL_HANDLER_ARGS)
4412 {
4413 	return (sysctl_int_range(oidp, arg1, arg2, req,
4414 	    ALC_PROC_MIN, ALC_PROC_MAX));
4415 }
4416 
4417 static int
4418 sysctl_hw_alc_int_mod(SYSCTL_HANDLER_ARGS)
4419 {
4420 
4421 	return (sysctl_int_range(oidp, arg1, arg2, req,
4422 	    ALC_IM_TIMER_MIN, ALC_IM_TIMER_MAX));
4423 }
4424