/*	$OpenBSD: if_ale.c,v 1.44 2016/04/13 10:34:32 mpi Exp $	*/
/*-
 * Copyright (c) 2008, Pyun YongHyeon <yongari@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/dev/ale/if_ale.c,v 1.3 2008/12/03 09:01:12 yongari Exp $
 */

/* Driver for Atheros AR8121/AR8113/AR8114 PCIe Ethernet. */

#include "bpfilter.h"
#include "vlan.h"

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/queue.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/timeout.h>
#include <sys/socket.h>

#include <machine/bus.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_llc.h>
#include <net/if_media.h>

#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/if_ether.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_alereg.h>
int	ale_match(struct device *, void *, void *);
void	ale_attach(struct device *, struct device *, void *);
int	ale_detach(struct device *, int);
int	ale_activate(struct device *, int);

int	ale_miibus_readreg(struct device *, int, int);
void	ale_miibus_writereg(struct device *, int, int, int);
void	ale_miibus_statchg(struct device *);

int	ale_init(struct ifnet *);
void	ale_start(struct ifnet *);
int	ale_ioctl(struct ifnet *, u_long, caddr_t);
void	ale_watchdog(struct ifnet *);
int	ale_mediachange(struct ifnet *);
void	ale_mediastatus(struct ifnet *, struct ifmediareq *);

int	ale_intr(void *);
int	ale_rxeof(struct ale_softc *);
void	ale_rx_update_page(struct ale_softc *, struct ale_rx_page **,
	    uint32_t, uint32_t *);
void	ale_rxcsum(struct ale_softc *, struct mbuf *, uint32_t);
void	ale_txeof(struct ale_softc *);

int	ale_dma_alloc(struct ale_softc *);
void	ale_dma_free(struct ale_softc *);
int	ale_encap(struct ale_softc *, struct mbuf *);
void	ale_init_rx_pages(struct ale_softc *);
void	ale_init_tx_ring(struct ale_softc *);

void	ale_stop(struct ale_softc *);
void	ale_tick(void *);
void	ale_get_macaddr(struct ale_softc *);
void	ale_mac_config(struct ale_softc *);
void	ale_phy_reset(struct ale_softc *);
void	ale_reset(struct ale_softc *);
void	ale_iff(struct ale_softc *);
void	ale_rxvlan(struct ale_softc *);
void	ale_stats_clear(struct ale_softc *);
void	ale_stats_update(struct ale_softc *);
void	ale_stop_mac(struct ale_softc *);

const struct pci_matchid ale_devices[] = {
	{ PCI_VENDOR_ATTANSIC, PCI_PRODUCT_ATTANSIC_L1E }
};

struct cfattach ale_ca = {
	sizeof (struct ale_softc), ale_match, ale_attach, NULL,
	ale_activate
};

struct cfdriver ale_cd = {
	NULL, "ale", DV_IFNET
};

int aledebug = 0;
#define DPRINTF(x)	do { if (aledebug) printf x; } while (0)
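/*
 * Debug output is compiled in unconditionally and enabled at run time
 * by setting aledebug to a non-zero value.
 */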

#define ALE_CSUM_FEATURES	(M_TCP_CSUM_OUT | M_UDP_CSUM_OUT)

int
ale_miibus_readreg(struct device *dev, int phy, int reg)
{
	struct ale_softc *sc = (struct ale_softc *)dev;
	uint32_t v;
	int i;

	if (phy != sc->ale_phyaddr)
		return (0);

	if ((sc->ale_flags & ALE_FLAG_FASTETHER) != 0 &&
	    reg == MII_EXTSR)
		return (0);

	CSR_WRITE_4(sc, ALE_MDIO, MDIO_OP_EXECUTE | MDIO_OP_READ |
	    MDIO_SUP_PREAMBLE | MDIO_CLK_25_4 | MDIO_REG_ADDR(reg));
	for (i = ALE_PHY_TIMEOUT; i > 0; i--) {
		DELAY(5);
		v = CSR_READ_4(sc, ALE_MDIO);
		if ((v & (MDIO_OP_EXECUTE | MDIO_OP_BUSY)) == 0)
			break;
	}

	if (i == 0) {
		printf("%s: phy read timeout: phy %d, reg %d\n",
		    sc->sc_dev.dv_xname, phy, reg);
		return (0);
	}

	return ((v & MDIO_DATA_MASK) >> MDIO_DATA_SHIFT);
}

void
ale_miibus_writereg(struct device *dev, int phy, int reg, int val)
{
	struct ale_softc *sc = (struct ale_softc *)dev;
	uint32_t v;
	int i;

	if (phy != sc->ale_phyaddr)
		return;

	CSR_WRITE_4(sc, ALE_MDIO, MDIO_OP_EXECUTE | MDIO_OP_WRITE |
	    (val & MDIO_DATA_MASK) << MDIO_DATA_SHIFT |
	    MDIO_SUP_PREAMBLE | MDIO_CLK_25_4 | MDIO_REG_ADDR(reg));
	for (i = ALE_PHY_TIMEOUT; i > 0; i--) {
		DELAY(5);
		v = CSR_READ_4(sc, ALE_MDIO);
		if ((v & (MDIO_OP_EXECUTE | MDIO_OP_BUSY)) == 0)
			break;
	}

	if (i == 0)
		printf("%s: phy write timeout: phy %d, reg %d\n",
		    sc->sc_dev.dv_xname, phy, reg);
}

void
ale_miibus_statchg(struct device *dev)
{
	struct ale_softc *sc = (struct ale_softc *)dev;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct mii_data *mii = &sc->sc_miibus;
	uint32_t reg;

	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return;

	sc->ale_flags &= ~ALE_FLAG_LINK;
	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
	    (IFM_ACTIVE | IFM_AVALID)) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			sc->ale_flags |= ALE_FLAG_LINK;
			break;

		case IFM_1000_T:
			if ((sc->ale_flags & ALE_FLAG_FASTETHER) == 0)
				sc->ale_flags |= ALE_FLAG_LINK;
			break;

		default:
			break;
		}
	}

	/* Stop Rx/Tx MACs. */
	ale_stop_mac(sc);

	/* Program MACs with resolved speed/duplex/flow-control. */
	if ((sc->ale_flags & ALE_FLAG_LINK) != 0) {
		ale_mac_config(sc);
		/* Reenable Tx/Rx MACs. */
		reg = CSR_READ_4(sc, ALE_MAC_CFG);
		reg |= MAC_CFG_TX_ENB | MAC_CFG_RX_ENB;
		CSR_WRITE_4(sc, ALE_MAC_CFG, reg);
	}
}

void
ale_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct ale_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_miibus;

	if ((ifp->if_flags & IFF_UP) == 0)
		return;

	mii_pollstat(mii);
	ifmr->ifm_status = mii->mii_media_status;
	ifmr->ifm_active = mii->mii_media_active;
}

int
ale_mediachange(struct ifnet *ifp)
{
	struct ale_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_miibus;
	int error;

	if (mii->mii_instance != 0) {
		struct mii_softc *miisc;

		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
	error = mii_mediachg(mii);

	return (error);
}

int
ale_match(struct device *dev, void *match, void *aux)
{
	return pci_matchbyid((struct pci_attach_args *)aux, ale_devices,
	    sizeof (ale_devices) / sizeof (ale_devices[0]));
}

void
ale_get_macaddr(struct ale_softc *sc)
{
	uint32_t ea[2], reg;
	int i, vpdc;

	reg = CSR_READ_4(sc, ALE_SPI_CTRL);
	if ((reg & SPI_VPD_ENB) != 0) {
		reg &= ~SPI_VPD_ENB;
		CSR_WRITE_4(sc, ALE_SPI_CTRL, reg);
	}

	if (pci_get_capability(sc->sc_pct, sc->sc_pcitag, PCI_CAP_VPD,
	    &vpdc, NULL)) {
		/*
		 * PCI VPD capability found, let TWSI reload the EEPROM.
		 * This sets the Ethernet address of the controller.
		 */
		CSR_WRITE_4(sc, ALE_TWSI_CTRL, CSR_READ_4(sc, ALE_TWSI_CTRL) |
		    TWSI_CTRL_SW_LD_START);
		for (i = 100; i > 0; i--) {
			DELAY(1000);
			reg = CSR_READ_4(sc, ALE_TWSI_CTRL);
			if ((reg & TWSI_CTRL_SW_LD_START) == 0)
				break;
		}
		if (i == 0)
			printf("%s: reloading EEPROM timeout!\n",
			    sc->sc_dev.dv_xname);
	} else {
		if (aledebug)
			printf("%s: PCI VPD capability not found!\n",
			    sc->sc_dev.dv_xname);
	}

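	/*
	 * The station address straddles two registers: PAR1 holds the
	 * two most significant bytes and PAR0 the remaining four, so an
	 * address aa:bb:cc:dd:ee:ff reads back as PAR1 = 0x0000aabb and
	 * PAR0 = 0xccddeeff.
	 */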
	ea[0] = CSR_READ_4(sc, ALE_PAR0);
	ea[1] = CSR_READ_4(sc, ALE_PAR1);
	sc->ale_eaddr[0] = (ea[1] >> 8) & 0xFF;
	sc->ale_eaddr[1] = (ea[1] >> 0) & 0xFF;
	sc->ale_eaddr[2] = (ea[0] >> 24) & 0xFF;
	sc->ale_eaddr[3] = (ea[0] >> 16) & 0xFF;
	sc->ale_eaddr[4] = (ea[0] >> 8) & 0xFF;
	sc->ale_eaddr[5] = (ea[0] >> 0) & 0xFF;
}

void
ale_phy_reset(struct ale_softc *sc)
{
	/* Reset magic from Linux. */
	CSR_WRITE_2(sc, ALE_GPHY_CTRL,
	    GPHY_CTRL_HIB_EN | GPHY_CTRL_HIB_PULSE | GPHY_CTRL_SEL_ANA_RESET |
	    GPHY_CTRL_PHY_PLL_ON);
	DELAY(1000);
	CSR_WRITE_2(sc, ALE_GPHY_CTRL,
	    GPHY_CTRL_EXT_RESET | GPHY_CTRL_HIB_EN | GPHY_CTRL_HIB_PULSE |
	    GPHY_CTRL_SEL_ANA_RESET | GPHY_CTRL_PHY_PLL_ON);
	DELAY(1000);

#define	ATPHY_DBG_ADDR		0x1D
#define	ATPHY_DBG_DATA		0x1E

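	/*
	 * These two PHY registers form an indirect window into the
	 * PHY's debug register space: the debug register number is
	 * written to ATPHY_DBG_ADDR, then the value for it to
	 * ATPHY_DBG_DATA.
	 */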
	/* Enable hibernation mode. */
	ale_miibus_writereg(&sc->sc_dev, sc->ale_phyaddr,
	    ATPHY_DBG_ADDR, 0x0B);
	ale_miibus_writereg(&sc->sc_dev, sc->ale_phyaddr,
	    ATPHY_DBG_DATA, 0xBC00);
	/* Set Class A/B for all modes. */
	ale_miibus_writereg(&sc->sc_dev, sc->ale_phyaddr,
	    ATPHY_DBG_ADDR, 0x00);
	ale_miibus_writereg(&sc->sc_dev, sc->ale_phyaddr,
	    ATPHY_DBG_DATA, 0x02EF);
	/* Enable 10BT power saving. */
	ale_miibus_writereg(&sc->sc_dev, sc->ale_phyaddr,
	    ATPHY_DBG_ADDR, 0x12);
	ale_miibus_writereg(&sc->sc_dev, sc->ale_phyaddr,
	    ATPHY_DBG_DATA, 0x4C04);
	/* Adjust 1000T power. */
	ale_miibus_writereg(&sc->sc_dev, sc->ale_phyaddr,
	    ATPHY_DBG_ADDR, 0x04);
	ale_miibus_writereg(&sc->sc_dev, sc->ale_phyaddr,
	    ATPHY_DBG_DATA, 0x8BBB);
	/* 10BT center tap voltage. */
	ale_miibus_writereg(&sc->sc_dev, sc->ale_phyaddr,
	    ATPHY_DBG_ADDR, 0x05);
	ale_miibus_writereg(&sc->sc_dev, sc->ale_phyaddr,
	    ATPHY_DBG_DATA, 0x2C46);

#undef	ATPHY_DBG_ADDR
#undef	ATPHY_DBG_DATA
	DELAY(1000);
}

void
ale_attach(struct device *parent, struct device *self, void *aux)
{
	struct ale_softc *sc = (struct ale_softc *)self;
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr;
	struct ifnet *ifp;
	pcireg_t memtype;
	int mii_flags, error = 0;
	uint32_t rxf_len, txf_len;
	const char *chipname;

	/*
	 * Allocate IO memory.
	 */
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, ALE_PCIR_BAR);
	if (pci_mapreg_map(pa, ALE_PCIR_BAR, memtype, 0, &sc->sc_mem_bt,
	    &sc->sc_mem_bh, NULL, &sc->sc_mem_size, 0)) {
		printf(": can't map mem space\n");
		return;
	}

	if (pci_intr_map_msi(pa, &ih) != 0 && pci_intr_map(pa, &ih) != 0) {
		printf(": can't map interrupt\n");
		goto fail;
	}

	/*
	 * Allocate IRQ.
	 */
	intrstr = pci_intr_string(pc, ih);
	sc->sc_irq_handle = pci_intr_establish(pc, ih, IPL_NET, ale_intr, sc,
	    sc->sc_dev.dv_xname);
	if (sc->sc_irq_handle == NULL) {
		printf(": could not establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		goto fail;
	}

	sc->sc_dmat = pa->pa_dmat;
	sc->sc_pct = pa->pa_pc;
	sc->sc_pcitag = pa->pa_tag;

	/* Set PHY address. */
	sc->ale_phyaddr = ALE_PHY_ADDR;

	/* Reset PHY. */
	ale_phy_reset(sc);

	/* Reset the ethernet controller. */
	ale_reset(sc);

	/* Get PCI and chip id/revision. */
	sc->ale_rev = PCI_REVISION(pa->pa_class);
	if (sc->ale_rev >= 0xF0) {
		/* L2E Rev. B. AR8114 */
		sc->ale_flags |= ALE_FLAG_FASTETHER;
		chipname = "AR8114";
	} else {
		if ((CSR_READ_4(sc, ALE_PHY_STATUS) & PHY_STATUS_100M) != 0) {
			/* L1E AR8121 */
			sc->ale_flags |= ALE_FLAG_JUMBO;
			chipname = "AR8121";
		} else {
			/* L2E Rev. A. AR8113 */
			sc->ale_flags |= ALE_FLAG_FASTETHER;
			chipname = "AR8113";
		}
	}

	printf(": %s, %s", chipname, intrstr);

	/*
	 * All known controllers seem to require 4-byte alignment
	 * of Tx buffers to make Tx checksum offload with the custom
	 * checksum generation method work.
	 */
	sc->ale_flags |= ALE_FLAG_TXCSUM_BUG;

	/*
	 * All known controllers seem to have issues with Rx checksum
	 * offload for fragmented IP datagrams.
	 */
	sc->ale_flags |= ALE_FLAG_RXCSUM_BUG;

	/*
	 * Don't use Tx CMB. It is known to cause RRS update failure
	 * under certain circumstances. A typical symptom of the
	 * issue is an unexpected sequence number encountered in the
	 * Rx handler.
	 */
	sc->ale_flags |= ALE_FLAG_TXCMB_BUG;
	sc->ale_chip_rev = CSR_READ_4(sc, ALE_MASTER_CFG) >>
	    MASTER_CHIP_REV_SHIFT;
	if (aledebug) {
		printf("%s: PCI device revision : 0x%04x\n",
		    sc->sc_dev.dv_xname, sc->ale_rev);
		printf("%s: Chip id/revision : 0x%04x\n",
		    sc->sc_dev.dv_xname, sc->ale_chip_rev);
	}

	/*
	 * Uninitialized hardware returns an invalid chip id/revision
	 * as well as 0xFFFFFFFF for Tx/Rx fifo length.
	 */
	txf_len = CSR_READ_4(sc, ALE_SRAM_TX_FIFO_LEN);
	rxf_len = CSR_READ_4(sc, ALE_SRAM_RX_FIFO_LEN);
	if (sc->ale_chip_rev == 0xFFFF || txf_len == 0xFFFFFFFF ||
	    rxf_len == 0xFFFFFFFF) {
		printf("%s: chip revision : 0x%04x, %u Tx FIFO "
		    "%u Rx FIFO -- not initialized?\n", sc->sc_dev.dv_xname,
		    sc->ale_chip_rev, txf_len, rxf_len);
		goto fail;
	}

	if (aledebug) {
		printf("%s: %u Tx FIFO, %u Rx FIFO\n", sc->sc_dev.dv_xname,
		    txf_len, rxf_len);
	}

	/* Set max allowable DMA size. */
	sc->ale_dma_rd_burst = DMA_CFG_RD_BURST_128;
	sc->ale_dma_wr_burst = DMA_CFG_WR_BURST_128;

	error = ale_dma_alloc(sc);
	if (error)
		goto fail;

	/* Load station address. */
	ale_get_macaddr(sc);

	ifp = &sc->sc_arpcom.ac_if;
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = ale_ioctl;
	ifp->if_start = ale_start;
	ifp->if_watchdog = ale_watchdog;
	IFQ_SET_MAXLEN(&ifp->if_snd, ALE_TX_RING_CNT - 1);
	bcopy(sc->ale_eaddr, sc->sc_arpcom.ac_enaddr, ETHER_ADDR_LEN);
	bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);

	ifp->if_capabilities = IFCAP_VLAN_MTU;

#ifdef ALE_CHECKSUM
	ifp->if_capabilities |= IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 |
	    IFCAP_CSUM_UDPv4;
#endif

#if NVLAN > 0
	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
#endif

	printf(", address %s\n", ether_sprintf(sc->sc_arpcom.ac_enaddr));

	/* Set up MII bus. */
	sc->sc_miibus.mii_ifp = ifp;
	sc->sc_miibus.mii_readreg = ale_miibus_readreg;
	sc->sc_miibus.mii_writereg = ale_miibus_writereg;
	sc->sc_miibus.mii_statchg = ale_miibus_statchg;

	ifmedia_init(&sc->sc_miibus.mii_media, 0, ale_mediachange,
	    ale_mediastatus);
	mii_flags = 0;
	if ((sc->ale_flags & ALE_FLAG_JUMBO) != 0)
		mii_flags |= MIIF_DOPAUSE;
	mii_attach(self, &sc->sc_miibus, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, mii_flags);

	if (LIST_FIRST(&sc->sc_miibus.mii_phys) == NULL) {
		printf("%s: no PHY found!\n", sc->sc_dev.dv_xname);
		ifmedia_add(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_MANUAL,
		    0, NULL);
		ifmedia_set(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_MANUAL);
	} else
		ifmedia_set(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_AUTO);

	if_attach(ifp);
	ether_ifattach(ifp);

	timeout_set(&sc->ale_tick_ch, ale_tick, sc);

	return;
fail:
	ale_dma_free(sc);
	if (sc->sc_irq_handle != NULL)
		pci_intr_disestablish(pc, sc->sc_irq_handle);
	if (sc->sc_mem_size)
		bus_space_unmap(sc->sc_mem_bt, sc->sc_mem_bh, sc->sc_mem_size);
}

int
ale_detach(struct device *self, int flags)
{
	struct ale_softc *sc = (struct ale_softc *)self;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	int s;

	s = splnet();
	ale_stop(sc);
	splx(s);

	mii_detach(&sc->sc_miibus, MII_PHY_ANY, MII_OFFSET_ANY);

	/* Delete all remaining media. */
	ifmedia_delete_instance(&sc->sc_miibus.mii_media, IFM_INST_ANY);

	ether_ifdetach(ifp);
	if_detach(ifp);
	ale_dma_free(sc);

	if (sc->sc_irq_handle != NULL) {
		pci_intr_disestablish(sc->sc_pct, sc->sc_irq_handle);
		sc->sc_irq_handle = NULL;
	}

	return (0);
}

int
ale_activate(struct device *self, int act)
{
	struct ale_softc *sc = (struct ale_softc *)self;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	int rv = 0;

	switch (act) {
	case DVACT_SUSPEND:
		if (ifp->if_flags & IFF_RUNNING)
			ale_stop(sc);
		rv = config_activate_children(self, act);
		break;
	case DVACT_RESUME:
		if (ifp->if_flags & IFF_UP)
			ale_init(ifp);
		break;
	default:
		rv = config_activate_children(self, act);
		break;
	}
	return (rv);
}

int
ale_dma_alloc(struct ale_softc *sc)
{
	struct ale_txdesc *txd;
	int nsegs, error, guard_size, i;

	if ((sc->ale_flags & ALE_FLAG_JUMBO) != 0)
		guard_size = ALE_JUMBO_FRAMELEN;
	else
		guard_size = ALE_MAX_FRAMELEN;
	sc->ale_pagesize = roundup(guard_size + ALE_RX_PAGE_SZ,
	    ALE_RX_PAGE_ALIGN);
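	/*
	 * Each Rx page is thus allocated with a guard region big enough
	 * for one maximum-sized frame, presumably so that a frame which
	 * starts near the end of the page cannot overrun the
	 * allocation.
	 */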

	/*
	 * Create DMA map for Tx ring.
	 */
	error = bus_dmamap_create(sc->sc_dmat, ALE_TX_RING_SZ, 1,
	    ALE_TX_RING_SZ, 0, BUS_DMA_NOWAIT, &sc->ale_cdata.ale_tx_ring_map);
	if (error)
		return (ENOBUFS);

	/* Allocate DMA'able memory for Tx ring. */
	error = bus_dmamem_alloc(sc->sc_dmat, ALE_TX_RING_SZ,
	    ETHER_ALIGN, 0, &sc->ale_cdata.ale_tx_ring_seg, 1,
	    &nsegs, BUS_DMA_WAITOK | BUS_DMA_ZERO);
	if (error) {
		printf("%s: could not allocate DMA'able memory for Tx ring.\n",
		    sc->sc_dev.dv_xname);
		return error;
	}

	error = bus_dmamem_map(sc->sc_dmat, &sc->ale_cdata.ale_tx_ring_seg,
	    nsegs, ALE_TX_RING_SZ, (caddr_t *)&sc->ale_cdata.ale_tx_ring,
	    BUS_DMA_NOWAIT);
	if (error)
		return (ENOBUFS);

	/* Load the DMA map for Tx ring. */
	error = bus_dmamap_load(sc->sc_dmat, sc->ale_cdata.ale_tx_ring_map,
	    sc->ale_cdata.ale_tx_ring, ALE_TX_RING_SZ, NULL, BUS_DMA_WAITOK);
	if (error) {
		printf("%s: could not load DMA'able memory for Tx ring.\n",
		    sc->sc_dev.dv_xname);
		bus_dmamem_free(sc->sc_dmat,
		    (bus_dma_segment_t *)&sc->ale_cdata.ale_tx_ring, 1);
		return error;
	}
	sc->ale_cdata.ale_tx_ring_paddr =
	    sc->ale_cdata.ale_tx_ring_map->dm_segs[0].ds_addr;

	for (i = 0; i < ALE_RX_PAGES; i++) {
		/*
		 * Create DMA map for Rx pages.
		 */
		error = bus_dmamap_create(sc->sc_dmat, sc->ale_pagesize, 1,
		    sc->ale_pagesize, 0, BUS_DMA_NOWAIT,
		    &sc->ale_cdata.ale_rx_page[i].page_map);
		if (error)
			return (ENOBUFS);

		/* Allocate DMA'able memory for Rx pages. */
		error = bus_dmamem_alloc(sc->sc_dmat, sc->ale_pagesize,
		    ETHER_ALIGN, 0, &sc->ale_cdata.ale_rx_page[i].page_seg,
		    1, &nsegs, BUS_DMA_WAITOK | BUS_DMA_ZERO);
		if (error) {
			printf("%s: could not allocate DMA'able memory for "
			    "Rx ring.\n", sc->sc_dev.dv_xname);
			return error;
		}
		error = bus_dmamem_map(sc->sc_dmat,
		    &sc->ale_cdata.ale_rx_page[i].page_seg, nsegs,
		    sc->ale_pagesize,
		    (caddr_t *)&sc->ale_cdata.ale_rx_page[i].page_addr,
		    BUS_DMA_NOWAIT);
		if (error)
			return (ENOBUFS);

		/* Load the DMA map for Rx pages. */
		error = bus_dmamap_load(sc->sc_dmat,
		    sc->ale_cdata.ale_rx_page[i].page_map,
		    sc->ale_cdata.ale_rx_page[i].page_addr,
		    sc->ale_pagesize, NULL, BUS_DMA_WAITOK);
		if (error) {
			printf("%s: could not load DMA'able memory for "
			    "Rx pages.\n", sc->sc_dev.dv_xname);
			bus_dmamem_free(sc->sc_dmat,
			    (bus_dma_segment_t *)sc->ale_cdata.ale_rx_page[i].page_addr, 1);
			return error;
		}
		sc->ale_cdata.ale_rx_page[i].page_paddr =
		    sc->ale_cdata.ale_rx_page[i].page_map->dm_segs[0].ds_addr;
	}

	/*
	 * Create DMA map for Tx CMB.
	 */
	error = bus_dmamap_create(sc->sc_dmat, ALE_TX_CMB_SZ, 1,
	    ALE_TX_CMB_SZ, 0, BUS_DMA_NOWAIT, &sc->ale_cdata.ale_tx_cmb_map);
	if (error)
		return (ENOBUFS);

	/* Allocate DMA'able memory for Tx CMB. */
	error = bus_dmamem_alloc(sc->sc_dmat, ALE_TX_CMB_SZ, ETHER_ALIGN, 0,
	    &sc->ale_cdata.ale_tx_cmb_seg, 1, &nsegs,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO);
	if (error) {
		printf("%s: could not allocate DMA'able memory for Tx CMB.\n",
		    sc->sc_dev.dv_xname);
		return error;
	}

	error = bus_dmamem_map(sc->sc_dmat, &sc->ale_cdata.ale_tx_cmb_seg,
	    nsegs, ALE_TX_CMB_SZ, (caddr_t *)&sc->ale_cdata.ale_tx_cmb,
	    BUS_DMA_NOWAIT);
	if (error)
		return (ENOBUFS);

	/* Load the DMA map for Tx CMB. */
	error = bus_dmamap_load(sc->sc_dmat, sc->ale_cdata.ale_tx_cmb_map,
	    sc->ale_cdata.ale_tx_cmb, ALE_TX_CMB_SZ, NULL, BUS_DMA_WAITOK);
	if (error) {
		printf("%s: could not load DMA'able memory for Tx CMB.\n",
		    sc->sc_dev.dv_xname);
		bus_dmamem_free(sc->sc_dmat,
		    (bus_dma_segment_t *)&sc->ale_cdata.ale_tx_cmb, 1);
		return error;
	}

	sc->ale_cdata.ale_tx_cmb_paddr =
	    sc->ale_cdata.ale_tx_cmb_map->dm_segs[0].ds_addr;

	for (i = 0; i < ALE_RX_PAGES; i++) {
		/*
		 * Create DMA map for Rx CMB.
		 */
		error = bus_dmamap_create(sc->sc_dmat, ALE_RX_CMB_SZ, 1,
		    ALE_RX_CMB_SZ, 0, BUS_DMA_NOWAIT,
		    &sc->ale_cdata.ale_rx_page[i].cmb_map);
		if (error)
			return (ENOBUFS);

		/* Allocate DMA'able memory for Rx CMB. */
		error = bus_dmamem_alloc(sc->sc_dmat, ALE_RX_CMB_SZ,
		    ETHER_ALIGN, 0, &sc->ale_cdata.ale_rx_page[i].cmb_seg, 1,
		    &nsegs, BUS_DMA_WAITOK | BUS_DMA_ZERO);
		if (error) {
			printf("%s: could not allocate DMA'able memory for "
			    "Rx CMB.\n", sc->sc_dev.dv_xname);
			return error;
		}
		error = bus_dmamem_map(sc->sc_dmat,
		    &sc->ale_cdata.ale_rx_page[i].cmb_seg, nsegs,
		    ALE_RX_CMB_SZ,
		    (caddr_t *)&sc->ale_cdata.ale_rx_page[i].cmb_addr,
		    BUS_DMA_NOWAIT);
		if (error)
			return (ENOBUFS);

		/* Load the DMA map for Rx CMB. */
		error = bus_dmamap_load(sc->sc_dmat,
		    sc->ale_cdata.ale_rx_page[i].cmb_map,
		    sc->ale_cdata.ale_rx_page[i].cmb_addr,
		    ALE_RX_CMB_SZ, NULL, BUS_DMA_WAITOK);
		if (error) {
			printf("%s: could not load DMA'able memory for "
			    "Rx CMB.\n", sc->sc_dev.dv_xname);
			bus_dmamem_free(sc->sc_dmat,
			    (bus_dma_segment_t *)&sc->ale_cdata.ale_rx_page[i].cmb_addr, 1);
			return error;
		}
		sc->ale_cdata.ale_rx_page[i].cmb_paddr =
		    sc->ale_cdata.ale_rx_page[i].cmb_map->dm_segs[0].ds_addr;
	}

	/* Create DMA maps for Tx buffers. */
	for (i = 0; i < ALE_TX_RING_CNT; i++) {
		txd = &sc->ale_cdata.ale_txdesc[i];
		txd->tx_m = NULL;
		txd->tx_dmamap = NULL;
		error = bus_dmamap_create(sc->sc_dmat, ALE_TSO_MAXSIZE,
		    ALE_MAXTXSEGS, ALE_TSO_MAXSEGSIZE, 0, BUS_DMA_NOWAIT,
		    &txd->tx_dmamap);
		if (error) {
			printf("%s: could not create Tx dmamap.\n",
			    sc->sc_dev.dv_xname);
			return error;
		}
	}

	return (0);
}

void
ale_dma_free(struct ale_softc *sc)
{
	struct ale_txdesc *txd;
	int i;

	/* Tx buffers. */
	for (i = 0; i < ALE_TX_RING_CNT; i++) {
		txd = &sc->ale_cdata.ale_txdesc[i];
		if (txd->tx_dmamap != NULL) {
			bus_dmamap_destroy(sc->sc_dmat, txd->tx_dmamap);
			txd->tx_dmamap = NULL;
		}
	}

	/* Tx descriptor ring. */
	if (sc->ale_cdata.ale_tx_ring_map != NULL)
		bus_dmamap_unload(sc->sc_dmat, sc->ale_cdata.ale_tx_ring_map);
	if (sc->ale_cdata.ale_tx_ring_map != NULL &&
	    sc->ale_cdata.ale_tx_ring != NULL)
		bus_dmamem_free(sc->sc_dmat,
		    (bus_dma_segment_t *)sc->ale_cdata.ale_tx_ring, 1);
	sc->ale_cdata.ale_tx_ring = NULL;
	sc->ale_cdata.ale_tx_ring_map = NULL;

	/* Rx page block. */
	for (i = 0; i < ALE_RX_PAGES; i++) {
		if (sc->ale_cdata.ale_rx_page[i].page_map != NULL)
			bus_dmamap_unload(sc->sc_dmat,
			    sc->ale_cdata.ale_rx_page[i].page_map);
		if (sc->ale_cdata.ale_rx_page[i].page_map != NULL &&
		    sc->ale_cdata.ale_rx_page[i].page_addr != NULL)
			bus_dmamem_free(sc->sc_dmat,
			    (bus_dma_segment_t *)sc->ale_cdata.ale_rx_page[i].page_addr, 1);
		sc->ale_cdata.ale_rx_page[i].page_addr = NULL;
		sc->ale_cdata.ale_rx_page[i].page_map = NULL;
	}

	/* Rx CMB. */
	for (i = 0; i < ALE_RX_PAGES; i++) {
		if (sc->ale_cdata.ale_rx_page[i].cmb_map != NULL)
			bus_dmamap_unload(sc->sc_dmat,
			    sc->ale_cdata.ale_rx_page[i].cmb_map);
		if (sc->ale_cdata.ale_rx_page[i].cmb_map != NULL &&
		    sc->ale_cdata.ale_rx_page[i].cmb_addr != NULL)
			bus_dmamem_free(sc->sc_dmat,
			    (bus_dma_segment_t *)sc->ale_cdata.ale_rx_page[i].cmb_addr, 1);
		sc->ale_cdata.ale_rx_page[i].cmb_addr = NULL;
		sc->ale_cdata.ale_rx_page[i].cmb_map = NULL;
	}

	/* Tx CMB. */
	if (sc->ale_cdata.ale_tx_cmb_map != NULL)
		bus_dmamap_unload(sc->sc_dmat, sc->ale_cdata.ale_tx_cmb_map);
	if (sc->ale_cdata.ale_tx_cmb_map != NULL &&
	    sc->ale_cdata.ale_tx_cmb != NULL)
		bus_dmamem_free(sc->sc_dmat,
		    (bus_dma_segment_t *)sc->ale_cdata.ale_tx_cmb, 1);
	sc->ale_cdata.ale_tx_cmb = NULL;
	sc->ale_cdata.ale_tx_cmb_map = NULL;
}

int
ale_encap(struct ale_softc *sc, struct mbuf *m)
{
	struct ale_txdesc *txd, *txd_last;
	struct tx_desc *desc;
	bus_dmamap_t map;
	uint32_t cflags, poff, vtag;
	int error, i, prod;

	cflags = vtag = 0;
	poff = 0;
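	/*
	 * Note that poff stays zero in this driver: the Tx checksum
	 * capabilities are only advertised when built with ALE_CHECKSUM
	 * (see ale_attach()), and no checksum start offset is computed
	 * from the packet headers here.
	 */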

	prod = sc->ale_cdata.ale_tx_prod;
	txd = &sc->ale_cdata.ale_txdesc[prod];
	txd_last = txd;
	map = txd->tx_dmamap;

	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m, BUS_DMA_NOWAIT);
	if (error != 0 && error != EFBIG)
		goto drop;
	if (error != 0) {
		if (m_defrag(m, M_DONTWAIT)) {
			error = ENOBUFS;
			goto drop;
		}
		error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
		    BUS_DMA_NOWAIT);
		if (error != 0)
			goto drop;
	}

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	/* Configure Tx checksum offload. */
	if ((m->m_pkthdr.csum_flags & ALE_CSUM_FEATURES) != 0) {
		/*
		 * The AR81xx supports a Tx custom checksum offload
		 * feature that offloads a single 16-bit checksum
		 * computation, so one of IP, TCP or UDP can be chosen.
		 * Normally the driver would set the checksum
		 * start/insertion position from the TCP/UDP header, as
		 * the TCP/UDP checksum takes more time to compute than
		 * the IP one. However, custom checksum offload seems to
		 * require 4-byte aligned Tx buffers due to a hardware
		 * bug.
		 * The AR81xx also supports explicit Tx checksum
		 * computation if it is told the sizes of the IP and TCP
		 * headers (for UDP the header size does not matter
		 * because it is of fixed length). With that scheme,
		 * however, TSO does not work, so either TSO or explicit
		 * Tx checksum offload has to be chosen. TSO plus custom
		 * checksum offload with a work-around was chosen here,
		 * as that covers the most common usage of this consumer
		 * ethernet controller. The work-around takes a lot of
		 * CPU cycles if the Tx buffer is not aligned on a
		 * 4-byte boundary, though.
		 */
		cflags |= ALE_TD_CXSUM;
		/* Set checksum start offset. */
		cflags |= (poff << ALE_TD_CSUM_PLOADOFFSET_SHIFT);
	}

#if NVLAN > 0
	/* Configure VLAN hardware tag insertion. */
	if (m->m_flags & M_VLANTAG) {
		vtag = ALE_TX_VLAN_TAG(m->m_pkthdr.ether_vtag);
		vtag = ((vtag << ALE_TD_VLAN_SHIFT) & ALE_TD_VLAN_MASK);
		cflags |= ALE_TD_INSERT_VLAN_TAG;
	}
#endif

	desc = NULL;
	for (i = 0; i < map->dm_nsegs; i++) {
		desc = &sc->ale_cdata.ale_tx_ring[prod];
		desc->addr = htole64(map->dm_segs[i].ds_addr);
		desc->len =
		    htole32(ALE_TX_BYTES(map->dm_segs[i].ds_len) | vtag);
		desc->flags = htole32(cflags);
		sc->ale_cdata.ale_tx_cnt++;
		ALE_DESC_INC(prod, ALE_TX_RING_CNT);
	}

	/* Update producer index. */
	sc->ale_cdata.ale_tx_prod = prod;

	/* Finally set EOP on the last descriptor. */
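	/*
	 * prod was advanced one past the last slot written above, so
	 * stepping back by one modulo the ring size recovers the final
	 * descriptor of this frame.
	 */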
	prod = (prod + ALE_TX_RING_CNT - 1) % ALE_TX_RING_CNT;
	desc = &sc->ale_cdata.ale_tx_ring[prod];
	desc->flags |= htole32(ALE_TD_EOP);

	/* Swap dmamap of the first and the last. */
	txd = &sc->ale_cdata.ale_txdesc[prod];
	map = txd_last->tx_dmamap;
	txd_last->tx_dmamap = txd->tx_dmamap;
	txd->tx_dmamap = map;
	txd->tx_m = m;

	/* Sync descriptors. */
	bus_dmamap_sync(sc->sc_dmat, sc->ale_cdata.ale_tx_ring_map, 0,
	    sc->ale_cdata.ale_tx_ring_map->dm_mapsize,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);

 drop:
	m_freem(m);
	return (error);
}

void
ale_start(struct ifnet *ifp)
{
	struct ale_softc *sc = ifp->if_softc;
	struct mbuf *m;
	int enq;

	/* Reclaim transmitted frames. */
	if (sc->ale_cdata.ale_tx_cnt >= ALE_TX_DESC_HIWAT)
		ale_txeof(sc);

	if (!(ifp->if_flags & IFF_RUNNING) || ifq_is_oactive(&ifp->if_snd))
		return;
	if ((sc->ale_flags & ALE_FLAG_LINK) == 0)
		return;
	if (IFQ_IS_EMPTY(&ifp->if_snd))
		return;

	enq = 0;
	for (;;) {
		/* Check descriptor overrun. */
		if (sc->ale_cdata.ale_tx_cnt + ALE_MAXTXSEGS >=
		    ALE_TX_RING_CNT - 2) {
			ifq_set_oactive(&ifp->if_snd);
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL)
			break;

		/*
		 * Pack the data into the transmit ring. If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 */
		if (ale_encap(sc, m) != 0) {
			ifp->if_oerrors++;
			continue;
		}

		enq = 1;

#if NBPFILTER > 0
		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		if (ifp->if_bpf != NULL)
			bpf_mtap_ether(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif
	}

	if (enq) {
		/* Kick. */
		CSR_WRITE_4(sc, ALE_MBOX_TPD_PROD_IDX,
		    sc->ale_cdata.ale_tx_prod);

		/* Set a timeout in case the chip goes out to lunch. */
		ifp->if_timer = ALE_TX_TIMEOUT;
	}
}

void
ale_watchdog(struct ifnet *ifp)
{
	struct ale_softc *sc = ifp->if_softc;

	if ((sc->ale_flags & ALE_FLAG_LINK) == 0) {
		printf("%s: watchdog timeout (missed link)\n",
		    sc->sc_dev.dv_xname);
		ifp->if_oerrors++;
		ale_init(ifp);
		return;
	}

	printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);
	ifp->if_oerrors++;
	ale_init(ifp);
	ale_start(ifp);
}

int
ale_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct ale_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_miibus;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		if (!(ifp->if_flags & IFF_RUNNING))
			ale_init(ifp);
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING)
				error = ENETRESET;
			else
				ale_init(ifp);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				ale_stop(sc);
		}
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;

	default:
		error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data);
		break;
	}

	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			ale_iff(sc);
		error = 0;
	}

	splx(s);
	return (error);
}

void
ale_mac_config(struct ale_softc *sc)
{
	struct mii_data *mii;
	uint32_t reg;

	mii = &sc->sc_miibus;
	reg = CSR_READ_4(sc, ALE_MAC_CFG);
	reg &= ~(MAC_CFG_FULL_DUPLEX | MAC_CFG_TX_FC | MAC_CFG_RX_FC |
	    MAC_CFG_SPEED_MASK);
	/* Reprogram MAC with resolved speed/duplex. */
	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_10_T:
	case IFM_100_TX:
		reg |= MAC_CFG_SPEED_10_100;
		break;
	case IFM_1000_T:
		reg |= MAC_CFG_SPEED_1000;
		break;
	}
	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
		reg |= MAC_CFG_FULL_DUPLEX;
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
			reg |= MAC_CFG_TX_FC;
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
			reg |= MAC_CFG_RX_FC;
	}
	CSR_WRITE_4(sc, ALE_MAC_CFG, reg);
}

void
ale_stats_clear(struct ale_softc *sc)
{
	struct smb sb;
	uint32_t *reg;
	int i;
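	/*
	 * The smb structure mirrors the hardware MIB counter block
	 * field for field, so walking 'reg' across consecutive struct
	 * members while stepping 'i' in 4-byte increments visits every
	 * counter register exactly once. The counters are apparently
	 * clear-on-read; the values read here are simply discarded.
	 */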
	for (reg = &sb.rx_frames, i = 0; reg <= &sb.rx_pkts_filtered; reg++) {
		CSR_READ_4(sc, ALE_RX_MIB_BASE + i);
		i += sizeof(uint32_t);
	}
	/* Read Tx statistics. */
	for (reg = &sb.tx_frames, i = 0; reg <= &sb.tx_mcast_bytes; reg++) {
		CSR_READ_4(sc, ALE_TX_MIB_BASE + i);
		i += sizeof(uint32_t);
	}
}

void
ale_stats_update(struct ale_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct ale_hw_stats *stat;
	struct smb sb, *smb;
	uint32_t *reg;
	int i;

	stat = &sc->ale_stats;
	smb = &sb;

	/* Read Rx statistics. */
	for (reg = &sb.rx_frames, i = 0; reg <= &sb.rx_pkts_filtered; reg++) {
		*reg = CSR_READ_4(sc, ALE_RX_MIB_BASE + i);
		i += sizeof(uint32_t);
	}
	/* Read Tx statistics. */
	for (reg = &sb.tx_frames, i = 0; reg <= &sb.tx_mcast_bytes; reg++) {
		*reg = CSR_READ_4(sc, ALE_TX_MIB_BASE + i);
		i += sizeof(uint32_t);
	}

	/* Rx stats. */
	stat->rx_frames += smb->rx_frames;
	stat->rx_bcast_frames += smb->rx_bcast_frames;
	stat->rx_mcast_frames += smb->rx_mcast_frames;
	stat->rx_pause_frames += smb->rx_pause_frames;
	stat->rx_control_frames += smb->rx_control_frames;
	stat->rx_crcerrs += smb->rx_crcerrs;
	stat->rx_lenerrs += smb->rx_lenerrs;
	stat->rx_bytes += smb->rx_bytes;
	stat->rx_runts += smb->rx_runts;
	stat->rx_fragments += smb->rx_fragments;
	stat->rx_pkts_64 += smb->rx_pkts_64;
	stat->rx_pkts_65_127 += smb->rx_pkts_65_127;
	stat->rx_pkts_128_255 += smb->rx_pkts_128_255;
	stat->rx_pkts_256_511 += smb->rx_pkts_256_511;
	stat->rx_pkts_512_1023 += smb->rx_pkts_512_1023;
	stat->rx_pkts_1024_1518 += smb->rx_pkts_1024_1518;
	stat->rx_pkts_1519_max += smb->rx_pkts_1519_max;
	stat->rx_pkts_truncated += smb->rx_pkts_truncated;
	stat->rx_fifo_oflows += smb->rx_fifo_oflows;
	stat->rx_rrs_errs += smb->rx_rrs_errs;
	stat->rx_alignerrs += smb->rx_alignerrs;
	stat->rx_bcast_bytes += smb->rx_bcast_bytes;
	stat->rx_mcast_bytes += smb->rx_mcast_bytes;
	stat->rx_pkts_filtered += smb->rx_pkts_filtered;

	/* Tx stats. */
	stat->tx_frames += smb->tx_frames;
	stat->tx_bcast_frames += smb->tx_bcast_frames;
	stat->tx_mcast_frames += smb->tx_mcast_frames;
	stat->tx_pause_frames += smb->tx_pause_frames;
	stat->tx_excess_defer += smb->tx_excess_defer;
	stat->tx_control_frames += smb->tx_control_frames;
	stat->tx_deferred += smb->tx_deferred;
	stat->tx_bytes += smb->tx_bytes;
	stat->tx_pkts_64 += smb->tx_pkts_64;
	stat->tx_pkts_65_127 += smb->tx_pkts_65_127;
	stat->tx_pkts_128_255 += smb->tx_pkts_128_255;
	stat->tx_pkts_256_511 += smb->tx_pkts_256_511;
	stat->tx_pkts_512_1023 += smb->tx_pkts_512_1023;
	stat->tx_pkts_1024_1518 += smb->tx_pkts_1024_1518;
	stat->tx_pkts_1519_max += smb->tx_pkts_1519_max;
	stat->tx_single_colls += smb->tx_single_colls;
	stat->tx_multi_colls += smb->tx_multi_colls;
	stat->tx_late_colls += smb->tx_late_colls;
	stat->tx_excess_colls += smb->tx_excess_colls;
	stat->tx_underrun += smb->tx_underrun;
	stat->tx_desc_underrun += smb->tx_desc_underrun;
	stat->tx_lenerrs += smb->tx_lenerrs;
	stat->tx_pkts_truncated += smb->tx_pkts_truncated;
	stat->tx_bcast_bytes += smb->tx_bcast_bytes;
	stat->tx_mcast_bytes += smb->tx_mcast_bytes;

	/* Update counters in ifnet. */
	ifp->if_opackets += smb->tx_frames;

	ifp->if_collisions += smb->tx_single_colls +
	    smb->tx_multi_colls * 2 + smb->tx_late_colls +
	    smb->tx_excess_colls * HDPX_CFG_RETRY_DEFAULT;

	ifp->if_oerrors += smb->tx_late_colls + smb->tx_excess_colls +
	    smb->tx_underrun + smb->tx_pkts_truncated;

	ifp->if_ierrors += smb->rx_crcerrs + smb->rx_lenerrs +
	    smb->rx_runts + smb->rx_pkts_truncated +
	    smb->rx_fifo_oflows + smb->rx_rrs_errs +
	    smb->rx_alignerrs;
}

int
ale_intr(void *xsc)
{
	struct ale_softc *sc = xsc;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	uint32_t status;

	status = CSR_READ_4(sc, ALE_INTR_STATUS);
	if ((status & ALE_INTRS) == 0)
		return (0);

	/* Acknowledge and disable interrupts. */
	CSR_WRITE_4(sc, ALE_INTR_STATUS, status | INTR_DIS_INT);

	if (ifp->if_flags & IFF_RUNNING) {
		int error;

		error = ale_rxeof(sc);
		if (error) {
			sc->ale_stats.reset_brk_seq++;
			ale_init(ifp);
			return (0);
		}

		if (status & (INTR_DMA_RD_TO_RST | INTR_DMA_WR_TO_RST)) {
			if (status & INTR_DMA_RD_TO_RST)
				printf("%s: DMA read error! -- resetting\n",
				    sc->sc_dev.dv_xname);
			if (status & INTR_DMA_WR_TO_RST)
				printf("%s: DMA write error! -- resetting\n",
				    sc->sc_dev.dv_xname);
			ale_init(ifp);
			return (0);
		}

		ale_txeof(sc);
		ale_start(ifp);
	}

	/* Re-enable interrupts. */
	CSR_WRITE_4(sc, ALE_INTR_STATUS, 0x7FFFFFFF);
	return (1);
}

void
ale_txeof(struct ale_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct ale_txdesc *txd;
	uint32_t cons, prod;
	int prog;

	if (sc->ale_cdata.ale_tx_cnt == 0)
		return;

	bus_dmamap_sync(sc->sc_dmat, sc->ale_cdata.ale_tx_ring_map, 0,
	    sc->ale_cdata.ale_tx_ring_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
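	/*
	 * Fetch the index up to which the hardware has finished
	 * transmitting: from the Tx CMB when it is usable, otherwise
	 * from the TPD consumer index register.
	 */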
	if ((sc->ale_flags & ALE_FLAG_TXCMB_BUG) == 0) {
		bus_dmamap_sync(sc->sc_dmat, sc->ale_cdata.ale_tx_cmb_map, 0,
		    sc->ale_cdata.ale_tx_cmb_map->dm_mapsize,
		    BUS_DMASYNC_POSTREAD);
		prod = *sc->ale_cdata.ale_tx_cmb & TPD_CNT_MASK;
	} else
		prod = CSR_READ_2(sc, ALE_TPD_CONS_IDX);
	cons = sc->ale_cdata.ale_tx_cons;
	/*
	 * Go through our Tx list and free mbufs for those
	 * frames which have been transmitted.
	 */
	for (prog = 0; cons != prod; prog++,
	     ALE_DESC_INC(cons, ALE_TX_RING_CNT)) {
		if (sc->ale_cdata.ale_tx_cnt <= 0)
			break;
		ifq_clr_oactive(&ifp->if_snd);
		sc->ale_cdata.ale_tx_cnt--;
		txd = &sc->ale_cdata.ale_txdesc[cons];
		if (txd->tx_m != NULL) {
			/* Reclaim transmitted mbufs. */
			bus_dmamap_sync(sc->sc_dmat, txd->tx_dmamap, 0,
			    txd->tx_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, txd->tx_dmamap);
			m_freem(txd->tx_m);
			txd->tx_m = NULL;
		}
	}

	if (prog > 0) {
		sc->ale_cdata.ale_tx_cons = cons;
		/*
		 * Unarm the watchdog timer only when there are no
		 * pending Tx descriptors in the queue.
		 */
		if (sc->ale_cdata.ale_tx_cnt == 0)
			ifp->if_timer = 0;
	}
}

void
ale_rx_update_page(struct ale_softc *sc, struct ale_rx_page **page,
    uint32_t length, uint32_t *prod)
{
	struct ale_rx_page *rx_page;

	rx_page = *page;
	/* Update consumer position. */
	rx_page->cons += roundup(length + sizeof(struct rx_rs),
	    ALE_RX_PAGE_ALIGN);
	if (rx_page->cons >= ALE_RX_PAGE_SZ) {
		/*
		 * End of Rx page reached, let hardware reuse
		 * this page.
		 */
		rx_page->cons = 0;
		*rx_page->cmb_addr = 0;
		bus_dmamap_sync(sc->sc_dmat, rx_page->cmb_map, 0,
		    rx_page->cmb_map->dm_mapsize,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		CSR_WRITE_1(sc, ALE_RXF0_PAGE0 + sc->ale_cdata.ale_rx_curp,
		    RXF_VALID);
		/* Switch to alternate Rx page. */
		sc->ale_cdata.ale_rx_curp ^= 1;
		rx_page = *page =
		    &sc->ale_cdata.ale_rx_page[sc->ale_cdata.ale_rx_curp];
		/* Page flipped, sync CMB and Rx page. */
		bus_dmamap_sync(sc->sc_dmat, rx_page->page_map, 0,
		    rx_page->page_map->dm_mapsize,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		bus_dmamap_sync(sc->sc_dmat, rx_page->cmb_map, 0,
		    rx_page->cmb_map->dm_mapsize,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		/* Sync completed, cache updated producer index. */
		*prod = *rx_page->cmb_addr;
	}
}

/*
 * It seems that the AR81xx controller can compute a partial checksum.
 * The partial checksum value can be used to accelerate checksum
 * computation for fragmented TCP/UDP packets. The upper network stack
 * already takes advantage of the partial checksum value in the IP
 * reassembly stage, but the correctness of the partial hardware
 * checksum assistance is uncertain due to the lack of a data sheet.
 * In addition, the controller's Rx scheme, which requires a copy for
 * every frame, effectively nullifies one of the nicest offload
 * capabilities of the controller.
 */
void
ale_rxcsum(struct ale_softc *sc, struct mbuf *m, uint32_t status)
{
	struct ip *ip;
	char *p;

	if ((status & ALE_RD_IPCSUM_NOK) == 0)
		m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;

	if ((sc->ale_flags & ALE_FLAG_RXCSUM_BUG) == 0) {
		if (((status & ALE_RD_IPV4_FRAG) == 0) &&
		    ((status & (ALE_RD_TCP | ALE_RD_UDP)) != 0) &&
		    ((status & ALE_RD_TCP_UDPCSUM_NOK) == 0)) {
			m->m_pkthdr.csum_flags |=
			    M_TCP_CSUM_IN_OK | M_UDP_CSUM_IN_OK;
		}
	} else {
		if ((status & (ALE_RD_TCP | ALE_RD_UDP)) != 0 &&
		    (status & ALE_RD_TCP_UDPCSUM_NOK) == 0) {
			p = mtod(m, char *);
			p += ETHER_HDR_LEN;
			if ((status & ALE_RD_802_3) != 0)
				p += LLC_SNAPFRAMELEN;
#if NVLAN > 0
			if (status & ALE_RD_VLAN)
				p += EVL_ENCAPLEN;
#endif
			ip = (struct ip *)p;
			if (ip->ip_off != 0 && (status & ALE_RD_IPV4_DF) == 0)
				return;
			m->m_pkthdr.csum_flags |=
			    M_TCP_CSUM_IN_OK | M_UDP_CSUM_IN_OK;
		}
	}
	/*
	 * Don't mark a bad checksum for TCP/UDP frames, as fragmented
	 * frames may always have the bad-checksum bit set in the frame
	 * status.
	 */
}

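/*
 * Rx scheme of the AR81xx: the chip DMAs received frames back to back
 * into one of two page-sized buffers. Each page has a CMB in host
 * memory into which the hardware posts its producer index. The driver
 * walks a page from its cached consumer offset up to that producer and,
 * once the end of the page is reached, hands the page back to the chip
 * and flips to the alternate page in ale_rx_update_page().
 */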
/* Process received frames. */
int
ale_rxeof(struct ale_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	struct ale_rx_page *rx_page;
	struct rx_rs *rs;
	struct mbuf *m;
	uint32_t length, prod, seqno, status;
	int prog;

	rx_page = &sc->ale_cdata.ale_rx_page[sc->ale_cdata.ale_rx_curp];
	bus_dmamap_sync(sc->sc_dmat, rx_page->cmb_map, 0,
	    rx_page->cmb_map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	bus_dmamap_sync(sc->sc_dmat, rx_page->page_map, 0,
	    rx_page->page_map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	/*
	 * Don't access the producer index directly, as hardware may
	 * update it while the Rx handler is in progress. It would be
	 * even better if there were a way to let hardware know how far
	 * the driver has processed its received frames. Alternatively,
	 * hardware could provide a way to disable CMB updates until
	 * the driver acknowledges the end of CMB access.
	 */
	prod = *rx_page->cmb_addr;
	for (prog = 0; ; prog++) {
		if (rx_page->cons >= prod)
			break;
		rs = (struct rx_rs *)(rx_page->page_addr + rx_page->cons);
		seqno = ALE_RX_SEQNO(letoh32(rs->seqno));
		if (sc->ale_cdata.ale_rx_seqno != seqno) {
			/*
			 * Normally this should not happen short of a
			 * severe driver bug or corrupted memory. However,
			 * it seems to happen under certain conditions
			 * triggered by abrupt Rx events such as the
			 * initiation of a bulk transfer by a remote host.
			 * It is not easy to reproduce, and it is doubtful
			 * that it is related to a hardware FIFO overflow
			 * or to Tx CMB update activity. Similar behaviour
			 * has been seen on the Realtek 8139, which uses a
			 * resembling Rx scheme.
			 */
			if (aledebug)
				printf("%s: garbled seq: %u, expected: %u -- "
				    "resetting!\n", sc->sc_dev.dv_xname,
				    seqno, sc->ale_cdata.ale_rx_seqno);
			return (EIO);
		}
		/* Frame received. */
		sc->ale_cdata.ale_rx_seqno++;
		length = ALE_RX_BYTES(letoh32(rs->length));
		status = letoh32(rs->flags);
		if (status & ALE_RD_ERROR) {
			/*
			 * We want to pass the following frames to the
			 * upper layer regardless of the error bits in
			 * the Rx return status:
			 *
			 *  o IP/TCP/UDP checksum is bad.
			 *  o frame length and protocol specific length
			 *    do not match.
			 */
			if (status & (ALE_RD_CRC | ALE_RD_CODE |
			    ALE_RD_DRIBBLE | ALE_RD_RUNT | ALE_RD_OFLOW |
			    ALE_RD_TRUNC)) {
				ale_rx_update_page(sc, &rx_page, length, &prod);
				continue;
			}
		}
		/*
		 * m_devget(9) is the major bottleneck of ale(4); it
		 * stems from a hardware limitation. For jumbo frames a
		 * slightly better performance could be had if the
		 * driver used m_getjcl(9) with a proper buffer size
		 * argument, but that would make the code more
		 * complicated, and users are unlikely to expect good Rx
		 * performance numbers from these low-end consumer
		 * ethernet controllers.
		 */
		m = m_devget((char *)(rs + 1), length - ETHER_CRC_LEN,
		    ETHER_ALIGN);
		if (m == NULL) {
			ifp->if_iqdrops++;
			ale_rx_update_page(sc, &rx_page, length, &prod);
			continue;
		}
		if (status & ALE_RD_IPV4)
			ale_rxcsum(sc, m, status);
#if NVLAN > 0
		if (status & ALE_RD_VLAN) {
			uint32_t vtags = ALE_RX_VLAN(letoh32(rs->vtags));
			m->m_pkthdr.ether_vtag = ALE_RX_VLAN_TAG(vtags);
			m->m_flags |= M_VLANTAG;
		}
#endif

		ml_enqueue(&ml, m);

		ale_rx_update_page(sc, &rx_page, length, &prod);
	}

	if_input(ifp, &ml);

	return 0;
}

void
ale_tick(void *xsc)
{
	struct ale_softc *sc = xsc;
	struct mii_data *mii = &sc->sc_miibus;
	int s;

	s = splnet();
	mii_tick(mii);
	ale_stats_update(sc);

	timeout_add_sec(&sc->ale_tick_ch, 1);
	splx(s);
}

void
ale_reset(struct ale_softc *sc)
{
	uint32_t reg;
	int i;

	/* Initialize PCIe module. From Linux. */
	CSR_WRITE_4(sc, 0x1008, CSR_READ_4(sc, 0x1008) | 0x8000);

	CSR_WRITE_4(sc, ALE_MASTER_CFG, MASTER_RESET);
	for (i = ALE_RESET_TIMEOUT; i > 0; i--) {
		DELAY(10);
		if ((CSR_READ_4(sc, ALE_MASTER_CFG) & MASTER_RESET) == 0)
			break;
	}
	if (i == 0)
		printf("%s: master reset timeout!\n", sc->sc_dev.dv_xname);

	for (i = ALE_RESET_TIMEOUT; i > 0; i--) {
		if ((reg = CSR_READ_4(sc, ALE_IDLE_STATUS)) == 0)
			break;
		DELAY(10);
	}

	if (i == 0)
		printf("%s: reset timeout (0x%08x)!\n", sc->sc_dev.dv_xname,
		    reg);
}

int
ale_init(struct ifnet *ifp)
{
	struct ale_softc *sc = ifp->if_softc;
	struct mii_data *mii;
	uint8_t eaddr[ETHER_ADDR_LEN];
	bus_addr_t paddr;
	uint32_t reg, rxf_hi, rxf_lo;

	/*
	 * Cancel any pending I/O.
	 */
	ale_stop(sc);

	/*
	 * Reset the chip to a known state.
	 */
	ale_reset(sc);

	/* Initialize Tx descriptors, DMA memory blocks. */
	ale_init_rx_pages(sc);
	ale_init_tx_ring(sc);

	/* Reprogram the station address. */
	bcopy(LLADDR(ifp->if_sadl), eaddr, ETHER_ADDR_LEN);
	CSR_WRITE_4(sc, ALE_PAR0,
	    eaddr[2] << 24 | eaddr[3] << 16 | eaddr[4] << 8 | eaddr[5]);
	CSR_WRITE_4(sc, ALE_PAR1, eaddr[0] << 8 | eaddr[1]);

	/*
	 * Clear the WOL status and disable all WOL features, as WOL
	 * would interfere with Rx operation under normal environments.
	 */
	CSR_READ_4(sc, ALE_WOL_CFG);
	CSR_WRITE_4(sc, ALE_WOL_CFG, 0);

	/*
	 * Set Tx descriptor/RXF0/CMB base addresses. They share
	 * the same high address part of the DMAable region.
	 */
	paddr = sc->ale_cdata.ale_tx_ring_paddr;
	CSR_WRITE_4(sc, ALE_TPD_ADDR_HI, ALE_ADDR_HI(paddr));
	CSR_WRITE_4(sc, ALE_TPD_ADDR_LO, ALE_ADDR_LO(paddr));
	CSR_WRITE_4(sc, ALE_TPD_CNT,
	    (ALE_TX_RING_CNT << TPD_CNT_SHIFT) & TPD_CNT_MASK);

	/* Set Rx page base address; note we use a single queue. */
	paddr = sc->ale_cdata.ale_rx_page[0].page_paddr;
	CSR_WRITE_4(sc, ALE_RXF0_PAGE0_ADDR_LO, ALE_ADDR_LO(paddr));
	paddr = sc->ale_cdata.ale_rx_page[1].page_paddr;
	CSR_WRITE_4(sc, ALE_RXF0_PAGE1_ADDR_LO, ALE_ADDR_LO(paddr));

	/* Set Tx/Rx CMB addresses. */
	paddr = sc->ale_cdata.ale_tx_cmb_paddr;
	CSR_WRITE_4(sc, ALE_TX_CMB_ADDR_LO, ALE_ADDR_LO(paddr));
	paddr = sc->ale_cdata.ale_rx_page[0].cmb_paddr;
	CSR_WRITE_4(sc, ALE_RXF0_CMB0_ADDR_LO, ALE_ADDR_LO(paddr));
	paddr = sc->ale_cdata.ale_rx_page[1].cmb_paddr;
	CSR_WRITE_4(sc, ALE_RXF0_CMB1_ADDR_LO, ALE_ADDR_LO(paddr));

	/* Mark RXF0 as valid. */
	CSR_WRITE_1(sc, ALE_RXF0_PAGE0, RXF_VALID);
	CSR_WRITE_1(sc, ALE_RXF0_PAGE1, RXF_VALID);
	/*
	 * No need to initialize RXF1/RXF2/RXF3. We don't use
	 * multi-queue yet.
	 */

	/* Set Rx page size, excluding guard frame size. */
	CSR_WRITE_4(sc, ALE_RXF_PAGE_SIZE, ALE_RX_PAGE_SZ);

	/* Tell hardware that we're ready to load DMA blocks. */
	CSR_WRITE_4(sc, ALE_DMA_BLOCK, DMA_BLOCK_LOAD);

	/* Set Rx/Tx interrupt trigger threshold. */
	CSR_WRITE_4(sc, ALE_INT_TRIG_THRESH, (1 << INT_TRIG_RX_THRESH_SHIFT) |
	    (4 << INT_TRIG_TX_THRESH_SHIFT));
	/*
	 * XXX
	 * Set the interrupt trigger timer; its purpose and its relation
	 * to the interrupt moderation mechanism are not clear yet.
	 */
	CSR_WRITE_4(sc, ALE_INT_TRIG_TIMER,
	    ((ALE_USECS(10) << INT_TRIG_RX_TIMER_SHIFT) |
	    (ALE_USECS(1000) << INT_TRIG_TX_TIMER_SHIFT)));

	/* Configure interrupt moderation timer. */
	sc->ale_int_rx_mod = ALE_IM_RX_TIMER_DEFAULT;
	sc->ale_int_tx_mod = ALE_IM_TX_TIMER_DEFAULT;
	reg = ALE_USECS(sc->ale_int_rx_mod) << IM_TIMER_RX_SHIFT;
	reg |= ALE_USECS(sc->ale_int_tx_mod) << IM_TIMER_TX_SHIFT;
	CSR_WRITE_4(sc, ALE_IM_TIMER, reg);
	reg = CSR_READ_4(sc, ALE_MASTER_CFG);
	reg &= ~(MASTER_CHIP_REV_MASK | MASTER_CHIP_ID_MASK);
	reg &= ~(MASTER_IM_RX_TIMER_ENB | MASTER_IM_TX_TIMER_ENB);
	if (ALE_USECS(sc->ale_int_rx_mod) != 0)
		reg |= MASTER_IM_RX_TIMER_ENB;
	if (ALE_USECS(sc->ale_int_tx_mod) != 0)
		reg |= MASTER_IM_TX_TIMER_ENB;
	CSR_WRITE_4(sc, ALE_MASTER_CFG, reg);
	CSR_WRITE_2(sc, ALE_INTR_CLR_TIMER, ALE_USECS(1000));

	/* Set the maximum frame size of the controller. */
	if (ifp->if_mtu < ETHERMTU)
		sc->ale_max_frame_size = ETHERMTU;
	else
		sc->ale_max_frame_size = ifp->if_mtu;
	sc->ale_max_frame_size += ETHER_HDR_LEN + EVL_ENCAPLEN + ETHER_CRC_LEN;
	CSR_WRITE_4(sc, ALE_FRAME_SIZE, sc->ale_max_frame_size);

	/* Configure IPG/IFG parameters. */
	CSR_WRITE_4(sc, ALE_IPG_IFG_CFG,
	    ((IPG_IFG_IPGT_DEFAULT << IPG_IFG_IPGT_SHIFT) & IPG_IFG_IPGT_MASK) |
	    ((IPG_IFG_MIFG_DEFAULT << IPG_IFG_MIFG_SHIFT) & IPG_IFG_MIFG_MASK) |
	    ((IPG_IFG_IPG1_DEFAULT << IPG_IFG_IPG1_SHIFT) & IPG_IFG_IPG1_MASK) |
	    ((IPG_IFG_IPG2_DEFAULT << IPG_IFG_IPG2_SHIFT) & IPG_IFG_IPG2_MASK));

	/* Set parameters for half-duplex media. */
	CSR_WRITE_4(sc, ALE_HDPX_CFG,
	    ((HDPX_CFG_LCOL_DEFAULT << HDPX_CFG_LCOL_SHIFT) &
	    HDPX_CFG_LCOL_MASK) |
	    ((HDPX_CFG_RETRY_DEFAULT << HDPX_CFG_RETRY_SHIFT) &
	    HDPX_CFG_RETRY_MASK) | HDPX_CFG_EXC_DEF_EN |
	    ((HDPX_CFG_ABEBT_DEFAULT << HDPX_CFG_ABEBT_SHIFT) &
	    HDPX_CFG_ABEBT_MASK) |
	    ((HDPX_CFG_JAMIPG_DEFAULT << HDPX_CFG_JAMIPG_SHIFT) &
	    HDPX_CFG_JAMIPG_MASK));

	/* Configure Tx jumbo frame parameters. */
	if ((sc->ale_flags & ALE_FLAG_JUMBO) != 0) {
		if (ifp->if_mtu < ETHERMTU)
			reg = sc->ale_max_frame_size;
		else if (ifp->if_mtu < 6 * 1024)
			reg = (sc->ale_max_frame_size * 2) / 3;
		else
			reg = sc->ale_max_frame_size / 2;
		CSR_WRITE_4(sc, ALE_TX_JUMBO_THRESH,
		    roundup(reg, TX_JUMBO_THRESH_UNIT) >>
		    TX_JUMBO_THRESH_UNIT_SHIFT);
	}

	/* Configure TxQ. */
	reg = (128 << (sc->ale_dma_rd_burst >> DMA_CFG_RD_BURST_SHIFT))
	    << TXQ_CFG_TX_FIFO_BURST_SHIFT;
	reg |= (TXQ_CFG_TPD_BURST_DEFAULT << TXQ_CFG_TPD_BURST_SHIFT) &
	    TXQ_CFG_TPD_BURST_MASK;
	CSR_WRITE_4(sc, ALE_TXQ_CFG, reg | TXQ_CFG_ENHANCED_MODE | TXQ_CFG_ENB);

	/* Configure Rx jumbo frame & flow control parameters. */
	if ((sc->ale_flags & ALE_FLAG_JUMBO) != 0) {
		reg = roundup(sc->ale_max_frame_size, RX_JUMBO_THRESH_UNIT);
		CSR_WRITE_4(sc, ALE_RX_JUMBO_THRESH,
		    (((reg >> RX_JUMBO_THRESH_UNIT_SHIFT) <<
		    RX_JUMBO_THRESH_MASK_SHIFT) & RX_JUMBO_THRESH_MASK) |
		    ((RX_JUMBO_LKAH_DEFAULT << RX_JUMBO_LKAH_SHIFT) &
		    RX_JUMBO_LKAH_MASK));
		reg = CSR_READ_4(sc, ALE_SRAM_RX_FIFO_LEN);
1746 		rxf_hi = (reg * 7) / 10;
		rxf_lo = (reg * 3) / 10;
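		/*
		 * Conventional hi/lo watermarks: ask the peer to
		 * pause when the Rx FIFO passes 70% full and release
		 * it once the FIFO drains below 30% (a conventional
		 * reading of the pause thresholds).
		 */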
1748 		CSR_WRITE_4(sc, ALE_RX_FIFO_PAUSE_THRESH,
1749 		    ((rxf_lo << RX_FIFO_PAUSE_THRESH_LO_SHIFT) &
1750 		    RX_FIFO_PAUSE_THRESH_LO_MASK) |
1751 		    ((rxf_hi << RX_FIFO_PAUSE_THRESH_HI_SHIFT) &
		    RX_FIFO_PAUSE_THRESH_HI_MASK));
1753 	}
1754 
1755 	/* Disable RSS. */
1756 	CSR_WRITE_4(sc, ALE_RSS_IDT_TABLE0, 0);
1757 	CSR_WRITE_4(sc, ALE_RSS_CPU, 0);
1758 
1759 	/* Configure RxQ. */
1760 	CSR_WRITE_4(sc, ALE_RXQ_CFG,
1761 	    RXQ_CFG_ALIGN_32 | RXQ_CFG_CUT_THROUGH_ENB | RXQ_CFG_ENB);
1762 
1763 	/* Configure DMA parameters. */
1764 	reg = 0;
1765 	if ((sc->ale_flags & ALE_FLAG_TXCMB_BUG) == 0)
1766 		reg |= DMA_CFG_TXCMB_ENB;
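	/*
	 * The Tx coalescing message block is presumed unreliable on
	 * the revisions that carry the ALE_FLAG_TXCMB_BUG quirk, so
	 * it is left disabled there.
	 */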
1767 	CSR_WRITE_4(sc, ALE_DMA_CFG,
1768 	    DMA_CFG_OUT_ORDER | DMA_CFG_RD_REQ_PRI | DMA_CFG_RCB_64 |
1769 	    sc->ale_dma_rd_burst | reg |
1770 	    sc->ale_dma_wr_burst | DMA_CFG_RXCMB_ENB |
1771 	    ((DMA_CFG_RD_DELAY_CNT_DEFAULT << DMA_CFG_RD_DELAY_CNT_SHIFT) &
1772 	    DMA_CFG_RD_DELAY_CNT_MASK) |
1773 	    ((DMA_CFG_WR_DELAY_CNT_DEFAULT << DMA_CFG_WR_DELAY_CNT_SHIFT) &
1774 	    DMA_CFG_WR_DELAY_CNT_MASK));
1775 
	/*
	 * The hardware can be configured to raise an SMB interrupt at
	 * a programmed interval. Since the driver already runs a
	 * once-per-second callout, harvest the statistics from there
	 * instead of relying on the periodic SMB interrupt.
	 */
1782 	CSR_WRITE_4(sc, ALE_SMB_STAT_TIMER, ALE_USECS(0));
1783 
1784 	/* Clear MAC statistics. */
1785 	ale_stats_clear(sc);
1786 
	/*
	 * Configure Tx/Rx MACs.
	 *  - Auto-padding for short frames.
	 *  - Enable CRC generation.
	 *  The MAC is reconfigured for the resolved speed/duplex once
	 *  link establishment has been detected.
	 *  The AR81xx always computes Rx checksums regardless of the
	 *  MAC_CFG_RXCSUM_ENB bit; in fact, setting that bit causes
	 *  Rx handling issues for fragmented IP datagrams due to a
	 *  silicon bug.
	 */
1798 	reg = MAC_CFG_TX_CRC_ENB | MAC_CFG_TX_AUTO_PAD | MAC_CFG_FULL_DUPLEX |
1799 	    ((MAC_CFG_PREAMBLE_DEFAULT << MAC_CFG_PREAMBLE_SHIFT) &
1800 	    MAC_CFG_PREAMBLE_MASK);
1801 	if ((sc->ale_flags & ALE_FLAG_FASTETHER) != 0)
1802 		reg |= MAC_CFG_SPEED_10_100;
1803 	else
1804 		reg |= MAC_CFG_SPEED_1000;
1805 	CSR_WRITE_4(sc, ALE_MAC_CFG, reg);
1806 
1807 	/* Set up the receive filter. */
1808 	ale_iff(sc);
1809 
1810 	ale_rxvlan(sc);
1811 
	/* Acknowledge all pending interrupts and clear them. */
1813 	CSR_WRITE_4(sc, ALE_INTR_MASK, ALE_INTRS);
1814 	CSR_WRITE_4(sc, ALE_INTR_STATUS, 0xFFFFFFFF);
1815 	CSR_WRITE_4(sc, ALE_INTR_STATUS, 0);
1816 
1817 	sc->ale_flags &= ~ALE_FLAG_LINK;
1818 
1819 	/* Switch to the current media. */
1820 	mii = &sc->sc_miibus;
1821 	mii_mediachg(mii);
1822 
1823 	timeout_add_sec(&sc->ale_tick_ch, 1);
1824 
1825 	ifp->if_flags |= IFF_RUNNING;
1826 	ifq_clr_oactive(&ifp->if_snd);
1827 
1828 	return 0;
1829 }
1830 
1831 void
1832 ale_stop(struct ale_softc *sc)
1833 {
1834 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
1835 	struct ale_txdesc *txd;
1836 	uint32_t reg;
1837 	int i;
1838 
1839 	/*
1840 	 * Mark the interface down and cancel the watchdog timer.
1841 	 */
1842 	ifp->if_flags &= ~IFF_RUNNING;
1843 	ifq_clr_oactive(&ifp->if_snd);
1844 	ifp->if_timer = 0;
1845 
1846 	timeout_del(&sc->ale_tick_ch);
1847 	sc->ale_flags &= ~ALE_FLAG_LINK;
1848 
1849 	ale_stats_update(sc);
1850 
1851 	/* Disable interrupts. */
1852 	CSR_WRITE_4(sc, ALE_INTR_MASK, 0);
1853 	CSR_WRITE_4(sc, ALE_INTR_STATUS, 0xFFFFFFFF);
1854 
1855 	/* Disable queue processing and DMA. */
1856 	reg = CSR_READ_4(sc, ALE_TXQ_CFG);
1857 	reg &= ~TXQ_CFG_ENB;
1858 	CSR_WRITE_4(sc, ALE_TXQ_CFG, reg);
1859 	reg = CSR_READ_4(sc, ALE_RXQ_CFG);
1860 	reg &= ~RXQ_CFG_ENB;
1861 	CSR_WRITE_4(sc, ALE_RXQ_CFG, reg);
1862 	reg = CSR_READ_4(sc, ALE_DMA_CFG);
1863 	reg &= ~(DMA_CFG_TXCMB_ENB | DMA_CFG_RXCMB_ENB);
1864 	CSR_WRITE_4(sc, ALE_DMA_CFG, reg);
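	/* Give any in-flight DMA transactions 1ms to drain. */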
1865 	DELAY(1000);
1866 
1867 	/* Stop Rx/Tx MACs. */
1868 	ale_stop_mac(sc);
1869 
1870 	/* Disable interrupts again? XXX */
1871 	CSR_WRITE_4(sc, ALE_INTR_STATUS, 0xFFFFFFFF);
1872 
1873 	/*
	 * Free any TX mbufs still held by the Tx ring.
1875 	 */
1876 	for (i = 0; i < ALE_TX_RING_CNT; i++) {
1877 		txd = &sc->ale_cdata.ale_txdesc[i];
1878 		if (txd->tx_m != NULL) {
1879 			bus_dmamap_sync(sc->sc_dmat, txd->tx_dmamap, 0,
1880 			    txd->tx_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1881 			bus_dmamap_unload(sc->sc_dmat, txd->tx_dmamap);
1882 			m_freem(txd->tx_m);
1883 			txd->tx_m = NULL;
1884 		}
	}
1886 }
1887 
1888 void
1889 ale_stop_mac(struct ale_softc *sc)
1890 {
1891 	uint32_t reg;
1892 	int i;
1893 
1894 	reg = CSR_READ_4(sc, ALE_MAC_CFG);
1895 	if ((reg & (MAC_CFG_TX_ENB | MAC_CFG_RX_ENB)) != 0) {
1896 		reg &= ~(MAC_CFG_TX_ENB | MAC_CFG_RX_ENB);
1897 		CSR_WRITE_4(sc, ALE_MAC_CFG, reg);
1898 	}
1899 
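	/* Wait up to ALE_TIMEOUT * 10us for every engine to go idle. */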
1900 	for (i = ALE_TIMEOUT; i > 0; i--) {
1901 		reg = CSR_READ_4(sc, ALE_IDLE_STATUS);
1902 		if (reg == 0)
1903 			break;
1904 		DELAY(10);
1905 	}
1906 	if (i == 0)
		printf("%s: could not disable Tx/Rx MAC (0x%08x)!\n",
1908 		    sc->sc_dev.dv_xname, reg);
1909 }
1910 
1911 void
1912 ale_init_tx_ring(struct ale_softc *sc)
1913 {
1914 	struct ale_txdesc *txd;
1915 	int i;
1916 
1917 	sc->ale_cdata.ale_tx_prod = 0;
1918 	sc->ale_cdata.ale_tx_cons = 0;
1919 	sc->ale_cdata.ale_tx_cnt = 0;
1920 
1921 	bzero(sc->ale_cdata.ale_tx_ring, ALE_TX_RING_SZ);
1922 	bzero(sc->ale_cdata.ale_tx_cmb, ALE_TX_CMB_SZ);
1923 	for (i = 0; i < ALE_TX_RING_CNT; i++) {
1924 		txd = &sc->ale_cdata.ale_txdesc[i];
1925 		txd->tx_m = NULL;
1926 	}
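	/*
	 * The Tx CMB receives the hardware's Tx consumer index
	 * write-backs; clear it to match the freshly zeroed ring.
	 */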
1927 	*sc->ale_cdata.ale_tx_cmb = 0;
1928 	bus_dmamap_sync(sc->sc_dmat, sc->ale_cdata.ale_tx_cmb_map, 0,
1929 	    sc->ale_cdata.ale_tx_cmb_map->dm_mapsize,
1930 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1931 	bus_dmamap_sync(sc->sc_dmat, sc->ale_cdata.ale_tx_ring_map, 0,
1932 	    sc->ale_cdata.ale_tx_ring_map->dm_mapsize,
1933 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1934 }
1935 
1936 void
1937 ale_init_rx_pages(struct ale_softc *sc)
1938 {
1939 	struct ale_rx_page *rx_page;
1940 	int i;
1941 
1942 	sc->ale_cdata.ale_rx_seqno = 0;
1943 	sc->ale_cdata.ale_rx_curp = 0;
1944 
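	/*
	 * Rx DMA packs incoming frames back-to-back into two large
	 * pages; each page's CMB is updated by the hardware with the
	 * producer offset and a sequence number that ale_rxeof()
	 * checks while walking the page.
	 */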
1945 	for (i = 0; i < ALE_RX_PAGES; i++) {
1946 		rx_page = &sc->ale_cdata.ale_rx_page[i];
1947 		bzero(rx_page->page_addr, sc->ale_pagesize);
1948 		bzero(rx_page->cmb_addr, ALE_RX_CMB_SZ);
1949 		rx_page->cons = 0;
1950 		*rx_page->cmb_addr = 0;
1951 		bus_dmamap_sync(sc->sc_dmat, rx_page->page_map, 0,
1952 		    rx_page->page_map->dm_mapsize,
1953 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1954 		bus_dmamap_sync(sc->sc_dmat, rx_page->cmb_map, 0,
1955 		    rx_page->cmb_map->dm_mapsize,
1956 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1957 	}
1958 }
1959 
1960 void
1961 ale_rxvlan(struct ale_softc *sc)
1962 {
1963 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
1964 	uint32_t reg;
1965 
1966 	reg = CSR_READ_4(sc, ALE_MAC_CFG);
1967 	reg &= ~MAC_CFG_VLAN_TAG_STRIP;
1968 	if (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING)
1969 		reg |= MAC_CFG_VLAN_TAG_STRIP;
1970 	CSR_WRITE_4(sc, ALE_MAC_CFG, reg);
1971 }
1972 
1973 void
1974 ale_iff(struct ale_softc *sc)
1975 {
1976 	struct arpcom *ac = &sc->sc_arpcom;
1977 	struct ifnet *ifp = &ac->ac_if;
1978 	struct ether_multi *enm;
1979 	struct ether_multistep step;
1980 	uint32_t crc;
1981 	uint32_t mchash[2];
1982 	uint32_t rxcfg;
1983 
1984 	rxcfg = CSR_READ_4(sc, ALE_MAC_CFG);
1985 	rxcfg &= ~(MAC_CFG_ALLMULTI | MAC_CFG_BCAST | MAC_CFG_PROMISC);
1986 	ifp->if_flags &= ~IFF_ALLMULTI;
1987 
1988 	/*
1989 	 * Always accept broadcast frames.
1990 	 */
1991 	rxcfg |= MAC_CFG_BCAST;
1992 
	if ((ifp->if_flags & IFF_PROMISC) || ac->ac_multirangecnt > 0) {
1994 		ifp->if_flags |= IFF_ALLMULTI;
1995 		if (ifp->if_flags & IFF_PROMISC)
1996 			rxcfg |= MAC_CFG_PROMISC;
1997 		else
1998 			rxcfg |= MAC_CFG_ALLMULTI;
1999 		mchash[0] = mchash[1] = 0xFFFFFFFF;
2000 	} else {
2001 		/* Program new filter. */
2002 		bzero(mchash, sizeof(mchash));
2003 
2004 		ETHER_FIRST_MULTI(step, ac, enm);
2005 		while (enm != NULL) {
2006 			crc = ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN);
2007 
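			/*
			 * Bit 31 of the big-endian CRC selects MAR0
			 * or MAR1; bits 30-26 select the bit within
			 * the chosen register.
			 */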
2008 			mchash[crc >> 31] |= 1 << ((crc >> 26) & 0x1f);
2009 
2010 			ETHER_NEXT_MULTI(step, enm);
2011 		}
2012 	}
2013 
2014 	CSR_WRITE_4(sc, ALE_MAR0, mchash[0]);
2015 	CSR_WRITE_4(sc, ALE_MAR1, mchash[1]);
2016 	CSR_WRITE_4(sc, ALE_MAC_CFG, rxcfg);
2017 }
2018