/*	$NetBSD: if_age.c,v 1.40 2011/10/25 21:47:38 bouyer Exp $ */
/*	$OpenBSD: if_age.c,v 1.1 2009/01/16 05:00:34 kevlo Exp $	*/

/*-
 * Copyright (c) 2008, Pyun YongHyeon <yongari@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/* Driver for Attansic Technology Corp. L1 Gigabit Ethernet. */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_age.c,v 1.40 2011/10/25 21:47:38 bouyer Exp $");

#include "vlan.h"

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/queue.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/callout.h>
#include <sys/socket.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#endif

#include <net/if_types.h>
#include <net/if_vlanvar.h>

#include <net/bpf.h>

#include <sys/rnd.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_agereg.h>

static int	age_match(device_t, cfdata_t, void *);
static void	age_attach(device_t, device_t, void *);
static int	age_detach(device_t, int);

static bool	age_resume(device_t, const pmf_qual_t *);

static int	age_miibus_readreg(device_t, int, int);
static void	age_miibus_writereg(device_t, int, int, int);
static void	age_miibus_statchg(device_t);

static int	age_init(struct ifnet *);
static int	age_ioctl(struct ifnet *, u_long, void *);
static void	age_start(struct ifnet *);
static void	age_watchdog(struct ifnet *);
static bool	age_shutdown(device_t, int);
static void	age_mediastatus(struct ifnet *, struct ifmediareq *);
static int	age_mediachange(struct ifnet *);

static int	age_intr(void *);
static int	age_dma_alloc(struct age_softc *);
static void	age_dma_free(struct age_softc *);
static void	age_get_macaddr(struct age_softc *, uint8_t[]);
static void	age_phy_reset(struct age_softc *);

static int	age_encap(struct age_softc *, struct mbuf **);
static void	age_init_tx_ring(struct age_softc *);
static int	age_init_rx_ring(struct age_softc *);
static void	age_init_rr_ring(struct age_softc *);
static void	age_init_cmb_block(struct age_softc *);
static void	age_init_smb_block(struct age_softc *);
static int	age_newbuf(struct age_softc *, struct age_rxdesc *, int);
static void	age_mac_config(struct age_softc *);
static void	age_txintr(struct age_softc *, int);
static void	age_rxeof(struct age_softc *sc, struct rx_rdesc *);
static void	age_rxintr(struct age_softc *, int);
static void	age_tick(void *);
static void	age_reset(struct age_softc *);
static void	age_stop(struct ifnet *, int);
static void	age_stats_update(struct age_softc *);
static void	age_stop_txmac(struct age_softc *);
static void	age_stop_rxmac(struct age_softc *);
static void	age_rxvlan(struct age_softc *sc);
static void	age_rxfilter(struct age_softc *);

CFATTACH_DECL_NEW(age, sizeof(struct age_softc),
    age_match, age_attach, age_detach, NULL);

int agedebug = 0;
#define	DPRINTF(x)	do { if (agedebug) printf x; } while (0)

#define ETHER_ALIGN 2
#define AGE_CSUM_FEATURES	(M_CSUM_TCPv4 | M_CSUM_UDPv4)

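/*
 *	Match the Attansic L1 gigabit ethernet controller by
 *	PCI vendor/product ID.
 */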
static int
age_match(device_t dev, cfdata_t match, void *aux)
{
	struct pci_attach_args *pa = aux;

	return (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_ATTANSIC &&
	    PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_ATTANSIC_ETHERNET_GIGA);
}

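/*
 *	Attach the interface: map registers, establish the interrupt,
 *	reset the chip and PHY, allocate DMA resources, and hook the
 *	driver into the MII and ethernet frameworks.
 */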
static void
age_attach(device_t parent, device_t self, void *aux)
{
	struct age_softc *sc = device_private(self);
	struct pci_attach_args *pa = aux;
	pci_intr_handle_t ih;
	const char *intrstr;
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	pcireg_t memtype;
	int error = 0;

	aprint_naive("\n");
	aprint_normal(": Attansic/Atheros L1 Gigabit Ethernet\n");

	sc->sc_dev = self;
	sc->sc_dmat = pa->pa_dmat;
	sc->sc_pct = pa->pa_pc;
	sc->sc_pcitag = pa->pa_tag;

	/*
	 * Allocate IO memory
	 */
	memtype = pci_mapreg_type(sc->sc_pct, sc->sc_pcitag, AGE_PCIR_BAR);
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT_1M:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		break;
	default:
		aprint_error_dev(self, "invalid base address register\n");
		break;
	}

	if (pci_mapreg_map(pa, AGE_PCIR_BAR, memtype, 0, &sc->sc_mem_bt,
	    &sc->sc_mem_bh, NULL, &sc->sc_mem_size) != 0) {
		aprint_error_dev(self, "could not map mem space\n");
		return;
	}

	if (pci_intr_map(pa, &ih) != 0) {
		aprint_error_dev(self, "could not map interrupt\n");
		goto fail;
	}

	/*
	 * Allocate IRQ
	 */
	intrstr = pci_intr_string(sc->sc_pct, ih);
	sc->sc_irq_handle = pci_intr_establish(sc->sc_pct, ih, IPL_NET,
	    age_intr, sc);
	if (sc->sc_irq_handle == NULL) {
		aprint_error_dev(self, "could not establish interrupt");
		if (intrstr != NULL)
			aprint_error(" at %s", intrstr);
		aprint_error("\n");
		goto fail;
	}
	aprint_normal_dev(self, "%s\n", intrstr);

	/* Set PHY address. */
	sc->age_phyaddr = AGE_PHY_ADDR;

	/* Reset PHY. */
	age_phy_reset(sc);

	/* Reset the ethernet controller. */
	age_reset(sc);

	/* Get PCI and chip id/revision. */
	sc->age_rev = PCI_REVISION(pa->pa_class);
	sc->age_chip_rev = CSR_READ_4(sc, AGE_MASTER_CFG) >>
	    MASTER_CHIP_REV_SHIFT;

	aprint_debug_dev(self, "PCI device revision : 0x%04x\n", sc->age_rev);
	aprint_debug_dev(self, "Chip id/revision : 0x%04x\n", sc->age_chip_rev);

	if (agedebug) {
		aprint_debug_dev(self, "%d Tx FIFO, %d Rx FIFO\n",
		    CSR_READ_4(sc, AGE_SRAM_TX_FIFO_LEN),
		    CSR_READ_4(sc, AGE_SRAM_RX_FIFO_LEN));
	}

	/* Set max allowable DMA size. */
	sc->age_dma_rd_burst = DMA_CFG_RD_BURST_128;
	sc->age_dma_wr_burst = DMA_CFG_WR_BURST_128;

	/* Allocate DMA resources. */
	error = age_dma_alloc(sc);
	if (error)
		goto fail;

	callout_init(&sc->sc_tick_ch, 0);
	callout_setfunc(&sc->sc_tick_ch, age_tick, sc);

	/* Load station address. */
	age_get_macaddr(sc, sc->sc_enaddr);

	aprint_normal_dev(self, "Ethernet address %s\n",
	    ether_sprintf(sc->sc_enaddr));

	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_init = age_init;
	ifp->if_ioctl = age_ioctl;
	ifp->if_start = age_start;
	ifp->if_stop = age_stop;
	ifp->if_watchdog = age_watchdog;
	ifp->if_baudrate = IF_Gbps(1);
	IFQ_SET_MAXLEN(&ifp->if_snd, AGE_TX_RING_CNT - 1);
	IFQ_SET_READY(&ifp->if_snd);
	strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);

	sc->sc_ec.ec_capabilities = ETHERCAP_VLAN_MTU;

	ifp->if_capabilities |= IFCAP_CSUM_IPv4_Rx |
				IFCAP_CSUM_TCPv4_Rx |
				IFCAP_CSUM_UDPv4_Rx;
#ifdef AGE_CHECKSUM
	ifp->if_capabilities |= IFCAP_CSUM_IPv4_Tx |
				IFCAP_CSUM_TCPv4_Tx |
				IFCAP_CSUM_UDPv4_Tx;
#endif

#if NVLAN > 0
	sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_HWTAGGING;
#endif

	/* Set up MII bus. */
	sc->sc_miibus.mii_ifp = ifp;
	sc->sc_miibus.mii_readreg = age_miibus_readreg;
	sc->sc_miibus.mii_writereg = age_miibus_writereg;
	sc->sc_miibus.mii_statchg = age_miibus_statchg;

	sc->sc_ec.ec_mii = &sc->sc_miibus;
	ifmedia_init(&sc->sc_miibus.mii_media, 0, age_mediachange,
	    age_mediastatus);
	mii_attach(self, &sc->sc_miibus, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, MIIF_DOPAUSE);

	if (LIST_FIRST(&sc->sc_miibus.mii_phys) == NULL) {
		aprint_error_dev(self, "no PHY found!\n");
		ifmedia_add(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_MANUAL,
		    0, NULL);
		ifmedia_set(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_MANUAL);
	} else
		ifmedia_set(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_AUTO);

	if_attach(ifp);
	ether_ifattach(ifp, sc->sc_enaddr);

	if (pmf_device_register1(self, NULL, age_resume, age_shutdown))
		pmf_class_network_register(self, ifp);
	else
		aprint_error_dev(self, "couldn't establish power handler\n");

	return;

fail:
	age_dma_free(sc);
	if (sc->sc_irq_handle != NULL) {
		pci_intr_disestablish(sc->sc_pct, sc->sc_irq_handle);
		sc->sc_irq_handle = NULL;
	}
	if (sc->sc_mem_size) {
		bus_space_unmap(sc->sc_mem_bt, sc->sc_mem_bh, sc->sc_mem_size);
		sc->sc_mem_size = 0;
	}
}

static int
age_detach(device_t self, int flags)
{
	struct age_softc *sc = device_private(self);
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	int s;

	pmf_device_deregister(self);
	s = splnet();
	age_stop(ifp, 0);
	splx(s);

	mii_detach(&sc->sc_miibus, MII_PHY_ANY, MII_OFFSET_ANY);

	/* Delete all remaining media. */
	ifmedia_delete_instance(&sc->sc_miibus.mii_media, IFM_INST_ANY);

	ether_ifdetach(ifp);
	if_detach(ifp);
	age_dma_free(sc);

	if (sc->sc_irq_handle != NULL) {
		pci_intr_disestablish(sc->sc_pct, sc->sc_irq_handle);
		sc->sc_irq_handle = NULL;
	}
	if (sc->sc_mem_size) {
		bus_space_unmap(sc->sc_mem_bt, sc->sc_mem_bh, sc->sc_mem_size);
		sc->sc_mem_size = 0;
	}
	return 0;
}

/*
 *	Read a PHY register on the MII of the L1.
 */
static int
age_miibus_readreg(device_t dev, int phy, int reg)
{
	struct age_softc *sc = device_private(dev);
	uint32_t v;
	int i;

	if (phy != sc->age_phyaddr)
		return 0;

	CSR_WRITE_4(sc, AGE_MDIO, MDIO_OP_EXECUTE | MDIO_OP_READ |
	    MDIO_SUP_PREAMBLE | MDIO_CLK_25_4 | MDIO_REG_ADDR(reg));
	for (i = AGE_PHY_TIMEOUT; i > 0; i--) {
		DELAY(1);
		v = CSR_READ_4(sc, AGE_MDIO);
		if ((v & (MDIO_OP_EXECUTE | MDIO_OP_BUSY)) == 0)
			break;
	}

	if (i == 0) {
		printf("%s: phy read timeout: phy %d, reg %d\n",
		    device_xname(sc->sc_dev), phy, reg);
		return 0;
	}

	return ((v & MDIO_DATA_MASK) >> MDIO_DATA_SHIFT);
}

/*
 *	Write a PHY register on the MII of the L1.
 */
static void
age_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct age_softc *sc = device_private(dev);
	uint32_t v;
	int i;

	if (phy != sc->age_phyaddr)
		return;

	CSR_WRITE_4(sc, AGE_MDIO, MDIO_OP_EXECUTE | MDIO_OP_WRITE |
	    (val & MDIO_DATA_MASK) << MDIO_DATA_SHIFT |
	    MDIO_SUP_PREAMBLE | MDIO_CLK_25_4 | MDIO_REG_ADDR(reg));

	for (i = AGE_PHY_TIMEOUT; i > 0; i--) {
		DELAY(1);
		v = CSR_READ_4(sc, AGE_MDIO);
		if ((v & (MDIO_OP_EXECUTE | MDIO_OP_BUSY)) == 0)
			break;
	}

	if (i == 0) {
		printf("%s: phy write timeout: phy %d, reg %d\n",
		    device_xname(sc->sc_dev), phy, reg);
	}
}

/*
 *	Callback from MII layer when media changes.
 */
static void
age_miibus_statchg(device_t dev)
{
	struct age_softc *sc = device_private(dev);
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct mii_data *mii;

	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return;

	mii = &sc->sc_miibus;

	sc->age_flags &= ~AGE_FLAG_LINK;
	if ((mii->mii_media_status & IFM_AVALID) != 0) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
		case IFM_1000_T:
			sc->age_flags |= AGE_FLAG_LINK;
			break;
		default:
			break;
		}
	}

	/* Stop Rx/Tx MACs. */
	age_stop_rxmac(sc);
	age_stop_txmac(sc);

	/* Program MACs with resolved speed/duplex/flow-control. */
	if ((sc->age_flags & AGE_FLAG_LINK) != 0) {
		uint32_t reg;

		age_mac_config(sc);
		reg = CSR_READ_4(sc, AGE_MAC_CFG);
		/* Restart DMA engine and Tx/Rx MAC. */
		CSR_WRITE_4(sc, AGE_DMA_CFG, CSR_READ_4(sc, AGE_DMA_CFG) |
		    DMA_CFG_RD_ENB | DMA_CFG_WR_ENB);
		reg |= MAC_CFG_TX_ENB | MAC_CFG_RX_ENB;
		CSR_WRITE_4(sc, AGE_MAC_CFG, reg);
	}
}

/*
 *	Get the current interface media status.
 */
static void
age_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct age_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_miibus;

	mii_pollstat(mii);
	ifmr->ifm_status = mii->mii_media_status;
	ifmr->ifm_active = mii->mii_media_active;
}

/*
 *	Set hardware to newly-selected media.
 */
static int
age_mediachange(struct ifnet *ifp)
{
	struct age_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_miibus;
	int error;

	if (mii->mii_instance != 0) {
		struct mii_softc *miisc;

		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
	error = mii_mediachg(mii);

	return error;
}

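/*
 *	Interrupt handler. Interrupt state is read from the coalescing
 *	message block (CMB), which the hardware updates by DMA; hence the
 *	bus_dmamap_sync() calls bracketing each look at the CMB contents.
 */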
static int
age_intr(void *arg)
{
	struct age_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct cmb *cmb;
	uint32_t status;

	status = CSR_READ_4(sc, AGE_INTR_STATUS);
	if (status == 0 || (status & AGE_INTRS) == 0)
		return 0;

	cmb = sc->age_rdata.age_cmb_block;
	if (cmb == NULL) {
		/*
		 * Happens when bringing up the interface
		 * without having a carrier.  Ack the interrupt.
		 */
		CSR_WRITE_4(sc, AGE_INTR_STATUS, status);
		return 0;
	}

	bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_cmb_block_map, 0,
	    sc->age_cdata.age_cmb_block_map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	status = le32toh(cmb->intr_status);
	/* ACK/reenable interrupts */
	CSR_WRITE_4(sc, AGE_INTR_STATUS, status);
	while ((status & AGE_INTRS) != 0) {
		sc->age_tpd_cons = (le32toh(cmb->tpd_cons) & TPD_CONS_MASK) >>
		    TPD_CONS_SHIFT;
		sc->age_rr_prod = (le32toh(cmb->rprod_cons) & RRD_PROD_MASK) >>
		    RRD_PROD_SHIFT;

		/* Let hardware know CMB was served. */
		cmb->intr_status = 0;
		bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_cmb_block_map, 0,
		    sc->age_cdata.age_cmb_block_map->dm_mapsize,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		if (ifp->if_flags & IFF_RUNNING) {
			if (status & INTR_CMB_RX)
				age_rxintr(sc, sc->age_rr_prod);

			if (status & INTR_CMB_TX)
				age_txintr(sc, sc->age_tpd_cons);

			if (status & (INTR_DMA_RD_TO_RST | INTR_DMA_WR_TO_RST)) {
				if (status & INTR_DMA_RD_TO_RST)
					printf("%s: DMA read error! -- "
					    "resetting\n",
					    device_xname(sc->sc_dev));
				if (status & INTR_DMA_WR_TO_RST)
					printf("%s: DMA write error! -- "
					    "resetting\n",
					    device_xname(sc->sc_dev));
				age_init(ifp);
			}

			age_start(ifp);

			if (status & INTR_SMB)
				age_stats_update(sc);
		}
		/* Check if more interrupts came in. */
		bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_cmb_block_map, 0,
		    sc->age_cdata.age_cmb_block_map->dm_mapsize,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
		status = le32toh(cmb->intr_status);
	}

	return 1;
}

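/*
 *	Read the station address from the PAR0/PAR1 registers, after
 *	asking the TWSI module to reload it from the EEPROM when a PCI
 *	VPD capability is present.
 */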
static void
age_get_macaddr(struct age_softc *sc, uint8_t eaddr[])
{
	uint32_t ea[2], reg;
	int i, vpdc;

	reg = CSR_READ_4(sc, AGE_SPI_CTRL);
	if ((reg & SPI_VPD_ENB) != 0) {
		/* Get VPD stored in TWSI EEPROM. */
		reg &= ~SPI_VPD_ENB;
		CSR_WRITE_4(sc, AGE_SPI_CTRL, reg);
	}

	if (pci_get_capability(sc->sc_pct, sc->sc_pcitag,
	    PCI_CAP_VPD, &vpdc, NULL)) {
		/*
		 * PCI VPD capability found, let TWSI reload EEPROM.
		 * This will set Ethernet address of controller.
		 */
		CSR_WRITE_4(sc, AGE_TWSI_CTRL, CSR_READ_4(sc, AGE_TWSI_CTRL) |
		    TWSI_CTRL_SW_LD_START);
		for (i = 100; i > 0; i--) {
			DELAY(1000);
			reg = CSR_READ_4(sc, AGE_TWSI_CTRL);
			if ((reg & TWSI_CTRL_SW_LD_START) == 0)
				break;
		}
		if (i == 0)
			printf("%s: reloading EEPROM timeout!\n",
			    device_xname(sc->sc_dev));
	} else {
		if (agedebug)
			printf("%s: PCI VPD capability not found!\n",
			    device_xname(sc->sc_dev));
	}

	ea[0] = CSR_READ_4(sc, AGE_PAR0);
	ea[1] = CSR_READ_4(sc, AGE_PAR1);

	eaddr[0] = (ea[1] >> 8) & 0xFF;
	eaddr[1] = (ea[1] >> 0) & 0xFF;
	eaddr[2] = (ea[0] >> 24) & 0xFF;
	eaddr[3] = (ea[0] >> 16) & 0xFF;
	eaddr[4] = (ea[0] >> 8) & 0xFF;
	eaddr[5] = (ea[0] >> 0) & 0xFF;
}

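/*
 *	Reset the PHY. Following the Linux driver, what appear to be
 *	cable-diagnostic registers are polled to detect an open
 *	(unplugged) link, in which case a power-saving workaround is
 *	applied through the PHY debug registers.
 */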
static void
age_phy_reset(struct age_softc *sc)
{
	uint16_t reg, pn;
	int i, linkup;

	/* Reset PHY. */
	CSR_WRITE_4(sc, AGE_GPHY_CTRL, GPHY_CTRL_RST);
	DELAY(2000);
	CSR_WRITE_4(sc, AGE_GPHY_CTRL, GPHY_CTRL_CLR);
	DELAY(2000);

#define ATPHY_DBG_ADDR		0x1D
#define ATPHY_DBG_DATA		0x1E
#define ATPHY_CDTC		0x16
#define PHY_CDTC_ENB		0x0001
#define PHY_CDTC_POFF		8
#define ATPHY_CDTS		0x1C
#define PHY_CDTS_STAT_OK	0x0000
#define PHY_CDTS_STAT_SHORT	0x0100
#define PHY_CDTS_STAT_OPEN	0x0200
#define PHY_CDTS_STAT_INVAL	0x0300
#define PHY_CDTS_STAT_MASK	0x0300

	/* Check power saving mode. Magic from Linux. */
	age_miibus_writereg(sc->sc_dev, sc->age_phyaddr, MII_BMCR, BMCR_RESET);
	for (linkup = 0, pn = 0; pn < 4; pn++) {
		age_miibus_writereg(sc->sc_dev, sc->age_phyaddr, ATPHY_CDTC,
		    (pn << PHY_CDTC_POFF) | PHY_CDTC_ENB);
		for (i = 200; i > 0; i--) {
			DELAY(1000);
			reg = age_miibus_readreg(sc->sc_dev, sc->age_phyaddr,
			    ATPHY_CDTC);
			if ((reg & PHY_CDTC_ENB) == 0)
				break;
		}
		DELAY(1000);
		reg = age_miibus_readreg(sc->sc_dev, sc->age_phyaddr,
		    ATPHY_CDTS);
		if ((reg & PHY_CDTS_STAT_MASK) != PHY_CDTS_STAT_OPEN) {
			linkup++;
			break;
		}
	}
	age_miibus_writereg(sc->sc_dev, sc->age_phyaddr, MII_BMCR,
	    BMCR_RESET | BMCR_AUTOEN | BMCR_STARTNEG);
	if (linkup == 0) {
		age_miibus_writereg(sc->sc_dev, sc->age_phyaddr,
		    ATPHY_DBG_ADDR, 0);
		age_miibus_writereg(sc->sc_dev, sc->age_phyaddr,
		    ATPHY_DBG_DATA, 0x124E);
		age_miibus_writereg(sc->sc_dev, sc->age_phyaddr,
		    ATPHY_DBG_ADDR, 1);
		reg = age_miibus_readreg(sc->sc_dev, sc->age_phyaddr,
		    ATPHY_DBG_DATA);
		age_miibus_writereg(sc->sc_dev, sc->age_phyaddr,
		    ATPHY_DBG_DATA, reg | 0x03);
		/* XXX */
		DELAY(1500 * 1000);
		age_miibus_writereg(sc->sc_dev, sc->age_phyaddr,
		    ATPHY_DBG_ADDR, 0);
		age_miibus_writereg(sc->sc_dev, sc->age_phyaddr,
		    ATPHY_DBG_DATA, 0x024E);
	}

#undef ATPHY_DBG_ADDR
#undef ATPHY_DBG_DATA
#undef ATPHY_CDTC
#undef PHY_CDTC_ENB
#undef PHY_CDTC_POFF
#undef ATPHY_CDTS
#undef PHY_CDTS_STAT_OK
#undef PHY_CDTS_STAT_SHORT
#undef PHY_CDTS_STAT_OPEN
#undef PHY_CDTS_STAT_INVAL
#undef PHY_CDTS_STAT_MASK
}

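/*
 *	Allocate DMA resources for the Tx/Rx rings, the Rx return ring
 *	and the CMB/SMB blocks. Each object goes through the same four
 *	steps: bus_dmamap_create(), bus_dmamem_alloc(), bus_dmamem_map()
 *	and bus_dmamap_load().
 */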
static int
age_dma_alloc(struct age_softc *sc)
{
	struct age_txdesc *txd;
	struct age_rxdesc *rxd;
	int nsegs, error, i;

	/*
	 * Create DMA resources for Tx ring.
	 */
	error = bus_dmamap_create(sc->sc_dmat, AGE_TX_RING_SZ, 1,
	    AGE_TX_RING_SZ, 0, BUS_DMA_NOWAIT, &sc->age_cdata.age_tx_ring_map);
	if (error) {
		sc->age_cdata.age_tx_ring_map = NULL;
		return ENOBUFS;
	}

	/* Allocate DMA'able memory for Tx ring. */
	error = bus_dmamem_alloc(sc->sc_dmat, AGE_TX_RING_SZ,
	    ETHER_ALIGN, 0, &sc->age_rdata.age_tx_ring_seg, 1,
	    &nsegs, BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: could not allocate DMA'able memory for Tx ring, "
		    "error = %i\n", device_xname(sc->sc_dev), error);
		return error;
	}

	error = bus_dmamem_map(sc->sc_dmat, &sc->age_rdata.age_tx_ring_seg,
	    nsegs, AGE_TX_RING_SZ, (void **)&sc->age_rdata.age_tx_ring,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
	if (error)
		return ENOBUFS;

	memset(sc->age_rdata.age_tx_ring, 0, AGE_TX_RING_SZ);

	/* Load the DMA map for Tx ring. */
	error = bus_dmamap_load(sc->sc_dmat, sc->age_cdata.age_tx_ring_map,
	    sc->age_rdata.age_tx_ring, AGE_TX_RING_SZ, NULL, BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: could not load DMA'able memory for Tx ring, "
		    "error = %i\n", device_xname(sc->sc_dev), error);
		bus_dmamem_free(sc->sc_dmat,
		    &sc->age_rdata.age_tx_ring_seg, 1);
		return error;
	}

	sc->age_rdata.age_tx_ring_paddr =
	    sc->age_cdata.age_tx_ring_map->dm_segs[0].ds_addr;

	/*
	 * Create DMA resources for Rx ring.
	 */
	error = bus_dmamap_create(sc->sc_dmat, AGE_RX_RING_SZ, 1,
	    AGE_RX_RING_SZ, 0, BUS_DMA_NOWAIT, &sc->age_cdata.age_rx_ring_map);
	if (error) {
		sc->age_cdata.age_rx_ring_map = NULL;
		return ENOBUFS;
	}

	/* Allocate DMA'able memory for Rx ring. */
	error = bus_dmamem_alloc(sc->sc_dmat, AGE_RX_RING_SZ,
	    ETHER_ALIGN, 0, &sc->age_rdata.age_rx_ring_seg, 1,
	    &nsegs, BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: could not allocate DMA'able memory for Rx ring, "
		    "error = %i.\n", device_xname(sc->sc_dev), error);
		return error;
	}

	error = bus_dmamem_map(sc->sc_dmat, &sc->age_rdata.age_rx_ring_seg,
	    nsegs, AGE_RX_RING_SZ, (void **)&sc->age_rdata.age_rx_ring,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
	if (error)
		return ENOBUFS;

	memset(sc->age_rdata.age_rx_ring, 0, AGE_RX_RING_SZ);

	/* Load the DMA map for Rx ring. */
	error = bus_dmamap_load(sc->sc_dmat, sc->age_cdata.age_rx_ring_map,
	    sc->age_rdata.age_rx_ring, AGE_RX_RING_SZ, NULL, BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: could not load DMA'able memory for Rx ring, "
		    "error = %i.\n", device_xname(sc->sc_dev), error);
		bus_dmamem_free(sc->sc_dmat,
		    &sc->age_rdata.age_rx_ring_seg, 1);
		return error;
	}

	sc->age_rdata.age_rx_ring_paddr =
	    sc->age_cdata.age_rx_ring_map->dm_segs[0].ds_addr;

	/*
	 * Create DMA resources for Rx return ring.
	 */
	error = bus_dmamap_create(sc->sc_dmat, AGE_RR_RING_SZ, 1,
	    AGE_RR_RING_SZ, 0, BUS_DMA_NOWAIT, &sc->age_cdata.age_rr_ring_map);
	if (error) {
		sc->age_cdata.age_rr_ring_map = NULL;
		return ENOBUFS;
	}

	/* Allocate DMA'able memory for Rx return ring. */
	error = bus_dmamem_alloc(sc->sc_dmat, AGE_RR_RING_SZ,
	    ETHER_ALIGN, 0, &sc->age_rdata.age_rr_ring_seg, 1,
	    &nsegs, BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: could not allocate DMA'able memory for Rx "
		    "return ring, error = %i.\n",
		    device_xname(sc->sc_dev), error);
		return error;
	}

	error = bus_dmamem_map(sc->sc_dmat, &sc->age_rdata.age_rr_ring_seg,
	    nsegs, AGE_RR_RING_SZ, (void **)&sc->age_rdata.age_rr_ring,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
	if (error)
		return ENOBUFS;

	memset(sc->age_rdata.age_rr_ring, 0, AGE_RR_RING_SZ);

	/* Load the DMA map for Rx return ring. */
	error = bus_dmamap_load(sc->sc_dmat, sc->age_cdata.age_rr_ring_map,
	    sc->age_rdata.age_rr_ring, AGE_RR_RING_SZ, NULL, BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: could not load DMA'able memory for Rx return ring, "
		    "error = %i\n", device_xname(sc->sc_dev), error);
		bus_dmamem_free(sc->sc_dmat,
		    &sc->age_rdata.age_rr_ring_seg, 1);
		return error;
	}

	sc->age_rdata.age_rr_ring_paddr =
	    sc->age_cdata.age_rr_ring_map->dm_segs[0].ds_addr;

	/*
	 * Create DMA resources for CMB block.
	 */
	error = bus_dmamap_create(sc->sc_dmat, AGE_CMB_BLOCK_SZ, 1,
	    AGE_CMB_BLOCK_SZ, 0, BUS_DMA_NOWAIT,
	    &sc->age_cdata.age_cmb_block_map);
	if (error) {
		sc->age_cdata.age_cmb_block_map = NULL;
		return ENOBUFS;
	}

	/* Allocate DMA'able memory for CMB block. */
	error = bus_dmamem_alloc(sc->sc_dmat, AGE_CMB_BLOCK_SZ,
	    ETHER_ALIGN, 0, &sc->age_rdata.age_cmb_block_seg, 1,
	    &nsegs, BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: could not allocate DMA'able memory for "
		    "CMB block, error = %i\n", device_xname(sc->sc_dev), error);
		return error;
	}

	error = bus_dmamem_map(sc->sc_dmat, &sc->age_rdata.age_cmb_block_seg,
	    nsegs, AGE_CMB_BLOCK_SZ, (void **)&sc->age_rdata.age_cmb_block,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
	if (error)
		return ENOBUFS;

	memset(sc->age_rdata.age_cmb_block, 0, AGE_CMB_BLOCK_SZ);

	/* Load the DMA map for CMB block. */
	error = bus_dmamap_load(sc->sc_dmat, sc->age_cdata.age_cmb_block_map,
	    sc->age_rdata.age_cmb_block, AGE_CMB_BLOCK_SZ, NULL,
	    BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: could not load DMA'able memory for CMB block, "
		    "error = %i\n", device_xname(sc->sc_dev), error);
		bus_dmamem_free(sc->sc_dmat,
		    &sc->age_rdata.age_cmb_block_seg, 1);
		return error;
	}

	sc->age_rdata.age_cmb_block_paddr =
	    sc->age_cdata.age_cmb_block_map->dm_segs[0].ds_addr;

	/*
	 * Create DMA resources for SMB block.
	 */
	error = bus_dmamap_create(sc->sc_dmat, AGE_SMB_BLOCK_SZ, 1,
	    AGE_SMB_BLOCK_SZ, 0, BUS_DMA_NOWAIT,
	    &sc->age_cdata.age_smb_block_map);
	if (error) {
		sc->age_cdata.age_smb_block_map = NULL;
		return ENOBUFS;
	}

	/* Allocate DMA'able memory for SMB block. */
	error = bus_dmamem_alloc(sc->sc_dmat, AGE_SMB_BLOCK_SZ,
	    ETHER_ALIGN, 0, &sc->age_rdata.age_smb_block_seg, 1,
	    &nsegs, BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: could not allocate DMA'able memory for "
		    "SMB block, error = %i\n", device_xname(sc->sc_dev), error);
		return error;
	}

	error = bus_dmamem_map(sc->sc_dmat, &sc->age_rdata.age_smb_block_seg,
	    nsegs, AGE_SMB_BLOCK_SZ, (void **)&sc->age_rdata.age_smb_block,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
	if (error)
		return ENOBUFS;

	memset(sc->age_rdata.age_smb_block, 0, AGE_SMB_BLOCK_SZ);

	/* Load the DMA map for SMB block. */
	error = bus_dmamap_load(sc->sc_dmat, sc->age_cdata.age_smb_block_map,
	    sc->age_rdata.age_smb_block, AGE_SMB_BLOCK_SZ, NULL,
	    BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: could not load DMA'able memory for SMB block, "
		    "error = %i\n", device_xname(sc->sc_dev), error);
		bus_dmamem_free(sc->sc_dmat,
		    &sc->age_rdata.age_smb_block_seg, 1);
		return error;
	}

	sc->age_rdata.age_smb_block_paddr =
	    sc->age_cdata.age_smb_block_map->dm_segs[0].ds_addr;

	/* Create DMA maps for Tx buffers. */
	for (i = 0; i < AGE_TX_RING_CNT; i++) {
		txd = &sc->age_cdata.age_txdesc[i];
		txd->tx_m = NULL;
		txd->tx_dmamap = NULL;
		error = bus_dmamap_create(sc->sc_dmat, AGE_TSO_MAXSIZE,
		    AGE_MAXTXSEGS, AGE_TSO_MAXSEGSIZE, 0, BUS_DMA_NOWAIT,
		    &txd->tx_dmamap);
		if (error) {
			txd->tx_dmamap = NULL;
			printf("%s: could not create Tx dmamap, error = %i.\n",
			    device_xname(sc->sc_dev), error);
			return error;
		}
	}

	/* Create DMA maps for Rx buffers. */
	error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 0,
	    BUS_DMA_NOWAIT, &sc->age_cdata.age_rx_sparemap);
	if (error) {
		sc->age_cdata.age_rx_sparemap = NULL;
		printf("%s: could not create spare Rx dmamap, error = %i.\n",
		    device_xname(sc->sc_dev), error);
		return error;
	}
	for (i = 0; i < AGE_RX_RING_CNT; i++) {
		rxd = &sc->age_cdata.age_rxdesc[i];
		rxd->rx_m = NULL;
		rxd->rx_dmamap = NULL;
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT, &rxd->rx_dmamap);
		if (error) {
			rxd->rx_dmamap = NULL;
			printf("%s: could not create Rx dmamap, error = %i.\n",
			    device_xname(sc->sc_dev), error);
			return error;
		}
	}

	return 0;
}

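/*
 *	Release everything obtained by age_dma_alloc().
 */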
static void
age_dma_free(struct age_softc *sc)
{
	struct age_txdesc *txd;
	struct age_rxdesc *rxd;
	int i;

	/* Tx buffers */
	for (i = 0; i < AGE_TX_RING_CNT; i++) {
		txd = &sc->age_cdata.age_txdesc[i];
		if (txd->tx_dmamap != NULL) {
			bus_dmamap_destroy(sc->sc_dmat, txd->tx_dmamap);
			txd->tx_dmamap = NULL;
		}
	}
	/* Rx buffers */
	for (i = 0; i < AGE_RX_RING_CNT; i++) {
		rxd = &sc->age_cdata.age_rxdesc[i];
		if (rxd->rx_dmamap != NULL) {
			bus_dmamap_destroy(sc->sc_dmat, rxd->rx_dmamap);
			rxd->rx_dmamap = NULL;
		}
	}
	if (sc->age_cdata.age_rx_sparemap != NULL) {
		bus_dmamap_destroy(sc->sc_dmat, sc->age_cdata.age_rx_sparemap);
		sc->age_cdata.age_rx_sparemap = NULL;
	}

	/* Tx ring. */
	if (sc->age_cdata.age_tx_ring_map != NULL)
		bus_dmamap_unload(sc->sc_dmat, sc->age_cdata.age_tx_ring_map);
	if (sc->age_cdata.age_tx_ring_map != NULL &&
	    sc->age_rdata.age_tx_ring != NULL)
		bus_dmamem_free(sc->sc_dmat,
		    &sc->age_rdata.age_tx_ring_seg, 1);
	sc->age_rdata.age_tx_ring = NULL;
	sc->age_cdata.age_tx_ring_map = NULL;

	/* Rx ring. */
	if (sc->age_cdata.age_rx_ring_map != NULL)
		bus_dmamap_unload(sc->sc_dmat, sc->age_cdata.age_rx_ring_map);
	if (sc->age_cdata.age_rx_ring_map != NULL &&
	    sc->age_rdata.age_rx_ring != NULL)
		bus_dmamem_free(sc->sc_dmat,
		    &sc->age_rdata.age_rx_ring_seg, 1);
	sc->age_rdata.age_rx_ring = NULL;
	sc->age_cdata.age_rx_ring_map = NULL;

	/* Rx return ring. */
	if (sc->age_cdata.age_rr_ring_map != NULL)
		bus_dmamap_unload(sc->sc_dmat, sc->age_cdata.age_rr_ring_map);
	if (sc->age_cdata.age_rr_ring_map != NULL &&
	    sc->age_rdata.age_rr_ring != NULL)
		bus_dmamem_free(sc->sc_dmat,
		    &sc->age_rdata.age_rr_ring_seg, 1);
	sc->age_rdata.age_rr_ring = NULL;
	sc->age_cdata.age_rr_ring_map = NULL;

	/* CMB block */
	if (sc->age_cdata.age_cmb_block_map != NULL)
		bus_dmamap_unload(sc->sc_dmat, sc->age_cdata.age_cmb_block_map);
	if (sc->age_cdata.age_cmb_block_map != NULL &&
	    sc->age_rdata.age_cmb_block != NULL)
		bus_dmamem_free(sc->sc_dmat,
		    &sc->age_rdata.age_cmb_block_seg, 1);
	sc->age_rdata.age_cmb_block = NULL;
	sc->age_cdata.age_cmb_block_map = NULL;

	/* SMB block */
	if (sc->age_cdata.age_smb_block_map != NULL)
		bus_dmamap_unload(sc->sc_dmat, sc->age_cdata.age_smb_block_map);
	if (sc->age_cdata.age_smb_block_map != NULL &&
	    sc->age_rdata.age_smb_block != NULL)
		bus_dmamem_free(sc->sc_dmat,
		    &sc->age_rdata.age_smb_block_seg, 1);
	sc->age_rdata.age_smb_block = NULL;
	sc->age_cdata.age_smb_block_map = NULL;
}

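/*
 *	Start output: dequeue packets from the send queue and hand them
 *	to age_encap() until the Tx ring fills up, then kick the chip by
 *	committing the mailbox register.
 */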
static void
age_start(struct ifnet *ifp)
{
	struct age_softc *sc = ifp->if_softc;
	struct mbuf *m_head;
	int enq;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;
	if ((sc->age_flags & AGE_FLAG_LINK) == 0)
		return;
	if (IFQ_IS_EMPTY(&ifp->if_snd))
		return;

	enq = 0;
	for (;;) {
		IFQ_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		/*
		 * Pack the data into the transmit ring. If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 */
		if (age_encap(sc, &m_head)) {
			if (m_head == NULL)
				break;
			IF_PREPEND(&ifp->if_snd, m_head);
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}
		enq = 1;

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to it.
		 */
		bpf_mtap(ifp, m_head);
	}

	if (enq) {
		/* Update mbox. */
		AGE_COMMIT_MBOX(sc);
		/* Set a timeout in case the chip goes out to lunch. */
		ifp->if_timer = AGE_TX_TIMEOUT;
	}
}

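/*
 *	Watchdog: reinitialize the chip, unless the timeout can be
 *	explained by a lost link or by missed Tx completion interrupts.
 */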
static void
age_watchdog(struct ifnet *ifp)
{
	struct age_softc *sc = ifp->if_softc;

	if ((sc->age_flags & AGE_FLAG_LINK) == 0) {
		printf("%s: watchdog timeout (missed link)\n",
		    device_xname(sc->sc_dev));
		ifp->if_oerrors++;
		age_init(ifp);
		return;
	}

	if (sc->age_cdata.age_tx_cnt == 0) {
		printf("%s: watchdog timeout (missed Tx interrupts) "
		    "-- recovering\n", device_xname(sc->sc_dev));
		age_start(ifp);
		return;
	}

	printf("%s: watchdog timeout\n", device_xname(sc->sc_dev));
	ifp->if_oerrors++;
	age_init(ifp);
	age_start(ifp);
}

static bool
age_shutdown(device_t self, int howto)
{
	struct age_softc *sc;
	struct ifnet *ifp;

	sc = device_private(self);
	ifp = &sc->sc_ec.ec_if;
	age_stop(ifp, 1);

	return true;
}

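/*
 *	Handle ioctls. Everything is delegated to ether_ioctl();
 *	ENETRESET only requires reprogramming the Rx filter rather
 *	than a full reinitialization.
 */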
static int
age_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct age_softc *sc = ifp->if_softc;
	int s, error;

	s = splnet();

	error = ether_ioctl(ifp, cmd, data);
	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			age_rxfilter(sc);
		error = 0;
	}

	splx(s);
	return error;
}
static void
age_mac_config(struct age_softc *sc)
{
	struct mii_data *mii;
	uint32_t reg;

	mii = &sc->sc_miibus;

	reg = CSR_READ_4(sc, AGE_MAC_CFG);
	reg &= ~MAC_CFG_FULL_DUPLEX;
	reg &= ~(MAC_CFG_TX_FC | MAC_CFG_RX_FC);
	reg &= ~MAC_CFG_SPEED_MASK;

	/* Reprogram MAC with resolved speed/duplex. */
	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_10_T:
	case IFM_100_TX:
		reg |= MAC_CFG_SPEED_10_100;
		break;
	case IFM_1000_T:
		reg |= MAC_CFG_SPEED_1000;
		break;
	}
	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
		reg |= MAC_CFG_FULL_DUPLEX;
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
			reg |= MAC_CFG_TX_FC;
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
			reg |= MAC_CFG_RX_FC;
	}

	CSR_WRITE_4(sc, AGE_MAC_CFG, reg);
}

static bool
age_resume(device_t dv, const pmf_qual_t *qual)
{
	struct age_softc *sc = device_private(dv);
	uint16_t cmd;

	/*
	 * Clear the INTx emulation disable bit that some hardware
	 * sets on a resume event. From Linux.
	 */
	cmd = pci_conf_read(sc->sc_pct, sc->sc_pcitag, PCI_COMMAND_STATUS_REG);
	if ((cmd & PCI_COMMAND_INTERRUPT_DISABLE) != 0) {
		cmd &= ~PCI_COMMAND_INTERRUPT_DISABLE;
		pci_conf_write(sc->sc_pct, sc->sc_pcitag,
		    PCI_COMMAND_STATUS_REG, cmd);
	}

	return true;
}

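/*
 *	Load an mbuf chain into the Tx ring. For multi-segment packets
 *	the dmamaps of the first and last descriptors are swapped, so
 *	that the map holding the mbuf is released when the EOP
 *	descriptor is reclaimed in age_txintr().
 */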
static int
age_encap(struct age_softc *sc, struct mbuf **m_head)
{
	struct age_txdesc *txd, *txd_last;
	struct tx_desc *desc;
	struct mbuf *m;
	bus_dmamap_t map;
	uint32_t cflags, poff, vtag;
	int error, i, nsegs, prod;
#if NVLAN > 0
	struct m_tag *mtag;
#endif

	m = *m_head;
	cflags = vtag = 0;
	poff = 0;

	prod = sc->age_cdata.age_tx_prod;
	txd = &sc->age_cdata.age_txdesc[prod];
	txd_last = txd;
	map = txd->tx_dmamap;

	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, *m_head, BUS_DMA_NOWAIT);

	if (error == EFBIG) {
		error = 0;

		*m_head = m_pullup(*m_head, MHLEN);
		if (*m_head == NULL) {
			printf("%s: can't defrag TX mbuf\n",
			    device_xname(sc->sc_dev));
			return ENOBUFS;
		}

		error = bus_dmamap_load_mbuf(sc->sc_dmat, map, *m_head,
		    BUS_DMA_NOWAIT);

		if (error != 0) {
			printf("%s: could not load defragged TX mbuf\n",
			    device_xname(sc->sc_dev));
			m_freem(*m_head);
			*m_head = NULL;
			return error;
		}
	} else if (error) {
		printf("%s: could not load TX mbuf\n", device_xname(sc->sc_dev));
		return error;
	}

	nsegs = map->dm_nsegs;

	if (nsegs == 0) {
		m_freem(*m_head);
		*m_head = NULL;
		return EIO;
	}

	/* Check descriptor overrun. */
	if (sc->age_cdata.age_tx_cnt + nsegs >= AGE_TX_RING_CNT - 2) {
		bus_dmamap_unload(sc->sc_dmat, map);
		return ENOBUFS;
	}
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	m = *m_head;
	/* Configure Tx IP/TCP/UDP checksum offload. */
	if ((m->m_pkthdr.csum_flags & AGE_CSUM_FEATURES) != 0) {
		cflags |= AGE_TD_CSUM;
		if ((m->m_pkthdr.csum_flags & M_CSUM_TCPv4) != 0)
			cflags |= AGE_TD_TCPCSUM;
		if ((m->m_pkthdr.csum_flags & M_CSUM_UDPv4) != 0)
			cflags |= AGE_TD_UDPCSUM;
		/* Set checksum start offset. */
		cflags |= (poff << AGE_TD_CSUM_PLOADOFFSET_SHIFT);
	}

#if NVLAN > 0
	/* Configure VLAN hardware tag insertion. */
	if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ec, m))) {
		vtag = AGE_TX_VLAN_TAG(htons(VLAN_TAG_VALUE(mtag)));
		vtag = ((vtag << AGE_TD_VLAN_SHIFT) & AGE_TD_VLAN_MASK);
		cflags |= AGE_TD_INSERT_VLAN_TAG;
	}
#endif

	desc = NULL;
	KASSERT(nsegs > 0);
	for (i = 0; ; i++) {
		desc = &sc->age_rdata.age_tx_ring[prod];
		desc->addr = htole64(map->dm_segs[i].ds_addr);
		desc->len =
		    htole32(AGE_TX_BYTES(map->dm_segs[i].ds_len) | vtag);
		desc->flags = htole32(cflags);
		sc->age_cdata.age_tx_cnt++;
		if (i == (nsegs - 1))
			break;

		/* Sync this descriptor and go to the next one. */
		bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_tx_ring_map,
		    prod * sizeof(struct tx_desc), sizeof(struct tx_desc),
		    BUS_DMASYNC_PREWRITE);
		AGE_DESC_INC(prod, AGE_TX_RING_CNT);
	}

	/* Set EOP on the last descriptor and sync it. */
	desc->flags |= htole32(AGE_TD_EOP);
	bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_tx_ring_map,
	    prod * sizeof(struct tx_desc), sizeof(struct tx_desc),
	    BUS_DMASYNC_PREWRITE);

	if (nsegs > 1) {
		/* Swap dmamap of the first and the last. */
		txd = &sc->age_cdata.age_txdesc[prod];
		map = txd_last->tx_dmamap;
		txd_last->tx_dmamap = txd->tx_dmamap;
		txd->tx_dmamap = map;
		txd->tx_m = m;
		KASSERT(txd_last->tx_m == NULL);
	} else {
		KASSERT(txd_last == &sc->age_cdata.age_txdesc[prod]);
		txd_last->tx_m = m;
	}

	/* Update producer index. */
	AGE_DESC_INC(prod, AGE_TX_RING_CNT);
	sc->age_cdata.age_tx_prod = prod;

	return 0;
}

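/*
 *	Tx completion: walk the ring up to the consumer index reported
 *	in the CMB and reclaim transmitted mbufs.
 */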
static void
age_txintr(struct age_softc *sc, int tpd_cons)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct age_txdesc *txd;
	int cons, prog;

	if (sc->age_cdata.age_tx_cnt <= 0) {
		if (ifp->if_timer != 0)
			printf("timer running without packets\n");
		if (sc->age_cdata.age_tx_cnt)
			printf("age_tx_cnt corrupted\n");
	}

	/*
	 * Go through our Tx list and free mbufs for those
	 * frames which have been transmitted.
	 */
	cons = sc->age_cdata.age_tx_cons;
	for (prog = 0; cons != tpd_cons; AGE_DESC_INC(cons, AGE_TX_RING_CNT)) {
		if (sc->age_cdata.age_tx_cnt <= 0)
			break;
		prog++;
		ifp->if_flags &= ~IFF_OACTIVE;
		sc->age_cdata.age_tx_cnt--;
		txd = &sc->age_cdata.age_txdesc[cons];
		/*
		 * Clear Tx descriptors; this is not required but would
		 * help debugging in case of Tx issues.
		 */
		bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_tx_ring_map,
		    cons * sizeof(struct tx_desc), sizeof(struct tx_desc),
		    BUS_DMASYNC_POSTWRITE);
		txd->tx_desc->addr = 0;
		txd->tx_desc->len = 0;
		txd->tx_desc->flags = 0;

		if (txd->tx_m == NULL)
			continue;
		/* Reclaim transmitted mbufs. */
		bus_dmamap_unload(sc->sc_dmat, txd->tx_dmamap);
		m_freem(txd->tx_m);
		txd->tx_m = NULL;
	}

	if (prog > 0) {
		sc->age_cdata.age_tx_cons = cons;

		/*
		 * Unarm watchdog timer only when there are no pending
		 * Tx descriptors in queue.
		 */
		if (sc->age_cdata.age_tx_cnt == 0)
			ifp->if_timer = 0;
	}
}

/* Receive a frame. */
static void
age_rxeof(struct age_softc *sc, struct rx_rdesc *rxrd)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct age_rxdesc *rxd;
	struct rx_desc *desc;
	struct mbuf *mp, *m;
	uint32_t status, index;
	int count, nsegs, pktlen;
	int rx_cons;

	status = le32toh(rxrd->flags);
	index = le32toh(rxrd->index);
	rx_cons = AGE_RX_CONS(index);
	nsegs = AGE_RX_NSEGS(index);

	sc->age_cdata.age_rxlen = AGE_RX_BYTES(le32toh(rxrd->len));
	if ((status & AGE_RRD_ERROR) != 0 &&
	    (status & (AGE_RRD_CRC | AGE_RRD_CODE | AGE_RRD_DRIBBLE |
	    AGE_RRD_RUNT | AGE_RRD_OFLOW | AGE_RRD_TRUNC)) != 0) {
		/*
		 * We want to pass the following frames to upper
		 * layer regardless of error status of Rx return
		 * ring.
		 *
		 *  o IP/TCP/UDP checksum is bad.
		 *  o frame length and protocol specific length
		 *    do not match.
		 */
		sc->age_cdata.age_rx_cons += nsegs;
		sc->age_cdata.age_rx_cons %= AGE_RX_RING_CNT;
		return;
	}

	pktlen = 0;
	for (count = 0; count < nsegs; count++,
	    AGE_DESC_INC(rx_cons, AGE_RX_RING_CNT)) {
		rxd = &sc->age_cdata.age_rxdesc[rx_cons];
		mp = rxd->rx_m;
		desc = rxd->rx_desc;
		/* Add a new receive buffer to the ring. */
		if (age_newbuf(sc, rxd, 0) != 0) {
			ifp->if_iqdrops++;
			/* Reuse Rx buffers. */
			if (sc->age_cdata.age_rxhead != NULL) {
				m_freem(sc->age_cdata.age_rxhead);
				AGE_RXCHAIN_RESET(sc);
			}
			break;
		}

		/* The length of the first mbuf is computed last. */
		if (count != 0) {
			mp->m_len = AGE_RX_BYTES(le32toh(desc->len));
			pktlen += mp->m_len;
		}

		/* Chain received mbufs. */
		if (sc->age_cdata.age_rxhead == NULL) {
			sc->age_cdata.age_rxhead = mp;
			sc->age_cdata.age_rxtail = mp;
		} else {
			mp->m_flags &= ~M_PKTHDR;
			sc->age_cdata.age_rxprev_tail =
			    sc->age_cdata.age_rxtail;
			sc->age_cdata.age_rxtail->m_next = mp;
			sc->age_cdata.age_rxtail = mp;
		}

		if (count == nsegs - 1) {
			/*
			 * It seems that L1 controller has no way
			 * to tell hardware to strip CRC bytes.
			 */
			sc->age_cdata.age_rxlen -= ETHER_CRC_LEN;
			if (nsegs > 1) {
				/* Remove the CRC bytes in chained mbufs. */
				pktlen -= ETHER_CRC_LEN;
				if (mp->m_len <= ETHER_CRC_LEN) {
					sc->age_cdata.age_rxtail =
					    sc->age_cdata.age_rxprev_tail;
					sc->age_cdata.age_rxtail->m_len -=
					    (ETHER_CRC_LEN - mp->m_len);
					sc->age_cdata.age_rxtail->m_next = NULL;
					m_freem(mp);
				} else {
					mp->m_len -= ETHER_CRC_LEN;
				}
			}

			m = sc->age_cdata.age_rxhead;
			m->m_flags |= M_PKTHDR;
			m->m_pkthdr.rcvif = ifp;
			m->m_pkthdr.len = sc->age_cdata.age_rxlen;
			/* Set the first mbuf length. */
			m->m_len = sc->age_cdata.age_rxlen - pktlen;

			/*
			 * Set checksum information.
			 * It seems that L1 controller can compute partial
			 * checksum. The partial checksum value can be used
			 * to accelerate checksum computation for fragmented
			 * TCP/UDP packets. The upper network stack already
			 * takes advantage of the partial checksum value in
			 * the IP reassembly stage. But I'm not sure about
			 * the correctness of the partial hardware checksum
			 * assistance due to lack of data sheet. If it is
			 * proven to work on L1 I'll enable it.
			 */
			if (status & AGE_RRD_IPV4) {
				if (status & AGE_RRD_IPCSUM_NOK)
					m->m_pkthdr.csum_flags |=
					    M_CSUM_IPv4_BAD;
				if ((status & (AGE_RRD_TCP | AGE_RRD_UDP)) &&
				    (status & AGE_RRD_TCP_UDPCSUM_NOK)) {
					m->m_pkthdr.csum_flags |=
					    M_CSUM_TCP_UDP_BAD;
				}
				/*
				 * Don't mark bad checksum for TCP/UDP frames
				 * as fragmented frames may always have the
				 * bad-checksum bit set in the descriptor
				 * status.
				 */
			}
#if NVLAN > 0
			/* Check for VLAN tagged frames. */
			if (status & AGE_RRD_VLAN) {
				uint32_t vtag = AGE_RX_VLAN(le32toh(rxrd->vtags));
				VLAN_INPUT_TAG(ifp, m, AGE_RX_VLAN_TAG(vtag),
					continue);
			}
#endif

			bpf_mtap(ifp, m);
			/* Pass it on. */
			ether_input(ifp, m);

			/* Reset mbuf chains. */
			AGE_RXCHAIN_RESET(sc);
		}
	}

	if (count != nsegs) {
		sc->age_cdata.age_rx_cons += nsegs;
		sc->age_cdata.age_rx_cons %= AGE_RX_RING_CNT;
	} else
		sc->age_cdata.age_rx_cons = rx_cons;
}

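/*
 *	Rx completion: drain the Rx return ring up to the producer index
 *	reported in the CMB, handing each received frame to age_rxeof().
 */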
static void
age_rxintr(struct age_softc *sc, int rr_prod)
{
	struct rx_rdesc *rxrd;
	int rr_cons, nsegs, pktlen, prog;

	rr_cons = sc->age_cdata.age_rr_cons;
	if (rr_cons == rr_prod)
		return;

	bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_rr_ring_map, 0,
	    sc->age_cdata.age_rr_ring_map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD);

	for (prog = 0; rr_cons != rr_prod; prog++) {
		rxrd = &sc->age_rdata.age_rr_ring[rr_cons];
		nsegs = AGE_RX_NSEGS(le32toh(rxrd->index));
		if (nsegs == 0)
			break;
		/*
		 * Check the number of segments against received bytes.
		 * A non-matching value would indicate that hardware
		 * is still trying to update Rx return descriptors.
		 * I'm not sure whether this check is really needed.
		 */
		pktlen = AGE_RX_BYTES(le32toh(rxrd->len));
		if (nsegs != ((pktlen + (MCLBYTES - ETHER_ALIGN - 1)) /
		    (MCLBYTES - ETHER_ALIGN)))
			break;

		/* Received a frame. */
		age_rxeof(sc, rxrd);

		/* Clear return ring. */
		rxrd->index = 0;
		AGE_DESC_INC(rr_cons, AGE_RR_RING_CNT);
	}

	if (prog > 0) {
		/* Update the consumer index. */
		sc->age_cdata.age_rr_cons = rr_cons;

		/* Sync descriptors. */
		bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_rr_ring_map, 0,
		    sc->age_cdata.age_rr_ring_map->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		/* Notify hardware availability of new Rx buffers. */
		AGE_COMMIT_MBOX(sc);
	}
}

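/*
 *	One-second timer used to drive mii_tick().
 */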
static void
age_tick(void *xsc)
{
	struct age_softc *sc = xsc;
	struct mii_data *mii = &sc->sc_miibus;
	int s;

	s = splnet();
	mii_tick(mii);
	splx(s);

	callout_schedule(&sc->sc_tick_ch, hz);
}

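/*
 *	Issue a master reset and poll the idle status register until
 *	all hardware blocks report idle.
 */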
static void
age_reset(struct age_softc *sc)
{
	uint32_t reg;
	int i;

	CSR_WRITE_4(sc, AGE_MASTER_CFG, MASTER_RESET);
	CSR_READ_4(sc, AGE_MASTER_CFG);
	DELAY(1000);
	for (i = AGE_RESET_TIMEOUT; i > 0; i--) {
		if ((reg = CSR_READ_4(sc, AGE_IDLE_STATUS)) == 0)
			break;
		DELAY(10);
	}

	if (i == 0)
		printf("%s: reset timeout (0x%08x)!\n",
		    device_xname(sc->sc_dev), reg);

	/* Initialize PCIe module. From Linux. */
	CSR_WRITE_4(sc, 0x12FC, 0x6500);
	CSR_WRITE_4(sc, 0x1008, CSR_READ_4(sc, 0x1008) | 0x8000);
}

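/*
 *	Initialize the hardware: reset the chip, set up the descriptor
 *	rings and program the MAC, queue and DMA parameters.
 */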
static int
age_init(struct ifnet *ifp)
{
	struct age_softc *sc = ifp->if_softc;
	struct mii_data *mii;
	uint8_t eaddr[ETHER_ADDR_LEN];
	bus_addr_t paddr;
	uint32_t reg, fsize;
	uint32_t rxf_hi, rxf_lo, rrd_hi, rrd_lo;
	int error;

	/*
	 * Cancel any pending I/O.
	 */
	age_stop(ifp, 0);

	/*
	 * Reset the chip to a known state.
	 */
	age_reset(sc);

	/* Initialize descriptors. */
	error = age_init_rx_ring(sc);
	if (error != 0) {
		printf("%s: no memory for Rx buffers.\n",
		    device_xname(sc->sc_dev));
		age_stop(ifp, 0);
		return error;
	}
	age_init_rr_ring(sc);
	age_init_tx_ring(sc);
	age_init_cmb_block(sc);
	age_init_smb_block(sc);

	/* Reprogram the station address. */
	memcpy(eaddr, CLLADDR(ifp->if_sadl), sizeof(eaddr));
	CSR_WRITE_4(sc, AGE_PAR0,
	    eaddr[2] << 24 | eaddr[3] << 16 | eaddr[4] << 8 | eaddr[5]);
	CSR_WRITE_4(sc, AGE_PAR1, eaddr[0] << 8 | eaddr[1]);

	/* Set descriptor base addresses. */
	paddr = sc->age_rdata.age_tx_ring_paddr;
	CSR_WRITE_4(sc, AGE_DESC_ADDR_HI, AGE_ADDR_HI(paddr));
	paddr = sc->age_rdata.age_rx_ring_paddr;
	CSR_WRITE_4(sc, AGE_DESC_RD_ADDR_LO, AGE_ADDR_LO(paddr));
	paddr = sc->age_rdata.age_rr_ring_paddr;
	CSR_WRITE_4(sc, AGE_DESC_RRD_ADDR_LO, AGE_ADDR_LO(paddr));
	paddr = sc->age_rdata.age_tx_ring_paddr;
	CSR_WRITE_4(sc, AGE_DESC_TPD_ADDR_LO, AGE_ADDR_LO(paddr));
	paddr = sc->age_rdata.age_cmb_block_paddr;
	CSR_WRITE_4(sc, AGE_DESC_CMB_ADDR_LO, AGE_ADDR_LO(paddr));
	paddr = sc->age_rdata.age_smb_block_paddr;
	CSR_WRITE_4(sc, AGE_DESC_SMB_ADDR_LO, AGE_ADDR_LO(paddr));

	/* Set Rx/Rx return descriptor counter. */
	CSR_WRITE_4(sc, AGE_DESC_RRD_RD_CNT,
	    ((AGE_RR_RING_CNT << DESC_RRD_CNT_SHIFT) &
	    DESC_RRD_CNT_MASK) |
	    ((AGE_RX_RING_CNT << DESC_RD_CNT_SHIFT) & DESC_RD_CNT_MASK));

	/* Set Tx descriptor counter. */
	CSR_WRITE_4(sc, AGE_DESC_TPD_CNT,
	    (AGE_TX_RING_CNT << DESC_TPD_CNT_SHIFT) & DESC_TPD_CNT_MASK);

	/* Tell hardware that we're ready to load descriptors. */
	CSR_WRITE_4(sc, AGE_DMA_BLOCK, DMA_BLOCK_LOAD);

	/*
	 * Initialize mailbox register.
	 * Updated producer/consumer index information is exchanged
	 * through this mailbox register. However, the Tx producer and
	 * Rx return consumer/Rx producer are all shared such that
	 * it's hard to separate the code path between Tx and Rx without
	 * locking. If the L1 hardware had a separate mailbox register
	 * for Tx and Rx consumer/producer management, we could have
	 * independent Tx/Rx handlers; the Rx handler could then run
	 * without any locking.
	 */
1693 	AGE_COMMIT_MBOX(sc);
1694 
1695 	/* Configure IPG/IFG parameters. */
1696 	CSR_WRITE_4(sc, AGE_IPG_IFG_CFG,
1697 	    ((IPG_IFG_IPG2_DEFAULT << IPG_IFG_IPG2_SHIFT) & IPG_IFG_IPG2_MASK) |
1698 	    ((IPG_IFG_IPG1_DEFAULT << IPG_IFG_IPG1_SHIFT) & IPG_IFG_IPG1_MASK) |
1699 	    ((IPG_IFG_MIFG_DEFAULT << IPG_IFG_MIFG_SHIFT) & IPG_IFG_MIFG_MASK) |
1700 	    ((IPG_IFG_IPGT_DEFAULT << IPG_IFG_IPGT_SHIFT) & IPG_IFG_IPGT_MASK));
1701 
1702 	/* Set parameters for half-duplex media. */
1703 	CSR_WRITE_4(sc, AGE_HDPX_CFG,
1704 	    ((HDPX_CFG_LCOL_DEFAULT << HDPX_CFG_LCOL_SHIFT) &
1705 	    HDPX_CFG_LCOL_MASK) |
1706 	    ((HDPX_CFG_RETRY_DEFAULT << HDPX_CFG_RETRY_SHIFT) &
1707 	    HDPX_CFG_RETRY_MASK) | HDPX_CFG_EXC_DEF_EN |
1708 	    ((HDPX_CFG_ABEBT_DEFAULT << HDPX_CFG_ABEBT_SHIFT) &
1709 	    HDPX_CFG_ABEBT_MASK) |
1710 	    ((HDPX_CFG_JAMIPG_DEFAULT << HDPX_CFG_JAMIPG_SHIFT) &
1711 	     HDPX_CFG_JAMIPG_MASK));
1712 
1713 	/* Configure interrupt moderation timer. */
1714 	sc->age_int_mod = AGE_IM_TIMER_DEFAULT;
1715 	CSR_WRITE_2(sc, AGE_IM_TIMER, AGE_USECS(sc->age_int_mod));
1716 	reg = CSR_READ_4(sc, AGE_MASTER_CFG);
1717 	reg &= ~MASTER_MTIMER_ENB;
1718 	if (AGE_USECS(sc->age_int_mod) == 0)
1719 		reg &= ~MASTER_ITIMER_ENB;
1720 	else
1721 		reg |= MASTER_ITIMER_ENB;
1722 	CSR_WRITE_4(sc, AGE_MASTER_CFG, reg);
1723 	if (agedebug)
1724 		printf("%s: interrupt moderation is %d us.\n",
1725 		    device_xname(sc->sc_dev), sc->age_int_mod);
1726 	CSR_WRITE_2(sc, AGE_INTR_CLR_TIMER, AGE_USECS(1000));
1727 
1728 	/* Set Maximum frame size but don't let MTU be lass than ETHER_MTU. */
1729 	if (ifp->if_mtu < ETHERMTU)
1730 		sc->age_max_frame_size = ETHERMTU;
1731 	else
1732 		sc->age_max_frame_size = ifp->if_mtu;
1733 	sc->age_max_frame_size += ETHER_HDR_LEN +
1734 	    sizeof(struct ether_vlan_header) + ETHER_CRC_LEN;
1735 	CSR_WRITE_4(sc, AGE_FRAME_SIZE, sc->age_max_frame_size);
1736 
1737 	/* Configure jumbo frame. */
1738 	fsize = roundup(sc->age_max_frame_size, sizeof(uint64_t));
1739 	CSR_WRITE_4(sc, AGE_RXQ_JUMBO_CFG,
1740 	    (((fsize / sizeof(uint64_t)) <<
1741 	    RXQ_JUMBO_CFG_SZ_THRESH_SHIFT) & RXQ_JUMBO_CFG_SZ_THRESH_MASK) |
1742 	    ((RXQ_JUMBO_CFG_LKAH_DEFAULT <<
1743 	    RXQ_JUMBO_CFG_LKAH_SHIFT) & RXQ_JUMBO_CFG_LKAH_MASK) |
1744 	    ((AGE_USECS(8) << RXQ_JUMBO_CFG_RRD_TIMER_SHIFT) &
1745 	    RXQ_JUMBO_CFG_RRD_TIMER_MASK));
1746 
1747 	/* Configure flow-control parameters. From Linux. */
1748 	if ((sc->age_flags & AGE_FLAG_PCIE) != 0) {
1749 		/*
1750 		 * Magic workaround for old-L1.
1751 		 * Don't know which hw revision requires this magic.
1752 		 */
1753 		CSR_WRITE_4(sc, 0x12FC, 0x6500);
1754 		/*
1755 		 * Another magic workaround for flow-control mode
1756 		 * change. From Linux.
1757 		 */
1758 		CSR_WRITE_4(sc, 0x1008, CSR_READ_4(sc, 0x1008) | 0x8000);
1759 	}
1760 	/*
1761 	 * TODO
1762 	 *  Should understand pause parameter relationships between FIFO
1763 	 *  size and number of Rx descriptors and Rx return descriptors.
1764 	 *
1765 	 *  Magic parameters came from Linux.
1766 	 */
1767 	switch (sc->age_chip_rev) {
1768 	case 0x8001:
1769 	case 0x9001:
1770 	case 0x9002:
1771 	case 0x9003:
1772 		rxf_hi = AGE_RX_RING_CNT / 16;
1773 		rxf_lo = (AGE_RX_RING_CNT * 7) / 8;
1774 		rrd_hi = (AGE_RR_RING_CNT * 7) / 8;
1775 		rrd_lo = AGE_RR_RING_CNT / 16;
1776 		break;
1777 	default:
1778 		reg = CSR_READ_4(sc, AGE_SRAM_RX_FIFO_LEN);
1779 		rxf_lo = reg / 16;
1780 		if (rxf_lo < 192)
1781 			rxf_lo = 192;
1782 		rxf_hi = (reg * 7) / 8;
1783 		if (rxf_hi < rxf_lo)
1784 			rxf_hi = rxf_lo + 16;
1785 		reg = CSR_READ_4(sc, AGE_SRAM_RRD_LEN);
1786 		rrd_lo = reg / 8;
1787 		rrd_hi = (reg * 7) / 8;
1788 		if (rrd_lo < 2)
1789 			rrd_lo = 2;
1790 		if (rrd_hi < rrd_lo)
1791 			rrd_hi = rrd_lo + 3;
1792 		break;
1793 	}
1794 	CSR_WRITE_4(sc, AGE_RXQ_FIFO_PAUSE_THRESH,
1795 	    ((rxf_lo << RXQ_FIFO_PAUSE_THRESH_LO_SHIFT) &
1796 	    RXQ_FIFO_PAUSE_THRESH_LO_MASK) |
1797 	    ((rxf_hi << RXQ_FIFO_PAUSE_THRESH_HI_SHIFT) &
1798 	    RXQ_FIFO_PAUSE_THRESH_HI_MASK));
1799 	CSR_WRITE_4(sc, AGE_RXQ_RRD_PAUSE_THRESH,
1800 	    ((rrd_lo << RXQ_RRD_PAUSE_THRESH_LO_SHIFT) &
1801 	    RXQ_RRD_PAUSE_THRESH_LO_MASK) |
1802 	    ((rrd_hi << RXQ_RRD_PAUSE_THRESH_HI_SHIFT) &
1803 	    RXQ_RRD_PAUSE_THRESH_HI_MASK));
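	/*
	 * The LO/HI values are presumably the watermarks at which the
	 * MAC asserts and releases flow control; per the TODO above,
	 * the exact relationship is not understood and the numbers are
	 * taken verbatim from the Linux driver.
	 */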
1804 
1805 	/* Configure RxQ. */
1806 	CSR_WRITE_4(sc, AGE_RXQ_CFG,
1807 	    ((RXQ_CFG_RD_BURST_DEFAULT << RXQ_CFG_RD_BURST_SHIFT) &
1808 	    RXQ_CFG_RD_BURST_MASK) |
1809 	    ((RXQ_CFG_RRD_BURST_THRESH_DEFAULT <<
1810 	    RXQ_CFG_RRD_BURST_THRESH_SHIFT) & RXQ_CFG_RRD_BURST_THRESH_MASK) |
1811 	    ((RXQ_CFG_RD_PREF_MIN_IPG_DEFAULT <<
1812 	    RXQ_CFG_RD_PREF_MIN_IPG_SHIFT) & RXQ_CFG_RD_PREF_MIN_IPG_MASK) |
1813 	    RXQ_CFG_CUT_THROUGH_ENB | RXQ_CFG_ENB);
1814 
1815 	/* Configure TxQ. */
1816 	CSR_WRITE_4(sc, AGE_TXQ_CFG,
1817 	    ((TXQ_CFG_TPD_BURST_DEFAULT << TXQ_CFG_TPD_BURST_SHIFT) &
1818 	    TXQ_CFG_TPD_BURST_MASK) |
1819 	    ((TXQ_CFG_TX_FIFO_BURST_DEFAULT << TXQ_CFG_TX_FIFO_BURST_SHIFT) &
1820 	    TXQ_CFG_TX_FIFO_BURST_MASK) |
1821 	    ((TXQ_CFG_TPD_FETCH_DEFAULT <<
1822 	    TXQ_CFG_TPD_FETCH_THRESH_SHIFT) & TXQ_CFG_TPD_FETCH_THRESH_MASK) |
1823 	    TXQ_CFG_ENB);
1824 
1825 	/* Configure DMA parameters. */
1826 	CSR_WRITE_4(sc, AGE_DMA_CFG,
1827 	    DMA_CFG_ENH_ORDER | DMA_CFG_RCB_64 |
1828 	    sc->age_dma_rd_burst | DMA_CFG_RD_ENB |
1829 	    sc->age_dma_wr_burst | DMA_CFG_WR_ENB);
1830 
1831 	/* Configure CMB DMA write threshold. */
1832 	CSR_WRITE_4(sc, AGE_CMB_WR_THRESH,
1833 	    ((CMB_WR_THRESH_RRD_DEFAULT << CMB_WR_THRESH_RRD_SHIFT) &
1834 	    CMB_WR_THRESH_RRD_MASK) |
1835 	    ((CMB_WR_THRESH_TPD_DEFAULT << CMB_WR_THRESH_TPD_SHIFT) &
1836 	    CMB_WR_THRESH_TPD_MASK));
1837 
1838 	/* Set CMB/SMB timer and enable them. */
1839 	CSR_WRITE_4(sc, AGE_CMB_WR_TIMER,
1840 	    ((AGE_USECS(2) << CMB_WR_TIMER_TX_SHIFT) & CMB_WR_TIMER_TX_MASK) |
1841 	    ((AGE_USECS(2) << CMB_WR_TIMER_RX_SHIFT) & CMB_WR_TIMER_RX_MASK));
1842 
1843 	/* Request SMB updates for every seconds. */
1844 	CSR_WRITE_4(sc, AGE_SMB_TIMER, AGE_USECS(1000 * 1000));
1845 	CSR_WRITE_4(sc, AGE_CSMB_CTRL, CSMB_CTRL_SMB_ENB | CSMB_CTRL_CMB_ENB);
1846 
1847 	/*
1848 	 * Disable all WOL bits, as WOL can interfere with normal Rx
1849 	 * operation.
1850 	 */
1851 	CSR_WRITE_4(sc, AGE_WOL_CFG, 0);
1852 
1853 	/*
1854 	 * Configure Tx/Rx MACs.
1855 	 *  - Auto-padding for short frames.
1856 	 *  - Enable CRC generation.
1857 	 *  Start with full-duplex/1000Mbps media; the MAC is
1858 	 *  reconfigured once the link has been established.
1859 	 */
1860 	CSR_WRITE_4(sc, AGE_MAC_CFG,
1861 	    MAC_CFG_TX_CRC_ENB | MAC_CFG_TX_AUTO_PAD |
1862 	    MAC_CFG_FULL_DUPLEX | MAC_CFG_SPEED_1000 |
1863 	    ((MAC_CFG_PREAMBLE_DEFAULT << MAC_CFG_PREAMBLE_SHIFT) &
1864 	    MAC_CFG_PREAMBLE_MASK));
1865 
1866 	/* Set up the receive filter. */
1867 	age_rxfilter(sc);
1868 	age_rxvlan(sc);
1869 
1870 	reg = CSR_READ_4(sc, AGE_MAC_CFG);
1871 	reg |= MAC_CFG_RXCSUM_ENB;
1872 
1873 	/* Ack and clear all pending interrupts; enable the ones we service. */
1874 	CSR_WRITE_4(sc, AGE_INTR_STATUS, 0);
1875 	CSR_WRITE_4(sc, AGE_INTR_MASK, AGE_INTRS);
1876 
1877 	/* Finally enable Tx/Rx MAC. */
1878 	CSR_WRITE_4(sc, AGE_MAC_CFG, reg | MAC_CFG_TX_ENB | MAC_CFG_RX_ENB);
1879 
1880 	sc->age_flags &= ~AGE_FLAG_LINK;
1881 
1882 	/* Switch to the current media. */
1883 	mii = &sc->sc_miibus;
1884 	mii_mediachg(mii);
1885 
1886 	callout_schedule(&sc->sc_tick_ch, hz);
1887 
1888 	ifp->if_flags |= IFF_RUNNING;
1889 	ifp->if_flags &= ~IFF_OACTIVE;
1890 
1891 	return 0;
1892 }
1893 
1894 static void
1895 age_stop(struct ifnet *ifp, int disable)
1896 {
1897 	struct age_softc *sc = ifp->if_softc;
1898 	struct age_txdesc *txd;
1899 	struct age_rxdesc *rxd;
1900 	uint32_t reg;
1901 	int i;
1902 
1903 	callout_stop(&sc->sc_tick_ch);
1904 
1905 	/*
1906 	 * Mark the interface down and cancel the watchdog timer.
1907 	 */
1908 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
1909 	ifp->if_timer = 0;
1910 
1911 	sc->age_flags &= ~AGE_FLAG_LINK;
1912 
1913 	mii_down(&sc->sc_miibus);
1914 
1915 	/*
1916 	 * Disable interrupts.
1917 	 */
1918 	CSR_WRITE_4(sc, AGE_INTR_MASK, 0);
1919 	CSR_WRITE_4(sc, AGE_INTR_STATUS, 0xFFFFFFFF);
1920 
1921 	/* Stop CMB/SMB updates. */
1922 	CSR_WRITE_4(sc, AGE_CSMB_CTRL, 0);
1923 
1924 	/* Stop Rx/Tx MAC. */
1925 	age_stop_rxmac(sc);
1926 	age_stop_txmac(sc);
1927 
1928 	/* Stop DMA. */
1929 	CSR_WRITE_4(sc, AGE_DMA_CFG,
1930 	    CSR_READ_4(sc, AGE_DMA_CFG) & ~(DMA_CFG_RD_ENB | DMA_CFG_WR_ENB));
1931 
1932 	/* Stop TxQ/RxQ. */
1933 	CSR_WRITE_4(sc, AGE_TXQ_CFG,
1934 	    CSR_READ_4(sc, AGE_TXQ_CFG) & ~TXQ_CFG_ENB);
1935 	CSR_WRITE_4(sc, AGE_RXQ_CFG,
1936 	    CSR_READ_4(sc, AGE_RXQ_CFG) & ~RXQ_CFG_ENB);
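	/*
	 * Wait for all units to report idle (up to AGE_RESET_TIMEOUT
	 * polls, 10us apart).
	 */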
1937 	for (i = AGE_RESET_TIMEOUT; i > 0; i--) {
1938 		if ((reg = CSR_READ_4(sc, AGE_IDLE_STATUS)) == 0)
1939 			break;
1940 		DELAY(10);
1941 	}
1942 	if (i == 0)
1943 		printf("%s: stopping Rx/Tx MACs timed out (0x%08x)!\n",
1944 		    device_xname(sc->sc_dev), reg);
1945 
1946 	/* Reclaim Rx buffers that have been processed. */
1947 	if (sc->age_cdata.age_rxhead != NULL)
1948 		m_freem(sc->age_cdata.age_rxhead);
1949 	AGE_RXCHAIN_RESET(sc);
1950 
1951 	/*
1952 	 * Free RX and TX mbufs still in the queues.
1953 	 */
1954 	for (i = 0; i < AGE_RX_RING_CNT; i++) {
1955 		rxd = &sc->age_cdata.age_rxdesc[i];
1956 		if (rxd->rx_m != NULL) {
1957 			bus_dmamap_unload(sc->sc_dmat, rxd->rx_dmamap);
1958 			m_freem(rxd->rx_m);
1959 			rxd->rx_m = NULL;
1960 		}
1961 	}
1962 	for (i = 0; i < AGE_TX_RING_CNT; i++) {
1963 		txd = &sc->age_cdata.age_txdesc[i];
1964 		if (txd->tx_m != NULL) {
1965 			bus_dmamap_unload(sc->sc_dmat, txd->tx_dmamap);
1966 			m_freem(txd->tx_m);
1967 			txd->tx_m = NULL;
1968 		}
1969 	}
1970 }
1971 
1972 static void
1973 age_stats_update(struct age_softc *sc)
1974 {
1975 	struct ifnet *ifp = &sc->sc_ec.ec_if;
1976 	struct age_stats *stat;
1977 	struct smb *smb;
1978 
1979 	stat = &sc->age_stat;
1980 
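	/*
	 * The hardware DMAs its statistics into the SMB; sync the
	 * block for the CPU before reading, and check the 'updated'
	 * flag so stale contents are not accumulated twice.
	 */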
1981 	bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_smb_block_map, 0,
1982 	    sc->age_cdata.age_smb_block_map->dm_mapsize,
1983 	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1984 
1985 	smb = sc->age_rdata.age_smb_block;
1986 	if (smb->updated == 0)
1987 		return;
1988 
1989 	/* Rx stats. */
1990 	stat->rx_frames += smb->rx_frames;
1991 	stat->rx_bcast_frames += smb->rx_bcast_frames;
1992 	stat->rx_mcast_frames += smb->rx_mcast_frames;
1993 	stat->rx_pause_frames += smb->rx_pause_frames;
1994 	stat->rx_control_frames += smb->rx_control_frames;
1995 	stat->rx_crcerrs += smb->rx_crcerrs;
1996 	stat->rx_lenerrs += smb->rx_lenerrs;
1997 	stat->rx_bytes += smb->rx_bytes;
1998 	stat->rx_runts += smb->rx_runts;
1999 	stat->rx_fragments += smb->rx_fragments;
2000 	stat->rx_pkts_64 += smb->rx_pkts_64;
2001 	stat->rx_pkts_65_127 += smb->rx_pkts_65_127;
2002 	stat->rx_pkts_128_255 += smb->rx_pkts_128_255;
2003 	stat->rx_pkts_256_511 += smb->rx_pkts_256_511;
2004 	stat->rx_pkts_512_1023 += smb->rx_pkts_512_1023;
2005 	stat->rx_pkts_1024_1518 += smb->rx_pkts_1024_1518;
2006 	stat->rx_pkts_1519_max += smb->rx_pkts_1519_max;
2007 	stat->rx_pkts_truncated += smb->rx_pkts_truncated;
2008 	stat->rx_fifo_oflows += smb->rx_fifo_oflows;
2009 	stat->rx_desc_oflows += smb->rx_desc_oflows;
2010 	stat->rx_alignerrs += smb->rx_alignerrs;
2011 	stat->rx_bcast_bytes += smb->rx_bcast_bytes;
2012 	stat->rx_mcast_bytes += smb->rx_mcast_bytes;
2013 	stat->rx_pkts_filtered += smb->rx_pkts_filtered;
2014 
2015 	/* Tx stats. */
2016 	stat->tx_frames += smb->tx_frames;
2017 	stat->tx_bcast_frames += smb->tx_bcast_frames;
2018 	stat->tx_mcast_frames += smb->tx_mcast_frames;
2019 	stat->tx_pause_frames += smb->tx_pause_frames;
2020 	stat->tx_excess_defer += smb->tx_excess_defer;
2021 	stat->tx_control_frames += smb->tx_control_frames;
2022 	stat->tx_deferred += smb->tx_deferred;
2023 	stat->tx_bytes += smb->tx_bytes;
2024 	stat->tx_pkts_64 += smb->tx_pkts_64;
2025 	stat->tx_pkts_65_127 += smb->tx_pkts_65_127;
2026 	stat->tx_pkts_128_255 += smb->tx_pkts_128_255;
2027 	stat->tx_pkts_256_511 += smb->tx_pkts_256_511;
2028 	stat->tx_pkts_512_1023 += smb->tx_pkts_512_1023;
2029 	stat->tx_pkts_1024_1518 += smb->tx_pkts_1024_1518;
2030 	stat->tx_pkts_1519_max += smb->tx_pkts_1519_max;
2031 	stat->tx_single_colls += smb->tx_single_colls;
2032 	stat->tx_multi_colls += smb->tx_multi_colls;
2033 	stat->tx_late_colls += smb->tx_late_colls;
2034 	stat->tx_excess_colls += smb->tx_excess_colls;
2035 	stat->tx_underrun += smb->tx_underrun;
2036 	stat->tx_desc_underrun += smb->tx_desc_underrun;
2037 	stat->tx_lenerrs += smb->tx_lenerrs;
2038 	stat->tx_pkts_truncated += smb->tx_pkts_truncated;
2039 	stat->tx_bcast_bytes += smb->tx_bcast_bytes;
2040 	stat->tx_mcast_bytes += smb->tx_mcast_bytes;
2041 
2042 	/* Update counters in ifnet. */
2043 	ifp->if_opackets += smb->tx_frames;
2044 
2045 	ifp->if_collisions += smb->tx_single_colls +
2046 	    smb->tx_multi_colls + smb->tx_late_colls +
2047 	    smb->tx_excess_colls * HDPX_CFG_RETRY_DEFAULT;
2048 
2049 	ifp->if_oerrors += smb->tx_excess_colls +
2050 	    smb->tx_late_colls + smb->tx_underrun +
2051 	    smb->tx_pkts_truncated;
2052 
2053 	ifp->if_ipackets += smb->rx_frames;
2054 
2055 	ifp->if_ierrors += smb->rx_crcerrs + smb->rx_lenerrs +
2056 	    smb->rx_runts + smb->rx_pkts_truncated +
2057 	    smb->rx_fifo_oflows + smb->rx_desc_oflows +
2058 	    smb->rx_alignerrs;
2059 
2060 	/* Update done, clear. */
2061 	smb->updated = 0;
2062 
2063 	bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_smb_block_map, 0,
2064 	    sc->age_cdata.age_smb_block_map->dm_mapsize,
2065 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2066 }
2067 
2068 static void
2069 age_stop_txmac(struct age_softc *sc)
2070 {
2071 	uint32_t reg;
2072 	int i;
2073 
2074 	reg = CSR_READ_4(sc, AGE_MAC_CFG);
2075 	if ((reg & MAC_CFG_TX_ENB) != 0) {
2076 		reg &= ~MAC_CFG_TX_ENB;
2077 		CSR_WRITE_4(sc, AGE_MAC_CFG, reg);
2078 	}
2079 	/* Stop Tx DMA engine. */
2080 	reg = CSR_READ_4(sc, AGE_DMA_CFG);
2081 	if ((reg & DMA_CFG_RD_ENB) != 0) {
2082 		reg &= ~DMA_CFG_RD_ENB;
2083 		CSR_WRITE_4(sc, AGE_DMA_CFG, reg);
2084 	}
2085 	for (i = AGE_RESET_TIMEOUT; i > 0; i--) {
2086 		if ((CSR_READ_4(sc, AGE_IDLE_STATUS) &
2087 		    (IDLE_STATUS_TXMAC | IDLE_STATUS_DMARD)) == 0)
2088 			break;
2089 		DELAY(10);
2090 	}
2091 	if (i == 0)
2092 		printf("%s: stopping TxMAC timed out!\n", device_xname(sc->sc_dev));
2093 }
2094 
2095 static void
2096 age_stop_rxmac(struct age_softc *sc)
2097 {
2098 	uint32_t reg;
2099 	int i;
2100 
2101 	reg = CSR_READ_4(sc, AGE_MAC_CFG);
2102 	if ((reg & MAC_CFG_RX_ENB) != 0) {
2103 		reg &= ~MAC_CFG_RX_ENB;
2104 		CSR_WRITE_4(sc, AGE_MAC_CFG, reg);
2105 	}
2106 	/* Stop Rx DMA engine. */
2107 	reg = CSR_READ_4(sc, AGE_DMA_CFG);
2108 	if ((reg & DMA_CFG_WR_ENB) != 0) {
2109 		reg &= ~DMA_CFG_WR_ENB;
2110 		CSR_WRITE_4(sc, AGE_DMA_CFG, reg);
2111 	}
2112 	for (i = AGE_RESET_TIMEOUT; i > 0; i--) {
2113 		if ((CSR_READ_4(sc, AGE_IDLE_STATUS) &
2114 		    (IDLE_STATUS_RXMAC | IDLE_STATUS_DMAWR)) == 0)
2115 			break;
2116 		DELAY(10);
2117 	}
2118 	if (i == 0)
2119 		printf("%s: stopping RxMAC timed out!\n", device_xname(sc->sc_dev));
2120 }
2121 
2122 static void
2123 age_init_tx_ring(struct age_softc *sc)
2124 {
2125 	struct age_ring_data *rd;
2126 	struct age_txdesc *txd;
2127 	int i;
2128 
2129 	sc->age_cdata.age_tx_prod = 0;
2130 	sc->age_cdata.age_tx_cons = 0;
2131 	sc->age_cdata.age_tx_cnt = 0;
2132 
2133 	rd = &sc->age_rdata;
2134 	memset(rd->age_tx_ring, 0, AGE_TX_RING_SZ);
2135 	for (i = 0; i < AGE_TX_RING_CNT; i++) {
2136 		txd = &sc->age_cdata.age_txdesc[i];
2137 		txd->tx_desc = &rd->age_tx_ring[i];
2138 		txd->tx_m = NULL;
2139 	}
2140 	bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_tx_ring_map, 0,
2141 	    sc->age_cdata.age_tx_ring_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
2142 }
2143 
2144 static int
2145 age_init_rx_ring(struct age_softc *sc)
2146 {
2147 	struct age_ring_data *rd;
2148 	struct age_rxdesc *rxd;
2149 	int i;
2150 
2151 	sc->age_cdata.age_rx_cons = AGE_RX_RING_CNT - 1;
2152 	rd = &sc->age_rdata;
2153 	memset(rd->age_rx_ring, 0, AGE_RX_RING_SZ);
2154 	for (i = 0; i < AGE_RX_RING_CNT; i++) {
2155 		rxd = &sc->age_cdata.age_rxdesc[i];
2156 		rxd->rx_m = NULL;
2157 		rxd->rx_desc = &rd->age_rx_ring[i];
2158 		if (age_newbuf(sc, rxd, 1) != 0)
2159 			return ENOBUFS;
2160 	}
2161 
2162 	bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_rx_ring_map, 0,
2163 	    sc->age_cdata.age_rx_ring_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
2164 
2165 	return 0;
2166 }
2167 
2168 static void
2169 age_init_rr_ring(struct age_softc *sc)
2170 {
2171 	struct age_ring_data *rd;
2172 
2173 	sc->age_cdata.age_rr_cons = 0;
2174 	AGE_RXCHAIN_RESET(sc);
2175 
2176 	rd = &sc->age_rdata;
2177 	memset(rd->age_rr_ring, 0, AGE_RR_RING_SZ);
2178 	bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_rr_ring_map, 0,
2179 	    sc->age_cdata.age_rr_ring_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
2180 }
2181 
2182 static void
2183 age_init_cmb_block(struct age_softc *sc)
2184 {
2185 	struct age_ring_data *rd;
2186 
2187 	rd = &sc->age_rdata;
2188 	memset(rd->age_cmb_block, 0, AGE_CMB_BLOCK_SZ);
2189 	bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_cmb_block_map, 0,
2190 	    sc->age_cdata.age_cmb_block_map->dm_mapsize,
2191 	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
2192 }
2193 
2194 static void
2195 age_init_smb_block(struct age_softc *sc)
2196 {
2197 	struct age_ring_data *rd;
2198 
2199 	rd = &sc->age_rdata;
2200 	memset(rd->age_smb_block, 0, AGE_SMB_BLOCK_SZ);
2201 	bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_smb_block_map, 0,
2202 	    sc->age_cdata.age_smb_block_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
2203 }
2204 
2205 static int
2206 age_newbuf(struct age_softc *sc, struct age_rxdesc *rxd, int init)
2207 {
2208 	struct rx_desc *desc;
2209 	struct mbuf *m;
2210 	bus_dmamap_t map;
2211 	int error;
2212 
2213 	MGETHDR(m, M_DONTWAIT, MT_DATA);
2214 	if (m == NULL)
2215 		return ENOBUFS;
2216 	MCLGET(m, M_DONTWAIT);
2217 	if ((m->m_flags & M_EXT) == 0) {
2218 		m_freem(m);
2219 		return ENOBUFS;
2220 	}
2221 
2222 	m->m_len = m->m_pkthdr.len = MCLBYTES;
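	/*
	 * ETHER_ALIGN (2 bytes) offsets the buffer so that the IP
	 * header lands on a 4-byte boundary after the 14-byte
	 * Ethernet header.
	 */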
2223 	m_adj(m, ETHER_ALIGN);
2224 
2225 	error = bus_dmamap_load_mbuf(sc->sc_dmat,
2226 	    sc->age_cdata.age_rx_sparemap, m, BUS_DMA_NOWAIT);
2227 
2228 	if (error == 0 && sc->age_cdata.age_rx_sparemap->dm_nsegs != 1) {
2229 		/* The Rx descriptor can address only one segment. */
2230 		bus_dmamap_unload(sc->sc_dmat, sc->age_cdata.age_rx_sparemap);
2231 		error = EFBIG;
2232 		printf("%s: too many segments?!\n", device_xname(sc->sc_dev));
2233 	}
2234 	if (error != 0) {
2235 		m_freem(m);
2236 
2237 		if (init)
2238 			printf("%s: can't load RX mbuf\n",
2239 			    device_xname(sc->sc_dev));
2240 		return error;
2241 	}
2242 
2243 	if (rxd->rx_m != NULL) {
2244 		bus_dmamap_sync(sc->sc_dmat, rxd->rx_dmamap, 0,
2245 		    rxd->rx_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
2246 		bus_dmamap_unload(sc->sc_dmat, rxd->rx_dmamap);
2247 	}
2248 	map = rxd->rx_dmamap;
2249 	rxd->rx_dmamap = sc->age_cdata.age_rx_sparemap;
2250 	sc->age_cdata.age_rx_sparemap = map;
2251 	rxd->rx_m = m;
2252 
2253 	desc = rxd->rx_desc;
2254 	desc->addr = htole64(rxd->rx_dmamap->dm_segs[0].ds_addr);
2255 	desc->len =
2256 	    htole32((rxd->rx_dmamap->dm_segs[0].ds_len & AGE_RD_LEN_MASK) <<
2257 	    AGE_RD_LEN_SHIFT);
2258 
2259 	return 0;
2260 }
2261 
2262 static void
2263 age_rxvlan(struct age_softc *sc)
2264 {
2265 	uint32_t reg;
2266 
2267 	reg = CSR_READ_4(sc, AGE_MAC_CFG);
2268 	reg &= ~MAC_CFG_VLAN_TAG_STRIP;
2269 	if (sc->sc_ec.ec_capenable & ETHERCAP_VLAN_HWTAGGING)
2270 		reg |= MAC_CFG_VLAN_TAG_STRIP;
2271 	CSR_WRITE_4(sc, AGE_MAC_CFG, reg);
2272 }
2273 
2274 static void
2275 age_rxfilter(struct age_softc *sc)
2276 {
2277 	struct ethercom *ec = &sc->sc_ec;
2278 	struct ifnet *ifp = &sc->sc_ec.ec_if;
2279 	struct ether_multi *enm;
2280 	struct ether_multistep step;
2281 	uint32_t crc;
2282 	uint32_t mchash[2];
2283 	uint32_t rxcfg;
2284 
2285 	rxcfg = CSR_READ_4(sc, AGE_MAC_CFG);
2286 	rxcfg &= ~(MAC_CFG_ALLMULTI | MAC_CFG_BCAST | MAC_CFG_PROMISC);
2287 	ifp->if_flags &= ~IFF_ALLMULTI;
2288 
2289 	/*
2290 	 * Always accept broadcast frames.
2291 	 */
2292 	rxcfg |= MAC_CFG_BCAST;
2293 
2294 	if (ifp->if_flags & IFF_PROMISC || ec->ec_multicnt > 0) {
2295 		ifp->if_flags |= IFF_ALLMULTI;
2296 		if (ifp->if_flags & IFF_PROMISC)
2297 			rxcfg |= MAC_CFG_PROMISC;
2298 		else
2299 			rxcfg |= MAC_CFG_ALLMULTI;
2300 		mchash[0] = mchash[1] = 0xFFFFFFFF;
2301 	} else {
2302 		/* Program new filter. */
2303 		memset(mchash, 0, sizeof(mchash));
2304 
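		/*
		 * The 64-bit hash uses bit 31 of the little-endian CRC
		 * of the address to select MAR0/MAR1 and bits 30-26 to
		 * select the bit within the chosen register.
		 */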
2305 		ETHER_FIRST_MULTI(step, ec, enm);
2306 		while (enm != NULL) {
2307 			crc = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN);
2308 			mchash[crc >> 31] |= 1 << ((crc >> 26) & 0x1f);
2309 			ETHER_NEXT_MULTI(step, enm);
2310 		}
2311 	}
2312 
2313 	CSR_WRITE_4(sc, AGE_MAR0, mchash[0]);
2314 	CSR_WRITE_4(sc, AGE_MAR1, mchash[1]);
2315 	CSR_WRITE_4(sc, AGE_MAC_CFG, rxcfg);
2316 }
2317