/*	$NetBSD: if_age.c,v 1.69 2020/03/01 02:51:42 thorpej Exp $ */
/*	$OpenBSD: if_age.c,v 1.1 2009/01/16 05:00:34 kevlo Exp $	*/

/*-
 * Copyright (c) 2008, Pyun YongHyeon <yongari@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/* Driver for Attansic Technology Corp. L1 Gigabit Ethernet. */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_age.c,v 1.69 2020/03/01 02:51:42 thorpej Exp $");

#include "vlan.h"

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/queue.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/callout.h>
#include <sys/socket.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#endif

#include <net/if_types.h>
#include <net/if_vlanvar.h>

#include <net/bpf.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_agereg.h>

static int	age_match(device_t, cfdata_t, void *);
static void	age_attach(device_t, device_t, void *);
static int	age_detach(device_t, int);

static bool	age_resume(device_t, const pmf_qual_t *);

static int	age_miibus_readreg(device_t, int, int, uint16_t *);
static int	age_miibus_writereg(device_t, int, int, uint16_t);
static void	age_miibus_statchg(struct ifnet *);

static int	age_init(struct ifnet *);
static int	age_ioctl(struct ifnet *, u_long, void *);
static void	age_start(struct ifnet *);
static void	age_watchdog(struct ifnet *);
static bool	age_shutdown(device_t, int);
static void	age_mediastatus(struct ifnet *, struct ifmediareq *);
static int	age_mediachange(struct ifnet *);

static int	age_intr(void *);
static int	age_dma_alloc(struct age_softc *);
static void	age_dma_free(struct age_softc *);
static void	age_get_macaddr(struct age_softc *, uint8_t[]);
static void	age_phy_reset(struct age_softc *);

static int	age_encap(struct age_softc *, struct mbuf **);
static void	age_init_tx_ring(struct age_softc *);
static int	age_init_rx_ring(struct age_softc *);
static void	age_init_rr_ring(struct age_softc *);
static void	age_init_cmb_block(struct age_softc *);
static void	age_init_smb_block(struct age_softc *);
static int	age_newbuf(struct age_softc *, struct age_rxdesc *, int);
static void	age_mac_config(struct age_softc *);
static void	age_txintr(struct age_softc *, int);
static void	age_rxeof(struct age_softc *sc, struct rx_rdesc *);
static void	age_rxintr(struct age_softc *, int);
static void	age_tick(void *);
static void	age_reset(struct age_softc *);
static void	age_stop(struct ifnet *, int);
static void	age_stats_update(struct age_softc *);
static void	age_stop_txmac(struct age_softc *);
static void	age_stop_rxmac(struct age_softc *);
static void	age_rxvlan(struct age_softc *sc);
static void	age_rxfilter(struct age_softc *);

CFATTACH_DECL_NEW(age, sizeof(struct age_softc),
    age_match, age_attach, age_detach, NULL);

int agedebug = 0;
#define	DPRINTF(x)	do { if (agedebug) printf x; } while (0)

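/* Tx checksum offload, where used in age_encap(), covers TCP/UDP over IPv4. */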
#define AGE_CSUM_FEATURES	(M_CSUM_TCPv4 | M_CSUM_UDPv4)

static int
age_match(device_t dev, cfdata_t match, void *aux)
{
	struct pci_attach_args *pa = aux;

	return (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_ATTANSIC &&
	    PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_ATTANSIC_ETHERNET_GIGA);
}

static void
age_attach(device_t parent, device_t self, void *aux)
{
	struct age_softc *sc = device_private(self);
	struct pci_attach_args *pa = aux;
	pci_intr_handle_t ih;
	const char *intrstr;
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct mii_data * const mii = &sc->sc_miibus;
	pcireg_t memtype;
	int error = 0;
	char intrbuf[PCI_INTRSTR_LEN];

	aprint_naive("\n");
	aprint_normal(": Attansic/Atheros L1 Gigabit Ethernet\n");

	sc->sc_dev = self;
	sc->sc_pct = pa->pa_pc;
	sc->sc_pcitag = pa->pa_tag;

	if (pci_dma64_available(pa))
		sc->sc_dmat = pa->pa_dmat64;
	else
		sc->sc_dmat = pa->pa_dmat;

	/*
	 * Allocate IO memory
	 */
	memtype = pci_mapreg_type(sc->sc_pct, sc->sc_pcitag, AGE_PCIR_BAR);
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT_1M:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		break;
	default:
		aprint_error_dev(self, "invalid base address register\n");
		break;
	}

	if (pci_mapreg_map(pa, AGE_PCIR_BAR, memtype, 0, &sc->sc_mem_bt,
	    &sc->sc_mem_bh, NULL, &sc->sc_mem_size) != 0) {
		aprint_error_dev(self, "could not map mem space\n");
		return;
	}

	if (pci_intr_map(pa, &ih) != 0) {
		aprint_error_dev(self, "could not map interrupt\n");
		goto fail;
	}

	/*
	 * Allocate IRQ
	 */
	intrstr = pci_intr_string(sc->sc_pct, ih, intrbuf, sizeof(intrbuf));
	sc->sc_irq_handle = pci_intr_establish_xname(sc->sc_pct, ih, IPL_NET,
	    age_intr, sc, device_xname(self));
	if (sc->sc_irq_handle == NULL) {
		aprint_error_dev(self, "could not establish interrupt");
		if (intrstr != NULL)
			aprint_error(" at %s", intrstr);
		aprint_error("\n");
		goto fail;
	}
	aprint_normal_dev(self, "%s\n", intrstr);

	/* Set PHY address. */
	sc->age_phyaddr = AGE_PHY_ADDR;

	/* Reset PHY. */
	age_phy_reset(sc);

	/* Reset the ethernet controller. */
	age_reset(sc);

	/* Get PCI and chip id/revision. */
	sc->age_rev = PCI_REVISION(pa->pa_class);
	sc->age_chip_rev = CSR_READ_4(sc, AGE_MASTER_CFG) >>
	    MASTER_CHIP_REV_SHIFT;

	aprint_debug_dev(self, "PCI device revision: 0x%04x\n", sc->age_rev);
	aprint_debug_dev(self, "Chip id/revision: 0x%04x\n", sc->age_chip_rev);

	if (agedebug) {
		aprint_debug_dev(self, "%d Tx FIFO, %d Rx FIFO\n",
		    CSR_READ_4(sc, AGE_SRAM_TX_FIFO_LEN),
		    CSR_READ_4(sc, AGE_SRAM_RX_FIFO_LEN));
	}

	/* Set max allowable DMA size. */
	sc->age_dma_rd_burst = DMA_CFG_RD_BURST_128;
	sc->age_dma_wr_burst = DMA_CFG_WR_BURST_128;

	/* Allocate DMA resources. */
	error = age_dma_alloc(sc);
	if (error)
		goto fail;

	callout_init(&sc->sc_tick_ch, 0);
	callout_setfunc(&sc->sc_tick_ch, age_tick, sc);

	/* Load station address. */
	age_get_macaddr(sc, sc->sc_enaddr);

	aprint_normal_dev(self, "Ethernet address %s\n",
	    ether_sprintf(sc->sc_enaddr));

	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_init = age_init;
	ifp->if_ioctl = age_ioctl;
	ifp->if_start = age_start;
	ifp->if_stop = age_stop;
	ifp->if_watchdog = age_watchdog;
	ifp->if_baudrate = IF_Gbps(1);
	IFQ_SET_MAXLEN(&ifp->if_snd, AGE_TX_RING_CNT - 1);
	IFQ_SET_READY(&ifp->if_snd);
	strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);

	sc->sc_ec.ec_capabilities = ETHERCAP_VLAN_MTU;

	ifp->if_capabilities |= IFCAP_CSUM_IPv4_Rx |
				IFCAP_CSUM_TCPv4_Rx |
				IFCAP_CSUM_UDPv4_Rx;
#ifdef AGE_CHECKSUM
	ifp->if_capabilities |= IFCAP_CSUM_IPv4_Tx |
				IFCAP_CSUM_TCPv4_Tx |
				IFCAP_CSUM_UDPv4_Tx;
#endif

#if NVLAN > 0
	sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_HWTAGGING;
	sc->sc_ec.ec_capenable |= ETHERCAP_VLAN_HWTAGGING;
#endif

	/* Set up MII bus. */
	mii->mii_ifp = ifp;
	mii->mii_readreg = age_miibus_readreg;
	mii->mii_writereg = age_miibus_writereg;
	mii->mii_statchg = age_miibus_statchg;

	sc->sc_ec.ec_mii = mii;
	ifmedia_init(&mii->mii_media, 0, age_mediachange, age_mediastatus);
	mii_attach(self, mii, 0xffffffff, MII_PHY_ANY,
	   MII_OFFSET_ANY, MIIF_DOPAUSE);

	if (LIST_FIRST(&mii->mii_phys) == NULL) {
		aprint_error_dev(self, "no PHY found!\n");
		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_MANUAL, 0, NULL);
		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_MANUAL);
	} else
		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);

	if_attach(ifp);
	if_deferred_start_init(ifp, NULL);
	ether_ifattach(ifp, sc->sc_enaddr);

	if (pmf_device_register1(self, NULL, age_resume, age_shutdown))
		pmf_class_network_register(self, ifp);
	else
		aprint_error_dev(self, "couldn't establish power handler\n");

	return;

fail:
	age_dma_free(sc);
	if (sc->sc_irq_handle != NULL) {
		pci_intr_disestablish(sc->sc_pct, sc->sc_irq_handle);
		sc->sc_irq_handle = NULL;
	}
	if (sc->sc_mem_size) {
		bus_space_unmap(sc->sc_mem_bt, sc->sc_mem_bh, sc->sc_mem_size);
		sc->sc_mem_size = 0;
	}
}

static int
age_detach(device_t self, int flags)
{
	struct age_softc *sc = device_private(self);
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	int s;

	pmf_device_deregister(self);
	s = splnet();
	age_stop(ifp, 0);
	splx(s);

	mii_detach(&sc->sc_miibus, MII_PHY_ANY, MII_OFFSET_ANY);

	ether_ifdetach(ifp);
	if_detach(ifp);
	age_dma_free(sc);

	/* Delete all remaining media. */
	ifmedia_fini(&sc->sc_miibus.mii_media);

	if (sc->sc_irq_handle != NULL) {
		pci_intr_disestablish(sc->sc_pct, sc->sc_irq_handle);
		sc->sc_irq_handle = NULL;
	}
	if (sc->sc_mem_size) {
		bus_space_unmap(sc->sc_mem_bt, sc->sc_mem_bh, sc->sc_mem_size);
		sc->sc_mem_size = 0;
	}
	return 0;
}

/*
 *	Read a PHY register on the MII of the L1.
 */
static int
age_miibus_readreg(device_t dev, int phy, int reg, uint16_t *val)
{
	struct age_softc *sc = device_private(dev);
	uint32_t v;
	int i;

	if (phy != sc->age_phyaddr)
		return -1;

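	/* Issue the read op and poll until the MDIO unit goes idle. */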
	CSR_WRITE_4(sc, AGE_MDIO, MDIO_OP_EXECUTE | MDIO_OP_READ |
	    MDIO_SUP_PREAMBLE | MDIO_CLK_25_4 | MDIO_REG_ADDR(reg));
	for (i = AGE_PHY_TIMEOUT; i > 0; i--) {
		DELAY(1);
		v = CSR_READ_4(sc, AGE_MDIO);
		if ((v & (MDIO_OP_EXECUTE | MDIO_OP_BUSY)) == 0)
			break;
	}

	if (i == 0) {
		printf("%s: phy read timeout: phy %d, reg %d\n",
		    device_xname(sc->sc_dev), phy, reg);
		return ETIMEDOUT;
	}

	*val = (v & MDIO_DATA_MASK) >> MDIO_DATA_SHIFT;
	return 0;
}

/*
 *	Write a PHY register on the MII of the L1.
 */
static int
age_miibus_writereg(device_t dev, int phy, int reg, uint16_t val)
{
	struct age_softc *sc = device_private(dev);
	uint32_t v;
	int i;

	if (phy != sc->age_phyaddr)
		return -1;

	CSR_WRITE_4(sc, AGE_MDIO, MDIO_OP_EXECUTE | MDIO_OP_WRITE |
	    (val & MDIO_DATA_MASK) << MDIO_DATA_SHIFT |
	    MDIO_SUP_PREAMBLE | MDIO_CLK_25_4 | MDIO_REG_ADDR(reg));

	for (i = AGE_PHY_TIMEOUT; i > 0; i--) {
		DELAY(1);
		v = CSR_READ_4(sc, AGE_MDIO);
		if ((v & (MDIO_OP_EXECUTE | MDIO_OP_BUSY)) == 0)
			break;
	}

	if (i == 0) {
		printf("%s: phy write timeout: phy %d, reg %d\n",
		    device_xname(sc->sc_dev), phy, reg);
		return ETIMEDOUT;
	}

	return 0;
}

/*
 *	Callback from MII layer when media changes.
 */
static void
age_miibus_statchg(struct ifnet *ifp)
{
	struct age_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_miibus;

	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return;

	sc->age_flags &= ~AGE_FLAG_LINK;
	if ((mii->mii_media_status & IFM_AVALID) != 0) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
		case IFM_1000_T:
			sc->age_flags |= AGE_FLAG_LINK;
			break;
		default:
			break;
		}
	}

	/* Stop Rx/Tx MACs. */
	age_stop_rxmac(sc);
	age_stop_txmac(sc);

	/* Program MACs with resolved speed/duplex/flow-control. */
	if ((sc->age_flags & AGE_FLAG_LINK) != 0) {
		uint32_t reg;

		age_mac_config(sc);
		reg = CSR_READ_4(sc, AGE_MAC_CFG);
		/* Restart DMA engine and Tx/Rx MAC. */
		CSR_WRITE_4(sc, AGE_DMA_CFG, CSR_READ_4(sc, AGE_DMA_CFG) |
		    DMA_CFG_RD_ENB | DMA_CFG_WR_ENB);
		reg |= MAC_CFG_TX_ENB | MAC_CFG_RX_ENB;
		CSR_WRITE_4(sc, AGE_MAC_CFG, reg);
	}
}

/*
 *	Get the current interface media status.
 */
static void
age_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct age_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_miibus;

	mii_pollstat(mii);
	ifmr->ifm_status = mii->mii_media_status;
	ifmr->ifm_active = mii->mii_media_active;
}

/*
 *	Set hardware to newly-selected media.
 */
static int
age_mediachange(struct ifnet *ifp)
{
	struct age_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_miibus;
	int error;

	if (mii->mii_instance != 0) {
		struct mii_softc *miisc;

		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
	error = mii_mediachg(mii);

	return error;
}

static int
age_intr(void *arg)
{
	struct age_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct cmb *cmb;
	uint32_t status;

	status = CSR_READ_4(sc, AGE_INTR_STATUS);
	if (status == 0 || (status & AGE_INTRS) == 0)
		return 0;

	cmb = sc->age_rdata.age_cmb_block;
	if (cmb == NULL) {
		/*
		 * Happens when bringing up the interface
		 * w/o having a carrier. Ack the interrupt.
		 */
		CSR_WRITE_4(sc, AGE_INTR_STATUS, status);
		return 0;
	}

	bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_cmb_block_map, 0,
	    sc->age_cdata.age_cmb_block_map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	status = le32toh(cmb->intr_status);
	/* ACK/reenable interrupts */
	CSR_WRITE_4(sc, AGE_INTR_STATUS, status);
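	/* Keep serving the CMB until it stops reporting pending events. */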
	while ((status & AGE_INTRS) != 0) {
		sc->age_tpd_cons = (le32toh(cmb->tpd_cons) & TPD_CONS_MASK) >>
		    TPD_CONS_SHIFT;
		sc->age_rr_prod = (le32toh(cmb->rprod_cons) & RRD_PROD_MASK) >>
		    RRD_PROD_SHIFT;

		/* Let hardware know CMB was served. */
		cmb->intr_status = 0;
		bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_cmb_block_map, 0,
		    sc->age_cdata.age_cmb_block_map->dm_mapsize,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		if (ifp->if_flags & IFF_RUNNING) {
			if (status & INTR_CMB_RX)
				age_rxintr(sc, sc->age_rr_prod);

			if (status & INTR_CMB_TX)
				age_txintr(sc, sc->age_tpd_cons);

			if (status & (INTR_DMA_RD_TO_RST | INTR_DMA_WR_TO_RST)) {
				if (status & INTR_DMA_RD_TO_RST)
					printf("%s: DMA read error! -- "
					    "resetting\n",
					    device_xname(sc->sc_dev));
				if (status & INTR_DMA_WR_TO_RST)
					printf("%s: DMA write error! -- "
					    "resetting\n",
					    device_xname(sc->sc_dev));
				age_init(ifp);
			}

			if_schedule_deferred_start(ifp);

			if (status & INTR_SMB)
				age_stats_update(sc);
		}
		/* Check whether more interrupts came in meanwhile. */
		bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_cmb_block_map, 0,
		    sc->age_cdata.age_cmb_block_map->dm_mapsize,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		status = le32toh(cmb->intr_status);
	}

	return 1;
}

static void
age_get_macaddr(struct age_softc *sc, uint8_t eaddr[])
{
	uint32_t ea[2], reg;
	int i, vpdc;

	reg = CSR_READ_4(sc, AGE_SPI_CTRL);
	if ((reg & SPI_VPD_ENB) != 0) {
		/* Get VPD stored in TWSI EEPROM. */
		reg &= ~SPI_VPD_ENB;
		CSR_WRITE_4(sc, AGE_SPI_CTRL, reg);
	}

	if (pci_get_capability(sc->sc_pct, sc->sc_pcitag,
	    PCI_CAP_VPD, &vpdc, NULL)) {
		/*
		 * PCI VPD capability found, let TWSI reload EEPROM.
		 * This will set Ethernet address of controller.
		 */
		CSR_WRITE_4(sc, AGE_TWSI_CTRL, CSR_READ_4(sc, AGE_TWSI_CTRL) |
		    TWSI_CTRL_SW_LD_START);
		for (i = 100; i > 0; i--) {
			DELAY(1000);
			reg = CSR_READ_4(sc, AGE_TWSI_CTRL);
			if ((reg & TWSI_CTRL_SW_LD_START) == 0)
				break;
		}
		if (i == 0)
			printf("%s: reloading EEPROM timeout!\n",
			    device_xname(sc->sc_dev));
	} else {
		if (agedebug)
			printf("%s: PCI VPD capability not found!\n",
			    device_xname(sc->sc_dev));
	}

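	/* The station address is split across the PAR0 and PAR1 registers. */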
	ea[0] = CSR_READ_4(sc, AGE_PAR0);
	ea[1] = CSR_READ_4(sc, AGE_PAR1);

	eaddr[0] = (ea[1] >> 8) & 0xFF;
	eaddr[1] = (ea[1] >> 0) & 0xFF;
	eaddr[2] = (ea[0] >> 24) & 0xFF;
	eaddr[3] = (ea[0] >> 16) & 0xFF;
	eaddr[4] = (ea[0] >> 8) & 0xFF;
	eaddr[5] = (ea[0] >> 0) & 0xFF;
}

static void
age_phy_reset(struct age_softc *sc)
{
	uint16_t reg, pn;
	int i, linkup;

	/* Reset PHY. */
	CSR_WRITE_4(sc, AGE_GPHY_CTRL, GPHY_CTRL_RST);
	DELAY(2000);
	CSR_WRITE_4(sc, AGE_GPHY_CTRL, GPHY_CTRL_CLR);
	DELAY(2000);

#define ATPHY_DBG_ADDR		0x1D
#define ATPHY_DBG_DATA		0x1E
#define ATPHY_CDTC		0x16
#define PHY_CDTC_ENB		0x0001
#define PHY_CDTC_POFF		8
#define ATPHY_CDTS		0x1C
#define PHY_CDTS_STAT_OK	0x0000
#define PHY_CDTS_STAT_SHORT	0x0100
#define PHY_CDTS_STAT_OPEN	0x0200
#define PHY_CDTS_STAT_INVAL	0x0300
#define PHY_CDTS_STAT_MASK	0x0300

	/* Check power saving mode. Magic from Linux. */
	age_miibus_writereg(sc->sc_dev, sc->age_phyaddr, MII_BMCR, BMCR_RESET);
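	/*
	 * Run the cable diagnostic on each pair; any result other than
	 * "open" is taken as evidence of a link partner.
	 */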
	for (linkup = 0, pn = 0; pn < 4; pn++) {
		age_miibus_writereg(sc->sc_dev, sc->age_phyaddr, ATPHY_CDTC,
		    (pn << PHY_CDTC_POFF) | PHY_CDTC_ENB);
		for (i = 200; i > 0; i--) {
			DELAY(1000);
			age_miibus_readreg(sc->sc_dev, sc->age_phyaddr,
			    ATPHY_CDTC, &reg);
			if ((reg & PHY_CDTC_ENB) == 0)
				break;
		}
		DELAY(1000);
		age_miibus_readreg(sc->sc_dev, sc->age_phyaddr,
		    ATPHY_CDTS, &reg);
		if ((reg & PHY_CDTS_STAT_MASK) != PHY_CDTS_STAT_OPEN) {
			linkup++;
			break;
		}
	}
	age_miibus_writereg(sc->sc_dev, sc->age_phyaddr, MII_BMCR,
	    BMCR_RESET | BMCR_AUTOEN | BMCR_STARTNEG);
	if (linkup == 0) {
		age_miibus_writereg(sc->sc_dev, sc->age_phyaddr,
		    ATPHY_DBG_ADDR, 0);
		age_miibus_writereg(sc->sc_dev, sc->age_phyaddr,
		    ATPHY_DBG_DATA, 0x124E);
		age_miibus_writereg(sc->sc_dev, sc->age_phyaddr,
		    ATPHY_DBG_ADDR, 1);
		age_miibus_readreg(sc->sc_dev, sc->age_phyaddr,
		    ATPHY_DBG_DATA, &reg);
		age_miibus_writereg(sc->sc_dev, sc->age_phyaddr,
		    ATPHY_DBG_DATA, reg | 0x03);
		/* XXX */
		DELAY(1500 * 1000);
		age_miibus_writereg(sc->sc_dev, sc->age_phyaddr,
		    ATPHY_DBG_ADDR, 0);
		age_miibus_writereg(sc->sc_dev, sc->age_phyaddr,
		    ATPHY_DBG_DATA, 0x024E);
	}

#undef ATPHY_DBG_ADDR
#undef ATPHY_DBG_DATA
#undef ATPHY_CDTC
#undef PHY_CDTC_ENB
#undef PHY_CDTC_POFF
#undef ATPHY_CDTS
#undef PHY_CDTS_STAT_OK
#undef PHY_CDTS_STAT_SHORT
#undef PHY_CDTS_STAT_OPEN
#undef PHY_CDTS_STAT_INVAL
#undef PHY_CDTS_STAT_MASK
}

static int
age_dma_alloc(struct age_softc *sc)
{
	struct age_txdesc *txd;
	struct age_rxdesc *rxd;
	int nsegs, error, i;

	/*
	 * Create DMA resources for the Tx ring.
	 */
	error = bus_dmamap_create(sc->sc_dmat, AGE_TX_RING_SZ, 1,
	    AGE_TX_RING_SZ, 0, BUS_DMA_NOWAIT, &sc->age_cdata.age_tx_ring_map);
	if (error) {
		sc->age_cdata.age_tx_ring_map = NULL;
		return ENOBUFS;
	}

	/* Allocate DMA'able memory for TX ring */
	error = bus_dmamem_alloc(sc->sc_dmat, AGE_TX_RING_SZ,
	    PAGE_SIZE, 0, &sc->age_rdata.age_tx_ring_seg, 1,
	    &nsegs, BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: could not allocate DMA'able memory for Tx ring, "
		    "error = %i\n", device_xname(sc->sc_dev), error);
		return error;
	}

	error = bus_dmamem_map(sc->sc_dmat, &sc->age_rdata.age_tx_ring_seg,
	    nsegs, AGE_TX_RING_SZ, (void **)&sc->age_rdata.age_tx_ring,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
	if (error)
		return ENOBUFS;

	memset(sc->age_rdata.age_tx_ring, 0, AGE_TX_RING_SZ);

	/* Load the DMA map for Tx ring. */
	error = bus_dmamap_load(sc->sc_dmat, sc->age_cdata.age_tx_ring_map,
	    sc->age_rdata.age_tx_ring, AGE_TX_RING_SZ, NULL, BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: could not load DMA'able memory for Tx ring, "
		    "error = %i\n", device_xname(sc->sc_dev), error);
		bus_dmamem_free(sc->sc_dmat,
		    &sc->age_rdata.age_tx_ring_seg, 1);
		return error;
	}

	sc->age_rdata.age_tx_ring_paddr =
	    sc->age_cdata.age_tx_ring_map->dm_segs[0].ds_addr;

	/*
	 * Create DMA resources for the Rx ring.
	 */
	error = bus_dmamap_create(sc->sc_dmat, AGE_RX_RING_SZ, 1,
	    AGE_RX_RING_SZ, 0, BUS_DMA_NOWAIT, &sc->age_cdata.age_rx_ring_map);
	if (error) {
		sc->age_cdata.age_rx_ring_map = NULL;
		return ENOBUFS;
	}

	/* Allocate DMA'able memory for RX ring */
	error = bus_dmamem_alloc(sc->sc_dmat, AGE_RX_RING_SZ,
	    PAGE_SIZE, 0, &sc->age_rdata.age_rx_ring_seg, 1,
	    &nsegs, BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: could not allocate DMA'able memory for Rx ring, "
		    "error = %i.\n", device_xname(sc->sc_dev), error);
		return error;
	}

	error = bus_dmamem_map(sc->sc_dmat, &sc->age_rdata.age_rx_ring_seg,
	    nsegs, AGE_RX_RING_SZ, (void **)&sc->age_rdata.age_rx_ring,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
	if (error)
		return ENOBUFS;

	memset(sc->age_rdata.age_rx_ring, 0, AGE_RX_RING_SZ);

	/* Load the DMA map for Rx ring. */
	error = bus_dmamap_load(sc->sc_dmat, sc->age_cdata.age_rx_ring_map,
	    sc->age_rdata.age_rx_ring, AGE_RX_RING_SZ, NULL, BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: could not load DMA'able memory for Rx ring, "
		    "error = %i.\n", device_xname(sc->sc_dev), error);
		bus_dmamem_free(sc->sc_dmat,
		    &sc->age_rdata.age_rx_ring_seg, 1);
		return error;
	}

	sc->age_rdata.age_rx_ring_paddr =
	    sc->age_cdata.age_rx_ring_map->dm_segs[0].ds_addr;

	/*
	 * Create DMA resources for the Rx return ring.
	 */
	error = bus_dmamap_create(sc->sc_dmat, AGE_RR_RING_SZ, 1,
	    AGE_RR_RING_SZ, 0, BUS_DMA_NOWAIT, &sc->age_cdata.age_rr_ring_map);
	if (error) {
		sc->age_cdata.age_rr_ring_map = NULL;
		return ENOBUFS;
	}

	/* Allocate DMA'able memory for RX return ring */
	error = bus_dmamem_alloc(sc->sc_dmat, AGE_RR_RING_SZ,
	    PAGE_SIZE, 0, &sc->age_rdata.age_rr_ring_seg, 1,
	    &nsegs, BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: could not allocate DMA'able memory for Rx "
		    "return ring, error = %i.\n",
		    device_xname(sc->sc_dev), error);
		return error;
	}

	error = bus_dmamem_map(sc->sc_dmat, &sc->age_rdata.age_rr_ring_seg,
	    nsegs, AGE_RR_RING_SZ, (void **)&sc->age_rdata.age_rr_ring,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
	if (error)
		return ENOBUFS;

	memset(sc->age_rdata.age_rr_ring, 0, AGE_RR_RING_SZ);

	/* Load the DMA map for Rx return ring. */
	error = bus_dmamap_load(sc->sc_dmat, sc->age_cdata.age_rr_ring_map,
	    sc->age_rdata.age_rr_ring, AGE_RR_RING_SZ, NULL, BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: could not load DMA'able memory for Rx return ring, "
		    "error = %i\n", device_xname(sc->sc_dev), error);
		bus_dmamem_free(sc->sc_dmat,
		    &sc->age_rdata.age_rr_ring_seg, 1);
		return error;
	}

	sc->age_rdata.age_rr_ring_paddr =
	    sc->age_cdata.age_rr_ring_map->dm_segs[0].ds_addr;

	/*
	 * Create DMA resources for the CMB block.
	 */
	error = bus_dmamap_create(sc->sc_dmat, AGE_CMB_BLOCK_SZ, 1,
	    AGE_CMB_BLOCK_SZ, 0, BUS_DMA_NOWAIT,
	    &sc->age_cdata.age_cmb_block_map);
	if (error) {
		sc->age_cdata.age_cmb_block_map = NULL;
		return ENOBUFS;
	}

	/* Allocate DMA'able memory for CMB block */
	error = bus_dmamem_alloc(sc->sc_dmat, AGE_CMB_BLOCK_SZ,
	    PAGE_SIZE, 0, &sc->age_rdata.age_cmb_block_seg, 1,
	    &nsegs, BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: could not allocate DMA'able memory for "
		    "CMB block, error = %i\n", device_xname(sc->sc_dev), error);
		return error;
	}

	error = bus_dmamem_map(sc->sc_dmat, &sc->age_rdata.age_cmb_block_seg,
	    nsegs, AGE_CMB_BLOCK_SZ, (void **)&sc->age_rdata.age_cmb_block,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
	if (error)
		return ENOBUFS;

	memset(sc->age_rdata.age_cmb_block, 0, AGE_CMB_BLOCK_SZ);

	/* Load the DMA map for CMB block. */
	error = bus_dmamap_load(sc->sc_dmat, sc->age_cdata.age_cmb_block_map,
	    sc->age_rdata.age_cmb_block, AGE_CMB_BLOCK_SZ, NULL,
	    BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: could not load DMA'able memory for CMB block, "
		    "error = %i\n", device_xname(sc->sc_dev), error);
		bus_dmamem_free(sc->sc_dmat,
		    &sc->age_rdata.age_cmb_block_seg, 1);
		return error;
	}

	sc->age_rdata.age_cmb_block_paddr =
	    sc->age_cdata.age_cmb_block_map->dm_segs[0].ds_addr;

	/*
	 * Create DMA resources for the SMB block.
	 */
	error = bus_dmamap_create(sc->sc_dmat, AGE_SMB_BLOCK_SZ, 1,
	    AGE_SMB_BLOCK_SZ, 0, BUS_DMA_NOWAIT,
	    &sc->age_cdata.age_smb_block_map);
	if (error) {
		sc->age_cdata.age_smb_block_map = NULL;
		return ENOBUFS;
	}

	/* Allocate DMA'able memory for SMB block */
	error = bus_dmamem_alloc(sc->sc_dmat, AGE_SMB_BLOCK_SZ,
	    PAGE_SIZE, 0, &sc->age_rdata.age_smb_block_seg, 1,
	    &nsegs, BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: could not allocate DMA'able memory for "
		    "SMB block, error = %i\n", device_xname(sc->sc_dev), error);
		return error;
	}

	error = bus_dmamem_map(sc->sc_dmat, &sc->age_rdata.age_smb_block_seg,
	    nsegs, AGE_SMB_BLOCK_SZ, (void **)&sc->age_rdata.age_smb_block,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
	if (error)
		return ENOBUFS;

	memset(sc->age_rdata.age_smb_block, 0, AGE_SMB_BLOCK_SZ);

	/* Load the DMA map for SMB block. */
	error = bus_dmamap_load(sc->sc_dmat, sc->age_cdata.age_smb_block_map,
	    sc->age_rdata.age_smb_block, AGE_SMB_BLOCK_SZ, NULL,
	    BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: could not load DMA'able memory for SMB block, "
		    "error = %i\n", device_xname(sc->sc_dev), error);
		bus_dmamem_free(sc->sc_dmat,
		    &sc->age_rdata.age_smb_block_seg, 1);
		return error;
	}

	sc->age_rdata.age_smb_block_paddr =
	    sc->age_cdata.age_smb_block_map->dm_segs[0].ds_addr;

	/*
	 * All of the memory we allocated above needs to be within
	 * the same 4GB segment.  Make sure this is so.
	 *
	 * XXX We don't care WHAT 4GB segment they're in, just that
	 * XXX they're all in the same one.  Need some bus_dma API
	 * XXX help to make this easier to enforce when we actually
	 * XXX perform the allocation.
	 */
	if (! (AGE_ADDR_HI(sc->age_rdata.age_tx_ring_paddr) ==
	       AGE_ADDR_HI(sc->age_rdata.age_rx_ring_paddr)

	    && AGE_ADDR_HI(sc->age_rdata.age_tx_ring_paddr) ==
	       AGE_ADDR_HI(sc->age_rdata.age_rr_ring_paddr)

	    && AGE_ADDR_HI(sc->age_rdata.age_tx_ring_paddr) ==
	       AGE_ADDR_HI(sc->age_rdata.age_cmb_block_paddr)

	    && AGE_ADDR_HI(sc->age_rdata.age_tx_ring_paddr) ==
	       AGE_ADDR_HI(sc->age_rdata.age_smb_block_paddr))) {
		aprint_error_dev(sc->sc_dev,
		    "control data allocation constraints failed\n");
		return ENOBUFS;
	}

	/* Create DMA maps for Tx buffers. */
	for (i = 0; i < AGE_TX_RING_CNT; i++) {
		txd = &sc->age_cdata.age_txdesc[i];
		txd->tx_m = NULL;
		txd->tx_dmamap = NULL;
		error = bus_dmamap_create(sc->sc_dmat, AGE_TSO_MAXSIZE,
		    AGE_MAXTXSEGS, AGE_TSO_MAXSEGSIZE, 0, BUS_DMA_NOWAIT,
		    &txd->tx_dmamap);
		if (error) {
			txd->tx_dmamap = NULL;
			printf("%s: could not create Tx dmamap, error = %i.\n",
			    device_xname(sc->sc_dev), error);
			return error;
		}
	}

	/* Create DMA maps for Rx buffers. */
	error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 0,
	    BUS_DMA_NOWAIT, &sc->age_cdata.age_rx_sparemap);
	if (error) {
		sc->age_cdata.age_rx_sparemap = NULL;
		printf("%s: could not create spare Rx dmamap, error = %i.\n",
		    device_xname(sc->sc_dev), error);
		return error;
	}
	for (i = 0; i < AGE_RX_RING_CNT; i++) {
		rxd = &sc->age_cdata.age_rxdesc[i];
		rxd->rx_m = NULL;
		rxd->rx_dmamap = NULL;
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT, &rxd->rx_dmamap);
		if (error) {
			rxd->rx_dmamap = NULL;
			printf("%s: could not create Rx dmamap, error = %i.\n",
			    device_xname(sc->sc_dev), error);
			return error;
		}
	}

	return 0;
}

static void
age_dma_free(struct age_softc *sc)
{
	struct age_txdesc *txd;
	struct age_rxdesc *rxd;
	int i;

	/* Tx buffers */
	for (i = 0; i < AGE_TX_RING_CNT; i++) {
		txd = &sc->age_cdata.age_txdesc[i];
		if (txd->tx_dmamap != NULL) {
			bus_dmamap_destroy(sc->sc_dmat, txd->tx_dmamap);
			txd->tx_dmamap = NULL;
		}
	}
	/* Rx buffers */
	for (i = 0; i < AGE_RX_RING_CNT; i++) {
		rxd = &sc->age_cdata.age_rxdesc[i];
		if (rxd->rx_dmamap != NULL) {
			bus_dmamap_destroy(sc->sc_dmat, rxd->rx_dmamap);
			rxd->rx_dmamap = NULL;
		}
	}
	if (sc->age_cdata.age_rx_sparemap != NULL) {
		bus_dmamap_destroy(sc->sc_dmat, sc->age_cdata.age_rx_sparemap);
		sc->age_cdata.age_rx_sparemap = NULL;
	}

	/* Tx ring. */
	if (sc->age_cdata.age_tx_ring_map != NULL)
		bus_dmamap_unload(sc->sc_dmat, sc->age_cdata.age_tx_ring_map);
	if (sc->age_cdata.age_tx_ring_map != NULL &&
	    sc->age_rdata.age_tx_ring != NULL)
		bus_dmamem_free(sc->sc_dmat,
		    &sc->age_rdata.age_tx_ring_seg, 1);
	sc->age_rdata.age_tx_ring = NULL;
	sc->age_cdata.age_tx_ring_map = NULL;

	/* Rx ring. */
	if (sc->age_cdata.age_rx_ring_map != NULL)
		bus_dmamap_unload(sc->sc_dmat, sc->age_cdata.age_rx_ring_map);
	if (sc->age_cdata.age_rx_ring_map != NULL &&
	    sc->age_rdata.age_rx_ring != NULL)
		bus_dmamem_free(sc->sc_dmat,
		    &sc->age_rdata.age_rx_ring_seg, 1);
	sc->age_rdata.age_rx_ring = NULL;
	sc->age_cdata.age_rx_ring_map = NULL;

	/* Rx return ring. */
	if (sc->age_cdata.age_rr_ring_map != NULL)
		bus_dmamap_unload(sc->sc_dmat, sc->age_cdata.age_rr_ring_map);
	if (sc->age_cdata.age_rr_ring_map != NULL &&
	    sc->age_rdata.age_rr_ring != NULL)
		bus_dmamem_free(sc->sc_dmat,
		    &sc->age_rdata.age_rr_ring_seg, 1);
	sc->age_rdata.age_rr_ring = NULL;
	sc->age_cdata.age_rr_ring_map = NULL;

	/* CMB block */
	if (sc->age_cdata.age_cmb_block_map != NULL)
		bus_dmamap_unload(sc->sc_dmat, sc->age_cdata.age_cmb_block_map);
	if (sc->age_cdata.age_cmb_block_map != NULL &&
	    sc->age_rdata.age_cmb_block != NULL)
		bus_dmamem_free(sc->sc_dmat,
		    &sc->age_rdata.age_cmb_block_seg, 1);
	sc->age_rdata.age_cmb_block = NULL;
	sc->age_cdata.age_cmb_block_map = NULL;

	/* SMB block */
	if (sc->age_cdata.age_smb_block_map != NULL)
		bus_dmamap_unload(sc->sc_dmat, sc->age_cdata.age_smb_block_map);
	if (sc->age_cdata.age_smb_block_map != NULL &&
	    sc->age_rdata.age_smb_block != NULL)
		bus_dmamem_free(sc->sc_dmat,
		    &sc->age_rdata.age_smb_block_seg, 1);
	sc->age_rdata.age_smb_block = NULL;
	sc->age_cdata.age_smb_block_map = NULL;
}

static void
age_start(struct ifnet *ifp)
{
	struct age_softc *sc = ifp->if_softc;
	struct mbuf *m_head;
	int enq;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;
	if ((sc->age_flags & AGE_FLAG_LINK) == 0)
		return;
	if (IFQ_IS_EMPTY(&ifp->if_snd))
		return;

	enq = 0;
	for (;;) {
		IFQ_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		/*
		 * Pack the data into the transmit ring. If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 */
		if (age_encap(sc, &m_head)) {
			if (m_head == NULL)
				break;
			IF_PREPEND(&ifp->if_snd, m_head);
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}
		enq = 1;

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		bpf_mtap(ifp, m_head, BPF_D_OUT);
	}

	if (enq) {
		/* Update mbox. */
		AGE_COMMIT_MBOX(sc);
		/* Set a timeout in case the chip goes out to lunch. */
		ifp->if_timer = AGE_TX_TIMEOUT;
	}
}

static void
age_watchdog(struct ifnet *ifp)
{
	struct age_softc *sc = ifp->if_softc;

	if ((sc->age_flags & AGE_FLAG_LINK) == 0) {
		printf("%s: watchdog timeout (missed link)\n",
		    device_xname(sc->sc_dev));
		if_statinc(ifp, if_oerrors);
		age_init(ifp);
		return;
	}

	if (sc->age_cdata.age_tx_cnt == 0) {
		printf("%s: watchdog timeout (missed Tx interrupts) "
		    "-- recovering\n", device_xname(sc->sc_dev));
		age_start(ifp);
		return;
	}

	printf("%s: watchdog timeout\n", device_xname(sc->sc_dev));
	if_statinc(ifp, if_oerrors);
	age_init(ifp);
	age_start(ifp);
}

static bool
age_shutdown(device_t self, int howto)
{
	struct age_softc *sc;
	struct ifnet *ifp;

	sc = device_private(self);
	ifp = &sc->sc_ec.ec_if;
	age_stop(ifp, 1);

	return true;
}

static int
age_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct age_softc *sc = ifp->if_softc;
	int s, error;

	s = splnet();

	error = ether_ioctl(ifp, cmd, data);
	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			age_rxfilter(sc);
		error = 0;
	}

	splx(s);
	return error;
}

static void
age_mac_config(struct age_softc *sc)
{
	struct mii_data *mii;
	uint32_t reg;

	mii = &sc->sc_miibus;

	reg = CSR_READ_4(sc, AGE_MAC_CFG);
	reg &= ~MAC_CFG_FULL_DUPLEX;
	reg &= ~(MAC_CFG_TX_FC | MAC_CFG_RX_FC);
	reg &= ~MAC_CFG_SPEED_MASK;

	/* Reprogram MAC with resolved speed/duplex. */
	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_10_T:
	case IFM_100_TX:
		reg |= MAC_CFG_SPEED_10_100;
		break;
	case IFM_1000_T:
		reg |= MAC_CFG_SPEED_1000;
		break;
	}
	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
		reg |= MAC_CFG_FULL_DUPLEX;
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
			reg |= MAC_CFG_TX_FC;
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
			reg |= MAC_CFG_RX_FC;
	}

	CSR_WRITE_4(sc, AGE_MAC_CFG, reg);
}

static bool
age_resume(device_t dv, const pmf_qual_t *qual)
{
	struct age_softc *sc = device_private(dv);
	uint16_t cmd;

	/*
	 * Clear the INTx emulation disable bit that some hardware
	 * sets on resume. From Linux.
	 */
	cmd = pci_conf_read(sc->sc_pct, sc->sc_pcitag, PCI_COMMAND_STATUS_REG);
	if ((cmd & PCI_COMMAND_INTERRUPT_DISABLE) != 0) {
		cmd &= ~PCI_COMMAND_INTERRUPT_DISABLE;
		pci_conf_write(sc->sc_pct, sc->sc_pcitag,
		    PCI_COMMAND_STATUS_REG, cmd);
	}

	return true;
}

static int
age_encap(struct age_softc *sc, struct mbuf **m_head)
{
	struct age_txdesc *txd, *txd_last;
	struct tx_desc *desc;
	struct mbuf *m;
	bus_dmamap_t map;
	uint32_t cflags, poff, vtag;
	int error, i, nsegs, prod;

	m = *m_head;
	cflags = vtag = 0;
	poff = 0;

	prod = sc->age_cdata.age_tx_prod;
	txd = &sc->age_cdata.age_txdesc[prod];
	txd_last = txd;
	map = txd->tx_dmamap;

	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, *m_head, BUS_DMA_NOWAIT);

	if (error == EFBIG) {
		error = 0;

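		/* Too many segments: try to compact the chain into one mbuf. */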
		*m_head = m_pullup(*m_head, MHLEN);
		if (*m_head == NULL) {
			printf("%s: can't defrag TX mbuf\n",
			    device_xname(sc->sc_dev));
			return ENOBUFS;
		}

		error = bus_dmamap_load_mbuf(sc->sc_dmat, map, *m_head,
			    BUS_DMA_NOWAIT);

		if (error != 0) {
			printf("%s: could not load defragged TX mbuf\n",
			    device_xname(sc->sc_dev));
			m_freem(*m_head);
			*m_head = NULL;
			return error;
		}
	} else if (error) {
		printf("%s: could not load TX mbuf\n", device_xname(sc->sc_dev));
		return error;
	}

	nsegs = map->dm_nsegs;

	if (nsegs == 0) {
		m_freem(*m_head);
		*m_head = NULL;
		return EIO;
	}

	/* Check descriptor overrun. */
	if (sc->age_cdata.age_tx_cnt + nsegs >= AGE_TX_RING_CNT - 2) {
		bus_dmamap_unload(sc->sc_dmat, map);
		return ENOBUFS;
	}
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	m = *m_head;
	/* Configure Tx IP/TCP/UDP checksum offload. */
	if ((m->m_pkthdr.csum_flags & AGE_CSUM_FEATURES) != 0) {
		cflags |= AGE_TD_CSUM;
		if ((m->m_pkthdr.csum_flags & M_CSUM_TCPv4) != 0)
			cflags |= AGE_TD_TCPCSUM;
		if ((m->m_pkthdr.csum_flags & M_CSUM_UDPv4) != 0)
			cflags |= AGE_TD_UDPCSUM;
		/* Set checksum start offset. */
		cflags |= (poff << AGE_TD_CSUM_PLOADOFFSET_SHIFT);
	}

#if NVLAN > 0
	/* Configure VLAN hardware tag insertion. */
	if (vlan_has_tag(m)) {
		vtag = AGE_TX_VLAN_TAG(htons(vlan_get_tag(m)));
		vtag = ((vtag << AGE_TD_VLAN_SHIFT) & AGE_TD_VLAN_MASK);
		cflags |= AGE_TD_INSERT_VLAN_TAG;
	}
#endif

	desc = NULL;
	KASSERT(nsegs > 0);
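	/* One Tx descriptor per DMA segment; EOP is set on the last below. */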
	for (i = 0; ; i++) {
		desc = &sc->age_rdata.age_tx_ring[prod];
		desc->addr = htole64(map->dm_segs[i].ds_addr);
		desc->len =
		    htole32(AGE_TX_BYTES(map->dm_segs[i].ds_len) | vtag);
		desc->flags = htole32(cflags);
		sc->age_cdata.age_tx_cnt++;
		if (i == (nsegs - 1))
			break;

		/* Sync this descriptor and go to the next one */
		bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_tx_ring_map,
		    prod * sizeof(struct tx_desc), sizeof(struct tx_desc),
		    BUS_DMASYNC_PREWRITE);
		AGE_DESC_INC(prod, AGE_TX_RING_CNT);
	}

	/* Set EOP on the last descriptor and sync it. */
	desc->flags |= htole32(AGE_TD_EOP);
	bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_tx_ring_map,
	    prod * sizeof(struct tx_desc), sizeof(struct tx_desc),
	    BUS_DMASYNC_PREWRITE);

	if (nsegs > 1) {
		/* Swap dmamap of the first and the last. */
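		/*
		 * This keeps the loaded map with the descriptor that
		 * records the mbuf, so age_txintr() unloads the right map.
		 */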
		txd = &sc->age_cdata.age_txdesc[prod];
		map = txd_last->tx_dmamap;
		txd_last->tx_dmamap = txd->tx_dmamap;
		txd->tx_dmamap = map;
		txd->tx_m = m;
		KASSERT(txd_last->tx_m == NULL);
	} else {
		KASSERT(txd_last == &sc->age_cdata.age_txdesc[prod]);
		txd_last->tx_m = m;
	}

	/* Update producer index. */
	AGE_DESC_INC(prod, AGE_TX_RING_CNT);
	sc->age_cdata.age_tx_prod = prod;

	return 0;
}

static void
age_txintr(struct age_softc *sc, int tpd_cons)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct age_txdesc *txd;
	int cons, prog;

	if (sc->age_cdata.age_tx_cnt <= 0) {
		if (ifp->if_timer != 0)
			printf("timer running without packets\n");
		if (sc->age_cdata.age_tx_cnt)
			printf("age_tx_cnt corrupted\n");
	}

	/*
	 * Go through our Tx list and free mbufs for those
	 * frames which have been transmitted.
	 */
	cons = sc->age_cdata.age_tx_cons;
	for (prog = 0; cons != tpd_cons; AGE_DESC_INC(cons, AGE_TX_RING_CNT)) {
		if (sc->age_cdata.age_tx_cnt <= 0)
			break;
		prog++;
		ifp->if_flags &= ~IFF_OACTIVE;
		sc->age_cdata.age_tx_cnt--;
		txd = &sc->age_cdata.age_txdesc[cons];
		/*
		 * Clear Tx descriptors; it's not required but would
		 * help debugging in case of Tx issues.
		 */
		bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_tx_ring_map,
		    cons * sizeof(struct tx_desc), sizeof(struct tx_desc),
		    BUS_DMASYNC_POSTWRITE);
		txd->tx_desc->addr = 0;
		txd->tx_desc->len = 0;
		txd->tx_desc->flags = 0;

		if (txd->tx_m == NULL)
			continue;
		/* Reclaim transmitted mbufs. */
		bus_dmamap_unload(sc->sc_dmat, txd->tx_dmamap);
		m_freem(txd->tx_m);
		txd->tx_m = NULL;
	}

	if (prog > 0) {
		sc->age_cdata.age_tx_cons = cons;

		/*
		 * Unarm watchdog timer only when there are no pending
		 * Tx descriptors in queue.
		 */
		if (sc->age_cdata.age_tx_cnt == 0)
			ifp->if_timer = 0;
	}
}

/* Receive a frame. */
static void
age_rxeof(struct age_softc *sc, struct rx_rdesc *rxrd)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct age_rxdesc *rxd;
	struct rx_desc *desc;
	struct mbuf *mp, *m;
	uint32_t status, index;
	int count, nsegs, pktlen;
	int rx_cons;

	status = le32toh(rxrd->flags);
	index = le32toh(rxrd->index);
	rx_cons = AGE_RX_CONS(index);
	nsegs = AGE_RX_NSEGS(index);

	sc->age_cdata.age_rxlen = AGE_RX_BYTES(le32toh(rxrd->len));
	if ((status & AGE_RRD_ERROR) != 0 &&
	    (status & (AGE_RRD_CRC | AGE_RRD_CODE | AGE_RRD_DRIBBLE |
	    AGE_RRD_RUNT | AGE_RRD_OFLOW | AGE_RRD_TRUNC)) != 0) {
		/*
		 * We want to pass the following frames to the upper
		 * layer regardless of the error status of the Rx
		 * return ring:
		 *
		 *  o IP/TCP/UDP checksum is bad.
		 *  o frame length and protocol-specific length
		 *    do not match.
		 */
		sc->age_cdata.age_rx_cons += nsegs;
		sc->age_cdata.age_rx_cons %= AGE_RX_RING_CNT;
		return;
	}

	pktlen = 0;
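	/* Walk this frame's nsegs Rx buffers and chain their mbufs together. */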
	for (count = 0; count < nsegs; count++,
	    AGE_DESC_INC(rx_cons, AGE_RX_RING_CNT)) {
		rxd = &sc->age_cdata.age_rxdesc[rx_cons];
		mp = rxd->rx_m;
		desc = rxd->rx_desc;
		/* Add a new receive buffer to the ring. */
		if (age_newbuf(sc, rxd, 0) != 0) {
			if_statinc(ifp, if_iqdrops);
			/* Reuse Rx buffers. */
			if (sc->age_cdata.age_rxhead != NULL) {
				m_freem(sc->age_cdata.age_rxhead);
				AGE_RXCHAIN_RESET(sc);
			}
			break;
		}

		/* The length of the first mbuf is computed last. */
		if (count != 0) {
			mp->m_len = AGE_RX_BYTES(le32toh(desc->len));
			pktlen += mp->m_len;
		}

		/* Chain received mbufs. */
		if (sc->age_cdata.age_rxhead == NULL) {
			sc->age_cdata.age_rxhead = mp;
			sc->age_cdata.age_rxtail = mp;
		} else {
			m_remove_pkthdr(mp);
			sc->age_cdata.age_rxprev_tail =
			    sc->age_cdata.age_rxtail;
			sc->age_cdata.age_rxtail->m_next = mp;
			sc->age_cdata.age_rxtail = mp;
		}

		if (count == nsegs - 1) {
			/*
			 * It seems that the L1 controller provides no way
			 * to tell the hardware to strip the CRC bytes.
			 */
			sc->age_cdata.age_rxlen -= ETHER_CRC_LEN;
			if (nsegs > 1) {
				/* Remove the CRC bytes in chained mbufs. */
				pktlen -= ETHER_CRC_LEN;
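				/*
				 * If the CRC spans the last two mbufs, drop
				 * the final mbuf and trim the previous one.
				 */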
				if (mp->m_len <= ETHER_CRC_LEN) {
					sc->age_cdata.age_rxtail =
					    sc->age_cdata.age_rxprev_tail;
					sc->age_cdata.age_rxtail->m_len -=
					    (ETHER_CRC_LEN - mp->m_len);
					sc->age_cdata.age_rxtail->m_next = NULL;
					m_freem(mp);
				} else {
					mp->m_len -= ETHER_CRC_LEN;
				}
			}

			m = sc->age_cdata.age_rxhead;
			KASSERT(m->m_flags & M_PKTHDR);
			m_set_rcvif(m, ifp);
			m->m_pkthdr.len = sc->age_cdata.age_rxlen;
			/* Set the first mbuf length. */
			m->m_len = sc->age_cdata.age_rxlen - pktlen;

			/*
			 * Set checksum information.
			 * It seems that the L1 controller can compute a
			 * partial checksum. The partial checksum value can
			 * be used to accelerate checksum computation for
			 * fragmented TCP/UDP packets. The upper network
			 * stack already takes advantage of the partial
			 * checksum value in the IP reassembly stage. But
			 * I'm not sure of the correctness of the partial
			 * hardware checksum assistance due to the lack of
			 * a data sheet. If it is proven to work on the L1
			 * I'll enable it.
			 */
			if (status & AGE_RRD_IPV4) {
				if (status & AGE_RRD_IPCSUM_NOK)
					m->m_pkthdr.csum_flags |=
					    M_CSUM_IPv4_BAD;
				if ((status & (AGE_RRD_TCP | AGE_RRD_UDP)) &&
				    (status & AGE_RRD_TCP_UDPCSUM_NOK)) {
					m->m_pkthdr.csum_flags |=
					    M_CSUM_TCP_UDP_BAD;
				}
				/*
				 * Don't mark a bad checksum for TCP/UDP
				 * frames, as fragmented frames may always
				 * have the bad-checksum bit set in the
				 * descriptor status.
				 */
			}
#if NVLAN > 0
			/* Check for VLAN tagged frames. */
			if (status & AGE_RRD_VLAN) {
				uint32_t vtag = AGE_RX_VLAN(le32toh(rxrd->vtags));
				vlan_set_tag(m, AGE_RX_VLAN_TAG(vtag));
			}
#endif

			/* Pass it on. */
			if_percpuq_enqueue(ifp->if_percpuq, m);

			/* Reset mbuf chains. */
			AGE_RXCHAIN_RESET(sc);
		}
	}

	if (count != nsegs) {
		sc->age_cdata.age_rx_cons += nsegs;
		sc->age_cdata.age_rx_cons %= AGE_RX_RING_CNT;
	} else
		sc->age_cdata.age_rx_cons = rx_cons;
}

static void
age_rxintr(struct age_softc *sc, int rr_prod)
{
	struct rx_rdesc *rxrd;
	int rr_cons, nsegs, pktlen, prog;

	rr_cons = sc->age_cdata.age_rr_cons;
	if (rr_cons == rr_prod)
		return;

	bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_rr_ring_map, 0,
	    sc->age_cdata.age_rr_ring_map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD);

	for (prog = 0; rr_cons != rr_prod; prog++) {
		rxrd = &sc->age_rdata.age_rr_ring[rr_cons];
		nsegs = AGE_RX_NSEGS(le32toh(rxrd->index));
		if (nsegs == 0)
			break;
		/*
		 * Check the number of segments against the received bytes.
		 * A non-matching value would indicate that the hardware
		 * is still trying to update the Rx return descriptors.
		 * I'm not sure whether this check is really needed.
		 */
		pktlen = AGE_RX_BYTES(le32toh(rxrd->len));
		if (nsegs != ((pktlen + (MCLBYTES - ETHER_ALIGN - 1)) /
		    (MCLBYTES - ETHER_ALIGN)))
			break;

		/* Received a frame. */
		age_rxeof(sc, rxrd);

		/* Clear return ring. */
		rxrd->index = 0;
		AGE_DESC_INC(rr_cons, AGE_RR_RING_CNT);
	}

	if (prog > 0) {
		/* Update the consumer index. */
		sc->age_cdata.age_rr_cons = rr_cons;

		/* Sync descriptors. */
		bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_rr_ring_map, 0,
		    sc->age_cdata.age_rr_ring_map->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		/* Notify hardware availability of new Rx buffers. */
		AGE_COMMIT_MBOX(sc);
	}
}

static void
age_tick(void *xsc)
{
	struct age_softc *sc = xsc;
	struct mii_data *mii = &sc->sc_miibus;
	int s;

	s = splnet();
	mii_tick(mii);
	splx(s);

	callout_schedule(&sc->sc_tick_ch, hz);
}

static void
age_reset(struct age_softc *sc)
{
	uint32_t reg;
	int i;

	CSR_WRITE_4(sc, AGE_MASTER_CFG, MASTER_RESET);
	CSR_READ_4(sc, AGE_MASTER_CFG);
	DELAY(1000);
	for (i = AGE_RESET_TIMEOUT; i > 0; i--) {
		if ((reg = CSR_READ_4(sc, AGE_IDLE_STATUS)) == 0)
			break;
		DELAY(10);
	}

	if (i == 0)
		printf("%s: reset timeout(0x%08x)!\n", device_xname(sc->sc_dev),
		    reg);

	/* Initialize PCIe module. From Linux. */
	CSR_WRITE_4(sc, 0x12FC, 0x6500);
	CSR_WRITE_4(sc, 0x1008, CSR_READ_4(sc, 0x1008) | 0x8000);
}

static int
age_init(struct ifnet *ifp)
{
	struct age_softc *sc = ifp->if_softc;
	struct mii_data *mii;
	uint8_t eaddr[ETHER_ADDR_LEN];
	bus_addr_t paddr;
	uint32_t reg, fsize;
	uint32_t rxf_hi, rxf_lo, rrd_hi, rrd_lo;
	int error;

	/*
	 * Cancel any pending I/O.
	 */
	age_stop(ifp, 0);

	/*
	 * Reset the chip to a known state.
	 */
	age_reset(sc);

	/* Initialize descriptors. */
	error = age_init_rx_ring(sc);
	if (error != 0) {
		printf("%s: no memory for Rx buffers.\n",
		    device_xname(sc->sc_dev));
		age_stop(ifp, 0);
		return error;
	}
	age_init_rr_ring(sc);
	age_init_tx_ring(sc);
	age_init_cmb_block(sc);
	age_init_smb_block(sc);

	/* Reprogram the station address. */
	memcpy(eaddr, CLLADDR(ifp->if_sadl), sizeof(eaddr));
	CSR_WRITE_4(sc, AGE_PAR0,
	    eaddr[2] << 24 | eaddr[3] << 16 | eaddr[4] << 8 | eaddr[5]);
	CSR_WRITE_4(sc, AGE_PAR1, eaddr[0] << 8 | eaddr[1]);

	/* Set descriptor base addresses. */
	paddr = sc->age_rdata.age_tx_ring_paddr;
	CSR_WRITE_4(sc, AGE_DESC_ADDR_HI, AGE_ADDR_HI(paddr));
	paddr = sc->age_rdata.age_rx_ring_paddr;
	CSR_WRITE_4(sc, AGE_DESC_RD_ADDR_LO, AGE_ADDR_LO(paddr));
	paddr = sc->age_rdata.age_rr_ring_paddr;
	CSR_WRITE_4(sc, AGE_DESC_RRD_ADDR_LO, AGE_ADDR_LO(paddr));
	paddr = sc->age_rdata.age_tx_ring_paddr;
	CSR_WRITE_4(sc, AGE_DESC_TPD_ADDR_LO, AGE_ADDR_LO(paddr));
	paddr = sc->age_rdata.age_cmb_block_paddr;
	CSR_WRITE_4(sc, AGE_DESC_CMB_ADDR_LO, AGE_ADDR_LO(paddr));
	paddr = sc->age_rdata.age_smb_block_paddr;
	CSR_WRITE_4(sc, AGE_DESC_SMB_ADDR_LO, AGE_ADDR_LO(paddr));

	/* Set Rx/Rx return descriptor counter. */
	CSR_WRITE_4(sc, AGE_DESC_RRD_RD_CNT,
	    ((AGE_RR_RING_CNT << DESC_RRD_CNT_SHIFT) &
	    DESC_RRD_CNT_MASK) |
	    ((AGE_RX_RING_CNT << DESC_RD_CNT_SHIFT) & DESC_RD_CNT_MASK));

	/* Set Tx descriptor counter. */
	CSR_WRITE_4(sc, AGE_DESC_TPD_CNT,
	    (AGE_TX_RING_CNT << DESC_TPD_CNT_SHIFT) & DESC_TPD_CNT_MASK);

	/* Tell hardware that we're ready to load descriptors. */
	CSR_WRITE_4(sc, AGE_DMA_BLOCK, DMA_BLOCK_LOAD);

	/*
	 * Initialize the mailbox register.
	 * Updated producer/consumer index information is exchanged
	 * through this mailbox register. However, the Tx producer and
	 * the Rx return consumer/Rx producer are all shared, so it is
	 * hard to separate the Tx and Rx code paths without locking.
	 * If the L1 hardware had separate mailbox registers for Tx and
	 * Rx consumer/producer management, we could have independent
	 * Tx/Rx handlers, and the Rx handler in turn could run without
	 * any locking.
	 */
	AGE_COMMIT_MBOX(sc);

	/* Configure IPG/IFG parameters. */
	CSR_WRITE_4(sc, AGE_IPG_IFG_CFG,
	    ((IPG_IFG_IPG2_DEFAULT << IPG_IFG_IPG2_SHIFT) & IPG_IFG_IPG2_MASK) |
	    ((IPG_IFG_IPG1_DEFAULT << IPG_IFG_IPG1_SHIFT) & IPG_IFG_IPG1_MASK) |
	    ((IPG_IFG_MIFG_DEFAULT << IPG_IFG_MIFG_SHIFT) & IPG_IFG_MIFG_MASK) |
	    ((IPG_IFG_IPGT_DEFAULT << IPG_IFG_IPGT_SHIFT) & IPG_IFG_IPGT_MASK));

	/* Set parameters for half-duplex media. */
	CSR_WRITE_4(sc, AGE_HDPX_CFG,
	    ((HDPX_CFG_LCOL_DEFAULT << HDPX_CFG_LCOL_SHIFT) &
	    HDPX_CFG_LCOL_MASK) |
	    ((HDPX_CFG_RETRY_DEFAULT << HDPX_CFG_RETRY_SHIFT) &
	    HDPX_CFG_RETRY_MASK) | HDPX_CFG_EXC_DEF_EN |
	    ((HDPX_CFG_ABEBT_DEFAULT << HDPX_CFG_ABEBT_SHIFT) &
	    HDPX_CFG_ABEBT_MASK) |
	    ((HDPX_CFG_JAMIPG_DEFAULT << HDPX_CFG_JAMIPG_SHIFT) &
	     HDPX_CFG_JAMIPG_MASK));

	/* Configure interrupt moderation timer. */
	sc->age_int_mod = AGE_IM_TIMER_DEFAULT;
	CSR_WRITE_2(sc, AGE_IM_TIMER, AGE_USECS(sc->age_int_mod));
	reg = CSR_READ_4(sc, AGE_MASTER_CFG);
	reg &= ~MASTER_MTIMER_ENB;
	if (AGE_USECS(sc->age_int_mod) == 0)
		reg &= ~MASTER_ITIMER_ENB;
	else
		reg |= MASTER_ITIMER_ENB;
	CSR_WRITE_4(sc, AGE_MASTER_CFG, reg);
	if (agedebug)
		printf("%s: interrupt moderation is %d us.\n",
		    device_xname(sc->sc_dev), sc->age_int_mod);
	CSR_WRITE_2(sc, AGE_INTR_CLR_TIMER, AGE_USECS(1000));

	/*
	 * Set the maximum frame size, but don't let the MTU be less
	 * than ETHERMTU.
	 */
1751 	if (ifp->if_mtu < ETHERMTU)
1752 		sc->age_max_frame_size = ETHERMTU;
1753 	else
1754 		sc->age_max_frame_size = ifp->if_mtu;
1755 	sc->age_max_frame_size += ETHER_HDR_LEN +
1756 	    sizeof(struct ether_vlan_header) + ETHER_CRC_LEN;
1757 	CSR_WRITE_4(sc, AGE_FRAME_SIZE, sc->age_max_frame_size);
1758 
1759 	/* Configure jumbo frame. */
1760 	fsize = roundup(sc->age_max_frame_size, sizeof(uint64_t));
1761 	CSR_WRITE_4(sc, AGE_RXQ_JUMBO_CFG,
1762 	    (((fsize / sizeof(uint64_t)) <<
1763 	    RXQ_JUMBO_CFG_SZ_THRESH_SHIFT) & RXQ_JUMBO_CFG_SZ_THRESH_MASK) |
1764 	    ((RXQ_JUMBO_CFG_LKAH_DEFAULT <<
1765 	    RXQ_JUMBO_CFG_LKAH_SHIFT) & RXQ_JUMBO_CFG_LKAH_MASK) |
1766 	    ((AGE_USECS(8) << RXQ_JUMBO_CFG_RRD_TIMER_SHIFT) &
1767 	    RXQ_JUMBO_CFG_RRD_TIMER_MASK));
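	/*
	 * The jumbo size threshold above is programmed in 8-byte
	 * (uint64_t) units, hence the roundup() and the division by
	 * sizeof(uint64_t) before shifting the value into place.
	 */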
1768 
1769 	/* Configure flow-control parameters. From Linux. */
1770 	if ((sc->age_flags & AGE_FLAG_PCIE) != 0) {
1771 		/*
1772 		 * Magic workaround for the old L1. It is not known
1773 		 * which hardware revision requires this.
1774 		 */
1775 		CSR_WRITE_4(sc, 0x12FC, 0x6500);
1776 		/*
1777 		 * Another magic workaround for flow-control mode
1778 		 * change. From Linux.
1779 		 */
1780 		CSR_WRITE_4(sc, 0x1008, CSR_READ_4(sc, 0x1008) | 0x8000);
1781 	}
1782 	/*
1783 	 * TODO
1784 	 *  Should understand pause parameter relationships between FIFO
1785 	 *  size and number of Rx descriptors and Rx return descriptors.
1786 	 *
1787 	 *  Magic parameters came from Linux.
1788 	 */
1789 	switch (sc->age_chip_rev) {
1790 	case 0x8001:
1791 	case 0x9001:
1792 	case 0x9002:
1793 	case 0x9003:
1794 		rxf_hi = AGE_RX_RING_CNT / 16;
1795 		rxf_lo = (AGE_RX_RING_CNT * 7) / 8;
1796 		rrd_hi = (AGE_RR_RING_CNT * 7) / 8;
1797 		rrd_lo = AGE_RR_RING_CNT / 16;
1798 		break;
1799 	default:
1800 		reg = CSR_READ_4(sc, AGE_SRAM_RX_FIFO_LEN);
1801 		rxf_lo = reg / 16;
1802 		if (rxf_lo < 192)
1803 			rxf_lo = 192;
1804 		rxf_hi = (reg * 7) / 8;
1805 		if (rxf_hi < rxf_lo)
1806 			rxf_hi = rxf_lo + 16;
1807 		reg = CSR_READ_4(sc, AGE_SRAM_RRD_LEN);
1808 		rrd_lo = reg / 8;
1809 		rrd_hi = (reg * 7) / 8;
1810 		if (rrd_lo < 2)
1811 			rrd_lo = 2;
1812 		if (rrd_hi < rrd_lo)
1813 			rrd_hi = rrd_lo + 3;
1814 		break;
1815 	}
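	/*
	 * Worked example for the default branch: if AGE_SRAM_RX_FIFO_LEN
	 * read back as 2048, rxf_lo would start at 2048 / 16 = 128, be
	 * clamped up to 192, and rxf_hi would be (2048 * 7) / 8 = 1792.
	 */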
1816 	CSR_WRITE_4(sc, AGE_RXQ_FIFO_PAUSE_THRESH,
1817 	    ((rxf_lo << RXQ_FIFO_PAUSE_THRESH_LO_SHIFT) &
1818 	    RXQ_FIFO_PAUSE_THRESH_LO_MASK) |
1819 	    ((rxf_hi << RXQ_FIFO_PAUSE_THRESH_HI_SHIFT) &
1820 	    RXQ_FIFO_PAUSE_THRESH_HI_MASK));
1821 	CSR_WRITE_4(sc, AGE_RXQ_RRD_PAUSE_THRESH,
1822 	    ((rrd_lo << RXQ_RRD_PAUSE_THRESH_LO_SHIFT) &
1823 	    RXQ_RRD_PAUSE_THRESH_LO_MASK) |
1824 	    ((rrd_hi << RXQ_RRD_PAUSE_THRESH_HI_SHIFT) &
1825 	    RXQ_RRD_PAUSE_THRESH_HI_MASK));
1826 
1827 	/* Configure RxQ. */
1828 	CSR_WRITE_4(sc, AGE_RXQ_CFG,
1829 	    ((RXQ_CFG_RD_BURST_DEFAULT << RXQ_CFG_RD_BURST_SHIFT) &
1830 	    RXQ_CFG_RD_BURST_MASK) |
1831 	    ((RXQ_CFG_RRD_BURST_THRESH_DEFAULT <<
1832 	    RXQ_CFG_RRD_BURST_THRESH_SHIFT) & RXQ_CFG_RRD_BURST_THRESH_MASK) |
1833 	    ((RXQ_CFG_RD_PREF_MIN_IPG_DEFAULT <<
1834 	    RXQ_CFG_RD_PREF_MIN_IPG_SHIFT) & RXQ_CFG_RD_PREF_MIN_IPG_MASK) |
1835 	    RXQ_CFG_CUT_THROUGH_ENB | RXQ_CFG_ENB);
1836 
1837 	/* Configure TxQ. */
1838 	CSR_WRITE_4(sc, AGE_TXQ_CFG,
1839 	    ((TXQ_CFG_TPD_BURST_DEFAULT << TXQ_CFG_TPD_BURST_SHIFT) &
1840 	    TXQ_CFG_TPD_BURST_MASK) |
1841 	    ((TXQ_CFG_TX_FIFO_BURST_DEFAULT << TXQ_CFG_TX_FIFO_BURST_SHIFT) &
1842 	    TXQ_CFG_TX_FIFO_BURST_MASK) |
1843 	    ((TXQ_CFG_TPD_FETCH_DEFAULT <<
1844 	    TXQ_CFG_TPD_FETCH_THRESH_SHIFT) & TXQ_CFG_TPD_FETCH_THRESH_MASK) |
1845 	    TXQ_CFG_ENB);
1846 
1847 	/* Configure DMA parameters. */
1848 	CSR_WRITE_4(sc, AGE_DMA_CFG,
1849 	    DMA_CFG_ENH_ORDER | DMA_CFG_RCB_64 |
1850 	    sc->age_dma_rd_burst | DMA_CFG_RD_ENB |
1851 	    sc->age_dma_wr_burst | DMA_CFG_WR_ENB);
1852 
1853 	/* Configure CMB DMA write threshold. */
1854 	CSR_WRITE_4(sc, AGE_CMB_WR_THRESH,
1855 	    ((CMB_WR_THRESH_RRD_DEFAULT << CMB_WR_THRESH_RRD_SHIFT) &
1856 	    CMB_WR_THRESH_RRD_MASK) |
1857 	    ((CMB_WR_THRESH_TPD_DEFAULT << CMB_WR_THRESH_TPD_SHIFT) &
1858 	    CMB_WR_THRESH_TPD_MASK));
1859 
1860 	/* Set CMB/SMB timer and enable them. */
1861 	CSR_WRITE_4(sc, AGE_CMB_WR_TIMER,
1862 	    ((AGE_USECS(2) << CMB_WR_TIMER_TX_SHIFT) & CMB_WR_TIMER_TX_MASK) |
1863 	    ((AGE_USECS(2) << CMB_WR_TIMER_RX_SHIFT) & CMB_WR_TIMER_RX_MASK));
1864 
1865 	/* Request SMB updates every second. */
1866 	CSR_WRITE_4(sc, AGE_SMB_TIMER, AGE_USECS(1000 * 1000));
1867 	CSR_WRITE_4(sc, AGE_CSMB_CTRL, CSMB_CTRL_SMB_ENB | CSMB_CTRL_CMB_ENB);
1868 
1869 	/*
1870 	 * Disable all WOL bits, as WOL can interfere with normal Rx
1871 	 * operation.
1872 	 */
1873 	CSR_WRITE_4(sc, AGE_WOL_CFG, 0);
1874 
1875 	/*
1876 	 * Configure Tx/Rx MACs.
1877 	 *  - Auto-padding for short frames.
1878 	 *  - Enable CRC generation.
1879 	 *  Start with full-duplex/1000Mbps media. The MAC is
1880 	 *  reconfigured once the link has been established.
1881 	 */
1882 	CSR_WRITE_4(sc, AGE_MAC_CFG,
1883 	    MAC_CFG_TX_CRC_ENB | MAC_CFG_TX_AUTO_PAD |
1884 	    MAC_CFG_FULL_DUPLEX | MAC_CFG_SPEED_1000 |
1885 	    ((MAC_CFG_PREAMBLE_DEFAULT << MAC_CFG_PREAMBLE_SHIFT) &
1886 	    MAC_CFG_PREAMBLE_MASK));
1887 
1888 	/* Set up the receive filter. */
1889 	age_rxfilter(sc);
1890 	age_rxvlan(sc);
1891 
1892 	reg = CSR_READ_4(sc, AGE_MAC_CFG);
1893 	reg |= MAC_CFG_RXCSUM_ENB;
1894 
1895 	/* Ack all pending interrupts and clear them. */
1896 	CSR_WRITE_4(sc, AGE_INTR_STATUS, 0);
1897 	CSR_WRITE_4(sc, AGE_INTR_MASK, AGE_INTRS);
1898 
1899 	/* Finally enable Tx/Rx MAC. */
1900 	CSR_WRITE_4(sc, AGE_MAC_CFG, reg | MAC_CFG_TX_ENB | MAC_CFG_RX_ENB);
1901 
1902 	sc->age_flags &= ~AGE_FLAG_LINK;
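	/*
	 * AGE_FLAG_LINK is cleared here; the MII status change callback
	 * is expected to set it again once the PHY reports a valid link.
	 */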
1903 
1904 	/* Switch to the current media. */
1905 	mii = &sc->sc_miibus;
1906 	mii_mediachg(mii);
1907 
1908 	callout_schedule(&sc->sc_tick_ch, hz);
1909 
1910 	ifp->if_flags |= IFF_RUNNING;
1911 	ifp->if_flags &= ~IFF_OACTIVE;
1912 
1913 	return 0;
1914 }
1915 
1916 static void
1917 age_stop(struct ifnet *ifp, int disable)
1918 {
1919 	struct age_softc *sc = ifp->if_softc;
1920 	struct age_txdesc *txd;
1921 	struct age_rxdesc *rxd;
1922 	uint32_t reg;
1923 	int i;
1924 
1925 	callout_stop(&sc->sc_tick_ch);
1926 
1927 	/*
1928 	 * Mark the interface down and cancel the watchdog timer.
1929 	 */
1930 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
1931 	ifp->if_timer = 0;
1932 
1933 	sc->age_flags &= ~AGE_FLAG_LINK;
1934 
1935 	mii_down(&sc->sc_miibus);
1936 
1937 	/*
1938 	 * Disable interrupts.
1939 	 */
1940 	CSR_WRITE_4(sc, AGE_INTR_MASK, 0);
1941 	CSR_WRITE_4(sc, AGE_INTR_STATUS, 0xFFFFFFFF);
1942 
1943 	/* Stop CMB/SMB updates. */
1944 	CSR_WRITE_4(sc, AGE_CSMB_CTRL, 0);
1945 
1946 	/* Stop Rx/Tx MAC. */
1947 	age_stop_rxmac(sc);
1948 	age_stop_txmac(sc);
1949 
1950 	/* Stop DMA. */
1951 	CSR_WRITE_4(sc, AGE_DMA_CFG,
1952 	    CSR_READ_4(sc, AGE_DMA_CFG) & ~(DMA_CFG_RD_ENB | DMA_CFG_WR_ENB));
1953 
1954 	/* Stop TxQ/RxQ. */
1955 	CSR_WRITE_4(sc, AGE_TXQ_CFG,
1956 	    CSR_READ_4(sc, AGE_TXQ_CFG) & ~TXQ_CFG_ENB);
1957 	CSR_WRITE_4(sc, AGE_RXQ_CFG,
1958 	    CSR_READ_4(sc, AGE_RXQ_CFG) & ~RXQ_CFG_ENB);
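	/*
	 * Busy-wait until every engine reports idle; in the worst case
	 * this polls for AGE_RESET_TIMEOUT * 10us before giving up.
	 */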
1959 	for (i = AGE_RESET_TIMEOUT; i > 0; i--) {
1960 		if ((reg = CSR_READ_4(sc, AGE_IDLE_STATUS)) == 0)
1961 			break;
1962 		DELAY(10);
1963 	}
1964 	if (i == 0)
1965 		printf("%s: stopping Rx/Tx MACs timed out (0x%08x)!\n",
1966 		    device_xname(sc->sc_dev), reg);
1967 
1968 	/* Reclaim Rx buffers that have been processed. */
1969 	if (sc->age_cdata.age_rxhead != NULL)
1970 		m_freem(sc->age_cdata.age_rxhead);
1971 	AGE_RXCHAIN_RESET(sc);
1972 
1973 	/*
1974 	 * Free RX and TX mbufs still in the queues.
1975 	 */
1976 	for (i = 0; i < AGE_RX_RING_CNT; i++) {
1977 		rxd = &sc->age_cdata.age_rxdesc[i];
1978 		if (rxd->rx_m != NULL) {
1979 			bus_dmamap_unload(sc->sc_dmat, rxd->rx_dmamap);
1980 			m_freem(rxd->rx_m);
1981 			rxd->rx_m = NULL;
1982 		}
1983 	}
1984 	for (i = 0; i < AGE_TX_RING_CNT; i++) {
1985 		txd = &sc->age_cdata.age_txdesc[i];
1986 		if (txd->tx_m != NULL) {
1987 			bus_dmamap_unload(sc->sc_dmat, txd->tx_dmamap);
1988 			m_freem(txd->tx_m);
1989 			txd->tx_m = NULL;
1990 		}
1991 	}
1992 }
1993 
1994 static void
1995 age_stats_update(struct age_softc *sc)
1996 {
1997 	struct ifnet *ifp = &sc->sc_ec.ec_if;
1998 	struct age_stats *stat;
1999 	struct smb *smb;
2000 
2001 	stat = &sc->age_stat;
2002 
2003 	bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_smb_block_map, 0,
2004 	    sc->age_cdata.age_smb_block_map->dm_mapsize,
2005 	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
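	/*
	 * The SMB block is DMA-written by the hardware, so it is synced
	 * for the CPU before it is read and synced back for the device
	 * after the updated flag is cleared at the end of this function.
	 */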
2006 
2007 	smb = sc->age_rdata.age_smb_block;
2008 	if (smb->updated == 0)
2009 		return;
2010 
2011 	/* Rx stats. */
2012 	stat->rx_frames += smb->rx_frames;
2013 	stat->rx_bcast_frames += smb->rx_bcast_frames;
2014 	stat->rx_mcast_frames += smb->rx_mcast_frames;
2015 	stat->rx_pause_frames += smb->rx_pause_frames;
2016 	stat->rx_control_frames += smb->rx_control_frames;
2017 	stat->rx_crcerrs += smb->rx_crcerrs;
2018 	stat->rx_lenerrs += smb->rx_lenerrs;
2019 	stat->rx_bytes += smb->rx_bytes;
2020 	stat->rx_runts += smb->rx_runts;
2021 	stat->rx_fragments += smb->rx_fragments;
2022 	stat->rx_pkts_64 += smb->rx_pkts_64;
2023 	stat->rx_pkts_65_127 += smb->rx_pkts_65_127;
2024 	stat->rx_pkts_128_255 += smb->rx_pkts_128_255;
2025 	stat->rx_pkts_256_511 += smb->rx_pkts_256_511;
2026 	stat->rx_pkts_512_1023 += smb->rx_pkts_512_1023;
2027 	stat->rx_pkts_1024_1518 += smb->rx_pkts_1024_1518;
2028 	stat->rx_pkts_1519_max += smb->rx_pkts_1519_max;
2029 	stat->rx_pkts_truncated += smb->rx_pkts_truncated;
2030 	stat->rx_fifo_oflows += smb->rx_fifo_oflows;
2031 	stat->rx_desc_oflows += smb->rx_desc_oflows;
2032 	stat->rx_alignerrs += smb->rx_alignerrs;
2033 	stat->rx_bcast_bytes += smb->rx_bcast_bytes;
2034 	stat->rx_mcast_bytes += smb->rx_mcast_bytes;
2035 	stat->rx_pkts_filtered += smb->rx_pkts_filtered;
2036 
2037 	/* Tx stats. */
2038 	stat->tx_frames += smb->tx_frames;
2039 	stat->tx_bcast_frames += smb->tx_bcast_frames;
2040 	stat->tx_mcast_frames += smb->tx_mcast_frames;
2041 	stat->tx_pause_frames += smb->tx_pause_frames;
2042 	stat->tx_excess_defer += smb->tx_excess_defer;
2043 	stat->tx_control_frames += smb->tx_control_frames;
2044 	stat->tx_deferred += smb->tx_deferred;
2045 	stat->tx_bytes += smb->tx_bytes;
2046 	stat->tx_pkts_64 += smb->tx_pkts_64;
2047 	stat->tx_pkts_65_127 += smb->tx_pkts_65_127;
2048 	stat->tx_pkts_128_255 += smb->tx_pkts_128_255;
2049 	stat->tx_pkts_256_511 += smb->tx_pkts_256_511;
2050 	stat->tx_pkts_512_1023 += smb->tx_pkts_512_1023;
2051 	stat->tx_pkts_1024_1518 += smb->tx_pkts_1024_1518;
2052 	stat->tx_pkts_1519_max += smb->tx_pkts_1519_max;
2053 	stat->tx_single_colls += smb->tx_single_colls;
2054 	stat->tx_multi_colls += smb->tx_multi_colls;
2055 	stat->tx_late_colls += smb->tx_late_colls;
2056 	stat->tx_excess_colls += smb->tx_excess_colls;
2057 	stat->tx_underrun += smb->tx_underrun;
2058 	stat->tx_desc_underrun += smb->tx_desc_underrun;
2059 	stat->tx_lenerrs += smb->tx_lenerrs;
2060 	stat->tx_pkts_truncated += smb->tx_pkts_truncated;
2061 	stat->tx_bcast_bytes += smb->tx_bcast_bytes;
2062 	stat->tx_mcast_bytes += smb->tx_mcast_bytes;
2063 
2064 	/* Update counters in ifnet. */
2065 	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
2066 
2067 	if_statadd_ref(nsr, if_opackets, smb->tx_frames);
2068 
2069 	if_statadd_ref(nsr, if_collisions,
2070 	    smb->tx_single_colls +
2071 	    smb->tx_multi_colls + smb->tx_late_colls +
2072 	    smb->tx_excess_colls * HDPX_CFG_RETRY_DEFAULT);
2073 
2074 	if_statadd_ref(nsr, if_oerrors,
2075 	    smb->tx_excess_colls +
2076 	    smb->tx_late_colls + smb->tx_underrun +
2077 	    smb->tx_pkts_truncated);
2078 
2079 	if_statadd_ref(nsr, if_ierrors,
2080 	    smb->rx_crcerrs + smb->rx_lenerrs +
2081 	    smb->rx_runts + smb->rx_pkts_truncated +
2082 	    smb->rx_fifo_oflows + smb->rx_desc_oflows +
2083 	    smb->rx_alignerrs);
2084 
2085 	IF_STAT_PUTREF(ifp);
2086 
2087 	/* Update done, clear. */
2088 	smb->updated = 0;
2089 
2090 	bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_smb_block_map, 0,
2091 	    sc->age_cdata.age_smb_block_map->dm_mapsize,
2092 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2093 }
2094 
2095 static void
2096 age_stop_txmac(struct age_softc *sc)
2097 {
2098 	uint32_t reg;
2099 	int i;
2100 
2101 	reg = CSR_READ_4(sc, AGE_MAC_CFG);
2102 	if ((reg & MAC_CFG_TX_ENB) != 0) {
2103 		reg &= ~MAC_CFG_TX_ENB;
2104 		CSR_WRITE_4(sc, AGE_MAC_CFG, reg);
2105 	}
2106 	/* Stop Tx DMA engine. */
2107 	reg = CSR_READ_4(sc, AGE_DMA_CFG);
2108 	if ((reg & DMA_CFG_RD_ENB) != 0) {
2109 		reg &= ~DMA_CFG_RD_ENB;
2110 		CSR_WRITE_4(sc, AGE_DMA_CFG, reg);
2111 	}
2112 	for (i = AGE_RESET_TIMEOUT; i > 0; i--) {
2113 		if ((CSR_READ_4(sc, AGE_IDLE_STATUS) &
2114 		    (IDLE_STATUS_TXMAC | IDLE_STATUS_DMARD)) == 0)
2115 			break;
2116 		DELAY(10);
2117 	}
2118 	if (i == 0)
2119 		printf("%s: stopping Tx MAC timed out!\n", device_xname(sc->sc_dev));
2120 }
2121 
2122 static void
2123 age_stop_rxmac(struct age_softc *sc)
2124 {
2125 	uint32_t reg;
2126 	int i;
2127 
2128 	reg = CSR_READ_4(sc, AGE_MAC_CFG);
2129 	if ((reg & MAC_CFG_RX_ENB) != 0) {
2130 		reg &= ~MAC_CFG_RX_ENB;
2131 		CSR_WRITE_4(sc, AGE_MAC_CFG, reg);
2132 	}
2133 	/* Stop Rx DMA engine. */
2134 	reg = CSR_READ_4(sc, AGE_DMA_CFG);
2135 	if ((reg & DMA_CFG_WR_ENB) != 0) {
2136 		reg &= ~DMA_CFG_WR_ENB;
2137 		CSR_WRITE_4(sc, AGE_DMA_CFG, reg);
2138 	}
2139 	for (i = AGE_RESET_TIMEOUT; i > 0; i--) {
2140 		if ((CSR_READ_4(sc, AGE_IDLE_STATUS) &
2141 		    (IDLE_STATUS_RXMAC | IDLE_STATUS_DMAWR)) == 0)
2142 			break;
2143 		DELAY(10);
2144 	}
2145 	if (i == 0)
2146 		printf("%s: stopping Rx MAC timed out!\n", device_xname(sc->sc_dev));
2147 }
2148 
2149 static void
2150 age_init_tx_ring(struct age_softc *sc)
2151 {
2152 	struct age_ring_data *rd;
2153 	struct age_txdesc *txd;
2154 	int i;
2155 
2156 	sc->age_cdata.age_tx_prod = 0;
2157 	sc->age_cdata.age_tx_cons = 0;
2158 	sc->age_cdata.age_tx_cnt = 0;
2159 
2160 	rd = &sc->age_rdata;
2161 	memset(rd->age_tx_ring, 0, AGE_TX_RING_SZ);
2162 	for (i = 0; i < AGE_TX_RING_CNT; i++) {
2163 		txd = &sc->age_cdata.age_txdesc[i];
2164 		txd->tx_desc = &rd->age_tx_ring[i];
2165 		txd->tx_m = NULL;
2166 	}
2167 	bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_tx_ring_map, 0,
2168 	    sc->age_cdata.age_tx_ring_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
2169 }
2170 
2171 static int
2172 age_init_rx_ring(struct age_softc *sc)
2173 {
2174 	struct age_ring_data *rd;
2175 	struct age_rxdesc *rxd;
2176 	int i;
2177 
2178 	sc->age_cdata.age_rx_cons = AGE_RX_RING_CNT - 1;
2179 	rd = &sc->age_rdata;
2180 	memset(rd->age_rx_ring, 0, AGE_RX_RING_SZ);
2181 	for (i = 0; i < AGE_RX_RING_CNT; i++) {
2182 		rxd = &sc->age_cdata.age_rxdesc[i];
2183 		rxd->rx_m = NULL;
2184 		rxd->rx_desc = &rd->age_rx_ring[i];
2185 		if (age_newbuf(sc, rxd, 1) != 0)
2186 			return ENOBUFS;
2187 	}
2188 
2189 	bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_rx_ring_map, 0,
2190 	    sc->age_cdata.age_rx_ring_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
2191 
2192 	return 0;
2193 }
2194 
2195 static void
2196 age_init_rr_ring(struct age_softc *sc)
2197 {
2198 	struct age_ring_data *rd;
2199 
2200 	sc->age_cdata.age_rr_cons = 0;
2201 	AGE_RXCHAIN_RESET(sc);
2202 
2203 	rd = &sc->age_rdata;
2204 	memset(rd->age_rr_ring, 0, AGE_RR_RING_SZ);
2205 	bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_rr_ring_map, 0,
2206 	    sc->age_cdata.age_rr_ring_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
2207 }
2208 
2209 static void
2210 age_init_cmb_block(struct age_softc *sc)
2211 {
2212 	struct age_ring_data *rd;
2213 
2214 	rd = &sc->age_rdata;
2215 	memset(rd->age_cmb_block, 0, AGE_CMB_BLOCK_SZ);
2216 	bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_cmb_block_map, 0,
2217 	    sc->age_cdata.age_cmb_block_map->dm_mapsize,
2218 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2219 }
2220 
2221 static void
2222 age_init_smb_block(struct age_softc *sc)
2223 {
2224 	struct age_ring_data *rd;
2225 
2226 	rd = &sc->age_rdata;
2227 	memset(rd->age_smb_block, 0, AGE_SMB_BLOCK_SZ);
2228 	bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_smb_block_map, 0,
2229 	    sc->age_cdata.age_smb_block_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
2230 }
2231 
2232 static int
2233 age_newbuf(struct age_softc *sc, struct age_rxdesc *rxd, int init)
2234 {
2235 	struct rx_desc *desc;
2236 	struct mbuf *m;
2237 	bus_dmamap_t map;
2238 	int error;
2239 
2240 	MGETHDR(m, M_DONTWAIT, MT_DATA);
2241 	if (m == NULL)
2242 		return ENOBUFS;
2243 	MCLGET(m, M_DONTWAIT);
2244 	if (!(m->m_flags & M_EXT)) {
2245 		 m_freem(m);
2246 		 return ENOBUFS;
2247 	}
2248 
2249 	m->m_len = m->m_pkthdr.len = MCLBYTES;
2250 	m_adj(m, ETHER_ALIGN);
2251 
2252 	error = bus_dmamap_load_mbuf(sc->sc_dmat,
2253 	    sc->age_cdata.age_rx_sparemap, m, BUS_DMA_NOWAIT);
2254 
2255 	if (error != 0) {
2256 		m_freem(m);
2257 
2258 		if (init)
2259 			printf("%s: can't load RX mbuf\n", device_xname(sc->sc_dev));
2260 		return error;
2261 	}
2262 
2263 	if (rxd->rx_m != NULL) {
2264 		bus_dmamap_sync(sc->sc_dmat, rxd->rx_dmamap, 0,
2265 		    rxd->rx_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
2266 		bus_dmamap_unload(sc->sc_dmat, rxd->rx_dmamap);
2267 	}
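	/*
	 * Swap the freshly loaded spare map into the descriptor and
	 * recycle the old map as the new spare, so the ring slot is
	 * never left without a valid mapping.
	 */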
2268 	map = rxd->rx_dmamap;
2269 	rxd->rx_dmamap = sc->age_cdata.age_rx_sparemap;
2270 	sc->age_cdata.age_rx_sparemap = map;
2271 	rxd->rx_m = m;
2272 
2273 	desc = rxd->rx_desc;
2274 	desc->addr = htole64(rxd->rx_dmamap->dm_segs[0].ds_addr);
2275 	desc->len =
2276 	    htole32((rxd->rx_dmamap->dm_segs[0].ds_len & AGE_RD_LEN_MASK) <<
2277 	    AGE_RD_LEN_SHIFT);
2278 
2279 	return 0;
2280 }
2281 
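/*
 * Enable or disable hardware VLAN tag stripping to match the
 * ETHERCAP_VLAN_HWTAGGING capability setting.
 */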
2282 static void
2283 age_rxvlan(struct age_softc *sc)
2284 {
2285 	uint32_t reg;
2286 
2287 	reg = CSR_READ_4(sc, AGE_MAC_CFG);
2288 	reg &= ~MAC_CFG_VLAN_TAG_STRIP;
2289 	if (sc->sc_ec.ec_capenable & ETHERCAP_VLAN_HWTAGGING)
2290 		reg |= MAC_CFG_VLAN_TAG_STRIP;
2291 	CSR_WRITE_4(sc, AGE_MAC_CFG, reg);
2292 }
2293 
2294 static void
2295 age_rxfilter(struct age_softc *sc)
2296 {
2297 	struct ethercom *ec = &sc->sc_ec;
2298 	struct ifnet *ifp = &sc->sc_ec.ec_if;
2299 	struct ether_multi *enm;
2300 	struct ether_multistep step;
2301 	uint32_t crc;
2302 	uint32_t mchash[2];
2303 	uint32_t rxcfg;
2304 
2305 	rxcfg = CSR_READ_4(sc, AGE_MAC_CFG);
2306 	rxcfg &= ~(MAC_CFG_ALLMULTI | MAC_CFG_BCAST | MAC_CFG_PROMISC);
2307 	ifp->if_flags &= ~IFF_ALLMULTI;
2308 
2309 	/*
2310 	 * Always accept broadcast frames.
2311 	 */
2312 	rxcfg |= MAC_CFG_BCAST;
2313 
2314 	/* Program new filter. */
2315 	if ((ifp->if_flags & IFF_PROMISC) != 0)
2316 		goto update;
2317 
2318 	memset(mchash, 0, sizeof(mchash));
2319 
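	/*
	 * The chip implements a 64-bit multicast hash filter: the most
	 * significant bit of the big-endian CRC selects MAR0 or MAR1,
	 * and the next five bits select the bit within that register.
	 */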
2320 	ETHER_LOCK(ec);
2321 	ETHER_FIRST_MULTI(step, ec, enm);
2322 	while (enm != NULL) {
2323 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
2324 			/* XXX Use ETHER_F_ALLMULTI in future. */
2325 			ifp->if_flags |= IFF_ALLMULTI;
2326 			ETHER_UNLOCK(ec);
2327 			goto update;
2328 		}
2329 		crc = ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN);
2330 		mchash[crc >> 31] |= 1U << ((crc >> 26) & 0x1f);
2331 		ETHER_NEXT_MULTI(step, enm);
2332 	}
2333 	ETHER_UNLOCK(ec);
2334 
2335 update:
2336 	if ((ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
2337 		if (ifp->if_flags & IFF_PROMISC) {
2338 			rxcfg |= MAC_CFG_PROMISC;
2339 			/* XXX Use ETHER_F_ALLMULTI in future. */
2340 			ifp->if_flags |= IFF_ALLMULTI;
2341 		} else
2342 			rxcfg |= MAC_CFG_ALLMULTI;
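		/* Open the 64-bit hash filter completely. */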
2343 		mchash[0] = mchash[1] = 0xFFFFFFFF;
2344 	}
2345 	CSR_WRITE_4(sc, AGE_MAR0, mchash[0]);
2346 	CSR_WRITE_4(sc, AGE_MAR1, mchash[1]);
2347 	CSR_WRITE_4(sc, AGE_MAC_CFG, rxcfg);
2348 }
2349