1 /*	$NetBSD: if_nfe.c,v 1.6 2006/10/12 01:31:30 christos Exp $	*/
2 /*	$OpenBSD: if_nfe.c,v 1.52 2006/03/02 09:04:00 jsg Exp $	*/
3 
4 /*-
5  * Copyright (c) 2006 Damien Bergamini <damien.bergamini@free.fr>
6  * Copyright (c) 2005, 2006 Jonathan Gray <jsg@openbsd.org>
7  *
8  * Permission to use, copy, modify, and distribute this software for any
9  * purpose with or without fee is hereby granted, provided that the above
10  * copyright notice and this permission notice appear in all copies.
11  *
12  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
13  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
14  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
15  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
16  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19  */
20 
21 /* Driver for NVIDIA nForce MCP Fast Ethernet and Gigabit Ethernet */
22 
23 #include <sys/cdefs.h>
24 __KERNEL_RCSID(0, "$NetBSD: if_nfe.c,v 1.6 2006/10/12 01:31:30 christos Exp $");
25 
26 #include "opt_inet.h"
27 #include "bpfilter.h"
28 #include "vlan.h"
29 
30 #include <sys/param.h>
31 #include <sys/endian.h>
32 #include <sys/systm.h>
33 #include <sys/types.h>
34 #include <sys/sockio.h>
35 #include <sys/mbuf.h>
36 #include <sys/queue.h>
37 #include <sys/malloc.h>
38 #include <sys/kernel.h>
39 #include <sys/device.h>
40 #include <sys/socket.h>
41 
42 #include <machine/bus.h>
43 
44 #include <net/if.h>
45 #include <net/if_dl.h>
46 #include <net/if_media.h>
47 #include <net/if_ether.h>
48 #include <net/if_arp.h>
49 
50 #ifdef INET
51 #include <netinet/in.h>
52 #include <netinet/in_systm.h>
53 #include <netinet/in_var.h>
54 #include <netinet/ip.h>
55 #include <netinet/if_inarp.h>
56 #endif
57 
58 #if NVLAN > 0
59 #include <net/if_types.h>
60 #endif
61 
62 #if NBPFILTER > 0
63 #include <net/bpf.h>
64 #endif
65 
66 #include <dev/mii/mii.h>
67 #include <dev/mii/miivar.h>
68 
69 #include <dev/pci/pcireg.h>
70 #include <dev/pci/pcivar.h>
71 #include <dev/pci/pcidevs.h>
72 
73 #include <dev/pci/if_nfereg.h>
74 #include <dev/pci/if_nfevar.h>
75 
76 int	nfe_match(struct device *, struct cfdata *, void *);
77 void	nfe_attach(struct device *, struct device *, void *);
78 void	nfe_power(int, void *);
79 void	nfe_miibus_statchg(struct device *);
80 int	nfe_miibus_readreg(struct device *, int, int);
81 void	nfe_miibus_writereg(struct device *, int, int, int);
82 int	nfe_intr(void *);
83 int	nfe_ioctl(struct ifnet *, u_long, caddr_t);
84 void	nfe_txdesc32_sync(struct nfe_softc *, struct nfe_desc32 *, int);
85 void	nfe_txdesc64_sync(struct nfe_softc *, struct nfe_desc64 *, int);
86 void	nfe_txdesc32_rsync(struct nfe_softc *, int, int, int);
87 void	nfe_txdesc64_rsync(struct nfe_softc *, int, int, int);
88 void	nfe_rxdesc32_sync(struct nfe_softc *, struct nfe_desc32 *, int);
89 void	nfe_rxdesc64_sync(struct nfe_softc *, struct nfe_desc64 *, int);
90 void	nfe_rxeof(struct nfe_softc *);
91 void	nfe_txeof(struct nfe_softc *);
92 int	nfe_encap(struct nfe_softc *, struct mbuf *);
93 void	nfe_start(struct ifnet *);
94 void	nfe_watchdog(struct ifnet *);
95 int	nfe_init(struct ifnet *);
96 void	nfe_stop(struct ifnet *, int);
97 struct	nfe_jbuf *nfe_jalloc(struct nfe_softc *);
98 void	nfe_jfree(struct mbuf *, caddr_t, size_t, void *);
99 int	nfe_jpool_alloc(struct nfe_softc *);
100 void	nfe_jpool_free(struct nfe_softc *);
101 int	nfe_alloc_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
102 void	nfe_reset_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
103 void	nfe_free_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
104 int	nfe_alloc_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
105 void	nfe_reset_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
106 void	nfe_free_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
107 int	nfe_ifmedia_upd(struct ifnet *);
108 void	nfe_ifmedia_sts(struct ifnet *, struct ifmediareq *);
109 void	nfe_setmulti(struct nfe_softc *);
110 void	nfe_get_macaddr(struct nfe_softc *, uint8_t *);
111 void	nfe_set_macaddr(struct nfe_softc *, const uint8_t *);
112 void	nfe_tick(void *);
113 
114 CFATTACH_DECL(nfe, sizeof(struct nfe_softc), nfe_match, nfe_attach, NULL, NULL);
115 
116 /*#define NFE_NO_JUMBO*/
117 
118 #ifdef NFE_DEBUG
119 int nfedebug = 0;
120 #define DPRINTF(x)	do { if (nfedebug) printf x; } while (0)
121 #define DPRINTFN(n,x)	do { if (nfedebug >= (n)) printf x; } while (0)
122 #else
123 #define DPRINTF(x)
124 #define DPRINTFN(n,x)
125 #endif
126 
127 /* deal with naming differences */
128 
129 #define	PCI_PRODUCT_NVIDIA_NFORCE3_LAN2 \
130 	PCI_PRODUCT_NVIDIA_NFORCE2_400_LAN1
131 #define	PCI_PRODUCT_NVIDIA_NFORCE3_LAN3 \
132 	PCI_PRODUCT_NVIDIA_NFORCE2_400_LAN2
133 #define	PCI_PRODUCT_NVIDIA_NFORCE3_LAN5 \
134 	PCI_PRODUCT_NVIDIA_NFORCE3_250_LAN
135 
136 #define	PCI_PRODUCT_NVIDIA_CK804_LAN1 \
137 	PCI_PRODUCT_NVIDIA_NFORCE4_LAN1
138 #define	PCI_PRODUCT_NVIDIA_CK804_LAN2 \
139 	PCI_PRODUCT_NVIDIA_NFORCE4_LAN2
140 
141 #define	PCI_PRODUCT_NVIDIA_MCP51_LAN1 \
142 	PCI_PRODUCT_NVIDIA_NFORCE430_LAN1
143 #define	PCI_PRODUCT_NVIDIA_MCP51_LAN2 \
144 	PCI_PRODUCT_NVIDIA_NFORCE430_LAN2
145 
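/*
 * The DMA address handling below tests __LP64__ (inherited from the
 * OpenBSD code); define it from NetBSD's _LP64 in case only the latter
 * is predefined by the toolchain.
 */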
146 #ifdef	_LP64
147 #define	__LP64__ 1
148 #endif
149 
150 const struct nfe_product {
151 	pci_vendor_id_t		vendor;
152 	pci_product_id_t	product;
153 } nfe_devices[] = {
154 	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE_LAN },
155 	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_LAN },
156 	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN1 },
157 	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN2 },
158 	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN3 },
159 	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN4 },
160 	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN5 },
161 	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_CK804_LAN1 },
162 	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_CK804_LAN2 },
163 	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN1 },
164 	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN2 },
165 	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP51_LAN1 },
166 	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP51_LAN2 },
167 	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN1 },
168 	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN2 },
169 	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN1 },
170 	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN2 },
171 	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN3 },
172 	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN4 },
173 	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN1 },
174 	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN2 },
175 	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN3 },
176 	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN4 }
177 };
178 
179 int
180 nfe_match(struct device *dev __unused, struct cfdata *match __unused, void *aux)
181 {
182 	struct pci_attach_args *pa = aux;
183 	const struct nfe_product *np;
184 	int i;
185 
186 	for (i = 0; i < sizeof(nfe_devices) / sizeof(nfe_devices[0]); i++) {
187 		np = &nfe_devices[i];
188 		if (PCI_VENDOR(pa->pa_id) == np->vendor &&
189 		    PCI_PRODUCT(pa->pa_id) == np->product)
190 			return 1;
191 	}
192 	return 0;
193 }
194 
195 void
196 nfe_attach(struct device *parent __unused, struct device *self, void *aux)
197 {
198 	struct nfe_softc *sc = (struct nfe_softc *)self;
199 	struct pci_attach_args *pa = aux;
200 	pci_chipset_tag_t pc = pa->pa_pc;
201 	pci_intr_handle_t ih;
202 	const char *intrstr;
203 	struct ifnet *ifp;
204 	bus_size_t memsize;
205 	pcireg_t memtype;
206 
207 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, NFE_PCI_BA);
208 	switch (memtype) {
209 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
210 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
211 		if (pci_mapreg_map(pa, NFE_PCI_BA, memtype, 0, &sc->sc_memt,
212 		    &sc->sc_memh, NULL, &memsize) == 0)
213 			break;
214 		/* FALLTHROUGH */
215 	default:
216 		printf(": could not map mem space\n");
217 		return;
218 	}
219 
220 	if (pci_intr_map(pa, &ih) != 0) {
221 		printf(": could not map interrupt\n");
222 		return;
223 	}
224 
225 	intrstr = pci_intr_string(pc, ih);
226 	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, nfe_intr, sc);
227 	if (sc->sc_ih == NULL) {
228 		printf(": could not establish interrupt");
229 		if (intrstr != NULL)
230 			printf(" at %s", intrstr);
231 		printf("\n");
232 		return;
233 	}
234 	printf(": %s", intrstr);
235 
236 	sc->sc_dmat = pa->pa_dmat;
237 
238 	nfe_get_macaddr(sc, sc->sc_enaddr);
239 	printf(", address %s\n", ether_sprintf(sc->sc_enaddr));
240 
241 	sc->sc_flags = 0;
242 
243 	switch (PCI_PRODUCT(pa->pa_id)) {
244 	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN2:
245 	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN3:
246 	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN4:
247 	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN5:
248 		sc->sc_flags |= NFE_JUMBO_SUP | NFE_HW_CSUM;
249 		break;
250 	case PCI_PRODUCT_NVIDIA_MCP51_LAN1:
251 	case PCI_PRODUCT_NVIDIA_MCP51_LAN2:
252 	case PCI_PRODUCT_NVIDIA_MCP61_LAN1:
253 	case PCI_PRODUCT_NVIDIA_MCP61_LAN2:
254 	case PCI_PRODUCT_NVIDIA_MCP61_LAN3:
255 	case PCI_PRODUCT_NVIDIA_MCP61_LAN4:
256 		sc->sc_flags |= NFE_40BIT_ADDR;
257 		break;
258 	case PCI_PRODUCT_NVIDIA_CK804_LAN1:
259 	case PCI_PRODUCT_NVIDIA_CK804_LAN2:
260 	case PCI_PRODUCT_NVIDIA_MCP04_LAN1:
261 	case PCI_PRODUCT_NVIDIA_MCP04_LAN2:
262 		sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM;
263 		break;
264 	case PCI_PRODUCT_NVIDIA_MCP55_LAN1:
265 	case PCI_PRODUCT_NVIDIA_MCP55_LAN2:
266 	case PCI_PRODUCT_NVIDIA_MCP65_LAN1:
267 	case PCI_PRODUCT_NVIDIA_MCP65_LAN2:
268 	case PCI_PRODUCT_NVIDIA_MCP65_LAN3:
269 	case PCI_PRODUCT_NVIDIA_MCP65_LAN4:
270 		sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM |
271 		    NFE_HW_VLAN;
272 		break;
273 	}
274 
275 #ifndef NFE_NO_JUMBO
276 	/* enable jumbo frames for adapters that support it */
277 	if (sc->sc_flags & NFE_JUMBO_SUP)
278 		sc->sc_flags |= NFE_USE_JUMBO;
279 #endif
280 
281 	/*
282 	 * Allocate Tx and Rx rings.
283 	 */
284 	if (nfe_alloc_tx_ring(sc, &sc->txq) != 0) {
285 		printf("%s: could not allocate Tx ring\n",
286 		    sc->sc_dev.dv_xname);
287 		return;
288 	}
289 
290 	if (nfe_alloc_rx_ring(sc, &sc->rxq) != 0) {
291 		printf("%s: could not allocate Rx ring\n",
292 		    sc->sc_dev.dv_xname);
293 		nfe_free_tx_ring(sc, &sc->txq);
294 		return;
295 	}
296 
297 	ifp = &sc->sc_ethercom.ec_if;
298 	ifp->if_softc = sc;
299 	ifp->if_mtu = ETHERMTU;
300 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
301 	ifp->if_ioctl = nfe_ioctl;
302 	ifp->if_start = nfe_start;
303 	ifp->if_watchdog = nfe_watchdog;
304 	ifp->if_init = nfe_init;
305 	ifp->if_baudrate = IF_Gbps(1);
306 	IFQ_SET_MAXLEN(&ifp->if_snd, NFE_IFQ_MAXLEN);
307 	IFQ_SET_READY(&ifp->if_snd);
308 	strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, IFNAMSIZ);
309 
310 #if NVLAN > 0
311 	if (sc->sc_flags & NFE_HW_VLAN)
312 		sc->sc_ethercom.ec_capabilities |=
313 			ETHERCAP_VLAN_HWTAGGING | ETHERCAP_VLAN_MTU;
314 #endif
315 #ifdef NFE_CSUM
316 	if (sc->sc_flags & NFE_HW_CSUM) {
317 		ifp->if_capabilities |= IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 |
318 		    IFCAP_CSUM_UDPv4;
319 	}
320 #endif
321 
322 	sc->sc_mii.mii_ifp = ifp;
323 	sc->sc_mii.mii_readreg = nfe_miibus_readreg;
324 	sc->sc_mii.mii_writereg = nfe_miibus_writereg;
325 	sc->sc_mii.mii_statchg = nfe_miibus_statchg;
326 
327 	ifmedia_init(&sc->sc_mii.mii_media, 0, nfe_ifmedia_upd,
328 	    nfe_ifmedia_sts);
329 	mii_attach(self, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
330 	    MII_OFFSET_ANY, 0);
331 	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
332 		printf("%s: no PHY found!\n", sc->sc_dev.dv_xname);
333 		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL,
334 		    0, NULL);
335 		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL);
336 	} else
337 		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
338 
339 	if_attach(ifp);
340 	ether_ifattach(ifp, sc->sc_enaddr);
341 
342 	callout_init(&sc->sc_tick_ch);
343 	callout_setfunc(&sc->sc_tick_ch, nfe_tick, sc);
344 
345 	sc->sc_powerhook = powerhook_establish(sc->sc_dev.dv_xname,
346 	    nfe_power, sc);
347 }
348 
349 void
350 nfe_power(int why, void *arg)
351 {
352 	struct nfe_softc *sc = arg;
353 	struct ifnet *ifp;
354 
355 	if (why == PWR_RESUME) {
356 		ifp = &sc->sc_ethercom.ec_if;
357 		if (ifp->if_flags & IFF_UP) {
358 			ifp->if_flags &= ~IFF_RUNNING;
359 			nfe_init(ifp);
360 			if (ifp->if_flags & IFF_RUNNING)
361 				nfe_start(ifp);
362 		}
363 	}
364 }
365 
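/*
 * MII status change callback: propagate the negotiated speed and duplex
 * from the PHY into the MAC's PHY interface, MISC1, random seed and
 * link-speed registers.
 */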
366 void
367 nfe_miibus_statchg(struct device *dev)
368 {
369 	struct nfe_softc *sc = (struct nfe_softc *)dev;
370 	struct mii_data *mii = &sc->sc_mii;
371 	uint32_t phy, seed, misc = NFE_MISC1_MAGIC, link = NFE_MEDIA_SET;
372 
373 	phy = NFE_READ(sc, NFE_PHY_IFACE);
374 	phy &= ~(NFE_PHY_HDX | NFE_PHY_100TX | NFE_PHY_1000T);
375 
376 	seed = NFE_READ(sc, NFE_RNDSEED);
377 	seed &= ~NFE_SEED_MASK;
378 
379 	if ((mii->mii_media_active & IFM_GMASK) == IFM_HDX) {
380 		phy  |= NFE_PHY_HDX;	/* half-duplex */
381 		misc |= NFE_MISC1_HDX;
382 	}
383 
384 	switch (IFM_SUBTYPE(mii->mii_media_active)) {
385 	case IFM_1000_T:	/* full-duplex only */
386 		link |= NFE_MEDIA_1000T;
387 		seed |= NFE_SEED_1000T;
388 		phy  |= NFE_PHY_1000T;
389 		break;
390 	case IFM_100_TX:
391 		link |= NFE_MEDIA_100TX;
392 		seed |= NFE_SEED_100TX;
393 		phy  |= NFE_PHY_100TX;
394 		break;
395 	case IFM_10_T:
396 		link |= NFE_MEDIA_10T;
397 		seed |= NFE_SEED_10T;
398 		break;
399 	}
400 
401 	NFE_WRITE(sc, NFE_RNDSEED, seed);	/* XXX: gigabit NICs only? */
402 
403 	NFE_WRITE(sc, NFE_PHY_IFACE, phy);
404 	NFE_WRITE(sc, NFE_MISC1, misc);
405 	NFE_WRITE(sc, NFE_LINKSPEED, link);
406 }
407 
408 int
409 nfe_miibus_readreg(struct device *dev, int phy, int reg)
410 {
411 	struct nfe_softc *sc = (struct nfe_softc *)dev;
412 	uint32_t val;
413 	int ntries;
414 
415 	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
416 
417 	if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
418 		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
419 		DELAY(100);
420 	}
421 
422 	NFE_WRITE(sc, NFE_PHY_CTL, (phy << NFE_PHYADD_SHIFT) | reg);
423 
424 	for (ntries = 0; ntries < 1000; ntries++) {
425 		DELAY(100);
426 		if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
427 			break;
428 	}
429 	if (ntries == 1000) {
430 		DPRINTFN(2, ("%s: timeout waiting for PHY\n",
431 		    sc->sc_dev.dv_xname));
432 		return 0;
433 	}
434 
435 	if (NFE_READ(sc, NFE_PHY_STATUS) & NFE_PHY_ERROR) {
436 		DPRINTFN(2, ("%s: could not read PHY\n",
437 		    sc->sc_dev.dv_xname));
438 		return 0;
439 	}
440 
441 	val = NFE_READ(sc, NFE_PHY_DATA);
442 	if (val != 0xffffffff && val != 0)
443 		sc->mii_phyaddr = phy;
444 
445 	DPRINTFN(2, ("%s: mii read phy %d reg 0x%x ret 0x%x\n",
446 	    sc->sc_dev.dv_xname, phy, reg, val));
447 
448 	return val;
449 }
450 
451 void
452 nfe_miibus_writereg(struct device *dev, int phy, int reg, int val)
453 {
454 	struct nfe_softc *sc = (struct nfe_softc *)dev;
455 	uint32_t ctl;
456 	int ntries;
457 
458 	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
459 
460 	if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
461 		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
462 		DELAY(100);
463 	}
464 
465 	NFE_WRITE(sc, NFE_PHY_DATA, val);
466 	ctl = NFE_PHY_WRITE | (phy << NFE_PHYADD_SHIFT) | reg;
467 	NFE_WRITE(sc, NFE_PHY_CTL, ctl);
468 
469 	for (ntries = 0; ntries < 1000; ntries++) {
470 		DELAY(100);
471 		if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
472 			break;
473 	}
474 #ifdef NFE_DEBUG
475 	if (nfedebug >= 2 && ntries == 1000)
476 		printf("could not write to PHY\n");
477 #endif
478 }
479 
480 int
481 nfe_intr(void *arg)
482 {
483 	struct nfe_softc *sc = arg;
484 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
485 	uint32_t r;
486 
487 	if ((r = NFE_READ(sc, NFE_IRQ_STATUS)) == 0)
488 		return 0;	/* not for us */
489 	NFE_WRITE(sc, NFE_IRQ_STATUS, r);
490 
491 	DPRINTFN(5, ("nfe_intr: interrupt register %x\n", r));
492 
493 	if (r & NFE_IRQ_LINK) {
494 		NFE_READ(sc, NFE_PHY_STATUS);
495 		NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
496 		DPRINTF(("%s: link state changed\n", sc->sc_dev.dv_xname));
497 	}
498 
499 	if (ifp->if_flags & IFF_RUNNING) {
500 		/* check Rx ring */
501 		nfe_rxeof(sc);
502 
503 		/* check Tx ring */
504 		nfe_txeof(sc);
505 	}
506 
507 	return 1;
508 }
509 
510 int
511 nfe_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
512 {
513 	struct nfe_softc *sc = ifp->if_softc;
514 	struct ifreq *ifr = (struct ifreq *)data;
515 	struct ifaddr *ifa = (struct ifaddr *)data;
516 	int s, error = 0;
517 
518 	s = splnet();
519 
520 	switch (cmd) {
521 	case SIOCSIFADDR:
522 		ifp->if_flags |= IFF_UP;
523 		nfe_init(ifp);
524 		switch (ifa->ifa_addr->sa_family) {
525 #ifdef INET
526 		case AF_INET:
527 			arp_ifinit(ifp, ifa);
528 			break;
529 #endif
530 		default:
531 			break;
532 		}
533 		break;
534 	case SIOCSIFMTU:
535 		if (ifr->ifr_mtu < ETHERMIN ||
536 		    ((sc->sc_flags & NFE_USE_JUMBO) &&
537 		    ifr->ifr_mtu > ETHERMTU_JUMBO) ||
538 		    (!(sc->sc_flags & NFE_USE_JUMBO) &&
539 		    ifr->ifr_mtu > ETHERMTU))
540 			error = EINVAL;
541 		else if (ifp->if_mtu != ifr->ifr_mtu)
542 			ifp->if_mtu = ifr->ifr_mtu;
543 		break;
544 	case SIOCSIFFLAGS:
545 		if (ifp->if_flags & IFF_UP) {
546 			/*
547 			 * If only the PROMISC or ALLMULTI flag changes, then
548 			 * don't do a full re-init of the chip, just update
549 			 * the Rx filter.
550 			 */
551 			if ((ifp->if_flags & IFF_RUNNING) &&
552 			    ((ifp->if_flags ^ sc->sc_if_flags) &
553 			     (IFF_ALLMULTI | IFF_PROMISC)) != 0)
554 				nfe_setmulti(sc);
555 			else
556 				nfe_init(ifp);
557 		} else {
558 			if (ifp->if_flags & IFF_RUNNING)
559 				nfe_stop(ifp, 1);
560 		}
561 		sc->sc_if_flags = ifp->if_flags;
562 		break;
563 	case SIOCADDMULTI:
564 	case SIOCDELMULTI:
565 		error = (cmd == SIOCADDMULTI) ?
566 		    ether_addmulti(ifr, &sc->sc_ethercom) :
567 		    ether_delmulti(ifr, &sc->sc_ethercom);
568 
569 		if (error == ENETRESET) {
570 			if (ifp->if_flags & IFF_RUNNING)
571 				nfe_setmulti(sc);
572 			error = 0;
573 		}
574 		break;
575 	case SIOCSIFMEDIA:
576 	case SIOCGIFMEDIA:
577 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
578 		break;
579 	default:
580 		error = ether_ioctl(ifp, cmd, data);
581 		if (error == ENETRESET) {
582 			if (ifp->if_flags & IFF_RUNNING)
583 				nfe_setmulti(sc);
584 			error = 0;
585 		}
586 		break;
587 
588 	}
589 
590 	splx(s);
591 
592 	return error;
593 }
594 
595 void
596 nfe_txdesc32_sync(struct nfe_softc *sc, struct nfe_desc32 *desc32, int ops)
597 {
598 	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
599 	    (caddr_t)desc32 - (caddr_t)sc->txq.desc32,
600 	    sizeof (struct nfe_desc32), ops);
601 }
602 
603 void
604 nfe_txdesc64_sync(struct nfe_softc *sc, struct nfe_desc64 *desc64, int ops)
605 {
606 	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
607 	    (caddr_t)desc64 - (caddr_t)sc->txq.desc64,
608 	    sizeof (struct nfe_desc64), ops);
609 }
610 
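/*
 * The *_rsync() helpers below sync a range of Tx descriptors.  When the
 * range wraps past the end of the ring, the sync is split into two
 * bus_dmamap_sync() calls: 'start' to the end of the ring, then the
 * start of the ring to 'end'.
 */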
611 void
612 nfe_txdesc32_rsync(struct nfe_softc *sc, int start, int end, int ops)
613 {
614 	if (end > start) {
615 		bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
616 		    (caddr_t)&sc->txq.desc32[start] - (caddr_t)sc->txq.desc32,
617 		    (caddr_t)&sc->txq.desc32[end] -
618 		    (caddr_t)&sc->txq.desc32[start], ops);
619 		return;
620 	}
621 	/* sync from 'start' to end of ring */
622 	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
623 	    (caddr_t)&sc->txq.desc32[start] - (caddr_t)sc->txq.desc32,
624 	    (caddr_t)&sc->txq.desc32[NFE_TX_RING_COUNT] -
625 	    (caddr_t)&sc->txq.desc32[start], ops);
626 
627 	/* sync from start of ring to 'end' */
628 	bus_dmamap_sync(sc->sc_dmat, sc->txq.map, 0,
629 	    (caddr_t)&sc->txq.desc32[end] - (caddr_t)sc->txq.desc32, ops);
630 }
631 
632 void
633 nfe_txdesc64_rsync(struct nfe_softc *sc, int start, int end, int ops)
634 {
635 	if (end > start) {
636 		bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
637 		    (caddr_t)&sc->txq.desc64[start] - (caddr_t)sc->txq.desc64,
638 		    (caddr_t)&sc->txq.desc64[end] -
639 		    (caddr_t)&sc->txq.desc64[start], ops);
640 		return;
641 	}
642 	/* sync from 'start' to end of ring */
643 	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
644 	    (caddr_t)&sc->txq.desc64[start] - (caddr_t)sc->txq.desc64,
645 	    (caddr_t)&sc->txq.desc64[NFE_TX_RING_COUNT] -
646 	    (caddr_t)&sc->txq.desc64[start], ops);
647 
648 	/* sync from start of ring to 'end' */
649 	bus_dmamap_sync(sc->sc_dmat, sc->txq.map, 0,
650 	    (caddr_t)&sc->txq.desc64[end] - (caddr_t)sc->txq.desc64, ops);
651 }
652 
653 void
654 nfe_rxdesc32_sync(struct nfe_softc *sc, struct nfe_desc32 *desc32, int ops)
655 {
656 	bus_dmamap_sync(sc->sc_dmat, sc->rxq.map,
657 	    (caddr_t)desc32 - (caddr_t)sc->rxq.desc32,
658 	    sizeof (struct nfe_desc32), ops);
659 }
660 
661 void
662 nfe_rxdesc64_sync(struct nfe_softc *sc, struct nfe_desc64 *desc64, int ops)
663 {
664 	bus_dmamap_sync(sc->sc_dmat, sc->rxq.map,
665 	    (caddr_t)desc64 - (caddr_t)sc->rxq.desc64,
666 	    sizeof (struct nfe_desc64), ops);
667 }
668 
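/*
 * Rx completion: walk the Rx ring from rxq.cur, hand completed frames to
 * the network stack and immediately re-arm each descriptor with a fresh
 * mbuf cluster (or jumbo buffer).  A descriptor whose NFE_RX_READY bit
 * is still set has not been filled by the chip yet, so stop there.
 */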
669 void
670 nfe_rxeof(struct nfe_softc *sc)
671 {
672 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
673 	struct nfe_desc32 *desc32;
674 	struct nfe_desc64 *desc64;
675 	struct nfe_rx_data *data;
676 	struct nfe_jbuf *jbuf;
677 	struct mbuf *m, *mnew;
678 	bus_addr_t physaddr;
679 	uint16_t flags;
680 	int error, len;
681 
682 	desc32 = NULL;
683 	desc64 = NULL;
684 	for (;;) {
685 		data = &sc->rxq.data[sc->rxq.cur];
686 
687 		if (sc->sc_flags & NFE_40BIT_ADDR) {
688 			desc64 = &sc->rxq.desc64[sc->rxq.cur];
689 			nfe_rxdesc64_sync(sc, desc64, BUS_DMASYNC_POSTREAD);
690 
691 			flags = le16toh(desc64->flags);
692 			len = le16toh(desc64->length) & 0x3fff;
693 		} else {
694 			desc32 = &sc->rxq.desc32[sc->rxq.cur];
695 			nfe_rxdesc32_sync(sc, desc32, BUS_DMASYNC_POSTREAD);
696 
697 			flags = le16toh(desc32->flags);
698 			len = le16toh(desc32->length) & 0x3fff;
699 		}
700 
701 		if (flags & NFE_RX_READY)
702 			break;
703 
704 		if ((sc->sc_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
705 			if (!(flags & NFE_RX_VALID_V1))
706 				goto skip;
707 
708 			if ((flags & NFE_RX_FIXME_V1) == NFE_RX_FIXME_V1) {
709 				flags &= ~NFE_RX_ERROR;
710 				len--;	/* fix buffer length */
711 			}
712 		} else {
713 			if (!(flags & NFE_RX_VALID_V2))
714 				goto skip;
715 
716 			if ((flags & NFE_RX_FIXME_V2) == NFE_RX_FIXME_V2) {
717 				flags &= ~NFE_RX_ERROR;
718 				len--;	/* fix buffer length */
719 			}
720 		}
721 
722 		if (flags & NFE_RX_ERROR) {
723 			ifp->if_ierrors++;
724 			goto skip;
725 		}
726 
727 		/*
728 		 * Try to allocate a new mbuf for this ring element and load
729 		 * it before processing the current mbuf. If the ring element
730 		 * cannot be loaded, drop the received packet and reuse the
731 		 * old mbuf. In the unlikely case that the old mbuf can't be
732 		 * reloaded either, explicitly panic.
733 		 */
734 		MGETHDR(mnew, M_DONTWAIT, MT_DATA);
735 		if (mnew == NULL) {
736 			ifp->if_ierrors++;
737 			goto skip;
738 		}
739 
740 		if (sc->sc_flags & NFE_USE_JUMBO) {
741 			if ((jbuf = nfe_jalloc(sc)) == NULL) {
742 				m_freem(mnew);
743 				ifp->if_ierrors++;
744 				goto skip;
745 			}
746 			MEXTADD(mnew, jbuf->buf, NFE_JBYTES, 0, nfe_jfree, sc);
747 
748 			bus_dmamap_sync(sc->sc_dmat, sc->rxq.jmap,
749 			    mtod(data->m, caddr_t) - sc->rxq.jpool, NFE_JBYTES,
750 			    BUS_DMASYNC_POSTREAD);
751 
752 			physaddr = jbuf->physaddr;
753 		} else {
754 			MCLGET(mnew, M_DONTWAIT);
755 			if (!(mnew->m_flags & M_EXT)) {
756 				m_freem(mnew);
757 				ifp->if_ierrors++;
758 				goto skip;
759 			}
760 
761 			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
762 			    data->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
763 			bus_dmamap_unload(sc->sc_dmat, data->map);
764 
765 			error = bus_dmamap_load(sc->sc_dmat, data->map,
766 			    mtod(mnew, void *), MCLBYTES, NULL,
767 			    BUS_DMA_READ | BUS_DMA_NOWAIT);
768 			if (error != 0) {
769 				m_freem(mnew);
770 
771 				/* try to reload the old mbuf */
772 				error = bus_dmamap_load(sc->sc_dmat, data->map,
773 				    mtod(data->m, void *), MCLBYTES, NULL,
774 				    BUS_DMA_READ | BUS_DMA_NOWAIT);
775 				if (error != 0) {
776 					/* very unlikely that it will fail.. */
777 					panic("%s: could not load old rx mbuf",
778 					    sc->sc_dev.dv_xname);
779 				}
780 				ifp->if_ierrors++;
781 				goto skip;
782 			}
783 			physaddr = data->map->dm_segs[0].ds_addr;
784 		}
785 
786 		/*
787 		 * New mbuf successfully loaded, update Rx ring and continue
788 		 * processing.
789 		 */
790 		m = data->m;
791 		data->m = mnew;
792 
793 		/* finalize mbuf */
794 		m->m_pkthdr.len = m->m_len = len;
795 		m->m_pkthdr.rcvif = ifp;
796 
797 #ifdef notyet
798 		if (sc->sc_flags & NFE_HW_CSUM) {
799 			if (flags & NFE_RX_IP_CSUMOK)
800 				m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;
801 			if (flags & NFE_RX_UDP_CSUMOK)
802 				m->m_pkthdr.csum_flags |= M_UDP_CSUM_IN_OK;
803 			if (flags & NFE_RX_TCP_CSUMOK)
804 				m->m_pkthdr.csum_flags |= M_TCP_CSUM_IN_OK;
805 		}
806 #elif defined(NFE_CSUM)
807 		if ((sc->sc_flags & NFE_HW_CSUM) && (flags & NFE_RX_CSUMOK))
808 			m->m_pkthdr.csum_flags = M_IPV4_CSUM_IN_OK;
809 #endif
810 
811 #if NBPFILTER > 0
812 		if (ifp->if_bpf)
813 			bpf_mtap(ifp->if_bpf, m);
814 #endif
815 		ifp->if_ipackets++;
816 		(*ifp->if_input)(ifp, m);
817 
818 		/* update mapping address in h/w descriptor */
819 		if (sc->sc_flags & NFE_40BIT_ADDR) {
820 #if defined(__LP64__)
821 			desc64->physaddr[0] = htole32(physaddr >> 32);
822 #endif
823 			desc64->physaddr[1] = htole32(physaddr & 0xffffffff);
824 		} else {
825 			desc32->physaddr = htole32(physaddr);
826 		}
827 
828 skip:		if (sc->sc_flags & NFE_40BIT_ADDR) {
829 			desc64->length = htole16(sc->rxq.bufsz);
830 			desc64->flags = htole16(NFE_RX_READY);
831 
832 			nfe_rxdesc64_sync(sc, desc64, BUS_DMASYNC_PREWRITE);
833 		} else {
834 			desc32->length = htole16(sc->rxq.bufsz);
835 			desc32->flags = htole16(NFE_RX_READY);
836 
837 			nfe_rxdesc32_sync(sc, desc32, BUS_DMASYNC_PREWRITE);
838 		}
839 
840 		sc->rxq.cur = (sc->rxq.cur + 1) % NFE_RX_RING_COUNT;
841 	}
842 }
843 
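/*
 * Tx completion: reclaim descriptors the chip is done with (NFE_TX_VALID
 * cleared), free the transmitted mbuf chains and clear IFF_OACTIVE so
 * transmission can resume if the ring had filled up.
 */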
844 void
845 nfe_txeof(struct nfe_softc *sc)
846 {
847 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
848 	struct nfe_desc32 *desc32;
849 	struct nfe_desc64 *desc64;
850 	struct nfe_tx_data *data = NULL;
851 	uint16_t flags;
852 
853 	while (sc->txq.next != sc->txq.cur) {
854 		if (sc->sc_flags & NFE_40BIT_ADDR) {
855 			desc64 = &sc->txq.desc64[sc->txq.next];
856 			nfe_txdesc64_sync(sc, desc64, BUS_DMASYNC_POSTREAD);
857 
858 			flags = le16toh(desc64->flags);
859 		} else {
860 			desc32 = &sc->txq.desc32[sc->txq.next];
861 			nfe_txdesc32_sync(sc, desc32, BUS_DMASYNC_POSTREAD);
862 
863 			flags = le16toh(desc32->flags);
864 		}
865 
866 		if (flags & NFE_TX_VALID)
867 			break;
868 
869 		data = &sc->txq.data[sc->txq.next];
870 
871 		if ((sc->sc_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
872 			if (!(flags & NFE_TX_LASTFRAG_V1) && data->m == NULL)
873 				goto skip;
874 
875 			if ((flags & NFE_TX_ERROR_V1) != 0) {
876 				printf("%s: tx v1 error 0x%04x\n",
877 				    sc->sc_dev.dv_xname, flags);
878 				ifp->if_oerrors++;
879 			} else
880 				ifp->if_opackets++;
881 		} else {
882 			if (!(flags & NFE_TX_LASTFRAG_V2) && data->m == NULL)
883 				goto skip;
884 
885 			if ((flags & NFE_TX_ERROR_V2) != 0) {
886 				printf("%s: tx v2 error 0x%04x\n",
887 				    sc->sc_dev.dv_xname, flags);
888 				ifp->if_oerrors++;
889 			} else
890 				ifp->if_opackets++;
891 		}
892 
893 		if (data->m == NULL) {	/* should not get here */
894 			printf("%s: last fragment bit w/o associated mbuf!\n",
895 			    sc->sc_dev.dv_xname);
896 			goto skip;
897 		}
898 
899 		/* last fragment of the mbuf chain transmitted */
900 		bus_dmamap_sync(sc->sc_dmat, data->active, 0,
901 		    data->active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
902 		bus_dmamap_unload(sc->sc_dmat, data->active);
903 		m_freem(data->m);
904 		data->m = NULL;
905 
906 		ifp->if_timer = 0;
907 
908 skip:		sc->txq.queued--;
909 		sc->txq.next = (sc->txq.next + 1) % NFE_TX_RING_COUNT;
910 	}
911 
912 	if (data != NULL) {	/* at least one slot freed */
913 		ifp->if_flags &= ~IFF_OACTIVE;
914 		nfe_start(ifp);
915 	}
916 }
917 
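/*
 * Map an outgoing mbuf chain into Tx descriptors.  Returns ENOBUFS when
 * the ring cannot hold all DMA segments; the caller leaves the packet on
 * the send queue and retries after nfe_txeof() frees some slots.
 */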
918 int
919 nfe_encap(struct nfe_softc *sc, struct mbuf *m0)
920 {
921 	struct nfe_desc32 *desc32;
922 	struct nfe_desc64 *desc64;
923 	struct nfe_tx_data *data;
924 	bus_dmamap_t map;
925 	uint16_t flags = NFE_TX_VALID;
926 #if NVLAN > 0
927 	struct m_tag *mtag;
928 	uint32_t vtag = 0;
929 #endif
930 	int error, i;
931 
932 	desc32 = NULL;
933 	desc64 = NULL;
934 	data = NULL;
935 	map = sc->txq.data[sc->txq.cur].map;
936 
937 	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m0, BUS_DMA_NOWAIT);
938 	if (error != 0) {
939 		printf("%s: could not map mbuf (error %d)\n",
940 		    sc->sc_dev.dv_xname, error);
941 		return error;
942 	}
943 
944 	if (sc->txq.queued + map->dm_nsegs >= NFE_TX_RING_COUNT - 1) {
945 		bus_dmamap_unload(sc->sc_dmat, map);
946 		return ENOBUFS;
947 	}
948 
949 #if NVLAN > 0
950 	/* setup h/w VLAN tagging */
951 	if (sc->sc_ethercom.ec_nvlans != 0 &&
952 	    (mtag = m_tag_find(m0, PACKET_TAG_VLAN, NULL)) != NULL)
953 		vtag = NFE_TX_VTAG | VLAN_TAG_VALUE(mtag);
955 #endif
956 #ifdef NFE_CSUM
957 	if (m0->m_pkthdr.csum_flags & M_IPV4_CSUM_OUT)
958 		flags |= NFE_TX_IP_CSUM;
959 	if (m0->m_pkthdr.csum_flags & (M_TCPV4_CSUM_OUT | M_UDPV4_CSUM_OUT))
960 		flags |= NFE_TX_TCP_CSUM;
961 #endif
962 
963 	for (i = 0; i < map->dm_nsegs; i++) {
964 		data = &sc->txq.data[sc->txq.cur];
965 
966 		if (sc->sc_flags & NFE_40BIT_ADDR) {
967 			desc64 = &sc->txq.desc64[sc->txq.cur];
968 #if defined(__LP64__)
969 			desc64->physaddr[0] =
970 			    htole32(map->dm_segs[i].ds_addr >> 32);
971 #endif
972 			desc64->physaddr[1] =
973 			    htole32(map->dm_segs[i].ds_addr & 0xffffffff);
974 			desc64->length = htole16(map->dm_segs[i].ds_len - 1);
975 			desc64->flags = htole16(flags);
976 #if NVLAN > 0
977 			desc64->vtag = htole32(vtag);
978 #endif
979 		} else {
980 			desc32 = &sc->txq.desc32[sc->txq.cur];
981 
982 			desc32->physaddr = htole32(map->dm_segs[i].ds_addr);
983 			desc32->length = htole16(map->dm_segs[i].ds_len - 1);
984 			desc32->flags = htole16(flags);
985 		}
986 
987 		/* csum flags and vtag belong to the first fragment only */
988 		if (map->dm_nsegs > 1) {
989 			flags &= ~(NFE_TX_IP_CSUM | NFE_TX_TCP_CSUM);
990 #if NVLAN > 0
991 			vtag = 0;
992 #endif
993 		}
994 
995 		sc->txq.queued++;
996 		sc->txq.cur = (sc->txq.cur + 1) % NFE_TX_RING_COUNT;
997 	}
998 
999 	/* the whole mbuf chain has been DMA mapped, fix last descriptor */
1000 	if (sc->sc_flags & NFE_40BIT_ADDR) {
1001 		flags |= NFE_TX_LASTFRAG_V2;
1002 		desc64->flags = htole16(flags);
1003 	} else {
1004 		if (sc->sc_flags & NFE_JUMBO_SUP)
1005 			flags |= NFE_TX_LASTFRAG_V2;
1006 		else
1007 			flags |= NFE_TX_LASTFRAG_V1;
1008 		desc32->flags = htole16(flags);
1009 	}
1010 
1011 	data->m = m0;
1012 	data->active = map;
1013 
1014 	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
1015 	    BUS_DMASYNC_PREWRITE);
1016 
1017 	return 0;
1018 }
1019 
1020 void
1021 nfe_start(struct ifnet *ifp)
1022 {
1023 	struct nfe_softc *sc = ifp->if_softc;
1024 	int old = sc->txq.cur;
1025 	struct mbuf *m0;
1026 
1027 	for (;;) {
1028 		IFQ_POLL(&ifp->if_snd, m0);
1029 		if (m0 == NULL)
1030 			break;
1031 
1032 		if (nfe_encap(sc, m0) != 0) {
1033 			ifp->if_flags |= IFF_OACTIVE;
1034 			break;
1035 		}
1036 
1037 		/* packet put in h/w queue, remove from s/w queue */
1038 		IFQ_DEQUEUE(&ifp->if_snd, m0);
1039 
1040 #if NBPFILTER > 0
1041 		if (ifp->if_bpf != NULL)
1042 			bpf_mtap(ifp->if_bpf, m0);
1043 #endif
1044 	}
1045 	if (sc->txq.cur == old)	/* nothing sent */
1046 		return;
1047 
1048 	if (sc->sc_flags & NFE_40BIT_ADDR)
1049 		nfe_txdesc64_rsync(sc, old, sc->txq.cur, BUS_DMASYNC_PREWRITE);
1050 	else
1051 		nfe_txdesc32_rsync(sc, old, sc->txq.cur, BUS_DMASYNC_PREWRITE);
1052 
1053 	/* kick Tx */
1054 	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_KICKTX | sc->rxtxctl);
1055 
1056 	/*
1057 	 * Set a timeout in case the chip goes out to lunch.
1058 	 */
1059 	ifp->if_timer = 5;
1060 }
1061 
1062 void
1063 nfe_watchdog(struct ifnet *ifp)
1064 {
1065 	struct nfe_softc *sc = ifp->if_softc;
1066 
1067 	printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);
1068 
1069 	ifp->if_flags &= ~IFF_RUNNING;
1070 	nfe_init(ifp);
1071 
1072 	ifp->if_oerrors++;
1073 }
1074 
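/*
 * (Re)initialize the hardware: reset the Tx/Rx engine, program the ring
 * addresses and sizes, restore the MAC address and Rx filter, select the
 * media through the MII layer and enable interrupts.  A no-op if the
 * interface is already running.
 */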
1075 int
1076 nfe_init(struct ifnet *ifp)
1077 {
1078 	struct nfe_softc *sc = ifp->if_softc;
1079 	uint32_t tmp;
1080 
1081 	if (ifp->if_flags & IFF_RUNNING)
1082 		return 0;
1083 
1084 	nfe_stop(ifp, 0);
1085 
1086 	NFE_WRITE(sc, NFE_TX_UNK, 0);
1087 	NFE_WRITE(sc, NFE_STATUS, 0);
1088 
1089 	sc->rxtxctl = NFE_RXTX_BIT2;
1090 	if (sc->sc_flags & NFE_40BIT_ADDR)
1091 		sc->rxtxctl |= NFE_RXTX_V3MAGIC;
1092 	else if (sc->sc_flags & NFE_JUMBO_SUP)
1093 		sc->rxtxctl |= NFE_RXTX_V2MAGIC;
1094 #ifdef NFE_CSUM
1095 	if (sc->sc_flags & NFE_HW_CSUM)
1096 		sc->rxtxctl |= NFE_RXTX_RXCSUM;
1097 #endif
1098 #if NVLAN > 0
1099 	/*
1100 	 * Although the adapter is capable of stripping VLAN tags from received
1101 	 * frames (NFE_RXTX_VTAG_STRIP), we do not enable this functionality on
1102 	 * purpose.  This will be done in software by our network stack.
1103 	 */
1104 	if (sc->sc_flags & NFE_HW_VLAN)
1105 		sc->rxtxctl |= NFE_RXTX_VTAG_INSERT;
1106 #endif
1107 	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | sc->rxtxctl);
1108 	DELAY(10);
1109 	NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);
1110 
1111 #if NVLAN > 0
1112 	if (sc->sc_flags & NFE_HW_VLAN)
1113 		NFE_WRITE(sc, NFE_VTAG_CTL, NFE_VTAG_ENABLE);
1114 #endif
1115 
1116 	NFE_WRITE(sc, NFE_SETUP_R6, 0);
1117 
1118 	/* set MAC address */
1119 	nfe_set_macaddr(sc, sc->sc_enaddr);
1120 
1121 	/* tell MAC where rings are in memory */
1122 #ifdef __LP64__
1123 	NFE_WRITE(sc, NFE_RX_RING_ADDR_HI, sc->rxq.physaddr >> 32);
1124 #endif
1125 	NFE_WRITE(sc, NFE_RX_RING_ADDR_LO, sc->rxq.physaddr & 0xffffffff);
1126 #ifdef __LP64__
1127 	NFE_WRITE(sc, NFE_TX_RING_ADDR_HI, sc->txq.physaddr >> 32);
1128 #endif
1129 	NFE_WRITE(sc, NFE_TX_RING_ADDR_LO, sc->txq.physaddr & 0xffffffff);
1130 
1131 	NFE_WRITE(sc, NFE_RING_SIZE,
1132 	    (NFE_RX_RING_COUNT - 1) << 16 |
1133 	    (NFE_TX_RING_COUNT - 1));
1134 
1135 	NFE_WRITE(sc, NFE_RXBUFSZ, sc->rxq.bufsz);
1136 
1137 	/* force MAC to wake up */
1138 	tmp = NFE_READ(sc, NFE_PWR_STATE);
1139 	NFE_WRITE(sc, NFE_PWR_STATE, tmp | NFE_PWR_WAKEUP);
1140 	DELAY(10);
1141 	tmp = NFE_READ(sc, NFE_PWR_STATE);
1142 	NFE_WRITE(sc, NFE_PWR_STATE, tmp | NFE_PWR_VALID);
1143 
1144 #if 1
1145 	/* configure interrupt coalescing/mitigation */
1146 	NFE_WRITE(sc, NFE_IMTIMER, NFE_IM_DEFAULT);
1147 #else
1148 	/* no interrupt mitigation: one interrupt per packet */
1149 	NFE_WRITE(sc, NFE_IMTIMER, 970);
1150 #endif
1151 
1152 	NFE_WRITE(sc, NFE_SETUP_R1, NFE_R1_MAGIC);
1153 	NFE_WRITE(sc, NFE_SETUP_R2, NFE_R2_MAGIC);
1154 	NFE_WRITE(sc, NFE_SETUP_R6, NFE_R6_MAGIC);
1155 
1156 	/* update MAC knowledge of PHY; generates an NFE_IRQ_LINK interrupt */
1157 	NFE_WRITE(sc, NFE_STATUS, sc->mii_phyaddr << 24 | NFE_STATUS_MAGIC);
1158 
1159 	NFE_WRITE(sc, NFE_SETUP_R4, NFE_R4_MAGIC);
1160 	NFE_WRITE(sc, NFE_WOL_CTL, NFE_WOL_MAGIC);
1161 
1162 	sc->rxtxctl &= ~NFE_RXTX_BIT2;
1163 	NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);
1164 	DELAY(10);
1165 	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT1 | sc->rxtxctl);
1166 
1167 	/* set Rx filter */
1168 	nfe_setmulti(sc);
1169 
1170 	nfe_ifmedia_upd(ifp);
1171 
1172 	/* enable Rx */
1173 	NFE_WRITE(sc, NFE_RX_CTL, NFE_RX_START);
1174 
1175 	/* enable Tx */
1176 	NFE_WRITE(sc, NFE_TX_CTL, NFE_TX_START);
1177 
1178 	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
1179 
1180 	/* enable interrupts */
1181 	NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED);
1182 
1183 	callout_schedule(&sc->sc_tick_ch, hz);
1184 
1185 	ifp->if_flags |= IFF_RUNNING;
1186 	ifp->if_flags &= ~IFF_OACTIVE;
1187 
1188 	return 0;
1189 }
1190 
1191 void
1192 nfe_stop(struct ifnet *ifp, int disable __unused)
1193 {
1194 	struct nfe_softc *sc = ifp->if_softc;
1195 
1196 	callout_stop(&sc->sc_tick_ch);
1197 
1198 	ifp->if_timer = 0;
1199 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
1200 
1201 	mii_down(&sc->sc_mii);
1202 
1203 	/* abort Tx */
1204 	NFE_WRITE(sc, NFE_TX_CTL, 0);
1205 
1206 	/* disable Rx */
1207 	NFE_WRITE(sc, NFE_RX_CTL, 0);
1208 
1209 	/* disable interrupts */
1210 	NFE_WRITE(sc, NFE_IRQ_MASK, 0);
1211 
1212 	/* reset Tx and Rx rings */
1213 	nfe_reset_tx_ring(sc, &sc->txq);
1214 	nfe_reset_rx_ring(sc, &sc->rxq);
1215 }
1216 
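/*
 * Allocate the Rx descriptor ring (32-bit or 64-bit descriptors,
 * depending on NFE_40BIT_ADDR) and pre-load every slot with an mbuf
 * cluster or jumbo buffer.
 */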
1217 int
1218 nfe_alloc_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
1219 {
1220 	struct nfe_desc32 *desc32;
1221 	struct nfe_desc64 *desc64;
1222 	struct nfe_rx_data *data;
1223 	struct nfe_jbuf *jbuf;
1224 	void **desc;
1225 	bus_addr_t physaddr;
1226 	int i, nsegs, error, descsize;
1227 
1228 	if (sc->sc_flags & NFE_40BIT_ADDR) {
1229 		desc = (void **)&ring->desc64;
1230 		descsize = sizeof (struct nfe_desc64);
1231 	} else {
1232 		desc = (void **)&ring->desc32;
1233 		descsize = sizeof (struct nfe_desc32);
1234 	}
1235 
1236 	ring->cur = ring->next = 0;
1237 	ring->bufsz = MCLBYTES;
1238 
1239 	error = bus_dmamap_create(sc->sc_dmat, NFE_RX_RING_COUNT * descsize, 1,
1240 	    NFE_RX_RING_COUNT * descsize, 0, BUS_DMA_NOWAIT, &ring->map);
1241 	if (error != 0) {
1242 		printf("%s: could not create desc DMA map\n",
1243 		    sc->sc_dev.dv_xname);
1244 		goto fail;
1245 	}
1246 
1247 	error = bus_dmamem_alloc(sc->sc_dmat, NFE_RX_RING_COUNT * descsize,
1248 	    PAGE_SIZE, 0, &ring->seg, 1, &nsegs, BUS_DMA_NOWAIT);
1249 	if (error != 0) {
1250 		printf("%s: could not allocate DMA memory\n",
1251 		    sc->sc_dev.dv_xname);
1252 		goto fail;
1253 	}
1254 
1255 	error = bus_dmamem_map(sc->sc_dmat, &ring->seg, nsegs,
1256 	    NFE_RX_RING_COUNT * descsize, (caddr_t *)desc, BUS_DMA_NOWAIT);
1257 	if (error != 0) {
1258 		printf("%s: could not map desc DMA memory\n",
1259 		    sc->sc_dev.dv_xname);
1260 		goto fail;
1261 	}
1262 
1263 	error = bus_dmamap_load(sc->sc_dmat, ring->map, *desc,
1264 	    NFE_RX_RING_COUNT * descsize, NULL, BUS_DMA_NOWAIT);
1265 	if (error != 0) {
1266 		printf("%s: could not load desc DMA map\n",
1267 		    sc->sc_dev.dv_xname);
1268 		goto fail;
1269 	}
1270 
1271 	bzero(*desc, NFE_RX_RING_COUNT * descsize);
1272 	ring->physaddr = ring->map->dm_segs[0].ds_addr;
1273 
1274 	if (sc->sc_flags & NFE_USE_JUMBO) {
1275 		ring->bufsz = NFE_JBYTES;
1276 		if ((error = nfe_jpool_alloc(sc)) != 0) {
1277 			printf("%s: could not allocate jumbo frames\n",
1278 			    sc->sc_dev.dv_xname);
1279 			goto fail;
1280 		}
1281 	}
1282 
1283 	/*
1284 	 * Pre-allocate Rx buffers and populate Rx ring.
1285 	 */
1286 	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
1287 		data = &sc->rxq.data[i];
1288 
1289 		MGETHDR(data->m, M_DONTWAIT, MT_DATA);
1290 		if (data->m == NULL) {
1291 			printf("%s: could not allocate rx mbuf\n",
1292 			    sc->sc_dev.dv_xname);
1293 			error = ENOMEM;
1294 			goto fail;
1295 		}
1296 
1297 		if (sc->sc_flags & NFE_USE_JUMBO) {
1298 			if ((jbuf = nfe_jalloc(sc)) == NULL) {
1299 				printf("%s: could not allocate jumbo buffer\n",
1300 				    sc->sc_dev.dv_xname);
				error = ENOMEM;
1301 				goto fail;
1302 			}
1303 			MEXTADD(data->m, jbuf->buf, NFE_JBYTES, 0, nfe_jfree,
1304 			    sc);
1305 
1306 			physaddr = jbuf->physaddr;
1307 		} else {
1308 			error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
1309 			    MCLBYTES, 0, BUS_DMA_NOWAIT, &data->map);
1310 			if (error != 0) {
1311 				printf("%s: could not create DMA map\n",
1312 				    sc->sc_dev.dv_xname);
1313 				goto fail;
1314 			}
1315 			MCLGET(data->m, M_DONTWAIT);
1316 			if (!(data->m->m_flags & M_EXT)) {
1317 				printf("%s: could not allocate mbuf cluster\n",
1318 				    sc->sc_dev.dv_xname);
1319 				error = ENOMEM;
1320 				goto fail;
1321 			}
1322 
1323 			error = bus_dmamap_load(sc->sc_dmat, data->map,
1324 			    mtod(data->m, void *), MCLBYTES, NULL,
1325 			    BUS_DMA_READ | BUS_DMA_NOWAIT);
1326 			if (error != 0) {
1327 				printf("%s: could not load rx buf DMA map\n",
1328 				    sc->sc_dev.dv_xname);
1329 				goto fail;
1330 			}
1331 			physaddr = data->map->dm_segs[0].ds_addr;
1332 		}
1333 
1334 		if (sc->sc_flags & NFE_40BIT_ADDR) {
1335 			desc64 = &sc->rxq.desc64[i];
1336 #if defined(__LP64__)
1337 			desc64->physaddr[0] = htole32(physaddr >> 32);
1338 #endif
1339 			desc64->physaddr[1] = htole32(physaddr & 0xffffffff);
1340 			desc64->length = htole16(sc->rxq.bufsz);
1341 			desc64->flags = htole16(NFE_RX_READY);
1342 		} else {
1343 			desc32 = &sc->rxq.desc32[i];
1344 			desc32->physaddr = htole32(physaddr);
1345 			desc32->length = htole16(sc->rxq.bufsz);
1346 			desc32->flags = htole16(NFE_RX_READY);
1347 		}
1348 	}
1349 
1350 	bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
1351 	    BUS_DMASYNC_PREWRITE);
1352 
1353 	return 0;
1354 
1355 fail:	nfe_free_rx_ring(sc, ring);
1356 	return error;
1357 }
1358 
1359 void
1360 nfe_reset_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
1361 {
1362 	int i;
1363 
1364 	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
1365 		if (sc->sc_flags & NFE_40BIT_ADDR) {
1366 			ring->desc64[i].length = htole16(ring->bufsz);
1367 			ring->desc64[i].flags = htole16(NFE_RX_READY);
1368 		} else {
1369 			ring->desc32[i].length = htole16(ring->bufsz);
1370 			ring->desc32[i].flags = htole16(NFE_RX_READY);
1371 		}
1372 	}
1373 
1374 	bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
1375 	    BUS_DMASYNC_PREWRITE);
1376 
1377 	ring->cur = ring->next = 0;
1378 }
1379 
1380 void
1381 nfe_free_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
1382 {
1383 	struct nfe_rx_data *data;
1384 	void *desc;
1385 	int i, descsize;
1386 
1387 	if (sc->sc_flags & NFE_40BIT_ADDR) {
1388 		desc = ring->desc64;
1389 		descsize = sizeof (struct nfe_desc64);
1390 	} else {
1391 		desc = ring->desc32;
1392 		descsize = sizeof (struct nfe_desc32);
1393 	}
1394 
1395 	if (desc != NULL) {
1396 		bus_dmamap_sync(sc->sc_dmat, ring->map, 0,
1397 		    ring->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1398 		bus_dmamap_unload(sc->sc_dmat, ring->map);
1399 		bus_dmamem_unmap(sc->sc_dmat, (caddr_t)desc,
1400 		    NFE_RX_RING_COUNT * descsize);
1401 		bus_dmamem_free(sc->sc_dmat, &ring->seg, 1);
1402 	}
1403 
1404 	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
1405 		data = &ring->data[i];
1406 
1407 		if (data->map != NULL) {
1408 			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
1409 			    data->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
1410 			bus_dmamap_unload(sc->sc_dmat, data->map);
1411 			bus_dmamap_destroy(sc->sc_dmat, data->map);
1412 		}
1413 		if (data->m != NULL)
1414 			m_freem(data->m);
1415 	}
1416 }
1417 
1418 struct nfe_jbuf *
1419 nfe_jalloc(struct nfe_softc *sc)
1420 {
1421 	struct nfe_jbuf *jbuf;
1422 
1423 	jbuf = SLIST_FIRST(&sc->rxq.jfreelist);
1424 	if (jbuf == NULL)
1425 		return NULL;
1426 	SLIST_REMOVE_HEAD(&sc->rxq.jfreelist, jnext);
1427 	return jbuf;
1428 }
1429 
1430 /*
1431  * This is called automatically by the network stack when the mbuf is freed.
1432  * Beware that the NIC may already have been reset by the time the mbuf
1433  * is freed.
1434  */
1435 void
1436 nfe_jfree(struct mbuf *m, caddr_t buf, size_t size __unused, void *arg)
1437 {
1438 	struct nfe_softc *sc = arg;
1439 	struct nfe_jbuf *jbuf;
1440 	int i;
1441 
1442 	/* find the jbuf from the base pointer */
1443 	i = (buf - sc->rxq.jpool) / NFE_JBYTES;
1444 	if (i < 0 || i >= NFE_JPOOL_COUNT) {
1445 		printf("%s: request to free a buffer (%p) not managed by us\n",
1446 		    sc->sc_dev.dv_xname, buf);
1447 		return;
1448 	}
1449 	jbuf = &sc->rxq.jbuf[i];
1450 
1451 	/* ..and put it back in the free list */
1452 	SLIST_INSERT_HEAD(&sc->rxq.jfreelist, jbuf, jnext);
1453 
1454 	if (m != NULL)
1455 		pool_cache_put(&mbpool_cache, m);
1456 }
1457 
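/*
 * Allocate one large DMA-able region and carve it into NFE_JPOOL_COUNT
 * jumbo buffers of NFE_JBYTES each, linked on a free list for
 * nfe_jalloc().
 */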
1458 int
1459 nfe_jpool_alloc(struct nfe_softc *sc)
1460 {
1461 	struct nfe_rx_ring *ring = &sc->rxq;
1462 	struct nfe_jbuf *jbuf;
1463 	bus_addr_t physaddr;
1464 	caddr_t buf;
1465 	int i, nsegs, error;
1466 
1467 	/*
1468 	 * Allocate a big chunk of DMA'able memory.
1469 	 */
1470 	error = bus_dmamap_create(sc->sc_dmat, NFE_JPOOL_SIZE, 1,
1471 	    NFE_JPOOL_SIZE, 0, BUS_DMA_NOWAIT, &ring->jmap);
1472 	if (error != 0) {
1473 		printf("%s: could not create jumbo DMA map\n",
1474 		    sc->sc_dev.dv_xname);
1475 		goto fail;
1476 	}
1477 
1478 	error = bus_dmamem_alloc(sc->sc_dmat, NFE_JPOOL_SIZE, PAGE_SIZE, 0,
1479 	    &ring->jseg, 1, &nsegs, BUS_DMA_NOWAIT);
1480 	if (error != 0) {
1481 		printf("%s: could not allocate jumbo DMA memory\n",
1482 		    sc->sc_dev.dv_xname);
1483 		goto fail;
1484 	}
1485 
1486 	error = bus_dmamem_map(sc->sc_dmat, &ring->jseg, nsegs, NFE_JPOOL_SIZE,
1487 	    &ring->jpool, BUS_DMA_NOWAIT);
1488 	if (error != 0) {
1489 		printf("%s: could not map jumbo DMA memory\n",
1490 		    sc->sc_dev.dv_xname);
1491 		goto fail;
1492 	}
1493 
1494 	error = bus_dmamap_load(sc->sc_dmat, ring->jmap, ring->jpool,
1495 	    NFE_JPOOL_SIZE, NULL, BUS_DMA_READ | BUS_DMA_NOWAIT);
1496 	if (error != 0) {
1497 		printf("%s: could not load jumbo DMA map\n",
1498 		    sc->sc_dev.dv_xname);
1499 		goto fail;
1500 	}
1501 
1502 	/* ..and split it into 9KB chunks */
1503 	SLIST_INIT(&ring->jfreelist);
1504 
1505 	buf = ring->jpool;
1506 	physaddr = ring->jmap->dm_segs[0].ds_addr;
1507 	for (i = 0; i < NFE_JPOOL_COUNT; i++) {
1508 		jbuf = &ring->jbuf[i];
1509 
1510 		jbuf->buf = buf;
1511 		jbuf->physaddr = physaddr;
1512 
1513 		SLIST_INSERT_HEAD(&ring->jfreelist, jbuf, jnext);
1514 
1515 		buf += NFE_JBYTES;
1516 		physaddr += NFE_JBYTES;
1517 	}
1518 
1519 	return 0;
1520 
1521 fail:	nfe_jpool_free(sc);
1522 	return error;
1523 }
1524 
1525 void
1526 nfe_jpool_free(struct nfe_softc *sc)
1527 {
1528 	struct nfe_rx_ring *ring = &sc->rxq;
1529 
1530 	if (ring->jmap != NULL) {
1531 		bus_dmamap_sync(sc->sc_dmat, ring->jmap, 0,
1532 		    ring->jmap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1533 		bus_dmamap_unload(sc->sc_dmat, ring->jmap);
1534 		bus_dmamap_destroy(sc->sc_dmat, ring->jmap);
1535 	}
1536 	if (ring->jpool != NULL) {
1537 		bus_dmamem_unmap(sc->sc_dmat, ring->jpool, NFE_JPOOL_SIZE);
1538 		bus_dmamem_free(sc->sc_dmat, &ring->jseg, 1);
1539 	}
1540 }
1541 
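/*
 * Allocate the Tx descriptor ring and one DMA map per slot; mbufs are
 * attached to the maps later, in nfe_encap().
 */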
1542 int
1543 nfe_alloc_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
1544 {
1545 	int i, nsegs, error;
1546 	void **desc;
1547 	int descsize;
1548 
1549 	if (sc->sc_flags & NFE_40BIT_ADDR) {
1550 		desc = (void **)&ring->desc64;
1551 		descsize = sizeof (struct nfe_desc64);
1552 	} else {
1553 		desc = (void **)&ring->desc32;
1554 		descsize = sizeof (struct nfe_desc32);
1555 	}
1556 
1557 	ring->queued = 0;
1558 	ring->cur = ring->next = 0;
1559 
1560 	error = bus_dmamap_create(sc->sc_dmat, NFE_TX_RING_COUNT * descsize, 1,
1561 	    NFE_TX_RING_COUNT * descsize, 0, BUS_DMA_NOWAIT, &ring->map);
1562 
1563 	if (error != 0) {
1564 		printf("%s: could not create desc DMA map\n",
1565 		    sc->sc_dev.dv_xname);
1566 		goto fail;
1567 	}
1568 
1569 	error = bus_dmamem_alloc(sc->sc_dmat, NFE_TX_RING_COUNT * descsize,
1570 	    PAGE_SIZE, 0, &ring->seg, 1, &nsegs, BUS_DMA_NOWAIT);
1571 	if (error != 0) {
1572 		printf("%s: could not allocate DMA memory\n",
1573 		    sc->sc_dev.dv_xname);
1574 		goto fail;
1575 	}
1576 
1577 	error = bus_dmamem_map(sc->sc_dmat, &ring->seg, nsegs,
1578 	    NFE_TX_RING_COUNT * descsize, (caddr_t *)desc, BUS_DMA_NOWAIT);
1579 	if (error != 0) {
1580 		printf("%s: could not map desc DMA memory\n",
1581 		    sc->sc_dev.dv_xname);
1582 		goto fail;
1583 	}
1584 
1585 	error = bus_dmamap_load(sc->sc_dmat, ring->map, *desc,
1586 	    NFE_TX_RING_COUNT * descsize, NULL, BUS_DMA_NOWAIT);
1587 	if (error != 0) {
1588 		printf("%s: could not load desc DMA map\n",
1589 		    sc->sc_dev.dv_xname);
1590 		goto fail;
1591 	}
1592 
1593 	bzero(*desc, NFE_TX_RING_COUNT * descsize);
1594 	ring->physaddr = ring->map->dm_segs[0].ds_addr;
1595 
1596 	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
1597 		error = bus_dmamap_create(sc->sc_dmat, NFE_JBYTES,
1598 		    NFE_MAX_SCATTER, NFE_JBYTES, 0, BUS_DMA_NOWAIT,
1599 		    &ring->data[i].map);
1600 		if (error != 0) {
1601 			printf("%s: could not create DMA map\n",
1602 			    sc->sc_dev.dv_xname);
1603 			goto fail;
1604 		}
1605 	}
1606 
1607 	return 0;
1608 
1609 fail:	nfe_free_tx_ring(sc, ring);
1610 	return error;
1611 }
1612 
1613 void
1614 nfe_reset_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
1615 {
1616 	struct nfe_tx_data *data;
1617 	int i;
1618 
1619 	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
1620 		if (sc->sc_flags & NFE_40BIT_ADDR)
1621 			ring->desc64[i].flags = 0;
1622 		else
1623 			ring->desc32[i].flags = 0;
1624 
1625 		data = &ring->data[i];
1626 
1627 		if (data->m != NULL) {
1628 			bus_dmamap_sync(sc->sc_dmat, data->active, 0,
1629 			    data->active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1630 			bus_dmamap_unload(sc->sc_dmat, data->active);
1631 			m_freem(data->m);
1632 			data->m = NULL;
1633 		}
1634 	}
1635 
1636 	bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
1637 	    BUS_DMASYNC_PREWRITE);
1638 
1639 	ring->queued = 0;
1640 	ring->cur = ring->next = 0;
1641 }
1642 
1643 void
1644 nfe_free_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
1645 {
1646 	struct nfe_tx_data *data;
1647 	void *desc;
1648 	int i, descsize;
1649 
1650 	if (sc->sc_flags & NFE_40BIT_ADDR) {
1651 		desc = ring->desc64;
1652 		descsize = sizeof (struct nfe_desc64);
1653 	} else {
1654 		desc = ring->desc32;
1655 		descsize = sizeof (struct nfe_desc32);
1656 	}
1657 
1658 	if (desc != NULL) {
1659 		bus_dmamap_sync(sc->sc_dmat, ring->map, 0,
1660 		    ring->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1661 		bus_dmamap_unload(sc->sc_dmat, ring->map);
1662 		bus_dmamem_unmap(sc->sc_dmat, (caddr_t)desc,
1663 		    NFE_TX_RING_COUNT * descsize);
1664 		bus_dmamem_free(sc->sc_dmat, &ring->seg, 1);
1665 	}
1666 
1667 	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
1668 		data = &ring->data[i];
1669 
1670 		if (data->m != NULL) {
1671 			bus_dmamap_sync(sc->sc_dmat, data->active, 0,
1672 			    data->active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1673 			bus_dmamap_unload(sc->sc_dmat, data->active);
1674 			m_freem(data->m);
1675 		}
1676 	}
1677 
1678 	/* ..and now actually destroy the DMA mappings */
1679 	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
1680 		data = &ring->data[i];
1681 		if (data->map == NULL)
1682 			continue;
1683 		bus_dmamap_destroy(sc->sc_dmat, data->map);
1684 	}
1685 }
1686 
1687 int
1688 nfe_ifmedia_upd(struct ifnet *ifp)
1689 {
1690 	struct nfe_softc *sc = ifp->if_softc;
1691 	struct mii_data *mii = &sc->sc_mii;
1692 	struct mii_softc *miisc;
1693 
1694 	if (mii->mii_instance != 0) {
1695 		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
1696 			mii_phy_reset(miisc);
1697 	}
1698 	return mii_mediachg(mii);
1699 }
1700 
1701 void
1702 nfe_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
1703 {
1704 	struct nfe_softc *sc = ifp->if_softc;
1705 	struct mii_data *mii = &sc->sc_mii;
1706 
1707 	mii_pollstat(mii);
1708 	ifmr->ifm_status = mii->mii_media_status;
1709 	ifmr->ifm_active = mii->mii_media_active;
1710 }
1711 
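/*
 * Program the Rx filter: collapse the joined multicast groups into a
 * single address/mask pair (bits on which all addresses agree stay
 * significant, differing bits are masked off), or fall back to
 * promiscuous/all-multicast mode when necessary.
 */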
1712 void
1713 nfe_setmulti(struct nfe_softc *sc)
1714 {
1715 	struct ethercom *ec = &sc->sc_ethercom;
1716 	struct ifnet *ifp = &ec->ec_if;
1717 	struct ether_multi *enm;
1718 	struct ether_multistep step;
1719 	uint8_t addr[ETHER_ADDR_LEN], mask[ETHER_ADDR_LEN];
1720 	uint32_t filter = NFE_RXFILTER_MAGIC;
1721 	int i;
1722 
1723 	if ((ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
1724 		bzero(addr, ETHER_ADDR_LEN);
1725 		bzero(mask, ETHER_ADDR_LEN);
1726 		goto done;
1727 	}
1728 
1729 	bcopy(etherbroadcastaddr, addr, ETHER_ADDR_LEN);
1730 	bcopy(etherbroadcastaddr, mask, ETHER_ADDR_LEN);
1731 
1732 	ETHER_FIRST_MULTI(step, ec, enm);
1733 	while (enm != NULL) {
1734 		if (bcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
1735 			ifp->if_flags |= IFF_ALLMULTI;
1736 			bzero(addr, ETHER_ADDR_LEN);
1737 			bzero(mask, ETHER_ADDR_LEN);
1738 			goto done;
1739 		}
1740 		for (i = 0; i < ETHER_ADDR_LEN; i++) {
1741 			addr[i] &=  enm->enm_addrlo[i];
1742 			mask[i] &= ~enm->enm_addrlo[i];
1743 		}
1744 		ETHER_NEXT_MULTI(step, enm);
1745 	}
1746 	for (i = 0; i < ETHER_ADDR_LEN; i++)
1747 		mask[i] |= addr[i];
1748 
1749 done:
1750 	addr[0] |= 0x01;	/* make sure multicast bit is set */
1751 
1752 	NFE_WRITE(sc, NFE_MULTIADDR_HI,
1753 	    addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
1754 	NFE_WRITE(sc, NFE_MULTIADDR_LO,
1755 	    addr[5] <<  8 | addr[4]);
1756 	NFE_WRITE(sc, NFE_MULTIMASK_HI,
1757 	    mask[3] << 24 | mask[2] << 16 | mask[1] << 8 | mask[0]);
1758 	NFE_WRITE(sc, NFE_MULTIMASK_LO,
1759 	    mask[5] <<  8 | mask[4]);
1760 
1761 	filter |= (ifp->if_flags & IFF_PROMISC) ? NFE_PROMISC : NFE_U2M;
1762 	NFE_WRITE(sc, NFE_RXFILTER, filter);
1763 }
1764 
1765 void
1766 nfe_get_macaddr(struct nfe_softc *sc, uint8_t *addr)
1767 {
1768 	uint32_t tmp;
1769 
1770 	tmp = NFE_READ(sc, NFE_MACADDR_LO);
1771 	addr[0] = (tmp >> 8) & 0xff;
1772 	addr[1] = (tmp & 0xff);
1773 
1774 	tmp = NFE_READ(sc, NFE_MACADDR_HI);
1775 	addr[2] = (tmp >> 24) & 0xff;
1776 	addr[3] = (tmp >> 16) & 0xff;
1777 	addr[4] = (tmp >>  8) & 0xff;
1778 	addr[5] = (tmp & 0xff);
1779 }
1780 
1781 void
1782 nfe_set_macaddr(struct nfe_softc *sc, const uint8_t *addr)
1783 {
1784 	NFE_WRITE(sc, NFE_MACADDR_LO,
1785 	    addr[5] <<  8 | addr[4]);
1786 	NFE_WRITE(sc, NFE_MACADDR_HI,
1787 	    addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
1788 }
1789 
1790 void
1791 nfe_tick(void *arg)
1792 {
1793 	struct nfe_softc *sc = arg;
1794 	int s;
1795 
1796 	s = splnet();
1797 	mii_tick(&sc->sc_mii);
1798 	splx(s);
1799 
1800 	callout_schedule(&sc->sc_tick_ch, hz);
1801 }
1802