1 /*	$OpenBSD: if_nfe.c,v 1.127 2024/08/31 16:23:09 deraadt Exp $	*/
2 
3 /*-
4  * Copyright (c) 2006, 2007 Damien Bergamini <damien.bergamini@free.fr>
5  * Copyright (c) 2005, 2006 Jonathan Gray <jsg@openbsd.org>
6  *
7  * Permission to use, copy, modify, and distribute this software for any
8  * purpose with or without fee is hereby granted, provided that the above
9  * copyright notice and this permission notice appear in all copies.
10  *
11  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 /* Driver for NVIDIA nForce MCP Fast Ethernet and Gigabit Ethernet */
21 
22 #include "bpfilter.h"
23 #include "vlan.h"
24 
25 #include <sys/param.h>
26 #include <sys/endian.h>
27 #include <sys/systm.h>
28 #include <sys/sockio.h>
29 #include <sys/mbuf.h>
30 #include <sys/queue.h>
31 #include <sys/device.h>
32 #include <sys/timeout.h>
33 
34 #include <machine/bus.h>
35 
36 #include <net/if.h>
37 #include <net/if_media.h>
38 
39 #include <netinet/in.h>
40 #include <netinet/if_ether.h>
41 
42 #if NBPFILTER > 0
43 #include <net/bpf.h>
44 #endif
45 
46 #include <dev/mii/miivar.h>
47 
48 #include <dev/pci/pcireg.h>
49 #include <dev/pci/pcivar.h>
50 #include <dev/pci/pcidevs.h>
51 
52 #include <dev/pci/if_nfereg.h>
53 #include <dev/pci/if_nfevar.h>
54 
55 int	nfe_match(struct device *, void *, void *);
56 void	nfe_attach(struct device *, struct device *, void *);
57 int	nfe_activate(struct device *, int);
58 void	nfe_miibus_statchg(struct device *);
59 int	nfe_miibus_readreg(struct device *, int, int);
60 void	nfe_miibus_writereg(struct device *, int, int, int);
61 int	nfe_intr(void *);
62 int	nfe_ioctl(struct ifnet *, u_long, caddr_t);
63 void	nfe_txdesc32_sync(struct nfe_softc *, struct nfe_desc32 *, int);
64 void	nfe_txdesc64_sync(struct nfe_softc *, struct nfe_desc64 *, int);
65 void	nfe_txdesc32_rsync(struct nfe_softc *, int, int, int);
66 void	nfe_txdesc64_rsync(struct nfe_softc *, int, int, int);
67 void	nfe_rxdesc32_sync(struct nfe_softc *, struct nfe_desc32 *, int);
68 void	nfe_rxdesc64_sync(struct nfe_softc *, struct nfe_desc64 *, int);
69 void	nfe_rxeof(struct nfe_softc *);
70 void	nfe_txeof(struct nfe_softc *);
71 int	nfe_encap(struct nfe_softc *, struct mbuf *);
72 void	nfe_start(struct ifnet *);
73 void	nfe_watchdog(struct ifnet *);
74 int	nfe_init(struct ifnet *);
75 void	nfe_stop(struct ifnet *, int);
76 int	nfe_alloc_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
77 void	nfe_reset_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
78 void	nfe_free_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
79 int	nfe_alloc_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
80 void	nfe_reset_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
81 void	nfe_free_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
82 int	nfe_ifmedia_upd(struct ifnet *);
83 void	nfe_ifmedia_sts(struct ifnet *, struct ifmediareq *);
84 void	nfe_iff(struct nfe_softc *);
85 void	nfe_get_macaddr(struct nfe_softc *, uint8_t *);
86 void	nfe_set_macaddr(struct nfe_softc *, const uint8_t *);
87 void	nfe_tick(void *);
88 #ifndef SMALL_KERNEL
89 int	nfe_wol(struct ifnet *, int);
90 #endif
91 
92 const struct cfattach nfe_ca = {
93 	sizeof (struct nfe_softc), nfe_match, nfe_attach, NULL,
94 	nfe_activate
95 };
96 
97 struct cfdriver nfe_cd = {
98 	NULL, "nfe", DV_IFNET
99 };
100 
101 #ifdef NFE_DEBUG
102 int nfedebug = 0;
103 #define DPRINTF(x)	do { if (nfedebug) printf x; } while (0)
104 #define DPRINTFN(n,x)	do { if (nfedebug >= (n)) printf x; } while (0)
105 #else
106 #define DPRINTF(x)
107 #define DPRINTFN(n,x)
108 #endif
109 
110 const struct pci_matchid nfe_devices[] = {
111 	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE_LAN },
112 	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_LAN },
113 	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN1 },
114 	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN2 },
115 	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN3 },
116 	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN4 },
117 	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN5 },
118 	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_CK804_LAN1 },
119 	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_CK804_LAN2 },
120 	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN1 },
121 	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN2 },
122 	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP51_LAN1 },
123 	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP51_LAN2 },
124 	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN1 },
125 	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN2 },
126 	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN1 },
127 	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN2 },
128 	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN3 },
129 	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN4 },
130 	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN1 },
131 	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN2 },
132 	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN3 },
133 	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN4 },
134 	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN1 },
135 	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN2 },
136 	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN3 },
137 	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN4 },
138 	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN1 },
139 	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN2 },
140 	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN3 },
141 	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN4 },
142 	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN1 },
143 	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN2 },
144 	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN3 },
145 	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN4 },
146 	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN1 },
147 	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN2 },
148 	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN3 },
149 	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN4 },
150 	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP89_LAN }
151 };
152 
153 int
154 nfe_match(struct device *dev, void *match, void *aux)
155 {
156 	return pci_matchbyid((struct pci_attach_args *)aux, nfe_devices,
157 	    sizeof (nfe_devices) / sizeof (nfe_devices[0]));
158 }
159 
160 int
161 nfe_activate(struct device *self, int act)
162 {
163 	struct nfe_softc *sc = (struct nfe_softc *)self;
164 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
165 
166 	switch (act) {
167 	case DVACT_SUSPEND:
168 		if (ifp->if_flags & IFF_RUNNING)
169 			nfe_stop(ifp, 0);
170 		break;
171 	case DVACT_RESUME:
172 		if (ifp->if_flags & IFF_UP)
173 			nfe_init(ifp);
174 		break;
175 	}
176 	return (0);
177 }
178 
179 
180 void
181 nfe_attach(struct device *parent, struct device *self, void *aux)
182 {
183 	struct nfe_softc *sc = (struct nfe_softc *)self;
184 	struct pci_attach_args *pa = aux;
185 	pci_chipset_tag_t pc = pa->pa_pc;
186 	pci_intr_handle_t ih;
187 	const char *intrstr;
188 	struct ifnet *ifp;
189 	bus_size_t memsize;
190 	pcireg_t memtype;
191 
192 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, NFE_PCI_BA);
193 	if (pci_mapreg_map(pa, NFE_PCI_BA, memtype, 0, &sc->sc_memt,
194 	    &sc->sc_memh, NULL, &memsize, 0)) {
195 		printf(": can't map mem space\n");
196 		return;
197 	}
198 
199 	if (pci_intr_map(pa, &ih) != 0) {
200 		printf(": can't map interrupt\n");
201 		return;
202 	}
203 
204 	intrstr = pci_intr_string(pc, ih);
205 	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, nfe_intr, sc,
206 	    sc->sc_dev.dv_xname);
207 	if (sc->sc_ih == NULL) {
208 		printf(": could not establish interrupt");
209 		if (intrstr != NULL)
210 			printf(" at %s", intrstr);
211 		printf("\n");
212 		return;
213 	}
214 	printf(": %s", intrstr);
215 
216 	sc->sc_dmat = pa->pa_dmat;
217 	sc->sc_flags = 0;
218 
219 	switch (PCI_PRODUCT(pa->pa_id)) {
220 	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN2:
221 	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN3:
222 	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN4:
223 	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN5:
224 		sc->sc_flags |= NFE_JUMBO_SUP | NFE_HW_CSUM;
225 		break;
226 	case PCI_PRODUCT_NVIDIA_MCP51_LAN1:
227 	case PCI_PRODUCT_NVIDIA_MCP51_LAN2:
228 		sc->sc_flags |= NFE_40BIT_ADDR | NFE_PWR_MGMT;
229 		break;
230 	case PCI_PRODUCT_NVIDIA_MCP61_LAN1:
231 	case PCI_PRODUCT_NVIDIA_MCP61_LAN2:
232 	case PCI_PRODUCT_NVIDIA_MCP61_LAN3:
233 	case PCI_PRODUCT_NVIDIA_MCP61_LAN4:
234 	case PCI_PRODUCT_NVIDIA_MCP67_LAN1:
235 	case PCI_PRODUCT_NVIDIA_MCP67_LAN2:
236 	case PCI_PRODUCT_NVIDIA_MCP67_LAN3:
237 	case PCI_PRODUCT_NVIDIA_MCP67_LAN4:
238 	case PCI_PRODUCT_NVIDIA_MCP73_LAN1:
239 	case PCI_PRODUCT_NVIDIA_MCP73_LAN2:
240 	case PCI_PRODUCT_NVIDIA_MCP73_LAN3:
241 	case PCI_PRODUCT_NVIDIA_MCP73_LAN4:
242 		sc->sc_flags |= NFE_40BIT_ADDR | NFE_CORRECT_MACADDR |
243 		    NFE_PWR_MGMT;
244 		break;
245 	case PCI_PRODUCT_NVIDIA_MCP77_LAN1:
246 	case PCI_PRODUCT_NVIDIA_MCP77_LAN2:
247 	case PCI_PRODUCT_NVIDIA_MCP77_LAN3:
248 	case PCI_PRODUCT_NVIDIA_MCP77_LAN4:
249 		sc->sc_flags |= NFE_40BIT_ADDR | NFE_HW_CSUM |
250 		    NFE_CORRECT_MACADDR | NFE_PWR_MGMT;
251 		break;
252 	case PCI_PRODUCT_NVIDIA_MCP79_LAN1:
253 	case PCI_PRODUCT_NVIDIA_MCP79_LAN2:
254 	case PCI_PRODUCT_NVIDIA_MCP79_LAN3:
255 	case PCI_PRODUCT_NVIDIA_MCP79_LAN4:
256 	case PCI_PRODUCT_NVIDIA_MCP89_LAN:
257 		sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM |
258 		    NFE_CORRECT_MACADDR | NFE_PWR_MGMT;
259 		break;
260 	case PCI_PRODUCT_NVIDIA_CK804_LAN1:
261 	case PCI_PRODUCT_NVIDIA_CK804_LAN2:
262 	case PCI_PRODUCT_NVIDIA_MCP04_LAN1:
263 	case PCI_PRODUCT_NVIDIA_MCP04_LAN2:
264 		sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM;
265 		break;
266 	case PCI_PRODUCT_NVIDIA_MCP65_LAN1:
267 	case PCI_PRODUCT_NVIDIA_MCP65_LAN2:
268 	case PCI_PRODUCT_NVIDIA_MCP65_LAN3:
269 	case PCI_PRODUCT_NVIDIA_MCP65_LAN4:
270 		sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR |
271 		    NFE_CORRECT_MACADDR | NFE_PWR_MGMT;
272 		break;
273 	case PCI_PRODUCT_NVIDIA_MCP55_LAN1:
274 	case PCI_PRODUCT_NVIDIA_MCP55_LAN2:
275 		sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM |
276 		    NFE_HW_VLAN | NFE_PWR_MGMT;
277 		break;
278 	}
279 
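	/*
	 * Power-management capable chips: reset the MAC and clear the
	 * wakeup bits so the chip comes up in a known state.
	 */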
280 	if (sc->sc_flags & NFE_PWR_MGMT) {
281 		NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | NFE_RXTX_BIT2);
282 		NFE_WRITE(sc, NFE_MAC_RESET, NFE_MAC_RESET_MAGIC);
283 		DELAY(100);
284 		NFE_WRITE(sc, NFE_MAC_RESET, 0);
285 		DELAY(100);
286 		NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT2);
287 		NFE_WRITE(sc, NFE_PWR2_CTL,
288 		    NFE_READ(sc, NFE_PWR2_CTL) & ~NFE_PWR2_WAKEUP_MASK);
289 	}
290 
291 	nfe_get_macaddr(sc, sc->sc_arpcom.ac_enaddr);
292 	printf(", address %s\n", ether_sprintf(sc->sc_arpcom.ac_enaddr));
293 
294 	/*
295 	 * Allocate Tx and Rx rings.
296 	 */
297 	if (nfe_alloc_tx_ring(sc, &sc->txq) != 0) {
298 		printf("%s: could not allocate Tx ring\n",
299 		    sc->sc_dev.dv_xname);
300 		return;
301 	}
302 
303 	if (nfe_alloc_rx_ring(sc, &sc->rxq) != 0) {
304 		printf("%s: could not allocate Rx ring\n",
305 		    sc->sc_dev.dv_xname);
306 		nfe_free_tx_ring(sc, &sc->txq);
307 		return;
308 	}
309 
310 	ifp = &sc->sc_arpcom.ac_if;
311 	ifp->if_softc = sc;
312 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
313 	ifp->if_ioctl = nfe_ioctl;
314 	ifp->if_start = nfe_start;
315 	ifp->if_watchdog = nfe_watchdog;
316 	ifq_init_maxlen(&ifp->if_snd, NFE_IFQ_MAXLEN);
317 	strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, IFNAMSIZ);
318 
319 	ifp->if_capabilities = IFCAP_VLAN_MTU;
320 
321 #ifndef SMALL_KERNEL
322 	ifp->if_capabilities |= IFCAP_WOL;
323 	ifp->if_wol = nfe_wol;
324 	nfe_wol(ifp, 0);
325 #endif
326 
327 #if NVLAN > 0
328 	if (sc->sc_flags & NFE_HW_VLAN)
329 		ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
330 #endif
331 
332 	if (sc->sc_flags & NFE_HW_CSUM) {
333 		ifp->if_capabilities |= IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 |
334 		    IFCAP_CSUM_UDPv4;
335 	}
336 
337 	sc->sc_mii.mii_ifp = ifp;
338 	sc->sc_mii.mii_readreg = nfe_miibus_readreg;
339 	sc->sc_mii.mii_writereg = nfe_miibus_writereg;
340 	sc->sc_mii.mii_statchg = nfe_miibus_statchg;
341 
342 	ifmedia_init(&sc->sc_mii.mii_media, 0, nfe_ifmedia_upd,
343 	    nfe_ifmedia_sts);
344 	mii_attach(self, &sc->sc_mii, 0xffffffff, MII_PHY_ANY, 0, 0);
345 	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
346 		printf("%s: no PHY found!\n", sc->sc_dev.dv_xname);
347 		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL,
348 		    0, NULL);
349 		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL);
350 	} else
351 		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
352 
353 	if_attach(ifp);
354 	ether_ifattach(ifp);
355 
356 	timeout_set(&sc->sc_tick_ch, nfe_tick, sc);
357 }
358 
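/*
 * MII status change callback: reprogram the MAC's PHY interface, random
 * seed, duplex and link speed registers to match the media the PHY has
 * negotiated.
 */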
359 void
360 nfe_miibus_statchg(struct device *dev)
361 {
362 	struct nfe_softc *sc = (struct nfe_softc *)dev;
363 	struct mii_data *mii = &sc->sc_mii;
364 	uint32_t phy, seed, misc = NFE_MISC1_MAGIC, link = NFE_MEDIA_SET;
365 
366 	phy = NFE_READ(sc, NFE_PHY_IFACE);
367 	phy &= ~(NFE_PHY_HDX | NFE_PHY_100TX | NFE_PHY_1000T);
368 
369 	seed = NFE_READ(sc, NFE_RNDSEED);
370 	seed &= ~NFE_SEED_MASK;
371 
372 	if ((mii->mii_media_active & IFM_GMASK) == IFM_HDX) {
373 		phy  |= NFE_PHY_HDX;	/* half-duplex */
374 		misc |= NFE_MISC1_HDX;
375 	}
376 
377 	switch (IFM_SUBTYPE(mii->mii_media_active)) {
378 	case IFM_1000_T:	/* full-duplex only */
379 		link |= NFE_MEDIA_1000T;
380 		seed |= NFE_SEED_1000T;
381 		phy  |= NFE_PHY_1000T;
382 		break;
383 	case IFM_100_TX:
384 		link |= NFE_MEDIA_100TX;
385 		seed |= NFE_SEED_100TX;
386 		phy  |= NFE_PHY_100TX;
387 		break;
388 	case IFM_10_T:
389 		link |= NFE_MEDIA_10T;
390 		seed |= NFE_SEED_10T;
391 		break;
392 	}
393 
394 	NFE_WRITE(sc, NFE_RNDSEED, seed);	/* XXX: gigabit NICs only? */
395 
396 	NFE_WRITE(sc, NFE_PHY_IFACE, phy);
397 	NFE_WRITE(sc, NFE_MISC1, misc);
398 	NFE_WRITE(sc, NFE_LINKSPEED, link);
399 }
400 
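/*
 * MII bus accessors: register reads and writes go through NFE_PHY_CTL and
 * NFE_PHY_DATA, busy-waiting (up to 100ms) for NFE_PHY_BUSY to clear.
 */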
401 int
402 nfe_miibus_readreg(struct device *dev, int phy, int reg)
403 {
404 	struct nfe_softc *sc = (struct nfe_softc *)dev;
405 	uint32_t val;
406 	int ntries;
407 
408 	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
409 
410 	if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
411 		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
412 		DELAY(100);
413 	}
414 
415 	NFE_WRITE(sc, NFE_PHY_CTL, (phy << NFE_PHYADD_SHIFT) | reg);
416 
417 	for (ntries = 0; ntries < 1000; ntries++) {
418 		DELAY(100);
419 		if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
420 			break;
421 	}
422 	if (ntries == 1000) {
423 		DPRINTFN(2, ("%s: timeout waiting for PHY\n",
424 		    sc->sc_dev.dv_xname));
425 		return 0;
426 	}
427 
428 	if (NFE_READ(sc, NFE_PHY_STATUS) & NFE_PHY_ERROR) {
429 		DPRINTFN(2, ("%s: could not read PHY\n",
430 		    sc->sc_dev.dv_xname));
431 		return 0;
432 	}
433 
434 	val = NFE_READ(sc, NFE_PHY_DATA);
435 	if (val != 0xffffffff && val != 0)
436 		sc->mii_phyaddr = phy;
437 
438 	DPRINTFN(2, ("%s: mii read phy %d reg 0x%x ret 0x%x\n",
439 	    sc->sc_dev.dv_xname, phy, reg, val));
440 
441 	return val;
442 }
443 
444 void
445 nfe_miibus_writereg(struct device *dev, int phy, int reg, int val)
446 {
447 	struct nfe_softc *sc = (struct nfe_softc *)dev;
448 	uint32_t ctl;
449 	int ntries;
450 
451 	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
452 
453 	if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
454 		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
455 		DELAY(100);
456 	}
457 
458 	NFE_WRITE(sc, NFE_PHY_DATA, val);
459 	ctl = NFE_PHY_WRITE | (phy << NFE_PHYADD_SHIFT) | reg;
460 	NFE_WRITE(sc, NFE_PHY_CTL, ctl);
461 
462 	for (ntries = 0; ntries < 1000; ntries++) {
463 		DELAY(100);
464 		if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
465 			break;
466 	}
467 #ifdef NFE_DEBUG
468 	if (nfedebug >= 2 && ntries == 1000)
469 		printf("could not write to PHY\n");
470 #endif
471 }
472 
473 int
474 nfe_intr(void *arg)
475 {
476 	struct nfe_softc *sc = arg;
477 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
478 	uint32_t r;
479 
480 	if ((r = NFE_READ(sc, NFE_IRQ_STATUS) & NFE_IRQ_WANTED) == 0)
481 		return 0;	/* not for us */
482 	NFE_WRITE(sc, NFE_IRQ_STATUS, r);
483 
484 	DPRINTFN(5, ("nfe_intr: interrupt register %x\n", r));
485 
486 	if (r & NFE_IRQ_LINK) {
487 		NFE_READ(sc, NFE_PHY_STATUS);
488 		NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
489 		DPRINTF(("%s: link state changed\n", sc->sc_dev.dv_xname));
490 	}
491 
492 	if (ifp->if_flags & IFF_RUNNING) {
493 		/* check Rx ring */
494 		nfe_rxeof(sc);
495 
496 		/* check Tx ring */
497 		nfe_txeof(sc);
498 	}
499 
500 	return 1;
501 }
502 
503 int
504 nfe_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
505 {
506 	struct nfe_softc *sc = ifp->if_softc;
507 	struct ifreq *ifr = (struct ifreq *)data;
508 	int s, error = 0;
509 
510 	s = splnet();
511 
512 	switch (cmd) {
513 	case SIOCSIFADDR:
514 		ifp->if_flags |= IFF_UP;
515 		if (!(ifp->if_flags & IFF_RUNNING))
516 			nfe_init(ifp);
517 		break;
518 
519 	case SIOCSIFFLAGS:
520 		if (ifp->if_flags & IFF_UP) {
521 			if (ifp->if_flags & IFF_RUNNING)
522 				error = ENETRESET;
523 			else
524 				nfe_init(ifp);
525 		} else {
526 			if (ifp->if_flags & IFF_RUNNING)
527 				nfe_stop(ifp, 1);
528 		}
529 		break;
530 
531 	case SIOCSIFMEDIA:
532 	case SIOCGIFMEDIA:
533 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
534 		break;
535 
536 	default:
537 		error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data);
538 	}
539 
540 	if (error == ENETRESET) {
541 		if (ifp->if_flags & IFF_RUNNING)
542 			nfe_iff(sc);
543 		error = 0;
544 	}
545 
546 	splx(s);
547 	return error;
548 }
549 
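/*
 * Helpers to sync single Tx/Rx descriptors or a range of Tx descriptors;
 * the *_rsync variants handle ranges that wrap around the end of the ring.
 */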
550 void
551 nfe_txdesc32_sync(struct nfe_softc *sc, struct nfe_desc32 *desc32, int ops)
552 {
553 	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
554 	    (caddr_t)desc32 - (caddr_t)sc->txq.desc32,
555 	    sizeof (struct nfe_desc32), ops);
556 }
557 
558 void
559 nfe_txdesc64_sync(struct nfe_softc *sc, struct nfe_desc64 *desc64, int ops)
560 {
561 	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
562 	    (caddr_t)desc64 - (caddr_t)sc->txq.desc64,
563 	    sizeof (struct nfe_desc64), ops);
564 }
565 
566 void
567 nfe_txdesc32_rsync(struct nfe_softc *sc, int start, int end, int ops)
568 {
569 	if (end > start) {
570 		bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
571 		    (caddr_t)&sc->txq.desc32[start] - (caddr_t)sc->txq.desc32,
572 		    (caddr_t)&sc->txq.desc32[end] -
573 		    (caddr_t)&sc->txq.desc32[start], ops);
574 		return;
575 	}
576 	/* sync from 'start' to end of ring */
577 	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
578 	    (caddr_t)&sc->txq.desc32[start] - (caddr_t)sc->txq.desc32,
579 	    (caddr_t)&sc->txq.desc32[NFE_TX_RING_COUNT] -
580 	    (caddr_t)&sc->txq.desc32[start], ops);
581 
582 	/* sync from start of ring to 'end' */
583 	bus_dmamap_sync(sc->sc_dmat, sc->txq.map, 0,
584 	    (caddr_t)&sc->txq.desc32[end] - (caddr_t)sc->txq.desc32, ops);
585 }
586 
587 void
588 nfe_txdesc64_rsync(struct nfe_softc *sc, int start, int end, int ops)
589 {
590 	if (end > start) {
591 		bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
592 		    (caddr_t)&sc->txq.desc64[start] - (caddr_t)sc->txq.desc64,
593 		    (caddr_t)&sc->txq.desc64[end] -
594 		    (caddr_t)&sc->txq.desc64[start], ops);
595 		return;
596 	}
597 	/* sync from 'start' to end of ring */
598 	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
599 	    (caddr_t)&sc->txq.desc64[start] - (caddr_t)sc->txq.desc64,
600 	    (caddr_t)&sc->txq.desc64[NFE_TX_RING_COUNT] -
601 	    (caddr_t)&sc->txq.desc64[start], ops);
602 
603 	/* sync from start of ring to 'end' */
604 	bus_dmamap_sync(sc->sc_dmat, sc->txq.map, 0,
605 	    (caddr_t)&sc->txq.desc64[end] - (caddr_t)sc->txq.desc64, ops);
606 }
607 
608 void
609 nfe_rxdesc32_sync(struct nfe_softc *sc, struct nfe_desc32 *desc32, int ops)
610 {
611 	bus_dmamap_sync(sc->sc_dmat, sc->rxq.map,
612 	    (caddr_t)desc32 - (caddr_t)sc->rxq.desc32,
613 	    sizeof (struct nfe_desc32), ops);
614 }
615 
616 void
617 nfe_rxdesc64_sync(struct nfe_softc *sc, struct nfe_desc64 *desc64, int ops)
618 {
619 	bus_dmamap_sync(sc->sc_dmat, sc->rxq.map,
620 	    (caddr_t)desc64 - (caddr_t)sc->rxq.desc64,
621 	    sizeof (struct nfe_desc64), ops);
622 }
623 
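/*
 * Receive completion: walk the Rx ring from rxq.cur until a descriptor
 * still marked NFE_RX_READY is found, replacing each received mbuf with a
 * freshly loaded cluster before handing the old one to if_input().
 */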
624 void
625 nfe_rxeof(struct nfe_softc *sc)
626 {
627 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
628 	struct nfe_desc32 *desc32;
629 	struct nfe_desc64 *desc64;
630 	struct nfe_rx_data *data;
631 	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
632 	struct mbuf *m, *mnew;
633 	bus_addr_t physaddr;
634 #if NVLAN > 0
635 	uint32_t vtag;
636 #endif
637 	uint16_t flags;
638 	int error, len;
639 
640 	for (;;) {
641 		data = &sc->rxq.data[sc->rxq.cur];
642 
643 		if (sc->sc_flags & NFE_40BIT_ADDR) {
644 			desc64 = &sc->rxq.desc64[sc->rxq.cur];
645 			nfe_rxdesc64_sync(sc, desc64, BUS_DMASYNC_POSTREAD);
646 
647 			flags = letoh16(desc64->flags);
648 			len = letoh16(desc64->length) & 0x3fff;
649 #if NVLAN > 0
650 			vtag = letoh32(desc64->physaddr[1]);
651 #endif
652 		} else {
653 			desc32 = &sc->rxq.desc32[sc->rxq.cur];
654 			nfe_rxdesc32_sync(sc, desc32, BUS_DMASYNC_POSTREAD);
655 
656 			flags = letoh16(desc32->flags);
657 			len = letoh16(desc32->length) & 0x3fff;
658 		}
659 
660 		if (flags & NFE_RX_READY)
661 			break;
662 
663 		if ((sc->sc_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
664 			if (!(flags & NFE_RX_VALID_V1))
665 				goto skip;
666 
667 			if ((flags & NFE_RX_FIXME_V1) == NFE_RX_FIXME_V1) {
668 				flags &= ~NFE_RX_ERROR;
669 				len--;	/* fix buffer length */
670 			}
671 		} else {
672 			if (!(flags & NFE_RX_VALID_V2))
673 				goto skip;
674 
675 			if ((flags & NFE_RX_FIXME_V2) == NFE_RX_FIXME_V2) {
676 				flags &= ~NFE_RX_ERROR;
677 				len--;	/* fix buffer length */
678 			}
679 		}
680 
681 		if (flags & NFE_RX_ERROR) {
682 			ifp->if_ierrors++;
683 			goto skip;
684 		}
685 
686 		/*
687 		 * Try to allocate a new mbuf for this ring element and load
688 		 * it before processing the current mbuf. If the ring element
689 		 * cannot be loaded, drop the received packet and reuse the
690 		 * old mbuf. In the unlikely case that the old mbuf can't be
691 		 * reloaded either, explicitly panic.
692 		 */
693 		mnew = MCLGETL(NULL, M_DONTWAIT, MCLBYTES);
694 		if (mnew == NULL) {
695 			ifp->if_ierrors++;
696 			goto skip;
697 		}
698 		mnew->m_pkthdr.len = mnew->m_len = MCLBYTES;
699 
700 		bus_dmamap_sync(sc->sc_dmat, data->map, 0,
701 		    data->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
702 		bus_dmamap_unload(sc->sc_dmat, data->map);
703 
704 		error = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, mnew,
705 		    BUS_DMA_READ | BUS_DMA_NOWAIT);
706 		if (error != 0) {
707 			m_freem(mnew);
708 
709 			/* try to reload the old mbuf */
710 			error = bus_dmamap_load_mbuf(sc->sc_dmat, data->map,
711 			    m, BUS_DMA_READ | BUS_DMA_NOWAIT);
712 			if (error != 0) {
713 				/* very unlikely that reloading the old mbuf fails */
714 				panic("%s: could not load old rx mbuf",
715 				    sc->sc_dev.dv_xname);
716 			}
717 			ifp->if_ierrors++;
718 			goto skip;
719 		}
720 		physaddr = data->map->dm_segs[0].ds_addr;
721 
722 		/*
723 		 * New mbuf successfully loaded, update Rx ring and continue
724 		 * processing.
725 		 */
726 		m = data->m;
727 		data->m = mnew;
728 
729 		/* finalize mbuf */
730 		m->m_pkthdr.len = m->m_len = len;
731 
732 		if ((sc->sc_flags & NFE_HW_CSUM) &&
733 		    (flags & NFE_RX_IP_CSUMOK)) {
734 			m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;
735 			if (flags & NFE_RX_UDP_CSUMOK)
736 				m->m_pkthdr.csum_flags |= M_UDP_CSUM_IN_OK;
737 			if (flags & NFE_RX_TCP_CSUMOK)
738 				m->m_pkthdr.csum_flags |= M_TCP_CSUM_IN_OK;
739 		}
740 
741 #if NVLAN > 0
742 		if ((vtag & NFE_RX_VTAG) && (sc->sc_flags & NFE_HW_VLAN)) {
743 			m->m_pkthdr.ether_vtag = vtag & 0xffff;
744 			m->m_flags |= M_VLANTAG;
745 		}
746 #endif
747 
748 		ml_enqueue(&ml, m);
749 
750 		/* update mapping address in h/w descriptor */
751 		if (sc->sc_flags & NFE_40BIT_ADDR) {
752 #if defined(__LP64__)
753 			desc64->physaddr[0] = htole32(physaddr >> 32);
754 #endif
755 			desc64->physaddr[1] = htole32(physaddr & 0xffffffff);
756 		} else {
757 			desc32->physaddr = htole32(physaddr);
758 		}
759 
760 skip:		if (sc->sc_flags & NFE_40BIT_ADDR) {
761 			desc64->length = htole16(sc->rxq.bufsz);
762 			desc64->flags = htole16(NFE_RX_READY);
763 
764 			nfe_rxdesc64_sync(sc, desc64, BUS_DMASYNC_PREWRITE);
765 		} else {
766 			desc32->length = htole16(sc->rxq.bufsz);
767 			desc32->flags = htole16(NFE_RX_READY);
768 
769 			nfe_rxdesc32_sync(sc, desc32, BUS_DMASYNC_PREWRITE);
770 		}
771 
772 		sc->rxq.cur = (sc->rxq.cur + 1) % NFE_RX_RING_COUNT;
773 	}
774 	if_input(ifp, &ml);
775 }
776 
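/*
 * Transmit completion: reclaim descriptors between txq.next and txq.cur
 * that the chip is done with, free the associated mbufs and restart
 * output if at least one slot was freed.
 */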
777 void
778 nfe_txeof(struct nfe_softc *sc)
779 {
780 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
781 	struct nfe_desc32 *desc32;
782 	struct nfe_desc64 *desc64;
783 	struct nfe_tx_data *data = NULL;
784 	uint16_t flags;
785 
786 	while (sc->txq.next != sc->txq.cur) {
787 		if (sc->sc_flags & NFE_40BIT_ADDR) {
788 			desc64 = &sc->txq.desc64[sc->txq.next];
789 			nfe_txdesc64_sync(sc, desc64, BUS_DMASYNC_POSTREAD);
790 
791 			flags = letoh16(desc64->flags);
792 		} else {
793 			desc32 = &sc->txq.desc32[sc->txq.next];
794 			nfe_txdesc32_sync(sc, desc32, BUS_DMASYNC_POSTREAD);
795 
796 			flags = letoh16(desc32->flags);
797 		}
798 
799 		if (flags & NFE_TX_VALID)
800 			break;
801 
802 		data = &sc->txq.data[sc->txq.next];
803 
804 		if ((sc->sc_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
805 			if (!(flags & NFE_TX_LASTFRAG_V1) && data->m == NULL)
806 				goto skip;
807 
808 			if ((flags & NFE_TX_ERROR_V1) != 0) {
809 				printf("%s: tx v1 error %b\n",
810 				    sc->sc_dev.dv_xname, flags, NFE_V1_TXERR);
811 				ifp->if_oerrors++;
812 			}
813 		} else {
814 			if (!(flags & NFE_TX_LASTFRAG_V2) && data->m == NULL)
815 				goto skip;
816 
817 			if ((flags & NFE_TX_ERROR_V2) != 0) {
818 				printf("%s: tx v2 error %b\n",
819 				    sc->sc_dev.dv_xname, flags, NFE_V2_TXERR);
820 				ifp->if_oerrors++;
821 			}
822 		}
823 
824 		if (data->m == NULL) {	/* should not get here */
825 			printf("%s: last fragment bit w/o associated mbuf!\n",
826 			    sc->sc_dev.dv_xname);
827 			goto skip;
828 		}
829 
830 		/* last fragment of the mbuf chain transmitted */
831 		bus_dmamap_sync(sc->sc_dmat, data->active, 0,
832 		    data->active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
833 		bus_dmamap_unload(sc->sc_dmat, data->active);
834 		m_freem(data->m);
835 		data->m = NULL;
836 
837 		ifp->if_timer = 0;
838 
839 skip:		sc->txq.queued--;
840 		sc->txq.next = (sc->txq.next + 1) % NFE_TX_RING_COUNT;
841 	}
842 
843 	if (data != NULL) {	/* at least one slot freed */
844 		ifq_clr_oactive(&ifp->if_snd);
845 		nfe_start(ifp);
846 	}
847 }
848 
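/*
 * Load an mbuf chain into the Tx ring: one descriptor per DMA segment,
 * with checksum/VLAN flags carried by the first fragment only and the
 * valid bit of the first descriptor set last, once the whole chain has
 * been set up.
 */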
849 int
850 nfe_encap(struct nfe_softc *sc, struct mbuf *m0)
851 {
852 	struct nfe_desc32 *desc32;
853 	struct nfe_desc64 *desc64;
854 	struct nfe_tx_data *data;
855 	bus_dmamap_t map;
856 	uint16_t flags = 0;
857 	uint32_t vtag = 0;
858 	int error, i, first = sc->txq.cur;
859 
860 	map = sc->txq.data[first].map;
861 
862 	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m0, BUS_DMA_NOWAIT);
863 	if (error != 0) {
864 		printf("%s: can't map mbuf (error %d)\n",
865 		    sc->sc_dev.dv_xname, error);
866 		return error;
867 	}
868 
869 	if (sc->txq.queued + map->dm_nsegs >= NFE_TX_RING_COUNT - 1) {
870 		bus_dmamap_unload(sc->sc_dmat, map);
871 		return ENOBUFS;
872 	}
873 
874 #if NVLAN > 0
875 	/* setup h/w VLAN tagging */
876 	if (m0->m_flags & M_VLANTAG)
877 		vtag = NFE_TX_VTAG | m0->m_pkthdr.ether_vtag;
878 #endif
879 	if (m0->m_pkthdr.csum_flags & M_IPV4_CSUM_OUT)
880 		flags |= NFE_TX_IP_CSUM;
881 	if (m0->m_pkthdr.csum_flags & (M_TCP_CSUM_OUT | M_UDP_CSUM_OUT))
882 		flags |= NFE_TX_TCP_UDP_CSUM;
883 
884 	for (i = 0; i < map->dm_nsegs; i++) {
885 		data = &sc->txq.data[sc->txq.cur];
886 
887 		if (sc->sc_flags & NFE_40BIT_ADDR) {
888 			desc64 = &sc->txq.desc64[sc->txq.cur];
889 #if defined(__LP64__)
890 			desc64->physaddr[0] =
891 			    htole32(map->dm_segs[i].ds_addr >> 32);
892 #endif
893 			desc64->physaddr[1] =
894 			    htole32(map->dm_segs[i].ds_addr & 0xffffffff);
895 			desc64->length = htole16(map->dm_segs[i].ds_len - 1);
896 			desc64->flags = htole16(flags);
897 			desc64->vtag = htole32(vtag);
898 		} else {
899 			desc32 = &sc->txq.desc32[sc->txq.cur];
900 
901 			desc32->physaddr = htole32(map->dm_segs[i].ds_addr);
902 			desc32->length = htole16(map->dm_segs[i].ds_len - 1);
903 			desc32->flags = htole16(flags);
904 		}
905 
906 		if (map->dm_nsegs > 1) {
907 			/*
908 			 * Checksum flags and vtag belong to the first fragment
909 			 * only.
910 			 */
911 			flags &= ~(NFE_TX_IP_CSUM | NFE_TX_TCP_UDP_CSUM);
912 			vtag = 0;
913 
914 			/*
915 			 * Setting of the valid bit in the first descriptor is
916 			 * deferred until the whole chain is fully setup.
917 			 */
918 			flags |= NFE_TX_VALID;
919 		}
920 
921 		sc->txq.queued++;
922 		sc->txq.cur = (sc->txq.cur + 1) % NFE_TX_RING_COUNT;
923 	}
924 
925 	/* the whole mbuf chain has been setup */
926 	if (sc->sc_flags & NFE_40BIT_ADDR) {
927 		/* fix last descriptor */
928 		flags |= NFE_TX_LASTFRAG_V2;
929 		desc64->flags = htole16(flags);
930 
931 		/* finally, set the valid bit in the first descriptor */
932 		sc->txq.desc64[first].flags |= htole16(NFE_TX_VALID);
933 	} else {
934 		/* fix last descriptor */
935 		if (sc->sc_flags & NFE_JUMBO_SUP)
936 			flags |= NFE_TX_LASTFRAG_V2;
937 		else
938 			flags |= NFE_TX_LASTFRAG_V1;
939 		desc32->flags = htole16(flags);
940 
941 		/* finally, set the valid bit in the first descriptor */
942 		sc->txq.desc32[first].flags |= htole16(NFE_TX_VALID);
943 	}
944 
945 	data->m = m0;
946 	data->active = map;
947 
948 	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
949 	    BUS_DMASYNC_PREWRITE);
950 
951 	return 0;
952 }
953 
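/*
 * Start output: dequeue packets from the interface send queue, encapsulate
 * them into the Tx ring and kick the transmitter once at the end.
 */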
954 void
955 nfe_start(struct ifnet *ifp)
956 {
957 	struct nfe_softc *sc = ifp->if_softc;
958 	int old = sc->txq.cur;
959 	struct mbuf *m0;
960 
961 	if (!(ifp->if_flags & IFF_RUNNING) || ifq_is_oactive(&ifp->if_snd))
962 		return;
963 
964 	for (;;) {
965 		m0 = ifq_deq_begin(&ifp->if_snd);
966 		if (m0 == NULL)
967 			break;
968 
969 		if (nfe_encap(sc, m0) != 0) {
970 			ifq_deq_rollback(&ifp->if_snd, m0);
971 			ifq_set_oactive(&ifp->if_snd);
972 			break;
973 		}
974 
975 		/* packet put in h/w queue, remove from s/w queue */
976 		ifq_deq_commit(&ifp->if_snd, m0);
977 
978 #if NBPFILTER > 0
979 		if (ifp->if_bpf != NULL)
980 			bpf_mtap_ether(ifp->if_bpf, m0, BPF_DIRECTION_OUT);
981 #endif
982 	}
983 	if (sc->txq.cur == old)	/* nothing sent */
984 		return;
985 
986 	if (sc->sc_flags & NFE_40BIT_ADDR)
987 		nfe_txdesc64_rsync(sc, old, sc->txq.cur, BUS_DMASYNC_PREWRITE);
988 	else
989 		nfe_txdesc32_rsync(sc, old, sc->txq.cur, BUS_DMASYNC_PREWRITE);
990 
991 	/* kick Tx */
992 	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_KICKTX | sc->rxtxctl);
993 
994 	/*
995 	 * Set a timeout in case the chip goes out to lunch.
996 	 */
997 	ifp->if_timer = 5;
998 }
999 
1000 void
1001 nfe_watchdog(struct ifnet *ifp)
1002 {
1003 	struct nfe_softc *sc = ifp->if_softc;
1004 
1005 	printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);
1006 
1007 	nfe_init(ifp);
1008 
1009 	ifp->if_oerrors++;
1010 }
1011 
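/*
 * Bring the interface up: reset the chip, program the descriptor ring
 * addresses and sizes, the MAC address, interrupt mitigation and the Rx
 * filter, then enable Rx, Tx and interrupts.
 */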
1012 int
1013 nfe_init(struct ifnet *ifp)
1014 {
1015 	struct nfe_softc *sc = ifp->if_softc;
1016 	uint32_t tmp;
1017 
1018 	nfe_stop(ifp, 0);
1019 
1020 	NFE_WRITE(sc, NFE_TX_UNK, 0);
1021 	NFE_WRITE(sc, NFE_STATUS, 0);
1022 
1023 	sc->rxtxctl = NFE_RXTX_BIT2;
1024 	if (sc->sc_flags & NFE_40BIT_ADDR)
1025 		sc->rxtxctl |= NFE_RXTX_V3MAGIC;
1026 	else if (sc->sc_flags & NFE_JUMBO_SUP)
1027 		sc->rxtxctl |= NFE_RXTX_V2MAGIC;
1028 
1029 	if (sc->sc_flags & NFE_HW_CSUM)
1030 		sc->rxtxctl |= NFE_RXTX_RXCSUM;
1031 	if (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING)
1032 		sc->rxtxctl |= NFE_RXTX_VTAG_INSERT | NFE_RXTX_VTAG_STRIP;
1033 
1034 	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | sc->rxtxctl);
1035 	DELAY(10);
1036 	NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);
1037 
1038 	if (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING)
1039 		NFE_WRITE(sc, NFE_VTAG_CTL, NFE_VTAG_ENABLE);
1040 	else
1041 		NFE_WRITE(sc, NFE_VTAG_CTL, 0);
1042 
1043 	NFE_WRITE(sc, NFE_SETUP_R6, 0);
1044 
1045 	/* set MAC address */
1046 	nfe_set_macaddr(sc, sc->sc_arpcom.ac_enaddr);
1047 
1048 	/* tell MAC where rings are in memory */
1049 #ifdef __LP64__
1050 	NFE_WRITE(sc, NFE_RX_RING_ADDR_HI, sc->rxq.physaddr >> 32);
1051 #endif
1052 	NFE_WRITE(sc, NFE_RX_RING_ADDR_LO, sc->rxq.physaddr & 0xffffffff);
1053 #ifdef __LP64__
1054 	NFE_WRITE(sc, NFE_TX_RING_ADDR_HI, sc->txq.physaddr >> 32);
1055 #endif
1056 	NFE_WRITE(sc, NFE_TX_RING_ADDR_LO, sc->txq.physaddr & 0xffffffff);
1057 
1058 	NFE_WRITE(sc, NFE_RING_SIZE,
1059 	    (NFE_RX_RING_COUNT - 1) << 16 |
1060 	    (NFE_TX_RING_COUNT - 1));
1061 
1062 	NFE_WRITE(sc, NFE_RXBUFSZ, sc->rxq.bufsz);
1063 
1064 	/* force MAC to wakeup */
1065 	tmp = NFE_READ(sc, NFE_PWR_STATE);
1066 	NFE_WRITE(sc, NFE_PWR_STATE, tmp | NFE_PWR_WAKEUP);
1067 	DELAY(10);
1068 	tmp = NFE_READ(sc, NFE_PWR_STATE);
1069 	NFE_WRITE(sc, NFE_PWR_STATE, tmp | NFE_PWR_VALID);
1070 
1071 #if 1
1072 	/* configure interrupts coalescing/mitigation */
1073 	NFE_WRITE(sc, NFE_IMTIMER, NFE_IM_DEFAULT);
1074 #else
1075 	/* no interrupt mitigation: one interrupt per packet */
1076 	NFE_WRITE(sc, NFE_IMTIMER, 970);
1077 #endif
1078 
1079 	NFE_WRITE(sc, NFE_SETUP_R1, NFE_R1_MAGIC);
1080 	NFE_WRITE(sc, NFE_SETUP_R2, NFE_R2_MAGIC);
1081 	NFE_WRITE(sc, NFE_SETUP_R6, NFE_R6_MAGIC);
1082 
1083 	/* update MAC knowledge of PHY; generates a NFE_IRQ_LINK interrupt */
1084 	NFE_WRITE(sc, NFE_STATUS, sc->mii_phyaddr << 24 | NFE_STATUS_MAGIC);
1085 
1086 	NFE_WRITE(sc, NFE_SETUP_R4, NFE_R4_MAGIC);
1087 
1088 	sc->rxtxctl &= ~NFE_RXTX_BIT2;
1089 	NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);
1090 	DELAY(10);
1091 	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT1 | sc->rxtxctl);
1092 
1093 	/* program promiscuous mode and multicast filters */
1094 	nfe_iff(sc);
1095 
1096 	nfe_ifmedia_upd(ifp);
1097 
1098 	/* enable Rx */
1099 	NFE_WRITE(sc, NFE_RX_CTL, NFE_RX_START);
1100 
1101 	/* enable Tx */
1102 	NFE_WRITE(sc, NFE_TX_CTL, NFE_TX_START);
1103 
1104 	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
1105 
1106 	/* enable interrupts */
1107 	NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED);
1108 
1109 	timeout_add_sec(&sc->sc_tick_ch, 1);
1110 
1111 	ifp->if_flags |= IFF_RUNNING;
1112 	ifq_clr_oactive(&ifp->if_snd);
1113 
1114 	return 0;
1115 }
1116 
1117 void
1118 nfe_stop(struct ifnet *ifp, int disable)
1119 {
1120 	struct nfe_softc *sc = ifp->if_softc;
1121 
1122 	timeout_del(&sc->sc_tick_ch);
1123 
1124 	ifp->if_timer = 0;
1125 	ifp->if_flags &= ~IFF_RUNNING;
1126 	ifq_clr_oactive(&ifp->if_snd);
1127 
1128 	mii_down(&sc->sc_mii);
1129 
1130 	/* abort Tx */
1131 	NFE_WRITE(sc, NFE_TX_CTL, 0);
1132 
1133 	if ((sc->sc_flags & NFE_WOL) == 0) {
1134 		/* disable Rx */
1135 		NFE_WRITE(sc, NFE_RX_CTL, 0);
1136 
1137 		/* disable interrupts */
1138 		NFE_WRITE(sc, NFE_IRQ_MASK, 0);
1139 	}
1140 
1141 	/* reset Tx and Rx rings */
1142 	nfe_reset_tx_ring(sc, &sc->txq);
1143 	nfe_reset_rx_ring(sc, &sc->rxq);
1144 }
1145 
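/*
 * Allocate the Rx descriptor ring (32- or 64-bit descriptors depending on
 * NFE_40BIT_ADDR) plus one mbuf cluster and DMA map per ring slot.
 */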
1146 int
1147 nfe_alloc_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
1148 {
1149 	struct nfe_desc32 *desc32;
1150 	struct nfe_desc64 *desc64;
1151 	struct nfe_rx_data *data;
1152 	void **desc;
1153 	bus_addr_t physaddr;
1154 	int i, nsegs, error, descsize;
1155 
1156 	if (sc->sc_flags & NFE_40BIT_ADDR) {
1157 		desc = (void **)&ring->desc64;
1158 		descsize = sizeof (struct nfe_desc64);
1159 	} else {
1160 		desc = (void **)&ring->desc32;
1161 		descsize = sizeof (struct nfe_desc32);
1162 	}
1163 
1164 	ring->cur = ring->next = 0;
1165 	ring->bufsz = MCLBYTES;
1166 
1167 	error = bus_dmamap_create(sc->sc_dmat, NFE_RX_RING_COUNT * descsize, 1,
1168 	    NFE_RX_RING_COUNT * descsize, 0, BUS_DMA_NOWAIT, &ring->map);
1169 	if (error != 0) {
1170 		printf("%s: could not create desc DMA map\n",
1171 		    sc->sc_dev.dv_xname);
1172 		goto fail;
1173 	}
1174 
1175 	error = bus_dmamem_alloc(sc->sc_dmat, NFE_RX_RING_COUNT * descsize,
1176 	    PAGE_SIZE, 0, &ring->seg, 1, &nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO);
1177 	if (error != 0) {
1178 		printf("%s: could not allocate DMA memory\n",
1179 		    sc->sc_dev.dv_xname);
1180 		goto fail;
1181 	}
1182 
1183 	error = bus_dmamem_map(sc->sc_dmat, &ring->seg, nsegs,
1184 	    NFE_RX_RING_COUNT * descsize, (caddr_t *)desc, BUS_DMA_NOWAIT);
1185 	if (error != 0) {
1186 		printf("%s: can't map desc DMA memory\n",
1187 		    sc->sc_dev.dv_xname);
1188 		goto fail;
1189 	}
1190 
1191 	error = bus_dmamap_load(sc->sc_dmat, ring->map, *desc,
1192 	    NFE_RX_RING_COUNT * descsize, NULL, BUS_DMA_NOWAIT);
1193 	if (error != 0) {
1194 		printf("%s: could not load desc DMA map\n",
1195 		    sc->sc_dev.dv_xname);
1196 		goto fail;
1197 	}
1198 	ring->physaddr = ring->map->dm_segs[0].ds_addr;
1199 
1200 	/*
1201 	 * Pre-allocate Rx buffers and populate Rx ring.
1202 	 */
1203 	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
1204 		data = &sc->rxq.data[i];
1205 
1206 		data->m = MCLGETL(NULL, M_DONTWAIT, MCLBYTES);
1207 		if (data->m == NULL) {
1208 			printf("%s: could not allocate rx mbuf\n",
1209 			    sc->sc_dev.dv_xname);
1210 			error = ENOMEM;
1211 			goto fail;
1212 		}
1213 		data->m->m_pkthdr.len = data->m->m_len = MCLBYTES;
1214 
1215 		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
1216 		    MCLBYTES, 0, BUS_DMA_NOWAIT, &data->map);
1217 		if (error != 0) {
1218 			printf("%s: could not create DMA map\n",
1219 			    sc->sc_dev.dv_xname);
1220 			goto fail;
1221 		}
1222 
1223 		error = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, data->m,
1224 		    BUS_DMA_READ | BUS_DMA_NOWAIT);
1225 		if (error != 0) {
1226 			printf("%s: could not load rx buf DMA map",
1227 			    sc->sc_dev.dv_xname);
1228 			goto fail;
1229 		}
1230 		physaddr = data->map->dm_segs[0].ds_addr;
1231 
1232 		if (sc->sc_flags & NFE_40BIT_ADDR) {
1233 			desc64 = &sc->rxq.desc64[i];
1234 #if defined(__LP64__)
1235 			desc64->physaddr[0] = htole32(physaddr >> 32);
1236 #endif
1237 			desc64->physaddr[1] = htole32(physaddr & 0xffffffff);
1238 			desc64->length = htole16(sc->rxq.bufsz);
1239 			desc64->flags = htole16(NFE_RX_READY);
1240 		} else {
1241 			desc32 = &sc->rxq.desc32[i];
1242 			desc32->physaddr = htole32(physaddr);
1243 			desc32->length = htole16(sc->rxq.bufsz);
1244 			desc32->flags = htole16(NFE_RX_READY);
1245 		}
1246 	}
1247 
1248 	bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
1249 	    BUS_DMASYNC_PREWRITE);
1250 
1251 	return 0;
1252 
1253 fail:	nfe_free_rx_ring(sc, ring);
1254 	return error;
1255 }
1256 
1257 void
1258 nfe_reset_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
1259 {
1260 	int i;
1261 
1262 	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
1263 		if (sc->sc_flags & NFE_40BIT_ADDR) {
1264 			ring->desc64[i].length = htole16(ring->bufsz);
1265 			ring->desc64[i].flags = htole16(NFE_RX_READY);
1266 		} else {
1267 			ring->desc32[i].length = htole16(ring->bufsz);
1268 			ring->desc32[i].flags = htole16(NFE_RX_READY);
1269 		}
1270 	}
1271 
1272 	bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
1273 	    BUS_DMASYNC_PREWRITE);
1274 
1275 	ring->cur = ring->next = 0;
1276 }
1277 
1278 void
1279 nfe_free_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
1280 {
1281 	struct nfe_rx_data *data;
1282 	void *desc;
1283 	int i, descsize;
1284 
1285 	if (sc->sc_flags & NFE_40BIT_ADDR) {
1286 		desc = ring->desc64;
1287 		descsize = sizeof (struct nfe_desc64);
1288 	} else {
1289 		desc = ring->desc32;
1290 		descsize = sizeof (struct nfe_desc32);
1291 	}
1292 
1293 	if (desc != NULL) {
1294 		bus_dmamap_sync(sc->sc_dmat, ring->map, 0,
1295 		    ring->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1296 		bus_dmamap_unload(sc->sc_dmat, ring->map);
1297 		bus_dmamem_unmap(sc->sc_dmat, (caddr_t)desc,
1298 		    NFE_RX_RING_COUNT * descsize);
1299 		bus_dmamem_free(sc->sc_dmat, &ring->seg, 1);
1300 	}
1301 
1302 	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
1303 		data = &ring->data[i];
1304 
1305 		if (data->map != NULL) {
1306 			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
1307 			    data->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
1308 			bus_dmamap_unload(sc->sc_dmat, data->map);
1309 			bus_dmamap_destroy(sc->sc_dmat, data->map);
1310 		}
1311 		m_freem(data->m);
1312 	}
1313 }
1314 
1315 int
1316 nfe_alloc_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
1317 {
1318 	int i, nsegs, error;
1319 	void **desc;
1320 	int descsize;
1321 
1322 	if (sc->sc_flags & NFE_40BIT_ADDR) {
1323 		desc = (void **)&ring->desc64;
1324 		descsize = sizeof (struct nfe_desc64);
1325 	} else {
1326 		desc = (void **)&ring->desc32;
1327 		descsize = sizeof (struct nfe_desc32);
1328 	}
1329 
1330 	ring->queued = 0;
1331 	ring->cur = ring->next = 0;
1332 
1333 	error = bus_dmamap_create(sc->sc_dmat, NFE_TX_RING_COUNT * descsize, 1,
1334 	    NFE_TX_RING_COUNT * descsize, 0, BUS_DMA_NOWAIT, &ring->map);
1335 
1336 	if (error != 0) {
1337 		printf("%s: could not create desc DMA map\n",
1338 		    sc->sc_dev.dv_xname);
1339 		goto fail;
1340 	}
1341 
1342 	error = bus_dmamem_alloc(sc->sc_dmat, NFE_TX_RING_COUNT * descsize,
1343 	    PAGE_SIZE, 0, &ring->seg, 1, &nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO);
1344 	if (error != 0) {
1345 		printf("%s: could not allocate DMA memory\n",
1346 		    sc->sc_dev.dv_xname);
1347 		goto fail;
1348 	}
1349 
1350 	error = bus_dmamem_map(sc->sc_dmat, &ring->seg, nsegs,
1351 	    NFE_TX_RING_COUNT * descsize, (caddr_t *)desc, BUS_DMA_NOWAIT);
1352 	if (error != 0) {
1353 		printf("%s: can't map desc DMA memory\n",
1354 		    sc->sc_dev.dv_xname);
1355 		goto fail;
1356 	}
1357 
1358 	error = bus_dmamap_load(sc->sc_dmat, ring->map, *desc,
1359 	    NFE_TX_RING_COUNT * descsize, NULL, BUS_DMA_NOWAIT);
1360 	if (error != 0) {
1361 		printf("%s: could not load desc DMA map\n",
1362 		    sc->sc_dev.dv_xname);
1363 		goto fail;
1364 	}
1365 	ring->physaddr = ring->map->dm_segs[0].ds_addr;
1366 
1367 	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
1368 		error = bus_dmamap_create(sc->sc_dmat, NFE_JBYTES,
1369 		    NFE_MAX_SCATTER, NFE_JBYTES, 0, BUS_DMA_NOWAIT,
1370 		    &ring->data[i].map);
1371 		if (error != 0) {
1372 			printf("%s: could not create DMA map\n",
1373 			    sc->sc_dev.dv_xname);
1374 			goto fail;
1375 		}
1376 	}
1377 
1378 	return 0;
1379 
1380 fail:	nfe_free_tx_ring(sc, ring);
1381 	return error;
1382 }
1383 
1384 void
1385 nfe_reset_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
1386 {
1387 	struct nfe_tx_data *data;
1388 	int i;
1389 
1390 	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
1391 		if (sc->sc_flags & NFE_40BIT_ADDR)
1392 			ring->desc64[i].flags = 0;
1393 		else
1394 			ring->desc32[i].flags = 0;
1395 
1396 		data = &ring->data[i];
1397 
1398 		if (data->m != NULL) {
1399 			bus_dmamap_sync(sc->sc_dmat, data->active, 0,
1400 			    data->active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1401 			bus_dmamap_unload(sc->sc_dmat, data->active);
1402 			m_freem(data->m);
1403 			data->m = NULL;
1404 		}
1405 	}
1406 
1407 	bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
1408 	    BUS_DMASYNC_PREWRITE);
1409 
1410 	ring->queued = 0;
1411 	ring->cur = ring->next = 0;
1412 }
1413 
1414 void
1415 nfe_free_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
1416 {
1417 	struct nfe_tx_data *data;
1418 	void *desc;
1419 	int i, descsize;
1420 
1421 	if (sc->sc_flags & NFE_40BIT_ADDR) {
1422 		desc = ring->desc64;
1423 		descsize = sizeof (struct nfe_desc64);
1424 	} else {
1425 		desc = ring->desc32;
1426 		descsize = sizeof (struct nfe_desc32);
1427 	}
1428 
1429 	if (desc != NULL) {
1430 		bus_dmamap_sync(sc->sc_dmat, ring->map, 0,
1431 		    ring->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1432 		bus_dmamap_unload(sc->sc_dmat, ring->map);
1433 		bus_dmamem_unmap(sc->sc_dmat, (caddr_t)desc,
1434 		    NFE_TX_RING_COUNT * descsize);
1435 		bus_dmamem_free(sc->sc_dmat, &ring->seg, 1);
1436 	}
1437 
1438 	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
1439 		data = &ring->data[i];
1440 
1441 		if (data->m != NULL) {
1442 			bus_dmamap_sync(sc->sc_dmat, data->active, 0,
1443 			    data->active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1444 			bus_dmamap_unload(sc->sc_dmat, data->active);
1445 			m_freem(data->m);
1446 		}
1447 	}
1448 
1449 	/* ...and now actually destroy the DMA maps */
1450 	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
1451 		data = &ring->data[i];
1452 		if (data->map == NULL)
1453 			continue;
1454 		bus_dmamap_destroy(sc->sc_dmat, data->map);
1455 	}
1456 }
1457 
1458 int
1459 nfe_ifmedia_upd(struct ifnet *ifp)
1460 {
1461 	struct nfe_softc *sc = ifp->if_softc;
1462 	struct mii_data *mii = &sc->sc_mii;
1463 	struct mii_softc *miisc;
1464 
1465 	if (mii->mii_instance != 0) {
1466 		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
1467 			mii_phy_reset(miisc);
1468 	}
1469 	return mii_mediachg(mii);
1470 }
1471 
1472 void
1473 nfe_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
1474 {
1475 	struct nfe_softc *sc = ifp->if_softc;
1476 	struct mii_data *mii = &sc->sc_mii;
1477 
1478 	mii_pollstat(mii);
1479 	ifmr->ifm_status = mii->mii_media_status;
1480 	ifmr->ifm_active = mii->mii_media_active;
1481 }
1482 
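/*
 * Program the Rx filter: promiscuous/all-multicast modes, or a single
 * address/mask pair derived by AND-ing all joined multicast addresses.
 */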
1483 void
1484 nfe_iff(struct nfe_softc *sc)
1485 {
1486 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
1487 	struct arpcom *ac = &sc->sc_arpcom;
1488 	struct ether_multi *enm;
1489 	struct ether_multistep step;
1490 	uint8_t addr[ETHER_ADDR_LEN], mask[ETHER_ADDR_LEN];
1491 	uint32_t filter;
1492 	int i;
1493 
1494 	filter = NFE_RXFILTER_MAGIC;
1495 	ifp->if_flags &= ~IFF_ALLMULTI;
1496 
1497 	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
1498 		ifp->if_flags |= IFF_ALLMULTI;
1499 		if (ifp->if_flags & IFF_PROMISC)
1500 			filter |= NFE_PROMISC;
1501 		else
1502 			filter |= NFE_U2M;
1503 		bzero(addr, ETHER_ADDR_LEN);
1504 		bzero(mask, ETHER_ADDR_LEN);
1505 	} else {
1506 		filter |= NFE_U2M;
1507 
1508 		bcopy(etherbroadcastaddr, addr, ETHER_ADDR_LEN);
1509 		bcopy(etherbroadcastaddr, mask, ETHER_ADDR_LEN);
1510 
1511 		ETHER_FIRST_MULTI(step, ac, enm);
1512 		while (enm != NULL) {
1513 			for (i = 0; i < ETHER_ADDR_LEN; i++) {
1514 				addr[i] &=  enm->enm_addrlo[i];
1515 				mask[i] &= ~enm->enm_addrlo[i];
1516 			}
1517 
1518 			ETHER_NEXT_MULTI(step, enm);
1519 		}
1520 
1521 		for (i = 0; i < ETHER_ADDR_LEN; i++)
1522 			mask[i] |= addr[i];
1523 	}
1524 
1525 	addr[0] |= 0x01;	/* make sure multicast bit is set */
1526 
1527 	NFE_WRITE(sc, NFE_MULTIADDR_HI,
1528 	    addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
1529 	NFE_WRITE(sc, NFE_MULTIADDR_LO,
1530 	    addr[5] <<  8 | addr[4]);
1531 	NFE_WRITE(sc, NFE_MULTIMASK_HI,
1532 	    mask[3] << 24 | mask[2] << 16 | mask[1] << 8 | mask[0]);
1533 	NFE_WRITE(sc, NFE_MULTIMASK_LO,
1534 	    mask[5] <<  8 | mask[4]);
1535 	NFE_WRITE(sc, NFE_RXFILTER, filter);
1536 }
1537 
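/*
 * Read the station address from the MACADDR registers; chips without
 * NFE_CORRECT_MACADDR store it in byte-reversed order.
 */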
1538 void
1539 nfe_get_macaddr(struct nfe_softc *sc, uint8_t *addr)
1540 {
1541 	uint32_t tmp;
1542 
1543 	if (sc->sc_flags & NFE_CORRECT_MACADDR) {
1544 		tmp = NFE_READ(sc, NFE_MACADDR_HI);
1545 		addr[0] = (tmp & 0xff);
1546 		addr[1] = (tmp >>  8) & 0xff;
1547 		addr[2] = (tmp >> 16) & 0xff;
1548 		addr[3] = (tmp >> 24) & 0xff;
1549 
1550 		tmp = NFE_READ(sc, NFE_MACADDR_LO);
1551 		addr[4] = (tmp & 0xff);
1552 		addr[5] = (tmp >> 8) & 0xff;
1553 
1554 	} else {
1555 		tmp = NFE_READ(sc, NFE_MACADDR_LO);
1556 		addr[0] = (tmp >> 8) & 0xff;
1557 		addr[1] = (tmp & 0xff);
1558 
1559 		tmp = NFE_READ(sc, NFE_MACADDR_HI);
1560 		addr[2] = (tmp >> 24) & 0xff;
1561 		addr[3] = (tmp >> 16) & 0xff;
1562 		addr[4] = (tmp >>  8) & 0xff;
1563 		addr[5] = (tmp & 0xff);
1564 	}
1565 }
1566 
1567 void
1568 nfe_set_macaddr(struct nfe_softc *sc, const uint8_t *addr)
1569 {
1570 	NFE_WRITE(sc, NFE_MACADDR_LO,
1571 	    addr[5] <<  8 | addr[4]);
1572 	NFE_WRITE(sc, NFE_MACADDR_HI,
1573 	    addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
1574 }
1575 
1576 void
1577 nfe_tick(void *arg)
1578 {
1579 	struct nfe_softc *sc = arg;
1580 	int s;
1581 
1582 	s = splnet();
1583 	mii_tick(&sc->sc_mii);
1584 	splx(s);
1585 
1586 	timeout_add_sec(&sc->sc_tick_ch, 1);
1587 }
1588 
1589 #ifndef SMALL_KERNEL
1590 int
1591 nfe_wol(struct ifnet *ifp, int enable)
1592 {
1593 	struct nfe_softc *sc = ifp->if_softc;
1594 
1595 	if (enable) {
1596 		sc->sc_flags |= NFE_WOL;
1597 		NFE_WRITE(sc, NFE_WOL_CTL, NFE_WOL_ENABLE);
1598 	} else {
1599 		sc->sc_flags &= ~NFE_WOL;
1600 		NFE_WRITE(sc, NFE_WOL_CTL, 0);
1601 	}
1602 
1603 	return 0;
1604 }
1605 #endif
1606