/*	$NetBSD: if_nfe.c,v 1.49 2010/01/19 22:07:01 pooka Exp $	*/
/*	$OpenBSD: if_nfe.c,v 1.77 2008/02/05 16:52:50 brad Exp $	*/

/*-
 * Copyright (c) 2006, 2007 Damien Bergamini <damien.bergamini@free.fr>
 * Copyright (c) 2005, 2006 Jonathan Gray <jsg@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/* Driver for NVIDIA nForce MCP Fast Ethernet and Gigabit Ethernet */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_nfe.c,v 1.49 2010/01/19 22:07:01 pooka Exp $");

#include "opt_inet.h"
#include "vlan.h"

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/queue.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/callout.h>
#include <sys/socket.h>

#include <sys/bus.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>
#include <net/if_arp.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/if_inarp.h>
#endif

#if NVLAN > 0
#include <net/if_types.h>
#endif

#include <net/bpf.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_nfereg.h>
#include <dev/pci/if_nfevar.h>

static int nfe_ifflags_cb(struct ethercom *);

int	nfe_match(device_t, cfdata_t, void *);
void	nfe_attach(device_t, device_t, void *);
void	nfe_power(int, void *);
void	nfe_miibus_statchg(device_t);
int	nfe_miibus_readreg(device_t, int, int);
void	nfe_miibus_writereg(device_t, int, int, int);
int	nfe_intr(void *);
int	nfe_ioctl(struct ifnet *, u_long, void *);
void	nfe_txdesc32_sync(struct nfe_softc *, struct nfe_desc32 *, int);
void	nfe_txdesc64_sync(struct nfe_softc *, struct nfe_desc64 *, int);
void	nfe_txdesc32_rsync(struct nfe_softc *, int, int, int);
void	nfe_txdesc64_rsync(struct nfe_softc *, int, int, int);
void	nfe_rxdesc32_sync(struct nfe_softc *, struct nfe_desc32 *, int);
void	nfe_rxdesc64_sync(struct nfe_softc *, struct nfe_desc64 *, int);
void	nfe_rxeof(struct nfe_softc *);
void	nfe_txeof(struct nfe_softc *);
int	nfe_encap(struct nfe_softc *, struct mbuf *);
void	nfe_start(struct ifnet *);
void	nfe_watchdog(struct ifnet *);
int	nfe_init(struct ifnet *);
void	nfe_stop(struct ifnet *, int);
struct	nfe_jbuf *nfe_jalloc(struct nfe_softc *, int);
void	nfe_jfree(struct mbuf *, void *, size_t, void *);
int	nfe_jpool_alloc(struct nfe_softc *);
void	nfe_jpool_free(struct nfe_softc *);
int	nfe_alloc_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
void	nfe_reset_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
void	nfe_free_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
int	nfe_alloc_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
void	nfe_reset_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
void	nfe_free_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
void	nfe_setmulti(struct nfe_softc *);
void	nfe_get_macaddr(struct nfe_softc *, uint8_t *);
void	nfe_set_macaddr(struct nfe_softc *, const uint8_t *);
void	nfe_tick(void *);
void	nfe_poweron(device_t);
bool	nfe_resume(device_t, pmf_qual_t);

CFATTACH_DECL_NEW(nfe, sizeof(struct nfe_softc), nfe_match, nfe_attach,
    NULL, NULL);

/* #define NFE_NO_JUMBO */

#ifdef NFE_DEBUG
int nfedebug = 0;
#define DPRINTF(x)	do { if (nfedebug) printf x; } while (0)
#define DPRINTFN(n,x)	do { if (nfedebug >= (n)) printf x; } while (0)
#else
#define DPRINTF(x)
#define DPRINTFN(n,x)
#endif

/* deal with naming differences */

#define	PCI_PRODUCT_NVIDIA_NFORCE3_LAN2 \
	PCI_PRODUCT_NVIDIA_NFORCE2_400_LAN1
#define	PCI_PRODUCT_NVIDIA_NFORCE3_LAN3 \
	PCI_PRODUCT_NVIDIA_NFORCE2_400_LAN2
#define	PCI_PRODUCT_NVIDIA_NFORCE3_LAN5 \
	PCI_PRODUCT_NVIDIA_NFORCE3_250_LAN

#define	PCI_PRODUCT_NVIDIA_CK804_LAN1 \
	PCI_PRODUCT_NVIDIA_NFORCE4_LAN1
#define	PCI_PRODUCT_NVIDIA_CK804_LAN2 \
	PCI_PRODUCT_NVIDIA_NFORCE4_LAN2

#define	PCI_PRODUCT_NVIDIA_MCP51_LAN1 \
	PCI_PRODUCT_NVIDIA_NFORCE430_LAN1
#define	PCI_PRODUCT_NVIDIA_MCP51_LAN2 \
	PCI_PRODUCT_NVIDIA_NFORCE430_LAN2

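/*
 * The 64-bit descriptor code below keys off __LP64__ to decide whether
 * to program the high 32 bits of DMA addresses; make sure it is defined
 * on 64-bit (_LP64) platforms.
 */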
#ifdef	_LP64
#define	__LP64__ 1
#endif

const struct nfe_product {
	pci_vendor_id_t		vendor;
	pci_product_id_t	product;
} nfe_devices[] = {
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE_LAN },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_LAN },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN5 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_CK804_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_CK804_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP51_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP51_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN4 }
};

int
nfe_match(device_t dev, cfdata_t match, void *aux)
{
	struct pci_attach_args *pa = aux;
	const struct nfe_product *np;
	int i;

	for (i = 0; i < __arraycount(nfe_devices); i++) {
		np = &nfe_devices[i];
		if (PCI_VENDOR(pa->pa_id) == np->vendor &&
		    PCI_PRODUCT(pa->pa_id) == np->product)
			return 1;
	}
	return 0;
}

void
nfe_attach(device_t parent, device_t self, void *aux)
{
	struct nfe_softc *sc = device_private(self);
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr;
	struct ifnet *ifp;
	bus_size_t memsize;
	pcireg_t memtype;
	char devinfo[256];
	int mii_flags = 0;

	sc->sc_dev = self;
	pci_devinfo(pa->pa_id, pa->pa_class, 0, devinfo, sizeof(devinfo));
	aprint_normal(": %s (rev. 0x%02x)\n", devinfo,
	    PCI_REVISION(pa->pa_class));

	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, NFE_PCI_BA);
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		if (pci_mapreg_map(pa, NFE_PCI_BA, memtype, 0, &sc->sc_memt,
		    &sc->sc_memh, NULL, &memsize) == 0)
			break;
		/* FALLTHROUGH */
	default:
		aprint_error_dev(self, "could not map mem space\n");
		return;
	}

	if (pci_intr_map(pa, &ih) != 0) {
		aprint_error_dev(self, "could not map interrupt\n");
		goto fail;
	}

	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, nfe_intr, sc);
	if (sc->sc_ih == NULL) {
		aprint_error_dev(self, "could not establish interrupt");
		if (intrstr != NULL)
			aprint_error(" at %s", intrstr);
		aprint_error("\n");
		goto fail;
	}
	aprint_normal_dev(self, "interrupting at %s\n", intrstr);

	sc->sc_dmat = pa->pa_dmat;

	sc->sc_flags = 0;

	switch (PCI_PRODUCT(pa->pa_id)) {
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN2:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN3:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN4:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN5:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_HW_CSUM;
		break;
	case PCI_PRODUCT_NVIDIA_MCP51_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP51_LAN2:
		sc->sc_flags |= NFE_40BIT_ADDR | NFE_PWR_MGMT;
		break;
	case PCI_PRODUCT_NVIDIA_MCP61_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP61_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP61_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP61_LAN4:
	case PCI_PRODUCT_NVIDIA_MCP67_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP67_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP67_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP67_LAN4:
	case PCI_PRODUCT_NVIDIA_MCP73_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP73_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP73_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP73_LAN4:
		sc->sc_flags |= NFE_40BIT_ADDR | NFE_CORRECT_MACADDR |
		    NFE_PWR_MGMT;
		break;
	case PCI_PRODUCT_NVIDIA_MCP77_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP77_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP77_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP77_LAN4:
		sc->sc_flags |= NFE_40BIT_ADDR | NFE_HW_CSUM |
		    NFE_CORRECT_MACADDR | NFE_PWR_MGMT;
		break;
	case PCI_PRODUCT_NVIDIA_MCP79_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP79_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP79_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP79_LAN4:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM |
		    NFE_CORRECT_MACADDR | NFE_PWR_MGMT;
		break;
	case PCI_PRODUCT_NVIDIA_CK804_LAN1:
	case PCI_PRODUCT_NVIDIA_CK804_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP04_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP04_LAN2:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM;
		break;
	case PCI_PRODUCT_NVIDIA_MCP65_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP65_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP65_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP65_LAN4:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR |
		    NFE_CORRECT_MACADDR | NFE_PWR_MGMT;
		mii_flags = MIIF_DOPAUSE;
		break;
	case PCI_PRODUCT_NVIDIA_MCP55_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP55_LAN2:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM |
		    NFE_HW_VLAN | NFE_PWR_MGMT;
		break;
	}

	nfe_poweron(self);

#ifndef NFE_NO_JUMBO
	/* enable jumbo frames for adapters that support it */
	if (sc->sc_flags & NFE_JUMBO_SUP)
		sc->sc_flags |= NFE_USE_JUMBO;
#endif

	/* Check for reversed ethernet address */
	if ((NFE_READ(sc, NFE_TX_UNK) & NFE_MAC_ADDR_INORDER) != 0)
		sc->sc_flags |= NFE_CORRECT_MACADDR;

	nfe_get_macaddr(sc, sc->sc_enaddr);
	aprint_normal_dev(self, "Ethernet address %s\n",
	    ether_sprintf(sc->sc_enaddr));

	/*
	 * Allocate Tx and Rx rings.
	 */
	if (nfe_alloc_tx_ring(sc, &sc->txq) != 0) {
		aprint_error_dev(self, "could not allocate Tx ring\n");
		goto fail;
	}

	mutex_init(&sc->rxq.mtx, MUTEX_DEFAULT, IPL_NET);

	if (nfe_alloc_rx_ring(sc, &sc->rxq) != 0) {
		aprint_error_dev(self, "could not allocate Rx ring\n");
		nfe_free_tx_ring(sc, &sc->txq);
		goto fail;
	}

	ifp = &sc->sc_ethercom.ec_if;
	ifp->if_softc = sc;
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = nfe_ioctl;
	ifp->if_start = nfe_start;
	ifp->if_stop = nfe_stop;
	ifp->if_watchdog = nfe_watchdog;
	ifp->if_init = nfe_init;
	ifp->if_baudrate = IF_Gbps(1);
	IFQ_SET_MAXLEN(&ifp->if_snd, NFE_IFQ_MAXLEN);
	IFQ_SET_READY(&ifp->if_snd);
	strlcpy(ifp->if_xname, device_xname(self), IFNAMSIZ);

	if (sc->sc_flags & NFE_USE_JUMBO)
		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;

#if NVLAN > 0
	if (sc->sc_flags & NFE_HW_VLAN)
		sc->sc_ethercom.ec_capabilities |=
			ETHERCAP_VLAN_HWTAGGING | ETHERCAP_VLAN_MTU;
#endif
	if (sc->sc_flags & NFE_HW_CSUM) {
		ifp->if_capabilities |=
		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx;
	}

	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = nfe_miibus_readreg;
	sc->sc_mii.mii_writereg = nfe_miibus_writereg;
	sc->sc_mii.mii_statchg = nfe_miibus_statchg;

	sc->sc_ethercom.ec_mii = &sc->sc_mii;
	ifmedia_init(&sc->sc_mii.mii_media, 0, ether_mediachange,
	    ether_mediastatus);

	mii_attach(self, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, mii_flags);

	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		aprint_error_dev(self, "no PHY found!\n");
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL,
		    0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL);
	} else
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);

	if_attach(ifp);
	ether_ifattach(ifp, sc->sc_enaddr);
	ether_set_ifflags_cb(&sc->sc_ethercom, nfe_ifflags_cb);

	callout_init(&sc->sc_tick_ch, 0);
	callout_setfunc(&sc->sc_tick_ch, nfe_tick, sc);

	if (pmf_device_register(self, NULL, nfe_resume))
		pmf_class_network_register(self, ifp);
	else
		aprint_error_dev(self, "couldn't establish power handler\n");

	return;

fail:
	if (sc->sc_ih != NULL) {
		pci_intr_disestablish(pc, sc->sc_ih);
		sc->sc_ih = NULL;
	}
	if (memsize)
		bus_space_unmap(sc->sc_memt, sc->sc_memh, memsize);
}

void
nfe_miibus_statchg(device_t dev)
{
	struct nfe_softc *sc = device_private(dev);
	struct mii_data *mii = &sc->sc_mii;
	uint32_t phy, seed, misc = NFE_MISC1_MAGIC, link = NFE_MEDIA_SET;

	phy = NFE_READ(sc, NFE_PHY_IFACE);
	phy &= ~(NFE_PHY_HDX | NFE_PHY_100TX | NFE_PHY_1000T);

	seed = NFE_READ(sc, NFE_RNDSEED);
	seed &= ~NFE_SEED_MASK;

	if ((mii->mii_media_active & IFM_GMASK) == IFM_HDX) {
		phy  |= NFE_PHY_HDX;	/* half-duplex */
		misc |= NFE_MISC1_HDX;
	}

	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_1000_T:	/* full-duplex only */
		link |= NFE_MEDIA_1000T;
		seed |= NFE_SEED_1000T;
		phy  |= NFE_PHY_1000T;
		break;
	case IFM_100_TX:
		link |= NFE_MEDIA_100TX;
		seed |= NFE_SEED_100TX;
		phy  |= NFE_PHY_100TX;
		break;
	case IFM_10_T:
		link |= NFE_MEDIA_10T;
		seed |= NFE_SEED_10T;
		break;
	}

	NFE_WRITE(sc, NFE_RNDSEED, seed);	/* XXX: gigabit NICs only? */

	NFE_WRITE(sc, NFE_PHY_IFACE, phy);
	NFE_WRITE(sc, NFE_MISC1, misc);
	NFE_WRITE(sc, NFE_LINKSPEED, link);
}

int
nfe_miibus_readreg(device_t dev, int phy, int reg)
{
	struct nfe_softc *sc = device_private(dev);
	uint32_t val;
	int ntries;

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
		DELAY(100);
	}

	NFE_WRITE(sc, NFE_PHY_CTL, (phy << NFE_PHYADD_SHIFT) | reg);

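	/* poll until the PHY controller clears its busy bit */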
	for (ntries = 0; ntries < 1000; ntries++) {
		DELAY(100);
		if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
			break;
	}
	if (ntries == 1000) {
		DPRINTFN(2, ("%s: timeout waiting for PHY\n",
		    device_xname(sc->sc_dev)));
		return 0;
	}

	if (NFE_READ(sc, NFE_PHY_STATUS) & NFE_PHY_ERROR) {
		DPRINTFN(2, ("%s: could not read PHY\n",
		    device_xname(sc->sc_dev)));
		return 0;
	}

	val = NFE_READ(sc, NFE_PHY_DATA);
	if (val != 0xffffffff && val != 0)
		sc->mii_phyaddr = phy;

	DPRINTFN(2, ("%s: mii read phy %d reg 0x%x ret 0x%x\n",
	    device_xname(sc->sc_dev), phy, reg, val));

	return val;
}

void
nfe_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct nfe_softc *sc = device_private(dev);
	uint32_t ctl;
	int ntries;

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
		DELAY(100);
	}

	NFE_WRITE(sc, NFE_PHY_DATA, val);
	ctl = NFE_PHY_WRITE | (phy << NFE_PHYADD_SHIFT) | reg;
	NFE_WRITE(sc, NFE_PHY_CTL, ctl);

	for (ntries = 0; ntries < 1000; ntries++) {
		DELAY(100);
		if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
			break;
	}
#ifdef NFE_DEBUG
	if (nfedebug >= 2 && ntries == 1000)
		printf("%s: could not write to PHY\n",
		    device_xname(sc->sc_dev));
#endif
}

int
nfe_intr(void *arg)
{
	struct nfe_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	uint32_t r;
	int handled;

	if ((ifp->if_flags & IFF_UP) == 0)
		return 0;

	handled = 0;

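	/* read and acknowledge interrupt causes until none remain */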
	for (;;) {
		r = NFE_READ(sc, NFE_IRQ_STATUS);
		if ((r & NFE_IRQ_WANTED) == 0)
			break;

		NFE_WRITE(sc, NFE_IRQ_STATUS, r);
		handled = 1;
		DPRINTFN(5, ("nfe_intr: interrupt register %x\n", r));

		if ((r & (NFE_IRQ_RXERR|NFE_IRQ_RX_NOBUF|NFE_IRQ_RX)) != 0) {
			/* check Rx ring */
			nfe_rxeof(sc);
		}
		if ((r & (NFE_IRQ_TXERR|NFE_IRQ_TXERR2|NFE_IRQ_TX_DONE)) != 0) {
			/* check Tx ring */
			nfe_txeof(sc);
		}
		if ((r & NFE_IRQ_LINK) != 0) {
			NFE_READ(sc, NFE_PHY_STATUS);
			NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
			DPRINTF(("%s: link state changed\n",
			    device_xname(sc->sc_dev)));
		}
	}

	if (handled && !IF_IS_EMPTY(&ifp->if_snd))
		nfe_start(ifp);

	return handled;
}

static int
nfe_ifflags_cb(struct ethercom *ec)
{
	struct ifnet *ifp = &ec->ec_if;
	struct nfe_softc *sc = ifp->if_softc;
	int change = ifp->if_flags ^ sc->sc_if_flags;

	/*
	 * If anything other than flags we can handle on the fly has
	 * changed, do a full re-init of the chip; if only PROMISC
	 * changed, just update the Rx filter.
	 */
	if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0)
		return ENETRESET;
	else if ((change & IFF_PROMISC) != 0)
		nfe_setmulti(sc);

	return 0;
}

int
nfe_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct ifaddr *ifa = (struct ifaddr *)data;
	int s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCINITIFADDR:
		ifp->if_flags |= IFF_UP;
		nfe_init(ifp);
		switch (ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
			arp_ifinit(ifp, ifa);
			break;
#endif
		default:
			break;
		}
		break;
	default:
		if ((error = ether_ioctl(ifp, cmd, data)) != ENETRESET)
			break;

		error = 0;

		if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
			;
		else if (ifp->if_flags & IFF_RUNNING)
			nfe_setmulti(sc);
		break;
	}
	sc->sc_if_flags = ifp->if_flags;

	splx(s);

	return error;
}

void
nfe_txdesc32_sync(struct nfe_softc *sc, struct nfe_desc32 *desc32, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
	    (char *)desc32 - (char *)sc->txq.desc32,
	    sizeof (struct nfe_desc32), ops);
}

void
nfe_txdesc64_sync(struct nfe_softc *sc, struct nfe_desc64 *desc64, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
	    (char *)desc64 - (char *)sc->txq.desc64,
	    sizeof (struct nfe_desc64), ops);
}

void
nfe_txdesc32_rsync(struct nfe_softc *sc, int start, int end, int ops)
{
	if (end > start) {
		bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
		    (char *)&sc->txq.desc32[start] - (char *)sc->txq.desc32,
		    (char *)&sc->txq.desc32[end] -
		    (char *)&sc->txq.desc32[start], ops);
		return;
	}
	/* sync from 'start' to end of ring */
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
	    (char *)&sc->txq.desc32[start] - (char *)sc->txq.desc32,
	    (char *)&sc->txq.desc32[NFE_TX_RING_COUNT] -
	    (char *)&sc->txq.desc32[start], ops);

	/* sync from start of ring to 'end' */
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map, 0,
	    (char *)&sc->txq.desc32[end] - (char *)sc->txq.desc32, ops);
}

void
nfe_txdesc64_rsync(struct nfe_softc *sc, int start, int end, int ops)
{
	if (end > start) {
		bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
		    (char *)&sc->txq.desc64[start] - (char *)sc->txq.desc64,
		    (char *)&sc->txq.desc64[end] -
		    (char *)&sc->txq.desc64[start], ops);
		return;
	}
	/* sync from 'start' to end of ring */
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
	    (char *)&sc->txq.desc64[start] - (char *)sc->txq.desc64,
	    (char *)&sc->txq.desc64[NFE_TX_RING_COUNT] -
	    (char *)&sc->txq.desc64[start], ops);

	/* sync from start of ring to 'end' */
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map, 0,
	    (char *)&sc->txq.desc64[end] - (char *)sc->txq.desc64, ops);
}

void
nfe_rxdesc32_sync(struct nfe_softc *sc, struct nfe_desc32 *desc32, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->rxq.map,
	    (char *)desc32 - (char *)sc->rxq.desc32,
	    sizeof (struct nfe_desc32), ops);
}

void
nfe_rxdesc64_sync(struct nfe_softc *sc, struct nfe_desc64 *desc64, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->rxq.map,
	    (char *)desc64 - (char *)sc->rxq.desc64,
	    sizeof (struct nfe_desc64), ops);
}

void
nfe_rxeof(struct nfe_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_rx_data *data;
	struct nfe_jbuf *jbuf;
	struct mbuf *m, *mnew;
	bus_addr_t physaddr;
	uint16_t flags;
	int error, len, i;

	desc32 = NULL;
	desc64 = NULL;
	for (i = sc->rxq.cur;; i = NFE_RX_NEXTDESC(i)) {
		data = &sc->rxq.data[i];

		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->rxq.desc64[i];
			nfe_rxdesc64_sync(sc, desc64,
			    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

			flags = le16toh(desc64->flags);
			len = le16toh(desc64->length) & 0x3fff;
		} else {
			desc32 = &sc->rxq.desc32[i];
			nfe_rxdesc32_sync(sc, desc32,
			    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

			flags = le16toh(desc32->flags);
			len = le16toh(desc32->length) & 0x3fff;
		}

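		/* NFE_RX_READY still set: the chip still owns this one */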
		if ((flags & NFE_RX_READY) != 0)
			break;

		if ((sc->sc_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
			if ((flags & NFE_RX_VALID_V1) == 0)
				goto skip;

			if ((flags & NFE_RX_FIXME_V1) == NFE_RX_FIXME_V1) {
				flags &= ~NFE_RX_ERROR;
				len--;	/* fix buffer length */
			}
		} else {
			if ((flags & NFE_RX_VALID_V2) == 0)
				goto skip;

			if ((flags & NFE_RX_FIXME_V2) == NFE_RX_FIXME_V2) {
				flags &= ~NFE_RX_ERROR;
				len--;	/* fix buffer length */
			}
		}

		if (flags & NFE_RX_ERROR) {
			ifp->if_ierrors++;
			goto skip;
		}

		/*
		 * Try to allocate a new mbuf for this ring element and load
		 * it before processing the current mbuf. If the ring element
		 * cannot be loaded, drop the received packet and reuse the
		 * old mbuf. In the unlikely case that the old mbuf can't be
		 * reloaded either, explicitly panic.
		 */
		MGETHDR(mnew, M_DONTWAIT, MT_DATA);
		if (mnew == NULL) {
			ifp->if_ierrors++;
			goto skip;
		}

		if (sc->sc_flags & NFE_USE_JUMBO) {
			physaddr =
			    sc->rxq.jbuf[sc->rxq.jbufmap[i]].physaddr;
			if ((jbuf = nfe_jalloc(sc, i)) == NULL) {
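				/*
				 * No free jumbo buffer: fall back to copying
				 * the frame into a regular cluster so the
				 * ring slot keeps its current jumbo buffer.
				 */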
				if (len > MCLBYTES) {
					m_freem(mnew);
					ifp->if_ierrors++;
					goto skip1;
				}
				MCLGET(mnew, M_DONTWAIT);
				if ((mnew->m_flags & M_EXT) == 0) {
					m_freem(mnew);
					ifp->if_ierrors++;
					goto skip1;
				}

				(void)memcpy(mtod(mnew, void *),
				    mtod(data->m, const void *), len);
				m = mnew;
				goto mbufcopied;
			} else {
				MEXTADD(mnew, jbuf->buf, NFE_JBYTES, 0,
				    nfe_jfree, sc);
				bus_dmamap_sync(sc->sc_dmat, sc->rxq.jmap,
				    mtod(data->m, char *) - (char *)sc->rxq.jpool,
				    NFE_JBYTES, BUS_DMASYNC_POSTREAD);

				physaddr = jbuf->physaddr;
			}
		} else {
			MCLGET(mnew, M_DONTWAIT);
			if ((mnew->m_flags & M_EXT) == 0) {
				m_freem(mnew);
				ifp->if_ierrors++;
				goto skip;
			}

			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
			    data->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, data->map);

			error = bus_dmamap_load(sc->sc_dmat, data->map,
			    mtod(mnew, void *), MCLBYTES, NULL,
			    BUS_DMA_READ | BUS_DMA_NOWAIT);
			if (error != 0) {
				m_freem(mnew);

				/* try to reload the old mbuf */
				error = bus_dmamap_load(sc->sc_dmat, data->map,
				    mtod(data->m, void *), MCLBYTES, NULL,
				    BUS_DMA_READ | BUS_DMA_NOWAIT);
				if (error != 0) {
					/* very unlikely that it will fail.. */
					panic("%s: could not load old rx mbuf",
					    device_xname(sc->sc_dev));
				}
				ifp->if_ierrors++;
				goto skip;
			}
			physaddr = data->map->dm_segs[0].ds_addr;
		}

		/*
		 * New mbuf successfully loaded, update Rx ring and continue
		 * processing.
		 */
		m = data->m;
		data->m = mnew;

mbufcopied:
		/* finalize mbuf */
		m->m_pkthdr.len = m->m_len = len;
		m->m_pkthdr.rcvif = ifp;

		if ((sc->sc_flags & NFE_HW_CSUM) != 0) {
			/*
			 * XXX
			 * no way to check M_CSUM_IPv4_BAD or non-IPv4 packets?
			 */
			if (flags & NFE_RX_IP_CSUMOK) {
				m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
				DPRINTFN(3, ("%s: ip4csum-rx ok\n",
				    device_xname(sc->sc_dev)));
			}
			/*
			 * XXX
			 * no way to check M_CSUM_TCP_UDP_BAD or
			 * other protocols?
			 */
			if (flags & NFE_RX_UDP_CSUMOK) {
				m->m_pkthdr.csum_flags |= M_CSUM_UDPv4;
				DPRINTFN(3, ("%s: udp4csum-rx ok\n",
				    device_xname(sc->sc_dev)));
			} else if (flags & NFE_RX_TCP_CSUMOK) {
				m->m_pkthdr.csum_flags |= M_CSUM_TCPv4;
				DPRINTFN(3, ("%s: tcp4csum-rx ok\n",
				    device_xname(sc->sc_dev)));
			}
		}
		if (ifp->if_bpf)
			bpf_ops->bpf_mtap(ifp->if_bpf, m);
		ifp->if_ipackets++;
		(*ifp->if_input)(ifp, m);

skip1:
		/* update mapping address in h/w descriptor */
		if (sc->sc_flags & NFE_40BIT_ADDR) {
#if defined(__LP64__)
			desc64->physaddr[0] = htole32(physaddr >> 32);
#endif
			desc64->physaddr[1] = htole32(physaddr & 0xffffffff);
		} else {
			desc32->physaddr = htole32(physaddr);
		}

skip:
		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64->length = htole16(sc->rxq.bufsz);
			desc64->flags = htole16(NFE_RX_READY);

			nfe_rxdesc64_sync(sc, desc64,
			    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
		} else {
			desc32->length = htole16(sc->rxq.bufsz);
			desc32->flags = htole16(NFE_RX_READY);

			nfe_rxdesc32_sync(sc, desc32,
			    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
		}
	}
	/* update current RX pointer */
	sc->rxq.cur = i;
}

void
nfe_txeof(struct nfe_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_tx_data *data = NULL;
	int i;
	uint16_t flags;
	char buf[128];

	for (i = sc->txq.next;
	    sc->txq.queued > 0;
	    i = NFE_TX_NEXTDESC(i), sc->txq.queued--) {
		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->txq.desc64[i];
			nfe_txdesc64_sync(sc, desc64,
			    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

			flags = le16toh(desc64->flags);
		} else {
			desc32 = &sc->txq.desc32[i];
			nfe_txdesc32_sync(sc, desc32,
			    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

			flags = le16toh(desc32->flags);
		}

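		/* NFE_TX_VALID still set: the chip has not sent this yet */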
		if ((flags & NFE_TX_VALID) != 0)
			break;

		data = &sc->txq.data[i];

		if ((sc->sc_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
			if ((flags & NFE_TX_LASTFRAG_V1) == 0 &&
			    data->m == NULL)
				continue;

			if ((flags & NFE_TX_ERROR_V1) != 0) {
				snprintb(buf, sizeof(buf), NFE_V1_TXERR, flags);
				aprint_error_dev(sc->sc_dev, "tx v1 error %s\n",
				    buf);
				ifp->if_oerrors++;
			} else
				ifp->if_opackets++;
		} else {
			if ((flags & NFE_TX_LASTFRAG_V2) == 0 &&
			    data->m == NULL)
				continue;

			if ((flags & NFE_TX_ERROR_V2) != 0) {
				snprintb(buf, sizeof(buf), NFE_V2_TXERR, flags);
				aprint_error_dev(sc->sc_dev, "tx v2 error %s\n",
				    buf);
				ifp->if_oerrors++;
			} else
				ifp->if_opackets++;
		}

		if (data->m == NULL) {	/* should not get there */
			aprint_error_dev(sc->sc_dev,
			    "last fragment bit w/o associated mbuf!\n");
			continue;
		}

		/* last fragment of the mbuf chain transmitted */
		bus_dmamap_sync(sc->sc_dmat, data->active, 0,
		    data->active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, data->active);
		m_freem(data->m);
		data->m = NULL;
	}

	sc->txq.next = i;

	if (sc->txq.queued < NFE_TX_RING_COUNT) {
		/* at least one slot freed */
		ifp->if_flags &= ~IFF_OACTIVE;
	}

	if (sc->txq.queued == 0) {
		/* all queued packets are sent */
		ifp->if_timer = 0;
	}
}

int
nfe_encap(struct nfe_softc *sc, struct mbuf *m0)
{
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_tx_data *data;
	bus_dmamap_t map;
	uint16_t flags, csumflags;
#if NVLAN > 0
	struct m_tag *mtag;
	uint32_t vtag = 0;
#endif
	int error, i, first;

	desc32 = NULL;
	desc64 = NULL;
	data = NULL;

	flags = 0;
	csumflags = 0;
	first = sc->txq.cur;

	map = sc->txq.data[first].map;

	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m0, BUS_DMA_NOWAIT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev, "could not map mbuf (error %d)\n",
		    error);
		return error;
	}

	if (sc->txq.queued + map->dm_nsegs >= NFE_TX_RING_COUNT - 1) {
		bus_dmamap_unload(sc->sc_dmat, map);
		return ENOBUFS;
	}

#if NVLAN > 0
	/* setup h/w VLAN tagging */
	if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL)
		vtag = NFE_TX_VTAG | VLAN_TAG_VALUE(mtag);
#endif
	if ((sc->sc_flags & NFE_HW_CSUM) != 0) {
		if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4)
			csumflags |= NFE_TX_IP_CSUM;
		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_UDPv4))
			csumflags |= NFE_TX_TCP_UDP_CSUM;
	}

	for (i = 0; i < map->dm_nsegs; i++) {
		data = &sc->txq.data[sc->txq.cur];

		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->txq.desc64[sc->txq.cur];
#if defined(__LP64__)
			desc64->physaddr[0] =
			    htole32(map->dm_segs[i].ds_addr >> 32);
#endif
			desc64->physaddr[1] =
			    htole32(map->dm_segs[i].ds_addr & 0xffffffff);
			desc64->length = htole16(map->dm_segs[i].ds_len - 1);
			desc64->flags = htole16(flags);
			desc64->vtag = 0;
		} else {
			desc32 = &sc->txq.desc32[sc->txq.cur];

			desc32->physaddr = htole32(map->dm_segs[i].ds_addr);
			desc32->length = htole16(map->dm_segs[i].ds_len - 1);
			desc32->flags = htole16(flags);
		}

		/*
		 * Setting of the valid bit in the first descriptor is
		 * deferred until the whole chain is fully setup.
		 */
		flags |= NFE_TX_VALID;

		sc->txq.queued++;
		sc->txq.cur = NFE_TX_NEXTDESC(sc->txq.cur);
	}

	/* the whole mbuf chain has been setup */
	if (sc->sc_flags & NFE_40BIT_ADDR) {
		/* fix last descriptor */
		flags |= NFE_TX_LASTFRAG_V2;
		desc64->flags = htole16(flags);

		/* Checksum flags and vtag belong to the first fragment only. */
#if NVLAN > 0
		sc->txq.desc64[first].vtag = htole32(vtag);
#endif
		sc->txq.desc64[first].flags |= htole16(csumflags);

		/* finally, set the valid bit in the first descriptor */
		sc->txq.desc64[first].flags |= htole16(NFE_TX_VALID);
	} else {
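		/*
		 * 32-bit descriptors: jumbo-capable (NFE_JUMBO_SUP) chips
		 * use the v2 last-fragment bit, older chips the v1 one.
		 */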
		/* fix last descriptor */
		if (sc->sc_flags & NFE_JUMBO_SUP)
			flags |= NFE_TX_LASTFRAG_V2;
		else
			flags |= NFE_TX_LASTFRAG_V1;
		desc32->flags = htole16(flags);

		/* Checksum flags belong to the first fragment only. */
		sc->txq.desc32[first].flags |= htole16(csumflags);

		/* finally, set the valid bit in the first descriptor */
		sc->txq.desc32[first].flags |= htole16(NFE_TX_VALID);
	}

	data->m = m0;
	data->active = map;

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	return 0;
}

void
nfe_start(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	int old = sc->txq.queued;
	struct mbuf *m0;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	for (;;) {
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		if (nfe_encap(sc, m0) != 0) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		/* packet put in h/w queue, remove from s/w queue */
		IFQ_DEQUEUE(&ifp->if_snd, m0);

		if (ifp->if_bpf != NULL)
			bpf_ops->bpf_mtap(ifp->if_bpf, m0);
	}

	if (sc->txq.queued != old) {
		/* packets are queued */
		if (sc->sc_flags & NFE_40BIT_ADDR)
			nfe_txdesc64_rsync(sc, old, sc->txq.cur,
			    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
		else
			nfe_txdesc32_rsync(sc, old, sc->txq.cur,
			    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
		/* kick Tx */
		NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_KICKTX | sc->rxtxctl);

		/*
		 * Set a timeout in case the chip goes out to lunch.
		 */
		ifp->if_timer = 5;
	}
}

void
nfe_watchdog(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;

	aprint_error_dev(sc->sc_dev, "watchdog timeout\n");

	ifp->if_flags &= ~IFF_RUNNING;
	nfe_init(ifp);

	ifp->if_oerrors++;
}

int
nfe_init(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	uint32_t tmp;
	int rc = 0, s;

	if (ifp->if_flags & IFF_RUNNING)
		return 0;

	nfe_stop(ifp, 0);

	NFE_WRITE(sc, NFE_TX_UNK, 0);
	NFE_WRITE(sc, NFE_STATUS, 0);

	sc->rxtxctl = NFE_RXTX_BIT2;
	if (sc->sc_flags & NFE_40BIT_ADDR)
		sc->rxtxctl |= NFE_RXTX_V3MAGIC;
	else if (sc->sc_flags & NFE_JUMBO_SUP)
		sc->rxtxctl |= NFE_RXTX_V2MAGIC;
	if (sc->sc_flags & NFE_HW_CSUM)
		sc->rxtxctl |= NFE_RXTX_RXCSUM;
#if NVLAN > 0
	/*
	 * Although the adapter is capable of stripping VLAN tags from received
	 * frames (NFE_RXTX_VTAG_STRIP), we do not enable this functionality on
	 * purpose.  This will be done in software by our network stack.
	 */
	if (sc->sc_flags & NFE_HW_VLAN)
		sc->rxtxctl |= NFE_RXTX_VTAG_INSERT;
#endif
	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | sc->rxtxctl);
	DELAY(10);
	NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);

#if NVLAN > 0
	if (sc->sc_flags & NFE_HW_VLAN)
		NFE_WRITE(sc, NFE_VTAG_CTL, NFE_VTAG_ENABLE);
#endif

	NFE_WRITE(sc, NFE_SETUP_R6, 0);

	/* set MAC address */
	nfe_set_macaddr(sc, sc->sc_enaddr);

	/* tell MAC where rings are in memory */
#ifdef __LP64__
	NFE_WRITE(sc, NFE_RX_RING_ADDR_HI, sc->rxq.physaddr >> 32);
#endif
	NFE_WRITE(sc, NFE_RX_RING_ADDR_LO, sc->rxq.physaddr & 0xffffffff);
#ifdef __LP64__
	NFE_WRITE(sc, NFE_TX_RING_ADDR_HI, sc->txq.physaddr >> 32);
#endif
	NFE_WRITE(sc, NFE_TX_RING_ADDR_LO, sc->txq.physaddr & 0xffffffff);

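	/* ring sizes are programmed as count - 1: Rx high 16 bits, Tx low */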
	NFE_WRITE(sc, NFE_RING_SIZE,
	    (NFE_RX_RING_COUNT - 1) << 16 |
	    (NFE_TX_RING_COUNT - 1));

	NFE_WRITE(sc, NFE_RXBUFSZ, sc->rxq.bufsz);

	/* force MAC to wakeup */
	tmp = NFE_READ(sc, NFE_PWR_STATE);
	NFE_WRITE(sc, NFE_PWR_STATE, tmp | NFE_PWR_WAKEUP);
	DELAY(10);
	tmp = NFE_READ(sc, NFE_PWR_STATE);
	NFE_WRITE(sc, NFE_PWR_STATE, tmp | NFE_PWR_VALID);

	s = splnet();
	NFE_WRITE(sc, NFE_IRQ_MASK, 0);
	nfe_intr(sc); /* XXX clear IRQ status registers */
	NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED);
	splx(s);

#if 1
	/* configure interrupts coalescing/mitigation */
	NFE_WRITE(sc, NFE_IMTIMER, NFE_IM_DEFAULT);
#else
	/* no interrupt mitigation: one interrupt per packet */
	NFE_WRITE(sc, NFE_IMTIMER, 970);
#endif

	NFE_WRITE(sc, NFE_SETUP_R1, NFE_R1_MAGIC);
	NFE_WRITE(sc, NFE_SETUP_R2, NFE_R2_MAGIC);
	NFE_WRITE(sc, NFE_SETUP_R6, NFE_R6_MAGIC);

	/* update MAC knowledge of PHY; generates a NFE_IRQ_LINK interrupt */
	NFE_WRITE(sc, NFE_STATUS, sc->mii_phyaddr << 24 | NFE_STATUS_MAGIC);

	NFE_WRITE(sc, NFE_SETUP_R4, NFE_R4_MAGIC);
	NFE_WRITE(sc, NFE_WOL_CTL, NFE_WOL_ENABLE);

	sc->rxtxctl &= ~NFE_RXTX_BIT2;
	NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);
	DELAY(10);
	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT1 | sc->rxtxctl);

	/* set Rx filter */
	nfe_setmulti(sc);

	if ((rc = ether_mediachange(ifp)) != 0)
		goto out;

	nfe_tick(sc);

	/* enable Rx */
	NFE_WRITE(sc, NFE_RX_CTL, NFE_RX_START);

	/* enable Tx */
	NFE_WRITE(sc, NFE_TX_CTL, NFE_TX_START);

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	/* enable interrupts */
	NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED);

	callout_schedule(&sc->sc_tick_ch, hz);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

out:
	return rc;
}

void
nfe_stop(struct ifnet *ifp, int disable)
{
	struct nfe_softc *sc = ifp->if_softc;

	callout_stop(&sc->sc_tick_ch);

	ifp->if_timer = 0;
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

	mii_down(&sc->sc_mii);

	/* abort Tx */
	NFE_WRITE(sc, NFE_TX_CTL, 0);

	/* disable Rx */
	NFE_WRITE(sc, NFE_RX_CTL, 0);

	/* disable interrupts */
	NFE_WRITE(sc, NFE_IRQ_MASK, 0);

	/* reset Tx and Rx rings */
	nfe_reset_tx_ring(sc, &sc->txq);
	nfe_reset_rx_ring(sc, &sc->rxq);
}

int
nfe_alloc_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_rx_data *data;
	struct nfe_jbuf *jbuf;
	void **desc;
	bus_addr_t physaddr;
	int i, nsegs, error, descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = (void **)&ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = (void **)&ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	ring->cur = ring->next = 0;
	ring->bufsz = MCLBYTES;

	error = bus_dmamap_create(sc->sc_dmat, NFE_RX_RING_COUNT * descsize, 1,
	    NFE_RX_RING_COUNT * descsize, 0, BUS_DMA_NOWAIT, &ring->map);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not create desc DMA map\n");
		ring->map = NULL;
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, NFE_RX_RING_COUNT * descsize,
	    PAGE_SIZE, 0, &ring->seg, 1, &nsegs, BUS_DMA_NOWAIT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate DMA memory\n");
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &ring->seg, nsegs,
	    NFE_RX_RING_COUNT * descsize, (void **)desc, BUS_DMA_NOWAIT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not map desc DMA memory\n");
		goto fail;
	}

	error = bus_dmamap_load(sc->sc_dmat, ring->map, *desc,
	    NFE_RX_RING_COUNT * descsize, NULL, BUS_DMA_NOWAIT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev, "could not load desc DMA map\n");
		goto fail;
	}

	memset(*desc, 0, NFE_RX_RING_COUNT * descsize);
	ring->physaddr = ring->map->dm_segs[0].ds_addr;

	if (sc->sc_flags & NFE_USE_JUMBO) {
		ring->bufsz = NFE_JBYTES;
		if ((error = nfe_jpool_alloc(sc)) != 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not allocate jumbo frames\n");
			goto fail;
		}
	}

	/*
	 * Pre-allocate Rx buffers and populate Rx ring.
	 */
	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
		data = &sc->rxq.data[i];

		MGETHDR(data->m, M_DONTWAIT, MT_DATA);
		if (data->m == NULL) {
			aprint_error_dev(sc->sc_dev,
			    "could not allocate rx mbuf\n");
			error = ENOMEM;
			goto fail;
		}

		if (sc->sc_flags & NFE_USE_JUMBO) {
			if ((jbuf = nfe_jalloc(sc, i)) == NULL) {
				aprint_error_dev(sc->sc_dev,
				    "could not allocate jumbo buffer\n");
				goto fail;
			}
			MEXTADD(data->m, jbuf->buf, NFE_JBYTES, 0, nfe_jfree,
			    sc);

			physaddr = jbuf->physaddr;
		} else {
			error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
			    MCLBYTES, 0, BUS_DMA_NOWAIT, &data->map);
			if (error != 0) {
				aprint_error_dev(sc->sc_dev,
				    "could not create DMA map\n");
				data->map = NULL;
				goto fail;
			}
			MCLGET(data->m, M_DONTWAIT);
			if (!(data->m->m_flags & M_EXT)) {
				aprint_error_dev(sc->sc_dev,
				    "could not allocate mbuf cluster\n");
				error = ENOMEM;
				goto fail;
			}

			error = bus_dmamap_load(sc->sc_dmat, data->map,
			    mtod(data->m, void *), MCLBYTES, NULL,
			    BUS_DMA_READ | BUS_DMA_NOWAIT);
			if (error != 0) {
				aprint_error_dev(sc->sc_dev,
				    "could not load rx buf DMA map\n");
				goto fail;
			}
			physaddr = data->map->dm_segs[0].ds_addr;
		}

		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->rxq.desc64[i];
#if defined(__LP64__)
			desc64->physaddr[0] = htole32(physaddr >> 32);
#endif
			desc64->physaddr[1] = htole32(physaddr & 0xffffffff);
			desc64->length = htole16(sc->rxq.bufsz);
			desc64->flags = htole16(NFE_RX_READY);
		} else {
			desc32 = &sc->rxq.desc32[i];
			desc32->physaddr = htole32(physaddr);
			desc32->length = htole16(sc->rxq.bufsz);
			desc32->flags = htole16(NFE_RX_READY);
		}
	}

	bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	return 0;

fail:	nfe_free_rx_ring(sc, ring);
	return error;
}

void
nfe_reset_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	int i;

	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
		if (sc->sc_flags & NFE_40BIT_ADDR) {
			ring->desc64[i].length = htole16(ring->bufsz);
			ring->desc64[i].flags = htole16(NFE_RX_READY);
		} else {
			ring->desc32[i].length = htole16(ring->bufsz);
			ring->desc32[i].flags = htole16(NFE_RX_READY);
		}
	}

	bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	ring->cur = ring->next = 0;
}

void
nfe_free_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	struct nfe_rx_data *data;
	void *desc;
	int i, descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	if (desc != NULL) {
		bus_dmamap_sync(sc->sc_dmat, ring->map, 0,
		    ring->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, ring->map);
		bus_dmamem_unmap(sc->sc_dmat, (void *)desc,
		    NFE_RX_RING_COUNT * descsize);
		bus_dmamem_free(sc->sc_dmat, &ring->seg, 1);
	}

	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
		data = &ring->data[i];

		if (data->map != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
			    data->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, data->map);
			bus_dmamap_destroy(sc->sc_dmat, data->map);
		}
		if (data->m != NULL)
			m_freem(data->m);
	}
}

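/*
 * Jumbo buffer management: nfe_jpool_alloc() carves one large DMA area
 * into NFE_JBYTES-sized chunks kept on a free list.  nfe_jalloc() hands
 * a chunk to Rx ring slot 'i' (recording the mapping in jbufmap) and
 * nfe_jfree() returns it to the free list once the mbuf is released.
 */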
struct nfe_jbuf *
nfe_jalloc(struct nfe_softc *sc, int i)
{
	struct nfe_jbuf *jbuf;

	mutex_enter(&sc->rxq.mtx);
	jbuf = SLIST_FIRST(&sc->rxq.jfreelist);
	if (jbuf != NULL)
		SLIST_REMOVE_HEAD(&sc->rxq.jfreelist, jnext);
	mutex_exit(&sc->rxq.mtx);
	if (jbuf == NULL)
		return NULL;
	sc->rxq.jbufmap[i] =
	    ((char *)jbuf->buf - (char *)sc->rxq.jpool) / NFE_JBYTES;
	return jbuf;
}

/*
 * This is called automatically by the network stack when the mbuf is freed.
 * Beware that the NIC may have been reset by the time the mbuf is freed.
 */
void
nfe_jfree(struct mbuf *m, void *buf, size_t size, void *arg)
{
	struct nfe_softc *sc = arg;
	struct nfe_jbuf *jbuf;
	int i;

	/* find the jbuf from the base pointer */
	i = ((char *)buf - (char *)sc->rxq.jpool) / NFE_JBYTES;
	if (i < 0 || i >= NFE_JPOOL_COUNT) {
		aprint_error_dev(sc->sc_dev,
		    "request to free a buffer (%p) not managed by us\n", buf);
		return;
	}
	jbuf = &sc->rxq.jbuf[i];

	/* ..and put it back in the free list */
	mutex_enter(&sc->rxq.mtx);
	SLIST_INSERT_HEAD(&sc->rxq.jfreelist, jbuf, jnext);
	mutex_exit(&sc->rxq.mtx);

	if (m != NULL)
		pool_cache_put(mb_cache, m);
}

int
nfe_jpool_alloc(struct nfe_softc *sc)
{
	struct nfe_rx_ring *ring = &sc->rxq;
	struct nfe_jbuf *jbuf;
	bus_addr_t physaddr;
	char *buf;
	int i, nsegs, error;

	/*
	 * Allocate a big chunk of DMA'able memory.
	 */
	error = bus_dmamap_create(sc->sc_dmat, NFE_JPOOL_SIZE, 1,
	    NFE_JPOOL_SIZE, 0, BUS_DMA_NOWAIT, &ring->jmap);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not create jumbo DMA map\n");
		ring->jmap = NULL;
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, NFE_JPOOL_SIZE, PAGE_SIZE, 0,
	    &ring->jseg, 1, &nsegs, BUS_DMA_NOWAIT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate jumbo DMA memory\n");
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &ring->jseg, nsegs, NFE_JPOOL_SIZE,
	    &ring->jpool, BUS_DMA_NOWAIT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not map jumbo DMA memory\n");
		goto fail;
	}

	error = bus_dmamap_load(sc->sc_dmat, ring->jmap, ring->jpool,
	    NFE_JPOOL_SIZE, NULL, BUS_DMA_READ | BUS_DMA_NOWAIT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not load jumbo DMA map\n");
		goto fail;
	}

	/* ..and split it into 9KB chunks */
	SLIST_INIT(&ring->jfreelist);

	buf = ring->jpool;
	physaddr = ring->jmap->dm_segs[0].ds_addr;
	for (i = 0; i < NFE_JPOOL_COUNT; i++) {
		jbuf = &ring->jbuf[i];

		jbuf->buf = buf;
		jbuf->physaddr = physaddr;

		SLIST_INSERT_HEAD(&ring->jfreelist, jbuf, jnext);

		buf += NFE_JBYTES;
		physaddr += NFE_JBYTES;
	}

	return 0;

fail:	nfe_jpool_free(sc);
	return error;
}

void
nfe_jpool_free(struct nfe_softc *sc)
{
	struct nfe_rx_ring *ring = &sc->rxq;

	if (ring->jmap != NULL) {
		bus_dmamap_sync(sc->sc_dmat, ring->jmap, 0,
		    ring->jmap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, ring->jmap);
		bus_dmamap_destroy(sc->sc_dmat, ring->jmap);
	}
	if (ring->jpool != NULL) {
		bus_dmamem_unmap(sc->sc_dmat, ring->jpool, NFE_JPOOL_SIZE);
		bus_dmamem_free(sc->sc_dmat, &ring->jseg, 1);
	}
}

int
nfe_alloc_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	int i, nsegs, error;
	void **desc;
	int descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = (void **)&ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = (void **)&ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	ring->queued = 0;
	ring->cur = ring->next = 0;

	error = bus_dmamap_create(sc->sc_dmat, NFE_TX_RING_COUNT * descsize, 1,
	    NFE_TX_RING_COUNT * descsize, 0, BUS_DMA_NOWAIT, &ring->map);

	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not create desc DMA map\n");
		ring->map = NULL;
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, NFE_TX_RING_COUNT * descsize,
	    PAGE_SIZE, 0, &ring->seg, 1, &nsegs, BUS_DMA_NOWAIT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate DMA memory\n");
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &ring->seg, nsegs,
	    NFE_TX_RING_COUNT * descsize, (void **)desc, BUS_DMA_NOWAIT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not map desc DMA memory\n");
		goto fail;
	}

	error = bus_dmamap_load(sc->sc_dmat, ring->map, *desc,
	    NFE_TX_RING_COUNT * descsize, NULL, BUS_DMA_NOWAIT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev, "could not load desc DMA map\n");
		goto fail;
	}

	memset(*desc, 0, NFE_TX_RING_COUNT * descsize);
	ring->physaddr = ring->map->dm_segs[0].ds_addr;

	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		error = bus_dmamap_create(sc->sc_dmat, NFE_JBYTES,
		    NFE_MAX_SCATTER, NFE_JBYTES, 0, BUS_DMA_NOWAIT,
		    &ring->data[i].map);
		if (error != 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not create DMA map\n");
			ring->data[i].map = NULL;
			goto fail;
		}
	}

	return 0;

fail:	nfe_free_tx_ring(sc, ring);
	return error;
}

void
nfe_reset_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	struct nfe_tx_data *data;
	int i;

	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		if (sc->sc_flags & NFE_40BIT_ADDR)
			ring->desc64[i].flags = 0;
		else
			ring->desc32[i].flags = 0;

		data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->active, 0,
			    data->active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->active);
			m_freem(data->m);
			data->m = NULL;
		}
	}

	bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	ring->queued = 0;
	ring->cur = ring->next = 0;
}

void
nfe_free_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	struct nfe_tx_data *data;
	void *desc;
	int i, descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	if (desc != NULL) {
		bus_dmamap_sync(sc->sc_dmat, ring->map, 0,
		    ring->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, ring->map);
		bus_dmamem_unmap(sc->sc_dmat, (void *)desc,
		    NFE_TX_RING_COUNT * descsize);
		bus_dmamem_free(sc->sc_dmat, &ring->seg, 1);
	}

	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->active, 0,
			    data->active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->active);
			m_freem(data->m);
		}
	}

	/* ..and now actually destroy the DMA mappings */
	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		data = &ring->data[i];
		if (data->map == NULL)
			continue;
		bus_dmamap_destroy(sc->sc_dmat, data->map);
	}
}

void
nfe_setmulti(struct nfe_softc *sc)
{
	struct ethercom *ec = &sc->sc_ethercom;
	struct ifnet *ifp = &ec->ec_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint8_t addr[ETHER_ADDR_LEN], mask[ETHER_ADDR_LEN];
	uint32_t filter = NFE_RXFILTER_MAGIC;
	int i;

	if ((ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
		memset(addr, 0, ETHER_ADDR_LEN);
		memset(mask, 0, ETHER_ADDR_LEN);
		goto done;
	}

	memcpy(addr, etherbroadcastaddr, ETHER_ADDR_LEN);
	memcpy(mask, etherbroadcastaddr, ETHER_ADDR_LEN);

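	/*
	 * The hardware matches on an (addr, mask) pair: keep the bits
	 * common to all multicast addresses in 'addr' and mark the
	 * differing bits as don't-care in 'mask'.
	 */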
	ETHER_FIRST_MULTI(step, ec, enm);
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			ifp->if_flags |= IFF_ALLMULTI;
			memset(addr, 0, ETHER_ADDR_LEN);
			memset(mask, 0, ETHER_ADDR_LEN);
			goto done;
		}
		for (i = 0; i < ETHER_ADDR_LEN; i++) {
			addr[i] &=  enm->enm_addrlo[i];
			mask[i] &= ~enm->enm_addrlo[i];
		}
		ETHER_NEXT_MULTI(step, enm);
	}
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		mask[i] |= addr[i];

done:
	addr[0] |= 0x01;	/* make sure multicast bit is set */

	NFE_WRITE(sc, NFE_MULTIADDR_HI,
	    addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
	NFE_WRITE(sc, NFE_MULTIADDR_LO,
	    addr[5] <<  8 | addr[4]);
	NFE_WRITE(sc, NFE_MULTIMASK_HI,
	    mask[3] << 24 | mask[2] << 16 | mask[1] << 8 | mask[0]);
	NFE_WRITE(sc, NFE_MULTIMASK_LO,
	    mask[5] <<  8 | mask[4]);

	filter |= (ifp->if_flags & IFF_PROMISC) ? NFE_PROMISC : NFE_U2M;
	NFE_WRITE(sc, NFE_RXFILTER, filter);
}

void
nfe_get_macaddr(struct nfe_softc *sc, uint8_t *addr)
{
	uint32_t tmp;

	if ((sc->sc_flags & NFE_CORRECT_MACADDR) != 0) {
		tmp = NFE_READ(sc, NFE_MACADDR_HI);
		addr[0] = (tmp & 0xff);
		addr[1] = (tmp >>  8) & 0xff;
		addr[2] = (tmp >> 16) & 0xff;
		addr[3] = (tmp >> 24) & 0xff;

		tmp = NFE_READ(sc, NFE_MACADDR_LO);
		addr[4] = (tmp & 0xff);
		addr[5] = (tmp >> 8) & 0xff;

	} else {
		tmp = NFE_READ(sc, NFE_MACADDR_LO);
		addr[0] = (tmp >> 8) & 0xff;
		addr[1] = (tmp & 0xff);

		tmp = NFE_READ(sc, NFE_MACADDR_HI);
		addr[2] = (tmp >> 24) & 0xff;
		addr[3] = (tmp >> 16) & 0xff;
		addr[4] = (tmp >>  8) & 0xff;
		addr[5] = (tmp & 0xff);
	}
}

void
nfe_set_macaddr(struct nfe_softc *sc, const uint8_t *addr)
{
	NFE_WRITE(sc, NFE_MACADDR_LO,
	    addr[5] <<  8 | addr[4]);
	NFE_WRITE(sc, NFE_MACADDR_HI,
	    addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
}

void
nfe_tick(void *arg)
{
	struct nfe_softc *sc = arg;
	int s;

	s = splnet();
	mii_tick(&sc->sc_mii);
	splx(s);

	callout_schedule(&sc->sc_tick_ch, hz);
}

void
nfe_poweron(device_t self)
{
	struct nfe_softc *sc = device_private(self);

	if ((sc->sc_flags & NFE_PWR_MGMT) != 0) {
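		/* bring the MAC out of its low-power state before use */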
		NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | NFE_RXTX_BIT2);
		NFE_WRITE(sc, NFE_MAC_RESET, NFE_MAC_RESET_MAGIC);
		DELAY(100);
		NFE_WRITE(sc, NFE_MAC_RESET, 0);
		DELAY(100);
		NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT2);
		NFE_WRITE(sc, NFE_PWR2_CTL,
		    NFE_READ(sc, NFE_PWR2_CTL) & ~NFE_PWR2_WAKEUP_MASK);
	}
}

bool
nfe_resume(device_t dv, pmf_qual_t qual)
{
	nfe_poweron(dv);

	return true;
}
1949