/*	$NetBSD: if_emac.c,v 1.56 2021/03/30 02:25:24 rin Exp $	*/

/*
 * Copyright 2001, 2002 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Simon Burge and Jason Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed for the NetBSD Project by
 *      Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * emac(4) supports the following ibm4xx EMACs.
 *   XXXX: ZMII and 'TCP Acceleration Hardware' not supported yet...
 *
 *            tested
 *            ------
 * 405EP	-  10/100 x2
 * 405EX/EXr	o  10/100/1000 x2 (EXr x1), STA v2, 256-bit hash table, RGMII
 * 405GP/GPr	o  10/100
 * 440EP	-  10/100 x2, ZMII
 * 440GP	-  10/100 x2, ZMII
 * 440GX	-  10/100/1000 x4, ZMII/RGMII(ch 2, 3), TAH(ch 2, 3)
 * 440SP	-  10/100/1000
 * 440SPe	-  10/100/1000, STA v2
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_emac.c,v 1.56 2021/03/30 02:25:24 rin Exp $");

#ifdef _KERNEL_OPT
#include "opt_emac.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/cpu.h>
#include <sys/device.h>

#include <sys/rndsource.h>

#include <uvm/uvm_extern.h>		/* for PAGE_SIZE */

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <powerpc/ibm4xx/cpu.h>
#include <powerpc/ibm4xx/dcr4xx.h>
#include <powerpc/ibm4xx/mal405gp.h>
#include <powerpc/ibm4xx/dev/emacreg.h>
#include <powerpc/ibm4xx/dev/if_emacreg.h>
#include <powerpc/ibm4xx/dev/if_emacvar.h>
#include <powerpc/ibm4xx/dev/malvar.h>
#include <powerpc/ibm4xx/dev/opbreg.h>
#include <powerpc/ibm4xx/dev/opbvar.h>
#include <powerpc/ibm4xx/dev/plbvar.h>
#if defined(EMAC_ZMII_PHY) || defined(EMAC_RGMII_PHY)
#include <powerpc/ibm4xx/dev/rmiivar.h>
#endif

#include <dev/mii/miivar.h>

#include "locators.h"


/*
 * Transmit descriptor list size.  There are two Tx channels, each with
 * up to 256 hardware descriptors available.  We currently use one Tx
 * channel.  We tell the upper layers that they can queue a lot of
 * packets, and we go ahead and manage up to 64 of them at a time.  We
 * allow up to 16 DMA segments per packet.
 */
#define	EMAC_NTXSEGS		16
#define	EMAC_TXQUEUELEN		64
#define	EMAC_TXQUEUELEN_MASK	(EMAC_TXQUEUELEN - 1)
#define	EMAC_TXQUEUE_GC		(EMAC_TXQUEUELEN / 4)
#define	EMAC_NTXDESC		256
#define	EMAC_NTXDESC_MASK	(EMAC_NTXDESC - 1)
#define	EMAC_NEXTTX(x)		(((x) + 1) & EMAC_NTXDESC_MASK)
#define	EMAC_NEXTTXS(x)		(((x) + 1) & EMAC_TXQUEUELEN_MASK)
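/*
 * Both ring sizes are powers of two, so the "next index" macros wrap
 * with a cheap AND mask instead of a modulo; e.g. EMAC_NEXTTX(255) == 0
 * and EMAC_NEXTTXS(63) == 0.
 */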

/*
 * Receive descriptor list size.  There is one Rx channel with up to 256
 * hardware descriptors available.  We allocate 64 receive descriptors,
 * each with a 2k buffer (MCLBYTES).
 */
#define	EMAC_NRXDESC		64
#define	EMAC_NRXDESC_MASK	(EMAC_NRXDESC - 1)
#define	EMAC_NEXTRX(x)		(((x) + 1) & EMAC_NRXDESC_MASK)
#define	EMAC_PREVRX(x)		(((x) - 1) & EMAC_NRXDESC_MASK)

/*
 * Transmit/receive descriptors that are DMA'd to the EMAC.
 */
struct emac_control_data {
	struct mal_descriptor ecd_txdesc[EMAC_NTXDESC];
	struct mal_descriptor ecd_rxdesc[EMAC_NRXDESC];
};

#define	EMAC_CDOFF(x)		offsetof(struct emac_control_data, x)
#define	EMAC_CDTXOFF(x)		EMAC_CDOFF(ecd_txdesc[(x)])
#define	EMAC_CDRXOFF(x)		EMAC_CDOFF(ecd_rxdesc[(x)])

/*
 * Software state for transmit jobs.
 */
struct emac_txsoft {
	struct mbuf *txs_mbuf;		/* head of mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive descriptors.
 */
struct emac_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

/*
 * Software state per device.
 */
struct emac_softc {
	device_t sc_dev;		/* generic device information */
	int sc_instance;		/* instance no. */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
	struct ethercom sc_ethercom;	/* ethernet common data */
	void *sc_sdhook;		/* shutdown hook */
	void *sc_powerhook;		/* power management hook */

	struct mii_data sc_mii;		/* MII/media information */
	struct callout sc_callout;	/* tick callout */

	uint32_t sc_mr1;		/* copy of Mode Register 1 */
	uint32_t sc_stacr_read;		/* STACR STAOPC read opcode */
	uint32_t sc_stacr_write;	/* STACR STAOPC write opcode */
	uint32_t sc_stacr_bits;		/* misc bits of STACR */
	bool sc_stacr_completed;	/* sense of STACR OC when complete */
	int sc_htsize;			/* hash table size */

	bus_dmamap_t sc_cddmamap;	/* control data dma map */
#define	sc_cddma	sc_cddmamap->dm_segs[0].ds_addr

	/* Software state for transmit/receive descriptors. */
	struct emac_txsoft sc_txsoft[EMAC_TXQUEUELEN];
	struct emac_rxsoft sc_rxsoft[EMAC_NRXDESC];

	/* Control data structures. */
	struct emac_control_data *sc_control_data;
#define	sc_txdescs	sc_control_data->ecd_txdesc
#define	sc_rxdescs	sc_control_data->ecd_rxdesc

#ifdef EMAC_EVENT_COUNTERS
	struct evcnt sc_ev_rxintr;	/* Rx interrupts */
	struct evcnt sc_ev_txintr;	/* Tx interrupts */
	struct evcnt sc_ev_rxde;	/* Rx descriptor interrupts */
	struct evcnt sc_ev_txde;	/* Tx descriptor interrupts */
	struct evcnt sc_ev_intr;	/* General EMAC interrupts */

	struct evcnt sc_ev_txreap;	/* Calls to Tx descriptor reaper */
	struct evcnt sc_ev_txsstall;	/* Tx stalled due to no txs */
	struct evcnt sc_ev_txdstall;	/* Tx stalled due to no txd */
	struct evcnt sc_ev_txdrop;	/* Tx packets dropped (too many segs) */
	struct evcnt sc_ev_tu;		/* Tx underrun */
#endif /* EMAC_EVENT_COUNTERS */

	int sc_txfree;			/* number of free Tx descriptors */
	int sc_txnext;			/* next ready Tx descriptor */

	int sc_txsfree;			/* number of free Tx jobs */
	int sc_txsnext;			/* next ready Tx job */
	int sc_txsdirty;		/* dirty Tx jobs */

	int sc_rxptr;			/* next ready Rx descriptor/descsoft */

	krndsource_t rnd_source;	/* random source */

	void (*sc_rmii_enable)(device_t, int);		/* reduced MII enable */
	void (*sc_rmii_disable)(device_t, int);		/* reduced MII disable */
	void (*sc_rmii_speed)(device_t, int, int);	/* reduced MII speed */
};

#ifdef EMAC_EVENT_COUNTERS
#define	EMAC_EVCNT_INCR(ev)	(ev)->ev_count++
#else
#define	EMAC_EVCNT_INCR(ev)	/* nothing */
#endif

#define	EMAC_CDTXADDR(sc, x)	((sc)->sc_cddma + EMAC_CDTXOFF((x)))
#define	EMAC_CDRXADDR(sc, x)	((sc)->sc_cddma + EMAC_CDRXOFF((x)))

#define	EMAC_CDTXSYNC(sc, x, n, ops)					\
do {									\
	int __x, __n;							\
									\
	__x = (x);							\
	__n = (n);							\
									\
	/* If it will wrap around, sync to the end of the ring. */	\
	if ((__x + __n) > EMAC_NTXDESC) {				\
		bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,	\
		    EMAC_CDTXOFF(__x), sizeof(struct mal_descriptor) *	\
		    (EMAC_NTXDESC - __x), (ops));			\
		__n -= (EMAC_NTXDESC - __x);				\
		__x = 0;						\
	}								\
									\
	/* Now sync whatever is left. */				\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    EMAC_CDTXOFF(__x), sizeof(struct mal_descriptor) * __n, (ops)); \
} while (/*CONSTCOND*/0)
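/*
 * For example, EMAC_CDTXSYNC(sc, 250, 10, ops) with EMAC_NTXDESC == 256
 * syncs descriptors 250-255 in the first bus_dmamap_sync() call and
 * descriptors 0-3 in the second, since a range that wraps the ring is
 * not contiguous in the control-data DMA map.
 */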

#define	EMAC_CDRXSYNC(sc, x, ops)					\
do {									\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    EMAC_CDRXOFF((x)), sizeof(struct mal_descriptor), (ops));	\
} while (/*CONSTCOND*/0)

#define	EMAC_INIT_RXDESC(sc, x)						\
do {									\
	struct emac_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)];		\
	struct mal_descriptor *__rxd = &(sc)->sc_rxdescs[(x)];		\
	struct mbuf *__m = __rxs->rxs_mbuf;				\
									\
	/*								\
	 * Note: We scoot the packet forward 2 bytes in the buffer	\
	 * so that the payload after the Ethernet header is aligned	\
	 * to a 4-byte boundary.					\
	 */								\
	__m->m_data = __m->m_ext.ext_buf + 2;				\
									\
	__rxd->md_data = __rxs->rxs_dmamap->dm_segs[0].ds_addr + 2;	\
	__rxd->md_data_len = __m->m_ext.ext_size - 2;			\
	__rxd->md_stat_ctrl = MAL_RX_EMPTY | MAL_RX_INTERRUPT |	\
	    /* Set wrap on last descriptor. */				\
	    (((x) == EMAC_NRXDESC - 1) ? MAL_RX_WRAP : 0);		\
	EMAC_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); \
} while (/*CONSTCOND*/0)
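/*
 * The 2-byte scoot works because an Ethernet header is 14 bytes long:
 * starting the frame at offset 2 puts the IP header at offset 16, which
 * is 4-byte aligned, so the protocol stack can read 32-bit header
 * fields without unaligned accesses.
 */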

#define	EMAC_WRITE(sc, reg, val) \
	bus_space_write_stream_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define	EMAC_READ(sc, reg) \
	bus_space_read_stream_4((sc)->sc_st, (sc)->sc_sh, (reg))

#define	EMAC_SET_FILTER(aht, crc) \
do {									\
	(aht)[3 - (((crc) >> 26) >> 4)] |= 1 << (((crc) >> 26) & 0xf);	\
} while (/*CONSTCOND*/0)
#define	EMAC_SET_FILTER256(aht, crc) \
do {									\
	(aht)[7 - (((crc) >> 24) >> 5)] |= 1 << (((crc) >> 24) & 0x1f);	\
} while (/*CONSTCOND*/0)
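/*
 * EMAC_SET_FILTER hashes into a 64-bit table: the top 6 bits of the
 * big-endian CRC select one of 64 bits spread across four 16-bit GAHT
 * registers.  EMAC_SET_FILTER256 uses the top 8 bits of the CRC to
 * select one of 256 bits across eight 32-bit registers.  The "3 -" and
 * "7 -" terms flip the register index because the registers hold the
 * hash bits in descending order.
 */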

static int	emac_match(device_t, cfdata_t, void *);
static void	emac_attach(device_t, device_t, void *);

static int	emac_intr(void *);
static void	emac_shutdown(void *);

static void	emac_start(struct ifnet *);
static int	emac_ioctl(struct ifnet *, u_long, void *);
static int	emac_init(struct ifnet *);
static void	emac_stop(struct ifnet *, int);
static void	emac_watchdog(struct ifnet *);

static int	emac_add_rxbuf(struct emac_softc *, int);
static void	emac_rxdrain(struct emac_softc *);
static int	emac_set_filter(struct emac_softc *);
static int	emac_txreap(struct emac_softc *);

static void	emac_soft_reset(struct emac_softc *);
static void	emac_smart_reset(struct emac_softc *);

static int	emac_mii_readreg(device_t, int, int, uint16_t *);
static int	emac_mii_writereg(device_t, int, int, uint16_t);
static void	emac_mii_statchg(struct ifnet *);
static uint32_t	emac_mii_wait(struct emac_softc *);
static void	emac_mii_tick(void *);

int		emac_copy_small = 0;

CFATTACH_DECL_NEW(emac, sizeof(struct emac_softc),
    emac_match, emac_attach, NULL, NULL);


static int
emac_match(device_t parent, cfdata_t cf, void *aux)
{
	struct opb_attach_args *oaa = aux;

	/* match only on-chip ethernet devices */
	if (strcmp(oaa->opb_name, cf->cf_name) == 0)
		return 1;

	return 0;
}

static void
emac_attach(device_t parent, device_t self, void *aux)
{
	struct opb_attach_args *oaa = aux;
	struct emac_softc *sc = device_private(self);
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct mii_data *mii = &sc->sc_mii;
	const char *xname = device_xname(self);
	bus_dma_segment_t seg;
	int error, i, nseg, opb_freq, opbc, mii_phy = MII_PHY_ANY;
	const uint8_t *enaddr;
	prop_dictionary_t dict = device_properties(self);
	prop_data_t ea;

	bus_space_map(oaa->opb_bt, oaa->opb_addr, EMAC_NREG, 0, &sc->sc_sh);

	sc->sc_dev = self;
	sc->sc_instance = oaa->opb_instance;
	sc->sc_st = oaa->opb_bt;
	sc->sc_dmat = oaa->opb_dmat;

	callout_init(&sc->sc_callout, 0);

	aprint_naive("\n");
	aprint_normal(": Ethernet Media Access Controller\n");

	/* Fetch the Ethernet address. */
	ea = prop_dictionary_get(dict, "mac-address");
	if (ea == NULL) {
		aprint_error_dev(self, "unable to get mac-address property\n");
		return;
	}
	KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
	KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
	enaddr = prop_data_data_nocopy(ea);
	aprint_normal_dev(self, "Ethernet address %s\n", ether_sprintf(enaddr));

#if defined(EMAC_ZMII_PHY) || defined(EMAC_RGMII_PHY)
	/* Fetch the MII offset. */
	prop_dictionary_get_uint32(dict, "mii-phy", &mii_phy);

#ifdef EMAC_ZMII_PHY
	if (oaa->opb_flags & OPB_FLAGS_EMAC_RMII_ZMII)
		zmii_attach(parent, sc->sc_instance, &sc->sc_rmii_enable,
		    &sc->sc_rmii_disable, &sc->sc_rmii_speed);
#endif
#ifdef EMAC_RGMII_PHY
	if (oaa->opb_flags & OPB_FLAGS_EMAC_RMII_RGMII)
		rgmii_attach(parent, sc->sc_instance, &sc->sc_rmii_enable,
		    &sc->sc_rmii_disable, &sc->sc_rmii_speed);
#endif
#endif

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmat,
	    sizeof(struct emac_control_data), 0, 0, &seg, 1, &nseg, 0)) != 0) {
		aprint_error_dev(self,
		    "unable to allocate control data, error = %d\n", error);
		goto fail_0;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, nseg,
	    sizeof(struct emac_control_data), (void **)&sc->sc_control_data,
	    BUS_DMA_COHERENT)) != 0) {
		aprint_error_dev(self,
		    "unable to map control data, error = %d\n", error);
		goto fail_1;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat,
	    sizeof(struct emac_control_data), 1,
	    sizeof(struct emac_control_data), 0, 0, &sc->sc_cddmamap)) != 0) {
		aprint_error_dev(self,
		    "unable to create control data DMA map, error = %d\n",
		    error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
	    sc->sc_control_data, sizeof(struct emac_control_data), NULL,
	    0)) != 0) {
		aprint_error_dev(self,
		    "unable to load control data DMA map, error = %d\n", error);
		goto fail_3;
	}

	/*
	 * Create the transmit buffer DMA maps.
	 */
	for (i = 0; i < EMAC_TXQUEUELEN; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    EMAC_NTXSEGS, MCLBYTES, 0, 0,
		    &sc->sc_txsoft[i].txs_dmamap)) != 0) {
			aprint_error_dev(self,
			    "unable to create tx DMA map %d, error = %d\n",
			    i, error);
			goto fail_4;
		}
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < EMAC_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, 0, &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
			aprint_error_dev(self,
			    "unable to create rx DMA map %d, error = %d\n",
			    i, error);
			goto fail_5;
		}
		sc->sc_rxsoft[i].rxs_mbuf = NULL;
	}

	/* Soft reset the EMAC to bring the chip to a known state. */
	emac_soft_reset(sc);

	opb_freq = opb_get_frequency();
	switch (opb_freq) {
	case  33333333: opbc =  STACR_OPBC_33MHZ; break;
	case  50000000: opbc =  STACR_OPBC_50MHZ; break;
	case  66666666: opbc =  STACR_OPBC_66MHZ; break;
	case  83333333: opbc =  STACR_OPBC_83MHZ; break;
	case 100000000: opbc = STACR_OPBC_100MHZ; break;

	default:
		if (opb_freq > 100000000) {
			opbc = STACR_OPBC_A100MHZ;
			break;
		}
		aprint_error_dev(self, "unsupported OPB frequency %dMHz\n",
		    opb_freq / 1000 / 1000);
		goto fail_5;
	}
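	/*
	 * The OPBC value tells the EMAC the OPB bus frequency; below it
	 * is folded into MR1 (MR1_OBCI) on the gigabit parts and into
	 * STACR (STACR_OPBC) on the 10/100 parts, presumably so the
	 * chip can derive a compliant MII management clock (MDC) from
	 * the bus clock.
	 */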
	if (oaa->opb_flags & OPB_FLAGS_EMAC_GBE) {
		sc->sc_mr1 =
		    MR1_RFS_GBE(MR1__FS_16KB)	|
		    MR1_TFS_GBE(MR1__FS_16KB)	|
		    MR1_TR0_MULTIPLE		|
		    MR1_OBCI(opbc);
		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;

		if (oaa->opb_flags & OPB_FLAGS_EMAC_STACV2) {
			sc->sc_stacr_read = STACR_STAOPC_READ;
			sc->sc_stacr_write = STACR_STAOPC_WRITE;
			sc->sc_stacr_bits = STACR_OC;
			sc->sc_stacr_completed = false;
		} else {
			sc->sc_stacr_read = STACR_READ;
			sc->sc_stacr_write = STACR_WRITE;
			sc->sc_stacr_completed = true;
		}
	} else {
		/*
		 * Set up Mode Register 1 - set receive and transmit FIFOs to
		 * maximum size, allow transmit of multiple packets (only
		 * channel 0 is used).
		 *
		 * XXX: Allow pause packets??
		 */
		sc->sc_mr1 =
		    MR1_RFS(MR1__FS_4KB) |
		    MR1_TFS(MR1__FS_2KB) |
		    MR1_TR0_MULTIPLE;

		sc->sc_stacr_read = STACR_READ;
		sc->sc_stacr_write = STACR_WRITE;
		sc->sc_stacr_bits = STACR_OPBC(opbc);
		sc->sc_stacr_completed = true;
	}

	intr_establish_xname(oaa->opb_irq, IST_LEVEL, IPL_NET, emac_intr, sc,
	    device_xname(self));
	mal_intr_establish(sc->sc_instance, sc);

	if (oaa->opb_flags & OPB_FLAGS_EMAC_HT256)
		sc->sc_htsize = 256;
	else
		sc->sc_htsize = 64;

	/* Clear all interrupts */
	EMAC_WRITE(sc, EMAC_ISR, ISR_ALL);

	/*
	 * Initialise the media structures.
	 */
	mii->mii_ifp = ifp;
	mii->mii_readreg = emac_mii_readreg;
	mii->mii_writereg = emac_mii_writereg;
	mii->mii_statchg = emac_mii_statchg;

	sc->sc_ethercom.ec_mii = mii;
	ifmedia_init(&mii->mii_media, 0, ether_mediachange, ether_mediastatus);
	mii_attach(self, mii, 0xffffffff, mii_phy, MII_OFFSET_ANY,
	    MIIF_DOPAUSE);
	if (LIST_FIRST(&mii->mii_phys) == NULL) {
		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
	} else
		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);

	ifp = &sc->sc_ethercom.ec_if;
	strcpy(ifp->if_xname, xname);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_start = emac_start;
	ifp->if_ioctl = emac_ioctl;
	ifp->if_init = emac_init;
	ifp->if_stop = emac_stop;
	ifp->if_watchdog = emac_watchdog;
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * We can support 802.1Q VLAN-sized frames.
	 */
	sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU;

	/*
	 * Attach the interface.
	 */
	if_attach(ifp);
	if_deferred_start_init(ifp, NULL);
	ether_ifattach(ifp, enaddr);

	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
	    RND_FLAG_DEFAULT);

#ifdef EMAC_EVENT_COUNTERS
	/*
	 * Attach the event counters.
	 */
	evcnt_attach_dynamic(&sc->sc_ev_txintr, EVCNT_TYPE_INTR,
	    NULL, xname, "txintr");
	evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
	    NULL, xname, "rxintr");
	evcnt_attach_dynamic(&sc->sc_ev_txde, EVCNT_TYPE_INTR,
	    NULL, xname, "txde");
	evcnt_attach_dynamic(&sc->sc_ev_rxde, EVCNT_TYPE_INTR,
	    NULL, xname, "rxde");
	evcnt_attach_dynamic(&sc->sc_ev_intr, EVCNT_TYPE_INTR,
	    NULL, xname, "intr");

	evcnt_attach_dynamic(&sc->sc_ev_txreap, EVCNT_TYPE_MISC,
	    NULL, xname, "txreap");
	evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
	    NULL, xname, "txsstall");
	evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
	    NULL, xname, "txdstall");
	evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
	    NULL, xname, "txdrop");
	evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC,
	    NULL, xname, "tu");
#endif /* EMAC_EVENT_COUNTERS */

	/*
	 * Make sure the interface is shut down during reboot.
	 */
	sc->sc_sdhook = shutdownhook_establish(emac_shutdown, sc);
	if (sc->sc_sdhook == NULL)
		aprint_error_dev(self,
		    "WARNING: unable to establish shutdown hook\n");

	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
fail_5:
	for (i = 0; i < EMAC_NRXDESC; i++) {
		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_rxsoft[i].rxs_dmamap);
	}
fail_4:
	for (i = 0; i < EMAC_TXQUEUELEN; i++) {
		if (sc->sc_txsoft[i].txs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_txsoft[i].txs_dmamap);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
fail_2:
	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
	    sizeof(struct emac_control_data));
fail_1:
	bus_dmamem_free(sc->sc_dmat, &seg, nseg);
fail_0:
	return;
}

/*
 * EMAC General interrupt handler
 */
static int
emac_intr(void *arg)
{
	struct emac_softc *sc = arg;
	uint32_t status;

	EMAC_EVCNT_INCR(&sc->sc_ev_intr);
	status = EMAC_READ(sc, EMAC_ISR);

	/* Clear the interrupt status bits. */
	EMAC_WRITE(sc, EMAC_ISR, status);

	return 1;
}
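/*
 * emac_intr() only acknowledges the EMAC's own status interrupts; the
 * Tx/Rx completion work is done in emac_txeob_intr() and
 * emac_rxeob_intr() below, which the MAL driver dispatches via the
 * registration made with mal_intr_establish() in emac_attach().
 */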

static void
emac_shutdown(void *arg)
{
	struct emac_softc *sc = arg;

	emac_stop(&sc->sc_ethercom.ec_if, 0);
}


/*
 * ifnet interface functions
 */

static void
emac_start(struct ifnet *ifp)
{
	struct emac_softc *sc = ifp->if_softc;
	struct mbuf *m0;
	struct emac_txsoft *txs;
	bus_dmamap_t dmamap;
	int error, firsttx, nexttx, lasttx, ofree, seg;

	lasttx = 0;	/* XXX gcc */

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	/*
	 * Remember the previous number of free descriptors.
	 */
	ofree = sc->sc_txfree;

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	for (;;) {
		/* Grab a packet off the queue. */
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		/*
		 * Get a work queue entry.  Reclaim used Tx descriptors if
		 * we are running low.
		 */
		if (sc->sc_txsfree < EMAC_TXQUEUE_GC) {
			emac_txreap(sc);
			if (sc->sc_txsfree == 0) {
				EMAC_EVCNT_INCR(&sc->sc_ev_txsstall);
				break;
			}
		}

		txs = &sc->sc_txsoft[sc->sc_txsnext];
		dmamap = txs->txs_dmamap;

		/*
		 * Load the DMA map.  If this fails, the packet either
		 * didn't fit in the allotted number of segments (in
		 * which case we drop it), or we were short on resources
		 * (in which case we stop and retry later).
		 */
		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
		if (error) {
			if (error == EFBIG) {
				EMAC_EVCNT_INCR(&sc->sc_ev_txdrop);
				aprint_error_ifnet(ifp,
				    "Tx packet consumes too many "
				    "DMA segments, dropping...\n");
				IFQ_DEQUEUE(&ifp->if_snd, m0);
				m_freem(m0);
				continue;
			}
			/* Short on resources, just stop for now. */
			break;
		}

		/*
		 * Ensure we have enough descriptors free to describe
		 * the packet.
		 */
		if (dmamap->dm_nsegs > sc->sc_txfree) {
			/*
			 * Not enough free descriptors to transmit this
			 * packet.  We haven't committed anything yet,
			 * so just unload the DMA map, put the packet
			 * back on the queue, and punt.  Notify the upper
			 * layer that there are no more slots left.
			 */
			ifp->if_flags |= IFF_OACTIVE;
			bus_dmamap_unload(sc->sc_dmat, dmamap);
			EMAC_EVCNT_INCR(&sc->sc_ev_txdstall);
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m0);

		/*
		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
		 */

		/* Sync the DMA map. */
		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		/*
		 * Store a pointer to the packet so that we can free it
		 * later.
		 */
		txs->txs_mbuf = m0;
		txs->txs_firstdesc = sc->sc_txnext;
		txs->txs_ndesc = dmamap->dm_nsegs;

		/*
		 * Initialize the transmit descriptor.
		 */
		firsttx = sc->sc_txnext;
		for (nexttx = sc->sc_txnext, seg = 0;
		     seg < dmamap->dm_nsegs;
		     seg++, nexttx = EMAC_NEXTTX(nexttx)) {
			struct mal_descriptor *txdesc =
			    &sc->sc_txdescs[nexttx];

			/*
			 * If this is the first descriptor we're
			 * enqueueing, don't set the TX_READY bit just
			 * yet.  That could cause a race condition.
			 * We'll do it below.
			 */
			txdesc->md_data = dmamap->dm_segs[seg].ds_addr;
			txdesc->md_data_len = dmamap->dm_segs[seg].ds_len;
			txdesc->md_stat_ctrl =
			    (txdesc->md_stat_ctrl & MAL_TX_WRAP) |
			    (nexttx == firsttx ? 0 : MAL_TX_READY) |
			    EMAC_TXC_GFCS | EMAC_TXC_GPAD;
			lasttx = nexttx;
		}

		/* Set the LAST bit on the last segment. */
		sc->sc_txdescs[lasttx].md_stat_ctrl |= MAL_TX_LAST;

		/*
		 * Set up last segment descriptor to send an interrupt after
		 * that descriptor is transmitted, and bypass existing Tx
		 * descriptor reaping method (for now...).
		 */
		sc->sc_txdescs[lasttx].md_stat_ctrl |= MAL_TX_INTERRUPT;

		txs->txs_lastdesc = lasttx;

		/* Sync the descriptors we're using. */
		EMAC_CDTXSYNC(sc, sc->sc_txnext, dmamap->dm_nsegs,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		/*
		 * The entire packet chain is set up.  Give the
		 * first descriptor to the chip now.
		 */
		sc->sc_txdescs[firsttx].md_stat_ctrl |= MAL_TX_READY;
		EMAC_CDTXSYNC(sc, firsttx, 1,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		/*
		 * Tell the EMAC that a new packet is available.
		 */
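		/*
		 * TMR0_GNP0 is the "Get New Packet" command for Tx
		 * channel 0; TMR0_TFAE_2 selects a Tx FIFO almost-empty
		 * threshold.
		 */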
		EMAC_WRITE(sc, EMAC_TMR0, TMR0_GNP0 | TMR0_TFAE_2);

		/* Advance the tx pointer. */
		sc->sc_txfree -= txs->txs_ndesc;
		sc->sc_txnext = nexttx;

		sc->sc_txsfree--;
		sc->sc_txsnext = EMAC_NEXTTXS(sc->sc_txsnext);

		/*
		 * Pass the packet to any BPF listeners.
		 */
		bpf_mtap(ifp, m0, BPF_D_OUT);
	}

	if (sc->sc_txfree == 0)
		/* No more slots left; notify upper layer. */
		ifp->if_flags |= IFF_OACTIVE;

	if (sc->sc_txfree != ofree)
		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
}

static int
emac_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct emac_softc *sc = ifp->if_softc;
	int s, error;

	s = splnet();

	switch (cmd) {
	case SIOCSIFMTU:
	{
		struct ifreq *ifr = (struct ifreq *)data;
		int maxmtu;

		if (sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU)
			maxmtu = EMAC_MAX_MTU;
		else
			maxmtu = ETHERMTU;

		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > maxmtu)
			error = EINVAL;
		else if ((error = ifioctl_common(ifp, cmd, data)) != ENETRESET)
			break;
		else if (ifp->if_flags & IFF_UP)
			error = emac_init(ifp);
		else
			error = 0;
		break;
	}

	default:
		error = ether_ioctl(ifp, cmd, data);
		if (error == ENETRESET) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			if (ifp->if_flags & IFF_RUNNING)
				error = emac_set_filter(sc);
			else
				error = 0;
		}
	}

	/* try to get more packets going */
	emac_start(ifp);

	splx(s);
	return error;
}

static int
emac_init(struct ifnet *ifp)
{
	struct emac_softc *sc = ifp->if_softc;
	struct emac_rxsoft *rxs;
	const uint8_t *enaddr = CLLADDR(ifp->if_sadl);
	int error, i;

	error = 0;

	/* Cancel any pending I/O. */
	emac_stop(ifp, 0);

	/* Reset the chip to a known state. */
	emac_soft_reset(sc);

	/*
	 * Initialise the transmit descriptor ring.
	 */
	memset(sc->sc_txdescs, 0, sizeof(sc->sc_txdescs));
	/* set wrap on last descriptor */
	sc->sc_txdescs[EMAC_NTXDESC - 1].md_stat_ctrl |= MAL_TX_WRAP;
	EMAC_CDTXSYNC(sc, 0, EMAC_NTXDESC,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	sc->sc_txfree = EMAC_NTXDESC;
	sc->sc_txnext = 0;

	/*
	 * Initialise the transmit job descriptors.
	 */
	for (i = 0; i < EMAC_TXQUEUELEN; i++)
		sc->sc_txsoft[i].txs_mbuf = NULL;
	sc->sc_txsfree = EMAC_TXQUEUELEN;
	sc->sc_txsnext = 0;
	sc->sc_txsdirty = 0;

	/*
	 * Initialise the receive descriptor and receive job
	 * descriptor rings.
	 */
	for (i = 0; i < EMAC_NRXDESC; i++) {
		rxs = &sc->sc_rxsoft[i];
		if (rxs->rxs_mbuf == NULL) {
			if ((error = emac_add_rxbuf(sc, i)) != 0) {
				aprint_error_ifnet(ifp,
				    "unable to allocate or map rx buffer %d,"
				    " error = %d\n",
				    i, error);
				/*
				 * XXX Should attempt to run with fewer receive
				 * XXX buffers instead of just failing.
				 */
				emac_rxdrain(sc);
				goto out;
			}
		} else
			EMAC_INIT_RXDESC(sc, i);
	}
	sc->sc_rxptr = 0;

	/*
	 * Set the current media.
	 */
	if ((error = ether_mediachange(ifp)) != 0)
		goto out;

	/*
	 * Load the MAC address.
	 */
	EMAC_WRITE(sc, EMAC_IAHR, enaddr[0] << 8 | enaddr[1]);
	EMAC_WRITE(sc, EMAC_IALR,
	    enaddr[2] << 24 | enaddr[3] << 16 | enaddr[4] << 8 | enaddr[5]);
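	/*
	 * IAHR holds the top two bytes of the station address and IALR
	 * the remaining four.
	 */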

	/* Enable the transmit and receive channel on the MAL. */
	error = mal_start(sc->sc_instance,
	    EMAC_CDTXADDR(sc, 0), EMAC_CDRXADDR(sc, 0));
	if (error)
		goto out;

	sc->sc_mr1 &= ~MR1_JPSM;
	if (ifp->if_mtu > ETHERMTU)
		/* Enable Jumbo Packet Support Mode */
		sc->sc_mr1 |= MR1_JPSM;

	/* Set fifos, media modes. */
	EMAC_WRITE(sc, EMAC_MR1, sc->sc_mr1);

	/*
	 * Enable Individual and (possibly) Broadcast Address modes,
	 * runt packets, and strip padding.
	 */
	EMAC_WRITE(sc, EMAC_RMR, RMR_IAE | RMR_RRP | RMR_SP | RMR_TFAE_2 |
	    (ifp->if_flags & IFF_PROMISC ? RMR_PME : 0) |
	    (ifp->if_flags & IFF_BROADCAST ? RMR_BAE : 0));

	/*
	 * Set multicast filter.
	 */
	emac_set_filter(sc);

	/*
	 * Set low- and urgent-priority request thresholds.
	 */
	EMAC_WRITE(sc, EMAC_TMR1,
	    ((7 << TMR1_TLR_SHIFT) & TMR1_TLR_MASK) | /* 16 word burst */
	    ((15 << TMR1_TUR_SHIFT) & TMR1_TUR_MASK));
	/*
	 * Set Transmit Request Threshold Register.
	 */
	EMAC_WRITE(sc, EMAC_TRTR, TRTR_256);

	/*
	 * Set high and low receive watermarks.
	 */
	EMAC_WRITE(sc, EMAC_RWMR,
	    30 << RWMR_RLWM_SHIFT | 64 << RWMR_RLWM_SHIFT);
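	/*
	 * XXX: Both values above are shifted by RWMR_RLWM_SHIFT; the
	 * high watermark (64) presumably ought to use the RHWM shift
	 * instead.
	 */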

	/*
	 * Set frame gap.
	 */
	EMAC_WRITE(sc, EMAC_IPGVR, 8);

	/*
	 * Set interrupt status enable bits for EMAC.
	 */
	EMAC_WRITE(sc, EMAC_ISER,
	    ISR_TXPE |		/* TX Parity Error */
	    ISR_RXPE |		/* RX Parity Error */
	    ISR_TXUE |		/* TX Underrun Event */
	    ISR_RXOE |		/* RX Overrun Event */
	    ISR_OVR  |		/* Overrun Error */
	    ISR_PP   |		/* Pause Packet */
	    ISR_BP   |		/* Bad Packet */
	    ISR_RP   |		/* Runt Packet */
	    ISR_SE   |		/* Short Event */
	    ISR_ALE  |		/* Alignment Error */
	    ISR_BFCS |		/* Bad FCS */
	    ISR_PTLE |		/* Packet Too Long Error */
	    ISR_ORE  |		/* Out of Range Error */
	    ISR_IRE  |		/* In Range Error */
	    ISR_SE0  |		/* Signal Quality Error 0 (SQE) */
	    ISR_TE0  |		/* Transmit Error 0 */
	    ISR_MOS  |		/* MMA Operation Succeeded */
	    ISR_MOF);		/* MMA Operation Failed */

	/*
	 * Enable the transmit and receive channel on the EMAC.
	 */
	EMAC_WRITE(sc, EMAC_MR0, MR0_TXE | MR0_RXE);

	/*
	 * Start the one second MII clock.
	 */
	callout_reset(&sc->sc_callout, hz, emac_mii_tick, sc);

	/*
	 * ... all done!
	 */
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

 out:
	if (error) {
		ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
		ifp->if_timer = 0;
		aprint_error_ifnet(ifp, "interface not running\n");
	}
	return error;
}

static void
emac_stop(struct ifnet *ifp, int disable)
{
	struct emac_softc *sc = ifp->if_softc;
	struct emac_txsoft *txs;
	int i;

	/* Stop the one second clock. */
	callout_stop(&sc->sc_callout);

	/* Down the MII */
	mii_down(&sc->sc_mii);

	/* Disable interrupts. */
	EMAC_WRITE(sc, EMAC_ISER, 0);

	/* Disable the receive and transmit channels. */
	mal_stop(sc->sc_instance);

	/* Disable the transmit and receive MACs. */
	EMAC_WRITE(sc, EMAC_MR0,
	    EMAC_READ(sc, EMAC_MR0) & ~(MR0_TXE | MR0_RXE));

	/* Release any queued transmit buffers. */
	for (i = 0; i < EMAC_TXQUEUELEN; i++) {
		txs = &sc->sc_txsoft[i];
		if (txs->txs_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
			m_freem(txs->txs_mbuf);
			txs->txs_mbuf = NULL;
		}
	}

	if (disable)
		emac_rxdrain(sc);

	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;
}

static void
emac_watchdog(struct ifnet *ifp)
{
	struct emac_softc *sc = ifp->if_softc;

	/*
	 * Since we're not interrupting every packet, sweep
	 * up before we report an error.
	 */
	emac_txreap(sc);

	if (sc->sc_txfree != EMAC_NTXDESC) {
		aprint_error_ifnet(ifp,
		    "device timeout (txfree %d txsfree %d txnext %d)\n",
		    sc->sc_txfree, sc->sc_txsfree, sc->sc_txnext);
		if_statinc(ifp, if_oerrors);

		/* Reset the interface. */
		(void)emac_init(ifp);
	} else if (ifp->if_flags & IFF_DEBUG)
		aprint_error_ifnet(ifp, "recovered from device timeout\n");

	/* try to get more packets going */
	emac_start(ifp);
}

static int
emac_add_rxbuf(struct emac_softc *sc, int idx)
{
	struct emac_rxsoft *rxs = &sc->sc_rxsoft[idx];
	struct mbuf *m;
	int error;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return ENOBUFS;

	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return ENOBUFS;
	}

	if (rxs->rxs_mbuf != NULL)
		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);

	rxs->rxs_mbuf = m;

	error = bus_dmamap_load(sc->sc_dmat, rxs->rxs_dmamap,
	    m->m_ext.ext_buf, m->m_ext.ext_size, NULL, BUS_DMA_NOWAIT);
	if (error) {
		aprint_error_dev(sc->sc_dev,
		    "can't load rx DMA map %d, error = %d\n", idx, error);
		panic("emac_add_rxbuf");		/* XXX */
	}

	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);

	EMAC_INIT_RXDESC(sc, idx);

	return 0;
}

static void
emac_rxdrain(struct emac_softc *sc)
{
	struct emac_rxsoft *rxs;
	int i;

	for (i = 0; i < EMAC_NRXDESC; i++) {
		rxs = &sc->sc_rxsoft[i];
		if (rxs->rxs_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
			m_freem(rxs->rxs_mbuf);
			rxs->rxs_mbuf = NULL;
		}
	}
}

static int
emac_set_filter(struct emac_softc *sc)
{
	struct ethercom *ec = &sc->sc_ethercom;
	struct ether_multistep step;
	struct ether_multi *enm;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	uint32_t rmr, crc, mask, tmp, reg, gaht[8] = { 0, 0, 0, 0, 0, 0, 0, 0 };
	int regs, cnt = 0, i;

	if (sc->sc_htsize == 256) {
		reg = EMAC_GAHT256(0);
		regs = 8;
	} else {
		reg = EMAC_GAHT64(0);
		regs = 4;
	}
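	/*
	 * Each hash register holds sc_htsize / regs bits: 16 bits
	 * apiece for the 64-entry table, 32 bits apiece for the
	 * 256-entry one, so "mask" below is the all-ones pattern for a
	 * single register.
	 */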
	mask = (1ULL << (sc->sc_htsize / regs)) - 1;

	rmr = EMAC_READ(sc, EMAC_RMR);
	rmr &= ~(RMR_PMME | RMR_MAE);
	ifp->if_flags &= ~IFF_ALLMULTI;

	ETHER_LOCK(ec);
	ETHER_FIRST_MULTI(step, ec, enm);
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo,
		    enm->enm_addrhi, ETHER_ADDR_LEN) != 0) {
			/*
			 * We must listen to a range of multicast addresses.
			 * For now, just accept all multicasts, rather than
			 * trying to set only those filter bits needed to match
			 * the range.  (At this time, the only use of address
			 * ranges is for IP multicast routing, for which the
			 * range is big enough to require all bits set.)
			 */
			gaht[0] = gaht[1] = gaht[2] = gaht[3] =
			    gaht[4] = gaht[5] = gaht[6] = gaht[7] = mask;
			break;
		}

		crc = ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN);

		if (sc->sc_htsize == 256)
			EMAC_SET_FILTER256(gaht, crc);
		else
			EMAC_SET_FILTER(gaht, crc);

		ETHER_NEXT_MULTI(step, enm);
		cnt++;
	}
	ETHER_UNLOCK(ec);

	for (i = 1, tmp = gaht[0]; i < regs; i++)
		tmp &= gaht[i];
	if (tmp == mask) {
		/* All categories are true. */
		ifp->if_flags |= IFF_ALLMULTI;
		rmr |= RMR_PMME;
	} else if (cnt != 0) {
		/* Some categories are true. */
		for (i = 0; i < regs; i++)
			EMAC_WRITE(sc, reg + (i << 2), gaht[i]);
		rmr |= RMR_MAE;
	}
	EMAC_WRITE(sc, EMAC_RMR, rmr);

	return 0;
}

/*
 * Reap completed Tx descriptors.
 */
static int
emac_txreap(struct emac_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct emac_txsoft *txs;
	int handled, i;
	uint32_t txstat, count;

	EMAC_EVCNT_INCR(&sc->sc_ev_txreap);
	handled = 0;

	ifp->if_flags &= ~IFF_OACTIVE;

	count = 0;
	/*
	 * Go through our Tx list and free mbufs for those
	 * frames that have been transmitted.
	 */
	for (i = sc->sc_txsdirty; sc->sc_txsfree != EMAC_TXQUEUELEN;
	    i = EMAC_NEXTTXS(i), sc->sc_txsfree++) {
		txs = &sc->sc_txsoft[i];

		EMAC_CDTXSYNC(sc, txs->txs_lastdesc,
		    txs->txs_dmamap->dm_nsegs,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		txstat = sc->sc_txdescs[txs->txs_lastdesc].md_stat_ctrl;
		if (txstat & MAL_TX_READY)
			break;

		handled = 1;

		/*
		 * Check for errors and collisions.
		 */
		if (txstat & (EMAC_TXS_UR | EMAC_TXS_ED))
			if_statinc(ifp, if_oerrors);

#ifdef EMAC_EVENT_COUNTERS
		if (txstat & EMAC_TXS_UR)
			EMAC_EVCNT_INCR(&sc->sc_ev_tu);
#endif /* EMAC_EVENT_COUNTERS */

		if (txstat &
		    (EMAC_TXS_EC | EMAC_TXS_MC | EMAC_TXS_SC | EMAC_TXS_LC)) {
			if (txstat & EMAC_TXS_EC)
				if_statadd(ifp, if_collisions, 16);
			else if (txstat & EMAC_TXS_MC)
				if_statadd(ifp, if_collisions, 2); /* XXX? */
			else if (txstat & EMAC_TXS_SC)
				if_statinc(ifp, if_collisions);
			if (txstat & EMAC_TXS_LC)
				if_statinc(ifp, if_collisions);
		} else
			if_statinc(ifp, if_opackets);

		if (ifp->if_flags & IFF_DEBUG) {
			if (txstat & EMAC_TXS_ED)
				aprint_error_ifnet(ifp, "excessive deferral\n");
			if (txstat & EMAC_TXS_EC)
				aprint_error_ifnet(ifp,
				    "excessive collisions\n");
		}

		sc->sc_txfree += txs->txs_ndesc;
		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
		m_freem(txs->txs_mbuf);
		txs->txs_mbuf = NULL;

		count++;
	}

	/* Update the dirty transmit buffer pointer. */
	sc->sc_txsdirty = i;

	/*
	 * If there are no more pending transmissions, cancel the watchdog
	 * timer.
	 */
	if (sc->sc_txsfree == EMAC_TXQUEUELEN)
		ifp->if_timer = 0;

	if (count != 0)
		rnd_add_uint32(&sc->rnd_source, count);

	return handled;
}


/*
 * Reset functions
 */

static void
emac_soft_reset(struct emac_softc *sc)
{
	uint32_t sdr;
	int t = 0;

	/*
	 * The PHY must provide a TX clock in order to perform a soft
	 * reset of the EMAC.  If none is present, select the internal
	 * clock, SDR0_MFR[E0CS, E1CS].  After the soft reset, select
	 * the external clock again.
	 */

	sdr = mfsdr(DCR_SDR0_MFR);
	sdr |= SDR0_MFR_ECS(sc->sc_instance);
	mtsdr(DCR_SDR0_MFR, sdr);

	EMAC_WRITE(sc, EMAC_MR0, MR0_SRST);

	sdr = mfsdr(DCR_SDR0_MFR);
	sdr &= ~SDR0_MFR_ECS(sc->sc_instance);
	mtsdr(DCR_SDR0_MFR, sdr);

	delay(5);

	/* wait for the reset to finish */
	while (EMAC_READ(sc, EMAC_MR0) & MR0_SRST) {
		if (++t == 1000000 /* 1sec XXXXX */) {
			aprint_error_dev(sc->sc_dev, "Soft Reset failed\n");
			return;
		}
		delay(1);
	}
}

static void
emac_smart_reset(struct emac_softc *sc)
{
	uint32_t mr0;
	int t = 0;

	mr0 = EMAC_READ(sc, EMAC_MR0);
	if (mr0 & (MR0_TXE | MR0_RXE)) {
		mr0 &= ~(MR0_TXE | MR0_RXE);
		EMAC_WRITE(sc, EMAC_MR0, mr0);

		/* wait for idle state */
		while ((EMAC_READ(sc, EMAC_MR0) & (MR0_TXI | MR0_RXI)) !=
		    (MR0_TXI | MR0_RXI)) {
			if (++t == 1000000 /* 1sec XXXXX */) {
				aprint_error_dev(sc->sc_dev,
				    "Smart Reset failed\n");
				return;
			}
			delay(1);
		}
	}
}
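/*
 * Despite its name, emac_smart_reset() does not assert a soft reset: it
 * just disables the Tx and Rx channels and waits for them to report
 * idle.  emac_mii_statchg() uses this lighter-weight quiesce in place
 * of a full reset before rewriting MR1.
 */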


/*
 * MII related functions
 */

static int
emac_mii_readreg(device_t self, int phy, int reg, uint16_t *val)
{
	struct emac_softc *sc = device_private(self);
	uint32_t sta_reg;
	int rv;

	if (sc->sc_rmii_enable)
		sc->sc_rmii_enable(device_parent(self), sc->sc_instance);

	/* wait for PHY data transfer to complete */
	if ((rv = emac_mii_wait(sc)) != 0)
		goto fail;

	sta_reg =
	    sc->sc_stacr_read		|
	    (reg << STACR_PRA_SHIFT)	|
	    (phy << STACR_PCDA_SHIFT)	|
	    sc->sc_stacr_bits;
	EMAC_WRITE(sc, EMAC_STACR, sta_reg);

	if ((rv = emac_mii_wait(sc)) != 0)
		goto fail;
	sta_reg = EMAC_READ(sc, EMAC_STACR);

	if (sta_reg & STACR_PHYE) {
		rv = -1;
		goto fail;
	}
	*val = sta_reg >> STACR_PHYD_SHIFT;

fail:
	if (sc->sc_rmii_disable)
		sc->sc_rmii_disable(device_parent(self), sc->sc_instance);
	return rv;
}

static int
emac_mii_writereg(device_t self, int phy, int reg, uint16_t val)
{
	struct emac_softc *sc = device_private(self);
	uint32_t sta_reg;
	int rv;

	if (sc->sc_rmii_enable)
		sc->sc_rmii_enable(device_parent(self), sc->sc_instance);

	/* wait for PHY data transfer to complete */
	if ((rv = emac_mii_wait(sc)) != 0)
		goto out;

	sta_reg =
	    (val << STACR_PHYD_SHIFT)	|
	    sc->sc_stacr_write		|
	    (reg << STACR_PRA_SHIFT)	|
	    (phy << STACR_PCDA_SHIFT)	|
	    sc->sc_stacr_bits;
	EMAC_WRITE(sc, EMAC_STACR, sta_reg);

	if ((rv = emac_mii_wait(sc)) != 0)
		goto out;
	if (EMAC_READ(sc, EMAC_STACR) & STACR_PHYE) {
		aprint_error_dev(sc->sc_dev, "MII PHY Error\n");
		rv = -1;
	}

out:
	if (sc->sc_rmii_disable)
		sc->sc_rmii_disable(device_parent(self), sc->sc_instance);

	return rv;
}

static void
emac_mii_statchg(struct ifnet *ifp)
{
	struct emac_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_mii;

	/*
	 * MR1 can only be written immediately after a reset...
	 */
	emac_smart_reset(sc);

	sc->sc_mr1 &= ~(MR1_FDE | MR1_ILE | MR1_EIFC | MR1_MF_MASK | MR1_IST);
	if (mii->mii_media_active & IFM_FDX)
		sc->sc_mr1 |= (MR1_FDE | MR1_EIFC | MR1_IST);
	if (mii->mii_media_active & IFM_FLOW)
		sc->sc_mr1 |= MR1_EIFC;
	if (mii->mii_media_active & IFM_LOOP)
		sc->sc_mr1 |= MR1_ILE;
	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_1000_T:
		sc->sc_mr1 |= (MR1_MF_1000MBS | MR1_IST);
		break;

	case IFM_100_TX:
		sc->sc_mr1 |= (MR1_MF_100MBS | MR1_IST);
		break;

	case IFM_10_T:
		sc->sc_mr1 |= MR1_MF_10MBS;
		break;

	case IFM_NONE:
		break;

	default:
		aprint_error_dev(sc->sc_dev, "unknown sub-type %d\n",
		    IFM_SUBTYPE(mii->mii_media_active));
		break;
	}
	if (sc->sc_rmii_speed)
		sc->sc_rmii_speed(device_parent(sc->sc_dev), sc->sc_instance,
		    IFM_SUBTYPE(mii->mii_media_active));

	EMAC_WRITE(sc, EMAC_MR1, sc->sc_mr1);

	/* Enable TX and RX if already RUNNING */
	if (ifp->if_flags & IFF_RUNNING)
		EMAC_WRITE(sc, EMAC_MR0, MR0_TXE | MR0_RXE);
}

static uint32_t
emac_mii_wait(struct emac_softc *sc)
{
	int i;
	uint32_t oc;

	/* wait for PHY data transfer to complete */
	i = 0;
	oc = EMAC_READ(sc, EMAC_STACR) & STACR_OC;
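	/*
	 * sc_stacr_completed records which sense of the OC bit means
	 * "operation complete": set on the older STA parts, clear on
	 * STA v2 parts.  Spin until OC matches that sense.
	 */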
	while ((oc == STACR_OC) != sc->sc_stacr_completed) {
		delay(7);
		if (i++ > 5) {
			aprint_error_dev(sc->sc_dev, "MII timed out\n");
			return ETIMEDOUT;
		}
		oc = EMAC_READ(sc, EMAC_STACR) & STACR_OC;
	}
	return 0;
}

static void
emac_mii_tick(void *arg)
{
	struct emac_softc *sc = arg;
	int s;

	if (!device_is_active(sc->sc_dev))
		return;

	s = splnet();
	mii_tick(&sc->sc_mii);
	splx(s);

	callout_reset(&sc->sc_callout, hz, emac_mii_tick, sc);
}

int
emac_txeob_intr(void *arg)
{
	struct emac_softc *sc = arg;
	int handled = 0;

	EMAC_EVCNT_INCR(&sc->sc_ev_txintr);
	handled |= emac_txreap(sc);

	/* try to get more packets going */
	if_schedule_deferred_start(&sc->sc_ethercom.ec_if);

	return handled;
}

int
emac_rxeob_intr(void *arg)
{
	struct emac_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct emac_rxsoft *rxs;
	struct mbuf *m;
	uint32_t rxstat, count;
	int i, len;

	EMAC_EVCNT_INCR(&sc->sc_ev_rxintr);

	count = 0;
	for (i = sc->sc_rxptr; ; i = EMAC_NEXTRX(i)) {
		rxs = &sc->sc_rxsoft[i];

		EMAC_CDRXSYNC(sc, i,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		rxstat = sc->sc_rxdescs[i].md_stat_ctrl;

		if (rxstat & MAL_RX_EMPTY) {
			/*
			 * We have processed all of the receive buffers.
			 */
			/* Flush current empty descriptor */
			EMAC_CDRXSYNC(sc, i,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			break;
		}

		/*
		 * If an error occurred, update stats, clear the status
		 * word, and leave the packet buffer in place.  It will
		 * simply be reused the next time the ring comes around.
		 */
		if (rxstat & (EMAC_RXS_OE | EMAC_RXS_BP | EMAC_RXS_SE |
		    EMAC_RXS_AE | EMAC_RXS_BFCS | EMAC_RXS_PTL | EMAC_RXS_ORE |
		    EMAC_RXS_IRE)) {
#define	PRINTERR(bit, str)					\
			if (rxstat & (bit))			\
				aprint_error_ifnet(ifp,		\
				    "receive error: %s\n", str)
			if_statinc(ifp, if_ierrors);
			PRINTERR(EMAC_RXS_OE, "overrun error");
			PRINTERR(EMAC_RXS_BP, "bad packet");
			PRINTERR(EMAC_RXS_RP, "runt packet");
			PRINTERR(EMAC_RXS_SE, "short event");
			PRINTERR(EMAC_RXS_AE, "alignment error");
			PRINTERR(EMAC_RXS_BFCS, "bad FCS");
			PRINTERR(EMAC_RXS_PTL, "packet too long");
			PRINTERR(EMAC_RXS_ORE, "out of range error");
			PRINTERR(EMAC_RXS_IRE, "in range error");
#undef PRINTERR
			EMAC_INIT_RXDESC(sc, i);
			continue;
		}

		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

		/*
		 * No errors; receive the packet.  Note, the 405GP emac
		 * includes the CRC with every packet.
		 */
		len = sc->sc_rxdescs[i].md_data_len - ETHER_CRC_LEN;

		/*
		 * If the packet is small enough to fit in a
		 * single header mbuf, allocate one and copy
		 * the data into it.  This greatly reduces
		 * memory consumption when we receive lots
		 * of small packets.
		 *
		 * Otherwise, we add a new buffer to the receive
		 * chain.  If this fails, we drop the packet and
		 * recycle the old buffer.
		 */
		if (emac_copy_small != 0 && len <= MHLEN) {
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL)
				goto dropit;
			memcpy(mtod(m, void *),
			    mtod(rxs->rxs_mbuf, void *), len);
			EMAC_INIT_RXDESC(sc, i);
			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
			    rxs->rxs_dmamap->dm_mapsize,
			    BUS_DMASYNC_PREREAD);
		} else {
			m = rxs->rxs_mbuf;
			if (emac_add_rxbuf(sc, i) != 0) {
 dropit:
				if_statinc(ifp, if_ierrors);
				EMAC_INIT_RXDESC(sc, i);
				bus_dmamap_sync(sc->sc_dmat,
				    rxs->rxs_dmamap, 0,
				    rxs->rxs_dmamap->dm_mapsize,
				    BUS_DMASYNC_PREREAD);
				continue;
			}
		}

		m_set_rcvif(m, ifp);
		m->m_pkthdr.len = m->m_len = len;

		/* Pass it on. */
		if_percpuq_enqueue(ifp->if_percpuq, m);

		count++;
	}

	/* Update the receive pointer. */
	sc->sc_rxptr = i;

	if (count != 0)
		rnd_add_uint32(&sc->rnd_source, count);

	return 1;
}

int
emac_txde_intr(void *arg)
{
	struct emac_softc *sc = arg;

	EMAC_EVCNT_INCR(&sc->sc_ev_txde);
	aprint_error_dev(sc->sc_dev, "emac_txde_intr\n");
	return 1;
}

int
emac_rxde_intr(void *arg)
{
	struct emac_softc *sc = arg;
	int i;

	EMAC_EVCNT_INCR(&sc->sc_ev_rxde);
	aprint_error_dev(sc->sc_dev, "emac_rxde_intr\n");
	/*
	 * XXX!
	 * This is a bit drastic; we just drop all descriptors that aren't
	 * "clean".  We should probably send any that are up the stack.
	 */
	for (i = 0; i < EMAC_NRXDESC; i++) {
		EMAC_CDRXSYNC(sc, i,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		if (sc->sc_rxdescs[i].md_data_len != MCLBYTES)
			EMAC_INIT_RXDESC(sc, i);
	}

	return 1;
}