1 /* $NetBSD: if_mec.c,v 1.39 2009/09/02 17:22:53 tsutsui Exp $ */
2 
3 /*-
4  * Copyright (c) 2004, 2008 Izumi Tsutsui.  All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25  */
26 
27 /*
28  * Copyright (c) 2003 Christopher SEKIYA
29  * All rights reserved.
30  *
31  * Redistribution and use in source and binary forms, with or without
32  * modification, are permitted provided that the following conditions
33  * are met:
34  * 1. Redistributions of source code must retain the above copyright
35  *    notice, this list of conditions and the following disclaimer.
36  * 2. Redistributions in binary form must reproduce the above copyright
37  *    notice, this list of conditions and the following disclaimer in the
38  *    documentation and/or other materials provided with the distribution.
39  * 3. All advertising materials mentioning features or use of this software
40  *    must display the following acknowledgement:
41  *          This product includes software developed for the
42  *          NetBSD Project.  See http://www.NetBSD.org/ for
43  *          information about NetBSD.
44  * 4. The name of the author may not be used to endorse or promote products
45  *    derived from this software without specific prior written permission.
46  *
47  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
48  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
49  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
50  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
51  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
52  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
53  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
54  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
55  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
56  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
57  */
58 
59 /*
60  * MACE MAC-110 Ethernet driver
61  */
62 
63 #include <sys/cdefs.h>
64 __KERNEL_RCSID(0, "$NetBSD: if_mec.c,v 1.39 2009/09/02 17:22:53 tsutsui Exp $");
65 
66 #include "opt_ddb.h"
67 #include "bpfilter.h"
68 #include "rnd.h"
69 
70 #include <sys/param.h>
71 #include <sys/systm.h>
72 #include <sys/device.h>
73 #include <sys/callout.h>
74 #include <sys/mbuf.h>
75 #include <sys/malloc.h>
76 #include <sys/kernel.h>
77 #include <sys/socket.h>
78 #include <sys/ioctl.h>
79 #include <sys/errno.h>
80 
81 #if NRND > 0
82 #include <sys/rnd.h>
83 #endif
84 
85 #include <net/if.h>
86 #include <net/if_dl.h>
87 #include <net/if_media.h>
88 #include <net/if_ether.h>
89 
90 #include <netinet/in.h>
91 #include <netinet/in_systm.h>
92 #include <netinet/ip.h>
93 #include <netinet/tcp.h>
94 #include <netinet/udp.h>
95 
96 #if NBPFILTER > 0
97 #include <net/bpf.h>
98 #endif
99 
100 #include <machine/bus.h>
101 #include <machine/intr.h>
102 #include <machine/machtype.h>
103 
104 #include <dev/mii/mii.h>
105 #include <dev/mii/miivar.h>
106 
107 #include <sgimips/mace/macevar.h>
108 #include <sgimips/mace/if_mecreg.h>
109 
110 #include <dev/arcbios/arcbios.h>
111 #include <dev/arcbios/arcbiosvar.h>
112 
113 /* #define MEC_DEBUG */
114 
115 #ifdef MEC_DEBUG
116 #define MEC_DEBUG_RESET		0x01
117 #define MEC_DEBUG_START		0x02
118 #define MEC_DEBUG_STOP		0x04
119 #define MEC_DEBUG_INTR		0x08
120 #define MEC_DEBUG_RXINTR	0x10
121 #define MEC_DEBUG_TXINTR	0x20
122 #define MEC_DEBUG_TXSEGS	0x40
123 uint32_t mec_debug = 0;
124 #define DPRINTF(x, y)	if (mec_debug & (x)) printf y
125 #else
126 #define DPRINTF(x, y)	/* nothing */
127 #endif
128 
129 /* #define MEC_EVENT_COUNTERS */
130 
131 #ifdef MEC_EVENT_COUNTERS
132 #define MEC_EVCNT_INCR(ev)	(ev)->ev_count++
133 #else
134 #define MEC_EVCNT_INCR(ev)	do {} while (/* CONSTCOND */ 0)
135 #endif
136 
137 /*
138  * Transmit descriptor list size
139  */
140 #define MEC_NTXDESC		64
141 #define MEC_NTXDESC_MASK	(MEC_NTXDESC - 1)
142 #define MEC_NEXTTX(x)		(((x) + 1) & MEC_NTXDESC_MASK)
143 #define MEC_NTXDESC_RSVD	4
144 #define MEC_NTXDESC_INTR	8
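/*
 * For reference: MEC_NEXTTX() just wraps the ring index, so with
 * MEC_NTXDESC = 64, MEC_NEXTTX(62) = 63 and MEC_NEXTTX(63) = 0.
 */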
145 
146 /*
147  * software state for TX
148  */
149 struct mec_txsoft {
150 	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
151 	bus_dmamap_t txs_dmamap;	/* our DMA map */
152 	uint32_t txs_flags;
153 #define MEC_TXS_BUFLEN_MASK	0x0000007f	/* data len in txd_buf */
154 #define MEC_TXS_TXDPTR		0x00000080	/* concat txd_ptr is used */
155 };
156 
157 /*
158  * Transmit buffer descriptor
159  */
160 #define MEC_TXDESCSIZE		128
161 #define MEC_NTXPTR		3
162 #define MEC_TXD_BUFOFFSET	sizeof(uint64_t)
163 #define MEC_TXD_BUFOFFSET1	\
164 	(sizeof(uint64_t) + sizeof(uint64_t) * MEC_NTXPTR)
165 #define MEC_TXD_BUFSIZE		(MEC_TXDESCSIZE - MEC_TXD_BUFOFFSET)
166 #define MEC_TXD_BUFSIZE1	(MEC_TXDESCSIZE - MEC_TXD_BUFOFFSET1)
167 #define MEC_TXD_BUFSTART(len)	(MEC_TXD_BUFSIZE - (len))
168 #define MEC_TXD_ALIGN		8
169 #define MEC_TXD_ALIGNMASK	(MEC_TXD_ALIGN - 1)
170 #define MEC_TXD_ROUNDUP(addr)	\
171 	(((addr) + MEC_TXD_ALIGNMASK) & ~(uint64_t)MEC_TXD_ALIGNMASK)
172 #define MEC_NTXSEG		16
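/*
 * For reference, the buffer layout described by the macros above works
 * out to: MEC_TXD_BUFOFFSET = 8 (just past txd_cmd), so MEC_TXD_BUFSIZE
 * is 120; MEC_TXD_BUFOFFSET1 = 8 + 8 * 3 = 32 (past txd_cmd and the three
 * concatenate pointers), so MEC_TXD_BUFSIZE1 is 96.  MEC_TXD_BUFSTART(len)
 * places len bytes at the tail of the buffer, e.g. MEC_TXD_BUFSTART(60) = 60.
 */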
173 
174 struct mec_txdesc {
175 	volatile uint64_t txd_cmd;
176 #define MEC_TXCMD_DATALEN	0x000000000000ffff	/* data length */
177 #define MEC_TXCMD_BUFSTART	0x00000000007f0000	/* start byte offset */
178 #define  TXCMD_BUFSTART(x)	((x) << 16)
179 #define MEC_TXCMD_TERMDMA	0x0000000000800000	/* stop DMA on abort */
180 #define MEC_TXCMD_TXINT		0x0000000001000000	/* INT after TX done */
181 #define MEC_TXCMD_PTR1		0x0000000002000000	/* valid 1st txd_ptr */
182 #define MEC_TXCMD_PTR2		0x0000000004000000	/* valid 2nd txd_ptr */
183 #define MEC_TXCMD_PTR3		0x0000000008000000	/* valid 3rd txd_ptr */
184 #define MEC_TXCMD_UNUSED	0xfffffffff0000000ULL	/* should be zero */
185 
186 #define txd_stat	txd_cmd
187 #define MEC_TXSTAT_LEN		0x000000000000ffff	/* TX length */
188 #define MEC_TXSTAT_COLCNT	0x00000000000f0000	/* collision count */
189 #define MEC_TXSTAT_COLCNT_SHIFT	16
190 #define MEC_TXSTAT_LATE_COL	0x0000000000100000	/* late collision */
191 #define MEC_TXSTAT_CRCERROR	0x0000000000200000	/* */
192 #define MEC_TXSTAT_DEFERRED	0x0000000000400000	/* */
193 #define MEC_TXSTAT_SUCCESS	0x0000000000800000	/* TX complete */
194 #define MEC_TXSTAT_TOOBIG	0x0000000001000000	/* */
195 #define MEC_TXSTAT_UNDERRUN	0x0000000002000000	/* */
196 #define MEC_TXSTAT_COLLISIONS	0x0000000004000000	/* */
197 #define MEC_TXSTAT_EXDEFERRAL	0x0000000008000000	/* */
198 #define MEC_TXSTAT_COLLIDED	0x0000000010000000	/* */
199 #define MEC_TXSTAT_UNUSED	0x7fffffffe0000000ULL	/* should be zero */
200 #define MEC_TXSTAT_SENT		0x8000000000000000ULL	/* packet sent */
201 
202 	union {
203 		uint64_t txptr[MEC_NTXPTR];
204 #define MEC_TXPTR_UNUSED2	0x0000000000000007	/* should be zero */
205 #define MEC_TXPTR_DMAADDR	0x00000000fffffff8	/* TX DMA address */
206 #define MEC_TXPTR_LEN		0x0000ffff00000000ULL	/* buffer length */
207 #define  TXPTR_LEN(x)		((uint64_t)(x) << 32)
208 #define MEC_TXPTR_UNUSED1	0xffff000000000000ULL	/* should be zero */
209 
210 		uint8_t txbuf[MEC_TXD_BUFSIZE];
211 	} txd_data;
212 #define txd_ptr		txd_data.txptr
213 #define txd_buf		txd_data.txbuf
214 };
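/*
 * Example txd_cmd encoding: for a minimum-size frame (ETHER_PAD_LEN = 60
 * bytes) copied entirely into txd_buf, mec_start() below builds
 *
 *	txd_cmd = TXCMD_BUFSTART(MEC_TXDESCSIZE - 60) | (60 - 1)
 *		= (68 << 16) | 59 = 0x0044003b
 *
 * i.e. the data starts 68 bytes into the 128-byte descriptor and
 * MEC_TXCMD_DATALEN holds the frame length minus one.
 */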
215 
216 /*
217  * Receive buffer size
218  */
219 #define MEC_NRXDESC		16
220 #define MEC_NRXDESC_MASK	(MEC_NRXDESC - 1)
221 #define MEC_NEXTRX(x)		(((x) + 1) & MEC_NRXDESC_MASK)
222 
223 /*
224  * Receive buffer description
225  */
226 #define MEC_RXDESCSIZE		4096	/* umm, should be 4kbyte aligned */
227 #define MEC_RXD_NRXPAD		3
228 #define MEC_RXD_DMAOFFSET	(1 + MEC_RXD_NRXPAD)
229 #define MEC_RXD_BUFOFFSET	(MEC_RXD_DMAOFFSET * sizeof(uint64_t))
230 #define MEC_RXD_BUFSIZE		(MEC_RXDESCSIZE - MEC_RXD_BUFOFFSET)
231 
232 struct mec_rxdesc {
233 	volatile uint64_t rxd_stat;
234 #define MEC_RXSTAT_LEN		0x000000000000ffff	/* data length */
235 #define MEC_RXSTAT_VIOLATION	0x0000000000010000	/* code violation (?) */
236 #define MEC_RXSTAT_UNUSED2	0x0000000000020000	/* unknown (?) */
237 #define MEC_RXSTAT_CRCERROR	0x0000000000040000	/* CRC error */
238 #define MEC_RXSTAT_MULTICAST	0x0000000000080000	/* multicast packet */
239 #define MEC_RXSTAT_BROADCAST	0x0000000000100000	/* broadcast packet */
240 #define MEC_RXSTAT_INVALID	0x0000000000200000	/* invalid preamble */
241 #define MEC_RXSTAT_LONGEVENT	0x0000000000400000	/* long packet */
242 #define MEC_RXSTAT_BADPACKET	0x0000000000800000	/* bad packet */
243 #define MEC_RXSTAT_CAREVENT	0x0000000001000000	/* carrier event */
244 #define MEC_RXSTAT_MATCHMCAST	0x0000000002000000	/* match multicast */
245 #define MEC_RXSTAT_MATCHMAC	0x0000000004000000	/* match MAC */
246 #define MEC_RXSTAT_SEQNUM	0x00000000f8000000	/* sequence number */
247 #define MEC_RXSTAT_CKSUM	0x0000ffff00000000ULL	/* IP checksum */
248 #define  RXSTAT_CKSUM(x)	(((uint64_t)(x) & MEC_RXSTAT_CKSUM) >> 32)
249 #define MEC_RXSTAT_UNUSED1	0x7fff000000000000ULL	/* should be zero */
250 #define MEC_RXSTAT_RECEIVED	0x8000000000000000ULL	/* set to 1 on RX */
251 	uint64_t rxd_pad1[MEC_RXD_NRXPAD];
252 	uint8_t  rxd_buf[MEC_RXD_BUFSIZE];
253 };
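/*
 * For reference, the RX descriptor layout is an 8-byte rxd_stat word,
 * MEC_RXD_NRXPAD (3) pad words, then the data buffer, so
 * MEC_RXD_BUFOFFSET = 4 * 8 = 32 and MEC_RXD_BUFSIZE = 4096 - 32 = 4064.
 * RXSTAT_CKSUM() extracts the 16-bit IP checksum from bits 32-47 of
 * rxd_stat.
 */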
254 
255 /*
256  * control structures for DMA ops
257  */
258 struct mec_control_data {
259 	/*
260 	 * TX descriptors and buffers
261 	 */
262 	struct mec_txdesc mcd_txdesc[MEC_NTXDESC];
263 
264 	/*
265 	 * RX descriptors and buffers
266 	 */
267 	struct mec_rxdesc mcd_rxdesc[MEC_NRXDESC];
268 };
269 
270 /*
271  * It _seems_ there are some restrictions on descriptor address:
272  *
273  * - Base address of txdescs should be 8kbyte aligned
274  * - Each txdesc should be 128byte aligned
275  * - Each rxdesc should be 4kbyte aligned
276  *
277  * So we should specify 8kbyte alignment to allocate txdescs.
278  * In this case, sizeof(struct mec_txdesc) * MEC_NTXDESC is 8192,
279  * so rxdescs also end up 4kbyte aligned.
280  */
281 #define MEC_CONTROL_DATA_ALIGN	(8 * 1024)
282 
283 #define MEC_CDOFF(x)	offsetof(struct mec_control_data, x)
284 #define MEC_CDTXOFF(x)	MEC_CDOFF(mcd_txdesc[(x)])
285 #define MEC_CDRXOFF(x)	MEC_CDOFF(mcd_rxdesc[(x)])
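#if 0
/*
 * Disabled sketch of compile-time checks for the layout assumptions above,
 * using a hand-rolled negative-array-size trick (nothing here is an
 * existing kernel interface).
 */
typedef char mec_txdesc_size_check[
    (sizeof(struct mec_txdesc) == MEC_TXDESCSIZE) ? 1 : -1];
typedef char mec_rxdesc_size_check[
    (sizeof(struct mec_rxdesc) == MEC_RXDESCSIZE) ? 1 : -1];
typedef char mec_rxdesc_offset_check[
    (offsetof(struct mec_control_data, mcd_rxdesc) ==
    MEC_TXDESCSIZE * MEC_NTXDESC) ? 1 : -1];
#endif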
286 
287 /*
288  * software state per device
289  */
290 struct mec_softc {
291 	device_t sc_dev;		/* generic device structures */
292 
293 	bus_space_tag_t sc_st;		/* bus_space tag */
294 	bus_space_handle_t sc_sh;	/* bus_space handle */
295 	bus_dma_tag_t sc_dmat;		/* bus_dma tag */
296 
297 	struct ethercom sc_ethercom;	/* Ethernet common part */
298 
299 	struct mii_data sc_mii;		/* MII/media information */
300 	int sc_phyaddr;			/* MII address */
301 	struct callout sc_tick_ch;	/* tick callout */
302 
303 	uint8_t sc_enaddr[ETHER_ADDR_LEN]; /* MAC address */
304 
305 	bus_dmamap_t sc_cddmamap;	/* bus_dma map for control data */
306 #define sc_cddma	sc_cddmamap->dm_segs[0].ds_addr
307 
308 	/* pointer to allocated control data */
309 	struct mec_control_data *sc_control_data;
310 #define sc_txdesc	sc_control_data->mcd_txdesc
311 #define sc_rxdesc	sc_control_data->mcd_rxdesc
312 
313 	/* software state for TX descs */
314 	struct mec_txsoft sc_txsoft[MEC_NTXDESC];
315 
316 	int sc_txpending;		/* number of TX requests pending */
317 	int sc_txdirty;			/* first dirty TX descriptor */
318 	int sc_txlast;			/* last used TX descriptor */
319 
320 	int sc_rxptr;			/* next ready RX buffer */
321 
322 #if NRND > 0
323 	rndsource_element_t sc_rnd_source; /* random source */
324 #endif
325 #ifdef MEC_EVENT_COUNTERS
326 	struct evcnt sc_ev_txpkts;	/* TX packets queued total */
327 	struct evcnt sc_ev_txdpad;	/* TX packets padded in txdesc buf */
328 	struct evcnt sc_ev_txdbuf;	/* TX packets copied to txdesc buf */
329 	struct evcnt sc_ev_txptr1;	/* TX packets using concat ptr1 */
330 	struct evcnt sc_ev_txptr1a;	/* TX packets  w/ptr1  ~160bytes */
331 	struct evcnt sc_ev_txptr1b;	/* TX packets  w/ptr1  ~256bytes */
332 	struct evcnt sc_ev_txptr1c;	/* TX packets  w/ptr1  ~512bytes */
333 	struct evcnt sc_ev_txptr1d;	/* TX packets  w/ptr1 ~1024bytes */
334 	struct evcnt sc_ev_txptr1e;	/* TX packets  w/ptr1 >1024bytes */
335 	struct evcnt sc_ev_txptr2;	/* TX packets using concat ptr1,2 */
336 	struct evcnt sc_ev_txptr2a;	/* TX packets  w/ptr2  ~160bytes */
337 	struct evcnt sc_ev_txptr2b;	/* TX packets  w/ptr2  ~256bytes */
338 	struct evcnt sc_ev_txptr2c;	/* TX packets  w/ptr2  ~512bytes */
339 	struct evcnt sc_ev_txptr2d;	/* TX packets  w/ptr2 ~1024bytes */
340 	struct evcnt sc_ev_txptr2e;	/* TX packets  w/ptr2 >1024bytes */
341 	struct evcnt sc_ev_txptr3;	/* TX packets using concat ptr1,2,3 */
342 	struct evcnt sc_ev_txptr3a;	/* TX packets  w/ptr3  ~160bytes */
343 	struct evcnt sc_ev_txptr3b;	/* TX packets  w/ptr3  ~256bytes */
344 	struct evcnt sc_ev_txptr3c;	/* TX packets  w/ptr3  ~512bytes */
345 	struct evcnt sc_ev_txptr3d;	/* TX packets  w/ptr3 ~1024bytes */
346 	struct evcnt sc_ev_txptr3e;	/* TX packets  w/ptr3 >1024bytes */
347 	struct evcnt sc_ev_txmbuf;	/* TX packets copied to new mbufs */
348 	struct evcnt sc_ev_txmbufa;	/* TX packets  w/mbuf  ~160bytes */
349 	struct evcnt sc_ev_txmbufb;	/* TX packets  w/mbuf  ~256bytes */
350 	struct evcnt sc_ev_txmbufc;	/* TX packets  w/mbuf  ~512bytes */
351 	struct evcnt sc_ev_txmbufd;	/* TX packets  w/mbuf ~1024bytes */
352 	struct evcnt sc_ev_txmbufe;	/* TX packets  w/mbuf >1024bytes */
353 	struct evcnt sc_ev_txptrs;	/* TX packets using ptrs total */
354 	struct evcnt sc_ev_txptrc0;	/* TX packets  w/ptrs no hdr chain */
355 	struct evcnt sc_ev_txptrc1;	/* TX packets  w/ptrs  1 hdr chain */
356 	struct evcnt sc_ev_txptrc2;	/* TX packets  w/ptrs  2 hdr chains */
357 	struct evcnt sc_ev_txptrc3;	/* TX packets  w/ptrs  3 hdr chains */
358 	struct evcnt sc_ev_txptrc4;	/* TX packets  w/ptrs  4 hdr chains */
359 	struct evcnt sc_ev_txptrc5;	/* TX packets  w/ptrs  5 hdr chains */
360 	struct evcnt sc_ev_txptrc6;	/* TX packets  w/ptrs >5 hdr chains */
361 	struct evcnt sc_ev_txptrh0;	/* TX packets  w/ptrs  ~8bytes hdr */
362 	struct evcnt sc_ev_txptrh1;	/* TX packets  w/ptrs ~16bytes hdr */
363 	struct evcnt sc_ev_txptrh2;	/* TX packets  w/ptrs ~32bytes hdr */
364 	struct evcnt sc_ev_txptrh3;	/* TX packets  w/ptrs ~64bytes hdr */
365 	struct evcnt sc_ev_txptrh4;	/* TX packets  w/ptrs ~80bytes hdr */
366 	struct evcnt sc_ev_txptrh5;	/* TX packets  w/ptrs ~96bytes hdr */
367 	struct evcnt sc_ev_txdstall;	/* TX stalled due to no txdesc */
368 	struct evcnt sc_ev_txempty;	/* TX empty interrupts */
369 	struct evcnt sc_ev_txsent;	/* TX sent interrupts */
370 #endif
371 };
372 
373 #define MEC_CDTXADDR(sc, x)	((sc)->sc_cddma + MEC_CDTXOFF(x))
374 #define MEC_CDRXADDR(sc, x)	((sc)->sc_cddma + MEC_CDRXOFF(x))
375 
376 #define MEC_TXDESCSYNC(sc, x, ops)					\
377 	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
378 	    MEC_CDTXOFF(x), MEC_TXDESCSIZE, (ops))
379 #define MEC_TXCMDSYNC(sc, x, ops)					\
380 	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
381 	    MEC_CDTXOFF(x), sizeof(uint64_t), (ops))
382 
383 #define MEC_RXSTATSYNC(sc, x, ops)					\
384 	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
385 	    MEC_CDRXOFF(x), sizeof(uint64_t), (ops))
386 #define MEC_RXBUFSYNC(sc, x, len, ops)					\
387 	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
388 	    MEC_CDRXOFF(x) + MEC_RXD_BUFOFFSET,				\
389 	    MEC_ETHER_ALIGN + (len), (ops))
390 
391 /* XXX these values should be moved to <net/if_ether.h> ? */
392 #define ETHER_PAD_LEN	(ETHER_MIN_LEN - ETHER_CRC_LEN)
393 #define MEC_ETHER_ALIGN	2
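/*
 * For reference: ETHER_PAD_LEN works out to 64 - 4 = 60 bytes (minimum
 * frame length excluding the CRC), and MEC_ETHER_ALIGN shifts the Ethernet
 * header by 2 bytes so that the IP header behind it can end up 4-byte
 * aligned.
 */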
394 
395 static int	mec_match(device_t, cfdata_t, void *);
396 static void	mec_attach(device_t, device_t, void *);
397 
398 static int	mec_mii_readreg(device_t, int, int);
399 static void	mec_mii_writereg(device_t, int, int, int);
400 static int	mec_mii_wait(struct mec_softc *);
401 static void	mec_statchg(device_t);
402 
403 static void	enaddr_aton(const char *, uint8_t *);
404 
405 static int	mec_init(struct ifnet * ifp);
406 static void	mec_start(struct ifnet *);
407 static void	mec_watchdog(struct ifnet *);
408 static void	mec_tick(void *);
409 static int	mec_ioctl(struct ifnet *, u_long, void *);
410 static void	mec_reset(struct mec_softc *);
411 static void	mec_setfilter(struct mec_softc *);
412 static int	mec_intr(void *arg);
413 static void	mec_stop(struct ifnet *, int);
414 static void	mec_rxintr(struct mec_softc *);
415 static void	mec_rxcsum(struct mec_softc *, struct mbuf *, uint16_t,
416 		    uint32_t);
417 static void	mec_txintr(struct mec_softc *, uint32_t);
418 static bool	mec_shutdown(device_t, int);
419 
420 CFATTACH_DECL_NEW(mec, sizeof(struct mec_softc),
421     mec_match, mec_attach, NULL, NULL);
422 
423 static int mec_matched = 0;
424 
425 static int
426 mec_match(device_t parent, cfdata_t cf, void *aux)
427 {
428 
429 	/* allow only one device */
430 	if (mec_matched)
431 		return 0;
432 
433 	mec_matched = 1;
434 	return 1;
435 }
436 
437 static void
438 mec_attach(device_t parent, device_t self, void *aux)
439 {
440 	struct mec_softc *sc = device_private(self);
441 	struct mace_attach_args *maa = aux;
442 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
443 	uint64_t address, command;
444 	const char *macaddr;
445 	struct mii_softc *child;
446 	bus_dma_segment_t seg;
447 	int i, err, rseg;
448 	bool mac_is_fake;
449 
450 	sc->sc_dev = self;
451 	sc->sc_st = maa->maa_st;
452 	if (bus_space_subregion(sc->sc_st, maa->maa_sh,
453 	    maa->maa_offset, 0,	&sc->sc_sh) != 0) {
454 		aprint_error(": can't map i/o space\n");
455 		return;
456 	}
457 
458 	/* set up DMA structures */
459 	sc->sc_dmat = maa->maa_dmat;
460 
461 	/*
462 	 * Allocate the control data structures, and create and load the
463 	 * DMA map for it.
464 	 */
465 	if ((err = bus_dmamem_alloc(sc->sc_dmat,
466 	    sizeof(struct mec_control_data), MEC_CONTROL_DATA_ALIGN, 0,
467 	    &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
468 		aprint_error(": unable to allocate control data, error = %d\n",
469 		    err);
470 		goto fail_0;
471 	}
472 	/*
473 	 * XXX needs re-think...
474 	 * the control data structures contain the whole RX data buffer, so
475 	 * BUS_DMA_COHERENT (which disables caching) may cause a performance
476 	 * hit when copying data from the RX buffer to mbufs in normal memory,
477 	 * though we have to make sure all bus_dmamap_sync(9) ops are called
478 	 * properly in that case.
479 	 */
480 	if ((err = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
481 	    sizeof(struct mec_control_data),
482 	    (void **)&sc->sc_control_data, /*BUS_DMA_COHERENT*/ 0)) != 0) {
483 		aprint_error(": unable to map control data, error = %d\n", err);
484 		goto fail_1;
485 	}
486 	memset(sc->sc_control_data, 0, sizeof(struct mec_control_data));
487 
488 	if ((err = bus_dmamap_create(sc->sc_dmat,
489 	    sizeof(struct mec_control_data), 1,
490 	    sizeof(struct mec_control_data), 0, 0, &sc->sc_cddmamap)) != 0) {
491 		aprint_error(": unable to create control data DMA map,"
492 		    " error = %d\n", err);
493 		goto fail_2;
494 	}
495 	if ((err = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
496 	    sc->sc_control_data, sizeof(struct mec_control_data), NULL,
497 	    BUS_DMA_NOWAIT)) != 0) {
498 		aprint_error(": unable to load control data DMA map,"
499 		    " error = %d\n", err);
500 		goto fail_3;
501 	}
502 
503 	/* create TX buffer DMA maps */
504 	for (i = 0; i < MEC_NTXDESC; i++) {
505 		if ((err = bus_dmamap_create(sc->sc_dmat,
506 		    MCLBYTES, MEC_NTXSEG, MCLBYTES, PAGE_SIZE, 0,
507 		    &sc->sc_txsoft[i].txs_dmamap)) != 0) {
508 			aprint_error(": unable to create tx DMA map %d,"
509 			    " error = %d\n", i, err);
510 			goto fail_4;
511 		}
512 	}
513 
514 	callout_init(&sc->sc_tick_ch, 0);
515 
516 	/* get Ethernet address from ARCBIOS */
517 	if ((macaddr = ARCBIOS->GetEnvironmentVariable("eaddr")) == NULL) {
518 		aprint_error(": unable to get MAC address!\n");
519 		goto fail_4;
520 	}
521 	/*
522 	 * On some machines the DS2502 chip storing the serial number/
523 	 * mac address is on the pci riser board - if this board is
524 	 * missing, ARCBIOS will not know a good ethernet address (but
525 	 * otherwise the machine will work fine).
526 	 */
527 	mac_is_fake = false;
528 	if (strcmp(macaddr, "ff:ff:ff:ff:ff:ff") == 0) {
529 		uint32_t ui = 0;
530 		const char * netaddr =
531 			ARCBIOS->GetEnvironmentVariable("netaddr");
532 
533 		/*
534 		 * Create a MAC address by abusing the "netaddr" env var
535 		 */
536 		sc->sc_enaddr[0] = 0xf2;
537 		sc->sc_enaddr[1] = 0x0b;
538 		sc->sc_enaddr[2] = 0xa4;
539 		if (netaddr) {
540 			mac_is_fake = true;
541 			while (*netaddr) {
542 				int v = 0;
543 				while (*netaddr && *netaddr != '.') {
544 					if (*netaddr >= '0' && *netaddr <= '9')
545 						v = v*10 + (*netaddr - '0');
546 					netaddr++;
547 				}
548 				ui <<= 8;
549 				ui |= v;
550 				if (*netaddr == '.')
551 					netaddr++;
552 			}
553 		}
554 		memcpy(sc->sc_enaddr+3, ((uint8_t *)&ui)+1, 3);
555 	}
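	/*
	 * For example, a netaddr of "192.168.1.5" yields ui = 0xc0a80105;
	 * since sgimips is big-endian, the memcpy above picks up its low
	 * three bytes, giving the locally administered address
	 * f2:0b:a4:a8:01:05.
	 */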
556 	if (!mac_is_fake)
557 		enaddr_aton(macaddr, sc->sc_enaddr);
558 
559 	/* set the Ethernet address */
560 	address = 0;
561 	for (i = 0; i < ETHER_ADDR_LEN; i++) {
562 		address = address << 8;
563 		address |= sc->sc_enaddr[i];
564 	}
565 	bus_space_write_8(sc->sc_st, sc->sc_sh, MEC_STATION, address);
566 
567 	/* reset device */
568 	mec_reset(sc);
569 
570 	command = bus_space_read_8(sc->sc_st, sc->sc_sh, MEC_MAC_CONTROL);
571 
572 	aprint_normal(": MAC-110 Ethernet, rev %u\n",
573 	    (u_int)((command & MEC_MAC_REVISION) >> MEC_MAC_REVISION_SHIFT));
574 
575 	if (mac_is_fake)
576 		aprint_normal_dev(self,
577 		    "could not get ethernet address from firmware"
578 		    " - generated one from the \"netaddr\" environment"
579 		    " variable\n");
580 	aprint_normal_dev(self, "Ethernet address %s\n",
581 	    ether_sprintf(sc->sc_enaddr));
582 
583 	/* Done, now attach everything */
584 
585 	sc->sc_mii.mii_ifp = ifp;
586 	sc->sc_mii.mii_readreg = mec_mii_readreg;
587 	sc->sc_mii.mii_writereg = mec_mii_writereg;
588 	sc->sc_mii.mii_statchg = mec_statchg;
589 
590 	/* Set up PHY properties */
591 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
592 	ifmedia_init(&sc->sc_mii.mii_media, 0, ether_mediachange,
593 	    ether_mediastatus);
594 	mii_attach(self, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
595 	    MII_OFFSET_ANY, 0);
596 
597 	child = LIST_FIRST(&sc->sc_mii.mii_phys);
598 	if (child == NULL) {
599 		/* No PHY attached */
600 		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL,
601 		    0, NULL);
602 		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL);
603 	} else {
604 		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
605 		sc->sc_phyaddr = child->mii_phy;
606 	}
607 
608 	strcpy(ifp->if_xname, device_xname(self));
609 	ifp->if_softc = sc;
610 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
611 	ifp->if_ioctl = mec_ioctl;
612 	ifp->if_start = mec_start;
613 	ifp->if_watchdog = mec_watchdog;
614 	ifp->if_init = mec_init;
615 	ifp->if_stop = mec_stop;
616 	ifp->if_mtu = ETHERMTU;
617 	IFQ_SET_READY(&ifp->if_snd);
618 
619 	/* mec has dumb RX cksum support */
620 	ifp->if_capabilities = IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx;
621 
622 	/* We can support 802.1Q VLAN-sized frames. */
623 	sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU;
624 
625 	/* attach the interface */
626 	if_attach(ifp);
627 	ether_ifattach(ifp, sc->sc_enaddr);
628 
629 	/* establish interrupt */
630 	cpu_intr_establish(maa->maa_intr, maa->maa_intrmask, mec_intr, sc);
631 
632 #if NRND > 0
633 	rnd_attach_source(&sc->sc_rnd_source, device_xname(self),
634 	    RND_TYPE_NET, 0);
635 #endif
636 
637 #ifdef MEC_EVENT_COUNTERS
638 	evcnt_attach_dynamic(&sc->sc_ev_txpkts , EVCNT_TYPE_MISC,
639 	    NULL, device_xname(self), "TX pkts queued total");
640 	evcnt_attach_dynamic(&sc->sc_ev_txdpad , EVCNT_TYPE_MISC,
641 	    NULL, device_xname(self), "TX pkts padded in txdesc buf");
642 	evcnt_attach_dynamic(&sc->sc_ev_txdbuf , EVCNT_TYPE_MISC,
643 	    NULL, device_xname(self), "TX pkts copied to txdesc buf");
644 	evcnt_attach_dynamic(&sc->sc_ev_txptr1 , EVCNT_TYPE_MISC,
645 	    NULL, device_xname(self), "TX pkts using concat ptr1");
646 	evcnt_attach_dynamic(&sc->sc_ev_txptr1a , EVCNT_TYPE_MISC,
647 	    NULL, device_xname(self), "TX pkts  w/ptr1  ~160bytes");
648 	evcnt_attach_dynamic(&sc->sc_ev_txptr1b , EVCNT_TYPE_MISC,
649 	    NULL, device_xname(self), "TX pkts  w/ptr1  ~256bytes");
650 	evcnt_attach_dynamic(&sc->sc_ev_txptr1c , EVCNT_TYPE_MISC,
651 	    NULL, device_xname(self), "TX pkts  w/ptr1  ~512bytes");
652 	evcnt_attach_dynamic(&sc->sc_ev_txptr1d , EVCNT_TYPE_MISC,
653 	    NULL, device_xname(self), "TX pkts  w/ptr1 ~1024bytes");
654 	evcnt_attach_dynamic(&sc->sc_ev_txptr1e , EVCNT_TYPE_MISC,
655 	    NULL, device_xname(self), "TX pkts  w/ptr1 >1024bytes");
656 	evcnt_attach_dynamic(&sc->sc_ev_txptr2 , EVCNT_TYPE_MISC,
657 	    NULL, device_xname(self), "TX pkts using concat ptr1,2");
658 	evcnt_attach_dynamic(&sc->sc_ev_txptr2a , EVCNT_TYPE_MISC,
659 	    NULL, device_xname(self), "TX pkts  w/ptr2  ~160bytes");
660 	evcnt_attach_dynamic(&sc->sc_ev_txptr2b , EVCNT_TYPE_MISC,
661 	    NULL, device_xname(self), "TX pkts  w/ptr2  ~256bytes");
662 	evcnt_attach_dynamic(&sc->sc_ev_txptr2c , EVCNT_TYPE_MISC,
663 	    NULL, device_xname(self), "TX pkts  w/ptr2  ~512bytes");
664 	evcnt_attach_dynamic(&sc->sc_ev_txptr2d , EVCNT_TYPE_MISC,
665 	    NULL, device_xname(self), "TX pkts  w/ptr2 ~1024bytes");
666 	evcnt_attach_dynamic(&sc->sc_ev_txptr2e , EVCNT_TYPE_MISC,
667 	    NULL, device_xname(self), "TX pkts  w/ptr2 >1024bytes");
668 	evcnt_attach_dynamic(&sc->sc_ev_txptr3 , EVCNT_TYPE_MISC,
669 	    NULL, device_xname(self), "TX pkts using concat ptr1,2,3");
670 	evcnt_attach_dynamic(&sc->sc_ev_txptr3a , EVCNT_TYPE_MISC,
671 	    NULL, device_xname(self), "TX pkts  w/ptr3  ~160bytes");
672 	evcnt_attach_dynamic(&sc->sc_ev_txptr3b , EVCNT_TYPE_MISC,
673 	    NULL, device_xname(self), "TX pkts  w/ptr3  ~256bytes");
674 	evcnt_attach_dynamic(&sc->sc_ev_txptr3c , EVCNT_TYPE_MISC,
675 	    NULL, device_xname(self), "TX pkts  w/ptr3  ~512bytes");
676 	evcnt_attach_dynamic(&sc->sc_ev_txptr3d , EVCNT_TYPE_MISC,
677 	    NULL, device_xname(self), "TX pkts  w/ptr3 ~1024bytes");
678 	evcnt_attach_dynamic(&sc->sc_ev_txptr3e , EVCNT_TYPE_MISC,
679 	    NULL, device_xname(self), "TX pkts  w/ptr3 >1024bytes");
680 	evcnt_attach_dynamic(&sc->sc_ev_txmbuf , EVCNT_TYPE_MISC,
681 	    NULL, device_xname(self), "TX pkts copied to new mbufs");
682 	evcnt_attach_dynamic(&sc->sc_ev_txmbufa , EVCNT_TYPE_MISC,
683 	    NULL, device_xname(self), "TX pkts  w/mbuf  ~160bytes");
684 	evcnt_attach_dynamic(&sc->sc_ev_txmbufb , EVCNT_TYPE_MISC,
685 	    NULL, device_xname(self), "TX pkts  w/mbuf  ~256bytes");
686 	evcnt_attach_dynamic(&sc->sc_ev_txmbufc , EVCNT_TYPE_MISC,
687 	    NULL, device_xname(self), "TX pkts  w/mbuf  ~512bytes");
688 	evcnt_attach_dynamic(&sc->sc_ev_txmbufd , EVCNT_TYPE_MISC,
689 	    NULL, device_xname(self), "TX pkts  w/mbuf ~1024bytes");
690 	evcnt_attach_dynamic(&sc->sc_ev_txmbufe , EVCNT_TYPE_MISC,
691 	    NULL, device_xname(self), "TX pkts  w/mbuf >1024bytes");
692 	evcnt_attach_dynamic(&sc->sc_ev_txptrs , EVCNT_TYPE_MISC,
693 	    NULL, device_xname(self), "TX pkts using ptrs total");
694 	evcnt_attach_dynamic(&sc->sc_ev_txptrc0 , EVCNT_TYPE_MISC,
695 	    NULL, device_xname(self), "TX pkts  w/ptrs no hdr chain");
696 	evcnt_attach_dynamic(&sc->sc_ev_txptrc1 , EVCNT_TYPE_MISC,
697 	    NULL, device_xname(self), "TX pkts  w/ptrs  1 hdr chain");
698 	evcnt_attach_dynamic(&sc->sc_ev_txptrc2 , EVCNT_TYPE_MISC,
699 	    NULL, device_xname(self), "TX pkts  w/ptrs  2 hdr chains");
700 	evcnt_attach_dynamic(&sc->sc_ev_txptrc3 , EVCNT_TYPE_MISC,
701 	    NULL, device_xname(self), "TX pkts  w/ptrs  3 hdr chains");
702 	evcnt_attach_dynamic(&sc->sc_ev_txptrc4 , EVCNT_TYPE_MISC,
703 	    NULL, device_xname(self), "TX pkts  w/ptrs  4 hdr chains");
704 	evcnt_attach_dynamic(&sc->sc_ev_txptrc5 , EVCNT_TYPE_MISC,
705 	    NULL, device_xname(self), "TX pkts  w/ptrs  5 hdr chains");
706 	evcnt_attach_dynamic(&sc->sc_ev_txptrc6 , EVCNT_TYPE_MISC,
707 	    NULL, device_xname(self), "TX pkts  w/ptrs >5 hdr chains");
708 	evcnt_attach_dynamic(&sc->sc_ev_txptrh0 , EVCNT_TYPE_MISC,
709 	    NULL, device_xname(self), "TX pkts  w/ptrs  ~8bytes hdr");
710 	evcnt_attach_dynamic(&sc->sc_ev_txptrh1 , EVCNT_TYPE_MISC,
711 	    NULL, device_xname(self), "TX pkts  w/ptrs ~16bytes hdr");
712 	evcnt_attach_dynamic(&sc->sc_ev_txptrh2 , EVCNT_TYPE_MISC,
713 	    NULL, device_xname(self), "TX pkts  w/ptrs ~32bytes hdr");
714 	evcnt_attach_dynamic(&sc->sc_ev_txptrh3 , EVCNT_TYPE_MISC,
715 	    NULL, device_xname(self), "TX pkts  w/ptrs ~64bytes hdr");
716 	evcnt_attach_dynamic(&sc->sc_ev_txptrh4 , EVCNT_TYPE_MISC,
717 	    NULL, device_xname(self), "TX pkts  w/ptrs ~80bytes hdr");
718 	evcnt_attach_dynamic(&sc->sc_ev_txptrh5 , EVCNT_TYPE_MISC,
719 	    NULL, device_xname(self), "TX pkts  w/ptrs ~96bytes hdr");
720 	evcnt_attach_dynamic(&sc->sc_ev_txdstall , EVCNT_TYPE_MISC,
721 	    NULL, device_xname(self), "TX stalled due to no txdesc");
722 	evcnt_attach_dynamic(&sc->sc_ev_txempty , EVCNT_TYPE_MISC,
723 	    NULL, device_xname(self), "TX empty interrupts");
724 	evcnt_attach_dynamic(&sc->sc_ev_txsent , EVCNT_TYPE_MISC,
725 	    NULL, device_xname(self), "TX sent interrupts");
726 #endif
727 
728 	/* set shutdown hook to reset interface on powerdown */
729 	if (pmf_device_register1(self, NULL, NULL, mec_shutdown))
730 		pmf_class_network_register(self, ifp);
731 	else
732 		aprint_error_dev(self, "couldn't establish power handler\n");
733 
734 	return;
735 
736 	/*
737 	 * Free any resources we've allocated during the failed attach
738 	 * attempt.  Do this in reverse order and fall through.
739 	 */
740  fail_4:
741 	for (i = 0; i < MEC_NTXDESC; i++) {
742 		if (sc->sc_txsoft[i].txs_dmamap != NULL)
743 			bus_dmamap_destroy(sc->sc_dmat,
744 			    sc->sc_txsoft[i].txs_dmamap);
745 	}
746 	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
747  fail_3:
748 	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
749  fail_2:
750 	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
751 	    sizeof(struct mec_control_data));
752  fail_1:
753 	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
754  fail_0:
755 	return;
756 }
757 
758 static int
759 mec_mii_readreg(device_t self, int phy, int reg)
760 {
761 	struct mec_softc *sc = device_private(self);
762 	bus_space_tag_t st = sc->sc_st;
763 	bus_space_handle_t sh = sc->sc_sh;
764 	uint64_t val;
765 	int i;
766 
767 	if (mec_mii_wait(sc) != 0)
768 		return 0;
769 
770 	bus_space_write_8(st, sh, MEC_PHY_ADDRESS,
771 	    (phy << MEC_PHY_ADDR_DEVSHIFT) | (reg & MEC_PHY_ADDR_REGISTER));
772 	delay(25);
773 	bus_space_write_8(st, sh, MEC_PHY_READ_INITIATE, 1);
774 	delay(25);
775 	mec_mii_wait(sc);
776 
777 	for (i = 0; i < 20; i++) {
778 		delay(30);
779 
780 		val = bus_space_read_8(st, sh, MEC_PHY_DATA);
781 
782 		if ((val & MEC_PHY_DATA_BUSY) == 0)
783 			return val & MEC_PHY_DATA_VALUE;
784 	}
785 	return 0;
786 }
787 
788 static void
789 mec_mii_writereg(device_t self, int phy, int reg, int val)
790 {
791 	struct mec_softc *sc = device_private(self);
792 	bus_space_tag_t st = sc->sc_st;
793 	bus_space_handle_t sh = sc->sc_sh;
794 
795 	if (mec_mii_wait(sc) != 0) {
796 		printf("timed out writing %x: %x\n", reg, val);
797 		return;
798 	}
799 
800 	bus_space_write_8(st, sh, MEC_PHY_ADDRESS,
801 	    (phy << MEC_PHY_ADDR_DEVSHIFT) | (reg & MEC_PHY_ADDR_REGISTER));
802 
803 	delay(60);
804 
805 	bus_space_write_8(st, sh, MEC_PHY_DATA, val & MEC_PHY_DATA_VALUE);
806 
807 	delay(60);
808 
809 	mec_mii_wait(sc);
810 }
811 
812 static int
813 mec_mii_wait(struct mec_softc *sc)
814 {
815 	uint32_t busy;
816 	int i, s;
817 
818 	for (i = 0; i < 100; i++) {
819 		delay(30);
820 
821 		s = splhigh();
822 		busy = bus_space_read_8(sc->sc_st, sc->sc_sh, MEC_PHY_DATA);
823 		splx(s);
824 
825 		if ((busy & MEC_PHY_DATA_BUSY) == 0)
826 			return 0;
827 #if 0
828 		if (busy == 0xffff) /* XXX ? */
829 			return 0;
830 #endif
831 	}
832 
833 	printf("%s: MII timed out\n", device_xname(sc->sc_dev));
834 	return 1;
835 }
836 
837 static void
838 mec_statchg(device_t self)
839 {
840 	struct mec_softc *sc = device_private(self);
841 	bus_space_tag_t st = sc->sc_st;
842 	bus_space_handle_t sh = sc->sc_sh;
843 	uint32_t control;
844 
845 	control = bus_space_read_8(st, sh, MEC_MAC_CONTROL);
846 	control &= ~(MEC_MAC_IPGT | MEC_MAC_IPGR1 | MEC_MAC_IPGR2 |
847 	    MEC_MAC_FULL_DUPLEX | MEC_MAC_SPEED_SELECT);
848 
849 	/* must also set IPG here for duplex stuff ... */
850 	if ((sc->sc_mii.mii_media_active & IFM_FDX) != 0) {
851 		control |= MEC_MAC_FULL_DUPLEX;
852 	} else {
853 		/* set IPG */
854 		control |= MEC_MAC_IPG_DEFAULT;
855 	}
856 
857 	bus_space_write_8(st, sh, MEC_MAC_CONTROL, control);
858 }
859 
860 /*
861  * XXX
862  * maybe this function should be moved to a common place
863  * (sgimips/machdep.c or elsewhere) for all on-board network devices.
864  */
865 static void
866 enaddr_aton(const char *str, uint8_t *eaddr)
867 {
868 	int i;
869 	char c;
870 
871 	for (i = 0; i < ETHER_ADDR_LEN; i++) {
872 		if (*str == ':')
873 			str++;
874 
875 		c = *str++;
876 		if (isdigit(c)) {
877 			eaddr[i] = (c - '0');
878 		} else if (isxdigit(c)) {
879 			eaddr[i] = (toupper(c) + 10 - 'A');
880 		}
881 		c = *str++;
882 		if (isdigit(c)) {
883 			eaddr[i] = (eaddr[i] << 4) | (c - '0');
884 		} else if (isxdigit(c)) {
885 			eaddr[i] = (eaddr[i] << 4) | (toupper(c) + 10 - 'A');
886 		}
887 	}
888 }
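/*
 * For example, enaddr_aton("08:00:69:12:34:56", eaddr) fills eaddr with
 * { 0x08, 0x00, 0x69, 0x12, 0x34, 0x56 }; the colons are optional since
 * the parser simply skips one before each octet.
 */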
889 
890 static int
891 mec_init(struct ifnet *ifp)
892 {
893 	struct mec_softc *sc = ifp->if_softc;
894 	bus_space_tag_t st = sc->sc_st;
895 	bus_space_handle_t sh = sc->sc_sh;
896 	struct mec_rxdesc *rxd;
897 	int i, rc;
898 
899 	/* cancel any pending I/O */
900 	mec_stop(ifp, 0);
901 
902 	/* reset device */
903 	mec_reset(sc);
904 
905 	/* setup filter for multicast or promisc mode */
906 	mec_setfilter(sc);
907 
908 	/* set the TX ring pointer to the base address */
909 	bus_space_write_8(st, sh, MEC_TX_RING_BASE, MEC_CDTXADDR(sc, 0));
910 
911 	sc->sc_txpending = 0;
912 	sc->sc_txdirty = 0;
913 	sc->sc_txlast = MEC_NTXDESC - 1;
914 
915 	/* put RX buffers into FIFO */
916 	for (i = 0; i < MEC_NRXDESC; i++) {
917 		rxd = &sc->sc_rxdesc[i];
918 		rxd->rxd_stat = 0;
919 		MEC_RXSTATSYNC(sc, i, BUS_DMASYNC_PREREAD);
920 		MEC_RXBUFSYNC(sc, i, ETHER_MAX_LEN, BUS_DMASYNC_PREREAD);
921 		bus_space_write_8(st, sh, MEC_MCL_RX_FIFO, MEC_CDRXADDR(sc, i));
922 	}
923 	sc->sc_rxptr = 0;
924 
925 #if 0	/* XXX no info */
926 	bus_space_write_8(st, sh, MEC_TIMER, 0);
927 #endif
928 
929 	/*
930 	 * MEC_DMA_TX_INT_ENABLE will be set later; otherwise it causes
931 	 * spurious interrupts when the TX buffers are empty.
932 	 */
933 	bus_space_write_8(st, sh, MEC_DMA_CONTROL,
934 	    (MEC_RXD_DMAOFFSET << MEC_DMA_RX_DMA_OFFSET_SHIFT) |
935 	    (MEC_NRXDESC << MEC_DMA_RX_INT_THRESH_SHIFT) |
936 	    MEC_DMA_TX_DMA_ENABLE | /* MEC_DMA_TX_INT_ENABLE | */
937 	    MEC_DMA_RX_DMA_ENABLE | MEC_DMA_RX_INT_ENABLE);
938 
939 	callout_reset(&sc->sc_tick_ch, hz, mec_tick, sc);
940 
941 	if ((rc = ether_mediachange(ifp)) != 0)
942 		return rc;
943 
944 	ifp->if_flags |= IFF_RUNNING;
945 	ifp->if_flags &= ~IFF_OACTIVE;
946 	mec_start(ifp);
947 
948 	return 0;
949 }
950 
951 static void
952 mec_reset(struct mec_softc *sc)
953 {
954 	bus_space_tag_t st = sc->sc_st;
955 	bus_space_handle_t sh = sc->sc_sh;
956 	uint64_t control;
957 
958 	/* stop DMA first */
959 	bus_space_write_8(st, sh, MEC_DMA_CONTROL, 0);
960 
961 	/* reset chip */
962 	bus_space_write_8(st, sh, MEC_MAC_CONTROL, MEC_MAC_CORE_RESET);
963 	delay(1000);
964 	bus_space_write_8(st, sh, MEC_MAC_CONTROL, 0);
965 	delay(1000);
966 
967 	/* Default to 100/half and let auto-negotiation work its magic */
968 	control = MEC_MAC_SPEED_SELECT | MEC_MAC_FILTER_MATCHMULTI |
969 	    MEC_MAC_IPG_DEFAULT;
970 
971 	bus_space_write_8(st, sh, MEC_MAC_CONTROL, control);
972 	/* stop DMA again for sanity */
973 	bus_space_write_8(st, sh, MEC_DMA_CONTROL, 0);
974 
975 	DPRINTF(MEC_DEBUG_RESET, ("mec: control now %llx\n",
976 	    bus_space_read_8(st, sh, MEC_MAC_CONTROL)));
977 }
978 
979 static void
980 mec_start(struct ifnet *ifp)
981 {
982 	struct mec_softc *sc = ifp->if_softc;
983 	struct mbuf *m0, *m;
984 	struct mec_txdesc *txd;
985 	struct mec_txsoft *txs;
986 	bus_dmamap_t dmamap;
987 	bus_space_tag_t st = sc->sc_st;
988 	bus_space_handle_t sh = sc->sc_sh;
989 	int error, firsttx, nexttx, opending;
990 	int len, bufoff, buflen, nsegs, align, resid, pseg, nptr, slen, i;
991 	uint32_t txdcmd;
992 
993 	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
994 		return;
995 
996 	/*
997 	 * Remember the previous txpending and the first transmit descriptor.
998 	 */
999 	opending = sc->sc_txpending;
1000 	firsttx = MEC_NEXTTX(sc->sc_txlast);
1001 
1002 	DPRINTF(MEC_DEBUG_START,
1003 	    ("%s: opending = %d, firsttx = %d\n", __func__, opending, firsttx));
1004 
1005 	while (sc->sc_txpending < MEC_NTXDESC - 1) {
1006 		/* Grab a packet off the queue. */
1007 		IFQ_POLL(&ifp->if_snd, m0);
1008 		if (m0 == NULL)
1009 			break;
1010 		m = NULL;
1011 
1012 		/*
1013 		 * Get the next available transmit descriptor.
1014 		 */
1015 		nexttx = MEC_NEXTTX(sc->sc_txlast);
1016 		txd = &sc->sc_txdesc[nexttx];
1017 		txs = &sc->sc_txsoft[nexttx];
1018 		dmamap = txs->txs_dmamap;
1019 		txs->txs_flags = 0;
1020 
1021 		buflen = 0;
1022 		bufoff = 0;
1023 		resid = 0;
1024 		nptr = 0;	/* XXX gcc */
1025 		pseg = 0;	/* XXX gcc */
1026 
1027 		len = m0->m_pkthdr.len;
1028 
1029 		DPRINTF(MEC_DEBUG_START,
1030 		    ("%s: len = %d, nexttx = %d, txpending = %d\n",
1031 		    __func__, len, nexttx, sc->sc_txpending));
1032 
1033 		if (len <= MEC_TXD_BUFSIZE) {
1034 			/*
1035 			 * If a TX packet will fit into the small txdesc buffer,
1036 			 * just copy it in there; that is probably faster than
1037 			 * checking alignment and calling bus_dma(9) etc.
1038 			 */
1039 			DPRINTF(MEC_DEBUG_START, ("%s: short packet\n",
1040 			    __func__));
1041 			IFQ_DEQUEUE(&ifp->if_snd, m0);
1042 
1043 			/*
1044 			 * I don't know if the MEC chip does auto padding,
1045 			 * but do it manually for safety.
1046 			 */
1047 			if (len < ETHER_PAD_LEN) {
1048 				MEC_EVCNT_INCR(&sc->sc_ev_txdpad);
1049 				bufoff = MEC_TXD_BUFSTART(ETHER_PAD_LEN);
1050 				m_copydata(m0, 0, len, txd->txd_buf + bufoff);
1051 				memset(txd->txd_buf + bufoff + len, 0,
1052 				    ETHER_PAD_LEN - len);
1053 				len = buflen = ETHER_PAD_LEN;
1054 			} else {
1055 				MEC_EVCNT_INCR(&sc->sc_ev_txdbuf);
1056 				bufoff = MEC_TXD_BUFSTART(len);
1057 				m_copydata(m0, 0, len, txd->txd_buf + bufoff);
1058 				buflen = len;
1059 			}
1060 		} else {
1061 			/*
1062 			 * If the packet won't fit in the static buffer in txdesc,
1063 			 * we have to use the concatenate pointers to handle it.
1064 			 */
1065 			DPRINTF(MEC_DEBUG_START, ("%s: long packet\n",
1066 			    __func__));
1067 			txs->txs_flags = MEC_TXS_TXDPTR;
1068 
1069 			/*
1070 			 * Call bus_dmamap_load_mbuf(9) first to see
1071 			 * how many chains the TX mbuf has.
1072 			 */
1073 			error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
1074 			    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
1075 			if (error == 0) {
1076 				/*
1077 				 * Check the chains which might contain headers.
1078 				 * They might be heavily fragmented, and
1079 				 * it's better to copy them into the txdesc buffer
1080 				 * since they should be small enough.
1081 				 */
1082 				nsegs = dmamap->dm_nsegs;
1083 				for (pseg = 0; pseg < nsegs; pseg++) {
1084 					slen = dmamap->dm_segs[pseg].ds_len;
1085 					if (buflen + slen >
1086 					    MEC_TXD_BUFSIZE1 - MEC_TXD_ALIGN)
1087 						break;
1088 					buflen += slen;
1089 				}
1090 				/*
1091 				 * Check if the remaining chains can fit into
1092 				 * the concatenate pointers.
1093 				 */
1094 				align = dmamap->dm_segs[pseg].ds_addr &
1095 				    MEC_TXD_ALIGNMASK;
1096 				if (align > 0) {
1097 					/*
1098 					 * If the first chain isn't uint64_t
1099 					 * aligned, append the unaligned part
1100 					 * into txdesc buffer too.
1101 					 */
1102 					resid = MEC_TXD_ALIGN - align;
1103 					buflen += resid;
1104 					for (; pseg < nsegs; pseg++) {
1105 						slen =
1106 						  dmamap->dm_segs[pseg].ds_len;
1107 						if (slen > resid)
1108 							break;
1109 						resid -= slen;
1110 					}
1111 				} else if (pseg == 0) {
1112 					/*
1113 					 * In this case, the first chain is
1114 					 * uint64_t aligned but it's too long
1115 					 * to put into txdesc buf.
1116 					 * We have to put some data into
1117 					 * txdesc buf even in this case,
1118 					 * so put MEC_TXD_ALIGN bytes there.
1119 					 */
1120 					buflen = resid = MEC_TXD_ALIGN;
1121 				}
1122 				nptr = nsegs - pseg;
1123 				if (nptr <= MEC_NTXPTR) {
1124 					bufoff = MEC_TXD_BUFSTART(buflen);
1125 
1126 					/*
1127 					 * Check if all the remaining chains are
1128 					 * uint64_t aligned.
1129 					 */
1130 					align = 0;
1131 					for (i = pseg + 1; i < nsegs; i++)
1132 						align |=
1133 						    dmamap->dm_segs[i].ds_addr
1134 						    & MEC_TXD_ALIGNMASK;
1135 					if (align != 0) {
1136 						/* chains are not aligned */
1137 						error = -1;
1138 					}
1139 				} else {
1140 					/* The TX mbuf chains don't fit. */
1141 					error = -1;
1142 				}
1143 				if (error == -1)
1144 					bus_dmamap_unload(sc->sc_dmat, dmamap);
1145 			}
1146 			if (error != 0) {
1147 				/*
1148 				 * The TX mbuf chains can't be put into
1149 				 * the concatenate buffers. In this case,
1150 				 * we have to allocate a new contiguous mbuf
1151 				 * and copy data into it.
1152 				 *
1153 				 * Even in this case, the Ethernet header in
1154 				 * the TX mbuf might be unaligned and trailing
1155 				 * data might be word aligned, so put 2 byte
1156 				 * (MEC_ETHER_ALIGN) padding at the top of the
1157 				 * allocated mbuf and copy the TX packet there.
1158 				 * 6 bytes (MEC_TXD_ALIGN - MEC_ETHER_ALIGN)
1159 				 * at the top of the new mbuf won't be uint64_t
1160 				 * aligned, but we have to put some data into
1161 				 * txdesc buffer anyway even if the buffer
1162 				 * is uint64_t aligned.
1163 				 */
1164 				DPRINTF(MEC_DEBUG_START|MEC_DEBUG_TXSEGS,
1165 				    ("%s: re-allocating mbuf\n", __func__));
1166 
1167 				MGETHDR(m, M_DONTWAIT, MT_DATA);
1168 				if (m == NULL) {
1169 					printf("%s: unable to allocate "
1170 					    "TX mbuf\n",
1171 					    device_xname(sc->sc_dev));
1172 					break;
1173 				}
1174 				if (len > (MHLEN - MEC_ETHER_ALIGN)) {
1175 					MCLGET(m, M_DONTWAIT);
1176 					if ((m->m_flags & M_EXT) == 0) {
1177 						printf("%s: unable to allocate "
1178 						    "TX cluster\n",
1179 						    device_xname(sc->sc_dev));
1180 						m_freem(m);
1181 						break;
1182 					}
1183 				}
1184 				m->m_data += MEC_ETHER_ALIGN;
1185 
1186 				/*
1187 				 * Copy the whole packet (including the unaligned
1188 				 * part) for the following bpf_mtap().
1189 				 */
1190 				m_copydata(m0, 0, len, mtod(m, void *));
1191 				m->m_pkthdr.len = m->m_len = len;
1192 				error = bus_dmamap_load_mbuf(sc->sc_dmat,
1193 				    dmamap, m, BUS_DMA_WRITE | BUS_DMA_NOWAIT);
1194 				if (dmamap->dm_nsegs > 1) {
1195 					/* should not happen, but for sanity */
1196 					bus_dmamap_unload(sc->sc_dmat, dmamap);
1197 					error = -1;
1198 				}
1199 				if (error != 0) {
1200 					printf("%s: unable to load TX buffer, "
1201 					    "error = %d\n",
1202 					    device_xname(sc->sc_dev), error);
1203 					m_freem(m);
1204 					break;
1205 				}
1206 				/*
1207 				 * Only the first segment should be put into
1208 				 * the concatenate pointer in this case.
1209 				 */
1210 				pseg = 0;
1211 				nptr = 1;
1212 
1213 				/*
1214 				 * Set the length of the unaligned part which will
1215 				 * be copied into the txdesc buffer.
1216 				 */
1217 				buflen = MEC_TXD_ALIGN - MEC_ETHER_ALIGN;
1218 				bufoff = MEC_TXD_BUFSTART(buflen);
1219 				resid = buflen;
1220 #ifdef MEC_EVENT_COUNTERS
1221 				MEC_EVCNT_INCR(&sc->sc_ev_txmbuf);
1222 				if (len <= 160)
1223 					MEC_EVCNT_INCR(&sc->sc_ev_txmbufa);
1224 				else if (len <= 256)
1225 					MEC_EVCNT_INCR(&sc->sc_ev_txmbufb);
1226 				else if (len <= 512)
1227 					MEC_EVCNT_INCR(&sc->sc_ev_txmbufc);
1228 				else if (len <= 1024)
1229 					MEC_EVCNT_INCR(&sc->sc_ev_txmbufd);
1230 				else
1231 					MEC_EVCNT_INCR(&sc->sc_ev_txmbufe);
1232 #endif
1233 			}
1234 #ifdef MEC_EVENT_COUNTERS
1235 			else {
1236 				MEC_EVCNT_INCR(&sc->sc_ev_txptrs);
1237 				if (nptr == 1) {
1238 					MEC_EVCNT_INCR(&sc->sc_ev_txptr1);
1239 					if (len <= 160)
1240 						MEC_EVCNT_INCR(
1241 						    &sc->sc_ev_txptr1a);
1242 					else if (len <= 256)
1243 						MEC_EVCNT_INCR(
1244 						    &sc->sc_ev_txptr1b);
1245 					else if (len <= 512)
1246 						MEC_EVCNT_INCR(
1247 						    &sc->sc_ev_txptr1c);
1248 					else if (len <= 1024)
1249 						MEC_EVCNT_INCR(
1250 						    &sc->sc_ev_txptr1d);
1251 					else
1252 						MEC_EVCNT_INCR(
1253 						    &sc->sc_ev_txptr1e);
1254 				} else if (nptr == 2) {
1255 					MEC_EVCNT_INCR(&sc->sc_ev_txptr2);
1256 					if (len <= 160)
1257 						MEC_EVCNT_INCR(
1258 						    &sc->sc_ev_txptr2a);
1259 					else if (len <= 256)
1260 						MEC_EVCNT_INCR(
1261 						    &sc->sc_ev_txptr2b);
1262 					else if (len <= 512)
1263 						MEC_EVCNT_INCR(
1264 						    &sc->sc_ev_txptr2c);
1265 					else if (len <= 1024)
1266 						MEC_EVCNT_INCR(
1267 						    &sc->sc_ev_txptr2d);
1268 					else
1269 						MEC_EVCNT_INCR(
1270 						    &sc->sc_ev_txptr2e);
1271 				} else if (nptr == 3) {
1272 					MEC_EVCNT_INCR(&sc->sc_ev_txptr3);
1273 					if (len <= 160)
1274 						MEC_EVCNT_INCR(
1275 						    &sc->sc_ev_txptr3a);
1276 					else if (len <= 256)
1277 						MEC_EVCNT_INCR(
1278 						    &sc->sc_ev_txptr3b);
1279 					else if (len <= 512)
1280 						MEC_EVCNT_INCR(
1281 						    &sc->sc_ev_txptr3c);
1282 					else if (len <= 1024)
1283 						MEC_EVCNT_INCR(
1284 						    &sc->sc_ev_txptr3d);
1285 					else
1286 						MEC_EVCNT_INCR(
1287 						    &sc->sc_ev_txptr3e);
1288 				}
1289 				if (pseg == 0)
1290 					MEC_EVCNT_INCR(&sc->sc_ev_txptrc0);
1291 				else if (pseg == 1)
1292 					MEC_EVCNT_INCR(&sc->sc_ev_txptrc1);
1293 				else if (pseg == 2)
1294 					MEC_EVCNT_INCR(&sc->sc_ev_txptrc2);
1295 				else if (pseg == 3)
1296 					MEC_EVCNT_INCR(&sc->sc_ev_txptrc3);
1297 				else if (pseg == 4)
1298 					MEC_EVCNT_INCR(&sc->sc_ev_txptrc4);
1299 				else if (pseg == 5)
1300 					MEC_EVCNT_INCR(&sc->sc_ev_txptrc5);
1301 				else
1302 					MEC_EVCNT_INCR(&sc->sc_ev_txptrc6);
1303 				if (buflen <= 8)
1304 					MEC_EVCNT_INCR(&sc->sc_ev_txptrh0);
1305 				else if (buflen <= 16)
1306 					MEC_EVCNT_INCR(&sc->sc_ev_txptrh1);
1307 				else if (buflen <= 32)
1308 					MEC_EVCNT_INCR(&sc->sc_ev_txptrh2);
1309 				else if (buflen <= 64)
1310 					MEC_EVCNT_INCR(&sc->sc_ev_txptrh3);
1311 				else if (buflen <= 80)
1312 					MEC_EVCNT_INCR(&sc->sc_ev_txptrh4);
1313 				else
1314 					MEC_EVCNT_INCR(&sc->sc_ev_txptrh5);
1315 			}
1316 #endif
1317 			m_copydata(m0, 0, buflen, txd->txd_buf + bufoff);
1318 
1319 			IFQ_DEQUEUE(&ifp->if_snd, m0);
1320 			if (m != NULL) {
1321 				m_freem(m0);
1322 				m0 = m;
1323 			}
1324 
1325 			/*
1326 			 * sync the DMA map for TX mbuf
1327 			 */
1328 			bus_dmamap_sync(sc->sc_dmat, dmamap, buflen,
1329 			    len - buflen, BUS_DMASYNC_PREWRITE);
1330 		}
1331 
1332 #if NBPFILTER > 0
1333 		/*
1334 		 * Pass packet to bpf if there is a listener.
1335 		 */
1336 		if (ifp->if_bpf)
1337 			bpf_mtap(ifp->if_bpf, m0);
1338 #endif
1339 		MEC_EVCNT_INCR(&sc->sc_ev_txpkts);
1340 
1341 		/*
1342 		 * setup the transmit descriptor.
1343 		 */
1344 		txdcmd = TXCMD_BUFSTART(MEC_TXDESCSIZE - buflen) | (len - 1);
1345 
1346 		/*
1347 		 * Set MEC_TXCMD_TXINT every MEC_NTXDESC_INTR packets
1348 		 * if more than half of the txdescs have been queued,
1349 		 * because TX_EMPTY interrupts will rarely happen
1350 		 * when the TX queue is that backed up.
1351 		 */
1352 		if (sc->sc_txpending > (MEC_NTXDESC / 2) &&
1353 		    (nexttx & (MEC_NTXDESC_INTR - 1)) == 0)
1354 			txdcmd |= MEC_TXCMD_TXINT;
1355 
1356 		if ((txs->txs_flags & MEC_TXS_TXDPTR) != 0) {
1357 			bus_dma_segment_t *segs = dmamap->dm_segs;
1358 
1359 			DPRINTF(MEC_DEBUG_TXSEGS,
1360 			    ("%s: nsegs = %d, pseg = %d, nptr = %d\n",
1361 			    __func__, dmamap->dm_nsegs, pseg, nptr));
1362 
1363 			switch (nptr) {
1364 			case 3:
1365 				KASSERT((segs[pseg + 2].ds_addr &
1366 				    MEC_TXD_ALIGNMASK) == 0);
1367 				txdcmd |= MEC_TXCMD_PTR3;
1368 				txd->txd_ptr[2] =
1369 				    TXPTR_LEN(segs[pseg + 2].ds_len - 1) |
1370 				    segs[pseg + 2].ds_addr;
1371 				/* FALLTHROUGH */
1372 			case 2:
1373 				KASSERT((segs[pseg + 1].ds_addr &
1374 				    MEC_TXD_ALIGNMASK) == 0);
1375 				txdcmd |= MEC_TXCMD_PTR2;
1376 				txd->txd_ptr[1] =
1377 				    TXPTR_LEN(segs[pseg + 1].ds_len - 1) |
1378 				    segs[pseg + 1].ds_addr;
1379 				/* FALLTHROUGH */
1380 			case 1:
1381 				txdcmd |= MEC_TXCMD_PTR1;
1382 				txd->txd_ptr[0] =
1383 				    TXPTR_LEN(segs[pseg].ds_len - resid - 1) |
1384 				    (segs[pseg].ds_addr + resid);
1385 				break;
1386 			default:
1387 				panic("%s: impossible nptr in %s",
1388 				    device_xname(sc->sc_dev), __func__);
1389 				/* NOTREACHED */
1390 			}
1391 			/*
1392 			 * Store a pointer to the packet so we can
1393 			 * free it later.
1394 			 */
1395 			txs->txs_mbuf = m0;
1396 		} else {
1397 			/*
1398 			 * In this case all data has been copied into the txdesc
1399 			 * buffer, so we can free the TX mbuf here.
1400 			 */
1401 			m_freem(m0);
1402 		}
1403 		txd->txd_cmd = txdcmd;
1404 
1405 		DPRINTF(MEC_DEBUG_START,
1406 		    ("%s: txd_cmd    = 0x%016llx\n",
1407 		    __func__, txd->txd_cmd));
1408 		DPRINTF(MEC_DEBUG_START,
1409 		    ("%s: txd_ptr[0] = 0x%016llx\n",
1410 		    __func__, txd->txd_ptr[0]));
1411 		DPRINTF(MEC_DEBUG_START,
1412 		    ("%s: txd_ptr[1] = 0x%016llx\n",
1413 		    __func__, txd->txd_ptr[1]));
1414 		DPRINTF(MEC_DEBUG_START,
1415 		    ("%s: txd_ptr[2] = 0x%016llx\n",
1416 		    __func__, txd->txd_ptr[2]));
1417 		DPRINTF(MEC_DEBUG_START,
1418 		    ("%s: len = %d (0x%04x), buflen = %d (0x%02x)\n",
1419 		    __func__, len, len, buflen, buflen));
1420 
1421 		/* sync TX descriptor */
1422 		MEC_TXDESCSYNC(sc, nexttx,
1423 		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1424 
1425 		/* start TX */
1426 		bus_space_write_8(st, sh, MEC_TX_RING_PTR, MEC_NEXTTX(nexttx));
1427 
1428 		/* advance the TX pointer. */
1429 		sc->sc_txpending++;
1430 		sc->sc_txlast = nexttx;
1431 	}
1432 
1433 	if (sc->sc_txpending == MEC_NTXDESC - 1) {
1434 		/* No more slots; notify upper layer. */
1435 		MEC_EVCNT_INCR(&sc->sc_ev_txdstall);
1436 		ifp->if_flags |= IFF_OACTIVE;
1437 	}
1438 
1439 	if (sc->sc_txpending != opending) {
1440 		/*
1441 		 * If the transmitter was idle,
1442 		 * reset the txdirty pointer and re-enable TX interrupt.
1443 		 */
1444 		if (opending == 0) {
1445 			sc->sc_txdirty = firsttx;
1446 			bus_space_write_8(st, sh, MEC_TX_ALIAS,
1447 			    MEC_TX_ALIAS_INT_ENABLE);
1448 		}
1449 
1450 		/* Set a watchdog timer in case the chip flakes out. */
1451 		ifp->if_timer = 5;
1452 	}
1453 }
1454 
1455 static void
1456 mec_stop(struct ifnet *ifp, int disable)
1457 {
1458 	struct mec_softc *sc = ifp->if_softc;
1459 	struct mec_txsoft *txs;
1460 	int i;
1461 
1462 	DPRINTF(MEC_DEBUG_STOP, ("%s\n", __func__));
1463 
1464 	ifp->if_timer = 0;
1465 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
1466 
1467 	callout_stop(&sc->sc_tick_ch);
1468 	mii_down(&sc->sc_mii);
1469 
1470 	/* release any TX buffers */
1471 	for (i = 0; i < MEC_NTXDESC; i++) {
1472 		txs = &sc->sc_txsoft[i];
1473 		if ((txs->txs_flags & MEC_TXS_TXDPTR) != 0) {
1474 			bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
1475 			m_freem(txs->txs_mbuf);
1476 			txs->txs_mbuf = NULL;
1477 		}
1478 	}
1479 }
1480 
1481 static int
1482 mec_ioctl(struct ifnet *ifp, u_long cmd, void *data)
1483 {
1484 	int s, error;
1485 
1486 	s = splnet();
1487 
1488 	error = ether_ioctl(ifp, cmd, data);
1489 	if (error == ENETRESET) {
1490 		/*
1491 		 * Multicast list has changed; set the hardware filter
1492 		 * accordingly.
1493 		 */
1494 		if (ifp->if_flags & IFF_RUNNING)
1495 			error = mec_init(ifp);
1496 		else
1497 			error = 0;
1498 	}
1499 
1500 	/* Try to get more packets going. */
1501 	mec_start(ifp);
1502 
1503 	splx(s);
1504 	return error;
1505 }
1506 
1507 static void
1508 mec_watchdog(struct ifnet *ifp)
1509 {
1510 	struct mec_softc *sc = ifp->if_softc;
1511 
1512 	printf("%s: device timeout\n", device_xname(sc->sc_dev));
1513 	ifp->if_oerrors++;
1514 
1515 	mec_init(ifp);
1516 }
1517 
1518 static void
1519 mec_tick(void *arg)
1520 {
1521 	struct mec_softc *sc = arg;
1522 	int s;
1523 
1524 	s = splnet();
1525 	mii_tick(&sc->sc_mii);
1526 	splx(s);
1527 
1528 	callout_reset(&sc->sc_tick_ch, hz, mec_tick, sc);
1529 }
1530 
1531 static void
1532 mec_setfilter(struct mec_softc *sc)
1533 {
1534 	struct ethercom *ec = &sc->sc_ethercom;
1535 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1536 	struct ether_multi *enm;
1537 	struct ether_multistep step;
1538 	bus_space_tag_t st = sc->sc_st;
1539 	bus_space_handle_t sh = sc->sc_sh;
1540 	uint64_t mchash;
1541 	uint32_t control, hash;
1542 	int mcnt;
1543 
1544 	control = bus_space_read_8(st, sh, MEC_MAC_CONTROL);
1545 	control &= ~MEC_MAC_FILTER_MASK;
1546 
1547 	if (ifp->if_flags & IFF_PROMISC) {
1548 		control |= MEC_MAC_FILTER_PROMISC;
1549 		bus_space_write_8(st, sh, MEC_MULTICAST, 0xffffffffffffffffULL);
1550 		bus_space_write_8(st, sh, MEC_MAC_CONTROL, control);
1551 		return;
1552 	}
1553 
1554 	mcnt = 0;
1555 	mchash = 0;
1556 	ETHER_FIRST_MULTI(step, ec, enm);
1557 	while (enm != NULL) {
1558 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
1559 			/* set allmulti for a range of multicast addresses */
1560 			control |= MEC_MAC_FILTER_ALLMULTI;
1561 			bus_space_write_8(st, sh, MEC_MULTICAST,
1562 			    0xffffffffffffffffULL);
1563 			bus_space_write_8(st, sh, MEC_MAC_CONTROL, control);
1564 			return;
1565 		}
1566 
1567 #define mec_calchash(addr)	(ether_crc32_be((addr), ETHER_ADDR_LEN) >> 26)
1568 
1569 		hash = mec_calchash(enm->enm_addrlo);
1570 		mchash |= 1ULL << hash;
1571 		mcnt++;
1572 		ETHER_NEXT_MULTI(step, enm);
1573 	}
1574 
1575 	ifp->if_flags &= ~IFF_ALLMULTI;
1576 
1577 	if (mcnt > 0)
1578 		control |= MEC_MAC_FILTER_MATCHMULTI;
1579 
1580 	bus_space_write_8(st, sh, MEC_MULTICAST, mchash);
1581 	bus_space_write_8(st, sh, MEC_MAC_CONTROL, control);
1582 }
1583 
1584 static int
1585 mec_intr(void *arg)
1586 {
1587 	struct mec_softc *sc = arg;
1588 	bus_space_tag_t st = sc->sc_st;
1589 	bus_space_handle_t sh = sc->sc_sh;
1590 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1591 	uint32_t statreg, statack, txptr;
1592 	int handled, sent;
1593 
1594 	DPRINTF(MEC_DEBUG_INTR, ("%s: called\n", __func__));
1595 
1596 	handled = sent = 0;
1597 
1598 	for (;;) {
1599 		statreg = bus_space_read_8(st, sh, MEC_INT_STATUS);
1600 
1601 		DPRINTF(MEC_DEBUG_INTR,
1602 		    ("%s: INT_STAT = 0x%08x\n", __func__, statreg));
1603 
1604 		statack = statreg & MEC_INT_STATUS_MASK;
1605 		if (statack == 0)
1606 			break;
1607 		bus_space_write_8(st, sh, MEC_INT_STATUS, statack);
1608 
1609 		handled = 1;
1610 
1611 		if (statack &
1612 		    (MEC_INT_RX_THRESHOLD |
1613 		     MEC_INT_RX_FIFO_UNDERFLOW)) {
1614 			mec_rxintr(sc);
1615 		}
1616 
1617 		if (statack &
1618 		    (MEC_INT_TX_EMPTY |
1619 		     MEC_INT_TX_PACKET_SENT |
1620 		     MEC_INT_TX_ABORT)) {
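			/*
			 * The interrupt status also carries the current TX
			 * ring read pointer; hand it to mec_txintr() so it
			 * knows how far the hardware has progressed.
			 */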
1621 			txptr = (statreg & MEC_INT_TX_RING_BUFFER_ALIAS)
1622 			    >> MEC_INT_TX_RING_BUFFER_SHIFT;
1623 			mec_txintr(sc, txptr);
1624 			sent = 1;
1625 			if ((statack & MEC_INT_TX_EMPTY) != 0) {
1626 				/*
1627 				 * Disable TX interrupts so the TX empty
1628 				 * condition stops raising interrupts.
1629 				 */
1630 				bus_space_write_8(st, sh, MEC_TX_ALIAS, 0);
1631 				DPRINTF(MEC_DEBUG_INTR,
1632 				    ("%s: disable TX_INT\n", __func__));
1633 			}
1634 #ifdef MEC_EVENT_COUNTERS
1635 			if ((statack & MEC_INT_TX_EMPTY) != 0)
1636 				MEC_EVCNT_INCR(&sc->sc_ev_txempty);
1637 			if ((statack & MEC_INT_TX_PACKET_SENT) != 0)
1638 				MEC_EVCNT_INCR(&sc->sc_ev_txsent);
1639 #endif
1640 		}
1641 
1642 		if (statack &
1643 		    (MEC_INT_TX_LINK_FAIL |
1644 		     MEC_INT_TX_MEM_ERROR |
1645 		     MEC_INT_TX_ABORT |
1646 		     MEC_INT_RX_FIFO_UNDERFLOW |
1647 		     MEC_INT_RX_DMA_UNDERFLOW)) {
1648 			printf("%s: %s: interrupt status = 0x%08x\n",
1649 			    device_xname(sc->sc_dev), __func__, statreg);
1650 			mec_init(ifp);
1651 			break;
1652 		}
1653 	}
1654 
1655 	if (sent && !IFQ_IS_EMPTY(&ifp->if_snd)) {
1656 		/* try to get more packets going */
1657 		mec_start(ifp);
1658 	}
1659 
1660 #if NRND > 0
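	/* Feed the interrupt status into the entropy pool. */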
1661 	if (handled)
1662 		rnd_add_uint32(&sc->sc_rnd_source, statreg);
1663 #endif
1664 
1665 	return handled;
1666 }
1667 
1668 static void
1669 mec_rxintr(struct mec_softc *sc)
1670 {
1671 	bus_space_tag_t st = sc->sc_st;
1672 	bus_space_handle_t sh = sc->sc_sh;
1673 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1674 	struct mbuf *m;
1675 	struct mec_rxdesc *rxd;
1676 	uint64_t rxstat;
1677 	u_int len;
1678 	int i;
1679 	uint32_t crc;
1680 
1681 	DPRINTF(MEC_DEBUG_RXINTR, ("%s: called\n", __func__));
1682 
1683 	for (i = sc->sc_rxptr;; i = MEC_NEXTRX(i)) {
1684 		rxd = &sc->sc_rxdesc[i];
1685 
1686 		MEC_RXSTATSYNC(sc, i, BUS_DMASYNC_POSTREAD);
1687 		rxstat = rxd->rxd_stat;
1688 
1689 		DPRINTF(MEC_DEBUG_RXINTR,
1690 		    ("%s: rxstat = 0x%016llx, rxptr = %d\n",
1691 		    __func__, rxstat, i));
1692 		DPRINTF(MEC_DEBUG_RXINTR, ("%s: rxfifo = 0x%08x\n",
1693 		    __func__, (u_int)bus_space_read_8(st, sh, MEC_RX_FIFO)));
1694 
1695 		if ((rxstat & MEC_RXSTAT_RECEIVED) == 0) {
1696 			MEC_RXSTATSYNC(sc, i, BUS_DMASYNC_PREREAD);
1697 			break;
1698 		}
1699 
1700 		len = rxstat & MEC_RXSTAT_LEN;
1701 
1702 		if (len < ETHER_MIN_LEN ||
1703 		    len > (MCLBYTES - MEC_ETHER_ALIGN)) {
1704 			/* invalid length packet; drop it. */
1705 			DPRINTF(MEC_DEBUG_RXINTR,
1706 			    ("%s: wrong packet\n", __func__));
1707  dropit:
1708 			ifp->if_ierrors++;
1709 			rxd->rxd_stat = 0;
1710 			MEC_RXSTATSYNC(sc, i, BUS_DMASYNC_PREREAD);
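			/* hand the buffer back to the RX FIFO for reuse */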
1711 			bus_space_write_8(st, sh, MEC_MCL_RX_FIFO,
1712 			    MEC_CDRXADDR(sc, i));
1713 			continue;
1714 		}
1715 
1716 		/*
1717 		 * If 802.1Q VLAN MTU is enabled, ignore the bad packet error.
1718 		 */
1719 		if ((sc->sc_ethercom.ec_capenable & ETHERCAP_VLAN_MTU) != 0)
1720 			rxstat &= ~MEC_RXSTAT_BADPACKET;
1721 
1722 		if (rxstat &
1723 		    (MEC_RXSTAT_BADPACKET |
1724 		     MEC_RXSTAT_LONGEVENT |
1725 		     MEC_RXSTAT_INVALID   |
1726 		     MEC_RXSTAT_CRCERROR  |
1727 		     MEC_RXSTAT_VIOLATION)) {
1728 			printf("%s: %s: status = 0x%016llx\n",
1729 			    device_xname(sc->sc_dev), __func__, rxstat);
1730 			goto dropit;
1731 		}
1732 
1733 		/*
1734 		 * The MEC includes the CRC with every packet.  Trim
1735 		 * it off here.
1736 		 */
1737 		len -= ETHER_CRC_LEN;
1738 
1739 		/*
1740 		 * now allocate an mbuf (and possibly a cluster) to hold
1741 		 * the received packet.
1742 		 */
1743 		MGETHDR(m, M_DONTWAIT, MT_DATA);
1744 		if (m == NULL) {
1745 			printf("%s: unable to allocate RX mbuf\n",
1746 			    device_xname(sc->sc_dev));
1747 			goto dropit;
1748 		}
1749 		if (len > (MHLEN - MEC_ETHER_ALIGN)) {
1750 			MCLGET(m, M_DONTWAIT);
1751 			if ((m->m_flags & M_EXT) == 0) {
1752 				printf("%s: unable to allocate RX cluster\n",
1753 				    device_xname(sc->sc_dev));
1754 				m_freem(m);
1755 				m = NULL;
1756 				goto dropit;
1757 			}
1758 		}
1759 
1760 		/*
1761 		 * Note the MEC chip seems to insert 2 bytes of padding at the top
1762 		 * of the RX buffer; copy the whole buffer to keep the copy aligned.
1763 		 */
1764 		MEC_RXBUFSYNC(sc, i, len + ETHER_CRC_LEN, BUS_DMASYNC_POSTREAD);
1765 		memcpy(mtod(m, void *), rxd->rxd_buf, MEC_ETHER_ALIGN + len);
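		/*
		 * Save the trailing FCS (big-endian) so that mec_rxcsum()
		 * can deduct its contribution from the hardware checksum.
		 */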
1766 		crc = be32dec(rxd->rxd_buf + MEC_ETHER_ALIGN + len);
1767 		MEC_RXBUFSYNC(sc, i, ETHER_MAX_LEN, BUS_DMASYNC_PREREAD);
1768 		m->m_data += MEC_ETHER_ALIGN;
1769 
1770 		/* put RX buffer into FIFO again */
1771 		rxd->rxd_stat = 0;
1772 		MEC_RXSTATSYNC(sc, i, BUS_DMASYNC_PREREAD);
1773 		bus_space_write_8(st, sh, MEC_MCL_RX_FIFO, MEC_CDRXADDR(sc, i));
1774 
1775 		m->m_pkthdr.rcvif = ifp;
1776 		m->m_pkthdr.len = m->m_len = len;
1777 		if ((ifp->if_csum_flags_rx & (M_CSUM_TCPv4|M_CSUM_UDPv4)) != 0)
1778 			mec_rxcsum(sc, m, RXSTAT_CKSUM(rxstat), crc);
1779 
1780 		ifp->if_ipackets++;
1781 
1782 #if NBPFILTER > 0
1783 		/*
1784 		 * Pass this up to any BPF listeners, but only
1785 		 * pass it up the stack if it's for us.
1786 		 */
1787 		if (ifp->if_bpf)
1788 			bpf_mtap(ifp->if_bpf, m);
1789 #endif
1790 
1791 		/* Pass it on. */
1792 		(*ifp->if_input)(ifp, m);
1793 	}
1794 
1795 	/* update RX pointer */
1796 	sc->sc_rxptr = i;
1797 }
1798 
1799 static void
1800 mec_rxcsum(struct mec_softc *sc, struct mbuf *m, uint16_t rxcsum, uint32_t crc)
1801 {
1802 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1803 	struct ether_header *eh;
1804 	struct ip *ip;
1805 	struct udphdr *uh;
1806 	u_int len, pktlen, hlen;
1807 	uint32_t csum_data, dsum;
1808 	int csum_flags;
1809 	const uint16_t *dp;
1810 
1811 	csum_data = 0;
1812 	csum_flags = 0;
1813 
1814 	len = m->m_len;
1815 	if (len < ETHER_HDR_LEN + sizeof(struct ip))
1816 		goto out;
1817 	pktlen = len - ETHER_HDR_LEN;
1818 	eh = mtod(m, struct ether_header *);
1819 	if (ntohs(eh->ether_type) != ETHERTYPE_IP)
1820 		goto out;
1821 	ip = (struct ip *)((uint8_t *)eh + ETHER_HDR_LEN);
1822 	if (ip->ip_v != IPVERSION)
1823 		goto out;
1824 
1825 	hlen = ip->ip_hl << 2;
1826 	if (hlen < sizeof(struct ip))
1827 		goto out;
1828 
1829 	/*
1830 	 * Bail out if the packet is too short, truncated, a fragment,
1831 	 * or carries trailing garbage or Ethernet padding.
1832 	 */
1833 	if (ntohs(ip->ip_len) < hlen ||
1834 	    ntohs(ip->ip_len) != pktlen ||
1835 	    (ntohs(ip->ip_off) & (IP_MF | IP_OFFMASK)) != 0)
1836 		goto out;
1837 
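	/*
	 * The hardware checksum does not cover the pseudo header, so mark
	 * it M_CSUM_DATA | M_CSUM_NO_PSEUDOHDR and let the upper layers
	 * account for the pseudo header themselves.
	 */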
1838 	switch (ip->ip_p) {
1839 	case IPPROTO_TCP:
1840 		if ((ifp->if_csum_flags_rx & M_CSUM_TCPv4) == 0 ||
1841 		    pktlen < (hlen + sizeof(struct tcphdr)))
1842 			goto out;
1843 		csum_flags = M_CSUM_TCPv4 | M_CSUM_DATA | M_CSUM_NO_PSEUDOHDR;
1844 		break;
1845 	case IPPROTO_UDP:
1846 		if ((ifp->if_csum_flags_rx & M_CSUM_UDPv4) == 0 ||
1847 		    pktlen < (hlen + sizeof(struct udphdr)))
1848 			goto out;
1849 		uh = (struct udphdr *)((uint8_t *)ip + hlen);
1850 		if (uh->uh_sum == 0)
1851 			goto out;	/* no checksum */
1852 		csum_flags = M_CSUM_UDPv4 | M_CSUM_DATA | M_CSUM_NO_PSEUDOHDR;
1853 		break;
1854 	default:
1855 		goto out;
1856 	}
1857 
1858 	/*
1859 	 * The checksum computed by the MAC covers the Ethernet header,
1860 	 * the IP header, and the CRC, so deduct their contributions.
1861 	 * A valid IP header sums to 0xffff in ones-complement, so it
1862 	 * needs no explicit deduction.
1863 	 */
1864 	dsum = 0;
1865 
1866 	/* deduct Ethernet header */
1867 	dp = (const uint16_t *)eh;
1868 	for (hlen = 0; hlen < (ETHER_HDR_LEN / sizeof(uint16_t)); hlen++)
1869 		dsum += ntohs(*dp++);
1870 
1871 	/* deduct CRC */
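	/*
	 * If the payload length is odd, the FCS starts at an odd byte
	 * offset, so its bytes fall into the opposite halves of the 16-bit
	 * words of the ones-complement sum; account for that here.
	 */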
1872 	if (len & 1) {
1873 		dsum += (crc >> 24) & 0x00ff;
1874 		dsum += (crc >>  8) & 0xffff;
1875 		dsum += (crc <<  8) & 0xff00;
1876 	} else {
1877 		dsum += (crc >> 16) & 0xffff;
1878 		dsum += (crc >>  0) & 0xffff;
1879 	}
1880 	while (dsum >> 16)
1881 		dsum = (dsum >> 16) + (dsum & 0xffff);
1882 
1883 	csum_data = rxcsum;
1884 	csum_data += (uint16_t)~dsum;
1885 
1886 	while (csum_data >> 16)
1887 		csum_data = (csum_data >> 16) + (csum_data & 0xffff);
1888 
1889  out:
1890 	m->m_pkthdr.csum_flags = csum_flags;
1891 	m->m_pkthdr.csum_data = csum_data;
1892 }
1893 
1894 static void
1895 mec_txintr(struct mec_softc *sc, uint32_t txptr)
1896 {
1897 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1898 	struct mec_txdesc *txd;
1899 	struct mec_txsoft *txs;
1900 	bus_dmamap_t dmamap;
1901 	uint64_t txstat;
1902 	int i;
1903 	u_int col;
1904 
1905 	DPRINTF(MEC_DEBUG_TXINTR, ("%s: called\n", __func__));
1906 
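	/*
	 * Reclaim transmitted descriptors from sc_txdirty up to (but not
	 * including) txptr, the TX ring read pointer reported in the
	 * interrupt status.
	 */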
1907 	for (i = sc->sc_txdirty; i != txptr && sc->sc_txpending != 0;
1908 	    i = MEC_NEXTTX(i), sc->sc_txpending--) {
1909 		txd = &sc->sc_txdesc[i];
1910 
1911 		MEC_TXCMDSYNC(sc, i,
1912 		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1913 
1914 		txstat = txd->txd_stat;
1915 		DPRINTF(MEC_DEBUG_TXINTR,
1916 		    ("%s: dirty = %d, txstat = 0x%016llx\n",
1917 		    __func__, i, txstat));
1918 		if ((txstat & MEC_TXSTAT_SENT) == 0) {
1919 			MEC_TXCMDSYNC(sc, i, BUS_DMASYNC_PREREAD);
1920 			break;
1921 		}
1922 
1923 		txs = &sc->sc_txsoft[i];
1924 		if ((txs->txs_flags & MEC_TXS_TXDPTR) != 0) {
1925 			dmamap = txs->txs_dmamap;
1926 			bus_dmamap_sync(sc->sc_dmat, dmamap, 0,
1927 			    dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1928 			bus_dmamap_unload(sc->sc_dmat, dmamap);
1929 			m_freem(txs->txs_mbuf);
1930 			txs->txs_mbuf = NULL;
1931 		}
1932 
1933 		col = (txstat & MEC_TXSTAT_COLCNT) >> MEC_TXSTAT_COLCNT_SHIFT;
1934 		ifp->if_collisions += col;
1935 
1936 		if ((txstat & MEC_TXSTAT_SUCCESS) == 0) {
1937 			printf("%s: TX error: txstat = 0x%016llx\n",
1938 			    device_xname(sc->sc_dev), txstat);
1939 			ifp->if_oerrors++;
1940 		} else
1941 			ifp->if_opackets++;
1942 	}
1943 
1944 	/* update the dirty TX buffer pointer */
1945 	sc->sc_txdirty = i;
1946 	DPRINTF(MEC_DEBUG_INTR,
1947 	    ("%s: sc_txdirty = %2d, sc_txpending = %2d\n",
1948 	    __func__, sc->sc_txdirty, sc->sc_txpending));
1949 
1950 	/* cancel the watchdog timer if there are no pending TX packets */
1951 	if (sc->sc_txpending == 0)
1952 		ifp->if_timer = 0;
1953 	if (sc->sc_txpending < MEC_NTXDESC - MEC_NTXDESC_RSVD)
1954 		ifp->if_flags &= ~IFF_OACTIVE;
1955 }
1956 
1957 static bool
1958 mec_shutdown(device_t self, int howto)
1959 {
1960 	struct mec_softc *sc = device_private(self);
1961 
1962 	mec_stop(&sc->sc_ethercom.ec_if, 1);
1963 	/* make sure to stop DMA etc. */
1964 	mec_reset(sc);
1965 
1966 	return true;
1967 }
1968