1 /* $NetBSD: if_mec.c,v 1.65 2023/12/20 15:29:07 thorpej Exp $ */
2 
3 /*-
4  * Copyright (c) 2004, 2008 Izumi Tsutsui.  All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25  */
26 
27 /*
28  * Copyright (c) 2003 Christopher SEKIYA
29  * All rights reserved.
30  *
31  * Redistribution and use in source and binary forms, with or without
32  * modification, are permitted provided that the following conditions
33  * are met:
34  * 1. Redistributions of source code must retain the above copyright
35  *    notice, this list of conditions and the following disclaimer.
36  * 2. Redistributions in binary form must reproduce the above copyright
37  *    notice, this list of conditions and the following disclaimer in the
38  *    documentation and/or other materials provided with the distribution.
39  * 3. All advertising materials mentioning features or use of this software
40  *    must display the following acknowledgement:
41  *          This product includes software developed for the
42  *          NetBSD Project.  See http://www.NetBSD.org/ for
43  *          information about NetBSD.
44  * 4. The name of the author may not be used to endorse or promote products
45  *    derived from this software without specific prior written permission.
46  *
47  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
48  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
49  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
50  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
51  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
52  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
53  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
54  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
55  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
56  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
57  */
58 
59 /*
60  * MACE MAC-110 Ethernet driver
61  */
62 
63 #include <sys/cdefs.h>
64 __KERNEL_RCSID(0, "$NetBSD: if_mec.c,v 1.65 2023/12/20 15:29:07 thorpej Exp $");
65 
66 #include "opt_ddb.h"
67 
68 #include <sys/param.h>
69 #include <sys/systm.h>
70 #include <sys/device.h>
71 #include <sys/callout.h>
72 #include <sys/mbuf.h>
73 #include <sys/kernel.h>
74 #include <sys/socket.h>
75 #include <sys/ioctl.h>
76 #include <sys/errno.h>
77 
78 #include <sys/rndsource.h>
79 
80 #include <net/if.h>
81 #include <net/if_dl.h>
82 #include <net/if_media.h>
83 #include <net/if_ether.h>
84 
85 #include <netinet/in.h>
86 #include <netinet/in_systm.h>
87 #include <netinet/ip.h>
88 #include <netinet/tcp.h>
89 #include <netinet/udp.h>
90 
91 #include <net/bpf.h>
92 
93 #include <sys/bus.h>
94 #include <machine/intr.h>
95 #include <machine/machtype.h>
96 
97 #include <dev/mii/mii.h>
98 #include <dev/mii/miivar.h>
99 
100 #include <sgimips/mace/macevar.h>
101 #include <sgimips/mace/if_mecreg.h>
102 
103 #include <dev/arcbios/arcbios.h>
104 #include <dev/arcbios/arcbiosvar.h>
105 
106 /* #define MEC_DEBUG */
107 
108 #ifdef MEC_DEBUG
109 #define MEC_DEBUG_RESET		0x01
110 #define MEC_DEBUG_START		0x02
111 #define MEC_DEBUG_STOP		0x04
112 #define MEC_DEBUG_INTR		0x08
113 #define MEC_DEBUG_RXINTR	0x10
114 #define MEC_DEBUG_TXINTR	0x20
115 #define MEC_DEBUG_TXSEGS	0x40
116 uint32_t mec_debug = 0;
117 #define DPRINTF(x, y)	if (mec_debug & (x)) printf y
118 #else
119 #define DPRINTF(x, y)	/* nothing */
120 #endif
121 
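/*
 * Debugging usage sketch (illustrative, not part of the driver): with
 * MEC_DEBUG defined, set mec_debug to a bitwise OR of the MEC_DEBUG_*
 * flags above (for example from ddb(4)) to enable the corresponding
 * DPRINTF() categories, e.g. MEC_DEBUG_RESET | MEC_DEBUG_INTR == 0x09.
 */
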
122 /* #define MEC_EVENT_COUNTERS */
123 
124 #ifdef MEC_EVENT_COUNTERS
125 #define MEC_EVCNT_INCR(ev)	(ev)->ev_count++
126 #else
127 #define MEC_EVCNT_INCR(ev)	do {} while (/* CONSTCOND */ 0)
128 #endif
129 
130 /*
131  * Transmit descriptor list size
132  */
133 #define MEC_NTXDESC		64
134 #define MEC_NTXDESC_MASK	(MEC_NTXDESC - 1)
135 #define MEC_NEXTTX(x)		(((x) + 1) & MEC_NTXDESC_MASK)
136 #define MEC_NTXDESC_RSVD	4
137 #define MEC_NTXDESC_INTR	8
138 
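/*
 * Illustrative note (not from the original source): MEC_NTXDESC is a
 * power of two, so MEC_NEXTTX() wraps the ring index with a simple mask
 * rather than a modulo, e.g. MEC_NEXTTX(62) == 63 and MEC_NEXTTX(63) == 0.
 */
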
139 /*
140  * software state for TX
141  */
142 struct mec_txsoft {
143 	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
144 	bus_dmamap_t txs_dmamap;	/* our DMA map */
145 	uint32_t txs_flags;
146 #define MEC_TXS_BUFLEN_MASK	0x0000007f	/* data len in txd_buf */
147 #define MEC_TXS_TXDPTR		0x00000080	/* concat txd_ptr is used */
148 };
149 
150 /*
151  * Transmit buffer descriptor
152  */
153 #define MEC_TXDESCSIZE		128
154 #define MEC_NTXPTR		3
155 #define MEC_TXD_BUFOFFSET	sizeof(uint64_t)
156 #define MEC_TXD_BUFOFFSET1	\
157 	(sizeof(uint64_t) + sizeof(uint64_t) * MEC_NTXPTR)
158 #define MEC_TXD_BUFSIZE		(MEC_TXDESCSIZE - MEC_TXD_BUFOFFSET)
159 #define MEC_TXD_BUFSIZE1	(MEC_TXDESCSIZE - MEC_TXD_BUFOFFSET1)
160 #define MEC_TXD_BUFSTART(len)	(MEC_TXD_BUFSIZE - (len))
161 #define MEC_TXD_ALIGN		8
162 #define MEC_TXD_ALIGNMASK	(MEC_TXD_ALIGN - 1)
163 #define MEC_TXD_ROUNDUP(addr)	\
164 	(((addr) + MEC_TXD_ALIGNMASK) & ~(uint64_t)MEC_TXD_ALIGNMASK)
165 #define MEC_NTXSEG		16
166 
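/*
 * Worked example (illustrative): a frame copied into the txdesc buffer is
 * placed at its tail, so for a 60 byte frame MEC_TXD_BUFSTART(60) ==
 * MEC_TXD_BUFSIZE - 60 == 60, and the data ends exactly at the 128 byte
 * descriptor boundary.
 */
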
167 struct mec_txdesc {
168 	volatile uint64_t txd_cmd;
169 #define MEC_TXCMD_DATALEN	0x000000000000ffff	/* data length */
170 #define MEC_TXCMD_BUFSTART	0x00000000007f0000	/* start byte offset */
171 #define  TXCMD_BUFSTART(x)	((x) << 16)
172 #define MEC_TXCMD_TERMDMA	0x0000000000800000	/* stop DMA on abort */
173 #define MEC_TXCMD_TXINT		0x0000000001000000	/* INT after TX done */
174 #define MEC_TXCMD_PTR1		0x0000000002000000	/* valid 1st txd_ptr */
175 #define MEC_TXCMD_PTR2		0x0000000004000000	/* valid 2nd txd_ptr */
176 #define MEC_TXCMD_PTR3		0x0000000008000000	/* valid 3rd txd_ptr */
177 #define MEC_TXCMD_UNUSED	0xfffffffff0000000ULL	/* should be zero */
178 
179 #define txd_stat	txd_cmd
180 #define MEC_TXSTAT_LEN		0x000000000000ffff	/* TX length */
181 #define MEC_TXSTAT_COLCNT	0x00000000000f0000	/* collision count */
182 #define MEC_TXSTAT_COLCNT_SHIFT	16
183 #define MEC_TXSTAT_LATE_COL	0x0000000000100000	/* late collision */
184 #define MEC_TXSTAT_CRCERROR	0x0000000000200000	/* */
185 #define MEC_TXSTAT_DEFERRED	0x0000000000400000	/* */
186 #define MEC_TXSTAT_SUCCESS	0x0000000000800000	/* TX complete */
187 #define MEC_TXSTAT_TOOBIG	0x0000000001000000	/* */
188 #define MEC_TXSTAT_UNDERRUN	0x0000000002000000	/* */
189 #define MEC_TXSTAT_COLLISIONS	0x0000000004000000	/* */
190 #define MEC_TXSTAT_EXDEFERRAL	0x0000000008000000	/* */
191 #define MEC_TXSTAT_COLLIDED	0x0000000010000000	/* */
192 #define MEC_TXSTAT_UNUSED	0x7fffffffe0000000ULL	/* should be zero */
193 #define MEC_TXSTAT_SENT		0x8000000000000000ULL	/* packet sent */
194 
195 	union {
196 		uint64_t txptr[MEC_NTXPTR];
197 #define MEC_TXPTR_UNUSED2	0x0000000000000007	/* should be zero */
198 #define MEC_TXPTR_DMAADDR	0x00000000fffffff8	/* TX DMA address */
199 #define MEC_TXPTR_LEN		0x0000ffff00000000ULL	/* buffer length */
200 #define  TXPTR_LEN(x)		((uint64_t)(x) << 32)
201 #define MEC_TXPTR_UNUSED1	0xffff000000000000ULL	/* should be zero */
202 
203 		uint8_t txbuf[MEC_TXD_BUFSIZE];
204 	} txd_data;
205 #define txd_ptr		txd_data.txptr
206 #define txd_buf		txd_data.txbuf
207 };
208 
209 /*
210  * Receive buffer size
211  */
212 #define MEC_NRXDESC		16
213 #define MEC_NRXDESC_MASK	(MEC_NRXDESC - 1)
214 #define MEC_NEXTRX(x)		(((x) + 1) & MEC_NRXDESC_MASK)
215 
216 /*
217  * Receive buffer description
218  */
219 #define MEC_RXDESCSIZE		4096	/* umm, should be 4kbyte aligned */
220 #define MEC_RXD_NRXPAD		3
221 #define MEC_RXD_DMAOFFSET	(1 + MEC_RXD_NRXPAD)
222 #define MEC_RXD_BUFOFFSET	(MEC_RXD_DMAOFFSET * sizeof(uint64_t))
223 #define MEC_RXD_BUFSIZE		(MEC_RXDESCSIZE - MEC_RXD_BUFOFFSET)
224 
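/*
 * Illustrative note: MEC_RXD_DMAOFFSET is 4 (the status word plus 3 pad
 * words), so the RX data area starts 32 bytes into each 4 kbyte
 * descriptor, leaving MEC_RXD_BUFSIZE == 4064 bytes of buffer space.
 */
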
225 struct mec_rxdesc {
226 	volatile uint64_t rxd_stat;
227 #define MEC_RXSTAT_LEN		0x000000000000ffff	/* data length */
228 #define MEC_RXSTAT_VIOLATION	0x0000000000010000	/* code violation (?) */
229 #define MEC_RXSTAT_UNUSED2	0x0000000000020000	/* unknown (?) */
230 #define MEC_RXSTAT_CRCERROR	0x0000000000040000	/* CRC error */
231 #define MEC_RXSTAT_MULTICAST	0x0000000000080000	/* multicast packet */
232 #define MEC_RXSTAT_BROADCAST	0x0000000000100000	/* broadcast packet */
233 #define MEC_RXSTAT_INVALID	0x0000000000200000	/* invalid preamble */
234 #define MEC_RXSTAT_LONGEVENT	0x0000000000400000	/* long packet */
235 #define MEC_RXSTAT_BADPACKET	0x0000000000800000	/* bad packet */
236 #define MEC_RXSTAT_CAREVENT	0x0000000001000000	/* carrier event */
237 #define MEC_RXSTAT_MATCHMCAST	0x0000000002000000	/* match multicast */
238 #define MEC_RXSTAT_MATCHMAC	0x0000000004000000	/* match MAC */
239 #define MEC_RXSTAT_SEQNUM	0x00000000f8000000	/* sequence number */
240 #define MEC_RXSTAT_CKSUM	0x0000ffff00000000ULL	/* IP checksum */
241 #define  RXSTAT_CKSUM(x)	(((uint64_t)(x) & MEC_RXSTAT_CKSUM) >> 32)
242 #define MEC_RXSTAT_UNUSED1	0x7fff000000000000ULL	/* should be zero */
243 #define MEC_RXSTAT_RECEIVED	0x8000000000000000ULL	/* set to 1 on RX */
244 	uint64_t rxd_pad1[MEC_RXD_NRXPAD];
245 	uint8_t  rxd_buf[MEC_RXD_BUFSIZE];
246 };
247 
248 /*
249  * control structures for DMA ops
250  */
251 struct mec_control_data {
252 	/*
253 	 * TX descriptors and buffers
254 	 */
255 	struct mec_txdesc mcd_txdesc[MEC_NTXDESC];
256 
257 	/*
258 	 * RX descriptors and buffers
259 	 */
260 	struct mec_rxdesc mcd_rxdesc[MEC_NRXDESC];
261 };
262 
263 /*
264  * It _seems_ there are some restrictions on descriptor addresses:
265  *
266  * - Base address of txdescs should be 8kbyte aligned
267  * - Each txdesc should be 128byte aligned
268  * - Each rxdesc should be 4kbyte aligned
269  *
270  * So we should specify 8 kbyte alignment to allocate txdescs.
271  * In this case, sizeof(struct mec_txdesc) * MEC_NTXDESC is 8192,
272  * so rxdescs are also allocated 4 kbyte aligned.
273  */
274 #define MEC_CONTROL_DATA_ALIGN	(8 * 1024)
275 
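/*
 * A minimal compile-time sanity-check sketch (illustrative only, assuming
 * the structure layouts above); the driver itself does not carry these
 * assertions.
 */
#if 0
__CTASSERT(sizeof(struct mec_txdesc) == MEC_TXDESCSIZE);
__CTASSERT(sizeof(struct mec_rxdesc) == MEC_RXDESCSIZE);
__CTASSERT(sizeof(struct mec_txdesc) * MEC_NTXDESC == MEC_CONTROL_DATA_ALIGN);
#endif
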
276 #define MEC_CDOFF(x)	offsetof(struct mec_control_data, x)
277 #define MEC_CDTXOFF(x)	MEC_CDOFF(mcd_txdesc[(x)])
278 #define MEC_CDRXOFF(x)	MEC_CDOFF(mcd_rxdesc[(x)])
279 
280 /*
281  * software state per device
282  */
283 struct mec_softc {
284 	device_t sc_dev;		/* generic device structures */
285 
286 	bus_space_tag_t sc_st;		/* bus_space tag */
287 	bus_space_handle_t sc_sh;	/* bus_space handle */
288 	bus_dma_tag_t sc_dmat;		/* bus_dma tag */
289 
290 	struct ethercom sc_ethercom;	/* Ethernet common part */
291 
292 	struct mii_data sc_mii;		/* MII/media information */
293 	int sc_phyaddr;			/* MII address */
294 	struct callout sc_tick_ch;	/* tick callout */
295 
296 	uint8_t sc_enaddr[ETHER_ADDR_LEN]; /* MAC address */
297 
298 	bus_dmamap_t sc_cddmamap;	/* bus_dma map for control data */
299 #define sc_cddma	sc_cddmamap->dm_segs[0].ds_addr
300 
301 	/* pointer to allocated control data */
302 	struct mec_control_data *sc_control_data;
303 #define sc_txdesc	sc_control_data->mcd_txdesc
304 #define sc_rxdesc	sc_control_data->mcd_rxdesc
305 
306 	/* software state for TX descs */
307 	struct mec_txsoft sc_txsoft[MEC_NTXDESC];
308 
309 	int sc_txpending;		/* number of TX requests pending */
310 	int sc_txdirty;			/* first dirty TX descriptor */
311 	int sc_txlast;			/* last used TX descriptor */
312 
313 	int sc_rxptr;			/* next ready RX buffer */
314 
315 	krndsource_t sc_rnd_source; /* random source */
316 #ifdef MEC_EVENT_COUNTERS
317 	struct evcnt sc_ev_txpkts;	/* TX packets queued total */
318 	struct evcnt sc_ev_txdpad;	/* TX packets padded in txdesc buf */
319 	struct evcnt sc_ev_txdbuf;	/* TX packets copied to txdesc buf */
320 	struct evcnt sc_ev_txptr1;	/* TX packets using concat ptr1 */
321 	struct evcnt sc_ev_txptr1a;	/* TX packets  w/ptr1  ~160bytes */
322 	struct evcnt sc_ev_txptr1b;	/* TX packets  w/ptr1  ~256bytes */
323 	struct evcnt sc_ev_txptr1c;	/* TX packets  w/ptr1  ~512bytes */
324 	struct evcnt sc_ev_txptr1d;	/* TX packets  w/ptr1 ~1024bytes */
325 	struct evcnt sc_ev_txptr1e;	/* TX packets  w/ptr1 >1024bytes */
326 	struct evcnt sc_ev_txptr2;	/* TX packets using concat ptr1,2 */
327 	struct evcnt sc_ev_txptr2a;	/* TX packets  w/ptr2  ~160bytes */
328 	struct evcnt sc_ev_txptr2b;	/* TX packets  w/ptr2  ~256bytes */
329 	struct evcnt sc_ev_txptr2c;	/* TX packets  w/ptr2  ~512bytes */
330 	struct evcnt sc_ev_txptr2d;	/* TX packets  w/ptr2 ~1024bytes */
331 	struct evcnt sc_ev_txptr2e;	/* TX packets  w/ptr2 >1024bytes */
332 	struct evcnt sc_ev_txptr3;	/* TX packets using concat ptr1,2,3 */
333 	struct evcnt sc_ev_txptr3a;	/* TX packets  w/ptr3  ~160bytes */
334 	struct evcnt sc_ev_txptr3b;	/* TX packets  w/ptr3  ~256bytes */
335 	struct evcnt sc_ev_txptr3c;	/* TX packets  w/ptr3  ~512bytes */
336 	struct evcnt sc_ev_txptr3d;	/* TX packets  w/ptr3 ~1024bytes */
337 	struct evcnt sc_ev_txptr3e;	/* TX packets  w/ptr3 >1024bytes */
338 	struct evcnt sc_ev_txmbuf;	/* TX packets copied to new mbufs */
339 	struct evcnt sc_ev_txmbufa;	/* TX packets  w/mbuf  ~160bytes */
340 	struct evcnt sc_ev_txmbufb;	/* TX packets  w/mbuf  ~256bytes */
341 	struct evcnt sc_ev_txmbufc;	/* TX packets  w/mbuf  ~512bytes */
342 	struct evcnt sc_ev_txmbufd;	/* TX packets  w/mbuf ~1024bytes */
343 	struct evcnt sc_ev_txmbufe;	/* TX packets  w/mbuf >1024bytes */
344 	struct evcnt sc_ev_txptrs;	/* TX packets using ptrs total */
345 	struct evcnt sc_ev_txptrc0;	/* TX packets  w/ptrs no hdr chain */
346 	struct evcnt sc_ev_txptrc1;	/* TX packets  w/ptrs  1 hdr chain */
347 	struct evcnt sc_ev_txptrc2;	/* TX packets  w/ptrs  2 hdr chains */
348 	struct evcnt sc_ev_txptrc3;	/* TX packets  w/ptrs  3 hdr chains */
349 	struct evcnt sc_ev_txptrc4;	/* TX packets  w/ptrs  4 hdr chains */
350 	struct evcnt sc_ev_txptrc5;	/* TX packets  w/ptrs  5 hdr chains */
351 	struct evcnt sc_ev_txptrc6;	/* TX packets  w/ptrs >5 hdr chains */
352 	struct evcnt sc_ev_txptrh0;	/* TX packets  w/ptrs  ~8bytes hdr */
353 	struct evcnt sc_ev_txptrh1;	/* TX packets  w/ptrs ~16bytes hdr */
354 	struct evcnt sc_ev_txptrh2;	/* TX packets  w/ptrs ~32bytes hdr */
355 	struct evcnt sc_ev_txptrh3;	/* TX packets  w/ptrs ~64bytes hdr */
356 	struct evcnt sc_ev_txptrh4;	/* TX packets  w/ptrs ~80bytes hdr */
357 	struct evcnt sc_ev_txptrh5;	/* TX packets  w/ptrs ~96bytes hdr */
358 	struct evcnt sc_ev_txdstall;	/* TX stalled due to no txdesc */
359 	struct evcnt sc_ev_txempty;	/* TX empty interrupts */
360 	struct evcnt sc_ev_txsent;	/* TX sent interrupts */
361 #endif
362 };
363 
364 #define MEC_CDTXADDR(sc, x)	((sc)->sc_cddma + MEC_CDTXOFF(x))
365 #define MEC_CDRXADDR(sc, x)	((sc)->sc_cddma + MEC_CDRXOFF(x))
366 
367 #define MEC_TXDESCSYNC(sc, x, ops)					\
368 	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
369 	    MEC_CDTXOFF(x), MEC_TXDESCSIZE, (ops))
370 #define MEC_TXCMDSYNC(sc, x, ops)					\
371 	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
372 	    MEC_CDTXOFF(x), sizeof(uint64_t), (ops))
373 
374 #define MEC_RXSTATSYNC(sc, x, ops)					\
375 	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
376 	    MEC_CDRXOFF(x), sizeof(uint64_t), (ops))
377 #define MEC_RXBUFSYNC(sc, x, len, ops)					\
378 	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
379 	    MEC_CDRXOFF(x) + MEC_RXD_BUFOFFSET,				\
380 	    MEC_ETHER_ALIGN + (len), (ops))
381 
382 /* XXX these values should be moved to <net/if_ether.h> ? */
383 #define ETHER_PAD_LEN	(ETHER_MIN_LEN - ETHER_CRC_LEN)
384 #define MEC_ETHER_ALIGN	2
385 
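/*
 * Illustrative note: ETHER_PAD_LEN works out to 64 - 4 == 60 bytes, the
 * minimum frame size excluding the CRC appended by the MAC, and
 * MEC_ETHER_ALIGN keeps the IP header 4-byte aligned after the 14 byte
 * Ethernet header.
 */
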
386 static int	mec_match(device_t, cfdata_t, void *);
387 static void	mec_attach(device_t, device_t, void *);
388 
389 static int	mec_mii_readreg(device_t, int, int, uint16_t *);
390 static int	mec_mii_writereg(device_t, int, int, uint16_t);
391 static int	mec_mii_wait(struct mec_softc *);
392 static void	mec_statchg(struct ifnet *);
393 
394 static int	mec_init(struct ifnet * ifp);
395 static void	mec_start(struct ifnet *);
396 static void	mec_watchdog(struct ifnet *);
397 static void	mec_tick(void *);
398 static int	mec_ioctl(struct ifnet *, u_long, void *);
399 static void	mec_reset(struct mec_softc *);
400 static void	mec_setfilter(struct mec_softc *);
401 static int	mec_intr(void *arg);
402 static void	mec_stop(struct ifnet *, int);
403 static void	mec_rxintr(struct mec_softc *);
404 static void	mec_rxcsum(struct mec_softc *, struct mbuf *, uint16_t,
405 		    uint32_t);
406 static void	mec_txintr(struct mec_softc *, uint32_t);
407 static bool	mec_shutdown(device_t, int);
408 
409 CFATTACH_DECL_NEW(mec, sizeof(struct mec_softc),
410     mec_match, mec_attach, NULL, NULL);
411 
412 static int mec_matched = 0;
413 
414 static int
415 mec_match(device_t parent, cfdata_t cf, void *aux)
416 {
417 
418 	/* allow only one device */
419 	if (mec_matched)
420 		return 0;
421 
422 	mec_matched = 1;
423 	return 1;
424 }
425 
426 static void
427 mec_attach(device_t parent, device_t self, void *aux)
428 {
429 	struct mec_softc *sc = device_private(self);
430 	struct mace_attach_args *maa = aux;
431 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
432 	struct mii_data *mii = &sc->sc_mii;
433 	uint64_t address, command;
434 	const char *macaddr;
435 	struct mii_softc *child;
436 	bus_dma_segment_t seg;
437 	int i, err, rseg;
438 	bool mac_is_fake;
439 
440 	sc->sc_dev = self;
441 	sc->sc_st = maa->maa_st;
442 	if (bus_space_subregion(sc->sc_st, maa->maa_sh,
443 	    maa->maa_offset, 0,	&sc->sc_sh) != 0) {
444 		aprint_error(": can't map i/o space\n");
445 		return;
446 	}
447 
448 	/* set up DMA structures */
449 	sc->sc_dmat = maa->maa_dmat;
450 
451 	/*
452 	 * Allocate the control data structures, and create and load the
453 	 * DMA map for it.
454 	 */
455 	if ((err = bus_dmamem_alloc(sc->sc_dmat,
456 	    sizeof(struct mec_control_data), MEC_CONTROL_DATA_ALIGN, 0,
457 	    &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
458 		aprint_error(": unable to allocate control data, error = %d\n",
459 		    err);
460 		goto fail_0;
461 	}
462 	/*
463 	 * XXX needs re-think...
464 	 * The control data structures contain the whole RX data buffers, so
465 	 * BUS_DMA_COHERENT (which disables caching) may cause a performance
466 	 * hit when copying data from the RX buffers to mbufs in normal
467 	 * memory; without it, we have to make sure all bus_dmamap_sync(9)
468 	 * ops are called properly instead.
469 	 */
470 	if ((err = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
471 	    sizeof(struct mec_control_data),
472 	    (void **)&sc->sc_control_data, /*BUS_DMA_COHERENT*/ 0)) != 0) {
473 		aprint_error(": unable to map control data, error = %d\n", err);
474 		goto fail_1;
475 	}
476 	memset(sc->sc_control_data, 0, sizeof(struct mec_control_data));
477 
478 	if ((err = bus_dmamap_create(sc->sc_dmat,
479 	    sizeof(struct mec_control_data), 1,
480 	    sizeof(struct mec_control_data), 0, 0, &sc->sc_cddmamap)) != 0) {
481 		aprint_error(": unable to create control data DMA map,"
482 		    " error = %d\n", err);
483 		goto fail_2;
484 	}
485 	if ((err = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
486 	    sc->sc_control_data, sizeof(struct mec_control_data), NULL,
487 	    BUS_DMA_NOWAIT)) != 0) {
488 		aprint_error(": unable to load control data DMA map,"
489 		    " error = %d\n", err);
490 		goto fail_3;
491 	}
492 
493 	/* create TX buffer DMA maps */
494 	for (i = 0; i < MEC_NTXDESC; i++) {
495 		if ((err = bus_dmamap_create(sc->sc_dmat,
496 		    MCLBYTES, MEC_NTXSEG, MCLBYTES, PAGE_SIZE, 0,
497 		    &sc->sc_txsoft[i].txs_dmamap)) != 0) {
498 			aprint_error(": unable to create tx DMA map %d,"
499 			    " error = %d\n", i, err);
500 			goto fail_4;
501 		}
502 	}
503 
504 	callout_init(&sc->sc_tick_ch, 0);
505 
506 	/* get Ethernet address from ARCBIOS */
507 	if ((macaddr = arcbios_GetEnvironmentVariable("eaddr")) == NULL) {
508 		aprint_error(": unable to get MAC address!\n");
509 		goto fail_4;
510 	}
511 	/*
512 	 * On some machines the DS2502 chip storing the serial number /
513 	 * MAC address is on the PCI riser board - if this board is
514 	 * missing, ARCBIOS will not know a good Ethernet address (but
515 	 * otherwise the machine will work fine).
516 	 */
517 	mac_is_fake = false;
518 	if (strcmp(macaddr, "ff:ff:ff:ff:ff:ff") == 0) {
519 		uint32_t ui = 0;
520 		const char * netaddr =
521 			arcbios_GetEnvironmentVariable("netaddr");
522 
523 		/*
524 		 * Create a MAC address by abusing the "netaddr" env var
525 		 */
526 		sc->sc_enaddr[0] = 0xf2;
527 		sc->sc_enaddr[1] = 0x0b;
528 		sc->sc_enaddr[2] = 0xa4;
529 		if (netaddr) {
530 			mac_is_fake = true;
531 			while (*netaddr) {
532 				int v = 0;
533 				while (*netaddr && *netaddr != '.') {
534 					if (*netaddr >= '0' && *netaddr <= '9')
535 						v = v*10 + (*netaddr - '0');
536 					netaddr++;
537 				}
538 				ui <<= 8;
539 				ui |= v;
540 				if (*netaddr == '.')
541 					netaddr++;
542 			}
543 		}
544 		memcpy(sc->sc_enaddr+3, ((uint8_t *)&ui)+1, 3);
545 	}
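	/*
	 * Example: on (big-endian) sgimips, a netaddr of "192.168.1.5"
	 * yields the locally administered address f2:0b:a4:a8:01:05.
	 */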
546 	if (!mac_is_fake)
547 		ether_aton_r(sc->sc_enaddr, sizeof(sc->sc_enaddr), macaddr);
548 
549 	/* set the Ethernet address */
550 	address = 0;
551 	for (i = 0; i < ETHER_ADDR_LEN; i++) {
552 		address = address << 8;
553 		address |= sc->sc_enaddr[i];
554 	}
555 	bus_space_write_8(sc->sc_st, sc->sc_sh, MEC_STATION, address);
556 
557 	/* reset device */
558 	mec_reset(sc);
559 
560 	command = bus_space_read_8(sc->sc_st, sc->sc_sh, MEC_MAC_CONTROL);
561 
562 	aprint_normal(": MAC-110 Ethernet, rev %u\n",
563 	    (u_int)((command & MEC_MAC_REVISION) >> MEC_MAC_REVISION_SHIFT));
564 
565 	if (mac_is_fake)
566 		aprint_normal_dev(self,
567 		    "could not get ethernet address from firmware"
568 		    " - generated one from the \"netaddr\" environment"
569 		    " variable\n");
570 	aprint_normal_dev(self, "Ethernet address %s\n",
571 	    ether_sprintf(sc->sc_enaddr));
572 
573 	/* Done, now attach everything */
574 
575 	mii->mii_ifp = ifp;
576 	mii->mii_readreg = mec_mii_readreg;
577 	mii->mii_writereg = mec_mii_writereg;
578 	mii->mii_statchg = mec_statchg;
579 
580 	/* Set up PHY properties */
581 	sc->sc_ethercom.ec_mii = mii;
582 	ifmedia_init(&mii->mii_media, 0, ether_mediachange, ether_mediastatus);
583 	mii_attach(self, mii, 0xffffffff, MII_PHY_ANY, MII_OFFSET_ANY, 0);
584 
585 	child = LIST_FIRST(&mii->mii_phys);
586 	if (child == NULL) {
587 		/* No PHY attached */
588 		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_MANUAL,
589 		    0, NULL);
590 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_MANUAL);
591 	} else {
592 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
593 		sc->sc_phyaddr = child->mii_phy;
594 	}
595 
596 	strcpy(ifp->if_xname, device_xname(self));
597 	ifp->if_softc = sc;
598 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
599 	ifp->if_ioctl = mec_ioctl;
600 	ifp->if_start = mec_start;
601 	ifp->if_watchdog = mec_watchdog;
602 	ifp->if_init = mec_init;
603 	ifp->if_stop = mec_stop;
604 	ifp->if_mtu = ETHERMTU;
605 	IFQ_SET_READY(&ifp->if_snd);
606 
607 	/* mec has dumb RX cksum support */
608 	ifp->if_capabilities = IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx;
609 
610 	/* We can support 802.1Q VLAN-sized frames. */
611 	sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU;
612 
613 	/* attach the interface */
614 	if_attach(ifp);
615 	if_deferred_start_init(ifp, NULL);
616 	ether_ifattach(ifp, sc->sc_enaddr);
617 
618 	/* establish interrupt */
619 	cpu_intr_establish(maa->maa_intr, maa->maa_intrmask, mec_intr, sc);
620 
621 	rnd_attach_source(&sc->sc_rnd_source, device_xname(self),
622 	    RND_TYPE_NET, RND_FLAG_DEFAULT);
623 
624 #ifdef MEC_EVENT_COUNTERS
625 	evcnt_attach_dynamic(&sc->sc_ev_txpkts , EVCNT_TYPE_MISC,
626 	    NULL, device_xname(self), "TX pkts queued total");
627 	evcnt_attach_dynamic(&sc->sc_ev_txdpad , EVCNT_TYPE_MISC,
628 	    NULL, device_xname(self), "TX pkts padded in txdesc buf");
629 	evcnt_attach_dynamic(&sc->sc_ev_txdbuf , EVCNT_TYPE_MISC,
630 	    NULL, device_xname(self), "TX pkts copied to txdesc buf");
631 	evcnt_attach_dynamic(&sc->sc_ev_txptr1 , EVCNT_TYPE_MISC,
632 	    NULL, device_xname(self), "TX pkts using concat ptr1");
633 	evcnt_attach_dynamic(&sc->sc_ev_txptr1a , EVCNT_TYPE_MISC,
634 	    NULL, device_xname(self), "TX pkts  w/ptr1  ~160bytes");
635 	evcnt_attach_dynamic(&sc->sc_ev_txptr1b , EVCNT_TYPE_MISC,
636 	    NULL, device_xname(self), "TX pkts  w/ptr1  ~256bytes");
637 	evcnt_attach_dynamic(&sc->sc_ev_txptr1c , EVCNT_TYPE_MISC,
638 	    NULL, device_xname(self), "TX pkts  w/ptr1  ~512bytes");
639 	evcnt_attach_dynamic(&sc->sc_ev_txptr1d , EVCNT_TYPE_MISC,
640 	    NULL, device_xname(self), "TX pkts  w/ptr1 ~1024bytes");
641 	evcnt_attach_dynamic(&sc->sc_ev_txptr1e , EVCNT_TYPE_MISC,
642 	    NULL, device_xname(self), "TX pkts  w/ptr1 >1024bytes");
643 	evcnt_attach_dynamic(&sc->sc_ev_txptr2 , EVCNT_TYPE_MISC,
644 	    NULL, device_xname(self), "TX pkts using concat ptr1,2");
645 	evcnt_attach_dynamic(&sc->sc_ev_txptr2a , EVCNT_TYPE_MISC,
646 	    NULL, device_xname(self), "TX pkts  w/ptr2  ~160bytes");
647 	evcnt_attach_dynamic(&sc->sc_ev_txptr2b , EVCNT_TYPE_MISC,
648 	    NULL, device_xname(self), "TX pkts  w/ptr2  ~256bytes");
649 	evcnt_attach_dynamic(&sc->sc_ev_txptr2c , EVCNT_TYPE_MISC,
650 	    NULL, device_xname(self), "TX pkts  w/ptr2  ~512bytes");
651 	evcnt_attach_dynamic(&sc->sc_ev_txptr2d , EVCNT_TYPE_MISC,
652 	    NULL, device_xname(self), "TX pkts  w/ptr2 ~1024bytes");
653 	evcnt_attach_dynamic(&sc->sc_ev_txptr2e , EVCNT_TYPE_MISC,
654 	    NULL, device_xname(self), "TX pkts  w/ptr2 >1024bytes");
655 	evcnt_attach_dynamic(&sc->sc_ev_txptr3 , EVCNT_TYPE_MISC,
656 	    NULL, device_xname(self), "TX pkts using concat ptr1,2,3");
657 	evcnt_attach_dynamic(&sc->sc_ev_txptr3a , EVCNT_TYPE_MISC,
658 	    NULL, device_xname(self), "TX pkts  w/ptr3  ~160bytes");
659 	evcnt_attach_dynamic(&sc->sc_ev_txptr3b , EVCNT_TYPE_MISC,
660 	    NULL, device_xname(self), "TX pkts  w/ptr3  ~256bytes");
661 	evcnt_attach_dynamic(&sc->sc_ev_txptr3c , EVCNT_TYPE_MISC,
662 	    NULL, device_xname(self), "TX pkts  w/ptr3  ~512bytes");
663 	evcnt_attach_dynamic(&sc->sc_ev_txptr3d , EVCNT_TYPE_MISC,
664 	    NULL, device_xname(self), "TX pkts  w/ptr3 ~1024bytes");
665 	evcnt_attach_dynamic(&sc->sc_ev_txptr3e , EVCNT_TYPE_MISC,
666 	    NULL, device_xname(self), "TX pkts  w/ptr3 >1024bytes");
667 	evcnt_attach_dynamic(&sc->sc_ev_txmbuf , EVCNT_TYPE_MISC,
668 	    NULL, device_xname(self), "TX pkts copied to new mbufs");
669 	evcnt_attach_dynamic(&sc->sc_ev_txmbufa , EVCNT_TYPE_MISC,
670 	    NULL, device_xname(self), "TX pkts  w/mbuf  ~160bytes");
671 	evcnt_attach_dynamic(&sc->sc_ev_txmbufb , EVCNT_TYPE_MISC,
672 	    NULL, device_xname(self), "TX pkts  w/mbuf  ~256bytes");
673 	evcnt_attach_dynamic(&sc->sc_ev_txmbufc , EVCNT_TYPE_MISC,
674 	    NULL, device_xname(self), "TX pkts  w/mbuf  ~512bytes");
675 	evcnt_attach_dynamic(&sc->sc_ev_txmbufd , EVCNT_TYPE_MISC,
676 	    NULL, device_xname(self), "TX pkts  w/mbuf ~1024bytes");
677 	evcnt_attach_dynamic(&sc->sc_ev_txmbufe , EVCNT_TYPE_MISC,
678 	    NULL, device_xname(self), "TX pkts  w/mbuf >1024bytes");
679 	evcnt_attach_dynamic(&sc->sc_ev_txptrs , EVCNT_TYPE_MISC,
680 	    NULL, device_xname(self), "TX pkts using ptrs total");
681 	evcnt_attach_dynamic(&sc->sc_ev_txptrc0 , EVCNT_TYPE_MISC,
682 	    NULL, device_xname(self), "TX pkts  w/ptrs no hdr chain");
683 	evcnt_attach_dynamic(&sc->sc_ev_txptrc1 , EVCNT_TYPE_MISC,
684 	    NULL, device_xname(self), "TX pkts  w/ptrs  1 hdr chain");
685 	evcnt_attach_dynamic(&sc->sc_ev_txptrc2 , EVCNT_TYPE_MISC,
686 	    NULL, device_xname(self), "TX pkts  w/ptrs  2 hdr chains");
687 	evcnt_attach_dynamic(&sc->sc_ev_txptrc3 , EVCNT_TYPE_MISC,
688 	    NULL, device_xname(self), "TX pkts  w/ptrs  3 hdr chains");
689 	evcnt_attach_dynamic(&sc->sc_ev_txptrc4 , EVCNT_TYPE_MISC,
690 	    NULL, device_xname(self), "TX pkts  w/ptrs  4 hdr chains");
691 	evcnt_attach_dynamic(&sc->sc_ev_txptrc5 , EVCNT_TYPE_MISC,
692 	    NULL, device_xname(self), "TX pkts  w/ptrs  5 hdr chains");
693 	evcnt_attach_dynamic(&sc->sc_ev_txptrc6 , EVCNT_TYPE_MISC,
694 	    NULL, device_xname(self), "TX pkts  w/ptrs >5 hdr chains");
695 	evcnt_attach_dynamic(&sc->sc_ev_txptrh0 , EVCNT_TYPE_MISC,
696 	    NULL, device_xname(self), "TX pkts  w/ptrs  ~8bytes hdr");
697 	evcnt_attach_dynamic(&sc->sc_ev_txptrh1 , EVCNT_TYPE_MISC,
698 	    NULL, device_xname(self), "TX pkts  w/ptrs ~16bytes hdr");
699 	evcnt_attach_dynamic(&sc->sc_ev_txptrh2 , EVCNT_TYPE_MISC,
700 	    NULL, device_xname(self), "TX pkts  w/ptrs ~32bytes hdr");
701 	evcnt_attach_dynamic(&sc->sc_ev_txptrh3 , EVCNT_TYPE_MISC,
702 	    NULL, device_xname(self), "TX pkts  w/ptrs ~64bytes hdr");
703 	evcnt_attach_dynamic(&sc->sc_ev_txptrh4 , EVCNT_TYPE_MISC,
704 	    NULL, device_xname(self), "TX pkts  w/ptrs ~80bytes hdr");
705 	evcnt_attach_dynamic(&sc->sc_ev_txptrh5 , EVCNT_TYPE_MISC,
706 	    NULL, device_xname(self), "TX pkts  w/ptrs ~96bytes hdr");
707 	evcnt_attach_dynamic(&sc->sc_ev_txdstall , EVCNT_TYPE_MISC,
708 	    NULL, device_xname(self), "TX stalled due to no txdesc");
709 	evcnt_attach_dynamic(&sc->sc_ev_txempty , EVCNT_TYPE_MISC,
710 	    NULL, device_xname(self), "TX empty interrupts");
711 	evcnt_attach_dynamic(&sc->sc_ev_txsent , EVCNT_TYPE_MISC,
712 	    NULL, device_xname(self), "TX sent interrupts");
713 #endif
714 
715 	/* set shutdown hook to reset interface on powerdown */
716 	if (pmf_device_register1(self, NULL, NULL, mec_shutdown))
717 		pmf_class_network_register(self, ifp);
718 	else
719 		aprint_error_dev(self, "couldn't establish power handler\n");
720 
721 	return;
722 
723 	/*
724 	 * Free any resources we've allocated during the failed attach
725 	 * attempt.  Do this in reverse order and fall through.
726 	 */
727  fail_4:
728 	for (i = 0; i < MEC_NTXDESC; i++) {
729 		if (sc->sc_txsoft[i].txs_dmamap != NULL)
730 			bus_dmamap_destroy(sc->sc_dmat,
731 			    sc->sc_txsoft[i].txs_dmamap);
732 	}
733 	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
734  fail_3:
735 	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
736  fail_2:
737 	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
738 	    sizeof(struct mec_control_data));
739  fail_1:
740 	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
741  fail_0:
742 	return;
743 }
744 
745 static int
746 mec_mii_readreg(device_t self, int phy, int reg, uint16_t *val)
747 {
748 	struct mec_softc *sc = device_private(self);
749 	bus_space_tag_t st = sc->sc_st;
750 	bus_space_handle_t sh = sc->sc_sh;
751 	uint64_t data;
752 	int i, rv;
753 
754 	if ((rv = mec_mii_wait(sc)) != 0)
755 		return rv;
756 
757 	bus_space_write_8(st, sh, MEC_PHY_ADDRESS,
758 	    (phy << MEC_PHY_ADDR_DEVSHIFT) | (reg & MEC_PHY_ADDR_REGISTER));
759 	delay(25);
760 	bus_space_write_8(st, sh, MEC_PHY_READ_INITIATE, 1);
761 	delay(25);
762 	mec_mii_wait(sc);
763 
764 	for (i = 0; i < 20; i++) {
765 		delay(30);
766 
767 		data = bus_space_read_8(st, sh, MEC_PHY_DATA);
768 
769 		if ((data & MEC_PHY_DATA_BUSY) == 0) {
770 			*val = data & MEC_PHY_DATA_VALUE;
771 			return 0;
772 		}
773 	}
774 	return -1;
775 }
776 
777 static int
778 mec_mii_writereg(device_t self, int phy, int reg, uint16_t val)
779 {
780 	struct mec_softc *sc = device_private(self);
781 	bus_space_tag_t st = sc->sc_st;
782 	bus_space_handle_t sh = sc->sc_sh;
783 	int rv;
784 
785 	if ((rv = mec_mii_wait(sc)) != 0) {
786 		printf("timed out writing %x: %hx\n", reg, val);
787 		return rv;
788 	}
789 
790 	bus_space_write_8(st, sh, MEC_PHY_ADDRESS,
791 	    (phy << MEC_PHY_ADDR_DEVSHIFT) | (reg & MEC_PHY_ADDR_REGISTER));
792 
793 	delay(60);
794 
795 	bus_space_write_8(st, sh, MEC_PHY_DATA, val & MEC_PHY_DATA_VALUE);
796 
797 	delay(60);
798 
799 	mec_mii_wait(sc);
800 
801 	return 0;
802 }
803 
804 static int
805 mec_mii_wait(struct mec_softc *sc)
806 {
807 	uint32_t busy;
808 	int i, s;
809 
810 	for (i = 0; i < 100; i++) {
811 		delay(30);
812 
813 		s = splhigh();
814 		busy = bus_space_read_8(sc->sc_st, sc->sc_sh, MEC_PHY_DATA);
815 		splx(s);
816 
817 		if ((busy & MEC_PHY_DATA_BUSY) == 0)
818 			return 0;
819 #if 0
820 		if (busy == 0xffff) /* XXX ? */
821 			return 0;
822 #endif
823 	}
824 
825 	printf("%s: MII timed out\n", device_xname(sc->sc_dev));
826 	return ETIMEDOUT;
827 }
828 
829 static void
830 mec_statchg(struct ifnet *ifp)
831 {
832 	struct mec_softc *sc = ifp->if_softc;
833 	bus_space_tag_t st = sc->sc_st;
834 	bus_space_handle_t sh = sc->sc_sh;
835 	uint32_t control;
836 
837 	control = bus_space_read_8(st, sh, MEC_MAC_CONTROL);
838 	control &= ~(MEC_MAC_IPGT | MEC_MAC_IPGR1 | MEC_MAC_IPGR2 |
839 	    MEC_MAC_FULL_DUPLEX | MEC_MAC_SPEED_SELECT);
840 
841 	/* must also set the IPG here, according to the duplex setting */
842 	if ((sc->sc_mii.mii_media_active & IFM_FDX) != 0) {
843 		control |= MEC_MAC_FULL_DUPLEX;
844 	} else {
845 		/* set IPG */
846 		control |= MEC_MAC_IPG_DEFAULT;
847 	}
848 
849 	bus_space_write_8(st, sh, MEC_MAC_CONTROL, control);
850 }
851 
852 static int
853 mec_init(struct ifnet *ifp)
854 {
855 	struct mec_softc *sc = ifp->if_softc;
856 	bus_space_tag_t st = sc->sc_st;
857 	bus_space_handle_t sh = sc->sc_sh;
858 	struct mec_rxdesc *rxd;
859 	int i, rc;
860 
861 	/* cancel any pending I/O */
862 	mec_stop(ifp, 0);
863 
864 	/* reset device */
865 	mec_reset(sc);
866 
867 	/* setup filter for multicast or promisc mode */
868 	mec_setfilter(sc);
869 
870 	/* set the TX ring pointer to the base address */
871 	bus_space_write_8(st, sh, MEC_TX_RING_BASE, MEC_CDTXADDR(sc, 0));
872 
873 	sc->sc_txpending = 0;
874 	sc->sc_txdirty = 0;
875 	sc->sc_txlast = MEC_NTXDESC - 1;
876 
877 	/* put RX buffers into FIFO */
878 	for (i = 0; i < MEC_NRXDESC; i++) {
879 		rxd = &sc->sc_rxdesc[i];
880 		rxd->rxd_stat = 0;
881 		MEC_RXSTATSYNC(sc, i, BUS_DMASYNC_PREREAD);
882 		MEC_RXBUFSYNC(sc, i, ETHER_MAX_LEN, BUS_DMASYNC_PREREAD);
883 		bus_space_write_8(st, sh, MEC_MCL_RX_FIFO, MEC_CDRXADDR(sc, i));
884 	}
885 	sc->sc_rxptr = 0;
886 
887 #if 0	/* XXX no info */
888 	bus_space_write_8(st, sh, MEC_TIMER, 0);
889 #endif
890 
891 	/*
892 	 * MEC_DMA_TX_INT_ENABLE will be set later; enabling it now would
893 	 * cause spurious interrupts while the TX buffers are still empty.
894 	 */
895 	bus_space_write_8(st, sh, MEC_DMA_CONTROL,
896 	    (MEC_RXD_DMAOFFSET << MEC_DMA_RX_DMA_OFFSET_SHIFT) |
897 	    (MEC_NRXDESC << MEC_DMA_RX_INT_THRESH_SHIFT) |
898 	    MEC_DMA_TX_DMA_ENABLE | /* MEC_DMA_TX_INT_ENABLE | */
899 	    MEC_DMA_RX_DMA_ENABLE | MEC_DMA_RX_INT_ENABLE);
900 
901 	callout_reset(&sc->sc_tick_ch, hz, mec_tick, sc);
902 
903 	if ((rc = ether_mediachange(ifp)) != 0)
904 		return rc;
905 
906 	ifp->if_flags |= IFF_RUNNING;
907 	mec_start(ifp);
908 
909 	return 0;
910 }
911 
912 static void
913 mec_reset(struct mec_softc *sc)
914 {
915 	bus_space_tag_t st = sc->sc_st;
916 	bus_space_handle_t sh = sc->sc_sh;
917 	uint64_t control;
918 
919 	/* stop DMA first */
920 	bus_space_write_8(st, sh, MEC_DMA_CONTROL, 0);
921 
922 	/* reset chip */
923 	bus_space_write_8(st, sh, MEC_MAC_CONTROL, MEC_MAC_CORE_RESET);
924 	delay(1000);
925 	bus_space_write_8(st, sh, MEC_MAC_CONTROL, 0);
926 	delay(1000);
927 
928 	/* Default to 100/half and let auto-negotiation work its magic */
929 	control = MEC_MAC_SPEED_SELECT | MEC_MAC_FILTER_MATCHMULTI |
930 	    MEC_MAC_IPG_DEFAULT;
931 
932 	bus_space_write_8(st, sh, MEC_MAC_CONTROL, control);
933 	/* stop DMA again for sanity */
934 	bus_space_write_8(st, sh, MEC_DMA_CONTROL, 0);
935 
936 	DPRINTF(MEC_DEBUG_RESET, ("mec: control now %llx\n",
937 	    bus_space_read_8(st, sh, MEC_MAC_CONTROL)));
938 }
939 
940 static void
941 mec_start(struct ifnet *ifp)
942 {
943 	struct mec_softc *sc = ifp->if_softc;
944 	struct mbuf *m0, *m;
945 	struct mec_txdesc *txd;
946 	struct mec_txsoft *txs;
947 	bus_dmamap_t dmamap;
948 	bus_space_tag_t st = sc->sc_st;
949 	bus_space_handle_t sh = sc->sc_sh;
950 	int error, firsttx, nexttx, opending;
951 	int len, bufoff, buflen, nsegs, align, resid, pseg, nptr, slen, i;
952 	uint32_t txdcmd;
953 
954 	if ((ifp->if_flags & IFF_RUNNING) == 0)
955 		return;
956 
957 	/*
958 	 * Remember the previous txpending and the first transmit descriptor.
959 	 */
960 	opending = sc->sc_txpending;
961 	firsttx = MEC_NEXTTX(sc->sc_txlast);
962 
963 	DPRINTF(MEC_DEBUG_START,
964 	    ("%s: opending = %d, firsttx = %d\n", __func__, opending, firsttx));
965 
966 	while (sc->sc_txpending < MEC_NTXDESC - 1) {
967 		/* Grab a packet off the queue. */
968 		IFQ_POLL(&ifp->if_snd, m0);
969 		if (m0 == NULL)
970 			break;
971 		m = NULL;
972 
973 		/*
974 		 * Get the next available transmit descriptor.
975 		 */
976 		nexttx = MEC_NEXTTX(sc->sc_txlast);
977 		txd = &sc->sc_txdesc[nexttx];
978 		txs = &sc->sc_txsoft[nexttx];
979 		dmamap = txs->txs_dmamap;
980 		txs->txs_flags = 0;
981 
982 		buflen = 0;
983 		bufoff = 0;
984 		resid = 0;
985 		nptr = 0;	/* XXX gcc */
986 		pseg = 0;	/* XXX gcc */
987 
988 		len = m0->m_pkthdr.len;
989 
990 		DPRINTF(MEC_DEBUG_START,
991 		    ("%s: len = %d, nexttx = %d, txpending = %d\n",
992 		    __func__, len, nexttx, sc->sc_txpending));
993 
994 		if (len <= MEC_TXD_BUFSIZE) {
995 			/*
996 			 * If a TX packet fits into the small txdesc buffer,
997 			 * just copy it there; this is probably faster than
998 			 * checking alignment and calling bus_dma(9) etc.
999 			 */
1000 			DPRINTF(MEC_DEBUG_START, ("%s: short packet\n",
1001 			    __func__));
1002 			IFQ_DEQUEUE(&ifp->if_snd, m0);
1003 
1004 			/*
1005 			 * It is unclear whether the MEC chip pads short frames
1006 			 * automatically, so do it manually for safety.
1007 			 */
1008 			if (len < ETHER_PAD_LEN) {
1009 				MEC_EVCNT_INCR(&sc->sc_ev_txdpad);
1010 				bufoff = MEC_TXD_BUFSTART(ETHER_PAD_LEN);
1011 				m_copydata(m0, 0, len, txd->txd_buf + bufoff);
1012 				memset(txd->txd_buf + bufoff + len, 0,
1013 				    ETHER_PAD_LEN - len);
1014 				len = buflen = ETHER_PAD_LEN;
1015 			} else {
1016 				MEC_EVCNT_INCR(&sc->sc_ev_txdbuf);
1017 				bufoff = MEC_TXD_BUFSTART(len);
1018 				m_copydata(m0, 0, len, txd->txd_buf + bufoff);
1019 				buflen = len;
1020 			}
1021 		} else {
1022 			/*
1023 			 * If the packet won't fit the static buffer in txdesc,
1024 			 * we have to use the concatenate pointers to handle it.
1025 			 */
1026 			DPRINTF(MEC_DEBUG_START, ("%s: long packet\n",
1027 			    __func__));
1028 			txs->txs_flags = MEC_TXS_TXDPTR;
1029 
1030 			/*
1031 			 * Call bus_dmamap_load_mbuf(9) first to see
1032 			 * how many chains the TX mbuf has.
1033 			 */
1034 			error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
1035 			    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
1036 			if (error == 0) {
1037 				/*
1038 				 * Check the leading chains, which may contain
1039 				 * headers.  They can be heavily fragmented, and
1040 				 * since they are small enough it is better to
1041 				 * copy them into the txdesc buffer.
1042 				 */
1043 				nsegs = dmamap->dm_nsegs;
1044 				for (pseg = 0; pseg < nsegs; pseg++) {
1045 					slen = dmamap->dm_segs[pseg].ds_len;
1046 					if (buflen + slen >
1047 					    MEC_TXD_BUFSIZE1 - MEC_TXD_ALIGN)
1048 						break;
1049 					buflen += slen;
1050 				}
1051 				/*
1052 				 * Check if the remaining chains can fit into
1053 				 * the concatenate pointers.
1054 				 */
1055 				align = dmamap->dm_segs[pseg].ds_addr &
1056 				    MEC_TXD_ALIGNMASK;
1057 				if (align > 0) {
1058 					/*
1059 					 * If the first chain isn't uint64_t
1060 					 * aligned, append the unaligned part
1061 					 * into txdesc buffer too.
1062 					 */
1063 					resid = MEC_TXD_ALIGN - align;
1064 					buflen += resid;
1065 					for (; pseg < nsegs; pseg++) {
1066 						slen =
1067 						  dmamap->dm_segs[pseg].ds_len;
1068 						if (slen > resid)
1069 							break;
1070 						resid -= slen;
1071 					}
1072 				} else if (pseg == 0) {
1073 					/*
1074 					 * In this case, the first chain is
1075 					 * uint64_t aligned but it's too long
1076 					 * to put into txdesc buf.
1077 					 * We have to put some data into
1078 					 * txdesc buf even in this case,
1079 					 * so put MEC_TXD_ALIGN bytes there.
1080 					 */
1081 					buflen = resid = MEC_TXD_ALIGN;
1082 				}
1083 				nptr = nsegs - pseg;
1084 				if (nptr <= MEC_NTXPTR) {
1085 					bufoff = MEC_TXD_BUFSTART(buflen);
1086 
1087 					/*
1088 					 * Check if all the rest chains are
1089 					 * uint64_t aligned.
1090 					 */
1091 					align = 0;
1092 					for (i = pseg + 1; i < nsegs; i++)
1093 						align |=
1094 						    dmamap->dm_segs[i].ds_addr
1095 						    & MEC_TXD_ALIGNMASK;
1096 					if (align != 0) {
1097 						/* chains are not aligned */
1098 						error = -1;
1099 					}
1100 				} else {
1101 					/* The TX mbuf chains don't fit. */
1102 					error = -1;
1103 				}
1104 				if (error == -1)
1105 					bus_dmamap_unload(sc->sc_dmat, dmamap);
1106 			}
1107 			if (error != 0) {
1108 				/*
1109 				 * The TX mbuf chains can't be put into
1110 				 * the concatenate buffers. In this case,
1111 				 * we have to allocate a new contiguous mbuf
1112 				 * and copy data into it.
1113 				 *
1114 				 * Even in this case, the Ethernet header in
1115 				 * the TX mbuf might be unaligned and trailing
1116 				 * data might be word aligned, so put 2 byte
1117 				 * (MEC_ETHER_ALIGN) padding at the top of the
1118 				 * allocated mbuf and copy TX packets.
1119 				 * 6 bytes (MEC_TXD_ALIGN - MEC_ETHER_ALIGN)
1120 				 * at the top of the new mbuf won't be uint64_t
1121 				 * aligned, but we have to put some data into
1122 				 * txdesc buffer anyway even if the buffer
1123 				 * is uint64_t aligned.
1124 				 */
1125 				DPRINTF(MEC_DEBUG_START | MEC_DEBUG_TXSEGS,
1126 				    ("%s: re-allocating mbuf\n", __func__));
1127 
1128 				MGETHDR(m, M_DONTWAIT, MT_DATA);
1129 				if (m == NULL) {
1130 					printf("%s: unable to allocate "
1131 					    "TX mbuf\n",
1132 					    device_xname(sc->sc_dev));
1133 					break;
1134 				}
1135 				if (len > (MHLEN - MEC_ETHER_ALIGN)) {
1136 					MCLGET(m, M_DONTWAIT);
1137 					if ((m->m_flags & M_EXT) == 0) {
1138 						printf("%s: unable to allocate "
1139 						    "TX cluster\n",
1140 						    device_xname(sc->sc_dev));
1141 						m_freem(m);
1142 						break;
1143 					}
1144 				}
1145 				m->m_data += MEC_ETHER_ALIGN;
1146 
1147 				/*
1148 				 * Copy whole data (including unaligned part)
1149 				 * for following bpf_mtap().
1150 				 */
1151 				m_copydata(m0, 0, len, mtod(m, void *));
1152 				m->m_pkthdr.len = m->m_len = len;
1153 				error = bus_dmamap_load_mbuf(sc->sc_dmat,
1154 				    dmamap, m, BUS_DMA_WRITE | BUS_DMA_NOWAIT);
1155 				if (dmamap->dm_nsegs > 1) {
1156 					/* should not happen, but for sanity */
1157 					bus_dmamap_unload(sc->sc_dmat, dmamap);
1158 					error = -1;
1159 				}
1160 				if (error != 0) {
1161 					printf("%s: unable to load TX buffer, "
1162 					    "error = %d\n",
1163 					    device_xname(sc->sc_dev), error);
1164 					m_freem(m);
1165 					break;
1166 				}
1167 				/*
1168 				 * Only the first segment should be put into
1169 				 * the concatenate pointer in this case.
1170 				 */
1171 				pseg = 0;
1172 				nptr = 1;
1173 
1174 				/*
1175 				 * Set length of unaligned part which will be
1176 				 * copied into txdesc buffer.
1177 				 */
1178 				buflen = MEC_TXD_ALIGN - MEC_ETHER_ALIGN;
1179 				bufoff = MEC_TXD_BUFSTART(buflen);
1180 				resid = buflen;
1181 #ifdef MEC_EVENT_COUNTERS
1182 				MEC_EVCNT_INCR(&sc->sc_ev_txmbuf);
1183 				if (len <= 160)
1184 					MEC_EVCNT_INCR(&sc->sc_ev_txmbufa);
1185 				else if (len <= 256)
1186 					MEC_EVCNT_INCR(&sc->sc_ev_txmbufb);
1187 				else if (len <= 512)
1188 					MEC_EVCNT_INCR(&sc->sc_ev_txmbufc);
1189 				else if (len <= 1024)
1190 					MEC_EVCNT_INCR(&sc->sc_ev_txmbufd);
1191 				else
1192 					MEC_EVCNT_INCR(&sc->sc_ev_txmbufe);
1193 #endif
1194 			}
1195 #ifdef MEC_EVENT_COUNTERS
1196 			else {
1197 				MEC_EVCNT_INCR(&sc->sc_ev_txptrs);
1198 				if (nptr == 1) {
1199 					MEC_EVCNT_INCR(&sc->sc_ev_txptr1);
1200 					if (len <= 160)
1201 						MEC_EVCNT_INCR(
1202 						    &sc->sc_ev_txptr1a);
1203 					else if (len <= 256)
1204 						MEC_EVCNT_INCR(
1205 						    &sc->sc_ev_txptr1b);
1206 					else if (len <= 512)
1207 						MEC_EVCNT_INCR(
1208 						    &sc->sc_ev_txptr1c);
1209 					else if (len <= 1024)
1210 						MEC_EVCNT_INCR(
1211 						    &sc->sc_ev_txptr1d);
1212 					else
1213 						MEC_EVCNT_INCR(
1214 						    &sc->sc_ev_txptr1e);
1215 				} else if (nptr == 2) {
1216 					MEC_EVCNT_INCR(&sc->sc_ev_txptr2);
1217 					if (len <= 160)
1218 						MEC_EVCNT_INCR(
1219 						    &sc->sc_ev_txptr2a);
1220 					else if (len <= 256)
1221 						MEC_EVCNT_INCR(
1222 						    &sc->sc_ev_txptr2b);
1223 					else if (len <= 512)
1224 						MEC_EVCNT_INCR(
1225 						    &sc->sc_ev_txptr2c);
1226 					else if (len <= 1024)
1227 						MEC_EVCNT_INCR(
1228 						    &sc->sc_ev_txptr2d);
1229 					else
1230 						MEC_EVCNT_INCR(
1231 						    &sc->sc_ev_txptr2e);
1232 				} else if (nptr == 3) {
1233 					MEC_EVCNT_INCR(&sc->sc_ev_txptr3);
1234 					if (len <= 160)
1235 						MEC_EVCNT_INCR(
1236 						    &sc->sc_ev_txptr3a);
1237 					else if (len <= 256)
1238 						MEC_EVCNT_INCR(
1239 						    &sc->sc_ev_txptr3b);
1240 					else if (len <= 512)
1241 						MEC_EVCNT_INCR(
1242 						    &sc->sc_ev_txptr3c);
1243 					else if (len <= 1024)
1244 						MEC_EVCNT_INCR(
1245 						    &sc->sc_ev_txptr3d);
1246 					else
1247 						MEC_EVCNT_INCR(
1248 						    &sc->sc_ev_txptr3e);
1249 				}
1250 				if (pseg == 0)
1251 					MEC_EVCNT_INCR(&sc->sc_ev_txptrc0);
1252 				else if (pseg == 1)
1253 					MEC_EVCNT_INCR(&sc->sc_ev_txptrc1);
1254 				else if (pseg == 2)
1255 					MEC_EVCNT_INCR(&sc->sc_ev_txptrc2);
1256 				else if (pseg == 3)
1257 					MEC_EVCNT_INCR(&sc->sc_ev_txptrc3);
1258 				else if (pseg == 4)
1259 					MEC_EVCNT_INCR(&sc->sc_ev_txptrc4);
1260 				else if (pseg == 5)
1261 					MEC_EVCNT_INCR(&sc->sc_ev_txptrc5);
1262 				else
1263 					MEC_EVCNT_INCR(&sc->sc_ev_txptrc6);
1264 				if (buflen <= 8)
1265 					MEC_EVCNT_INCR(&sc->sc_ev_txptrh0);
1266 				else if (buflen <= 16)
1267 					MEC_EVCNT_INCR(&sc->sc_ev_txptrh1);
1268 				else if (buflen <= 32)
1269 					MEC_EVCNT_INCR(&sc->sc_ev_txptrh2);
1270 				else if (buflen <= 64)
1271 					MEC_EVCNT_INCR(&sc->sc_ev_txptrh3);
1272 				else if (buflen <= 80)
1273 					MEC_EVCNT_INCR(&sc->sc_ev_txptrh4);
1274 				else
1275 					MEC_EVCNT_INCR(&sc->sc_ev_txptrh5);
1276 			}
1277 #endif
1278 			m_copydata(m0, 0, buflen, txd->txd_buf + bufoff);
1279 
1280 			IFQ_DEQUEUE(&ifp->if_snd, m0);
1281 			if (m != NULL) {
1282 				m_freem(m0);
1283 				m0 = m;
1284 			}
1285 
1286 			/*
1287 			 * sync the DMA map for TX mbuf
1288 			 */
1289 			bus_dmamap_sync(sc->sc_dmat, dmamap, buflen,
1290 			    len - buflen, BUS_DMASYNC_PREWRITE);
1291 		}
1292 
1293 		/*
1294 		 * Pass packet to bpf if there is a listener.
1295 		 */
1296 		bpf_mtap(ifp, m0, BPF_D_OUT);
1297 		MEC_EVCNT_INCR(&sc->sc_ev_txpkts);
1298 
1299 		/*
1300 		 * setup the transmit descriptor.
1301 		 */
1302 		txdcmd = TXCMD_BUFSTART(MEC_TXDESCSIZE - buflen) | (len - 1);
1303 
1304 		/*
1305 		 * Set MEC_TXCMD_TXINT every MEC_NTXDESC_INTR packets
1306 		 * once more than half of the txdescs have been queued,
1307 		 * because TX_EMPTY interrupts rarely happen
1308 		 * while the TX queue is that heavily backed up.
1309 		 */
1310 		if (sc->sc_txpending > (MEC_NTXDESC / 2) &&
1311 		    (nexttx & (MEC_NTXDESC_INTR - 1)) == 0)
1312 			txdcmd |= MEC_TXCMD_TXINT;
1313 
1314 		if ((txs->txs_flags & MEC_TXS_TXDPTR) != 0) {
1315 			bus_dma_segment_t *segs = dmamap->dm_segs;
1316 
1317 			DPRINTF(MEC_DEBUG_TXSEGS,
1318 			    ("%s: nsegs = %d, pseg = %d, nptr = %d\n",
1319 			    __func__, dmamap->dm_nsegs, pseg, nptr));
1320 
1321 			switch (nptr) {
1322 			case 3:
1323 				KASSERT((segs[pseg + 2].ds_addr &
1324 				    MEC_TXD_ALIGNMASK) == 0);
1325 				txdcmd |= MEC_TXCMD_PTR3;
1326 				txd->txd_ptr[2] =
1327 				    TXPTR_LEN(segs[pseg + 2].ds_len - 1) |
1328 				    segs[pseg + 2].ds_addr;
1329 				/* FALLTHROUGH */
1330 			case 2:
1331 				KASSERT((segs[pseg + 1].ds_addr &
1332 				    MEC_TXD_ALIGNMASK) == 0);
1333 				txdcmd |= MEC_TXCMD_PTR2;
1334 				txd->txd_ptr[1] =
1335 				    TXPTR_LEN(segs[pseg + 1].ds_len - 1) |
1336 				    segs[pseg + 1].ds_addr;
1337 				/* FALLTHROUGH */
1338 			case 1:
1339 				txdcmd |= MEC_TXCMD_PTR1;
1340 				txd->txd_ptr[0] =
1341 				    TXPTR_LEN(segs[pseg].ds_len - resid - 1) |
1342 				    (segs[pseg].ds_addr + resid);
1343 				break;
1344 			default:
1345 				panic("%s: impossible nptr in %s",
1346 				    device_xname(sc->sc_dev), __func__);
1347 				/* NOTREACHED */
1348 			}
1349 			/*
1350 			 * Store a pointer to the packet so we can
1351 			 * free it later.
1352 			 */
1353 			txs->txs_mbuf = m0;
1354 		} else {
1355 			/*
1356 			 * In this case all data has been copied into the txdesc
1357 			 * buffer, so we can free the TX mbuf here.
1358 			 */
1359 			m_freem(m0);
1360 		}
1361 		txd->txd_cmd = txdcmd;
1362 
1363 		DPRINTF(MEC_DEBUG_START,
1364 		    ("%s: txd_cmd    = 0x%016llx\n",
1365 		    __func__, txd->txd_cmd));
1366 		DPRINTF(MEC_DEBUG_START,
1367 		    ("%s: txd_ptr[0] = 0x%016llx\n",
1368 		    __func__, txd->txd_ptr[0]));
1369 		DPRINTF(MEC_DEBUG_START,
1370 		    ("%s: txd_ptr[1] = 0x%016llx\n",
1371 		    __func__, txd->txd_ptr[1]));
1372 		DPRINTF(MEC_DEBUG_START,
1373 		    ("%s: txd_ptr[2] = 0x%016llx\n",
1374 		    __func__, txd->txd_ptr[2]));
1375 		DPRINTF(MEC_DEBUG_START,
1376 		    ("%s: len = %d (0x%04x), buflen = %d (0x%02x)\n",
1377 		    __func__, len, len, buflen, buflen));
1378 
1379 		/* sync TX descriptor */
1380 		MEC_TXDESCSYNC(sc, nexttx,
1381 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1382 
1383 		/* start TX */
1384 		bus_space_write_8(st, sh, MEC_TX_RING_PTR, MEC_NEXTTX(nexttx));
1385 
1386 		/* advance the TX pointer. */
1387 		sc->sc_txpending++;
1388 		sc->sc_txlast = nexttx;
1389 	}
1390 
1391 	if (sc->sc_txpending == MEC_NTXDESC - 1) {
1392 		/* No more slots. */
1393 		MEC_EVCNT_INCR(&sc->sc_ev_txdstall);
1394 	}
1395 
1396 	if (sc->sc_txpending != opending) {
1397 		/*
1398 		 * If the transmitter was idle,
1399 		 * reset the txdirty pointer and re-enable TX interrupt.
1400 		 */
1401 		if (opending == 0) {
1402 			sc->sc_txdirty = firsttx;
1403 			bus_space_write_8(st, sh, MEC_TX_ALIAS,
1404 			    MEC_TX_ALIAS_INT_ENABLE);
1405 		}
1406 
1407 		/* Set a watchdog timer in case the chip flakes out. */
1408 		ifp->if_timer = 5;
1409 	}
1410 }
1411 
1412 static void
1413 mec_stop(struct ifnet *ifp, int disable)
1414 {
1415 	struct mec_softc *sc = ifp->if_softc;
1416 	struct mec_txsoft *txs;
1417 	int i;
1418 
1419 	DPRINTF(MEC_DEBUG_STOP, ("%s\n", __func__));
1420 
1421 	ifp->if_timer = 0;
1422 	ifp->if_flags &= ~IFF_RUNNING;
1423 
1424 	callout_stop(&sc->sc_tick_ch);
1425 	mii_down(&sc->sc_mii);
1426 
1427 	/* release any TX buffers */
1428 	for (i = 0; i < MEC_NTXDESC; i++) {
1429 		txs = &sc->sc_txsoft[i];
1430 		if ((txs->txs_flags & MEC_TXS_TXDPTR) != 0) {
1431 			bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
1432 			m_freem(txs->txs_mbuf);
1433 			txs->txs_mbuf = NULL;
1434 		}
1435 	}
1436 }
1437 
1438 static int
1439 mec_ioctl(struct ifnet *ifp, u_long cmd, void *data)
1440 {
1441 	int s, error;
1442 
1443 	s = splnet();
1444 
1445 	error = ether_ioctl(ifp, cmd, data);
1446 	if (error == ENETRESET) {
1447 		/*
1448 		 * Multicast list has changed; set the hardware filter
1449 		 * accordingly.
1450 		 */
1451 		if (ifp->if_flags & IFF_RUNNING)
1452 			error = mec_init(ifp);
1453 		else
1454 			error = 0;
1455 	}
1456 
1457 	/* Try to get more packets going. */
1458 	mec_start(ifp);
1459 
1460 	splx(s);
1461 	return error;
1462 }
1463 
1464 static void
1465 mec_watchdog(struct ifnet *ifp)
1466 {
1467 	struct mec_softc *sc = ifp->if_softc;
1468 
1469 	printf("%s: device timeout\n", device_xname(sc->sc_dev));
1470 	if_statinc(ifp, if_oerrors);
1471 
1472 	mec_init(ifp);
1473 }
1474 
1475 static void
1476 mec_tick(void *arg)
1477 {
1478 	struct mec_softc *sc = arg;
1479 	int s;
1480 
1481 	s = splnet();
1482 	mii_tick(&sc->sc_mii);
1483 	splx(s);
1484 
1485 	callout_reset(&sc->sc_tick_ch, hz, mec_tick, sc);
1486 }
1487 
1488 static void
1489 mec_setfilter(struct mec_softc *sc)
1490 {
1491 	struct ethercom *ec = &sc->sc_ethercom;
1492 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1493 	struct ether_multi *enm;
1494 	struct ether_multistep step;
1495 	bus_space_tag_t st = sc->sc_st;
1496 	bus_space_handle_t sh = sc->sc_sh;
1497 	uint64_t mchash;
1498 	uint32_t control, hash;
1499 	int mcnt;
1500 
1501 	control = bus_space_read_8(st, sh, MEC_MAC_CONTROL);
1502 	control &= ~MEC_MAC_FILTER_MASK;
1503 
1504 	if (ifp->if_flags & IFF_PROMISC) {
1505 		control |= MEC_MAC_FILTER_PROMISC;
1506 		bus_space_write_8(st, sh, MEC_MULTICAST, 0xffffffffffffffffULL);
1507 		bus_space_write_8(st, sh, MEC_MAC_CONTROL, control);
1508 		return;
1509 	}
1510 
1511 	mcnt = 0;
1512 	mchash = 0;
1513 	ETHER_LOCK(ec);
1514 	ETHER_FIRST_MULTI(step, ec, enm);
1515 	while (enm != NULL) {
1516 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
1517 			/* set allmulti for a range of multicast addresses */
1518 			control |= MEC_MAC_FILTER_ALLMULTI;
1519 			bus_space_write_8(st, sh, MEC_MULTICAST,
1520 			    0xffffffffffffffffULL);
1521 			bus_space_write_8(st, sh, MEC_MAC_CONTROL, control);
1522 			ETHER_UNLOCK(ec);
1523 			return;
1524 		}
1525 
1526 #define mec_calchash(addr)	(ether_crc32_be((addr), ETHER_ADDR_LEN) >> 26)
1527 
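		/*
		 * The hash is the top 6 bits of the big-endian CRC-32 of
		 * the address, i.e. it selects one bit in the 64-bit
		 * MEC_MULTICAST hash register.
		 */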
1528 		hash = mec_calchash(enm->enm_addrlo);
1529 		mchash |= 1ULL << hash;
1530 		mcnt++;
1531 		ETHER_NEXT_MULTI(step, enm);
1532 	}
1533 	ETHER_UNLOCK(ec);
1534 
1535 	ifp->if_flags &= ~IFF_ALLMULTI;
1536 
1537 	if (mcnt > 0)
1538 		control |= MEC_MAC_FILTER_MATCHMULTI;
1539 
1540 	bus_space_write_8(st, sh, MEC_MULTICAST, mchash);
1541 	bus_space_write_8(st, sh, MEC_MAC_CONTROL, control);
1542 }
1543 
1544 static int
1545 mec_intr(void *arg)
1546 {
1547 	struct mec_softc *sc = arg;
1548 	bus_space_tag_t st = sc->sc_st;
1549 	bus_space_handle_t sh = sc->sc_sh;
1550 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1551 	uint32_t statreg, statack, txptr;
1552 	int handled, sent;
1553 
1554 	DPRINTF(MEC_DEBUG_INTR, ("%s: called\n", __func__));
1555 
1556 	handled = sent = 0;
1557 
1558 	for (;;) {
1559 		statreg = bus_space_read_8(st, sh, MEC_INT_STATUS);
1560 
1561 		DPRINTF(MEC_DEBUG_INTR,
1562 		    ("%s: INT_STAT = 0x%08x\n", __func__, statreg));
1563 
1564 		statack = statreg & MEC_INT_STATUS_MASK;
1565 		if (statack == 0)
1566 			break;
1567 		bus_space_write_8(st, sh, MEC_INT_STATUS, statack);
1568 
1569 		handled = 1;
1570 
1571 		if (statack &
1572 		    (MEC_INT_RX_THRESHOLD |
1573 		     MEC_INT_RX_FIFO_UNDERFLOW)) {
1574 			mec_rxintr(sc);
1575 		}
1576 
1577 		if (statack &
1578 		    (MEC_INT_TX_EMPTY |
1579 		     MEC_INT_TX_PACKET_SENT |
1580 		     MEC_INT_TX_ABORT)) {
1581 			txptr = (statreg & MEC_INT_TX_RING_BUFFER_ALIAS)
1582 			    >> MEC_INT_TX_RING_BUFFER_SHIFT;
1583 			mec_txintr(sc, txptr);
1584 			sent = 1;
1585 			if ((statack & MEC_INT_TX_EMPTY) != 0) {
1586 				/*
1587 				 * disable TX interrupt to stop
1588 				 * TX empty interrupt
1589 				 */
1590 				bus_space_write_8(st, sh, MEC_TX_ALIAS, 0);
1591 				DPRINTF(MEC_DEBUG_INTR,
1592 				    ("%s: disable TX_INT\n", __func__));
1593 			}
1594 #ifdef MEC_EVENT_COUNTERS
1595 			if ((statack & MEC_INT_TX_EMPTY) != 0)
1596 				MEC_EVCNT_INCR(&sc->sc_ev_txempty);
1597 			if ((statack & MEC_INT_TX_PACKET_SENT) != 0)
1598 				MEC_EVCNT_INCR(&sc->sc_ev_txsent);
1599 #endif
1600 		}
1601 
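		/*
		 * Conditions the driver treats as fatal (link failure,
		 * memory error, TX abort, RX DMA underflow): log the
		 * status and reinitialize the chip.
		 */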
1602 		if (statack &
1603 		    (MEC_INT_TX_LINK_FAIL |
1604 		     MEC_INT_TX_MEM_ERROR |
1605 		     MEC_INT_TX_ABORT |
1606 		     MEC_INT_RX_DMA_UNDERFLOW)) {
1607 			printf("%s: %s: interrupt status = 0x%08x\n",
1608 			    device_xname(sc->sc_dev), __func__, statreg);
1609 			mec_init(ifp);
1610 			break;
1611 		}
1612 	}
1613 
1614 	if (sent) {
1615 		/* try to get more packets going */
1616 		if_schedule_deferred_start(ifp);
1617 	}
1618 
1619 	if (handled)
1620 		rnd_add_uint32(&sc->sc_rnd_source, statreg);
1621 
1622 	return handled;
1623 }
1624 
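/*
 * RX completion: walk the RX descriptor ring from sc_rxptr, copy each
 * received frame into a freshly allocated mbuf (the DMA buffers stay
 * attached to the ring), and hand the buffer address back to the RX
 * FIFO for reuse.
 */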
1625 static void
1626 mec_rxintr(struct mec_softc *sc)
1627 {
1628 	bus_space_tag_t st = sc->sc_st;
1629 	bus_space_handle_t sh = sc->sc_sh;
1630 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1631 	struct mbuf *m;
1632 	struct mec_rxdesc *rxd;
1633 	uint64_t rxstat;
1634 	u_int len;
1635 	int i;
1636 	uint32_t crc;
1637 
1638 	DPRINTF(MEC_DEBUG_RXINTR, ("%s: called\n", __func__));
1639 
1640 	for (i = sc->sc_rxptr;; i = MEC_NEXTRX(i)) {
1641 		rxd = &sc->sc_rxdesc[i];
1642 
1643 		MEC_RXSTATSYNC(sc, i, BUS_DMASYNC_POSTREAD);
1644 		rxstat = rxd->rxd_stat;
1645 
1646 		DPRINTF(MEC_DEBUG_RXINTR,
1647 		    ("%s: rxstat = 0x%016llx, rxptr = %d\n",
1648 		    __func__, rxstat, i));
1649 		DPRINTF(MEC_DEBUG_RXINTR, ("%s: rxfifo = 0x%08x\n",
1650 		    __func__, (u_int)bus_space_read_8(st, sh, MEC_RX_FIFO)));
1651 
1652 		if ((rxstat & MEC_RXSTAT_RECEIVED) == 0) {
1653 			MEC_RXSTATSYNC(sc, i, BUS_DMASYNC_PREREAD);
1654 			break;
1655 		}
1656 
1657 		len = rxstat & MEC_RXSTAT_LEN;
1658 
1659 		if (len < ETHER_MIN_LEN ||
1660 		    len > (MCLBYTES - MEC_ETHER_ALIGN)) {
1661 			/* invalid length packet; drop it. */
1662 			DPRINTF(MEC_DEBUG_RXINTR,
1663 			    ("%s: wrong packet\n", __func__));
1664  dropit:
1665 			if_statinc(ifp, if_ierrors);
1666 			rxd->rxd_stat = 0;
1667 			MEC_RXSTATSYNC(sc, i, BUS_DMASYNC_PREREAD);
1668 			bus_space_write_8(st, sh, MEC_MCL_RX_FIFO,
1669 			    MEC_CDRXADDR(sc, i));
1670 			continue;
1671 		}
1672 
1673 		/*
1674 		 * If 802.1Q VLAN MTU is enabled, ignore the bad packet error.
1675 		 */
1676 		if ((sc->sc_ethercom.ec_capenable & ETHERCAP_VLAN_MTU) != 0)
1677 			rxstat &= ~MEC_RXSTAT_BADPACKET;
1678 
1679 		if (rxstat &
1680 		    (MEC_RXSTAT_BADPACKET |
1681 		     MEC_RXSTAT_LONGEVENT |
1682 		     MEC_RXSTAT_INVALID   |
1683 		     MEC_RXSTAT_CRCERROR  |
1684 		     MEC_RXSTAT_VIOLATION)) {
1685 			printf("%s: mec_rxintr: status = 0x%016"PRIx64"\n",
1686 			    device_xname(sc->sc_dev), rxstat);
1687 			goto dropit;
1688 		}
1689 
1690 		/*
1691 		 * The MEC includes the CRC with every packet.  Trim
1692 		 * it off here.
1693 		 */
1694 		len -= ETHER_CRC_LEN;
1695 
1696 		/*
1697 		 * now allocate an mbuf (and possibly a cluster) to hold
1698 		 * the received packet.
1699 		 */
1700 		MGETHDR(m, M_DONTWAIT, MT_DATA);
1701 		if (m == NULL) {
1702 			printf("%s: unable to allocate RX mbuf\n",
1703 			    device_xname(sc->sc_dev));
1704 			goto dropit;
1705 		}
1706 		if (len > (MHLEN - MEC_ETHER_ALIGN)) {
1707 			MCLGET(m, M_DONTWAIT);
1708 			if ((m->m_flags & M_EXT) == 0) {
1709 				printf("%s: unable to allocate RX cluster\n",
1710 				    device_xname(sc->sc_dev));
1711 				m_freem(m);
1712 				m = NULL;
1713 				goto dropit;
1714 			}
1715 		}
1716 
1717 		/*
1718 		 * Note the MEC chip seems to insert 2 bytes of padding at the top
1719 		 * of the RX buffer; we copy the whole buffer so the copy stays aligned.
1720 		 */
1721 		MEC_RXBUFSYNC(sc, i, len + ETHER_CRC_LEN, BUS_DMASYNC_POSTREAD);
1722 		memcpy(mtod(m, void *), rxd->rxd_buf, MEC_ETHER_ALIGN + len);
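		/*
		 * Save the trailing FCS; the hardware RX checksum covers
		 * the CRC as well, so mec_rxcsum() needs it to back the
		 * CRC words out again.
		 */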
1723 		crc = be32dec(rxd->rxd_buf + MEC_ETHER_ALIGN + len);
1724 		MEC_RXBUFSYNC(sc, i, ETHER_MAX_LEN, BUS_DMASYNC_PREREAD);
1725 		m->m_data += MEC_ETHER_ALIGN;
1726 
1727 		/* put RX buffer into FIFO again */
1728 		rxd->rxd_stat = 0;
1729 		MEC_RXSTATSYNC(sc, i, BUS_DMASYNC_PREREAD);
1730 		bus_space_write_8(st, sh, MEC_MCL_RX_FIFO, MEC_CDRXADDR(sc, i));
1731 
1732 		m_set_rcvif(m, ifp);
1733 		m->m_pkthdr.len = m->m_len = len;
1734 		if ((ifp->if_csum_flags_rx & (M_CSUM_TCPv4|M_CSUM_UDPv4)) != 0)
1735 			mec_rxcsum(sc, m, RXSTAT_CKSUM(rxstat), crc);
1736 
1737 		/* Pass it on. */
1738 		if_percpuq_enqueue(ifp->if_percpuq, m);
1739 	}
1740 
1741 	/* update RX pointer */
1742 	sc->sc_rxptr = i;
1743 }
1744 
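/*
 * Turn the hardware RX checksum (which covers the whole frame,
 * including the Ethernet header and CRC) into an M_CSUM_DATA value for
 * the stack: only non-fragmented IPv4 TCP/UDP packets qualify, and the
 * Ethernet header and CRC words are deducted below.
 */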
1745 static void
1746 mec_rxcsum(struct mec_softc *sc, struct mbuf *m, uint16_t rxcsum, uint32_t crc)
1747 {
1748 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1749 	struct ether_header *eh;
1750 	struct ip *ip;
1751 	struct udphdr *uh;
1752 	u_int len, pktlen, hlen;
1753 	uint32_t csum_data, dsum;
1754 	int csum_flags;
1755 	const uint16_t *dp;
1756 
1757 	csum_data = 0;
1758 	csum_flags = 0;
1759 
1760 	len = m->m_len;
1761 	if (len < ETHER_HDR_LEN + sizeof(struct ip))
1762 		goto out;
1763 	pktlen = len - ETHER_HDR_LEN;
1764 	eh = mtod(m, struct ether_header *);
1765 	if (ntohs(eh->ether_type) != ETHERTYPE_IP)
1766 		goto out;
1767 	ip = (struct ip *)((uint8_t *)eh + ETHER_HDR_LEN);
1768 	if (ip->ip_v != IPVERSION)
1769 		goto out;
1770 
1771 	hlen = ip->ip_hl << 2;
1772 	if (hlen < sizeof(struct ip))
1773 		goto out;
1774 
1775 	/*
1776 	 * Bail if the packet is too short, truncated, a fragment, or has
1777 	 * trailing garbage or an Ethernet pad.
1778 	 */
1779 	if (ntohs(ip->ip_len) < hlen ||
1780 	    ntohs(ip->ip_len) != pktlen ||
1781 	    (ntohs(ip->ip_off) & (IP_MF | IP_OFFMASK)) != 0)
1782 		goto out;
1783 
1784 	switch (ip->ip_p) {
1785 	case IPPROTO_TCP:
1786 		if ((ifp->if_csum_flags_rx & M_CSUM_TCPv4) == 0 ||
1787 		    pktlen < (hlen + sizeof(struct tcphdr)))
1788 			goto out;
1789 		csum_flags = M_CSUM_TCPv4 | M_CSUM_DATA | M_CSUM_NO_PSEUDOHDR;
1790 		break;
1791 	case IPPROTO_UDP:
1792 		if ((ifp->if_csum_flags_rx & M_CSUM_UDPv4) == 0 ||
1793 		    pktlen < (hlen + sizeof(struct udphdr)))
1794 			goto out;
1795 		uh = (struct udphdr *)((uint8_t *)ip + hlen);
1796 		if (uh->uh_sum == 0)
1797 			goto out;	/* no checksum */
1798 		csum_flags = M_CSUM_UDPv4 | M_CSUM_DATA | M_CSUM_NO_PSEUDOHDR;
1799 		break;
1800 	default:
1801 		goto out;
1802 	}
1803 
1804 	/*
1805 	 * The computed checksum includes the Ethernet header, IP header,
1806 	 * and CRC, so we have to deduct them.
1807 	 * Note the IP header checksum should be 0xffff, so we don't have
1808 	 * to deduct it.
1809 	 */
1810 	dsum = 0;
1811 
1812 	/* deduct Ethernet header */
1813 	dp = (const uint16_t *)eh;
1814 	for (hlen = 0; hlen < (ETHER_HDR_LEN / sizeof(uint16_t)); hlen++)
1815 		dsum += ntohs(*dp++);
1816 
1817 	/* deduct CRC */
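	/*
	 * The checksum is a 16-bit ones-complement sum, so when the
	 * summed length is odd the CRC bytes land on odd boundaries and
	 * contribute shifted by one byte; fold them in accordingly.
	 */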
1818 	if (len & 1) {
1819 		dsum += (crc >> 24) & 0x00ff;
1820 		dsum += (crc >>  8) & 0xffff;
1821 		dsum += (crc <<  8) & 0xff00;
1822 	} else {
1823 		dsum += (crc >> 16) & 0xffff;
1824 		dsum += (crc >>  0) & 0xffff;
1825 	}
1826 	while (dsum >> 16)
1827 		dsum = (dsum >> 16) + (dsum & 0xffff);
1828 
1829 	csum_data = rxcsum;
1830 	csum_data += (uint16_t)~dsum;
1831 
1832 	while (csum_data >> 16)
1833 		csum_data = (csum_data >> 16) + (csum_data & 0xffff);
1834 
1835  out:
1836 	m->m_pkthdr.csum_flags = csum_flags;
1837 	m->m_pkthdr.csum_data = csum_data;
1838 }
1839 
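/*
 * TX completion: reclaim descriptors from sc_txdirty up to the ring
 * pointer reported by the chip, unload and free mbufs that were sent
 * via DMA pointers, and update collision/error statistics.
 */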
1840 static void
1841 mec_txintr(struct mec_softc *sc, uint32_t txptr)
1842 {
1843 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1844 	struct mec_txdesc *txd;
1845 	struct mec_txsoft *txs;
1846 	bus_dmamap_t dmamap;
1847 	uint64_t txstat;
1848 	int i;
1849 	u_int col;
1850 
1851 	DPRINTF(MEC_DEBUG_TXINTR, ("%s: called\n", __func__));
1852 
1853 	for (i = sc->sc_txdirty; i != txptr && sc->sc_txpending != 0;
1854 	    i = MEC_NEXTTX(i), sc->sc_txpending--) {
1855 		txd = &sc->sc_txdesc[i];
1856 
1857 		MEC_TXCMDSYNC(sc, i,
1858 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1859 
1860 		txstat = txd->txd_stat;
1861 		DPRINTF(MEC_DEBUG_TXINTR,
1862 		    ("%s: dirty = %d, txstat = 0x%016llx\n",
1863 		    __func__, i, txstat));
1864 		if ((txstat & MEC_TXSTAT_SENT) == 0) {
1865 			MEC_TXCMDSYNC(sc, i, BUS_DMASYNC_PREREAD);
1866 			break;
1867 		}
1868 
1869 		txs = &sc->sc_txsoft[i];
1870 		if ((txs->txs_flags & MEC_TXS_TXDPTR) != 0) {
1871 			dmamap = txs->txs_dmamap;
1872 			bus_dmamap_sync(sc->sc_dmat, dmamap, 0,
1873 			    dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1874 			bus_dmamap_unload(sc->sc_dmat, dmamap);
1875 			m_freem(txs->txs_mbuf);
1876 			txs->txs_mbuf = NULL;
1877 		}
1878 
1879 		col = (txstat & MEC_TXSTAT_COLCNT) >> MEC_TXSTAT_COLCNT_SHIFT;
1880 		if (col)
1881 			if_statadd(ifp, if_collisions, col);
1882 
1883 		if ((txstat & MEC_TXSTAT_SUCCESS) == 0) {
1884 			printf("%s: TX error: txstat = 0x%016"PRIx64"\n",
1885 			    device_xname(sc->sc_dev), txstat);
1886 			if_statinc(ifp, if_oerrors);
1887 		} else
1888 			if_statinc(ifp, if_opackets);
1889 	}
1890 
1891 	/* update the dirty TX buffer pointer */
1892 	sc->sc_txdirty = i;
1893 	DPRINTF(MEC_DEBUG_INTR,
1894 	    ("%s: sc_txdirty = %2d, sc_txpending = %2d\n",
1895 	    __func__, sc->sc_txdirty, sc->sc_txpending));
1896 
1897 	/* cancel the watchdog timer if there are no pending TX packets */
1898 	if (sc->sc_txpending == 0)
1899 		ifp->if_timer = 0;
1900 }
1901 
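/*
 * Shutdown hook (presumably registered via pmf): bring the interface
 * down and reset the chip so no DMA is left running across a reboot.
 */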
1902 static bool
1903 mec_shutdown(device_t self, int howto)
1904 {
1905 	struct mec_softc *sc = device_private(self);
1906 
1907 	mec_stop(&sc->sc_ethercom.ec_if, 1);
1908 	/* make sure to stop DMA etc. */
1909 	mec_reset(sc);
1910 
1911 	return true;
1912 }
1913