xref: /netbsd-src/sys/arch/arm/at91/at91emac.c (revision f3cfa6f6ce31685c6c4a758bc430e69eb99f50a4)
1 /*	$NetBSD: at91emac.c,v 1.29 2019/05/28 07:41:46 msaitoh Exp $	*/
2 
3 /*
4  * Copyright (c) 2007 Embedtronics Oy
5  * All rights reserved.
6  *
7  * Based on arch/arm/ep93xx/epe.c
8  *
9  * Copyright (c) 2004 Jesse Off
10  * All rights reserved.
11  *
12  * Redistribution and use in source and binary forms, with or without
13  * modification, are permitted provided that the following conditions
14  * are met:
15  * 1. Redistributions of source code must retain the above copyright
16  *    notice, this list of conditions and the following disclaimer.
17  * 2. Redistributions in binary form must reproduce the above copyright
18  *    notice, this list of conditions and the following disclaimer in the
19  *    documentation and/or other materials provided with the distribution.
20  *
21  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
22  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
23  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
24  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
25  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31  * POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 #include <sys/cdefs.h>
35 __KERNEL_RCSID(0, "$NetBSD: at91emac.c,v 1.29 2019/05/28 07:41:46 msaitoh Exp $");
36 
37 #include <sys/types.h>
38 #include <sys/param.h>
39 #include <sys/systm.h>
40 #include <sys/ioctl.h>
41 #include <sys/kernel.h>
42 #include <sys/proc.h>
43 #include <sys/malloc.h>
44 #include <sys/time.h>
45 #include <sys/device.h>
46 #include <uvm/uvm_extern.h>
47 
48 #include <sys/bus.h>
49 #include <machine/intr.h>
50 
51 #include <arm/cpufunc.h>
52 
53 #include <net/if.h>
54 #include <net/if_dl.h>
55 #include <net/if_types.h>
56 #include <net/if_media.h>
57 #include <net/if_ether.h>
58 #include <net/bpf.h>
59 
60 #include <dev/mii/mii.h>
61 #include <dev/mii/miivar.h>
62 
63 #ifdef INET
64 #include <netinet/in.h>
65 #include <netinet/in_systm.h>
66 #include <netinet/in_var.h>
67 #include <netinet/ip.h>
68 #include <netinet/if_inarp.h>
69 #endif
70 
71 #include <arm/at91/at91var.h>
72 #include <arm/at91/at91emacreg.h>
73 #include <arm/at91/at91emacvar.h>
74 
75 #define DEFAULT_MDCDIV	32
76 
77 #ifndef EMAC_FAST
78 #define EMAC_FAST
79 #endif
80 
81 #ifndef EMAC_FAST
82 #define EMAC_READ(x) \
83 	bus_space_read_4(sc->sc_iot, sc->sc_ioh, (x))
84 #define EMAC_WRITE(x, y) \
85 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, (x), (y))
86 #else
87 #define EMAC_READ(x) ETHREG(x)
88 #define EMAC_WRITE(x, y) ETHREG(x) = (y)
89 #endif /* ! EMAC_FAST */
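/*
 * EMAC_FAST is forced on above, so all register accesses go through the
 * direct ETHREG() mapping; the bus_space variant is kept only as a
 * (currently unused) fallback and requires "sc" to be in scope at every
 * call site.
 */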
90 
91 static int	emac_match(device_t, cfdata_t, void *);
92 static void	emac_attach(device_t, device_t, void *);
93 static void	emac_init(struct emac_softc *);
94 static int	emac_intr(void *);
95 static int	emac_gctx(struct emac_softc *);
96 static int	emac_mediachange(struct ifnet *);
97 static void	emac_mediastatus(struct ifnet *, struct ifmediareq *);
98 int		emac_mii_readreg(device_t, int, int, uint16_t *);
99 int		emac_mii_writereg(device_t, int, int, uint16_t);
100 void		emac_statchg(struct ifnet *);
101 void		emac_tick(void *);
102 static int	emac_ifioctl(struct ifnet *, u_long, void *);
103 static void	emac_ifstart(struct ifnet *);
104 static void	emac_ifwatchdog(struct ifnet *);
105 static int	emac_ifinit(struct ifnet *);
106 static void	emac_ifstop(struct ifnet *, int);
107 static void	emac_setaddr(struct ifnet *);
108 
109 CFATTACH_DECL_NEW(at91emac, sizeof(struct emac_softc),
110     emac_match, emac_attach, NULL, NULL);
111 
112 #ifdef	EMAC_DEBUG
113 int emac_debug = EMAC_DEBUG;
114 #define	DPRINTFN(n, fmt)	do { if (emac_debug >= (n)) printf fmt; } while (/*CONSTCOND*/0)
115 #else
116 #define	DPRINTFN(n, fmt)
117 #endif
118 
119 static int
120 emac_match(device_t parent, cfdata_t match, void *aux)
121 {
122 	if (strcmp(match->cf_name, "at91emac") == 0)
123 		return 2;
124 	return 0;
125 }
126 
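/*
 * emac_attach: map the registers, enable the peripheral clock, quiesce
 * the MAC, pick up the MAC address from the "mac-address" device
 * property (falling back to a hard-coded default), hook up the interrupt
 * and hand the rest off to emac_init().
 */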
127 static void
128 emac_attach(device_t parent, device_t self, void *aux)
129 {
130 	struct emac_softc		*sc = device_private(self);
131 	struct at91bus_attach_args	*sa = aux;
132 	prop_data_t			enaddr;
133 	uint32_t			u;
134 
135 	printf("\n");
136 	sc->sc_dev = self;
137 	sc->sc_iot = sa->sa_iot;
138 	sc->sc_pid = sa->sa_pid;
139 	sc->sc_dmat = sa->sa_dmat;
140 
141 	if (bus_space_map(sa->sa_iot, sa->sa_addr, sa->sa_size, 0, &sc->sc_ioh))
142 		panic("%s: Cannot map registers", device_xname(self));
143 
144 	/* enable peripheral clock */
145 	at91_peripheral_clock(sc->sc_pid, 1);
146 
147 	/* configure emac: */
148 	EMAC_WRITE(ETH_CTL, 0);			// disable everything
149 	EMAC_WRITE(ETH_IDR, -1);		// disable interrupts
150 	EMAC_WRITE(ETH_RBQP, 0);		// clear receive
151 	EMAC_WRITE(ETH_CFG,
152 	    ETH_CFG_CLK_32 | ETH_CFG_SPD | ETH_CFG_FD | ETH_CFG_BIG);
153 	EMAC_WRITE(ETH_TCR, 0);			// send nothing
154 	//(void)EMAC_READ(ETH_ISR);
155 	u = EMAC_READ(ETH_TSR);
156 	EMAC_WRITE(ETH_TSR, (u & (ETH_TSR_UND | ETH_TSR_COMP | ETH_TSR_BNQ
157 				  | ETH_TSR_IDLE | ETH_TSR_RLE
158 				  | ETH_TSR_COL | ETH_TSR_OVR)));
159 	u = EMAC_READ(ETH_RSR);
160 	EMAC_WRITE(ETH_RSR, (u & (ETH_RSR_OVR | ETH_RSR_REC | ETH_RSR_BNA)));
161 
162 	/* Fetch the Ethernet address from property if set. */
163 	enaddr = prop_dictionary_get(device_properties(self), "mac-address");
164 
165 	if (enaddr != NULL) {
166 		KASSERT(prop_object_type(enaddr) == PROP_TYPE_DATA);
167 		KASSERT(prop_data_size(enaddr) == ETHER_ADDR_LEN);
168 		memcpy(sc->sc_enaddr, prop_data_data_nocopy(enaddr),
169 		       ETHER_ADDR_LEN);
170 	} else {
171 		static const uint8_t hardcoded[ETHER_ADDR_LEN] = {
172 		  0x00, 0x0d, 0x10, 0x81, 0x0c, 0x94
173 		};
174 		memcpy(sc->sc_enaddr, hardcoded, ETHER_ADDR_LEN);
175 	}
176 
177 	at91_intr_establish(sc->sc_pid, IPL_NET, INTR_HIGH_LEVEL, emac_intr,
178 	    sc);
179 	emac_init(sc);
180 }
181 
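/*
 * emac_gctx: garbage-collect the transmit queue.  Frees the mbufs of
 * completed transmissions (leaving one in flight unless ETH_TSR_IDLE is
 * set) and clears IFF_OACTIVE.  Returns non-zero when ETH_TSR_BNQ
 * indicates the controller can take another frame.
 */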
182 static int
183 emac_gctx(struct emac_softc *sc)
184 {
185 	struct ifnet * ifp = &sc->sc_ec.ec_if;
186 	uint32_t tsr;
187 
188 	tsr = EMAC_READ(ETH_TSR);
189 	if (!(tsr & ETH_TSR_BNQ)) {
190 		// no space left
191 		return 0;
192 	}
193 
194 	// free sent frames
195 	while (sc->txqc > (tsr & ETH_TSR_IDLE ? 0 : 1)) {
196 		int i = sc->txqi % TX_QLEN;
197 		bus_dmamap_sync(sc->sc_dmat, sc->txq[i].m_dmamap, 0,
198 		    sc->txq[i].m->m_pkthdr.len, BUS_DMASYNC_POSTWRITE);
199 		bus_dmamap_unload(sc->sc_dmat, sc->txq[i].m_dmamap);
200 		m_freem(sc->txq[i].m);
201 		DPRINTFN(2,("%s: freed idx #%i mbuf %p (txqc=%i)\n",
202 			__FUNCTION__, i, sc->txq[i].m, sc->txqc));
203 		sc->txq[i].m = NULL;
204 		sc->txqi = (i + 1) % TX_QLEN;
205 		sc->txqc--;
206 	}
207 
208 	// mark we're free
209 	if (ifp->if_flags & IFF_OACTIVE) {
210 		ifp->if_flags &= ~IFF_OACTIVE;
211 		/* Disable transmit-buffer-free interrupt */
212 		/*EMAC_WRITE(ETH_IDR, ETH_ISR_TBRE);*/
213 	}
214 
215 	return 1;
216 }
217 
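/*
 * emac_intr: interrupt service routine.  Returns early if none of the
 * sources we enable are unmasked; otherwise handles receive-buffer
 * exhaustion (RBNA) and overruns, drains completed receptions while
 * refilling each ring slot with a fresh cluster, and finally reclaims
 * finished transmissions via emac_gctx().
 */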
218 static int
219 emac_intr(void *arg)
220 {
221 	struct emac_softc *sc = (struct emac_softc *)arg;
222 	struct ifnet * ifp = &sc->sc_ec.ec_if;
223 	uint32_t imr, isr, ctl;
224 	int bi;
225 
226 	imr = ~EMAC_READ(ETH_IMR);
227 	if (!(imr & (ETH_ISR_RCOM | ETH_ISR_TBRE | ETH_ISR_TIDLE
228 	    | ETH_ISR_RBNA | ETH_ISR_ROVR))) {
229 		// interrupt not enabled, can't be us
230 		return 0;
231 	}
232 
233 	isr = EMAC_READ(ETH_ISR) & imr;
234 #ifdef EMAC_DEBUG
235 	uint32_t rsr =
236 #endif
237 	EMAC_READ(ETH_RSR);		// get receive status register
238 
239 	DPRINTFN(2, ("%s: isr=0x%08X rsr=0x%08X imr=0x%08X\n", __FUNCTION__,
240 		isr, rsr, imr));
241 
242 	if (isr & ETH_ISR_RBNA) {		// out of receive buffers
243 		EMAC_WRITE(ETH_RSR, ETH_RSR_BNA);	// clear interrupt
244 		ctl = EMAC_READ(ETH_CTL);		// get current control register value
245 		EMAC_WRITE(ETH_CTL, ctl & ~ETH_CTL_RE);	// disable receiver
246 		EMAC_WRITE(ETH_RSR, ETH_RSR_BNA);	// clear BNA bit
247 		EMAC_WRITE(ETH_CTL, ctl |  ETH_CTL_RE);	// re-enable receiver
248 		ifp->if_ierrors++;
249 		ifp->if_ipackets++;
250 		DPRINTFN(1,("%s: out of receive buffers\n", __FUNCTION__));
251 	}
252 	if (isr & ETH_ISR_ROVR) {
253 		EMAC_WRITE(ETH_RSR, ETH_RSR_OVR);	// clear interrupt
254 		ifp->if_ierrors++;
255 		ifp->if_ipackets++;
256 		DPRINTFN(1,("%s: receive overrun\n", __FUNCTION__));
257 	}
258 
259 	if (isr & ETH_ISR_RCOM) {			// packet has been received!
260 		uint32_t nfo;
261 		// @@@ if memory is NOT coherent, then we're in trouble @@@@
262 //		bus_dmamap_sync(sc->sc_dmat, sc->rbqpage_dmamap, 0, sc->rbqlen, BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
263 //		printf("## RDSC[%i].ADDR=0x%08X\n", sc->rxqi % RX_QLEN, sc->RDSC[sc->rxqi % RX_QLEN].Addr);
264 		DPRINTFN(2,("#2 RDSC[%i].INFO=0x%08X\n", sc->rxqi % RX_QLEN,
265 			sc->RDSC[sc->rxqi % RX_QLEN].Info));
266 		while (sc->RDSC[(bi = sc->rxqi % RX_QLEN)].Addr & ETH_RDSC_F_USED) {
267 			int fl;
268 			struct mbuf *m;
269 
270 			nfo = sc->RDSC[bi].Info;
271 			fl = (nfo & ETH_RDSC_I_LEN) - 4;
272 			DPRINTFN(2,("## nfo=0x%08X\n", nfo));
273 
274 			MGETHDR(m, M_DONTWAIT, MT_DATA);
275 			if (m != NULL) MCLGET(m, M_DONTWAIT);
276 			if (m != NULL && (m->m_flags & M_EXT)) {
277 				bus_dmamap_sync(sc->sc_dmat,
278 				    sc->rxq[bi].m_dmamap, 0,
279 				    MCLBYTES, BUS_DMASYNC_POSTREAD);
280 				bus_dmamap_unload(sc->sc_dmat,
281 					sc->rxq[bi].m_dmamap);
282 				m_set_rcvif(sc->rxq[bi].m, ifp);
283 				sc->rxq[bi].m->m_pkthdr.len =
284 					sc->rxq[bi].m->m_len = fl;
285 				DPRINTFN(2,("received %u bytes packet\n", fl));
286 				if_percpuq_enqueue(ifp->if_percpuq, sc->rxq[bi].m);
287 				if (mtod(m, intptr_t) & 3) {
288 					m_adj(m, mtod(m, intptr_t) & 3);
289 				}
290 				sc->rxq[bi].m = m;
291 				bus_dmamap_load(sc->sc_dmat,
292 					sc->rxq[bi].m_dmamap,
293 					m->m_ext.ext_buf, MCLBYTES,
294 					NULL, BUS_DMA_NOWAIT);
295 				bus_dmamap_sync(sc->sc_dmat,
296 				    sc->rxq[bi].m_dmamap, 0,
297 				    MCLBYTES, BUS_DMASYNC_PREREAD);
298 				sc->RDSC[bi].Info = 0;
299 				sc->RDSC[bi].Addr =
300 					sc->rxq[bi].m_dmamap->dm_segs[0].ds_addr
301 					| (bi == (RX_QLEN-1) ? ETH_RDSC_F_WRAP : 0);
302 			} else {
303 				/* Drop packets until we can get replacement
304 				 * empty mbufs for the RXDQ.
305 				 */
306 				if (m != NULL) {
307 					m_freem(m);
308 				}
309 				ifp->if_ierrors++;
310 			}
311 			sc->rxqi++;
312 		}
313 //		bus_dmamap_sync(sc->sc_dmat, sc->rbqpage_dmamap, 0, sc->rbqlen, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
314 	}
315 
316 	if (emac_gctx(sc) > 0)
317 		if_schedule_deferred_start(ifp);
318 #if 0 // reloop
319 	irq = EMAC_READ(IntStsC);
320 	if ((irq & (IntSts_RxSQ | IntSts_ECI)) != 0)
321 		goto begin;
322 #endif
323 
324 	return (1);
325 }
326 
327 
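/*
 * emac_init: attach-time initialisation.  Resets and reconfigures the
 * MAC, programs the station address, allocates DMA-safe memory for the
 * receive descriptor ring (kept within a 16 KB boundary, see the errata
 * note below), populates the ring with cluster mbufs, creates the
 * transmit DMA maps, attaches the MII/PHY and registers the ifnet.
 */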
328 static void
329 emac_init(struct emac_softc *sc)
330 {
331 	bus_dma_segment_t segs;
332 	void *addr;
333 	int rsegs, err, i;
334 	struct ifnet * ifp = &sc->sc_ec.ec_if;
335 	struct mii_data * const mii = &sc->sc_mii;
336 	uint32_t u;
337 #if 0
338 	int mdcdiv = DEFAULT_MDCDIV;
339 #endif
340 
341 	callout_init(&sc->emac_tick_ch, 0);
342 
343 	// ok...
344 	EMAC_WRITE(ETH_CTL, ETH_CTL_MPE);	// disable everything
345 	EMAC_WRITE(ETH_IDR, -1);		// disable interrupts
346 	EMAC_WRITE(ETH_RBQP, 0);		// clear receive
347 	EMAC_WRITE(ETH_CFG,
348 	    ETH_CFG_CLK_32 | ETH_CFG_SPD | ETH_CFG_FD | ETH_CFG_BIG);
349 	EMAC_WRITE(ETH_TCR, 0);			// send nothing
350 //	(void)EMAC_READ(ETH_ISR);
351 	u = EMAC_READ(ETH_TSR);
352 	EMAC_WRITE(ETH_TSR, (u & (ETH_TSR_UND | ETH_TSR_COMP | ETH_TSR_BNQ
353 				  | ETH_TSR_IDLE | ETH_TSR_RLE
354 				  | ETH_TSR_COL | ETH_TSR_OVR)));
355 	u = EMAC_READ(ETH_RSR);
356 	EMAC_WRITE(ETH_RSR, (u & (ETH_RSR_OVR | ETH_RSR_REC | ETH_RSR_BNA)));
357 
358 	/* configure EMAC */
359 	EMAC_WRITE(ETH_CFG,
360 	    ETH_CFG_CLK_32 | ETH_CFG_SPD | ETH_CFG_FD | ETH_CFG_BIG);
361 	EMAC_WRITE(ETH_CTL, ETH_CTL_MPE);
362 #if 0
363 	if (device_cfdata(sc->sc_dev)->cf_flags)
364 		mdcdiv = device_cfdata(sc->sc_dev)->cf_flags;
365 #endif
366 	/* set ethernet address */
367 	EMAC_WRITE(ETH_SA1L, (sc->sc_enaddr[3] << 24)
368 		   | (sc->sc_enaddr[2] << 16) | (sc->sc_enaddr[1] << 8)
369 		   | (sc->sc_enaddr[0]));
370 	EMAC_WRITE(ETH_SA1H, (sc->sc_enaddr[5] << 8)
371 		   | (sc->sc_enaddr[4]));
372 	EMAC_WRITE(ETH_SA2L, 0);
373 	EMAC_WRITE(ETH_SA2H, 0);
374 	EMAC_WRITE(ETH_SA3L, 0);
375 	EMAC_WRITE(ETH_SA3H, 0);
376 	EMAC_WRITE(ETH_SA4L, 0);
377 	EMAC_WRITE(ETH_SA4H, 0);
378 
379 	/* Allocate a page of memory for receive queue descriptors */
380 	sc->rbqlen = (ETH_RDSC_SIZE * (RX_QLEN + 1) * 2 + PAGE_SIZE - 1) / PAGE_SIZE;
381 	sc->rbqlen *= PAGE_SIZE;
382 	DPRINTFN(1,("%s: rbqlen=%i\n", __FUNCTION__, sc->rbqlen));
383 
384 	err = bus_dmamem_alloc(sc->sc_dmat, sc->rbqlen, 0,
385 		MAX(16384, PAGE_SIZE),	// see EMAC errata why forced to 16384 byte boundary
386 		&segs, 1, &rsegs, BUS_DMA_WAITOK);
387 	if (err == 0) {
388 		DPRINTFN(1,("%s: -> bus_dmamem_map\n", __FUNCTION__));
389 		err = bus_dmamem_map(sc->sc_dmat, &segs, 1, sc->rbqlen,
390 			&sc->rbqpage, (BUS_DMA_WAITOK | BUS_DMA_COHERENT));
391 	}
392 	if (err == 0) {
393 		DPRINTFN(1,("%s: -> bus_dmamap_create\n", __FUNCTION__));
394 		err = bus_dmamap_create(sc->sc_dmat, sc->rbqlen, 1,
395 			sc->rbqlen, MAX(16384, PAGE_SIZE), BUS_DMA_WAITOK,
396 			&sc->rbqpage_dmamap);
397 	}
398 	if (err == 0) {
399 		DPRINTFN(1,("%s: -> bus_dmamap_load\n", __FUNCTION__));
400 		err = bus_dmamap_load(sc->sc_dmat, sc->rbqpage_dmamap,
401 			sc->rbqpage, sc->rbqlen, NULL, BUS_DMA_WAITOK);
402 	}
403 	if (err != 0) {
404 		panic("%s: Cannot get DMA memory", device_xname(sc->sc_dev));
405 	}
406 	sc->rbqpage_dsaddr = sc->rbqpage_dmamap->dm_segs[0].ds_addr;
407 
408 	memset(sc->rbqpage, 0, sc->rbqlen);
409 
410 	/* Set up pointers to start of each queue in kernel addr space.
411 	 * Each descriptor queue or status queue entry uses 2 words
412 	 */
413 	sc->RDSC = (void*)sc->rbqpage;
414 
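	/*
	 * Receive descriptor format as used below: each RDSC entry is two
	 * 32-bit words.  Addr holds the cluster's bus address with the
	 * ETH_RDSC_F_USED/ETH_RDSC_F_WRAP flags OR'd in; Info is the status
	 * word whose ETH_RDSC_I_LEN field carries the received frame length.
	 */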
415 	/* Populate the RXQ with mbufs */
416 	sc->rxqi = 0;
417 	for (i = 0; i < RX_QLEN; i++) {
418 		struct mbuf *m;
419 
420 		err = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES,
421 		    PAGE_SIZE, BUS_DMA_WAITOK, &sc->rxq[i].m_dmamap);
422 		if (err)
423 			panic("%s: dmamap_create failed: %i\n",
424 			    __FUNCTION__, err);
425 
426 		MGETHDR(m, M_WAIT, MT_DATA);
427 		MCLGET(m, M_WAIT);
428 		sc->rxq[i].m = m;
429 		if (mtod(m, intptr_t) & 3) {
430 			m_adj(m, mtod(m, intptr_t) & 3);
431 		}
432 		err = bus_dmamap_load(sc->sc_dmat, sc->rxq[i].m_dmamap,
433 			m->m_ext.ext_buf, MCLBYTES, NULL,
434 			BUS_DMA_WAITOK);
435 		if (err)
436 			panic("%s: dmamap_load failed: %i\n",
437 			    __FUNCTION__, err);
438 
439 		sc->RDSC[i].Addr = sc->rxq[i].m_dmamap->dm_segs[0].ds_addr
440 			| (i == (RX_QLEN-1) ? ETH_RDSC_F_WRAP : 0);
441 		sc->RDSC[i].Info = 0;
442 		bus_dmamap_sync(sc->sc_dmat, sc->rxq[i].m_dmamap, 0,
443 			MCLBYTES, BUS_DMASYNC_PREREAD);
444 	}
445 
446 	/* prepare transmit queue */
447 	for (i = 0; i < TX_QLEN; i++) {
448 		err = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 0,
449 					(BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW),
450 					&sc->txq[i].m_dmamap);
451 		if (err)
452 			panic("ARGH #1");
453 		sc->txq[i].m = NULL;
454 	}
455 
456 	/* Program each queue's start addr, cur addr, and len registers
457 	 * with the physical addresses.
458 	 */
459 	bus_dmamap_sync(sc->sc_dmat, sc->rbqpage_dmamap, 0, sc->rbqlen,
460 			 BUS_DMASYNC_PREREAD);
461 	addr = (void *)sc->rbqpage_dmamap->dm_segs[0].ds_addr;
462 	EMAC_WRITE(ETH_RBQP, (uint32_t)addr);
463 
464 	/* Attach the MII; MDC = HCLK/32 was selected via ETH_CFG_CLK_32 above */
465 	mii->mii_ifp = ifp;
466 	mii->mii_readreg = emac_mii_readreg;
467 	mii->mii_writereg = emac_mii_writereg;
468 	mii->mii_statchg = emac_statchg;
469 	sc->sc_ec.ec_mii = mii;
470 	ifmedia_init(&mii->mii_media, IFM_IMASK, emac_mediachange,
471 		emac_mediastatus);
472 	mii_attach(sc->sc_dev, mii, 0xffffffff, MII_PHY_ANY,
473 		MII_OFFSET_ANY, 0);
474 	ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
475 
476 	// interrupts and the transmitter/receiver are enabled later, in emac_ifinit()
477 
478 #if 0
479 	// enable / disable interrupts
480 	EMAC_WRITE(ETH_IDR, -1);
481 	EMAC_WRITE(ETH_IER, ETH_ISR_RCOM | ETH_ISR_TBRE | ETH_ISR_TIDLE
482 		   | ETH_ISR_RBNA | ETH_ISR_ROVR);
483 //	(void)EMAC_READ(ETH_ISR); // why
484 
485 	// enable transmitter / receiver
486 	EMAC_WRITE(ETH_CTL, ETH_CTL_TE | ETH_CTL_RE | ETH_CTL_ISR
487 		   | ETH_CTL_CSR | ETH_CTL_MPE);
488 #endif
489 	/*
490 	 * We can support 802.1Q VLAN-sized frames.
491 	 */
492 	sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_MTU;
493 
494 	strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
495 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
496 	ifp->if_ioctl = emac_ifioctl;
497 	ifp->if_start = emac_ifstart;
498 	ifp->if_watchdog = emac_ifwatchdog;
499 	ifp->if_init = emac_ifinit;
500 	ifp->if_stop = emac_ifstop;
501 	ifp->if_timer = 0;
502 	ifp->if_softc = sc;
503 	IFQ_SET_READY(&ifp->if_snd);
504 	if_attach(ifp);
505 	if_deferred_start_init(ifp, NULL);
506 	ether_ifattach(ifp, (sc)->sc_enaddr);
507 }
508 
509 static int
510 emac_mediachange(struct ifnet *ifp)
511 {
512 	if (ifp->if_flags & IFF_UP)
513 		emac_ifinit(ifp);
514 	return (0);
515 }
516 
517 static void
518 emac_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
519 {
520 	struct emac_softc *sc = ifp->if_softc;
521 
522 	mii_pollstat(&sc->sc_mii);
523 	ifmr->ifm_active = sc->sc_mii.mii_media_active;
524 	ifmr->ifm_status = sc->sc_mii.mii_media_status;
525 }
526 
527 
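/*
 * MII access: a management frame is written to ETH_MAN with the PHY and
 * register numbers packed into their fields, then we busy-wait until
 * ETH_SR_IDLE is set.  For reads, the result is taken from the
 * ETH_MAN_DATA field of ETH_MAN.
 */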
528 int
529 emac_mii_readreg(device_t self, int phy, int reg, uint16_t *val)
530 {
531 #ifndef EMAC_FAST
532 	struct emac_softc *sc = device_private(self);
533 #endif
534 
535 	EMAC_WRITE(ETH_MAN, (ETH_MAN_HIGH | ETH_MAN_RW_RD
536 			     | ((phy << ETH_MAN_PHYA_SHIFT) & ETH_MAN_PHYA)
537 			     | ((reg << ETH_MAN_REGA_SHIFT) & ETH_MAN_REGA)
538 			     | ETH_MAN_CODE_IEEE802_3));
539 	while (!(EMAC_READ(ETH_SR) & ETH_SR_IDLE))
540 		;
541 	*val = EMAC_READ(ETH_MAN) & ETH_MAN_DATA;
542 
543 	return 0;
544 }
545 
546 int
547 emac_mii_writereg(device_t self, int phy, int reg, uint16_t val)
548 {
549 #ifndef EMAC_FAST
550 	struct emac_softc *sc = device_private(self);
551 #endif
552 
553 	EMAC_WRITE(ETH_MAN, (ETH_MAN_HIGH | ETH_MAN_RW_WR
554 			     | ((phy << ETH_MAN_PHYA_SHIFT) & ETH_MAN_PHYA)
555 			     | ((reg << ETH_MAN_REGA_SHIFT) & ETH_MAN_REGA)
556 			     | ETH_MAN_CODE_IEEE802_3
557 			     | (val & ETH_MAN_DATA)));
558 	while (!(EMAC_READ(ETH_SR) & ETH_SR_IDLE))
559 		;
560 
561 	return 0;
562 }
563 
564 void
565 emac_statchg(struct ifnet *ifp)
566 {
567 	struct emac_softc *sc = ifp->if_softc;
568 	uint32_t reg;
569 
570 	/*
571 	 * We must keep the MAC and the PHY in sync as
572 	 * to the status of full-duplex!
573 	 */
574 	reg = EMAC_READ(ETH_CFG);
575 	if (sc->sc_mii.mii_media_active & IFM_FDX)
576 		reg |= ETH_CFG_FD;
577 	else
578 		reg &= ~ETH_CFG_FD;
579 	EMAC_WRITE(ETH_CFG, reg);
580 }
581 
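/*
 * emac_tick: 1 Hz housekeeping callout.  Accumulates the collision
 * counters, reports receive misses (ETH_DRFC), restarts the transmit
 * path if work is queued, and ticks the MII before rescheduling itself.
 */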
582 void
583 emac_tick(void *arg)
584 {
585 	struct emac_softc* sc = (struct emac_softc *)arg;
586 	struct ifnet * ifp = &sc->sc_ec.ec_if;
587 	int s;
588 	uint32_t misses;
589 
590 	ifp->if_collisions += EMAC_READ(ETH_SCOL) + EMAC_READ(ETH_MCOL);
591 	/* These misses are ok, they will happen if the RAM/CPU can't keep up */
592 	misses = EMAC_READ(ETH_DRFC);
593 	if (misses > 0)
594 		printf("%s: %d rx misses\n", device_xname(sc->sc_dev), misses);
595 
596 	s = splnet();
597 	if (emac_gctx(sc) > 0 && IFQ_IS_EMPTY(&ifp->if_snd) == 0) {
598 		emac_ifstart(ifp);
599 	}
600 	splx(s);
601 
602 	mii_tick(&sc->sc_mii);
603 	callout_reset(&sc->emac_tick_ch, hz, emac_tick, sc);
604 }
605 
606 
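/*
 * emac_ifioctl: everything is delegated to ether_ioctl(); on ENETRESET
 * (e.g. a multicast list change) the address filters are reprogrammed
 * via emac_setaddr() while the interface keeps running.
 */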
607 static int
608 emac_ifioctl(struct ifnet *ifp, u_long cmd, void *data)
609 {
610 	int s, error;
611 
612 	s = splnet();
613 	switch (cmd) {
614 	default:
615 		error = ether_ioctl(ifp, cmd, data);
616 		if (error == ENETRESET) {
617 			if (ifp->if_flags & IFF_RUNNING)
618 				emac_setaddr(ifp);
619 			error = 0;
620 		}
621 	}
622 	splx(s);
623 	return error;
624 }
625 
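/*
 * emac_ifstart: transmit path.  The EMAC is handed one frame at a time
 * through ETH_TAR/ETH_TCR and wants a single, 32-bit-aligned segment, so
 * any chain that maps to multiple segments or an unaligned address is
 * first copied into a fresh mbuf (cluster if needed).  When no transmit
 * slot is free, the TBRE interrupt is enabled and the queue is restarted
 * later from emac_intr() via if_schedule_deferred_start().
 */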
626 static void
627 emac_ifstart(struct ifnet *ifp)
628 {
629 	struct emac_softc *sc = (struct emac_softc *)ifp->if_softc;
630 	struct mbuf *m;
631 	bus_dma_segment_t *segs;
632 	int s, bi, err, nsegs;
633 
634 	s = splnet();
635 start:
636 	if (emac_gctx(sc) == 0) {
637 		/* Enable transmit-buffer-free interrupt */
638 		EMAC_WRITE(ETH_IER, ETH_ISR_TBRE);
639 		ifp->if_flags |= IFF_OACTIVE;
640 		ifp->if_timer = 10;
641 		splx(s);
642 		return;
643 	}
644 
645 	ifp->if_timer = 0;
646 
647 	IFQ_POLL(&ifp->if_snd, m);
648 	if (m == NULL) {
649 		splx(s);
650 		return;
651 	}
652 //more:
653 	bi = (sc->txqi + sc->txqc) % TX_QLEN;
654 	if ((err = bus_dmamap_load_mbuf(sc->sc_dmat, sc->txq[bi].m_dmamap, m,
655 		BUS_DMA_NOWAIT)) ||
656 		sc->txq[bi].m_dmamap->dm_segs[0].ds_addr & 0x3 ||
657 		sc->txq[bi].m_dmamap->dm_nsegs > 1) {
658 		/* Copy entire mbuf chain to new single */
659 		struct mbuf *mn;
660 
661 		if (err == 0)
662 			bus_dmamap_unload(sc->sc_dmat, sc->txq[bi].m_dmamap);
663 
664 		MGETHDR(mn, M_DONTWAIT, MT_DATA);
665 		if (mn == NULL) goto stop;
666 		if (m->m_pkthdr.len > MHLEN) {
667 			MCLGET(mn, M_DONTWAIT);
668 			if ((mn->m_flags & M_EXT) == 0) {
669 				m_freem(mn);
670 				goto stop;
671 			}
672 		}
673 		m_copydata(m, 0, m->m_pkthdr.len, mtod(mn, void *));
674 		mn->m_pkthdr.len = mn->m_len = m->m_pkthdr.len;
675 		IFQ_DEQUEUE(&ifp->if_snd, m);
676 		m_freem(m);
677 		m = mn;
678 		bus_dmamap_load_mbuf(sc->sc_dmat, sc->txq[bi].m_dmamap, m,
679 			BUS_DMA_NOWAIT);
680 	} else {
681 		IFQ_DEQUEUE(&ifp->if_snd, m);
682 	}
683 
684 	bpf_mtap(ifp, m, BPF_D_OUT);
685 
686 	nsegs = sc->txq[bi].m_dmamap->dm_nsegs;
687 	segs = sc->txq[bi].m_dmamap->dm_segs;
688 	if (nsegs > 1) {
689 		panic("#### ARGH #2");
690 	}
691 
692 	sc->txq[bi].m = m;
693 	sc->txqc++;
694 
695 	DPRINTFN(2,("%s: start sending idx #%i mbuf %p (txqc=%i, phys %p), len=%u\n", __FUNCTION__, bi, sc->txq[bi].m, sc->txqc, (void*)segs->ds_addr,
696 		       (unsigned)m->m_pkthdr.len));
697 #ifdef	DIAGNOSTIC
698 	if (sc->txqc > TX_QLEN) {
699 		panic("%s: txqc %i > %i", __FUNCTION__, sc->txqc, TX_QLEN);
700 	}
701 #endif
702 
703 	bus_dmamap_sync(sc->sc_dmat, sc->txq[bi].m_dmamap, 0,
704 		sc->txq[bi].m_dmamap->dm_mapsize,
705 		BUS_DMASYNC_PREWRITE);
706 
707 	EMAC_WRITE(ETH_TAR, segs->ds_addr);
708 	EMAC_WRITE(ETH_TCR, m->m_pkthdr.len);
709 	if (IFQ_IS_EMPTY(&ifp->if_snd) == 0)
710 		goto start;
711 stop:
712 
713 	splx(s);
714 	return;
715 }
716 
717 static void
718 emac_ifwatchdog(struct ifnet *ifp)
719 {
720 	struct emac_softc *sc = (struct emac_softc *)ifp->if_softc;
721 
722 	if ((ifp->if_flags & IFF_RUNNING) == 0)
723 		return;
724 	printf("%s: device timeout, CTL = 0x%08x, CFG = 0x%08x\n",
725 		device_xname(sc->sc_dev), EMAC_READ(ETH_CTL), EMAC_READ(ETH_CFG));
726 }
727 
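/*
 * emac_ifinit: bring the interface up - unmask the interrupt sources we
 * care about, enable the transmitter, receiver and management port,
 * renegotiate the media and start the 1 Hz tick callout.
 */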
728 static int
729 emac_ifinit(struct ifnet *ifp)
730 {
731 	struct emac_softc *sc = ifp->if_softc;
732 	int s = splnet();
733 
734 	callout_stop(&sc->emac_tick_ch);
735 
736 	// enable interrupts
737 	EMAC_WRITE(ETH_IDR, -1);
738 	EMAC_WRITE(ETH_IER, ETH_ISR_RCOM | ETH_ISR_TBRE | ETH_ISR_TIDLE
739 		   | ETH_ISR_RBNA | ETH_ISR_ROVR);
740 
741 	// enable transmitter / receiver
742 	EMAC_WRITE(ETH_CTL, ETH_CTL_TE | ETH_CTL_RE | ETH_CTL_ISR
743 		   | ETH_CTL_CSR | ETH_CTL_MPE);
744 
745 	mii_mediachg(&sc->sc_mii);
746 	callout_reset(&sc->emac_tick_ch, hz, emac_tick, sc);
747 	ifp->if_flags |= IFF_RUNNING;
748 	splx(s);
749 	return 0;
750 }
751 
752 static void
753 emac_ifstop(struct ifnet *ifp, int disable)
754 {
755 //	uint32_t u;
756 	struct emac_softc *sc = ifp->if_softc;
757 
758 #if 0
759 	EMAC_WRITE(ETH_CTL, ETH_CTL_MPE);	// disable everything
760 	EMAC_WRITE(ETH_IDR, -1);		// disable interrupts
761 //	EMAC_WRITE(ETH_RBQP, 0);		// clear receive
762 	EMAC_WRITE(ETH_CFG,
763 	    ETH_CFG_CLK_32 | ETH_CFG_SPD | ETH_CFG_FD | ETH_CFG_BIG);
764 	EMAC_WRITE(ETH_TCR, 0);			// send nothing
765 //	(void)EMAC_READ(ETH_ISR);
766 	u = EMAC_READ(ETH_TSR);
767 	EMAC_WRITE(ETH_TSR, (u & (ETH_TSR_UND | ETH_TSR_COMP | ETH_TSR_BNQ
768 				  | ETH_TSR_IDLE | ETH_TSR_RLE
769 				  | ETH_TSR_COL | ETH_TSR_OVR)));
770 	u = EMAC_READ(ETH_RSR);
771 	EMAC_WRITE(ETH_RSR, (u & (ETH_RSR_OVR | ETH_RSR_REC | ETH_RSR_BNA)));
772 #endif
773 	callout_stop(&sc->emac_tick_ch);
774 
775 	/* Down the MII. */
776 	mii_down(&sc->sc_mii);
777 
778 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
779 	ifp->if_timer = 0;
780 	sc->sc_mii.mii_media_status &= ~IFM_ACTIVE;
781 }
782 
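/*
 * emac_setaddr: program the receive address filters.  SA1 always holds
 * the interface's own address; up to three multicast addresses get
 * perfect filters in SA2-SA4, additional addresses go into the 64-bit
 * hash filter, and address ranges (or IFF_PROMISC) enable copy-all-frames.
 */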
783 static void
784 emac_setaddr(struct ifnet *ifp)
785 {
786 	struct emac_softc *sc = ifp->if_softc;
787 	struct ethercom *ec = &sc->sc_ec;
788 	struct ether_multi *enm;
789 	struct ether_multistep step;
790 	uint8_t ias[3][ETHER_ADDR_LEN];
791 	uint32_t h, nma = 0, hashes[2] = { 0, 0 };
792 	uint32_t ctl = EMAC_READ(ETH_CTL);
793 	uint32_t cfg = EMAC_READ(ETH_CFG);
794 
795 	/* disable receiver temporarily */
796 	EMAC_WRITE(ETH_CTL, ctl & ~ETH_CTL_RE);
797 
798 	cfg &= ~(ETH_CFG_MTI | ETH_CFG_UNI | ETH_CFG_CAF);
799 
800 	if (ifp->if_flags & IFF_PROMISC) {
801 		cfg |=	ETH_CFG_CAF;
802 	} else {
803 		cfg &= ~ETH_CFG_CAF;
804 	}
805 
806 	// ETH_CFG_BIG?
807 
808 	ifp->if_flags &= ~IFF_ALLMULTI;
809 
810 	ETHER_LOCK(ec);
811 	ETHER_FIRST_MULTI(step, ec, enm);
812 	while (enm != NULL) {
813 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
814 			/*
815 			 * We must listen to a range of multicast addresses.
816 			 * For now, just accept all multicasts, rather than
817 			 * trying to set only those filter bits needed to match
818 			 * the range.  (At this time, the only use of address
819 			 * ranges is for IP multicast routing, for which the
820 			 * range is big enough to require all bits set.)
821 			 */
822 			cfg |= ETH_CFG_CAF;
823 			hashes[0] = 0xffffffffUL;
824 			hashes[1] = 0xffffffffUL;
825 			ifp->if_flags |= IFF_ALLMULTI;
826 			nma = 0;
827 			break;
828 		}
829 
830 		if (nma < 3) {
831 			/* We can program 3 perfect address filters for mcast */
832 			memcpy(ias[nma], enm->enm_addrlo, ETHER_ADDR_LEN);
833 		} else {
834 			/*
835 			 * XXX: Datasheet is not very clear here, I'm not sure
836 			 * if I'm doing this right.  --joff
837 			 */
838 			h = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN);
839 
840 			/* Just want the 6 most-significant bits. */
841 			h = h >> 26;
842 
843 			hashes[h / 32] |= (1U << (h % 32));
844 			cfg |= ETH_CFG_MTI;
845 		}
846 		ETHER_NEXT_MULTI(step, enm);
847 		nma++;
848 	}
849 	ETHER_UNLOCK(ec);
850 
851 	// program...
852 	DPRINTFN(1,("%s: en0 %02x:%02x:%02x:%02x:%02x:%02x\n", __FUNCTION__,
853 		    sc->sc_enaddr[0], sc->sc_enaddr[1], sc->sc_enaddr[2],
854 		    sc->sc_enaddr[3], sc->sc_enaddr[4], sc->sc_enaddr[5]));
855 	EMAC_WRITE(ETH_SA1L, (sc->sc_enaddr[3] << 24)
856 		   | (sc->sc_enaddr[2] << 16) | (sc->sc_enaddr[1] << 8)
857 		   | (sc->sc_enaddr[0]));
858 	EMAC_WRITE(ETH_SA1H, (sc->sc_enaddr[5] << 8)
859 		   | (sc->sc_enaddr[4]));
860 	if (nma > 0) {
861 		DPRINTFN(1,("%s: en1 %02x:%02x:%02x:%02x:%02x:%02x\n",
862 			__FUNCTION__,
863 			ias[0][0], ias[0][1], ias[0][2],
864 			ias[0][3], ias[0][4], ias[0][5]));
865 		EMAC_WRITE(ETH_SA2L, (ias[0][3] << 24)
866 			   | (ias[0][2] << 16) | (ias[0][1] << 8)
867 			   | (ias[0][0]));
868 		EMAC_WRITE(ETH_SA2H, (ias[0][5] << 8)
869 			   | (ias[0][4]));
870 	}
871 	if (nma > 1) {
872 		DPRINTFN(1,("%s: en2 %02x:%02x:%02x:%02x:%02x:%02x\n",
873 			__FUNCTION__,
874 			ias[1][0], ias[1][1], ias[1][2],
875 			ias[1][3], ias[1][4], ias[1][5]));
876 		EMAC_WRITE(ETH_SA3L, (ias[1][3] << 24)
877 			   | (ias[1][2] << 16) | (ias[1][1] << 8)
878 			   | (ias[1][0]));
879 		EMAC_WRITE(ETH_SA3H, (ias[1][5] << 8)
880 			   | (ias[1][4]));
881 	}
882 	if (nma > 2) {
883 		DPRINTFN(1,("%s: en3 %02x:%02x:%02x:%02x:%02x:%02x\n",
884 			__FUNCTION__,
885 			ias[2][0], ias[2][1], ias[2][2],
886 			ias[2][3], ias[2][4], ias[2][5]));
887 		EMAC_WRITE(ETH_SA4L, (ias[2][3] << 24)
888 			   | (ias[2][2] << 16) | (ias[2][1] << 8)
889 			   | (ias[2][0]));
890 		EMAC_WRITE(ETH_SA4H, (ias[2][5] << 8)
891 			   | (ias[2][4]));
892 	}
893 	EMAC_WRITE(ETH_HSH, hashes[0]);
894 	EMAC_WRITE(ETH_HSL, hashes[1]);
895 	EMAC_WRITE(ETH_CFG, cfg);
896 	EMAC_WRITE(ETH_CTL, ctl | ETH_CTL_RE);
897 }
898