/*	$NetBSD: at91emac.c,v 1.35 2022/09/27 06:36:41 skrll Exp $	*/

/*
 * Copyright (c) 2007 Embedtronics Oy
 * All rights reserved.
 *
 * Based on arch/arm/ep93xx/epe.c
 *
 * Copyright (c) 2004 Jesse Off
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: at91emac.c,v 1.35 2022/09/27 06:36:41 skrll Exp $");

#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/ioctl.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/time.h>
#include <sys/device.h>
#include <uvm/uvm_extern.h>

#include <sys/bus.h>
#include <machine/intr.h>

#include <arm/cpufunc.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_types.h>
#include <net/if_media.h>
#include <net/if_ether.h>
#include <net/bpf.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/if_inarp.h>
#endif

#include <arm/at91/at91var.h>
#include <arm/at91/at91emacreg.h>
#include <arm/at91/at91emacvar.h>

#define DEFAULT_MDCDIV	32

#ifndef EMAC_FAST
#define EMAC_FAST
#endif

/*
 * EMAC_FAST is forced on above, so the bus_space accessors below are
 * normally compiled out in favour of direct ETHREG() register accesses.
 */
#ifndef EMAC_FAST
#define EMAC_READ(x) \
	bus_space_read_4(sc->sc_iot, sc->sc_ioh, (x))
#define EMAC_WRITE(x, y) \
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, (x), (y))
#else
#define EMAC_READ(x) ETHREG(x)
#define EMAC_WRITE(x, y) ETHREG(x) = (y)
#endif /* ! EMAC_FAST */

static int	emac_match(device_t, cfdata_t, void *);
static void	emac_attach(device_t, device_t, void *);
static void	emac_init(struct emac_softc *);
static int	emac_intr(void *arg);
static int	emac_gctx(struct emac_softc *);
int		emac_mii_readreg(device_t, int, int, uint16_t *);
int		emac_mii_writereg(device_t, int, int, uint16_t);
void		emac_statchg(struct ifnet *);
void		emac_tick(void *);
static int	emac_ifioctl(struct ifnet *, u_long, void *);
static void	emac_ifstart(struct ifnet *);
static void	emac_ifwatchdog(struct ifnet *);
static int	emac_ifinit(struct ifnet *);
static void	emac_ifstop(struct ifnet *, int);
static void	emac_setaddr(struct ifnet *);

CFATTACH_DECL_NEW(at91emac, sizeof(struct emac_softc),
    emac_match, emac_attach, NULL, NULL);

#ifdef	EMAC_DEBUG
int emac_debug = EMAC_DEBUG;
#define	DPRINTFN(n, fmt)	if (emac_debug >= (n)) printf fmt
#else
#define	DPRINTFN(n, fmt)
#endif

static int
emac_match(device_t parent, cfdata_t match, void *aux)
{
	if (strcmp(match->cf_name, "at91emac") == 0)
		return 2;
	return 0;
}

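/*
 * Autoconfiguration attach: map the EMAC registers, enable the peripheral
 * clock, put the MAC into a quiescent state, pick up the Ethernet address
 * from the "mac-address" device property (falling back to a fixed default),
 * establish the interrupt handler and finish setup in emac_init().
 */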
static void
emac_attach(device_t parent, device_t self, void *aux)
{
	struct emac_softc		*sc = device_private(self);
	struct at91bus_attach_args	*sa = aux;
	prop_data_t			enaddr;
	uint32_t			u;

	printf("\n");
	sc->sc_dev = self;
	sc->sc_iot = sa->sa_iot;
	sc->sc_pid = sa->sa_pid;
	sc->sc_dmat = sa->sa_dmat;

	if (bus_space_map(sa->sa_iot, sa->sa_addr, sa->sa_size, 0, &sc->sc_ioh))
		panic("%s: Cannot map registers", device_xname(self));

	/* enable peripheral clock */
	at91_peripheral_clock(sc->sc_pid, 1);

	/* configure emac: */
	EMAC_WRITE(ETH_CTL, 0);			// disable everything
	EMAC_WRITE(ETH_IDR, -1);		// disable interrupts
	EMAC_WRITE(ETH_RBQP, 0);		// clear receive
	EMAC_WRITE(ETH_CFG,
	    ETH_CFG_CLK_32 | ETH_CFG_SPD | ETH_CFG_FD | ETH_CFG_BIG);
	EMAC_WRITE(ETH_TCR, 0);			// send nothing
	//(void)EMAC_READ(ETH_ISR);
	u = EMAC_READ(ETH_TSR);
	EMAC_WRITE(ETH_TSR, (u & (ETH_TSR_UND | ETH_TSR_COMP | ETH_TSR_BNQ
				  | ETH_TSR_IDLE | ETH_TSR_RLE
				  | ETH_TSR_COL | ETH_TSR_OVR)));
	u = EMAC_READ(ETH_RSR);
	EMAC_WRITE(ETH_RSR, (u & (ETH_RSR_OVR | ETH_RSR_REC | ETH_RSR_BNA)));

	/* Fetch the Ethernet address from property if set. */
	enaddr = prop_dictionary_get(device_properties(self), "mac-address");

	if (enaddr != NULL) {
		KASSERT(prop_object_type(enaddr) == PROP_TYPE_DATA);
		KASSERT(prop_data_size(enaddr) == ETHER_ADDR_LEN);
		memcpy(sc->sc_enaddr, prop_data_data_nocopy(enaddr),
		       ETHER_ADDR_LEN);
	} else {
		static const uint8_t hardcoded[ETHER_ADDR_LEN] = {
		  0x00, 0x0d, 0x10, 0x81, 0x0c, 0x94
		};
		memcpy(sc->sc_enaddr, hardcoded, ETHER_ADDR_LEN);
	}

	at91_intr_establish(sc->sc_pid, IPL_NET, INTR_HIGH_LEVEL, emac_intr,
	    sc);
	emac_init(sc);
}

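/*
 * Reclaim completed transmit buffers: sync and unload their DMA maps and
 * free the mbufs.  Returns nonzero when the transmitter can accept another
 * frame (ETH_TSR_BNQ set), zero when the transmit queue is still full.
 */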
static int
emac_gctx(struct emac_softc *sc)
{
	uint32_t tsr;

	tsr = EMAC_READ(ETH_TSR);
	if (!(tsr & ETH_TSR_BNQ)) {
		// no space left
		return 0;
	}

	// free sent frames
	while (sc->txqc > (tsr & ETH_TSR_IDLE ? 0 : 1)) {
		int i = sc->txqi % TX_QLEN;
		bus_dmamap_sync(sc->sc_dmat, sc->txq[i].m_dmamap, 0,
		    sc->txq[i].m->m_pkthdr.len, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, sc->txq[i].m_dmamap);
		m_freem(sc->txq[i].m);
		DPRINTFN(2,("%s: freed idx #%i mbuf %p (txqc=%i)\n",
			__FUNCTION__, i, sc->txq[i].m, sc->txqc));
		sc->txq[i].m = NULL;
		sc->txqi = (i + 1) % TX_QLEN;
		sc->txqc--;
	}

	// mark we're free
	if (sc->tx_busy) {
		sc->tx_busy = false;
		/* Disable transmit-buffer-free interrupt */
		/*EMAC_WRITE(ETH_IDR, ETH_ISR_TBRE);*/
	}

	return 1;
}

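/*
 * Interrupt handler: drain received frames from the descriptor ring,
 * replenish the ring with fresh mbuf clusters, recover from RBNA/ROVR
 * conditions and kick the transmit path when buffers become free again.
 */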
static int
emac_intr(void *arg)
{
	struct emac_softc *sc = (struct emac_softc *)arg;
	struct ifnet * ifp = &sc->sc_ec.ec_if;
	uint32_t imr, isr, ctl;
	int bi;

	imr = ~EMAC_READ(ETH_IMR);
	if (!(imr & (ETH_ISR_RCOM | ETH_ISR_TBRE | ETH_ISR_TIDLE
	    | ETH_ISR_RBNA | ETH_ISR_ROVR))) {
		// interrupt not enabled, can't be us
		return 0;
	}

	isr = EMAC_READ(ETH_ISR) & imr;
#ifdef EMAC_DEBUG
	uint32_t rsr =
#endif
	EMAC_READ(ETH_RSR);		// get receive status register

	DPRINTFN(2, ("%s: isr=0x%08X rsr=0x%08X imr=0x%08X\n", __FUNCTION__,
		isr, rsr, imr));

	if (isr & ETH_ISR_RBNA) {		// out of receive buffers
		EMAC_WRITE(ETH_RSR, ETH_RSR_BNA);	// clear interrupt
		ctl = EMAC_READ(ETH_CTL);		// get current control register value
		EMAC_WRITE(ETH_CTL, ctl & ~ETH_CTL_RE);	// disable receiver
		EMAC_WRITE(ETH_RSR, ETH_RSR_BNA);	// clear BNA bit
		EMAC_WRITE(ETH_CTL, ctl |  ETH_CTL_RE);	// re-enable receiver
		if_statinc(ifp, if_ierrors);
		if_statinc(ifp, if_ipackets);
		DPRINTFN(1,("%s: out of receive buffers\n", __FUNCTION__));
	}
	if (isr & ETH_ISR_ROVR) {
		EMAC_WRITE(ETH_RSR, ETH_RSR_OVR);	// clear interrupt
		if_statinc(ifp, if_ierrors);
		if_statinc(ifp, if_ipackets);
		DPRINTFN(1,("%s: receive overrun\n", __FUNCTION__));
	}

	if (isr & ETH_ISR_RCOM) {			// packet has been received!
		uint32_t nfo;
		// @@@ if memory is NOT coherent, then we're in trouble @@@@
//		bus_dmamap_sync(sc->sc_dmat, sc->rbqpage_dmamap, 0, sc->rbqlen, BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
//		printf("## RDSC[%i].ADDR=0x%08X\n", sc->rxqi % RX_QLEN, sc->RDSC[sc->rxqi % RX_QLEN].Addr);
		DPRINTFN(2,("#2 RDSC[%i].INFO=0x%08X\n", sc->rxqi % RX_QLEN,
			sc->RDSC[sc->rxqi % RX_QLEN].Info));
		while (sc->RDSC[(bi = sc->rxqi % RX_QLEN)].Addr & ETH_RDSC_F_USED) {
			int fl;
			struct mbuf *m;

			nfo = sc->RDSC[bi].Info;
			fl = (nfo & ETH_RDSC_I_LEN) - 4;
			DPRINTFN(2,("## nfo=0x%08X\n", nfo));

			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m != NULL) MCLGET(m, M_DONTWAIT);
			if (m != NULL && (m->m_flags & M_EXT)) {
				bus_dmamap_sync(sc->sc_dmat,
				    sc->rxq[bi].m_dmamap, 0,
				    MCLBYTES, BUS_DMASYNC_POSTREAD);
				bus_dmamap_unload(sc->sc_dmat,
					sc->rxq[bi].m_dmamap);
				m_set_rcvif(sc->rxq[bi].m, ifp);
				sc->rxq[bi].m->m_pkthdr.len =
					sc->rxq[bi].m->m_len = fl;
				DPRINTFN(2,("received %u bytes packet\n", fl));
				if_percpuq_enqueue(ifp->if_percpuq, sc->rxq[bi].m);
				if (mtod(m, intptr_t) & 3) {
					m_adj(m, mtod(m, intptr_t) & 3);
				}
				sc->rxq[bi].m = m;
				bus_dmamap_load(sc->sc_dmat,
					sc->rxq[bi].m_dmamap,
					m->m_ext.ext_buf, MCLBYTES,
					NULL, BUS_DMA_NOWAIT);
				bus_dmamap_sync(sc->sc_dmat,
				    sc->rxq[bi].m_dmamap, 0,
				    MCLBYTES, BUS_DMASYNC_PREREAD);
				sc->RDSC[bi].Info = 0;
				sc->RDSC[bi].Addr =
					sc->rxq[bi].m_dmamap->dm_segs[0].ds_addr
					| (bi == (RX_QLEN-1) ? ETH_RDSC_F_WRAP : 0);
			} else {
				/* Drop packets until we can get replacement
				 * empty mbufs for the RXDQ.
				 */
				if (m != NULL) {
					m_freem(m);
				}
				if_statinc(ifp, if_ierrors);
			}
			sc->rxqi++;
		}
//		bus_dmamap_sync(sc->sc_dmat, sc->rbqpage_dmamap, 0, sc->rbqlen, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}

	if (emac_gctx(sc) > 0)
		if_schedule_deferred_start(ifp);
#if 0 // reloop
	irq = EMAC_READ(IntStsC);
	if ((irq & (IntSts_RxSQ | IntSts_ECI)) != 0)
		goto begin;
#endif

	return (1);
}


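/*
 * One-time initialisation: reset the MAC, program the station address,
 * allocate the receive descriptor ring and populate it with mbuf clusters,
 * create the transmit DMA maps, attach the MII/PHY and register the ifnet.
 */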
static void
emac_init(struct emac_softc *sc)
{
	bus_dma_segment_t segs;
	void *addr;
	int rsegs, err, i;
	struct ifnet * ifp = &sc->sc_ec.ec_if;
	struct mii_data * const mii = &sc->sc_mii;
	uint32_t u;
#if 0
	int mdcdiv = DEFAULT_MDCDIV;
#endif

	callout_init(&sc->emac_tick_ch, 0);

	/* reset the MAC to a quiescent state, keeping the management port enabled */
	EMAC_WRITE(ETH_CTL, ETH_CTL_MPE);	// disable everything but MPE
	EMAC_WRITE(ETH_IDR, -1);		// disable interrupts
	EMAC_WRITE(ETH_RBQP, 0);		// clear receive
	EMAC_WRITE(ETH_CFG,
	    ETH_CFG_CLK_32 | ETH_CFG_SPD | ETH_CFG_FD | ETH_CFG_BIG);
	EMAC_WRITE(ETH_TCR, 0);			// send nothing
//	(void)EMAC_READ(ETH_ISR);
	u = EMAC_READ(ETH_TSR);
	EMAC_WRITE(ETH_TSR, (u & (ETH_TSR_UND | ETH_TSR_COMP | ETH_TSR_BNQ
				  | ETH_TSR_IDLE | ETH_TSR_RLE
				  | ETH_TSR_COL | ETH_TSR_OVR)));
	u = EMAC_READ(ETH_RSR);
	EMAC_WRITE(ETH_RSR, (u & (ETH_RSR_OVR | ETH_RSR_REC | ETH_RSR_BNA)));

	/* configure EMAC */
	EMAC_WRITE(ETH_CFG,
	    ETH_CFG_CLK_32 | ETH_CFG_SPD | ETH_CFG_FD | ETH_CFG_BIG);
	EMAC_WRITE(ETH_CTL, ETH_CTL_MPE);
#if 0
	if (device_cfdata(sc->sc_dev)->cf_flags)
		mdcdiv = device_cfdata(sc->sc_dev)->cf_flags;
#endif
	/* set ethernet address */
	EMAC_WRITE(ETH_SA1L, (sc->sc_enaddr[3] << 24)
		   | (sc->sc_enaddr[2] << 16) | (sc->sc_enaddr[1] << 8)
		   | (sc->sc_enaddr[0]));
	EMAC_WRITE(ETH_SA1H, (sc->sc_enaddr[5] << 8)
		   | (sc->sc_enaddr[4]));
	EMAC_WRITE(ETH_SA2L, 0);
	EMAC_WRITE(ETH_SA2H, 0);
	EMAC_WRITE(ETH_SA3L, 0);
	EMAC_WRITE(ETH_SA3H, 0);
	EMAC_WRITE(ETH_SA4L, 0);
	EMAC_WRITE(ETH_SA4H, 0);

	/* Allocate a page of memory for receive queue descriptors */
	sc->rbqlen = (ETH_RDSC_SIZE * (RX_QLEN + 1) * 2 + PAGE_SIZE - 1) / PAGE_SIZE;
	sc->rbqlen *= PAGE_SIZE;
	DPRINTFN(1,("%s: rbqlen=%i\n", __FUNCTION__, sc->rbqlen));

	err = bus_dmamem_alloc(sc->sc_dmat, sc->rbqlen, 0,
		MAX(16384, PAGE_SIZE),	// see the EMAC errata for why this is forced to a 16384-byte boundary
		&segs, 1, &rsegs, BUS_DMA_WAITOK);
	if (err == 0) {
		DPRINTFN(1,("%s: -> bus_dmamem_map\n", __FUNCTION__));
		err = bus_dmamem_map(sc->sc_dmat, &segs, 1, sc->rbqlen,
			&sc->rbqpage, (BUS_DMA_WAITOK | BUS_DMA_COHERENT));
	}
	if (err == 0) {
		DPRINTFN(1,("%s: -> bus_dmamap_create\n", __FUNCTION__));
		err = bus_dmamap_create(sc->sc_dmat, sc->rbqlen, 1,
			sc->rbqlen, MAX(16384, PAGE_SIZE), BUS_DMA_WAITOK,
			&sc->rbqpage_dmamap);
	}
	if (err == 0) {
		DPRINTFN(1,("%s: -> bus_dmamap_load\n", __FUNCTION__));
		err = bus_dmamap_load(sc->sc_dmat, sc->rbqpage_dmamap,
			sc->rbqpage, sc->rbqlen, NULL, BUS_DMA_WAITOK);
	}
	if (err != 0) {
		panic("%s: Cannot get DMA memory", device_xname(sc->sc_dev));
	}
	sc->rbqpage_dsaddr = sc->rbqpage_dmamap->dm_segs[0].ds_addr;

	memset(sc->rbqpage, 0, sc->rbqlen);

	/* Set up pointers to start of each queue in kernel addr space.
	 * Each descriptor queue or status queue entry uses 2 words
	 */
	sc->RDSC = (void*)sc->rbqpage;

	/* Populate the RXQ with mbufs */
	sc->rxqi = 0;
	for (i = 0; i < RX_QLEN; i++) {
		struct mbuf *m;

		err = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES,
		    PAGE_SIZE, BUS_DMA_WAITOK, &sc->rxq[i].m_dmamap);
		if (err)
			panic("%s: dmamap_create failed: %i\n",
			    __FUNCTION__, err);

		MGETHDR(m, M_WAIT, MT_DATA);
		MCLGET(m, M_WAIT);
		sc->rxq[i].m = m;
		if (mtod(m, intptr_t) & 3) {
			m_adj(m, mtod(m, intptr_t) & 3);
		}
		err = bus_dmamap_load(sc->sc_dmat, sc->rxq[i].m_dmamap,
			m->m_ext.ext_buf, MCLBYTES, NULL,
			BUS_DMA_WAITOK);
		if (err)
			panic("%s: dmamap_load failed: %i\n",
			    __FUNCTION__, err);

		sc->RDSC[i].Addr = sc->rxq[i].m_dmamap->dm_segs[0].ds_addr
			| (i == (RX_QLEN-1) ? ETH_RDSC_F_WRAP : 0);
		sc->RDSC[i].Info = 0;
		bus_dmamap_sync(sc->sc_dmat, sc->rxq[i].m_dmamap, 0,
			MCLBYTES, BUS_DMASYNC_PREREAD);
	}

	/* prepare transmit queue */
	for (i = 0; i < TX_QLEN; i++) {
		err = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 0,
					(BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW),
					&sc->txq[i].m_dmamap);
		if (err)
			panic("%s: TX dmamap_create failed: %i",
			    __FUNCTION__, err);
		sc->txq[i].m = NULL;
	}

	/* Program each queue's start addr, cur addr, and len registers
	 * with the physical addresses.
	 */
	bus_dmamap_sync(sc->sc_dmat, sc->rbqpage_dmamap, 0, sc->rbqlen,
			 BUS_DMASYNC_PREREAD);
	addr = (void *)sc->rbqpage_dmamap->dm_segs[0].ds_addr;
	EMAC_WRITE(ETH_RBQP, (uint32_t)addr);

	/*
	 * Attach the PHY via MII; the MDC clock divider (HCLK/32) was
	 * already selected with ETH_CFG_CLK_32 above.
	 */
	mii->mii_ifp = ifp;
	mii->mii_readreg = emac_mii_readreg;
	mii->mii_writereg = emac_mii_writereg;
	mii->mii_statchg = emac_statchg;
	sc->sc_ec.ec_mii = mii;
	ifmedia_init(&mii->mii_media, IFM_IMASK, ether_mediachange,
		ether_mediastatus);
	mii_attach(sc->sc_dev, mii, 0xffffffff, MII_PHY_ANY,
		MII_OFFSET_ANY, 0);
	ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);

	// interrupt and transmitter/receiver enable is done in emac_ifinit()

#if 0
	// enable / disable interrupts
	EMAC_WRITE(ETH_IDR, -1);
	EMAC_WRITE(ETH_IER, ETH_ISR_RCOM | ETH_ISR_TBRE | ETH_ISR_TIDLE
		   | ETH_ISR_RBNA | ETH_ISR_ROVR);
//	(void)EMAC_READ(ETH_ISR); // why

	// enable transmitter / receiver
	EMAC_WRITE(ETH_CTL, ETH_CTL_TE | ETH_CTL_RE | ETH_CTL_ISR
		   | ETH_CTL_CSR | ETH_CTL_MPE);
#endif
	/*
	 * We can support 802.1Q VLAN-sized frames.
	 */
	sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_MTU;

	strcpy(ifp->if_xname, device_xname(sc->sc_dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = emac_ifioctl;
	ifp->if_start = emac_ifstart;
	ifp->if_watchdog = emac_ifwatchdog;
	ifp->if_init = emac_ifinit;
	ifp->if_stop = emac_ifstop;
	ifp->if_timer = 0;
	ifp->if_softc = sc;
	IFQ_SET_READY(&ifp->if_snd);
	if_attach(ifp);
	if_deferred_start_init(ifp, NULL);
	ether_ifattach(ifp, sc->sc_enaddr);
}

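/*
 * MII management-frame access to the PHY registers through the ETH_MAN
 * register; both routines busy-wait on ETH_SR_IDLE for completion.
 */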
int
emac_mii_readreg(device_t self, int phy, int reg, uint16_t *val)
{
#ifndef EMAC_FAST
	struct emac_softc *sc = device_private(self);
#endif

	EMAC_WRITE(ETH_MAN, (ETH_MAN_HIGH | ETH_MAN_RW_RD
			     | ((phy << ETH_MAN_PHYA_SHIFT) & ETH_MAN_PHYA)
			     | ((reg << ETH_MAN_REGA_SHIFT) & ETH_MAN_REGA)
			     | ETH_MAN_CODE_IEEE802_3));
	while (!(EMAC_READ(ETH_SR) & ETH_SR_IDLE))
		;
	*val = EMAC_READ(ETH_MAN) & ETH_MAN_DATA;

	return 0;
}

int
emac_mii_writereg(device_t self, int phy, int reg, uint16_t val)
{
#ifndef EMAC_FAST
	struct emac_softc *sc = device_private(self);
#endif

	EMAC_WRITE(ETH_MAN, (ETH_MAN_HIGH | ETH_MAN_RW_WR
			     | ((phy << ETH_MAN_PHYA_SHIFT) & ETH_MAN_PHYA)
			     | ((reg << ETH_MAN_REGA_SHIFT) & ETH_MAN_REGA)
			     | ETH_MAN_CODE_IEEE802_3
			     | (val & ETH_MAN_DATA)));
	while (!(EMAC_READ(ETH_SR) & ETH_SR_IDLE))
		;

	return 0;
}

void
emac_statchg(struct ifnet *ifp)
{
	struct emac_softc *sc = ifp->if_softc;
	uint32_t reg;

	/*
	 * We must keep the MAC and the PHY in sync as
	 * to the status of full-duplex!
	 */
	reg = EMAC_READ(ETH_CFG);
	if (sc->sc_mii.mii_media_active & IFM_FDX)
		reg |= ETH_CFG_FD;
	else
		reg &= ~ETH_CFG_FD;
	EMAC_WRITE(ETH_CFG, reg);
}

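/*
 * Once-a-second callout: fold the hardware collision and missed-frame
 * counters into the interface statistics, restart transmission if buffers
 * have become available, and tick the MII.
 */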
void
emac_tick(void *arg)
{
	struct emac_softc *sc = (struct emac_softc *)arg;
	struct ifnet * ifp = &sc->sc_ec.ec_if;
	int s;
	uint32_t misses;

	if_statadd(ifp, if_collisions,
	    EMAC_READ(ETH_SCOL) + EMAC_READ(ETH_MCOL));
	/* These misses are ok, they will happen if the RAM/CPU can't keep up */
	misses = EMAC_READ(ETH_DRFC);
	if (misses > 0)
		printf("%s: %u rx misses\n", device_xname(sc->sc_dev), misses);

	s = splnet();
	if (emac_gctx(sc) > 0 && IFQ_IS_EMPTY(&ifp->if_snd) == 0) {
		emac_ifstart(ifp);
	}
	splx(s);

	mii_tick(&sc->sc_mii);
	callout_reset(&sc->emac_tick_ch, hz, emac_tick, sc);
}


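/*
 * Interface ioctl: everything is handed to ether_ioctl(); on ENETRESET
 * the receive filter is reprogrammed while the interface is running.
 */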
static int
emac_ifioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	int s, error;

	s = splnet();
	switch (cmd) {
	default:
		error = ether_ioctl(ifp, cmd, data);
		if (error == ENETRESET) {
			if (ifp->if_flags & IFF_RUNNING)
				emac_setaddr(ifp);
			error = 0;
		}
	}
	splx(s);
	return error;
}

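/*
 * Transmit start: while the MAC can queue a frame, dequeue packets from
 * the send queue, coalescing any chain that does not fit the single,
 * word-aligned DMA segment the hardware expects, then hand the frame to
 * the MAC through ETH_TAR/ETH_TCR.
 */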
static void
emac_ifstart(struct ifnet *ifp)
{
	struct emac_softc *sc = (struct emac_softc *)ifp->if_softc;
	struct mbuf *m;
	bus_dma_segment_t *segs;
	int s, bi, err, nsegs;

	s = splnet();
start:
	if (emac_gctx(sc) == 0) {
		/* Enable transmit-buffer-free interrupt */
		EMAC_WRITE(ETH_IER, ETH_ISR_TBRE);
		sc->tx_busy = true;
		ifp->if_timer = 10;
		splx(s);
		return;
	}

	ifp->if_timer = 0;

	IFQ_POLL(&ifp->if_snd, m);
	if (m == NULL) {
		splx(s);
		return;
	}
//more:
	bi = (sc->txqi + sc->txqc) % TX_QLEN;
	if ((err = bus_dmamap_load_mbuf(sc->sc_dmat, sc->txq[bi].m_dmamap, m,
		BUS_DMA_NOWAIT)) ||
		sc->txq[bi].m_dmamap->dm_segs[0].ds_addr & 0x3 ||
		sc->txq[bi].m_dmamap->dm_nsegs > 1) {
		/* Copy entire mbuf chain to new single */
		struct mbuf *mn;

		if (err == 0)
			bus_dmamap_unload(sc->sc_dmat, sc->txq[bi].m_dmamap);

		MGETHDR(mn, M_DONTWAIT, MT_DATA);
		if (mn == NULL) goto stop;
		if (m->m_pkthdr.len > MHLEN) {
			MCLGET(mn, M_DONTWAIT);
			if ((mn->m_flags & M_EXT) == 0) {
				m_freem(mn);
				goto stop;
			}
		}
		m_copydata(m, 0, m->m_pkthdr.len, mtod(mn, void *));
		mn->m_pkthdr.len = mn->m_len = m->m_pkthdr.len;
		IFQ_DEQUEUE(&ifp->if_snd, m);
		m_freem(m);
		m = mn;
		bus_dmamap_load_mbuf(sc->sc_dmat, sc->txq[bi].m_dmamap, m,
			BUS_DMA_NOWAIT);
	} else {
		IFQ_DEQUEUE(&ifp->if_snd, m);
	}

	bpf_mtap(ifp, m, BPF_D_OUT);

	nsegs = sc->txq[bi].m_dmamap->dm_nsegs;
	segs = sc->txq[bi].m_dmamap->dm_segs;
	if (nsegs > 1) {
		panic("%s: TX mbuf unexpectedly maps to %i segments",
		    __FUNCTION__, nsegs);
	}

	sc->txq[bi].m = m;
	sc->txqc++;

	DPRINTFN(2,("%s: start sending idx #%i mbuf %p (txqc=%i, phys %p), "
		"len=%u\n", __FUNCTION__, bi, sc->txq[bi].m, sc->txqc,
		(void *)segs->ds_addr, (unsigned)m->m_pkthdr.len));
#ifdef	DIAGNOSTIC
	if (sc->txqc > TX_QLEN) {
		panic("%s: txqc %i > %i", __FUNCTION__, sc->txqc, TX_QLEN);
	}
#endif

	bus_dmamap_sync(sc->sc_dmat, sc->txq[bi].m_dmamap, 0,
		sc->txq[bi].m_dmamap->dm_mapsize,
		BUS_DMASYNC_PREWRITE);

	EMAC_WRITE(ETH_TAR, segs->ds_addr);
	EMAC_WRITE(ETH_TCR, m->m_pkthdr.len);
	if (IFQ_IS_EMPTY(&ifp->if_snd) == 0)
		goto start;
stop:

	splx(s);
	return;
}

static void
emac_ifwatchdog(struct ifnet *ifp)
{
	struct emac_softc *sc = (struct emac_softc *)ifp->if_softc;

	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return;
	printf("%s: device timeout, CTL = 0x%08x, CFG = 0x%08x\n",
		device_xname(sc->sc_dev), EMAC_READ(ETH_CTL), EMAC_READ(ETH_CFG));
}

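/*
 * Bring the interface up: unmask the interrupts we care about, enable the
 * transmitter and receiver, set the media and start the periodic callout.
 */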
static int
emac_ifinit(struct ifnet *ifp)
{
	struct emac_softc *sc = ifp->if_softc;
	int s = splnet();

	callout_stop(&sc->emac_tick_ch);

	// enable interrupts
	EMAC_WRITE(ETH_IDR, -1);
	EMAC_WRITE(ETH_IER, ETH_ISR_RCOM | ETH_ISR_TBRE | ETH_ISR_TIDLE
		   | ETH_ISR_RBNA | ETH_ISR_ROVR);

	// enable transmitter / receiver
	EMAC_WRITE(ETH_CTL, ETH_CTL_TE | ETH_CTL_RE | ETH_CTL_ISR
		   | ETH_CTL_CSR | ETH_CTL_MPE);

	mii_mediachg(&sc->sc_mii);
	callout_reset(&sc->emac_tick_ch, hz, emac_tick, sc);
	ifp->if_flags |= IFF_RUNNING;
	splx(s);
	return 0;
}

static void
emac_ifstop(struct ifnet *ifp, int disable)
{
//	uint32_t u;
	struct emac_softc *sc = ifp->if_softc;

#if 0
	EMAC_WRITE(ETH_CTL, ETH_CTL_MPE);	// disable everything
	EMAC_WRITE(ETH_IDR, -1);		// disable interrupts
//	EMAC_WRITE(ETH_RBQP, 0);		// clear receive
	EMAC_WRITE(ETH_CFG,
	    ETH_CFG_CLK_32 | ETH_CFG_SPD | ETH_CFG_FD | ETH_CFG_BIG);
	EMAC_WRITE(ETH_TCR, 0);			// send nothing
//	(void)EMAC_READ(ETH_ISR);
	u = EMAC_READ(ETH_TSR);
	EMAC_WRITE(ETH_TSR, (u & (ETH_TSR_UND | ETH_TSR_COMP | ETH_TSR_BNQ
				  | ETH_TSR_IDLE | ETH_TSR_RLE
				  | ETH_TSR_COL | ETH_TSR_OVR)));
	u = EMAC_READ(ETH_RSR);
	EMAC_WRITE(ETH_RSR, (u & (ETH_RSR_OVR | ETH_RSR_REC | ETH_RSR_BNA)));
#endif
	callout_stop(&sc->emac_tick_ch);

	/* Down the MII. */
	mii_down(&sc->sc_mii);

	ifp->if_flags &= ~IFF_RUNNING;
	ifp->if_timer = 0;
	sc->sc_mii.mii_media_status &= ~IFM_ACTIVE;
}

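/*
 * Program the receive filter: the station address goes into SA1, up to
 * three multicast addresses get perfect filters in SA2..SA4, anything
 * beyond that falls back to the 64-bit hash filter, and promiscuous mode
 * or a multicast range turns on "copy all frames".
 */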
static void
emac_setaddr(struct ifnet *ifp)
{
	struct emac_softc *sc = ifp->if_softc;
	struct ethercom *ec = &sc->sc_ec;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint8_t ias[3][ETHER_ADDR_LEN];
	uint32_t h, nma = 0, hashes[2] = { 0, 0 };
	uint32_t ctl = EMAC_READ(ETH_CTL);
	uint32_t cfg = EMAC_READ(ETH_CFG);

	/* disable receiver temporarily */
	EMAC_WRITE(ETH_CTL, ctl & ~ETH_CTL_RE);

	cfg &= ~(ETH_CFG_MTI | ETH_CFG_UNI | ETH_CFG_CAF);

	if (ifp->if_flags & IFF_PROMISC) {
		cfg |= ETH_CFG_CAF;
	} else {
		cfg &= ~ETH_CFG_CAF;
	}

	// ETH_CFG_BIG?

	ifp->if_flags &= ~IFF_ALLMULTI;

	ETHER_LOCK(ec);
	ETHER_FIRST_MULTI(step, ec, enm);
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			/*
			 * We must listen to a range of multicast addresses.
			 * For now, just accept all multicasts, rather than
			 * trying to set only those filter bits needed to match
			 * the range.  (At this time, the only use of address
			 * ranges is for IP multicast routing, for which the
			 * range is big enough to require all bits set.)
			 */
			cfg |= ETH_CFG_CAF;
			hashes[0] = 0xffffffffUL;
			hashes[1] = 0xffffffffUL;
			ifp->if_flags |= IFF_ALLMULTI;
			nma = 0;
			break;
		}

		if (nma < 3) {
			/* We can program 3 perfect address filters for mcast */
			memcpy(ias[nma], enm->enm_addrlo, ETHER_ADDR_LEN);
		} else {
			/*
			 * XXX: Datasheet is not very clear here, I'm not sure
			 * if I'm doing this right.  --joff
			 */
			h = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN);

			/* Just want the 6 most-significant bits. */
			h = h >> 26;

			hashes[ h / 32 ] |=  (1 << (h % 32));
			cfg |= ETH_CFG_MTI;
		}
		ETHER_NEXT_MULTI(step, enm);
		nma++;
	}
	ETHER_UNLOCK(ec);

	// program...
	DPRINTFN(1,("%s: en0 %02x:%02x:%02x:%02x:%02x:%02x\n", __FUNCTION__,
		    sc->sc_enaddr[0], sc->sc_enaddr[1], sc->sc_enaddr[2],
		    sc->sc_enaddr[3], sc->sc_enaddr[4], sc->sc_enaddr[5]));
	EMAC_WRITE(ETH_SA1L, (sc->sc_enaddr[3] << 24)
		   | (sc->sc_enaddr[2] << 16) | (sc->sc_enaddr[1] << 8)
		   | (sc->sc_enaddr[0]));
	EMAC_WRITE(ETH_SA1H, (sc->sc_enaddr[5] << 8)
		   | (sc->sc_enaddr[4]));
	if (nma > 0) {
		DPRINTFN(1,("%s: en1 %02x:%02x:%02x:%02x:%02x:%02x\n",
			__FUNCTION__,
			ias[0][0], ias[0][1], ias[0][2],
			ias[0][3], ias[0][4], ias[0][5]));
		EMAC_WRITE(ETH_SA2L, (ias[0][3] << 24)
			   | (ias[0][2] << 16) | (ias[0][1] << 8)
			   | (ias[0][0]));
		EMAC_WRITE(ETH_SA2H, (ias[0][5] << 8)
			   | (ias[0][4]));
	}
	if (nma > 1) {
		DPRINTFN(1,("%s: en2 %02x:%02x:%02x:%02x:%02x:%02x\n",
			__FUNCTION__,
			ias[1][0], ias[1][1], ias[1][2],
			ias[1][3], ias[1][4], ias[1][5]));
		EMAC_WRITE(ETH_SA3L, (ias[1][3] << 24)
			   | (ias[1][2] << 16) | (ias[1][1] << 8)
			   | (ias[1][0]));
		EMAC_WRITE(ETH_SA3H, (ias[1][5] << 8)
			   | (ias[1][4]));
	}
	if (nma > 2) {
		DPRINTFN(1,("%s: en3 %02x:%02x:%02x:%02x:%02x:%02x\n",
			__FUNCTION__,
			ias[2][0], ias[2][1], ias[2][2],
			ias[2][3], ias[2][4], ias[2][5]));
		EMAC_WRITE(ETH_SA4L, (ias[2][3] << 24)
			   | (ias[2][2] << 16) | (ias[2][1] << 8)
			   | (ias[2][0]));
		EMAC_WRITE(ETH_SA4H, (ias[2][5] << 8)
			   | (ias[2][4]));
	}
	EMAC_WRITE(ETH_HSH, hashes[0]);
	EMAC_WRITE(ETH_HSL, hashes[1]);
	EMAC_WRITE(ETH_CFG, cfg);
	EMAC_WRITE(ETH_CTL, ctl | ETH_CTL_RE);
}
875