1 /*	$Id: at91emac.c,v 1.2 2008/07/03 01:15:38 matt Exp $	*/
2 /*	$NetBSD: at91emac.c,v 1.2 2008/07/03 01:15:38 matt Exp $	*/
3 
4 /*
5  * Copyright (c) 2007 Embedtronics Oy
6  * All rights reserved.
7  *
8  * Based on arch/arm/ep93xx/epe.c
9  *
10  * Copyright (c) 2004 Jesse Off
11  * All rights reserved.
12  *
13  * Redistribution and use in source and binary forms, with or without
14  * modification, are permitted provided that the following conditions
15  * are met:
16  * 1. Redistributions of source code must retain the above copyright
17  *    notice, this list of conditions and the following disclaimer.
18  * 2. Redistributions in binary form must reproduce the above copyright
19  *    notice, this list of conditions and the following disclaimer in the
20  *    documentation and/or other materials provided with the distribution.
21  * 3. All advertising materials mentioning features or use of this software
22  *    must display the following acknowledgement:
23  *	This product includes software developed by the NetBSD
24  *	Foundation, Inc. and its contributors.
25  * 4. Neither the name of The NetBSD Foundation nor the names of its
26  *    contributors may be used to endorse or promote products derived
27  *    from this software without specific prior written permission.
28  *
29  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
30  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
31  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
32  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
33  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
34  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
35  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
36  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
37  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
38  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
39  * POSSIBILITY OF SUCH DAMAGE.
40  */
41 
42 #include <sys/cdefs.h>
43 __KERNEL_RCSID(0, "$NetBSD: at91emac.c,v 1.2 2008/07/03 01:15:38 matt Exp $");
44 
45 #include <sys/types.h>
46 #include <sys/param.h>
47 #include <sys/systm.h>
48 #include <sys/ioctl.h>
49 #include <sys/kernel.h>
50 #include <sys/proc.h>
51 #include <sys/malloc.h>
52 #include <sys/time.h>
53 #include <sys/device.h>
54 #include <uvm/uvm_extern.h>
55 
56 #include <machine/bus.h>
57 #include <machine/intr.h>
58 
59 #include <arm/cpufunc.h>
60 
61 #include <net/if.h>
62 #include <net/if_dl.h>
63 #include <net/if_types.h>
64 #include <net/if_media.h>
65 #include <net/if_ether.h>
66 
67 #include <dev/mii/mii.h>
68 #include <dev/mii/miivar.h>
69 
70 #ifdef INET
71 #include <netinet/in.h>
72 #include <netinet/in_systm.h>
73 #include <netinet/in_var.h>
74 #include <netinet/ip.h>
75 #include <netinet/if_inarp.h>
76 #endif
77 
78 #ifdef NS
79 #include <netns/ns.h>
80 #include <netns/ns_if.h>
81 #endif
82 
83 #include "bpfilter.h"
84 #if NBPFILTER > 0
85 #include <net/bpf.h>
86 #include <net/bpfdesc.h>
87 #endif
88 
89 #ifdef IPKDB_AT91	// @@@
90 #include <ipkdb/ipkdb.h>
91 #endif
92 
93 #include <arm/at91/at91var.h>
94 #include <arm/at91/at91emacreg.h>
95 #include <arm/at91/at91emacvar.h>
96 
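/*
 * Default divisor used to derive the MDC (MII management) clock from the
 * MAC clock; in this revision it is only consulted by the cf_flags code
 * in emac_init(), which is currently under #if 0.
 */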
97 #define DEFAULT_MDCDIV	32
98 
99 #ifndef EMAC_FAST
100 #define EMAC_FAST
101 #endif
102 
103 #ifndef EMAC_FAST
104 #define EMAC_READ(x) \
105 	bus_space_read_4(sc->sc_iot, sc->sc_ioh, (EPE_ ## x))
106 #define EMAC_WRITE(x, y) \
107 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, (EPE_ ## x), (y))
108 #else
109 #define EMAC_READ(x) ETHREG(x)
110 #define EMAC_WRITE(x, y) ETHREG(x) = (y)
111 #endif /* ! EMAC_FAST */
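/*
 * Two register access flavours: the bus_space variant above, and the
 * ETHREG() macro used when EMAC_FAST is defined (which it always is here,
 * given the forced definition above).  ETHREG() is presumably a direct
 * access through the statically mapped EMAC registers.
 */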
112 
113 static int	emac_match(device_t, cfdata_t, void *);
114 static void	emac_attach(device_t, device_t, void *);
115 static void	emac_init(struct emac_softc *);
116 static int      emac_intr(void* arg);
117 static int	emac_gctx(struct emac_softc *);
118 static int	emac_mediachange(struct ifnet *);
119 static void	emac_mediastatus(struct ifnet *, struct ifmediareq *);
120 int		emac_mii_readreg (device_t, int, int);
121 void		emac_mii_writereg (device_t, int, int, int);
122 void		emac_statchg (device_t );
123 void		emac_tick (void *);
124 static int	emac_ifioctl (struct ifnet *, u_long, void *);
125 static void	emac_ifstart (struct ifnet *);
126 static void	emac_ifwatchdog (struct ifnet *);
127 static int	emac_ifinit (struct ifnet *);
128 static void	emac_ifstop (struct ifnet *, int);
129 static void	emac_setaddr (struct ifnet *);
130 
131 CFATTACH_DECL(at91emac, sizeof(struct emac_softc),
132     emac_match, emac_attach, NULL, NULL);
133 
134 #ifdef	EMAC_DEBUG
135 int emac_debug = EMAC_DEBUG;
136 #define	DPRINTFN(n,fmt)	if (emac_debug >= (n)) printf fmt
137 #else
138 #define	DPRINTFN(n,fmt)
139 #endif
140 
141 static int
142 emac_match(device_t parent, cfdata_t match, void *aux)
143 {
144 	if (strcmp(match->cf_name, "at91emac") == 0)
145 		return 2;
146 	return 0;
147 }
148 
149 static void
150 emac_attach(device_t parent, device_t self, void *aux)
151 {
152 	struct emac_softc		*sc = device_private(self);
153 	struct at91bus_attach_args	*sa = aux;
154 	prop_data_t			enaddr;
155 	uint32_t			u;
156 
157 	printf("\n");
158 	sc->sc_dev = self;
159 	sc->sc_iot = sa->sa_iot;
160 	sc->sc_pid = sa->sa_pid;
161 	sc->sc_dmat = sa->sa_dmat;
162 
163 	if (bus_space_map(sa->sa_iot, sa->sa_addr, sa->sa_size, 0, &sc->sc_ioh))
164 		panic("%s: Cannot map registers", device_xname(self));
165 
166 	/* enable peripheral clock */
167 	at91_peripheral_clock(sc->sc_pid, 1);
168 
169 	/* configure emac: */
170 	EMAC_WRITE(ETH_CTL, 0);			// disable everything
171 	EMAC_WRITE(ETH_IDR, -1);		// disable interrupts
172 	EMAC_WRITE(ETH_RBQP, 0);		// clear receive
173 	EMAC_WRITE(ETH_CFG, ETH_CFG_CLK_32 | ETH_CFG_SPD | ETH_CFG_FD | ETH_CFG_BIG);
174 	EMAC_WRITE(ETH_TCR, 0);			// send nothing
175 	//(void)EMAC_READ(ETH_ISR);
176 	u = EMAC_READ(ETH_TSR);
177 	EMAC_WRITE(ETH_TSR, (u & (ETH_TSR_UND | ETH_TSR_COMP | ETH_TSR_BNQ
178 				  | ETH_TSR_IDLE | ETH_TSR_RLE
179 				  | ETH_TSR_COL|ETH_TSR_OVR)));
180 	u = EMAC_READ(ETH_RSR);
181 	EMAC_WRITE(ETH_RSR, (u & (ETH_RSR_OVR|ETH_RSR_REC|ETH_RSR_BNA)));
182 
183 	/* Fetch the Ethernet address from property if set. */
184 	enaddr = prop_dictionary_get(device_properties(self), "mac-addr");
185 
186 	if (enaddr != NULL) {
187 		KASSERT(prop_object_type(enaddr) == PROP_TYPE_DATA);
188 		KASSERT(prop_data_size(enaddr) == ETHER_ADDR_LEN);
189 		memcpy(sc->sc_enaddr, prop_data_data_nocopy(enaddr),
190 		       ETHER_ADDR_LEN);
191 	} else {
192 		static const uint8_t hardcoded[ETHER_ADDR_LEN] = {
193 		  0x00, 0x0d, 0x10, 0x81, 0x0c, 0x94
194 		};
195 		memcpy(sc->sc_enaddr, hardcoded, ETHER_ADDR_LEN);
196 	}
197 
198         at91_intr_establish(sc->sc_pid, IPL_NET, INTR_HIGH_LEVEL, emac_intr, sc);
199 	emac_init(sc);
200 }
201 
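/*
 * emac_gctx -- reclaim transmit queue entries that the EMAC has finished
 * with and clear IFF_OACTIVE; returns non-zero when the hardware can accept
 * another frame (ETH_TSR_BNQ set), so the caller may queue more packets.
 */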
202 static int
203 emac_gctx(struct emac_softc *sc)
204 {
205 	struct ifnet * ifp = &sc->sc_ec.ec_if;
206 	u_int32_t tsr;
207 
208 	tsr = EMAC_READ(ETH_TSR);
209 	if (!(tsr & ETH_TSR_BNQ)) {
210 		// no space left
211 		return 0;
212 	}
213 
214 	// free sent frames
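	// (unless the transmitter reports ETH_TSR_IDLE, one frame may still
	//  be on the wire, so leave a single queue entry unreclaimed)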
215 	while (sc->txqc > (tsr & ETH_TSR_IDLE ? 0 : 1)) {
216 		int i = sc->txqi % TX_QLEN;
217 		bus_dmamap_sync(sc->sc_dmat, sc->txq[i].m_dmamap, 0,
218 				sc->txq[i].m->m_pkthdr.len, BUS_DMASYNC_POSTWRITE);
219 		bus_dmamap_unload(sc->sc_dmat, sc->txq[i].m_dmamap);
220 		m_freem(sc->txq[i].m);
221 		DPRINTFN(2,("%s: freed idx #%i mbuf %p (txqc=%i)\n", __FUNCTION__, i, sc->txq[i].m, sc->txqc));
222 		sc->txq[i].m = NULL;
223 		sc->txqi = (i + 1) % TX_QLEN;
224 		sc->txqc--;
225 	}
226 
227 	// mark that we are free: clear IFF_OACTIVE so the queue can be restarted
228 	if (ifp->if_flags & IFF_OACTIVE) {
229 		ifp->if_flags &= ~IFF_OACTIVE;
230 		/* Disable transmit-buffer-free interrupt */
231 		/*EMAC_WRITE(ETH_IDR, ETH_ISR_TBRE);*/
232 	}
233 
234 	return 1;
235 }
236 
237 static int
238 emac_intr(void *arg)
239 {
240 	struct emac_softc *sc = (struct emac_softc *)arg;
241 	struct ifnet * ifp = &sc->sc_ec.ec_if;
242 	u_int32_t imr, isr, rsr, ctl;
243 	int bi;
244 
245 	imr = ~EMAC_READ(ETH_IMR);
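	// ETH_IMR appears to read back the interrupt *mask* (1 = source
	// disabled), so the inverted value is the set of enabled sources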
246 	if (!(imr & (ETH_ISR_RCOM|ETH_ISR_TBRE|ETH_ISR_TIDLE|ETH_ISR_RBNA|ETH_ISR_ROVR))) {
247 		// interrupt not enabled, can't be us
248 		return 0;
249 	}
250 
251 	isr = EMAC_READ(ETH_ISR) & imr;
252 	rsr = EMAC_READ(ETH_RSR);		// get receive status register
253 
254 	DPRINTFN(2, ("%s: isr=0x%08X rsr=0x%08X imr=0x%08X\n", __FUNCTION__, isr, rsr, imr));
255 
256 	if (isr & ETH_ISR_RBNA) {		// out of receive buffers
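		// assumed recovery sequence for "receive buffer not available":
		// clear the status bit and bounce the receiver enable so the
		// receive DMA restarts from the current descriptor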
257 		EMAC_WRITE(ETH_RSR, ETH_RSR_BNA);	// clear interrupt
258 		ctl = EMAC_READ(ETH_CTL);		// get current control register value
259 		EMAC_WRITE(ETH_CTL, ctl & ~ETH_CTL_RE);	// disable receiver
260 		EMAC_WRITE(ETH_RSR, ETH_RSR_BNA);	// clear BNA bit
261 		EMAC_WRITE(ETH_CTL, ctl |  ETH_CTL_RE);	// re-enable receiver
262 		ifp->if_ierrors++;
263 		ifp->if_ipackets++;
264 		DPRINTFN(1,("%s: out of receive buffers\n", __FUNCTION__));
265 	}
266 	if (isr & ETH_ISR_ROVR) {
267 		EMAC_WRITE(ETH_RSR, ETH_RSR_OVR);	// clear interrupt
268 		ifp->if_ierrors++;
269 		ifp->if_ipackets++;
270 		DPRINTFN(1,("%s: receive overrun\n", __FUNCTION__));
271 	}
272 
273 	if (isr & ETH_ISR_RCOM) {			// packet has been received!
274 		uint32_t nfo;
275 		// @@@ if memory is NOT coherent, then we're in trouble @@@@
276 //		bus_dmamap_sync(sc->sc_dmat, sc->rbqpage_dmamap, 0, sc->rbqlen, BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
277 //		printf("## RDSC[%i].ADDR=0x%08X\n", sc->rxqi % RX_QLEN, sc->RDSC[sc->rxqi % RX_QLEN].Addr);
278 		DPRINTFN(2,("#2 RDSC[%i].INFO=0x%08X\n", sc->rxqi % RX_QLEN, sc->RDSC[sc->rxqi % RX_QLEN].Info));
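		// walk the receive descriptor ring: the EMAC is assumed to set
		// the USED (ownership) bit in the address word of every
		// descriptor it has filled; ETH_RDSC_F_WRAP on the last entry
		// wraps the ring back to the start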
279 		while (sc->RDSC[(bi = sc->rxqi % RX_QLEN)].Addr & ETH_RDSC_F_USED) {
280 			int fl;
281 			struct mbuf *m;
282 
283 			nfo = sc->RDSC[bi].Info;
284 		  	fl = (nfo & ETH_RDSC_I_LEN) - 4;
285 			DPRINTFN(2,("## nfo=0x%08X\n", nfo));
286 
287 			MGETHDR(m, M_DONTWAIT, MT_DATA);
288 			if (m != NULL) MCLGET(m, M_DONTWAIT);
289 			if (m != NULL && (m->m_flags & M_EXT)) {
290 				bus_dmamap_sync(sc->sc_dmat, sc->rxq[bi].m_dmamap, 0,
291 						MCLBYTES, BUS_DMASYNC_POSTREAD);
292 				bus_dmamap_unload(sc->sc_dmat,
293 					sc->rxq[bi].m_dmamap);
294 				sc->rxq[bi].m->m_pkthdr.rcvif = ifp;
295 				sc->rxq[bi].m->m_pkthdr.len =
296 					sc->rxq[bi].m->m_len = fl;
297 #if NBPFILTER > 0
298 				if (ifp->if_bpf)
299 					bpf_mtap(ifp->if_bpf, sc->rxq[bi].m);
300 #endif /* NBPFILTER > 0 */
301 				DPRINTFN(2,("received %u bytes packet\n", fl));
302                                 (*ifp->if_input)(ifp, sc->rxq[bi].m);
303 				if (mtod(m, intptr_t) & 3) {
304 					m_adj(m, mtod(m, intptr_t) & 3);
305 				}
306 				sc->rxq[bi].m = m;
307 				bus_dmamap_load(sc->sc_dmat,
308 					sc->rxq[bi].m_dmamap,
309 					m->m_ext.ext_buf, MCLBYTES,
310 					NULL, BUS_DMA_NOWAIT);
311 				bus_dmamap_sync(sc->sc_dmat, sc->rxq[bi].m_dmamap, 0,
312 						MCLBYTES, BUS_DMASYNC_PREREAD);
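				// rewriting Info/Addr (without the USED bit)
				// hands the descriptor back to the EMAC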
313 				sc->RDSC[bi].Info = 0;
314 				sc->RDSC[bi].Addr =
315 					sc->rxq[bi].m_dmamap->dm_segs[0].ds_addr
316 					| (bi == (RX_QLEN-1) ? ETH_RDSC_F_WRAP : 0);
317 			} else {
318 				/* Drop packets until we can get replacement
319 				 * empty mbufs for the RXDQ.
320 				 */
321 				if (m != NULL) {
322 					m_freem(m);
323 				}
324 				ifp->if_ierrors++;
325 			}
326 			sc->rxqi++;
327 		}
328 //		bus_dmamap_sync(sc->sc_dmat, sc->rbqpage_dmamap, 0, sc->rbqlen, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
329 	}
330 
331 	if (emac_gctx(sc) > 0 && IFQ_IS_EMPTY(&ifp->if_snd) == 0) {
332 		emac_ifstart(ifp);
333 	}
334 #if 0 // reloop
335 	irq = EMAC_READ(IntStsC);
336 	if ((irq & (IntSts_RxSQ|IntSts_ECI)) != 0)
337 		goto begin;
338 #endif
339 
340 	return (1);
341 }
342 
343 
344 static void
345 emac_init(struct emac_softc *sc)
346 {
347 	bus_dma_segment_t segs;
348 	void *addr;
349 	int rsegs, err, i;
350 	struct ifnet * ifp = &sc->sc_ec.ec_if;
351 	uint32_t u;
352 #if 0
353 	int mdcdiv = DEFAULT_MDCDIV;
354 #endif
355 
356 	callout_init(&sc->emac_tick_ch, 0);
357 
358 	// quiesce the EMAC before (re)programming it
359 	EMAC_WRITE(ETH_CTL, ETH_CTL_MPE);	// disable rx/tx, keep management port enabled
360 	EMAC_WRITE(ETH_IDR, -1);		// disable interrupts
361 	EMAC_WRITE(ETH_RBQP, 0);		// clear receive
362 	EMAC_WRITE(ETH_CFG, ETH_CFG_CLK_32 | ETH_CFG_SPD | ETH_CFG_FD | ETH_CFG_BIG);
363 	EMAC_WRITE(ETH_TCR, 0);			// send nothing
364 //	(void)EMAC_READ(ETH_ISR);
365 	u = EMAC_READ(ETH_TSR);
366 	EMAC_WRITE(ETH_TSR, (u & (ETH_TSR_UND | ETH_TSR_COMP | ETH_TSR_BNQ
367 				  | ETH_TSR_IDLE | ETH_TSR_RLE
368 				  | ETH_TSR_COL|ETH_TSR_OVR)));
369 	u = EMAC_READ(ETH_RSR);
370 	EMAC_WRITE(ETH_RSR, (u & (ETH_RSR_OVR|ETH_RSR_REC|ETH_RSR_BNA)));
371 
372 	/* configure EMAC */
373 	EMAC_WRITE(ETH_CFG, ETH_CFG_CLK_32 | ETH_CFG_SPD | ETH_CFG_FD | ETH_CFG_BIG);
374 	EMAC_WRITE(ETH_CTL, ETH_CTL_MPE);
375 #if 0
376 	if (device_cfdata(&sc->sc_dev)->cf_flags)
377 		mdcdiv = device_cfdata(&sc->sc_dev)->cf_flags;
378 #endif
379 	/* set ethernet address */
380 	EMAC_WRITE(ETH_SA1L, (sc->sc_enaddr[3] << 24)
381 		   | (sc->sc_enaddr[2] << 16) | (sc->sc_enaddr[1] << 8)
382 		   | (sc->sc_enaddr[0]));
383 	EMAC_WRITE(ETH_SA1H, (sc->sc_enaddr[5] << 8)
384 		   | (sc->sc_enaddr[4]));
385 	EMAC_WRITE(ETH_SA2L, 0);
386 	EMAC_WRITE(ETH_SA2H, 0);
387 	EMAC_WRITE(ETH_SA3L, 0);
388 	EMAC_WRITE(ETH_SA3H, 0);
389 	EMAC_WRITE(ETH_SA4L, 0);
390 	EMAC_WRITE(ETH_SA4H, 0);
391 
392 	/* Allocate DMA memory for the receive queue descriptors */
393 	sc->rbqlen = (ETH_RDSC_SIZE * (RX_QLEN + 1) * 2 + PAGE_SIZE - 1) / PAGE_SIZE;
394 	sc->rbqlen *= PAGE_SIZE;
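	/* the descriptor area is sized with slack (roughly twice RX_QLEN + 1
	 * descriptors) and rounded up to whole pages; the extra room is
	 * presumably headroom for the alignment constraint noted below */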
395 	DPRINTFN(1,("%s: rbqlen=%i\n", __FUNCTION__, sc->rbqlen));
396 
397 	err = bus_dmamem_alloc(sc->sc_dmat, sc->rbqlen, 0,
397 		MAX(16384, PAGE_SIZE),	// see EMAC errata for why a 16384-byte boundary is required
399 		&segs, 1, &rsegs, BUS_DMA_WAITOK);
400 	if (err == 0) {
401 		DPRINTFN(1,("%s: -> bus_dmamem_map\n", __FUNCTION__));
402 		err = bus_dmamem_map(sc->sc_dmat, &segs, 1, sc->rbqlen,
403 			&sc->rbqpage, (BUS_DMA_WAITOK|BUS_DMA_COHERENT));
404 	}
405 	if (err == 0) {
406 		DPRINTFN(1,("%s: -> bus_dmamap_create\n", __FUNCTION__));
407 		err = bus_dmamap_create(sc->sc_dmat, sc->rbqlen, 1,
408 			sc->rbqlen, MAX(16384, PAGE_SIZE), BUS_DMA_WAITOK,
409 			&sc->rbqpage_dmamap);
410 	}
411 	if (err == 0) {
412 		DPRINTFN(1,("%s: -> bus_dmamap_load\n", __FUNCTION__));
413 		err = bus_dmamap_load(sc->sc_dmat, sc->rbqpage_dmamap,
414 			sc->rbqpage, sc->rbqlen, NULL, BUS_DMA_WAITOK);
415 	}
416 	if (err != 0) {
417 		panic("%s: Cannot get DMA memory", device_xname(sc->sc_dev));
418 	}
419 	sc->rbqpage_dsaddr = sc->rbqpage_dmamap->dm_segs[0].ds_addr;
420 
421 	bzero(sc->rbqpage, sc->rbqlen);
422 
423 	/* Set up a pointer to the start of the receive descriptor queue in
424 	 * kernel address space.  Each descriptor entry uses 2 words.
425 	 */
426 	sc->RDSC = (void*)sc->rbqpage;
427 
428 	/* Populate the RXQ with mbufs */
429 	sc->rxqi = 0;
430 	for(i = 0; i < RX_QLEN; i++) {
431 		struct mbuf *m;
432 
433 		err = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, PAGE_SIZE,
434 			BUS_DMA_WAITOK, &sc->rxq[i].m_dmamap);
435 		if (err) {
436 			panic("%s: dmamap_create failed: %i\n", __FUNCTION__, err);
437 		}
438 		MGETHDR(m, M_WAIT, MT_DATA);
439 		MCLGET(m, M_WAIT);
440 		sc->rxq[i].m = m;
441 		if (mtod(m, intptr_t) & 3) {
442 			m_adj(m, mtod(m, intptr_t) & 3);
443 		}
444 		err = bus_dmamap_load(sc->sc_dmat, sc->rxq[i].m_dmamap,
445 			m->m_ext.ext_buf, MCLBYTES, NULL,
446 			BUS_DMA_WAITOK);
447 		if (err) {
448 			panic("%s: dmamap_load failed: %i\n", __FUNCTION__, err);
449 		}
450 		sc->RDSC[i].Addr = sc->rxq[i].m_dmamap->dm_segs[0].ds_addr
451 			| (i == (RX_QLEN-1) ? ETH_RDSC_F_WRAP : 0);
452 		sc->RDSC[i].Info = 0;
453 		bus_dmamap_sync(sc->sc_dmat, sc->rxq[i].m_dmamap, 0,
454 			MCLBYTES, BUS_DMASYNC_PREREAD);
455 	}
456 
457 	/* prepare transmit queue */
458 	for (i = 0; i < TX_QLEN; i++) {
459 		err = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 0,
460 					(BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW),
461 					&sc->txq[i].m_dmamap);
462 		if (err)
463 			panic("ARGH #1");
464 		sc->txq[i].m = NULL;
465 	}
466 
467 	/* Program the receive buffer queue pointer (ETH_RBQP) with the
468 	 * physical address of the descriptor ring.
469 	 */
470 	bus_dmamap_sync(sc->sc_dmat, sc->rbqpage_dmamap, 0, sc->rbqlen,
471 			 BUS_DMASYNC_PREREAD);
472 	addr = (void *)sc->rbqpage_dmamap->dm_segs[0].ds_addr;
473 	EMAC_WRITE(ETH_RBQP, (u_int32_t)addr);
474 
475 	/* HCLK is divided by 32 for the MDC clock (ETH_CFG_CLK_32 above); attach the MII */
476 	sc->sc_mii.mii_ifp = ifp;
477 	sc->sc_mii.mii_readreg = emac_mii_readreg;
478 	sc->sc_mii.mii_writereg = emac_mii_writereg;
479 	sc->sc_mii.mii_statchg = emac_statchg;
480 	ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, emac_mediachange,
481 		emac_mediastatus);
482 	mii_attach((device_t )sc, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
483 		MII_OFFSET_ANY, 0);
484 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
485 
486 	// interrupt and rx/tx enabling is deferred to emac_ifinit()
487 
488 #if 0
489 	// enable / disable interrupts
490 	EMAC_WRITE(ETH_IDR, -1);
491 	EMAC_WRITE(ETH_IER, ETH_ISR_RCOM | ETH_ISR_TBRE | ETH_ISR_TIDLE
492 		   | ETH_ISR_RBNA | ETH_ISR_ROVR);
493 //	(void)EMAC_READ(ETH_ISR); // why
494 
495 	// enable transmitter / receiver
496 	EMAC_WRITE(ETH_CTL, ETH_CTL_TE | ETH_CTL_RE | ETH_CTL_ISR
497 		   | ETH_CTL_CSR | ETH_CTL_MPE);
498 #endif
499 	/*
500 	 * We can support 802.1Q VLAN-sized frames.
501 	 */
502 	sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_MTU;
503 
504         strcpy(ifp->if_xname, device_xname(sc->sc_dev));
505         ifp->if_flags = IFF_BROADCAST|IFF_SIMPLEX|IFF_NOTRAILERS|IFF_MULTICAST;
506         ifp->if_ioctl = emac_ifioctl;
507         ifp->if_start = emac_ifstart;
508         ifp->if_watchdog = emac_ifwatchdog;
509         ifp->if_init = emac_ifinit;
510         ifp->if_stop = emac_ifstop;
511         ifp->if_timer = 0;
512 	ifp->if_softc = sc;
513         IFQ_SET_READY(&ifp->if_snd);
514         if_attach(ifp);
515         ether_ifattach(ifp, (sc)->sc_enaddr);
516 }
517 
518 static int
519 emac_mediachange(struct ifnet *ifp)
520 {
522 	if (ifp->if_flags & IFF_UP)
523 		emac_ifinit(ifp);
524 	return (0);
525 }
526 
527 static void
528 emac_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
529 {
532 	struct emac_softc *sc = ifp->if_softc;
533 
534 	mii_pollstat(&sc->sc_mii);
535 	ifmr->ifm_active = sc->sc_mii.mii_media_active;
536 	ifmr->ifm_status = sc->sc_mii.mii_media_status;
537 }
538 
539 
540 int
541 emac_mii_readreg(device_t self, int phy, int reg)
542 {
545 	struct emac_softc *sc;
546 
547 	sc = (struct emac_softc *)self;
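	/* an MDIO read is shuttled through the ETH_MAN register; the IDLE bit
	 * in ETH_SR is assumed to signal completion of the transaction */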
548 	EMAC_WRITE(ETH_MAN, (ETH_MAN_HIGH | ETH_MAN_RW_RD
549 			     | ((phy << ETH_MAN_PHYA_SHIFT) & ETH_MAN_PHYA)
550 			     | ((reg << ETH_MAN_REGA_SHIFT) & ETH_MAN_REGA)
551 			     | ETH_MAN_CODE_IEEE802_3));
552 	while (!(EMAC_READ(ETH_SR) & ETH_SR_IDLE)) ;
553 	return (EMAC_READ(ETH_MAN) & ETH_MAN_DATA);
554 }
555 
556 void
557 emac_mii_writereg(device_t self, int phy, int reg, int val)
558 {
561 	struct emac_softc *sc;
562 	sc = (struct emac_softc *)self;
563 	EMAC_WRITE(ETH_MAN, (ETH_MAN_HIGH | ETH_MAN_RW_WR
564 			     | ((phy << ETH_MAN_PHYA_SHIFT) & ETH_MAN_PHYA)
565 			     | ((reg << ETH_MAN_REGA_SHIFT) & ETH_MAN_REGA)
566 			     | ETH_MAN_CODE_IEEE802_3
567 			     | (val & ETH_MAN_DATA)));
568 	while (!(EMAC_READ(ETH_SR) & ETH_SR_IDLE)) ;
569 }
570 
571 
572 void
573 emac_statchg(device_t self)
574 {
576         struct emac_softc *sc = (struct emac_softc *)self;
577         u_int32_t reg;
578 
579         /*
580          * We must keep the MAC and the PHY in sync as
581          * to the status of full-duplex!
582          */
583 	reg = EMAC_READ(ETH_CFG);
584         if (sc->sc_mii.mii_media_active & IFM_FDX)
585                 reg |= ETH_CFG_FD;
586         else
587                 reg &= ~ETH_CFG_FD;
588 	EMAC_WRITE(ETH_CFG, reg);
589 }
590 
591 void
592 emac_tick(void *arg)
593 {
595 	struct emac_softc* sc = (struct emac_softc *)arg;
596 	struct ifnet * ifp = &sc->sc_ec.ec_if;
597 	int s;
598 	u_int32_t misses;
599 
600 	ifp->if_collisions += EMAC_READ(ETH_SCOL) + EMAC_READ(ETH_MCOL);
601 	/* These misses are ok, they will happen if the RAM/CPU can't keep up */
602 	misses = EMAC_READ(ETH_DRFC);
603 	if (misses > 0)
604 		printf("%s: %d rx misses\n", device_xname(sc->sc_dev), misses);
605 
606 	s = splnet();
607 	if (emac_gctx(sc) > 0 && IFQ_IS_EMPTY(&ifp->if_snd) == 0) {
608 		emac_ifstart(ifp);
609 	}
610 	splx(s);
611 
612 	mii_tick(&sc->sc_mii);
613 	callout_reset(&sc->emac_tick_ch, hz, emac_tick, sc);
614 }
615 
616 
617 static int
618 emac_ifioctl(struct ifnet *ifp, u_long cmd, void *data)
619 {
620 	struct emac_softc *sc = ifp->if_softc;
621 	struct ifreq *ifr = (struct ifreq *)data;
622 	int s, error;
623 
624 	s = splnet();
625 	switch(cmd) {
626 	case SIOCSIFMEDIA:
627 	case SIOCGIFMEDIA:
628 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
629 		break;
630 	default:
631 		error = ether_ioctl(ifp, cmd, data);
632 		if (error == ENETRESET) {
633 			if (ifp->if_flags & IFF_RUNNING)
634 				emac_setaddr(ifp);
635 			error = 0;
636 		}
637 	}
638 	splx(s);
639 	return error;
640 }
641 
642 static void
643 emac_ifstart(struct ifnet *ifp)
644 {
646 	struct emac_softc *sc = (struct emac_softc *)ifp->if_softc;
647 	struct mbuf *m;
648 	bus_dma_segment_t *segs;
649 	int s, bi, err, nsegs;
650 
651 	s = splnet();
652 start:
653 	if (emac_gctx(sc) == 0) {
654 		/* Enable transmit-buffer-free interrupt */
655 		EMAC_WRITE(ETH_IER, ETH_ISR_TBRE);
656 		ifp->if_flags |= IFF_OACTIVE;
657 		ifp->if_timer = 10;
658 		splx(s);
659 		return;
660 	}
661 
662 	ifp->if_timer = 0;
663 
664 	IFQ_POLL(&ifp->if_snd, m);
665 	if (m == NULL) {
666 		splx(s);
667 		return;
668 	}
669 //more:
670 	bi = (sc->txqi + sc->txqc) % TX_QLEN;
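	/* The transmit path (ETH_TAR/ETH_TCR) apparently takes a single,
	 * 4-byte-aligned buffer, so a chain that maps to more than one
	 * segment or to a misaligned address is copied into one mbuf first.
	 */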
671 	if ((err = bus_dmamap_load_mbuf(sc->sc_dmat, sc->txq[bi].m_dmamap, m,
672 		BUS_DMA_NOWAIT)) ||
673 		sc->txq[bi].m_dmamap->dm_segs[0].ds_addr & 0x3 ||
674 		sc->txq[bi].m_dmamap->dm_nsegs > 1) {
675 		/* Copy entire mbuf chain to new single */
676 		struct mbuf *mn;
677 
678 		if (err == 0)
679 			bus_dmamap_unload(sc->sc_dmat, sc->txq[bi].m_dmamap);
680 
681 		MGETHDR(mn, M_DONTWAIT, MT_DATA);
682 		if (mn == NULL) goto stop;
683 		if (m->m_pkthdr.len > MHLEN) {
684 			MCLGET(mn, M_DONTWAIT);
685 			if ((mn->m_flags & M_EXT) == 0) {
686 				m_freem(mn);
687 				goto stop;
688 			}
689 		}
690 		m_copydata(m, 0, m->m_pkthdr.len, mtod(mn, void *));
691 		mn->m_pkthdr.len = mn->m_len = m->m_pkthdr.len;
692 		IFQ_DEQUEUE(&ifp->if_snd, m);
693 		m_freem(m);
694 		m = mn;
695 		bus_dmamap_load_mbuf(sc->sc_dmat, sc->txq[bi].m_dmamap, m,
696 			BUS_DMA_NOWAIT);
697 	} else {
698 		IFQ_DEQUEUE(&ifp->if_snd, m);
699 	}
700 
701 #if NBPFILTER > 0
702 	if (ifp->if_bpf)
703 		bpf_mtap(ifp->if_bpf, m);
704 #endif /* NBPFILTER > 0 */
705 
706 	nsegs = sc->txq[bi].m_dmamap->dm_nsegs;
707 	segs = sc->txq[bi].m_dmamap->dm_segs;
708 	if (nsegs > 1) {
709 		panic("#### ARGH #2");
710 	}
711 
712 	sc->txq[bi].m = m;
713 	sc->txqc++;
714 
715 	DPRINTFN(2,("%s: start sending idx #%i mbuf %p (txqc=%i, phys %p), len=%u\n", __FUNCTION__, bi, sc->txq[bi].m, sc->txqc, (void*)segs->ds_addr,
716 		       (unsigned)m->m_pkthdr.len));
717 #ifdef	DIAGNOSTIC
718 	if (sc->txqc > TX_QLEN) {
719 		panic("%s: txqc %i > %i", __FUNCTION__, sc->txqc, TX_QLEN);
720 	}
721 #endif
722 
723 	bus_dmamap_sync(sc->sc_dmat, sc->txq[bi].m_dmamap, 0,
724 		sc->txq[bi].m_dmamap->dm_mapsize,
725 		BUS_DMASYNC_PREWRITE);
726 
727 	EMAC_WRITE(ETH_TAR, segs->ds_addr);
728 	EMAC_WRITE(ETH_TCR, m->m_pkthdr.len);
729 	if (IFQ_IS_EMPTY(&ifp->if_snd) == 0)
730 		goto start;
731 stop:
732 
733 	splx(s);
734 	return;
735 }
736 
737 static void
738 emac_ifwatchdog(struct ifnet *ifp)
739 {
741 	struct emac_softc *sc = (struct emac_softc *)ifp->if_softc;
742 
743 	if ((ifp->if_flags & IFF_RUNNING) == 0)
744 		return;
745        	printf("%s: device timeout, CTL = 0x%08x, CFG = 0x%08x\n",
746 		device_xname(sc->sc_dev), EMAC_READ(ETH_CTL), EMAC_READ(ETH_CFG));
747 }
748 
749 static int
750 emac_ifinit(struct ifnet *ifp)
751 {
753 	struct emac_softc *sc = ifp->if_softc;
754 	int s = splnet();
755 
756 	callout_stop(&sc->emac_tick_ch);
757 
758 	// enable interrupts
759 	EMAC_WRITE(ETH_IDR, -1);
760 	EMAC_WRITE(ETH_IER, ETH_ISR_RCOM | ETH_ISR_TBRE | ETH_ISR_TIDLE
761 		   | ETH_ISR_RBNA | ETH_ISR_ROVR);
762 
763 	// enable transmitter / receiver
764 	EMAC_WRITE(ETH_CTL, ETH_CTL_TE | ETH_CTL_RE | ETH_CTL_ISR
765 		   | ETH_CTL_CSR | ETH_CTL_MPE);
766 
767 	mii_mediachg(&sc->sc_mii);
768 	callout_reset(&sc->emac_tick_ch, hz, emac_tick, sc);
769         ifp->if_flags |= IFF_RUNNING;
770 	splx(s);
771 	return 0;
772 }
773 
774 static void
775 emac_ifstop(struct ifnet *ifp, int disable)
776 {
779 //	u_int32_t u;
780 	struct emac_softc *sc = ifp->if_softc;
781 
782 #if 0
783 	EMAC_WRITE(ETH_CTL, ETH_CTL_MPE);	// disable everything
784 	EMAC_WRITE(ETH_IDR, -1);		// disable interrupts
785 //	EMAC_WRITE(ETH_RBQP, 0);		// clear receive
786 	EMAC_WRITE(ETH_CFG, ETH_CFG_CLK_32 | ETH_CFG_SPD | ETH_CFG_FD | ETH_CFG_BIG);
787 	EMAC_WRITE(ETH_TCR, 0);			// send nothing
788 //	(void)EMAC_READ(ETH_ISR);
789 	u = EMAC_READ(ETH_TSR);
790 	EMAC_WRITE(ETH_TSR, (u & (ETH_TSR_UND | ETH_TSR_COMP | ETH_TSR_BNQ
791 				  | ETH_TSR_IDLE | ETH_TSR_RLE
792 				  | ETH_TSR_COL|ETH_TSR_OVR)));
793 	u = EMAC_READ(ETH_RSR);
794 	EMAC_WRITE(ETH_RSR, (u & (ETH_RSR_OVR|ETH_RSR_REC|ETH_RSR_BNA)));
795 #endif
796 	callout_stop(&sc->emac_tick_ch);
797 
798 	/* Down the MII. */
799 	mii_down(&sc->sc_mii);
800 
801 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
802 	ifp->if_timer = 0;
803 	sc->sc_mii.mii_media_status &= ~IFM_ACTIVE;
804 }
805 
806 static void
807 emac_setaddr(struct ifnet *ifp)
808 {
810 	struct emac_softc *sc = ifp->if_softc;
811 	struct ethercom *ac = &sc->sc_ec;
812 	struct ether_multi *enm;
813 	struct ether_multistep step;
814 	u_int8_t ias[3][ETHER_ADDR_LEN];
815 	u_int32_t h, nma = 0, hashes[2] = { 0, 0 };
816 	u_int32_t ctl = EMAC_READ(ETH_CTL);
817 	u_int32_t cfg = EMAC_READ(ETH_CFG);
818 
819 	/* disable receiver temporarily */
820 	EMAC_WRITE(ETH_CTL, ctl & ~ETH_CTL_RE);
821 
822 	cfg &= ~(ETH_CFG_MTI | ETH_CFG_UNI | ETH_CFG_CAF);
823 
824 	if (ifp->if_flags & IFF_PROMISC) {
825 		cfg |=  ETH_CFG_CAF;
826 	} else {
827 		cfg &= ~ETH_CFG_CAF;
828 	}
829 
830 	// ETH_CFG_BIG?
831 
832 	ifp->if_flags &= ~IFF_ALLMULTI;
833 
834 	ETHER_FIRST_MULTI(step, ac, enm);
835 	while (enm != NULL) {
836 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
837 			/*
838 			 * We must listen to a range of multicast addresses.
839 			 * For now, just accept all multicasts, rather than
840 			 * trying to set only those filter bits needed to match
841 			 * the range.  (At this time, the only use of address
842 			 * ranges is for IP multicast routing, for which the
843 			 * range is big enough to require all bits set.)
844 			 */
845 			cfg |= ETH_CFG_CAF;
846 			hashes[0] = 0xffffffffUL;
847 			hashes[1] = 0xffffffffUL;
848 			ifp->if_flags |= IFF_ALLMULTI;
849 			nma = 0;
850 			break;
851 		}
852 
853 		if (nma < 3) {
854 			/* We can program 3 perfect address filters for mcast */
855 			memcpy(ias[nma], enm->enm_addrlo, ETHER_ADDR_LEN);
856 		} else {
857 			/*
858 			 * XXX: Datasheet is not very clear here, I'm not sure
859 			 * if I'm doing this right.  --joff
860 			 */
861 			h = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN);
862 
863 			/* Just want the 6 most-significant bits. */
864 			h = h >> 26;
865 
866 			hashes[ h / 32 ] |=  (1 << (h % 32));
867 			cfg |= ETH_CFG_MTI;
868 		}
869 		ETHER_NEXT_MULTI(step, enm);
870 		nma++;
871 	}
872 
873 	// program...
874 	DPRINTFN(1,("%s: en0 %02x:%02x:%02x:%02x:%02x:%02x\n", __FUNCTION__,
875 		    sc->sc_enaddr[0], sc->sc_enaddr[1], sc->sc_enaddr[2],
876 		    sc->sc_enaddr[3], sc->sc_enaddr[4], sc->sc_enaddr[5]));
877 	EMAC_WRITE(ETH_SA1L, (sc->sc_enaddr[3] << 24)
878 		   | (sc->sc_enaddr[2] << 16) | (sc->sc_enaddr[1] << 8)
879 		   | (sc->sc_enaddr[0]));
880 	EMAC_WRITE(ETH_SA1H, (sc->sc_enaddr[5] << 8)
881 		   | (sc->sc_enaddr[4]));
882 	if (nma > 0) {
883 		DPRINTFN(1,("%s: en1 %02x:%02x:%02x:%02x:%02x:%02x\n", __FUNCTION__,
884 		       ias[0][0], ias[0][1], ias[0][2],
885 		       ias[0][3], ias[0][4], ias[0][5]));
886 		EMAC_WRITE(ETH_SA2L, (ias[0][3] << 24)
887 			   | (ias[0][2] << 16) | (ias[0][1] << 8)
888 			   | (ias[0][0]));
889 		EMAC_WRITE(ETH_SA2H, (ias[0][4] << 8)
890 			   | (ias[0][5]));
891 	}
892 	if (nma > 1) {
893 		DPRINTFN(1,("%s: en2 %02x:%02x:%02x:%02x:%02x:%02x\n", __FUNCTION__,
894 		       ias[1][0], ias[1][1], ias[1][2],
895 		       ias[1][3], ias[1][4], ias[1][5]));
896 		EMAC_WRITE(ETH_SA3L, (ias[1][3] << 24)
897 			   | (ias[1][2] << 16) | (ias[1][1] << 8)
898 			   | (ias[1][0]));
899 		EMAC_WRITE(ETH_SA3H, (ias[1][4] << 8)
900 			   | (ias[1][5]));
901 	}
902 	if (nma > 2) {
903 		DPRINTFN(1,("%s: en3 %02x:%02x:%02x:%02x:%02x:%02x\n", __FUNCTION__,
904 		       ias[2][0], ias[2][1], ias[2][2],
905 		       ias[2][3], ias[2][4], ias[2][5]));
906 		EMAC_WRITE(ETH_SA4L, (ias[2][3] << 24)
907 			   | (ias[2][2] << 16) | (ias[2][1] << 8)
908 			   | (ias[2][0]));
909 		EMAC_WRITE(ETH_SA4H, (ias[2][4] << 8)
910 			   | (ias[2][5]));
911 	}
912 	EMAC_WRITE(ETH_HSH, hashes[0]);
913 	EMAC_WRITE(ETH_HSL, hashes[1]);
914 	EMAC_WRITE(ETH_CFG, cfg);
915 	EMAC_WRITE(ETH_CTL, ctl | ETH_CTL_RE);
916 }
917