/*	$NetBSD: if_cemac.c,v 1.45 2024/10/15 00:58:15 lloyd Exp $	*/

/*
 * Copyright (c) 2015  Genetec Corporation.  All rights reserved.
 * Written by Hashimoto Kenichi for Genetec Corporation.
 *
 * Based on arch/arm/at91/at91emac.c
 *
 * Copyright (c) 2007 Embedtronics Oy
 * All rights reserved.
 *
 * Copyright (c) 2004 Jesse Off
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Cadence EMAC/GEM Ethernet controller IP driver,
 * used by the arm/at91 and arm/zynq SoCs.
 */

/*
 * Lock order:
 *
 *	IFNET_LOCK -> sc_mcast_lock
 *	IFNET_LOCK -> sc_intr_lock
 */
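
/*
 * sc_mcast_lock protects the multicast filter state and the sc_if_flags
 * snapshot; sc_intr_lock protects the descriptor rings and sc_stopping.
 * The ioctl/init/stop paths acquire them while already holding
 * IFNET_LOCK, never the reverse.
 */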


#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_cemac.c,v 1.45 2024/10/15 00:58:15 lloyd Exp $");

#include <sys/param.h>
#include <sys/types.h>

#include <sys/bus.h>
#include <sys/device.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/time.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_types.h>
#include <net/if_media.h>
#include <net/if_ether.h>
#include <net/bpf.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/if_inarp.h>
#endif

#include <dev/cadence/cemacreg.h>
#include <dev/cadence/if_cemacvar.h>

#ifndef CEMAC_WATCHDOG_TIMEOUT
#define CEMAC_WATCHDOG_TIMEOUT 5
#endif
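/* Watchdog timeout, in seconds; cemac_watchdog_check() compares it
 * against time_uptime deltas. */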
static int cemac_watchdog_timeout = CEMAC_WATCHDOG_TIMEOUT;

#define DEFAULT_MDCDIV	32

#define CEMAC_READ(x) \
	bus_space_read_4(sc->sc_iot, sc->sc_ioh, (x))
#define CEMAC_WRITE(x, y) \
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, (x), (y))
#define CEMAC_GEM_WRITE(x, y)						      \
    do {								      \
	if (ISSET(sc->cemac_flags, CEMAC_FLAG_GEM))			      \
		bus_space_write_4(sc->sc_iot, sc->sc_ioh, (GEM_##x), (y));    \
	else								      \
		bus_space_write_4(sc->sc_iot, sc->sc_ioh, (ETH_##x), (y));    \
    } while(0)
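
/*
 * CEMAC_GEM_WRITE(FOO, v) writes register GEM_FOO on GEM-class
 * controllers and ETH_FOO on the older EMAC, for those registers whose
 * offsets differ between the two cores.
 */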

static void	cemac_init(struct cemac_softc *);
static int	cemac_gctx(struct cemac_softc *);
static int	cemac_mediachange(struct ifnet *);
static void	cemac_mediastatus(struct ifnet *, struct ifmediareq *);
static int	cemac_mii_readreg(device_t, int, int, uint16_t *);
static int	cemac_mii_writereg(device_t, int, int, uint16_t);
static void	cemac_statchg(struct ifnet *);
static void	cemac_tick(void *);
static int	cemac_ifioctl(struct ifnet *, u_long, void *);
static void	cemac_ifstart(struct ifnet *);
static void	cemac_ifstart_locked(struct ifnet *);
static void	cemac_ifwatchdog(struct ifnet *);
static int	cemac_ifinit(struct ifnet *);
static void	cemac_ifstop(struct ifnet *, int);
static void	cemac_setaddr(struct ifnet *);

#ifdef	CEMAC_DEBUG
int cemac_debug = CEMAC_DEBUG;
#define	DPRINTFN(n, fmt)	if (cemac_debug >= (n)) printf fmt
#else
#define	DPRINTFN(n, fmt)
#endif
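
/*
 * Call sites pass the printf arguments in an extra set of parentheses,
 * e.g. DPRINTFN(2, ("%s: isr=0x%08X\n", __FUNCTION__, isr)); the inner
 * pair collapses the variadic argument list into a single macro
 * parameter, so the call compiles away when CEMAC_DEBUG is not defined.
 */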

/*
 * Perform an interface watchdog reset.
 */
static void
cemac_handle_reset_work(struct work *work, void *arg)
{
	struct cemac_softc * const sc = arg;
	struct ifnet * const ifp = &sc->sc_ethercom.ec_if;

	printf("%s: watchdog timeout -- resetting\n", ifp->if_xname);

	/* Don't want ioctl operations to happen */
	IFNET_LOCK(ifp);

	/* reset the interface. */
	cemac_ifinit(ifp);

	IFNET_UNLOCK(ifp);
	/*
	 * Some upper-layer processing (e.g. ALTQ, or a single-CPU
	 * system) may still call ifp->if_start() directly.
	 */
	/* Try to get more packets going. */
	ifp->if_start(ifp);

	atomic_store_relaxed(&sc->sc_reset_pending, 0);
}


void
cemac_attach_common(struct cemac_softc *sc)
{
	uint32_t u;

	aprint_naive("\n");
	if (ISSET(sc->cemac_flags, CEMAC_FLAG_GEM))
		aprint_normal(": Cadence Gigabit Ethernet Controller\n");
	else
		aprint_normal(": Cadence Ethernet Controller\n");

	/* configure emac: */
	CEMAC_WRITE(ETH_CTL, 0);		// disable everything
	CEMAC_WRITE(ETH_IDR, -1);		// disable interrupts
	CEMAC_WRITE(ETH_RBQP, 0);		// clear receive
	CEMAC_WRITE(ETH_TBQP, 0);		// clear transmit
	if (ISSET(sc->cemac_flags, CEMAC_FLAG_GEM))
		CEMAC_WRITE(ETH_CFG,
		    GEM_CFG_CLK_64 | GEM_CFG_GEN | ETH_CFG_SPD | ETH_CFG_FD);
	else
		CEMAC_WRITE(ETH_CFG,
		    ETH_CFG_CLK_32 | ETH_CFG_SPD | ETH_CFG_FD | ETH_CFG_BIG);
	//CEMAC_WRITE(ETH_TCR, 0);		// send nothing
	//(void)CEMAC_READ(ETH_ISR);
	u = CEMAC_READ(ETH_TSR);
	CEMAC_WRITE(ETH_TSR, (u & (ETH_TSR_UND | ETH_TSR_COMP | ETH_TSR_BNQ
				  | ETH_TSR_IDLE | ETH_TSR_RLE
				  | ETH_TSR_COL | ETH_TSR_OVR)));
	u = CEMAC_READ(ETH_RSR);
	CEMAC_WRITE(ETH_RSR, (u & (ETH_RSR_OVR | ETH_RSR_REC | ETH_RSR_BNA)));

	/* Fetch the Ethernet address from property if set. */
	prop_dictionary_t prop = device_properties(sc->sc_dev);
	prop_data_t enaddr = prop_dictionary_get(prop, "mac-address");

	if (enaddr != NULL) {
		KASSERT(prop_object_type(enaddr) == PROP_TYPE_DATA);
		KASSERT(prop_data_size(enaddr) == ETHER_ADDR_LEN);
		memcpy(sc->sc_enaddr, prop_data_value(enaddr),
		       ETHER_ADDR_LEN);
	} else {
		static const uint8_t hardcoded[ETHER_ADDR_LEN] = {
			0x00, 0x0d, 0x10, 0x81, 0x0c, 0x94
		};
		memcpy(sc->sc_enaddr, hardcoded, ETHER_ADDR_LEN);
	}

	cemac_init(sc);
}

static int
cemac_gctx(struct cemac_softc *sc)
{
	uint32_t tsr;

	tsr = CEMAC_READ(ETH_TSR);
	if (!ISSET(sc->cemac_flags, CEMAC_FLAG_GEM)) {
		// no space left
		if (!(tsr & ETH_TSR_BNQ))
			return 0;
	} else {
		if (tsr & GEM_TSR_TXGO)
			return 0;
	}
	CEMAC_WRITE(ETH_TSR, tsr);

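	/*
	 * On GEM, every queued frame whose transmission has completed
	 * can be reclaimed here; on the older EMAC, one frame is left
	 * outstanding unless the transmitter has gone idle
	 * (ETH_TSR_IDLE).
	 */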
	// free sent frames
	while (sc->txqc > (ISSET(sc->cemac_flags, CEMAC_FLAG_GEM) ? 0 :
		(tsr & ETH_TSR_IDLE ? 0 : 1))) {
		int bi = sc->txqi % TX_QLEN;

		DPRINTFN(3,("%s: TDSC[%i].Addr 0x%08x\n",
			__FUNCTION__, bi, sc->TDSC[bi].Addr));
		DPRINTFN(3,("%s: TDSC[%i].Info 0x%08x\n",
			__FUNCTION__, bi, sc->TDSC[bi].Info));

		bus_dmamap_sync(sc->sc_dmat, sc->txq[bi].m_dmamap, 0,
		    sc->txq[bi].m->m_pkthdr.len, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, sc->txq[bi].m_dmamap);
		m_freem(sc->txq[bi].m);
		DPRINTFN(2,("%s: freed idx #%i mbuf %p (txqc=%i)\n",
		    __FUNCTION__, bi, sc->txq[bi].m, sc->txqc));
		sc->txq[bi].m = NULL;
		sc->txqi = (bi + 1) % TX_QLEN;
		sc->txqc--;
	}

	// mark that the transmitter is free again
	if (sc->sc_txbusy) {
		sc->sc_txbusy = false;
		/* Disable transmit-buffer-free interrupt */
		/*CEMAC_WRITE(ETH_IDR, ETH_ISR_TBRE);*/
	}

	return 1;
}

int
cemac_intr(void *arg)
{
	struct cemac_softc * const sc = arg;
	struct ifnet * const ifp = &sc->sc_ethercom.ec_if;
	uint32_t imr, isr, ctl;
#ifdef	CEMAC_DEBUG
	uint32_t rsr;
#endif
	int bi;

	mutex_enter(sc->sc_intr_lock);
	if (sc->sc_stopping) {
		mutex_exit(sc->sc_intr_lock);
		return 0;
	}

	imr = ~CEMAC_READ(ETH_IMR);
	if (!(imr & (ETH_ISR_RCOM | ETH_ISR_TBRE | ETH_ISR_TIDLE |
	    ETH_ISR_RBNA | ETH_ISR_ROVR | ETH_ISR_TCOM))) {
		// interrupt not enabled, can't be us
		mutex_exit(sc->sc_intr_lock);
		return 0;
	}

	isr = CEMAC_READ(ETH_ISR);
	CEMAC_WRITE(ETH_ISR, isr);
	isr &= imr;

	if (isr == 0) {
		mutex_exit(sc->sc_intr_lock);
		return 0;
	}

#ifdef	CEMAC_DEBUG
	rsr = CEMAC_READ(ETH_RSR);		// get receive status register
#endif
	DPRINTFN(2, ("%s: isr=0x%08X rsr=0x%08X imr=0x%08X\n", __FUNCTION__,
	    isr, rsr, imr));

	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
	// out of receive buffers
	if (isr & ETH_ISR_RBNA) {
		// clear interrupt
		CEMAC_WRITE(ETH_RSR, ETH_RSR_BNA);

		ctl = CEMAC_READ(ETH_CTL);
		// disable receiver
		CEMAC_WRITE(ETH_CTL, ctl & ~ETH_CTL_RE);
		// clear BNA bit
		CEMAC_WRITE(ETH_RSR, ETH_RSR_BNA);
		// re-enable receiver
		CEMAC_WRITE(ETH_CTL, ctl | ETH_CTL_RE);

		if_statinc_ref(ifp, nsr, if_ierrors);
		if_statinc_ref(ifp, nsr, if_ipackets);
		DPRINTFN(1,("%s: out of receive buffers\n", __FUNCTION__));
	}
	if (isr & ETH_ISR_ROVR) {
		// clear interrupt
		CEMAC_WRITE(ETH_RSR, ETH_RSR_OVR);
		if_statinc_ref(ifp, nsr, if_ierrors);
		if_statinc_ref(ifp, nsr, if_ipackets);
		DPRINTFN(1,("%s: receive overrun\n", __FUNCTION__));
	}

	// packet has been received!
	if (isr & ETH_ISR_RCOM) {
		uint32_t nfo;
		DPRINTFN(2,("#2 RDSC[%i].INFO=0x%08X\n", sc->rxqi % RX_QLEN,
		    sc->RDSC[sc->rxqi % RX_QLEN].Info));
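		/*
		 * ETH_RDSC_F_USED is set by the controller once it has
		 * written a frame into the buffer, so walk the ring until
		 * we reach a descriptor the hardware still owns.
		 */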
		while (sc->RDSC[(bi = sc->rxqi % RX_QLEN)].Addr & ETH_RDSC_F_USED) {
			int fl, csum;
			struct mbuf *m;

			nfo = sc->RDSC[bi].Info;
			fl = (nfo & ETH_RDSC_I_LEN) - 4;
			DPRINTFN(2,("## nfo=0x%08X\n", nfo));

			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m != NULL)
				MCLGET(m, M_DONTWAIT);
			if (m != NULL && (m->m_flags & M_EXT)) {
				bus_dmamap_sync(sc->sc_dmat,
				    sc->rxq[bi].m_dmamap, 0, MCLBYTES,
				    BUS_DMASYNC_POSTREAD);
				bus_dmamap_unload(sc->sc_dmat,
					sc->rxq[bi].m_dmamap);
				m_set_rcvif(sc->rxq[bi].m, ifp);
				sc->rxq[bi].m->m_pkthdr.len =
					sc->rxq[bi].m->m_len = fl;
				switch (nfo & ETH_RDSC_I_CHKSUM) {
				case ETH_RDSC_I_CHKSUM_IP:
					csum = M_CSUM_IPv4;
					break;
				case ETH_RDSC_I_CHKSUM_UDP:
					csum = M_CSUM_IPv4 | M_CSUM_UDPv4 |
					    M_CSUM_UDPv6;
					break;
				case ETH_RDSC_I_CHKSUM_TCP:
					csum = M_CSUM_IPv4 | M_CSUM_TCPv4 |
					    M_CSUM_TCPv6;
					break;
				default:
					csum = 0;
					break;
				}
				sc->rxq[bi].m->m_pkthdr.csum_flags = csum;
				DPRINTFN(2,("received %u bytes packet\n", fl));
				if_percpuq_enqueue(ifp->if_percpuq,
						   sc->rxq[bi].m);
				if (mtod(m, intptr_t) & 3)
					m_adj(m, mtod(m, intptr_t) & 3);
				sc->rxq[bi].m = m;
				bus_dmamap_load(sc->sc_dmat,
				    sc->rxq[bi].m_dmamap, m->m_ext.ext_buf,
					MCLBYTES, NULL, BUS_DMA_NOWAIT);
				bus_dmamap_sync(sc->sc_dmat,
				    sc->rxq[bi].m_dmamap, 0, MCLBYTES,
				    BUS_DMASYNC_PREREAD);
				sc->RDSC[bi].Info = 0;
				sc->RDSC[bi].Addr =
				    sc->rxq[bi].m_dmamap->dm_segs[0].ds_addr
				    | (bi == (RX_QLEN-1) ? ETH_RDSC_F_WRAP : 0);
			} else {
				/* Drop packets until we can get replacement
				 * empty mbufs for the RXDQ.
				 */
				m_freem(m);
				if_statinc_ref(ifp, nsr, if_ierrors);
			}
			sc->rxqi++;
		}
	}

	IF_STAT_PUTREF(ifp);

	if (cemac_gctx(sc) > 0)
		if_schedule_deferred_start(ifp);
#if 0 // reloop
	irq = CEMAC_READ(IntStsC);
	if ((irq & (IntSts_RxSQ | IntSts_ECI)) != 0)
		goto begin;
#endif

	mutex_exit(sc->sc_intr_lock);

	return 1;
}


static int
cemac_ifflags_cb(struct ethercom *ec)
{
	struct ifnet * const ifp = &ec->ec_if;
	struct cemac_softc * const sc = ifp->if_softc;
	int ret = 0;

	KASSERT(IFNET_LOCKED(ifp));
	mutex_enter(sc->sc_mcast_lock);

	u_short change = ifp->if_flags ^ sc->sc_if_flags;
	sc->sc_if_flags = ifp->if_flags;

	if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
		ret = ENETRESET;
	} else if ((change & IFF_PROMISC) != 0) {
		if ((sc->sc_if_flags & IFF_RUNNING) != 0)
			cemac_setaddr(ifp);
	}
	mutex_exit(sc->sc_mcast_lock);

	return ret;
}

static void
cemac_init(struct cemac_softc *sc)
{
	bus_dma_segment_t segs;
	int rsegs, err, i;
	struct ifnet * const ifp = &sc->sc_ethercom.ec_if;
	struct mii_data * const mii = &sc->sc_mii;
	uint32_t u;
#if 0
	int mdcdiv = DEFAULT_MDCDIV;
#endif

	callout_init(&sc->cemac_tick_ch, CALLOUT_MPSAFE);
	callout_setfunc(&sc->cemac_tick_ch, cemac_tick, sc);

	// reset the controller to a known state
	CEMAC_WRITE(ETH_CTL, ETH_CTL_MPE);	// disable rx/tx, keep MDIO
	CEMAC_WRITE(ETH_IDR, -1);		// disable interrupts
	CEMAC_WRITE(ETH_RBQP, 0);		// clear receive queue pointer
	CEMAC_WRITE(ETH_TBQP, 0);		// clear transmit queue pointer
	if (ISSET(sc->cemac_flags, CEMAC_FLAG_GEM))
		CEMAC_WRITE(ETH_CFG,
		    GEM_CFG_CLK_64 | ETH_CFG_SPD | ETH_CFG_FD | ETH_CFG_BIG);
	else
		CEMAC_WRITE(ETH_CFG,
		    ETH_CFG_CLK_32 | ETH_CFG_SPD | ETH_CFG_FD | ETH_CFG_BIG);
	if (ISSET(sc->cemac_flags, CEMAC_FLAG_GEM)) {
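		/*
		 * GEM_DMA_CFG_RX_BUF_SIZE is encoded in 64-byte units, so
		 * (MCLBYTES + 63) / 64 sizes each receive buffer to hold
		 * a full mbuf cluster (32 units for the common 2048-byte
		 * MCLBYTES).
		 */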
		CEMAC_WRITE(GEM_DMA_CFG,
		    __SHIFTIN((MCLBYTES + 63) / 64, GEM_DMA_CFG_RX_BUF_SIZE) |
		    __SHIFTIN(3, GEM_DMA_CFG_RX_PKTBUF_MEMSZ_SEL) |
		    GEM_DMA_CFG_TX_PKTBUF_MEMSZ_SEL |
		    __SHIFTIN(16, GEM_DMA_CFG_AHB_FIXED_BURST_LEN) |
		    GEM_DMA_CFG_DISC_WHEN_NO_AHB);
	}
//	CEMAC_WRITE(ETH_TCR, 0);			// send nothing
//	(void)CEMAC_READ(ETH_ISR);
	u = CEMAC_READ(ETH_TSR);
	CEMAC_WRITE(ETH_TSR, (u & (ETH_TSR_UND | ETH_TSR_COMP | ETH_TSR_BNQ
		    | ETH_TSR_IDLE | ETH_TSR_RLE
		    | ETH_TSR_COL | ETH_TSR_OVR)));
	u = CEMAC_READ(ETH_RSR);
	CEMAC_WRITE(ETH_RSR, (u & (ETH_RSR_OVR | ETH_RSR_REC | ETH_RSR_BNA)));

#if 0
	if (device_cfdata(sc->sc_dev)->cf_flags)
		mdcdiv = device_cfdata(sc->sc_dev)->cf_flags;
#endif
	/* set ethernet address */
	CEMAC_GEM_WRITE(SA1L, (sc->sc_enaddr[3] << 24)
	    | (sc->sc_enaddr[2] << 16) | (sc->sc_enaddr[1] << 8)
	    | (sc->sc_enaddr[0]));
	CEMAC_GEM_WRITE(SA1H, (sc->sc_enaddr[5] << 8)
	    | (sc->sc_enaddr[4]));
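	/*
	 * The address registers take the MAC address least-significant
	 * byte first: SA1L holds bytes 0-3 and SA1H bytes 4-5, so e.g.
	 * 00:0d:10:81:0c:94 is programmed as SA1L = 0x81100d00,
	 * SA1H = 0x940c.
	 */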
	CEMAC_GEM_WRITE(SA2L, 0);
	CEMAC_GEM_WRITE(SA2H, 0);
	CEMAC_GEM_WRITE(SA3L, 0);
	CEMAC_GEM_WRITE(SA3H, 0);
	CEMAC_GEM_WRITE(SA4L, 0);
	CEMAC_GEM_WRITE(SA4H, 0);

	char wqname[MAXCOMLEN];
	snprintf(wqname, sizeof(wqname), "%sReset", device_xname(sc->sc_dev));
	int error = workqueue_create(&sc->sc_reset_wq, wqname,
	    cemac_handle_reset_work, sc, PRI_NONE, IPL_SOFTCLOCK,
	    WQ_MPSAFE);
	if (error) {
		aprint_error_dev(sc->sc_dev,
		    "unable to create reset workqueue\n");
		return;
	}

	/* Allocate memory for receive queue descriptors */
	sc->rbqlen = roundup(ETH_DSC_SIZE * (RX_QLEN + 1) * 2, PAGE_SIZE);
	DPRINTFN(1,("%s: rbqlen=%i\n", __FUNCTION__, sc->rbqlen));

	// see the EMAC errata for why this is forced to a 16384-byte boundary
	err = bus_dmamem_alloc(sc->sc_dmat, sc->rbqlen, 0,
	    MAX(16384, PAGE_SIZE), &segs, 1, &rsegs, BUS_DMA_WAITOK);
	if (err == 0) {
		DPRINTFN(1,("%s: -> bus_dmamem_map\n", __FUNCTION__));
		err = bus_dmamem_map(sc->sc_dmat, &segs, 1, sc->rbqlen,
		    &sc->rbqpage, (BUS_DMA_WAITOK | BUS_DMA_COHERENT));
	}
	if (err == 0) {
		DPRINTFN(1,("%s: -> bus_dmamap_create\n", __FUNCTION__));
		err = bus_dmamap_create(sc->sc_dmat, sc->rbqlen, 1,
		    sc->rbqlen, MAX(16384, PAGE_SIZE), BUS_DMA_WAITOK,
		    &sc->rbqpage_dmamap);
	}
	if (err == 0) {
		DPRINTFN(1,("%s: -> bus_dmamap_load\n", __FUNCTION__));
		err = bus_dmamap_load(sc->sc_dmat, sc->rbqpage_dmamap,
		    sc->rbqpage, sc->rbqlen, NULL, BUS_DMA_WAITOK);
	}
	if (err != 0)
		panic("%s: Cannot get DMA memory", device_xname(sc->sc_dev));

	sc->rbqpage_dsaddr = sc->rbqpage_dmamap->dm_segs[0].ds_addr;
	memset(sc->rbqpage, 0, sc->rbqlen);

	/* Allocate memory for transmit queue descriptors */
	sc->tbqlen = roundup(ETH_DSC_SIZE * (TX_QLEN + 1) * 2, PAGE_SIZE);
	DPRINTFN(1,("%s: tbqlen=%i\n", __FUNCTION__, sc->tbqlen));

	// see the EMAC errata for why this is forced to a 16384-byte boundary
	err = bus_dmamem_alloc(sc->sc_dmat, sc->tbqlen, 0,
	    MAX(16384, PAGE_SIZE), &segs, 1, &rsegs, BUS_DMA_WAITOK);
	if (err == 0) {
		DPRINTFN(1,("%s: -> bus_dmamem_map\n", __FUNCTION__));
		err = bus_dmamem_map(sc->sc_dmat, &segs, 1, sc->tbqlen,
		    &sc->tbqpage, (BUS_DMA_WAITOK | BUS_DMA_COHERENT));
	}
	if (err == 0) {
		DPRINTFN(1,("%s: -> bus_dmamap_create\n", __FUNCTION__));
		err = bus_dmamap_create(sc->sc_dmat, sc->tbqlen, 1,
		    sc->tbqlen, MAX(16384, PAGE_SIZE), BUS_DMA_WAITOK,
		    &sc->tbqpage_dmamap);
	}
	if (err == 0) {
		DPRINTFN(1,("%s: -> bus_dmamap_load\n", __FUNCTION__));
		err = bus_dmamap_load(sc->sc_dmat, sc->tbqpage_dmamap,
		    sc->tbqpage, sc->tbqlen, NULL, BUS_DMA_WAITOK);
	}
	if (err != 0)
		panic("%s: Cannot get DMA memory", device_xname(sc->sc_dev));

	sc->tbqpage_dsaddr = sc->tbqpage_dmamap->dm_segs[0].ds_addr;
	memset(sc->tbqpage, 0, sc->tbqlen);

	/* Set up pointers to the start of each queue in kernel address
	 * space.  Each descriptor queue or status queue entry uses 2 words.
	 */
	sc->RDSC = (void *)sc->rbqpage;
	sc->TDSC = (void *)sc->tbqpage;
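	/*
	 * Each ring entry is a two-word descriptor.  For RX, Addr holds
	 * the buffer's physical address plus the USED/WRAP flag bits and
	 * Info holds the received length and checksum status; for TX,
	 * Addr holds the buffer address and Info the length plus the
	 * USED/WRAP/LAST_BUF control bits.
	 */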

	/* init TX queue */
	for (i = 0; i < TX_QLEN; i++) {
		sc->TDSC[i].Addr = 0;
		sc->TDSC[i].Info = ETH_TDSC_I_USED |
		    (i == (TX_QLEN - 1) ? ETH_TDSC_I_WRAP : 0);
	}

	/* Populate the RXQ with mbufs */
	sc->rxqi = 0;
	for (i = 0; i < RX_QLEN; i++) {
		struct mbuf *m;

		err = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES,
		    PAGE_SIZE, BUS_DMA_WAITOK, &sc->rxq[i].m_dmamap);
		if (err) {
			panic("%s: dmamap_create failed: %i\n", __FUNCTION__,
			    err);
		}
		MGETHDR(m, M_WAIT, MT_DATA);
		MCLGET(m, M_WAIT);
		sc->rxq[i].m = m;
		if (mtod(m, intptr_t) & 3) {
			m_adj(m, mtod(m, intptr_t) & 3);
		}
		err = bus_dmamap_load(sc->sc_dmat, sc->rxq[i].m_dmamap,
		    m->m_ext.ext_buf, MCLBYTES, NULL,
		    BUS_DMA_WAITOK);
		if (err) {
			panic("%s: dmamap_load failed: %i\n", __FUNCTION__, err);
		}
		sc->RDSC[i].Addr = sc->rxq[i].m_dmamap->dm_segs[0].ds_addr
		    | (i == (RX_QLEN-1) ? ETH_RDSC_F_WRAP : 0);
		sc->RDSC[i].Info = 0;
		bus_dmamap_sync(sc->sc_dmat, sc->rxq[i].m_dmamap, 0,
		    MCLBYTES, BUS_DMASYNC_PREREAD);
	}

	/* prepare transmit queue */
	for (i = 0; i < TX_QLEN; i++) {
		err = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 0,
		    (BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW),
		    &sc->txq[i].m_dmamap);
		if (err)
			panic("ARGH #1");
		sc->txq[i].m = NULL;
	}

	/* Program each queue's start addr, cur addr, and len registers
	 * with the physical addresses.
	 */
	CEMAC_WRITE(ETH_RBQP, (uint32_t)sc->rbqpage_dsaddr);
	CEMAC_WRITE(ETH_TBQP, (uint32_t)sc->tbqpage_dsaddr);

	sc->sc_mcast_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_SOFTNET);
	sc->sc_intr_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);

	/* The MDC clock divisor (HCLK/32) was set via ETH_CFG above. */
	sc->sc_ethercom.ec_mii = mii;
	mii->mii_ifp = ifp;
	mii->mii_readreg = cemac_mii_readreg;
	mii->mii_writereg = cemac_mii_writereg;
	mii->mii_statchg = cemac_statchg;
	ifmedia_init(&mii->mii_media, IFM_IMASK, cemac_mediachange,
	    cemac_mediastatus);
	mii_attach(sc->sc_dev, mii, 0xffffffff, sc->sc_phyno, MII_OFFSET_ANY, 0);
	ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);

#if 0
	// enable / disable interrupts
	CEMAC_WRITE(ETH_IDR, -1);
	CEMAC_WRITE(ETH_IER, ETH_ISR_RCOM | ETH_ISR_TBRE | ETH_ISR_TIDLE
	    | ETH_ISR_RBNA | ETH_ISR_ROVR | ETH_ISR_TCOM);
//	(void)CEMAC_READ(ETH_ISR); // why

	// enable transmitter / receiver
	CEMAC_WRITE(ETH_CTL, ETH_CTL_TE | ETH_CTL_RE | ETH_CTL_ISR
	    | ETH_CTL_CSR | ETH_CTL_MPE);
#endif
	/*
	 * We can support hardware checksumming.
	 */
	ifp->if_capabilities |=
	    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
	    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
	    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
	    IFCAP_CSUM_TCPv6_Tx | IFCAP_CSUM_TCPv6_Rx |
	    IFCAP_CSUM_UDPv6_Tx | IFCAP_CSUM_UDPv6_Rx;

	/*
	 * We can support 802.1Q VLAN-sized frames.
	 */
	sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU;

	strcpy(ifp->if_xname, device_xname(sc->sc_dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_extflags = IFEF_MPSAFE;
	ifp->if_ioctl = cemac_ifioctl;
	ifp->if_start = cemac_ifstart;
	ifp->if_watchdog = cemac_ifwatchdog;
	ifp->if_init = cemac_ifinit;
	ifp->if_stop = cemac_ifstop;
	ifp->if_softc = sc;
	IFQ_SET_READY(&ifp->if_snd);
	if_attach(ifp);
	if_deferred_start_init(ifp, NULL);
	ether_ifattach(ifp, (sc)->sc_enaddr);
	ether_set_ifflags_cb(&sc->sc_ethercom, cemac_ifflags_cb);
}

static int
cemac_mediachange(struct ifnet *ifp)
{
	if (ifp->if_flags & IFF_UP)
		cemac_ifinit(ifp);
	return 0;
}

static void
cemac_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct cemac_softc * const sc = ifp->if_softc;

	mii_pollstat(&sc->sc_mii);
	ifmr->ifm_active = sc->sc_mii.mii_media_active;
	ifmr->ifm_status = sc->sc_mii.mii_media_status;
}


static int
cemac_mii_readreg(device_t self, int phy, int reg, uint16_t *val)
{
	struct cemac_softc * const sc = device_private(self);

	CEMAC_WRITE(ETH_MAN, (ETH_MAN_HIGH | ETH_MAN_RW_RD
			     | ((phy << ETH_MAN_PHYA_SHIFT) & ETH_MAN_PHYA)
			     | ((reg << ETH_MAN_REGA_SHIFT) & ETH_MAN_REGA)
			     | ETH_MAN_CODE_IEEE802_3));
	while (!(CEMAC_READ(ETH_SR) & ETH_SR_IDLE))
		;

	*val = CEMAC_READ(ETH_MAN) & ETH_MAN_DATA;
	return 0;
}

static int
cemac_mii_writereg(device_t self, int phy, int reg, uint16_t val)
{
	struct cemac_softc * const sc = device_private(self);

	CEMAC_WRITE(ETH_MAN, (ETH_MAN_HIGH | ETH_MAN_RW_WR
			     | ((phy << ETH_MAN_PHYA_SHIFT) & ETH_MAN_PHYA)
			     | ((reg << ETH_MAN_REGA_SHIFT) & ETH_MAN_REGA)
			     | ETH_MAN_CODE_IEEE802_3
			     | (val & ETH_MAN_DATA)));
	while (!(CEMAC_READ(ETH_SR) & ETH_SR_IDLE))
		;

	return 0;
}


static void
cemac_statchg(struct ifnet *ifp)
{
	struct cemac_softc * const sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_mii;
	uint32_t reg;

	/*
	 * We must keep the MAC and the PHY in sync as
	 * to the status of full-duplex!
	 */
	reg = CEMAC_READ(ETH_CFG);
	reg &= ~ETH_CFG_FD;
	if (sc->sc_mii.mii_media_active & IFM_FDX)
		reg |= ETH_CFG_FD;

	reg &= ~ETH_CFG_SPD;
	if (ISSET(sc->cemac_flags, CEMAC_FLAG_GEM))
		reg &= ~GEM_CFG_GEN;
	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_10_T:
		break;
	case IFM_100_TX:
		reg |= ETH_CFG_SPD;
		break;
	case IFM_1000_T:
		reg |= ETH_CFG_SPD | GEM_CFG_GEN;
		break;
	default:
		break;
	}
	CEMAC_WRITE(ETH_CFG, reg);
}

static bool
cemac_watchdog_check(struct cemac_softc * const sc)
{

	KASSERT(mutex_owned(sc->sc_intr_lock));

	if (!sc->sc_tx_sending)
		return true;

	if (time_uptime - sc->sc_tx_lastsent <= cemac_watchdog_timeout)
		return true;

	return false;
}

static bool
cemac_watchdog_tick(struct ifnet *ifp)
{
	struct cemac_softc * const sc = ifp->if_softc;

	KASSERT(mutex_owned(sc->sc_intr_lock));

	if (!sc->sc_trigger_reset && cemac_watchdog_check(sc))
		return true;

	if (atomic_swap_uint(&sc->sc_reset_pending, 1) == 0)
		workqueue_enqueue(sc->sc_reset_wq, &sc->sc_reset_work, NULL);

	return false;
}


static void
cemac_tick(void *arg)
{
	struct cemac_softc * const sc = arg;
	struct ifnet * const ifp = &sc->sc_ethercom.ec_if;

	mutex_enter(sc->sc_intr_lock);
	if (sc->sc_stopping) {
		mutex_exit(sc->sc_intr_lock);
		return;
	}

	if (ISSET(sc->cemac_flags, CEMAC_FLAG_GEM))
		if_statadd(ifp, if_collisions,
		    CEMAC_READ(GEM_SCOL) + CEMAC_READ(GEM_MCOL));
	else
		if_statadd(ifp, if_collisions,
		    CEMAC_READ(ETH_SCOL) + CEMAC_READ(ETH_MCOL));

	/* These misses are OK; they will happen if the RAM/CPU can't keep up. */
	if (!ISSET(sc->cemac_flags, CEMAC_FLAG_GEM)) {
		uint32_t misses = CEMAC_READ(ETH_DRFC);
		if (misses > 0)
			aprint_normal_ifnet(ifp, "%u rx misses\n", misses);
	}

	mii_tick(&sc->sc_mii);

	const bool ok = cemac_watchdog_tick(ifp);
	if (ok)
		callout_schedule(&sc->cemac_tick_ch, hz);

	mutex_exit(sc->sc_intr_lock);
}


static int
cemac_ifioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct cemac_softc * const sc = ifp->if_softc;
	int error;

	switch (cmd) {
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		break;
	default:
		KASSERT(IFNET_LOCKED(ifp));
	}

	const int s = splnet();
	error = ether_ioctl(ifp, cmd, data);
	splx(s);

	if (error == ENETRESET) {
		error = 0;

		if (cmd == SIOCADDMULTI || cmd == SIOCDELMULTI) {
			mutex_enter(sc->sc_mcast_lock);
			if ((sc->sc_if_flags & IFF_RUNNING) != 0)
				cemac_setaddr(ifp);

			mutex_exit(sc->sc_mcast_lock);
		}
	}

	return error;
}



static void
cemac_ifstart(struct ifnet *ifp)
{
	struct cemac_softc * const sc = ifp->if_softc;
	KASSERT(if_is_mpsafe(ifp));

	mutex_enter(sc->sc_intr_lock);
	if (!sc->sc_stopping) {
		cemac_ifstart_locked(ifp);
	}
	mutex_exit(sc->sc_intr_lock);
}

static void
cemac_ifstart_locked(struct ifnet *ifp)
{
	struct cemac_softc * const sc = ifp->if_softc;
	struct mbuf *m;
	bus_dma_segment_t *segs;
	int bi, err, nsegs;

	KASSERT(mutex_owned(sc->sc_intr_lock));

start:
	if (cemac_gctx(sc) == 0) {
		/* Enable transmit-buffer-free interrupt */
		CEMAC_WRITE(ETH_IER, ETH_ISR_TBRE);
		sc->sc_txbusy = true;
		return;
	}

	IFQ_POLL(&ifp->if_snd, m);
	if (m == NULL) {
		return;
	}

	bi = (sc->txqi + sc->txqc) % TX_QLEN;
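	/*
	 * The transmitter takes one physically contiguous buffer per
	 * frame and the DMA address must be 4-byte aligned, so any mbuf
	 * chain that maps to multiple segments or to a misaligned
	 * address is coalesced into a single fresh mbuf below.
	 */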
	if ((err = bus_dmamap_load_mbuf(sc->sc_dmat, sc->txq[bi].m_dmamap, m,
		BUS_DMA_NOWAIT)) ||
		sc->txq[bi].m_dmamap->dm_segs[0].ds_addr & 0x3 ||
		sc->txq[bi].m_dmamap->dm_nsegs > 1) {
		/* Copy entire mbuf chain to new single */
		struct mbuf *mn;

		if (err == 0)
			bus_dmamap_unload(sc->sc_dmat, sc->txq[bi].m_dmamap);

		MGETHDR(mn, M_DONTWAIT, MT_DATA);
		if (mn == NULL)
			return;
		if (m->m_pkthdr.len > MHLEN) {
			MCLGET(mn, M_DONTWAIT);
			if ((mn->m_flags & M_EXT) == 0) {
				m_freem(mn);
				return;
			}
		}
		m_copydata(m, 0, m->m_pkthdr.len, mtod(mn, void *));
		mn->m_pkthdr.len = mn->m_len = m->m_pkthdr.len;
		IFQ_DEQUEUE(&ifp->if_snd, m);
		m_freem(m);
		m = mn;
		bus_dmamap_load_mbuf(sc->sc_dmat, sc->txq[bi].m_dmamap, m,
		    BUS_DMA_NOWAIT);
	} else {
		IFQ_DEQUEUE(&ifp->if_snd, m);
	}

	bpf_mtap(ifp, m, BPF_D_OUT);

	nsegs = sc->txq[bi].m_dmamap->dm_nsegs;
	segs = sc->txq[bi].m_dmamap->dm_segs;
	if (nsegs > 1)
		panic("#### ARGH #2");

	sc->txq[bi].m = m;
	sc->txqc++;

	DPRINTFN(2,("%s: start sending idx #%i mbuf %p (txqc=%i, phys %p), "
	    "len=%u\n", __FUNCTION__, bi, sc->txq[bi].m, sc->txqc,
	     (void *)segs->ds_addr, (unsigned)m->m_pkthdr.len));
#ifdef	DIAGNOSTIC
	if (sc->txqc > TX_QLEN)
		panic("%s: txqc %i > %i", __FUNCTION__, sc->txqc, TX_QLEN);
#endif

	bus_dmamap_sync(sc->sc_dmat, sc->txq[bi].m_dmamap, 0,
	    sc->txq[bi].m_dmamap->dm_mapsize, BUS_DMASYNC_PREWRITE);

	if (ISSET(sc->cemac_flags, CEMAC_FLAG_GEM)) {
		sc->TDSC[bi].Addr = segs->ds_addr;
		sc->TDSC[bi].Info =
		    __SHIFTIN(m->m_pkthdr.len, ETH_TDSC_I_LEN) |
		    ETH_TDSC_I_LAST_BUF |
		    (bi == (TX_QLEN - 1) ? ETH_TDSC_I_WRAP : 0);

		DPRINTFN(3,("%s: TDSC[%i].Addr 0x%08x\n",
			__FUNCTION__, bi, sc->TDSC[bi].Addr));
		DPRINTFN(3,("%s: TDSC[%i].Info 0x%08x\n",
			__FUNCTION__, bi, sc->TDSC[bi].Info));

		uint32_t ctl = CEMAC_READ(ETH_CTL) | GEM_CTL_STARTTX;
		CEMAC_WRITE(ETH_CTL, ctl);
		DPRINTFN(3,("%s: ETH_CTL 0x%08x\n", __FUNCTION__,
		    CEMAC_READ(ETH_CTL)));
	} else {
		CEMAC_WRITE(ETH_TAR, segs->ds_addr);
		CEMAC_WRITE(ETH_TCR, m->m_pkthdr.len);
	}
	sc->sc_tx_lastsent = time_uptime;

	if (IFQ_IS_EMPTY(&ifp->if_snd) == 0)
		goto start;

	return;
}

static void
cemac_ifwatchdog(struct ifnet *ifp)
{
	struct cemac_softc * const sc = ifp->if_softc;

	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return;
	aprint_error_ifnet(ifp, "device timeout, CTL = 0x%08x, CFG = 0x%08x\n",
	    CEMAC_READ(ETH_CTL), CEMAC_READ(ETH_CFG));
}

static int
cemac_ifinit(struct ifnet *ifp)
{
	struct cemac_softc * const sc = ifp->if_softc;
	uint32_t dma, cfg;

	ASSERT_SLEEPABLE();
	KASSERT(IFNET_LOCKED(ifp));

	/* Cancel pending I/O and flush buffers. */
	cemac_ifstop(ifp, 0);

	if (ISSET(sc->cemac_flags, CEMAC_FLAG_GEM)) {

		if (ifp->if_capenable &
		    (IFCAP_CSUM_IPv4_Tx |
			IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_UDPv4_Tx |
			IFCAP_CSUM_TCPv6_Tx | IFCAP_CSUM_UDPv6_Tx)) {
			dma = CEMAC_READ(GEM_DMA_CFG);
			dma |= GEM_DMA_CFG_CHKSUM_GEN_OFFLOAD_EN;
			CEMAC_WRITE(GEM_DMA_CFG, dma);
		}
		if (ifp->if_capenable &
		    (IFCAP_CSUM_IPv4_Rx |
			IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx |
			IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx)) {
			cfg = CEMAC_READ(ETH_CFG);
			cfg |= GEM_CFG_RXCOEN;
			CEMAC_WRITE(ETH_CFG, cfg);
		}
	}

	// enable interrupts
	CEMAC_WRITE(ETH_IDR, -1);
	CEMAC_WRITE(ETH_IER, ETH_ISR_RCOM | ETH_ISR_TBRE | ETH_ISR_TIDLE
	    | ETH_ISR_RBNA | ETH_ISR_ROVR | ETH_ISR_TCOM);

	// enable transmitter / receiver
	CEMAC_WRITE(ETH_CTL, ETH_CTL_TE | ETH_CTL_RE | ETH_CTL_ISR
	    | ETH_CTL_CSR | ETH_CTL_MPE);

	mii_mediachg(&sc->sc_mii);
	callout_reset(&sc->cemac_tick_ch, hz, cemac_tick, sc);
	ifp->if_flags |= IFF_RUNNING;

	mutex_enter(sc->sc_intr_lock);
	sc->sc_stopping = false;
	mutex_exit(sc->sc_intr_lock);

	return 0;
}

static void
cemac_ifstop(struct ifnet *ifp, int disable)
{
//	uint32_t u;
	struct cemac_softc * const sc = ifp->if_softc;

	ASSERT_SLEEPABLE();
	KASSERT(IFNET_LOCKED(ifp));

	ifp->if_flags &= ~IFF_RUNNING;

	mutex_enter(sc->sc_mcast_lock);
	sc->sc_if_flags = ifp->if_flags;
	mutex_exit(sc->sc_mcast_lock);

	mutex_enter(sc->sc_intr_lock);
	sc->sc_stopping = true;
	mutex_exit(sc->sc_intr_lock);

#if 0
	CEMAC_WRITE(ETH_CTL, ETH_CTL_MPE);	// disable everything
	CEMAC_WRITE(ETH_IDR, -1);		// disable interrupts
//	CEMAC_WRITE(ETH_RBQP, 0);		// clear receive
	if (ISSET(sc->cemac_flags, CEMAC_FLAG_GEM))
		CEMAC_WRITE(ETH_CFG,
		    GEM_CFG_CLK_64 | ETH_CFG_SPD | ETH_CFG_FD | ETH_CFG_BIG);
	else
		CEMAC_WRITE(ETH_CFG,
		    ETH_CFG_CLK_32 | ETH_CFG_SPD | ETH_CFG_FD | ETH_CFG_BIG);
//	CEMAC_WRITE(ETH_TCR, 0);			// send nothing
//	(void)CEMAC_READ(ETH_ISR);
	u = CEMAC_READ(ETH_TSR);
	CEMAC_WRITE(ETH_TSR, (u & (ETH_TSR_UND | ETH_TSR_COMP | ETH_TSR_BNQ
				  | ETH_TSR_IDLE | ETH_TSR_RLE
				  | ETH_TSR_COL | ETH_TSR_OVR)));
	u = CEMAC_READ(ETH_RSR);
	CEMAC_WRITE(ETH_RSR, (u & (ETH_RSR_OVR | ETH_RSR_REC | ETH_RSR_BNA)));
#endif
	callout_halt(&sc->cemac_tick_ch, NULL);

	/* Down the MII. */
	mii_down(&sc->sc_mii);

	sc->sc_txbusy = false;
	sc->sc_mii.mii_media_status &= ~IFM_ACTIVE;
}

static void
cemac_setaddr(struct ifnet *ifp)
{
	struct cemac_softc * const sc = ifp->if_softc;
	struct ethercom *ec = &sc->sc_ethercom;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint8_t ias[3][ETHER_ADDR_LEN];
	uint32_t h, nma = 0, hashes[2] = { 0, 0 };
	uint32_t ctl = CEMAC_READ(ETH_CTL);
	uint32_t cfg = CEMAC_READ(ETH_CFG);

	KASSERT(mutex_owned(sc->sc_mcast_lock));

	/* disable receiver temporarily */
	CEMAC_WRITE(ETH_CTL, ctl & ~ETH_CTL_RE);

	cfg &= ~(ETH_CFG_MTI | ETH_CFG_UNI | ETH_CFG_CAF);

	if (sc->sc_if_flags & IFF_PROMISC) {
		cfg |= ETH_CFG_CAF;
	} else {
		cfg &= ~ETH_CFG_CAF;
	}

	// ETH_CFG_BIG?

	ETHER_LOCK(ec);
	ec->ec_flags &= ~ETHER_F_ALLMULTI;

	ETHER_FIRST_MULTI(step, ec, enm);
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			/*
			 * We must listen to a range of multicast addresses.
			 * For now, just accept all multicasts, rather than
			 * trying to set only those filter bits needed to match
			 * the range.  (At this time, the only use of address
			 * ranges is for IP multicast routing, for which the
			 * range is big enough to require all bits set.)
			 */
			cfg |= ETH_CFG_MTI;
			hashes[0] = 0xffffffffUL;
			hashes[1] = 0xffffffffUL;
			nma = 0;
			ec->ec_flags |= ETHER_F_ALLMULTI;
			break;
		}

		if (nma < 3) {
			/* We can program 3 perfect address filters for mcast */
			memcpy(ias[nma], enm->enm_addrlo, ETHER_ADDR_LEN);
		} else {
			/*
			 * XXX: Datasheet is not very clear here, I'm not sure
			 * if I'm doing this right.  --joff
			 */
			h = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN);

			/* Just want the 6 most-significant bits. */
			h = h >> 26;
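			/*
			 * The 6-bit value would index one of the 64
			 * hash-filter bits (bit h % 32 of HSL or HSH), as
			 * in the disabled code below; since the exact
			 * hardware hash is uncertain, all multicast is
			 * accepted instead.
			 */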
#if 0
			hashes[h / 32] |= (1 << (h % 32));
#else
			hashes[0] = 0xffffffffUL;
			hashes[1] = 0xffffffffUL;
#endif
			cfg |= ETH_CFG_MTI;
		}
		ETHER_NEXT_MULTI(step, enm);
		nma++;
	}
	ETHER_UNLOCK(ec);

	// program...
	DPRINTFN(1,("%s: en0 %02x:%02x:%02x:%02x:%02x:%02x\n", __FUNCTION__,
		sc->sc_enaddr[0], sc->sc_enaddr[1], sc->sc_enaddr[2],
		sc->sc_enaddr[3], sc->sc_enaddr[4], sc->sc_enaddr[5]));
	CEMAC_GEM_WRITE(SA1L, (sc->sc_enaddr[3] << 24)
	    | (sc->sc_enaddr[2] << 16) | (sc->sc_enaddr[1] << 8)
	    | (sc->sc_enaddr[0]));
	CEMAC_GEM_WRITE(SA1H, (sc->sc_enaddr[5] << 8)
	    | (sc->sc_enaddr[4]));
	if (nma > 0) {
		DPRINTFN(1,("%s: en1 %02x:%02x:%02x:%02x:%02x:%02x\n",
		    __FUNCTION__,
		    ias[0][0], ias[0][1], ias[0][2],
		    ias[0][3], ias[0][4], ias[0][5]));
		CEMAC_WRITE(ETH_SA2L, (ias[0][3] << 24)
		    | (ias[0][2] << 16) | (ias[0][1] << 8)
		    | (ias[0][0]));
		CEMAC_WRITE(ETH_SA2H, (ias[0][5] << 8)
		    | (ias[0][4]));
	}
	if (nma > 1) {
		DPRINTFN(1,("%s: en2 %02x:%02x:%02x:%02x:%02x:%02x\n",
		    __FUNCTION__,
		    ias[1][0], ias[1][1], ias[1][2],
		    ias[1][3], ias[1][4], ias[1][5]));
		CEMAC_WRITE(ETH_SA3L, (ias[1][3] << 24)
		    | (ias[1][2] << 16) | (ias[1][1] << 8)
		    | (ias[1][0]));
		CEMAC_WRITE(ETH_SA3H, (ias[1][5] << 8)
		    | (ias[1][4]));
	}
	if (nma > 2) {
		DPRINTFN(1,("%s: en3 %02x:%02x:%02x:%02x:%02x:%02x\n",
		    __FUNCTION__,
		    ias[2][0], ias[2][1], ias[2][2],
		    ias[2][3], ias[2][4], ias[2][5]));
		CEMAC_WRITE(ETH_SA4L, (ias[2][3] << 24)
		    | (ias[2][2] << 16) | (ias[2][1] << 8)
		    | (ias[2][0]));
		CEMAC_WRITE(ETH_SA4H, (ias[2][5] << 8)
		    | (ias[2][4]));
	}
	CEMAC_GEM_WRITE(HSH, hashes[0]);
	CEMAC_GEM_WRITE(HSL, hashes[1]);
	CEMAC_WRITE(ETH_CFG, cfg);
	CEMAC_WRITE(ETH_CTL, ctl | ETH_CTL_RE);
}