xref: /netbsd-src/sys/dev/ic/dp83932.c (revision cef8759bd76c1b621f8eab8faa6f208faabc2e15)
1 /*	$NetBSD: dp83932.c,v 1.46 2020/03/15 22:19:00 thorpej Exp $	*/
2 
3 /*-
4  * Copyright (c) 2001 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Jason R. Thorpe.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29  * POSSIBILITY OF SUCH DAMAGE.
30  */
31 
32 /*
33  * Device driver for the National Semiconductor DP83932
34  * Systems-Oriented Network Interface Controller (SONIC).
35  */
36 
37 #include <sys/cdefs.h>
38 __KERNEL_RCSID(0, "$NetBSD: dp83932.c,v 1.46 2020/03/15 22:19:00 thorpej Exp $");
39 
40 
41 #include <sys/param.h>
42 #include <sys/systm.h>
43 #include <sys/mbuf.h>
44 #include <sys/malloc.h>
45 #include <sys/kernel.h>
46 #include <sys/socket.h>
47 #include <sys/ioctl.h>
48 #include <sys/errno.h>
49 #include <sys/device.h>
50 
51 #include <net/if.h>
52 #include <net/if_dl.h>
53 #include <net/if_ether.h>
54 
55 #include <net/bpf.h>
56 
57 #include <sys/bus.h>
58 #include <sys/intr.h>
59 
60 #include <dev/ic/dp83932reg.h>
61 #include <dev/ic/dp83932var.h>
62 
63 static void	sonic_start(struct ifnet *);
64 static void	sonic_watchdog(struct ifnet *);
65 static int	sonic_ioctl(struct ifnet *, u_long, void *);
66 static int	sonic_init(struct ifnet *);
67 static void	sonic_stop(struct ifnet *, int);
68 
69 static bool	sonic_shutdown(device_t, int);
70 
71 static void	sonic_reset(struct sonic_softc *);
72 static void	sonic_rxdrain(struct sonic_softc *);
73 static int	sonic_add_rxbuf(struct sonic_softc *, int);
74 static void	sonic_set_filter(struct sonic_softc *);
75 
76 static uint16_t sonic_txintr(struct sonic_softc *);
77 static void	sonic_rxintr(struct sonic_softc *);
78 
79 int	sonic_copy_small = 0;
80 
81 #define ETHER_PAD_LEN (ETHER_MIN_LEN - ETHER_CRC_LEN)
82 
/*
 * sonic_attach:
 *
 *	Attach a SONIC interface to the system.
 *
 *	Allocates and DMA-maps the shared control data (descriptor rings
 *	plus a zero-filled Tx pad buffer), creates the per-descriptor
 *	Tx/Rx DMA maps, resets the chip to a known state, and registers
 *	the network interface with the system.  On any failure the
 *	partially-acquired resources are released in reverse order by
 *	falling through the fail_* labels.
 */
void
sonic_attach(struct sonic_softc *sc, const uint8_t *enaddr)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	int i, rseg, error;
	bus_dma_segment_t seg;
	size_t cdatasize;
	uint8_t *nullbuf;

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.  The control-data layout depends on whether
	 * the chip is wired for 16-bit or 32-bit operation.
	 */
	if (sc->sc_32bit)
		cdatasize = sizeof(struct sonic_control_data32);
	else
		cdatasize = sizeof(struct sonic_control_data16);

	/*
	 * ETHER_PAD_LEN extra bytes are allocated to hold an all-zero
	 * pad buffer, used later to pad short Tx frames up to the
	 * minimum Ethernet frame length.  The (64 * 1024) boundary
	 * keeps the allocation from crossing a 64KB line, so a single
	 * 16-bit "upper address" register value covers the whole block.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmat, cdatasize + ETHER_PAD_LEN,
	     PAGE_SIZE, (64 * 1024), &seg, 1, &rseg,
	     BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to allocate control data, error = %d\n", error);
		goto fail_0;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
	    cdatasize + ETHER_PAD_LEN, (void **) &sc->sc_cdata16,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to map control data, error = %d\n", error);
		goto fail_1;
	}
	/* The pad buffer lives immediately past the control data. */
	nullbuf = (uint8_t *)sc->sc_cdata16 + cdatasize;
	memset(nullbuf, 0, ETHER_PAD_LEN);

	/*
	 * Note: the control-data DMA map covers only cdatasize bytes;
	 * the pad buffer gets its own map (sc_nulldmamap) below.
	 */
	if ((error = bus_dmamap_create(sc->sc_dmat,
	     cdatasize, 1, cdatasize, 0, BUS_DMA_NOWAIT,
	     &sc->sc_cddmamap)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to create control data DMA map, error = %d\n",
		    error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
	     sc->sc_cdata16, cdatasize, NULL, BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to load control data DMA map, error = %d\n", error);
		goto fail_3;
	}

	/*
	 * Create the transmit buffer DMA maps.  Each may carry up to
	 * SONIC_NTXFRAGS segments (one per descriptor fragment slot).
	 */
	for (i = 0; i < SONIC_NTXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		     SONIC_NTXFRAGS, MCLBYTES, 0, BUS_DMA_NOWAIT,
		     &sc->sc_txsoft[i].ds_dmamap)) != 0) {
			aprint_error_dev(sc->sc_dev,
			    "unable to create tx DMA map %d, error = %d\n",
			    i, error);
			goto fail_4;
		}
	}

	/*
	 * Create the receive buffer DMA maps.  Rx buffers are single
	 * contiguous clusters, hence one segment each.
	 */
	for (i = 0; i < SONIC_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		     MCLBYTES, 0, BUS_DMA_NOWAIT,
		     &sc->sc_rxsoft[i].ds_dmamap)) != 0) {
			aprint_error_dev(sc->sc_dev,
			    "unable to create rx DMA map %d, error = %d\n",
			    i, error);
			goto fail_5;
		}
		sc->sc_rxsoft[i].ds_mbuf = NULL;
	}

	/*
	 * Create and map the pad buffer used to round short Tx frames
	 * up to the minimum Ethernet length.
	 */
	if ((error = bus_dmamap_create(sc->sc_dmat, ETHER_PAD_LEN, 1,
	    ETHER_PAD_LEN, 0, BUS_DMA_NOWAIT, &sc->sc_nulldmamap)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to create pad buffer DMA map, error = %d\n", error);
		goto fail_5;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_nulldmamap,
	    nullbuf, ETHER_PAD_LEN, NULL, BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to load pad buffer DMA map, error = %d\n", error);
		goto fail_6;
	}
	/* Pad buffer contents never change; sync it for the device once. */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_nulldmamap, 0, ETHER_PAD_LEN,
	    BUS_DMASYNC_PREWRITE);

	/*
	 * Reset the chip to a known state.
	 */
	sonic_reset(sc);

	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
	    ether_sprintf(enaddr));

	strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = sonic_ioctl;
	ifp->if_start = sonic_start;
	ifp->if_watchdog = sonic_watchdog;
	ifp->if_init = sonic_init;
	ifp->if_stop = sonic_stop;
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * We can support 802.1Q VLAN-sized frames.
	 */
	sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU;

	/*
	 * Attach the interface.
	 */
	if_attach(ifp);
	if_deferred_start_init(ifp, NULL);
	ether_ifattach(ifp, enaddr);

	/*
	 * Make sure the interface is shutdown during reboot.
	 */
	if (pmf_device_register1(sc->sc_dev, NULL, NULL, sonic_shutdown))
		pmf_class_network_register(sc->sc_dev, ifp);
	else
		aprint_error_dev(sc->sc_dev,
		    "couldn't establish power handler\n");

	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
 fail_6:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_nulldmamap);
 fail_5:
	for (i = 0; i < SONIC_NRXDESC; i++) {
		if (sc->sc_rxsoft[i].ds_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_rxsoft[i].ds_dmamap);
	}
 fail_4:
	for (i = 0; i < SONIC_NTXDESC; i++) {
		if (sc->sc_txsoft[i].ds_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_txsoft[i].ds_dmamap);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
 fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
 fail_2:
	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_cdata16, cdatasize);
 fail_1:
	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
 fail_0:
	return;
}
257 
258 /*
259  * sonic_shutdown:
260  *
261  *	Make sure the interface is stopped at reboot.
262  */
263 bool
264 sonic_shutdown(device_t self, int howto)
265 {
266 	struct sonic_softc *sc = device_private(self);
267 
268 	sonic_stop(&sc->sc_ethercom.ec_if, 1);
269 
270 	return true;
271 }
272 
/*
 * sonic_start:		[ifnet interface function]
 *
 *	Start packet transmission on the interface.
 *
 *	Dequeues packets from the interface send queue, loads each into
 *	a Tx DMA map (copying to a fresh mbuf when the chain is too
 *	fragmented or resources are short), fills in the descriptor in
 *	whichever format (16-bit or 32-bit) the chip uses, and finally
 *	kicks the transmitter if anything was queued.
 */
void
sonic_start(struct ifnet *ifp)
{
	struct sonic_softc *sc = ifp->if_softc;
	struct mbuf *m0, *m;
	struct sonic_tda16 *tda16;
	struct sonic_tda32 *tda32;
	struct sonic_descsoft *ds;
	bus_dmamap_t dmamap;
	int error, olasttx, nexttx, opending, totlen, olseg;
	int seg = 0;	/* XXX: gcc */

	if ((ifp->if_flags & IFF_RUNNING) != IFF_RUNNING)
		return;

	/*
	 * Remember the previous txpending and the current "last txdesc
	 * used" index.
	 */
	opending = sc->sc_txpending;
	olasttx = sc->sc_txlast;

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.  Leave one at the end for sanity's sake.
	 */
	while (sc->sc_txpending < (SONIC_NTXDESC - 1)) {
		/*
		 * Grab a packet off the queue.  Peek only (IFQ_POLL);
		 * we dequeue it once we're committed to sending.
		 */
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;
		m = NULL;

		/*
		 * Get the next available transmit descriptor.
		 */
		nexttx = SONIC_NEXTTX(sc->sc_txlast);
		ds = &sc->sc_txsoft[nexttx];
		dmamap = ds->ds_dmamap;

		/*
		 * Load the DMA map.  If this fails, the packet either
		 * didn't fit in the allotted number of frags, or we were
		 * short on resources.  In this case, we'll copy and try
		 * again.
		 *
		 * We also force a copy when a short packet already uses
		 * every fragment slot, since padding it would need one
		 * more fragment than the descriptor can hold.
		 */
		if ((error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
		    BUS_DMA_WRITE | BUS_DMA_NOWAIT)) != 0 ||
		    (m0->m_pkthdr.len < ETHER_PAD_LEN &&
		    dmamap->dm_nsegs == SONIC_NTXFRAGS)) {
			if (error == 0)
				bus_dmamap_unload(sc->sc_dmat, dmamap);
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				printf("%s: unable to allocate Tx mbuf\n",
				    device_xname(sc->sc_dev));
				break;
			}
			if (m0->m_pkthdr.len > MHLEN) {
				MCLGET(m, M_DONTWAIT);
				if ((m->m_flags & M_EXT) == 0) {
					printf("%s: unable to allocate Tx "
					    "cluster\n",
					    device_xname(sc->sc_dev));
					m_freem(m);
					break;
				}
			}
			/* Flatten the chain into the new mbuf/cluster. */
			m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, void *));
			m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;
			error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap,
			    m, BUS_DMA_WRITE | BUS_DMA_NOWAIT);
			if (error) {
				printf("%s: unable to load Tx buffer, "
				    "error = %d\n", device_xname(sc->sc_dev),
				    error);
				m_freem(m);
				break;
			}
		}
		IFQ_DEQUEUE(&ifp->if_snd, m0);
		if (m != NULL) {
			/* We copied; free the original and use the copy. */
			m_freem(m0);
			m0 = m;
		}

		/*
		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
		 */

		/* Sync the DMA map. */
		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		/*
		 * Store a pointer to the packet so we can free it later.
		 */
		ds->ds_mbuf = m0;

		/*
		 * Initialize the transmit descriptor.  Fragment pointers
		 * are split into 16-bit halves; short frames get an
		 * extra fragment pointing at the shared zero pad buffer.
		 */
		totlen = 0;
		if (sc->sc_32bit) {
			tda32 = &sc->sc_tda32[nexttx];
			for (seg = 0; seg < dmamap->dm_nsegs; seg++) {
				tda32->tda_frags[seg].frag_ptr1 =
				    htosonic32(sc,
				    (dmamap->dm_segs[seg].ds_addr >> 16) &
				    0xffff);
				tda32->tda_frags[seg].frag_ptr0 =
				    htosonic32(sc,
				    dmamap->dm_segs[seg].ds_addr & 0xffff);
				tda32->tda_frags[seg].frag_size =
				    htosonic32(sc, dmamap->dm_segs[seg].ds_len);
				totlen += dmamap->dm_segs[seg].ds_len;
			}
			if (totlen < ETHER_PAD_LEN) {
				tda32->tda_frags[seg].frag_ptr1 =
				    htosonic32(sc,
				    (sc->sc_nulldma >> 16) & 0xffff);
				tda32->tda_frags[seg].frag_ptr0 =
				    htosonic32(sc, sc->sc_nulldma & 0xffff);
				tda32->tda_frags[seg].frag_size =
				    htosonic32(sc, ETHER_PAD_LEN - totlen);
				totlen = ETHER_PAD_LEN;
				seg++;
			}

			tda32->tda_status = 0;
			tda32->tda_pktconfig = 0;
			tda32->tda_pktsize = htosonic32(sc, totlen);
			tda32->tda_fragcnt = htosonic32(sc, seg);

			/* Link it up. */
			tda32->tda_frags[seg].frag_ptr0 =
			    htosonic32(sc, SONIC_CDTXADDR32(sc,
			    SONIC_NEXTTX(nexttx)) & 0xffff);

			/* Sync the Tx descriptor. */
			SONIC_CDTXSYNC32(sc, nexttx,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		} else {
			tda16 = &sc->sc_tda16[nexttx];
			for (seg = 0; seg < dmamap->dm_nsegs; seg++) {
				tda16->tda_frags[seg].frag_ptr1 =
				    htosonic16(sc,
				    (dmamap->dm_segs[seg].ds_addr >> 16) &
				    0xffff);
				tda16->tda_frags[seg].frag_ptr0 =
				    htosonic16(sc,
				    dmamap->dm_segs[seg].ds_addr & 0xffff);
				tda16->tda_frags[seg].frag_size =
				    htosonic16(sc, dmamap->dm_segs[seg].ds_len);
				totlen += dmamap->dm_segs[seg].ds_len;
			}
			if (totlen < ETHER_PAD_LEN) {
				tda16->tda_frags[seg].frag_ptr1 =
				    htosonic16(sc,
				    (sc->sc_nulldma >> 16) & 0xffff);
				tda16->tda_frags[seg].frag_ptr0 =
				    htosonic16(sc, sc->sc_nulldma & 0xffff);
				tda16->tda_frags[seg].frag_size =
				    htosonic16(sc, ETHER_PAD_LEN - totlen);
				totlen = ETHER_PAD_LEN;
				seg++;
			}

			tda16->tda_status = 0;
			tda16->tda_pktconfig = 0;
			tda16->tda_pktsize = htosonic16(sc, totlen);
			tda16->tda_fragcnt = htosonic16(sc, seg);

			/* Link it up. */
			tda16->tda_frags[seg].frag_ptr0 =
			    htosonic16(sc, SONIC_CDTXADDR16(sc,
			    SONIC_NEXTTX(nexttx)) & 0xffff);

			/* Sync the Tx descriptor. */
			SONIC_CDTXSYNC16(sc, nexttx,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		}

		/* Advance the Tx pointer. */
		sc->sc_txpending++;
		sc->sc_txlast = nexttx;

		/*
		 * Pass the packet to any BPF listeners.
		 */
		bpf_mtap(ifp, m0, BPF_D_OUT);
	}

	if (sc->sc_txpending != opending) {
		/*
		 * We enqueued packets.  If the transmitter was idle,
		 * reset the txdirty pointer.
		 */
		if (opending == 0)
			sc->sc_txdirty = SONIC_NEXTTX(olasttx);

		/*
		 * Stop the SONIC on the last packet we've set up,
		 * and clear end-of-list on the descriptor previous
		 * to our new chain.
		 *
		 * NOTE: our `seg' variable should still be valid!
		 */
		if (sc->sc_32bit) {
			olseg =
			    sonic32toh(sc, sc->sc_tda32[olasttx].tda_fragcnt);
			sc->sc_tda32[sc->sc_txlast].tda_frags[seg].frag_ptr0 |=
			    htosonic32(sc, TDA_LINK_EOL);
			SONIC_CDTXSYNC32(sc, sc->sc_txlast,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			sc->sc_tda32[olasttx].tda_frags[olseg].frag_ptr0 &=
			    htosonic32(sc, ~TDA_LINK_EOL);
			SONIC_CDTXSYNC32(sc, olasttx,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		} else {
			olseg =
			    sonic16toh(sc, sc->sc_tda16[olasttx].tda_fragcnt);
			sc->sc_tda16[sc->sc_txlast].tda_frags[seg].frag_ptr0 |=
			    htosonic16(sc, TDA_LINK_EOL);
			SONIC_CDTXSYNC16(sc, sc->sc_txlast,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			sc->sc_tda16[olasttx].tda_frags[olseg].frag_ptr0 &=
			    htosonic16(sc, ~TDA_LINK_EOL);
			SONIC_CDTXSYNC16(sc, olasttx,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		}

		/* Start the transmitter. */
		CSR_WRITE(sc, SONIC_CR, CR_TXP);

		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
	}
}
520 
521 /*
522  * sonic_watchdog:	[ifnet interface function]
523  *
524  *	Watchdog timer handler.
525  */
526 void
527 sonic_watchdog(struct ifnet *ifp)
528 {
529 	struct sonic_softc *sc = ifp->if_softc;
530 
531 	printf("%s: device timeout\n", device_xname(sc->sc_dev));
532 	if_statinc(ifp, if_oerrors);
533 
534 	(void)sonic_init(ifp);
535 }
536 
537 /*
538  * sonic_ioctl:		[ifnet interface function]
539  *
540  *	Handle control requests from the operator.
541  */
542 int
543 sonic_ioctl(struct ifnet *ifp, u_long cmd, void *data)
544 {
545 	int s, error;
546 
547 	s = splnet();
548 
549 	error = ether_ioctl(ifp, cmd, data);
550 	if (error == ENETRESET) {
551 		/*
552 		 * Multicast list has changed; set the hardware
553 		 * filter accordingly.
554 		 */
555 		if (ifp->if_flags & IFF_RUNNING)
556 			(void)sonic_init(ifp);
557 		error = 0;
558 	}
559 
560 	splx(s);
561 	return error;
562 }
563 
/*
 * sonic_intr:
 *
 *	Interrupt service routine.
 *
 *	Loops reading and acknowledging the interrupt status register
 *	until no enabled sources remain pending, dispatching to the Rx
 *	and Tx helpers.  Any fatal receive-resource error (or a Tx FIFO
 *	underrun) schedules a full reinitialization.
 *
 *	Returns nonzero iff an interrupt was handled.
 */
int
sonic_intr(void *arg)
{
	struct sonic_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	uint16_t isr;
	int handled = 0, wantinit;

	for (wantinit = 0; wantinit == 0;) {
		/* Only consider sources we have enabled in the IMR. */
		isr = CSR_READ(sc, SONIC_ISR) & sc->sc_imr;
		if (isr == 0)
			break;
		CSR_WRITE(sc, SONIC_ISR, isr);	/* ACK */

		handled = 1;

		if (isr & IMR_PRX)
			sonic_rxintr(sc);

		if (isr & (IMR_PTX | IMR_TXER)) {
			/* txintr returns the OR of all Tx status words. */
			if (sonic_txintr(sc) & TCR_FU) {
				printf("%s: transmit FIFO underrun\n",
				    device_xname(sc->sc_dev));
				wantinit = 1;
			}
		}

		/* Receive-resource exhaustion: report and reinitialize. */
		if (isr & (IMR_RFO | IMR_RBA | IMR_RBE | IMR_RDE)) {
#define	PRINTERR(bit, str)						\
			if (isr & (bit))				\
				printf("%s: %s\n",device_xname(sc->sc_dev), str)
			PRINTERR(IMR_RFO, "receive FIFO overrun");
			PRINTERR(IMR_RBA, "receive buffer exceeded");
			PRINTERR(IMR_RBE, "receive buffers exhausted");
			PRINTERR(IMR_RDE, "receive descriptors exhausted");
			wantinit = 1;
		}
	}

	if (handled) {
		if (wantinit)
			(void)sonic_init(ifp);
		/* Try to push out any packets queued while we worked. */
		if_schedule_deferred_start(ifp);
	}

	return handled;
}
616 
/*
 * sonic_txintr:
 *
 *	Helper; handle transmit complete interrupts.
 *
 *	Walks the dirty portion of the Tx ring reclaiming completed
 *	descriptors: unloads and frees each finished mbuf and updates
 *	the interface statistics.  Stops at the first descriptor the
 *	chip has not yet processed.
 *
 *	Returns the OR of all reclaimed descriptor status words, so the
 *	caller can detect conditions such as FIFO underrun (TCR_FU).
 */
uint16_t
sonic_txintr(struct sonic_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct sonic_descsoft *ds;
	struct sonic_tda32 *tda32;
	struct sonic_tda16 *tda16;
	uint16_t status, totstat = 0;
	int i;

	for (i = sc->sc_txdirty; sc->sc_txpending != 0;
	     i = SONIC_NEXTTX(i), sc->sc_txpending--) {
		ds = &sc->sc_txsoft[i];

		/*
		 * Sync the descriptor so we see the chip's writeback,
		 * read the status word, then re-sync for further reads.
		 */
		if (sc->sc_32bit) {
			SONIC_CDTXSYNC32(sc, i,
			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
			tda32 = &sc->sc_tda32[i];
			status = sonic32toh(sc, tda32->tda_status);
			SONIC_CDTXSYNC32(sc, i, BUS_DMASYNC_PREREAD);
		} else {
			SONIC_CDTXSYNC16(sc, i,
			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
			tda16 = &sc->sc_tda16[i];
			status = sonic16toh(sc, tda16->tda_status);
			SONIC_CDTXSYNC16(sc, i, BUS_DMASYNC_PREREAD);
		}

		/*
		 * If no meaningful completion bits are set, the chip
		 * hasn't finished this descriptor yet; stop reclaiming.
		 */
		if ((status & ~(TCR_EXDIS |TCR_CRCI |TCR_POWC |TCR_PINT)) == 0)
			break;

		totstat |= status;

		bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
		    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
		m_freem(ds->ds_mbuf);
		ds->ds_mbuf = NULL;

		/*
		 * Check for errors and collisions.
		 */
		net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
		if (status & TCR_PTX)
			if_statinc_ref(nsr, if_opackets);
		else
			if_statinc_ref(nsr, if_oerrors);
		if (TDA_STATUS_NCOL(status))
			if_statadd_ref(nsr, if_collisions,
			    TDA_STATUS_NCOL(status));
		IF_STAT_PUTREF(ifp);
	}

	/* Update the dirty transmit buffer pointer. */
	sc->sc_txdirty = i;

	/*
	 * Cancel the watchdog timer if there are no pending
	 * transmissions.
	 */
	if (sc->sc_txpending == 0)
		ifp->if_timer = 0;

	return totstat;
}
687 
/*
 * sonic_rxintr:
 *
 *	Helper; handle receive interrupts.
 *
 *	Walks the Rx descriptor ring starting at sc_rxptr, handing each
 *	successfully-received packet up the stack.  Small packets (and,
 *	on strict-alignment machines in 32-bit mode, all packets) are
 *	copied into a fresh mbuf; otherwise the cluster is handed up and
 *	replaced via sonic_add_rxbuf().  Finally the chip's resource
 *	write pointer (RWR) is advanced to return buffers to it.
 */
void
sonic_rxintr(struct sonic_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct sonic_descsoft *ds;
	struct sonic_rda32 *rda32;
	struct sonic_rda16 *rda16;
	struct mbuf *m;
	int i, len;
	uint16_t status, bytecount /*, ptr0, ptr1, seqno */;

	for (i = sc->sc_rxptr;; i = SONIC_NEXTRX(i)) {
		ds = &sc->sc_rxsoft[i];

		/*
		 * Sync and inspect the descriptor; rda_inuse != 0 means
		 * the chip still owns it, so we're done for now.
		 */
		if (sc->sc_32bit) {
			SONIC_CDRXSYNC32(sc, i,
			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
			rda32 = &sc->sc_rda32[i];
			SONIC_CDRXSYNC32(sc, i, BUS_DMASYNC_PREREAD);
			if (rda32->rda_inuse != 0)
				break;
			status = sonic32toh(sc, rda32->rda_status);
			bytecount = sonic32toh(sc, rda32->rda_bytecount);
			/* ptr0 = sonic32toh(sc, rda32->rda_pkt_ptr0); */
			/* ptr1 = sonic32toh(sc, rda32->rda_pkt_ptr1); */
			/* seqno = sonic32toh(sc, rda32->rda_seqno); */
		} else {
			SONIC_CDRXSYNC16(sc, i,
			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
			rda16 = &sc->sc_rda16[i];
			SONIC_CDRXSYNC16(sc, i, BUS_DMASYNC_PREREAD);
			if (rda16->rda_inuse != 0)
				break;
			status = sonic16toh(sc, rda16->rda_status);
			bytecount = sonic16toh(sc, rda16->rda_bytecount);
			/* ptr0 = sonic16toh(sc, rda16->rda_pkt_ptr0); */
			/* ptr1 = sonic16toh(sc, rda16->rda_pkt_ptr1); */
			/* seqno = sonic16toh(sc, rda16->rda_seqno); */
		}

		/*
		 * Make absolutely sure this is the only packet
		 * in this receive buffer.  Our entire Rx buffer
		 * management scheme depends on this, and if the
		 * SONIC didn't follow our rule, it means we've
		 * misconfigured it.
		 */
		KASSERT(status & RCR_LPKT);

		/*
		 * Make sure the packet arrived OK.  If an error occurred,
		 * update stats and reset the descriptor.  The buffer will
		 * be reused the next time the descriptor comes up in the
		 * ring.
		 */
		if ((status & RCR_PRX) == 0) {
			if (status & RCR_FAER)
				printf("%s: Rx frame alignment error\n",
				    device_xname(sc->sc_dev));
			else if (status & RCR_CRCR)
				printf("%s: Rx CRC error\n",
				    device_xname(sc->sc_dev));
			if_statinc(ifp, if_ierrors);
			SONIC_INIT_RXDESC(sc, i);
			continue;
		}

		bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
		    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

		/*
		 * The SONIC includes the CRC with every packet.
		 */
		len = bytecount - ETHER_CRC_LEN;

		/*
		 * Ok, if the chip is in 32-bit mode, then receive
		 * buffers must be aligned to 32-bit boundaries,
		 * which means the payload is misaligned.  In this
		 * case, we must allocate a new mbuf, and copy the
		 * packet into it, scooted forward 2 bytes to ensure
		 * proper alignment.
		 *
		 * Note, in 16-bit mode, we can configure the SONIC
		 * to do what we want, and we have.
		 */
#ifndef __NO_STRICT_ALIGNMENT
		if (sc->sc_32bit) {
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL)
				goto dropit;
			if (len > (MHLEN - 2)) {
				MCLGET(m, M_DONTWAIT);
				if ((m->m_flags & M_EXT) == 0) {
					m_freem(m);
					goto dropit;
				}
			}
			/* Shift by 2 so the IP header lands aligned. */
			m->m_data += 2;
			/*
			 * Note that we use a cluster for incoming frames,
			 * so the buffer is virtually contiguous.
			 */
			memcpy(mtod(m, void *), mtod(ds->ds_mbuf, void *),
			    len);
			SONIC_INIT_RXDESC(sc, i);
			bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
			    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
		} else
#endif /* ! __NO_STRICT_ALIGNMENT */
		/*
		 * If the packet is small enough to fit in a single
		 * header mbuf, allocate one and copy the data into
		 * it.  This greatly reduces memory consumption when
		 * we receive lots of small packets.
		 */
		if (sonic_copy_small != 0 && len <= (MHLEN - 2)) {
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL)
				goto dropit;
			m->m_data += 2;
			/*
			 * Note that we use a cluster for incoming frames,
			 * so the buffer is virtually contiguous.
			 */
			memcpy(mtod(m, void *), mtod(ds->ds_mbuf, void *),
			    len);
			SONIC_INIT_RXDESC(sc, i);
			bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
			    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
		} else {
			/*
			 * Hand the cluster itself up the stack and try
			 * to attach a replacement; if that fails, drop
			 * the packet and keep the old buffer.
			 */
			m = ds->ds_mbuf;
			if (sonic_add_rxbuf(sc, i) != 0) {
 dropit:
				if_statinc(ifp, if_ierrors);
				SONIC_INIT_RXDESC(sc, i);
				bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
				    ds->ds_dmamap->dm_mapsize,
				    BUS_DMASYNC_PREREAD);
				continue;
			}
		}

		m_set_rcvif(m, ifp);
		m->m_pkthdr.len = m->m_len = len;

		/* Pass it on. */
		if_percpuq_enqueue(ifp->if_percpuq, m);
	}

	/* Update the receive pointer. */
	sc->sc_rxptr = i;
	CSR_WRITE(sc, SONIC_RWR, SONIC_CDRRADDR(sc, SONIC_PREVRX(i)));
}
847 
/*
 * sonic_reset:
 *
 *	Perform a soft reset on the SONIC.
 *
 *	Sequence: halt the transmitter, receiver and timer; assert the
 *	software reset bit; mask and acknowledge all interrupts; then
 *	release reset.  The delay() calls give the chip time to settle
 *	after each command-register write.
 */
void
sonic_reset(struct sonic_softc *sc)
{

	/* stop TX, RX and timer, and ensure RST is clear */
	CSR_WRITE(sc, SONIC_CR, CR_STP | CR_RXDIS | CR_HTX);
	delay(1000);

	/* assert software reset */
	CSR_WRITE(sc, SONIC_CR, CR_RST);
	delay(1000);

	/* mask all interrupt sources, then ACK anything pending */
	CSR_WRITE(sc, SONIC_IMR, 0);
	CSR_WRITE(sc, SONIC_ISR, IMR_ALL);

	/* release reset */
	CSR_WRITE(sc, SONIC_CR, 0);
	delay(1000);
}
871 
/*
 * sonic_init:		[ifnet interface function]
 *
 *	Initialize the interface.  Must be called at splnet().
 *
 *	Stops and resets the chip, programs the data configuration
 *	registers, (re)initializes the Tx and Rx descriptor rings,
 *	loads the ring addresses into the chip, sets the receive
 *	filter and interrupt mask, and finally enables the receiver.
 *
 *	Returns 0 on success, or an errno if Rx buffers could not be
 *	allocated (in which case the interface is left stopped).
 */
int
sonic_init(struct ifnet *ifp)
{
	struct sonic_softc *sc = ifp->if_softc;
	struct sonic_descsoft *ds;
	int i, error = 0;
	uint16_t reg;

	/*
	 * Cancel any pending I/O.
	 */
	sonic_stop(ifp, 0);

	/*
	 * Reset the SONIC to a known state.
	 */
	sonic_reset(sc);

	/*
	 * Bring the SONIC into reset state, and program the DCR.
	 *
	 * Note: We don't bother optimizing the transmit and receive
	 * thresholds, here. TFT/RFT values should be set in MD attachments.
	 */
	reg = sc->sc_dcr;
	if (sc->sc_32bit)
		reg |= DCR_DW;
	/* DCR/DCR2 may only be written while RST is asserted. */
	CSR_WRITE(sc, SONIC_CR, CR_RST);
	CSR_WRITE(sc, SONIC_DCR, reg);
	CSR_WRITE(sc, SONIC_DCR2, sc->sc_dcr2);
	CSR_WRITE(sc, SONIC_CR, 0);

	/*
	 * Initialize the transmit descriptors.
	 */
	if (sc->sc_32bit) {
		for (i = 0; i < SONIC_NTXDESC; i++) {
			memset(&sc->sc_tda32[i], 0, sizeof(struct sonic_tda32));
			SONIC_CDTXSYNC32(sc, i,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		}
	} else {
		for (i = 0; i < SONIC_NTXDESC; i++) {
			memset(&sc->sc_tda16[i], 0, sizeof(struct sonic_tda16));
			SONIC_CDTXSYNC16(sc, i,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		}
	}
	sc->sc_txpending = 0;
	sc->sc_txdirty = 0;
	sc->sc_txlast = SONIC_NTXDESC - 1;

	/*
	 * Initialize the receive descriptor ring, allocating buffers
	 * for any slots that don't already have one.
	 */
	for (i = 0; i < SONIC_NRXDESC; i++) {
		ds = &sc->sc_rxsoft[i];
		if (ds->ds_mbuf == NULL) {
			if ((error = sonic_add_rxbuf(sc, i)) != 0) {
				printf("%s: unable to allocate or map Rx "
				    "buffer %d, error = %d\n",
				    device_xname(sc->sc_dev), i, error);
				/*
				 * XXX Should attempt to run with fewer receive
				 * XXX buffers instead of just failing.
				 */
				sonic_rxdrain(sc);
				goto out;
			}
		} else
			SONIC_INIT_RXDESC(sc, i);
	}
	sc->sc_rxptr = 0;

	/* Give the transmit ring to the SONIC. */
	CSR_WRITE(sc, SONIC_UTDAR, (SONIC_CDTXADDR(sc, 0) >> 16) & 0xffff);
	CSR_WRITE(sc, SONIC_CTDAR, SONIC_CDTXADDR(sc, 0) & 0xffff);

	/* Give the receive descriptor ring to the SONIC. */
	CSR_WRITE(sc, SONIC_URDAR, (SONIC_CDRXADDR(sc, 0) >> 16) & 0xffff);
	CSR_WRITE(sc, SONIC_CRDAR, SONIC_CDRXADDR(sc, 0) & 0xffff);

	/* Give the receive buffer ring to the SONIC. */
	CSR_WRITE(sc, SONIC_URRAR, (SONIC_CDRRADDR(sc, 0) >> 16) & 0xffff);
	CSR_WRITE(sc, SONIC_RSAR, SONIC_CDRRADDR(sc, 0) & 0xffff);
	if (sc->sc_32bit)
		CSR_WRITE(sc, SONIC_REAR,
		    (SONIC_CDRRADDR(sc, SONIC_NRXDESC - 1) +
		    sizeof(struct sonic_rra32)) & 0xffff);
	else
		CSR_WRITE(sc, SONIC_REAR,
		    (SONIC_CDRRADDR(sc, SONIC_NRXDESC - 1) +
		    sizeof(struct sonic_rra16)) & 0xffff);
	CSR_WRITE(sc, SONIC_RRR, SONIC_CDRRADDR(sc, 0) & 0xffff);
	CSR_WRITE(sc, SONIC_RWR, SONIC_CDRRADDR(sc, SONIC_NRXDESC - 1));

	/*
	 * Set the End-Of-Buffer counter such that only one packet
	 * will be placed into each buffer we provide.  Note we are
	 * following the recommendation of section 3.4.4 of the manual
	 * here, and have "lengthened" the receive buffers accordingly.
	 * (EOBC is in units of 16-bit words.)
	 */
	if (sc->sc_32bit)
		CSR_WRITE(sc, SONIC_EOBC, (ETHER_MAX_LEN + 2) / 2);
	else
		CSR_WRITE(sc, SONIC_EOBC, (ETHER_MAX_LEN / 2));

	/* Reset the receive sequence counter. */
	CSR_WRITE(sc, SONIC_RSC, 0);

	/* Clear the tally registers (they count down from 0xffff). */
	CSR_WRITE(sc, SONIC_CRCETC, 0xffff);
	CSR_WRITE(sc, SONIC_FAET, 0xffff);
	CSR_WRITE(sc, SONIC_MPT, 0xffff);

	/* Set the receive filter. */
	sonic_set_filter(sc);

	/*
	 * Set the interrupt mask register.
	 */
	sc->sc_imr = IMR_RFO | IMR_RBA | IMR_RBE | IMR_RDE |
	    IMR_TXER | IMR_PTX | IMR_PRX;
	CSR_WRITE(sc, SONIC_IMR, sc->sc_imr);

	/*
	 * Start the receive process in motion.  Note, we don't
	 * start the transmit process until we actually try to
	 * transmit packets.
	 */
	CSR_WRITE(sc, SONIC_CR, CR_RXEN | CR_RRRA);

	/*
	 * ...all done!
	 */
	ifp->if_flags |= IFF_RUNNING;

 out:
	if (error)
		printf("%s: interface not running\n", device_xname(sc->sc_dev));
	return error;
}
1019 
1020 /*
1021  * sonic_rxdrain:
1022  *
1023  *	Drain the receive queue.
1024  */
1025 void
1026 sonic_rxdrain(struct sonic_softc *sc)
1027 {
1028 	struct sonic_descsoft *ds;
1029 	int i;
1030 
1031 	for (i = 0; i < SONIC_NRXDESC; i++) {
1032 		ds = &sc->sc_rxsoft[i];
1033 		if (ds->ds_mbuf != NULL) {
1034 			bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
1035 			m_freem(ds->ds_mbuf);
1036 			ds->ds_mbuf = NULL;
1037 		}
1038 	}
1039 }
1040 
/*
 * sonic_stop:		[ifnet interface function]
 *
 *	Stop transmission on the interface.
 *
 *	Masks interrupts, commands the chip to halt Tx/Rx/timer and
 *	polls until it reports stopped, frees any in-flight Tx mbufs,
 *	and marks the interface down.  If `disable' is set, the Rx
 *	buffers are drained as well.
 */
void
sonic_stop(struct ifnet *ifp, int disable)
{
	struct sonic_softc *sc = ifp->if_softc;
	struct sonic_descsoft *ds;
	int i;

	/*
	 * Disable interrupts.
	 */
	CSR_WRITE(sc, SONIC_IMR, 0);

	/*
	 * Stop the transmitter, receiver, and timer.  Poll (up to
	 * ~2ms) for the chip to acknowledge by clearing the active
	 * bits in the command register.
	 */
	CSR_WRITE(sc, SONIC_CR, CR_HTX | CR_RXDIS | CR_STP);
	for (i = 0; i < 1000; i++) {
		if ((CSR_READ(sc, SONIC_CR) & (CR_TXP | CR_RXEN | CR_ST)) == 0)
			break;
		delay(2);
	}
	if ((CSR_READ(sc, SONIC_CR) & (CR_TXP | CR_RXEN | CR_ST)) != 0)
		printf("%s: SONIC failed to stop\n", device_xname(sc->sc_dev));

	/*
	 * Release any queued transmit buffers.
	 */
	for (i = 0; i < SONIC_NTXDESC; i++) {
		ds = &sc->sc_txsoft[i];
		if (ds->ds_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
			m_freem(ds->ds_mbuf);
			ds->ds_mbuf = NULL;
		}
	}

	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp->if_flags &= ~IFF_RUNNING;
	ifp->if_timer = 0;

	if (disable)
		sonic_rxdrain(sc);
}
1091 
1092 /*
1093  * sonic_add_rxbuf:
1094  *
1095  *	Add a receive buffer to the indicated descriptor.
1096  */
1097 int
1098 sonic_add_rxbuf(struct sonic_softc *sc, int idx)
1099 {
1100 	struct sonic_descsoft *ds = &sc->sc_rxsoft[idx];
1101 	struct mbuf *m;
1102 	int error;
1103 
1104 	MGETHDR(m, M_DONTWAIT, MT_DATA);
1105 	if (m == NULL)
1106 		return ENOBUFS;
1107 
1108 	MCLGET(m, M_DONTWAIT);
1109 	if ((m->m_flags & M_EXT) == 0) {
1110 		m_freem(m);
1111 		return ENOBUFS;
1112 	}
1113 
1114 	if (ds->ds_mbuf != NULL)
1115 		bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
1116 
1117 	ds->ds_mbuf = m;
1118 
1119 	error = bus_dmamap_load(sc->sc_dmat, ds->ds_dmamap,
1120 	    m->m_ext.ext_buf, m->m_ext.ext_size, NULL,
1121 	    BUS_DMA_READ | BUS_DMA_NOWAIT);
1122 	if (error) {
1123 		printf("%s: can't load rx DMA map %d, error = %d\n",
1124 		    device_xname(sc->sc_dev), idx, error);
1125 		panic("sonic_add_rxbuf");	/* XXX */
1126 	}
1127 
1128 	bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
1129 	    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
1130 
1131 	SONIC_INIT_RXDESC(sc, idx);
1132 
1133 	return 0;
1134 }
1135 
1136 static void
1137 sonic_set_camentry(struct sonic_softc *sc, int entry, const uint8_t *enaddr)
1138 {
1139 
1140 	if (sc->sc_32bit) {
1141 		struct sonic_cda32 *cda = &sc->sc_cda32[entry];
1142 
1143 		cda->cda_entry = htosonic32(sc, entry);
1144 		cda->cda_addr0 = htosonic32(sc, enaddr[0] | (enaddr[1] << 8));
1145 		cda->cda_addr1 = htosonic32(sc, enaddr[2] | (enaddr[3] << 8));
1146 		cda->cda_addr2 = htosonic32(sc, enaddr[4] | (enaddr[5] << 8));
1147 	} else {
1148 		struct sonic_cda16 *cda = &sc->sc_cda16[entry];
1149 
1150 		cda->cda_entry = htosonic16(sc, entry);
1151 		cda->cda_addr0 = htosonic16(sc, enaddr[0] | (enaddr[1] << 8));
1152 		cda->cda_addr1 = htosonic16(sc, enaddr[2] | (enaddr[3] << 8));
1153 		cda->cda_addr2 = htosonic16(sc, enaddr[4] | (enaddr[5] << 8));
1154 	}
1155 }
1156 
1157 /*
1158  * sonic_set_filter:
1159  *
1160  *	Set the SONIC receive filter.
1161  */
1162 void
1163 sonic_set_filter(struct sonic_softc *sc)
1164 {
1165 	struct ethercom *ec = &sc->sc_ethercom;
1166 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1167 	struct ether_multi *enm;
1168 	struct ether_multistep step;
1169 	int i, entry = 0;
1170 	uint16_t camvalid = 0;
1171 	uint16_t rcr = 0;
1172 
1173 	if (ifp->if_flags & IFF_BROADCAST)
1174 		rcr |= RCR_BRD;
1175 
1176 	if (ifp->if_flags & IFF_PROMISC) {
1177 		rcr |= RCR_PRO;
1178 		goto allmulti;
1179 	}
1180 
1181 	/* Put our station address in the first CAM slot. */
1182 	sonic_set_camentry(sc, entry, CLLADDR(ifp->if_sadl));
1183 	camvalid |= (1U << entry);
1184 	entry++;
1185 
1186 	/* Add the multicast addresses to the CAM. */
1187 	ETHER_LOCK(ec);
1188 	ETHER_FIRST_MULTI(step, ec, enm);
1189 	while (enm != NULL) {
1190 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
1191 			/*
1192 			 * We must listen to a range of multicast addresses.
1193 			 * The only way to do this on the SONIC is to enable
1194 			 * reception of all multicast packets.
1195 			 */
1196 			ETHER_UNLOCK(ec);
1197 			goto allmulti;
1198 		}
1199 
1200 		if (entry == SONIC_NCAMENT) {
1201 			/*
1202 			 * Out of CAM slots.  Have to enable reception
1203 			 * of all multicast addresses.
1204 			 */
1205 			ETHER_UNLOCK(ec);
1206 			goto allmulti;
1207 		}
1208 
1209 		sonic_set_camentry(sc, entry, enm->enm_addrlo);
1210 		camvalid |= (1U << entry);
1211 		entry++;
1212 
1213 		ETHER_NEXT_MULTI(step, enm);
1214 	}
1215 	ETHER_UNLOCK(ec);
1216 
1217 	ifp->if_flags &= ~IFF_ALLMULTI;
1218 	goto setit;
1219 
1220  allmulti:
1221 	/* Use only the first CAM slot (station address). */
1222 	camvalid = 0x0001;
1223 	entry = 1;
1224 	rcr |= RCR_AMC;
1225 
1226  setit:
1227 	/* set mask for the CAM Enable register */
1228 	if (sc->sc_32bit) {
1229 		if (entry == SONIC_NCAMENT)
1230 			sc->sc_cdaenable32 = htosonic32(sc, camvalid);
1231 		else
1232 			sc->sc_cda32[entry].cda_entry =
1233 			    htosonic32(sc, camvalid);
1234 	} else {
1235 		if (entry == SONIC_NCAMENT)
1236 			sc->sc_cdaenable16 = htosonic16(sc, camvalid);
1237 		else
1238 			sc->sc_cda16[entry].cda_entry =
1239 			    htosonic16(sc, camvalid);
1240 	}
1241 
1242 	/* Load the CAM. */
1243 	SONIC_CDCAMSYNC(sc, BUS_DMASYNC_PREWRITE);
1244 	CSR_WRITE(sc, SONIC_CDP, SONIC_CDCAMADDR(sc) & 0xffff);
1245 	CSR_WRITE(sc, SONIC_CDC, entry);
1246 	CSR_WRITE(sc, SONIC_CR, CR_LCAM);
1247 	for (i = 0; i < 10000; i++) {
1248 		if ((CSR_READ(sc, SONIC_CR) & CR_LCAM) == 0)
1249 			break;
1250 		delay(2);
1251 	}
1252 	if (CSR_READ(sc, SONIC_CR) & CR_LCAM)
1253 		printf("%s: CAM load failed\n", device_xname(sc->sc_dev));
1254 	SONIC_CDCAMSYNC(sc, BUS_DMASYNC_POSTWRITE);
1255 
1256 	/* Set the receive control register. */
1257 	CSR_WRITE(sc, SONIC_RCR, rcr);
1258 }
1259