/*	$NetBSD: dp83932.c,v 1.27 2008/08/23 15:46:47 tsutsui Exp $	*/

/*-
 * Copyright (c) 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Device driver for the National Semiconductor DP83932
 * Systems-Oriented Network Interface Controller (SONIC).
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: dp83932.c,v 1.27 2008/08/23 15:46:47 tsutsui Exp $");

#include "bpfilter.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>

#include <uvm/uvm_extern.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_ether.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <sys/bus.h>
#include <sys/intr.h>

#include <dev/ic/dp83932reg.h>
#include <dev/ic/dp83932var.h>

void	sonic_start(struct ifnet *);
void	sonic_watchdog(struct ifnet *);
int	sonic_ioctl(struct ifnet *, u_long, void *);
int	sonic_init(struct ifnet *);
void	sonic_stop(struct ifnet *, int);

void	sonic_shutdown(void *);

void	sonic_reset(struct sonic_softc *);
void	sonic_rxdrain(struct sonic_softc *);
int	sonic_add_rxbuf(struct sonic_softc *, int);
void	sonic_set_filter(struct sonic_softc *);

uint16_t sonic_txintr(struct sonic_softc *);
void	sonic_rxintr(struct sonic_softc *);

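/*
 * When non-zero, received packets small enough to fit in a header
 * mbuf are copied out of the receive buffer rather than handed up
 * in the cluster; see sonic_rxintr().  Tunable.
 */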
int	sonic_copy_small = 0;

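/*
 * Minimum Ethernet payload length (minimum frame size less the FCS).
 * Transmit packets shorter than this are padded out with a zero-filled
 * fragment in sonic_start() rather than relying on the chip to pad them.
 */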
#define ETHER_PAD_LEN (ETHER_MIN_LEN - ETHER_CRC_LEN)

/*
 * sonic_attach:
 *
 *	Attach a SONIC interface to the system.
 */
void
sonic_attach(struct sonic_softc *sc, const uint8_t *enaddr)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	int i, rseg, error;
	bus_dma_segment_t seg;
	size_t cdatasize;
	uint8_t *nullbuf;

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 */
	if (sc->sc_32bit)
		cdatasize = sizeof(struct sonic_control_data32);
	else
		cdatasize = sizeof(struct sonic_control_data16);

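	/*
	 * The control data (and the zero pad buffer appended to it) must
	 * not cross a 64KB boundary: sonic_init() programs the upper 16
	 * address bits once (UTDAR/URDAR/URRAR) and thereafter hands the
	 * chip only the low 16 bits of each descriptor address, hence the
	 * 64KB boundary argument to bus_dmamem_alloc() below.
	 */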
	if ((error = bus_dmamem_alloc(sc->sc_dmat, cdatasize + ETHER_PAD_LEN,
	     PAGE_SIZE, (64 * 1024), &seg, 1, &rseg,
	     BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to allocate control data, error = %d\n", error);
		goto fail_0;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
	    cdatasize + ETHER_PAD_LEN, (void **) &sc->sc_cdata16,
	    BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to map control data, error = %d\n", error);
		goto fail_1;
	}
	nullbuf = (uint8_t *)sc->sc_cdata16 + cdatasize;
	memset(nullbuf, 0, ETHER_PAD_LEN);

	if ((error = bus_dmamap_create(sc->sc_dmat,
	     cdatasize, 1, cdatasize, 0, BUS_DMA_NOWAIT,
	     &sc->sc_cddmamap)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to create control data DMA map, error = %d\n",
		    error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
	     sc->sc_cdata16, cdatasize, NULL, BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to load control data DMA map, error = %d\n", error);
		goto fail_3;
	}

	/*
	 * Create the transmit buffer DMA maps.
	 */
	for (i = 0; i < SONIC_NTXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		     SONIC_NTXFRAGS, MCLBYTES, 0, BUS_DMA_NOWAIT,
		     &sc->sc_txsoft[i].ds_dmamap)) != 0) {
			aprint_error_dev(sc->sc_dev,
			    "unable to create tx DMA map %d, error = %d\n",
			    i, error);
			goto fail_4;
		}
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < SONIC_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		     MCLBYTES, 0, BUS_DMA_NOWAIT,
		     &sc->sc_rxsoft[i].ds_dmamap)) != 0) {
			aprint_error_dev(sc->sc_dev,
			    "unable to create rx DMA map %d, error = %d\n",
			    i, error);
			goto fail_5;
		}
		sc->sc_rxsoft[i].ds_mbuf = NULL;
	}

	/*
	 * create and map the pad buffer
	 */
	if ((error = bus_dmamap_create(sc->sc_dmat, ETHER_PAD_LEN, 1,
	    ETHER_PAD_LEN, 0, BUS_DMA_NOWAIT, &sc->sc_nulldmamap)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to create pad buffer DMA map, error = %d\n", error);
		goto fail_5;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_nulldmamap,
	    nullbuf, ETHER_PAD_LEN, NULL, BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to load pad buffer DMA map, error = %d\n", error);
		goto fail_6;
	}
	bus_dmamap_sync(sc->sc_dmat, sc->sc_nulldmamap, 0, ETHER_PAD_LEN,
	    BUS_DMASYNC_PREWRITE);

	/*
	 * Reset the chip to a known state.
	 */
	sonic_reset(sc);

	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
	    ether_sprintf(enaddr));

	strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = sonic_ioctl;
	ifp->if_start = sonic_start;
	ifp->if_watchdog = sonic_watchdog;
	ifp->if_init = sonic_init;
	ifp->if_stop = sonic_stop;
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * We can support 802.1Q VLAN-sized frames.
	 */
	sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU;

	/*
	 * Attach the interface.
	 */
	if_attach(ifp);
	ether_ifattach(ifp, enaddr);

	/*
	 * Make sure the interface is shutdown during reboot.
	 */
	sc->sc_sdhook = shutdownhook_establish(sonic_shutdown, sc);
	if (sc->sc_sdhook == NULL)
		aprint_error_dev(sc->sc_dev,
		    "WARNING: unable to establish shutdown hook\n");
	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
 fail_6:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_nulldmamap);
 fail_5:
	for (i = 0; i < SONIC_NRXDESC; i++) {
		if (sc->sc_rxsoft[i].ds_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_rxsoft[i].ds_dmamap);
	}
 fail_4:
	for (i = 0; i < SONIC_NTXDESC; i++) {
		if (sc->sc_txsoft[i].ds_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_txsoft[i].ds_dmamap);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
 fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
 fail_2:
	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_cdata16, cdatasize);
 fail_1:
	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
 fail_0:
	return;
}

/*
 * sonic_shutdown:
 *
 *	Make sure the interface is stopped at reboot.
 */
void
sonic_shutdown(void *arg)
{
	struct sonic_softc *sc = arg;

	sonic_stop(&sc->sc_ethercom.ec_if, 1);
}

/*
 * sonic_start:		[ifnet interface function]
 *
 *	Start packet transmission on the interface.
 */
void
sonic_start(struct ifnet *ifp)
{
	struct sonic_softc *sc = ifp->if_softc;
	struct mbuf *m0, *m;
	struct sonic_tda16 *tda16;
	struct sonic_tda32 *tda32;
	struct sonic_descsoft *ds;
	bus_dmamap_t dmamap;
	int error, olasttx, nexttx, opending, totlen, olseg;
	int seg = 0;	/* XXX: gcc */

	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
		return;

	/*
	 * Remember the previous txpending and the current "last txdesc
	 * used" index.
	 */
	opending = sc->sc_txpending;
	olasttx = sc->sc_txlast;

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.  Leave one at the end for sanity's sake.
	 */
	while (sc->sc_txpending < (SONIC_NTXDESC - 1)) {
		/*
		 * Grab a packet off the queue.
		 */
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;
		m = NULL;

		/*
		 * Get the next available transmit descriptor.
		 */
		nexttx = SONIC_NEXTTX(sc->sc_txlast);
		ds = &sc->sc_txsoft[nexttx];
		dmamap = ds->ds_dmamap;

		/*
		 * Load the DMA map.  If this fails, the packet either
		 * didn't fit in the allotted number of frags, or we were
		 * short on resources.  In this case, we'll copy and try
		 * again.
		 */
		if ((error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
		    BUS_DMA_WRITE|BUS_DMA_NOWAIT)) != 0 ||
		    (m0->m_pkthdr.len < ETHER_PAD_LEN &&
		    dmamap->dm_nsegs == SONIC_NTXFRAGS)) {
			if (error == 0)
				bus_dmamap_unload(sc->sc_dmat, dmamap);
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				printf("%s: unable to allocate Tx mbuf\n",
				    device_xname(sc->sc_dev));
				break;
			}
			if (m0->m_pkthdr.len > MHLEN) {
				MCLGET(m, M_DONTWAIT);
				if ((m->m_flags & M_EXT) == 0) {
					printf("%s: unable to allocate Tx "
					    "cluster\n",
					    device_xname(sc->sc_dev));
					m_freem(m);
					break;
				}
			}
			m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, void *));
			m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;
			error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap,
			    m, BUS_DMA_WRITE|BUS_DMA_NOWAIT);
			if (error) {
				printf("%s: unable to load Tx buffer, "
				    "error = %d\n", device_xname(sc->sc_dev),
				    error);
				m_freem(m);
				break;
			}
		}
		IFQ_DEQUEUE(&ifp->if_snd, m0);
		if (m != NULL) {
			m_freem(m0);
			m0 = m;
		}

		/*
		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
		 */

		/* Sync the DMA map. */
		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		/*
		 * Store a pointer to the packet so we can free it later.
		 */
		ds->ds_mbuf = m0;

		/*
		 * Initialize the transmit descriptor.
		 */
		totlen = 0;
		if (sc->sc_32bit) {
			tda32 = &sc->sc_tda32[nexttx];
			for (seg = 0; seg < dmamap->dm_nsegs; seg++) {
				tda32->tda_frags[seg].frag_ptr1 =
				    htosonic32(sc,
				    (dmamap->dm_segs[seg].ds_addr >> 16) &
				    0xffff);
				tda32->tda_frags[seg].frag_ptr0 =
				    htosonic32(sc,
				    dmamap->dm_segs[seg].ds_addr & 0xffff);
				tda32->tda_frags[seg].frag_size =
				    htosonic32(sc, dmamap->dm_segs[seg].ds_len);
				totlen += dmamap->dm_segs[seg].ds_len;
			}
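			/*
			 * Frames shorter than the Ethernet minimum get one
			 * extra fragment pointing at the zero-filled pad
			 * buffer set up in sonic_attach().
			 */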
			if (totlen < ETHER_PAD_LEN) {
				tda32->tda_frags[seg].frag_ptr1 =
				    htosonic32(sc,
				    (sc->sc_nulldma >> 16) & 0xffff);
				tda32->tda_frags[seg].frag_ptr0 =
				    htosonic32(sc, sc->sc_nulldma & 0xffff);
				tda32->tda_frags[seg].frag_size =
				    htosonic32(sc, ETHER_PAD_LEN - totlen);
				totlen = ETHER_PAD_LEN;
				seg++;
			}

			tda32->tda_status = 0;
			tda32->tda_pktconfig = 0;
			tda32->tda_pktsize = htosonic32(sc, totlen);
			tda32->tda_fragcnt = htosonic32(sc, seg);

			/* Link it up. */
			tda32->tda_frags[seg].frag_ptr0 =
			    htosonic32(sc, SONIC_CDTXADDR32(sc,
			    SONIC_NEXTTX(nexttx)) & 0xffff);

			/* Sync the Tx descriptor. */
			SONIC_CDTXSYNC32(sc, nexttx,
			    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
		} else {
			tda16 = &sc->sc_tda16[nexttx];
			for (seg = 0; seg < dmamap->dm_nsegs; seg++) {
				tda16->tda_frags[seg].frag_ptr1 =
				    htosonic16(sc,
				    (dmamap->dm_segs[seg].ds_addr >> 16) &
				    0xffff);
				tda16->tda_frags[seg].frag_ptr0 =
				    htosonic16(sc,
				    dmamap->dm_segs[seg].ds_addr & 0xffff);
				tda16->tda_frags[seg].frag_size =
				    htosonic16(sc, dmamap->dm_segs[seg].ds_len);
				totlen += dmamap->dm_segs[seg].ds_len;
			}
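			/* As above: pad short frames with the zero buffer. */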
			if (totlen < ETHER_PAD_LEN) {
				tda16->tda_frags[seg].frag_ptr1 =
				    htosonic16(sc,
				    (sc->sc_nulldma >> 16) & 0xffff);
				tda16->tda_frags[seg].frag_ptr0 =
				    htosonic16(sc, sc->sc_nulldma & 0xffff);
				tda16->tda_frags[seg].frag_size =
				    htosonic16(sc, ETHER_PAD_LEN - totlen);
				totlen = ETHER_PAD_LEN;
				seg++;
			}

			tda16->tda_status = 0;
			tda16->tda_pktconfig = 0;
			tda16->tda_pktsize = htosonic16(sc, totlen);
			tda16->tda_fragcnt = htosonic16(sc, seg);

			/* Link it up. */
			tda16->tda_frags[seg].frag_ptr0 =
			    htosonic16(sc, SONIC_CDTXADDR16(sc,
			    SONIC_NEXTTX(nexttx)) & 0xffff);

			/* Sync the Tx descriptor. */
			SONIC_CDTXSYNC16(sc, nexttx,
			    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
		}

		/* Advance the Tx pointer. */
		sc->sc_txpending++;
		sc->sc_txlast = nexttx;

#if NBPFILTER > 0
		/*
		 * Pass the packet to any BPF listeners.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m0);
#endif
	}

	if (sc->sc_txpending == (SONIC_NTXDESC - 1)) {
		/* No more slots left; notify upper layer. */
		ifp->if_flags |= IFF_OACTIVE;
	}

	if (sc->sc_txpending != opending) {
		/*
		 * We enqueued packets.  If the transmitter was idle,
		 * reset the txdirty pointer.
		 */
		if (opending == 0)
			sc->sc_txdirty = SONIC_NEXTTX(olasttx);

		/*
		 * Stop the SONIC on the last packet we've set up,
		 * and clear end-of-list on the descriptor previous
		 * to our new chain.
		 *
		 * NOTE: our `seg' variable should still be valid!
		 */
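		/*
		 * Mark the new tail end-of-list before clearing EOL on
		 * the old tail, so the chip never sees an unterminated
		 * descriptor chain.
		 */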
		if (sc->sc_32bit) {
			olseg =
			    sonic32toh(sc, sc->sc_tda32[olasttx].tda_fragcnt);
			sc->sc_tda32[sc->sc_txlast].tda_frags[seg].frag_ptr0 |=
			    htosonic32(sc, TDA_LINK_EOL);
			SONIC_CDTXSYNC32(sc, sc->sc_txlast,
			    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
			sc->sc_tda32[olasttx].tda_frags[olseg].frag_ptr0 &=
			    htosonic32(sc, ~TDA_LINK_EOL);
			SONIC_CDTXSYNC32(sc, olasttx,
			    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
		} else {
			olseg =
			    sonic16toh(sc, sc->sc_tda16[olasttx].tda_fragcnt);
			sc->sc_tda16[sc->sc_txlast].tda_frags[seg].frag_ptr0 |=
			    htosonic16(sc, TDA_LINK_EOL);
			SONIC_CDTXSYNC16(sc, sc->sc_txlast,
			    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
			sc->sc_tda16[olasttx].tda_frags[olseg].frag_ptr0 &=
			    htosonic16(sc, ~TDA_LINK_EOL);
			SONIC_CDTXSYNC16(sc, olasttx,
			    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
		}

		/* Start the transmitter. */
		CSR_WRITE(sc, SONIC_CR, CR_TXP);

		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
	}
}

/*
 * sonic_watchdog:	[ifnet interface function]
 *
 *	Watchdog timer handler.
 */
void
sonic_watchdog(struct ifnet *ifp)
{
	struct sonic_softc *sc = ifp->if_softc;

	printf("%s: device timeout\n", device_xname(sc->sc_dev));
	ifp->if_oerrors++;

	(void)sonic_init(ifp);
}

/*
 * sonic_ioctl:		[ifnet interface function]
 *
 *	Handle control requests from the operator.
 */
int
sonic_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	int s, error;

	s = splnet();

	error = ether_ioctl(ifp, cmd, data);
	if (error == ENETRESET) {
		/*
		 * Multicast list has changed; set the hardware
		 * filter accordingly.
		 */
		if (ifp->if_flags & IFF_RUNNING)
			(void)sonic_init(ifp);
		error = 0;
	}

	splx(s);
	return error;
}

/*
 * sonic_intr:
 *
 *	Interrupt service routine.
 */
int
sonic_intr(void *arg)
{
	struct sonic_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	uint16_t isr;
	int handled = 0, wantinit;

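	/*
	 * Service and acknowledge interrupt causes until the masked ISR
	 * reads back zero; fatal receive conditions and Tx FIFO underruns
	 * force a full reinitialization below.
	 */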
	for (wantinit = 0; wantinit == 0;) {
		isr = CSR_READ(sc, SONIC_ISR) & sc->sc_imr;
		if (isr == 0)
			break;
		CSR_WRITE(sc, SONIC_ISR, isr);	/* ACK */

		handled = 1;

		if (isr & IMR_PRX)
			sonic_rxintr(sc);

		if (isr & (IMR_PTX|IMR_TXER)) {
			if (sonic_txintr(sc) & TCR_FU) {
				printf("%s: transmit FIFO underrun\n",
				    device_xname(sc->sc_dev));
				wantinit = 1;
			}
		}

		if (isr & (IMR_RFO|IMR_RBA|IMR_RBE|IMR_RDE)) {
#define	PRINTERR(bit, str)						\
			if (isr & (bit))				\
				printf("%s: %s\n", device_xname(sc->sc_dev), str)
			PRINTERR(IMR_RFO, "receive FIFO overrun");
			PRINTERR(IMR_RBA, "receive buffer exceeded");
			PRINTERR(IMR_RBE, "receive buffers exhausted");
			PRINTERR(IMR_RDE, "receive descriptors exhausted");
			wantinit = 1;
		}
	}

	if (handled) {
		if (wantinit)
			(void)sonic_init(ifp);
		sonic_start(ifp);
	}

	return handled;
}

/*
 * sonic_txintr:
 *
 *	Helper; handle transmit complete interrupts.
 */
uint16_t
sonic_txintr(struct sonic_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct sonic_descsoft *ds;
	struct sonic_tda32 *tda32;
	struct sonic_tda16 *tda16;
	uint16_t status, totstat = 0;
	int i;

	ifp->if_flags &= ~IFF_OACTIVE;

	for (i = sc->sc_txdirty; sc->sc_txpending != 0;
	     i = SONIC_NEXTTX(i), sc->sc_txpending--) {
		ds = &sc->sc_txsoft[i];

		if (sc->sc_32bit) {
			SONIC_CDTXSYNC32(sc, i,
			    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
			tda32 = &sc->sc_tda32[i];
			status = sonic32toh(sc, tda32->tda_status);
			SONIC_CDTXSYNC32(sc, i, BUS_DMASYNC_PREREAD);
		} else {
			SONIC_CDTXSYNC16(sc, i,
			    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
			tda16 = &sc->sc_tda16[i];
			status = sonic16toh(sc, tda16->tda_status);
			SONIC_CDTXSYNC16(sc, i, BUS_DMASYNC_PREREAD);
		}

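		/*
		 * If no completion status bits are set, the SONIC has not
		 * processed this descriptor yet; stop scanning here.
		 */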
		if ((status & ~(TCR_EXDIS|TCR_CRCI|TCR_POWC|TCR_PINT)) == 0)
			break;

		totstat |= status;

		bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
		    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
		m_freem(ds->ds_mbuf);
		ds->ds_mbuf = NULL;

		/*
		 * Check for errors and collisions.
		 */
		if (status & TCR_PTX)
			ifp->if_opackets++;
		else
			ifp->if_oerrors++;
		ifp->if_collisions += TDA_STATUS_NCOL(status);
	}

	/* Update the dirty transmit buffer pointer. */
	sc->sc_txdirty = i;

	/*
	 * Cancel the watchdog timer if there are no pending
	 * transmissions.
	 */
	if (sc->sc_txpending == 0)
		ifp->if_timer = 0;

	return totstat;
}

/*
 * sonic_rxintr:
 *
 *	Helper; handle receive interrupts.
 */
void
sonic_rxintr(struct sonic_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct sonic_descsoft *ds;
	struct sonic_rda32 *rda32;
	struct sonic_rda16 *rda16;
	struct mbuf *m;
	int i, len;
	uint16_t status, bytecount, ptr0, ptr1, seqno;

	for (i = sc->sc_rxptr;; i = SONIC_NEXTRX(i)) {
		ds = &sc->sc_rxsoft[i];

		if (sc->sc_32bit) {
			SONIC_CDRXSYNC32(sc, i,
			    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
			rda32 = &sc->sc_rda32[i];
			SONIC_CDRXSYNC32(sc, i, BUS_DMASYNC_PREREAD);
			if (rda32->rda_inuse != 0)
				break;
			status = sonic32toh(sc, rda32->rda_status);
			bytecount = sonic32toh(sc, rda32->rda_bytecount);
			ptr0 = sonic32toh(sc, rda32->rda_pkt_ptr0);
			ptr1 = sonic32toh(sc, rda32->rda_pkt_ptr1);
			seqno = sonic32toh(sc, rda32->rda_seqno);
		} else {
			SONIC_CDRXSYNC16(sc, i,
			    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
			rda16 = &sc->sc_rda16[i];
			SONIC_CDRXSYNC16(sc, i, BUS_DMASYNC_PREREAD);
			if (rda16->rda_inuse != 0)
				break;
			status = sonic16toh(sc, rda16->rda_status);
			bytecount = sonic16toh(sc, rda16->rda_bytecount);
			ptr0 = sonic16toh(sc, rda16->rda_pkt_ptr0);
			ptr1 = sonic16toh(sc, rda16->rda_pkt_ptr1);
			seqno = sonic16toh(sc, rda16->rda_seqno);
		}

		/*
		 * Make absolutely sure this is the only packet
		 * in this receive buffer.  Our entire Rx buffer
		 * management scheme depends on this, and if the
		 * SONIC didn't follow our rule, it means we've
		 * misconfigured it.
		 */
		KASSERT(status & RCR_LPKT);

		/*
		 * Make sure the packet arrived OK.  If an error occurred,
		 * update stats and reset the descriptor.  The buffer will
		 * be reused the next time the descriptor comes up in the
		 * ring.
		 */
		if ((status & RCR_PRX) == 0) {
			if (status & RCR_FAER)
				printf("%s: Rx frame alignment error\n",
				    device_xname(sc->sc_dev));
			else if (status & RCR_CRCR)
				printf("%s: Rx CRC error\n",
				    device_xname(sc->sc_dev));
			ifp->if_ierrors++;
			SONIC_INIT_RXDESC(sc, i);
			continue;
		}

		bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
		    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

		/*
		 * The SONIC includes the CRC with every packet.
		 */
		len = bytecount - ETHER_CRC_LEN;

		/*
		 * Ok, if the chip is in 32-bit mode, then receive
		 * buffers must be aligned to 32-bit boundaries,
		 * which means the payload is misaligned.  In this
		 * case, we must allocate a new mbuf, and copy the
		 * packet into it, scooted forward 2 bytes to ensure
		 * proper alignment.
		 *
		 * Note, in 16-bit mode, we can configure the SONIC
		 * to do what we want, and we have.
		 */
#ifndef __NO_STRICT_ALIGNMENT
		if (sc->sc_32bit) {
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL)
				goto dropit;
			if (len > (MHLEN - 2)) {
				MCLGET(m, M_DONTWAIT);
				if ((m->m_flags & M_EXT) == 0)
					goto dropit;
			}
			m->m_data += 2;
			/*
			 * Note that we use a cluster for incoming frames,
			 * so the buffer is virtually contiguous.
			 */
			memcpy(mtod(m, void *), mtod(ds->ds_mbuf, void *),
			    len);
			SONIC_INIT_RXDESC(sc, i);
			bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
			    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
		} else
#endif /* ! __NO_STRICT_ALIGNMENT */
		/*
		 * If the packet is small enough to fit in a single
		 * header mbuf, allocate one and copy the data into
		 * it.  This greatly reduces memory consumption when
		 * we receive lots of small packets.
		 */
		if (sonic_copy_small != 0 && len <= (MHLEN - 2)) {
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL)
				goto dropit;
			m->m_data += 2;
			/*
			 * Note that we use a cluster for incoming frames,
			 * so the buffer is virtually contiguous.
			 */
			memcpy(mtod(m, void *), mtod(ds->ds_mbuf, void *),
			    len);
			SONIC_INIT_RXDESC(sc, i);
			bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
			    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
		} else {
			m = ds->ds_mbuf;
			if (sonic_add_rxbuf(sc, i) != 0) {
 dropit:
				ifp->if_ierrors++;
				SONIC_INIT_RXDESC(sc, i);
				bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
				    ds->ds_dmamap->dm_mapsize,
				    BUS_DMASYNC_PREREAD);
				continue;
			}
		}

		ifp->if_ipackets++;
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = len;

#if NBPFILTER > 0
		/*
		 * Pass this up to any BPF listeners.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif /* NBPFILTER > 0 */

		/* Pass it on. */
		(*ifp->if_input)(ifp, m);
	}

	/* Update the receive pointer. */
	sc->sc_rxptr = i;
	CSR_WRITE(sc, SONIC_RWR, SONIC_CDRRADDR(sc, SONIC_PREVRX(i)));
}

/*
 * sonic_reset:
 *
 *	Perform a soft reset on the SONIC.
 */
void
sonic_reset(struct sonic_softc *sc)
{

	/* stop TX, RX and timer, and ensure RST is clear */
	CSR_WRITE(sc, SONIC_CR, CR_STP | CR_RXDIS | CR_HTX);
	delay(1000);

	CSR_WRITE(sc, SONIC_CR, CR_RST);
	delay(1000);

	/* clear all interrupts */
	CSR_WRITE(sc, SONIC_IMR, 0);
	CSR_WRITE(sc, SONIC_ISR, IMR_ALL);

	CSR_WRITE(sc, SONIC_CR, 0);
	delay(1000);
}

/*
 * sonic_init:		[ifnet interface function]
 *
 *	Initialize the interface.  Must be called at splnet().
 */
int
sonic_init(struct ifnet *ifp)
{
	struct sonic_softc *sc = ifp->if_softc;
	struct sonic_descsoft *ds;
	int i, error = 0;
	uint16_t reg;

	/*
	 * Cancel any pending I/O.
	 */
	sonic_stop(ifp, 0);

	/*
	 * Reset the SONIC to a known state.
	 */
	sonic_reset(sc);

	/*
	 * Bring the SONIC into reset state, and program the DCR.
	 *
	 * Note: We don't bother optimizing the transmit and receive
	 * thresholds, here. TFT/RFT values should be set in MD attachments.
	 */
	reg = sc->sc_dcr;
	if (sc->sc_32bit)
		reg |= DCR_DW;
	CSR_WRITE(sc, SONIC_CR, CR_RST);
	CSR_WRITE(sc, SONIC_DCR, reg);
	CSR_WRITE(sc, SONIC_DCR2, sc->sc_dcr2);
	CSR_WRITE(sc, SONIC_CR, 0);

	/*
	 * Initialize the transmit descriptors.
	 */
	if (sc->sc_32bit) {
		for (i = 0; i < SONIC_NTXDESC; i++) {
			memset(&sc->sc_tda32[i], 0, sizeof(struct sonic_tda32));
			SONIC_CDTXSYNC32(sc, i,
			    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
		}
	} else {
		for (i = 0; i < SONIC_NTXDESC; i++) {
			memset(&sc->sc_tda16[i], 0, sizeof(struct sonic_tda16));
			SONIC_CDTXSYNC16(sc, i,
			    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
		}
	}
	sc->sc_txpending = 0;
	sc->sc_txdirty = 0;
	sc->sc_txlast = SONIC_NTXDESC - 1;

	/*
	 * Initialize the receive descriptor ring.
	 */
	for (i = 0; i < SONIC_NRXDESC; i++) {
		ds = &sc->sc_rxsoft[i];
		if (ds->ds_mbuf == NULL) {
			if ((error = sonic_add_rxbuf(sc, i)) != 0) {
				printf("%s: unable to allocate or map Rx "
				    "buffer %d, error = %d\n",
				    device_xname(sc->sc_dev), i, error);
				/*
				 * XXX Should attempt to run with fewer receive
				 * XXX buffers instead of just failing.
				 */
				sonic_rxdrain(sc);
				goto out;
			}
		} else
			SONIC_INIT_RXDESC(sc, i);
	}
	sc->sc_rxptr = 0;

	/* Give the transmit ring to the SONIC. */
	CSR_WRITE(sc, SONIC_UTDAR, (SONIC_CDTXADDR(sc, 0) >> 16) & 0xffff);
	CSR_WRITE(sc, SONIC_CTDAR, SONIC_CDTXADDR(sc, 0) & 0xffff);

	/* Give the receive descriptor ring to the SONIC. */
	CSR_WRITE(sc, SONIC_URDAR, (SONIC_CDRXADDR(sc, 0) >> 16) & 0xffff);
	CSR_WRITE(sc, SONIC_CRDAR, SONIC_CDRXADDR(sc, 0) & 0xffff);

	/* Give the receive buffer ring to the SONIC. */
	CSR_WRITE(sc, SONIC_URRAR, (SONIC_CDRRADDR(sc, 0) >> 16) & 0xffff);
	CSR_WRITE(sc, SONIC_RSAR, SONIC_CDRRADDR(sc, 0) & 0xffff);
	if (sc->sc_32bit)
		CSR_WRITE(sc, SONIC_REAR,
		    (SONIC_CDRRADDR(sc, SONIC_NRXDESC - 1) +
		    sizeof(struct sonic_rra32)) & 0xffff);
	else
		CSR_WRITE(sc, SONIC_REAR,
		    (SONIC_CDRRADDR(sc, SONIC_NRXDESC - 1) +
		    sizeof(struct sonic_rra16)) & 0xffff);
	CSR_WRITE(sc, SONIC_RRR, SONIC_CDRRADDR(sc, 0) & 0xffff);
	CSR_WRITE(sc, SONIC_RWR, SONIC_CDRRADDR(sc, SONIC_NRXDESC - 1));

	/*
	 * Set the End-Of-Buffer counter such that only one packet
	 * will be placed into each buffer we provide.  Note we are
	 * following the recommendation of section 3.4.4 of the manual
	 * here, and have "lengthened" the receive buffers accordingly.
	 */
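	/* The EOBC value is in 16-bit words, hence the division by two. */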
	if (sc->sc_32bit)
		CSR_WRITE(sc, SONIC_EOBC, (ETHER_MAX_LEN + 2) / 2);
	else
		CSR_WRITE(sc, SONIC_EOBC, (ETHER_MAX_LEN / 2));

	/* Reset the receive sequence counter. */
	CSR_WRITE(sc, SONIC_RSC, 0);

	/* Clear the tally registers. */
	CSR_WRITE(sc, SONIC_CRCETC, 0xffff);
	CSR_WRITE(sc, SONIC_FAET, 0xffff);
	CSR_WRITE(sc, SONIC_MPT, 0xffff);

	/* Set the receive filter. */
	sonic_set_filter(sc);

	/*
	 * Set the interrupt mask register.
	 */
	sc->sc_imr = IMR_RFO | IMR_RBA | IMR_RBE | IMR_RDE |
	    IMR_TXER | IMR_PTX | IMR_PRX;
	CSR_WRITE(sc, SONIC_IMR, sc->sc_imr);

	/*
	 * Start the receive process in motion.  Note, we don't
	 * start the transmit process until we actually try to
	 * transmit packets.
	 */
	CSR_WRITE(sc, SONIC_CR, CR_RXEN | CR_RRRA);

	/*
	 * ...all done!
	 */
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

 out:
	if (error)
		printf("%s: interface not running\n", device_xname(sc->sc_dev));
	return error;
}

/*
 * sonic_rxdrain:
 *
 *	Drain the receive queue.
 */
void
sonic_rxdrain(struct sonic_softc *sc)
{
	struct sonic_descsoft *ds;
	int i;

	for (i = 0; i < SONIC_NRXDESC; i++) {
		ds = &sc->sc_rxsoft[i];
		if (ds->ds_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
			m_freem(ds->ds_mbuf);
			ds->ds_mbuf = NULL;
		}
	}
}

/*
 * sonic_stop:		[ifnet interface function]
 *
 *	Stop transmission on the interface.
 */
void
sonic_stop(struct ifnet *ifp, int disable)
{
	struct sonic_softc *sc = ifp->if_softc;
	struct sonic_descsoft *ds;
	int i;

	/*
	 * Disable interrupts.
	 */
	CSR_WRITE(sc, SONIC_IMR, 0);

	/*
	 * Stop the transmitter, receiver, and timer.
	 */
	CSR_WRITE(sc, SONIC_CR, CR_HTX|CR_RXDIS|CR_STP);
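	/* Wait up to ~2ms for the transmitter, receiver, and timer to idle. */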
	for (i = 0; i < 1000; i++) {
		if ((CSR_READ(sc, SONIC_CR) & (CR_TXP|CR_RXEN|CR_ST)) == 0)
			break;
		delay(2);
	}
	if ((CSR_READ(sc, SONIC_CR) & (CR_TXP|CR_RXEN|CR_ST)) != 0)
		printf("%s: SONIC failed to stop\n", device_xname(sc->sc_dev));

	/*
	 * Release any queued transmit buffers.
	 */
	for (i = 0; i < SONIC_NTXDESC; i++) {
		ds = &sc->sc_txsoft[i];
		if (ds->ds_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
			m_freem(ds->ds_mbuf);
			ds->ds_mbuf = NULL;
		}
	}

	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;

	if (disable)
		sonic_rxdrain(sc);
}

/*
 * sonic_add_rxbuf:
 *
 *	Add a receive buffer to the indicated descriptor.
 */
int
sonic_add_rxbuf(struct sonic_softc *sc, int idx)
{
	struct sonic_descsoft *ds = &sc->sc_rxsoft[idx];
	struct mbuf *m;
	int error;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return ENOBUFS;

	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return ENOBUFS;
	}

	if (ds->ds_mbuf != NULL)
		bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);

	ds->ds_mbuf = m;

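	/*
	 * Map the entire cluster; the EOBC setting made in sonic_init()
	 * ensures the chip places at most one packet in each buffer.
	 */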
	error = bus_dmamap_load(sc->sc_dmat, ds->ds_dmamap,
	    m->m_ext.ext_buf, m->m_ext.ext_size, NULL,
	    BUS_DMA_READ|BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: can't load rx DMA map %d, error = %d\n",
		    device_xname(sc->sc_dev), idx, error);
		panic("sonic_add_rxbuf");	/* XXX */
	}

	bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
	    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);

	SONIC_INIT_RXDESC(sc, idx);

	return 0;
}

static void
sonic_set_camentry(struct sonic_softc *sc, int entry, const uint8_t *enaddr)
{

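	/* CAM entries hold the address as three 16-bit words, low byte first. */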
	if (sc->sc_32bit) {
		struct sonic_cda32 *cda = &sc->sc_cda32[entry];

		cda->cda_entry = htosonic32(sc, entry);
		cda->cda_addr0 = htosonic32(sc, enaddr[0] | (enaddr[1] << 8));
		cda->cda_addr1 = htosonic32(sc, enaddr[2] | (enaddr[3] << 8));
		cda->cda_addr2 = htosonic32(sc, enaddr[4] | (enaddr[5] << 8));
	} else {
		struct sonic_cda16 *cda = &sc->sc_cda16[entry];

		cda->cda_entry = htosonic16(sc, entry);
		cda->cda_addr0 = htosonic16(sc, enaddr[0] | (enaddr[1] << 8));
		cda->cda_addr1 = htosonic16(sc, enaddr[2] | (enaddr[3] << 8));
		cda->cda_addr2 = htosonic16(sc, enaddr[4] | (enaddr[5] << 8));
	}
}

/*
 * sonic_set_filter:
 *
 *	Set the SONIC receive filter.
 */
void
sonic_set_filter(struct sonic_softc *sc)
{
	struct ethercom *ec = &sc->sc_ethercom;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	int i, entry = 0;
	uint16_t camvalid = 0;
	uint16_t rcr = 0;

	if (ifp->if_flags & IFF_BROADCAST)
		rcr |= RCR_BRD;

	if (ifp->if_flags & IFF_PROMISC) {
		rcr |= RCR_PRO;
		goto allmulti;
	}

	/* Put our station address in the first CAM slot. */
	sonic_set_camentry(sc, entry, CLLADDR(ifp->if_sadl));
	camvalid |= (1U << entry);
	entry++;

	/* Add the multicast addresses to the CAM. */
	ETHER_FIRST_MULTI(step, ec, enm);
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			/*
			 * We must listen to a range of multicast addresses.
			 * The only way to do this on the SONIC is to enable
			 * reception of all multicast packets.
			 */
			goto allmulti;
		}

		if (entry == SONIC_NCAMENT) {
			/*
			 * Out of CAM slots.  Have to enable reception
			 * of all multicast addresses.
			 */
			goto allmulti;
		}

		sonic_set_camentry(sc, entry, enm->enm_addrlo);
		camvalid |= (1U << entry);
		entry++;

		ETHER_NEXT_MULTI(step, enm);
	}

	ifp->if_flags &= ~IFF_ALLMULTI;
	goto setit;

 allmulti:
	/* Use only the first CAM slot (station address). */
	camvalid = 0x0001;
	entry = 1;
	rcr |= RCR_AMC;

 setit:
	/*
	 * Store the CAM Enable mask where the LCAM command will pick it
	 * up, directly after the last CAM descriptor to be loaded: in the
	 * dedicated enable slot when the CAM is full, otherwise in the
	 * entry field of the first unused descriptor.
	 */
	if (sc->sc_32bit) {
		if (entry == SONIC_NCAMENT)
			sc->sc_cdaenable32 = htosonic32(sc, camvalid);
		else
			sc->sc_cda32[entry].cda_entry =
			    htosonic32(sc, camvalid);
	} else {
		if (entry == SONIC_NCAMENT)
			sc->sc_cdaenable16 = htosonic16(sc, camvalid);
		else
			sc->sc_cda16[entry].cda_entry =
			    htosonic16(sc, camvalid);
	}

	/* Load the CAM. */
	SONIC_CDCAMSYNC(sc, BUS_DMASYNC_PREWRITE);
	CSR_WRITE(sc, SONIC_CDP, SONIC_CDCAMADDR(sc) & 0xffff);
	CSR_WRITE(sc, SONIC_CDC, entry);
	CSR_WRITE(sc, SONIC_CR, CR_LCAM);
	for (i = 0; i < 10000; i++) {
		if ((CSR_READ(sc, SONIC_CR) & CR_LCAM) == 0)
			break;
		delay(2);
	}
	if (CSR_READ(sc, SONIC_CR) & CR_LCAM)
		printf("%s: CAM load failed\n", device_xname(sc->sc_dev));
	SONIC_CDCAMSYNC(sc, BUS_DMASYNC_POSTWRITE);

	/* Set the receive control register. */
	CSR_WRITE(sc, SONIC_RCR, rcr);
}
1269