xref: /netbsd-src/sys/dev/ic/dp83932.c (revision d16b7486a53dcb8072b60ec6fcb4373a2d0c27b7)
1 /*	$NetBSD: dp83932.c,v 1.49 2022/09/25 18:43:32 thorpej Exp $	*/
2 
3 /*-
4  * Copyright (c) 2001 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Jason R. Thorpe.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29  * POSSIBILITY OF SUCH DAMAGE.
30  */
31 
32 /*
33  * Device driver for the National Semiconductor DP83932
34  * Systems-Oriented Network Interface Controller (SONIC).
35  */
36 
37 #include <sys/cdefs.h>
38 __KERNEL_RCSID(0, "$NetBSD: dp83932.c,v 1.49 2022/09/25 18:43:32 thorpej Exp $");
39 
40 
41 #include <sys/param.h>
42 #include <sys/systm.h>
43 #include <sys/mbuf.h>
44 #include <sys/kernel.h>
45 #include <sys/socket.h>
46 #include <sys/ioctl.h>
47 #include <sys/errno.h>
48 #include <sys/device.h>
49 
50 #include <sys/rndsource.h>
51 
52 #include <net/if.h>
53 #include <net/if_dl.h>
54 #include <net/if_ether.h>
55 
56 #include <net/bpf.h>
57 
58 #include <sys/bus.h>
59 #include <sys/intr.h>
60 
61 #include <dev/ic/dp83932reg.h>
62 #include <dev/ic/dp83932var.h>
63 
64 static void	sonic_start(struct ifnet *);
65 static void	sonic_watchdog(struct ifnet *);
66 static int	sonic_ioctl(struct ifnet *, u_long, void *);
67 static int	sonic_init(struct ifnet *);
68 static void	sonic_stop(struct ifnet *, int);
69 
70 static bool	sonic_shutdown(device_t, int);
71 
72 static void	sonic_reset(struct sonic_softc *);
73 static void	sonic_rxdrain(struct sonic_softc *);
74 static int	sonic_add_rxbuf(struct sonic_softc *, int);
75 static void	sonic_set_filter(struct sonic_softc *);
76 
77 static uint16_t sonic_txintr(struct sonic_softc *);
78 static void	sonic_rxintr(struct sonic_softc *);
79 
80 int	sonic_copy_small = 0;
81 
82 #define ETHER_PAD_LEN (ETHER_MIN_LEN - ETHER_CRC_LEN)
83 
84 /*
85  * sonic_attach:
86  *
87  *	Attach a SONIC interface to the system.
88  */
89 void
90 sonic_attach(struct sonic_softc *sc, const uint8_t *enaddr)
91 {
92 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
93 	int i, rseg, error;
94 	bus_dma_segment_t seg;
95 	size_t cdatasize;
96 	uint8_t *nullbuf;
97 
98 	/*
99 	 * Allocate the control data structures, and create and load the
100 	 * DMA map for it.
101 	 */
102 	if (sc->sc_32bit)
103 		cdatasize = sizeof(struct sonic_control_data32);
104 	else
105 		cdatasize = sizeof(struct sonic_control_data16);
106 
107 	if ((error = bus_dmamem_alloc(sc->sc_dmat, cdatasize + ETHER_PAD_LEN,
108 	     PAGE_SIZE, (64 * 1024), &seg, 1, &rseg,
109 	     BUS_DMA_NOWAIT)) != 0) {
110 		aprint_error_dev(sc->sc_dev,
111 		    "unable to allocate control data, error = %d\n", error);
112 		goto fail_0;
113 	}
114 
115 	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
116 	    cdatasize + ETHER_PAD_LEN, (void **) &sc->sc_cdata16,
117 	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
118 		aprint_error_dev(sc->sc_dev,
119 		    "unable to map control data, error = %d\n", error);
120 		goto fail_1;
121 	}
122 	nullbuf = (uint8_t *)sc->sc_cdata16 + cdatasize;
123 	memset(nullbuf, 0, ETHER_PAD_LEN);
124 
125 	if ((error = bus_dmamap_create(sc->sc_dmat,
126 	     cdatasize, 1, cdatasize, 0, BUS_DMA_NOWAIT,
127 	     &sc->sc_cddmamap)) != 0) {
128 		aprint_error_dev(sc->sc_dev,
129 		    "unable to create control data DMA map, error = %d\n",
130 		    error);
131 		goto fail_2;
132 	}
133 
134 	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
135 	     sc->sc_cdata16, cdatasize, NULL, BUS_DMA_NOWAIT)) != 0) {
136 		aprint_error_dev(sc->sc_dev,
137 		    "unable to load control data DMA map, error = %d\n", error);
138 		goto fail_3;
139 	}
140 
141 	/*
142 	 * Create the transmit buffer DMA maps.
143 	 */
144 	for (i = 0; i < SONIC_NTXDESC; i++) {
145 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
146 		     SONIC_NTXFRAGS, MCLBYTES, 0, BUS_DMA_NOWAIT,
147 		     &sc->sc_txsoft[i].ds_dmamap)) != 0) {
148 			aprint_error_dev(sc->sc_dev,
149 			    "unable to create tx DMA map %d, error = %d\n",
150 			    i, error);
151 			goto fail_4;
152 		}
153 	}
154 
155 	/*
156 	 * Create the receive buffer DMA maps.
157 	 */
158 	for (i = 0; i < SONIC_NRXDESC; i++) {
159 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
160 		     MCLBYTES, 0, BUS_DMA_NOWAIT,
161 		     &sc->sc_rxsoft[i].ds_dmamap)) != 0) {
162 			aprint_error_dev(sc->sc_dev,
163 			    "unable to create rx DMA map %d, error = %d\n",
164 			    i, error);
165 			goto fail_5;
166 		}
167 		sc->sc_rxsoft[i].ds_mbuf = NULL;
168 	}
169 
170 	/*
171 	 * create and map the pad buffer
172 	 */
173 	if ((error = bus_dmamap_create(sc->sc_dmat, ETHER_PAD_LEN, 1,
174 	    ETHER_PAD_LEN, 0, BUS_DMA_NOWAIT, &sc->sc_nulldmamap)) != 0) {
175 		aprint_error_dev(sc->sc_dev,
176 		    "unable to create pad buffer DMA map, error = %d\n", error);
177 		goto fail_5;
178 	}
179 
180 	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_nulldmamap,
181 	    nullbuf, ETHER_PAD_LEN, NULL, BUS_DMA_NOWAIT)) != 0) {
182 		aprint_error_dev(sc->sc_dev,
183 		    "unable to load pad buffer DMA map, error = %d\n", error);
184 		goto fail_6;
185 	}
186 	bus_dmamap_sync(sc->sc_dmat, sc->sc_nulldmamap, 0, ETHER_PAD_LEN,
187 	    BUS_DMASYNC_PREWRITE);
188 
189 	/*
190 	 * Reset the chip to a known state.
191 	 */
192 	sonic_reset(sc);
193 
194 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
195 	    ether_sprintf(enaddr));
196 
197 	strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
198 	ifp->if_softc = sc;
199 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
200 	ifp->if_ioctl = sonic_ioctl;
201 	ifp->if_start = sonic_start;
202 	ifp->if_watchdog = sonic_watchdog;
203 	ifp->if_init = sonic_init;
204 	ifp->if_stop = sonic_stop;
205 	IFQ_SET_READY(&ifp->if_snd);
206 
207 	/*
208 	 * We can support 802.1Q VLAN-sized frames.
209 	 */
210 	sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU;
211 
212 	/*
213 	 * Attach the interface.
214 	 */
215 	if_attach(ifp);
216 	if_deferred_start_init(ifp, NULL);
217 	ether_ifattach(ifp, enaddr);
218 
219 	rnd_attach_source(&sc->sc_rndsource, ifp->if_xname, RND_TYPE_NET,
220 	    RND_FLAG_DEFAULT);
221 
222 	/*
223 	 * Make sure the interface is shutdown during reboot.
224 	 */
225 	if (pmf_device_register1(sc->sc_dev, NULL, NULL, sonic_shutdown))
226 		pmf_class_network_register(sc->sc_dev, ifp);
227 	else
228 		aprint_error_dev(sc->sc_dev,
229 		    "couldn't establish power handler\n");
230 
231 	return;
232 
233 	/*
234 	 * Free any resources we've allocated during the failed attach
235 	 * attempt.  Do this in reverse order and fall through.
236 	 */
237  fail_6:
238 	bus_dmamap_destroy(sc->sc_dmat, sc->sc_nulldmamap);
239  fail_5:
240 	for (i = 0; i < SONIC_NRXDESC; i++) {
241 		if (sc->sc_rxsoft[i].ds_dmamap != NULL)
242 			bus_dmamap_destroy(sc->sc_dmat,
243 			    sc->sc_rxsoft[i].ds_dmamap);
244 	}
245  fail_4:
246 	for (i = 0; i < SONIC_NTXDESC; i++) {
247 		if (sc->sc_txsoft[i].ds_dmamap != NULL)
248 			bus_dmamap_destroy(sc->sc_dmat,
249 			    sc->sc_txsoft[i].ds_dmamap);
250 	}
251 	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
252  fail_3:
253 	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
254  fail_2:
255 	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_cdata16, cdatasize);
256  fail_1:
257 	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
258  fail_0:
259 	return;
260 }
261 
262 /*
263  * sonic_shutdown:
264  *
265  *	Make sure the interface is stopped at reboot.
266  */
267 bool
268 sonic_shutdown(device_t self, int howto)
269 {
270 	struct sonic_softc *sc = device_private(self);
271 
272 	sonic_stop(&sc->sc_ethercom.ec_if, 1);
273 
274 	return true;
275 }
276 
277 /*
278  * sonic_start:		[ifnet interface function]
279  *
280  *	Start packet transmission on the interface.
281  */
/*
 * sonic_start:		[ifnet interface function]
 *
 *	Start packet transmission on the interface.
 *
 *	Dequeues packets from the send queue and builds SONIC transmit
 *	descriptors for them (16- or 32-bit layout depending on
 *	sc_32bit), padding short frames to ETHER_PAD_LEN with the
 *	pre-loaded null buffer.  When new descriptors were queued, the
 *	end-of-list marker is moved from the previous chain tail to the
 *	new one and the transmitter is kicked with CR_TXP.
 */
void
sonic_start(struct ifnet *ifp)
{
	struct sonic_softc *sc = ifp->if_softc;
	struct mbuf *m0, *m;
	struct sonic_tda16 *tda16;
	struct sonic_tda32 *tda32;
	struct sonic_descsoft *ds;
	bus_dmamap_t dmamap;
	int error, olasttx, nexttx, opending, totlen, olseg;
	int seg = 0;	/* XXX: gcc */

	/* Nothing to do unless the interface is up and running. */
	if ((ifp->if_flags & IFF_RUNNING) != IFF_RUNNING)
		return;

	/*
	 * Remember the previous txpending and the current "last txdesc
	 * used" index.
	 */
	opending = sc->sc_txpending;
	olasttx = sc->sc_txlast;

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.  Leave one at the end for sanity's sake.
	 */
	while (sc->sc_txpending < (SONIC_NTXDESC - 1)) {
		/*
		 * Grab a packet off the queue.  Only POLL here; it is
		 * not dequeued until we are committed to sending it.
		 */
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;
		m = NULL;

		/*
		 * Get the next available transmit descriptor.
		 */
		nexttx = SONIC_NEXTTX(sc->sc_txlast);
		ds = &sc->sc_txsoft[nexttx];
		dmamap = ds->ds_dmamap;

		/*
		 * Load the DMA map.  If this fails, the packet either
		 * didn't fit in the allotted number of frags, or we were
		 * short on resources.  In this case, we'll copy and try
		 * again.  Also copy when a short frame already uses all
		 * SONIC_NTXFRAGS, since padding needs one extra fragment.
		 */
		if ((error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
		    BUS_DMA_WRITE | BUS_DMA_NOWAIT)) != 0 ||
		    (m0->m_pkthdr.len < ETHER_PAD_LEN &&
		    dmamap->dm_nsegs == SONIC_NTXFRAGS)) {
			if (error == 0)
				bus_dmamap_unload(sc->sc_dmat, dmamap);
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				printf("%s: unable to allocate Tx mbuf\n",
				    device_xname(sc->sc_dev));
				break;
			}
			if (m0->m_pkthdr.len > MHLEN) {
				MCLGET(m, M_DONTWAIT);
				if ((m->m_flags & M_EXT) == 0) {
					printf("%s: unable to allocate Tx "
					    "cluster\n",
					    device_xname(sc->sc_dev));
					m_freem(m);
					break;
				}
			}
			/* Flatten the chain into the single new mbuf. */
			m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, void *));
			m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;
			error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap,
			    m, BUS_DMA_WRITE | BUS_DMA_NOWAIT);
			if (error) {
				printf("%s: unable to load Tx buffer, "
				    "error = %d\n", device_xname(sc->sc_dev),
				    error);
				m_freem(m);
				break;
			}
		}
		IFQ_DEQUEUE(&ifp->if_snd, m0);
		if (m != NULL) {
			/* We copied; free the original chain. */
			m_freem(m0);
			m0 = m;
		}

		/*
		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
		 */

		/* Sync the DMA map. */
		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		/*
		 * Store a pointer to the packet so we can free it later.
		 */
		ds->ds_mbuf = m0;

		/*
		 * Initialize the transmit descriptor: one fragment entry
		 * per DMA segment, each split into high/low 16-bit
		 * address halves, plus (if the frame is short) one extra
		 * fragment pointing at the zero-filled pad buffer.
		 */
		totlen = 0;
		if (sc->sc_32bit) {
			tda32 = &sc->sc_tda32[nexttx];
			for (seg = 0; seg < dmamap->dm_nsegs; seg++) {
				tda32->tda_frags[seg].frag_ptr1 =
				    htosonic32(sc,
				    (dmamap->dm_segs[seg].ds_addr >> 16) &
				    0xffff);
				tda32->tda_frags[seg].frag_ptr0 =
				    htosonic32(sc,
				    dmamap->dm_segs[seg].ds_addr & 0xffff);
				tda32->tda_frags[seg].frag_size =
				    htosonic32(sc, dmamap->dm_segs[seg].ds_len);
				totlen += dmamap->dm_segs[seg].ds_len;
			}
			if (totlen < ETHER_PAD_LEN) {
				/* Pad out to the minimum frame length. */
				tda32->tda_frags[seg].frag_ptr1 =
				    htosonic32(sc,
				    (sc->sc_nulldma >> 16) & 0xffff);
				tda32->tda_frags[seg].frag_ptr0 =
				    htosonic32(sc, sc->sc_nulldma & 0xffff);
				tda32->tda_frags[seg].frag_size =
				    htosonic32(sc, ETHER_PAD_LEN - totlen);
				totlen = ETHER_PAD_LEN;
				seg++;
			}

			tda32->tda_status = 0;
			tda32->tda_pktconfig = 0;
			tda32->tda_pktsize = htosonic32(sc, totlen);
			tda32->tda_fragcnt = htosonic32(sc, seg);

			/* Link it up. */
			tda32->tda_frags[seg].frag_ptr0 =
			    htosonic32(sc, SONIC_CDTXADDR32(sc,
			    SONIC_NEXTTX(nexttx)) & 0xffff);

			/* Sync the Tx descriptor. */
			SONIC_CDTXSYNC32(sc, nexttx,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		} else {
			tda16 = &sc->sc_tda16[nexttx];
			for (seg = 0; seg < dmamap->dm_nsegs; seg++) {
				tda16->tda_frags[seg].frag_ptr1 =
				    htosonic16(sc,
				    (dmamap->dm_segs[seg].ds_addr >> 16) &
				    0xffff);
				tda16->tda_frags[seg].frag_ptr0 =
				    htosonic16(sc,
				    dmamap->dm_segs[seg].ds_addr & 0xffff);
				tda16->tda_frags[seg].frag_size =
				    htosonic16(sc, dmamap->dm_segs[seg].ds_len);
				totlen += dmamap->dm_segs[seg].ds_len;
			}
			if (totlen < ETHER_PAD_LEN) {
				/* Pad out to the minimum frame length. */
				tda16->tda_frags[seg].frag_ptr1 =
				    htosonic16(sc,
				    (sc->sc_nulldma >> 16) & 0xffff);
				tda16->tda_frags[seg].frag_ptr0 =
				    htosonic16(sc, sc->sc_nulldma & 0xffff);
				tda16->tda_frags[seg].frag_size =
				    htosonic16(sc, ETHER_PAD_LEN - totlen);
				totlen = ETHER_PAD_LEN;
				seg++;
			}

			tda16->tda_status = 0;
			tda16->tda_pktconfig = 0;
			tda16->tda_pktsize = htosonic16(sc, totlen);
			tda16->tda_fragcnt = htosonic16(sc, seg);

			/* Link it up. */
			tda16->tda_frags[seg].frag_ptr0 =
			    htosonic16(sc, SONIC_CDTXADDR16(sc,
			    SONIC_NEXTTX(nexttx)) & 0xffff);

			/* Sync the Tx descriptor. */
			SONIC_CDTXSYNC16(sc, nexttx,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		}

		/* Advance the Tx pointer. */
		sc->sc_txpending++;
		sc->sc_txlast = nexttx;

		/*
		 * Pass the packet to any BPF listeners.
		 */
		bpf_mtap(ifp, m0, BPF_D_OUT);
	}

	if (sc->sc_txpending != opending) {
		/*
		 * We enqueued packets.  If the transmitter was idle,
		 * reset the txdirty pointer.
		 */
		if (opending == 0)
			sc->sc_txdirty = SONIC_NEXTTX(olasttx);

		/*
		 * Stop the SONIC on the last packet we've set up,
		 * and clear end-of-list on the descriptor previous
		 * to our new chain.
		 *
		 * NOTE: our `seg' variable should still be valid!
		 */
		if (sc->sc_32bit) {
			olseg =
			    sonic32toh(sc, sc->sc_tda32[olasttx].tda_fragcnt);
			sc->sc_tda32[sc->sc_txlast].tda_frags[seg].frag_ptr0 |=
			    htosonic32(sc, TDA_LINK_EOL);
			SONIC_CDTXSYNC32(sc, sc->sc_txlast,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			sc->sc_tda32[olasttx].tda_frags[olseg].frag_ptr0 &=
			    htosonic32(sc, ~TDA_LINK_EOL);
			SONIC_CDTXSYNC32(sc, olasttx,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		} else {
			olseg =
			    sonic16toh(sc, sc->sc_tda16[olasttx].tda_fragcnt);
			sc->sc_tda16[sc->sc_txlast].tda_frags[seg].frag_ptr0 |=
			    htosonic16(sc, TDA_LINK_EOL);
			SONIC_CDTXSYNC16(sc, sc->sc_txlast,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			sc->sc_tda16[olasttx].tda_frags[olseg].frag_ptr0 &=
			    htosonic16(sc, ~TDA_LINK_EOL);
			SONIC_CDTXSYNC16(sc, olasttx,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		}

		/* Start the transmitter. */
		CSR_WRITE(sc, SONIC_CR, CR_TXP);

		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
	}
}
524 
525 /*
526  * sonic_watchdog:	[ifnet interface function]
527  *
528  *	Watchdog timer handler.
529  */
530 void
531 sonic_watchdog(struct ifnet *ifp)
532 {
533 	struct sonic_softc *sc = ifp->if_softc;
534 
535 	printf("%s: device timeout\n", device_xname(sc->sc_dev));
536 	if_statinc(ifp, if_oerrors);
537 
538 	(void)sonic_init(ifp);
539 }
540 
541 /*
542  * sonic_ioctl:		[ifnet interface function]
543  *
544  *	Handle control requests from the operator.
545  */
546 int
547 sonic_ioctl(struct ifnet *ifp, u_long cmd, void *data)
548 {
549 	int s, error;
550 
551 	s = splnet();
552 
553 	error = ether_ioctl(ifp, cmd, data);
554 	if (error == ENETRESET) {
555 		/*
556 		 * Multicast list has changed; set the hardware
557 		 * filter accordingly.
558 		 */
559 		if (ifp->if_flags & IFF_RUNNING)
560 			(void)sonic_init(ifp);
561 		error = 0;
562 	}
563 
564 	splx(s);
565 	return error;
566 }
567 
568 /*
569  * sonic_intr:
570  *
571  *	Interrupt service routine.
572  */
/*
 * sonic_intr:
 *
 *	Interrupt service routine.
 *
 *	Loops reading and acknowledging ISR bits (masked by sc_imr)
 *	until none remain, dispatching to the Rx/Tx helpers.  On fatal
 *	conditions (Tx FIFO underrun or Rx resource exhaustion) the
 *	loop exits and the chip is reinitialized.  Returns nonzero if
 *	any interrupt was handled.
 */
int
sonic_intr(void *arg)
{
	struct sonic_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	uint16_t isr;
	int handled = 0, wantinit;

	for (wantinit = 0; wantinit == 0;) {
		isr = CSR_READ(sc, SONIC_ISR) & sc->sc_imr;
		if (isr == 0)
			break;
		CSR_WRITE(sc, SONIC_ISR, isr);	/* ACK */

		handled = 1;

		/* Packet(s) received. */
		if (isr & IMR_PRX)
			sonic_rxintr(sc);

		/* Packet(s) transmitted, or a transmit error occurred. */
		if (isr & (IMR_PTX | IMR_TXER)) {
			if (sonic_txintr(sc) & TCR_FU) {
				/* FIFO underrun: reinit to recover. */
				printf("%s: transmit FIFO underrun\n",
				    device_xname(sc->sc_dev));
				wantinit = 1;
			}
		}

		/* Receive resource errors: report and schedule a reinit. */
		if (isr & (IMR_RFO | IMR_RBA | IMR_RBE | IMR_RDE)) {
#define	PRINTERR(bit, str)						\
			if (isr & (bit))				\
				printf("%s: %s\n",device_xname(sc->sc_dev), str)
			PRINTERR(IMR_RFO, "receive FIFO overrun");
			PRINTERR(IMR_RBA, "receive buffer exceeded");
			PRINTERR(IMR_RBE, "receive buffers exhausted");
			PRINTERR(IMR_RDE, "receive descriptors exhausted");
			wantinit = 1;
		}
	}

	if (handled) {
		if (wantinit)
			(void)sonic_init(ifp);
		/* Tx slots may have freed up; try to queue more packets. */
		if_schedule_deferred_start(ifp);
	}

	return handled;
}
620 
621 /*
622  * sonic_txintr:
623  *
624  *	Helper; handle transmit complete interrupts.
625  */
/*
 * sonic_txintr:
 *
 *	Helper; handle transmit complete interrupts.
 *
 *	Walks the dirty section of the Tx ring, reclaiming descriptors
 *	the chip has completed (status word written back nonzero),
 *	unloading and freeing their mbufs, and updating interface
 *	statistics.  Returns the OR of all completed descriptors'
 *	status words so the caller can detect errors such as TCR_FU.
 */
uint16_t
sonic_txintr(struct sonic_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct sonic_descsoft *ds;
	struct sonic_tda32 *tda32;
	struct sonic_tda16 *tda16;
	uint16_t status, totstat = 0;
	int i, count;

	count = 0;
	for (i = sc->sc_txdirty; sc->sc_txpending != 0;
	     i = SONIC_NEXTTX(i), sc->sc_txpending--) {
		ds = &sc->sc_txsoft[i];

		/* Sync and fetch the descriptor's status word. */
		if (sc->sc_32bit) {
			SONIC_CDTXSYNC32(sc, i,
			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
			tda32 = &sc->sc_tda32[i];
			status = sonic32toh(sc, tda32->tda_status);
			SONIC_CDTXSYNC32(sc, i, BUS_DMASYNC_PREREAD);
		} else {
			SONIC_CDTXSYNC16(sc, i,
			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
			tda16 = &sc->sc_tda16[i];
			status = sonic16toh(sc, tda16->tda_status);
			SONIC_CDTXSYNC16(sc, i, BUS_DMASYNC_PREREAD);
		}

		/*
		 * If no status bits other than the programmed config
		 * bits are set, the chip hasn't processed this
		 * descriptor yet; stop here.
		 */
		if ((status & ~(TCR_EXDIS |TCR_CRCI |TCR_POWC |TCR_PINT)) == 0)
			break;

		totstat |= status;

		/* Done with this packet; unload and free it. */
		bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
		    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
		m_freem(ds->ds_mbuf);
		ds->ds_mbuf = NULL;

		/*
		 * Check for errors and collisions.
		 */
		net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
		if (status & TCR_PTX) {
			if_statinc_ref(nsr, if_opackets);
			count++;
		} else
			if_statinc_ref(nsr, if_oerrors);
		if (TDA_STATUS_NCOL(status))
			if_statadd_ref(nsr, if_collisions,
			    TDA_STATUS_NCOL(status));
		IF_STAT_PUTREF(ifp);
	}

	/* Update the dirty transmit buffer pointer. */
	sc->sc_txdirty = i;

	/*
	 * Cancel the watchdog timer if there are no pending
	 * transmissions.
	 */
	if (sc->sc_txpending == 0)
		ifp->if_timer = 0;

	/* Feed the accumulated status into the entropy pool. */
	if (totstat != 0)
		rnd_add_uint32(&sc->sc_rndsource, totstat);

	return totstat;
}
696 
697 /*
698  * sonic_rxintr:
699  *
700  *	Helper; handle receive interrupts.
701  */
/*
 * sonic_rxintr:
 *
 *	Helper; handle receive interrupts.
 *
 *	Walks the Rx descriptor ring from sc_rxptr, passing each
 *	successfully received packet up the stack.  Small packets (and,
 *	on strict-alignment machines in 32-bit mode, all packets) are
 *	copied into a fresh mbuf so the payload is 4-byte aligned after
 *	the 14-byte Ethernet header; otherwise the receive buffer is
 *	handed up directly and replaced via sonic_add_rxbuf().
 */
void
sonic_rxintr(struct sonic_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct sonic_descsoft *ds;
	struct sonic_rda32 *rda32;
	struct sonic_rda16 *rda16;
	struct mbuf *m;
	int i, len, count;
	uint16_t status, bytecount /*, ptr0, ptr1, seqno */;

	count = 0;
	for (i = sc->sc_rxptr;; i = SONIC_NEXTRX(i)) {
		ds = &sc->sc_rxsoft[i];

		/*
		 * Sync the descriptor and check the in-use word; a
		 * nonzero value means the chip still owns it, so we
		 * have consumed all completed packets.
		 */
		if (sc->sc_32bit) {
			SONIC_CDRXSYNC32(sc, i,
			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
			rda32 = &sc->sc_rda32[i];
			SONIC_CDRXSYNC32(sc, i, BUS_DMASYNC_PREREAD);
			if (rda32->rda_inuse != 0)
				break;
			status = sonic32toh(sc, rda32->rda_status);
			bytecount = sonic32toh(sc, rda32->rda_bytecount);
			/* ptr0 = sonic32toh(sc, rda32->rda_pkt_ptr0); */
			/* ptr1 = sonic32toh(sc, rda32->rda_pkt_ptr1); */
			/* seqno = sonic32toh(sc, rda32->rda_seqno); */
		} else {
			SONIC_CDRXSYNC16(sc, i,
			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
			rda16 = &sc->sc_rda16[i];
			SONIC_CDRXSYNC16(sc, i, BUS_DMASYNC_PREREAD);
			if (rda16->rda_inuse != 0)
				break;
			status = sonic16toh(sc, rda16->rda_status);
			bytecount = sonic16toh(sc, rda16->rda_bytecount);
			/* ptr0 = sonic16toh(sc, rda16->rda_pkt_ptr0); */
			/* ptr1 = sonic16toh(sc, rda16->rda_pkt_ptr1); */
			/* seqno = sonic16toh(sc, rda16->rda_seqno); */
		}

		/*
		 * Make absolutely sure this is the only packet
		 * in this receive buffer.  Our entire Rx buffer
		 * management scheme depends on this, and if the
		 * SONIC didn't follow our rule, it means we've
		 * misconfigured it.
		 */
		KASSERT(status & RCR_LPKT);

		/*
		 * Make sure the packet arrived OK.  If an error occurred,
		 * update stats and reset the descriptor.  The buffer will
		 * be reused the next time the descriptor comes up in the
		 * ring.
		 */
		if ((status & RCR_PRX) == 0) {
			if (status & RCR_FAER)
				printf("%s: Rx frame alignment error\n",
				    device_xname(sc->sc_dev));
			else if (status & RCR_CRCR)
				printf("%s: Rx CRC error\n",
				    device_xname(sc->sc_dev));
			if_statinc(ifp, if_ierrors);
			SONIC_INIT_RXDESC(sc, i);
			continue;
		}

		bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
		    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

		/*
		 * The SONIC includes the CRC with every packet.
		 */
		len = bytecount - ETHER_CRC_LEN;

		/*
		 * Ok, if the chip is in 32-bit mode, then receive
		 * buffers must be aligned to 32-bit boundaries,
		 * which means the payload is misaligned.  In this
		 * case, we must allocate a new mbuf, and copy the
		 * packet into it, scooted forward 2 bytes to ensure
		 * proper alignment.
		 *
		 * Note, in 16-bit mode, we can configure the SONIC
		 * to do what we want, and we have.
		 */
#ifndef __NO_STRICT_ALIGNMENT
		if (sc->sc_32bit) {
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL)
				goto dropit;
			if (len > (MHLEN - 2)) {
				MCLGET(m, M_DONTWAIT);
				if ((m->m_flags & M_EXT) == 0) {
					m_freem(m);
					goto dropit;
				}
			}
			m->m_data += 2;
			/*
			 * Note that we use a cluster for incoming frames,
			 * so the buffer is virtually contiguous.
			 */
			memcpy(mtod(m, void *), mtod(ds->ds_mbuf, void *),
			    len);
			/* Reuse the original buffer for the next packet. */
			SONIC_INIT_RXDESC(sc, i);
			bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
			    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
		} else
#endif /* ! __NO_STRICT_ALIGNMENT */
		/*
		 * If the packet is small enough to fit in a single
		 * header mbuf, allocate one and copy the data into
		 * it.  This greatly reduces memory consumption when
		 * we receive lots of small packets.
		 */
		if (sonic_copy_small != 0 && len <= (MHLEN - 2)) {
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL)
				goto dropit;
			m->m_data += 2;
			/*
			 * Note that we use a cluster for incoming frames,
			 * so the buffer is virtually contiguous.
			 */
			memcpy(mtod(m, void *), mtod(ds->ds_mbuf, void *),
			    len);
			SONIC_INIT_RXDESC(sc, i);
			bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
			    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
		} else {
			/*
			 * Hand the receive buffer itself up the stack
			 * and attach a fresh one to this descriptor.
			 */
			m = ds->ds_mbuf;
			if (sonic_add_rxbuf(sc, i) != 0) {
 dropit:
				if_statinc(ifp, if_ierrors);
				SONIC_INIT_RXDESC(sc, i);
				bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
				    ds->ds_dmamap->dm_mapsize,
				    BUS_DMASYNC_PREREAD);
				continue;
			}
		}

		m_set_rcvif(m, ifp);
		m->m_pkthdr.len = m->m_len = len;

		/* Pass it on. */
		if_percpuq_enqueue(ifp->if_percpuq, m);

		count++;
	}

	/*
	 * Update the receive pointer, and tell the chip how far it may
	 * advance by moving the Rx write pointer to just behind us.
	 */
	sc->sc_rxptr = i;
	CSR_WRITE(sc, SONIC_RWR, SONIC_CDRRADDR(sc, SONIC_PREVRX(i)));

	/* Feed the packet count into the entropy pool. */
	if (count != 0)
		rnd_add_uint32(&sc->sc_rndsource, count);
}
862 
863 /*
864  * sonic_reset:
865  *
866  *	Perform a soft reset on the SONIC.
867  */
/*
 * sonic_reset:
 *
 *	Perform a soft reset on the SONIC, leaving it with all
 *	interrupts masked and acknowledged and the command register
 *	cleared.
 */
void
sonic_reset(struct sonic_softc *sc)
{

	/* stop TX, RX and timer, and ensure RST is clear */
	CSR_WRITE(sc, SONIC_CR, CR_STP | CR_RXDIS | CR_HTX);
	delay(1000);

	/* Assert software reset. */
	CSR_WRITE(sc, SONIC_CR, CR_RST);
	delay(1000);

	/* clear all interrupts */
	CSR_WRITE(sc, SONIC_IMR, 0);
	CSR_WRITE(sc, SONIC_ISR, IMR_ALL);

	/* Deassert reset. */
	CSR_WRITE(sc, SONIC_CR, 0);
	delay(1000);
}
886 
887 /*
888  * sonic_init:		[ifnet interface function]
889  *
890  *	Initialize the interface.  Must be called at splnet().
891  */
/*
 * sonic_init:		[ifnet interface function]
 *
 *	Initialize the interface.  Must be called at splnet().
 *
 *	Stops and resets the chip, programs the data configuration
 *	registers, rebuilds the Tx and Rx descriptor rings, hands the
 *	ring addresses to the chip, sets the receive filter and
 *	interrupt mask, and finally enables the receiver.  Returns 0 on
 *	success or an errno if Rx buffer allocation fails.
 */
int
sonic_init(struct ifnet *ifp)
{
	struct sonic_softc *sc = ifp->if_softc;
	struct sonic_descsoft *ds;
	int i, error = 0;
	uint16_t reg;

	/*
	 * Cancel any pending I/O.
	 */
	sonic_stop(ifp, 0);

	/*
	 * Reset the SONIC to a known state.
	 */
	sonic_reset(sc);

	/*
	 * Bring the SONIC into reset state, and program the DCR.
	 *
	 * Note: We don't bother optimizing the transmit and receive
	 * thresholds, here. TFT/RFT values should be set in MD attachments.
	 */
	reg = sc->sc_dcr;
	if (sc->sc_32bit)
		reg |= DCR_DW;
	CSR_WRITE(sc, SONIC_CR, CR_RST);
	CSR_WRITE(sc, SONIC_DCR, reg);
	CSR_WRITE(sc, SONIC_DCR2, sc->sc_dcr2);
	CSR_WRITE(sc, SONIC_CR, 0);

	/*
	 * Initialize the transmit descriptors (zeroed, then synced for
	 * the device).
	 */
	if (sc->sc_32bit) {
		for (i = 0; i < SONIC_NTXDESC; i++) {
			memset(&sc->sc_tda32[i], 0, sizeof(struct sonic_tda32));
			SONIC_CDTXSYNC32(sc, i,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		}
	} else {
		for (i = 0; i < SONIC_NTXDESC; i++) {
			memset(&sc->sc_tda16[i], 0, sizeof(struct sonic_tda16));
			SONIC_CDTXSYNC16(sc, i,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		}
	}
	sc->sc_txpending = 0;
	sc->sc_txdirty = 0;
	sc->sc_txlast = SONIC_NTXDESC - 1;

	/*
	 * Initialize the receive descriptor ring.
	 */
	for (i = 0; i < SONIC_NRXDESC; i++) {
		ds = &sc->sc_rxsoft[i];
		if (ds->ds_mbuf == NULL) {
			if ((error = sonic_add_rxbuf(sc, i)) != 0) {
				printf("%s: unable to allocate or map Rx "
				    "buffer %d, error = %d\n",
				    device_xname(sc->sc_dev), i, error);
				/*
				 * XXX Should attempt to run with fewer receive
				 * XXX buffers instead of just failing.
				 */
				sonic_rxdrain(sc);
				goto out;
			}
		} else
			SONIC_INIT_RXDESC(sc, i);
	}
	sc->sc_rxptr = 0;

	/* Give the transmit ring to the SONIC. */
	CSR_WRITE(sc, SONIC_UTDAR, (SONIC_CDTXADDR(sc, 0) >> 16) & 0xffff);
	CSR_WRITE(sc, SONIC_CTDAR, SONIC_CDTXADDR(sc, 0) & 0xffff);

	/* Give the receive descriptor ring to the SONIC. */
	CSR_WRITE(sc, SONIC_URDAR, (SONIC_CDRXADDR(sc, 0) >> 16) & 0xffff);
	CSR_WRITE(sc, SONIC_CRDAR, SONIC_CDRXADDR(sc, 0) & 0xffff);

	/* Give the receive buffer ring to the SONIC. */
	CSR_WRITE(sc, SONIC_URRAR, (SONIC_CDRRADDR(sc, 0) >> 16) & 0xffff);
	CSR_WRITE(sc, SONIC_RSAR, SONIC_CDRRADDR(sc, 0) & 0xffff);
	if (sc->sc_32bit)
		CSR_WRITE(sc, SONIC_REAR,
		    (SONIC_CDRRADDR(sc, SONIC_NRXDESC - 1) +
		    sizeof(struct sonic_rra32)) & 0xffff);
	else
		CSR_WRITE(sc, SONIC_REAR,
		    (SONIC_CDRRADDR(sc, SONIC_NRXDESC - 1) +
		    sizeof(struct sonic_rra16)) & 0xffff);
	CSR_WRITE(sc, SONIC_RRR, SONIC_CDRRADDR(sc, 0) & 0xffff);
	CSR_WRITE(sc, SONIC_RWR, SONIC_CDRRADDR(sc, SONIC_NRXDESC - 1));

	/*
	 * Set the End-Of-Buffer counter such that only one packet
	 * will be placed into each buffer we provide.  Note we are
	 * following the recommendation of section 3.4.4 of the manual
	 * here, and have "lengthened" the receive buffers accordingly.
	 */
	if (sc->sc_32bit)
		CSR_WRITE(sc, SONIC_EOBC, (ETHER_MAX_LEN + 2) / 2);
	else
		CSR_WRITE(sc, SONIC_EOBC, (ETHER_MAX_LEN / 2));

	/* Reset the receive sequence counter. */
	CSR_WRITE(sc, SONIC_RSC, 0);

	/* Clear the tally registers (write-ones-to-clear). */
	CSR_WRITE(sc, SONIC_CRCETC, 0xffff);
	CSR_WRITE(sc, SONIC_FAET, 0xffff);
	CSR_WRITE(sc, SONIC_MPT, 0xffff);

	/* Set the receive filter. */
	sonic_set_filter(sc);

	/*
	 * Set the interrupt mask register.
	 */
	sc->sc_imr = IMR_RFO | IMR_RBA | IMR_RBE | IMR_RDE |
	    IMR_TXER | IMR_PTX | IMR_PRX;
	CSR_WRITE(sc, SONIC_IMR, sc->sc_imr);

	/*
	 * Start the receive process in motion.  Note, we don't
	 * start the transmit process until we actually try to
	 * transmit packets.
	 */
	CSR_WRITE(sc, SONIC_CR, CR_RXEN | CR_RRRA);

	/*
	 * ...all done!
	 */
	ifp->if_flags |= IFF_RUNNING;

 out:
	if (error)
		printf("%s: interface not running\n", device_xname(sc->sc_dev));
	return error;
}
1034 
1035 /*
1036  * sonic_rxdrain:
1037  *
1038  *	Drain the receive queue.
1039  */
1040 void
1041 sonic_rxdrain(struct sonic_softc *sc)
1042 {
1043 	struct sonic_descsoft *ds;
1044 	int i;
1045 
1046 	for (i = 0; i < SONIC_NRXDESC; i++) {
1047 		ds = &sc->sc_rxsoft[i];
1048 		if (ds->ds_mbuf != NULL) {
1049 			bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
1050 			m_freem(ds->ds_mbuf);
1051 			ds->ds_mbuf = NULL;
1052 		}
1053 	}
1054 }
1055 
1056 /*
1057  * sonic_stop:		[ifnet interface function]
1058  *
1059  *	Stop transmission on the interface.
1060  */
1061 void
1062 sonic_stop(struct ifnet *ifp, int disable)
1063 {
1064 	struct sonic_softc *sc = ifp->if_softc;
1065 	struct sonic_descsoft *ds;
1066 	int i;
1067 
1068 	/*
1069 	 * Disable interrupts.
1070 	 */
1071 	CSR_WRITE(sc, SONIC_IMR, 0);
1072 
1073 	/*
1074 	 * Stop the transmitter, receiver, and timer.
1075 	 */
1076 	CSR_WRITE(sc, SONIC_CR, CR_HTX | CR_RXDIS | CR_STP);
1077 	for (i = 0; i < 1000; i++) {
1078 		if ((CSR_READ(sc, SONIC_CR) & (CR_TXP | CR_RXEN | CR_ST)) == 0)
1079 			break;
1080 		delay(2);
1081 	}
1082 	if ((CSR_READ(sc, SONIC_CR) & (CR_TXP | CR_RXEN | CR_ST)) != 0)
1083 		printf("%s: SONIC failed to stop\n", device_xname(sc->sc_dev));
1084 
1085 	/*
1086 	 * Release any queued transmit buffers.
1087 	 */
1088 	for (i = 0; i < SONIC_NTXDESC; i++) {
1089 		ds = &sc->sc_txsoft[i];
1090 		if (ds->ds_mbuf != NULL) {
1091 			bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
1092 			m_freem(ds->ds_mbuf);
1093 			ds->ds_mbuf = NULL;
1094 		}
1095 	}
1096 
1097 	/*
1098 	 * Mark the interface down and cancel the watchdog timer.
1099 	 */
1100 	ifp->if_flags &= ~IFF_RUNNING;
1101 	ifp->if_timer = 0;
1102 
1103 	if (disable)
1104 		sonic_rxdrain(sc);
1105 }
1106 
1107 /*
1108  * sonic_add_rxbuf:
1109  *
1110  *	Add a receive buffer to the indicated descriptor.
1111  */
1112 int
1113 sonic_add_rxbuf(struct sonic_softc *sc, int idx)
1114 {
1115 	struct sonic_descsoft *ds = &sc->sc_rxsoft[idx];
1116 	struct mbuf *m;
1117 	int error;
1118 
1119 	MGETHDR(m, M_DONTWAIT, MT_DATA);
1120 	if (m == NULL)
1121 		return ENOBUFS;
1122 
1123 	MCLGET(m, M_DONTWAIT);
1124 	if ((m->m_flags & M_EXT) == 0) {
1125 		m_freem(m);
1126 		return ENOBUFS;
1127 	}
1128 
1129 	if (ds->ds_mbuf != NULL)
1130 		bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
1131 
1132 	ds->ds_mbuf = m;
1133 
1134 	error = bus_dmamap_load(sc->sc_dmat, ds->ds_dmamap,
1135 	    m->m_ext.ext_buf, m->m_ext.ext_size, NULL,
1136 	    BUS_DMA_READ | BUS_DMA_NOWAIT);
1137 	if (error) {
1138 		printf("%s: can't load rx DMA map %d, error = %d\n",
1139 		    device_xname(sc->sc_dev), idx, error);
1140 		panic("sonic_add_rxbuf");	/* XXX */
1141 	}
1142 
1143 	bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
1144 	    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
1145 
1146 	SONIC_INIT_RXDESC(sc, idx);
1147 
1148 	return 0;
1149 }
1150 
1151 static void
1152 sonic_set_camentry(struct sonic_softc *sc, int entry, const uint8_t *enaddr)
1153 {
1154 
1155 	if (sc->sc_32bit) {
1156 		struct sonic_cda32 *cda = &sc->sc_cda32[entry];
1157 
1158 		cda->cda_entry = htosonic32(sc, entry);
1159 		cda->cda_addr0 = htosonic32(sc, enaddr[0] | (enaddr[1] << 8));
1160 		cda->cda_addr1 = htosonic32(sc, enaddr[2] | (enaddr[3] << 8));
1161 		cda->cda_addr2 = htosonic32(sc, enaddr[4] | (enaddr[5] << 8));
1162 	} else {
1163 		struct sonic_cda16 *cda = &sc->sc_cda16[entry];
1164 
1165 		cda->cda_entry = htosonic16(sc, entry);
1166 		cda->cda_addr0 = htosonic16(sc, enaddr[0] | (enaddr[1] << 8));
1167 		cda->cda_addr1 = htosonic16(sc, enaddr[2] | (enaddr[3] << 8));
1168 		cda->cda_addr2 = htosonic16(sc, enaddr[4] | (enaddr[5] << 8));
1169 	}
1170 }
1171 
1172 /*
1173  * sonic_set_filter:
1174  *
1175  *	Set the SONIC receive filter.
1176  */
1177 void
1178 sonic_set_filter(struct sonic_softc *sc)
1179 {
1180 	struct ethercom *ec = &sc->sc_ethercom;
1181 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1182 	struct ether_multi *enm;
1183 	struct ether_multistep step;
1184 	int i, entry = 0;
1185 	uint16_t camvalid = 0;
1186 	uint16_t rcr = 0;
1187 
1188 	if (ifp->if_flags & IFF_BROADCAST)
1189 		rcr |= RCR_BRD;
1190 
1191 	if (ifp->if_flags & IFF_PROMISC) {
1192 		rcr |= RCR_PRO;
1193 		goto allmulti;
1194 	}
1195 
1196 	/* Put our station address in the first CAM slot. */
1197 	sonic_set_camentry(sc, entry, CLLADDR(ifp->if_sadl));
1198 	camvalid |= (1U << entry);
1199 	entry++;
1200 
1201 	/* Add the multicast addresses to the CAM. */
1202 	ETHER_LOCK(ec);
1203 	ETHER_FIRST_MULTI(step, ec, enm);
1204 	while (enm != NULL) {
1205 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
1206 			/*
1207 			 * We must listen to a range of multicast addresses.
1208 			 * The only way to do this on the SONIC is to enable
1209 			 * reception of all multicast packets.
1210 			 */
1211 			ETHER_UNLOCK(ec);
1212 			goto allmulti;
1213 		}
1214 
1215 		if (entry == SONIC_NCAMENT) {
1216 			/*
1217 			 * Out of CAM slots.  Have to enable reception
1218 			 * of all multicast addresses.
1219 			 */
1220 			ETHER_UNLOCK(ec);
1221 			goto allmulti;
1222 		}
1223 
1224 		sonic_set_camentry(sc, entry, enm->enm_addrlo);
1225 		camvalid |= (1U << entry);
1226 		entry++;
1227 
1228 		ETHER_NEXT_MULTI(step, enm);
1229 	}
1230 	ETHER_UNLOCK(ec);
1231 
1232 	ifp->if_flags &= ~IFF_ALLMULTI;
1233 	goto setit;
1234 
1235  allmulti:
1236 	/* Use only the first CAM slot (station address). */
1237 	camvalid = 0x0001;
1238 	entry = 1;
1239 	rcr |= RCR_AMC;
1240 
1241  setit:
1242 	/* set mask for the CAM Enable register */
1243 	if (sc->sc_32bit) {
1244 		if (entry == SONIC_NCAMENT)
1245 			sc->sc_cdaenable32 = htosonic32(sc, camvalid);
1246 		else
1247 			sc->sc_cda32[entry].cda_entry =
1248 			    htosonic32(sc, camvalid);
1249 	} else {
1250 		if (entry == SONIC_NCAMENT)
1251 			sc->sc_cdaenable16 = htosonic16(sc, camvalid);
1252 		else
1253 			sc->sc_cda16[entry].cda_entry =
1254 			    htosonic16(sc, camvalid);
1255 	}
1256 
1257 	/* Load the CAM. */
1258 	SONIC_CDCAMSYNC(sc, BUS_DMASYNC_PREWRITE);
1259 	CSR_WRITE(sc, SONIC_CDP, SONIC_CDCAMADDR(sc) & 0xffff);
1260 	CSR_WRITE(sc, SONIC_CDC, entry);
1261 	CSR_WRITE(sc, SONIC_CR, CR_LCAM);
1262 	for (i = 0; i < 10000; i++) {
1263 		if ((CSR_READ(sc, SONIC_CR) & CR_LCAM) == 0)
1264 			break;
1265 		delay(2);
1266 	}
1267 	if (CSR_READ(sc, SONIC_CR) & CR_LCAM)
1268 		printf("%s: CAM load failed\n", device_xname(sc->sc_dev));
1269 	SONIC_CDCAMSYNC(sc, BUS_DMASYNC_POSTWRITE);
1270 
1271 	/* Set the receive control register. */
1272 	CSR_WRITE(sc, SONIC_RCR, rcr);
1273 }
1274