1 /*	$NetBSD: dp83932.c,v 1.47 2021/02/20 09:36:31 rin Exp $	*/
2 
3 /*-
4  * Copyright (c) 2001 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Jason R. Thorpe.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29  * POSSIBILITY OF SUCH DAMAGE.
30  */
31 
32 /*
33  * Device driver for the National Semiconductor DP83932
34  * Systems-Oriented Network Interface Controller (SONIC).
35  */
36 
37 #include <sys/cdefs.h>
38 __KERNEL_RCSID(0, "$NetBSD: dp83932.c,v 1.47 2021/02/20 09:36:31 rin Exp $");
39 
40 
41 #include <sys/param.h>
42 #include <sys/systm.h>
43 #include <sys/mbuf.h>
44 #include <sys/malloc.h>
45 #include <sys/kernel.h>
46 #include <sys/socket.h>
47 #include <sys/ioctl.h>
48 #include <sys/errno.h>
49 #include <sys/device.h>
50 
51 #include <sys/rndsource.h>
52 
53 #include <net/if.h>
54 #include <net/if_dl.h>
55 #include <net/if_ether.h>
56 
57 #include <net/bpf.h>
58 
59 #include <sys/bus.h>
60 #include <sys/intr.h>
61 
62 #include <dev/ic/dp83932reg.h>
63 #include <dev/ic/dp83932var.h>
64 
65 static void	sonic_start(struct ifnet *);
66 static void	sonic_watchdog(struct ifnet *);
67 static int	sonic_ioctl(struct ifnet *, u_long, void *);
68 static int	sonic_init(struct ifnet *);
69 static void	sonic_stop(struct ifnet *, int);
70 
71 static bool	sonic_shutdown(device_t, int);
72 
73 static void	sonic_reset(struct sonic_softc *);
74 static void	sonic_rxdrain(struct sonic_softc *);
75 static int	sonic_add_rxbuf(struct sonic_softc *, int);
76 static void	sonic_set_filter(struct sonic_softc *);
77 
78 static uint16_t sonic_txintr(struct sonic_softc *);
79 static void	sonic_rxintr(struct sonic_softc *);
80 
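/*
 * Tunable: when non-zero, received packets small enough to fit in an
 * mbuf header are copied out of the DMA cluster instead of handing the
 * cluster itself up the stack (see sonic_rxintr()).
 */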
81 int	sonic_copy_small = 0;
82 
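/*
 * Shortest frame we may hand to the chip: the Ethernet minimum of
 * ETHER_MIN_LEN (64) bytes less the 4-byte CRC, which the SONIC
 * appends itself.  Runt packets are padded up to this length with an
 * all-zero fragment (see sonic_start()).
 */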
83 #define ETHER_PAD_LEN (ETHER_MIN_LEN - ETHER_CRC_LEN)
84 
85 /*
86  * sonic_attach:
87  *
88  *	Attach a SONIC interface to the system.
89  */
90 void
91 sonic_attach(struct sonic_softc *sc, const uint8_t *enaddr)
92 {
93 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
94 	int i, rseg, error;
95 	bus_dma_segment_t seg;
96 	size_t cdatasize;
97 	uint8_t *nullbuf;
98 
99 	/*
100 	 * Allocate the control data structures, and create and load the
101 	 * DMA map for it.
102 	 */
103 	if (sc->sc_32bit)
104 		cdatasize = sizeof(struct sonic_control_data32);
105 	else
106 		cdatasize = sizeof(struct sonic_control_data16);
107 
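	/*
	 * The SONIC is given descriptor addresses as a 16-bit offset
	 * from an upper-address register (UTDAR/URDAR/URRAR), so all of
	 * the control data must live within one 64KB region; hence the
	 * 64 * 1024 boundary passed to bus_dmamem_alloc() below.
	 */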
108 	if ((error = bus_dmamem_alloc(sc->sc_dmat, cdatasize + ETHER_PAD_LEN,
109 	     PAGE_SIZE, (64 * 1024), &seg, 1, &rseg,
110 	     BUS_DMA_NOWAIT)) != 0) {
111 		aprint_error_dev(sc->sc_dev,
112 		    "unable to allocate control data, error = %d\n", error);
113 		goto fail_0;
114 	}
115 
116 	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
117 	    cdatasize + ETHER_PAD_LEN, (void **) &sc->sc_cdata16,
118 	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
119 		aprint_error_dev(sc->sc_dev,
120 		    "unable to map control data, error = %d\n", error);
121 		goto fail_1;
122 	}
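	/*
	 * The tail of the control-data mapping doubles as an all-zero
	 * pad buffer; short transmit packets point an extra fragment at
	 * it so every frame handed to the chip is at least ETHER_PAD_LEN
	 * bytes (see sonic_start()).
	 */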
123 	nullbuf = (uint8_t *)sc->sc_cdata16 + cdatasize;
124 	memset(nullbuf, 0, ETHER_PAD_LEN);
125 
126 	if ((error = bus_dmamap_create(sc->sc_dmat,
127 	     cdatasize, 1, cdatasize, 0, BUS_DMA_NOWAIT,
128 	     &sc->sc_cddmamap)) != 0) {
129 		aprint_error_dev(sc->sc_dev,
130 		    "unable to create control data DMA map, error = %d\n",
131 		    error);
132 		goto fail_2;
133 	}
134 
135 	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
136 	     sc->sc_cdata16, cdatasize, NULL, BUS_DMA_NOWAIT)) != 0) {
137 		aprint_error_dev(sc->sc_dev,
138 		    "unable to load control data DMA map, error = %d\n", error);
139 		goto fail_3;
140 	}
141 
142 	/*
143 	 * Create the transmit buffer DMA maps.
144 	 */
145 	for (i = 0; i < SONIC_NTXDESC; i++) {
146 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
147 		     SONIC_NTXFRAGS, MCLBYTES, 0, BUS_DMA_NOWAIT,
148 		     &sc->sc_txsoft[i].ds_dmamap)) != 0) {
149 			aprint_error_dev(sc->sc_dev,
150 			    "unable to create tx DMA map %d, error = %d\n",
151 			    i, error);
152 			goto fail_4;
153 		}
154 	}
155 
156 	/*
157 	 * Create the receive buffer DMA maps.
158 	 */
159 	for (i = 0; i < SONIC_NRXDESC; i++) {
160 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
161 		     MCLBYTES, 0, BUS_DMA_NOWAIT,
162 		     &sc->sc_rxsoft[i].ds_dmamap)) != 0) {
163 			aprint_error_dev(sc->sc_dev,
164 			    "unable to create rx DMA map %d, error = %d\n",
165 			    i, error);
166 			goto fail_5;
167 		}
168 		sc->sc_rxsoft[i].ds_mbuf = NULL;
169 	}
170 
171 	/*
172 	 * Create and map the pad buffer.
173 	 */
174 	if ((error = bus_dmamap_create(sc->sc_dmat, ETHER_PAD_LEN, 1,
175 	    ETHER_PAD_LEN, 0, BUS_DMA_NOWAIT, &sc->sc_nulldmamap)) != 0) {
176 		aprint_error_dev(sc->sc_dev,
177 		    "unable to create pad buffer DMA map, error = %d\n", error);
178 		goto fail_5;
179 	}
180 
181 	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_nulldmamap,
182 	    nullbuf, ETHER_PAD_LEN, NULL, BUS_DMA_NOWAIT)) != 0) {
183 		aprint_error_dev(sc->sc_dev,
184 		    "unable to load pad buffer DMA map, error = %d\n", error);
185 		goto fail_6;
186 	}
187 	bus_dmamap_sync(sc->sc_dmat, sc->sc_nulldmamap, 0, ETHER_PAD_LEN,
188 	    BUS_DMASYNC_PREWRITE);
189 
190 	/*
191 	 * Reset the chip to a known state.
192 	 */
193 	sonic_reset(sc);
194 
195 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
196 	    ether_sprintf(enaddr));
197 
198 	strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
199 	ifp->if_softc = sc;
200 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
201 	ifp->if_ioctl = sonic_ioctl;
202 	ifp->if_start = sonic_start;
203 	ifp->if_watchdog = sonic_watchdog;
204 	ifp->if_init = sonic_init;
205 	ifp->if_stop = sonic_stop;
206 	IFQ_SET_READY(&ifp->if_snd);
207 
208 	/*
209 	 * We can support 802.1Q VLAN-sized frames.
210 	 */
211 	sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU;
212 
213 	/*
214 	 * Attach the interface.
215 	 */
216 	if_attach(ifp);
217 	if_deferred_start_init(ifp, NULL);
218 	ether_ifattach(ifp, enaddr);
219 
220 	rnd_attach_source(&sc->sc_rndsource, ifp->if_xname, RND_TYPE_NET,
221 	    RND_FLAG_DEFAULT);
222 
223 	/*
224 	 * Make sure the interface is shutdown during reboot.
225 	 */
226 	if (pmf_device_register1(sc->sc_dev, NULL, NULL, sonic_shutdown))
227 		pmf_class_network_register(sc->sc_dev, ifp);
228 	else
229 		aprint_error_dev(sc->sc_dev,
230 		    "couldn't establish power handler\n");
231 
232 	return;
233 
234 	/*
235 	 * Free any resources we've allocated during the failed attach
236 	 * attempt.  Do this in reverse order and fall through.
237 	 */
238  fail_6:
239 	bus_dmamap_destroy(sc->sc_dmat, sc->sc_nulldmamap);
240  fail_5:
241 	for (i = 0; i < SONIC_NRXDESC; i++) {
242 		if (sc->sc_rxsoft[i].ds_dmamap != NULL)
243 			bus_dmamap_destroy(sc->sc_dmat,
244 			    sc->sc_rxsoft[i].ds_dmamap);
245 	}
246  fail_4:
247 	for (i = 0; i < SONIC_NTXDESC; i++) {
248 		if (sc->sc_txsoft[i].ds_dmamap != NULL)
249 			bus_dmamap_destroy(sc->sc_dmat,
250 			    sc->sc_txsoft[i].ds_dmamap);
251 	}
252 	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
253  fail_3:
254 	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
255  fail_2:
256 	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_cdata16,
	    cdatasize + ETHER_PAD_LEN);
257  fail_1:
258 	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
259  fail_0:
260 	return;
261 }
262 
263 /*
264  * sonic_shutdown:
265  *
266  *	Make sure the interface is stopped at reboot.
267  */
268 bool
269 sonic_shutdown(device_t self, int howto)
270 {
271 	struct sonic_softc *sc = device_private(self);
272 
273 	sonic_stop(&sc->sc_ethercom.ec_if, 1);
274 
275 	return true;
276 }
277 
278 /*
279  * sonic_start:		[ifnet interface function]
280  *
281  *	Start packet transmission on the interface.
282  */
283 void
284 sonic_start(struct ifnet *ifp)
285 {
286 	struct sonic_softc *sc = ifp->if_softc;
287 	struct mbuf *m0, *m;
288 	struct sonic_tda16 *tda16;
289 	struct sonic_tda32 *tda32;
290 	struct sonic_descsoft *ds;
291 	bus_dmamap_t dmamap;
292 	int error, olasttx, nexttx, opending, totlen, olseg;
293 	int seg = 0;	/* XXX: gcc */
294 
295 	if ((ifp->if_flags & IFF_RUNNING) != IFF_RUNNING)
296 		return;
297 
298 	/*
299 	 * Remember the previous txpending and the current "last txdesc
300 	 * used" index.
301 	 */
302 	opending = sc->sc_txpending;
303 	olasttx = sc->sc_txlast;
304 
305 	/*
306 	 * Loop through the send queue, setting up transmit descriptors
307 	 * until we drain the queue, or use up all available transmit
308 	 * descriptors.  Leave one at the end for sanity's sake.
309 	 */
310 	while (sc->sc_txpending < (SONIC_NTXDESC - 1)) {
311 		/*
312 		 * Grab a packet off the queue.
313 		 */
314 		IFQ_POLL(&ifp->if_snd, m0);
315 		if (m0 == NULL)
316 			break;
317 		m = NULL;
318 
319 		/*
320 		 * Get the next available transmit descriptor.
321 		 */
322 		nexttx = SONIC_NEXTTX(sc->sc_txlast);
323 		ds = &sc->sc_txsoft[nexttx];
324 		dmamap = ds->ds_dmamap;
325 
326 		/*
327 		 * Load the DMA map.  If this fails, the packet either
328 		 * didn't fit in the allotted number of frags, or we were
329 		 * short on resources.  In this case, we'll copy and try
330 		 * again.
331 		 */
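		/*
		 * Note we also force the copy when a runt packet already
		 * uses every fragment slot, since padding it out needs
		 * one more fragment for the zero pad buffer.
		 */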
332 		if ((error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
333 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT)) != 0 ||
334 		    (m0->m_pkthdr.len < ETHER_PAD_LEN &&
335 		    dmamap->dm_nsegs == SONIC_NTXFRAGS)) {
336 			if (error == 0)
337 				bus_dmamap_unload(sc->sc_dmat, dmamap);
338 			MGETHDR(m, M_DONTWAIT, MT_DATA);
339 			if (m == NULL) {
340 				printf("%s: unable to allocate Tx mbuf\n",
341 				    device_xname(sc->sc_dev));
342 				break;
343 			}
344 			if (m0->m_pkthdr.len > MHLEN) {
345 				MCLGET(m, M_DONTWAIT);
346 				if ((m->m_flags & M_EXT) == 0) {
347 					printf("%s: unable to allocate Tx "
348 					    "cluster\n",
349 					    device_xname(sc->sc_dev));
350 					m_freem(m);
351 					break;
352 				}
353 			}
354 			m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, void *));
355 			m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;
356 			error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap,
357 			    m, BUS_DMA_WRITE | BUS_DMA_NOWAIT);
358 			if (error) {
359 				printf("%s: unable to load Tx buffer, "
360 				    "error = %d\n", device_xname(sc->sc_dev),
361 				    error);
362 				m_freem(m);
363 				break;
364 			}
365 		}
366 		IFQ_DEQUEUE(&ifp->if_snd, m0);
367 		if (m != NULL) {
368 			m_freem(m0);
369 			m0 = m;
370 		}
371 
372 		/*
373 		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
374 		 */
375 
376 		/* Sync the DMA map. */
377 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
378 		    BUS_DMASYNC_PREWRITE);
379 
380 		/*
381 		 * Store a pointer to the packet so we can free it later.
382 		 */
383 		ds->ds_mbuf = m0;
384 
385 		/*
386 		 * Initialize the transmit descriptor.
387 		 */
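		/*
		 * Each fragment carries the DMA address split into two
		 * 16-bit words (frag_ptr1 = upper half, frag_ptr0 =
		 * lower half) plus a length.  If the frame is shorter
		 * than ETHER_PAD_LEN, one extra fragment pointing at the
		 * zero pad buffer rounds it up to the minimum.
		 */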
388 		totlen = 0;
389 		if (sc->sc_32bit) {
390 			tda32 = &sc->sc_tda32[nexttx];
391 			for (seg = 0; seg < dmamap->dm_nsegs; seg++) {
392 				tda32->tda_frags[seg].frag_ptr1 =
393 				    htosonic32(sc,
394 				    (dmamap->dm_segs[seg].ds_addr >> 16) &
395 				    0xffff);
396 				tda32->tda_frags[seg].frag_ptr0 =
397 				    htosonic32(sc,
398 				    dmamap->dm_segs[seg].ds_addr & 0xffff);
399 				tda32->tda_frags[seg].frag_size =
400 				    htosonic32(sc, dmamap->dm_segs[seg].ds_len);
401 				totlen += dmamap->dm_segs[seg].ds_len;
402 			}
403 			if (totlen < ETHER_PAD_LEN) {
404 				tda32->tda_frags[seg].frag_ptr1 =
405 				    htosonic32(sc,
406 				    (sc->sc_nulldma >> 16) & 0xffff);
407 				tda32->tda_frags[seg].frag_ptr0 =
408 				    htosonic32(sc, sc->sc_nulldma & 0xffff);
409 				tda32->tda_frags[seg].frag_size =
410 				    htosonic32(sc, ETHER_PAD_LEN - totlen);
411 				totlen = ETHER_PAD_LEN;
412 				seg++;
413 			}
414 
415 			tda32->tda_status = 0;
416 			tda32->tda_pktconfig = 0;
417 			tda32->tda_pktsize = htosonic32(sc, totlen);
418 			tda32->tda_fragcnt = htosonic32(sc, seg);
419 
420 			/* Link it up. */
421 			tda32->tda_frags[seg].frag_ptr0 =
422 			    htosonic32(sc, SONIC_CDTXADDR32(sc,
423 			    SONIC_NEXTTX(nexttx)) & 0xffff);
424 
425 			/* Sync the Tx descriptor. */
426 			SONIC_CDTXSYNC32(sc, nexttx,
427 			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
428 		} else {
429 			tda16 = &sc->sc_tda16[nexttx];
430 			for (seg = 0; seg < dmamap->dm_nsegs; seg++) {
431 				tda16->tda_frags[seg].frag_ptr1 =
432 				    htosonic16(sc,
433 				    (dmamap->dm_segs[seg].ds_addr >> 16) &
434 				    0xffff);
435 				tda16->tda_frags[seg].frag_ptr0 =
436 				    htosonic16(sc,
437 				    dmamap->dm_segs[seg].ds_addr & 0xffff);
438 				tda16->tda_frags[seg].frag_size =
439 				    htosonic16(sc, dmamap->dm_segs[seg].ds_len);
440 				totlen += dmamap->dm_segs[seg].ds_len;
441 			}
442 			if (totlen < ETHER_PAD_LEN) {
443 				tda16->tda_frags[seg].frag_ptr1 =
444 				    htosonic16(sc,
445 				    (sc->sc_nulldma >> 16) & 0xffff);
446 				tda16->tda_frags[seg].frag_ptr0 =
447 				    htosonic16(sc, sc->sc_nulldma & 0xffff);
448 				tda16->tda_frags[seg].frag_size =
449 				    htosonic16(sc, ETHER_PAD_LEN - totlen);
450 				totlen = ETHER_PAD_LEN;
451 				seg++;
452 			}
453 
454 			tda16->tda_status = 0;
455 			tda16->tda_pktconfig = 0;
456 			tda16->tda_pktsize = htosonic16(sc, totlen);
457 			tda16->tda_fragcnt = htosonic16(sc, seg);
458 
459 			/* Link it up. */
460 			tda16->tda_frags[seg].frag_ptr0 =
461 			    htosonic16(sc, SONIC_CDTXADDR16(sc,
462 			    SONIC_NEXTTX(nexttx)) & 0xffff);
463 
464 			/* Sync the Tx descriptor. */
465 			SONIC_CDTXSYNC16(sc, nexttx,
466 			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
467 		}
468 
469 		/* Advance the Tx pointer. */
470 		sc->sc_txpending++;
471 		sc->sc_txlast = nexttx;
472 
473 		/*
474 		 * Pass the packet to any BPF listeners.
475 		 */
476 		bpf_mtap(ifp, m0, BPF_D_OUT);
477 	}
478 
479 	if (sc->sc_txpending != opending) {
480 		/*
481 		 * We enqueued packets.  If the transmitter was idle,
482 		 * reset the txdirty pointer.
483 		 */
484 		if (opending == 0)
485 			sc->sc_txdirty = SONIC_NEXTTX(olasttx);
486 
487 		/*
488 		 * Stop the SONIC on the last packet we've set up,
489 		 * and clear end-of-list on the descriptor previous
490 		 * to our new chain.
491 		 *
492 		 * NOTE: our `seg' variable should still be valid!
493 		 */
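		/*
		 * Note the order: the new tail is marked end-of-list
		 * before EOL is cleared on the old tail, so the chip
		 * never sees a chain without a terminator.
		 */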
494 		if (sc->sc_32bit) {
495 			olseg =
496 			    sonic32toh(sc, sc->sc_tda32[olasttx].tda_fragcnt);
497 			sc->sc_tda32[sc->sc_txlast].tda_frags[seg].frag_ptr0 |=
498 			    htosonic32(sc, TDA_LINK_EOL);
499 			SONIC_CDTXSYNC32(sc, sc->sc_txlast,
500 			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
501 			sc->sc_tda32[olasttx].tda_frags[olseg].frag_ptr0 &=
502 			    htosonic32(sc, ~TDA_LINK_EOL);
503 			SONIC_CDTXSYNC32(sc, olasttx,
504 			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
505 		} else {
506 			olseg =
507 			    sonic16toh(sc, sc->sc_tda16[olasttx].tda_fragcnt);
508 			sc->sc_tda16[sc->sc_txlast].tda_frags[seg].frag_ptr0 |=
509 			    htosonic16(sc, TDA_LINK_EOL);
510 			SONIC_CDTXSYNC16(sc, sc->sc_txlast,
511 			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
512 			sc->sc_tda16[olasttx].tda_frags[olseg].frag_ptr0 &=
513 			    htosonic16(sc, ~TDA_LINK_EOL);
514 			SONIC_CDTXSYNC16(sc, olasttx,
515 			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
516 		}
517 
518 		/* Start the transmitter. */
519 		CSR_WRITE(sc, SONIC_CR, CR_TXP);
520 
521 		/* Set a watchdog timer in case the chip flakes out. */
522 		ifp->if_timer = 5;
523 	}
524 }
525 
526 /*
527  * sonic_watchdog:	[ifnet interface function]
528  *
529  *	Watchdog timer handler.
530  */
531 void
532 sonic_watchdog(struct ifnet *ifp)
533 {
534 	struct sonic_softc *sc = ifp->if_softc;
535 
536 	printf("%s: device timeout\n", device_xname(sc->sc_dev));
537 	if_statinc(ifp, if_oerrors);
538 
539 	(void)sonic_init(ifp);
540 }
541 
542 /*
543  * sonic_ioctl:		[ifnet interface function]
544  *
545  *	Handle control requests from the operator.
546  */
547 int
548 sonic_ioctl(struct ifnet *ifp, u_long cmd, void *data)
549 {
550 	int s, error;
551 
552 	s = splnet();
553 
554 	error = ether_ioctl(ifp, cmd, data);
555 	if (error == ENETRESET) {
556 		/*
557 		 * Multicast list has changed; set the hardware
558 		 * filter accordingly.
559 		 */
560 		if (ifp->if_flags & IFF_RUNNING)
561 			(void)sonic_init(ifp);
562 		error = 0;
563 	}
564 
565 	splx(s);
566 	return error;
567 }
568 
569 /*
570  * sonic_intr:
571  *
572  *	Interrupt service routine.
573  */
574 int
575 sonic_intr(void *arg)
576 {
577 	struct sonic_softc *sc = arg;
578 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
579 	uint16_t isr;
580 	int handled = 0, wantinit;
581 
582 	for (wantinit = 0; wantinit == 0;) {
583 		isr = CSR_READ(sc, SONIC_ISR) & sc->sc_imr;
584 		if (isr == 0)
585 			break;
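		/*
		 * ISR bits are write-one-to-clear: writing back the bits
		 * we just read acknowledges exactly those events and no
		 * others.
		 */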
586 		CSR_WRITE(sc, SONIC_ISR, isr);	/* ACK */
587 
588 		handled = 1;
589 
590 		if (isr & IMR_PRX)
591 			sonic_rxintr(sc);
592 
593 		if (isr & (IMR_PTX | IMR_TXER)) {
594 			if (sonic_txintr(sc) & TCR_FU) {
595 				printf("%s: transmit FIFO underrun\n",
596 				    device_xname(sc->sc_dev));
597 				wantinit = 1;
598 			}
599 		}
600 
601 		if (isr & (IMR_RFO | IMR_RBA | IMR_RBE | IMR_RDE)) {
602 #define	PRINTERR(bit, str)						\
603 			if (isr & (bit))				\
604 				printf("%s: %s\n",device_xname(sc->sc_dev), str)
605 			PRINTERR(IMR_RFO, "receive FIFO overrun");
606 			PRINTERR(IMR_RBA, "receive buffer exceeded");
607 			PRINTERR(IMR_RBE, "receive buffers exhausted");
608 			PRINTERR(IMR_RDE, "receive descriptors exhausted");
609 			wantinit = 1;
610 		}
611 	}
612 
613 	if (handled) {
614 		if (wantinit)
615 			(void)sonic_init(ifp);
616 		if_schedule_deferred_start(ifp);
617 	}
618 
619 	return handled;
620 }
621 
622 /*
623  * sonic_txintr:
624  *
625  *	Helper; handle transmit complete interrupts.
626  */
627 uint16_t
628 sonic_txintr(struct sonic_softc *sc)
629 {
630 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
631 	struct sonic_descsoft *ds;
632 	struct sonic_tda32 *tda32;
633 	struct sonic_tda16 *tda16;
634 	uint16_t status, totstat = 0;
635 	int i, count;
636 
637 	count = 0;
638 	for (i = sc->sc_txdirty; sc->sc_txpending != 0;
639 	     i = SONIC_NEXTTX(i), sc->sc_txpending--) {
640 		ds = &sc->sc_txsoft[i];
641 
642 		if (sc->sc_32bit) {
643 			SONIC_CDTXSYNC32(sc, i,
644 			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
645 			tda32 = &sc->sc_tda32[i];
646 			status = sonic32toh(sc, tda32->tda_status);
647 			SONIC_CDTXSYNC32(sc, i, BUS_DMASYNC_PREREAD);
648 		} else {
649 			SONIC_CDTXSYNC16(sc, i,
650 			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
651 			tda16 = &sc->sc_tda16[i];
652 			status = sonic16toh(sc, tda16->tda_status);
653 			SONIC_CDTXSYNC16(sc, i, BUS_DMASYNC_PREREAD);
654 		}
655 
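		/*
		 * tda_status was zeroed when the descriptor was set up
		 * and is filled in by the chip on completion.  The bits
		 * masked off here are per-packet control bits that carry
		 * no completion information, so if nothing else is set
		 * the descriptor is still pending and we stop scanning.
		 */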
656 		if ((status & ~(TCR_EXDIS |TCR_CRCI |TCR_POWC |TCR_PINT)) == 0)
657 			break;
658 
659 		totstat |= status;
660 
661 		bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
662 		    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
663 		bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
664 		m_freem(ds->ds_mbuf);
665 		ds->ds_mbuf = NULL;
666 
667 		/*
668 		 * Check for errors and collisions.
669 		 */
670 		net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
671 		if (status & TCR_PTX) {
672 			if_statinc_ref(nsr, if_opackets);
673 			count++;
674 		} else
675 			if_statinc_ref(nsr, if_oerrors);
676 		if (TDA_STATUS_NCOL(status))
677 			if_statadd_ref(nsr, if_collisions,
678 			    TDA_STATUS_NCOL(status));
679 		IF_STAT_PUTREF(ifp);
680 	}
681 
682 	/* Update the dirty transmit buffer pointer. */
683 	sc->sc_txdirty = i;
684 
685 	/*
686 	 * Cancel the watchdog timer if there are no pending
687 	 * transmissions.
688 	 */
689 	if (sc->sc_txpending == 0)
690 		ifp->if_timer = 0;
691 
692 	if (count != 0)
693 		rnd_add_uint32(&sc->sc_rndsource, count);
694 
695 	return totstat;
696 }
697 
698 /*
699  * sonic_rxintr:
700  *
701  *	Helper; handle receive interrupts.
702  */
703 void
704 sonic_rxintr(struct sonic_softc *sc)
705 {
706 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
707 	struct sonic_descsoft *ds;
708 	struct sonic_rda32 *rda32;
709 	struct sonic_rda16 *rda16;
710 	struct mbuf *m;
711 	int i, len, count;
712 	uint16_t status, bytecount /*, ptr0, ptr1, seqno */;
713 
714 	count = 0;
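	/*
	 * Scan the ring from where we left off.  The in-use word is set
	 * when a descriptor is handed to the chip and cleared by the
	 * SONIC once it has stored a packet there, so a non-zero value
	 * means we have caught up with the hardware.
	 */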
715 	for (i = sc->sc_rxptr;; i = SONIC_NEXTRX(i)) {
716 		ds = &sc->sc_rxsoft[i];
717 
718 		if (sc->sc_32bit) {
719 			SONIC_CDRXSYNC32(sc, i,
720 			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
721 			rda32 = &sc->sc_rda32[i];
722 			SONIC_CDRXSYNC32(sc, i, BUS_DMASYNC_PREREAD);
723 			if (rda32->rda_inuse != 0)
724 				break;
725 			status = sonic32toh(sc, rda32->rda_status);
726 			bytecount = sonic32toh(sc, rda32->rda_bytecount);
727 			/* ptr0 = sonic32toh(sc, rda32->rda_pkt_ptr0); */
728 			/* ptr1 = sonic32toh(sc, rda32->rda_pkt_ptr1); */
729 			/* seqno = sonic32toh(sc, rda32->rda_seqno); */
730 		} else {
731 			SONIC_CDRXSYNC16(sc, i,
732 			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
733 			rda16 = &sc->sc_rda16[i];
734 			SONIC_CDRXSYNC16(sc, i, BUS_DMASYNC_PREREAD);
735 			if (rda16->rda_inuse != 0)
736 				break;
737 			status = sonic16toh(sc, rda16->rda_status);
738 			bytecount = sonic16toh(sc, rda16->rda_bytecount);
739 			/* ptr0 = sonic16toh(sc, rda16->rda_pkt_ptr0); */
740 			/* ptr1 = sonic16toh(sc, rda16->rda_pkt_ptr1); */
741 			/* seqno = sonic16toh(sc, rda16->rda_seqno); */
742 		}
743 
744 		/*
745 		 * Make absolutely sure this is the only packet
746 		 * in this receive buffer.  Our entire Rx buffer
747 		 * management scheme depends on this, and if the
748 		 * SONIC didn't follow our rule, it means we've
749 		 * misconfigured it.
750 		 */
751 		KASSERT(status & RCR_LPKT);
752 
753 		/*
754 		 * Make sure the packet arrived OK.  If an error occurred,
755 		 * update stats and reset the descriptor.  The buffer will
756 		 * be reused the next time the descriptor comes up in the
757 		 * ring.
758 		 */
759 		if ((status & RCR_PRX) == 0) {
760 			if (status & RCR_FAER)
761 				printf("%s: Rx frame alignment error\n",
762 				    device_xname(sc->sc_dev));
763 			else if (status & RCR_CRCR)
764 				printf("%s: Rx CRC error\n",
765 				    device_xname(sc->sc_dev));
766 			if_statinc(ifp, if_ierrors);
767 			SONIC_INIT_RXDESC(sc, i);
768 			continue;
769 		}
770 
771 		bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
772 		    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
773 
774 		/*
775 		 * The SONIC includes the CRC with every packet.
776 		 */
777 		len = bytecount - ETHER_CRC_LEN;
778 
779 		/*
780 		 * If the chip is in 32-bit mode, receive buffers must
781 		 * be aligned to 32-bit boundaries; with a 14-byte
782 		 * Ethernet header that leaves the IP header misaligned.
783 		 * In this case we must allocate a new mbuf and copy the
784 		 * packet into it, shifted forward 2 bytes to restore
785 		 * alignment.
786 		 *
787 		 * Note, in 16-bit mode, we can configure the SONIC
788 		 * to do what we want, and we have.
789 		 */
790 #ifndef __NO_STRICT_ALIGNMENT
791 		if (sc->sc_32bit) {
792 			MGETHDR(m, M_DONTWAIT, MT_DATA);
793 			if (m == NULL)
794 				goto dropit;
795 			if (len > (MHLEN - 2)) {
796 				MCLGET(m, M_DONTWAIT);
797 				if ((m->m_flags & M_EXT) == 0) {
798 					m_freem(m);
799 					goto dropit;
800 				}
801 			}
802 			m->m_data += 2;
803 			/*
804 			 * Note that we use a cluster for incoming frames,
805 			 * so the buffer is virtually contiguous.
806 			 */
807 			memcpy(mtod(m, void *), mtod(ds->ds_mbuf, void *),
808 			    len);
809 			SONIC_INIT_RXDESC(sc, i);
810 			bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
811 			    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
812 		} else
813 #endif /* ! __NO_STRICT_ALIGNMENT */
814 		/*
815 		 * If the packet is small enough to fit in a single
816 		 * header mbuf, allocate one and copy the data into
817 		 * it.  This greatly reduces memory consumption when
818 		 * we receive lots of small packets.
819 		 */
820 		if (sonic_copy_small != 0 && len <= (MHLEN - 2)) {
821 			MGETHDR(m, M_DONTWAIT, MT_DATA);
822 			if (m == NULL)
823 				goto dropit;
824 			m->m_data += 2;
825 			/*
826 			 * Note that we use a cluster for incoming frames,
827 			 * so the buffer is virtually contiguous.
828 			 */
829 			memcpy(mtod(m, void *), mtod(ds->ds_mbuf, void *),
830 			    len);
831 			SONIC_INIT_RXDESC(sc, i);
832 			bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
833 			    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
834 		} else {
835 			m = ds->ds_mbuf;
836 			if (sonic_add_rxbuf(sc, i) != 0) {
837  dropit:
838 				if_statinc(ifp, if_ierrors);
839 				SONIC_INIT_RXDESC(sc, i);
840 				bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
841 				    ds->ds_dmamap->dm_mapsize,
842 				    BUS_DMASYNC_PREREAD);
843 				continue;
844 			}
845 		}
846 
847 		m_set_rcvif(m, ifp);
848 		m->m_pkthdr.len = m->m_len = len;
849 
850 		/* Pass it on. */
851 		if_percpuq_enqueue(ifp->if_percpuq, m);
852 
853 		count++;
854 	}
855 
856 	/*
	 * Update the receive pointer, and advance the Resource Write
	 * Register behind it so the chip may reuse the receive resources
	 * we have finished with without lapping the descriptor we will
	 * look at next.
	 */
857 	sc->sc_rxptr = i;
858 	CSR_WRITE(sc, SONIC_RWR, SONIC_CDRRADDR(sc, SONIC_PREVRX(i)));
859 
860 	if (count != 0)
861 		rnd_add_uint32(&sc->sc_rndsource, count);
862 }
863 
864 /*
865  * sonic_reset:
866  *
867  *	Perform a soft reset on the SONIC.
868  */
869 void
870 sonic_reset(struct sonic_softc *sc)
871 {
872 
873 	/* stop TX, RX and timer, and ensure RST is clear */
874 	CSR_WRITE(sc, SONIC_CR, CR_STP | CR_RXDIS | CR_HTX);
875 	delay(1000);
876 
877 	CSR_WRITE(sc, SONIC_CR, CR_RST);
878 	delay(1000);
879 
880 	/* clear all interrupts */
881 	CSR_WRITE(sc, SONIC_IMR, 0);
882 	CSR_WRITE(sc, SONIC_ISR, IMR_ALL);
883 
884 	CSR_WRITE(sc, SONIC_CR, 0);
885 	delay(1000);
886 }
887 
888 /*
889  * sonic_init:		[ifnet interface function]
890  *
891  *	Initialize the interface.  Must be called at splnet().
892  */
893 int
894 sonic_init(struct ifnet *ifp)
895 {
896 	struct sonic_softc *sc = ifp->if_softc;
897 	struct sonic_descsoft *ds;
898 	int i, error = 0;
899 	uint16_t reg;
900 
901 	/*
902 	 * Cancel any pending I/O.
903 	 */
904 	sonic_stop(ifp, 0);
905 
906 	/*
907 	 * Reset the SONIC to a known state.
908 	 */
909 	sonic_reset(sc);
910 
911 	/*
912 	 * Bring the SONIC into reset state, and program the DCR.
913 	 *
914 	 * Note: We don't bother optimizing the transmit and receive
915 	 * thresholds, here. TFT/RFT values should be set in MD attachments.
916 	 * thresholds here; TFT/RFT values should be set in MD attachments.
917 	reg = sc->sc_dcr;
918 	if (sc->sc_32bit)
919 		reg |= DCR_DW;
920 	CSR_WRITE(sc, SONIC_CR, CR_RST);
921 	CSR_WRITE(sc, SONIC_DCR, reg);
922 	CSR_WRITE(sc, SONIC_DCR2, sc->sc_dcr2);
923 	CSR_WRITE(sc, SONIC_CR, 0);
924 
925 	/*
926 	 * Initialize the transmit descriptors.
927 	 */
928 	if (sc->sc_32bit) {
929 		for (i = 0; i < SONIC_NTXDESC; i++) {
930 			memset(&sc->sc_tda32[i], 0, sizeof(struct sonic_tda32));
931 			SONIC_CDTXSYNC32(sc, i,
932 			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
933 		}
934 	} else {
935 		for (i = 0; i < SONIC_NTXDESC; i++) {
936 			memset(&sc->sc_tda16[i], 0, sizeof(struct sonic_tda16));
937 			SONIC_CDTXSYNC16(sc, i,
938 			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
939 		}
940 	}
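	/*
	 * Start with an empty ring: sc_txlast points at the final slot,
	 * so the first packet queued lands in descriptor 0.
	 */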
941 	sc->sc_txpending = 0;
942 	sc->sc_txdirty = 0;
943 	sc->sc_txlast = SONIC_NTXDESC - 1;
944 
945 	/*
946 	 * Initialize the receive descriptor ring.
947 	 */
948 	for (i = 0; i < SONIC_NRXDESC; i++) {
949 		ds = &sc->sc_rxsoft[i];
950 		if (ds->ds_mbuf == NULL) {
951 			if ((error = sonic_add_rxbuf(sc, i)) != 0) {
952 				printf("%s: unable to allocate or map Rx "
953 				    "buffer %d, error = %d\n",
954 				    device_xname(sc->sc_dev), i, error);
955 				/*
956 				 * XXX Should attempt to run with fewer receive
957 				 * XXX buffers instead of just failing.
958 				 */
959 				sonic_rxdrain(sc);
960 				goto out;
961 			}
962 		} else
963 			SONIC_INIT_RXDESC(sc, i);
964 	}
965 	sc->sc_rxptr = 0;
966 
967 	/* Give the transmit ring to the SONIC. */
968 	CSR_WRITE(sc, SONIC_UTDAR, (SONIC_CDTXADDR(sc, 0) >> 16) & 0xffff);
969 	CSR_WRITE(sc, SONIC_CTDAR, SONIC_CDTXADDR(sc, 0) & 0xffff);
970 
971 	/* Give the receive descriptor ring to the SONIC. */
972 	CSR_WRITE(sc, SONIC_URDAR, (SONIC_CDRXADDR(sc, 0) >> 16) & 0xffff);
973 	CSR_WRITE(sc, SONIC_CRDAR, SONIC_CDRXADDR(sc, 0) & 0xffff);
974 
975 	/* Give the receive buffer ring to the SONIC. */
976 	CSR_WRITE(sc, SONIC_URRAR, (SONIC_CDRRADDR(sc, 0) >> 16) & 0xffff);
977 	CSR_WRITE(sc, SONIC_RSAR, SONIC_CDRRADDR(sc, 0) & 0xffff);
978 	if (sc->sc_32bit)
979 		CSR_WRITE(sc, SONIC_REAR,
980 		    (SONIC_CDRRADDR(sc, SONIC_NRXDESC - 1) +
981 		    sizeof(struct sonic_rra32)) & 0xffff);
982 	else
983 		CSR_WRITE(sc, SONIC_REAR,
984 		    (SONIC_CDRRADDR(sc, SONIC_NRXDESC - 1) +
985 		    sizeof(struct sonic_rra16)) & 0xffff);
986 	CSR_WRITE(sc, SONIC_RRR, SONIC_CDRRADDR(sc, 0) & 0xffff);
987 	CSR_WRITE(sc, SONIC_RWR, SONIC_CDRRADDR(sc, SONIC_NRXDESC - 1));
988 
989 	/*
990 	 * Set the End-Of-Buffer counter such that only one packet
991 	 * will be placed into each buffer we provide.  Note we are
992 	 * following the recommendation of section 3.4.4 of the manual
993 	 * here, and have "lengthened" the receive buffers accordingly.
994 	 */
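	/*
	 * The EOBC value is expressed in 16-bit words, hence the divide
	 * by two; the extra two bytes in 32-bit mode presumably account
	 * for the alignment slack of the wider data path.
	 */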
995 	if (sc->sc_32bit)
996 		CSR_WRITE(sc, SONIC_EOBC, (ETHER_MAX_LEN + 2) / 2);
997 	else
998 		CSR_WRITE(sc, SONIC_EOBC, (ETHER_MAX_LEN / 2));
999 
1000 	/* Reset the receive sequence counter. */
1001 	CSR_WRITE(sc, SONIC_RSC, 0);
1002 
1003 	/* Clear the tally registers. */
1004 	CSR_WRITE(sc, SONIC_CRCETC, 0xffff);
1005 	CSR_WRITE(sc, SONIC_FAET, 0xffff);
1006 	CSR_WRITE(sc, SONIC_MPT, 0xffff);
1007 
1008 	/* Set the receive filter. */
1009 	sonic_set_filter(sc);
1010 
1011 	/*
1012 	 * Set the interrupt mask register.
1013 	 */
1014 	sc->sc_imr = IMR_RFO | IMR_RBA | IMR_RBE | IMR_RDE |
1015 	    IMR_TXER | IMR_PTX | IMR_PRX;
1016 	CSR_WRITE(sc, SONIC_IMR, sc->sc_imr);
1017 
1018 	/*
1019 	 * Start the receive process in motion.  Note, we don't
1020 	 * start the transmit process until we actually try to
1021 	 * transmit packets.
1022 	 */
1023 	CSR_WRITE(sc, SONIC_CR, CR_RXEN | CR_RRRA);
1024 
1025 	/*
1026 	 * ...all done!
1027 	 */
1028 	ifp->if_flags |= IFF_RUNNING;
1029 
1030  out:
1031 	if (error)
1032 		printf("%s: interface not running\n", device_xname(sc->sc_dev));
1033 	return error;
1034 }
1035 
1036 /*
1037  * sonic_rxdrain:
1038  *
1039  *	Drain the receive queue.
1040  */
1041 void
1042 sonic_rxdrain(struct sonic_softc *sc)
1043 {
1044 	struct sonic_descsoft *ds;
1045 	int i;
1046 
1047 	for (i = 0; i < SONIC_NRXDESC; i++) {
1048 		ds = &sc->sc_rxsoft[i];
1049 		if (ds->ds_mbuf != NULL) {
1050 			bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
1051 			m_freem(ds->ds_mbuf);
1052 			ds->ds_mbuf = NULL;
1053 		}
1054 	}
1055 }
1056 
1057 /*
1058  * sonic_stop:		[ifnet interface function]
1059  *
1060  *	Stop transmission on the interface.
1061  */
1062 void
1063 sonic_stop(struct ifnet *ifp, int disable)
1064 {
1065 	struct sonic_softc *sc = ifp->if_softc;
1066 	struct sonic_descsoft *ds;
1067 	int i;
1068 
1069 	/*
1070 	 * Disable interrupts.
1071 	 */
1072 	CSR_WRITE(sc, SONIC_IMR, 0);
1073 
1074 	/*
1075 	 * Stop the transmitter, receiver, and timer.
1076 	 */
1077 	CSR_WRITE(sc, SONIC_CR, CR_HTX | CR_RXDIS | CR_STP);
1078 	for (i = 0; i < 1000; i++) {
1079 		if ((CSR_READ(sc, SONIC_CR) & (CR_TXP | CR_RXEN | CR_ST)) == 0)
1080 			break;
1081 		delay(2);
1082 	}
1083 	if ((CSR_READ(sc, SONIC_CR) & (CR_TXP | CR_RXEN | CR_ST)) != 0)
1084 		printf("%s: SONIC failed to stop\n", device_xname(sc->sc_dev));
1085 
1086 	/*
1087 	 * Release any queued transmit buffers.
1088 	 */
1089 	for (i = 0; i < SONIC_NTXDESC; i++) {
1090 		ds = &sc->sc_txsoft[i];
1091 		if (ds->ds_mbuf != NULL) {
1092 			bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
1093 			m_freem(ds->ds_mbuf);
1094 			ds->ds_mbuf = NULL;
1095 		}
1096 	}
1097 
1098 	/*
1099 	 * Mark the interface down and cancel the watchdog timer.
1100 	 */
1101 	ifp->if_flags &= ~IFF_RUNNING;
1102 	ifp->if_timer = 0;
1103 
1104 	if (disable)
1105 		sonic_rxdrain(sc);
1106 }
1107 
1108 /*
1109  * sonic_add_rxbuf:
1110  *
1111  *	Add a receive buffer to the indicated descriptor.
1112  */
1113 int
1114 sonic_add_rxbuf(struct sonic_softc *sc, int idx)
1115 {
1116 	struct sonic_descsoft *ds = &sc->sc_rxsoft[idx];
1117 	struct mbuf *m;
1118 	int error;
1119 
1120 	MGETHDR(m, M_DONTWAIT, MT_DATA);
1121 	if (m == NULL)
1122 		return ENOBUFS;
1123 
1124 	MCLGET(m, M_DONTWAIT);
1125 	if ((m->m_flags & M_EXT) == 0) {
1126 		m_freem(m);
1127 		return ENOBUFS;
1128 	}
1129 
1130 	if (ds->ds_mbuf != NULL)
1131 		bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
1132 
1133 	ds->ds_mbuf = m;
1134 
1135 	error = bus_dmamap_load(sc->sc_dmat, ds->ds_dmamap,
1136 	    m->m_ext.ext_buf, m->m_ext.ext_size, NULL,
1137 	    BUS_DMA_READ | BUS_DMA_NOWAIT);
1138 	if (error) {
1139 		printf("%s: can't load rx DMA map %d, error = %d\n",
1140 		    device_xname(sc->sc_dev), idx, error);
1141 		panic("sonic_add_rxbuf");	/* XXX */
1142 	}
1143 
1144 	bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
1145 	    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
1146 
1147 	SONIC_INIT_RXDESC(sc, idx);
1148 
1149 	return 0;
1150 }
1151 
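/*
 * sonic_set_camentry:
 *
 *	Program one CAM (address filter) slot with the given Ethernet
 *	address, packed as three 16-bit words, low-order byte first.
 */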
1152 static void
1153 sonic_set_camentry(struct sonic_softc *sc, int entry, const uint8_t *enaddr)
1154 {
1155 
1156 	if (sc->sc_32bit) {
1157 		struct sonic_cda32 *cda = &sc->sc_cda32[entry];
1158 
1159 		cda->cda_entry = htosonic32(sc, entry);
1160 		cda->cda_addr0 = htosonic32(sc, enaddr[0] | (enaddr[1] << 8));
1161 		cda->cda_addr1 = htosonic32(sc, enaddr[2] | (enaddr[3] << 8));
1162 		cda->cda_addr2 = htosonic32(sc, enaddr[4] | (enaddr[5] << 8));
1163 	} else {
1164 		struct sonic_cda16 *cda = &sc->sc_cda16[entry];
1165 
1166 		cda->cda_entry = htosonic16(sc, entry);
1167 		cda->cda_addr0 = htosonic16(sc, enaddr[0] | (enaddr[1] << 8));
1168 		cda->cda_addr1 = htosonic16(sc, enaddr[2] | (enaddr[3] << 8));
1169 		cda->cda_addr2 = htosonic16(sc, enaddr[4] | (enaddr[5] << 8));
1170 	}
1171 }
1172 
1173 /*
1174  * sonic_set_filter:
1175  *
1176  *	Set the SONIC receive filter.
1177  */
1178 void
1179 sonic_set_filter(struct sonic_softc *sc)
1180 {
1181 	struct ethercom *ec = &sc->sc_ethercom;
1182 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1183 	struct ether_multi *enm;
1184 	struct ether_multistep step;
1185 	int i, entry = 0;
1186 	uint16_t camvalid = 0;
1187 	uint16_t rcr = 0;
1188 
1189 	if (ifp->if_flags & IFF_BROADCAST)
1190 		rcr |= RCR_BRD;
1191 
1192 	if (ifp->if_flags & IFF_PROMISC) {
1193 		rcr |= RCR_PRO;
1194 		goto allmulti;
1195 	}
1196 
1197 	/* Put our station address in the first CAM slot. */
1198 	sonic_set_camentry(sc, entry, CLLADDR(ifp->if_sadl));
1199 	camvalid |= (1U << entry);
1200 	entry++;
1201 
1202 	/* Add the multicast addresses to the CAM. */
1203 	ETHER_LOCK(ec);
1204 	ETHER_FIRST_MULTI(step, ec, enm);
1205 	while (enm != NULL) {
1206 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
1207 			/*
1208 			 * We must listen to a range of multicast addresses.
1209 			 * The only way to do this on the SONIC is to enable
1210 			 * reception of all multicast packets.
1211 			 */
1212 			ETHER_UNLOCK(ec);
1213 			goto allmulti;
1214 		}
1215 
1216 		if (entry == SONIC_NCAMENT) {
1217 			/*
1218 			 * Out of CAM slots.  Have to enable reception
1219 			 * of all multicast addresses.
1220 			 */
1221 			ETHER_UNLOCK(ec);
1222 			goto allmulti;
1223 		}
1224 
1225 		sonic_set_camentry(sc, entry, enm->enm_addrlo);
1226 		camvalid |= (1U << entry);
1227 		entry++;
1228 
1229 		ETHER_NEXT_MULTI(step, enm);
1230 	}
1231 	ETHER_UNLOCK(ec);
1232 
1233 	ifp->if_flags &= ~IFF_ALLMULTI;
1234 	goto setit;
1235 
1236  allmulti:
1237 	/* Use only the first CAM slot (station address). */
1238 	camvalid = 0x0001;
1239 	entry = 1;
1240 	rcr |= RCR_AMC;
1241 
1242  setit:
1243 	/*
	 * Set the mask for the CAM Enable register.  The SONIC reads
	 * `entry' CAM descriptors starting at CDP and then the enable
	 * word immediately after them, so with a partially filled CAM
	 * the mask lands in the first word of the next (unused) slot.
	 */
1244 	if (sc->sc_32bit) {
1245 		if (entry == SONIC_NCAMENT)
1246 			sc->sc_cdaenable32 = htosonic32(sc, camvalid);
1247 		else
1248 			sc->sc_cda32[entry].cda_entry =
1249 			    htosonic32(sc, camvalid);
1250 	} else {
1251 		if (entry == SONIC_NCAMENT)
1252 			sc->sc_cdaenable16 = htosonic16(sc, camvalid);
1253 		else
1254 			sc->sc_cda16[entry].cda_entry =
1255 			    htosonic16(sc, camvalid);
1256 	}
1257 
1258 	/* Load the CAM. */
1259 	SONIC_CDCAMSYNC(sc, BUS_DMASYNC_PREWRITE);
1260 	CSR_WRITE(sc, SONIC_CDP, SONIC_CDCAMADDR(sc) & 0xffff);
1261 	CSR_WRITE(sc, SONIC_CDC, entry);
1262 	CSR_WRITE(sc, SONIC_CR, CR_LCAM);
1263 	for (i = 0; i < 10000; i++) {
1264 		if ((CSR_READ(sc, SONIC_CR) & CR_LCAM) == 0)
1265 			break;
1266 		delay(2);
1267 	}
1268 	if (CSR_READ(sc, SONIC_CR) & CR_LCAM)
1269 		printf("%s: CAM load failed\n", device_xname(sc->sc_dev));
1270 	SONIC_CDCAMSYNC(sc, BUS_DMASYNC_POSTWRITE);
1271 
1272 	/* Set the receive control register. */
1273 	CSR_WRITE(sc, SONIC_RCR, rcr);
1274 }
1275