1 /*	$NetBSD: dp83932.c,v 1.50 2024/06/29 12:11:11 riastradh Exp $	*/
2 
3 /*-
4  * Copyright (c) 2001 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Jason R. Thorpe.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29  * POSSIBILITY OF SUCH DAMAGE.
30  */
31 
32 /*
33  * Device driver for the National Semiconductor DP83932
34  * Systems-Oriented Network Interface Controller (SONIC).
35  */
36 
37 #include <sys/cdefs.h>
38 __KERNEL_RCSID(0, "$NetBSD: dp83932.c,v 1.50 2024/06/29 12:11:11 riastradh Exp $");
39 
40 
41 #include <sys/param.h>
42 #include <sys/systm.h>
43 #include <sys/mbuf.h>
44 #include <sys/kernel.h>
45 #include <sys/socket.h>
46 #include <sys/ioctl.h>
47 #include <sys/errno.h>
48 #include <sys/device.h>
49 
50 #include <sys/rndsource.h>
51 
52 #include <net/if.h>
53 #include <net/if_dl.h>
54 #include <net/if_ether.h>
55 
56 #include <net/bpf.h>
57 
58 #include <sys/bus.h>
59 #include <sys/intr.h>
60 
61 #include <dev/ic/dp83932reg.h>
62 #include <dev/ic/dp83932var.h>
63 
64 static void	sonic_start(struct ifnet *);
65 static void	sonic_watchdog(struct ifnet *);
66 static int	sonic_ioctl(struct ifnet *, u_long, void *);
67 static int	sonic_init(struct ifnet *);
68 static void	sonic_stop(struct ifnet *, int);
69 
70 static bool	sonic_shutdown(device_t, int);
71 
72 static void	sonic_reset(struct sonic_softc *);
73 static void	sonic_rxdrain(struct sonic_softc *);
74 static int	sonic_add_rxbuf(struct sonic_softc *, int);
75 static void	sonic_set_filter(struct sonic_softc *);
76 
77 static uint16_t sonic_txintr(struct sonic_softc *);
78 static void	sonic_rxintr(struct sonic_softc *);
79 
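/*
 * If non-zero, receive frames small enough to fit in a plain header
 * mbuf are copied into one instead of handing the whole cluster up
 * the stack; see sonic_rxintr() below.
 */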
80 int	sonic_copy_small = 0;
81 
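/*
 * Frames handed to the chip must cover the minimum Ethernet length
 * minus the CRC, which the SONIC appends itself; shorter frames are
 * padded out to this length from a shared zero-filled buffer.
 */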
82 #define ETHER_PAD_LEN (ETHER_MIN_LEN - ETHER_CRC_LEN)
83 
84 /*
85  * sonic_attach:
86  *
87  *	Attach a SONIC interface to the system.
88  */
89 void
90 sonic_attach(struct sonic_softc *sc, const uint8_t *enaddr)
91 {
92 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
93 	int i, rseg, error;
94 	bus_dma_segment_t seg;
95 	size_t cdatasize;
96 	uint8_t *nullbuf;
97 
98 	/*
99 	 * Allocate the control data structures, and create and load the
100 	 * DMA map for it.
101 	 */
102 	if (sc->sc_32bit)
103 		cdatasize = sizeof(struct sonic_control_data32);
104 	else
105 		cdatasize = sizeof(struct sonic_control_data16);
106 
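	/*
	 * The zero-filled Tx pad buffer is carved out of the same
	 * DMA-safe allocation as the control data, immediately after it.
	 */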
107 	if ((error = bus_dmamem_alloc(sc->sc_dmat, cdatasize + ETHER_PAD_LEN,
108 	     PAGE_SIZE, (64 * 1024), &seg, 1, &rseg,
109 	     BUS_DMA_NOWAIT)) != 0) {
110 		aprint_error_dev(sc->sc_dev,
111 		    "unable to allocate control data, error = %d\n", error);
112 		goto fail_0;
113 	}
114 
115 	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
116 	    cdatasize + ETHER_PAD_LEN, (void **) &sc->sc_cdata16,
117 	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
118 		aprint_error_dev(sc->sc_dev,
119 		    "unable to map control data, error = %d\n", error);
120 		goto fail_1;
121 	}
122 	nullbuf = (uint8_t *)sc->sc_cdata16 + cdatasize;
123 	memset(nullbuf, 0, ETHER_PAD_LEN);
124 
125 	if ((error = bus_dmamap_create(sc->sc_dmat,
126 	     cdatasize, 1, cdatasize, 0, BUS_DMA_NOWAIT,
127 	     &sc->sc_cddmamap)) != 0) {
128 		aprint_error_dev(sc->sc_dev,
129 		    "unable to create control data DMA map, error = %d\n",
130 		    error);
131 		goto fail_2;
132 	}
133 
134 	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
135 	     sc->sc_cdata16, cdatasize, NULL, BUS_DMA_NOWAIT)) != 0) {
136 		aprint_error_dev(sc->sc_dev,
137 		    "unable to load control data DMA map, error = %d\n", error);
138 		goto fail_3;
139 	}
140 
141 	/*
142 	 * Create the transmit buffer DMA maps.
143 	 */
144 	for (i = 0; i < SONIC_NTXDESC; i++) {
145 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
146 		     SONIC_NTXFRAGS, MCLBYTES, 0, BUS_DMA_NOWAIT,
147 		     &sc->sc_txsoft[i].ds_dmamap)) != 0) {
148 			aprint_error_dev(sc->sc_dev,
149 			    "unable to create tx DMA map %d, error = %d\n",
150 			    i, error);
151 			goto fail_4;
152 		}
153 	}
154 
155 	/*
156 	 * Create the receive buffer DMA maps.
157 	 */
158 	for (i = 0; i < SONIC_NRXDESC; i++) {
159 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
160 		     MCLBYTES, 0, BUS_DMA_NOWAIT,
161 		     &sc->sc_rxsoft[i].ds_dmamap)) != 0) {
162 			aprint_error_dev(sc->sc_dev,
163 			    "unable to create rx DMA map %d, error = %d\n",
164 			    i, error);
165 			goto fail_5;
166 		}
167 		sc->sc_rxsoft[i].ds_mbuf = NULL;
168 	}
169 
170 	/*
171 	 * Create and load the pad buffer DMA map.
172 	 */
173 	if ((error = bus_dmamap_create(sc->sc_dmat, ETHER_PAD_LEN, 1,
174 	    ETHER_PAD_LEN, 0, BUS_DMA_NOWAIT, &sc->sc_nulldmamap)) != 0) {
175 		aprint_error_dev(sc->sc_dev,
176 		    "unable to create pad buffer DMA map, error = %d\n", error);
177 		goto fail_5;
178 	}
179 
180 	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_nulldmamap,
181 	    nullbuf, ETHER_PAD_LEN, NULL, BUS_DMA_NOWAIT)) != 0) {
182 		aprint_error_dev(sc->sc_dev,
183 		    "unable to load pad buffer DMA map, error = %d\n", error);
184 		goto fail_6;
185 	}
186 	bus_dmamap_sync(sc->sc_dmat, sc->sc_nulldmamap, 0, ETHER_PAD_LEN,
187 	    BUS_DMASYNC_PREWRITE);
188 
189 	/*
190 	 * Reset the chip to a known state.
191 	 */
192 	sonic_reset(sc);
193 
194 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
195 	    ether_sprintf(enaddr));
196 
197 	strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
198 	ifp->if_softc = sc;
199 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
200 	ifp->if_ioctl = sonic_ioctl;
201 	ifp->if_start = sonic_start;
202 	ifp->if_watchdog = sonic_watchdog;
203 	ifp->if_init = sonic_init;
204 	ifp->if_stop = sonic_stop;
205 	IFQ_SET_READY(&ifp->if_snd);
206 
207 	/*
208 	 * We can support 802.1Q VLAN-sized frames.
209 	 */
210 	sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU;
211 
212 	/*
213 	 * Attach the interface.
214 	 */
215 	if_attach(ifp);
216 	if_deferred_start_init(ifp, NULL);
217 	ether_ifattach(ifp, enaddr);
218 
219 	rnd_attach_source(&sc->sc_rndsource, ifp->if_xname, RND_TYPE_NET,
220 	    RND_FLAG_DEFAULT);
221 
222 	/*
223 	 * Make sure the interface is shutdown during reboot.
224 	 */
225 	if (pmf_device_register1(sc->sc_dev, NULL, NULL, sonic_shutdown))
226 		pmf_class_network_register(sc->sc_dev, ifp);
227 	else
228 		aprint_error_dev(sc->sc_dev,
229 		    "couldn't establish power handler\n");
230 
231 	return;
232 
233 	/*
234 	 * Free any resources we've allocated during the failed attach
235 	 * attempt.  Do this in reverse order and fall through.
236 	 */
237  fail_6:
238 	bus_dmamap_destroy(sc->sc_dmat, sc->sc_nulldmamap);
239  fail_5:
240 	for (i = 0; i < SONIC_NRXDESC; i++) {
241 		if (sc->sc_rxsoft[i].ds_dmamap != NULL)
242 			bus_dmamap_destroy(sc->sc_dmat,
243 			    sc->sc_rxsoft[i].ds_dmamap);
244 	}
245  fail_4:
246 	for (i = 0; i < SONIC_NTXDESC; i++) {
247 		if (sc->sc_txsoft[i].ds_dmamap != NULL)
248 			bus_dmamap_destroy(sc->sc_dmat,
249 			    sc->sc_txsoft[i].ds_dmamap);
250 	}
251 	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
252  fail_3:
253 	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
254  fail_2:
255 	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_cdata16, cdatasize);
256  fail_1:
257 	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
258  fail_0:
259 	return;
260 }
261 
262 /*
263  * sonic_shutdown:
264  *
265  *	Make sure the interface is stopped at reboot.
266  */
267 bool
268 sonic_shutdown(device_t self, int howto)
269 {
270 	struct sonic_softc *sc = device_private(self);
271 
272 	sonic_stop(&sc->sc_ethercom.ec_if, 1);
273 
274 	return true;
275 }
276 
277 /*
278  * sonic_start:		[ifnet interface function]
279  *
280  *	Start packet transmission on the interface.
281  */
282 void
283 sonic_start(struct ifnet *ifp)
284 {
285 	struct sonic_softc *sc = ifp->if_softc;
286 	struct mbuf *m0, *m;
287 	struct sonic_tda16 *tda16;
288 	struct sonic_tda32 *tda32;
289 	struct sonic_descsoft *ds;
290 	bus_dmamap_t dmamap;
291 	int error, olasttx, nexttx, opending, totlen, olseg;
292 	int seg = 0;	/* XXX: gcc */
293 
294 	if ((ifp->if_flags & IFF_RUNNING) != IFF_RUNNING)
295 		return;
296 
297 	/*
298 	 * Remember the previous txpending and the current "last txdesc
299 	 * used" index.
300 	 */
301 	opending = sc->sc_txpending;
302 	olasttx = sc->sc_txlast;
303 
304 	/*
305 	 * Loop through the send queue, setting up transmit descriptors
306 	 * until we drain the queue, or use up all available transmit
307 	 * descriptors.  Leave one at the end for sanity's sake.
308 	 */
309 	while (sc->sc_txpending < (SONIC_NTXDESC - 1)) {
310 		/*
311 		 * Grab a packet off the queue.
312 		 */
313 		IFQ_POLL(&ifp->if_snd, m0);
314 		if (m0 == NULL)
315 			break;
316 		m = NULL;
317 
318 		/*
319 		 * Get the next available transmit descriptor.
320 		 */
321 		nexttx = SONIC_NEXTTX(sc->sc_txlast);
322 		ds = &sc->sc_txsoft[nexttx];
323 		dmamap = ds->ds_dmamap;
324 
325 		/*
326 		 * Load the DMA map.  If this fails, the packet either
327 		 * didn't fit in the allotted number of frags, or we were
328 		 * short on resources.  In this case, we'll copy and try
329 		 * again.
330 		 */
331 		if ((error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
332 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT)) != 0 ||
333 		    (m0->m_pkthdr.len < ETHER_PAD_LEN &&
334 		    dmamap->dm_nsegs == SONIC_NTXFRAGS)) {
335 			if (error == 0)
336 				bus_dmamap_unload(sc->sc_dmat, dmamap);
337 			MGETHDR(m, M_DONTWAIT, MT_DATA);
338 			if (m == NULL) {
339 				printf("%s: unable to allocate Tx mbuf\n",
340 				    device_xname(sc->sc_dev));
341 				break;
342 			}
343 			if (m0->m_pkthdr.len > MHLEN) {
344 				MCLGET(m, M_DONTWAIT);
345 				if ((m->m_flags & M_EXT) == 0) {
346 					printf("%s: unable to allocate Tx "
347 					    "cluster\n",
348 					    device_xname(sc->sc_dev));
349 					m_freem(m);
350 					break;
351 				}
352 			}
353 			m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, void *));
354 			m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;
355 			error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap,
356 			    m, BUS_DMA_WRITE | BUS_DMA_NOWAIT);
357 			if (error) {
358 				printf("%s: unable to load Tx buffer, "
359 				    "error = %d\n", device_xname(sc->sc_dev),
360 				    error);
361 				m_freem(m);
362 				break;
363 			}
364 		}
365 		IFQ_DEQUEUE(&ifp->if_snd, m0);
366 		if (m != NULL) {
367 			m_freem(m0);
368 			m0 = m;
369 		}
370 
371 		/*
372 		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
373 		 */
374 
375 		/* Sync the DMA map. */
376 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
377 		    BUS_DMASYNC_PREWRITE);
378 
379 		/*
380 		 * Store a pointer to the packet so we can free it later.
381 		 */
382 		ds->ds_mbuf = m0;
383 
384 		/*
385 		 * Initialize the transmit descriptor.
386 		 */
387 		totlen = 0;
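		/*
		 * Buffer addresses are handed to the SONIC as two 16-bit
		 * halves: the low word in frag_ptr0 and the high word in
		 * frag_ptr1.  Frames shorter than ETHER_PAD_LEN get an
		 * extra fragment pointing at the shared zero buffer.
		 */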
388 		if (sc->sc_32bit) {
389 			tda32 = &sc->sc_tda32[nexttx];
390 			for (seg = 0; seg < dmamap->dm_nsegs; seg++) {
391 				tda32->tda_frags[seg].frag_ptr1 =
392 				    htosonic32(sc,
393 				    (dmamap->dm_segs[seg].ds_addr >> 16) &
394 				    0xffff);
395 				tda32->tda_frags[seg].frag_ptr0 =
396 				    htosonic32(sc,
397 				    dmamap->dm_segs[seg].ds_addr & 0xffff);
398 				tda32->tda_frags[seg].frag_size =
399 				    htosonic32(sc, dmamap->dm_segs[seg].ds_len);
400 				totlen += dmamap->dm_segs[seg].ds_len;
401 			}
402 			if (totlen < ETHER_PAD_LEN) {
403 				tda32->tda_frags[seg].frag_ptr1 =
404 				    htosonic32(sc,
405 				    (sc->sc_nulldma >> 16) & 0xffff);
406 				tda32->tda_frags[seg].frag_ptr0 =
407 				    htosonic32(sc, sc->sc_nulldma & 0xffff);
408 				tda32->tda_frags[seg].frag_size =
409 				    htosonic32(sc, ETHER_PAD_LEN - totlen);
410 				totlen = ETHER_PAD_LEN;
411 				seg++;
412 			}
413 
414 			tda32->tda_status = 0;
415 			tda32->tda_pktconfig = 0;
416 			tda32->tda_pktsize = htosonic32(sc, totlen);
417 			tda32->tda_fragcnt = htosonic32(sc, seg);
418 
419 			/* Link it up. */
420 			tda32->tda_frags[seg].frag_ptr0 =
421 			    htosonic32(sc, SONIC_CDTXADDR32(sc,
422 			    SONIC_NEXTTX(nexttx)) & 0xffff);
423 
424 			/* Sync the Tx descriptor. */
425 			SONIC_CDTXSYNC32(sc, nexttx,
426 			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
427 		} else {
428 			tda16 = &sc->sc_tda16[nexttx];
429 			for (seg = 0; seg < dmamap->dm_nsegs; seg++) {
430 				tda16->tda_frags[seg].frag_ptr1 =
431 				    htosonic16(sc,
432 				    (dmamap->dm_segs[seg].ds_addr >> 16) &
433 				    0xffff);
434 				tda16->tda_frags[seg].frag_ptr0 =
435 				    htosonic16(sc,
436 				    dmamap->dm_segs[seg].ds_addr & 0xffff);
437 				tda16->tda_frags[seg].frag_size =
438 				    htosonic16(sc, dmamap->dm_segs[seg].ds_len);
439 				totlen += dmamap->dm_segs[seg].ds_len;
440 			}
441 			if (totlen < ETHER_PAD_LEN) {
442 				tda16->tda_frags[seg].frag_ptr1 =
443 				    htosonic16(sc,
444 				    (sc->sc_nulldma >> 16) & 0xffff);
445 				tda16->tda_frags[seg].frag_ptr0 =
446 				    htosonic16(sc, sc->sc_nulldma & 0xffff);
447 				tda16->tda_frags[seg].frag_size =
448 				    htosonic16(sc, ETHER_PAD_LEN - totlen);
449 				totlen = ETHER_PAD_LEN;
450 				seg++;
451 			}
452 
453 			tda16->tda_status = 0;
454 			tda16->tda_pktconfig = 0;
455 			tda16->tda_pktsize = htosonic16(sc, totlen);
456 			tda16->tda_fragcnt = htosonic16(sc, seg);
457 
458 			/* Link it up. */
459 			tda16->tda_frags[seg].frag_ptr0 =
460 			    htosonic16(sc, SONIC_CDTXADDR16(sc,
461 			    SONIC_NEXTTX(nexttx)) & 0xffff);
462 
463 			/* Sync the Tx descriptor. */
464 			SONIC_CDTXSYNC16(sc, nexttx,
465 			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
466 		}
467 
468 		/* Advance the Tx pointer. */
469 		sc->sc_txpending++;
470 		sc->sc_txlast = nexttx;
471 
472 		/*
473 		 * Pass the packet to any BPF listeners.
474 		 */
475 		bpf_mtap(ifp, m0, BPF_D_OUT);
476 	}
477 
478 	if (sc->sc_txpending != opending) {
479 		/*
480 		 * We enqueued packets.  If the transmitter was idle,
481 		 * reset the txdirty pointer.
482 		 */
483 		if (opending == 0)
484 			sc->sc_txdirty = SONIC_NEXTTX(olasttx);
485 
486 		/*
487 		 * Stop the SONIC on the last packet we've set up,
488 		 * and clear end-of-list on the descriptor previous
489 		 * to our new chain.
490 		 *
491 		 * NOTE: our `seg' variable should still be valid!
492 		 */
493 		if (sc->sc_32bit) {
494 			olseg =
495 			    sonic32toh(sc, sc->sc_tda32[olasttx].tda_fragcnt);
496 			sc->sc_tda32[sc->sc_txlast].tda_frags[seg].frag_ptr0 |=
497 			    htosonic32(sc, TDA_LINK_EOL);
498 			SONIC_CDTXSYNC32(sc, sc->sc_txlast,
499 			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
500 			sc->sc_tda32[olasttx].tda_frags[olseg].frag_ptr0 &=
501 			    htosonic32(sc, ~TDA_LINK_EOL);
502 			SONIC_CDTXSYNC32(sc, olasttx,
503 			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
504 		} else {
505 			olseg =
506 			    sonic16toh(sc, sc->sc_tda16[olasttx].tda_fragcnt);
507 			sc->sc_tda16[sc->sc_txlast].tda_frags[seg].frag_ptr0 |=
508 			    htosonic16(sc, TDA_LINK_EOL);
509 			SONIC_CDTXSYNC16(sc, sc->sc_txlast,
510 			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
511 			sc->sc_tda16[olasttx].tda_frags[olseg].frag_ptr0 &=
512 			    htosonic16(sc, ~TDA_LINK_EOL);
513 			SONIC_CDTXSYNC16(sc, olasttx,
514 			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
515 		}
516 
517 		/* Start the transmitter. */
518 		CSR_WRITE(sc, SONIC_CR, CR_TXP);
519 
520 		/* Set a watchdog timer in case the chip flakes out. */
521 		ifp->if_timer = 5;
522 	}
523 }
524 
525 /*
526  * sonic_watchdog:	[ifnet interface function]
527  *
528  *	Watchdog timer handler.
529  */
530 void
531 sonic_watchdog(struct ifnet *ifp)
532 {
533 	struct sonic_softc *sc = ifp->if_softc;
534 
535 	printf("%s: device timeout\n", device_xname(sc->sc_dev));
536 	if_statinc(ifp, if_oerrors);
537 
538 	(void)sonic_init(ifp);
539 }
540 
541 /*
542  * sonic_ioctl:		[ifnet interface function]
543  *
544  *	Handle control requests from the operator.
545  */
546 int
547 sonic_ioctl(struct ifnet *ifp, u_long cmd, void *data)
548 {
549 	int s, error;
550 
551 	s = splnet();
552 
553 	error = ether_ioctl(ifp, cmd, data);
554 	if (error == ENETRESET) {
555 		/*
556 		 * Multicast list has changed; set the hardware
557 		 * filter accordingly.
558 		 */
559 		if (ifp->if_flags & IFF_RUNNING)
560 			(void)sonic_init(ifp);
561 		error = 0;
562 	}
563 
564 	splx(s);
565 	return error;
566 }
567 
568 /*
569  * sonic_intr:
570  *
571  *	Interrupt service routine.
572  */
573 int
574 sonic_intr(void *arg)
575 {
576 	struct sonic_softc *sc = arg;
577 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
578 	uint16_t isr;
579 	int handled = 0, wantinit;
580 
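	/*
	 * Service and acknowledge interrupt causes until none remain,
	 * or until an error condition forces a reinitialization.
	 */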
581 	for (wantinit = 0; wantinit == 0;) {
582 		isr = CSR_READ(sc, SONIC_ISR) & sc->sc_imr;
583 		if (isr == 0)
584 			break;
585 		CSR_WRITE(sc, SONIC_ISR, isr);	/* ACK */
586 
587 		handled = 1;
588 
589 		if (isr & IMR_PRX)
590 			sonic_rxintr(sc);
591 
592 		if (isr & (IMR_PTX | IMR_TXER)) {
593 			if (sonic_txintr(sc) & TCR_FU) {
594 				printf("%s: transmit FIFO underrun\n",
595 				    device_xname(sc->sc_dev));
596 				wantinit = 1;
597 			}
598 		}
599 
600 		if (isr & (IMR_RFO | IMR_RBA | IMR_RBE | IMR_RDE)) {
601 #define	PRINTERR(bit, str)						\
602 			if (isr & (bit))				\
603 				printf("%s: %s\n", device_xname(sc->sc_dev), str)
604 			PRINTERR(IMR_RFO, "receive FIFO overrun");
605 			PRINTERR(IMR_RBA, "receive buffer exceeded");
606 			PRINTERR(IMR_RBE, "receive buffers exhausted");
607 			PRINTERR(IMR_RDE, "receive descriptors exhausted");
608 			wantinit = 1;
609 		}
610 	}
611 
612 	if (handled) {
613 		if (wantinit)
614 			(void)sonic_init(ifp);
615 		if_schedule_deferred_start(ifp);
616 	}
617 
618 	return handled;
619 }
620 
621 /*
622  * sonic_txintr:
623  *
624  *	Helper; handle transmit complete interrupts.
625  */
626 uint16_t
627 sonic_txintr(struct sonic_softc *sc)
628 {
629 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
630 	struct sonic_descsoft *ds;
631 	struct sonic_tda32 *tda32;
632 	struct sonic_tda16 *tda16;
633 	uint16_t status, totstat = 0;
634 	int i, count;
635 
636 	count = 0;
637 	for (i = sc->sc_txdirty; sc->sc_txpending != 0;
638 	     i = SONIC_NEXTTX(i), sc->sc_txpending--) {
639 		ds = &sc->sc_txsoft[i];
640 
641 		if (sc->sc_32bit) {
642 			SONIC_CDTXSYNC32(sc, i,
643 			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
644 			tda32 = &sc->sc_tda32[i];
645 			status = sonic32toh(sc, tda32->tda_status);
646 			SONIC_CDTXSYNC32(sc, i, BUS_DMASYNC_PREREAD);
647 		} else {
648 			SONIC_CDTXSYNC16(sc, i,
649 			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
650 			tda16 = &sc->sc_tda16[i];
651 			status = sonic16toh(sc, tda16->tda_status);
652 			SONIC_CDTXSYNC16(sc, i, BUS_DMASYNC_PREREAD);
653 		}
654 
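		/*
		 * TCR_EXDIS/CRCI/POWC/PINT are transmit-control bits, not
		 * completion status; if nothing else is set, the SONIC has
		 * not finished with this descriptor yet.
		 */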
655 		if ((status & ~(TCR_EXDIS |TCR_CRCI |TCR_POWC |TCR_PINT)) == 0)
656 			break;
657 
658 		totstat |= status;
659 
660 		bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
661 		    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
662 		bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
663 		m_freem(ds->ds_mbuf);
664 		ds->ds_mbuf = NULL;
665 
666 		/*
667 		 * Check for errors and collisions.
668 		 */
669 		net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
670 		if (status & TCR_PTX) {
671 			if_statinc_ref(ifp, nsr, if_opackets);
672 			count++;
673 		} else {
674 			if_statinc_ref(ifp, nsr, if_oerrors);
675 		}
676 		if (TDA_STATUS_NCOL(status)) {
677 			if_statadd_ref(ifp, nsr, if_collisions,
678 			    TDA_STATUS_NCOL(status));
679 		}
680 		IF_STAT_PUTREF(ifp);
681 	}
682 
683 	/* Update the dirty transmit buffer pointer. */
684 	sc->sc_txdirty = i;
685 
686 	/*
687 	 * Cancel the watchdog timer if there are no pending
688 	 * transmissions.
689 	 */
690 	if (sc->sc_txpending == 0)
691 		ifp->if_timer = 0;
692 
693 	if (totstat != 0)
694 		rnd_add_uint32(&sc->sc_rndsource, totstat);
695 
696 	return totstat;
697 }
698 
699 /*
700  * sonic_rxintr:
701  *
702  *	Helper; handle receive interrupts.
703  */
704 void
705 sonic_rxintr(struct sonic_softc *sc)
706 {
707 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
708 	struct sonic_descsoft *ds;
709 	struct sonic_rda32 *rda32;
710 	struct sonic_rda16 *rda16;
711 	struct mbuf *m;
712 	int i, len, count;
713 	uint16_t status, bytecount /*, ptr0, ptr1, seqno */;
714 
715 	count = 0;
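	/*
	 * Walk the receive ring until we reach a descriptor whose
	 * in-use word the SONIC has not yet cleared, i.e. one that
	 * does not yet describe a received frame.
	 */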
716 	for (i = sc->sc_rxptr;; i = SONIC_NEXTRX(i)) {
717 		ds = &sc->sc_rxsoft[i];
718 
719 		if (sc->sc_32bit) {
720 			SONIC_CDRXSYNC32(sc, i,
721 			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
722 			rda32 = &sc->sc_rda32[i];
723 			SONIC_CDRXSYNC32(sc, i, BUS_DMASYNC_PREREAD);
724 			if (rda32->rda_inuse != 0)
725 				break;
726 			status = sonic32toh(sc, rda32->rda_status);
727 			bytecount = sonic32toh(sc, rda32->rda_bytecount);
728 			/* ptr0 = sonic32toh(sc, rda32->rda_pkt_ptr0); */
729 			/* ptr1 = sonic32toh(sc, rda32->rda_pkt_ptr1); */
730 			/* seqno = sonic32toh(sc, rda32->rda_seqno); */
731 		} else {
732 			SONIC_CDRXSYNC16(sc, i,
733 			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
734 			rda16 = &sc->sc_rda16[i];
735 			SONIC_CDRXSYNC16(sc, i, BUS_DMASYNC_PREREAD);
736 			if (rda16->rda_inuse != 0)
737 				break;
738 			status = sonic16toh(sc, rda16->rda_status);
739 			bytecount = sonic16toh(sc, rda16->rda_bytecount);
740 			/* ptr0 = sonic16toh(sc, rda16->rda_pkt_ptr0); */
741 			/* ptr1 = sonic16toh(sc, rda16->rda_pkt_ptr1); */
742 			/* seqno = sonic16toh(sc, rda16->rda_seqno); */
743 		}
744 
745 		/*
746 		 * Make absolutely sure this is the only packet
747 		 * in this receive buffer.  Our entire Rx buffer
748 		 * management scheme depends on this, and if the
749 		 * SONIC didn't follow our rule, it means we've
750 		 * misconfigured it.
751 		 */
752 		KASSERT(status & RCR_LPKT);
753 
754 		/*
755 		 * Make sure the packet arrived OK.  If an error occurred,
756 		 * update stats and reset the descriptor.  The buffer will
757 		 * be reused the next time the descriptor comes up in the
758 		 * ring.
759 		 */
760 		if ((status & RCR_PRX) == 0) {
761 			if (status & RCR_FAER)
762 				printf("%s: Rx frame alignment error\n",
763 				    device_xname(sc->sc_dev));
764 			else if (status & RCR_CRCR)
765 				printf("%s: Rx CRC error\n",
766 				    device_xname(sc->sc_dev));
767 			if_statinc(ifp, if_ierrors);
768 			SONIC_INIT_RXDESC(sc, i);
769 			continue;
770 		}
771 
772 		bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
773 		    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
774 
775 		/*
776 		 * The SONIC includes the CRC with every packet.
777 		 */
778 		len = bytecount - ETHER_CRC_LEN;
779 
780 		/*
781 		 * Ok, if the chip is in 32-bit mode, then receive
782 		 * buffers must be aligned to 32-bit boundaries,
783 		 * which means the payload is misaligned.  In this
784 		 * case, we must allocate a new mbuf, and copy the
785 		 * packet into it, scooted forward 2 bytes to ensure
786 		 * proper alignment.
787 		 *
788 		 * Note, in 16-bit mode, we can configure the SONIC
789 		 * to do what we want, and we have.
790 		 */
791 #ifndef __NO_STRICT_ALIGNMENT
792 		if (sc->sc_32bit) {
793 			MGETHDR(m, M_DONTWAIT, MT_DATA);
794 			if (m == NULL)
795 				goto dropit;
796 			if (len > (MHLEN - 2)) {
797 				MCLGET(m, M_DONTWAIT);
798 				if ((m->m_flags & M_EXT) == 0) {
799 					m_freem(m);
800 					goto dropit;
801 				}
802 			}
803 			m->m_data += 2;
804 			/*
805 			 * Note that we use a cluster for incoming frames,
806 			 * so the buffer is virtually contiguous.
807 			 */
808 			memcpy(mtod(m, void *), mtod(ds->ds_mbuf, void *),
809 			    len);
810 			SONIC_INIT_RXDESC(sc, i);
811 			bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
812 			    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
813 		} else
814 #endif /* ! __NO_STRICT_ALIGNMENT */
815 		/*
816 		 * If the packet is small enough to fit in a single
817 		 * header mbuf, allocate one and copy the data into
818 		 * it.  This greatly reduces memory consumption when
819 		 * we receive lots of small packets.
820 		 */
821 		if (sonic_copy_small != 0 && len <= (MHLEN - 2)) {
822 			MGETHDR(m, M_DONTWAIT, MT_DATA);
823 			if (m == NULL)
824 				goto dropit;
825 			m->m_data += 2;
826 			/*
827 			 * Note that we use a cluster for incoming frames,
828 			 * so the buffer is virtually contiguous.
829 			 */
830 			memcpy(mtod(m, void *), mtod(ds->ds_mbuf, void *),
831 			    len);
832 			SONIC_INIT_RXDESC(sc, i);
833 			bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
834 			    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
835 		} else {
836 			m = ds->ds_mbuf;
837 			if (sonic_add_rxbuf(sc, i) != 0) {
838  dropit:
839 				if_statinc(ifp, if_ierrors);
840 				SONIC_INIT_RXDESC(sc, i);
841 				bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
842 				    ds->ds_dmamap->dm_mapsize,
843 				    BUS_DMASYNC_PREREAD);
844 				continue;
845 			}
846 		}
847 
848 		m_set_rcvif(m, ifp);
849 		m->m_pkthdr.len = m->m_len = len;
850 
851 		/* Pass it on. */
852 		if_percpuq_enqueue(ifp->if_percpuq, m);
853 
854 		count++;
855 	}
856 
857 	/* Update the receive pointer. */
858 	sc->sc_rxptr = i;
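	/*
	 * Advance the resource write pointer to the RRA entry just
	 * before the descriptor we will examine next, so the SONIC
	 * never recycles a buffer we have not processed yet.
	 */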
859 	CSR_WRITE(sc, SONIC_RWR, SONIC_CDRRADDR(sc, SONIC_PREVRX(i)));
860 
861 	if (count != 0)
862 		rnd_add_uint32(&sc->sc_rndsource, count);
863 }
864 
865 /*
866  * sonic_reset:
867  *
868  *	Perform a soft reset on the SONIC.
869  */
870 void
871 sonic_reset(struct sonic_softc *sc)
872 {
873 
874 	/* stop TX, RX and timer, and ensure RST is clear */
875 	CSR_WRITE(sc, SONIC_CR, CR_STP | CR_RXDIS | CR_HTX);
876 	delay(1000);
877 
878 	CSR_WRITE(sc, SONIC_CR, CR_RST);
879 	delay(1000);
880 
881 	/* clear all interrupts */
882 	CSR_WRITE(sc, SONIC_IMR, 0);
883 	CSR_WRITE(sc, SONIC_ISR, IMR_ALL);
884 
885 	CSR_WRITE(sc, SONIC_CR, 0);
886 	delay(1000);
887 }
888 
889 /*
890  * sonic_init:		[ifnet interface function]
891  *
892  *	Initialize the interface.  Must be called at splnet().
893  */
894 int
895 sonic_init(struct ifnet *ifp)
896 {
897 	struct sonic_softc *sc = ifp->if_softc;
898 	struct sonic_descsoft *ds;
899 	int i, error = 0;
900 	uint16_t reg;
901 
902 	/*
903 	 * Cancel any pending I/O.
904 	 */
905 	sonic_stop(ifp, 0);
906 
907 	/*
908 	 * Reset the SONIC to a known state.
909 	 */
910 	sonic_reset(sc);
911 
912 	/*
913 	 * Bring the SONIC into reset state, and program the DCR.
914 	 *
915 	 * Note: We don't bother optimizing the transmit and receive
916 	 * thresholds here. TFT/RFT values should be set in MD attachments.
917 	 */
918 	reg = sc->sc_dcr;
919 	if (sc->sc_32bit)
920 		reg |= DCR_DW;
921 	CSR_WRITE(sc, SONIC_CR, CR_RST);
922 	CSR_WRITE(sc, SONIC_DCR, reg);
923 	CSR_WRITE(sc, SONIC_DCR2, sc->sc_dcr2);
924 	CSR_WRITE(sc, SONIC_CR, 0);
925 
926 	/*
927 	 * Initialize the transmit descriptors.
928 	 */
929 	if (sc->sc_32bit) {
930 		for (i = 0; i < SONIC_NTXDESC; i++) {
931 			memset(&sc->sc_tda32[i], 0, sizeof(struct sonic_tda32));
932 			SONIC_CDTXSYNC32(sc, i,
933 			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
934 		}
935 	} else {
936 		for (i = 0; i < SONIC_NTXDESC; i++) {
937 			memset(&sc->sc_tda16[i], 0, sizeof(struct sonic_tda16));
938 			SONIC_CDTXSYNC16(sc, i,
939 			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
940 		}
941 	}
942 	sc->sc_txpending = 0;
943 	sc->sc_txdirty = 0;
944 	sc->sc_txlast = SONIC_NTXDESC - 1;
945 
946 	/*
947 	 * Initialize the receive descriptor ring.
948 	 */
949 	for (i = 0; i < SONIC_NRXDESC; i++) {
950 		ds = &sc->sc_rxsoft[i];
951 		if (ds->ds_mbuf == NULL) {
952 			if ((error = sonic_add_rxbuf(sc, i)) != 0) {
953 				printf("%s: unable to allocate or map Rx "
954 				    "buffer %d, error = %d\n",
955 				    device_xname(sc->sc_dev), i, error);
956 				/*
957 				 * XXX Should attempt to run with fewer receive
958 				 * XXX buffers instead of just failing.
959 				 */
960 				sonic_rxdrain(sc);
961 				goto out;
962 			}
963 		} else
964 			SONIC_INIT_RXDESC(sc, i);
965 	}
966 	sc->sc_rxptr = 0;
967 
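	/*
	 * Ring base addresses are programmed as separate upper and
	 * lower 16-bit halves.
	 */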
968 	/* Give the transmit ring to the SONIC. */
969 	CSR_WRITE(sc, SONIC_UTDAR, (SONIC_CDTXADDR(sc, 0) >> 16) & 0xffff);
970 	CSR_WRITE(sc, SONIC_CTDAR, SONIC_CDTXADDR(sc, 0) & 0xffff);
971 
972 	/* Give the receive descriptor ring to the SONIC. */
973 	CSR_WRITE(sc, SONIC_URDAR, (SONIC_CDRXADDR(sc, 0) >> 16) & 0xffff);
974 	CSR_WRITE(sc, SONIC_CRDAR, SONIC_CDRXADDR(sc, 0) & 0xffff);
975 
976 	/* Give the receive buffer ring to the SONIC. */
977 	CSR_WRITE(sc, SONIC_URRAR, (SONIC_CDRRADDR(sc, 0) >> 16) & 0xffff);
978 	CSR_WRITE(sc, SONIC_RSAR, SONIC_CDRRADDR(sc, 0) & 0xffff);
979 	if (sc->sc_32bit)
980 		CSR_WRITE(sc, SONIC_REAR,
981 		    (SONIC_CDRRADDR(sc, SONIC_NRXDESC - 1) +
982 		    sizeof(struct sonic_rra32)) & 0xffff);
983 	else
984 		CSR_WRITE(sc, SONIC_REAR,
985 		    (SONIC_CDRRADDR(sc, SONIC_NRXDESC - 1) +
986 		    sizeof(struct sonic_rra16)) & 0xffff);
987 	CSR_WRITE(sc, SONIC_RRR, SONIC_CDRRADDR(sc, 0) & 0xffff);
988 	CSR_WRITE(sc, SONIC_RWR, SONIC_CDRRADDR(sc, SONIC_NRXDESC - 1));
989 
990 	/*
991 	 * Set the End-Of-Buffer counter such that only one packet
992 	 * will be placed into each buffer we provide.  Note we are
993 	 * following the recommendation of section 3.4.4 of the manual
994 	 * here, and have "lengthened" the receive buffers accordingly.
995 	 */
996 	if (sc->sc_32bit)
997 		CSR_WRITE(sc, SONIC_EOBC, (ETHER_MAX_LEN + 2) / 2);
998 	else
999 		CSR_WRITE(sc, SONIC_EOBC, (ETHER_MAX_LEN / 2));
1000 
1001 	/* Reset the receive sequence counter. */
1002 	CSR_WRITE(sc, SONIC_RSC, 0);
1003 
1004 	/* Clear the tally registers. */
1005 	CSR_WRITE(sc, SONIC_CRCETC, 0xffff);
1006 	CSR_WRITE(sc, SONIC_FAET, 0xffff);
1007 	CSR_WRITE(sc, SONIC_MPT, 0xffff);
1008 
1009 	/* Set the receive filter. */
1010 	sonic_set_filter(sc);
1011 
1012 	/*
1013 	 * Set the interrupt mask register.
1014 	 */
1015 	sc->sc_imr = IMR_RFO | IMR_RBA | IMR_RBE | IMR_RDE |
1016 	    IMR_TXER | IMR_PTX | IMR_PRX;
1017 	CSR_WRITE(sc, SONIC_IMR, sc->sc_imr);
1018 
1019 	/*
1020 	 * Start the receive process in motion.  Note, we don't
1021 	 * start the transmit process until we actually try to
1022 	 * transmit packets.
1023 	 */
1024 	CSR_WRITE(sc, SONIC_CR, CR_RXEN | CR_RRRA);
1025 
1026 	/*
1027 	 * ...all done!
1028 	 */
1029 	ifp->if_flags |= IFF_RUNNING;
1030 
1031  out:
1032 	if (error)
1033 		printf("%s: interface not running\n", device_xname(sc->sc_dev));
1034 	return error;
1035 }
1036 
1037 /*
1038  * sonic_rxdrain:
1039  *
1040  *	Drain the receive queue.
1041  */
1042 void
1043 sonic_rxdrain(struct sonic_softc *sc)
1044 {
1045 	struct sonic_descsoft *ds;
1046 	int i;
1047 
1048 	for (i = 0; i < SONIC_NRXDESC; i++) {
1049 		ds = &sc->sc_rxsoft[i];
1050 		if (ds->ds_mbuf != NULL) {
1051 			bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
1052 			m_freem(ds->ds_mbuf);
1053 			ds->ds_mbuf = NULL;
1054 		}
1055 	}
1056 }
1057 
1058 /*
1059  * sonic_stop:		[ifnet interface function]
1060  *
1061  *	Stop transmission on the interface.
1062  */
1063 void
1064 sonic_stop(struct ifnet *ifp, int disable)
1065 {
1066 	struct sonic_softc *sc = ifp->if_softc;
1067 	struct sonic_descsoft *ds;
1068 	int i;
1069 
1070 	/*
1071 	 * Disable interrupts.
1072 	 */
1073 	CSR_WRITE(sc, SONIC_IMR, 0);
1074 
1075 	/*
1076 	 * Stop the transmitter, receiver, and timer.
1077 	 */
1078 	CSR_WRITE(sc, SONIC_CR, CR_HTX | CR_RXDIS | CR_STP);
1079 	for (i = 0; i < 1000; i++) {
1080 		if ((CSR_READ(sc, SONIC_CR) & (CR_TXP | CR_RXEN | CR_ST)) == 0)
1081 			break;
1082 		delay(2);
1083 	}
1084 	if ((CSR_READ(sc, SONIC_CR) & (CR_TXP | CR_RXEN | CR_ST)) != 0)
1085 		printf("%s: SONIC failed to stop\n", device_xname(sc->sc_dev));
1086 
1087 	/*
1088 	 * Release any queued transmit buffers.
1089 	 */
1090 	for (i = 0; i < SONIC_NTXDESC; i++) {
1091 		ds = &sc->sc_txsoft[i];
1092 		if (ds->ds_mbuf != NULL) {
1093 			bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
1094 			m_freem(ds->ds_mbuf);
1095 			ds->ds_mbuf = NULL;
1096 		}
1097 	}
1098 
1099 	/*
1100 	 * Mark the interface down and cancel the watchdog timer.
1101 	 */
1102 	ifp->if_flags &= ~IFF_RUNNING;
1103 	ifp->if_timer = 0;
1104 
1105 	if (disable)
1106 		sonic_rxdrain(sc);
1107 }
1108 
1109 /*
1110  * sonic_add_rxbuf:
1111  *
1112  *	Add a receive buffer to the indicated descriptor.
1113  */
1114 int
1115 sonic_add_rxbuf(struct sonic_softc *sc, int idx)
1116 {
1117 	struct sonic_descsoft *ds = &sc->sc_rxsoft[idx];
1118 	struct mbuf *m;
1119 	int error;
1120 
1121 	MGETHDR(m, M_DONTWAIT, MT_DATA);
1122 	if (m == NULL)
1123 		return ENOBUFS;
1124 
1125 	MCLGET(m, M_DONTWAIT);
1126 	if ((m->m_flags & M_EXT) == 0) {
1127 		m_freem(m);
1128 		return ENOBUFS;
1129 	}
1130 
1131 	if (ds->ds_mbuf != NULL)
1132 		bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
1133 
1134 	ds->ds_mbuf = m;
1135 
1136 	error = bus_dmamap_load(sc->sc_dmat, ds->ds_dmamap,
1137 	    m->m_ext.ext_buf, m->m_ext.ext_size, NULL,
1138 	    BUS_DMA_READ | BUS_DMA_NOWAIT);
1139 	if (error) {
1140 		printf("%s: can't load rx DMA map %d, error = %d\n",
1141 		    device_xname(sc->sc_dev), idx, error);
1142 		panic("sonic_add_rxbuf");	/* XXX */
1143 	}
1144 
1145 	bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
1146 	    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
1147 
1148 	SONIC_INIT_RXDESC(sc, idx);
1149 
1150 	return 0;
1151 }
1152 
1153 static void
1154 sonic_set_camentry(struct sonic_softc *sc, int entry, const uint8_t *enaddr)
1155 {
1156 
1157 	if (sc->sc_32bit) {
1158 		struct sonic_cda32 *cda = &sc->sc_cda32[entry];
1159 
1160 		cda->cda_entry = htosonic32(sc, entry);
1161 		cda->cda_addr0 = htosonic32(sc, enaddr[0] | (enaddr[1] << 8));
1162 		cda->cda_addr1 = htosonic32(sc, enaddr[2] | (enaddr[3] << 8));
1163 		cda->cda_addr2 = htosonic32(sc, enaddr[4] | (enaddr[5] << 8));
1164 	} else {
1165 		struct sonic_cda16 *cda = &sc->sc_cda16[entry];
1166 
1167 		cda->cda_entry = htosonic16(sc, entry);
1168 		cda->cda_addr0 = htosonic16(sc, enaddr[0] | (enaddr[1] << 8));
1169 		cda->cda_addr1 = htosonic16(sc, enaddr[2] | (enaddr[3] << 8));
1170 		cda->cda_addr2 = htosonic16(sc, enaddr[4] | (enaddr[5] << 8));
1171 	}
1172 }
1173 
1174 /*
1175  * sonic_set_filter:
1176  *
1177  *	Set the SONIC receive filter.
1178  */
1179 void
1180 sonic_set_filter(struct sonic_softc *sc)
1181 {
1182 	struct ethercom *ec = &sc->sc_ethercom;
1183 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1184 	struct ether_multi *enm;
1185 	struct ether_multistep step;
1186 	int i, entry = 0;
1187 	uint16_t camvalid = 0;
1188 	uint16_t rcr = 0;
1189 
1190 	if (ifp->if_flags & IFF_BROADCAST)
1191 		rcr |= RCR_BRD;
1192 
1193 	if (ifp->if_flags & IFF_PROMISC) {
1194 		rcr |= RCR_PRO;
1195 		goto allmulti;
1196 	}
1197 
1198 	/* Put our station address in the first CAM slot. */
1199 	sonic_set_camentry(sc, entry, CLLADDR(ifp->if_sadl));
1200 	camvalid |= (1U << entry);
1201 	entry++;
1202 
1203 	/* Add the multicast addresses to the CAM. */
1204 	ETHER_LOCK(ec);
1205 	ETHER_FIRST_MULTI(step, ec, enm);
1206 	while (enm != NULL) {
1207 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
1208 			/*
1209 			 * We must listen to a range of multicast addresses.
1210 			 * The only way to do this on the SONIC is to enable
1211 			 * reception of all multicast packets.
1212 			 */
1213 			ETHER_UNLOCK(ec);
1214 			goto allmulti;
1215 		}
1216 
1217 		if (entry == SONIC_NCAMENT) {
1218 			/*
1219 			 * Out of CAM slots.  Have to enable reception
1220 			 * of all multicast addresses.
1221 			 */
1222 			ETHER_UNLOCK(ec);
1223 			goto allmulti;
1224 		}
1225 
1226 		sonic_set_camentry(sc, entry, enm->enm_addrlo);
1227 		camvalid |= (1U << entry);
1228 		entry++;
1229 
1230 		ETHER_NEXT_MULTI(step, enm);
1231 	}
1232 	ETHER_UNLOCK(ec);
1233 
1234 	ifp->if_flags &= ~IFF_ALLMULTI;
1235 	goto setit;
1236 
1237  allmulti:
1238 	/* Use only the first CAM slot (station address). */
1239 	camvalid = 0x0001;
1240 	entry = 1;
1241 	rcr |= RCR_AMC;
1242 
1243  setit:
1244 	/* set mask for the CAM Enable register */
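	/*
	 * The load-CAM command reads the enable mask from the word that
	 * follows the last descriptor loaded, so it goes either in the
	 * dedicated enable slot (all CAM entries in use) or in the
	 * cda_entry field of the first unused descriptor.
	 */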
1245 	if (sc->sc_32bit) {
1246 		if (entry == SONIC_NCAMENT)
1247 			sc->sc_cdaenable32 = htosonic32(sc, camvalid);
1248 		else
1249 			sc->sc_cda32[entry].cda_entry =
1250 			    htosonic32(sc, camvalid);
1251 	} else {
1252 		if (entry == SONIC_NCAMENT)
1253 			sc->sc_cdaenable16 = htosonic16(sc, camvalid);
1254 		else
1255 			sc->sc_cda16[entry].cda_entry =
1256 			    htosonic16(sc, camvalid);
1257 	}
1258 
1259 	/* Load the CAM. */
1260 	SONIC_CDCAMSYNC(sc, BUS_DMASYNC_PREWRITE);
1261 	CSR_WRITE(sc, SONIC_CDP, SONIC_CDCAMADDR(sc) & 0xffff);
1262 	CSR_WRITE(sc, SONIC_CDC, entry);
1263 	CSR_WRITE(sc, SONIC_CR, CR_LCAM);
1264 	for (i = 0; i < 10000; i++) {
1265 		if ((CSR_READ(sc, SONIC_CR) & CR_LCAM) == 0)
1266 			break;
1267 		delay(2);
1268 	}
1269 	if (CSR_READ(sc, SONIC_CR) & CR_LCAM)
1270 		printf("%s: CAM load failed\n", device_xname(sc->sc_dev));
1271 	SONIC_CDCAMSYNC(sc, BUS_DMASYNC_POSTWRITE);
1272 
1273 	/* Set the receive control register. */
1274 	CSR_WRITE(sc, SONIC_RCR, rcr);
1275 }
1276