/*	$OpenBSD: aic6915.c,v 1.21 2016/04/13 10:49:26 mpi Exp $	*/
/*	$NetBSD: aic6915.c,v 1.15 2005/12/24 20:27:29 perry Exp $	*/

/*-
 * Copyright (c) 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Device driver for the Adaptec AIC-6915 (``Starfire'')
 * 10/100 Ethernet controller.
 */

#include "bpfilter.h"

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/timeout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>

#include <net/if.h>
#include <net/if_dl.h>

#include <netinet/in.h>
#include <netinet/if_ether.h>

#include <net/if_media.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <machine/bus.h>
#include <machine/intr.h>

#include <dev/mii/miivar.h>

#include <dev/ic/aic6915.h>

void	sf_start(struct ifnet *);
void	sf_watchdog(struct ifnet *);
int	sf_ioctl(struct ifnet *, u_long, caddr_t);
int	sf_init(struct ifnet *);
void	sf_stop(struct ifnet *, int);

void	sf_txintr(struct sf_softc *);
void	sf_rxintr(struct sf_softc *);
void	sf_stats_update(struct sf_softc *);

void	sf_reset(struct sf_softc *);
void	sf_macreset(struct sf_softc *);
void	sf_rxdrain(struct sf_softc *);
int	sf_add_rxbuf(struct sf_softc *, int);
uint8_t	sf_read_eeprom(struct sf_softc *, int);
void	sf_set_filter(struct sf_softc *);

int	sf_mii_read(struct device *, int, int);
void	sf_mii_write(struct device *, int, int, int);
void	sf_mii_statchg(struct device *);

void	sf_tick(void *);

int	sf_mediachange(struct ifnet *);
void	sf_mediastatus(struct ifnet *, struct ifmediareq *);

uint32_t sf_reg_read(struct sf_softc *, bus_addr_t);
void	sf_reg_write(struct sf_softc *, bus_addr_t, uint32_t);

void	sf_set_filter_perfect(struct sf_softc *, int, uint8_t *);
void	sf_set_filter_hash(struct sf_softc *, uint8_t *);

struct cfdriver sf_cd = {
	NULL, "sf", DV_IFNET
};

#define	sf_funcreg_read(sc, reg)					\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh_func, (reg))
#define	sf_funcreg_write(sc, reg, val)					\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh_func, (reg), (val))

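/*
 * When the chip is I/O mapped, register accesses are bounced through
 * the chip's indirect I/O access ports rather than issued directly,
 * presumably because the I/O window is too small to cover the whole
 * register space.
 */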
uint32_t
sf_reg_read(struct sf_softc *sc, bus_addr_t reg)
{

	if (__predict_false(sc->sc_iomapped)) {
		bus_space_write_4(sc->sc_st, sc->sc_sh, SF_IndirectIoAccess,
		    reg);
		return (bus_space_read_4(sc->sc_st, sc->sc_sh,
		    SF_IndirectIoDataPort));
	}

	return (bus_space_read_4(sc->sc_st, sc->sc_sh, reg));
}

void
sf_reg_write(struct sf_softc *sc, bus_addr_t reg, uint32_t val)
{

	if (__predict_false(sc->sc_iomapped)) {
		bus_space_write_4(sc->sc_st, sc->sc_sh, SF_IndirectIoAccess,
		    reg);
		bus_space_write_4(sc->sc_st, sc->sc_sh, SF_IndirectIoDataPort,
		    val);
		return;
	}

	bus_space_write_4(sc->sc_st, sc->sc_sh, reg, val);
}

#define	sf_genreg_read(sc, reg)						\
	sf_reg_read((sc), (reg) + SF_GENREG_OFFSET)
#define	sf_genreg_write(sc, reg, val)					\
	sf_reg_write((sc), (reg) + SF_GENREG_OFFSET, (val))

/*
 * sf_attach:
 *
 *	Attach a Starfire interface to the system.
 */
void
sf_attach(struct sf_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	int i, rseg, error;
	bus_dma_segment_t seg;
	u_int8_t enaddr[ETHER_ADDR_LEN];

	timeout_set(&sc->sc_mii_timeout, sf_tick, sc);

	/*
	 * If we're I/O mapped, the functional register handle is
	 * the same as the base handle.  If we're memory mapped,
	 * carve off a chunk of the register space for the functional
	 * registers, to save on arithmetic later.
	 */
	if (sc->sc_iomapped)
		sc->sc_sh_func = sc->sc_sh;
	else {
		if ((error = bus_space_subregion(sc->sc_st, sc->sc_sh,
		    SF_GENREG_OFFSET, SF_FUNCREG_SIZE, &sc->sc_sh_func)) != 0) {
			printf("%s: unable to sub-region functional "
			    "registers, error = %d\n", sc->sc_dev.dv_xname,
			    error);
			return;
		}
	}

	/*
	 * Initialize the transmit threshold for this interface.  The
	 * manual describes the default as 4 * 16 bytes.  We start out
	 * at 10 * 16 bytes, to avoid a bunch of initial underruns on
	 * several platforms.
	 */
	sc->sc_txthresh = 10;

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmat,
	    sizeof(struct sf_control_data), PAGE_SIZE, 0, &seg, 1, &rseg,
	    BUS_DMA_NOWAIT)) != 0) {
		printf("%s: unable to allocate control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_0;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
	    sizeof(struct sf_control_data), (caddr_t *)&sc->sc_control_data,
	    BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
		printf("%s: unable to map control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_1;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat,
	    sizeof(struct sf_control_data), 1,
	    sizeof(struct sf_control_data), 0, BUS_DMA_NOWAIT,
	    &sc->sc_cddmamap)) != 0) {
		printf("%s: unable to create control data DMA map, "
		    "error = %d\n", sc->sc_dev.dv_xname, error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
	    sc->sc_control_data, sizeof(struct sf_control_data), NULL,
	    BUS_DMA_NOWAIT)) != 0) {
		printf("%s: unable to load control data DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_3;
	}

	/*
	 * Create the transmit buffer DMA maps.
	 */
	for (i = 0; i < SF_NTXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    SF_NTXFRAGS, MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &sc->sc_txsoft[i].ds_dmamap)) != 0) {
			printf("%s: unable to create tx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_4;
		}
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < SF_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &sc->sc_rxsoft[i].ds_dmamap)) != 0) {
			printf("%s: unable to create rx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_5;
		}
	}

	/*
	 * Reset the chip to a known state.
	 */
	sf_reset(sc);

	/*
	 * Read the Ethernet address from the EEPROM.
	 */
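	/*
	 * The address bytes live at EEPROM offsets 15..20 in reverse
	 * order, so walk the EEPROM from the highest offset down.
	 */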
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		enaddr[i] = sf_read_eeprom(sc, (15 + (ETHER_ADDR_LEN - 1)) - i);

	printf(", address %s\n", ether_sprintf(enaddr));

#ifdef DEBUG
	if (sf_funcreg_read(sc, SF_PciDeviceConfig) & PDC_System64)
		printf("%s: 64-bit PCI slot detected\n", sc->sc_dev.dv_xname);
#endif

	/*
	 * Initialize our media structures and probe the MII.
	 */
	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = sf_mii_read;
	sc->sc_mii.mii_writereg = sf_mii_write;
	sc->sc_mii.mii_statchg = sf_mii_statchg;
	ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, sf_mediachange,
	    sf_mediastatus);
	mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
	} else
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
	bcopy(enaddr, sc->sc_arpcom.ac_enaddr, ETHER_ADDR_LEN);
	bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);
	ifp = &sc->sc_arpcom.ac_if;
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = sf_ioctl;
	ifp->if_start = sf_start;
	ifp->if_watchdog = sf_watchdog;
	IFQ_SET_MAXLEN(&ifp->if_snd, SF_NTXDESC_MASK);

	/*
	 * Attach the interface.
	 */
	if_attach(ifp);
	ether_ifattach(ifp);
	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
 fail_5:
	for (i = 0; i < SF_NRXDESC; i++) {
		if (sc->sc_rxsoft[i].ds_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_rxsoft[i].ds_dmamap);
	}
 fail_4:
	for (i = 0; i < SF_NTXDESC; i++) {
		if (sc->sc_txsoft[i].ds_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_txsoft[i].ds_dmamap);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
 fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
 fail_2:
	bus_dmamem_unmap(sc->sc_dmat, (caddr_t) sc->sc_control_data,
	    sizeof(struct sf_control_data));
 fail_1:
	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
 fail_0:
	return;
}

/*
 * sf_start:		[ifnet interface function]
 *
 *	Start packet transmission on the interface.
 */
void
sf_start(struct ifnet *ifp)
{
	struct sf_softc *sc = ifp->if_softc;
	struct mbuf *m0, *m;
	struct sf_txdesc0 *txd;
	struct sf_descsoft *ds;
	bus_dmamap_t dmamap;
	int error, producer, last = -1, opending, seg;

	/*
	 * Remember the previous number of pending transmits.
	 */
	opending = sc->sc_txpending;

	/*
	 * Find out where we're sitting.
	 */
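	/*
	 * The chip keeps queue indices in its own units; the
	 * SF_TXDINDEX_TO_HOST() and SF_TXDINDEX_TO_CHIP() macros
	 * translate between those and our descriptor array indices.
	 */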
	producer = SF_TXDINDEX_TO_HOST(
	    TDQPI_HiPrTxProducerIndex_get(
	    sf_funcreg_read(sc, SF_TxDescQueueProducerIndex)));

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.  Leave a blank one at the end for sanity's sake.
	 */
	while (sc->sc_txpending < (SF_NTXDESC - 1)) {
		/*
		 * Grab a packet off the queue.
		 */
		m0 = ifq_deq_begin(&ifp->if_snd);
		if (m0 == NULL)
			break;
		m = NULL;

		/*
		 * Get the transmit descriptor.
		 */
		txd = &sc->sc_txdescs[producer];
		ds = &sc->sc_txsoft[producer];
		dmamap = ds->ds_dmamap;

		/*
		 * Load the DMA map.  If this fails, the packet either
		 * didn't fit in the allotted number of frags, or we were
		 * short on resources.  In this case, we'll copy and try
		 * again.
		 */
		if (bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
		    BUS_DMA_WRITE|BUS_DMA_NOWAIT) != 0) {
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				ifq_deq_rollback(&ifp->if_snd, m0);
				printf("%s: unable to allocate Tx mbuf\n",
				    sc->sc_dev.dv_xname);
				break;
			}
			if (m0->m_pkthdr.len > MHLEN) {
				MCLGET(m, M_DONTWAIT);
				if ((m->m_flags & M_EXT) == 0) {
					ifq_deq_rollback(&ifp->if_snd, m0);
					printf("%s: unable to allocate Tx "
					    "cluster\n", sc->sc_dev.dv_xname);
					m_freem(m);
					break;
				}
			}
			m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, caddr_t));
			m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;
			error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap,
			    m, BUS_DMA_WRITE|BUS_DMA_NOWAIT);
			if (error) {
				ifq_deq_rollback(&ifp->if_snd, m0);
				printf("%s: unable to load Tx buffer, "
				    "error = %d\n", sc->sc_dev.dv_xname, error);
				m_freem(m);
				break;
			}
		}

		/*
		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
		 */
		ifq_deq_commit(&ifp->if_snd, m0);
		if (m != NULL) {
			m_freem(m0);
			m0 = m;
		}

		/* Initialize the descriptor. */
		txd->td_word0 =
		    htole32(TD_W0_ID | TD_W0_CRCEN | m0->m_pkthdr.len);
		if (producer == (SF_NTXDESC - 1))
			txd->td_word0 |= TD_W0_END;
		txd->td_word1 = htole32(dmamap->dm_nsegs);
		for (seg = 0; seg < dmamap->dm_nsegs; seg++) {
			txd->td_frags[seg].fr_addr =
			    htole32(dmamap->dm_segs[seg].ds_addr);
			txd->td_frags[seg].fr_len =
			    htole32(dmamap->dm_segs[seg].ds_len);
		}

		/* Sync the descriptor and the DMA map. */
		SF_CDTXDSYNC(sc, producer, BUS_DMASYNC_PREWRITE);
		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		/*
		 * Store a pointer to the packet so we can free it later.
		 */
		ds->ds_mbuf = m0;

		/* Advance the Tx pointer. */
		sc->sc_txpending++;
		last = producer;
		producer = SF_NEXTTX(producer);

#if NBPFILTER > 0
		/*
		 * Pass the packet to any BPF listeners.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m0, BPF_DIRECTION_OUT);
#endif
	}

	if (sc->sc_txpending == (SF_NTXDESC - 1)) {
		/* No more slots left; notify upper layer. */
		ifq_set_oactive(&ifp->if_snd);
	}

	if (sc->sc_txpending != opending) {
		KASSERT(last != -1);
		/*
		 * We enqueued packets.  Cause a transmit interrupt to
		 * happen on the last packet we enqueued, and give the
		 * new descriptors to the chip by writing the new
		 * producer index.
		 */
		sc->sc_txdescs[last].td_word0 |= TD_W0_INTR;
		SF_CDTXDSYNC(sc, last, BUS_DMASYNC_PREWRITE);

		sf_funcreg_write(sc, SF_TxDescQueueProducerIndex,
		    TDQPI_HiPrTxProducerIndex(SF_TXDINDEX_TO_CHIP(producer)));

		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
	}
}

/*
 * sf_watchdog:		[ifnet interface function]
 *
 *	Watchdog timer handler.
 */
void
sf_watchdog(struct ifnet *ifp)
{
	struct sf_softc *sc = ifp->if_softc;

	printf("%s: device timeout\n", sc->sc_dev.dv_xname);
	ifp->if_oerrors++;

	(void) sf_init(ifp);

	/* Try to get more packets going. */
	sf_start(ifp);
}

/*
 * sf_ioctl:		[ifnet interface function]
 *
 *	Handle control requests from the operator.
 */
int
sf_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct sf_softc *sc = (struct sf_softc *)ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *) data;
	int s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		if (!(ifp->if_flags & IFF_RUNNING))
			sf_init(ifp);
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING &&
			    ((ifp->if_flags ^ sc->sc_flags) &
			     IFF_PROMISC)) {
				sf_set_filter(sc);
			} else {
				if (!(ifp->if_flags & IFF_RUNNING))
					sf_init(ifp);
			}
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				sf_stop(ifp, 1);
		}
		sc->sc_flags = ifp->if_flags;
		break;

	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
		break;

	default:
		error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data);
	}

	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			sf_set_filter(sc);
		error = 0;
	}

	/* Try to get more packets going. */
	sf_start(ifp);

	splx(s);
	return (error);
}

/*
 * sf_intr:
 *
 *	Interrupt service routine.
 */
int
sf_intr(void *arg)
{
	struct sf_softc *sc = arg;
	uint32_t isr;
	int handled = 0, wantinit = 0;

	for (;;) {
		/* Reading clears all interrupts we're interested in. */
		isr = sf_funcreg_read(sc, SF_InterruptStatus);
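		/*
		 * IS_PCIPadInt acts as the summary bit here: once it
		 * reads back clear, no interrupt source we care about
		 * is still asserted.
		 */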
		if ((isr & IS_PCIPadInt) == 0)
			break;

		handled = 1;

		/* Handle receive interrupts. */
		if (isr & IS_RxQ1DoneInt)
			sf_rxintr(sc);

		/* Handle transmit completion interrupts. */
		if (isr & (IS_TxDmaDoneInt|IS_TxQueueDoneInt))
			sf_txintr(sc);

		/* Handle abnormal interrupts. */
		if (isr & IS_AbnormalInterrupt) {
			/* Statistics. */
			if (isr & IS_StatisticWrapInt)
				sf_stats_update(sc);

			/* DMA errors. */
			if (isr & IS_DmaErrInt) {
				wantinit = 1;
				printf("%s: WARNING: DMA error\n",
				    sc->sc_dev.dv_xname);
			}

			/* Transmit FIFO underruns. */
			if (isr & IS_TxDataLowInt) {
				if (sc->sc_txthresh < 0xff)
					sc->sc_txthresh++;
#ifdef DEBUG
				printf("%s: transmit FIFO underrun, new "
				    "threshold: %d bytes\n",
				    sc->sc_dev.dv_xname,
				    sc->sc_txthresh * 16);
#endif
				sf_funcreg_write(sc, SF_TransmitFrameCSR,
				    sc->sc_TransmitFrameCSR |
				    TFCSR_TransmitThreshold(sc->sc_txthresh));
				sf_funcreg_write(sc, SF_TxDescQueueCtrl,
				    sc->sc_TxDescQueueCtrl |
				    TDQC_TxHighPriorityFifoThreshold(
							sc->sc_txthresh));
			}
		}
	}

	if (handled) {
		/* Reset the interface, if necessary. */
		if (wantinit)
			sf_init(&sc->sc_arpcom.ac_if);

		/* Try and get more packets going. */
		sf_start(&sc->sc_arpcom.ac_if);
	}

	return (handled);
}

/*
 * sf_txintr:
 *
 *	Helper -- handle transmit completion interrupts.
 */
void
sf_txintr(struct sf_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct sf_descsoft *ds;
	uint32_t cqci, tcd;
	int consumer, producer, txidx;

 try_again:
	cqci = sf_funcreg_read(sc, SF_CompletionQueueConsumerIndex);

	consumer = CQCI_TxCompletionConsumerIndex_get(cqci);
	producer = CQPI_TxCompletionProducerIndex_get(
	    sf_funcreg_read(sc, SF_CompletionQueueProducerIndex));

	if (consumer == producer)
		return;

	ifq_clr_oactive(&ifp->if_snd);

	while (consumer != producer) {
		SF_CDTXCSYNC(sc, consumer, BUS_DMASYNC_POSTREAD);
		tcd = letoh32(sc->sc_txcomp[consumer].tcd_word0);

		txidx = SF_TCD_INDEX_TO_HOST(TCD_INDEX(tcd));
#ifdef DIAGNOSTIC
		if ((tcd & TCD_PR) == 0)
			printf("%s: Tx queue mismatch, index %d\n",
			    sc->sc_dev.dv_xname, txidx);
#endif
		/*
		 * NOTE: stats are updated later.  We're just
		 * releasing packets that have been DMA'd to
		 * the chip.
		 */
		ds = &sc->sc_txsoft[txidx];
		SF_CDTXDSYNC(sc, txidx, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap,
		    0, ds->ds_dmamap->dm_mapsize,
		    BUS_DMASYNC_POSTWRITE);
		m_freem(ds->ds_mbuf);
		ds->ds_mbuf = NULL;

		consumer = SF_NEXTTCD(consumer);
		sc->sc_txpending--;
	}

	/* XXXJRT -- should be KDASSERT() */
	KASSERT(sc->sc_txpending >= 0);

	/* If all packets are done, cancel the watchdog timer. */
	if (sc->sc_txpending == 0)
		ifp->if_timer = 0;

	/* Update the consumer index. */
	sf_funcreg_write(sc, SF_CompletionQueueConsumerIndex,
	    (cqci & ~CQCI_TxCompletionConsumerIndex(0x7ff)) |
	     CQCI_TxCompletionConsumerIndex(consumer));

	/* Double check for new completions. */
	goto try_again;
}

/*
 * sf_rxintr:
 *
 *	Helper -- handle receive interrupts.
 */
void
sf_rxintr(struct sf_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct sf_descsoft *ds;
	struct sf_rcd_full *rcd;
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	struct mbuf *m;
	uint32_t cqci, word0;
	int consumer, producer, bufproducer, rxidx, len;

	cqci = sf_funcreg_read(sc, SF_CompletionQueueConsumerIndex);

	consumer = CQCI_RxCompletionQ1ConsumerIndex_get(cqci);
	producer = CQPI_RxCompletionQ1ProducerIndex_get(
	    sf_funcreg_read(sc, SF_CompletionQueueProducerIndex));
	bufproducer = RXQ1P_RxDescQ1Producer_get(
	    sf_funcreg_read(sc, SF_RxDescQueue1Ptrs));

	if (consumer == producer)
		return;

	while (consumer != producer) {
		rcd = &sc->sc_rxcomp[consumer];
		SF_CDRXCSYNC(sc, consumer,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
		SF_CDRXCSYNC(sc, consumer,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		word0 = letoh32(rcd->rcd_word0);
		rxidx = RCD_W0_EndIndex(word0);

		ds = &sc->sc_rxsoft[rxidx];

		consumer = SF_NEXTRCD(consumer);
		bufproducer = SF_NEXTRX(bufproducer);

		if ((word0 & RCD_W0_OK) == 0) {
			SF_INIT_RXDESC(sc, rxidx);
			continue;
		}

		bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
		    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

		/*
		 * No errors; receive the packet.  Note that we have
		 * configured the Starfire to NOT transfer the CRC
		 * with the packet.
		 */
		len = RCD_W0_Length(word0);

#ifndef __STRICT_ALIGNMENT
		/*
		 * Allocate a new mbuf cluster.  If that fails, we are
		 * out of memory, and must drop the packet and recycle
		 * the buffer that's already attached to this descriptor.
		 */
		m = ds->ds_mbuf;
		if (sf_add_rxbuf(sc, rxidx) != 0) {
			ifp->if_ierrors++;
			SF_INIT_RXDESC(sc, rxidx);
			bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
			    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
			continue;
		}
#else
		/*
		 * The Starfire's receive buffer must be 4-byte aligned.
		 * But this means that the data after the Ethernet header
		 * is misaligned.  We must allocate a new buffer and
		 * copy the data, shifted forward 2 bytes.
		 */
		MGETHDR(m, M_DONTWAIT, MT_DATA);
		if (m == NULL) {
 dropit:
			ifp->if_ierrors++;
			SF_INIT_RXDESC(sc, rxidx);
			bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
			    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
			continue;
		}
		if (len > (MHLEN - 2)) {
			MCLGET(m, M_DONTWAIT);
			if ((m->m_flags & M_EXT) == 0) {
				m_freem(m);
				goto dropit;
			}
		}
		m->m_data += 2;

		/*
		 * Note that we use clusters for incoming frames, so the
		 * buffer is virtually contiguous.
		 */
		memcpy(mtod(m, caddr_t), mtod(ds->ds_mbuf, caddr_t), len);

		/* Allow the receive descriptor to continue using its mbuf. */
		SF_INIT_RXDESC(sc, rxidx);
		bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
		    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
#endif /* __STRICT_ALIGNMENT */

		m->m_pkthdr.len = m->m_len = len;

		ml_enqueue(&ml, m);
	}

	if_input(ifp, &ml);

	/* Update the chip's pointers. */
	sf_funcreg_write(sc, SF_CompletionQueueConsumerIndex,
	    (cqci & ~CQCI_RxCompletionQ1ConsumerIndex(0x7ff)) |
	     CQCI_RxCompletionQ1ConsumerIndex(consumer));
	sf_funcreg_write(sc, SF_RxDescQueue1Ptrs,
	    RXQ1P_RxDescQ1Producer(bufproducer));
}

/*
 * sf_tick:
 *
 *	One second timer, used to tick the MII and update stats.
 */
void
sf_tick(void *arg)
{
	struct sf_softc *sc = arg;
	int s;

	s = splnet();
	mii_tick(&sc->sc_mii);
	sf_stats_update(sc);
	splx(s);

	timeout_add_sec(&sc->sc_mii_timeout, 1);
}

/*
 * sf_stats_update:
 *
 *	Read the statistics counters.
 */
void
sf_stats_update(struct sf_softc *sc)
{
	struct sf_stats stats;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	uint32_t *p;
	u_int i;

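	/*
	 * Walk the statistics registers in order, copying each counter
	 * into the local struct and clearing it on the chip.  This
	 * relies on struct sf_stats mirroring the register layout.
	 */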
	p = &stats.TransmitOKFrames;
	for (i = 0; i < (sizeof(stats) / sizeof(uint32_t)); i++) {
		*p++ = sf_genreg_read(sc,
		    SF_STATS_BASE + (i * sizeof(uint32_t)));
		sf_genreg_write(sc, SF_STATS_BASE + (i * sizeof(uint32_t)), 0);
	}

	ifp->if_opackets += stats.TransmitOKFrames;

	ifp->if_collisions += stats.SingleCollisionFrames +
	    stats.MultipleCollisionFrames;

	ifp->if_oerrors += stats.TransmitAbortDueToExcessiveCollisions +
	    stats.TransmitAbortDueToExcessingDeferral +
	    stats.FramesLostDueToInternalTransmitErrors;

	ifp->if_ierrors += stats.ReceiveCRCErrors + stats.AlignmentErrors +
	    stats.ReceiveFramesTooLong + stats.ReceiveFramesTooShort +
	    stats.ReceiveFramesJabbersError +
	    stats.FramesLostDueToInternalReceiveErrors;
}

/*
 * sf_reset:
 *
 *	Perform a soft reset on the Starfire.
 */
void
sf_reset(struct sf_softc *sc)
{
	int i;

	sf_funcreg_write(sc, SF_GeneralEthernetCtrl, 0);

	sf_macreset(sc);

	sf_funcreg_write(sc, SF_PciDeviceConfig, PDC_SoftReset);
	for (i = 0; i < 1000; i++) {
		delay(10);
		if ((sf_funcreg_read(sc, SF_PciDeviceConfig) &
		     PDC_SoftReset) == 0)
			break;
	}

	if (i == 1000) {
		printf("%s: reset failed to complete\n", sc->sc_dev.dv_xname);
		sf_funcreg_write(sc, SF_PciDeviceConfig, 0);
	}

	delay(1000);
}

/*
 * sf_macreset:
 *
 *	Reset the MAC portion of the Starfire.
 */
void
sf_macreset(struct sf_softc *sc)
{

	sf_genreg_write(sc, SF_MacConfig1, sc->sc_MacConfig1 | MC1_SoftRst);
	delay(1000);
	sf_genreg_write(sc, SF_MacConfig1, sc->sc_MacConfig1);
}

/*
 * sf_init:		[ifnet interface function]
 *
 *	Initialize the interface.  Must be called at splnet().
 */
int
sf_init(struct ifnet *ifp)
{
	struct sf_softc *sc = ifp->if_softc;
	struct sf_descsoft *ds;
	int error = 0;
	u_int i;

	/*
	 * Cancel any pending I/O.
	 */
	sf_stop(ifp, 0);

	/*
	 * Reset the Starfire to a known state.
	 */
	sf_reset(sc);

	/* Clear the stat counters. */
	for (i = 0; i < sizeof(struct sf_stats); i += sizeof(uint32_t))
		sf_genreg_write(sc, SF_STATS_BASE + i, 0);

	/*
	 * Initialize the transmit descriptor ring.
	 */
	memset(sc->sc_txdescs, 0, sizeof(sc->sc_txdescs));
	sf_funcreg_write(sc, SF_TxDescQueueHighAddr, 0);
	sf_funcreg_write(sc, SF_HiPrTxDescQueueBaseAddr, SF_CDTXDADDR(sc, 0));
	sf_funcreg_write(sc, SF_LoPrTxDescQueueBaseAddr, 0);

	/*
	 * Initialize the transmit completion ring.
	 */
	for (i = 0; i < SF_NTCD; i++) {
		sc->sc_txcomp[i].tcd_word0 = TCD_DMA_ID;
		SF_CDTXCSYNC(sc, i, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	}
	sf_funcreg_write(sc, SF_CompletionQueueHighAddr, 0);
	sf_funcreg_write(sc, SF_TxCompletionQueueCtrl, SF_CDTXCADDR(sc, 0));

	/*
	 * Initialize the receive descriptor ring.
	 */
	for (i = 0; i < SF_NRXDESC; i++) {
		ds = &sc->sc_rxsoft[i];
		if (ds->ds_mbuf == NULL) {
			if ((error = sf_add_rxbuf(sc, i)) != 0) {
				printf("%s: unable to allocate or map rx "
				    "buffer %d, error = %d\n",
				    sc->sc_dev.dv_xname, i, error);
				/*
				 * XXX Should attempt to run with fewer receive
				 * XXX buffers instead of just failing.
				 */
				sf_rxdrain(sc);
				goto out;
			}
		} else
			SF_INIT_RXDESC(sc, i);
	}
	sf_funcreg_write(sc, SF_RxDescQueueHighAddress, 0);
	sf_funcreg_write(sc, SF_RxDescQueue1LowAddress, SF_CDRXDADDR(sc, 0));
	sf_funcreg_write(sc, SF_RxDescQueue2LowAddress, 0);

	/*
	 * Initialize the receive completion ring.
	 */
	for (i = 0; i < SF_NRCD; i++) {
		sc->sc_rxcomp[i].rcd_word0 = RCD_W0_ID;
		sc->sc_rxcomp[i].rcd_word1 = 0;
		sc->sc_rxcomp[i].rcd_word2 = 0;
		sc->sc_rxcomp[i].rcd_timestamp = 0;
		SF_CDRXCSYNC(sc, i, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	}
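	/*
	 * Queue 1 is configured for the full-sized completion
	 * descriptor format (type 3), matching struct sf_rcd_full.
	 */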
	sf_funcreg_write(sc, SF_RxCompletionQueue1Ctrl, SF_CDRXCADDR(sc, 0) |
	    RCQ1C_RxCompletionQ1Type(3));
	sf_funcreg_write(sc, SF_RxCompletionQueue2Ctrl, 0);

	/*
	 * Initialize the Tx CSR.
	 */
	sc->sc_TransmitFrameCSR = 0;
	sf_funcreg_write(sc, SF_TransmitFrameCSR,
	    sc->sc_TransmitFrameCSR |
	    TFCSR_TransmitThreshold(sc->sc_txthresh));

	/*
	 * Initialize the Tx descriptor control register.
	 */
	sc->sc_TxDescQueueCtrl = TDQC_SkipLength(0) |
	    TDQC_TxDmaBurstSize(4) |	/* default */
	    TDQC_MinFrameSpacing(3) |	/* 128 bytes */
	    TDQC_TxDescType(0);
	sf_funcreg_write(sc, SF_TxDescQueueCtrl,
	    sc->sc_TxDescQueueCtrl |
	    TDQC_TxHighPriorityFifoThreshold(sc->sc_txthresh));

	/*
	 * Initialize the Rx descriptor control registers.
	 */
	sf_funcreg_write(sc, SF_RxDescQueue1Ctrl,
	    RDQ1C_RxQ1BufferLength(MCLBYTES) |
	    RDQ1C_RxDescSpacing(0));
	sf_funcreg_write(sc, SF_RxDescQueue2Ctrl, 0);

	/*
	 * Initialize the Tx descriptor producer indices.
	 */
	sf_funcreg_write(sc, SF_TxDescQueueProducerIndex,
	    TDQPI_HiPrTxProducerIndex(0) |
	    TDQPI_LoPrTxProducerIndex(0));

	/*
	 * Initialize the Rx descriptor producer indices.
	 */
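	/*
	 * Note: the producer starts one slot behind the ring size,
	 * presumably so it never wraps onto the consumer.
	 */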
	sf_funcreg_write(sc, SF_RxDescQueue1Ptrs,
	    RXQ1P_RxDescQ1Producer(SF_NRXDESC - 1));
	sf_funcreg_write(sc, SF_RxDescQueue2Ptrs,
	    RXQ2P_RxDescQ2Producer(0));

	/*
	 * Initialize the Tx and Rx completion queue consumer indices.
	 */
	sf_funcreg_write(sc, SF_CompletionQueueConsumerIndex,
	    CQCI_TxCompletionConsumerIndex(0) |
	    CQCI_RxCompletionQ1ConsumerIndex(0));
	sf_funcreg_write(sc, SF_RxHiPrCompletionPtrs, 0);

	/*
	 * Initialize the Rx DMA control register.
	 */
	sf_funcreg_write(sc, SF_RxDmaCtrl,
	    RDC_RxHighPriorityThreshold(6) |	/* default */
	    RDC_RxBurstSize(4));		/* default */

	/*
	 * Set the receive filter.
	 */
	sc->sc_RxAddressFilteringCtl = 0;
	sf_set_filter(sc);

	/*
	 * Set MacConfig1.  When we set the media, MacConfig1 will
	 * actually be written and the MAC part reset.
	 */
	sc->sc_MacConfig1 = MC1_PadEn;

	/*
	 * Set the media.
	 */
	mii_mediachg(&sc->sc_mii);

	/*
	 * Initialize the interrupt register.
	 */
	sc->sc_InterruptEn = IS_PCIPadInt | IS_RxQ1DoneInt |
	    IS_TxQueueDoneInt | IS_TxDmaDoneInt | IS_DmaErrInt |
	    IS_StatisticWrapInt;
	sf_funcreg_write(sc, SF_InterruptEn, sc->sc_InterruptEn);

	sf_funcreg_write(sc, SF_PciDeviceConfig, PDC_IntEnable |
	    PDC_PCIMstDmaEn | (1 << PDC_FifoThreshold_SHIFT));

	/*
	 * Start the transmit and receive processes.
	 */
	sf_funcreg_write(sc, SF_GeneralEthernetCtrl,
	    GEC_TxDmaEn|GEC_RxDmaEn|GEC_TransmitEn|GEC_ReceiveEn);

	/* Start the one second clock. */
	timeout_add_sec(&sc->sc_mii_timeout, 1);

	/*
	 * Note that the interface is now running.
	 */
	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

 out:
	if (error) {
		ifp->if_flags &= ~IFF_RUNNING;
		ifq_clr_oactive(&ifp->if_snd);
		ifp->if_timer = 0;
		printf("%s: interface not running\n", sc->sc_dev.dv_xname);
	}
	return (error);
}

/*
 * sf_rxdrain:
 *
 *	Drain the receive queue.
 */
void
sf_rxdrain(struct sf_softc *sc)
{
	struct sf_descsoft *ds;
	int i;

	for (i = 0; i < SF_NRXDESC; i++) {
		ds = &sc->sc_rxsoft[i];
		if (ds->ds_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
			m_freem(ds->ds_mbuf);
			ds->ds_mbuf = NULL;
		}
	}
}

/*
 * sf_stop:		[ifnet interface function]
 *
 *	Stop transmission on the interface.
 */
void
sf_stop(struct ifnet *ifp, int disable)
{
	struct sf_softc *sc = ifp->if_softc;
	struct sf_descsoft *ds;
	int i;

	/* Stop the one second clock. */
	timeout_del(&sc->sc_mii_timeout);

	/* Down the MII. */
	mii_down(&sc->sc_mii);

	/* Disable interrupts. */
	sf_funcreg_write(sc, SF_InterruptEn, 0);

	/* Stop the transmit and receive processes. */
	sf_funcreg_write(sc, SF_GeneralEthernetCtrl, 0);

	/*
	 * Release any queued transmit buffers.
	 */
	for (i = 0; i < SF_NTXDESC; i++) {
		ds = &sc->sc_txsoft[i];
		if (ds->ds_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
			m_freem(ds->ds_mbuf);
			ds->ds_mbuf = NULL;
		}
	}
	sc->sc_txpending = 0;

	if (disable)
		sf_rxdrain(sc);

	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp->if_flags &= ~IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);
	ifp->if_timer = 0;
}

/*
 * sf_read_eeprom:
 *
 *	Read from the Starfire EEPROM.
 */
uint8_t
sf_read_eeprom(struct sf_softc *sc, int offset)
{
	uint32_t reg;

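	/*
	 * The EEPROM contents are visible as 32-bit words; fetch the
	 * word containing `offset' and extract the addressed byte.
	 */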
	reg = sf_genreg_read(sc, SF_EEPROM_BASE + (offset & ~3));

	return ((reg >> (8 * (offset & 3))) & 0xff);
}

/*
 * sf_add_rxbuf:
 *
 *	Add a receive buffer to the indicated descriptor.
 */
int
sf_add_rxbuf(struct sf_softc *sc, int idx)
{
	struct sf_descsoft *ds = &sc->sc_rxsoft[idx];
	struct mbuf *m;
	int error;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);

	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return (ENOBUFS);
	}

	if (ds->ds_mbuf != NULL)
		bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);

	ds->ds_mbuf = m;

	error = bus_dmamap_load(sc->sc_dmat, ds->ds_dmamap,
	    m->m_ext.ext_buf, m->m_ext.ext_size, NULL,
	    BUS_DMA_READ|BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: can't load rx DMA map %d, error = %d\n",
		    sc->sc_dev.dv_xname, idx, error);
		panic("sf_add_rxbuf"); /* XXX */
	}

	bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
	    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);

	SF_INIT_RXDESC(sc, idx);

	return (0);
}

void
sf_set_filter_perfect(struct sf_softc *sc, int slot, uint8_t *enaddr)
{
	uint32_t reg0, reg1, reg2;

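	/*
	 * Each perfect-filter slot holds the address as three 16-bit
	 * words in reverse byte order: enaddr[5] ends up in the low
	 * byte of the first word.
	 */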
	reg0 = enaddr[5] | (enaddr[4] << 8);
	reg1 = enaddr[3] | (enaddr[2] << 8);
	reg2 = enaddr[1] | (enaddr[0] << 8);

	sf_genreg_write(sc, SF_PERFECT_BASE + (slot * 0x10) + 0, reg0);
	sf_genreg_write(sc, SF_PERFECT_BASE + (slot * 0x10) + 4, reg1);
	sf_genreg_write(sc, SF_PERFECT_BASE + (slot * 0x10) + 8, reg2);
}

void
sf_set_filter_hash(struct sf_softc *sc, uint8_t *enaddr)
{
	uint32_t hash, slot, reg;

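	/*
	 * Use the top 9 bits of the big-endian CRC of the address as
	 * the hash: the upper 5 bits select one of 32 hash registers
	 * (spaced 0x10 apart), the lower 4 bits select a bit within
	 * that register.
	 */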
	hash = ether_crc32_be(enaddr, ETHER_ADDR_LEN) >> 23;
	slot = hash >> 4;

	reg = sf_genreg_read(sc, SF_HASH_BASE + (slot * 0x10));
	reg |= 1 << (hash & 0xf);
	sf_genreg_write(sc, SF_HASH_BASE + (slot * 0x10), reg);
}

/*
 * sf_set_filter:
 *
 *	Set the Starfire receive filter.
 */
void
sf_set_filter(struct sf_softc *sc)
{
	struct arpcom *ac = &sc->sc_arpcom;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	int i;

	/* Start by clearing the perfect and hash tables. */
	for (i = 0; i < SF_PERFECT_SIZE; i += sizeof(uint32_t))
		sf_genreg_write(sc, SF_PERFECT_BASE + i, 0);

	for (i = 0; i < SF_HASH_SIZE; i += sizeof(uint32_t))
		sf_genreg_write(sc, SF_HASH_BASE + i, 0);

	/*
	 * Clear the perfect and hash mode bits.
	 */
	sc->sc_RxAddressFilteringCtl &=
	    ~(RAFC_PerfectFilteringMode(3) | RAFC_HashFilteringMode(3));

	if (ifp->if_flags & IFF_BROADCAST)
		sc->sc_RxAddressFilteringCtl |= RAFC_PassBroadcast;
	else
		sc->sc_RxAddressFilteringCtl &= ~RAFC_PassBroadcast;

	if (ifp->if_flags & IFF_PROMISC) {
		sc->sc_RxAddressFilteringCtl |= RAFC_PromiscuousMode;
		goto allmulti;
	} else
		sc->sc_RxAddressFilteringCtl &= ~RAFC_PromiscuousMode;

	/*
	 * Set normal perfect filtering mode.
	 */
	sc->sc_RxAddressFilteringCtl |= RAFC_PerfectFilteringMode(1);

	/*
	 * First, write the station address to the perfect filter
	 * table.
	 */
	sf_set_filter_perfect(sc, 0, LLADDR(ifp->if_sadl));

	if (ac->ac_multirangecnt > 0)
		goto allmulti;

	/*
	 * Now set the hash bits for each multicast address in our
	 * list.
	 */
	ETHER_FIRST_MULTI(step, ac, enm);
	if (enm == NULL)
		goto done;
	while (enm != NULL) {
		sf_set_filter_hash(sc, enm->enm_addrlo);
		ETHER_NEXT_MULTI(step, enm);
	}

	/*
	 * Set "hash only multicast dest, match regardless of VLAN ID".
	 */
	sc->sc_RxAddressFilteringCtl |= RAFC_HashFilteringMode(2);
	goto done;

 allmulti:
	/*
	 * XXX RAFC_PassMulticast is sub-optimal if using VLAN mode.
	 */
	sc->sc_RxAddressFilteringCtl |= RAFC_PassMulticast;
	ifp->if_flags |= IFF_ALLMULTI;

 done:
	sf_funcreg_write(sc, SF_RxAddressFilteringCtl,
	    sc->sc_RxAddressFilteringCtl);
}

/*
 * sf_mii_read:		[mii interface function]
 *
 *	Read from the MII.
 */
int
sf_mii_read(struct device *self, int phy, int reg)
{
	struct sf_softc *sc = (void *) self;
	uint32_t v;
	int i;

	for (i = 0; i < 1000; i++) {
		v = sf_genreg_read(sc, SF_MII_PHY_REG(phy, reg));
		if (v & MiiDataValid)
			break;
		delay(1);
	}

	if ((v & MiiDataValid) == 0)
		return (0);

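	/* All ones usually means no PHY responded at this address. */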
	if (MiiRegDataPort(v) == 0xffff)
		return (0);

	return (MiiRegDataPort(v));
}

/*
 * sf_mii_write:	[mii interface function]
 *
 *	Write to the MII.
 */
void
sf_mii_write(struct device *self, int phy, int reg, int val)
{
	struct sf_softc *sc = (void *) self;
	int i;

	sf_genreg_write(sc, SF_MII_PHY_REG(phy, reg), val);

	for (i = 0; i < 1000; i++) {
		if ((sf_genreg_read(sc, SF_MII_PHY_REG(phy, reg)) &
		     MiiBusy) == 0)
			return;
		delay(1);
	}

	printf("%s: MII write timed out\n", sc->sc_dev.dv_xname);
}

/*
 * sf_mii_statchg:	[mii interface function]
 *
 *	Callback from the PHY when the media changes.
 */
void
sf_mii_statchg(struct device *self)
{
	struct sf_softc *sc = (void *) self;
	uint32_t ipg;

	if (sc->sc_mii.mii_media_active & IFM_FDX) {
		sc->sc_MacConfig1 |= MC1_FullDuplex;
		ipg = 0x15;
	} else {
		sc->sc_MacConfig1 &= ~MC1_FullDuplex;
		ipg = 0x11;
	}

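	/*
	 * Write the new MacConfig1 and reset the MAC so it takes
	 * effect, then program the duplex-appropriate back-to-back
	 * inter-packet gap.
	 */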
	sf_genreg_write(sc, SF_MacConfig1, sc->sc_MacConfig1);
	sf_macreset(sc);

	sf_genreg_write(sc, SF_BkToBkIPG, ipg);
}

/*
 * sf_mediastatus:	[ifmedia interface function]
 *
 *	Callback from ifmedia to request current media status.
 */
void
sf_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct sf_softc *sc = ifp->if_softc;

	mii_pollstat(&sc->sc_mii);
	ifmr->ifm_status = sc->sc_mii.mii_media_status;
	ifmr->ifm_active = sc->sc_mii.mii_media_active;
}

/*
 * sf_mediachange:	[ifmedia interface function]
 *
 *	Callback from ifmedia to request new media setting.
 */
int
sf_mediachange(struct ifnet *ifp)
{
	struct sf_softc *sc = ifp->if_softc;

	if (ifp->if_flags & IFF_UP)
		mii_mediachg(&sc->sc_mii);
	return (0);
}