1 /*	$OpenBSD: if_ste.c,v 1.43 2008/11/28 02:44:18 brad Exp $ */
2 /*
3  * Copyright (c) 1997, 1998, 1999
4  *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  * 3. All advertising materials mentioning features or use of this software
15  *    must display the following acknowledgement:
16  *	This product includes software developed by Bill Paul.
17  * 4. Neither the name of the author nor the names of any co-contributors
18  *    may be used to endorse or promote products derived from this software
19  *    without specific prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
22  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24  * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
25  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
31  * THE POSSIBILITY OF SUCH DAMAGE.
32  *
33  * $FreeBSD: src/sys/pci/if_ste.c,v 1.14 1999/12/07 20:14:42 wpaul Exp $
34  */
35 
36 #include "bpfilter.h"
37 
38 #include <sys/param.h>
39 #include <sys/systm.h>
40 #include <sys/mbuf.h>
41 #include <sys/protosw.h>
42 #include <sys/socket.h>
43 #include <sys/ioctl.h>
44 #include <sys/errno.h>
45 #include <sys/malloc.h>
46 #include <sys/kernel.h>
47 #include <sys/timeout.h>
48 
49 #include <net/if.h>
50 #include <net/if_dl.h>
51 #include <net/if_types.h>
52 
53 #ifdef INET
54 #include <netinet/in.h>
55 #include <netinet/in_systm.h>
56 #include <netinet/in_var.h>
57 #include <netinet/ip.h>
58 #include <netinet/if_ether.h>
59 #endif
60 
61 #include <net/if_media.h>
62 
63 #if NBPFILTER > 0
64 #include <net/bpf.h>
65 #endif
66 
67 #include <uvm/uvm_extern.h>              /* for vtophys */
68 
69 #include <sys/device.h>
70 
71 #include <dev/mii/mii.h>
72 #include <dev/mii/miivar.h>
73 
74 #include <dev/pci/pcireg.h>
75 #include <dev/pci/pcivar.h>
76 #include <dev/pci/pcidevs.h>
77 
78 #define STE_USEIOSPACE
79 
80 #include <dev/pci/if_stereg.h>
81 
82 int	ste_probe(struct device *, void *, void *);
83 void	ste_attach(struct device *, struct device *, void *);
84 int	ste_intr(void *);
85 void	ste_shutdown(void *);
86 void	ste_init(void *);
87 void	ste_rxeoc(struct ste_softc *);
88 void	ste_rxeof(struct ste_softc *);
89 void	ste_txeoc(struct ste_softc *);
90 void	ste_txeof(struct ste_softc *);
91 void	ste_stats_update(void *);
92 void	ste_stop(struct ste_softc *);
93 void	ste_reset(struct ste_softc *);
94 int	ste_ioctl(struct ifnet *, u_long, caddr_t);
95 int	ste_encap(struct ste_softc *, struct ste_chain *,
96 	    struct mbuf *);
97 void	ste_start(struct ifnet *);
98 void	ste_watchdog(struct ifnet *);
99 int	ste_newbuf(struct ste_softc *,
100 	    struct ste_chain_onefrag *,
101 	    struct mbuf *);
102 int	ste_ifmedia_upd(struct ifnet *);
103 void	ste_ifmedia_sts(struct ifnet *, struct ifmediareq *);
104 
105 void	ste_mii_sync(struct ste_softc *);
106 void	ste_mii_send(struct ste_softc *, u_int32_t, int);
107 int	ste_mii_readreg(struct ste_softc *,
108 	    struct ste_mii_frame *);
109 int	ste_mii_writereg(struct ste_softc *,
110 	    struct ste_mii_frame *);
111 int	ste_miibus_readreg(struct device *, int, int);
112 void	ste_miibus_writereg(struct device *, int, int, int);
113 void	ste_miibus_statchg(struct device *);
114 
115 int	ste_eeprom_wait(struct ste_softc *);
116 int	ste_read_eeprom(struct ste_softc *, caddr_t, int,
117 	    int, int);
118 void	ste_wait(struct ste_softc *);
119 void	ste_setmulti(struct ste_softc *);
120 int	ste_init_rx_list(struct ste_softc *);
121 void	ste_init_tx_list(struct ste_softc *);
122 
123 #define STE_SETBIT4(sc, reg, x)				\
124 	CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) | x)
125 
126 #define STE_CLRBIT4(sc, reg, x)				\
127 	CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) & ~x)
128 
129 #define STE_SETBIT2(sc, reg, x)				\
130 	CSR_WRITE_2(sc, reg, CSR_READ_2(sc, reg) | x)
131 
132 #define STE_CLRBIT2(sc, reg, x)				\
133 	CSR_WRITE_2(sc, reg, CSR_READ_2(sc, reg) & ~x)
134 
135 #define STE_SETBIT1(sc, reg, x)				\
136 	CSR_WRITE_1(sc, reg, CSR_READ_1(sc, reg) | x)
137 
138 #define STE_CLRBIT1(sc, reg, x)				\
139 	CSR_WRITE_1(sc, reg, CSR_READ_1(sc, reg) & ~x)
140 
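/*
 * For illustration (a sketch, not additional driver code): these are
 * plain read-modify-write helpers.  For example,
 *
 *	STE_SETBIT1(sc, STE_RX_MODE, STE_RXMODE_PROMISC);
 *
 * expands to
 *
 *	CSR_WRITE_1(sc, STE_RX_MODE,
 *	    CSR_READ_1(sc, STE_RX_MODE) | STE_RXMODE_PROMISC);
 *
 * i.e. the current register contents are read, the requested bits are
 * set (or cleared), and the result is written back.
 */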
141 
142 #define MII_SET(x)		STE_SETBIT1(sc, STE_PHYCTL, x)
143 #define MII_CLR(x)		STE_CLRBIT1(sc, STE_PHYCTL, x)
144 
145 struct cfattach ste_ca = {
146 	sizeof(struct ste_softc), ste_probe, ste_attach
147 };
148 
149 struct cfdriver ste_cd = {
150 	0, "ste", DV_IFNET
151 };
152 
153 /*
154  * Sync the PHYs by setting the data bit and strobing the clock 32 times.
155  */
156 void
157 ste_mii_sync(struct ste_softc *sc)
158 {
159 	int		i;
160 
161 	MII_SET(STE_PHYCTL_MDIR|STE_PHYCTL_MDATA);
162 
163 	for (i = 0; i < 32; i++) {
164 		MII_SET(STE_PHYCTL_MCLK);
165 		DELAY(1);
166 		MII_CLR(STE_PHYCTL_MCLK);
167 		DELAY(1);
168 	}
169 
170 	return;
171 }
172 
173 /*
174  * Clock a series of bits through the MII.
175  */
176 void
177 ste_mii_send(struct ste_softc *sc, u_int32_t bits, int cnt)
178 {
179 	int		i;
180 
181 	MII_CLR(STE_PHYCTL_MCLK);
182 
183 	for (i = (0x1 << (cnt - 1)); i; i >>= 1) {
184 		if (bits & i) {
185 			MII_SET(STE_PHYCTL_MDATA);
186 		} else {
187 			MII_CLR(STE_PHYCTL_MDATA);
188 		}
189 		DELAY(1);
190 		MII_CLR(STE_PHYCTL_MCLK);
191 		DELAY(1);
192 		MII_SET(STE_PHYCTL_MCLK);
193 	}
194 }
195 
196 /*
197  * Read a PHY register through the MII.
198  */
199 int
200 ste_mii_readreg(struct ste_softc *sc, struct ste_mii_frame *frame)
201 {
202 	int		ack, i, s;
203 
204 	s = splnet();
205 
206 	/*
207 	 * Set up frame for RX.
208 	 */
209 	frame->mii_stdelim = STE_MII_STARTDELIM;
210 	frame->mii_opcode = STE_MII_READOP;
211 	frame->mii_turnaround = 0;
212 	frame->mii_data = 0;
213 
214 	CSR_WRITE_2(sc, STE_PHYCTL, 0);
215 	/*
216  	 * Turn on data xmit.
217 	 */
218 	MII_SET(STE_PHYCTL_MDIR);
219 
220 	ste_mii_sync(sc);
221 
222 	/*
223 	 * Send command/address info.
224 	 */
225 	ste_mii_send(sc, frame->mii_stdelim, 2);
226 	ste_mii_send(sc, frame->mii_opcode, 2);
227 	ste_mii_send(sc, frame->mii_phyaddr, 5);
228 	ste_mii_send(sc, frame->mii_regaddr, 5);
229 
230 	/* Turn off xmit. */
231 	MII_CLR(STE_PHYCTL_MDIR);
232 
233 	/* Idle bit */
234 	MII_CLR((STE_PHYCTL_MCLK|STE_PHYCTL_MDATA));
235 	DELAY(1);
236 	MII_SET(STE_PHYCTL_MCLK);
237 	DELAY(1);
238 
239 	/* Check for ack */
240 	MII_CLR(STE_PHYCTL_MCLK);
241 	DELAY(1);
242 	ack = CSR_READ_2(sc, STE_PHYCTL) & STE_PHYCTL_MDATA;
243 	MII_SET(STE_PHYCTL_MCLK);
244 	DELAY(1);
245 
246 	/*
247 	 * Now try reading data bits. If the ack failed, we still
248 	 * need to clock through 16 cycles to keep the PHY(s) in sync.
249 	 */
250 	if (ack) {
251 		for(i = 0; i < 16; i++) {
252 			MII_CLR(STE_PHYCTL_MCLK);
253 			DELAY(1);
254 			MII_SET(STE_PHYCTL_MCLK);
255 			DELAY(1);
256 		}
257 		goto fail;
258 	}
259 
260 	for (i = 0x8000; i; i >>= 1) {
261 		MII_CLR(STE_PHYCTL_MCLK);
262 		DELAY(1);
263 		if (!ack) {
264 			if (CSR_READ_2(sc, STE_PHYCTL) & STE_PHYCTL_MDATA)
265 				frame->mii_data |= i;
266 			DELAY(1);
267 		}
268 		MII_SET(STE_PHYCTL_MCLK);
269 		DELAY(1);
270 	}
271 
272 fail:
273 
274 	MII_CLR(STE_PHYCTL_MCLK);
275 	DELAY(1);
276 	MII_SET(STE_PHYCTL_MCLK);
277 	DELAY(1);
278 
279 	splx(s);
280 
281 	if (ack)
282 		return(1);
283 	return(0);
284 }
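
/*
 * For illustration only (a sketch, not code the driver executes): a
 * read of, say, register 1 on PHY address 0 is clocked onto the
 * management interface, MSB first, with the helpers above as
 *
 *	ste_mii_sync(sc);                          32 preamble bits
 *	ste_mii_send(sc, STE_MII_STARTDELIM, 2);   start delimiter
 *	ste_mii_send(sc, STE_MII_READOP, 2);       read opcode
 *	ste_mii_send(sc, 0, 5);                    PHY address
 *	ste_mii_send(sc, 1, 5);                    register address
 *
 * after which the bus is turned around and the PHY supplies an ack bit
 * followed by 16 data bits, which ste_mii_readreg() shifts into
 * frame->mii_data.
 */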
285 
286 /*
287  * Write to a PHY register through the MII.
288  */
289 int
290 ste_mii_writereg(struct ste_softc *sc, struct ste_mii_frame *frame)
291 {
292 	int		s;
293 
294 	s = splnet();
295 	/*
296 	 * Set up frame for TX.
297 	 */
298 
299 	frame->mii_stdelim = STE_MII_STARTDELIM;
300 	frame->mii_opcode = STE_MII_WRITEOP;
301 	frame->mii_turnaround = STE_MII_TURNAROUND;
302 
303 	/*
304  	 * Turn on data output.
305 	 */
306 	MII_SET(STE_PHYCTL_MDIR);
307 
308 	ste_mii_sync(sc);
309 
310 	ste_mii_send(sc, frame->mii_stdelim, 2);
311 	ste_mii_send(sc, frame->mii_opcode, 2);
312 	ste_mii_send(sc, frame->mii_phyaddr, 5);
313 	ste_mii_send(sc, frame->mii_regaddr, 5);
314 	ste_mii_send(sc, frame->mii_turnaround, 2);
315 	ste_mii_send(sc, frame->mii_data, 16);
316 
317 	/* Idle bit. */
318 	MII_SET(STE_PHYCTL_MCLK);
319 	DELAY(1);
320 	MII_CLR(STE_PHYCTL_MCLK);
321 	DELAY(1);
322 
323 	/*
324 	 * Turn off xmit.
325 	 */
326 	MII_CLR(STE_PHYCTL_MDIR);
327 
328 	splx(s);
329 
330 	return(0);
331 }
332 
333 int
334 ste_miibus_readreg(struct device *self, int phy, int reg)
335 {
336 	struct ste_softc	*sc = (struct ste_softc *)self;
337 	struct ste_mii_frame	frame;
338 
339 	if (sc->ste_one_phy && phy != 0)
340 		return (0);
341 
342 	bzero((char *)&frame, sizeof(frame));
343 
344 	frame.mii_phyaddr = phy;
345 	frame.mii_regaddr = reg;
346 	ste_mii_readreg(sc, &frame);
347 
348 	return(frame.mii_data);
349 }
350 
351 void
352 ste_miibus_writereg(struct device *self, int phy, int reg, int data)
353 {
354 	struct ste_softc	*sc = (struct ste_softc *)self;
355 	struct ste_mii_frame	frame;
356 
357 	bzero((char *)&frame, sizeof(frame));
358 
359 	frame.mii_phyaddr = phy;
360 	frame.mii_regaddr = reg;
361 	frame.mii_data = data;
362 
363 	ste_mii_writereg(sc, &frame);
364 
365 	return;
366 }
367 
368 void
369 ste_miibus_statchg(struct device *self)
370 {
371 	struct ste_softc	*sc = (struct ste_softc *)self;
372 	struct mii_data		*mii;
373 	int fdx, fcur;
374 
375 	mii = &sc->sc_mii;
376 
377 	fcur = CSR_READ_2(sc, STE_MACCTL0) & STE_MACCTL0_FULLDUPLEX;
378 	fdx = (mii->mii_media_active & IFM_GMASK) == IFM_FDX;
379 
380 	if ((fcur && fdx) || (! fcur && ! fdx))
381 		return;
382 
383 	STE_SETBIT4(sc, STE_DMACTL,
384 	    STE_DMACTL_RXDMA_STALL |STE_DMACTL_TXDMA_STALL);
385 	ste_wait(sc);
386 
387 	if (fdx)
388 		STE_SETBIT2(sc, STE_MACCTL0, STE_MACCTL0_FULLDUPLEX);
389 	else
390 		STE_CLRBIT2(sc, STE_MACCTL0, STE_MACCTL0_FULLDUPLEX);
391 
392 	STE_SETBIT4(sc, STE_DMACTL,
393 	    STE_DMACTL_RXDMA_UNSTALL | STE_DMACTL_TXDMA_UNSTALL);
394 
395 	return;
396 }
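
/*
 * The sequence above is the pattern this driver uses whenever MAC
 * parameters are changed while DMA may be active; a condensed sketch
 * (illustrative only):
 *
 *	STE_SETBIT4(sc, STE_DMACTL,
 *	    STE_DMACTL_RXDMA_STALL | STE_DMACTL_TXDMA_STALL);
 *	ste_wait(sc);                           wait for DMA to halt
 *	(update STE_MACCTL0, e.g. the duplex bit)
 *	STE_SETBIT4(sc, STE_DMACTL,
 *	    STE_DMACTL_RXDMA_UNSTALL | STE_DMACTL_TXDMA_UNSTALL);
 */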
397 
398 int
399 ste_ifmedia_upd(struct ifnet *ifp)
400 {
401 	struct ste_softc	*sc;
402 	struct mii_data		*mii;
403 
404 	sc = ifp->if_softc;
405 	mii = &sc->sc_mii;
406 	sc->ste_link = 0;
407 	if (mii->mii_instance) {
408 		struct mii_softc	*miisc;
409 		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
410 			mii_phy_reset(miisc);
411 	}
412 	mii_mediachg(mii);
413 
414 	return(0);
415 }
416 
417 void
418 ste_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
419 {
420 	struct ste_softc	*sc;
421 	struct mii_data		*mii;
422 
423 	sc = ifp->if_softc;
424 	mii = &sc->sc_mii;
425 
426 	mii_pollstat(mii);
427 	ifmr->ifm_active = mii->mii_media_active;
428 	ifmr->ifm_status = mii->mii_media_status;
429 
430 	return;
431 }
432 
433 void
434 ste_wait(struct ste_softc *sc)
435 {
436 	int		i;
437 
438 	for (i = 0; i < STE_TIMEOUT; i++) {
439 		if (!(CSR_READ_4(sc, STE_DMACTL) & STE_DMACTL_DMA_HALTINPROG))
440 			break;
441 	}
442 
443 	if (i == STE_TIMEOUT)
444 		printf("%s: command never completed!\n", sc->sc_dev.dv_xname);
445 
446 	return;
447 }
448 
449 /*
450  * The EEPROM is slow: give it time to come ready after issuing
451  * it a command.
452  */
453 int
454 ste_eeprom_wait(struct ste_softc *sc)
455 {
456 	int		i;
457 
458 	DELAY(1000);
459 
460 	for (i = 0; i < 100; i++) {
461 		if (CSR_READ_2(sc, STE_EEPROM_CTL) & STE_EECTL_BUSY)
462 			DELAY(1000);
463 		else
464 			break;
465 	}
466 
467 	if (i == 100) {
468 		printf("%s: eeprom failed to come ready\n",
469 		    sc->sc_dev.dv_xname);
470 		return(1);
471 	}
472 
473 	return(0);
474 }
475 
476 /*
477  * Read a sequence of words from the EEPROM. Note that ethernet address
478  * data is stored in the EEPROM in network byte order.
479  */
480 int
481 ste_read_eeprom(struct ste_softc *sc, caddr_t dest, int off, int cnt, int swap)
482 {
483 	int			err = 0, i;
484 	u_int16_t		word = 0, *ptr;
485 
486 	if (ste_eeprom_wait(sc))
487 		return(1);
488 
489 	for (i = 0; i < cnt; i++) {
490 		CSR_WRITE_2(sc, STE_EEPROM_CTL, STE_EEOPCODE_READ | (off + i));
491 		err = ste_eeprom_wait(sc);
492 		if (err)
493 			break;
494 		word = CSR_READ_2(sc, STE_EEPROM_DATA);
495 		ptr = (u_int16_t *)(dest + (i * 2));
496 		if (swap)
497 			*ptr = ntohs(word);
498 		else
499 			*ptr = word;
500 	}
501 
502 	return(err ? 1 : 0);
503 }
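
/*
 * Illustrative use (this mirrors the call in ste_attach() below): the
 * 6-byte station address occupies three consecutive EEPROM words
 * starting at STE_EEADDR_NODE0 and is stored in network byte order,
 * so no byte swap is requested:
 *
 *	if (ste_read_eeprom(sc, (caddr_t)&sc->arpcom.ac_enaddr,
 *	    STE_EEADDR_NODE0, 3, 0))
 *		(EEPROM read failed)
 */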
504 
505 void
506 ste_setmulti(struct ste_softc *sc)
507 {
508 	struct ifnet		*ifp;
509 	struct arpcom		*ac = &sc->arpcom;
510 	struct ether_multi	*enm;
511 	struct ether_multistep	step;
512 	int			h = 0;
513 	u_int32_t		hashes[2] = { 0, 0 };
514 
515 	ifp = &sc->arpcom.ac_if;
516 allmulti:
517 	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
518 		STE_SETBIT1(sc, STE_RX_MODE, STE_RXMODE_ALLMULTI);
519 		STE_CLRBIT1(sc, STE_RX_MODE, STE_RXMODE_MULTIHASH);
520 		return;
521 	}
522 
523 	/* first, zot all the existing hash bits */
524 	CSR_WRITE_2(sc, STE_MAR0, 0);
525 	CSR_WRITE_2(sc, STE_MAR1, 0);
526 	CSR_WRITE_2(sc, STE_MAR2, 0);
527 	CSR_WRITE_2(sc, STE_MAR3, 0);
528 
529 	/* now program new ones */
530 	ETHER_FIRST_MULTI(step, ac, enm);
531 	while (enm != NULL) {
532 		if (bcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
533 			ifp->if_flags |= IFF_ALLMULTI;
534 			goto allmulti;
535 		}
536 		h = ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN) & 0x3F;
537 		if (h < 32)
538 			hashes[0] |= (1 << h);
539 		else
540 			hashes[1] |= (1 << (h - 32));
541 		ETHER_NEXT_MULTI(step, enm);
542 	}
543 
544 	CSR_WRITE_2(sc, STE_MAR0, hashes[0] & 0xFFFF);
545 	CSR_WRITE_2(sc, STE_MAR1, (hashes[0] >> 16) & 0xFFFF);
546 	CSR_WRITE_2(sc, STE_MAR2, hashes[1] & 0xFFFF);
547 	CSR_WRITE_2(sc, STE_MAR3, (hashes[1] >> 16) & 0xFFFF);
548 	STE_CLRBIT1(sc, STE_RX_MODE, STE_RXMODE_ALLMULTI);
549 	STE_SETBIT1(sc, STE_RX_MODE, STE_RXMODE_MULTIHASH);
550 
551 	return;
552 }
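
/*
 * A worked example of the hash mapping above (illustrative only): the
 * low 6 bits of the big-endian CRC select one of 64 hash bits, which
 * are spread across the four 16-bit MAR registers.  For h = 37:
 *
 *	h >= 32, so hashes[1] |= 1 << (37 - 32)
 *
 * and that bit ends up as bit 5 of STE_MAR2, which is loaded from
 * hashes[1] & 0xFFFF.
 */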
553 
554 int
555 ste_intr(void *xsc)
556 {
557 	struct ste_softc	*sc;
558 	struct ifnet		*ifp;
559 	u_int16_t		status;
560 	int			claimed = 0;
561 
562 	sc = xsc;
563 	ifp = &sc->arpcom.ac_if;
564 
565 	/* See if this is really our interrupt. */
566 	if (!(CSR_READ_2(sc, STE_ISR) & STE_ISR_INTLATCH))
567 		return claimed;
568 
569 	for (;;) {
570 		status = CSR_READ_2(sc, STE_ISR_ACK);
571 
572 		if (!(status & STE_INTRS))
573 			break;
574 
575 		claimed = 1;
576 
577 		if (status & STE_ISR_RX_DMADONE) {
578 			ste_rxeoc(sc);
579 			ste_rxeof(sc);
580 		}
581 
582 		if (status & STE_ISR_TX_DMADONE)
583 			ste_txeof(sc);
584 
585 		if (status & STE_ISR_TX_DONE)
586 			ste_txeoc(sc);
587 
588 		if (status & STE_ISR_STATS_OFLOW) {
589 			timeout_del(&sc->sc_stats_tmo);
590 			ste_stats_update(sc);
591 		}
592 
593 		if (status & STE_ISR_LINKEVENT)
594 			mii_pollstat(&sc->sc_mii);
595 
596 		if (status & STE_ISR_HOSTERR) {
597 			ste_reset(sc);
598 			ste_init(sc);
599 		}
600 	}
601 
602 	/* Re-enable interrupts */
603 	CSR_WRITE_2(sc, STE_IMR, STE_INTRS);
604 
605 	if (ifp->if_flags & IFF_RUNNING && !IFQ_IS_EMPTY(&ifp->if_snd))
606 		ste_start(ifp);
607 
608 	return claimed;
609 }
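
/*
 * A note on the loop above (descriptive only): reading STE_ISR_ACK
 * returns the pending interrupt sources and, as its name suggests,
 * acknowledges them, so the handler keeps reading until no bits in
 * STE_INTRS remain set, then re-arms STE_IMR and restarts transmission
 * if packets are queued.
 */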
610 
611 void
612 ste_rxeoc(struct ste_softc *sc)
613 {
614 	struct ste_chain_onefrag *cur_rx;
615 
616 	if (sc->ste_cdata.ste_rx_head->ste_ptr->ste_status == 0) {
617 		cur_rx = sc->ste_cdata.ste_rx_head;
618 		do {
619 			cur_rx = cur_rx->ste_next;
620 			/* If the ring is empty, just return. */
621 			if (cur_rx == sc->ste_cdata.ste_rx_head)
622 				return;
623 		} while (cur_rx->ste_ptr->ste_status == 0);
624 		if (sc->ste_cdata.ste_rx_head->ste_ptr->ste_status == 0) {
625 			/* We've fallen behind the chip: catch it. */
626 			sc->ste_cdata.ste_rx_head = cur_rx;
627 		}
628 	}
629 }
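
/*
 * In other words (descriptive note): if the current head descriptor has
 * not been completed by the chip but some later descriptor has, the
 * software head pointer has fallen behind the hardware, and the walk
 * above advances ste_rx_head to the first completed descriptor so that
 * ste_rxeof() can make progress again.
 */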
630 
631 /*
632  * A frame has been uploaded: pass the resulting mbuf chain up to
633  * the higher level protocols.
634  */
635 void
636 ste_rxeof(struct ste_softc *sc)
637 {
638 	struct mbuf		*m;
639 	struct ifnet		*ifp;
640 	struct ste_chain_onefrag	*cur_rx;
641 	int			total_len = 0, count=0;
642 	u_int32_t		rxstat;
643 
644 	ifp = &sc->arpcom.ac_if;
645 
646 	while((rxstat = sc->ste_cdata.ste_rx_head->ste_ptr->ste_status)
647 	      & STE_RXSTAT_DMADONE) {
648 		if ((STE_RX_LIST_CNT - count) < 3)
649 			break;
650 
651 		cur_rx = sc->ste_cdata.ste_rx_head;
652 		sc->ste_cdata.ste_rx_head = cur_rx->ste_next;
653 
654 		/*
655 		 * If an error occurs, update stats, clear the
656 		 * status word and leave the mbuf cluster in place:
657 		 * it should simply get re-used next time this descriptor
658 	 	 * comes up in the ring.
659 		 */
660 		if (rxstat & STE_RXSTAT_FRAME_ERR) {
661 			ifp->if_ierrors++;
662 			cur_rx->ste_ptr->ste_status = 0;
663 			continue;
664 		}
665 
666 		/*
667 		 * If the error bit was not set, the upload complete
668 		 * bit should be set, which means we have a valid packet.
669 		 * If not, something truly strange has happened.
670 		 */
671 		if (!(rxstat & STE_RXSTAT_DMADONE)) {
672 			printf("%s: bad receive status -- packet dropped\n",
673 				sc->sc_dev.dv_xname);
674 			ifp->if_ierrors++;
675 			cur_rx->ste_ptr->ste_status = 0;
676 			continue;
677 		}
678 
679 		/* No errors; receive the packet. */
680 		m = cur_rx->ste_mbuf;
681 		total_len = cur_rx->ste_ptr->ste_status & STE_RXSTAT_FRAMELEN;
682 
683 		/*
684 		 * Try to conjure up a new mbuf cluster. If that
685 		 * fails, it means we have an out of memory condition and
686 		 * should leave the buffer in place and continue. This will
687 		 * result in a lost packet, but there's little else we
688 		 * can do in this situation.
689 		 */
690 		if (ste_newbuf(sc, cur_rx, NULL) == ENOBUFS) {
691 			ifp->if_ierrors++;
692 			cur_rx->ste_ptr->ste_status = 0;
693 			continue;
694 		}
695 
696 		m->m_pkthdr.rcvif = ifp;
697 		m->m_pkthdr.len = m->m_len = total_len;
698 
699 		ifp->if_ipackets++;
700 
701 #if NBPFILTER > 0
702 		if (ifp->if_bpf)
703 			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_IN);
704 #endif
705 
706 		/* pass it on. */
707 		ether_input_mbuf(ifp, m);
708 
709 		cur_rx->ste_ptr->ste_status = 0;
710 		count++;
711 	}
712 
713 	return;
714 }
715 
716 void
717 ste_txeoc(struct ste_softc *sc)
718 {
719 	u_int8_t		txstat;
720 	struct ifnet		*ifp;
721 
722 	ifp = &sc->arpcom.ac_if;
723 
724 	while ((txstat = CSR_READ_1(sc, STE_TX_STATUS)) &
725 	    STE_TXSTATUS_TXDONE) {
726 		if (txstat & STE_TXSTATUS_UNDERRUN ||
727 		    txstat & STE_TXSTATUS_EXCESSCOLLS ||
728 		    txstat & STE_TXSTATUS_RECLAIMERR) {
729 			ifp->if_oerrors++;
730 			printf("%s: transmission error: %x\n",
731 			    sc->sc_dev.dv_xname, txstat);
732 
733 			ste_reset(sc);
734 			ste_init(sc);
735 
736 			if (txstat & STE_TXSTATUS_UNDERRUN &&
737 			    sc->ste_tx_thresh < ETHER_MAX_DIX_LEN) {
738 				sc->ste_tx_thresh += STE_MIN_FRAMELEN;
739 				printf("%s: tx underrun, increasing tx"
740 				    " start threshold to %d bytes\n",
741 				    sc->sc_dev.dv_xname, sc->ste_tx_thresh);
742 			}
743 			CSR_WRITE_2(sc, STE_TX_STARTTHRESH, sc->ste_tx_thresh);
744 			CSR_WRITE_2(sc, STE_TX_RECLAIM_THRESH,
745 			    (ETHER_MAX_DIX_LEN >> 4));
746 		}
747 		ste_init(sc);
748 		CSR_WRITE_2(sc, STE_TX_STATUS, txstat);
749 	}
750 
751 	return;
752 }
753 
754 void
755 ste_txeof(struct ste_softc *sc)
756 {
757 	struct ste_chain	*cur_tx = NULL;
758 	struct ifnet		*ifp;
759 	int			idx;
760 
761 	ifp = &sc->arpcom.ac_if;
762 
763 	idx = sc->ste_cdata.ste_tx_cons;
764 	while(idx != sc->ste_cdata.ste_tx_prod) {
765 		cur_tx = &sc->ste_cdata.ste_tx_chain[idx];
766 
767 		if (!(cur_tx->ste_ptr->ste_ctl & STE_TXCTL_DMADONE))
768 			break;
769 
770 		m_freem(cur_tx->ste_mbuf);
771 		cur_tx->ste_mbuf = NULL;
772 		ifp->if_flags &= ~IFF_OACTIVE;
773 		ifp->if_opackets++;
774 
775 		STE_INC(idx, STE_TX_LIST_CNT);
776 	}
777 
778 	sc->ste_cdata.ste_tx_cons = idx;
779 	if (idx == sc->ste_cdata.ste_tx_prod)
780 		ifp->if_timer = 0;
781 
782 	return;
783 }
784 
785 void
786 ste_stats_update(void *xsc)
787 {
788 	struct ste_softc	*sc;
789 	struct ifnet		*ifp;
790 	struct mii_data		*mii;
791 	int			s;
792 
793 	s = splnet();
794 
795 	sc = xsc;
796 	ifp = &sc->arpcom.ac_if;
797 	mii = &sc->sc_mii;
798 
799 	ifp->if_collisions += CSR_READ_1(sc, STE_LATE_COLLS)
800 	    + CSR_READ_1(sc, STE_MULTI_COLLS)
801 	    + CSR_READ_1(sc, STE_SINGLE_COLLS);
802 
803 	if (!sc->ste_link) {
804 		mii_pollstat(mii);
805 		if (mii->mii_media_status & IFM_ACTIVE &&
806 		    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
807 			sc->ste_link++;
808 			/*
809 			 * We don't get a callback on re-init, so do it here;
810 			 * otherwise we get stuck in the wrong link state.
811 			 */
812 			ste_miibus_statchg((struct device *)sc);
813 			if (!IFQ_IS_EMPTY(&ifp->if_snd))
814 				ste_start(ifp);
815 		}
816 	}
817 
818 	timeout_add_sec(&sc->sc_stats_tmo, 1);
819 	splx(s);
820 
821 	return;
822 }
823 
824 const struct pci_matchid ste_devices[] = {
825 	{ PCI_VENDOR_SUNDANCE, PCI_PRODUCT_SUNDANCE_ST201_1 },
826 	{ PCI_VENDOR_SUNDANCE, PCI_PRODUCT_SUNDANCE_ST201_2 },
827 	{ PCI_VENDOR_DLINK, PCI_PRODUCT_DLINK_550TX }
828 };
829 
830 /*
831  * Probe for a Sundance ST201 chip. Check the PCI vendor and device
832  * IDs against our list and return a device name if we find a match.
833  */
834 int
835 ste_probe(struct device *parent, void *match, void *aux)
836 {
837 	return (pci_matchbyid((struct pci_attach_args *)aux, ste_devices,
838 	    sizeof(ste_devices)/sizeof(ste_devices[0])));
839 }
840 
841 /*
842  * Attach the interface. Allocate softc structures, do ifmedia
843  * setup and ethernet/BPF attach.
844  */
845 void
846 ste_attach(struct device *parent, struct device *self, void *aux)
847 {
848 	const char		*intrstr = NULL;
849 	pcireg_t		command;
850 	struct ste_softc	*sc = (struct ste_softc *)self;
851 	struct pci_attach_args	*pa = aux;
852 	pci_chipset_tag_t	pc = pa->pa_pc;
853 	pci_intr_handle_t	ih;
854 	struct ifnet		*ifp;
855 	bus_size_t		size;
856 
857 	/*
858 	 * Handle power management nonsense.
859 	 */
860 	command = pci_conf_read(pc, pa->pa_tag, STE_PCI_CAPID) & 0x000000FF;
861 	if (command == 0x01) {
862 
863 		command = pci_conf_read(pc, pa->pa_tag, STE_PCI_PWRMGMTCTRL);
864 		if (command & STE_PSTATE_MASK) {
865 			u_int32_t		iobase, membase, irq;
866 
867 			/* Save important PCI config data. */
868 			iobase = pci_conf_read(pc, pa->pa_tag, STE_PCI_LOIO);
869 			membase = pci_conf_read(pc, pa->pa_tag, STE_PCI_LOMEM);
870 			irq = pci_conf_read(pc, pa->pa_tag, STE_PCI_INTLINE);
871 
872 			/* Reset the power state. */
873 			printf("%s: chip is in D%d power mode -- setting to D0\n",
874 				sc->sc_dev.dv_xname, command & STE_PSTATE_MASK);
875 			command &= 0xFFFFFFFC;
876 			pci_conf_write(pc, pa->pa_tag, STE_PCI_PWRMGMTCTRL, command);
877 
878 			/* Restore PCI config data. */
879 			pci_conf_write(pc, pa->pa_tag, STE_PCI_LOIO, iobase);
880 			pci_conf_write(pc, pa->pa_tag, STE_PCI_LOMEM, membase);
881 			pci_conf_write(pc, pa->pa_tag, STE_PCI_INTLINE, irq);
882 		}
883 	}
884 
885 	/*
886 	 * Only use one PHY, since this chip reports multiple.
887 	 * Note: on the DFE-550 the PHY is at 1; on the DFE-580
888 	 * it is at 0 & 1.  The chip in question is rev 0x12.
889 	 */
890 	if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_DLINK &&
891 	    PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_DLINK_550TX &&
892 	    PCI_REVISION(pa->pa_class) == 0x12)
893 		sc->ste_one_phy = 1;
894 
895 	/*
896 	 * Map control/status registers.
897 	 */
898 
899 #ifdef STE_USEIOSPACE
900 	if (pci_mapreg_map(pa, STE_PCI_LOIO,
901 	    PCI_MAPREG_TYPE_IO, 0,
902 	    &sc->ste_btag, &sc->ste_bhandle, NULL, &size, 0)) {
903 		printf(": can't map i/o space\n");
904 		return;
905 	}
906 #else
907 	if (pci_mapreg_map(pa, STE_PCI_LOMEM,
908 	    PCI_MAPREG_TYPE_MEM|PCI_MAPREG_MEM_TYPE_32BIT, 0,
909 	    &sc->ste_btag, &sc->ste_bhandle, NULL, &size, 0)) {
910 		printf(": can't map mem space\n");
911 		return;
912 	}
913 #endif
914 
915 	/* Allocate interrupt */
916 	if (pci_intr_map(pa, &ih)) {
917 		printf(": couldn't map interrupt\n");
918 		goto fail_1;
919 	}
920 	intrstr = pci_intr_string(pc, ih);
921 	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, ste_intr, sc,
922 	    self->dv_xname);
923 	if (sc->sc_ih == NULL) {
924 		printf(": couldn't establish interrupt");
925 		if (intrstr != NULL)
926 			printf(" at %s", intrstr);
927 		printf("\n");
928 		goto fail_1;
929 	}
930 	printf(": %s", intrstr);
931 
932 	/* Reset the adapter. */
933 	ste_reset(sc);
934 
935 	/*
936 	 * Get station address from the EEPROM.
937 	 */
938 	if (ste_read_eeprom(sc, (caddr_t)&sc->arpcom.ac_enaddr,
939 	    STE_EEADDR_NODE0, 3, 0)) {
940 		printf(": failed to read station address\n");
941 		goto fail_2;
942 	}
943 
944 	printf(", address %s\n", ether_sprintf(sc->arpcom.ac_enaddr));
945 
946 	sc->ste_ldata_ptr = malloc(sizeof(struct ste_list_data) + 8,
947 	    M_DEVBUF, M_DONTWAIT);
948 	if (sc->ste_ldata_ptr == NULL) {
949 		printf(": no memory for list buffers!\n");
950 		goto fail_2;
951 	}
952 
953 	sc->ste_ldata = (struct ste_list_data *)sc->ste_ldata_ptr;
954 	bzero(sc->ste_ldata, sizeof(struct ste_list_data));
955 
956 	ifp = &sc->arpcom.ac_if;
957 	ifp->if_softc = sc;
958 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
959 	ifp->if_ioctl = ste_ioctl;
960 	ifp->if_start = ste_start;
961 	ifp->if_watchdog = ste_watchdog;
962 	ifp->if_baudrate = 10000000;
963 	IFQ_SET_MAXLEN(&ifp->if_snd, STE_TX_LIST_CNT - 1);
964 	IFQ_SET_READY(&ifp->if_snd);
965 	bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);
966 	ifp->if_capabilities = IFCAP_VLAN_MTU;
967 
968 	sc->ste_tx_thresh = STE_TXSTART_THRESH;
969 
970 	sc->sc_mii.mii_ifp = ifp;
971 	sc->sc_mii.mii_readreg = ste_miibus_readreg;
972 	sc->sc_mii.mii_writereg = ste_miibus_writereg;
973 	sc->sc_mii.mii_statchg = ste_miibus_statchg;
974 	ifmedia_init(&sc->sc_mii.mii_media, 0, ste_ifmedia_upd,ste_ifmedia_sts);
975 	mii_attach(self, &sc->sc_mii, 0xffffffff, MII_PHY_ANY, MII_OFFSET_ANY,
976 	    0);
977 	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
978 		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
979 		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
980 	} else
981 		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
982 
983 	/*
984 	 * Call MI attach routines.
985 	 */
986 	if_attach(ifp);
987 	ether_ifattach(ifp);
988 
989 	shutdownhook_establish(ste_shutdown, sc);
990 	return;
991 
992 fail_2:
993 	pci_intr_disestablish(pc, sc->sc_ih);
994 
995 fail_1:
996 	bus_space_unmap(sc->ste_btag, sc->ste_bhandle, size);
997 }
998 
999 int
1000 ste_newbuf(struct ste_softc *sc, struct ste_chain_onefrag *c, struct mbuf *m)
1001 {
1002 	struct mbuf		*m_new = NULL;
1003 
1004 	if (m == NULL) {
1005 		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
1006 		if (m_new == NULL)
1007 			return(ENOBUFS);
1008 		MCLGET(m_new, M_DONTWAIT);
1009 		if (!(m_new->m_flags & M_EXT)) {
1010 			m_freem(m_new);
1011 			return(ENOBUFS);
1012 		}
1013 		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
1014 	} else {
1015 		m_new = m;
1016 		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
1017 		m_new->m_data = m_new->m_ext.ext_buf;
1018 	}
1019 
1020 	m_adj(m_new, ETHER_ALIGN);
1021 
1022 	c->ste_mbuf = m_new;
1023 	c->ste_ptr->ste_status = 0;
1024 	c->ste_ptr->ste_frag.ste_addr = vtophys(mtod(m_new, vaddr_t));
1025 	c->ste_ptr->ste_frag.ste_len = (ETHER_MAX_DIX_LEN + ETHER_VLAN_ENCAP_LEN) | STE_FRAG_LAST;
1026 
1027 	return(0);
1028 }
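
/*
 * Note on the m_adj() above: ETHER_ALIGN is 2, so the packet data is
 * offset two bytes into the cluster.  With the 14-byte Ethernet header
 * in front, the IP header that follows lands on a 4-byte boundary.  A
 * sketch of the resulting layout (illustrative only):
 *
 *	offset  0..1	unused pad
 *	offset  2..15	Ethernet header (14 bytes)
 *	offset 16..	IP header, 32-bit aligned
 */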
1029 
1030 int
1031 ste_init_rx_list(struct ste_softc *sc)
1032 {
1033 	struct ste_chain_data	*cd;
1034 	struct ste_list_data	*ld;
1035 	int			i;
1036 
1037 	cd = &sc->ste_cdata;
1038 	ld = sc->ste_ldata;
1039 
1040 	for (i = 0; i < STE_RX_LIST_CNT; i++) {
1041 		cd->ste_rx_chain[i].ste_ptr = &ld->ste_rx_list[i];
1042 		if (ste_newbuf(sc, &cd->ste_rx_chain[i], NULL) == ENOBUFS)
1043 			return(ENOBUFS);
1044 		if (i == (STE_RX_LIST_CNT - 1)) {
1045 			cd->ste_rx_chain[i].ste_next =
1046 			    &cd->ste_rx_chain[0];
1047 			ld->ste_rx_list[i].ste_next =
1048 			    vtophys((vaddr_t)&ld->ste_rx_list[0]);
1049 		} else {
1050 			cd->ste_rx_chain[i].ste_next =
1051 			    &cd->ste_rx_chain[i + 1];
1052 			ld->ste_rx_list[i].ste_next =
1053 			    vtophys((vaddr_t)&ld->ste_rx_list[i + 1]);
1054 		}
1055 		ld->ste_rx_list[i].ste_status = 0;
1056 	}
1057 
1058 	cd->ste_rx_head = &cd->ste_rx_chain[0];
1059 
1060 	return(0);
1061 }
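
/*
 * The loop above builds two parallel rings (descriptive note): the
 * software ring links ste_rx_chain[i].ste_next to the next
 * ste_chain_onefrag, while the descriptor ring links
 * ste_rx_list[i].ste_next to the physical address of the next
 * descriptor, since that is what the chip's RX DMA engine follows.
 * Both wrap from the last entry back to entry 0.
 */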
1062 
1063 void
1064 ste_init_tx_list(struct ste_softc *sc)
1065 {
1066 	struct ste_chain_data	*cd;
1067 	struct ste_list_data	*ld;
1068 	int			i;
1069 
1070 	cd = &sc->ste_cdata;
1071 	ld = sc->ste_ldata;
1072 	for (i = 0; i < STE_TX_LIST_CNT; i++) {
1073 		cd->ste_tx_chain[i].ste_ptr = &ld->ste_tx_list[i];
1074 		cd->ste_tx_chain[i].ste_phys = vtophys((vaddr_t)&ld->ste_tx_list[i]);
1075 		if (i == (STE_TX_LIST_CNT - 1))
1076 			cd->ste_tx_chain[i].ste_next =
1077 			    &cd->ste_tx_chain[0];
1078 		else
1079 			cd->ste_tx_chain[i].ste_next =
1080 			    &cd->ste_tx_chain[i + 1];
1081 	}
1082 
1083 	bzero((char *)ld->ste_tx_list,
1084 	    sizeof(struct ste_desc) * STE_TX_LIST_CNT);
1085 
1086 	cd->ste_tx_prod = 0;
1087 	cd->ste_tx_cons = 0;
1088 
1089 	return;
1090 }
1091 
1092 void
1093 ste_init(void *xsc)
1094 {
1095 	struct ste_softc	*sc = (struct ste_softc *)xsc;
1096 	struct ifnet		*ifp = &sc->arpcom.ac_if;
1097 	struct mii_data		*mii;
1098 	int			i, s;
1099 
1100 	s = splnet();
1101 
1102 	ste_stop(sc);
1103 
1104 	mii = &sc->sc_mii;
1105 
1106 	/* Init our MAC address */
1107 	for (i = 0; i < ETHER_ADDR_LEN; i++) {
1108 		CSR_WRITE_1(sc, STE_PAR0 + i, sc->arpcom.ac_enaddr[i]);
1109 	}
1110 
1111 	/* Init RX list */
1112 	if (ste_init_rx_list(sc) == ENOBUFS) {
1113 		printf("%s: initialization failed: no "
1114 		    "memory for RX buffers\n", sc->sc_dev.dv_xname);
1115 		ste_stop(sc);
1116 		splx(s);
1117 		return;
1118 	}
1119 
1120 	/* Set RX polling interval */
1121 	CSR_WRITE_1(sc, STE_RX_DMAPOLL_PERIOD, 64);
1122 
1123 	/* Init TX descriptors */
1124 	ste_init_tx_list(sc);
1125 
1126 	/* Set the TX freethresh value */
1127 	CSR_WRITE_1(sc, STE_TX_DMABURST_THRESH, ETHER_MAX_DIX_LEN >> 8);
1128 
1129 	/* Set the TX start threshold for best performance. */
1130 	CSR_WRITE_2(sc, STE_TX_STARTTHRESH, sc->ste_tx_thresh);
1131 
1132 	/* Set the TX reclaim threshold. */
1133 	CSR_WRITE_1(sc, STE_TX_RECLAIM_THRESH, (ETHER_MAX_DIX_LEN >> 4));
1134 
1135 	/* Set up the RX filter. */
1136 	CSR_WRITE_1(sc, STE_RX_MODE, STE_RXMODE_UNICAST);
1137 
1138 	/* If we want promiscuous mode, set the allframes bit. */
1139 	if (ifp->if_flags & IFF_PROMISC) {
1140 		STE_SETBIT1(sc, STE_RX_MODE, STE_RXMODE_PROMISC);
1141 	} else {
1142 		STE_CLRBIT1(sc, STE_RX_MODE, STE_RXMODE_PROMISC);
1143 	}
1144 
1145 	/* Set capture broadcast bit to accept broadcast frames. */
1146 	if (ifp->if_flags & IFF_BROADCAST) {
1147 		STE_SETBIT1(sc, STE_RX_MODE, STE_RXMODE_BROADCAST);
1148 	} else {
1149 		STE_CLRBIT1(sc, STE_RX_MODE, STE_RXMODE_BROADCAST);
1150 	}
1151 
1152 	ste_setmulti(sc);
1153 
1154 	/* Load the address of the RX list. */
1155 	STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_RXDMA_STALL);
1156 	ste_wait(sc);
1157 	CSR_WRITE_4(sc, STE_RX_DMALIST_PTR,
1158 	    vtophys((vaddr_t)&sc->ste_ldata->ste_rx_list[0]));
1159 	STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_RXDMA_UNSTALL);
1160 	STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_RXDMA_UNSTALL);
1161 
1162 	/* Set TX polling interval (defer until we TX first packet) */
1163 	CSR_WRITE_1(sc, STE_TX_DMAPOLL_PERIOD, 0);
1164 
1165 	/* Load address of the TX list */
1166 	STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_TXDMA_STALL);
1167 	ste_wait(sc);
1168 	CSR_WRITE_4(sc, STE_TX_DMALIST_PTR, 0);
1169 	STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_TXDMA_UNSTALL);
1170 	STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_TXDMA_UNSTALL);
1171 	ste_wait(sc);
1172 	sc->ste_tx_prev = NULL;
1173 
1174 	/* Enable receiver and transmitter */
1175 	CSR_WRITE_2(sc, STE_MACCTL0, 0);
1176 	CSR_WRITE_2(sc, STE_MACCTL1, 0);
1177 	STE_SETBIT2(sc, STE_MACCTL1, STE_MACCTL1_TX_ENABLE);
1178 	STE_SETBIT2(sc, STE_MACCTL1, STE_MACCTL1_RX_ENABLE);
1179 
1180 	/* Enable stats counters. */
1181 	STE_SETBIT2(sc, STE_MACCTL1, STE_MACCTL1_STATS_ENABLE);
1182 
1183 	/* Enable interrupts. */
1184 	CSR_WRITE_2(sc, STE_ISR, 0xFFFF);
1185 	CSR_WRITE_2(sc, STE_IMR, STE_INTRS);
1186 
1187 	/* Accept VLAN length packets */
1188 	CSR_WRITE_2(sc, STE_MAX_FRAMELEN,
1189 	    ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN);
1190 
1191 	ste_ifmedia_upd(ifp);
1192 
1193 	ifp->if_flags |= IFF_RUNNING;
1194 	ifp->if_flags &= ~IFF_OACTIVE;
1195 
1196 	splx(s);
1197 
1198 	timeout_set(&sc->sc_stats_tmo, ste_stats_update, sc);
1199 	timeout_add_sec(&sc->sc_stats_tmo, 1);
1200 
1201 	return;
1202 }
1203 
1204 void
1205 ste_stop(struct ste_softc *sc)
1206 {
1207 	int			i;
1208 	struct ifnet		*ifp;
1209 
1210 	ifp = &sc->arpcom.ac_if;
1211 
1212 	timeout_del(&sc->sc_stats_tmo);
1213 
1214 	ifp->if_flags &= ~(IFF_RUNNING|IFF_OACTIVE);
1215 
1216 	CSR_WRITE_2(sc, STE_IMR, 0);
1217 	STE_SETBIT2(sc, STE_MACCTL1, STE_MACCTL1_TX_DISABLE);
1218 	STE_SETBIT2(sc, STE_MACCTL1, STE_MACCTL1_RX_DISABLE);
1219 	STE_SETBIT2(sc, STE_MACCTL1, STE_MACCTL1_STATS_DISABLE);
1220 	STE_SETBIT2(sc, STE_DMACTL, STE_DMACTL_TXDMA_STALL);
1221 	STE_SETBIT2(sc, STE_DMACTL, STE_DMACTL_RXDMA_STALL);
1222 	ste_wait(sc);
1223 	/*
1224 	 * Try really hard to stop the RX engine; otherwise, under heavy
1225 	 * RX traffic, the chip will write into de-allocated memory.
1226 	 */
1227 	ste_reset(sc);
1228 
1229 	sc->ste_link = 0;
1230 
1231 	for (i = 0; i < STE_RX_LIST_CNT; i++) {
1232 		if (sc->ste_cdata.ste_rx_chain[i].ste_mbuf != NULL) {
1233 			m_freem(sc->ste_cdata.ste_rx_chain[i].ste_mbuf);
1234 			sc->ste_cdata.ste_rx_chain[i].ste_mbuf = NULL;
1235 		}
1236 	}
1237 
1238 	for (i = 0; i < STE_TX_LIST_CNT; i++) {
1239 		if (sc->ste_cdata.ste_tx_chain[i].ste_mbuf != NULL) {
1240 			m_freem(sc->ste_cdata.ste_tx_chain[i].ste_mbuf);
1241 			sc->ste_cdata.ste_tx_chain[i].ste_mbuf = NULL;
1242 		}
1243 	}
1244 
1245 	bzero(sc->ste_ldata, sizeof(struct ste_list_data));
1246 
1247 	return;
1248 }
1249 
1250 void
1251 ste_reset(struct ste_softc *sc)
1252 {
1253 	int		i;
1254 
1255 	STE_SETBIT4(sc, STE_ASICCTL,
1256 	    STE_ASICCTL_GLOBAL_RESET|STE_ASICCTL_RX_RESET|
1257 	    STE_ASICCTL_TX_RESET|STE_ASICCTL_DMA_RESET|
1258 	    STE_ASICCTL_FIFO_RESET|STE_ASICCTL_NETWORK_RESET|
1259 	    STE_ASICCTL_AUTOINIT_RESET|STE_ASICCTL_HOST_RESET|
1260 	    STE_ASICCTL_EXTRESET_RESET);
1261 
1262 	DELAY(100000);
1263 
1264 	for (i = 0; i < STE_TIMEOUT; i++) {
1265 		if (!(CSR_READ_4(sc, STE_ASICCTL) & STE_ASICCTL_RESET_BUSY))
1266 			break;
1267 	}
1268 
1269 	if (i == STE_TIMEOUT)
1270 		printf("%s: global reset never completed\n",
1271 		    sc->sc_dev.dv_xname);
1272 }
1273 
1274 int
1275 ste_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
1276 {
1277 	struct ste_softc	*sc = ifp->if_softc;
1278 	struct ifaddr		*ifa = (struct ifaddr *) data;
1279 	struct ifreq		*ifr = (struct ifreq *) data;
1280 	struct mii_data		*mii;
1281 	int			s, error = 0;
1282 
1283 	s = splnet();
1284 
1285 	switch(command) {
1286 	case SIOCSIFADDR:
1287 		ifp->if_flags |= IFF_UP;
1288 		switch (ifa->ifa_addr->sa_family) {
1289 		case AF_INET:
1290 			ste_init(sc);
1291 			arp_ifinit(&sc->arpcom, ifa);
1292 			break;
1293 		default:
1294 			ste_init(sc);
1295 			break;
1296 		}
1297 		break;
1298 
1299 	case SIOCSIFFLAGS:
1300 		if (ifp->if_flags & IFF_UP) {
1301 			if (ifp->if_flags & IFF_RUNNING &&
1302 			    ifp->if_flags & IFF_PROMISC &&
1303 			    !(sc->ste_if_flags & IFF_PROMISC)) {
1304 				STE_SETBIT1(sc, STE_RX_MODE,
1305 				    STE_RXMODE_PROMISC);
1306 			} else if (ifp->if_flags & IFF_RUNNING &&
1307 			    !(ifp->if_flags & IFF_PROMISC) &&
1308 			    sc->ste_if_flags & IFF_PROMISC) {
1309 				STE_CLRBIT1(sc, STE_RX_MODE,
1310 				    STE_RXMODE_PROMISC);
1311 			}
1312 			if (ifp->if_flags & IFF_RUNNING &&
1313 			    (ifp->if_flags ^ sc->ste_if_flags) & IFF_ALLMULTI)
1314 				ste_setmulti(sc);
1315 			if (!(ifp->if_flags & IFF_RUNNING)) {
1316 				sc->ste_tx_thresh = STE_TXSTART_THRESH;
1317 				ste_init(sc);
1318 			}
1319 		} else {
1320 			if (ifp->if_flags & IFF_RUNNING)
1321 				ste_stop(sc);
1322 		}
1323 		sc->ste_if_flags = ifp->if_flags;
1324 		error = 0;
1325 		break;
1326 
1327 	case SIOCGIFMEDIA:
1328 	case SIOCSIFMEDIA:
1329 		mii = &sc->sc_mii;
1330 		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
1331 		break;
1332 
1333 	default:
1334 		error = ether_ioctl(ifp, &sc->arpcom, command, data);
1335 	}
1336 
1337 	if (error == ENETRESET) {
1338 		if (ifp->if_flags & IFF_RUNNING)
1339 			ste_setmulti(sc);
1340 		error = 0;
1341 	}
1342 
1343 	splx(s);
1344 	return(error);
1345 }
1346 
1347 int
1348 ste_encap(struct ste_softc *sc, struct ste_chain *c, struct mbuf *m_head)
1349 {
1350 	int			frag = 0;
1351 	struct ste_frag		*f = NULL;
1352 	struct mbuf		*m;
1353 	struct ste_desc		*d;
1354 
1355 	d = c->ste_ptr;
1356 	d->ste_ctl = 0;
1357 
1358 encap_retry:
1359 	for (m = m_head, frag = 0; m != NULL; m = m->m_next) {
1360 		if (m->m_len != 0) {
1361 			if (frag == STE_MAXFRAGS)
1362 				break;
1363 			f = &d->ste_frags[frag];
1364 			f->ste_addr = vtophys(mtod(m, vaddr_t));
1365 			f->ste_len = m->m_len;
1366 			frag++;
1367 		}
1368 	}
1369 
1370 	if (m != NULL) {
1371 		struct mbuf *mn;
1372 
1373 		/*
1374 		 * We ran out of segments. We have to recopy this
1375 		 * mbuf chain first. Bail out if we can't get the
1376 		 * new buffers.
1377 		 */
1378 		MGETHDR(mn, M_DONTWAIT, MT_DATA);
1379 		if (mn == NULL) {
1380 			m_freem(m_head);
1381 			return ENOMEM;
1382 		}
1383 		if (m_head->m_pkthdr.len > MHLEN) {
1384 			MCLGET(mn, M_DONTWAIT);
1385 			if ((mn->m_flags & M_EXT) == 0) {
1386 				m_freem(mn);
1387 				m_freem(m_head);
1388 				return ENOMEM;
1389 			}
1390 		}
1391 		m_copydata(m_head, 0, m_head->m_pkthdr.len,
1392 			   mtod(mn, caddr_t));
1393 		mn->m_pkthdr.len = mn->m_len = m_head->m_pkthdr.len;
1394 		m_freem(m_head);
1395 		m_head = mn;
1396 		goto encap_retry;
1397 	}
1398 
1399 	c->ste_mbuf = m_head;
1400 	d->ste_frags[frag - 1].ste_len |= STE_FRAG_LAST;
1401 	d->ste_ctl = 1;
1402 
1403 	return(0);
1404 }
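
/*
 * Illustrative result (not driver code): for an mbuf chain of two
 * non-empty mbufs of 14 and 86 bytes, the descriptor built above ends
 * up as
 *
 *	d->ste_frags[0].ste_len = 14;
 *	d->ste_frags[1].ste_len = 86 | STE_FRAG_LAST;
 *
 * with each ste_addr holding the physical address of its mbuf's data.
 * Chains with more than STE_MAXFRAGS non-empty mbufs are first copied
 * into a single mbuf (or cluster) and then re-encapsulated.
 */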
1405 
1406 void
1407 ste_start(struct ifnet *ifp)
1408 {
1409 	struct ste_softc	*sc;
1410 	struct mbuf		*m_head = NULL;
1411 	struct ste_chain	*cur_tx;
1412 	int			idx;
1413 
1414 	sc = ifp->if_softc;
1415 
1416 	if (!sc->ste_link)
1417 		return;
1418 
1419 	if (ifp->if_flags & IFF_OACTIVE)
1420 		return;
1421 
1422 	idx = sc->ste_cdata.ste_tx_prod;
1423 
1424 	while(sc->ste_cdata.ste_tx_chain[idx].ste_mbuf == NULL) {
1425 		/*
1426 		 * We cannot re-use the last (free) descriptor;
1427 		 * the chip may not have read its ste_next yet.
1428 		 */
1429 		if (STE_NEXT(idx, STE_TX_LIST_CNT) ==
1430 		    sc->ste_cdata.ste_tx_cons) {
1431 			ifp->if_flags |= IFF_OACTIVE;
1432 			break;
1433 		}
1434 
1435 		IFQ_DEQUEUE(&ifp->if_snd, m_head);
1436 		if (m_head == NULL)
1437 			break;
1438 
1439 		cur_tx = &sc->ste_cdata.ste_tx_chain[idx];
1440 
1441 		if (ste_encap(sc, cur_tx, m_head) != 0)
1442 			break;
1443 
1444 		cur_tx->ste_ptr->ste_next = 0;
1445 
1446 		if (sc->ste_tx_prev == NULL) {
1447 			cur_tx->ste_ptr->ste_ctl = STE_TXCTL_DMAINTR | 1;
1448 			/* Load address of the TX list */
1449 			STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_TXDMA_STALL);
1450 			ste_wait(sc);
1451 
1452 			CSR_WRITE_4(sc, STE_TX_DMALIST_PTR,
1453 			    vtophys((vaddr_t)&sc->ste_ldata->ste_tx_list[0]));
1454 
1455 			/* Set TX polling interval to start TX engine */
1456 			CSR_WRITE_1(sc, STE_TX_DMAPOLL_PERIOD, 64);
1457 
1458 			STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_TXDMA_UNSTALL);
1459 			ste_wait(sc);
1460 		} else {
1461 			cur_tx->ste_ptr->ste_ctl = STE_TXCTL_DMAINTR | 1;
1462 			sc->ste_tx_prev->ste_ptr->ste_next
1463 				= cur_tx->ste_phys;
1464 		}
1465 
1466 		sc->ste_tx_prev = cur_tx;
1467 
1468 #if NBPFILTER > 0
1469 		/*
1470 		 * If there's a BPF listener, bounce a copy of this frame
1471 		 * to him.
1472 	 	 */
1473 		if (ifp->if_bpf)
1474 			bpf_mtap(ifp->if_bpf, cur_tx->ste_mbuf,
1475 			    BPF_DIRECTION_OUT);
1476 #endif
1477 
1478 		STE_INC(idx, STE_TX_LIST_CNT);
1479 		ifp->if_timer = 5;
1480 	}
1481 	sc->ste_cdata.ste_tx_prod = idx;
1482 
1483 	return;
1484 }
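
/*
 * Descriptive note on the queueing logic above: for the first frame
 * after an init (ste_tx_prev == NULL) the TX DMA list pointer and the
 * TX poll period are programmed to start the engine on this
 * descriptor; for every subsequent frame the new descriptor is simply
 * appended by pointing the previous descriptor's ste_next at the new
 * one's physical address (cur_tx->ste_phys).
 */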
1485 
1486 void
1487 ste_watchdog(struct ifnet *ifp)
1488 {
1489 	struct ste_softc	*sc;
1490 
1491 	sc = ifp->if_softc;
1492 
1493 	ifp->if_oerrors++;
1494 	printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);
1495 
1496 	ste_txeoc(sc);
1497 	ste_txeof(sc);
1498 	ste_rxeoc(sc);
1499 	ste_rxeof(sc);
1500 	ste_reset(sc);
1501 	ste_init(sc);
1502 
1503 	if (!IFQ_IS_EMPTY(&ifp->if_snd))
1504 		ste_start(ifp);
1505 
1506 	return;
1507 }
1508 
1509 void
1510 ste_shutdown(void *v)
1511 {
1512 	struct ste_softc	*sc = (struct ste_softc *)v;
1513 
1514 	ste_stop(sc);
1515 }
1516