xref: /openbsd-src/sys/dev/pci/if_ste.c (revision 5738bc6274bbfbba95d07e5dc8d380a26b687449)
1 /*	$OpenBSD: if_ste.c,v 1.24 2004/08/22 18:16:20 canacar Exp $ */
2 /*
3  * Copyright (c) 1997, 1998, 1999
4  *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  * 3. All advertising materials mentioning features or use of this software
15  *    must display the following acknowledgement:
16  *	This product includes software developed by Bill Paul.
17  * 4. Neither the name of the author nor the names of any co-contributors
18  *    may be used to endorse or promote products derived from this software
19  *    without specific prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
22  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24  * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
25  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
31  * THE POSSIBILITY OF SUCH DAMAGE.
32  *
33  * $FreeBSD: src/sys/pci/if_ste.c,v 1.14 1999/12/07 20:14:42 wpaul Exp $
34  */
35 
36 #include "bpfilter.h"
37 
38 #include <sys/param.h>
39 #include <sys/systm.h>
40 #include <sys/mbuf.h>
41 #include <sys/protosw.h>
42 #include <sys/socket.h>
43 #include <sys/ioctl.h>
44 #include <sys/errno.h>
45 #include <sys/malloc.h>
46 #include <sys/kernel.h>
47 #include <sys/timeout.h>
48 
49 #include <net/if.h>
50 #include <net/if_dl.h>
51 #include <net/if_types.h>
52 
53 #ifdef INET
54 #include <netinet/in.h>
55 #include <netinet/in_systm.h>
56 #include <netinet/in_var.h>
57 #include <netinet/ip.h>
58 #include <netinet/if_ether.h>
59 #endif
60 
61 #include <net/if_media.h>
62 
63 #if NBPFILTER > 0
64 #include <net/bpf.h>
65 #endif
66 
67 #include <uvm/uvm_extern.h>              /* for vtophys */
68 
69 #include <sys/device.h>
70 
71 #include <dev/mii/mii.h>
72 #include <dev/mii/miivar.h>
73 
74 #include <dev/pci/pcireg.h>
75 #include <dev/pci/pcivar.h>
76 #include <dev/pci/pcidevs.h>
77 
78 #define STE_USEIOSPACE
79 
80 #include <dev/pci/if_stereg.h>
81 
82 int ste_probe(struct device *, void *, void *);
83 void ste_attach(struct device *, struct device *, void *);
84 int ste_intr(void *);
85 void ste_shutdown(void *);
86 void ste_init(void *);
87 void ste_rxeof(struct ste_softc *);
88 void ste_txeoc(struct ste_softc *);
89 void ste_txeof(struct ste_softc *);
90 void ste_stats_update(void *);
91 void ste_stop(struct ste_softc *);
92 void ste_reset(struct ste_softc *);
93 int ste_ioctl(struct ifnet *, u_long, caddr_t);
94 int ste_encap(struct ste_softc *, struct ste_chain *,
95 					struct mbuf *);
96 void ste_start(struct ifnet *);
97 void ste_watchdog(struct ifnet *);
98 int ste_newbuf(struct ste_softc *,
99 					struct ste_chain_onefrag *,
100 					struct mbuf *);
101 int ste_ifmedia_upd(struct ifnet *);
102 void ste_ifmedia_sts(struct ifnet *, struct ifmediareq *);
103 
104 void ste_mii_sync(struct ste_softc *);
105 void ste_mii_send(struct ste_softc *, u_int32_t, int);
106 int ste_mii_readreg(struct ste_softc *,
107 					struct ste_mii_frame *);
108 int ste_mii_writereg(struct ste_softc *,
109 					struct ste_mii_frame *);
110 int ste_miibus_readreg(struct device *, int, int);
111 void ste_miibus_writereg(struct device *, int, int, int);
112 void ste_miibus_statchg(struct device *);
113 
114 int ste_eeprom_wait(struct ste_softc *);
115 int ste_read_eeprom(struct ste_softc *, caddr_t, int,
116 							int, int);
117 void ste_wait(struct ste_softc *);
118 void ste_setmulti(struct ste_softc *);
119 int ste_init_rx_list(struct ste_softc *);
120 void ste_init_tx_list(struct ste_softc *);
121 
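/*
 * Register access helpers: read-modify-write macros to set and clear
 * bits in 32-, 16- and 8-bit chip registers.
 */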
122 #define STE_SETBIT4(sc, reg, x)				\
123 	CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) | x)
124 
125 #define STE_CLRBIT4(sc, reg, x)				\
126 	CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) & ~x)
127 
128 #define STE_SETBIT2(sc, reg, x)				\
129 	CSR_WRITE_2(sc, reg, CSR_READ_2(sc, reg) | x)
130 
131 #define STE_CLRBIT2(sc, reg, x)				\
132 	CSR_WRITE_2(sc, reg, CSR_READ_2(sc, reg) & ~x)
133 
134 #define STE_SETBIT1(sc, reg, x)				\
135 	CSR_WRITE_1(sc, reg, CSR_READ_1(sc, reg) | x)
136 
137 #define STE_CLRBIT1(sc, reg, x)				\
138 	CSR_WRITE_1(sc, reg, CSR_READ_1(sc, reg) & ~x)
139 
140 
141 #define MII_SET(x)		STE_SETBIT1(sc, STE_PHYCTL, x)
142 #define MII_CLR(x)		STE_CLRBIT1(sc, STE_PHYCTL, x)
143 
144 /*
145  * Sync the PHYs by setting the data bit and strobing the clock 32 times.
146  */
147 void ste_mii_sync(sc)
148 	struct ste_softc		*sc;
149 {
150 	register int		i;
151 
152 	MII_SET(STE_PHYCTL_MDIR|STE_PHYCTL_MDATA);
153 
154 	for (i = 0; i < 32; i++) {
155 		MII_SET(STE_PHYCTL_MCLK);
156 		DELAY(1);
157 		MII_CLR(STE_PHYCTL_MCLK);
158 		DELAY(1);
159 	}
160 
161 	return;
162 }
163 
164 /*
165  * Clock a series of bits through the MII.
166  */
167 void ste_mii_send(sc, bits, cnt)
168 	struct ste_softc		*sc;
169 	u_int32_t		bits;
170 	int			cnt;
171 {
172 	int			i;
173 
174 	MII_CLR(STE_PHYCTL_MCLK);
175 
176 	for (i = (0x1 << (cnt - 1)); i; i >>= 1) {
177 		if (bits & i) {
178 			MII_SET(STE_PHYCTL_MDATA);
179 		} else {
180 			MII_CLR(STE_PHYCTL_MDATA);
181 		}
182 		DELAY(1);
183 		MII_CLR(STE_PHYCTL_MCLK);
184 		DELAY(1);
185 		MII_SET(STE_PHYCTL_MCLK);
186 	}
187 }
188 
189 /*
190  * Read a PHY register through the MII.
191  */
192 int ste_mii_readreg(sc, frame)
193 	struct ste_softc		*sc;
194 	struct ste_mii_frame	*frame;
195 
196 {
197 	int			i, ack, s;
198 
199 	s = splimp();
200 
201 	/*
202 	 * Set up frame for RX.
203 	 */
204 	frame->mii_stdelim = STE_MII_STARTDELIM;
205 	frame->mii_opcode = STE_MII_READOP;
206 	frame->mii_turnaround = 0;
207 	frame->mii_data = 0;
208 
209 	CSR_WRITE_2(sc, STE_PHYCTL, 0);
210 	/*
211  	 * Turn on data xmit.
212 	 */
213 	MII_SET(STE_PHYCTL_MDIR);
214 
215 	ste_mii_sync(sc);
216 
217 	/*
218 	 * Send command/address info.
219 	 */
220 	ste_mii_send(sc, frame->mii_stdelim, 2);
221 	ste_mii_send(sc, frame->mii_opcode, 2);
222 	ste_mii_send(sc, frame->mii_phyaddr, 5);
223 	ste_mii_send(sc, frame->mii_regaddr, 5);
224 
225 	/* Turn off xmit. */
226 	MII_CLR(STE_PHYCTL_MDIR);
227 
228 	/* Idle bit */
229 	MII_CLR((STE_PHYCTL_MCLK|STE_PHYCTL_MDATA));
230 	DELAY(1);
231 	MII_SET(STE_PHYCTL_MCLK);
232 	DELAY(1);
233 
234 	/* Check for ack */
235 	MII_CLR(STE_PHYCTL_MCLK);
236 	DELAY(1);
237 	ack = CSR_READ_2(sc, STE_PHYCTL) & STE_PHYCTL_MDATA;
238 	MII_SET(STE_PHYCTL_MCLK);
239 	DELAY(1);
240 
241 	/*
242 	 * Now try reading data bits. If the ack failed, we still
243 	 * need to clock through 16 cycles to keep the PHY(s) in sync.
244 	 */
245 	if (ack) {
246 		for(i = 0; i < 16; i++) {
247 			MII_CLR(STE_PHYCTL_MCLK);
248 			DELAY(1);
249 			MII_SET(STE_PHYCTL_MCLK);
250 			DELAY(1);
251 		}
252 		goto fail;
253 	}
254 
255 	for (i = 0x8000; i; i >>= 1) {
256 		MII_CLR(STE_PHYCTL_MCLK);
257 		DELAY(1);
258 		if (!ack) {
259 			if (CSR_READ_2(sc, STE_PHYCTL) & STE_PHYCTL_MDATA)
260 				frame->mii_data |= i;
261 			DELAY(1);
262 		}
263 		MII_SET(STE_PHYCTL_MCLK);
264 		DELAY(1);
265 	}
266 
267 fail:
268 
269 	MII_CLR(STE_PHYCTL_MCLK);
270 	DELAY(1);
271 	MII_SET(STE_PHYCTL_MCLK);
272 	DELAY(1);
273 
274 	splx(s);
275 
276 	if (ack)
277 		return(1);
278 	return(0);
279 }
280 
281 /*
282  * Write to a PHY register through the MII.
283  */
284 int ste_mii_writereg(sc, frame)
285 	struct ste_softc		*sc;
286 	struct ste_mii_frame	*frame;
287 
288 {
289 	int			s;
290 
291 	s = splimp();
292 	/*
293 	 * Set up frame for TX.
294 	 */
295 
296 	frame->mii_stdelim = STE_MII_STARTDELIM;
297 	frame->mii_opcode = STE_MII_WRITEOP;
298 	frame->mii_turnaround = STE_MII_TURNAROUND;
299 
300 	/*
301  	 * Turn on data output.
302 	 */
303 	MII_SET(STE_PHYCTL_MDIR);
304 
305 	ste_mii_sync(sc);
306 
307 	ste_mii_send(sc, frame->mii_stdelim, 2);
308 	ste_mii_send(sc, frame->mii_opcode, 2);
309 	ste_mii_send(sc, frame->mii_phyaddr, 5);
310 	ste_mii_send(sc, frame->mii_regaddr, 5);
311 	ste_mii_send(sc, frame->mii_turnaround, 2);
312 	ste_mii_send(sc, frame->mii_data, 16);
313 
314 	/* Idle bit. */
315 	MII_SET(STE_PHYCTL_MCLK);
316 	DELAY(1);
317 	MII_CLR(STE_PHYCTL_MCLK);
318 	DELAY(1);
319 
320 	/*
321 	 * Turn off xmit.
322 	 */
323 	MII_CLR(STE_PHYCTL_MDIR);
324 
325 	splx(s);
326 
327 	return(0);
328 }
329 
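/*
 * MI MII interface: read a PHY register on behalf of the mii layer.
 */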
330 int ste_miibus_readreg(self, phy, reg)
331 	struct device		*self;
332 	int			phy, reg;
333 {
334 	struct ste_softc	*sc = (struct ste_softc *)self;
335 	struct ste_mii_frame	frame;
336 
337 	bzero((char *)&frame, sizeof(frame));
338 
339 	frame.mii_phyaddr = phy;
340 	frame.mii_regaddr = reg;
341 	ste_mii_readreg(sc, &frame);
342 
343 	return(frame.mii_data);
344 }
345 
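/*
 * MI MII interface: write a PHY register on behalf of the mii layer.
 */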
346 void ste_miibus_writereg(self, phy, reg, data)
347 	struct device		*self;
348 	int			phy, reg, data;
349 {
350 	struct ste_softc	*sc = (struct ste_softc *)self;
351 	struct ste_mii_frame	frame;
352 
353 	bzero((char *)&frame, sizeof(frame));
354 
355 	frame.mii_phyaddr = phy;
356 	frame.mii_regaddr = reg;
357 	frame.mii_data = data;
358 
359 	ste_mii_writereg(sc, &frame);
360 
361 	return;
362 }
363 
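/*
 * MI MII interface: the negotiated media changed. Stall the DMA
 * engines, bring the MAC duplex setting in line with the PHY and
 * unstall again.
 */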
364 void ste_miibus_statchg(self)
365 	struct device		*self;
366 {
367 	struct ste_softc	*sc = (struct ste_softc *)self;
368 	struct mii_data		*mii;
369 	int fdx, fcur;
370 
371 	mii = &sc->sc_mii;
372 
373 	fcur = CSR_READ_2(sc, STE_MACCTL0) & STE_MACCTL0_FULLDUPLEX;
374 	fdx = (mii->mii_media_active & IFM_GMASK) == IFM_FDX;
375 
376 	if ((fcur && fdx) || (! fcur && ! fdx))
377 		return;
378 
379 	STE_SETBIT4(sc, STE_DMACTL,
380 	    STE_DMACTL_RXDMA_STALL |STE_DMACTL_TXDMA_STALL);
381 	ste_wait(sc);
382 
383 	if (fdx)
384 		STE_SETBIT2(sc, STE_MACCTL0, STE_MACCTL0_FULLDUPLEX);
385 	else
386 		STE_CLRBIT2(sc, STE_MACCTL0, STE_MACCTL0_FULLDUPLEX);
387 
388 	printf("%s: %s-duplex\n", sc->sc_dev.dv_xname, fdx ? "full":"half");
389 
390 	STE_SETBIT4(sc, STE_DMACTL,
391 	    STE_DMACTL_RXDMA_UNSTALL | STE_DMACTL_TXDMA_UNSTALL);
392 
393 	return;
394 }
395 
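/*
 * Set hardware to the newly selected media: reset the PHYs and let
 * the mii layer program the new mode.
 */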
396 int ste_ifmedia_upd(ifp)
397 	struct ifnet		*ifp;
398 {
399 	struct ste_softc	*sc;
400 	struct mii_data		*mii;
401 
402 	sc = ifp->if_softc;
403 	mii = &sc->sc_mii;
404 	sc->ste_link = 0;
405 	if (mii->mii_instance) {
406 		struct mii_softc	*miisc;
407 		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
408 			mii_phy_reset(miisc);
409 	}
410 	mii_mediachg(mii);
411 
412 	return(0);
413 }
414 
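/*
 * Report current media status.
 */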
415 void ste_ifmedia_sts(ifp, ifmr)
416 	struct ifnet		*ifp;
417 	struct ifmediareq	*ifmr;
418 {
419 	struct ste_softc	*sc;
420 	struct mii_data		*mii;
421 
422 	sc = ifp->if_softc;
423 	mii = &sc->sc_mii;
424 
425 	mii_pollstat(mii);
426 	ifmr->ifm_active = mii->mii_media_active;
427 	ifmr->ifm_status = mii->mii_media_status;
428 
429 	return;
430 }
431 
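/*
 * Wait for a pending DMA halt to complete; warn if it never does.
 */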
432 void ste_wait(sc)
433 	struct ste_softc		*sc;
434 {
435 	register int		i;
436 
437 	for (i = 0; i < STE_TIMEOUT; i++) {
438 		if (!(CSR_READ_4(sc, STE_DMACTL) & STE_DMACTL_DMA_HALTINPROG))
439 			break;
440 	}
441 
442 	if (i == STE_TIMEOUT)
443 		printf("%s: command never completed!\n", sc->sc_dev.dv_xname);
444 
445 	return;
446 }
447 
448 /*
449  * The EEPROM is slow: give it time to come ready after issuing
450  * it a command.
451  */
452 int ste_eeprom_wait(sc)
453 	struct ste_softc		*sc;
454 {
455 	int			i;
456 
457 	DELAY(1000);
458 
459 	for (i = 0; i < 100; i++) {
460 		if (CSR_READ_2(sc, STE_EEPROM_CTL) & STE_EECTL_BUSY)
461 			DELAY(1000);
462 		else
463 			break;
464 	}
465 
466 	if (i == 100) {
467 		printf("%s: eeprom failed to come ready\n", sc->sc_dev.dv_xname);
468 		return(1);
469 	}
470 
471 	return(0);
472 }
473 
474 /*
475  * Read a sequence of words from the EEPROM. Note that ethernet address
476  * data is stored in the EEPROM in network byte order.
477  */
478 int ste_read_eeprom(sc, dest, off, cnt, swap)
479 	struct ste_softc		*sc;
480 	caddr_t			dest;
481 	int			off;
482 	int			cnt;
483 	int			swap;
484 {
485 	int			err = 0, i;
486 	u_int16_t		word = 0, *ptr;
487 
488 	if (ste_eeprom_wait(sc))
489 		return(1);
490 
491 	for (i = 0; i < cnt; i++) {
492 		CSR_WRITE_2(sc, STE_EEPROM_CTL, STE_EEOPCODE_READ | (off + i));
493 		err = ste_eeprom_wait(sc);
494 		if (err)
495 			break;
496 		word = CSR_READ_2(sc, STE_EEPROM_DATA);
497 		ptr = (u_int16_t *)(dest + (i * 2));
498 		if (swap)
499 			*ptr = ntohs(word);
500 		else
501 			*ptr = word;
502 	}
503 
504 	return(err ? 1 : 0);
505 }
506 
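/*
 * Program the 64-bit multicast hash filter. If ALLMULTI or PROMISC is
 * requested, or if a range of addresses must be matched, fall back to
 * accepting all multicast frames.
 */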
507 void ste_setmulti(sc)
508 	struct ste_softc	*sc;
509 {
510 	struct ifnet		*ifp;
511 	struct arpcom		*ac = &sc->arpcom;
512 	struct ether_multi	*enm;
513 	struct ether_multistep	step;
514 	int			h = 0;
515 	u_int32_t		hashes[2] = { 0, 0 };
516 
517 	ifp = &sc->arpcom.ac_if;
518 allmulti:
519 	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
520 		STE_SETBIT1(sc, STE_RX_MODE, STE_RXMODE_ALLMULTI);
521 		STE_CLRBIT1(sc, STE_RX_MODE, STE_RXMODE_MULTIHASH);
522 		return;
523 	}
524 
525 	/* first, zot all the existing hash bits */
526 	CSR_WRITE_2(sc, STE_MAR0, 0);
527 	CSR_WRITE_2(sc, STE_MAR1, 0);
528 	CSR_WRITE_2(sc, STE_MAR2, 0);
529 	CSR_WRITE_2(sc, STE_MAR3, 0);
530 
531 	/* now program new ones */
532 	ETHER_FIRST_MULTI(step, ac, enm);
533 	while (enm != NULL) {
534 		if (bcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
535 			ifp->if_flags |= IFF_ALLMULTI;
536 			goto allmulti;
537 		}
538 		h = ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN) &
539 		    0x0000003F;
540 		if (h < 32)
541 			hashes[0] |= (1 << h);
542 		else
543 			hashes[1] |= (1 << (h - 32));
544 		ETHER_NEXT_MULTI(step, enm);
545 	}
546 
547 	CSR_WRITE_2(sc, STE_MAR0, hashes[0] & 0xFFFF);
548 	CSR_WRITE_2(sc, STE_MAR1, (hashes[0] >> 16) & 0xFFFF);
549 	CSR_WRITE_2(sc, STE_MAR2, hashes[1] & 0xFFFF);
550 	CSR_WRITE_2(sc, STE_MAR3, (hashes[1] >> 16) & 0xFFFF);
551 	STE_CLRBIT1(sc, STE_RX_MODE, STE_RXMODE_ALLMULTI);
552 	STE_SETBIT1(sc, STE_RX_MODE, STE_RXMODE_MULTIHASH);
553 
554 	return;
555 }
556 
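/*
 * Interrupt handler: service RX/TX completions, link changes and host
 * errors, then re-enable interrupts and restart output if the send
 * queue is not empty.
 */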
557 int ste_intr(xsc)
558 	void			*xsc;
559 {
560 	struct ste_softc	*sc;
561 	struct ifnet		*ifp;
562 	u_int16_t		status;
563 	int			claimed = 0;
564 
565 	sc = xsc;
566 	ifp = &sc->arpcom.ac_if;
567 
568 	/* See if this is really our interrupt. */
569 	if (!(CSR_READ_2(sc, STE_ISR) & STE_ISR_INTLATCH))
570 		return claimed;
571 
572 	for (;;) {
573 		status = CSR_READ_2(sc, STE_ISR_ACK);
574 
575 		if (!(status & STE_INTRS))
576 			break;
577 
578 		claimed = 1;
579 
580 		if (status & STE_ISR_RX_DMADONE)
581 			ste_rxeof(sc);
582 
583 		if (status & STE_ISR_TX_DMADONE)
584 			ste_txeof(sc);
585 
586 		if (status & STE_ISR_TX_DONE)
587 			ste_txeoc(sc);
588 
589 		if (status & STE_ISR_LINKEVENT)
590 			mii_pollstat(&sc->sc_mii);
591 
592 		if (status & STE_ISR_HOSTERR) {
593 			ste_reset(sc);
594 			ste_init(sc);
595 		}
596 	}
597 
598 	/* Re-enable interrupts */
599 	CSR_WRITE_2(sc, STE_IMR, STE_INTRS);
600 
601 	if (IFQ_IS_EMPTY(&ifp->if_snd) == 0)
602 		ste_start(ifp);
603 
604 	return claimed;
605 }
606 
607 /*
608  * A frame has been uploaded: pass the resulting mbuf chain up to
609  * the higher level protocols.
610  */
611 void ste_rxeof(sc)
612 	struct ste_softc		*sc;
613 {
614 	struct mbuf		*m;
615 	struct ifnet		*ifp;
616 	struct ste_chain_onefrag	*cur_rx;
617 	int			total_len = 0, count = 0;
618 	u_int32_t		rxstat;
619 
620 	ifp = &sc->arpcom.ac_if;
621 
622 	if (sc->ste_cdata.ste_rx_head->ste_ptr->ste_status == 0) {
623 		cur_rx = sc->ste_cdata.ste_rx_head;
624 		do {
625 			cur_rx = cur_rx->ste_next;
626 			/* If the ring is empty, just return. */
627 			if (cur_rx == sc->ste_cdata.ste_rx_head)
628 				return;
629 		} while (cur_rx->ste_ptr->ste_status == 0);
630 
631 		if (sc->ste_cdata.ste_rx_head->ste_ptr->ste_status == 0)
632 		/* We've fallen behind the chip: catch it. */
633 			sc->ste_cdata.ste_rx_head = cur_rx;
634 	}
635 
636 	while((rxstat = sc->ste_cdata.ste_rx_head->ste_ptr->ste_status)
637 	      & STE_RXSTAT_DMADONE) {
638 		if ((STE_RX_LIST_CNT - count) < 3)
639 			break;
640 
641 		cur_rx = sc->ste_cdata.ste_rx_head;
642 		sc->ste_cdata.ste_rx_head = cur_rx->ste_next;
643 
644 		/*
645 		 * If an error occurs, update stats, clear the
646 		 * status word and leave the mbuf cluster in place:
647 		 * it should simply get re-used next time this descriptor
648 	 	 * comes up in the ring.
649 		 */
650 		if (rxstat & STE_RXSTAT_FRAME_ERR) {
651 			ifp->if_ierrors++;
652 			cur_rx->ste_ptr->ste_status = 0;
653 			continue;
654 		}
655 
656 		/*
657 		 * If the error bit was not set, the upload complete
658 		 * bit should be set which means we have a valid packet.
659 		 * If not, something truly strange has happened.
660 		 */
661 		if (!(rxstat & STE_RXSTAT_DMADONE)) {
662 			printf("%s: bad receive status -- packet dropped\n",
663 				sc->sc_dev.dv_xname);
664 			ifp->if_ierrors++;
665 			cur_rx->ste_ptr->ste_status = 0;
666 			continue;
667 		}
668 
669 		/* No errors; receive the packet. */
670 		m = cur_rx->ste_mbuf;
671 		total_len = cur_rx->ste_ptr->ste_status & STE_RXSTAT_FRAMELEN;
672 
673 		/*
674 		 * Try to conjure up a new mbuf cluster. If that
675 		 * fails, it means we have an out of memory condition and
676 		 * should leave the buffer in place and continue. This will
677 		 * result in a lost packet, but there's little else we
678 		 * can do in this situation.
679 		 */
680 		if (ste_newbuf(sc, cur_rx, NULL) == ENOBUFS) {
681 			ifp->if_ierrors++;
682 			cur_rx->ste_ptr->ste_status = 0;
683 			continue;
684 		}
685 
686 		ifp->if_ipackets++;
687 		m->m_pkthdr.rcvif = ifp;
688 		m->m_pkthdr.len = m->m_len = total_len;
689 
690 #if NBPFILTER > 0
691 		if (ifp->if_bpf)
692 			bpf_mtap(ifp->if_bpf, m);
693 #endif
694 
695 		/* pass it on. */
696 		ether_input_mbuf(ifp, m);
697 
698 		cur_rx->ste_ptr->ste_status = 0;
699 		count++;
700 	}
701 
702 	return;
703 }
704 
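/*
 * Process transmit completion status. Underruns, excessive collisions
 * and reclaim errors cause the chip to be reset and reinitialized; an
 * underrun also raises the TX start threshold.
 */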
705 void ste_txeoc(sc)
706 	struct ste_softc	*sc;
707 {
708 	u_int8_t		txstat;
709 	struct ifnet		*ifp;
710 
711 	ifp = &sc->arpcom.ac_if;
712 
713 	while ((txstat = CSR_READ_1(sc, STE_TX_STATUS)) &
714 	    STE_TXSTATUS_TXDONE) {
715 		if (txstat & STE_TXSTATUS_UNDERRUN ||
716 		    txstat & STE_TXSTATUS_EXCESSCOLLS ||
717 		    txstat & STE_TXSTATUS_RECLAIMERR) {
718 			ifp->if_oerrors++;
719 			printf("%s: transmission error: %x\n",
720 			    sc->sc_dev.dv_xname, txstat);
721 
722 			ste_reset(sc);
723 			ste_init(sc);
724 
725 			if (txstat & STE_TXSTATUS_UNDERRUN &&
726 			    sc->ste_tx_thresh < STE_PACKET_SIZE) {
727 				sc->ste_tx_thresh += STE_MIN_FRAMELEN;
728 				printf("%s: tx underrun, increasing tx"
729 				    " start threshold to %d bytes\n",
730 				    sc->sc_dev.dv_xname, sc->ste_tx_thresh);
731 			}
732 			CSR_WRITE_2(sc, STE_TX_STARTTHRESH, sc->ste_tx_thresh);
733 			CSR_WRITE_2(sc, STE_TX_RECLAIM_THRESH,
734 			    (STE_PACKET_SIZE >> 4));
735 		}
736 		ste_init(sc);
737 		CSR_WRITE_2(sc, STE_TX_STATUS, txstat);
738 	}
739 
740 	return;
741 }
742 
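/*
 * Reclaim transmitted descriptors: free their mbufs, clear OACTIVE and
 * advance the consumer index until it catches up with the producer.
 */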
743 void ste_txeof(sc)
744 	struct ste_softc	*sc;
745 {
746 	struct ste_chain	*cur_tx = NULL;
747 	struct ifnet		*ifp;
748 	int			idx;
749 
750 	ifp = &sc->arpcom.ac_if;
751 
752 	idx = sc->ste_cdata.ste_tx_cons;
753 	while(idx != sc->ste_cdata.ste_tx_prod) {
754 		cur_tx = &sc->ste_cdata.ste_tx_chain[idx];
755 
756 		if (!(cur_tx->ste_ptr->ste_ctl & STE_TXCTL_DMADONE))
757 			break;
758 
759 		if (cur_tx->ste_mbuf != NULL) {
760 			m_freem(cur_tx->ste_mbuf);
761 			cur_tx->ste_mbuf = NULL;
762 			ifp->if_flags &= ~IFF_OACTIVE;
763 		}
764 
765 		ifp->if_opackets++;
766 
767 		STE_INC(idx, STE_TX_LIST_CNT);
768 	}
769 
770 	sc->ste_cdata.ste_tx_cons = idx;
771 	if (idx == sc->ste_cdata.ste_tx_prod)
772 		ifp->if_timer = 0;
773 
774 	return;
775 }
776 
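/*
 * Once-a-second timeout: fold the collision counters into the interface
 * statistics, tick the mii layer and kick the transmitter once the link
 * comes up.
 */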
777 void ste_stats_update(xsc)
778 	void			*xsc;
779 {
780 	struct ste_softc	*sc;
781 	struct ifnet		*ifp;
782 	struct mii_data		*mii;
783 	int			s;
784 
785 	s = splimp();
786 
787 	sc = xsc;
788 	ifp = &sc->arpcom.ac_if;
789 	mii = &sc->sc_mii;
790 
791 	ifp->if_collisions += CSR_READ_1(sc, STE_LATE_COLLS)
792 	    + CSR_READ_1(sc, STE_MULTI_COLLS)
793 	    + CSR_READ_1(sc, STE_SINGLE_COLLS);
794 
795 	mii_tick(mii);
796 	if (!sc->ste_link && mii->mii_media_status & IFM_ACTIVE &&
797 	    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
798 		sc->ste_link++;
799 		if (IFQ_IS_EMPTY(&ifp->if_snd) == 0)
800 			ste_start(ifp);
801 	}
802 
803 	timeout_add(&sc->sc_stats_tmo, hz);
804 	splx(s);
805 
806 	return;
807 }
808 
809 const struct pci_matchid ste_devices[] = {
810 	{ PCI_VENDOR_SUNDANCE, PCI_PRODUCT_SUNDANCE_ST201 },
811 	{ PCI_VENDOR_DLINK, PCI_PRODUCT_DLINK_550TX },
812 };
813 
814 /*
815  * Probe for a Sundance ST201 chip. Check the PCI vendor and device
816  * IDs against our list and return a match if we find one.
817  */
818 int ste_probe(parent, match, aux)
819 	struct device		*parent;
820 	void			*match, *aux;
821 {
822 	return (pci_matchbyid((struct pci_attach_args *)aux, ste_devices,
823 	    sizeof(ste_devices)/sizeof(ste_devices[0])));
824 }
825 
826 /*
827  * Attach the interface. Allocate softc structures, do ifmedia
828  * setup and ethernet/BPF attach.
829  */
830 void ste_attach(parent, self, aux)
831 	struct device		*parent, *self;
832 	void			*aux;
833 {
834 	int			s;
835 	const char		*intrstr = NULL;
836 	u_int32_t		command;
837 	struct ste_softc	*sc = (struct ste_softc *)self;
838 	struct pci_attach_args	*pa = aux;
839 	pci_chipset_tag_t	pc = pa->pa_pc;
840 	pci_intr_handle_t	ih;
841 	struct ifnet		*ifp;
842 	bus_addr_t		iobase;
843 	bus_size_t		iosize;
844 
845 	s = splimp();
846 
847 	/*
848 	 * Handle power management nonsense.
849 	 */
850 	command = pci_conf_read(pc, pa->pa_tag, STE_PCI_CAPID) & 0x000000FF;
851 	if (command == 0x01) {
852 
853 		command = pci_conf_read(pc, pa->pa_tag, STE_PCI_PWRMGMTCTRL);
854 		if (command & STE_PSTATE_MASK) {
855 			u_int32_t		iobase, membase, irq;
856 
857 			/* Save important PCI config data. */
858 			iobase = pci_conf_read(pc, pa->pa_tag, STE_PCI_LOIO);
859 			membase = pci_conf_read(pc, pa->pa_tag, STE_PCI_LOMEM);
860 			irq = pci_conf_read(pc, pa->pa_tag, STE_PCI_INTLINE);
861 
862 			/* Reset the power state. */
863 			printf("%s: chip is in D%d power mode -- setting to D0\n",
864 				sc->sc_dev.dv_xname, command & STE_PSTATE_MASK);
865 			command &= 0xFFFFFFFC;
866 			pci_conf_write(pc, pa->pa_tag, STE_PCI_PWRMGMTCTRL, command);
867 
868 			/* Restore PCI config data. */
869 			pci_conf_write(pc, pa->pa_tag, STE_PCI_LOIO, iobase);
870 			pci_conf_write(pc, pa->pa_tag, STE_PCI_LOMEM, membase);
871 			pci_conf_write(pc, pa->pa_tag, STE_PCI_INTLINE, irq);
872 		}
873 	}
874 
875 	/*
876 	 * Map control/status registers.
877 	 */
878 	command = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
879 	command |= PCI_COMMAND_IO_ENABLE | PCI_COMMAND_MEM_ENABLE |
880 	    PCI_COMMAND_MASTER_ENABLE;
881 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, command);
882 	command = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
883 
884 #ifdef STE_USEIOSPACE
885 	if (!(command & PCI_COMMAND_IO_ENABLE)) {
886 		printf(": failed to enable I/O ports\n");
887 		goto fail;
888 	}
889 	if (pci_io_find(pc, pa->pa_tag, STE_PCI_LOIO, &iobase, &iosize)) {
890 		printf(": can't find I/O space\n");
891 		goto fail;
892 	}
893 	if (bus_space_map(pa->pa_iot, iobase, iosize, 0, &sc->ste_bhandle)) {
894 		printf(": can't map I/O space\n");
895 		goto fail;
896 	}
897 	sc->ste_btag = pa->pa_iot;
898 #else
899 	if (!(command & PCI_COMMAND_MEM_ENABLE)) {
900 		printf(": failed to enable memory mapping\n");
901 		goto fail;
902 	}
903 	if (pci_mem_find(pc, pa->pa_tag, STE_PCI_LOMEM, &iobase, &iosize,NULL)){
904 		printf(": can't find mem space\n");
905 		goto fail;
906 	}
907 	if (bus_space_map(pa->pa_memt, iobase, iosize, 0, &sc->ste_bhandle)) {
908 		printf(": can't map mem space\n");
909 		goto fail;
910 	}
911 	sc->ste_btag = pa->pa_memt;
912 #endif
913 
914 	/* Allocate interrupt */
915 	if (pci_intr_map(pa, &ih)) {
916 		printf(": couldn't map interrupt\n");
917 		goto fail;
918 	}
919 	intrstr = pci_intr_string(pc, ih);
920 	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, ste_intr, sc,
921 	    self->dv_xname);
922 	if (sc->sc_ih == NULL) {
923 		printf(": couldn't establish interrupt");
924 		if (intrstr != NULL)
925 			printf(" at %s", intrstr);
926 		printf("\n");
927 		goto fail;
928 	}
929 	printf(": %s", intrstr);
930 
931 	/* Reset the adapter. */
932 	ste_reset(sc);
933 
934 	/*
935 	 * Get station address from the EEPROM.
936 	 */
937 	ste_read_eeprom(sc,(caddr_t)&sc->arpcom.ac_enaddr,STE_EEADDR_NODE0,3,0);
938 
939 	printf(" address %s\n", ether_sprintf(sc->arpcom.ac_enaddr));
940 
941 	sc->ste_ldata_ptr = malloc(sizeof(struct ste_list_data) + 8,
942 				M_DEVBUF, M_DONTWAIT);
943 	if (sc->ste_ldata_ptr == NULL) {
944 		printf("%s: no memory for list buffers!\n", sc->sc_dev.dv_xname);
945 		goto fail;
946 	}
947 
948 	sc->ste_ldata = (struct ste_list_data *)sc->ste_ldata_ptr;
949 	bzero(sc->ste_ldata, sizeof(struct ste_list_data));
950 
951 	ifp = &sc->arpcom.ac_if;
952 	ifp->if_softc = sc;
953 	ifp->if_mtu = ETHERMTU;
954 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
955 	ifp->if_ioctl = ste_ioctl;
956 	ifp->if_output = ether_output;
957 	ifp->if_start = ste_start;
958 	ifp->if_watchdog = ste_watchdog;
959 	ifp->if_baudrate = 10000000;
960 	IFQ_SET_MAXLEN(&ifp->if_snd, STE_TX_LIST_CNT - 1);
961 	IFQ_SET_READY(&ifp->if_snd);
962 	bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);
963 
964 	sc->ste_tx_thresh = STE_TXSTART_THRESH;
965 
966 	sc->sc_mii.mii_ifp = ifp;
967 	sc->sc_mii.mii_readreg = ste_miibus_readreg;
968 	sc->sc_mii.mii_writereg = ste_miibus_writereg;
969 	sc->sc_mii.mii_statchg = ste_miibus_statchg;
970 	ifmedia_init(&sc->sc_mii.mii_media, 0, ste_ifmedia_upd,ste_ifmedia_sts);
971 	mii_attach(self, &sc->sc_mii, 0xffffffff, MII_PHY_ANY, MII_OFFSET_ANY,
972 	    0);
973 	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
974 		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
975 		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
976 	} else
977 		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
978 
979 	/*
980 	 * Call MI attach routines.
981 	 */
982 	if_attach(ifp);
983 	ether_ifattach(ifp);
984 
985 	shutdownhook_establish(ste_shutdown, sc);
986 
987 fail:
988 	splx(s);
989 	return;
990 }
991 
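/*
 * Attach an mbuf cluster to an RX descriptor, allocating a fresh one
 * if the caller did not pass in an mbuf to recycle.
 */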
992 int ste_newbuf(sc, c, m)
993 	struct ste_softc	*sc;
994 	struct ste_chain_onefrag	*c;
995 	struct mbuf		*m;
996 {
997 	struct mbuf		*m_new = NULL;
998 
999 	if (m == NULL) {
1000 		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
1001 		if (m_new == NULL)
1002 			return(ENOBUFS);
1003 		MCLGET(m_new, M_DONTWAIT);
1004 		if (!(m_new->m_flags & M_EXT)) {
1005 			m_freem(m_new);
1006 			return(ENOBUFS);
1007 		}
1008 		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
1009 	} else {
1010 		m_new = m;
1011 		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
1012 		m_new->m_data = m_new->m_ext.ext_buf;
1013 	}
1014 
1015 	m_adj(m_new, ETHER_ALIGN);
1016 
1017 	c->ste_mbuf = m_new;
1018 	c->ste_ptr->ste_status = 0;
1019 	c->ste_ptr->ste_frag.ste_addr = vtophys(mtod(m_new, caddr_t));
1020 	c->ste_ptr->ste_frag.ste_len = 1536 | STE_FRAG_LAST;
1021 
1022 	return(0);
1023 }
1024 
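/*
 * Set up the RX descriptors as a circular list and load each one with
 * an mbuf cluster.
 */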
1025 int ste_init_rx_list(sc)
1026 	struct ste_softc	*sc;
1027 {
1028 	struct ste_chain_data	*cd;
1029 	struct ste_list_data	*ld;
1030 	int			i;
1031 
1032 	cd = &sc->ste_cdata;
1033 	ld = sc->ste_ldata;
1034 
1035 	for (i = 0; i < STE_RX_LIST_CNT; i++) {
1036 		cd->ste_rx_chain[i].ste_ptr = &ld->ste_rx_list[i];
1037 		if (ste_newbuf(sc, &cd->ste_rx_chain[i], NULL) == ENOBUFS)
1038 			return(ENOBUFS);
1039 		if (i == (STE_RX_LIST_CNT - 1)) {
1040 			cd->ste_rx_chain[i].ste_next =
1041 			    &cd->ste_rx_chain[0];
1042 			ld->ste_rx_list[i].ste_next =
1043 			    vtophys(&ld->ste_rx_list[0]);
1044 		} else {
1045 			cd->ste_rx_chain[i].ste_next =
1046 			    &cd->ste_rx_chain[i + 1];
1047 			ld->ste_rx_list[i].ste_next =
1048 			    vtophys(&ld->ste_rx_list[i + 1]);
1049 		}
1050 		ld->ste_rx_list[i].ste_status = 0;
1051 	}
1052 
1053 	cd->ste_rx_head = &cd->ste_rx_chain[0];
1054 
1055 	return(0);
1056 }
1057 
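/*
 * Link the TX descriptors into a ring and reset the producer and
 * consumer indexes.
 */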
1058 void ste_init_tx_list(sc)
1059 	struct ste_softc	*sc;
1060 {
1061 	struct ste_chain_data	*cd;
1062 	struct ste_list_data	*ld;
1063 	int			i;
1064 
1065 	cd = &sc->ste_cdata;
1066 	ld = sc->ste_ldata;
1067 	for (i = 0; i < STE_TX_LIST_CNT; i++) {
1068 		cd->ste_tx_chain[i].ste_ptr = &ld->ste_tx_list[i];
1069 		cd->ste_tx_chain[i].ste_phys = vtophys(&ld->ste_tx_list[i]);
1070 		if (i == (STE_TX_LIST_CNT - 1))
1071 			cd->ste_tx_chain[i].ste_next =
1072 			    &cd->ste_tx_chain[0];
1073 		else
1074 			cd->ste_tx_chain[i].ste_next =
1075 			    &cd->ste_tx_chain[i + 1];
1076 	}
1077 
1078 	bzero((char *)ld->ste_tx_list,
1079 	    sizeof(struct ste_desc) * STE_TX_LIST_CNT);
1080 
1081 	cd->ste_tx_prod = 0;
1082 	cd->ste_tx_cons = 0;
1083 
1084 	return;
1085 }
1086 
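/*
 * Initialize the hardware: program the station address, set up the
 * RX and TX rings, configure the RX filter and TX thresholds, then
 * enable the receiver, transmitter, statistics and interrupts.
 */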
1087 void ste_init(xsc)
1088 	void			*xsc;
1089 {
1090 	struct ste_softc	*sc = (struct ste_softc *)xsc;
1091 	struct ifnet		*ifp = &sc->arpcom.ac_if;
1092 	struct mii_data		*mii;
1093 	int			i, s;
1094 
1095 	s = splimp();
1096 
1097 	ste_stop(sc);
1098 
1099 	mii = &sc->sc_mii;
1100 
1101 	/* Init our MAC address */
1102 	for (i = 0; i < ETHER_ADDR_LEN; i++) {
1103 		CSR_WRITE_1(sc, STE_PAR0 + i, sc->arpcom.ac_enaddr[i]);
1104 	}
1105 
1106 	/* Init RX list */
1107 	if (ste_init_rx_list(sc) == ENOBUFS) {
1108 		printf("%s: initialization failed: no "
1109 		    "memory for RX buffers\n", sc->sc_dev.dv_xname);
1110 		ste_stop(sc);
1111 		splx(s);
1112 		return;
1113 	}
1114 
1115 	/* Set RX polling interval */
1116 	CSR_WRITE_1(sc, STE_RX_DMAPOLL_PERIOD, 64);
1117 
1118 	/* Init TX descriptors */
1119 	ste_init_tx_list(sc);
1120 
1121 	/* Set the TX freethresh value */
1122 	CSR_WRITE_1(sc, STE_TX_DMABURST_THRESH, STE_PACKET_SIZE >> 8);
1123 
1124 	/* Set the TX start threshold for best performance. */
1125 	CSR_WRITE_2(sc, STE_TX_STARTTHRESH, sc->ste_tx_thresh);
1126 
1127 	/* Set the TX reclaim threshold. */
1128 	CSR_WRITE_1(sc, STE_TX_RECLAIM_THRESH, (STE_PACKET_SIZE >> 4));
1129 
1130 	/* Set up the RX filter. */
1131 	CSR_WRITE_1(sc, STE_RX_MODE, STE_RXMODE_UNICAST);
1132 
1133 	/* If we want promiscuous mode, set the promiscuous receive bit. */
1134 	if (ifp->if_flags & IFF_PROMISC) {
1135 		STE_SETBIT1(sc, STE_RX_MODE, STE_RXMODE_PROMISC);
1136 	} else {
1137 		STE_CLRBIT1(sc, STE_RX_MODE, STE_RXMODE_PROMISC);
1138 	}
1139 
1140 	/* Set capture broadcast bit to accept broadcast frames. */
1141 	if (ifp->if_flags & IFF_BROADCAST) {
1142 		STE_SETBIT1(sc, STE_RX_MODE, STE_RXMODE_BROADCAST);
1143 	} else {
1144 		STE_CLRBIT1(sc, STE_RX_MODE, STE_RXMODE_BROADCAST);
1145 	}
1146 
1147 	ste_setmulti(sc);
1148 
1149 	/* Load the address of the RX list. */
1150 	STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_RXDMA_STALL);
1151 	ste_wait(sc);
1152 	CSR_WRITE_4(sc, STE_RX_DMALIST_PTR,
1153 	    vtophys(&sc->ste_ldata->ste_rx_list[0]));
1154 	STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_RXDMA_UNSTALL);
1155 	STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_RXDMA_UNSTALL);
1156 
1157 	/* Set TX polling interval (defer until we TX first packet) */
1158 	CSR_WRITE_1(sc, STE_TX_DMAPOLL_PERIOD, 0);
1159 
1160 	/* Load address of the TX list */
1161 	STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_TXDMA_STALL);
1162 	ste_wait(sc);
1163 	CSR_WRITE_4(sc, STE_TX_DMALIST_PTR, 0);
1164 	STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_TXDMA_UNSTALL);
1165 	STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_TXDMA_UNSTALL);
1166 	ste_wait(sc);
1167 	sc->ste_tx_prev = NULL;
1168 
1169 	/* Enable receiver and transmitter */
1170 	CSR_WRITE_2(sc, STE_MACCTL0, 0);
1171 	CSR_WRITE_2(sc, STE_MACCTL1, 0);
1172 	STE_SETBIT2(sc, STE_MACCTL1, STE_MACCTL1_TX_ENABLE);
1173 	STE_SETBIT2(sc, STE_MACCTL1, STE_MACCTL1_RX_ENABLE);
1174 
1175 	/* Enable stats counters. */
1176 	STE_SETBIT2(sc, STE_MACCTL1, STE_MACCTL1_STATS_ENABLE);
1177 
1178 	/* Enable interrupts. */
1179 	CSR_WRITE_2(sc, STE_ISR, 0xFFFF);
1180 	CSR_WRITE_2(sc, STE_IMR, STE_INTRS);
1181 
1182 	ste_ifmedia_upd(ifp);
1183 
1184 	ifp->if_flags |= IFF_RUNNING;
1185 	ifp->if_flags &= ~IFF_OACTIVE;
1186 
1187 	splx(s);
1188 
1189 	timeout_set(&sc->sc_stats_tmo, ste_stats_update, sc);
1190 	timeout_add(&sc->sc_stats_tmo, hz);
1191 
1192 	return;
1193 }
1194 
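/*
 * Stop the chip: disable interrupts, the receiver and the transmitter,
 * then free the mbufs attached to the RX and TX rings.
 */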
1195 void ste_stop(sc)
1196 	struct ste_softc	*sc;
1197 {
1198 	int			i;
1199 	struct ifnet		*ifp;
1200 
1201 	ifp = &sc->arpcom.ac_if;
1202 
1203 	timeout_del(&sc->sc_stats_tmo);
1204 
1205 	CSR_WRITE_2(sc, STE_IMR, 0);
1206 	STE_SETBIT2(sc, STE_MACCTL1, STE_MACCTL1_TX_DISABLE);
1207 	STE_SETBIT2(sc, STE_MACCTL1, STE_MACCTL1_RX_DISABLE);
1208 	STE_SETBIT2(sc, STE_MACCTL1, STE_MACCTL1_STATS_DISABLE);
1209 	STE_SETBIT2(sc, STE_DMACTL, STE_DMACTL_TXDMA_STALL);
1210 	STE_SETBIT2(sc, STE_DMACTL, STE_DMACTL_RXDMA_STALL);
1211 	ste_wait(sc);
1212 	/*
1213 	 * Try really hard to stop the RX engine; otherwise, under heavy RX
1214 	 * traffic, the chip will write into de-allocated memory.
1215 	 */
1216 	ste_reset(sc);
1217 
1218 	sc->ste_link = 0;
1219 
1220 	for (i = 0; i < STE_RX_LIST_CNT; i++) {
1221 		if (sc->ste_cdata.ste_rx_chain[i].ste_mbuf != NULL) {
1222 			m_freem(sc->ste_cdata.ste_rx_chain[i].ste_mbuf);
1223 			sc->ste_cdata.ste_rx_chain[i].ste_mbuf = NULL;
1224 		}
1225 	}
1226 
1227 	for (i = 0; i < STE_TX_LIST_CNT; i++) {
1228 		if (sc->ste_cdata.ste_tx_chain[i].ste_mbuf != NULL) {
1229 			m_freem(sc->ste_cdata.ste_tx_chain[i].ste_mbuf);
1230 			sc->ste_cdata.ste_tx_chain[i].ste_mbuf = NULL;
1231 		}
1232 	}
1233 
1234 	bzero(sc->ste_ldata, sizeof(struct ste_list_data));
1235 
1236 	ifp->if_flags &= ~(IFF_RUNNING|IFF_OACTIVE);
1237 
1238 	return;
1239 }
1240 
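/*
 * Issue a global reset and wait for the chip to finish it.
 */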
1241 void ste_reset(sc)
1242 	struct ste_softc	*sc;
1243 {
1244 	int			i;
1245 
1246 	STE_SETBIT4(sc, STE_ASICCTL,
1247 	    STE_ASICCTL_GLOBAL_RESET|STE_ASICCTL_RX_RESET|
1248 	    STE_ASICCTL_TX_RESET|STE_ASICCTL_DMA_RESET|
1249 	    STE_ASICCTL_FIFO_RESET|STE_ASICCTL_NETWORK_RESET|
1250 	    STE_ASICCTL_AUTOINIT_RESET|STE_ASICCTL_HOST_RESET|
1251 	    STE_ASICCTL_EXTRESET_RESET);
1252 
1253 	DELAY(100000);
1254 
1255 	for (i = 0; i < STE_TIMEOUT; i++) {
1256 		if (!(CSR_READ_4(sc, STE_ASICCTL) & STE_ASICCTL_RESET_BUSY))
1257 			break;
1258 	}
1259 
1260 	if (i == STE_TIMEOUT)
1261 		printf("%s: global reset never completed\n", sc->sc_dev.dv_xname);
1262 
1263 	return;
1264 }
1265 
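/*
 * Handle socket ioctls: interface address and flag changes, multicast
 * list updates and media selection.
 */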
1266 int ste_ioctl(ifp, command, data)
1267 	struct ifnet		*ifp;
1268 	u_long			command;
1269 	caddr_t			data;
1270 {
1271 	struct ste_softc	*sc = ifp->if_softc;
1272 	struct ifreq		*ifr = (struct ifreq *) data;
1273 	struct ifaddr		*ifa = (struct ifaddr *)data;
1274 	struct mii_data		*mii;
1275 	int			s, error = 0;
1276 
1277 	s = splimp();
1278 
1279 	if ((error = ether_ioctl(ifp, &sc->arpcom, command, data)) > 0) {
1280 		splx(s);
1281 		return error;
1282 	}
1283 
1284 	switch(command) {
1285 	case SIOCSIFADDR:
1286 		ifp->if_flags |= IFF_UP;
1287 		switch (ifa->ifa_addr->sa_family) {
1288 		case AF_INET:
1289 			ste_init(sc);
1290 			arp_ifinit(&sc->arpcom, ifa);
1291 			break;
1292 		default:
1293 			ste_init(sc);
1294 			break;
1295 		}
1296 		break;
1297 	case SIOCSIFFLAGS:
1298 		if (ifp->if_flags & IFF_UP) {
1299 			if (ifp->if_flags & IFF_RUNNING &&
1300 			    ifp->if_flags & IFF_PROMISC &&
1301 			    !(sc->ste_if_flags & IFF_PROMISC)) {
1302 				STE_SETBIT1(sc, STE_RX_MODE,
1303 				    STE_RXMODE_PROMISC);
1304 			} else if (ifp->if_flags & IFF_RUNNING &&
1305 			    !(ifp->if_flags & IFF_PROMISC) &&
1306 			    sc->ste_if_flags & IFF_PROMISC) {
1307 				STE_CLRBIT1(sc, STE_RX_MODE,
1308 				    STE_RXMODE_PROMISC);
1309 			}
1310 			if (ifp->if_flags & IFF_RUNNING &&
1311 			    (ifp->if_flags ^ sc->ste_if_flags) & IFF_ALLMULTI)
1312 				ste_setmulti(sc);
1313 			if (!(ifp->if_flags & IFF_RUNNING)) {
1314 				sc->ste_tx_thresh = STE_TXSTART_THRESH;
1315 				ste_init(sc);
1316 			}
1317 		} else {
1318 			if (ifp->if_flags & IFF_RUNNING)
1319 				ste_stop(sc);
1320 		}
1321 		sc->ste_if_flags = ifp->if_flags;
1322 		error = 0;
1323 		break;
1324 	case SIOCADDMULTI:
1325 	case SIOCDELMULTI:
1326 		error = (command == SIOCADDMULTI) ?
1327 		    ether_addmulti(ifr, &sc->arpcom) :
1328 		    ether_delmulti(ifr, &sc->arpcom);
1329 
1330 		if (error == ENETRESET) {
1331 			/*
1332 			 * Multicast list has changed; set the hardware
1333 			 * filter accordingly.
1334 			 */
1335 			ste_setmulti(sc);
1336 			error = 0;
1337 		}
1338 		break;
1339 	case SIOCGIFMEDIA:
1340 	case SIOCSIFMEDIA:
1341 		mii = &sc->sc_mii;
1342 		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
1343 		break;
1344 	default:
1345 		error = EINVAL;
1346 		break;
1347 	}
1348 
1349 	splx(s);
1350 
1351 	return(error);
1352 }
1353 
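/*
 * Map an mbuf chain into the fragment list of a TX descriptor. Chains
 * with more than STE_MAXFRAGS segments are first copied into a single
 * mbuf (or cluster).
 */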
1354 int ste_encap(sc, c, m_head)
1355 	struct ste_softc	*sc;
1356 	struct ste_chain	*c;
1357 	struct mbuf		*m_head;
1358 {
1359 	int			frag = 0;
1360 	struct ste_frag		*f = NULL;
1361 	struct mbuf		*m;
1362 	struct ste_desc		*d;
1363 
1364 	d = c->ste_ptr;
1365 	d->ste_ctl = 0;
1366 
1367 encap_retry:
1368 	for (m = m_head, frag = 0; m != NULL; m = m->m_next) {
1369 		if (m->m_len != 0) {
1370 			if (frag == STE_MAXFRAGS)
1371 				break;
1372 			f = &d->ste_frags[frag];
1373 			f->ste_addr = vtophys(mtod(m, vaddr_t));
1374 			f->ste_len = m->m_len;
1375 			frag++;
1376 		}
1377 	}
1378 
1379 	if (m != NULL) {
1380 		struct mbuf *mn;
1381 
1382 		/*
1383 		 * We ran out of segments. We have to recopy this
1384 		 * mbuf chain first. Bail out if we can't get the
1385 		 * new buffers.
1386 		 */
1387 		MGETHDR(mn, M_DONTWAIT, MT_DATA);
1388 		if (mn == NULL) {
1389 			m_freem(m_head);
1390 			return ENOMEM;
1391 		}
1392 		if (m_head->m_pkthdr.len > MHLEN) {
1393 			MCLGET(mn, M_DONTWAIT);
1394 			if ((mn->m_flags & M_EXT) == 0) {
1395 				m_freem(mn);
1396 				m_freem(m_head);
1397 				return ENOMEM;
1398 			}
1399 		}
1400 		m_copydata(m_head, 0, m_head->m_pkthdr.len,
1401 			   mtod(mn, caddr_t));
1402 		mn->m_pkthdr.len = mn->m_len = m_head->m_pkthdr.len;
1403 		m_freem(m_head);
1404 		m_head = mn;
1405 		goto encap_retry;
1406 	}
1407 
1408 	c->ste_mbuf = m_head;
1409 	d->ste_frags[frag - 1].ste_len |= STE_FRAG_LAST;
1410 	d->ste_ctl = 1;
1411 
1412 	return(0);
1413 }
1414 
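/*
 * Dequeue packets from the send queue and encapsulate them onto the TX
 * ring. The first packet loads the TX list pointer and starts the DMA
 * engine; subsequent packets are chained onto the previous descriptor.
 */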
1415 void ste_start(ifp)
1416 	struct ifnet		*ifp;
1417 {
1418 	struct ste_softc	*sc;
1419 	struct mbuf		*m_head = NULL;
1420 	struct ste_chain	*cur_tx = NULL;
1421 	int			idx;
1422 
1423 	sc = ifp->if_softc;
1424 
1425 	if (!sc->ste_link)
1426 		return;
1427 
1428 	if (ifp->if_flags & IFF_OACTIVE)
1429 		return;
1430 
1431 	idx = sc->ste_cdata.ste_tx_prod;
1432 
1433 	while(sc->ste_cdata.ste_tx_chain[idx].ste_mbuf == NULL) {
1434 		/*
1435 		 * We cannot re-use the last (free) descriptor;
1436 		 * the chip may not have read its ste_next yet.
1437 		 */
1438 		if (STE_NEXT(idx, STE_TX_LIST_CNT) ==
1439 		    sc->ste_cdata.ste_tx_cons) {
1440 			ifp->if_flags |= IFF_OACTIVE;
1441 			break;
1442 		}
1443 
1444 		IFQ_DEQUEUE(&ifp->if_snd, m_head);
1445 		if (m_head == NULL)
1446 			break;
1447 
1448 		cur_tx = &sc->ste_cdata.ste_tx_chain[idx];
1449 
1450 		if (ste_encap(sc, cur_tx, m_head) != 0)
1451 			break;
1452 
1453 		cur_tx->ste_ptr->ste_next = 0;
1454 
1455 		if (sc->ste_tx_prev == NULL) {
1456 			cur_tx->ste_ptr->ste_ctl = STE_TXCTL_DMAINTR | 1;
1457 			/* Load address of the TX list */
1458 			STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_TXDMA_STALL);
1459 			ste_wait(sc);
1460 
1461 			CSR_WRITE_4(sc, STE_TX_DMALIST_PTR,
1462 			    vtophys(&sc->ste_ldata->ste_tx_list[0]));
1463 
1464 			/* Set TX polling interval to start TX engine */
1465 			CSR_WRITE_1(sc, STE_TX_DMAPOLL_PERIOD, 64);
1466 
1467 			STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_TXDMA_UNSTALL);
1468 			ste_wait(sc);
1469 		} else {
1470 			cur_tx->ste_ptr->ste_ctl = STE_TXCTL_DMAINTR | 1;
1471 			sc->ste_tx_prev->ste_ptr->ste_next
1472 				= cur_tx->ste_phys;
1473 		}
1474 
1475 		sc->ste_tx_prev = cur_tx;
1476 
1477 #if NBPFILTER > 0
1478 		/*
1479 		 * If there's a BPF listener, bounce a copy of this frame
1480 		 * to him.
1481 	 	 */
1482 		if (ifp->if_bpf)
1483 			bpf_mtap(ifp->if_bpf, cur_tx->ste_mbuf);
1484 #endif
1485 
1486 		STE_INC(idx, STE_TX_LIST_CNT);
1487 		ifp->if_timer = 5;
1488 		sc->ste_cdata.ste_tx_prod = idx;
1489 	}
1490 
1491 	return;
1492 }
1493 
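/*
 * Watchdog timeout: the transmitter has been idle too long, so reclaim
 * what we can and reinitialize the chip.
 */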
1494 void ste_watchdog(ifp)
1495 	struct ifnet		*ifp;
1496 {
1497 	struct ste_softc	*sc;
1498 
1499 	sc = ifp->if_softc;
1500 
1501 	ifp->if_oerrors++;
1502 	printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);
1503 
1504 	ste_txeoc(sc);
1505 	ste_txeof(sc);
1506 	ste_rxeof(sc);
1507 	ste_reset(sc);
1508 	ste_init(sc);
1509 
1510 	if (IFQ_IS_EMPTY(&ifp->if_snd) == 0)
1511 		ste_start(ifp);
1512 
1513 	return;
1514 }
1515 
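/*
 * Shutdown hook: make sure the chip is stopped before reboot.
 */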
1516 void ste_shutdown(v)
1517 	void			*v;
1518 {
1519 	struct ste_softc	*sc = (struct ste_softc *)v;
1520 
1521 	ste_stop(sc);
1522 }
1523 
1524 struct cfattach ste_ca = {
1525 	sizeof(struct ste_softc), ste_probe, ste_attach
1526 };
1527 
1528 struct cfdriver ste_cd = {
1529 	0, "ste", DV_IFNET
1530 };
1531 
1532