1 /*	$OpenBSD: if_ste.c,v 1.64 2016/04/13 10:34:32 mpi Exp $ */
2 /*
3  * Copyright (c) 1997, 1998, 1999
4  *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  * 3. All advertising materials mentioning features or use of this software
15  *    must display the following acknowledgement:
16  *	This product includes software developed by Bill Paul.
17  * 4. Neither the name of the author nor the names of any co-contributors
18  *    may be used to endorse or promote products derived from this software
19  *    without specific prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
22  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24  * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
25  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
31  * THE POSSIBILITY OF SUCH DAMAGE.
32  *
33  * $FreeBSD: src/sys/pci/if_ste.c,v 1.14 1999/12/07 20:14:42 wpaul Exp $
34  */
35 
36 #include "bpfilter.h"
37 
38 #include <sys/param.h>
39 #include <sys/systm.h>
40 #include <sys/mbuf.h>
41 #include <sys/protosw.h>
42 #include <sys/socket.h>
43 #include <sys/ioctl.h>
44 #include <sys/errno.h>
45 #include <sys/malloc.h>
46 #include <sys/kernel.h>
47 #include <sys/timeout.h>
48 
49 #include <net/if.h>
50 
51 #include <netinet/in.h>
52 #include <netinet/if_ether.h>
53 
54 #include <net/if_media.h>
55 
56 #if NBPFILTER > 0
57 #include <net/bpf.h>
58 #endif
59 
60 #include <uvm/uvm_extern.h>              /* for vtophys */
61 
62 #include <sys/device.h>
63 
64 #include <dev/mii/mii.h>
65 #include <dev/mii/miivar.h>
66 
67 #include <dev/pci/pcireg.h>
68 #include <dev/pci/pcivar.h>
69 #include <dev/pci/pcidevs.h>
70 
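/*
 * Use PCI I/O space for register access; without this define the
 * attach code maps the chip's memory space BAR instead (see the
 * #ifdef STE_USEIOSPACE block in ste_attach()).
 */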
71 #define STE_USEIOSPACE
72 
73 #include <dev/pci/if_stereg.h>
74 
75 int	ste_probe(struct device *, void *, void *);
76 void	ste_attach(struct device *, struct device *, void *);
77 int	ste_intr(void *);
78 void	ste_init(void *);
79 void	ste_rxeoc(struct ste_softc *);
80 void	ste_rxeof(struct ste_softc *);
81 void	ste_txeoc(struct ste_softc *);
82 void	ste_txeof(struct ste_softc *);
83 void	ste_stats_update(void *);
84 void	ste_stop(struct ste_softc *);
85 void	ste_reset(struct ste_softc *);
86 int	ste_ioctl(struct ifnet *, u_long, caddr_t);
87 int	ste_encap(struct ste_softc *, struct ste_chain *,
88 	    struct mbuf *);
89 void	ste_start(struct ifnet *);
90 void	ste_watchdog(struct ifnet *);
91 int	ste_newbuf(struct ste_softc *,
92 	    struct ste_chain_onefrag *,
93 	    struct mbuf *);
94 int	ste_ifmedia_upd(struct ifnet *);
95 void	ste_ifmedia_sts(struct ifnet *, struct ifmediareq *);
96 
97 void	ste_mii_sync(struct ste_softc *);
98 void	ste_mii_send(struct ste_softc *, u_int32_t, int);
99 int	ste_mii_readreg(struct ste_softc *,
100 	    struct ste_mii_frame *);
101 int	ste_mii_writereg(struct ste_softc *,
102 	    struct ste_mii_frame *);
103 int	ste_miibus_readreg(struct device *, int, int);
104 void	ste_miibus_writereg(struct device *, int, int, int);
105 void	ste_miibus_statchg(struct device *);
106 
107 int	ste_eeprom_wait(struct ste_softc *);
108 int	ste_read_eeprom(struct ste_softc *, caddr_t, int,
109 	    int, int);
110 void	ste_wait(struct ste_softc *);
111 void	ste_iff(struct ste_softc *);
112 int	ste_init_rx_list(struct ste_softc *);
113 void	ste_init_tx_list(struct ste_softc *);
114 
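/*
 * Read-modify-write helpers for setting and clearing bits in 4-, 2-
 * and 1-byte wide chip registers.
 */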
115 #define STE_SETBIT4(sc, reg, x)				\
116 	CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) | x)
117 
118 #define STE_CLRBIT4(sc, reg, x)				\
119 	CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) & ~x)
120 
121 #define STE_SETBIT2(sc, reg, x)				\
122 	CSR_WRITE_2(sc, reg, CSR_READ_2(sc, reg) | x)
123 
124 #define STE_CLRBIT2(sc, reg, x)				\
125 	CSR_WRITE_2(sc, reg, CSR_READ_2(sc, reg) & ~x)
126 
127 #define STE_SETBIT1(sc, reg, x)				\
128 	CSR_WRITE_1(sc, reg, CSR_READ_1(sc, reg) | x)
129 
130 #define STE_CLRBIT1(sc, reg, x)				\
131 	CSR_WRITE_1(sc, reg, CSR_READ_1(sc, reg) & ~x)
132 
133 
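/*
 * MII_SET/MII_CLR bit-bang the MDIO control lines through the low
 * byte of the PHYCTL register; the ste_mii_* routines below implement
 * the MII management-frame protocol in software on top of them.
 */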
134 #define MII_SET(x)		STE_SETBIT1(sc, STE_PHYCTL, x)
135 #define MII_CLR(x)		STE_CLRBIT1(sc, STE_PHYCTL, x)
136 
137 const struct pci_matchid ste_devices[] = {
138 	{ PCI_VENDOR_DLINK, PCI_PRODUCT_DLINK_DFE550TX },
139 	{ PCI_VENDOR_SUNDANCE, PCI_PRODUCT_SUNDANCE_ST201_1 },
140 	{ PCI_VENDOR_SUNDANCE, PCI_PRODUCT_SUNDANCE_ST201_2 }
141 };
142 
143 struct cfattach ste_ca = {
144 	sizeof(struct ste_softc), ste_probe, ste_attach
145 };
146 
147 struct cfdriver ste_cd = {
148 	NULL, "ste", DV_IFNET
149 };
150 
/*
 * Sync the PHYs by setting the data bit and strobing the clock 32 times.
 */
154 void
155 ste_mii_sync(struct ste_softc *sc)
156 {
157 	int		i;
158 
159 	MII_SET(STE_PHYCTL_MDIR|STE_PHYCTL_MDATA);
160 
161 	for (i = 0; i < 32; i++) {
162 		MII_SET(STE_PHYCTL_MCLK);
163 		DELAY(1);
164 		MII_CLR(STE_PHYCTL_MCLK);
165 		DELAY(1);
166 	}
167 }
168 
169 /*
170  * Clock a series of bits through the MII.
171  */
172 void
173 ste_mii_send(struct ste_softc *sc, u_int32_t bits, int cnt)
174 {
175 	int		i;
176 
177 	MII_CLR(STE_PHYCTL_MCLK);
178 
179 	for (i = (0x1 << (cnt - 1)); i; i >>= 1) {
180                 if (bits & i) {
181 			MII_SET(STE_PHYCTL_MDATA);
182                 } else {
183 			MII_CLR(STE_PHYCTL_MDATA);
184                 }
185 		DELAY(1);
186 		MII_CLR(STE_PHYCTL_MCLK);
187 		DELAY(1);
188 		MII_SET(STE_PHYCTL_MCLK);
189 	}
190 }
191 
/*
 * Read a PHY register through the MII.
 */
195 int
196 ste_mii_readreg(struct ste_softc *sc, struct ste_mii_frame *frame)
197 {
198 	int		ack, i, s;
199 
200 	s = splnet();
201 
202 	/*
203 	 * Set up frame for RX.
204 	 */
205 	frame->mii_stdelim = STE_MII_STARTDELIM;
206 	frame->mii_opcode = STE_MII_READOP;
207 	frame->mii_turnaround = 0;
208 	frame->mii_data = 0;
209 
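	/*
	 * A clause 22 MII management read is a 32-bit preamble
	 * (generated by ste_mii_sync() below), a 2-bit start delimiter,
	 * a 2-bit read opcode, the 5-bit PHY and register addresses, a
	 * turnaround cycle during which the PHY drives an ack bit, and
	 * finally 16 data bits clocked out of the PHY.
	 */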
210 	CSR_WRITE_2(sc, STE_PHYCTL, 0);
211 	/*
212  	 * Turn on data xmit.
213 	 */
214 	MII_SET(STE_PHYCTL_MDIR);
215 
216 	ste_mii_sync(sc);
217 
218 	/*
219 	 * Send command/address info.
220 	 */
221 	ste_mii_send(sc, frame->mii_stdelim, 2);
222 	ste_mii_send(sc, frame->mii_opcode, 2);
223 	ste_mii_send(sc, frame->mii_phyaddr, 5);
224 	ste_mii_send(sc, frame->mii_regaddr, 5);
225 
226 	/* Turn off xmit. */
227 	MII_CLR(STE_PHYCTL_MDIR);
228 
229 	/* Idle bit */
230 	MII_CLR((STE_PHYCTL_MCLK|STE_PHYCTL_MDATA));
231 	DELAY(1);
232 	MII_SET(STE_PHYCTL_MCLK);
233 	DELAY(1);
234 
235 	/* Check for ack */
236 	MII_CLR(STE_PHYCTL_MCLK);
237 	DELAY(1);
238 	ack = CSR_READ_2(sc, STE_PHYCTL) & STE_PHYCTL_MDATA;
239 	MII_SET(STE_PHYCTL_MCLK);
240 	DELAY(1);
241 
242 	/*
243 	 * Now try reading data bits. If the ack failed, we still
244 	 * need to clock through 16 cycles to keep the PHY(s) in sync.
245 	 */
246 	if (ack) {
247 		for(i = 0; i < 16; i++) {
248 			MII_CLR(STE_PHYCTL_MCLK);
249 			DELAY(1);
250 			MII_SET(STE_PHYCTL_MCLK);
251 			DELAY(1);
252 		}
253 		goto fail;
254 	}
255 
256 	for (i = 0x8000; i; i >>= 1) {
257 		MII_CLR(STE_PHYCTL_MCLK);
258 		DELAY(1);
259 		if (!ack) {
260 			if (CSR_READ_2(sc, STE_PHYCTL) & STE_PHYCTL_MDATA)
261 				frame->mii_data |= i;
262 			DELAY(1);
263 		}
264 		MII_SET(STE_PHYCTL_MCLK);
265 		DELAY(1);
266 	}
267 
268 fail:
269 
270 	MII_CLR(STE_PHYCTL_MCLK);
271 	DELAY(1);
272 	MII_SET(STE_PHYCTL_MCLK);
273 	DELAY(1);
274 
275 	splx(s);
276 
277 	if (ack)
278 		return(1);
279 	return(0);
280 }
281 
282 /*
283  * Write to a PHY register through the MII.
284  */
285 int
286 ste_mii_writereg(struct ste_softc *sc, struct ste_mii_frame *frame)
287 {
288 	int		s;
289 
290 	s = splnet();
291 	/*
292 	 * Set up frame for TX.
293 	 */
294 
295 	frame->mii_stdelim = STE_MII_STARTDELIM;
296 	frame->mii_opcode = STE_MII_WRITEOP;
297 	frame->mii_turnaround = STE_MII_TURNAROUND;
298 
299 	/*
300  	 * Turn on data output.
301 	 */
302 	MII_SET(STE_PHYCTL_MDIR);
303 
304 	ste_mii_sync(sc);
305 
306 	ste_mii_send(sc, frame->mii_stdelim, 2);
307 	ste_mii_send(sc, frame->mii_opcode, 2);
308 	ste_mii_send(sc, frame->mii_phyaddr, 5);
309 	ste_mii_send(sc, frame->mii_regaddr, 5);
310 	ste_mii_send(sc, frame->mii_turnaround, 2);
311 	ste_mii_send(sc, frame->mii_data, 16);
312 
313 	/* Idle bit. */
314 	MII_SET(STE_PHYCTL_MCLK);
315 	DELAY(1);
316 	MII_CLR(STE_PHYCTL_MCLK);
317 	DELAY(1);
318 
319 	/*
320 	 * Turn off xmit.
321 	 */
322 	MII_CLR(STE_PHYCTL_MDIR);
323 
324 	splx(s);
325 
326 	return(0);
327 }
328 
329 int
330 ste_miibus_readreg(struct device *self, int phy, int reg)
331 {
332 	struct ste_softc	*sc = (struct ste_softc *)self;
333 	struct ste_mii_frame	frame;
334 
335 	if (sc->ste_one_phy && phy != 0)
336 		return (0);
337 
338 	bzero(&frame, sizeof(frame));
339 
340 	frame.mii_phyaddr = phy;
341 	frame.mii_regaddr = reg;
342 	ste_mii_readreg(sc, &frame);
343 
344 	return(frame.mii_data);
345 }
346 
347 void
348 ste_miibus_writereg(struct device *self, int phy, int reg, int data)
349 {
350 	struct ste_softc	*sc = (struct ste_softc *)self;
351 	struct ste_mii_frame	frame;
352 
353 	bzero(&frame, sizeof(frame));
354 
355 	frame.mii_phyaddr = phy;
356 	frame.mii_regaddr = reg;
357 	frame.mii_data = data;
358 
359 	ste_mii_writereg(sc, &frame);
360 }
361 
362 void
363 ste_miibus_statchg(struct device *self)
364 {
365 	struct ste_softc	*sc = (struct ste_softc *)self;
366 	struct mii_data		*mii;
367 	int fdx, fcur;
368 
369 	mii = &sc->sc_mii;
370 
371 	fcur = CSR_READ_2(sc, STE_MACCTL0) & STE_MACCTL0_FULLDUPLEX;
372 	fdx = (mii->mii_media_active & IFM_GMASK) == IFM_FDX;
373 
374 	if ((fcur && fdx) || (! fcur && ! fdx))
375 		return;
376 
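	/*
	 * Stall both DMA engines before flipping the duplex bit in
	 * MACCTL0, then unstall them once the new setting is in place.
	 */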
377 	STE_SETBIT4(sc, STE_DMACTL,
378 	    STE_DMACTL_RXDMA_STALL |STE_DMACTL_TXDMA_STALL);
379 	ste_wait(sc);
380 
381 	if (fdx)
382 		STE_SETBIT2(sc, STE_MACCTL0, STE_MACCTL0_FULLDUPLEX);
383 	else
384 		STE_CLRBIT2(sc, STE_MACCTL0, STE_MACCTL0_FULLDUPLEX);
385 
386 	STE_SETBIT4(sc, STE_DMACTL,
387 	    STE_DMACTL_RXDMA_UNSTALL | STE_DMACTL_TXDMA_UNSTALL);
388 }
389 
390 int
391 ste_ifmedia_upd(struct ifnet *ifp)
392 {
393 	struct ste_softc	*sc;
394 	struct mii_data		*mii;
395 
396 	sc = ifp->if_softc;
397 	mii = &sc->sc_mii;
398 	sc->ste_link = 0;
399 	if (mii->mii_instance) {
400 		struct mii_softc	*miisc;
401 		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
402 			mii_phy_reset(miisc);
403 	}
404 	mii_mediachg(mii);
405 
406 	return(0);
407 }
408 
409 void
410 ste_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
411 {
412 	struct ste_softc	*sc;
413 	struct mii_data		*mii;
414 
415 	sc = ifp->if_softc;
416 	mii = &sc->sc_mii;
417 
418 	mii_pollstat(mii);
419 	ifmr->ifm_active = mii->mii_media_active;
420 	ifmr->ifm_status = mii->mii_media_status;
421 }
422 
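/*
 * Wait for a pending DMA stall/unstall command to complete by polling
 * the halt-in-progress bit; complain if it never does.
 */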
423 void
424 ste_wait(struct ste_softc *sc)
425 {
426 	int		i;
427 
428 	for (i = 0; i < STE_TIMEOUT; i++) {
429 		if (!(CSR_READ_4(sc, STE_DMACTL) & STE_DMACTL_DMA_HALTINPROG))
430 			break;
431 	}
432 
433 	if (i == STE_TIMEOUT)
434 		printf("%s: command never completed!\n", sc->sc_dev.dv_xname);
435 }
436 
437 /*
438  * The EEPROM is slow: give it time to come ready after issuing
439  * it a command.
440  */
441 int
442 ste_eeprom_wait(struct ste_softc *sc)
443 {
444 	int		i;
445 
446 	DELAY(1000);
447 
448 	for (i = 0; i < 100; i++) {
449 		if (CSR_READ_2(sc, STE_EEPROM_CTL) & STE_EECTL_BUSY)
450 			DELAY(1000);
451 		else
452 			break;
453 	}
454 
455 	if (i == 100) {
456 		printf("%s: eeprom failed to come ready\n",
457 		    sc->sc_dev.dv_xname);
458 		return(1);
459 	}
460 
461 	return(0);
462 }
463 
464 /*
465  * Read a sequence of words from the EEPROM. Note that ethernet address
466  * data is stored in the EEPROM in network byte order.
467  */
468 int
469 ste_read_eeprom(struct ste_softc *sc, caddr_t dest, int off, int cnt, int swap)
470 {
471 	int			err = 0, i;
472 	u_int16_t		word = 0, *ptr;
473 
474 	if (ste_eeprom_wait(sc))
475 		return(1);
476 
477 	for (i = 0; i < cnt; i++) {
478 		CSR_WRITE_2(sc, STE_EEPROM_CTL, STE_EEOPCODE_READ | (off + i));
479 		err = ste_eeprom_wait(sc);
480 		if (err)
481 			break;
482 		word = CSR_READ_2(sc, STE_EEPROM_DATA);
483 		ptr = (u_int16_t *)(dest + (i * 2));
484 		if (swap)
485 			*ptr = ntohs(word);
486 		else
487 			*ptr = word;
488 	}
489 
490 	return(err ? 1 : 0);
491 }
492 
493 void
494 ste_iff(struct ste_softc *sc)
495 {
496 	struct ifnet		*ifp = &sc->arpcom.ac_if;
497 	struct arpcom		*ac = &sc->arpcom;
498 	struct ether_multi	*enm;
499 	struct ether_multistep	step;
500 	u_int32_t		rxmode, hashes[2];
501 	int			h = 0;
502 
503 	rxmode = CSR_READ_1(sc, STE_RX_MODE);
504 	rxmode &= ~(STE_RXMODE_ALLMULTI | STE_RXMODE_BROADCAST |
505 	    STE_RXMODE_MULTIHASH | STE_RXMODE_PROMISC |
506 	    STE_RXMODE_UNICAST);
507 	bzero(hashes, sizeof(hashes));
508 	ifp->if_flags &= ~IFF_ALLMULTI;
509 
510 	/*
511 	 * Always accept broadcast frames.
512 	 * Always accept frames destined to our station address.
513 	 */
514 	rxmode |= STE_RXMODE_BROADCAST | STE_RXMODE_UNICAST;
515 
516 	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
517 		ifp->if_flags |= IFF_ALLMULTI;
518 		rxmode |= STE_RXMODE_ALLMULTI;
519 		if (ifp->if_flags & IFF_PROMISC)
520 			rxmode |= STE_RXMODE_PROMISC;
521 	} else {
522 		rxmode |= STE_RXMODE_MULTIHASH;
523 
524 		/* now program new ones */
525 		ETHER_FIRST_MULTI(step, ac, enm);
526 		while (enm != NULL) {
527 			h = ether_crc32_be(enm->enm_addrlo,
528 			    ETHER_ADDR_LEN) & 0x3F;
529 
530 			if (h < 32)
531 				hashes[0] |= (1 << h);
532 			else
533 				hashes[1] |= (1 << (h - 32));
534 
535 			ETHER_NEXT_MULTI(step, enm);
536 		}
537 	}
538 
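	/*
	 * The 64-bit multicast hash filter is programmed 16 bits at a
	 * time through MAR0-MAR3; each accepted group address sets the
	 * bit selected by the low 6 bits of its big-endian CRC.
	 */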
539 	CSR_WRITE_2(sc, STE_MAR0, hashes[0] & 0xFFFF);
540 	CSR_WRITE_2(sc, STE_MAR1, (hashes[0] >> 16) & 0xFFFF);
541 	CSR_WRITE_2(sc, STE_MAR2, hashes[1] & 0xFFFF);
542 	CSR_WRITE_2(sc, STE_MAR3, (hashes[1] >> 16) & 0xFFFF);
543 	CSR_WRITE_1(sc, STE_RX_MODE, rxmode);
544 }
545 
546 int
547 ste_intr(void *xsc)
548 {
549 	struct ste_softc	*sc;
550 	struct ifnet		*ifp;
551 	u_int16_t		status;
552 	int			claimed = 0;
553 
554 	sc = xsc;
555 	ifp = &sc->arpcom.ac_if;
556 
557 	/* See if this is really our interrupt. */
558 	if (!(CSR_READ_2(sc, STE_ISR) & STE_ISR_INTLATCH))
559 		return claimed;
560 
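	/*
	 * Reading STE_ISR_ACK returns the latched interrupt bits and
	 * acknowledges them in the same access, so keep handling causes
	 * until none of the bits we care about remain set.
	 */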
561 	for (;;) {
562 		status = CSR_READ_2(sc, STE_ISR_ACK);
563 
564 		if (!(status & STE_INTRS))
565 			break;
566 
567 		claimed = 1;
568 
569 		if (status & STE_ISR_RX_DMADONE) {
570 			ste_rxeoc(sc);
571 			ste_rxeof(sc);
572 		}
573 
574 		if (status & STE_ISR_TX_DMADONE)
575 			ste_txeof(sc);
576 
577 		if (status & STE_ISR_TX_DONE)
578 			ste_txeoc(sc);
579 
580 		if (status & STE_ISR_STATS_OFLOW) {
581 			timeout_del(&sc->sc_stats_tmo);
582 			ste_stats_update(sc);
583 		}
584 
585 		if (status & STE_ISR_LINKEVENT)
586 			mii_pollstat(&sc->sc_mii);
587 
588 		if (status & STE_ISR_HOSTERR)
589 			ste_init(sc);
590 	}
591 
592 	/* Re-enable interrupts */
593 	CSR_WRITE_2(sc, STE_IMR, STE_INTRS);
594 
595 	if (ifp->if_flags & IFF_RUNNING && !IFQ_IS_EMPTY(&ifp->if_snd))
596 		ste_start(ifp);
597 
598 	return claimed;
599 }
600 
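/*
 * If the descriptor at the software RX head has not been filled in yet,
 * scan the ring for the next completed descriptor and move the head
 * there, so that ste_rxeof() does not stall when we have fallen behind
 * the chip.
 */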
601 void
602 ste_rxeoc(struct ste_softc *sc)
603 {
604 	struct ste_chain_onefrag *cur_rx;
605 
606 	if (sc->ste_cdata.ste_rx_head->ste_ptr->ste_status == 0) {
607 		cur_rx = sc->ste_cdata.ste_rx_head;
608 		do {
609 			cur_rx = cur_rx->ste_next;
610 			/* If the ring is empty, just return. */
611 			if (cur_rx == sc->ste_cdata.ste_rx_head)
612 				return;
613 		} while (cur_rx->ste_ptr->ste_status == 0);
614 		if (sc->ste_cdata.ste_rx_head->ste_ptr->ste_status == 0) {
615 			/* We've fallen behind the chip: catch it. */
616 			sc->ste_cdata.ste_rx_head = cur_rx;
617 		}
618 	}
619 }
620 
621 /*
622  * A frame has been uploaded: pass the resulting mbuf chain up to
623  * the higher level protocols.
624  */
625 void
626 ste_rxeof(struct ste_softc *sc)
627 {
628         struct mbuf		*m;
629 	struct mbuf_list	ml = MBUF_LIST_INITIALIZER();
630         struct ifnet		*ifp;
631 	struct ste_chain_onefrag	*cur_rx;
632 	int			total_len = 0, count=0;
633 	u_int32_t		rxstat;
634 
635 	ifp = &sc->arpcom.ac_if;
636 
637 	while((rxstat = sc->ste_cdata.ste_rx_head->ste_ptr->ste_status)
638 	      & STE_RXSTAT_DMADONE) {
639 		if ((STE_RX_LIST_CNT - count) < 3)
640 			break;
641 
642 		cur_rx = sc->ste_cdata.ste_rx_head;
643 		sc->ste_cdata.ste_rx_head = cur_rx->ste_next;
644 
645 		/*
646 		 * If an error occurs, update stats, clear the
647 		 * status word and leave the mbuf cluster in place:
648 		 * it should simply get re-used next time this descriptor
649 	 	 * comes up in the ring.
650 		 */
651 		if (rxstat & STE_RXSTAT_FRAME_ERR) {
652 			ifp->if_ierrors++;
653 			cur_rx->ste_ptr->ste_status = 0;
654 			continue;
655 		}
656 
		/*
		 * If the error bit was not set, the upload complete
		 * bit should be set, which means we have a valid packet.
		 * If not, something truly strange has happened.
		 */
		if (!(rxstat & STE_RXSTAT_DMADONE)) {
			printf("%s: bad receive status -- packet dropped\n",
			    sc->sc_dev.dv_xname);
			ifp->if_ierrors++;
			cur_rx->ste_ptr->ste_status = 0;
			continue;
		}
669 
670 		/* No errors; receive the packet. */
671 		m = cur_rx->ste_mbuf;
672 		total_len = cur_rx->ste_ptr->ste_status & STE_RXSTAT_FRAMELEN;
673 
674 		/*
675 		 * Try to conjure up a new mbuf cluster. If that
676 		 * fails, it means we have an out of memory condition and
677 		 * should leave the buffer in place and continue. This will
678 		 * result in a lost packet, but there's little else we
679 		 * can do in this situation.
680 		 */
681 		if (ste_newbuf(sc, cur_rx, NULL) == ENOBUFS) {
682 			ifp->if_ierrors++;
683 			cur_rx->ste_ptr->ste_status = 0;
684 			continue;
685 		}
686 
687 		m->m_pkthdr.len = m->m_len = total_len;
688 
689 		ml_enqueue(&ml, m);
690 
691 		cur_rx->ste_ptr->ste_status = 0;
692 		count++;
693 	}
694 
695 	if_input(ifp, &ml);
696 }
697 
698 void
699 ste_txeoc(struct ste_softc *sc)
700 {
701 	u_int8_t		txstat;
702 	struct ifnet		*ifp;
703 
704 	ifp = &sc->arpcom.ac_if;
705 
706 	while ((txstat = CSR_READ_1(sc, STE_TX_STATUS)) &
707 	    STE_TXSTATUS_TXDONE) {
708 		if (txstat & STE_TXSTATUS_UNDERRUN ||
709 		    txstat & STE_TXSTATUS_EXCESSCOLLS ||
710 		    txstat & STE_TXSTATUS_RECLAIMERR) {
711 			ifp->if_oerrors++;
712 			printf("%s: transmission error: %x\n",
713 			    sc->sc_dev.dv_xname, txstat);
714 
715 			ste_init(sc);
716 
717 			if (txstat & STE_TXSTATUS_UNDERRUN &&
718 			    sc->ste_tx_thresh < ETHER_MAX_DIX_LEN) {
719 				sc->ste_tx_thresh += STE_MIN_FRAMELEN;
720 				printf("%s: tx underrun, increasing tx"
721 				    " start threshold to %d bytes\n",
722 				    sc->sc_dev.dv_xname, sc->ste_tx_thresh);
723 			}
724 			CSR_WRITE_2(sc, STE_TX_STARTTHRESH, sc->ste_tx_thresh);
725 			CSR_WRITE_2(sc, STE_TX_RECLAIM_THRESH,
726 			    (ETHER_MAX_DIX_LEN >> 4));
727 		}
728 		ste_init(sc);
729 		CSR_WRITE_2(sc, STE_TX_STATUS, txstat);
730 	}
731 }
732 
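/*
 * Walk the TX ring from the consumer index, freeing the mbufs of
 * descriptors the chip has marked DMA complete, clearing the OACTIVE
 * flag and cancelling the watchdog once the ring drains.
 */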
733 void
734 ste_txeof(struct ste_softc *sc)
735 {
736 	struct ste_chain	*cur_tx = NULL;
737 	struct ifnet		*ifp;
738 	int			idx;
739 
740 	ifp = &sc->arpcom.ac_if;
741 
742 	idx = sc->ste_cdata.ste_tx_cons;
743 	while(idx != sc->ste_cdata.ste_tx_prod) {
744 		cur_tx = &sc->ste_cdata.ste_tx_chain[idx];
745 
746 		if (!(cur_tx->ste_ptr->ste_ctl & STE_TXCTL_DMADONE))
747 			break;
748 
749 		m_freem(cur_tx->ste_mbuf);
750 		cur_tx->ste_mbuf = NULL;
751 		ifq_clr_oactive(&ifp->if_snd);
752 		ifp->if_opackets++;
753 
754 		STE_INC(idx, STE_TX_LIST_CNT);
755 	}
756 
757 	sc->ste_cdata.ste_tx_cons = idx;
758 	if (idx == sc->ste_cdata.ste_tx_prod)
759 		ifp->if_timer = 0;
760 }
761 
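/*
 * Once-a-second timer: fold the chip's collision counters into the
 * interface statistics and, while no link is up, poll the PHY so that
 * a newly negotiated link re-enables transmission.
 */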
762 void
763 ste_stats_update(void *xsc)
764 {
765 	struct ste_softc	*sc;
766 	struct ifnet		*ifp;
767 	struct mii_data		*mii;
768 	int			s;
769 
770 	s = splnet();
771 
772 	sc = xsc;
773 	ifp = &sc->arpcom.ac_if;
774 	mii = &sc->sc_mii;
775 
776 	ifp->if_collisions += CSR_READ_1(sc, STE_LATE_COLLS)
777 	    + CSR_READ_1(sc, STE_MULTI_COLLS)
778 	    + CSR_READ_1(sc, STE_SINGLE_COLLS);
779 
780 	if (!sc->ste_link) {
781 		mii_pollstat(mii);
782 		if (mii->mii_media_status & IFM_ACTIVE &&
783 		    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
784 			sc->ste_link++;
			/*
			 * We don't get a callback on re-init, so do it
			 * here; otherwise we get stuck in the wrong
			 * link state.
			 */
789 			ste_miibus_statchg((struct device *)sc);
790 			if (!IFQ_IS_EMPTY(&ifp->if_snd))
791 				ste_start(ifp);
792 		}
793 	}
794 
795 	timeout_add_sec(&sc->sc_stats_tmo, 1);
796 	splx(s);
797 }
798 
799 /*
800  * Probe for a Sundance ST201 chip. Check the PCI vendor and device
801  * IDs against our list and return a device name if we find a match.
802  */
803 int
804 ste_probe(struct device *parent, void *match, void *aux)
805 {
806 	return (pci_matchbyid((struct pci_attach_args *)aux, ste_devices,
807 	    nitems(ste_devices)));
808 }
809 
810 /*
811  * Attach the interface. Allocate softc structures, do ifmedia
812  * setup and ethernet/BPF attach.
813  */
814 void
815 ste_attach(struct device *parent, struct device *self, void *aux)
816 {
817 	const char		*intrstr = NULL;
818 	struct ste_softc	*sc = (struct ste_softc *)self;
819 	struct pci_attach_args	*pa = aux;
820 	pci_chipset_tag_t	pc = pa->pa_pc;
821 	pci_intr_handle_t	ih;
822 	struct ifnet		*ifp;
823 	bus_size_t		size;
824 
825 	pci_set_powerstate(pa->pa_pc, pa->pa_tag, PCI_PMCSR_STATE_D0);
826 
	/*
	 * Only use one PHY, since this chip reports multiple.
	 * Note: on the DFE-550TX the PHY is at 1; on the DFE-580TX
	 * it is at 0 & 1.  It is rev 0x12.
	 */
832 	if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_DLINK &&
833 	    PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_DLINK_DFE550TX &&
834 	    PCI_REVISION(pa->pa_class) == 0x12)
835 		sc->ste_one_phy = 1;
836 
837 	/*
838 	 * Map control/status registers.
839 	 */
840 
841 #ifdef STE_USEIOSPACE
842 	if (pci_mapreg_map(pa, STE_PCI_LOIO,
843 	    PCI_MAPREG_TYPE_IO, 0,
844 	    &sc->ste_btag, &sc->ste_bhandle, NULL, &size, 0)) {
845 		printf(": can't map i/o space\n");
846 		return;
847 	}
848  #else
849 	if (pci_mapreg_map(pa, STE_PCI_LOMEM,
850 	    PCI_MAPREG_TYPE_MEM|PCI_MAPREG_MEM_TYPE_32BIT, 0,
851 	    &sc->ste_btag, &sc->ste_bhandle, NULL, &size, 0)) {
852 		printf(": can't map mem space\n");
853 		return;
854 	}
855 #endif
856 
857 	/* Allocate interrupt */
858 	if (pci_intr_map(pa, &ih)) {
859 		printf(": couldn't map interrupt\n");
860 		goto fail_1;
861 	}
862 	intrstr = pci_intr_string(pc, ih);
863 	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, ste_intr, sc,
864 	    self->dv_xname);
865 	if (sc->sc_ih == NULL) {
866 		printf(": couldn't establish interrupt");
867 		if (intrstr != NULL)
868 			printf(" at %s", intrstr);
869 		printf("\n");
870 		goto fail_1;
871 	}
872 	printf(": %s", intrstr);
873 
874 	/* Reset the adapter. */
875 	ste_reset(sc);
876 
877 	/*
878 	 * Get station address from the EEPROM.
879 	 */
880 	if (ste_read_eeprom(sc, (caddr_t)&sc->arpcom.ac_enaddr,
881 	    STE_EEADDR_NODE0, 3, 0)) {
882 		printf(": failed to read station address\n");
883 		goto fail_2;
884 	}
885 
886 	printf(", address %s\n", ether_sprintf(sc->arpcom.ac_enaddr));
887 
888 	sc->ste_ldata_ptr = malloc(sizeof(struct ste_list_data) + 8,
889 	    M_DEVBUF, M_DONTWAIT);
890 	if (sc->ste_ldata_ptr == NULL) {
891 		printf(": no memory for list buffers!\n");
892 		goto fail_2;
893 	}
894 
895 	sc->ste_ldata = (struct ste_list_data *)sc->ste_ldata_ptr;
896 	bzero(sc->ste_ldata, sizeof(struct ste_list_data));
897 
898 	ifp = &sc->arpcom.ac_if;
899 	ifp->if_softc = sc;
900 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
901 	ifp->if_ioctl = ste_ioctl;
902 	ifp->if_start = ste_start;
903 	ifp->if_watchdog = ste_watchdog;
904 	IFQ_SET_MAXLEN(&ifp->if_snd, STE_TX_LIST_CNT - 1);
905 	bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);
906 	ifp->if_capabilities = IFCAP_VLAN_MTU;
907 
908 	sc->ste_tx_thresh = STE_TXSTART_THRESH;
909 
910 	sc->sc_mii.mii_ifp = ifp;
911 	sc->sc_mii.mii_readreg = ste_miibus_readreg;
912 	sc->sc_mii.mii_writereg = ste_miibus_writereg;
913 	sc->sc_mii.mii_statchg = ste_miibus_statchg;
	ifmedia_init(&sc->sc_mii.mii_media, 0, ste_ifmedia_upd, ste_ifmedia_sts);
915 	mii_attach(self, &sc->sc_mii, 0xffffffff, MII_PHY_ANY, MII_OFFSET_ANY,
916 	    0);
917 	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
918 		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
919 		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
920 	} else
921 		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
922 
923 	/*
924 	 * Call MI attach routines.
925 	 */
926 	if_attach(ifp);
927 	ether_ifattach(ifp);
928 	return;
929 
930 fail_2:
931 	pci_intr_disestablish(pc, sc->sc_ih);
932 
933 fail_1:
934 	bus_space_unmap(sc->ste_btag, sc->ste_bhandle, size);
935 }
936 
937 int
938 ste_newbuf(struct ste_softc *sc, struct ste_chain_onefrag *c, struct mbuf *m)
939 {
940 	struct mbuf		*m_new = NULL;
941 
942 	if (m == NULL) {
943 		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
944 		if (m_new == NULL)
945 			return(ENOBUFS);
946 		MCLGET(m_new, M_DONTWAIT);
947 		if (!(m_new->m_flags & M_EXT)) {
948 			m_freem(m_new);
949 			return(ENOBUFS);
950 		}
951 		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
952 	} else {
953 		m_new = m;
954 		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
955 		m_new->m_data = m_new->m_ext.ext_buf;
956 	}
957 
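	/*
	 * ETHER_ALIGN shifts the payload two bytes into the cluster so
	 * that the IP header following the 14-byte Ethernet header ends
	 * up 32-bit aligned.
	 */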
958 	m_adj(m_new, ETHER_ALIGN);
959 
960 	c->ste_mbuf = m_new;
961 	c->ste_ptr->ste_status = 0;
962 	c->ste_ptr->ste_frag.ste_addr = vtophys(mtod(m_new, vaddr_t));
963 	c->ste_ptr->ste_frag.ste_len = (ETHER_MAX_DIX_LEN + ETHER_VLAN_ENCAP_LEN) | STE_FRAG_LAST;
964 
965 	return(0);
966 }
967 
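/*
 * Set up the RX descriptor ring: give every descriptor a fresh mbuf
 * cluster and link the descriptors into a circle, both through the
 * software ste_next pointers and through the physical ste_next
 * addresses the chip follows.
 */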
968 int
969 ste_init_rx_list(struct ste_softc *sc)
970 {
971 	struct ste_chain_data	*cd;
972 	struct ste_list_data	*ld;
973 	int			i;
974 
975 	cd = &sc->ste_cdata;
976 	ld = sc->ste_ldata;
977 
978 	for (i = 0; i < STE_RX_LIST_CNT; i++) {
979 		cd->ste_rx_chain[i].ste_ptr = &ld->ste_rx_list[i];
980 		if (ste_newbuf(sc, &cd->ste_rx_chain[i], NULL) == ENOBUFS)
981 			return(ENOBUFS);
982 		if (i == (STE_RX_LIST_CNT - 1)) {
983 			cd->ste_rx_chain[i].ste_next =
984 			    &cd->ste_rx_chain[0];
985 			ld->ste_rx_list[i].ste_next =
986 			    vtophys((vaddr_t)&ld->ste_rx_list[0]);
987 		} else {
988 			cd->ste_rx_chain[i].ste_next =
989 			    &cd->ste_rx_chain[i + 1];
990 			ld->ste_rx_list[i].ste_next =
991 			    vtophys((vaddr_t)&ld->ste_rx_list[i + 1]);
992 		}
993 		ld->ste_rx_list[i].ste_status = 0;
994 	}
995 
996 	cd->ste_rx_head = &cd->ste_rx_chain[0];
997 
998 	return(0);
999 }
1000 
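/*
 * Set up the TX descriptor ring: record each descriptor's physical
 * address and link the software chain into a circle. The descriptors
 * themselves are zeroed here and filled in at transmit time.
 */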
1001 void
1002 ste_init_tx_list(struct ste_softc *sc)
1003 {
1004 	struct ste_chain_data	*cd;
1005 	struct ste_list_data	*ld;
1006 	int			i;
1007 
1008 	cd = &sc->ste_cdata;
1009 	ld = sc->ste_ldata;
1010 	for (i = 0; i < STE_TX_LIST_CNT; i++) {
1011 		cd->ste_tx_chain[i].ste_ptr = &ld->ste_tx_list[i];
1012 		cd->ste_tx_chain[i].ste_phys = vtophys((vaddr_t)&ld->ste_tx_list[i]);
1013 		if (i == (STE_TX_LIST_CNT - 1))
1014 			cd->ste_tx_chain[i].ste_next =
1015 			    &cd->ste_tx_chain[0];
1016 		else
1017 			cd->ste_tx_chain[i].ste_next =
1018 			    &cd->ste_tx_chain[i + 1];
1019 	}
1020 
1021 	bzero(ld->ste_tx_list, sizeof(struct ste_desc) * STE_TX_LIST_CNT);
1022 
1023 	cd->ste_tx_prod = 0;
1024 	cd->ste_tx_cons = 0;
1025 }
1026 
1027 void
1028 ste_init(void *xsc)
1029 {
1030 	struct ste_softc	*sc = (struct ste_softc *)xsc;
1031 	struct ifnet		*ifp = &sc->arpcom.ac_if;
1032 	struct mii_data		*mii;
1033 	int			i, s;
1034 
1035 	s = splnet();
1036 
1037 	ste_stop(sc);
1038 	/* Reset the chip to a known state. */
1039 	ste_reset(sc);
1040 
1041 	mii = &sc->sc_mii;
1042 
1043 	/* Init our MAC address */
1044 	for (i = 0; i < ETHER_ADDR_LEN; i++) {
1045 		CSR_WRITE_1(sc, STE_PAR0 + i, sc->arpcom.ac_enaddr[i]);
1046 	}
1047 
1048 	/* Init RX list */
1049 	if (ste_init_rx_list(sc) == ENOBUFS) {
1050 		printf("%s: initialization failed: no "
1051 		    "memory for RX buffers\n", sc->sc_dev.dv_xname);
1052 		ste_stop(sc);
1053 		splx(s);
1054 		return;
1055 	}
1056 
1057 	/* Set RX polling interval */
1058 	CSR_WRITE_1(sc, STE_RX_DMAPOLL_PERIOD, 64);
1059 
1060 	/* Init TX descriptors */
1061 	ste_init_tx_list(sc);
1062 
1063 	/* Set the TX freethresh value */
1064 	CSR_WRITE_1(sc, STE_TX_DMABURST_THRESH, ETHER_MAX_DIX_LEN >> 8);
1065 
1066 	/* Set the TX start threshold for best performance. */
1067 	CSR_WRITE_2(sc, STE_TX_STARTTHRESH, sc->ste_tx_thresh);
1068 
1069 	/* Set the TX reclaim threshold. */
1070 	CSR_WRITE_1(sc, STE_TX_RECLAIM_THRESH, (ETHER_MAX_DIX_LEN >> 4));
1071 
1072 	/* Program promiscuous mode and multicast filters. */
1073 	ste_iff(sc);
1074 
1075 	/* Load the address of the RX list. */
1076 	STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_RXDMA_STALL);
1077 	ste_wait(sc);
1078 	CSR_WRITE_4(sc, STE_RX_DMALIST_PTR,
1079 	    vtophys((vaddr_t)&sc->ste_ldata->ste_rx_list[0]));
1080 	STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_RXDMA_UNSTALL);
1081 	STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_RXDMA_UNSTALL);
1082 
1083 	/* Set TX polling interval (defer until we TX first packet) */
1084 	CSR_WRITE_1(sc, STE_TX_DMAPOLL_PERIOD, 0);
1085 
1086 	/* Load address of the TX list */
1087 	STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_TXDMA_STALL);
1088 	ste_wait(sc);
1089 	CSR_WRITE_4(sc, STE_TX_DMALIST_PTR, 0);
1090 	STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_TXDMA_UNSTALL);
1091 	STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_TXDMA_UNSTALL);
1092 	ste_wait(sc);
	sc->ste_tx_prev = NULL;
1094 
1095 	/* Enable receiver and transmitter */
1096 	CSR_WRITE_2(sc, STE_MACCTL0, 0);
1097 	CSR_WRITE_2(sc, STE_MACCTL1, 0);
1098 	STE_SETBIT2(sc, STE_MACCTL1, STE_MACCTL1_TX_ENABLE);
1099 	STE_SETBIT2(sc, STE_MACCTL1, STE_MACCTL1_RX_ENABLE);
1100 
1101 	/* Enable stats counters. */
1102 	STE_SETBIT2(sc, STE_MACCTL1, STE_MACCTL1_STATS_ENABLE);
1103 
1104 	/* Enable interrupts. */
1105 	CSR_WRITE_2(sc, STE_ISR, 0xFFFF);
1106 	CSR_WRITE_2(sc, STE_IMR, STE_INTRS);
1107 
1108 	/* Accept VLAN length packets */
1109 	CSR_WRITE_2(sc, STE_MAX_FRAMELEN,
1110 	    ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN);
1111 
1112 	ste_ifmedia_upd(ifp);
1113 
1114 	ifp->if_flags |= IFF_RUNNING;
1115 	ifq_clr_oactive(&ifp->if_snd);
1116 
1117 	splx(s);
1118 
1119 	timeout_set(&sc->sc_stats_tmo, ste_stats_update, sc);
1120 	timeout_add_sec(&sc->sc_stats_tmo, 1);
1121 }
1122 
1123 void
1124 ste_stop(struct ste_softc *sc)
1125 {
1126 	int			i;
1127 	struct ifnet		*ifp;
1128 
1129 	ifp = &sc->arpcom.ac_if;
1130 
1131 	timeout_del(&sc->sc_stats_tmo);
1132 
1133 	ifp->if_flags &= ~IFF_RUNNING;
1134 	ifq_clr_oactive(&ifp->if_snd);
1135 
1136 	CSR_WRITE_2(sc, STE_IMR, 0);
1137 	STE_SETBIT2(sc, STE_MACCTL1, STE_MACCTL1_TX_DISABLE);
1138 	STE_SETBIT2(sc, STE_MACCTL1, STE_MACCTL1_RX_DISABLE);
1139 	STE_SETBIT2(sc, STE_MACCTL1, STE_MACCTL1_STATS_DISABLE);
1140 	STE_SETBIT2(sc, STE_DMACTL, STE_DMACTL_TXDMA_STALL);
1141 	STE_SETBIT2(sc, STE_DMACTL, STE_DMACTL_RXDMA_STALL);
1142 	ste_wait(sc);
	/*
	 * Try really hard to stop the RX engine; otherwise, under heavy
	 * RX traffic, the chip will write into de-allocated memory.
	 */
1147 	ste_reset(sc);
1148 
1149 	sc->ste_link = 0;
1150 
1151 	for (i = 0; i < STE_RX_LIST_CNT; i++) {
1152 		if (sc->ste_cdata.ste_rx_chain[i].ste_mbuf != NULL) {
1153 			m_freem(sc->ste_cdata.ste_rx_chain[i].ste_mbuf);
1154 			sc->ste_cdata.ste_rx_chain[i].ste_mbuf = NULL;
1155 		}
1156 	}
1157 
1158 	for (i = 0; i < STE_TX_LIST_CNT; i++) {
1159 		if (sc->ste_cdata.ste_tx_chain[i].ste_mbuf != NULL) {
1160 			m_freem(sc->ste_cdata.ste_tx_chain[i].ste_mbuf);
1161 			sc->ste_cdata.ste_tx_chain[i].ste_mbuf = NULL;
1162 		}
1163 	}
1164 
1165 	bzero(sc->ste_ldata, sizeof(struct ste_list_data));
1166 }
1167 
1168 void
1169 ste_reset(struct ste_softc *sc)
1170 {
1171 	int		i;
1172 
1173 	STE_SETBIT4(sc, STE_ASICCTL,
1174 	    STE_ASICCTL_GLOBAL_RESET|STE_ASICCTL_RX_RESET|
1175 	    STE_ASICCTL_TX_RESET|STE_ASICCTL_DMA_RESET|
1176 	    STE_ASICCTL_FIFO_RESET|STE_ASICCTL_NETWORK_RESET|
1177 	    STE_ASICCTL_AUTOINIT_RESET|STE_ASICCTL_HOST_RESET|
1178 	    STE_ASICCTL_EXTRESET_RESET);
1179 
1180 	DELAY(100000);
1181 
1182 	for (i = 0; i < STE_TIMEOUT; i++) {
1183 		if (!(CSR_READ_4(sc, STE_ASICCTL) & STE_ASICCTL_RESET_BUSY))
1184 			break;
1185 	}
1186 
1187 	if (i == STE_TIMEOUT)
1188 		printf("%s: global reset never completed\n",
1189 		    sc->sc_dev.dv_xname);
1190 }
1191 
1192 int
1193 ste_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
1194 {
1195 	struct ste_softc	*sc = ifp->if_softc;
1196 	struct ifreq		*ifr = (struct ifreq *) data;
1197 	int			s, error = 0;
1198 
1199 	s = splnet();
1200 
1201 	switch(command) {
1202 	case SIOCSIFADDR:
1203 		ifp->if_flags |= IFF_UP;
1204 		if (!(ifp->if_flags & IFF_RUNNING))
1205 			ste_init(sc);
1206 		break;
1207 
1208 	case SIOCSIFFLAGS:
1209 		if (ifp->if_flags & IFF_UP) {
1210 			if (ifp->if_flags & IFF_RUNNING)
1211 				error = ENETRESET;
1212 			else {
1213 				sc->ste_tx_thresh = STE_TXSTART_THRESH;
1214 				ste_init(sc);
1215 			}
1216 		} else {
1217 			if (ifp->if_flags & IFF_RUNNING)
1218 				ste_stop(sc);
1219 		}
1220 		break;
1221 
1222 	case SIOCGIFMEDIA:
1223 	case SIOCSIFMEDIA:
1224 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, command);
1225 		break;
1226 
1227 	default:
1228 		error = ether_ioctl(ifp, &sc->arpcom, command, data);
1229 	}
1230 
1231 	if (error == ENETRESET) {
1232 		if (ifp->if_flags & IFF_RUNNING)
1233 			ste_iff(sc);
1234 		error = 0;
1235 	}
1236 
1237 	splx(s);
1238 	return(error);
1239 }
1240 
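/*
 * Load a packet into a TX descriptor. A descriptor holds at most
 * STE_MAXFRAGS scatter/gather fragments; if the mbuf chain needs more,
 * it is copied into a single mbuf (or cluster) and the encapsulation
 * is retried.
 */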
1241 int
1242 ste_encap(struct ste_softc *sc, struct ste_chain *c, struct mbuf *m_head)
1243 {
1244 	int			frag = 0;
1245 	struct ste_frag		*f = NULL;
1246 	struct mbuf		*m;
1247 	struct ste_desc		*d;
1248 
1249 	d = c->ste_ptr;
1250 	d->ste_ctl = 0;
1251 
1252 encap_retry:
1253 	for (m = m_head, frag = 0; m != NULL; m = m->m_next) {
1254 		if (m->m_len != 0) {
1255 			if (frag == STE_MAXFRAGS)
1256 				break;
1257 			f = &d->ste_frags[frag];
1258 			f->ste_addr = vtophys(mtod(m, vaddr_t));
1259 			f->ste_len = m->m_len;
1260 			frag++;
1261 		}
1262 	}
1263 
1264 	if (m != NULL) {
1265 		struct mbuf *mn;
1266 
1267 		/*
1268 		 * We ran out of segments. We have to recopy this
1269 		 * mbuf chain first. Bail out if we can't get the
1270 		 * new buffers.
1271 		 */
1272 		MGETHDR(mn, M_DONTWAIT, MT_DATA);
1273 		if (mn == NULL) {
1274 			m_freem(m_head);
1275 			return ENOMEM;
1276 		}
1277 		if (m_head->m_pkthdr.len > MHLEN) {
1278 			MCLGET(mn, M_DONTWAIT);
1279 			if ((mn->m_flags & M_EXT) == 0) {
1280 				m_freem(mn);
1281 				m_freem(m_head);
1282 				return ENOMEM;
1283 			}
1284 		}
1285 		m_copydata(m_head, 0, m_head->m_pkthdr.len,
1286 			   mtod(mn, caddr_t));
1287 		mn->m_pkthdr.len = mn->m_len = m_head->m_pkthdr.len;
1288 		m_freem(m_head);
1289 		m_head = mn;
1290 		goto encap_retry;
1291 	}
1292 
1293 	c->ste_mbuf = m_head;
1294 	d->ste_frags[frag - 1].ste_len |= STE_FRAG_LAST;
1295 	d->ste_ctl = 1;
1296 
1297 	return(0);
1298 }
1299 
1300 void
1301 ste_start(struct ifnet *ifp)
1302 {
1303 	struct ste_softc	*sc;
1304 	struct mbuf		*m_head = NULL;
1305 	struct ste_chain	*cur_tx;
1306 	int			idx;
1307 
1308 	sc = ifp->if_softc;
1309 
1310 	if (!sc->ste_link)
1311 		return;
1312 
1313 	if (ifq_is_oactive(&ifp->if_snd))
1314 		return;
1315 
1316 	idx = sc->ste_cdata.ste_tx_prod;
1317 
1318 	while(sc->ste_cdata.ste_tx_chain[idx].ste_mbuf == NULL) {
1319 		/*
1320 		 * We cannot re-use the last (free) descriptor;
1321 		 * the chip may not have read its ste_next yet.
1322 		 */
1323 		if (STE_NEXT(idx, STE_TX_LIST_CNT) ==
1324 		    sc->ste_cdata.ste_tx_cons) {
1325 			ifq_set_oactive(&ifp->if_snd);
1326 			break;
1327 		}
1328 
1329 		IFQ_DEQUEUE(&ifp->if_snd, m_head);
1330 		if (m_head == NULL)
1331 			break;
1332 
1333 		cur_tx = &sc->ste_cdata.ste_tx_chain[idx];
1334 
1335 		if (ste_encap(sc, cur_tx, m_head) != 0)
1336 			break;
1337 
1338 		cur_tx->ste_ptr->ste_next = 0;
1339 
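		/*
		 * For the first frame after (re)init there is no previous
		 * descriptor to chain onto: load the TX DMA list pointer
		 * and start the polling timer. Later frames are appended
		 * by pointing the previous descriptor's ste_next at the
		 * physical address of this one.
		 */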
1340 		if (sc->ste_tx_prev == NULL) {
1341 			cur_tx->ste_ptr->ste_ctl = STE_TXCTL_DMAINTR | 1;
1342 			/* Load address of the TX list */
1343 			STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_TXDMA_STALL);
1344 			ste_wait(sc);
1345 
1346 			CSR_WRITE_4(sc, STE_TX_DMALIST_PTR,
1347 			    vtophys((vaddr_t)&sc->ste_ldata->ste_tx_list[0]));
1348 
1349 			/* Set TX polling interval to start TX engine */
1350 			CSR_WRITE_1(sc, STE_TX_DMAPOLL_PERIOD, 64);
1351 
1352 			STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_TXDMA_UNSTALL);
1353 			ste_wait(sc);
		} else {
1355 			cur_tx->ste_ptr->ste_ctl = STE_TXCTL_DMAINTR | 1;
1356 			sc->ste_tx_prev->ste_ptr->ste_next
1357 				= cur_tx->ste_phys;
1358 		}
1359 
1360 		sc->ste_tx_prev = cur_tx;
1361 
1362 #if NBPFILTER > 0
1363 		/*
1364 		 * If there's a BPF listener, bounce a copy of this frame
1365 		 * to him.
1366 	 	 */
1367 		if (ifp->if_bpf)
1368 			bpf_mtap(ifp->if_bpf, cur_tx->ste_mbuf,
1369 			    BPF_DIRECTION_OUT);
1370 #endif
1371 
1372 		STE_INC(idx, STE_TX_LIST_CNT);
1373 		ifp->if_timer = 5;
1374 	}
1375 	sc->ste_cdata.ste_tx_prod = idx;
1376 }
1377 
1378 void
1379 ste_watchdog(struct ifnet *ifp)
1380 {
1381 	struct ste_softc	*sc;
1382 
1383 	sc = ifp->if_softc;
1384 
1385 	ifp->if_oerrors++;
1386 	printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);
1387 
1388 	ste_txeoc(sc);
1389 	ste_txeof(sc);
1390 	ste_rxeoc(sc);
1391 	ste_rxeof(sc);
1392 	ste_init(sc);
1393 
1394 	if (!IFQ_IS_EMPTY(&ifp->if_snd))
1395 		ste_start(ifp);
1396 }
1397